Diffstat (limited to 'src')
25 files changed, 598 insertions, 107 deletions
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c index 67f8e70f9c1..7dc121f73f1 100644 --- a/src/backend/commands/trigger.c +++ b/src/backend/commands/trigger.c @@ -80,6 +80,7 @@ static bool GetTupleForTrigger(EState *estate, ItemPointer tid, LockTupleMode lockmode, TupleTableSlot *oldslot, + bool do_epq_recheck, TupleTableSlot **epqslot, TM_Result *tmresultp, TM_FailureData *tmfdp); @@ -2693,7 +2694,8 @@ ExecBRDeleteTriggers(EState *estate, EPQState *epqstate, HeapTuple fdw_trigtuple, TupleTableSlot **epqslot, TM_Result *tmresult, - TM_FailureData *tmfd) + TM_FailureData *tmfd, + bool is_merge_delete) { TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo); TriggerDesc *trigdesc = relinfo->ri_TrigDesc; @@ -2708,9 +2710,17 @@ ExecBRDeleteTriggers(EState *estate, EPQState *epqstate, { TupleTableSlot *epqslot_candidate = NULL; + /* + * Get a copy of the on-disk tuple we are planning to delete. In + * general, if the tuple has been concurrently updated, we should + * recheck it using EPQ. However, if this is a MERGE DELETE action, + * we skip this EPQ recheck and leave it to the caller (it must do + * additional rechecking, and might end up executing a different + * action entirely). + */ if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid, - LockTupleExclusive, slot, &epqslot_candidate, - tmresult, tmfd)) + LockTupleExclusive, slot, !is_merge_delete, + &epqslot_candidate, tmresult, tmfd)) return false; /* @@ -2800,6 +2810,7 @@ ExecARDeleteTriggers(EState *estate, tupleid, LockTupleExclusive, slot, + false, NULL, NULL, NULL); @@ -2944,7 +2955,8 @@ ExecBRUpdateTriggers(EState *estate, EPQState *epqstate, HeapTuple fdw_trigtuple, TupleTableSlot *newslot, TM_Result *tmresult, - TM_FailureData *tmfd) + TM_FailureData *tmfd, + bool is_merge_update) { TriggerDesc *trigdesc = relinfo->ri_TrigDesc; TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo); @@ -2965,10 +2977,17 @@ ExecBRUpdateTriggers(EState *estate, EPQState *epqstate, { TupleTableSlot *epqslot_candidate = NULL; - /* get a copy of the on-disk tuple we are planning to update */ + /* + * Get a copy of the on-disk tuple we are planning to update. In + * general, if the tuple has been concurrently updated, we should + * recheck it using EPQ. However, if this is a MERGE UPDATE action, + * we skip this EPQ recheck and leave it to the caller (it must do + * additional rechecking, and might end up executing a different + * action entirely). + */ if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid, - lockmode, oldslot, &epqslot_candidate, - tmresult, tmfd)) + lockmode, oldslot, !is_merge_update, + &epqslot_candidate, tmresult, tmfd)) return false; /* cancel the update action */ /* @@ -3142,6 +3161,7 @@ ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo, tupleid, LockTupleExclusive, oldslot, + false, NULL, NULL, NULL); @@ -3298,6 +3318,7 @@ GetTupleForTrigger(EState *estate, ItemPointer tid, LockTupleMode lockmode, TupleTableSlot *oldslot, + bool do_epq_recheck, TupleTableSlot **epqslot, TM_Result *tmresultp, TM_FailureData *tmfdp) @@ -3357,29 +3378,30 @@ GetTupleForTrigger(EState *estate, if (tmfd.traversed) { /* - * Recheck the tuple using EPQ. For MERGE, we leave this - * to the caller (it must do additional rechecking, and - * might end up executing a different action entirely). + * Recheck the tuple using EPQ, if requested. Otherwise, + * just return that it was concurrently updated. 
*/ - if (estate->es_plannedstmt->commandType == CMD_MERGE) + if (do_epq_recheck) { - if (tmresultp) - *tmresultp = TM_Updated; - return false; + *epqslot = EvalPlanQual(epqstate, + relation, + relinfo->ri_RangeTableIndex, + oldslot); + + /* + * If PlanQual failed for updated tuple - we must not + * process this tuple! + */ + if (TupIsNull(*epqslot)) + { + *epqslot = NULL; + return false; + } } - - *epqslot = EvalPlanQual(epqstate, - relation, - relinfo->ri_RangeTableIndex, - oldslot); - - /* - * If PlanQual failed for updated tuple - we must not - * process this tuple! - */ - if (TupIsNull(*epqslot)) + else { - *epqslot = NULL; + if (tmresultp) + *tmresultp = TM_Updated; return false; } } diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c index 53ddd25c42d..f262e7a66f7 100644 --- a/src/backend/executor/execReplication.c +++ b/src/backend/executor/execReplication.c @@ -670,7 +670,7 @@ ExecSimpleRelationUpdate(ResultRelInfo *resultRelInfo, resultRelInfo->ri_TrigDesc->trig_update_before_row) { if (!ExecBRUpdateTriggers(estate, epqstate, resultRelInfo, - tid, NULL, slot, NULL, NULL)) + tid, NULL, slot, NULL, NULL, false)) skip_tuple = true; /* "do nothing" */ } @@ -746,7 +746,7 @@ ExecSimpleRelationDelete(ResultRelInfo *resultRelInfo, resultRelInfo->ri_TrigDesc->trig_delete_before_row) { skip_tuple = !ExecBRDeleteTriggers(estate, epqstate, resultRelInfo, - tid, NULL, NULL, NULL, NULL); + tid, NULL, NULL, NULL, NULL, false); } if (!skip_tuple) diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index 54da8e7995b..7c6c2c1f6e4 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -1474,7 +1474,8 @@ ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo, return ExecBRDeleteTriggers(context->estate, context->epqstate, resultRelInfo, tupleid, oldtuple, - epqreturnslot, result, &context->tmfd); + epqreturnslot, result, &context->tmfd, + context->mtstate->operation == CMD_MERGE); } return true; @@ -2117,7 +2118,8 @@ ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo, return ExecBRUpdateTriggers(context->estate, context->epqstate, resultRelInfo, tupleid, oldtuple, slot, - result, &context->tmfd); + result, &context->tmfd, + context->mtstate->operation == CMD_MERGE); } return true; diff --git a/src/backend/storage/lmgr/generate-lwlocknames.pl b/src/backend/storage/lmgr/generate-lwlocknames.pl index 4441b7cba0c..c7a6720440d 100644 --- a/src/backend/storage/lmgr/generate-lwlocknames.pl +++ b/src/backend/storage/lmgr/generate-lwlocknames.pl @@ -10,7 +10,6 @@ use Getopt::Long; my $output_path = '.'; my $lastlockidx = -1; -my $continue = "\n"; GetOptions('outdir:s' => \$output_path); @@ -102,10 +101,8 @@ while (<$lwlocklist>) while ($lastlockidx < $lockidx - 1) { ++$lastlockidx; - $continue = ",\n"; } $lastlockidx = $lockidx; - $continue = ",\n"; # Add a "Lock" suffix to each lock name, as the C code depends on that printf $h "#define %-32s (&MainLWLockArray[$lockidx].lock)\n", diff --git a/src/backend/utils/adt/bytea.c b/src/backend/utils/adt/bytea.c index 2e539c2504e..6e7b914c563 100644 --- a/src/backend/utils/adt/bytea.c +++ b/src/backend/utils/adt/bytea.c @@ -182,27 +182,21 @@ bytea_overlay(bytea *t1, bytea *t2, int sp, int sl) * * Non-printable characters must be passed as '\nnn' (octal) and are * converted to internal form. '\' must be passed as '\\'. - * ereport(ERROR, ...) if bad form. 
- * - * BUGS: - * The input is scanned twice. - * The error checking of input is minimal. */ Datum byteain(PG_FUNCTION_ARGS) { char *inputText = PG_GETARG_CSTRING(0); Node *escontext = fcinfo->context; + size_t len = strlen(inputText); + size_t bc; char *tp; char *rp; - int bc; bytea *result; /* Recognize hex input */ if (inputText[0] == '\\' && inputText[1] == 'x') { - size_t len = strlen(inputText); - bc = (len - 2) / 2 + VARHDRSZ; /* maximum possible length */ result = palloc(bc); bc = hex_decode_safe(inputText + 2, len - 2, VARDATA(result), @@ -213,33 +207,7 @@ byteain(PG_FUNCTION_ARGS) } /* Else, it's the traditional escaped style */ - for (bc = 0, tp = inputText; *tp != '\0'; bc++) - { - if (tp[0] != '\\') - tp++; - else if ((tp[0] == '\\') && - (tp[1] >= '0' && tp[1] <= '3') && - (tp[2] >= '0' && tp[2] <= '7') && - (tp[3] >= '0' && tp[3] <= '7')) - tp += 4; - else if ((tp[0] == '\\') && - (tp[1] == '\\')) - tp += 2; - else - { - /* - * one backslash, not followed by another or ### valid octal - */ - ereturn(escontext, (Datum) 0, - (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), - errmsg("invalid input syntax for type %s", "bytea"))); - } - } - - bc += VARHDRSZ; - - result = (bytea *) palloc(bc); - SET_VARSIZE(result, bc); + result = (bytea *) palloc(len + VARHDRSZ); /* maximum possible length */ tp = inputText; rp = VARDATA(result); @@ -247,21 +215,21 @@ byteain(PG_FUNCTION_ARGS) { if (tp[0] != '\\') *rp++ = *tp++; - else if ((tp[0] == '\\') && - (tp[1] >= '0' && tp[1] <= '3') && + else if ((tp[1] >= '0' && tp[1] <= '3') && (tp[2] >= '0' && tp[2] <= '7') && (tp[3] >= '0' && tp[3] <= '7')) { - bc = VAL(tp[1]); - bc <<= 3; - bc += VAL(tp[2]); - bc <<= 3; - *rp++ = bc + VAL(tp[3]); + int v; + + v = VAL(tp[1]); + v <<= 3; + v += VAL(tp[2]); + v <<= 3; + *rp++ = v + VAL(tp[3]); tp += 4; } - else if ((tp[0] == '\\') && - (tp[1] == '\\')) + else if (tp[1] == '\\') { *rp++ = '\\'; tp += 2; @@ -269,7 +237,7 @@ byteain(PG_FUNCTION_ARGS) else { /* - * We should never get here. The first pass should not allow it. + * one backslash, not followed by another or ### valid octal */ ereturn(escontext, (Datum) 0, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), @@ -277,6 +245,9 @@ byteain(PG_FUNCTION_ARGS) } } + bc = rp - VARDATA(result); /* actual length */ + SET_VARSIZE(result, bc + VARHDRSZ); + PG_RETURN_BYTEA_P(result); } diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c index 197c1295d93..30e0da31aa3 100644 --- a/src/bin/pg_dump/pg_backup_archiver.c +++ b/src/bin/pg_dump/pg_backup_archiver.c @@ -31,6 +31,8 @@ #endif #include "catalog/pg_class_d.h" +#include "catalog/pg_largeobject_metadata_d.h" +#include "catalog/pg_shdepend_d.h" #include "common/string.h" #include "compress_io.h" #include "dumputils.h" @@ -2974,6 +2976,19 @@ _tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH) int res = REQ_SCHEMA | REQ_DATA; RestoreOptions *ropt = AH->public.ropt; + /* + * For binary upgrade mode, dump pg_largeobject_metadata and the + * associated pg_shdepend rows. This is faster to restore than the + * equivalent set of large object commands. We can only do this for + * upgrades from v12 and newer; in older versions, pg_largeobject_metadata + * was created WITH OIDS, so the OID column is hidden and won't be dumped. 
+ */ + if (ropt->binary_upgrade && AH->public.remoteVersion >= 120000 && + strcmp(te->desc, "TABLE DATA") == 0 && + (te->catalogId.oid == LargeObjectMetadataRelationId || + te->catalogId.oid == SharedDependRelationId)) + return REQ_DATA; + /* These items are treated specially */ if (strcmp(te->desc, "ENCODING") == 0 || strcmp(te->desc, "STDSTRINGS") == 0 || diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index c6226175528..604fc109416 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -49,8 +49,10 @@ #include "catalog/pg_class_d.h" #include "catalog/pg_default_acl_d.h" #include "catalog/pg_largeobject_d.h" +#include "catalog/pg_largeobject_metadata_d.h" #include "catalog/pg_proc_d.h" #include "catalog/pg_publication_d.h" +#include "catalog/pg_shdepend_d.h" #include "catalog/pg_subscription_d.h" #include "catalog/pg_type_d.h" #include "common/connect.h" @@ -209,6 +211,12 @@ static int nbinaryUpgradeClassOids = 0; static SequenceItem *sequences = NULL; static int nsequences = 0; +/* + * For binary upgrade, the dump ID of pg_largeobject_metadata is saved for use + * as a dependency for pg_shdepend and any large object comments/seclabels. + */ +static DumpId lo_metadata_dumpId; + /* Maximum number of relations to fetch in a fetchAttributeStats() call. */ #define MAX_ATTR_STATS_RELS 64 @@ -1086,6 +1094,36 @@ main(int argc, char **argv) getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE); /* + * For binary upgrade mode, dump pg_largeobject_metadata and the + * associated pg_shdepend rows. This is faster to restore than the + * equivalent set of large object commands. We can only do this for + * upgrades from v12 and newer; in older versions, pg_largeobject_metadata + * was created WITH OIDS, so the OID column is hidden and won't be dumped. + */ + if (dopt.binary_upgrade && fout->remoteVersion >= 120000) + { + TableInfo *lo_metadata = findTableByOid(LargeObjectMetadataRelationId); + TableInfo *shdepend = findTableByOid(SharedDependRelationId); + + makeTableDataInfo(&dopt, lo_metadata); + makeTableDataInfo(&dopt, shdepend); + + /* + * Save pg_largeobject_metadata's dump ID for use as a dependency for + * pg_shdepend and any large object comments/seclabels. + */ + lo_metadata_dumpId = lo_metadata->dataObj->dobj.dumpId; + addObjectDependency(&shdepend->dataObj->dobj, lo_metadata_dumpId); + + /* + * Only dump large object shdepend rows for this database. + */ + shdepend->dataObj->filtercond = "WHERE classid = 'pg_largeobject'::regclass " + "AND dbid = (SELECT oid FROM pg_database " + " WHERE datname = current_database())"; + } + + /* * In binary-upgrade mode, we do not have to worry about the actual LO * data or the associated metadata that resides in the pg_largeobject and * pg_largeobject_metadata tables, respectively. @@ -3924,10 +3962,37 @@ getLOs(Archive *fout) * as it will be copied by pg_upgrade, which simply copies the * pg_largeobject table. We *do* however dump out anything but the * data, as pg_upgrade copies just pg_largeobject, but not - * pg_largeobject_metadata, after the dump is restored. + * pg_largeobject_metadata, after the dump is restored. In versions + * before v12, this is done via proper large object commands. In + * newer versions, we dump the content of pg_largeobject_metadata and + * any associated pg_shdepend rows, which is faster to restore. (On + * <v12, pg_largeobject_metadata was created WITH OIDS, so the OID + * column is hidden and won't be dumped.) 
*/ if (dopt->binary_upgrade) - loinfo->dobj.dump &= ~DUMP_COMPONENT_DATA; + { + if (fout->remoteVersion >= 120000) + { + /* + * We should've saved pg_largeobject_metadata's dump ID before + * this point. + */ + Assert(lo_metadata_dumpId); + + loinfo->dobj.dump &= ~(DUMP_COMPONENT_DATA | DUMP_COMPONENT_ACL | DUMP_COMPONENT_DEFINITION); + + /* + * Mark the large object as dependent on + * pg_largeobject_metadata so that any large object + * comments/seclabels are dumped after it. + */ + loinfo->dobj.dependencies = (DumpId *) pg_malloc(sizeof(DumpId)); + loinfo->dobj.dependencies[0] = lo_metadata_dumpId; + loinfo->dobj.nDeps = loinfo->dobj.allocDeps = 1; + } + else + loinfo->dobj.dump &= ~DUMP_COMPONENT_DATA; + } /* * Create a "BLOBS" data item for the group, too. This is just a @@ -9039,8 +9104,20 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables) if (tbinfo->relkind == RELKIND_SEQUENCE) continue; - /* Don't bother with uninteresting tables, either */ - if (!tbinfo->interesting) + /* + * Don't bother with uninteresting tables, either. For binary + * upgrades, this is bypassed for pg_largeobject_metadata and + * pg_shdepend so that the column names are collected for the + * corresponding COPY commands. Restoring the data for those catalogs + * is faster than restoring the equivalent set of large object + * commands. We can only do this for upgrades from v12 and newer; in + * older versions, pg_largeobject_metadata was created WITH OIDS, so + * the OID column is hidden and won't be dumped. + */ + if (!tbinfo->interesting && + !(fout->dopt->binary_upgrade && fout->remoteVersion >= 120000 && + (tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId || + tbinfo->dobj.catId.oid == SharedDependRelationId))) continue; /* OK, we need info for this table */ @@ -9244,7 +9321,10 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables) pg_fatal("unrecognized table OID %u", attrelid); /* cross-check that we only got requested tables */ if (tbinfo->relkind == RELKIND_SEQUENCE || - !tbinfo->interesting) + (!tbinfo->interesting && + !(fout->dopt->binary_upgrade && fout->remoteVersion >= 120000 && + (tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId || + tbinfo->dobj.catId.oid == SharedDependRelationId)))) pg_fatal("unexpected column data for table \"%s\"", tbinfo->dobj.name); diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl index 2485d8f360e..d8330e2bd17 100644 --- a/src/bin/pg_dump/t/002_pg_dump.pl +++ b/src/bin/pg_dump/t/002_pg_dump.pl @@ -1087,6 +1087,7 @@ my %tests = ( test_schema_plus_large_objects => 1, }, unlike => { + binary_upgrade => 1, no_large_objects => 1, no_owner => 1, schema_only => 1, @@ -1605,6 +1606,7 @@ my %tests = ( test_schema_plus_large_objects => 1, }, unlike => { + binary_upgrade => 1, schema_only => 1, schema_only_with_statistics => 1, no_large_objects => 1, @@ -4612,9 +4614,9 @@ my %tests = ( no_schema => 1, section_data => 1, test_schema_plus_large_objects => 1, - binary_upgrade => 1, }, unlike => { + binary_upgrade => 1, no_large_objects => 1, no_privs => 1, schema_only => 1, diff --git a/src/include/commands/trigger.h b/src/include/commands/trigger.h index 2ed2c4bb378..cfd7daa20ed 100644 --- a/src/include/commands/trigger.h +++ b/src/include/commands/trigger.h @@ -213,7 +213,8 @@ extern bool ExecBRDeleteTriggers(EState *estate, HeapTuple fdw_trigtuple, TupleTableSlot **epqslot, TM_Result *tmresult, - TM_FailureData *tmfd); + TM_FailureData *tmfd, + bool is_merge_delete); extern void ExecARDeleteTriggers(EState 
*estate, ResultRelInfo *relinfo, ItemPointer tupleid, @@ -235,7 +236,8 @@ extern bool ExecBRUpdateTriggers(EState *estate, HeapTuple fdw_trigtuple, TupleTableSlot *newslot, TM_Result *tmresult, - TM_FailureData *tmfd); + TM_FailureData *tmfd, + bool is_merge_update); extern void ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo, ResultRelInfo *src_partinfo, diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c index 2a2b10d5a29..afa85d9fca9 100644 --- a/src/interfaces/libpq/fe-connect.c +++ b/src/interfaces/libpq/fe-connect.c @@ -7574,10 +7574,12 @@ PQport(const PGconn *conn) if (!conn) return NULL; - if (conn->connhost != NULL) + if (conn->connhost != NULL && + conn->connhost[conn->whichhost].port != NULL && + conn->connhost[conn->whichhost].port[0] != '\0') return conn->connhost[conn->whichhost].port; - return ""; + return DEF_PGPORT_STR; } /* diff --git a/src/interfaces/libpq/libpq-int.h b/src/interfaces/libpq/libpq-int.h index 70c28f2ffca..a701c25038a 100644 --- a/src/interfaces/libpq/libpq-int.h +++ b/src/interfaces/libpq/libpq-int.h @@ -357,7 +357,8 @@ typedef struct pg_conn_host pg_conn_host_type type; /* type of host address */ char *host; /* host name or socket path */ char *hostaddr; /* host numeric IP address */ - char *port; /* port number (always provided) */ + char *port; /* port number (if NULL or empty, use + * DEF_PGPORT[_STR]) */ char *password; /* password for this host, read from the * password file; NULL if not sought or not * found in password file. */ diff --git a/src/test/isolation/expected/merge-match-recheck.out b/src/test/isolation/expected/merge-match-recheck.out index 9a44a595927..90300f1db5a 100644 --- a/src/test/isolation/expected/merge-match-recheck.out +++ b/src/test/isolation/expected/merge-match-recheck.out @@ -241,19 +241,28 @@ starting permutation: update_bal1_tg merge_bal_tg c2 select1_tg c1 s2: NOTICE: Update: (1,160,s1,setup) -> (1,50,s1,"setup updated by update_bal1_tg") step update_bal1_tg: UPDATE target_tg t SET balance = 50, val = t.val || ' updated by update_bal1_tg' WHERE t.key = 1; step merge_bal_tg: - MERGE INTO target_tg t - USING (SELECT 1 as key) s - ON s.key = t.key - WHEN MATCHED AND balance < 100 THEN - UPDATE SET balance = balance * 2, val = t.val || ' when1' - WHEN MATCHED AND balance < 200 THEN - UPDATE SET balance = balance * 4, val = t.val || ' when2' - WHEN MATCHED AND balance < 300 THEN - UPDATE SET balance = balance * 8, val = t.val || ' when3'; + WITH t AS ( + MERGE INTO target_tg t + USING (SELECT 1 as key) s + ON s.key = t.key + WHEN MATCHED AND balance < 100 THEN + UPDATE SET balance = balance * 2, val = t.val || ' when1' + WHEN MATCHED AND balance < 200 THEN + UPDATE SET balance = balance * 4, val = t.val || ' when2' + WHEN MATCHED AND balance < 300 THEN + UPDATE SET balance = balance * 8, val = t.val || ' when3' + RETURNING t.* + ) + SELECT * FROM t; <waiting ...> step c2: COMMIT; s1: NOTICE: Update: (1,50,s1,"setup updated by update_bal1_tg") -> (1,100,s1,"setup updated by update_bal1_tg when1") step merge_bal_tg: <... 
completed> +key|balance|status|val +---+-------+------+------------------------------------- + 1| 100|s1 |setup updated by update_bal1_tg when1 +(1 row) + step select1_tg: SELECT * FROM target_tg; key|balance|status|val ---+-------+------+------------------------------------- diff --git a/src/test/isolation/specs/merge-match-recheck.spec b/src/test/isolation/specs/merge-match-recheck.spec index 26266b8c297..15226e40c9e 100644 --- a/src/test/isolation/specs/merge-match-recheck.spec +++ b/src/test/isolation/specs/merge-match-recheck.spec @@ -99,15 +99,19 @@ step "merge_bal_pa" } step "merge_bal_tg" { - MERGE INTO target_tg t - USING (SELECT 1 as key) s - ON s.key = t.key - WHEN MATCHED AND balance < 100 THEN - UPDATE SET balance = balance * 2, val = t.val || ' when1' - WHEN MATCHED AND balance < 200 THEN - UPDATE SET balance = balance * 4, val = t.val || ' when2' - WHEN MATCHED AND balance < 300 THEN - UPDATE SET balance = balance * 8, val = t.val || ' when3'; + WITH t AS ( + MERGE INTO target_tg t + USING (SELECT 1 as key) s + ON s.key = t.key + WHEN MATCHED AND balance < 100 THEN + UPDATE SET balance = balance * 2, val = t.val || ' when1' + WHEN MATCHED AND balance < 200 THEN + UPDATE SET balance = balance * 4, val = t.val || ' when2' + WHEN MATCHED AND balance < 300 THEN + UPDATE SET balance = balance * 8, val = t.val || ' when3' + RETURNING t.* + ) + SELECT * FROM t; } step "merge_delete" diff --git a/src/test/modules/Makefile b/src/test/modules/Makefile index aa1d27bbed3..7d3d3d52b45 100644 --- a/src/test/modules/Makefile +++ b/src/test/modules/Makefile @@ -15,6 +15,7 @@ SUBDIRS = \ plsample \ spgist_name_ops \ test_aio \ + test_binaryheap \ test_bloomfilter \ test_copy_callbacks \ test_custom_rmgrs \ diff --git a/src/test/modules/meson.build b/src/test/modules/meson.build index 9de0057bd1d..dd5cd065ba1 100644 --- a/src/test/modules/meson.build +++ b/src/test/modules/meson.build @@ -14,6 +14,7 @@ subdir('plsample') subdir('spgist_name_ops') subdir('ssl_passphrase_callback') subdir('test_aio') +subdir('test_binaryheap') subdir('test_bloomfilter') subdir('test_copy_callbacks') subdir('test_custom_rmgrs') diff --git a/src/test/modules/test_binaryheap/.gitignore b/src/test/modules/test_binaryheap/.gitignore new file mode 100644 index 00000000000..5dcb3ff9723 --- /dev/null +++ b/src/test/modules/test_binaryheap/.gitignore @@ -0,0 +1,4 @@ +# Generated subdirectories +/log/ +/results/ +/tmp_check/ diff --git a/src/test/modules/test_binaryheap/Makefile b/src/test/modules/test_binaryheap/Makefile new file mode 100644 index 00000000000..d310fbc9e88 --- /dev/null +++ b/src/test/modules/test_binaryheap/Makefile @@ -0,0 +1,24 @@ +# src/test/modules/test_binaryheap/Makefile + +MODULE_big = test_binaryheap +OBJS = \ + $(WIN32RES) \ + test_binaryheap.o + +PGFILEDESC = "test_binaryheap - test code for binaryheap" + +EXTENSION = test_binaryheap +DATA = test_binaryheap--1.0.sql + +REGRESS = test_binaryheap + +ifdef USE_PGXS +PG_CONFIG = pg_config +PGXS := $(shell $(PG_CONFIG) --pgxs) +include $(PGXS) +else +subdir = src/test/modules/test_binaryheap +top_builddir = ../../../.. 
+include $(top_builddir)/src/Makefile.global +include $(top_srcdir)/contrib/contrib-global.mk +endif diff --git a/src/test/modules/test_binaryheap/expected/test_binaryheap.out b/src/test/modules/test_binaryheap/expected/test_binaryheap.out new file mode 100644 index 00000000000..16ce07875e3 --- /dev/null +++ b/src/test/modules/test_binaryheap/expected/test_binaryheap.out @@ -0,0 +1,12 @@ +CREATE EXTENSION test_binaryheap; +-- +-- These tests don't produce any interesting output. We're checking that +-- the operations complete without crashing or hanging and that none of their +-- internal sanity tests fail. +-- +SELECT test_binaryheap(); + test_binaryheap +----------------- + +(1 row) + diff --git a/src/test/modules/test_binaryheap/meson.build b/src/test/modules/test_binaryheap/meson.build new file mode 100644 index 00000000000..816a43c93e9 --- /dev/null +++ b/src/test/modules/test_binaryheap/meson.build @@ -0,0 +1,33 @@ +# Copyright (c) 2025, PostgreSQL Global Development Group + +test_binaryheap_sources = files( + 'test_binaryheap.c', +) + +if host_system == 'windows' + test_binaryheap_sources += rc_lib_gen.process(win32ver_rc, extra_args: [ + '--NAME', 'test_binaryheap', + '--FILEDESC', 'test_binaryheap - test code for binaryheap',]) +endif + +test_binaryheap = shared_module('test_binaryheap', + test_binaryheap_sources, + kwargs: pg_test_mod_args, +) +test_install_libs += test_binaryheap + +test_install_data += files( + 'test_binaryheap.control', + 'test_binaryheap--1.0.sql', +) + +tests += { + 'name': 'test_binaryheap', + 'sd': meson.current_source_dir(), + 'bd': meson.current_build_dir(), + 'regress': { + 'sql': [ + 'test_binaryheap', + ], + }, +} diff --git a/src/test/modules/test_binaryheap/sql/test_binaryheap.sql b/src/test/modules/test_binaryheap/sql/test_binaryheap.sql new file mode 100644 index 00000000000..8439545815b --- /dev/null +++ b/src/test/modules/test_binaryheap/sql/test_binaryheap.sql @@ -0,0 +1,8 @@ +CREATE EXTENSION test_binaryheap; + +-- +-- These tests don't produce any interesting output. We're checking that +-- the operations complete without crashing or hanging and that none of their +-- internal sanity tests fail. +-- +SELECT test_binaryheap(); diff --git a/src/test/modules/test_binaryheap/test_binaryheap--1.0.sql b/src/test/modules/test_binaryheap/test_binaryheap--1.0.sql new file mode 100644 index 00000000000..cddceeee603 --- /dev/null +++ b/src/test/modules/test_binaryheap/test_binaryheap--1.0.sql @@ -0,0 +1,7 @@ +/* src/test/modules/test_binaryheap/test_binaryheap--1.0.sql */ + +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "CREATE EXTENSION test_binaryheap" to load this file. \quit + +CREATE FUNCTION test_binaryheap() RETURNS VOID + AS 'MODULE_PATHNAME' LANGUAGE C; diff --git a/src/test/modules/test_binaryheap/test_binaryheap.c b/src/test/modules/test_binaryheap/test_binaryheap.c new file mode 100644 index 00000000000..583dae1da30 --- /dev/null +++ b/src/test/modules/test_binaryheap/test_binaryheap.c @@ -0,0 +1,275 @@ +/*-------------------------------------------------------------------------- + * + * test_binaryheap.c + * Test correctness of binary heap implementation. 
+ * + * Copyright (c) 2025, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/test/modules/test_binaryheap/test_binaryheap.c + * + * ------------------------------------------------------------------------- + */ + +#include "postgres.h" + +#include "common/int.h" +#include "common/pg_prng.h" +#include "fmgr.h" +#include "lib/binaryheap.h" + +PG_MODULE_MAGIC; + +/* + * Test binaryheap_comparator for max-heap of integers. + */ +static int +int_cmp(Datum a, Datum b, void *arg) +{ + return pg_cmp_s32(DatumGetInt32(a), DatumGetInt32(b)); +} + +/* + * Loops through all nodes and returns the maximum value. + */ +static int +get_max_from_heap(binaryheap *heap) +{ + int max = -1; + + for (int i = 0; i < binaryheap_size(heap); i++) + max = Max(max, DatumGetInt32(binaryheap_get_node(heap, i))); + + return max; +} + +/* + * Generate a random permutation of the integers 0..size-1. + */ +static int * +get_permutation(int size) +{ + int *permutation = (int *) palloc(size * sizeof(int)); + + permutation[0] = 0; + + /* + * This is the "inside-out" variant of the Fisher-Yates shuffle algorithm. + * Notionally, we append each new value to the array and then swap it with + * a randomly-chosen array element (possibly including itself, else we + * fail to generate permutations with the last integer last). The swap + * step can be optimized by combining it with the insertion. + */ + for (int i = 1; i < size; i++) + { + int j = pg_prng_uint64_range(&pg_global_prng_state, 0, i); + + if (j < i) /* avoid fetching undefined data if j=i */ + permutation[i] = permutation[j]; + permutation[j] = i; + } + + return permutation; +} + +/* + * Ensure that the heap property holds for the given heap, i.e., each parent is + * greater than or equal to its children. + */ +static void +verify_heap_property(binaryheap *heap) +{ + for (int i = 0; i < binaryheap_size(heap); i++) + { + int left = 2 * i + 1; + int right = 2 * i + 2; + int parent_val = DatumGetInt32(binaryheap_get_node(heap, i)); + + if (left < binaryheap_size(heap) && + parent_val < DatumGetInt32(binaryheap_get_node(heap, left))) + elog(ERROR, "parent node less than left child"); + + if (right < binaryheap_size(heap) && + parent_val < DatumGetInt32(binaryheap_get_node(heap, right))) + elog(ERROR, "parent node less than right child"); + } +} + +/* + * Check correctness of basic operations. + */ +static void +test_basic(int size) +{ + binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL); + int *permutation = get_permutation(size); + + if (!binaryheap_empty(heap)) + elog(ERROR, "new heap not empty"); + if (binaryheap_size(heap) != 0) + elog(ERROR, "wrong size for new heap"); + + for (int i = 0; i < size; i++) + { + binaryheap_add(heap, Int32GetDatum(permutation[i])); + verify_heap_property(heap); + } + + if (binaryheap_empty(heap)) + elog(ERROR, "heap empty after adding values"); + if (binaryheap_size(heap) != size) + elog(ERROR, "wrong size for heap after adding values"); + + if (DatumGetInt32(binaryheap_first(heap)) != get_max_from_heap(heap)) + elog(ERROR, "incorrect root node after adding values"); + + for (int i = 0; i < size; i++) + { + int expected = get_max_from_heap(heap); + int actual = DatumGetInt32(binaryheap_remove_first(heap)); + + if (actual != expected) + elog(ERROR, "incorrect root node after removing root"); + verify_heap_property(heap); + } + + if (!binaryheap_empty(heap)) + elog(ERROR, "heap not empty after removing all nodes"); +} + +/* + * Test building heap after unordered additions. 
+ */ +static void +test_build(int size) +{ + binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL); + int *permutation = get_permutation(size); + + for (int i = 0; i < size; i++) + binaryheap_add_unordered(heap, Int32GetDatum(permutation[i])); + + if (binaryheap_size(heap) != size) + elog(ERROR, "wrong size for heap after unordered additions"); + + binaryheap_build(heap); + verify_heap_property(heap); +} + +/* + * Test removing nodes. + */ +static void +test_remove_node(int size) +{ + binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL); + int *permutation = get_permutation(size); + int remove_count = pg_prng_uint64_range(&pg_global_prng_state, + 0, size - 1); + + for (int i = 0; i < size; i++) + binaryheap_add(heap, Int32GetDatum(permutation[i])); + + for (int i = 0; i < remove_count; i++) + { + int idx = pg_prng_uint64_range(&pg_global_prng_state, + 0, binaryheap_size(heap) - 1); + + binaryheap_remove_node(heap, idx); + verify_heap_property(heap); + } + + if (binaryheap_size(heap) != size - remove_count) + elog(ERROR, "wrong size after removing nodes"); +} + +/* + * Test replacing the root node. + */ +static void +test_replace_first(int size) +{ + binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL); + + for (int i = 0; i < size; i++) + binaryheap_add(heap, Int32GetDatum(i)); + + /* + * Replace root with a value smaller than everything in the heap. + */ + binaryheap_replace_first(heap, Int32GetDatum(-1)); + verify_heap_property(heap); + + /* + * Replace root with a value in the middle of the heap. + */ + binaryheap_replace_first(heap, Int32GetDatum(size / 2)); + verify_heap_property(heap); + + /* + * Replace root with a larger value than everything in the heap. + */ + binaryheap_replace_first(heap, Int32GetDatum(size + 1)); + verify_heap_property(heap); +} + +/* + * Test duplicate values. + */ +static void +test_duplicates(int size) +{ + binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL); + int dup = pg_prng_uint64_range(&pg_global_prng_state, 0, size - 1); + + for (int i = 0; i < size; i++) + binaryheap_add(heap, Int32GetDatum(dup)); + + for (int i = 0; i < size; i++) + { + if (DatumGetInt32(binaryheap_remove_first(heap)) != dup) + elog(ERROR, "unexpected value in heap with duplicates"); + } +} + +/* + * Test resetting. + */ +static void +test_reset(int size) +{ + binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL); + + for (int i = 0; i < size; i++) + binaryheap_add(heap, Int32GetDatum(i)); + + binaryheap_reset(heap); + + if (!binaryheap_empty(heap)) + elog(ERROR, "heap not empty after resetting"); +} + +/* + * SQL-callable entry point to perform all tests. 
*/ +PG_FUNCTION_INFO_V1(test_binaryheap); + +Datum +test_binaryheap(PG_FUNCTION_ARGS) +{ + static const int test_sizes[] = {1, 2, 3, 10, 100, 1000}; + + for (int i = 0; i < sizeof(test_sizes) / sizeof(int); i++) + { + int size = test_sizes[i]; + + test_basic(size); + test_build(size); + test_remove_node(size); + test_replace_first(size); + test_duplicates(size); + test_reset(size); + } + + PG_RETURN_VOID(); +} diff --git a/src/test/modules/test_binaryheap/test_binaryheap.control b/src/test/modules/test_binaryheap/test_binaryheap.control new file mode 100644 index 00000000000..dd0785e05bd --- /dev/null +++ b/src/test/modules/test_binaryheap/test_binaryheap.control @@ -0,0 +1,5 @@ +# test_binaryheap extension +comment = 'Test code for binaryheap' +default_version = '1.0' +module_pathname = '$libdir/test_binaryheap' +relocatable = true diff --git a/src/test/regress/expected/strings.out b/src/test/regress/expected/strings.out index 788844abd20..1bfd33de3f3 100644 --- a/src/test/regress/expected/strings.out +++ b/src/test/regress/expected/strings.out @@ -236,6 +236,12 @@ SELECT E'De\\678dBeEf'::bytea; ERROR: invalid input syntax for type bytea LINE 1: SELECT E'De\\678dBeEf'::bytea; ^ +SELECT E'DeAd\\\\BeEf'::bytea; + bytea +---------------------- + \x446541645c42654566 +(1 row) + SELECT reverse(''::bytea); reverse --------- @@ -291,6 +297,12 @@ SELECT E'De\\123dBeEf'::bytea; DeSdBeEf (1 row) +SELECT E'DeAd\\\\BeEf'::bytea; + bytea +------------ + DeAd\\BeEf +(1 row) + -- Test non-error-throwing API too SELECT pg_input_is_valid(E'\\xDeAdBeE', 'bytea'); pg_input_is_valid diff --git a/src/test/regress/sql/strings.sql b/src/test/regress/sql/strings.sql index 2577a42987d..92c445c2439 100644 --- a/src/test/regress/sql/strings.sql +++ b/src/test/regress/sql/strings.sql @@ -76,6 +76,7 @@ SELECT E'De\\000dBeEf'::bytea; SELECT E'De\123dBeEf'::bytea; SELECT E'De\\123dBeEf'::bytea; SELECT E'De\\678dBeEf'::bytea; +SELECT E'DeAd\\\\BeEf'::bytea; SELECT reverse(''::bytea); SELECT reverse('\xaa'::bytea); @@ -88,6 +89,7 @@ SELECT E'\\xDe00BeEf'::bytea; SELECT E'DeAdBeEf'::bytea; SELECT E'De\\000dBeEf'::bytea; SELECT E'De\\123dBeEf'::bytea; +SELECT E'DeAd\\\\BeEf'::bytea; -- Test non-error-throwing API too SELECT pg_input_is_valid(E'\\xDeAdBeE', 'bytea');
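
The reworked byteain() above folds the old two-pass scan of escaped input (validate and measure, then decode) into a single decoding pass over a worst-case allocation, relying on the fact that decoded output is never longer than the input text. A self-contained sketch of that single-pass decode for the traditional escape format; the function name, buffer handling, and -1 error convention are illustrative, not the backend's:

#include <stdio.h>

#define VAL(CH) ((CH) - '0')

/*
 * Decode bytea escape format in one pass: "\\" yields a backslash, "\nnn"
 * (octal, first digit 0-3) yields one byte, anything else copies through.
 * Returns the decoded length, or -1 on a malformed backslash sequence.
 */
static int
decode_bytea_escape(const char *in, unsigned char *out)
{
	const char *tp = in;
	unsigned char *rp = out;

	while (*tp != '\0')
	{
		if (tp[0] != '\\')
			*rp++ = (unsigned char) *tp++;
		else if (tp[1] >= '0' && tp[1] <= '3' &&
				 tp[2] >= '0' && tp[2] <= '7' &&
				 tp[3] >= '0' && tp[3] <= '7')
		{
			/* three octal digits: first digit << 6, second << 3, third */
			int v = (VAL(tp[1]) << 6) + (VAL(tp[2]) << 3) + VAL(tp[3]);

			*rp++ = (unsigned char) v;
			tp += 4;
		}
		else if (tp[1] == '\\')
		{
			*rp++ = '\\';
			tp += 2;
		}
		else
			return -1;			/* lone backslash: reject, as byteain does */
	}
	return (int) (rp - out);
}

int
main(void)
{
	const char *in = "De\\123d\\\\BeEf";	/* escape form: De\123d\\BeEf */
	unsigned char out[64];		/* decoded output never exceeds strlen(in) */
	int n = decode_bytea_escape(in, out);

	if (n >= 0)
		printf("%.*s\n", n, (char *) out);	/* prints DeSd\BeEf */
	return 0;
}

The example input mirrors the De\123dBeEf and DeAd\\BeEf cases exercised in the strings regression test above.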
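
With the PQport() change above, a caller that never specified a port now gets the compiled-in default port string back (normally "5432") instead of an empty string. A minimal usage sketch; the conninfo values are hypothetical, while PQconnectdb(), PQstatus(), PQerrorMessage(), PQport(), and PQfinish() are the standard libpq calls:

#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
	/* No port= in the conninfo, so libpq uses its compiled-in default. */
	PGconn *conn = PQconnectdb("host=localhost dbname=postgres");

	if (PQstatus(conn) != CONNECTION_OK)
		fprintf(stderr, "%s", PQerrorMessage(conn));
	else
		printf("port: %s\n", PQport(conn));	/* now "5432", previously "" */

	PQfinish(conn);
	return 0;
}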
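
get_permutation() in test_binaryheap.c implements the inside-out variant of the Fisher-Yates shuffle described in its comment: each new value i is notionally appended and then swapped with a randomly chosen slot j <= i, with the append and swap folded into one assignment pair. A standalone sketch, assuming C's rand() as a stand-in for the backend's pg_prng (rand() % (i + 1) is slightly biased, but adequate for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/*
 * Inside-out Fisher-Yates: build a random permutation of 0..size-1 in a[].
 * Assumes size >= 1, as the test module's sizes always are.
 */
static void
permute(int *a, int size)
{
	a[0] = 0;
	for (int i = 1; i < size; i++)
	{
		int j = rand() % (i + 1);	/* j may equal i, leaving i in place */

		if (j < i)				/* avoid reading a[i] before it is set */
			a[i] = a[j];
		a[j] = i;
	}
}

int
main(void)
{
	int a[10];

	srand((unsigned) time(NULL));
	permute(a, 10);
	for (int i = 0; i < 10; i++)
		printf("%d ", a[i]);
	putchar('\n');
	return 0;
}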