Diffstat (limited to 'src')
-rw-r--r--  src/backend/access/common/toast_compression.c | 14
-rw-r--r--  src/backend/access/nbtree/nbtsearch.c | 14
-rw-r--r--  src/backend/executor/execExprInterp.c | 1
-rw-r--r--  src/backend/libpq/pg_ident.conf.sample | 26
-rw-r--r--  src/backend/main/main.c | 16
-rw-r--r--  src/backend/replication/pgoutput/pgoutput.c | 2
-rw-r--r--  src/backend/storage/lmgr/lwlock.c | 4
-rw-r--r--  src/backend/tcop/pquery.c | 25
-rw-r--r--  src/backend/utils/adt/jsonb_util.c | 43
-rw-r--r--  src/backend/utils/init/postinit.c | 11
-rw-r--r--  src/bin/pg_dump/pg_dump.c | 23
-rw-r--r--  src/bin/pg_dump/pg_dumpall.c | 2
-rw-r--r--  src/bin/psql/help.c | 2
-rw-r--r--  src/interfaces/libpq/fe-connect.c | 6
-rw-r--r--  src/interfaces/libpq/libpq-int.h | 3
-rw-r--r--  src/test/modules/Makefile | 1
-rw-r--r--  src/test/modules/meson.build | 1
-rw-r--r--  src/test/modules/test_binaryheap/.gitignore | 4
-rw-r--r--  src/test/modules/test_binaryheap/Makefile | 24
-rw-r--r--  src/test/modules/test_binaryheap/expected/test_binaryheap.out | 12
-rw-r--r--  src/test/modules/test_binaryheap/meson.build | 33
-rw-r--r--  src/test/modules/test_binaryheap/sql/test_binaryheap.sql | 8
-rw-r--r--  src/test/modules/test_binaryheap/test_binaryheap--1.0.sql | 7
-rw-r--r--  src/test/modules/test_binaryheap/test_binaryheap.c | 275
-rw-r--r--  src/test/modules/test_binaryheap/test_binaryheap.control | 5
-rw-r--r--  src/test/regress/expected/compression.out | 235
-rw-r--r--  src/test/regress/expected/compression_1.out | 360
-rw-r--r--  src/test/regress/expected/compression_lz4.out | 249
-rw-r--r--  src/test/regress/expected/compression_lz4_1.out | 7
-rw-r--r--  src/test/regress/expected/constraints.out | 4
-rw-r--r--  src/test/regress/parallel_schedule | 2
-rw-r--r--  src/test/regress/sql/compression.sql | 84
-rw-r--r--  src/test/regress/sql/compression_lz4.sql | 129
-rw-r--r--  src/test/regress/sql/constraints.sql | 6
34 files changed, 900 insertions, 738 deletions
diff --git a/src/backend/access/common/toast_compression.c b/src/backend/access/common/toast_compression.c
index 21f2f4af97e..926f1e4008a 100644
--- a/src/backend/access/common/toast_compression.c
+++ b/src/backend/access/common/toast_compression.c
@@ -25,11 +25,11 @@
/* GUC */
int default_toast_compression = TOAST_PGLZ_COMPRESSION;
-#define NO_LZ4_SUPPORT() \
+#define NO_COMPRESSION_SUPPORT(method) \
ereport(ERROR, \
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED), \
- errmsg("compression method lz4 not supported"), \
- errdetail("This functionality requires the server to be built with lz4 support.")))
+ errmsg("compression method %s not supported", method), \
+ errdetail("This functionality requires the server to be built with %s support.", method)))
/*
* Compress a varlena using PGLZ.
@@ -139,7 +139,7 @@ struct varlena *
lz4_compress_datum(const struct varlena *value)
{
#ifndef USE_LZ4
- NO_LZ4_SUPPORT();
+ NO_COMPRESSION_SUPPORT("lz4");
return NULL; /* keep compiler quiet */
#else
int32 valsize;
@@ -182,7 +182,7 @@ struct varlena *
lz4_decompress_datum(const struct varlena *value)
{
#ifndef USE_LZ4
- NO_LZ4_SUPPORT();
+ NO_COMPRESSION_SUPPORT("lz4");
return NULL; /* keep compiler quiet */
#else
int32 rawsize;
@@ -215,7 +215,7 @@ struct varlena *
lz4_decompress_datum_slice(const struct varlena *value, int32 slicelength)
{
#ifndef USE_LZ4
- NO_LZ4_SUPPORT();
+ NO_COMPRESSION_SUPPORT("lz4");
return NULL; /* keep compiler quiet */
#else
int32 rawsize;
@@ -289,7 +289,7 @@ CompressionNameToMethod(const char *compression)
else if (strcmp(compression, "lz4") == 0)
{
#ifndef USE_LZ4
- NO_LZ4_SUPPORT();
+ NO_COMPRESSION_SUPPORT("lz4");
#endif
return TOAST_LZ4_COMPRESSION;
}
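
For illustration, a hypothetical future method (zstd is used here purely as a made-up example; this change adds no such method) could now reuse the generalized macro instead of defining another one-off NO_*_SUPPORT variant:

struct varlena *
zstd_compress_datum(const struct varlena *value)
{
#ifndef USE_ZSTD
	NO_COMPRESSION_SUPPORT("zstd");
	return NULL;				/* keep compiler quiet */
#else
	return NULL;				/* real zstd compression would go here */
#endif
}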
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index 4af1ff1e9e5..d69798795b4 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -892,9 +892,9 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
OffsetNumber offnum;
BTScanInsertData inskey;
ScanKey startKeys[INDEX_MAX_KEYS];
- ScanKeyData notnullkeys[INDEX_MAX_KEYS];
+ ScanKeyData notnullkey;
int keysz = 0;
- StrategyNumber strat_total;
+ StrategyNumber strat_total = InvalidStrategy;
BlockNumber blkno = InvalidBlockNumber,
lastcurrblkno;
@@ -1034,7 +1034,6 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* need to be kept in sync.
*----------
*/
- strat_total = BTEqualStrategyNumber;
if (so->numberOfKeys > 0)
{
AttrNumber curattr;
@@ -1122,16 +1121,15 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
ScanDirectionIsForward(dir) :
ScanDirectionIsBackward(dir)))
{
- /* Yes, so build the key in notnullkeys[keysz] */
- bkey = &notnullkeys[keysz];
+ /* Final startKeys[] entry will be the deduced NOT NULL key */
+ bkey = &notnullkey;
ScanKeyEntryInitialize(bkey,
(SK_SEARCHNOTNULL | SK_ISNULL |
(impliesNN->sk_flags &
(SK_BT_DESC | SK_BT_NULLS_FIRST))),
curattr,
- ((impliesNN->sk_flags & SK_BT_NULLS_FIRST) ?
- BTGreaterStrategyNumber :
- BTLessStrategyNumber),
+ ScanDirectionIsForward(dir) ?
+ BTGreaterStrategyNumber : BTLessStrategyNumber,
InvalidOid,
InvalidOid,
InvalidOid,
diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c
index 8a72b5e70a4..1a37737d4a2 100644
--- a/src/backend/executor/execExprInterp.c
+++ b/src/backend/executor/execExprInterp.c
@@ -5228,7 +5228,6 @@ ExecEvalJsonCoercionFinish(ExprState *state, ExprEvalStep *op)
* JsonBehavior expression.
*/
jsestate->escontext.error_occurred = false;
- jsestate->escontext.error_occurred = false;
jsestate->escontext.details_wanted = true;
}
}
diff --git a/src/backend/libpq/pg_ident.conf.sample b/src/backend/libpq/pg_ident.conf.sample
index f5225f26cdf..8ee6c0ba315 100644
--- a/src/backend/libpq/pg_ident.conf.sample
+++ b/src/backend/libpq/pg_ident.conf.sample
@@ -13,25 +13,25 @@
# user names to their corresponding PostgreSQL user names. Records
# are of the form:
#
-# MAPNAME SYSTEM-USERNAME PG-USERNAME
+# MAPNAME SYSTEM-USERNAME DATABASE-USERNAME
#
# (The uppercase quantities must be replaced by actual values.)
#
# MAPNAME is the (otherwise freely chosen) map name that was used in
# pg_hba.conf. SYSTEM-USERNAME is the detected user name of the
-# client. PG-USERNAME is the requested PostgreSQL user name. The
-# existence of a record specifies that SYSTEM-USERNAME may connect as
-# PG-USERNAME.
+# client. DATABASE-USERNAME is the requested PostgreSQL user name.
+# The existence of a record specifies that SYSTEM-USERNAME may connect
+# as DATABASE-USERNAME.
#
-# If SYSTEM-USERNAME starts with a slash (/), it will be treated as a
-# regular expression. Optionally this can contain a capture (a
-# parenthesized subexpression). The substring matching the capture
-# will be substituted for \1 (backslash-one) if present in
-# PG-USERNAME.
+# If SYSTEM-USERNAME starts with a slash (/), the rest of it will be
+# treated as a regular expression. Optionally this can contain a capture
+# (a parenthesized subexpression). The substring matching the capture
+# will be substituted for \1 (backslash-one) if that appears in
+# DATABASE-USERNAME.
#
-# PG-USERNAME can be "all", a user name, a group name prefixed with "+", or
-# a regular expression (if it starts with a slash (/)). If it is a regular
-# expression, the substring matching with \1 has no effect.
+# DATABASE-USERNAME can be "all", a user name, a group name prefixed with "+",
+# or a regular expression (if it starts with a slash (/)). If it is a regular
+# expression, no substitution for \1 will occur.
#
# Multiple maps may be specified in this file and used by pg_hba.conf.
#
@@ -69,4 +69,4 @@
# Put your actual configuration here
# ----------------------------------
-# MAPNAME SYSTEM-USERNAME PG-USERNAME
+# MAPNAME SYSTEM-USERNAME DATABASE-USERNAME
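
As a concrete illustration of the regular-expression substitution described above (map and domain names are hypothetical), the following entry lets any system user of the form "name@example.com" connect as the database user "name":

# MAPNAME  SYSTEM-USERNAME        DATABASE-USERNAME
mymap      /^(.*)@example\.com$   \1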
diff --git a/src/backend/main/main.c b/src/backend/main/main.c
index 7d63cf94a6b..bdcb5e4f261 100644
--- a/src/backend/main/main.c
+++ b/src/backend/main/main.c
@@ -125,13 +125,17 @@ main(int argc, char *argv[])
set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("postgres"));
/*
- * In the postmaster, absorb the environment values for LC_COLLATE and
- * LC_CTYPE. Individual backends will change these later to settings
- * taken from pg_database, but the postmaster cannot do that. If we leave
- * these set to "C" then message localization might not work well in the
- * postmaster.
+ * Collation is handled by pg_locale.c, and the behavior is dependent on
+ * the provider. strcoll(), etc., should not be called directly.
+ */
+ init_locale("LC_COLLATE", LC_COLLATE, "C");
+
+ /*
+ * In the postmaster, absorb the environment value for LC_CTYPE.
+ * Individual backends will change it later to pg_database.datctype, but
+ * the postmaster cannot do that. If we leave it set to "C" then message
+ * localization might not work well in the postmaster.
*/
- init_locale("LC_COLLATE", LC_COLLATE, "");
init_locale("LC_CTYPE", LC_CTYPE, "");
/*
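
A minimal standalone sketch (assumed behavior, not PostgreSQL code) of why pinning LC_COLLATE to "C" makes direct strcoll() calls unsuitable for linguistic ordering:

#include <locale.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	/* mirror what the postmaster now does for LC_COLLATE */
	setlocale(LC_COLLATE, "C");

	/*
	 * In the "C" locale strcoll() compares byte-wise like strcmp(), so
	 * "B" (0x42) sorts before "a" (0x61) regardless of the collation the
	 * environment asked for; linguistic ordering has to come from
	 * pg_locale.c instead.
	 */
	printf("%d\n", strcoll("a", "B") > 0);	/* prints 1 */
	return 0;
}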
diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c
index 082b4d9d327..f4c977262c5 100644
--- a/src/backend/replication/pgoutput/pgoutput.c
+++ b/src/backend/replication/pgoutput/pgoutput.c
@@ -297,10 +297,12 @@ parse_output_parameters(List *options, PGOutputData *data)
bool two_phase_option_given = false;
bool origin_option_given = false;
+ /* Initialize optional parameters to defaults */
data->binary = false;
data->streaming = LOGICALREP_STREAM_OFF;
data->messages = false;
data->two_phase = false;
+ data->publish_no_origin = false;
foreach(lc, options)
{
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 46f44bc4511..2d43bf2cc13 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -170,8 +170,8 @@ static const char *const BuiltinTrancheNames[] = {
[LWTRANCHE_DSM_REGISTRY_DSA] = "DSMRegistryDSA",
[LWTRANCHE_DSM_REGISTRY_HASH] = "DSMRegistryHash",
[LWTRANCHE_COMMITTS_SLRU] = "CommitTsSLRU",
- [LWTRANCHE_MULTIXACTOFFSET_SLRU] = "MultixactOffsetSLRU",
- [LWTRANCHE_MULTIXACTMEMBER_SLRU] = "MultixactMemberSLRU",
+ [LWTRANCHE_MULTIXACTOFFSET_SLRU] = "MultiXactOffsetSLRU",
+ [LWTRANCHE_MULTIXACTMEMBER_SLRU] = "MultiXactMemberSLRU",
[LWTRANCHE_NOTIFY_SLRU] = "NotifySLRU",
[LWTRANCHE_SERIAL_SLRU] = "SerialSLRU",
[LWTRANCHE_SUBTRANS_SLRU] = "SubtransSLRU",
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index d1593f38b35..08791b8f75e 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -1350,24 +1350,15 @@ PortalRunMulti(Portal portal,
PopActiveSnapshot();
/*
- * If a query completion data was supplied, use it. Otherwise use the
- * portal's query completion data.
- *
- * Exception: Clients expect INSERT/UPDATE/DELETE tags to have counts, so
- * fake them with zeros. This can happen with DO INSTEAD rules if there
- * is no replacement query of the same type as the original. We print "0
- * 0" here because technically there is no query of the matching tag type,
- * and printing a non-zero count for a different query type seems wrong,
- * e.g. an INSERT that does an UPDATE instead should not print "0 1" if
- * one row was updated. See QueryRewrite(), step 3, for details.
+ * If a command tag was requested and we did not fill in a run-time-
+ * determined tag above, copy the parse-time tag from the Portal. (There
+ * might not be any tag there either, in edge cases such as empty prepared
+ * statements. That's OK.)
*/
- if (qc && qc->commandTag == CMDTAG_UNKNOWN)
- {
- if (portal->qc.commandTag != CMDTAG_UNKNOWN)
- CopyQueryCompletion(qc, &portal->qc);
- /* If the caller supplied a qc, we should have set it by now. */
- Assert(qc->commandTag != CMDTAG_UNKNOWN);
- }
+ if (qc &&
+ qc->commandTag == CMDTAG_UNKNOWN &&
+ portal->qc.commandTag != CMDTAG_UNKNOWN)
+ CopyQueryCompletion(qc, &portal->qc);
}
/*
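
The "empty prepared statement" edge case can be observed from libpq; a sketch, assuming an established connection (the statement name "nop" and helper are hypothetical):

static void
show_empty_statement_tag(PGconn *conn)
{
	PGresult   *res;

	/* prepare a statement with no query text */
	PQclear(PQprepare(conn, "nop", "", 0, NULL));
	res = PQexecPrepared(conn, "nop", 0, NULL, NULL, NULL, 0);

	/*
	 * PQresultStatus(res) is PGRES_EMPTY_QUERY and PQcmdStatus(res) is an
	 * empty string: no command tag is ever set, which is the
	 * CMDTAG_UNKNOWN case the code above now tolerates quietly.
	 */
	PQclear(res);
}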
diff --git a/src/backend/utils/adt/jsonb_util.c b/src/backend/utils/adt/jsonb_util.c
index c8b6c15e059..82b807d067a 100644
--- a/src/backend/utils/adt/jsonb_util.c
+++ b/src/backend/utils/adt/jsonb_util.c
@@ -277,22 +277,16 @@ compareJsonbContainers(JsonbContainer *a, JsonbContainer *b)
else
{
/*
- * It's safe to assume that the types differed, and that the va
- * and vb values passed were set.
- *
- * If the two values were of the same container type, then there'd
- * have been a chance to observe the variation in the number of
- * elements/pairs (when processing WJB_BEGIN_OBJECT, say). They're
- * either two heterogeneously-typed containers, or a container and
- * some scalar type.
- *
- * We don't have to consider the WJB_END_ARRAY and WJB_END_OBJECT
- * cases here, because we would have seen the corresponding
- * WJB_BEGIN_ARRAY and WJB_BEGIN_OBJECT tokens first, and
- * concluded that they don't match.
+ * It's not possible for one iterator to report end of array or
+ * object while the other one reports something else, because we
+ * would have detected a length mismatch when we processed the
+ * container-start tokens above. Likewise we can't see WJB_DONE
+ * from one but not the other. So we have two different-type
+ * containers, or a container and some scalar type, or two
+ * different scalar types. Sort on the basis of the type code.
*/
- Assert(ra != WJB_END_ARRAY && ra != WJB_END_OBJECT);
- Assert(rb != WJB_END_ARRAY && rb != WJB_END_OBJECT);
+ Assert(ra != WJB_DONE && ra != WJB_END_ARRAY && ra != WJB_END_OBJECT);
+ Assert(rb != WJB_DONE && rb != WJB_END_ARRAY && rb != WJB_END_OBJECT);
Assert(va.type != vb.type);
Assert(va.type != jbvBinary);
@@ -852,15 +846,20 @@ JsonbIteratorInit(JsonbContainer *container)
* It is our job to expand the jbvBinary representation without bothering them
* with it. However, clients should not take it upon themselves to touch array
* or Object element/pair buffers, since their element/pair pointers are
- * garbage. Also, *val will not be set when returning WJB_END_ARRAY or
- * WJB_END_OBJECT, on the assumption that it's only useful to access values
- * when recursing in.
+ * garbage.
+ *
+ * *val is not meaningful when the result is WJB_DONE, WJB_END_ARRAY or
+ * WJB_END_OBJECT. However, we set val->type = jbvNull in those cases,
+ * so that callers may assume that val->type is always well-defined.
*/
JsonbIteratorToken
JsonbIteratorNext(JsonbIterator **it, JsonbValue *val, bool skipNested)
{
if (*it == NULL)
+ {
+ val->type = jbvNull;
return WJB_DONE;
+ }
/*
* When stepping into a nested container, we jump back here to start
@@ -898,6 +897,7 @@ recurse:
* nesting).
*/
*it = freeAndGetParent(*it);
+ val->type = jbvNull;
return WJB_END_ARRAY;
}
@@ -951,6 +951,7 @@ recurse:
* of nesting).
*/
*it = freeAndGetParent(*it);
+ val->type = jbvNull;
return WJB_END_OBJECT;
}
else
@@ -995,8 +996,10 @@ recurse:
return WJB_VALUE;
}
- elog(ERROR, "invalid iterator state");
- return -1;
+ elog(ERROR, "invalid jsonb iterator state");
+ /* satisfy compilers that don't know that elog(ERROR) doesn't return */
+ val->type = jbvNull;
+ return WJB_DONE;
}
/*
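
A sketch of the iteration pattern this guarantee simplifies, assuming a JsonbContainer *jbc in scope:

JsonbIterator *it = JsonbIteratorInit(jbc);
JsonbValue	v;
JsonbIteratorToken tok;

while ((tok = JsonbIteratorNext(&it, &v, false)) != WJB_DONE)
{
	/*
	 * v.type is now well-defined for every token, including
	 * WJB_END_ARRAY and WJB_END_OBJECT, where it is jbvNull.
	 */
	if (tok == WJB_ELEM && v.type == jbvString)
		;						/* process a string array element */
}
Assert(v.type == jbvNull);		/* holds even after WJB_DONE */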
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index c86ceefda94..641e535a73c 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -417,12 +417,11 @@ CheckMyDatabase(const char *name, bool am_superuser, bool override_allow_connect
datum = SysCacheGetAttrNotNull(DATABASEOID, tup, Anum_pg_database_datctype);
ctype = TextDatumGetCString(datum);
- if (pg_perm_setlocale(LC_COLLATE, collate) == NULL)
- ereport(FATAL,
- (errmsg("database locale is incompatible with operating system"),
- errdetail("The database was initialized with LC_COLLATE \"%s\", "
- " which is not recognized by setlocale().", collate),
- errhint("Recreate the database with another locale or install the missing locale.")));
+ /*
+ * Historically, we set LC_COLLATE from datcollate, as well. That's no
+ * longer necessary because all collation behavior is handled through
+ * pg_locale_t.
+ */
if (pg_perm_setlocale(LC_CTYPE, ctype) == NULL)
ereport(FATAL,
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 1937997ea67..c6226175528 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -12583,8 +12583,13 @@ dumpDomain(Archive *fout, const TypeInfo *tyinfo)
for (i = 0; i < tyinfo->nDomChecks; i++)
{
ConstraintInfo *domcheck = &(tyinfo->domChecks[i]);
- PQExpBuffer conprefix = createPQExpBuffer();
+ PQExpBuffer conprefix;
+ /* but only if the constraint itself was dumped here */
+ if (domcheck->separate)
+ continue;
+
+ conprefix = createPQExpBuffer();
appendPQExpBuffer(conprefix, "CONSTRAINT %s ON DOMAIN",
fmtId(domcheck->dobj.name));
@@ -18488,6 +18493,22 @@ dumpConstraint(Archive *fout, const ConstraintInfo *coninfo)
.section = SECTION_POST_DATA,
.createStmt = q->data,
.dropStmt = delq->data));
+
+ if (coninfo->dobj.dump & DUMP_COMPONENT_COMMENT)
+ {
+ PQExpBuffer conprefix = createPQExpBuffer();
+ char *qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
+
+ appendPQExpBuffer(conprefix, "CONSTRAINT %s ON DOMAIN",
+ fmtId(coninfo->dobj.name));
+
+ dumpComment(fout, conprefix->data, qtypname,
+ tyinfo->dobj.namespace->dobj.name,
+ tyinfo->rolname,
+ coninfo->dobj.catId, 0, tyinfo->dobj.dumpId);
+ destroyPQExpBuffer(conprefix);
+ free(qtypname);
+ }
}
}
else
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 3cbcad65c5f..100317b1aa9 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -632,7 +632,7 @@ main(int argc, char *argv[])
fprintf(OPF, "SET escape_string_warning = off;\n");
fprintf(OPF, "\n");
- if (!data_only)
+ if (!data_only && !statistics_only && !no_schema)
{
/*
* If asked to --clean, do that first. We can avoid detailed
diff --git a/src/bin/psql/help.c b/src/bin/psql/help.c
index a2e009ab9be..8c62729a0d1 100644
--- a/src/bin/psql/help.c
+++ b/src/bin/psql/help.c
@@ -748,7 +748,7 @@ void
print_copyright(void)
{
puts("PostgreSQL Database Management System\n"
- "(formerly known as Postgres, then as Postgres95)\n\n"
+ "(also known as Postgres, formerly known as Postgres95)\n\n"
"Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group\n\n"
"Portions Copyright (c) 1994, The Regents of the University of California\n\n"
"Permission to use, copy, modify, and distribute this software and its\n"
diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c
index 2a2b10d5a29..afa85d9fca9 100644
--- a/src/interfaces/libpq/fe-connect.c
+++ b/src/interfaces/libpq/fe-connect.c
@@ -7574,10 +7574,12 @@ PQport(const PGconn *conn)
if (!conn)
return NULL;
- if (conn->connhost != NULL)
+ if (conn->connhost != NULL &&
+ conn->connhost[conn->whichhost].port != NULL &&
+ conn->connhost[conn->whichhost].port[0] != '\0')
return conn->connhost[conn->whichhost].port;
- return "";
+ return DEF_PGPORT_STR;
}
/*
diff --git a/src/interfaces/libpq/libpq-int.h b/src/interfaces/libpq/libpq-int.h
index 70c28f2ffca..a701c25038a 100644
--- a/src/interfaces/libpq/libpq-int.h
+++ b/src/interfaces/libpq/libpq-int.h
@@ -357,7 +357,8 @@ typedef struct pg_conn_host
pg_conn_host_type type; /* type of host address */
char *host; /* host name or socket path */
char *hostaddr; /* host numeric IP address */
- char *port; /* port number (always provided) */
+ char *port; /* port number (if NULL or empty, use
+ * DEF_PGPORT[_STR]) */
char *password; /* password for this host, read from the
* password file; NULL if not sought or not
* found in password file. */
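
A short caller-side sketch of the behavior change (connection string is hypothetical):

#include <stdio.h>
#include "libpq-fe.h"

int
main(void)
{
	/* no port given, so the pg_conn_host port field stays empty */
	PGconn	   *conn = PQconnectdb("host=localhost");

	/*
	 * Previously PQport() could return "" here; it now reports the
	 * compile-time default (DEF_PGPORT_STR, typically "5432").
	 */
	printf("port: %s\n", PQport(conn));
	PQfinish(conn);
	return 0;
}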
diff --git a/src/test/modules/Makefile b/src/test/modules/Makefile
index aa1d27bbed3..7d3d3d52b45 100644
--- a/src/test/modules/Makefile
+++ b/src/test/modules/Makefile
@@ -15,6 +15,7 @@ SUBDIRS = \
plsample \
spgist_name_ops \
test_aio \
+ test_binaryheap \
test_bloomfilter \
test_copy_callbacks \
test_custom_rmgrs \
diff --git a/src/test/modules/meson.build b/src/test/modules/meson.build
index 9de0057bd1d..dd5cd065ba1 100644
--- a/src/test/modules/meson.build
+++ b/src/test/modules/meson.build
@@ -14,6 +14,7 @@ subdir('plsample')
subdir('spgist_name_ops')
subdir('ssl_passphrase_callback')
subdir('test_aio')
+subdir('test_binaryheap')
subdir('test_bloomfilter')
subdir('test_copy_callbacks')
subdir('test_custom_rmgrs')
diff --git a/src/test/modules/test_binaryheap/.gitignore b/src/test/modules/test_binaryheap/.gitignore
new file mode 100644
index 00000000000..5dcb3ff9723
--- /dev/null
+++ b/src/test/modules/test_binaryheap/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/src/test/modules/test_binaryheap/Makefile b/src/test/modules/test_binaryheap/Makefile
new file mode 100644
index 00000000000..d310fbc9e88
--- /dev/null
+++ b/src/test/modules/test_binaryheap/Makefile
@@ -0,0 +1,24 @@
+# src/test/modules/test_binaryheap/Makefile
+
+MODULE_big = test_binaryheap
+OBJS = \
+ $(WIN32RES) \
+ test_binaryheap.o
+
+PGFILEDESC = "test_binaryheap - test code for binaryheap"
+
+EXTENSION = test_binaryheap
+DATA = test_binaryheap--1.0.sql
+
+REGRESS = test_binaryheap
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/test_binaryheap
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/src/test/modules/test_binaryheap/expected/test_binaryheap.out b/src/test/modules/test_binaryheap/expected/test_binaryheap.out
new file mode 100644
index 00000000000..16ce07875e3
--- /dev/null
+++ b/src/test/modules/test_binaryheap/expected/test_binaryheap.out
@@ -0,0 +1,12 @@
+CREATE EXTENSION test_binaryheap;
+--
+-- These tests don't produce any interesting output. We're checking that
+-- the operations complete without crashing or hanging and that none of their
+-- internal sanity tests fail.
+--
+SELECT test_binaryheap();
+ test_binaryheap
+-----------------
+
+(1 row)
+
diff --git a/src/test/modules/test_binaryheap/meson.build b/src/test/modules/test_binaryheap/meson.build
new file mode 100644
index 00000000000..816a43c93e9
--- /dev/null
+++ b/src/test/modules/test_binaryheap/meson.build
@@ -0,0 +1,33 @@
+# Copyright (c) 2025, PostgreSQL Global Development Group
+
+test_binaryheap_sources = files(
+ 'test_binaryheap.c',
+)
+
+if host_system == 'windows'
+ test_binaryheap_sources += rc_lib_gen.process(win32ver_rc, extra_args: [
+ '--NAME', 'test_binaryheap',
+ '--FILEDESC', 'test_binaryheap - test code for binaryheap',])
+endif
+
+test_binaryheap = shared_module('test_binaryheap',
+ test_binaryheap_sources,
+ kwargs: pg_test_mod_args,
+)
+test_install_libs += test_binaryheap
+
+test_install_data += files(
+ 'test_binaryheap.control',
+ 'test_binaryheap--1.0.sql',
+)
+
+tests += {
+ 'name': 'test_binaryheap',
+ 'sd': meson.current_source_dir(),
+ 'bd': meson.current_build_dir(),
+ 'regress': {
+ 'sql': [
+ 'test_binaryheap',
+ ],
+ },
+}
diff --git a/src/test/modules/test_binaryheap/sql/test_binaryheap.sql b/src/test/modules/test_binaryheap/sql/test_binaryheap.sql
new file mode 100644
index 00000000000..8439545815b
--- /dev/null
+++ b/src/test/modules/test_binaryheap/sql/test_binaryheap.sql
@@ -0,0 +1,8 @@
+CREATE EXTENSION test_binaryheap;
+
+--
+-- These tests don't produce any interesting output. We're checking that
+-- the operations complete without crashing or hanging and that none of their
+-- internal sanity tests fail.
+--
+SELECT test_binaryheap();
diff --git a/src/test/modules/test_binaryheap/test_binaryheap--1.0.sql b/src/test/modules/test_binaryheap/test_binaryheap--1.0.sql
new file mode 100644
index 00000000000..cddceeee603
--- /dev/null
+++ b/src/test/modules/test_binaryheap/test_binaryheap--1.0.sql
@@ -0,0 +1,7 @@
+/* src/test/modules/test_binaryheap/test_binaryheap--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION test_binaryheap" to load this file. \quit
+
+CREATE FUNCTION test_binaryheap() RETURNS VOID
+ AS 'MODULE_PATHNAME' LANGUAGE C;
diff --git a/src/test/modules/test_binaryheap/test_binaryheap.c b/src/test/modules/test_binaryheap/test_binaryheap.c
new file mode 100644
index 00000000000..583dae1da30
--- /dev/null
+++ b/src/test/modules/test_binaryheap/test_binaryheap.c
@@ -0,0 +1,275 @@
+/*--------------------------------------------------------------------------
+ *
+ * test_binaryheap.c
+ * Test correctness of binary heap implementation.
+ *
+ * Copyright (c) 2025, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/test/modules/test_binaryheap/test_binaryheap.c
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "common/int.h"
+#include "common/pg_prng.h"
+#include "fmgr.h"
+#include "lib/binaryheap.h"
+
+PG_MODULE_MAGIC;
+
+/*
+ * Test binaryheap_comparator for max-heap of integers.
+ */
+static int
+int_cmp(Datum a, Datum b, void *arg)
+{
+ return pg_cmp_s32(DatumGetInt32(a), DatumGetInt32(b));
+}
+
+/*
+ * Loops through all nodes and returns the maximum value.
+ */
+static int
+get_max_from_heap(binaryheap *heap)
+{
+ int max = -1;
+
+ for (int i = 0; i < binaryheap_size(heap); i++)
+ max = Max(max, DatumGetInt32(binaryheap_get_node(heap, i)));
+
+ return max;
+}
+
+/*
+ * Generate a random permutation of the integers 0..size-1.
+ */
+static int *
+get_permutation(int size)
+{
+ int *permutation = (int *) palloc(size * sizeof(int));
+
+ permutation[0] = 0;
+
+ /*
+ * This is the "inside-out" variant of the Fisher-Yates shuffle algorithm.
+ * Notionally, we append each new value to the array and then swap it with
+ * a randomly-chosen array element (possibly including itself, else we
+ * fail to generate permutations with the last integer last). The swap
+ * step can be optimized by combining it with the insertion.
+ */
+ for (int i = 1; i < size; i++)
+ {
+ int j = pg_prng_uint64_range(&pg_global_prng_state, 0, i);
+
+ if (j < i) /* avoid fetching undefined data if j=i */
+ permutation[i] = permutation[j];
+ permutation[j] = i;
+ }
+
+ return permutation;
+}
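
/*
 * Worked example of the inside-out shuffle (illustrative only), size = 4:
 * start with {0}; each step draws j in [0, i] and inserts i at slot j,
 * moving the displaced value (if any) to slot i:
 *   i = 1, j = 0:  {1, 0}
 *   i = 2, j = 2:  {1, 0, 2}     (j == i: new value appended as-is)
 *   i = 3, j = 1:  {1, 3, 0, 2}  (old permutation[1] moved to slot 3)
 */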
+
+/*
+ * Ensure that the heap property holds for the given heap, i.e., each parent is
+ * greater than or equal to its children.
+ */
+static void
+verify_heap_property(binaryheap *heap)
+{
+ for (int i = 0; i < binaryheap_size(heap); i++)
+ {
+ int left = 2 * i + 1;
+ int right = 2 * i + 2;
+ int parent_val = DatumGetInt32(binaryheap_get_node(heap, i));
+
+ if (left < binaryheap_size(heap) &&
+ parent_val < DatumGetInt32(binaryheap_get_node(heap, left)))
+ elog(ERROR, "parent node less than left child");
+
+ if (right < binaryheap_size(heap) &&
+ parent_val < DatumGetInt32(binaryheap_get_node(heap, right)))
+ elog(ERROR, "parent node less than right child");
+ }
+}
+
+/*
+ * Check correctness of basic operations.
+ */
+static void
+test_basic(int size)
+{
+ binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL);
+ int *permutation = get_permutation(size);
+
+ if (!binaryheap_empty(heap))
+ elog(ERROR, "new heap not empty");
+ if (binaryheap_size(heap) != 0)
+ elog(ERROR, "wrong size for new heap");
+
+ for (int i = 0; i < size; i++)
+ {
+ binaryheap_add(heap, Int32GetDatum(permutation[i]));
+ verify_heap_property(heap);
+ }
+
+ if (binaryheap_empty(heap))
+ elog(ERROR, "heap empty after adding values");
+ if (binaryheap_size(heap) != size)
+ elog(ERROR, "wrong size for heap after adding values");
+
+ if (DatumGetInt32(binaryheap_first(heap)) != get_max_from_heap(heap))
+ elog(ERROR, "incorrect root node after adding values");
+
+ for (int i = 0; i < size; i++)
+ {
+ int expected = get_max_from_heap(heap);
+ int actual = DatumGetInt32(binaryheap_remove_first(heap));
+
+ if (actual != expected)
+ elog(ERROR, "incorrect root node after removing root");
+ verify_heap_property(heap);
+ }
+
+ if (!binaryheap_empty(heap))
+ elog(ERROR, "heap not empty after removing all nodes");
+}
+
+/*
+ * Test building heap after unordered additions.
+ */
+static void
+test_build(int size)
+{
+ binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL);
+ int *permutation = get_permutation(size);
+
+ for (int i = 0; i < size; i++)
+ binaryheap_add_unordered(heap, Int32GetDatum(permutation[i]));
+
+ if (binaryheap_size(heap) != size)
+ elog(ERROR, "wrong size for heap after unordered additions");
+
+ binaryheap_build(heap);
+ verify_heap_property(heap);
+}
+
+/*
+ * Test removing nodes.
+ */
+static void
+test_remove_node(int size)
+{
+ binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL);
+ int *permutation = get_permutation(size);
+ int remove_count = pg_prng_uint64_range(&pg_global_prng_state,
+ 0, size - 1);
+
+ for (int i = 0; i < size; i++)
+ binaryheap_add(heap, Int32GetDatum(permutation[i]));
+
+ for (int i = 0; i < remove_count; i++)
+ {
+ int idx = pg_prng_uint64_range(&pg_global_prng_state,
+ 0, binaryheap_size(heap) - 1);
+
+ binaryheap_remove_node(heap, idx);
+ verify_heap_property(heap);
+ }
+
+ if (binaryheap_size(heap) != size - remove_count)
+ elog(ERROR, "wrong size after removing nodes");
+}
+
+/*
+ * Test replacing the root node.
+ */
+static void
+test_replace_first(int size)
+{
+ binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL);
+
+ for (int i = 0; i < size; i++)
+ binaryheap_add(heap, Int32GetDatum(i));
+
+ /*
+ * Replace root with a value smaller than everything in the heap.
+ */
+ binaryheap_replace_first(heap, Int32GetDatum(-1));
+ verify_heap_property(heap);
+
+ /*
+ * Replace root with a value in the middle of the heap.
+ */
+ binaryheap_replace_first(heap, Int32GetDatum(size / 2));
+ verify_heap_property(heap);
+
+ /*
+ * Replace root with a larger value than everything in the heap.
+ */
+ binaryheap_replace_first(heap, Int32GetDatum(size + 1));
+ verify_heap_property(heap);
+}
+
+/*
+ * Test duplicate values.
+ */
+static void
+test_duplicates(int size)
+{
+ binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL);
+ int dup = pg_prng_uint64_range(&pg_global_prng_state, 0, size - 1);
+
+ for (int i = 0; i < size; i++)
+ binaryheap_add(heap, Int32GetDatum(dup));
+
+ for (int i = 0; i < size; i++)
+ {
+ if (DatumGetInt32(binaryheap_remove_first(heap)) != dup)
+ elog(ERROR, "unexpected value in heap with duplicates");
+ }
+}
+
+/*
+ * Test resetting.
+ */
+static void
+test_reset(int size)
+{
+ binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL);
+
+ for (int i = 0; i < size; i++)
+ binaryheap_add(heap, Int32GetDatum(i));
+
+ binaryheap_reset(heap);
+
+ if (!binaryheap_empty(heap))
+ elog(ERROR, "heap not empty after resetting");
+}
+
+/*
+ * SQL-callable entry point to perform all tests.
+ */
+PG_FUNCTION_INFO_V1(test_binaryheap);
+
+Datum
+test_binaryheap(PG_FUNCTION_ARGS)
+{
+ static const int test_sizes[] = {1, 2, 3, 10, 100, 1000};
+
+ for (int i = 0; i < sizeof(test_sizes) / sizeof(int); i++)
+ {
+ int size = test_sizes[i];
+
+ test_basic(size);
+ test_build(size);
+ test_remove_node(size);
+ test_replace_first(size);
+ test_duplicates(size);
+ test_reset(size);
+ }
+
+ PG_RETURN_VOID();
+}
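
For reference, the binaryheap calls these tests revolve around reduce to a pattern like this (a sketch, reusing the int_cmp max-heap comparator defined above):

binaryheap *heap = binaryheap_allocate(3, int_cmp, NULL);

binaryheap_add(heap, Int32GetDatum(2));
binaryheap_add(heap, Int32GetDatum(7));
binaryheap_add(heap, Int32GetDatum(4));

Assert(DatumGetInt32(binaryheap_first(heap)) == 7);		/* peek at the max */
Assert(DatumGetInt32(binaryheap_remove_first(heap)) == 7);
Assert(binaryheap_size(heap) == 2);

binaryheap_free(heap);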
diff --git a/src/test/modules/test_binaryheap/test_binaryheap.control b/src/test/modules/test_binaryheap/test_binaryheap.control
new file mode 100644
index 00000000000..dd0785e05bd
--- /dev/null
+++ b/src/test/modules/test_binaryheap/test_binaryheap.control
@@ -0,0 +1,5 @@
+# test_binaryheap extension
+comment = 'Test code for binaryheap'
+default_version = '1.0'
+module_pathname = '$libdir/test_binaryheap'
+relocatable = true
diff --git a/src/test/regress/expected/compression.out b/src/test/regress/expected/compression.out
index 4dd9ee7200d..09f198149aa 100644
--- a/src/test/regress/expected/compression.out
+++ b/src/test/regress/expected/compression.out
@@ -1,3 +1,7 @@
+-- Default set of tests for TOAST compression, independent of compression
+-- methods supported by the build.
+CREATE SCHEMA pglz;
+SET search_path TO pglz, public;
\set HIDE_TOAST_COMPRESSION false
-- ensure we get stable results regardless of installation's default
SET default_toast_compression = 'pglz';
@@ -6,21 +10,13 @@ CREATE TABLE cmdata(f1 text COMPRESSION pglz);
CREATE INDEX idx ON cmdata(f1);
INSERT INTO cmdata VALUES(repeat('1234567890', 1000));
\d+ cmdata
- Table "public.cmdata"
+ Table "pglz.cmdata"
Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
--------+------+-----------+----------+---------+----------+-------------+--------------+-------------
f1 | text | | | | extended | pglz | |
Indexes:
"idx" btree (f1)
-CREATE TABLE cmdata1(f1 TEXT COMPRESSION lz4);
-INSERT INTO cmdata1 VALUES(repeat('1234567890', 1004));
-\d+ cmdata1
- Table "public.cmdata1"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+------+-----------+----------+---------+----------+-------------+--------------+-------------
- f1 | text | | | | extended | lz4 | |
-
-- verify stored compression method in the data
SELECT pg_column_compression(f1) FROM cmdata;
pg_column_compression
@@ -28,12 +24,6 @@ SELECT pg_column_compression(f1) FROM cmdata;
pglz
(1 row)
-SELECT pg_column_compression(f1) FROM cmdata1;
- pg_column_compression
------------------------
- lz4
-(1 row)
-
-- decompress data slice
SELECT SUBSTR(f1, 200, 5) FROM cmdata;
substr
@@ -41,16 +31,10 @@ SELECT SUBSTR(f1, 200, 5) FROM cmdata;
01234
(1 row)
-SELECT SUBSTR(f1, 2000, 50) FROM cmdata1;
- substr
-----------------------------------------------------
- 01234567890123456789012345678901234567890123456789
-(1 row)
-
-- copy with table creation
SELECT * INTO cmmove1 FROM cmdata;
\d+ cmmove1
- Table "public.cmmove1"
+ Table "pglz.cmmove1"
Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
--------+------+-----------+----------+---------+----------+-------------+--------------+-------------
f1 | text | | | | extended | | |
@@ -61,45 +45,9 @@ SELECT pg_column_compression(f1) FROM cmmove1;
pglz
(1 row)
--- copy to existing table
-CREATE TABLE cmmove3(f1 text COMPRESSION pglz);
-INSERT INTO cmmove3 SELECT * FROM cmdata;
-INSERT INTO cmmove3 SELECT * FROM cmdata1;
-SELECT pg_column_compression(f1) FROM cmmove3;
- pg_column_compression
------------------------
- pglz
- lz4
-(2 rows)
-
--- test LIKE INCLUDING COMPRESSION
-CREATE TABLE cmdata2 (LIKE cmdata1 INCLUDING COMPRESSION);
-\d+ cmdata2
- Table "public.cmdata2"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+------+-----------+----------+---------+----------+-------------+--------------+-------------
- f1 | text | | | | extended | lz4 | |
-
-DROP TABLE cmdata2;
-- try setting compression for incompressible data type
CREATE TABLE cmdata2 (f1 int COMPRESSION pglz);
ERROR: column data type integer does not support compression
--- update using datum from different table
-CREATE TABLE cmmove2(f1 text COMPRESSION pglz);
-INSERT INTO cmmove2 VALUES (repeat('1234567890', 1004));
-SELECT pg_column_compression(f1) FROM cmmove2;
- pg_column_compression
------------------------
- pglz
-(1 row)
-
-UPDATE cmmove2 SET f1 = cmdata1.f1 FROM cmdata1;
-SELECT pg_column_compression(f1) FROM cmmove2;
- pg_column_compression
------------------------
- lz4
-(1 row)
-
-- test externally stored compressed data
CREATE OR REPLACE FUNCTION large_val() RETURNS TEXT LANGUAGE SQL AS
'select array_agg(fipshash(g::text))::text from generate_series(1, 256) g';
@@ -111,21 +59,6 @@ SELECT pg_column_compression(f1) FROM cmdata2;
pglz
(1 row)
-INSERT INTO cmdata1 SELECT large_val() || repeat('a', 4000);
-SELECT pg_column_compression(f1) FROM cmdata1;
- pg_column_compression
------------------------
- lz4
- lz4
-(2 rows)
-
-SELECT SUBSTR(f1, 200, 5) FROM cmdata1;
- substr
---------
- 01234
- 79026
-(2 rows)
-
SELECT SUBSTR(f1, 200, 5) FROM cmdata2;
substr
--------
@@ -136,21 +69,21 @@ DROP TABLE cmdata2;
--test column type update varlena/non-varlena
CREATE TABLE cmdata2 (f1 int);
\d+ cmdata2
- Table "public.cmdata2"
+ Table "pglz.cmdata2"
Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
--------+---------+-----------+----------+---------+---------+-------------+--------------+-------------
f1 | integer | | | | plain | | |
ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE varchar;
\d+ cmdata2
- Table "public.cmdata2"
+ Table "pglz.cmdata2"
Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
--------+-------------------+-----------+----------+---------+----------+-------------+--------------+-------------
f1 | character varying | | | | extended | | |
ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE int USING f1::integer;
\d+ cmdata2
- Table "public.cmdata2"
+ Table "pglz.cmdata2"
Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
--------+---------+-----------+----------+---------+---------+-------------+--------------+-------------
f1 | integer | | | | plain | | |
@@ -160,14 +93,14 @@ ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE int USING f1::integer;
ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE varchar;
ALTER TABLE cmdata2 ALTER COLUMN f1 SET COMPRESSION pglz;
\d+ cmdata2
- Table "public.cmdata2"
+ Table "pglz.cmdata2"
Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
--------+-------------------+-----------+----------+---------+----------+-------------+--------------+-------------
f1 | character varying | | | | extended | pglz | |
ALTER TABLE cmdata2 ALTER COLUMN f1 SET STORAGE plain;
\d+ cmdata2
- Table "public.cmdata2"
+ Table "pglz.cmdata2"
Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
--------+-------------------+-----------+----------+---------+---------+-------------+--------------+-------------
f1 | character varying | | | | plain | pglz | |
@@ -179,164 +112,47 @@ SELECT pg_column_compression(f1) FROM cmdata2;
(1 row)
--- test compression with materialized view
-CREATE MATERIALIZED VIEW compressmv(x) AS SELECT * FROM cmdata1;
-\d+ compressmv
- Materialized view "public.compressmv"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+------+-----------+----------+---------+----------+-------------+--------------+-------------
- x | text | | | | extended | | |
-View definition:
- SELECT f1 AS x
- FROM cmdata1;
-
-SELECT pg_column_compression(f1) FROM cmdata1;
- pg_column_compression
------------------------
- lz4
- lz4
-(2 rows)
-
-SELECT pg_column_compression(x) FROM compressmv;
- pg_column_compression
------------------------
- lz4
- lz4
-(2 rows)
-
--- test compression with partition
-CREATE TABLE cmpart(f1 text COMPRESSION lz4) PARTITION BY HASH(f1);
-CREATE TABLE cmpart1 PARTITION OF cmpart FOR VALUES WITH (MODULUS 2, REMAINDER 0);
-CREATE TABLE cmpart2(f1 text COMPRESSION pglz);
-ALTER TABLE cmpart ATTACH PARTITION cmpart2 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
-INSERT INTO cmpart VALUES (repeat('123456789', 1004));
-INSERT INTO cmpart VALUES (repeat('123456789', 4004));
-SELECT pg_column_compression(f1) FROM cmpart1;
- pg_column_compression
------------------------
- lz4
-(1 row)
-
-SELECT pg_column_compression(f1) FROM cmpart2;
- pg_column_compression
------------------------
- pglz
-(1 row)
-
-- test compression with inheritance
-CREATE TABLE cminh() INHERITS(cmdata, cmdata1); -- error
-NOTICE: merging multiple inherited definitions of column "f1"
-ERROR: column "f1" has a compression method conflict
-DETAIL: pglz versus lz4
-CREATE TABLE cminh(f1 TEXT COMPRESSION lz4) INHERITS(cmdata); -- error
-NOTICE: merging column "f1" with inherited definition
-ERROR: column "f1" has a compression method conflict
-DETAIL: pglz versus lz4
CREATE TABLE cmdata3(f1 text);
CREATE TABLE cminh() INHERITS (cmdata, cmdata3);
NOTICE: merging multiple inherited definitions of column "f1"
-- test default_toast_compression GUC
+-- suppress machine-dependent details
+\set VERBOSITY terse
SET default_toast_compression = '';
ERROR: invalid value for parameter "default_toast_compression": ""
-HINT: Available values: pglz, lz4.
SET default_toast_compression = 'I do not exist compression';
ERROR: invalid value for parameter "default_toast_compression": "I do not exist compression"
-HINT: Available values: pglz, lz4.
-SET default_toast_compression = 'lz4';
SET default_toast_compression = 'pglz';
--- test alter compression method
-ALTER TABLE cmdata ALTER COLUMN f1 SET COMPRESSION lz4;
-INSERT INTO cmdata VALUES (repeat('123456789', 4004));
-\d+ cmdata
- Table "public.cmdata"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+------+-----------+----------+---------+----------+-------------+--------------+-------------
- f1 | text | | | | extended | lz4 | |
-Indexes:
- "idx" btree (f1)
-Child tables: cminh
-
-SELECT pg_column_compression(f1) FROM cmdata;
- pg_column_compression
------------------------
- pglz
- lz4
-(2 rows)
-
+\set VERBOSITY default
ALTER TABLE cmdata2 ALTER COLUMN f1 SET COMPRESSION default;
\d+ cmdata2
- Table "public.cmdata2"
+ Table "pglz.cmdata2"
Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
--------+-------------------+-----------+----------+---------+---------+-------------+--------------+-------------
f1 | character varying | | | | plain | | |
--- test alter compression method for materialized views
-ALTER MATERIALIZED VIEW compressmv ALTER COLUMN x SET COMPRESSION lz4;
-\d+ compressmv
- Materialized view "public.compressmv"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+------+-----------+----------+---------+----------+-------------+--------------+-------------
- x | text | | | | extended | lz4 | |
-View definition:
- SELECT f1 AS x
- FROM cmdata1;
-
--- test alter compression method for partitioned tables
-ALTER TABLE cmpart1 ALTER COLUMN f1 SET COMPRESSION pglz;
-ALTER TABLE cmpart2 ALTER COLUMN f1 SET COMPRESSION lz4;
--- new data should be compressed with the current compression method
-INSERT INTO cmpart VALUES (repeat('123456789', 1004));
-INSERT INTO cmpart VALUES (repeat('123456789', 4004));
-SELECT pg_column_compression(f1) FROM cmpart1;
- pg_column_compression
------------------------
- lz4
- pglz
-(2 rows)
-
-SELECT pg_column_compression(f1) FROM cmpart2;
- pg_column_compression
------------------------
- pglz
- lz4
-(2 rows)
-
+DROP TABLE cmdata2;
-- VACUUM FULL does not recompress
SELECT pg_column_compression(f1) FROM cmdata;
pg_column_compression
-----------------------
pglz
- lz4
-(2 rows)
+(1 row)
VACUUM FULL cmdata;
SELECT pg_column_compression(f1) FROM cmdata;
pg_column_compression
-----------------------
pglz
- lz4
-(2 rows)
+(1 row)
--- test expression index
-DROP TABLE cmdata2;
-CREATE TABLE cmdata2 (f1 TEXT COMPRESSION pglz, f2 TEXT COMPRESSION lz4);
-CREATE UNIQUE INDEX idx1 ON cmdata2 ((f1 || f2));
-INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEXT))::TEXT FROM
-generate_series(1, 50) g), VERSION());
-- check data is ok
SELECT length(f1) FROM cmdata;
length
--------
10000
- 36036
-(2 rows)
-
-SELECT length(f1) FROM cmdata1;
- length
---------
- 10040
- 12449
-(2 rows)
+(1 row)
SELECT length(f1) FROM cmmove1;
length
@@ -344,19 +160,6 @@ SELECT length(f1) FROM cmmove1;
10000
(1 row)
-SELECT length(f1) FROM cmmove2;
- length
---------
- 10040
-(1 row)
-
-SELECT length(f1) FROM cmmove3;
- length
---------
- 10000
- 10040
-(2 rows)
-
CREATE TABLE badcompresstbl (a text COMPRESSION I_Do_Not_Exist_Compression); -- fails
ERROR: invalid compression method "i_do_not_exist_compression"
CREATE TABLE badcompresstbl (a text);
diff --git a/src/test/regress/expected/compression_1.out b/src/test/regress/expected/compression_1.out
deleted file mode 100644
index 7bd7642b4b9..00000000000
--- a/src/test/regress/expected/compression_1.out
+++ /dev/null
@@ -1,360 +0,0 @@
-\set HIDE_TOAST_COMPRESSION false
--- ensure we get stable results regardless of installation's default
-SET default_toast_compression = 'pglz';
--- test creating table with compression method
-CREATE TABLE cmdata(f1 text COMPRESSION pglz);
-CREATE INDEX idx ON cmdata(f1);
-INSERT INTO cmdata VALUES(repeat('1234567890', 1000));
-\d+ cmdata
- Table "public.cmdata"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+------+-----------+----------+---------+----------+-------------+--------------+-------------
- f1 | text | | | | extended | pglz | |
-Indexes:
- "idx" btree (f1)
-
-CREATE TABLE cmdata1(f1 TEXT COMPRESSION lz4);
-ERROR: compression method lz4 not supported
-DETAIL: This functionality requires the server to be built with lz4 support.
-INSERT INTO cmdata1 VALUES(repeat('1234567890', 1004));
-ERROR: relation "cmdata1" does not exist
-LINE 1: INSERT INTO cmdata1 VALUES(repeat('1234567890', 1004));
- ^
-\d+ cmdata1
--- verify stored compression method in the data
-SELECT pg_column_compression(f1) FROM cmdata;
- pg_column_compression
------------------------
- pglz
-(1 row)
-
-SELECT pg_column_compression(f1) FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: SELECT pg_column_compression(f1) FROM cmdata1;
- ^
--- decompress data slice
-SELECT SUBSTR(f1, 200, 5) FROM cmdata;
- substr
---------
- 01234
-(1 row)
-
-SELECT SUBSTR(f1, 2000, 50) FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: SELECT SUBSTR(f1, 2000, 50) FROM cmdata1;
- ^
--- copy with table creation
-SELECT * INTO cmmove1 FROM cmdata;
-\d+ cmmove1
- Table "public.cmmove1"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+------+-----------+----------+---------+----------+-------------+--------------+-------------
- f1 | text | | | | extended | | |
-
-SELECT pg_column_compression(f1) FROM cmmove1;
- pg_column_compression
------------------------
- pglz
-(1 row)
-
--- copy to existing table
-CREATE TABLE cmmove3(f1 text COMPRESSION pglz);
-INSERT INTO cmmove3 SELECT * FROM cmdata;
-INSERT INTO cmmove3 SELECT * FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: INSERT INTO cmmove3 SELECT * FROM cmdata1;
- ^
-SELECT pg_column_compression(f1) FROM cmmove3;
- pg_column_compression
------------------------
- pglz
-(1 row)
-
--- test LIKE INCLUDING COMPRESSION
-CREATE TABLE cmdata2 (LIKE cmdata1 INCLUDING COMPRESSION);
-ERROR: relation "cmdata1" does not exist
-LINE 1: CREATE TABLE cmdata2 (LIKE cmdata1 INCLUDING COMPRESSION);
- ^
-\d+ cmdata2
-DROP TABLE cmdata2;
-ERROR: table "cmdata2" does not exist
--- try setting compression for incompressible data type
-CREATE TABLE cmdata2 (f1 int COMPRESSION pglz);
-ERROR: column data type integer does not support compression
--- update using datum from different table
-CREATE TABLE cmmove2(f1 text COMPRESSION pglz);
-INSERT INTO cmmove2 VALUES (repeat('1234567890', 1004));
-SELECT pg_column_compression(f1) FROM cmmove2;
- pg_column_compression
------------------------
- pglz
-(1 row)
-
-UPDATE cmmove2 SET f1 = cmdata1.f1 FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: UPDATE cmmove2 SET f1 = cmdata1.f1 FROM cmdata1;
- ^
-SELECT pg_column_compression(f1) FROM cmmove2;
- pg_column_compression
------------------------
- pglz
-(1 row)
-
--- test externally stored compressed data
-CREATE OR REPLACE FUNCTION large_val() RETURNS TEXT LANGUAGE SQL AS
-'select array_agg(fipshash(g::text))::text from generate_series(1, 256) g';
-CREATE TABLE cmdata2 (f1 text COMPRESSION pglz);
-INSERT INTO cmdata2 SELECT large_val() || repeat('a', 4000);
-SELECT pg_column_compression(f1) FROM cmdata2;
- pg_column_compression
------------------------
- pglz
-(1 row)
-
-INSERT INTO cmdata1 SELECT large_val() || repeat('a', 4000);
-ERROR: relation "cmdata1" does not exist
-LINE 1: INSERT INTO cmdata1 SELECT large_val() || repeat('a', 4000);
- ^
-SELECT pg_column_compression(f1) FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: SELECT pg_column_compression(f1) FROM cmdata1;
- ^
-SELECT SUBSTR(f1, 200, 5) FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: SELECT SUBSTR(f1, 200, 5) FROM cmdata1;
- ^
-SELECT SUBSTR(f1, 200, 5) FROM cmdata2;
- substr
---------
- 79026
-(1 row)
-
-DROP TABLE cmdata2;
---test column type update varlena/non-varlena
-CREATE TABLE cmdata2 (f1 int);
-\d+ cmdata2
- Table "public.cmdata2"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+---------+-----------+----------+---------+---------+-------------+--------------+-------------
- f1 | integer | | | | plain | | |
-
-ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE varchar;
-\d+ cmdata2
- Table "public.cmdata2"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+-------------------+-----------+----------+---------+----------+-------------+--------------+-------------
- f1 | character varying | | | | extended | | |
-
-ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE int USING f1::integer;
-\d+ cmdata2
- Table "public.cmdata2"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+---------+-----------+----------+---------+---------+-------------+--------------+-------------
- f1 | integer | | | | plain | | |
-
---changing column storage should not impact the compression method
---but the data should not be compressed
-ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE varchar;
-ALTER TABLE cmdata2 ALTER COLUMN f1 SET COMPRESSION pglz;
-\d+ cmdata2
- Table "public.cmdata2"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+-------------------+-----------+----------+---------+----------+-------------+--------------+-------------
- f1 | character varying | | | | extended | pglz | |
-
-ALTER TABLE cmdata2 ALTER COLUMN f1 SET STORAGE plain;
-\d+ cmdata2
- Table "public.cmdata2"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+-------------------+-----------+----------+---------+---------+-------------+--------------+-------------
- f1 | character varying | | | | plain | pglz | |
-
-INSERT INTO cmdata2 VALUES (repeat('123456789', 800));
-SELECT pg_column_compression(f1) FROM cmdata2;
- pg_column_compression
------------------------
-
-(1 row)
-
--- test compression with materialized view
-CREATE MATERIALIZED VIEW compressmv(x) AS SELECT * FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: ...TE MATERIALIZED VIEW compressmv(x) AS SELECT * FROM cmdata1;
- ^
-\d+ compressmv
-SELECT pg_column_compression(f1) FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: SELECT pg_column_compression(f1) FROM cmdata1;
- ^
-SELECT pg_column_compression(x) FROM compressmv;
-ERROR: relation "compressmv" does not exist
-LINE 1: SELECT pg_column_compression(x) FROM compressmv;
- ^
--- test compression with partition
-CREATE TABLE cmpart(f1 text COMPRESSION lz4) PARTITION BY HASH(f1);
-ERROR: compression method lz4 not supported
-DETAIL: This functionality requires the server to be built with lz4 support.
-CREATE TABLE cmpart1 PARTITION OF cmpart FOR VALUES WITH (MODULUS 2, REMAINDER 0);
-ERROR: relation "cmpart" does not exist
-CREATE TABLE cmpart2(f1 text COMPRESSION pglz);
-ALTER TABLE cmpart ATTACH PARTITION cmpart2 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
-ERROR: relation "cmpart" does not exist
-INSERT INTO cmpart VALUES (repeat('123456789', 1004));
-ERROR: relation "cmpart" does not exist
-LINE 1: INSERT INTO cmpart VALUES (repeat('123456789', 1004));
- ^
-INSERT INTO cmpart VALUES (repeat('123456789', 4004));
-ERROR: relation "cmpart" does not exist
-LINE 1: INSERT INTO cmpart VALUES (repeat('123456789', 4004));
- ^
-SELECT pg_column_compression(f1) FROM cmpart1;
-ERROR: relation "cmpart1" does not exist
-LINE 1: SELECT pg_column_compression(f1) FROM cmpart1;
- ^
-SELECT pg_column_compression(f1) FROM cmpart2;
- pg_column_compression
------------------------
-(0 rows)
-
--- test compression with inheritance
-CREATE TABLE cminh() INHERITS(cmdata, cmdata1); -- error
-ERROR: relation "cmdata1" does not exist
-CREATE TABLE cminh(f1 TEXT COMPRESSION lz4) INHERITS(cmdata); -- error
-NOTICE: merging column "f1" with inherited definition
-ERROR: column "f1" has a compression method conflict
-DETAIL: pglz versus lz4
-CREATE TABLE cmdata3(f1 text);
-CREATE TABLE cminh() INHERITS (cmdata, cmdata3);
-NOTICE: merging multiple inherited definitions of column "f1"
--- test default_toast_compression GUC
-SET default_toast_compression = '';
-ERROR: invalid value for parameter "default_toast_compression": ""
-HINT: Available values: pglz.
-SET default_toast_compression = 'I do not exist compression';
-ERROR: invalid value for parameter "default_toast_compression": "I do not exist compression"
-HINT: Available values: pglz.
-SET default_toast_compression = 'lz4';
-ERROR: invalid value for parameter "default_toast_compression": "lz4"
-HINT: Available values: pglz.
-SET default_toast_compression = 'pglz';
--- test alter compression method
-ALTER TABLE cmdata ALTER COLUMN f1 SET COMPRESSION lz4;
-ERROR: compression method lz4 not supported
-DETAIL: This functionality requires the server to be built with lz4 support.
-INSERT INTO cmdata VALUES (repeat('123456789', 4004));
-\d+ cmdata
- Table "public.cmdata"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+------+-----------+----------+---------+----------+-------------+--------------+-------------
- f1 | text | | | | extended | pglz | |
-Indexes:
- "idx" btree (f1)
-Child tables: cminh
-
-SELECT pg_column_compression(f1) FROM cmdata;
- pg_column_compression
------------------------
- pglz
- pglz
-(2 rows)
-
-ALTER TABLE cmdata2 ALTER COLUMN f1 SET COMPRESSION default;
-\d+ cmdata2
- Table "public.cmdata2"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+-------------------+-----------+----------+---------+---------+-------------+--------------+-------------
- f1 | character varying | | | | plain | | |
-
--- test alter compression method for materialized views
-ALTER MATERIALIZED VIEW compressmv ALTER COLUMN x SET COMPRESSION lz4;
-ERROR: relation "compressmv" does not exist
-\d+ compressmv
--- test alter compression method for partitioned tables
-ALTER TABLE cmpart1 ALTER COLUMN f1 SET COMPRESSION pglz;
-ERROR: relation "cmpart1" does not exist
-ALTER TABLE cmpart2 ALTER COLUMN f1 SET COMPRESSION lz4;
-ERROR: compression method lz4 not supported
-DETAIL: This functionality requires the server to be built with lz4 support.
--- new data should be compressed with the current compression method
-INSERT INTO cmpart VALUES (repeat('123456789', 1004));
-ERROR: relation "cmpart" does not exist
-LINE 1: INSERT INTO cmpart VALUES (repeat('123456789', 1004));
- ^
-INSERT INTO cmpart VALUES (repeat('123456789', 4004));
-ERROR: relation "cmpart" does not exist
-LINE 1: INSERT INTO cmpart VALUES (repeat('123456789', 4004));
- ^
-SELECT pg_column_compression(f1) FROM cmpart1;
-ERROR: relation "cmpart1" does not exist
-LINE 1: SELECT pg_column_compression(f1) FROM cmpart1;
- ^
-SELECT pg_column_compression(f1) FROM cmpart2;
- pg_column_compression
------------------------
-(0 rows)
-
--- VACUUM FULL does not recompress
-SELECT pg_column_compression(f1) FROM cmdata;
- pg_column_compression
------------------------
- pglz
- pglz
-(2 rows)
-
-VACUUM FULL cmdata;
-SELECT pg_column_compression(f1) FROM cmdata;
- pg_column_compression
------------------------
- pglz
- pglz
-(2 rows)
-
--- test expression index
-DROP TABLE cmdata2;
-CREATE TABLE cmdata2 (f1 TEXT COMPRESSION pglz, f2 TEXT COMPRESSION lz4);
-ERROR: compression method lz4 not supported
-DETAIL: This functionality requires the server to be built with lz4 support.
-CREATE UNIQUE INDEX idx1 ON cmdata2 ((f1 || f2));
-ERROR: relation "cmdata2" does not exist
-INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEXT))::TEXT FROM
-generate_series(1, 50) g), VERSION());
-ERROR: relation "cmdata2" does not exist
-LINE 1: INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEX...
- ^
--- check data is ok
-SELECT length(f1) FROM cmdata;
- length
---------
- 10000
- 36036
-(2 rows)
-
-SELECT length(f1) FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: SELECT length(f1) FROM cmdata1;
- ^
-SELECT length(f1) FROM cmmove1;
- length
---------
- 10000
-(1 row)
-
-SELECT length(f1) FROM cmmove2;
- length
---------
- 10040
-(1 row)
-
-SELECT length(f1) FROM cmmove3;
- length
---------
- 10000
-(1 row)
-
-CREATE TABLE badcompresstbl (a text COMPRESSION I_Do_Not_Exist_Compression); -- fails
-ERROR: invalid compression method "i_do_not_exist_compression"
-CREATE TABLE badcompresstbl (a text);
-ALTER TABLE badcompresstbl ALTER a SET COMPRESSION I_Do_Not_Exist_Compression; -- fails
-ERROR: invalid compression method "i_do_not_exist_compression"
-DROP TABLE badcompresstbl;
-\set HIDE_TOAST_COMPRESSION true
diff --git a/src/test/regress/expected/compression_lz4.out b/src/test/regress/expected/compression_lz4.out
new file mode 100644
index 00000000000..068dd7c3674
--- /dev/null
+++ b/src/test/regress/expected/compression_lz4.out
@@ -0,0 +1,268 @@
+-- Tests for TOAST compression with lz4
+SELECT NOT(enumvals @> '{lz4}') AS skip_test FROM pg_settings WHERE
+ name = 'default_toast_compression' \gset
+\if :skip_test
+ \echo '*** skipping TOAST tests with lz4 (not supported) ***'
+ \quit
+\endif
+CREATE SCHEMA lz4;
+SET search_path TO lz4, public;
+\set HIDE_TOAST_COMPRESSION false
+-- Ensure we get stable results regardless of the installation's default.
+-- We rely on this GUC value for a few tests.
+SET default_toast_compression = 'pglz';
+-- test creating table with compression method
+CREATE TABLE cmdata_pglz(f1 text COMPRESSION pglz);
+CREATE INDEX idx ON cmdata_pglz(f1);
+INSERT INTO cmdata_pglz VALUES(repeat('1234567890', 1000));
+\d+ cmdata_pglz
+ Table "lz4.cmdata_pglz"
+ Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
+--------+------+-----------+----------+---------+----------+-------------+--------------+-------------
+ f1 | text | | | | extended | pglz | |
+Indexes:
+ "idx" btree (f1)
+
+CREATE TABLE cmdata_lz4(f1 TEXT COMPRESSION lz4);
+INSERT INTO cmdata_lz4 VALUES(repeat('1234567890', 1004));
+\d+ cmdata_lz4
+ Table "lz4.cmdata_lz4"
+ Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
+--------+------+-----------+----------+---------+----------+-------------+--------------+-------------
+ f1 | text | | | | extended | lz4 | |
+
+-- verify stored compression method in the data
+SELECT pg_column_compression(f1) FROM cmdata_lz4;
+ pg_column_compression
+-----------------------
+ lz4
+(1 row)
+
+-- decompress data slice
+SELECT SUBSTR(f1, 200, 5) FROM cmdata_pglz;
+ substr
+--------
+ 01234
+(1 row)
+
+SELECT SUBSTR(f1, 2000, 50) FROM cmdata_lz4;
+ substr
+----------------------------------------------------
+ 01234567890123456789012345678901234567890123456789
+(1 row)
+
+-- copy with table creation
+SELECT * INTO cmmove1 FROM cmdata_lz4;
+\d+ cmmove1
+ Table "lz4.cmmove1"
+ Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
+--------+------+-----------+----------+---------+----------+-------------+--------------+-------------
+ f1 | text | | | | extended | | |
+
+SELECT pg_column_compression(f1) FROM cmmove1;
+ pg_column_compression
+-----------------------
+ lz4
+(1 row)
+
+-- test LIKE INCLUDING COMPRESSION.  The GUC default_toast_compression
+-- has no effect here; the compression method is copied from the source table.
+CREATE TABLE cmdata2 (LIKE cmdata_lz4 INCLUDING COMPRESSION);
+\d+ cmdata2
+ Table "lz4.cmdata2"
+ Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
+--------+------+-----------+----------+---------+----------+-------------+--------------+-------------
+ f1 | text | | | | extended | lz4 | |
+
+DROP TABLE cmdata2;
+-- copy to existing table
+CREATE TABLE cmmove3(f1 text COMPRESSION pglz);
+INSERT INTO cmmove3 SELECT * FROM cmdata_pglz;
+INSERT INTO cmmove3 SELECT * FROM cmdata_lz4;
+SELECT pg_column_compression(f1) FROM cmmove3;
+ pg_column_compression
+-----------------------
+ pglz
+ lz4
+(2 rows)
+
+-- update using a datum from a different table with LZ4 data
+CREATE TABLE cmmove2(f1 text COMPRESSION pglz);
+INSERT INTO cmmove2 VALUES (repeat('1234567890', 1004));
+SELECT pg_column_compression(f1) FROM cmmove2;
+ pg_column_compression
+-----------------------
+ pglz
+(1 row)
+
+UPDATE cmmove2 SET f1 = cmdata_lz4.f1 FROM cmdata_lz4;
+SELECT pg_column_compression(f1) FROM cmmove2;
+ pg_column_compression
+-----------------------
+ lz4
+(1 row)
+
+-- test externally stored compressed data
+CREATE OR REPLACE FUNCTION large_val_lz4() RETURNS TEXT LANGUAGE SQL AS
+'select array_agg(fipshash(g::text))::text from generate_series(1, 256) g';
+CREATE TABLE cmdata2 (f1 text COMPRESSION lz4);
+INSERT INTO cmdata2 SELECT large_val_lz4() || repeat('a', 4000);
+SELECT pg_column_compression(f1) FROM cmdata2;
+ pg_column_compression
+-----------------------
+ lz4
+(1 row)
+
+SELECT SUBSTR(f1, 200, 5) FROM cmdata2;
+ substr
+--------
+ 79026
+(1 row)
+
+DROP TABLE cmdata2;
+DROP FUNCTION large_val_lz4;
+-- test compression with materialized view
+CREATE MATERIALIZED VIEW compressmv(x) AS SELECT * FROM cmdata_lz4;
+\d+ compressmv
+ Materialized view "lz4.compressmv"
+ Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
+--------+------+-----------+----------+---------+----------+-------------+--------------+-------------
+ x | text | | | | extended | | |
+View definition:
+ SELECT f1 AS x
+ FROM cmdata_lz4;
+
+SELECT pg_column_compression(f1) FROM cmdata_lz4;
+ pg_column_compression
+-----------------------
+ lz4
+(1 row)
+
+SELECT pg_column_compression(x) FROM compressmv;
+ pg_column_compression
+-----------------------
+ lz4
+(1 row)
+
+-- test compression with partition
+CREATE TABLE cmpart(f1 text COMPRESSION lz4) PARTITION BY HASH(f1);
+CREATE TABLE cmpart1 PARTITION OF cmpart FOR VALUES WITH (MODULUS 2, REMAINDER 0);
+CREATE TABLE cmpart2(f1 text COMPRESSION pglz);
+ALTER TABLE cmpart ATTACH PARTITION cmpart2 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
+INSERT INTO cmpart VALUES (repeat('123456789', 1004));
+INSERT INTO cmpart VALUES (repeat('123456789', 4004));
+SELECT pg_column_compression(f1) FROM cmpart1;
+ pg_column_compression
+-----------------------
+ lz4
+(1 row)
+
+SELECT pg_column_compression(f1) FROM cmpart2;
+ pg_column_compression
+-----------------------
+ pglz
+(1 row)
+
+-- test compression with inheritance
+CREATE TABLE cminh() INHERITS(cmdata_pglz, cmdata_lz4); -- error
+NOTICE: merging multiple inherited definitions of column "f1"
+ERROR: column "f1" has a compression method conflict
+DETAIL: pglz versus lz4
+CREATE TABLE cminh(f1 TEXT COMPRESSION lz4) INHERITS(cmdata_pglz); -- error
+NOTICE: merging column "f1" with inherited definition
+ERROR: column "f1" has a compression method conflict
+DETAIL: pglz versus lz4
+CREATE TABLE cmdata3(f1 text);
+CREATE TABLE cminh() INHERITS (cmdata_pglz, cmdata3);
+NOTICE: merging multiple inherited definitions of column "f1"
+-- test default_toast_compression GUC
+SET default_toast_compression = 'lz4';
+-- test alter compression method
+ALTER TABLE cmdata_pglz ALTER COLUMN f1 SET COMPRESSION lz4;
+INSERT INTO cmdata_pglz VALUES (repeat('123456789', 4004));
+\d+ cmdata_pglz
+ Table "lz4.cmdata_pglz"
+ Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
+--------+------+-----------+----------+---------+----------+-------------+--------------+-------------
+ f1 | text | | | | extended | lz4 | |
+Indexes:
+ "idx" btree (f1)
+
+SELECT pg_column_compression(f1) FROM cmdata_pglz;
+ pg_column_compression
+-----------------------
+ pglz
+ lz4
+(2 rows)
+
+ALTER TABLE cmdata_pglz ALTER COLUMN f1 SET COMPRESSION pglz;
+-- test alter compression method for materialized views
+ALTER MATERIALIZED VIEW compressmv ALTER COLUMN x SET COMPRESSION lz4;
+\d+ compressmv
+ Materialized view "lz4.compressmv"
+ Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
+--------+------+-----------+----------+---------+----------+-------------+--------------+-------------
+ x | text | | | | extended | lz4 | |
+View definition:
+ SELECT f1 AS x
+ FROM cmdata_lz4;
+
+-- test alter compression method for partitioned tables
+ALTER TABLE cmpart1 ALTER COLUMN f1 SET COMPRESSION pglz;
+ALTER TABLE cmpart2 ALTER COLUMN f1 SET COMPRESSION lz4;
+-- new data should be compressed with the current compression method
+INSERT INTO cmpart VALUES (repeat('123456789', 1004));
+INSERT INTO cmpart VALUES (repeat('123456789', 4004));
+SELECT pg_column_compression(f1) FROM cmpart1;
+ pg_column_compression
+-----------------------
+ lz4
+ pglz
+(2 rows)
+
+SELECT pg_column_compression(f1) FROM cmpart2;
+ pg_column_compression
+-----------------------
+ pglz
+ lz4
+(2 rows)
+
+-- test expression index
+CREATE TABLE cmdata2 (f1 TEXT COMPRESSION pglz, f2 TEXT COMPRESSION lz4);
+CREATE UNIQUE INDEX idx1 ON cmdata2 ((f1 || f2));
+INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEXT))::TEXT FROM
+generate_series(1, 50) g), VERSION());
+-- check data is ok
+SELECT length(f1) FROM cmdata_pglz;
+ length
+--------
+ 10000
+ 36036
+(2 rows)
+
+SELECT length(f1) FROM cmdata_lz4;
+ length
+--------
+ 10040
+(1 row)
+
+SELECT length(f1) FROM cmmove1;
+ length
+--------
+ 10040
+(1 row)
+
+SELECT length(f1) FROM cmmove2;
+ length
+--------
+ 10040
+(1 row)
+
+SELECT length(f1) FROM cmmove3;
+ length
+--------
+ 10000
+ 10040
+(2 rows)
+
+\set HIDE_TOAST_COMPRESSION true
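
The \gset/\if header used by this file is a general psql idiom: a server-side query computes a boolean, \gset copies it into a psql variable, and \if branches on it client-side. A minimal sketch of the same idiom, with a hypothetical version check standing in for the lz4 probe:

    -- hypothetical guard: skip the script on servers older than v17
    SELECT current_setting('server_version_num')::int < 170000 AS skip_test \gset
    \if :skip_test
      \echo 'skipping: requires PostgreSQL 17 or newer'
      \quit
    \endif
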
diff --git a/src/test/regress/expected/compression_lz4_1.out b/src/test/regress/expected/compression_lz4_1.out
new file mode 100644
index 00000000000..198056fa224
--- /dev/null
+++ b/src/test/regress/expected/compression_lz4_1.out
@@ -0,0 +1,7 @@
+-- Tests for TOAST compression with lz4
+SELECT NOT(enumvals @> '{lz4}') AS skip_test FROM pg_settings WHERE
+ name = 'default_toast_compression' \gset
+\if :skip_test
+ \echo '*** skipping TOAST tests with lz4 (not supported) ***'
+*** skipping TOAST tests with lz4 (not supported) ***
+ \quit
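
pg_regress accepts testname_1.out (and further numbered variants) as alternative expected files, so this 7-line file is what a build without lz4 support produces: the guard's \quit ends the script right after the echo. The probe works because an enum GUC only advertises values compiled into the server; a sketch of inspecting it directly, with the output shown assuming an lz4-enabled build:

    SELECT enumvals FROM pg_settings WHERE name = 'default_toast_compression';
      enumvals
    ------------
     {pglz,lz4}
    (1 row)
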
diff --git a/src/test/regress/expected/constraints.out b/src/test/regress/expected/constraints.out
index ccea883cffd..3590d3274f0 100644
--- a/src/test/regress/expected/constraints.out
+++ b/src/test/regress/expected/constraints.out
@@ -1701,3 +1701,7 @@ DROP TABLE constraint_comments_tbl;
DROP DOMAIN constraint_comments_dom;
DROP ROLE regress_constraint_comments;
DROP ROLE regress_constraint_comments_noaccess;
+-- Leave some constraints for the pg_upgrade test to pick up
+CREATE DOMAIN constraint_comments_dom AS int;
+ALTER DOMAIN constraint_comments_dom ADD CONSTRAINT inv_ck CHECK (value > 0) NOT VALID;
+COMMENT ON CONSTRAINT inv_ck ON DOMAIN constraint_comments_dom IS 'comment on invalid constraint';
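
The NOT VALID constraint and its comment are left behind so the cross-version pg_upgrade test can check that both survive an upgrade. A sketch of how one might verify them on the upgraded cluster (not part of the patch; convalidated should read 'f' for an invalid constraint):

    SELECT c.conname, c.convalidated,
           obj_description(c.oid, 'pg_constraint') AS constraint_comment
    FROM pg_constraint c
    WHERE c.conname = 'inv_ck';
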
diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule
index a424be2a6bf..fbffc67ae60 100644
--- a/src/test/regress/parallel_schedule
+++ b/src/test/regress/parallel_schedule
@@ -123,7 +123,7 @@ test: plancache limit plpgsql copy2 temp domain rangefuncs prepare conversion tr
# The stats test resets stats, so nothing else needing stats access can be in
# this group.
# ----------
-test: partition_join partition_prune reloptions hash_part indexing partition_aggregate partition_info tuplesort explain compression memoize stats predicate numa
+test: partition_join partition_prune reloptions hash_part indexing partition_aggregate partition_info tuplesort explain compression compression_lz4 memoize stats predicate numa
# event_trigger depends on create_am and cannot run concurrently with
# any test that runs DDL
diff --git a/src/test/regress/sql/compression.sql b/src/test/regress/sql/compression.sql
index 490595fcfb2..ce5ea37a660 100644
--- a/src/test/regress/sql/compression.sql
+++ b/src/test/regress/sql/compression.sql
@@ -1,3 +1,8 @@
+-- Default set of tests for TOAST compression, independent of the
+-- compression methods supported by the build.
+
+CREATE SCHEMA pglz;
+SET search_path TO pglz, public;
\set HIDE_TOAST_COMPRESSION false
-- ensure we get stable results regardless of installation's default
@@ -8,53 +13,27 @@ CREATE TABLE cmdata(f1 text COMPRESSION pglz);
CREATE INDEX idx ON cmdata(f1);
INSERT INTO cmdata VALUES(repeat('1234567890', 1000));
\d+ cmdata
-CREATE TABLE cmdata1(f1 TEXT COMPRESSION lz4);
-INSERT INTO cmdata1 VALUES(repeat('1234567890', 1004));
-\d+ cmdata1
-- verify stored compression method in the data
SELECT pg_column_compression(f1) FROM cmdata;
-SELECT pg_column_compression(f1) FROM cmdata1;
-- decompress data slice
SELECT SUBSTR(f1, 200, 5) FROM cmdata;
-SELECT SUBSTR(f1, 2000, 50) FROM cmdata1;
-- copy with table creation
SELECT * INTO cmmove1 FROM cmdata;
\d+ cmmove1
SELECT pg_column_compression(f1) FROM cmmove1;
--- copy to existing table
-CREATE TABLE cmmove3(f1 text COMPRESSION pglz);
-INSERT INTO cmmove3 SELECT * FROM cmdata;
-INSERT INTO cmmove3 SELECT * FROM cmdata1;
-SELECT pg_column_compression(f1) FROM cmmove3;
-
--- test LIKE INCLUDING COMPRESSION
-CREATE TABLE cmdata2 (LIKE cmdata1 INCLUDING COMPRESSION);
-\d+ cmdata2
-DROP TABLE cmdata2;
-
-- try setting compression for incompressible data type
CREATE TABLE cmdata2 (f1 int COMPRESSION pglz);
--- update using datum from different table
-CREATE TABLE cmmove2(f1 text COMPRESSION pglz);
-INSERT INTO cmmove2 VALUES (repeat('1234567890', 1004));
-SELECT pg_column_compression(f1) FROM cmmove2;
-UPDATE cmmove2 SET f1 = cmdata1.f1 FROM cmdata1;
-SELECT pg_column_compression(f1) FROM cmmove2;
-
-- test externally stored compressed data
CREATE OR REPLACE FUNCTION large_val() RETURNS TEXT LANGUAGE SQL AS
'select array_agg(fipshash(g::text))::text from generate_series(1, 256) g';
CREATE TABLE cmdata2 (f1 text COMPRESSION pglz);
INSERT INTO cmdata2 SELECT large_val() || repeat('a', 4000);
SELECT pg_column_compression(f1) FROM cmdata2;
-INSERT INTO cmdata1 SELECT large_val() || repeat('a', 4000);
-SELECT pg_column_compression(f1) FROM cmdata1;
-SELECT SUBSTR(f1, 200, 5) FROM cmdata1;
SELECT SUBSTR(f1, 200, 5) FROM cmdata2;
DROP TABLE cmdata2;
@@ -76,76 +55,31 @@ ALTER TABLE cmdata2 ALTER COLUMN f1 SET STORAGE plain;
INSERT INTO cmdata2 VALUES (repeat('123456789', 800));
SELECT pg_column_compression(f1) FROM cmdata2;
--- test compression with materialized view
-CREATE MATERIALIZED VIEW compressmv(x) AS SELECT * FROM cmdata1;
-\d+ compressmv
-SELECT pg_column_compression(f1) FROM cmdata1;
-SELECT pg_column_compression(x) FROM compressmv;
-
--- test compression with partition
-CREATE TABLE cmpart(f1 text COMPRESSION lz4) PARTITION BY HASH(f1);
-CREATE TABLE cmpart1 PARTITION OF cmpart FOR VALUES WITH (MODULUS 2, REMAINDER 0);
-CREATE TABLE cmpart2(f1 text COMPRESSION pglz);
-
-ALTER TABLE cmpart ATTACH PARTITION cmpart2 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
-INSERT INTO cmpart VALUES (repeat('123456789', 1004));
-INSERT INTO cmpart VALUES (repeat('123456789', 4004));
-SELECT pg_column_compression(f1) FROM cmpart1;
-SELECT pg_column_compression(f1) FROM cmpart2;
-
-- test compression with inheritance
-CREATE TABLE cminh() INHERITS(cmdata, cmdata1); -- error
-CREATE TABLE cminh(f1 TEXT COMPRESSION lz4) INHERITS(cmdata); -- error
CREATE TABLE cmdata3(f1 text);
CREATE TABLE cminh() INHERITS (cmdata, cmdata3);
-- test default_toast_compression GUC
+-- suppress machine-dependent details
+\set VERBOSITY terse
SET default_toast_compression = '';
SET default_toast_compression = 'I do not exist compression';
-SET default_toast_compression = 'lz4';
SET default_toast_compression = 'pglz';
-
--- test alter compression method
-ALTER TABLE cmdata ALTER COLUMN f1 SET COMPRESSION lz4;
-INSERT INTO cmdata VALUES (repeat('123456789', 4004));
-\d+ cmdata
-SELECT pg_column_compression(f1) FROM cmdata;
+\set VERBOSITY default
ALTER TABLE cmdata2 ALTER COLUMN f1 SET COMPRESSION default;
\d+ cmdata2
--- test alter compression method for materialized views
-ALTER MATERIALIZED VIEW compressmv ALTER COLUMN x SET COMPRESSION lz4;
-\d+ compressmv
-
--- test alter compression method for partitioned tables
-ALTER TABLE cmpart1 ALTER COLUMN f1 SET COMPRESSION pglz;
-ALTER TABLE cmpart2 ALTER COLUMN f1 SET COMPRESSION lz4;
-
--- new data should be compressed with the current compression method
-INSERT INTO cmpart VALUES (repeat('123456789', 1004));
-INSERT INTO cmpart VALUES (repeat('123456789', 4004));
-SELECT pg_column_compression(f1) FROM cmpart1;
-SELECT pg_column_compression(f1) FROM cmpart2;
+DROP TABLE cmdata2;
-- VACUUM FULL does not recompress
SELECT pg_column_compression(f1) FROM cmdata;
VACUUM FULL cmdata;
SELECT pg_column_compression(f1) FROM cmdata;
--- test expression index
-DROP TABLE cmdata2;
-CREATE TABLE cmdata2 (f1 TEXT COMPRESSION pglz, f2 TEXT COMPRESSION lz4);
-CREATE UNIQUE INDEX idx1 ON cmdata2 ((f1 || f2));
-INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEXT))::TEXT FROM
-generate_series(1, 50) g), VERSION());
-
-- check data is ok
SELECT length(f1) FROM cmdata;
-SELECT length(f1) FROM cmdata1;
SELECT length(f1) FROM cmmove1;
-SELECT length(f1) FROM cmmove2;
-SELECT length(f1) FROM cmmove3;
CREATE TABLE badcompresstbl (a text COMPRESSION I_Do_Not_Exist_Compression); -- fails
CREATE TABLE badcompresstbl (a text);
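
As the comments above note, VACUUM FULL rewrites the table but copies compressed datums verbatim, so pre-existing values keep their original compression method even after ALTER TABLE ... SET COMPRESSION; only newly stored datums pick up the new setting. A hedged sketch of one way to force recompression, relying on the fact that text concatenation detoasts its inputs and therefore stores each value anew under the column's current method:

    ALTER TABLE cmdata ALTER COLUMN f1 SET COMPRESSION default;
    UPDATE cmdata SET f1 = f1 || '';  -- rebuild each datum, recompressing it
    SELECT DISTINCT pg_column_compression(f1) FROM cmdata;
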
diff --git a/src/test/regress/sql/compression_lz4.sql b/src/test/regress/sql/compression_lz4.sql
new file mode 100644
index 00000000000..3849f8618de
--- /dev/null
+++ b/src/test/regress/sql/compression_lz4.sql
@@ -0,0 +1,129 @@
+-- Tests for TOAST compression with lz4
+
+SELECT NOT(enumvals @> '{lz4}') AS skip_test FROM pg_settings WHERE
+ name = 'default_toast_compression' \gset
+\if :skip_test
+ \echo '*** skipping TOAST tests with lz4 (not supported) ***'
+ \quit
+\endif
+
+CREATE SCHEMA lz4;
+SET search_path TO lz4, public;
+
+\set HIDE_TOAST_COMPRESSION false
+
+-- Ensure we get stable results regardless of the installation's default.
+-- We rely on this GUC value for a few tests.
+SET default_toast_compression = 'pglz';
+
+-- test creating table with compression method
+CREATE TABLE cmdata_pglz(f1 text COMPRESSION pglz);
+CREATE INDEX idx ON cmdata_pglz(f1);
+INSERT INTO cmdata_pglz VALUES(repeat('1234567890', 1000));
+\d+ cmdata_pglz
+CREATE TABLE cmdata_lz4(f1 TEXT COMPRESSION lz4);
+INSERT INTO cmdata_lz4 VALUES(repeat('1234567890', 1004));
+\d+ cmdata_lz4
+
+-- verify stored compression method in the data
+SELECT pg_column_compression(f1) FROM cmdata_lz4;
+
+-- decompress data slice
+SELECT SUBSTR(f1, 200, 5) FROM cmdata_pglz;
+SELECT SUBSTR(f1, 2000, 50) FROM cmdata_lz4;
+
+-- copy with table creation
+SELECT * INTO cmmove1 FROM cmdata_lz4;
+\d+ cmmove1
+SELECT pg_column_compression(f1) FROM cmmove1;
+
+-- test LIKE INCLUDING COMPRESSION.  The GUC default_toast_compression
+-- has no effect here; the compression method is copied from the source table.
+CREATE TABLE cmdata2 (LIKE cmdata_lz4 INCLUDING COMPRESSION);
+\d+ cmdata2
+DROP TABLE cmdata2;
+
+-- copy to existing table
+CREATE TABLE cmmove3(f1 text COMPRESSION pglz);
+INSERT INTO cmmove3 SELECT * FROM cmdata_pglz;
+INSERT INTO cmmove3 SELECT * FROM cmdata_lz4;
+SELECT pg_column_compression(f1) FROM cmmove3;
+
+-- update using a datum from a different table with LZ4 data
+CREATE TABLE cmmove2(f1 text COMPRESSION pglz);
+INSERT INTO cmmove2 VALUES (repeat('1234567890', 1004));
+SELECT pg_column_compression(f1) FROM cmmove2;
+UPDATE cmmove2 SET f1 = cmdata_lz4.f1 FROM cmdata_lz4;
+SELECT pg_column_compression(f1) FROM cmmove2;
+
+-- test externally stored compressed data
+CREATE OR REPLACE FUNCTION large_val_lz4() RETURNS TEXT LANGUAGE SQL AS
+'select array_agg(fipshash(g::text))::text from generate_series(1, 256) g';
+CREATE TABLE cmdata2 (f1 text COMPRESSION lz4);
+INSERT INTO cmdata2 SELECT large_val_lz4() || repeat('a', 4000);
+SELECT pg_column_compression(f1) FROM cmdata2;
+SELECT SUBSTR(f1, 200, 5) FROM cmdata2;
+DROP TABLE cmdata2;
+DROP FUNCTION large_val_lz4;
+
+-- test compression with materialized view
+CREATE MATERIALIZED VIEW compressmv(x) AS SELECT * FROM cmdata_lz4;
+\d+ compressmv
+SELECT pg_column_compression(f1) FROM cmdata_lz4;
+SELECT pg_column_compression(x) FROM compressmv;
+
+-- test compression with partition
+CREATE TABLE cmpart(f1 text COMPRESSION lz4) PARTITION BY HASH(f1);
+CREATE TABLE cmpart1 PARTITION OF cmpart FOR VALUES WITH (MODULUS 2, REMAINDER 0);
+CREATE TABLE cmpart2(f1 text COMPRESSION pglz);
+
+ALTER TABLE cmpart ATTACH PARTITION cmpart2 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
+INSERT INTO cmpart VALUES (repeat('123456789', 1004));
+INSERT INTO cmpart VALUES (repeat('123456789', 4004));
+SELECT pg_column_compression(f1) FROM cmpart1;
+SELECT pg_column_compression(f1) FROM cmpart2;
+
+-- test compression with inheritance
+CREATE TABLE cminh() INHERITS(cmdata_pglz, cmdata_lz4); -- error
+CREATE TABLE cminh(f1 TEXT COMPRESSION lz4) INHERITS(cmdata_pglz); -- error
+CREATE TABLE cmdata3(f1 text);
+CREATE TABLE cminh() INHERITS (cmdata_pglz, cmdata3);
+
+-- test default_toast_compression GUC
+SET default_toast_compression = 'lz4';
+
+-- test alter compression method
+ALTER TABLE cmdata_pglz ALTER COLUMN f1 SET COMPRESSION lz4;
+INSERT INTO cmdata_pglz VALUES (repeat('123456789', 4004));
+\d+ cmdata_pglz
+SELECT pg_column_compression(f1) FROM cmdata_pglz;
+ALTER TABLE cmdata_pglz ALTER COLUMN f1 SET COMPRESSION pglz;
+
+-- test alter compression method for materialized views
+ALTER MATERIALIZED VIEW compressmv ALTER COLUMN x SET COMPRESSION lz4;
+\d+ compressmv
+
+-- test alter compression method for partitioned tables
+ALTER TABLE cmpart1 ALTER COLUMN f1 SET COMPRESSION pglz;
+ALTER TABLE cmpart2 ALTER COLUMN f1 SET COMPRESSION lz4;
+
+-- new data should be compressed with the current compression method
+INSERT INTO cmpart VALUES (repeat('123456789', 1004));
+INSERT INTO cmpart VALUES (repeat('123456789', 4004));
+SELECT pg_column_compression(f1) FROM cmpart1;
+SELECT pg_column_compression(f1) FROM cmpart2;
+
+-- test expression index
+CREATE TABLE cmdata2 (f1 TEXT COMPRESSION pglz, f2 TEXT COMPRESSION lz4);
+CREATE UNIQUE INDEX idx1 ON cmdata2 ((f1 || f2));
+INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEXT))::TEXT FROM
+generate_series(1, 50) g), VERSION());
+
+-- check data is ok
+SELECT length(f1) FROM cmdata_pglz;
+SELECT length(f1) FROM cmdata_lz4;
+SELECT length(f1) FROM cmmove1;
+SELECT length(f1) FROM cmmove2;
+SELECT length(f1) FROM cmmove3;
+
+\set HIDE_TOAST_COMPRESSION true
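
A companion check to pg_column_compression() that can be handy when extending these tests: pg_column_size() reports the stored (possibly compressed) size of a datum, so comparing it with length() makes the effect of a compression method visible. A sketch against the tables above; byte counts are illustrative and vary by build:

    SELECT length(f1) AS logical_chars,
           pg_column_size(f1) AS stored_bytes
    FROM cmdata_lz4;
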
diff --git a/src/test/regress/sql/constraints.sql b/src/test/regress/sql/constraints.sql
index 7487723ab84..1f6dc8fd69f 100644
--- a/src/test/regress/sql/constraints.sql
+++ b/src/test/regress/sql/constraints.sql
@@ -1043,3 +1043,9 @@ DROP DOMAIN constraint_comments_dom;
DROP ROLE regress_constraint_comments;
DROP ROLE regress_constraint_comments_noaccess;
+
+-- Leave some constraints for the pg_upgrade test to pick up
+CREATE DOMAIN constraint_comments_dom AS int;
+
+ALTER DOMAIN constraint_comments_dom ADD CONSTRAINT inv_ck CHECK (value > 0) NOT VALID;
+COMMENT ON CONSTRAINT inv_ck ON DOMAIN constraint_comments_dom IS 'comment on invalid constraint';