Diffstat (limited to 'src')
-rw-r--r--  src/backend/access/brin/brin.c | 4
-rw-r--r--  src/backend/access/brin/brin_bloom.c | 2
-rw-r--r--  src/backend/access/brin/brin_minmax.c | 10
-rw-r--r--  src/backend/access/brin/brin_minmax_multi.c | 6
-rw-r--r--  src/backend/access/common/heaptuple.c | 2
-rw-r--r--  src/backend/access/common/reloptions.c | 10
-rw-r--r--  src/backend/access/common/toast_internals.c | 4
-rw-r--r--  src/backend/backup/basebackup_copy.c | 14
-rw-r--r--  src/backend/catalog/objectaddress.c | 4
-rw-r--r--  src/backend/catalog/pg_aggregate.c | 2
-rw-r--r--  src/backend/catalog/pg_constraint.c | 2
-rw-r--r--  src/backend/catalog/pg_conversion.c | 2
-rw-r--r--  src/backend/catalog/pg_namespace.c | 2
-rw-r--r--  src/backend/catalog/pg_operator.c | 4
-rw-r--r--  src/backend/catalog/pg_proc.c | 2
-rw-r--r--  src/backend/catalog/pg_publication.c | 2
-rw-r--r--  src/backend/catalog/pg_shdepend.c | 12
-rw-r--r--  src/backend/catalog/pg_type.c | 2
-rw-r--r--  src/backend/commands/alter.c | 2
-rw-r--r--  src/backend/commands/event_trigger.c | 4
-rw-r--r--  src/backend/commands/subscriptioncmds.c | 8
-rw-r--r--  src/backend/commands/tablecmds.c | 2
-rw-r--r--  src/backend/commands/trigger.c | 32
-rw-r--r--  src/backend/commands/tsearchcmds.c | 8
-rw-r--r--  src/backend/commands/user.c | 6
-rw-r--r--  src/backend/executor/execExprInterp.c | 8
-rw-r--r--  src/backend/executor/execMain.c | 3
-rw-r--r--  src/backend/executor/spi.c | 2
-rw-r--r--  src/backend/nodes/readfuncs.c | 2
-rw-r--r--  src/backend/optimizer/plan/createplan.c | 24
-rw-r--r--  src/backend/optimizer/plan/planner.c | 33
-rw-r--r--  src/backend/optimizer/util/plancat.c | 54
-rw-r--r--  src/backend/postmaster/checkpointer.c | 9
-rw-r--r--  src/backend/replication/logical/applyparallelworker.c | 4
-rw-r--r--  src/backend/replication/logical/worker.c | 10
-rw-r--r--  src/backend/replication/walreceiver.c | 8
-rw-r--r--  src/backend/replication/walsender.c | 25
-rw-r--r--  src/backend/rewrite/rewriteDefine.c | 5
-rw-r--r--  src/backend/statistics/attribute_stats.c | 6
-rw-r--r--  src/backend/statistics/extended_stats.c | 110
-rw-r--r--  src/backend/storage/aio/aio_funcs.c | 2
-rw-r--r--  src/backend/storage/aio/read_stream.c | 34
-rw-r--r--  src/backend/storage/buffer/bufmgr.c | 9
-rw-r--r--  src/backend/storage/ipc/shmem.c | 2
-rw-r--r--  src/backend/storage/large_object/inv_api.c | 4
-rw-r--r--  src/backend/storage/lmgr/lock.c | 4
-rw-r--r--  src/backend/tsearch/ts_parse.c | 4
-rw-r--r--  src/backend/utils/activity/pgstat.c | 2
-rw-r--r--  src/backend/utils/activity/pgstat_shmem.c | 5
-rw-r--r--  src/backend/utils/adt/datum.c | 6
-rw-r--r--  src/backend/utils/adt/json.c | 2
-rw-r--r--  src/backend/utils/adt/jsonfuncs.c | 6
-rw-r--r--  src/backend/utils/adt/jsonpath_exec.c | 2
-rw-r--r--  src/backend/utils/adt/lockfuncs.c | 8
-rw-r--r--  src/backend/utils/adt/multirangetypes.c | 19
-rw-r--r--  src/backend/utils/adt/numeric.c | 502
-rw-r--r--  src/backend/utils/adt/rangetypes.c | 16
-rw-r--r--  src/backend/utils/adt/rangetypes_spgist.c | 2
-rw-r--r--  src/backend/utils/adt/rowtypes.c | 4
-rw-r--r--  src/backend/utils/adt/selfuncs.c | 486
-rw-r--r--  src/backend/utils/adt/timestamp.c | 2
-rw-r--r--  src/backend/utils/adt/varlena.c | 5
-rw-r--r--  src/backend/utils/adt/waitfuncs.c | 2
-rw-r--r--  src/backend/utils/cache/attoptcache.c | 2
-rw-r--r--  src/backend/utils/cache/lsyscache.c | 2
-rw-r--r--  src/backend/utils/cache/relcache.c | 4
-rw-r--r--  src/backend/utils/cache/syscache.c | 6
-rw-r--r--  src/backend/utils/error/csvlog.c | 2
-rw-r--r--  src/backend/utils/error/elog.c | 4
-rw-r--r--  src/backend/utils/error/jsonlog.c | 2
-rw-r--r--  src/backend/utils/sort/sortsupport.c | 2
-rw-r--r--  src/backend/utils/sort/tuplesortvariants.c | 6
-rw-r--r--  src/bin/pg_basebackup/pg_basebackup.c | 9
-rw-r--r--  src/bin/pg_basebackup/pg_recvlogical.c | 11
-rw-r--r--  src/bin/pg_basebackup/receivelog.c | 11
-rw-r--r--  src/bin/pg_combinebackup/t/002_compare_backups.pl | 2
-rw-r--r--  src/bin/pg_dump/dumputils.c | 75
-rw-r--r--  src/bin/pg_dump/dumputils.h | 4
-rw-r--r--  src/bin/pg_dump/filter.c | 13
-rw-r--r--  src/bin/pg_dump/pg_backup.h | 4
-rw-r--r--  src/bin/pg_dump/pg_backup_archiver.c | 69
-rw-r--r--  src/bin/pg_dump/pg_dump.c | 26
-rw-r--r--  src/bin/pg_dump/pg_dumpall.c | 49
-rw-r--r--  src/bin/pg_dump/pg_restore.c | 23
-rw-r--r--  src/bin/pg_dump/t/002_pg_dump.pl | 47
-rw-r--r--  src/bin/pg_dump/t/003_pg_dump_with_server.pl | 16
-rw-r--r--  src/bin/pg_dump/t/005_pg_dump_filterfile.pl | 14
-rw-r--r--  src/bin/pg_upgrade/check.c | 2
-rw-r--r--  src/bin/pg_upgrade/t/002_pg_upgrade.pl | 3
-rw-r--r--  src/bin/psql/command.c | 94
-rw-r--r--  src/bin/psql/help.c | 4
-rw-r--r--  src/bin/psql/t/001_basic.pl | 7
-rw-r--r--  src/bin/psql/tab-complete.in.c | 4
-rw-r--r--  src/include/access/htup_details.h | 2
-rw-r--r--  src/include/access/itup.h | 2
-rw-r--r--  src/include/access/reloptions.h | 2
-rw-r--r--  src/include/c.h | 2
-rw-r--r--  src/include/common/int128.h | 433
-rw-r--r--  src/include/executor/executor.h | 1
-rw-r--r--  src/include/libpq/protocol.h | 21
-rw-r--r--  src/include/optimizer/plancat.h | 2
-rw-r--r--  src/include/utils/pg_locale.h | 2
-rw-r--r--  src/include/utils/selfuncs.h | 4
-rw-r--r--  src/interfaces/ecpg/compatlib/informix.c | 6
-rw-r--r--  src/interfaces/libpq-oauth/oauth-curl.c | 219
-rw-r--r--  src/pl/plperl/plperl.c | 10
-rw-r--r--  src/test/modules/Makefile | 1
-rw-r--r--  src/test/modules/meson.build | 1
-rw-r--r--  src/test/modules/oauth_validator/t/001_server.pl | 31
-rw-r--r--  src/test/modules/test_int128/.gitignore | 2
-rw-r--r--  src/test/modules/test_int128/Makefile | 23
-rw-r--r--  src/test/modules/test_int128/meson.build | 33
-rw-r--r--  src/test/modules/test_int128/t/001_test_int128.pl | 27
-rw-r--r--  src/test/modules/test_int128/test_int128.c | 281
-rw-r--r--  src/test/modules/test_radixtree/test_radixtree.c | 2
-rw-r--r--  src/test/recovery/t/027_stream_regress.pl | 4
-rw-r--r--  src/test/regress/expected/aggregates.out | 19
-rw-r--r--  src/test/regress/expected/privileges.out | 13
-rw-r--r--  src/test/regress/expected/psql.out | 2
-rw-r--r--  src/test/regress/expected/rowsecurity.out | 73
-rw-r--r--  src/test/regress/expected/stats_ext.out | 71
-rw-r--r--  src/test/regress/expected/strings.out | 34
-rw-r--r--  src/test/regress/expected/timestamp.out | 10
-rw-r--r--  src/test/regress/expected/timestamptz.out | 18
-rw-r--r--  src/test/regress/expected/triggers.out | 8
-rw-r--r--  src/test/regress/regress.c | 2
-rw-r--r--  src/test/regress/sql/aggregates.sql | 5
-rw-r--r--  src/test/regress/sql/privileges.sql | 12
-rw-r--r--  src/test/regress/sql/psql.sql | 2
-rw-r--r--  src/test/regress/sql/rowsecurity.sql | 51
-rw-r--r--  src/test/regress/sql/stats_ext.sql | 45
-rw-r--r--  src/test/regress/sql/strings.sql | 20
-rw-r--r--  src/test/regress/sql/timestamp.sql | 4
-rw-r--r--  src/test/regress/sql/timestamptz.sql | 7
-rw-r--r--  src/test/regress/sql/triggers.sql | 10
-rw-r--r--  src/tools/testint128.c | 170
136 files changed, 2494 insertions, 1289 deletions
diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c
index 4204088fa0d..7ff7467e462 100644
--- a/src/backend/access/brin/brin.c
+++ b/src/backend/access/brin/brin.c
@@ -1608,7 +1608,7 @@ brin_build_desc(Relation rel)
opcInfoFn = index_getprocinfo(rel, keyno + 1, BRIN_PROCNUM_OPCINFO);
opcinfo[keyno] = (BrinOpcInfo *)
- DatumGetPointer(FunctionCall1(opcInfoFn, attr->atttypid));
+ DatumGetPointer(FunctionCall1(opcInfoFn, ObjectIdGetDatum(attr->atttypid)));
totalstored += opcinfo[keyno]->oi_nstored;
}
@@ -2262,7 +2262,7 @@ add_values_to_range(Relation idxRel, BrinDesc *bdesc, BrinMemTuple *dtup,
PointerGetDatum(bdesc),
PointerGetDatum(bval),
values[keyno],
- nulls[keyno]);
+ BoolGetDatum(nulls[keyno]));
/* if that returned true, we need to insert the updated tuple */
modified |= DatumGetBool(result);
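A recurring theme in this diff is passing plain C values through the fmgr
conversion macros instead of relying on implicit casts to and from Datum.
A minimal sketch of the idiom follows; ObjectIdGetDatum(), BoolGetDatum()
and DatumGetBool() are the standard macros from postgres.h, while the
helper function and its arguments are hypothetical.

/*
 * Illustrative only: wrap raw values before handing them to the fmgr
 * call interface, and unwrap the Datum result before using it.
 */
#include "postgres.h"
#include "fmgr.h"

static bool
call_support_fn(FmgrInfo *flinfo, Oid colloid, Oid typid, bool isnull)
{
	Datum		result;

	result = FunctionCall2Coll(flinfo, colloid,
							   ObjectIdGetDatum(typid),
							   BoolGetDatum(isnull));

	return DatumGetBool(result);
}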
diff --git a/src/backend/access/brin/brin_bloom.c b/src/backend/access/brin/brin_bloom.c
index 82b425ce37d..7c3f7d454fc 100644
--- a/src/backend/access/brin/brin_bloom.c
+++ b/src/backend/access/brin/brin_bloom.c
@@ -540,7 +540,7 @@ brin_bloom_add_value(PG_FUNCTION_ARGS)
BrinDesc *bdesc = (BrinDesc *) PG_GETARG_POINTER(0);
BrinValues *column = (BrinValues *) PG_GETARG_POINTER(1);
Datum newval = PG_GETARG_DATUM(2);
- bool isnull PG_USED_FOR_ASSERTS_ONLY = PG_GETARG_DATUM(3);
+ bool isnull PG_USED_FOR_ASSERTS_ONLY = PG_GETARG_BOOL(3);
BloomOptions *opts = (BloomOptions *) PG_GET_OPCLASS_OPTIONS();
Oid colloid = PG_GET_COLLATION();
FmgrInfo *hashFn;
diff --git a/src/backend/access/brin/brin_minmax.c b/src/backend/access/brin/brin_minmax.c
index d21ab3a668c..79c5a0aa185 100644
--- a/src/backend/access/brin/brin_minmax.c
+++ b/src/backend/access/brin/brin_minmax.c
@@ -66,7 +66,7 @@ brin_minmax_add_value(PG_FUNCTION_ARGS)
BrinDesc *bdesc = (BrinDesc *) PG_GETARG_POINTER(0);
BrinValues *column = (BrinValues *) PG_GETARG_POINTER(1);
Datum newval = PG_GETARG_DATUM(2);
- bool isnull PG_USED_FOR_ASSERTS_ONLY = PG_GETARG_DATUM(3);
+ bool isnull PG_USED_FOR_ASSERTS_ONLY = PG_GETARG_BOOL(3);
Oid colloid = PG_GET_COLLATION();
FmgrInfo *cmpFn;
Datum compar;
@@ -225,8 +225,8 @@ brin_minmax_union(PG_FUNCTION_ARGS)
/* Adjust minimum, if B's min is less than A's min */
finfo = minmax_get_strategy_procinfo(bdesc, attno, attr->atttypid,
BTLessStrategyNumber);
- needsadj = FunctionCall2Coll(finfo, colloid, col_b->bv_values[0],
- col_a->bv_values[0]);
+ needsadj = DatumGetBool(FunctionCall2Coll(finfo, colloid, col_b->bv_values[0],
+ col_a->bv_values[0]));
if (needsadj)
{
if (!attr->attbyval)
@@ -238,8 +238,8 @@ brin_minmax_union(PG_FUNCTION_ARGS)
/* Adjust maximum, if B's max is greater than A's max */
finfo = minmax_get_strategy_procinfo(bdesc, attno, attr->atttypid,
BTGreaterStrategyNumber);
- needsadj = FunctionCall2Coll(finfo, colloid, col_b->bv_values[1],
- col_a->bv_values[1]);
+ needsadj = DatumGetBool(FunctionCall2Coll(finfo, colloid, col_b->bv_values[1],
+ col_a->bv_values[1]));
if (needsadj)
{
if (!attr->attbyval)
diff --git a/src/backend/access/brin/brin_minmax_multi.c b/src/backend/access/brin/brin_minmax_multi.c
index a5a414182ca..c87f1b9cd7e 100644
--- a/src/backend/access/brin/brin_minmax_multi.c
+++ b/src/backend/access/brin/brin_minmax_multi.c
@@ -1992,8 +1992,8 @@ brin_minmax_multi_distance_tid(PG_FUNCTION_ARGS)
double da1,
da2;
- ItemPointer pa1 = (ItemPointer) PG_GETARG_DATUM(0);
- ItemPointer pa2 = (ItemPointer) PG_GETARG_DATUM(1);
+ ItemPointer pa1 = (ItemPointer) PG_GETARG_POINTER(0);
+ ItemPointer pa2 = (ItemPointer) PG_GETARG_POINTER(1);
/*
* We know the values are range boundaries, but the range may be collapsed
@@ -2414,7 +2414,7 @@ brin_minmax_multi_add_value(PG_FUNCTION_ARGS)
BrinDesc *bdesc = (BrinDesc *) PG_GETARG_POINTER(0);
BrinValues *column = (BrinValues *) PG_GETARG_POINTER(1);
Datum newval = PG_GETARG_DATUM(2);
- bool isnull PG_USED_FOR_ASSERTS_ONLY = PG_GETARG_DATUM(3);
+ bool isnull PG_USED_FOR_ASSERTS_ONLY = PG_GETARG_BOOL(3);
MinMaxMultiOptions *opts = (MinMaxMultiOptions *) PG_GET_OPCLASS_OPTIONS();
Oid colloid = PG_GET_COLLATION();
bool modified = false;
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index a410b5eb99b..1173a6d81b5 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -105,7 +105,7 @@ missing_hash(const void *key, Size keysize)
{
const missing_cache_key *entry = (missing_cache_key *) key;
- return hash_bytes((const unsigned char *) entry->value, entry->len);
+ return hash_bytes((const unsigned char *) DatumGetPointer(entry->value), entry->len);
}
static int
diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index 594a657ea1a..0af3fea68fa 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -1164,7 +1164,7 @@ add_local_string_reloption(local_relopts *relopts, const char *name,
* but we declare them as Datums to avoid including array.h in reloptions.h.
*/
Datum
-transformRelOptions(Datum oldOptions, List *defList, const char *namspace,
+transformRelOptions(Datum oldOptions, List *defList, const char *nameSpace,
const char *const validnsps[], bool acceptOidsOff, bool isReset)
{
Datum result;
@@ -1200,14 +1200,14 @@ transformRelOptions(Datum oldOptions, List *defList, const char *namspace,
int kw_len;
/* ignore if not in the same namespace */
- if (namspace == NULL)
+ if (nameSpace == NULL)
{
if (def->defnamespace != NULL)
continue;
}
else if (def->defnamespace == NULL)
continue;
- else if (strcmp(def->defnamespace, namspace) != 0)
+ else if (strcmp(def->defnamespace, nameSpace) != 0)
continue;
kw_len = strlen(def->defname);
@@ -1277,14 +1277,14 @@ transformRelOptions(Datum oldOptions, List *defList, const char *namspace,
}
/* ignore if not in the same namespace */
- if (namspace == NULL)
+ if (nameSpace == NULL)
{
if (def->defnamespace != NULL)
continue;
}
else if (def->defnamespace == NULL)
continue;
- else if (strcmp(def->defnamespace, namspace) != 0)
+ else if (strcmp(def->defnamespace, nameSpace) != 0)
continue;
/*
diff --git a/src/backend/access/common/toast_internals.c b/src/backend/access/common/toast_internals.c
index 196e06115e9..a1d0eed8953 100644
--- a/src/backend/access/common/toast_internals.c
+++ b/src/backend/access/common/toast_internals.c
@@ -64,11 +64,11 @@ toast_compress_datum(Datum value, char cmethod)
switch (cmethod)
{
case TOAST_PGLZ_COMPRESSION:
- tmp = pglz_compress_datum((const struct varlena *) value);
+ tmp = pglz_compress_datum((const struct varlena *) DatumGetPointer(value));
cmid = TOAST_PGLZ_COMPRESSION_ID;
break;
case TOAST_LZ4_COMPRESSION:
- tmp = lz4_compress_datum((const struct varlena *) value);
+ tmp = lz4_compress_datum((const struct varlena *) DatumGetPointer(value));
cmid = TOAST_LZ4_COMPRESSION_ID;
break;
default:
diff --git a/src/backend/backup/basebackup_copy.c b/src/backend/backup/basebackup_copy.c
index 18b0b5a52d3..eb45d3bcb66 100644
--- a/src/backend/backup/basebackup_copy.c
+++ b/src/backend/backup/basebackup_copy.c
@@ -143,7 +143,7 @@ bbsink_copystream_begin_backup(bbsink *sink)
buf = palloc(mysink->base.bbs_buffer_length + MAXIMUM_ALIGNOF);
mysink->msgbuffer = buf + (MAXIMUM_ALIGNOF - 1);
mysink->base.bbs_buffer = buf + MAXIMUM_ALIGNOF;
- mysink->msgbuffer[0] = 'd'; /* archive or manifest data */
+ mysink->msgbuffer[0] = PqMsg_CopyData; /* archive or manifest data */
/* Tell client the backup start location. */
SendXlogRecPtrResult(state->startptr, state->starttli);
@@ -170,7 +170,7 @@ bbsink_copystream_begin_archive(bbsink *sink, const char *archive_name)
ti = list_nth(state->tablespaces, state->tablespace_num);
pq_beginmessage(&buf, PqMsg_CopyData);
- pq_sendbyte(&buf, 'n'); /* New archive */
+ pq_sendbyte(&buf, PqBackupMsg_NewArchive);
pq_sendstring(&buf, archive_name);
pq_sendstring(&buf, ti->path == NULL ? "" : ti->path);
pq_endmessage(&buf);
@@ -191,7 +191,7 @@ bbsink_copystream_archive_contents(bbsink *sink, size_t len)
if (mysink->send_to_client)
{
/* Add one because we're also sending a leading type byte. */
- pq_putmessage('d', mysink->msgbuffer, len + 1);
+ pq_putmessage(PqMsg_CopyData, mysink->msgbuffer, len + 1);
}
/* Consider whether to send a progress report to the client. */
@@ -221,7 +221,7 @@ bbsink_copystream_archive_contents(bbsink *sink, size_t len)
mysink->last_progress_report_time = now;
pq_beginmessage(&buf, PqMsg_CopyData);
- pq_sendbyte(&buf, 'p'); /* Progress report */
+ pq_sendbyte(&buf, PqBackupMsg_ProgressReport);
pq_sendint64(&buf, state->bytes_done);
pq_endmessage(&buf);
pq_flush_if_writable();
@@ -247,7 +247,7 @@ bbsink_copystream_end_archive(bbsink *sink)
mysink->bytes_done_at_last_time_check = state->bytes_done;
mysink->last_progress_report_time = GetCurrentTimestamp();
pq_beginmessage(&buf, PqMsg_CopyData);
- pq_sendbyte(&buf, 'p'); /* Progress report */
+ pq_sendbyte(&buf, PqBackupMsg_ProgressReport);
pq_sendint64(&buf, state->bytes_done);
pq_endmessage(&buf);
pq_flush_if_writable();
@@ -262,7 +262,7 @@ bbsink_copystream_begin_manifest(bbsink *sink)
StringInfoData buf;
pq_beginmessage(&buf, PqMsg_CopyData);
- pq_sendbyte(&buf, 'm'); /* Manifest */
+ pq_sendbyte(&buf, PqBackupMsg_Manifest);
pq_endmessage(&buf);
}
@@ -277,7 +277,7 @@ bbsink_copystream_manifest_contents(bbsink *sink, size_t len)
if (mysink->send_to_client)
{
/* Add one because we're also sending a leading type byte. */
- pq_putmessage('d', mysink->msgbuffer, len + 1);
+ pq_putmessage(PqMsg_CopyData, mysink->msgbuffer, len + 1);
}
}
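The literal message bytes in basebackup_copy.c are replaced with named
constants. PqMsg_CopyData ('d') already existed; judging from the old/new
pairs in these hunks, the backup-specific additions to
src/include/libpq/protocol.h presumably look roughly like the following
(byte values are taken from the literals being replaced; the exact layout
and comments are guesses):

/* Presumed shape of the new constants in src/include/libpq/protocol.h */
#define PqBackupMsg_Manifest		'm'
#define PqBackupMsg_NewArchive		'n'
#define PqBackupMsg_ProgressReport	'p'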
diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c
index b63fd57dc04..0102c9984e7 100644
--- a/src/backend/catalog/objectaddress.c
+++ b/src/backend/catalog/objectaddress.c
@@ -4283,8 +4283,8 @@ pg_identify_object(PG_FUNCTION_ARGS)
nspAttnum = get_object_attnum_namespace(address.classId);
if (nspAttnum != InvalidAttrNumber)
{
- schema_oid = heap_getattr(objtup, nspAttnum,
- RelationGetDescr(catalog), &isnull);
+ schema_oid = DatumGetObjectId(heap_getattr(objtup, nspAttnum,
+ RelationGetDescr(catalog), &isnull));
if (isnull)
elog(ERROR, "invalid null namespace in object %u/%u/%d",
address.classId, address.objectId, address.objectSubId);
diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c
index a05f8a87c1f..c62e8acd413 100644
--- a/src/backend/catalog/pg_aggregate.c
+++ b/src/backend/catalog/pg_aggregate.c
@@ -654,7 +654,7 @@ AggregateCreate(const char *aggName,
for (i = 0; i < Natts_pg_aggregate; i++)
{
nulls[i] = false;
- values[i] = (Datum) NULL;
+ values[i] = (Datum) 0;
replaces[i] = true;
}
values[Anum_pg_aggregate_aggfnoid - 1] = ObjectIdGetDatum(procOid);
diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c
index 2d5ac1ea813..6002fd0002f 100644
--- a/src/backend/catalog/pg_constraint.c
+++ b/src/backend/catalog/pg_constraint.c
@@ -179,7 +179,7 @@ CreateConstraintEntry(const char *constraintName,
for (i = 0; i < Natts_pg_constraint; i++)
{
nulls[i] = false;
- values[i] = (Datum) NULL;
+ values[i] = (Datum) 0;
}
conOid = GetNewOidWithIndex(conDesc, ConstraintOidIndexId,
diff --git a/src/backend/catalog/pg_conversion.c b/src/backend/catalog/pg_conversion.c
index 04cc375caea..090f680d190 100644
--- a/src/backend/catalog/pg_conversion.c
+++ b/src/backend/catalog/pg_conversion.c
@@ -87,7 +87,7 @@ ConversionCreate(const char *conname, Oid connamespace,
for (i = 0; i < Natts_pg_conversion; i++)
{
nulls[i] = false;
- values[i] = (Datum) NULL;
+ values[i] = (Datum) 0;
}
/* form a tuple */
diff --git a/src/backend/catalog/pg_namespace.c b/src/backend/catalog/pg_namespace.c
index 6f5634a4de6..616bcc78521 100644
--- a/src/backend/catalog/pg_namespace.c
+++ b/src/backend/catalog/pg_namespace.c
@@ -76,7 +76,7 @@ NamespaceCreate(const char *nspName, Oid ownerId, bool isTemp)
for (i = 0; i < Natts_pg_namespace; i++)
{
nulls[i] = false;
- values[i] = (Datum) NULL;
+ values[i] = (Datum) 0;
}
nspoid = GetNewOidWithIndex(nspdesc, NamespaceOidIndexId,
diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c
index bfcfa643464..44d2ccb6788 100644
--- a/src/backend/catalog/pg_operator.c
+++ b/src/backend/catalog/pg_operator.c
@@ -225,7 +225,7 @@ OperatorShellMake(const char *operatorName,
for (i = 0; i < Natts_pg_operator; ++i)
{
nulls[i] = false;
- values[i] = (Datum) NULL; /* redundant, but safe */
+ values[i] = (Datum) 0; /* redundant, but safe */
}
/*
@@ -453,7 +453,7 @@ OperatorCreate(const char *operatorName,
for (i = 0; i < Natts_pg_operator; ++i)
{
- values[i] = (Datum) NULL;
+ values[i] = (Datum) 0;
replaces[i] = true;
nulls[i] = false;
}
diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c
index 5fdcf24d5f8..75b17fed15e 100644
--- a/src/backend/catalog/pg_proc.c
+++ b/src/backend/catalog/pg_proc.c
@@ -1212,6 +1212,6 @@ oid_array_to_list(Datum datum)
deconstruct_array_builtin(array, OIDOID, &values, NULL, &nelems);
for (i = 0; i < nelems; i++)
- result = lappend_oid(result, values[i]);
+ result = lappend_oid(result, DatumGetObjectId(values[i]));
return result;
}
diff --git a/src/backend/catalog/pg_publication.c b/src/backend/catalog/pg_publication.c
index d6f94db5d99..b911efcf9cb 100644
--- a/src/backend/catalog/pg_publication.c
+++ b/src/backend/catalog/pg_publication.c
@@ -1001,7 +1001,7 @@ GetSchemaPublicationRelations(Oid schemaid, PublicationPartOpt pub_partopt)
ScanKeyInit(&key[0],
Anum_pg_class_relnamespace,
BTEqualStrategyNumber, F_OIDEQ,
- schemaid);
+ ObjectIdGetDatum(schemaid));
/* get all the relations present in the specified schema */
scan = table_beginscan_catalog(classRel, 1, key);
diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c
index 536191284e8..32e544da28a 100644
--- a/src/backend/catalog/pg_shdepend.c
+++ b/src/backend/catalog/pg_shdepend.c
@@ -956,12 +956,12 @@ copyTemplateDependencies(Oid templateDbId, Oid newDbId)
shdep = (Form_pg_shdepend) GETSTRUCT(tup);
slot[slot_stored_count]->tts_values[Anum_pg_shdepend_dbid - 1] = ObjectIdGetDatum(newDbId);
- slot[slot_stored_count]->tts_values[Anum_pg_shdepend_classid - 1] = shdep->classid;
- slot[slot_stored_count]->tts_values[Anum_pg_shdepend_objid - 1] = shdep->objid;
- slot[slot_stored_count]->tts_values[Anum_pg_shdepend_objsubid - 1] = shdep->objsubid;
- slot[slot_stored_count]->tts_values[Anum_pg_shdepend_refclassid - 1] = shdep->refclassid;
- slot[slot_stored_count]->tts_values[Anum_pg_shdepend_refobjid - 1] = shdep->refobjid;
- slot[slot_stored_count]->tts_values[Anum_pg_shdepend_deptype - 1] = shdep->deptype;
+ slot[slot_stored_count]->tts_values[Anum_pg_shdepend_classid - 1] = ObjectIdGetDatum(shdep->classid);
+ slot[slot_stored_count]->tts_values[Anum_pg_shdepend_objid - 1] = ObjectIdGetDatum(shdep->objid);
+ slot[slot_stored_count]->tts_values[Anum_pg_shdepend_objsubid - 1] = Int32GetDatum(shdep->objsubid);
+ slot[slot_stored_count]->tts_values[Anum_pg_shdepend_refclassid - 1] = ObjectIdGetDatum(shdep->refclassid);
+ slot[slot_stored_count]->tts_values[Anum_pg_shdepend_refobjid - 1] = ObjectIdGetDatum(shdep->refobjid);
+ slot[slot_stored_count]->tts_values[Anum_pg_shdepend_deptype - 1] = CharGetDatum(shdep->deptype);
ExecStoreVirtualTuple(slot[slot_stored_count]);
slot_stored_count++;
diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c
index b36f81afb9d..1ec523ee3e5 100644
--- a/src/backend/catalog/pg_type.c
+++ b/src/backend/catalog/pg_type.c
@@ -80,7 +80,7 @@ TypeShellMake(const char *typeName, Oid typeNamespace, Oid ownerId)
for (i = 0; i < Natts_pg_type; ++i)
{
nulls[i] = false;
- values[i] = (Datum) NULL; /* redundant, but safe */
+ values[i] = (Datum) 0; /* redundant, but safe */
}
/*
diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c
index c801c869c1c..cb75e11fced 100644
--- a/src/backend/commands/alter.c
+++ b/src/backend/commands/alter.c
@@ -220,7 +220,7 @@ AlterObjectRename_internal(Relation rel, Oid objectId, const char *new_name)
Assert(!isnull);
ownerId = DatumGetObjectId(datum);
- if (!has_privs_of_role(GetUserId(), DatumGetObjectId(ownerId)))
+ if (!has_privs_of_role(GetUserId(), ownerId))
aclcheck_error(ACLCHECK_NOT_OWNER, get_object_type(classId, objectId),
old_name);
diff --git a/src/backend/commands/event_trigger.c b/src/backend/commands/event_trigger.c
index edc2c988e29..631fb0525f1 100644
--- a/src/backend/commands/event_trigger.c
+++ b/src/backend/commands/event_trigger.c
@@ -2021,8 +2021,8 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS)
elog(ERROR, "cache lookup failed for object %u/%u",
addr.classId, addr.objectId);
schema_oid =
- heap_getattr(objtup, nspAttnum,
- RelationGetDescr(catalog), &isnull);
+ DatumGetObjectId(heap_getattr(objtup, nspAttnum,
+ RelationGetDescr(catalog), &isnull));
if (isnull)
elog(ERROR,
"invalid null namespace in object %u/%u/%d",
diff --git a/src/backend/commands/subscriptioncmds.c b/src/backend/commands/subscriptioncmds.c
index cd6c3684482..faa3650d287 100644
--- a/src/backend/commands/subscriptioncmds.c
+++ b/src/backend/commands/subscriptioncmds.c
@@ -638,7 +638,7 @@ CreateSubscription(ParseState *pstate, CreateSubscriptionStmt *stmt,
/* Check if name is used */
subid = GetSysCacheOid2(SUBSCRIPTIONNAME, Anum_pg_subscription_oid,
- MyDatabaseId, CStringGetDatum(stmt->subname));
+ ObjectIdGetDatum(MyDatabaseId), CStringGetDatum(stmt->subname));
if (OidIsValid(subid))
{
ereport(ERROR,
@@ -1185,7 +1185,7 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt,
rel = table_open(SubscriptionRelationId, RowExclusiveLock);
/* Fetch the existing tuple. */
- tup = SearchSysCacheCopy2(SUBSCRIPTIONNAME, MyDatabaseId,
+ tup = SearchSysCacheCopy2(SUBSCRIPTIONNAME, ObjectIdGetDatum(MyDatabaseId),
CStringGetDatum(stmt->subname));
if (!HeapTupleIsValid(tup))
@@ -1808,7 +1808,7 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
*/
rel = table_open(SubscriptionRelationId, AccessExclusiveLock);
- tup = SearchSysCache2(SUBSCRIPTIONNAME, MyDatabaseId,
+ tup = SearchSysCache2(SUBSCRIPTIONNAME, ObjectIdGetDatum(MyDatabaseId),
CStringGetDatum(stmt->subname));
if (!HeapTupleIsValid(tup))
@@ -2193,7 +2193,7 @@ AlterSubscriptionOwner(const char *name, Oid newOwnerId)
rel = table_open(SubscriptionRelationId, RowExclusiveLock);
- tup = SearchSysCacheCopy2(SUBSCRIPTIONNAME, MyDatabaseId,
+ tup = SearchSysCacheCopy2(SUBSCRIPTIONNAME, ObjectIdGetDatum(MyDatabaseId),
CStringGetDatum(name));
if (!HeapTupleIsValid(tup))
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index cb811520c29..c6dd2e020da 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -8985,7 +8985,7 @@ ATExecSetStatistics(Relation rel, const char *colName, int16 colNum, Node *newVa
memset(repl_null, false, sizeof(repl_null));
memset(repl_repl, false, sizeof(repl_repl));
if (!newtarget_default)
- repl_val[Anum_pg_attribute_attstattarget - 1] = newtarget;
+ repl_val[Anum_pg_attribute_attstattarget - 1] = Int16GetDatum(newtarget);
else
repl_null[Anum_pg_attribute_attstattarget - 1] = true;
repl_repl[Anum_pg_attribute_attstattarget - 1] = true;
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 7dc121f73f1..235533ac17f 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -872,7 +872,7 @@ CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString,
CStringGetDatum(trigname));
values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
- values[Anum_pg_trigger_tgenabled - 1] = trigger_fires_when;
+ values[Anum_pg_trigger_tgenabled - 1] = CharGetDatum(trigger_fires_when);
values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal);
values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
@@ -2285,6 +2285,8 @@ FindTriggerIncompatibleWithInheritance(TriggerDesc *trigdesc)
{
Trigger *trigger = &trigdesc->triggers[i];
+ if (!TRIGGER_FOR_ROW(trigger->tgtype))
+ continue;
if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL)
return trigger->tgname;
}
@@ -2545,6 +2547,15 @@ ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo,
{
TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
+ if (relinfo->ri_FdwRoutine && transition_capture &&
+ transition_capture->tcs_insert_new_table)
+ {
+ Assert(relinfo->ri_RootResultRelInfo);
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot collect transition tuples from child foreign tables")));
+ }
+
if ((trigdesc && trigdesc->trig_insert_after_row) ||
(transition_capture && transition_capture->tcs_insert_new_table))
AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
@@ -2797,6 +2808,15 @@ ExecARDeleteTriggers(EState *estate,
{
TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
+ if (relinfo->ri_FdwRoutine && transition_capture &&
+ transition_capture->tcs_delete_old_table)
+ {
+ Assert(relinfo->ri_RootResultRelInfo);
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot collect transition tuples from child foreign tables")));
+ }
+
if ((trigdesc && trigdesc->trig_delete_after_row) ||
(transition_capture && transition_capture->tcs_delete_old_table))
{
@@ -3134,6 +3154,16 @@ ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
{
TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
+ if (relinfo->ri_FdwRoutine && transition_capture &&
+ (transition_capture->tcs_update_old_table ||
+ transition_capture->tcs_update_new_table))
+ {
+ Assert(relinfo->ri_RootResultRelInfo);
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot collect transition tuples from child foreign tables")));
+ }
+
if ((trigdesc && trigdesc->trig_update_after_row) ||
(transition_capture &&
(transition_capture->tcs_update_old_table ||
diff --git a/src/backend/commands/tsearchcmds.c b/src/backend/commands/tsearchcmds.c
index ab16d42ad56..dc7df736fb8 100644
--- a/src/backend/commands/tsearchcmds.c
+++ b/src/backend/commands/tsearchcmds.c
@@ -1058,10 +1058,10 @@ DefineTSConfiguration(List *names, List *parameters, ObjectAddress *copied)
memset(slot[slot_stored_count]->tts_isnull, false,
slot[slot_stored_count]->tts_tupleDescriptor->natts * sizeof(bool));
- slot[slot_stored_count]->tts_values[Anum_pg_ts_config_map_mapcfg - 1] = cfgOid;
- slot[slot_stored_count]->tts_values[Anum_pg_ts_config_map_maptokentype - 1] = cfgmap->maptokentype;
- slot[slot_stored_count]->tts_values[Anum_pg_ts_config_map_mapseqno - 1] = cfgmap->mapseqno;
- slot[slot_stored_count]->tts_values[Anum_pg_ts_config_map_mapdict - 1] = cfgmap->mapdict;
+ slot[slot_stored_count]->tts_values[Anum_pg_ts_config_map_mapcfg - 1] = ObjectIdGetDatum(cfgOid);
+ slot[slot_stored_count]->tts_values[Anum_pg_ts_config_map_maptokentype - 1] = Int32GetDatum(cfgmap->maptokentype);
+ slot[slot_stored_count]->tts_values[Anum_pg_ts_config_map_mapseqno - 1] = Int32GetDatum(cfgmap->mapseqno);
+ slot[slot_stored_count]->tts_values[Anum_pg_ts_config_map_mapdict - 1] = ObjectIdGetDatum(cfgmap->mapdict);
ExecStoreVirtualTuple(slot[slot_stored_count]);
slot_stored_count++;
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index 0d638e29d00..1e3d4ab0e20 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -1924,7 +1924,7 @@ AddRoleMems(Oid currentUserId, const char *rolename, Oid roleid,
*/
if ((popt->specified & GRANT_ROLE_SPECIFIED_INHERIT) != 0)
new_record[Anum_pg_auth_members_inherit_option - 1] =
- popt->inherit;
+ BoolGetDatum(popt->inherit);
else
{
HeapTuple mrtup;
@@ -1935,14 +1935,14 @@ AddRoleMems(Oid currentUserId, const char *rolename, Oid roleid,
elog(ERROR, "cache lookup failed for role %u", memberid);
mrform = (Form_pg_authid) GETSTRUCT(mrtup);
new_record[Anum_pg_auth_members_inherit_option - 1] =
- mrform->rolinherit;
+ BoolGetDatum(mrform->rolinherit);
ReleaseSysCache(mrtup);
}
/* get an OID for the new row and insert it */
objectId = GetNewOidWithIndex(pg_authmem_rel, AuthMemOidIndexId,
Anum_pg_auth_members_oid);
- new_record[Anum_pg_auth_members_oid - 1] = objectId;
+ new_record[Anum_pg_auth_members_oid - 1] = ObjectIdGetDatum(objectId);
tuple = heap_form_tuple(pg_authmem_dsc,
new_record, new_record_nulls);
CatalogTupleInsert(pg_authmem_rel, tuple);
diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c
index 1a37737d4a2..0e1a74976f7 100644
--- a/src/backend/executor/execExprInterp.c
+++ b/src/backend/executor/execExprInterp.c
@@ -2815,7 +2815,7 @@ ExecJustHashVarImpl(ExprState *state, TupleTableSlot *slot, bool *isnull)
*isnull = false;
if (!fcinfo->args[0].isnull)
- return DatumGetUInt32(hashop->d.hashdatum.fn_addr(fcinfo));
+ return hashop->d.hashdatum.fn_addr(fcinfo);
else
return (Datum) 0;
}
@@ -2849,7 +2849,7 @@ ExecJustHashVarVirtImpl(ExprState *state, TupleTableSlot *slot, bool *isnull)
*isnull = false;
if (!fcinfo->args[0].isnull)
- return DatumGetUInt32(hashop->d.hashdatum.fn_addr(fcinfo));
+ return hashop->d.hashdatum.fn_addr(fcinfo);
else
return (Datum) 0;
}
@@ -2892,7 +2892,7 @@ ExecJustHashOuterVarStrict(ExprState *state, ExprContext *econtext,
if (!fcinfo->args[0].isnull)
{
*isnull = false;
- return DatumGetUInt32(hashop->d.hashdatum.fn_addr(fcinfo));
+ return hashop->d.hashdatum.fn_addr(fcinfo);
}
else
{
@@ -4393,7 +4393,7 @@ ExecEvalHashedScalarArrayOp(ExprState *state, ExprEvalStep *op, ExprContext *eco
* is the equality function and we need not-equals.
*/
if (!inclause)
- result = !result;
+ result = BoolGetDatum(!DatumGetBool(result));
}
}
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 0391798dd2c..b8b9d2a85f7 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -84,7 +84,6 @@ static void ExecutePlan(QueryDesc *queryDesc,
uint64 numberTuples,
ScanDirection direction,
DestReceiver *dest);
-static bool ExecCheckOneRelPerms(RTEPermissionInfo *perminfo);
static bool ExecCheckPermissionsModified(Oid relOid, Oid userid,
Bitmapset *modifiedCols,
AclMode requiredPerms);
@@ -643,7 +642,7 @@ ExecCheckPermissions(List *rangeTable, List *rteperminfos,
* ExecCheckOneRelPerms
* Check access permissions for a single relation.
*/
-static bool
+bool
ExecCheckOneRelPerms(RTEPermissionInfo *perminfo)
{
AclMode requiredPerms;
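ExecCheckOneRelPerms() loses its static qualifier so that the planner
change further down can call it; the one-line addition to
src/include/executor/executor.h shown in the diffstat is presumably just
the matching declaration:

/* Presumed one-line addition to src/include/executor/executor.h */
extern bool ExecCheckOneRelPerms(RTEPermissionInfo *perminfo);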
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index ecb2e4ccaa1..50fcd023776 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -1258,7 +1258,7 @@ SPI_getbinval(HeapTuple tuple, TupleDesc tupdesc, int fnumber, bool *isnull)
{
SPI_result = SPI_ERROR_NOATTRIBUTE;
*isnull = true;
- return (Datum) NULL;
+ return (Datum) 0;
}
return heap_getattr(tuple, fnumber, tupdesc, isnull);
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index 48b5d13b9b6..2f933e95cb9 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -630,7 +630,7 @@ readDatum(bool typbyval)
}
}
else if (length <= 0)
- res = (Datum) NULL;
+ res = (Datum) 0;
else
{
s = (char *) palloc(length);
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index bfefc7dbea1..9fd5c31edf2 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -7233,6 +7233,8 @@ make_modifytable(PlannerInfo *root, Plan *subplan,
ModifyTable *node = makeNode(ModifyTable);
bool returning_old_or_new = false;
bool returning_old_or_new_valid = false;
+ bool transition_tables = false;
+ bool transition_tables_valid = false;
List *fdw_private_list;
Bitmapset *direct_modify_plans;
ListCell *lc;
@@ -7379,8 +7381,8 @@ make_modifytable(PlannerInfo *root, Plan *subplan,
* callback functions needed for that and (2) there are no local
* structures that need to be run for each modified row: row-level
* triggers on the foreign table, stored generated columns, WITH CHECK
- * OPTIONs from parent views, or Vars returning OLD/NEW in the
- * RETURNING list.
+ * OPTIONs from parent views, Vars returning OLD/NEW in the RETURNING
+ * list, or transition tables on the named relation.
*/
direct_modify = false;
if (fdwroutine != NULL &&
@@ -7392,7 +7394,10 @@ make_modifytable(PlannerInfo *root, Plan *subplan,
!has_row_triggers(root, rti, operation) &&
!has_stored_generated_columns(root, rti))
{
- /* returning_old_or_new is the same for all result relations */
+ /*
+ * returning_old_or_new and transition_tables are the same for all
+ * result relations, respectively
+ */
if (!returning_old_or_new_valid)
{
returning_old_or_new =
@@ -7401,7 +7406,18 @@ make_modifytable(PlannerInfo *root, Plan *subplan,
returning_old_or_new_valid = true;
}
if (!returning_old_or_new)
- direct_modify = fdwroutine->PlanDirectModify(root, node, rti, i);
+ {
+ if (!transition_tables_valid)
+ {
+ transition_tables = has_transition_tables(root,
+ nominalRelation,
+ operation);
+ transition_tables_valid = true;
+ }
+ if (!transition_tables)
+ direct_modify = fdwroutine->PlanDirectModify(root, node,
+ rti, i);
+ }
}
if (direct_modify)
direct_modify_plans = bms_add_member(direct_modify_plans, i);
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index d59d6e4c6a0..5ba0d22befd 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -58,6 +58,7 @@
#include "parser/parsetree.h"
#include "partitioning/partdesc.h"
#include "rewrite/rewriteManip.h"
+#include "utils/acl.h"
#include "utils/backend_status.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
@@ -838,6 +839,38 @@ subquery_planner(PlannerGlobal *glob, Query *parse, PlannerInfo *parent_root,
}
/*
+ * This would be a convenient time to check access permissions for all
+ * relations mentioned in the query, since it would be better to fail now,
+ * before doing any detailed planning. However, for historical reasons,
+ * we leave this to be done at executor startup.
+ *
+ * Note, however, that we do need to check access permissions for any view
+ * relations mentioned in the query, in order to prevent information being
+ * leaked by selectivity estimation functions, which only check view owner
+ * permissions on underlying tables (see all_rows_selectable() and its
+ * callers). This is a little ugly, because it means that access
+ * permissions for views will be checked twice, which is another reason
+ * why it would be better to do all the ACL checks here.
+ */
+ foreach(l, parse->rtable)
+ {
+ RangeTblEntry *rte = lfirst_node(RangeTblEntry, l);
+
+ if (rte->perminfoindex != 0 &&
+ rte->relkind == RELKIND_VIEW)
+ {
+ RTEPermissionInfo *perminfo;
+ bool result;
+
+ perminfo = getRTEPermissionInfo(parse->rteperminfos, rte);
+ result = ExecCheckOneRelPerms(perminfo);
+ if (!result)
+ aclcheck_error(ACLCHECK_NO_PRIV, OBJECT_VIEW,
+ get_rel_name(perminfo->relid));
+ }
+ }
+
+ /*
* Preprocess RowMark information. We need to do this after subquery
* pullup, so that all base relations are present.
*/
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index c6a58afc5e5..6ce4efea118 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -2389,6 +2389,60 @@ has_row_triggers(PlannerInfo *root, Index rti, CmdType event)
}
/*
+ * has_transition_tables
+ *
+ * Detect whether the specified relation has any transition tables for event.
+ */
+bool
+has_transition_tables(PlannerInfo *root, Index rti, CmdType event)
+{
+ RangeTblEntry *rte = planner_rt_fetch(rti, root);
+ Relation relation;
+ TriggerDesc *trigDesc;
+ bool result = false;
+
+ Assert(rte->rtekind == RTE_RELATION);
+
+ /* Currently foreign tables cannot have transition tables */
+ if (rte->relkind == RELKIND_FOREIGN_TABLE)
+ return result;
+
+ /* Assume we already have adequate lock */
+ relation = table_open(rte->relid, NoLock);
+
+ trigDesc = relation->trigdesc;
+ switch (event)
+ {
+ case CMD_INSERT:
+ if (trigDesc &&
+ trigDesc->trig_insert_new_table)
+ result = true;
+ break;
+ case CMD_UPDATE:
+ if (trigDesc &&
+ (trigDesc->trig_update_old_table ||
+ trigDesc->trig_update_new_table))
+ result = true;
+ break;
+ case CMD_DELETE:
+ if (trigDesc &&
+ trigDesc->trig_delete_old_table)
+ result = true;
+ break;
+ /* There is no separate event for MERGE, only INSERT/UPDATE/DELETE */
+ case CMD_MERGE:
+ result = false;
+ break;
+ default:
+ elog(ERROR, "unrecognized CmdType: %d", (int) event);
+ break;
+ }
+
+ table_close(relation, NoLock);
+ return result;
+}
+
+/*
* has_stored_generated_columns
*
* Does table identified by RTI have any STORED GENERATED columns?
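The new has_transition_tables() helper mirrors has_row_triggers(), and the
two-line change to src/include/optimizer/plancat.h in the diffstat is
presumably its declaration, which the createplan.c hunk above uses to keep
foreign-table direct modification from bypassing transition-table capture:

/* Presumed addition to src/include/optimizer/plancat.h */
extern bool has_transition_tables(PlannerInfo *root, Index rti, CmdType event);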
diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c
index 8490148a47d..e84e8663e96 100644
--- a/src/backend/postmaster/checkpointer.c
+++ b/src/backend/postmaster/checkpointer.c
@@ -953,11 +953,14 @@ CheckpointerShmemSize(void)
Size size;
/*
- * Currently, the size of the requests[] array is arbitrarily set equal to
- * NBuffers. This may prove too large or small ...
+ * The size of the requests[] array is arbitrarily set equal to NBuffers.
+ * But there is a cap of MAX_CHECKPOINT_REQUESTS to prevent accumulating
+ * too many checkpoint requests in the ring buffer.
*/
size = offsetof(CheckpointerShmemStruct, requests);
- size = add_size(size, mul_size(NBuffers, sizeof(CheckpointerRequest)));
+ size = add_size(size, mul_size(Min(NBuffers,
+ MAX_CHECKPOINT_REQUESTS),
+ sizeof(CheckpointerRequest)));
return size;
}
diff --git a/src/backend/replication/logical/applyparallelworker.c b/src/backend/replication/logical/applyparallelworker.c
index 1fa931a7422..cd0e19176fd 100644
--- a/src/backend/replication/logical/applyparallelworker.c
+++ b/src/backend/replication/logical/applyparallelworker.c
@@ -778,10 +778,10 @@ LogicalParallelApplyLoop(shm_mq_handle *mqh)
/*
* The first byte of messages sent from leader apply worker to
- * parallel apply workers can only be 'w'.
+ * parallel apply workers can only be PqReplMsg_WALData.
*/
c = pq_getmsgbyte(&s);
- if (c != 'w')
+ if (c != PqReplMsg_WALData)
elog(ERROR, "unexpected message \"%c\"", c);
/*
diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index 89e241c8392..0fdc5de57ba 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -3994,7 +3994,7 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
c = pq_getmsgbyte(&s);
- if (c == 'w')
+ if (c == PqReplMsg_WALData)
{
XLogRecPtr start_lsn;
XLogRecPtr end_lsn;
@@ -4016,7 +4016,7 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
maybe_advance_nonremovable_xid(&rdt_data, false);
}
- else if (c == 'k')
+ else if (c == PqReplMsg_Keepalive)
{
XLogRecPtr end_lsn;
TimestampTz timestamp;
@@ -4035,7 +4035,7 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
UpdateWorkerStats(last_received, timestamp, true);
}
- else if (c == 's') /* Primary status update */
+ else if (c == PqReplMsg_PrimaryStatusUpdate)
{
rdt_data.remote_lsn = pq_getmsgint64(&s);
rdt_data.remote_oldestxid = FullTransactionIdFromU64((uint64) pq_getmsgint64(&s));
@@ -4267,7 +4267,7 @@ send_feedback(XLogRecPtr recvpos, bool force, bool requestReply)
else
resetStringInfo(reply_message);
- pq_sendbyte(reply_message, 'r');
+ pq_sendbyte(reply_message, PqReplMsg_StandbyStatusUpdate);
pq_sendint64(reply_message, recvpos); /* write */
pq_sendint64(reply_message, flushpos); /* flush */
pq_sendint64(reply_message, writepos); /* apply */
@@ -4438,7 +4438,7 @@ request_publisher_status(RetainDeadTuplesData *rdt_data)
* Send the current time to update the remote walsender's latest reply
* message received time.
*/
- pq_sendbyte(request_message, 'p');
+ pq_sendbyte(request_message, PqReplMsg_PrimaryStatusRequest);
pq_sendint64(request_message, GetCurrentTimestamp());
elog(DEBUG2, "sending publisher status request message");
diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c
index b6281101711..7361ffc9dcf 100644
--- a/src/backend/replication/walreceiver.c
+++ b/src/backend/replication/walreceiver.c
@@ -826,7 +826,7 @@ XLogWalRcvProcessMsg(unsigned char type, char *buf, Size len, TimeLineID tli)
switch (type)
{
- case 'w': /* WAL records */
+ case PqReplMsg_WALData:
{
StringInfoData incoming_message;
@@ -850,7 +850,7 @@ XLogWalRcvProcessMsg(unsigned char type, char *buf, Size len, TimeLineID tli)
XLogWalRcvWrite(buf, len, dataStart, tli);
break;
}
- case 'k': /* Keepalive */
+ case PqReplMsg_Keepalive:
{
StringInfoData incoming_message;
@@ -1130,7 +1130,7 @@ XLogWalRcvSendReply(bool force, bool requestReply)
applyPtr = GetXLogReplayRecPtr(NULL);
resetStringInfo(&reply_message);
- pq_sendbyte(&reply_message, 'r');
+ pq_sendbyte(&reply_message, PqReplMsg_StandbyStatusUpdate);
pq_sendint64(&reply_message, writePtr);
pq_sendint64(&reply_message, flushPtr);
pq_sendint64(&reply_message, applyPtr);
@@ -1234,7 +1234,7 @@ XLogWalRcvSendHSFeedback(bool immed)
/* Construct the message and send it. */
resetStringInfo(&reply_message);
- pq_sendbyte(&reply_message, 'h');
+ pq_sendbyte(&reply_message, PqReplMsg_HotStandbyFeedback);
pq_sendint64(&reply_message, GetCurrentTimestamp());
pq_sendint32(&reply_message, xmin);
pq_sendint32(&reply_message, xmin_epoch);
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index ee911394a23..0855bae3535 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -1534,7 +1534,7 @@ WalSndPrepareWrite(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xi
resetStringInfo(ctx->out);
- pq_sendbyte(ctx->out, 'w');
+ pq_sendbyte(ctx->out, PqReplMsg_WALData);
pq_sendint64(ctx->out, lsn); /* dataStart */
pq_sendint64(ctx->out, lsn); /* walEnd */
@@ -2292,7 +2292,8 @@ ProcessRepliesIfAny(void)
switch (firstchar)
{
/*
- * 'd' means a standby reply wrapped in a CopyData packet.
+ * PqMsg_CopyData means a standby reply wrapped in a CopyData
+ * packet.
*/
case PqMsg_CopyData:
ProcessStandbyMessage();
@@ -2300,8 +2301,9 @@ ProcessRepliesIfAny(void)
break;
/*
- * CopyDone means the standby requested to finish streaming.
- * Reply with CopyDone, if we had not sent that already.
+ * PqMsg_CopyDone means the standby requested to finish
+ * streaming. Reply with CopyDone, if we had not sent that
+ * already.
*/
case PqMsg_CopyDone:
if (!streamingDoneSending)
@@ -2315,7 +2317,8 @@ ProcessRepliesIfAny(void)
break;
/*
- * 'X' means that the standby is closing down the socket.
+ * PqMsg_Terminate means that the standby is closing down the
+ * socket.
*/
case PqMsg_Terminate:
proc_exit(0);
@@ -2350,15 +2353,15 @@ ProcessStandbyMessage(void)
switch (msgtype)
{
- case 'r':
+ case PqReplMsg_StandbyStatusUpdate:
ProcessStandbyReplyMessage();
break;
- case 'h':
+ case PqReplMsg_HotStandbyFeedback:
ProcessStandbyHSFeedbackMessage();
break;
- case 'p':
+ case PqReplMsg_PrimaryStatusRequest:
ProcessStandbyPSRequestMessage();
break;
@@ -2752,7 +2755,7 @@ ProcessStandbyPSRequestMessage(void)
/* construct the message... */
resetStringInfo(&output_message);
- pq_sendbyte(&output_message, 's');
+ pq_sendbyte(&output_message, PqReplMsg_PrimaryStatusUpdate);
pq_sendint64(&output_message, lsn);
pq_sendint64(&output_message, (int64) U64FromFullTransactionId(fullOldestXidInCommit));
pq_sendint64(&output_message, (int64) U64FromFullTransactionId(nextFullXid));
@@ -3364,7 +3367,7 @@ XLogSendPhysical(void)
* OK to read and send the slice.
*/
resetStringInfo(&output_message);
- pq_sendbyte(&output_message, 'w');
+ pq_sendbyte(&output_message, PqReplMsg_WALData);
pq_sendint64(&output_message, startptr); /* dataStart */
pq_sendint64(&output_message, SendRqstPtr); /* walEnd */
@@ -4135,7 +4138,7 @@ WalSndKeepalive(bool requestReply, XLogRecPtr writePtr)
/* construct the message... */
resetStringInfo(&output_message);
- pq_sendbyte(&output_message, 'k');
+ pq_sendbyte(&output_message, PqReplMsg_Keepalive);
pq_sendint64(&output_message, XLogRecPtrIsInvalid(writePtr) ? sentPtr : writePtr);
pq_sendint64(&output_message, GetCurrentTimestamp());
pq_sendbyte(&output_message, requestReply ? 1 : 0);
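As with the base-backup messages, the single-byte replication sub-protocol
codes used by walsender, walreceiver, and the logical-replication workers
are now spelled with named constants. From the old/new pairs in these
hunks, the PqReplMsg_* additions to protocol.h presumably map as follows
(byte values are taken from the literals being replaced; formatting and
comments are guesses):

/* Presumed PqReplMsg_* additions to src/include/libpq/protocol.h */
#define PqReplMsg_HotStandbyFeedback	'h'
#define PqReplMsg_Keepalive				'k'
#define PqReplMsg_PrimaryStatusRequest	'p'
#define PqReplMsg_PrimaryStatusUpdate	's'
#define PqReplMsg_StandbyStatusUpdate	'r'
#define PqReplMsg_WALData				'w'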
diff --git a/src/backend/rewrite/rewriteDefine.c b/src/backend/rewrite/rewriteDefine.c
index 8aa90b0d6fb..a96fbdc1ddd 100644
--- a/src/backend/rewrite/rewriteDefine.c
+++ b/src/backend/rewrite/rewriteDefine.c
@@ -725,10 +725,9 @@ EnableDisableRule(Relation rel, const char *rulename,
/*
* Change ev_enabled if it is different from the desired new state.
*/
- if (DatumGetChar(ruleform->ev_enabled) !=
- fires_when)
+ if (ruleform->ev_enabled != fires_when)
{
- ruleform->ev_enabled = CharGetDatum(fires_when);
+ ruleform->ev_enabled = fires_when;
CatalogTupleUpdate(pg_rewrite_desc, &ruletup->t_self, ruletup);
changed = true;
diff --git a/src/backend/statistics/attribute_stats.c b/src/backend/statistics/attribute_stats.c
index ab198076401..e8241926d2c 100644
--- a/src/backend/statistics/attribute_stats.c
+++ b/src/backend/statistics/attribute_stats.c
@@ -339,7 +339,7 @@ attribute_statistics_update(FunctionCallInfo fcinfo)
starel = table_open(StatisticRelationId, RowExclusiveLock);
- statup = SearchSysCache3(STATRELATTINH, reloid, attnum, inherited);
+ statup = SearchSysCache3(STATRELATTINH, ObjectIdGetDatum(reloid), Int16GetDatum(attnum), BoolGetDatum(inherited));
/* initialize from existing tuple if exists */
if (HeapTupleIsValid(statup))
@@ -895,9 +895,9 @@ init_empty_stats_tuple(Oid reloid, int16 attnum, bool inherited,
{
values[Anum_pg_statistic_stakind1 + slotnum - 1] = (Datum) 0;
nulls[Anum_pg_statistic_stakind1 + slotnum - 1] = false;
- values[Anum_pg_statistic_staop1 + slotnum - 1] = InvalidOid;
+ values[Anum_pg_statistic_staop1 + slotnum - 1] = ObjectIdGetDatum(InvalidOid);
nulls[Anum_pg_statistic_staop1 + slotnum - 1] = false;
- values[Anum_pg_statistic_stacoll1 + slotnum - 1] = InvalidOid;
+ values[Anum_pg_statistic_stacoll1 + slotnum - 1] = ObjectIdGetDatum(InvalidOid);
nulls[Anum_pg_statistic_stacoll1 + slotnum - 1] = false;
}
}
diff --git a/src/backend/statistics/extended_stats.c b/src/backend/statistics/extended_stats.c
index a8b63ec0884..af0b99243c6 100644
--- a/src/backend/statistics/extended_stats.c
+++ b/src/backend/statistics/extended_stats.c
@@ -1317,6 +1317,9 @@ choose_best_statistics(List *stats, char requiredkind, bool inh,
* so we can't cope with system columns.
* *exprs: input/output parameter collecting primitive subclauses within
* the clause tree
+ * *leakproof: input/output parameter recording the leakproofness of the
+ * clause tree. This should be true initially, and will be set to false
+ * if any operator function used in an OpExpr is not leakproof.
*
* Returns false if there is something we definitively can't handle.
* On true return, we can proceed to match the *exprs against statistics.
@@ -1324,7 +1327,7 @@ choose_best_statistics(List *stats, char requiredkind, bool inh,
static bool
statext_is_compatible_clause_internal(PlannerInfo *root, Node *clause,
Index relid, Bitmapset **attnums,
- List **exprs)
+ List **exprs, bool *leakproof)
{
/* Look inside any binary-compatible relabeling (as in examine_variable) */
if (IsA(clause, RelabelType))
@@ -1359,7 +1362,6 @@ statext_is_compatible_clause_internal(PlannerInfo *root, Node *clause,
/* (Var/Expr op Const) or (Const op Var/Expr) */
if (is_opclause(clause))
{
- RangeTblEntry *rte = root->simple_rte_array[relid];
OpExpr *expr = (OpExpr *) clause;
Node *clause_expr;
@@ -1394,24 +1396,15 @@ statext_is_compatible_clause_internal(PlannerInfo *root, Node *clause,
return false;
}
- /*
- * If there are any securityQuals on the RTE from security barrier
- * views or RLS policies, then the user may not have access to all the
- * table's data, and we must check that the operator is leakproof.
- *
- * If the operator is leaky, then we must ignore this clause for the
- * purposes of estimating with MCV lists, otherwise the operator might
- * reveal values from the MCV list that the user doesn't have
- * permission to see.
- */
- if (rte->securityQuals != NIL &&
- !get_func_leakproof(get_opcode(expr->opno)))
- return false;
+ /* Check if the operator is leakproof */
+ if (*leakproof)
+ *leakproof = get_func_leakproof(get_opcode(expr->opno));
/* Check (Var op Const) or (Const op Var) clauses by recursing. */
if (IsA(clause_expr, Var))
return statext_is_compatible_clause_internal(root, clause_expr,
- relid, attnums, exprs);
+ relid, attnums,
+ exprs, leakproof);
/* Otherwise we have (Expr op Const) or (Const op Expr). */
*exprs = lappend(*exprs, clause_expr);
@@ -1421,7 +1414,6 @@ statext_is_compatible_clause_internal(PlannerInfo *root, Node *clause,
/* Var/Expr IN Array */
if (IsA(clause, ScalarArrayOpExpr))
{
- RangeTblEntry *rte = root->simple_rte_array[relid];
ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) clause;
Node *clause_expr;
bool expronleft;
@@ -1461,24 +1453,15 @@ statext_is_compatible_clause_internal(PlannerInfo *root, Node *clause,
return false;
}
- /*
- * If there are any securityQuals on the RTE from security barrier
- * views or RLS policies, then the user may not have access to all the
- * table's data, and we must check that the operator is leakproof.
- *
- * If the operator is leaky, then we must ignore this clause for the
- * purposes of estimating with MCV lists, otherwise the operator might
- * reveal values from the MCV list that the user doesn't have
- * permission to see.
- */
- if (rte->securityQuals != NIL &&
- !get_func_leakproof(get_opcode(expr->opno)))
- return false;
+ /* Check if the operator is leakproof */
+ if (*leakproof)
+ *leakproof = get_func_leakproof(get_opcode(expr->opno));
/* Check Var IN Array clauses by recursing. */
if (IsA(clause_expr, Var))
return statext_is_compatible_clause_internal(root, clause_expr,
- relid, attnums, exprs);
+ relid, attnums,
+ exprs, leakproof);
/* Otherwise we have Expr IN Array. */
*exprs = lappend(*exprs, clause_expr);
@@ -1515,7 +1498,8 @@ statext_is_compatible_clause_internal(PlannerInfo *root, Node *clause,
*/
if (!statext_is_compatible_clause_internal(root,
(Node *) lfirst(lc),
- relid, attnums, exprs))
+ relid, attnums, exprs,
+ leakproof))
return false;
}
@@ -1529,8 +1513,10 @@ statext_is_compatible_clause_internal(PlannerInfo *root, Node *clause,
/* Check Var IS NULL clauses by recursing. */
if (IsA(nt->arg, Var))
- return statext_is_compatible_clause_internal(root, (Node *) (nt->arg),
- relid, attnums, exprs);
+ return statext_is_compatible_clause_internal(root,
+ (Node *) (nt->arg),
+ relid, attnums,
+ exprs, leakproof);
/* Otherwise we have Expr IS NULL. */
*exprs = lappend(*exprs, nt->arg);
@@ -1569,11 +1555,9 @@ static bool
statext_is_compatible_clause(PlannerInfo *root, Node *clause, Index relid,
Bitmapset **attnums, List **exprs)
{
- RangeTblEntry *rte = root->simple_rte_array[relid];
- RelOptInfo *rel = root->simple_rel_array[relid];
RestrictInfo *rinfo;
int clause_relid;
- Oid userid;
+ bool leakproof;
/*
* Special-case handling for bare BoolExpr AND clauses, because the
@@ -1613,18 +1597,31 @@ statext_is_compatible_clause(PlannerInfo *root, Node *clause, Index relid,
clause_relid != relid)
return false;
- /* Check the clause and determine what attributes it references. */
+ /*
+ * Check the clause, determine what attributes it references, and whether
+ * it includes any non-leakproof operators.
+ */
+ leakproof = true;
if (!statext_is_compatible_clause_internal(root, (Node *) rinfo->clause,
- relid, attnums, exprs))
+ relid, attnums, exprs,
+ &leakproof))
return false;
/*
- * Check that the user has permission to read all required attributes.
+ * If the clause includes any non-leakproof operators, check that the user
+ * has permission to read all required attributes, otherwise the operators
+ * might reveal values from the MCV list that the user doesn't have
+ * permission to see. We require all rows to be selectable --- there must
+ * be no securityQuals from security barrier views or RLS policies. See
+ * similar code in examine_variable(), examine_simple_variable(), and
+ * statistic_proc_security_check().
+ *
+ * Note that for an inheritance child, the permission checks are performed
+ * on the inheritance root parent, and whole-table select privilege on the
+ * parent doesn't guarantee that the user could read all columns of the
+ * child. Therefore we must check all referenced columns.
*/
- userid = OidIsValid(rel->userid) ? rel->userid : GetUserId();
-
- /* Table-level SELECT privilege is sufficient for all columns */
- if (pg_class_aclcheck(rte->relid, userid, ACL_SELECT) != ACLCHECK_OK)
+ if (!leakproof)
{
Bitmapset *clause_attnums = NULL;
int attnum = -1;
@@ -1649,26 +1646,9 @@ statext_is_compatible_clause(PlannerInfo *root, Node *clause, Index relid,
if (*exprs != NIL)
pull_varattnos((Node *) *exprs, relid, &clause_attnums);
- attnum = -1;
- while ((attnum = bms_next_member(clause_attnums, attnum)) >= 0)
- {
- /* Undo the offset */
- AttrNumber attno = attnum + FirstLowInvalidHeapAttributeNumber;
-
- if (attno == InvalidAttrNumber)
- {
- /* Whole-row reference, so must have access to all columns */
- if (pg_attribute_aclcheck_all(rte->relid, userid, ACL_SELECT,
- ACLMASK_ALL) != ACLCHECK_OK)
- return false;
- }
- else
- {
- if (pg_attribute_aclcheck(rte->relid, attno, userid,
- ACL_SELECT) != ACLCHECK_OK)
- return false;
- }
- }
+ /* Must have permission to read all rows from these columns */
+ if (!all_rows_selectable(root, relid, clause_attnums))
+ return false;
}
/* If we reach here, the clause is OK */
@@ -2618,7 +2598,7 @@ make_build_data(Relation rel, StatExtEntry *stat, int numrows, HeapTuple *rows,
}
else
{
- result->values[idx][i] = (Datum) datum;
+ result->values[idx][i] = datum;
result->nulls[idx][i] = false;
}
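The security check in extended_stats.c is restructured: rather than
rejecting leaky operators whenever securityQuals are present, the clause
walker now records whether every operator it saw is leakproof, and only
when one is not does the caller fall back to the per-column check via
all_rows_selectable(). Below is a minimal, hypothetical sketch of that
in/out flag idiom; get_func_leakproof() and get_opcode() are the real
lsyscache helpers, but the function itself is illustrative only.

#include "postgres.h"
#include "nodes/pg_list.h"
#include "utils/lsyscache.h"

/*
 * Hypothetical helper: returns true only if every operator in the list
 * has a leakproof implementation function, mirroring how the clause
 * walker above accumulates into its *leakproof output parameter.
 */
static bool
operators_are_leakproof(List *opnos)
{
	bool		leakproof = true;
	ListCell   *lc;

	foreach(lc, opnos)
	{
		Oid			opno = lfirst_oid(lc);

		/* Once a leaky operator has been seen, skip further lookups. */
		if (leakproof)
			leakproof = get_func_leakproof(get_opcode(opno));
	}

	return leakproof;
}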
diff --git a/src/backend/storage/aio/aio_funcs.c b/src/backend/storage/aio/aio_funcs.c
index 584e683371a..905ea129c81 100644
--- a/src/backend/storage/aio/aio_funcs.c
+++ b/src/backend/storage/aio/aio_funcs.c
@@ -152,7 +152,7 @@ retry:
nulls[0] = false;
/* column: IO's id */
- values[1] = ioh_id;
+ values[1] = UInt32GetDatum(ioh_id);
/* column: IO's generation */
values[2] = Int64GetDatum(start_generation);
diff --git a/src/backend/storage/aio/read_stream.c b/src/backend/storage/aio/read_stream.c
index 0e7f5557f5c..031fde9f4cb 100644
--- a/src/backend/storage/aio/read_stream.c
+++ b/src/backend/storage/aio/read_stream.c
@@ -247,12 +247,33 @@ read_stream_start_pending_read(ReadStream *stream)
Assert(stream->pinned_buffers + stream->pending_read_nblocks <=
stream->max_pinned_buffers);
+#ifdef USE_ASSERT_CHECKING
/* We had better not be overwriting an existing pinned buffer. */
if (stream->pinned_buffers > 0)
Assert(stream->next_buffer_index != stream->oldest_buffer_index);
else
Assert(stream->next_buffer_index == stream->oldest_buffer_index);
+ /*
+ * Pinned buffers forwarded by a preceding StartReadBuffers() call that
+ * had to split the operation should match the leading blocks of this
+ * following StartReadBuffers() call.
+ */
+ Assert(stream->forwarded_buffers <= stream->pending_read_nblocks);
+ for (int i = 0; i < stream->forwarded_buffers; ++i)
+ Assert(BufferGetBlockNumber(stream->buffers[stream->next_buffer_index + i]) ==
+ stream->pending_read_blocknum + i);
+
+ /*
+ * Check that we've cleared the queue/overflow entries corresponding to
+ * the rest of the blocks covered by this read, unless this is the first
+ * time around and we haven't initialized them yet.
+ */
+ for (int i = stream->forwarded_buffers; i < stream->pending_read_nblocks; ++i)
+ Assert(stream->next_buffer_index + i >= stream->initialized_buffers ||
+ stream->buffers[stream->next_buffer_index + i] == InvalidBuffer);
+#endif
+
/* Do we need to issue read-ahead advice? */
flags = stream->read_buffers_flags;
if (stream->advice_enabled)
@@ -979,6 +1000,19 @@ read_stream_next_buffer(ReadStream *stream, void **per_buffer_data)
stream->pending_read_nblocks == 0 &&
stream->per_buffer_data_size == 0)
{
+ /*
+ * The fast path spins on one buffer entry repeatedly instead of
+ * rotating through the whole queue and clearing the entries behind
+ * it. If the buffer it starts with happened to be forwarded between
+ * StartReadBuffers() calls and also wrapped around the circular queue
+ * partway through, then a copy also exists in the overflow zone, and the
+ * fast path won't clear it out as the regular path would. Clear that copy
+ * now, so the fast path doesn't need code for it.
+ */
+ if (stream->oldest_buffer_index < stream->io_combine_limit - 1)
+ stream->buffers[stream->queue_size + stream->oldest_buffer_index] =
+ InvalidBuffer;
+
stream->fast_path = true;
}
#endif
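
The fast-path fix above depends on the buffer queue's "overflow zone": the array has io_combine_limit - 1 extra slots past queue_size so that a multi-block read which wraps around the end can be written contiguously, leaving a duplicate of the first few entries at the end. Below is a standalone sketch of that invariant with toy sizes, not the real ReadStream layout:

#include <assert.h>

/*
 * Standalone illustration of a circular queue with an "overflow zone": the
 * first (COMBINE_LIMIT - 1) slots may have duplicates past the end of the
 * queue, so a multi-entry write never has to split when it wraps around.
 */
#define QUEUE_SIZE     8
#define COMBINE_LIMIT  4

static int queue[QUEUE_SIZE + COMBINE_LIMIT - 1];

/* Store an entry, keeping the overflow-zone copy in sync when one exists. */
static void
queue_store(int index, int value)
{
    queue[index] = value;
    if (index < COMBINE_LIMIT - 1)
        queue[QUEUE_SIZE + index] = value;
}

/* Clear both the entry and (if present) its overflow-zone copy. */
static void
queue_clear(int index)
{
    queue[index] = 0;
    if (index < COMBINE_LIMIT - 1)
        queue[QUEUE_SIZE + index] = 0;       /* what the fast-path fix does */
}

int
main(void)
{
    queue_store(1, 42);
    assert(queue[QUEUE_SIZE + 1] == 42);     /* duplicate exists */
    queue_clear(1);
    assert(queue[QUEUE_SIZE + 1] == 0);      /* duplicate is cleared too */
    return 0;
}
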
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 67431208e7f..fd7e21d96d3 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -1484,11 +1484,6 @@ StartReadBuffersImpl(ReadBuffersOperation *operation,
* buffers must remain valid until WaitReadBuffers() is called, and any
* forwarded buffers must also be preserved for a continuing call unless
* they are explicitly released.
- *
- * Currently the I/O is only started with optional operating system advice if
- * requested by the caller with READ_BUFFERS_ISSUE_ADVICE, and the real I/O
- * happens synchronously in WaitReadBuffers(). In future work, true I/O could
- * be initiated here.
*/
bool
StartReadBuffers(ReadBuffersOperation *operation,
@@ -6370,8 +6365,8 @@ ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b)
static int
ts_ckpt_progress_comparator(Datum a, Datum b, void *arg)
{
- CkptTsStatus *sa = (CkptTsStatus *) a;
- CkptTsStatus *sb = (CkptTsStatus *) b;
+ CkptTsStatus *sa = (CkptTsStatus *) DatumGetPointer(a);
+ CkptTsStatus *sb = (CkptTsStatus *) DatumGetPointer(b);
/* we want a min-heap, so return 1 for the a < b */
if (sa->progress < sb->progress)
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index ca3656fc76f..d12a3ca0684 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -714,7 +714,7 @@ pg_get_shmem_allocations_numa(PG_FUNCTION_ARGS)
for (i = 0; i <= max_nodes; i++)
{
values[0] = CStringGetTextDatum(ent->key);
- values[1] = i;
+ values[1] = Int32GetDatum(i);
values[2] = Int64GetDatum(nodes[i] * os_page_size);
tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc,
diff --git a/src/backend/storage/large_object/inv_api.c b/src/backend/storage/large_object/inv_api.c
index 68b76f2cc18..a874000c8ca 100644
--- a/src/backend/storage/large_object/inv_api.c
+++ b/src/backend/storage/large_object/inv_api.c
@@ -561,7 +561,7 @@ inv_write(LargeObjectDesc *obj_desc, const char *buf, int nbytes)
char data[LOBLKSIZE + VARHDRSZ];
/* ensure union is aligned well enough: */
int32 align_it;
- } workbuf;
+ } workbuf = {0};
char *workb = VARDATA(&workbuf.hdr);
HeapTuple newtup;
Datum values[Natts_pg_largeobject];
@@ -752,7 +752,7 @@ inv_truncate(LargeObjectDesc *obj_desc, int64 len)
char data[LOBLKSIZE + VARHDRSZ];
/* ensure union is aligned well enough: */
int32 align_it;
- } workbuf;
+ } workbuf = {0};
char *workb = VARDATA(&workbuf.hdr);
HeapTuple newtup;
Datum values[Natts_pg_largeobject];
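
The "= {0}" initializers added above presumably guarantee that any bytes of workbuf the write or truncate path does not overwrite are still well defined when the buffer is formed into a datum and stored. A minimal standalone illustration with hypothetical sizes:

#include <string.h>

/*
 * Sketch of why the workbuf declarations gain "= {0}": without an
 * initializer, bytes of the union that are never overwritten (padding, or
 * the tail after a short chunk) would be indeterminate.
 */
union workbuf_sketch
{
    char data[32];
    int  align_it;
};

int
main(void)
{
    union workbuf_sketch buf = {0};     /* every byte now has a defined value */

    memcpy(buf.data, "abc", 3);         /* only part of the buffer is written */
    return buf.data[10];                /* still well defined: reads 0 */
}
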
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 62f3471448e..f8c88147160 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -589,7 +589,7 @@ proclock_hash(const void *key, Size keysize)
* intermediate variable to suppress cast-pointer-to-int warnings.
*/
procptr = PointerGetDatum(proclocktag->myProc);
- lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
+ lockhash ^= DatumGetUInt32(procptr) << LOG2_NUM_LOCK_PARTITIONS;
return lockhash;
}
@@ -610,7 +610,7 @@ ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
* This must match proclock_hash()!
*/
procptr = PointerGetDatum(proclocktag->myProc);
- lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
+ lockhash ^= DatumGetUInt32(procptr) << LOG2_NUM_LOCK_PARTITIONS;
return lockhash;
}
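
These hunks, like the aio_funcs.c, shmem.c and lockfuncs.c ones above, replace bare casts and assignments with the matching Datum conversion macros, so every value crossing a Datum boundary goes through an explicit conversion. Below is a simplified standalone sketch of that discipline, using stand-in names since the real macros live in postgres.h and may differ in detail:

#include <stdint.h>

/* Stand-ins for the Datum conversion macros; sketch only. */
typedef uintptr_t DatumSketch;

static inline DatumSketch
UInt32GetDatumSketch(uint32_t x)
{
    return (DatumSketch) x;             /* widen to the carrier type */
}

static inline uint32_t
DatumGetUInt32Sketch(DatumSketch d)
{
    return (uint32_t) d;                /* narrow back to 32 bits */
}

static inline DatumSketch
PointerGetDatumSketch(const void *p)
{
    return (DatumSketch) p;
}

static inline void *
DatumGetPointerSketch(DatumSketch d)
{
    return (void *) d;
}

int
main(void)
{
    uint32_t    id = 42;
    DatumSketch d = UInt32GetDatumSketch(id);
    int         buf[1] = {7};
    DatumSketch p = PointerGetDatumSketch(buf);

    /* Each GetDatum has a matching DatumGet that recovers the value. */
    return (DatumGetUInt32Sketch(d) == id &&
            ((int *) DatumGetPointerSketch(p))[0] == 7) ? 0 : 1;
}
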
diff --git a/src/backend/tsearch/ts_parse.c b/src/backend/tsearch/ts_parse.c
index e5da6cf17ec..cba421892bf 100644
--- a/src/backend/tsearch/ts_parse.c
+++ b/src/backend/tsearch/ts_parse.c
@@ -218,7 +218,7 @@ LexizeExec(LexizeData *ld, ParsedLex **correspondLexem)
* position and go to multiword mode
*/
- ld->curDictId = DatumGetObjectId(map->dictIds[i]);
+ ld->curDictId = map->dictIds[i];
ld->posDict = i + 1;
ld->curSub = curVal->next;
if (res)
@@ -275,7 +275,7 @@ LexizeExec(LexizeData *ld, ParsedLex **correspondLexem)
* dictionaries ?
*/
for (i = 0; i < map->len && !dictExists; i++)
- if (ld->curDictId == DatumGetObjectId(map->dictIds[i]))
+ if (ld->curDictId == map->dictIds[i])
dictExists = true;
if (!dictExists)
diff --git a/src/backend/utils/activity/pgstat.c b/src/backend/utils/activity/pgstat.c
index 6bc91ce0dad..ffb5b8cce34 100644
--- a/src/backend/utils/activity/pgstat.c
+++ b/src/backend/utils/activity/pgstat.c
@@ -821,7 +821,7 @@ pgstat_force_next_flush(void)
static bool
match_db_entries(PgStatShared_HashEntry *entry, Datum match_data)
{
- return entry->key.dboid == DatumGetObjectId(MyDatabaseId);
+ return entry->key.dboid == MyDatabaseId;
}
/*
diff --git a/src/backend/utils/activity/pgstat_shmem.c b/src/backend/utils/activity/pgstat_shmem.c
index 53e7d534270..cca4277f234 100644
--- a/src/backend/utils/activity/pgstat_shmem.c
+++ b/src/backend/utils/activity/pgstat_shmem.c
@@ -874,11 +874,12 @@ pgstat_drop_entry_internal(PgStatShared_HashEntry *shent,
*/
if (shent->dropped)
elog(ERROR,
- "trying to drop stats entry already dropped: kind=%s dboid=%u objid=%" PRIu64 " refcount=%u",
+ "trying to drop stats entry already dropped: kind=%s dboid=%u objid=%" PRIu64 " refcount=%u generation=%u",
pgstat_get_kind_info(shent->key.kind)->name,
shent->key.dboid,
shent->key.objid,
- pg_atomic_read_u32(&shent->refcount));
+ pg_atomic_read_u32(&shent->refcount),
+ pg_atomic_read_u32(&shent->generation));
shent->dropped = true;
/* release refcount marking entry as not dropped */
diff --git a/src/backend/utils/adt/datum.c b/src/backend/utils/adt/datum.c
index fcd5b1653dd..614644a4e2a 100644
--- a/src/backend/utils/adt/datum.c
+++ b/src/backend/utils/adt/datum.c
@@ -299,9 +299,9 @@ datum_image_eq(Datum value1, Datum value2, bool typByVal, int typLen)
len1 - VARHDRSZ) == 0);
/* Only free memory if it's a copy made here. */
- if ((Pointer) arg1val != (Pointer) value1)
+ if ((Pointer) arg1val != DatumGetPointer(value1))
pfree(arg1val);
- if ((Pointer) arg2val != (Pointer) value2)
+ if ((Pointer) arg2val != DatumGetPointer(value2))
pfree(arg2val);
}
}
@@ -355,7 +355,7 @@ datum_image_hash(Datum value, bool typByVal, int typLen)
result = hash_bytes((unsigned char *) VARDATA_ANY(val), len - VARHDRSZ);
/* Only free memory if it's a copy made here. */
- if ((Pointer) val != (Pointer) value)
+ if ((Pointer) val != DatumGetPointer(value))
pfree(val);
}
else if (typLen == -2)
diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c
index 51452755f58..e9d370cb3da 100644
--- a/src/backend/utils/adt/json.c
+++ b/src/backend/utils/adt/json.c
@@ -904,7 +904,7 @@ json_unique_hash(const void *key, Size keysize)
hash ^= hash_bytes((const unsigned char *) entry->key, entry->key_len);
- return DatumGetUInt32(hash);
+ return hash;
}
static int
diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c
index 370456408bf..c5e1a027956 100644
--- a/src/backend/utils/adt/jsonfuncs.c
+++ b/src/backend/utils/adt/jsonfuncs.c
@@ -2027,7 +2027,7 @@ each_worker_jsonb(FunctionCallInfo fcinfo, const char *funcname, bool as_text)
{
/* a json null is an sql null in text mode */
nulls[1] = true;
- values[1] = (Datum) NULL;
+ values[1] = (Datum) 0;
}
else
values[1] = PointerGetDatum(JsonbValueAsText(&v));
@@ -2266,7 +2266,7 @@ elements_worker_jsonb(FunctionCallInfo fcinfo, const char *funcname,
{
/* a json null is an sql null in text mode */
nulls[0] = true;
- values[0] = (Datum) NULL;
+ values[0] = (Datum) 0;
}
else
values[0] = PointerGetDatum(JsonbValueAsText(&v));
@@ -2389,7 +2389,7 @@ elements_array_element_end(void *state, bool isnull)
if (isnull && _state->normalize_results)
{
nulls[0] = true;
- values[0] = (Datum) NULL;
+ values[0] = (Datum) 0;
}
else if (_state->next_scalar)
{
diff --git a/src/backend/utils/adt/jsonpath_exec.c b/src/backend/utils/adt/jsonpath_exec.c
index 407041b14a1..5a562535223 100644
--- a/src/backend/utils/adt/jsonpath_exec.c
+++ b/src/backend/utils/adt/jsonpath_exec.c
@@ -1517,7 +1517,7 @@ executeItemOptUnwrapTarget(JsonPathExecContext *cxt, JsonPathItem *jsp,
/* Convert numstr to Numeric with typmod */
Assert(numstr != NULL);
noerr = DirectInputFunctionCallSafe(numeric_in, numstr,
- InvalidOid, dtypmod,
+ InvalidOid, DatumGetInt32(dtypmod),
(Node *) &escontext,
&numdatum);
diff --git a/src/backend/utils/adt/lockfuncs.c b/src/backend/utils/adt/lockfuncs.c
index 00e67fb46d0..df938812dd3 100644
--- a/src/backend/utils/adt/lockfuncs.c
+++ b/src/backend/utils/adt/lockfuncs.c
@@ -398,15 +398,15 @@ pg_lock_status(PG_FUNCTION_ARGS)
values[0] = CStringGetTextDatum(PredicateLockTagTypeNames[lockType]);
/* lock target */
- values[1] = GET_PREDICATELOCKTARGETTAG_DB(*predTag);
- values[2] = GET_PREDICATELOCKTARGETTAG_RELATION(*predTag);
+ values[1] = ObjectIdGetDatum(GET_PREDICATELOCKTARGETTAG_DB(*predTag));
+ values[2] = ObjectIdGetDatum(GET_PREDICATELOCKTARGETTAG_RELATION(*predTag));
if (lockType == PREDLOCKTAG_TUPLE)
- values[4] = GET_PREDICATELOCKTARGETTAG_OFFSET(*predTag);
+ values[4] = UInt16GetDatum(GET_PREDICATELOCKTARGETTAG_OFFSET(*predTag));
else
nulls[4] = true;
if ((lockType == PREDLOCKTAG_TUPLE) ||
(lockType == PREDLOCKTAG_PAGE))
- values[3] = GET_PREDICATELOCKTARGETTAG_PAGE(*predTag);
+ values[3] = UInt32GetDatum(GET_PREDICATELOCKTARGETTAG_PAGE(*predTag));
else
nulls[3] = true;
diff --git a/src/backend/utils/adt/multirangetypes.c b/src/backend/utils/adt/multirangetypes.c
index 46f2ec0c29f..84733dc5019 100644
--- a/src/backend/utils/adt/multirangetypes.c
+++ b/src/backend/utils/adt/multirangetypes.c
@@ -2082,15 +2082,14 @@ range_overleft_multirange_internal(TypeCacheEntry *rangetyp,
bool empty;
if (RangeIsEmpty(r) || MultirangeIsEmpty(mr))
- PG_RETURN_BOOL(false);
-
+ return false;
range_deserialize(rangetyp, r, &lower1, &upper1, &empty);
Assert(!empty);
multirange_get_bounds(rangetyp, mr, mr->rangeCount - 1,
&lower2, &upper2);
- PG_RETURN_BOOL(range_cmp_bounds(rangetyp, &upper1, &upper2) <= 0);
+ return (range_cmp_bounds(rangetyp, &upper1, &upper2) <= 0);
}
Datum
@@ -2167,7 +2166,7 @@ range_overright_multirange_internal(TypeCacheEntry *rangetyp,
bool empty;
if (RangeIsEmpty(r) || MultirangeIsEmpty(mr))
- PG_RETURN_BOOL(false);
+ return false;
range_deserialize(rangetyp, r, &lower1, &upper1, &empty);
Assert(!empty);
@@ -2524,7 +2523,7 @@ multirange_adjacent_range(PG_FUNCTION_ARGS)
TypeCacheEntry *typcache;
if (RangeIsEmpty(r) || MultirangeIsEmpty(mr))
- return false;
+ PG_RETURN_BOOL(false);
typcache = multirange_get_typcache(fcinfo, MultirangeTypeGetOid(mr));
@@ -2545,7 +2544,7 @@ multirange_adjacent_multirange(PG_FUNCTION_ARGS)
upper2;
if (MultirangeIsEmpty(mr1) || MultirangeIsEmpty(mr2))
- return false;
+ PG_RETURN_BOOL(false);
typcache = multirange_get_typcache(fcinfo, MultirangeTypeGetOid(mr1));
@@ -2640,7 +2639,7 @@ multirange_cmp(PG_FUNCTION_ARGS)
Datum
multirange_lt(PG_FUNCTION_ARGS)
{
- int cmp = multirange_cmp(fcinfo);
+ int cmp = DatumGetInt32(multirange_cmp(fcinfo));
PG_RETURN_BOOL(cmp < 0);
}
@@ -2648,7 +2647,7 @@ multirange_lt(PG_FUNCTION_ARGS)
Datum
multirange_le(PG_FUNCTION_ARGS)
{
- int cmp = multirange_cmp(fcinfo);
+ int cmp = DatumGetInt32(multirange_cmp(fcinfo));
PG_RETURN_BOOL(cmp <= 0);
}
@@ -2656,7 +2655,7 @@ multirange_le(PG_FUNCTION_ARGS)
Datum
multirange_ge(PG_FUNCTION_ARGS)
{
- int cmp = multirange_cmp(fcinfo);
+ int cmp = DatumGetInt32(multirange_cmp(fcinfo));
PG_RETURN_BOOL(cmp >= 0);
}
@@ -2664,7 +2663,7 @@ multirange_ge(PG_FUNCTION_ARGS)
Datum
multirange_gt(PG_FUNCTION_ARGS)
{
- int cmp = multirange_cmp(fcinfo);
+ int cmp = DatumGetInt32(multirange_cmp(fcinfo));
PG_RETURN_BOOL(cmp > 0);
}
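
The multirangetypes.c hunks above keep the two calling conventions straight: *_internal helpers are plain C functions and should use plain return statements, while PG_FUNCTION_ARGS functions must wrap their results with PG_RETURN_* and unwrap Datum results (hence DatumGetInt32 around multirange_cmp(fcinfo)). A compact standalone sketch of the distinction, with stand-in macro and type names:

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: stand-ins for the fmgr return macros in fmgr.h. */
typedef uintptr_t DatumSketch;
#define PG_RETURN_BOOL_SKETCH(b)  return ((DatumSketch) ((b) ? 1 : 0))

/* Internal helper: plain C, so it uses a plain "return". */
static bool
range_overleft_internal_sketch(int upper1, int upper2)
{
    if (upper1 > upper2)
        return false;           /* not PG_RETURN_BOOL: caller wants a bool */
    return true;
}

/* SQL-callable function: results cross the boundary as Datums. */
static DatumSketch
range_overleft_sketch(int upper1, int upper2)
{
    PG_RETURN_BOOL_SKETCH(range_overleft_internal_sketch(upper1, upper2));
}

int
main(void)
{
    return range_overleft_sketch(1, 2) == 1 ? 0 : 1;
}
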
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index c9233565d57..122f2efab8b 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -28,6 +28,7 @@
#include "common/hashfn.h"
#include "common/int.h"
+#include "common/int128.h"
#include "funcapi.h"
#include "lib/hyperloglog.h"
#include "libpq/pqformat.h"
@@ -534,10 +535,7 @@ static bool numericvar_to_int32(const NumericVar *var, int32 *result);
static bool numericvar_to_int64(const NumericVar *var, int64 *result);
static void int64_to_numericvar(int64 val, NumericVar *var);
static bool numericvar_to_uint64(const NumericVar *var, uint64 *result);
-#ifdef HAVE_INT128
-static bool numericvar_to_int128(const NumericVar *var, int128 *result);
-static void int128_to_numericvar(int128 val, NumericVar *var);
-#endif
+static void int128_to_numericvar(INT128 val, NumericVar *var);
static double numericvar_to_double_no_overflow(const NumericVar *var);
static Datum numeric_abbrev_convert(Datum original_datum, SortSupport ssup);
@@ -4463,25 +4461,13 @@ int64_div_fast_to_numeric(int64 val1, int log10val2)
if (unlikely(pg_mul_s64_overflow(val1, factor, &new_val1)))
{
-#ifdef HAVE_INT128
/* do the multiplication using 128-bit integers */
- int128 tmp;
+ INT128 tmp;
- tmp = (int128) val1 * (int128) factor;
+ tmp = int64_to_int128(0);
+ int128_add_int64_mul_int64(&tmp, val1, factor);
int128_to_numericvar(tmp, &result);
-#else
- /* do the multiplication using numerics */
- NumericVar tmp;
-
- init_var(&tmp);
-
- int64_to_numericvar(val1, &result);
- int64_to_numericvar(factor, &tmp);
- mul_var(&result, &tmp, &result, 0);
-
- free_var(&tmp);
-#endif
}
else
int64_to_numericvar(new_val1, &result);
@@ -4901,8 +4887,8 @@ numeric_pg_lsn(PG_FUNCTION_ARGS)
* Actually, it's a pointer to a NumericAggState allocated in the aggregate
* context. The digit buffers for the NumericVars will be there too.
*
- * On platforms which support 128-bit integers some aggregates instead use a
- * 128-bit integer based transition datatype to speed up calculations.
+ * For integer inputs, some aggregates use special-purpose 64-bit or 128-bit
+ * integer-based transition datatypes to speed up calculations.
*
* ----------------------------------------------------------------------
*/
@@ -5566,26 +5552,27 @@ numeric_accum_inv(PG_FUNCTION_ARGS)
/*
- * Integer data types in general use Numeric accumulators to share code
- * and avoid risk of overflow.
+ * Integer data types in general use Numeric accumulators to share code and
+ * avoid risk of overflow. However for performance reasons optimized
+ * special-purpose accumulator routines are used when possible:
*
- * However for performance reasons optimized special-purpose accumulator
- * routines are used when possible.
+ * For 16-bit and 32-bit inputs, N and sum(X) fit into 64-bit, so 64-bit
+ * accumulators are used for SUM and AVG of these data types.
*
- * On platforms with 128-bit integer support, the 128-bit routines will be
- * used when sum(X) or sum(X*X) fit into 128-bit.
+ * For 16-bit and 32-bit inputs, sum(X^2) fits into 128-bit, so 128-bit
+ * accumulators are used for STDDEV_POP, STDDEV_SAMP, VAR_POP, and VAR_SAMP of
+ * these data types.
*
- * For 16 and 32 bit inputs, the N and sum(X) fit into 64-bit so the 64-bit
- * accumulators will be used for SUM and AVG of these data types.
+ * For 64-bit inputs, sum(X) fits into 128-bit, so a 128-bit accumulator is
+ * used for SUM(int8) and AVG(int8).
*/
-#ifdef HAVE_INT128
typedef struct Int128AggState
{
bool calcSumX2; /* if true, calculate sumX2 */
int64 N; /* count of processed numbers */
- int128 sumX; /* sum of processed numbers */
- int128 sumX2; /* sum of squares of processed numbers */
+ INT128 sumX; /* sum of processed numbers */
+ INT128 sumX2; /* sum of squares of processed numbers */
} Int128AggState;
/*
@@ -5631,12 +5618,12 @@ makeInt128AggStateCurrentContext(bool calcSumX2)
* Accumulate a new input value for 128-bit aggregate functions.
*/
static void
-do_int128_accum(Int128AggState *state, int128 newval)
+do_int128_accum(Int128AggState *state, int64 newval)
{
if (state->calcSumX2)
- state->sumX2 += newval * newval;
+ int128_add_int64_mul_int64(&state->sumX2, newval, newval);
- state->sumX += newval;
+ int128_add_int64(&state->sumX, newval);
state->N++;
}
@@ -5644,43 +5631,28 @@ do_int128_accum(Int128AggState *state, int128 newval)
* Remove an input value from the aggregated state.
*/
static void
-do_int128_discard(Int128AggState *state, int128 newval)
+do_int128_discard(Int128AggState *state, int64 newval)
{
if (state->calcSumX2)
- state->sumX2 -= newval * newval;
+ int128_sub_int64_mul_int64(&state->sumX2, newval, newval);
- state->sumX -= newval;
+ int128_sub_int64(&state->sumX, newval);
state->N--;
}
-typedef Int128AggState PolyNumAggState;
-#define makePolyNumAggState makeInt128AggState
-#define makePolyNumAggStateCurrentContext makeInt128AggStateCurrentContext
-#else
-typedef NumericAggState PolyNumAggState;
-#define makePolyNumAggState makeNumericAggState
-#define makePolyNumAggStateCurrentContext makeNumericAggStateCurrentContext
-#endif
-
Datum
int2_accum(PG_FUNCTION_ARGS)
{
- PolyNumAggState *state;
+ Int128AggState *state;
- state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
+ state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
/* Create the state data on the first call */
if (state == NULL)
- state = makePolyNumAggState(fcinfo, true);
+ state = makeInt128AggState(fcinfo, true);
if (!PG_ARGISNULL(1))
- {
-#ifdef HAVE_INT128
- do_int128_accum(state, (int128) PG_GETARG_INT16(1));
-#else
- do_numeric_accum(state, int64_to_numeric(PG_GETARG_INT16(1)));
-#endif
- }
+ do_int128_accum(state, PG_GETARG_INT16(1));
PG_RETURN_POINTER(state);
}
@@ -5688,22 +5660,16 @@ int2_accum(PG_FUNCTION_ARGS)
Datum
int4_accum(PG_FUNCTION_ARGS)
{
- PolyNumAggState *state;
+ Int128AggState *state;
- state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
+ state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
/* Create the state data on the first call */
if (state == NULL)
- state = makePolyNumAggState(fcinfo, true);
+ state = makeInt128AggState(fcinfo, true);
if (!PG_ARGISNULL(1))
- {
-#ifdef HAVE_INT128
- do_int128_accum(state, (int128) PG_GETARG_INT32(1));
-#else
- do_numeric_accum(state, int64_to_numeric(PG_GETARG_INT32(1)));
-#endif
- }
+ do_int128_accum(state, PG_GETARG_INT32(1));
PG_RETURN_POINTER(state);
}
@@ -5726,21 +5692,21 @@ int8_accum(PG_FUNCTION_ARGS)
}
/*
- * Combine function for numeric aggregates which require sumX2
+ * Combine function for Int128AggState, used by aggregates which require sumX2
*/
Datum
numeric_poly_combine(PG_FUNCTION_ARGS)
{
- PolyNumAggState *state1;
- PolyNumAggState *state2;
+ Int128AggState *state1;
+ Int128AggState *state2;
MemoryContext agg_context;
MemoryContext old_context;
if (!AggCheckCallContext(fcinfo, &agg_context))
elog(ERROR, "aggregate function called in non-aggregate context");
- state1 = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
- state2 = PG_ARGISNULL(1) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(1);
+ state1 = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
+ state2 = PG_ARGISNULL(1) ? NULL : (Int128AggState *) PG_GETARG_POINTER(1);
if (state2 == NULL)
PG_RETURN_POINTER(state1);
@@ -5750,16 +5716,10 @@ numeric_poly_combine(PG_FUNCTION_ARGS)
{
old_context = MemoryContextSwitchTo(agg_context);
- state1 = makePolyNumAggState(fcinfo, true);
+ state1 = makeInt128AggState(fcinfo, true);
state1->N = state2->N;
-
-#ifdef HAVE_INT128
state1->sumX = state2->sumX;
state1->sumX2 = state2->sumX2;
-#else
- accum_sum_copy(&state1->sumX, &state2->sumX);
- accum_sum_copy(&state1->sumX2, &state2->sumX2);
-#endif
MemoryContextSwitchTo(old_context);
@@ -5769,54 +5729,51 @@ numeric_poly_combine(PG_FUNCTION_ARGS)
if (state2->N > 0)
{
state1->N += state2->N;
+ int128_add_int128(&state1->sumX, state2->sumX);
+ int128_add_int128(&state1->sumX2, state2->sumX2);
+ }
+ PG_RETURN_POINTER(state1);
+}
-#ifdef HAVE_INT128
- state1->sumX += state2->sumX;
- state1->sumX2 += state2->sumX2;
-#else
- /* The rest of this needs to work in the aggregate context */
- old_context = MemoryContextSwitchTo(agg_context);
-
- /* Accumulate sums */
- accum_sum_combine(&state1->sumX, &state2->sumX);
- accum_sum_combine(&state1->sumX2, &state2->sumX2);
+/*
+ * int128_serialize - serialize a 128-bit integer to binary format
+ */
+static inline void
+int128_serialize(StringInfo buf, INT128 val)
+{
+ pq_sendint64(buf, PG_INT128_HI_INT64(val));
+ pq_sendint64(buf, PG_INT128_LO_UINT64(val));
+}
- MemoryContextSwitchTo(old_context);
-#endif
+/*
+ * int128_deserialize - deserialize binary format to a 128-bit integer.
+ */
+static inline INT128
+int128_deserialize(StringInfo buf)
+{
+ int64 hi = pq_getmsgint64(buf);
+ uint64 lo = pq_getmsgint64(buf);
- }
- PG_RETURN_POINTER(state1);
+ return make_int128(hi, lo);
}
/*
* numeric_poly_serialize
- * Serialize PolyNumAggState into bytea for aggregate functions which
+ * Serialize Int128AggState into bytea for aggregate functions which
* require sumX2.
*/
Datum
numeric_poly_serialize(PG_FUNCTION_ARGS)
{
- PolyNumAggState *state;
+ Int128AggState *state;
StringInfoData buf;
bytea *result;
- NumericVar tmp_var;
/* Ensure we disallow calling when not in aggregate context */
if (!AggCheckCallContext(fcinfo, NULL))
elog(ERROR, "aggregate function called in non-aggregate context");
- state = (PolyNumAggState *) PG_GETARG_POINTER(0);
-
- /*
- * If the platform supports int128 then sumX and sumX2 will be a 128 bit
- * integer type. Here we'll convert that into a numeric type so that the
- * combine state is in the same format for both int128 enabled machines
- * and machines which don't support that type. The logic here is that one
- * day we might like to send these over to another server for further
- * processing and we want a standard format to work with.
- */
-
- init_var(&tmp_var);
+ state = (Int128AggState *) PG_GETARG_POINTER(0);
pq_begintypsend(&buf);
@@ -5824,48 +5781,33 @@ numeric_poly_serialize(PG_FUNCTION_ARGS)
pq_sendint64(&buf, state->N);
/* sumX */
-#ifdef HAVE_INT128
- int128_to_numericvar(state->sumX, &tmp_var);
-#else
- accum_sum_final(&state->sumX, &tmp_var);
-#endif
- numericvar_serialize(&buf, &tmp_var);
+ int128_serialize(&buf, state->sumX);
/* sumX2 */
-#ifdef HAVE_INT128
- int128_to_numericvar(state->sumX2, &tmp_var);
-#else
- accum_sum_final(&state->sumX2, &tmp_var);
-#endif
- numericvar_serialize(&buf, &tmp_var);
+ int128_serialize(&buf, state->sumX2);
result = pq_endtypsend(&buf);
- free_var(&tmp_var);
-
PG_RETURN_BYTEA_P(result);
}
/*
* numeric_poly_deserialize
- * Deserialize PolyNumAggState from bytea for aggregate functions which
+ * Deserialize Int128AggState from bytea for aggregate functions which
* require sumX2.
*/
Datum
numeric_poly_deserialize(PG_FUNCTION_ARGS)
{
bytea *sstate;
- PolyNumAggState *result;
+ Int128AggState *result;
StringInfoData buf;
- NumericVar tmp_var;
if (!AggCheckCallContext(fcinfo, NULL))
elog(ERROR, "aggregate function called in non-aggregate context");
sstate = PG_GETARG_BYTEA_PP(0);
- init_var(&tmp_var);
-
/*
* Initialize a StringInfo so that we can "receive" it using the standard
* recv-function infrastructure.
@@ -5873,31 +5815,19 @@ numeric_poly_deserialize(PG_FUNCTION_ARGS)
initReadOnlyStringInfo(&buf, VARDATA_ANY(sstate),
VARSIZE_ANY_EXHDR(sstate));
- result = makePolyNumAggStateCurrentContext(false);
+ result = makeInt128AggStateCurrentContext(false);
/* N */
result->N = pq_getmsgint64(&buf);
/* sumX */
- numericvar_deserialize(&buf, &tmp_var);
-#ifdef HAVE_INT128
- numericvar_to_int128(&tmp_var, &result->sumX);
-#else
- accum_sum_add(&result->sumX, &tmp_var);
-#endif
+ result->sumX = int128_deserialize(&buf);
/* sumX2 */
- numericvar_deserialize(&buf, &tmp_var);
-#ifdef HAVE_INT128
- numericvar_to_int128(&tmp_var, &result->sumX2);
-#else
- accum_sum_add(&result->sumX2, &tmp_var);
-#endif
+ result->sumX2 = int128_deserialize(&buf);
pq_getmsgend(&buf);
- free_var(&tmp_var);
-
PG_RETURN_POINTER(result);
}
@@ -5907,43 +5837,37 @@ numeric_poly_deserialize(PG_FUNCTION_ARGS)
Datum
int8_avg_accum(PG_FUNCTION_ARGS)
{
- PolyNumAggState *state;
+ Int128AggState *state;
- state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
+ state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
/* Create the state data on the first call */
if (state == NULL)
- state = makePolyNumAggState(fcinfo, false);
+ state = makeInt128AggState(fcinfo, false);
if (!PG_ARGISNULL(1))
- {
-#ifdef HAVE_INT128
- do_int128_accum(state, (int128) PG_GETARG_INT64(1));
-#else
- do_numeric_accum(state, int64_to_numeric(PG_GETARG_INT64(1)));
-#endif
- }
+ do_int128_accum(state, PG_GETARG_INT64(1));
PG_RETURN_POINTER(state);
}
/*
- * Combine function for PolyNumAggState for aggregates which don't require
+ * Combine function for Int128AggState, used by aggregates which don't require
* sumX2
*/
Datum
int8_avg_combine(PG_FUNCTION_ARGS)
{
- PolyNumAggState *state1;
- PolyNumAggState *state2;
+ Int128AggState *state1;
+ Int128AggState *state2;
MemoryContext agg_context;
MemoryContext old_context;
if (!AggCheckCallContext(fcinfo, &agg_context))
elog(ERROR, "aggregate function called in non-aggregate context");
- state1 = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
- state2 = PG_ARGISNULL(1) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(1);
+ state1 = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
+ state2 = PG_ARGISNULL(1) ? NULL : (Int128AggState *) PG_GETARG_POINTER(1);
if (state2 == NULL)
PG_RETURN_POINTER(state1);
@@ -5953,14 +5877,10 @@ int8_avg_combine(PG_FUNCTION_ARGS)
{
old_context = MemoryContextSwitchTo(agg_context);
- state1 = makePolyNumAggState(fcinfo, false);
+ state1 = makeInt128AggState(fcinfo, false);
state1->N = state2->N;
-
-#ifdef HAVE_INT128
state1->sumX = state2->sumX;
-#else
- accum_sum_copy(&state1->sumX, &state2->sumX);
-#endif
+
MemoryContextSwitchTo(old_context);
PG_RETURN_POINTER(state1);
@@ -5969,52 +5889,28 @@ int8_avg_combine(PG_FUNCTION_ARGS)
if (state2->N > 0)
{
state1->N += state2->N;
-
-#ifdef HAVE_INT128
- state1->sumX += state2->sumX;
-#else
- /* The rest of this needs to work in the aggregate context */
- old_context = MemoryContextSwitchTo(agg_context);
-
- /* Accumulate sums */
- accum_sum_combine(&state1->sumX, &state2->sumX);
-
- MemoryContextSwitchTo(old_context);
-#endif
-
+ int128_add_int128(&state1->sumX, state2->sumX);
}
PG_RETURN_POINTER(state1);
}
/*
* int8_avg_serialize
- * Serialize PolyNumAggState into bytea using the standard
- * recv-function infrastructure.
+ * Serialize Int128AggState into bytea for aggregate functions which
+ * don't require sumX2.
*/
Datum
int8_avg_serialize(PG_FUNCTION_ARGS)
{
- PolyNumAggState *state;
+ Int128AggState *state;
StringInfoData buf;
bytea *result;
- NumericVar tmp_var;
/* Ensure we disallow calling when not in aggregate context */
if (!AggCheckCallContext(fcinfo, NULL))
elog(ERROR, "aggregate function called in non-aggregate context");
- state = (PolyNumAggState *) PG_GETARG_POINTER(0);
-
- /*
- * If the platform supports int128 then sumX will be a 128 integer type.
- * Here we'll convert that into a numeric type so that the combine state
- * is in the same format for both int128 enabled machines and machines
- * which don't support that type. The logic here is that one day we might
- * like to send these over to another server for further processing and we
- * want a standard format to work with.
- */
-
- init_var(&tmp_var);
+ state = (Int128AggState *) PG_GETARG_POINTER(0);
pq_begintypsend(&buf);
@@ -6022,39 +5918,30 @@ int8_avg_serialize(PG_FUNCTION_ARGS)
pq_sendint64(&buf, state->N);
/* sumX */
-#ifdef HAVE_INT128
- int128_to_numericvar(state->sumX, &tmp_var);
-#else
- accum_sum_final(&state->sumX, &tmp_var);
-#endif
- numericvar_serialize(&buf, &tmp_var);
+ int128_serialize(&buf, state->sumX);
result = pq_endtypsend(&buf);
- free_var(&tmp_var);
-
PG_RETURN_BYTEA_P(result);
}
/*
* int8_avg_deserialize
- * Deserialize bytea back into PolyNumAggState.
+ * Deserialize Int128AggState from bytea for aggregate functions which
+ * don't require sumX2.
*/
Datum
int8_avg_deserialize(PG_FUNCTION_ARGS)
{
bytea *sstate;
- PolyNumAggState *result;
+ Int128AggState *result;
StringInfoData buf;
- NumericVar tmp_var;
if (!AggCheckCallContext(fcinfo, NULL))
elog(ERROR, "aggregate function called in non-aggregate context");
sstate = PG_GETARG_BYTEA_PP(0);
- init_var(&tmp_var);
-
/*
* Initialize a StringInfo so that we can "receive" it using the standard
* recv-function infrastructure.
@@ -6062,23 +5949,16 @@ int8_avg_deserialize(PG_FUNCTION_ARGS)
initReadOnlyStringInfo(&buf, VARDATA_ANY(sstate),
VARSIZE_ANY_EXHDR(sstate));
- result = makePolyNumAggStateCurrentContext(false);
+ result = makeInt128AggStateCurrentContext(false);
/* N */
result->N = pq_getmsgint64(&buf);
/* sumX */
- numericvar_deserialize(&buf, &tmp_var);
-#ifdef HAVE_INT128
- numericvar_to_int128(&tmp_var, &result->sumX);
-#else
- accum_sum_add(&result->sumX, &tmp_var);
-#endif
+ result->sumX = int128_deserialize(&buf);
pq_getmsgend(&buf);
- free_var(&tmp_var);
-
PG_RETURN_POINTER(result);
}
@@ -6089,24 +5969,16 @@ int8_avg_deserialize(PG_FUNCTION_ARGS)
Datum
int2_accum_inv(PG_FUNCTION_ARGS)
{
- PolyNumAggState *state;
+ Int128AggState *state;
- state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
+ state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
/* Should not get here with no state */
if (state == NULL)
elog(ERROR, "int2_accum_inv called with NULL state");
if (!PG_ARGISNULL(1))
- {
-#ifdef HAVE_INT128
- do_int128_discard(state, (int128) PG_GETARG_INT16(1));
-#else
- /* Should never fail, all inputs have dscale 0 */
- if (!do_numeric_discard(state, int64_to_numeric(PG_GETARG_INT16(1))))
- elog(ERROR, "do_numeric_discard failed unexpectedly");
-#endif
- }
+ do_int128_discard(state, PG_GETARG_INT16(1));
PG_RETURN_POINTER(state);
}
@@ -6114,24 +5986,16 @@ int2_accum_inv(PG_FUNCTION_ARGS)
Datum
int4_accum_inv(PG_FUNCTION_ARGS)
{
- PolyNumAggState *state;
+ Int128AggState *state;
- state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
+ state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
/* Should not get here with no state */
if (state == NULL)
elog(ERROR, "int4_accum_inv called with NULL state");
if (!PG_ARGISNULL(1))
- {
-#ifdef HAVE_INT128
- do_int128_discard(state, (int128) PG_GETARG_INT32(1));
-#else
- /* Should never fail, all inputs have dscale 0 */
- if (!do_numeric_discard(state, int64_to_numeric(PG_GETARG_INT32(1))))
- elog(ERROR, "do_numeric_discard failed unexpectedly");
-#endif
- }
+ do_int128_discard(state, PG_GETARG_INT32(1));
PG_RETURN_POINTER(state);
}
@@ -6160,24 +6024,16 @@ int8_accum_inv(PG_FUNCTION_ARGS)
Datum
int8_avg_accum_inv(PG_FUNCTION_ARGS)
{
- PolyNumAggState *state;
+ Int128AggState *state;
- state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
+ state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
/* Should not get here with no state */
if (state == NULL)
elog(ERROR, "int8_avg_accum_inv called with NULL state");
if (!PG_ARGISNULL(1))
- {
-#ifdef HAVE_INT128
- do_int128_discard(state, (int128) PG_GETARG_INT64(1));
-#else
- /* Should never fail, all inputs have dscale 0 */
- if (!do_numeric_discard(state, int64_to_numeric(PG_GETARG_INT64(1))))
- elog(ERROR, "do_numeric_discard failed unexpectedly");
-#endif
- }
+ do_int128_discard(state, PG_GETARG_INT64(1));
PG_RETURN_POINTER(state);
}
@@ -6185,12 +6041,11 @@ int8_avg_accum_inv(PG_FUNCTION_ARGS)
Datum
numeric_poly_sum(PG_FUNCTION_ARGS)
{
-#ifdef HAVE_INT128
- PolyNumAggState *state;
+ Int128AggState *state;
Numeric res;
NumericVar result;
- state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
+ state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
/* If there were no non-null inputs, return NULL */
if (state == NULL || state->N == 0)
@@ -6205,21 +6060,17 @@ numeric_poly_sum(PG_FUNCTION_ARGS)
free_var(&result);
PG_RETURN_NUMERIC(res);
-#else
- return numeric_sum(fcinfo);
-#endif
}
Datum
numeric_poly_avg(PG_FUNCTION_ARGS)
{
-#ifdef HAVE_INT128
- PolyNumAggState *state;
+ Int128AggState *state;
NumericVar result;
Datum countd,
sumd;
- state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
+ state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
/* If there were no non-null inputs, return NULL */
if (state == NULL || state->N == 0)
@@ -6235,9 +6086,6 @@ numeric_poly_avg(PG_FUNCTION_ARGS)
free_var(&result);
PG_RETURN_DATUM(DirectFunctionCall2(numeric_div, sumd, countd));
-#else
- return numeric_avg(fcinfo);
-#endif
}
Datum
@@ -6470,7 +6318,6 @@ numeric_stddev_pop(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(res);
}
-#ifdef HAVE_INT128
static Numeric
numeric_poly_stddev_internal(Int128AggState *state,
bool variance, bool sample,
@@ -6514,17 +6361,15 @@ numeric_poly_stddev_internal(Int128AggState *state,
return res;
}
-#endif
Datum
numeric_poly_var_samp(PG_FUNCTION_ARGS)
{
-#ifdef HAVE_INT128
- PolyNumAggState *state;
+ Int128AggState *state;
Numeric res;
bool is_null;
- state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
+ state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
res = numeric_poly_stddev_internal(state, true, true, &is_null);
@@ -6532,20 +6377,16 @@ numeric_poly_var_samp(PG_FUNCTION_ARGS)
PG_RETURN_NULL();
else
PG_RETURN_NUMERIC(res);
-#else
- return numeric_var_samp(fcinfo);
-#endif
}
Datum
numeric_poly_stddev_samp(PG_FUNCTION_ARGS)
{
-#ifdef HAVE_INT128
- PolyNumAggState *state;
+ Int128AggState *state;
Numeric res;
bool is_null;
- state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
+ state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
res = numeric_poly_stddev_internal(state, false, true, &is_null);
@@ -6553,20 +6394,16 @@ numeric_poly_stddev_samp(PG_FUNCTION_ARGS)
PG_RETURN_NULL();
else
PG_RETURN_NUMERIC(res);
-#else
- return numeric_stddev_samp(fcinfo);
-#endif
}
Datum
numeric_poly_var_pop(PG_FUNCTION_ARGS)
{
-#ifdef HAVE_INT128
- PolyNumAggState *state;
+ Int128AggState *state;
Numeric res;
bool is_null;
- state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
+ state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
res = numeric_poly_stddev_internal(state, true, false, &is_null);
@@ -6574,20 +6411,16 @@ numeric_poly_var_pop(PG_FUNCTION_ARGS)
PG_RETURN_NULL();
else
PG_RETURN_NUMERIC(res);
-#else
- return numeric_var_pop(fcinfo);
-#endif
}
Datum
numeric_poly_stddev_pop(PG_FUNCTION_ARGS)
{
-#ifdef HAVE_INT128
- PolyNumAggState *state;
+ Int128AggState *state;
Numeric res;
bool is_null;
- state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
+ state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
res = numeric_poly_stddev_internal(state, false, false, &is_null);
@@ -6595,9 +6428,6 @@ numeric_poly_stddev_pop(PG_FUNCTION_ARGS)
PG_RETURN_NULL();
else
PG_RETURN_NUMERIC(res);
-#else
- return numeric_stddev_pop(fcinfo);
-#endif
}
/*
@@ -8330,105 +8160,23 @@ numericvar_to_uint64(const NumericVar *var, uint64 *result)
return true;
}
-#ifdef HAVE_INT128
-/*
- * Convert numeric to int128, rounding if needed.
- *
- * If overflow, return false (no error is raised). Return true if okay.
- */
-static bool
-numericvar_to_int128(const NumericVar *var, int128 *result)
-{
- NumericDigit *digits;
- int ndigits;
- int weight;
- int i;
- int128 val,
- oldval;
- bool neg;
- NumericVar rounded;
-
- /* Round to nearest integer */
- init_var(&rounded);
- set_var_from_var(var, &rounded);
- round_var(&rounded, 0);
-
- /* Check for zero input */
- strip_var(&rounded);
- ndigits = rounded.ndigits;
- if (ndigits == 0)
- {
- *result = 0;
- free_var(&rounded);
- return true;
- }
-
- /*
- * For input like 10000000000, we must treat stripped digits as real. So
- * the loop assumes there are weight+1 digits before the decimal point.
- */
- weight = rounded.weight;
- Assert(weight >= 0 && ndigits <= weight + 1);
-
- /* Construct the result */
- digits = rounded.digits;
- neg = (rounded.sign == NUMERIC_NEG);
- val = digits[0];
- for (i = 1; i <= weight; i++)
- {
- oldval = val;
- val *= NBASE;
- if (i < ndigits)
- val += digits[i];
-
- /*
- * The overflow check is a bit tricky because we want to accept
- * INT128_MIN, which will overflow the positive accumulator. We can
- * detect this case easily though because INT128_MIN is the only
- * nonzero value for which -val == val (on a two's complement machine,
- * anyway).
- */
- if ((val / NBASE) != oldval) /* possible overflow? */
- {
- if (!neg || (-val) != val || val == 0 || oldval < 0)
- {
- free_var(&rounded);
- return false;
- }
- }
- }
-
- free_var(&rounded);
-
- *result = neg ? -val : val;
- return true;
-}
-
/*
* Convert 128 bit integer to numeric.
*/
static void
-int128_to_numericvar(int128 val, NumericVar *var)
+int128_to_numericvar(INT128 val, NumericVar *var)
{
- uint128 uval,
- newuval;
+ int sign;
NumericDigit *ptr;
int ndigits;
+ int32 dig;
/* int128 can require at most 39 decimal digits; add one for safety */
alloc_var(var, 40 / DEC_DIGITS);
- if (val < 0)
- {
- var->sign = NUMERIC_NEG;
- uval = -val;
- }
- else
- {
- var->sign = NUMERIC_POS;
- uval = val;
- }
+ sign = int128_sign(val);
+ var->sign = sign < 0 ? NUMERIC_NEG : NUMERIC_POS;
var->dscale = 0;
- if (val == 0)
+ if (sign == 0)
{
var->ndigits = 0;
var->weight = 0;
@@ -8440,15 +8188,13 @@ int128_to_numericvar(int128 val, NumericVar *var)
{
ptr--;
ndigits++;
- newuval = uval / NBASE;
- *ptr = uval - newuval * NBASE;
- uval = newuval;
- } while (uval);
+ int128_div_mod_int32(&val, NBASE, &dig);
+ *ptr = (NumericDigit) abs(dig);
+ } while (!int128_is_zero(val));
var->digits = ptr;
var->ndigits = ndigits;
var->weight = ndigits - 1;
}
-#endif
/*
* Convert a NumericVar to float8; if out of range, return +/- HUGE_VAL
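
The numeric.c changes above drop the HAVE_INT128 fallback paths in favor of the INT128 helpers from common/int128.h, which accumulate 128-bit sums whether or not the compiler has a native int128 type, and serialize the value as a (hi int64, lo uint64) pair. The toy stand-in below shows only the accumulate-and-serialize pattern; it handles non-negative sums and is not the real helper API:

#include <assert.h>
#include <stdint.h>

/*
 * Toy stand-in for a two-word 128-bit accumulator, serialized as a
 * (hi, lo) pair of 64-bit words in the same order as int128_serialize().
 */
typedef struct
{
    int64_t  hi;                /* high 64 bits */
    uint64_t lo;                /* low 64 bits */
} Int128Sketch;

/* val += x, with x treated as non-negative */
static void
sketch_add_u64(Int128Sketch *val, uint64_t x)
{
    uint64_t oldlo = val->lo;

    val->lo += x;
    if (val->lo < oldlo)
        val->hi++;              /* carry out of the low word */
}

static void
sketch_serialize(const Int128Sketch *val, uint64_t out[2])
{
    out[0] = (uint64_t) val->hi;
    out[1] = val->lo;
}

static Int128Sketch
sketch_deserialize(const uint64_t in[2])
{
    Int128Sketch val;

    val.hi = (int64_t) in[0];
    val.lo = in[1];
    return val;
}

int
main(void)
{
    Int128Sketch sum = {0, 0};
    uint64_t     wire[2];

    /* Accumulate a few "sum(X)" style inputs. */
    sketch_add_u64(&sum, UINT64_MAX);    /* forces a carry into hi */
    sketch_add_u64(&sum, 1);

    sketch_serialize(&sum, wire);
    Int128Sketch back = sketch_deserialize(wire);

    assert(back.hi == sum.hi && back.lo == sum.lo);
    return 0;
}
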
diff --git a/src/backend/utils/adt/rangetypes.c b/src/backend/utils/adt/rangetypes.c
index c83b239b3bb..18e467bccd3 100644
--- a/src/backend/utils/adt/rangetypes.c
+++ b/src/backend/utils/adt/rangetypes.c
@@ -1075,8 +1075,8 @@ range_union_internal(TypeCacheEntry *typcache, RangeType *r1, RangeType *r2,
return r1;
if (strict &&
- !DatumGetBool(range_overlaps_internal(typcache, r1, r2)) &&
- !DatumGetBool(range_adjacent_internal(typcache, r1, r2)))
+ !range_overlaps_internal(typcache, r1, r2) &&
+ !range_adjacent_internal(typcache, r1, r2))
ereport(ERROR,
(errcode(ERRCODE_DATA_EXCEPTION),
errmsg("result of range union would not be contiguous")));
@@ -1343,9 +1343,9 @@ range_fast_cmp(Datum a, Datum b, SortSupport ssup)
cmp = range_cmp_bounds(typcache, &upper1, &upper2);
}
- if ((Datum) range_a != a)
+ if ((Pointer) range_a != DatumGetPointer(a))
pfree(range_a);
- if ((Datum) range_b != b)
+ if ((Pointer) range_b != DatumGetPointer(b))
pfree(range_b);
return cmp;
@@ -1356,7 +1356,7 @@ range_fast_cmp(Datum a, Datum b, SortSupport ssup)
Datum
range_lt(PG_FUNCTION_ARGS)
{
- int cmp = range_cmp(fcinfo);
+ int cmp = DatumGetInt32(range_cmp(fcinfo));
PG_RETURN_BOOL(cmp < 0);
}
@@ -1364,7 +1364,7 @@ range_lt(PG_FUNCTION_ARGS)
Datum
range_le(PG_FUNCTION_ARGS)
{
- int cmp = range_cmp(fcinfo);
+ int cmp = DatumGetInt32(range_cmp(fcinfo));
PG_RETURN_BOOL(cmp <= 0);
}
@@ -1372,7 +1372,7 @@ range_le(PG_FUNCTION_ARGS)
Datum
range_ge(PG_FUNCTION_ARGS)
{
- int cmp = range_cmp(fcinfo);
+ int cmp = DatumGetInt32(range_cmp(fcinfo));
PG_RETURN_BOOL(cmp >= 0);
}
@@ -1380,7 +1380,7 @@ range_ge(PG_FUNCTION_ARGS)
Datum
range_gt(PG_FUNCTION_ARGS)
{
- int cmp = range_cmp(fcinfo);
+ int cmp = DatumGetInt32(range_cmp(fcinfo));
PG_RETURN_BOOL(cmp > 0);
}
diff --git a/src/backend/utils/adt/rangetypes_spgist.c b/src/backend/utils/adt/rangetypes_spgist.c
index 9b6d7061a18..be519654880 100644
--- a/src/backend/utils/adt/rangetypes_spgist.c
+++ b/src/backend/utils/adt/rangetypes_spgist.c
@@ -757,7 +757,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
* because it's range
*/
previousCentroid = datumCopy(in->prefixDatum, false, -1);
- out->traversalValues[out->nNodes] = (void *) previousCentroid;
+ out->traversalValues[out->nNodes] = DatumGetPointer(previousCentroid);
}
out->nodeNumbers[out->nNodes] = i - 1;
out->nNodes++;
diff --git a/src/backend/utils/adt/rowtypes.c b/src/backend/utils/adt/rowtypes.c
index fe5edc0027d..9e5449f17d7 100644
--- a/src/backend/utils/adt/rowtypes.c
+++ b/src/backend/utils/adt/rowtypes.c
@@ -1529,9 +1529,9 @@ record_image_cmp(FunctionCallInfo fcinfo)
if ((cmpresult == 0) && (len1 != len2))
cmpresult = (len1 < len2) ? -1 : 1;
- if ((Pointer) arg1val != (Pointer) values1[i1])
+ if ((Pointer) arg1val != DatumGetPointer(values1[i1]))
pfree(arg1val);
- if ((Pointer) arg2val != (Pointer) values2[i2])
+ if ((Pointer) arg2val != DatumGetPointer(values2[i2]))
pfree(arg2val);
}
else
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 17fbfa9b410..1c480cfaaf7 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -5288,8 +5288,8 @@ ReleaseDummy(HeapTuple tuple)
* unique for this query. (Caution: this should be trusted for
* statistical purposes only, since we do not check indimmediate nor
* verify that the exact same definition of equality applies.)
- * acl_ok: true if current user has permission to read the column(s)
- * underlying the pg_statistic entry. This is consulted by
+ * acl_ok: true if current user has permission to read all table rows from
+ * the column(s) underlying the pg_statistic entry. This is consulted by
* statistic_proc_security_check().
*
* Caller is responsible for doing ReleaseVariableStats() before exiting.
@@ -5408,7 +5408,6 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
*/
ListCell *ilist;
ListCell *slist;
- Oid userid;
/*
* The nullingrels bits within the expression could prevent us from
@@ -5418,17 +5417,6 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
if (bms_overlap(varnos, root->outer_join_rels))
node = remove_nulling_relids(node, root->outer_join_rels, NULL);
- /*
- * Determine the user ID to use for privilege checks: either
- * onerel->userid if it's set (e.g., in case we're accessing the table
- * via a view), or the current user otherwise.
- *
- * If we drill down to child relations, we keep using the same userid:
- * it's going to be the same anyway, due to how we set up the relation
- * tree (q.v. build_simple_rel).
- */
- userid = OidIsValid(onerel->userid) ? onerel->userid : GetUserId();
-
foreach(ilist, onerel->indexlist)
{
IndexOptInfo *index = (IndexOptInfo *) lfirst(ilist);
@@ -5496,69 +5484,32 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
if (HeapTupleIsValid(vardata->statsTuple))
{
- /* Get index's table for permission check */
- RangeTblEntry *rte;
-
- rte = planner_rt_fetch(index->rel->relid, root);
- Assert(rte->rtekind == RTE_RELATION);
-
/*
+ * Test if user has permission to access all
+ * rows from the index's table.
+ *
* For simplicity, we insist on the whole
* table being selectable, rather than trying
* to identify which column(s) the index
- * depends on. Also require all rows to be
- * selectable --- there must be no
- * securityQuals from security barrier views
- * or RLS policies.
+ * depends on.
+ *
+ * Note that for an inheritance child,
+ * permissions are checked on the inheritance
+ * root parent, and whole-table select
+ * privilege on the parent doesn't quite
+ * guarantee that the user could read all
+ * columns of the child. But in practice it's
+ * unlikely that any interesting security
+ * violation could result from allowing access
+ * to the expression index's stats, so we
+ * allow it anyway. See similar code in
+ * examine_simple_variable() for additional
+ * comments.
*/
vardata->acl_ok =
- rte->securityQuals == NIL &&
- (pg_class_aclcheck(rte->relid, userid,
- ACL_SELECT) == ACLCHECK_OK);
-
- /*
- * If the user doesn't have permissions to
- * access an inheritance child relation, check
- * the permissions of the table actually
- * mentioned in the query, since most likely
- * the user does have that permission. Note
- * that whole-table select privilege on the
- * parent doesn't quite guarantee that the
- * user could read all columns of the child.
- * But in practice it's unlikely that any
- * interesting security violation could result
- * from allowing access to the expression
- * index's stats, so we allow it anyway. See
- * similar code in examine_simple_variable()
- * for additional comments.
- */
- if (!vardata->acl_ok &&
- root->append_rel_array != NULL)
- {
- AppendRelInfo *appinfo;
- Index varno = index->rel->relid;
-
- appinfo = root->append_rel_array[varno];
- while (appinfo &&
- planner_rt_fetch(appinfo->parent_relid,
- root)->rtekind == RTE_RELATION)
- {
- varno = appinfo->parent_relid;
- appinfo = root->append_rel_array[varno];
- }
- if (varno != index->rel->relid)
- {
- /* Repeat access check on this rel */
- rte = planner_rt_fetch(varno, root);
- Assert(rte->rtekind == RTE_RELATION);
-
- vardata->acl_ok =
- rte->securityQuals == NIL &&
- (pg_class_aclcheck(rte->relid,
- userid,
- ACL_SELECT) == ACLCHECK_OK);
- }
- }
+ all_rows_selectable(root,
+ index->rel->relid,
+ NULL);
}
else
{
@@ -5628,58 +5579,26 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
vardata->freefunc = ReleaseDummy;
/*
+ * Test if user has permission to access all rows from the
+ * table.
+ *
* For simplicity, we insist on the whole table being
* selectable, rather than trying to identify which
- * column(s) the statistics object depends on. Also
- * require all rows to be selectable --- there must be no
- * securityQuals from security barrier views or RLS
- * policies.
+ * column(s) the statistics object depends on.
+ *
+ * Note that for an inheritance child, permissions are
+ * checked on the inheritance root parent, and whole-table
+ * select privilege on the parent doesn't quite guarantee
+ * that the user could read all columns of the child. But
+ * in practice it's unlikely that any interesting security
+ * violation could result from allowing access to the
+ * expression stats, so we allow it anyway. See similar
+ * code in examine_simple_variable() for additional
+ * comments.
*/
- vardata->acl_ok =
- rte->securityQuals == NIL &&
- (pg_class_aclcheck(rte->relid, userid,
- ACL_SELECT) == ACLCHECK_OK);
-
- /*
- * If the user doesn't have permissions to access an
- * inheritance child relation, check the permissions of
- * the table actually mentioned in the query, since most
- * likely the user does have that permission. Note that
- * whole-table select privilege on the parent doesn't
- * quite guarantee that the user could read all columns of
- * the child. But in practice it's unlikely that any
- * interesting security violation could result from
- * allowing access to the expression stats, so we allow it
- * anyway. See similar code in examine_simple_variable()
- * for additional comments.
- */
- if (!vardata->acl_ok &&
- root->append_rel_array != NULL)
- {
- AppendRelInfo *appinfo;
- Index varno = onerel->relid;
-
- appinfo = root->append_rel_array[varno];
- while (appinfo &&
- planner_rt_fetch(appinfo->parent_relid,
- root)->rtekind == RTE_RELATION)
- {
- varno = appinfo->parent_relid;
- appinfo = root->append_rel_array[varno];
- }
- if (varno != onerel->relid)
- {
- /* Repeat access check on this rel */
- rte = planner_rt_fetch(varno, root);
- Assert(rte->rtekind == RTE_RELATION);
-
- vardata->acl_ok =
- rte->securityQuals == NIL &&
- (pg_class_aclcheck(rte->relid,
- userid,
- ACL_SELECT) == ACLCHECK_OK);
- }
- }
+ vardata->acl_ok = all_rows_selectable(root,
+ onerel->relid,
+ NULL);
break;
}
@@ -5734,109 +5653,20 @@ examine_simple_variable(PlannerInfo *root, Var *var,
if (HeapTupleIsValid(vardata->statsTuple))
{
- RelOptInfo *onerel = find_base_rel_noerr(root, var->varno);
- Oid userid;
-
/*
- * Check if user has permission to read this column. We require
- * all rows to be accessible, so there must be no securityQuals
- * from security barrier views or RLS policies.
+ * Test if user has permission to read all rows from this column.
*
- * Normally the Var will have an associated RelOptInfo from which
- * we can find out which userid to do the check as; but it might
- * not if it's a RETURNING Var for an INSERT target relation. In
- * that case use the RTEPermissionInfo associated with the RTE.
+ * This requires that the user has the appropriate SELECT
+ * privileges and that there are no securityQuals from security
+ * barrier views or RLS policies. If that's not the case, then we
+ * only permit leakproof functions to be passed pg_statistic data
+ * in vardata, otherwise the functions might reveal data that the
+ * user doesn't have permission to see --- see
+ * statistic_proc_security_check().
*/
- if (onerel)
- userid = onerel->userid;
- else
- {
- RTEPermissionInfo *perminfo;
-
- perminfo = getRTEPermissionInfo(root->parse->rteperminfos, rte);
- userid = perminfo->checkAsUser;
- }
- if (!OidIsValid(userid))
- userid = GetUserId();
-
vardata->acl_ok =
- rte->securityQuals == NIL &&
- ((pg_class_aclcheck(rte->relid, userid,
- ACL_SELECT) == ACLCHECK_OK) ||
- (pg_attribute_aclcheck(rte->relid, var->varattno, userid,
- ACL_SELECT) == ACLCHECK_OK));
-
- /*
- * If the user doesn't have permissions to access an inheritance
- * child relation or specifically this attribute, check the
- * permissions of the table/column actually mentioned in the
- * query, since most likely the user does have that permission
- * (else the query will fail at runtime), and if the user can read
- * the column there then he can get the values of the child table
- * too. To do that, we must find out which of the root parent's
- * attributes the child relation's attribute corresponds to.
- */
- if (!vardata->acl_ok && var->varattno > 0 &&
- root->append_rel_array != NULL)
- {
- AppendRelInfo *appinfo;
- Index varno = var->varno;
- int varattno = var->varattno;
- bool found = false;
-
- appinfo = root->append_rel_array[varno];
-
- /*
- * Partitions are mapped to their immediate parent, not the
- * root parent, so must be ready to walk up multiple
- * AppendRelInfos. But stop if we hit a parent that is not
- * RTE_RELATION --- that's a flattened UNION ALL subquery, not
- * an inheritance parent.
- */
- while (appinfo &&
- planner_rt_fetch(appinfo->parent_relid,
- root)->rtekind == RTE_RELATION)
- {
- int parent_varattno;
-
- found = false;
- if (varattno <= 0 || varattno > appinfo->num_child_cols)
- break; /* safety check */
- parent_varattno = appinfo->parent_colnos[varattno - 1];
- if (parent_varattno == 0)
- break; /* Var is local to child */
-
- varno = appinfo->parent_relid;
- varattno = parent_varattno;
- found = true;
-
- /* If the parent is itself a child, continue up. */
- appinfo = root->append_rel_array[varno];
- }
-
- /*
- * In rare cases, the Var may be local to the child table, in
- * which case, we've got to live with having no access to this
- * column's stats.
- */
- if (!found)
- return;
-
- /* Repeat the access check on this parent rel & column */
- rte = planner_rt_fetch(varno, root);
- Assert(rte->rtekind == RTE_RELATION);
-
- /*
- * Fine to use the same userid as it's the same in all
- * relations of a given inheritance tree.
- */
- vardata->acl_ok =
- rte->securityQuals == NIL &&
- ((pg_class_aclcheck(rte->relid, userid,
- ACL_SELECT) == ACLCHECK_OK) ||
- (pg_attribute_aclcheck(rte->relid, varattno, userid,
- ACL_SELECT) == ACLCHECK_OK));
- }
+ all_rows_selectable(root, var->varno,
+ bms_make_singleton(var->varattno - FirstLowInvalidHeapAttributeNumber));
}
else
{
@@ -6034,6 +5864,214 @@ examine_simple_variable(PlannerInfo *root, Var *var,
}
/*
+ * all_rows_selectable
+ * Test whether the user has permission to select all rows from a given
+ * relation.
+ *
+ * Inputs:
+ * root: the planner info
+ * varno: the index of the relation (assumed to be an RTE_RELATION)
+ * varattnos: the attributes for which permission is required, or NULL if
+ * whole-table access is required
+ *
+ * Returns true if the user has the required select permissions, and there are
+ * no securityQuals from security barrier views or RLS policies.
+ *
+ * Note that if the relation is an inheritance child relation, securityQuals
+ * and access permissions are checked against the inheritance root parent (the
+ * relation actually mentioned in the query) --- see the comments in
+ * expand_single_inheritance_child() for an explanation of why it has to be
+ * done this way.
+ *
+ * If varattnos is non-NULL, its attribute numbers should be offset by
+ * FirstLowInvalidHeapAttributeNumber so that system attributes can be
+ * checked. If varattnos is NULL, only table-level SELECT privileges are
+ * checked, not any column-level privileges.
+ *
+ * Note: if the relation is accessed via a view, this function actually tests
+ * whether the view owner has permission to select from the relation. To
+ * ensure that the current user has permission, it is also necessary to check
+ * that the current user has permission to select from the view, which we do
+ * at planner-startup --- see subquery_planner().
+ *
+ * This is exported so that other estimation functions can use it.
+ */
+bool
+all_rows_selectable(PlannerInfo *root, Index varno, Bitmapset *varattnos)
+{
+ RelOptInfo *rel = find_base_rel_noerr(root, varno);
+ RangeTblEntry *rte = planner_rt_fetch(varno, root);
+ Oid userid;
+ int varattno;
+
+ Assert(rte->rtekind == RTE_RELATION);
+
+ /*
+ * Determine the user ID to use for privilege checks (either the current
+ * user or the view owner, if we're accessing the table via a view).
+ *
+ * Normally the relation will have an associated RelOptInfo from which we
+ * can find the userid, but it might not if it's a RETURNING Var for an
+ * INSERT target relation. In that case use the RTEPermissionInfo
+ * associated with the RTE.
+ *
+ * If we navigate up to a parent relation, we keep using the same userid,
+ * since it's the same in all relations of a given inheritance tree.
+ */
+ if (rel)
+ userid = rel->userid;
+ else
+ {
+ RTEPermissionInfo *perminfo;
+
+ perminfo = getRTEPermissionInfo(root->parse->rteperminfos, rte);
+ userid = perminfo->checkAsUser;
+ }
+ if (!OidIsValid(userid))
+ userid = GetUserId();
+
+ /*
+ * Permissions and securityQuals must be checked on the table actually
+ * mentioned in the query, so if this is an inheritance child, navigate up
+ * to the inheritance root parent. If the user can read the whole table
+ * or the required columns there, then they can read from the child table
+ * too. For per-column checks, we must find out which of the root
+ * parent's attributes the child relation's attributes correspond to.
+ */
+ if (root->append_rel_array != NULL)
+ {
+ AppendRelInfo *appinfo;
+
+ appinfo = root->append_rel_array[varno];
+
+ /*
+ * Partitions are mapped to their immediate parent, not the root
+ * parent, so must be ready to walk up multiple AppendRelInfos. But
+ * stop if we hit a parent that is not RTE_RELATION --- that's a
+ * flattened UNION ALL subquery, not an inheritance parent.
+ */
+ while (appinfo &&
+ planner_rt_fetch(appinfo->parent_relid,
+ root)->rtekind == RTE_RELATION)
+ {
+ Bitmapset *parent_varattnos = NULL;
+
+ /*
+ * For each child attribute, find the corresponding parent
+ * attribute. In rare cases, the attribute may be local to the
+			 * child table, in which case we've got to live with having no
+ * access to this column.
+ */
+ varattno = -1;
+ while ((varattno = bms_next_member(varattnos, varattno)) >= 0)
+ {
+ AttrNumber attno;
+ AttrNumber parent_attno;
+
+ attno = varattno + FirstLowInvalidHeapAttributeNumber;
+
+ if (attno == InvalidAttrNumber)
+ {
+ /*
+ * Whole-row reference, so must map each column of the
+ * child to the parent table.
+ */
+ for (attno = 1; attno <= appinfo->num_child_cols; attno++)
+ {
+ parent_attno = appinfo->parent_colnos[attno - 1];
+ if (parent_attno == 0)
+ return false; /* attr is local to child */
+ parent_varattnos =
+ bms_add_member(parent_varattnos,
+ parent_attno - FirstLowInvalidHeapAttributeNumber);
+ }
+ }
+ else
+ {
+ if (attno < 0)
+ {
+ /* System attnos are the same in all tables */
+ parent_attno = attno;
+ }
+ else
+ {
+ if (attno > appinfo->num_child_cols)
+ return false; /* safety check */
+ parent_attno = appinfo->parent_colnos[attno - 1];
+ if (parent_attno == 0)
+ return false; /* attr is local to child */
+ }
+ parent_varattnos =
+ bms_add_member(parent_varattnos,
+ parent_attno - FirstLowInvalidHeapAttributeNumber);
+ }
+ }
+
+ /* If the parent is itself a child, continue up */
+ varno = appinfo->parent_relid;
+ varattnos = parent_varattnos;
+ appinfo = root->append_rel_array[varno];
+ }
+
+ /* Perform the access check on this parent rel */
+ rte = planner_rt_fetch(varno, root);
+ Assert(rte->rtekind == RTE_RELATION);
+ }
+
+ /*
+ * For all rows to be accessible, there must be no securityQuals from
+ * security barrier views or RLS policies.
+ */
+ if (rte->securityQuals != NIL)
+ return false;
+
+ /*
+ * Test for table-level SELECT privilege.
+ *
+ * If varattnos is non-NULL, this is sufficient to give access to all
+ * requested attributes, even for a child table, since we have verified
+ * that all required child columns have matching parent columns.
+ *
+ * If varattnos is NULL (whole-table access requested), this doesn't
+ * necessarily guarantee that the user can read all columns of a child
+ * table, but we allow it anyway (see comments in examine_variable()) and
+ * don't bother checking any column privileges.
+ */
+ if (pg_class_aclcheck(rte->relid, userid, ACL_SELECT) == ACLCHECK_OK)
+ return true;
+
+ if (varattnos == NULL)
+ return false; /* whole-table access requested */
+
+ /*
+ * Don't have table-level SELECT privilege, so check per-column
+ * privileges.
+ */
+ varattno = -1;
+ while ((varattno = bms_next_member(varattnos, varattno)) >= 0)
+ {
+ AttrNumber attno = varattno + FirstLowInvalidHeapAttributeNumber;
+
+ if (attno == InvalidAttrNumber)
+ {
+ /* Whole-row reference, so must have access to all columns */
+ if (pg_attribute_aclcheck_all(rte->relid, userid, ACL_SELECT,
+ ACLMASK_ALL) != ACLCHECK_OK)
+ return false;
+ }
+ else
+ {
+ if (pg_attribute_aclcheck(rte->relid, attno, userid,
+ ACL_SELECT) != ACLCHECK_OK)
+ return false;
+ }
+ }
+
+ /* If we reach here, have all required column privileges */
+ return true;
+}
+
+/*
* examine_indexcol_variable
* Try to look up statistical data about an index column/expression.
* Fill in a VariableStatData struct to describe the column.
@@ -6121,15 +6159,17 @@ examine_indexcol_variable(PlannerInfo *root, IndexOptInfo *index,
/*
* Check whether it is permitted to call func_oid passing some of the
- * pg_statistic data in vardata. We allow this either if the user has SELECT
- * privileges on the table or column underlying the pg_statistic data or if
- * the function is marked leakproof.
+ * pg_statistic data in vardata. We allow this if either of the following
+ * conditions is met: (1) the user has SELECT privileges on the table or
+ * column underlying the pg_statistic data and there are no securityQuals from
+ * security barrier views or RLS policies, or (2) the function is marked
+ * leakproof.
*/
bool
statistic_proc_security_check(VariableStatData *vardata, Oid func_oid)
{
if (vardata->acl_ok)
- return true;
+ return true; /* have SELECT privs and no securityQuals */
if (!OidIsValid(func_oid))
return false;
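As a hedged illustration (not part of the patch), an estimation function elsewhere could use the newly exported helper along these lines; the wrapper name is invented, while all_rows_selectable(), bms_add_member() and the FirstLowInvalidHeapAttributeNumber offset follow the contract documented above:

static bool
example_column_is_fully_readable(PlannerInfo *root, Index varno,
								 AttrNumber attnum)
{
	Bitmapset  *attnos = NULL;

	/* Offset the attribute number as all_rows_selectable() requires */
	attnos = bms_add_member(attnos,
							attnum - FirstLowInvalidHeapAttributeNumber);

	/* True only if SELECT is allowed and no securityQuals apply */
	return all_rows_selectable(root, varno, attnos);
}

A true result corresponds to the tightened condition that statistic_proc_security_check() now documents: SELECT privilege on the underlying data and no securityQuals from security barrier views or RLS policies.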
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index 25cff56c3d0..e640b48205b 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -4954,7 +4954,7 @@ timestamptz_trunc_internal(text *units, TimestampTz timestamp, pg_tz *tzp)
case DTK_SECOND:
case DTK_MILLISEC:
case DTK_MICROSEC:
- PG_RETURN_TIMESTAMPTZ(timestamp);
+ return timestamp;
break;
default:
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index ffae8c23abf..11b442a5941 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -408,13 +408,12 @@ text_length(Datum str)
{
/* fastpath when max encoding length is one */
if (pg_database_encoding_max_length() == 1)
- PG_RETURN_INT32(toast_raw_datum_size(str) - VARHDRSZ);
+ return (toast_raw_datum_size(str) - VARHDRSZ);
else
{
text *t = DatumGetTextPP(str);
- PG_RETURN_INT32(pg_mbstrlen_with_len(VARDATA_ANY(t),
- VARSIZE_ANY_EXHDR(t)));
+ return (pg_mbstrlen_with_len(VARDATA_ANY(t), VARSIZE_ANY_EXHDR(t)));
}
}
diff --git a/src/backend/utils/adt/waitfuncs.c b/src/backend/utils/adt/waitfuncs.c
index ddd0a57c0c5..f01cad72a0f 100644
--- a/src/backend/utils/adt/waitfuncs.c
+++ b/src/backend/utils/adt/waitfuncs.c
@@ -73,7 +73,7 @@ pg_isolation_test_session_is_blocked(PG_FUNCTION_ARGS)
* acquire heavyweight locks.
*/
blocking_pids_a =
- DatumGetArrayTypeP(DirectFunctionCall1(pg_blocking_pids, blocked_pid));
+ DatumGetArrayTypeP(DirectFunctionCall1(pg_blocking_pids, Int32GetDatum(blocked_pid)));
Assert(ARR_ELEMTYPE(blocking_pids_a) == INT4OID);
Assert(!array_contains_nulls(blocking_pids_a));
diff --git a/src/backend/utils/cache/attoptcache.c b/src/backend/utils/cache/attoptcache.c
index 5c8360c08b5..45d1e2be007 100644
--- a/src/backend/utils/cache/attoptcache.c
+++ b/src/backend/utils/cache/attoptcache.c
@@ -86,7 +86,7 @@ relatt_cache_syshash(const void *key, Size keysize)
const AttoptCacheKey *ckey = key;
Assert(keysize == sizeof(*ckey));
- return GetSysCacheHashValue2(ATTNUM, ckey->attrelid, ckey->attnum);
+ return GetSysCacheHashValue2(ATTNUM, ObjectIdGetDatum(ckey->attrelid), Int32GetDatum(ckey->attnum));
}
/*
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index c460a72b75d..032bb6222c4 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -3817,7 +3817,7 @@ get_subscription_oid(const char *subname, bool missing_ok)
Oid oid;
oid = GetSysCacheOid2(SUBSCRIPTIONNAME, Anum_pg_subscription_oid,
- MyDatabaseId, CStringGetDatum(subname));
+ ObjectIdGetDatum(MyDatabaseId), CStringGetDatum(subname));
if (!OidIsValid(oid) && !missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 559ba9cdb2c..6fe268a8eec 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -3184,7 +3184,7 @@ AssertPendingSyncs_RelationCache(void)
if ((LockTagType) locallock->tag.lock.locktag_type !=
LOCKTAG_RELATION)
continue;
- relid = ObjectIdGetDatum(locallock->tag.lock.locktag_field2);
+ relid = locallock->tag.lock.locktag_field2;
r = RelationIdGetRelation(relid);
if (!RelationIsValid(r))
continue;
@@ -6991,5 +6991,5 @@ ResOwnerReleaseRelation(Datum res)
Assert(rel->rd_refcnt > 0);
rel->rd_refcnt -= 1;
- RelationCloseCleanup((Relation) res);
+ RelationCloseCleanup((Relation) DatumGetPointer(res));
}
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index f944453a1d8..7828bdcba8f 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -459,9 +459,9 @@ GetSysCacheOid(int cacheId,
tuple = SearchSysCache(cacheId, key1, key2, key3, key4);
if (!HeapTupleIsValid(tuple))
return InvalidOid;
- result = heap_getattr(tuple, oidcol,
- SysCache[cacheId]->cc_tupdesc,
- &isNull);
+ result = DatumGetObjectId(heap_getattr(tuple, oidcol,
+ SysCache[cacheId]->cc_tupdesc,
+ &isNull));
Assert(!isNull); /* columns used as oids should never be NULL */
ReleaseSysCache(tuple);
return result;
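Many of the mechanical hunks in this area apply a single rule: values that cross a Datum boundary are packed and unpacked explicitly instead of relying on implicit conversions. A minimal, hypothetical round trip (not code from the patch):

static void
datum_roundtrip_example(Oid relid)
{
	Datum		d = ObjectIdGetDatum(relid);	/* pack the Oid explicitly */

	Assert(DatumGetObjectId(d) == relid);		/* unpack it explicitly */
}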
diff --git a/src/backend/utils/error/csvlog.c b/src/backend/utils/error/csvlog.c
index fdac3c048e3..c3159ed7d97 100644
--- a/src/backend/utils/error/csvlog.c
+++ b/src/backend/utils/error/csvlog.c
@@ -120,7 +120,7 @@ write_csvlog(ErrorData *edata)
appendStringInfoChar(&buf, ',');
/* session id */
- appendStringInfo(&buf, INT64_HEX_FORMAT ".%x", MyStartTime, MyProcPid);
+ appendStringInfo(&buf, "%" PRIx64 ".%x", MyStartTime, MyProcPid);
appendStringInfoChar(&buf, ',');
/* Line number */
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index afce1a8e1f0..b7b9692f8c8 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -2959,12 +2959,12 @@ log_status_format(StringInfo buf, const char *format, ErrorData *edata)
{
char strfbuf[128];
- snprintf(strfbuf, sizeof(strfbuf) - 1, INT64_HEX_FORMAT ".%x",
+ snprintf(strfbuf, sizeof(strfbuf) - 1, "%" PRIx64 ".%x",
MyStartTime, MyProcPid);
appendStringInfo(buf, "%*s", padding, strfbuf);
}
else
- appendStringInfo(buf, INT64_HEX_FORMAT ".%x", MyStartTime, MyProcPid);
+ appendStringInfo(buf, "%" PRIx64 ".%x", MyStartTime, MyProcPid);
break;
case 'p':
if (padding != 0)
diff --git a/src/backend/utils/error/jsonlog.c b/src/backend/utils/error/jsonlog.c
index 519eacf17f8..2619f499042 100644
--- a/src/backend/utils/error/jsonlog.c
+++ b/src/backend/utils/error/jsonlog.c
@@ -168,7 +168,7 @@ write_jsonlog(ErrorData *edata)
}
/* Session id */
- appendJSONKeyValueFmt(&buf, "session_id", true, INT64_HEX_FORMAT ".%x",
+ appendJSONKeyValueFmt(&buf, "session_id", true, "%" PRIx64 ".%x",
MyStartTime, MyProcPid);
/* Line number */
diff --git a/src/backend/utils/sort/sortsupport.c b/src/backend/utils/sort/sortsupport.c
index e0f500b9aa2..f582c6624f1 100644
--- a/src/backend/utils/sort/sortsupport.c
+++ b/src/backend/utils/sort/sortsupport.c
@@ -57,7 +57,7 @@ comparison_shim(Datum x, Datum y, SortSupport ssup)
if (extra->fcinfo.isnull)
elog(ERROR, "function %u returned NULL", extra->flinfo.fn_oid);
- return result;
+ return DatumGetInt32(result);
}
/*
diff --git a/src/backend/utils/sort/tuplesortvariants.c b/src/backend/utils/sort/tuplesortvariants.c
index 5f70e8dddac..c5d18e46716 100644
--- a/src/backend/utils/sort/tuplesortvariants.c
+++ b/src/backend/utils/sort/tuplesortvariants.c
@@ -865,7 +865,7 @@ tuplesort_putbrintuple(Tuplesortstate *state, BrinTuple *tuple, Size size)
memcpy(&bstup->tuple, tuple, size);
stup.tuple = bstup;
- stup.datum1 = tuple->bt_blkno;
+ stup.datum1 = UInt32GetDatum(tuple->bt_blkno);
stup.isnull1 = false;
/* GetMemoryChunkSpace is not supported for bump contexts */
@@ -1836,7 +1836,7 @@ removeabbrev_index_brin(Tuplesortstate *state, SortTuple *stups, int count)
BrinSortTuple *tuple;
tuple = stups[i].tuple;
- stups[i].datum1 = tuple->tuple.bt_blkno;
+ stups[i].datum1 = UInt32GetDatum(tuple->tuple.bt_blkno);
}
}
@@ -1893,7 +1893,7 @@ readtup_index_brin(Tuplesortstate *state, SortTuple *stup,
stup->tuple = tuple;
/* set up first-column key value, which is block number */
- stup->datum1 = tuple->tuple.bt_blkno;
+ stup->datum1 = UInt32GetDatum(tuple->tuple.bt_blkno);
}
/*
diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c
index 55621f35fb6..0a3ca4315de 100644
--- a/src/bin/pg_basebackup/pg_basebackup.c
+++ b/src/bin/pg_basebackup/pg_basebackup.c
@@ -35,6 +35,7 @@
#include "fe_utils/option_utils.h"
#include "fe_utils/recovery_gen.h"
#include "getopt_long.h"
+#include "libpq/protocol.h"
#include "receivelog.h"
#include "streamutil.h"
@@ -1338,7 +1339,7 @@ ReceiveArchiveStreamChunk(size_t r, char *copybuf, void *callback_data)
/* Each CopyData message begins with a type byte. */
switch (GetCopyDataByte(r, copybuf, &cursor))
{
- case 'n':
+ case PqBackupMsg_NewArchive:
{
/* New archive. */
char *archive_name;
@@ -1410,7 +1411,7 @@ ReceiveArchiveStreamChunk(size_t r, char *copybuf, void *callback_data)
break;
}
- case 'd':
+ case PqMsg_CopyData:
{
/* Archive or manifest data. */
if (state->manifest_buffer != NULL)
@@ -1446,7 +1447,7 @@ ReceiveArchiveStreamChunk(size_t r, char *copybuf, void *callback_data)
break;
}
- case 'p':
+ case PqBackupMsg_ProgressReport:
{
/*
* Progress report.
@@ -1465,7 +1466,7 @@ ReceiveArchiveStreamChunk(size_t r, char *copybuf, void *callback_data)
break;
}
- case 'm':
+ case PqBackupMsg_Manifest:
{
/*
* Manifest data will be sent next. This message is not
diff --git a/src/bin/pg_basebackup/pg_recvlogical.c b/src/bin/pg_basebackup/pg_recvlogical.c
index 0e9d2e23947..7a4d1a2d2ca 100644
--- a/src/bin/pg_basebackup/pg_recvlogical.c
+++ b/src/bin/pg_basebackup/pg_recvlogical.c
@@ -24,6 +24,7 @@
#include "getopt_long.h"
#include "libpq-fe.h"
#include "libpq/pqsignal.h"
+#include "libpq/protocol.h"
#include "pqexpbuffer.h"
#include "streamutil.h"
@@ -149,7 +150,7 @@ sendFeedback(PGconn *conn, TimestampTz now, bool force, bool replyRequested)
LSN_FORMAT_ARGS(output_fsync_lsn),
replication_slot);
- replybuf[len] = 'r';
+ replybuf[len] = PqReplMsg_StandbyStatusUpdate;
len += 1;
fe_sendint64(output_written_lsn, &replybuf[len]); /* write */
len += 8;
@@ -454,7 +455,7 @@ StreamLogicalLog(void)
}
/* Check the message type. */
- if (copybuf[0] == 'k')
+ if (copybuf[0] == PqReplMsg_Keepalive)
{
int pos;
bool replyRequested;
@@ -466,7 +467,7 @@ StreamLogicalLog(void)
* We just check if the server requested a reply, and ignore the
* rest.
*/
- pos = 1; /* skip msgtype 'k' */
+ pos = 1; /* skip msgtype PqReplMsg_Keepalive */
walEnd = fe_recvint64(&copybuf[pos]);
output_written_lsn = Max(walEnd, output_written_lsn);
@@ -509,7 +510,7 @@ StreamLogicalLog(void)
continue;
}
- else if (copybuf[0] != 'w')
+ else if (copybuf[0] != PqReplMsg_WALData)
{
pg_log_error("unrecognized streaming header: \"%c\"",
copybuf[0]);
@@ -521,7 +522,7 @@ StreamLogicalLog(void)
* message. We only need the WAL location field (dataStart), the rest
* of the header is ignored.
*/
- hdr_len = 1; /* msgtype 'w' */
+ hdr_len = 1; /* msgtype PqReplMsg_WALData */
hdr_len += 8; /* dataStart */
hdr_len += 8; /* walEnd */
hdr_len += 8; /* sendTime */
diff --git a/src/bin/pg_basebackup/receivelog.c b/src/bin/pg_basebackup/receivelog.c
index f2b54d3c501..25b13c7f55c 100644
--- a/src/bin/pg_basebackup/receivelog.c
+++ b/src/bin/pg_basebackup/receivelog.c
@@ -21,6 +21,7 @@
#include "access/xlog_internal.h"
#include "common/logging.h"
#include "libpq-fe.h"
+#include "libpq/protocol.h"
#include "receivelog.h"
#include "streamutil.h"
@@ -338,7 +339,7 @@ sendFeedback(PGconn *conn, XLogRecPtr blockpos, TimestampTz now, bool replyReque
char replybuf[1 + 8 + 8 + 8 + 8 + 1];
int len = 0;
- replybuf[len] = 'r';
+ replybuf[len] = PqReplMsg_StandbyStatusUpdate;
len += 1;
fe_sendint64(blockpos, &replybuf[len]); /* write */
len += 8;
@@ -823,13 +824,13 @@ HandleCopyStream(PGconn *conn, StreamCtl *stream,
}
/* Check the message type. */
- if (copybuf[0] == 'k')
+ if (copybuf[0] == PqReplMsg_Keepalive)
{
if (!ProcessKeepaliveMsg(conn, stream, copybuf, r, blockpos,
&last_status))
goto error;
}
- else if (copybuf[0] == 'w')
+ else if (copybuf[0] == PqReplMsg_WALData)
{
if (!ProcessWALDataMsg(conn, stream, copybuf, r, &blockpos))
goto error;
@@ -1001,7 +1002,7 @@ ProcessKeepaliveMsg(PGconn *conn, StreamCtl *stream, char *copybuf, int len,
* Parse the keepalive message, enclosed in the CopyData message. We just
* check if the server requested a reply, and ignore the rest.
*/
- pos = 1; /* skip msgtype 'k' */
+ pos = 1; /* skip msgtype PqReplMsg_Keepalive */
pos += 8; /* skip walEnd */
pos += 8; /* skip sendTime */
@@ -1064,7 +1065,7 @@ ProcessWALDataMsg(PGconn *conn, StreamCtl *stream, char *copybuf, int len,
* message. We only need the WAL location field (dataStart), the rest of
* the header is ignored.
*/
- hdr_len = 1; /* msgtype 'w' */
+ hdr_len = 1; /* msgtype PqReplMsg_WALData */
hdr_len += 8; /* dataStart */
hdr_len += 8; /* walEnd */
hdr_len += 8; /* sendTime */
diff --git a/src/bin/pg_combinebackup/t/002_compare_backups.pl b/src/bin/pg_combinebackup/t/002_compare_backups.pl
index 2c7ca89b92f..9c7fe56cb7c 100644
--- a/src/bin/pg_combinebackup/t/002_compare_backups.pl
+++ b/src/bin/pg_combinebackup/t/002_compare_backups.pl
@@ -174,6 +174,7 @@ my $dump2 = $backupdir . '/pitr2.dump';
$pitr1->command_ok(
[
'pg_dumpall',
+ '--restrict-key=test',
'--no-sync',
'--no-unlogged-table-data',
'--file' => $dump1,
@@ -183,6 +184,7 @@ $pitr1->command_ok(
$pitr2->command_ok(
[
'pg_dumpall',
+ '--restrict-key=test',
'--no-sync',
'--no-unlogged-table-data',
'--file' => $dump2,
diff --git a/src/bin/pg_dump/dumputils.c b/src/bin/pg_dump/dumputils.c
index 73ce34346b2..8945bdd42c5 100644
--- a/src/bin/pg_dump/dumputils.c
+++ b/src/bin/pg_dump/dumputils.c
@@ -21,6 +21,7 @@
#include "dumputils.h"
#include "fe_utils/string_utils.h"
+static const char restrict_chars[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
static bool parseAclItem(const char *item, const char *type,
const char *name, const char *subname, int remoteVersion,
@@ -32,6 +33,43 @@ static void AddAcl(PQExpBuffer aclbuf, const char *keyword,
/*
+ * Sanitize a string to be included in an SQL comment or TOC listing, by
+ * replacing any newlines with spaces. This ensures each logical output line
+ * is in fact one physical output line, to prevent corruption of the dump
+ * (which could, in the worst case, present an SQL injection vulnerability
+ * if someone were to incautiously load a dump containing objects with
+ * maliciously crafted names).
+ *
+ * The result is a freshly malloc'd string. If the input string is NULL,
+ * return a malloc'ed empty string, unless want_hyphen, in which case return a
+ * malloc'ed hyphen.
+ *
+ * Note that we currently don't bother to quote names, meaning that the name
+ * fields aren't automatically parseable. "pg_restore -L" doesn't care because
+ * it only examines the dumpId field, but someday we might want to try harder.
+ */
+char *
+sanitize_line(const char *str, bool want_hyphen)
+{
+ char *result;
+ char *s;
+
+ if (!str)
+ return pg_strdup(want_hyphen ? "-" : "");
+
+ result = pg_strdup(str);
+
+ for (s = result; *s != '\0'; s++)
+ {
+ if (*s == '\n' || *s == '\r')
+ *s = ' ';
+ }
+
+ return result;
+}
+
+
+/*
* Build GRANT/REVOKE command(s) for an object.
*
* name: the object name, in the form to use in the commands (already quoted)
@@ -920,3 +958,40 @@ create_or_open_dir(const char *dirname)
pg_fatal("directory \"%s\" is not empty", dirname);
}
}
+
+/*
+ * Generates a valid restrict key (i.e., an alphanumeric string) for use with
+ * psql's \restrict and \unrestrict meta-commands. For safety, the value is
+ * chosen at random.
+ */
+char *
+generate_restrict_key(void)
+{
+ uint8 buf[64];
+ char *ret = palloc(sizeof(buf));
+
+ if (!pg_strong_random(buf, sizeof(buf)))
+ return NULL;
+
+ for (int i = 0; i < sizeof(buf) - 1; i++)
+ {
+ uint8 idx = buf[i] % strlen(restrict_chars);
+
+ ret[i] = restrict_chars[idx];
+ }
+ ret[sizeof(buf) - 1] = '\0';
+
+ return ret;
+}
+
+/*
+ * Checks that a given restrict key (intended for use with psql's \restrict and
+ * \unrestrict meta-commands) contains only alphanumeric characters.
+ */
+bool
+valid_restrict_key(const char *restrict_key)
+{
+ return restrict_key != NULL &&
+ restrict_key[0] != '\0' &&
+ strspn(restrict_key, restrict_chars) == strlen(restrict_key);
+}
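A hedged sketch of how the helpers above are meant to be combined; emit_restricted_script() is an invented wrapper, while generate_restrict_key(), valid_restrict_key(), pg_fatal() and the \restrict/\unrestrict commands all appear elsewhere in this patch:

static void
emit_restricted_script(FILE *out, const char *sql)
{
	char	   *key = generate_restrict_key();

	if (key == NULL)
		pg_fatal("could not generate restrict key");	/* pg_strong_random() failed */
	Assert(valid_restrict_key(key));

	/* Between these two commands psql rejects all other backslash commands */
	fprintf(out, "\\restrict %s\n\n", key);
	fputs(sql, out);
	fprintf(out, "\n\\unrestrict %s\n", key);

	free(key);
}

pg_dump, pg_dumpall and pg_restore below follow this same shape, except that a user-supplied --restrict-key takes precedence over a generated one.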
diff --git a/src/bin/pg_dump/dumputils.h b/src/bin/pg_dump/dumputils.h
index 91c6e612e28..10f6e27c0a0 100644
--- a/src/bin/pg_dump/dumputils.h
+++ b/src/bin/pg_dump/dumputils.h
@@ -36,6 +36,7 @@
#endif
+extern char *sanitize_line(const char *str, bool want_hyphen);
extern bool buildACLCommands(const char *name, const char *subname, const char *nspname,
const char *type, const char *acls, const char *baseacls,
const char *owner, const char *prefix, int remoteVersion,
@@ -64,4 +65,7 @@ extern void makeAlterConfigCommand(PGconn *conn, const char *configitem,
PQExpBuffer buf);
extern void create_or_open_dir(const char *dirname);
+extern char *generate_restrict_key(void);
+extern bool valid_restrict_key(const char *restrict_key);
+
#endif /* DUMPUTILS_H */
diff --git a/src/bin/pg_dump/filter.c b/src/bin/pg_dump/filter.c
index 7214d514137..e3cdcf40975 100644
--- a/src/bin/pg_dump/filter.c
+++ b/src/bin/pg_dump/filter.c
@@ -171,9 +171,8 @@ pg_log_filter_error(FilterStateData *fstate, const char *fmt,...)
/*
* filter_get_keyword - read the next filter keyword from buffer
*
- * Search for keywords (limited to ascii alphabetic characters) in
- * the passed in line buffer. Returns NULL when the buffer is empty or the first
- * char is not alpha. The char '_' is allowed, except as the first character.
+ * Search for keywords (strings of non-whitespace characters) in the passed
+ * in line buffer. Returns NULL when the buffer is empty or no keyword exists.
* The length of the found keyword is returned in the size parameter.
*/
static const char *
@@ -182,6 +181,9 @@ filter_get_keyword(const char **line, int *size)
const char *ptr = *line;
const char *result = NULL;
+ /* The passed buffer must not be NULL */
+ Assert(*line != NULL);
+
/* Set returned length preemptively in case no keyword is found */
*size = 0;
@@ -189,11 +191,12 @@ filter_get_keyword(const char **line, int *size)
while (isspace((unsigned char) *ptr))
ptr++;
- if (isalpha((unsigned char) *ptr))
+ /* Grab one keyword that's the string of non-whitespace characters */
+ if (*ptr != '\0' && !isspace((unsigned char) *ptr))
{
result = ptr++;
- while (isalpha((unsigned char) *ptr) || *ptr == '_')
+ while (*ptr != '\0' && !isspace((unsigned char) *ptr))
ptr++;
*size = ptr - result;
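The practical effect of the relaxed tokenization, as a hypothetical call (filter_get_keyword() is static to filter.c, so this is illustrative only):

	const char *line = "exclude table-data one";
	int			size;
	const char *kw = filter_get_keyword(&line, &size);

	/*
	 * kw points at "exclude" and size is 7 under both the old and the new
	 * rule; the difference is that a token such as "table-data" is now
	 * returned whole instead of being cut at the hyphen to "table".
	 */

The filter-file test changes further down exercise exactly this case.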
diff --git a/src/bin/pg_dump/pg_backup.h b/src/bin/pg_dump/pg_backup.h
index 4ebef1e8644..d9041dad720 100644
--- a/src/bin/pg_dump/pg_backup.h
+++ b/src/bin/pg_dump/pg_backup.h
@@ -163,6 +163,8 @@ typedef struct _restoreOptions
bool dumpSchema;
bool dumpData;
bool dumpStatistics;
+
+ char *restrict_key;
} RestoreOptions;
typedef struct _dumpOptions
@@ -213,6 +215,8 @@ typedef struct _dumpOptions
bool dumpSchema;
bool dumpData;
bool dumpStatistics;
+
+ char *restrict_key;
} DumpOptions;
/*
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index dce88f040ac..3c3acbaccdb 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -59,7 +59,6 @@ static ArchiveHandle *_allocAH(const char *FileSpec, const ArchiveFormat fmt,
DataDirSyncMethod sync_method);
static void _getObjectDescription(PQExpBuffer buf, const TocEntry *te);
static void _printTocEntry(ArchiveHandle *AH, TocEntry *te, const char *pfx);
-static char *sanitize_line(const char *str, bool want_hyphen);
static void _doSetFixedOutputState(ArchiveHandle *AH);
static void _doSetSessionAuth(ArchiveHandle *AH, const char *user);
static void _reconnectToDB(ArchiveHandle *AH, const char *dbname);
@@ -198,6 +197,7 @@ dumpOptionsFromRestoreOptions(RestoreOptions *ropt)
dopt->include_everything = ropt->include_everything;
dopt->enable_row_security = ropt->enable_row_security;
dopt->sequence_data = ropt->sequence_data;
+ dopt->restrict_key = ropt->restrict_key ? pg_strdup(ropt->restrict_key) : NULL;
return dopt;
}
@@ -462,6 +462,17 @@ RestoreArchive(Archive *AHX)
ahprintf(AH, "--\n-- PostgreSQL database dump\n--\n\n");
+ /*
+ * If generating plain-text output, enter restricted mode to block any
+ * unexpected psql meta-commands. A malicious source might try to inject
+ * a variety of things via bogus responses to queries. While we cannot
+ * prevent such sources from affecting the destination at restore time, we
+ * can block psql meta-commands so that the client machine that runs psql
+ * with the dump output remains unaffected.
+ */
+ if (ropt->restrict_key)
+ ahprintf(AH, "\\restrict %s\n\n", ropt->restrict_key);
+
if (AH->archiveRemoteVersion)
ahprintf(AH, "-- Dumped from database version %s\n",
AH->archiveRemoteVersion);
@@ -803,6 +814,14 @@ RestoreArchive(Archive *AHX)
ahprintf(AH, "--\n-- PostgreSQL database dump complete\n--\n\n");
/*
+ * If generating plain-text output, exit restricted mode at the very end
+ * of the script. This is not pro forma; in particular, pg_dumpall
+ * requires this when transitioning from one database to another.
+ */
+ if (ropt->restrict_key)
+ ahprintf(AH, "\\unrestrict %s\n\n", ropt->restrict_key);
+
+ /*
* Clean up & we're done.
*/
AH->stage = STAGE_FINALIZING;
@@ -3453,11 +3472,21 @@ _reconnectToDB(ArchiveHandle *AH, const char *dbname)
else
{
PQExpBufferData connectbuf;
+ RestoreOptions *ropt = AH->public.ropt;
+
+ /*
+ * We must temporarily exit restricted mode for \connect, etc.
+ * Anything added between this line and the following \restrict must
+ * be careful to avoid any possible meta-command injection vectors.
+ */
+ ahprintf(AH, "\\unrestrict %s\n", ropt->restrict_key);
initPQExpBuffer(&connectbuf);
appendPsqlMetaConnect(&connectbuf, dbname);
- ahprintf(AH, "%s\n", connectbuf.data);
+ ahprintf(AH, "%s", connectbuf.data);
termPQExpBuffer(&connectbuf);
+
+ ahprintf(AH, "\\restrict %s\n\n", ropt->restrict_key);
}
/*
@@ -4051,42 +4080,6 @@ _printTocEntry(ArchiveHandle *AH, TocEntry *te, const char *pfx)
}
/*
- * Sanitize a string to be included in an SQL comment or TOC listing, by
- * replacing any newlines with spaces. This ensures each logical output line
- * is in fact one physical output line, to prevent corruption of the dump
- * (which could, in the worst case, present an SQL injection vulnerability
- * if someone were to incautiously load a dump containing objects with
- * maliciously crafted names).
- *
- * The result is a freshly malloc'd string. If the input string is NULL,
- * return a malloc'ed empty string, unless want_hyphen, in which case return a
- * malloc'ed hyphen.
- *
- * Note that we currently don't bother to quote names, meaning that the name
- * fields aren't automatically parseable. "pg_restore -L" doesn't care because
- * it only examines the dumpId field, but someday we might want to try harder.
- */
-static char *
-sanitize_line(const char *str, bool want_hyphen)
-{
- char *result;
- char *s;
-
- if (!str)
- return pg_strdup(want_hyphen ? "-" : "");
-
- result = pg_strdup(str);
-
- for (s = result; *s != '\0'; s++)
- {
- if (*s == '\n' || *s == '\r')
- *s = ' ';
- }
-
- return result;
-}
-
-/*
* Write the file header for a custom-format archive
*/
void
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index f3a353a61a5..fc7a6639163 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -537,6 +537,7 @@ main(int argc, char **argv)
{"filter", required_argument, NULL, 16},
{"exclude-extension", required_argument, NULL, 17},
{"sequence-data", no_argument, &dopt.sequence_data, 1},
+ {"restrict-key", required_argument, NULL, 25},
{NULL, 0, NULL, 0}
};
@@ -797,6 +798,10 @@ main(int argc, char **argv)
with_statistics = true;
break;
+ case 25:
+ dopt.restrict_key = pg_strdup(optarg);
+ break;
+
default:
/* getopt_long already emitted a complaint */
pg_log_error_hint("Try \"%s --help\" for more information.", progname);
@@ -889,8 +894,22 @@ main(int argc, char **argv)
/* archiveFormat specific setup */
if (archiveFormat == archNull)
+ {
plainText = 1;
+ /*
+ * If you don't provide a restrict key, one will be appointed for you.
+ */
+ if (!dopt.restrict_key)
+ dopt.restrict_key = generate_restrict_key();
+ if (!dopt.restrict_key)
+ pg_fatal("could not generate restrict key");
+ if (!valid_restrict_key(dopt.restrict_key))
+ pg_fatal("invalid restrict key");
+ }
+ else if (dopt.restrict_key)
+ pg_fatal("option --restrict-key can only be used with --format=plain");
+
/*
* Custom and directory formats are compressed by default with gzip when
* available, not the others. If gzip is not available, no compression is
@@ -1229,6 +1248,7 @@ main(int argc, char **argv)
ropt->enable_row_security = dopt.enable_row_security;
ropt->sequence_data = dopt.sequence_data;
ropt->binary_upgrade = dopt.binary_upgrade;
+ ropt->restrict_key = dopt.restrict_key ? pg_strdup(dopt.restrict_key) : NULL;
ropt->compression_spec = compression_spec;
@@ -1340,6 +1360,7 @@ help(const char *progname)
printf(_(" --no-unlogged-table-data do not dump unlogged table data\n"));
printf(_(" --on-conflict-do-nothing add ON CONFLICT DO NOTHING to INSERT commands\n"));
printf(_(" --quote-all-identifiers quote all identifiers, even if not key words\n"));
+ printf(_(" --restrict-key=RESTRICT_KEY use provided string as psql \\restrict key\n"));
printf(_(" --rows-per-insert=NROWS number of rows per INSERT; implies --inserts\n"));
printf(_(" --section=SECTION dump named section (pre-data, data, or post-data)\n"));
printf(_(" --sequence-data include sequence data in dump\n"));
@@ -2840,11 +2861,14 @@ dumpTableData(Archive *fout, const TableDataInfo *tdinfo)
forcePartitionRootLoad(tbinfo)))
{
TableInfo *parentTbinfo;
+ char *sanitized;
parentTbinfo = getRootTableInfo(tbinfo);
copyFrom = fmtQualifiedDumpable(parentTbinfo);
+ sanitized = sanitize_line(copyFrom, true);
printfPQExpBuffer(copyBuf, "-- load via partition root %s",
- copyFrom);
+ sanitized);
+ free(sanitized);
tdDefn = pg_strdup(copyBuf->data);
}
else
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 27aa1b65698..bb451c1bae1 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -122,6 +122,8 @@ static char *filename = NULL;
static SimpleStringList database_exclude_patterns = {NULL, NULL};
static SimpleStringList database_exclude_names = {NULL, NULL};
+static char *restrict_key;
+
int
main(int argc, char *argv[])
{
@@ -184,6 +186,7 @@ main(int argc, char *argv[])
{"statistics-only", no_argument, &statistics_only, 1},
{"filter", required_argument, NULL, 8},
{"sequence-data", no_argument, &sequence_data, 1},
+ {"restrict-key", required_argument, NULL, 9},
{NULL, 0, NULL, 0}
};
@@ -371,6 +374,12 @@ main(int argc, char *argv[])
read_dumpall_filters(optarg, &database_exclude_patterns);
break;
+ case 9:
+ restrict_key = pg_strdup(optarg);
+ appendPQExpBufferStr(pgdumpopts, " --restrict-key ");
+ appendShellString(pgdumpopts, optarg);
+ break;
+
default:
/* getopt_long already emitted a complaint */
pg_log_error_hint("Try \"%s --help\" for more information.", progname);
@@ -481,6 +490,16 @@ main(int argc, char *argv[])
appendPQExpBufferStr(pgdumpopts, " --sequence-data");
/*
+ * If you don't provide a restrict key, one will be appointed for you.
+ */
+ if (!restrict_key)
+ restrict_key = generate_restrict_key();
+ if (!restrict_key)
+ pg_fatal("could not generate restrict key");
+ if (!valid_restrict_key(restrict_key))
+ pg_fatal("invalid restrict key");
+
+ /*
* If there was a database specified on the command line, use that,
* otherwise try to connect to database "postgres", and failing that
* "template1".
@@ -571,6 +590,16 @@ main(int argc, char *argv[])
dumpTimestamp("Started on");
/*
+ * Enter restricted mode to block any unexpected psql meta-commands. A
+ * malicious source might try to inject a variety of things via bogus
+ * responses to queries. While we cannot prevent such sources from
+ * affecting the destination at restore time, we can block psql
+ * meta-commands so that the client machine that runs psql with the dump
+ * output remains unaffected.
+ */
+ fprintf(OPF, "\\restrict %s\n\n", restrict_key);
+
+ /*
* We used to emit \connect postgres here, but that served no purpose
* other than to break things for installations without a postgres
* database. Everything we're restoring here is a global, so whichever
@@ -630,6 +659,12 @@ main(int argc, char *argv[])
dumpTablespaces(conn);
}
+ /*
+ * Exit restricted mode just before dumping the databases. pg_dump will
+ * handle entering restricted mode again as appropriate.
+ */
+ fprintf(OPF, "\\unrestrict %s\n\n", restrict_key);
+
if (!globals_only && !roles_only && !tablespaces_only)
dumpDatabases(conn);
@@ -702,6 +737,7 @@ help(void)
printf(_(" --no-unlogged-table-data do not dump unlogged table data\n"));
printf(_(" --on-conflict-do-nothing add ON CONFLICT DO NOTHING to INSERT commands\n"));
printf(_(" --quote-all-identifiers quote all identifiers, even if not key words\n"));
+ printf(_(" --restrict-key=RESTRICT_KEY use provided string as psql \\restrict key\n"));
printf(_(" --rows-per-insert=NROWS number of rows per INSERT; implies --inserts\n"));
printf(_(" --sequence-data include sequence data in dump\n"));
printf(_(" --statistics dump the statistics\n"));
@@ -1492,7 +1528,13 @@ dumpUserConfig(PGconn *conn, const char *username)
res = executeQuery(conn, buf->data);
if (PQntuples(res) > 0)
- fprintf(OPF, "\n--\n-- User Config \"%s\"\n--\n\n", username);
+ {
+ char *sanitized;
+
+ sanitized = sanitize_line(username, true);
+ fprintf(OPF, "\n--\n-- User Config \"%s\"\n--\n\n", sanitized);
+ free(sanitized);
+ }
for (int i = 0; i < PQntuples(res); i++)
{
@@ -1594,6 +1636,7 @@ dumpDatabases(PGconn *conn)
for (i = 0; i < PQntuples(res); i++)
{
char *dbname = PQgetvalue(res, i, 0);
+ char *sanitized;
const char *create_opts;
int ret;
@@ -1610,7 +1653,9 @@ dumpDatabases(PGconn *conn)
pg_log_info("dumping database \"%s\"", dbname);
- fprintf(OPF, "--\n-- Database \"%s\" dump\n--\n\n", dbname);
+ sanitized = sanitize_line(dbname, true);
+ fprintf(OPF, "--\n-- Database \"%s\" dump\n--\n\n", sanitized);
+ free(sanitized);
/*
* We assume that "template1" and "postgres" already exist in the
diff --git a/src/bin/pg_dump/pg_restore.c b/src/bin/pg_dump/pg_restore.c
index 6c129278bc5..c9776306c5c 100644
--- a/src/bin/pg_dump/pg_restore.c
+++ b/src/bin/pg_dump/pg_restore.c
@@ -45,6 +45,7 @@
#include <termios.h>
#endif
+#include "dumputils.h"
#include "fe_utils/option_utils.h"
#include "filter.h"
#include "getopt_long.h"
@@ -140,6 +141,7 @@ main(int argc, char **argv)
{"statistics", no_argument, &with_statistics, 1},
{"statistics-only", no_argument, &statistics_only, 1},
{"filter", required_argument, NULL, 4},
+ {"restrict-key", required_argument, NULL, 6},
{NULL, 0, NULL, 0}
};
@@ -315,6 +317,10 @@ main(int argc, char **argv)
opts->exit_on_error = true;
break;
+ case 6:
+ opts->restrict_key = pg_strdup(optarg);
+ break;
+
default:
/* getopt_long already emitted a complaint */
pg_log_error_hint("Try \"%s --help\" for more information.", progname);
@@ -350,8 +356,24 @@ main(int argc, char **argv)
pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit_nicely(1);
}
+
+ if (opts->restrict_key)
+ pg_fatal("options -d/--dbname and --restrict-key cannot be used together");
+
opts->useDB = 1;
}
+ else
+ {
+ /*
+ * If you don't provide a restrict key, one will be appointed for you.
+ */
+ if (!opts->restrict_key)
+ opts->restrict_key = generate_restrict_key();
+ if (!opts->restrict_key)
+ pg_fatal("could not generate restrict key");
+ if (!valid_restrict_key(opts->restrict_key))
+ pg_fatal("invalid restrict key");
+ }
/* reject conflicting "-only" options */
if (data_only && schema_only)
@@ -546,6 +568,7 @@ usage(const char *progname)
printf(_(" --no-subscriptions do not restore subscriptions\n"));
printf(_(" --no-table-access-method do not restore table access methods\n"));
printf(_(" --no-tablespaces do not restore tablespace assignments\n"));
+ printf(_(" --restrict-key=RESTRICT_KEY use provided string as psql \\restrict key\n"));
printf(_(" --section=SECTION restore named section (pre-data, data, or post-data)\n"));
printf(_(" --statistics restore the statistics\n"));
printf(_(" --statistics-only restore only the statistics, not schema or data\n"));
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index a86b38466de..e7a2d64f741 100644
--- a/src/bin/pg_dump/t/002_pg_dump.pl
+++ b/src/bin/pg_dump/t/002_pg_dump.pl
@@ -881,6 +881,16 @@ my %full_runs = (
# This is where the actual tests are defined.
my %tests = (
+ 'restrict' => {
+ all_runs => 1,
+ regexp => qr/^\\restrict [a-zA-Z0-9]+$/m,
+ },
+
+ 'unrestrict' => {
+ all_runs => 1,
+ regexp => qr/^\\unrestrict [a-zA-Z0-9]+$/m,
+ },
+
'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role GRANT' => {
create_order => 14,
create_sql => 'ALTER DEFAULT PRIVILEGES
@@ -2224,6 +2234,27 @@ my %tests = (
},
},
+ 'newline of role or table name in comment' => {
+ create_sql => qq{CREATE ROLE regress_newline;
+ ALTER ROLE regress_newline SET enable_seqscan = off;
+ ALTER ROLE regress_newline
+ RENAME TO "regress_newline\nattack";
+
+ -- meet getPartitioningInfo() "unsafe" condition
+ CREATE TYPE pp_colors AS
+ ENUM ('green', 'blue', 'black');
+ CREATE TABLE pp_enumpart (a pp_colors)
+ PARTITION BY HASH (a);
+ CREATE TABLE pp_enumpart1 PARTITION OF pp_enumpart
+ FOR VALUES WITH (MODULUS 2, REMAINDER 0);
+ CREATE TABLE pp_enumpart2 PARTITION OF pp_enumpart
+ FOR VALUES WITH (MODULUS 2, REMAINDER 1);
+ ALTER TABLE pp_enumpart
+ RENAME TO "pp_enumpart\nattack";},
+ regexp => qr/\n--[^\n]*\nattack/s,
+ like => {},
+ },
+
'CREATE TABLESPACE regress_dump_tablespace' => {
create_order => 2,
create_sql => q(
@@ -4218,7 +4249,6 @@ my %tests = (
},
'ALTER TABLE measurement PRIMARY KEY' => {
- all_runs => 1,
catch_all => 'CREATE ... commands',
create_order => 93,
create_sql =>
@@ -4270,7 +4300,6 @@ my %tests = (
},
'ALTER INDEX ... ATTACH PARTITION (primary key)' => {
- all_runs => 1,
catch_all => 'CREATE ... commands',
regexp => qr/^
\QALTER INDEX dump_test.measurement_pkey ATTACH PARTITION dump_test_second_schema.measurement_y2006m2_pkey\E
@@ -5403,9 +5432,10 @@ foreach my $run (sort keys %pgdump_runs)
# Check for proper test definitions
#
- # There should be a "like" list, even if it is empty. (This
- # makes the test more self-documenting.)
- if (!defined($tests{$test}->{like}))
+ # Either "all_runs" should be set or there should be a "like" list,
+ # even if it is empty. (This makes the test more self-documenting.)
+ if (!defined($tests{$test}->{all_runs})
+ && !defined($tests{$test}->{like}))
{
die "missing \"like\" in test \"$test\"";
}
@@ -5441,9 +5471,10 @@ foreach my $run (sort keys %pgdump_runs)
next;
}
- # Run the test listed as a like, unless it is specifically noted
- # as an unlike (generally due to an explicit exclusion or similar).
- if ($tests{$test}->{like}->{$test_key}
+ # Run the test if all_runs is set or if listed as a like, unless it is
+ # specifically noted as an unlike (generally due to an explicit
+ # exclusion or similar).
+ if (($tests{$test}->{like}->{$test_key} || $tests{$test}->{all_runs})
&& !defined($tests{$test}->{unlike}->{$test_key}))
{
if (!ok($output_file =~ $tests{$test}->{regexp},
diff --git a/src/bin/pg_dump/t/003_pg_dump_with_server.pl b/src/bin/pg_dump/t/003_pg_dump_with_server.pl
index 8dc014ed6ed..c83d3899dfb 100644
--- a/src/bin/pg_dump/t/003_pg_dump_with_server.pl
+++ b/src/bin/pg_dump/t/003_pg_dump_with_server.pl
@@ -17,6 +17,22 @@ $node->init;
$node->start;
#########################################
+# pg_dumpall: newline in database name
+
+$node->safe_psql('postgres', qq{CREATE DATABASE "regress_\nattack"});
+
+my (@cmd, $stdout, $stderr);
+@cmd = ("pg_dumpall", '--port' => $port, '--exclude-database=postgres');
+print("# Running: " . join(" ", @cmd) . "\n");
+my $result = IPC::Run::run \@cmd, '>' => \$stdout, '2>' => \$stderr;
+ok(!$result, "newline in dbname: exit code not 0");
+like(
+ $stderr,
+ qr/shell command argument contains a newline/,
+ "newline in dbname: stderr matches");
+unlike($stdout, qr/^attack/m, "newline in dbname: no comment escape");
+
+#########################################
# Verify that dumping foreign data includes only foreign tables of
# matching servers
diff --git a/src/bin/pg_dump/t/005_pg_dump_filterfile.pl b/src/bin/pg_dump/t/005_pg_dump_filterfile.pl
index f05e8a20e05..5c69ec31c39 100644
--- a/src/bin/pg_dump/t/005_pg_dump_filterfile.pl
+++ b/src/bin/pg_dump/t/005_pg_dump_filterfile.pl
@@ -418,10 +418,16 @@ command_fails_like(
qr/invalid filter command/,
"invalid syntax: incorrect filter command");
-# Test invalid object type
+# Test invalid object type.
+#
+# This test also verifies that keywords are correctly recognized as strings of
+# non-whitespace characters. If the parser incorrectly treats non-whitespace
+# delimiters (like hyphens) as keyword boundaries, "table-data" might be
+# misread as the valid object type "table". To catch such issues,
+# "table-data" is used here as an intentionally invalid object type.
open $inputfile, '>', "$tempdir/inputfile.txt"
or die "unable to open filterfile for writing";
-print $inputfile "include xxx";
+print $inputfile "exclude table-data one";
close $inputfile;
command_fails_like(
@@ -432,8 +438,8 @@ command_fails_like(
'--filter' => "$tempdir/inputfile.txt",
'postgres'
],
- qr/unsupported filter object type: "xxx"/,
- "invalid syntax: invalid object type specified, should be table, schema, foreign_data or data"
+ qr/unsupported filter object type: "table-data"/,
+ "invalid syntax: invalid object type specified"
);
# Test missing object identifier pattern
diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c
index 310f53c5577..67eedbae265 100644
--- a/src/bin/pg_upgrade/check.c
+++ b/src/bin/pg_upgrade/check.c
@@ -1713,7 +1713,7 @@ check_for_not_null_inheritance(ClusterInfo *cluster)
"If the parent column(s) are NOT NULL, then the child column must\n"
"also be marked NOT NULL, or the upgrade will fail.\n"
"You can fix this by running\n"
- " ALTER TABLE tablename ALTER column SET NOT NULL;\n"
+ " ALTER TABLE tablename ALTER column SET NOT NULL;\n"
"on each column listed in the file:\n"
" %s", report.path);
}
diff --git a/src/bin/pg_upgrade/t/002_pg_upgrade.pl b/src/bin/pg_upgrade/t/002_pg_upgrade.pl
index 0b15e38297e..4b5e895809b 100644
--- a/src/bin/pg_upgrade/t/002_pg_upgrade.pl
+++ b/src/bin/pg_upgrade/t/002_pg_upgrade.pl
@@ -86,6 +86,7 @@ sub get_dump_for_comparison
$node->run_log(
[
'pg_dump', '--no-sync',
+ '--restrict-key=test',
'-d' => $node->connstr($db),
'-f' => $dumpfile
]);
@@ -427,6 +428,7 @@ SKIP:
# that we need to use pg_dumpall from the new node here.
my @dump_command = (
'pg_dumpall', '--no-sync',
+ '--restrict-key=test',
'--dbname' => $oldnode->connstr('postgres'),
'--file' => $dump1_file);
# --extra-float-digits is needed when upgrading from a version older than 11.
@@ -624,6 +626,7 @@ is( $result,
# Second dump from the upgraded instance.
@dump_command = (
'pg_dumpall', '--no-sync',
+ '--restrict-key=test',
'--dbname' => $newnode->connstr('postgres'),
'--file' => $dump2_file);
# --extra-float-digits is needed when upgrading from a version older than 11.
diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c
index 0e00d73487c..cc602087db2 100644
--- a/src/bin/psql/command.c
+++ b/src/bin/psql/command.c
@@ -130,6 +130,8 @@ static backslashResult exec_command_pset(PsqlScanState scan_state, bool active_b
static backslashResult exec_command_quit(PsqlScanState scan_state, bool active_branch);
static backslashResult exec_command_reset(PsqlScanState scan_state, bool active_branch,
PQExpBuffer query_buf);
+static backslashResult exec_command_restrict(PsqlScanState scan_state, bool active_branch,
+ const char *cmd);
static backslashResult exec_command_s(PsqlScanState scan_state, bool active_branch);
static backslashResult exec_command_sendpipeline(PsqlScanState scan_state, bool active_branch);
static backslashResult exec_command_set(PsqlScanState scan_state, bool active_branch);
@@ -142,6 +144,8 @@ static backslashResult exec_command_syncpipeline(PsqlScanState scan_state, bool
static backslashResult exec_command_t(PsqlScanState scan_state, bool active_branch);
static backslashResult exec_command_T(PsqlScanState scan_state, bool active_branch);
static backslashResult exec_command_timing(PsqlScanState scan_state, bool active_branch);
+static backslashResult exec_command_unrestrict(PsqlScanState scan_state, bool active_branch,
+ const char *cmd);
static backslashResult exec_command_unset(PsqlScanState scan_state, bool active_branch,
const char *cmd);
static backslashResult exec_command_write(PsqlScanState scan_state, bool active_branch,
@@ -192,6 +196,8 @@ static char *pset_value_string(const char *param, printQueryOpt *popt);
static void checkWin32Codepage(void);
#endif
+static bool restricted;
+static char *restrict_key;
/*----------
@@ -237,8 +243,19 @@ HandleSlashCmds(PsqlScanState scan_state,
/* Parse off the command name */
cmd = psql_scan_slash_command(scan_state);
- /* And try to execute it */
- status = exec_command(cmd, scan_state, cstack, query_buf, previous_buf);
+ /*
+ * And try to execute it.
+ *
+ * If we are in "restricted" mode, the only allowable backslash command is
+ * \unrestrict (to exit restricted mode).
+ */
+ if (restricted && strcmp(cmd, "unrestrict") != 0)
+ {
+ pg_log_error("backslash commands are restricted; only \\unrestrict is allowed");
+ status = PSQL_CMD_ERROR;
+ }
+ else
+ status = exec_command(cmd, scan_state, cstack, query_buf, previous_buf);
if (status == PSQL_CMD_UNKNOWN)
{
@@ -416,6 +433,8 @@ exec_command(const char *cmd,
status = exec_command_quit(scan_state, active_branch);
else if (strcmp(cmd, "r") == 0 || strcmp(cmd, "reset") == 0)
status = exec_command_reset(scan_state, active_branch, query_buf);
+ else if (strcmp(cmd, "restrict") == 0)
+ status = exec_command_restrict(scan_state, active_branch, cmd);
else if (strcmp(cmd, "s") == 0)
status = exec_command_s(scan_state, active_branch);
else if (strcmp(cmd, "sendpipeline") == 0)
@@ -438,6 +457,8 @@ exec_command(const char *cmd,
status = exec_command_T(scan_state, active_branch);
else if (strcmp(cmd, "timing") == 0)
status = exec_command_timing(scan_state, active_branch);
+ else if (strcmp(cmd, "unrestrict") == 0)
+ status = exec_command_unrestrict(scan_state, active_branch, cmd);
else if (strcmp(cmd, "unset") == 0)
status = exec_command_unset(scan_state, active_branch, cmd);
else if (strcmp(cmd, "w") == 0 || strcmp(cmd, "write") == 0)
@@ -2755,6 +2776,35 @@ exec_command_reset(PsqlScanState scan_state, bool active_branch,
}
/*
+ * \restrict -- enter "restricted mode" with the provided key
+ */
+static backslashResult
+exec_command_restrict(PsqlScanState scan_state, bool active_branch,
+ const char *cmd)
+{
+ if (active_branch)
+ {
+ char *opt;
+
+ Assert(!restricted);
+
+ opt = psql_scan_slash_option(scan_state, OT_NORMAL, NULL, true);
+ if (opt == NULL || opt[0] == '\0')
+ {
+ pg_log_error("\\%s: missing required argument", cmd);
+ return PSQL_CMD_ERROR;
+ }
+
+ restrict_key = pstrdup(opt);
+ restricted = true;
+ }
+ else
+ ignore_slash_options(scan_state);
+
+ return PSQL_CMD_SKIP_LINE;
+}
+
+/*
* \s -- save history in a file or show it on the screen
*/
static backslashResult
@@ -3136,6 +3186,46 @@ exec_command_timing(PsqlScanState scan_state, bool active_branch)
}
/*
+ * \unrestrict -- exit "restricted mode" if provided key matches
+ */
+static backslashResult
+exec_command_unrestrict(PsqlScanState scan_state, bool active_branch,
+ const char *cmd)
+{
+ if (active_branch)
+ {
+ char *opt;
+
+ opt = psql_scan_slash_option(scan_state, OT_NORMAL, NULL, true);
+ if (opt == NULL || opt[0] == '\0')
+ {
+ pg_log_error("\\%s: missing required argument", cmd);
+ return PSQL_CMD_ERROR;
+ }
+
+ if (!restricted)
+ {
+ pg_log_error("\\%s: not currently in restricted mode", cmd);
+ return PSQL_CMD_ERROR;
+ }
+ else if (strcmp(opt, restrict_key) == 0)
+ {
+ pfree(restrict_key);
+ restricted = false;
+ }
+ else
+ {
+ pg_log_error("\\%s: wrong key", cmd);
+ return PSQL_CMD_ERROR;
+ }
+ }
+ else
+ ignore_slash_options(scan_state);
+
+ return PSQL_CMD_SKIP_LINE;
+}
+
+/*
* \unset -- unset variable
*/
static backslashResult
diff --git a/src/bin/psql/help.c b/src/bin/psql/help.c
index 8c62729a0d1..ed00c36695e 100644
--- a/src/bin/psql/help.c
+++ b/src/bin/psql/help.c
@@ -171,6 +171,10 @@ slashUsage(unsigned short int pager)
HELP0(" \\gset [PREFIX] execute query and store result in psql variables\n");
HELP0(" \\gx [(OPTIONS)] [FILE] as \\g, but forces expanded output mode\n");
HELP0(" \\q quit psql\n");
+ HELP0(" \\restrict RESTRICT_KEY\n"
+ " enter restricted mode with provided key\n");
+ HELP0(" \\unrestrict RESTRICT_KEY\n"
+ " exit restricted mode if key matches\n");
HELP0(" \\watch [[i=]SEC] [c=N] [m=MIN]\n"
" execute query every SEC seconds, up to N times,\n"
" stop if less than MIN rows are returned\n");
diff --git a/src/bin/psql/t/001_basic.pl b/src/bin/psql/t/001_basic.pl
index f42c3961e09..cf07a9dbd5e 100644
--- a/src/bin/psql/t/001_basic.pl
+++ b/src/bin/psql/t/001_basic.pl
@@ -530,4 +530,11 @@ psql_fails_like(
qr/COPY in a pipeline is not supported, aborting connection/,
'\copy to in pipeline: fails');
+psql_fails_like(
+ $node,
+ qq{\\restrict test
+\\! should_fail},
+ qr/backslash commands are restricted; only \\unrestrict is allowed/,
+ 'meta-command in restrict mode fails');
+
done_testing();
diff --git a/src/bin/psql/tab-complete.in.c b/src/bin/psql/tab-complete.in.c
index 1f2ca946fc5..8b10f2313f3 100644
--- a/src/bin/psql/tab-complete.in.c
+++ b/src/bin/psql/tab-complete.in.c
@@ -1918,11 +1918,11 @@ psql_completion(const char *text, int start, int end)
"\\out",
"\\parse", "\\password", "\\print", "\\prompt", "\\pset",
"\\qecho", "\\quit",
- "\\reset",
+ "\\reset", "\\restrict",
"\\s", "\\sendpipeline", "\\set", "\\setenv", "\\sf",
"\\startpipeline", "\\sv", "\\syncpipeline",
"\\t", "\\T", "\\timing",
- "\\unset",
+ "\\unrestrict", "\\unset",
"\\x",
"\\warn", "\\watch", "\\write",
"\\z",
diff --git a/src/include/access/htup_details.h b/src/include/access/htup_details.h
index aa957cf3b01..ae813a79041 100644
--- a/src/include/access/htup_details.h
+++ b/src/include/access/htup_details.h
@@ -884,7 +884,7 @@ fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
if (att_isnull(attnum - 1, tup->t_data->t_bits))
{
*isnull = true;
- return (Datum) NULL;
+ return (Datum) 0;
}
else
return nocachegetattr(tup, attnum, tupleDesc);
diff --git a/src/include/access/itup.h b/src/include/access/itup.h
index 7066c2a2868..338e90749bd 100644
--- a/src/include/access/itup.h
+++ b/src/include/access/itup.h
@@ -154,7 +154,7 @@ index_getattr(IndexTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
if (att_isnull(attnum - 1, (bits8 *) tup + sizeof(IndexTupleData)))
{
*isnull = true;
- return (Datum) NULL;
+ return (Datum) 0;
}
else
return nocache_index_getattr(tup, attnum, tupleDesc);
diff --git a/src/include/access/reloptions.h b/src/include/access/reloptions.h
index dfbb4c85460..a604a4702c3 100644
--- a/src/include/access/reloptions.h
+++ b/src/include/access/reloptions.h
@@ -233,7 +233,7 @@ extern void add_local_string_reloption(local_relopts *relopts, const char *name,
fill_string_relopt filler, int offset);
extern Datum transformRelOptions(Datum oldOptions, List *defList,
- const char *namspace, const char *const validnsps[],
+ const char *nameSpace, const char *const validnsps[],
bool acceptOidsOff, bool isReset);
extern List *untransformRelOptions(Datum options);
extern bytea *extractRelOptions(HeapTuple tuple, TupleDesc tupdesc,
diff --git a/src/include/c.h b/src/include/c.h
index 6d4495bdd9f..bbdaa88c63a 100644
--- a/src/include/c.h
+++ b/src/include/c.h
@@ -530,8 +530,6 @@ typedef uint32 bits32; /* >= 32 bits */
/* snprintf format strings to use for 64-bit integers */
#define INT64_FORMAT "%" PRId64
#define UINT64_FORMAT "%" PRIu64
-#define INT64_HEX_FORMAT "%" PRIx64
-#define UINT64_HEX_FORMAT "%" PRIx64
/*
* 128-bit signed and unsigned integers
diff --git a/src/include/common/int128.h b/src/include/common/int128.h
index a50f5709c29..62aae1bc6a7 100644
--- a/src/include/common/int128.h
+++ b/src/include/common/int128.h
@@ -6,7 +6,7 @@
* We make use of the native int128 type if there is one, otherwise
* implement things the hard way based on two int64 halves.
*
- * See src/tools/testint128.c for a simple test harness for this file.
+ * See src/test/modules/test_int128 for a simple test harness for this file.
*
* Copyright (c) 2017-2025, PostgreSQL Global Development Group
*
@@ -29,146 +29,172 @@
#endif
#endif
-
+/*
+ * If native int128 support is enabled, INT128 is just int128. Otherwise, it
+ * is a structure with separate 64-bit high and low parts.
+ *
+ * We lay out the INT128 structure with the same content and byte ordering
+ * that a native int128 type would (probably) have. This makes no difference
+ * for ordinary use of INT128, but allows union'ing INT128 with int128 for
+ * testing purposes.
+ *
+ * PG_INT128_HI_INT64 and PG_INT128_LO_UINT64 allow the (signed) high and
+ * (unsigned) low 64-bit integer parts to be extracted portably on all
+ * platforms.
+ */
#if USE_NATIVE_INT128
typedef int128 INT128;
-/*
- * Add an unsigned int64 value into an INT128 variable.
- */
-static inline void
-int128_add_uint64(INT128 *i128, uint64 v)
+#define PG_INT128_HI_INT64(i128) ((int64) ((i128) >> 64))
+#define PG_INT128_LO_UINT64(i128) ((uint64) (i128))
+
+#else
+
+typedef struct
{
- *i128 += v;
-}
+#ifdef WORDS_BIGENDIAN
+ int64 hi; /* most significant 64 bits, including sign */
+ uint64 lo; /* least significant 64 bits, without sign */
+#else
+ uint64 lo; /* least significant 64 bits, without sign */
+ int64 hi; /* most significant 64 bits, including sign */
+#endif
+} INT128;
+
+#define PG_INT128_HI_INT64(i128) ((i128).hi)
+#define PG_INT128_LO_UINT64(i128) ((i128).lo)
+
+#endif
/*
- * Add a signed int64 value into an INT128 variable.
+ * Construct an INT128 from (signed) high and (unsigned) low 64-bit integer
+ * parts.
*/
-static inline void
-int128_add_int64(INT128 *i128, int64 v)
+static inline INT128
+make_int128(int64 hi, uint64 lo)
{
- *i128 += v;
+#if USE_NATIVE_INT128
+ return (((int128) hi) << 64) + lo;
+#else
+ INT128 val;
+
+ val.hi = hi;
+ val.lo = lo;
+ return val;
+#endif
}
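A small sanity example for the constructor and accessors (values picked for illustration; the native and two-word builds are expected to agree on them):

static void
int128_layout_example(void)
{
	INT128		v = make_int128(-1, 0);		/* the value -2^64 */

	Assert(PG_INT128_HI_INT64(v) == -1);	/* signed high half */
	Assert(PG_INT128_LO_UINT64(v) == 0);	/* unsigned low half */
}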
/*
- * Add the 128-bit product of two int64 values into an INT128 variable.
- *
- * XXX with a stupid compiler, this could actually be less efficient than
- * the other implementation; maybe we should do it by hand always?
+ * Add an unsigned int64 value into an INT128 variable.
*/
static inline void
-int128_add_int64_mul_int64(INT128 *i128, int64 x, int64 y)
+int128_add_uint64(INT128 *i128, uint64 v)
{
- *i128 += (int128) x * (int128) y;
-}
+#if USE_NATIVE_INT128
+ *i128 += v;
+#else
+ /*
+ * First add the value to the .lo part, then check to see if a carry needs
+ * to be propagated into the .hi part. Since this is unsigned integer
+ * arithmetic, which is just modular arithmetic, a carry is needed if the
+ * new .lo part is less than the old .lo part (i.e., if modular
+ * wrap-around occurred). Writing this in the form below, rather than
+	 * using an "if" statement, causes modern compilers to produce branchless
+ * machine code identical to the native code.
+ */
+ uint64 oldlo = i128->lo;
-/*
- * Compare two INT128 values, return -1, 0, or +1.
- */
-static inline int
-int128_compare(INT128 x, INT128 y)
-{
- if (x < y)
- return -1;
- if (x > y)
- return 1;
- return 0;
+ i128->lo += v;
+ i128->hi += (i128->lo < oldlo);
+#endif
}
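A concrete instance of the carry rule described in the comment above, shown with plain 64-bit variables (an illustration, not code from the patch):

	uint64		lo = PG_UINT64_MAX;	/* all bits set */
	int64		hi = 0;
	uint64		oldlo = lo;

	lo += 2;					/* wraps around to 1 */
	hi += (lo < oldlo);			/* 1 < PG_UINT64_MAX, so hi picks up the carry */
	/* hi is now 1 and lo is 1, i.e. 2^64 + 1, as expected */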
/*
- * Widen int64 to INT128.
+ * Add a signed int64 value into an INT128 variable.
*/
-static inline INT128
-int64_to_int128(int64 v)
+static inline void
+int128_add_int64(INT128 *i128, int64 v)
{
- return (INT128) v;
-}
+#if USE_NATIVE_INT128
+ *i128 += v;
+#else
+ /*
+ * This is much like the above except that the carry logic differs for
+ * negative v -- we need to subtract 1 from the .hi part if the new .lo
+ * value is greater than the old .lo value. That can be achieved without
+ * any branching by adding the sign bit from v (v >> 63 = 0 or -1) to the
+ * previous result (for negative v, if the new .lo value is less than the
+ * old .lo value, the two terms cancel and we leave the .hi part
+ * unchanged, otherwise we subtract 1 from the .hi part). With modern
+ * compilers this often produces machine code identical to the native
+ * code.
+ */
+ uint64 oldlo = i128->lo;
-/*
- * Convert INT128 to int64 (losing any high-order bits).
- * This also works fine for casting down to uint64.
- */
-static inline int64
-int128_to_int64(INT128 val)
-{
- return (int64) val;
+ i128->lo += v;
+ i128->hi += (i128->lo < oldlo) + (v >> 63);
+#endif
}
-#else /* !USE_NATIVE_INT128 */
-
/*
- * We lay out the INT128 structure with the same content and byte ordering
- * that a native int128 type would (probably) have. This makes no difference
- * for ordinary use of INT128, but allows union'ing INT128 with int128 for
- * testing purposes.
+ * Add an INT128 value into an INT128 variable.
*/
-typedef struct
+static inline void
+int128_add_int128(INT128 *i128, INT128 v)
{
-#ifdef WORDS_BIGENDIAN
- int64 hi; /* most significant 64 bits, including sign */
- uint64 lo; /* least significant 64 bits, without sign */
+#if USE_NATIVE_INT128
+ *i128 += v;
#else
- uint64 lo; /* least significant 64 bits, without sign */
- int64 hi; /* most significant 64 bits, including sign */
+ int128_add_uint64(i128, v.lo);
+ i128->hi += v.hi;
#endif
-} INT128;
+}
/*
- * Add an unsigned int64 value into an INT128 variable.
+ * Subtract an unsigned int64 value from an INT128 variable.
*/
static inline void
-int128_add_uint64(INT128 *i128, uint64 v)
+int128_sub_uint64(INT128 *i128, uint64 v)
{
+#if USE_NATIVE_INT128
+ *i128 -= v;
+#else
/*
- * First add the value to the .lo part, then check to see if a carry needs
- * to be propagated into the .hi part. A carry is needed if both inputs
- * have high bits set, or if just one input has high bit set while the new
- * .lo part doesn't. Remember that .lo part is unsigned; we cast to
- * signed here just as a cheap way to check the high bit.
+ * This is like int128_add_uint64(), except we must propagate a borrow to
+ * (subtract 1 from) the .hi part if the new .lo part is greater than the
+ * old .lo part.
*/
uint64 oldlo = i128->lo;
- i128->lo += v;
- if (((int64) v < 0 && (int64) oldlo < 0) ||
- (((int64) v < 0 || (int64) oldlo < 0) && (int64) i128->lo >= 0))
- i128->hi++;
+ i128->lo -= v;
+ i128->hi -= (i128->lo > oldlo);
+#endif
}
/*
- * Add a signed int64 value into an INT128 variable.
+ * Subtract a signed int64 value from an INT128 variable.
*/
static inline void
-int128_add_int64(INT128 *i128, int64 v)
+int128_sub_int64(INT128 *i128, int64 v)
{
- /*
- * This is much like the above except that the carry logic differs for
- * negative v. Ordinarily we'd need to subtract 1 from the .hi part
- * (corresponding to adding the sign-extended bits of v to it); but if
- * there is a carry out of the .lo part, that cancels and we do nothing.
- */
+#if USE_NATIVE_INT128
+ *i128 -= v;
+#else
+ /* Like int128_add_int64() with the sign of v inverted */
uint64 oldlo = i128->lo;
- i128->lo += v;
- if (v >= 0)
- {
- if ((int64) oldlo < 0 && (int64) i128->lo >= 0)
- i128->hi++;
- }
- else
- {
- if (!((int64) oldlo < 0 || (int64) i128->lo >= 0))
- i128->hi--;
- }
+ i128->lo -= v;
+ i128->hi -= (i128->lo > oldlo) + (v >> 63);
+#endif
}
/*
- * INT64_AU32 extracts the most significant 32 bits of int64 as int64, while
- * INT64_AL32 extracts the least significant 32 bits as uint64.
+ * INT64_HI_INT32 extracts the most significant 32 bits of int64 as int32.
+ * INT64_LO_UINT32 extracts the least significant 32 bits as uint32.
*/
-#define INT64_AU32(i64) ((i64) >> 32)
-#define INT64_AL32(i64) ((i64) & UINT64CONST(0xFFFFFFFF))
+#define INT64_HI_INT32(i64) ((int32) ((i64) >> 32))
+#define INT64_LO_UINT32(i64) ((uint32) (i64))
/*
* Add the 128-bit product of two int64 values into an INT128 variable.
@@ -176,7 +202,14 @@ int128_add_int64(INT128 *i128, int64 v)
static inline void
int128_add_int64_mul_int64(INT128 *i128, int64 x, int64 y)
{
- /* INT64_AU32 must use arithmetic right shift */
+#if USE_NATIVE_INT128
+ /*
+ * XXX with a stupid compiler, this could actually be less efficient than
+ * the non-native implementation; maybe we should do it by hand always?
+ */
+ *i128 += (int128) x * (int128) y;
+#else
+ /* INT64_HI_INT32 must use arithmetic right shift */
StaticAssertDecl(((int64) -1 >> 1) == (int64) -1,
"arithmetic right shift is needed");
@@ -201,34 +234,188 @@ int128_add_int64_mul_int64(INT128 *i128, int64 x, int64 y)
/* No need to work hard if product must be zero */
if (x != 0 && y != 0)
{
- int64 x_u32 = INT64_AU32(x);
- uint64 x_l32 = INT64_AL32(x);
- int64 y_u32 = INT64_AU32(y);
- uint64 y_l32 = INT64_AL32(y);
+ int32 x_hi = INT64_HI_INT32(x);
+ uint32 x_lo = INT64_LO_UINT32(x);
+ int32 y_hi = INT64_HI_INT32(y);
+ uint32 y_lo = INT64_LO_UINT32(y);
int64 tmp;
/* the first term */
- i128->hi += x_u32 * y_u32;
-
- /* the second term: sign-extend it only if x is negative */
- tmp = x_u32 * y_l32;
- if (x < 0)
- i128->hi += INT64_AU32(tmp);
- else
- i128->hi += ((uint64) tmp) >> 32;
- int128_add_uint64(i128, ((uint64) INT64_AL32(tmp)) << 32);
-
- /* the third term: sign-extend it only if y is negative */
- tmp = x_l32 * y_u32;
- if (y < 0)
- i128->hi += INT64_AU32(tmp);
- else
- i128->hi += ((uint64) tmp) >> 32;
- int128_add_uint64(i128, ((uint64) INT64_AL32(tmp)) << 32);
+ i128->hi += (int64) x_hi * (int64) y_hi;
+
+ /* the second term: sign-extended with the sign of x */
+ tmp = (int64) x_hi * (int64) y_lo;
+ i128->hi += INT64_HI_INT32(tmp);
+ int128_add_uint64(i128, ((uint64) INT64_LO_UINT32(tmp)) << 32);
+
+ /* the third term: sign-extended with the sign of y */
+ tmp = (int64) x_lo * (int64) y_hi;
+ i128->hi += INT64_HI_INT32(tmp);
+ int128_add_uint64(i128, ((uint64) INT64_LO_UINT32(tmp)) << 32);
/* the fourth term: always unsigned */
- int128_add_uint64(i128, x_l32 * y_l32);
+ int128_add_uint64(i128, (uint64) x_lo * (uint64) y_lo);
}
+#endif
+}
+
+/*
+ * Subtract the 128-bit product of two int64 values from an INT128 variable.
+ */
+static inline void
+int128_sub_int64_mul_int64(INT128 *i128, int64 x, int64 y)
+{
+#if USE_NATIVE_INT128
+ *i128 -= (int128) x * (int128) y;
+#else
+ /* As above, except subtract the 128-bit product */
+ if (x != 0 && y != 0)
+ {
+ int32 x_hi = INT64_HI_INT32(x);
+ uint32 x_lo = INT64_LO_UINT32(x);
+ int32 y_hi = INT64_HI_INT32(y);
+ uint32 y_lo = INT64_LO_UINT32(y);
+ int64 tmp;
+
+ /* the first term */
+ i128->hi -= (int64) x_hi * (int64) y_hi;
+
+ /* the second term: sign-extended with the sign of x */
+ tmp = (int64) x_hi * (int64) y_lo;
+ i128->hi -= INT64_HI_INT32(tmp);
+ int128_sub_uint64(i128, ((uint64) INT64_LO_UINT32(tmp)) << 32);
+
+ /* the third term: sign-extended with the sign of y */
+ tmp = (int64) x_lo * (int64) y_hi;
+ i128->hi -= INT64_HI_INT32(tmp);
+ int128_sub_uint64(i128, ((uint64) INT64_LO_UINT32(tmp)) << 32);
+
+ /* the fourth term: always unsigned */
+ int128_sub_uint64(i128, (uint64) x_lo * (uint64) y_lo);
+ }
+#endif
+}
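
For comparison (again, not part of the patch), the same four-term schoolbook decomposition is easier to follow for a plain unsigned 64x64-to-128-bit multiply, where none of the sign handling above is needed; mul_u64_full is an invented name.

#include <stdint.h>

/* x*y = (x_hi*y_hi << 64) + ((x_hi*y_lo + x_lo*y_hi) << 32) + x_lo*y_lo */
static void
mul_u64_full(uint64_t x, uint64_t y, uint64_t *hi, uint64_t *lo)
{
	uint64_t	x_hi = x >> 32;
	uint64_t	x_lo = (uint32_t) x;
	uint64_t	y_hi = y >> 32;
	uint64_t	y_lo = (uint32_t) y;
	uint64_t	lo_lo = x_lo * y_lo;	/* bits 0..63 */
	uint64_t	hi_lo = x_hi * y_lo;	/* bits 32..95 */
	uint64_t	lo_hi = x_lo * y_hi;	/* bits 32..95 */
	uint64_t	hi_hi = x_hi * y_hi;	/* bits 64..127 */

	/* sum the middle 32-bit column; at most 3 * (2^32 - 1), so no overflow */
	uint64_t	mid = (lo_lo >> 32) + (uint32_t) hi_lo + (uint32_t) lo_hi;

	*lo = (mid << 32) | (uint32_t) lo_lo;
	*hi = hi_hi + (hi_lo >> 32) + (lo_hi >> 32) + (mid >> 32);
}

The signed variants in the patch differ only in that the two cross terms carry the signs of x and y, which is why they are computed as int64 products and their high halves added via INT64_HI_INT32().
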
+
+/*
+ * Divide an INT128 variable by a signed int32 value, returning the quotient
+ * and remainder. The remainder will have the same sign as *i128.
+ *
+ * Note: This provides no protection against dividing by 0, or dividing
+ * INT128_MIN by -1, which overflows. It is the caller's responsibility to
+ * guard against those.
+ */
+static inline void
+int128_div_mod_int32(INT128 *i128, int32 v, int32 *remainder)
+{
+#if USE_NATIVE_INT128
+ int128 old_i128 = *i128;
+
+ *i128 /= v;
+ *remainder = (int32) (old_i128 - *i128 * v);
+#else
+ /*
+ * To avoid any intermediate values overflowing (as happens if INT64_MIN
+ * is divided by -1), we first compute the quotient abs(*i128) / abs(v)
+ * using unsigned 64-bit arithmetic, and then fix the signs up at the end.
+ *
+ * The quotient is computed using the short division algorithm described
+ * in Knuth volume 2, section 4.3.1 exercise 16 (cf. div_var_int() in
+ * numeric.c). Since the absolute value of the divisor is known to be at
+ * most 2^31, the remainder carried from one digit to the next is at most
+ * 2^31 - 1, and so there is no danger of overflow when this is combined
+ * with the next digit (a 32-bit unsigned integer).
+ */
+ uint64 n_hi;
+ uint64 n_lo;
+ uint32 d;
+ uint64 q;
+ uint64 r;
+ uint64 tmp;
+
+ /* numerator: absolute value of *i128 */
+ if (i128->hi < 0)
+ {
+ n_hi = 0 - ((uint64) i128->hi);
+ n_lo = 0 - i128->lo;
+ if (n_lo != 0)
+ n_hi--;
+ }
+ else
+ {
+ n_hi = i128->hi;
+ n_lo = i128->lo;
+ }
+
+	/* denominator: absolute value of v */
+	d = (v < 0) ? -((uint32) v) : (uint32) v;	/* abs(v), safe for INT32_MIN */
+
+ /* quotient and remainder of high 64 bits */
+ q = n_hi / d;
+ r = n_hi % d;
+ n_hi = q;
+
+ /* quotient and remainder of next 32 bits (upper half of n_lo) */
+ tmp = (r << 32) + (n_lo >> 32);
+ q = tmp / d;
+ r = tmp % d;
+
+ /* quotient and remainder of last 32 bits (lower half of n_lo) */
+ tmp = (r << 32) + (uint32) n_lo;
+ n_lo = q << 32;
+ q = tmp / d;
+ r = tmp % d;
+ n_lo += q;
+
+ /* final remainder should have the same sign as *i128 */
+ *remainder = i128->hi < 0 ? (int32) (0 - r) : (int32) r;
+
+ /* store the quotient in *i128, negating it if necessary */
+ if ((i128->hi < 0) != (v < 0))
+ {
+ n_hi = 0 - n_hi;
+ n_lo = 0 - n_lo;
+ if (n_lo != 0)
+ n_hi--;
+ }
+ i128->hi = (int64) n_hi;
+ i128->lo = n_lo;
+#endif
+}
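
The short-division argument in the comment above (the carried remainder is always small enough to be combined with the next digit) is perhaps easiest to see on a scaled-down version of the same algorithm. The following sketch is not part of the patch -- div_u64_by_u16 is an invented name -- and divides an unsigned 64-bit value by a nonzero 16-bit divisor using only 32-bit arithmetic, one 16-bit digit at a time.

#include <stdint.h>

static uint64_t
div_u64_by_u16(uint64_t n, uint16_t d, uint16_t *rem)
{
	uint64_t	q = 0;
	uint32_t	r = 0;
	int			shift;

	for (shift = 48; shift >= 0; shift -= 16)
	{
		uint32_t	digit = (uint32_t) ((n >> shift) & 0xFFFF);

		/* r < d <= 0xFFFF, so (r << 16) + digit always fits in 32 bits */
		uint32_t	tmp = (r << 16) + digit;

		q |= (uint64_t) (tmp / d) << shift; /* next quotient digit */
		r = tmp % d;			/* carried into the next step */
	}

	*rem = (uint16_t) r;
	return q;
}

int128_div_mod_int32() applies the same scheme with 64-bit arithmetic and 32-bit digits, plus the sign fix-ups shown above.
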
+
+/*
+ * Test if an INT128 value is zero.
+ */
+static inline bool
+int128_is_zero(INT128 x)
+{
+#if USE_NATIVE_INT128
+ return x == 0;
+#else
+ return x.hi == 0 && x.lo == 0;
+#endif
+}
+
+/*
+ * Return the sign of an INT128 value (returns -1, 0, or +1).
+ */
+static inline int
+int128_sign(INT128 x)
+{
+#if USE_NATIVE_INT128
+ if (x < 0)
+ return -1;
+ if (x > 0)
+ return 1;
+ return 0;
+#else
+ if (x.hi < 0)
+ return -1;
+ if (x.hi > 0)
+ return 1;
+ if (x.lo > 0)
+ return 1;
+ return 0;
+#endif
}
/*
@@ -237,6 +424,13 @@ int128_add_int64_mul_int64(INT128 *i128, int64 x, int64 y)
static inline int
int128_compare(INT128 x, INT128 y)
{
+#if USE_NATIVE_INT128
+ if (x < y)
+ return -1;
+ if (x > y)
+ return 1;
+ return 0;
+#else
if (x.hi < y.hi)
return -1;
if (x.hi > y.hi)
@@ -246,6 +440,7 @@ int128_compare(INT128 x, INT128 y)
if (x.lo > y.lo)
return 1;
return 0;
+#endif
}
/*
@@ -254,11 +449,15 @@ int128_compare(INT128 x, INT128 y)
static inline INT128
int64_to_int128(int64 v)
{
+#if USE_NATIVE_INT128
+ return (INT128) v;
+#else
INT128 val;
val.lo = (uint64) v;
val.hi = (v < 0) ? -INT64CONST(1) : INT64CONST(0);
return val;
+#endif
}
/*
@@ -268,9 +467,11 @@ int64_to_int128(int64 v)
static inline int64
int128_to_int64(INT128 val)
{
+#if USE_NATIVE_INT128
+ return (int64) val;
+#else
return (int64) val.lo;
+#endif
}
-#endif /* USE_NATIVE_INT128 */
-
#endif /* INT128_H */
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index a71502efeed..10dcea037c3 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -242,6 +242,7 @@ extern void standard_ExecutorEnd(QueryDesc *queryDesc);
extern void ExecutorRewind(QueryDesc *queryDesc);
extern bool ExecCheckPermissions(List *rangeTable,
List *rteperminfos, bool ereport_on_violation);
+extern bool ExecCheckOneRelPerms(RTEPermissionInfo *perminfo);
extern void CheckValidResultRel(ResultRelInfo *resultRelInfo, CmdType operation,
List *mergeActions);
extern void InitResultRelInfo(ResultRelInfo *resultRelInfo,
diff --git a/src/include/libpq/protocol.h b/src/include/libpq/protocol.h
index b0bcb3cdc26..c64e628628d 100644
--- a/src/include/libpq/protocol.h
+++ b/src/include/libpq/protocol.h
@@ -69,6 +69,27 @@
#define PqMsg_Progress 'P'
+/* Replication codes sent by the primary (wrapped in CopyData messages). */
+
+#define PqReplMsg_Keepalive 'k'
+#define PqReplMsg_PrimaryStatusUpdate 's'
+#define PqReplMsg_WALData 'w'
+
+
+/* Replication codes sent by the standby (wrapped in CopyData messages). */
+
+#define PqReplMsg_HotStandbyFeedback 'h'
+#define PqReplMsg_PrimaryStatusRequest 'p'
+#define PqReplMsg_StandbyStatusUpdate 'r'
+
+
+/* Codes used for backups via COPY OUT (wrapped in CopyData messages). */
+
+#define PqBackupMsg_Manifest 'm'
+#define PqBackupMsg_NewArchive 'n'
+#define PqBackupMsg_ProgressReport 'p'
+
+
/* These are the authentication request codes sent by the backend. */
#define AUTH_REQ_OK 0 /* User is authenticated */
diff --git a/src/include/optimizer/plancat.h b/src/include/optimizer/plancat.h
index d6f6f4ad2d7..dd8f2cd157f 100644
--- a/src/include/optimizer/plancat.h
+++ b/src/include/optimizer/plancat.h
@@ -76,6 +76,8 @@ extern double get_function_rows(PlannerInfo *root, Oid funcid, Node *node);
extern bool has_row_triggers(PlannerInfo *root, Index rti, CmdType event);
+extern bool has_transition_tables(PlannerInfo *root, Index rti, CmdType event);
+
extern bool has_stored_generated_columns(PlannerInfo *root, Index rti);
extern Bitmapset *get_dependent_generated_columns(PlannerInfo *root, Index rti,
diff --git a/src/include/utils/pg_locale.h b/src/include/utils/pg_locale.h
index 931f5b3b880..2b072cafb4d 100644
--- a/src/include/utils/pg_locale.h
+++ b/src/include/utils/pg_locale.h
@@ -18,6 +18,8 @@
/* only include the C APIs, to avoid errors in cpluspluscheck */
#undef U_SHOW_CPLUSPLUS_API
#define U_SHOW_CPLUSPLUS_API 0
+#undef U_SHOW_CPLUSPLUS_HEADER_API
+#define U_SHOW_CPLUSPLUS_HEADER_API 0
#include <unicode/ucol.h>
#endif
diff --git a/src/include/utils/selfuncs.h b/src/include/utils/selfuncs.h
index 013049b3098..fb4fa53363d 100644
--- a/src/include/utils/selfuncs.h
+++ b/src/include/utils/selfuncs.h
@@ -96,7 +96,8 @@ typedef struct VariableStatData
int32 atttypmod; /* actual typmod (after stripping relabel) */
bool isunique; /* matches unique index, DISTINCT or GROUP-BY
* clause */
- bool acl_ok; /* result of ACL check on table or column */
+ bool acl_ok; /* true if user has SELECT privilege on all
+ * rows from the table or column */
} VariableStatData;
#define ReleaseVariableStats(vardata) \
@@ -153,6 +154,7 @@ extern PGDLLIMPORT get_index_stats_hook_type get_index_stats_hook;
extern void examine_variable(PlannerInfo *root, Node *node, int varRelid,
VariableStatData *vardata);
+extern bool all_rows_selectable(PlannerInfo *root, Index varno, Bitmapset *varattnos);
extern bool statistic_proc_security_check(VariableStatData *vardata, Oid func_oid);
extern bool get_restriction_variable(PlannerInfo *root, List *args,
int varRelid,
diff --git a/src/interfaces/ecpg/compatlib/informix.c b/src/interfaces/ecpg/compatlib/informix.c
index e829d722f22..ca11a81f1bc 100644
--- a/src/interfaces/ecpg/compatlib/informix.c
+++ b/src/interfaces/ecpg/compatlib/informix.c
@@ -807,8 +807,10 @@ rfmtlong(long lng_val, const char *fmt, char *outbuf)
if (strchr(fmt, (int) '(') && strchr(fmt, (int) ')'))
brackets_ok = 1;
- /* get position of the right-most dot in the format-string */
- /* and fill the temp-string wit '0's up to there. */
+ /*
+ * get position of the right-most dot in the format-string and fill the
+ * temp-string with '0's up to there.
+ */
dotpos = getRightMostDot(fmt);
/* start to parse the format-string */
diff --git a/src/interfaces/libpq-oauth/oauth-curl.c b/src/interfaces/libpq-oauth/oauth-curl.c
index dba9a684fa8..aa50b00d053 100644
--- a/src/interfaces/libpq-oauth/oauth-curl.c
+++ b/src/interfaces/libpq-oauth/oauth-curl.c
@@ -278,6 +278,7 @@ struct async_ctx
bool user_prompted; /* have we already sent the authz prompt? */
bool used_basic_auth; /* did we send a client secret? */
bool debugging; /* can we give unsafe developer assistance? */
+ int dbg_num_calls; /* (debug mode) how many times were we called? */
};
/*
@@ -1291,22 +1292,31 @@ register_socket(CURL *curl, curl_socket_t socket, int what, void *ctx,
return 0;
#elif defined(HAVE_SYS_EVENT_H)
- struct kevent ev[2] = {0};
+ struct kevent ev[2];
struct kevent ev_out[2];
struct timespec timeout = {0};
int nev = 0;
int res;
+ /*
+ * We don't know which of the events is currently registered, perhaps
+ * both, so we always try to remove unneeded events. This means we need to
+ * tolerate ENOENT below.
+ */
switch (what)
{
case CURL_POLL_IN:
EV_SET(&ev[nev], socket, EVFILT_READ, EV_ADD | EV_RECEIPT, 0, 0, 0);
nev++;
+ EV_SET(&ev[nev], socket, EVFILT_WRITE, EV_DELETE | EV_RECEIPT, 0, 0, 0);
+ nev++;
break;
case CURL_POLL_OUT:
EV_SET(&ev[nev], socket, EVFILT_WRITE, EV_ADD | EV_RECEIPT, 0, 0, 0);
nev++;
+ EV_SET(&ev[nev], socket, EVFILT_READ, EV_DELETE | EV_RECEIPT, 0, 0, 0);
+ nev++;
break;
case CURL_POLL_INOUT:
@@ -1317,12 +1327,6 @@ register_socket(CURL *curl, curl_socket_t socket, int what, void *ctx,
break;
case CURL_POLL_REMOVE:
-
- /*
- * We don't know which of these is currently registered, perhaps
- * both, so we try to remove both. This means we need to tolerate
- * ENOENT below.
- */
EV_SET(&ev[nev], socket, EVFILT_READ, EV_DELETE | EV_RECEIPT, 0, 0, 0);
nev++;
EV_SET(&ev[nev], socket, EVFILT_WRITE, EV_DELETE | EV_RECEIPT, 0, 0, 0);
@@ -1334,7 +1338,10 @@ register_socket(CURL *curl, curl_socket_t socket, int what, void *ctx,
return -1;
}
- res = kevent(actx->mux, ev, nev, ev_out, lengthof(ev_out), &timeout);
+ Assert(nev <= lengthof(ev));
+ Assert(nev <= lengthof(ev_out));
+
+ res = kevent(actx->mux, ev, nev, ev_out, nev, &timeout);
if (res < 0)
{
actx_error(actx, "could not modify kqueue: %m");
@@ -1377,6 +1384,53 @@ register_socket(CURL *curl, curl_socket_t socket, int what, void *ctx,
}
/*
+ * If there is no work to do on any of the descriptors in the multiplexer, then
+ * this function must ensure that the multiplexer is not readable.
+ *
+ * Unlike epoll descriptors, kqueue descriptors only transition from readable to
+ * unreadable when kevent() is called and finds nothing, after removing
+ * level-triggered conditions that have gone away. We therefore need a dummy
+ * kevent() call after operations might have been performed on the monitored
+ * sockets or timer_fd. Any event returned is ignored here, but it also remains
+ * queued (being level-triggered) and leaves the descriptor readable. This is a
+ * no-op for epoll descriptors.
+ */
+static bool
+comb_multiplexer(struct async_ctx *actx)
+{
+#if defined(HAVE_SYS_EPOLL_H)
+ /* The epoll implementation doesn't hold onto stale events. */
+ return true;
+#elif defined(HAVE_SYS_EVENT_H)
+ struct timespec timeout = {0};
+ struct kevent ev;
+
+ /*
+ * Try to read a single pending event. We can actually ignore the result:
+ * either we found an event to process, in which case the multiplexer is
+ * correctly readable for that event at minimum, and it doesn't matter if
+ * there are any stale events; or we didn't find any, in which case the
+ * kernel will have discarded any stale events as it traveled to the end
+ * of the queue.
+ *
+ * Note that this depends on our registrations being level-triggered --
+ * even the timer, so we use a chained kqueue for that instead of an
+ * EVFILT_TIMER on the top-level mux. If we used edge-triggered events,
+ * this call would improperly discard them.
+ */
+ if (kevent(actx->mux, NULL, 0, &ev, 1, &timeout) < 0)
+ {
+ actx_error(actx, "could not comb kqueue: %m");
+ return false;
+ }
+
+ return true;
+#else
+#error comb_multiplexer is not implemented on this platform
+#endif
+}
+
+/*
* Enables or disables the timer in the multiplexer set. The timeout value is
* in milliseconds (negative values disable the timer).
*
@@ -1483,40 +1537,20 @@ set_timer(struct async_ctx *actx, long timeout)
/*
* Returns 1 if the timeout in the multiplexer set has expired since the last
- * call to set_timer(), 0 if the timer is still running, or -1 (with an
- * actx_error() report) if the timer cannot be queried.
+ * call to set_timer(), 0 if the timer is either still running or disarmed, or
+ * -1 (with an actx_error() report) if the timer cannot be queried.
*/
static int
timer_expired(struct async_ctx *actx)
{
-#if defined(HAVE_SYS_EPOLL_H)
- struct itimerspec spec = {0};
-
- if (timerfd_gettime(actx->timerfd, &spec) < 0)
- {
- actx_error(actx, "getting timerfd value: %m");
- return -1;
- }
-
- /*
- * This implementation assumes we're using single-shot timers. If you
- * change to using intervals, you'll need to reimplement this function
- * too, possibly with the read() or select() interfaces for timerfd.
- */
- Assert(spec.it_interval.tv_sec == 0
- && spec.it_interval.tv_nsec == 0);
-
- /* If the remaining time to expiration is zero, we're done. */
- return (spec.it_value.tv_sec == 0
- && spec.it_value.tv_nsec == 0);
-#elif defined(HAVE_SYS_EVENT_H)
+#if defined(HAVE_SYS_EPOLL_H) || defined(HAVE_SYS_EVENT_H)
int res;
- /* Is the timer queue ready? */
+ /* Is the timer ready? */
res = PQsocketPoll(actx->timerfd, 1 /* forRead */ , 0, 0);
if (res < 0)
{
- actx_error(actx, "checking kqueue for timeout: %m");
+ actx_error(actx, "checking timer expiration: %m");
return -1;
}
@@ -1549,6 +1583,36 @@ register_timer(CURLM *curlm, long timeout, void *ctx)
}
/*
+ * Removes any expired-timer event from the multiplexer. If was_expired is not
+ * NULL, it is set to whether the timer had expired at the time of the call.
+ */
+static bool
+drain_timer_events(struct async_ctx *actx, bool *was_expired)
+{
+ int res;
+
+ res = timer_expired(actx);
+ if (res < 0)
+ return false;
+
+ if (res > 0)
+ {
+ /*
+ * Timer is expired. We could drain the event manually from the
+ * timerfd, but it's easier to simply disable it; that keeps the
+ * platform-specific code in set_timer().
+ */
+ if (!set_timer(actx, -1))
+ return false;
+ }
+
+ if (was_expired)
+ *was_expired = (res > 0);
+
+ return true;
+}
+
+/*
* Prints Curl request debugging information to stderr.
*
* Note that this will expose a number of critical secrets, so users have to opt
@@ -2751,38 +2815,64 @@ pg_fe_run_oauth_flow_impl(PGconn *conn)
{
PostgresPollingStatusType status;
+ /*
+ * Clear any expired timeout before calling back into
+ * Curl. Curl is not guaranteed to do this for us, because
+ * its API expects us to use single-shot (i.e.
+ * edge-triggered) timeouts, and ours are level-triggered
+ * via the mux.
+ *
+ * This can't be combined with the comb_multiplexer() call
+ * below: we might accidentally clear a short timeout that
+ * was both set and expired during the call to
+ * drive_request().
+ */
+ if (!drain_timer_events(actx, NULL))
+ goto error_return;
+
+ /* Move the request forward. */
status = drive_request(actx);
if (status == PGRES_POLLING_FAILED)
goto error_return;
- else if (status != PGRES_POLLING_OK)
- {
- /* not done yet */
- return status;
- }
+ else if (status == PGRES_POLLING_OK)
+ break; /* done! */
- break;
+ /*
+ * This request is still running.
+ *
+ * Make sure that stale events don't cause us to come back
+ * early. (Currently, this can occur only with kqueue.) If
+ * this is forgotten, the multiplexer can get stuck in a
+ * signaled state and we'll burn CPU cycles pointlessly.
+ */
+ if (!comb_multiplexer(actx))
+ goto error_return;
+
+ return status;
}
case OAUTH_STEP_WAIT_INTERVAL:
-
- /*
- * The client application is supposed to wait until our timer
- * expires before calling PQconnectPoll() again, but that
- * might not happen. To avoid sending a token request early,
- * check the timer before continuing.
- */
- if (!timer_expired(actx))
{
- set_conn_altsock(conn, actx->timerfd);
- return PGRES_POLLING_READING;
- }
+ bool expired;
- /* Disable the expired timer. */
- if (!set_timer(actx, -1))
- goto error_return;
+ /*
+ * The client application is supposed to wait until our
+ * timer expires before calling PQconnectPoll() again, but
+ * that might not happen. To avoid sending a token request
+ * early, check the timer before continuing.
+ */
+ if (!drain_timer_events(actx, &expired))
+ goto error_return;
- break;
+ if (!expired)
+ {
+ set_conn_altsock(conn, actx->timerfd);
+ return PGRES_POLLING_READING;
+ }
+
+ break;
+ }
}
/*
@@ -2932,6 +3022,8 @@ PostgresPollingStatusType
pg_fe_run_oauth_flow(PGconn *conn)
{
PostgresPollingStatusType result;
+ fe_oauth_state *state = conn_sasl_state(conn);
+ struct async_ctx *actx;
#ifndef WIN32
sigset_t osigset;
bool sigpipe_pending;
@@ -2960,6 +3052,25 @@ pg_fe_run_oauth_flow(PGconn *conn)
result = pg_fe_run_oauth_flow_impl(conn);
+ /*
+ * To assist with finding bugs in comb_multiplexer() and
+ * drain_timer_events(), when we're in debug mode, track the total number
+ * of calls to this function and print that at the end of the flow.
+ *
+ * Be careful that state->async_ctx could be NULL if early initialization
+ * fails during the first call.
+ */
+ actx = state->async_ctx;
+ Assert(actx || result == PGRES_POLLING_FAILED);
+
+ if (actx && actx->debugging)
+ {
+ actx->dbg_num_calls++;
+ if (result == PGRES_POLLING_OK || result == PGRES_POLLING_FAILED)
+ fprintf(stderr, "[libpq] total number of polls: %d\n",
+ actx->dbg_num_calls);
+ }
+
#ifndef WIN32
if (masked)
{
diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c
index 29cb4d7e47f..73ba1748fe0 100644
--- a/src/pl/plperl/plperl.c
+++ b/src/pl/plperl/plperl.c
@@ -1453,7 +1453,7 @@ plperl_sv_to_literal(SV *sv, char *fqtypename)
check_spi_usage_allowed();
- typid = DirectFunctionCall1(regtypein, CStringGetDatum(fqtypename));
+ typid = DatumGetObjectId(DirectFunctionCall1(regtypein, CStringGetDatum(fqtypename)));
if (!OidIsValid(typid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
@@ -2569,13 +2569,13 @@ plperl_trigger_handler(PG_FUNCTION_ARGS)
TriggerData *trigdata = ((TriggerData *) fcinfo->context);
if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
- retval = (Datum) trigdata->tg_trigtuple;
+ retval = PointerGetDatum(trigdata->tg_trigtuple);
else if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
- retval = (Datum) trigdata->tg_newtuple;
+ retval = PointerGetDatum(trigdata->tg_newtuple);
else if (TRIGGER_FIRED_BY_DELETE(trigdata->tg_event))
- retval = (Datum) trigdata->tg_trigtuple;
+ retval = PointerGetDatum(trigdata->tg_trigtuple);
else if (TRIGGER_FIRED_BY_TRUNCATE(trigdata->tg_event))
- retval = (Datum) trigdata->tg_trigtuple;
+ retval = PointerGetDatum(trigdata->tg_trigtuple);
else
retval = (Datum) 0; /* can this happen? */
}
diff --git a/src/test/modules/Makefile b/src/test/modules/Makefile
index 7d3d3d52b45..903a8ac151a 100644
--- a/src/test/modules/Makefile
+++ b/src/test/modules/Makefile
@@ -25,6 +25,7 @@ SUBDIRS = \
test_escape \
test_extensions \
test_ginpostinglist \
+ test_int128 \
test_integerset \
test_json_parser \
test_lfind \
diff --git a/src/test/modules/meson.build b/src/test/modules/meson.build
index dd5cd065ba1..93be0f57289 100644
--- a/src/test/modules/meson.build
+++ b/src/test/modules/meson.build
@@ -24,6 +24,7 @@ subdir('test_dsm_registry')
subdir('test_escape')
subdir('test_extensions')
subdir('test_ginpostinglist')
+subdir('test_int128')
subdir('test_integerset')
subdir('test_json_parser')
subdir('test_lfind')
diff --git a/src/test/modules/oauth_validator/t/001_server.pl b/src/test/modules/oauth_validator/t/001_server.pl
index 41672ebd5c6..c0dafb8be76 100644
--- a/src/test/modules/oauth_validator/t/001_server.pl
+++ b/src/test/modules/oauth_validator/t/001_server.pl
@@ -418,6 +418,35 @@ $node->connect_fails(
qr/failed to obtain access token: mutual TLS required for client \(invalid_client\)/
);
+# Count the number of calls to the internal flow when multiple retries are
+# triggered. The exact number depends on many things -- the TCP stack, the
+# version of Curl in use, random chance -- but a ridiculously high number
+# suggests something is wrong with our ability to clear multiplexer events after
+# they're no longer applicable.
+my ($ret, $stdout, $stderr) = $node->psql(
+ 'postgres',
+ "SELECT 'connected for call count'",
+ extra_params => ['-w'],
+ connstr => connstr(stage => 'token', retries => 2),
+ on_error_stop => 0);
+
+is($ret, 0, "call count connection succeeds");
+like(
+ $stderr,
+ qr@Visit https://example\.com/ and enter the code: postgresuser@,
+ "call count: stderr matches");
+
+my $count_pattern = qr/\[libpq\] total number of polls: (\d+)/;
+if (like($stderr, $count_pattern, "call count: count is printed"))
+{
+	# For reference, a typical flow with two retries might take 5 to 15
+ # calls to the client implementation. And while this will probably continue
+ # to change across OSes and Curl updates, we're likely in trouble if we see
+ # hundreds or thousands of calls.
+ $stderr =~ $count_pattern;
+ cmp_ok($1, '<', 100, "call count is reasonably small");
+}
+
# Stress test: make sure our builtin flow operates correctly even if the client
# application isn't respecting PGRES_POLLING_READING/WRITING signals returned
# from PQconnectPoll().
@@ -428,7 +457,7 @@ my @cmd = (
connstr(stage => 'all', retries => 1, interval => 1));
note "running '" . join("' '", @cmd) . "'";
-my ($stdout, $stderr) = run_command(\@cmd);
+($stdout, $stderr) = run_command(\@cmd);
like($stdout, qr/connection succeeded/, "stress-async: stdout matches");
unlike(
diff --git a/src/test/modules/test_int128/.gitignore b/src/test/modules/test_int128/.gitignore
new file mode 100644
index 00000000000..277fec6ed2c
--- /dev/null
+++ b/src/test/modules/test_int128/.gitignore
@@ -0,0 +1,2 @@
+/tmp_check/
+/test_int128
diff --git a/src/test/modules/test_int128/Makefile b/src/test/modules/test_int128/Makefile
new file mode 100644
index 00000000000..2e86ee93a9d
--- /dev/null
+++ b/src/test/modules/test_int128/Makefile
@@ -0,0 +1,23 @@
+# src/test/modules/test_int128/Makefile
+
+PGFILEDESC = "test_int128 - test 128-bit integer arithmetic"
+
+PROGRAM = test_int128
+OBJS = $(WIN32RES) test_int128.o
+
+PG_CPPFLAGS = -I$(libpq_srcdir)
+PG_LIBS_INTERNAL += $(libpq_pgport)
+
+NO_INSTALL = 1
+TAP_TESTS = 1
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/test_int128
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/src/test/modules/test_int128/meson.build b/src/test/modules/test_int128/meson.build
new file mode 100644
index 00000000000..4c2be7a0326
--- /dev/null
+++ b/src/test/modules/test_int128/meson.build
@@ -0,0 +1,33 @@
+# Copyright (c) 2025, PostgreSQL Global Development Group
+
+test_int128_sources = files(
+ 'test_int128.c',
+)
+
+if host_system == 'windows'
+ test_int128_sources += rc_bin_gen.process(win32ver_rc, extra_args: [
+ '--NAME', 'test_int128',
+ '--FILEDESC', 'test int128 program',])
+endif
+
+test_int128 = executable('test_int128',
+ test_int128_sources,
+ dependencies: [frontend_code, libpq],
+ kwargs: default_bin_args + {
+ 'install': false,
+ },
+)
+testprep_targets += test_int128
+
+
+tests += {
+ 'name': 'test_int128',
+ 'sd': meson.current_source_dir(),
+ 'bd': meson.current_build_dir(),
+ 'tap': {
+ 'tests': [
+ 't/001_test_int128.pl',
+ ],
+ 'deps': [test_int128],
+ },
+}
diff --git a/src/test/modules/test_int128/t/001_test_int128.pl b/src/test/modules/test_int128/t/001_test_int128.pl
new file mode 100644
index 00000000000..0c683869f34
--- /dev/null
+++ b/src/test/modules/test_int128/t/001_test_int128.pl
@@ -0,0 +1,27 @@
+# Copyright (c) 2025, PostgreSQL Global Development Group
+
+# Test 128-bit integer arithmetic code in int128.h
+
+use strict;
+use warnings FATAL => 'all';
+
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# Run the test program with 1M iterations
+my $exe = "test_int128";
+my $size = 1_000_000;
+
+note "testing executable $exe";
+
+my ($stdout, $stderr) = run_command([ $exe, $size ]);
+
+SKIP:
+{
+ skip "no native int128 type", 2 if $stdout =~ /skipping tests/;
+
+ is($stdout, "", "test_int128: no stdout");
+ is($stderr, "", "test_int128: no stderr");
+}
+
+done_testing();
diff --git a/src/test/modules/test_int128/test_int128.c b/src/test/modules/test_int128/test_int128.c
new file mode 100644
index 00000000000..c9c17a73a4e
--- /dev/null
+++ b/src/test/modules/test_int128/test_int128.c
@@ -0,0 +1,281 @@
+/*-------------------------------------------------------------------------
+ *
+ * test_int128.c
+ * Testbed for roll-our-own 128-bit integer arithmetic.
+ *
+ * This is a standalone test program that compares the behavior of an
+ * implementation in int128.h to an (assumed correct) int128 native type.
+ *
+ * Copyright (c) 2017-2025, PostgreSQL Global Development Group
+ *
+ *
+ * IDENTIFICATION
+ * src/test/modules/test_int128/test_int128.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres_fe.h"
+
+#include <time.h>
+
+/* Require a native int128 type */
+#ifdef HAVE_INT128
+
+/*
+ * By default, we test the non-native implementation in int128.h; but
+ * by predefining USE_NATIVE_INT128 to 1, you can test the native
+ * implementation, just to be sure.
+ */
+#ifndef USE_NATIVE_INT128
+#define USE_NATIVE_INT128 0
+#endif
+
+#include "common/int128.h"
+#include "common/pg_prng.h"
+
+/*
+ * We assume the parts of this union are laid out compatibly.
+ */
+typedef union
+{
+ int128 i128;
+ INT128 I128;
+ struct
+ {
+#ifdef WORDS_BIGENDIAN
+ int64 hi;
+ uint64 lo;
+#else
+ uint64 lo;
+ int64 hi;
+#endif
+ } hl;
+} test128;
+
+#define INT128_HEX_FORMAT "%016" PRIx64 "%016" PRIx64
+
+/*
+ * Control version of comparator.
+ */
+static inline int
+my_int128_compare(int128 x, int128 y)
+{
+ if (x < y)
+ return -1;
+ if (x > y)
+ return 1;
+ return 0;
+}
+
+/*
+ * Main program.
+ *
+ * Generates a lot of random numbers and tests the implementation for each.
+ * The PRNG is seeded from the current time, so the exact values tested
+ * differ from run to run.
+ *
+ * You can give a loop count if you don't like the default 1B iterations.
+ */
+int
+main(int argc, char **argv)
+{
+ long count;
+
+ pg_prng_seed(&pg_global_prng_state, (uint64) time(NULL));
+
+ if (argc >= 2)
+ count = strtol(argv[1], NULL, 0);
+ else
+ count = 1000000000;
+
+ while (count-- > 0)
+ {
+ int64 x = pg_prng_uint64(&pg_global_prng_state);
+ int64 y = pg_prng_uint64(&pg_global_prng_state);
+ int64 z = pg_prng_uint64(&pg_global_prng_state);
+ int64 w = pg_prng_uint64(&pg_global_prng_state);
+		int32		z32 = (int32) z ? (int32) z : 1;	/* avoid division by zero below */
+ test128 t1;
+ test128 t2;
+ test128 t3;
+ int32 r1;
+ int32 r2;
+
+ /* check unsigned addition */
+ t1.hl.hi = x;
+ t1.hl.lo = y;
+ t2 = t1;
+ t1.i128 += (int128) (uint64) z;
+ int128_add_uint64(&t2.I128, (uint64) z);
+
+ if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo)
+ {
+ printf(INT128_HEX_FORMAT " + unsigned %016" PRIx64 "\n", x, y, z);
+ printf("native = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo);
+ printf("result = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo);
+ return 1;
+ }
+
+ /* check signed addition */
+ t1.hl.hi = x;
+ t1.hl.lo = y;
+ t2 = t1;
+ t1.i128 += (int128) z;
+ int128_add_int64(&t2.I128, z);
+
+ if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo)
+ {
+ printf(INT128_HEX_FORMAT " + signed %016" PRIx64 "\n", x, y, z);
+ printf("native = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo);
+ printf("result = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo);
+ return 1;
+ }
+
+ /* check 128-bit signed addition */
+ t1.hl.hi = x;
+ t1.hl.lo = y;
+ t2 = t1;
+ t3.hl.hi = z;
+ t3.hl.lo = w;
+ t1.i128 += t3.i128;
+ int128_add_int128(&t2.I128, t3.I128);
+
+ if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo)
+ {
+ printf(INT128_HEX_FORMAT " + " INT128_HEX_FORMAT "\n", x, y, z, w);
+ printf("native = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo);
+ printf("result = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo);
+ return 1;
+ }
+
+ /* check unsigned subtraction */
+ t1.hl.hi = x;
+ t1.hl.lo = y;
+ t2 = t1;
+ t1.i128 -= (int128) (uint64) z;
+ int128_sub_uint64(&t2.I128, (uint64) z);
+
+ if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo)
+ {
+ printf(INT128_HEX_FORMAT " - unsigned %016" PRIx64 "\n", x, y, z);
+ printf("native = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo);
+ printf("result = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo);
+ return 1;
+ }
+
+ /* check signed subtraction */
+ t1.hl.hi = x;
+ t1.hl.lo = y;
+ t2 = t1;
+ t1.i128 -= (int128) z;
+ int128_sub_int64(&t2.I128, z);
+
+ if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo)
+ {
+ printf(INT128_HEX_FORMAT " - signed %016" PRIx64 "\n", x, y, z);
+ printf("native = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo);
+ printf("result = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo);
+ return 1;
+ }
+
+ /* check 64x64-bit multiply-add */
+ t1.hl.hi = x;
+ t1.hl.lo = y;
+ t2 = t1;
+ t1.i128 += (int128) z * (int128) w;
+ int128_add_int64_mul_int64(&t2.I128, z, w);
+
+ if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo)
+ {
+ printf(INT128_HEX_FORMAT " + %016" PRIx64 " * %016" PRIx64 "\n", x, y, z, w);
+ printf("native = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo);
+ printf("result = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo);
+ return 1;
+ }
+
+ /* check 64x64-bit multiply-subtract */
+ t1.hl.hi = x;
+ t1.hl.lo = y;
+ t2 = t1;
+ t1.i128 -= (int128) z * (int128) w;
+ int128_sub_int64_mul_int64(&t2.I128, z, w);
+
+ if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo)
+ {
+ printf(INT128_HEX_FORMAT " - %016" PRIx64 " * %016" PRIx64 "\n", x, y, z, w);
+ printf("native = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo);
+ printf("result = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo);
+ return 1;
+ }
+
+ /* check 128/32-bit division */
+ t3.hl.hi = x;
+ t3.hl.lo = y;
+ t1.i128 = t3.i128 / z32;
+ r1 = (int32) (t3.i128 % z32);
+ t2 = t3;
+ int128_div_mod_int32(&t2.I128, z32, &r2);
+
+ if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo)
+ {
+ printf(INT128_HEX_FORMAT " / signed %08X\n", t3.hl.hi, t3.hl.lo, z32);
+ printf("native = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo);
+ printf("result = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo);
+ return 1;
+ }
+ if (r1 != r2)
+ {
+ printf(INT128_HEX_FORMAT " %% signed %08X\n", t3.hl.hi, t3.hl.lo, z32);
+ printf("native = %08X\n", r1);
+ printf("result = %08X\n", r2);
+ return 1;
+ }
+
+ /* check comparison */
+ t1.hl.hi = x;
+ t1.hl.lo = y;
+ t2.hl.hi = z;
+ t2.hl.lo = w;
+
+ if (my_int128_compare(t1.i128, t2.i128) !=
+ int128_compare(t1.I128, t2.I128))
+ {
+ printf("comparison failure: %d vs %d\n",
+ my_int128_compare(t1.i128, t2.i128),
+ int128_compare(t1.I128, t2.I128));
+ printf("arg1 = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo);
+ printf("arg2 = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo);
+ return 1;
+ }
+
+ /* check case with identical hi parts; above will hardly ever hit it */
+ t2.hl.hi = x;
+
+ if (my_int128_compare(t1.i128, t2.i128) !=
+ int128_compare(t1.I128, t2.I128))
+ {
+ printf("comparison failure: %d vs %d\n",
+ my_int128_compare(t1.i128, t2.i128),
+ int128_compare(t1.I128, t2.I128));
+ printf("arg1 = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo);
+ printf("arg2 = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+#else /* ! HAVE_INT128 */
+
+/*
+ * For now, do nothing if we don't have a native int128 type.
+ */
+int
+main(int argc, char **argv)
+{
+ printf("skipping tests: no native int128 type\n");
+ return 0;
+}
+
+#endif
diff --git a/src/test/modules/test_radixtree/test_radixtree.c b/src/test/modules/test_radixtree/test_radixtree.c
index 32de6a3123e..80ad0296164 100644
--- a/src/test/modules/test_radixtree/test_radixtree.c
+++ b/src/test/modules/test_radixtree/test_radixtree.c
@@ -44,7 +44,7 @@
uint64 _expected = (expected_expr); \
if (_result != _expected) \
elog(ERROR, \
- "%s yielded " UINT64_HEX_FORMAT ", expected " UINT64_HEX_FORMAT " (%s) in file \"%s\" line %u", \
+ "%s yielded %" PRIx64 ", expected %" PRIx64 " (%s) in file \"%s\" line %u", \
#result_expr, _result, _expected, #expected_expr, __FILE__, __LINE__); \
} while (0)
diff --git a/src/test/recovery/t/027_stream_regress.pl b/src/test/recovery/t/027_stream_regress.pl
index 5d2c06ba06e..12d8852fb4b 100644
--- a/src/test/recovery/t/027_stream_regress.pl
+++ b/src/test/recovery/t/027_stream_regress.pl
@@ -117,6 +117,7 @@ command_ok(
'pg_dumpall',
'--file' => $outputdir . '/primary.dump',
'--no-sync', '--no-statistics',
+ '--restrict-key=test',
'--port' => $node_primary->port,
'--no-unlogged-table-data', # if unlogged, standby has schema only
],
@@ -126,6 +127,7 @@ command_ok(
'pg_dumpall',
'--file' => $outputdir . '/standby.dump',
'--no-sync', '--no-statistics',
+ '--restrict-key=test',
'--port' => $node_standby_1->port,
],
'dump standby server');
@@ -145,6 +147,7 @@ command_ok(
'--schema' => 'pg_catalog',
'--file' => $outputdir . '/catalogs_primary.dump',
'--no-sync',
+ '--restrict-key=test',
'--port', $node_primary->port,
'--no-unlogged-table-data',
'regression',
@@ -156,6 +159,7 @@ command_ok(
'--schema' => 'pg_catalog',
'--file' => $outputdir . '/catalogs_standby.dump',
'--no-sync',
+ '--restrict-key=test',
'--port' => $node_standby_1->port,
'regression',
],
diff --git a/src/test/regress/expected/aggregates.out b/src/test/regress/expected/aggregates.out
index 1f1ce2380af..7319945ffe3 100644
--- a/src/test/regress/expected/aggregates.out
+++ b/src/test/regress/expected/aggregates.out
@@ -680,6 +680,25 @@ SELECT sum2(q1,q2) FROM int8_tbl;
18271560493827981
(1 row)
+-- sanity checks
+SELECT sum(q1+q2), sum(q1)+sum(q2) FROM int8_tbl;
+ sum | ?column?
+-------------------+-------------------
+ 18271560493827981 | 18271560493827981
+(1 row)
+
+SELECT sum(q1-q2), sum(q2-q1), sum(q1)-sum(q2) FROM int8_tbl;
+ sum | sum | ?column?
+------------------+-------------------+------------------
+ 9135780246913245 | -9135780246913245 | 9135780246913245
+(1 row)
+
+SELECT sum(q1*2000), sum(-q1*2000), 2000*sum(q1) FROM int8_tbl;
+ sum | sum | ?column?
+----------------------+-----------------------+----------------------
+ 27407340740741226000 | -27407340740741226000 | 27407340740741226000
+(1 row)
+
-- test for outer-level aggregates
-- this should work
select ten, sum(distinct four) from onek a
diff --git a/src/test/regress/expected/privileges.out b/src/test/regress/expected/privileges.out
index 602a6b255bc..845e477da04 100644
--- a/src/test/regress/expected/privileges.out
+++ b/src/test/regress/expected/privileges.out
@@ -513,8 +513,6 @@ CREATE VIEW atest12v AS
SELECT * FROM atest12 WHERE b <<< 5;
CREATE VIEW atest12sbv WITH (security_barrier=true) AS
SELECT * FROM atest12 WHERE b <<< 5;
-GRANT SELECT ON atest12v TO PUBLIC;
-GRANT SELECT ON atest12sbv TO PUBLIC;
-- This plan should use nestloop, knowing that few rows will be selected.
EXPLAIN (COSTS OFF) SELECT * FROM atest12v x, atest12v y WHERE x.a = y.b;
QUERY PLAN
@@ -560,9 +558,18 @@ CREATE FUNCTION leak2(integer,integer) RETURNS boolean
LANGUAGE plpgsql immutable;
CREATE OPERATOR >>> (procedure = leak2, leftarg = integer, rightarg = integer,
restrict = scalargtsel);
--- This should not show any "leak" notices before failing.
+-- These should not show any "leak" notices before failing.
EXPLAIN (COSTS OFF) SELECT * FROM atest12 WHERE a >>> 0;
ERROR: permission denied for table atest12
+EXPLAIN (COSTS OFF) SELECT * FROM atest12v WHERE a >>> 0;
+ERROR: permission denied for view atest12v
+EXPLAIN (COSTS OFF) SELECT * FROM atest12sbv WHERE a >>> 0;
+ERROR: permission denied for view atest12sbv
+-- Now regress_priv_user1 grants access to regress_priv_user2 via the views.
+SET SESSION AUTHORIZATION regress_priv_user1;
+GRANT SELECT ON atest12v TO PUBLIC;
+GRANT SELECT ON atest12sbv TO PUBLIC;
+SET SESSION AUTHORIZATION regress_priv_user2;
-- These plans should continue to use a nestloop, since they execute with the
-- privileges of the view owner.
EXPLAIN (COSTS OFF) SELECT * FROM atest12v x, atest12v y WHERE x.a = y.b;
diff --git a/src/test/regress/expected/psql.out b/src/test/regress/expected/psql.out
index 236eba2540e..a79325e8a2f 100644
--- a/src/test/regress/expected/psql.out
+++ b/src/test/regress/expected/psql.out
@@ -4705,6 +4705,7 @@ invalid command \lo
\pset arg1 arg2
\q
\reset
+ \restrict test
\s arg1
\sendpipeline
\set arg1 arg2 arg3 arg4 arg5 arg6 arg7
@@ -4716,6 +4717,7 @@ invalid command \lo
\t arg1
\T arg1
\timing arg1
+ \unrestrict not_valid
\unset arg1
\w arg1
\watch arg1 arg2
diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out
index 1c4e37d2249..8c879509313 100644
--- a/src/test/regress/expected/rowsecurity.out
+++ b/src/test/regress/expected/rowsecurity.out
@@ -4506,7 +4506,7 @@ RESET SESSION AUTHORIZATION;
DROP VIEW rls_view;
DROP TABLE rls_tbl;
DROP TABLE ref_tbl;
--- Leaky operator test
+-- Leaky operator tests
CREATE TABLE rls_tbl (a int);
INSERT INTO rls_tbl SELECT x/10 FROM generate_series(1, 100) x;
ANALYZE rls_tbl;
@@ -4530,9 +4530,80 @@ EXPLAIN (COSTS OFF) SELECT * FROM rls_tbl WHERE a <<< 1000 or a <<< 900;
One-Time Filter: false
(2 rows)
+RESET SESSION AUTHORIZATION;
+CREATE TABLE rls_child_tbl () INHERITS (rls_tbl);
+INSERT INTO rls_child_tbl SELECT x/10 FROM generate_series(1, 100) x;
+ANALYZE rls_child_tbl;
+CREATE TABLE rls_ptbl (a int) PARTITION BY RANGE (a);
+CREATE TABLE rls_part PARTITION OF rls_ptbl FOR VALUES FROM (-100) TO (100);
+INSERT INTO rls_ptbl SELECT x/10 FROM generate_series(1, 100) x;
+ANALYZE rls_ptbl, rls_part;
+ALTER TABLE rls_ptbl ENABLE ROW LEVEL SECURITY;
+ALTER TABLE rls_part ENABLE ROW LEVEL SECURITY;
+GRANT SELECT ON rls_ptbl TO regress_rls_alice;
+GRANT SELECT ON rls_part TO regress_rls_alice;
+CREATE POLICY p1 ON rls_tbl USING (a < 0);
+CREATE POLICY p2 ON rls_ptbl USING (a < 0);
+CREATE POLICY p3 ON rls_part USING (a < 0);
+SET SESSION AUTHORIZATION regress_rls_alice;
+SELECT * FROM rls_tbl WHERE a <<< 1000;
+ a
+---
+(0 rows)
+
+SELECT * FROM rls_child_tbl WHERE a <<< 1000;
+ERROR: permission denied for table rls_child_tbl
+SELECT * FROM rls_ptbl WHERE a <<< 1000;
+ a
+---
+(0 rows)
+
+SELECT * FROM rls_part WHERE a <<< 1000;
+ a
+---
+(0 rows)
+
+SELECT * FROM (SELECT * FROM rls_tbl UNION ALL
+ SELECT * FROM rls_tbl) t WHERE a <<< 1000;
+ a
+---
+(0 rows)
+
+SELECT * FROM (SELECT * FROM rls_child_tbl UNION ALL
+ SELECT * FROM rls_child_tbl) t WHERE a <<< 1000;
+ERROR: permission denied for table rls_child_tbl
+RESET SESSION AUTHORIZATION;
+REVOKE SELECT ON rls_tbl FROM regress_rls_alice;
+CREATE VIEW rls_tbl_view AS SELECT * FROM rls_tbl;
+ALTER TABLE rls_child_tbl ENABLE ROW LEVEL SECURITY;
+GRANT SELECT ON rls_child_tbl TO regress_rls_alice;
+CREATE POLICY p4 ON rls_child_tbl USING (a < 0);
+SET SESSION AUTHORIZATION regress_rls_alice;
+SELECT * FROM rls_tbl WHERE a <<< 1000;
+ERROR: permission denied for table rls_tbl
+SELECT * FROM rls_tbl_view WHERE a <<< 1000;
+ERROR: permission denied for view rls_tbl_view
+SELECT * FROM rls_child_tbl WHERE a <<< 1000;
+ a
+---
+(0 rows)
+
+SELECT * FROM (SELECT * FROM rls_tbl UNION ALL
+ SELECT * FROM rls_tbl) t WHERE a <<< 1000;
+ERROR: permission denied for table rls_tbl
+SELECT * FROM (SELECT * FROM rls_child_tbl UNION ALL
+ SELECT * FROM rls_child_tbl) t WHERE a <<< 1000;
+ a
+---
+(0 rows)
+
DROP OPERATOR <<< (int, int);
DROP FUNCTION op_leak(int, int);
RESET SESSION AUTHORIZATION;
+DROP TABLE rls_part;
+DROP TABLE rls_ptbl;
+DROP TABLE rls_child_tbl;
+DROP VIEW rls_tbl_view;
DROP TABLE rls_tbl;
-- Bug #16006: whole-row Vars in a policy don't play nice with sub-selects
SET SESSION AUTHORIZATION regress_rls_alice;
diff --git a/src/test/regress/expected/stats_ext.out b/src/test/regress/expected/stats_ext.out
index 6359e5fb689..c66e09f8b16 100644
--- a/src/test/regress/expected/stats_ext.out
+++ b/src/test/regress/expected/stats_ext.out
@@ -3275,9 +3275,17 @@ CREATE FUNCTION op_leak(int, int) RETURNS bool
LANGUAGE plpgsql;
CREATE OPERATOR <<< (procedure = op_leak, leftarg = int, rightarg = int,
restrict = scalarltsel);
+CREATE FUNCTION op_leak(record, record) RETURNS bool
+ AS 'BEGIN RAISE NOTICE ''op_leak => %, %'', $1, $2; RETURN $1 < $2; END'
+ LANGUAGE plpgsql;
+CREATE OPERATOR <<< (procedure = op_leak, leftarg = record, rightarg = record,
+ restrict = scalarltsel);
SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied
ERROR: permission denied for table priv_test_tbl
-SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 OR b <<< 0;
+SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 OR b <<< 0; -- Permission denied
+ERROR: permission denied for table priv_test_tbl
+SELECT * FROM tststats.priv_test_tbl t
+ WHERE a <<< 0 AND (b <<< 0 OR t.* <<< (1, 1) IS NOT NULL); -- Permission denied
ERROR: permission denied for table priv_test_tbl
DELETE FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied
ERROR: permission denied for table priv_test_tbl
@@ -3298,10 +3306,17 @@ SELECT * FROM tststats.priv_test_view WHERE a <<< 0 OR b <<< 0; -- Should not le
---+---
(0 rows)
+SELECT * FROM tststats.priv_test_view t
+ WHERE a <<< 0 AND (b <<< 0 OR t.* <<< (1, 1) IS NOT NULL); -- Should not leak
+ a | b
+---+---
+(0 rows)
+
DELETE FROM tststats.priv_test_view WHERE a <<< 0 AND b <<< 0; -- Should not leak
-- Grant table access, but hide all data with RLS
RESET SESSION AUTHORIZATION;
ALTER TABLE tststats.priv_test_tbl ENABLE ROW LEVEL SECURITY;
+CREATE POLICY priv_test_tbl_pol ON tststats.priv_test_tbl USING (2 * a < 0);
GRANT SELECT, DELETE ON tststats.priv_test_tbl TO regress_stats_user1;
-- Should now have direct table access, but see nothing and leak nothing
SET SESSION AUTHORIZATION regress_stats_user1;
@@ -3310,12 +3325,57 @@ SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not le
---+---
(0 rows)
-SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 OR b <<< 0;
+SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 OR b <<< 0; -- Should not leak
+ a | b
+---+---
+(0 rows)
+
+SELECT * FROM tststats.priv_test_tbl t
+ WHERE a <<< 0 AND (b <<< 0 OR t.* <<< (1, 1) IS NOT NULL); -- Should not leak
a | b
---+---
(0 rows)
DELETE FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak
+-- Create plain inheritance parent table with no access permissions
+RESET SESSION AUTHORIZATION;
+CREATE TABLE tststats.priv_test_parent_tbl (a int, b int);
+ALTER TABLE tststats.priv_test_tbl INHERIT tststats.priv_test_parent_tbl;
+-- Should not have access to parent, and should leak nothing
+SET SESSION AUTHORIZATION regress_stats_user1;
+SELECT * FROM tststats.priv_test_parent_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied
+ERROR: permission denied for table priv_test_parent_tbl
+SELECT * FROM tststats.priv_test_parent_tbl WHERE a <<< 0 OR b <<< 0; -- Permission denied
+ERROR: permission denied for table priv_test_parent_tbl
+SELECT * FROM tststats.priv_test_parent_tbl t
+ WHERE a <<< 0 AND (b <<< 0 OR t.* <<< (1, 1) IS NOT NULL); -- Permission denied
+ERROR: permission denied for table priv_test_parent_tbl
+DELETE FROM tststats.priv_test_parent_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied
+ERROR: permission denied for table priv_test_parent_tbl
+-- Grant table access to parent, but hide all data with RLS
+RESET SESSION AUTHORIZATION;
+ALTER TABLE tststats.priv_test_parent_tbl ENABLE ROW LEVEL SECURITY;
+CREATE POLICY priv_test_parent_tbl_pol ON tststats.priv_test_parent_tbl USING (2 * a < 0);
+GRANT SELECT, DELETE ON tststats.priv_test_parent_tbl TO regress_stats_user1;
+-- Should now have direct table access to parent, but see nothing and leak nothing
+SET SESSION AUTHORIZATION regress_stats_user1;
+SELECT * FROM tststats.priv_test_parent_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak
+ a | b
+---+---
+(0 rows)
+
+SELECT * FROM tststats.priv_test_parent_tbl WHERE a <<< 0 OR b <<< 0; -- Should not leak
+ a | b
+---+---
+(0 rows)
+
+SELECT * FROM tststats.priv_test_parent_tbl t
+ WHERE a <<< 0 AND (b <<< 0 OR t.* <<< (1, 1) IS NOT NULL); -- Should not leak
+ a | b
+---+---
+(0 rows)
+
+DELETE FROM tststats.priv_test_parent_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak
-- privilege checks for pg_stats_ext and pg_stats_ext_exprs
RESET SESSION AUTHORIZATION;
CREATE TABLE stats_ext_tbl (id INT PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY, col TEXT);
@@ -3361,11 +3421,14 @@ SELECT statistics_name, most_common_vals FROM pg_stats_ext_exprs x
-- Tidy up
DROP OPERATOR <<< (int, int);
DROP FUNCTION op_leak(int, int);
+DROP OPERATOR <<< (record, record);
+DROP FUNCTION op_leak(record, record);
RESET SESSION AUTHORIZATION;
DROP TABLE stats_ext_tbl;
DROP SCHEMA tststats CASCADE;
-NOTICE: drop cascades to 2 other objects
-DETAIL: drop cascades to table tststats.priv_test_tbl
+NOTICE: drop cascades to 3 other objects
+DETAIL: drop cascades to table tststats.priv_test_parent_tbl
+drop cascades to table tststats.priv_test_tbl
drop cascades to view tststats.priv_test_view
DROP USER regress_stats_user1;
CREATE TABLE grouping_unique (x integer);
diff --git a/src/test/regress/expected/strings.out b/src/test/regress/expected/strings.out
index 1bfd33de3f3..ba302da51e7 100644
--- a/src/test/regress/expected/strings.out
+++ b/src/test/regress/expected/strings.out
@@ -2091,6 +2091,40 @@ SELECT c FROM toasttest;
(1 row)
DROP TABLE toasttest;
+-- test with short varlenas (up to 126 data bytes reduced to a 1-byte header)
+-- being toasted.
+CREATE TABLE toasttest (f1 text, f2 text);
+ALTER TABLE toasttest SET (toast_tuple_target = 128);
+ALTER TABLE toasttest ALTER COLUMN f1 SET STORAGE EXTERNAL;
+ALTER TABLE toasttest ALTER COLUMN f2 SET STORAGE EXTERNAL;
+-- Here, the first value is a varlena large enough to make it toasted and
+-- stored uncompressed. The second value is a short varlena, toasted
+-- and stored uncompressed.
+INSERT INTO toasttest values(repeat('1234', 1000), repeat('5678', 30));
+SELECT reltoastrelid::regclass AS reltoastname FROM pg_class
+ WHERE oid = 'toasttest'::regclass \gset
+-- There should be two values inserted in the toast relation.
+SELECT count(*) FROM :reltoastname WHERE chunk_seq = 0;
+ count
+-------
+ 2
+(1 row)
+
+SELECT substr(f1, 5, 10) AS f1_data, substr(f2, 5, 10) AS f2_data
+ FROM toasttest;
+ f1_data | f2_data
+------------+------------
+ 1234123412 | 5678567856
+(1 row)
+
+SELECT pg_column_compression(f1) AS f1_comp, pg_column_compression(f2) AS f2_comp
+ FROM toasttest;
+ f1_comp | f2_comp
+---------+---------
+ |
+(1 row)
+
+DROP TABLE toasttest;
--
-- test length
--
diff --git a/src/test/regress/expected/timestamp.out b/src/test/regress/expected/timestamp.out
index 6aaa19c8f4e..14a9f5b56a6 100644
--- a/src/test/regress/expected/timestamp.out
+++ b/src/test/regress/expected/timestamp.out
@@ -591,6 +591,16 @@ SELECT date_trunc( 'week', timestamp '2004-02-29 15:44:17.71393' ) AS week_trunc
Mon Feb 23 00:00:00 2004
(1 row)
+SELECT date_trunc( 'week', timestamp 'infinity' ) AS inf_trunc;
+ inf_trunc
+-----------
+ infinity
+(1 row)
+
+SELECT date_trunc( 'timezone', timestamp '2004-02-29 15:44:17.71393' ) AS notsupp_trunc;
+ERROR: unit "timezone" not supported for type timestamp without time zone
+SELECT date_trunc( 'timezone', timestamp 'infinity' ) AS notsupp_inf_trunc;
+ERROR: unit "timezone" not supported for type timestamp without time zone
SELECT date_trunc( 'ago', timestamp 'infinity' ) AS invalid_trunc;
ERROR: unit "ago" not recognized for type timestamp without time zone
-- verify date_bin behaves the same as date_trunc for relevant intervals
diff --git a/src/test/regress/expected/timestamptz.out b/src/test/regress/expected/timestamptz.out
index 2a69953ff25..5dc8a621f6c 100644
--- a/src/test/regress/expected/timestamptz.out
+++ b/src/test/regress/expected/timestamptz.out
@@ -760,6 +760,16 @@ SELECT date_trunc( 'week', timestamp with time zone '2004-02-29 15:44:17.71393'
Mon Feb 23 00:00:00 2004 PST
(1 row)
+SELECT date_trunc( 'week', timestamp with time zone 'infinity' ) AS inf_trunc;
+ inf_trunc
+-----------
+ infinity
+(1 row)
+
+SELECT date_trunc( 'timezone', timestamp with time zone '2004-02-29 15:44:17.71393' ) AS notsupp_trunc;
+ERROR: unit "timezone" not supported for type timestamp with time zone
+SELECT date_trunc( 'timezone', timestamp with time zone 'infinity' ) AS notsupp_inf_trunc;
+ERROR: unit "timezone" not supported for type timestamp with time zone
SELECT date_trunc( 'ago', timestamp with time zone 'infinity' ) AS invalid_trunc;
ERROR: unit "ago" not recognized for type timestamp with time zone
SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'Australia/Sydney') as sydney_trunc; -- zone name
@@ -780,6 +790,14 @@ SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'VET
Thu Feb 15 20:00:00 2001 PST
(1 row)
+SELECT date_trunc('timezone', timestamp with time zone 'infinity', 'GMT') AS notsupp_zone_trunc;
+ERROR: unit "timezone" not supported for type timestamp with time zone
+SELECT date_trunc( 'week', timestamp with time zone 'infinity', 'GMT') AS inf_zone_trunc;
+ inf_zone_trunc
+----------------
+ infinity
+(1 row)
+
SELECT date_trunc('ago', timestamp with time zone 'infinity', 'GMT') AS invalid_zone_trunc;
ERROR: unit "ago" not recognized for type timestamp with time zone
-- verify date_bin behaves the same as date_trunc for relevant intervals
diff --git a/src/test/regress/expected/triggers.out b/src/test/regress/expected/triggers.out
index 872b9100e1a..1eb8fba0953 100644
--- a/src/test/regress/expected/triggers.out
+++ b/src/test/regress/expected/triggers.out
@@ -2769,6 +2769,10 @@ NOTICE: trigger = child3_delete_trig, old table = (42,CCC)
-- copy into parent sees parent-format tuples
copy parent (a, b) from stdin;
NOTICE: trigger = parent_insert_trig, new table = (AAA,42), (BBB,42), (CCC,42)
+-- check detach/reattach behavior; statement triggers with transition tables
+-- should not prevent a table from becoming a partition again
+alter table parent detach partition child1;
+alter table parent attach partition child1 for values in ('AAA');
-- DML affecting parent sees tuples collected from children even if
-- there is no transition table trigger on the children
drop trigger child1_insert_trig on child1;
@@ -2966,6 +2970,10 @@ NOTICE: trigger = parent_insert_trig, new table = (AAA,42), (BBB,42), (CCC,42)
create index on parent(b);
copy parent (a, b) from stdin;
NOTICE: trigger = parent_insert_trig, new table = (DDD,42)
+-- check disinherit/reinherit behavior; statement triggers with transition
+-- tables should not prevent a table from becoming an inheritance child again
+alter table child1 no inherit parent;
+alter table child1 inherit parent;
-- DML affecting parent sees tuples collected from children even if
-- there is no transition table trigger on the children
drop trigger child1_insert_trig on child1;
diff --git a/src/test/regress/regress.c b/src/test/regress/regress.c
index 3dbba069024..465ac148ac9 100644
--- a/src/test/regress/regress.c
+++ b/src/test/regress/regress.c
@@ -727,7 +727,7 @@ PG_FUNCTION_INFO_V1(is_catalog_text_unique_index_oid);
Datum
is_catalog_text_unique_index_oid(PG_FUNCTION_ARGS)
{
- return IsCatalogTextUniqueIndexOid(PG_GETARG_OID(0));
+ return BoolGetDatum(IsCatalogTextUniqueIndexOid(PG_GETARG_OID(0)));
}
PG_FUNCTION_INFO_V1(test_support_func);
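The regress.c hunk above wraps the function's bool result in BoolGetDatum() because a version-1 SQL-callable C function must hand back a Datum, not a raw C bool. A minimal sketch of that convention follows; the function name is hypothetical and not part of this patch, and PG_RETURN_BOOL() is simply the conventional spelling of return BoolGetDatum().

#include "postgres.h"
#include "fmgr.h"

/* Hypothetical example (not from this patch): returning a bool from a
 * V1-convention function.  Returning "result" directly would hand the
 * caller a raw bool where a Datum is expected; BoolGetDatum(), here via
 * PG_RETURN_BOOL(), performs the conversion. */
PG_FUNCTION_INFO_V1(is_valid_oid_example);

Datum
is_valid_oid_example(PG_FUNCTION_ARGS)
{
	Oid		arg = PG_GETARG_OID(0);
	bool	result = OidIsValid(arg);

	PG_RETURN_BOOL(result);		/* expands to: return BoolGetDatum(result); */
}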
diff --git a/src/test/regress/sql/aggregates.sql b/src/test/regress/sql/aggregates.sql
index 277b4b198cc..dde85d0dfb2 100644
--- a/src/test/regress/sql/aggregates.sql
+++ b/src/test/regress/sql/aggregates.sql
@@ -182,6 +182,11 @@ SELECT newcnt(*) AS cnt_1000 FROM onek;
SELECT oldcnt(*) AS cnt_1000 FROM onek;
SELECT sum2(q1,q2) FROM int8_tbl;
+-- sanity checks
+SELECT sum(q1+q2), sum(q1)+sum(q2) FROM int8_tbl;
+SELECT sum(q1-q2), sum(q2-q1), sum(q1)-sum(q2) FROM int8_tbl;
+SELECT sum(q1*2000), sum(-q1*2000), 2000*sum(q1) FROM int8_tbl;
+
-- test for outer-level aggregates
-- this should work
diff --git a/src/test/regress/sql/privileges.sql b/src/test/regress/sql/privileges.sql
index 3eacc1340aa..661cf186cfd 100644
--- a/src/test/regress/sql/privileges.sql
+++ b/src/test/regress/sql/privileges.sql
@@ -346,8 +346,6 @@ CREATE VIEW atest12v AS
SELECT * FROM atest12 WHERE b <<< 5;
CREATE VIEW atest12sbv WITH (security_barrier=true) AS
SELECT * FROM atest12 WHERE b <<< 5;
-GRANT SELECT ON atest12v TO PUBLIC;
-GRANT SELECT ON atest12sbv TO PUBLIC;
-- This plan should use nestloop, knowing that few rows will be selected.
EXPLAIN (COSTS OFF) SELECT * FROM atest12v x, atest12v y WHERE x.a = y.b;
@@ -369,8 +367,16 @@ CREATE FUNCTION leak2(integer,integer) RETURNS boolean
CREATE OPERATOR >>> (procedure = leak2, leftarg = integer, rightarg = integer,
restrict = scalargtsel);
--- This should not show any "leak" notices before failing.
+-- These should not show any "leak" notices before failing.
EXPLAIN (COSTS OFF) SELECT * FROM atest12 WHERE a >>> 0;
+EXPLAIN (COSTS OFF) SELECT * FROM atest12v WHERE a >>> 0;
+EXPLAIN (COSTS OFF) SELECT * FROM atest12sbv WHERE a >>> 0;
+
+-- Now regress_priv_user1 grants access to regress_priv_user2 via the views.
+SET SESSION AUTHORIZATION regress_priv_user1;
+GRANT SELECT ON atest12v TO PUBLIC;
+GRANT SELECT ON atest12sbv TO PUBLIC;
+SET SESSION AUTHORIZATION regress_priv_user2;
-- These plans should continue to use a nestloop, since they execute with the
-- privileges of the view owner.
diff --git a/src/test/regress/sql/psql.sql b/src/test/regress/sql/psql.sql
index e2e31245439..f064e4f5456 100644
--- a/src/test/regress/sql/psql.sql
+++ b/src/test/regress/sql/psql.sql
@@ -1073,6 +1073,7 @@ select \if false \\ (bogus \else \\ 42 \endif \\ forty_two;
\pset arg1 arg2
\q
\reset
+ \restrict test
\s arg1
\sendpipeline
\set arg1 arg2 arg3 arg4 arg5 arg6 arg7
@@ -1084,6 +1085,7 @@ select \if false \\ (bogus \else \\ 42 \endif \\ forty_two;
\t arg1
\T arg1
\timing arg1
+ \unrestrict not_valid
\unset arg1
\w arg1
\watch arg1 arg2
diff --git a/src/test/regress/sql/rowsecurity.sql b/src/test/regress/sql/rowsecurity.sql
index 9da967a9ef2..21ac0ca51ee 100644
--- a/src/test/regress/sql/rowsecurity.sql
+++ b/src/test/regress/sql/rowsecurity.sql
@@ -2189,7 +2189,7 @@ DROP VIEW rls_view;
DROP TABLE rls_tbl;
DROP TABLE ref_tbl;
--- Leaky operator test
+-- Leaky operator tests
CREATE TABLE rls_tbl (a int);
INSERT INTO rls_tbl SELECT x/10 FROM generate_series(1, 100) x;
ANALYZE rls_tbl;
@@ -2205,9 +2205,58 @@ CREATE OPERATOR <<< (procedure = op_leak, leftarg = int, rightarg = int,
restrict = scalarltsel);
SELECT * FROM rls_tbl WHERE a <<< 1000;
EXPLAIN (COSTS OFF) SELECT * FROM rls_tbl WHERE a <<< 1000 or a <<< 900;
+RESET SESSION AUTHORIZATION;
+
+CREATE TABLE rls_child_tbl () INHERITS (rls_tbl);
+INSERT INTO rls_child_tbl SELECT x/10 FROM generate_series(1, 100) x;
+ANALYZE rls_child_tbl;
+
+CREATE TABLE rls_ptbl (a int) PARTITION BY RANGE (a);
+CREATE TABLE rls_part PARTITION OF rls_ptbl FOR VALUES FROM (-100) TO (100);
+INSERT INTO rls_ptbl SELECT x/10 FROM generate_series(1, 100) x;
+ANALYZE rls_ptbl, rls_part;
+
+ALTER TABLE rls_ptbl ENABLE ROW LEVEL SECURITY;
+ALTER TABLE rls_part ENABLE ROW LEVEL SECURITY;
+GRANT SELECT ON rls_ptbl TO regress_rls_alice;
+GRANT SELECT ON rls_part TO regress_rls_alice;
+CREATE POLICY p1 ON rls_tbl USING (a < 0);
+CREATE POLICY p2 ON rls_ptbl USING (a < 0);
+CREATE POLICY p3 ON rls_part USING (a < 0);
+
+SET SESSION AUTHORIZATION regress_rls_alice;
+SELECT * FROM rls_tbl WHERE a <<< 1000;
+SELECT * FROM rls_child_tbl WHERE a <<< 1000;
+SELECT * FROM rls_ptbl WHERE a <<< 1000;
+SELECT * FROM rls_part WHERE a <<< 1000;
+SELECT * FROM (SELECT * FROM rls_tbl UNION ALL
+ SELECT * FROM rls_tbl) t WHERE a <<< 1000;
+SELECT * FROM (SELECT * FROM rls_child_tbl UNION ALL
+ SELECT * FROM rls_child_tbl) t WHERE a <<< 1000;
+RESET SESSION AUTHORIZATION;
+
+REVOKE SELECT ON rls_tbl FROM regress_rls_alice;
+CREATE VIEW rls_tbl_view AS SELECT * FROM rls_tbl;
+
+ALTER TABLE rls_child_tbl ENABLE ROW LEVEL SECURITY;
+GRANT SELECT ON rls_child_tbl TO regress_rls_alice;
+CREATE POLICY p4 ON rls_child_tbl USING (a < 0);
+
+SET SESSION AUTHORIZATION regress_rls_alice;
+SELECT * FROM rls_tbl WHERE a <<< 1000;
+SELECT * FROM rls_tbl_view WHERE a <<< 1000;
+SELECT * FROM rls_child_tbl WHERE a <<< 1000;
+SELECT * FROM (SELECT * FROM rls_tbl UNION ALL
+ SELECT * FROM rls_tbl) t WHERE a <<< 1000;
+SELECT * FROM (SELECT * FROM rls_child_tbl UNION ALL
+ SELECT * FROM rls_child_tbl) t WHERE a <<< 1000;
DROP OPERATOR <<< (int, int);
DROP FUNCTION op_leak(int, int);
RESET SESSION AUTHORIZATION;
+DROP TABLE rls_part;
+DROP TABLE rls_ptbl;
+DROP TABLE rls_child_tbl;
+DROP VIEW rls_tbl_view;
DROP TABLE rls_tbl;
-- Bug #16006: whole-row Vars in a policy don't play nice with sub-selects
diff --git a/src/test/regress/sql/stats_ext.sql b/src/test/regress/sql/stats_ext.sql
index da4f2fe9c93..9ce4c670ecb 100644
--- a/src/test/regress/sql/stats_ext.sql
+++ b/src/test/regress/sql/stats_ext.sql
@@ -1647,8 +1647,15 @@ CREATE FUNCTION op_leak(int, int) RETURNS bool
LANGUAGE plpgsql;
CREATE OPERATOR <<< (procedure = op_leak, leftarg = int, rightarg = int,
restrict = scalarltsel);
+CREATE FUNCTION op_leak(record, record) RETURNS bool
+ AS 'BEGIN RAISE NOTICE ''op_leak => %, %'', $1, $2; RETURN $1 < $2; END'
+ LANGUAGE plpgsql;
+CREATE OPERATOR <<< (procedure = op_leak, leftarg = record, rightarg = record,
+ restrict = scalarltsel);
SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied
-SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 OR b <<< 0;
+SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 OR b <<< 0; -- Permission denied
+SELECT * FROM tststats.priv_test_tbl t
+ WHERE a <<< 0 AND (b <<< 0 OR t.* <<< (1, 1) IS NOT NULL); -- Permission denied
DELETE FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied
-- Grant access via a security barrier view, but hide all data
@@ -1661,19 +1668,51 @@ GRANT SELECT, DELETE ON tststats.priv_test_view TO regress_stats_user1;
SET SESSION AUTHORIZATION regress_stats_user1;
SELECT * FROM tststats.priv_test_view WHERE a <<< 0 AND b <<< 0; -- Should not leak
SELECT * FROM tststats.priv_test_view WHERE a <<< 0 OR b <<< 0; -- Should not leak
+SELECT * FROM tststats.priv_test_view t
+ WHERE a <<< 0 AND (b <<< 0 OR t.* <<< (1, 1) IS NOT NULL); -- Should not leak
DELETE FROM tststats.priv_test_view WHERE a <<< 0 AND b <<< 0; -- Should not leak
-- Grant table access, but hide all data with RLS
RESET SESSION AUTHORIZATION;
ALTER TABLE tststats.priv_test_tbl ENABLE ROW LEVEL SECURITY;
+CREATE POLICY priv_test_tbl_pol ON tststats.priv_test_tbl USING (2 * a < 0);
GRANT SELECT, DELETE ON tststats.priv_test_tbl TO regress_stats_user1;
-- Should now have direct table access, but see nothing and leak nothing
SET SESSION AUTHORIZATION regress_stats_user1;
SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak
-SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 OR b <<< 0;
+SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 OR b <<< 0; -- Should not leak
+SELECT * FROM tststats.priv_test_tbl t
+ WHERE a <<< 0 AND (b <<< 0 OR t.* <<< (1, 1) IS NOT NULL); -- Should not leak
DELETE FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak
+-- Create plain inheritance parent table with no access permissions
+RESET SESSION AUTHORIZATION;
+CREATE TABLE tststats.priv_test_parent_tbl (a int, b int);
+ALTER TABLE tststats.priv_test_tbl INHERIT tststats.priv_test_parent_tbl;
+
+-- Should not have access to parent, and should leak nothing
+SET SESSION AUTHORIZATION regress_stats_user1;
+SELECT * FROM tststats.priv_test_parent_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied
+SELECT * FROM tststats.priv_test_parent_tbl WHERE a <<< 0 OR b <<< 0; -- Permission denied
+SELECT * FROM tststats.priv_test_parent_tbl t
+ WHERE a <<< 0 AND (b <<< 0 OR t.* <<< (1, 1) IS NOT NULL); -- Permission denied
+DELETE FROM tststats.priv_test_parent_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied
+
+-- Grant table access to parent, but hide all data with RLS
+RESET SESSION AUTHORIZATION;
+ALTER TABLE tststats.priv_test_parent_tbl ENABLE ROW LEVEL SECURITY;
+CREATE POLICY priv_test_parent_tbl_pol ON tststats.priv_test_parent_tbl USING (2 * a < 0);
+GRANT SELECT, DELETE ON tststats.priv_test_parent_tbl TO regress_stats_user1;
+
+-- Should now have direct table access to parent, but see nothing and leak nothing
+SET SESSION AUTHORIZATION regress_stats_user1;
+SELECT * FROM tststats.priv_test_parent_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak
+SELECT * FROM tststats.priv_test_parent_tbl WHERE a <<< 0 OR b <<< 0; -- Should not leak
+SELECT * FROM tststats.priv_test_parent_tbl t
+ WHERE a <<< 0 AND (b <<< 0 OR t.* <<< (1, 1) IS NOT NULL); -- Should not leak
+DELETE FROM tststats.priv_test_parent_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak
+
-- privilege checks for pg_stats_ext and pg_stats_ext_exprs
RESET SESSION AUTHORIZATION;
CREATE TABLE stats_ext_tbl (id INT PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY, col TEXT);
@@ -1703,6 +1742,8 @@ SELECT statistics_name, most_common_vals FROM pg_stats_ext_exprs x
-- Tidy up
DROP OPERATOR <<< (int, int);
DROP FUNCTION op_leak(int, int);
+DROP OPERATOR <<< (record, record);
+DROP FUNCTION op_leak(record, record);
RESET SESSION AUTHORIZATION;
DROP TABLE stats_ext_tbl;
DROP SCHEMA tststats CASCADE;
diff --git a/src/test/regress/sql/strings.sql b/src/test/regress/sql/strings.sql
index 92c445c2439..b94004cc08c 100644
--- a/src/test/regress/sql/strings.sql
+++ b/src/test/regress/sql/strings.sql
@@ -650,6 +650,26 @@ SELECT length(c), c::text FROM toasttest;
SELECT c FROM toasttest;
DROP TABLE toasttest;
+-- test with short varlenas (up to 126 data bytes reduced to a 1-byte header)
+-- being toasted.
+CREATE TABLE toasttest (f1 text, f2 text);
+ALTER TABLE toasttest SET (toast_tuple_target = 128);
+ALTER TABLE toasttest ALTER COLUMN f1 SET STORAGE EXTERNAL;
+ALTER TABLE toasttest ALTER COLUMN f2 SET STORAGE EXTERNAL;
+-- Here, the first value is a varlena large enough to make it toasted and
+-- stored uncompressed. The second value is a short varlena, toasted
+-- and stored uncompressed.
+INSERT INTO toasttest values(repeat('1234', 1000), repeat('5678', 30));
+SELECT reltoastrelid::regclass AS reltoastname FROM pg_class
+ WHERE oid = 'toasttest'::regclass \gset
+-- There should be two values inserted in the toast relation.
+SELECT count(*) FROM :reltoastname WHERE chunk_seq = 0;
+SELECT substr(f1, 5, 10) AS f1_data, substr(f2, 5, 10) AS f2_data
+ FROM toasttest;
+SELECT pg_column_compression(f1) AS f1_comp, pg_column_compression(f2) AS f2_comp
+ FROM toasttest;
+DROP TABLE toasttest;
+
--
-- test length
--
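The new strings.sql block above exercises short varlenas: values of at most 126 data bytes, so that header plus data fit in a 1-byte header, which here still get pushed to the TOAST table uncompressed because toast_tuple_target is lowered to 128 and both columns use EXTERNAL storage. A standalone sketch of the 126-byte cutoff, using illustrative constants rather than the backend's varatt macros:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative constants; the backend's own definitions live in varatt.h. */
#define SHORT_VARLENA_HDRSZ	1		/* 1-byte header */
#define SHORT_VARLENA_MAX	0x7F	/* header + data must fit in 7 bits */

static bool
fits_short_varlena(size_t datalen)
{
	return datalen + SHORT_VARLENA_HDRSZ <= SHORT_VARLENA_MAX;
}

int
main(void)
{
	/* repeat('5678', 30) is 120 data bytes: short 1-byte header applies */
	printf("120 bytes -> %s header\n",
		   fits_short_varlena(120) ? "1-byte" : "4-byte");
	/* repeat('1234', 1000) is 4000 data bytes: needs the normal 4-byte header */
	printf("4000 bytes -> %s header\n",
		   fits_short_varlena(4000) ? "1-byte" : "4-byte");
	return 0;
}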
diff --git a/src/test/regress/sql/timestamp.sql b/src/test/regress/sql/timestamp.sql
index 55f80530ea0..313757ed041 100644
--- a/src/test/regress/sql/timestamp.sql
+++ b/src/test/regress/sql/timestamp.sql
@@ -175,7 +175,9 @@ SELECT d1 - timestamp without time zone '1997-01-02' AS diff
FROM TIMESTAMP_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01';
SELECT date_trunc( 'week', timestamp '2004-02-29 15:44:17.71393' ) AS week_trunc;
-
+SELECT date_trunc( 'week', timestamp 'infinity' ) AS inf_trunc;
+SELECT date_trunc( 'timezone', timestamp '2004-02-29 15:44:17.71393' ) AS notsupp_trunc;
+SELECT date_trunc( 'timezone', timestamp 'infinity' ) AS notsupp_inf_trunc;
SELECT date_trunc( 'ago', timestamp 'infinity' ) AS invalid_trunc;
-- verify date_bin behaves the same as date_trunc for relevant intervals
diff --git a/src/test/regress/sql/timestamptz.sql b/src/test/regress/sql/timestamptz.sql
index caca3123f13..6ace851d169 100644
--- a/src/test/regress/sql/timestamptz.sql
+++ b/src/test/regress/sql/timestamptz.sql
@@ -217,15 +217,18 @@ SELECT d1 - timestamp with time zone '1997-01-02' AS diff
FROM TIMESTAMPTZ_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01';
SELECT date_trunc( 'week', timestamp with time zone '2004-02-29 15:44:17.71393' ) AS week_trunc;
+SELECT date_trunc( 'week', timestamp with time zone 'infinity' ) AS inf_trunc;
+SELECT date_trunc( 'timezone', timestamp with time zone '2004-02-29 15:44:17.71393' ) AS notsupp_trunc;
+SELECT date_trunc( 'timezone', timestamp with time zone 'infinity' ) AS notsupp_inf_trunc;
SELECT date_trunc( 'ago', timestamp with time zone 'infinity' ) AS invalid_trunc;
SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'Australia/Sydney') as sydney_trunc; -- zone name
SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'GMT') as gmt_trunc; -- fixed-offset abbreviation
SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'VET') as vet_trunc; -- variable-offset abbreviation
+SELECT date_trunc('timezone', timestamp with time zone 'infinity', 'GMT') AS notsupp_zone_trunc;
+SELECT date_trunc( 'week', timestamp with time zone 'infinity', 'GMT') AS inf_zone_trunc;
SELECT date_trunc('ago', timestamp with time zone 'infinity', 'GMT') AS invalid_zone_trunc;
-
-
-- verify date_bin behaves the same as date_trunc for relevant intervals
SELECT
str,
diff --git a/src/test/regress/sql/triggers.sql b/src/test/regress/sql/triggers.sql
index d674b25c83b..5f7f75d7ba5 100644
--- a/src/test/regress/sql/triggers.sql
+++ b/src/test/regress/sql/triggers.sql
@@ -1935,6 +1935,11 @@ BBB 42
CCC 42
\.
+-- check detach/reattach behavior; statement triggers with transition tables
+-- should not prevent a table from becoming a partition again
+alter table parent detach partition child1;
+alter table parent attach partition child1 for values in ('AAA');
+
-- DML affecting parent sees tuples collected from children even if
-- there is no transition table trigger on the children
drop trigger child1_insert_trig on child1;
@@ -2154,6 +2159,11 @@ copy parent (a, b) from stdin;
DDD 42
\.
+-- check disinherit/reinherit behavior; statement triggers with transition
+-- tables should not prevent a table from becoming an inheritance child again
+alter table child1 no inherit parent;
+alter table child1 inherit parent;
+
-- DML affecting parent sees tuples collected from children even if
-- there is no transition table trigger on the children
drop trigger child1_insert_trig on child1;
diff --git a/src/tools/testint128.c b/src/tools/testint128.c
deleted file mode 100644
index a25631e277d..00000000000
--- a/src/tools/testint128.c
+++ /dev/null
@@ -1,170 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * testint128.c
- * Testbed for roll-our-own 128-bit integer arithmetic.
- *
- * This is a standalone test program that compares the behavior of an
- * implementation in int128.h to an (assumed correct) int128 native type.
- *
- * Copyright (c) 2017-2025, PostgreSQL Global Development Group
- *
- *
- * IDENTIFICATION
- * src/tools/testint128.c
- *
- *-------------------------------------------------------------------------
- */
-
-#include "postgres_fe.h"
-
-/*
- * By default, we test the non-native implementation in int128.h; but
- * by predefining USE_NATIVE_INT128 to 1, you can test the native
- * implementation, just to be sure.
- */
-#ifndef USE_NATIVE_INT128
-#define USE_NATIVE_INT128 0
-#endif
-
-#include "common/int128.h"
-#include "common/pg_prng.h"
-
-/*
- * We assume the parts of this union are laid out compatibly.
- */
-typedef union
-{
- int128 i128;
- INT128 I128;
- union
- {
-#ifdef WORDS_BIGENDIAN
- int64 hi;
- uint64 lo;
-#else
- uint64 lo;
- int64 hi;
-#endif
- } hl;
-} test128;
-
-
-/*
- * Control version of comparator.
- */
-static inline int
-my_int128_compare(int128 x, int128 y)
-{
- if (x < y)
- return -1;
- if (x > y)
- return 1;
- return 0;
-}
-
-/*
- * Main program.
- *
- * Generates a lot of random numbers and tests the implementation for each.
- * The results should be reproducible, since we use a fixed PRNG seed.
- *
- * You can give a loop count if you don't like the default 1B iterations.
- */
-int
-main(int argc, char **argv)
-{
- long count;
-
- pg_prng_seed(&pg_global_prng_state, 0);
-
- if (argc >= 2)
- count = strtol(argv[1], NULL, 0);
- else
- count = 1000000000;
-
- while (count-- > 0)
- {
- int64 x = pg_prng_uint64(&pg_global_prng_state);
- int64 y = pg_prng_uint64(&pg_global_prng_state);
- int64 z = pg_prng_uint64(&pg_global_prng_state);
- test128 t1;
- test128 t2;
-
- /* check unsigned addition */
- t1.hl.hi = x;
- t1.hl.lo = y;
- t2 = t1;
- t1.i128 += (int128) (uint64) z;
- int128_add_uint64(&t2.I128, (uint64) z);
-
- if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo)
- {
- printf("%016lX%016lX + unsigned %lX\n", x, y, z);
- printf("native = %016lX%016lX\n", t1.hl.hi, t1.hl.lo);
- printf("result = %016lX%016lX\n", t2.hl.hi, t2.hl.lo);
- return 1;
- }
-
- /* check signed addition */
- t1.hl.hi = x;
- t1.hl.lo = y;
- t2 = t1;
- t1.i128 += (int128) z;
- int128_add_int64(&t2.I128, z);
-
- if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo)
- {
- printf("%016lX%016lX + signed %lX\n", x, y, z);
- printf("native = %016lX%016lX\n", t1.hl.hi, t1.hl.lo);
- printf("result = %016lX%016lX\n", t2.hl.hi, t2.hl.lo);
- return 1;
- }
-
- /* check multiplication */
- t1.i128 = (int128) x * (int128) y;
-
- t2.hl.hi = t2.hl.lo = 0;
- int128_add_int64_mul_int64(&t2.I128, x, y);
-
- if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo)
- {
- printf("%lX * %lX\n", x, y);
- printf("native = %016lX%016lX\n", t1.hl.hi, t1.hl.lo);
- printf("result = %016lX%016lX\n", t2.hl.hi, t2.hl.lo);
- return 1;
- }
-
- /* check comparison */
- t1.hl.hi = x;
- t1.hl.lo = y;
- t2.hl.hi = z;
- t2.hl.lo = pg_prng_uint64(&pg_global_prng_state);
-
- if (my_int128_compare(t1.i128, t2.i128) !=
- int128_compare(t1.I128, t2.I128))
- {
- printf("comparison failure: %d vs %d\n",
- my_int128_compare(t1.i128, t2.i128),
- int128_compare(t1.I128, t2.I128));
- printf("arg1 = %016lX%016lX\n", t1.hl.hi, t1.hl.lo);
- printf("arg2 = %016lX%016lX\n", t2.hl.hi, t2.hl.lo);
- return 1;
- }
-
- /* check case with identical hi parts; above will hardly ever hit it */
- t2.hl.hi = x;
-
- if (my_int128_compare(t1.i128, t2.i128) !=
- int128_compare(t1.I128, t2.I128))
- {
- printf("comparison failure: %d vs %d\n",
- my_int128_compare(t1.i128, t2.i128),
- int128_compare(t1.I128, t2.I128));
- printf("arg1 = %016lX%016lX\n", t1.hl.hi, t1.hl.lo);
- printf("arg2 = %016lX%016lX\n", t2.hl.hi, t2.hl.lo);
- return 1;
- }
- }
-
- return 0;
-}
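The deleted src/tools/testint128.c cross-checked the portable INT128 routines in common/int128.h against the compiler's native int128 by overlaying both representations in a union and comparing the halves after each operation. A condensed sketch of that cross-check, trimmed to the unsigned-add case and assuming the same int128.h and pg_prng.h APIs shown in the removed file:

#include "postgres_fe.h"

#include "common/int128.h"
#include "common/pg_prng.h"

/* Overlay the native control type and the implementation under test,
 * assuming (as the deleted file did) that their layouts are compatible. */
typedef union
{
	int128		i128;		/* native control value */
	INT128		I128;		/* implementation from common/int128.h */
	struct
	{
#ifdef WORDS_BIGENDIAN
		int64	hi;
		uint64	lo;
#else
		uint64	lo;
		int64	hi;
#endif
	}			hl;
} test128;

int
main(void)
{
	pg_prng_seed(&pg_global_prng_state, 0);		/* fixed seed: reproducible */

	for (long i = 0; i < 100000; i++)
	{
		uint64		z = pg_prng_uint64(&pg_global_prng_state);
		test128		t1;
		test128		t2;

		t1.hl.hi = (int64) pg_prng_uint64(&pg_global_prng_state);
		t1.hl.lo = pg_prng_uint64(&pg_global_prng_state);
		t2 = t1;

		t1.i128 += (int128) (uint64) z;			/* native arithmetic */
		int128_add_uint64(&t2.I128, z);			/* implementation under test */

		if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo)
		{
			printf("unsigned add mismatch at iteration %ld\n", i);
			return 1;
		}
	}
	return 0;
}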