-rw-r--r--  contrib/btree_gist/btree_enum.c | 4
-rw-r--r--  contrib/btree_gist/btree_numeric.c | 2
-rw-r--r--  contrib/btree_gist/btree_utils_num.c | 22
-rw-r--r--  contrib/dblink/dblink.c | 8
-rw-r--r--  contrib/intarray/_int_op.c | 2
-rw-r--r--  contrib/intarray/_int_selfuncs.c | 2
-rw-r--r--  contrib/ltree/_ltree_gist.c | 2
-rw-r--r--  contrib/pageinspect/heapfuncs.c | 2
-rw-r--r--  contrib/pgrowlocks/pgrowlocks.c | 4
-rw-r--r--  contrib/postgres_fdw/connection.c | 8
-rw-r--r--  contrib/postgres_fdw/expected/postgres_fdw.out | 113
-rw-r--r--  contrib/postgres_fdw/option.c | 2
-rw-r--r--  contrib/postgres_fdw/sql/postgres_fdw.sql | 78
-rw-r--r--  contrib/seg/seg.c | 4
-rw-r--r--  doc/src/sgml/datatype.sgml | 2
-rw-r--r--  doc/src/sgml/logicaldecoding.sgml | 8
-rw-r--r--  doc/src/sgml/ref/alter_table.sgml | 11
-rw-r--r--  meson.build | 7
-rw-r--r--  src/backend/access/brin/brin.c | 4
-rw-r--r--  src/backend/access/brin/brin_bloom.c | 2
-rw-r--r--  src/backend/access/brin/brin_minmax.c | 10
-rw-r--r--  src/backend/access/brin/brin_minmax_multi.c | 6
-rw-r--r--  src/backend/access/common/heaptuple.c | 2
-rw-r--r--  src/backend/access/common/reloptions.c | 10
-rw-r--r--  src/backend/access/common/toast_internals.c | 4
-rw-r--r--  src/backend/backup/basebackup_copy.c | 14
-rw-r--r--  src/backend/catalog/objectaddress.c | 4
-rw-r--r--  src/backend/catalog/pg_aggregate.c | 2
-rw-r--r--  src/backend/catalog/pg_constraint.c | 2
-rw-r--r--  src/backend/catalog/pg_conversion.c | 2
-rw-r--r--  src/backend/catalog/pg_namespace.c | 2
-rw-r--r--  src/backend/catalog/pg_operator.c | 4
-rw-r--r--  src/backend/catalog/pg_proc.c | 2
-rw-r--r--  src/backend/catalog/pg_publication.c | 2
-rw-r--r--  src/backend/catalog/pg_shdepend.c | 12
-rw-r--r--  src/backend/catalog/pg_type.c | 2
-rw-r--r--  src/backend/commands/alter.c | 2
-rw-r--r--  src/backend/commands/event_trigger.c | 4
-rw-r--r--  src/backend/commands/subscriptioncmds.c | 8
-rw-r--r--  src/backend/commands/tablecmds.c | 2
-rw-r--r--  src/backend/commands/trigger.c | 32
-rw-r--r--  src/backend/commands/tsearchcmds.c | 8
-rw-r--r--  src/backend/commands/user.c | 6
-rw-r--r--  src/backend/executor/execExprInterp.c | 8
-rw-r--r--  src/backend/executor/spi.c | 2
-rw-r--r--  src/backend/nodes/readfuncs.c | 2
-rw-r--r--  src/backend/optimizer/plan/createplan.c | 24
-rw-r--r--  src/backend/optimizer/util/plancat.c | 54
-rw-r--r--  src/backend/postmaster/checkpointer.c | 9
-rw-r--r--  src/backend/replication/logical/applyparallelworker.c | 4
-rw-r--r--  src/backend/replication/logical/worker.c | 10
-rw-r--r--  src/backend/replication/walreceiver.c | 8
-rw-r--r--  src/backend/replication/walsender.c | 25
-rw-r--r--  src/backend/rewrite/rewriteDefine.c | 5
-rw-r--r--  src/backend/statistics/attribute_stats.c | 6
-rw-r--r--  src/backend/statistics/extended_stats.c | 2
-rw-r--r--  src/backend/storage/aio/aio_funcs.c | 2
-rw-r--r--  src/backend/storage/aio/read_stream.c | 34
-rw-r--r--  src/backend/storage/buffer/bufmgr.c | 9
-rw-r--r--  src/backend/storage/ipc/shmem.c | 2
-rw-r--r--  src/backend/storage/large_object/inv_api.c | 4
-rw-r--r--  src/backend/storage/lmgr/lock.c | 4
-rw-r--r--  src/backend/tsearch/ts_parse.c | 4
-rw-r--r--  src/backend/utils/activity/pgstat.c | 2
-rw-r--r--  src/backend/utils/activity/pgstat_shmem.c | 5
-rw-r--r--  src/backend/utils/adt/datum.c | 6
-rw-r--r--  src/backend/utils/adt/json.c | 2
-rw-r--r--  src/backend/utils/adt/jsonfuncs.c | 6
-rw-r--r--  src/backend/utils/adt/jsonpath_exec.c | 2
-rw-r--r--  src/backend/utils/adt/lockfuncs.c | 8
-rw-r--r--  src/backend/utils/adt/multirangetypes.c | 19
-rw-r--r--  src/backend/utils/adt/numeric.c | 502
-rw-r--r--  src/backend/utils/adt/rangetypes.c | 16
-rw-r--r--  src/backend/utils/adt/rangetypes_spgist.c | 2
-rw-r--r--  src/backend/utils/adt/rowtypes.c | 4
-rw-r--r--  src/backend/utils/adt/timestamp.c | 2
-rw-r--r--  src/backend/utils/adt/varlena.c | 5
-rw-r--r--  src/backend/utils/adt/waitfuncs.c | 2
-rw-r--r--  src/backend/utils/cache/attoptcache.c | 2
-rw-r--r--  src/backend/utils/cache/lsyscache.c | 2
-rw-r--r--  src/backend/utils/cache/relcache.c | 4
-rw-r--r--  src/backend/utils/cache/syscache.c | 6
-rw-r--r--  src/backend/utils/error/csvlog.c | 2
-rw-r--r--  src/backend/utils/error/elog.c | 4
-rw-r--r--  src/backend/utils/error/jsonlog.c | 2
-rw-r--r--  src/backend/utils/sort/sortsupport.c | 2
-rw-r--r--  src/backend/utils/sort/tuplesortvariants.c | 6
-rw-r--r--  src/bin/pg_basebackup/pg_basebackup.c | 9
-rw-r--r--  src/bin/pg_basebackup/pg_recvlogical.c | 11
-rw-r--r--  src/bin/pg_basebackup/receivelog.c | 11
-rw-r--r--  src/bin/pg_dump/filter.c | 13
-rw-r--r--  src/bin/pg_dump/t/005_pg_dump_filterfile.pl | 14
-rw-r--r--  src/bin/pg_upgrade/check.c | 2
-rw-r--r--  src/include/access/htup_details.h | 2
-rw-r--r--  src/include/access/itup.h | 2
-rw-r--r--  src/include/access/reloptions.h | 2
-rw-r--r--  src/include/c.h | 2
-rw-r--r--  src/include/common/int128.h | 433
-rw-r--r--  src/include/libpq/protocol.h | 21
-rw-r--r--  src/include/optimizer/plancat.h | 2
-rw-r--r--  src/include/utils/pg_locale.h | 2
-rw-r--r--  src/interfaces/ecpg/compatlib/informix.c | 6
-rw-r--r--  src/interfaces/libpq-oauth/oauth-curl.c | 219
-rw-r--r--  src/pl/plperl/plperl.c | 10
-rw-r--r--  src/test/modules/Makefile | 1
-rw-r--r--  src/test/modules/meson.build | 1
-rw-r--r--  src/test/modules/oauth_validator/t/001_server.pl | 31
-rw-r--r--  src/test/modules/test_int128/.gitignore | 2
-rw-r--r--  src/test/modules/test_int128/Makefile | 23
-rw-r--r--  src/test/modules/test_int128/meson.build | 33
-rw-r--r--  src/test/modules/test_int128/t/001_test_int128.pl | 27
-rw-r--r--  src/test/modules/test_int128/test_int128.c | 281
-rw-r--r--  src/test/modules/test_radixtree/test_radixtree.c | 2
-rw-r--r--  src/test/regress/expected/aggregates.out | 19
-rw-r--r--  src/test/regress/expected/strings.out | 34
-rw-r--r--  src/test/regress/expected/timestamp.out | 10
-rw-r--r--  src/test/regress/expected/timestamptz.out | 18
-rw-r--r--  src/test/regress/expected/triggers.out | 8
-rw-r--r--  src/test/regress/regress.c | 2
-rw-r--r--  src/test/regress/sql/aggregates.sql | 5
-rw-r--r--  src/test/regress/sql/strings.sql | 20
-rw-r--r--  src/test/regress/sql/timestamp.sql | 4
-rw-r--r--  src/test/regress/sql/timestamptz.sql | 7
-rw-r--r--  src/test/regress/sql/triggers.sql | 10
-rw-r--r--  src/tools/testint128.c | 170
125 files changed, 1761 insertions, 968 deletions
diff --git a/contrib/btree_gist/btree_enum.c b/contrib/btree_gist/btree_enum.c
index 83c95c7bb04..c54cf3c7bae 100644
--- a/contrib/btree_gist/btree_enum.c
+++ b/contrib/btree_gist/btree_enum.c
@@ -193,8 +193,8 @@ gbt_enum_ssup_cmp(Datum x, Datum y, SortSupport ssup)
return DatumGetInt32(CallerFInfoFunctionCall2(enum_cmp,
ssup->ssup_extra,
InvalidOid,
- arg1->lower,
- arg2->lower));
+ ObjectIdGetDatum(arg1->lower),
+ ObjectIdGetDatum(arg2->lower)));
}
Datum
diff --git a/contrib/btree_gist/btree_numeric.c b/contrib/btree_gist/btree_numeric.c
index a39c05d9da1..052f27b0794 100644
--- a/contrib/btree_gist/btree_numeric.c
+++ b/contrib/btree_gist/btree_numeric.c
@@ -192,7 +192,7 @@ gbt_numeric_penalty(PG_FUNCTION_ARGS)
*result = 0.0;
- if (DirectFunctionCall2(numeric_gt, NumericGetDatum(ds), NumericGetDatum(nul)))
+ if (DatumGetBool(DirectFunctionCall2(numeric_gt, NumericGetDatum(ds), NumericGetDatum(nul))))
{
*result += FLT_MIN;
os = DatumGetNumeric(DirectFunctionCall2(numeric_div,
diff --git a/contrib/btree_gist/btree_utils_num.c b/contrib/btree_gist/btree_utils_num.c
index 346ee837d75..446fa930b92 100644
--- a/contrib/btree_gist/btree_utils_num.c
+++ b/contrib/btree_gist/btree_utils_num.c
@@ -119,38 +119,38 @@ gbt_num_fetch(GISTENTRY *entry, const gbtree_ninfo *tinfo)
switch (tinfo->t)
{
case gbt_t_bool:
- datum = BoolGetDatum(*(bool *) entry->key);
+ datum = BoolGetDatum(*(bool *) DatumGetPointer(entry->key));
break;
case gbt_t_int2:
- datum = Int16GetDatum(*(int16 *) entry->key);
+ datum = Int16GetDatum(*(int16 *) DatumGetPointer(entry->key));
break;
case gbt_t_int4:
- datum = Int32GetDatum(*(int32 *) entry->key);
+ datum = Int32GetDatum(*(int32 *) DatumGetPointer(entry->key));
break;
case gbt_t_int8:
- datum = Int64GetDatum(*(int64 *) entry->key);
+ datum = Int64GetDatum(*(int64 *) DatumGetPointer(entry->key));
break;
case gbt_t_oid:
case gbt_t_enum:
- datum = ObjectIdGetDatum(*(Oid *) entry->key);
+ datum = ObjectIdGetDatum(*(Oid *) DatumGetPointer(entry->key));
break;
case gbt_t_float4:
- datum = Float4GetDatum(*(float4 *) entry->key);
+ datum = Float4GetDatum(*(float4 *) DatumGetPointer(entry->key));
break;
case gbt_t_float8:
- datum = Float8GetDatum(*(float8 *) entry->key);
+ datum = Float8GetDatum(*(float8 *) DatumGetPointer(entry->key));
break;
case gbt_t_date:
- datum = DateADTGetDatum(*(DateADT *) entry->key);
+ datum = DateADTGetDatum(*(DateADT *) DatumGetPointer(entry->key));
break;
case gbt_t_time:
- datum = TimeADTGetDatum(*(TimeADT *) entry->key);
+ datum = TimeADTGetDatum(*(TimeADT *) DatumGetPointer(entry->key));
break;
case gbt_t_ts:
- datum = TimestampGetDatum(*(Timestamp *) entry->key);
+ datum = TimestampGetDatum(*(Timestamp *) DatumGetPointer(entry->key));
break;
case gbt_t_cash:
- datum = CashGetDatum(*(Cash *) entry->key);
+ datum = CashGetDatum(*(Cash *) DatumGetPointer(entry->key));
break;
default:
datum = entry->key;
diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c
index f98805fb5f7..0cf4c27f2e9 100644
--- a/contrib/dblink/dblink.c
+++ b/contrib/dblink/dblink.c
@@ -2643,7 +2643,7 @@ dblink_connstr_has_required_scram_options(const char *connstr)
PQconninfoFree(options);
}
- has_scram_keys = has_scram_client_key && has_scram_server_key && MyProcPort->has_scram_keys;
+ has_scram_keys = has_scram_client_key && has_scram_server_key && MyProcPort != NULL && MyProcPort->has_scram_keys;
return (has_scram_keys && has_require_auth);
}
@@ -2676,7 +2676,7 @@ dblink_security_check(PGconn *conn, const char *connname, const char *connstr)
* only added if UseScramPassthrough is set, and the user is not allowed
* to add the SCRAM keys on fdw and user mapping options.
*/
- if (MyProcPort->has_scram_keys && dblink_connstr_has_required_scram_options(connstr))
+ if (MyProcPort != NULL && MyProcPort->has_scram_keys && dblink_connstr_has_required_scram_options(connstr))
return;
#ifdef ENABLE_GSS
@@ -2749,7 +2749,7 @@ dblink_connstr_check(const char *connstr)
if (dblink_connstr_has_pw(connstr))
return;
- if (MyProcPort->has_scram_keys && dblink_connstr_has_required_scram_options(connstr))
+ if (MyProcPort != NULL && MyProcPort->has_scram_keys && dblink_connstr_has_required_scram_options(connstr))
return;
#ifdef ENABLE_GSS
@@ -2896,7 +2896,7 @@ get_connect_string(const char *servername)
* the user overwrites these options we can ereport on
* dblink_connstr_check and dblink_security_check.
*/
- if (MyProcPort->has_scram_keys && UseScramPassthrough(foreign_server, user_mapping))
+ if (MyProcPort != NULL && MyProcPort->has_scram_keys && UseScramPassthrough(foreign_server, user_mapping))
appendSCRAMKeysInfo(&buf);
foreach(cell, fdw->options)
diff --git a/contrib/intarray/_int_op.c b/contrib/intarray/_int_op.c
index ba6d0a99995..a706e353c6f 100644
--- a/contrib/intarray/_int_op.c
+++ b/contrib/intarray/_int_op.c
@@ -108,7 +108,7 @@ _int_overlap(PG_FUNCTION_ARGS)
CHECKARRVALID(a);
CHECKARRVALID(b);
if (ARRISEMPTY(a) || ARRISEMPTY(b))
- return false;
+ PG_RETURN_BOOL(false);
SORT(a);
SORT(b);
diff --git a/contrib/intarray/_int_selfuncs.c b/contrib/intarray/_int_selfuncs.c
index 6c3b7ace146..9bf64486242 100644
--- a/contrib/intarray/_int_selfuncs.c
+++ b/contrib/intarray/_int_selfuncs.c
@@ -177,7 +177,7 @@ _int_matchsel(PG_FUNCTION_ARGS)
if (query->size == 0)
{
ReleaseVariableStats(vardata);
- return (Selectivity) 0.0;
+ PG_RETURN_FLOAT8(0.0);
}
/*
diff --git a/contrib/ltree/_ltree_gist.c b/contrib/ltree/_ltree_gist.c
index 286ad24fbe8..30d516e60bc 100644
--- a/contrib/ltree/_ltree_gist.c
+++ b/contrib/ltree/_ltree_gist.c
@@ -84,7 +84,7 @@ _ltree_compress(PG_FUNCTION_ARGS)
entry->rel, entry->page,
entry->offset, false);
}
- else if (!LTG_ISALLTRUE(entry->key))
+ else if (!LTG_ISALLTRUE(DatumGetPointer(entry->key)))
{
int32 i;
ltree_gist *key;
diff --git a/contrib/pageinspect/heapfuncs.c b/contrib/pageinspect/heapfuncs.c
index 377ae30d1fa..c13f07655c4 100644
--- a/contrib/pageinspect/heapfuncs.c
+++ b/contrib/pageinspect/heapfuncs.c
@@ -256,7 +256,7 @@ heap_page_items(PG_FUNCTION_ARGS)
nulls[11] = true;
if (tuphdr->t_infomask & HEAP_HASOID_OLD)
- values[12] = HeapTupleHeaderGetOidOld(tuphdr);
+ values[12] = ObjectIdGetDatum(HeapTupleHeaderGetOidOld(tuphdr));
else
nulls[12] = true;
diff --git a/contrib/pgrowlocks/pgrowlocks.c b/contrib/pgrowlocks/pgrowlocks.c
index b75d80fa7a9..f88269332b6 100644
--- a/contrib/pgrowlocks/pgrowlocks.c
+++ b/contrib/pgrowlocks/pgrowlocks.c
@@ -141,8 +141,8 @@ pgrowlocks(PG_FUNCTION_ARGS)
*/
if (htsu == TM_BeingModified)
{
- values[Atnum_tid] = (char *) DirectFunctionCall1(tidout,
- PointerGetDatum(&tuple->t_self));
+ values[Atnum_tid] = DatumGetCString(DirectFunctionCall1(tidout,
+ PointerGetDatum(&tuple->t_self)));
values[Atnum_xmax] = palloc(NCHARS * sizeof(char));
snprintf(values[Atnum_xmax], NCHARS, "%u", xmax);
diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c
index e8148f2c5a2..4fbb6c182b8 100644
--- a/contrib/postgres_fdw/connection.c
+++ b/contrib/postgres_fdw/connection.c
@@ -464,7 +464,7 @@ pgfdw_security_check(const char **keywords, const char **values, UserMapping *us
* assume that UseScramPassthrough is also true since SCRAM options are
* only set when UseScramPassthrough is enabled.
*/
- if (MyProcPort->has_scram_keys && pgfdw_has_required_scram_options(keywords, values))
+ if (MyProcPort != NULL && MyProcPort->has_scram_keys && pgfdw_has_required_scram_options(keywords, values))
return;
ereport(ERROR,
@@ -570,7 +570,7 @@ connect_pg_server(ForeignServer *server, UserMapping *user)
n++;
/* Add required SCRAM pass-through connection options if it's enabled. */
- if (MyProcPort->has_scram_keys && UseScramPassthrough(server, user))
+ if (MyProcPort != NULL && MyProcPort->has_scram_keys && UseScramPassthrough(server, user))
{
int len;
int encoded_len;
@@ -748,7 +748,7 @@ check_conn_params(const char **keywords, const char **values, UserMapping *user)
* assume that UseScramPassthrough is also true since SCRAM options are
* only set when UseScramPassthrough is enabled.
*/
- if (MyProcPort->has_scram_keys && pgfdw_has_required_scram_options(keywords, values))
+ if (MyProcPort != NULL && MyProcPort->has_scram_keys && pgfdw_has_required_scram_options(keywords, values))
return;
ereport(ERROR,
@@ -2559,7 +2559,7 @@ pgfdw_has_required_scram_options(const char **keywords, const char **values)
}
}
- has_scram_keys = has_scram_client_key && has_scram_server_key && MyProcPort->has_scram_keys;
+ has_scram_keys = has_scram_client_key && has_scram_server_key && MyProcPort != NULL && MyProcPort->has_scram_keys;
return (has_scram_keys && has_require_auth);
}
diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out
index 4b6e49a5d95..a434eb1395e 100644
--- a/contrib/postgres_fdw/expected/postgres_fdw.out
+++ b/contrib/postgres_fdw/expected/postgres_fdw.out
@@ -8233,6 +8233,119 @@ DELETE FROM rem1; -- can't be pushed down
(5 rows)
DROP TRIGGER trig_row_after_delete ON rem1;
+-- We are allowed to create transition-table triggers on both kinds of
+-- inheritance even if they contain foreign tables as children, but currently
+-- collecting transition tuples from such foreign tables is not supported.
+CREATE TABLE local_tbl (a text, b int);
+CREATE FOREIGN TABLE foreign_tbl (a text, b int)
+ SERVER loopback OPTIONS (table_name 'local_tbl');
+INSERT INTO foreign_tbl VALUES ('AAA', 42);
+-- Test case for partition hierarchy
+CREATE TABLE parent_tbl (a text, b int) PARTITION BY LIST (a);
+ALTER TABLE parent_tbl ATTACH PARTITION foreign_tbl FOR VALUES IN ('AAA');
+CREATE TRIGGER parent_tbl_insert_trig
+ AFTER INSERT ON parent_tbl REFERENCING NEW TABLE AS new_table
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
+CREATE TRIGGER parent_tbl_update_trig
+ AFTER UPDATE ON parent_tbl REFERENCING OLD TABLE AS old_table NEW TABLE AS new_table
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
+CREATE TRIGGER parent_tbl_delete_trig
+ AFTER DELETE ON parent_tbl REFERENCING OLD TABLE AS old_table
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
+INSERT INTO parent_tbl VALUES ('AAA', 42);
+ERROR: cannot collect transition tuples from child foreign tables
+COPY parent_tbl (a, b) FROM stdin;
+ERROR: cannot collect transition tuples from child foreign tables
+CONTEXT: COPY parent_tbl, line 1: "AAA 42"
+ALTER SERVER loopback OPTIONS (ADD batch_size '10');
+INSERT INTO parent_tbl VALUES ('AAA', 42);
+ERROR: cannot collect transition tuples from child foreign tables
+COPY parent_tbl (a, b) FROM stdin;
+ERROR: cannot collect transition tuples from child foreign tables
+CONTEXT: COPY parent_tbl, line 1: "AAA 42"
+ALTER SERVER loopback OPTIONS (DROP batch_size);
+EXPLAIN (VERBOSE, COSTS OFF)
+UPDATE parent_tbl SET b = b + 1;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------
+ Update on public.parent_tbl
+ Foreign Update on public.foreign_tbl parent_tbl_1
+ Remote SQL: UPDATE public.local_tbl SET b = $2 WHERE ctid = $1
+ -> Foreign Scan on public.foreign_tbl parent_tbl_1
+ Output: (parent_tbl_1.b + 1), parent_tbl_1.tableoid, parent_tbl_1.ctid, parent_tbl_1.*
+ Remote SQL: SELECT a, b, ctid FROM public.local_tbl FOR UPDATE
+(6 rows)
+
+UPDATE parent_tbl SET b = b + 1;
+ERROR: cannot collect transition tuples from child foreign tables
+EXPLAIN (VERBOSE, COSTS OFF)
+DELETE FROM parent_tbl;
+ QUERY PLAN
+------------------------------------------------------------------
+ Delete on public.parent_tbl
+ Foreign Delete on public.foreign_tbl parent_tbl_1
+ Remote SQL: DELETE FROM public.local_tbl WHERE ctid = $1
+ -> Foreign Scan on public.foreign_tbl parent_tbl_1
+ Output: parent_tbl_1.tableoid, parent_tbl_1.ctid
+ Remote SQL: SELECT ctid FROM public.local_tbl FOR UPDATE
+(6 rows)
+
+DELETE FROM parent_tbl;
+ERROR: cannot collect transition tuples from child foreign tables
+ALTER TABLE parent_tbl DETACH PARTITION foreign_tbl;
+DROP TABLE parent_tbl;
+-- Test case for non-partition hierarchy
+CREATE TABLE parent_tbl (a text, b int);
+ALTER FOREIGN TABLE foreign_tbl INHERIT parent_tbl;
+CREATE TRIGGER parent_tbl_update_trig
+ AFTER UPDATE ON parent_tbl REFERENCING OLD TABLE AS old_table NEW TABLE AS new_table
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
+CREATE TRIGGER parent_tbl_delete_trig
+ AFTER DELETE ON parent_tbl REFERENCING OLD TABLE AS old_table
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
+EXPLAIN (VERBOSE, COSTS OFF)
+UPDATE parent_tbl SET b = b + 1;
+ QUERY PLAN
+------------------------------------------------------------------------------------------------------
+ Update on public.parent_tbl
+ Update on public.parent_tbl parent_tbl_1
+ Foreign Update on public.foreign_tbl parent_tbl_2
+ Remote SQL: UPDATE public.local_tbl SET b = $2 WHERE ctid = $1
+ -> Result
+ Output: (parent_tbl.b + 1), parent_tbl.tableoid, parent_tbl.ctid, (NULL::record)
+ -> Append
+ -> Seq Scan on public.parent_tbl parent_tbl_1
+ Output: parent_tbl_1.b, parent_tbl_1.tableoid, parent_tbl_1.ctid, NULL::record
+ -> Foreign Scan on public.foreign_tbl parent_tbl_2
+ Output: parent_tbl_2.b, parent_tbl_2.tableoid, parent_tbl_2.ctid, parent_tbl_2.*
+ Remote SQL: SELECT a, b, ctid FROM public.local_tbl FOR UPDATE
+(12 rows)
+
+UPDATE parent_tbl SET b = b + 1;
+ERROR: cannot collect transition tuples from child foreign tables
+EXPLAIN (VERBOSE, COSTS OFF)
+DELETE FROM parent_tbl;
+ QUERY PLAN
+------------------------------------------------------------------------
+ Delete on public.parent_tbl
+ Delete on public.parent_tbl parent_tbl_1
+ Foreign Delete on public.foreign_tbl parent_tbl_2
+ Remote SQL: DELETE FROM public.local_tbl WHERE ctid = $1
+ -> Append
+ -> Seq Scan on public.parent_tbl parent_tbl_1
+ Output: parent_tbl_1.tableoid, parent_tbl_1.ctid
+ -> Foreign Scan on public.foreign_tbl parent_tbl_2
+ Output: parent_tbl_2.tableoid, parent_tbl_2.ctid
+ Remote SQL: SELECT ctid FROM public.local_tbl FOR UPDATE
+(10 rows)
+
+DELETE FROM parent_tbl;
+ERROR: cannot collect transition tuples from child foreign tables
+ALTER FOREIGN TABLE foreign_tbl NO INHERIT parent_tbl;
+DROP TABLE parent_tbl;
+-- Cleanup
+DROP FOREIGN TABLE foreign_tbl;
+DROP TABLE local_tbl;
-- ===================================================================
-- test inheritance features
-- ===================================================================
diff --git a/contrib/postgres_fdw/option.c b/contrib/postgres_fdw/option.c
index d6fa89bad93..04788b7e8b3 100644
--- a/contrib/postgres_fdw/option.c
+++ b/contrib/postgres_fdw/option.c
@@ -522,7 +522,7 @@ process_pgfdw_appname(const char *appname)
appendStringInfoString(&buf, application_name);
break;
case 'c':
- appendStringInfo(&buf, INT64_HEX_FORMAT ".%x", MyStartTime, MyProcPid);
+ appendStringInfo(&buf, "%" PRIx64 ".%x", MyStartTime, MyProcPid);
break;
case 'C':
appendStringInfoString(&buf, cluster_name);
diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql
index 31b6c685b55..d9bed565c81 100644
--- a/contrib/postgres_fdw/sql/postgres_fdw.sql
+++ b/contrib/postgres_fdw/sql/postgres_fdw.sql
@@ -2281,6 +2281,84 @@ EXPLAIN (verbose, costs off)
DELETE FROM rem1; -- can't be pushed down
DROP TRIGGER trig_row_after_delete ON rem1;
+
+-- We are allowed to create transition-table triggers on both kinds of
+-- inheritance even if they contain foreign tables as children, but currently
+-- collecting transition tuples from such foreign tables is not supported.
+
+CREATE TABLE local_tbl (a text, b int);
+CREATE FOREIGN TABLE foreign_tbl (a text, b int)
+ SERVER loopback OPTIONS (table_name 'local_tbl');
+
+INSERT INTO foreign_tbl VALUES ('AAA', 42);
+
+-- Test case for partition hierarchy
+CREATE TABLE parent_tbl (a text, b int) PARTITION BY LIST (a);
+ALTER TABLE parent_tbl ATTACH PARTITION foreign_tbl FOR VALUES IN ('AAA');
+
+CREATE TRIGGER parent_tbl_insert_trig
+ AFTER INSERT ON parent_tbl REFERENCING NEW TABLE AS new_table
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
+CREATE TRIGGER parent_tbl_update_trig
+ AFTER UPDATE ON parent_tbl REFERENCING OLD TABLE AS old_table NEW TABLE AS new_table
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
+CREATE TRIGGER parent_tbl_delete_trig
+ AFTER DELETE ON parent_tbl REFERENCING OLD TABLE AS old_table
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
+
+INSERT INTO parent_tbl VALUES ('AAA', 42);
+
+COPY parent_tbl (a, b) FROM stdin;
+AAA 42
+\.
+
+ALTER SERVER loopback OPTIONS (ADD batch_size '10');
+
+INSERT INTO parent_tbl VALUES ('AAA', 42);
+
+COPY parent_tbl (a, b) FROM stdin;
+AAA 42
+\.
+
+ALTER SERVER loopback OPTIONS (DROP batch_size);
+
+EXPLAIN (VERBOSE, COSTS OFF)
+UPDATE parent_tbl SET b = b + 1;
+UPDATE parent_tbl SET b = b + 1;
+
+EXPLAIN (VERBOSE, COSTS OFF)
+DELETE FROM parent_tbl;
+DELETE FROM parent_tbl;
+
+ALTER TABLE parent_tbl DETACH PARTITION foreign_tbl;
+DROP TABLE parent_tbl;
+
+-- Test case for non-partition hierarchy
+CREATE TABLE parent_tbl (a text, b int);
+ALTER FOREIGN TABLE foreign_tbl INHERIT parent_tbl;
+
+CREATE TRIGGER parent_tbl_update_trig
+ AFTER UPDATE ON parent_tbl REFERENCING OLD TABLE AS old_table NEW TABLE AS new_table
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
+CREATE TRIGGER parent_tbl_delete_trig
+ AFTER DELETE ON parent_tbl REFERENCING OLD TABLE AS old_table
+ FOR EACH STATEMENT EXECUTE PROCEDURE trigger_func();
+
+EXPLAIN (VERBOSE, COSTS OFF)
+UPDATE parent_tbl SET b = b + 1;
+UPDATE parent_tbl SET b = b + 1;
+
+EXPLAIN (VERBOSE, COSTS OFF)
+DELETE FROM parent_tbl;
+DELETE FROM parent_tbl;
+
+ALTER FOREIGN TABLE foreign_tbl NO INHERIT parent_tbl;
+DROP TABLE parent_tbl;
+
+-- Cleanup
+DROP FOREIGN TABLE foreign_tbl;
+DROP TABLE local_tbl;
+
-- ===================================================================
-- test inheritance features
-- ===================================================================
diff --git a/contrib/seg/seg.c b/contrib/seg/seg.c
index 151cbb954b9..b5de2a5e1be 100644
--- a/contrib/seg/seg.c
+++ b/contrib/seg/seg.c
@@ -417,7 +417,7 @@ gseg_same(PG_FUNCTION_ARGS)
{
bool *result = (bool *) PG_GETARG_POINTER(2);
- if (DirectFunctionCall2(seg_same, PG_GETARG_DATUM(0), PG_GETARG_DATUM(1)))
+ if (DatumGetBool(DirectFunctionCall2(seg_same, PG_GETARG_DATUM(0), PG_GETARG_DATUM(1))))
*result = true;
else
*result = false;
@@ -470,7 +470,7 @@ gseg_leaf_consistent(Datum key, Datum query, StrategyNumber strategy)
retval = DirectFunctionCall2(seg_contained, key, query);
break;
default:
- retval = false;
+ retval = BoolGetDatum(false);
}
PG_RETURN_DATUM(retval);
diff --git a/doc/src/sgml/datatype.sgml b/doc/src/sgml/datatype.sgml
index 0994e089311..7458f216e50 100644
--- a/doc/src/sgml/datatype.sgml
+++ b/doc/src/sgml/datatype.sgml
@@ -117,7 +117,7 @@
<row>
<entry><type>double precision</type></entry>
- <entry><type>float8</type></entry>
+ <entry><type>float</type>, <type>float8</type></entry>
<entry>double precision floating-point number (8 bytes)</entry>
</row>
diff --git a/doc/src/sgml/logicaldecoding.sgml b/doc/src/sgml/logicaldecoding.sgml
index 593f784b69d..77c720c422c 100644
--- a/doc/src/sgml/logicaldecoding.sgml
+++ b/doc/src/sgml/logicaldecoding.sgml
@@ -290,7 +290,7 @@ postgres=# select * from pg_logical_slot_get_changes('regression_slot', NULL, NU
<para>
A logical slot will emit each change just once in normal operation.
The current position of each slot is persisted only at checkpoint, so in
- the case of a crash the slot may return to an earlier LSN, which will
+ the case of a crash the slot might return to an earlier LSN, which will
then cause recent changes to be sent again when the server restarts.
Logical decoding clients are responsible for avoiding ill effects from
handling the same message more than once. Clients may wish to record
@@ -409,7 +409,7 @@ postgres=# select * from pg_logical_slot_get_changes('regression_slot', NULL, NU
should be used with caution. Unlike automatic synchronization, it does not
include cyclic retries, making it more prone to synchronization failures,
particularly during initial sync scenarios where the required WAL files
- or catalog rows for the slot may have already been removed or are at risk
+ or catalog rows for the slot might have already been removed or are at risk
of being removed on the standby. In contrast, automatic synchronization
via <varname>sync_replication_slots</varname> provides continuous slot
updates, enabling seamless failover and supporting high availability.
@@ -430,8 +430,8 @@ postgres=# select * from pg_logical_slot_get_changes('regression_slot', NULL, NU
standby, the slot will not be persisted to avoid data loss. In such
cases, the following log message may appear:
<programlisting>
- LOG: could not synchronize replication slot "failover_slot"
- DETAIL: Synchronization could lead to data loss as the remote slot needs WAL at LSN 0/03003F28 and catalog xmin 754, but the standby has LSN 0/03003F28 and catalog xmin 756
+LOG: could not synchronize replication slot "failover_slot"
+DETAIL: Synchronization could lead to data loss, because the remote slot needs WAL at LSN 0/03003F28 and catalog xmin 754, but the standby has LSN 0/03003F28 and catalog xmin 756.
</programlisting>
If the logical replication slot is actively used by a consumer, no
manual intervention is needed; the slot will advance automatically,
diff --git a/doc/src/sgml/ref/alter_table.sgml b/doc/src/sgml/ref/alter_table.sgml
index 541e093a519..8867da6c693 100644
--- a/doc/src/sgml/ref/alter_table.sgml
+++ b/doc/src/sgml/ref/alter_table.sgml
@@ -210,6 +210,8 @@ WITH ( MODULUS <replaceable class="parameter">numeric_literal</replaceable>, REM
When this form is used, the column's statistics are removed,
so running <link linkend="sql-analyze"><command>ANALYZE</command></link>
on the table afterwards is recommended.
+ For a virtual generated column, <command>ANALYZE</command>
+ is not necessary because such columns never have statistics.
</para>
</listitem>
</varlistentry>
@@ -271,6 +273,15 @@ WITH ( MODULUS <replaceable class="parameter">numeric_literal</replaceable>, REM
in a stored generated column is rewritten and all the future changes
will apply the new generation expression.
</para>
+
+ <para>
+ When this form is used on a stored generated column, its statistics
+ are removed, so running
+ <link linkend="sql-analyze"><command>ANALYZE</command></link>
+ on the table afterwards is recommended.
+ For a virtual generated column, <command>ANALYZE</command>
+ is not necessary because such columns never have statistics.
+ </para>
</listitem>
</varlistentry>
diff --git a/meson.build b/meson.build
index ca423dc8e12..431fe9ccc9b 100644
--- a/meson.build
+++ b/meson.build
@@ -3472,6 +3472,13 @@ installed_targets = [
ecpg_targets,
]
+if oauth_flow_supported
+ installed_targets += [
+ libpq_oauth_so,
+ libpq_oauth_st,
+ ]
+endif
+
# all targets that require building code
all_built = [
installed_targets,
diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c
index 4204088fa0d..7ff7467e462 100644
--- a/src/backend/access/brin/brin.c
+++ b/src/backend/access/brin/brin.c
@@ -1608,7 +1608,7 @@ brin_build_desc(Relation rel)
opcInfoFn = index_getprocinfo(rel, keyno + 1, BRIN_PROCNUM_OPCINFO);
opcinfo[keyno] = (BrinOpcInfo *)
- DatumGetPointer(FunctionCall1(opcInfoFn, attr->atttypid));
+ DatumGetPointer(FunctionCall1(opcInfoFn, ObjectIdGetDatum(attr->atttypid)));
totalstored += opcinfo[keyno]->oi_nstored;
}
@@ -2262,7 +2262,7 @@ add_values_to_range(Relation idxRel, BrinDesc *bdesc, BrinMemTuple *dtup,
PointerGetDatum(bdesc),
PointerGetDatum(bval),
values[keyno],
- nulls[keyno]);
+ BoolGetDatum(nulls[keyno]));
/* if that returned true, we need to insert the updated tuple */
modified |= DatumGetBool(result);
diff --git a/src/backend/access/brin/brin_bloom.c b/src/backend/access/brin/brin_bloom.c
index 82b425ce37d..7c3f7d454fc 100644
--- a/src/backend/access/brin/brin_bloom.c
+++ b/src/backend/access/brin/brin_bloom.c
@@ -540,7 +540,7 @@ brin_bloom_add_value(PG_FUNCTION_ARGS)
BrinDesc *bdesc = (BrinDesc *) PG_GETARG_POINTER(0);
BrinValues *column = (BrinValues *) PG_GETARG_POINTER(1);
Datum newval = PG_GETARG_DATUM(2);
- bool isnull PG_USED_FOR_ASSERTS_ONLY = PG_GETARG_DATUM(3);
+ bool isnull PG_USED_FOR_ASSERTS_ONLY = PG_GETARG_BOOL(3);
BloomOptions *opts = (BloomOptions *) PG_GET_OPCLASS_OPTIONS();
Oid colloid = PG_GET_COLLATION();
FmgrInfo *hashFn;
diff --git a/src/backend/access/brin/brin_minmax.c b/src/backend/access/brin/brin_minmax.c
index d21ab3a668c..79c5a0aa185 100644
--- a/src/backend/access/brin/brin_minmax.c
+++ b/src/backend/access/brin/brin_minmax.c
@@ -66,7 +66,7 @@ brin_minmax_add_value(PG_FUNCTION_ARGS)
BrinDesc *bdesc = (BrinDesc *) PG_GETARG_POINTER(0);
BrinValues *column = (BrinValues *) PG_GETARG_POINTER(1);
Datum newval = PG_GETARG_DATUM(2);
- bool isnull PG_USED_FOR_ASSERTS_ONLY = PG_GETARG_DATUM(3);
+ bool isnull PG_USED_FOR_ASSERTS_ONLY = PG_GETARG_BOOL(3);
Oid colloid = PG_GET_COLLATION();
FmgrInfo *cmpFn;
Datum compar;
@@ -225,8 +225,8 @@ brin_minmax_union(PG_FUNCTION_ARGS)
/* Adjust minimum, if B's min is less than A's min */
finfo = minmax_get_strategy_procinfo(bdesc, attno, attr->atttypid,
BTLessStrategyNumber);
- needsadj = FunctionCall2Coll(finfo, colloid, col_b->bv_values[0],
- col_a->bv_values[0]);
+ needsadj = DatumGetBool(FunctionCall2Coll(finfo, colloid, col_b->bv_values[0],
+ col_a->bv_values[0]));
if (needsadj)
{
if (!attr->attbyval)
@@ -238,8 +238,8 @@ brin_minmax_union(PG_FUNCTION_ARGS)
/* Adjust maximum, if B's max is greater than A's max */
finfo = minmax_get_strategy_procinfo(bdesc, attno, attr->atttypid,
BTGreaterStrategyNumber);
- needsadj = FunctionCall2Coll(finfo, colloid, col_b->bv_values[1],
- col_a->bv_values[1]);
+ needsadj = DatumGetBool(FunctionCall2Coll(finfo, colloid, col_b->bv_values[1],
+ col_a->bv_values[1]));
if (needsadj)
{
if (!attr->attbyval)
diff --git a/src/backend/access/brin/brin_minmax_multi.c b/src/backend/access/brin/brin_minmax_multi.c
index a5a414182ca..c87f1b9cd7e 100644
--- a/src/backend/access/brin/brin_minmax_multi.c
+++ b/src/backend/access/brin/brin_minmax_multi.c
@@ -1992,8 +1992,8 @@ brin_minmax_multi_distance_tid(PG_FUNCTION_ARGS)
double da1,
da2;
- ItemPointer pa1 = (ItemPointer) PG_GETARG_DATUM(0);
- ItemPointer pa2 = (ItemPointer) PG_GETARG_DATUM(1);
+ ItemPointer pa1 = (ItemPointer) PG_GETARG_POINTER(0);
+ ItemPointer pa2 = (ItemPointer) PG_GETARG_POINTER(1);
/*
* We know the values are range boundaries, but the range may be collapsed
@@ -2414,7 +2414,7 @@ brin_minmax_multi_add_value(PG_FUNCTION_ARGS)
BrinDesc *bdesc = (BrinDesc *) PG_GETARG_POINTER(0);
BrinValues *column = (BrinValues *) PG_GETARG_POINTER(1);
Datum newval = PG_GETARG_DATUM(2);
- bool isnull PG_USED_FOR_ASSERTS_ONLY = PG_GETARG_DATUM(3);
+ bool isnull PG_USED_FOR_ASSERTS_ONLY = PG_GETARG_BOOL(3);
MinMaxMultiOptions *opts = (MinMaxMultiOptions *) PG_GET_OPCLASS_OPTIONS();
Oid colloid = PG_GET_COLLATION();
bool modified = false;
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index a410b5eb99b..1173a6d81b5 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -105,7 +105,7 @@ missing_hash(const void *key, Size keysize)
{
const missing_cache_key *entry = (missing_cache_key *) key;
- return hash_bytes((const unsigned char *) entry->value, entry->len);
+ return hash_bytes((const unsigned char *) DatumGetPointer(entry->value), entry->len);
}
static int
diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index 594a657ea1a..0af3fea68fa 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -1164,7 +1164,7 @@ add_local_string_reloption(local_relopts *relopts, const char *name,
* but we declare them as Datums to avoid including array.h in reloptions.h.
*/
Datum
-transformRelOptions(Datum oldOptions, List *defList, const char *namspace,
+transformRelOptions(Datum oldOptions, List *defList, const char *nameSpace,
const char *const validnsps[], bool acceptOidsOff, bool isReset)
{
Datum result;
@@ -1200,14 +1200,14 @@ transformRelOptions(Datum oldOptions, List *defList, const char *namspace,
int kw_len;
/* ignore if not in the same namespace */
- if (namspace == NULL)
+ if (nameSpace == NULL)
{
if (def->defnamespace != NULL)
continue;
}
else if (def->defnamespace == NULL)
continue;
- else if (strcmp(def->defnamespace, namspace) != 0)
+ else if (strcmp(def->defnamespace, nameSpace) != 0)
continue;
kw_len = strlen(def->defname);
@@ -1277,14 +1277,14 @@ transformRelOptions(Datum oldOptions, List *defList, const char *namspace,
}
/* ignore if not in the same namespace */
- if (namspace == NULL)
+ if (nameSpace == NULL)
{
if (def->defnamespace != NULL)
continue;
}
else if (def->defnamespace == NULL)
continue;
- else if (strcmp(def->defnamespace, namspace) != 0)
+ else if (strcmp(def->defnamespace, nameSpace) != 0)
continue;
/*
diff --git a/src/backend/access/common/toast_internals.c b/src/backend/access/common/toast_internals.c
index 196e06115e9..a1d0eed8953 100644
--- a/src/backend/access/common/toast_internals.c
+++ b/src/backend/access/common/toast_internals.c
@@ -64,11 +64,11 @@ toast_compress_datum(Datum value, char cmethod)
switch (cmethod)
{
case TOAST_PGLZ_COMPRESSION:
- tmp = pglz_compress_datum((const struct varlena *) value);
+ tmp = pglz_compress_datum((const struct varlena *) DatumGetPointer(value));
cmid = TOAST_PGLZ_COMPRESSION_ID;
break;
case TOAST_LZ4_COMPRESSION:
- tmp = lz4_compress_datum((const struct varlena *) value);
+ tmp = lz4_compress_datum((const struct varlena *) DatumGetPointer(value));
cmid = TOAST_LZ4_COMPRESSION_ID;
break;
default:
diff --git a/src/backend/backup/basebackup_copy.c b/src/backend/backup/basebackup_copy.c
index 18b0b5a52d3..eb45d3bcb66 100644
--- a/src/backend/backup/basebackup_copy.c
+++ b/src/backend/backup/basebackup_copy.c
@@ -143,7 +143,7 @@ bbsink_copystream_begin_backup(bbsink *sink)
buf = palloc(mysink->base.bbs_buffer_length + MAXIMUM_ALIGNOF);
mysink->msgbuffer = buf + (MAXIMUM_ALIGNOF - 1);
mysink->base.bbs_buffer = buf + MAXIMUM_ALIGNOF;
- mysink->msgbuffer[0] = 'd'; /* archive or manifest data */
+ mysink->msgbuffer[0] = PqMsg_CopyData; /* archive or manifest data */
/* Tell client the backup start location. */
SendXlogRecPtrResult(state->startptr, state->starttli);
@@ -170,7 +170,7 @@ bbsink_copystream_begin_archive(bbsink *sink, const char *archive_name)
ti = list_nth(state->tablespaces, state->tablespace_num);
pq_beginmessage(&buf, PqMsg_CopyData);
- pq_sendbyte(&buf, 'n'); /* New archive */
+ pq_sendbyte(&buf, PqBackupMsg_NewArchive);
pq_sendstring(&buf, archive_name);
pq_sendstring(&buf, ti->path == NULL ? "" : ti->path);
pq_endmessage(&buf);
@@ -191,7 +191,7 @@ bbsink_copystream_archive_contents(bbsink *sink, size_t len)
if (mysink->send_to_client)
{
/* Add one because we're also sending a leading type byte. */
- pq_putmessage('d', mysink->msgbuffer, len + 1);
+ pq_putmessage(PqMsg_CopyData, mysink->msgbuffer, len + 1);
}
/* Consider whether to send a progress report to the client. */
@@ -221,7 +221,7 @@ bbsink_copystream_archive_contents(bbsink *sink, size_t len)
mysink->last_progress_report_time = now;
pq_beginmessage(&buf, PqMsg_CopyData);
- pq_sendbyte(&buf, 'p'); /* Progress report */
+ pq_sendbyte(&buf, PqBackupMsg_ProgressReport);
pq_sendint64(&buf, state->bytes_done);
pq_endmessage(&buf);
pq_flush_if_writable();
@@ -247,7 +247,7 @@ bbsink_copystream_end_archive(bbsink *sink)
mysink->bytes_done_at_last_time_check = state->bytes_done;
mysink->last_progress_report_time = GetCurrentTimestamp();
pq_beginmessage(&buf, PqMsg_CopyData);
- pq_sendbyte(&buf, 'p'); /* Progress report */
+ pq_sendbyte(&buf, PqBackupMsg_ProgressReport);
pq_sendint64(&buf, state->bytes_done);
pq_endmessage(&buf);
pq_flush_if_writable();
@@ -262,7 +262,7 @@ bbsink_copystream_begin_manifest(bbsink *sink)
StringInfoData buf;
pq_beginmessage(&buf, PqMsg_CopyData);
- pq_sendbyte(&buf, 'm'); /* Manifest */
+ pq_sendbyte(&buf, PqBackupMsg_Manifest);
pq_endmessage(&buf);
}
@@ -277,7 +277,7 @@ bbsink_copystream_manifest_contents(bbsink *sink, size_t len)
if (mysink->send_to_client)
{
/* Add one because we're also sending a leading type byte. */
- pq_putmessage('d', mysink->msgbuffer, len + 1);
+ pq_putmessage(PqMsg_CopyData, mysink->msgbuffer, len + 1);
}
}
diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c
index b63fd57dc04..0102c9984e7 100644
--- a/src/backend/catalog/objectaddress.c
+++ b/src/backend/catalog/objectaddress.c
@@ -4283,8 +4283,8 @@ pg_identify_object(PG_FUNCTION_ARGS)
nspAttnum = get_object_attnum_namespace(address.classId);
if (nspAttnum != InvalidAttrNumber)
{
- schema_oid = heap_getattr(objtup, nspAttnum,
- RelationGetDescr(catalog), &isnull);
+ schema_oid = DatumGetObjectId(heap_getattr(objtup, nspAttnum,
+ RelationGetDescr(catalog), &isnull));
if (isnull)
elog(ERROR, "invalid null namespace in object %u/%u/%d",
address.classId, address.objectId, address.objectSubId);
diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c
index a05f8a87c1f..c62e8acd413 100644
--- a/src/backend/catalog/pg_aggregate.c
+++ b/src/backend/catalog/pg_aggregate.c
@@ -654,7 +654,7 @@ AggregateCreate(const char *aggName,
for (i = 0; i < Natts_pg_aggregate; i++)
{
nulls[i] = false;
- values[i] = (Datum) NULL;
+ values[i] = (Datum) 0;
replaces[i] = true;
}
values[Anum_pg_aggregate_aggfnoid - 1] = ObjectIdGetDatum(procOid);
diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c
index 2d5ac1ea813..6002fd0002f 100644
--- a/src/backend/catalog/pg_constraint.c
+++ b/src/backend/catalog/pg_constraint.c
@@ -179,7 +179,7 @@ CreateConstraintEntry(const char *constraintName,
for (i = 0; i < Natts_pg_constraint; i++)
{
nulls[i] = false;
- values[i] = (Datum) NULL;
+ values[i] = (Datum) 0;
}
conOid = GetNewOidWithIndex(conDesc, ConstraintOidIndexId,
diff --git a/src/backend/catalog/pg_conversion.c b/src/backend/catalog/pg_conversion.c
index 04cc375caea..090f680d190 100644
--- a/src/backend/catalog/pg_conversion.c
+++ b/src/backend/catalog/pg_conversion.c
@@ -87,7 +87,7 @@ ConversionCreate(const char *conname, Oid connamespace,
for (i = 0; i < Natts_pg_conversion; i++)
{
nulls[i] = false;
- values[i] = (Datum) NULL;
+ values[i] = (Datum) 0;
}
/* form a tuple */
diff --git a/src/backend/catalog/pg_namespace.c b/src/backend/catalog/pg_namespace.c
index 6f5634a4de6..616bcc78521 100644
--- a/src/backend/catalog/pg_namespace.c
+++ b/src/backend/catalog/pg_namespace.c
@@ -76,7 +76,7 @@ NamespaceCreate(const char *nspName, Oid ownerId, bool isTemp)
for (i = 0; i < Natts_pg_namespace; i++)
{
nulls[i] = false;
- values[i] = (Datum) NULL;
+ values[i] = (Datum) 0;
}
nspoid = GetNewOidWithIndex(nspdesc, NamespaceOidIndexId,
diff --git a/src/backend/catalog/pg_operator.c b/src/backend/catalog/pg_operator.c
index bfcfa643464..44d2ccb6788 100644
--- a/src/backend/catalog/pg_operator.c
+++ b/src/backend/catalog/pg_operator.c
@@ -225,7 +225,7 @@ OperatorShellMake(const char *operatorName,
for (i = 0; i < Natts_pg_operator; ++i)
{
nulls[i] = false;
- values[i] = (Datum) NULL; /* redundant, but safe */
+ values[i] = (Datum) 0; /* redundant, but safe */
}
/*
@@ -453,7 +453,7 @@ OperatorCreate(const char *operatorName,
for (i = 0; i < Natts_pg_operator; ++i)
{
- values[i] = (Datum) NULL;
+ values[i] = (Datum) 0;
replaces[i] = true;
nulls[i] = false;
}
diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c
index 5fdcf24d5f8..75b17fed15e 100644
--- a/src/backend/catalog/pg_proc.c
+++ b/src/backend/catalog/pg_proc.c
@@ -1212,6 +1212,6 @@ oid_array_to_list(Datum datum)
deconstruct_array_builtin(array, OIDOID, &values, NULL, &nelems);
for (i = 0; i < nelems; i++)
- result = lappend_oid(result, values[i]);
+ result = lappend_oid(result, DatumGetObjectId(values[i]));
return result;
}
diff --git a/src/backend/catalog/pg_publication.c b/src/backend/catalog/pg_publication.c
index d6f94db5d99..b911efcf9cb 100644
--- a/src/backend/catalog/pg_publication.c
+++ b/src/backend/catalog/pg_publication.c
@@ -1001,7 +1001,7 @@ GetSchemaPublicationRelations(Oid schemaid, PublicationPartOpt pub_partopt)
ScanKeyInit(&key[0],
Anum_pg_class_relnamespace,
BTEqualStrategyNumber, F_OIDEQ,
- schemaid);
+ ObjectIdGetDatum(schemaid));
/* get all the relations present in the specified schema */
scan = table_beginscan_catalog(classRel, 1, key);
diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c
index 536191284e8..32e544da28a 100644
--- a/src/backend/catalog/pg_shdepend.c
+++ b/src/backend/catalog/pg_shdepend.c
@@ -956,12 +956,12 @@ copyTemplateDependencies(Oid templateDbId, Oid newDbId)
shdep = (Form_pg_shdepend) GETSTRUCT(tup);
slot[slot_stored_count]->tts_values[Anum_pg_shdepend_dbid - 1] = ObjectIdGetDatum(newDbId);
- slot[slot_stored_count]->tts_values[Anum_pg_shdepend_classid - 1] = shdep->classid;
- slot[slot_stored_count]->tts_values[Anum_pg_shdepend_objid - 1] = shdep->objid;
- slot[slot_stored_count]->tts_values[Anum_pg_shdepend_objsubid - 1] = shdep->objsubid;
- slot[slot_stored_count]->tts_values[Anum_pg_shdepend_refclassid - 1] = shdep->refclassid;
- slot[slot_stored_count]->tts_values[Anum_pg_shdepend_refobjid - 1] = shdep->refobjid;
- slot[slot_stored_count]->tts_values[Anum_pg_shdepend_deptype - 1] = shdep->deptype;
+ slot[slot_stored_count]->tts_values[Anum_pg_shdepend_classid - 1] = ObjectIdGetDatum(shdep->classid);
+ slot[slot_stored_count]->tts_values[Anum_pg_shdepend_objid - 1] = ObjectIdGetDatum(shdep->objid);
+ slot[slot_stored_count]->tts_values[Anum_pg_shdepend_objsubid - 1] = Int32GetDatum(shdep->objsubid);
+ slot[slot_stored_count]->tts_values[Anum_pg_shdepend_refclassid - 1] = ObjectIdGetDatum(shdep->refclassid);
+ slot[slot_stored_count]->tts_values[Anum_pg_shdepend_refobjid - 1] = ObjectIdGetDatum(shdep->refobjid);
+ slot[slot_stored_count]->tts_values[Anum_pg_shdepend_deptype - 1] = CharGetDatum(shdep->deptype);
ExecStoreVirtualTuple(slot[slot_stored_count]);
slot_stored_count++;
diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c
index b36f81afb9d..1ec523ee3e5 100644
--- a/src/backend/catalog/pg_type.c
+++ b/src/backend/catalog/pg_type.c
@@ -80,7 +80,7 @@ TypeShellMake(const char *typeName, Oid typeNamespace, Oid ownerId)
for (i = 0; i < Natts_pg_type; ++i)
{
nulls[i] = false;
- values[i] = (Datum) NULL; /* redundant, but safe */
+ values[i] = (Datum) 0; /* redundant, but safe */
}
/*
diff --git a/src/backend/commands/alter.c b/src/backend/commands/alter.c
index c801c869c1c..cb75e11fced 100644
--- a/src/backend/commands/alter.c
+++ b/src/backend/commands/alter.c
@@ -220,7 +220,7 @@ AlterObjectRename_internal(Relation rel, Oid objectId, const char *new_name)
Assert(!isnull);
ownerId = DatumGetObjectId(datum);
- if (!has_privs_of_role(GetUserId(), DatumGetObjectId(ownerId)))
+ if (!has_privs_of_role(GetUserId(), ownerId))
aclcheck_error(ACLCHECK_NOT_OWNER, get_object_type(classId, objectId),
old_name);
diff --git a/src/backend/commands/event_trigger.c b/src/backend/commands/event_trigger.c
index edc2c988e29..631fb0525f1 100644
--- a/src/backend/commands/event_trigger.c
+++ b/src/backend/commands/event_trigger.c
@@ -2021,8 +2021,8 @@ pg_event_trigger_ddl_commands(PG_FUNCTION_ARGS)
elog(ERROR, "cache lookup failed for object %u/%u",
addr.classId, addr.objectId);
schema_oid =
- heap_getattr(objtup, nspAttnum,
- RelationGetDescr(catalog), &isnull);
+ DatumGetObjectId(heap_getattr(objtup, nspAttnum,
+ RelationGetDescr(catalog), &isnull));
if (isnull)
elog(ERROR,
"invalid null namespace in object %u/%u/%d",
diff --git a/src/backend/commands/subscriptioncmds.c b/src/backend/commands/subscriptioncmds.c
index cd6c3684482..faa3650d287 100644
--- a/src/backend/commands/subscriptioncmds.c
+++ b/src/backend/commands/subscriptioncmds.c
@@ -638,7 +638,7 @@ CreateSubscription(ParseState *pstate, CreateSubscriptionStmt *stmt,
/* Check if name is used */
subid = GetSysCacheOid2(SUBSCRIPTIONNAME, Anum_pg_subscription_oid,
- MyDatabaseId, CStringGetDatum(stmt->subname));
+ ObjectIdGetDatum(MyDatabaseId), CStringGetDatum(stmt->subname));
if (OidIsValid(subid))
{
ereport(ERROR,
@@ -1185,7 +1185,7 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt,
rel = table_open(SubscriptionRelationId, RowExclusiveLock);
/* Fetch the existing tuple. */
- tup = SearchSysCacheCopy2(SUBSCRIPTIONNAME, MyDatabaseId,
+ tup = SearchSysCacheCopy2(SUBSCRIPTIONNAME, ObjectIdGetDatum(MyDatabaseId),
CStringGetDatum(stmt->subname));
if (!HeapTupleIsValid(tup))
@@ -1808,7 +1808,7 @@ DropSubscription(DropSubscriptionStmt *stmt, bool isTopLevel)
*/
rel = table_open(SubscriptionRelationId, AccessExclusiveLock);
- tup = SearchSysCache2(SUBSCRIPTIONNAME, MyDatabaseId,
+ tup = SearchSysCache2(SUBSCRIPTIONNAME, ObjectIdGetDatum(MyDatabaseId),
CStringGetDatum(stmt->subname));
if (!HeapTupleIsValid(tup))
@@ -2193,7 +2193,7 @@ AlterSubscriptionOwner(const char *name, Oid newOwnerId)
rel = table_open(SubscriptionRelationId, RowExclusiveLock);
- tup = SearchSysCacheCopy2(SUBSCRIPTIONNAME, MyDatabaseId,
+ tup = SearchSysCacheCopy2(SUBSCRIPTIONNAME, ObjectIdGetDatum(MyDatabaseId),
CStringGetDatum(name));
if (!HeapTupleIsValid(tup))
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index cb811520c29..c6dd2e020da 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -8985,7 +8985,7 @@ ATExecSetStatistics(Relation rel, const char *colName, int16 colNum, Node *newVa
memset(repl_null, false, sizeof(repl_null));
memset(repl_repl, false, sizeof(repl_repl));
if (!newtarget_default)
- repl_val[Anum_pg_attribute_attstattarget - 1] = newtarget;
+ repl_val[Anum_pg_attribute_attstattarget - 1] = Int16GetDatum(newtarget);
else
repl_null[Anum_pg_attribute_attstattarget - 1] = true;
repl_repl[Anum_pg_attribute_attstattarget - 1] = true;
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 7dc121f73f1..235533ac17f 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -872,7 +872,7 @@ CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString,
CStringGetDatum(trigname));
values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
- values[Anum_pg_trigger_tgenabled - 1] = trigger_fires_when;
+ values[Anum_pg_trigger_tgenabled - 1] = CharGetDatum(trigger_fires_when);
values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal);
values[Anum_pg_trigger_tgconstrrelid - 1] = ObjectIdGetDatum(constrrelid);
values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
@@ -2285,6 +2285,8 @@ FindTriggerIncompatibleWithInheritance(TriggerDesc *trigdesc)
{
Trigger *trigger = &trigdesc->triggers[i];
+ if (!TRIGGER_FOR_ROW(trigger->tgtype))
+ continue;
if (trigger->tgoldtable != NULL || trigger->tgnewtable != NULL)
return trigger->tgname;
}
@@ -2545,6 +2547,15 @@ ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo,
{
TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
+ if (relinfo->ri_FdwRoutine && transition_capture &&
+ transition_capture->tcs_insert_new_table)
+ {
+ Assert(relinfo->ri_RootResultRelInfo);
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot collect transition tuples from child foreign tables")));
+ }
+
if ((trigdesc && trigdesc->trig_insert_after_row) ||
(transition_capture && transition_capture->tcs_insert_new_table))
AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
@@ -2797,6 +2808,15 @@ ExecARDeleteTriggers(EState *estate,
{
TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
+ if (relinfo->ri_FdwRoutine && transition_capture &&
+ transition_capture->tcs_delete_old_table)
+ {
+ Assert(relinfo->ri_RootResultRelInfo);
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot collect transition tuples from child foreign tables")));
+ }
+
if ((trigdesc && trigdesc->trig_delete_after_row) ||
(transition_capture && transition_capture->tcs_delete_old_table))
{
@@ -3134,6 +3154,16 @@ ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
{
TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
+ if (relinfo->ri_FdwRoutine && transition_capture &&
+ (transition_capture->tcs_update_old_table ||
+ transition_capture->tcs_update_new_table))
+ {
+ Assert(relinfo->ri_RootResultRelInfo);
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot collect transition tuples from child foreign tables")));
+ }
+
if ((trigdesc && trigdesc->trig_update_after_row) ||
(transition_capture &&
(transition_capture->tcs_update_old_table ||
diff --git a/src/backend/commands/tsearchcmds.c b/src/backend/commands/tsearchcmds.c
index ab16d42ad56..dc7df736fb8 100644
--- a/src/backend/commands/tsearchcmds.c
+++ b/src/backend/commands/tsearchcmds.c
@@ -1058,10 +1058,10 @@ DefineTSConfiguration(List *names, List *parameters, ObjectAddress *copied)
memset(slot[slot_stored_count]->tts_isnull, false,
slot[slot_stored_count]->tts_tupleDescriptor->natts * sizeof(bool));
- slot[slot_stored_count]->tts_values[Anum_pg_ts_config_map_mapcfg - 1] = cfgOid;
- slot[slot_stored_count]->tts_values[Anum_pg_ts_config_map_maptokentype - 1] = cfgmap->maptokentype;
- slot[slot_stored_count]->tts_values[Anum_pg_ts_config_map_mapseqno - 1] = cfgmap->mapseqno;
- slot[slot_stored_count]->tts_values[Anum_pg_ts_config_map_mapdict - 1] = cfgmap->mapdict;
+ slot[slot_stored_count]->tts_values[Anum_pg_ts_config_map_mapcfg - 1] = ObjectIdGetDatum(cfgOid);
+ slot[slot_stored_count]->tts_values[Anum_pg_ts_config_map_maptokentype - 1] = Int32GetDatum(cfgmap->maptokentype);
+ slot[slot_stored_count]->tts_values[Anum_pg_ts_config_map_mapseqno - 1] = Int32GetDatum(cfgmap->mapseqno);
+ slot[slot_stored_count]->tts_values[Anum_pg_ts_config_map_mapdict - 1] = ObjectIdGetDatum(cfgmap->mapdict);
ExecStoreVirtualTuple(slot[slot_stored_count]);
slot_stored_count++;
diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c
index 0d638e29d00..1e3d4ab0e20 100644
--- a/src/backend/commands/user.c
+++ b/src/backend/commands/user.c
@@ -1924,7 +1924,7 @@ AddRoleMems(Oid currentUserId, const char *rolename, Oid roleid,
*/
if ((popt->specified & GRANT_ROLE_SPECIFIED_INHERIT) != 0)
new_record[Anum_pg_auth_members_inherit_option - 1] =
- popt->inherit;
+ BoolGetDatum(popt->inherit);
else
{
HeapTuple mrtup;
@@ -1935,14 +1935,14 @@ AddRoleMems(Oid currentUserId, const char *rolename, Oid roleid,
elog(ERROR, "cache lookup failed for role %u", memberid);
mrform = (Form_pg_authid) GETSTRUCT(mrtup);
new_record[Anum_pg_auth_members_inherit_option - 1] =
- mrform->rolinherit;
+ BoolGetDatum(mrform->rolinherit);
ReleaseSysCache(mrtup);
}
/* get an OID for the new row and insert it */
objectId = GetNewOidWithIndex(pg_authmem_rel, AuthMemOidIndexId,
Anum_pg_auth_members_oid);
- new_record[Anum_pg_auth_members_oid - 1] = objectId;
+ new_record[Anum_pg_auth_members_oid - 1] = ObjectIdGetDatum(objectId);
tuple = heap_form_tuple(pg_authmem_dsc,
new_record, new_record_nulls);
CatalogTupleInsert(pg_authmem_rel, tuple);
diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c
index 1a37737d4a2..0e1a74976f7 100644
--- a/src/backend/executor/execExprInterp.c
+++ b/src/backend/executor/execExprInterp.c
@@ -2815,7 +2815,7 @@ ExecJustHashVarImpl(ExprState *state, TupleTableSlot *slot, bool *isnull)
*isnull = false;
if (!fcinfo->args[0].isnull)
- return DatumGetUInt32(hashop->d.hashdatum.fn_addr(fcinfo));
+ return hashop->d.hashdatum.fn_addr(fcinfo);
else
return (Datum) 0;
}
@@ -2849,7 +2849,7 @@ ExecJustHashVarVirtImpl(ExprState *state, TupleTableSlot *slot, bool *isnull)
*isnull = false;
if (!fcinfo->args[0].isnull)
- return DatumGetUInt32(hashop->d.hashdatum.fn_addr(fcinfo));
+ return hashop->d.hashdatum.fn_addr(fcinfo);
else
return (Datum) 0;
}
@@ -2892,7 +2892,7 @@ ExecJustHashOuterVarStrict(ExprState *state, ExprContext *econtext,
if (!fcinfo->args[0].isnull)
{
*isnull = false;
- return DatumGetUInt32(hashop->d.hashdatum.fn_addr(fcinfo));
+ return hashop->d.hashdatum.fn_addr(fcinfo);
}
else
{
@@ -4393,7 +4393,7 @@ ExecEvalHashedScalarArrayOp(ExprState *state, ExprEvalStep *op, ExprContext *eco
* is the equality function and we need not-equals.
*/
if (!inclause)
- result = !result;
+ result = BoolGetDatum(!DatumGetBool(result));
}
}
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index ecb2e4ccaa1..50fcd023776 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -1258,7 +1258,7 @@ SPI_getbinval(HeapTuple tuple, TupleDesc tupdesc, int fnumber, bool *isnull)
{
SPI_result = SPI_ERROR_NOATTRIBUTE;
*isnull = true;
- return (Datum) NULL;
+ return (Datum) 0;
}
return heap_getattr(tuple, fnumber, tupdesc, isnull);
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index 48b5d13b9b6..2f933e95cb9 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -630,7 +630,7 @@ readDatum(bool typbyval)
}
}
else if (length <= 0)
- res = (Datum) NULL;
+ res = (Datum) 0;
else
{
s = (char *) palloc(length);
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index bfefc7dbea1..9fd5c31edf2 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -7233,6 +7233,8 @@ make_modifytable(PlannerInfo *root, Plan *subplan,
ModifyTable *node = makeNode(ModifyTable);
bool returning_old_or_new = false;
bool returning_old_or_new_valid = false;
+ bool transition_tables = false;
+ bool transition_tables_valid = false;
List *fdw_private_list;
Bitmapset *direct_modify_plans;
ListCell *lc;
@@ -7379,8 +7381,8 @@ make_modifytable(PlannerInfo *root, Plan *subplan,
* callback functions needed for that and (2) there are no local
* structures that need to be run for each modified row: row-level
* triggers on the foreign table, stored generated columns, WITH CHECK
- * OPTIONs from parent views, or Vars returning OLD/NEW in the
- * RETURNING list.
+ * OPTIONs from parent views, Vars returning OLD/NEW in the RETURNING
+ * list, or transition tables on the named relation.
*/
direct_modify = false;
if (fdwroutine != NULL &&
@@ -7392,7 +7394,10 @@ make_modifytable(PlannerInfo *root, Plan *subplan,
!has_row_triggers(root, rti, operation) &&
!has_stored_generated_columns(root, rti))
{
- /* returning_old_or_new is the same for all result relations */
+ /*
+ * returning_old_or_new and transition_tables are the same for all
+ * result relations, respectively
+ */
if (!returning_old_or_new_valid)
{
returning_old_or_new =
@@ -7401,7 +7406,18 @@ make_modifytable(PlannerInfo *root, Plan *subplan,
returning_old_or_new_valid = true;
}
if (!returning_old_or_new)
- direct_modify = fdwroutine->PlanDirectModify(root, node, rti, i);
+ {
+ if (!transition_tables_valid)
+ {
+ transition_tables = has_transition_tables(root,
+ nominalRelation,
+ operation);
+ transition_tables_valid = true;
+ }
+ if (!transition_tables)
+ direct_modify = fdwroutine->PlanDirectModify(root, node,
+ rti, i);
+ }
}
if (direct_modify)
direct_modify_plans = bms_add_member(direct_modify_plans, i);
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index c6a58afc5e5..6ce4efea118 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -2389,6 +2389,60 @@ has_row_triggers(PlannerInfo *root, Index rti, CmdType event)
}
/*
+ * has_transition_tables
+ *
+ * Detect whether the specified relation has any transition tables for event.
+ */
+bool
+has_transition_tables(PlannerInfo *root, Index rti, CmdType event)
+{
+ RangeTblEntry *rte = planner_rt_fetch(rti, root);
+ Relation relation;
+ TriggerDesc *trigDesc;
+ bool result = false;
+
+ Assert(rte->rtekind == RTE_RELATION);
+
+ /* Currently foreign tables cannot have transition tables */
+ if (rte->relkind == RELKIND_FOREIGN_TABLE)
+ return result;
+
+ /* Assume we already have adequate lock */
+ relation = table_open(rte->relid, NoLock);
+
+ trigDesc = relation->trigdesc;
+ switch (event)
+ {
+ case CMD_INSERT:
+ if (trigDesc &&
+ trigDesc->trig_insert_new_table)
+ result = true;
+ break;
+ case CMD_UPDATE:
+ if (trigDesc &&
+ (trigDesc->trig_update_old_table ||
+ trigDesc->trig_update_new_table))
+ result = true;
+ break;
+ case CMD_DELETE:
+ if (trigDesc &&
+ trigDesc->trig_delete_old_table)
+ result = true;
+ break;
+ /* There is no separate event for MERGE, only INSERT/UPDATE/DELETE */
+ case CMD_MERGE:
+ result = false;
+ break;
+ default:
+ elog(ERROR, "unrecognized CmdType: %d", (int) event);
+ break;
+ }
+
+ table_close(relation, NoLock);
+ return result;
+}
+
+/*
* has_stored_generated_columns
*
* Does table identified by RTI have any STORED GENERATED columns?
diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c
index 8490148a47d..e84e8663e96 100644
--- a/src/backend/postmaster/checkpointer.c
+++ b/src/backend/postmaster/checkpointer.c
@@ -953,11 +953,14 @@ CheckpointerShmemSize(void)
Size size;
/*
- * Currently, the size of the requests[] array is arbitrarily set equal to
- * NBuffers. This may prove too large or small ...
+ * The size of the requests[] array is arbitrarily set equal to NBuffers.
+ * But there is a cap of MAX_CHECKPOINT_REQUESTS to prevent accumulating
+ * too many checkpoint requests in the ring buffer.
*/
size = offsetof(CheckpointerShmemStruct, requests);
- size = add_size(size, mul_size(NBuffers, sizeof(CheckpointerRequest)));
+ size = add_size(size, mul_size(Min(NBuffers,
+ MAX_CHECKPOINT_REQUESTS),
+ sizeof(CheckpointerRequest)));
return size;
}
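
Editor's note: the hunk above caps the shared request queue at MAX_CHECKPOINT_REQUESTS entries instead of always scaling it with NBuffers. Below is a minimal, self-contained sketch of the same capped-sizing arithmetic; the cap value, type names, and helper here are illustrative assumptions only (the real code uses add_size()/mul_size(), which also guard against overflow, and defines the constant in checkpointer.c).

    #include <stddef.h>
    #include <stdio.h>

    #define DEMO_MAX_CHECKPOINT_REQUESTS 1000000    /* assumed cap, for illustration */

    typedef struct DemoCkptRequest
    {
        unsigned int ftag[4];       /* placeholder for the real file tag */
        int          type;
    } DemoCkptRequest;

    typedef struct DemoCkptShmem
    {
        int             num_requests;
        DemoCkptRequest requests[];     /* flexible array, sized below */
    } DemoCkptShmem;

    /* Never reserve more request slots than the cap, however large NBuffers is. */
    static size_t
    demo_ckpt_shmem_size(int nbuffers)
    {
        size_t      nslots = (nbuffers < DEMO_MAX_CHECKPOINT_REQUESTS)
            ? (size_t) nbuffers
            : (size_t) DEMO_MAX_CHECKPOINT_REQUESTS;

        return offsetof(DemoCkptShmem, requests) + nslots * sizeof(DemoCkptRequest);
    }

    int
    main(void)
    {
        printf("shmem size for NBuffers=16384: %zu bytes\n",
               demo_ckpt_shmem_size(16384));
        return 0;
    }
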
diff --git a/src/backend/replication/logical/applyparallelworker.c b/src/backend/replication/logical/applyparallelworker.c
index 1fa931a7422..cd0e19176fd 100644
--- a/src/backend/replication/logical/applyparallelworker.c
+++ b/src/backend/replication/logical/applyparallelworker.c
@@ -778,10 +778,10 @@ LogicalParallelApplyLoop(shm_mq_handle *mqh)
/*
* The first byte of messages sent from leader apply worker to
- * parallel apply workers can only be 'w'.
+ * parallel apply workers can only be PqReplMsg_WALData.
*/
c = pq_getmsgbyte(&s);
- if (c != 'w')
+ if (c != PqReplMsg_WALData)
elog(ERROR, "unexpected message \"%c\"", c);
/*
diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index 89e241c8392..0fdc5de57ba 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -3994,7 +3994,7 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
c = pq_getmsgbyte(&s);
- if (c == 'w')
+ if (c == PqReplMsg_WALData)
{
XLogRecPtr start_lsn;
XLogRecPtr end_lsn;
@@ -4016,7 +4016,7 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
maybe_advance_nonremovable_xid(&rdt_data, false);
}
- else if (c == 'k')
+ else if (c == PqReplMsg_Keepalive)
{
XLogRecPtr end_lsn;
TimestampTz timestamp;
@@ -4035,7 +4035,7 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
UpdateWorkerStats(last_received, timestamp, true);
}
- else if (c == 's') /* Primary status update */
+ else if (c == PqReplMsg_PrimaryStatusUpdate)
{
rdt_data.remote_lsn = pq_getmsgint64(&s);
rdt_data.remote_oldestxid = FullTransactionIdFromU64((uint64) pq_getmsgint64(&s));
@@ -4267,7 +4267,7 @@ send_feedback(XLogRecPtr recvpos, bool force, bool requestReply)
else
resetStringInfo(reply_message);
- pq_sendbyte(reply_message, 'r');
+ pq_sendbyte(reply_message, PqReplMsg_StandbyStatusUpdate);
pq_sendint64(reply_message, recvpos); /* write */
pq_sendint64(reply_message, flushpos); /* flush */
pq_sendint64(reply_message, writepos); /* apply */
@@ -4438,7 +4438,7 @@ request_publisher_status(RetainDeadTuplesData *rdt_data)
* Send the current time to update the remote walsender's latest reply
* message received time.
*/
- pq_sendbyte(request_message, 'p');
+ pq_sendbyte(request_message, PqReplMsg_PrimaryStatusRequest);
pq_sendint64(request_message, GetCurrentTimestamp());
elog(DEBUG2, "sending publisher status request message");
diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c
index b6281101711..7361ffc9dcf 100644
--- a/src/backend/replication/walreceiver.c
+++ b/src/backend/replication/walreceiver.c
@@ -826,7 +826,7 @@ XLogWalRcvProcessMsg(unsigned char type, char *buf, Size len, TimeLineID tli)
switch (type)
{
- case 'w': /* WAL records */
+ case PqReplMsg_WALData:
{
StringInfoData incoming_message;
@@ -850,7 +850,7 @@ XLogWalRcvProcessMsg(unsigned char type, char *buf, Size len, TimeLineID tli)
XLogWalRcvWrite(buf, len, dataStart, tli);
break;
}
- case 'k': /* Keepalive */
+ case PqReplMsg_Keepalive:
{
StringInfoData incoming_message;
@@ -1130,7 +1130,7 @@ XLogWalRcvSendReply(bool force, bool requestReply)
applyPtr = GetXLogReplayRecPtr(NULL);
resetStringInfo(&reply_message);
- pq_sendbyte(&reply_message, 'r');
+ pq_sendbyte(&reply_message, PqReplMsg_StandbyStatusUpdate);
pq_sendint64(&reply_message, writePtr);
pq_sendint64(&reply_message, flushPtr);
pq_sendint64(&reply_message, applyPtr);
@@ -1234,7 +1234,7 @@ XLogWalRcvSendHSFeedback(bool immed)
/* Construct the message and send it. */
resetStringInfo(&reply_message);
- pq_sendbyte(&reply_message, 'h');
+ pq_sendbyte(&reply_message, PqReplMsg_HotStandbyFeedback);
pq_sendint64(&reply_message, GetCurrentTimestamp());
pq_sendint32(&reply_message, xmin);
pq_sendint32(&reply_message, xmin_epoch);
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index ee911394a23..0855bae3535 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -1534,7 +1534,7 @@ WalSndPrepareWrite(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xi
resetStringInfo(ctx->out);
- pq_sendbyte(ctx->out, 'w');
+ pq_sendbyte(ctx->out, PqReplMsg_WALData);
pq_sendint64(ctx->out, lsn); /* dataStart */
pq_sendint64(ctx->out, lsn); /* walEnd */
@@ -2292,7 +2292,8 @@ ProcessRepliesIfAny(void)
switch (firstchar)
{
/*
- * 'd' means a standby reply wrapped in a CopyData packet.
+ * PqMsg_CopyData means a standby reply wrapped in a CopyData
+ * packet.
*/
case PqMsg_CopyData:
ProcessStandbyMessage();
@@ -2300,8 +2301,9 @@ ProcessRepliesIfAny(void)
break;
/*
- * CopyDone means the standby requested to finish streaming.
- * Reply with CopyDone, if we had not sent that already.
+ * PqMsg_CopyDone means the standby requested to finish
+ * streaming. Reply with CopyDone, if we had not sent that
+ * already.
*/
case PqMsg_CopyDone:
if (!streamingDoneSending)
@@ -2315,7 +2317,8 @@ ProcessRepliesIfAny(void)
break;
/*
- * 'X' means that the standby is closing down the socket.
+ * PqMsg_Terminate means that the standby is closing down the
+ * socket.
*/
case PqMsg_Terminate:
proc_exit(0);
@@ -2350,15 +2353,15 @@ ProcessStandbyMessage(void)
switch (msgtype)
{
- case 'r':
+ case PqReplMsg_StandbyStatusUpdate:
ProcessStandbyReplyMessage();
break;
- case 'h':
+ case PqReplMsg_HotStandbyFeedback:
ProcessStandbyHSFeedbackMessage();
break;
- case 'p':
+ case PqReplMsg_PrimaryStatusRequest:
ProcessStandbyPSRequestMessage();
break;
@@ -2752,7 +2755,7 @@ ProcessStandbyPSRequestMessage(void)
/* construct the message... */
resetStringInfo(&output_message);
- pq_sendbyte(&output_message, 's');
+ pq_sendbyte(&output_message, PqReplMsg_PrimaryStatusUpdate);
pq_sendint64(&output_message, lsn);
pq_sendint64(&output_message, (int64) U64FromFullTransactionId(fullOldestXidInCommit));
pq_sendint64(&output_message, (int64) U64FromFullTransactionId(nextFullXid));
@@ -3364,7 +3367,7 @@ XLogSendPhysical(void)
* OK to read and send the slice.
*/
resetStringInfo(&output_message);
- pq_sendbyte(&output_message, 'w');
+ pq_sendbyte(&output_message, PqReplMsg_WALData);
pq_sendint64(&output_message, startptr); /* dataStart */
pq_sendint64(&output_message, SendRqstPtr); /* walEnd */
@@ -4135,7 +4138,7 @@ WalSndKeepalive(bool requestReply, XLogRecPtr writePtr)
/* construct the message... */
resetStringInfo(&output_message);
- pq_sendbyte(&output_message, 'k');
+ pq_sendbyte(&output_message, PqReplMsg_Keepalive);
pq_sendint64(&output_message, XLogRecPtrIsInvalid(writePtr) ? sentPtr : writePtr);
pq_sendint64(&output_message, GetCurrentTimestamp());
pq_sendbyte(&output_message, requestReply ? 1 : 0);
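
Editor's note: the walsender, walreceiver, and logical worker hunks above replace bare message-type bytes with named PqReplMsg_* constants. The mapping below is taken directly from the literals they replace in this diff; the exact header and form of the real definitions are not shown here, so treat this sketch as illustrative rather than the canonical declarations.

    /* Replication sub-protocol message bytes, as used in the hunks above. */
    #define PqReplMsg_WALData              'w'  /* WAL data, sender -> receiver */
    #define PqReplMsg_Keepalive            'k'  /* keepalive, sender -> receiver */
    #define PqReplMsg_PrimaryStatusUpdate  's'  /* primary status, sender -> receiver */
    #define PqReplMsg_StandbyStatusUpdate  'r'  /* status reply, receiver -> sender */
    #define PqReplMsg_HotStandbyFeedback   'h'  /* hot standby feedback, receiver -> sender */
    #define PqReplMsg_PrimaryStatusRequest 'p'  /* status request, receiver -> sender */
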
diff --git a/src/backend/rewrite/rewriteDefine.c b/src/backend/rewrite/rewriteDefine.c
index 8aa90b0d6fb..a96fbdc1ddd 100644
--- a/src/backend/rewrite/rewriteDefine.c
+++ b/src/backend/rewrite/rewriteDefine.c
@@ -725,10 +725,9 @@ EnableDisableRule(Relation rel, const char *rulename,
/*
* Change ev_enabled if it is different from the desired new state.
*/
- if (DatumGetChar(ruleform->ev_enabled) !=
- fires_when)
+ if (ruleform->ev_enabled != fires_when)
{
- ruleform->ev_enabled = CharGetDatum(fires_when);
+ ruleform->ev_enabled = fires_when;
CatalogTupleUpdate(pg_rewrite_desc, &ruletup->t_self, ruletup);
changed = true;
diff --git a/src/backend/statistics/attribute_stats.c b/src/backend/statistics/attribute_stats.c
index ab198076401..e8241926d2c 100644
--- a/src/backend/statistics/attribute_stats.c
+++ b/src/backend/statistics/attribute_stats.c
@@ -339,7 +339,7 @@ attribute_statistics_update(FunctionCallInfo fcinfo)
starel = table_open(StatisticRelationId, RowExclusiveLock);
- statup = SearchSysCache3(STATRELATTINH, reloid, attnum, inherited);
+ statup = SearchSysCache3(STATRELATTINH, ObjectIdGetDatum(reloid), Int16GetDatum(attnum), BoolGetDatum(inherited));
/* initialize from existing tuple if exists */
if (HeapTupleIsValid(statup))
@@ -895,9 +895,9 @@ init_empty_stats_tuple(Oid reloid, int16 attnum, bool inherited,
{
values[Anum_pg_statistic_stakind1 + slotnum - 1] = (Datum) 0;
nulls[Anum_pg_statistic_stakind1 + slotnum - 1] = false;
- values[Anum_pg_statistic_staop1 + slotnum - 1] = InvalidOid;
+ values[Anum_pg_statistic_staop1 + slotnum - 1] = ObjectIdGetDatum(InvalidOid);
nulls[Anum_pg_statistic_staop1 + slotnum - 1] = false;
- values[Anum_pg_statistic_stacoll1 + slotnum - 1] = InvalidOid;
+ values[Anum_pg_statistic_stacoll1 + slotnum - 1] = ObjectIdGetDatum(InvalidOid);
nulls[Anum_pg_statistic_stacoll1 + slotnum - 1] = false;
}
}
diff --git a/src/backend/statistics/extended_stats.c b/src/backend/statistics/extended_stats.c
index a8b63ec0884..3e031cf831a 100644
--- a/src/backend/statistics/extended_stats.c
+++ b/src/backend/statistics/extended_stats.c
@@ -2618,7 +2618,7 @@ make_build_data(Relation rel, StatExtEntry *stat, int numrows, HeapTuple *rows,
}
else
{
- result->values[idx][i] = (Datum) datum;
+ result->values[idx][i] = datum;
result->nulls[idx][i] = false;
}
diff --git a/src/backend/storage/aio/aio_funcs.c b/src/backend/storage/aio/aio_funcs.c
index 584e683371a..905ea129c81 100644
--- a/src/backend/storage/aio/aio_funcs.c
+++ b/src/backend/storage/aio/aio_funcs.c
@@ -152,7 +152,7 @@ retry:
nulls[0] = false;
/* column: IO's id */
- values[1] = ioh_id;
+ values[1] = UInt32GetDatum(ioh_id);
/* column: IO's generation */
values[2] = Int64GetDatum(start_generation);
diff --git a/src/backend/storage/aio/read_stream.c b/src/backend/storage/aio/read_stream.c
index 0e7f5557f5c..031fde9f4cb 100644
--- a/src/backend/storage/aio/read_stream.c
+++ b/src/backend/storage/aio/read_stream.c
@@ -247,12 +247,33 @@ read_stream_start_pending_read(ReadStream *stream)
Assert(stream->pinned_buffers + stream->pending_read_nblocks <=
stream->max_pinned_buffers);
+#ifdef USE_ASSERT_CHECKING
/* We had better not be overwriting an existing pinned buffer. */
if (stream->pinned_buffers > 0)
Assert(stream->next_buffer_index != stream->oldest_buffer_index);
else
Assert(stream->next_buffer_index == stream->oldest_buffer_index);
+ /*
+ * Pinned buffers forwarded by a preceding StartReadBuffers() call that
+ * had to split the operation should match the leading blocks of this
+ * following StartReadBuffers() call.
+ */
+ Assert(stream->forwarded_buffers <= stream->pending_read_nblocks);
+ for (int i = 0; i < stream->forwarded_buffers; ++i)
+ Assert(BufferGetBlockNumber(stream->buffers[stream->next_buffer_index + i]) ==
+ stream->pending_read_blocknum + i);
+
+ /*
+ * Check that we've cleared the queue/overflow entries corresponding to
+ * the rest of the blocks covered by this read, unless it's the first go
+ * around and we haven't even initialized them yet.
+ */
+ for (int i = stream->forwarded_buffers; i < stream->pending_read_nblocks; ++i)
+ Assert(stream->next_buffer_index + i >= stream->initialized_buffers ||
+ stream->buffers[stream->next_buffer_index + i] == InvalidBuffer);
+#endif
+
/* Do we need to issue read-ahead advice? */
flags = stream->read_buffers_flags;
if (stream->advice_enabled)
@@ -979,6 +1000,19 @@ read_stream_next_buffer(ReadStream *stream, void **per_buffer_data)
stream->pending_read_nblocks == 0 &&
stream->per_buffer_data_size == 0)
{
+ /*
+ * The fast path spins on one buffer entry repeatedly instead of
+ * rotating through the whole queue and clearing the entries behind
+ * it. If the buffer it starts with happened to be forwarded between
+ * StartReadBuffers() calls and also wrapped around the circular queue
+	 * partway through, then a copy also exists in the overflow zone that
+	 * the fast path will never clear as the regular path would. Clear it
+	 * now, so the fast path needs no code for that.
+ */
+ if (stream->oldest_buffer_index < stream->io_combine_limit - 1)
+ stream->buffers[stream->queue_size + stream->oldest_buffer_index] =
+ InvalidBuffer;
+
stream->fast_path = true;
}
#endif
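
Editor's note: the assertions and fast-path fix above rely on the stream's circular buffer queue having an overflow zone: an entry at index < io_combine_limit - 1 can have a mirror copy at queue_size + index, so a multi-block read that wraps past the end of the queue still occupies contiguous array slots. The standalone sketch below illustrates, under that stated assumption, why clearing such an entry must also clear its mirror; the names and layout are illustrative, not the server's.

    #include <assert.h>
    #include <string.h>

    #define INVALID_BUFFER 0

    /*
     * Illustrative layout: queue_size real slots plus io_combine_limit - 1
     * overflow slots that mirror the first few real slots.
     */
    typedef struct DemoQueue
    {
        int         queue_size;
        int         io_combine_limit;
        int         buffers[64];    /* queue_size + io_combine_limit - 1 in use */
    } DemoQueue;

    /* Clear one slot; if it lies in the mirrored prefix, clear its mirror too. */
    static void
    demo_clear_slot(DemoQueue *q, int index)
    {
        q->buffers[index] = INVALID_BUFFER;
        if (index < q->io_combine_limit - 1)
            q->buffers[q->queue_size + index] = INVALID_BUFFER;
    }

    int
    main(void)
    {
        DemoQueue   q = {.queue_size = 16, .io_combine_limit = 8};

        memset(q.buffers, 0x7f, sizeof(q.buffers));     /* pretend every slot is pinned */
        demo_clear_slot(&q, 3);                         /* 3 < 7, so slot 16 + 3 is cleared too */

        assert(q.buffers[3] == INVALID_BUFFER && q.buffers[19] == INVALID_BUFFER);
        return 0;
    }
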
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 67431208e7f..fd7e21d96d3 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -1484,11 +1484,6 @@ StartReadBuffersImpl(ReadBuffersOperation *operation,
* buffers must remain valid until WaitReadBuffers() is called, and any
* forwarded buffers must also be preserved for a continuing call unless
* they are explicitly released.
- *
- * Currently the I/O is only started with optional operating system advice if
- * requested by the caller with READ_BUFFERS_ISSUE_ADVICE, and the real I/O
- * happens synchronously in WaitReadBuffers(). In future work, true I/O could
- * be initiated here.
*/
bool
StartReadBuffers(ReadBuffersOperation *operation,
@@ -6370,8 +6365,8 @@ ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b)
static int
ts_ckpt_progress_comparator(Datum a, Datum b, void *arg)
{
- CkptTsStatus *sa = (CkptTsStatus *) a;
- CkptTsStatus *sb = (CkptTsStatus *) b;
+ CkptTsStatus *sa = (CkptTsStatus *) DatumGetPointer(a);
+ CkptTsStatus *sb = (CkptTsStatus *) DatumGetPointer(b);
/* we want a min-heap, so return 1 for the a < b */
if (sa->progress < sb->progress)
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index ca3656fc76f..d12a3ca0684 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -714,7 +714,7 @@ pg_get_shmem_allocations_numa(PG_FUNCTION_ARGS)
for (i = 0; i <= max_nodes; i++)
{
values[0] = CStringGetTextDatum(ent->key);
- values[1] = i;
+ values[1] = Int32GetDatum(i);
values[2] = Int64GetDatum(nodes[i] * os_page_size);
tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc,
diff --git a/src/backend/storage/large_object/inv_api.c b/src/backend/storage/large_object/inv_api.c
index 68b76f2cc18..a874000c8ca 100644
--- a/src/backend/storage/large_object/inv_api.c
+++ b/src/backend/storage/large_object/inv_api.c
@@ -561,7 +561,7 @@ inv_write(LargeObjectDesc *obj_desc, const char *buf, int nbytes)
char data[LOBLKSIZE + VARHDRSZ];
/* ensure union is aligned well enough: */
int32 align_it;
- } workbuf;
+ } workbuf = {0};
char *workb = VARDATA(&workbuf.hdr);
HeapTuple newtup;
Datum values[Natts_pg_largeobject];
@@ -752,7 +752,7 @@ inv_truncate(LargeObjectDesc *obj_desc, int64 len)
char data[LOBLKSIZE + VARHDRSZ];
/* ensure union is aligned well enough: */
int32 align_it;
- } workbuf;
+ } workbuf = {0};
char *workb = VARDATA(&workbuf.hdr);
HeapTuple newtup;
Datum values[Natts_pg_largeobject];
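
Editor's note: both inv_api.c hunks change the on-stack union from uninitialized to zero-initialized, presumably so any tail bytes of the block image that are never overwritten are deterministic rather than leftover stack contents. A tiny self-contained sketch of the pattern; the sizes are placeholders and the real union additionally carries a varlena header member.

    #include <string.h>

    #define DEMO_BLKSIZE 2048           /* stand-in for LOBLKSIZE */
    #define DEMO_HDRSZ   4              /* stand-in for VARHDRSZ */

    int
    main(void)
    {
        /*
         * The "= {0}" initializer zeroes the buffer up front, so bytes that are
         * never overwritten below read back as 0 instead of stack garbage.
         */
        union
        {
            char        data[DEMO_BLKSIZE + DEMO_HDRSZ];
            int         align_it;       /* ensure the union is aligned well enough */
        }           workbuf = {0};
        char       *workb = workbuf.data + DEMO_HDRSZ;

        memcpy(workb, "payload", 7);    /* a short write leaves the tail zeroed */
        return workbuf.data[DEMO_HDRSZ + 100];  /* exits with status 0 */
    }
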
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 62f3471448e..f8c88147160 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -589,7 +589,7 @@ proclock_hash(const void *key, Size keysize)
* intermediate variable to suppress cast-pointer-to-int warnings.
*/
procptr = PointerGetDatum(proclocktag->myProc);
- lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
+ lockhash ^= DatumGetUInt32(procptr) << LOG2_NUM_LOCK_PARTITIONS;
return lockhash;
}
@@ -610,7 +610,7 @@ ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
* This must match proclock_hash()!
*/
procptr = PointerGetDatum(proclocktag->myProc);
- lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
+ lockhash ^= DatumGetUInt32(procptr) << LOG2_NUM_LOCK_PARTITIONS;
return lockhash;
}
diff --git a/src/backend/tsearch/ts_parse.c b/src/backend/tsearch/ts_parse.c
index e5da6cf17ec..cba421892bf 100644
--- a/src/backend/tsearch/ts_parse.c
+++ b/src/backend/tsearch/ts_parse.c
@@ -218,7 +218,7 @@ LexizeExec(LexizeData *ld, ParsedLex **correspondLexem)
* position and go to multiword mode
*/
- ld->curDictId = DatumGetObjectId(map->dictIds[i]);
+ ld->curDictId = map->dictIds[i];
ld->posDict = i + 1;
ld->curSub = curVal->next;
if (res)
@@ -275,7 +275,7 @@ LexizeExec(LexizeData *ld, ParsedLex **correspondLexem)
* dictionaries ?
*/
for (i = 0; i < map->len && !dictExists; i++)
- if (ld->curDictId == DatumGetObjectId(map->dictIds[i]))
+ if (ld->curDictId == map->dictIds[i])
dictExists = true;
if (!dictExists)
diff --git a/src/backend/utils/activity/pgstat.c b/src/backend/utils/activity/pgstat.c
index 6bc91ce0dad..ffb5b8cce34 100644
--- a/src/backend/utils/activity/pgstat.c
+++ b/src/backend/utils/activity/pgstat.c
@@ -821,7 +821,7 @@ pgstat_force_next_flush(void)
static bool
match_db_entries(PgStatShared_HashEntry *entry, Datum match_data)
{
- return entry->key.dboid == DatumGetObjectId(MyDatabaseId);
+ return entry->key.dboid == MyDatabaseId;
}
/*
diff --git a/src/backend/utils/activity/pgstat_shmem.c b/src/backend/utils/activity/pgstat_shmem.c
index 53e7d534270..cca4277f234 100644
--- a/src/backend/utils/activity/pgstat_shmem.c
+++ b/src/backend/utils/activity/pgstat_shmem.c
@@ -874,11 +874,12 @@ pgstat_drop_entry_internal(PgStatShared_HashEntry *shent,
*/
if (shent->dropped)
elog(ERROR,
- "trying to drop stats entry already dropped: kind=%s dboid=%u objid=%" PRIu64 " refcount=%u",
+ "trying to drop stats entry already dropped: kind=%s dboid=%u objid=%" PRIu64 " refcount=%u generation=%u",
pgstat_get_kind_info(shent->key.kind)->name,
shent->key.dboid,
shent->key.objid,
- pg_atomic_read_u32(&shent->refcount));
+ pg_atomic_read_u32(&shent->refcount),
+ pg_atomic_read_u32(&shent->generation));
shent->dropped = true;
/* release refcount marking entry as not dropped */
diff --git a/src/backend/utils/adt/datum.c b/src/backend/utils/adt/datum.c
index fcd5b1653dd..614644a4e2a 100644
--- a/src/backend/utils/adt/datum.c
+++ b/src/backend/utils/adt/datum.c
@@ -299,9 +299,9 @@ datum_image_eq(Datum value1, Datum value2, bool typByVal, int typLen)
len1 - VARHDRSZ) == 0);
/* Only free memory if it's a copy made here. */
- if ((Pointer) arg1val != (Pointer) value1)
+ if ((Pointer) arg1val != DatumGetPointer(value1))
pfree(arg1val);
- if ((Pointer) arg2val != (Pointer) value2)
+ if ((Pointer) arg2val != DatumGetPointer(value2))
pfree(arg2val);
}
}
@@ -355,7 +355,7 @@ datum_image_hash(Datum value, bool typByVal, int typLen)
result = hash_bytes((unsigned char *) VARDATA_ANY(val), len - VARHDRSZ);
/* Only free memory if it's a copy made here. */
- if ((Pointer) val != (Pointer) value)
+ if ((Pointer) val != DatumGetPointer(value))
pfree(val);
}
else if (typLen == -2)
diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c
index 51452755f58..e9d370cb3da 100644
--- a/src/backend/utils/adt/json.c
+++ b/src/backend/utils/adt/json.c
@@ -904,7 +904,7 @@ json_unique_hash(const void *key, Size keysize)
hash ^= hash_bytes((const unsigned char *) entry->key, entry->key_len);
- return DatumGetUInt32(hash);
+ return hash;
}
static int
diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c
index 370456408bf..c5e1a027956 100644
--- a/src/backend/utils/adt/jsonfuncs.c
+++ b/src/backend/utils/adt/jsonfuncs.c
@@ -2027,7 +2027,7 @@ each_worker_jsonb(FunctionCallInfo fcinfo, const char *funcname, bool as_text)
{
/* a json null is an sql null in text mode */
nulls[1] = true;
- values[1] = (Datum) NULL;
+ values[1] = (Datum) 0;
}
else
values[1] = PointerGetDatum(JsonbValueAsText(&v));
@@ -2266,7 +2266,7 @@ elements_worker_jsonb(FunctionCallInfo fcinfo, const char *funcname,
{
/* a json null is an sql null in text mode */
nulls[0] = true;
- values[0] = (Datum) NULL;
+ values[0] = (Datum) 0;
}
else
values[0] = PointerGetDatum(JsonbValueAsText(&v));
@@ -2389,7 +2389,7 @@ elements_array_element_end(void *state, bool isnull)
if (isnull && _state->normalize_results)
{
nulls[0] = true;
- values[0] = (Datum) NULL;
+ values[0] = (Datum) 0;
}
else if (_state->next_scalar)
{
diff --git a/src/backend/utils/adt/jsonpath_exec.c b/src/backend/utils/adt/jsonpath_exec.c
index 407041b14a1..5a562535223 100644
--- a/src/backend/utils/adt/jsonpath_exec.c
+++ b/src/backend/utils/adt/jsonpath_exec.c
@@ -1517,7 +1517,7 @@ executeItemOptUnwrapTarget(JsonPathExecContext *cxt, JsonPathItem *jsp,
/* Convert numstr to Numeric with typmod */
Assert(numstr != NULL);
noerr = DirectInputFunctionCallSafe(numeric_in, numstr,
- InvalidOid, dtypmod,
+ InvalidOid, DatumGetInt32(dtypmod),
(Node *) &escontext,
&numdatum);
diff --git a/src/backend/utils/adt/lockfuncs.c b/src/backend/utils/adt/lockfuncs.c
index 00e67fb46d0..df938812dd3 100644
--- a/src/backend/utils/adt/lockfuncs.c
+++ b/src/backend/utils/adt/lockfuncs.c
@@ -398,15 +398,15 @@ pg_lock_status(PG_FUNCTION_ARGS)
values[0] = CStringGetTextDatum(PredicateLockTagTypeNames[lockType]);
/* lock target */
- values[1] = GET_PREDICATELOCKTARGETTAG_DB(*predTag);
- values[2] = GET_PREDICATELOCKTARGETTAG_RELATION(*predTag);
+ values[1] = ObjectIdGetDatum(GET_PREDICATELOCKTARGETTAG_DB(*predTag));
+ values[2] = ObjectIdGetDatum(GET_PREDICATELOCKTARGETTAG_RELATION(*predTag));
if (lockType == PREDLOCKTAG_TUPLE)
- values[4] = GET_PREDICATELOCKTARGETTAG_OFFSET(*predTag);
+ values[4] = UInt16GetDatum(GET_PREDICATELOCKTARGETTAG_OFFSET(*predTag));
else
nulls[4] = true;
if ((lockType == PREDLOCKTAG_TUPLE) ||
(lockType == PREDLOCKTAG_PAGE))
- values[3] = GET_PREDICATELOCKTARGETTAG_PAGE(*predTag);
+ values[3] = UInt32GetDatum(GET_PREDICATELOCKTARGETTAG_PAGE(*predTag));
else
nulls[3] = true;
diff --git a/src/backend/utils/adt/multirangetypes.c b/src/backend/utils/adt/multirangetypes.c
index 46f2ec0c29f..84733dc5019 100644
--- a/src/backend/utils/adt/multirangetypes.c
+++ b/src/backend/utils/adt/multirangetypes.c
@@ -2082,15 +2082,14 @@ range_overleft_multirange_internal(TypeCacheEntry *rangetyp,
bool empty;
if (RangeIsEmpty(r) || MultirangeIsEmpty(mr))
- PG_RETURN_BOOL(false);
-
+ return false;
range_deserialize(rangetyp, r, &lower1, &upper1, &empty);
Assert(!empty);
multirange_get_bounds(rangetyp, mr, mr->rangeCount - 1,
&lower2, &upper2);
- PG_RETURN_BOOL(range_cmp_bounds(rangetyp, &upper1, &upper2) <= 0);
+ return (range_cmp_bounds(rangetyp, &upper1, &upper2) <= 0);
}
Datum
@@ -2167,7 +2166,7 @@ range_overright_multirange_internal(TypeCacheEntry *rangetyp,
bool empty;
if (RangeIsEmpty(r) || MultirangeIsEmpty(mr))
- PG_RETURN_BOOL(false);
+ return false;
range_deserialize(rangetyp, r, &lower1, &upper1, &empty);
Assert(!empty);
@@ -2524,7 +2523,7 @@ multirange_adjacent_range(PG_FUNCTION_ARGS)
TypeCacheEntry *typcache;
if (RangeIsEmpty(r) || MultirangeIsEmpty(mr))
- return false;
+ PG_RETURN_BOOL(false);
typcache = multirange_get_typcache(fcinfo, MultirangeTypeGetOid(mr));
@@ -2545,7 +2544,7 @@ multirange_adjacent_multirange(PG_FUNCTION_ARGS)
upper2;
if (MultirangeIsEmpty(mr1) || MultirangeIsEmpty(mr2))
- return false;
+ PG_RETURN_BOOL(false);
typcache = multirange_get_typcache(fcinfo, MultirangeTypeGetOid(mr1));
@@ -2640,7 +2639,7 @@ multirange_cmp(PG_FUNCTION_ARGS)
Datum
multirange_lt(PG_FUNCTION_ARGS)
{
- int cmp = multirange_cmp(fcinfo);
+ int cmp = DatumGetInt32(multirange_cmp(fcinfo));
PG_RETURN_BOOL(cmp < 0);
}
@@ -2648,7 +2647,7 @@ multirange_lt(PG_FUNCTION_ARGS)
Datum
multirange_le(PG_FUNCTION_ARGS)
{
- int cmp = multirange_cmp(fcinfo);
+ int cmp = DatumGetInt32(multirange_cmp(fcinfo));
PG_RETURN_BOOL(cmp <= 0);
}
@@ -2656,7 +2655,7 @@ multirange_le(PG_FUNCTION_ARGS)
Datum
multirange_ge(PG_FUNCTION_ARGS)
{
- int cmp = multirange_cmp(fcinfo);
+ int cmp = DatumGetInt32(multirange_cmp(fcinfo));
PG_RETURN_BOOL(cmp >= 0);
}
@@ -2664,7 +2663,7 @@ multirange_ge(PG_FUNCTION_ARGS)
Datum
multirange_gt(PG_FUNCTION_ARGS)
{
- int cmp = multirange_cmp(fcinfo);
+ int cmp = DatumGetInt32(multirange_cmp(fcinfo));
PG_RETURN_BOOL(cmp > 0);
}
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index c9233565d57..122f2efab8b 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -28,6 +28,7 @@
#include "common/hashfn.h"
#include "common/int.h"
+#include "common/int128.h"
#include "funcapi.h"
#include "lib/hyperloglog.h"
#include "libpq/pqformat.h"
@@ -534,10 +535,7 @@ static bool numericvar_to_int32(const NumericVar *var, int32 *result);
static bool numericvar_to_int64(const NumericVar *var, int64 *result);
static void int64_to_numericvar(int64 val, NumericVar *var);
static bool numericvar_to_uint64(const NumericVar *var, uint64 *result);
-#ifdef HAVE_INT128
-static bool numericvar_to_int128(const NumericVar *var, int128 *result);
-static void int128_to_numericvar(int128 val, NumericVar *var);
-#endif
+static void int128_to_numericvar(INT128 val, NumericVar *var);
static double numericvar_to_double_no_overflow(const NumericVar *var);
static Datum numeric_abbrev_convert(Datum original_datum, SortSupport ssup);
@@ -4463,25 +4461,13 @@ int64_div_fast_to_numeric(int64 val1, int log10val2)
if (unlikely(pg_mul_s64_overflow(val1, factor, &new_val1)))
{
-#ifdef HAVE_INT128
/* do the multiplication using 128-bit integers */
- int128 tmp;
+ INT128 tmp;
- tmp = (int128) val1 * (int128) factor;
+ tmp = int64_to_int128(0);
+ int128_add_int64_mul_int64(&tmp, val1, factor);
int128_to_numericvar(tmp, &result);
-#else
- /* do the multiplication using numerics */
- NumericVar tmp;
-
- init_var(&tmp);
-
- int64_to_numericvar(val1, &result);
- int64_to_numericvar(factor, &tmp);
- mul_var(&result, &tmp, &result, 0);
-
- free_var(&tmp);
-#endif
}
else
int64_to_numericvar(new_val1, &result);
@@ -4901,8 +4887,8 @@ numeric_pg_lsn(PG_FUNCTION_ARGS)
* Actually, it's a pointer to a NumericAggState allocated in the aggregate
* context. The digit buffers for the NumericVars will be there too.
*
- * On platforms which support 128-bit integers some aggregates instead use a
- * 128-bit integer based transition datatype to speed up calculations.
+ * For integer inputs, some aggregates use special-purpose 64-bit or 128-bit
+ * integer based transition datatypes to speed up calculations.
*
* ----------------------------------------------------------------------
*/
@@ -5566,26 +5552,27 @@ numeric_accum_inv(PG_FUNCTION_ARGS)
/*
- * Integer data types in general use Numeric accumulators to share code
- * and avoid risk of overflow.
+ * Integer data types in general use Numeric accumulators to share code and
+ * avoid risk of overflow.  However, for performance reasons, optimized
+ * special-purpose accumulator routines are used when possible:
*
- * However for performance reasons optimized special-purpose accumulator
- * routines are used when possible.
+ * For 16-bit and 32-bit inputs, N and sum(X) fit into 64-bit, so 64-bit
+ * accumulators are used for SUM and AVG of these data types.
*
- * On platforms with 128-bit integer support, the 128-bit routines will be
- * used when sum(X) or sum(X*X) fit into 128-bit.
+ * For 16-bit and 32-bit inputs, sum(X^2) fits into 128-bit, so 128-bit
+ * accumulators are used for STDDEV_POP, STDDEV_SAMP, VAR_POP, and VAR_SAMP of
+ * these data types.
*
- * For 16 and 32 bit inputs, the N and sum(X) fit into 64-bit so the 64-bit
- * accumulators will be used for SUM and AVG of these data types.
+ * For 64-bit inputs, sum(X) fits into 128-bit, so a 128-bit accumulator is
+ * used for SUM(int8) and AVG(int8).
*/
-#ifdef HAVE_INT128
typedef struct Int128AggState
{
bool calcSumX2; /* if true, calculate sumX2 */
int64 N; /* count of processed numbers */
- int128 sumX; /* sum of processed numbers */
- int128 sumX2; /* sum of squares of processed numbers */
+ INT128 sumX; /* sum of processed numbers */
+ INT128 sumX2; /* sum of squares of processed numbers */
} Int128AggState;
/*
@@ -5631,12 +5618,12 @@ makeInt128AggStateCurrentContext(bool calcSumX2)
* Accumulate a new input value for 128-bit aggregate functions.
*/
static void
-do_int128_accum(Int128AggState *state, int128 newval)
+do_int128_accum(Int128AggState *state, int64 newval)
{
if (state->calcSumX2)
- state->sumX2 += newval * newval;
+ int128_add_int64_mul_int64(&state->sumX2, newval, newval);
- state->sumX += newval;
+ int128_add_int64(&state->sumX, newval);
state->N++;
}
@@ -5644,43 +5631,28 @@ do_int128_accum(Int128AggState *state, int128 newval)
* Remove an input value from the aggregated state.
*/
static void
-do_int128_discard(Int128AggState *state, int128 newval)
+do_int128_discard(Int128AggState *state, int64 newval)
{
if (state->calcSumX2)
- state->sumX2 -= newval * newval;
+ int128_sub_int64_mul_int64(&state->sumX2, newval, newval);
- state->sumX -= newval;
+ int128_sub_int64(&state->sumX, newval);
state->N--;
}
-typedef Int128AggState PolyNumAggState;
-#define makePolyNumAggState makeInt128AggState
-#define makePolyNumAggStateCurrentContext makeInt128AggStateCurrentContext
-#else
-typedef NumericAggState PolyNumAggState;
-#define makePolyNumAggState makeNumericAggState
-#define makePolyNumAggStateCurrentContext makeNumericAggStateCurrentContext
-#endif
-
Datum
int2_accum(PG_FUNCTION_ARGS)
{
- PolyNumAggState *state;
+ Int128AggState *state;
- state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
+ state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
/* Create the state data on the first call */
if (state == NULL)
- state = makePolyNumAggState(fcinfo, true);
+ state = makeInt128AggState(fcinfo, true);
if (!PG_ARGISNULL(1))
- {
-#ifdef HAVE_INT128
- do_int128_accum(state, (int128) PG_GETARG_INT16(1));
-#else
- do_numeric_accum(state, int64_to_numeric(PG_GETARG_INT16(1)));
-#endif
- }
+ do_int128_accum(state, PG_GETARG_INT16(1));
PG_RETURN_POINTER(state);
}
@@ -5688,22 +5660,16 @@ int2_accum(PG_FUNCTION_ARGS)
Datum
int4_accum(PG_FUNCTION_ARGS)
{
- PolyNumAggState *state;
+ Int128AggState *state;
- state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
+ state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
/* Create the state data on the first call */
if (state == NULL)
- state = makePolyNumAggState(fcinfo, true);
+ state = makeInt128AggState(fcinfo, true);
if (!PG_ARGISNULL(1))
- {
-#ifdef HAVE_INT128
- do_int128_accum(state, (int128) PG_GETARG_INT32(1));
-#else
- do_numeric_accum(state, int64_to_numeric(PG_GETARG_INT32(1)));
-#endif
- }
+ do_int128_accum(state, PG_GETARG_INT32(1));
PG_RETURN_POINTER(state);
}
@@ -5726,21 +5692,21 @@ int8_accum(PG_FUNCTION_ARGS)
}
/*
- * Combine function for numeric aggregates which require sumX2
+ * Combine function for Int128AggState for aggregates which require sumX2
*/
Datum
numeric_poly_combine(PG_FUNCTION_ARGS)
{
- PolyNumAggState *state1;
- PolyNumAggState *state2;
+ Int128AggState *state1;
+ Int128AggState *state2;
MemoryContext agg_context;
MemoryContext old_context;
if (!AggCheckCallContext(fcinfo, &agg_context))
elog(ERROR, "aggregate function called in non-aggregate context");
- state1 = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
- state2 = PG_ARGISNULL(1) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(1);
+ state1 = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
+ state2 = PG_ARGISNULL(1) ? NULL : (Int128AggState *) PG_GETARG_POINTER(1);
if (state2 == NULL)
PG_RETURN_POINTER(state1);
@@ -5750,16 +5716,10 @@ numeric_poly_combine(PG_FUNCTION_ARGS)
{
old_context = MemoryContextSwitchTo(agg_context);
- state1 = makePolyNumAggState(fcinfo, true);
+ state1 = makeInt128AggState(fcinfo, true);
state1->N = state2->N;
-
-#ifdef HAVE_INT128
state1->sumX = state2->sumX;
state1->sumX2 = state2->sumX2;
-#else
- accum_sum_copy(&state1->sumX, &state2->sumX);
- accum_sum_copy(&state1->sumX2, &state2->sumX2);
-#endif
MemoryContextSwitchTo(old_context);
@@ -5769,54 +5729,51 @@ numeric_poly_combine(PG_FUNCTION_ARGS)
if (state2->N > 0)
{
state1->N += state2->N;
+ int128_add_int128(&state1->sumX, state2->sumX);
+ int128_add_int128(&state1->sumX2, state2->sumX2);
+ }
+ PG_RETURN_POINTER(state1);
+}
-#ifdef HAVE_INT128
- state1->sumX += state2->sumX;
- state1->sumX2 += state2->sumX2;
-#else
- /* The rest of this needs to work in the aggregate context */
- old_context = MemoryContextSwitchTo(agg_context);
-
- /* Accumulate sums */
- accum_sum_combine(&state1->sumX, &state2->sumX);
- accum_sum_combine(&state1->sumX2, &state2->sumX2);
+/*
+ * int128_serialize - serialize a 128-bit integer to binary format
+ */
+static inline void
+int128_serialize(StringInfo buf, INT128 val)
+{
+ pq_sendint64(buf, PG_INT128_HI_INT64(val));
+ pq_sendint64(buf, PG_INT128_LO_UINT64(val));
+}
- MemoryContextSwitchTo(old_context);
-#endif
+/*
+ * int128_deserialize - deserialize binary format to a 128-bit integer.
+ */
+static inline INT128
+int128_deserialize(StringInfo buf)
+{
+ int64 hi = pq_getmsgint64(buf);
+ uint64 lo = pq_getmsgint64(buf);
- }
- PG_RETURN_POINTER(state1);
+ return make_int128(hi, lo);
}
/*
* numeric_poly_serialize
- * Serialize PolyNumAggState into bytea for aggregate functions which
+ * Serialize Int128AggState into bytea for aggregate functions which
* require sumX2.
*/
Datum
numeric_poly_serialize(PG_FUNCTION_ARGS)
{
- PolyNumAggState *state;
+ Int128AggState *state;
StringInfoData buf;
bytea *result;
- NumericVar tmp_var;
/* Ensure we disallow calling when not in aggregate context */
if (!AggCheckCallContext(fcinfo, NULL))
elog(ERROR, "aggregate function called in non-aggregate context");
- state = (PolyNumAggState *) PG_GETARG_POINTER(0);
-
- /*
- * If the platform supports int128 then sumX and sumX2 will be a 128 bit
- * integer type. Here we'll convert that into a numeric type so that the
- * combine state is in the same format for both int128 enabled machines
- * and machines which don't support that type. The logic here is that one
- * day we might like to send these over to another server for further
- * processing and we want a standard format to work with.
- */
-
- init_var(&tmp_var);
+ state = (Int128AggState *) PG_GETARG_POINTER(0);
pq_begintypsend(&buf);
@@ -5824,48 +5781,33 @@ numeric_poly_serialize(PG_FUNCTION_ARGS)
pq_sendint64(&buf, state->N);
/* sumX */
-#ifdef HAVE_INT128
- int128_to_numericvar(state->sumX, &tmp_var);
-#else
- accum_sum_final(&state->sumX, &tmp_var);
-#endif
- numericvar_serialize(&buf, &tmp_var);
+ int128_serialize(&buf, state->sumX);
/* sumX2 */
-#ifdef HAVE_INT128
- int128_to_numericvar(state->sumX2, &tmp_var);
-#else
- accum_sum_final(&state->sumX2, &tmp_var);
-#endif
- numericvar_serialize(&buf, &tmp_var);
+ int128_serialize(&buf, state->sumX2);
result = pq_endtypsend(&buf);
- free_var(&tmp_var);
-
PG_RETURN_BYTEA_P(result);
}
/*
* numeric_poly_deserialize
- * Deserialize PolyNumAggState from bytea for aggregate functions which
+ * Deserialize Int128AggState from bytea for aggregate functions which
* require sumX2.
*/
Datum
numeric_poly_deserialize(PG_FUNCTION_ARGS)
{
bytea *sstate;
- PolyNumAggState *result;
+ Int128AggState *result;
StringInfoData buf;
- NumericVar tmp_var;
if (!AggCheckCallContext(fcinfo, NULL))
elog(ERROR, "aggregate function called in non-aggregate context");
sstate = PG_GETARG_BYTEA_PP(0);
- init_var(&tmp_var);
-
/*
* Initialize a StringInfo so that we can "receive" it using the standard
* recv-function infrastructure.
@@ -5873,31 +5815,19 @@ numeric_poly_deserialize(PG_FUNCTION_ARGS)
initReadOnlyStringInfo(&buf, VARDATA_ANY(sstate),
VARSIZE_ANY_EXHDR(sstate));
- result = makePolyNumAggStateCurrentContext(false);
+ result = makeInt128AggStateCurrentContext(false);
/* N */
result->N = pq_getmsgint64(&buf);
/* sumX */
- numericvar_deserialize(&buf, &tmp_var);
-#ifdef HAVE_INT128
- numericvar_to_int128(&tmp_var, &result->sumX);
-#else
- accum_sum_add(&result->sumX, &tmp_var);
-#endif
+ result->sumX = int128_deserialize(&buf);
/* sumX2 */
- numericvar_deserialize(&buf, &tmp_var);
-#ifdef HAVE_INT128
- numericvar_to_int128(&tmp_var, &result->sumX2);
-#else
- accum_sum_add(&result->sumX2, &tmp_var);
-#endif
+ result->sumX2 = int128_deserialize(&buf);
pq_getmsgend(&buf);
- free_var(&tmp_var);
-
PG_RETURN_POINTER(result);
}
@@ -5907,43 +5837,37 @@ numeric_poly_deserialize(PG_FUNCTION_ARGS)
Datum
int8_avg_accum(PG_FUNCTION_ARGS)
{
- PolyNumAggState *state;
+ Int128AggState *state;
- state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
+ state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
/* Create the state data on the first call */
if (state == NULL)
- state = makePolyNumAggState(fcinfo, false);
+ state = makeInt128AggState(fcinfo, false);
if (!PG_ARGISNULL(1))
- {
-#ifdef HAVE_INT128
- do_int128_accum(state, (int128) PG_GETARG_INT64(1));
-#else
- do_numeric_accum(state, int64_to_numeric(PG_GETARG_INT64(1)));
-#endif
- }
+ do_int128_accum(state, PG_GETARG_INT64(1));
PG_RETURN_POINTER(state);
}
/*
- * Combine function for PolyNumAggState for aggregates which don't require
+ * Combine function for Int128AggState for aggregates which don't require
* sumX2
*/
Datum
int8_avg_combine(PG_FUNCTION_ARGS)
{
- PolyNumAggState *state1;
- PolyNumAggState *state2;
+ Int128AggState *state1;
+ Int128AggState *state2;
MemoryContext agg_context;
MemoryContext old_context;
if (!AggCheckCallContext(fcinfo, &agg_context))
elog(ERROR, "aggregate function called in non-aggregate context");
- state1 = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
- state2 = PG_ARGISNULL(1) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(1);
+ state1 = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
+ state2 = PG_ARGISNULL(1) ? NULL : (Int128AggState *) PG_GETARG_POINTER(1);
if (state2 == NULL)
PG_RETURN_POINTER(state1);
@@ -5953,14 +5877,10 @@ int8_avg_combine(PG_FUNCTION_ARGS)
{
old_context = MemoryContextSwitchTo(agg_context);
- state1 = makePolyNumAggState(fcinfo, false);
+ state1 = makeInt128AggState(fcinfo, false);
state1->N = state2->N;
-
-#ifdef HAVE_INT128
state1->sumX = state2->sumX;
-#else
- accum_sum_copy(&state1->sumX, &state2->sumX);
-#endif
+
MemoryContextSwitchTo(old_context);
PG_RETURN_POINTER(state1);
@@ -5969,52 +5889,28 @@ int8_avg_combine(PG_FUNCTION_ARGS)
if (state2->N > 0)
{
state1->N += state2->N;
-
-#ifdef HAVE_INT128
- state1->sumX += state2->sumX;
-#else
- /* The rest of this needs to work in the aggregate context */
- old_context = MemoryContextSwitchTo(agg_context);
-
- /* Accumulate sums */
- accum_sum_combine(&state1->sumX, &state2->sumX);
-
- MemoryContextSwitchTo(old_context);
-#endif
-
+ int128_add_int128(&state1->sumX, state2->sumX);
}
PG_RETURN_POINTER(state1);
}
/*
* int8_avg_serialize
- * Serialize PolyNumAggState into bytea using the standard
- * recv-function infrastructure.
+ * Serialize Int128AggState into bytea for aggregate functions which
+ * don't require sumX2.
*/
Datum
int8_avg_serialize(PG_FUNCTION_ARGS)
{
- PolyNumAggState *state;
+ Int128AggState *state;
StringInfoData buf;
bytea *result;
- NumericVar tmp_var;
/* Ensure we disallow calling when not in aggregate context */
if (!AggCheckCallContext(fcinfo, NULL))
elog(ERROR, "aggregate function called in non-aggregate context");
- state = (PolyNumAggState *) PG_GETARG_POINTER(0);
-
- /*
- * If the platform supports int128 then sumX will be a 128 integer type.
- * Here we'll convert that into a numeric type so that the combine state
- * is in the same format for both int128 enabled machines and machines
- * which don't support that type. The logic here is that one day we might
- * like to send these over to another server for further processing and we
- * want a standard format to work with.
- */
-
- init_var(&tmp_var);
+ state = (Int128AggState *) PG_GETARG_POINTER(0);
pq_begintypsend(&buf);
@@ -6022,39 +5918,30 @@ int8_avg_serialize(PG_FUNCTION_ARGS)
pq_sendint64(&buf, state->N);
/* sumX */
-#ifdef HAVE_INT128
- int128_to_numericvar(state->sumX, &tmp_var);
-#else
- accum_sum_final(&state->sumX, &tmp_var);
-#endif
- numericvar_serialize(&buf, &tmp_var);
+ int128_serialize(&buf, state->sumX);
result = pq_endtypsend(&buf);
- free_var(&tmp_var);
-
PG_RETURN_BYTEA_P(result);
}
/*
* int8_avg_deserialize
- * Deserialize bytea back into PolyNumAggState.
+ * Deserialize Int128AggState from bytea for aggregate functions which
+ * don't require sumX2.
*/
Datum
int8_avg_deserialize(PG_FUNCTION_ARGS)
{
bytea *sstate;
- PolyNumAggState *result;
+ Int128AggState *result;
StringInfoData buf;
- NumericVar tmp_var;
if (!AggCheckCallContext(fcinfo, NULL))
elog(ERROR, "aggregate function called in non-aggregate context");
sstate = PG_GETARG_BYTEA_PP(0);
- init_var(&tmp_var);
-
/*
* Initialize a StringInfo so that we can "receive" it using the standard
* recv-function infrastructure.
@@ -6062,23 +5949,16 @@ int8_avg_deserialize(PG_FUNCTION_ARGS)
initReadOnlyStringInfo(&buf, VARDATA_ANY(sstate),
VARSIZE_ANY_EXHDR(sstate));
- result = makePolyNumAggStateCurrentContext(false);
+ result = makeInt128AggStateCurrentContext(false);
/* N */
result->N = pq_getmsgint64(&buf);
/* sumX */
- numericvar_deserialize(&buf, &tmp_var);
-#ifdef HAVE_INT128
- numericvar_to_int128(&tmp_var, &result->sumX);
-#else
- accum_sum_add(&result->sumX, &tmp_var);
-#endif
+ result->sumX = int128_deserialize(&buf);
pq_getmsgend(&buf);
- free_var(&tmp_var);
-
PG_RETURN_POINTER(result);
}
@@ -6089,24 +5969,16 @@ int8_avg_deserialize(PG_FUNCTION_ARGS)
Datum
int2_accum_inv(PG_FUNCTION_ARGS)
{
- PolyNumAggState *state;
+ Int128AggState *state;
- state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
+ state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
/* Should not get here with no state */
if (state == NULL)
elog(ERROR, "int2_accum_inv called with NULL state");
if (!PG_ARGISNULL(1))
- {
-#ifdef HAVE_INT128
- do_int128_discard(state, (int128) PG_GETARG_INT16(1));
-#else
- /* Should never fail, all inputs have dscale 0 */
- if (!do_numeric_discard(state, int64_to_numeric(PG_GETARG_INT16(1))))
- elog(ERROR, "do_numeric_discard failed unexpectedly");
-#endif
- }
+ do_int128_discard(state, PG_GETARG_INT16(1));
PG_RETURN_POINTER(state);
}
@@ -6114,24 +5986,16 @@ int2_accum_inv(PG_FUNCTION_ARGS)
Datum
int4_accum_inv(PG_FUNCTION_ARGS)
{
- PolyNumAggState *state;
+ Int128AggState *state;
- state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
+ state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
/* Should not get here with no state */
if (state == NULL)
elog(ERROR, "int4_accum_inv called with NULL state");
if (!PG_ARGISNULL(1))
- {
-#ifdef HAVE_INT128
- do_int128_discard(state, (int128) PG_GETARG_INT32(1));
-#else
- /* Should never fail, all inputs have dscale 0 */
- if (!do_numeric_discard(state, int64_to_numeric(PG_GETARG_INT32(1))))
- elog(ERROR, "do_numeric_discard failed unexpectedly");
-#endif
- }
+ do_int128_discard(state, PG_GETARG_INT32(1));
PG_RETURN_POINTER(state);
}
@@ -6160,24 +6024,16 @@ int8_accum_inv(PG_FUNCTION_ARGS)
Datum
int8_avg_accum_inv(PG_FUNCTION_ARGS)
{
- PolyNumAggState *state;
+ Int128AggState *state;
- state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
+ state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
/* Should not get here with no state */
if (state == NULL)
elog(ERROR, "int8_avg_accum_inv called with NULL state");
if (!PG_ARGISNULL(1))
- {
-#ifdef HAVE_INT128
- do_int128_discard(state, (int128) PG_GETARG_INT64(1));
-#else
- /* Should never fail, all inputs have dscale 0 */
- if (!do_numeric_discard(state, int64_to_numeric(PG_GETARG_INT64(1))))
- elog(ERROR, "do_numeric_discard failed unexpectedly");
-#endif
- }
+ do_int128_discard(state, PG_GETARG_INT64(1));
PG_RETURN_POINTER(state);
}
@@ -6185,12 +6041,11 @@ int8_avg_accum_inv(PG_FUNCTION_ARGS)
Datum
numeric_poly_sum(PG_FUNCTION_ARGS)
{
-#ifdef HAVE_INT128
- PolyNumAggState *state;
+ Int128AggState *state;
Numeric res;
NumericVar result;
- state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
+ state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
/* If there were no non-null inputs, return NULL */
if (state == NULL || state->N == 0)
@@ -6205,21 +6060,17 @@ numeric_poly_sum(PG_FUNCTION_ARGS)
free_var(&result);
PG_RETURN_NUMERIC(res);
-#else
- return numeric_sum(fcinfo);
-#endif
}
Datum
numeric_poly_avg(PG_FUNCTION_ARGS)
{
-#ifdef HAVE_INT128
- PolyNumAggState *state;
+ Int128AggState *state;
NumericVar result;
Datum countd,
sumd;
- state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
+ state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
/* If there were no non-null inputs, return NULL */
if (state == NULL || state->N == 0)
@@ -6235,9 +6086,6 @@ numeric_poly_avg(PG_FUNCTION_ARGS)
free_var(&result);
PG_RETURN_DATUM(DirectFunctionCall2(numeric_div, sumd, countd));
-#else
- return numeric_avg(fcinfo);
-#endif
}
Datum
@@ -6470,7 +6318,6 @@ numeric_stddev_pop(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(res);
}
-#ifdef HAVE_INT128
static Numeric
numeric_poly_stddev_internal(Int128AggState *state,
bool variance, bool sample,
@@ -6514,17 +6361,15 @@ numeric_poly_stddev_internal(Int128AggState *state,
return res;
}
-#endif
Datum
numeric_poly_var_samp(PG_FUNCTION_ARGS)
{
-#ifdef HAVE_INT128
- PolyNumAggState *state;
+ Int128AggState *state;
Numeric res;
bool is_null;
- state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
+ state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
res = numeric_poly_stddev_internal(state, true, true, &is_null);
@@ -6532,20 +6377,16 @@ numeric_poly_var_samp(PG_FUNCTION_ARGS)
PG_RETURN_NULL();
else
PG_RETURN_NUMERIC(res);
-#else
- return numeric_var_samp(fcinfo);
-#endif
}
Datum
numeric_poly_stddev_samp(PG_FUNCTION_ARGS)
{
-#ifdef HAVE_INT128
- PolyNumAggState *state;
+ Int128AggState *state;
Numeric res;
bool is_null;
- state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
+ state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
res = numeric_poly_stddev_internal(state, false, true, &is_null);
@@ -6553,20 +6394,16 @@ numeric_poly_stddev_samp(PG_FUNCTION_ARGS)
PG_RETURN_NULL();
else
PG_RETURN_NUMERIC(res);
-#else
- return numeric_stddev_samp(fcinfo);
-#endif
}
Datum
numeric_poly_var_pop(PG_FUNCTION_ARGS)
{
-#ifdef HAVE_INT128
- PolyNumAggState *state;
+ Int128AggState *state;
Numeric res;
bool is_null;
- state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
+ state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
res = numeric_poly_stddev_internal(state, true, false, &is_null);
@@ -6574,20 +6411,16 @@ numeric_poly_var_pop(PG_FUNCTION_ARGS)
PG_RETURN_NULL();
else
PG_RETURN_NUMERIC(res);
-#else
- return numeric_var_pop(fcinfo);
-#endif
}
Datum
numeric_poly_stddev_pop(PG_FUNCTION_ARGS)
{
-#ifdef HAVE_INT128
- PolyNumAggState *state;
+ Int128AggState *state;
Numeric res;
bool is_null;
- state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
+ state = PG_ARGISNULL(0) ? NULL : (Int128AggState *) PG_GETARG_POINTER(0);
res = numeric_poly_stddev_internal(state, false, false, &is_null);
@@ -6595,9 +6428,6 @@ numeric_poly_stddev_pop(PG_FUNCTION_ARGS)
PG_RETURN_NULL();
else
PG_RETURN_NUMERIC(res);
-#else
- return numeric_stddev_pop(fcinfo);
-#endif
}
/*
@@ -8330,105 +8160,23 @@ numericvar_to_uint64(const NumericVar *var, uint64 *result)
return true;
}
-#ifdef HAVE_INT128
-/*
- * Convert numeric to int128, rounding if needed.
- *
- * If overflow, return false (no error is raised). Return true if okay.
- */
-static bool
-numericvar_to_int128(const NumericVar *var, int128 *result)
-{
- NumericDigit *digits;
- int ndigits;
- int weight;
- int i;
- int128 val,
- oldval;
- bool neg;
- NumericVar rounded;
-
- /* Round to nearest integer */
- init_var(&rounded);
- set_var_from_var(var, &rounded);
- round_var(&rounded, 0);
-
- /* Check for zero input */
- strip_var(&rounded);
- ndigits = rounded.ndigits;
- if (ndigits == 0)
- {
- *result = 0;
- free_var(&rounded);
- return true;
- }
-
- /*
- * For input like 10000000000, we must treat stripped digits as real. So
- * the loop assumes there are weight+1 digits before the decimal point.
- */
- weight = rounded.weight;
- Assert(weight >= 0 && ndigits <= weight + 1);
-
- /* Construct the result */
- digits = rounded.digits;
- neg = (rounded.sign == NUMERIC_NEG);
- val = digits[0];
- for (i = 1; i <= weight; i++)
- {
- oldval = val;
- val *= NBASE;
- if (i < ndigits)
- val += digits[i];
-
- /*
- * The overflow check is a bit tricky because we want to accept
- * INT128_MIN, which will overflow the positive accumulator. We can
- * detect this case easily though because INT128_MIN is the only
- * nonzero value for which -val == val (on a two's complement machine,
- * anyway).
- */
- if ((val / NBASE) != oldval) /* possible overflow? */
- {
- if (!neg || (-val) != val || val == 0 || oldval < 0)
- {
- free_var(&rounded);
- return false;
- }
- }
- }
-
- free_var(&rounded);
-
- *result = neg ? -val : val;
- return true;
-}
-
/*
* Convert 128 bit integer to numeric.
*/
static void
-int128_to_numericvar(int128 val, NumericVar *var)
+int128_to_numericvar(INT128 val, NumericVar *var)
{
- uint128 uval,
- newuval;
+ int sign;
NumericDigit *ptr;
int ndigits;
+ int32 dig;
/* int128 can require at most 39 decimal digits; add one for safety */
alloc_var(var, 40 / DEC_DIGITS);
- if (val < 0)
- {
- var->sign = NUMERIC_NEG;
- uval = -val;
- }
- else
- {
- var->sign = NUMERIC_POS;
- uval = val;
- }
+ sign = int128_sign(val);
+ var->sign = sign < 0 ? NUMERIC_NEG : NUMERIC_POS;
var->dscale = 0;
- if (val == 0)
+ if (sign == 0)
{
var->ndigits = 0;
var->weight = 0;
@@ -8440,15 +8188,13 @@ int128_to_numericvar(int128 val, NumericVar *var)
{
ptr--;
ndigits++;
- newuval = uval / NBASE;
- *ptr = uval - newuval * NBASE;
- uval = newuval;
- } while (uval);
+ int128_div_mod_int32(&val, NBASE, &dig);
+ *ptr = (NumericDigit) abs(dig);
+ } while (!int128_is_zero(val));
var->digits = ptr;
var->ndigits = ndigits;
var->weight = ndigits - 1;
}
-#endif
/*
* Convert a NumericVar to float8; if out of range, return +/- HUGE_VAL
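
Editor's note: the numeric.c changes above drop the optional native int128 arithmetic (and its NumericVar fallback) in favor of the portable INT128 type and helpers from common/int128.h, and the new int128_serialize()/int128_deserialize() pair simply ships the value as a signed high half followed by an unsigned low half. The standalone sketch below mirrors that split representation and the hi/lo round trip; the struct layout and helper names here are illustrative stand-ins, not the definitions in common/int128.h.

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative 128-bit accumulator: signed high half, unsigned low half. */
    typedef struct DemoInt128
    {
        int64_t     hi;
        uint64_t    lo;
    } DemoInt128;

    /* Add a signed 64-bit value, propagating the carry into the high half. */
    static void
    demo_add_int64(DemoInt128 *v, int64_t x)
    {
        uint64_t    ux = (uint64_t) x;
        uint64_t    new_lo = v->lo + ux;
        int         carry = (new_lo < v->lo);

        v->lo = new_lo;
        v->hi += (x < 0 ? -1 : 0) + carry;
    }

    int
    main(void)
    {
        DemoInt128  sum = {0, 0};

        demo_add_int64(&sum, INT64_MAX);
        demo_add_int64(&sum, INT64_MAX);    /* would overflow a plain int64 */
        demo_add_int64(&sum, -1);

        /* 2 * INT64_MAX - 1 = 2^64 - 3, i.e. hi == 0 and lo == UINT64_MAX - 2 */
        assert(sum.hi == 0 && sum.lo == UINT64_MAX - 2);

        /* "Wire" form, as in the serialize functions above: high half, then low half. */
        int64_t     wire_hi = sum.hi;
        uint64_t    wire_lo = sum.lo;
        DemoInt128  copy = {wire_hi, wire_lo};  /* deserialize: make_int128(hi, lo) */

        assert(copy.hi == sum.hi && copy.lo == sum.lo);
        return 0;
    }
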
diff --git a/src/backend/utils/adt/rangetypes.c b/src/backend/utils/adt/rangetypes.c
index c83b239b3bb..18e467bccd3 100644
--- a/src/backend/utils/adt/rangetypes.c
+++ b/src/backend/utils/adt/rangetypes.c
@@ -1075,8 +1075,8 @@ range_union_internal(TypeCacheEntry *typcache, RangeType *r1, RangeType *r2,
return r1;
if (strict &&
- !DatumGetBool(range_overlaps_internal(typcache, r1, r2)) &&
- !DatumGetBool(range_adjacent_internal(typcache, r1, r2)))
+ !range_overlaps_internal(typcache, r1, r2) &&
+ !range_adjacent_internal(typcache, r1, r2))
ereport(ERROR,
(errcode(ERRCODE_DATA_EXCEPTION),
errmsg("result of range union would not be contiguous")));
@@ -1343,9 +1343,9 @@ range_fast_cmp(Datum a, Datum b, SortSupport ssup)
cmp = range_cmp_bounds(typcache, &upper1, &upper2);
}
- if ((Datum) range_a != a)
+ if ((Pointer) range_a != DatumGetPointer(a))
pfree(range_a);
- if ((Datum) range_b != b)
+ if ((Pointer) range_b != DatumGetPointer(b))
pfree(range_b);
return cmp;
@@ -1356,7 +1356,7 @@ range_fast_cmp(Datum a, Datum b, SortSupport ssup)
Datum
range_lt(PG_FUNCTION_ARGS)
{
- int cmp = range_cmp(fcinfo);
+ int cmp = DatumGetInt32(range_cmp(fcinfo));
PG_RETURN_BOOL(cmp < 0);
}
@@ -1364,7 +1364,7 @@ range_lt(PG_FUNCTION_ARGS)
Datum
range_le(PG_FUNCTION_ARGS)
{
- int cmp = range_cmp(fcinfo);
+ int cmp = DatumGetInt32(range_cmp(fcinfo));
PG_RETURN_BOOL(cmp <= 0);
}
@@ -1372,7 +1372,7 @@ range_le(PG_FUNCTION_ARGS)
Datum
range_ge(PG_FUNCTION_ARGS)
{
- int cmp = range_cmp(fcinfo);
+ int cmp = DatumGetInt32(range_cmp(fcinfo));
PG_RETURN_BOOL(cmp >= 0);
}
@@ -1380,7 +1380,7 @@ range_ge(PG_FUNCTION_ARGS)
Datum
range_gt(PG_FUNCTION_ARGS)
{
- int cmp = range_cmp(fcinfo);
+ int cmp = DatumGetInt32(range_cmp(fcinfo));
PG_RETURN_BOOL(cmp > 0);
}
diff --git a/src/backend/utils/adt/rangetypes_spgist.c b/src/backend/utils/adt/rangetypes_spgist.c
index 9b6d7061a18..be519654880 100644
--- a/src/backend/utils/adt/rangetypes_spgist.c
+++ b/src/backend/utils/adt/rangetypes_spgist.c
@@ -757,7 +757,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
* because it's range
*/
previousCentroid = datumCopy(in->prefixDatum, false, -1);
- out->traversalValues[out->nNodes] = (void *) previousCentroid;
+ out->traversalValues[out->nNodes] = DatumGetPointer(previousCentroid);
}
out->nodeNumbers[out->nNodes] = i - 1;
out->nNodes++;
diff --git a/src/backend/utils/adt/rowtypes.c b/src/backend/utils/adt/rowtypes.c
index fe5edc0027d..9e5449f17d7 100644
--- a/src/backend/utils/adt/rowtypes.c
+++ b/src/backend/utils/adt/rowtypes.c
@@ -1529,9 +1529,9 @@ record_image_cmp(FunctionCallInfo fcinfo)
if ((cmpresult == 0) && (len1 != len2))
cmpresult = (len1 < len2) ? -1 : 1;
- if ((Pointer) arg1val != (Pointer) values1[i1])
+ if ((Pointer) arg1val != DatumGetPointer(values1[i1]))
pfree(arg1val);
- if ((Pointer) arg2val != (Pointer) values2[i2])
+ if ((Pointer) arg2val != DatumGetPointer(values2[i2]))
pfree(arg2val);
}
else
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index 25cff56c3d0..e640b48205b 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -4954,7 +4954,7 @@ timestamptz_trunc_internal(text *units, TimestampTz timestamp, pg_tz *tzp)
case DTK_SECOND:
case DTK_MILLISEC:
case DTK_MICROSEC:
- PG_RETURN_TIMESTAMPTZ(timestamp);
+ return timestamp;
break;
default:
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index ffae8c23abf..11b442a5941 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -408,13 +408,12 @@ text_length(Datum str)
{
/* fastpath when max encoding length is one */
if (pg_database_encoding_max_length() == 1)
- PG_RETURN_INT32(toast_raw_datum_size(str) - VARHDRSZ);
+ return (toast_raw_datum_size(str) - VARHDRSZ);
else
{
text *t = DatumGetTextPP(str);
- PG_RETURN_INT32(pg_mbstrlen_with_len(VARDATA_ANY(t),
- VARSIZE_ANY_EXHDR(t)));
+ return (pg_mbstrlen_with_len(VARDATA_ANY(t), VARSIZE_ANY_EXHDR(t)));
}
}
diff --git a/src/backend/utils/adt/waitfuncs.c b/src/backend/utils/adt/waitfuncs.c
index ddd0a57c0c5..f01cad72a0f 100644
--- a/src/backend/utils/adt/waitfuncs.c
+++ b/src/backend/utils/adt/waitfuncs.c
@@ -73,7 +73,7 @@ pg_isolation_test_session_is_blocked(PG_FUNCTION_ARGS)
* acquire heavyweight locks.
*/
blocking_pids_a =
- DatumGetArrayTypeP(DirectFunctionCall1(pg_blocking_pids, blocked_pid));
+ DatumGetArrayTypeP(DirectFunctionCall1(pg_blocking_pids, Int32GetDatum(blocked_pid)));
Assert(ARR_ELEMTYPE(blocking_pids_a) == INT4OID);
Assert(!array_contains_nulls(blocking_pids_a));
diff --git a/src/backend/utils/cache/attoptcache.c b/src/backend/utils/cache/attoptcache.c
index 5c8360c08b5..45d1e2be007 100644
--- a/src/backend/utils/cache/attoptcache.c
+++ b/src/backend/utils/cache/attoptcache.c
@@ -86,7 +86,7 @@ relatt_cache_syshash(const void *key, Size keysize)
const AttoptCacheKey *ckey = key;
Assert(keysize == sizeof(*ckey));
- return GetSysCacheHashValue2(ATTNUM, ckey->attrelid, ckey->attnum);
+ return GetSysCacheHashValue2(ATTNUM, ObjectIdGetDatum(ckey->attrelid), Int32GetDatum(ckey->attnum));
}
/*
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index c460a72b75d..032bb6222c4 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -3817,7 +3817,7 @@ get_subscription_oid(const char *subname, bool missing_ok)
Oid oid;
oid = GetSysCacheOid2(SUBSCRIPTIONNAME, Anum_pg_subscription_oid,
- MyDatabaseId, CStringGetDatum(subname));
+ ObjectIdGetDatum(MyDatabaseId), CStringGetDatum(subname));
if (!OidIsValid(oid) && !missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 559ba9cdb2c..6fe268a8eec 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -3184,7 +3184,7 @@ AssertPendingSyncs_RelationCache(void)
if ((LockTagType) locallock->tag.lock.locktag_type !=
LOCKTAG_RELATION)
continue;
- relid = ObjectIdGetDatum(locallock->tag.lock.locktag_field2);
+ relid = locallock->tag.lock.locktag_field2;
r = RelationIdGetRelation(relid);
if (!RelationIsValid(r))
continue;
@@ -6991,5 +6991,5 @@ ResOwnerReleaseRelation(Datum res)
Assert(rel->rd_refcnt > 0);
rel->rd_refcnt -= 1;
- RelationCloseCleanup((Relation) res);
+ RelationCloseCleanup((Relation) DatumGetPointer(res));
}
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index f944453a1d8..7828bdcba8f 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -459,9 +459,9 @@ GetSysCacheOid(int cacheId,
tuple = SearchSysCache(cacheId, key1, key2, key3, key4);
if (!HeapTupleIsValid(tuple))
return InvalidOid;
- result = heap_getattr(tuple, oidcol,
- SysCache[cacheId]->cc_tupdesc,
- &isNull);
+ result = DatumGetObjectId(heap_getattr(tuple, oidcol,
+ SysCache[cacheId]->cc_tupdesc,
+ &isNull));
Assert(!isNull); /* columns used as oids should never be NULL */
ReleaseSysCache(tuple);
return result;
diff --git a/src/backend/utils/error/csvlog.c b/src/backend/utils/error/csvlog.c
index fdac3c048e3..c3159ed7d97 100644
--- a/src/backend/utils/error/csvlog.c
+++ b/src/backend/utils/error/csvlog.c
@@ -120,7 +120,7 @@ write_csvlog(ErrorData *edata)
appendStringInfoChar(&buf, ',');
/* session id */
- appendStringInfo(&buf, INT64_HEX_FORMAT ".%x", MyStartTime, MyProcPid);
+ appendStringInfo(&buf, "%" PRIx64 ".%x", MyStartTime, MyProcPid);
appendStringInfoChar(&buf, ',');
/* Line number */
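
With INT64_HEX_FORMAT removed (see the c.h hunk below), the session identifier is now formatted directly with the <inttypes.h> macro. A minimal standalone sketch of the resulting format, using made-up stand-ins for MyStartTime and MyProcPid:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        int64_t     start_time = 1723481027;    /* stand-in for MyStartTime */
        int         pid = 6699;                 /* stand-in for MyProcPid */
        char        sessionid[64];

        /* prints "66ba3bc3.1a2b": start time and PID, both in hex */
        snprintf(sessionid, sizeof(sessionid), "%" PRIx64 ".%x",
                 (uint64_t) start_time, pid);
        puts(sessionid);
        return 0;
    }
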
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index afce1a8e1f0..b7b9692f8c8 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -2959,12 +2959,12 @@ log_status_format(StringInfo buf, const char *format, ErrorData *edata)
{
char strfbuf[128];
- snprintf(strfbuf, sizeof(strfbuf) - 1, INT64_HEX_FORMAT ".%x",
+ snprintf(strfbuf, sizeof(strfbuf) - 1, "%" PRIx64 ".%x",
MyStartTime, MyProcPid);
appendStringInfo(buf, "%*s", padding, strfbuf);
}
else
- appendStringInfo(buf, INT64_HEX_FORMAT ".%x", MyStartTime, MyProcPid);
+ appendStringInfo(buf, "%" PRIx64 ".%x", MyStartTime, MyProcPid);
break;
case 'p':
if (padding != 0)
diff --git a/src/backend/utils/error/jsonlog.c b/src/backend/utils/error/jsonlog.c
index 519eacf17f8..2619f499042 100644
--- a/src/backend/utils/error/jsonlog.c
+++ b/src/backend/utils/error/jsonlog.c
@@ -168,7 +168,7 @@ write_jsonlog(ErrorData *edata)
}
/* Session id */
- appendJSONKeyValueFmt(&buf, "session_id", true, INT64_HEX_FORMAT ".%x",
+ appendJSONKeyValueFmt(&buf, "session_id", true, "%" PRIx64 ".%x",
MyStartTime, MyProcPid);
/* Line number */
diff --git a/src/backend/utils/sort/sortsupport.c b/src/backend/utils/sort/sortsupport.c
index e0f500b9aa2..f582c6624f1 100644
--- a/src/backend/utils/sort/sortsupport.c
+++ b/src/backend/utils/sort/sortsupport.c
@@ -57,7 +57,7 @@ comparison_shim(Datum x, Datum y, SortSupport ssup)
if (extra->fcinfo.isnull)
elog(ERROR, "function %u returned NULL", extra->flinfo.fn_oid);
- return result;
+ return DatumGetInt32(result);
}
/*
diff --git a/src/backend/utils/sort/tuplesortvariants.c b/src/backend/utils/sort/tuplesortvariants.c
index 5f70e8dddac..c5d18e46716 100644
--- a/src/backend/utils/sort/tuplesortvariants.c
+++ b/src/backend/utils/sort/tuplesortvariants.c
@@ -865,7 +865,7 @@ tuplesort_putbrintuple(Tuplesortstate *state, BrinTuple *tuple, Size size)
memcpy(&bstup->tuple, tuple, size);
stup.tuple = bstup;
- stup.datum1 = tuple->bt_blkno;
+ stup.datum1 = UInt32GetDatum(tuple->bt_blkno);
stup.isnull1 = false;
/* GetMemoryChunkSpace is not supported for bump contexts */
@@ -1836,7 +1836,7 @@ removeabbrev_index_brin(Tuplesortstate *state, SortTuple *stups, int count)
BrinSortTuple *tuple;
tuple = stups[i].tuple;
- stups[i].datum1 = tuple->tuple.bt_blkno;
+ stups[i].datum1 = UInt32GetDatum(tuple->tuple.bt_blkno);
}
}
@@ -1893,7 +1893,7 @@ readtup_index_brin(Tuplesortstate *state, SortTuple *stup,
stup->tuple = tuple;
/* set up first-column key value, which is block number */
- stup->datum1 = tuple->tuple.bt_blkno;
+ stup->datum1 = UInt32GetDatum(tuple->tuple.bt_blkno);
}
/*
diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c
index 55621f35fb6..0a3ca4315de 100644
--- a/src/bin/pg_basebackup/pg_basebackup.c
+++ b/src/bin/pg_basebackup/pg_basebackup.c
@@ -35,6 +35,7 @@
#include "fe_utils/option_utils.h"
#include "fe_utils/recovery_gen.h"
#include "getopt_long.h"
+#include "libpq/protocol.h"
#include "receivelog.h"
#include "streamutil.h"
@@ -1338,7 +1339,7 @@ ReceiveArchiveStreamChunk(size_t r, char *copybuf, void *callback_data)
/* Each CopyData message begins with a type byte. */
switch (GetCopyDataByte(r, copybuf, &cursor))
{
- case 'n':
+ case PqBackupMsg_NewArchive:
{
/* New archive. */
char *archive_name;
@@ -1410,7 +1411,7 @@ ReceiveArchiveStreamChunk(size_t r, char *copybuf, void *callback_data)
break;
}
- case 'd':
+ case PqMsg_CopyData:
{
/* Archive or manifest data. */
if (state->manifest_buffer != NULL)
@@ -1446,7 +1447,7 @@ ReceiveArchiveStreamChunk(size_t r, char *copybuf, void *callback_data)
break;
}
- case 'p':
+ case PqBackupMsg_ProgressReport:
{
/*
* Progress report.
@@ -1465,7 +1466,7 @@ ReceiveArchiveStreamChunk(size_t r, char *copybuf, void *callback_data)
break;
}
- case 'm':
+ case PqBackupMsg_Manifest:
{
/*
* Manifest data will be sent next. This message is not
diff --git a/src/bin/pg_basebackup/pg_recvlogical.c b/src/bin/pg_basebackup/pg_recvlogical.c
index 0e9d2e23947..7a4d1a2d2ca 100644
--- a/src/bin/pg_basebackup/pg_recvlogical.c
+++ b/src/bin/pg_basebackup/pg_recvlogical.c
@@ -24,6 +24,7 @@
#include "getopt_long.h"
#include "libpq-fe.h"
#include "libpq/pqsignal.h"
+#include "libpq/protocol.h"
#include "pqexpbuffer.h"
#include "streamutil.h"
@@ -149,7 +150,7 @@ sendFeedback(PGconn *conn, TimestampTz now, bool force, bool replyRequested)
LSN_FORMAT_ARGS(output_fsync_lsn),
replication_slot);
- replybuf[len] = 'r';
+ replybuf[len] = PqReplMsg_StandbyStatusUpdate;
len += 1;
fe_sendint64(output_written_lsn, &replybuf[len]); /* write */
len += 8;
@@ -454,7 +455,7 @@ StreamLogicalLog(void)
}
/* Check the message type. */
- if (copybuf[0] == 'k')
+ if (copybuf[0] == PqReplMsg_Keepalive)
{
int pos;
bool replyRequested;
@@ -466,7 +467,7 @@ StreamLogicalLog(void)
* We just check if the server requested a reply, and ignore the
* rest.
*/
- pos = 1; /* skip msgtype 'k' */
+ pos = 1; /* skip msgtype PqReplMsg_Keepalive */
walEnd = fe_recvint64(&copybuf[pos]);
output_written_lsn = Max(walEnd, output_written_lsn);
@@ -509,7 +510,7 @@ StreamLogicalLog(void)
continue;
}
- else if (copybuf[0] != 'w')
+ else if (copybuf[0] != PqReplMsg_WALData)
{
pg_log_error("unrecognized streaming header: \"%c\"",
copybuf[0]);
@@ -521,7 +522,7 @@ StreamLogicalLog(void)
* message. We only need the WAL location field (dataStart), the rest
* of the header is ignored.
*/
- hdr_len = 1; /* msgtype 'w' */
+ hdr_len = 1; /* msgtype PqReplMsg_WALData */
hdr_len += 8; /* dataStart */
hdr_len += 8; /* walEnd */
hdr_len += 8; /* sendTime */
diff --git a/src/bin/pg_basebackup/receivelog.c b/src/bin/pg_basebackup/receivelog.c
index f2b54d3c501..25b13c7f55c 100644
--- a/src/bin/pg_basebackup/receivelog.c
+++ b/src/bin/pg_basebackup/receivelog.c
@@ -21,6 +21,7 @@
#include "access/xlog_internal.h"
#include "common/logging.h"
#include "libpq-fe.h"
+#include "libpq/protocol.h"
#include "receivelog.h"
#include "streamutil.h"
@@ -338,7 +339,7 @@ sendFeedback(PGconn *conn, XLogRecPtr blockpos, TimestampTz now, bool replyReque
char replybuf[1 + 8 + 8 + 8 + 8 + 1];
int len = 0;
- replybuf[len] = 'r';
+ replybuf[len] = PqReplMsg_StandbyStatusUpdate;
len += 1;
fe_sendint64(blockpos, &replybuf[len]); /* write */
len += 8;
@@ -823,13 +824,13 @@ HandleCopyStream(PGconn *conn, StreamCtl *stream,
}
/* Check the message type. */
- if (copybuf[0] == 'k')
+ if (copybuf[0] == PqReplMsg_Keepalive)
{
if (!ProcessKeepaliveMsg(conn, stream, copybuf, r, blockpos,
&last_status))
goto error;
}
- else if (copybuf[0] == 'w')
+ else if (copybuf[0] == PqReplMsg_WALData)
{
if (!ProcessWALDataMsg(conn, stream, copybuf, r, &blockpos))
goto error;
@@ -1001,7 +1002,7 @@ ProcessKeepaliveMsg(PGconn *conn, StreamCtl *stream, char *copybuf, int len,
* Parse the keepalive message, enclosed in the CopyData message. We just
* check if the server requested a reply, and ignore the rest.
*/
- pos = 1; /* skip msgtype 'k' */
+ pos = 1; /* skip msgtype PqReplMsg_Keepalive */
pos += 8; /* skip walEnd */
pos += 8; /* skip sendTime */
@@ -1064,7 +1065,7 @@ ProcessWALDataMsg(PGconn *conn, StreamCtl *stream, char *copybuf, int len,
* message. We only need the WAL location field (dataStart), the rest of
* the header is ignored.
*/
- hdr_len = 1; /* msgtype 'w' */
+ hdr_len = 1; /* msgtype PqReplMsg_WALData */
hdr_len += 8; /* dataStart */
hdr_len += 8; /* walEnd */
hdr_len += 8; /* sendTime */
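
The hunks above only replace the bare 'w', 'k', and 'r' byte literals with named constants from libpq/protocol.h; the payload layout itself is unchanged: one message-type byte followed by three big-endian 64-bit fields, then the WAL bytes. A minimal sketch of walking that header (recv_uint64_be() is a hypothetical helper standing in for fe_recvint64()):

    #include <stddef.h>
    #include <stdint.h>

    #define PqReplMsg_WALData 'w'

    static uint64_t
    recv_uint64_be(const char *p)
    {
        uint64_t    v = 0;

        for (int i = 0; i < 8; i++)
            v = (v << 8) | (uint8_t) p[i];
        return v;
    }

    static int
    parse_wal_data(const char *copybuf, size_t len,
                   uint64_t *dataStart, const char **wal, size_t *wal_len)
    {
        size_t      hdr_len = 1 + 8 + 8 + 8;    /* msgtype + dataStart + walEnd + sendTime */

        if (len < hdr_len || copybuf[0] != PqReplMsg_WALData)
            return -1;

        *dataStart = recv_uint64_be(&copybuf[1]);
        *wal = copybuf + hdr_len;
        *wal_len = len - hdr_len;
        return 0;
    }
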
diff --git a/src/bin/pg_dump/filter.c b/src/bin/pg_dump/filter.c
index 7214d514137..e3cdcf40975 100644
--- a/src/bin/pg_dump/filter.c
+++ b/src/bin/pg_dump/filter.c
@@ -171,9 +171,8 @@ pg_log_filter_error(FilterStateData *fstate, const char *fmt,...)
/*
* filter_get_keyword - read the next filter keyword from buffer
*
- * Search for keywords (limited to ascii alphabetic characters) in
- * the passed in line buffer. Returns NULL when the buffer is empty or the first
- * char is not alpha. The char '_' is allowed, except as the first character.
+ * Search for keywords (strings of non-whitespace characters) in the passed
+ * in line buffer. Returns NULL when the buffer is empty or no keyword exists.
* The length of the found keyword is returned in the size parameter.
*/
static const char *
@@ -182,6 +181,9 @@ filter_get_keyword(const char **line, int *size)
const char *ptr = *line;
const char *result = NULL;
+ /* The passed buffer must not be NULL */
+ Assert(*line != NULL);
+
/* Set returned length preemptively in case no keyword is found */
*size = 0;
@@ -189,11 +191,12 @@ filter_get_keyword(const char **line, int *size)
while (isspace((unsigned char) *ptr))
ptr++;
- if (isalpha((unsigned char) *ptr))
+ /* Grab one keyword that's the string of non-whitespace characters */
+ if (*ptr != '\0' && !isspace((unsigned char) *ptr))
{
result = ptr++;
- while (isalpha((unsigned char) *ptr) || *ptr == '_')
+ while (*ptr != '\0' && !isspace((unsigned char) *ptr))
ptr++;
*size = ptr - result;
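
A standalone sketch of the new keyword rule (names are illustrative, not pg_dump's): a keyword is any run of non-whitespace characters, so a value such as "table-data" is consumed whole rather than being cut at the hyphen.

    #include <ctype.h>
    #include <stdio.h>

    static const char *
    next_keyword(const char **line, int *size)
    {
        const char *ptr = *line;
        const char *result = NULL;

        *size = 0;

        while (isspace((unsigned char) *ptr))
            ptr++;

        if (*ptr != '\0' && !isspace((unsigned char) *ptr))
        {
            result = ptr++;
            while (*ptr != '\0' && !isspace((unsigned char) *ptr))
                ptr++;
            *size = (int) (ptr - result);
        }

        *line = ptr;
        return result;
    }

    int
    main(void)
    {
        const char *line = "exclude table-data one";
        const char *kw;
        int         len;

        /* prints "exclude", "table-data", "one" -- hyphens no longer split */
        while ((kw = next_keyword(&line, &len)) != NULL)
            printf("\"%.*s\"\n", len, kw);
        return 0;
    }
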
diff --git a/src/bin/pg_dump/t/005_pg_dump_filterfile.pl b/src/bin/pg_dump/t/005_pg_dump_filterfile.pl
index f05e8a20e05..5c69ec31c39 100644
--- a/src/bin/pg_dump/t/005_pg_dump_filterfile.pl
+++ b/src/bin/pg_dump/t/005_pg_dump_filterfile.pl
@@ -418,10 +418,16 @@ command_fails_like(
qr/invalid filter command/,
"invalid syntax: incorrect filter command");
-# Test invalid object type
+# Test invalid object type.
+#
+# This test also verifies that keywords are correctly recognized as strings of
+# non-whitespace characters. If the parser incorrectly treats non-whitespace
+# delimiters (like hyphens) as keyword boundaries, "table-data" might be
+# misread as the valid object type "table". To catch such issues,
+# "table-data" is used here as an intentionally invalid object type.
open $inputfile, '>', "$tempdir/inputfile.txt"
or die "unable to open filterfile for writing";
-print $inputfile "include xxx";
+print $inputfile "exclude table-data one";
close $inputfile;
command_fails_like(
@@ -432,8 +438,8 @@ command_fails_like(
'--filter' => "$tempdir/inputfile.txt",
'postgres'
],
- qr/unsupported filter object type: "xxx"/,
- "invalid syntax: invalid object type specified, should be table, schema, foreign_data or data"
+ qr/unsupported filter object type: "table-data"/,
+ "invalid syntax: invalid object type specified"
);
# Test missing object identifier pattern
diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c
index 310f53c5577..67eedbae265 100644
--- a/src/bin/pg_upgrade/check.c
+++ b/src/bin/pg_upgrade/check.c
@@ -1713,7 +1713,7 @@ check_for_not_null_inheritance(ClusterInfo *cluster)
"If the parent column(s) are NOT NULL, then the child column must\n"
"also be marked NOT NULL, or the upgrade will fail.\n"
"You can fix this by running\n"
- " ALTER TABLE tablename ALTER column SET NOT NULL;\n"
+ " ALTER TABLE tablename ALTER column SET NOT NULL;\n"
"on each column listed in the file:\n"
" %s", report.path);
}
diff --git a/src/include/access/htup_details.h b/src/include/access/htup_details.h
index aa957cf3b01..ae813a79041 100644
--- a/src/include/access/htup_details.h
+++ b/src/include/access/htup_details.h
@@ -884,7 +884,7 @@ fastgetattr(HeapTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
if (att_isnull(attnum - 1, tup->t_data->t_bits))
{
*isnull = true;
- return (Datum) NULL;
+ return (Datum) 0;
}
else
return nocachegetattr(tup, attnum, tupleDesc);
diff --git a/src/include/access/itup.h b/src/include/access/itup.h
index 7066c2a2868..338e90749bd 100644
--- a/src/include/access/itup.h
+++ b/src/include/access/itup.h
@@ -154,7 +154,7 @@ index_getattr(IndexTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
if (att_isnull(attnum - 1, (bits8 *) tup + sizeof(IndexTupleData)))
{
*isnull = true;
- return (Datum) NULL;
+ return (Datum) 0;
}
else
return nocache_index_getattr(tup, attnum, tupleDesc);
diff --git a/src/include/access/reloptions.h b/src/include/access/reloptions.h
index dfbb4c85460..a604a4702c3 100644
--- a/src/include/access/reloptions.h
+++ b/src/include/access/reloptions.h
@@ -233,7 +233,7 @@ extern void add_local_string_reloption(local_relopts *relopts, const char *name,
fill_string_relopt filler, int offset);
extern Datum transformRelOptions(Datum oldOptions, List *defList,
- const char *namspace, const char *const validnsps[],
+ const char *nameSpace, const char *const validnsps[],
bool acceptOidsOff, bool isReset);
extern List *untransformRelOptions(Datum options);
extern bytea *extractRelOptions(HeapTuple tuple, TupleDesc tupdesc,
diff --git a/src/include/c.h b/src/include/c.h
index 6d4495bdd9f..bbdaa88c63a 100644
--- a/src/include/c.h
+++ b/src/include/c.h
@@ -530,8 +530,6 @@ typedef uint32 bits32; /* >= 32 bits */
/* snprintf format strings to use for 64-bit integers */
#define INT64_FORMAT "%" PRId64
#define UINT64_FORMAT "%" PRIu64
-#define INT64_HEX_FORMAT "%" PRIx64
-#define UINT64_HEX_FORMAT "%" PRIx64
/*
* 128-bit signed and unsigned integers
diff --git a/src/include/common/int128.h b/src/include/common/int128.h
index a50f5709c29..62aae1bc6a7 100644
--- a/src/include/common/int128.h
+++ b/src/include/common/int128.h
@@ -6,7 +6,7 @@
* We make use of the native int128 type if there is one, otherwise
* implement things the hard way based on two int64 halves.
*
- * See src/tools/testint128.c for a simple test harness for this file.
+ * See src/test/modules/test_int128 for a simple test harness for this file.
*
* Copyright (c) 2017-2025, PostgreSQL Global Development Group
*
@@ -29,146 +29,172 @@
#endif
#endif
-
+/*
+ * If native int128 support is enabled, INT128 is just int128. Otherwise, it
+ * is a structure with separate 64-bit high and low parts.
+ *
+ * We lay out the INT128 structure with the same content and byte ordering
+ * that a native int128 type would (probably) have. This makes no difference
+ * for ordinary use of INT128, but allows union'ing INT128 with int128 for
+ * testing purposes.
+ *
+ * PG_INT128_HI_INT64 and PG_INT128_LO_UINT64 allow the (signed) high and
+ * (unsigned) low 64-bit integer parts to be extracted portably on all
+ * platforms.
+ */
#if USE_NATIVE_INT128
typedef int128 INT128;
-/*
- * Add an unsigned int64 value into an INT128 variable.
- */
-static inline void
-int128_add_uint64(INT128 *i128, uint64 v)
+#define PG_INT128_HI_INT64(i128) ((int64) ((i128) >> 64))
+#define PG_INT128_LO_UINT64(i128) ((uint64) (i128))
+
+#else
+
+typedef struct
{
- *i128 += v;
-}
+#ifdef WORDS_BIGENDIAN
+ int64 hi; /* most significant 64 bits, including sign */
+ uint64 lo; /* least significant 64 bits, without sign */
+#else
+ uint64 lo; /* least significant 64 bits, without sign */
+ int64 hi; /* most significant 64 bits, including sign */
+#endif
+} INT128;
+
+#define PG_INT128_HI_INT64(i128) ((i128).hi)
+#define PG_INT128_LO_UINT64(i128) ((i128).lo)
+
+#endif
/*
- * Add a signed int64 value into an INT128 variable.
+ * Construct an INT128 from (signed) high and (unsigned) low 64-bit integer
+ * parts.
*/
-static inline void
-int128_add_int64(INT128 *i128, int64 v)
+static inline INT128
+make_int128(int64 hi, uint64 lo)
{
- *i128 += v;
+#if USE_NATIVE_INT128
+ return (((int128) hi) << 64) + lo;
+#else
+ INT128 val;
+
+ val.hi = hi;
+ val.lo = lo;
+ return val;
+#endif
}
/*
- * Add the 128-bit product of two int64 values into an INT128 variable.
- *
- * XXX with a stupid compiler, this could actually be less efficient than
- * the other implementation; maybe we should do it by hand always?
+ * Add an unsigned int64 value into an INT128 variable.
*/
static inline void
-int128_add_int64_mul_int64(INT128 *i128, int64 x, int64 y)
+int128_add_uint64(INT128 *i128, uint64 v)
{
- *i128 += (int128) x * (int128) y;
-}
+#if USE_NATIVE_INT128
+ *i128 += v;
+#else
+ /*
+ * First add the value to the .lo part, then check to see if a carry needs
+ * to be propagated into the .hi part. Since this is unsigned integer
+ * arithmetic, which is just modular arithmetic, a carry is needed if the
+ * new .lo part is less than the old .lo part (i.e., if modular
+ * wrap-around occurred). Writing this in the form below, rather than
+ * using an "if" statement causes modern compilers to produce branchless
+ * machine code identical to the native code.
+ */
+ uint64 oldlo = i128->lo;
-/*
- * Compare two INT128 values, return -1, 0, or +1.
- */
-static inline int
-int128_compare(INT128 x, INT128 y)
-{
- if (x < y)
- return -1;
- if (x > y)
- return 1;
- return 0;
+ i128->lo += v;
+ i128->hi += (i128->lo < oldlo);
+#endif
}
/*
- * Widen int64 to INT128.
+ * Add a signed int64 value into an INT128 variable.
*/
-static inline INT128
-int64_to_int128(int64 v)
+static inline void
+int128_add_int64(INT128 *i128, int64 v)
{
- return (INT128) v;
-}
+#if USE_NATIVE_INT128
+ *i128 += v;
+#else
+ /*
+ * This is much like the above except that the carry logic differs for
+ * negative v -- we need to subtract 1 from the .hi part if the new .lo
+ * value is greater than the old .lo value. That can be achieved without
+ * any branching by adding the sign bit from v (v >> 63 = 0 or -1) to the
+ * previous result (for negative v, if the new .lo value is less than the
+ * old .lo value, the two terms cancel and we leave the .hi part
+ * unchanged, otherwise we subtract 1 from the .hi part). With modern
+ * compilers this often produces machine code identical to the native
+ * code.
+ */
+ uint64 oldlo = i128->lo;
-/*
- * Convert INT128 to int64 (losing any high-order bits).
- * This also works fine for casting down to uint64.
- */
-static inline int64
-int128_to_int64(INT128 val)
-{
- return (int64) val;
+ i128->lo += v;
+ i128->hi += (i128->lo < oldlo) + (v >> 63);
+#endif
}
-#else /* !USE_NATIVE_INT128 */
-
/*
- * We lay out the INT128 structure with the same content and byte ordering
- * that a native int128 type would (probably) have. This makes no difference
- * for ordinary use of INT128, but allows union'ing INT128 with int128 for
- * testing purposes.
+ * Add an INT128 value into an INT128 variable.
*/
-typedef struct
+static inline void
+int128_add_int128(INT128 *i128, INT128 v)
{
-#ifdef WORDS_BIGENDIAN
- int64 hi; /* most significant 64 bits, including sign */
- uint64 lo; /* least significant 64 bits, without sign */
+#if USE_NATIVE_INT128
+ *i128 += v;
#else
- uint64 lo; /* least significant 64 bits, without sign */
- int64 hi; /* most significant 64 bits, including sign */
+ int128_add_uint64(i128, v.lo);
+ i128->hi += v.hi;
#endif
-} INT128;
+}
/*
- * Add an unsigned int64 value into an INT128 variable.
+ * Subtract an unsigned int64 value from an INT128 variable.
*/
static inline void
-int128_add_uint64(INT128 *i128, uint64 v)
+int128_sub_uint64(INT128 *i128, uint64 v)
{
+#if USE_NATIVE_INT128
+ *i128 -= v;
+#else
/*
- * First add the value to the .lo part, then check to see if a carry needs
- * to be propagated into the .hi part. A carry is needed if both inputs
- * have high bits set, or if just one input has high bit set while the new
- * .lo part doesn't. Remember that .lo part is unsigned; we cast to
- * signed here just as a cheap way to check the high bit.
+ * This is like int128_add_uint64(), except we must propagate a borrow to
+ * (subtract 1 from) the .hi part if the new .lo part is greater than the
+ * old .lo part.
*/
uint64 oldlo = i128->lo;
- i128->lo += v;
- if (((int64) v < 0 && (int64) oldlo < 0) ||
- (((int64) v < 0 || (int64) oldlo < 0) && (int64) i128->lo >= 0))
- i128->hi++;
+ i128->lo -= v;
+ i128->hi -= (i128->lo > oldlo);
+#endif
}
/*
- * Add a signed int64 value into an INT128 variable.
+ * Subtract a signed int64 value from an INT128 variable.
*/
static inline void
-int128_add_int64(INT128 *i128, int64 v)
+int128_sub_int64(INT128 *i128, int64 v)
{
- /*
- * This is much like the above except that the carry logic differs for
- * negative v. Ordinarily we'd need to subtract 1 from the .hi part
- * (corresponding to adding the sign-extended bits of v to it); but if
- * there is a carry out of the .lo part, that cancels and we do nothing.
- */
+#if USE_NATIVE_INT128
+ *i128 -= v;
+#else
+ /* Like int128_add_int64() with the sign of v inverted */
uint64 oldlo = i128->lo;
- i128->lo += v;
- if (v >= 0)
- {
- if ((int64) oldlo < 0 && (int64) i128->lo >= 0)
- i128->hi++;
- }
- else
- {
- if (!((int64) oldlo < 0 || (int64) i128->lo >= 0))
- i128->hi--;
- }
+ i128->lo -= v;
+ i128->hi -= (i128->lo > oldlo) + (v >> 63);
+#endif
}
/*
- * INT64_AU32 extracts the most significant 32 bits of int64 as int64, while
- * INT64_AL32 extracts the least significant 32 bits as uint64.
+ * INT64_HI_INT32 extracts the most significant 32 bits of int64 as int32.
+ * INT64_LO_UINT32 extracts the least significant 32 bits as uint32.
*/
-#define INT64_AU32(i64) ((i64) >> 32)
-#define INT64_AL32(i64) ((i64) & UINT64CONST(0xFFFFFFFF))
+#define INT64_HI_INT32(i64) ((int32) ((i64) >> 32))
+#define INT64_LO_UINT32(i64) ((uint32) (i64))
/*
* Add the 128-bit product of two int64 values into an INT128 variable.
@@ -176,7 +202,14 @@ int128_add_int64(INT128 *i128, int64 v)
static inline void
int128_add_int64_mul_int64(INT128 *i128, int64 x, int64 y)
{
- /* INT64_AU32 must use arithmetic right shift */
+#if USE_NATIVE_INT128
+ /*
+ * XXX with a stupid compiler, this could actually be less efficient than
+ * the non-native implementation; maybe we should do it by hand always?
+ */
+ *i128 += (int128) x * (int128) y;
+#else
+ /* INT64_HI_INT32 must use arithmetic right shift */
StaticAssertDecl(((int64) -1 >> 1) == (int64) -1,
"arithmetic right shift is needed");
@@ -201,34 +234,188 @@ int128_add_int64_mul_int64(INT128 *i128, int64 x, int64 y)
/* No need to work hard if product must be zero */
if (x != 0 && y != 0)
{
- int64 x_u32 = INT64_AU32(x);
- uint64 x_l32 = INT64_AL32(x);
- int64 y_u32 = INT64_AU32(y);
- uint64 y_l32 = INT64_AL32(y);
+ int32 x_hi = INT64_HI_INT32(x);
+ uint32 x_lo = INT64_LO_UINT32(x);
+ int32 y_hi = INT64_HI_INT32(y);
+ uint32 y_lo = INT64_LO_UINT32(y);
int64 tmp;
/* the first term */
- i128->hi += x_u32 * y_u32;
-
- /* the second term: sign-extend it only if x is negative */
- tmp = x_u32 * y_l32;
- if (x < 0)
- i128->hi += INT64_AU32(tmp);
- else
- i128->hi += ((uint64) tmp) >> 32;
- int128_add_uint64(i128, ((uint64) INT64_AL32(tmp)) << 32);
-
- /* the third term: sign-extend it only if y is negative */
- tmp = x_l32 * y_u32;
- if (y < 0)
- i128->hi += INT64_AU32(tmp);
- else
- i128->hi += ((uint64) tmp) >> 32;
- int128_add_uint64(i128, ((uint64) INT64_AL32(tmp)) << 32);
+ i128->hi += (int64) x_hi * (int64) y_hi;
+
+ /* the second term: sign-extended with the sign of x */
+ tmp = (int64) x_hi * (int64) y_lo;
+ i128->hi += INT64_HI_INT32(tmp);
+ int128_add_uint64(i128, ((uint64) INT64_LO_UINT32(tmp)) << 32);
+
+ /* the third term: sign-extended with the sign of y */
+ tmp = (int64) x_lo * (int64) y_hi;
+ i128->hi += INT64_HI_INT32(tmp);
+ int128_add_uint64(i128, ((uint64) INT64_LO_UINT32(tmp)) << 32);
/* the fourth term: always unsigned */
- int128_add_uint64(i128, x_l32 * y_l32);
+ int128_add_uint64(i128, (uint64) x_lo * (uint64) y_lo);
}
+#endif
+}
+
+/*
+ * Subtract the 128-bit product of two int64 values from an INT128 variable.
+ */
+static inline void
+int128_sub_int64_mul_int64(INT128 *i128, int64 x, int64 y)
+{
+#if USE_NATIVE_INT128
+ *i128 -= (int128) x * (int128) y;
+#else
+ /* As above, except subtract the 128-bit product */
+ if (x != 0 && y != 0)
+ {
+ int32 x_hi = INT64_HI_INT32(x);
+ uint32 x_lo = INT64_LO_UINT32(x);
+ int32 y_hi = INT64_HI_INT32(y);
+ uint32 y_lo = INT64_LO_UINT32(y);
+ int64 tmp;
+
+ /* the first term */
+ i128->hi -= (int64) x_hi * (int64) y_hi;
+
+ /* the second term: sign-extended with the sign of x */
+ tmp = (int64) x_hi * (int64) y_lo;
+ i128->hi -= INT64_HI_INT32(tmp);
+ int128_sub_uint64(i128, ((uint64) INT64_LO_UINT32(tmp)) << 32);
+
+ /* the third term: sign-extended with the sign of y */
+ tmp = (int64) x_lo * (int64) y_hi;
+ i128->hi -= INT64_HI_INT32(tmp);
+ int128_sub_uint64(i128, ((uint64) INT64_LO_UINT32(tmp)) << 32);
+
+ /* the fourth term: always unsigned */
+ int128_sub_uint64(i128, (uint64) x_lo * (uint64) y_lo);
+ }
+#endif
+}
+
+/*
+ * Divide an INT128 variable by a signed int32 value, returning the quotient
+ * and remainder. The remainder will have the same sign as *i128.
+ *
+ * Note: This provides no protection against dividing by 0, or dividing
+ * INT128_MIN by -1, which overflows. It is the caller's responsibility to
+ * guard against those.
+ */
+static inline void
+int128_div_mod_int32(INT128 *i128, int32 v, int32 *remainder)
+{
+#if USE_NATIVE_INT128
+ int128 old_i128 = *i128;
+
+ *i128 /= v;
+ *remainder = (int32) (old_i128 - *i128 * v);
+#else
+ /*
+ * To avoid any intermediate values overflowing (as happens if INT64_MIN
+ * is divided by -1), we first compute the quotient abs(*i128) / abs(v)
+ * using unsigned 64-bit arithmetic, and then fix the signs up at the end.
+ *
+ * The quotient is computed using the short division algorithm described
+ * in Knuth volume 2, section 4.3.1 exercise 16 (cf. div_var_int() in
+ * numeric.c). Since the absolute value of the divisor is known to be at
+ * most 2^31, the remainder carried from one digit to the next is at most
+ * 2^31 - 1, and so there is no danger of overflow when this is combined
+ * with the next digit (a 32-bit unsigned integer).
+ */
+ uint64 n_hi;
+ uint64 n_lo;
+ uint32 d;
+ uint64 q;
+ uint64 r;
+ uint64 tmp;
+
+ /* numerator: absolute value of *i128 */
+ if (i128->hi < 0)
+ {
+ n_hi = 0 - ((uint64) i128->hi);
+ n_lo = 0 - i128->lo;
+ if (n_lo != 0)
+ n_hi--;
+ }
+ else
+ {
+ n_hi = i128->hi;
+ n_lo = i128->lo;
+ }
+
+ /* denominator: absolute value of v */
+ d = abs(v);
+
+ /* quotient and remainder of high 64 bits */
+ q = n_hi / d;
+ r = n_hi % d;
+ n_hi = q;
+
+ /* quotient and remainder of next 32 bits (upper half of n_lo) */
+ tmp = (r << 32) + (n_lo >> 32);
+ q = tmp / d;
+ r = tmp % d;
+
+ /* quotient and remainder of last 32 bits (lower half of n_lo) */
+ tmp = (r << 32) + (uint32) n_lo;
+ n_lo = q << 32;
+ q = tmp / d;
+ r = tmp % d;
+ n_lo += q;
+
+ /* final remainder should have the same sign as *i128 */
+ *remainder = i128->hi < 0 ? (int32) (0 - r) : (int32) r;
+
+ /* store the quotient in *i128, negating it if necessary */
+ if ((i128->hi < 0) != (v < 0))
+ {
+ n_hi = 0 - n_hi;
+ n_lo = 0 - n_lo;
+ if (n_lo != 0)
+ n_hi--;
+ }
+ i128->hi = (int64) n_hi;
+ i128->lo = n_lo;
+#endif
+}
+
+/*
+ * Test if an INT128 value is zero.
+ */
+static inline bool
+int128_is_zero(INT128 x)
+{
+#if USE_NATIVE_INT128
+ return x == 0;
+#else
+ return x.hi == 0 && x.lo == 0;
+#endif
+}
+
+/*
+ * Return the sign of an INT128 value (returns -1, 0, or +1).
+ */
+static inline int
+int128_sign(INT128 x)
+{
+#if USE_NATIVE_INT128
+ if (x < 0)
+ return -1;
+ if (x > 0)
+ return 1;
+ return 0;
+#else
+ if (x.hi < 0)
+ return -1;
+ if (x.hi > 0)
+ return 1;
+ if (x.lo > 0)
+ return 1;
+ return 0;
+#endif
}
/*
@@ -237,6 +424,13 @@ int128_add_int64_mul_int64(INT128 *i128, int64 x, int64 y)
static inline int
int128_compare(INT128 x, INT128 y)
{
+#if USE_NATIVE_INT128
+ if (x < y)
+ return -1;
+ if (x > y)
+ return 1;
+ return 0;
+#else
if (x.hi < y.hi)
return -1;
if (x.hi > y.hi)
@@ -246,6 +440,7 @@ int128_compare(INT128 x, INT128 y)
if (x.lo > y.lo)
return 1;
return 0;
+#endif
}
/*
@@ -254,11 +449,15 @@ int128_compare(INT128 x, INT128 y)
static inline INT128
int64_to_int128(int64 v)
{
+#if USE_NATIVE_INT128
+ return (INT128) v;
+#else
INT128 val;
val.lo = (uint64) v;
val.hi = (v < 0) ? -INT64CONST(1) : INT64CONST(0);
return val;
+#endif
}
/*
@@ -268,9 +467,11 @@ int64_to_int128(int64 v)
static inline int64
int128_to_int64(INT128 val)
{
+#if USE_NATIVE_INT128
+ return (int64) val;
+#else
return (int64) val.lo;
+#endif
}
-#endif /* USE_NATIVE_INT128 */
-
#endif /* INT128_H */
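
A standalone sketch (not part of the header) of the branchless carry trick the non-native paths above rely on: after adding into the unsigned low half, the comparison (new lo < old lo) is exactly the carry out of that addition, and the analogous (new lo > old lo) comparison is the borrow for subtraction.

    #include <assert.h>
    #include <stdint.h>

    typedef struct
    {
        uint64_t    lo;     /* least significant 64 bits */
        int64_t     hi;     /* most significant 64 bits, including sign */
    } i128_t;

    static void
    i128_add_u64(i128_t *x, uint64_t v)
    {
        uint64_t    oldlo = x->lo;

        x->lo += v;
        x->hi += (x->lo < oldlo);   /* carry is 0 or 1, no branch needed */
    }

    int
    main(void)
    {
        i128_t      x = {.lo = UINT64_MAX, .hi = 0};

        i128_add_u64(&x, 1);        /* wraps the low half, carries into hi */
        assert(x.lo == 0 && x.hi == 1);
        return 0;
    }
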
diff --git a/src/include/libpq/protocol.h b/src/include/libpq/protocol.h
index b0bcb3cdc26..c64e628628d 100644
--- a/src/include/libpq/protocol.h
+++ b/src/include/libpq/protocol.h
@@ -69,6 +69,27 @@
#define PqMsg_Progress 'P'
+/* Replication codes sent by the primary (wrapped in CopyData messages). */
+
+#define PqReplMsg_Keepalive 'k'
+#define PqReplMsg_PrimaryStatusUpdate 's'
+#define PqReplMsg_WALData 'w'
+
+
+/* Replication codes sent by the standby (wrapped in CopyData messages). */
+
+#define PqReplMsg_HotStandbyFeedback 'h'
+#define PqReplMsg_PrimaryStatusRequest 'p'
+#define PqReplMsg_StandbyStatusUpdate 'r'
+
+
+/* Codes used for backups via COPY OUT (wrapped in CopyData messages). */
+
+#define PqBackupMsg_Manifest 'm'
+#define PqBackupMsg_NewArchive 'n'
+#define PqBackupMsg_ProgressReport 'p'
+
+
/* These are the authentication request codes sent by the backend. */
#define AUTH_REQ_OK 0 /* User is authenticated */
diff --git a/src/include/optimizer/plancat.h b/src/include/optimizer/plancat.h
index d6f6f4ad2d7..dd8f2cd157f 100644
--- a/src/include/optimizer/plancat.h
+++ b/src/include/optimizer/plancat.h
@@ -76,6 +76,8 @@ extern double get_function_rows(PlannerInfo *root, Oid funcid, Node *node);
extern bool has_row_triggers(PlannerInfo *root, Index rti, CmdType event);
+extern bool has_transition_tables(PlannerInfo *root, Index rti, CmdType event);
+
extern bool has_stored_generated_columns(PlannerInfo *root, Index rti);
extern Bitmapset *get_dependent_generated_columns(PlannerInfo *root, Index rti,
diff --git a/src/include/utils/pg_locale.h b/src/include/utils/pg_locale.h
index 931f5b3b880..2b072cafb4d 100644
--- a/src/include/utils/pg_locale.h
+++ b/src/include/utils/pg_locale.h
@@ -18,6 +18,8 @@
/* only include the C APIs, to avoid errors in cpluspluscheck */
#undef U_SHOW_CPLUSPLUS_API
#define U_SHOW_CPLUSPLUS_API 0
+#undef U_SHOW_CPLUSPLUS_HEADER_API
+#define U_SHOW_CPLUSPLUS_HEADER_API 0
#include <unicode/ucol.h>
#endif
diff --git a/src/interfaces/ecpg/compatlib/informix.c b/src/interfaces/ecpg/compatlib/informix.c
index e829d722f22..ca11a81f1bc 100644
--- a/src/interfaces/ecpg/compatlib/informix.c
+++ b/src/interfaces/ecpg/compatlib/informix.c
@@ -807,8 +807,10 @@ rfmtlong(long lng_val, const char *fmt, char *outbuf)
if (strchr(fmt, (int) '(') && strchr(fmt, (int) ')'))
brackets_ok = 1;
- /* get position of the right-most dot in the format-string */
- /* and fill the temp-string wit '0's up to there. */
+ /*
+ * get position of the right-most dot in the format-string and fill the
+ * temp-string with '0's up to there.
+ */
dotpos = getRightMostDot(fmt);
/* start to parse the format-string */
diff --git a/src/interfaces/libpq-oauth/oauth-curl.c b/src/interfaces/libpq-oauth/oauth-curl.c
index dba9a684fa8..aa50b00d053 100644
--- a/src/interfaces/libpq-oauth/oauth-curl.c
+++ b/src/interfaces/libpq-oauth/oauth-curl.c
@@ -278,6 +278,7 @@ struct async_ctx
bool user_prompted; /* have we already sent the authz prompt? */
bool used_basic_auth; /* did we send a client secret? */
bool debugging; /* can we give unsafe developer assistance? */
+ int dbg_num_calls; /* (debug mode) how many times were we called? */
};
/*
@@ -1291,22 +1292,31 @@ register_socket(CURL *curl, curl_socket_t socket, int what, void *ctx,
return 0;
#elif defined(HAVE_SYS_EVENT_H)
- struct kevent ev[2] = {0};
+ struct kevent ev[2];
struct kevent ev_out[2];
struct timespec timeout = {0};
int nev = 0;
int res;
+ /*
+ * We don't know which of the events is currently registered, perhaps
+ * both, so we always try to remove unneeded events. This means we need to
+ * tolerate ENOENT below.
+ */
switch (what)
{
case CURL_POLL_IN:
EV_SET(&ev[nev], socket, EVFILT_READ, EV_ADD | EV_RECEIPT, 0, 0, 0);
nev++;
+ EV_SET(&ev[nev], socket, EVFILT_WRITE, EV_DELETE | EV_RECEIPT, 0, 0, 0);
+ nev++;
break;
case CURL_POLL_OUT:
EV_SET(&ev[nev], socket, EVFILT_WRITE, EV_ADD | EV_RECEIPT, 0, 0, 0);
nev++;
+ EV_SET(&ev[nev], socket, EVFILT_READ, EV_DELETE | EV_RECEIPT, 0, 0, 0);
+ nev++;
break;
case CURL_POLL_INOUT:
@@ -1317,12 +1327,6 @@ register_socket(CURL *curl, curl_socket_t socket, int what, void *ctx,
break;
case CURL_POLL_REMOVE:
-
- /*
- * We don't know which of these is currently registered, perhaps
- * both, so we try to remove both. This means we need to tolerate
- * ENOENT below.
- */
EV_SET(&ev[nev], socket, EVFILT_READ, EV_DELETE | EV_RECEIPT, 0, 0, 0);
nev++;
EV_SET(&ev[nev], socket, EVFILT_WRITE, EV_DELETE | EV_RECEIPT, 0, 0, 0);
@@ -1334,7 +1338,10 @@ register_socket(CURL *curl, curl_socket_t socket, int what, void *ctx,
return -1;
}
- res = kevent(actx->mux, ev, nev, ev_out, lengthof(ev_out), &timeout);
+ Assert(nev <= lengthof(ev));
+ Assert(nev <= lengthof(ev_out));
+
+ res = kevent(actx->mux, ev, nev, ev_out, nev, &timeout);
if (res < 0)
{
actx_error(actx, "could not modify kqueue: %m");
@@ -1377,6 +1384,53 @@ register_socket(CURL *curl, curl_socket_t socket, int what, void *ctx,
}
/*
+ * If there is no work to do on any of the descriptors in the multiplexer, then
+ * this function must ensure that the multiplexer is not readable.
+ *
+ * Unlike epoll descriptors, kqueue descriptors only transition from readable to
+ * unreadable when kevent() is called and finds nothing, after removing
+ * level-triggered conditions that have gone away. We therefore need a dummy
+ * kevent() call after operations might have been performed on the monitored
+ * sockets or timer_fd. Any event returned is ignored here, but it also remains
+ * queued (being level-triggered) and leaves the descriptor readable. This is a
+ * no-op for epoll descriptors.
+ */
+static bool
+comb_multiplexer(struct async_ctx *actx)
+{
+#if defined(HAVE_SYS_EPOLL_H)
+ /* The epoll implementation doesn't hold onto stale events. */
+ return true;
+#elif defined(HAVE_SYS_EVENT_H)
+ struct timespec timeout = {0};
+ struct kevent ev;
+
+ /*
+ * Try to read a single pending event. We can actually ignore the result:
+ * either we found an event to process, in which case the multiplexer is
+ * correctly readable for that event at minimum, and it doesn't matter if
+ * there are any stale events; or we didn't find any, in which case the
+ * kernel will have discarded any stale events as it traveled to the end
+ * of the queue.
+ *
+ * Note that this depends on our registrations being level-triggered --
+ * even the timer, so we use a chained kqueue for that instead of an
+ * EVFILT_TIMER on the top-level mux. If we used edge-triggered events,
+ * this call would improperly discard them.
+ */
+ if (kevent(actx->mux, NULL, 0, &ev, 1, &timeout) < 0)
+ {
+ actx_error(actx, "could not comb kqueue: %m");
+ return false;
+ }
+
+ return true;
+#else
+#error comb_multiplexer is not implemented on this platform
+#endif
+}
+
+/*
* Enables or disables the timer in the multiplexer set. The timeout value is
* in milliseconds (negative values disable the timer).
*
@@ -1483,40 +1537,20 @@ set_timer(struct async_ctx *actx, long timeout)
/*
* Returns 1 if the timeout in the multiplexer set has expired since the last
- * call to set_timer(), 0 if the timer is still running, or -1 (with an
- * actx_error() report) if the timer cannot be queried.
+ * call to set_timer(), 0 if the timer is either still running or disarmed, or
+ * -1 (with an actx_error() report) if the timer cannot be queried.
*/
static int
timer_expired(struct async_ctx *actx)
{
-#if defined(HAVE_SYS_EPOLL_H)
- struct itimerspec spec = {0};
-
- if (timerfd_gettime(actx->timerfd, &spec) < 0)
- {
- actx_error(actx, "getting timerfd value: %m");
- return -1;
- }
-
- /*
- * This implementation assumes we're using single-shot timers. If you
- * change to using intervals, you'll need to reimplement this function
- * too, possibly with the read() or select() interfaces for timerfd.
- */
- Assert(spec.it_interval.tv_sec == 0
- && spec.it_interval.tv_nsec == 0);
-
- /* If the remaining time to expiration is zero, we're done. */
- return (spec.it_value.tv_sec == 0
- && spec.it_value.tv_nsec == 0);
-#elif defined(HAVE_SYS_EVENT_H)
+#if defined(HAVE_SYS_EPOLL_H) || defined(HAVE_SYS_EVENT_H)
int res;
- /* Is the timer queue ready? */
+ /* Is the timer ready? */
res = PQsocketPoll(actx->timerfd, 1 /* forRead */ , 0, 0);
if (res < 0)
{
- actx_error(actx, "checking kqueue for timeout: %m");
+ actx_error(actx, "checking timer expiration: %m");
return -1;
}
@@ -1549,6 +1583,36 @@ register_timer(CURLM *curlm, long timeout, void *ctx)
}
/*
+ * Removes any expired-timer event from the multiplexer. If was_expired is not
+ * NULL, it is set to whether the timer had expired at the time of the call.
+ */
+static bool
+drain_timer_events(struct async_ctx *actx, bool *was_expired)
+{
+ int res;
+
+ res = timer_expired(actx);
+ if (res < 0)
+ return false;
+
+ if (res > 0)
+ {
+ /*
+ * Timer is expired. We could drain the event manually from the
+ * timerfd, but it's easier to simply disable it; that keeps the
+ * platform-specific code in set_timer().
+ */
+ if (!set_timer(actx, -1))
+ return false;
+ }
+
+ if (was_expired)
+ *was_expired = (res > 0);
+
+ return true;
+}
+
+/*
* Prints Curl request debugging information to stderr.
*
* Note that this will expose a number of critical secrets, so users have to opt
@@ -2751,38 +2815,64 @@ pg_fe_run_oauth_flow_impl(PGconn *conn)
{
PostgresPollingStatusType status;
+ /*
+ * Clear any expired timeout before calling back into
+ * Curl. Curl is not guaranteed to do this for us, because
+ * its API expects us to use single-shot (i.e.
+ * edge-triggered) timeouts, and ours are level-triggered
+ * via the mux.
+ *
+ * This can't be combined with the comb_multiplexer() call
+ * below: we might accidentally clear a short timeout that
+ * was both set and expired during the call to
+ * drive_request().
+ */
+ if (!drain_timer_events(actx, NULL))
+ goto error_return;
+
+ /* Move the request forward. */
status = drive_request(actx);
if (status == PGRES_POLLING_FAILED)
goto error_return;
- else if (status != PGRES_POLLING_OK)
- {
- /* not done yet */
- return status;
- }
+ else if (status == PGRES_POLLING_OK)
+ break; /* done! */
- break;
+ /*
+ * This request is still running.
+ *
+ * Make sure that stale events don't cause us to come back
+ * early. (Currently, this can occur only with kqueue.) If
+ * this is forgotten, the multiplexer can get stuck in a
+ * signaled state and we'll burn CPU cycles pointlessly.
+ */
+ if (!comb_multiplexer(actx))
+ goto error_return;
+
+ return status;
}
case OAUTH_STEP_WAIT_INTERVAL:
-
- /*
- * The client application is supposed to wait until our timer
- * expires before calling PQconnectPoll() again, but that
- * might not happen. To avoid sending a token request early,
- * check the timer before continuing.
- */
- if (!timer_expired(actx))
{
- set_conn_altsock(conn, actx->timerfd);
- return PGRES_POLLING_READING;
- }
+ bool expired;
- /* Disable the expired timer. */
- if (!set_timer(actx, -1))
- goto error_return;
+ /*
+ * The client application is supposed to wait until our
+ * timer expires before calling PQconnectPoll() again, but
+ * that might not happen. To avoid sending a token request
+ * early, check the timer before continuing.
+ */
+ if (!drain_timer_events(actx, &expired))
+ goto error_return;
- break;
+ if (!expired)
+ {
+ set_conn_altsock(conn, actx->timerfd);
+ return PGRES_POLLING_READING;
+ }
+
+ break;
+ }
}
/*
@@ -2932,6 +3022,8 @@ PostgresPollingStatusType
pg_fe_run_oauth_flow(PGconn *conn)
{
PostgresPollingStatusType result;
+ fe_oauth_state *state = conn_sasl_state(conn);
+ struct async_ctx *actx;
#ifndef WIN32
sigset_t osigset;
bool sigpipe_pending;
@@ -2960,6 +3052,25 @@ pg_fe_run_oauth_flow(PGconn *conn)
result = pg_fe_run_oauth_flow_impl(conn);
+ /*
+ * To assist with finding bugs in comb_multiplexer() and
+ * drain_timer_events(), when we're in debug mode, track the total number
+ * of calls to this function and print that at the end of the flow.
+ *
+ * Be careful that state->async_ctx could be NULL if early initialization
+ * fails during the first call.
+ */
+ actx = state->async_ctx;
+ Assert(actx || result == PGRES_POLLING_FAILED);
+
+ if (actx && actx->debugging)
+ {
+ actx->dbg_num_calls++;
+ if (result == PGRES_POLLING_OK || result == PGRES_POLLING_FAILED)
+ fprintf(stderr, "[libpq] total number of polls: %d\n",
+ actx->dbg_num_calls);
+ }
+
#ifndef WIN32
if (masked)
{
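
A minimal sketch of the idea behind comb_multiplexer() above, assuming a BSD-style kqueue is available (the epoll path needs nothing): one zero-timeout kevent() pass lets the kernel drop level-triggered conditions that no longer hold, so the kqueue descriptor stops reporting readable when there is no remaining work; any event that is returned is simply left queued.

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <stdbool.h>

    static bool
    comb_kqueue(int kq)
    {
        struct timespec timeout = {0};
        struct kevent   ev;

        /* returns false on failure; the caller can report errno */
        return kevent(kq, NULL, 0, &ev, 1, &timeout) >= 0;
    }
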
diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c
index 29cb4d7e47f..73ba1748fe0 100644
--- a/src/pl/plperl/plperl.c
+++ b/src/pl/plperl/plperl.c
@@ -1453,7 +1453,7 @@ plperl_sv_to_literal(SV *sv, char *fqtypename)
check_spi_usage_allowed();
- typid = DirectFunctionCall1(regtypein, CStringGetDatum(fqtypename));
+ typid = DatumGetObjectId(DirectFunctionCall1(regtypein, CStringGetDatum(fqtypename)));
if (!OidIsValid(typid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
@@ -2569,13 +2569,13 @@ plperl_trigger_handler(PG_FUNCTION_ARGS)
TriggerData *trigdata = ((TriggerData *) fcinfo->context);
if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
- retval = (Datum) trigdata->tg_trigtuple;
+ retval = PointerGetDatum(trigdata->tg_trigtuple);
else if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
- retval = (Datum) trigdata->tg_newtuple;
+ retval = PointerGetDatum(trigdata->tg_newtuple);
else if (TRIGGER_FIRED_BY_DELETE(trigdata->tg_event))
- retval = (Datum) trigdata->tg_trigtuple;
+ retval = PointerGetDatum(trigdata->tg_trigtuple);
else if (TRIGGER_FIRED_BY_TRUNCATE(trigdata->tg_event))
- retval = (Datum) trigdata->tg_trigtuple;
+ retval = PointerGetDatum(trigdata->tg_trigtuple);
else
retval = (Datum) 0; /* can this happen? */
}
diff --git a/src/test/modules/Makefile b/src/test/modules/Makefile
index 7d3d3d52b45..903a8ac151a 100644
--- a/src/test/modules/Makefile
+++ b/src/test/modules/Makefile
@@ -25,6 +25,7 @@ SUBDIRS = \
test_escape \
test_extensions \
test_ginpostinglist \
+ test_int128 \
test_integerset \
test_json_parser \
test_lfind \
diff --git a/src/test/modules/meson.build b/src/test/modules/meson.build
index dd5cd065ba1..93be0f57289 100644
--- a/src/test/modules/meson.build
+++ b/src/test/modules/meson.build
@@ -24,6 +24,7 @@ subdir('test_dsm_registry')
subdir('test_escape')
subdir('test_extensions')
subdir('test_ginpostinglist')
+subdir('test_int128')
subdir('test_integerset')
subdir('test_json_parser')
subdir('test_lfind')
diff --git a/src/test/modules/oauth_validator/t/001_server.pl b/src/test/modules/oauth_validator/t/001_server.pl
index 41672ebd5c6..c0dafb8be76 100644
--- a/src/test/modules/oauth_validator/t/001_server.pl
+++ b/src/test/modules/oauth_validator/t/001_server.pl
@@ -418,6 +418,35 @@ $node->connect_fails(
qr/failed to obtain access token: mutual TLS required for client \(invalid_client\)/
);
+# Count the number of calls to the internal flow when multiple retries are
+# triggered. The exact number depends on many things -- the TCP stack, the
+# version of Curl in use, random chance -- but a ridiculously high number
+# suggests something is wrong with our ability to clear multiplexer events after
+# they're no longer applicable.
+my ($ret, $stdout, $stderr) = $node->psql(
+ 'postgres',
+ "SELECT 'connected for call count'",
+ extra_params => ['-w'],
+ connstr => connstr(stage => 'token', retries => 2),
+ on_error_stop => 0);
+
+is($ret, 0, "call count connection succeeds");
+like(
+ $stderr,
+ qr@Visit https://example\.com/ and enter the code: postgresuser@,
+ "call count: stderr matches");
+
+my $count_pattern = qr/\[libpq\] total number of polls: (\d+)/;
+if (like($stderr, $count_pattern, "call count: count is printed"))
+{
+ # For reference, a typical flow with two retries might take between 5-15
+ # calls to the client implementation. And while this will probably continue
+ # to change across OSes and Curl updates, we're likely in trouble if we see
+ # hundreds or thousands of calls.
+ $stderr =~ $count_pattern;
+ cmp_ok($1, '<', 100, "call count is reasonably small");
+}
+
# Stress test: make sure our builtin flow operates correctly even if the client
# application isn't respecting PGRES_POLLING_READING/WRITING signals returned
# from PQconnectPoll().
@@ -428,7 +457,7 @@ my @cmd = (
connstr(stage => 'all', retries => 1, interval => 1));
note "running '" . join("' '", @cmd) . "'";
-my ($stdout, $stderr) = run_command(\@cmd);
+($stdout, $stderr) = run_command(\@cmd);
like($stdout, qr/connection succeeded/, "stress-async: stdout matches");
unlike(
diff --git a/src/test/modules/test_int128/.gitignore b/src/test/modules/test_int128/.gitignore
new file mode 100644
index 00000000000..277fec6ed2c
--- /dev/null
+++ b/src/test/modules/test_int128/.gitignore
@@ -0,0 +1,2 @@
+/tmp_check/
+/test_int128
diff --git a/src/test/modules/test_int128/Makefile b/src/test/modules/test_int128/Makefile
new file mode 100644
index 00000000000..2e86ee93a9d
--- /dev/null
+++ b/src/test/modules/test_int128/Makefile
@@ -0,0 +1,23 @@
+# src/test/modules/test_int128/Makefile
+
+PGFILEDESC = "test_int128 - test 128-bit integer arithmetic"
+
+PROGRAM = test_int128
+OBJS = $(WIN32RES) test_int128.o
+
+PG_CPPFLAGS = -I$(libpq_srcdir)
+PG_LIBS_INTERNAL += $(libpq_pgport)
+
+NO_INSTALL = 1
+TAP_TESTS = 1
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/test_int128
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/src/test/modules/test_int128/meson.build b/src/test/modules/test_int128/meson.build
new file mode 100644
index 00000000000..4c2be7a0326
--- /dev/null
+++ b/src/test/modules/test_int128/meson.build
@@ -0,0 +1,33 @@
+# Copyright (c) 2025, PostgreSQL Global Development Group
+
+test_int128_sources = files(
+ 'test_int128.c',
+)
+
+if host_system == 'windows'
+ test_int128_sources += rc_bin_gen.process(win32ver_rc, extra_args: [
+ '--NAME', 'test_int128',
+ '--FILEDESC', 'test int128 program',])
+endif
+
+test_int128 = executable('test_int128',
+ test_int128_sources,
+ dependencies: [frontend_code, libpq],
+ kwargs: default_bin_args + {
+ 'install': false,
+ },
+)
+testprep_targets += test_int128
+
+
+tests += {
+ 'name': 'test_int128',
+ 'sd': meson.current_source_dir(),
+ 'bd': meson.current_build_dir(),
+ 'tap': {
+ 'tests': [
+ 't/001_test_int128.pl',
+ ],
+ 'deps': [test_int128],
+ },
+}
diff --git a/src/test/modules/test_int128/t/001_test_int128.pl b/src/test/modules/test_int128/t/001_test_int128.pl
new file mode 100644
index 00000000000..0c683869f34
--- /dev/null
+++ b/src/test/modules/test_int128/t/001_test_int128.pl
@@ -0,0 +1,27 @@
+# Copyright (c) 2025, PostgreSQL Global Development Group
+
+# Test 128-bit integer arithmetic code in int128.h
+
+use strict;
+use warnings FATAL => 'all';
+
+use PostgreSQL::Test::Utils;
+use Test::More;
+
+# Run the test program with 1M iterations
+my $exe = "test_int128";
+my $size = 1_000_000;
+
+note "testing executable $exe";
+
+my ($stdout, $stderr) = run_command([ $exe, $size ]);
+
+SKIP:
+{
+ skip "no native int128 type", 2 if $stdout =~ /skipping tests/;
+
+ is($stdout, "", "test_int128: no stdout");
+ is($stderr, "", "test_int128: no stderr");
+}
+
+done_testing();
diff --git a/src/test/modules/test_int128/test_int128.c b/src/test/modules/test_int128/test_int128.c
new file mode 100644
index 00000000000..c9c17a73a4e
--- /dev/null
+++ b/src/test/modules/test_int128/test_int128.c
@@ -0,0 +1,281 @@
+/*-------------------------------------------------------------------------
+ *
+ * test_int128.c
+ * Testbed for roll-our-own 128-bit integer arithmetic.
+ *
+ * This is a standalone test program that compares the behavior of an
+ * implementation in int128.h to an (assumed correct) int128 native type.
+ *
+ * Copyright (c) 2017-2025, PostgreSQL Global Development Group
+ *
+ *
+ * IDENTIFICATION
+ * src/test/modules/test_int128/test_int128.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres_fe.h"
+
+#include <time.h>
+
+/* Require a native int128 type */
+#ifdef HAVE_INT128
+
+/*
+ * By default, we test the non-native implementation in int128.h; but
+ * by predefining USE_NATIVE_INT128 to 1, you can test the native
+ * implementation, just to be sure.
+ */
+#ifndef USE_NATIVE_INT128
+#define USE_NATIVE_INT128 0
+#endif
+
+#include "common/int128.h"
+#include "common/pg_prng.h"
+
+/*
+ * We assume the parts of this union are laid out compatibly.
+ */
+typedef union
+{
+ int128 i128;
+ INT128 I128;
+ struct
+ {
+#ifdef WORDS_BIGENDIAN
+ int64 hi;
+ uint64 lo;
+#else
+ uint64 lo;
+ int64 hi;
+#endif
+ } hl;
+} test128;
+
+#define INT128_HEX_FORMAT "%016" PRIx64 "%016" PRIx64
+
+/*
+ * Control version of comparator.
+ */
+static inline int
+my_int128_compare(int128 x, int128 y)
+{
+ if (x < y)
+ return -1;
+ if (x > y)
+ return 1;
+ return 0;
+}
+
+/*
+ * Main program.
+ *
+ * Generates a lot of random numbers and tests the implementation for each.
+ * The results should be reproducible, since we use a fixed PRNG seed.
+ *
+ * You can give a loop count if you don't like the default 1B iterations.
+ */
+int
+main(int argc, char **argv)
+{
+ long count;
+
+ pg_prng_seed(&pg_global_prng_state, (uint64) time(NULL));
+
+ if (argc >= 2)
+ count = strtol(argv[1], NULL, 0);
+ else
+ count = 1000000000;
+
+ while (count-- > 0)
+ {
+ int64 x = pg_prng_uint64(&pg_global_prng_state);
+ int64 y = pg_prng_uint64(&pg_global_prng_state);
+ int64 z = pg_prng_uint64(&pg_global_prng_state);
+ int64 w = pg_prng_uint64(&pg_global_prng_state);
+ int32 z32 = (int32) z;
+ test128 t1;
+ test128 t2;
+ test128 t3;
+ int32 r1;
+ int32 r2;
+
+ /* check unsigned addition */
+ t1.hl.hi = x;
+ t1.hl.lo = y;
+ t2 = t1;
+ t1.i128 += (int128) (uint64) z;
+ int128_add_uint64(&t2.I128, (uint64) z);
+
+ if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo)
+ {
+ printf(INT128_HEX_FORMAT " + unsigned %016" PRIx64 "\n", x, y, z);
+ printf("native = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo);
+ printf("result = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo);
+ return 1;
+ }
+
+ /* check signed addition */
+ t1.hl.hi = x;
+ t1.hl.lo = y;
+ t2 = t1;
+ t1.i128 += (int128) z;
+ int128_add_int64(&t2.I128, z);
+
+ if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo)
+ {
+ printf(INT128_HEX_FORMAT " + signed %016" PRIx64 "\n", x, y, z);
+ printf("native = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo);
+ printf("result = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo);
+ return 1;
+ }
+
+ /* check 128-bit signed addition */
+ t1.hl.hi = x;
+ t1.hl.lo = y;
+ t2 = t1;
+ t3.hl.hi = z;
+ t3.hl.lo = w;
+ t1.i128 += t3.i128;
+ int128_add_int128(&t2.I128, t3.I128);
+
+ if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo)
+ {
+ printf(INT128_HEX_FORMAT " + " INT128_HEX_FORMAT "\n", x, y, z, w);
+ printf("native = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo);
+ printf("result = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo);
+ return 1;
+ }
+
+ /* check unsigned subtraction */
+ t1.hl.hi = x;
+ t1.hl.lo = y;
+ t2 = t1;
+ t1.i128 -= (int128) (uint64) z;
+ int128_sub_uint64(&t2.I128, (uint64) z);
+
+ if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo)
+ {
+ printf(INT128_HEX_FORMAT " - unsigned %016" PRIx64 "\n", x, y, z);
+ printf("native = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo);
+ printf("result = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo);
+ return 1;
+ }
+
+ /* check signed subtraction */
+ t1.hl.hi = x;
+ t1.hl.lo = y;
+ t2 = t1;
+ t1.i128 -= (int128) z;
+ int128_sub_int64(&t2.I128, z);
+
+ if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo)
+ {
+ printf(INT128_HEX_FORMAT " - signed %016" PRIx64 "\n", x, y, z);
+ printf("native = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo);
+ printf("result = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo);
+ return 1;
+ }
+
+ /* check 64x64-bit multiply-add */
+ t1.hl.hi = x;
+ t1.hl.lo = y;
+ t2 = t1;
+ t1.i128 += (int128) z * (int128) w;
+ int128_add_int64_mul_int64(&t2.I128, z, w);
+
+ if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo)
+ {
+ printf(INT128_HEX_FORMAT " + %016" PRIx64 " * %016" PRIx64 "\n", x, y, z, w);
+ printf("native = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo);
+ printf("result = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo);
+ return 1;
+ }
+
+ /* check 64x64-bit multiply-subtract */
+ t1.hl.hi = x;
+ t1.hl.lo = y;
+ t2 = t1;
+ t1.i128 -= (int128) z * (int128) w;
+ int128_sub_int64_mul_int64(&t2.I128, z, w);
+
+ if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo)
+ {
+ printf(INT128_HEX_FORMAT " - %016" PRIx64 " * %016" PRIx64 "\n", x, y, z, w);
+ printf("native = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo);
+ printf("result = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo);
+ return 1;
+ }
+
+ /* check 128/32-bit division */
+ t3.hl.hi = x;
+ t3.hl.lo = y;
+ t1.i128 = t3.i128 / z32;
+ r1 = (int32) (t3.i128 % z32);
+ t2 = t3;
+ int128_div_mod_int32(&t2.I128, z32, &r2);
+
+ if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo)
+ {
+ printf(INT128_HEX_FORMAT " / signed %08X\n", t3.hl.hi, t3.hl.lo, z32);
+ printf("native = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo);
+ printf("result = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo);
+ return 1;
+ }
+ if (r1 != r2)
+ {
+ printf(INT128_HEX_FORMAT " %% signed %08X\n", t3.hl.hi, t3.hl.lo, z32);
+ printf("native = %08X\n", r1);
+ printf("result = %08X\n", r2);
+ return 1;
+ }
+
+ /* check comparison */
+ t1.hl.hi = x;
+ t1.hl.lo = y;
+ t2.hl.hi = z;
+ t2.hl.lo = w;
+
+ if (my_int128_compare(t1.i128, t2.i128) !=
+ int128_compare(t1.I128, t2.I128))
+ {
+ printf("comparison failure: %d vs %d\n",
+ my_int128_compare(t1.i128, t2.i128),
+ int128_compare(t1.I128, t2.I128));
+ printf("arg1 = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo);
+ printf("arg2 = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo);
+ return 1;
+ }
+
+ /* check case with identical hi parts; above will hardly ever hit it */
+ t2.hl.hi = x;
+
+ if (my_int128_compare(t1.i128, t2.i128) !=
+ int128_compare(t1.I128, t2.I128))
+ {
+ printf("comparison failure: %d vs %d\n",
+ my_int128_compare(t1.i128, t2.i128),
+ int128_compare(t1.I128, t2.I128));
+ printf("arg1 = " INT128_HEX_FORMAT "\n", t1.hl.hi, t1.hl.lo);
+ printf("arg2 = " INT128_HEX_FORMAT "\n", t2.hl.hi, t2.hl.lo);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+#else /* ! HAVE_INT128 */
+
+/*
+ * For now, do nothing if we don't have a native int128 type.
+ */
+int
+main(int argc, char **argv)
+{
+ printf("skipping tests: no native int128 type\n");
+ return 0;
+}
+
+#endif
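
For a manual run, the harness takes an optional iteration count on the command line and defaults to one billion iterations when none is given; the TAP test above passes 1,000,000 to keep the check fast, and skips entirely when the build has no native int128 type to compare against.
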
diff --git a/src/test/modules/test_radixtree/test_radixtree.c b/src/test/modules/test_radixtree/test_radixtree.c
index 32de6a3123e..80ad0296164 100644
--- a/src/test/modules/test_radixtree/test_radixtree.c
+++ b/src/test/modules/test_radixtree/test_radixtree.c
@@ -44,7 +44,7 @@
uint64 _expected = (expected_expr); \
if (_result != _expected) \
elog(ERROR, \
- "%s yielded " UINT64_HEX_FORMAT ", expected " UINT64_HEX_FORMAT " (%s) in file \"%s\" line %u", \
+ "%s yielded %" PRIx64 ", expected %" PRIx64 " (%s) in file \"%s\" line %u", \
#result_expr, _result, _expected, #expected_expr, __FILE__, __LINE__); \
} while (0)
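
The test_radixtree hunk is part of the same cleanup: the bespoke UINT64_HEX_FORMAT macro is dropped in favor of the standard <inttypes.h> length-modifier macros. A self-contained reminder of the idiom (the value below is arbitrary):

#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
    uint64_t result = 0xDEADBEEFCAFEF00DULL;

    /* "%" PRIx64 expands to the correct printf length modifier for uint64_t */
    printf("result = %" PRIx64 "\n", result);
    return 0;
}
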
diff --git a/src/test/regress/expected/aggregates.out b/src/test/regress/expected/aggregates.out
index 1f1ce2380af..7319945ffe3 100644
--- a/src/test/regress/expected/aggregates.out
+++ b/src/test/regress/expected/aggregates.out
@@ -680,6 +680,25 @@ SELECT sum2(q1,q2) FROM int8_tbl;
18271560493827981
(1 row)
+-- sanity checks
+SELECT sum(q1+q2), sum(q1)+sum(q2) FROM int8_tbl;
+ sum | ?column?
+-------------------+-------------------
+ 18271560493827981 | 18271560493827981
+(1 row)
+
+SELECT sum(q1-q2), sum(q2-q1), sum(q1)-sum(q2) FROM int8_tbl;
+ sum | sum | ?column?
+------------------+-------------------+------------------
+ 9135780246913245 | -9135780246913245 | 9135780246913245
+(1 row)
+
+SELECT sum(q1*2000), sum(-q1*2000), 2000*sum(q1) FROM int8_tbl;
+ sum | sum | ?column?
+----------------------+-----------------------+----------------------
+ 27407340740741226000 | -27407340740741226000 | 27407340740741226000
+(1 row)
+
-- test for outer-level aggregates
-- this should work
select ten, sum(distinct four) from onek a
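
About the new aggregate sanity checks: sum(q1+q2) must equal sum(q1)+sum(q2), and a constant factor must distribute over sum. The third expected total, 27407340740741226000, is larger than INT64_MAX, so the test can only pass if the aggregate accumulates in something wider than 64 bits (sum(bigint) returns numeric, and the server may use a 128-bit integer accumulator internally). A standalone sketch, assuming a compiler with native __int128 and the usual contents of the int8_tbl fixture, of why a wide accumulator is needed:

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
    /* q1 values as found in the standard int8_tbl test data */
    int64_t  q1[] = {123, 123, 4567890123456789, 4567890123456789, 4567890123456789};
    __int128 sum = 0;

    for (int i = 0; i < 5; i++)
        sum += (__int128) q1[i] * 2000;     /* widen before multiplying */

    /* the total, 27407340740741226000, exceeds INT64_MAX,
     * so print it as two 64-bit hex halves */
    printf("high = %" PRIx64 ", low = %016" PRIx64 "\n",
           (uint64_t) (sum >> 64), (uint64_t) sum);
    printf("fits in int64? %s\n", sum > (__int128) INT64_MAX ? "no" : "yes");
    return 0;
}
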
diff --git a/src/test/regress/expected/strings.out b/src/test/regress/expected/strings.out
index 1bfd33de3f3..ba302da51e7 100644
--- a/src/test/regress/expected/strings.out
+++ b/src/test/regress/expected/strings.out
@@ -2091,6 +2091,40 @@ SELECT c FROM toasttest;
(1 row)
DROP TABLE toasttest;
+-- test with short varlenas (up to 126 data bytes reduced to a 1-byte header)
+-- being toasted.
+CREATE TABLE toasttest (f1 text, f2 text);
+ALTER TABLE toasttest SET (toast_tuple_target = 128);
+ALTER TABLE toasttest ALTER COLUMN f1 SET STORAGE EXTERNAL;
+ALTER TABLE toasttest ALTER COLUMN f2 SET STORAGE EXTERNAL;
+-- Here, the first value is a varlena large enough to make it toasted and
+-- stored uncompressed. The second value is a short varlena, toasted
+-- and stored uncompressed.
+INSERT INTO toasttest values(repeat('1234', 1000), repeat('5678', 30));
+SELECT reltoastrelid::regclass AS reltoastname FROM pg_class
+ WHERE oid = 'toasttest'::regclass \gset
+-- There should be two values inserted in the toast relation.
+SELECT count(*) FROM :reltoastname WHERE chunk_seq = 0;
+ count
+-------
+ 2
+(1 row)
+
+SELECT substr(f1, 5, 10) AS f1_data, substr(f2, 5, 10) AS f2_data
+ FROM toasttest;
+ f1_data | f2_data
+------------+------------
+ 1234123412 | 5678567856
+(1 row)
+
+SELECT pg_column_compression(f1) AS f1_comp, pg_column_compression(f2) AS f2_comp
+ FROM toasttest;
+ f1_comp | f2_comp
+---------+---------
+ |
+(1 row)
+
+DROP TABLE toasttest;
--
-- test length
--
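
On the new strings test: a varlena whose data fits in 126 bytes can use the 1-byte "short" header (the header byte itself counts toward the 127-byte limit), and with toast_tuple_target lowered to 128 and EXTERNAL storage both columns end up in the TOAST relation. A standalone sketch of the size arithmetic only, with the header sizes hard-coded as assumptions rather than taken from postgres.h:

#include <stdio.h>
#include <stddef.h>

#define HDRSZ_FULL   4          /* regular 4-byte varlena header */
#define HDRSZ_SHORT  1          /* short 1-byte varlena header */
#define SHORT_MAX    127        /* total bytes representable with a short header */

static size_t
varlena_size(size_t datalen)
{
    if (datalen + HDRSZ_SHORT <= SHORT_MAX)
        return datalen + HDRSZ_SHORT;
    return datalen + HDRSZ_FULL;
}

int
main(void)
{
    /* repeat('5678', 30) is 120 data bytes: the short header applies */
    printf("120 bytes  -> %zu on disk\n", varlena_size(120));
    /* repeat('1234', 1000) is 4000 data bytes: the full header applies */
    printf("4000 bytes -> %zu on disk\n", varlena_size(4000));
    return 0;
}
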
diff --git a/src/test/regress/expected/timestamp.out b/src/test/regress/expected/timestamp.out
index 6aaa19c8f4e..14a9f5b56a6 100644
--- a/src/test/regress/expected/timestamp.out
+++ b/src/test/regress/expected/timestamp.out
@@ -591,6 +591,16 @@ SELECT date_trunc( 'week', timestamp '2004-02-29 15:44:17.71393' ) AS week_trunc
Mon Feb 23 00:00:00 2004
(1 row)
+SELECT date_trunc( 'week', timestamp 'infinity' ) AS inf_trunc;
+ inf_trunc
+-----------
+ infinity
+(1 row)
+
+SELECT date_trunc( 'timezone', timestamp '2004-02-29 15:44:17.71393' ) AS notsupp_trunc;
+ERROR: unit "timezone" not supported for type timestamp without time zone
+SELECT date_trunc( 'timezone', timestamp 'infinity' ) AS notsupp_inf_trunc;
+ERROR: unit "timezone" not supported for type timestamp without time zone
SELECT date_trunc( 'ago', timestamp 'infinity' ) AS invalid_trunc;
ERROR: unit "ago" not recognized for type timestamp without time zone
-- verify date_bin behaves the same as date_trunc for relevant intervals
diff --git a/src/test/regress/expected/timestamptz.out b/src/test/regress/expected/timestamptz.out
index 2a69953ff25..5dc8a621f6c 100644
--- a/src/test/regress/expected/timestamptz.out
+++ b/src/test/regress/expected/timestamptz.out
@@ -760,6 +760,16 @@ SELECT date_trunc( 'week', timestamp with time zone '2004-02-29 15:44:17.71393'
Mon Feb 23 00:00:00 2004 PST
(1 row)
+SELECT date_trunc( 'week', timestamp with time zone 'infinity' ) AS inf_trunc;
+ inf_trunc
+-----------
+ infinity
+(1 row)
+
+SELECT date_trunc( 'timezone', timestamp with time zone '2004-02-29 15:44:17.71393' ) AS notsupp_trunc;
+ERROR: unit "timezone" not supported for type timestamp with time zone
+SELECT date_trunc( 'timezone', timestamp with time zone 'infinity' ) AS notsupp_inf_trunc;
+ERROR: unit "timezone" not supported for type timestamp with time zone
SELECT date_trunc( 'ago', timestamp with time zone 'infinity' ) AS invalid_trunc;
ERROR: unit "ago" not recognized for type timestamp with time zone
SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'Australia/Sydney') as sydney_trunc; -- zone name
@@ -780,6 +790,14 @@ SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'VET
Thu Feb 15 20:00:00 2001 PST
(1 row)
+SELECT date_trunc('timezone', timestamp with time zone 'infinity', 'GMT') AS notsupp_zone_trunc;
+ERROR: unit "timezone" not supported for type timestamp with time zone
+SELECT date_trunc( 'week', timestamp with time zone 'infinity', 'GMT') AS inf_zone_trunc;
+ inf_zone_trunc
+----------------
+ infinity
+(1 row)
+
SELECT date_trunc('ago', timestamp with time zone 'infinity', 'GMT') AS invalid_zone_trunc;
ERROR: unit "ago" not recognized for type timestamp with time zone
-- verify date_bin behaves the same as date_trunc for relevant intervals
diff --git a/src/test/regress/expected/triggers.out b/src/test/regress/expected/triggers.out
index 872b9100e1a..1eb8fba0953 100644
--- a/src/test/regress/expected/triggers.out
+++ b/src/test/regress/expected/triggers.out
@@ -2769,6 +2769,10 @@ NOTICE: trigger = child3_delete_trig, old table = (42,CCC)
-- copy into parent sees parent-format tuples
copy parent (a, b) from stdin;
NOTICE: trigger = parent_insert_trig, new table = (AAA,42), (BBB,42), (CCC,42)
+-- check detach/reattach behavior; statement triggers with transition tables
+-- should not prevent a table from becoming a partition again
+alter table parent detach partition child1;
+alter table parent attach partition child1 for values in ('AAA');
-- DML affecting parent sees tuples collected from children even if
-- there is no transition table trigger on the children
drop trigger child1_insert_trig on child1;
@@ -2966,6 +2970,10 @@ NOTICE: trigger = parent_insert_trig, new table = (AAA,42), (BBB,42), (CCC,42)
create index on parent(b);
copy parent (a, b) from stdin;
NOTICE: trigger = parent_insert_trig, new table = (DDD,42)
+-- check disinherit/reinherit behavior; statement triggers with transition
+-- tables should not prevent a table from becoming an inheritance child again
+alter table child1 no inherit parent;
+alter table child1 inherit parent;
-- DML affecting parent sees tuples collected from children even if
-- there is no transition table trigger on the children
drop trigger child1_insert_trig on child1;
diff --git a/src/test/regress/regress.c b/src/test/regress/regress.c
index 3dbba069024..465ac148ac9 100644
--- a/src/test/regress/regress.c
+++ b/src/test/regress/regress.c
@@ -727,7 +727,7 @@ PG_FUNCTION_INFO_V1(is_catalog_text_unique_index_oid);
Datum
is_catalog_text_unique_index_oid(PG_FUNCTION_ARGS)
{
- return IsCatalogTextUniqueIndexOid(PG_GETARG_OID(0));
+ return BoolGetDatum(IsCatalogTextUniqueIndexOid(PG_GETARG_OID(0)));
}
PG_FUNCTION_INFO_V1(test_support_func);
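
The one-line regress.c change makes the function return a proper Datum instead of a bare C bool. For context, the conventional pattern in a V1 PostgreSQL C function is the PG_RETURN_BOOL macro, which expands to return BoolGetDatum(...). A minimal hypothetical example (the function name is invented; this is not part of the patch):

#include "postgres.h"
#include "fmgr.h"

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(is_positive);

Datum
is_positive(PG_FUNCTION_ARGS)
{
    int32 arg = PG_GETARG_INT32(0);

    /* wrap the C bool in a Datum; returning a bare bool is not portable */
    PG_RETURN_BOOL(arg > 0);
}
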
diff --git a/src/test/regress/sql/aggregates.sql b/src/test/regress/sql/aggregates.sql
index 277b4b198cc..dde85d0dfb2 100644
--- a/src/test/regress/sql/aggregates.sql
+++ b/src/test/regress/sql/aggregates.sql
@@ -182,6 +182,11 @@ SELECT newcnt(*) AS cnt_1000 FROM onek;
SELECT oldcnt(*) AS cnt_1000 FROM onek;
SELECT sum2(q1,q2) FROM int8_tbl;
+-- sanity checks
+SELECT sum(q1+q2), sum(q1)+sum(q2) FROM int8_tbl;
+SELECT sum(q1-q2), sum(q2-q1), sum(q1)-sum(q2) FROM int8_tbl;
+SELECT sum(q1*2000), sum(-q1*2000), 2000*sum(q1) FROM int8_tbl;
+
-- test for outer-level aggregates
-- this should work
diff --git a/src/test/regress/sql/strings.sql b/src/test/regress/sql/strings.sql
index 92c445c2439..b94004cc08c 100644
--- a/src/test/regress/sql/strings.sql
+++ b/src/test/regress/sql/strings.sql
@@ -650,6 +650,26 @@ SELECT length(c), c::text FROM toasttest;
SELECT c FROM toasttest;
DROP TABLE toasttest;
+-- test with short varlenas (up to 126 data bytes reduced to a 1-byte header)
+-- being toasted.
+CREATE TABLE toasttest (f1 text, f2 text);
+ALTER TABLE toasttest SET (toast_tuple_target = 128);
+ALTER TABLE toasttest ALTER COLUMN f1 SET STORAGE EXTERNAL;
+ALTER TABLE toasttest ALTER COLUMN f2 SET STORAGE EXTERNAL;
+-- Here, the first value is a varlena large enough to make it toasted and
+-- stored uncompressed. The second value is a short varlena, toasted
+-- and stored uncompressed.
+INSERT INTO toasttest values(repeat('1234', 1000), repeat('5678', 30));
+SELECT reltoastrelid::regclass AS reltoastname FROM pg_class
+ WHERE oid = 'toasttest'::regclass \gset
+-- There should be two values inserted in the toast relation.
+SELECT count(*) FROM :reltoastname WHERE chunk_seq = 0;
+SELECT substr(f1, 5, 10) AS f1_data, substr(f2, 5, 10) AS f2_data
+ FROM toasttest;
+SELECT pg_column_compression(f1) AS f1_comp, pg_column_compression(f2) AS f2_comp
+ FROM toasttest;
+DROP TABLE toasttest;
+
--
-- test length
--
diff --git a/src/test/regress/sql/timestamp.sql b/src/test/regress/sql/timestamp.sql
index 55f80530ea0..313757ed041 100644
--- a/src/test/regress/sql/timestamp.sql
+++ b/src/test/regress/sql/timestamp.sql
@@ -175,7 +175,9 @@ SELECT d1 - timestamp without time zone '1997-01-02' AS diff
FROM TIMESTAMP_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01';
SELECT date_trunc( 'week', timestamp '2004-02-29 15:44:17.71393' ) AS week_trunc;
-
+SELECT date_trunc( 'week', timestamp 'infinity' ) AS inf_trunc;
+SELECT date_trunc( 'timezone', timestamp '2004-02-29 15:44:17.71393' ) AS notsupp_trunc;
+SELECT date_trunc( 'timezone', timestamp 'infinity' ) AS notsupp_inf_trunc;
SELECT date_trunc( 'ago', timestamp 'infinity' ) AS invalid_trunc;
-- verify date_bin behaves the same as date_trunc for relevant intervals
diff --git a/src/test/regress/sql/timestamptz.sql b/src/test/regress/sql/timestamptz.sql
index caca3123f13..6ace851d169 100644
--- a/src/test/regress/sql/timestamptz.sql
+++ b/src/test/regress/sql/timestamptz.sql
@@ -217,15 +217,18 @@ SELECT d1 - timestamp with time zone '1997-01-02' AS diff
FROM TIMESTAMPTZ_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01';
SELECT date_trunc( 'week', timestamp with time zone '2004-02-29 15:44:17.71393' ) AS week_trunc;
+SELECT date_trunc( 'week', timestamp with time zone 'infinity' ) AS inf_trunc;
+SELECT date_trunc( 'timezone', timestamp with time zone '2004-02-29 15:44:17.71393' ) AS notsupp_trunc;
+SELECT date_trunc( 'timezone', timestamp with time zone 'infinity' ) AS notsupp_inf_trunc;
SELECT date_trunc( 'ago', timestamp with time zone 'infinity' ) AS invalid_trunc;
SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'Australia/Sydney') as sydney_trunc; -- zone name
SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'GMT') as gmt_trunc; -- fixed-offset abbreviation
SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'VET') as vet_trunc; -- variable-offset abbreviation
+SELECT date_trunc('timezone', timestamp with time zone 'infinity', 'GMT') AS notsupp_zone_trunc;
+SELECT date_trunc( 'week', timestamp with time zone 'infinity', 'GMT') AS inf_zone_trunc;
SELECT date_trunc('ago', timestamp with time zone 'infinity', 'GMT') AS invalid_zone_trunc;
-
-
-- verify date_bin behaves the same as date_trunc for relevant intervals
SELECT
str,
diff --git a/src/test/regress/sql/triggers.sql b/src/test/regress/sql/triggers.sql
index d674b25c83b..5f7f75d7ba5 100644
--- a/src/test/regress/sql/triggers.sql
+++ b/src/test/regress/sql/triggers.sql
@@ -1935,6 +1935,11 @@ BBB 42
CCC 42
\.
+-- check detach/reattach behavior; statement triggers with transition tables
+-- should not prevent a table from becoming a partition again
+alter table parent detach partition child1;
+alter table parent attach partition child1 for values in ('AAA');
+
-- DML affecting parent sees tuples collected from children even if
-- there is no transition table trigger on the children
drop trigger child1_insert_trig on child1;
@@ -2154,6 +2159,11 @@ copy parent (a, b) from stdin;
DDD 42
\.
+-- check disinherit/reinherit behavior; statement triggers with transition
+-- tables should not prevent a table from becoming an inheritance child again
+alter table child1 no inherit parent;
+alter table child1 inherit parent;
+
-- DML affecting parent sees tuples collected from children even if
-- there is no transition table trigger on the children
drop trigger child1_insert_trig on child1;
diff --git a/src/tools/testint128.c b/src/tools/testint128.c
deleted file mode 100644
index a25631e277d..00000000000
--- a/src/tools/testint128.c
+++ /dev/null
@@ -1,170 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * testint128.c
- * Testbed for roll-our-own 128-bit integer arithmetic.
- *
- * This is a standalone test program that compares the behavior of an
- * implementation in int128.h to an (assumed correct) int128 native type.
- *
- * Copyright (c) 2017-2025, PostgreSQL Global Development Group
- *
- *
- * IDENTIFICATION
- * src/tools/testint128.c
- *
- *-------------------------------------------------------------------------
- */
-
-#include "postgres_fe.h"
-
-/*
- * By default, we test the non-native implementation in int128.h; but
- * by predefining USE_NATIVE_INT128 to 1, you can test the native
- * implementation, just to be sure.
- */
-#ifndef USE_NATIVE_INT128
-#define USE_NATIVE_INT128 0
-#endif
-
-#include "common/int128.h"
-#include "common/pg_prng.h"
-
-/*
- * We assume the parts of this union are laid out compatibly.
- */
-typedef union
-{
- int128 i128;
- INT128 I128;
- union
- {
-#ifdef WORDS_BIGENDIAN
- int64 hi;
- uint64 lo;
-#else
- uint64 lo;
- int64 hi;
-#endif
- } hl;
-} test128;
-
-
-/*
- * Control version of comparator.
- */
-static inline int
-my_int128_compare(int128 x, int128 y)
-{
- if (x < y)
- return -1;
- if (x > y)
- return 1;
- return 0;
-}
-
-/*
- * Main program.
- *
- * Generates a lot of random numbers and tests the implementation for each.
- * The results should be reproducible, since we use a fixed PRNG seed.
- *
- * You can give a loop count if you don't like the default 1B iterations.
- */
-int
-main(int argc, char **argv)
-{
- long count;
-
- pg_prng_seed(&pg_global_prng_state, 0);
-
- if (argc >= 2)
- count = strtol(argv[1], NULL, 0);
- else
- count = 1000000000;
-
- while (count-- > 0)
- {
- int64 x = pg_prng_uint64(&pg_global_prng_state);
- int64 y = pg_prng_uint64(&pg_global_prng_state);
- int64 z = pg_prng_uint64(&pg_global_prng_state);
- test128 t1;
- test128 t2;
-
- /* check unsigned addition */
- t1.hl.hi = x;
- t1.hl.lo = y;
- t2 = t1;
- t1.i128 += (int128) (uint64) z;
- int128_add_uint64(&t2.I128, (uint64) z);
-
- if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo)
- {
- printf("%016lX%016lX + unsigned %lX\n", x, y, z);
- printf("native = %016lX%016lX\n", t1.hl.hi, t1.hl.lo);
- printf("result = %016lX%016lX\n", t2.hl.hi, t2.hl.lo);
- return 1;
- }
-
- /* check signed addition */
- t1.hl.hi = x;
- t1.hl.lo = y;
- t2 = t1;
- t1.i128 += (int128) z;
- int128_add_int64(&t2.I128, z);
-
- if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo)
- {
- printf("%016lX%016lX + signed %lX\n", x, y, z);
- printf("native = %016lX%016lX\n", t1.hl.hi, t1.hl.lo);
- printf("result = %016lX%016lX\n", t2.hl.hi, t2.hl.lo);
- return 1;
- }
-
- /* check multiplication */
- t1.i128 = (int128) x * (int128) y;
-
- t2.hl.hi = t2.hl.lo = 0;
- int128_add_int64_mul_int64(&t2.I128, x, y);
-
- if (t1.hl.hi != t2.hl.hi || t1.hl.lo != t2.hl.lo)
- {
- printf("%lX * %lX\n", x, y);
- printf("native = %016lX%016lX\n", t1.hl.hi, t1.hl.lo);
- printf("result = %016lX%016lX\n", t2.hl.hi, t2.hl.lo);
- return 1;
- }
-
- /* check comparison */
- t1.hl.hi = x;
- t1.hl.lo = y;
- t2.hl.hi = z;
- t2.hl.lo = pg_prng_uint64(&pg_global_prng_state);
-
- if (my_int128_compare(t1.i128, t2.i128) !=
- int128_compare(t1.I128, t2.I128))
- {
- printf("comparison failure: %d vs %d\n",
- my_int128_compare(t1.i128, t2.i128),
- int128_compare(t1.I128, t2.I128));
- printf("arg1 = %016lX%016lX\n", t1.hl.hi, t1.hl.lo);
- printf("arg2 = %016lX%016lX\n", t2.hl.hi, t2.hl.lo);
- return 1;
- }
-
- /* check case with identical hi parts; above will hardly ever hit it */
- t2.hl.hi = x;
-
- if (my_int128_compare(t1.i128, t2.i128) !=
- int128_compare(t1.I128, t2.I128))
- {
- printf("comparison failure: %d vs %d\n",
- my_int128_compare(t1.i128, t2.i128),
- int128_compare(t1.I128, t2.I128));
- printf("arg1 = %016lX%016lX\n", t1.hl.hi, t1.hl.lo);
- printf("arg2 = %016lX%016lX\n", t2.hl.hi, t2.hl.lo);
- return 1;
- }
- }
-
- return 0;
-}