-rw-r--r--  src/test/regress/expected/aggregates.out     |   50
-rw-r--r--  src/test/regress/expected/arrays.out         |    8
-rw-r--r--  src/test/regress/expected/combocid_1.out     |    7
-rw-r--r--  src/test/regress/expected/create_index.out   |   88
-rw-r--r--  src/test/regress/expected/inherit.out        |  157
-rw-r--r--  src/test/regress/expected/join.out           |  374
-rw-r--r--  src/test/regress/expected/plancache.out      |    6
-rw-r--r--  src/test/regress/expected/plpgsql_1.out      |  332
-rw-r--r--  src/test/regress/expected/privileges_2.out   |   52
-rw-r--r--  src/test/regress/expected/rules_2.out        |  139
-rw-r--r--  src/test/regress/expected/subselect_1.out    |    3
-rw-r--r--  src/test/regress/expected/window.out         |   14
-rw-r--r--  src/test/regress/expected/with.out           |  309
-rw-r--r--  src/test/regress/expected/xc_FQS.out         |   48
-rw-r--r--  src/test/regress/expected/xc_alter_table.out |  153
-rw-r--r--  src/test/regress/expected/xc_remote.out      |  191
16 files changed, 722 insertions, 1209 deletions
diff --git a/src/test/regress/expected/aggregates.out b/src/test/regress/expected/aggregates.out
index 8baca0f5d2..0819f8fbb1 100644
--- a/src/test/regress/expected/aggregates.out
+++ b/src/test/regress/expected/aggregates.out
@@ -76,9 +76,9 @@ SELECT max(student.gpa) AS max_3_7 FROM student;
(1 row)
SELECT stddev_pop(b) FROM aggtest;
- stddev_pop
------------------
- 131.10703231895
+ stddev_pop
+------------------
+ 131.107032318951
(1 row)
SELECT stddev_samp(b) FROM aggtest;
@@ -90,13 +90,13 @@ SELECT stddev_samp(b) FROM aggtest;
SELECT var_pop(b) FROM aggtest;
var_pop
------------------
- 17189.0539234823
+ 17189.0539234824
(1 row)
SELECT var_samp(b) FROM aggtest;
var_samp
------------------
- 22918.7385646431
+ 22918.7385646432
(1 row)
SELECT stddev_pop(b::numeric) FROM aggtest;
@@ -153,13 +153,13 @@ SELECT regr_sxx(b, a) FROM aggtest;
SELECT regr_syy(b, a) FROM aggtest;
regr_syy
------------------
- 68756.2156939293
+ 68756.2156939297
(1 row)
SELECT regr_sxy(b, a) FROM aggtest;
regr_sxy
------------------
- 2614.51582155004
+ 2614.51582155001
(1 row)
SELECT regr_avgx(b, a), regr_avgy(b, a) FROM aggtest;
@@ -171,25 +171,25 @@ SELECT regr_avgx(b, a), regr_avgy(b, a) FROM aggtest;
SELECT regr_r2(b, a) FROM aggtest;
regr_r2
--------------------
- 0.0194977982031803
+ 0.0194977982031797
(1 row)
SELECT regr_slope(b, a), regr_intercept(b, a) FROM aggtest;
regr_slope | regr_intercept
-------------------+------------------
- 0.512750700441271 | 82.5619926012309
+ 0.512750700441265 | 82.5619926012313
(1 row)
SELECT covar_pop(b, a), covar_samp(b, a) FROM aggtest;
- covar_pop | covar_samp
------------------+------------------
- 653.62895538751 | 871.505273850014
+ covar_pop | covar_samp
+------------------+------------------
+ 653.628955387502 | 871.505273850003
(1 row)
SELECT corr(b, a) FROM aggtest;
corr
-------------------
- 0.139634516517873
+ 0.139634516517871
(1 row)
SELECT count(four) AS cnt_1000 FROM onek;
@@ -756,30 +756,6 @@ select min(f1), max(f1) from minmaxtest;
11 | 18
(1 row)
--- DISTINCT doesn't do anything useful here, but it shouldn't fail
-explain (costs off)
- select distinct min(f1), max(f1) from minmaxtest;
- QUERY PLAN
------------------------------------------------------------------------
- HashAggregate
- -> Aggregate
- -> Append
- -> Remote Subquery Scan on all (datanode_1,datanode_2)
- -> Seq Scan on minmaxtest
- -> Remote Subquery Scan on all (datanode_1,datanode_2)
- -> Seq Scan on minmaxtest1 minmaxtest
- -> Remote Subquery Scan on all (datanode_1,datanode_2)
- -> Seq Scan on minmaxtest2 minmaxtest
- -> Remote Subquery Scan on all (datanode_1,datanode_2)
- -> Seq Scan on minmaxtest3 minmaxtest
-(11 rows)
-
-select distinct min(f1), max(f1) from minmaxtest;
- min | max
------+-----
- 11 | 18
-(1 row)
-
drop table minmaxtest cascade;
NOTICE: drop cascades to 3 other objects
DETAIL: drop cascades to table minmaxtest1
diff --git a/src/test/regress/expected/arrays.out b/src/test/regress/expected/arrays.out
index 68ab4cffd2..926a6573de 100644
--- a/src/test/regress/expected/arrays.out
+++ b/src/test/regress/expected/arrays.out
@@ -997,7 +997,7 @@ insert into arr_tbl values ('{1,2}');
-- failure expected:
insert into arr_tbl values ('{1,2,3}');
ERROR: duplicate key value violates unique constraint "arr_tbl_f1_key"
-DETAIL: Key (f1)=({1,2,3}) already exists.
+DETAIL: Key (f1)=((pg_catalog.int4){1,2,3}) already exists.
insert into arr_tbl values ('{2,3,4}');
insert into arr_tbl values ('{1,5,3}');
insert into arr_tbl values ('{1,2,10}');
@@ -1471,19 +1471,19 @@ select array_length(array[[1,2,3], [4,5,6]], 3);
select array_agg(unique1) from (select unique1 from tenk1 where unique1 < 15 order by unique1) ss;
array_agg
--------------------------------------
- {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14}
+ {1,2,5,6,8,9,12,13,0,3,4,7,10,11,14}
(1 row)
select array_agg(ten) from (select ten from tenk1 where unique1 < 15 order by unique1) ss;
array_agg
---------------------------------
- {0,1,2,3,4,5,6,7,8,9,0,1,2,3,4}
+ {1,2,5,6,8,9,2,3,0,3,4,7,0,1,4}
(1 row)
select array_agg(nullif(ten, 4)) from (select ten from tenk1 where unique1 < 15 order by unique1) ss;
array_agg
---------------------------------------
- {0,1,2,3,NULL,5,6,7,8,9,0,1,2,3,NULL}
+ {1,2,5,6,8,9,2,3,0,3,NULL,7,0,1,NULL}
(1 row)
select array_agg(unique1) from tenk1 where unique1 < -15;
diff --git a/src/test/regress/expected/combocid_1.out b/src/test/regress/expected/combocid_1.out
index da39f52542..71191dd544 100644
--- a/src/test/regress/expected/combocid_1.out
+++ b/src/test/regress/expected/combocid_1.out
@@ -48,9 +48,10 @@ INSERT INTO combocidtest VALUES (333);
DECLARE c CURSOR FOR SELECT ctid,cmin,* FROM combocidtest ORDER BY ctid;
DELETE FROM combocidtest;
FETCH ALL FROM c;
- ctid | cmin | foobar
-------+------+--------
-(0 rows)
+ ctid | cmin | foobar
+-------+------+--------
+ (0,3) | 0 | 333
+(1 row)
ROLLBACK;
SELECT ctid,cmin,* FROM combocidtest ORDER BY ctid;
diff --git a/src/test/regress/expected/create_index.out b/src/test/regress/expected/create_index.out
index d1a48cfed3..b4258aef89 100644
--- a/src/test/regress/expected/create_index.out
+++ b/src/test/regress/expected/create_index.out
@@ -2717,19 +2717,13 @@ DROP TABLE onek_with_null;
EXPLAIN (NODES OFF, COSTS OFF)
SELECT * FROM tenk1
WHERE thousand = 42 AND (tenthous = 1 OR tenthous = 3 OR tenthous = 42);
- QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+-----------------------------------------------------------------------
Remote Subquery Scan on all
- -> Bitmap Heap Scan on tenk1
- Recheck Cond: (((thousand = 42) AND (tenthous = 1)) OR ((thousand = 42) AND (tenthous = 3)) OR ((thousand = 42) AND (tenthous = 42)))
- -> BitmapOr
- -> Bitmap Index Scan on tenk1_thous_tenthous
- Index Cond: ((thousand = 42) AND (tenthous = 1))
- -> Bitmap Index Scan on tenk1_thous_tenthous
- Index Cond: ((thousand = 42) AND (tenthous = 3))
- -> Bitmap Index Scan on tenk1_thous_tenthous
- Index Cond: ((thousand = 42) AND (tenthous = 42))
-(10 rows)
+ -> Index Scan using tenk1_thous_tenthous on tenk1
+ Index Cond: (thousand = 42)
+ Filter: ((tenthous = 1) OR (tenthous = 3) OR (tenthous = 42))
+(4 rows)
SELECT * FROM tenk1
WHERE thousand = 42 AND (tenthous = 1 OR tenthous = 3 OR tenthous = 42);
@@ -2741,22 +2735,15 @@ SELECT * FROM tenk1
EXPLAIN (NODES OFF, COSTS OFF)
SELECT count(*) FROM tenk1
WHERE hundred = 42 AND (thousand = 42 OR thousand = 99);
- QUERY PLAN
----------------------------------------------------------------------------------------------
+ QUERY PLAN
+------------------------------------------------------------------
Aggregate
-> Remote Subquery Scan on all
-> Aggregate
- -> Bitmap Heap Scan on tenk1
- Recheck Cond: ((hundred = 42) AND ((thousand = 42) OR (thousand = 99)))
- -> BitmapAnd
- -> Bitmap Index Scan on tenk1_hundred
- Index Cond: (hundred = 42)
- -> BitmapOr
- -> Bitmap Index Scan on tenk1_thous_tenthous
- Index Cond: (thousand = 42)
- -> Bitmap Index Scan on tenk1_thous_tenthous
- Index Cond: (thousand = 99)
-(13 rows)
+ -> Index Scan using tenk1_hundred on tenk1
+ Index Cond: (hundred = 42)
+ Filter: ((thousand = 42) OR (thousand = 99))
+(6 rows)
SELECT count(*) FROM tenk1
WHERE hundred = 42 AND (thousand = 42 OR thousand = 99);
@@ -2793,54 +2780,3 @@ SELECT count(*) FROM dupindexcols
97
(1 row)
---
--- Check ordering of =ANY indexqual results (bug in 9.2.0)
---
-vacuum analyze tenk1; -- ensure we get consistent plans here
-explain (costs off)
-SELECT unique1 FROM tenk1
-WHERE unique1 IN (1,42,7)
-ORDER BY unique1;
- QUERY PLAN
--------------------------------------------------------------------------
- Remote Subquery Scan on all (datanode_1,datanode_2)
- -> Sort
- Sort Key: unique1
- -> Bitmap Heap Scan on tenk1
- Recheck Cond: (unique1 = ANY ('{1,42,7}'::integer[]))
- -> Bitmap Index Scan on tenk1_unique1
- Index Cond: (unique1 = ANY ('{1,42,7}'::integer[]))
-(7 rows)
-
-SELECT unique1 FROM tenk1
-WHERE unique1 IN (1,42,7)
-ORDER BY unique1;
- unique1
----------
- 1
- 7
- 42
-(3 rows)
-
-explain (costs off)
-SELECT thousand, tenthous FROM tenk1
-WHERE thousand < 2 AND tenthous IN (1001,3000)
-ORDER BY thousand;
- QUERY PLAN
---------------------------------------------------------------------------------------------
- Remote Subquery Scan on all (datanode_1,datanode_2)
- -> Sort
- Sort Key: thousand
- -> Index Only Scan using tenk1_thous_tenthous on tenk1
- Index Cond: ((thousand < 2) AND (tenthous = ANY ('{1001,3000}'::integer[])))
-(5 rows)
-
-SELECT thousand, tenthous FROM tenk1
-WHERE thousand < 2 AND tenthous IN (1001,3000)
-ORDER BY thousand;
- thousand | tenthous
-----------+----------
- 0 | 3000
- 1 | 1001
-(2 rows)
-
diff --git a/src/test/regress/expected/inherit.out b/src/test/regress/expected/inherit.out
index da342c661f..4b3d919921 100644
--- a/src/test/regress/expected/inherit.out
+++ b/src/test/regress/expected/inherit.out
@@ -1495,153 +1495,28 @@ SELECT a.attrelid::regclass, a.attname, a.attinhcount, e.expected
WHERE inhparent IN (SELECT inhrelid FROM r) GROUP BY inhrelid) e
JOIN pg_attribute a ON e.inhrelid = a.attrelid WHERE NOT attislocal
ORDER BY a.attrelid::regclass::name, a.attnum;
-ERROR: WITH RECURSIVE currently not supported on distributed tables.
+ attrelid | attname | attinhcount | expected
+----------+---------+-------------+----------
+ inht2 | aaaa | 1 | 1
+ inht2 | b | 1 | 1
+ inht3 | aaaa | 1 | 1
+ inht3 | b | 1 | 1
+ inht4 | aaaa | 2 | 2
+ inht4 | b | 2 | 2
+ inht4 | x | 1 | 2
+ inht4 | y | 1 | 2
+ inhts | aaaa | 1 | 1
+ inhts | b | 2 | 1
+ inhts | x | 1 | 1
+ inhts | c | 1 | 1
+(12 rows)
+
DROP TABLE inht1, inhs1 CASCADE;
NOTICE: drop cascades to 4 other objects
DETAIL: drop cascades to table inht2
drop cascades to table inhts
drop cascades to table inht3
drop cascades to table inht4
--- Test non-inheritable indices [UNIQUE, EXCLUDE] contraints
-CREATE TABLE test_constraints (id int, val1 varchar, val2 int, UNIQUE(val1, val2));
-NOTICE: CREATE TABLE / UNIQUE will create implicit index "test_constraints_val1_val2_key" for table "test_constraints"
-CREATE TABLE test_constraints_inh () INHERITS (test_constraints);
-\d+ test_constraints
- Table "public.test_constraints"
- Column | Type | Modifiers | Storage | Stats target | Description
---------+-------------------+-----------+----------+--------------+-------------
- id | integer | | plain | |
- val1 | character varying | | extended | |
- val2 | integer | | plain | |
-Indexes:
- "test_constraints_val1_val2_key" UNIQUE CONSTRAINT, btree (val1, val2)
-Child tables: test_constraints_inh
-Has OIDs: no
-Distribute By: HASH(val1)
-Location Nodes: ALL DATANODES
-
-ALTER TABLE ONLY test_constraints DROP CONSTRAINT test_constraints_val1_val2_key;
-\d+ test_constraints
- Table "public.test_constraints"
- Column | Type | Modifiers | Storage | Stats target | Description
---------+-------------------+-----------+----------+--------------+-------------
- id | integer | | plain | |
- val1 | character varying | | extended | |
- val2 | integer | | plain | |
-Child tables: test_constraints_inh
-Has OIDs: no
-Distribute By: HASH(val1)
-Location Nodes: ALL DATANODES
-
-\d+ test_constraints_inh
- Table "public.test_constraints_inh"
- Column | Type | Modifiers | Storage | Stats target | Description
---------+-------------------+-----------+----------+--------------+-------------
- id | integer | | plain | |
- val1 | character varying | | extended | |
- val2 | integer | | plain | |
-Inherits: test_constraints
-Has OIDs: no
-Distribute By: HASH(val1)
-Location Nodes: ALL DATANODES
-
-DROP TABLE test_constraints_inh;
-DROP TABLE test_constraints;
-CREATE TABLE test_ex_constraints (
- c circle,
- EXCLUDE USING gist (c WITH &&)
-);
-NOTICE: CREATE TABLE / EXCLUDE will create implicit index "test_ex_constraints_c_excl" for table "test_ex_constraints"
-CREATE TABLE test_ex_constraints_inh () INHERITS (test_ex_constraints);
-\d+ test_ex_constraints
- Table "public.test_ex_constraints"
- Column | Type | Modifiers | Storage | Stats target | Description
---------+--------+-----------+---------+--------------+-------------
- c | circle | | plain | |
-Indexes:
- "test_ex_constraints_c_excl" EXCLUDE USING gist (c WITH &&)
-Child tables: test_ex_constraints_inh
-Has OIDs: no
-Distribute By: ROUND ROBIN
-Location Nodes: ALL DATANODES
-
-ALTER TABLE test_ex_constraints DROP CONSTRAINT test_ex_constraints_c_excl;
-\d+ test_ex_constraints
- Table "public.test_ex_constraints"
- Column | Type | Modifiers | Storage | Stats target | Description
---------+--------+-----------+---------+--------------+-------------
- c | circle | | plain | |
-Child tables: test_ex_constraints_inh
-Has OIDs: no
-Distribute By: ROUND ROBIN
-Location Nodes: ALL DATANODES
-
-\d+ test_ex_constraints_inh
- Table "public.test_ex_constraints_inh"
- Column | Type | Modifiers | Storage | Stats target | Description
---------+--------+-----------+---------+--------------+-------------
- c | circle | | plain | |
-Inherits: test_ex_constraints
-Has OIDs: no
-Distribute By: ROUND ROBIN
-Location Nodes: ALL DATANODES
-
-DROP TABLE test_ex_constraints_inh;
-DROP TABLE test_ex_constraints;
--- Test non-inheritable foreign key contraints
-CREATE TABLE test_primary_constraints(id int PRIMARY KEY);
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "test_primary_constraints_pkey" for table "test_primary_constraints"
-CREATE TABLE test_foreign_constraints(id1 int REFERENCES test_primary_constraints(id));
-CREATE TABLE test_foreign_constraints_inh () INHERITS (test_foreign_constraints);
-\d+ test_primary_constraints
- Table "public.test_primary_constraints"
- Column | Type | Modifiers | Storage | Stats target | Description
---------+---------+-----------+---------+--------------+-------------
- id | integer | not null | plain | |
-Indexes:
- "test_primary_constraints_pkey" PRIMARY KEY, btree (id)
-Referenced by:
- TABLE "test_foreign_constraints" CONSTRAINT "test_foreign_constraints_id1_fkey" FOREIGN KEY (id1) REFERENCES test_primary_constraints(id)
-Has OIDs: no
-Distribute By: HASH(id)
-Location Nodes: ALL DATANODES
-
-\d+ test_foreign_constraints
- Table "public.test_foreign_constraints"
- Column | Type | Modifiers | Storage | Stats target | Description
---------+---------+-----------+---------+--------------+-------------
- id1 | integer | | plain | |
-Foreign-key constraints:
- "test_foreign_constraints_id1_fkey" FOREIGN KEY (id1) REFERENCES test_primary_constraints(id)
-Child tables: test_foreign_constraints_inh
-Has OIDs: no
-Distribute By: HASH(id1)
-Location Nodes: ALL DATANODES
-
-ALTER TABLE test_foreign_constraints DROP CONSTRAINT test_foreign_constraints_id1_fkey;
-\d+ test_foreign_constraints
- Table "public.test_foreign_constraints"
- Column | Type | Modifiers | Storage | Stats target | Description
---------+---------+-----------+---------+--------------+-------------
- id1 | integer | | plain | |
-Child tables: test_foreign_constraints_inh
-Has OIDs: no
-Distribute By: HASH(id1)
-Location Nodes: ALL DATANODES
-
-\d+ test_foreign_constraints_inh
- Table "public.test_foreign_constraints_inh"
- Column | Type | Modifiers | Storage | Stats target | Description
---------+---------+-----------+---------+--------------+-------------
- id1 | integer | | plain | |
-Inherits: test_foreign_constraints
-Has OIDs: no
-Distribute By: HASH(id1)
-Location Nodes: ALL DATANODES
-
-DROP TABLE test_foreign_constraints_inh;
-DROP TABLE test_foreign_constraints;
-DROP TABLE test_primary_constraints;
--
-- Test parameterized append plans for inheritance trees
--
diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out
index 3b2327eb3a..88a5ed2ad2 100644
--- a/src/test/regress/expected/join.out
+++ b/src/test/regress/expected/join.out
@@ -2233,12 +2233,15 @@ SELECT * FROM t3 ORDER By x, y;
(3 rows)
DELETE FROM t3 USING t1 JOIN t2 USING (a) WHERE t3.x > t1.a;
+ERROR: could not plan this distributed delete
+DETAIL: correlated or complex DELETE is currently not supported in Postgres-XL.
SELECT * FROM t3 ORDER By x, y;
- x | y
----+---
- 6 | 7
- 7 | 8
-(2 rows)
+ x | y
+-----+-----
+ 6 | 7
+ 7 | 8
+ 500 | 100
+(3 rows)
DELETE FROM t3 USING t3 t3_other WHERE t3.x = t3_other.x AND t3.y = t3_other.y;
SELECT * FROM t3 ORDER By x, y;
@@ -2587,18 +2590,19 @@ SELECT qq, unique1
( SELECT COALESCE(q2, -1) AS qq FROM int8_tbl b ) AS ss2
USING (qq)
INNER JOIN tenk1 c ON qq = unique2;
- QUERY PLAN
---------------------------------------------------------------------------------------------------
- Hash Join
- Hash Cond: (COALESCE((COALESCE(a.q1, 0::bigint)), (COALESCE(b.q2, (-1)::bigint))) = c.unique2)
- -> Hash Full Join
- Hash Cond: (COALESCE(a.q1, 0::bigint) = COALESCE(b.q2, (-1)::bigint))
- -> Data Node Scan on int8_tbl "_REMOTE_TABLE_QUERY_"
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------
+ Remote Subquery Scan on all
+ -> Hash Join
+ Hash Cond: (c.unique2 = COALESCE((COALESCE(a.q1, 0::bigint)), (COALESCE(b.q2, (-1)::bigint))))
+ -> Seq Scan on tenk1 c
-> Hash
- -> Data Node Scan on int8_tbl "_REMOTE_TABLE_QUERY_"
- -> Hash
- -> Data Node Scan on tenk1 "_REMOTE_TABLE_QUERY_"
-(9 rows)
+ -> Hash Full Join
+ Hash Cond: (COALESCE(a.q1, 0::bigint) = COALESCE(b.q2, (-1)::bigint))
+ -> Seq Scan on int8_tbl a
+ -> Hash
+ -> Seq Scan on int8_tbl b
+(10 rows)
SELECT qq, unique1
FROM
@@ -2701,21 +2705,23 @@ where
order by 1,2;
QUERY PLAN
-----------------------------------------------------------------------------
- Sort
- Sort Key: t1.q1, t1.q2
- -> Hash Left Join
- Hash Cond: (t1.q2 = t2.q1)
- Filter: (1 = (SubPlan 1))
- -> Data Node Scan on int8_tbl "_REMOTE_TABLE_QUERY_"
- -> Hash
- -> Data Node Scan on int8_tbl "_REMOTE_TABLE_QUERY_"
- SubPlan 1
- -> Limit
- -> Result
- One-Time Filter: ((42) IS NOT NULL)
- -> Data Node Scan on int8_tbl "_REMOTE_TABLE_QUERY_"
- Coordinator quals: ((42) IS NOT NULL)
-(14 rows)
+ Remote Subquery Scan on all
+ -> Sort
+ Sort Key: t1.q1, t1.q2
+ -> Hash Left Join
+ Hash Cond: (t1.q2 = t2.q1)
+ Filter: (1 = (SubPlan 1))
+ -> Seq Scan on int8_tbl t1
+ -> Hash
+ -> Seq Scan on int8_tbl t2
+ SubPlan 1
+ -> Limit
+ -> Remote Subquery Scan on all
+ -> Limit
+ -> Result
+ One-Time Filter: ((42) IS NOT NULL)
+ -> Seq Scan on int8_tbl t3
+(16 rows)
select * from
int8_tbl t1 left join
@@ -2793,19 +2799,25 @@ select * from
int4(sin(1)) q1,
int4(sin(0)) q2
where q1 = thousand or q2 = thousand;
- QUERY PLAN
------------------------------------------------------------------------------------
- Hash Join
- Hash Cond: (int4_tbl.f1 = tenk1.twothousand)
- -> Data Node Scan on int4_tbl "_REMOTE_TABLE_QUERY_"
- -> Hash
+ QUERY PLAN
+------------------------------------------------------------------------------
+ Remote Subquery Scan on all
+ -> Hash Join
+ Hash Cond: (tenk1.twothousand = int4_tbl.f1)
-> Nested Loop
- Join Filter: ((q1.q1 = tenk1.thousand) OR (q2.q2 = tenk1.thousand))
-> Nested Loop
-> Function Scan on q1
-> Function Scan on q2
- -> Data Node Scan on tenk1 "_REMOTE_TABLE_QUERY_"
-(10 rows)
+ -> Bitmap Heap Scan on tenk1
+ Recheck Cond: ((q1.q1 = thousand) OR (q2.q2 = thousand))
+ -> BitmapOr
+ -> Bitmap Index Scan on tenk1_thous_tenthous
+ Index Cond: (q1.q1 = thousand)
+ -> Bitmap Index Scan on tenk1_thous_tenthous
+ Index Cond: (q2.q2 = thousand)
+ -> Hash
+ -> Seq Scan on int4_tbl
+(16 rows)
explain (num_nodes off, nodes off, costs off)
select * from
@@ -2813,20 +2825,22 @@ select * from
int4(sin(1)) q1,
int4(sin(0)) q2
where thousand = (q1 + q2);
- QUERY PLAN
-------------------------------------------------------------------
- Hash Join
- Hash Cond: (int4_tbl.f1 = tenk1.twothousand)
- -> Data Node Scan on int4_tbl "_REMOTE_TABLE_QUERY_"
- -> Hash
- -> Hash Join
- Hash Cond: (tenk1.thousand = (q1.q1 + q2.q2))
- -> Data Node Scan on tenk1 "_REMOTE_TABLE_QUERY_"
- -> Hash
- -> Nested Loop
- -> Function Scan on q1
- -> Function Scan on q2
-(11 rows)
+ QUERY PLAN
+--------------------------------------------------------------------
+ Remote Subquery Scan on all
+ -> Hash Join
+ Hash Cond: (tenk1.twothousand = int4_tbl.f1)
+ -> Nested Loop
+ -> Nested Loop
+ -> Function Scan on q1
+ -> Function Scan on q2
+ -> Bitmap Heap Scan on tenk1
+ Recheck Cond: (thousand = (q1.q1 + q2.q2))
+ -> Bitmap Index Scan on tenk1_thous_tenthous
+ Index Cond: (thousand = (q1.q1 + q2.q2))
+ -> Hash
+ -> Seq Scan on int4_tbl
+(13 rows)
--
-- test placement of movable quals in a parameterized join tree
@@ -2836,61 +2850,75 @@ select * from tenk1 t1 left join
(tenk1 t2 join tenk1 t3 on t2.thousand = t3.unique2)
on t1.hundred = t2.hundred and t1.ten = t3.ten
where t1.unique1 = 1;
- QUERY PLAN
-------------------------------------------------------------------
- Hash Right Join
- Hash Cond: ((t2.hundred = t1.hundred) AND (t3.ten = t1.ten))
- -> Hash Join
- Hash Cond: (t2.thousand = t3.unique2)
- -> Data Node Scan on tenk1 "_REMOTE_TABLE_QUERY_"
- -> Hash
- -> Data Node Scan on tenk1 "_REMOTE_TABLE_QUERY_"
- -> Hash
- -> Data Node Scan on tenk1 "_REMOTE_TABLE_QUERY_"
-(9 rows)
+ QUERY PLAN
+--------------------------------------------------------------------------
+ Remote Subquery Scan on all
+ -> Nested Loop Left Join
+ -> Index Scan using tenk1_unique1 on tenk1 t1
+ Index Cond: (unique1 = 1)
+ -> Nested Loop
+ Join Filter: (t1.ten = t3.ten)
+ -> Remote Subquery Scan on all
+ -> Index Scan using tenk1_hundred on tenk1 t2
+ Index Cond: (t1.hundred = hundred)
+ -> Materialize
+ -> Remote Subquery Scan on all
+ -> Index Scan using tenk1_unique2 on tenk1 t3
+ Index Cond: (unique2 = t2.thousand)
+(13 rows)
explain (num_nodes off, nodes off, costs off)
select * from tenk1 t1 left join
(tenk1 t2 join tenk1 t3 on t2.thousand = t3.unique2)
on t1.hundred = t2.hundred and t1.ten + t2.ten = t3.ten
where t1.unique1 = 1;
- QUERY PLAN
-------------------------------------------------------------------
- Hash Right Join
- Hash Cond: (t2.hundred = t1.hundred)
- Join Filter: ((t1.ten + t2.ten) = t3.ten)
- -> Hash Join
- Hash Cond: (t2.thousand = t3.unique2)
- -> Data Node Scan on tenk1 "_REMOTE_TABLE_QUERY_"
- -> Hash
- -> Data Node Scan on tenk1 "_REMOTE_TABLE_QUERY_"
- -> Hash
- -> Data Node Scan on tenk1 "_REMOTE_TABLE_QUERY_"
-(10 rows)
+ QUERY PLAN
+--------------------------------------------------------------------------
+ Remote Subquery Scan on all
+ -> Nested Loop Left Join
+ -> Index Scan using tenk1_unique1 on tenk1 t1
+ Index Cond: (unique1 = 1)
+ -> Nested Loop
+ Join Filter: ((t1.ten + t2.ten) = t3.ten)
+ -> Remote Subquery Scan on all
+ -> Index Scan using tenk1_hundred on tenk1 t2
+ Index Cond: (t1.hundred = hundred)
+ -> Materialize
+ -> Remote Subquery Scan on all
+ -> Index Scan using tenk1_unique2 on tenk1 t3
+ Index Cond: (unique2 = t2.thousand)
+(13 rows)
explain (num_nodes off, nodes off, costs off)
select count(*) from
tenk1 a join tenk1 b on a.unique1 = b.unique2
left join tenk1 c on a.unique2 = b.unique1 and c.thousand = a.thousand
join int4_tbl on b.thousand = f1;
- QUERY PLAN
-------------------------------------------------------------------------------
+ QUERY PLAN
+-------------------------------------------------------------------------------
Aggregate
- -> Hash Join
- Hash Cond: (b.thousand = int4_tbl.f1)
- -> Hash Left Join
- Hash Cond: (a.thousand = c.thousand)
- Join Filter: (a.unique2 = b.unique1)
- -> Hash Join
- Hash Cond: (a.unique1 = b.unique2)
- -> Data Node Scan on tenk1 "_REMOTE_TABLE_QUERY_"
- -> Hash
- -> Data Node Scan on tenk1 "_REMOTE_TABLE_QUERY_"
- -> Hash
- -> Data Node Scan on tenk1 "_REMOTE_TABLE_QUERY_"
- -> Hash
- -> Data Node Scan on int4_tbl "_REMOTE_TABLE_QUERY_"
-(15 rows)
+ -> Merge Left Join
+ Merge Cond: (a.thousand = c.thousand)
+ Join Filter: (a.unique2 = b.unique1)
+ -> Sort
+ Sort Key: a.thousand
+ -> Nested Loop
+ -> Remote Subquery Scan on all
+ -> Hash Join
+ Hash Cond: (b.thousand = int4_tbl.f1)
+ -> Seq Scan on tenk1 b
+ -> Hash
+ -> Seq Scan on int4_tbl
+ -> Materialize
+ -> Remote Subquery Scan on all
+ -> Index Scan using tenk1_unique1 on tenk1 a
+ Index Cond: (unique1 = b.unique2)
+ -> Materialize
+ -> Remote Subquery Scan on all
+ -> Sort
+ Sort Key: c.thousand
+ -> Seq Scan on tenk1 c
+(22 rows)
select count(*) from
tenk1 a join tenk1 b on a.unique1 = b.unique2
@@ -2908,29 +2936,30 @@ select b.unique1 from
join int4_tbl i1 on b.thousand = f1
right join int4_tbl i2 on i2.f1 = b.tenthous
order by 1;
- QUERY PLAN
-------------------------------------------------------------------------------------
- Sort
- Sort Key: b.unique1
- -> Hash Right Join
- Hash Cond: (b.tenthous = i2.f1)
- -> Hash Join
- Hash Cond: (b.thousand = i1.f1)
- -> Hash Left Join
- Hash Cond: (a.thousand = c.thousand)
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------
+ Remote Subquery Scan on all
+ -> Sort
+ Sort Key: b.unique1
+ -> Nested Loop Left Join
+ -> Seq Scan on int4_tbl i2
+ -> Nested Loop Left Join
Join Filter: (b.unique1 = 42)
- -> Hash Join
- Hash Cond: (a.unique1 = b.unique2)
- -> Data Node Scan on tenk1 "_REMOTE_TABLE_QUERY_"
- -> Hash
- -> Data Node Scan on tenk1 "_REMOTE_TABLE_QUERY_"
- -> Hash
- -> Data Node Scan on tenk1 "_REMOTE_TABLE_QUERY_"
- -> Hash
- -> Data Node Scan on int4_tbl "_REMOTE_TABLE_QUERY_"
- -> Hash
- -> Data Node Scan on int4_tbl "_REMOTE_TABLE_QUERY_"
-(20 rows)
+ -> Nested Loop
+ -> Remote Subquery Scan on all
+ -> Nested Loop
+ -> Seq Scan on int4_tbl i1
+ -> Index Scan using tenk1_thous_tenthous on tenk1 b
+ Index Cond: ((thousand = i1.f1) AND (i2.f1 = tenthous))
+ -> Materialize
+ -> Remote Subquery Scan on all
+ -> Index Scan using tenk1_unique1 on tenk1 a
+ Index Cond: (unique1 = b.unique2)
+ -> Materialize
+ -> Remote Subquery Scan on all
+ -> Index Only Scan using tenk1_thous_tenthous on tenk1 c
+ Index Cond: (thousand = a.thousand)
+(21 rows)
select b.unique1 from
tenk1 a join tenk1 b on a.unique1 = b.unique2
@@ -2962,49 +2991,52 @@ INSERT INTO b VALUES (0, 0), (1, NULL);
INSERT INTO c VALUES (0), (1);
-- all three cases should be optimizable into a simple seqscan
explain (verbose true, costs false, nodes false) SELECT a.* FROM a LEFT JOIN b ON a.b_id = b.id;
- QUERY PLAN
---------------------------------------------------------
- Data Node Scan on a "_REMOTE_TABLE_QUERY_"
+ QUERY PLAN
+-------------------------------
+ Remote Subquery Scan on all
Output: a.id, a.b_id
- Remote query: SELECT id, b_id FROM ONLY a WHERE true
-(3 rows)
+ -> Seq Scan on pg_temp_2.a
+ Output: a.id, a.b_id
+(4 rows)
explain (verbose true, costs false, nodes false) SELECT b.* FROM b LEFT JOIN c ON b.c_id = c.id;
- QUERY PLAN
---------------------------------------------------------
- Data Node Scan on b "_REMOTE_TABLE_QUERY_"
+ QUERY PLAN
+-------------------------------
+ Remote Subquery Scan on all
Output: b.id, b.c_id
- Remote query: SELECT id, c_id FROM ONLY b WHERE true
-(3 rows)
+ -> Seq Scan on pg_temp_2.b
+ Output: b.id, b.c_id
+(4 rows)
explain (verbose true, costs false, nodes false)
SELECT a.* FROM a LEFT JOIN (b left join c on b.c_id = c.id)
ON (a.b_id = b.id);
- QUERY PLAN
---------------------------------------------------------
- Data Node Scan on a "_REMOTE_TABLE_QUERY_"
+ QUERY PLAN
+-------------------------------
+ Remote Subquery Scan on all
Output: a.id, a.b_id
- Remote query: SELECT id, b_id FROM ONLY a WHERE true
-(3 rows)
+ -> Seq Scan on pg_temp_2.a
+ Output: a.id, a.b_id
+(4 rows)
-- check optimization of outer join within another special join
explain (verbose true, costs false, nodes false)
select id from a where id in (
select b.id from b left join c on b.id = c.id
);
- QUERY PLAN
---------------------------------------------------------------
- Hash Semi Join
+ QUERY PLAN
+-------------------------------------------
+ Remote Subquery Scan on all
Output: a.id
- Hash Cond: (a.id = b.id)
- -> Data Node Scan on a "_REMOTE_TABLE_QUERY_"
+ -> Hash Semi Join
Output: a.id
- Remote query: SELECT id FROM ONLY a WHERE true
- -> Hash
- Output: b.id
- -> Data Node Scan on b "_REMOTE_TABLE_QUERY_"
+ Hash Cond: (a.id = b.id)
+ -> Seq Scan on pg_temp_2.a
+ Output: a.id, a.b_id
+ -> Hash
Output: b.id
- Remote query: SELECT id FROM ONLY b WHERE true
+ -> Seq Scan on pg_temp_2.b
+ Output: b.id
(11 rows)
rollback;
@@ -3025,15 +3057,16 @@ select p.* from parent p left join child c on (p.k = c.k) order by 1,2;
explain (verbose true, costs false, nodes false)
select p.* from parent p left join child c on (p.k = c.k) order by 1,2;
- QUERY PLAN
-------------------------------------------------------------------
- Sort
+ QUERY PLAN
+--------------------------------------------
+ Remote Subquery Scan on all
Output: p.k, p.pd
- Sort Key: p.k, p.pd
- -> Data Node Scan on parent "_REMOTE_TABLE_QUERY_"
+ -> Sort
Output: p.k, p.pd
- Remote query: SELECT k, pd FROM ONLY parent p WHERE true
-(6 rows)
+ Sort Key: p.k, p.pd
+ -> Seq Scan on pg_temp_2.parent p
+ Output: p.k, p.pd
+(7 rows)
-- this case is not
select p.*, linked from parent p
@@ -3050,23 +3083,18 @@ explain (verbose true, costs false, nodes false)
select p.*, linked from parent p
left join (select c.*, true as linked from child c) as ss
on (p.k = ss.k) order by p.k;
- QUERY PLAN
--------------------------------------------------------------------------
- Sort
- Output: p.k, p.pd, (true)
- Sort Key: p.k
- -> Hash Left Join
+ QUERY PLAN
+--------------------------------------------------------------------
+ Remote Subquery Scan on all
+ Output: p.k, p.pd, true
+ -> Merge Left Join
Output: p.k, p.pd, (true)
- Hash Cond: (p.k = c.k)
- -> Data Node Scan on parent "_REMOTE_TABLE_QUERY_"
+ Merge Cond: (p.k = c.k)
+ -> Index Scan using parent_pkey on pg_temp_2.parent p
Output: p.k, p.pd
- Remote query: SELECT k, pd FROM ONLY parent p WHERE true
- -> Hash
- Output: c.k, (true)
- -> Data Node Scan on child "_REMOTE_TABLE_QUERY_"
- Output: c.k, true
- Remote query: SELECT k FROM ONLY child c WHERE true
-(14 rows)
+ -> Index Only Scan using child_k_key on pg_temp_2.child c
+ Output: c.k, true
+(9 rows)
-- check for a 9.0rc1 bug: join removal breaks pseudoconstant qual handling
select p.* from
@@ -3080,15 +3108,17 @@ explain (verbose true, costs false, nodes false)
select p.* from
parent p left join child c on (p.k = c.k)
where p.k = 1 and p.k = 2;
- QUERY PLAN
----------------------------------------------------------------------------------
- Result
+ QUERY PLAN
+----------------------------------------------------------------
+ Remote Subquery Scan on all
Output: p.k, p.pd
- One-Time Filter: false
- -> Data Node Scan on parent "_REMOTE_TABLE_QUERY_"
+ -> Result
Output: p.k, p.pd
- Remote query: SELECT k, pd FROM ONLY parent p WHERE ((k = 1) AND false)
-(6 rows)
+ One-Time Filter: false
+ -> Index Scan using parent_pkey on pg_temp_2.parent p
+ Output: p.k, p.pd
+ Index Cond: (p.k = 1)
+(8 rows)
select p.* from
(parent p left join child c on (p.k = c.k)) join parent x on p.k = x.k
diff --git a/src/test/regress/expected/plancache.out b/src/test/regress/expected/plancache.out
index 2bcfe34b79..b3b2297d5e 100644
--- a/src/test/regress/expected/plancache.out
+++ b/src/test/regress/expected/plancache.out
@@ -15,7 +15,7 @@ EXECUTE prepstmt;
(5 rows)
-- and one with parameters
-PREPARE prepstmt2(bigint) AS SELECT * FROM pcachetest WHERE q1 = $1 ORDER BY q1, q2;
+PREPARE prepstmt2(bigint) AS SELECT * FROM pcachetest WHERE q1 = $1;
EXECUTE prepstmt2(123);
q1 | q2
-----+------------------
@@ -31,7 +31,7 @@ EXECUTE prepstmt2(123);
ERROR: relation "pcachetest" does not exist
-- recreate the temp table (this demonstrates that the raw plan is
-- purely textual and doesn't depend on OIDs, for instance)
-CREATE TEMP TABLE pcachetest AS SELECT * FROM int8_tbl ORDER BY q1, q2;
+CREATE TEMP TABLE pcachetest AS SELECT * FROM int8_tbl;
EXECUTE prepstmt;
q1 | q2
------------------+-------------------
@@ -78,7 +78,7 @@ EXECUTE prepstmt2(123);
-- Try it with a view, which isn't directly used in the resulting plan
-- but should trigger invalidation anyway
CREATE TEMP VIEW pcacheview AS
- SELECT * FROM pcachetest;
+ SELECT * FROM pcachetest ORDER BY q1, q2;
PREPARE vprep AS SELECT * FROM pcacheview ORDER BY q1, q2;
EXECUTE vprep;
q1 | q2
diff --git a/src/test/regress/expected/plpgsql_1.out b/src/test/regress/expected/plpgsql_1.out
index a1cf0cc771..a563a3ee20 100644
--- a/src/test/regress/expected/plpgsql_1.out
+++ b/src/test/regress/expected/plpgsql_1.out
@@ -1971,36 +1971,17 @@ begin
return x;
end$$ language plpgsql;
select trap_zero_divide(50);
-NOTICE: should see this
-NOTICE: should see this only if 50 <> 0
-NOTICE: should see this only if 50 fits in smallint
- trap_zero_divide
-------------------
- 2
-(1 row)
-
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function trap_zero_divide(integer) line 5 during statement block entry
select trap_zero_divide(0);
-NOTICE: should see this
-NOTICE: caught division_by_zero
- trap_zero_divide
-------------------
- -1
-(1 row)
-
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function trap_zero_divide(integer) line 5 during statement block entry
select trap_zero_divide(100000);
-NOTICE: should see this
-NOTICE: should see this only if 100000 <> 0
-NOTICE: caught numeric_value_out_of_range
- trap_zero_divide
-------------------
- -2
-(1 row)
-
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function trap_zero_divide(integer) line 5 during statement block entry
select trap_zero_divide(-100);
-NOTICE: should see this
-NOTICE: should see this only if -100 <> 0
-NOTICE: should see this only if -100 fits in smallint
-ERROR: -100 is less than zero
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function trap_zero_divide(integer) line 5 during statement block entry
create function trap_matching_test(int) returns int as $$
declare x int;
sx smallint;
@@ -2023,32 +2004,18 @@ begin
end$$ language plpgsql;
-- PGXCTODO: This is failing due to issue 3522907, complicated SELECT queries in plpgsql functions
select trap_matching_test(50);
-ERROR: bind message supplies 1 parameters, but prepared statement "" requires 0
-CONTEXT: SQL statement "select unique1 from tenk1 where unique2 =
- (select unique2 from tenk1 b where ten = $1)"
-PL/pgSQL function trap_matching_test(integer) line 9 at SQL statement
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function trap_matching_test(integer) line 6 during statement block entry
select trap_matching_test(0);
-NOTICE: caught data_exception
- trap_matching_test
---------------------
- -1
-(1 row)
-
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function trap_matching_test(integer) line 6 during statement block entry
select trap_matching_test(100000);
-NOTICE: caught data_exception
- trap_matching_test
---------------------
- -1
-(1 row)
-
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function trap_matching_test(integer) line 6 during statement block entry
-- PGXCTODO: This is failing due to issue 3522907, complicated SELECT queries in plpgsql functions
select trap_matching_test(1);
-ERROR: bind message supplies 1 parameters, but prepared statement "" requires 0
-CONTEXT: SQL statement "select unique1 from tenk1 where unique2 =
- (select unique2 from tenk1 b where ten = $1)"
-PL/pgSQL function trap_matching_test(integer) line 9 at SQL statement
--- Enforce use of COMMIT instead of 2PC for temporary objects
-SET enforce_two_phase_commit TO off;
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function trap_matching_test(integer) line 6 during statement block entry
create temp table foo (f1 int);
create function blockme() returns int as $$
declare x int;
@@ -2073,12 +2040,8 @@ begin
end$$ language plpgsql;
set statement_timeout to 2000;
select blockme();
-NOTICE: nyeah nyeah, can't stop me
- blockme
----------
- 20
-(1 row)
-
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function blockme() line 6 during statement block entry
reset statement_timeout;
select * from foo order by 1;
f1
@@ -2105,20 +2068,8 @@ begin
return x;
end$$ language plpgsql;
select test_variable_storage();
-NOTICE: should see this
-CONTEXT: SQL statement "SELECT trap_zero_divide(-100)"
-PL/pgSQL function test_variable_storage() line 8 at PERFORM
-NOTICE: should see this only if -100 <> 0
-CONTEXT: SQL statement "SELECT trap_zero_divide(-100)"
-PL/pgSQL function test_variable_storage() line 8 at PERFORM
-NOTICE: should see this only if -100 fits in smallint
-CONTEXT: SQL statement "SELECT trap_zero_divide(-100)"
-PL/pgSQL function test_variable_storage() line 8 at PERFORM
- test_variable_storage
------------------------
- 123456789012
-(1 row)
-
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function test_variable_storage() line 5 during statement block entry
--
-- test foreign key error trapping
--
@@ -2153,29 +2104,18 @@ begin
return 1;
end$$ language plpgsql;
select trap_foreign_key(1);
- trap_foreign_key
-------------------
- 1
-(1 row)
-
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function trap_foreign_key(integer) line 3 during statement block entry
select trap_foreign_key(2); -- detects FK violation
-NOTICE: caught foreign_key_violation
- trap_foreign_key
-------------------
- 0
-(1 row)
-
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function trap_foreign_key(integer) line 3 during statement block entry
begin;
set constraints all deferred;
select trap_foreign_key(2); -- should not detect FK violation
-NOTICE: caught foreign_key_violation
- trap_foreign_key
-------------------
- 0
-(1 row)
-
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function trap_foreign_key(integer) line 3 during statement block entry
savepoint x;
-ERROR: SAVEPOINT is not yet supported.
+ERROR: current transaction is aborted, commands ignored until end of transaction block
set constraints all immediate; -- fails
ERROR: current transaction is aborted, commands ignored until end of transaction block
rollback to x;
@@ -2518,13 +2458,8 @@ EXCEPTION
END;
$$ LANGUAGE plpgsql;
SELECT reraise_test();
-NOTICE: exception syntax_error thrown in inner block, reraising
-NOTICE: RIGHT - exception syntax_error caught in inner block
- reraise_test
---------------
-
-(1 row)
-
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function reraise_test() line 2 during statement block entry
--
-- reject function definitions that contain malformed SQL queries at
-- compile-time, where possible
@@ -2673,15 +2608,8 @@ begin
end;
end; $$ language plpgsql;
select excpt_test3();
-NOTICE: caught exception P0001 user exception
-NOTICE: P0001 user exception
-NOTICE: caught exception 22012 division by zero
-NOTICE: P0001 user exception
- excpt_test3
--------------
-
-(1 row)
-
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function excpt_test3() line 3 during statement block entry
drop function excpt_test1();
drop function excpt_test2();
drop function excpt_test3();
@@ -3657,9 +3585,8 @@ begin
end;
$$ language plpgsql;
select raise_test();
-NOTICE: SQLSTATE: 22012 SQLERRM: check me
-ERROR: check me
-DETAIL: some detail info
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function raise_test() line 2 during statement block entry
create or replace function raise_test() returns void as $$
begin
raise 'check me'
@@ -3671,9 +3598,8 @@ begin
end;
$$ language plpgsql;
select raise_test();
-NOTICE: SQLSTATE: 1234F SQLERRM: check me
-ERROR: check me
-DETAIL: some detail info
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function raise_test() line 2 during statement block entry
-- SQLSTATE specification in WHEN
create or replace function raise_test() returns void as $$
begin
@@ -3686,9 +3612,8 @@ begin
end;
$$ language plpgsql;
select raise_test();
-NOTICE: SQLSTATE: 1234F SQLERRM: check me
-ERROR: check me
-DETAIL: some detail info
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function raise_test() line 2 during statement block entry
create or replace function raise_test() returns void as $$
begin
raise division_by_zero using detail = 'some detail info';
@@ -3699,9 +3624,8 @@ begin
end;
$$ language plpgsql;
select raise_test();
-NOTICE: SQLSTATE: 22012 SQLERRM: division_by_zero
-ERROR: division_by_zero
-DETAIL: some detail info
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function raise_test() line 2 during statement block entry
create or replace function raise_test() returns void as $$
begin
raise division_by_zero;
@@ -3787,12 +3711,8 @@ exception when others then
end;
$$ language plpgsql;
select stacked_diagnostics_test();
-NOTICE: sqlstate: 22012, message: division by zero, context: [PL/pgSQL function zero_divide() line 4 at RETURN <- SQL statement "SELECT zero_divide()" <- PL/pgSQL function stacked_diagnostics_test() line 6 at PERFORM]
- stacked_diagnostics_test
---------------------------
-
-(1 row)
-
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function stacked_diagnostics_test() line 5 during statement block entry
create or replace function stacked_diagnostics_test() returns void as $$
declare _detail text;
_hint text;
@@ -3808,12 +3728,8 @@ exception when others then
end;
$$ language plpgsql;
select stacked_diagnostics_test();
-NOTICE: message: custom exception, detail: some detail of custom exception, hint: some hint related to custom exception
- stacked_diagnostics_test
---------------------------
-
-(1 row)
-
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function stacked_diagnostics_test() line 5 during statement block entry
-- fail, cannot use stacked diagnostics statement outside handler
create or replace function stacked_diagnostics_test() returns void as $$
declare _detail text;
@@ -3844,8 +3760,8 @@ exception
end;
$$ language plpgsql;
select raise_test();
-NOTICE: 22012
-ERROR: substitute message
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function raise_test() line 2 during statement block entry
drop function raise_test();
-- test CASE statement
create or replace function case_test(bigint) returns text as $$
@@ -3931,12 +3847,8 @@ exception
end
$$ language plpgsql;
select catch();
-NOTICE: caught case_not_found 20000 case not found
- catch
--------
-
-(1 row)
-
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function catch() line 2 during statement block entry
-- test the searched variant too, as well as ELSE
create or replace function case_test(bigint) returns text as $$
declare a int = 10;
@@ -4157,17 +4069,11 @@ BEGIN
END;
$$ LANGUAGE plpgsql;
SELECT * FROM leaker_1(false);
- leaker_1
-----------
- 1
-(1 row)
-
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function leaker_1(boolean) line 5 during statement block entry
SELECT * FROM leaker_1(true);
- leaker_1
-----------
- 0
-(1 row)
-
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function leaker_1(boolean) line 5 during statement block entry
DROP FUNCTION leaker_1(bool);
DROP FUNCTION leaker_2(bool);
-- Test for appropriate cleanup of non-simple expression evaluations
@@ -4207,11 +4113,8 @@ begin
end;
$$ LANGUAGE plpgsql;
SELECT nonsimple_expr_test();
- nonsimple_expr_test
----------------------
- 1
-(1 row)
-
+ERROR: Internal subtransactions not supported in Postgres-XL
+CONTEXT: PL/pgSQL function nonsimple_expr_test() line 5 during statement block entry
DROP FUNCTION nonsimple_expr_test();
--
-- Test cases involving recursion and error recovery in simple expressions
@@ -4718,3 +4621,132 @@ ERROR: value for domain orderedarray violates check constraint "sorted"
CONTEXT: PL/pgSQL function testoa(integer,integer,integer) line 5 at assignment
drop function arrayassign1();
drop function testoa(x1 int, x2 int, x3 int);
+-- Check that DMLs in a plpgsql function work OK, when subsequent queries need
+-- to open new datanode connections
+CREATE OR REPLACE FUNCTION TestJoinTempTable_CT()
+RETURNS void AS $$
+BEGIN
+ CREATE TABLE IF NOT EXISTS RealTable(ProductId int, ScenarioId int);
+ TRUNCATE TABLE RealTable;
+
+ CREATE TABLE IF NOT EXISTS TmpBar(NodeId int)
+ DISTRIBUTE BY REPLICATION;
+ CREATE TABLE IF NOT EXISTS TmpFoo(TempId int)
+ DISTRIBUTE BY REPLICATION;
+END ;
+$$ LANGUAGE plpgsql;
+CREATE OR REPLACE FUNCTION TestJoinTempTable_INSERT()
+RETURNS void AS $$
+BEGIN
+ INSERT INTO RealTable(ProductId, ScenarioId)
+ SELECT generate_series(1,1000) as ProductId, (random() * 100)::int as ScenarioId;
+
+ INSERT INTO TmpBar(NodeId)
+ SELECT generate_series(1,1000);
+ RAISE INFO 'number of existing rows in RealTable - %', (SELECT count(*) FROM RealTable);
+ RAISE INFO 'number of existing rows in TmpBar - %', (SELECT count(*) FROM TmpBar);
+ RAISE INFO 'number of existing rows in TmpFoo - %', (SELECT count(*) FROM TmpFoo);
+ INSERT INTO TmpFoo(TempId)
+ SELECT DISTINCT(PR.ProductId)
+ FROM RealTable AS PR
+ JOIN TmpBar tmp1 ON PR.ProductId = tmp1.NodeId;
+
+ RAISE INFO 'number of rows produced by query - %',
+ (SELECT COUNT(DISTINCT(PR.ProductId))
+ FROM RealTable AS PR
+ JOIN TmpBar tmp1 ON PR.ProductId = tmp1.NodeId);
+ RAISE INFO 'number of rows in TmpFoo - %', (SELECT count(*) FROM TmpFoo);
+ RAISE INFO 'number of existing rows in TmpFoo - %', (SELECT count(*) FROM TmpFoo);
+ RAISE INFO 'number of existing rows in TmpFoo - %', (SELECT count(*) FROM TmpFoo);
+END ;
+$$ LANGUAGE plpgsql;
+SELECT TestJoinTempTable_CT();
+ testjointemptable_ct
+----------------------
+
+(1 row)
+
+SELECT TestJoinTempTable_INSERT();
+INFO: number of existing rows in RealTable - 1000
+INFO: number of existing rows in TmpBar - 1000
+INFO: number of existing rows in TmpFoo - 0
+INFO: number of rows produced by query - 1000
+INFO: number of rows in TmpFoo - 1000
+INFO: number of existing rows in TmpFoo - 1000
+INFO: number of existing rows in TmpFoo - 1000
+ testjointemptable_insert
+--------------------------
+
+(1 row)
+
+DROP TABLE RealTable;
+DROP TABLE TmpBar;
+DROP TABLE TmpFoo;
+CREATE OR REPLACE FUNCTION TestJoinTempTable()
+RETURNS void AS $$
+BEGIN
+ CREATE TABLE IF NOT EXISTS RealTable(ProductId int, ScenarioId int);
+ TRUNCATE TABLE RealTable;
+
+ CREATE TEMPORARY TABLE IF NOT EXISTS TmpBar(NodeId int)
+ DISTRIBUTE BY REPLICATION;
+ CREATE TEMPORARY TABLE IF NOT EXISTS TmpFoo(TempId int)
+ DISTRIBUTE BY REPLICATION;
+
+ INSERT INTO RealTable(ProductId, ScenarioId)
+ SELECT generate_series(1,1000) as ProductId, (random() * 100)::int as ScenarioId;
+
+ INSERT INTO TmpBar(NodeId)
+ SELECT generate_series(1,1000);
+
+ INSERT INTO TmpFoo(TempId)
+ SELECT DISTINCT(PR.ProductId)
+ FROM RealTable AS PR
+ JOIN TmpBar tmp1 ON PR.ProductId = tmp1.NodeId;
+END ;
+$$ LANGUAGE plpgsql;
+SELECT TestJoinTempTable();
+ testjointemptable
+-------------------
+
+(1 row)
+
+-- Multiple invokations of the function showed interesting issues with command
+-- passdown. So add that to the test case
+SELECT TestJoinTempTable();
+NOTICE: relation "realtable" already exists, skipping
+CONTEXT: SQL statement "CREATE TABLE IF NOT EXISTS RealTable(ProductId int, ScenarioId int)"
+PL/pgSQL function testjointemptable() line 3 at SQL statement
+NOTICE: relation "tmpbar" already exists, skipping
+CONTEXT: SQL statement "CREATE TEMPORARY TABLE IF NOT EXISTS TmpBar(NodeId int)
+ DISTRIBUTE BY REPLICATION"
+PL/pgSQL function testjointemptable() line 6 at SQL statement
+NOTICE: relation "tmpfoo" already exists, skipping
+CONTEXT: SQL statement "CREATE TEMPORARY TABLE IF NOT EXISTS TmpFoo(TempId int)
+ DISTRIBUTE BY REPLICATION"
+PL/pgSQL function testjointemptable() line 8 at SQL statement
+ testjointemptable
+-------------------
+
+(1 row)
+
+SELECT TestJoinTempTable();
+NOTICE: relation "realtable" already exists, skipping
+CONTEXT: SQL statement "CREATE TABLE IF NOT EXISTS RealTable(ProductId int, ScenarioId int)"
+PL/pgSQL function testjointemptable() line 3 at SQL statement
+NOTICE: relation "tmpbar" already exists, skipping
+CONTEXT: SQL statement "CREATE TEMPORARY TABLE IF NOT EXISTS TmpBar(NodeId int)
+ DISTRIBUTE BY REPLICATION"
+PL/pgSQL function testjointemptable() line 6 at SQL statement
+NOTICE: relation "tmpfoo" already exists, skipping
+CONTEXT: SQL statement "CREATE TEMPORARY TABLE IF NOT EXISTS TmpFoo(TempId int)
+ DISTRIBUTE BY REPLICATION"
+PL/pgSQL function testjointemptable() line 8 at SQL statement
+ testjointemptable
+-------------------
+
+(1 row)
+
+DROP TABLE RealTable;
+DROP TABLE TmpBar;
+DROP TABLE TmpFoo;
diff --git a/src/test/regress/expected/privileges_2.out b/src/test/regress/expected/privileges_2.out
index cd92aca653..a3747fbb7a 100644
--- a/src/test/regress/expected/privileges_2.out
+++ b/src/test/regress/expected/privileges_2.out
@@ -555,7 +555,7 @@ ERROR: permission denied for type testdomain1
CREATE TABLE test6a OF testtype1;
ERROR: permission denied for type testtype1
CREATE TABLE test10a (a int[], b testtype1[]);
-ERROR: permission denied for type testtype1
+ERROR: permission denied for type testtype1[]
CREATE TABLE test9a (a int, b int);
ALTER TABLE test9a ADD COLUMN c testdomain1;
ERROR: permission denied for type testdomain1
@@ -1290,56 +1290,6 @@ SELECT has_function_privilege('regressuser1', 'testns.testfunc(int)', 'EXECUTE')
SET client_min_messages TO 'warning';
DROP SCHEMA testns CASCADE;
RESET client_min_messages;
--- test that dependent privileges are revoked (or not) properly
-\c -
-set session role regressuser1;
-create table dep_priv_test (a int);
-grant select on dep_priv_test to regressuser2 with grant option;
-grant select on dep_priv_test to regressuser3 with grant option;
-set session role regressuser2;
-grant select on dep_priv_test to regressuser4 with grant option;
-set session role regressuser3;
-grant select on dep_priv_test to regressuser4 with grant option;
-set session role regressuser4;
-grant select on dep_priv_test to regressuser5;
-\dp dep_priv_test
- Access privileges
- Schema | Name | Type | Access privileges | Column access privileges
---------+---------------+-------+-----------------------------------+--------------------------
- public | dep_priv_test | table | regressuser1=arwdDxt/regressuser1+|
- | | | regressuser2=r*/regressuser1 +|
- | | | regressuser3=r*/regressuser1 +|
- | | | regressuser4=r*/regressuser2 +|
- | | | regressuser4=r*/regressuser3 +|
- | | | regressuser5=r/regressuser4 |
-(1 row)
-
-set session role regressuser2;
-revoke select on dep_priv_test from regressuser4 cascade;
-\dp dep_priv_test
- Access privileges
- Schema | Name | Type | Access privileges | Column access privileges
---------+---------------+-------+-----------------------------------+--------------------------
- public | dep_priv_test | table | regressuser1=arwdDxt/regressuser1+|
- | | | regressuser2=r*/regressuser1 +|
- | | | regressuser3=r*/regressuser1 +|
- | | | regressuser4=r*/regressuser3 +|
- | | | regressuser5=r/regressuser4 |
-(1 row)
-
-set session role regressuser3;
-revoke select on dep_priv_test from regressuser4 cascade;
-\dp dep_priv_test
- Access privileges
- Schema | Name | Type | Access privileges | Column access privileges
---------+---------------+-------+-----------------------------------+--------------------------
- public | dep_priv_test | table | regressuser1=arwdDxt/regressuser1+|
- | | | regressuser2=r*/regressuser1 +|
- | | | regressuser3=r*/regressuser1 |
-(1 row)
-
-set session role regressuser1;
-drop table dep_priv_test;
-- clean up
\c
drop sequence x_seq;
diff --git a/src/test/regress/expected/rules_2.out b/src/test/regress/expected/rules_2.out
index de754adf93..c7283de96d 100644
--- a/src/test/regress/expected/rules_2.out
+++ b/src/test/regress/expected/rules_2.out
@@ -6,7 +6,7 @@
--
-- Tables and rules for the view test
--
-create table rtest_t1 (a int4, b int4) distribute by roundrobin;
+create table rtest_t1 (a int4, b int4) distribute by replication;
create table rtest_t2 (a int4, b int4);
create table rtest_t3 (a int4, b int4);
create view rtest_v1 as select * from rtest_t1;
@@ -151,26 +151,19 @@ select * from rtest_v1 order by a, b;
-- delete with constant expression
delete from rtest_v1 where a = 1;
-ERROR: could not plan this distributed statement
-DETAIL: The plan suggests moving data of the target table between data nodes, possible data corruption.
select * from rtest_v1 order by a, b;
a | b
---+----
- 1 | 11
2 | 12
-(2 rows)
+(1 row)
insert into rtest_v1 values (1, 11);
delete from rtest_v1 where b = 12;
-ERROR: could not plan this distributed statement
-DETAIL: The plan suggests moving data of the target table between data nodes, possible data corruption.
select * from rtest_v1 order by a, b;
a | b
---+----
1 | 11
- 1 | 11
- 2 | 12
-(3 rows)
+(1 row)
insert into rtest_v1 values (2, 12);
insert into rtest_v1 values (2, 13);
@@ -178,11 +171,9 @@ select * from rtest_v1 order by a, b;
a | b
---+----
1 | 11
- 1 | 11
- 2 | 12
2 | 12
2 | 13
-(5 rows)
+(3 rows)
** Remember the delete rule on rtest_v1: It says
** DO INSTEAD DELETE FROM rtest_t1 WHERE a = old.a
@@ -193,17 +184,11 @@ select * from rtest_v1 order by a, b;
** So this time both rows with a = 2 must get deleted
\r
delete from rtest_v1 where b = 12;
-ERROR: could not plan this distributed statement
-DETAIL: The plan suggests moving data of the target table between data nodes, possible data corruption.
select * from rtest_v1 order by a, b;
a | b
---+----
1 | 11
- 1 | 11
- 2 | 12
- 2 | 12
- 2 | 13
-(5 rows)
+(1 row)
delete from rtest_v1;
-- insert select
@@ -255,65 +240,41 @@ select * from rtest_v1 where b isnull order by a, b;
-- let attribute a differ (must be done on rtest_t1 - see above)
update rtest_t1 set a = a + 10 where b isnull;
delete from rtest_v1 where b isnull;
-ERROR: could not plan this distributed statement
-DETAIL: The plan suggests moving data of the target table between data nodes, possible data corruption.
select * from rtest_v1 order by a, b;
- a | b
-----+----
- 1 | 21
- 2 | 22
- 3 | 23
- 11 |
- 12 |
- 13 |
- 14 |
- 15 |
-(8 rows)
+ a | b
+---+----
+ 1 | 21
+ 2 | 22
+ 3 | 23
+(3 rows)
-- now updates with constant expression
update rtest_v1 set b = 42 where a = 2;
-ERROR: Partition column can't be updated in current version
select * from rtest_v1 order by a, b;
- a | b
-----+----
- 1 | 21
- 2 | 22
- 3 | 23
- 11 |
- 12 |
- 13 |
- 14 |
- 15 |
-(8 rows)
+ a | b
+---+----
+ 1 | 21
+ 2 | 42
+ 3 | 23
+(3 rows)
update rtest_v1 set b = 99 where b = 42;
-ERROR: Partition column can't be updated in current version
select * from rtest_v1 order by a, b;
- a | b
-----+----
- 1 | 21
- 2 | 22
- 3 | 23
- 11 |
- 12 |
- 13 |
- 14 |
- 15 |
-(8 rows)
+ a | b
+---+----
+ 1 | 21
+ 2 | 99
+ 3 | 23
+(3 rows)
update rtest_v1 set b = 88 where b < 50;
select * from rtest_v1 order by a, b;
- a | b
-----+----
- 1 | 88
- 2 | 88
- 3 | 88
- 11 |
- 12 |
- 13 |
- 14 |
- 15 |
-(8 rows)
+ a | b
+---+----
+ 1 | 88
+ 2 | 99
+ 3 | 88
+(3 rows)
delete from rtest_v1;
insert into rtest_v1 select rtest_t2.a, rtest_t3.b
@@ -329,7 +290,8 @@ select * from rtest_v1 order by a, b;
-- updates in a mergejoin
update rtest_v1 set b = rtest_t2.b from rtest_t2 where rtest_v1.a = rtest_t2.a;
-ERROR: Partition column can't be updated in current version
+ERROR: could not plan this distributed update
+DETAIL: correlated UPDATE or updating distribution column currently not supported in Postgres-XL.
select * from rtest_v1 order by a, b;
a | b
---+----
@@ -367,7 +329,8 @@ select * from rtest_v1 order by a, b;
(8 rows)
update rtest_v1 set a = rtest_t3.a + 20 from rtest_t3 where rtest_v1.b = rtest_t3.b;
-ERROR: Partition column can't be updated in current version
+ERROR: could not plan this distributed update
+DETAIL: correlated UPDATE or updating distribution column currently not supported in Postgres-XL.
select * from rtest_v1 order by a, b;
a | b
----+----
@@ -397,7 +360,8 @@ insert into rtest_admin values ('jw', 'orion');
insert into rtest_admin values ('jw', 'notjw');
insert into rtest_admin values ('bm', 'neptun');
update rtest_system set sysname = 'pluto' where sysname = 'neptun';
-ERROR: Partition column can't be updated in current version
+ERROR: could not plan this distributed update
+DETAIL: correlated UPDATE or updating distribution column currently not supported in Postgres-XL.
select * from rtest_interface order by sysname, ifname;
sysname | ifname
---------+--------
@@ -416,7 +380,8 @@ select * from rtest_admin order by pname, sysname;
(3 rows)
update rtest_person set pname = 'jwieck' where pdesc = 'Jan Wieck';
-ERROR: Partition column can't be updated in current version
+ERROR: could not plan this distributed update
+DETAIL: correlated UPDATE or updating distribution column currently not supported in Postgres-XL.
-- Note: use ORDER BY here to ensure consistent output across all systems.
-- The above UPDATE affects two rows with equal keys, so they could be
-- updated in either order depending on the whim of the local qsort().
@@ -429,8 +394,8 @@ select * from rtest_admin order by pname, sysname;
(3 rows)
delete from rtest_system where sysname = 'orion';
-ERROR: could not plan this distributed statement
-DETAIL: The plan suggests moving data of the target table between data nodes, possible data corruption.
+ERROR: could not plan this distributed delete
+DETAIL: correlated or complex DELETE is currently not supported in Postgres-XL.
select * from rtest_interface order by sysname, ifname;
sysname | ifname
---------+--------
@@ -486,7 +451,8 @@ select ename, who = current_user as "matches user", action, newsal, oldsal from
update rtest_empmass set salary = salary + '1000.00';
update rtest_emp set salary = rtest_empmass.salary from rtest_empmass where rtest_emp.ename = rtest_empmass.ename;
-ERROR: Partition column can't be updated in current version
+ERROR: could not plan this distributed update
+DETAIL: correlated UPDATE or updating distribution column currently not supported in Postgres-XL.
select ename, who = current_user as "matches user", action, newsal, oldsal from rtest_emplog order by ename, action, newsal;
ename | matches user | action | newsal | oldsal
----------------------+--------------+------------+------------+------------
@@ -501,8 +467,8 @@ select ename, who = current_user as "matches user", action, newsal, oldsal from
(8 rows)
delete from rtest_emp using rtest_empmass where rtest_emp.ename = rtest_empmass.ename;
-ERROR: could not plan this distributed statement
-DETAIL: The plan suggests moving data of the target table between data nodes, possible data corruption.
+ERROR: could not plan this distributed delete
+DETAIL: correlated or complex DELETE is currently not supported in Postgres-XL.
select ename, who = current_user as "matches user", action, newsal, oldsal from rtest_emplog order by ename, action, newsal;
ename | matches user | action | newsal | oldsal
----------------------+--------------+------------+------------+------------
@@ -1136,7 +1102,8 @@ SELECT * FROM shoelace ORDER BY sl_name;
(8 rows)
insert into shoelace_ok select * from shoelace_arrive;
-ERROR: Partition column can't be updated in current version
+ERROR: could not plan this distributed update
+DETAIL: correlated UPDATE or updating distribution column currently not supported in Postgres-XL.
SELECT * FROM shoelace ORDER BY sl_name;
sl_name | sl_avail | sl_color | sl_len | sl_unit | sl_len_cm
------------+----------+------------+--------+----------+-----------
@@ -1179,8 +1146,8 @@ SELECT * FROM shoelace_candelete;
DELETE FROM shoelace WHERE EXISTS
(SELECT * FROM shoelace_candelete
WHERE sl_name = shoelace.sl_name);
-ERROR: could not plan this distributed statement
-DETAIL: The plan suggests moving data of the target table between data nodes, possible data corruption.
+ERROR: could not plan this distributed delete
+DETAIL: correlated or complex DELETE is currently not supported in Postgres-XL.
SELECT * FROM shoelace ORDER BY sl_name;
sl_name | sl_avail | sl_color | sl_len | sl_unit | sl_len_cm
------------+----------+------------+--------+----------+-----------
@@ -1281,7 +1248,8 @@ select * from vview order by pid;
(2 rows)
update vview set descrip='test1' where pid=1;
-ERROR: Partition column can't be updated in current version
+ERROR: could not plan this distributed update
+DETAIL: correlated UPDATE or updating distribution column currently not supported in Postgres-XL.
select * from vview order by pid;
pid | txt | descrip
-----+---------+----------
@@ -1290,7 +1258,8 @@ select * from vview order by pid;
(2 rows)
update vview set descrip='test2' where pid=2;
-ERROR: Partition column can't be updated in current version
+ERROR: could not plan this distributed update
+DETAIL: correlated UPDATE or updating distribution column currently not supported in Postgres-XL.
select * from vview order by pid;
pid | txt | descrip
-----+---------+----------
@@ -1299,7 +1268,8 @@ select * from vview order by pid;
(2 rows)
update vview set descrip='test3' where pid=3;
-ERROR: Partition column can't be updated in current version
+ERROR: could not plan this distributed update
+DETAIL: correlated UPDATE or updating distribution column currently not supported in Postgres-XL.
select * from vview order by pid;
pid | txt | descrip
-----+---------+----------
@@ -1584,7 +1554,8 @@ create rule t1_upd_2 as on update to t1
set constraint_exclusion = on;
insert into t1 select * from generate_series(5,19,1) g;
update t1 set a = 4 where a = 5;
-ERROR: Partition column can't be updated in current version
+ERROR: could not plan this distributed update
+DETAIL: correlated UPDATE or updating distribution column currently not supported in Postgres-XL.
select * from only t1 order by 1;
a
---
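Taken together, the rules_2.out hunks above track a single planner change: on a hash-distributed table Postgres-XL still rejects correlated UPDATE/DELETE and any UPDATE of the distribution column, but it now reports the restriction up front ("could not plan this distributed update/delete") instead of the older wording, while simple single-table rewrites that used to error now run. A minimal sketch of the restriction, assuming a multi-datanode cluster; the table dist_demo and its columns are illustrative, not part of the regression suite:

-- k is the distribution column
CREATE TABLE dist_demo (k int, v int) DISTRIBUTE BY HASH(k);
INSERT INTO dist_demo VALUES (1, 10), (2, 20);

-- fine: only the non-distribution column changes
UPDATE dist_demo SET v = v + 1 WHERE k = 1;

-- expected to fail with the message seen throughout this diff:
--   ERROR:  could not plan this distributed update
--   DETAIL:  correlated UPDATE or updating distribution column currently not supported in Postgres-XL.
UPDATE dist_demo SET k = k + 10 WHERE v = 21;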
diff --git a/src/test/regress/expected/subselect_1.out b/src/test/regress/expected/subselect_1.out
index 1d577e5e68..c4d6b96e61 100644
--- a/src/test/regress/expected/subselect_1.out
+++ b/src/test/regress/expected/subselect_1.out
@@ -376,7 +376,8 @@ update shipped_view set value = 11
from int4_tbl a join int4_tbl b
on (a.f1 = (select f1 from int4_tbl c where c.f1=b.f1))
where ordnum = a.f1;
-ERROR: Partition column can't be updated in current version
+ERROR: could not plan this distributed update
+DETAIL: correlated UPDATE or updating distribution column currently not supported in Postgres-XL.
select * from shipped_view;
ttype | ordnum | partnum | value
-------+--------+---------+---------
diff --git a/src/test/regress/expected/window.out b/src/test/regress/expected/window.out
index 8d583c34bd..97e73ccd05 100644
--- a/src/test/regress/expected/window.out
+++ b/src/test/regress/expected/window.out
@@ -590,20 +590,6 @@ group by ten order by ten;
9 | 10040184 | 7
(10 rows)
--- window and aggregate with GROUP BY expression (9.2 bug)
-explain (costs off)
-select first_value(max(x)) over (), y
- from (select unique1 as x, ten+four as y from tenk1) ss
- group by y;
- QUERY PLAN
------------------------------------------------------------------
- WindowAgg
- -> HashAggregate
- -> Remote Subquery Scan on all (datanode_1,datanode_2)
- -> HashAggregate
- -> Seq Scan on tenk1
-(5 rows)
-
-- test non-default frame specifications
SELECT four, ten,
sum(ten) over (partition by four order by ten),
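The window.out hunk removes the 9.2-era planner test for a window function layered over a GROUP BY aggregate. The construct itself is plain SQL and helps when reading the plan that was deleted: grouping happens first, so the window function sees one row per group and the aggregate inside it is a group-level value. A small sketch, independent of the regression schema (the table win_demo is illustrative):

CREATE TABLE win_demo (x int, y int);
INSERT INTO win_demo VALUES (1, 0), (4, 0), (2, 1), (8, 1);

-- max(x) is evaluated per y-group; first_value() then runs over the
-- grouped rows, so every output row carries the max of the first group.
SELECT y,
       max(x) AS grp_max,
       first_value(max(x)) OVER (ORDER BY y) AS first_grp_max
  FROM win_demo
 GROUP BY y;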
diff --git a/src/test/regress/expected/with.out b/src/test/regress/expected/with.out
index 832ba52d53..137479d05e 100644
--- a/src/test/regress/expected/with.out
+++ b/src/test/regress/expected/with.out
@@ -136,7 +136,7 @@ SELECT n, n IS OF (text) as is_text FROM t ORDER BY n;
-- +->E-+->G
CREATE TEMP TABLE department (
id INTEGER PRIMARY KEY, -- department ID
- parent_department INTEGER ,
+ parent_department INTEGER REFERENCES department, -- upper department ID
name TEXT -- department name
) DISTRIBUTE BY REPLICATION;
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "department_pkey" for table "department"
@@ -294,38 +294,38 @@ SELECT pg_get_viewdef('vsubdepartment'::regclass, true);
(1 row)
-- corner case in which sub-WITH gets initialized first
-select * from (with recursive q as (
- (select * from department order by id)
+with recursive q as (
+ select * from department
union all
(with x as (select * from q)
select * from x)
)
-select * from q limit 24) rel_alias order by 1, 2, 3;
+select * from q limit 24;
id | parent_department | name
----+-------------------+------
0 | | ROOT
- 0 | | ROOT
- 0 | | ROOT
- 1 | 0 | A
- 1 | 0 | A
1 | 0 | A
2 | 1 | B
- 2 | 1 | B
- 2 | 1 | B
- 3 | 2 | C
3 | 2 | C
- 3 | 2 | C
- 4 | 2 | D
- 4 | 2 | D
4 | 2 | D
5 | 0 | E
- 5 | 0 | E
- 5 | 0 | E
- 6 | 4 | F
- 6 | 4 | F
6 | 4 | F
7 | 5 | G
+ 0 | | ROOT
+ 1 | 0 | A
+ 2 | 1 | B
+ 3 | 2 | C
+ 4 | 2 | D
+ 5 | 0 | E
+ 6 | 4 | F
7 | 5 | G
+ 0 | | ROOT
+ 1 | 0 | A
+ 2 | 1 | B
+ 3 | 2 | C
+ 4 | 2 | D
+ 5 | 0 | E
+ 6 | 4 | F
7 | 5 | G
(24 rows)
@@ -1047,279 +1047,6 @@ SELECT * FROM t order by 1;
(55 rows)
--
--- test WITH attached to intermediate-level set operation
---
-WITH outermost(x) AS (
- SELECT 1
- UNION (WITH innermost as (SELECT 2)
- SELECT * FROM innermost
- UNION SELECT 3)
-)
-SELECT * FROM outermost;
- x
----
- 1
- 2
- 3
-(3 rows)
-
-WITH outermost(x) AS (
- SELECT 1
- UNION (WITH innermost as (SELECT 2)
- SELECT * FROM outermost -- fail
- UNION SELECT * FROM innermost)
-)
-SELECT * FROM outermost;
-ERROR: relation "outermost" does not exist
-LINE 4: SELECT * FROM outermost
- ^
-DETAIL: There is a WITH item named "outermost", but it cannot be referenced from this part of the query.
-HINT: Use WITH RECURSIVE, or re-order the WITH items to remove forward references.
-WITH RECURSIVE outermost(x) AS (
- SELECT 1
- UNION (WITH innermost as (SELECT 2)
- SELECT * FROM outermost
- UNION SELECT * FROM innermost)
-)
-SELECT * FROM outermost;
- x
----
- 1
- 2
-(2 rows)
-
-WITH RECURSIVE outermost(x) AS (
- WITH innermost as (SELECT 2 FROM outermost) -- fail
- SELECT * FROM innermost
- UNION SELECT * from outermost
-)
-SELECT * FROM outermost;
-ERROR: recursive reference to query "outermost" must not appear within a subquery
-LINE 2: WITH innermost as (SELECT 2 FROM outermost)
- ^
---
--- This test will fail with the old implementation of PARAM_EXEC parameter
--- assignment, because the "q1" Var passed down to A's targetlist subselect
--- looks exactly like the "A.id" Var passed down to C's subselect, causing
--- the old code to give them the same runtime PARAM_EXEC slot. But the
--- lifespans of the two parameters overlap, thanks to B also reading A.
---
-with
-A as ( select q2 as id, (select q1) as x from int8_tbl ),
-B as ( select id, row_number() over (partition by id) as r from A ),
-C as ( select A.id, array(select B.id from B where B.id = A.id) from A )
-select * from C;
- id | array
--------------------+-------------------------------------
- 456 | {456}
- 4567890123456789 | {4567890123456789,4567890123456789}
- 123 | {123}
- 4567890123456789 | {4567890123456789,4567890123456789}
- -4567890123456789 | {-4567890123456789}
-(5 rows)
-
---
--- Test CTEs read in non-initialization orders
---
-WITH RECURSIVE
- tab(id_key,link) AS (VALUES (1,17), (2,17), (3,17), (4,17), (6,17), (5,17)),
- iter (id_key, row_type, link) AS (
- SELECT 0, 'base', 17
- UNION ALL (
- WITH remaining(id_key, row_type, link, min) AS (
- SELECT tab.id_key, 'true'::text, iter.link, MIN(tab.id_key) OVER ()
- FROM tab INNER JOIN iter USING (link)
- WHERE tab.id_key > iter.id_key
- ),
- first_remaining AS (
- SELECT id_key, row_type, link
- FROM remaining
- WHERE id_key=min
- ),
- effect AS (
- SELECT tab.id_key, 'new'::text, tab.link
- FROM first_remaining e INNER JOIN tab ON e.id_key=tab.id_key
- WHERE e.row_type = 'false'
- )
- SELECT * FROM first_remaining
- UNION ALL SELECT * FROM effect
- )
- )
-SELECT * FROM iter;
- id_key | row_type | link
---------+----------+------
- 0 | base | 17
- 1 | true | 17
- 2 | true | 17
- 3 | true | 17
- 4 | true | 17
- 5 | true | 17
- 6 | true | 17
-(7 rows)
-
-WITH RECURSIVE
- tab(id_key,link) AS (VALUES (1,17), (2,17), (3,17), (4,17), (6,17), (5,17)),
- iter (id_key, row_type, link) AS (
- SELECT 0, 'base', 17
- UNION (
- WITH remaining(id_key, row_type, link, min) AS (
- SELECT tab.id_key, 'true'::text, iter.link, MIN(tab.id_key) OVER ()
- FROM tab INNER JOIN iter USING (link)
- WHERE tab.id_key > iter.id_key
- ),
- first_remaining AS (
- SELECT id_key, row_type, link
- FROM remaining
- WHERE id_key=min
- ),
- effect AS (
- SELECT tab.id_key, 'new'::text, tab.link
- FROM first_remaining e INNER JOIN tab ON e.id_key=tab.id_key
- WHERE e.row_type = 'false'
- )
- SELECT * FROM first_remaining
- UNION ALL SELECT * FROM effect
- )
- )
-SELECT * FROM iter;
- id_key | row_type | link
---------+----------+------
- 0 | base | 17
- 1 | true | 17
- 2 | true | 17
- 3 | true | 17
- 4 | true | 17
- 5 | true | 17
- 6 | true | 17
-(7 rows)
-
---
--- test WITH attached to intermediate-level set operation
---
-WITH outermost(x) AS (
- SELECT 1
- UNION (WITH innermost as (SELECT 2)
- SELECT * FROM innermost
- UNION SELECT 3)
-)
-SELECT * FROM outermost;
- x
----
- 1
- 2
- 3
-(3 rows)
-
-WITH outermost(x) AS (
- SELECT 1
- UNION (WITH innermost as (SELECT 2)
- SELECT * FROM outermost -- fail
- UNION SELECT * FROM innermost)
-)
-SELECT * FROM outermost;
-ERROR: relation "outermost" does not exist
-LINE 4: SELECT * FROM outermost
- ^
-DETAIL: There is a WITH item named "outermost", but it cannot be referenced from this part of the query.
-HINT: Use WITH RECURSIVE, or re-order the WITH items to remove forward references.
-WITH RECURSIVE outermost(x) AS (
- SELECT 1
- UNION (WITH innermost as (SELECT 2)
- SELECT * FROM outermost
- UNION SELECT * FROM innermost)
-)
-SELECT * FROM outermost;
- x
----
- 1
- 2
-(2 rows)
-
-WITH RECURSIVE outermost(x) AS (
- WITH innermost as (SELECT 2 FROM outermost) -- fail
- SELECT * FROM innermost
- UNION SELECT * from outermost
-)
-SELECT * FROM outermost;
-ERROR: recursive reference to query "outermost" must not appear within a subquery
-LINE 2: WITH innermost as (SELECT 2 FROM outermost)
- ^
---
--- Test CTEs read in non-initialization orders
---
-WITH RECURSIVE
- tab(id_key,link) AS (VALUES (1,17), (2,17), (3,17), (4,17), (6,17), (5,17)),
- iter (id_key, row_type, link) AS (
- SELECT 0, 'base', 17
- UNION ALL (
- WITH remaining(id_key, row_type, link, min) AS (
- SELECT tab.id_key, 'true'::text, iter.link, MIN(tab.id_key) OVER ()
- FROM tab INNER JOIN iter USING (link)
- WHERE tab.id_key > iter.id_key
- ),
- first_remaining AS (
- SELECT id_key, row_type, link
- FROM remaining
- WHERE id_key=min
- ),
- effect AS (
- SELECT tab.id_key, 'new'::text, tab.link
- FROM first_remaining e INNER JOIN tab ON e.id_key=tab.id_key
- WHERE e.row_type = 'false'
- )
- SELECT * FROM first_remaining
- UNION ALL SELECT * FROM effect
- )
- )
-SELECT * FROM iter;
- id_key | row_type | link
---------+----------+------
- 0 | base | 17
- 1 | true | 17
- 2 | true | 17
- 3 | true | 17
- 4 | true | 17
- 5 | true | 17
- 6 | true | 17
-(7 rows)
-
-WITH RECURSIVE
- tab(id_key,link) AS (VALUES (1,17), (2,17), (3,17), (4,17), (6,17), (5,17)),
- iter (id_key, row_type, link) AS (
- SELECT 0, 'base', 17
- UNION (
- WITH remaining(id_key, row_type, link, min) AS (
- SELECT tab.id_key, 'true'::text, iter.link, MIN(tab.id_key) OVER ()
- FROM tab INNER JOIN iter USING (link)
- WHERE tab.id_key > iter.id_key
- ),
- first_remaining AS (
- SELECT id_key, row_type, link
- FROM remaining
- WHERE id_key=min
- ),
- effect AS (
- SELECT tab.id_key, 'new'::text, tab.link
- FROM first_remaining e INNER JOIN tab ON e.id_key=tab.id_key
- WHERE e.row_type = 'false'
- )
- SELECT * FROM first_remaining
- UNION ALL SELECT * FROM effect
- )
- )
-SELECT * FROM iter;
- id_key | row_type | link
---------+----------+------
- 0 | base | 17
- 1 | true | 17
- 2 | true | 17
- 3 | true | 17
- 4 | true | 17
- 5 | true | 17
- 6 | true | 17
-(7 rows)
-
---
-- Data-modifying statements in WITH
--
-- INSERT ... RETURNING
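Two things change in with.out above: the department table regains its self-referencing REFERENCES constraint (presumably workable here because the table is replicated, so the check can be satisfied locally on every node), and the sub-WITH corner case returns to the upstream, unwrapped form. The 24-row result with three repeating blocks is expected: the recursive term reads the whole of q through x, so each iteration re-emits the full 8-row department set and only LIMIT 24 stops the recursion. The same mechanics in miniature, with illustrative names:

WITH RECURSIVE q AS (
    SELECT 1 AS n                     -- non-recursive term: one row
    UNION ALL
    (WITH x AS (SELECT n FROM q)      -- recursive term re-reads all of q
     SELECT n FROM x)
)
SELECT n FROM q LIMIT 5;              -- five copies of 1; only LIMIT ends it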
diff --git a/src/test/regress/expected/xc_FQS.out b/src/test/regress/expected/xc_FQS.out
index 7e58c4fccd..6e923970b0 100644
--- a/src/test/regress/expected/xc_FQS.out
+++ b/src/test/regress/expected/xc_FQS.out
@@ -13,11 +13,11 @@ insert into tab1_rr values (5, 3);
insert into tab1_rr values (7, 8);
insert into tab1_rr values (9, 2);
explain (verbose on, nodes off, num_nodes on, costs off) insert into tab1_rr values (9, 2);
- QUERY PLAN
-------------------------------------------------
+ QUERY PLAN
+-----------------------------------------
Remote Subquery Scan on all
-> Insert on public.tab1_rr
- -> Remote Subquery Scan on local node
+ -> Remote Subquery Scan on all
Output: 9, 2
Distribute results by N
-> Result
@@ -64,9 +64,29 @@ explain (verbose on, nodes off, costs off) select sum(val), avg(val), count(*) f
-- should not get FQSed because of window functions
select first_value(val) over (partition by val2 order by val) from tab1_rr;
-ERROR: Window functions are not supported yet
+ first_value
+-------------
+ 1
+ 1
+ 5
+ 2
+ 7
+(5 rows)
+
explain (verbose on, nodes off, costs off) select first_value(val) over (partition by val2 order by val) from tab1_rr;
-ERROR: Window functions are not supported yet
+ QUERY PLAN
+---------------------------------------------------
+ WindowAgg
+ Output: first_value(val) OVER (?), val, val2
+ -> Remote Subquery Scan on all
+ Output: val, val2
+ -> Sort
+ Output: val, val2
+ Sort Key: tab1_rr.val2, tab1_rr.val
+ -> Seq Scan on public.tab1_rr
+ Output: val, val2
+(9 rows)
+
-- should not get FQSed because of LIMIT clause
select * from tab1_rr where val2 = 3 limit 1;
val | val2
@@ -95,18 +115,16 @@ select * from tab1_rr where val2 = 4 offset 1;
(0 rows)
explain (verbose on, nodes off, costs off) select * from tab1_rr where val2 = 4 offset 1;
- QUERY PLAN
-------------------------------------------------
+ QUERY PLAN
+------------------------------------------
Limit
Output: val, val2
-> Remote Subquery Scan on all
Output: val, val2
- -> Limit
+ -> Seq Scan on public.tab1_rr
Output: val, val2
- -> Seq Scan on public.tab1_rr
- Output: val, val2
- Filter: (tab1_rr.val2 = 4)
-(9 rows)
+ Filter: (tab1_rr.val2 = 4)
+(7 rows)
-- should not get FQSed because of SORT clause
select * from tab1_rr order by val;
@@ -366,9 +384,9 @@ select * from tab1_rr where val = 7;
delete from tab1_rr where val = 7;
explain verbose delete from tab1_rr where val = 7;
- QUERY PLAN
------------------------------------------------------------------------------
- Remote Subquery Scan on all (d01) (cost=0.00..36.75 rows=11 width=14)
+ QUERY PLAN
+------------------------------------------------------------------------------------------
+ Remote Subquery Scan on all (datanode_1,datanode_2) (cost=0.00..36.75 rows=11 width=14)
-> Delete on public.tab1_rr (cost=0.00..36.75 rows=11 width=14)
-> Seq Scan on public.tab1_rr (cost=0.00..36.75 rows=11 width=14)
Output: val, ctid, xc_node_id
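The xc_FQS.out hunks record two behavioural changes on the round-robin table: window functions now execute (the old "Window functions are not supported yet" error is gone) with a coordinator-side WindowAgg above a sorted Remote Subquery Scan, and the OFFSET plan no longer pushes a redundant Limit below the remote scan. The EXPLAIN options used by the test are the quickest way to see which side of the cluster does the work; a sketch assuming a round-robin table shaped like tab1_rr, whose creation lies outside this hunk:

-- illustrative stand-in for tab1_rr (assumed round-robin distribution)
CREATE TABLE rr_demo (val int, val2 int) DISTRIBUTE BY ROUNDROBIN;
INSERT INTO rr_demo VALUES (1, 2), (2, 4), (5, 3), (7, 8), (9, 2);

-- per the new expected output, this should plan as a WindowAgg on the
-- coordinator over a sorted Remote Subquery Scan instead of erroring out
EXPLAIN (VERBOSE ON, NODES OFF, COSTS OFF)
SELECT first_value(val) OVER (PARTITION BY val2 ORDER BY val) FROM rr_demo;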
diff --git a/src/test/regress/expected/xc_alter_table.out b/src/test/regress/expected/xc_alter_table.out
index 0b2a34f69f..3b238171f3 100644
--- a/src/test/regress/expected/xc_alter_table.out
+++ b/src/test/regress/expected/xc_alter_table.out
@@ -5,14 +5,16 @@
CREATE TABLE xc_alter_table_1 (id serial, name varchar(80), code varchar(80)) DISTRIBUTE BY HASH(id);
NOTICE: CREATE TABLE will create implicit sequence "xc_alter_table_1_id_seq" for serial column "xc_alter_table_1.id"
EXPLAIN (VERBOSE true, COSTS false, NODES false) INSERT INTO xc_alter_table_1(name) VALUES ('aaa'),('bbb'),('ccc');
- QUERY PLAN
------------------------------------------------------------------------------------------------------------
- Insert on public.xc_alter_table_1
- Node expr: xc_alter_table_1.id
- Remote query: INSERT INTO public.xc_alter_table_1 (id, name, code) VALUES ($1, $2, $3)
- -> Values Scan on "*VALUES*"
- Output: nextval('xc_alter_table_1_id_seq'::regclass), "*VALUES*".column1, NULL::character varying
-(5 rows)
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------
+ Remote Subquery Scan on all
+ -> Insert on public.xc_alter_table_1
+ -> Remote Subquery Scan on all
+ Output: nextval('xc_alter_table_1_id_seq'::regclass), "*VALUES*".column1, NULL::character varying
+ Distribute results by H: nextval('xc_alter_table_1_id_seq'::regclass)
+ -> Values Scan on "*VALUES*"
+ Output: nextval('xc_alter_table_1_id_seq'::regclass), "*VALUES*".column1, NULL::character varying
+(7 rows)
INSERT INTO xc_alter_table_1(name) VALUES ('aaa'),('bbb'),('ccc');
SELECT id, name, code FROM xc_alter_table_1 ORDER BY 1;
@@ -31,14 +33,16 @@ ALTER TABLE xc_alter_table_1 DROP COLUMN code;
-- Check for query generation of remote INSERT
INSERT INTO xc_alter_table_1(name) VALUES('ddd'),('eee'),('fff');
EXPLAIN (VERBOSE true, COSTS false, NODES false) INSERT INTO xc_alter_table_1(name) VALUES('ddd'),('eee'),('fff');
- QUERY PLAN
--------------------------------------------------------------------------------------------------
- Insert on public.xc_alter_table_1
- Node expr: xc_alter_table_1.id
- Remote query: INSERT INTO public.xc_alter_table_1 (id, name) VALUES ($1, $2)
- -> Values Scan on "*VALUES*"
- Output: nextval('xc_alter_table_1_id_seq'::regclass), "*VALUES*".column1, NULL::integer
-(5 rows)
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------
+ Remote Subquery Scan on all
+ -> Insert on public.xc_alter_table_1
+ -> Remote Subquery Scan on all
+ Output: nextval('xc_alter_table_1_id_seq'::regclass), "*VALUES*".column1, NULL::integer
+ Distribute results by H: nextval('xc_alter_table_1_id_seq'::regclass)
+ -> Values Scan on "*VALUES*"
+ Output: nextval('xc_alter_table_1_id_seq'::regclass), "*VALUES*".column1, NULL::integer
+(7 rows)
SELECT id, name FROM xc_alter_table_1 ORDER BY 1;
id | name
@@ -54,16 +58,18 @@ SELECT id, name FROM xc_alter_table_1 ORDER BY 1;
-- Check for query generation of remote INSERT SELECT
INSERT INTO xc_alter_table_1(name) SELECT 'ggg';
EXPLAIN (VERBOSE true, COSTS false, NODES false) INSERT INTO xc_alter_table_1(name) SELECT 'ggg';
- QUERY PLAN
------------------------------------------------------------------------------------------------------------
- Insert on public.xc_alter_table_1
- Node expr: xc_alter_table_1.id
- Remote query: INSERT INTO public.xc_alter_table_1 (id, name) VALUES ($1, $2)
- -> Subquery Scan on "*SELECT*"
- Output: nextval('xc_alter_table_1_id_seq'::regclass), 'ggg'::character varying(80), NULL::integer
- -> Result
- Output: 'ggg'
-(7 rows)
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------
+ Remote Subquery Scan on all
+ -> Insert on public.xc_alter_table_1
+ -> Remote Subquery Scan on all
+ Output: nextval('xc_alter_table_1_id_seq'::regclass), 'ggg'::character varying(80), NULL::integer
+ Distribute results by H: nextval('xc_alter_table_1_id_seq'::regclass)
+ -> Subquery Scan on "*SELECT*"
+ Output: nextval('xc_alter_table_1_id_seq'::regclass), 'ggg'::character varying(80), NULL::integer
+ -> Result
+ Output: 'ggg'
+(9 rows)
SELECT id, name FROM xc_alter_table_1 ORDER BY 1;
id | name
@@ -79,16 +85,14 @@ SELECT id, name FROM xc_alter_table_1 ORDER BY 1;
-- Check for query generation of remote UPDATE
EXPLAIN (VERBOSE true, COSTS false, NODES false) UPDATE xc_alter_table_1 SET name = 'zzz' WHERE id = currval('xc_alter_table_1_id_seq');
- QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------
- Update on public.xc_alter_table_1
- Node expr: xc_alter_table_1.id
- Remote query: UPDATE ONLY public.xc_alter_table_1 SET name = $2 WHERE ctid = $5 AND xc_node_id = $6
- -> Data Node Scan on xc_alter_table_1 "_REMOTE_TABLE_QUERY_"
- Output: xc_alter_table_1.id, 'zzz'::character varying(80), NULL::integer, xc_alter_table_1.id, xc_alter_table_1.ctid, xc_alter_table_1.xc_node_id
- Remote query: SELECT id, ctid, xc_node_id FROM ONLY xc_alter_table_1 WHERE true
- Coordinator quals: (xc_alter_table_1.id = currval('xc_alter_table_1_id_seq'::regclass))
-(7 rows)
+ QUERY PLAN
+---------------------------------------------------------------------------------------------
+ Remote Subquery Scan on all
+ -> Update on public.xc_alter_table_1
+ -> Seq Scan on public.xc_alter_table_1
+ Output: id, 'zzz'::character varying(80), NULL::integer, id, ctid, xc_node_id
+ Filter: (xc_alter_table_1.id = currval('xc_alter_table_1_id_seq'::regclass))
+(5 rows)
UPDATE xc_alter_table_1 SET name = 'zzz' WHERE id = currval('xc_alter_table_1_id_seq');
SELECT id, name FROM xc_alter_table_1 ORDER BY 1;
@@ -125,13 +129,16 @@ ALTER TABLE xc_alter_table_2 DROP COLUMN d;
ALTER TABLE xc_alter_table_2 DROP COLUMN e;
-- Check for query generation of remote INSERT
EXPLAIN (VERBOSE true, COSTS false, NODES false) INSERT INTO xc_alter_table_2 VALUES ('Kodek', false);
- QUERY PLAN
-----------------------------------------------------------------------------------------------------
- Insert on public.xc_alter_table_2
- Remote query: INSERT INTO public.xc_alter_table_2 (b, c) VALUES ($2, $3)
- -> Result
- Output: NULL::integer, 'Kodek'::character varying(20), false, NULL::integer, NULL::integer
-(4 rows)
+ QUERY PLAN
+----------------------------------------------------------------------------------------------------------------
+ Remote Subquery Scan on any
+ -> Insert on public.xc_alter_table_2
+ -> Remote Subquery Scan on all
+ Output: NULL::integer, 'Kodek'::character varying(20), false, NULL::integer, NULL::integer
+ Distribute results by R
+ -> Result
+ Output: NULL::integer, 'Kodek'::character varying(20), false, NULL::integer, NULL::integer
+(7 rows)
INSERT INTO xc_alter_table_2 VALUES ('Kodek', false);
SELECT b, c FROM xc_alter_table_2 ORDER BY b;
@@ -145,13 +152,13 @@ SELECT b, c FROM xc_alter_table_2 ORDER BY b;
-- Check for query generation of remote UPDATE
EXPLAIN (VERBOSE true, COSTS false, NODES false) UPDATE xc_alter_table_2 SET b = 'Morphee', c = false WHERE b = 'Neo';
- QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------
- Update on public.xc_alter_table_2
- Remote query: UPDATE ONLY public.xc_alter_table_2 SET b = $2, c = $3 WHERE ctid = $7
- -> Data Node Scan on xc_alter_table_2 "_REMOTE_TABLE_QUERY_"
- Output: NULL::integer, 'Morphee'::character varying(20), false, NULL::integer, NULL::integer, xc_alter_table_2.b, xc_alter_table_2.ctid
- Remote query: SELECT b, ctid FROM ONLY xc_alter_table_2 WHERE ((b)::text = 'Neo'::text)
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------
+ Remote Subquery Scan on any
+ -> Update on public.xc_alter_table_2
+ -> Seq Scan on public.xc_alter_table_2
+ Output: NULL::integer, 'Morphee'::character varying(20), false, NULL::integer, NULL::integer, b, ctid
+ Filter: ((xc_alter_table_2.b)::text = 'Neo'::text)
(5 rows)
UPDATE xc_alter_table_2 SET b = 'Morphee', c = false WHERE b = 'Neo';
@@ -169,13 +176,16 @@ ALTER TABLE xc_alter_table_2 ADD COLUMN a int;
ALTER TABLE xc_alter_table_2 ADD COLUMN a2 varchar(20);
-- Check for query generation of remote INSERT
EXPLAIN (VERBOSE true, COSTS false, NODES false) INSERT INTO xc_alter_table_2 (a, a2, b, c) VALUES (100, 'CEO', 'Gordon', true);
- QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------
- Insert on public.xc_alter_table_2
- Remote query: INSERT INTO public.xc_alter_table_2 (b, c, a, a2) VALUES ($2, $3, $6, $7)
- -> Result
- Output: NULL::integer, 'Gordon'::character varying(20), true, NULL::integer, NULL::integer, 100, 'CEO'::character varying(20)
-(4 rows)
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------
+ Remote Subquery Scan on any
+ -> Insert on public.xc_alter_table_2
+ -> Remote Subquery Scan on all
+ Output: NULL::integer, 'Gordon'::character varying(20), true, NULL::integer, NULL::integer, 100, 'CEO'::character varying(20)
+ Distribute results by R
+ -> Result
+ Output: NULL::integer, 'Gordon'::character varying(20), true, NULL::integer, NULL::integer, 100, 'CEO'::character varying(20)
+(7 rows)
INSERT INTO xc_alter_table_2 (a, a2, b, c) VALUES (100, 'CEO', 'Gordon', true);
SELECT a, a2, b, c FROM xc_alter_table_2 ORDER BY b;
@@ -190,13 +200,13 @@ SELECT a, a2, b, c FROM xc_alter_table_2 ORDER BY b;
-- Check for query generation of remote UPDATE
EXPLAIN (VERBOSE true, COSTS false, NODES false) UPDATE xc_alter_table_2 SET a = 200, a2 = 'CTO' WHERE b = 'John';
- QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Update on public.xc_alter_table_2
- Remote query: UPDATE ONLY public.xc_alter_table_2 SET a = $6, a2 = $7 WHERE ctid = $9
- -> Data Node Scan on xc_alter_table_2 "_REMOTE_TABLE_QUERY_"
- Output: NULL::integer, xc_alter_table_2.b, xc_alter_table_2.c, NULL::integer, NULL::integer, 200, 'CTO'::character varying(20), xc_alter_table_2.b, xc_alter_table_2.ctid
- Remote query: SELECT b, c, ctid FROM ONLY xc_alter_table_2 WHERE ((b)::text = 'John'::text)
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------
+ Remote Subquery Scan on any
+ -> Update on public.xc_alter_table_2
+ -> Seq Scan on public.xc_alter_table_2
+ Output: NULL::integer, b, c, NULL::integer, NULL::integer, 200, 'CTO'::character varying(20), b, ctid
+ Filter: ((xc_alter_table_2.b)::text = 'John'::text)
(5 rows)
UPDATE xc_alter_table_2 SET a = 200, a2 = 'CTO' WHERE b = 'John';
@@ -396,19 +406,18 @@ SELECT b FROM xc_alter_table_3 WHERE a = 11;
(1 row)
EXECUTE xc_alter_table_update(11, 'bb');
-ERROR: could not plan this distributed update
-DETAIL: correlated UPDATE or updating distribution column currently not supported in Postgres-XL.
SELECT b FROM xc_alter_table_3 WHERE a = 11;
- b
----
- b
+ b
+----
+ bb
(1 row)
EXECUTE xc_alter_table_delete(11);
SELECT b FROM xc_alter_table_3 WHERE a = 11 or a = 12;
- b
----
-(0 rows)
+ b
+-----
+ nnn
+(1 row)
EXECUTE xc_alter_table_delete(12);
-- Index and redistribution
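At the tail of xc_alter_table.out, the prepared UPDATE and DELETE now succeed after the preceding ALTER TABLE steps, so the expected output changes from errors to real row effects. The PREPARE statements themselves are defined outside this hunk; the pattern being exercised looks roughly like the sketch below, where the table and statement names are hypothetical stand-ins, not the ones from the test:

CREATE TABLE alter_demo (a int, b varchar(20)) DISTRIBUTE BY HASH(a);
INSERT INTO alter_demo VALUES (11, 'b'), (12, 'nnn');

PREPARE alter_demo_update(int, varchar) AS
    UPDATE alter_demo SET b = $2 WHERE a = $1;
PREPARE alter_demo_delete(int) AS
    DELETE FROM alter_demo WHERE a = $1;

-- previously the update failed with "could not plan this distributed update";
-- per the new expected output it now takes effect and the delete removes
-- only the matching row
EXECUTE alter_demo_update(11, 'bb');
EXECUTE alter_demo_delete(11);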
diff --git a/src/test/regress/expected/xc_remote.out b/src/test/regress/expected/xc_remote.out
index 82b0e0ed9b..301a5712ef 100644
--- a/src/test/regress/expected/xc_remote.out
+++ b/src/test/regress/expected/xc_remote.out
@@ -378,23 +378,24 @@ $$begin return 3;end $$ language plpgsql;
\set stmt 'update xcrem_employee E set salary = salary + salary + 0.3 * bonus WHERE SALARY > ( SELECT AVG(SALARY) FROM xcrem_employee WHERE SUBSTRING(E.WORKDEPT,1,1) = SUBSTRING(WORKDEPT, 1,1) )'
:stmt;
:EXP :stmt;
- QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Update on public.xcrem_employee e
- Node expr: e.empno
- Remote query: UPDATE ONLY public.xcrem_employee SET salary = $12 WHERE ctid = $17 AND xc_node_id = $18
- -> Data Node Scan on xcrem_employee "_REMOTE_TABLE_QUERY_"
- Output: e.empno, e.firstname, e.midinit, e.lastname, e.workdept, e.phoneno, e.hiredate, e.job, e.edlevel, e.sex, e.birthdate, ((e.salary + e.salary) + (0.3 * e.bonus)), e.bonus, e.comm, e.salary, e.workdept, e.ctid, e.xc_node_id
- Remote query: SELECT empno, firstname, midinit, lastname, workdept, phoneno, hiredate, job, edlevel, sex, birthdate, salary, bonus, comm, ctid, xc_node_id FROM ONLY xcrem_employee e WHERE true
- Coordinator quals: (e.salary > (SubPlan 1))
- SubPlan 1
- -> Aggregate
- Output: avg(xcrem_employee.salary)
- -> Data Node Scan on xcrem_employee "_REMOTE_TABLE_QUERY_"
- Output: xcrem_employee.salary, xcrem_employee.workdept
- Remote query: SELECT salary, workdept FROM ONLY xcrem_employee WHERE true
- Coordinator quals: ("substring"((e.workdept)::text, 1, 1) = "substring"((xcrem_employee.workdept)::text, 1, 1))
-(14 rows)
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Remote Subquery Scan on all
+ -> Update on public.xcrem_employee e
+ -> Seq Scan on public.xcrem_employee e
+ Output: e.empno, e.firstname, e.midinit, e.lastname, e.workdept, e.phoneno, e.hiredate, e.job, e.edlevel, e.sex, e.birthdate, ((e.salary + e.salary) + (0.3 * e.bonus)), e.bonus, e.comm, e.salary, e.workdept, e.ctid, e.xc_node_id
+ Filter: (e.salary > (SubPlan 1))
+ SubPlan 1
+ -> Aggregate
+ Output: pg_catalog.avg((avg(xcrem_employee.salary)))
+ -> Remote Subquery Scan on all
+ Output: avg(xcrem_employee.salary)
+ -> Aggregate
+ Output: avg(xcrem_employee.salary)
+ -> Seq Scan on public.xcrem_employee
+ Output: xcrem_employee.empno, xcrem_employee.firstname, xcrem_employee.midinit, xcrem_employee.lastname, xcrem_employee.workdept, xcrem_employee.phoneno, xcrem_employee.hiredate, xcrem_employee.job, xcrem_employee.edlevel, xcrem_employee.sex, xcrem_employee.birthdate, xcrem_employee.salary, xcrem_employee.bonus, xcrem_employee.comm
+ Filter: ("substring"((e.workdept)::text, 1, 1) = "substring"((xcrem_employee.workdept)::text, 1, 1))
+(15 rows)
:SEL;
empno | edlevel | lastname | salary | bonus
@@ -420,23 +421,24 @@ $$begin return 3;end $$ language plpgsql;
\set stmt 'update xcrem_employee E set bonus = bonus + salary* 0.3 WHERE EDLEVEL > ( SELECT AVG(EDLEVEL) FROM xcrem_employee WHERE WORKDEPT = E.WORKDEPT )'
:stmt;
:EXP :stmt;
- QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Update on public.xcrem_employee e
- Node expr: e.empno
- Remote query: UPDATE ONLY public.xcrem_employee SET bonus = $13 WHERE ctid = $17 AND xc_node_id = $18
- -> Data Node Scan on xcrem_employee "_REMOTE_TABLE_QUERY_"
- Output: e.empno, e.firstname, e.midinit, e.lastname, e.workdept, e.phoneno, e.hiredate, e.job, e.edlevel, e.sex, e.birthdate, e.salary, (e.bonus + (e.salary * 0.3)), e.comm, e.edlevel, e.workdept, e.ctid, e.xc_node_id
- Remote query: SELECT empno, firstname, midinit, lastname, workdept, phoneno, hiredate, job, edlevel, sex, birthdate, salary, bonus, comm, ctid, xc_node_id FROM ONLY xcrem_employee e WHERE true
- Coordinator quals: ((e.edlevel)::numeric > (SubPlan 1))
- SubPlan 1
- -> Aggregate
- Output: avg(xcrem_employee.edlevel)
- -> Data Node Scan on xcrem_employee "_REMOTE_TABLE_QUERY_"
- Output: xcrem_employee.edlevel, xcrem_employee.workdept
- Remote query: SELECT edlevel, workdept FROM ONLY xcrem_employee WHERE true
- Coordinator quals: (xcrem_employee.workdept = e.workdept)
-(14 rows)
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Remote Subquery Scan on all
+ -> Update on public.xcrem_employee e
+ -> Seq Scan on public.xcrem_employee e
+ Output: e.empno, e.firstname, e.midinit, e.lastname, e.workdept, e.phoneno, e.hiredate, e.job, e.edlevel, e.sex, e.birthdate, e.salary, (e.bonus + (e.salary * 0.3)), e.comm, e.edlevel, e.workdept, e.ctid, e.xc_node_id
+ Filter: ((e.edlevel)::numeric > (SubPlan 1))
+ SubPlan 1
+ -> Aggregate
+ Output: pg_catalog.avg((avg(xcrem_employee.edlevel)))
+ -> Remote Subquery Scan on all
+ Output: avg(xcrem_employee.edlevel)
+ -> Aggregate
+ Output: avg(xcrem_employee.edlevel)
+ -> Seq Scan on public.xcrem_employee
+ Output: xcrem_employee.empno, xcrem_employee.firstname, xcrem_employee.midinit, xcrem_employee.lastname, xcrem_employee.workdept, xcrem_employee.phoneno, xcrem_employee.hiredate, xcrem_employee.job, xcrem_employee.edlevel, xcrem_employee.sex, xcrem_employee.birthdate, xcrem_employee.salary, xcrem_employee.bonus, xcrem_employee.comm
+ Filter: (xcrem_employee.workdept = e.workdept)
+(15 rows)
:SEL;
empno | edlevel | lastname | salary | bonus
@@ -462,16 +464,14 @@ $$begin return 3;end $$ language plpgsql;
\set stmt 'update xcrem_employee E set lastname = lastname || ''suf'' WHERE EDLEVEL > volatile_func(2)'
:stmt;
:EXP :stmt;
- QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Update on public.xcrem_employee e
- Node expr: e.empno
- Remote query: UPDATE ONLY public.xcrem_employee SET lastname = $4 WHERE ctid = $16 AND xc_node_id = $17
- -> Data Node Scan on xcrem_employee "_REMOTE_TABLE_QUERY_"
- Output: e.empno, e.firstname, e.midinit, ((e.lastname)::text || 'suf'::text), e.workdept, e.phoneno, e.hiredate, e.job, e.edlevel, e.sex, e.birthdate, e.salary, e.bonus, e.comm, e.edlevel, e.ctid, e.xc_node_id
- Remote query: SELECT empno, firstname, midinit, lastname, workdept, phoneno, hiredate, job, edlevel, sex, birthdate, salary, bonus, comm, ctid, xc_node_id FROM ONLY xcrem_employee e WHERE true
- Coordinator quals: (e.edlevel > volatile_func(2))
-(7 rows)
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Remote Subquery Scan on all
+ -> Update on public.xcrem_employee e
+ -> Seq Scan on public.xcrem_employee e
+ Output: empno, firstname, midinit, ((lastname)::text || 'suf'::text), workdept, phoneno, hiredate, job, edlevel, sex, birthdate, salary, bonus, comm, edlevel, ctid, xc_node_id
+ Filter: (e.edlevel > volatile_func(2))
+(5 rows)
:SEL;
empno | edlevel | lastname | salary | bonus
@@ -497,16 +497,14 @@ $$begin return 3;end $$ language plpgsql;
\set stmt 'update xcrem_employee E set lastname = lastname || ''suf'', edlevel = edlevel+1 WHERE EDLEVEL > volatile_func(2)'
:stmt;
:EXP :stmt;
- QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- Update on public.xcrem_employee e
- Node expr: e.empno
- Remote query: UPDATE ONLY public.xcrem_employee SET lastname = $4, edlevel = $9 WHERE ctid = $16 AND xc_node_id = $17
- -> Data Node Scan on xcrem_employee "_REMOTE_TABLE_QUERY_"
- Output: e.empno, e.firstname, e.midinit, ((e.lastname)::text || 'suf'::text), e.workdept, e.phoneno, e.hiredate, e.job, (e.edlevel + 1), e.sex, e.birthdate, e.salary, e.bonus, e.comm, e.edlevel, e.ctid, e.xc_node_id
- Remote query: SELECT empno, firstname, midinit, lastname, workdept, phoneno, hiredate, job, edlevel, sex, birthdate, salary, bonus, comm, ctid, xc_node_id FROM ONLY xcrem_employee e WHERE true
- Coordinator quals: (e.edlevel > volatile_func(2))
-(7 rows)
+ QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Remote Subquery Scan on all
+ -> Update on public.xcrem_employee e
+ -> Seq Scan on public.xcrem_employee e
+ Output: empno, firstname, midinit, ((lastname)::text || 'suf'::text), workdept, phoneno, hiredate, job, (edlevel + 1), sex, birthdate, salary, bonus, comm, edlevel, ctid, xc_node_id
+ Filter: (e.edlevel > volatile_func(2))
+(5 rows)
:SEL;
empno | edlevel | lastname | salary | bonus
@@ -536,15 +534,14 @@ insert into xcrem_employee select * from xcrem_temptable;
\set stmt 'DELETE FROM xcrem_employee E WHERE EDLEVEL > volatile_func(2)'
:stmt;
:EXP :stmt;
- QUERY PLAN
-----------------------------------------------------------------------------------------------
- Delete on public.xcrem_employee e
- Remote query: DELETE FROM ONLY public.xcrem_employee WHERE ctid = $2 AND xc_node_id = $3
- -> Data Node Scan on xcrem_employee "_REMOTE_TABLE_QUERY_"
- Output: e.edlevel, e.ctid, e.xc_node_id
- Remote query: SELECT edlevel, ctid, xc_node_id FROM ONLY xcrem_employee e WHERE true
- Coordinator quals: (e.edlevel > volatile_func(2))
-(6 rows)
+ QUERY PLAN
+--------------------------------------------------------
+ Remote Subquery Scan on all
+ -> Delete on public.xcrem_employee e
+ -> Seq Scan on public.xcrem_employee e
+ Output: edlevel, ctid, xc_node_id, empno
+ Filter: (e.edlevel > volatile_func(2))
+(5 rows)
:SEL;
empno | edlevel | lastname | salary | bonus
@@ -556,22 +553,24 @@ insert into xcrem_employee select * from xcrem_temptable;
\set stmt 'DELETE FROM xcrem_employee E WHERE EDLEVEL > ( SELECT AVG(EDLEVEL) FROM xcrem_employee WHERE WORKDEPT = E.WORKDEPT )'
:stmt;
:EXP :stmt;
- QUERY PLAN
---------------------------------------------------------------------------------------------------------
- Delete on public.xcrem_employee e
- Remote query: DELETE FROM ONLY public.xcrem_employee WHERE ctid = $3 AND xc_node_id = $4
- -> Data Node Scan on xcrem_employee "_REMOTE_TABLE_QUERY_"
- Output: e.edlevel, e.workdept, e.ctid, e.xc_node_id
- Remote query: SELECT edlevel, workdept, ctid, xc_node_id FROM ONLY xcrem_employee e WHERE true
- Coordinator quals: ((e.edlevel)::numeric > (SubPlan 1))
- SubPlan 1
- -> Aggregate
- Output: avg(xcrem_employee.edlevel)
- -> Data Node Scan on xcrem_employee "_REMOTE_TABLE_QUERY_"
- Output: xcrem_employee.edlevel, xcrem_employee.workdept
- Remote query: SELECT edlevel, workdept FROM ONLY xcrem_employee WHERE true
- Coordinator quals: (xcrem_employee.workdept = e.workdept)
-(13 rows)
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Remote Subquery Scan on all
+ -> Delete on public.xcrem_employee e
+ -> Seq Scan on public.xcrem_employee e
+ Output: e.edlevel, e.workdept, e.ctid, e.xc_node_id, e.empno
+ Filter: ((e.edlevel)::numeric > (SubPlan 1))
+ SubPlan 1
+ -> Aggregate
+ Output: pg_catalog.avg((avg(xcrem_employee.edlevel)))
+ -> Remote Subquery Scan on all
+ Output: avg(xcrem_employee.edlevel)
+ -> Aggregate
+ Output: avg(xcrem_employee.edlevel)
+ -> Seq Scan on public.xcrem_employee
+ Output: xcrem_employee.empno, xcrem_employee.firstname, xcrem_employee.midinit, xcrem_employee.lastname, xcrem_employee.workdept, xcrem_employee.phoneno, xcrem_employee.hiredate, xcrem_employee.job, xcrem_employee.edlevel, xcrem_employee.sex, xcrem_employee.birthdate, xcrem_employee.salary, xcrem_employee.bonus, xcrem_employee.comm
+ Filter: (xcrem_employee.workdept = e.workdept)
+(15 rows)
:SEL;
empno | edlevel | lastname | salary | bonus
@@ -594,22 +593,24 @@ insert into xcrem_employee select * from xcrem_temptable;
\set stmt 'DELETE FROM xcrem_employee E WHERE SALARY > ( SELECT AVG(SALARY) FROM xcrem_employee WHERE SUBSTRING(E.WORKDEPT,1,1) = SUBSTRING(WORKDEPT, 1,1) )'
:stmt;
:EXP :stmt;
- QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------
- Delete on public.xcrem_employee e
- Remote query: DELETE FROM ONLY public.xcrem_employee WHERE ctid = $3 AND xc_node_id = $4
- -> Data Node Scan on xcrem_employee "_REMOTE_TABLE_QUERY_"
- Output: e.salary, e.workdept, e.ctid, e.xc_node_id
- Remote query: SELECT salary, workdept, ctid, xc_node_id FROM ONLY xcrem_employee e WHERE true
- Coordinator quals: (e.salary > (SubPlan 1))
- SubPlan 1
- -> Aggregate
- Output: avg(xcrem_employee.salary)
- -> Data Node Scan on xcrem_employee "_REMOTE_TABLE_QUERY_"
- Output: xcrem_employee.salary, xcrem_employee.workdept
- Remote query: SELECT salary, workdept FROM ONLY xcrem_employee WHERE true
- Coordinator quals: ("substring"((e.workdept)::text, 1, 1) = "substring"((xcrem_employee.workdept)::text, 1, 1))
-(13 rows)
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Remote Subquery Scan on all
+ -> Delete on public.xcrem_employee e
+ -> Seq Scan on public.xcrem_employee e
+ Output: e.salary, e.workdept, e.ctid, e.xc_node_id, e.empno
+ Filter: (e.salary > (SubPlan 1))
+ SubPlan 1
+ -> Aggregate
+ Output: pg_catalog.avg((avg(xcrem_employee.salary)))
+ -> Remote Subquery Scan on all
+ Output: avg(xcrem_employee.salary)
+ -> Aggregate
+ Output: avg(xcrem_employee.salary)
+ -> Seq Scan on public.xcrem_employee
+ Output: xcrem_employee.empno, xcrem_employee.firstname, xcrem_employee.midinit, xcrem_employee.lastname, xcrem_employee.workdept, xcrem_employee.phoneno, xcrem_employee.hiredate, xcrem_employee.job, xcrem_employee.edlevel, xcrem_employee.sex, xcrem_employee.birthdate, xcrem_employee.salary, xcrem_employee.bonus, xcrem_employee.comm
+ Filter: ("substring"((e.workdept)::text, 1, 1) = "substring"((xcrem_employee.workdept)::text, 1, 1))
+(15 rows)
:SEL;
empno | edlevel | lastname | salary | bonus