Diffstat (limited to 'contrib')
-rw-r--r--  contrib/Makefile | 6
-rwxr-xr-x [-rw-r--r--]  contrib/citext/Makefile | 2
-rw-r--r--  contrib/citext/expected/citext_1.out | 8
-rw-r--r--  contrib/citext/expected/xl_citext.out | 13
-rw-r--r--  contrib/citext/sql/citext.sql | 4
-rw-r--r--  contrib/citext/sql/xl_citext.sql | 14
-rw-r--r--  contrib/hstore/expected/hstore.out | 4
-rw-r--r--  contrib/hstore/hstore_io.c | 6
-rw-r--r--  contrib/hstore/sql/hstore.sql | 4
-rwxr-xr-x [-rw-r--r--]  contrib/ltree/Makefile | 2
-rw-r--r--  contrib/ltree/expected/ltree.out | 2
-rw-r--r--  contrib/ltree/expected/xl_ltree.out | 10
-rw-r--r--  contrib/ltree/sql/xl_ltree.sql | 10
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements.c | 32
-rw-r--r--  contrib/pg_trgm/expected/pg_trgm.out | 124
-rw-r--r--  contrib/pgxc_clean/.gitignore | 1
-rw-r--r--  contrib/pgxc_clean/Makefile | 34
-rw-r--r--  contrib/pgxc_clean/pgxc_clean.c | 1100
-rw-r--r--  contrib/pgxc_clean/pgxc_clean.h | 13
-rw-r--r--  contrib/pgxc_clean/pgxc_clean_test.sh | 85
-rw-r--r--  contrib/pgxc_clean/txninfo.c | 481
-rw-r--r--  contrib/pgxc_clean/txninfo.h | 95
-rw-r--r--  contrib/pgxc_ctl/.gitignore | 3
-rw-r--r--  contrib/pgxc_ctl/Makefile | 55
-rw-r--r--  contrib/pgxc_ctl/bash_handler.c | 79
-rw-r--r--  contrib/pgxc_ctl/bash_handler.h | 18
-rw-r--r--  contrib/pgxc_ctl/config.c | 1223
-rw-r--r--  contrib/pgxc_ctl/config.h | 45
-rw-r--r--  contrib/pgxc_ctl/coord_cmd.c | 2397
-rw-r--r--  contrib/pgxc_ctl/coord_cmd.h | 72
-rw-r--r--  contrib/pgxc_ctl/coord_command.h | 1
-rw-r--r--  contrib/pgxc_ctl/datanode_cmd.c | 2248
-rw-r--r--  contrib/pgxc_ctl/datanode_cmd.h | 74
-rw-r--r--  contrib/pgxc_ctl/do_command.c | 3050
-rw-r--r--  contrib/pgxc_ctl/do_command.h | 19
-rw-r--r--  contrib/pgxc_ctl/do_shell.c | 741
-rw-r--r--  contrib/pgxc_ctl/do_shell.h | 114
-rw-r--r--  contrib/pgxc_ctl/gtm_cmd.c | 1580
-rw-r--r--  contrib/pgxc_ctl/gtm_cmd.h | 75
-rw-r--r--  contrib/pgxc_ctl/gtm_util.c | 168
-rw-r--r--  contrib/pgxc_ctl/gtm_util.h | 23
-rwxr-xr-x  contrib/pgxc_ctl/make_signature | 202
-rw-r--r--  contrib/pgxc_ctl/mcxt.c | 77
-rw-r--r--  contrib/pgxc_ctl/monitor.c | 476
-rw-r--r--  contrib/pgxc_ctl/monitor.h | 18
-rw-r--r--  contrib/pgxc_ctl/pgxc_ctl.c | 625
-rw-r--r--  contrib/pgxc_ctl/pgxc_ctl.h | 58
-rwxr-xr-x  contrib/pgxc_ctl/pgxc_ctl_bash_2 | 314
-rw-r--r--  contrib/pgxc_ctl/pgxc_ctl_conf_part_empty | 267
-rwxr-xr-x  contrib/pgxc_ctl/pgxc_ctl_conf_part_full | 326
-rw-r--r--  contrib/pgxc_ctl/pgxc_ctl_conf_part_minimal | 269
-rw-r--r--  contrib/pgxc_ctl/pgxc_ctl_log.c | 333
-rw-r--r--  contrib/pgxc_ctl/pgxc_ctl_log.h | 63
-rwxr-xr-x  contrib/pgxc_ctl/t/005_pgxc_ctl_minimal.pl | 30
-rwxr-xr-x  contrib/pgxc_ctl/t/006_parallel_analyze.pl | 83
-rwxr-xr-x  contrib/pgxc_ctl/t/007_role_recreate.pl | 53
-rwxr-xr-x  contrib/pgxc_ctl/t/010_pgxc_ctl.pl | 83
-rwxr-xr-x  contrib/pgxc_ctl/t/020_prepared_txns.pl | 206
-rwxr-xr-x  contrib/pgxc_ctl/t/030_pgxc_ctl_file_sanity_check.pl | 375
-rwxr-xr-x  contrib/pgxc_ctl/t/analyze_verbose.sql | 1
-rwxr-xr-x  contrib/pgxc_ctl/t/prep_tx1.sql | 23
-rwxr-xr-x  contrib/pgxc_ctl/t/prep_tx2.sql | 23
-rwxr-xr-x  contrib/pgxc_ctl/t/prep_tx3.sql | 23
-rwxr-xr-x  contrib/pgxc_ctl/t/prep_tx4.sql | 23
-rwxr-xr-x  contrib/pgxc_ctl/t/role_recreate.sql | 1200
-rw-r--r--  contrib/pgxc_ctl/utils.c | 462
-rw-r--r--  contrib/pgxc_ctl/utils.h | 51
-rw-r--r--  contrib/pgxc_ctl/variables.c | 473
-rw-r--r--  contrib/pgxc_ctl/variables.h | 88
-rw-r--r--  contrib/pgxc_ctl/varnames.h | 152
-rw-r--r--  contrib/pgxc_ddl/README | 47
-rw-r--r--  contrib/pgxc_ddl/pgxc.conf.sample | 20
-rw-r--r--  contrib/pgxc_ddl/pgxc_ddl | 443
-rw-r--r--  contrib/pgxc_monitor/.gitignore | 1
-rw-r--r--  contrib/pgxc_monitor/Makefile | 35
-rw-r--r--  contrib/pgxc_monitor/mcxt.c | 77
-rw-r--r--  contrib/pgxc_monitor/pgxc_monitor.c | 279
-rw-r--r--  contrib/sepgsql/hooks.c | 11
-rw-r--r--  contrib/stormstats/Makefile | 15
-rw-r--r--  contrib/stormstats/stormstats--1.0.sql | 17
-rw-r--r--  contrib/stormstats/stormstats--unpackaged--1.0.sql | 5
-rw-r--r--  contrib/stormstats/stormstats.c | 897
-rw-r--r--  contrib/stormstats/stormstats.control | 5
-rw-r--r--  contrib/stormstats/stormstats.h | 9
-rw-r--r--  contrib/tsm_system_time/expected/tsm_system_time.out | 71
-rw-r--r--  contrib/tsm_system_time/expected/xl_known_bugs.out | 33
-rw-r--r--  contrib/tsm_system_time/sql/xl_known_bugs.sql | 18
87 files changed, 22281 insertions, 114 deletions
diff --git a/contrib/Makefile b/contrib/Makefile
index e84eb67008..d250341270 100644
--- a/contrib/Makefile
+++ b/contrib/Makefile
@@ -39,6 +39,9 @@ SUBDIRS = \
pgcrypto \
pgrowlocks \
pgstattuple \
+ pgxc_clean \
+ pgxc_ctl \
+ pgxc_monitor \
pg_visibility \
postgres_fdw \
seg \
@@ -49,7 +52,8 @@ SUBDIRS = \
tsm_system_rows \
tsm_system_time \
unaccent \
- vacuumlo
+ vacuumlo \
+ stormstats
ifeq ($(with_openssl),yes)
SUBDIRS += sslinfo
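With pgxc_clean, pgxc_ctl, pgxc_monitor, and stormstats added to SUBDIRS, the new tools build as part of the ordinary contrib pass. A minimal sketch of the expected invocations, assuming an in-tree build and the standard PostgreSQL contrib targets:

    # Build and install everything under contrib/, including the new tools:
    make -C contrib all install

    # Or build just one of the new subdirectories:
    make -C contrib/pgxc_clean all install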
diff --git a/contrib/citext/Makefile b/contrib/citext/Makefile
index 563cd22dcc..a6e026dafb 100644..100755
--- a/contrib/citext/Makefile
+++ b/contrib/citext/Makefile
@@ -8,7 +8,7 @@ DATA = citext--1.4.sql citext--1.3--1.4.sql \
citext--1.0--1.1.sql citext--unpackaged--1.0.sql
PGFILEDESC = "citext - case-insensitive character string data type"
-REGRESS = citext
+REGRESS = citext xl_citext
ifdef USE_PGXS
PG_CONFIG = pg_config
diff --git a/contrib/citext/expected/citext_1.out b/contrib/citext/expected/citext_1.out
index d1fb1e14e0..fe745a1f28 100644
--- a/contrib/citext/expected/citext_1.out
+++ b/contrib/citext/expected/citext_1.out
@@ -510,13 +510,13 @@ SELECT name FROM srt WHERE name !~ 'A$' ORDER BY name;
(3 rows)
-- SIMILAR TO should be case-insensitive.
-SELECT name FROM srt WHERE name SIMILAR TO '%a.*';
+SELECT name FROM srt WHERE name SIMILAR TO '%a.*' order by name;
name
------
ABA
(1 row)
-SELECT name FROM srt WHERE name SIMILAR TO '%A.*';
+SELECT name FROM srt WHERE name SIMILAR TO '%A.*' order by name;
name
------
ABA
@@ -2341,11 +2341,13 @@ SELECT *
(2 rows)
REFRESH MATERIALIZED VIEW CONCURRENTLY citext_matview;
+ERROR: schema "pg_temp_2" does not exist
+CONTEXT: SQL statement "ANALYZE pg_temp_2.pg_temp_16514_2"
SELECT * FROM citext_matview ORDER BY id;
id | name
----+-------
1 | one
- 2 | Two
+ 2 | two
3 | three
4 |
5 |
diff --git a/contrib/citext/expected/xl_citext.out b/contrib/citext/expected/xl_citext.out
new file mode 100644
index 0000000000..5da4257924
--- /dev/null
+++ b/contrib/citext/expected/xl_citext.out
@@ -0,0 +1,13 @@
+--
+-- Test citext datatype
+--
+--Columns of types “citext” and “ltree” cannot be used as distribution columns
+-- citext - case insensitive text
+CREATE TABLE xl_dc26 (
+ product_no integer,
+ product_id citext PRIMARY KEY,
+ name MONEY,
+ purchase_date TIMETZ,
+ price numeric
+) DISTRIBUTE BY HASH (product_id); --fail
+ERROR: Column product_id is not a hash distributable data type
diff --git a/contrib/citext/sql/citext.sql b/contrib/citext/sql/citext.sql
index f70f9ebae9..3bf7da2ff4 100644
--- a/contrib/citext/sql/citext.sql
+++ b/contrib/citext/sql/citext.sql
@@ -177,8 +177,8 @@ SELECT name FROM srt WHERE name ~ '^A' ORDER BY name;
SELECT name FROM srt WHERE name !~ 'A$' ORDER BY name;
-- SIMILAR TO should be case-insensitive.
-SELECT name FROM srt WHERE name SIMILAR TO '%a.*';
-SELECT name FROM srt WHERE name SIMILAR TO '%A.*';
+SELECT name FROM srt WHERE name SIMILAR TO '%a.*' order by name;
+SELECT name FROM srt WHERE name SIMILAR TO '%A.*' order by name;
-- Explicit casts.
SELECT true::citext = 'true' AS t;
diff --git a/contrib/citext/sql/xl_citext.sql b/contrib/citext/sql/xl_citext.sql
new file mode 100644
index 0000000000..0cd48314f3
--- /dev/null
+++ b/contrib/citext/sql/xl_citext.sql
@@ -0,0 +1,14 @@
+--
+-- Test citext datatype
+--
+
+--Columns of types “citext” and “ltree” cannot be used as distribution columns
+-- citext - case insensitive text
+CREATE TABLE xl_dc26 (
+ product_no integer,
+ product_id citext PRIMARY KEY,
+ name MONEY,
+ purchase_date TIMETZ,
+ price numeric
+) DISTRIBUTE BY HASH (product_id); --fail
+
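The new test pins down the restriction that citext is not hash-distributable. As a hedged companion sketch (table names are hypothetical; DISTRIBUTE BY syntax as used elsewhere in this patch), hashing on the plain integer column, or falling back to round-robin distribution, is expected to succeed where the citext column fails:

    psql -e <<'EOF'
    -- Hash-distribute on the integer column instead of the citext one:
    CREATE TABLE xl_dc26b (
        product_no integer,
        product_id citext,
        price numeric
    ) DISTRIBUTE BY HASH (product_no);
    -- Or sidestep hashing entirely:
    CREATE TABLE xl_dc26c (
        product_no integer,
        product_id citext,
        price numeric
    ) DISTRIBUTE BY ROUNDROBIN;
    DROP TABLE xl_dc26b, xl_dc26c;
    EOF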
diff --git a/contrib/hstore/expected/hstore.out b/contrib/hstore/expected/hstore.out
index f0d421602d..b43da92815 100644
--- a/contrib/hstore/expected/hstore.out
+++ b/contrib/hstore/expected/hstore.out
@@ -1501,14 +1501,14 @@ select hstore_to_jsonb_loose('"a key" =>1, b => t, c => null, d=> 12345, e => 01
create table test_json_agg (f1 text, f2 hstore);
insert into test_json_agg values ('rec1','"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4'),
('rec2','"a key" =>2, b => f, c => "null", d=> -12345, e => 012345.6, f=> -1.234, g=> 0.345e-4');
-select json_agg(q) from test_json_agg q;
+select json_agg(q order by (f1)) from test_json_agg q;
json_agg
----------------------------------------------------------------------------------------------------------------------------
[{"f1":"rec1","f2":{"b": "t", "c": null, "d": "12345", "e": "012345", "f": "1.234", "g": "2.345e+4", "a key": "1"}}, +
{"f1":"rec2","f2":{"b": "f", "c": "null", "d": "-12345", "e": "012345.6", "f": "-1.234", "g": "0.345e-4", "a key": "2"}}]
(1 row)
-select json_agg(q) from (select f1, hstore_to_json_loose(f2) as f2 from test_json_agg) q;
+select json_agg(q order by f1) from (select f1, hstore_to_json_loose(f2) as f2 from test_json_agg) q;
json_agg
----------------------------------------------------------------------------------------------------------------------
[{"f1":"rec1","f2":{"b": true, "c": null, "d": 12345, "e": "012345", "f": 1.234, "g": 2.345e+4, "a key": 1}}, +
diff --git a/contrib/hstore/hstore_io.c b/contrib/hstore/hstore_io.c
index 1cecf86004..191fc48c7a 100644
--- a/contrib/hstore/hstore_io.c
+++ b/contrib/hstore/hstore_io.c
@@ -838,6 +838,9 @@ hstore_from_record(PG_FUNCTION_ARGS)
tuple.t_len = HeapTupleHeaderGetDatumLength(rec);
ItemPointerSetInvalid(&(tuple.t_self));
tuple.t_tableOid = InvalidOid;
+#ifdef PGXC
+ tuple.t_xc_node_id = 0;
+#endif
tuple.t_data = rec;
values = (Datum *) palloc(ncolumns * sizeof(Datum));
@@ -983,6 +986,9 @@ hstore_populate_record(PG_FUNCTION_ARGS)
tuple.t_len = HeapTupleHeaderGetDatumLength(rec);
ItemPointerSetInvalid(&(tuple.t_self));
tuple.t_tableOid = InvalidOid;
+#ifdef PGXC
+ tuple.t_xc_node_id = 0;
+#endif
tuple.t_data = rec;
}
diff --git a/contrib/hstore/sql/hstore.sql b/contrib/hstore/sql/hstore.sql
index d64b9f77c7..57f9494325 100644
--- a/contrib/hstore/sql/hstore.sql
+++ b/contrib/hstore/sql/hstore.sql
@@ -348,5 +348,5 @@ select hstore_to_jsonb_loose('"a key" =>1, b => t, c => null, d=> 12345, e => 01
create table test_json_agg (f1 text, f2 hstore);
insert into test_json_agg values ('rec1','"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4'),
('rec2','"a key" =>2, b => f, c => "null", d=> -12345, e => 012345.6, f=> -1.234, g=> 0.345e-4');
-select json_agg(q) from test_json_agg q;
-select json_agg(q) from (select f1, hstore_to_json_loose(f2) as f2 from test_json_agg) q;
+select json_agg(q order by (f1)) from test_json_agg q;
+select json_agg(q order by f1) from (select f1, hstore_to_json_loose(f2) as f2 from test_json_agg) q;
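The ORDER BY added inside json_agg() is what makes these expected outputs stable: on a distributed table the rows stream back from the datanodes in an arbitrary interleaving, so an unordered aggregate can emit the records in either order. A minimal sketch of the same idiom against a hypothetical table:

    psql -e <<'EOF'
    CREATE TABLE agg_demo (k int, v text) DISTRIBUTE BY HASH (k);
    INSERT INTO agg_demo VALUES (1, 'a'), (2, 'b'), (3, 'c');
    -- Ordering inside the aggregate pins the output regardless of
    -- which datanode answers first:
    SELECT json_agg(q ORDER BY k) FROM agg_demo q;
    DROP TABLE agg_demo;
    EOF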
diff --git a/contrib/ltree/Makefile b/contrib/ltree/Makefile
index c101603e6c..39ecd60804 100644..100755
--- a/contrib/ltree/Makefile
+++ b/contrib/ltree/Makefile
@@ -9,7 +9,7 @@ EXTENSION = ltree
DATA = ltree--1.1.sql ltree--1.0--1.1.sql ltree--unpackaged--1.0.sql
PGFILEDESC = "ltree - hierarchical label data type"
-REGRESS = ltree
+REGRESS = ltree xl_ltree
ifdef USE_PGXS
PG_CONFIG = pg_config
diff --git a/contrib/ltree/expected/ltree.out b/contrib/ltree/expected/ltree.out
index 3d5737d41b..197718e950 100644
--- a/contrib/ltree/expected/ltree.out
+++ b/contrib/ltree/expected/ltree.out
@@ -3330,6 +3330,7 @@ SELECT * FROM ltreetest WHERE t ? '{23.*.1,23.*.2}' order by t asc;
(4 rows)
create unique index tstidx on ltreetest (t);
+ERROR: Cannot locally enforce a unique index on round robin distributed table.
set enable_seqscan=off;
SELECT * FROM ltreetest WHERE t < '12.3' order by t asc;
t
@@ -5370,6 +5371,7 @@ SELECT * FROM ltreetest WHERE t > '12.3' order by t asc;
(882 rows)
drop index tstidx;
+ERROR: index "tstidx" does not exist
create index tstidx on ltreetest using gist (t);
set enable_seqscan=off;
SELECT * FROM ltreetest WHERE t < '12.3' order by t asc;
diff --git a/contrib/ltree/expected/xl_ltree.out b/contrib/ltree/expected/xl_ltree.out
new file mode 100644
index 0000000000..75441a7e41
--- /dev/null
+++ b/contrib/ltree/expected/xl_ltree.out
@@ -0,0 +1,10 @@
+--Columns of types “citext” and “ltree” cannot be used as distribution columns
+--ltree - labels of data stored in a hierarchical tree-like structure
+CREATE TABLE xl_dc27 (
+ product_no integer,
+ product_id ltree PRIMARY KEY,
+ name MONEY,
+ purchase_date TIMETZ,
+ price numeric
+) DISTRIBUTE BY HASH (product_id); --fail
+ERROR: Column product_id is not a hash distributable data type
diff --git a/contrib/ltree/sql/xl_ltree.sql b/contrib/ltree/sql/xl_ltree.sql
new file mode 100644
index 0000000000..2fc90b3d4d
--- /dev/null
+++ b/contrib/ltree/sql/xl_ltree.sql
@@ -0,0 +1,10 @@
+
+--Columns of types “citext” and “ltree” cannot be used as distribution columns
+--ltree - labels of data stored in a hierarchical tree-like structure
+CREATE TABLE xl_dc27 (
+ product_no integer,
+ product_id ltree PRIMARY KEY,
+ name MONEY,
+ purchase_date TIMETZ,
+ price numeric
+) DISTRIBUTE BY HASH (product_id); --fail
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index bf03e67513..3c35604b5d 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -300,7 +300,11 @@ static void pgss_ExecutorEnd(QueryDesc *queryDesc);
static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
ProcessUtilityContext context, ParamListInfo params,
QueryEnvironment *queryEnv,
- DestReceiver *dest, char *completionTag);
+ DestReceiver *dest,
+#ifdef PGXC
+ bool sentToRemote,
+#endif /* PGXC */
+ char *completionTag);
static uint32 pgss_hash_fn(const void *key, Size keysize);
static int pgss_match_fn(const void *key1, const void *key2, Size keysize);
static uint32 pgss_hash_string(const char *str, int len);
@@ -957,9 +961,13 @@ pgss_ExecutorEnd(QueryDesc *queryDesc)
*/
static void
pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
- ProcessUtilityContext context,
- ParamListInfo params, QueryEnvironment *queryEnv,
- DestReceiver *dest, char *completionTag)
+ ProcessUtilityContext context, ParamListInfo params,
+ QueryEnvironment *queryEnv,
+ DestReceiver *dest,
+#ifdef PGXC
+ bool sentToRemote,
+#endif /* PGXC */
+ char *completionTag)
{
Node *parsetree = pstmt->utilityStmt;
@@ -997,11 +1005,15 @@ pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
if (prev_ProcessUtility)
prev_ProcessUtility(pstmt, queryString,
context, params, queryEnv,
- dest, completionTag);
+ dest,
+ sentToRemote,
+ completionTag);
else
standard_ProcessUtility(pstmt, queryString,
context, params, queryEnv,
- dest, completionTag);
+ dest,
+ sentToRemote,
+ completionTag);
nested_level--;
}
PG_CATCH();
@@ -1061,11 +1073,15 @@ pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
if (prev_ProcessUtility)
prev_ProcessUtility(pstmt, queryString,
context, params, queryEnv,
- dest, completionTag);
+ dest,
+ sentToRemote,
+ completionTag);
else
standard_ProcessUtility(pstmt, queryString,
context, params, queryEnv,
- dest, completionTag);
+ dest,
+ sentToRemote,
+ completionTag);
}
}
diff --git a/contrib/pg_trgm/expected/pg_trgm.out b/contrib/pg_trgm/expected/pg_trgm.out
index c3304b0ceb..81ccc594d4 100644
--- a/contrib/pg_trgm/expected/pg_trgm.out
+++ b/contrib/pg_trgm/expected/pg_trgm.out
@@ -2338,12 +2338,14 @@ select t,similarity(t,'gwertyu1988') as sml from test_trgm where t % 'gwertyu198
explain (costs off)
select t <-> 'q0987wertyu0988', t from test_trgm order by t <-> 'q0987wertyu0988' limit 2;
- QUERY PLAN
----------------------------------------------------
+ QUERY PLAN
+---------------------------------------------------------------
Limit
- -> Index Scan using trgm_idx on test_trgm
- Order By: (t <-> 'q0987wertyu0988'::text)
-(3 rows)
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Limit
+ -> Index Scan using trgm_idx on test_trgm
+ Order By: (t <-> 'q0987wertyu0988'::text)
+(5 rows)
select t <-> 'q0987wertyu0988', t from test_trgm order by t <-> 'q0987wertyu0988' limit 2;
?column? | t
@@ -3502,23 +3504,27 @@ create index test2_idx_gin on test2 using gin (t gin_trgm_ops);
set enable_seqscan=off;
explain (costs off)
select * from test2 where t like '%BCD%';
- QUERY PLAN
-------------------------------------------
- Bitmap Heap Scan on test2
- Recheck Cond: (t ~~ '%BCD%'::text)
- -> Bitmap Index Scan on test2_idx_gin
- Index Cond: (t ~~ '%BCD%'::text)
-(4 rows)
+ QUERY PLAN
+------------------------------------------------
+ Remote Fast Query Execution
+ Node/s: datanode_1, datanode_2
+ -> Bitmap Heap Scan on test2
+ Recheck Cond: (t ~~ '%BCD%'::text)
+ -> Bitmap Index Scan on test2_idx_gin
+ Index Cond: (t ~~ '%BCD%'::text)
+(6 rows)
explain (costs off)
select * from test2 where t ilike '%BCD%';
- QUERY PLAN
--------------------------------------------
- Bitmap Heap Scan on test2
- Recheck Cond: (t ~~* '%BCD%'::text)
- -> Bitmap Index Scan on test2_idx_gin
- Index Cond: (t ~~* '%BCD%'::text)
-(4 rows)
+ QUERY PLAN
+-------------------------------------------------
+ Remote Fast Query Execution
+ Node/s: datanode_1, datanode_2
+ -> Bitmap Heap Scan on test2
+ Recheck Cond: (t ~~* '%BCD%'::text)
+ -> Bitmap Index Scan on test2_idx_gin
+ Index Cond: (t ~~* '%BCD%'::text)
+(6 rows)
select * from test2 where t like '%BCD%';
t
@@ -3563,23 +3569,27 @@ select * from test2 where t like ' z foo%';
explain (costs off)
select * from test2 where t ~ '[abc]{3}';
- QUERY PLAN
---------------------------------------------
- Bitmap Heap Scan on test2
- Recheck Cond: (t ~ '[abc]{3}'::text)
- -> Bitmap Index Scan on test2_idx_gin
- Index Cond: (t ~ '[abc]{3}'::text)
-(4 rows)
+ QUERY PLAN
+--------------------------------------------------
+ Remote Fast Query Execution
+ Node/s: datanode_1, datanode_2
+ -> Bitmap Heap Scan on test2
+ Recheck Cond: (t ~ '[abc]{3}'::text)
+ -> Bitmap Index Scan on test2_idx_gin
+ Index Cond: (t ~ '[abc]{3}'::text)
+(6 rows)
explain (costs off)
select * from test2 where t ~* 'DEF';
- QUERY PLAN
-------------------------------------------
- Bitmap Heap Scan on test2
- Recheck Cond: (t ~* 'DEF'::text)
- -> Bitmap Index Scan on test2_idx_gin
- Index Cond: (t ~* 'DEF'::text)
-(4 rows)
+ QUERY PLAN
+------------------------------------------------
+ Remote Fast Query Execution
+ Node/s: datanode_1, datanode_2
+ -> Bitmap Heap Scan on test2
+ Recheck Cond: (t ~* 'DEF'::text)
+ -> Bitmap Index Scan on test2_idx_gin
+ Index Cond: (t ~* 'DEF'::text)
+(6 rows)
select * from test2 where t ~ '[abc]{3}';
t
@@ -3703,19 +3713,23 @@ create index test2_idx_gist on test2 using gist (t gist_trgm_ops);
set enable_seqscan=off;
explain (costs off)
select * from test2 where t like '%BCD%';
- QUERY PLAN
-------------------------------------------
- Index Scan using test2_idx_gist on test2
- Index Cond: (t ~~ '%BCD%'::text)
-(2 rows)
+ QUERY PLAN
+------------------------------------------------
+ Remote Fast Query Execution
+ Node/s: datanode_1, datanode_2
+ -> Index Scan using test2_idx_gist on test2
+ Index Cond: (t ~~ '%BCD%'::text)
+(4 rows)
explain (costs off)
select * from test2 where t ilike '%BCD%';
- QUERY PLAN
-------------------------------------------
- Index Scan using test2_idx_gist on test2
- Index Cond: (t ~~* '%BCD%'::text)
-(2 rows)
+ QUERY PLAN
+------------------------------------------------
+ Remote Fast Query Execution
+ Node/s: datanode_1, datanode_2
+ -> Index Scan using test2_idx_gist on test2
+ Index Cond: (t ~~* '%BCD%'::text)
+(4 rows)
select * from test2 where t like '%BCD%';
t
@@ -3760,19 +3774,23 @@ select * from test2 where t like ' z foo%';
explain (costs off)
select * from test2 where t ~ '[abc]{3}';
- QUERY PLAN
-------------------------------------------
- Index Scan using test2_idx_gist on test2
- Index Cond: (t ~ '[abc]{3}'::text)
-(2 rows)
+ QUERY PLAN
+------------------------------------------------
+ Remote Fast Query Execution
+ Node/s: datanode_1, datanode_2
+ -> Index Scan using test2_idx_gist on test2
+ Index Cond: (t ~ '[abc]{3}'::text)
+(4 rows)
explain (costs off)
select * from test2 where t ~* 'DEF';
- QUERY PLAN
-------------------------------------------
- Index Scan using test2_idx_gist on test2
- Index Cond: (t ~* 'DEF'::text)
-(2 rows)
+ QUERY PLAN
+------------------------------------------------
+ Remote Fast Query Execution
+ Node/s: datanode_1, datanode_2
+ -> Index Scan using test2_idx_gist on test2
+ Index Cond: (t ~* 'DEF'::text)
+(4 rows)
select * from test2 where t ~ '[abc]{3}';
t
diff --git a/contrib/pgxc_clean/.gitignore b/contrib/pgxc_clean/.gitignore
new file mode 100644
index 0000000000..30575370c4
--- /dev/null
+++ b/contrib/pgxc_clean/.gitignore
@@ -0,0 +1 @@
+/pgxc_clean
diff --git a/contrib/pgxc_clean/Makefile b/contrib/pgxc_clean/Makefile
new file mode 100644
index 0000000000..4938573d35
--- /dev/null
+++ b/contrib/pgxc_clean/Makefile
@@ -0,0 +1,34 @@
+#-------------------------------------------------------------------------
+#
+# Makefile for contrib/pgxc_clean
+#
+# Portions Copyright (c) 2011-2012 Postgres-XC Development Group
+#
+# $PostgreSQL$
+#
+#-------------------------------------------------------------------------
+
+PGFILEDESC = "pgxc_clean - resolve outstanding prepared transactions on a Postgres-XC Coordinator"
+PGAPPICON = win32
+
+PROGRAM= pgxc_clean
+OBJS= pgxc_clean.o txninfo.o
+
+#Include GTM objects
+gtm_builddir = $(top_builddir)/src/gtm
+EX_OBJS = $(gtm_builddir)/common/assert.o \
+ $(gtm_builddir)/client/libgtmclient.a
+
+PG_CPPFLAGS = -DFRONTEND -DDLSUFFIX=\"$(DLSUFFIX)\" -I$(srcdir) -I$(libpq_srcdir)
+PG_LIBS = $(libpq_pgport) $(PTHREAD_LIBS)
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = contrib/pgxc_clean
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/contrib/pgxc_clean/pgxc_clean.c b/contrib/pgxc_clean/pgxc_clean.c
new file mode 100644
index 0000000000..d96cdf85fd
--- /dev/null
+++ b/contrib/pgxc_clean/pgxc_clean.c
@@ -0,0 +1,1100 @@
+/*
+ * ------------------------------------------------------------------------
+ *
+ * pgxc_clean utility
+ *
+ * Recovers outstanding 2PC transactions after crashed nodes or the
+ * entire cluster are recovered.
+ *
+ * Depending upon how the nodes or the XC cluster failed, there can be
+ * outstanding 2PC transactions which are partly prepared and partly
+ * committed/aborted. Such transactions must be committed or aborted
+ * to remove them from the snapshot.
+ *
+ * This utility checks if there are such outstanding transactions and
+ * cleans them up.
+ *
+ * Command syntax
+ *
+ * pgxc_clean [option ... ] [database] [user]
+ *
+ * Options are:
+ *
+ * -a, --all clean up all the available databases
+ * -d, --dbname=DBNAME database name to clean up. Multiple -d options
+ * can be specified.
+ * -h, --host=HOSTNAME Coordinator hostname to connect to.
+ * -N, --no-clean test only; do not actually clean up.
+ * -o, --output=FILENAME output file name.
+ * -p, --port=PORT Coordinator port number.
+ * -q, --quiet do not print messages except for errors (default).
+ * -s, --status prints out 2PC status.
+ * -U, --username=USERNAME database user name
+ * -v, --verbose same as -s, plus prints the result of each cleanup.
+ * -V, --version prints out the version.
+ * -w, --no-password never prompt for the password.
+ * -W, --password prompt for the password.
+ * -?, --help prints help message
+ *
+ * ------------------------------------------------------------------------
+ */
+
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <pwd.h>
+#include <errno.h>
+#include <string.h>
+#include "libpq-fe.h"
+#include "pg_config.h"
+#include "getopt_long.h"
+#include "pgxc_clean.h"
+#include "txninfo.h"
+#include "port.h"
+
+/* Who I am */
+const char *progname;
+char *my_nodename;
+int my_nodeidx = -1; /* Index in pgxc_clean_node_info */
+
+/* Databases to clean */
+bool clean_all_databases = false; /* "--all" overrides specific database specification */
+
+database_names *head_database_names = NULL;
+database_names *last_database_names = NULL;
+
+/* Coordinator to connect to */
+char *coordinator_host = NULL;
+int coordinator_port = -1;
+
+typedef enum passwd_opt
+{
+ TRI_DEFAULT,
+ TRI_YES,
+ TRI_NO
+} passwd_opt;
+
+/* Miscellaneous */
+char *output_filename = NULL;
+char *username = NULL;
+bool version_opt = false;
+passwd_opt try_password_opt = TRI_DEFAULT;
+bool status_opt = false;
+bool no_clean_opt = false;
+bool verbose_opt = false;
+FILE *outf;
+FILE *errf;
+
+/* Global variables */
+node_info *pgxc_clean_node_info;
+int pgxc_clean_node_count;
+
+database_info *head_database_info;
+database_info *last_database_info;
+
+static char *password = NULL;
+static char password_prompt[256];
+
+/* Funcs */
+static void add_to_database_list(char *dbname);
+static void parse_pgxc_clean_options(int argc, char *argv[]);
+static void usage(void);
+static char *GetUserName(void);
+static void showVersion(void);
+static PGconn *loginDatabase(char *host, int port, char *user, char *password,
+ char *dbname, const char *progname, char *encoding, char *password_prompt);
+static void getMyNodename(PGconn *conn);
+static void recover2PCForDatabase(database_info *db_info);
+static void recover2PC(PGconn *conn, txn_info *txn);
+static void getDatabaseList(PGconn *conn);
+static void getNodeList(PGconn *conn);
+static void getPreparedTxnList(PGconn *conn);
+static void getTxnInfoOnOtherNodesAll(PGconn *conn);
+static void do_commit(PGconn *conn, txn_info *txn);
+static void do_abort(PGconn *conn, txn_info *txn);
+static void do_commit_abort(PGconn *conn, txn_info *txn, bool is_commit);
+static bool setMaintenanceMode(PGconn *conn);
+
+/*
+ * Connection to the Coordinator
+ */
+PGconn *coord_conn;
+
+/*
+ *
+ * Main
+ *
+ */
+int main(int argc, char *argv[])
+{
+
+ /* Should set up pglocale when it is supported by the XC core */
+
+ if (argc > 1)
+ {
+ if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
+ {
+ usage();
+ exit(0);
+ }
+ if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
+ {
+ showVersion();
+ exit(0);
+ }
+ }
+ parse_pgxc_clean_options(argc, argv);
+
+ /*
+ * Check missing arguments
+ */
+ if (clean_all_databases == false && head_database_names == NULL)
+ {
+ fprintf(stderr, "%s: you must specify -a or -d option.\n", progname);
+ exit(1);
+ }
+
+ /*
+ * Arrange my environment
+ */
+ if (output_filename)
+ {
+ /* Prepare output filename */
+ outf = fopen(output_filename, "w");
+ if (outf == NULL)
+ {
+ fprintf(stderr, "%s: Cannot ope output file %s (%s)\n", progname, output_filename, strerror(errno));
+ exit(1);
+ }
+ errf = outf;
+ }
+ else
+ {
+ outf = stdout;
+ errf = stderr;
+ }
+ if (coordinator_host == NULL)
+ {
+ /* Default Coordinator host */
+ if ((coordinator_host = getenv("PGHOST")) == NULL)
+ coordinator_host = "localhost";
+ }
+ if (coordinator_port == -1)
+ {
+ /* Default Coordinator port */
+ char *pgport;
+
+ if ((pgport = getenv("PGPORT")) == NULL)
+ coordinator_port = DEF_PGPORT; /* pg_config.h */
+ else
+ coordinator_port = atoi(pgport);
+ }
+ if (username == NULL)
+ strcpy(password_prompt, "Password: ");
+ else
+ sprintf(password_prompt, "Password for user %s: ", username);
+ if (try_password_opt == TRI_YES)
+ password = simple_prompt(password_prompt, 100, false);
+
+ if (verbose_opt)
+ {
+ /* Print environments */
+ fprintf(outf, "%s (%s): Cleanup outstanding 2PCs.\n", progname, PG_VERSION);
+ /* Target databases */
+ fprintf(outf, "Target databases:");
+ if (clean_all_databases)
+ fprintf(outf, "(ALL)\n");
+ else
+ {
+ database_names *cur_name;
+
+ for(cur_name = head_database_names; cur_name; cur_name = cur_name->next)
+ fprintf(outf, " %s", cur_name->database_name);
+ fprintf(outf, "\n");
+ }
+ /* Username to use */
+ fprintf(outf, "Username: %s\n", username ? username : "default");
+ /* Status opt */
+ fprintf(outf, "Status opt: %s\n", status_opt ? "on" : "off");
+ /* No-clean opt */
+ fprintf(outf, "no-clean: %s\n", no_clean_opt ? "on" : "off");
+ }
+
+ /* Tweak options --> should be improved in the next releases */
+ if (status_opt)
+ verbose_opt = true;
+ /* Connect to XC server */
+ if (verbose_opt)
+ {
+ fprintf(outf, "%s: connecting to database \"%s\", host: \"%s\", port: %d\n",
+ progname,
+ clean_all_databases ? "postgres" : head_database_names->database_name,
+ coordinator_host, coordinator_port);
+ }
+ coord_conn = loginDatabase(coordinator_host, coordinator_port, username, password,
+ clean_all_databases ? "postgres" : head_database_names->database_name,
+ progname, "auto", password_prompt);
+ if (verbose_opt)
+ {
+ fprintf(outf, "%s: connected successfully\n", progname);
+ }
+
+ /*
+ * Get my nodename (connected Coordinator)
+ */
+ getMyNodename(coord_conn);
+ if (verbose_opt)
+ {
+ fprintf(outf, "%s: Connected to the node \"%s\"\n", progname, my_nodename);
+ }
+
+ /*
+ * Get available databases
+ *
+ * pgxc_clean assumes that all the databases are available from the connecting Coordinator.
+ * An (expert) DBA can create a database local to a subset of the nodes via EXECUTE DIRECT.
+ * In this case, the DBA may have to clean outstanding 2PC transactions manually, or clean
+ * them by connecting pgxc_clean to different Coordinators.
+ *
+ * If such node-subset databases turn out to be widely used, pgxc_clean may need
+ * an extension to deal with this case.
+ */
+ if (clean_all_databases)
+ getDatabaseList(coord_conn);
+ if (verbose_opt)
+ {
+ database_info *cur_database;
+
+ fprintf(outf, "%s: Databases visible from the node \"%s\": ", progname, my_nodename);
+
+ if (head_database_info)
+ {
+ for (cur_database = head_database_info; cur_database; cur_database = cur_database->next)
+ {
+ fprintf(outf, " \"%s\"", cur_database->database_name);
+ }
+ fputc('\n', outf);
+ }
+ }
+
+ /*
+ * Get list of Coordinators
+ *
+ * As in the case of databases, we clean transactions on the nodes visible from the
+ * connecting Coordinator. The DBA can also set up a different node configuration
+ * at each Coordinator. In this case, the DBA should be careful to choose an
+ * appropriate Coordinator to clean up transactions.
+ */
+ getNodeList(coord_conn);
+ if (verbose_opt)
+ {
+ int ii;
+
+ fprintf(outf, "%s: Node list visible from the node \"%s\"\n", progname, my_nodename);
+
+ for (ii = 0; ii < pgxc_clean_node_count; ii++)
+ {
+ fprintf(outf, "Name: %s, host: %s, port: %d, type: %s\n",
+ pgxc_clean_node_info[ii].node_name,
+ pgxc_clean_node_info[ii].host,
+ pgxc_clean_node_info[ii].port,
+ pgxc_clean_node_info[ii].type == NODE_TYPE_COORD ? "coordinator" : "datanode");
+ }
+ }
+
+ /*
+ * Get the list of prepared transactions
+ */
+ getPreparedTxnList(coord_conn);
+
+ /*
+ * Check if there are any 2PC candidates to recover
+ */
+ if (!check2PCExists())
+ {
+ fprintf(errf, "%s: There's no prepared 2PC in this cluster. Exiting.\n", progname);
+ exit(0);
+ }
+
+
+ /*
+ * Check status of each prepared transaction. To do this, look into
+ * nodes where the transaction is not recorded as "prepared".
+ * Possible statuses are unknown (prepare has not been issued), committed or
+ * aborted.
+ */
+ getTxnInfoOnOtherNodesAll(coord_conn);
+ if (verbose_opt)
+ {
+ /* Print all the prepared transaction list */
+ database_info *cur_db;
+
+ fprintf(outf, "%s: 2PC transaction list.\n", progname);
+ for (cur_db = head_database_info; cur_db; cur_db = cur_db->next)
+ {
+ txn_info *txn;
+
+ fprintf(outf, "Database: \"%s\":\n", cur_db->database_name);
+
+ for (txn = cur_db->head_txn_info; txn; txn = txn->next)
+ {
+ int ii;
+
+ fprintf(outf, " gxid: %d, xid: \"%s\", owner: %s\n", txn->gxid, txn->xid, txn->owner);
+ for (ii = 0; ii < pgxc_clean_node_count; ii++)
+ {
+ fprintf(outf, " node: %s, status: %s\n",
+ pgxc_clean_node_info[ii].node_name,
+ str_txn_stat(txn->txn_stat[ii]));
+ }
+ }
+ }
+ }
+
+ /*
+ * Then disconnect from the database.
+ * We need to log in to each database that 2PC was issued for. Again, we assume
+ * that every prepare is issued against the same database on each node, which is
+ * what the current Coordinator does, and there seems to be no way to violate this assumption.
+ */
+ if (verbose_opt)
+ {
+ fprintf(outf, "%s: disconnecting\n", progname);
+ }
+ PQfinish(coord_conn);
+
+ /*
+ * If --no-clean option is specified, we exit here.
+ */
+ if (no_clean_opt)
+ {
+ fprintf(outf, "--no-clean opt is specified. Exiting.\n");
+ exit(0);
+ }
+
+ /*
+ * Recover 2PC for specified databases
+ */
+ if (clean_all_databases)
+ {
+ database_info *cur_database_info;
+
+ for(cur_database_info = head_database_info; cur_database_info; cur_database_info = cur_database_info->next)
+ {
+ recover2PCForDatabase(cur_database_info);
+ }
+ }
+ else
+ {
+ database_info *cur_database_info;
+ database_names *cur_database_name;
+
+ for(cur_database_name = head_database_names; cur_database_name; cur_database_name = cur_database_name->next)
+ {
+ cur_database_info = find_database_info(cur_database_name->database_name);
+ if (cur_database_info)
+ {
+ recover2PCForDatabase(cur_database_info);
+ }
+ }
+ }
+ exit(0);
+}
+
+static void
+getMyNodename(PGconn *conn)
+{
+ static const char *stmt = "SELECT pgxc_node_str()";
+ PGresult *res;
+
+ res = PQexec(conn, stmt);
+
+ /* Fall back to "unknown" if the query failed */
+ if (res && PQresultStatus(res) == PGRES_TUPLES_OK)
+ my_nodename = strdup(PQgetvalue(res, 0, 0));
+ else
+ my_nodename = strdup("unknown");
+
+ PQclear(res);
+}
+
+static void
+recover2PCForDatabase(database_info *db_info)
+{
+ PGconn *coord_conn;
+ txn_info *cur_txn;
+
+ if (verbose_opt)
+ fprintf(outf, "%s: recovering 2PC for database \"%s\"\n", progname, db_info->database_name);
+ coord_conn = loginDatabase(coordinator_host, coordinator_port, username, password, db_info->database_name,
+ progname, "auto", password_prompt);
+ if (coord_conn == NULL)
+ {
+ fprintf(errf, "Could not connect to the database %s.\n", db_info->database_name);
+ return;
+ }
+ if (!setMaintenanceMode(coord_conn))
+ {
+ /* Cannot recover */
+ fprintf(errf, "Skipping database %s.\n", db_info->database_name);
+ PQfinish(coord_conn);
+ return;
+ }
+ if (verbose_opt)
+ fprintf(outf, "%s: connected to the database \"%s\"\n", progname, db_info->database_name);
+ for(cur_txn = db_info->head_txn_info; cur_txn; cur_txn = cur_txn->next)
+ {
+ recover2PC(coord_conn, cur_txn);
+ }
+ PQfinish(coord_conn);
+}
+
+static void
+recover2PC(PGconn *conn, txn_info *txn)
+{
+ TXN_STATUS txn_stat;
+
+ txn_stat = check_txn_global_status(txn);
+ if (verbose_opt)
+ {
+ fprintf(outf, " Recovering TXN: gxid: %d, xid: \"%s\", owner: \"%s\", global status: %s\n",
+ txn->gxid, txn->xid, txn->owner, str_txn_stat(txn_stat));
+ }
+ switch (txn_stat)
+ {
+ case TXN_STATUS_FAILED:
+ case TXN_STATUS_UNKNOWN:
+ if (verbose_opt)
+ fprintf(outf, " Recovery not needed.\n");
+ return;
+ case TXN_STATUS_PREPARED:
+ if (verbose_opt)
+ fprintf(outf, " Recovery not needed.\n");
+ return;
+ case TXN_STATUS_COMMITTED:
+ do_commit(conn, txn);
+ return;
+ case TXN_STATUS_ABORTED:
+ do_abort(conn, txn);
+ return;
+ case TXN_STATUS_INPROGRESS:
+ fprintf(stderr, " Can't recover a running transaction.\n");
+ exit(1);
+ default:
+ fprintf(stderr, " Unknown TXN status, pgxc_clean error.\n");
+ exit(1);
+ }
+ return;
+}
+
+static void
+do_commit(PGconn *conn, txn_info *txn)
+{
+ do_commit_abort(conn, txn, true);
+}
+
+static void
+do_abort(PGconn *conn, txn_info *txn)
+{
+ do_commit_abort(conn, txn, false);
+}
+
+static void
+do_commit_abort(PGconn *conn, txn_info *txn, bool is_commit)
+{
+ int ii;
+ static const char *EXEC_DIRECT_STMT_FMT = "EXECUTE DIRECT ON (%s) '%s PREPARED ''%s'';';";
+ static const char *GLOBAL_STMT_FMT = "%s PREPARED '%s';";
+ char *stmt = (char *) malloc (128 + strlen(txn->xid)); /* room for node name, command, and xid */
+ PGresult *res;
+ ExecStatusType res_status;
+
+ if (verbose_opt)
+ fprintf(outf, " %s... ", is_commit ? "committing" : "aborting");
+ for (ii = 0; ii < pgxc_clean_node_count; ii++)
+ {
+ if (txn->txn_stat[ii] == TXN_STATUS_PREPARED && ii != my_nodeidx)
+ {
+
+ sprintf(stmt, EXEC_DIRECT_STMT_FMT,
+ pgxc_clean_node_info[ii].node_name,
+ is_commit ? "COMMIT" : "ROLLBACK",
+ txn->xid);
+ res = PQexec(conn, stmt);
+ res_status = PQresultStatus(res);
+ if (verbose_opt)
+ {
+ if (res_status == PGRES_COMMAND_OK || res_status == PGRES_TUPLES_OK)
+ fprintf(outf, "succeeded (%s), ", pgxc_clean_node_info[ii].node_name);
+ else
+ fprintf(outf, "failed (%s: %s), ",
+ pgxc_clean_node_info[ii].node_name,
+ PQresultErrorMessage(res));
+ }
+ else
+ {
+ if (res_status != PGRES_COMMAND_OK && res_status != PGRES_TUPLES_OK)
+ {
+ fprintf(errf, "Failed to recover TXN, gxid: %d, xid: \"%s\", owner: \"%s\", node: \"%s\" (%s)\n",
+ txn->gxid, txn->xid, txn->owner, pgxc_clean_node_info[ii].node_name,
+ PQresultErrorMessage(res));
+ }
+ }
+ PQclear(res);
+ }
+ }
+ /* Issue the global statement */
+ sprintf(stmt, GLOBAL_STMT_FMT,
+ is_commit ? "COMMIT" : "ROLLBACK",
+ txn->xid);
+ res = PQexec(conn, stmt);
+ res_status = PQresultStatus(res);
+ if (verbose_opt)
+ {
+ if (res_status == PGRES_COMMAND_OK || res_status == PGRES_TUPLES_OK)
+ fprintf(outf, "succeeded (%s)\n", my_nodename);
+ else
+ fprintf(outf, "failed (%s: %s)\n",
+ my_nodename,
+ PQresultErrorMessage(res));
+ }
+ else if (res_status != PGRES_COMMAND_OK && res_status != PGRES_TUPLES_OK)
+ {
+ fprintf(errf, "Failed to recover TXN, gxid: %d, xid: \"%s\", owner: \"%s\", node: \"%s\" (%s)\n",
+ txn->gxid, txn->xid, txn->owner, my_nodename, PQresultErrorMessage(res));
+ }
+ PQclear(res);
+}
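For reference, the manual equivalent of what do_commit_abort() issues, per the two format strings above, would look roughly like this (the node name and GID are hypothetical; xc_maintenance_mode is set first, as recover2PCForDatabase() does):

    psql -e test1 <<'EOF'
    SET xc_maintenance_mode = on;
    -- Per-node resolution on each node still holding the prepared txn:
    EXECUTE DIRECT ON (datanode_1) 'COMMIT PREPARED ''sample_gid'';';
    -- Then the global statement on the connected Coordinator:
    COMMIT PREPARED 'sample_gid';
    EOF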
+
+#if 0
+static database_info *
+find_database_info(char *dbname)
+{
+ database_info *cur_database_info;
+
+ for(cur_database_info = head_database_info; cur_database_info; cur_database_info = cur_database_info->next)
+ {
+ if (strcmp(cur_database_info->database_name, dbname) == 0)
+ return(cur_database_info);
+ }
+ return(NULL);
+}
+#endif
+
+
+static PGconn *
+loginDatabase(char *host, int port, char *user, char *password, char *dbname, const char *progname, char *encoding, char *password_prompt)
+{
+ bool new_pass = false;
+ PGconn *coord_conn;
+ char port_s[32];
+#define PARAMS_ARRAY_SIZE 8
+ const char *keywords[PARAMS_ARRAY_SIZE];
+ const char *values[PARAMS_ARRAY_SIZE];
+
+ sprintf(port_s, "%d", port);
+
+ keywords[0] = "host";
+ values[0] = host;
+ keywords[1] = "port";
+ values[1] = port_s;
+ keywords[2] = "user";
+ values[2] = user;
+ keywords[3] = "password";
+ keywords[4] = "dbname";
+ values[4] = dbname;
+ keywords[5] = "fallback_application_name";
+ values[5] = progname;
+ keywords[6] = "client_encoding";
+ values[6] = encoding;
+ keywords[7] = NULL;
+ values[7] = NULL;
+
+ /* Loop until we have a password if requested by backend */
+ do
+ {
+ values[3] = password;
+
+ new_pass = false;
+ coord_conn = PQconnectdbParams(keywords, values, true);
+
+ if (PQstatus(coord_conn) == CONNECTION_BAD &&
+ PQconnectionNeedsPassword(coord_conn) &&
+ password == NULL &&
+ try_password_opt != TRI_NO)
+ {
+ PQfinish(coord_conn);
+ password = simple_prompt(password_prompt, 100, false);
+ new_pass = true;
+ }
+ } while (new_pass);
+
+ return(coord_conn);
+}
+
+
+static TXN_STATUS
+getTxnStatus(PGconn *conn, GlobalTransactionId gxid, int node_idx)
+{
+ char *node_name;
+ char stmt[1024];
+ PGresult *res;
+ char *res_s;
+
+ static const char *STMT_FORM = "EXECUTE DIRECT ON (%s) 'SELECT pgxc_is_committed(''%d''::xid);'";
+ static const char *STMT_FORM_RUNNING = "EXECUTE DIRECT ON (%s) 'SELECT pgxc_is_inprogress(''%d''::xid);'";
+
+ node_name = pgxc_clean_node_info[node_idx].node_name;
+ sprintf(stmt, STMT_FORM, node_name, gxid);
+
+ res = PQexec(conn, stmt);
+ if (res == NULL || PQresultStatus(res) != PGRES_TUPLES_OK ||
+ PQgetisnull(res, 0, 0))
+ {
+ PQclear(res);
+ sprintf(stmt, STMT_FORM_RUNNING, node_name, gxid);
+ res = PQexec(conn, stmt);
+ if (res == NULL || PQresultStatus(res) != PGRES_TUPLES_OK)
+ return TXN_STATUS_UNKNOWN;
+ if (PQgetisnull(res, 0, 0))
+ return TXN_STATUS_UNKNOWN;
+ res_s = PQgetvalue(res, 0, 0);
+ if (strcmp(res_s, "t") == 0)
+ return TXN_STATUS_INPROGRESS;
+ return TXN_STATUS_UNKNOWN;
+ }
+ res_s = PQgetvalue(res, 0, 0);
+ if (strcmp(res_s, "t") == 0)
+ return TXN_STATUS_COMMITTED;
+ else
+ return TXN_STATUS_ABORTED;
+}
+
+static void
+getTxnInfoOnOtherNodes(PGconn *conn, txn_info *txn)
+{
+ int ii;
+
+ for (ii = 0; ii < pgxc_clean_node_count; ii++)
+ {
+ if (txn->txn_stat[ii] == TXN_STATUS_INITIAL)
+ txn->txn_stat[ii] = getTxnStatus(conn, txn->gxid, ii);
+ }
+}
+
+
+static void
+getTxnInfoOnOtherNodesForDatabase(PGconn *conn, database_info *database)
+{
+ txn_info *cur_txn;
+
+ for (cur_txn = database->head_txn_info; cur_txn; cur_txn = cur_txn->next)
+ {
+ getTxnInfoOnOtherNodes(conn, cur_txn);
+ }
+}
+
+
+static void
+getTxnInfoOnOtherNodesAll(PGconn *conn)
+{
+ database_info *cur_database;
+
+ for (cur_database = head_database_info; cur_database; cur_database = cur_database->next)
+ {
+ getTxnInfoOnOtherNodesForDatabase(conn, cur_database);
+ }
+}
+
+
+
+static void
+getPreparedTxnListOfNode(PGconn *conn, int idx)
+{
+ int prep_txn_count;
+ int ii;
+ PGresult *res;
+ ExecStatusType pq_status;
+
+#define MAX_STMT_LEN 1024
+
+ /* SQL Statement */
+ static const char *STMT_GET_PREP_TXN_ON_NODE
+ = "EXECUTE DIRECT ON (%s) 'SELECT TRANSACTION, GID, OWNER, DATABASE FROM PG_PREPARED_XACTS;'";
+ char stmt[MAX_STMT_LEN];
+
+ sprintf(stmt, STMT_GET_PREP_TXN_ON_NODE,
+ pgxc_clean_node_info[idx].node_name);
+
+ res = PQexec(conn, stmt);
+ if (res == NULL || (pq_status = PQresultStatus(res)) != PGRES_TUPLES_OK)
+ {
+ fprintf(stderr, "Could not obtain prepared transaction list for node %s.(%s)\n",
+ pgxc_clean_node_info[idx].node_name, res ? PQresultErrorMessage(res) : "");
+ PQclear(res);
+ exit (1);
+ }
+ prep_txn_count = PQntuples(res);
+ for (ii = 0; ii < prep_txn_count; ii++)
+ {
+ GlobalTransactionId gxid;
+ char *xid;
+ char *owner;
+ char *database_name;
+
+ gxid = atoi(PQgetvalue(res, ii, 0));
+ xid = strdup(PQgetvalue(res, ii, 1));
+ owner = strdup(PQgetvalue(res, ii, 2));
+ database_name = strdup(PQgetvalue(res, ii, 3));
+
+ add_txn_info(database_name, pgxc_clean_node_info[idx].node_name, gxid, xid, owner,
+ TXN_STATUS_PREPARED);
+ if(xid)
+ free(xid);
+ if (owner)
+ free(owner);
+ if (database_name)
+ free(database_name);
+ }
+ PQclear(res);
+}
+
+static void
+getPreparedTxnList(PGconn *conn)
+{
+ int ii;
+
+ for (ii = 0; ii < pgxc_clean_node_count; ii++)
+ {
+ getPreparedTxnListOfNode(conn, ii);
+ }
+}
+
+static void
+getDatabaseList(PGconn *conn)
+{
+ int database_count;
+ int ii;
+ PGresult *res;
+ char *dbname;
+
+ /* SQL Statement */
+ static const char *STMT_GET_DATABASE_LIST = "SELECT DATNAME FROM PG_DATABASE;";
+
+ /*
+ * Get database list
+ */
+ res = PQexec(conn, STMT_GET_DATABASE_LIST);
+ if (res == NULL || PQresultStatus(res) != PGRES_TUPLES_OK)
+ {
+ fprintf(stderr, "Could not obtain database list.\n");
+ PQclear(res);
+ exit (1);
+ }
+ database_count = PQntuples(res);
+ for(ii = 0; ii < database_count; ii++)
+ {
+ dbname = PQgetvalue(res, ii, 0);
+ if (strcmp(dbname, "template0") == 0)
+ /* Skip template0 database */
+ continue;
+ add_database_info(dbname);
+ }
+ PQclear(res);
+}
+
+static void
+getNodeList(PGconn *conn)
+{
+ int ii;
+ PGresult *res;
+
+ /* SQL Statement */
+ static const char *STMT_GET_NODE_INFO = "SELECT NODE_NAME, NODE_TYPE, "
+ "NODE_PORT, NODE_HOST, NODE_ID FROM PGXC_NODE;";
+
+ res = PQexec(conn, STMT_GET_NODE_INFO);
+ if (res == NULL || PQresultStatus(res) != PGRES_TUPLES_OK)
+ {
+ fprintf(stderr, "Could not obtain node list.\n");
+ PQclear(res);
+ exit (1);
+ }
+ pgxc_clean_node_count = PQntuples(res);
+ pgxc_clean_node_info = (node_info *)calloc(pgxc_clean_node_count, sizeof(node_info));
+ if (pgxc_clean_node_info == NULL)
+ {
+ fprintf(stderr, "No more memory.\n");
+ exit(1);
+ }
+
+ for (ii = 0; ii < pgxc_clean_node_count; ii++)
+ {
+ char *node_name;
+ char *node_type_c;
+ NODE_TYPE node_type;
+ int port;
+ char *host;
+ int nodeid;
+
+ node_name = strdup(PQgetvalue(res, ii, 0));
+ node_type_c = strdup(PQgetvalue(res, ii, 1));
+ switch (node_type_c[0])
+ {
+ case 'C':
+ /* pgxc_clean has to connect to the Coordinator */
+ node_type = NODE_TYPE_COORD;
+ if (strcmp(node_name, my_nodename) == 0)
+ my_nodeidx = ii;
+ break;
+ case 'D':
+ node_type = NODE_TYPE_DATANODE;
+ break;
+ default:
+ fprintf(stderr, "Invalid catalog data (node_type), node_name: %s, node_type: %s\n", node_name, node_type_c);
+ exit(1);
+ }
+ port = atoi(PQgetvalue(res, ii, 2));
+ host = strdup(PQgetvalue(res, ii, 3));
+ nodeid = atoi(PQgetvalue(res, ii, 4));
+ set_node_info(node_name, port, host, node_type, nodeid, ii);
+
+ if (node_name)
+ free(node_name);
+ if (node_type_c)
+ free(node_type_c);
+ if (host)
+ free(host);
+ }
+ /* Check if local Coordinator has been found */
+ if (my_nodeidx == -1)
+ {
+ fprintf(stderr, "Failed to identify the coordinator which %s is connecting to. ", progname);
+ fprintf(stderr, "Connecting to a wrong node.\n");
+ exit(1);
+ }
+}
+
+
+
+static void
+showVersion(void)
+{
+ puts("pgxc_clean (Postgres-XC) " PGXC_VERSION);
+}
+
+static void
+add_to_database_list(char *dbname)
+{
+ if (head_database_names == NULL)
+ {
+ head_database_names = last_database_names = (database_names *)malloc(sizeof(database_names));
+ if (head_database_names == NULL)
+ {
+ fprintf(stderr, "No more memory, FILE:%s, LINE:%d.\n", __FILE__, __LINE__);
+ exit(1);
+ }
+ }
+ else
+ {
+ last_database_names->next = (database_names *)malloc(sizeof(database_names));
+ if (last_database_names->next == NULL)
+ {
+ fprintf(stderr, "No more memory, FILE:%s, LINE:%d.\n", __FILE__, __LINE__);
+ exit(1);
+ }
+ last_database_names = last_database_names->next;
+ }
+ last_database_names->next = NULL;
+ last_database_names->database_name = dbname;
+}
+
+static void
+parse_pgxc_clean_options(int argc, char *argv[])
+{
+ static struct option long_options[] =
+ {
+ {"all", no_argument, NULL, 'a'},
+ {"dbname", required_argument, NULL, 'd'},
+ {"host", required_argument, NULL, 'h'},
+ {"no-clean", no_argument, NULL, 'N'},
+ {"output", required_argument, NULL, 'o'},
+ {"port", required_argument, NULL, 'p'},
+ {"quiet", no_argument, NULL, 'q'},
+ {"username", required_argument, NULL, 'U'},
+ {"verbose", no_argument, NULL, 'v'},
+ {"version", no_argument, NULL, 'V'},
+ {"no-password", no_argument, NULL, 'w'},
+ {"password", no_argument, NULL, 'W'},
+ {"help", no_argument, NULL, '?'},
+ {"status", no_argument, NULL, 's'},
+ {NULL, 0, NULL, 0}
+ };
+
+ int optindex;
+ extern char *optarg;
+ extern int optind;
+ int c;
+
+ progname = get_progname(argv[0]); /* Should be more fancy */
+
+ while ((c = getopt_long(argc, argv, "ad:h:No:p:qU:vVwWs?", long_options, &optindex)) != -1)
+ {
+ switch(c)
+ {
+ case 'a':
+ clean_all_databases = true;
+ break;
+ case 'd':
+ add_to_database_list(optarg);
+ break;
+ case 'h':
+ coordinator_host = optarg;
+ break;
+ case 'N':
+ no_clean_opt = true;
+ break;
+ case 'o':
+ output_filename = optarg;
+ break;
+ case 'p':
+ coordinator_port = atoi(optarg);
+ break;
+ case 'q':
+ verbose_opt = false;
+ break;
+ case 'U':
+ username = optarg;
+ break;
+ case 'V':
+ version_opt = true;
+ break;
+ case 'v':
+ verbose_opt = true;
+ break;
+ case 'w':
+ try_password_opt = TRI_NO;
+ break;
+ case 'W':
+ try_password_opt = TRI_YES;
+ break;
+ case 's':
+ status_opt = true;
+ break;
+ case '?':
+ if (strcmp(argv[optind - 1], "-?") == 0 || strcmp(argv[optind - 1], "--help") == 0)
+ {
+ usage();
+ exit(0);
+ }
+ else
+ {
+ fprintf(stderr, "Try \"%s --help\" for more information.\n", progname);
+ exit(1);
+ }
+ break;
+ default:
+ fprintf(stderr, "Try \"%s --help\" for more information.\n", progname);
+ exit(1);
+ break;
+ }
+ }
+
+ while (argc - optind >= 1)
+ {
+ if (head_database_names == NULL)
+ {
+ if (strcmp(argv[optind], "template0") == 0)
+ {
+ fprintf(stderr, "%s: You should not clean template0 database.\n", progname);
+ exit(1);
+ }
+ add_to_database_list(argv[optind]);
+ }
+ if (username == NULL)
+ username = argv[optind];
+ else
+ fprintf(stderr, "%s: warning: extra command-line argument \"%s\" ignored\n",
+ progname, argv[optind]);
+ optind++;
+ }
+
+ if (!clean_all_databases && head_database_names == NULL)
+ {
+ fprintf(stderr, "Please specify at least one database or -a for all\n");
+ exit(1);
+ }
+}
+
+static bool setMaintenanceMode(PGconn *conn)
+{
+ static const char *SetMaintenanceModeCommand = "SET xc_maintenance_mode = on;";
+ PGresult *res;
+ ExecStatusType res_status;
+
+ res = PQexec(conn, SetMaintenanceModeCommand);
+ res_status = PQresultStatus(res);
+ if (res_status == PGRES_COMMAND_OK || res_status == PGRES_TUPLES_OK)
+ return true;
+ else
+ fprintf(errf, "Failed to set xc_maintenance_mode. (%s)\n",
+ PQresultErrorMessage(res));
+ return false;
+}
+
+static char *GetUserName(void)
+{
+ struct passwd *passwd;
+
+ passwd = getpwuid(getuid());
+ if (passwd)
+ return(strdup(passwd->pw_name));
+ else
+ {
+ fprintf(stderr, "%s: could not get current user name: %s\n", progname, strerror(errno));
+ exit(1);
+ }
+ return NULL;
+}
+
+static void usage(void)
+{
+ char *env;
+ char *user;
+
+ user = getenv("PGUSER");
+ if (!user)
+ user = GetUserName();
+
+ printf("pgxc_clean cleans up outstanding 2PCs after failed node is recovered.\n"
+ "Usage:\n"
+ "pgxc_clean [OPTION ...] [DBNAME [USERNAME]]\n\n"
+ "Options:\n");
+
+ env = getenv("PGDATABASE");
+ if (!env)
+ env = user;
+ printf(" -a, --all cleanup all the databases available.\n");
+ printf(" -d, --dbname=DBNAME database name to clean up (default: \"%s\")\n", env);
+ env = getenv("PGHOST");
+ printf(" -h, --host=HOSTNAME target coordinator host address, (default: \"%s\")\n", env ? env : "local socket");
+ printf(" -N, no-clean only collect 2PC information. Do not recover them\n");
+ printf(" -o, --output=FILENAME output file name.\n");
+ env = getenv("PGPORT");
+ printf(" -p, --port=PORT port number of the coordinator (default: \"%s\")\n", env ? env : DEF_PGPORT_STR);
+ printf(" -q, --quiet quiet mode. do not print anything but error information.\n");
+ printf(" -s, --status prints out 2PC status\n");
+ env = getenv("PGUSER");
+ if (!env)
+ env = user;
+ printf(" -U, --username=USERNAME database user name (default: \"%s\")\n", env);
+ printf(" -v, --verbose print recovery information.\n");
+ printf(" -V, --version prints out the version.\n");
+ printf(" -w, --no-password never prompt for the password.\n");
+ printf(" -W, --password prompt for the password.\n");
+ printf(" -?, --help print this message.\n");
+}
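Putting the options together, a hedged usage sketch (host, port, and database names are illustrative): inspect the 2PC status first without touching anything, then run the actual recovery:

    # Show outstanding 2PC status for every database, but do not recover:
    pgxc_clean --all --status --no-clean -h coord1 -p 5432 -U postgres

    # Recover 2PCs in two specific databases, logging details to a file:
    pgxc_clean -d test1 -d test2 -h coord1 -p 5432 -v -o pgxc_clean.log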
diff --git a/contrib/pgxc_clean/pgxc_clean.h b/contrib/pgxc_clean/pgxc_clean.h
new file mode 100644
index 0000000000..cc3def7c7f
--- /dev/null
+++ b/contrib/pgxc_clean/pgxc_clean.h
@@ -0,0 +1,13 @@
+#ifndef PGXC_CLEAN
+#define PGXC_CLEAN
+
+typedef struct database_names
+{
+ struct database_names *next;
+ char *database_name;
+} database_names;
+
+extern FILE *outf;
+extern FILE *errf;
+
+#endif /* PGXC_CLEAN */
diff --git a/contrib/pgxc_clean/pgxc_clean_test.sh b/contrib/pgxc_clean/pgxc_clean_test.sh
new file mode 100644
index 0000000000..87bc928173
--- /dev/null
+++ b/contrib/pgxc_clean/pgxc_clean_test.sh
@@ -0,0 +1,85 @@
+#!/bin/bash
+#
+# This script sets up a test environment for pgxc_clean.
+# Please note that all the prepared transactions are
+# partially committed or aborted.
+#
+# You should configure PGPORT and PGHOST to connect, as
+# well as node names for your test environment.
+#
+# Before you run this script, XC should be up and ready.
+# Also, this may try to drop test databases. You may need
+# to run the CLEAN CONNECTION statement on each coordinator in
+# advance.
+#
+
+if [ $# -le 0 ]
+then
+ create=no
+else
+ if [ $1 = create ]
+ then
+ create=yes
+ else
+ create=no
+ fi
+fi
+
+export PGPORT=20004
+export PGHOST=localhost
+sourcedb=postgres
+
+if [ $create = yes ]
+then
+psql -e $sourcedb <<EOF
+drop database if exists test1;
+drop database if exists test2;
+drop database if exists test3;
+create database test1;
+create database test2;
+create database test3;
+\q
+EOF
+fi
+
+psql -e test1 <<EOF
+drop table if exists t;
+begin;
+create table t (a int);
+prepare transaction 'test1_1';
+\q
+EOF
+
+psql -e test2 <<EOF
+drop table if exists t;
+begin;
+create table t (a int);
+prepare transaction 'test2_1';
+\q
+EOF
+
+psql -e test3 <<EOF
+drop table if exists t;
+begin;
+create table t (a int);
+prepare transaction 'test3_1';
+\q
+EOF
+
+psql -e test1 <<EOF
+set xc_maintenance_mode = on;
+execute direct on node1 'commit prepared ''test1_1'' ';
+\q
+EOF
+
+psql -e test2 <<EOF
+set xc_maintenance_mode = on;
+execute direct on node2 'commit prepared ''test2_1'' ';
+\q
+EOF
+
+psql -e test3 <<EOF
+set xc_maintenance_mode = on;
+execute direct on node1 'rollback prepared ''test3_1'' ';
+\q
+EOF
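After the script runs, each test database is left with a prepared transaction that was resolved on only one node. A plausible follow-up (ports and node names mirror the script's assumptions) is to confirm the leftovers and then let pgxc_clean resolve them:

    # The half-resolved entries should still show up on the skipped nodes:
    psql -c 'SELECT gid, database FROM pg_prepared_xacts;' postgres

    # Dry run first, then the real cleanup of the three test databases:
    pgxc_clean -a -N -s
    pgxc_clean -d test1 -d test2 -d test3 -v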
diff --git a/contrib/pgxc_clean/txninfo.c b/contrib/pgxc_clean/txninfo.c
new file mode 100644
index 0000000000..804b2da18e
--- /dev/null
+++ b/contrib/pgxc_clean/txninfo.c
@@ -0,0 +1,481 @@
+#include "txninfo.h"
+
+static int check_xid_is_implicit(char *xid);
+static txn_info *find_txn(TransactionId gxid);
+static txn_info *make_txn_info(char *dbname, TransactionId gxid, char *xid, char *owner);
+static void find_txn_participant_nodes(txn_info *txn);
+
+#define XIDPREFIX "_$XC$"
+
+database_info *find_database_info(char *database_name)
+{
+ database_info *cur_database_info = head_database_info;
+
+ for (;cur_database_info; cur_database_info = cur_database_info->next)
+ {
+ if(strcmp(cur_database_info->database_name, database_name) == 0)
+ return(cur_database_info);
+ }
+ return(NULL);
+}
+
+database_info *add_database_info(char *database_name)
+{
+ database_info *rv;
+
+ if ((rv = find_database_info(database_name)) != NULL)
+ return rv; /* Already in the list */
+ rv = malloc(sizeof(database_info));
+ if (rv == NULL)
+ return NULL;
+ rv->next = NULL;
+ rv->database_name = strdup(database_name);
+ if (rv->database_name == NULL)
+ {
+ free(rv);
+ return NULL;
+ }
+ rv->head_txn_info = NULL;
+ rv->last_txn_info = NULL;
+ if (head_database_info == NULL)
+ {
+ head_database_info = last_database_info = rv;
+ return rv;
+ }
+ else
+ {
+ last_database_info->next = rv;
+ last_database_info = rv;
+ return rv;
+ }
+}
+
+int set_node_info(char *node_name, int port, char *host, NODE_TYPE type,
+ int nodeid, int index)
+{
+ node_info *cur_node_info;
+
+ if (index >= pgxc_clean_node_count)
+ return -1;
+ cur_node_info = &pgxc_clean_node_info[index];
+ if (cur_node_info->node_name)
+ free(cur_node_info->node_name);
+ if (cur_node_info->host)
+ free(cur_node_info->host);
+ cur_node_info->node_name = strdup(node_name);
+ if (cur_node_info->node_name == NULL)
+ return -1;
+ cur_node_info->port = port;
+ cur_node_info->host = strdup(host);
+ if (cur_node_info->host == NULL)
+ return -1;
+ cur_node_info->type = type;
+ cur_node_info->nodeid = nodeid;
+ return 0;
+}
+
+node_info *find_node_info(char *node_name)
+{
+ int i;
+ for (i = 0; i < pgxc_clean_node_count; i++)
+ {
+ if (pgxc_clean_node_info[i].node_name == NULL)
+ continue;
+ if (strcmp(pgxc_clean_node_info[i].node_name, node_name) == 0)
+ return &pgxc_clean_node_info[i];
+ }
+ return(NULL);
+}
+
+node_info *find_node_info_by_nodeid(int nodeid)
+{
+ int i;
+ for (i = 0; i < pgxc_clean_node_count; i++)
+ {
+ if (pgxc_clean_node_info[i].nodeid == nodeid)
+ return &pgxc_clean_node_info[i];
+ }
+ return(NULL);
+}
+
+int find_node_index(char *node_name)
+{
+ int i;
+ for (i = 0; i < pgxc_clean_node_count; i++)
+ {
+ if (pgxc_clean_node_info[i].node_name == NULL)
+ continue;
+ if (strcmp(pgxc_clean_node_info[i].node_name, node_name) == 0)
+ return i;
+ }
+ return -1;
+}
+
+int find_node_index_by_nodeid(int nodeid)
+{
+ int i;
+ for (i = 0; i < pgxc_clean_node_count; i++)
+ {
+ if (pgxc_clean_node_info[i].nodeid == nodeid)
+ return i;
+ }
+ return -1;
+}
+
+int add_txn_info(char *dbname, char *node_name, TransactionId gxid, char *xid, char *owner, TXN_STATUS status)
+{
+ txn_info *txn;
+ int nodeidx;
+
+ if ((txn = find_txn(gxid)) == NULL)
+ {
+ txn = make_txn_info(dbname, gxid, xid, owner);
+ if (txn == NULL)
+ {
+ fprintf(stderr, "No more memory.\n");
+ exit(1);
+ }
+ }
+ nodeidx = find_node_index(node_name);
+ if (nodeidx < 0)
+ return 0; /* unknown node name; ignore the entry */
+ txn->txn_stat[nodeidx] = status;
+ return 1;
+}
+
+
+static txn_info *
+make_txn_info(char *dbname, TransactionId gxid, char *xid, char *owner)
+{
+ database_info *dbinfo;
+ txn_info *txn;
+
+ if ((dbinfo = find_database_info(dbname)) == NULL)
+ dbinfo = add_database_info(dbname);
+ txn = (txn_info *)malloc(sizeof(txn_info));
+ if (txn == NULL)
+ return NULL;
+ memset(txn, 0, sizeof(txn_info));
+ txn->gxid = gxid;
+ txn->xid = strdup(xid);
+ if (txn->xid == NULL)
+ {
+ free(txn);
+ return NULL;
+ }
+ txn->owner = strdup(owner);
+ if (txn->owner == NULL)
+ {
+ free(txn);
+ return NULL;
+ }
+ /*
+ * Allocate the per-node status array before linking the txn into the
+ * list, so that a failed allocation does not leave a half-built entry
+ * behind.
+ */
+ txn->txn_stat = (TXN_STATUS *)malloc(sizeof(TXN_STATUS) * pgxc_clean_node_count);
+ if (txn->txn_stat == NULL)
+ {
+ free(txn->owner);
+ free(txn->xid);
+ free(txn);
+ return(NULL);
+ }
+ memset(txn->txn_stat, 0, sizeof(TXN_STATUS) * pgxc_clean_node_count);
+ if (dbinfo->head_txn_info == NULL)
+ {
+ dbinfo->head_txn_info = dbinfo->last_txn_info = txn;
+ }
+ else
+ {
+ dbinfo->last_txn_info->next = txn;
+ dbinfo->last_txn_info = txn;
+ }
+ return txn;
+}
+
+
+/* Ugly ---> Remove this */
+txn_info *init_txn_info(char *database_name, TransactionId gxid)
+{
+ database_info *database;
+ txn_info *cur_txn_info;
+
+ if ((database = find_database_info(database_name)) == NULL)
+ return NULL;
+
+ if (database->head_txn_info == NULL)
+ {
+ database->head_txn_info = database->last_txn_info = (txn_info *)malloc(sizeof(txn_info));
+ if (database->head_txn_info == NULL)
+ return NULL;
+ memset(database->head_txn_info, 0, sizeof(txn_info));
+ if ((database->head_txn_info->txn_stat = (TXN_STATUS *)malloc(sizeof(TXN_STATUS) * pgxc_clean_node_count)) == NULL)
+ return NULL;
+ memset(database->head_txn_info->txn_stat, 0, sizeof(TXN_STATUS) * pgxc_clean_node_count);
+ return database->head_txn_info;
+ }
+ for (cur_txn_info = database->head_txn_info; cur_txn_info; cur_txn_info = cur_txn_info->next)
+ {
+ if (cur_txn_info->gxid == gxid)
+ return(cur_txn_info);
+ }
+ /* Not found: append a new entry after the current tail */
+ cur_txn_info = database->last_txn_info;
+ cur_txn_info->next = database->last_txn_info = (txn_info *)malloc(sizeof(txn_info));
+ if (cur_txn_info->next == NULL)
+ return(NULL);
+ memset(cur_txn_info->next, 0, sizeof(txn_info));
+ if ((cur_txn_info->next->txn_stat = (TXN_STATUS *)malloc(sizeof(TXN_STATUS) * pgxc_clean_node_count)) == NULL)
+ return(NULL);
+ memset(cur_txn_info->next->txn_stat, 0, sizeof(TXN_STATUS) * pgxc_clean_node_count);
+ return cur_txn_info->next;
+}
+
+
+static txn_info *find_txn(TransactionId gxid)
+{
+ database_info *cur_db;
+ txn_info *cur_txn;
+
+ for (cur_db = head_database_info; cur_db; cur_db = cur_db->next)
+ {
+ for (cur_txn = cur_db->head_txn_info; cur_txn; cur_txn = cur_txn->next)
+ {
+ if (cur_txn->gxid == gxid)
+ return cur_txn;
+ }
+ }
+ return NULL;
+}
+
+int set_txn_status(TransactionId gxid, char *node_name, TXN_STATUS status)
+{
+ txn_info *txn;
+ int node_idx;
+
+ txn = find_txn(gxid);
+ if (txn == NULL)
+ return -1;
+
+ node_idx = find_node_index(node_name);
+ if (node_idx < 0)
+ return -1;
+
+ txn->txn_stat[node_idx] = status;
+ return 0;
+}
+
+/*
+ * This function should be called "after" all the 2PC info
+ * has been collected.
+ *
+ * To determine if a prepared transaction is implicit or explicit,
+ * we use the prepared xid string. If the xid starts with XIDPREFIX
+ * (roughly xid ~ '__XC[0-9]+'), it is an implicit 2PC.
+ */
+
+TXN_STATUS check_txn_global_status_gxid(TransactionId gxid)
+{
+ return(check_txn_global_status(find_txn(gxid)));
+}
+
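+/*
+ * Note on the implicit xid layout (inferred from the strtok() parsing in
+ * find_txn_participant_nodes below; not an external specification):
+ *
+ * <XIDPREFIX...>:<origcoord>:<isorigcoord_part>:<num_dnparts>:<num_coordparts>:<dnid>:...:<coordid>:...
+ *
+ * For example, a hypothetical xid "__XC1234:coord1:0:2:1:3:4:1" would mean:
+ * originating coordinator "coord1" (not itself a participant), two
+ * participant datanodes with node ids 3 and 4, and one participant
+ * coordinator with node id 1.
+ */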
+static void find_txn_participant_nodes(txn_info *txn)
+{
+ int ii;
+ char *xid;
+ char *val;
+
+ if (txn == NULL)
+ return;
+
+ if ((txn->xid == NULL || *txn->xid == '\0'))
+ return;
+
+ xid = strdup(txn->xid);
+ if (xid == NULL)
+ {
+ fprintf(stderr, "Out of memory.\n");
+ exit(1);
+ }
+
+#define SEP ":"
+ val = strtok(xid, SEP);
+ if (strncmp(val, XIDPREFIX, strlen(XIDPREFIX)) != 0)
+ {
+ fprintf(stderr, "Invalid format for implicit XID (%s).\n", txn->xid);
+ exit(1);
+ }
+
+ /* Get originating coordinator name */
+ val = strtok(NULL, SEP);
+ if (val == NULL)
+ {
+ fprintf(stderr, "Invalid format for implicit XID (%s).\n", txn->xid);
+ exit(1);
+ }
+ txn->origcoord = strdup(val);
+
+ /* Get if the originating coordinator was involved in the txn */
+ val = strtok(NULL, SEP);
+ if (val == NULL)
+ {
+ fprintf(stderr, "Invalid format for implicit XID (%s).\n", txn->xid);
+ exit(1);
+ }
+ txn->isorigcoord_part = atoi(val);
+
+ /* Get participating datanode count */
+ val = strtok(NULL, SEP);
+ if (val == NULL)
+ {
+ fprintf(stderr, "Invalid format for implicit XID (%s).\n", txn->xid);
+ exit(1);
+ }
+ txn->num_dnparts = atoi(val);
+
+ /* Get participating coordinator count */
+ val = strtok(NULL, SEP);
+ if (val == NULL)
+ {
+ fprintf(stderr, "Invalid format for implicit XID (%s).\n", txn->xid);
+ exit(1);
+ }
+ txn->num_coordparts = atoi(val);
+
+ txn->dnparts = (int *) malloc(sizeof (int) * txn->num_dnparts);
+ txn->coordparts = (int *) malloc(sizeof (int) * txn->num_coordparts);
+ if ((txn->num_dnparts > 0 && txn->dnparts == NULL) ||
+ (txn->num_coordparts > 0 && txn->coordparts == NULL))
+ {
+ fprintf(stderr, "Out of memory.\n");
+ exit(1);
+ }
+
+ for (ii = 0; ii < txn->num_dnparts; ii++)
+ {
+ val = strtok(NULL, SEP);
+ if (val == NULL)
+ {
+ fprintf(stderr, "Invalid format for implicit XID (%s).\n", txn->xid);
+ exit(1);
+ }
+ txn->dnparts[ii] = atoi(val);
+ }
+
+ for (ii = 0; ii < txn->num_coordparts; ii++)
+ {
+ val = strtok(NULL, SEP);
+ if (val == NULL)
+ {
+ fprintf(stderr, "Invalid format for implicit XID (%s).\n", txn->xid);
+ exit(1);
+ }
+ txn->coordparts[ii] = atoi(val);
+ }
+
+ free(xid); /* The tokens are no longer needed; all parsed values were copied */
+ return;
+}
+
+TXN_STATUS check_txn_global_status(txn_info *txn)
+{
+#define TXN_PREPARED 0x0001
+#define TXN_COMMITTED 0x0002
+#define TXN_ABORTED 0x0004
+
+ int ii;
+ int check_flag = 0;
+ int nodeindx;
+
+ if (txn == NULL)
+ return TXN_STATUS_INITIAL;
+
+ find_txn_participant_nodes(txn);
+
+ for (ii = 0; ii < txn->num_dnparts; ii++)
+ {
+ nodeindx = find_node_index_by_nodeid(txn->dnparts[ii]);
+ if (nodeindx == -1)
+ {
+ fprintf(stderr, "Participant datanode %d not reachable. Can't "
+ "resolve the transaction %s", txn->dnparts[ii], txn->xid);
+ return TXN_STATUS_FAILED;
+ }
+
+ if (txn->txn_stat[nodeindx] == TXN_STATUS_INITIAL ||
+ txn->txn_stat[nodeindx] == TXN_STATUS_UNKNOWN)
+ check_flag |= TXN_ABORTED;
+ else if (txn->txn_stat[nodeindx] == TXN_STATUS_PREPARED)
+ check_flag |= TXN_PREPARED;
+ else if (txn->txn_stat[nodeindx] == TXN_STATUS_COMMITTED)
+ check_flag |= TXN_COMMITTED;
+ else if (txn->txn_stat[nodeindx] == TXN_STATUS_ABORTED)
+ check_flag |= TXN_ABORTED;
+ else
+ return TXN_STATUS_FAILED;
+ }
+
+ for (ii = 0; ii < txn->num_coordparts; ii++)
+ {
+ nodeindx = find_node_index_by_nodeid(txn->coordparts[ii]);
+ if (nodeindx == -1)
+ {
+ fprintf(stderr, "Participant datanode %d not reachable. Can't "
+ "resolve the transaction %s", txn->coordparts[ii], txn->xid);
+ return TXN_STATUS_FAILED;
+ }
+
+ if (txn->txn_stat[nodeindx] == TXN_STATUS_INITIAL ||
+ txn->txn_stat[nodeindx] == TXN_STATUS_UNKNOWN)
+ check_flag |= TXN_ABORTED;
+ else if (txn->txn_stat[nodeindx] == TXN_STATUS_PREPARED)
+ check_flag |= TXN_PREPARED;
+ else if (txn->txn_stat[nodeindx] == TXN_STATUS_COMMITTED)
+ check_flag |= TXN_COMMITTED;
+ else if (txn->txn_stat[nodeindx] == TXN_STATUS_ABORTED)
+ check_flag |= TXN_ABORTED;
+ else
+ return TXN_STATUS_FAILED;
+ }
+
+ if ((check_flag & TXN_PREPARED) == 0)
+ /* There should be at least one prepared transaction among the nodes */
+ return TXN_STATUS_FAILED;
+ if ((check_flag & TXN_COMMITTED) && (check_flag & TXN_ABORTED))
+ /* Mix of committed and aborted. This should not happen. */
+ return TXN_STATUS_FAILED;
+ if (check_flag & TXN_COMMITTED)
+ /* Some 2PC transactions are committed. Need to commit others. */
+ return TXN_STATUS_COMMITTED;
+ if (check_flag & TXN_ABORTED)
+ /* Some 2PC transactions are aborted. Need to abort others. */
+ return TXN_STATUS_ABORTED;
+ /* All the transactions remain prepared. No need to recover. */
+ if (check_xid_is_implicit(txn->xid))
+ return TXN_STATUS_COMMITTED;
+ else
+ return TXN_STATUS_PREPARED;
+}
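+/*
+ * Decision summary for check_txn_global_status (derived from the checks above):
+ *
+ * any prepared? any committed? any aborted? -> result
+ * no - - FAILED
+ * yes yes yes FAILED (inconsistent mix)
+ * yes yes no COMMITTED (commit the rest)
+ * yes no yes ABORTED (abort the rest)
+ * yes no no COMMITTED if the xid is
+ * implicit, else PREPARED
+ */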
+
+
+/*
+ * Returns 1 if implicit, 0 otherwise.
+ *
+ * Should this be replaced with regexp calls?
+ */
+static int check_xid_is_implicit(char *xid)
+{
+ if (strncmp(xid, XIDPREFIX, strlen(XIDPREFIX)) != 0)
+ return 0;
+ return 1;
+}
+
+bool check2PCExists(void)
+{
+ database_info *cur_db;
+
+ for (cur_db = head_database_info; cur_db; cur_db = cur_db->next)
+ {
+ /* Any recorded transaction in any database means 2PC info exists */
+ if (cur_db->head_txn_info != NULL)
+ return (true);
+ }
+ return (false);
+}
+
+char *str_txn_stat(TXN_STATUS status)
+{
+ switch(status)
+ {
+ case TXN_STATUS_INITIAL:
+ return("initial");
+ case TXN_STATUS_UNKNOWN:
+ return("unknown");
+ case TXN_STATUS_PREPARED:
+ return("prepared");
+ case TXN_STATUS_COMMITTED:
+ return("committed");
+ case TXN_STATUS_ABORTED:
+ return("aborted");
+ case TXN_STATUS_FAILED:
+ return("failed");
+ default:
+ return("undefined status");
+ }
+ return("undefined status");
+}
diff --git a/contrib/pgxc_clean/txninfo.h b/contrib/pgxc_clean/txninfo.h
new file mode 100644
index 0000000000..fc0e8eca65
--- /dev/null
+++ b/contrib/pgxc_clean/txninfo.h
@@ -0,0 +1,95 @@
+/*-------------------------------------------------------------------------
+ *
+ * txninfo.h
+ * Prepared transaction info
+ *
+ * Portions Copyright (c) 2012 Postgres-XC Development Group
+ *
+ * $Postgres-XC$
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifndef TXNINFO_H
+#define TXNINFO_H
+
+#include "gtm/gtm_c.h"
+
+typedef enum TXN_STATUS
+{
+ TXN_STATUS_INITIAL = 0, /* Initial */
+ TXN_STATUS_UNKNOWN, /* Unknown: Frozen, running, or not started */
+ TXN_STATUS_PREPARED,
+ TXN_STATUS_COMMITTED,
+ TXN_STATUS_ABORTED,
+ TXN_STATUS_INPROGRESS,
+ TXN_STATUS_FAILED /* Error detected while interacting with the node */
+} TXN_STATUS;
+
+typedef enum NODE_TYPE
+{
+ NODE_TYPE_COORD = 1,
+ NODE_TYPE_DATANODE
+} NODE_TYPE;
+
+
+typedef struct node_info
+{
+ char *node_name;
+ int port;
+ char *host;
+ NODE_TYPE type;
+ int nodeid;
+} node_info;
+
+extern node_info *pgxc_clean_node_info;
+extern int pgxc_clean_node_count;
+
+typedef struct txn_info
+{
+ struct txn_info *next;
+ TransactionId gxid;
+ char *xid; /* xid used in prepare */
+ char *owner;
+ char *origcoord; /* Original coordinator who initiated the txn */
+ bool isorigcoord_part; /* Is the original coordinator a participant? */
+ int num_dnparts; /* Number of participant datanodes */
+ int num_coordparts; /* Number of participant coordinators */
+ int *dnparts; /* Node ids of the participant datanodes */
+ int *coordparts; /* Node ids of the participant coordinators */
+ TXN_STATUS *txn_stat; /* Transaction status, one entry per node */
+ char *msg; /* Notice message for this txn. */
+} txn_info;
+
+typedef struct database_info
+{
+ struct database_info *next;
+ char *database_name;
+ txn_info *head_txn_info;
+ txn_info *last_txn_info;
+} database_info;
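+/*
+ * Data model note: database_info entries form a singly linked list rooted at
+ * head_database_info, and each database carries its own singly linked list
+ * of txn_info entries (head_txn_info/last_txn_info). Lookups in txninfo.c
+ * are linear scans over these lists.
+ */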
+
+extern database_info *head_database_info;
+extern database_info *last_database_info;
+
+/* Functions */
+
+extern txn_info *init_txn_info(char *database_name, TransactionId gxid);
+extern int add_txn_info(char *database, char *node, TransactionId gxid, char *xid, char *owner, TXN_STATUS status);
+extern txn_info *find_txn_info(TransactionId gxid);
+extern int set_txn_status(TransactionId gxid, char *node_name, TXN_STATUS status);
+extern database_info *find_database_info(char *database_name);
+extern database_info *add_database_info(char *database_name);
+extern node_info *find_node_info(char *node_name);
+extern node_info *find_node_info_by_nodeid(int nodeid);
+extern int find_node_index(char *node_name);
+extern int find_node_index_by_nodeid(int nodeid);
+extern int set_node_info(char *node_name, int port, char *host, NODE_TYPE type,
+ int nodeid, int index);
+extern TXN_STATUS check_txn_global_status(txn_info *txn);
+extern TXN_STATUS check_txn_global_status_gxid(TransactionId gxid);
+extern bool check2PCExists(void);
+extern char *str_txn_stat(TXN_STATUS status);
+
+#endif /* TXNINFO_H */
diff --git a/contrib/pgxc_ctl/.gitignore b/contrib/pgxc_ctl/.gitignore
new file mode 100644
index 0000000000..ff5178b242
--- /dev/null
+++ b/contrib/pgxc_ctl/.gitignore
@@ -0,0 +1,3 @@
+/pgxc_ctl
+/pgxc_ctl_bash.c
+/signature.h
diff --git a/contrib/pgxc_ctl/Makefile b/contrib/pgxc_ctl/Makefile
new file mode 100644
index 0000000000..e73764bd45
--- /dev/null
+++ b/contrib/pgxc_ctl/Makefile
@@ -0,0 +1,55 @@
+#-------------------------------------------------------------------------
+#
+# Makefile for contrib/pgxc_ctl
+#
+# Portions Copyright (c) 2013 Postgres-XC Development Group
+#
+# $PostgreSQL$
+#
+#-------------------------------------------------------------------------
+
+PGFILEDESC = "pgxc_ctl - Provide XC configuration and opeation"
+PGAPPICON = win32
+
+PROGRAM= pgxc_ctl
+OBJS= pgxc_ctl_bash.o bash_handler.o config.o pgxc_ctl.o variables.o pgxc_ctl_log.o do_command.o \
+ utils.o do_shell.o gtm_cmd.o coord_cmd.o datanode_cmd.o gtm_util.o mcxt.o monitor.o
+
+
+#Include GTM objects
+gtm_builddir = $(top_builddir)/src/gtm
+EX_OBJS = $(gtm_builddir)/common/assert.o \
+ $(gtm_builddir)/client/libgtmclient.a \
+ $(gtm_builddir)/common/gtm_serialize.o
+
+PG_CPPFLAGS = -DFRONTEND -DDLSUFFIX=\"$(DLSUFFIX)\" -I$(srcdir) -I$(libpq_srcdir)
+PG_LIBS = $(libpq_pgport) $(PTHREAD_LIBS) $(EX_OBJS)
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = contrib/pgxc_ctl
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
+
+pgxc_ctl_bash.c: pgxc_ctl_conf_part_full pgxc_ctl_conf_part_minimal pgxc_ctl_conf_part_empty pgxc_ctl_bash_2
+ cd $(top_srcdir)/$(subdir) && ./make_signature
+ifeq ($(vpath_build),yes)
+ mv $(top_srcdir)/$(subdir)/signature.h .
+ mv $(top_srcdir)/$(subdir)/pgxc_ctl_bash.c .
+endif
+
+clean: clean-script
+
+clean-script:
+ rm -f pgxc_ctl_bash.c signature.h
+
+check:
+ $(prove_check)
+
+installcheck:
+ $(prove_installcheck)
diff --git a/contrib/pgxc_ctl/bash_handler.c b/contrib/pgxc_ctl/bash_handler.c
new file mode 100644
index 0000000000..c4aa1e27f1
--- /dev/null
+++ b/contrib/pgxc_ctl/bash_handler.c
@@ -0,0 +1,79 @@
+/*-------------------------------------------------------------------------
+ *
+ * bash_handler.c
+ *
+ * Bash script handler module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include "bash_handler.h"
+#include "config.h"
+#include "pgxc_ctl.h"
+#include "pgxc_ctl_log.h"
+
+extern char *pgxc_ctl_bash_script[];
+extern char *pgxc_ctl_conf_prototype[];
+
+/*
+ * Install bash script.
+ */
+void install_pgxc_ctl_bash(char *path, int read_prototype)
+{
+ char cmd[1024];
+ FILE *pgxc_ctl_bash = fopen(path, "w");
+ int i;
+
+ elog(NOTICE, "Installing pgxc_ctl_bash script as %s.\n", path);
+ if (!pgxc_ctl_bash)
+ {
+ elog(ERROR, "ERROR: Could not open pgxc_ctl bash script, %s, %s\n", path, strerror(errno));
+ return;
+ }
+ if (read_prototype)
+ {
+ for (i=0; pgxc_ctl_conf_prototype[i]; i++)
+ fprintf(pgxc_ctl_bash, "%s\n", pgxc_ctl_conf_prototype[i]);
+ }
+ for (i=0; pgxc_ctl_bash_script[i]; i++)
+ fprintf(pgxc_ctl_bash, "%s\n", pgxc_ctl_bash_script[i]);
+ fclose(pgxc_ctl_bash);
+ sprintf(cmd, "chmod +x %s", path);
+ system(cmd);
+}
+
+/*
+ * Uninstall bash script.
+ */
+void uninstall_pgxc_ctl_bash(char *path)
+{
+ if (path)
+ unlink(path);
+}
+
+/*
+ * Run the bash script and read its output, which consists of the variable values
+ * needed to configure the postgres-xc cluster in pgxc_ctl.
+ *
+ * Be careful that pgxc_ctl changes its working directory to pgxc home directory,
+ * typically $HOME/pgxc_ctl, which can be changed with pgxc_ctl options.
+ * See pgxc_ctl.c or pgxc_ctl document for details.
+ */
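+/*
+ * For illustration, each line printed by the script is expected to have the
+ * form "varname value value ..." (see the configuration parser in config.c),
+ * e.g. (hypothetical values):
+ *
+ * coordNames coord1 coord2
+ * coordPorts 5432 5432
+ */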
+void read_config_file(char *path, char *conf)
+{
+ FILE *vars;
+ char cmd[1024];
+
+ if (conf)
+ sprintf(cmd, "bash %s/pgxc_ctl_bash --configure %s print_values", path, conf);
+ else
+ sprintf(cmd, "bash %s/pgxc_ctl_bash print_values", path);
+ vars = popen(cmd, "r");
+ read_vars(vars);
+ pclose(vars);
+}
diff --git a/contrib/pgxc_ctl/bash_handler.h b/contrib/pgxc_ctl/bash_handler.h
new file mode 100644
index 0000000000..c0494137bd
--- /dev/null
+++ b/contrib/pgxc_ctl/bash_handler.h
@@ -0,0 +1,18 @@
+/*-------------------------------------------------------------------------
+ *
+ * bash_handler.h
+ *
+ * Bash script handling module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef BASH_HANDLER_H
+#define BASH_HANDLER_H
+
+void install_pgxc_ctl_bash(char *path, int read_prototype);
+void read_config_file(char *path, char *conf);
+void uninstall_pgxc_ctl_bash(char *path);
+
+#endif /* BASH_HANDLER_H */
diff --git a/contrib/pgxc_ctl/config.c b/contrib/pgxc_ctl/config.c
new file mode 100644
index 0000000000..b35c4c1f27
--- /dev/null
+++ b/contrib/pgxc_ctl/config.c
@@ -0,0 +1,1223 @@
+/*-------------------------------------------------------------------------
+ *
+ * config.c
+ *
+ * Configuration module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+/*
+ * This module handles the pgxc_ctl configuration. This file includes
+ * the parser of incoming variable information, which is installed
+ * into the pgxc_ctl variable system.
+ *
+ * This module also checks if there are any conflicts in the resources
+ * among different nodes.
+ */
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <errno.h>
+#include <readline/readline.h>
+#include <readline/history.h>
+#include "variables.h"
+#include "varnames.h"
+#include "config.h"
+#include "pgxc_ctl_log.h"
+#include "pgxc_ctl.h"
+#include "utils.h"
+#include "do_shell.h"
+
+static void addServer(char **name);
+static void verifyResource(void);
+
+enum Quote { UNQUOTED, SINGLEQUOTED, DOUBLEQUOTED };
+
+/*====================================================================
+ *
+ * Configuration parser
+ *
+ * The following functions read, parse and construct variables.
+ *
+ * As you see pgxc_ctl_bash_script[] in pgxc_ctl_bash.c, each variable
+ * will be read in the form of
+ * varname value value ....
+ *
+ * Each variable is basically an array. Sometimes, only the first
+ * element is needed.
+ *
+ * Please note that pgxc_ctl_bash.c is built by the make_signature script
+ * from pgxc_ctl_bash_2 and the pgxc_ctl_conf_part files dynamically.
+ *
+ * You can change detailed behaviors of the script by editing these
+ * files. Be careful! Please maintain the interface to this module.
+ *
+ *===================================================================
+ */
+/*
+ * Get a token from the line
+ */
+char *get_word(char *line, char **token)
+{
+ enum Quote quoted = UNQUOTED;
+
+ *token = NULL;
+
+ if (!line)
+ return NULL;
+ for(;*line == ' ' || *line == '\t'; line++);
+ if (!*line)
+ {
+ *token = NULL;
+ return NULL;
+ }
+ if (*line == '"')
+ {
+ quoted = DOUBLEQUOTED;
+ line++;
+ }
+ else if (*line == '\'')
+ {
+ quoted = SINGLEQUOTED;
+ line++;
+ }
+ else if (*line == '#')
+ {
+ *line = 0;
+ *token = NULL;
+ return NULL;
+ }
+ *token = line;
+ if (quoted == DOUBLEQUOTED)
+ {
+ for (; *line && *line != '\n' && *line != '"'; line++);
+ if (*line == '"')
+ {
+ *line = 0;
+ line++;
+ }
+ }
+ else if (quoted == SINGLEQUOTED)
+ {
+ for (; *line && *line != '\n' && *line != '\''; line++);
+ if (*line == '\'')
+ {
+ *line = 0;
+ line++;
+ }
+ }
+ else
+ for (; *line && *line != ' ' && *line != '\t' && *line != '\n' && *line != '#'; line++);
+ if (*line == '#')
+ {
+ *line = 0;
+ return(line);
+ }
+ else if (*line)
+ {
+ *line = 0;
+ return(line+1);
+ }
+ else
+ return(line);
+}
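+/*
+ * For illustration, given the (hypothetical) configuration line
+ *
+ * coordPorts 5432 '5433' # trailing comment
+ *
+ * successive get_word() calls return the tokens "coordPorts", "5432" and
+ * "5433"; the '#' ends the scan, so the comment is never tokenized.
+ */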
+
+/*
+ * Parse line and create/update a variable.
+ */
+static void parse_line(char *line)
+{
+ char *varname;
+ char *val;
+ pgxc_ctl_var *newv;
+
+ line = get_word(line, &varname);
+ if (!varname)
+ return;
+ if (!(newv = confirm_var(varname)))
+ return;
+ reset_value(newv);
+ while((line = get_word(line, &val)))
+ {
+ if (val && (strcmp(val, "") != 0))
+ {
+ add_val(newv, val);
+ }
+ }
+}
+
+/*
+ * Parse line and filter only pre-defined variables.
+ *
+ * This prevents any unknown variables from being built into the pgxc_ctl variable system.
+ */
+static void parse_line_select(char *line, char *selectThis[])
+{
+ char *varname;
+ char *val;
+ pgxc_ctl_var *newv;
+ int ii;
+
+ line = get_word(line, &varname);
+ if (!varname || varname[0] == '#')
+ return;
+ for (ii = 0; selectThis[ii]; ii++)
+ {
+ if (strcmp(varname, selectThis[ii]) == 0)
+ {
+ if (!(newv = confirm_var(varname)))
+ return;
+ while((line = get_word(line, &val)))
+ {
+ if (val)
+ add_val(newv, val);
+ }
+ }
+ }
+}
+
+/*
+ * Configuration file I/F
+ */
+void read_vars(FILE *conf)
+{
+ char line[MAXLINE+1];
+
+ while (fgets(line, MAXLINE, conf))
+ parse_line(line);
+}
+
+/*
+ * Configuration file I/F
+ */
+void read_selected_vars(FILE *conf, char *selectThis[])
+{
+ char line[MAXLINE+1];
+
+ while (fgets(line, MAXLINE, conf))
+ parse_line_select(line, selectThis);
+}
+
+/*
+ * Get all the servers --> VAR_allServers
+ */
+static void addServer(char **name)
+{
+ int ii, jj;
+ int flag;
+
+ confirm_var(VAR_allServers);
+
+ for (ii = 0; name[ii]; ii++)
+ {
+ flag = TRUE;
+ for (jj = 0; aval(VAR_allServers)[jj]; jj++)
+ {
+ if (strcmp(name[ii], aval(VAR_allServers)[jj]) != 0)
+ continue;
+ else
+ {
+ flag = FALSE;
+ break;
+ }
+ }
+ if (flag)
+ add_val(find_var(VAR_allServers), name[ii]);
+ }
+}
+
+/*
+ * Test each node and build target server list
+ */
+void makeServerList(void)
+{
+ /* Initialize */
+ reset_var(VAR_allServers);
+ /* GTM Master */
+ addServer(aval(VAR_gtmMasterServer));
+ /* GTM Slave */
+ if (isVarYes(VAR_gtmSlave))
+ addServer(aval(VAR_gtmSlaveServer));
+ /* GTM_Proxy */
+ if (isVarYes(VAR_gtmProxy))
+ addServer(aval(VAR_gtmProxyServers));
+ /* Coordinator Master */
+ if (find_var(VAR_coordMasterServers))
+ addServer(aval(VAR_coordMasterServers));
+ /* Coordinator Slave */
+ if (isVarYes(VAR_coordSlave))
+ addServer(aval(VAR_coordSlaveServers));
+ /* Datanode Master */
+ addServer(aval(VAR_datanodeMasterServers));
+ /* Datanode Slave */
+ if (isVarYes(VAR_datanodeSlave))
+ addServer(aval(VAR_datanodeSlaveServers));
+ /* Should add secondary slaves */
+}
+
+
+/*
+ * Take care of "no" slaves and build server list. At present, we don't support
+ * Cascaded or multi slaves. This will be supported in the future.
+ *
+ * Please note that log archive backup site is not counted as this server list
+ * because such servers are not likely to provide XC node operation.
+ *
+ * Log archive backup will be implemented in the future.
+ */
+int is_none(char *s)
+{
+ if (s == NULL)
+ return TRUE;
+ if (strcmp(s, "none") == 0)
+ return TRUE;
+ if (strcmp(s, "") == 0)
+ return TRUE;
+ if (strcmp(s, "N/A") == 0)
+ return TRUE;
+ return FALSE;
+}
+
+/*
+ * Remove gtm slave. Used at failover.
+ */
+static void emptyGtmSlave()
+{
+ reset_var_val(VAR_gtmSlaveServer, "none");
+ reset_var_val(VAR_gtmSlavePort, "0");
+ reset_var_val(VAR_gtmSlaveDir, "none");
+}
+
+/*
+ * Remove gtm proxies. Used when a node crashes.
+ * Because gtm_proxy is expected to be running at any target
+ * server, we don't have gtm_proxy slaves.
+ * We can just initialize gtm_proxy using initgtm, configure it and
+ * run it.
+ */
+static void emptyGtmProxies()
+{
+ int ii;
+
+ reset_var_val(VAR_gtmProxy, "n");
+ reset_var(VAR_gtmProxyServers);
+ reset_var(VAR_gtmProxyNames);
+ reset_var(VAR_gtmProxyPorts);
+ reset_var(VAR_gtmProxyDirs);
+ reset_var_val(VAR_gtmPxyExtraConfig, "none");
+ reset_var(VAR_gtmPxySpecificExtraConfig);
+ for (ii = 0; ii < arraySizeName(VAR_allServers); ii++)
+ {
+ add_val(find_var(VAR_gtmProxyServers), "none");
+ add_val(find_var(VAR_gtmProxyNames), "none");
+ add_val(find_var(VAR_gtmProxyPorts), "-1");
+ add_val(find_var(VAR_gtmProxyDirs), "none");
+ add_val(find_var(VAR_gtmPxyExtraConfig), "none");
+ }
+}
+
+/*
+ * Removes coordinator slaves from pgxc_ctl configuration.
+ * This is needed when a slave promotes and becomes a new
+ * master.
+ */
+static void emptyCoordSlaves()
+{
+ int ii;
+
+ reset_var_val(VAR_coordSlave, "n");
+ reset_var(VAR_coordSlaveServers);
+ reset_var(VAR_coordSlavePorts);
+ reset_var(VAR_coordSlavePoolerPorts);
+ reset_var(VAR_coordSlaveDirs);
+ reset_var(VAR_coordArchLogDirs);
+ for (ii = 0; ii < arraySizeName(VAR_coordNames); ii++)
+ {
+ add_val(find_var(VAR_coordSlaveServers), "none");
+ add_val(find_var(VAR_coordSlaveDirs), "none");
+ add_val(find_var(VAR_coordArchLogDirs), "none");
+ add_val(find_var(VAR_coordSlavePorts), "none");
+ add_val(find_var(VAR_coordSlavePoolerPorts), "none");
+ }
+}
+
+/*
+ * Removes datanode slave from pgxc_ctl configuration.
+ */
+static void emptyDatanodeSlaves()
+{
+ int ii;
+
+ reset_var_val(VAR_datanodeSlave, "n");
+ reset_var(VAR_datanodeSlaveServers);
+ reset_var(VAR_datanodeSlavePorts);
+ reset_var(VAR_datanodeSlavePoolerPorts);
+ reset_var(VAR_datanodeSlaveDirs);
+ reset_var(VAR_datanodeArchLogDirs);
+ for (ii = 0; ii < arraySizeName(VAR_datanodeSlaveServers); ii++)
+ {
+ add_val(find_var(VAR_datanodeSlaveServers), "none");
+ add_val(find_var(VAR_datanodeSlaveDirs), "none");
+ add_val(find_var(VAR_datanodeArchLogDirs), "none");
+ add_val(find_var(VAR_datanodeSlavePorts), "-1");
+ add_val(find_var(VAR_datanodeSlavePoolerPorts), "-1");
+ }
+}
+
+/*
+ * Scans the initial configuration and sets up "not configured" things.
+ *
+ * If, for example, gtm proxy is not configured,
+ * we set the gtmProxy variable to "n".
+ *
+ * When the gtmProxy variable is already set to "n", remove the gtm_proxy
+ * configuration information.
+ *
+ * Similar handling is done for the gtm slave, coordinator slaves
+ * and datanode slaves.
+ */
+void handle_no_slaves()
+{
+ int is_empty;
+ int ii;
+
+ /* GTM Slave */
+ if (!find_var(VAR_gtmSlave))
+ reset_var_val(VAR_gtmSlave, "n");
+ if (!isVarYes(VAR_gtmSlave))
+ emptyGtmSlave();
+ else
+ {
+ confirm_var(VAR_gtmSlaveServer);
+ if (!sval(VAR_gtmSlaveServer) || is_none(sval(VAR_gtmSlaveServer)))
+ {
+ emptyGtmSlave();
+ reset_var_val(VAR_gtmSlaveServer, "n");
+ }
+ }
+
+ /* GTM Proxy */
+ if (!find_var(VAR_gtmProxy))
+ reset_var_val(VAR_gtmProxy, "n");
+ if (!isVarYes(VAR_gtmProxy))
+ emptyGtmProxies();
+ else
+ {
+ is_empty = TRUE;
+ for (ii = 0; aval(VAR_gtmProxyServers)[ii]; ii++)
+ {
+ if (is_none(aval(VAR_gtmProxyServers)[ii]))
+ continue;
+ else
+ {
+ is_empty = FALSE;
+ break;
+ }
+ }
+ if (is_empty)
+ {
+ reset_var_val(VAR_gtmProxy, "n");
+ emptyGtmProxies();
+ }
+ }
+ /* Coordinator Slaves */
+ if (!find_var(VAR_coordSlave))
+ reset_var_val(VAR_coordSlave, "n");
+ if (!isVarYes(VAR_coordSlave))
+ emptyCoordSlaves();
+ else
+ {
+ is_empty = TRUE;
+ if (find_var(VAR_coordSlaveServers))
+ {
+ for (ii = 0; aval(VAR_coordSlaveServers)[ii]; ii++)
+ {
+ if (is_none(aval(VAR_coordSlaveServers)[ii]))
+ continue;
+ else
+ {
+ is_empty = FALSE;
+ break;
+ }
+ }
+ if (is_empty)
+ {
+ reset_var_val(VAR_coordSlave, "n");
+ emptyCoordSlaves();
+ }
+ }
+ else
+ {
+ elog(WARNING, "WARNING: coordSlaveServers variable not found where coordSlave is set to \"y\"\n");
+ reset_var_val(VAR_coordSlave, "n");
+ emptyCoordSlaves();
+ }
+ }
+ /* Datanode Slaves */
+ if (!find_var(VAR_datanodeSlave))
+ reset_var_val(VAR_datanodeSlave, "n");
+ if (!isVarYes(VAR_datanodeSlave))
+ emptyDatanodeSlaves();
+ else
+ {
+ is_empty = TRUE;
+ if (find_var(VAR_datanodeSlaveServers))
+ {
+ for (ii = 0; aval(VAR_datanodeSlaveServers)[ii]; ii++)
+ {
+ if (is_none(aval(VAR_datanodeSlaveServers)[ii]))
+ continue;
+ else
+ {
+ is_empty = FALSE;
+ break;
+ }
+ }
+ if (is_empty)
+ {
+ reset_var_val(VAR_datanodeSlave, "n");
+ emptyDatanodeSlaves();
+ }
+ }
+ else
+ {
+ elog(WARNING, "WARNING: datanodeSlaveServers variable not found where datanodeSlave is set to \"y\"\n");
+ reset_var_val(VAR_datanodeSlave, "n");
+ emptyDatanodeSlaves();
+ }
+ }
+}
+
+/*
+ * Check that there is no overlap in the resources: ports, hosts and directories.
+ */
+static void reportMissingVar(char *name)
+{
+ elog(ERROR, "ERROR: %s is not configured.\n", name);
+}
+
+static int anyConfigErrors = FALSE;
+
+static void checkIfVarIsConfigured(char *name)
+{
+ /* var could be just defined without valid contents */
+ if (!find_var(name))
+ {
+ anyConfigErrors = TRUE;
+ reportMissingVar(name);
+ }
+}
+
+static void checkIfConfigured(char *names[])
+{
+ int ii;
+ for(ii = 0; names[ii]; ii++)
+ checkIfVarIsConfigured(names[ii]);
+}
+
+static void checkConfiguredAndSize(char *names[], char *msg)
+{
+ int ii;
+ int sz0;
+
+ for (ii = 0; names[ii]; ii++)
+ {
+ checkIfVarIsConfigured(names[ii]);
+ confirm_var(names[ii]);
+ }
+ sz0 = arraySizeName(names[0]);
+ for (ii = 1; names[ii]; ii++)
+ {
+ if (arraySizeName(names[ii]) != sz0)
+ {
+ anyConfigErrors = TRUE;
+ elog(ERROR, "ERROR: Number of elements in %s definitions are different %s and %s. Check your configuration\n", msg, names[0], names[ii]);
+ }
+ }
+}
+
+int checkSpecificResourceConflict(char *name, char *host, int port, char *dir, int is_gtm)
+{
+ if (checkNameConflict(name, is_gtm))
+ return 1;
+ if (checkPortConflict(host, port))
+ return 1;
+ if (checkDirConflict(host, dir))
+ return 1;
+ return 0;
+}
+/*
+ * Note that 1 will be returned when a conflict is found
+ */
+int checkNameConflict(char *name, int is_gtm)
+{
+ int ii;
+
+ /*
+ * GTM Master
+ */
+ if (!is_gtm && strcasecmp(name, sval(VAR_gtmName)) == 0)
+ return 1;
+ /*
+ * GTM Proxy
+ */
+ if (isVarYes(VAR_gtmProxy))
+ for (ii = 0; aval(VAR_gtmProxyNames)[ii]; ii++)
+ if (strcasecmp(name, aval(VAR_gtmProxyNames)[ii]) == 0)
+ return 1;
+ /*
+ * Coordinator
+ */
+ for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
+ if (strcasecmp(name, aval(VAR_coordNames)[ii]) == 0)
+ return 1;
+ /*
+ * Datanode
+ */
+ for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+ if (strcasecmp(name, aval(VAR_datanodeNames)[ii]) == 0)
+ return 1;
+ return 0;
+}
+
+/*
+ * Note that 1 will be returned when a conflict is found.
+ */
+int checkPortConflict(char *host, int port)
+{
+ int ii;
+
+ /* GTM Master */
+ if (doesExist(VAR_gtmMasterServer, 0) && doesExist(VAR_gtmMasterPort, 0) &&
+ (strcasecmp(host, sval(VAR_gtmMasterServer)) == 0) && (atoi(sval(VAR_gtmMasterPort)) == port))
+ return 1;
+ /* GTM Slave */
+ if (isVarYes(VAR_gtmSlave) && (strcasecmp(host, sval(VAR_gtmSlaveServer)) == 0) && (atoi(sval(VAR_gtmSlavePort)) == port))
+ return 1;
+ /* GTM Proxy */
+ if (isVarYes(VAR_gtmProxy))
+ for (ii = 0; aval(VAR_gtmProxyNames)[ii]; ii++)
+ if ((strcasecmp(host, aval(VAR_gtmProxyServers)[ii]) == 0) && (atoi(aval(VAR_gtmProxyPorts)[ii]) == port))
+ return 1;
+ /* Coordinator Master */
+ for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
+ if ((strcasecmp(host, aval(VAR_coordMasterServers)[ii]) == 0) &&
+ ((atoi(aval(VAR_coordPorts)[ii]) == port) || (atoi(aval(VAR_poolerPorts)[ii])) == port))
+ return 1;
+ /* Coordinator Slave */
+ if (isVarYes(VAR_coordSlave))
+ for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
+ if (doesExist(VAR_coordSlaveServers, ii) && !is_none(aval(VAR_coordSlaveServers)[ii]) &&
+ (strcasecmp(host, aval(VAR_coordSlaveServers)[ii]) == 0) && (atoi(aval(VAR_coordSlavePorts)[ii]) == port))
+ return 1;
+ /* Datanode Master */
+ for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+ if ((strcasecmp(host, aval(VAR_datanodeMasterServers)[ii]) == 0) && (atoi(aval(VAR_datanodePorts)[ii]) == port))
+ return 1;
+ /* Datanode Slave */
+ if (isVarYes(VAR_datanodeSlave))
+ for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+ if (doesExist(VAR_datanodeSlaveServers, ii) && !is_none(aval(VAR_datanodeSlaveServers)[ii]) &&
+ (strcasecmp(host, aval(VAR_datanodeSlaveServers)[ii]) == 0) && (atoi(aval(VAR_datanodeSlavePorts)[ii]) == port))
+ return 1;
+ return 0;
+}
+
+int checkDirConflict(char *host, char *dir)
+{
+ int ii;
+
+ /* "none" conflictd with nothing */
+ if (strcasecmp(dir, "none") == 0)
+ return 0;
+ /* GTM Master */
+ if (doesExist(VAR_gtmMasterServer, 0) && doesExist(VAR_gtmMasterDir, 0) &&
+ (strcasecmp(host, sval(VAR_gtmMasterServer)) == 0) && (strcmp(dir, sval(VAR_gtmMasterDir)) == 0))
+ return 1;
+ /* GTM Slave */
+ if (isVarYes(VAR_gtmSlave) && (strcasecmp(host, sval(VAR_gtmSlaveServer)) == 0) && (strcmp(dir, sval(VAR_gtmSlaveDir)) == 0))
+ return 1;
+ /* GTM Proxy */
+ if (isVarYes(VAR_gtmProxy))
+ for (ii = 0; aval(VAR_gtmProxyNames)[ii]; ii++)
+ if ((strcasecmp(host, aval(VAR_gtmProxyServers)[ii]) == 0) && (strcmp(dir, aval(VAR_gtmProxyDirs)[ii]) == 0))
+ return 1;
+ /* Coordinator Master */
+ for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
+ if ((strcasecmp(host, aval(VAR_coordMasterServers)[ii]) == 0) && (strcmp(dir, aval(VAR_coordMasterDirs)[ii]) == 0))
+ return 1;
+ /* Coordinator Slave */
+ if (isVarYes(VAR_coordSlave))
+ for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
+ if ((strcasecmp(host, aval(VAR_coordSlaveServers)[ii]) == 0) && (strcmp(dir, aval(VAR_coordSlaveDirs)[ii]) == 0))
+ return 1;
+ /* Datanode Master */
+ for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+ if ((strcasecmp(host, aval(VAR_datanodeMasterServers)[ii]) == 0) && (strcmp(dir, aval(VAR_datanodeMasterDirs)[ii]) == 0))
+ return 1;
+ /* Datanode Master WAL Dirs */
+ for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+ if ((strcasecmp(host, aval(VAR_datanodeMasterServers)[ii]) == 0) &&
+ (strcmp(dir, aval(VAR_datanodeMasterWALDirs)[ii]) == 0))
+ return 1;
+ /* Datanode Slave */
+ if (isVarYes(VAR_datanodeSlave))
+ for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+ if (doesExist(VAR_datanodeSlaveServers, ii) && doesExist(VAR_datanodeSlaveDirs, ii) &&
+ (strcasecmp(host, aval(VAR_datanodeSlaveServers)[ii]) == 0) && (strcmp(dir, aval(VAR_datanodeSlaveDirs)[ii]) == 0))
+ return 1;
+ /* Datanode Slave WAL Dirs */
+ if (isVarYes(VAR_datanodeSlave))
+ for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+ if (doesExist(VAR_datanodeSlaveServers, ii) && doesExist(VAR_datanodeSlaveWALDirs, ii) &&
+ (strcasecmp(host, aval(VAR_datanodeSlaveServers)[ii]) == 0) &&
+ (strcmp(dir, aval(VAR_datanodeSlaveWALDirs)[ii]) == 0))
+ return 1;
+ return 0;
+}
+
+/*
+ * Check if there's any conflict between src and dest: checks for duplicates in names, servers, ports and directories.
+ *
+ * The rules are:
+ *
+ * 1) Each node (gtm, gtm_proxy, coordinator, datanode) must have a unique name.
+ *
+ * 2) A port, in a given host, must be owned (listened on) only by a single node.
+ *
+ * 3) A directory, in a given host, must be owned (used) only by a single node.
+ */
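+/*
+ * For example (hypothetical configuration), giving a coordinator and a
+ * datanode the same host and port violates rule 2), and pointing two nodes
+ * at the same data directory on one host violates rule 3); both cases are
+ * reported as configuration errors below.
+ */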
+static void checkResourceConflict(char *srcNames, char *srcServers, char *srcPorts, char *srcPoolers, char *srcDirs,
+ char *destNames, char *destServers, char *destPorts, char *destPoolers, char *destDirs,
+ int destOnly, int checkName)
+{
+ int ii, jj;
+
+ if (!srcNames || !find_var(srcNames))
+ {
+ /* No source specified */
+ return;
+ }
+ if (!destOnly)
+ {
+ /* Check conflict among the source first */
+ for (ii = 0; aval(srcNames)[ii]; ii++)
+ {
+ if (is_none(aval(srcNames)[ii]))
+ continue;
+ /* Pooler and the port in the same name */
+ if (srcPoolers && (atoi(aval(srcPorts)[ii]) == atoi(aval(srcPoolers)[ii])))
+ {
+ if (atoi(aval(srcPorts)[ii]) > 0)
+ {
+ anyConfigErrors = TRUE;
+ elog(ERROR, "ERROR: Conflict in between port and pooler within %s variable.\n", srcNames);
+ }
+ }
+ if (checkName && srcNames && !doesExist(srcNames, ii))
+ assign_arrayEl(srcNames, ii, "none", NULL);
+ if (srcServers && !doesExist(srcServers, ii))
+ assign_arrayEl(srcServers, ii, "none", NULL);
+ if (srcPoolers && !doesExist(srcPoolers, ii))
+ assign_arrayEl(srcPoolers, ii, "-1", "-1");
+ if (srcPorts && !doesExist(srcPorts, ii))
+ assign_arrayEl(srcPorts, ii, "-1", "-1");
+ if (srcDirs && !doesExist(srcDirs, ii))
+ assign_arrayEl(srcDirs, ii, "none", NULL);
+ for (jj = ii+1; aval(srcNames)[jj]; jj++)
+ {
+ /* Name conflict */
+ if (checkName && srcNames && !doesExist(srcNames, jj))
+ assign_arrayEl(srcNames, jj, "none", NULL);
+ if (checkName && srcNames && (strcmp(aval(srcNames)[ii], aval(srcNames)[jj]) == 0))
+ {
+ anyConfigErrors = TRUE;
+ elog(ERROR, "ERROR: Conflict in resource name within %s variable.\n", srcNames);
+ }
+ if (srcServers && is_none(aval(srcServers)[ii]))
+ continue;
+ if (srcServers && !doesExist(srcServers, jj))
+ assign_arrayEl(srcServers, jj, "none", NULL);
+ if (srcServers && strcmp(aval(srcServers)[ii], aval(srcServers)[jj]) == 0)
+ {
+ /* Ports and Poolers */
+ if (srcPorts && !doesExist(srcPorts, jj))
+ assign_arrayEl(srcPorts, jj, "-1", "-1");
+ if (srcPoolers && !doesExist(srcPoolers, jj))
+ assign_arrayEl(srcPoolers, jj, "-1", "-1");
+ if((srcPorts && (atoi(aval(srcPorts)[ii]) > 0) && (atoi(aval(srcPorts)[ii]) == atoi(aval(srcPorts)[jj]))) ||
+ (srcPorts && srcPoolers && (atoi(aval(srcPorts)[ii]) > 0) && (atoi(aval(srcPorts)[ii]) == atoi(aval(srcPoolers)[jj]))) ||
+ (srcPoolers && (atoi(aval(srcPoolers)[ii]) > 0) && (atoi(aval(srcPoolers)[ii]) == atoi(aval(srcPoolers)[jj]))))
+ {
+ anyConfigErrors = TRUE;
+ elog(ERROR, "ERROR: Conflict in port and pooler numbers within %s variable.\n", srcNames);
+ }
+ /* Directories */
+ if (srcDirs && !doesExist(srcDirs, jj))
+ assign_arrayEl(srcDirs, jj, "none", NULL);
+ if (srcDirs && strcmp(aval(srcDirs)[ii], aval(srcDirs)[jj]) == 0)
+ {
+ if (!is_none(aval(srcDirs)[ii]))
+ {
+ anyConfigErrors = TRUE;
+ elog(ERROR, "ERROR: Conflict in directories within %s variable.\n", srcNames);
+ }
+ }
+ }
+ }
+ }
+ }
+ /* Check between src and destination */
+ if (destNames)
+ {
+ for (ii = 0; aval(srcNames)[ii]; ii++)
+ {
+ if (is_none(aval(srcNames)[ii]))
+ continue;
+ for (jj = 0; aval(destNames)[jj]; jj++)
+ {
+ /* Resource names */
+ if (checkName && (strcmp(aval(srcNames)[ii], aval(destNames)[jj]) == 0))
+ {
+ anyConfigErrors = TRUE;
+ elog(ERROR, "ERROR: Conflict in names between %s and %s variable.\n", srcNames, destNames);
+ }
+ if (destServers && !doesExist(destServers, jj))
+ assign_arrayEl(destServers, jj, "none", NULL);
+ if (srcServers && destServers && (strcmp(aval(srcServers)[ii], aval(destServers)[jj]) == 0) && !is_none(aval(srcServers)[ii]))
+ {
+ /* Ports and poolers */
+ if (destPorts && !doesExist(destPorts, jj))
+ assign_arrayEl(destPorts, jj, "-1", "-1");
+ if (destPoolers && !doesExist(destPoolers, jj))
+ assign_arrayEl(destPoolers, jj, "-1", "-1");
+ if ((srcPorts && destPorts && (atoi(aval(srcPorts)[ii]) == atoi(aval(destPorts)[jj])) && (atoi(aval(srcPorts)[ii]) > 0)) ||
+ (destPoolers && srcPorts && (destPoolers && (atoi(aval(srcPorts)[ii]) == atoi(aval(destPoolers)[jj]))) && (atoi(aval(srcPorts)[ii]) > 0)) ||
+ (srcPoolers && destPorts && (atoi(aval(srcPoolers)[ii]) == atoi(aval(destPorts)[jj])) && (atoi(aval(srcPoolers)[ii]) > 0)) ||
+ (srcPoolers && destPoolers && (atoi(aval(srcPoolers)[ii]) == atoi(aval(destPoolers)[jj])) && (atoi(aval(srcPoolers)[ii]) > 0)))
+ {
+ anyConfigErrors = TRUE;
+ elog(ERROR, "ERROR: Conflict in port/pooler in %s and %s variable.\n", srcNames, destNames);
+ }
+ /* Dir Names */
+ if (srcDirs && destDirs &&
+ doesExist(srcDirs, ii) &&
+ !is_none(aval(srcDirs)[ii]) &&
+ doesExist(destDirs, jj) &&
+ !is_none(aval(destDirs)[jj]) &&
+ (strcmp(aval(srcDirs)[ii], aval(destDirs)[jj]) == 0))
+ {
+ anyConfigErrors = TRUE;
+ elog(ERROR, "ERROR: Conflict in directory names in %s and %s variable.\n", srcNames, destNames);
+ }
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Check if each node resource is configured properly
+ * Again, finding an error will not make the program stop.
+ */
+static void verifyResource(void)
+{
+ char *GtmVars[] = {VAR_gtmName,
+ VAR_gtmMasterServer,
+ VAR_gtmMasterPort,
+ VAR_gtmMasterDir,
+ NULL};
+ char *GtmSlaveVars[] = {VAR_gtmSlaveName,
+ VAR_gtmSlaveServer,
+ VAR_gtmSlavePort,
+ VAR_gtmSlaveDir,
+ NULL};
+ char *gtmProxyVars[] = {VAR_gtmProxyNames,
+ VAR_gtmProxyServers,
+ VAR_gtmProxyPorts,
+ VAR_gtmProxyDirs,
+ NULL};
+ char *coordMasterVars[] = {VAR_coordNames,
+ VAR_coordPorts,
+ VAR_poolerPorts,
+ VAR_coordMasterServers,
+ VAR_coordMasterDirs,
+ VAR_coordMaxWALSenders,
+ NULL};
+ char *coordSlaveVars[] = {VAR_coordNames,
+ VAR_coordSlaveServers,
+ VAR_coordSlavePorts,
+ VAR_coordSlavePoolerPorts,
+ VAR_coordSlaveDirs,
+ VAR_coordArchLogDirs,
+ NULL};
+#if 0
+ /*
+ * Please note that at present, pgxc_ctl supports only synchronous replication
+ * between {coordinator|datanode} master and slave.
+ *
+ * Start/stop operation of the master and failover operation is affected by this
+ * settings. Will be improved soon.
+ */
+ char *coordSlaveSVars[] = {VAR_coordSlaveSync, NULL}; /* For extension */
+#endif
+ char *datanodeMasterVars[] = {VAR_datanodeNames,
+ VAR_datanodePorts,
+ VAR_datanodePoolerPorts,
+ VAR_datanodeMasterServers,
+ VAR_datanodeMasterDirs,
+ VAR_datanodeMaxWALSenders,
+ NULL};
+ char *datanodeSlaveVars[] = {VAR_datanodeNames,
+ VAR_datanodeSlaveServers,
+ VAR_datanodeSlavePorts,
+ VAR_datanodeSlavePoolerPorts,
+ VAR_datanodeSlaveDirs,
+ VAR_datanodeArchLogDirs,
+ NULL};
+#if 0
+ char *datanodeSlaveSVars[] = {VAR_datanodeSlaveSync, NULL}; /* For extension, see above */
+#endif
+
+ /*
+ * -------------- Fundamental check -------------------
+ */
+ anyConfigErrors = FALSE;
+ /* GTM */
+ checkIfConfigured(GtmVars);
+ /* GTM slave */
+ if (isVarYes(VAR_gtmSlave))
+ checkIfConfigured(GtmSlaveVars);
+ /* GTM proxy */
+ if (isVarYes(VAR_gtmProxy))
+ checkConfiguredAndSize(gtmProxyVars, "GTM Proxy");
+ /* Coordinator Master */
+ checkIfConfigured(coordMasterVars);
+ checkConfiguredAndSize(coordMasterVars, "coordinator master");
+ /* Coordinator Slave */
+ if (isVarYes(VAR_coordSlave))
+ {
+#if 0
+ checkIfConfigured(coordSlaveSVars); /* For extension, see above */
+
+#endif
+ checkConfiguredAndSize(coordSlaveVars, "coordinator slave");
+ }
+ /* Datanode Master */
+ checkConfiguredAndSize(datanodeMasterVars, "datanode master");
+ /* Datanode Slave */
+ if (sval(VAR_datanodeSlave) && strcmp(sval(VAR_datanodeSlave), "y") == 0)
+ {
+#if 0
+ checkIfConfigured(datanodeSlaveSVars); /* For extension, see above */
+#endif
+ checkConfiguredAndSize(datanodeSlaveVars, "datanode slave");
+ }
+ if (anyConfigErrors)
+ {
+ elog(ERROR, "ERROR: Found fundamental configuration error.\n");
+ exit(1);
+ }
+ /*
+ * --------------- Resource Conflict Check ---------------------
+ */
+ /*
+ * GTM Master and others ----------------
+ */
+ anyConfigErrors = FALSE;
+ /* GTM and GTM slave */
+ if (isVarYes(VAR_gtmSlave))
+ checkResourceConflict(VAR_gtmName, VAR_gtmMasterServer, VAR_gtmMasterPort, NULL, VAR_gtmMasterDir,
+ VAR_gtmSlaveName, VAR_gtmSlaveServer, VAR_gtmSlavePort, NULL, VAR_gtmSlaveDir, TRUE, FALSE);
+ /* GTM and GTM Proxy, if any */
+ if (isVarYes(VAR_gtmProxy))
+ checkResourceConflict(VAR_gtmName, VAR_gtmMasterServer, VAR_gtmMasterPort, NULL, VAR_gtmMasterDir,
+ VAR_gtmProxyNames, VAR_gtmProxyServers, VAR_gtmProxyPorts, NULL, VAR_gtmProxyDirs, TRUE, TRUE);
+ /* GTM and coordinator masters */
+ checkResourceConflict(VAR_gtmName, VAR_gtmMasterServer, VAR_gtmMasterPort, NULL, VAR_gtmMasterDir,
+ VAR_coordNames, VAR_coordMasterServers, VAR_coordPorts, VAR_poolerPorts, VAR_coordMasterDirs,
+ TRUE, TRUE);
+ /* GTM and coordinator slaves, if any */
+ if (isVarYes(VAR_coordSlave))
+ checkResourceConflict(VAR_gtmName, VAR_gtmMasterServer, VAR_gtmMasterPort, NULL, VAR_gtmMasterDir,
+ VAR_coordNames, VAR_coordSlaveServers, VAR_coordSlavePorts, NULL, VAR_coordSlaveDirs, TRUE, TRUE);
+ /* GTM and datanode masters */
+ checkResourceConflict(VAR_gtmName, VAR_gtmMasterServer, VAR_gtmMasterPort, NULL, VAR_gtmMasterDir,
+ VAR_datanodeNames, VAR_datanodeMasterServers, VAR_datanodePorts, NULL, VAR_datanodeMasterDirs, TRUE, TRUE);
+ checkResourceConflict(VAR_gtmName, VAR_gtmMasterServer, VAR_gtmMasterPort, NULL, VAR_gtmMasterDir,
+ VAR_datanodeNames, VAR_datanodeMasterServers,
+ VAR_datanodePorts, NULL, VAR_datanodeMasterWALDirs, TRUE, TRUE);
+ /* GTM and datanode slaves, if any */
+ if(isVarYes(VAR_datanodeSlave))
+ {
+ checkResourceConflict(VAR_gtmName, VAR_gtmMasterServer, VAR_gtmMasterPort, NULL, VAR_gtmMasterDir,
+ VAR_datanodeNames, VAR_datanodeMasterServers, VAR_datanodeSlavePorts, NULL, VAR_datanodeSlaveDirs,
+ TRUE, TRUE);
+ checkResourceConflict(VAR_gtmName, VAR_gtmMasterServer, VAR_gtmMasterPort, NULL, VAR_gtmMasterDir,
+ VAR_datanodeNames, VAR_datanodeMasterServers,
+ VAR_datanodeSlavePorts, NULL,
+ VAR_datanodeSlaveWALDirs,
+ TRUE, TRUE);
+ }
+ /*
+ * GTM slave and others ------------
+ */
+ if (isVarYes(VAR_gtmSlave))
+ {
+ /* GTM slave and GTM master, if any */
+ if (isVarYes(VAR_gtmProxy))
+ checkResourceConflict(VAR_gtmSlaveName, VAR_gtmSlaveServer, VAR_gtmSlavePort, NULL, VAR_gtmSlaveDir,
+ VAR_gtmName, VAR_gtmMasterServer,
+ VAR_gtmMasterPort, NULL, VAR_gtmMasterDir,
+ TRUE, TRUE);
+ /* GTM slave and GTM Proxy, if any */
+ if (isVarYes(VAR_gtmProxy))
+ checkResourceConflict(VAR_gtmSlaveName, VAR_gtmSlaveServer, VAR_gtmSlavePort, NULL, VAR_gtmSlaveDir,
+ VAR_gtmProxyNames, VAR_gtmProxyServers, VAR_gtmProxyPorts, NULL, VAR_gtmProxyDirs,
+ TRUE, TRUE);
+ /* GTM slave and coordinator masters */
+ checkResourceConflict(VAR_gtmSlaveName, VAR_gtmSlaveServer, VAR_gtmSlavePort, NULL, VAR_gtmSlaveDir,
+ VAR_coordNames, VAR_coordMasterServers, VAR_coordPorts, VAR_poolerPorts, VAR_coordMasterDirs,
+ TRUE, TRUE);
+ /* GTM slave and coordinator slaves, if any */
+ if (isVarYes(VAR_coordSlave))
+ checkResourceConflict(VAR_gtmSlaveName, VAR_gtmSlaveServer, VAR_gtmSlavePort, NULL, VAR_gtmSlaveDir,
+ VAR_coordNames, VAR_coordSlaveServers,
+ VAR_coordSlavePorts, VAR_coordSlavePoolerPorts, VAR_coordSlaveDirs,
+ TRUE, TRUE);
+ /* GTM slave and datanode masters */
+ checkResourceConflict(VAR_gtmSlaveName, VAR_gtmSlaveServer, VAR_gtmSlavePort, NULL, VAR_gtmSlaveDir,
+ VAR_datanodeNames, VAR_datanodeMasterServers, VAR_datanodePorts, NULL, VAR_datanodeMasterDirs,
+ TRUE, TRUE);
+ checkResourceConflict(VAR_gtmSlaveName, VAR_gtmSlaveServer, VAR_gtmSlavePort, NULL, VAR_gtmSlaveDir,
+ VAR_datanodeNames, VAR_datanodeMasterServers,
+ VAR_datanodePorts, NULL, VAR_datanodeMasterWALDirs,
+ TRUE, TRUE);
+ /* GTM slave and datanode slave, if any */
+ if (isVarYes(VAR_datanodeSlave))
+ {
+ checkResourceConflict(VAR_gtmSlaveName, VAR_gtmSlaveServer, VAR_gtmSlavePort, NULL, VAR_gtmSlaveDir,
+ VAR_datanodeNames, VAR_datanodeSlaveServers, VAR_datanodeSlavePorts, NULL, VAR_datanodeSlaveDirs,
+ TRUE, TRUE);
+ checkResourceConflict(VAR_gtmSlaveName, VAR_gtmSlaveServer, VAR_gtmSlavePort, NULL, VAR_gtmSlaveDir,
+ VAR_datanodeNames, VAR_datanodeSlaveServers,
+ VAR_datanodeSlavePorts, NULL,
+ VAR_datanodeSlaveWALDirs,
+ TRUE, TRUE);
+ }
+ }
+ /*
+ * GTM proxy and others ---------
+ */
+ if (isVarYes(VAR_gtmProxy))
+ {
+ /* GTM proxy and coordinator masters */
+ checkResourceConflict(VAR_gtmProxyNames, VAR_gtmProxyServers, VAR_gtmProxyPorts, NULL, VAR_gtmProxyDirs,
+ VAR_coordNames, VAR_coordMasterServers, VAR_coordPorts, VAR_poolerPorts, VAR_coordMasterDirs,
+ FALSE, TRUE);
+ /* GTM proxy and coordinator slaves, if any */
+ if (sval(VAR_coordSlave) && (strcmp(sval(VAR_coordSlave), "y") == 0))
+ checkResourceConflict(VAR_gtmProxyNames, VAR_gtmProxyServers, VAR_gtmProxyPorts, NULL, VAR_gtmProxyDirs,
+ VAR_coordNames, VAR_coordSlaveServers, VAR_coordSlavePorts, VAR_coordSlavePoolerPorts, VAR_coordSlaveDirs,
+ TRUE, TRUE);
+ /* GTM proxy and datanode masters */
+ checkResourceConflict(VAR_gtmProxyNames, VAR_gtmProxyServers, VAR_gtmProxyPorts, NULL, VAR_gtmProxyDirs,
+ VAR_datanodeNames, VAR_datanodeMasterServers, VAR_datanodePorts, NULL, VAR_datanodeMasterDirs,
+ TRUE, TRUE);
+ checkResourceConflict(VAR_gtmProxyNames, VAR_gtmProxyServers, VAR_gtmProxyPorts, NULL, VAR_gtmProxyDirs,
+ VAR_datanodeNames, VAR_datanodeMasterServers,
+ VAR_datanodePorts, NULL, VAR_datanodeMasterWALDirs,
+ TRUE, TRUE);
+ /* GTM proxy and datanode slave, if any */
+ if (sval(VAR_datanodeSlave) && (strcmp(sval(VAR_datanodeSlave), "y") == 0))
+ {
+ checkResourceConflict(VAR_gtmProxyNames, VAR_gtmProxyServers, VAR_gtmProxyPorts, NULL, VAR_gtmProxyDirs,
+ VAR_datanodeNames, VAR_datanodeSlaveServers, VAR_datanodeSlavePorts, NULL, VAR_datanodeSlaveDirs,
+ TRUE, TRUE);
+ checkResourceConflict(VAR_gtmProxyNames, VAR_gtmProxyServers, VAR_gtmProxyPorts, NULL, VAR_gtmProxyDirs,
+ VAR_datanodeNames, VAR_datanodeSlaveServers,
+ VAR_datanodeSlavePorts, NULL,
+ VAR_datanodeSlaveWALDirs,
+ TRUE, TRUE);
+ }
+ }
+ /*
+ * Coordinator Masters and others
+ */
+ /* Coordinator master and coordinator slaves, if any */
+ if (isVarYes(VAR_coordSlave))
+ checkResourceConflict(VAR_coordNames, VAR_coordMasterServers, VAR_coordPorts, VAR_poolerPorts, VAR_coordMasterDirs,
+ VAR_coordNames, VAR_coordSlaveServers,
+ VAR_coordSlavePorts, VAR_coordSlavePoolerPorts, VAR_coordSlaveDirs,
+ TRUE, FALSE);
+ /* Coordinator masters and datanode masters */
+ checkResourceConflict(VAR_coordNames, VAR_coordMasterServers, VAR_coordPorts, VAR_poolerPorts, VAR_coordMasterDirs,
+ VAR_datanodeNames, VAR_datanodeMasterServers, VAR_datanodePorts, NULL, VAR_datanodeMasterDirs,
+ FALSE, TRUE);
+ checkResourceConflict(VAR_coordNames, VAR_coordMasterServers, VAR_coordPorts, VAR_poolerPorts, VAR_coordMasterDirs,
+ VAR_datanodeNames, VAR_datanodeMasterServers,
+ VAR_datanodePorts, NULL, VAR_datanodeMasterWALDirs,
+ FALSE, TRUE);
+ /* Coordinator masters and datanode slave, if any */
+ if (isVarYes(VAR_datanodeSlave))
+ {
+ checkResourceConflict(VAR_coordNames, VAR_coordMasterServers, VAR_coordPorts, VAR_poolerPorts, VAR_coordMasterDirs,
+ VAR_datanodeNames, VAR_datanodeSlaveServers,
+ VAR_datanodeSlavePorts, NULL, VAR_datanodeSlaveDirs,
+ TRUE, TRUE);
+ checkResourceConflict(VAR_coordNames, VAR_coordMasterServers, VAR_coordPorts, VAR_poolerPorts, VAR_coordMasterDirs,
+ VAR_datanodeNames, VAR_datanodeSlaveServers,
+ VAR_datanodeSlavePorts, NULL,
+ VAR_datanodeSlaveWALDirs,
+ TRUE, TRUE);
+ }
+ /*
+ * Coordinator slaves and others
+ */
+ if (isVarYes(VAR_coordSlave))
+ {
+ /* Coordinator slave and datanode masters */
+ checkResourceConflict(VAR_coordNames, VAR_coordSlaveServers, VAR_coordSlavePorts, VAR_coordSlavePoolerPorts, VAR_coordSlaveDirs,
+ VAR_datanodeNames, VAR_datanodeMasterServers,
+ VAR_datanodePorts, NULL, VAR_datanodeMasterDirs,
+ FALSE, TRUE);
+ checkResourceConflict(VAR_coordNames, VAR_coordSlaveServers, VAR_coordSlavePorts, VAR_coordSlavePoolerPorts, VAR_coordSlaveDirs,
+ VAR_datanodeNames, VAR_datanodeMasterServers,
+ VAR_datanodePorts, NULL, VAR_datanodeMasterWALDirs,
+ FALSE, TRUE);
+ /* Coordinator slave and datanode slave, if any */
+ if (isVarYes(VAR_datanodeSlave))
+ {
+ checkResourceConflict(VAR_coordNames, VAR_coordSlaveServers, VAR_coordSlavePorts, VAR_coordSlavePoolerPorts, VAR_coordSlaveDirs,
+ VAR_datanodeNames, VAR_datanodeSlaveServers,
+ VAR_datanodeSlavePorts, NULL, VAR_datanodeSlaveDirs,
+ TRUE, TRUE);
+ checkResourceConflict(VAR_coordNames, VAR_coordSlaveServers, VAR_coordSlavePorts, VAR_coordSlavePoolerPorts, VAR_coordSlaveDirs,
+ VAR_datanodeNames, VAR_datanodeSlaveServers,
+ VAR_datanodeSlavePorts, NULL,
+ VAR_datanodeSlaveWALDirs,
+ TRUE, TRUE);
+ }
+ }
+ /*
+ * Datanode masters and others ---
+ */
+ /* Datanode master self */
+ checkResourceConflict(VAR_datanodeNames, VAR_datanodeMasterServers, VAR_datanodePorts, NULL, VAR_datanodeMasterDirs,
+ NULL, NULL, NULL, NULL, NULL,
+ FALSE, TRUE);
+ checkResourceConflict(VAR_datanodeNames, VAR_datanodeMasterServers,
+ VAR_datanodePorts, NULL, VAR_datanodeMasterWALDirs,
+ NULL, NULL, NULL, NULL, NULL,
+ FALSE, TRUE);
+ /* Datanode master and datanode slave, if any */
+ if (sval(VAR_datanodeSlave) && (strcmp(sval(VAR_datanodeSlave), "y") == 0))
+ {
+ checkResourceConflict(VAR_datanodeNames, VAR_datanodeMasterServers, VAR_datanodePorts, NULL, VAR_datanodeMasterDirs,
+ VAR_datanodeNames, VAR_datanodeSlaveServers,
+ VAR_datanodeSlavePorts, NULL, VAR_datanodeSlaveDirs,
+ TRUE, FALSE);
+ checkResourceConflict(VAR_datanodeNames, VAR_datanodeMasterServers, VAR_datanodePorts, NULL, VAR_datanodeMasterDirs,
+ VAR_datanodeNames, VAR_datanodeSlaveServers,
+ VAR_datanodeSlavePorts, NULL,
+ VAR_datanodeSlaveWALDirs,
+ TRUE, FALSE);
+ }
+ if (anyConfigErrors)
+ {
+ elog(ERROR, "ERROR: Found conflicts among resources. Exiting.\n");
+ exit(1);
+ }
+}
+
+/*
+ * Check if the minimum components are configured --- gtm master, coordinator master and datanode master.
+ */
+void check_configuration(void)
+{
+ /*
+ * See if the mandatory configuration is defined. We continue even if an error
+ * is detected, so that all the errors can be reported at once.
+ */
+ /* GTM Master */
+ if (!find_var(VAR_gtmName) || !find_var(VAR_gtmMasterServer) || !find_var(VAR_gtmMasterPort) || !find_var(VAR_gtmMasterDir))
+ elog(ERROR, "ERROR: GTM master configuration is missing. gtmName, gtmMasterServer, gtmMasterPort or gtmMasterDir\n");
+ /* Coordinator Master */
+ if (!find_var(VAR_coordNames) || !find_var(VAR_coordPorts) || !find_var(VAR_poolerPorts) ||
+ !find_var(VAR_coordMasterServers) || !find_var(VAR_coordMasterDirs))
+ elog(ERROR, "ERROR: Coordinator master configuration is missing. coordNames, coodPorts, poolerPorts, coordMasterPorts or coordMasterDirs\n");
+ /* Datanode Master */
+ if (!find_var(VAR_datanodeNames) || !find_var(VAR_datanodePorts) || !find_var(VAR_datanodeMasterServers) ||
+ !find_var(VAR_datanodeMasterDirs))
+ elog(ERROR, "ERROR: Datanode master configuration is missing. datanodeNames, datanodePorts, datanodePoolerPorts, datanodeMasterPorts or datanodeMasterDirs\n");
+ handle_no_slaves();
+ verifyResource();
+ makeServerList();
+}
+
+/*
+ * Backup configuration files to a remote site as specified.
+ */
+int backup_configuration(void)
+{
+ if ((strcasecmp(sval(VAR_configBackup), "y") != 0) || is_none(sval(VAR_configBackupHost)) ||
+ is_none(sval(VAR_configBackupDir)) || is_none(sval(VAR_configBackupFile)))
+ return (2);
+ return(doImmediate(NULL, NULL, "scp %s %s@%s:%s/%s",
+ pgxc_ctl_config_path,
+ sval(VAR_pgxcUser), sval(VAR_configBackupHost),
+ sval(VAR_configBackupDir), sval(VAR_configBackupFile)));
+}
+
+NodeType getNodeType(char *nodeName)
+{
+ int ii;
+
+ /* Check GTM */
+ if (strcmp(nodeName, sval(VAR_gtmName)) == 0)
+ return NodeType_GTM;
+ /* GTM_Proxy */
+ for (ii = 0; aval(VAR_gtmProxyNames)[ii]; ii++)
+ if (strcmp(nodeName, aval(VAR_gtmProxyNames)[ii]) == 0)
+ return NodeType_GTM_PROXY;
+ /* Coordinator */
+ for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
+ if (strcmp(nodeName, aval(VAR_coordNames)[ii]) == 0)
+ return NodeType_COORDINATOR;
+ /* Datanode */
+ for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+ if (strcmp(nodeName, aval(VAR_datanodeNames)[ii]) == 0)
+ return NodeType_DATANODE;
+ /* Nodename */
+ for (ii = 0; aval(VAR_allServers)[ii]; ii++)
+ if (strcmp(nodeName, aval(VAR_allServers)[ii]) == 0)
+ return NodeType_SERVER;
+ return NodeType_UNDEF;
+
+}
+
+#define DEFAULT_PGXC_CTL_MAX_WAL_SENDERS 5
+
+/*
+ * Determine the default value for max_wal_senders. We pick up the value specified
+ * for some other existing coordinator or datanode, or return the
+ * DEFAULT_PGXC_CTL_MAX_WAL_SENDERS value if none is specified.
+ */
+int getDefaultWalSender(int isCoord)
+{
+ int ii;
+
+ char *names = isCoord ? VAR_coordNames : VAR_datanodeNames;
+ char *walSender = isCoord ? VAR_coordMaxWALSenders : VAR_datanodeMaxWALSenders;
+
+ for (ii = 0; aval(names)[ii]; ii++)
+ {
+ if (doesExist(names, ii) && !is_none(aval(names)[ii]) && (atoi(aval(walSender)[ii]) >= 0))
+ {
+ int nsenders = atoi(aval(walSender)[ii]);
+ return nsenders ? nsenders : DEFAULT_PGXC_CTL_MAX_WAL_SENDERS;
+ }
+ }
+ /* If none found, return the default value. */
+ return DEFAULT_PGXC_CTL_MAX_WAL_SENDERS;
+}
diff --git a/contrib/pgxc_ctl/config.h b/contrib/pgxc_ctl/config.h
new file mode 100644
index 0000000000..543070b91c
--- /dev/null
+++ b/contrib/pgxc_ctl/config.h
@@ -0,0 +1,45 @@
+/*-------------------------------------------------------------------------
+ *
+ * config.h
+ *
+ * Configuration module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef CONFIG_H
+#define CONFIG_H
+
+#include <stdio.h>
+#include <string.h>
+
+typedef enum NodeType {
+ NodeType_UNDEF = 0,
+ NodeType_GTM,
+ NodeType_GTM_PROXY,
+ NodeType_COORDINATOR,
+ NodeType_DATANODE,
+ NodeType_SERVER} NodeType;
+
+void read_vars(FILE *conf);
+void check_configuration(void);
+void read_selected_vars(FILE *conf, char *selectThis[]);
+char *get_word(char *line, char **token);
+int is_none(char *s);
+int backup_configuration(void);
+NodeType getNodeType(char *nodeName);
+int checkSpecificResourceConflict(char *name, char *host, int port, char *dir, int is_gtm);
+int checkNameConflict(char *name, int is_gtm);
+int checkPortConflict(char *host, int port);
+int checkDirConflict(char *host, char *dir);
+void makeServerList(void);
+int getDefaultWalSender(int isCoord);
+
+#define DEBUG() (strcasecmp(sval(VAR_debug), "y") == 0)
+#define VERBOSE() (strcasecmp(sval(VAR_verbose), "y") == 0)
+#define isVarYes(x) ((sval(x) != NULL) && (strcasecmp(sval(x), "y") == 0))
+
+void handle_no_slaves(void);
+
+#endif /* CONFIG_H */
diff --git a/contrib/pgxc_ctl/coord_cmd.c b/contrib/pgxc_ctl/coord_cmd.c
new file mode 100644
index 0000000000..00afd1e52e
--- /dev/null
+++ b/contrib/pgxc_ctl/coord_cmd.c
@@ -0,0 +1,2397 @@
+/*-------------------------------------------------------------------------
+ *
+ * coord_cmd.c
+ *
+ * Coordinator command module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <signal.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <setjmp.h>
+#include <string.h>
+#include <readline/readline.h>
+#include <readline/history.h>
+
+#include "pgxc_ctl.h"
+#include "do_command.h"
+#include "variables.h"
+#include "varnames.h"
+#include "pgxc_ctl_log.h"
+#include "config.h"
+#include "do_shell.h"
+#include "utils.h"
+#include "coord_cmd.h"
+#include "gtm_util.h"
+
+
+static int failover_oneCoordinator(int coordIdx);
+static cmd_t *prepare_configureDataNode(char *nodeName);
+
+static char date[MAXTOKEN+1];
+
+/*
+ *======================================================================
+ *
+ * Coordinator stuff
+ *
+ *=====================================================================
+ */
+/*
+ * Initialize coordinator masters -----------------------------------------------------------
+ */
+int init_coordinator_master_all(void)
+{
+ elog(NOTICE, "Initialize all the coordinator masters.\n");
+ return(init_coordinator_master(aval(VAR_coordNames)));
+}
+
+cmd_t *prepare_initCoordinatorMaster(char *nodeName)
+{
+ cmd_t *cmd, *cmdInitdb, *cmdPgConf, *cmdWalArchDir, *cmdWalArch, *cmdPgHba;
+ int jj, kk, gtmPxyIdx;
+ char **confFiles = NULL;
+ FILE *f;
+ char localStdin[MAXPATH+1];
+ char *gtmHost, *gtmPort;
+ char timestamp[MAXTOKEN+1];
+ char remoteDirCheck[MAXPATH * 2 + 128];
+
+ /* Reset coordinator master directory and run initdb */
+ if ((jj = coordIdx(nodeName)) < 0)
+ {
+ elog(ERROR, "ERROR: Node %s is not a coordinator.\n", nodeName);
+ return(NULL);
+ }
+ if(pingNode(aval(VAR_coordMasterServers)[jj], aval(VAR_coordPorts)[jj]) == 0)
+ {
+ elog(ERROR, "ERROR: target coordinator master %s is running now. Skip initilialization.\n",
+ nodeName);
+ return(NULL);
+ }
+
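+ /* Unless forced, prepend a shell guard that refuses to initialize a non-empty target directory. */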
+ remoteDirCheck[0] = '\0';
+ if (!forceInit)
+ {
+ sprintf(remoteDirCheck, "if [ '$(ls -A %s 2> /dev/null)' ]; then echo 'ERROR: "
+ "target directory (%s) exists and not empty. "
+ "Skip Coordinator initilialization'; exit; fi;",
+ aval(VAR_coordMasterDirs)[jj],
+ aval(VAR_coordMasterDirs)[jj]
+ );
+ }
+
+ cmd = cmdInitdb = initCmd(aval(VAR_coordMasterServers)[jj]);
+ snprintf(newCommand(cmdInitdb), MAXLINE,
+ "%s"
+ "rm -rf %s;"
+ "mkdir -p %s;"
+ "PGXC_CTL_SILENT=1 initdb --nodename %s -D %s",
+ remoteDirCheck,
+ aval(VAR_coordMasterDirs)[jj],
+ aval(VAR_coordMasterDirs)[jj],
+ nodeName,
+ aval(VAR_coordMasterDirs)[jj]);
+
+ /* Update postgresql.conf */
+
+ /* coordSpecificExtraConfig */
+ gtmPxyIdx = getEffectiveGtmProxyIdxFromServerName(aval(VAR_coordMasterServers)[jj]);
+ gtmHost = (gtmPxyIdx >= 0) ? aval(VAR_gtmProxyServers)[gtmPxyIdx] : sval(VAR_gtmMasterServer);
+ gtmPort = (gtmPxyIdx >= 0) ? aval(VAR_gtmProxyPorts)[gtmPxyIdx] : sval(VAR_gtmMasterPort);
+ appendCmdEl(cmdInitdb, (cmdPgConf = initCmd(aval(VAR_coordMasterServers)[jj])));
+ snprintf(newCommand(cmdPgConf), MAXLINE,
+ "cat >> %s/postgresql.conf", aval(VAR_coordMasterDirs)[jj]);
+ if (doesExist(VAR_coordExtraConfig, 0) &&
+ !is_none(sval(VAR_coordExtraConfig)))
+ AddMember(confFiles, sval(VAR_coordExtraConfig));
+ if (doesExist(VAR_coordSpecificExtraConfig, jj) &&
+ !is_none(aval(VAR_coordSpecificExtraConfig)[jj]))
+ AddMember(confFiles, aval(VAR_coordSpecificExtraConfig)[jj]);
+ if ((f = prepareLocalStdin((cmdPgConf->localStdin = Malloc(MAXPATH+1)), MAXPATH, confFiles)) == NULL)
+ {
+ cleanCmd(cmd);
+ return(NULL);
+ }
+ /* From configuration variables */
+ fprintf(f,
+ "#===========================================\n"
+ "# Added at initialization. %s\n"
+ "port = %d\n"
+ "pooler_port = %s\n"
+ "gtm_host = '%s'\n"
+ "gtm_port = %s\n"
+ "# End of Additon\n",
+ timeStampString(timestamp, MAXTOKEN),
+ atoi(aval(VAR_coordPorts)[jj]),
+ aval(VAR_poolerPorts)[jj],
+ gtmHost, gtmPort);
+ fclose(f);
+ CleanArray(confFiles);
+
+ /* Log Shipping */
+
+ if (isVarYes(VAR_coordSlave) && !is_none(aval(VAR_coordSlaveServers)[jj]))
+ {
+ /* Build WAL archive target directory */
+ appendCmdEl(cmdInitdb, (cmdWalArchDir = initCmd(aval(VAR_coordSlaveServers)[jj])));
+ snprintf(newCommand(cmdWalArchDir), MAXLINE,
+ "rm -rf %s;mkdir -p %s; chmod 0700 %s",
+ aval(VAR_coordArchLogDirs)[jj], aval(VAR_coordArchLogDirs)[jj],
+ aval(VAR_coordArchLogDirs)[jj]);
+ /* Build master's postgresql.conf */
+ appendCmdEl(cmdInitdb, (cmdWalArch = initCmd(aval(VAR_coordMasterServers)[jj])));
+ if ((f = prepareLocalStdin(localStdin, MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmd);
+ return(NULL);
+ }
+ fprintf(f,
+ "#========================================\n"
+ "# Addition for log shipping, %s\n"
+ "wal_level = archive\n"
+ "archive_mode = on\n"
+ "archive_command = 'rsync %%p %s@%s:%s/%%f'\n"
+ "max_wal_senders = %s\n"
+ "# End of Addition\n",
+ timeStampString(timestamp, MAXTOKEN),
+ sval(VAR_pgxcUser), aval(VAR_coordSlaveServers)[jj], aval(VAR_coordArchLogDirs)[jj],
+ is_none(aval(VAR_coordMaxWALSenders)[jj]) ? "0" : aval(VAR_coordMaxWALSenders)[jj]);
+ fclose(f);
+ cmdWalArch->localStdin = Strdup(localStdin);
+ snprintf(newCommand(cmdWalArch), MAXLINE,
+ "cat >> %s/postgresql.conf",
+ aval(VAR_coordMasterDirs)[jj]);
+ }
+
+ /* pg_hba.conf */
+
+ appendCmdEl(cmdInitdb, (cmdPgHba = initCmd(aval(VAR_coordMasterServers)[jj])));
+ if ((f = prepareLocalStdin(localStdin, MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmd);
+ return(NULL);
+ }
+ fprintf(f,
+ "#=================================================\n"
+ "# Addition at initialization, %s\n",
+ timeStampString(timestamp, MAXTOKEN));
+ if (doesExist(VAR_coordExtraPgHba, 0) && !is_none(sval(VAR_coordExtraPgHba)))
+ AddMember(confFiles, sval(VAR_coordExtraPgHba));
+ if (doesExist(VAR_coordSpecificExtraPgHba, jj) && !is_none(aval(VAR_coordSpecificExtraPgHba)[jj]))
+ AddMember(confFiles, aval(VAR_coordSpecificExtraPgHba)[jj]);
+ appendFiles(f, confFiles);
+ CleanArray(confFiles);
+ for (kk = 0; aval(VAR_coordPgHbaEntries)[kk]; kk++)
+ {
+ fprintf(f,"host all %s %s trust\n", sval(VAR_pgxcOwner), aval(VAR_coordPgHbaEntries)[kk]);
+ if (isVarYes(VAR_coordSlave))
+ if (!is_none(aval(VAR_coordSlaveServers)[jj]))
+ fprintf(f, "host replication %s %s trust\n",
+ sval(VAR_pgxcOwner), aval(VAR_coordPgHbaEntries)[kk]);
+ }
+ fprintf(f, "# End of addition\n");
+ fclose(f);
+ cmdPgHba->localStdin = Strdup(localStdin);
+ snprintf(newCommand(cmdPgHba), MAXLINE,
+ "cat >> %s/pg_hba.conf", aval(VAR_coordMasterDirs)[jj]);
+
+ /*
+ * Now prepare statements to create/alter nodes.
+ */
+ return(cmd);
+}
+
+int init_coordinator_master(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+ int rc;
+
+ actualNodeList = makeActualNodeList(nodeList);
+ /*
+ * Build directory and run initdb
+ */
+ cmdList = initCmdList();
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ elog(NOTICE, "Initialize coordinator master %s.\n", actualNodeList[ii]);
+ if ((cmd = prepare_initCoordinatorMaster(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ elog(NOTICE, "Done.\n");
+ return(rc);
+}
+
+/*
+ * Initialize coordinator slaves ---------------------------------------------------------------
+ */
+int init_coordinator_slave_all(void)
+{
+ elog(NOTICE, "Initialize all the coordinator slaves.\n");
+ return(init_coordinator_slave(aval(VAR_coordNames)));
+}
+
+cmd_t *prepare_initCoordinatorSlave(char *nodeName)
+{
+ cmd_t *cmd, *cmdBuildDir, *cmdStartMaster, *cmdBaseBkup, *cmdRecoveryConf, *cmdPgConf;
+ int idx;
+ FILE *f;
+ char localStdin[MAXPATH+1];
+ char timestamp[MAXTOKEN+1];
+ char remoteDirCheck[MAXPATH * 2 + 128];
+
+ if ((idx = coordIdx(nodeName)) < 0)
+ {
+ elog(ERROR, "ERROR: %s is not a coordinator.\n", nodeName);
+ return(NULL);
+ }
+ if (is_none(aval(VAR_coordSlaveServers)[idx]))
+ {
+ elog(ERROR, "ERROR: Slave of the coordinator %s is not configured.\n", nodeName);
+ return(NULL);
+ }
+
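+ /* As with the master, refuse to overwrite a non-empty slave directory unless initialization is forced. */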
+ remoteDirCheck[0] = '\0';
+ if (!forceInit)
+ {
+ sprintf(remoteDirCheck, "if [ '$(ls -A %s 2> /dev/null)' ]; then echo 'ERROR: "
+ "target directory (%s) exists and not empty. "
+ "Skip Coordinator slave initilialization'; exit; fi;",
+ aval(VAR_coordSlaveDirs)[idx],
+ aval(VAR_coordSlaveDirs)[idx]
+ );
+ }
+
+ /* Build work directory */
+ cmd = cmdBuildDir = initCmd(aval(VAR_coordSlaveServers)[idx]);
+ snprintf(newCommand(cmdBuildDir), MAXLINE,
+ "%s"
+ "rm -rf %s;"
+ "mkdir -p %s;"
+ "chmod 0700 %s",
+ remoteDirCheck,
+ aval(VAR_coordSlaveDirs)[idx], aval(VAR_coordSlaveDirs)[idx], aval(VAR_coordSlaveDirs)[idx]);
+ /*
+ * Check if the master is running --> this may not need to change if we have a watchdog. In this
+ * case, we need a master which can handle the request, so GTM should be running. We can test
+ * all of them with a single 'select 1' command.
+ */
+ if (pingNode(aval(VAR_coordMasterServers)[idx], aval(VAR_coordPorts)[idx]) != 0)
+ {
+ /* Master is not running. Must start it first */
+ appendCmdEl(cmdBuildDir, (cmdStartMaster = initCmd(aval(VAR_coordMasterServers)[idx])));
+ snprintf(newCommand(cmdStartMaster), MAXLINE,
+ "pg_ctl start -w -Z coordinator -D %s -o -i",
+ aval(VAR_coordMasterDirs)[idx]);
+ }
+ /*
+ * Obtain base backup of the master
+ */
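+ /* -x (--xlog) makes pg_basebackup include the WAL segments needed for a consistent copy. */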
+ appendCmdEl(cmdBuildDir, (cmdBaseBkup = initCmd(aval(VAR_coordSlaveServers)[idx])));
+ snprintf(newCommand(cmdBaseBkup), MAXLINE,
+ "pg_basebackup -p %s -h %s -D %s -x",
+ aval(VAR_coordPorts)[idx], aval(VAR_coordMasterServers)[idx], aval(VAR_coordSlaveDirs)[idx]);
+
+ /* Configure recovery.conf file at the slave */
+ appendCmdEl(cmdBuildDir, (cmdRecoveryConf = initCmd(aval(VAR_coordSlaveServers)[idx])));
+ if ((f = prepareLocalStdin(localStdin, MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmd);
+ return(NULL);
+ }
+ fprintf(f,
+ "#==========================================\n"
+ "# Added to initialize the slave, %s\n"
+ "standby_mode = on\n"
+ "primary_conninfo = 'host = %s port = %s "
+ "user = %s application_name = %s'\n"
+ "restore_command = 'cp %s/%%f %%p'\n"
+ "archive_cleanup_command = 'pg_archivecleanup %s %%r'\n"
+ "# End of addition\n",
+ timeStampString(timestamp, MAXTOKEN), aval(VAR_coordMasterServers)[idx], aval(VAR_coordPorts)[idx],
+ sval(VAR_pgxcOwner), aval(VAR_coordNames)[idx],
+ aval(VAR_coordArchLogDirs)[idx], aval(VAR_coordArchLogDirs)[idx]);
+ fclose(f);
+ cmdRecoveryConf->localStdin = Strdup(localStdin);
+ snprintf(newCommand(cmdRecoveryConf), MAXLINE,
+ "cat >> %s/recovery.conf\n", aval(VAR_coordSlaveDirs)[idx]);
+
+ /* Configure postgresql.conf at the slave */
+ appendCmdEl(cmdBuildDir, (cmdPgConf = initCmd(aval(VAR_coordSlaveServers)[idx])));
+ if ((f = prepareLocalStdin(localStdin, MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmd);
+ return(NULL);
+ }
+ fprintf(f,
+ "#==========================================\n"
+ "# Added to initialize the slave, %s\n"
+ "hot_standby = off\n"
+ "port = %s\n"
+ "pooler_port = %s\n"
+ "wal_level = archive\n"
+ "archive_mode = off\n"
+ "archive_command = ''\n"
+ "max_wal_senders = 0\n"
+ "# End of Addition\n",
+ timeStampString(timestamp, MAXTOKEN),
+ aval(VAR_coordSlavePorts)[idx],
+ aval(VAR_coordSlavePoolerPorts)[idx]);
+ fclose(f);
+ cmdPgConf->localStdin = Strdup(localStdin);
+ snprintf(newCommand(cmdPgConf), MAXLINE,
+ "cat >> %s/postgresql.conf", aval(VAR_coordSlaveDirs)[idx]);
+ return(cmd);
+}
+
+
+int init_coordinator_slave(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ cmdList_t *cmdList;
+ int rc;
+ cmd_t *cmd;
+
+ if (!isVarYes(VAR_coordSlave))
+ {
+ elog(ERROR, "ERROR: Coordinator slaves are not configured.\n");
+ return(1);
+ }
+ actualNodeList = makeActualNodeList(nodeList);
+ cmdList = initCmdList();
+ /*
+ * First step: initialize work directory and run the master if necessary
+ */
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ elog(INFO, "Initialize the coordinator slave %s.\n", actualNodeList[ii]);
+ if ((cmd = prepare_initCoordinatorSlave(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ elog(INFO, "Done.\n");
+ return(rc);
+}
+
+/*
+ * Configure nodes in each coordinator -------------------------------------------
+ *
+ * Issues CREATE NODE/ALTER NODE through psql.
+ *
+ * Please note that CREATE/ALTER/DROP NODE are handled only locally. You have to
+ * visit all the coordinators.
+ */
+int configure_nodes_all(void)
+{
+ configure_nodes(aval(VAR_coordNames));
+ return configure_datanodes(aval(VAR_datanodeNames));
+}
+
+int configure_nodes(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+ int rc;
+
+ actualNodeList = makeActualNodeList(nodeList);
+ cmdList = initCmdList();
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ if ((cmd = prepare_configureNode(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ elog(INFO, "Done.\n");
+ return(rc);
+}
+
+int configure_datanodes(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+ int rc;
+
+ actualNodeList = makeActualNodeList(nodeList);
+ cmdList = initCmdList();
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ if ((cmd = prepare_configureDataNode(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ elog(INFO, "Done.\n");
+ return(rc);
+}
+
+cmd_t *prepare_configureNode(char *nodeName)
+{
+ cmd_t *cmd;
+ int ii;
+ int idx;
+ FILE *f;
+
+ if ((idx = coordIdx(nodeName)) < 0)
+ {
+ elog(ERROR, "ERROR: %s is not a coordinator.\n", nodeName);
+ return NULL;
+ }
+ if (is_none(aval(VAR_coordMasterServers)[idx]))
+ return NULL;
+ cmd = initCmd(NULL);
+ snprintf(newCommand(cmd), MAXLINE,
+ "psql -p %d -h %s -a %s %s",
+ atoi(aval(VAR_coordPorts)[idx]),
+ aval(VAR_coordMasterServers)[idx],
+ sval(VAR_defaultDatabase),
+ sval(VAR_pgxcOwner));
+ if ((f = prepareLocalStdin(newFilename(cmd->localStdin), MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmd);
+ return NULL;
+ }
+ /* Setup coordinators */
+ for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
+ {
+ int targetIdx;
+ if (is_none(aval(VAR_coordNames)[ii]))
+ continue;
+ if ((targetIdx = coordIdx(aval(VAR_coordNames)[ii])) < 0)
+ continue;
+
+ if (!is_none(aval(VAR_coordMasterServers)[ii]))
+ {
+ if (idx != targetIdx)
+ /* Register outside coordinator */
+ fprintf(f, "CREATE NODE %s WITH (TYPE='coordinator', HOST='%s', PORT=%d);\n",
+ aval(VAR_coordNames)[ii],
+ aval(VAR_coordMasterServers)[ii],
+ atoi(aval(VAR_coordPorts)[ii]));
+ else
+ /* Update myself */
+ fprintf(f, "ALTER NODE %s WITH (HOST='%s', PORT=%d);\n",
+ aval(VAR_coordNames)[ii],
+ aval(VAR_coordMasterServers)[ii],
+ atoi(aval(VAR_coordPorts)[ii]));
+ }
+ }
+ /* Setup datanodes */
+ for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+ {
+ int dnIdx;
+
+ if (is_none(aval(VAR_datanodeNames)[ii]))
+ continue;
+
+ if ((dnIdx = datanodeIdx(aval(VAR_datanodeNames)[ii])) < 0)
+ {
+ fclose(f);
+ cleanCmd(cmd);
+ return NULL;
+ }
+
+ if (is_none(aval(VAR_datanodeMasterServers)[dnIdx]))
+ continue;
+
+ if (sval(VAR_primaryDatanode) && (strcmp(sval(VAR_primaryDatanode), aval(VAR_datanodeNames)[dnIdx]) == 0))
+ {
+ /* Primary Node */
+ if (strcmp(aval(VAR_coordMasterServers)[idx], aval(VAR_datanodeMasterServers)[dnIdx]) == 0)
+ /* Primary and preferred node */
+ fprintf(f, "CREATE NODE %s WITH (TYPE='datanode', HOST='%s', PORT=%d, PRIMARY, PREFERRED);\n",
+ aval(VAR_datanodeNames)[dnIdx], aval(VAR_datanodeMasterServers)[dnIdx],
+ atoi(aval(VAR_datanodePorts)[dnIdx]));
+ else
+ /* Primary but not preferred node */
+ fprintf(f, "CREATE NODE %s WITH (TYPE='datanode', HOST='%s', PORT=%d, PRIMARY);\n",
+ aval(VAR_datanodeNames)[dnIdx], aval(VAR_datanodeMasterServers)[dnIdx],
+ atoi(aval(VAR_datanodePorts)[dnIdx]));
+ }
+ else
+ {
+ /* Non-primary node */
+ if (strcmp(aval(VAR_coordMasterServers)[idx], aval(VAR_datanodeMasterServers)[dnIdx]) == 0)
+ /* Preferred node */
+ fprintf(f, "CREATE NODE %s WITH (TYPE='datanode', HOST='%s', PORT=%d, PREFERRED);\n",
+ aval(VAR_datanodeNames)[dnIdx], aval(VAR_datanodeMasterServers)[dnIdx],
+ atoi(aval(VAR_datanodePorts)[dnIdx]));
+ else
+ /* non-Preferred node */
+ fprintf(f, "CREATE NODE %s WITH (TYPE='datanode', HOST='%s', PORT=%d);\n",
+ aval(VAR_datanodeNames)[dnIdx], aval(VAR_datanodeMasterServers)[dnIdx],
+ atoi(aval(VAR_datanodePorts)[dnIdx]));
+ }
+ }
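+ /* pgxc_pool_reload() makes the pooler pick up the new node definitions. */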
+ fprintf(f, "SELECT pgxc_pool_reload();\n");
+ fclose(f);
+ return(cmd);
+}
+
+static cmd_t *prepare_configureDataNode(char *nodeName)
+{
+ cmd_t *cmd;
+ int ii;
+ int jj;
+ int idx;
+ int connCordIndx;
+ FILE *f;
+ bool is_preferred;
+
+ if ((idx = datanodeIdx(nodeName)) < 0)
+ {
+ elog(ERROR, "ERROR: %s is not a datanode.\n", nodeName);
+ return NULL;
+ }
+ if (is_none(aval(VAR_datanodeMasterServers)[idx]))
+ return NULL;
+ cmd = initCmd(NULL);
+
+ /* We use one of the coordinators to send queries to datanodes */
+ connCordIndx = get_any_available_coord(-1);
+ if (connCordIndx == -1)
+ {
+ cleanCmd(cmd);
+ return NULL;
+ }
+
+ snprintf(newCommand(cmd), MAXLINE,
+ "psql -p %d -h %s -a %s %s",
+ atoi(aval(VAR_coordPorts)[connCordIndx]),
+ aval(VAR_coordMasterServers)[connCordIndx],
+ sval(VAR_defaultDatabase),
+ sval(VAR_pgxcOwner));
+ if ((f = prepareLocalStdin(newFilename(cmd->localStdin), MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmd);
+ return NULL;
+ }
+ /* Setup coordinators */
+ for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
+ {
+ int targetIdx;
+ if (is_none(aval(VAR_coordNames)[ii]))
+ continue;
+ if ((targetIdx = coordIdx(aval(VAR_coordNames)[ii])) < 0)
+ continue;
+ if (!is_none(aval(VAR_coordMasterServers)[ii]))
+ {
+ /* Register outside coordinator */
+ fprintf(f, "EXECUTE DIRECT ON (%s) 'CREATE NODE %s WITH (TYPE=''coordinator'', HOST=''%s'', PORT=%d)';\n",
+ aval(VAR_datanodeNames)[idx],
+ aval(VAR_coordNames)[ii],
+ aval(VAR_coordMasterServers)[ii],
+ atoi(aval(VAR_coordPorts)[ii]));
+ }
+ }
+
+ /* Setup datanodes */
+ for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+ {
+ int dnIdx;
+
+ if (is_none(aval(VAR_datanodeNames)[ii]))
+ continue;
+
+ if ((dnIdx = datanodeIdx(aval(VAR_datanodeNames)[ii])) < 0)
+ {
+ fclose(f);
+ cleanCmd(cmd);
+ return NULL;
+ }
+
+ if (is_none(aval(VAR_datanodeMasterServers)[dnIdx]))
+ continue;
+
+ /* See if this datanode is on the same host as a coordinator */
+ is_preferred = false;
+ for (jj = 0; aval(VAR_coordNames)[jj]; jj++)
+ {
+ if (strcmp(aval(VAR_coordMasterServers)[jj], aval(VAR_datanodeMasterServers)[dnIdx]) == 0)
+ {
+ is_preferred = true;
+ break;
+ }
+ }
+
+ if (sval(VAR_primaryDatanode) && (strcmp(sval(VAR_primaryDatanode), aval(VAR_datanodeNames)[dnIdx]) == 0))
+ {
+ if (idx != dnIdx)
+ {
+ /* Primary Node */
+ if (is_preferred)
+ {
+ /* Primary and preferred node */
+ fprintf(f, "EXECUTE DIRECT ON (%s) 'CREATE NODE %s WITH (TYPE=''datanode'', HOST=''%s'', PORT=%d, PRIMARY, PREFERRED)';\n",
+ aval(VAR_datanodeNames)[idx],
+ aval(VAR_datanodeNames)[dnIdx], aval(VAR_datanodeMasterServers)[dnIdx],
+ atoi(aval(VAR_datanodePorts)[dnIdx]));
+ }
+ else
+ /* Primary but not preferred node */
+ fprintf(f, "EXECUTE DIRECT ON (%s) 'CREATE NODE %s WITH (TYPE=''datanode'', HOST=''%s'', PORT=%d, PRIMARY)';\n",
+ aval(VAR_datanodeNames)[idx],
+ aval(VAR_datanodeNames)[dnIdx], aval(VAR_datanodeMasterServers)[dnIdx],
+ atoi(aval(VAR_datanodePorts)[dnIdx]));
+ }
+ else
+ {
+ /* Primary Node */
+ if (is_preferred)
+ /* Primary and preferred node */
+ fprintf(f, "EXECUTE DIRECT ON (%s) 'ALTER NODE %s WITH (TYPE=''datanode'', HOST=''%s'', PORT=%d, PRIMARY, PREFERRED)';\n",
+ aval(VAR_datanodeNames)[idx],
+ aval(VAR_datanodeNames)[dnIdx], aval(VAR_datanodeMasterServers)[dnIdx],
+ atoi(aval(VAR_datanodePorts)[dnIdx]));
+ else
+ /* Primary but not preferred node */
+ fprintf(f, "EXECUTE DIRECT ON (%s) 'ALTER NODE %s WITH (TYPE=''datanode'', HOST=''%s'', PORT=%d, PRIMARY)';\n",
+ aval(VAR_datanodeNames)[idx],
+ aval(VAR_datanodeNames)[dnIdx], aval(VAR_datanodeMasterServers)[dnIdx],
+ atoi(aval(VAR_datanodePorts)[dnIdx]));
+ }
+ }
+ else
+ {
+ if (idx != dnIdx)
+ {
+ /* Non-primary node */
+ if (is_preferred)
+ /* Preferred node */
+ fprintf(f, "EXECUTE DIRECT ON (%s) 'CREATE NODE %s WITH (TYPE=''datanode'', HOST=''%s'', PORT=%d, PREFERRED)';\n",
+ aval(VAR_datanodeNames)[idx],
+ aval(VAR_datanodeNames)[dnIdx], aval(VAR_datanodeMasterServers)[dnIdx],
+ atoi(aval(VAR_datanodePorts)[dnIdx]));
+ else
+ /* non-Preferred node */
+ fprintf(f, "EXECUTE DIRECT ON (%s) 'CREATE NODE %s WITH (TYPE=''datanode'', HOST=''%s'', PORT=%d)';\n",
+ aval(VAR_datanodeNames)[idx],
+ aval(VAR_datanodeNames)[dnIdx], aval(VAR_datanodeMasterServers)[dnIdx],
+ atoi(aval(VAR_datanodePorts)[dnIdx]));
+ }
+ else
+ {
+ /* Non-primary node */
+ if (is_preferred)
+ /* Preferred node */
+ fprintf(f, "EXECUTE DIRECT ON (%s) 'ALTER NODE %s WITH (TYPE=''datanode'', HOST=''%s'', PORT=%d, PREFERRED)';\n",
+ aval(VAR_datanodeNames)[idx],
+ aval(VAR_datanodeNames)[dnIdx], aval(VAR_datanodeMasterServers)[dnIdx],
+ atoi(aval(VAR_datanodePorts)[dnIdx]));
+ else
+ /* non-Preferred node */
+ fprintf(f, "EXECUTE DIRECT ON (%s) 'ALTER NODE %s WITH (TYPE=''datanode'', HOST=''%s'', PORT=%d)';\n",
+ aval(VAR_datanodeNames)[idx],
+ aval(VAR_datanodeNames)[dnIdx], aval(VAR_datanodeMasterServers)[dnIdx],
+ atoi(aval(VAR_datanodePorts)[dnIdx]));
+ }
+ }
+ }
+ fprintf(f, "EXECUTE DIRECT ON (%s) 'SELECT pgxc_pool_reload()';\n", aval(VAR_datanodeNames)[idx]);
+ fclose(f);
+ return(cmd);
+}
+
+
+/*
+ * Kill coordinator masters -------------------------------------------------------------
+ *
+ * Killing them in such a manner is not recommended. This is just for emergencies.
+ * You should try to stop the components with the "stop" command first.
+ */
+
+int kill_coordinator_master_all(void)
+{
+ elog(INFO, "Killing all the coordinator masters.\n");
+ return(kill_coordinator_master(aval(VAR_coordNames)));
+}
+
+cmd_t * prepare_killCoordinatorMaster(char *nodeName)
+{
+ int idx;
+ pid_t pmPid;
+ cmd_t *cmdKill = NULL, *cmd = NULL;
+
+ if ((idx = coordIdx(nodeName)) < 0)
+ {
+ elog(WARNING, "WARNING: node %s is not a coordinator.\n", nodeName);
+ return(NULL);
+ }
+ cmd = cmdKill = initCmd(aval(VAR_coordMasterServers)[idx]);
+ if ((pmPid = get_postmaster_pid(aval(VAR_coordMasterServers)[idx], aval(VAR_coordMasterDirs)[idx])) > 0)
+ {
+ char *pidList = getChPidList(aval(VAR_coordMasterServers)[idx], pmPid);
+
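+ /* Kill the postmaster and all of its children, then remove leftover UNIX socket files for this port. */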
+ snprintf(newCommand(cmdKill), MAXLINE,
+ "kill -9 %d %s; rm -f /tmp/.s.'*'%d'*'",
+ pmPid, pidList, atoi(aval(VAR_coordPorts)[idx]));
+ freeAndReset(pidList);
+ }
+ else
+ {
+ elog(WARNING, "WARNING: pid for coordinator master \"%s\" was not found. Remove socket only.\n", nodeName);
+ snprintf(newCommand(cmdKill), MAXLINE,
+ "rm -f /tmp/.s.'*'%d'*'",
+ atoi(aval(VAR_coordPorts)[idx]));
+ }
+ return cmd;
+}
+
+int kill_coordinator_master(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+ int rc;
+
+ actualNodeList = makeActualNodeList(nodeList);
+ cmdList = initCmdList();
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ elog(INFO, "Killing coordinator master %s.\n", actualNodeList[ii]);
+ if ((cmd = prepare_killCoordinatorMaster(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ return(rc);
+}
+
+/*
+ * Kill coordinator slaves --------------------------------------------------------------
+ *
+ * Killing them in such a manner is not recommended. This is just for emergencies.
+ * You should try to stop the components with the "stop" command first.
+ */
+int kill_coordinator_slave_all(void)
+{
+ elog(INFO, "Killing all the coordinator slaves.\n");
+ return(kill_coordinator_slave(aval(VAR_coordNames)));
+}
+
+cmd_t *prepare_killCoordinatorSlave(char *nodeName)
+{
+ int idx;
+ pid_t pmPid;
+ cmd_t *cmd = NULL;
+
+ if ((idx = coordIdx(nodeName)) < 0)
+ {
+ elog(WARNING, "WARNING: %s is not a coordinator.\n", nodeName);
+ return(NULL);
+ }
+ cmd = initCmd(aval(VAR_coordSlaveServers)[idx]);
+ if ((pmPid = get_postmaster_pid(aval(VAR_coordSlaveServers)[idx], aval(VAR_coordSlaveDirs)[idx])) > 0)
+ {
+ char *pidList = getChPidList(aval(VAR_coordSlaveServers)[idx], pmPid);
+
+ snprintf(newCommand(cmd), MAXLINE,
+ "kill -9 %d %s", pmPid, pidList);
+ freeAndReset(pidList);
+ }
+ else
+ {
+ elog(WARNING, "WARNING: pid for coordinator slave \"%s\" was not found. Remove socket only.\n", nodeName);
+ snprintf(newCommand(cmd), MAXLINE,
+ "rm -f /tmp/.s.'*'%d'*'", atoi(aval(VAR_coordSlavePorts)[idx]));
+ }
+ return(cmd);
+}
+
+int kill_coordinator_slave(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+ int rc;
+
+ if (!isVarYes(VAR_coordSlave))
+ {
+ elog(ERROR, "ERROR: Coordinator slaves are not configured.\n");
+ return(1);
+ }
+ cmdList = initCmdList();
+ actualNodeList = makeActualNodeList(nodeList);
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ elog(INFO, "Killing coordinator slave %s.\n", actualNodeList[ii]);
+ if ((cmd = prepare_killCoordinatorSlave(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ elog(INFO, "Done.\n");
+ return (rc);
+}
+
+cmd_t *prepare_cleanCoordinatorMaster(char *nodeName)
+{
+ cmd_t *cmd;
+ int idx;
+
+ if ((idx = coordIdx(nodeName)) < 0)
+ return NULL;
+ if (is_none(aval(VAR_coordMasterServers)[idx]))
+ return NULL;
+ cmd = initCmd(aval(VAR_coordMasterServers)[idx]);
+ snprintf(newCommand(cmd), MAXLINE,
+ "rm -rf %s;mkdir -p %s;chmod 0700 %s; rm -f /tmp/.s.*%d*; rm -f /tmp/.s.*%d*",
+ aval(VAR_coordMasterDirs)[idx], aval(VAR_coordMasterDirs)[idx], aval(VAR_coordMasterDirs)[idx],
+ atoi(aval(VAR_coordPorts)[idx]), atoi(aval(VAR_poolerPorts)[idx]));
+ return cmd;
+}
+
+/*
+ * Cleanup coordinator master resources -- directory and socket.
+ */
+int clean_coordinator_master(char **nodeList)
+{
+ char **actualNodeList;
+ cmdList_t *cmdList;
+ int ii;
+ cmd_t *cmd;
+ int rc;
+
+ actualNodeList = makeActualNodeList(nodeList);
+ cmdList = initCmdList();
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ elog(INFO, "Clean coordinator master %s resources.\n", actualNodeList[ii]);
+ if ((cmd = prepare_cleanCoordinatorMaster(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ else
+ elog(WARNING, "WARNING: coordinator master %s not found.\n", actualNodeList[ii]);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ elog(INFO, "Done.\n");
+ return (rc);
+}
+
+int clean_coordinator_master_all(void)
+{
+ elog(INFO, "Cleaning all the coordinator masters resources.\n");
+ return(clean_coordinator_master(aval(VAR_coordNames)));
+}
+
+/*
+ * Cleanup coordinator slave resources -- directory and the socket.
+ */
+cmd_t *prepare_cleanCoordinatorSlave(char *nodeName)
+{
+ cmd_t *cmd;
+ int idx;
+
+ if ((idx = coordIdx(nodeName)) < 0)
+ {
+ elog(ERROR, "ERROR: %s is not a coordinator.\n", nodeName);
+ return NULL;
+ }
+ if (!doesExist(VAR_coordSlaveServers, idx) || is_none(aval(VAR_coordSlaveServers)[idx]))
+ return NULL;
+ cmd = initCmd(aval(VAR_coordSlaveServers)[idx]); /* run on the slave's host */
+ snprintf(newCommand(cmd), MAXLINE,
+ "rm -rf %s;mkdir -p %s;chmod 0700 %s; rm -f /tmp/.s.*%d*; rm -f /tmp/.s.*%d*",
+ aval(VAR_coordSlaveDirs)[idx], aval(VAR_coordSlaveDirs)[idx], aval(VAR_coordSlaveDirs)[idx],
+ atoi(aval(VAR_coordSlavePorts)[idx]), atoi(aval(VAR_coordSlavePoolerPorts)[idx]));
+ return cmd;
+}
+
+int clean_coordinator_slave(char **nodeList)
+{
+ char **actualNodeList;
+ cmdList_t *cmdList;
+ int ii;
+ cmd_t *cmd;
+ int rc;
+
+ actualNodeList = makeActualNodeList(nodeList);
+ cmdList = initCmdList();
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ elog(INFO, "Clean coordinator slave %s resources.\n", actualNodeList[ii]);
+ if ((cmd = prepare_cleanCoordinatorSlave(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ else
+ elog(WARNING, "WARNING: coordinator slave %s not found.\n", actualNodeList[ii]);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ elog(INFO, "Done.\n");
+ return (rc);
+}
+
+int clean_coordinator_slave_all(void)
+{
+ elog(INFO, "Cleaning all the coordinator slave resources.\n");
+ return(clean_coordinator_slave(aval(VAR_coordNames)));
+}
+
+/*------------------------------------------------------------------------
+ *
+ * Add command
+ *
+ *-----------------------------------------------------------------------*/
+int add_coordinatorMaster(char *name, char *host, int port, int pooler,
+ char *dir, char *extraConf, char *extraPgHbaConf)
+{
+ FILE *f, *lockf;
+ int size, idx;
+ char port_s[MAXTOKEN+1];
+ char pooler_s[MAXTOKEN+1];
+ char max_wal_senders_s[MAXTOKEN+1];
+ int gtmPxyIdx;
+ int connCordIndx;
+ char *gtmHost;
+ char *gtmPort;
+ char pgdumpall_out[MAXPATH+1];
+ char **nodelist = NULL;
+ int ii, jj;
+ char **confFiles = NULL;
+ char **pgHbaConfFiles = NULL;
+
+ /* Check if all the coordinator masters are running */
+ if (!check_AllCoordRunning())
+ {
+ elog(ERROR, "ERROR: Some of the coordinator masters are not running. Cannot add one.\n");
+ return 1;
+ }
+ /* Check if there's no conflict with the current configuration */
+ if (checkNameConflict(name, FALSE))
+ {
+ elog(ERROR, "ERROR: Node name %s duplicate.\n", name);
+ return 1;
+ }
+ if (checkPortConflict(host, port) || checkPortConflict(host, pooler))
+ {
+ elog(ERROR, "ERROR: port numbrer (%d) or pooler port (%d) at host %s conflicts.\n", port, pooler, host);
+ return 1;
+ }
+ if (checkDirConflict(host, dir))
+ {
+ elog(ERROR, "ERROR: directory \"%s\" conflicts at host %s.\n", dir, host);
+ return 1;
+ }
+ /*
+ * Check if coordinator master configuration is consistent
+ */
+ idx = size = arraySizeName(VAR_coordNames);
+ if ((arraySizeName(VAR_coordPorts) != size) ||
+ (arraySizeName(VAR_poolerPorts) != size) ||
+ (arraySizeName(VAR_coordMasterServers) != size) ||
+ (arraySizeName(VAR_coordMasterDirs) != size) ||
+ (arraySizeName(VAR_coordMaxWALSenders) != size) ||
+ (arraySizeName(VAR_coordSpecificExtraConfig) != size) ||
+ (arraySizeName(VAR_coordSpecificExtraPgHba) != size))
+ {
+ elog(ERROR, "ERROR: Found some conflicts in coordinator master configuration.");
+ return 1;
+ }
+ /*
+ * Now reconfigure
+ */
+ /* Need an API to expand the array to desired size */
+ if ((extendVar(VAR_coordNames, idx + 1, "none") != 0) ||
+ (extendVar(VAR_coordMasterServers, idx + 1, "none") != 0) ||
+ (extendVar(VAR_coordPorts, idx + 1, "none") != 0) ||
+ (extendVar(VAR_poolerPorts, idx + 1, "none") != 0) ||
+ (extendVar(VAR_coordMasterDirs, idx + 1, "none") != 0) ||
+ (extendVar(VAR_coordMaxWALSenders, idx + 1, "none") != 0) ||
+ (extendVar(VAR_coordSlaveServers, idx + 1, "none") != 0) ||
+ (extendVar(VAR_coordSlavePorts, idx + 1, "none") != 0) ||
+ (extendVar(VAR_coordSlaveDirs, idx + 1, "none") != 0) ||
+ (extendVar(VAR_coordArchLogDirs, idx + 1, "none") != 0) ||
+ (extendVar(VAR_coordSpecificExtraConfig, idx + 1, "none") != 0) ||
+ (extendVar(VAR_coordSpecificExtraPgHba, idx + 1, "none") != 0))
+ {
+ elog(PANIC, "PANIC: Internal error, inconsistent coordinator information\n");
+ return 1;
+ }
+ /*
+ * TODO: We need another way to configure specific pg_hba.conf and max_wal_senders.
+ */
+ snprintf(port_s, MAXTOKEN, "%d", port);
+ snprintf(pooler_s, MAXTOKEN, "%d", pooler);
+ snprintf(max_wal_senders_s, MAXTOKEN, "%d", getDefaultWalSender(true));
+ assign_arrayEl(VAR_coordNames, idx, name, NULL);
+ assign_arrayEl(VAR_coordMasterServers, idx, host, NULL);
+ assign_arrayEl(VAR_coordPorts, idx, port_s, "-1");
+ assign_arrayEl(VAR_poolerPorts, idx, pooler_s, NULL);
+ assign_arrayEl(VAR_coordMasterDirs, idx, dir, NULL);
+ assign_arrayEl(VAR_coordMaxWALSenders, idx, max_wal_senders_s, NULL);
+ assign_arrayEl(VAR_coordSlaveServers, idx, "none", NULL);
+ assign_arrayEl(VAR_coordSlavePorts, idx, "none", NULL);
+ assign_arrayEl(VAR_coordSlavePoolerPorts, idx, "none", NULL);
+ assign_arrayEl(VAR_coordSlaveDirs, idx, "none", NULL);
+ assign_arrayEl(VAR_coordArchLogDirs, idx, "none", NULL);
+ assign_arrayEl(VAR_coordSpecificExtraConfig, idx, extraConf, NULL);
+ assign_arrayEl(VAR_coordSpecificExtraPgHba, idx, extraPgHbaConf, NULL);
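+ /* handle_no_slaves() is expected to clear the slave flags when no slave entries remain (see config.c). */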
+ handle_no_slaves();
+ /*
+ * Update the configuration file and back it up
+ */
+ /*
+ * Take care of extra conf file
+ */
+ if (doesExist(VAR_coordExtraConfig, 0) && !is_none(sval(VAR_coordExtraConfig)))
+ AddMember(confFiles, sval(VAR_coordExtraConfig));
+ if (doesExist(VAR_coordSpecificExtraConfig, idx) && !is_none(aval(VAR_coordSpecificExtraConfig)[idx]))
+ AddMember(confFiles, aval(VAR_coordSpecificExtraConfig)[idx]);
+
+ /*
+ * Take care of extra pg_hba conf file
+ */
+ if (doesExist(VAR_coordExtraPgHba, 0) && !is_none(sval(VAR_coordExtraPgHba)))
+ AddMember(pgHbaConfFiles, sval(VAR_coordExtraPgHba));
+ if (doesExist(VAR_coordSpecificExtraPgHba, idx) && !is_none(aval(VAR_coordSpecificExtraPgHba)[idx]))
+ AddMember(pgHbaConfFiles, aval(VAR_coordSpecificExtraPgHba)[idx]);
+
+ /*
+ * Main part
+ */
+ if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+ {
+ /* Should it be panic? */
+ elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#===================================================\n"
+ "# pgxc configuration file updated due to coordinator master addition\n"
+ "# %s\n",
+ timeStampString(date, MAXTOKEN+1));
+ fprintAval(f, VAR_coordNames);
+ fprintAval(f, VAR_coordMasterServers);
+ fprintAval(f, VAR_coordPorts);
+ fprintAval(f, VAR_poolerPorts);
+ fprintAval(f, VAR_coordMasterDirs);
+ fprintAval(f, VAR_coordMaxWALSenders);
+ fprintSval(f, VAR_coordSlave);
+ fprintAval(f, VAR_coordSlaveServers);
+ fprintAval(f, VAR_coordSlavePorts);
+ fprintAval(f, VAR_coordSlavePoolerPorts);
+ fprintAval(f, VAR_coordSlaveDirs);
+ fprintAval(f, VAR_coordArchLogDirs);
+ fprintAval(f, VAR_coordSpecificExtraConfig);
+ fprintAval(f, VAR_coordSpecificExtraPgHba);
+ fprintf(f, "%s", "#----End of reconfiguration -------------------------\n");
+ fclose(f);
+ backup_configuration();
+
+ /* Now add the master */
+
+ gtmPxyIdx = getEffectiveGtmProxyIdxFromServerName(host);
+ gtmHost = (gtmPxyIdx > 0) ? aval(VAR_gtmProxyServers)[gtmPxyIdx] : sval(VAR_gtmMasterServer);
+ gtmPort = (gtmPxyIdx > 0) ? aval(VAR_gtmProxyPorts)[gtmPxyIdx] : sval(VAR_gtmMasterPort);
+
+ /* initdb */
+ doImmediate(host, NULL, "PGXC_CTL_SILENT=1 initdb -D %s --nodename %s", dir, name);
+
+ /* Edit configurations */
+ if ((f = pgxc_popen_w(host, "cat >> %s/postgresql.conf", dir)))
+ {
+ appendFiles(f, confFiles);
+ fprintf(f,
+ "#===========================================\n"
+ "# Added at initialization. %s\n"
+ "port = %d\n"
+ "pooler_port = %d\n"
+ "gtm_host = '%s'\n"
+ "gtm_port = %d\n"
+ "# End of Additon\n",
+ timeStampString(date, MAXTOKEN+1),
+ port, pooler, gtmHost, atoi(gtmPort));
+ pclose(f);
+ }
+ CleanArray(confFiles);
+ jj = coordIdx(name);
+ if ((f = pgxc_popen_w(host, "cat >> %s/pg_hba.conf", dir)))
+ {
+ int kk;
+
+ fprintf(f, "#===========================================\n");
+ fprintf(f, "# Added at initialization.\n");
+
+ appendFiles(f, pgHbaConfFiles);
+ for (kk = 0; aval(VAR_coordPgHbaEntries)[kk]; kk++)
+ {
+ fprintf(f,"host all %s %s trust\n", sval(VAR_pgxcOwner), aval(VAR_coordPgHbaEntries)[kk]);
+ if (isVarYes(VAR_coordSlave))
+ if (!is_none(aval(VAR_coordSlaveServers)[jj]))
+ fprintf(f, "host replication %s %s trust\n",
+ sval(VAR_pgxcOwner), aval(VAR_coordPgHbaEntries)[kk]);
+ }
+ fprintf(f, "# End of addition\n");
+ pclose(f);
+ }
+
+ /* find any available coordinator */
+ connCordIndx = get_any_available_coord(-1);
+ if (connCordIndx == -1)
+ {
+ /*
+ * This is the FIRST coordinator being added into
+ * the cluster. Just start it and be done with it.
+ *
+ * Start the new coordinator with --coordinator option
+ */
+ AddMember(nodelist, name);
+ start_coordinator_master(nodelist);
+ CleanArray(nodelist);
+
+ /* ALTER our own definition appropriately */
+ goto selfadd;
+ }
+
+ /* Lock ddl */
+ if ((lockf = pgxc_popen_wRaw("psql -h %s -p %s %s",
+ aval(VAR_coordMasterServers)[connCordIndx],
+ aval(VAR_coordPorts)[connCordIndx],
+ sval(VAR_defaultDatabase))) == NULL)
+ {
+ elog(ERROR, "ERROR: could not open psql command, %s\n", strerror(errno));
+ return 1;
+ }
+ fprintf(lockf, "select pgxc_lock_for_backup();\n"); /* Keep open until the end of the addition. */
+ fflush(lockf);
+
+ /* pg_dumpall */
+ createLocalFileName(GENERAL, pgdumpall_out, MAXPATH);
+ doImmediateRaw("pg_dumpall -p %s -h %s -s --include-nodes --dump-nodes --file=%s",
+ aval(VAR_coordPorts)[connCordIndx],
+ aval(VAR_coordMasterServers)[connCordIndx], pgdumpall_out);
+
+ /* Start the new coordinator */
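+ /* restoremode starts the node so it can accept the schema restore, including node definitions, before joining as a coordinator. */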
+ doImmediate(host, NULL, "pg_ctl start -w -Z restoremode -D %s -o -i", dir);
+
+ /* Allow the new coordinator to start up by sleeping for a couple of seconds */
+ pg_usleep(2000000L);
+
+ /* Restore the backup */
+ doImmediateRaw("psql -h %s -p %d -d %s -f %s", host, port, sval(VAR_defaultDatabase), pgdumpall_out);
+ doImmediateRaw("rm -f %s", pgdumpall_out);
+
+ /* Quit the new coordinator */
+ doImmediate(host, NULL, "pg_ctl stop -w -Z restoremode -D %s", dir);
+
+ /* Start the new coordinator with --coordinator option */
+ AddMember(nodelist, name);
+ start_coordinator_master(nodelist);
+ CleanArray(nodelist);
+
+ /* Issue CREATE NODE on coordinators */
+ for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
+ {
+ if (!is_none(aval(VAR_coordNames)[ii]) && strcmp(aval(VAR_coordNames)[ii], name) != 0)
+ {
+ if ((f = pgxc_popen_wRaw("psql -h %s -p %d %s", aval(VAR_coordMasterServers)[ii], atoi(aval(VAR_coordPorts)[ii]), sval(VAR_defaultDatabase))) == NULL)
+ {
+ elog(ERROR, "ERROR: cannot connect to the coordinator master %s.\n", aval(VAR_coordNames)[ii]);
+ continue;
+ }
+ fprintf(f, "CREATE NODE %s WITH (TYPE = 'coordinator', host='%s', PORT=%d);\n", name, host, port);
+ fprintf(f, "SELECT pgxc_pool_reload();\n");
+ fprintf(f, "\\q\n");
+ pclose(f);
+ }
+ }
+ /* Issue CREATE NODE on datanodes */
+ for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+ {
+ if (!is_none(aval(VAR_datanodeNames)[ii]))
+ {
+ if ((f = pgxc_popen_wRaw("psql -h %s -p %d %s",
+ aval(VAR_coordMasterServers)[connCordIndx],
+ atoi(aval(VAR_coordPorts)[connCordIndx]),
+ sval(VAR_defaultDatabase))) == NULL)
+ {
+ elog(ERROR, "ERROR: cannot connect to the coordinator master %s.\n", aval(VAR_coordNames)[ii]);
+ continue;
+ }
+ fprintf(f, "EXECUTE DIRECT ON (%s) 'CREATE NODE %s WITH (TYPE = ''coordinator'', host=''%s'', PORT=%d)';\n", aval(VAR_datanodeNames)[ii], name, host, port);
+ fprintf(f, "EXECUTE DIRECT ON (%s) 'SELECT pgxc_pool_reload()';\n", aval(VAR_datanodeNames)[ii]);
+ fprintf(f, "\\q\n");
+ pclose(f);
+ }
+ }
+ /* Quit the DDL lock session */
+ fprintf(lockf, "\\q\n");
+ pclose(lockf);
+
+selfadd:
+ if ((f = pgxc_popen_wRaw("psql -h %s -p %d %s", host, port, sval(VAR_defaultDatabase))) == NULL)
+ elog(ERROR, "ERROR: cannot connect to the coordinator master %s.\n", name);
+ else
+ {
+ fprintf(f, "ALTER NODE %s WITH (host='%s', PORT=%d);\n", name, host, port);
+ fprintf(f, "SELECT pgxc_pool_reload();\n");
+ fprintf(f, "\\q\n");
+ pclose(f);
+ }
+ return 0;
+}
+
+int add_coordinatorSlave(char *name, char *host, int port, int pooler_port, char *dir, char *archDir)
+{
+ int idx;
+ FILE *f;
+ char port_s[MAXTOKEN+1];
+ char pooler_s[MAXTOKEN+1];
+ int kk;
+ int size;
+
+ /* Check if the name is valid coordinator */
+ if ((idx = coordIdx(name)) < 0)
+ {
+ elog(ERROR, "ERROR: Specified coordiantor %s is not configured.\n", name);
+ return 1;
+ }
+ /* Check if the coordinator slave is not configured */
+ if (isVarYes(VAR_coordSlave) && doesExist(VAR_coordSlaveServers, idx) && !is_none(aval(VAR_coordSlaveServers)[idx]))
+ {
+ elog(ERROR, "ERROR: Slave for the coordinator %s has already been configured.\n", name);
+ return 1;
+ }
+ /* Check if the resource does not conflict */
+ if (strcmp(dir, archDir) == 0)
+ {
+ elog(ERROR, "ERROR: working directory is the same as WAL archive directory.\n");
+ return 1;
+ }
+ /*
+ * We don't check the name conflict here because acquiring a valid coordinator index means that
+ * there's no name conflict.
+ */
+ if (checkPortConflict(host, port))
+ {
+ elog(ERROR, "ERROR: the port %s has already been used in the host %s.\n", aval(VAR_coordPorts)[idx], host);
+ return 1;
+ }
+ if (checkDirConflict(host, dir) || checkDirConflict(host, archDir))
+ {
+ elog(ERROR, "ERROR: directory %s or %s has already been used by other node.\n", dir, archDir);
+ return 1;
+ }
+ /* Check if the coordinator master is running */
+ if (pingNode(aval(VAR_coordMasterServers)[idx], aval(VAR_coordPorts)[idx]) != 0)
+ {
+ elog(ERROR, "ERROR: Coordinator master %s is not running.\n", name);
+ return 1;
+ }
+
+ snprintf(port_s, MAXTOKEN, "%d", port);
+ snprintf(pooler_s, MAXTOKEN, "%d", pooler_port);
+
+ /* Prepare the resources (directories) */
+ doImmediate(host, NULL, "mkdir -p %s;chmod 0700 %s", dir, dir);
+ doImmediate(host, NULL, "rm -rf %s; mkdir -p %s;chmod 0700 %s", archDir, archDir, archDir);
+ /* Reconfigure the master with WAL archive */
+ /* Update the configuration and backup the configuration file */
+ if ((f = pgxc_popen_w(aval(VAR_coordMasterServers)[idx], "cat >> %s/postgresql.conf", aval(VAR_coordMasterDirs)[idx])) == NULL)
+ {
+ elog(ERROR, "ERROR: Cannot open coordinator master's configuration file, %s/postgresql.conf, %s\n",
+ aval(VAR_coordMasterDirs)[idx], strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#========================================\n"
+ "# Addition for log shipping, %s\n"
+ "wal_level = archive\n"
+ "archive_mode = on\n"
+ "archive_command = 'rsync %%p %s@%s:%s/%%f'\n"
+ "max_wal_senders = %d\n"
+ "# End of Addition\n",
+ timeStampString(date, MAXTOKEN),
+ sval(VAR_pgxcUser), host, archDir,
+ getDefaultWalSender(TRUE));
+ pclose(f);
+ /* pg_hba.conf for replication */
+ if ((f = pgxc_popen_w(aval(VAR_coordMasterServers)[idx], "cat >> %s/pg_hba.conf", aval(VAR_coordMasterDirs)[idx])) == NULL)
+ {
+ elog(ERROR, "ERROR: Cannot open coordinator master's pg_hba.conf file, %s/pg_hba.conf, %s\n",
+ aval(VAR_coordMasterDirs)[idx], strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#================================================\n"
+ "# Additional entry by adding the slave, %s\n",
+ timeStampString(date, MAXTOKEN));
+
+ for (kk = 0; aval(VAR_coordPgHbaEntries)[kk]; kk++)
+ {
+ fprintf(f, "host replication %s %s trust\n",
+ sval(VAR_pgxcOwner), aval(VAR_coordPgHbaEntries)[kk]);
+ }
+
+ fprintf(f,
+ "host replication %s %s/32 trust\n"
+ "# End of addition ===============================\n",
+ sval(VAR_pgxcOwner), getIpAddress(host));
+ pclose(f);
+ /* Reconfigure pgxc_ctl configuration with the new slave */
+ size = arraySizeName(VAR_coordNames);
+ /* Need an API to expand the array to desired size */
+ if ((extendVar(VAR_coordSlaveServers, size, "none") != 0) ||
+ (extendVar(VAR_coordSlaveDirs, size, "none") != 0) ||
+ (extendVar(VAR_coordSlavePorts, size, "none") != 0) ||
+ (extendVar(VAR_coordSlavePoolerPorts, size, "none") != 0) ||
+ (extendVar(VAR_coordArchLogDirs, size, "none") != 0))
+ {
+ elog(PANIC, "PANIC: Internal error, inconsistent coordinator information\n");
+ return 1;
+ }
+ if (!isVarYes(VAR_coordSlave))
+ assign_sval(VAR_coordSlave, "y");
+ replace_arrayEl(VAR_coordSlaveServers, idx, host, NULL);
+ replace_arrayEl(VAR_coordSlavePorts, idx, port_s, NULL);
+ replace_arrayEl(VAR_coordSlavePoolerPorts, idx, pooler_s, NULL);
+ replace_arrayEl(VAR_coordSlaveDirs, idx, dir, NULL);
+ replace_arrayEl(VAR_coordArchLogDirs, idx, archDir, NULL);
+ /* Update the configuration file and back it up */
+ if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+ {
+ /* Should it be panic? */
+ elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#===================================================\n"
+ "# pgxc configuration file updated due to coordinator slave addition\n"
+ "# %s\n",
+ timeStampString(date, MAXTOKEN+1));
+ fprintSval(f, VAR_coordSlave);
+ fprintAval(f, VAR_coordSlaveServers);
+ fprintAval(f, VAR_coordSlavePorts);
+ fprintAval(f, VAR_coordSlavePoolerPorts);
+ fprintAval(f, VAR_coordArchLogDirs);
+ fprintAval(f, VAR_coordSlaveDirs);
+ fprintf(f, "%s", "#----End of reconfiguration -------------------------\n");
+ fclose(f);
+ backup_configuration();
+
+ /* Restart the master */
+ /*
+ * It's not a good idea to use "restart" here because connections from other coordinators
+ * may still be alive. They are held by the pooler, and we would have to reload the pool to
+ * release them, which aborts all the transactions.
+ *
+ * Because we would need to issue pgxc_pool_reload() at all the coordinators, we would have
+ * to give up all the transactions in the whole cluster.
+ *
+ * It is much better to shut down the target coordinator master fast, because this does not
+ * affect transactions that do not involve this coordinator.
+ */
+ doImmediate(aval(VAR_coordMasterServers)[idx], NULL,
+ "pg_ctl stop -w -Z coordinator -D %s -m fast", aval(VAR_coordMasterDirs)[idx]);
+ doImmediate(aval(VAR_coordMasterServers)[idx], NULL,
+ "pg_ctl start -w -Z coordinator -D %s", aval(VAR_coordMasterDirs)[idx]);
+ /* pg_basebackup */
+ doImmediate(host, NULL, "pg_basebackup -p %s -h %s -D %s -x",
+ aval(VAR_coordPorts)[idx], aval(VAR_coordMasterServers)[idx], dir);
+ /* Update the slave configuration with hot standby and port */
+ if ((f = pgxc_popen_w(host, "cat >> %s/postgresql.conf", dir)) == NULL)
+ {
+ elog(ERROR, "ERROR: Cannot open the new slave's postgresql.conf, %s\n", strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#==========================================\n"
+ "# Added to initialize the slave, %s\n"
+ "hot_standby = off\n"
+ "port = %d\n"
+ "pooler_port = %d\n"
+ "wal_level = archive\n"
+ "archive_mode = off\n" /* No archive mode */
+ "archive_command = ''\n" /* No archive mode */
+ "max_wal_senders = 0\n" /* Minimum WAL senders */
+ "# End of Addition\n",
+ timeStampString(date, MAXTOKEN),
+ atoi(aval(VAR_coordSlavePorts)[idx]),
+ atoi(aval(VAR_coordSlavePoolerPorts)[idx]));
+ pclose(f);
+ /* Update the slave recovery.conf */
+ if ((f = pgxc_popen_w(host, "cat >> %s/recovery.conf", dir)) == NULL)
+ {
+ elog(ERROR, "ERROR: Cannot open the slave's recovery.conf, %s\n", strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#==========================================\n"
+ "# Added to add the slave, %s\n"
+ "standby_mode = on\n"
+ "primary_conninfo = 'host = %s port = %s "
+ "user = %s application_name = %s'\n"
+ "restore_command = 'cp %s/%%f %%p'\n"
+ "archive_cleanup_command = 'pg_archivecleanup %s %%r'\n"
+ "# End of addition\n",
+ timeStampString(date, MAXTOKEN), aval(VAR_coordMasterServers)[idx], aval(VAR_coordPorts)[idx],
+ sval(VAR_pgxcOwner), aval(VAR_coordNames)[idx],
+ aval(VAR_coordArchLogDirs)[idx], aval(VAR_coordArchLogDirs)[idx]);
+ pclose(f);
+
+ /* Start the slave */
+ doImmediate(host, NULL, "pg_ctl start -w -Z coordinator -D %s", dir);
+ return 0;
+}
+
+
+/*------------------------------------------------------------------------
+ *
+ * Remove command
+ *
+ *-----------------------------------------------------------------------*/
+int remove_coordinatorMaster(char *name, int clean_opt)
+{
+ /*
+
+ Removing an existing coordinator
+ ================================
+
+ Assume a two-coordinator cluster, COORD_1 & COORD_2.
+ Suppose we want to remove COORD_2 for some reason.
+
+ 1. Stop the coordinator to be removed.
+ In our example we need to stop COORD_2.
+
+ 2. Connect to any of the coordinators except the one to be removed.
+ In our example assuming COORD_1 is running on port 5432,
+ the following command would connect to COORD_1
+
+ psql postgres -p 5432
+
+ 3. Drop the coordinator to be removed.
+ For example to drop coordinator COORD_2
+
+ DROP NODE COORD_2;
+
+ 4. Update the connection information cached in pool.
+
+ SELECT pgxc_pool_reload();
+
+ COORD_2 is now removed from the cluster & COORD_1 would work as if COORD_2 never existed.
+
+ CAUTION: If COORD_2 is still running and clients are connected to it, any queries issued would create inconsistencies in the cluster.
+
+ Please note that there is no need to block DDLs because either way DDLs will fail after step 1 and before step 4.
+
+ */
+
+ int idx;
+ int ii;
+ FILE *f;
+ char **namelist = NULL;
+ char date[MAXTOKEN+1];
+
+ /* Check if the coordinator is configured */
+ if ((idx = coordIdx(name)) < 0)
+ {
+ elog(ERROR, "ERROR: Coordinator %s is not configured.\n", name);
+ return 1;
+ }
+ /* Check if all the other coordinators are running */
+ for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
+ {
+ if ((ii != idx) && !is_none(aval(VAR_coordNames)[ii]) && (pingNode(aval(VAR_coordMasterServers)[ii], aval(VAR_coordPorts)[ii]) != 0))
+ {
+ elog(ERROR, "ERROR: Coordinator master %s is not running.\n", aval(VAR_coordNames)[ii]);
+ return 1;
+ }
+ }
+ /* Check if there's a slave configured */
+ if (doesExist(VAR_coordSlaveServers, idx) && !is_none(aval(VAR_coordSlaveServers)[idx]))
+ remove_coordinatorSlave(name, clean_opt);
+#if 0
+ /* Stop the coordinator master if running */
+ if (pingNode(aval(VAR_coordMasterServers)[idx], aval(VAR_coordPorts)[idx]) == 0)
+ {
+ AddMember(namelist, name);
+ stop_coordinator_master(namelist, "fast");
+ CleanArray(namelist);
+ }
+ /* Cleanup the coordinator master resource if specified */
+ if (clean_opt)
+ doImmediate(aval(VAR_coordMasterServers)[idx], NULL, "rm -rf %s", aval(VAR_coordMasterDirs)[idx]);
+#endif
+ /* Issue "drop node" at all the other coordinators */
+ for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
+ {
+ if ((ii != idx) && doesExist(VAR_coordNames, ii) && !is_none(aval(VAR_coordNames)[ii]))
+ {
+ f = pgxc_popen_wRaw("psql -p %d -h %s %s", atoi(aval(VAR_coordPorts)[ii]), aval(VAR_coordMasterServers)[ii], sval(VAR_defaultDatabase));
+ if (f == NULL)
+ {
+ elog(ERROR, "ERROR: cannot begin psql for the coordinator master %s\n", aval(VAR_coordNames)[ii]);
+ continue;
+ }
+ fprintf(f, "DROP NODE %s;\n", name);
+ fprintf(f, "SELECT pgxc_pool_reload();\n");
+ fprintf(f, "\\q");
+ pclose(f);
+ }
+ }
+ /* Issue "drop node" at all the datanodes */
+ for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+ {
+ if (doesExist(VAR_datanodeNames, ii) && !is_none(aval(VAR_datanodeNames)[ii]))
+ {
+ int coord_idx = get_any_available_coord(idx);
+ if (coord_idx == -1)
+ return 1;
+
+ f = pgxc_popen_wRaw("psql -p %d -h %s %s", atoi(aval(VAR_coordPorts)[coord_idx]), aval(VAR_coordMasterServers)[coord_idx], sval(VAR_defaultDatabase));
+ if (f == NULL)
+ {
+ elog(ERROR, "ERROR: cannot begin psql for the coordinator master %s\n", aval(VAR_coordNames)[coord_idx]);
+ continue;
+ }
+ fprintf(f, "EXECUTE DIRECT ON (%s) 'DROP NODE %s';\n",
+ aval(VAR_datanodeNames)[ii], name);
+ fprintf(f, "EXECUTE DIRECT ON (%s) 'SELECT pgxc_pool_reload()';\n",
+ aval(VAR_datanodeNames)[ii]);
+ fprintf(f, "\\q");
+ pclose(f);
+ }
+ }
+#if 1
+ /* Stop the coordinator master if running */
+ if (pingNode(aval(VAR_coordMasterServers)[idx], aval(VAR_coordPorts)[idx]) == 0)
+ {
+ AddMember(namelist, name);
+ stop_coordinator_master(namelist, "fast");
+ CleanArray(namelist);
+ }
+ /* Cleanup the coordinator master resource if specified */
+ if (clean_opt)
+ doImmediate(aval(VAR_coordMasterServers)[idx], NULL, "rm -rf %s", aval(VAR_coordMasterDirs)[idx]);
+#endif
+ /* Update configuration and backup --> should cleanup "none" entries here */
+ replace_arrayEl(VAR_coordNames, idx, "none", NULL);
+ replace_arrayEl(VAR_coordMasterDirs, idx, "none", NULL);
+ replace_arrayEl(VAR_coordPorts, idx, "-1", "-1");
+ replace_arrayEl(VAR_poolerPorts, idx, "-1", "-1");
+ replace_arrayEl(VAR_coordMasterServers, idx, "none", NULL);
+ replace_arrayEl(VAR_coordMaxWALSenders, idx, "0", "0");
+ replace_arrayEl(VAR_coordSlaveServers, idx, "none", NULL);
+ replace_arrayEl(VAR_coordSlavePorts, idx, "none", NULL);
+ replace_arrayEl(VAR_coordSlavePoolerPorts, idx, "none", NULL);
+ replace_arrayEl(VAR_coordSlaveDirs, idx, "none", NULL);
+ replace_arrayEl(VAR_coordArchLogDirs, idx, "none", NULL);
+ replace_arrayEl(VAR_coordSpecificExtraConfig, idx, "none", NULL);
+ handle_no_slaves();
+ /*
+ * Write config files
+ */
+ if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+ {
+ /* Should it be panic? */
+ elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#================================================================\n"
+ "# pgxc configuration file updated due to coodinator master removal\n"
+ "# %s\n",
+ timeStampString(date, MAXTOKEN+1));
+ fprintSval(f, VAR_coordSlave);
+ fprintAval(f, VAR_coordNames);
+ fprintAval(f, VAR_coordMasterDirs);
+ fprintAval(f, VAR_coordPorts);
+ fprintAval(f, VAR_poolerPorts);
+ fprintAval(f, VAR_coordMasterServers);
+ fprintAval(f, VAR_coordMaxWALSenders);
+ fprintAval(f, VAR_coordSlaveServers);
+ fprintAval(f, VAR_coordSlavePorts);
+ fprintAval(f, VAR_coordSlavePoolerPorts);
+ fprintAval(f, VAR_coordSlaveDirs);
+ fprintAval(f, VAR_coordArchLogDirs);
+ fprintAval(f, VAR_coordSpecificExtraConfig);
+ fclose(f);
+ backup_configuration();
+ return 0;
+}
+
+int remove_coordinatorSlave(char *name, int clean_opt)
+{
+ int idx;
+ char **nodelist = NULL;
+ FILE *f;
+
+ if (!isVarYes(VAR_coordSlave))
+ {
+ elog(ERROR, "ERROR: coordinator slave is not configured.\n");
+ return 1;
+ }
+ idx = coordIdx(name);
+ if (idx < 0)
+ {
+ elog(ERROR, "ERROR: coordinator %s is not configured.\n", name);
+ return 1;
+ }
+ if (!doesExist(VAR_coordSlaveServers, idx) || is_none(aval(VAR_coordSlaveServers)[idx]))
+ {
+ elog(ERROR, "ERROR: coordinator slave %s is not configured.\n", name);
+ return 1;
+ }
+ AddMember(nodelist, name);
+ if (pingNodeSlave(aval(VAR_coordSlaveServers)[idx], aval(VAR_coordSlaveDirs)[idx]) == 0)
+ stop_coordinator_slave(nodelist, "immediate");
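+ /* Turn off archiving and replication-related settings on the master now that the slave is gone. */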
+ {
+ FILE *f;
+ if ((f = pgxc_popen_w(aval(VAR_coordMasterServers)[idx], "cat >> %s/postgresql.conf", aval(VAR_coordMasterDirs)[idx])) == NULL)
+ {
+ elog(ERROR, "ERROR: cannot open %s/postgresql.conf at %s, %s\n", aval(VAR_coordMasterDirs)[idx], aval(VAR_coordMasterServers)[idx], strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#=======================================\n"
+ "# Updated to remove the slave %s\n"
+ "archive_mode = off\n"
+ "synchronous_standby_names = ''\n"
+ "archive_command = ''\n"
+ "max_wal_senders = 0\n"
+ "wal_level = minimal\n"
+ "# End of the update\n",
+ timeStampString(date, MAXTOKEN));
+ pclose(f);
+ }
+ doImmediate(aval(VAR_coordMasterServers)[idx], NULL, "pg_ctl restart -Z coordinator -D %s", aval(VAR_coordMasterDirs)[idx]);
+ if (clean_opt)
+ clean_coordinator_slave(nodelist);
+ /*
+ * Maintain variables
+ */
+ replace_arrayEl(VAR_coordSlaveServers, idx, "none", NULL);
+ replace_arrayEl(VAR_coordSlavePorts, idx, "none", NULL);
+ replace_arrayEl(VAR_coordSlavePoolerPorts, idx, "none", NULL);
+ replace_arrayEl(VAR_coordSlaveDirs, idx, "none", NULL);
+ replace_arrayEl(VAR_coordArchLogDirs, idx, "none", NULL);
+ handle_no_slaves();
+ /*
+ * Maintain configuration file
+ */
+ if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+ {
+ /* Should it be panic? */
+ elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#================================================================\n"
+ "# pgxc configuration file updated due to coodinator slave removal\n"
+ "# %s\n",
+ timeStampString(date, MAXTOKEN));
+ fprintSval(f, VAR_coordSlave);
+ fprintAval(f, VAR_coordSlaveServers);
+ fprintAval(f, VAR_coordSlavePorts);
+ fprintAval(f, VAR_coordSlavePoolerPorts);
+ fprintAval(f, VAR_coordSlaveDirs);
+ fprintAval(f, VAR_coordArchLogDirs);
+ fclose(f);
+ backup_configuration();
+ CleanArray(nodelist);
+ return 0;
+}
+
+
+
+/*
+ * Start coordinator master ---------------------------------------------
+ */
+int start_coordinator_master_all(void)
+{
+ elog(INFO, "Starting coordinator master.\n");
+ return(start_coordinator_master(aval(VAR_coordNames)));
+}
+
+cmd_t *prepare_startCoordinatorMaster(char *nodeName)
+{
+ cmd_t *cmd = NULL, *cmdPgCtl;
+ int idx;
+
+ if ((idx = coordIdx(nodeName)) < 0)
+ {
+ elog(WARNING, "WARNING: %s is not a coordinator, skipping.\n", nodeName);
+ return(NULL);
+ }
+ /*
+ * Check if the coordinator is running
+ */
+ if (pingNode(aval(VAR_coordMasterServers)[idx], aval(VAR_coordPorts)[idx]) == 0)
+ {
+ elog(ERROR, "ERROR: target coordinator master %s is already running now. Skip initialization.\n",
+ nodeName);
+ return(NULL);
+ }
+ cmd = cmdPgCtl = initCmd(aval(VAR_coordMasterServers)[idx]);
+ snprintf(newCommand(cmdPgCtl), MAXLINE,
+ "pg_ctl start -w -Z coordinator -D %s -o -i",
+ aval(VAR_coordMasterDirs)[idx]);
+ return(cmd);
+}
+
+int start_coordinator_master(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+ int rc;
+
+ actualNodeList = makeActualNodeList(nodeList);
+ cmdList = initCmdList();
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ elog(INFO, "Starting coordinator master %s\n", actualNodeList[ii]);
+ if ((cmd = prepare_startCoordinatorMaster(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ elog(INFO, "Done.\n");
+ return(rc);
+}
+
+/*
+ * Start coordinator slaves ----------------------------------------
+ */
+int start_coordinator_slave_all(void)
+{
+ elog(INFO, "Starting all the coordinator slaves.\n");
+ return(start_coordinator_slave(aval(VAR_coordNames)));
+}
+
+cmd_t *prepare_startCoordinatorSlave(char *nodeName)
+{
+ int idx;
+ FILE *f;
+ char timestamp[MAXTOKEN+1];
+ cmd_t *cmd = NULL, *cmdPgCtlStart, *cmdPgConfMaster, *cmdMasterReload;
+
+ if ((idx = coordIdx(nodeName)) < 0)
+ {
+ elog(WARNING, "WARNING: %s is not a coordinator, skipping.\n", nodeName);
+ return(NULL);
+ }
+ /*
+ * Check if the coordinator is running
+ */
+ if (pingNode(aval(VAR_coordMasterServers)[idx], aval(VAR_coordPorts)[idx]) != 0)
+ {
+ elog(ERROR, "ERROR: Coordinator master %s is not running now. Cannot start the slave.\n",
+ aval(VAR_coordNames)[idx]);
+ return(NULL);
+ }
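+ /*
+ * Three chained steps follow: start the slave, append the synchronous
+ * replication settings to the master's postgresql.conf, then reload
+ * the master so they take effect.
+ */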
+ cmd = cmdPgCtlStart = initCmd(aval(VAR_coordSlaveServers)[idx]);
+ snprintf(newCommand(cmdPgCtlStart), MAXLINE,
+ "pg_ctl start -w -Z coordinator -D %s -o -i",
+ aval(VAR_coordSlaveDirs)[idx]);
+
+ /* Update postgresql.conf at the master */
+
+ appendCmdEl(cmdPgCtlStart, (cmdPgConfMaster = initCmd(aval(VAR_coordMasterServers)[idx])));
+ snprintf(newCommand(cmdPgConfMaster), MAXLINE,
+ "cat >> %s/postgresql.conf", aval(VAR_coordMasterDirs)[idx]);
+ if ((f = prepareLocalStdin(newFilename(cmdPgConfMaster->localStdin), MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmd);
+ return(NULL);
+ }
+ fprintf(f,
+ "#==========================================================\n"
+ "# Added to start the slave in sync. mode, %s\n"
+ "synchronous_commit = on\n"
+ "synchronous_standby_names = '%s'\n"
+ "# End of the addition\n",
+ timeStampString(timestamp, MAXTOKEN),
+ aval(VAR_coordNames)[idx]);
+ fclose(f);
+
+ /* Reload postgresql.conf change */
+ appendCmdEl(cmdPgCtlStart, (cmdMasterReload = initCmd(aval(VAR_coordMasterServers)[idx])));
+ snprintf(newCommand(cmdMasterReload), MAXLINE,
+ "pg_ctl reload -Z coordinator -D %s",
+ aval(VAR_coordMasterDirs)[idx]);
+ return(cmd);
+}
+
+int start_coordinator_slave(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+ int rc;
+
+ if (!isVarYes(VAR_coordSlave))
+ {
+ elog(ERROR, "ERROR: Coordinator slaves are not configured.\n");
+ return(1);
+ }
+ cmdList = initCmdList();
+ actualNodeList = makeActualNodeList(nodeList);
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ elog(INFO, "Starting coordinator slave %s.\n", actualNodeList[ii]);
+ if ((cmd = prepare_startCoordinatorSlave(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ elog(INFO, "Done\n");
+ return(rc);
+}
+
+/*
+ * Stop coordinator masters ---------------------------------------------------
+ */
+/* Does not check if immediate is valid here */
+int stop_coordinator_master_all(char *immediate)
+{
+ elog(INFO, "Stopping all the coordinator masters.\n");
+ return(stop_coordinator_master(aval(VAR_coordNames), immediate));
+}
+
+cmd_t *prepare_stopCoordinatorMaster(char *nodeName, char *immediate)
+{
+ int idx;
+ cmd_t *cmd;
+
+ if ((idx = coordIdx(nodeName)) < 0)
+ {
+ elog(WARNING, "WARNING: %s is not a coordinator.\n", nodeName);
+ return(NULL);
+ }
+ cmd = initCmd(aval(VAR_coordMasterServers)[idx]);
+ if (immediate)
+ snprintf(newCommand(cmd), MAXLINE,
+ "pg_ctl stop -w -Z coordinator -D %s -m %s",
+ aval(VAR_coordMasterDirs)[idx], immediate);
+ else
+ snprintf(newCommand(cmd), MAXLINE,
+ "pg_ctl stop -w -Z coordinator -D %s",
+ aval(VAR_coordMasterDirs)[idx]);
+ return(cmd);
+}
+
+
+/* Does not check if immediate is valid here. */
+int stop_coordinator_master(char **nodeList, char *immediate)
+{
+ char **actualNodeList;
+ int ii;
+ cmdList_t *cmdList;
+ int rc;
+
+ if (immediate == NULL)
+ immediate = FAST;
+ actualNodeList = makeActualNodeList(nodeList);
+ cmdList = initCmdList();
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ cmd_t *cmd;
+ elog(INFO, "Stopping coordinator master %s.\n", actualNodeList[ii]);
+ if ((cmd = prepare_stopCoordinatorMaster(actualNodeList[ii], immediate)))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ elog(INFO, "Done.\n");
+ return(rc);
+}
+
+
+/*
+ * Stop coordinator slaves ----------------------------------------------------
+ */
+int stop_coordinator_slave_all(char *immediate)
+{
+ elog(INFO, "Stopping all the coordinator slaves.\n");
+ return(stop_coordinator_slave(aval(VAR_coordNames), immediate));
+}
+
+cmd_t *prepare_stopCoordinatorSlave(char *nodeName, char *immediate)
+{
+ int idx;
+ cmd_t *cmd = NULL, *cmdMasterReload, *cmdPgCtlStop;
+ FILE *f;
+ char localStdin[MAXPATH+1];
+ char timestamp[MAXTOKEN+1];
+
+ if ((idx = coordIdx(nodeName)) < 0)
+ {
+ elog(WARNING, "WARNING: %s is not a coordinator.\n", nodeName);
+ return(NULL);
+ }
+ if (!doesExist(VAR_coordSlaveServers, idx) || is_none(aval(VAR_coordSlaveServers)[idx]))
+ {
+ elog(WARNING, "coordinator %s does not have a slave. Skipping.\n", nodeName);
+ return(NULL);
+ }
+ if (pingNode(aval(VAR_coordMasterServers)[idx], aval(VAR_coordPorts)[idx]) == 0)
+ {
+ /* Master is running. Need to switch log shipping to asynchronous mode. */
+ cmd = cmdMasterReload = initCmd(aval(VAR_coordMasterServers)[idx]);
+ if ((f = prepareLocalStdin(localStdin, MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmd);
+ return(NULL);
+ }
+ fprintf(f,
+ "#=======================================\n"
+ "# Updated to trun off the slave %s\n"
+ "synchronous_standby_names = ''\n"
+ "# End of the update\n",
+ timeStampString(timestamp, MAXTOKEN));
+ fclose(f);
+ snprintf(newCommand(cmdMasterReload), MAXLINE,
+ "cat >> %s/postgresql.conf",
+ aval(VAR_coordMasterDirs)[idx]);
+ cmdMasterReload->localStdin = Strdup(localStdin);
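+ /*
+ * No explicit reload is issued here; the cleared
+ * synchronous_standby_names takes effect the next time the master
+ * reloads or restarts its configuration.
+ */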
+ }
+ if (cmd)
+ appendCmdEl(cmdMasterReload, (cmdPgCtlStop = initCmd(aval(VAR_coordSlaveServers)[idx])));
+ else
+ cmd = cmdPgCtlStop = initCmd(aval(VAR_coordSlaveServers)[idx]);
+ if (immediate)
+ snprintf(newCommand(cmdPgCtlStop), MAXLINE,
+ "pg_ctl stop -w -Z coordinator -D %s -m %s",
+ aval(VAR_coordSlaveDirs)[idx], immediate);
+ else
+ snprintf(newCommand(cmdPgCtlStop), MAXLINE,
+ "pg_ctl stop -w -Z coordinator -D %s",
+ aval(VAR_coordSlaveDirs)[idx]);
+ return(cmd);
+}
+
+
+int stop_coordinator_slave(char **nodeList, char *immediate)
+{
+ char **actualNodeList;
+ int ii;
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+ int rc;
+
+ if (!isVarYes(VAR_coordSlave))
+ {
+ elog(ERROR, "ERROR: Coordinator slaves are not configured.\n");
+ return(1);
+ }
+ if (immediate == NULL)
+ immediate = "fast";
+ actualNodeList = makeActualNodeList(nodeList);
+ cmdList = initCmdList();
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ elog(INFO, "Stopping the coordinator slave %s.\n", actualNodeList[ii]);
+ if ((cmd = prepare_stopCoordinatorSlave(actualNodeList[ii], immediate)))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ return(rc);
+}
+
+/*
+ * Failover coordinator ---------------------------------------------------------
+ */
+int failover_coordinator(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ int rc = 0;
+
+ elog(INFO, "Failover coordinators.\n");
+ if (!isVarYes(VAR_coordSlave))
+ {
+ elog(ERROR, "ERROR: Coordinator slaves are not configured.\n");
+ return(2);
+ }
+ actualNodeList = makeActualNodeList(nodeList);
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ int idx;
+ int rc_local;
+
+ elog(INFO, "Failover the coordinator %s.\n", actualNodeList[ii]);
+ if ((idx = coordIdx(actualNodeList[ii])) < 0)
+ {
+ elog(ERROR, "ERROR: %s is not a coordinator. Skipping.\n", actualNodeList[ii]);
+ continue;
+ }
+ if (is_none(aval(VAR_coordSlaveServers)[idx]))
+ {
+ elog(ERROR, "ERROR: slave of the coordinator %s is not configured. Skipping\n",
+ actualNodeList[ii]);
+ continue;
+ }
+ rc_local = failover_oneCoordinator(idx);
+ if (rc_local < 0)
+ return(rc_local);
+ else
+ if (rc_local > rc)
+ rc = rc_local;
+ }
+ elog(INFO, "Done.\n");
+ return(rc);
+}
+
+static int failover_oneCoordinator(int coordIdx)
+{
+ int rc = 0;
+ int rc_local;
+ int jj;
+ int gtmPxyIdx;
+ char *gtmHost;
+ char *gtmPort;
+ FILE *f;
+ char timestamp[MAXTOKEN+1];
+ char cmd[MAXLINE];
+ int cmdlen;
+ bool dnReconfigured;
+
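+ /*
+ * checkRc() keeps the worst (highest) exit status seen so far among
+ * the shell steps below; the function returns that value at the end.
+ */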
+#define checkRc() do{if(WEXITSTATUS(rc_local) > rc) rc = WEXITSTATUS(rc_local);}while(0)
+
+ /*
+ * Determine the target gtm
+ */
+ gtmPxyIdx= getEffectiveGtmProxyIdxFromServerName(aval(VAR_coordSlaveServers)[coordIdx]);
+ gtmHost = (gtmPxyIdx < 0) ? sval(VAR_gtmMasterServer) :
+ aval(VAR_gtmProxyServers)[gtmPxyIdx];
+ gtmPort = (gtmPxyIdx < 0) ? sval(VAR_gtmMasterPort) :
+ aval(VAR_gtmProxyPorts)[gtmPxyIdx];
+ if (gtmPxyIdx >= 0)
+ elog(NOTICE, "Failover coordinator %s using gtm %s\n",
+ aval(VAR_coordNames)[coordIdx], aval(VAR_gtmProxyNames)[gtmPxyIdx]);
+ else
+ elog(NOTICE, "Failover coordinator %s using GTM itself\n",
+ aval(VAR_coordNames)[coordIdx]);
+
+ /* Promote the slave */
+ rc_local = doImmediate(aval(VAR_coordSlaveServers)[coordIdx], NULL,
+ "pg_ctl promote -Z coordinator -D %s",
+ aval(VAR_coordSlaveDirs)[coordIdx]);
+ checkRc();
+
+ /* Reconfigure new coordinator master with new gtm_proxy or gtm */
+
+ if ((f = pgxc_popen_w(aval(VAR_coordSlaveServers)[coordIdx],
+ "cat >> %s/postgresql.conf",
+ aval(VAR_coordSlaveDirs)[coordIdx])) == NULL)
+ {
+ elog(ERROR, "ERROR: Could not prepare to update postgresql.conf, %s", strerror(errno));
+ return(-1);
+ }
+ fprintf(f,
+ "#=================================================\n"
+ "# Added to promote, %s\n"
+ "gtm_host = '%s'\n"
+ "gtm_port = %s\n"
+ "# End of addition\n",
+ timeStampString(timestamp, MAXTOKEN),
+ gtmHost, gtmPort);
+ pclose(f);
+
+ /* Restart coord Slave Server */
+ rc_local = doImmediate(aval(VAR_coordSlaveServers)[coordIdx], NULL,
+ "pg_ctl restart -Z coordinator -D %s -w -o -i; sleep 1",
+ aval(VAR_coordSlaveDirs)[coordIdx]);
+ checkRc();
+
+ /* Update the configuration variable */
+ var_assign(&(aval(VAR_coordMasterServers)[coordIdx]), Strdup(aval(VAR_coordSlaveServers)[coordIdx]));
+ var_assign(&(aval(VAR_coordPorts)[coordIdx]), Strdup(aval(VAR_coordSlavePorts)[coordIdx]));
+ var_assign(&(aval(VAR_poolerPorts)[coordIdx]), Strdup(aval(VAR_coordSlavePoolerPorts)[coordIdx]));
+ var_assign(&(aval(VAR_coordSlaveServers)[coordIdx]), Strdup("none"));
+ var_assign(&(aval(VAR_coordSlavePorts)[coordIdx]), Strdup("none"));
+ var_assign(&(aval(VAR_coordSlavePoolerPorts)[coordIdx]), Strdup("none"));
+ var_assign(&(aval(VAR_coordMasterDirs)[coordIdx]), Strdup(aval(VAR_coordSlaveDirs)[coordIdx]));
+ var_assign(&(aval(VAR_coordSlaveDirs)[coordIdx]), Strdup("none"));
+
+ if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+ {
+ elog(ERROR, "ERROR: Failed to open configuration file %s, %s\n", pgxc_ctl_config_path, strerror(errno));
+ return(-1);
+ }
+ fprintf(f,
+ "#=====================================================\n"
+ "# Updated due to the coordinator failover, %s, %s\n"
+ "coordMasterServers=( %s )\n"
+ "coordPorts=( %s )\n"
+ "poolerPorts=( %s )\n"
+ "coordMasterDirs=( %s )\n"
+ "coordSlaveServers=( %s )\n"
+ "coordSlavePorts=( %s )\n"
+ "coordSlavePoolerPorts=( %s )\n"
+ "coordSlaveDirs=( %s )\n"
+ "# End of the update\n",
+ aval(VAR_coordNames)[coordIdx], timeStampString(timestamp, MAXTOKEN),
+ listValue(VAR_coordMasterServers),
+ listValue(VAR_coordPorts),
+ listValue(VAR_poolerPorts),
+ listValue(VAR_coordMasterDirs),
+ listValue(VAR_coordSlaveServers),
+ listValue(VAR_coordSlavePorts),
+ listValue(VAR_coordSlavePoolerPorts),
+ listValue(VAR_coordSlaveDirs));
+ fclose(f);
+
+ /* Backup the configuration file */
+ if (isVarYes(VAR_configBackup))
+ {
+ rc_local = doConfigBackup();
+ checkRc();
+ }
+
+ cmdlen = 0;
+ cmd[0] = '\0';
+ /*
+ * Reconfigure the datanodes with the new coordinator. We prepare the
+ * commands here and pass them to the first coordinator we reconfigure
+ * below.
+ */
+ for (jj = 0; aval(VAR_datanodeNames)[jj]; jj++)
+ {
+ int len;
+
+ if (is_none(aval(VAR_datanodeMasterServers)[jj]))
+ continue;
+
+ if (pingNode(aval(VAR_datanodeMasterServers)[jj], aval(VAR_datanodePorts)[jj]) != 0)
+ {
+ elog(ERROR, "Datanode %s is not running. Skip reconfiguration for this datanode.\n",
+ aval(VAR_coordNames)[jj]);
+ continue;
+ }
+
+ len = snprintf(cmd + cmdlen, MAXLINE - cmdlen, "EXECUTE DIRECT ON (%s) 'ALTER NODE %s WITH (HOST=''%s'', PORT=%s)';\n"
+ "EXECUTE DIRECT ON (%s) 'select pgxc_pool_reload()';\n",
+ aval(VAR_datanodeNames)[jj],
+ aval(VAR_coordNames)[coordIdx],
+ aval(VAR_coordMasterServers)[coordIdx],
+ aval(VAR_coordPorts)[coordIdx],
+ aval(VAR_datanodeNames)[jj]);
+ if (len >= (MAXLINE - cmdlen))
+ {
+ elog(ERROR, "Datanode command exceeds the maximum allowed length.\n");
+ return -1;
+ }
+ cmdlen += len;
+ }
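+ /*
+ * For illustration (names hypothetical): with datanode "dn1" and the
+ * failed-over coordinator "coord1" now at host2:5433, the accumulated
+ * script reads
+ *
+ * EXECUTE DIRECT ON (dn1) 'ALTER NODE coord1 WITH (HOST=''host2'', PORT=5433)';
+ * EXECUTE DIRECT ON (dn1) 'select pgxc_pool_reload()';
+ */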
+ dnReconfigured = false;
+
+ /*
+ * Reconfigure the running coordinators with the new coordinator. The
+ * first coordinator reached also relays the datanode script built above.
+ */
+ for (jj = 0; aval(VAR_coordNames)[jj]; jj++)
+ {
+ if (is_none(aval(VAR_coordMasterServers)[jj]))
+ continue;
+
+ if (pingNode(aval(VAR_coordMasterServers)[jj], aval(VAR_coordPorts)[jj]) != 0)
+ {
+ elog(ERROR, "Coordinator %s is not running. Skip reconfiguration for this coordinator.\n",
+ aval(VAR_coordNames)[jj]);
+ continue;
+ }
+ if ((f = pgxc_popen_wRaw("psql -p %s -h %s %s %s",
+ aval(VAR_coordPorts)[jj],
+ aval(VAR_coordMasterServers)[jj],
+ sval(VAR_defaultDatabase),
+ sval(VAR_pgxcOwner)))
+ == NULL)
+ {
+ elog(ERROR, "ERROR: failed to start psql for coordinator %s, %s\n", aval(VAR_coordNames)[jj], strerror(errno));
+ continue;
+ }
+ fprintf(f,
+ "ALTER NODE %s WITH (HOST='%s', PORT=%s);\n"
+ "select pgxc_pool_reload();\n"
+ "%s"
+ "\\q\n",
+ aval(VAR_coordNames)[coordIdx],
+ aval(VAR_coordMasterServers)[coordIdx],
+ aval(VAR_coordPorts)[coordIdx],
+ dnReconfigured ? "" : cmd);
+ dnReconfigured = true;
+ pclose(f);
+ }
+ return(rc);
+
+# undef checkRc
+}
+
+/*
+ * Show coordinator configuration
+ */
+int show_config_coordMasterSlaveMulti(char **nodeList)
+{
+ int ii;
+ int idx;
+
+ lockLogFile();
+ for (ii = 0; nodeList[ii]; ii++)
+ {
+ if ((idx = coordIdx(nodeList[ii])) < 0)
+ continue;
+ else
+ {
+ show_config_coordMaster(TRUE, idx, aval(VAR_coordMasterServers)[idx]);
+ if (isVarYes(VAR_coordSlave))
+ show_config_coordSlave(TRUE, idx, aval(VAR_coordSlaveServers)[idx]);
+ }
+ }
+ unlockLogFile();
+ return 0;
+}
+
+int show_config_coordMasterMulti(char **nodeList)
+{
+ int ii;
+ int idx;
+
+ lockLogFile();
+ for (ii = 0; nodeList[ii]; ii++)
+ {
+ if ((idx = coordIdx(nodeList[ii])) < 0)
+ continue;
+ else
+ show_config_coordMaster(TRUE, idx, aval(VAR_coordMasterServers)[idx]);
+ }
+ unlockLogFile();
+ return 0;
+}
+
+int show_config_coordSlaveMulti(char **nodeList)
+{
+ int ii;
+ int idx;
+
+ if (!isVarYes(VAR_coordSlave))
+ return(1);
+ lockLogFile();
+ for (ii = 0; nodeList[ii]; ii++)
+ {
+ if ((idx = coordIdx(nodeList[ii])) < 0)
+ continue;
+ else
+ show_config_coordSlave(TRUE, idx, aval(VAR_coordSlaveServers)[idx]);
+ }
+ unlockLogFile();
+ return 0;
+}
+
+int show_config_coordMaster(int flag, int idx, char *hostname)
+{
+ int ii;
+ char outBuf[MAXLINE+1];
+ char editBuf[MAXPATH+1];
+
+ outBuf[0] = 0;
+ if (flag)
+ strncat(outBuf, "Coordinator Master: ", MAXLINE);
+ if (hostname)
+ {
+ snprintf(editBuf, MAXPATH, "host: %s", hostname);
+ strncat(outBuf, editBuf, MAXLINE);
+ }
+ if (flag || hostname)
+ strncat(outBuf, "\n", MAXLINE);
+ lockLogFile();
+ if (outBuf[0])
+ elog(NOTICE, "%s", outBuf);
+ elog(NOTICE, " Nodename: '%s', port: %s, pooler port: %s\n",
+ aval(VAR_coordNames)[idx], aval(VAR_coordPorts)[idx], aval(VAR_poolerPorts)[idx]);
+ elog(NOTICE, " MaxWalSenders: %s, Dir: '%s'\n",
+ aval(VAR_coordMaxWALSenders)[idx], aval(VAR_coordMasterDirs)[idx]);
+ elog(NOTICE, " ExtraConfig: '%s', Specific Extra Config: '%s'\n",
+ sval(VAR_coordExtraConfig), aval(VAR_coordSpecificExtraConfig)[idx]);
+ strncpy(outBuf, " pg_hba entries ( ", MAXLINE);
+ for (ii = 0; aval(VAR_coordPgHbaEntries)[ii]; ii++)
+ {
+ snprintf(editBuf, MAXPATH, "'%s' ", aval(VAR_coordPgHbaEntries)[ii]);
+ strncat(outBuf, editBuf, MAXLINE);
+ }
+ elog(NOTICE, "%s)\n", outBuf);
+ elog(NOTICE, " Extra pg_hba: '%s', Specific Extra pg_hba: '%s'\n",
+ sval(VAR_coordExtraPgHba), aval(VAR_coordSpecificExtraPgHba)[idx]);
+ unlockLogFile();
+ return 0;
+}
+
+int show_config_coordSlave(int flag, int idx, char *hostname)
+{
+ char outBuf[MAXLINE+1];
+ char editBuf[MAXPATH+1];
+
+ outBuf[0] = 0;
+ if (flag)
+ strncat(outBuf, "Coordinator Slave: ", MAXLINE);
+ if (hostname)
+ {
+ snprintf(editBuf, MAXPATH, "host: %s", hostname);
+ strncat(outBuf, editBuf, MAXLINE);
+ }
+ if (flag || hostname)
+ strncat(outBuf, "\n", MAXLINE);
+ lockLogFile();
+ if (outBuf[0])
+ elog(NOTICE, "%s", outBuf);
+ elog(NOTICE," Nodename: '%s', port: %s, pooler port: %s\n",
+ aval(VAR_coordNames)[idx], aval(VAR_coordSlavePorts)[idx],
+ aval(VAR_coordSlavePoolerPorts)[idx]);
+ elog(NOTICE, " Dir: '%s', Archive Log Dir: '%s'\n",
+ aval(VAR_coordSlaveDirs)[idx], aval(VAR_coordArchLogDirs)[idx]);
+ unlockLogFile();
+ return 0;
+}
+
+
+/*
+ * Checks if all the coordinators are running
+ *
+ * Returns FALSE if any of them are not running.
+ */
+int check_AllCoordRunning(void)
+{
+ int ii;
+
+ for (ii = 0; aval(VAR_coordMasterServers)[ii]; ii++)
+ {
+ if (!is_none(aval(VAR_coordMasterServers)[ii]))
+ if (pingNode(aval(VAR_coordMasterServers)[ii], aval(VAR_coordPorts)[ii]) != 0)
+ return FALSE;
+ }
+ return TRUE;
+}
diff --git a/contrib/pgxc_ctl/coord_cmd.h b/contrib/pgxc_ctl/coord_cmd.h
new file mode 100644
index 0000000000..77f33aaded
--- /dev/null
+++ b/contrib/pgxc_ctl/coord_cmd.h
@@ -0,0 +1,72 @@
+/*-------------------------------------------------------------------------
+ *
+ * coord_cmd.h
+ *
+ * Coordinator command module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef COORD_CMD_H
+#define COORD_CMD_H
+
+#include "utils.h"
+
+extern int init_coordinator_master(char **nodeList);
+extern int init_coordinator_slave(char **nodeList);
+extern int init_coordinator_master_all(void);
+extern int init_coordinator_slave_all(void);
+extern cmd_t *prepare_initCoordinatorMaster(char *nodeName);
+extern cmd_t *prepare_initCoordinatorSlave(char *nodeName);
+
+extern int configure_nodes(char **nodeList);
+extern int configure_datanodes(char **nodeList);
+extern int configure_nodes_all(void);
+extern cmd_t *prepare_configureNode(char *nodeName);
+
+extern int kill_coordinator_master(char **nodeList);
+extern int kill_coordinator_master_all(void);
+extern int kill_coordinator_slave(char **nodeList);
+extern int kill_coordinator_slave_all(void);
+extern cmd_t *prepare_killCoordinatorMaster(char *nodeName);
+extern cmd_t *prepare_killCoordinatorSlave(char *nodeName);
+
+extern int clean_coordinator_master(char **nodeList);
+extern int clean_coordinator_master_all(void);
+extern int clean_coordinator_slave(char **nodeList);
+extern int clean_coordinator_slave_all(void);
+extern cmd_t *prepare_cleanCoordinatorMaster(char *nodeName);
+extern cmd_t *prepare_cleanCoordinatorSlave(char *nodeName);
+
+extern int start_coordinator_master(char **nodeList);
+extern int start_coordinator_master_all(void);
+extern int start_coordinator_slave(char **nodeList);
+extern int start_coordinator_slave_all(void);
+extern cmd_t *prepare_startCoordinatorMaster(char *nodeName);
+extern cmd_t *prepare_startCoordinatorSlave(char *nodeName);
+
+extern int stop_coordinator_master(char **nodeList, char *immediate);
+extern int stop_coordinator_master_all(char *immediate);
+extern int stop_coordinator_slave(char **nodeList, char *immediate);
+extern int stop_coordinator_slave_all(char *immediate);
+extern cmd_t *prepare_stopCoordinatorMaster(char *nodeName, char *immediate);
+extern cmd_t *prepare_stopCoordinatorSlave(char *nodeName, char *immediate);
+
+extern int add_coordinatorMaster(char *name, char *host, int port, int pooler,
+ char *dir, char *extraConf, char *extraPgHbaConf);
+extern int add_coordinatorSlave(char *name, char *host, int port, int pooler, char *dir, char *archDir);
+extern int remove_coordinatorMaster(char *name, int clean_opt);
+extern int remove_coordinatorSlave(char *name, int clean_opt);
+
+extern int failover_coordinator(char **nodeList);
+
+extern int show_config_coordMasterSlaveMulti(char **nodeList);
+extern int show_config_coordMasterMulti(char **nodeList);
+extern int show_config_coordSlaveMulti(char **nodeList);
+extern int show_config_coordMaster(int flag, int idx, char *hostname);
+extern int show_config_coordSlave(int flag, int idx, char *hostname);
+extern int check_AllCoordRunning(void);
+
+
+#endif /* COORD_CMD_H */
diff --git a/contrib/pgxc_ctl/coord_command.h b/contrib/pgxc_ctl/coord_command.h
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/contrib/pgxc_ctl/coord_command.h
@@ -0,0 +1 @@
+
diff --git a/contrib/pgxc_ctl/datanode_cmd.c b/contrib/pgxc_ctl/datanode_cmd.c
new file mode 100644
index 0000000000..4a3b83b325
--- /dev/null
+++ b/contrib/pgxc_ctl/datanode_cmd.c
@@ -0,0 +1,2248 @@
+/*-------------------------------------------------------------------------
+ *
+ * datanode_cmd.c
+ *
+ * Datanode command module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#include <stdio.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <signal.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <setjmp.h>
+#include <string.h>
+#include <readline/readline.h>
+#include <readline/history.h>
+
+#include "pgxc_ctl.h"
+#include "do_command.h"
+#include "variables.h"
+#include "varnames.h"
+#include "pgxc_ctl_log.h"
+#include "config.h"
+#include "do_shell.h"
+#include "utils.h"
+#include "datanode_cmd.h"
+#include "gtm_util.h"
+#include "coord_cmd.h"
+
+static char date[MAXTOKEN+1];
+
+/*
+ *======================================================================
+ *
+ * Datanode stuff
+ *
+ *=====================================================================
+ */
+static int failover_oneDatanode(int datanodeIdx);
+
+/*
+ * Initialize datanode master ------------------------------------
+ */
+int init_datanode_master_all(void)
+{
+ elog(NOTICE, "Initialize all the datanode masters.\n");
+ return(init_datanode_master(aval(VAR_datanodeNames)));
+}
+
+cmd_t *prepare_initDatanodeMaster(char *nodeName)
+{
+ int idx;
+ int jj;
+ cmd_t *cmd, *cmdInitdb, *cmdPgConf, *cmdPgHba;
+ char *gtmHost;
+ char *gtmPort;
+ int gtmIdx;
+ char **fileList = NULL;
+ FILE *f;
+ char timeStamp[MAXTOKEN+1];
+ char remoteDirCheck[MAXPATH * 2 + 128];
+ char remoteWalDirCheck[MAXPATH * 2 + 128];
+ bool wal;
+
+ if ((idx = datanodeIdx(nodeName)) < 0)
+ return(NULL);
+
+ if (doesExist(VAR_datanodeMasterWALDirs, idx) &&
+ aval(VAR_datanodeMasterWALDirs)[idx] &&
+ !is_none(aval(VAR_datanodeMasterWALDirs)[idx]))
+ wal = true;
+ else
+ wal = false;
+
+ remoteDirCheck[0] = '\0';
+ remoteWalDirCheck[0] = '\0';
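+ /*
+ * Unless a forced init was requested, guard the remote initdb with a
+ * shell snippet that bails out if the target data (or WAL) directory
+ * already exists and is not empty.
+ */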
+ if (!forceInit)
+ {
+ sprintf(remoteDirCheck, "if [ '$(ls -A %s 2> /dev/null)' ]; then echo 'ERROR: "
+ "target directory (%s) exists and not empty. "
+ "Skip Datanode initilialization'; exit; fi;",
+ aval(VAR_datanodeMasterDirs)[idx],
+ aval(VAR_datanodeMasterDirs)[idx]
+ );
+ if (wal)
+ {
+ sprintf(remoteWalDirCheck, "if [ '$(ls -A %s 2> /dev/null)' ]; then echo 'ERROR: "
+ "target directory (%s) exists and not empty. "
+ "Skip Datanode initilialization'; exit; fi;",
+ aval(VAR_datanodeMasterWALDirs)[idx],
+ aval(VAR_datanodeMasterWALDirs)[idx]
+ );
+ }
+
+ }
+
+ /* Build each datanode's initialize command */
+ cmd = cmdInitdb = initCmd(aval(VAR_datanodeMasterServers)[idx]);
+ snprintf(newCommand(cmdInitdb), MAXLINE,
+ "%s %s"
+ "rm -rf %s;"
+ "mkdir -p %s; PGXC_CTL_SILENT=1 initdb --nodename %s %s %s -D %s",
+ remoteDirCheck,
+ remoteWalDirCheck,
+ aval(VAR_datanodeMasterDirs)[idx], aval(VAR_datanodeMasterDirs)[idx],
+ aval(VAR_datanodeNames)[idx],
+ wal ? "-X" : "",
+ wal ? aval(VAR_datanodeMasterWALDirs)[idx] : "",
+ aval(VAR_datanodeMasterDirs)[idx]);
+
+ /* Initialize postgresql.conf */
+ appendCmdEl(cmdInitdb, (cmdPgConf = initCmd(aval(VAR_datanodeMasterServers)[idx])));
+ snprintf(newCommand(cmdPgConf), MAXLINE,
+ "cat >> %s/postgresql.conf",
+ aval(VAR_datanodeMasterDirs)[idx]);
+ if ((f = prepareLocalStdin((cmdPgConf->localStdin = Malloc(MAXPATH+1)), MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmd);
+ return(NULL);
+ }
+ fprintf(f,
+ "#===========================================\n"
+ "# Added at initialization. %s\n"
+ "log_destination = 'stderr'\n"
+ "logging_collector = on\n"
+ "log_directory = 'pg_log'\n"
+ "listen_addresses = '*'\n"
+ "max_connections = 100\n",
+ timeStampString(timeStamp, MAXTOKEN));
+ if (doesExist(VAR_datanodeExtraConfig, 0) &&
+ !is_none(sval(VAR_datanodeExtraConfig)))
+ AddMember(fileList, sval(VAR_datanodeExtraConfig));
+ if (doesExist(VAR_datanodeSpecificExtraConfig, idx) &&
+ !is_none(aval(VAR_datanodeSpecificExtraConfig)[idx]))
+ AddMember(fileList, aval(VAR_datanodeSpecificExtraConfig)[idx]);
+ appendFiles(f, fileList);
+ CleanArray(fileList);
+ freeAndReset(fileList);
+ gtmIdx = getEffectiveGtmProxyIdxFromServerName(aval(VAR_datanodeMasterServers)[idx]);
+ gtmHost = (gtmIdx < 0) ? sval(VAR_gtmMasterServer) : aval(VAR_gtmProxyServers)[gtmIdx];
+ gtmPort = (gtmIdx < 0) ? sval(VAR_gtmMasterPort) : aval(VAR_gtmProxyPorts)[gtmIdx];
+ fprintf(f,
+ "port = %s\n"
+ "pooler_port = %s\n"
+ "gtm_host = '%s'\n"
+ "gtm_port = %s\n",
+ aval(VAR_datanodePorts)[idx],
+ aval(VAR_datanodePoolerPorts)[idx],
+ gtmHost, gtmPort);
+ fclose(f);
+
+ /* Additional Initialization for log_shipping */
+ if (isVarYes(VAR_datanodeSlave) && !is_none(aval(VAR_datanodeSlaveServers)[idx]))
+ {
+ cmd_t *cmd_cleanDir, *cmd_PgConf;
+ /* This datanode has a slave */
+
+ /* Build archive log target */
+ appendCmdEl(cmdInitdb, (cmd_cleanDir = initCmd(aval(VAR_datanodeSlaveServers)[idx])));
+ snprintf(newCommand(cmd_cleanDir), MAXLINE,
+ "rm -rf %s;mkdir -p %s; chmod 0700 %s",
+ aval(VAR_datanodeArchLogDirs)[idx], aval(VAR_datanodeArchLogDirs)[idx],
+ aval(VAR_datanodeArchLogDirs)[idx]);
+
+ /* postgresql.conf */
+ appendCmdEl(cmdInitdb, (cmd_PgConf = initCmd(aval(VAR_datanodeMasterServers)[idx])));
+ snprintf(newCommand(cmd_PgConf), MAXLINE,
+ "cat >> %s/postgresql.conf", aval(VAR_datanodeMasterDirs)[idx]);
+ if ((f = prepareLocalStdin(newFilename(cmd_PgConf->localStdin), MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmd);
+ return(NULL);
+ }
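+ /*
+ * WAL archiving ships completed segments to the slave host with rsync;
+ * the slave's recovery.conf (written at slave initialization) restores
+ * from the same directory.
+ */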
+ fprintf(f,
+ "wal_level = archive\n"
+ "archive_mode = on\n"
+ "archive_command = 'rsync %%p %s@%s:%s/%%f'\n"
+ "max_wal_senders = %s\n"
+ "# End of Addition\n",
+ sval(VAR_pgxcUser), aval(VAR_datanodeSlaveServers)[idx], aval(VAR_datanodeArchLogDirs)[idx],
+ is_none(aval(VAR_datanodeMaxWALSenders)[idx]) ? "0" : aval(VAR_datanodeMaxWALSenders)[idx]);
+ fclose(f);
+ }
+ else
+ {
+ cmd_t *cmd_PgConf;
+ appendCmdEl(cmdInitdb, (cmd_PgConf = initCmd(aval(VAR_datanodeMasterServers)[idx])));
+ snprintf(newCommand(cmd_PgConf), MAXLINE,
+ "cat >> %s/postgresql.conf", aval(VAR_datanodeMasterDirs)[idx]);
+ if ((f = prepareLocalStdin(newFilename(cmd_PgConf->localStdin), MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmd);
+ return(NULL);
+ }
+ fprintf(f, "# End of Addition\n");
+ fclose(f);
+ }
+
+ /* pg_hba.conf */
+ appendCmdEl(cmdInitdb, (cmdPgHba = initCmd(aval(VAR_datanodeMasterServers)[idx])));
+ snprintf(newCommand(cmdPgHba), MAXLINE,
+ "cat >> %s/pg_hba.conf", aval(VAR_datanodeMasterDirs)[idx]);
+ if ((f = prepareLocalStdin(newFilename(cmdPgHba->localStdin), MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmd);
+ return(NULL);
+ }
+ fprintf(f,
+ "#=================================================\n"
+ "# Addition at initialization, %s\n",
+ timeStampString(timeStamp, MAXTOKEN));
+ if (doesExist(VAR_datanodeExtraPgHba, 0) && !is_none(sval(VAR_datanodeExtraPgHba)))
+ AddMember(fileList, sval(VAR_datanodeExtraPgHba));
+ if (doesExist(VAR_datanodeSpecificExtraPgHba, idx) && !is_none(aval(VAR_datanodeSpecificExtraPgHba)[idx]))
+ AddMember(fileList, aval(VAR_datanodeSpecificExtraPgHba)[idx]);
+ appendFiles(f, fileList);
+ CleanArray(fileList);
+ for (jj = 0; aval(VAR_datanodePgHbaEntries)[jj]; jj++)
+ {
+ fprintf(f,
+ "host all %s %s trust\n",
+ sval(VAR_pgxcOwner), aval(VAR_datanodePgHbaEntries)[jj]);
+ if (isVarYes(VAR_datanodeSlave))
+ if (!is_none(aval(VAR_datanodeSlaveServers)[idx]))
+ fprintf(f,
+ "host replication %s %s trust\n",
+ sval(VAR_pgxcOwner), aval(VAR_datanodePgHbaEntries)[jj]);
+ }
+ fprintf(f, "# End of additon\n");
+ fclose(f);
+ return(cmd);
+}
+
+
+int init_datanode_master(char **nodeList)
+{
+ int ii;
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+ char **actualNodeList;
+ int rc;
+
+ actualNodeList = makeActualNodeList(nodeList);
+ cmdList = initCmdList();
+ for(ii = 0; actualNodeList[ii]; ii++)
+ {
+ elog(INFO, "Initialize the datanode master %s.\n", actualNodeList[ii]);
+ if ((cmd = prepare_initDatanodeMaster(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ elog(NOTICE, "Done.\n");
+ return(rc);
+}
+
+/*
+ * Initialize datanode slave ----------------------------------------------------
+ */
+int init_datanode_slave_all(void)
+{
+ elog(INFO, "Initialize all the datanode slaves.\n");
+ return(init_datanode_slave(aval(VAR_datanodeNames)));
+}
+
+cmd_t *prepare_initDatanodeSlave(char *nodeName)
+{
+ cmd_t *cmd, *cmdBuildDir, *cmdStartMaster, *cmdBaseBkup, *cmdRecovConf, *cmdPgConf, *cmdStopMaster;
+ FILE *f;
+ int idx;
+ int startMaster;
+ char timestamp[MAXTOKEN+1];
+ char remoteDirCheck[MAXPATH * 2 + 128];
+
+ if ((idx = datanodeIdx(nodeName)) < 0)
+ {
+ elog(WARNING, "WARNING: specified node %s is not datanode. skipping.\n", nodeName);
+ return(NULL);
+ }
+ startMaster = FALSE;
+ /* Check if the datanode master is running */
+ if (pingNode(aval(VAR_datanodeMasterServers)[idx], aval(VAR_datanodePorts)[idx]) != 0)
+ startMaster = TRUE;
+
+ if (!doesExist(VAR_datanodeSlaveServers, idx) || is_none(aval(VAR_datanodeSlaveServers)[idx]))
+ {
+ elog(WARNING, "WARNING: slave not configured for datanode %s\n",
+ nodeName);
+ return NULL;
+ }
+
+ remoteDirCheck[0] = '\0';
+ if (!forceInit)
+ {
+ sprintf(remoteDirCheck, "if [ '$(ls -A %s 2> /dev/null)' ]; then echo 'ERROR: "
+ "target directory (%s) exists and not empty. "
+ "Skip Datanode initilialization'; exit; fi;",
+ aval(VAR_datanodeSlaveDirs)[idx],
+ aval(VAR_datanodeSlaveDirs)[idx]
+ );
+ }
+
+ /* Build slave's directory -1- */
+ cmd = cmdBuildDir = initCmd(aval(VAR_datanodeSlaveServers)[idx]);
+ snprintf(newCommand(cmdBuildDir), MAXLINE,
+ "%s"
+ "rm -rf %s;"
+ "mkdir -p %s;"
+ "chmod 0700 %s",
+ remoteDirCheck,
+ aval(VAR_datanodeSlaveDirs)[idx], aval(VAR_datanodeSlaveDirs)[idx],
+ aval(VAR_datanodeSlaveDirs)[idx]);
+
+ /* Start datanode master if it is not running -2- */
+ if (startMaster)
+ {
+ appendCmdEl(cmdBuildDir, (cmdStartMaster = prepare_startDatanodeMaster(nodeName)));
+ }
+
+ /* Obtain base backup of the master */
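+ /*
+ * -x makes pg_basebackup include the WAL segments needed to bring the
+ * copy to a consistent state without consulting the archive.
+ */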
+ appendCmdEl(cmdBuildDir, (cmdBaseBkup = initCmd(aval(VAR_datanodeSlaveServers)[idx])));
+ snprintf(newCommand(cmdBaseBkup), MAXLINE,
+ "pg_basebackup -p %s -h %s -D %s -x",
+ aval(VAR_datanodePorts)[idx], aval(VAR_datanodeMasterServers)[idx],
+ aval(VAR_datanodeSlaveDirs)[idx]);
+
+ /* Configure recovery.conf of the slave */
+ appendCmdEl(cmdBuildDir, (cmdRecovConf = initCmd(aval(VAR_datanodeSlaveServers)[idx])));
+ snprintf(newCommand(cmdRecovConf), MAXLINE,
+ "cat >> %s/recovery.conf",
+ aval(VAR_datanodeSlaveDirs)[idx]);
+ if ((f = prepareLocalStdin(newFilename(cmdRecovConf->localStdin), MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmd);
+ return(NULL);
+ }
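+ /*
+ * standby_mode keeps the node in recovery: it streams from the master
+ * via primary_conninfo, falls back to the WAL archived by the master's
+ * archive_command, and pg_archivecleanup trims segments the standby no
+ * longer needs.
+ */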
+ fprintf(f,
+ "#==========================================\n"
+ "# Added to initialize the slave, %s\n"
+ "standby_mode = on\n"
+ "primary_conninfo = 'host = %s port = %s user = %s application_name = %s'\n"
+ "restore_command = 'cp %s/%%f %%p'\n"
+ "archive_cleanup_command = 'pg_archivecleanup %s %%r'\n",
+ timeStampString(timestamp, MAXTOKEN),
+ aval(VAR_datanodeMasterServers)[idx], aval(VAR_datanodePorts)[idx],
+ sval(VAR_pgxcOwner), aval(VAR_datanodeNames)[idx],
+ aval(VAR_datanodeArchLogDirs)[idx],
+ aval(VAR_datanodeArchLogDirs)[idx]);
+ fclose(f);
+
+ /* Configure slave's postgresql.conf */
+ appendCmdEl(cmdBuildDir, (cmdPgConf = initCmd(aval(VAR_datanodeSlaveServers)[idx])));
+ snprintf(newCommand(cmdPgConf), MAXLINE,
+ "cat >> %s/postgresql.conf",
+ aval(VAR_datanodeSlaveDirs)[idx]);
+ if ((f = prepareLocalStdin(newFilename(cmdPgConf->localStdin), MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmd);
+ return(NULL);
+ }
+ fprintf(f,
+ "#==========================================\n"
+ "# Added to startup the slave, %s\n"
+ "hot_standby = off\n"
+ "port = %s\n"
+ "pooler_port = %s\n"
+ "# End of addition\n",
+ timeStampString(timestamp, MAXTOKEN),
+ aval(VAR_datanodeSlavePorts)[idx],
+ aval(VAR_datanodeSlavePoolerPorts)[idx]);
+ fclose(f);
+
+ /* Stop datanode master if needed */
+ if (startMaster == TRUE)
+ appendCmdEl(cmdBuildDir, (cmdStopMaster = prepare_stopDatanodeMaster(aval(VAR_datanodeNames)[idx], FAST)));
+ return(cmd);
+}
+
+int init_datanode_slave(char **nodeList)
+{
+ int ii;
+ int rc;
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+ char **actualNodeList;
+
+ if (!isVarYes(VAR_datanodeSlave))
+ {
+ elog(ERROR, "ERROR: datanode slave is not configured.\n");
+ return 1;
+ }
+ actualNodeList = makeActualNodeList(nodeList);
+ cmdList = initCmdList();
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ elog(INFO, "Initialize datanode slave %s\n", actualNodeList[ii]);
+ if ((cmd = prepare_initDatanodeSlave(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ return(rc);
+}
+
+/*
+ * Start datanode master --------------------------------------------------
+ */
+int start_datanode_master_all(void)
+{
+ elog(INFO, "Starting all the datanode masters.\n");
+ return(start_datanode_master(aval(VAR_datanodeNames)));
+}
+
+cmd_t *prepare_startDatanodeMaster(char *nodeName)
+{
+ cmd_t *cmdStartDatanodeMaster = NULL;
+ int idx;
+
+ if ((idx = datanodeIdx(nodeName)) < 0)
+ {
+ elog(WARNING, "WARNING: %s is not a datanode, skipping\n", nodeName);
+ return(NULL);
+ }
+ /* Check if the target is running */
+ if (pingNode(aval(VAR_datanodeMasterServers)[idx], aval(VAR_datanodePorts)[idx]) == 0)
+ {
+ elog(WARNING, "WARNING: datanode master %s is running now. Skipping.\n",
+ aval(VAR_datanodeNames)[idx]);
+ cleanCmd(cmdStartDatanodeMaster);
+ return(NULL);
+ }
+ cmdStartDatanodeMaster = initCmd(aval(VAR_datanodeMasterServers)[idx]);
+ snprintf(newCommand(cmdStartDatanodeMaster), MAXLINE,
+ "pg_ctl start -w -Z datanode -D %s -o -i", aval(VAR_datanodeMasterDirs)[idx]);
+ return(cmdStartDatanodeMaster);
+}
+
+int start_datanode_master(char **nodeList)
+{
+ int ii;
+ int rc;
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+ char **actualNodeList;
+
+ actualNodeList = makeActualNodeList(nodeList);
+
+ cmdList = initCmdList();
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ elog(INFO, "Starting datanode master %s.\n", actualNodeList[ii]);
+ if ((cmd = prepare_startDatanodeMaster(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ elog(NOTICE, "Done.\n");
+ return(rc);
+}
+
+/*
+ * Start datanode slave --------------------------------------------------
+ */
+int start_datanode_slave_all(void)
+{
+ elog(INFO, "Starting all the datanode slaves.\n");
+ return(start_datanode_slave(aval(VAR_datanodeNames)));
+}
+
+cmd_t *prepare_startDatanodeSlave(char *nodeName)
+{
+ cmd_t *cmd, *cmdStartDatanodeSlave, *cmdMasterToSyncMode, *cmdMasterReload;
+ FILE *f;
+ int idx;
+ char timestamp[MAXTOKEN+1];
+
+ /* Is the node really a datanode? */
+ if((idx = datanodeIdx(nodeName)) < 0)
+ {
+ elog(WARNING, "WARNING: node %s is not a datanode. Skipping\n", nodeName);
+ return(NULL);
+ }
+ /* Check if the datanode master is running */
+ if (pingNode(aval(VAR_datanodeMasterServers)[idx], aval(VAR_datanodePorts)[idx]) != 0)
+ {
+ elog(WARNING, "WARNING: master of the datanode %s is not running. Skipping\n", nodeName);
+ return(NULL);
+ }
+
+ if (!doesExist(VAR_datanodeSlaveServers, idx) || is_none(aval(VAR_datanodeSlaveServers)[idx]))
+ {
+ elog(WARNING, "WARNING: slave not configured for datanode %s\n",
+ nodeName);
+ return(NULL);
+ }
+
+ cmd = cmdStartDatanodeSlave = initCmd(aval(VAR_datanodeSlaveServers)[idx]);
+ snprintf(newCommand(cmdStartDatanodeSlave), MAXLINE,
+ "pg_ctl start -w -Z datanode -D %s",
+ aval(VAR_datanodeSlaveDirs)[idx]);
+
+ /* Change the master to synchronous mode */
+ appendCmdEl(cmdStartDatanodeSlave, (cmdMasterToSyncMode = initCmd(aval(VAR_datanodeMasterServers)[idx])));
+ snprintf(newCommand(cmdMasterToSyncMode), MAXLINE,
+ "cat >> %s/postgresql.conf",
+ aval(VAR_datanodeMasterDirs)[idx]);
+ if ((f = prepareLocalStdin(newFilename(cmdMasterToSyncMode->localStdin), MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmd);
+ return(NULL);
+ }
+ fprintf(f,
+ "#==========================================================\n"
+ "# Added to start the slave in sync. mode, %s\n"
+ "synchronous_commit = on\n"
+ "synchronous_standby_names = '%s'\n"
+ "# End of the addition\n",
+ timeStampString(timestamp, MAXTOKEN),
+ aval(VAR_datanodeNames)[idx]);
+ fclose(f);
+
+ /* Reload postgresql.conf change */
+ appendCmdEl(cmdStartDatanodeSlave, (cmdMasterReload = initCmd(aval(VAR_datanodeMasterServers)[idx])));
+ snprintf(newCommand(cmdMasterReload), MAXLINE,
+ "pg_ctl reload -Z datanode -D %s",
+ aval(VAR_datanodeMasterDirs)[idx]);
+ return(cmd);
+}
+
+int start_datanode_slave(char **nodeList)
+{
+ int ii;
+ int rc;
+ cmdList_t *cmdList;
+ char **actualNodeList;
+
+ if (!isVarYes(VAR_datanodeSlave))
+ {
+ elog(ERROR, "ERROR: datanode slave is not configured.\n");
+ return 1;
+ }
+ cmdList = initCmdList();
+ actualNodeList = makeActualNodeList(nodeList);
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ cmd_t *cmd;
+
+ elog(INFO, "Starting datanode slave %s.\n", actualNodeList[ii]);
+ if ((cmd = prepare_startDatanodeSlave(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ elog(NOTICE, "Done.\n");
+ return(rc);
+}
+
+
+/*
+ * Stop datanode master ------------------------------------------------
+ */
+cmd_t *prepare_stopDatanodeMaster(char *nodeName, char *immediate)
+{
+ cmd_t *cmdStopDatanodeMaster;
+ int idx;
+
+ if ((idx = datanodeIdx(nodeName)) < 0)
+ {
+ elog(WARNING, "WARNING: %s is not a datanode. Skipping\n", nodeName);
+ return(NULL);
+ }
+ cmdStopDatanodeMaster = initCmd(aval(VAR_datanodeMasterServers)[idx]);
+ if (immediate)
+ snprintf(newCommand(cmdStopDatanodeMaster), MAXLINE,
+ "pg_ctl stop -w -Z datanode -D %s -m %s",
+ aval(VAR_datanodeMasterDirs)[idx], immediate);
+ else
+ snprintf(newCommand(cmdStopDatanodeMaster), MAXLINE,
+ "pg_ctl stop -w -Z datanode -D %s",
+ aval(VAR_datanodeMasterDirs)[idx]);
+ return(cmdStopDatanodeMaster);
+}
+
+
+int stop_datanode_master_all(char *immediate)
+{
+ elog(INFO, "Stopping all the datanode masters.\n");
+ return(stop_datanode_master(aval(VAR_datanodeNames), immediate));
+}
+
+
+int stop_datanode_master(char **nodeList, char *immediate)
+{
+ int ii;
+ int rc;
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+ char **actualNodeList;
+
+ actualNodeList = makeActualNodeList(nodeList);
+ cmdList = initCmdList();
+ for(ii = 0; actualNodeList[ii]; ii++)
+ {
+ elog(INFO, "Stopping datanode master %s.\n", actualNodeList[ii]);
+ if ((cmd = prepare_stopDatanodeMaster(actualNodeList[ii], immediate)))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ elog(NOTICE, "Done.\n");
+ return(rc);
+}
+
+
+/*
+ * Stop datanode slave --------------------------------------------------------
+ */
+cmd_t *prepare_stopDatanodeSlave(char *nodeName, char *immediate)
+{
+ int idx;
+ cmd_t *cmd, *cmdMasterToAsyncMode, *cmdStopSlave;
+ FILE *f;
+ char timestamp[MAXTOKEN+1];
+
+ if ((idx = datanodeIdx(nodeName)) < 0)
+ {
+ elog(WARNING, "%s is not a datanode. Skipping\n", nodeName);
+ return(NULL);
+ }
+ if (!doesExist(VAR_datanodeSlaveServers, idx) || is_none(aval(VAR_datanodeSlaveServers)[idx]))
+ {
+ elog(WARNING, "datanode %s does not have a slave. Skipping.\n", nodeName);
+ return(NULL);
+ }
+ /* Set the master to asynchronous mode */
+ cmd = cmdMasterToAsyncMode = initCmd(aval(VAR_datanodeMasterServers)[idx]);
+ snprintf(newCommand(cmdMasterToAsyncMode), MAXLINE,
+ "cat >> %s/postgresql.conf",
+ aval(VAR_datanodeMasterDirs)[idx]);
+ if ((f = prepareLocalStdin(newFilename(cmdMasterToAsyncMode->localStdin), MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmd);
+ return(NULL);
+ }
+ fprintf(f,
+ "#=======================================\n"
+ "# Updated to trun off the slave %s\n"
+ "synchronous_standby_names = ''\n"
+ "# End of the update\n",
+ timeStampString(timestamp, MAXTOKEN));
+ fclose(f);
+
+ /* Reload new config file if the master is running */
+ /* The next step might need improvement. When GTM is dead, the following may
+ * fail even though the master is running.
+ */
+ if (pingNodeSlave(aval(VAR_datanodeSlaveServers)[idx],
+ aval(VAR_datanodeSlaveDirs)[idx]) == 0)
+ {
+ cmd_t *cmdReloadMaster;
+
+ appendCmdEl(cmdMasterToAsyncMode, (cmdReloadMaster = initCmd(aval(VAR_datanodeMasterServers)[idx])));
+ snprintf(newCommand(cmdReloadMaster), MAXLINE,
+ "pg_ctl reload -Z datanode -D %s",
+ aval(VAR_datanodeMasterDirs)[idx]);
+ }
+
+ /* Stop the slave */
+ appendCmdEl(cmdMasterToAsyncMode, (cmdStopSlave = initCmd(aval(VAR_datanodeSlaveServers)[idx])));
+ if (immediate)
+ snprintf(newCommand(cmdStopSlave), MAXLINE,
+ "pg_ctl stop -w -Z datanode -D %s -m %s", aval(VAR_datanodeSlaveDirs)[idx], immediate);
+ else
+ snprintf(newCommand(cmdStopSlave), MAXLINE,
+ "pg_ctl stop -w -Z datanode -D %s", aval(VAR_datanodeSlaveDirs)[idx]);
+ return(cmd);
+}
+
+
+int stop_datanode_slave_all(char *immediate)
+{
+ elog(INFO, "Stopping all the datanode slaves.\n");
+ return(stop_datanode_slave(aval(VAR_datanodeNames), immediate));
+}
+
+int stop_datanode_slave(char **nodeList, char *immediate)
+{
+ int ii;
+ int rc;
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+ char **actualNodeList;
+
+ if (!isVarYes(VAR_datanodeSlave))
+ {
+ elog(ERROR, "Datanode slave is not configured. Returning.\n");
+ return 1;
+ }
+ actualNodeList = makeActualNodeList(nodeList);
+ cmdList = initCmdList();
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ elog(INFO, "Stopping datanode slave %s.\n", actualNodeList[ii]);
+ if ((cmd = prepare_stopDatanodeSlave(actualNodeList[ii], immediate)))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ return(rc);
+}
+
+/*
+ * Failover datanode ---------------------------------------------------------
+ */
+int failover_datanode(char **nodeList)
+{
+ int ii;
+ char **actualNodeList;
+ int rc = 0;
+
+ elog(INFO, "Failover specified datanodes.\n");
+ if (!isVarYes(VAR_datanodeSlave))
+ {
+ elog(ERROR, "ERROR: datnaode slave is not configured.\n");
+ return 1;
+ }
+ actualNodeList = makeActualNodeList(nodeList);
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ int idx;
+ int rc_local;
+
+ elog(INFO, "Failover the datanode %s.\n", actualNodeList[ii]);
+ if ((idx = datanodeIdx(actualNodeList[ii])) < 0)
+ {
+ elog(ERROR, "ERROR: %s is not a datanode. Skipping.\n", actualNodeList[ii]);
+ continue;
+ }
+ if (is_none(aval(VAR_datanodeSlaveServers)[idx]))
+ {
+ elog(ERROR, "ERROR: slave of the datanode %s is not configured. Skipping\n",
+ actualNodeList[ii]);
+ continue;
+ }
+ rc_local = failover_oneDatanode(idx);
+ if (rc_local < 0)
+ return(rc_local);
+ else
+ if (rc_local > rc)
+ rc = rc_local;
+ }
+ elog(INFO, "Done.\n");
+ return(rc);
+}
+
+static int failover_oneDatanode(int datanodeIdx)
+{
+ int rc = 0;
+ int rc_local;
+ int jj;
+ char *gtmHost;
+ char *gtmPort;
+ int gtmPxyIdx;
+ FILE *f;
+ char timestamp[MAXTOKEN+1];
+
+ char cmd[MAXLINE];
+ int cmdlen;
+ bool dnReconfigured;
+
+# define checkRc() do{if(WEXITSTATUS(rc_local) > rc) rc = WEXITSTATUS(rc_local);}while(0)
+
+ /*
+ * Determine the target GTM
+ */
+ gtmPxyIdx = getEffectiveGtmProxyIdxFromServerName(aval(VAR_datanodeSlaveServers)[datanodeIdx]);
+ gtmHost = (gtmPxyIdx >= 0) ? aval(VAR_gtmProxyServers)[gtmPxyIdx] : sval(VAR_gtmMasterServer);
+ gtmPort = (gtmPxyIdx >= 0) ? aval(VAR_gtmProxyPorts)[gtmPxyIdx] : sval(VAR_gtmMasterPort);
+ if (gtmPxyIdx >= 0)
+ elog(NOTICE, "Failover datanode %s using gtm %s\n",
+ aval(VAR_datanodeNames)[datanodeIdx], aval(VAR_gtmProxyNames)[gtmPxyIdx]);
+ else
+ elog(NOTICE, "Failover datanode %s using GTM itself\n",
+ aval(VAR_datanodeNames)[datanodeIdx]);
+
+ /* Promote the slave */
+ rc_local = doImmediate(aval(VAR_datanodeSlaveServers)[datanodeIdx], NULL,
+ "pg_ctl promote -Z datanode -D %s",
+ aval(VAR_datanodeSlaveDirs)[datanodeIdx]);
+ checkRc();
+
+ /* Reconfigure new datanode master with new gtm_proxy or gtm */
+ if ((f = pgxc_popen_w(aval(VAR_datanodeSlaveServers)[datanodeIdx],
+ "cat >> %s/postgresql.conf",
+ aval(VAR_datanodeSlaveDirs)[datanodeIdx])) == NULL)
+ {
+ elog(ERROR, "ERROR: Could not prepare to update postgresql.conf, %s", strerror(errno));
+ return(-1);
+ }
+ fprintf(f,
+ "#=================================================\n"
+ "# Added to promote, %s\n"
+ "gtm_host = '%s'\n"
+ "gtm_port = %s\n"
+ "# End of addition\n",
+ timeStampString(timestamp, MAXTOKEN),
+ gtmHost, gtmPort);
+ pclose(f);
+
+ /* Restart datanode slave (as the new master) */
+ rc_local = doImmediate(aval(VAR_datanodeSlaveServers)[datanodeIdx], NULL,
+ "pg_ctl restart -w -Z datanode -D %s -o -i; sleep 1",
+ aval(VAR_datanodeSlaveDirs)[datanodeIdx]);
+ checkRc();
+ /*
+ * Update the configuration variable
+ */
+ var_assign(&(aval(VAR_datanodeMasterServers)[datanodeIdx]), Strdup(aval(VAR_datanodeSlaveServers)[datanodeIdx]));
+ var_assign(&(aval(VAR_datanodeSlaveServers)[datanodeIdx]), Strdup("none"));
+ var_assign(&(aval(VAR_datanodeMasterDirs)[datanodeIdx]), Strdup(aval(VAR_datanodeSlaveDirs)[datanodeIdx]));
+ var_assign(&(aval(VAR_datanodeSlaveDirs)[datanodeIdx]), Strdup("none"));
+ var_assign(&(aval(VAR_datanodePorts)[datanodeIdx]), Strdup(aval(VAR_datanodeSlavePorts)[datanodeIdx]));
+ var_assign(&(aval(VAR_datanodePoolerPorts)[datanodeIdx]), Strdup(aval(VAR_datanodeSlavePoolerPorts)[datanodeIdx]));
+ var_assign(&(aval(VAR_datanodeSlavePorts)[datanodeIdx]), Strdup("none"));
+ var_assign(&(aval(VAR_datanodeSlavePoolerPorts)[datanodeIdx]), Strdup("none"));
+ /*
+ * Update the configuration file
+ */
+ if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+ {
+ elog(ERROR, "ERROR: Failed to open configuration file %s, %s\n", pgxc_ctl_config_path, strerror(errno));
+ return(-1);
+ }
+ fprintf(f,
+ "#=====================================================\n"
+ "# Updated due to the datanode failover, %s, %s\n"
+ "datanodeMasterServers=( %s )\n"
+ "datanodePorts=( %s )\n"
+ "datanodePoolerPorts=( %s )\n"
+ "datanodeMasterDirs=( %s )\n"
+ "datanodeSlaveServers=( %s )\n"
+ "datanodeSlavePorts=( %s )\n"
+ "datanodeSlavePoolerPorts=( %s )\n"
+ "datanodeSlaveDirs=( %s )\n"
+ "# End of the update\n",
+ aval(VAR_datanodeNames)[datanodeIdx], timeStampString(timestamp, MAXTOKEN),
+ listValue(VAR_datanodeMasterServers),
+ listValue(VAR_datanodePorts),
+ listValue(VAR_datanodePoolerPorts),
+ listValue(VAR_datanodeMasterDirs),
+ listValue(VAR_datanodeSlaveServers),
+ listValue(VAR_datanodeSlavePorts),
+ listValue(VAR_datanodeSlavePoolerPorts),
+ listValue(VAR_datanodeSlaveDirs));
+ fclose(f);
+
+ /* Backup the configuration file */
+ if (isVarYes(VAR_configBackup))
+ {
+ rc_local = doConfigBackup();
+ checkRc();
+ }
+
+ cmdlen = 0;
+ cmd[0] = '\0';
+ /*
+ * Reconfigure datanodes with the new datanode. We prepare the commands and
+ * pass them to the first coordinator we reconfigure later
+ */
+ for (jj = 0; aval(VAR_datanodeNames)[jj]; jj++)
+ {
+ int len;
+
+ if (is_none(aval(VAR_datanodeMasterServers)[jj]))
+ continue;
+
+ if (pingNode(aval(VAR_datanodeMasterServers)[jj], aval(VAR_datanodePorts)[jj]) != 0)
+ {
+ elog(ERROR, "Datanode %s is not running. Skip reconfiguration for this datanode.\n",
+ aval(VAR_datanodeNames)[jj]);
+ continue;
+ }
+
+ len = snprintf(cmd + cmdlen, MAXLINE - cmdlen, "EXECUTE DIRECT ON (%s) 'ALTER NODE %s WITH (HOST=''%s'', PORT=%s)';\n"
+ "EXECUTE DIRECT ON (%s) 'select pgxc_pool_reload()';\n",
+ aval(VAR_datanodeNames)[jj],
+ aval(VAR_datanodeNames)[datanodeIdx],
+ aval(VAR_datanodeMasterServers)[datanodeIdx],
+ aval(VAR_datanodePorts)[datanodeIdx],
+ aval(VAR_datanodeNames)[jj]);
+ if (len >= (MAXLINE - cmdlen))
+ {
+ elog(ERROR, "Datanode command exceeds the maximum allowed length.\n");
+ return -1;
+ }
+ cmdlen += len;
+ }
+ dnReconfigured = false;
+ /*
+ * Reconfigure coordinators with new datanode
+ */
+ for (jj = 0; aval(VAR_coordNames)[jj]; jj++)
+ {
+ if (is_none(aval(VAR_coordMasterServers)[jj]))
+ continue;
+
+ if (pingNode(aval(VAR_coordMasterServers)[jj], aval(VAR_coordPorts)[jj]) != 0)
+ {
+ elog(ERROR, "Coordinator %s is not running. Skip reconfiguration for this coordinator.\n",
+ aval(VAR_coordNames)[jj]);
+ continue;
+ }
+ if ((f = pgxc_popen_wRaw("psql -p %d -h %s %s %s",
+ atoi(aval(VAR_coordPorts)[jj]),
+ aval(VAR_coordMasterServers)[jj],
+ sval(VAR_defaultDatabase),
+ sval(VAR_pgxcOwner)))
+ == NULL)
+ {
+ elog(ERROR, "ERROR: failed to start psql for coordinator %s, %s\n", aval(VAR_coordNames)[jj], strerror(errno));
+ continue;
+ }
+ fprintf(f,
+ "ALTER NODE %s WITH (HOST='%s', PORT=%s);\n"
+ "select pgxc_pool_reload();\n"
+ "%s"
+ "\\q\n",
+ aval(VAR_datanodeNames)[datanodeIdx],
+ aval(VAR_datanodeMasterServers)[datanodeIdx],
+ aval(VAR_datanodePorts)[datanodeIdx],
+ dnReconfigured ? "" : cmd
+ );
+ dnReconfigured = true;
+ pclose(f);
+ }
+
+
+ return rc;
+
+# undef checkRc
+
+}
+
+/*------------------------------------------------------------------------
+ *
+ * Add command
+ *
+ *-----------------------------------------------------------------------*/
+int add_datanodeMaster(char *name, char *host, int port, int pooler, char *dir,
+ char *waldir, char *extraConf, char *extraPgHbaConf)
+{
+ FILE *f, *lockf;
+ int size, idx;
+ char port_s[MAXTOKEN+1];
+ char pooler_s[MAXTOKEN+1];
+ char max_wal_senders_s[MAXTOKEN+1];
+ int gtmPxyIdx;
+ int connCordIdx;
+ char *gtmHost;
+ char *gtmPort;
+ char pgdumpall_out[MAXPATH+1];
+ char **nodelist = NULL;
+ int ii, jj, restore_dnode_idx, restore_coord_idx = -1;
+ char **confFiles = NULL;
+ char **pgHbaConfFiles = NULL;
+ bool wal;
+
+ if (waldir && (strcasecmp(waldir, "none") != 0))
+ wal = true;
+ else
+ wal = false;
+
+ /* Check if all the datanodes are running */
+ if (!check_AllDatanodeRunning())
+ {
+ elog(ERROR, "ERROR: Some of the datanode masters are not running. Cannot add new one.\n");
+ return 1;
+ }
+ /* Check if there's no conflict with the current configuration */
+ if (checkNameConflict(name, FALSE))
+ {
+ elog(ERROR, "ERROR: Node name %s duplicate.\n", name);
+ return 1;
+ }
+ if (checkPortConflict(host, port) || checkPortConflict(host, pooler))
+ {
+ elog(ERROR, "ERROR: port numbrer (%d) or pooler port (%d) at host %s conflicts.\n", port, pooler, host);
+ return 1;
+ }
+ if (checkDirConflict(host, dir))
+ {
+ elog(ERROR, "ERROR: directory \"%s\" conflicts at host %s.\n", dir, host);
+ return 1;
+ }
+ if (checkDirConflict(host, waldir))
+ {
+ elog(ERROR, "ERROR: directory \"%s\" conflicts at host %s.\n", waldir, host);
+ return 1;
+ }
+ /*
+ * Check if the datanode master configuration is consistent
+ */
+ idx = size = arraySizeName(VAR_datanodeNames);
+ if ((arraySizeName(VAR_datanodePorts) != size) ||
+ (arraySizeName(VAR_datanodePoolerPorts) != size) ||
+ (arraySizeName(VAR_datanodeMasterServers) != size) ||
+ (arraySizeName(VAR_datanodeMasterDirs) != size) ||
+ (arraySizeName(VAR_datanodeMasterWALDirs) != size) ||
+ (arraySizeName(VAR_datanodeMaxWALSenders) != size) ||
+ (arraySizeName(VAR_datanodeSpecificExtraConfig) != size) ||
+ (arraySizeName(VAR_datanodeSpecificExtraPgHba) != size))
+ {
+ elog(ERROR, "ERROR: Found some conflicts in datanode master configuration.\n");
+ return 1;
+ }
+
+ /* Find a running datanode (or, failing that, a coordinator) to take the catalog dump from */
+ restore_dnode_idx = get_any_available_datanode(-1);
+ if (restore_dnode_idx == -1)
+ restore_coord_idx = get_any_available_coord(-1);
+
+ if (restore_dnode_idx == -1 && restore_coord_idx == -1)
+ {
+ elog(ERROR, "ERROR: no valid datanode or coordinator configuration!");
+ return 1;
+ }
+
+ if ((extendVar(VAR_datanodeNames, idx + 1, "none") != 0) ||
+ (extendVar(VAR_datanodeMasterServers, idx + 1, "none") != 0) ||
+ (extendVar(VAR_datanodePorts, idx + 1, "none") != 0) ||
+ (extendVar(VAR_datanodePoolerPorts, idx + 1, "none") != 0) ||
+ (extendVar(VAR_datanodeMasterDirs, idx + 1, "none") != 0) ||
+ (extendVar(VAR_datanodeMasterWALDirs, idx + 1, "none") != 0) ||
+ (extendVar(VAR_datanodeMaxWALSenders, idx + 1, "none") != 0) ||
+ (extendVar(VAR_datanodeSpecificExtraConfig, idx + 1, "none") != 0) ||
+ (extendVar(VAR_datanodeSpecificExtraPgHba, idx + 1, "none") != 0))
+ {
+ elog(PANIC, "PANIC: Internal error, inconsistent datanode information\n");
+ return 1;
+ }
+
+ if (isVarYes(VAR_datanodeSlave))
+ {
+ if ((extendVar(VAR_datanodeSlaveServers, idx + 1, "none") != 0) ||
+ (extendVar(VAR_datanodeSlavePorts, idx + 1, "none") != 0) ||
+ (extendVar(VAR_datanodeSlavePoolerPorts, idx + 1, "none") != 0) ||
+ (extendVar(VAR_datanodeSlaveDirs, idx + 1, "none") != 0) ||
+ (extendVar(VAR_datanodeSlaveWALDirs, idx + 1, "none") != 0) ||
+ (extendVar(VAR_datanodeArchLogDirs, idx + 1, "none") != 0))
+ {
+ elog(PANIC, "PANIC: Internal error, inconsistent datanode slave information\n");
+ return 1;
+ }
+ }
+
+ /*
+ * Now reconfigure
+ */
+ /*
+ * 000 We need another way to configure specific pg_hba.conf and max_wal_senders.
+ */
+ snprintf(port_s, MAXTOKEN, "%d", port);
+ snprintf(pooler_s, MAXTOKEN, "%d", pooler);
+ snprintf(max_wal_senders_s, MAXTOKEN, "%d", getDefaultWalSender(false));
+ assign_arrayEl(VAR_datanodeNames, idx, name, NULL);
+ assign_arrayEl(VAR_datanodeMasterServers, idx, host, NULL);
+ assign_arrayEl(VAR_datanodePorts, idx, port_s, "-1");
+ assign_arrayEl(VAR_datanodePoolerPorts, idx, pooler_s, "-1");
+ assign_arrayEl(VAR_datanodeMasterDirs, idx, dir, NULL);
+ assign_arrayEl(VAR_datanodeMasterWALDirs, idx, waldir, NULL);
+ assign_arrayEl(VAR_datanodeMaxWALSenders, idx, max_wal_senders_s, NULL);
+ if (isVarYes(VAR_datanodeSlave))
+ {
+ assign_arrayEl(VAR_datanodeSlaveServers, idx, "none", NULL);
+ assign_arrayEl(VAR_datanodeSlavePorts, idx, "-1", NULL);
+ assign_arrayEl(VAR_datanodeSlavePoolerPorts, idx, "-1", NULL);
+ assign_arrayEl(VAR_datanodeSlaveDirs, idx, "none", NULL);
+ assign_arrayEl(VAR_datanodeSlaveWALDirs, idx, "none", NULL);
+ assign_arrayEl(VAR_datanodeArchLogDirs, idx, "none", NULL);
+ }
+ assign_arrayEl(VAR_datanodeSpecificExtraConfig, idx, extraConf, NULL);
+ assign_arrayEl(VAR_datanodeSpecificExtraPgHba, idx, extraPgHbaConf, NULL);
+ /*
+ * Update the configuration file and backup it
+ */
+ /*
+ * Take care of the extra conf file
+ */
+ if (doesExist(VAR_datanodeExtraConfig, 0) && !is_none(sval(VAR_datanodeExtraConfig)))
+ AddMember(confFiles, sval(VAR_datanodeExtraConfig));
+ if (doesExist(VAR_datanodeSpecificExtraConfig, idx) && !is_none(aval(VAR_datanodeSpecificExtraConfig)[idx]))
+ AddMember(confFiles, aval(VAR_datanodeSpecificExtraConfig)[idx]);
+
+ /*
+ * Take care of the extra pg_hba conf file
+ */
+ if (doesExist(VAR_datanodeExtraPgHba, 0) && !is_none(sval(VAR_datanodeExtraPgHba)))
+ AddMember(pgHbaConfFiles, sval(VAR_datanodeExtraPgHba));
+ if (doesExist(VAR_datanodeSpecificExtraPgHba, idx) && !is_none(aval(VAR_datanodeSpecificExtraPgHba)[idx]))
+ AddMember(pgHbaConfFiles, aval(VAR_datanodeSpecificExtraPgHba)[idx]);
+ /*
+ * Main part
+ */
+ if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+ {
+ /* Should it be panic? */
+ elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#===================================================\n"
+ "# pgxc configuration file updated due to datanode master addition\n"
+ "# %s\n",
+ timeStampString(date, MAXTOKEN));
+ fprintAval(f, VAR_datanodeNames);
+ fprintAval(f, VAR_datanodeMasterServers);
+ fprintAval(f, VAR_datanodePorts);
+ fprintAval(f, VAR_datanodePoolerPorts);
+ fprintAval(f, VAR_datanodeMasterDirs);
+ fprintAval(f, VAR_datanodeMasterWALDirs);
+ fprintAval(f, VAR_datanodeMaxWALSenders);
+ if (isVarYes(VAR_datanodeSlave))
+ {
+ fprintAval(f, VAR_datanodeSlaveServers);
+ fprintAval(f, VAR_datanodeSlavePorts);
+ fprintAval(f, VAR_datanodeSlavePoolerPorts);
+ fprintAval(f, VAR_datanodeSlaveDirs);
+ fprintAval(f, VAR_datanodeSlaveWALDirs);
+ fprintAval(f, VAR_datanodeArchLogDirs);
+ }
+ fprintAval(f, VAR_datanodeSpecificExtraConfig);
+ fprintAval(f, VAR_datanodeSpecificExtraPgHba);
+ fprintf(f, "%s", "#----End of reconfiguration -------------------------\n");
+ fclose(f);
+ backup_configuration();
+
+ /* Now add the master */
+
+ gtmPxyIdx = getEffectiveGtmProxyIdxFromServerName(host);
+ /* Index 0 is a valid gtm_proxy index, so test for >= 0, not > 0 */
+ gtmHost = (gtmPxyIdx >= 0) ? aval(VAR_gtmProxyServers)[gtmPxyIdx] : sval(VAR_gtmMasterServer);
+ gtmPort = (gtmPxyIdx >= 0) ? aval(VAR_gtmProxyPorts)[gtmPxyIdx] : sval(VAR_gtmMasterPort);
+
+ /* initdb */
+ doImmediate(host, NULL, "PGXC_CTL_SILENT=1 initdb -D %s %s %s --nodename %s", dir,
+ wal ? "-X" : "",
+ wal ? waldir : "",
+ name);
+
+ /* Edit configurations */
+ if ((f = pgxc_popen_w(host, "cat >> %s/postgresql.conf", dir)))
+ {
+ appendFiles(f, confFiles);
+ fprintf(f,
+ "#===========================================\n"
+ "# Added at initialization. %s\n"
+ "port = %d\n"
+ "pooler_port = %d\n"
+ "gtm_host = '%s'\n"
+ "gtm_port = %s\n"
+ "# End of Additon\n",
+ timeStampString(date, MAXTOKEN+1),
+ port, pooler, gtmHost, gtmPort);
+ pclose(f);
+ }
+ CleanArray(confFiles);
+ jj = datanodeIdx(name);
+ if ((f = pgxc_popen_w(host, "cat >> %s/pg_hba.conf", dir)))
+ {
+ int kk;
+
+ fprintf(f, "#===========================================\n");
+ fprintf(f, "# Added at initialization.\n");
+
+ appendFiles(f, pgHbaConfFiles);
+ for (kk = 0; aval(VAR_datanodePgHbaEntries)[kk]; kk++)
+ {
+ fprintf(f,"host all %s %s trust\n", sval(VAR_pgxcOwner), aval(VAR_datanodePgHbaEntries)[kk]);
+ if (isVarYes(VAR_datanodeSlave))
+ if (!is_none(aval(VAR_datanodeSlaveServers)[jj]))
+ fprintf(f, "host replication %s %s trust\n",
+ sval(VAR_pgxcOwner), aval(VAR_datanodePgHbaEntries)[kk]);
+ }
+ fprintf(f, "# End of addition\n");
+ pclose(f);
+ }
+
+ /* Lock DDL */
+ if (restore_dnode_idx != -1)
+ {
+ if ((lockf = pgxc_popen_wRaw("psql -h %s -p %d %s", aval(VAR_datanodeMasterServers)[restore_dnode_idx], atoi(aval(VAR_datanodePorts)[restore_dnode_idx]), sval(VAR_defaultDatabase))) == NULL)
+ {
+ elog(ERROR, "ERROR: could not open datanode psql command, %s\n", strerror(errno));
+ return 1;
+ }
+ }
+ else if (restore_coord_idx != -1)
+ {
+ if ((lockf = pgxc_popen_wRaw("psql -h %s -p %d %s", aval(VAR_coordMasterServers)[restore_coord_idx], atoi(aval(VAR_coordPorts)[restore_coord_idx]), sval(VAR_defaultDatabase))) == NULL)
+ {
+ elog(ERROR, "ERROR: could not open coordinator psql command, %s\n", strerror(errno));
+ return 1;
+ }
+ }
+ else
+ {
+ elog(ERROR, "ERROR: no valid datanode or coordinator configuration!");
+ return 1;
+ }
+
+ fprintf(lockf, "select pgxc_lock_for_backup();\n"); /* Keep open until the end of the addition. */
+ fflush(lockf);
+
+ /* pg_dumpall */
+ createLocalFileName(GENERAL, pgdumpall_out, MAXPATH);
+ if (restore_dnode_idx != -1)
+ doImmediateRaw("pg_dumpall -p %s -h %s -s --include-nodes --dump-nodes >%s",
+ aval(VAR_datanodePorts)[restore_dnode_idx],
+ aval(VAR_datanodeMasterServers)[restore_dnode_idx],
+ pgdumpall_out);
+ else if (restore_coord_idx != -1)
+ doImmediateRaw("pg_dumpall -p %s -h %s -s --include-nodes --dump-nodes >%s",
+ aval(VAR_coordPorts)[restore_coord_idx],
+ aval(VAR_coordMasterServers)[restore_coord_idx],
+ pgdumpall_out);
+ else
+ {
+ elog(ERROR, "ERROR: no valid datanode or coordinator configuration!");
+ return 1;
+ }
+
+ /* Start the new datanode */
+ doImmediate(host, NULL, "pg_ctl start -w -Z restoremode -D %s -o -i", dir);
+
+ /* Allow the new datanode to start up by sleeping for a couple of seconds */
+ pg_usleep(2000000L);
+
+ /* Restore the backup */
+ doImmediateRaw("psql -h %s -p %d -d %s -f %s", host, port, sval(VAR_defaultDatabase), pgdumpall_out);
+ doImmediateRaw("rm -f %s", pgdumpall_out);
+
+ /* Quit the new datanode */
+ doImmediate(host, NULL, "pg_ctl stop -w -Z restoremode -D %s", dir);
+
+ /* Start the new datanode with --datanode option */
+ AddMember(nodelist, name);
+ start_datanode_master(nodelist);
+ CleanArray(nodelist);
+
+ /* Issue CREATE NODE on coordinators */
+ for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
+ {
+ if (!is_none(aval(VAR_coordNames)[ii]))
+ {
+ if ((f = pgxc_popen_wRaw("psql -h %s -p %s %s", aval(VAR_coordMasterServers)[ii], aval(VAR_coordPorts)[ii], sval(VAR_defaultDatabase))) == NULL)
+ {
+ elog(ERROR, "ERROR: cannot connect to the coordinator master %s.\n", aval(VAR_coordNames)[ii]);
+ continue;
+ }
+ fprintf(f, "CREATE NODE %s WITH (TYPE = 'datanode', host='%s', PORT=%d);\n", name, host, port);
+ fprintf(f, "SELECT pgxc_pool_reload();\n");
+ fprintf(f, "\\q\n");
+ pclose(f);
+ }
+ }
+
+ if (restore_coord_idx == -1)
+ connCordIdx = get_any_available_coord(-1);
+ else
+ connCordIdx = restore_coord_idx;
+ if (connCordIdx == -1)
+ return 1;
+
+ /* Issue CREATE NODE on datanodes */
+ for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+ {
+ if (!is_none(aval(VAR_datanodeNames)[ii]))
+ {
+ if ((f = pgxc_popen_wRaw("psql -h %s -p %s %s",
+ aval(VAR_coordMasterServers)[connCordIdx],
+ aval(VAR_coordPorts)[connCordIdx],
+ sval(VAR_defaultDatabase))) == NULL)
+ {
+ elog(ERROR, "ERROR: cannot connect to the coordinator %s.\n", aval(VAR_coordNames)[0]);
+ continue;
+ }
+ if (strcmp(aval(VAR_datanodeNames)[ii], name) != 0)
+ fprintf(f, "EXECUTE DIRECT ON (%s) 'CREATE NODE %s WITH (TYPE = ''datanode'', host=''%s'', PORT=%d)';\n", aval(VAR_datanodeNames)[ii], name, host, port);
+ else
+ fprintf(f, "EXECUTE DIRECT ON (%s) 'ALTER NODE %s WITH (TYPE = ''datanode'', host=''%s'', PORT=%d)';\n", aval(VAR_datanodeNames)[ii], name, host, port);
+ fprintf(f, "EXECUTE DIRECT ON (%s) 'SELECT pgxc_pool_reload();'\n", aval(VAR_datanodeNames)[ii]);
+ fprintf(f, "\\q\n");
+ pclose(f);
+ }
+ }
+
+ /* Quit the DDL lock session */
+ fprintf(lockf, "\\q\n");
+ pclose(lockf);
+ return 0;
+}
+
+int add_datanodeSlave(char *name, char *host, int port, int pooler, char *dir,
+ char *walDir, char *archDir)
+{
+ int idx;
+ FILE *f;
+ char port_s[MAXTOKEN+1];
+ char pooler_s[MAXTOKEN+1];
+ int kk;
+ bool wal;
+ int size;
+
+ if (walDir && (strcasecmp(walDir, "none") != 0))
+ wal = true;
+ else
+ wal = false;
+
+
+ /* Check if the name is valid datanode */
+ if ((idx = datanodeIdx(name)) < 0)
+ {
+ elog(ERROR, "ERROR: Specified datanode %s is not configured.\n", name);
+ return 1;
+ }
+ /* Check if the datanode slave is not configured */
+ if (isVarYes(VAR_datanodeSlave) && doesExist(VAR_datanodeSlaveServers, idx) && !is_none(aval(VAR_datanodeSlaveServers)[idx]))
+ {
+ elog(ERROR, "ERROR: Slave for the datanode %s has already been configured.\n", name);
+ return 1;
+ }
+ /* Check if the resource does not conflict */
+ if (strcmp(dir, archDir) == 0)
+ {
+ elog(ERROR, "ERROR: working directory is the same as WAL archive directory.\n");
+ return 1;
+ }
+ /*
+ * We don't check for a name conflict here because the datanode index was
+ * acquired successfully, which means there is no conflict.
+ */
+ if (checkPortConflict(host, port))
+ {
+ elog(ERROR, "ERROR: the port %s has already been used in the host %s.\n", aval(VAR_datanodePorts)[idx], host);
+ return 1;
+ }
+ if (checkDirConflict(host, dir) || checkDirConflict(host, archDir) ||
+ checkDirConflict(host, walDir))
+ {
+ elog(ERROR, "ERROR: directory %s or %s or %s has already been used by other node.\n", dir, archDir, walDir);
+ return 1;
+ }
+ /* Check if the datanode master is running */
+ if (pingNode(aval(VAR_datanodeMasterServers)[idx], aval(VAR_datanodePorts)[idx]) != 0)
+ {
+ elog(ERROR, "ERROR: Datanode master %s is not running.\n", name);
+ return 1;
+ }
+ /* Prepare the resources (directories) */
+ doImmediate(host, NULL, "mkdir -p %s;chmod 0700 %s", dir, dir);
+ doImmediate(host, NULL, "rm -rf %s; mkdir -p %s;chmod 0700 %s", archDir, archDir, archDir);
+ doImmediate(host, NULL, "rm -rf %s; mkdir -p %s;chmod 0700 %s", walDir,
+ walDir, walDir);
+ /* Reconfigure the master with WAL archive */
+ /* Update the configuration and backup the configuration file */
+ if ((f = pgxc_popen_w(aval(VAR_datanodeMasterServers)[idx], "cat >> %s/postgresql.conf", aval(VAR_datanodeMasterDirs)[idx])) == NULL)
+ {
+ elog(ERROR, "ERROR: Cannot open datanode master's configuration file, %s/postgresql.conf",
+ aval(VAR_datanodeMasterDirs)[idx]);
+ return 1;
+ }
+ fprintf(f,
+ "#========================================\n"
+ "# Addition for log shipping, %s\n"
+ "wal_level = archive\n"
+ "archive_mode = on\n"
+ "archive_command = 'rsync %%p %s@%s:%s/%%f'\n"
+ "max_wal_senders = %d\n"
+ "synchronous_commit = on\n"
+ "synchronous_standby_names = '%s'\n"
+ "# End of Addition\n",
+ timeStampString(date, MAXPATH),
+ sval(VAR_pgxcUser), host, archDir,
+ getDefaultWalSender(FALSE),
+ name);
+ pclose(f);
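+ /*
+ * For illustration only (hypothetical values): with VAR_pgxcUser = "postgres",
+ * host = "slave1" and archDir = "/data/dn1_arch", the master now ships WAL with
+ * archive_command = 'rsync %p postgres@slave1:/data/dn1_arch/%f'
+ * and waits for the synchronous standby named after the datanode.
+ */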
+ /* pg_hba.conf for replication */
+ if ((f = pgxc_popen_w(aval(VAR_datanodeMasterServers)[idx], "cat >> %s/pg_hba.conf", aval(VAR_datanodeMasterDirs)[idx])) == NULL)
+ {
+ elog(ERROR, "ERROR: Cannot open datanode master's pg_hba.conf file, %s/pg_hba.conf, %s\n",
+ aval(VAR_datanodeMasterDirs)[idx], strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#================================================\n"
+ "# Additional entry by adding the slave, %s\n",
+ timeStampString(date, MAXPATH));
+
+ for (kk = 0; aval(VAR_datanodePgHbaEntries)[kk]; kk++)
+ {
+ fprintf(f, "host replication %s %s trust\n",
+ sval(VAR_pgxcOwner), aval(VAR_datanodePgHbaEntries)[kk]);
+ }
+
+ fprintf(f,
+ "host replication %s %s/32 trust\n"
+ "# End of addition ===============================\n",
+ sval(VAR_pgxcOwner), getIpAddress(host));
+ pclose(f);
+
+ size = arraySizeName(VAR_datanodeNames);
+ /* Need an API to expand the array to the desired size */
+ if ((extendVar(VAR_datanodeSlaveServers, size, "none") != 0) ||
+ (extendVar(VAR_datanodeSlavePorts, size, "none") != 0) ||
+ (extendVar(VAR_datanodeSlavePoolerPorts, size, "none") != 0) ||
+ (extendVar(VAR_datanodeSlaveDirs, size, "none") != 0) ||
+ (extendVar(VAR_datanodeSlaveWALDirs, size, "none") != 0) ||
+ (extendVar(VAR_datanodeArchLogDirs, size, "none") != 0))
+ {
+ elog(PANIC, "PANIC: Internal error, inconsistent datanode information\n");
+ return 1;
+ }
+
+ /* Reconfigure pgxc_ctl configuration with the new slave */
+ snprintf(port_s, MAXTOKEN, "%d", port);
+ snprintf(pooler_s, MAXTOKEN, "%d", pooler);
+
+ if (!isVarYes(VAR_datanodeSlave))
+ assign_sval(VAR_datanodeSlave, "y");
+ replace_arrayEl(VAR_datanodeSlaveServers, idx, host, NULL);
+ replace_arrayEl(VAR_datanodeSlavePorts, idx, port_s, NULL);
+ replace_arrayEl(VAR_datanodeSlavePoolerPorts, idx, pooler_s, NULL);
+ replace_arrayEl(VAR_datanodeSlaveDirs, idx, dir, NULL);
+ replace_arrayEl(VAR_datanodeSlaveWALDirs, idx, walDir, NULL);
+ replace_arrayEl(VAR_datanodeArchLogDirs, idx, archDir, NULL);
+ /* Update the configuration file and backup it */
+ if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+ {
+ /* Should it be panic? */
+ elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#===================================================\n"
+ "# pgxc configuration file updated due to datanode slave addition\n"
+ "# %s\n",
+ timeStampString(date, MAXTOKEN+1));
+ fprintSval(f, VAR_datanodeSlave);
+ fprintAval(f, VAR_datanodeSlaveServers);
+ fprintAval(f, VAR_datanodeSlavePorts);
+ fprintAval(f, VAR_datanodeSlavePoolerPorts);
+ fprintAval(f, VAR_datanodeArchLogDirs);
+ fprintAval(f, VAR_datanodeSlaveDirs);
+ fprintAval(f, VAR_datanodeSlaveWALDirs);
+ fprintf(f, "%s", "#----End of reconfiguration -------------------------\n");
+ fclose(f);
+ backup_configuration();
+
+ /* Restart the master */
+ /*
+ * It's not a good idea to use "restart" here because connections from other
+ * coordinators may still be alive. They are held by the pooler and we would
+ * have to reload the pool to release them, which aborts all the transactions.
+ *
+ * Because we would need to issue pgxc_pool_reload() at all the coordinators,
+ * we would have to give up all the transactions in the whole cluster.
+ *
+ * It is much better to shut down the target datanode master in fast mode,
+ * because that does not affect transactions this datanode is not involved in.
+ */
+ doImmediate(aval(VAR_datanodeMasterServers)[idx], NULL,
+ "pg_ctl stop -w -Z datanode -D %s -m fast", aval(VAR_datanodeMasterDirs)[idx]);
+ doImmediate(aval(VAR_datanodeMasterServers)[idx], NULL,
+ "pg_ctl start -w -Z datanode -D %s", aval(VAR_datanodeMasterDirs)[idx]);
+ /* pg_basebackup */
+ doImmediate(host, NULL, "pg_basebackup -p %s -h %s -D %s -x %s %s",
+ aval(VAR_datanodePorts)[idx],
+ aval(VAR_datanodeMasterServers)[idx], dir,
+ wal ? "--xlogdir" : "",
+ wal ? walDir : "");
+ /* Update the slave configuration with hot standby and port */
+ if ((f = pgxc_popen_w(host, "cat >> %s/postgresql.conf", dir)) == NULL)
+ {
+ elog(ERROR, "ERROR: Cannot open the new slave's postgresql.conf, %s\n", strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#==========================================\n"
+ "# Added to initialize the slave, %s\n"
+ "hot_standby = off\n"
+ "port = %s\n"
+ "pooler_port = %s\n"
+ "wal_level = archive\n"
+ "archive_mode = off\n" /* No archive mode */
+ "archive_command = ''\n" /* No archive mode */
+ "max_wal_senders = 0\n" /* Minimum WAL senders */
+ "# End of Addition\n",
+ timeStampString(date, MAXTOKEN), aval(VAR_datanodeSlavePorts)[idx], aval(VAR_datanodeSlavePoolerPorts)[idx]);
+ pclose(f);
+ /* Update the slave recovery.conf */
+ if ((f = pgxc_popen_w(host, "cat >> %s/recovery.conf", dir)) == NULL)
+ {
+ elog(ERROR, "ERROR: Cannot open the slave's recovery.conf, %s\n", strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#==========================================\n"
+ "# Added to add the slave, %s\n"
+ "standby_mode = on\n"
+ "primary_conninfo = 'host = %s port = %s "
+ "user = %s application_name = %s'\n"
+ "restore_command = 'cp %s/%%f %%p'\n"
+ "archive_cleanup_command = 'pg_archivecleanup %s %%r'\n"
+ "# End of addition\n",
+ timeStampString(date, MAXTOKEN), aval(VAR_datanodeMasterServers)[idx], aval(VAR_datanodePorts)[idx],
+ sval(VAR_pgxcOwner), aval(VAR_datanodeNames)[idx],
+ aval(VAR_datanodeArchLogDirs)[idx], aval(VAR_datanodeArchLogDirs)[idx]);
+ pclose(f);
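+ /*
+ * With hypothetical values (master dn1host:20008, owner "postgres", archive
+ * directory /data/dn1_arch), the slave's recovery.conf now ends with:
+ * standby_mode = on
+ * primary_conninfo = 'host = dn1host port = 20008 user = postgres application_name = datanode1'
+ * restore_command = 'cp /data/dn1_arch/%f %p'
+ * archive_cleanup_command = 'pg_archivecleanup /data/dn1_arch %r'
+ */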
+ /* Start the slave */
+ doImmediate(host, NULL, "pg_ctl start -w -Z datanode -D %s", dir);
+ return 0;
+}
+
+
+/*------------------------------------------------------------------------
+ *
+ * Remove command
+ *
+ *-----------------------------------------------------------------------*/
+int remove_datanodeMaster(char *name, int clean_opt)
+{
+ /*
+ 1. Transfer the data from the datanode to be removed to the rest of the datanodes for all the tables in all the databases.
+ For example, to shift the data of the table rr_abc to the
+ rest of the nodes, we can use the command
+
+ ALTER TABLE rr_abc DELETE NODE (DATA_NODE_3);
+
+ This step is not included in the remove_datanodeMaster() function.
+
+ 2. Confirm that there is no data left on the datanode to be removed.
+ For example, to confirm that there is no data left on DATA_NODE_3:
+
+ select c.pcrelid from pgxc_class c, pgxc_node n where
+ n.node_name = 'DATA_NODE_3' and n.oid = ANY (c.nodeoids);
+
+ This step is not included in this function either.
+
+ 3. Stop the datanode server to be removed.
+ Now any SELECTs that involve the datanode to be removed would start failing
+ and DMLs have already been blocked, so essentially the cluster would work
+ only partially.
+
+ If a datanode slave is also configured, we need to remove it first.
+
+ 4. Connect to any of the coordinators.
+ In our example assuming COORD_1 is running on port 5432,
+ the following command would connect to COORD_1
+
+ psql postgres -p 5432
+
+ 5. Drop the datanode to be removed.
+ For example, to drop the datanode DATA_NODE_3, use the command
+
+ DROP NODE DATA_NODE_3;
+
+ 6. Update the connection information cached in pool.
+
+ SELECT pgxc_pool_reload();
+
+ 7. Repeat steps 4,5 & 6 for all the coordinators in the cluster.
+ */
+
+ int idx;
+ int connCordIdx;
+ int ii;
+ FILE *f;
+ char **namelist = NULL;
+ char date[MAXTOKEN+1];
+
+ /* Check if the datanode is configured */
+ if ((idx = datanodeIdx(name)) < 0)
+ {
+ elog(ERROR, "ERROR: Datanode %s is not configured.\n", name);
+ return 1;
+ }
+ /* Check if all the other datanodes are running */
+ for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+ {
+ if ((ii != idx) && !is_none(aval(VAR_datanodeNames)[ii]) && (pingNode(aval(VAR_datanodeMasterServers)[ii], aval(VAR_datanodePorts)[ii]) != 0))
+ {
+ elog(ERROR, "ERROR: Datanode master %s is not running.\n", aval(VAR_datanodeNames)[ii]);
+ return 1;
+ }
+ }
+ /* Check if there's a slave configured */
+ if (doesExist(VAR_datanodeSlaveServers, idx) && !is_none(aval(VAR_datanodeSlaveServers)[idx]))
+ remove_datanodeSlave(name, clean_opt);
+#if 0
+ /* Stop the datanode master if running */
+ if (pingNode(aval(VAR_datanodeMasterServers)[idx], aval(VAR_datanodePorts)[idx]) == 0)
+ {
+ AddMember(namelist, name);
+ stop_datanode_master(namelist, "fast");
+ CleanArray(namelist);
+ }
+ /* Cleanup the datanode master resource if specified */
+ if (clean_opt)
+ doImmediate(aval(VAR_datanodeMasterServers)[idx], NULL, "rm -rf %s", aval(VAR_datanodeMasterDirs)[idx]);
+#endif
+ /* Issue "drop node" at all the other datanodes */
+ for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
+ {
+ if (doesExist(VAR_coordNames, ii) && !is_none(aval(VAR_coordNames)[ii]))
+ {
+ f = pgxc_popen_wRaw("psql -p %d -h %s %s", atoi(aval(VAR_coordPorts)[ii]), aval(VAR_coordMasterServers)[ii], sval(VAR_defaultDatabase));
+ if (f == NULL)
+ {
+ elog(ERROR, "ERROR: cannot begin psql for the coordinator master %s\n", aval(VAR_coordNames)[ii]);
+ continue;
+ }
+ fprintf(f, "DROP NODE %s;\n", name);
+ fprintf(f, "SELECT pgxc_pool_reload();\n");
+ fprintf(f, "\\q");
+ pclose(f);
+ }
+ }
+
+ /* find any available coordinator */
+ connCordIdx = get_any_available_coord(-1);
+ if (connCordIdx == -1)
+ return 1;
+
+ /* Issue DROP NODE on datanodes */
+ for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+ {
+ if (!is_none(aval(VAR_datanodeNames)[ii]) &&
+ strcmp(aval(VAR_datanodeNames)[ii], name) != 0)
+ {
+ if ((f = pgxc_popen_wRaw("psql -h %s -p %s %s",
+ aval(VAR_coordMasterServers)[connCordIdx],
+ aval(VAR_coordPorts)[connCordIdx],
+ sval(VAR_defaultDatabase))) == NULL)
+ {
+ elog(ERROR, "ERROR: cannot connect to the coordinator %s.\n", aval(VAR_coordNames)[0]);
+ continue;
+ }
+ fprintf(f, "EXECUTE DIRECT ON (%s) 'DROP NODE %s';\n", aval(VAR_datanodeNames)[ii], name);
+ fprintf(f, "EXECUTE DIRECT ON (%s) 'SELECT pgxc_pool_reload();'\n", aval(VAR_datanodeNames)[ii]);
+ fprintf(f, "\\q\n");
+ pclose(f);
+ }
+ }
+ /* Stop the datanode master if running */
+ if (pingNode(aval(VAR_datanodeMasterServers)[idx], aval(VAR_datanodePorts)[idx]) == 0)
+ {
+ AddMember(namelist, name);
+ stop_datanode_master(namelist, "fast");
+ CleanArray(namelist);
+ }
+ /* Cleanup the datanode master resource if specified */
+ if (clean_opt)
+ doImmediate(aval(VAR_datanodeMasterServers)[idx], NULL, "rm -rf %s", aval(VAR_datanodeMasterDirs)[idx]);
+ /* Update the configuration and back it up --> should clean up "none" entries here */
+ replace_arrayEl(VAR_datanodeNames, idx, "none", NULL);
+ replace_arrayEl(VAR_datanodeMasterServers, idx, "none", NULL);
+ replace_arrayEl(VAR_datanodePorts, idx, "-1", "-1");
+ replace_arrayEl(VAR_datanodePoolerPorts, idx, "-1", "-1");
+ replace_arrayEl(VAR_datanodeMasterDirs, idx, "none", NULL);
+ replace_arrayEl(VAR_datanodeMasterWALDirs, idx, "none", NULL);
+ replace_arrayEl(VAR_datanodeMaxWALSenders, idx, "0", "0");
+ replace_arrayEl(VAR_datanodeSpecificExtraConfig, idx, "none", NULL);
+ replace_arrayEl(VAR_datanodeSpecificExtraPgHba, idx, "none", NULL);
+
+ if (isVarYes(VAR_datanodeSlave))
+ {
+ replace_arrayEl(VAR_datanodeSlaveServers, idx, "none", NULL);
+ replace_arrayEl(VAR_datanodeSlavePorts, idx, "none", NULL);
+ replace_arrayEl(VAR_datanodeSlavePoolerPorts, idx, "none", NULL);
+ replace_arrayEl(VAR_datanodeSlaveDirs, idx, "none", NULL);
+ replace_arrayEl(VAR_datanodeSlaveWALDirs, idx, "none", NULL);
+ replace_arrayEl(VAR_datanodeArchLogDirs, idx, "none", NULL);
+ }
+
+ handle_no_slaves();
+ /*
+ * Write config files
+ */
+ if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+ {
+ /* Should it be panic? */
+ elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#================================================================\n"
+ "# pgxc configuration file updated due to datanode master removal\n"
+ "# %s\n",
+ timeStampString(date, MAXTOKEN+1));
+ fprintSval(f, VAR_datanodeSlave);
+ fprintAval(f, VAR_datanodeNames);
+ fprintAval(f, VAR_datanodeMasterDirs);
+ fprintAval(f, VAR_datanodeMasterWALDirs);
+ fprintAval(f, VAR_datanodePorts);
+ fprintAval(f, VAR_datanodePoolerPorts);
+ fprintAval(f, VAR_datanodeMasterServers);
+ fprintAval(f, VAR_datanodeMaxWALSenders);
+ if (isVarYes(VAR_datanodeSlave))
+ {
+ fprintAval(f, VAR_datanodeSlaveServers);
+ fprintAval(f, VAR_datanodeSlavePorts);
+ fprintAval(f, VAR_datanodeSlaveDirs);
+ fprintAval(f, VAR_datanodeSlaveWALDirs);
+ fprintAval(f, VAR_datanodeArchLogDirs);
+ }
+ fprintAval(f, VAR_datanodeSpecificExtraConfig);
+ fprintAval(f, VAR_datanodeSpecificExtraPgHba);
+ fclose(f);
+ backup_configuration();
+ return 0;
+}
+
+int remove_datanodeSlave(char *name, int clean_opt)
+{
+ int idx;
+ char **nodelist = NULL;
+ FILE *f;
+
+ if (!isVarYes(VAR_datanodeSlave))
+ {
+ elog(ERROR, "ERROR: datanode slave is not configured.\n");
+ return 1;
+ }
+ idx = datanodeIdx(name);
+ if (idx < 0)
+ {
+ elog(ERROR, "ERROR: datanode %s is not configured.\n", name);
+ return 1;
+ }
+ if (!doesExist(VAR_datanodeSlaveServers, idx) || is_none(aval(VAR_datanodeSlaveServers)[idx]))
+ {
+ elog(ERROR, "ERROR: datanode slave %s is not configured.\n", name);
+ return 1;
+ }
+ AddMember(nodelist, name);
+ if (pingNodeSlave(aval(VAR_datanodeSlaveServers)[idx],
+ aval(VAR_datanodeSlaveDirs)[idx]) == 0)
+ stop_datanode_slave(nodelist, "immediate");
+ {
+ FILE *f;
+ if ((f = pgxc_popen_w(aval(VAR_datanodeMasterServers)[idx], "cat >> %s/postgresql.conf", aval(VAR_datanodeMasterDirs)[idx])) == NULL)
+ {
+ elog(ERROR, "ERROR: cannot open %s/postgresql.conf at %s, %s\n", aval(VAR_datanodeMasterDirs)[idx], aval(VAR_datanodeMasterServers)[idx], strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#=======================================\n"
+ "# Updated to remove the slave %s\n"
+ "archive_mode = off\n"
+ "synchronous_standby_names = ''\n"
+ "archive_command = ''\n"
+ "max_wal_senders = 0\n"
+ "wal_level = minimal\n"
+ "# End of the update\n",
+ timeStampString(date, MAXTOKEN));
+ pclose(f);
+ }
+ doImmediate(aval(VAR_datanodeMasterServers)[idx], NULL, "pg_ctl restart -Z datanode -D %s", aval(VAR_datanodeMasterDirs)[idx]);
+
+ if (clean_opt)
+ clean_datanode_slave(nodelist);
+ /*
+ * Maintain variables
+ */
+ replace_arrayEl(VAR_datanodeSlaveServers, idx, "none", NULL);
+ replace_arrayEl(VAR_datanodeSlaveDirs, idx, "none", NULL);
+ replace_arrayEl(VAR_datanodeSlaveWALDirs, idx, "none", NULL);
+ replace_arrayEl(VAR_datanodeArchLogDirs, idx, "none", NULL);
+ handle_no_slaves();
+ /*
+ * Maintain configuration file
+ */
+ if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+ {
+ /* Should it be panic? */
+ elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#================================================================\n"
+ "# pgxc configuration file updated due to datanode slave removal\n"
+ "# %s\n",
+ timeStampString(date, MAXTOKEN+1));
+ fprintSval(f, VAR_datanodeSlave);
+ fprintAval(f, VAR_datanodeSlaveServers);
+ fprintAval(f, VAR_datanodeSlaveDirs);
+ fprintAval(f, VAR_datanodeSlaveWALDirs);
+ fprintAval(f, VAR_datanodeArchLogDirs);
+ fclose(f);
+ backup_configuration();
+ CleanArray(nodelist);
+ return 0;
+
+}
+
+/*
+ * Clean datanode master resources -- directory and port -----------------------------
+ */
+cmd_t *prepare_cleanDatanodeMaster(char *nodeName)
+{
+ cmd_t *cmd;
+ int idx;
+ bool wal;
+
+ if ((idx = datanodeIdx(nodeName)) < 0)
+ {
+ elog(ERROR, "ERROR: %s is not a datanode\n", nodeName);
+ return(NULL);
+ }
+
+ if (doesExist(VAR_datanodeMasterWALDirs, idx) &&
+ !is_none(aval(VAR_datanodeMasterWALDirs)[idx]))
+ wal = true;
+ else
+ wal = false;
+
+ cmd = initCmd(aval(VAR_datanodeMasterServers)[idx]);
+ snprintf(newCommand(cmd), MAXLINE,
+ "rm -rf %s; %s %s %s mkdir -p %s; chmod 0700 %s; rm -f /tmp/.s.*%d*",
+ aval(VAR_datanodeMasterDirs)[idx],
+ wal ? "rm -rf " : "",
+ wal ? aval(VAR_datanodeMasterWALDirs)[idx] : "",
+ wal ? ";" : "",
+ aval(VAR_datanodeMasterDirs)[idx],
+ aval(VAR_datanodeMasterDirs)[idx],
+ atoi(aval(VAR_datanodePoolerPorts)[idx]));
+ return(cmd);
+}
+
+int clean_datanode_master_all(void)
+{
+ elog(INFO, "Cleaning all the datanode master resources.\n");
+ return(clean_datanode_master(aval(VAR_datanodeNames)));
+}
+
+int clean_datanode_master(char **nodeList)
+{
+ char **actualNodeList;
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+ int ii;
+ int rc;
+
+ cmdList = initCmdList();
+ actualNodeList = makeActualNodeList(nodeList);
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ elog(INFO, "Cleaning datanode %s master resources.\n", actualNodeList[ii]);
+ if ((cmd = prepare_cleanDatanodeMaster(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ return(rc);
+}
+
+/*
+ * Cleanup datanode slave resources -- directory and the socket ------------------
+ */
+cmd_t *prepare_cleanDatanodeSlave(char *nodeName)
+{
+ cmd_t *cmd;
+ int idx;
+ bool wal;
+
+ if ((idx = datanodeIdx(nodeName)) < 0)
+ {
+ elog(ERROR, "ERROR: %s is not a datanode\n", nodeName);
+ return(NULL);
+ }
+ if (!doesExist(VAR_datanodeSlaveServers, idx) || is_none(aval(VAR_datanodeSlaveServers)[idx]))
+ return NULL;
+
+ if (doesExist(VAR_datanodeSlaveWALDirs, idx) &&
+ !is_none(aval(VAR_datanodeSlaveWALDirs)[idx]))
+ wal = true;
+ else
+ wal = false;
+
+ cmd = initCmd(aval(VAR_datanodeSlaveServers)[idx]);
+ snprintf(newCommand(cmd), MAXLINE,
+ "rm -rf %s; %s %s ; mkdir -p %s; chmod 0700 %s",
+ aval(VAR_datanodeSlaveDirs)[idx],
+ wal ? " rm -rf " : "",
+ wal ? aval(VAR_datanodeSlaveWALDirs)[idx] : "",
+ aval(VAR_datanodeSlaveDirs)[idx], aval(VAR_datanodeSlaveDirs)[idx]);
+ return(cmd);
+}
+
+int clean_datanode_slave_all(void)
+{
+ elog(INFO, "Cleaning all the datanode slave resouces.\n");
+ return(clean_datanode_slave(aval(VAR_datanodeNames)));
+}
+
+int clean_datanode_slave(char **nodeList)
+{
+ char **actualNodeList;
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+ int ii;
+ int rc;
+
+ cmdList = initCmdList();
+ actualNodeList = makeActualNodeList(nodeList);
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ elog(INFO, "Cleaning datanode %s slave resources.\n", actualNodeList[ii]);
+ if ((cmd = prepare_cleanDatanodeSlave(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ else
+ elog(WARNING, "WARNING: datanode slave %s not found.\n", actualNodeList[ii]);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ return(rc);
+}
+
+/*
+ * Show configuration of datanodes -------------------------------------------------
+ */
+int show_config_datanodeMaster(int flag, int idx, char *hostname)
+{
+ int ii;
+ char outBuf[MAXLINE+1];
+ char editBuf[MAXPATH+1];
+
+ outBuf[0] = 0;
+ if (flag)
+ strncat(outBuf, "Datanode Master: ", MAXLINE);
+ if (hostname)
+ {
+ snprintf(editBuf, MAXPATH, "host: %s", hostname);
+ strncat(outBuf, editBuf, MAXLINE);
+ }
+ if (flag || hostname)
+ strncat(outBuf, "\n", MAXLINE);
+ lockLogFile();
+ if (outBuf[0])
+ elog(NOTICE, "%s", outBuf);
+ elog(NOTICE, " Nodename: '%s', port: %s, pooler port %s\n",
+ aval(VAR_datanodeNames)[idx], aval(VAR_datanodePorts)[idx], aval(VAR_poolerPorts)[idx]);
+ elog(NOTICE, " MaxWALSenders: %s, Dir: '%s'\n",
+ aval(VAR_datanodeMaxWALSenders)[idx], aval(VAR_datanodeMasterDirs)[idx]);
+ elog(NOTICE, " ExtraConfig: '%s', Specific Extra Config: '%s'\n",
+ sval(VAR_datanodeExtraConfig), aval(VAR_datanodeSpecificExtraConfig)[idx]);
+ strncpy(outBuf, " pg_hba entries ( ", MAXLINE);
+ for (ii = 0; aval(VAR_datanodePgHbaEntries)[ii]; ii++)
+ {
+ snprintf(editBuf, MAXPATH, "'%s' ", aval(VAR_datanodePgHbaEntries)[ii]);
+ strncat(outBuf, editBuf, MAXLINE);
+ }
+ elog(NOTICE, "%s)\n", outBuf);
+ elog(NOTICE, " Extra pg_hba: '%s', Specific Extra pg_hba: '%s'\n",
+ sval(VAR_datanodeExtraPgHba), aval(VAR_datanodeSpecificExtraPgHba)[idx]);
+ unlockLogFile();
+ return 0;
+}
+
+int show_config_datanodeSlave(int flag, int idx, char *hostname)
+{
+ char outBuf[MAXLINE+1];
+ char editBuf[MAXPATH+1];
+
+ outBuf[0] = 0;
+ if (flag)
+ strncat(outBuf, "Datanode Slave: ", MAXLINE);
+ if (hostname)
+ {
+ snprintf(editBuf, MAXPATH, "host: %s", hostname);
+ strncat(outBuf, editBuf, MAXLINE);
+ }
+ if (flag || hostname)
+ strncat(outBuf, "\n", MAXLINE);
+ lockLogFile();
+ if (outBuf[0])
+ elog(NOTICE, "%s", outBuf);
+ elog(NOTICE, " Nodename: '%s', port: %s, pooler port: %s\n",
+ aval(VAR_datanodeNames)[idx], aval(VAR_datanodeSlavePorts)[idx], aval(VAR_datanodeSlavePoolerPorts)[idx]);
+ elog(NOTICE," Dir: '%s', Archive Log Dir: '%s'\n",
+ aval(VAR_datanodeSlaveDirs)[idx], aval(VAR_datanodeArchLogDirs)[idx]);
+ unlockLogFile();
+ return 0;
+}
+
+int show_config_datanodeMasterSlaveMulti(char **nodeList)
+{
+ int ii;
+ int idx;
+
+ lockLogFile();
+ for (ii = 0; nodeList[ii]; ii++)
+ {
+ if ((idx = datanodeIdx(nodeList[ii])) < 0)
+ {
+ elog(WARNING, "WARNING: %s is not a datanode, skipping.\n", nodeList[ii]);
+ continue;
+ }
+ else
+ {
+ show_config_datanodeMaster(TRUE, idx, aval(VAR_datanodeMasterServers)[idx]);
+ if (isVarYes(VAR_datanodeSlave))
+ show_config_datanodeSlave(TRUE, idx, aval(VAR_datanodeSlaveServers)[idx]);
+ }
+ }
+ unlockLogFile();
+ return 0;
+}
+
+int show_config_datanodeMasterMulti(char **nodeList)
+{
+ int ii;
+ int idx;
+
+ lockLogFile();
+ for (ii = 0; nodeList[ii]; ii++)
+ {
+ if ((idx = datanodeIdx(nodeList[ii])) < 0)
+ {
+ elog(WARNING, "WARNING: %s is not a datanode. skipping\n", nodeList[ii]);
+ continue;
+ }
+ else
+ show_config_datanodeMaster(TRUE, idx, aval(VAR_datanodeMasterServers)[idx]);
+ }
+ unlockLogFile();
+ return 0;
+}
+
+int show_config_datanodeSlaveMulti(char **nodeList)
+{
+ int ii;
+ int idx;
+
+ if (!isVarYes(VAR_datanodeSlave))
+ {
+ elog(ERROR, "ERROR: datanode slave is not configured.\n");
+ return 1;
+ }
+ lockLogFile();
+ for (ii = 0; nodeList[ii]; ii++)
+ {
+ if ((idx = datanodeIdx(nodeList[ii])) < 0)
+ {
+ elog(WARNING, "WARNING: %s is not a datanode, skipping.\n", nodeList[ii]);
+ continue;
+ }
+ else
+ show_config_datanodeSlave(TRUE, idx, aval(VAR_datanodeSlaveServers)[idx]);
+ }
+ unlockLogFile();
+ return(0);
+}
+
+/*
+ * Kill datanode master ---------------------------------------------------------------
+ *
+ * Normally, you should not kill masters in such a manner. It is intended
+ * only for emergencies.
+ */
+cmd_t *prepare_killDatanodeMaster(char *nodeName)
+{
+ pid_t postmasterPid;
+ int dnIndex;
+ cmd_t *cmd = NULL;
+
+ if (is_none(nodeName))
+ return(NULL);
+ if ((dnIndex = datanodeIdx(nodeName)) < 0)
+ {
+ elog(WARNING, "WARNING: \"%s\" is not a datanode name\n", nodeName);
+ return(NULL);
+ }
+ cmd = initCmd(aval(VAR_datanodeMasterServers)[dnIndex]);
+ if ((postmasterPid = get_postmaster_pid(aval(VAR_datanodeMasterServers)[dnIndex], aval(VAR_datanodeMasterDirs)[dnIndex])) > 0)
+ {
+ char *pidList = getChPidList(aval(VAR_datanodeMasterServers)[dnIndex], postmasterPid);
+
+ snprintf(newCommand(cmd), MAXLINE,
+ "kill -9 %d %s;" /* Kill the postmaster and all its children */
+ "rm -rf /tmp/.s.'*'%d'*'", /* Remove the socket */
+ postmasterPid,
+ pidList,
+ atoi(aval(VAR_datanodePorts)[dnIndex]));
+ freeAndReset(pidList);
+ }
+ else
+ {
+ elog(WARNING, "WARNING: pid for datanode master \"%s\" was not found. Remove socket only.\n", nodeName);
+ snprintf(newCommand(cmd), MAXLINE,
+ "rm -rf /tmp/.s.'*'%d'*'", /* Remove the socket */
+ atoi(aval(VAR_datanodePorts)[dnIndex]));
+ }
+ return(cmd);
+}
+
+int kill_datanode_master_all(void)
+{
+ return(kill_datanode_master(aval(VAR_datanodeNames)));
+}
+
+int kill_datanode_master(char **nodeList)
+{
+ int ii;
+ int rc;
+ char **actualNodeList;
+ cmdList_t *cmdList = NULL;
+ cmd_t *cmd;
+
+ actualNodeList = makeActualNodeList(nodeList);
+ cmdList = initCmdList();
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ if ((cmd = prepare_killDatanodeMaster(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ }
+ if (cmdList)
+ {
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ }
+ else
+ rc = 0;
+ return(rc);
+}
+
+/*
+ * Kill datanode slaves -----------------------------------------------------
+ *
+ * You should not kill datanodes in such a manner. It is intended only for
+ * emergencies. You should try to stop them gracefully first.
+ */
+cmd_t *prepare_killDatanodeSlave(char *nodeName)
+{
+ pid_t postmasterPid;
+ int dnIndex;
+ cmd_t *cmd;
+
+ if (is_none(nodeName))
+ return(NULL);
+ if ((dnIndex = datanodeIdx(nodeName)) < 0)
+ {
+ elog(WARNING, "WARNING: \"%s\" is not a datanode name, skipping.\n", nodeName);
+ return(NULL);
+ }
+ if (!doesExist(VAR_datanodeSlaveServers, dnIndex) || is_none(aval(VAR_datanodeSlaveServers)[dnIndex]))
+ {
+ elog(WARNING, "WARNING: datanode slave %s is not found.\n", nodeName);
+ return NULL;
+ }
+ cmd = initCmd(aval(VAR_datanodeSlaveServers)[dnIndex]);
+ postmasterPid = get_postmaster_pid(aval(VAR_datanodeSlaveServers)[dnIndex], aval(VAR_datanodeSlaveDirs)[dnIndex]);
+ if (postmasterPid == -1)
+ {
+ /* No postmaster pid found */
+ elog(WARNING, "WARNING: pid for datanode slave \"%s\" slave was not found. Remove socket only.\n", nodeName);
+ snprintf(newCommand(cmd), MAXLINE,
+ "rm -rf /tmp/.s.'*'%s'*'", /* Remove the socket */
+ aval(VAR_datanodeSlavePorts)[dnIndex]);
+ }
+ else
+ {
+ char *pidList = getChPidList(aval(VAR_datanodeSlaveServers)[dnIndex], postmasterPid);
+
+ snprintf(newCommand(cmd), MAXLINE,
+ "kill -9 %d %s;" /* Kill the postmaster and all its children */
+ "rm -rf /tmp/.s.'*'%d'*'", /* Remove the socket */
+ postmasterPid,
+ pidList,
+ atoi(aval(VAR_datanodeSlavePorts)[dnIndex]));
+ freeAndReset(pidList);
+ }
+ return(cmd);
+}
+
+int kill_datanode_slave_all(void)
+{
+ return(kill_datanode_slave(aval(VAR_datanodeNames)));
+}
+
+int kill_datanode_slave(char **nodeList)
+{
+ int ii;
+ int rc;
+ char **actualNodeList;
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+
+ cmdList = initCmdList();
+ actualNodeList = makeActualNodeList(nodeList);
+ if (!isVarYes(VAR_datanodeSlave))
+ {
+ elog(ERROR, "ERROR: Datanode slave is not configured.\n");
+ return 1;
+ }
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ if ((cmd = prepare_killDatanodeSlave(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ return(rc);
+}
+
+/*
+ * Checks if all the datanodes are running
+ *
+ * Returns FALSE if any of them are not running.
+ */
+int check_AllDatanodeRunning(void)
+{
+ int ii;
+
+ for (ii = 0; aval(VAR_datanodeMasterServers)[ii]; ii++)
+ {
+ if (!is_none(aval(VAR_datanodeMasterServers)[ii]))
+ if (pingNode(aval(VAR_datanodeMasterServers)[ii], aval(VAR_datanodePorts)[ii]) != 0)
+ return FALSE;
+ }
+ return TRUE;
+}
+
+
diff --git a/contrib/pgxc_ctl/datanode_cmd.h b/contrib/pgxc_ctl/datanode_cmd.h
new file mode 100644
index 0000000000..9a8eca58df
--- /dev/null
+++ b/contrib/pgxc_ctl/datanode_cmd.h
@@ -0,0 +1,74 @@
+/*-------------------------------------------------------------------------
+ *
+ * datanode_cmd.h
+ *
+ * Datanode command module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef DATANODE_CMD_H
+#define DATANODE_CMD_H
+
+#include "utils.h"
+
+extern int init_datanode_master(char **nodeList);
+extern int init_datanode_master_all(void);
+extern int init_datanode_slave(char **nodeList);
+extern int init_datanode_slave_all(void);
+extern cmd_t *prepare_initDatanodeMaster(char *nodeName);
+extern cmd_t *prepare_initDatanodeSlave(char *nodeName);
+
+
+extern int start_datanode_master(char **nodeList);
+extern int start_datanode_master_all(void);
+extern int start_datanode_slave(char **nodeList);
+extern int start_datanode_slave_all(void);
+extern cmd_t *prepare_startDatanodeMaster(char *nodeName);
+extern cmd_t *prepare_startDatanodeSlave(char *nodeName);
+
+extern int stop_datanode_master(char **nodeList, char *immediate);
+extern int stop_datanode_master_all(char *immediate);
+extern int stop_datanode_slave(char **nodeList, char *immediate);
+extern int stop_datanode_slave_all(char *immediate);
+extern cmd_t *prepare_stopDatanodeSlave(char *nodeName, char *immediate);
+extern cmd_t *prepare_stopDatanodeMaster(char *nodeName, char *immediate);
+
+extern int failover_datanode(char **nodeList);
+
+extern int kill_datanode_master(char **nodeList);
+extern int kill_datanode_master_all(void);
+extern int kill_datanode_slave(char **nodeList);
+extern int kill_datanode_slave_all(void);
+extern cmd_t *prepare_killDatanodeMaster(char *nodeName);
+extern cmd_t *prepare_killDatanodeSlave(char *nodeName);
+
+extern int clean_datanode_master(char **nodeList);
+extern int clean_datanode_master_all(void);
+extern int clean_datanode_slave(char **nodeList);
+extern int clean_datanode_slave_all(void);
+extern cmd_t *prepare_cleanDatanodeMaster(char *nodeName);
+extern cmd_t *prepare_cleanDatanodeSlave(char *nodeName);
+
+#ifdef XCP
+extern int add_datanodeMaster(char *name, char *host, int port, int pooler,
+ char *dir, char *walDir, char *extraConf, char *extraPgHbaConf);
+#else
+extern int add_datanodeMaster(char *name, char *host, int port, char *dir,
+ char *restore_dname, char *extraConf, char *extraPgHbaConf);
+#endif
+extern int add_datanodeSlave(char *name, char *host, int port, int pooler,
+ char *dir, char *walDir, char *archDir);
+extern int remove_datanodeMaster(char *name, int clean_opt);
+extern int remove_datanodeSlave(char *name, int clean_opt);
+
+extern int show_config_datanodeMasterSlaveMulti(char **nodeList);
+extern int show_config_datanodeMasterMulti(char **nodeList);
+extern int show_config_datanodeSlaveMulti(char **nodeList);
+extern int show_config_datanodeMaster(int flag, int idx, char *hostname);
+extern int show_config_datanodeSlave(int flag, int idx, char *hostname);
+
+extern int check_AllDatanodeRunning(void);
+
+#endif /* DATANODE_CMD_H */
diff --git a/contrib/pgxc_ctl/do_command.c b/contrib/pgxc_ctl/do_command.c
new file mode 100644
index 0000000000..916b024253
--- /dev/null
+++ b/contrib/pgxc_ctl/do_command.c
@@ -0,0 +1,3050 @@
+/*-------------------------------------------------------------------------
+ *
+ * do_command.c
+ *
+ * Main command module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+/*
+ * This file provides a frontend module to pgxc_ctl operation.
+ */
+#include <stdio.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <signal.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <setjmp.h>
+#include <string.h>
+#include <readline/readline.h>
+#include <readline/history.h>
+
+#include "pgxc_ctl.h"
+#include "do_command.h"
+#include "variables.h"
+#include "varnames.h"
+#include "pgxc_ctl_log.h"
+#include "config.h"
+#include "do_shell.h"
+#include "utils.h"
+#include "gtm_cmd.h"
+#include "coord_cmd.h"
+#include "datanode_cmd.h"
+#include "gtm_util.h"
+#include "monitor.h"
+
+extern char *pgxc_ctl_conf_prototype[];
+extern char *pgxc_ctl_conf_prototype_minimal[];
+extern char *pgxc_ctl_conf_prototype_empty[];
+
+int forceInit = false;
+
+#define Exit(c) exit(myWEXITSTATUS(c))
+#define GetToken() (line = get_word(line, &token))
+#define TestToken(word) ((token != NULL) && (strcasecmp(token, word) == 0))
+#define testToken(word) ((token != NULL) && (strcmp(token, word) == 0))
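+/*
+ * GetToken() advances "line" and leaves the next word in "token" (NULL at the
+ * end of the line). TestToken() matches it case-insensitively, testToken()
+ * case-sensitively; both are safe to call when token is NULL.
+ */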
+
+static void kill_something(char *token);
+static void do_deploy(char *line);
+static void deploy_xc(char **hostlist);
+static void show_config_something(char *nodeName);
+static void show_config_something_multi(char **nodeList);
+extern void show_config_hostList(char **hostList);
+static void show_config_host(char *hostname);
+static void show_basicConfig(void);
+static void show_config_servers(char **hostList);
+static void do_clean_command(char *line);
+static void do_start_command(char *line);
+static void start_all(void);
+static void do_stop_command(char *line);
+static void stop_all(char *immediate);
+static int show_Resource(char *datanodeName, char *databasename, char *username);
+static void do_show_help(char *line);
+
+typedef enum ConfigType
+{
+ CONFIG_EMPTY,
+ CONFIG_MINIMAL,
+ CONFIG_COMPLETE
+} ConfigType;
+
+static void do_echo_command(char * line)
+{
+ printf("do_echo_command\n");
+}
+
+static void do_prepareConfFile(char *Path, ConfigType config_type)
+{
+ char *path = NULL;
+ FILE *conf;
+ int ii;
+ char **my_pgxc_conf_prototype;
+
+ if (Path)
+ path = Path;
+ else
+ {
+ if (find_var(VAR_configFile) && sval(VAR_configFile))
+ path = sval(VAR_configFile);
+ else
+ {
+ elog(ERROR, "ERROR: Configuration file path was not specified.\n");
+ return;
+ }
+ }
+ conf = fopen(path, "w");
+ if (conf == NULL)
+ {
+ elog(ERROR, "ERROR: Could not open the configuration file \"%s\", %s.\n", path, strerror(errno));
+ return;
+ }
+
+ if (config_type == CONFIG_EMPTY)
+ my_pgxc_conf_prototype = pgxc_ctl_conf_prototype_empty;
+ else if (config_type == CONFIG_MINIMAL)
+ my_pgxc_conf_prototype = pgxc_ctl_conf_prototype_minimal;
+ else
+ my_pgxc_conf_prototype = pgxc_ctl_conf_prototype;
+
+ for (ii = 0; my_pgxc_conf_prototype[ii]; ii++)
+ {
+ fprintf(conf, "%s\n", my_pgxc_conf_prototype[ii]);
+ }
+ fclose(conf);
+ return;
+}
+
+/*
+ * Deploy pgxc binaries
+ */
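+/*
+ * deploy_xc() below is roughly equivalent to running, per target host
+ * (paths and user come from the configuration):
+ * tar czCf $pgxcInstallDir $localTmpDir/<pid>.tgz bin include lib share
+ * scp $localTmpDir/<pid>.tgz $pgxcUser@host:$tmpDir
+ * ssh host 'tar xzCf $pgxcInstallDir $tmpDir/<pid>.tgz; rm $tmpDir/<pid>.tgz'
+ */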
+
+static void do_deploy(char *line)
+{
+ char *token;
+ char **hostlist = NULL;
+
+ if (GetToken() == NULL)
+ {
+ elog(ERROR, "ERROR: Please specify option for deploy command.\n");
+ return;
+ }
+ if (TestToken("all"))
+ {
+ elog(NOTICE, "Deploying Postgres-XL components to all the target servers.\n");
+ deploy_xc(aval(VAR_allServers));
+ }
+ else
+ {
+ elog(NOTICE, "Deploying Postgres-XL components.\n");
+ /*
+ * Please note that the following code does not check whether the specified host
+ * appears in the configuration file.
+ * We should be able to deploy the XC binaries to hosts not in the current
+ * configuration, so that gtm slaves, gtm proxies and coordinator/datanode
+ * masters/slaves can be added online.
+ */
+ do {
+ AddMember(hostlist, token);
+ } while(GetToken());
+ deploy_xc(hostlist);
+ CleanArray(hostlist);
+ }
+}
+
+static void deploy_xc(char **hostlist)
+{
+ char tarFile[MAXPATH+1];
+ cmdList_t *cmdList;
+ int ii;
+
+ /* Build tarball --> need to do foreground */
+ elog(NOTICE, "Prepare tarball to deploy ... \n");
+ snprintf(tarFile, MAXPATH, "%d.tgz", getpid());
+ doImmediate(NULL, NULL, "tar czCf %s %s/%s bin include lib share",
+ sval(VAR_pgxcInstallDir),
+ sval(VAR_localTmpDir), tarFile);
+
+ /* Background jobs */
+
+ cmdList = initCmdList();
+ /* Build install dir */
+ for (ii = 0; hostlist[ii]; ii++)
+ {
+ cmd_t *cmd;
+ cmd_t *cmdScp;
+ cmd_t *cmdTarExtract;
+
+ elog(NOTICE, "Deploying to the server %s.\n", hostlist[ii]);
+ /* Build target directory */
+ addCmd(cmdList, (cmd = initCmd(hostlist[ii])));
+ snprintf(newCommand(cmd), MAXLINE,
+ "rm -rf %s/bin %s/include %s/lib %s/share; mkdir -p %s",
+ sval(VAR_pgxcInstallDir),
+ sval(VAR_pgxcInstallDir),
+ sval(VAR_pgxcInstallDir),
+ sval(VAR_pgxcInstallDir),
+ sval(VAR_pgxcInstallDir));
+ /* SCP tarball */
+ appendCmdEl(cmd, (cmdScp = initCmd(NULL)));
+ snprintf(newCommand(cmdScp), MAXLINE,
+ "scp %s/%s %s@%s:%s",
+ sval(VAR_localTmpDir), tarFile, sval(VAR_pgxcUser), hostlist[ii], sval(VAR_tmpDir));
+ /* Extract Tarball and remove it */
+ appendCmdEl(cmd, (cmdTarExtract = initCmd(hostlist[ii])));
+ snprintf(newCommand(cmdTarExtract), MAXLINE,
+ "tar xzCf %s %s/%s; rm %s/%s",
+ sval(VAR_pgxcInstallDir),
+ sval(VAR_tmpDir), tarFile,
+ sval(VAR_tmpDir), tarFile);
+ }
+ doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ doImmediate(NULL, NULL, "rm -f %s/%s",
+ sval(VAR_tmpDir), tarFile);
+ elog(NOTICE, "Deployment done.\n");
+}
+
+static void do_set(char *line)
+{
+
+ char *token;
+ char *varname;
+ pgxc_ctl_var *var;
+
+ if (GetToken() == NULL)
+ {
+ elog(ERROR, "ERROR: No variable name was given\n");
+ return;
+ }
+ varname = Strdup(token);
+ var = confirm_var(varname);
+ reset_value(var);
+ while(GetToken())
+ {
+ add_val(var, token);
+ }
+ print_var(varname);
+ log_var(varname);
+ return;
+}
+
+/*
+ * Failover command ... failover gtm
+ * failover coordinator nodename
+ * failover datanode nodename
+ * failover nodename
+ */
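+/*
+ * For example (node names are illustrative):
+ * failover datanode datanode1
+ * promotes the configured slave of datanode1 to the new master.
+ */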
+static void do_failover_command(char *line)
+{
+ char *token;
+ int idx;
+
+ if (GetToken() == NULL)
+ {
+ elog(ERROR, "ERROR: Please specify failover command option.\n");
+ return;
+ }
+ else if (TestToken("gtm"))
+ {
+ if (isVarYes(VAR_gtmSlave) && !is_none(sval(VAR_gtmSlaveServer)))
+ failover_gtm();
+ else
+ elog(ERROR, "ERROR: no gtm slave is configured.\n");
+ return;
+ }
+ else if (TestToken("coordinator"))
+ {
+ if (!isVarYes(VAR_coordSlave))
+ elog(ERROR, "ERROR: coordinator slave is not configured.\n");
+ else if (!GetToken())
+ elog(ERROR, "ERROR: please specify failover coordinator command option.\n");
+ else
+ {
+ char **nodeList = NULL;
+
+ do
+ {
+ if ((idx = coordIdx(token)) < 0)
+ elog(ERROR, "ERROR: %s is not a coordinator\n", token);
+ else if (is_none(aval(VAR_coordSlaveServers)[idx]))
+ elog(ERROR, "ERROR: slave for the coordinator %s is not configured.\n", token);
+ else
+ AddMember(nodeList, token);
+ } while(GetToken());
+ if (nodeList)
+ failover_coordinator(nodeList);
+ CleanArray(nodeList);
+ }
+ return;
+ }
+ else if (TestToken("datanode"))
+ {
+ if (!isVarYes(VAR_datanodeSlave))
+ elog(ERROR, "ERROR: datanode slave is not configured.\n");
+ else if (!GetToken())
+ elog(ERROR, "ERROR: please specify failover datanode command option.\n");
+ else
+ {
+ char **nodeList = NULL;
+
+ do
+ {
+ if ((idx = datanodeIdx(token)) < 0)
+ elog(ERROR, "ERROR: %s is not a datanode.\n", token);
+ else if (is_none(aval(VAR_datanodeSlaveServers)[idx]))
+ elog(ERROR, "ERROR: slave for the datanode %s is not configured,\n", token);
+ else
+ AddMember(nodeList, token);
+ } while(GetToken());
+ if (nodeList)
+ failover_datanode(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else
+ elog(ERROR, "ERROR: invalid failover command option %s.\n", token);
+}
+
+/*
+ * Reconnect command ... reconnect gtm_proxy [all | nodename ... ]
+ */
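+/*
+ * For example, "reconnect gtm_proxy all" makes every configured gtm_proxy
+ * reconnect to the current gtm.
+ */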
+static void do_reconnect_command(char *line)
+{
+ char *token;
+
+ if (GetToken() == NULL)
+ elog(ERROR, "ERROR: Please specifiy option to reconnect command.\n");
+ else if (TestToken("gtm_proxy"))
+ {
+ if (!isVarYes(VAR_gtmProxy))
+ elog(ERROR, "ERROR: gtm proxy is not configured.\n");
+ else if ((GetToken() == NULL) || TestToken("all"))
+ reconnect_gtm_proxy_all();
+ else
+ {
+ char **nodeList = NULL;
+ int idx;
+ do
+ {
+ if ((idx = gtmProxyIdx(token)) < 0)
+ elog(ERROR, "ERROR: %s is not gtm_proxy.\n", token);
+ else
+ AddMember(nodeList, token);
+ } while(GetToken());
+ if (nodeList)
+ reconnect_gtm_proxy(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else
+ elog(ERROR, "ERROR: invalid option %s for reconnect command.\n", token);
+ return;
+}
+
+
+
+/*
+ * Kill command ... kill nodename, kill all,
+ * kill gtm [master|slave|all],
+ * kill gtm_proxy [nodename|all] ...
+ * kill coordinator [nodename ... |master [all | nodenames ... ] | slave [all | nodenames ... ] |all]
+ * kill datanode [nodename ... |master [all | nodenames ... ] | slave [all | nodenames ... ] |all]
+ */
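+/*
+ * Examples (node names are illustrative):
+ * kill gtm master
+ * kill datanode slave datanode1 datanode2
+ * kill all
+ */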
+static void do_kill_command(char *line)
+{
+ char *token;
+
+ if (GetToken() == NULL)
+ elog(ERROR, "ERROR: Please specifiy option to kill command\n");
+ else if (TestToken("gtm"))
+ {
+ if ((GetToken() == NULL) || TestToken("all"))
+ {
+ kill_gtm_master();
+ if (isVarYes(VAR_gtmSlave))
+ kill_gtm_slave();
+ }
+ else if (TestToken("master"))
+ kill_gtm_master();
+ else if (TestToken("slave"))
+ {
+ if (isVarYes(VAR_gtmSlave))
+ kill_gtm_slave();
+ else
+ elog(ERROR, "ERROR: GTM slave is not configured.\n");
+ }
+ else
+ elog(ERROR, "ERROR: input value \"%s\" is invalid.\n", token);
+ return;
+ }
+ else if (TestToken("gtm_proxy"))
+ {
+ if (GetToken() == NULL)
+ elog(ERROR, "ERROR: Please specify additonal option to kill gtm_proxies\n");
+ else if (TestToken("all"))
+ kill_gtm_proxy(aval(VAR_gtmProxyNames));
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char*));
+ do {
+ AddMember(nodeList, token);
+ } while(GetToken());
+ kill_gtm_proxy(nodeList);
+ clean_array(nodeList);
+ }
+ return;
+ }
+ else if (TestToken("coordinator"))
+ {
+ if ((GetToken() == NULL) || TestToken("all"))
+ {
+ kill_coordinator_master(aval(VAR_coordNames));
+ if (isVarYes(VAR_coordSlave))
+ kill_coordinator_slave(aval(VAR_coordNames));
+ }
+ if (TestToken("master"))
+ {
+ if ((GetToken() == NULL) || (TestToken("all")))
+ kill_coordinator_master(aval(VAR_coordNames));
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do {
+ AddMember(nodeList, token);
+ } while (GetToken());
+ kill_coordinator_master(nodeList);
+ clean_array(nodeList);
+ }
+ }
+ else if (TestToken("slave"))
+ {
+ if ((GetToken() == NULL) || (TestToken("all")))
+ kill_coordinator_slave(aval(VAR_coordNames));
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do {
+ AddMember(nodeList, token);
+ } while (GetToken());
+ kill_coordinator_slave(nodeList);
+ clean_array(nodeList);
+ }
+ }
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do {
+ AddMember(nodeList, token);
+ } while (GetToken());
+ kill_coordinator_master(nodeList);
+ if (isVarYes(VAR_coordSlave))
+ kill_coordinator_slave(nodeList);
+ clean_array(nodeList);
+ }
+ return;
+ }
+ else if (TestToken("datanode"))
+ {
+ if ((GetToken() == NULL) || (TestToken("all")))
+ {
+ kill_datanode_master(aval(VAR_datanodeNames));
+ if (isVarYes(VAR_datanodeSlave))
+ kill_datanode_slave(aval(VAR_datanodeNames));
+ }
+ else if (TestToken("master"))
+ {
+ if ((GetToken() == NULL) || (TestToken("all")))
+ kill_datanode_master(aval(VAR_datanodeNames));
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do{
+ AddMember(nodeList, token);
+ } while (GetToken());
+ kill_datanode_master(nodeList);
+ clean_array(nodeList);
+ }
+ }
+ else if (TestToken("slave"))
+ {
+ if ((GetToken() == NULL) || (TestToken("all")))
+ kill_datanode_slave(aval(VAR_datanodeNames));
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do {
+ AddMember(nodeList, token);
+ } while (GetToken());
+ kill_datanode_slave(nodeList);
+ clean_array(nodeList);
+ }
+ }
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do {
+ AddMember(nodeList, token);
+ } while (GetToken());
+ kill_datanode_master(nodeList);
+ if (isVarYes(VAR_datanodeSlave))
+ kill_datanode_slave(nodeList);
+ clean_array(nodeList);
+ }
+ }
+ else if (TestToken("all"))
+ {
+ if(isVarYes(VAR_datanodeSlave))
+ kill_datanode_slave(aval(VAR_datanodeNames));
+ kill_datanode_master(aval(VAR_datanodeNames));
+ if (isVarYes(VAR_coordSlave))
+ kill_coordinator_slave(aval(VAR_coordNames));
+ kill_coordinator_master(aval(VAR_coordNames));
+ if (isVarYes(VAR_gtmProxy))
+ kill_gtm_proxy(aval(VAR_gtmProxyNames));
+ if (isVarYes(VAR_gtmSlave))
+ kill_gtm_slave();
+ kill_gtm_master();
+ }
+ else
+ {
+ do {
+ kill_something(token);
+ } while (GetToken());
+ }
+ return;
+}
+
+
+static void init_all(void)
+{
+ init_gtm_master(true);
+ start_gtm_master();
+ if (isVarYes(VAR_gtmSlave))
+ {
+ init_gtm_slave();
+ start_gtm_slave();
+ }
+ if (isVarYes(VAR_gtmProxy))
+ {
+ init_gtm_proxy_all();
+ start_gtm_proxy_all();
+ }
+ init_coordinator_master_all();
+ start_coordinator_master_all();
+ if (isVarYes(VAR_coordSlave))
+ {
+ init_coordinator_slave_all();
+ start_coordinator_slave_all();
+ }
+ init_datanode_master_all();
+ start_datanode_master_all();
+ if (isVarYes(VAR_datanodeSlave))
+ {
+ init_datanode_slave_all();
+ start_datanode_slave_all();
+ }
+ configure_nodes_all();
+}
+
+
+/*
+ * Init command ... init all
+ * init gtm [master|slave|all],
+ * init gtm_proxy [all| nodename ...]
+ * init coordinator [all | master [all | nodename ... ]| slave [all | nodename ... ]| nodename ... ]
+ * init datanode [all | master [all | nodename ...] | slave [all | nodename ... ] | nodename ... ]
+ */
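+/*
+ * Examples (node names are illustrative):
+ * init all
+ * init force datanode master datanode1
+ */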
+static void do_init_command(char *line)
+{
+ char *token;
+
+ if (GetToken() == NULL)
+ {
+ elog(ERROR, "ERROR: Please specify an option for the init command.\n");
+ return;
+ }
+
+ if (TestToken("force"))
+ {
+ forceInit = true;
+ if (GetToken() == NULL)
+ {
+ elog(ERROR, "ERROR: Please specify an option for the init command.\n");
+ return;
+ }
+ }
+
+ if (TestToken("all"))
+ init_all();
+ else if (TestToken("gtm"))
+ {
+ if (!GetToken() || (TestToken("all")))
+ {
+ init_gtm_master(true);
+ if (isVarYes(VAR_gtmSlave))
+ init_gtm_slave();
+ }
+ else if (TestToken("master"))
+ init_gtm_master(true);
+ else if (TestToken("slave"))
+ init_gtm_slave();
+ else
+ elog(ERROR, "ERROR: please specify master, slave or all for init gtm command.\n");
+ }
+ else if (TestToken("gtm_proxy"))
+ if (!GetToken() || TestToken("all"))
+ init_gtm_proxy_all();
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do {
+ AddMember(nodeList, token);
+ } while(GetToken());
+ init_gtm_proxy(nodeList);
+ clean_array(nodeList);
+ }
+ else if (TestToken("coordinator"))
+ if (!GetToken() || TestToken("all"))
+ {
+ init_coordinator_master_all();
+ if (isVarYes(VAR_coordSlave))
+ init_coordinator_slave_all();
+ }
+ else if (TestToken("master"))
+ if (!GetToken() || TestToken("all"))
+ init_coordinator_master_all();
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do {
+ AddMember(nodeList, token);
+ } while(GetToken());
+ init_coordinator_master(nodeList);
+ clean_array(nodeList);
+ }
+ else if (TestToken("slave"))
+ if (!GetToken() || TestToken("all"))
+ init_coordinator_slave_all();
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do {
+ AddMember(nodeList, token);
+ } while(GetToken());
+ init_coordinator_slave(nodeList);
+ clean_array(nodeList);
+ }
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ init_coordinator_master(nodeList);
+ if (isVarYes(VAR_coordSlave))
+ init_coordinator_slave(nodeList);
+ clean_array(nodeList);
+ }
+ else if (TestToken("datanode"))
+ if (!GetToken() || TestToken("all"))
+ {
+ init_datanode_master_all();
+ if (isVarYes(VAR_datanodeSlave))
+ init_datanode_slave_all();
+ }
+ else if (TestToken("master"))
+ if (!GetToken() || TestToken("all"))
+ init_datanode_master_all();
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ init_datanode_master(nodeList);
+ clean_array(nodeList);
+ }
+ else if (TestToken("slave"))
+ if (!GetToken() || TestToken("all"))
+ init_datanode_slave_all();
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ init_datanode_slave(nodeList);
+ clean_array(nodeList);
+ }
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ init_datanode_master(nodeList);
+ if (isVarYes(VAR_datanodeSlave))
+ init_datanode_slave(nodeList);
+ }
+ else
+ elog(ERROR, "ERROR: invalid option for init command.\n");
+ return;
+}
+
+/*
+ * Start command ... start nodename, start all,
+ * start gtm [master|slave|all],
+ * start gtm_proxy [nodename|all] ...
+ * start coordinator [nodename ... |master [all | nodenames ... ] | slave [all | nodenames ... ] |all]
+ * start datanode [nodename ... |master [all | nodenames ... ] | slave [all | nodenames ... ] |all]
+ */
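+/*
+ * For example (node names are illustrative), "start coordinator master coord1"
+ * starts just that master, while "start all" brings up gtm, gtm proxies,
+ * coordinators and datanodes in that order.
+ */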
+static void start_all(void)
+{
+ start_gtm_master();
+ if (isVarYes(VAR_gtmSlave))
+ start_gtm_slave();
+ if (isVarYes(VAR_gtmProxy))
+ start_gtm_proxy_all();
+ start_coordinator_master_all();
+ if (isVarYes(VAR_coordSlave))
+ start_coordinator_slave_all();
+ start_datanode_master_all();
+ if (isVarYes(VAR_datanodeSlave))
+ start_datanode_slave_all();
+}
+
+static void do_start_command(char *line)
+{
+ char *token;
+
+ if (GetToken() == NULL)
+ elog(ERROR, "ERROR: Please specify option to start command.\n");
+ else if (TestToken("all"))
+ start_all();
+ else if (TestToken("gtm"))
+ {
+ if (!GetToken() || (TestToken("all")))
+ {
+ start_gtm_master();
+ if (isVarYes(VAR_gtmSlave))
+ start_gtm_slave();
+ }
+ else if (TestToken("master"))
+ start_gtm_master();
+ else if (TestToken("slave"))
+ start_gtm_slave();
+ else
+ elog(ERROR, "ERROR: please specify master, slave or all for start gtm command.\n");
+ }
+ else if (TestToken("gtm_proxy"))
+ if (!GetToken() || TestToken("all"))
+ start_gtm_proxy_all();
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ start_gtm_proxy(nodeList);
+ clean_array(nodeList);
+ }
+ else if (TestToken("coordinator"))
+ if (!GetToken() || TestToken("all"))
+ {
+ start_coordinator_master_all();
+ if (isVarYes(VAR_coordSlave))
+ start_coordinator_slave_all();
+ }
+ else if (TestToken("master"))
+ if (!GetToken() || TestToken("all"))
+ start_coordinator_master_all();
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ start_coordinator_master(nodeList);
+ clean_array(nodeList);
+ }
+ else if (TestToken("slave"))
+ if (!GetToken() || TestToken("all"))
+ start_coordinator_slave_all();
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ start_coordinator_slave(nodeList);
+ clean_array(nodeList);
+ }
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ start_coordinator_master(nodeList);
+ if (isVarYes(VAR_coordSlave))
+ start_coordinator_slave(nodeList);
+ clean_array(nodeList);
+ }
+ else if (TestToken("datanode"))
+ if (!GetToken() || TestToken("all"))
+ {
+ start_datanode_master_all();
+ if (isVarYes(VAR_datanodeSlave))
+ start_datanode_slave_all();
+ }
+ else if (TestToken("master"))
+ if (!GetToken() || TestToken("all"))
+ start_datanode_master_all();
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ start_datanode_master(nodeList);
+ clean_array(nodeList);
+ }
+ else if (TestToken("slave"))
+ if (!GetToken() || TestToken("all"))
+ start_datanode_slave_all();
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ start_datanode_slave(nodeList);
+ clean_array(nodeList);
+ }
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ start_datanode_master(nodeList);
+ if (isVarYes(VAR_datanodeSlave))
+ start_datanode_slave(nodeList);
+			clean_array(nodeList);
+		}
+ else
+ elog(ERROR, "ERROR: invalid option for start command.\n");
+ return;
+}
+
+/*
+ * Stop command ... stop [-m smart | fast | immediate] all
+ * stop gtm [master | slave | all],
+ * stop gtm_proxy [ nodename | all] ...
+ * stop [-m smart | fast | immediate ] coordinator [nodename ... | master [all | nodenames ... ] | slave [all | nodenames ... ] |all]
+ * stop [-m smart | fast | immediate ] datanode [nodename ... | master [all | nodenames ... ] | slave [all | nodenames ... ] |all]
+ *
+ */
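+/*
+ * Illustrative invocations (node names are examples only):
+ *
+ *   stop -m fast all
+ *   stop gtm slave
+ *   stop -m immediate datanode master datanode1
+ */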
+static void stop_all(char *immediate)
+{
+ if (isVarYes(VAR_coordSlave))
+ stop_coordinator_slave_all(immediate);
+ stop_coordinator_master_all(immediate);
+ if (isVarYes(VAR_datanodeSlave))
+ stop_datanode_slave_all(immediate);
+ stop_datanode_master_all(immediate);
+ if (isVarYes(VAR_gtmProxy))
+ stop_gtm_proxy_all();
+ if (isVarYes(VAR_gtmSlave))
+ stop_gtm_slave();
+ if (!is_none(sval(VAR_gtmMasterServer)))
+ stop_gtm_master();
+}
+
+
+#define GetAndSet(var, msg) do{if(!GetToken()){elog(ERROR, msg); return;} var=Strdup(token);}while(0)
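+/*
+ * GetAndSet fetches the next token into var as a malloc'ed copy (Strdup);
+ * if no token is left, it logs msg and returns from the function that
+ * invokes the macro.  For example:
+ *
+ *   GetAndSet(name, "ERROR: please specify the name of gtm master\n");
+ *
+ * leaves a copy of the token in name, to be released later with
+ * freeAndReset().
+ */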
+/*
+ * Add command
+ */
+static void do_add_command(char *line)
+{
+ char *token;
+ char *name;
+ char *host;
+ char *port;
+ char *pooler;
+ char *dir;
+ char *walDir;
+ char *archDir;
+ char *extraConf;
+ char *extraPgHbaConf;
+
+ if (!GetToken())
+ {
+ elog(ERROR, "ERROR: Specify options for add command.\n");
+ return;
+ }
+ if (!TestToken("gtm") && is_none(sval(VAR_gtmMasterServer)))
+ {
+ elog(ERROR, "ERROR: GTM master must be added before adding any other component.\n");
+ return;
+ }
+
+ if (TestToken("gtm"))
+ {
+ /*
+ * add gtm master name host port dir
+ */
+
+ if (!GetToken())
+ {
+ elog(ERROR, "ERROR: Specify option for add gtm command.\n");
+ return;
+ }
+ if (TestToken("master"))
+ {
+ GetAndSet(name, "ERROR: please specify the name of gtm master\n");
+ GetAndSet(host, "ERROR: please specify the host name for gtm master\n");
+ GetAndSet(port, "ERROR: please specify the port number for gtm master\n");
+			GetAndSet(dir, "ERROR: please specify the working directory for gtm master\n");
+ add_gtmMaster(name, host, atoi(port), dir);
+ }
+ else if (TestToken("slave"))
+ {
+ GetAndSet(name, "ERROR: please specify the name of gtm slave\n");
+ GetAndSet(host, "ERROR: please specify the host name for gtm slave\n");
+ GetAndSet(port, "ERROR: please specify the port number for gtm slave\n");
+			GetAndSet(dir, "ERROR: please specify the working directory for gtm slave\n");
+ add_gtmSlave(name, host, atoi(port), dir);
+ }
+ else
+ {
+			elog(ERROR, "ERROR: you can specify only master or slave with the add gtm command. %s is invalid.\n", token);
+ return;
+ }
+ freeAndReset(name);
+ freeAndReset(host);
+ freeAndReset(port);
+ freeAndReset(dir);
+ }
+ else if (TestToken("gtm_proxy"))
+ {
+ /*
+ * Add gtm_proxy name host port dir
+ */
+ GetAndSet(name, "ERROR: please specify the name of gtm_proxy\n");
+ GetAndSet(host, "ERROR: please specify the host name for gtm_proxy\n");
+ GetAndSet(port, "ERROR: please specify the port number for gtm_proxy\n");
+		GetAndSet(dir, "ERROR: please specify the working directory for gtm_proxy\n");
+ add_gtmProxy(name, host, atoi(port), dir);
+ freeAndReset(name);
+ freeAndReset(host);
+ freeAndReset(port);
+ freeAndReset(dir);
+ }
+ else if (TestToken("coordinator"))
+ {
+ /*
+ * Add coordinator master name host port pooler dir
+ * Add coordinator slave name host dir
+ */
+ if (!GetToken() || (!TestToken("master") && !TestToken("slave")))
+ {
+ elog(ERROR, "ERROR: please specify master or slave.\n");
+ return;
+ }
+ if (TestToken("master"))
+ {
+ GetAndSet(name, "ERROR: please specify the name of the coordinator master\n");
+			GetAndSet(host, "ERROR: please specify the host for the coordinator master\n");
+ GetAndSet(port, "ERROR: please specify the port number for the coordinator master\n");
+ GetAndSet(pooler, "ERROR: please specify the pooler port number for the coordinator master.\n");
+ GetAndSet(dir, "ERROR: please specify the working directory for the coordinator master\n");
+ GetAndSet(extraConf, "ERROR: please specify file to read extra configuration. Specify 'none' if nothing extra to be added.\n");
+ GetAndSet(extraPgHbaConf, "ERROR: please specify file to read extra pg_hba configuration. Specify 'none' if nothing extra to be added.\n");
+ add_coordinatorMaster(name, host, atoi(port), atoi(pooler), dir,
+ extraConf, extraPgHbaConf);
+ freeAndReset(name);
+ freeAndReset(host);
+ freeAndReset(port);
+ freeAndReset(pooler);
+ freeAndReset(dir);
+ }
+ else
+ {
+ GetAndSet(name, "ERROR: please specify the name of the coordinator slave\n");
+ GetAndSet(host, "ERROR: please specify the host for the coordinator slave\n");
+ GetAndSet(port, "ERROR: please specify the port number for the coordinator slave\n");
+ GetAndSet(pooler, "ERROR: please specify the pooler port number for the coordinator slave.\n");
+			GetAndSet(dir, "ERROR: please specify the working directory for the coordinator slave\n");
+ GetAndSet(archDir, "ERROR: please specify WAL archive directory for coordinator slave\n");
+ add_coordinatorSlave(name, host, atoi(port), atoi(pooler), dir, archDir);
+ freeAndReset(name);
+ freeAndReset(host);
+ freeAndReset(dir);
+ }
+ }
+ else if (TestToken("datanode"))
+ {
+ if (!GetToken() || (!TestToken("master") && !TestToken("slave")))
+ {
+ elog(ERROR, "ERROR: please specify master or slave.\n");
+ return;
+ }
+ if (TestToken("master"))
+ {
+ GetAndSet(name, "ERROR: please specify the name of the datanode master\n");
+			GetAndSet(host, "ERROR: please specify the host for the datanode master\n");
+ GetAndSet(port, "ERROR: please specify the port number for the datanode master\n");
+ GetAndSet(pooler, "ERROR: please specify the pooler port number for the datanode master.\n");
+			GetAndSet(dir, "ERROR: please specify the working directory for the datanode master\n");
+ GetAndSet(walDir, "ERROR: please specify the WAL directory for the datanode master WAL. Specify 'none' for default\n");
+			GetAndSet(extraConf, "ERROR: please specify file to read extra configuration. Specify 'none' if nothing extra to be added.\n");
+			GetAndSet(extraPgHbaConf, "ERROR: please specify file to read extra pg_hba configuration. Specify 'none' if nothing extra to be added.\n");
+ add_datanodeMaster(name, host, atoi(port), atoi(pooler), dir,
+ walDir, extraConf, extraPgHbaConf);
+ freeAndReset(name);
+ freeAndReset(host);
+ freeAndReset(port);
+ freeAndReset(pooler);
+ freeAndReset(dir);
+ freeAndReset(walDir);
+ }
+ else
+ {
+ GetAndSet(name, "ERROR: please specify the name of the datanode slave\n");
+ GetAndSet(host, "ERROR: please specify the host for the datanode slave\n");
+ GetAndSet(port, "ERROR: please specify the port number for the datanode slave\n");
+ GetAndSet(pooler, "ERROR: please specify the pooler port number for the datanode slave.\n");
+ GetAndSet(dir, "ERROR: please specify the working directory for datanode slave\n");
+ GetAndSet(walDir, "ERROR: please specify the WAL directory for datanode slave WAL. Specify 'none' for default.\n");
+ GetAndSet(archDir, "ERROR: please specify WAL archive directory for datanode slave\n");
+
+ add_datanodeSlave(name, host, atoi(port), atoi(pooler), dir,
+ walDir, archDir);
+ freeAndReset(name);
+ freeAndReset(host);
+ freeAndReset(port);
+ freeAndReset(pooler);
+ freeAndReset(dir);
+ freeAndReset(walDir);
+ }
+ }
+ return;
+}
+
+static void do_remove_command(char *line)
+{
+ char *token;
+ char *name;
+ bool clean_opt = FALSE;
+
+ if (!GetToken())
+ {
+		elog(ERROR, "ERROR: Please specify gtm, gtm_proxy, coordinator or datanode after remove command.\n");
+ return;
+ }
+ if (TestToken("gtm"))
+ {
+ if (!GetToken())
+ {
+ elog(ERROR, "ERROR: Specify option to remove gtm command\n");
+ return;
+ }
+ if (TestToken("master"))
+ {
+ if (GetToken() && TestToken("clean"))
+ clean_opt = TRUE;
+ remove_gtmMaster(clean_opt);
+ }
+ else if (TestToken("slave"))
+ {
+ if (GetToken() && TestToken("clean"))
+ clean_opt = TRUE;
+ remove_gtmSlave(clean_opt);
+ }
+ else
+ {
+			elog(ERROR, "ERROR: you can specify only master or slave with the remove gtm command. %s is invalid.\n", token);
+ return;
+ }
+ }
+ else if (TestToken("gtm_proxy"))
+ {
+ GetAndSet(name, "ERROR: please specify gtm proxy name to remove.\n");
+ if (GetToken() && TestToken("clean"))
+ clean_opt = TRUE;
+ remove_gtmProxy(name, clean_opt );
+ freeAndReset(name);
+ }
+ else if (TestToken("coordinator"))
+ {
+ if (!GetToken() || (!TestToken("master") && !TestToken("slave")))
+ {
+ elog(ERROR, "ERROR: please specify master or slave.\n");
+ return;
+ }
+ if (TestToken("master"))
+ {
+ GetAndSet(name, "ERROR: please specify the name of the coordinator master\n");
+ if (GetToken() && TestToken("clean"))
+ clean_opt = TRUE;
+ remove_coordinatorMaster(name, clean_opt);
+ freeAndReset(name);
+ }
+ else
+ {
+ GetAndSet(name, "ERROR: please specify the name of the coordinator slave\n");
+ if (GetToken() && TestToken("clean"))
+ clean_opt = TRUE;
+ remove_coordinatorSlave(name, clean_opt);
+ freeAndReset(name);
+ }
+ }
+ else if (TestToken("datanode"))
+ {
+ if (!GetToken() || (!TestToken("master") && !TestToken("slave")))
+ {
+ elog(ERROR, "ERROR: please specify master or slave.\n");
+ return;
+ }
+ if (TestToken("master"))
+ {
+ GetAndSet(name, "ERROR: please specify the name of the datanode master\n");
+ if (GetToken() && TestToken("clean"))
+ clean_opt = TRUE;
+ remove_datanodeMaster(name, clean_opt);
+ freeAndReset(name);
+ }
+ else
+ {
+ GetAndSet(name, "ERROR: please specify the name of the datanode slave\n");
+ if (GetToken() && TestToken("clean"))
+ clean_opt = TRUE;
+ remove_datanodeSlave(name, clean_opt);
+ freeAndReset(name);
+ }
+ }
+ else
+		elog(ERROR, "ERROR: invalid argument %s to remove command.\n", token);
+ return;
+}
+
+static char *m_Option;
+
+static char *handle_m_option(char *line, char **m_option)
+{
+ char *token;
+
+ freeAndReset(m_Option);
+ if (GetToken() == NULL)
+ return(line);
+ else if (TestToken("immediate"))
+ m_Option = Strdup("immediate");
+ else if (TestToken("fast"))
+ m_Option = Strdup("fast");
+ else if (TestToken("smart"))
+ m_Option = Strdup("smart");
+ else
+ elog(ERROR, "ERROR: specify smart, fast or immediate for -m option value.\n");
+ return(line);
+}
+
+
+
+static void do_stop_command(char *line)
+{
+ char *token;
+
+ freeAndReset(m_Option);
+ if (GetToken() == NULL)
+ elog(ERROR, "ERROR: Please specify option to stop command.\n");
+ else if (testToken("-m"))
+ {
+ line = handle_m_option(line, &m_Option);
+ GetToken();
+ }
+ if (TestToken("all"))
+ {
+ if (GetToken() && TestToken("-m"))
+ handle_m_option(line, &m_Option);
+ stop_all(m_Option);
+ }
+ else if (TestToken("gtm"))
+ {
+ if (m_Option)
+ elog(WARNING, "-m option is not available with gtm. Ignoring.\n");
+ if (!GetToken() || (TestToken("all")))
+ {
+ if (!is_none(sval(VAR_gtmMasterServer)))
+ stop_gtm_master();
+ if (isVarYes(VAR_gtmSlave))
+ stop_gtm_slave();
+ }
+ else if (TestToken("master") && !is_none(sval(VAR_gtmMasterServer)))
+ stop_gtm_master();
+ else if (TestToken("slave") && isVarYes(VAR_gtmSlave))
+ stop_gtm_slave();
+ else
+ elog(ERROR, "ERROR: please specify master, slave or all for stop gtm command.\n");
+ }
+ else if (TestToken("gtm_proxy"))
+ {
+ if (m_Option)
+			elog(WARNING, "-m option is not available with gtm_proxy. Ignoring.\n");
+ if (!GetToken() || TestToken("all"))
+ stop_gtm_proxy_all();
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ stop_gtm_proxy(nodeList);
+ clean_array(nodeList);
+ }
+ }
+ else if (TestToken("coordinator"))
+ {
+ if (!GetToken() || TestToken("all"))
+ {
+ stop_coordinator_master_all(m_Option);
+ if (isVarYes(VAR_coordSlave))
+ stop_coordinator_slave_all(m_Option);
+ }
+ else if (TestToken("master"))
+ if (!GetToken() || TestToken("all"))
+ stop_coordinator_master_all(m_Option);
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ stop_coordinator_master(nodeList, m_Option);
+ clean_array(nodeList);
+ }
+ else if (TestToken("slave"))
+ if (!GetToken() || TestToken("all"))
+ stop_coordinator_slave_all(m_Option);
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ stop_coordinator_slave(nodeList, m_Option);
+ clean_array(nodeList);
+ }
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ stop_coordinator_master(nodeList, m_Option);
+ if (isVarYes(VAR_coordSlave))
+ stop_coordinator_slave(nodeList, m_Option);
+ clean_array(nodeList);
+ }
+ }
+ else if (TestToken("datanode"))
+ {
+ if (!GetToken() || TestToken("all"))
+ {
+ stop_datanode_master_all(m_Option);
+ if (isVarYes(VAR_datanodeSlave))
+ stop_datanode_slave_all(m_Option);
+ }
+ else if (TestToken("master"))
+ if (!GetToken() || TestToken("all"))
+ stop_datanode_master_all(m_Option);
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ stop_datanode_master(nodeList, m_Option);
+ clean_array(nodeList);
+ }
+ else if (TestToken("slave"))
+ if (!GetToken() || TestToken("all"))
+ stop_datanode_slave_all(m_Option);
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ stop_datanode_slave(nodeList, m_Option);
+ clean_array(nodeList);
+ }
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ stop_datanode_master(nodeList, m_Option);
+ if (isVarYes(VAR_datanodeSlave))
+ stop_datanode_slave(nodeList, m_Option);
+			clean_array(nodeList);
+		}
+ }
+ else
+ elog(ERROR, "ERROR: invalid option for stop command.\n");
+ return;
+}
+
+/*
+ * Test stuff
+ */
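+/*
+ * Accepted forms, as parsed below (host and file names are examples only):
+ *
+ *   test ssh host command ...
+ *   test ssh-stdin host stdin_file command ...
+ *   test local command ...
+ *   test local-stdin stdin_file command ...
+ */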
+static void do_test(char *line)
+{
+ char *token;
+ int logLevel;
+ int printLevel;
+
+ logLevel = setLogMsgLevel(DEBUG3);
+ printLevel = setPrintMsgLevel(DEBUG3);
+
+ GetToken();
+ if (TestToken("ssh"))
+ {
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+
+ GetToken();
+ cmdList = initCmdList();
+ cmd = Malloc0(sizeof(cmd_t));
+ cmd->host = Strdup(token);
+ cmd->command = Strdup(line);
+ cmd->localStdin = NULL;
+ addCmd(cmdList, cmd);
+ elog(INFO, "INFO: Testing ssh %s \"%s\"\n", token, line);
+ doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ }
+ else if (TestToken("ssh-stdin"))
+ {
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+
+ cmdList = initCmdList();
+ cmd = Malloc0(sizeof(cmd_t));
+ GetToken();
+ cmd->host = Strdup(token);
+ GetToken();
+ cmd->localStdin = Strdup(token);
+ cmd->command = Strdup(line);
+ addCmd(cmdList, cmd);
+ elog(INFO, "Testing ssh %s \"%s\" < %s\n", cmd->host, cmd->command, cmd->localStdin);
+ doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ }
+ else if (TestToken("local"))
+ {
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+
+ cmdList = initCmdList();
+ addCmd(cmdList, (cmd = initCmd(NULL)));
+ cmd->command = Strdup(line);
+ elog(INFO, "Testing local, \"%s\"\n", cmd->command);
+ doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ }
+ else if (TestToken("local-stdin"))
+ {
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+
+ cmdList = initCmdList();
+ addCmd(cmdList, (cmd = initCmd(NULL)));
+ GetToken();
+ cmd->localStdin = Strdup(token);
+ cmd->command = Strdup(line);
+ elog(INFO, "Testing local-stdin, \"%s\"\n", cmd->command);
+ doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ }
+ setLogMsgLevel(logLevel);
+ setPrintMsgLevel(printLevel);
+}
+
+
+/* ==================================================================
+ *
+ * Stuff specified by "node name", not by node type
+ *
+ * ==================================================================
+ */
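+/*
+ * Kill the node known by nodeName, whatever its type, together with its
+ * slave when one is configured.
+ */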
+static void kill_something(char *nodeName)
+{
+ char *nodeList[2];
+
+ nodeList[1] = NULL;
+ switch(getNodeType(nodeName))
+ {
+ case NodeType_UNDEF:
+ elog(ERROR, "ERROR: Could not find name \"%s\" in any node type.\n", nodeName);
+ return;
+ case NodeType_GTM:
+ elog(ERROR, "ERROR: Issue kill gtm command to kill gtm master/slave\n");
+ return;
+ case NodeType_GTM_PROXY:
+ nodeList[0] = nodeName;
+ kill_gtm_proxy(nodeList);
+ return;
+ case NodeType_COORDINATOR:
+ nodeList[0] = nodeName;
+ kill_coordinator_master(nodeList);
+ if (isVarYes(VAR_coordSlave))
+ kill_coordinator_slave(nodeList);
+ return;
+ case NodeType_DATANODE:
+ nodeList[0] = nodeName;
+ kill_datanode_master(nodeList);
+ if (isVarYes(VAR_datanodeSlave))
+ kill_datanode_slave(nodeList);
+ return;
+ default:
+ elog(ERROR, "ERROR: internal error. Should not come here!\n");
+ return;
+ }
+}
+
+static void show_config_something_multi(char **nodeList)
+{
+ int ii;
+
+ for (ii = 0; nodeList[ii]; ii++)
+ show_config_something(nodeList[ii]);
+}
+
+static void show_config_something(char *nodeName)
+{
+ int idx;
+
+ switch(getNodeType(nodeName))
+ {
+ case NodeType_UNDEF:
+ elog(ERROR, "ERROR: Could not find name \"%s\" in any node type.\n", nodeName);
+ return;
+ case NodeType_GTM:
+ show_config_gtmMaster(TRUE, sval(VAR_gtmMasterServer));
+ if (isVarYes(VAR_gtmSlave))
+ show_config_gtmSlave(TRUE, sval(VAR_gtmSlaveServer));
+ return;
+ case NodeType_GTM_PROXY:
+ idx = gtmProxyIdx(nodeName);
+ show_config_gtmProxy(TRUE, idx, aval(VAR_gtmProxyServers)[idx]);
+ return;
+ case NodeType_COORDINATOR:
+ idx = coordIdx(nodeName);
+ show_config_coordMaster(TRUE, idx, aval(VAR_coordMasterServers)[idx]);
+ if (isVarYes(VAR_coordSlave))
+ show_config_coordSlave(TRUE, idx, aval(VAR_coordSlaveServers)[idx]);
+ return;
+ case NodeType_DATANODE:
+ idx = datanodeIdx(nodeName);
+ show_config_datanodeMaster(TRUE, idx, aval(VAR_datanodeMasterServers)[idx]);
+ if (isVarYes(VAR_datanodeSlave))
+ show_config_datanodeSlave(TRUE, idx, aval(VAR_datanodeSlaveServers)[idx]);
+ return;
+ case NodeType_SERVER:
+ {
+ char *hostList[2];
+ hostList[0] = nodeName;
+ hostList[1] = NULL;
+ show_config_servers(hostList);
+ return;
+ }
+ default:
+ elog(ERROR, "ERROR: internal error. Should not come here!\n");
+ return;
+ }
+}
+
+
+
+/* ========================================================================================
+ *
+ * Configuration stuff
+ *
+ * ========================================================================================
+ */
+static void show_config_servers(char **hostList)
+{
+ int ii;
+ for (ii = 0; hostList[ii]; ii++)
+ if (!is_none(hostList[ii]))
+ show_config_host(hostList[ii]);
+ return;
+}
+
+/*
+ * show {config|configuration} [all | name .... | gtm [master|slave|all] | gtm_proxy [all | name ...] |
+ * coordinator [all | master | slave | name ... ] |
+ * host name .... ]
+ * With no option, will print common configuration parameters and exit.
+ *
+ */
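+/*
+ * Examples (node and host names are illustrative only):
+ *
+ *   show config basic
+ *   show config gtm master
+ *   show config coordinator master coord1
+ *   show config host host1 host2
+ */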
+static void show_basicConfig(void)
+{
+ elog(NOTICE, "========= Postgres-XL configuration Common Info ========================\n");
+ elog(NOTICE, "=== Overall ===\n");
+ elog(NOTICE, "Postgres-XL owner: %s\n", sval(VAR_pgxcOwner));
+ elog(NOTICE, "Postgres-XL user: %s\n", sval(VAR_pgxcUser));
+ elog(NOTICE, "Postgres-XL install directory: %s\n", sval(VAR_pgxcInstallDir));
+ elog(NOTICE, "pgxc_ctl home: %s\n", pgxc_ctl_home);
+ elog(NOTICE, "pgxc_ctl configuration file: %s\n", pgxc_ctl_config_path);
+ elog(NOTICE, "pgxc_ctl tmpDir: %s\n", sval(VAR_tmpDir));
+ elog(NOTICE, "pgxc_ctl localTempDir: %s\n", sval(VAR_localTmpDir));
+ elog(NOTICE, "pgxc_ctl log file: %s\n", logFileName);
+ elog(NOTICE, "pgxc_ctl configBackup: %s\n", isVarYes(VAR_configBackup) ? "y" : "n");
+ elog(NOTICE, "pgxc_ctl configBackupHost: %s\n", isVarYes(VAR_configBackup) ? sval(VAR_configBackupHost) : "none");
+ elog(NOTICE, "pgxc_ctl configBackupFile: %s\n", isVarYes(VAR_configBackup) ? sval(VAR_configBackupFile) : "none");
+ elog(NOTICE, "========= Postgres-XL configuration End Common Info ===================\n");
+}
+
+
+static void show_configuration(char *line)
+{
+ char *token;
+
+ GetToken();
+ if (line == NULL)
+		elog(ERROR, "ERROR: No configuration option is specified. Returning.\n");
+ else if (TestToken("basic"))
+ show_basicConfig();
+ else if (TestToken("all"))
+ {
+ show_basicConfig();
+ show_config_servers(aval(VAR_allServers));
+ }
+ else if (TestToken("host"))
+ {
+		char **hostList = Malloc0(sizeof(char *));
+		while (GetToken())
+			AddMember(hostList, token);
+ if (hostList[0])
+ show_config_servers(hostList);
+ clean_array(hostList);
+ }
+ else if (TestToken("gtm"))
+ {
+ if ((GetToken() == NULL) || (TestToken("all")))
+ {
+ show_config_gtmMaster(TRUE, sval(VAR_gtmMasterServer));
+ if (isVarYes(VAR_gtmSlave))
+ show_config_gtmSlave(TRUE, sval(VAR_gtmSlaveServer));
+ }
+ else if (TestToken("master"))
+ show_config_gtmMaster(TRUE, sval(VAR_gtmMasterServer));
+ else if (TestToken("slave"))
+ {
+ if (isVarYes(VAR_gtmSlave))
+ show_config_gtmSlave(TRUE, sval(VAR_gtmSlaveServer));
+ else
+ elog(NOTICE, "NOTICE: gtm slave is not configured.\n");
+ }
+ else
+ elog(ERROR, "ERROR: invalid option %s for 'show config gtm' command.\n", token);
+ }
+ else if (TestToken("gtm_proxy"))
+ {
+ if (!isVarYes(VAR_gtmProxy))
+ {
+ elog(ERROR, "ERROR: gtm proxies are not configured.\n");
+ }
+ else if ((GetToken() == NULL) || (TestToken("all")))
+ show_config_gtmProxies(aval(VAR_gtmProxyNames));
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do{
+ int idx;
+ idx = gtmProxyIdx(token);
+ if (idx < 0)
+ elog(ERROR, "ERROR: Specified name %s is not GTM Proxy.\n", token);
+ else
+ AddMember(nodeList, token);
+ } while(GetToken());
+ show_config_gtmProxies(nodeList);
+ clean_array(nodeList);
+ }
+ }
+ else if (TestToken("coordinator"))
+ {
+ if ((GetToken() == NULL) || (TestToken("all")))
+ show_config_coordMasterSlaveMulti(aval(VAR_coordNames));
+ else if (TestToken("master"))
+ {
+ if (GetToken() == NULL)
+ show_config_coordMasterMulti(aval(VAR_coordNames));
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ show_config_coordMasterMulti(nodeList);
+ clean_array(nodeList);
+ }
+ }
+ else if (TestToken("slave"))
+ {
+ if (!isVarYes(VAR_coordSlave))
+ elog(ERROR, "ERROR: Coordinator slave is not configured.\n");
+ else if (GetToken() == NULL)
+			show_config_coordSlaveMulti(aval(VAR_coordNames));
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+				show_config_coordSlaveMulti(nodeList);
+ clean_array(nodeList);
+ }
+ }
+ else
+ elog(ERROR, "ERROR: Invalid option %s for 'show config coordinator' command.\n", token);
+ }
+ else if (TestToken("datanode"))
+ {
+ if ((GetToken() == NULL) || (TestToken("all")))
+ show_config_datanodeMasterSlaveMulti(aval(VAR_datanodeNames));
+ else if (TestToken("master"))
+ {
+ if (GetToken() == NULL)
+ show_config_datanodeMasterMulti(aval(VAR_datanodeNames));
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ show_config_datanodeMasterMulti(nodeList);
+ clean_array(nodeList);
+ }
+ }
+ else if (TestToken("slave"))
+ {
+ if (!isVarYes(VAR_datanodeSlave))
+ elog(ERROR, "ERROR: Datanode slave is not configured.\n");
+ else if (GetToken() == NULL)
+			show_config_datanodeSlaveMulti(aval(VAR_datanodeNames));
+ else
+ {
+ char **nodeList = Malloc0(sizeof(char *));
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+				show_config_datanodeSlaveMulti(nodeList);
+ clean_array(nodeList);
+ }
+ }
+ else
+ elog(ERROR, "ERROR: Invalid option %s for 'show config datanode' command.\n", token);
+ }
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ show_config_something_multi(nodeList);
+ clean_array(nodeList);
+ }
+ return;
+}
+
+void print_simple_node_info(char *nodeName, char *port, char *dir,
+ char *extraConfig, char *specificExtraConfig)
+{
+ elog(NOTICE,
+ " Nodename: '%s', port: %s, dir: '%s'"
+ " ExtraConfig: '%s', Specific Extra Config: '%s'\n",
+ nodeName, port, dir, extraConfig, specificExtraConfig);
+
+}
+
+
+static void show_config_host(char *hostname)
+{
+ int ii;
+
+ lockLogFile();
+ elog(NOTICE, "====== Server: %s =======\n", hostname);
+ /* GTM Master */
+ if (strcmp(hostname, sval(VAR_gtmMasterServer)) == 0)
+ show_config_gtmMaster(TRUE, NULL);
+ /* GTM Slave */
+ if (isVarYes(VAR_gtmSlave) && (strcmp(sval(VAR_gtmSlaveServer), hostname) == 0))
+ show_config_gtmSlave(TRUE, NULL);
+ /* GTM Proxy */
+ if (isVarYes(VAR_gtmProxy))
+ for (ii = 0; aval(VAR_gtmProxyServers)[ii]; ii++)
+ if (strcmp(aval(VAR_gtmProxyServers)[ii], hostname) == 0)
+ show_config_gtmProxy(TRUE, ii, NULL);
+ /* Coordinator Master */
+ for (ii = 0; aval(VAR_coordMasterServers)[ii]; ii++)
+ if (strcmp(aval(VAR_coordMasterServers)[ii], hostname) == 0)
+ show_config_coordMaster(TRUE, ii, NULL);
+ /* Coordinator Slave */
+ if (isVarYes(VAR_coordSlave))
+ for (ii = 0; aval(VAR_coordSlaveServers)[ii]; ii++)
+ if (strcmp(aval(VAR_coordSlaveServers)[ii], hostname) == 0)
+ show_config_coordSlave(TRUE, ii, NULL);
+ /* Datanode Master */
+ for (ii = 0; aval(VAR_datanodeMasterServers)[ii]; ii++)
+ if (strcmp(aval(VAR_datanodeMasterServers)[ii], hostname) == 0)
+ show_config_datanodeMaster(TRUE, ii, NULL);
+ /* Datanode Slave */
+ if (isVarYes(VAR_datanodeSlave))
+ for (ii = 0; aval(VAR_datanodeSlaveServers)[ii]; ii++)
+ if (strcmp(aval(VAR_datanodeSlaveServers)[ii], hostname) == 0)
+ show_config_datanodeSlave(TRUE, ii, NULL);
+ unlockLogFile();
+}
+
+void show_config_hostList(char **hostList)
+{
+ int ii;
+ for (ii = 0; hostList[ii]; ii++)
+ show_config_host(hostList[ii]);
+}
+/*
+ * Clean command
+ *
+ * clean {all |
+ * gtm [ all | master | slave ] |
+ * gtm_proxy [ all | nodename ... ]
+ * coordinator [[all | master | slave ] [nodename ... ]] |
+ * datanode [ [all | master | slave] [nodename ... ]}
+ */
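+/*
+ * Examples (node names are illustrative only):
+ *
+ *   clean all
+ *   clean gtm master
+ *   clean coordinator slave coord1 coord2
+ *   clean datanode master datanode1
+ */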
+static void do_clean_command(char *line)
+{
+ char *token;
+ cmdList_t *cmdList = NULL;
+
+ GetToken();
+ if (token == NULL)
+ {
+ elog(ERROR, "ERROR: Please specify options for clean command.\n");
+ return;
+ }
+ if (TestToken("all"))
+ {
+ elog(INFO, "Stopping all components before cleaning\n");
+ stop_all("immediate");
+
+ elog(INFO, "Cleaning all the directories and sockets.\n");
+ if (!is_none(sval(VAR_gtmMasterServer)))
+ clean_gtm_master();
+ if (isVarYes(VAR_gtmSlave))
+ clean_gtm_slave();
+ if (isVarYes(VAR_gtmProxy))
+ clean_gtm_proxy_all();
+ clean_coordinator_master_all();
+ if (isVarYes(VAR_coordSlave))
+ clean_coordinator_slave_all();
+ clean_datanode_master_all();
+ if (isVarYes(VAR_datanodeSlave))
+ clean_datanode_slave_all();
+ }
+ else if (TestToken("gtm"))
+ {
+ GetToken();
+ if ((token == NULL) || TestToken("all"))
+ {
+			elog(INFO, "Stopping and cleaning GTM master/slave.\n");
+ if (!is_none(sval(VAR_gtmMasterServer)))
+ stop_gtm_master();
+ if (isVarYes(VAR_gtmSlave))
+ stop_gtm_slave();
+
+ if (!is_none(sval(VAR_gtmMasterServer)))
+ clean_gtm_master();
+ if (isVarYes(VAR_gtmSlave))
+ clean_gtm_slave();
+ }
+ else if (TestToken("master") && !is_none(sval(VAR_gtmMasterServer)))
+ {
+ stop_gtm_master();
+ clean_gtm_master();
+ }
+ else if (TestToken("slave"))
+ {
+ if (isVarYes(VAR_gtmSlave))
+ {
+ stop_gtm_slave();
+ clean_gtm_slave();
+ }
+ else
+ elog(ERROR, "ERROR: gtm slave is not configured.\n");
+ }
+ else
+ elog(ERROR, "ERROR: invalid clean command option %s.\n", token);
+ }
+ else if (TestToken("gtm_proxy"))
+ {
+ elog(INFO, "Stopping and cleaning specified gtm_proxy.\n");
+ GetToken();
+ if (!isVarYes(VAR_gtmProxy))
+ elog(ERROR, "ERROR: gtm proxy is not configured.\n");
+ else if ((token == NULL) || TestToken("all"))
+ {
+ stop_gtm_proxy_all();
+ clean_gtm_proxy_all();
+ }
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+
+ stop_gtm_proxy(nodeList);
+ clean_gtm_proxy(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else if (TestToken("coordinator"))
+ {
+ GetToken();
+ if (token == NULL)
+ {
+ elog(INFO, "Stopping and cleaning coordinator master and slave.\n");
+ stop_coordinator_master_all("immediate");
+ if (isVarYes(VAR_coordSlave))
+ stop_coordinator_slave_all("immediate");
+
+ clean_coordinator_master_all();
+ if (isVarYes(VAR_coordSlave))
+ clean_coordinator_slave_all();
+ }
+ else if (TestToken("all"))
+ {
+ elog(INFO, "Stopping and cleaning coordinator master and slave.\n");
+ GetToken();
+ if (token == NULL)
+ {
+ stop_coordinator_master_all("immediate");
+ clean_coordinator_master_all();
+ if (isVarYes(VAR_coordSlave))
+ {
+ stop_coordinator_slave_all("immediate");
+ clean_coordinator_slave_all();
+ }
+ }
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ stop_coordinator_master(nodeList, "immediate");
+ clean_coordinator_master(nodeList);
+ if (isVarYes(VAR_coordSlave))
+ {
+ stop_coordinator_slave(nodeList,"immediate");
+ clean_coordinator_slave(nodeList);
+ }
+ CleanArray(nodeList);
+ }
+ }
+ else if (TestToken("master"))
+ {
+ elog(INFO, "Stopping and cleaning specified coordinator master.\n");
+ GetToken();
+ if (token == NULL)
+ {
+ stop_coordinator_master_all("immediate");
+ clean_coordinator_master_all();
+ }
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ stop_coordinator_master(nodeList, "immediate");
+ clean_coordinator_master(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else if (TestToken("slave"))
+ {
+ elog(INFO, "Stopping and cleaning specified coordinator slave.\n");
+ if (!isVarYes(VAR_coordSlave))
+ {
+ elog(ERROR, "ERROR: Coordinator slave is not configured.\n");
+ return;
+ }
+ GetToken();
+ if (token == NULL)
+ {
+ stop_coordinator_slave_all("immediate");
+ clean_coordinator_slave_all();
+ }
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ stop_coordinator_slave(nodeList, "immediate");
+ clean_coordinator_slave(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else
+ {
+ char **nodeList = NULL;
+ elog(INFO, "Stopping and cleaning specified coordinator.\n");
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ stop_coordinator_master(nodeList, "immediate");
+ clean_coordinator_master(nodeList);
+ if (isVarYes(VAR_coordSlave))
+ {
+ stop_coordinator_slave(nodeList, "immediate");
+ clean_coordinator_slave(nodeList);
+ }
+ CleanArray(nodeList);
+ }
+ }
+ else if(TestToken("datanode"))
+ {
+ GetToken();
+ if (token == NULL)
+ {
+ elog(INFO, "Stopping and cleaning all the datanodes.\n");
+ stop_datanode_master_all("immediate");
+ clean_datanode_master_all();
+ if (isVarYes(VAR_datanodeSlave))
+ {
+ stop_datanode_slave_all("immediate");
+ clean_datanode_slave_all();
+ }
+ }
+ else if (TestToken("all"))
+ {
+ GetToken();
+ if (token == NULL)
+ {
+ elog(INFO, "Stopping and cleaning all the datanodes.\n");
+ stop_datanode_master_all("immediate");
+ clean_datanode_master_all();
+ if (isVarYes(VAR_datanodeSlave))
+ {
+ stop_datanode_slave_all("immediate");
+ clean_datanode_slave_all();
+ }
+ }
+ else
+ {
+ char **nodeList = NULL;
+				elog(INFO, "Stopping and cleaning specified datanodes.\n");
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ stop_datanode_master(nodeList, "immediate");
+ clean_datanode_master(nodeList);
+ if (isVarYes(VAR_datanodeSlave))
+ {
+ stop_datanode_slave(nodeList, "immediate");
+ clean_datanode_slave(nodeList);
+ }
+				CleanArray(nodeList);
+			}
+ }
+ else if (TestToken("master"))
+ {
+ GetToken();
+ if (token == NULL)
+ {
+ elog(INFO, "Stopping and cleaning all the datanode masters.\n");
+ stop_datanode_master_all("immediate");
+ clean_datanode_master_all();
+ }
+ else
+ {
+ char **nodeList = NULL;
+ elog(INFO, "Stopping and cleaning specified datanode masters.\n");
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ stop_datanode_master(nodeList, "immediate");
+ clean_datanode_master(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else if (TestToken("slave"))
+ {
+ elog(INFO, "Stopping and cleaning specified datanode slaves.\n");
+ if (!isVarYes(VAR_datanodeSlave))
+ {
+ elog(ERROR, "ERROR: Datanode slave is not configured.\n");
+ return;
+ }
+ GetToken();
+ if (token == NULL)
+ {
+ stop_datanode_slave_all("immediate");
+ clean_datanode_slave_all();
+ }
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ stop_datanode_slave(nodeList, "immediate");
+ clean_datanode_slave(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ stop_datanode_master(nodeList, "immediate");
+ clean_datanode_master(nodeList);
+ if (isVarYes(VAR_datanodeSlave))
+ {
+ stop_datanode_slave(nodeList, "immediate");
+ clean_datanode_slave(nodeList);
+ }
+ CleanArray(nodeList);
+ }
+ }
+ else
+ {
+		elog(INFO, "Stopping and cleaning specified nodes.\n");
+ do
+ {
+ switch(getNodeType(token))
+ {
+ case NodeType_UNDEF:
+ elog(ERROR, "ERROR: %s is not found, skipping\n", token);
+ continue;
+ case NodeType_GTM:
+ elog(INFO, "Stopping and cleaning GTM master.\n");
+ if (cmdList == NULL)
+ cmdList = initCmdList();
+ addCmd(cmdList, prepare_stopGtmMaster());
+ addCmd(cmdList, prepare_cleanGtmMaster());
+ if (isVarYes(VAR_gtmSlave))
+ {
+ elog(INFO, "Stopping and cleaning GTM slave.\n");
+ addCmd(cmdList, prepare_stopGtmSlave());
+ addCmd(cmdList, prepare_cleanGtmSlave());
+ }
+ continue;
+ case NodeType_GTM_PROXY:
+ elog(INFO, "Stopping and cleaning GTM proxy %s.\n", token);
+ if (cmdList == NULL)
+ cmdList = initCmdList();
+ addCmd(cmdList, prepare_stopGtmProxy(token));
+ addCmd(cmdList, prepare_cleanGtmProxy(token));
+ continue;
+ case NodeType_COORDINATOR:
+ elog(INFO, "Stopping and cleaning coordinator master %s\n", token);
+ if (cmdList == NULL)
+ cmdList = initCmdList();
+ addCmd(cmdList, prepare_stopCoordinatorMaster(token, "immediate"));
+ addCmd(cmdList, prepare_cleanCoordinatorMaster(token));
+ if (isVarYes(VAR_coordSlave))
+ {
+ elog(INFO, "Stopping and cleaning coordinator slave %s\n", token);
+ addCmd(cmdList, prepare_stopCoordinatorSlave(token, "immediate"));
+ addCmd(cmdList, prepare_cleanCoordinatorSlave(token));
+ }
+ continue;
+ case NodeType_DATANODE:
+ elog(INFO, "Stopping and cleaning datanode master %s\n", token);
+ if (cmdList == NULL)
+ cmdList = initCmdList();
+ addCmd(cmdList, prepare_stopDatanodeMaster(token, "immediate"));
+ addCmd(cmdList, prepare_cleanDatanodeMaster(token));
+					if (isVarYes(VAR_datanodeSlave))
+ {
+ elog(INFO, "Stopping and cleaning datanode slave %s\n", token);
+ addCmd(cmdList, prepare_stopDatanodeSlave(token, "immediate"));
+ addCmd(cmdList, prepare_cleanDatanodeSlave(token));
+ }
+ continue;
+ case NodeType_SERVER:
+ elog(ERROR, "ERROR: clearing host is not supported yet. Skipping\n");
+ continue;
+ default:
+ elog(ERROR, "ERROR: internal error.\n");
+ continue;
+ }
+ } while(GetToken());
+ if (cmdList)
+ {
+ int rc;
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ elog(INFO, "Done.\n");
+ }
+ return;
+ }
+}
+
+static void do_configure_command(char *line)
+{
+ char *token;
+ char **nodeList = NULL;
+
+ if (!GetToken() || TestToken("all"))
+ {
+ configure_nodes_all();
+ }
+ else
+ {
+ bool is_datanode;
+
+ if (TestToken("datanode"))
+ is_datanode = true;
+ else if (TestToken("coordinator"))
+ is_datanode = false;
+ else
+ {
+ elog(ERROR, "ERROR: must specify either coordinator or datanode\n");
+ return;
+ }
+
+ while (GetToken())
+ AddMember(nodeList, token);
+
+ if (is_datanode)
+ configure_datanodes(nodeList);
+ else
+ configure_nodes(nodeList);
+
+ CleanArray(nodeList);
+ }
+}
+
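+/*
+ * Pick a coordinator at random, retrying until one whose master server is
+ * configured is found.  Assumes at least one coordinator master is
+ * configured; otherwise the loop never terminates.
+ */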
+static int selectCoordinator(void)
+{
+ int sz = arraySizeName(VAR_coordNames);
+ int i;
+
+ for (;;)
+ {
+ i = rand() % sz;
+ if (is_none(aval(VAR_coordMasterServers)[i]))
+ continue;
+ else
+ return i;
+ }
+ return -1;
+}
+
+
+static int show_Resource(char *datanodeName, char *databasename, char *username)
+{
+ int cdIdx = selectCoordinator();
+ int dnIdx = datanodeIdx(datanodeName);
+ FILE *f;
+ char queryFname[MAXPATH+1];
+
+ elog(NOTICE, "NOTICE: showing tables in the datanode '%s', database %s, user %s\n",
+ datanodeName,
+ databasename ? databasename : "NULL",
+ username ? username : "NULL");
+ if (dnIdx < 0)
+ {
+ elog(ERROR, "ERROR: %s is not a datanode.\n", datanodeName);
+ return 1;
+ }
+ createLocalFileName(GENERAL, queryFname, MAXPATH);
+ if ((f = fopen(queryFname, "w")) == NULL)
+ {
+ elog(ERROR, "ERROR: Could not create temporary file %s, %s\n", queryFname, strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "SELECT pg_class.relname relation,\n"
+ " CASE\n"
+ " WHEN pclocatortype = 'H' THEN 'Hash'\n"
+ " WHEN pclocatortype = 'M' THEN 'Modulo'\n"
+ " WHEN pclocatortype = 'N' THEN 'Round Robin'\n"
+ " WHEN pclocatortype = 'R' THEN 'Replicate'\n"
+ " ELSE 'Unknown'\n"
+ " END AS distribution,\n"
+ " pg_attribute.attname attname,\n"
+ " pgxc_node.node_name nodename\n"
+ " FROM pg_class, pgxc_class, pg_attribute, pgxc_node\n"
+ " WHERE pg_class.oid = pgxc_class.pcrelid\n"
+ " and pg_class.oid = pg_attribute.attrelid\n"
+ " and pgxc_class.pcattnum = pg_attribute.attnum\n"
+ " and pgxc_node.node_name = '%s'\n"
+ " and pgxc_node.oid = ANY (pgxc_class.nodeoids)\n"
+ " UNION\n"
+ " SELECT pg_class.relname relation,\n"
+ " CASE\n"
+ " WHEN pclocatortype = 'H' THEN 'Hash'\n"
+ " WHEN pclocatortype = 'M' THEN 'Modulo'\n"
+ " WHEN pclocatortype = 'N' THEN 'Round Robin'\n"
+ " WHEN pclocatortype = 'R' THEN 'Replicate'\n"
+ " ELSE 'Unknown'\n"
+ " END AS distribution,\n"
+ " '- none -' attname,\n"
+ " pgxc_node.node_name nodename\n"
+ " FROM pg_class, pgxc_class, pg_attribute, pgxc_node\n"
+ " WHERE pg_class.oid = pgxc_class.pcrelid\n"
+ " and pg_class.oid = pg_attribute.attrelid\n"
+ " and pgxc_class.pcattnum = 0\n"
+ " and pgxc_node.node_name = '%s'\n"
+ " and pgxc_node.oid = ANY (pgxc_class.nodeoids)\n"
+ " ;\n",
+ datanodeName, datanodeName);
+ fclose(f);
+ if (databasename == NULL)
+ doImmediateRaw("psql -p %d -h %s --quiet -f %s",
+ atoi(aval(VAR_coordPorts)[cdIdx]), aval(VAR_coordMasterServers)[cdIdx],
+ queryFname);
+ else if (username == NULL)
+ doImmediateRaw("psql -p %d -h %s --quiet -f %s -d %s",
+ atoi(aval(VAR_coordPorts)[cdIdx]), aval(VAR_coordMasterServers)[cdIdx],
+ queryFname, databasename);
+ else
+ doImmediateRaw("psql -p %d -h %s --quiet -f %s -d %s -U %s",
+ atoi(aval(VAR_coordPorts)[cdIdx]), aval(VAR_coordMasterServers)[cdIdx],
+ queryFname, databasename, username);
+ doImmediateRaw("rm -f %s", queryFname);
+ return 0;
+}
+
+/*
+ * =======================================================================================
+ *
+ * Loop of main command processor
+ *
+ * ======================================================================================
+ */
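+/*
+ * Read command lines from inf (via readline, with history, when attached
+ * to a tty) and hand each line to do_singleLine() until EOF or a quit
+ * command is seen.
+ */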
+void do_command(FILE *inf, FILE *outf)
+{
+ int istty = ((inf == stdin) && isatty(fileno(stdin)));
+ int interactive = ((inf == stdin) && (outf == stdout));
+ char *wkline = NULL;
+ char buf[MAXLINE+1];
+ int rc;
+ char histfile[MAXPATH + 20];
+
+#define HISTFILE ".pgxc_ctl_history"
+
+ histfile[0] = '\0';
+ if (pgxc_ctl_home[0] != '\0')
+ {
+ snprintf(histfile, MAXPATH + 20, "%s/%s", pgxc_ctl_home, HISTFILE);
+ read_history(histfile);
+ }
+
+ /*
+ * Set the long jump path so that we can come out straight here in case of
+	 * an error. There is not much to reinitialize except maybe freeing up the
+	 * wkline buffer and resetting the long jump buffer pointer. But if
+	 * anything else needs to be reset, that should happen in the following block
+ */
+ if (setjmp(dcJmpBufMainLoop) != 0)
+ {
+ whereToJumpMainLoop = NULL;
+ if (wkline)
+ freeAndReset(wkline);
+ }
+
+ for (;;)
+ {
+ if (wkline)
+ free(wkline);
+ if (istty)
+ {
+ wkline = readline(sval(VAR_xc_prompt));
+ if (wkline == NULL)
+ {
+ wkline = Strdup("q\n");
+ putchar('\n');
+ }
+ else if (wkline[0] != '\0')
+ add_history(wkline);
+ strncpy(buf, wkline, MAXLINE);
+ }
+ else
+ {
+ if (interactive)
+ fputs(sval(VAR_xc_prompt), stdout);
+ if (fgets(buf, MAXLINE+1, inf) == NULL)
+ break;
+ }
+ trimNl(buf);
+ writeLogOnly("PGXC %s\n", buf);
+
+ whereToJumpMainLoop = &dcJmpBufMainLoop;
+ rc = do_singleLine(buf, wkline);
+ whereToJumpMainLoop = NULL;
+
+ freeAndReset(wkline);
+ if (rc) /* "q" command was found */
+ {
+ if (histfile[0] != '\0')
+ write_history(histfile);
+ return;
+ }
+ }
+}
+
+
+
+/*
+ * ---------------------------------------------------------------------------
+ *
+ * Single line command processor
+ *
+ * -----------------------------------------------------------------------------
+ */
+int do_singleLine(char *buf, char *wkline)
+{
+ char *token;
+ char *line = buf;
+ GetToken();
+ /*
+	 * Parse command
+ */
+ if (!token) return 0;
+ if (TestToken("q") || TestToken("quit") || TestToken("exit"))
+ /* Exit command */
+ return 1;
+ else if (TestToken("echo"))
+ {
+ do_echo_command(line);
+ return 0;
+ }
+ else if (TestToken("deploy"))
+ {
+ do_deploy(line);
+ return 0;
+ }
+ else if (TestToken("prepare"))
+ {
+ char *config_path = NULL;
+ ConfigType config_type = CONFIG_COMPLETE;
+
+ if (GetToken() != NULL)
+ {
+ if (TestToken("config"))
+ GetToken();
+
+ if (TestToken("empty"))
+ config_type = CONFIG_EMPTY;
+ else if (TestToken("minimal"))
+ config_type = CONFIG_MINIMAL;
+ else if (TestToken("complete"))
+ config_type = CONFIG_COMPLETE;
+ else if (token)
+ config_path = strdup(token);
+
+ if (GetToken() != NULL)
+ config_path = strdup(token);
+ }
+
+ do_prepareConfFile(config_path, config_type);
+ return 0;
+ }
+ else if (TestToken("kill"))
+ {
+ do_kill_command(line);
+ return 0;
+ }
+ else if (TestToken("init"))
+ {
+ do_init_command(line);
+ return 0;
+ }
+ else if (TestToken("start"))
+ {
+ do_start_command(line);
+ return 0;
+ }
+ else if (TestToken("stop"))
+ {
+ do_stop_command(line);
+ return 0;
+ }
+ else if (TestToken("monitor"))
+ {
+ do_monitor_command(line);
+ return 0;
+ }
+ else if (TestToken("failover"))
+ {
+ do_failover_command(line);
+ return 0;
+ }
+ else if (TestToken("reconnect"))
+ {
+ do_reconnect_command(line);
+ return 0;
+ }
+ else if (TestToken("add"))
+ {
+ do_add_command(line);
+ return 0;
+ }
+ else if (TestToken("remove"))
+ {
+ do_remove_command(line);
+ return 0;
+ }
+ /*
+	 * Show command ... show [variable | var] varname ...
+ * show [variable | var] all
+ * show config[uration] ....
+ */
+ else if (TestToken("show"))
+ {
+ if (GetToken() == NULL)
+ elog(ERROR, "ERROR: Please specify what to show\n");
+ else
+ {
+ if (TestToken("variable") || TestToken("var"))
+ {
+ /* Variable */
+ if (GetToken() == NULL)
+ elog(ERROR, "ERROR: Please specify variable name to print\n");
+ else if (TestToken("all"))
+ print_vars();
+ else while (line)
+ {
+ print_var(token);
+ GetToken();
+ }
+ }
+ else if (TestToken("configuration") || TestToken("config") || TestToken("configure"))
+ /* Configuration */
+ show_configuration(line);
+ else if (TestToken("resource"))
+ {
+ if ((GetToken() == NULL) || !TestToken("datanode"))
+ elog(ERROR, "ERROR: please specify datanode for show resource command.\n");
+ else
+ {
+ char *datanodeName = NULL;
+ char *dbname = NULL;
+ char *username = NULL;
+ if (GetToken() == NULL)
+ elog(ERROR, "ERROR: please specify datanode name\n");
+ else
+ {
+ datanodeName = Strdup(token);
+ if (GetToken())
+ {
+ dbname = Strdup(token);
+ if (GetToken())
+ username = Strdup(token);
+ }
+ show_Resource(datanodeName, dbname, username);
+ Free(datanodeName);
+ Free(dbname);
+ Free(username);
+ }
+ }
+ }
+ else
+ elog(ERROR, "ERROR: Cannot show %s now, sorry.\n", token);
+ }
+ return 0;
+ }
+ /*
+ * Log command log variable varname ...
+ * log variable all
+	 *             log msg arbitrary_message_to_the_end_of_the_line
+ */
+ else if (TestToken("log"))
+ {
+ if (GetToken() == NULL)
+ elog(ERROR, "ERROR: Please specify what to log\n");
+ else
+ {
+ if (TestToken("variable") || TestToken("var"))
+ {
+ if (GetToken() == NULL)
+ elog(ERROR, "ERROR: Please specify variable name to log\n");
+ else if (TestToken("all"))
+ print_vars();
+ else while (line)
+ {
+ print_var(token);
+ GetToken();
+ }
+ fflush(logFile);
+ }
+ else if (TestToken("msg") || TestToken("message"))
+ writeLogOnly("USERLOG: \"%s\"\n", line);
+ else
+ elog(ERROR, "ERROR: Cannot log %s in this version.\n", token);
+ }
+ return 0;
+ }
+ else if (TestToken("configure"))
+ {
+ do_configure_command(line);
+ return 0;
+ }
+ else if (testToken("Psql"))
+ {
+ int idx;
+ char *cmdLine;
+
+ cmdLine = Strdup(line);
+ if (GetToken() && TestToken("-"))
+ {
+ if (!GetToken())
+ elog(ERROR, "ERROR: Please specify coordinator name after '-'.\n");
+ else if ((idx = coordIdx(token)) < 0)
+ elog(ERROR, "ERROR: Specified node %s is not a coordinator.\n", token);
+ else
+ doImmediateRaw("psql -p %d -h %s %s",
+ atoi(aval(VAR_coordPorts)[idx]),
+ aval(VAR_coordMasterServers)[idx],
+ line);
+ }
+ else
+ {
+ idx = selectCoordinator();
+ elog(INFO, "Selected %s.\n", aval(VAR_coordNames)[idx]);
+ doImmediateRaw("psql -p %d -h %s %s",
+ atoi(aval(VAR_coordPorts)[idx]),
+ aval(VAR_coordMasterServers)[idx],
+ cmdLine);
+ }
+ Free(cmdLine);
+ return 0;
+ }
+ else if (testToken("Createdb"))
+ {
+ int idx;
+ char *cmdLine;
+
+ cmdLine = Strdup(line);
+ if (GetToken() && TestToken("-"))
+ {
+ if (!GetToken())
+ elog(ERROR, "ERROR: Please specify coordinator name after '-'.\n");
+ else if ((idx = coordIdx(token)) < 0)
+ elog(ERROR, "ERROR: Specified node %s is not a coordinator.\n", token);
+ else
+ doImmediateRaw("createdb -p %d -h %s %s",
+ atoi(aval(VAR_coordPorts)[idx]),
+ aval(VAR_coordMasterServers)[idx],
+ line);
+ }
+ else
+ {
+ idx = selectCoordinator();
+ elog(INFO, "Selected %s.\n", aval(VAR_coordNames)[idx]);
+ doImmediateRaw("createdb -p %d -h %s %s",
+ atoi(aval(VAR_coordPorts)[idx]),
+ aval(VAR_coordMasterServers)[idx],
+ cmdLine);
+ }
+ Free(cmdLine);
+ return 0;
+ }
+ else if (testToken("Createuser"))
+ {
+ int idx;
+ char *cmdLine;
+
+ cmdLine = Strdup(line);
+ if (GetToken() && TestToken("-"))
+ {
+ if (!GetToken())
+ elog(ERROR, "ERROR: Please specify coordinator name after '-'.\n");
+ else if ((idx = coordIdx(token)) < 0)
+ elog(ERROR, "ERROR: Specified node %s is not a coordinator.\n", token);
+ else
+ doImmediateRaw("createuser -p %d -h %s %s",
+ atoi(aval(VAR_coordPorts)[idx]),
+ aval(VAR_coordMasterServers)[idx],
+ line);
+ }
+ else
+ {
+ idx = selectCoordinator();
+ elog(INFO, "Selected %s.\n", aval(VAR_coordNames)[idx]);
+ doImmediateRaw("createuser -p %d -h %s %s",
+ atoi(aval(VAR_coordPorts)[idx]),
+ aval(VAR_coordMasterServers)[idx],
+ cmdLine);
+ }
+ Free(cmdLine);
+ return 0;
+ }
+ else if (TestToken("test"))
+ {
+ do_test(line);
+ return 0;
+ }
+ else if (TestToken("set"))
+ {
+ do_set(line);
+ return 0;
+ }
+ /*
+ * Clean command
+ *
+ * clean [all |
+ * gtm [ all | master | slave ] |
+ * gtm_proxy [ all | nodename ... ]
+ * coordinator [[all | master | slave ] [nodename ... ]] |
+ * datanode [ [all | master | slave] [nodename ... ]
+ */
+ else if (TestToken("clean"))
+ {
+ do_clean_command(line);
+ }
+ else if (TestToken("cd"))
+ {
+ /*
+ * CD command
+ */
+ if (GetToken() == NULL)
+ Chdir(pgxc_ctl_home, FALSE);
+ else
+ Chdir(token, FALSE);
+ return 0;
+ }
+ else if (TestToken("ssh"))
+ {
+ doImmediateRaw("%s", wkline);
+ }
+ else if (TestToken("help"))
+ {
+ do_show_help(line);
+ }
+ else
+ {
+ doImmediateRaw("%s", wkline);
+ return 0;
+ }
+ return 0;
+}
+
+static void
+show_all_help()
+{
+ printf("You are using pgxc_ctl, the configuration utility for PGXL\n"
+ "Type:\n"
+ " help <command>\n"
+ " where <command> is either add, Createdb, Createuser, clean,\n"
+ " configure, deploy, failover, init, kill, log, monitor,\n"
+ " prepare, q, reconnect, remove, set, show, start, \n"
+ " stop or unregister\n");
+}
+
+static void
+do_show_help(char *line)
+{
+ char *token;
+
+ GetToken();
+ if ((token == NULL) || TestToken("all"))
+ {
+ show_all_help();
+ return;
+ }
+
+ if (TestToken("add"))
+ {
+ printf(
+ "\n"
+ "add gtm slave slave_name host port dir\n"
+ "add gtm_proxy name host port dir\n"
+ "add coordinator master name host port pooler dir extra_conf extra_pghba\n"
+ "add coordinator slave name host port pooler dir archDir\n"
+ "add datanode master name host port pooler dir xlogdir restore_datanode_name extra_conf extra_pghba\n"
+ "add datanode slave name host port pooler dir xlogdir archDir\n"
+ "\n"
+ "Add the specified node to your postgres-xl cluster:\n"
+ "For more details, please see the pgxc_ctl documentation\n"
+ "\n"
+ );
+ }
+ else if (TestToken("Createdb"))
+ {
+ printf(
+ "\n"
+ "Createdb [ - coordinator ] createdb_option ...\n"
+ "\n"
+ "Invokes createdb utility to create a new database using specified coordinator\n"
+ "For more details, please see the pgxc_ctl documentation\n"
+ "\n"
+ );
+ }
+ else if (TestToken("Createuser"))
+ {
+ printf(
+ "\n"
+			"Createuser [ - coordinator ] createuser_option ...\n"
+ "\n"
+ "Invokes createuser utility to create a new user using specified coordinator\n"
+ "For more details, please see the pgxc_ctl documentation\n"
+ "\n"
+ );
+ }
+ else if (TestToken("clean"))
+ {
+ printf(
+ "\n"
+ "clean all\n"
+ "clean gtm [all | master | slave]\n"
+ "clean gtm_proxy [all | nodename ... ]\n"
+ "clean coordinator [[all | master | slave ] [nodename ... ]]\n"
+ "clean datanode [[all | master | slave ] [nodename ... ]]\n"
+ "\n"
+ "Stop specified node in immediate mode and clean all resources including data directory\n"
+ "For more details, please see the pgxc_ctl documentation\n"
+ "\n"
+ );
+ }
+ else if (TestToken("configure"))
+ {
+ printf("\n"
+ "configure all\n"
+ "configure datanode nodename ...\n"
+ "configure coordinator nodename ...\n"
+ "\n"
+ "Configure specified node with the node information and reload pooler information\n"
+ "For more details, please see the pgxc_ctl documentation\n"
+ "\n"
+ );
+ }
+ else if (TestToken("deploy"))
+ {
+ printf(
+ "\n"
+ "deploy [ all | host ... ]\n"
+ "\n"
+ "Deploys postgres-xl binaries and other installation material to specified hosts\n"
+ "For more details, please see the pgxc_ctl documentation\n"
+ "\n"
+ );
+ }
+ else if (TestToken("failover"))
+ {
+ printf(
+ "\n"
+ "failover [ gtm | coordinator nodename | datanode nodename | nodename ]\n"
+ "\n"
+ "Failover specified node to its master\n"
+ "For more details, please see the pgxc_ctl documentation\n"
+ "\n"
+ );
+ }
+ else if (TestToken("init"))
+ {
+ printf(
+ "\n"
+ "init [force] all\n"
+ "init [force] nodename ...\n"
+ "init [force] gtm [ master | slave | all ]\n"
+ "init [force] gtm_proxy [ all | nodename ... ]\n"
+ "init [force] coordinator nodename ...\n"
+ "init [force] coordinator [ master | slave ] [ all | nodename ... ]\n"
+ "init [force] datanode nodename ...\n"
+ "init [force] datanode [ master | slave ] [ all | nodename ... ]\n"
+ "\n"
+ "Initializes specified nodes.\n"
+ " [force] option removes existing data directories even if they are not empty\n"
+ "For more details, please see the pgxc_ctl documentation\n"
+ "\n"
+ );
+
+ }
+ else if (TestToken("kill"))
+ {
+ printf(
+ "\n"
+ "kill all\n"
+ "kill nodename ...\n"
+ "kill gtm [ master | slave | all ]\n"
+ "kill gtm_proxy [ all | nodename ... ]\n"
+ "kill coordinator nodename ...\n"
+ "kill coordinator [ master | slave ] [ all | nodename ... ]\n"
+ "kill datanode nodename ...\n"
+ "kill datanode [ master | slave ] [ all | nodename ... ]\n"
+ "\n"
+ "Kills specified node:\n"
+ "For more details, please see the pgxc_ctl documentation\n"
+ "\n"
+ );
+
+ }
+ else if (TestToken("log"))
+ {
+ printf(
+ "\n"
+ "log [ variable | var ] varname\n"
+ "log [ message | msg ] message_body\n"
+ "\n"
+ "Prints the specified contents to the log file\n"
+ "For more details, please see the pgxc_ctl documentation\n"
+ "\n"
+ );
+ }
+ else if (TestToken("monitor"))
+ {
+ printf(
+ "\n"
+ "monitor all\n"
+ "monitor nodename ...\n"
+ "monitor gtm [ master | slave | all ]\n"
+ "monitor gtm_proxy [ all | nodename ... ]\n"
+ "monitor coordinator nodename ...\n"
+ "monitor coordinator [ master | slave ] [ all | nodename ... ]\n"
+ "monitor datanode nodename ...\n"
+ "monitor datanode [ master | slave ] [ all | nodename ... ]\n"
+ "\n"
+ "Monitors if specified nodes are running\n"
+ "For more details, please see the pgxc_ctl documentation\n"
+ "\n"
+ );
+ }
+ else if (TestToken("prepare"))
+ {
+ printf(
+ "\n"
+ "prepare [ path ]\n"
+ "\n"
+			"Writes a pgxc_ctl configuration file template to the specified file\n"
+ "For more details, please see the pgxc_ctl documentation\n"
+ "\n"
+ );
+ }
+ else if (TestToken("Psql"))
+ {
+ printf(
+ "\n"
+ "Psql [ - coordinator ] psql_option ... \n"
+ "\n"
+			"Invokes psql targeted to the specified coordinator\n"
+ "For more details, please see the pgxc_ctl documentation\n"
+ "\n"
+ );
+ }
+ else if (TestToken("q"))
+ {
+ printf(
+ "\n"
+ "q | quit | exit\n"
+ "\n"
+ "Exits pgxc_ctl\n"
+ "For more details, please see the pgxc_ctl documentation\n"
+ "\n"
+ );
+ }
+ else if (TestToken("reconnect"))
+ {
+ printf(
+ "\n"
+ "reconnect gtm_proxy [ all | nodename ... ]\n"
+ "\n"
+ "Reconnects specified gtm_proxy to new gtm\n"
+ "For more details, please see the pgxc_ctl documentation\n"
+ "\n"
+ );
+ }
+ else if (TestToken("remove"))
+ {
+ printf(
+ "\n"
+ "remove gtm slave\n"
+ "remove gtm_proxy nodename [ clean ]\n"
+ "remove coordinator [ master| slave ] nodename [ clean ]\n"
+ "remove datanode [ master| slave ] nodename [ clean ]\n"
+ "\n"
+ "Removes the specified node from the cluster\n"
+ "For more details, please see the pgxc_ctl documentation\n"
+ "\n"
+ );
+ }
+ else if (TestToken("set"))
+ {
+ printf(
+ "\n"
+ "set varname value ...\n"
+ "\n"
+ "Set variable value\n"
+ "For more details, please see the pgxc_ctl documentation\n"
+ "\n"
+ );
+ }
+ else if (TestToken("show"))
+ {
+ printf(
+ "\n"
+ "show [ configuration | configure | config ] [ all | basic ]\n"
+ "show [ configuration | configure | config ] host hostname ... \n"
+ "show [ configuration | configure | config ] gtm [ all | master | slave ]\n"
+ "show [ configuration | configure | config ] gtm_proxy [ all | gtm_proxy_name ... ]\n"
+ "show [ configuration | configure | config ] [ coordinator | datanode ] [ all | master | slave ] nodename ...\n"
+ "\n"
+ "Shows postgres-xl configuration\n"
+ "\n"
+ "show resource datanode datanodename [ databasename [ username ] ]\n"
+ "\n"
+			"Shows the table names the specified datanode is involved in\n"
+ "\n"
+ "show [ variable | var ] [ all | varname ... ]\n"
+ "\n"
+ "Displays configuration or variable name and its value\n"
+ "For more details, please see the pgxc_ctl documentation\n"
+ );
+ }
+ else if (TestToken("start"))
+ {
+ printf(
+ "\n"
+ "start all\n"
+ "start nodename ...\n"
+ "start gtm [ master | slave | all ]\n"
+ "start gtm_proxy [ all | nodename ... ]\n"
+ "start coordinator nodename ...\n"
+ "start coordinator [ master | slave ] [ all | nodename ... ]\n"
+ "start datanode nodename ...\n"
+ "start datanode [ master | slave ] [ all | nodename ... ]\n"
+ "\n"
+ "Starts specified node\n"
+ "For more details, please see the pgxc_ctl documentation\n"
+ "\n"
+ );
+ }
+ else if (TestToken("stop"))
+ {
+ printf(
+ "\n"
+ "stop [ -m smart | fast | immediate ] all\n"
+ "stop gtm [ master | slave | all ]\n"
+ "stop gtm_proxy [ all | nodename ... ]\n"
+ "stop [ -m smart | fast | immediate ] coordinator nodename ... \n"
+ "stop [ -m smart | fast | immediate ] coordinator [ master | slave ] [ all | nodename ... ] \n"
+ "stop [ -m smart | fast | immediate ] datanode nodename ... \n"
+ "stop [ -m smart | fast | immediate ] datanode [ master | slave ] [ all | nodename ... ] \n"
+ "\n"
+ "Stops specified node\n"
+ "For more details, please see the pgxc_ctl documentation\n"
+ "\n"
+ );
+ }
+ else if (TestToken("unregister"))
+ {
+ printf(
+ "\n"
+ "unregister unregister_option ...\n"
+ "\n"
+ "Unregisteres specified node from the gtm\n"
+ "For more details, please see the pgxc_ctl documentation\n"
+ "\n"
+ );
+ }
+ else
+ {
+ printf(
+ "\n"
+ "Unrecognized command: such commands are sent to shell for execution\n"
+ "\n"
+ );
+ }
+}
+
+int
+get_any_available_coord(int except)
+{
+ int ii;
+ for (ii = 0; aval(VAR_coordMasterServers)[ii]; ii++)
+ {
+ if (ii == except)
+ continue;
+
+ if (!is_none(aval(VAR_coordMasterServers)[ii]))
+ {
+ if (pingNode(aval(VAR_coordMasterServers)[ii],
+ aval(VAR_coordPorts)[ii]) == 0)
+ return ii;
+ }
+ }
+
+ /*
+ * This could be the first coordinator that is being added.
+ * This call would happen *after* expanding the array to
+ * accommodate the new coordinator. Hence we check for the
+ * size being more than 1.
+ */
+ if (arraySizeName(VAR_coordNames) > 1)
+ {
+ for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
+ {
+ if (!is_none(aval(VAR_coordNames)[ii]))
+ {
+ elog(ERROR, "ERROR: failed to find any running coordinator");
+ return -1;
+ }
+ }
+ }
+ return -1;
+}
+
+int
+get_any_available_datanode(int except)
+{
+ int ii;
+ for (ii = 0; aval(VAR_datanodeMasterServers)[ii]; ii++)
+ {
+ if (ii == except)
+ continue;
+
+ if (!is_none(aval(VAR_datanodeMasterServers)[ii]))
+ {
+ if (pingNode(aval(VAR_datanodeMasterServers)[ii],
+ aval(VAR_datanodePorts)[ii]) == 0)
+ return ii;
+ }
+ }
+
+ /*
+ * This could be the first datanode that is being added.
+ * This call would happen *after* expanding the array to
+ * accommodate the new datanode. Hence we check for the
+ * size being more than 1.
+ */
+ if (arraySizeName(VAR_datanodeNames) > 1)
+ {
+ for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+ {
+ if (!is_none(aval(VAR_datanodeNames)[ii]))
+ {
+ elog(ERROR, "ERROR: failed to find any running datanode");
+ return -1;
+ }
+ }
+ }
+ return -1;
+}
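+
+/*
+ * A minimal usage sketch for the two helpers above (the caller code is
+ * hypothetical): pick any reachable coordinator and use its server/port.
+ *
+ *     int idx = get_any_available_coord(-1);      /* -1: exclude no index */
+ *     if (idx >= 0)
+ *         connect_to(aval(VAR_coordMasterServers)[idx],
+ *                    aval(VAR_coordPorts)[idx]);  /* connect_to() is hypothetical */
+ */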
diff --git a/contrib/pgxc_ctl/do_command.h b/contrib/pgxc_ctl/do_command.h
new file mode 100644
index 0000000000..b39a272516
--- /dev/null
+++ b/contrib/pgxc_ctl/do_command.h
@@ -0,0 +1,19 @@
+/*-------------------------------------------------------------------------
+ *
+ * do_command.h
+ *
+ * Main command module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef DO_COMMAND_H
+#define DO_COMMAND_H
+
+extern int forceInit;
+extern void do_command(FILE *inf, FILE *outf);
+extern int do_singleLine(char *buf, char *wkline);
+extern int get_any_available_coord(int except);
+extern int get_any_available_datanode(int except);
+#endif /* DO_COMMAND_H */
diff --git a/contrib/pgxc_ctl/do_shell.c b/contrib/pgxc_ctl/do_shell.c
new file mode 100644
index 0000000000..5165ef3564
--- /dev/null
+++ b/contrib/pgxc_ctl/do_shell.c
@@ -0,0 +1,741 @@
+/*-------------------------------------------------------------------------
+ *
+ * do_shell.c
+ *
+ * Shell control module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/*
+ * This module provides a basic infrastructure to run various shell scripts.
+ *
+ * Basically, for a single operation, when more than one server is involved,
+ * the commands can be run in parallel. Within each parallel execution, more
+ * than one command can be run in series.
+ *
+ * cmdList_t contains one or more command trains that can be run in parallel.
+ * A train is a chain of cmd_t structures contained in the cmdList_t; each
+ * train represents a series of shell scripts.
+ *
+ * For each command, stdout will be handled automatically in this module.
+ * Stdin can be provided by callers.
+ */
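+
+/*
+ * A minimal usage sketch, assuming a hypothetical host "node1": one command
+ * train of two shell commands run in series; each addCmd() contributes one
+ * more train, and doCmdList() runs the trains in parallel.
+ *
+ *     cmdList_t *cmds = initCmdList();
+ *     cmd_t *c1 = initCmd("node1");
+ *     cmd_t *c2;
+ *     snprintf(newCommand(c1), MAXLINE, "mkdir -p /tmp/work");
+ *     appendCmdEl(c1, (c2 = initCmd("node1")));
+ *     snprintf(newCommand(c2), MAXLINE, "touch /tmp/work/ready");
+ *     addCmd(cmds, c1);
+ *     (void) doCmdList(cmds);
+ *     cleanCmdList(cmds);
+ */
+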
+#include <stdio.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <signal.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <setjmp.h>
+#include <string.h>
+#include <readline/readline.h>
+#include <readline/history.h>
+
+#include "pgxc_ctl.h"
+#include "variables.h"
+#include "varnames.h"
+#include "pgxc_ctl_log.h"
+#include "config.h"
+#include "do_shell.h"
+#include "utils.h"
+
+typedef unsigned int xc_status;
+static int file_sn = 0;
+static int nextSize(int size);
+static char *getCleanHostname(char *buf, int len);
+#if 0
+static void waitTypeReturn(void);
+static void echoPid(pid_t pid);
+#endif
+static char *allocActualCmd(cmd_t *cmd);
+static void prepareStdout(cmdList_t *cmdList);
+
+/*
+ * SIGINT handler
+ */
+jmp_buf *whereToJumpDoShell = NULL;
+jmp_buf dcJmpBufDoShell;
+jmp_buf *whereToJumpMainLoop = NULL;
+jmp_buf dcJmpBufMainLoop;
+pqsigfunc old_HandlerDoShell = NULL;
+void do_shell_SigHandler(int signum);
+
+/*
+ * Signal handler (SIGINT only)
+ */
+void do_shell_SigHandler(int signum)
+{
+ if (whereToJumpDoShell)
+ longjmp(*whereToJumpDoShell, 1);
+ else
+ signal(SIGINT,do_shell_SigHandler);
+}
+
+/*
+ * Stdout/stderr/stdin will be created at $LocalTmpDir.
+ *
+ */
+char *createLocalFileName(FileType type, char *buf, int len)
+{
+ /*
+ * Filename is $LocalTmpDir/type_pid_serno.
+ */
+ switch (type)
+ {
+ case STDIN:
+ snprintf(buf, len-1, "%s/STDIN_%d_%d", sval(VAR_localTmpDir), getpid(), file_sn++);
+ break;
+ case STDOUT:
+ snprintf(buf, len-1, "%s/STDOUT_%d_%d", sval(VAR_localTmpDir), getpid(), file_sn++);
+ break;
+ case STDERR:
+ snprintf(buf, len-1, "%s/STDERR_%d_%d", sval(VAR_localTmpDir), getpid(), file_sn++);
+ break;
+ case GENERAL:
+ snprintf(buf, len-1, "%s/GENERAL_%d_%d", sval(VAR_localTmpDir), getpid(), file_sn++);
+ break;
+ default:
+ return NULL;
+ }
+ return buf;
+}
+
+/*
+ * Please note that remote stdout is not streamed into pgxc_ctl so far. It is
+ * collected into a remote file and then written directly to local stdout.
+ */
+char *createRemoteFileName(FileType type, char *buf, int len)
+{
+ char hostname[MAXPATH+1];
+ /*
+ * Filename is $TmpDir/hostname_type_serno.
+ */
+ getCleanHostname(hostname, MAXPATH);
+ switch (type)
+ {
+ case STDIN:
+ snprintf(buf, len-1, "%s/%s_STDIN_%d_%d", sval(VAR_tmpDir), hostname, getpid(), file_sn++);
+ break;
+ case STDOUT:
+ snprintf(buf, len-1, "%s/%s_STDOUT_%d_%d", sval(VAR_tmpDir), hostname, getpid(), file_sn++);
+ break;
+ case STDERR:
+ snprintf(buf, len-1, "%s/%s_STDERR_%d_%d", sval(VAR_tmpDir), hostname, getpid(), file_sn++);
+ break;
+ case GENERAL:
+ snprintf(buf, len-1, "%s/%s_GENERAL_%d_%d", sval(VAR_tmpDir), hostname, getpid(), file_sn++);
+ break;
+ default:
+ return NULL;
+ }
+ return buf;
+}
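+
+/*
+ * For illustration (pid and serial numbers are arbitrary): with
+ * $LocalTmpDir=/tmp, pid 4321 and serial number 0, the local variant yields
+ * "/tmp/STDOUT_4321_0". The remote variant prefixes the dot-truncated local
+ * hostname, e.g. "/tmp/node1_STDOUT_4321_1" when the local host is "node1"
+ * and $TmpDir=/tmp.
+ */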
+
+/*
+ * ==============================================================================================
+ *
+ * Tools to run a command foreground.
+ *
+ * ==============================================================================================
+ */
+/*
+ * Runs any command in the foreground. No further redirection is done.
+ * Return value is the same as that of system();
+ * Stdout will be set to outF. The content will also be written to the log if specified.
+ * If stdIn is NULL or stdIn[0] == 0, then stdin will not be used.
+ * If host == NULL or host[0] == 0, then the command will be run locally.
+ */
+
+/* Does not handle stdin/stdout. If needed, they should be included in the cmd. */
+int doImmediateRaw(const char *cmd_fmt, ...)
+{
+ char actualCmd[MAXLINE+1];
+ va_list arg;
+ va_start(arg, cmd_fmt);
+ vsnprintf(actualCmd, MAXLINE, cmd_fmt, arg);
+ va_end(arg);
+ return(system(actualCmd));
+}
+
+FILE *pgxc_popen_wRaw(const char *cmd_fmt, ...)
+{
+ va_list arg;
+ char actualCmd[MAXLINE+1];
+
+ va_start(arg, cmd_fmt);
+ vsnprintf(actualCmd, MAXLINE, cmd_fmt, arg);
+ va_end(arg);
+ return(popen(actualCmd, "w"));
+}
+
+FILE *pgxc_popen_w(char *host, const char *cmd_fmt, ...)
+{
+ FILE *f;
+ va_list arg;
+ char actualCmd[MAXLINE+1];
+ char sshCmd[MAXLINE+1];
+
+ va_start(arg, cmd_fmt);
+ vsnprintf(actualCmd, MAXLINE, cmd_fmt, arg);
+ va_end(arg);
+ snprintf(sshCmd, MAXLINE, "ssh %s@%s \" %s \"", sval(VAR_pgxcUser), host, actualCmd);
+ if ((f = popen(sshCmd, "w")) == NULL)
+ elog(ERROR, "ERROR: could not open the command \"%s\" to write, %s\n", sshCmd, strerror(errno));
+ return f;
+}
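+
+/*
+ * A typical use of pgxc_popen_w() (a sketch; the host name and payload are
+ * illustrative): stream data to a remote command over the ssh pipe, then
+ * pclose() to wait for it to finish.
+ *
+ *     FILE *f = pgxc_popen_w("node1", "cat >> %s/gtm.conf", "/data/gtm");
+ *     if (f)
+ *     {
+ *         fprintf(f, "listen_addresses = '*'\n");
+ *         pclose(f);
+ *     }
+ */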
+
+int doImmediate(char *host, char *stdIn, const char *cmd_fmt, ...)
+{
+ char cmd_wk[MAXLINE+1];
+ char actualCmd[MAXLINE+1];
+ char remoteStdout[MAXPATH+1];
+ char localStdout[MAXPATH+1];
+ va_list arg;
+ int rc;
+
+ va_start(arg, cmd_fmt);
+ vsnprintf(cmd_wk, MAXLINE, cmd_fmt, arg);
+ va_end(arg);
+ if (host == NULL || host[0] == '\0')
+ {
+ /* Local case */
+ snprintf(actualCmd, MAXLINE, "( %s ) < %s > %s 2>&1",
+ cmd_wk,
+ ((stdIn == NULL) || (stdIn[0] == 0)) ? "/dev/null" : stdIn,
+ createLocalFileName(STDOUT, localStdout, MAXPATH));
+ elog(DEBUG1, "Actual command: %s\n", actualCmd);
+ rc = system(actualCmd);
+ }
+ else
+ {
+ int rc1;
+ /* Remote case */
+ snprintf(actualCmd, MAXLINE, "ssh %s@%s \"( %s ) > %s 2>&1\" < %s > /dev/null 2>&1",
+ sval(VAR_pgxcUser), host, cmd_wk,
+ createRemoteFileName(STDOUT, remoteStdout, MAXPATH),
+ ((stdIn == NULL) || (stdIn[0] == 0)) ? "/dev/null" : stdIn);
+ elog(INFO, "Actual Command: %s\n", actualCmd);
+ rc = system(actualCmd);
+ snprintf(actualCmd, MAXLINE, "scp %s@%s:%s %s > /dev/null 2>&1",
+ sval(VAR_pgxcUser), host, remoteStdout,
+ createLocalFileName(STDOUT, localStdout, MAXPATH));
+ elog(INFO, "Bring remote stdout: %s\n", actualCmd);
+ rc1 = system(actualCmd);
+ if (WEXITSTATUS(rc1) != 0)
+ elog(WARNING, "WARNING: Stdout transfer not successful, file: %s:%s->%s\n",
+ host, remoteStdout, localStdout);
+ doImmediateRaw("ssh %s@%s \"rm -f %s < /dev/null > /dev/null\" < /dev/null > /dev/null",
+ sval(VAR_pgxcUser), host, remoteStdout);
+ }
+ elogFile(INFO, localStdout);
+ unlink(localStdout);
+ if (stdIn && stdIn[0])
+ unlink(stdIn);
+ return((rc));
+}
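+
+/*
+ * Usage sketch for doImmediate() (host and path are illustrative): run a
+ * command on a remote host, with its stdout brought back and logged locally.
+ * Pass a NULL host to run locally and a NULL stdIn when no input is needed.
+ *
+ *     if (WEXITSTATUS(doImmediate("node1", NULL, "rm -f %s", "/tmp/x")) != 0)
+ *         elog(WARNING, "WARNING: remote rm failed\n");
+ */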
+
+/*
+ * =======================================================================================
+ *
+ * Command list handlers
+ *
+ * =======================================================================================
+ */
+cmdList_t *initCmdList(void)
+{
+ cmdList_t *rv = (cmdList_t *)Malloc0(sizeof(cmdList_t));
+
+ rv->allocated = 1;
+ return(rv);
+}
+
+cmd_t *initCmd(char *host)
+{
+ cmd_t *rv = (cmd_t *)Malloc0(sizeof(cmd_t));
+ if (host)
+ rv->host = Strdup(host);
+ return rv;
+}
+
+static void clearStdin(cmd_t *cmd)
+{
+ unlink(cmd->localStdin);
+ freeAndReset(cmd->localStdin);
+}
+
+static void touchStdout(cmd_t *cmd)
+{
+ if (cmd->remoteStdout)
+ doImmediateRaw("(ssh %s@%s touch %s) < /dev/null > /dev/null 2>&1",
+ sval(VAR_pgxcUser), cmd->host,
+ cmd->remoteStdout);
+ if (cmd->localStdout)
+ doImmediateRaw("(touch %s) < /dev/null > /dev/null", cmd->localStdout);
+}
+
+#if 0
+static void setStdout(cmd_t *cmd)
+{
+ if (cmd->host != NULL)
+ {
+ if (cmd->remoteStdout == NULL)
+ /* Remote cmd */
+ cmd->remoteStdout = createRemoteFileName(STDOUT, Malloc(MAXPATH+1), MAXPATH);
+ else
+ freeAndReset(cmd->remoteStdout);
+ }
+ if (cmd->localStdout == NULL)
+ cmd->localStdout = createLocalFileName(STDOUT, Malloc(MAXPATH+1), MAXPATH);
+}
+#endif
+
+int doCmd(cmd_t *cmd)
+{
+ int rc = 0;
+
+ cmd_t *curr;
+
+ for(curr = cmd; curr; curr = curr->next)
+ {
+ rc = doCmdEl(curr);
+ }
+ return rc;
+}
+
+static char *allocActualCmd(cmd_t *cmd)
+{
+ return (cmd->actualCmd) ? cmd->actualCmd : (cmd->actualCmd = Malloc(MAXLINE+1));
+}
+
+/* localStdout has to be set by the caller */
+int doCmdEl(cmd_t *cmd)
+{
+ if (cmd->isInternal)
+ {
+ if (cmd->callback)
+ (*cmd->callback)(cmd->callback_parm);
+ else
+ elog(ERROR, "ERROR: no function entry was found in cmd_t.\n");
+ freeAndReset(cmd->callback_parm);
+ return 0;
+ }
+ if (cmd->host)
+ {
+ /* Build actual command */
+ snprintf(allocActualCmd(cmd), MAXLINE,
+ "ssh %s@%s \"( %s ) > %s 2>&1\" < %s > /dev/null 2>&1",
+ sval(VAR_pgxcUser),
+ cmd->host,
+ cmd->command,
+ cmd->remoteStdout ? cmd->remoteStdout : "/dev/null",
+ cmd->localStdin ? cmd->localStdin : "/dev/null");
+ /* Do it */
+ elog(DEBUG1, "Remote command: \"%s\", actual: \"%s\"\n", cmd->command, cmd->actualCmd);
+ cmd->excode = system(cmd->actualCmd);
+ /* Handle stdout */
+ clearStdin(cmd);
+ touchStdout(cmd);
+ doImmediateRaw("(scp %s@%s:%s %s; ssh %s@%s rm -rf %s) < /dev/null > /dev/null",
+ sval(VAR_pgxcUser), cmd->host, cmd->remoteStdout, cmd->localStdout,
+ sval(VAR_pgxcUser), cmd->host, cmd->remoteStdout);
+ freeAndReset(cmd->remoteStdout);
+ /* Handle stdin */
+ return (cmd->excode);
+ }
+ else
+ {
+ freeAndReset(cmd->remoteStdout);
+ /* Build actual command */
+ snprintf(allocActualCmd(cmd), MAXLINE,
+ "( %s ) > %s 2>&1 < %s",
+ cmd->command,
+ cmd->localStdout ? cmd->localStdout : "/dev/null",
+ cmd->localStdin ? cmd->localStdin : "/dev/null");
+ /* Do it */
+ elog(DEBUG1, "Local command: \"%s\", actual: \"%s\"\n", cmd->command, cmd->actualCmd);
+ cmd->excode = system(cmd->actualCmd);
+ /* Handle stdout */
+ clearStdin(cmd);
+ touchStdout(cmd);
+ /* Handle stdin */
+ return (cmd->excode);
+ }
+}
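+
+/*
+ * For illustration (user "pgxc", host "node1" and the file names are
+ * hypothetical), a remote element with command "ls /tmp" is assembled by
+ * doCmdEl() into roughly:
+ *
+ *     ssh pgxc@node1 "( ls /tmp ) > /tmp/node1_STDOUT_4321_2 2>&1" \
+ *         < /dev/null > /dev/null 2>&1
+ *
+ * where the stdout file comes from createRemoteFileName() and is later
+ * copied back with scp and removed.
+ */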
+
+/*
+ * Here, we should handle the exit code.
+ *
+ * If each command ran and exited normally, the maximum (worst) value of the
+ * status codes will be returned.
+ *
+ * If SIGINT is detected, the remaining child processes are killed with
+ * SIGKILL, their stdout files are cleaned up, and the exit status is set
+ * to 2.
+ */
+int doCmdList(cmdList_t *cmds)
+{
+ int ii, jj;
+ xc_status rc = 0;
+
+ dump_cmdList(cmds);
+ if (cmds->cmds == NULL)
+ return(0);
+ old_HandlerDoShell = signal(SIGINT, do_shell_SigHandler);
+ whereToJumpDoShell = &dcJmpBufDoShell;
+ /*
+ * Invoke remote command with SSH
+ */
+ prepareStdout(cmds);
+ if (setjmp(dcJmpBufDoShell) == 0)
+ {
+ for (ii = 0; cmds->cmds[ii]; ii++)
+ {
+ if (!isVarYes(VAR_debug))
+ {
+ if ((cmds->cmds[ii]->pid = fork()) != 0)
+ {
+ if (cmds->cmds[ii]->pid == -1)
+ {
+ elog(ERROR, "Process for \"%s\" failed to start. %s\n",
+ cmds->cmds[ii]->actualCmd,
+ strerror(errno));
+ cmds->cmds[ii]->pid = 0;
+ }
+ continue;
+ }
+ else
+ exit(doCmd(cmds->cmds[ii]));
+ }
+ else
+ {
+ cmds->cmds[ii]->excode = doCmd(cmds->cmds[ii]);
+ rc = WEXITSTATUS(cmds->cmds[ii]->excode);
+ }
+ }
+ }
+ else
+ {
+ /* Signal exit here */
+ for (ii = 0; cmds->cmds[ii]; ii++)
+ {
+ if (!isVarYes(VAR_debug))
+ {
+ if (cmds->cmds[ii]->pid)
+ {
+ /*
+ * We don't care if the process is alive or not.
+ * Try to kill anyway. Then handle remote/local
+ * stdin/stdout in the next step.
+ *
+ * If it's bothering to wait for printing, the user can
+ * issue a SIGINT again.
+ */
+ kill(cmds->cmds[ii]->pid, SIGKILL);
+ cmds->cmds[ii]->pid = 0;
+ }
+ }
+ else
+ {
+ /* Something to do at non-parallel execution */
+ }
+ }
+ elog(NOTICE, "%s:%d Finish by interrupt\n", __FUNCTION__, __LINE__);
+ if (whereToJumpMainLoop)
+ {
+ elog(NOTICE, "Control reaches to the mainloop\n");
+ longjmp(*whereToJumpMainLoop, 1);
+ }
+ return 2;
+ }
+ /*
+ * Handle remote/local stdin/stdout
+ */
+ signal(SIGINT, do_shell_SigHandler);
+ if (setjmp(dcJmpBufDoShell) == 0)
+ {
+ for (ii = 0; cmds->cmds[ii]; ii++)
+ {
+ int status;
+ cmd_t *cur;
+
+ if (!isVarYes(VAR_debug))
+ {
+ if (cmds->cmds[ii]->pid)
+ {
+ int pid;
+ pid = waitpid(cmds->cmds[ii]->pid, &status, 0);
+ rc = WEXITSTATUS(status);
+ }
+ }
+ cmds->cmds[ii]->pid = 0;
+ for (cur = cmds->cmds[ii]; cur; cur = cur->next)
+ {
+ elogFile(MANDATORY, cur->localStdout);
+ doImmediateRaw("(rm -f %s) < /dev/null > /dev/null", cur->localStdout);
+ freeAndReset(cur->actualCmd);
+ freeAndReset(cur->localStdout);
+ freeAndReset(cur->msg);
+ }
+ }
+ }
+ else
+ {
+ /* Captured SIGINT */
+ signal(SIGINT, old_HandlerDoShell);
+
+ for (jj = 0; cmds->cmds[jj]; jj++)
+ {
+ /* Need to handle the case with non-parallel execution */
+ if (cmds->cmds[jj]->pid)
+ {
+ kill(cmds->cmds[jj]->pid, SIGKILL);
+ cmds->cmds[jj]->pid = 0;
+ }
+ if (cmds->cmds[jj]->localStdout)
+ doImmediate(NULL, NULL, "rm -f %s", cmds->cmds[jj]->localStdout);
+ if (cmds->cmds[jj]->remoteStdout) /* Note that remote stdout will be removed anyway */
+ doImmediate(cmds->cmds[jj]->host, NULL, "rm -f %s",
+ cmds->cmds[jj]->remoteStdout);
+ freeAndReset(cmds->cmds[jj]->actualCmd);
+ freeAndReset(cmds->cmds[jj]->localStdout);
+ freeAndReset(cmds->cmds[jj]->msg);
+ freeAndReset(cmds->cmds[jj]->remoteStdout);
+ }
+ elog(NOTICE, "%s:%d Finish by interrupt\n", __FUNCTION__, __LINE__);
+
+ if (whereToJumpMainLoop)
+ {
+ elog(NOTICE, "Control reaches to the mainloop\n");
+ longjmp(*whereToJumpMainLoop, 1);
+ }
+ return(2);
+ }
+ signal(SIGINT, old_HandlerDoShell);
+ whereToJumpDoShell = NULL;
+ return(rc);
+}
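+
+/*
+ * Callers in this tool typically use doCmdList() in the following pattern
+ * (a sketch): build the list, run it, clean it, and propagate the status,
+ * where 2 means the execution was interrupted by SIGINT.
+ *
+ *     rc = doCmdList(cmdList);
+ *     cleanCmdList(cmdList);
+ *     return(rc);
+ */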
+
+void appendCmdEl(cmd_t *src, cmd_t *new)
+{
+ cmd_t *curr;
+
+ for (curr = src; curr->next; curr = curr->next);
+ curr->next = new;
+}
+
+void do_cleanCmdEl(cmd_t *cmd)
+{
+ if (cmd)
+ {
+ if (cmd->localStdout)
+ unlink(cmd->localStdout);
+ Free(cmd->localStdout);
+ Free(cmd->msg);
+ if (cmd->localStdin)
+ unlink(cmd->localStdin);
+ Free(cmd->localStdin);
+ if (cmd->remoteStdout)
+ doImmediateRaw("ssh %s@%s \"rm -f %s > /dev/null 2>&1\"", sval(VAR_pgxcUser), cmd->host, cmd->remoteStdout);
+ Free(cmd->remoteStdout);
+ Free(cmd->actualCmd);
+ Free(cmd->command);
+ Free(cmd->host);
+ }
+}
+
+void do_cleanCmd(cmd_t *cmd)
+{
+ if (cmd == NULL)
+ return;
+ if (cmd->next == NULL)
+ do_cleanCmdEl(cmd);
+ else
+ {
+ do_cleanCmd(cmd->next);
+ freeAndReset(cmd->next);
+ }
+}
+
+void do_cleanCmdList(cmdList_t *cmdList)
+{
+ int ii;
+
+ if (cmdList->cmds)
+ {
+ for (ii = 0; cmdList->cmds[ii]; ii++)
+ {
+ cleanCmd(cmdList->cmds[ii]);
+ Free(cmdList->cmds[ii]);
+ }
+ }
+ Free(cmdList);
+}
+
+void addCmd(cmdList_t *cmds, cmd_t *cmd)
+{
+ cmd->pid = 0;
+ cmd->actualCmd = cmd->remoteStdout = cmd->msg = cmd->localStdout = NULL;
+ if (cmds->used + 1 >= cmds->allocated)
+ {
+ int newsize = nextSize(cmds->allocated);
+ cmds->cmds = (cmd_t **)Realloc(cmds->cmds, sizeof(cmd_t *) * newsize);
+ cmds->allocated = newsize;
+ }
+ cmds->cmds[cmds->used++] = cmd;
+ cmds->cmds[cmds->used] = NULL;
+}
+
+void cleanLastCmd(cmdList_t *cmdList)
+{
+ int ii;
+
+ if ((cmdList == NULL) || (cmdList->cmds == NULL) || (cmdList->cmds[0] == NULL))
+ return;
+ for (ii = 0; cmdList->cmds[ii+1]; ii++);
+ cleanCmd(cmdList->cmds[ii]);
+}
+
+/*
+ * ====================================================================================
+ *
+ * Miscellaneous
+ *
+ * ====================================================================================
+ */
+static int nextSize(int size)
+{
+ if (size == 0)
+ return 1;
+ if (size < 128)
+ return (size*2);
+ return (size + 32);
+}
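+
+/*
+ * For example, repeated growth from 1 proceeds 1, 2, 4, ..., 128, 160, 192:
+ * small arrays double, larger ones grow linearly by 32 slots.
+ */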
+
+/*
+ * Get my hostname to prevent remote file name conflicts.
+ * Take only the first part of the hostname and ignore the
+ * domain part.
+ */
+static char *getCleanHostname(char *buf, int len)
+{
+ char hostname[MAXPATH+1];
+ int ii;
+
+ gethostname(hostname, MAXPATH);
+ for (ii = 0; hostname[ii] && hostname[ii] != '.'; ii++);
+ if (hostname[ii])
+ hostname[ii] = 0;
+ strncpy(buf, hostname, len);
+ return buf;
+}
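+
+/* For example, a hostname "node1.example.com" (illustrative) is cleaned to "node1". */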
+
+/*
+ * Waits for the user to type something; used only when the debug option is
+ * specified, to synchronize starting child processes and help gdb.
+ *
+ * May not be useful if the input file is not stdin.
+ */
+#if 0
+static void waitTypeReturn(void)
+{
+ char buf[MAXLINE+1];
+
+ fputs("Type Return: ", outF);
+ fgets(buf, MAXLINE, inF);
+}
+
+static void echoPid(pid_t pid)
+{
+ fprintf(outF, "INFO: pid = %d\n", pid);
+}
+#endif
+
+static void prepareStdout(cmdList_t *cmdList)
+{
+ int ii;
+
+ if (cmdList == NULL)
+ return;
+ if (cmdList->cmds == NULL)
+ return;
+ for (ii = 0; cmdList->cmds[ii]; ii++)
+ {
+ cmd_t *curr;
+ for (curr = cmdList->cmds[ii]; curr; curr = curr->next)
+ {
+ if (curr->localStdout == NULL)
+ createLocalFileName(STDOUT, (curr->localStdout = Malloc(sizeof(char) * (MAXPATH+1))), MAXPATH);
+ if (curr->host)
+ {
+ if (curr->remoteStdout == NULL)
+ createRemoteFileName(STDOUT, (curr->remoteStdout = Malloc(sizeof(char) * (MAXPATH+1))), MAXPATH);
+ }
+ else
+ freeAndReset(curr->remoteStdout);
+ }
+ }
+}
+
+cmd_t *makeConfigBackupCmd(void)
+{
+ cmd_t *rv = Malloc0(sizeof(cmd_t));
+ snprintf((rv->command = Malloc(MAXLINE+1)), MAXLINE,
+ "ssh %s@%s mkdir -p %s;scp %s %s@%sp:%s",
+ sval(VAR_pgxcUser), sval(VAR_configBackupHost), sval(VAR_configBackupDir),
+ pgxc_ctl_config_path, sval(VAR_pgxcUser), sval(VAR_configBackupHost),
+ sval(VAR_configBackupFile));
+ return(rv);
+}
+
+int doConfigBackup(void)
+{
+ int rc;
+
+ rc = doImmediateRaw("ssh %s@%s mkdir -p %s;scp %s %s@%s:%s/%s",
+ sval(VAR_pgxcUser), sval(VAR_configBackupHost), sval(VAR_configBackupDir),
+ pgxc_ctl_config_path, sval(VAR_pgxcUser), sval(VAR_configBackupHost),
+ sval(VAR_configBackupDir), sval(VAR_configBackupFile));
+ return(rc);
+}
+
+void dump_cmdList(cmdList_t *cmdList)
+{
+ int ii, jj;
+ cmd_t *cur;
+
+ lockLogFile(); /* We don't want this output interrupted by another process's log */
+ elog(DEBUG1,
+ "*** cmdList Dump *******************************\n"
+ "allocated = %d, used = %d\n", cmdList->allocated, cmdList->used);
+ if (cmdList->cmds == NULL)
+ {
+ elog(DEBUG1, "=== No command defined. ===\n");
+ unlockLogFile();
+ return;
+ }
+ for (ii = 0; cmdList->cmds[ii]; ii++)
+ {
+ elog(DEBUG1,
+ "=== CMD: %d ===\n", ii);
+ for (cur = cmdList->cmds[ii], jj=0; cur; cur = cur->next, jj++)
+ {
+ elog(DEBUG1,
+ " --- CMD-EL: %d:"
+ "host=\"%s\", command=\"%s\", localStdin=\"%s\", localStdout=\"%s\"\n",
+ jj, cur->host ? cur->host : "NULL",
+ cur->command ? cur->command : "NULL",
+ cur->localStdin ? cur->localStdin : "NULL",
+ cur->localStdout ? cur->localStdout : "NULL");
+ if (cur->localStdin)
+ {
+ elogFile(DEBUG1, cur->localStdin);
+ elog(DEBUG1, " ----------\n");
+ }
+ }
+ }
+ unlockLogFile();
+}
diff --git a/contrib/pgxc_ctl/do_shell.h b/contrib/pgxc_ctl/do_shell.h
new file mode 100644
index 0000000000..6d580a4d44
--- /dev/null
+++ b/contrib/pgxc_ctl/do_shell.h
@@ -0,0 +1,114 @@
+/*-------------------------------------------------------------------------
+ *
+ * do_shell.h
+ *
+ * Shell control module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef DO_SHELL_H
+#define DO_SHELL_H
+
+#include <setjmp.h>
+
+extern jmp_buf *whereToJumpMainLoop;
+extern jmp_buf dcJmpBufMainLoop;
+
+extern void dcSigHandler(int signum);
+typedef enum FileType { STDIN, STDOUT, STDERR, GENERAL } FileType;
+#ifndef PGSIGFUNC
+#define PGSIGFUNC
+typedef void (*pqsigfunc) (int signo);
+#endif
+extern char *createLocalFileName(FileType type, char *buf, int len);
+extern char *createRemoteFileName(FileType type, char *buf, int len);
+extern int doImmediate(char *host, char *stdIn, const char *cmd_fmt, ...) __attribute__((format(printf, 3, 4)));
+extern int doImmediateRaw(const char *cmd_fmt, ...) __attribute__((format(printf, 1,2)));
+extern FILE *pgxc_popen_wRaw(const char *cmd_fmt, ...) __attribute__((format(printf, 1,2)));
+extern FILE *pgxc_popen_w(char *host, const char *cmd_fmt, ...) __attribute__((format(printf, 2,3)));
+
+/*
+ * Flags
+ */
+#define PrintLog 0x01
+#define PrintErr 0x02
+#define LogOnly PrintLog
+#define ErrOnly PrintErr
+#define LogErr (PrintLog | PrintErr)
+#define LeaveRemoteStdin 0x04
+#define LeaveLocalStdin 0x08
+#define LeaveStdout 0x10
+#define InternalFunc(cmd, func, parm) \
+ do \
+ {(cmd)->isInternal = TRUE; (cmd)->callback = (func); (cmd)->callback_parm = (parm);} \
+ while(0)
+#define ShellCall(cmd) \
+ do \
+ {(cmd)->isInternal = FALSE; (cmd)->callback = NULL; (cmd)->callback_parm = NULL;} \
+ while(0)
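+
+/*
+ * Sketch of an internal (non-shell) element, assuming a caller-defined
+ * callback my_callback(char *parm) (hypothetical): doCmdEl() invokes the
+ * callback instead of spawning a shell, then frees callback_parm.
+ *
+ *     cmd_t *cmd = initCmd(NULL);
+ *     InternalFunc(cmd, my_callback, Strdup("argument"));
+ */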
+
+
+
+typedef struct cmd_t
+{
+ struct cmd_t *next; /* Next to do --> done in the same shell */
+ int isInternal; /* If true, do not invoke shell. Call internal function */
+ void (*callback)(char *line); /* Callback function */
+ char *callback_parm;/* Argument to the callback function. Will be freed here. */
+ char *host; /* target host -> If null, then local command */
+ char *command; /* Will be double-quoted. Double-quote has to be escaped by the caller */
+ char *localStdin; /* Local stdin name --> Supplied by the caller. */
+ char *actualCmd; /* internal use --> local ssh full command. */
+ char *localStdout; /* internal use, local stdout name --> Generated. Stderr will be copied here too */
+ /* Messages from the child process may be printed to this file too. */
+ pid_t pid; /* internal use: valid only for cmd at the head of the list */
+ int flag; /* flags */
+ int excode; /* exit code -> not used in parallel execution. */
+ char *msg; /* internal use: messages to write. Has to be consumed only by the child process. */
+ char *remoteStdout; /* internal use: remote stdout name. Generated for remote case */
+} cmd_t;
+
+typedef struct cmdList_t
+{
+ int allocated;
+ int used;
+ cmd_t **cmds;
+} cmdList_t;
+
+extern cmdList_t *initCmdList(void);
+extern cmd_t *initCmd(char *host);
+#define newCommand(a) ((a)->command=Malloc(sizeof(char) * (MAXLINE+1)))
+#define newCmd(a) ((a)=initCmd(NULL))
+#define newFilename(a) ((a)=Malloc(sizeof(char) *(MAXPATH+1)))
+
+/*
+ * Return value from doCmd() and doCmdList(): this includes the
+ * exit code from the shell (and its command), as well as other
+ * status of the code.
+ *
+ * Exit status should include WIFSIGNALED() and the signal information,
+ * as well as others seen in wait(2). Such information should be composed
+ * from the individual command statuses. Because functions to compose them
+ * are not available, we provide a corresponding local implementation.
+ */
+
+extern int doCmdEl(cmd_t *cmd);
+extern int doCmd(cmd_t *cmd);
+extern int doCmdList(cmdList_t *cmds);
+extern void do_cleanCmdList(cmdList_t *cmds);
+#define cleanCmdList(x) do{do_cleanCmdList(x); (x) = NULL;}while(0)
+extern void do_cleanCmd(cmd_t *cmd);
+#define cleanCmd(x) do{do_cleanCmd(x); (x) = NULL;}while(0)
+extern void do_cleanCmdEl(cmd_t *cmd);
+#define cleanCmdEl(x) do{do_cleanCmdEl(x); (x) = NULL;}while(0)
+extern void addCmd(cmdList_t *cmdList, cmd_t *cmd);
+extern void appendCmdEl(cmd_t *src, cmd_t *new);
+extern void cleanLastCmd(cmdList_t *cmdList);
+extern cmd_t *makeConfigBackupCmd(void);
+extern int doConfigBackup(void);
+extern void dump_cmdList(cmdList_t *cmdList);
+
+#endif /* DO_SHELL_H */
+
diff --git a/contrib/pgxc_ctl/gtm_cmd.c b/contrib/pgxc_ctl/gtm_cmd.c
new file mode 100644
index 0000000000..beb1e3e1ec
--- /dev/null
+++ b/contrib/pgxc_ctl/gtm_cmd.c
@@ -0,0 +1,1580 @@
+/*-------------------------------------------------------------------------
+ *
+ * gtm_cmd.c
+ *
+ * GTM command module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+/*
+ * This module provides various gtm-related pgxc_operation.
+ */
+#include <stdio.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <signal.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <setjmp.h>
+#include <string.h>
+#include <readline/readline.h>
+#include <readline/history.h>
+
+#include "pgxc_ctl.h"
+#include "do_command.h"
+#include "variables.h"
+#include "varnames.h"
+#include "pgxc_ctl_log.h"
+#include "config.h"
+#include "do_shell.h"
+#include "utils.h"
+#include "gtm_cmd.h"
+#include "monitor.h"
+
+static char date[MAXTOKEN+1];
+
+
+/* ======================================================================================
+ *
+ * GTM Stuff
+ *
+ * =======================================================================================
+ */
+/*
+ * Init gtm master -----------------------------------------------------------------
+ */
+cmd_t *prepare_initGtmMaster(bool stop)
+{
+ cmd_t *cmdInitGtmMaster, *cmdGtmConf, *cmdGxid;
+ char date[MAXTOKEN+1];
+ FILE *f;
+ char **fileList = NULL;
+ char remoteDirCheck[MAXPATH * 2 + 128];
+
+ remoteDirCheck[0] = '\0';
+ if (!forceInit)
+ {
+ sprintf(remoteDirCheck, "if [ '$(ls -A %s 2> /dev/null)' ]; then echo 'ERROR: "
+ "target directory (%s) exists and not empty. "
+ "Skip GTM initilialization'; exit; fi;",
+ sval(VAR_gtmMasterDir),
+ sval(VAR_gtmMasterDir)
+ );
+ }
+
+ /* Kill the current gtm, build the work directory and run initgtm */
+ cmdInitGtmMaster = initCmd(sval(VAR_gtmMasterServer));
+ snprintf(newCommand(cmdInitGtmMaster), MAXLINE,
+ "%s"
+ "[ -f %s/gtm.pid ] && gtm_ctl -D %s -m immediate -Z gtm stop;"
+ "rm -rf %s;"
+ "mkdir -p %s;"
+ "PGXC_CTL_SILENT=1 initgtm -Z gtm -D %s",
+ remoteDirCheck,
+ sval(VAR_gtmMasterDir),
+ sval(VAR_gtmMasterDir),
+ sval(VAR_gtmMasterDir), sval(VAR_gtmMasterDir), sval(VAR_gtmMasterDir));
+
+ /* Then prepare gtm.conf file */
+
+ /* Prepare local Stdin */
+ appendCmdEl(cmdInitGtmMaster, (cmdGtmConf = initCmd(sval(VAR_gtmMasterServer))));
+ if ((f = prepareLocalStdin(newFilename(cmdGtmConf->localStdin), MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmdInitGtmMaster);
+ return(NULL);
+ }
+ fprintf(f,
+ "#===============================================\n"
+ "# Added at initialization, %s\n"
+ "listen_addresses = '*'\n",
+ timeStampString(date, MAXTOKEN));
+ if (!is_none(sval(VAR_gtmExtraConfig)))
+ AddMember(fileList, sval(VAR_gtmExtraConfig));
+ if (!is_none(sval(VAR_gtmMasterSpecificExtraConfig)))
+ AddMember(fileList, sval(VAR_gtmMasterSpecificExtraConfig));
+ appendFiles(f, fileList);
+ CleanArray(fileList);
+ fprintf(f,
+ "port = %s\n"
+ "nodename = '%s'\n"
+ "startup = ACT\n"
+ "# End of addition\n",
+ sval(VAR_gtmMasterPort), sval(VAR_gtmName));
+ fclose(f);
+ /* other options */
+ snprintf(newCommand(cmdGtmConf), MAXLINE,
+ "cat >> %s/gtm.conf",
+ sval(VAR_gtmMasterDir));
+
+ /* Setup GTM with appropriate GXID value */
+
+ appendCmdEl(cmdGtmConf, (cmdGxid = initCmd(sval(VAR_gtmMasterServer))));
+ if (stop)
+ snprintf(newCommand(cmdGxid), MAXLINE,
+ "(gtm -x 2000 -D %s &); sleep 1; gtm_ctl stop -Z gtm -D %s",
+ sval(VAR_gtmMasterDir), sval(VAR_gtmMasterDir));
+ else
+ snprintf(newCommand(cmdGxid), MAXLINE,
+ "(gtm -x 2000 -D %s &); sleep 1;",
+ sval(VAR_gtmMasterDir));
+
+ return cmdInitGtmMaster;
+}
+
+int init_gtm_master(bool stop)
+{
+ int rc = 0;
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+
+ elog(INFO, "Initialize GTM master\n");
+ if (is_none(sval(VAR_gtmMasterServer)))
+ {
+ elog(INFO, "No GTM master specified, exiting!\n");
+ return rc;
+ }
+
+ cmdList = initCmdList();
+
+ /* Kill current gtm, build work directory and run initgtm */
+
+ if ((cmd = prepare_initGtmMaster(stop)))
+ addCmd(cmdList, cmd);
+
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ elog(INFO, "Done.\n");
+ return(rc);
+}
+
+/*
+ * Add gtm master
+ *
+ */
+int add_gtmMaster(char *name, char *host, int port, char *dir)
+{
+ char port_s[MAXTOKEN+1];
+ char date[MAXTOKEN+1];
+ FILE *f;
+ int rc;
+
+ if (is_none(name))
+ {
+ elog(ERROR, "ERROR: Cannot add gtm master with the name \"none\".\n");
+ return 1;
+ }
+ if (is_none(host))
+ {
+ elog(ERROR, "ERROR: Cannot add gtm master with the name \"none\".\n");
+ return 1;
+ }
+ if (is_none(dir))
+ {
+ elog(ERROR, "ERROR: Cannot add gtm master with the directory \"none\".\n");
+ return 1;
+ }
+ if (checkSpecificResourceConflict(name, host, port, dir, TRUE))
+ {
+ elog(ERROR, "ERROR: New specified name:%s, host:%s, port:%d and dir:\"%s\" conflicts with existing node.\n",
+ name, host, port, dir);
+ return 1;
+ }
+ assign_sval(VAR_gtmName, Strdup(name));
+ assign_sval(VAR_gtmMasterServer, Strdup(host));
+ snprintf(port_s, MAXTOKEN, "%d", port);
+ assign_sval(VAR_gtmMasterPort, Strdup(port_s));
+ assign_sval(VAR_gtmMasterDir, Strdup(dir));
+ makeServerList();
+ if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+ {
+ /* Should it be panic? */
+ elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#===================================================\n"
+ "# pgxc configuration file updated due to GTM master addition\n"
+ "# %s\n",
+ timeStampString(date, MAXTOKEN+1));
+ fprintSval(f, VAR_gtmName);
+ fprintSval(f, VAR_gtmMasterServer);
+ fprintSval(f, VAR_gtmMasterPort);
+ fprintSval(f, VAR_gtmMasterDir);
+ fprintf(f, "%s","#----End of reconfiguration -------------------------\n");
+ fclose(f);
+ backup_configuration();
+ if ((rc = init_gtm_master(false)) != 0)
+ return rc;
+ return(start_gtm_master());
+}
+
+/*
+ * Add gtm slave: to be used after all the configuration is done.
+ *
+ * This function only maintains the internal configuration, updates the
+ * configuration file, and makes a backup if configured. You should run
+ * init_gtm_slave and start_gtm_slave separately.
+ */
+int add_gtmSlave(char *name, char *host, int port, char *dir)
+{
+ char port_s[MAXTOKEN+1];
+ char date[MAXTOKEN+1];
+ FILE *f;
+ int rc;
+
+ if (isVarYes(VAR_gtmSlave))
+ {
+ elog(ERROR, "ERROR: GTM slave is already configured.\n");
+ return 1;
+ }
+ if (is_none(name))
+ {
+ elog(ERROR, "ERROR: Cannot add gtm slave with the name \"none\".\n");
+ return 1;
+ }
+ if (is_none(host))
+ {
+ elog(ERROR, "ERROR: Cannot add gtm slave with the name \"none\".\n");
+ return 1;
+ }
+ if (is_none(dir))
+ {
+ elog(ERROR, "ERROR: Cannot add gtm slave with the directory \"none\".\n");
+ return 1;
+ }
+ if (checkSpecificResourceConflict(name, host, port, dir, FALSE))
+ {
+ elog(ERROR, "ERROR: New specified name:%s, host:%s, port:%d and dir:\"%s\" conflicts with existing node.\n",
+ name, host, port, dir);
+ return 1;
+ }
+ assign_sval(VAR_gtmSlave, Strdup("y"));
+ assign_sval(VAR_gtmSlaveName, Strdup(name));
+ assign_sval(VAR_gtmSlaveServer, Strdup(host));
+ snprintf(port_s, MAXTOKEN, "%d", port);
+ assign_sval(VAR_gtmSlavePort, Strdup(port_s));
+ assign_sval(VAR_gtmSlaveDir, Strdup(dir));
+ makeServerList();
+ if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+ {
+ /* Should it be panic? */
+ elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#===================================================\n"
+ "# pgxc configuration file updated due to GTM slave addition\n"
+ "# %s\n",
+ timeStampString(date, MAXTOKEN+1));
+ fprintSval(f, VAR_gtmSlave);
+ fprintSval(f, VAR_gtmSlaveName);
+ fprintSval(f, VAR_gtmSlaveServer);
+ fprintSval(f, VAR_gtmSlavePort);
+ fprintSval(f, VAR_gtmSlaveDir);
+ fprintf(f, "%s","#----End of reconfiguration -------------------------\n");
+ fclose(f);
+ backup_configuration();
+ if ((rc = init_gtm_slave()) != 0)
+ return rc;
+ return(start_gtm_slave());
+}
+
+int remove_gtmMaster(bool clean_opt)
+{
+ FILE *f;
+
+ /* Check if gtm_slave is configured */
+ if (!sval(VAR_gtmMasterServer) || is_none(sval(VAR_gtmMasterServer)))
+ {
+ elog(ERROR, "ERROR: GTM master is not configured.\n");
+ return 1;
+ }
+
+ /* Check if gtm_master is running and stop if yes */
+ if (do_gtm_ping(sval(VAR_gtmMasterServer), atoi(sval(VAR_gtmMasterPort))) == 0)
+ stop_gtm_master();
+
+ elog(NOTICE, "Removing gtm master.\n");
+ /* Clean */
+ if (clean_opt)
+ clean_gtm_master();
+ /* Reconfigure */
+ reset_var(VAR_gtmName);
+ assign_sval(VAR_gtmName, Strdup("none"));
+ reset_var(VAR_gtmMasterServer);
+ assign_sval(VAR_gtmMasterServer, Strdup("none"));
+ reset_var(VAR_gtmMasterPort);
+ assign_sval(VAR_gtmMasterPort, Strdup("-1"));
+ reset_var(VAR_gtmMasterDir);
+ assign_sval(VAR_gtmMasterDir, Strdup("none"));
+ /* Write the configuration file and back it up */
+ if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+ {
+ /* Should it be panic? */
+ elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#===================================================\n"
+ "# pgxc configuration file updated due to GTM master removal\n"
+ "# %s\n",
+ timeStampString(date, MAXTOKEN+1));
+ fprintSval(f, VAR_gtmName);
+ fprintSval(f, VAR_gtmMasterServer);
+ fprintSval(f, VAR_gtmMasterPort);
+ fprintSval(f, VAR_gtmMasterDir);
+ fprintf(f, "%s", "#----End of reconfiguration -------------------------\n");
+ fclose(f);
+ backup_configuration();
+ elog(NOTICE, "Done.\n");
+ return 0;
+}
+
+int remove_gtmSlave(bool clean_opt)
+{
+ FILE *f;
+
+ /* Check if gtm_slave is configured */
+ if (!isVarYes(VAR_gtmSlave) || !sval(VAR_gtmSlaveServer) || is_none(sval(VAR_gtmSlaveServer)))
+ {
+ elog(ERROR, "ERROR: GTM slave is not configured.\n");
+ return 1;
+ }
+ /* Check if gtm_slave is not running */
+ if (!do_gtm_ping(sval(VAR_gtmSlaveServer), atoi(sval(VAR_gtmSlavePort))))
+ {
+ elog(ERROR, "ERROR: GTM slave is now running. Cannot remove it.\n");
+ return 1;
+ }
+ elog(NOTICE, "Removing gtm slave.\n");
+ /* Clean */
+ if (clean_opt)
+ clean_gtm_slave();
+ /* Reconfigure */
+ reset_var(VAR_gtmSlave);
+ assign_sval(VAR_gtmSlave, Strdup("n"));
+ reset_var(VAR_gtmSlaveName);
+ assign_sval(VAR_gtmSlaveName, Strdup("none"));
+ reset_var(VAR_gtmSlaveServer);
+ assign_sval(VAR_gtmSlaveServer, Strdup("none"));
+ reset_var(VAR_gtmSlavePort);
+ assign_sval(VAR_gtmSlavePort, Strdup("-1"));
+ reset_var(VAR_gtmSlaveDir);
+ assign_sval(VAR_gtmSlaveDir, Strdup("none"));
+ /* Write the configuration file and back it up */
+ if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+ {
+ /* Should it be panic? */
+ elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#===================================================\n"
+ "# pgxc configuration file updated due to GTM slave removal\n"
+ "# %s\n",
+ timeStampString(date, MAXTOKEN+1));
+ fprintSval(f, VAR_gtmSlave);
+ fprintSval(f, VAR_gtmSlaveServer);
+ fprintSval(f, VAR_gtmSlavePort);
+ fprintSval(f, VAR_gtmSlaveDir);
+ fprintf(f, "%s", "#----End of reconfiguration -------------------------\n");
+ fclose(f);
+ backup_configuration();
+ elog(NOTICE, "Done.\n");
+ return 0;
+}
+
+
+/*
+ * Init gtm slave -------------------------------------------------------------
+ */
+
+/*
+ * Assumes Gtm Slave is configured.
+ * Caller should check this.
+ */
+cmd_t *prepare_initGtmSlave(void)
+{
+ char date[MAXTOKEN+1];
+ cmd_t *cmdInitGtm, *cmdGtmConf;
+ FILE *f;
+ char **fileList = NULL;
+
+ if (!isVarYes(VAR_gtmSlave) || (sval(VAR_gtmSlaveServer) == NULL) || is_none(sval(VAR_gtmSlaveServer)))
+ {
+ elog(ERROR, "ERROR: GTM slave is not configured.\n");
+ return(NULL);
+ }
+ /* Kill current gtm, build work directory and run initgtm */
+ cmdInitGtm = initCmd(sval(VAR_gtmSlaveServer));
+ snprintf(newCommand(cmdInitGtm), MAXLINE,
+ "[ -f %s/gtm.pid ] && gtm_ctl -D %s -m immediate -Z gtm stop;"
+ "rm -rf %s;"
+ "mkdir -p %s;"
+ "PGXC_CTL_SILENT=1 initgtm -Z gtm -D %s",
+ sval(VAR_gtmSlaveDir),
+ sval(VAR_gtmSlaveDir),
+ sval(VAR_gtmSlaveDir), sval(VAR_gtmSlaveDir), sval(VAR_gtmSlaveDir));
+
+ /* Prepare gtm.conf file */
+
+ /* Prepare local Stdin */
+ appendCmdEl(cmdInitGtm, (cmdGtmConf = initCmd(sval(VAR_gtmSlaveServer))));
+ snprintf(newCommand(cmdGtmConf), MAXLINE,
+ "cat >> %s/gtm.conf",
+ sval(VAR_gtmSlaveDir));
+ if ((f = prepareLocalStdin(newFilename(cmdGtmConf->localStdin), MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmdInitGtm);
+ return(NULL);
+ }
+ fprintf(f,
+ "#===============================================\n"
+ "# Added at initialization, %s\n"
+ "listen_addresses = '*'\n",
+ timeStampString(date, MAXTOKEN));
+ if (!is_none(sval(VAR_gtmExtraConfig)))
+ AddMember(fileList, sval(VAR_gtmExtraConfig));
+ if (!is_none(sval(VAR_gtmMasterSpecificExtraConfig)))
+ AddMember(fileList, sval(VAR_gtmMasterSpecificExtraConfig));
+ appendFiles(f, fileList);
+ CleanArray(fileList);
+ fprintf(f,
+ "port = %s\n"
+ "nodename = '%s'\n"
+ "startup = STANDBY\n"
+ "active_host = '%s'\n"
+ "active_port = %d\n"
+ "# End of addition\n",
+ sval(VAR_gtmSlavePort), sval(VAR_gtmSlaveName),
+ sval(VAR_gtmMasterServer), atoi(sval(VAR_gtmMasterPort)));
+ fclose(f);
+ return (cmdInitGtm);
+}
+
+int init_gtm_slave(void)
+{
+ cmdList_t *cmdList;
+ cmd_t *cmdInitGtm;
+ int rc;
+
+ elog(INFO, "Initialize GTM slave\n");
+ cmdList = initCmdList();
+ if ((cmdInitGtm = prepare_initGtmSlave()))
+ {
+ addCmd(cmdList, cmdInitGtm);
+ /* Do all the commands and clean */
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ elog(INFO, "Done.\n");
+ return(rc);
+ }
+ return 1;
+}
+
+/*
+ * Start gtm master -----------------------------------------------------
+ */
+cmd_t *prepare_startGtmMaster(void)
+{
+ cmd_t *cmdGtmCtl;
+
+ cmdGtmCtl = initCmd(sval(VAR_gtmMasterServer));
+ snprintf(newCommand(cmdGtmCtl), MAXLINE,
+ "[ -f %s/gtm.pid ] && gtm_ctl stop -Z gtm -D %s;"
+ "rm -f %s/register.node;"
+ "gtm_ctl start -Z gtm -D %s",
+ sval(VAR_gtmMasterDir),
+ sval(VAR_gtmMasterDir),
+ sval(VAR_gtmMasterDir),
+ sval(VAR_gtmMasterDir));
+ return cmdGtmCtl;
+}
+
+int start_gtm_master(void)
+{
+ cmdList_t *cmdList;
+ int rc = 0;
+
+ elog(INFO, "Start GTM master\n");
+ if (is_none(sval(VAR_gtmMasterServer)))
+ {
+ elog(INFO, "No GTM master specified, cannot start. Exiting!\n");
+ return rc;
+ }
+ cmdList = initCmdList();
+ addCmd(cmdList, prepare_startGtmMaster());
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ return(rc);
+}
+
+/*
+ * Start gtm slave ----------------------------------------------------
+ */
+cmd_t *prepare_startGtmSlave(void)
+{
+ cmd_t *cmdGtmCtl;
+
+ if (!isVarYes(VAR_gtmSlave) || (sval(VAR_gtmSlaveServer) == NULL) || is_none(sval(VAR_gtmSlaveServer)))
+ {
+ elog(ERROR, "ERROR: GTM slave is not configured.\n");
+ return(NULL);
+ }
+ cmdGtmCtl = initCmd(sval(VAR_gtmSlaveServer));
+ snprintf(newCommand(cmdGtmCtl), MAXLINE,
+ "[ -f %s/gtm.pid ] && gtm_ctl stop -Z gtm -D %s;"
+ "rm -rf %s/register.node;"
+ "gtm_ctl start -Z gtm -D %s",
+ sval(VAR_gtmSlaveDir),
+ sval(VAR_gtmSlaveDir),
+ sval(VAR_gtmSlaveDir),
+ sval(VAR_gtmSlaveDir));
+ return (cmdGtmCtl);
+}
+
+int start_gtm_slave(void)
+{
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+ int rc;
+
+ elog(INFO, "Start GTM slave");
+ cmdList = initCmdList();
+ if ((cmd = prepare_startGtmSlave()))
+ {
+ addCmd(cmdList, cmd);
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ elog(INFO, "Done.\n");
+ return(rc);
+ }
+ return 1;
+}
+
+/*
+ * Stop gtm master ---------------------------------------------------------
+ */
+cmd_t *prepare_stopGtmMaster(void)
+{
+ cmd_t *cmdGtmCtl;
+
+ cmdGtmCtl = initCmd(sval(VAR_gtmMasterServer));
+ snprintf(newCommand(cmdGtmCtl), MAXLINE,
+ "gtm_ctl stop -Z gtm -D %s",
+ sval(VAR_gtmMasterDir));
+ return(cmdGtmCtl);
+}
+
+int stop_gtm_master(void)
+{
+ cmdList_t *cmdList;
+ int rc;
+
+ elog(INFO, "Stop GTM master\n");
+ cmdList = initCmdList();
+ addCmd(cmdList, prepare_stopGtmMaster());
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ return(rc);
+}
+
+/*
+ * Stop gtm slave ---------------------------------------------------------------
+ */
+cmd_t *prepare_stopGtmSlave(void)
+{
+ cmd_t *cmdGtmCtl;
+
+ if (!isVarYes(VAR_gtmSlave) || (sval(VAR_gtmSlaveServer) == NULL) || is_none(sval(VAR_gtmSlaveServer)))
+ {
+ elog(ERROR, "ERROR: GTM slave is not configured.\n");
+ return(NULL);
+ }
+ cmdGtmCtl = initCmd(sval(VAR_gtmSlaveServer));
+ snprintf(newCommand(cmdGtmCtl), MAXLINE,
+ "gtm_ctl stop -Z gtm -D %s",
+ sval(VAR_gtmSlaveDir));
+ return(cmdGtmCtl);
+}
+
+int stop_gtm_slave(void)
+{
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+ int rc;
+
+ elog(INFO, "Stop GTM slave\n");
+ cmdList = initCmdList();
+ if ((cmd = prepare_stopGtmSlave()))
+ {
+ addCmd(cmdList, cmd);
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ return(rc);
+ }
+ return 1;
+}
+
+/*
+ * Kill gtm master -----------------------------------------------------
+ *
+ * You should not kill the gtm master this way: it may discard the latest
+ * gtm status. This is provided just in case. You should first try to stop
+ * the gtm master gracefully.
+ */
+cmd_t *prepare_killGtmMaster(void)
+{
+ cmd_t *cmdKill;
+ pid_t gtmPid;
+
+ cmdKill = initCmd(sval(VAR_gtmMasterServer));
+ gtmPid = get_gtm_pid(sval(VAR_gtmMasterServer), sval(VAR_gtmMasterDir));
+ if (gtmPid > 0)
+ snprintf(newCommand(cmdKill), MAXLINE,
+ "kill -9 %d; rm -rf /tmp/.s.'*'%d'*' %s/gtm.pid",
+ gtmPid, atoi(sval(VAR_gtmMasterPort)), sval(VAR_gtmMasterDir));
+ else
+ {
+ elog(WARNING, "WARNING: pid for gtm master was not found. Remove socket only.\n");
+ snprintf(newCommand(cmdKill), MAXLINE,
+ "rm -rf /tmp/.s.'*'%d'*' %s/gtm.pid",
+ atoi(sval(VAR_gtmMasterPort)), sval(VAR_gtmMasterDir));
+ }
+ return(cmdKill);
+}
+
+
+int kill_gtm_master(void)
+{
+ cmdList_t *cmdList;
+ cmd_t *cmd_killGtmMaster;
+ int rc;
+
+ elog(INFO, "Kill GTM master\n");
+ cmdList = initCmdList();
+ if ((cmd_killGtmMaster = prepare_killGtmMaster()))
+ {
+ addCmd(cmdList, cmd_killGtmMaster);
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ return(rc);
+ }
+ return 1;
+}
+
+/*
+ * Kill gtm slave --------------------------------------------------------
+ *
+ * GTM slave has no significant information to carry over, but it is a good
+ * habit to stop the gtm slave gracefully with the stop command.
+ */
+cmd_t *prepare_killGtmSlave(void)
+{
+ cmd_t *cmdKill;
+ pid_t gtmPid;
+
+ if (!isVarYes(VAR_gtmSlave) || (sval(VAR_gtmSlaveServer) == NULL) || is_none(sval(VAR_gtmSlaveServer)))
+ {
+ elog(ERROR, "ERROR: GTM slave is not configured.\n");
+ return(NULL);
+ }
+ cmdKill = initCmd(sval(VAR_gtmSlaveServer));
+ gtmPid = get_gtm_pid(sval(VAR_gtmSlaveServer), sval(VAR_gtmSlaveDir));
+ if (gtmPid > 0)
+ snprintf(newCommand(cmdKill), MAXLINE,
+ "kill -9 %d; rm -rf /tmp/.s.'*'%d'*' %s/gtm.pid",
+ gtmPid, atoi(sval(VAR_gtmSlavePort)), sval(VAR_gtmSlaveDir));
+ else
+ {
+ elog(WARNING, "WARNING: pid for gtm slave was not found. Remove socket only.\n");
+ snprintf(newCommand(cmdKill), MAXLINE,
+ "rm -rf /tmp/.s.'*'%d'*' %s/gtm.pid",
+ atoi(sval(VAR_gtmSlavePort)), sval(VAR_gtmSlaveDir));
+ }
+ return(cmdKill);
+}
+
+
+int kill_gtm_slave(void)
+{
+ cmdList_t *cmdList;
+ cmd_t *cmdKill;
+ int rc;
+
+ elog(INFO, "Kill GTM slave\n");
+ cmdList = initCmdList();
+ if ((cmdKill = prepare_killGtmSlave()))
+ {
+ addCmd(cmdList, cmdKill);
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ return(rc);
+ }
+ else return 1;
+}
+
+/*
+ * Failover the gtm ------------------------------------------------------
+ */
+int failover_gtm(void)
+{
+ char date[MAXTOKEN+1];
+ char *stdIn;
+ int rc;
+ FILE *f;
+
+ elog(INFO, "Failover gtm\n");
+ if (!isVarYes(VAR_gtmSlave) || (sval(VAR_gtmSlaveServer) == NULL) || is_none(sval(VAR_gtmSlaveServer)))
+ {
+ elog(ERROR, "ERROR: GTM slave is not configured. Cannot failover.\n");
+ return(1);
+ }
+
+ if (do_gtm_ping(sval(VAR_gtmSlaveServer), atoi(sval(VAR_gtmSlavePort))) != 0)
+ {
+ elog(ERROR, "ERROR: GTM slave is not running\n");
+ return(1);
+ }
+
+ /* Promote the slave */
+ elog(NOTICE, "Running \"gtm_ctl promote -Z gtm -D %s\"\n", sval(VAR_gtmSlaveDir));
+ rc = doImmediate(sval(VAR_gtmSlaveServer), NULL,
+ "gtm_ctl promote -Z gtm -D %s", sval(VAR_gtmSlaveDir));
+ if (WEXITSTATUS(rc) != 0)
+ {
+ elog(ERROR, "ERROR: could not promote gtm (host:%s, dir:%s)\n", sval(VAR_gtmSlaveServer), sval(VAR_gtmSlaveDir));
+ return 1;
+ }
+
+ /* Configure promoted gtm */
+ if ((f = prepareLocalStdin(newFilename(stdIn), MAXPATH, NULL)) == NULL)
+ return(1);
+ fprintf(f,
+ "#===================================================\n"
+ "# Updated due to GTM failover\n"
+ "# %s\n"
+ "startup = ACT\n"
+ "#----End of reconfiguration -------------------------\n",
+ timeStampString(date, MAXTOKEN+1));
+ fclose(f);
+ elog(NOTICE, "Updating gtm.conf at %s:%s\n", sval(VAR_gtmSlaveServer), sval(VAR_gtmSlaveDir));
+ rc = doImmediate(sval(VAR_gtmSlaveServer), stdIn, "cat >> %s/gtm.conf", sval(VAR_gtmSlaveDir));
+ if (WEXITSTATUS(rc) != 0)
+ {
+ elog(ERROR, "ERROR: could not update gtm.conf (host: %s, dir:%s)\n", sval(VAR_gtmSlaveServer), sval(VAR_gtmSlaveDir));
+ return 1;
+ }
+
+ /* Update and backup configuration file */
+ if ((f = prepareLocalStdin(stdIn, MAXPATH, NULL)) == NULL)
+ return(1);
+ fprintf(f,
+ "#===================================================\n"
+ "# pgxc configuration file updated due to GTM failover\n"
+ "# %s\n"
+ "gtmMasterServer=%s\n"
+ "gtmMasterPort=%s\n"
+ "gtmMasterDir=%s\n"
+ "gtmSlave=n\n"
+ "gtmSlaveServer=none\n"
+ "gtmSlavePort=0\n"
+ "gtmSlaveDir=none\n"
+ "#----End of reconfiguration -------------------------\n",
+ timeStampString(date, MAXTOKEN+1),
+ sval(VAR_gtmSlaveServer),
+ sval(VAR_gtmSlavePort),
+ sval(VAR_gtmSlaveDir));
+ fclose(f);
+ rc = doImmediate(NULL, stdIn, "cat >> %s", pgxc_ctl_config_path);
+ if (WEXITSTATUS(rc) != 0)
+ {
+ elog(ERROR, "ERROR: could not update gtm.conf (host: %s, dir:%s)\n", sval(VAR_gtmSlaveServer), sval(VAR_gtmSlaveDir));
+ return 1;
+ }
+ freeAndReset(stdIn);
+ backup_configuration();
+
+ /* Reconfigure myself */
+ assign_val(VAR_gtmMasterServer, VAR_gtmSlaveServer); reset_var(VAR_gtmSlaveServer);
+ assign_val(VAR_gtmMasterPort, VAR_gtmSlavePort); reset_var(VAR_gtmSlavePort);
+ assign_val(VAR_gtmMasterDir, VAR_gtmSlaveDir); reset_var(VAR_gtmSlaveDir);
+ assign_sval(VAR_gtmSlaveServer, "none");
+ assign_sval(VAR_gtmSlavePort, "0");
+ assign_sval(VAR_gtmSlaveDir, "none");
+ assign_sval(VAR_gtmSlave, "n");
+
+ return 0;
+}
+
+/*
+ * Clean gtm master resources -- directory and socket --------------------------
+ */
+cmd_t *prepare_cleanGtmMaster(void)
+{
+ cmd_t *cmd;
+
+ /* Remote work dir and clean the socket */
+ cmd = initCmd(sval(VAR_gtmMasterServer));
+ snprintf(newCommand(cmd), MAXLINE,
+ "rm -rf %s; mkdir -p %s; chmod 0700 %s;rm -f /tmp/.s.*%d*",
+ sval(VAR_gtmMasterDir), sval(VAR_gtmMasterDir), sval(VAR_gtmMasterDir),
+ atoi(sval(VAR_gtmMasterPort)));
+ return cmd;
+}
+
+int clean_gtm_master(void)
+{
+ cmdList_t *cmdList;
+ int rc;
+
+ elog(INFO, "Clearing gtm master directory and socket.\n");
+ cmdList = initCmdList();
+ addCmd(cmdList, prepare_cleanGtmMaster());
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ return(rc);
+}
+
+/*
+ * Clean gtm slave resources -- directory and socket --------------------------
+ */
+/*
+ * NULL will be returned if the gtm slave is not configured.
+ * Be careful: if you configure the gtm slave and the gtm master on the same
+ * server, both the slave and the master processes will be killed.
+ */
+cmd_t *prepare_cleanGtmSlave(void)
+{
+ cmd_t *cmd;
+
+ if (!isVarYes(VAR_gtmSlave) || is_none(sval(VAR_gtmSlaveServer)))
+ return(NULL);
+ cmd = initCmd(sval(VAR_gtmSlaveServer));
+ snprintf(newCommand(cmd), MAXLINE,
+ "rm -rf %s; mkdir -p %s; chmod 0700 %s;rm -f /tmp/.s.*%d*",
+ sval(VAR_gtmSlaveDir), sval(VAR_gtmSlaveDir), sval(VAR_gtmSlaveDir),
+ atoi(sval(VAR_gtmSlavePort)));
+ return cmd;
+}
+
+int clean_gtm_slave(void)
+{
+ cmdList_t *cmdList;
+ int rc;
+
+ elog(NOTICE, "Clearing gtm slave resources.\n");
+ if (!isVarYes(VAR_gtmSlave) || is_none(sval(VAR_gtmSlaveServer)))
+ {
+ elog(ERROR, "ERROR: gtm slave is not configured.\n");
+ return 1;
+ }
+ cmdList = initCmdList();
+ addCmd(cmdList, prepare_cleanGtmSlave());
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ elog(NOTICE, "Done.\n");
+ return(rc);
+}
+
+/*
+ * ==================================================================================
+ *
+ * Gtm Proxy Stuff
+ *
+ * ==================================================================================
+ */
+
+/*
+ * Add gtm proxy: to be used after all the configuration is done.
+ *
+ * This function only maintains the internal configuration, updates the
+ * configuration file, and makes a backup if configured. You should run
+ * init and start it separately.
+ */
+int add_gtmProxy(char *name, char *host, int port, char *dir)
+{
+ char port_s[MAXTOKEN+1];
+ char date[MAXTOKEN+1];
+ FILE *f;
+ char **nodelist = NULL;
+ int rc;
+
+ if (is_none(host))
+ {
+ elog(ERROR, "ERROR: Cannot add gtm proxy with the name \"none\".\n");
+ return 1;
+ }
+ if (is_none(dir))
+ {
+ elog(ERROR, "ERROR: Cannot add gtm proxy with the directory \"none\".\n");
+ return 1;
+ }
+ if (checkSpecificResourceConflict(name, host, port, dir, TRUE))
+ {
+ elog(ERROR, "ERROR: New specified name:%s, host:%s, port:%d and dir:\"%s\" conflicts with existing node.\n",
+ name, host, port, dir);
+ return 1;
+ }
+ if (!isVarYes(VAR_gtmProxy))
+ {
+ assign_sval(VAR_gtmProxy, Strdup("y"));
+ reset_var(VAR_gtmProxyNames);
+ reset_var(VAR_gtmProxyServers);
+ reset_var(VAR_gtmProxyPorts);
+ reset_var(VAR_gtmProxyDirs);
+ reset_var(VAR_gtmPxySpecificExtraConfig);
+ reset_var(VAR_gtmPxyExtraConfig);
+ }
+ add_val(find_var(VAR_gtmProxyNames), Strdup(name));
+ add_val(find_var(VAR_gtmProxyServers), Strdup(host));
+ snprintf(port_s, MAXTOKEN, "%d", port);
+ add_val(find_var(VAR_gtmProxyPorts), Strdup(port_s));
+ add_val(find_var(VAR_gtmProxyDirs), Strdup(dir));
+ add_val(find_var(VAR_gtmPxySpecificExtraConfig), Strdup("none"));
+ makeServerList();
+ if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+ {
+ /* Should it be panic? */
+ elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#===================================================\n"
+ "# pgxc configuration file updated due to GTM proxy (%s) addition\n"
+ "# %s\n",
+ name,
+ timeStampString(date, MAXTOKEN+1));
+ fprintSval(f, VAR_gtmProxy);
+ fprintAval(f, VAR_gtmProxyNames);
+ fprintAval(f, VAR_gtmProxyServers);
+ fprintAval(f, VAR_gtmProxyPorts);
+ fprintAval(f, VAR_gtmProxyDirs);
+ fprintAval(f, VAR_gtmPxySpecificExtraConfig);
+ fprintf(f, "%s", "#----End of reconfiguration -------------------------\n");
+ fclose(f);
+ AddMember(nodelist, name);
+ init_gtm_proxy(nodelist);
+ rc = start_gtm_proxy(nodelist);
+ CleanArray(nodelist);
+ return rc;
+}
+
+int remove_gtmProxy(char *name, bool clean_opt)
+{
+ FILE *f;
+ int idx;
+
+ /* Check if gtm proxy exists */
+ if ((idx = gtmProxyIdx(name)) < 0)
+ {
+ elog(ERROR, "ERROR: %s is not a gtm proxy.\n", name);
+ return 1;
+ }
+ /* Check if it is in use */
+ if (ifExists(VAR_coordMasterServers, aval(VAR_gtmProxyServers)[idx]) ||
+ ifExists(VAR_coordSlaveServers, aval(VAR_gtmProxyServers)[idx]) ||
+ ifExists(VAR_datanodeMasterServers, aval(VAR_gtmProxyServers)[idx]) ||
+ ifExists(VAR_datanodeSlaveServers, aval(VAR_gtmProxyServers)[idx]))
+ {
+ elog(ERROR, "ERROR: GTM Proxy %s is in use\n", name);
+ return 1;
+ }
+ elog(NOTICE, "NOTICE: removing gtm_proxy %s\n", name);
+ /* Clean */
+ if (clean_opt)
+ {
+ char **nodelist = NULL;
+
+ elog(NOTICE, "NOTICE: cleaning target resources.\n");
+ AddMember(nodelist, name);
+ clean_gtm_proxy(nodelist);
+ CleanArray(nodelist);
+ }
+ /* Reconfigure */
+ var_assign(&aval(VAR_gtmProxyNames)[idx], Strdup("none"));
+ var_assign(&aval(VAR_gtmProxyServers)[idx], Strdup("none"));
+ var_assign(&aval(VAR_gtmProxyPorts)[idx], Strdup("-1"));
+ var_assign(&aval(VAR_gtmProxyDirs)[idx], Strdup("none"));
+ handle_no_slaves();
+ makeServerList();
+ /* Update configuration file and backup it */
+ if ((f = fopen(pgxc_ctl_config_path, "a")) == NULL)
+ {
+ /* Should it be panic? */
+ elog(ERROR, "ERROR: cannot open configuration file \"%s\", %s\n", pgxc_ctl_config_path, strerror(errno));
+ return 1;
+ }
+ fprintf(f,
+ "#===================================================\n"
+ "# pgxc configuration file updated due to GTM proxy addition\n"
+ "# %s\n"
+ "%s=%s\n" /* gtmProxy */
+ "%s=( %s )\n" /* gtmProxyNames */
+ "%s=( %s )\n" /* gtmProxyServers */
+ "%s=( %s )\n" /* gtmProxyPorts */
+ "%s=( %s )\n" /* gtmProxyDirs */
+ "#----End of reconfiguration -------------------------\n",
+ timeStampString(date, MAXTOKEN+1),
+ VAR_gtmProxy, sval(VAR_gtmProxy),
+ VAR_gtmProxyNames, listValue(VAR_gtmProxyNames),
+ VAR_gtmProxyServers, listValue(VAR_gtmProxyServers),
+ VAR_gtmProxyPorts, listValue(VAR_gtmProxyPorts),
+ VAR_gtmProxyDirs, listValue(VAR_gtmProxyDirs));
+ fclose(f);
+ backup_configuration();
+ elog(NOTICE, "Done.\n");
+ return 0;
+}
+
+/*
+ * Returns NULL if the node name is not a configured GTM proxy.
+ */
+
+cmd_t *prepare_initGtmProxy(char *nodeName)
+{
+ cmd_t *cmdInitGtm, *cmdGtmProxyConf;
+ int idx;
+ FILE *f;
+ char timestamp[MAXTOKEN+1];
+ char **fileList = NULL;
+ char remoteDirCheck[MAXPATH * 2 + 128];
+
+ if ((idx = gtmProxyIdx(nodeName)) < 0)
+ {
+ elog(ERROR, "ERROR: Specified name %s is not GTM Proxy configuration.\n", nodeName);
+ return NULL;
+ }
+
+ remoteDirCheck[0] = '\0';
+ if (!forceInit)
+ {
+ sprintf(remoteDirCheck, "if [ '$(ls -A %s 2> /dev/null)' ]; then echo 'ERROR: "
+ "target directory (%s) exists and not empty. "
+ "Skip GTM proxy initilialization'; exit; fi;",
+ aval(VAR_gtmProxyDirs)[idx],
+ aval(VAR_gtmProxyDirs)[idx]);
+ }
+
+ /* Build directory and run initgtm */
+ cmdInitGtm = initCmd(aval(VAR_gtmProxyServers)[idx]);
+ snprintf(newCommand(cmdInitGtm), MAXLINE,
+ "%s"
+ "[ -f %s/gtm_proxy.pid ] && gtm_ctl -D %s -m immediate -Z gtm_proxy stop;"
+ "rm -rf %s;"
+ "mkdir -p %s;"
+ "PGXC_CTL_SILENT=1 initgtm -Z gtm_proxy -D %s",
+ remoteDirCheck,
+ aval(VAR_gtmProxyDirs)[idx],
+ aval(VAR_gtmProxyDirs)[idx],
+ aval(VAR_gtmProxyDirs)[idx],
+ aval(VAR_gtmProxyDirs)[idx],
+ aval(VAR_gtmProxyDirs)[idx]);
+
+ /* Configure gtm_proxy.conf */
+ appendCmdEl(cmdInitGtm, (cmdGtmProxyConf = initCmd(aval(VAR_gtmProxyServers)[idx])));
+ if ((f = prepareLocalStdin(newFilename(cmdGtmProxyConf->localStdin), MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmdInitGtm);
+ return NULL;
+ }
+ fprintf(f,
+ "#===========================\n"
+ "# Added at initialization, %s\n"
+ "nodename = '%s'\n"
+ "listen_addresses = '*'\n",
+ timeStampString(timestamp, MAXTOKEN),
+ aval(VAR_gtmProxyNames)[idx]);
+
+ fprintf(f,
+ "port = %s\n"
+ "gtm_host = '%s'\n"
+ "gtm_port = %s\n"
+ "worker_threads = 1\n"
+ "gtm_connect_retry_interval = 1\n"
+ "# End of addition\n",
+ aval(VAR_gtmProxyPorts)[idx],
+ sval(VAR_gtmMasterServer),
+ sval(VAR_gtmMasterPort));
+
+ if (!is_none(sval(VAR_gtmPxyExtraConfig)))
+ AddMember(fileList, sval(VAR_gtmPxyExtraConfig));
+ if (!is_none(sval(VAR_gtmPxySpecificExtraConfig)))
+ AddMember(fileList, sval(VAR_gtmPxySpecificExtraConfig));
+ appendFiles(f, fileList);
+ CleanArray(fileList);
+
+ fclose(f);
+ snprintf(newCommand(cmdGtmProxyConf), MAXLINE,
+ "cat >> %s/gtm_proxy.conf", aval(VAR_gtmProxyDirs)[idx]);
+ return(cmdInitGtm);
+}
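+
+/*
+ * Illustrative result only (hypothetical names, ports and directories): for a
+ * proxy "gtm_pxy1" on port 20001 with the GTM master at host1:20000, the
+ * lines appended to gtm_proxy.conf by the commands above would read:
+ *
+ *   nodename = 'gtm_pxy1'
+ *   listen_addresses = '*'
+ *   port = 20001
+ *   gtm_host = 'host1'
+ *   gtm_port = 20000
+ *   worker_threads = 1
+ *   gtm_connect_retry_interval = 1
+ */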
+
+/*
+ * Initialize gtm proxy -------------------------------------------------------
+ */
+int init_gtm_proxy(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ int rc;
+ cmdList_t *cmdList;
+ cmd_t *cmdInitGtmPxy;
+
+ if (!isVarYes(VAR_gtmProxy))
+ {
+ elog(ERROR, "ERROR: GTM Proxy is not configured.\n");
+ return 1;
+ }
+ actualNodeList = makeActualNodeList(nodeList);
+ /* Init and run initgtm */
+ cmdList = initCmdList();
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ elog(NOTICE, "Initializing gtm proxy %s.\n", actualNodeList[ii]);
+ if ((cmdInitGtmPxy = prepare_initGtmProxy(actualNodeList[ii])))
+ addCmd(cmdList, cmdInitGtmPxy);
+ else
+ elog(WARNING, "WARNING: %s is not a gtm proxy.\n", actualNodeList[ii]);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ elog(NOTICE, "Done.\n");
+ return(rc);
+}
+
+
+int init_gtm_proxy_all(void)
+{
+ elog(NOTICE, "Initialize all the gtm proxies.\n");
+ if (!isVarYes(VAR_gtmProxy))
+ {
+ elog(ERROR, "ERROR: GTM Proxy is not configured.\n");
+ return(1);
+ }
+ return(init_gtm_proxy(aval(VAR_gtmProxyNames)));
+}
+
+/*
+ * Start gtm proxy -----------------------------------------------------------
+ */
+cmd_t *prepare_startGtmProxy(char *nodeName)
+{
+ cmd_t *cmd;
+ int idx;
+
+ if ((idx = gtmProxyIdx(nodeName)) < 0)
+ {
+ elog(ERROR, "ERROR: Specified name %s is not GTM Proxy\n", nodeName);
+ return(NULL);
+ }
+ cmd = initCmd(aval(VAR_gtmProxyServers)[idx]);
+ snprintf(newCommand(cmd), MAXLINE,
+ "[ -f %s/gtm_proxy.pid ] && gtm_ctl -D %s -m immediate -Z gtm_proxy stop;"
+ "gtm_ctl start -Z gtm_proxy -D %s",
+ aval(VAR_gtmProxyDirs)[idx],
+ aval(VAR_gtmProxyDirs)[idx],
+ aval(VAR_gtmProxyDirs)[idx]);
+ return(cmd);
+}
+
+int start_gtm_proxy(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ int rc;
+ cmdList_t *cmdList;
+
+ if (!isVarYes(VAR_gtmProxy))
+ {
+ elog(ERROR, "ERROR: GTM Proxy is not configured.\n");
+ return(1);
+ }
+ actualNodeList = makeActualNodeList(nodeList);
+ /* Init and run initgtm */
+ cmdList = initCmdList();
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ cmd_t *cmd;
+ elog(NOTICE, "Starting gtm proxy %s.\n", actualNodeList[ii]);
+ if ((cmd = prepare_startGtmProxy(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ else
+ elog(WARNING, "WARNING: %s is not a gtm proxy.\n", actualNodeList[ii]);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ elog(NOTICE, "Done.\n");
+ return(rc);
+}
+
+int start_gtm_proxy_all(void)
+{
+ elog(NOTICE, "Starting all the gtm proxies.\n");
+ return(start_gtm_proxy(aval(VAR_gtmProxyNames)));
+}
+
+/*
+ * Stop gtm proxy -------------------------------------------------------------
+ */
+cmd_t *prepare_stopGtmProxy(char *nodeName)
+{
+ cmd_t *cmd;
+ int idx;
+
+ if ((idx = gtmProxyIdx(nodeName)) < 0)
+ {
+ elog(ERROR, "ERROR: Specified name %s is not GTM Proxy\n", nodeName);
+ return NULL;
+ }
+ cmd = initCmd(aval(VAR_gtmProxyServers)[idx]);
+ snprintf(newCommand(cmd), MAXLINE,
+ "gtm_ctl stop -Z gtm_proxy -D %s",
+ aval(VAR_gtmProxyDirs)[idx]);
+ return(cmd);
+}
+
+
+int stop_gtm_proxy(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ int rc;
+ cmdList_t *cmdList;
+
+ if (!isVarYes(VAR_gtmProxy))
+ {
+ elog(ERROR, "ERROR: GTM Proxy is not configured.\n");
+ return(1);
+ }
+ actualNodeList = makeActualNodeList(nodeList);
+ /* Init and run initgtm */
+ cmdList = initCmdList();
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ cmd_t *cmd;
+
+ elog(NOTICE, "Stopping gtm proxy %s.\n", actualNodeList[ii]);
+ if ((cmd = prepare_stopGtmProxy(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ else
+ elog(WARNING, "WARNING: %s is not a gtm proxy.\n", actualNodeList[ii]);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ elog(NOTICE, "Done.\n");
+ return(rc);
+}
+
+int stop_gtm_proxy_all(void)
+{
+ elog(NOTICE, "Stopping all the gtm proxies.\n");
+ return(stop_gtm_proxy(aval(VAR_gtmProxyNames)));
+}
+
+/*
+ * Kill gtm proxy -------------------------------------------------------------------
+ *
+ * Although a gtm proxy does not keep significant resources to carry over to the
+ * next run, it is still good practice to stop it gracefully with the stop command.
+ */
+cmd_t *prepare_killGtmProxy(char *nodeName)
+{
+ cmd_t *cmd;
+ int idx;
+ pid_t gtmPxyPid;
+
+ if ((idx = gtmProxyIdx(nodeName)) < 0)
+ {
+ elog(ERROR, "ERROR: Specified name %s is not GTM Proxy\n", nodeName);
+ return NULL;
+ }
+ cmd = initCmd(aval(VAR_gtmProxyServers)[idx]);
+ gtmPxyPid = get_gtmProxy_pid(aval(VAR_gtmProxyServers)[idx], aval(VAR_gtmProxyDirs)[idx]);
+ if (gtmPxyPid > 0)
+ snprintf(newCommand(cmd), MAXLINE,
+ "kill -9 %d; rm -rf /tmp/.s.'*'%d'*' %s/gtm_proxy.pid",
+ gtmPxyPid, atoi(aval(VAR_gtmProxyPorts)[idx]), aval(VAR_gtmProxyDirs)[idx]);
+ else
+ {
+ elog(WARNING, "WARNING: pid for gtm proxy \"%s\" was not found. Remove socket only.\n", nodeName);
+ snprintf(newCommand(cmd), MAXLINE,
+ "rm -rf /tmp/.s.'*'%d'*' %s/gtm_proxy.pid",
+ atoi(aval(VAR_gtmProxyPorts)[idx]), aval(VAR_gtmProxyDirs)[idx]);
+ }
+ return(cmd);
+}
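+
+/*
+ * For illustration only (hypothetical pid, port and directory): with pid
+ * 12345, port 20001 and directory /data/gtm_pxy1, the command built above
+ * expands to:
+ *
+ *   kill -9 12345; rm -rf /tmp/.s.'*'20001'*' /data/gtm_pxy1/gtm_proxy.pid
+ */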
+
+int kill_gtm_proxy(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ int rc;
+ cmdList_t *cmdList;
+
+ if (!isVarYes(VAR_gtmProxy))
+ {
+ elog(ERROR, "ERROR: GTM Proxy is not configured.\n");
+ return(1);
+ }
+ actualNodeList = makeActualNodeList(nodeList);
+ /* Init and run initgtm */
+ cmdList = initCmdList();
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ cmd_t *cmd;
+
+ elog(NOTICE, "Killing process of gtm proxy %s.\n", actualNodeList[ii]);
+ if ((cmd = prepare_killGtmProxy(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ else
+ elog(WARNING, "WARNING: %s is not a gtm proxy.\n", actualNodeList[ii]);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ return(rc);
+}
+
+int kill_gtm_proxy_all(void)
+{
+ elog(NOTICE, "Killing all the gtm proxy processes.\n");
+ return(kill_gtm_proxy(aval(VAR_gtmProxyNames)));
+}
+
+/*
+ * Reconnect to the current GTM master --------------------------------------------------
+ *
+ * After a failover, the current master must have been updated.
+ * Remember to update the gtm_proxy configuration file so that the proxy
+ * connects to the new master at the next start.
+ * Please note that we assume GTM has already been failed over.
+ * The first argument is the gtm_proxy node name.
+ */
+cmd_t *prepare_reconnectGtmProxy(char *nodeName)
+{
+ cmd_t *cmdGtmCtl, *cmdGtmProxyConf;
+ int idx;
+ FILE *f;
+ char date[MAXTOKEN+1];
+
+ if ((idx = gtmProxyIdx(nodeName)) < 0)
+ {
+ elog(ERROR, "ERROR: Specified name %s is not GTM Proxy\n", nodeName);
+ return(NULL);
+ }
+
+ /* gtm_ctl reconnect */
+ cmdGtmCtl = initCmd(aval(VAR_gtmProxyServers)[idx]);
+ snprintf(newCommand(cmdGtmCtl), MAXLINE,
+ "gtm_ctl reconnect -Z gtm_proxy -D %s -o \\\"-s %s -t %s\\\"",
+ aval(VAR_gtmProxyDirs)[idx], sval(VAR_gtmMasterServer), sval(VAR_gtmMasterPort));
+
+ /* gtm_proxy.conf */
+ appendCmdEl(cmdGtmCtl, (cmdGtmProxyConf = initCmd(aval(VAR_gtmProxyServers)[idx])));
+ if ((f = prepareLocalStdin(newFilename(cmdGtmProxyConf->localStdin), MAXPATH, NULL)) == NULL)
+ {
+ cleanCmd(cmdGtmCtl);
+ return(NULL);
+ }
+ fprintf(f,
+ "#===================================================\n"
+ "# Updated due to GTM Proxy reconnect\n"
+ "# %s\n"
+ "gtm_host = '%s'\n"
+ "gtm_port = %s\n"
+ "#----End of reconfiguration -------------------------\n",
+ timeStampString(date, MAXTOKEN),
+ sval(VAR_gtmMasterServer),
+ sval(VAR_gtmMasterPort));
+ fclose(f);
+ snprintf(newCommand(cmdGtmProxyConf), MAXLINE,
+ "cat >> %s/gtm_proxy.conf", aval(VAR_gtmProxyDirs)[idx]);
+ return(cmdGtmCtl);
+}
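+
+/*
+ * For illustration only (hypothetical host, port and directory): with the new
+ * GTM master at host1:20000 and a proxy directory of /data/gtm_pxy1, the two
+ * commands prepared above amount to:
+ *
+ *   gtm_ctl reconnect -Z gtm_proxy -D /data/gtm_pxy1 -o "-s host1 -t 20000"
+ *   cat >> /data/gtm_pxy1/gtm_proxy.conf    (appending the new gtm_host/gtm_port)
+ */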
+
+
+int reconnect_gtm_proxy(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ int rc;
+ cmdList_t *cmdList;
+
+ if (!isVarYes(VAR_gtmProxy))
+ {
+ elog(ERROR, "GTM Proxy is not configured.\n");
+ return(1);
+ }
+ actualNodeList = makeActualNodeList(nodeList);
+ /* Init and run initgtm */
+ cmdList = initCmdList();
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ cmd_t *cmd;
+
+ elog(NOTICE, "Reconnecting gtm proxy %s.\n", actualNodeList[ii]);
+ if((cmd = prepare_reconnectGtmProxy(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ else
+ elog(WARNING, "WARNING: %s is not a gtm proxy.\n", actualNodeList[ii]);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ elog(NOTICE, "Done.\n");
+ return(rc);
+}
+
+int reconnect_gtm_proxy_all(void)
+{
+ elog(NOTICE, "Reconnecting all the gtm proxies to the new one.\n");
+ return(reconnect_gtm_proxy(aval(VAR_gtmProxyNames)));
+}
+
+/*
+ * Cleanup -- nodeName must be valid; otherwise, NULL is returned.
+ */
+cmd_t *prepare_cleanGtmProxy(char *nodeName)
+{
+ cmd_t *cmd;
+ int idx;
+
+ if ((idx = gtmProxyIdx(nodeName)) < 0)
+ return NULL;
+ cmd = initCmd(aval(VAR_gtmProxyServers)[idx]);
+ snprintf(newCommand(cmd), MAXLINE,
+ "rm -rf %s; mkdir -p %s; chmod 0700 %s;rm -f /tmp/.s.*%d*",
+ aval(VAR_gtmProxyDirs)[idx], aval(VAR_gtmProxyDirs)[idx], aval(VAR_gtmProxyDirs)[idx],
+ atoi(aval(VAR_gtmProxyPorts)[idx]));
+ return cmd;
+}
+
+int clean_gtm_proxy(char **nodeList)
+{
+ char **actualNodeList;
+ cmdList_t *cmdList;
+ cmd_t *cmd;
+ int ii;
+ int rc;
+
+ actualNodeList = makeActualNodeList(nodeList);
+ cmdList = initCmdList();
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ elog(NOTICE, "Clearing resources for gtm_proxy %s.\n", actualNodeList[ii]);
+ if ((cmd = prepare_cleanGtmProxy(actualNodeList[ii])))
+ addCmd(cmdList, cmd);
+ else
+ elog(WARNING, "%s is not a gtm proxy.\n", actualNodeList[ii]);
+ }
+ rc = doCmdList(cmdList);
+ cleanCmdList(cmdList);
+ CleanArray(actualNodeList);
+ elog(NOTICE, "Done.\n");
+ return(rc);
+}
+
+int clean_gtm_proxy_all(void)
+{
+ elog(NOTICE, "Clearing all the gtm_proxy resources.\n");
+ return(clean_gtm_proxy(aval(VAR_gtmProxyNames)));
+}
+
+/*
+ * configuration --------------------------------------------------------------------
+ */
+int show_config_gtmMaster(int flag, char *hostname)
+{
+ char lineBuf[MAXLINE+1];
+ char editBuf[MAXPATH+1];
+
+ lineBuf[0] = 0;
+ if (flag)
+ strncat(lineBuf, "GTM Master: ", MAXLINE);
+ if (hostname)
+ {
+ snprintf(editBuf, MAXPATH, "host: %s", hostname);
+ strncat(lineBuf, editBuf, MAXLINE);
+ }
+ if (flag || hostname)
+ strncat(lineBuf, "\n", MAXLINE);
+ lockLogFile();
+ if (lineBuf[0])
+ elog(NOTICE, "%s", lineBuf);
+ print_simple_node_info(sval(VAR_gtmName), sval(VAR_gtmMasterPort), sval(VAR_gtmMasterDir),
+ sval(VAR_gtmExtraConfig), sval(VAR_gtmMasterSpecificExtraConfig));
+ unlockLogFile();
+ return 0;
+}
+
+int show_config_gtmSlave(int flag, char *hostname)
+{
+ char lineBuf[MAXLINE+1];
+ char editBuf[MAXPATH+1];
+
+ if (!isVarYes(VAR_gtmSlave) || is_none(sval(VAR_gtmSlaveServer)))
+ {
+ elog(ERROR, "ERROR: gtm slave is not configured.\n");
+ return 0;
+ }
+ lineBuf[0] = 0;
+ if (flag)
+ strncat(lineBuf, "GTM Slave: ", MAXLINE);
+ if (hostname)
+ {
+ snprintf(editBuf, MAXPATH, "host: %s", hostname);
+ strncat(lineBuf, editBuf, MAXLINE);
+ }
+ if (flag || hostname)
+ strncat(lineBuf, "\n", MAXLINE);
+ lockLogFile();
+ elog(NOTICE, "%s", lineBuf);
+ print_simple_node_info(sval(VAR_gtmSlaveName), sval(VAR_gtmSlavePort), sval(VAR_gtmSlaveDir),
+ sval(VAR_gtmExtraConfig), sval(VAR_gtmSlaveSpecificExtraConfig));
+ unlockLogFile();
+ return 0;
+}
+
+int show_config_gtmProxies(char **nameList)
+{
+ int ii;
+
+ lockLogFile();
+ for(ii = 0; nameList[ii]; ii++)
+ show_config_gtmProxy(TRUE, ii, aval(VAR_gtmProxyServers)[ii]);
+ unlockLogFile();
+ return 0;
+}
+
+int show_config_gtmProxy(int flag, int idx, char *hostname)
+{
+ char lineBuf[MAXLINE+1];
+ char editBuf[MAXPATH+1];
+
+ lineBuf[0] = 0;
+ if (flag)
+ strncat(lineBuf, "GTM Proxy: ", MAXLINE);
+ if (hostname)
+ {
+ snprintf(editBuf, MAXPATH, "host: %s", hostname);
+ strncat(lineBuf, editBuf, MAXLINE);
+ }
+ if (flag || hostname)
+ strncat(lineBuf, "\n", MAXLINE);
+ lockLogFile();
+ if (lineBuf[0])
+ elog(NOTICE, "%s", lineBuf);
+ print_simple_node_info(aval(VAR_gtmProxyNames)[idx], aval(VAR_gtmProxyPorts)[idx],
+ aval(VAR_gtmProxyDirs)[idx], sval(VAR_gtmPxyExtraConfig),
+ aval(VAR_gtmPxySpecificExtraConfig)[idx]);
+ unlockLogFile();
+ return 0;
+}
diff --git a/contrib/pgxc_ctl/gtm_cmd.h b/contrib/pgxc_ctl/gtm_cmd.h
new file mode 100644
index 0000000000..07f7f8dea9
--- /dev/null
+++ b/contrib/pgxc_ctl/gtm_cmd.h
@@ -0,0 +1,75 @@
+/*-------------------------------------------------------------------------
+ *
+ * gtm_cmd.h
+ *
+ * GTM command module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef GTM_CMD_H
+#define GTM_CMD_H
+
+#include "gtm/gtm_c.h"
+#include "utils.h"
+
+extern int init_gtm_master(bool stop);
+extern int init_gtm_slave(void);
+extern int init_gtm_proxy(char **nodeList);
+extern int init_gtm_proxy_all(void);
+extern cmd_t *prepare_initGtmMaster(bool stop);
+extern cmd_t *prepare_initGtmSlave(void);
+extern cmd_t *prepare_initGtmProxy(char *nodeName);
+
+extern int add_gtmMaster(char *name, char *host, int port, char *dir);
+extern int add_gtmSlave(char *name, char *host, int port, char *dir);
+extern int add_gtmProxy(char *name, char *host, int port, char *dir);
+extern int remove_gtmMaster(bool clean_opt);
+extern int remove_gtmSlave(bool clean_opt);
+extern int remove_gtmProxy(char *name, bool clean_opt);
+
+extern int kill_gtm_master(void);
+extern int kill_gtm_slave(void);
+extern int kill_gtm_proxy(char **nodeList);
+extern int kill_gtm_proxy_all(void);
+extern cmd_t *prepare_killGtmMaster(void);
+extern cmd_t *prepare_killGtmSlave(void);
+extern cmd_t *prepare_killGtmProxy(char *nodeName);
+
+extern int show_config_gtmMaster(int flag, char *hostname);
+extern int show_config_gtmSlave(int flag, char *hostname);
+extern int show_config_gtmProxy(int flag, int idx, char *hostname);
+extern int show_config_gtmProxies(char **nameList);
+
+extern int start_gtm_master(void);
+extern int start_gtm_slave(void);
+extern int start_gtm_proxy(char **nodeList);
+extern int start_gtm_proxy_all(void);
+extern cmd_t *prepare_startGtmMaster(void);
+extern cmd_t *prepare_startGtmSlave(void);
+extern cmd_t *prepare_startGtmProxy(char *nodeName);
+
+extern int stop_gtm_master(void);
+extern int stop_gtm_slave(void);
+extern int stop_gtm_proxy(char **nodeList);
+extern int stop_gtm_proxy_all(void);
+extern cmd_t *prepare_stopGtmMaster(void);
+extern cmd_t *prepare_stopGtmSlave(void);
+extern cmd_t *prepare_stopGtmProxy(char *nodeName);
+
+extern int failover_gtm(void);
+extern int reconnect_gtm_proxy(char **nodeList);
+extern int reconnect_gtm_proxy_all(void);
+extern cmd_t *prepare_reconnectGtmProxy(char *nodeName);
+
+extern int clean_gtm_master(void);
+extern int clean_gtm_slave(void);
+extern cmd_t *prepare_cleanGtmMaster(void);
+extern cmd_t *prepare_cleanGtmSlave(void);
+
+extern int clean_gtm_proxy(char **nodeList);
+extern int clean_gtm_proxy_all(void);
+extern cmd_t *prepare_cleanGtmProxy(char *nodeName);
+
+#endif /* GTM_CMD_H */
diff --git a/contrib/pgxc_ctl/gtm_util.c b/contrib/pgxc_ctl/gtm_util.c
new file mode 100644
index 0000000000..20a64071a8
--- /dev/null
+++ b/contrib/pgxc_ctl/gtm_util.c
@@ -0,0 +1,168 @@
+/*-------------------------------------------------------------------------
+ *
+ * gtm_util.c
+ *
+ * GTM utility module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+/*
+ * This module was imported from Koichi's personal development.
+ *
+ * Provides unregistration of the nodes from gtm. This operation might be
+ * needed after some node crashes and its registration information remains
+ * in GTM.
+ */
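+
+/*
+ * For illustration only (hypothetical node names): the line handed to
+ * unregisterFromGtm() below is the remainder of a pgxc_ctl command such as:
+ *
+ *   unregister -Z gtm_proxy gtm_pxy1
+ *   unregister -n pgxc_ctl -Z datanode datanode1
+ */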
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+/*
+#include "gtm/gtm_c.h"
+*/
+#include "gtm/gtm_client.h"
+#include "gtm/libpq-fe.h"
+#include "utils.h"
+#include "variables.h"
+/* This is an ugly hack to avoid conflict between gtm_c.h and pgxc_ctl.h */
+#undef true
+#undef false
+#include "pgxc_ctl_log.h"
+#include "varnames.h"
+#include "config.h"
+#include "gtm_util.h"
+
+typedef enum command_t
+{
+ CMD_INVALID = 0,
+ CMD_UNREGISTER
+} command_t;
+
+static char *nodename = NULL;
+static char *myname = NULL;
+static GTM_PGXCNodeType nodetype = 0; /* Invalid */
+#define GetToken() (line = get_word(line, &token))
+#define testToken(word) ((token != NULL) && (strcmp(token, word) == 0))
+#define TestToken(word) ((token != NULL) && (strcasecmp(token, word) == 0))
+
+static int inputError(char *msg)
+{
+ elog(ERROR, "%s\n", msg);
+ return -1;
+}
+
+int unregisterFromGtm(char *line)
+{
+ char *token;
+ int rc;
+
+ for(;GetToken();)
+ {
+ if (testToken("-n"))
+ {
+ if (!GetToken())
+ return(inputError("No -n option value was found."));
+ Free(myname);
+ myname = Strdup(token);
+ continue;
+ }
+ else if (testToken("-Z"))
+ {
+ if (!GetToken())
+ return(inputError("No -Z option value was found."));
+ if (testToken("gtm"))
+ {
+ nodetype = GTM_NODE_GTM;
+ break;
+ }
+ else if (testToken("gtm_proxy"))
+ {
+ nodetype = GTM_NODE_GTM_PROXY;
+ break;
+ }
+ else if (testToken("gtm_proxy_postmaster"))
+ {
+ nodetype = GTM_NODE_GTM_PROXY_POSTMASTER;
+ break;
+ }
+ else if (testToken("coordinator"))
+ {
+ nodetype = GTM_NODE_COORDINATOR;
+ break;
+ }
+ else if (testToken("datanode"))
+ {
+ nodetype = GTM_NODE_DATANODE;
+ break;
+ }
+ else
+ {
+ elog(ERROR, "ERROR: Invalid -Z option value, %s\n", token);
+ return(-1);
+ }
+ continue;
+ }
+ else
+ break;
+ }
+ if (nodetype == 0)
+ {
+ elog(ERROR, "ERROR: no node type was specified.\n");
+ return(-1);
+ }
+
+ if (myname == NULL)
+ myname = Strdup(DefaultName);
+
+ if (!token)
+ {
+ fprintf(stderr,"%s: No command specified.\n", progname);
+ exit(2);
+ }
+ if (!GetToken())
+ {
+ elog(ERROR, "ERROR: unregister: no node name was found to unregister.\n");
+ return(-1);
+ }
+ nodename = Strdup(token);
+ rc = process_unregister_command(nodetype, nodename);
+ Free(nodename);
+ return(rc);
+}
+
+static GTM_Conn *connectGTM()
+{
+ char connect_str[MAXLINE+1];
+
+ /* Use 60s as connection timeout */
+ snprintf(connect_str, MAXLINE, "host=%s port=%d node_name=%s remote_type=%d postmaster=0 connect_timeout=60",
+ sval(VAR_gtmMasterServer), atoi(sval(VAR_gtmMasterPort)), (myname == NULL) ? DefaultName : myname, GTM_NODE_COORDINATOR);
+ return(PQconnectGTM(connect_str));
+}
+
+int process_unregister_command(GTM_PGXCNodeType type, char *nodename)
+{
+ GTM_Conn *conn;
+ int res;
+
+ conn = connectGTM();
+ if (conn == NULL)
+ {
+ elog(ERROR, "ERROR: failed to connect to GTM\n");
+ return -1;
+ }
+ res = node_unregister(conn, type, nodename);
+ if (res == GTM_RESULT_OK)
+ {
+ elog(NOTICE, "unregistered %s from GTM.\n", nodename);
+ GTMPQfinish(conn);
+ return 0;
+ }
+ else
+ {
+ elog(ERROR, "ERROR: Failed to unregister %s from GTM.\n", nodename);
+ GTMPQfinish(conn);
+ return res;
+ }
+}
diff --git a/contrib/pgxc_ctl/gtm_util.h b/contrib/pgxc_ctl/gtm_util.h
new file mode 100644
index 0000000000..c4e209977e
--- /dev/null
+++ b/contrib/pgxc_ctl/gtm_util.h
@@ -0,0 +1,23 @@
+/*-------------------------------------------------------------------------
+ *
+ * gtm_util.h
+ *
+ * GTM utility module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef GTM_UTIL_H
+#define GTM_UTIL_H
+
+#include "gtm/gtm_client.h"
+#include "gtm/libpq-fe.h"
+
+extern int unregisterFromGtm(char *line);
+extern int process_unregister_command(GTM_PGXCNodeType type, char *nodename);
+#define unregister_gtm_proxy(name) do{process_unregister_command(GTM_NODE_GTM_PROXY, name);}while(0)
+#define unregister_coordinator(name) do{process_unregister_command(GTM_NODE_COORDINATOR, name);}while(0)
+#define unregister_datanode(name) do{process_unregister_command(GTM_NODE_DATANODE, name);}while(0)
+
+#endif /* GTM_UTIL_H */
diff --git a/contrib/pgxc_ctl/make_signature b/contrib/pgxc_ctl/make_signature
new file mode 100755
index 0000000000..09990714a9
--- /dev/null
+++ b/contrib/pgxc_ctl/make_signature
@@ -0,0 +1,202 @@
+#!/usr/bin/env bash
+#--------------------------------------------------------------------
+#
+# make_signature
+#
+# Bash script building module of pgxc_ctl.
+#
+# Copyright (c) 2012 Postgres-XC Development Group
+#
+#---------------------------------------------------------------------
+#
+# This module is used to create signature.h and pgxc_ctl_bash.c files.
+#
+# pgxc_ctl_bash.c contains two pieces of information:
+# 1. A bash script that reads the pgxc_ctl configuration and writes
+#    it back to pgxc_ctl. This way, users can use their familiar bash
+#    scripting to configure a postgres-xc cluster.
+#    It includes a typical (test) configuration so that pgxc_ctl
+#    can run even with an incomplete configuration.
+# 2. Template postgres-xc cluster configurations used by pgxc_ctl.
+#    You can extract a template by typing the "prepare config" command.
+#
+# signature.h contains signature information, which is useful for
+# checking that the bash script and the pgxc_ctl binary come from
+# the same build.
+#
+# At present, the bash script is installed each time pgxc_ctl is invoked
+# and uninstalled afterwards, so the signature has no significant role.
+# In the future, when we need to maintain this bash script, it will help
+# enforce the integrity between the two.
+#------------------------------------------------------------------------
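+#
+# For illustration only (hypothetical input): the ex commands below turn a
+# script line such as
+#     echo "hello"
+# into the following C string-array element of pgxc_ctl_bash.c:
+#     "echo \"hello\"",
+# while lines consisting of #ifdef XCP or #endif are left as bare C
+# preprocessor directives so the embedded script can be built conditionally.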
+
+sig=`date +%y%m%d_%H%M_%N`
+cat > signature.h <<EOF
+/*-------------------------------------------------------------------------
+ *
+ * signature.h
+ *
+ * Signature of module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef SIGNATURE_H
+#define SIGNATURE_H
+/* Signature file to identify the make */
+EOF
+echo '#'define signature \"$sig\" >> signature.h
+cat >> signature.h <<EOF
+#endif /* SIGNATURE_H */
+EOF
+
+
+
+cp pgxc_ctl_bash_2 pgxc_ctl_bash.c.wk
+ex pgxc_ctl_bash.c.wk <<EOF
+%s/"/\\\"/ge
+w
+%s/^\(.*\)$/"\1",/e
+%s/^"#ifdef XCP",$/#ifdef XCP/e
+%s/^"#endif",$/#endif/e
+wq
+EOF
+
+cat > pgxc_ctl_bash.c <<EOF
+/*
+ *-----------------------------------------------------------------------
+ *
+ * pgxc_ctl_bash.c
+ *
+ * Bash script body for Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *------------------------------------------------------------------------
+ *
+ * This file was created by the make_signature utility when pgxc_ctl was built.
+ *
+ * pgxc_ctl uses this bash script to configure postgres-xc and read
+ * configuration.
+ *
+ * This provides users with a very flexible way to configure their own
+ * postgres-xc cluster. For example, by using extra variables and scripting,
+ * you can avoid typing the same (or similar) variable values again and again.
+ */
+
+#include <stddef.h>
+
+/*
+ * Bash script to read pgxc_ctl configuration parameters and write
+ * back to itself.
+ *
+ * This part is written to the pgxc_ctl work directory and reads the
+ * configuration file, which is also written as a bash script.
+ */
+
+char *pgxc_ctl_bash_script[] = {
+EOF
+
+cat pgxc_ctl_bash.c.wk >> pgxc_ctl_bash.c
+
+cat >> pgxc_ctl_bash.c <<EOF
+NULL
+};
+
+EOF
+
+rm pgxc_ctl_bash.c.wk
+
+cp pgxc_ctl_conf_part_full pgxc_ctl_conf_part.wk
+
+ex pgxc_ctl_conf_part.wk <<EOF
+%s/"/\\\"/ge
+w
+%s/^\(.*\)$/"\1",/e
+%s/^"#ifdef XCP",$/#ifdef XCP/e
+%s/^"#endif",$/#endif/e
+wq
+EOF
+
+cat >> pgxc_ctl_bash.c <<EOF
+/*
+ * Prototype of the full pgxc_ctl configuration file.
+ *
+ * It should be self-describing. It can be extracted to your pgxc_ctl
+ * work directory with the 'prepare config' command.
+ */
+
+char *pgxc_ctl_conf_prototype[] = {
+EOF
+
+cat pgxc_ctl_conf_part.wk >> pgxc_ctl_bash.c
+
+cat >> pgxc_ctl_bash.c <<EOF
+NULL
+};
+EOF
+
+rm pgxc_ctl_conf_part.wk
+
+cp pgxc_ctl_conf_part_minimal pgxc_ctl_conf_part.wk
+
+ex pgxc_ctl_conf_part.wk <<EOF
+%s/"/\\\"/ge
+w
+%s/^\(.*\)$/"\1",/e
+%s/^"#ifdef XCP",$/#ifdef XCP/e
+%s/^"#endif",$/#endif/e
+wq
+EOF
+
+cat >> pgxc_ctl_bash.c <<EOF
+/*
+ * Prototype of the minimal pgxc_ctl configuration file.
+ *
+ * It should be self-describing. It can be extracted to your pgxc_ctl
+ * work directory with the 'prepare config' command.
+ */
+
+char *pgxc_ctl_conf_prototype_minimal[] = {
+EOF
+
+cat pgxc_ctl_conf_part.wk >> pgxc_ctl_bash.c
+
+cat >> pgxc_ctl_bash.c <<EOF
+NULL
+};
+EOF
+
+rm pgxc_ctl_conf_part.wk
+
+cp pgxc_ctl_conf_part_empty pgxc_ctl_conf_empty.wk
+
+ex pgxc_ctl_conf_empty.wk <<EOF
+%s/"/\\\"/ge
+w
+%s/^\(.*\)$/"\1",/e
+%s/^"#ifdef XCP",$/#ifdef XCP/e
+%s/^"#endif",$/#endif/e
+wq
+EOF
+
+cat >> pgxc_ctl_bash.c <<EOF
+/*
+ * Prototype of the empty pgxc_ctl configuration file.
+ *
+ * It should be self-describing. It can be extracted to your pgxc_ctl
+ * work directory with the 'prepare empty' command.
+ */
+
+char *pgxc_ctl_conf_prototype_empty[] = {
+EOF
+
+cat pgxc_ctl_conf_empty.wk >> pgxc_ctl_bash.c
+
+cat >> pgxc_ctl_bash.c <<EOF
+NULL
+};
+EOF
+
+rm pgxc_ctl_conf_empty.wk
diff --git a/contrib/pgxc_ctl/mcxt.c b/contrib/pgxc_ctl/mcxt.c
new file mode 100644
index 0000000000..fcb31f8208
--- /dev/null
+++ b/contrib/pgxc_ctl/mcxt.c
@@ -0,0 +1,77 @@
+/*----------------------------------------------------------------------------------
+ *
+ * mcxt.c
+ * Postgres-XC memory context management code for applications.
+ *
+ * This module is for Postgres-XC application/utility programs. Sometimes,
+ * applications/utilities may need Postgres-XC internal functions which
+ * depend upon mcxt.c of gtm or Postgres.
+ *
+ * This module "virtualizes" such module-dependent memory management.
+ *
+ * This code is for general use and depends only upon conventional
+ * memory management functions.
+ *
+ * Copyright (c) 2013, Postgres-XC Development Group
+ *
+ *---------------------------------------------------------------------------------
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include "gen_alloc.h"
+
+static void *current_cxt;
+
+static void *memCxtAlloc(void *, size_t);
+static void *memCxtRealloc(void *, size_t);
+static void *memCxtAlloc0(void *, size_t);
+static void memCxtFree(void *);
+static void *memCxtAllocTop(size_t);
+static void *memCxtCurrentContext(void);
+
+
+static void *memCxtAlloc(void* current, size_t needed)
+{
+ return(malloc(needed));
+}
+
+static void *memCxtRealloc(void *addr, size_t needed)
+{
+ return(realloc(addr, needed));
+}
+
+static void *memCxtAlloc0(void *current, size_t needed)
+{
+ void *allocated;
+
+ allocated = malloc(needed);
+ if (allocated == NULL)
+ return(NULL);
+ memset(allocated, 0, needed);
+ return(allocated);
+}
+
+static void memCxtFree(void *addr)
+{
+ free(addr);
+ return;
+}
+
+static void *memCxtCurrentContext()
+{
+ return((void *)&current_cxt);
+}
+
+static void *memCxtAllocTop(size_t needed)
+{
+ return(malloc(needed));
+}
+
+
+Gen_Alloc genAlloc_class = {(void *)memCxtAlloc,
+ (void *)memCxtAlloc0,
+ (void *)memCxtRealloc,
+ (void *)memCxtFree,
+ (void *)memCxtCurrentContext,
+ (void *)memCxtAllocTop};
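+
+/*
+ * Illustrative sketch only: callers are expected to allocate through the
+ * genAlloc_class function table rather than calling malloc() directly,
+ * along the lines of the following (the member names are assumptions
+ * based on gen_alloc.h and are not verified here):
+ *
+ *   void *buf = genAlloc_class.Galloc(genAlloc_class.GetMyContext(), 128);
+ *   buf = genAlloc_class.Grealloc(buf, 256);
+ *   genAlloc_class.Gfree(buf);
+ */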
diff --git a/contrib/pgxc_ctl/monitor.c b/contrib/pgxc_ctl/monitor.c
new file mode 100644
index 0000000000..f14c24bb47
--- /dev/null
+++ b/contrib/pgxc_ctl/monitor.c
@@ -0,0 +1,476 @@
+/*-------------------------------------------------------------------------
+ *
+ * monitor.c
+ *
+ * Monitoring module of Postgres-XC configuration and operation tool.
+ *
+ * Portions Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+/*
+ * This module is imported from /contrib/pgxc_monitor to provide monitoring
+ * features for each Postgres-XC component.
+ */
+#include "gtm/gtm_client.h"
+#include "gtm/libpq-fe.h"
+
+#include <stdlib.h>
+#include <getopt.h>
+#include "utils.h"
+#include "variables.h"
+/* This is an ugly hack to avoid conflict between gtm_c.h and pgxc_ctl.h */
+#undef true
+#undef false
+#include "pgxc_ctl_log.h"
+#include "varnames.h"
+#include "config.h"
+#include "monitor.h"
+
+/* Define all the node types */
+typedef enum
+{
+ NONE = 0,
+ GTM, /* GTM or GTM-proxy */
+ NODE /* Coordinator or Datanode */
+} nodetype_t;
+
+#define GetToken() (line = get_word(line, &token))
+#define testToken(word) ((token != NULL) && (strcmp(token, word) == 0))
+#define TestToken(word) ((token != NULL) && (strcasecmp(token, word) == 0))
+
+static void printResult(int res, char *what, char *name)
+{
+ if (res == 0)
+ {
+ if (name)
+ elog(NOTICE, "Running: %s %s\n", what, name);
+ else
+ elog(NOTICE, "Running: %s\n", what);
+ }
+ else
+ {
+ if (name)
+ elog(NOTICE, "Not running: %s %s\n", what, name);
+ else
+ elog(NOTICE, "Not running: %s\n", what);
+ }
+}
+
+static void monitor_gtm_master(void)
+{
+ if (doesExist(VAR_gtmMasterServer, 0) && doesExist(VAR_gtmMasterPort, 0))
+ printResult(do_gtm_ping(sval(VAR_gtmMasterServer), atoi(sval(VAR_gtmMasterPort))), "gtm master", NULL);
+ else
+ elog(NOTICE, "GTM master is not configured.\n");
+}
+
+static void monitor_gtm_slave(void)
+{
+ if (doesExist(VAR_gtmSlaveServer, 0) && doesExist(VAR_gtmSlavePort, 0))
+ printResult(do_gtm_ping(sval(VAR_gtmSlaveServer), atoi(sval(VAR_gtmSlavePort))), "gtm slave", NULL);
+}
+
+static void monitor_gtm_proxy(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ int idx;
+
+ actualNodeList = makeActualNodeList(nodeList);
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ if ((idx = gtmProxyIdx(actualNodeList[ii])) < 0)
+ {
+ elog(ERROR, "ERROR: %s is not a gtm proxy.\n", actualNodeList[ii]);
+ continue;
+ }
+ printResult(do_gtm_ping(aval(VAR_gtmProxyServers)[idx], atoi(aval(VAR_gtmProxyPorts)[idx])),
+ "gtm proxy", actualNodeList[ii]);
+ }
+}
+
+
+static void monitor_coordinator_master(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ int idx;
+
+ actualNodeList = makeActualNodeList(nodeList);
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ if ((idx = coordIdx(actualNodeList[ii])) < 0)
+ {
+ elog(ERROR, "ERROR: %s is not a coordinator\n", actualNodeList[ii]);
+ continue;
+ }
+ printResult(pingNode(aval(VAR_coordMasterServers)[idx], aval(VAR_coordPorts)[idx]),
+ "coordinator master", actualNodeList[ii]);
+ }
+}
+
+static void monitor_coordinator_slave(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ int idx;
+
+ if (!isVarYes(VAR_coordSlave))
+ {
+ elog(ERROR, "ERROR: coordinator slave is not configured.\n");
+ return;
+ }
+ actualNodeList = makeActualNodeList(nodeList);
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ if ((idx = coordIdx(actualNodeList[ii])) < 0)
+ {
+ elog(ERROR, "ERROR: %s is not a coordinator\n", actualNodeList[ii]);
+ continue;
+ }
+ /* Need to check again if the slave is configured */
+ if (!doesExist(VAR_coordSlaveServers, idx) || is_none(aval(VAR_coordSlaveServers)[idx]))
+ elog(ERROR, "ERROR: coordinator slave %s is not configured\n", actualNodeList[ii]);
+ else
+ printResult(pingNode(aval(VAR_coordSlaveServers)[idx], aval(VAR_coordSlavePorts)[idx]),
+ "coordinator slave", actualNodeList[ii]);
+ }
+}
+
+static void monitor_coordinator(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ int idx;
+
+ actualNodeList = makeActualNodeList(nodeList);
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ if ((idx = coordIdx(actualNodeList[ii])) < 0)
+ {
+ elog(ERROR, "ERROR: %s is not a coordinator\n", actualNodeList[ii]);
+ continue;
+ }
+ printResult(pingNode(aval(VAR_coordMasterServers)[idx], aval(VAR_coordPorts)[idx]),
+ "coordinator master", actualNodeList[ii]);
+ if (doesExist(VAR_coordSlaveServers, idx) && !is_none(aval(VAR_coordSlaveServers)[idx]))
+ printResult(pingNodeSlave(aval(VAR_coordSlaveServers)[idx],
+ aval(VAR_coordSlaveDirs)[idx]),
+ "coordinator slave", actualNodeList[ii]);
+ }
+}
+
+static void monitor_datanode_master(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ int idx;
+
+ actualNodeList = makeActualNodeList(nodeList);
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ if ((idx = datanodeIdx(actualNodeList[ii])) < 0)
+ {
+ elog(ERROR, "ERROR: %s is not a datanode\n", actualNodeList[ii]);
+ continue;
+ }
+ printResult(pingNode(aval(VAR_datanodeMasterServers)[idx], aval(VAR_datanodePorts)[idx]),
+ "datanode master", actualNodeList[ii]);
+ }
+}
+
+static void monitor_datanode_slave(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ int idx;
+
+ if (!isVarYes(VAR_datanodeSlave))
+ {
+ elog(ERROR, "ERROR: datanode slave is not configured.\n");
+ return;
+ }
+ actualNodeList = makeActualNodeList(nodeList);
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ if ((idx = datanodeIdx(actualNodeList[ii])) < 0)
+ {
+ elog(ERROR, "ERROR: %s is not a datanode\n", actualNodeList[ii]);
+ continue;
+ }
+ if (doesExist(VAR_datanodeSlaveServers, idx) && !is_none(aval(VAR_datanodeSlaveServers)[idx]))
+ printResult(pingNodeSlave(aval(VAR_datanodeSlaveServers)[idx],
+ aval(VAR_datanodeSlaveDirs)[idx]),
+ "datanode slave", actualNodeList[ii]);
+ else
+ elog(ERROR, "ERROR: datanode slave %s is not configured.\n", actualNodeList[ii]);
+ }
+}
+
+static void monitor_datanode(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ int idx;
+
+ actualNodeList = makeActualNodeList(nodeList);
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ if ((idx = datanodeIdx(actualNodeList[ii])) < 0)
+ {
+ elog(ERROR, "ERROR: %s is not a datanode\n", actualNodeList[ii]);
+ continue;
+ }
+ printResult(pingNode(aval(VAR_datanodeMasterServers)[idx], aval(VAR_datanodePorts)[idx]),
+ "datanode master", actualNodeList[ii]);
+ if (doesExist(VAR_datanodeSlaveServers, idx) && !is_none(aval(VAR_datanodeSlaveServers)[idx]))
+ printResult(pingNodeSlave(aval(VAR_datanodeSlaveServers)[idx],
+ aval(VAR_datanodeSlaveDirs)[idx]),
+ "datanode slave", actualNodeList[ii]);
+ }
+}
+
+static void monitor_something(char **nodeList)
+{
+ char **actualNodeList;
+ int ii;
+ char *wkNodeList[2];
+ NodeType type;
+
+ wkNodeList[1] = NULL;
+ actualNodeList = makeActualNodeList(nodeList);
+ for (ii = 0; actualNodeList[ii]; ii++)
+ {
+ if ((type = getNodeType(actualNodeList[ii])) == NodeType_GTM)
+ {
+ monitor_gtm_master();
+ if (isVarYes(VAR_gtmSlave))
+ monitor_gtm_slave();
+ continue;
+ }
+ else if (type == NodeType_GTM_PROXY)
+ {
+ wkNodeList[0] = actualNodeList[ii];
+ monitor_gtm_proxy(wkNodeList);
+ continue;
+ }
+ else if (type == NodeType_COORDINATOR)
+ {
+ wkNodeList[0] = actualNodeList[ii];
+ monitor_coordinator(wkNodeList);
+ continue;
+ }
+ else if (type == NodeType_DATANODE)
+ {
+ wkNodeList[0] = actualNodeList[ii];
+ monitor_datanode(wkNodeList);
+ continue;
+ }
+ else
+ {
+ elog(ERROR, "ERROR: %s is not found in any node.\n", actualNodeList[ii]);
+ continue;
+ }
+ }
+}
+
+
+
+void do_monitor_command(char *line)
+{
+ char *token;
+ int rc = 0;
+
+ if (!GetToken())
+ {
+ elog(ERROR, "ERROR: no monitor command options found.\n");
+ return;
+ }
+ if (TestToken("gtm"))
+ {
+ if (!GetToken() || TestToken("all"))
+ {
+ /* Ping GTM */
+ monitor_gtm_master();
+ if (isVarYes(VAR_gtmSlave))
+ monitor_gtm_slave();
+ }
+ else if (TestToken("master"))
+ monitor_gtm_master();
+ else if (TestToken("slave"))
+ {
+ if (isVarYes(VAR_gtmSlave))
+ monitor_gtm_slave();
+ else
+ elog(ERROR, "ERROR: gtm slave is not configured.\n"), rc=-1;
+ }
+ else
+ elog(ERROR, "Invalid monitor gtm command option.\n"), rc=-1;
+ return;
+ }
+ else if (TestToken("gtm_proxy"))
+ {
+ if (!GetToken() || TestToken("all"))
+ monitor_gtm_proxy(aval(VAR_gtmProxyNames));
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ monitor_gtm_proxy(nodeList);
+ CleanArray(nodeList);
+ }
+ return;
+ }
+ else if (TestToken("coordinator"))
+ {
+ if (!GetToken() || TestToken("all"))
+ {
+ monitor_coordinator_master(aval(VAR_coordNames));
+ if (isVarYes(VAR_coordSlave))
+ monitor_coordinator_slave(aval(VAR_coordNames));
+ return;
+ }
+ else if (TestToken("master"))
+ {
+ if (!GetToken() || TestToken("all"))
+ monitor_coordinator_master(aval(VAR_coordNames));
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ monitor_coordinator_master(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else if (TestToken("slave"))
+ {
+ if (!isVarYes(VAR_coordSlave))
+ elog(ERROR, "ERROR: coordinator slave is not configured.\n"), rc = -1;
+ else
+ if (!GetToken() || TestToken("all"))
+ monitor_coordinator_slave(aval(VAR_coordNames));
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ monitor_coordinator_slave(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else
+ {
+ char **nodeList= NULL;
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ monitor_coordinator(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else if (TestToken("datanode"))
+ {
+ if (!GetToken() || TestToken("all"))
+ {
+ monitor_datanode_master(aval(VAR_datanodeNames));
+ if (isVarYes(VAR_datanodeSlave))
+ monitor_datanode_slave(aval(VAR_datanodeNames));
+ }
+ else if (TestToken("master"))
+ {
+ if (!GetToken() || TestToken("all"))
+ monitor_datanode_master(aval(VAR_datanodeNames));
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ monitor_datanode_master(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else if (TestToken("slave"))
+ {
+ if (!isVarYes(VAR_datanodeSlave))
+ elog(ERROR, "ERROR: datanode slave is not configured.\n"), rc = -1;
+ else
+ if (!GetToken() || TestToken("all"))
+ monitor_datanode_slave(aval(VAR_datanodeNames));
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ monitor_datanode_slave(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else
+ {
+ char **nodeList= NULL;
+ do
+ AddMember(nodeList, token);
+ while(GetToken());
+ monitor_datanode(nodeList);
+ CleanArray(nodeList);
+ }
+ }
+ else if (TestToken("all"))
+ {
+ monitor_gtm_master();
+ if (isVarYes(VAR_gtmSlave))
+ monitor_gtm_slave();
+ if (isVarYes(VAR_gtmProxy))
+ monitor_gtm_proxy(aval(VAR_gtmProxyNames));
+ monitor_coordinator(aval(VAR_coordNames));
+ monitor_datanode(aval(VAR_datanodeNames));
+ }
+ else
+ {
+ char **nodeList = NULL;
+ do
+ AddMember(nodeList, token);
+ while (GetToken());
+ monitor_something(nodeList);
+ CleanArray(nodeList);
+ }
+ return;
+}
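+
+/*
+ * For illustration only (hypothetical node names), monitor commands handled
+ * by the parser above include:
+ *
+ *   monitor all
+ *   monitor gtm
+ *   monitor gtm_proxy all
+ *   monitor coordinator master coord1 coord2
+ *   monitor datanode slave datanode1
+ */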
+
+/*
+ * Ping a given GTM or GTM-proxy
+ */
+int
+do_gtm_ping(char *host, int port)
+{
+ char connect_str[MAXPATH+1];
+ GTM_Conn *conn;
+
+ if (host == NULL)
+ {
+ elog(ERROR, "ERROR: no hostname is specified.\n");
+ return -1;
+ }
+ if (port <= 0)
+ {
+ elog(ERROR, "ERROR: Invalid port number, %d.\n", port);
+ return -1;
+ }
+ /* Use 60s as connection timeout */
+ snprintf(connect_str, MAXPATH, "host=%s port=%d node_name=%s remote_type=%d postmaster=0 connect_timeout=60",
+ host, port, myName, GTM_NODE_COORDINATOR);
+ if ((conn = PQconnectGTM(connect_str)) == NULL)
+ {
+ elog(DEBUG3, "DEBUG3: Could not connect to %s, %d\n", host, port);
+ return -1;
+ }
+ GTMPQfinish(conn);
+ return 0;
+}
diff --git a/contrib/pgxc_ctl/monitor.h b/contrib/pgxc_ctl/monitor.h
new file mode 100644
index 0000000000..f78a864624
--- /dev/null
+++ b/contrib/pgxc_ctl/monitor.h
@@ -0,0 +1,18 @@
+/*-------------------------------------------------------------------------
+ *
+ * monitor.h
+ *
+ * Monitoring module of Postgres-XC configuration and operation tool.
+ *
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef MONITOR_H
+#define MONITOR_H
+
+extern void do_monitor_command(char *line);
+extern int do_gtm_ping(char *host, int port);
+
+#endif /* MONITOR_H */
diff --git a/contrib/pgxc_ctl/pgxc_ctl.c b/contrib/pgxc_ctl/pgxc_ctl.c
new file mode 100644
index 0000000000..81d732753d
--- /dev/null
+++ b/contrib/pgxc_ctl/pgxc_ctl.c
@@ -0,0 +1,625 @@
+/*-------------------------------------------------------------------------
+ *
+ * pgxc_ctl.c
+ *
+ * Main module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+/*
+ * PGXC_CTL Postgres-XC configurator and operation tool
+ *
+ *
+ * Command line options
+ *
+ * -c --configuration file : configuration file. A relative path
+ * starts at $HOME/pgxc_ctl, or at the home directory
+ * specified by the --home option.
+ * --home homedir : home directory of pgxc_ctl. Default is
+ * $HOME/pgxc_ctl. You can override this with the
+ * PGXC_CTL_HOME environment variable or this option.
+ * The command-line argument has the highest priority.
+ *
+ * -v | --verbose: verbose mode. You can set your default in the
+ * pgxc_ctl_rc file at home.
+ *
+ * --silent: Opposite to --verbose.
+ *
+ * -V | --version: prints out the version
+ *
+ * -l | --logdir dir: Log directory. Default is $home/pgxc_log
+ *
+ * -L | --logfile file: log file. Default is the timestamp.
+ * Relative path starts with --logdir.
+ *
+ */
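+
+/*
+ * Example invocation (hypothetical paths and command):
+ *
+ *   pgxc_ctl --home $HOME/pgxc_ctl -c pgxc_ctl.conf -v monitor all
+ *
+ * Trailing arguments, if any, are run as a single pgxc_ctl command before
+ * input is read from stdin or from the --infile file (see main() below).
+ */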
+
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <readline/readline.h>
+#include <readline/history.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <getopt.h>
+
+#include "config.h"
+#include "pg_config.h"
+#include "variables.h"
+#include "pgxc_ctl.h"
+#include "bash_handler.h"
+#include "signature.h"
+#include "pgxc_ctl_log.h"
+#include "varnames.h"
+#include "do_command.h"
+#include "utils.h"
+
+/*
+ * Common global variable
+ */
+char pgxc_ctl_home[MAXPATH+1];
+char pgxc_ctl_bash_path[MAXPATH+1];
+char pgxc_ctl_config_path[MAXPATH+1];
+char progname[MAXPATH+1];
+char *myName;
+char *defaultDatabase;
+
+FILE *inF;
+FILE *outF;
+
+static void build_pgxc_ctl_home(char *home);
+static void trim_trailing_slash(char *path);
+static void startLog(char *path, char *logFileNam);
+static void print_version(void);
+static void print_help(void);
+
+static void trim_trailing_slash(char *path)
+{
+ char *curr = path;
+ char *last = path;
+
+ while (*curr)
+ {
+ last = curr;
+ curr++;
+ }
+ while (last != path)
+ {
+ if (*last == '/')
+ {
+ *last = 0;
+ last--;
+ continue;
+ }
+ else
+ return;
+ }
+}
+
+
+static void build_pgxc_ctl_home(char *home)
+{
+ char *env_pgxc_ctl_home = getenv(PGXC_CTL_HOME);
+ char *env_home = getenv(HOME); /* We assume this is always available */
+
+ if (home)
+ {
+ if (home[0] == '/')
+ {
+ /* Absolute path */
+ strncpy(pgxc_ctl_home, home, MAXPATH);
+ goto set_bash;
+ }
+ else
+ {
+ /* Relative path */
+ trim_trailing_slash(home);
+ snprintf(pgxc_ctl_home, MAXPATH, "%s/%s", env_home, home);
+ goto set_bash;
+ }
+ }
+ if ((env_pgxc_ctl_home = getenv(PGXC_CTL_HOME)) == NULL)
+ {
+ snprintf(pgxc_ctl_home, MAXPATH, "%s/%s", env_home, pgxc_ctl_home_def);
+ goto set_bash;
+ }
+ if (env_pgxc_ctl_home[0] == '/') /* Absolute path */
+ {
+ strncpy(pgxc_ctl_home, env_pgxc_ctl_home, MAXPATH);
+ goto set_bash;
+ }
+ trim_trailing_slash(env_pgxc_ctl_home);
+ if (env_pgxc_ctl_home[0] == '\0' || env_pgxc_ctl_home[0] == ' ' || env_pgxc_ctl_home[0] == '\t')
+ {
+ /* Null environment */
+ snprintf(pgxc_ctl_home, MAXPATH, "%s/%s", env_home, pgxc_ctl_home_def);
+ goto set_bash;
+ }
+ snprintf(pgxc_ctl_home, MAXPATH, "%s/%s", env_home, env_pgxc_ctl_home);
+ goto set_bash;
+
+set_bash:
+ snprintf(pgxc_ctl_bash_path, MAXPATH, "%s/%s", pgxc_ctl_home, PGXC_CTL_BASH);
+ /*
+ * Create home dir if necessary and change current directory to it.
+ */
+ {
+ struct stat buf;
+ char cmd[MAXLINE+1];
+
+ if (stat(pgxc_ctl_home, &buf) ==0)
+ {
+ if (S_ISDIR(buf.st_mode))
+ {
+ Chdir(pgxc_ctl_home, TRUE);
+ return;
+ }
+ else
+ {
+ fprintf(stderr, "%s is not directory. Check your configurfation\n", pgxc_ctl_home);
+ exit(1);
+ }
+ }
+ snprintf(cmd, MAXLINE, "mkdir -p %s", pgxc_ctl_home);
+ system(cmd);
+ if (stat(pgxc_ctl_home, &buf) ==0)
+ {
+ if (S_ISDIR(buf.st_mode))
+ {
+ Chdir(pgxc_ctl_home, TRUE);
+ return;
+ }
+ else
+ {
+ fprintf(stderr, "Creating %s directory failed. Check your configuration\n", pgxc_ctl_home);
+ exit(1);
+ }
+ }
+ fprintf(stderr, "Creating directory %s failed. %s\n", pgxc_ctl_home, strerror(errno));
+ exit(1);
+ }
+ return;
+}
+
+
+static void build_configuration_path(char *path)
+{
+ struct stat statbuf;
+ int rr;
+
+ if (path)
+ reset_var_val(VAR_configFile, path);
+ if (!find_var(VAR_configFile) || !sval(VAR_configFile) || (sval(VAR_configFile)[0] == 0))
+ {
+ /* Default */
+ snprintf(pgxc_ctl_config_path, MAXPATH, "%s/%s", pgxc_ctl_home, DEFAULT_CONF_FILE_NAME);
+ rr = stat(pgxc_ctl_config_path, &statbuf);
+ if (rr || !S_ISREG(statbuf.st_mode))
+ {
+ /* No configuration specified and the default does not apply --> simply ignore */
+ elog(ERROR, "ERROR: Default configuration file \"%s\" was not found while no configuration file was specified\n",
+ pgxc_ctl_config_path);
+ pgxc_ctl_config_path[0] = 0;
+
+ /* Read prototype config file if no default config file found */
+ install_pgxc_ctl_bash(pgxc_ctl_bash_path, true);
+ return;
+ }
+ }
+ else if (sval(VAR_configFile)[0] == '/')
+ {
+ /* Absolute path */
+ strncpy(pgxc_ctl_config_path, sval(VAR_configFile), MAXPATH);
+ }
+ else
+ {
+ /* Relative path from $pgxc_ctl_home */
+ snprintf(pgxc_ctl_config_path, MAXPATH, "%s/%s", pgxc_ctl_home, sval(VAR_configFile));
+ }
+ rr = stat(pgxc_ctl_config_path, &statbuf);
+ if (rr || !S_ISREG(statbuf.st_mode))
+ {
+ if (rr)
+ elog(ERROR, "ERROR: File \"%s\" not found or not a regular file. %s\n",
+ pgxc_ctl_config_path, strerror(errno));
+ else
+ elog(ERROR, "ERROR: File \"%s\" not found or not a regular file",
+ pgxc_ctl_config_path);
+ /* Read prototype config file if no config file found */
+ install_pgxc_ctl_bash(pgxc_ctl_bash_path, true);
+ }
+ else
+ {
+ /*
+ * Since we found a valid config file, don't read the prototype config
+ * file as it may conflict with the user conf file
+ */
+ install_pgxc_ctl_bash(pgxc_ctl_bash_path, false);
+ }
+ return;
+}
+
+
+static void read_configuration(void)
+{
+ FILE *conf;
+ char cmd[MAXPATH+1];
+
+ if (pgxc_ctl_config_path[0])
+ snprintf(cmd, MAXPATH, "%s --home %s --configuration %s",
+ pgxc_ctl_bash_path, pgxc_ctl_home, pgxc_ctl_config_path);
+ else
+ snprintf(cmd, MAXPATH, "%s --home %s", pgxc_ctl_bash_path, pgxc_ctl_home);
+
+ elog(NOTICE, "Reading configuration using %s\n", cmd);
+ conf = popen(cmd, "r");
+ if (conf == NULL)
+ {
+ elog(ERROR, "ERROR: Cannot execute %s, %s", cmd, strerror(errno));
+ return;
+ }
+ read_vars(conf);
+ pclose(conf);
+ uninstall_pgxc_ctl_bash(pgxc_ctl_bash_path);
+ elog(INFO, "Finished reading configuration.\n");
+}
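+
+/*
+ * For illustration only (hypothetical paths): the command popen()ed above
+ * typically looks like
+ *
+ *   $HOME/pgxc_ctl/pgxc_ctl_bash --home $HOME/pgxc_ctl --configuration $HOME/pgxc_ctl/pgxc_ctl.conf
+ *
+ * The bash script prints "name value ..." lines, which read_vars() parses
+ * into pgxc_ctl variables.
+ */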
+
+static void prepare_pgxc_ctl_bash(char *path)
+{
+ struct stat buf;
+ int rc;
+
+ rc = stat(path, &buf);
+ if (rc)
+ install_pgxc_ctl_bash(path, false);
+ else
+ if (S_ISREG(buf.st_mode))
+ return;
+ rc = stat(path, &buf);
+ if (S_ISREG(buf.st_mode))
+ return;
+ fprintf(stderr, "Error: caould not install bash script %s\n", path);
+ exit(1);
+}
+
+static void pgxcCtlMkdir(char *path)
+{
+ char cmd[MAXPATH+1];
+
+ snprintf(cmd, MAXPATH, "mkdir -p %s", path);
+ system(cmd);
+}
+
+static void startLog(char *path, char *logFileNam)
+{
+ char logFilePath[MAXPATH+1];
+
+ if(path)
+ {
+ trim_trailing_slash(path);
+ pgxcCtlMkdir(path);
+ if(logFileNam)
+ {
+ if (logFileNam[0] == '/')
+ {
+ fprintf(stderr, "ERROR: both --logdir and --logfile are specified and logfile was abosolute path.\n");
+ exit(1);
+ }
+ if (path[0] == '/')
+ snprintf(logFilePath, MAXPATH, "%s/%s", path, logFileNam);
+ else
+ snprintf(logFilePath, MAXPATH, "%s/%s/%s", pgxc_ctl_home, path, logFileNam);
+ initLog(NULL, logFilePath);
+ }
+ else
+ {
+ if (path[0] == '/')
+ initLog(path, NULL);
+ else
+ {
+ snprintf(logFilePath, MAXPATH, "%s/%s", pgxc_ctl_home, path);
+ initLog(logFilePath, NULL);
+ }
+ }
+ }
+ else
+ {
+ if (logFileNam && logFileNam[0] == '/')
+ {
+ /* This is used as log file path */
+ initLog(NULL, logFileNam);
+ return;
+ }
+ else
+ {
+ snprintf(logFilePath, MAXPATH, "%s/pgxc_log", pgxc_ctl_home);
+ pgxcCtlMkdir(logFilePath);
+ initLog(logFilePath, NULL);
+ }
+ }
+ return;
+}
+
+static void setDefaultIfNeeded(char *name, char *val)
+{
+ if (!find_var(name) || !sval(name))
+ {
+ if (val)
+ reset_var_val(name, val);
+ else
+ reset_var(name);
+ }
+}
+
+static void setup_my_env(void)
+{
+ char path[MAXPATH+1];
+ char *home;
+ FILE *ini_env;
+
+ char *selectVarList[] = {
+ VAR_pgxc_ctl_home,
+ VAR_xc_prompt,
+ VAR_verbose,
+ VAR_logDir,
+ VAR_logFile,
+ VAR_tmpDir,
+ VAR_localTmpDir,
+ VAR_configFile,
+ VAR_echoAll,
+ VAR_debug,
+ VAR_printMessage,
+ VAR_logMessage,
+ VAR_defaultDatabase,
+ VAR_pgxcCtlName,
+ VAR_printLocation,
+ VAR_logLocation,
+ NULL
+ };
+
+ ini_env = fopen("/etc/pgxc_ctl", "r");
+ if (ini_env)
+ {
+ read_selected_vars(ini_env, selectVarList);
+ fclose(ini_env);
+ }
+ if ((home = getenv("HOME")))
+ {
+ snprintf(path, MAXPATH, "%s/.pgxc_ctl", getenv("HOME"));
+ if ((ini_env = fopen(path, "r")))
+ {
+ read_selected_vars(ini_env, selectVarList);
+ fclose(ini_env);
+ }
+ }
+ /*
+ * Setup defaults
+ */
+ snprintf(path, MAXPATH, "%s/pgxc_ctl", getenv("HOME"));
+ setDefaultIfNeeded(VAR_pgxc_ctl_home, path);
+ setDefaultIfNeeded(VAR_xc_prompt, "PGXC ");
+ snprintf(path, MAXPATH, "%s/pgxc_ctl/pgxc_log", getenv("HOME"));
+ setDefaultIfNeeded(VAR_logDir, path);
+ setDefaultIfNeeded(VAR_logFile, NULL);
+ setDefaultIfNeeded(VAR_tmpDir, "/tmp");
+ setDefaultIfNeeded(VAR_localTmpDir, "/tmp");
+ setDefaultIfNeeded(VAR_configFile, "pgxc_ctl.conf");
+ setDefaultIfNeeded(VAR_echoAll, "n");
+ setDefaultIfNeeded(VAR_debug, "n");
+ setDefaultIfNeeded(VAR_printMessage, "info");
+ setDefaultIfNeeded(VAR_logMessage, "info");
+ setDefaultIfNeeded(VAR_pgxcCtlName, DefaultName);
+ myName = Strdup(sval(VAR_pgxcCtlName));
+ setDefaultIfNeeded(VAR_defaultDatabase, DefaultDatabase);
+ defaultDatabase = Strdup(sval(VAR_defaultDatabase));
+ setDefaultIfNeeded(VAR_printLocation, "n");
+ setDefaultIfNeeded(VAR_logLocation, "n");
+}
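+
+/*
+ * For illustration only (hypothetical content; the format is assumed to be
+ * the "name value" lines read_selected_vars() expects): a ~/.pgxc_ctl file
+ * might contain
+ *
+ *   verbose y
+ *   xc_prompt PGXC$
+ *   logDir /var/log/pgxc
+ */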
+
+int main(int argc, char *argv[])
+{
+ char *configuration = NULL;
+ char *infile = NULL;
+ char *outfile = NULL;
+ char *verbose = NULL;
+ int version_opt = 0;
+ char *logdir = NULL;
+ char *logfile = NULL;
+ char *home = NULL;
+ int help_opt = 0;
+
+ int c;
+
+ static struct option long_options[] = {
+ {"configuration", required_argument, 0, 'c'},
+ {"silent", no_argument, 0, 1},
+ {"verbose", no_argument, 0, 'v'},
+ {"version", no_argument, 0, 'V'},
+ {"logdir", required_argument, 0, 'l'},
+ {"logfile", required_argument, 0, 'L'},
+ {"home", required_argument, 0, 2},
+ {"infile", required_argument, 0, 'i'},
+ {"outfile", required_argument, 0, 'o'},
+ {"help", no_argument, 0, 'h'},
+ {0, 0, 0, 0}
+ };
+
+ int is_bash_exist = system("command -v bash");
+
+ if ( is_bash_exist != 0 )
+ {
+ fprintf(stderr, "Cannot find bash. Please ensure that bash is "
+ "installed and available in the PATH\n");
+ exit(2);
+ }
+
+ strcpy(progname, argv[0]);
+ init_var_hash();
+
+ while(1) {
+ int option_index = 0;
+
+ c = getopt_long(argc, argv, "m:i:o:c:vVl:L:h", long_options, &option_index);
+
+ if (c == -1)
+ break;
+ switch(c)
+ {
+ case 1:
+ verbose = "n";
+ break;
+ case 2:
+ if (home)
+ free (home);
+ home = strdup(optarg);
+ break;
+ case 'i':
+ if (infile)
+ free(infile);
+ infile = strdup(optarg);
+ break;
+ case 'o':
+ if (outfile)
+ free(outfile);
+ outfile = strdup(optarg);
+ break;
+ case 'v':
+ verbose = "y";
+ break;
+ case 'V':
+ version_opt = 1;
+ break;
+ case 'l':
+ if (logdir)
+ free(logdir);
+ logdir = strdup(optarg);
+ break;
+ case 'L':
+ if (logfile)
+ free(logfile);
+ logfile = strdup(optarg);
+ break;
+ case 'c':
+ if (configuration)
+ free(configuration);
+ configuration = strdup(optarg);
+ break;
+ case 'h':
+ help_opt = 1;
+ break;
+ case 'm':
+ break;
+ default:
+ fprintf(stderr, "Invalid optin value, received code 0%o\n", c);
+ exit(1);
+ }
+ }
+ if (version_opt || help_opt)
+ {
+ if (version_opt)
+ print_version();
+ if (help_opt)
+ print_help();
+ exit(0);
+ }
+ setup_my_env(); /* Read $HOME/.pgxc_ctl */
+ build_pgxc_ctl_home(home);
+ if (infile)
+ reset_var_val(VAR_configFile, infile);
+ if (logdir)
+ reset_var_val(VAR_logDir, logdir);
+ if (logfile)
+ reset_var_val(VAR_logFile, logfile);
+ startLog(sval(VAR_logDir), sval(VAR_logFile));
+ prepare_pgxc_ctl_bash(pgxc_ctl_bash_path);
+ build_configuration_path(configuration);
+ read_configuration();
+ check_configuration();
+ /*
+ * Set up output
+ */
+ if (outfile)
+ {
+ elog(INFO, "Output file: %s\n", outfile);
+ if ((outF = fopen(outfile, "w")))
+ dup2(fileno(outF),2);
+ else
+ elog(ERROR, "ERROR: Cannot open output file %s, %s\n", outfile, strerror(errno));
+ }
+ else
+ outF = stdout;
+ /*
+ * Startup Message
+ */
+ elog(NOTICE, " ******** PGXC_CTL START ***************\n\n");
+ elog(NOTICE, "Current directory: %s\n", pgxc_ctl_home);
+ /*
+ * Setup input
+ */
+ if (infile)
+ {
+ elog(INFO, "Input file: %s\n", infile);
+ inF = fopen(infile, "r");
+ if(inF == NULL)
+ {
+ elog(ERROR, "ERROR: Cannot open input file %s, %s\n", infile, strerror(errno));
+ exit(1);
+ }
+ }
+ else
+ inF = stdin;
+ /*
+ * If there are remaining arguments, they are treated as a command to run. Run it
+ * first, then handle the input from the file specified by the -i option.
+ * If no input file was specified, exit after running the command.
+ */
+#if 0
+ print_vars();
+#endif
+ if (optind < argc)
+ {
+ char orgBuf[MAXLINE + 1];
+ char wkBuf[MAXLINE + 1];
+ orgBuf[0] = 0;
+ while (optind < argc)
+ {
+ strncat(orgBuf, argv[optind++], MAXLINE);
+ strncat(orgBuf, " ", MAXLINE);
+ }
+ strncpy(wkBuf, orgBuf, MAXLINE);
+ do_singleLine(orgBuf, wkBuf);
+ if (infile)
+ do_command(inF, outF);
+ }
+ else
+ do_command(inF, outF);
+ exit(0);
+}
+
+static void print_version(void)
+{
+ printf("Pgxc_ctl %s\n", PG_VERSION);
+}
+
+static void print_help(void)
+{
+ printf(
+ "pgxc_ctl [option ...] [command]\n"
+ "option:\n"
+ " -c or --configuration conf_file: Specify configruration file.\n"
+ " -v or --verbose: Specify verbose output.\n"
+ " -V or --version: Print version and exit.\n"
+ " -l or --logdir log_directory: specifies what directory to write logs.\n"
+ " -L or --logfile log_file: Specifies log file.\n"
+ " --home home_direcotry: Specifies pgxc_ctl work director.\n"
+ " -i or --infile input_file: Specifies inptut file.\n"
+ " -o or --outfile output_file: Specifies output file.\n"
+ " -h or --help: Prints this message and exits.\n"
+ "For more deatils, refer to pgxc_ctl reference manual included in\n"
+ "postgres-xc reference manual.\n");
+}
diff --git a/contrib/pgxc_ctl/pgxc_ctl.h b/contrib/pgxc_ctl/pgxc_ctl.h
new file mode 100644
index 0000000000..2802da7f66
--- /dev/null
+++ b/contrib/pgxc_ctl/pgxc_ctl.h
@@ -0,0 +1,58 @@
+/*-------------------------------------------------------------------------
+ *
+ * pgxc_ctl.h
+ *
+ * Configuration module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef PGXC_CTL_H
+#define PGXC_CTL_H
+
+#include <stdio.h>
+
+/* Common macros */
+#define MAXPATH (512-1)
+#define PGXC_CTL_HOME "PGXC_CTL_HOME"
+#define HOME "HOME"
+#define PGXC_CTL_BASH "pgxc_ctl_bash"
+
+#define MAXLINE (8192-1)
+#define DEFAULT_CONF_FILE_NAME "pgxc_ctl.conf"
+
+#define pgxc_ctl_home_def "pgxc_ctl"
+
+#define MAXTOKEN (64-1)
+
+#define true 1
+#define false 0
+#define TRUE 1
+#define FALSE 0
+
+/* Global variable definition */
+extern char pgxc_ctl_home[];
+extern char pgxc_ctl_bash_path[];
+extern char pgxc_ctl_config_path[];
+extern char progname[];
+
+/* Important files */
+extern FILE *inF;
+extern FILE *outF;
+
+/* pg_ctl stop option */
+#define IMMEDIATE "immediate"
+#define FAST "fast"
+#define SMART "smart"
+
+/* My nodename default --> used to ping */
+#define DefaultName "pgxc_ctl"
+extern char *myName; /* pgxc_ctl name used to ping */
+#define DefaultDatabase "postgres"
+extern char *defaultDatabase;
+
+extern void print_simple_node_info(char *nodeName, char *port, char *dir,
+ char *extraConfig, char *specificExtraConfig);
+
+#endif /* PGXC_CTL_H */
diff --git a/contrib/pgxc_ctl/pgxc_ctl_bash_2 b/contrib/pgxc_ctl/pgxc_ctl_bash_2
new file mode 100755
index 0000000000..843fa7b25c
--- /dev/null
+++ b/contrib/pgxc_ctl/pgxc_ctl_bash_2
@@ -0,0 +1,314 @@
+#!/usr/bin/env bash
+# Common variables ######################################################################
+xc_prompt='PGXC$ '
+interactive=n
+verbose=n
+progname=$0
+
+bin=pgxc_ctl # Just in case. Never touch this
+logfile=none
+
+#===========================================================
+#
+# Extract parsed configuration values
+#
+#===========================================================
+
+# $1 is variable name of the array to print
+function print_array
+{
+ echo -n $1 " "
+ eval echo '$'{$1[@]}
+}
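+
+# Example (illustrative): with coordPorts=(20004 20005), "print_array coordPorts"
+# prints the array name followed by its elements: "coordPorts  20004 20005".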
+
+
+function print_values
+{
+ local i
+ declare -i i
+ local el
+
+ # Install Directory
+ echo pgxcInstallDir $pgxcInstallDir
+
+ # Overall
+ echo pgxcOwner $pgxcOwner
+ echo pgxcUser $pgxcUser
+ echo tmpDir $tmpDir
+ echo localTmpDir $localTmpDir
+ echo configBackup $configBackup
+ echo configBackupHost $configBackupHost
+ echo configBackupDir $configBackupDir
+ echo configBackupFile $configBackupFile
+
+ # GTM overall
+ echo gtmName $gtmName
+
+ # GTM master
+ echo gtmMasterServer $gtmMasterServer
+ echo gtmMasterPort $gtmMasterPort
+ echo gtmMasterDir $gtmMasterDir
+ echo gtmExtraConfig $gtmExtraConfig
+ echo gtmMasterSpecificExtraConfig $gtmMasterSpecificExtraConfig
+
+ # GTM slave
+ echo gtmSlave $gtmSlave
+ echo gtmSlaveName $gtmSlaveName
+ echo gtmSlaveServer $gtmSlaveServer
+ echo gtmSlavePort $gtmSlavePort
+ echo gtmSlaveDir $gtmSlaveDir
+ echo gtmSlaveSpecificExtraConfig $gtmSlaveSpecificExtraConfig
+
+ # GTM Proxy
+ echo gtmProxy $gtmProxy
+ print_array gtmProxyNames
+ print_array gtmProxyServers
+ print_array gtmProxyPorts
+ print_array gtmProxyDirs
+ echo gtmPxyExtraConfig $gtmPxyExtraConfig
+ print_array gtmPxySpecificExtraConfig
+
+ # Coordinators overall
+ print_array coordNames
+ print_array coordPorts
+ print_array poolerPorts
+ print_array coordPgHbaEntries
+
+ # Coordinators master
+ print_array coordMasterServers
+ print_array coordMasterDirs
+ print_array coordMaxWALSenders
+
+ # Coordinators slave
+ echo coordSlave $coordSlave
+ echo coordSlaveSync $coordSlaveSync
+ print_array coordSlaveServers
+ print_array coordSlavePorts
+ print_array coordSlavePoolerPorts
+ print_array coordSlaveDirs
+ print_array coordArchLogDirs
+
+ # Coordinator Configuration files
+ echo coordExtraConfig $coordExtraConfig
+ print_array coordSpecificExtraConfig
+ echo coordExtraPgHba $coordExtraPgHba
+ print_array coordSpecificExtraPgHba
+
+ # Coordinator Additional Slaves
+ echo coordAdditionalSlaves $coordAdditionalSlaves
+ if [ "$coordAdditionalSlaves" == "y" ]; then
+ print_array coordAdditionalSlaveSet
+ for ((i=0; i<${#coordAdditionalSlaveSet[@]}; i++)); do
+ el=${coordAdditionalSlaveSet[$i]}
+ echo -n ${el}_Sync " "
+ eval echo '$'"$el"_Sync
+ print_array ${el}_Servers
+ print_array ${el}_Dirs
+ print_array ${el}_ArchLogDirs
+ done
+ fi
+
+ # Datanodes overall
+ echo primaryDatanode $primaryDatanode
+ print_array datanodeNames
+ print_array datanodePorts
+#ifdef XCP
+ print_array datanodePoolerPorts
+#endif
+ print_array datanodePgHbaEntries
+
+ # Datanodes masters
+ print_array datanodeMasterServers
+ print_array datanodeMasterDirs
+ print_array datanodeMasterWALDirs
+ print_array datanodeMaxWALSenders
+
+ # Datanodes slaves
+ echo datanodeSlave $datanodeSlave
+ echo datanodeSlaveSync $datanodeSlaveSync
+ print_array datanodeSlaveServers
+ print_array datanodeSlavePorts
+#ifdef XCP
+ print_array datanodeSlavePoolerPorts
+#endif
+ print_array datanodeSlaveDirs
+ print_array datanodeSlaveWALDirs
+ print_array datanodeArchLogDirs
+
+ # Datanode configuration files
+ echo datanodeExtraConfig $datanodeExtraConfig
+ print_array datanodeSpecificExtraConfig
+ echo datanodeExtraPgHba $datanodeExtraPgHba
+ print_array datanodeSpecificExtraPgHba
+
+ # Datanodes additional slaves
+ echo datanodeAdditionalSlaves $datanodeAdditionalSlaves
+ if [ "$datanodeAdditionalSlaves" == "y" ]; then
+ print_array datanodeAdditionalSlaveSet
+ for ((i=0; i<${#datanodeAdditionalSlaveSet[@]}; i++)); do
+ el=${datanodeAdditionalSlaveSet[$i]}
+ echo -n ${el}_Sync " "
+ eval echo '$'"$el"_Sync
+ print_array ${el}_Servers
+ print_array ${el}_Dirs
+ print_array ${el}_ArchLogDirs
+ done
+ fi
+
+ # WAL Archives
+ echo walArchive $walArchive
+ print_array walArchiveSet
+ if [ "$walArchive" == "y" ]; then
+ for ((i=0; i<${#walArchiveSet[@]}; i++)); do
+ el=${walArchiveSet[$i]}
+ print_array ${el}_source
+ echo -n ${el}_host
+ eval echo '$'"$el"_host
+ echo -n ${el}_backupdir
+ eval echo '$'"$el"_backupdir
+ done
+ fi
+}
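+
+# print_values dumps each variable above as a "name value ..." line on stdout,
+# e.g. (values illustrative, taken from a sourced configuration file):
+#   gtmMasterPort 20001
+#   coordNames coord1 coord2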
+
+
+
+#============================================================
+#
+# Common functions
+#
+#============================================================
+
+# Optionally, $1 gives the $PGXC_CTL_HOME setting.
+function set_home
+{
+ if [ $# -gt 1 ]; then
+ echo "Invalid set_home function call"
+ return 1
+ fi
+ if [ $# == 1 ]; then
+ if [ -d $1 ]; then
+ pgxc_ctl_home=$1
+ else
+ eecho "set_home: $1 is not a directory."
+ return 1
+ fi
+ elif [ "$PGXC_CTL_HOME" != "" ]; then
+ if [ -d $PGXC_CTL_HOME ]; then
+ pgxc_ctl_home=$PGXC_CTL_HOME
+ else
+ eecho "set_home: env PGXC_CTL_HOME($PGXC_CTL_HOME) is not a directory."
+ return 1;
+ fi
+ fi
+ cd $pgxc_ctl_home;
+}
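+
+# Example (illustrative): either pass the directory explicitly,
+#   set_home $HOME/pgxc_ctl
+# or rely on the PGXC_CTL_HOME environment variable and call it with no argument:
+#   set_home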
+
+###############################################################################
+#
+# EXECUTING SECTION
+#
+###############################################################################
+
+#=======================================================
+# Things to be done at first
+#=======================================================
+
+# Handle options
+progname=$0
+moretodo=y
+cmd_with_log=null
+#set_home
+if [ -f $pgxc_ctl_home/.pgxc_ctl_rc ]; then
+ source $pgxc_ctl_home/.pgxc_ctl_rc
+fi
+
+configFile=""
+
+while [ $moretodo == y ]; do
+ if [ $# -gt 0 ]; then
+ case $1 in
+ -v )
+ shift;
+ verbose=y;
+ continue;;
+ --verbose )
+ shift;
+ verbose=y;
+ continue;;
+ --silent )
+ shift;
+ verbose=n;
+ continue;;
+ -d ) # debug option
+ shift;
+ DEBUG=y;
+ continue;;
+ --debug )
+ shift;
+ DEBUG=y;
+ continue;;
+ -c ) # Configuration file
+ shift;
+ if [ $# -le 0 ]; then
+ echo ERROR: no -c option value found
+ exit 1
+ else
+ configFile=$1
+ shift
+ fi;
+ continue;;
+ --configuration ) # Configuration file
+ shift;
+ if [ $# -le 0 ]; then
+ echo ERROR: no --configuration option value found
+ exit 1
+ else
+ configFile=$1
+ shift
+ fi;
+ continue;;
+ --home ) # PGXC_CTL_HOME
+ shift;
+ if [ $# -le 0 ]; then
+ echo ERROR: no pgxc_ctl_home specified
+ exit 1
+ else
+ pgxc_ctl_home=$1
+ cd $pgxc_ctl_home
+ shift
+ fi;
+ continue;;
+ --signature ) # Check signature
+ shift;
+ if [ $# -le 0 ]; then
+ echo ERROR: Signature does not match
+ exit 1
+ fi
+ if [ "$1" != "$signature" ]; then
+ echo ERROR: Signature does not match
+ exit 1
+ fi
+ shift
+ continue;;
+ * )
+ moretodo=n
+ continue;;
+ esac
+ else
+ moretodo=n
+ fi
+done
+
+echo $signature
+# Read configuration file --> Should be activated only when debug option is off
+
+if [ -f $pgxc_ctl_home/.pgxc_ctl_rc ]; then
+ source $pgxc_ctl_home/.pgxc_ctl_rc
+fi
+
+if [ "$configFile" != "" ] && [ -f "$configFile" ]; then
+ source $configFile
+fi
+# Log option can be overridden by command-line option
+
+print_values
+
+
diff --git a/contrib/pgxc_ctl/pgxc_ctl_conf_part_empty b/contrib/pgxc_ctl/pgxc_ctl_conf_part_empty
new file mode 100644
index 0000000000..4cce952c22
--- /dev/null
+++ b/contrib/pgxc_ctl/pgxc_ctl_conf_part_empty
@@ -0,0 +1,267 @@
+#!/usr/bin/env bash
+#
+# Postgres-XC Configuration file for pgxc_ctl utility.
+#
+# Configuration file can be specified as -c option from pgxc_ctl command. Default is
+# $PGXC_CTL_HOME/pgxc_ctl.conf.
+#
+# This is a bash script, so you can make any additions for your convenience to configure
+# your Postgres-XC cluster.
+#
+# Please understand that pgxc_ctl supports only a subset of the configurations
+# Postgres-XC provides. Here are several assumptions/restrictions pgxc_ctl depends on.
+#
+# 1) All the resources of pgxc nodes have to be owned by the same user. "Same user" means
+#    a user with the same user name. The user ID may be different from server to server.
+#    This must be specified as the variable $pgxcOwner.
+#
+# 2) All the servers must be reachable via ssh without a password. It is highly recommended
+#    to set up key-based authentication among all the servers.
+#
+# 3) All the databases in coordinators/datanodes have at least one common superuser. Pgxc_ctl
+#    uses this user to connect to coordinators and datanodes. Again, no password should
+#    be used to connect. You have many options to do this: pg_hba.conf, pg_ident.conf and
+#    others. Pgxc_ctl provides a way to configure pg_hba.conf but not pg_ident.conf. This
+#    will be implemented in later releases.
+#
+# 4) GTM master and slave can listen on different ports, while a coordinator or datanode
+#    slave should be assigned the same port number as its master.
+#
+# 5) The port number of a coordinator slave must be the same as its master's.
+#
+# 6) Master and slave are connected using synchronous replication. Asynchronous replication
+#    has a slight (almost negligible) chance of bringing the whole cluster into an
+#    inconsistent state. Support for asynchronous replication may be added in a later release.
+#
+# 7) Each coordinator and datanode can have only one slave. Cascaded replication and
+#    multiple slaves are not supported in the current pgxc_ctl.
+#
+# 8) Killing nodes may end up with IPC resource leaks, such as semaphores and shared memory.
+#    Only the listening port (socket) will be cleaned with the clean command.
+#
+# 9) Backup and restore are not supported in pgxc_ctl at present. This is a big task and
+#    may need considerable resources.
+#
+#========================================================================================
+#
+#
+# pgxcInstallDir variable is needed if you invoke "deploy" command from pgxc_ctl utility.
+# If you don't, you don't need this variable.
+pgxcInstallDir=$HOME/pgxc
+#---- OVERALL -----------------------------------------------------------------------------
+#
+pgxcOwner=$USER # owner of the Postgres-XC database cluster. Here, we use this
+ # both as the Linux user and the database user. This must be
+ # the superuser of each coordinator and datanode.
+pgxcUser=$pgxcOwner # OS user of Postgres-XC owner
+
+tmpDir=/tmp # temporary dir used in XC servers
+localTmpDir=$tmpDir # temporary dir used here locally
+
+configBackup=n # If you want config file backup, specify y to this value.
+configBackupHost=pgxc-linker # host to backup config file
+configBackupDir=$HOME/pgxc # Backup directory
+configBackupFile=pgxc_ctl.bak # Backup file name --> Need to synchronize when original changed.
+
+dataDirRoot=$HOME/DATA/pgxl/nodes
+
+#---- GTM ------------------------------------------------------------------------------------
+
+# GTM is mandatory. You must have at least (and only) one GTM master in your Postgres-XC cluster.
+# If GTM crashes and you need to reconfigure it, you can do it by pgxc_update_gtm command to update
+# GTM master with others. Of course, we provide pgxc_remove_gtm command to remove it. This command
+# will not stop the current GTM. It is up to the operator.
+
+#---- Overall -------
+gtmName=()
+
+#---- GTM Master -----------------------------------------------
+
+#---- Overall ----
+gtmMasterServer=()
+gtmMasterPort=()
+gtmMasterDir=()
+
+#---- Configuration ---
+gtmExtraConfig=() # Will be added to gtm.conf for both Master and Slave (done at initialization only)
+gtmMasterSpecificExtraConfig=() # Will be added to Master's gtm.conf (done at initialization only)
+
+#---- GTM Slave -----------------------------------------------
+
+# Because GTM is a key component to maintain database consistency, you may want to configure GTM slave
+# for backup.
+
+#---- Overall ------
+gtmSlave=n # Specify y if you configure GTM Slave. Otherwise, GTM slave will not be configured and
+ # all the following variables will be reset.
+gtmSlaveName=()
+gtmSlaveServer=() # value none means GTM slave is not available. Give none if you don't configure GTM Slave.
+gtmSlavePort=() # Not used if you don't configure GTM slave.
+gtmSlaveDir=() # Not used if you don't configure GTM slave.
+# Please note that after a GTM failover there will be no slave available until you configure the slave
+# again. (The pgxc_add_gtm_slave function will handle it.)
+
+#---- Configuration ----
+gtmSlaveSpecificExtraConfig=() # Will be added to Slave's gtm.conf (done at initialization only)
+
+#---- GTM Proxy -------------------------------------------------------------------------------------------------------
+# GTM proxy will be selected based upon which server each component runs on.
+# When the master fails over to the slave, the slave inherits its master's gtm proxy. It should be
+# reconfigured based upon the new location.
+#
+# To do so, the slave should be restarted: pg_ctl promote -> (edit postgresql.conf and recovery.conf) -> pg_ctl restart
+#
+# You don't have to configure GTM Proxy if you don't configure GTM slave, or if you are happy to have every component
+# connect to GTM Master directly. If you configure GTM slave, you must configure GTM proxy too.
+
+#---- Shortcuts ------
+gtmProxyDir=()
+
+#---- Overall -------
+gtmProxy=() # Specify y if you configure at least one GTM proxy. You may omit gtm proxies
+ # only when you don't configure GTM slaves.
+ # If you do not set this value to y, the following parameters will be set to default empty values.
+ # If we find there are no valid Proxy server names (that is, every server is specified
+ # as none), then the gtmProxy value will be set to "n" and all the entries will be set to
+ # empty values.
+gtmProxyNames=() # Not used if it is not configured
+gtmProxyServers=() # Specify none if you don't configure it.
+gtmProxyPorts=() # Not used if it is not configured.
+gtmProxyDirs=() # Not used if it is not configured.
+
+#---- Configuration ----
+gtmPxyExtraConfig=none # Extra configuration parameter for gtm_proxy. Coordinator section has an example.
+
+#---- Coordinators ----------------------------------------------------------------------------------------------------
+
+#---- shortcuts ----------
+coordMasterDir=$dataDirRoot/coord_master
+coordSlaveDir=$HOME/coord_slave
+coordArchLogDir=$HOME/coord_archlog
+
+#---- Overall ------------
+coordNames=() # Master and slave use the same name
+coordPorts=() # Master server listening ports
+poolerPorts=() # Master pooler ports
+coordPgHbaEntries=(::1/128) # Assumes that all the coordinators (master/slave) accept
+ # the same connections
+ # This entry allows only $pgxcOwner to connect.
+ # If you'd like to setup another connection, you should
+ # supply these entries through files specified below.
+#coordPgHbaEntries=(127.0.0.1/32) # Same as above but for IPv4 connections
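+# Example (illustrative): an entry such as 192.168.1.0/24 is expanded into a
+# pg_hba.conf line of the form "host all all 192.168.1.0/24 trust" on each
+# coordinator.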
+
+#---- Master -------------
+coordMasterServers=() # none means this master is not available
+coordMasterDirs=()
+coordMaxWALsender=5 # max_wal_senders: needed to configure slaves. If zero is specified,
+ # this parameter is expected to be supplied explicitly by the external
+ # files specified below. If you don't configure slaves, leave this value at zero.
+coordMaxWALSenders=()
+ # max_wal_senders configuration for each coordinator.
+
+#---- Slave -------------
+coordSlave=n # Specify y if you configure at least one coordinator slave. Otherwise, the following
+ # configuration parameters will be set to empty values.
+ # If no effective server names are found (that is, every server is specified as none),
+ # then the coordSlave value will be set to n and all the following values will be set to
+ # empty values.
+coordSlaveSync=n # Specify y to connect in synchronous mode.
+coordSlaveServers=() # none means this slave is not available
+coordSlavePorts=() # coordinator slave listening ports
+coordSlavePoolerPorts=() # coordinator slave pooler ports
+coordSlaveDirs=()
+coordArchLogDirs=()
+
+#---- Configuration files---
+# Need these when you'd like to set up a specific non-default configuration
+# These files will go to corresponding files for the master.
+# You may supply your bash script to setup extra config lines and extra pg_hba.conf entries
+# Or you may supply these files manually.
+coordExtraConfig=coordExtraConfig # Extra configuration file for coordinators.
+ # This file will be added to all the coordinators'
+ # postgresql.conf
+# Please note that the following sets up minimum parameters which you may want to change.
+# You can put your postgresql.conf lines here.
+cat > $coordExtraConfig <<EOF
+#================================================
+# Added to all the coordinator postgresql.conf
+# Original: $coordExtraConfig
+log_destination = 'stderr'
+logging_collector = on
+log_directory = 'pg_log'
+listen_addresses = '*'
+max_connections = 100
+hot_standby = off
+EOF
+
+# Additional Configuration file for specific coordinator master.
+# You can define each setting by similar means as above.
+coordSpecificExtraConfig=()
+coordSpecificExtraPgHba=()
+
+#---- Datanodes -------------------------------------------------------------------------------------------------------
+
+#---- Shortcuts --------------
+datanodeMasterDir=$dataDirRoot/dn_master
+datanodeSlaveDir=$dataDirRoot/dn_slave
+datanodeArchLogDir=$dataDirRoot/datanode_archlog
+
+#---- Overall ---------------
+primaryDatanode= # Primary Node.
+datanodeNames=()
+datanodePorts=() # Master and slave use the same port!
+datanodePoolerPorts=() # Master and slave use the same port!
+datanodePgHbaEntries=(::1/128) # Assumes that all the datanodes (master/slave) accept
+ # the same connections
+ # This list sets up pg_hba.conf for $pgxcOwner user.
+ # If you'd like to setup other entries, supply them
+ # through extra configuration files specified below.
+#datanodePgHbaEntries=(127.0.0.1/32) # Same as above but for IPv4 connections
+
+#---- Master ----------------
+datanodeMasterServers=() # none means this master is not available.
+ # This means that the master should exist but is down.
+ # The cluster is not operational until the master is
+ # recovered and ready to run.
+datanodeMasterDirs=()
+datanodeMaxWalSender=5 # max_wal_senders: needed to configure slaves. If zero is
+ # specified, this parameter is expected to be supplied explicitly
+ # by external configuration files.
+ # If you don't configure slaves, leave this value at zero.
+datanodeMaxWALSenders=()
+ # max_wal_senders configuration for each datanode
+
+#---- Slave -----------------
+datanodeSlave=n # Specify y if you configure at least one datanode slave. Otherwise, the following
+ # configuration parameters will be set to empty values.
+ # If no effective server names are found (that is, every server is specified as none),
+ # then the datanodeSlave value will be set to n and all the following values will be set to
+ # empty values.
+datanodeSlaveServers=() # value none means this slave is not available
+datanodeSlavePorts=() # Master and slave use the same port!
+datanodeSlavePoolerPorts=() # Master and slave use the same port!
+#datanodeSlaveSync=y # If datanode slave is connected in synchronized mode
+datanodeSlaveDirs=()
+datanodeArchLogDirs=()
+
+# ---- Configuration files ---
+# You may supply your bash script to setup extra config lines and extra pg_hba.conf entries here.
+# These files will go to corresponding files for the master.
+# Or you may supply these files manually.
+datanodeExtraConfig=datanodeExtraConfig
+cat > $datanodeExtraConfig <<EOF
+#================================================
+# Added to all the datanode postgresql.conf
+# Original: $datanodeExtraConfig
+log_destination = 'stderr'
+logging_collector = on
+log_directory = 'pg_log'
+listen_addresses = '*'
+max_connections = 100
+hot_standby = off
+EOF
+# Additional Configuration file for specific datanode master.
+# You can define each setting by similar means as above.
+datanodeSpecificExtraConfig=()
+datanodeSpecificExtraPgHba=()
diff --git a/contrib/pgxc_ctl/pgxc_ctl_conf_part_full b/contrib/pgxc_ctl/pgxc_ctl_conf_part_full
new file mode 100755
index 0000000000..a97e6b296c
--- /dev/null
+++ b/contrib/pgxc_ctl/pgxc_ctl_conf_part_full
@@ -0,0 +1,326 @@
+#!/usr/bin/env bash
+#
+# Postgres-XC Configuration file for pgxc_ctl utility.
+#
+# Configuration file can be specified as -c option from pgxc_ctl command. Default is
+# $PGXC_CTL_HOME/pgxc_ctl.conf.
+#
+# This is a bash script, so you can make any additions for your convenience to configure
+# your Postgres-XC cluster.
+#
+# Please understand that pgxc_ctl supports only a subset of the configurations
+# Postgres-XC provides. Here are several assumptions/restrictions pgxc_ctl depends on.
+#
+# 1) All the resources of pgxc nodes have to be owned by the same user. "Same user" means
+#    a user with the same user name. The user ID may be different from server to server.
+#    This must be specified as the variable $pgxcOwner.
+#
+# 2) All the servers must be reachable via ssh without a password. It is highly recommended
+#    to set up key-based authentication among all the servers.
+#
+# 3) All the databases in coordinators/datanodes have at least one common superuser. Pgxc_ctl
+#    uses this user to connect to coordinators and datanodes. Again, no password should
+#    be used to connect. You have many options to do this: pg_hba.conf, pg_ident.conf and
+#    others. Pgxc_ctl provides a way to configure pg_hba.conf but not pg_ident.conf. This
+#    will be implemented in later releases.
+#
+# 4) GTM master and slave can listen on different ports, while a coordinator or datanode
+#    slave should be assigned the same port number as its master.
+#
+# 5) The port number of a coordinator slave must be the same as its master's.
+#
+# 6) Master and slave are connected using synchronous replication. Asynchronous replication
+#    has a slight (almost negligible) chance of bringing the whole cluster into an
+#    inconsistent state. Support for asynchronous replication may be added in a later release.
+#
+# 7) Each coordinator and datanode can have only one slave. Cascaded replication and
+#    multiple slaves are not supported in the current pgxc_ctl.
+#
+# 8) Killing nodes may end up with IPC resource leaks, such as semaphores and shared memory.
+#    Only the listening port (socket) will be cleaned with the clean command.
+#
+# 9) Backup and restore are not supported in pgxc_ctl at present. This is a big task and
+#    may need considerable resources.
+#
+#========================================================================================
+#
+#
+# pgxcInstallDir variable is needed if you invoke "deploy" command from pgxc_ctl utility.
+# If you don't, you don't need this variable.
+pgxcInstallDir=$HOME/pgxc
+#---- OVERALL -----------------------------------------------------------------------------
+#
+pgxcOwner=$USER # owner of the Postgres-XC database cluster. Here, we use this
+ # both as the Linux user and the database user. This must be
+ # the superuser of each coordinator and datanode.
+pgxcUser=$pgxcOwner # OS user of Postgres-XC owner
+
+tmpDir=/tmp # temporary dir used in XC servers
+localTmpDir=$tmpDir # temporary dir used here locally
+
+configBackup=n # If you want config file backup, specify y to this value.
+configBackupHost=pgxc-linker # host to backup config file
+configBackupDir=$HOME/pgxc # Backup directory
+configBackupFile=pgxc_ctl.bak # Backup file name --> Need to synchronize when original changed.
+
+#---- GTM ------------------------------------------------------------------------------------
+
+# GTM is mandatory. You must have at least (and only) one GTM master in your Postgres-XC cluster.
+# If GTM crashes and you need to reconfigure it, you can do it by pgxc_update_gtm command to update
+# GTM master with others. Of course, we provide pgxc_remove_gtm command to remove it. This command
+# will not stop the current GTM. It is up to the operator.
+
+
+#---- GTM Master -----------------------------------------------
+
+#---- Overall ----
+gtmName=gtm
+gtmMasterServer=node13
+gtmMasterPort=20001
+gtmMasterDir=$HOME/pgxc/nodes/gtm
+
+#---- Configuration ---
+gtmExtraConfig=none # Will be added to gtm.conf for both Master and Slave (done at initialization only)
+gtmMasterSpecificExtraConfig=none # Will be added to Master's gtm.conf (done at initialization only)
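+# Example (illustrative): to add extra gtm.conf lines, point gtmExtraConfig at a
+# file created here, much like the coordExtraConfig heredoc below:
+#   gtmExtraConfig=gtmExtraConfig
+#   cat > $gtmExtraConfig <<EOF
+#   # appended to gtm.conf of both master and slave; the parameter below is a
+#   # hypothetical placeholder
+#   keepalives_idle = 60
+#   EOF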
+
+#---- GTM Slave -----------------------------------------------
+
+# Because GTM is a key component to maintain database consistency, you may want to configure GTM slave
+# for backup.
+
+#---- Overall ------
+gtmSlave=y # Specify y if you configure GTM Slave. Otherwise, GTM slave will not be configured and
+ # all the following variables will be reset.
+gtmSlaveName=gtmSlave
+gtmSlaveServer=node12 # value none means GTM slave is not available. Give none if you don't configure GTM Slave.
+gtmSlavePort=20001 # Not used if you don't configure GTM slave.
+gtmSlaveDir=$HOME/pgxc/nodes/gtm # Not used if you don't configure GTM slave.
+# Please note that after a GTM failover there will be no slave available until you configure the slave
+# again. (The pgxc_add_gtm_slave function will handle it.)
+
+#---- Configuration ----
+gtmSlaveSpecificExtraConfig=none # Will be added to Slave's gtm.conf (done at initialization only)
+
+#---- GTM Proxy -------------------------------------------------------------------------------------------------------
+# GTM proxy will be selected based upon which server each component runs on.
+# When the master fails over to the slave, the slave inherits its master's gtm proxy. It should be
+# reconfigured based upon the new location.
+#
+# To do so, the slave should be restarted: pg_ctl promote -> (edit postgresql.conf and recovery.conf) -> pg_ctl restart
+#
+# You don't have to configure GTM Proxy if you don't configure GTM slave, or if you are happy to have every component
+# connect to GTM Master directly. If you configure GTM slave, you must configure GTM proxy too.
+
+#---- Shortcuts ------
+gtmProxyDir=$HOME/pgxc/nodes/gtm_pxy
+
+#---- Overall -------
+gtmProxy=y # Specify y if you configure at least one GTM proxy. You may omit gtm proxies
+ # only when you don't configure GTM slaves.
+ # If you do not set this value to y, the following parameters will be set to default empty values.
+ # If we find there are no valid Proxy server names (that is, every server is specified
+ # as none), then the gtmProxy value will be set to "n" and all the entries will be set to
+ # empty values.
+gtmProxyNames=(gtm_pxy1 gtm_pxy2 gtm_pxy3 gtm_pxy4) # Not used if it is not configured
+gtmProxyServers=(node06 node07 node08 node09) # Specify none if you don't configure it.
+gtmProxyPorts=(20001 20001 20001 20001) # Not used if it is not configured.
+gtmProxyDirs=($gtmProxyDir $gtmProxyDir $gtmProxyDir $gtmProxyDir) # Not used if it is not configured.
+
+#---- Configuration ----
+gtmPxyExtraConfig=none # Extra configuration parameter for gtm_proxy. Coordinator section has an example.
+gtmPxySpecificExtraConfig=(none none none none)
+
+#---- Coordinators ----------------------------------------------------------------------------------------------------
+
+#---- shortcuts ----------
+coordMasterDir=$HOME/pgxc/nodes/coord
+coordSlaveDir=$HOME/pgxc/nodes/coord_slave
+coordArchLogDir=$HOME/pgxc/nodes/coord_archlog
+
+#---- Overall ------------
+coordNames=(coord1 coord2 coord3 coord4) # Master and slave use the same name
+coordPorts=(20004 20005 20004 20005) # Master ports
+poolerPorts=(20010 20011 20010 20011) # Master pooler ports
+coordPgHbaEntries=(192.168.1.0/24) # Assumes that all the coordinators (master/slave) accept
+ # the same connections
+ # This entry allows only $pgxcOwner to connect.
+ # If you'd like to setup another connection, you should
+ # supply these entries through files specified below.
+# Note: The above parameter is extracted as "host all all 0.0.0.0/0 trust". If you don't want
+# such setups, specify the value () to this variable and supply what you want using coordExtraPgHba
+# and/or coordSpecificExtraPgHba variables.
+#coordPgHbaEntries=(::1/128) # Same as above but for IPv6 addresses
+
+#---- Master -------------
+coordMasterServers=(node06 node07 node08 node09) # none means this master is not available
+coordMasterDirs=($coordMasterDir $coordMasterDir $coordMasterDir $coordMasterDir)
+coordMaxWALsender=5 # max_wal_senders: needed to configure slaves. If zero is specified,
+ # this parameter is expected to be supplied explicitly by the external
+ # files specified below. If you don't configure slaves, leave this value at zero.
+coordMaxWALSenders=($coordMaxWALsender $coordMaxWALsender $coordMaxWALsender $coordMaxWALsender)
+ # max_wal_senders configuration for each coordinator.
+
+#---- Slave -------------
+coordSlave=y # Specify y if you configure at least one coordinator slave. Otherwise, the following
+ # configuration parameters will be set to empty values.
+ # If no effective server names are found (that is, every server is specified as none),
+ # then the coordSlave value will be set to n and all the following values will be set to
+ # empty values.
+coordSlaveSync=y # Specify y to connect in synchronous mode.
+coordSlaveServers=(node07 node08 node09 node06) # none means this slave is not available
+coordSlavePorts=(20004 20005 20004 20005) # Coordinator slave listening ports
+coordSlavePoolerPorts=(20010 20011 20010 20011) # Coordinator slave pooler ports
+coordSlaveDirs=($coordSlaveDir $coordSlaveDir $coordSlaveDir $coordSlaveDir)
+coordArchLogDirs=($coordArchLogDir $coordArchLogDir $coordArchLogDir $coordArchLogDir)
+
+#---- Configuration files---
+# Need these when you'd like to set up a specific non-default configuration
+# These files will go to corresponding files for the master.
+# You may supply your bash script to setup extra config lines and extra pg_hba.conf entries
+# Or you may supply these files manually.
+coordExtraConfig=coordExtraConfig # Extra configuration file for coordinators.
+ # This file will be added to all the coordinators'
+ # postgresql.conf
+# Please note that the following sets up minimum parameters which you may want to change.
+# You can put your postgresql.conf lines here.
+cat > $coordExtraConfig <<EOF
+#================================================
+# Added to all the coordinator postgresql.conf
+# Original: $coordExtraConfig
+log_destination = 'stderr'
+logging_collector = on
+log_directory = 'pg_log'
+listen_addresses = '*'
+max_connections = 100
+EOF
+
+# Additional Configuration file for specific coordinator master.
+# You can define each setting by similar means as above.
+coordSpecificExtraConfig=(none none none none)
+coordExtraPgHba=none # Extra entry for pg_hba.conf. This file will be added to all the coordinators' pg_hba.conf
+coordSpecificExtraPgHba=(none none none none)
+
+#----- Additional Slaves -----
+#
+# Please note that this section is just a suggestion how we extend the configuration for
+# multiple and cascaded replication. They're not used in the current version.
+#
+coordAdditionalSlaves=n # Additional slaves can be specified as follows.
+coordAdditionalSlaveSet=(cad1) # Each entry specifies a set of slaves. In this case, one set of slaves is
+ # configured
+cad1_Sync=n # All the slaves in "cad1" are connected in asynchronous mode.
+ # If not, specify "y"
+ # The following lines specify the detailed configuration for the
+ # slave tag, cad1. You can define cad2 similarly.
+cad1_Servers=(node08 node09 node06 node07) # Hosts
+cad1_dir=$HOME/pgxc/nodes/coord_slave_cad1
+cad1_Dirs=($cad1_dir $cad1_dir $cad1_dir $cad1_dir)
+cad1_ArchLogDir=$HOME/pgxc/nodes/coord_archlog_cad1
+cad1_ArchLogDirs=($cad1_ArchLogDir $cad1_ArchLogDir $cad1_ArchLogDir $cad1_ArchLogDir)
+
+
+#---- Datanodes -------------------------------------------------------------------------------------------------------
+
+#---- Shortcuts --------------
+datanodeMasterDir=$HOME/pgxc/nodes/dn_master
+datanodeSlaveDir=$HOME/pgxc/nodes/dn_slave
+datanodeArchLogDir=$HOME/pgxc/nodes/datanode_archlog
+
+#---- Overall ---------------
+#primaryDatanode=datanode1 # Primary Node.
+# At present, XC has a problem issuing ALTER NODE against the primary node. Until it is fixed, the test will be done
+# without this feature.
+primaryDatanode=datanode1 # Primary Node.
+datanodeNames=(datanode1 datanode2 datanode3 datanode4)
+datanodePorts=(20008 20009 20008 20009) # Master ports
+#ifdef XCP
+datanodePoolerPorts=(20012 20013 20012 20013) # Master pooler ports
+#endif
+datanodePgHbaEntries=(192.168.1.0/24) # Assumes that all the datanodes (master/slave) accept
+ # the same connections
+ # This list sets up pg_hba.conf for $pgxcOwner user.
+ # If you'd like to setup other entries, supply them
+ # through extra configuration files specified below.
+# Note: The above parameter is extracted as "host all all 0.0.0.0/0 trust". If you don't want
+# such setups, specify the value () to this variable and supply what you want using datanodeExtraPgHba
+# and/or datanodeSpecificExtraPgHba variables.
+#datanodePgHbaEntries=(::1/128) # Same as above but for IPv6 addresses
+
+#---- Master ----------------
+datanodeMasterServers=(node06 node07 node08 node09) # none means this master is not available.
+ # This means that the master should exist but is down.
+ # The cluster is not operational until the master is
+ # recovered and ready to run.
+datanodeMasterDirs=($datanodeMasterDir $datanodeMasterDir $datanodeMasterDir $datanodeMasterDir)
+datanodeMaxWalSender=5 # max_wal_senders: needed to configure slaves. If zero is
+ # specified, this parameter is expected to be supplied explicitly
+ # by external configuration files.
+ # If you don't configure slaves, leave this value at zero.
+datanodeMaxWALSenders=($datanodeMaxWalSender $datanodeMaxWalSender $datanodeMaxWalSender $datanodeMaxWalSender)
+ # max_wal_senders configuration for each datanode
+
+#---- Slave -----------------
+datanodeSlave=y # Specify y if you configure at least one datanode slave. Otherwise, the following
+ # configuration parameters will be set to empty values.
+ # If no effective server names are found (that is, every server is specified as none),
+ # then the datanodeSlave value will be set to n and all the following values will be set to
+ # empty values.
+datanodeSlaveServers=(node07 node08 node09 node06) # value none means this slave is not available
+datanodeSlavePorts=(20008 20009 20008 20009) # Datanode slave listening ports
+datanodeSlavePoolerPorts=(20012 20013 20012 20013) # Datanode slave pooler ports
+datanodeSlaveSync=y # If datanode slave is connected in synchronized mode
+datanodeSlaveDirs=($datanodeSlaveDir $datanodeSlaveDir $datanodeSlaveDir $datanodeSlaveDir)
+datanodeArchLogDirs=( $datanodeArchLogDir $datanodeArchLogDir $datanodeArchLogDir $datanodeArchLogDir )
+
+# ---- Configuration files ---
+# You may supply your bash script to setup extra config lines and extra pg_hba.conf entries here.
+# These files will go to corresponding files for the master.
+# Or you may supply these files manually.
+datanodeExtraConfig=none # Extra configuration file for datanodes. This file will be added to all the
+ # datanodes' postgresql.conf
+datanodeSpecificExtraConfig=(none none none none)
+datanodeExtraPgHba=none # Extra entry for pg_hba.conf. This file will be added to all the datanodes' pg_hba.conf
+datanodeSpecificExtraPgHba=(none none none none)
+
+#----- Additional Slaves -----
+datanodeAdditionalSlaves=n # Additional slaves can be specified as follows.
+# datanodeAdditionalSlaveSet=(dad1 dad2) # Each entry specifies a set of slaves. In this case, two sets of slaves are
+ # configured
+# dad1_Sync=n # All the slaves in "dad1" are connected in asynchronous mode.
+ # If not, specify "y"
+ # The following lines specify the detailed configuration for each
+ # slave tag, dad1. You can define dad2 similarly.
+# dad1_Servers=(node08 node09 node06 node07) # Hosts
+# dad1_dir=$HOME/pgxc/nodes/coord_slave_cad1
+# dad1_Dirs=($cad1_dir $cad1_dir $cad1_dir $cad1_dir)
+# dad1_ArchLogDir=$HOME/pgxc/nodes/coord_archlog_cad1
+# dad1_ArchLogDirs=($cad1_ArchLogDir $cad1_ArchLogDir $cad1_ArchLogDir $cad1_ArchLogDir)
+
+#---- WAL archives -------------------------------------------------------------------------------------------------
+walArchive=n # If you'd like to configure WAL archive, edit this section.
+ # Pgxc_ctl assumes that if you configure WAL archive, you configure it
+ # for all the coordinators and datanodes.
+ # Default is "no". Please specify "y" here to turn it on.
+#
+# End of Configuration Section
+#
+#==========================================================================================================================
+
+#========================================================================================================================
+# The following is for extension. It just demonstrates how to write such an extension. There's no code
+# which takes care of these lines, so please ignore them. They are simply ignored by pgxc_ctl.
+# No side effects.
+#=============<< Beginning of future extension demonstration >> ========================================================
+# You can setup more than one backup set for various purposes, such as disaster recovery.
+walArchiveSet=(war1 war2)
+war1_source=(master) # you can specify master, slave or any other additional slaves as a source of WAL archive.
+ # Default is the master
+#war1_source=(slave) # alternative examples (kept commented out)
+#war1_source=(additional_coordinator_slave_set additional_datanode_slave_set)
+war1_host=node10 # All the nodes are backed up at the same host for a given archive set
+war1_backupdir=$HOME/pgxc/backup_war1
+war2_source=(master)
+war2_host=node11
+war2_backupdir=$HOME/pgxc/backup_war2
+#=============<< End of future extension demonstration >> ========================================================
diff --git a/contrib/pgxc_ctl/pgxc_ctl_conf_part_minimal b/contrib/pgxc_ctl/pgxc_ctl_conf_part_minimal
new file mode 100644
index 0000000000..cfd7d28d64
--- /dev/null
+++ b/contrib/pgxc_ctl/pgxc_ctl_conf_part_minimal
@@ -0,0 +1,269 @@
+#!/usr/bin/env bash
+#
+# Postgres-XC Configuration file for pgxc_ctl utility.
+#
+# Configuration file can be specified as -c option from pgxc_ctl command. Default is
+# $PGXC_CTL_HOME/pgxc_ctl.conf.
+#
+# This is a bash script, so you can make any additions for your convenience to configure
+# your Postgres-XC cluster.
+#
+# Please understand that pgxc_ctl supports only a subset of the configurations
+# Postgres-XC provides. Here are several assumptions/restrictions pgxc_ctl depends on.
+#
+# 1) All the resources of pgxc nodes have to be owned by the same user. "Same user" means
+#    a user with the same user name. The user ID may be different from server to server.
+#    This must be specified as the variable $pgxcOwner.
+#
+# 2) All the servers must be reachable via ssh without a password. It is highly recommended
+#    to set up key-based authentication among all the servers.
+#
+# 3) All the databases in coordinators/datanodes have at least one common superuser. Pgxc_ctl
+#    uses this user to connect to coordinators and datanodes. Again, no password should
+#    be used to connect. You have many options to do this: pg_hba.conf, pg_ident.conf and
+#    others. Pgxc_ctl provides a way to configure pg_hba.conf but not pg_ident.conf. This
+#    will be implemented in later releases.
+#
+# 4) GTM master and slave can listen on different ports, while a coordinator or datanode
+#    slave should be assigned the same port number as its master.
+#
+# 5) The port number of a coordinator slave must be the same as its master's.
+#
+# 6) Master and slave are connected using synchronous replication. Asynchronous replication
+#    has a slight (almost negligible) chance of bringing the whole cluster into an
+#    inconsistent state. Support for asynchronous replication may be added in a later release.
+#
+# 7) Each coordinator and datanode can have only one slave. Cascaded replication and
+#    multiple slaves are not supported in the current pgxc_ctl.
+#
+# 8) Killing nodes may end up with IPC resource leaks, such as semaphores and shared memory.
+#    Only the listening port (socket) will be cleaned with the clean command.
+#
+# 9) Backup and restore are not supported in pgxc_ctl at present. This is a big task and
+#    may need considerable resources.
+#
+#========================================================================================
+#
+#
+# pgxcInstallDir variable is needed if you invoke "deploy" command from pgxc_ctl utility.
+# If you don't, you don't need this variable.
+pgxcInstallDir=$HOME/pgxc
+#---- OVERALL -----------------------------------------------------------------------------
+#
+pgxcOwner=$USER # owner of the Postgres-XC database cluster. Here, we use this
+ # both as the Linux user and the database user. This must be
+ # the superuser of each coordinator and datanode.
+pgxcUser=$pgxcOwner # OS user of Postgres-XC owner
+
+tmpDir=/tmp # temporary dir used in XC servers
+localTmpDir=$tmpDir # temporary dir used here locally
+
+configBackup=n # If you want config file backup, specify y to this value.
+configBackupHost=pgxc-linker # host to backup config file
+configBackupDir=$HOME/pgxc # Backup directory
+configBackupFile=pgxc_ctl.bak # Backup file name --> Need to synchronize when original changed.
+
+dataDirRoot=$HOME/DATA/pgxl/nodes
+
+#---- GTM ------------------------------------------------------------------------------------
+
+# GTM is mandatory. You must have at least (and only) one GTM master in your Postgres-XC cluster.
+# If GTM crashes and you need to reconfigure it, you can do it by pgxc_update_gtm command to update
+# GTM master with others. Of course, we provide pgxc_remove_gtm command to remove it. This command
+# will not stop the current GTM. It is up to the operator.
+
+#---- Overall -------
+gtmName=gtm
+
+#---- GTM Master -----------------------------------------------
+
+#---- Overall ----
+gtmMasterServer=localhost
+gtmMasterPort=20001
+gtmMasterDir=$dataDirRoot/gtm
+
+#---- Configuration ---
+gtmExtraConfig=none # Will be added to gtm.conf for both Master and Slave (done at initialization only)
+gtmMasterSpecificExtraConfig=none # Will be added to Master's gtm.conf (done at initialization only)
+
+#---- GTM Slave -----------------------------------------------
+
+# Because GTM is a key component to maintain database consistency, you may want to configure GTM slave
+# for backup.
+
+#---- Overall ------
+gtmSlave=n # Specify y if you configure GTM Slave. Otherwise, GTM slave will not be configured and
+ # all the following variables will be reset.
+gtmSlaveName=gtmSlave
+gtmSlaveServer=localhost # value none means GTM slave is not available. Give none if you don't configure GTM Slave.
+gtmSlavePort=20002 # Not used if you don't configure GTM slave.
+gtmSlaveDir=$dataDirRoot/gtm_slv # Not used if you don't configure GTM slave.
+# Please note that after a GTM failover there will be no slave available until you configure the slave
+# again. (The pgxc_add_gtm_slave function will handle it.)
+
+#---- Configuration ----
+#gtmSlaveSpecificExtraConfig=none # Will be added to Slave's gtm.conf (done at initialization only)
+
+#---- GTM Proxy -------------------------------------------------------------------------------------------------------
+# GTM proxy will be selected based upon which server each component runs on.
+# When the master fails over to the slave, the slave inherits its master's gtm proxy. It should be
+# reconfigured based upon the new location.
+#
+# To do so, the slave should be restarted: pg_ctl promote -> (edit postgresql.conf and recovery.conf) -> pg_ctl restart
+#
+# You don't have to configure GTM Proxy if you don't configure GTM slave, or if you are happy to have every component
+# connect to GTM Master directly. If you configure GTM slave, you must configure GTM proxy too.
+
+#---- Shortcuts ------
+gtmProxyDir=$dataDirRoot/gtm_pxy
+
+#---- Overall -------
+gtmProxy=y # Specify y if you configure at least one GTM proxy. You may omit gtm proxies
+ # only when you don't configure GTM slaves.
+ # If you do not set this value to y, the following parameters will be set to default empty values.
+ # If we find there are no valid Proxy server names (that is, every server is specified
+ # as none), then the gtmProxy value will be set to "n" and all the entries will be set to
+ # empty values.
+gtmProxyNames=(gtm_pxy1) # Not used if it is not configured
+gtmProxyServers=(localhost) # Specify none if you don't configure it.
+gtmProxyPorts=(20101) # Not used if it is not configured.
+gtmProxyDirs=($gtmProxyDir.1) # Not used if it is not configured.
+
+#---- Configuration ----
+gtmPxyExtraConfig=none # Extra configuration parameter for gtm_proxy. Coordinator section has an example.
+
+#---- Coordinators ----------------------------------------------------------------------------------------------------
+
+#---- shortcuts ----------
+coordMasterDir=$dataDirRoot/coord_master
+coordSlaveDir=$HOME/coord_slave
+coordArchLogDir=$HOME/coord_archlog
+
+#---- Overall ------------
+coordNames=(coord1 coord2) # Master and slave use the same name
+coordPorts=(30001 30002) # Master server listening ports
+poolerPorts=(30011 30012) # Master pooler ports
+coordPgHbaEntries=(::1/128) # Assumes that all the coordinators (master/slave) accept
+ # the same connections
+ # This entry allows only $pgxcOwner to connect.
+ # If you'd like to setup another connection, you should
+ # supply these entries through files specified below.
+#coordPgHbaEntries=(127.0.0.1/32) # Same as above but for IPv4 connections
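+# Example (illustrative): an entry such as 192.168.1.0/24 is expanded into a
+# pg_hba.conf line of the form "host all all 192.168.1.0/24 trust" on each
+# coordinator.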
+
+#---- Master -------------
+coordMasterServers=(localhost localhost) # none means this master is not available
+coordMasterDirs=($coordMasterDir.1 $coordMasterDir.2)
+coordMaxWALsender=5 # max_wal_senders: needed to configure slaves. If zero is specified,
+ # this parameter is expected to be supplied explicitly by the external
+ # files specified below. If you don't configure slaves, leave this value at zero.
+coordMaxWALSenders=($coordMaxWALsender $coordMaxWALsender)
+ # max_wal_senders configuration for each coordinator.
+
+#---- Slave -------------
+coordSlave=n # Specify y if you configure at least one coordinator slave. Otherwise, the following
+ # configuration parameters will be set to empty values.
+ # If no effective server names are found (that is, every server is specified as none),
+ # then the coordSlave value will be set to n and all the following values will be set to
+ # empty values.
+coordSlaveSync=y # Specify y to connect in synchronous mode.
+coordSlaveServers=(localhost localhost) # none means this slave is not available
+coordSlavePorts=(30101 30102) # coordinator slave listening ports
+coordSlavePoolerPorts=(30111 30112) # coordinator slave pooler ports
+coordSlaveDirs=($coordSlaveDir.1 $coordSlaveDir.2)
+coordArchLogDirs=($coordArchLogDir.1 $coordArchLogDir.2)
+
+#---- Configuration files---
+# Need these when you'd like to set up a specific non-default configuration
+# These files will go to corresponding files for the master.
+# You may supply your bash script to setup extra config lines and extra pg_hba.conf entries
+# Or you may supply these files manually.
+coordExtraConfig=coordExtraConfig # Extra configuration file for coordinators.
+ # This file will be added to all the coordinators'
+ # postgresql.conf
+# Please note that the following sets up minimum parameters which you may want to change.
+# You can put your postgresql.conf lines here.
+cat > $coordExtraConfig <<EOF
+#================================================
+# Added to all the coordinator postgresql.conf
+# Original: $coordExtraConfig
+log_destination = 'stderr'
+logging_collector = on
+log_directory = 'pg_log'
+listen_addresses = '*'
+max_connections = 100
+hot_standby = off
+EOF
+
+# Additional Configuration file for specific coordinator master.
+# You can define each setting by similar means as above.
+coordSpecificExtraConfig=(none none)
+coordSpecificExtraPgHba=(none none)
+
+#---- Datanodes -------------------------------------------------------------------------------------------------------
+
+#---- Shortcuts --------------
+datanodeMasterDir=$dataDirRoot/dn_master
+datanodeSlaveDir=$dataDirRoot/dn_slave
+datanodeArchLogDir=$dataDirRoot/datanode_archlog
+
+#---- Overall ---------------
+primaryDatanode=datanode_1 # Primary Node.
+datanodeNames=(datanode_1 datanode_2)
+datanodePorts=(40001 40002) # Master and slave use the same port!
+#ifdef XCP
+datanodePoolerPorts=(40011 40012) # Master and slave use the same port!
+#endif
+datanodePgHbaEntries=(::1/128) # Assumes that all the datanodes (master/slave) accept
+ # the same connections
+ # This list sets up pg_hba.conf for $pgxcOwner user.
+ # If you'd like to setup other entries, supply them
+ # through extra configuration files specified below.
+#datanodePgHbaEntries=(127.0.0.1/32) # Same as above but for IPv4 connections
+
+#---- Master ----------------
+datanodeMasterServers=(localhost localhost) # none means this master is not available.
+ # This means that the master should exist but is down.
+ # The cluster is not operational until the master is
+ # recovered and ready to run.
+datanodeMasterDirs=($datanodeMasterDir.1 $datanodeMasterDir.2)
+datanodeMaxWalSender=5 # max_wal_senders: needed to configure slaves. If zero is
+ # specified, this parameter is expected to be supplied explicitly
+ # by external configuration files.
+ # If you don't configure slaves, leave this value at zero.
+datanodeMaxWALSenders=($datanodeMaxWalSender $datanodeMaxWalSender)
+ # max_wal_senders configuration for each datanode
+
+#---- Slave -----------------
+#datanodeSlave=n # Specify y if you configure at least one datanode slave. Otherwise, the following
+ # configuration parameters will be set to empty values.
+ # If no effective server names are found (that is, every server is specified as none),
+ # then the datanodeSlave value will be set to n and all the following values will be set to
+ # empty values.
+#datanodeSlaveServers=(localhost localhost) # value none means this slave is not available
+#datanodeSlavePorts=(40101 40102) # Master and slave use the same port!
+#datanodeSlavePoolerPorts=(40111 40112) # Master and slave use the same port!
+#datanodeSlaveSync=y # If datanode slave is connected in synchronized mode
+#datanodeSlaveDirs=($datanodeSlaveDir.1 $datanodeSlaveDir.2)
+#datanodeArchLogDirs=( $datanodeArchLogDir.1 $datanodeArchLogDir.2)
+
+# ---- Configuration files ---
+# You may supply your bash script to setup extra config lines and extra pg_hba.conf entries here.
+# These files will go to corresponding files for the master.
+# Or you may supply these files manually.
+datanodeExtraConfig=datanodeExtraConfig
+cat > $datanodeExtraConfig <<EOF
+#================================================
+# Added to all the datanode postgresql.conf
+# Original: $datanodeExtraConfig
+log_destination = 'stderr'
+logging_collector = on
+log_directory = 'pg_log'
+listen_addresses = '*'
+max_connections = 100
+hot_standby = off
+EOF
+# Additional Configuration file for specific datanode master.
+# You can define each setting by similar means as above.
+datanodeSpecificExtraConfig=(none none)
+datanodeSpecificExtraPgHba=(none none)
diff --git a/contrib/pgxc_ctl/pgxc_ctl_log.c b/contrib/pgxc_ctl/pgxc_ctl_log.c
new file mode 100644
index 0000000000..8934dbfa3f
--- /dev/null
+++ b/contrib/pgxc_ctl/pgxc_ctl_log.c
@@ -0,0 +1,333 @@
+/*-------------------------------------------------------------------------
+ *
+ * pgxc_ctl_log.c
+ *
+ * Logging module of Postgres-XC configuration and operation tool.
+ *
+ *
+ * Portions Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+/*
+ * To allow multiple pgxc_ctl instances to run in parallel and write a log to the
+ * same file, this module uses fcntl to lock log I/O. Lock/unlock calls nest like a
+ * stack; the actual lock is acquired/released only at the bottom level of the stack.
+ * If you'd like a series of log lines to appear as one uninterrupted block, not
+ * interleaved with other pgxc_ctl logs, take care to acquire the lock around the
+ * whole series and release it accordingly.
+ */
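+/*
+ * Illustrative nesting (not part of the module):
+ *
+ *   lockLogFile();            // depth 0 -> 1: fcntl write lock acquired
+ *   elog(NOTICE, "one\n");    // locks/unlocks internally: depth 1 -> 2 -> 1
+ *   elog(NOTICE, "two\n");    // still inside our lock; lines stay contiguous
+ *   unlockLogFile();          // depth 1 -> 0: lock released
+ */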
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <sys/types.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+#include <time.h>
+#include <fcntl.h>
+
+#include "pgxc_ctl.h"
+#include "pgxc_ctl_log.h"
+#include "varnames.h"
+#include "variables.h"
+#include "config.h"
+#include "utils.h"
+
+FILE *logFile = NULL;
+char logFileName[MAXPATH+1];
+static char *pgxcCtlGetTime(void);
+static int lockStack = 0;
+#define lockStackLimit 8
+
+int logMsgLevel = INFO;
+int printMsgLevel = WARNING;
+int printLocation = FALSE;
+int logLocation = FALSE;
+
+
+static void set_msgLogLevel(void)
+{
+ if (sval(VAR_logMessage) == NULL)
+ logMsgLevel = WARNING;
+ else if (strcasecmp(sval(VAR_logMessage), "panic") == 0)
+ logMsgLevel = PANIC;
+ else if (strcasecmp(sval(VAR_logMessage), "error") == 0)
+ logMsgLevel = ERROR;
+ else if (strcasecmp(sval(VAR_logMessage), "warning") == 0)
+ logMsgLevel = WARNING;
+ else if (strcasecmp(sval(VAR_logMessage), "notice") == 0)
+ logMsgLevel = NOTICE;
+ else if (strcasecmp(sval(VAR_logMessage), "info") == 0)
+ logMsgLevel = INFO;
+ else if (strcasecmp(sval(VAR_logMessage), "debug1") == 0)
+ logMsgLevel = DEBUG1;
+ else if (strcasecmp(sval(VAR_logMessage), "debug2") == 0)
+ logMsgLevel = DEBUG2;
+ else if (strcasecmp(sval(VAR_logMessage), "debug3") == 0)
+ logMsgLevel = DEBUG3;
+ else
+ logMsgLevel = INFO;
+}
+
+static void set_printLogLevel(void)
+{
+ if (sval(VAR_printMessage) == NULL)
+ printMsgLevel = ERROR;
+ else if (strcasecmp(sval(VAR_printMessage), "panic") == 0)
+ printMsgLevel = PANIC;
+ else if (strcasecmp(sval(VAR_printMessage), "error") == 0)
+ printMsgLevel = ERROR;
+ else if (strcasecmp(sval(VAR_printMessage), "warning") == 0)
+ printMsgLevel = WARNING;
+ else if (strcasecmp(sval(VAR_printMessage), "notice") == 0)
+ printMsgLevel = NOTICE;
+ else if (strcasecmp(sval(VAR_printMessage), "info") == 0)
+ printMsgLevel = INFO;
+ else if (strcasecmp(sval(VAR_printMessage), "debug1") == 0)
+ printMsgLevel = DEBUG1;
+ else if (strcasecmp(sval(VAR_printMessage), "debug2") == 0)
+ printMsgLevel = DEBUG2;
+ else if (strcasecmp(sval(VAR_printMessage), "debug3") == 0)
+ printMsgLevel = DEBUG3;
+ else
+ printMsgLevel = WARNING;
+}
+
+/*
+ * Open the log file. path is ignored when name is given; when name is NULL,
+ * the log file is created under path as <pid>_pgxc_ctl.log.
+ */
+void initLog(char *path, char *name)
+{
+ if(logFile)
+ return;
+ if(name)
+ strncat(logFileName, name, MAXPATH);
+ else
+ snprintf(logFileName, MAXPATH, "%s/%d_pgxc_ctl.log", path, getpid());
+ if ((logFile = fopen(logFileName, "a")) == NULL)
+ fprintf(stderr, "Could not open log file %s, %s\n", logFileName, strerror(errno));
+ /* Setup log/print message level */
+ set_msgLogLevel();
+ set_printLogLevel();
+ printLocation = (isVarYes(VAR_printLocation)) ? TRUE : FALSE;
+ logLocation = (isVarYes(VAR_logLocation)) ? TRUE : FALSE;
+ lockStack = 0;
+}
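+
+/*
+ * Example (illustrative): initLog("/home/pgxc/pgxc_ctl", NULL) appends to
+ * something like /home/pgxc/pgxc_ctl/12345_pgxc_ctl.log, where 12345 is the
+ * pid of this pgxc_ctl; initLog(NULL, "/tmp/my.log") uses that file as is.
+ */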
+
+void closeLog()
+{
+ if (logFile)
+ fclose(logFile);
+ logFile = NULL;
+}
+
+static char *fname;
+static char *funcname;
+static int lineno;
+
+void elog_start(const char *file, const char *func, int line)
+{
+ fname = Strdup(file);
+ funcname = Strdup(func);
+ lineno = line;
+}
+
+static void clean_location(void)
+{
+ freeAndReset(fname);
+ freeAndReset(funcname);
+ lineno = -1;
+}
+
+
+static void elogMsgRaw0(int level, const char *msg, int flag)
+{
+ if (logFile && level >= logMsgLevel)
+ {
+ if (logLocation && flag)
+ fprintf(logFile, "%s(%d):%s %s:%s(%d) %s", progname, getpid(), pgxcCtlGetTime(),
+ fname, funcname, lineno, msg);
+ else
+ fprintf(logFile, "%s(%d):%s %s", progname, getpid(), pgxcCtlGetTime(), msg);
+ fflush(logFile);
+ }
+ if (level >= printMsgLevel)
+ {
+ if (printLocation && flag)
+ fprintf(((outF) ? outF : stderr), "%s:%s(%d) %s", fname, funcname, lineno, msg);
+ else
+ fputs(msg, (outF) ? outF : stderr);
+ fflush((outF) ? outF : stderr);
+ }
+ clean_location();
+}
+
+void elogMsgRaw(int level, const char *msg)
+{
+ lockLogFile();
+ elogMsgRaw0(level, msg, TRUE);
+ unlockLogFile();
+}
+
+void elogFinish(int level, const char *fmt, ...)
+{
+ char msg[MAXLINE+1];
+ va_list arg;
+
+ lockLogFile();
+ if ((level >= logMsgLevel) || (level >= printMsgLevel))
+ {
+ va_start(arg, fmt);
+ vsnprintf(msg, MAXLINE, fmt, arg);
+ va_end(arg);
+ elogMsgRaw(level, msg);
+ }
+ unlockLogFile();
+}
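+
+/*
+ * elogFinish() is normally reached through the elog() wrapper declared in
+ * pgxc_ctl_log.h, e.g. (illustrative):
+ *
+ *   elog(WARNING, "could not open %s, %s\n", path, strerror(errno));
+ */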
+
+void elogFileRaw(int level, char *path)
+{
+ FILE *f;
+ char s[MAXLINE+1];
+
+ lockLogFile();
+ if ((f = fopen(path, "r")))
+ {
+ while(fgets(s, MAXLINE, f))
+ elogMsgRaw0(level, s, FALSE);
+ fclose(f);
+ }
+ else
+ elog(ERROR, "ERROR: Cannot open \"%s\" for read, %s\n", path, strerror(errno));
+ unlockLogFile();
+}
+
+static char timebuf[MAXTOKEN+1];
+
+/*
+ * Please note that this routine is not reentrant
+ */
+static char *pgxcCtlGetTime(void)
+{
+ struct tm *tm_s;
+ time_t now;
+
+ now = time(NULL);
+ tm_s = localtime(&now);
+/* tm_s = gmtime(&now); */
+
+ snprintf(timebuf, MAXTOKEN, "%02d%02d%02d%02d%02d_%02d",
+ ((tm_s->tm_year+1900) >= 2000) ? (tm_s->tm_year + (1900 - 2000)) : tm_s->tm_year,
+ tm_s->tm_mon+1, tm_s->tm_mday, tm_s->tm_hour, tm_s->tm_min, tm_s->tm_sec);
+ return timebuf;
+}
+
+void writeLogRaw(const char *fmt, ...)
+{
+ char msg[MAXLINE+1];
+ va_list arg;
+
+ va_start(arg, fmt);
+ vsnprintf(msg, MAXLINE, fmt, arg);
+ va_end(arg);
+ if (logFile)
+ {
+ lockLogFile();
+ fprintf(logFile, "%s(%d):%s %s", progname, getpid(), pgxcCtlGetTime(), msg);
+ fflush(logFile);
+ unlockLogFile();
+ }
+ fputs(msg, outF ? outF : stderr);
+ fflush(outF ? outF : stderr);
+}
+
+void writeLogOnly(const char *fmt, ...)
+{
+ char msg[MAXLINE+1];
+ va_list arg;
+
+ if (logFile)
+ {
+ va_start(arg, fmt);
+ vsnprintf(msg, MAXLINE, fmt, arg);
+ va_end(arg);
+ lockLogFile();
+ fprintf(logFile, "%s(%d):%s %s", progname, getpid(), pgxcCtlGetTime(), msg);
+ fflush(logFile);
+ unlockLogFile();
+ }
+}
+
+int setLogMsgLevel(int newLevel)
+{
+ int rc;
+
+ rc = logMsgLevel;
+ logMsgLevel = newLevel;
+ return rc;
+}
+
+int getLogMsgLevel(void)
+{
+ return logMsgLevel;
+}
+
+int setPrintMsgLevel(int newLevel)
+{
+ int rc;
+
+ rc = printMsgLevel;
+ printMsgLevel = newLevel;
+ return rc;
+}
+
+int getPrintMsgLevel(void)
+{
+ return printMsgLevel;
+}
+
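+/*
+ * lockLogFile()/unlockLogFile() nest: lockStack counts the depth, and only
+ * the outermost pair acquires and releases the fcntl() write lock, so
+ * elogFinish() can call elogMsgRaw() while the lock is already held.
+ */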
+void lockLogFile(void)
+{
+ struct flock lock1;
+
+ if (logFile == NULL)
+ return;
+ if (lockStack > lockStackLimit)
+ {
+ fprintf(stderr, "Log file lock stack exceeded the limit %d. Something must be wrong.\n", lockStackLimit);
+ return;
+ }
+ if (lockStack == 0)
+ {
+ lock1.l_type = F_WRLCK;
+ lock1.l_start = 0;
+ lock1.l_len = 0;
+ lock1.l_whence = SEEK_SET;
+ fcntl(fileno(logFile), F_SETLKW, &lock1);
+ }
+ lockStack++;
+}
+
+
+void unlockLogFile(void)
+{
+ struct flock lock1;
+
+ if (logFile == NULL)
+ return;
+ lockStack--;
+ if (lockStack < 0)
+ {
+ fprintf(stderr, "Log file lock stack went below zero. Something must be wrong.\n");
+ return;
+ }
+ if (lockStack == 0)
+ {
+ lock1.l_type = F_UNLCK;
+ lock1.l_start = 0;
+ lock1.l_len = 0;
+ lock1.l_whence = SEEK_SET;
+ fcntl(fileno(logFile), F_SETLKW, &lock1);
+ }
+}
diff --git a/contrib/pgxc_ctl/pgxc_ctl_log.h b/contrib/pgxc_ctl/pgxc_ctl_log.h
new file mode 100644
index 0000000000..790b258a1c
--- /dev/null
+++ b/contrib/pgxc_ctl/pgxc_ctl_log.h
@@ -0,0 +1,63 @@
+/*-------------------------------------------------------------------------
+ *
+ * pgxc_ctl_log.h
+ *
+ * Logging module of Postgres-XC configuration and operation tool.
+ *
+ * Portions Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef LOG_H
+#define LOG_H
+#include "pgxc_ctl.h"
+
+#define MAXMSG 4096
+
+/* Control verbosity */
+
+#define DEBUG3 10
+#define DEBUG2 11
+#define DEBUG1 12
+#define INFO 13 /* Default for logMsgLevel */
+#define NOTICE2 14
+#define NOTICE 15 /* Default for printMsgLevel */
+#define WARNING 16
+#define ERROR 17
+#define PANIC 18
+#define MANDATORY 19
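+
+/*
+ * Levels compare numerically: a message is written to the log when its level
+ * is at least logMsgLevel and printed when it is at least printMsgLevel (see
+ * elogMsgRaw0() in pgxc_ctl_log.c).
+ */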
+
+extern FILE *logFile;
+extern void elog_start(const char *file, const char *func, int line);
+extern void elogFinish(int level, const char *fmt,...) __attribute__((format(printf, 2, 3)));
+extern void elogMsgRaw(int level, const char *msg);
+extern void elogFileRaw(int level, char *fn);
+extern void initLog(char *path, char *name);
+extern void closeLog(void);
+extern void writeLogRaw(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
+extern void writeLogOnly(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
+extern int setLogMsgLevel(int newLevel);
+extern int getLogMsgLevel(void);
+extern int setPrintMsgLevel(int newLevel);
+extern int getPrintMsgLevel(void);
+extern void lockLogFile(void);
+extern void unlockLogFile(void);
+
+#define elog elog_start(__FILE__, __FUNCTION__, __LINE__), elogFinish
+#define elogMsg elog_start(__FILE__, __FUNCTION__, __LINE__), elogMsgRaw
+#define elogFile elog_start(__FILE__, __FUNCTION__, __LINE__), elogFileRaw
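+/*
+ * Example (illustrative): elog(ERROR, "Cannot open \"%s\", %s\n", path,
+ * strerror(errno)) expands to elog_start(__FILE__, __FUNCTION__, __LINE__)
+ * followed by elogFinish(ERROR, ...), so each message carries its call site.
+ */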
+/*
+#define elog elogFinish
+#define elogMsg elogMsgRaw
+#define elogFile elogFileRaw
+*/
+
+extern char logFileName[MAXPATH+1];
+
+
+extern int logMsgLevel;
+extern int printMsgLevel;
+extern int printLocation;
+extern int logLocation;
+
+#endif /* LOG_H */
diff --git a/contrib/pgxc_ctl/t/005_pgxc_ctl_minimal.pl b/contrib/pgxc_ctl/t/005_pgxc_ctl_minimal.pl
new file mode 100755
index 0000000000..0f53a71370
--- /dev/null
+++ b/contrib/pgxc_ctl/t/005_pgxc_ctl_minimal.pl
@@ -0,0 +1,30 @@
+use strict;
+use warnings;
+use Cwd;
+use Config;
+use TestLib;
+use Test::More tests => 6;
+
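+# Smoke test: prepare a minimal pgxc_ctl configuration, init and monitor the
+# cluster, then clean everything up.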
+my $dataDirRoot="~/DATA/pgxl/nodes/";
+$ENV{'PGXC_CTL_HOME'} = '/tmp/pgxc_ctl';
+my $PGXC_CTL_HOME=$ENV{'PGXC_CTL_HOME'};
+
+#delete related dirs for cleanup
+system("rm -rf $dataDirRoot");
+system("rm -rf $PGXC_CTL_HOME");
+
+program_help_ok('pgxc_ctl');
+program_version_ok('pgxc_ctl');
+
+system_or_bail 'pgxc_ctl', 'prepare', 'minimal' ;
+
+system_or_bail 'pgxc_ctl', 'init', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+#add cleanup
+system_or_bail 'pgxc_ctl', 'clean', 'all' ;
+
+#delete related dirs for cleanup
+system("rm -rf $dataDirRoot");
+system("rm -rf $PGXC_CTL_HOME");
diff --git a/contrib/pgxc_ctl/t/006_parallel_analyze.pl b/contrib/pgxc_ctl/t/006_parallel_analyze.pl
new file mode 100755
index 0000000000..42b95ab5b7
--- /dev/null
+++ b/contrib/pgxc_ctl/t/006_parallel_analyze.pl
@@ -0,0 +1,83 @@
+use strict;
+use warnings;
+use Cwd;
+use Config;
+use TestLib;
+use Test::More tests => 1;
+
+my $dataDirRoot="~/DATA/pgxl/nodes/";
+$ENV{'PGXC_CTL_HOME'} = '/tmp/pgxc_ctl';
+my $PGXC_CTL_HOME=$ENV{'PGXC_CTL_HOME'};
+
+#delete related dirs for cleanup
+system("rm -rf $dataDirRoot");
+system("rm -rf $PGXC_CTL_HOME");
+
+my $DEFAULT_DB="postgres";
+my $TEST_DB="testdb";
+my $COORD1_PORT=30001;
+
+
+system_or_bail 'pgxc_ctl', 'prepare', 'minimal' ;
+
+system_or_bail 'pgxc_ctl', 'init', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$DEFAULT_DB",'-c', "CREATE DATABASE testdb;";
+
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "CREATE TABLE ana1(col1 int, col2 text default 'ana1');";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "insert into ana1 select generate_series(1, 10000);";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "CREATE TABLE ana2(col1 int, col2 text default 'ana2');";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "insert into ana2 select generate_series(1, 10000);";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "CREATE TABLE ana3(col1 int, col2 text default 'ana3');";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "insert into ana3 select generate_series(1, 10000);";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "CREATE TABLE ana4(col1 int, col2 text default 'ana4');";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "insert into ana4 select generate_series(1, 10000);";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "CREATE TABLE ana5(col1 int, col2 text default 'ana5');";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "insert into ana5 select generate_series(1, 10000);";
+
+# 30 analyze jobs running in parallel: 29 launched in the background here,
+# plus the foreground "analyze verbose" below
+system("psql -p 30001 testdb -f t/analyze_verbose.sql --echo-all --set AUTOCOMMIT=off &") for (1 .. 29);
+command_ok([ 'psql', '-p', "$COORD1_PORT", "$TEST_DB", '-c', "analyze verbose;" ], 'analyze verbose ');
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "select pg_sleep(5);";
+
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "DROP TABLE ana1;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "DROP TABLE ana2;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "DROP TABLE ana3;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "DROP TABLE ana4;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "DROP TABLE ana5;";
+#add cleanup
+system_or_bail 'pgxc_ctl', 'clean', 'all' ;
+
+#delete related dirs for cleanup
+system("rm -rf $dataDirRoot");
+system("rm -rf $PGXC_CTL_HOME");
diff --git a/contrib/pgxc_ctl/t/007_role_recreate.pl b/contrib/pgxc_ctl/t/007_role_recreate.pl
new file mode 100755
index 0000000000..acbd23431e
--- /dev/null
+++ b/contrib/pgxc_ctl/t/007_role_recreate.pl
@@ -0,0 +1,53 @@
+use strict;
+use warnings;
+use Cwd;
+use Config;
+use TestLib;
+use Test::More tests => 11;
+
+my $dataDirRoot="~/DATA/pgxl/nodes/";
+$ENV{'PGXC_CTL_HOME'} = '/tmp/pgxc_ctl';
+my $PGXC_CTL_HOME=$ENV{'PGXC_CTL_HOME'};
+
+#delete related dirs for cleanup
+system("rm -rf $dataDirRoot");
+system("rm -rf $PGXC_CTL_HOME");
+
+my $DEFAULT_DB="postgres";
+my $TEST_DB="testdb";
+my $COORD1_PORT=30001;
+
+
+system_or_bail 'pgxc_ctl', 'prepare', 'minimal' ;
+
+system_or_bail 'pgxc_ctl', 'init', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+# run role_recreate.sql in the background, repeatedly dropping and recreating
+# test_user/test_superuser while the foreground session creates and populates
+# tables
+
+command_ok([ 'psql', '-p', "$COORD1_PORT", "$DEFAULT_DB",'-c', "CREATE DATABASE testdb;"], 'create database testdb ');
+system("psql -p 30001 testdb -f t/role_recreate.sql --echo-all --set AUTOCOMMIT=off &");
+command_ok([ 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "CREATE TABLE rol1(col1 int, col2 text default 'rol1');"], 'create table rol1 ');
+command_ok([ 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "insert into rol1 select generate_series(1, 10000);"], 'insert to rol1 table ');
+command_ok([ 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "CREATE TABLE rol2(col1 int, col2 text default 'rol2');"], 'create rol2 table ');
+command_ok([ 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "insert into rol2 select generate_series(1, 10000);"], 'insert to rol2 table ');
+command_ok([ 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "CREATE TABLE rol3(col1 int, col2 text default 'rol3');"], 'create rol3 table ');
+command_ok([ 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "insert into rol3 select generate_series(1, 10000);"], 'insert to rol3 table ');
+command_ok([ 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "CREATE TABLE rol4(col1 int, col2 text default 'rol4');"], 'create rol4 table ');
+command_ok([ 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "insert into rol4 select generate_series(1, 10000);"], 'insert to rol4 table ');
+command_ok([ 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "CREATE TABLE rol5(col1 int, col2 text default 'rol5');"], 'create rol5 table ');
+command_ok([ 'psql', '-p', "$COORD1_PORT", "$TEST_DB", '-c', "insert into rol5 select generate_series(1, 10000);" ], 'insert to rol5 table ');
+
+
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "DROP TABLE rol1;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "DROP TABLE rol2;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "DROP TABLE rol3;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "DROP TABLE rol4;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "DROP TABLE rol5;";
+#add cleanup
+system_or_bail 'pgxc_ctl', 'clean', 'all' ;
+
+#delete related dirs for cleanup
+system("rm -rf $dataDirRoot");
+system("rm -rf $PGXC_CTL_HOME");
diff --git a/contrib/pgxc_ctl/t/010_pgxc_ctl.pl b/contrib/pgxc_ctl/t/010_pgxc_ctl.pl
new file mode 100755
index 0000000000..fd7b431a01
--- /dev/null
+++ b/contrib/pgxc_ctl/t/010_pgxc_ctl.pl
@@ -0,0 +1,83 @@
+use strict;
+use warnings;
+use Cwd;
+use Config;
+use TestLib;
+use Test::More tests => 6;
+
+my $dataDirRoot="~/DATA/pgxl/nodes/";
+$ENV{'PGXC_CTL_HOME'} = '/tmp/pgxc_ctl';
+my $PGXC_CTL_HOME=$ENV{'PGXC_CTL_HOME'};
+
+#delete related dirs for cleanup
+system("rm -rf $dataDirRoot");
+system("rm -rf $PGXC_CTL_HOME");
+
+program_help_ok('pgxc_ctl');
+program_version_ok('pgxc_ctl');
+
+my $GTM_HOST = "localhost";
+my $COORD1_HOST = "localhost";
+my $COORD2_HOST = "localhost";
+my $COORD3_HOST = "localhost";
+my $DN1_HOST = "localhost";
+my $DN2_HOST = "localhost";
+my $DN3_HOST = "localhost";
+
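+# Build a cluster from an empty configuration (gtm, two coordinators, two
+# datanodes), exercise add/remove of coord3 and dn3, then test datanode slave
+# failover and GTM standby failover.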
+system_or_bail 'pgxc_ctl', 'prepare', 'config', 'empty' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'gtm', 'master', 'gtm', "$GTM_HOST", '20001', "$dataDirRoot/gtm" ;
+
+system_or_bail 'pgxc_ctl', 'add', 'coordinator', 'master', 'coord1', "$COORD1_HOST", '30001', '30011', "$dataDirRoot/coord_master.1", 'none', 'none';
+
+system_or_bail 'pgxc_ctl', 'add', 'coordinator', 'master', 'coord2', "$COORD2_HOST", '30002', '30012', "$dataDirRoot/coord_master.2", 'none', 'none';
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'master', 'dn1', "$DN1_HOST", '40001', '40011', "$dataDirRoot/dn_master.1", 'none', 'none', 'none' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'master', 'dn2', "$DN2_HOST", '40002', '40012', "$dataDirRoot/dn_master.2", 'none', 'none', 'none' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'master', 'dn3', "$DN3_HOST", '40003', '40013', "$dataDirRoot/dn_master.3", 'none', 'none', 'none' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'coordinator', 'master', 'coord3', "$COORD3_HOST", '30003', '30013', "$dataDirRoot/coord_master.3", 'none', 'none' ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'remove', 'coordinator', 'master', 'coord3', 'clean' ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'remove', 'datanode', 'master', 'dn3', 'clean' ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+#Datanode slave test
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'slave', 'dn1', "$DN1_HOST", '40101', '40111', "$dataDirRoot/dn_slave.1", 'none', "$dataDirRoot/datanode_archlog.1" ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'stop', "-m", 'immediate', 'datanode', 'master', 'dn1' ;
+
+system_or_bail 'pgxc_ctl', 'failover', 'datanode', 'dn1' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+#GTM standby test
+
+system_or_bail 'pgxc_ctl', 'add', 'gtm', 'slave', 'gtm_slave', "$GTM_HOST", '20101', "$dataDirRoot/gtm_slave" ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'stop', "-m", 'immediate', 'gtm', 'master', 'gtm' ;
+
+system_or_bail 'pgxc_ctl', 'failover', 'gtm', 'gtm' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+#add cleanup
+system_or_bail 'pgxc_ctl', 'clean', 'all' ;
+
+
+#delete related dirs for cleanup
+system("rm -rf $dataDirRoot");
+system("rm -rf $PGXC_CTL_HOME");
diff --git a/contrib/pgxc_ctl/t/020_prepared_txns.pl b/contrib/pgxc_ctl/t/020_prepared_txns.pl
new file mode 100755
index 0000000000..802c78bdd5
--- /dev/null
+++ b/contrib/pgxc_ctl/t/020_prepared_txns.pl
@@ -0,0 +1,206 @@
+use strict;
+use warnings;
+use Cwd;
+use Config;
+use TestLib;
+use Test::More tests => 5;
+
+my $dataDirRoot="~/DATA/pgxl/nodes/";
+$ENV{'PGXC_CTL_HOME'} = '/tmp/pgxc_ctl';
+my $PGXC_CTL_HOME=$ENV{'PGXC_CTL_HOME'};
+
+#delete related dirs for cleanup
+system("echo '==========clear existing configuration==========='");
+system("rm -rf $dataDirRoot");
+system("rm -rf $PGXC_CTL_HOME");
+
+my $GTM_HOST="localhost";
+my $COORD1_HOST="localhost";
+my $COORD1_PORT=30001;
+my $COORD2_PORT=30002;
+my $COORD2_HOST="localhost";
+my $DN1_HOST="localhost";
+my $DN2_HOST="localhost";
+my $DEFAULT_DB="postgres";
+my $TEST_DB="testdb";
+
+system("echo '==========prepare configuration==========='");
+system_or_bail 'pgxc_ctl', 'prepare', 'config', 'empty' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'gtm', 'master', 'gtm', "$GTM_HOST", '20001', "$dataDirRoot/gtm" ;
+
+system_or_bail 'pgxc_ctl', 'add', 'coordinator', 'master', 'coord1', "$COORD1_HOST", '30001', '30011', "$dataDirRoot/coord_master.1", 'none', 'none';
+
+system_or_bail 'pgxc_ctl', 'add', 'coordinator', 'master', 'coord2', "$COORD2_HOST", '30002', '30012', "$dataDirRoot/coord_master.2", 'none', 'none';
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'master', 'dn1', "$DN1_HOST", '40001', '40011', "$dataDirRoot/dn_master.1", 'none', 'none', 'none' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'master', 'dn2', "$DN2_HOST", '40002', '40012', "$dataDirRoot/dn_master.2", 'none', 'none', 'none' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+#add data
+
+system("echo '==========populate data==========='");
+
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$DEFAULT_DB",'-c', "CREATE DATABASE testdb;";
+
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT * FROM pgxc_node;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "CREATE TABLE disttab(col1 int, col2 int) DISTRIBUTE BY HASH(col1);";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "CREATE TABLE repltab (col1 int, col2 int) DISTRIBUTE BY REPLICATION;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "INSERT INTO disttab VALUES(1,1);";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "INSERT INTO disttab VALUES(2,2);";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "INSERT INTO disttab VALUES(3,3);";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "INSERT INTO disttab VALUES(4,4);";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "INSERT INTO disttab VALUES(5,5);";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "INSERT INTO disttab VALUES(6,6);";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "INSERT INTO disttab VALUES(7,7);";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "INSERT INTO disttab VALUES(8,8);";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "INSERT INTO disttab VALUES(9,9);";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "INSERT INTO disttab VALUES(10,10);";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "INSERT INTO disttab VALUES(11,11);";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "INSERT INTO disttab VALUES(12,12);";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "INSERT INTO disttab VALUES(13,13);";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "INSERT INTO disttab VALUES(14,14);";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "INSERT INTO disttab VALUES(15,15);";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "INSERT INTO disttab VALUES(16,16);";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "INSERT INTO disttab VALUES(17,17);";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "INSERT INTO disttab VALUES(18,18);";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "INSERT INTO disttab VALUES(19,19);";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "INSERT INTO disttab VALUES(20,20);";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "INSERT INTO repltab VALUES (generate_series(1,100), generate_series(101, 200));";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT count(*) FROM disttab;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT xc_node_id, count(*) FROM disttab GROUP BY xc_node_id;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT xc_node_id, * FROM disttab;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT count(*) FROM repltab;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT xc_node_id, count(*) FROM repltab GROUP BY xc_node_id;";
+
+# test with killing nodes
+# do data sanity check
+
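+# prep_tx1.sql opens a SERIALIZABLE transaction, sleeps, inserts rows 21..40
+# into disttab, and leaves the transaction prepared as 'foo1'; killing dn1
+# while it is in flight should leave a dangling prepared transaction for the
+# pg_prepared_xacts checks below.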
+system("psql -p 30001 testdb -f t/prep_tx1.sql --echo-all --set AUTOCOMMIT=off &");
+system("echo '==========kill dn1 -- with data==========='");
+system_or_bail 'pgxc_ctl', 'kill', 'datanode', 'master', 'dn1' ;
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "select pg_sleep(3);";
+system_or_bail 'pgxc_ctl', 'start', 'datanode', 'master', 'dn1' ;
+
+
+system("echo '==========data sanity check==========='");
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT count(*) FROM disttab;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT xc_node_id, count(*) FROM disttab GROUP BY xc_node_id;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT count(*) FROM repltab;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT xc_node_id, count(*) FROM repltab GROUP BY xc_node_id;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT * from pg_prepared_xacts;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "EXECUTE DIRECT ON (dn1) 'SELECT * from pg_prepared_xacts;';";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "EXECUTE DIRECT ON (dn2) 'SELECT * from pg_prepared_xacts;';";
+system("echo '==========checking on coord1==========='");
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "EXECUTE DIRECT ON (coord1) 'SELECT * from pg_prepared_xacts;';";
+system("echo '==========checking on coord2==========='");
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "EXECUTE DIRECT ON (coord2) 'SELECT * from pg_prepared_xacts;';";
+
+
+system("psql -p 30001 testdb -f t/prep_tx2.sql --echo-all --set AUTOCOMMIT=off &");
+system("echo '==========kill coord1 -- with data==========='");
+system_or_bail 'pgxc_ctl', 'kill', 'coordinator', 'master', 'coord1' ;
+system_or_bail 'sleep', '3';
+system_or_bail 'pgxc_ctl', 'start', 'coordinator', 'master', 'coord1' ;
+
+
+system("echo '==========data sanity check==========='");
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT count(*) FROM disttab;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT xc_node_id, count(*) FROM disttab GROUP BY xc_node_id;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT count(*) FROM repltab;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT xc_node_id, count(*) FROM repltab GROUP BY xc_node_id;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT * from pg_prepared_xacts;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "EXECUTE DIRECT ON (dn1) 'SELECT * from pg_prepared_xacts;';";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "EXECUTE DIRECT ON (dn2) 'SELECT * from pg_prepared_xacts;';";
+system("echo '==========checking on coord1==========='");
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "EXECUTE DIRECT ON (coord1) 'SELECT * from pg_prepared_xacts;';";
+system("echo '==========checking on coord2==========='");
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "EXECUTE DIRECT ON (coord2) 'SELECT * from pg_prepared_xacts;';";
+
+
+
+system("psql -p 30001 testdb -f t/prep_tx3.sql --echo-all --set AUTOCOMMIT=off &");
+system("echo '==========kill coord2 -- with data==========='");
+system_or_bail 'pgxc_ctl', 'kill', 'coordinator', 'master', 'coord2' ;
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "select pg_sleep(3);";
+system_or_bail 'pgxc_ctl', 'start', 'coordinator', 'master', 'coord2' ;
+
+
+system("echo '==========data sanity check==========='");
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT count(*) FROM disttab;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT xc_node_id, count(*) FROM disttab GROUP BY xc_node_id;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT count(*) FROM repltab;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT xc_node_id, count(*) FROM repltab GROUP BY xc_node_id;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT * from pg_prepared_xacts;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "EXECUTE DIRECT ON (dn1) 'SELECT * from pg_prepared_xacts;';";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "EXECUTE DIRECT ON (dn2) 'SELECT * from pg_prepared_xacts;';";
+system("echo '==========checking on coord1==========='");
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "EXECUTE DIRECT ON (coord1) 'SELECT * from pg_prepared_xacts;';";
+system("echo '==========checking on coord2==========='");
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "EXECUTE DIRECT ON (coord2) 'SELECT * from pg_prepared_xacts;';";
+
+
+
+system("psql -p 30001 testdb -f t/prep_tx4.sql --echo-all --set AUTOCOMMIT=off &");
+system("echo '==========kill gtm master -- with data==========='");
+system_or_bail 'pgxc_ctl', 'kill', 'gtm', 'master', 'gtm' ;
+system_or_bail 'sleep', '3';
+system_or_bail 'pgxc_ctl', 'start', 'gtm', 'master', 'gtm' ;
+system_or_bail 'sleep', '10';
+
+system("echo '==========data sanity check==========='");
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT count(*) FROM disttab;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT xc_node_id, count(*) FROM disttab GROUP BY xc_node_id;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT count(*) FROM repltab;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT xc_node_id, count(*) FROM repltab GROUP BY xc_node_id;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT * from pg_prepared_xacts;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "EXECUTE DIRECT ON (dn1) 'SELECT * from pg_prepared_xacts;';";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "EXECUTE DIRECT ON (dn2) 'SELECT * from pg_prepared_xacts;';";
+system("echo '==========checking on coord1==========='");
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "EXECUTE DIRECT ON (coord1) 'SELECT * from pg_prepared_xacts;';";
+system("echo '==========checking on coord2==========='");
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "EXECUTE DIRECT ON (coord2) 'SELECT * from pg_prepared_xacts;';";
+
+
+
+system("echo '==========commit prepared transactions==========='");
+
+command_fails([ 'psql', '-p', "$COORD1_PORT", "$TEST_DB", '-c', "COMMIT PREPARED 'foo1';" ], 'cannot commit prepared transaction foo1 ');
+command_fails([ 'psql', '-p', "$COORD1_PORT", "$TEST_DB", '-c', "COMMIT PREPARED 'foo2';" ], 'cannot commit prepared transaction foo2');
+command_ok([ 'psql', '-p', "$COORD1_PORT", "$TEST_DB", '-c', "EXECUTE DIRECT ON (coord2) 'COMMIT PREPARED ''foo3'';';" ], 'commit prepared transaction foo3 directly on coord2 ');
+command_ok([ 'psql', '-p', "$COORD2_PORT", "$TEST_DB", '-c', "COMMIT PREPARED 'foo3';" ], 'commit prepared transaction foo3 connecting to coord2 ');
+command_fails([ 'psql', '-p', "$COORD1_PORT", "$TEST_DB", '-c', "COMMIT PREPARED 'foo4';" ], 'cannot commit prepared transaction foo4 ');
+
+system("echo '==========data sanity check==========='");
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT count(*) FROM disttab;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT xc_node_id, count(*) FROM disttab GROUP BY xc_node_id;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT count(*) FROM repltab;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT xc_node_id, count(*) FROM repltab GROUP BY xc_node_id;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "SELECT * from pg_prepared_xacts;";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "EXECUTE DIRECT ON (dn1) 'SELECT * from pg_prepared_xacts;';";
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "EXECUTE DIRECT ON (dn2) 'SELECT * from pg_prepared_xacts;';";
+system("echo '==========checking on coord1==========='");
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "EXECUTE DIRECT ON (coord1) 'SELECT * from pg_prepared_xacts;';";
+system("echo '==========checking on coord2==========='");
+system_or_bail 'psql', '-p', "$COORD1_PORT", "$TEST_DB",'-c', "EXECUTE DIRECT ON (coord2) 'SELECT * from pg_prepared_xacts;';";
+
+#add cleanup
+system_or_bail 'pgxc_ctl', 'clean', 'all' ;
+
+
+#delete related dirs for cleanup
+system("rm -rf $dataDirRoot");
+system("rm -rf $PGXC_CTL_HOME");
+
diff --git a/contrib/pgxc_ctl/t/030_pgxc_ctl_file_sanity_check.pl b/contrib/pgxc_ctl/t/030_pgxc_ctl_file_sanity_check.pl
new file mode 100755
index 0000000000..6e8d2546ed
--- /dev/null
+++ b/contrib/pgxc_ctl/t/030_pgxc_ctl_file_sanity_check.pl
@@ -0,0 +1,375 @@
+use strict;
+use warnings;
+use Cwd;
+use Config;
+use TestLib;
+use Test::More tests => 6;
+
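+# Run the add/remove/failover cycle of 010_pgxc_ctl.pl six times from a clean
+# slate to sanity-check the configuration files pgxc_ctl rewrites on each
+# pass.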
+my $dataDirRoot="~/DATA/pgxl/nodes/";
+$ENV{'PGXC_CTL_HOME'} = '/tmp/pgxc_ctl';
+my $PGXC_CTL_HOME=$ENV{'PGXC_CTL_HOME'};
+
+#delete related dirs for cleanup
+system("rm -rf $dataDirRoot");
+system("rm -rf $PGXC_CTL_HOME");
+
+program_help_ok('pgxc_ctl');
+program_version_ok('pgxc_ctl');
+
+my $GTM_HOST = "localhost";
+my $COORD1_HOST = "localhost";
+my $COORD2_HOST = "localhost";
+my $COORD3_HOST = "localhost";
+my $DN1_HOST = "localhost";
+my $DN2_HOST = "localhost";
+my $DN3_HOST = "localhost";
+
+system_or_bail 'pgxc_ctl', 'prepare', 'config', 'empty' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'gtm', 'master', 'gtm', "$GTM_HOST", '20001', "$dataDirRoot/gtm" ;
+
+system_or_bail 'pgxc_ctl', 'add', 'coordinator', 'master', 'coord1', "$COORD1_HOST", '30001', '30011', "$dataDirRoot/coord_master.1", 'none', 'none';
+
+system_or_bail 'pgxc_ctl', 'add', 'coordinator', 'master', 'coord2', "$COORD2_HOST", '30002', '30012', "$dataDirRoot/coord_master.2", 'none', 'none';
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'master', 'dn1', "$DN1_HOST", '40001', '40011', "$dataDirRoot/dn_master.1", 'none', 'none', 'none' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'master', 'dn2', "$DN2_HOST", '40002', '40012', "$dataDirRoot/dn_master.2", 'none', 'none', 'none' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'master', 'dn3', "$DN3_HOST", '40003', '40013', "$dataDirRoot/dn_master.3", 'none', 'none', 'none' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'coordinator', 'master', 'coord3', "$COORD3_HOST", '30003', '30013', "$dataDirRoot/coord_master.3", 'none', 'none' ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'remove', 'coordinator', 'master', 'coord3', 'clean' ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'remove', 'datanode', 'master', 'dn3', 'clean' ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+#Datanode slave test
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'slave', 'dn1', "$DN1_HOST", '40101', '40111', "$dataDirRoot/dn_slave.1", 'none', "$dataDirRoot/datanode_archlog.1" ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'stop', "-m", 'immediate', 'datanode', 'master', 'dn1' ;
+
+system_or_bail 'pgxc_ctl', 'failover', 'datanode', 'dn1' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+#GTM standby test
+
+system_or_bail 'pgxc_ctl', 'add', 'gtm', 'slave', 'gtm_slave', "$GTM_HOST", '20101', "$dataDirRoot/gtm_slave" ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'stop', "-m", 'immediate', 'gtm', 'master', 'gtm' ;
+
+system_or_bail 'pgxc_ctl', 'failover', 'gtm', 'gtm' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+#add cleanup
+system_or_bail 'pgxc_ctl', 'clean', 'all' ;
+
+
+#delete related dirs for cleanup
+system("rm -rf $dataDirRoot");
+system("rm -rf $PGXC_CTL_HOME");
+
+system_or_bail 'pgxc_ctl', 'prepare', 'config', 'empty' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'gtm', 'master', 'gtm', "$GTM_HOST", '20001', "$dataDirRoot/gtm" ;
+
+system_or_bail 'pgxc_ctl', 'add', 'coordinator', 'master', 'coord1', "$COORD1_HOST", '30001', '30011', "$dataDirRoot/coord_master.1", 'none', 'none';
+
+system_or_bail 'pgxc_ctl', 'add', 'coordinator', 'master', 'coord2', "$COORD2_HOST", '30002', '30012', "$dataDirRoot/coord_master.2", 'none', 'none';
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'master', 'dn1', "$DN1_HOST", '40001', '40011', "$dataDirRoot/dn_master.1", 'none', 'none', 'none' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'master', 'dn2', "$DN2_HOST", '40002', '40012', "$dataDirRoot/dn_master.2", 'none', 'none', 'none' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'master', 'dn3', "$DN3_HOST", '40003', '40013', "$dataDirRoot/dn_master.3", 'none', 'none', 'none' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'coordinator', 'master', 'coord3', "$COORD3_HOST", '30003', '30013', "$dataDirRoot/coord_master.3", 'none', 'none' ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'remove', 'coordinator', 'master', 'coord3', 'clean' ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'remove', 'datanode', 'master', 'dn3', 'clean' ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+#Datanode slave test
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'slave', 'dn1', "$DN1_HOST", '40101', '40111', "$dataDirRoot/dn_slave.1", 'none', "$dataDirRoot/datanode_archlog.1" ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'stop', "-m", 'immediate', 'datanode', 'master', 'dn1' ;
+
+system_or_bail 'pgxc_ctl', 'failover', 'datanode', 'dn1' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+#GTM standby test
+
+system_or_bail 'pgxc_ctl', 'add', 'gtm', 'slave', 'gtm_slave', "$GTM_HOST", '20101', "$dataDirRoot/gtm_slave" ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'stop', "-m", 'immediate', 'gtm', 'master', 'gtm' ;
+
+system_or_bail 'pgxc_ctl', 'failover', 'gtm', 'gtm' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+#add cleanup
+system_or_bail 'pgxc_ctl', 'clean', 'all' ;
+
+
+#delete related dirs for cleanup
+system("rm -rf $dataDirRoot");
+system("rm -rf $PGXC_CTL_HOME");
+
+system_or_bail 'pgxc_ctl', 'prepare', 'config', 'empty' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'gtm', 'master', 'gtm', "$GTM_HOST", '20001', "$dataDirRoot/gtm" ;
+
+system_or_bail 'pgxc_ctl', 'add', 'coordinator', 'master', 'coord1', "$COORD1_HOST", '30001', '30011', "$dataDirRoot/coord_master.1", 'none', 'none';
+
+system_or_bail 'pgxc_ctl', 'add', 'coordinator', 'master', 'coord2', "$COORD2_HOST", '30002', '30012', "$dataDirRoot/coord_master.2", 'none', 'none';
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'master', 'dn1', "$DN1_HOST", '40001', '40011', "$dataDirRoot/dn_master.1", 'none', 'none', 'none' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'master', 'dn2', "$DN2_HOST", '40002', '40012', "$dataDirRoot/dn_master.2", 'none', 'none', 'none' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'master', 'dn3', "$DN3_HOST", '40003', '40013', "$dataDirRoot/dn_master.3", 'none', 'none', 'none' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'coordinator', 'master', 'coord3', "$COORD3_HOST", '30003', '30013', "$dataDirRoot/coord_master.3", 'none', 'none' ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'remove', 'coordinator', 'master', 'coord3', 'clean' ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'remove', 'datanode', 'master', 'dn3', 'clean' ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+#Datanode slave test
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'slave', 'dn1', "$DN1_HOST", '40101', '40111', "$dataDirRoot/dn_slave.1", 'none', "$dataDirRoot/datanode_archlog.1" ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'stop', "-m", 'immediate', 'datanode', 'master', 'dn1' ;
+
+system_or_bail 'pgxc_ctl', 'failover', 'datanode', 'dn1' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+#GTM standby test
+
+system_or_bail 'pgxc_ctl', 'add', 'gtm', 'slave', 'gtm_slave', "$GTM_HOST", '20101', "$dataDirRoot/gtm_slave" ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'stop', "-m", 'immediate', 'gtm', 'master', 'gtm' ;
+
+system_or_bail 'pgxc_ctl', 'failover', 'gtm', 'gtm' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+#add cleanup
+system_or_bail 'pgxc_ctl', 'clean', 'all' ;
+
+
+#delete related dirs for cleanup
+system("rm -rf $dataDirRoot");
+system("rm -rf $PGXC_CTL_HOME");
+
+system_or_bail 'pgxc_ctl', 'prepare', 'config', 'empty' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'gtm', 'master', 'gtm', "$GTM_HOST", '20001', "$dataDirRoot/gtm" ;
+
+system_or_bail 'pgxc_ctl', 'add', 'coordinator', 'master', 'coord1', "$COORD1_HOST", '30001', '30011', "$dataDirRoot/coord_master.1", 'none', 'none';
+
+system_or_bail 'pgxc_ctl', 'add', 'coordinator', 'master', 'coord2', "$COORD2_HOST", '30002', '30012', "$dataDirRoot/coord_master.2", 'none', 'none';
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'master', 'dn1', "$DN1_HOST", '40001', '40011', "$dataDirRoot/dn_master.1", 'none', 'none', 'none' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'master', 'dn2', "$DN2_HOST", '40002', '40012', "$dataDirRoot/dn_master.2", 'none', 'none', 'none' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'master', 'dn3', "$DN3_HOST", '40003', '40013', "$dataDirRoot/dn_master.3", 'none', 'none', 'none' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'coordinator', 'master', 'coord3', "$COORD3_HOST", '30003', '30013', "$dataDirRoot/coord_master.3", 'none', 'none' ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'remove', 'coordinator', 'master', 'coord3', 'clean' ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'remove', 'datanode', 'master', 'dn3', 'clean' ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+#Datanode slave test
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'slave', 'dn1', "$DN1_HOST", '40101', '40111', "$dataDirRoot/dn_slave.1", 'none', "$dataDirRoot/datanode_archlog.1" ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'stop', "-m", 'immediate', 'datanode', 'master', 'dn1' ;
+
+system_or_bail 'pgxc_ctl', 'failover', 'datanode', 'dn1' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+#GTM standby test
+
+system_or_bail 'pgxc_ctl', 'add', 'gtm', 'slave', 'gtm_slave', "$GTM_HOST", '20101', "$dataDirRoot/gtm_slave" ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'stop', "-m", 'immediate', 'gtm', 'master', 'gtm' ;
+
+system_or_bail 'pgxc_ctl', 'failover', 'gtm', 'gtm' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+#add cleanup
+system_or_bail 'pgxc_ctl', 'clean', 'all' ;
+
+
+#delete related dirs for cleanup
+system("rm -rf $dataDirRoot");
+system("rm -rf $PGXC_CTL_HOME");
+
+
+system_or_bail 'pgxc_ctl', 'prepare', 'config', 'empty' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'gtm', 'master', 'gtm', "$GTM_HOST", '20001', "$dataDirRoot/gtm" ;
+
+system_or_bail 'pgxc_ctl', 'add', 'coordinator', 'master', 'coord1', "$COORD1_HOST", '30001', '30011', "$dataDirRoot/coord_master.1", 'none', 'none';
+
+system_or_bail 'pgxc_ctl', 'add', 'coordinator', 'master', 'coord2', "$COORD2_HOST", '30002', '30012', "$dataDirRoot/coord_master.2", 'none', 'none';
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'master', 'dn1', "$DN1_HOST", '40001', '40011', "$dataDirRoot/dn_master.1", 'none', 'none', 'none' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'master', 'dn2', "$DN2_HOST", '40002', '40012', "$dataDirRoot/dn_master.2", 'none', 'none', 'none' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'master', 'dn3', "$DN3_HOST", '40003', '40013', "$dataDirRoot/dn_master.3", 'none', 'none', 'none' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'coordinator', 'master', 'coord3', "$COORD3_HOST", '30003', '30013', "$dataDirRoot/coord_master.3", 'none', 'none' ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'remove', 'coordinator', 'master', 'coord3', 'clean' ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'remove', 'datanode', 'master', 'dn3', 'clean' ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+#Datanode slave test
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'slave', 'dn1', "$DN1_HOST", '40101', '40111', "$dataDirRoot/dn_slave.1", 'none', "$dataDirRoot/datanode_archlog.1" ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'stop', "-m", 'immediate', 'datanode', 'master', 'dn1' ;
+
+system_or_bail 'pgxc_ctl', 'failover', 'datanode', 'dn1' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+#GTM standby test
+
+system_or_bail 'pgxc_ctl', 'add', 'gtm', 'slave', 'gtm_slave', "$GTM_HOST", '20101', "$dataDirRoot/gtm_slave" ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'stop', "-m", 'immediate', 'gtm', 'master', 'gtm' ;
+
+system_or_bail 'pgxc_ctl', 'failover', 'gtm', 'gtm' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+#add cleanup
+system_or_bail 'pgxc_ctl', 'clean', 'all' ;
+
+
+#delete related dirs for cleanup
+system("rm -rf $dataDirRoot");
+system("rm -rf $PGXC_CTL_HOME");
+
+
+system_or_bail 'pgxc_ctl', 'prepare', 'config', 'empty' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'gtm', 'master', 'gtm', "$GTM_HOST", '20001', "$dataDirRoot/gtm" ;
+
+system_or_bail 'pgxc_ctl', 'add', 'coordinator', 'master', 'coord1', "$COORD1_HOST", '30001', '30011', "$dataDirRoot/coord_master.1", 'none', 'none';
+
+system_or_bail 'pgxc_ctl', 'add', 'coordinator', 'master', 'coord2', "$COORD2_HOST", '30002', '30012', "$dataDirRoot/coord_master.2", 'none', 'none';
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'master', 'dn1', "$DN1_HOST", '40001', '40011', "$dataDirRoot/dn_master.1", 'none', 'none', 'none' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'master', 'dn2', "$DN2_HOST", '40002', '40012', "$dataDirRoot/dn_master.2", 'none', 'none', 'none' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'master', 'dn3', "$DN3_HOST", '40003', '40013', "$dataDirRoot/dn_master.3", 'none', 'none', 'none' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'add', 'coordinator', 'master', 'coord3', "$COORD3_HOST", '30003', '30013', "$dataDirRoot/coord_master.3", 'none', 'none' ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'remove', 'coordinator', 'master', 'coord3', 'clean' ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'remove', 'datanode', 'master', 'dn3', 'clean' ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+#Datanode slave test
+
+system_or_bail 'pgxc_ctl', 'add', 'datanode', 'slave', 'dn1', "$DN1_HOST", '40101', '40111', "$dataDirRoot/dn_slave.1", 'none', "$dataDirRoot/datanode_archlog.1" ;
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'stop', "-m", 'immediate', 'datanode', 'master', 'dn1' ;
+
+system_or_bail 'pgxc_ctl', 'failover', 'datanode', 'dn1' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+#GTM standby test
+
+system_or_bail 'pgxc_ctl', 'add', 'gtm', 'slave', 'gtm_slave', "$GTM_HOST", '20101', "$dataDirRoot/gtm_slave" ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+system_or_bail 'pgxc_ctl', 'stop', "-m", 'immediate', 'gtm', 'master', 'gtm' ;
+
+system_or_bail 'pgxc_ctl', 'failover', 'gtm', 'gtm' ;
+
+system_or_bail 'pgxc_ctl', 'monitor', 'all' ;
+
+#add cleanup
+system_or_bail 'pgxc_ctl', 'clean', 'all' ;
+
+
+#delete related dirs for cleanup
+system("rm -rf $dataDirRoot");
+system("rm -rf $PGXC_CTL_HOME");
diff --git a/contrib/pgxc_ctl/t/analyze_verbose.sql b/contrib/pgxc_ctl/t/analyze_verbose.sql
new file mode 100755
index 0000000000..e985710e86
--- /dev/null
+++ b/contrib/pgxc_ctl/t/analyze_verbose.sql
@@ -0,0 +1 @@
+analyze verbose;
diff --git a/contrib/pgxc_ctl/t/prep_tx1.sql b/contrib/pgxc_ctl/t/prep_tx1.sql
new file mode 100755
index 0000000000..79c355a283
--- /dev/null
+++ b/contrib/pgxc_ctl/t/prep_tx1.sql
@@ -0,0 +1,23 @@
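+-- Open a serializable transaction, sleep so the test driver can kill a node
+-- while it is in flight, insert into the distributed table, then leave the
+-- transaction prepared as 'foo1'.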
+BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+SELECT pg_sleep(3);
+INSERT INTO disttab VALUES(21,21);
+INSERT INTO disttab VALUES(22,22);
+INSERT INTO disttab VALUES(23,23);
+INSERT INTO disttab VALUES(24,24);
+INSERT INTO disttab VALUES(25,25);
+INSERT INTO disttab VALUES(26,26);
+INSERT INTO disttab VALUES(27,27);
+INSERT INTO disttab VALUES(28,28);
+INSERT INTO disttab VALUES(29,29);
+INSERT INTO disttab VALUES(30,30);
+INSERT INTO disttab VALUES(31,31);
+INSERT INTO disttab VALUES(32,32);
+INSERT INTO disttab VALUES(33,33);
+INSERT INTO disttab VALUES(34,34);
+INSERT INTO disttab VALUES(35,35);
+INSERT INTO disttab VALUES(36,36);
+INSERT INTO disttab VALUES(37,37);
+INSERT INTO disttab VALUES(38,38);
+INSERT INTO disttab VALUES(39,39);
+INSERT INTO disttab VALUES(40,40);
+PREPARE TRANSACTION 'foo1';
diff --git a/contrib/pgxc_ctl/t/prep_tx2.sql b/contrib/pgxc_ctl/t/prep_tx2.sql
new file mode 100755
index 0000000000..4e4efb8161
--- /dev/null
+++ b/contrib/pgxc_ctl/t/prep_tx2.sql
@@ -0,0 +1,23 @@
+BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+SELECT pg_sleep(3);
+INSERT INTO disttab VALUES(21,21);
+INSERT INTO disttab VALUES(22,22);
+INSERT INTO disttab VALUES(23,23);
+INSERT INTO disttab VALUES(24,24);
+INSERT INTO disttab VALUES(25,25);
+INSERT INTO disttab VALUES(26,26);
+INSERT INTO disttab VALUES(27,27);
+INSERT INTO disttab VALUES(28,28);
+INSERT INTO disttab VALUES(29,29);
+INSERT INTO disttab VALUES(30,30);
+INSERT INTO disttab VALUES(31,31);
+INSERT INTO disttab VALUES(32,32);
+INSERT INTO disttab VALUES(33,33);
+INSERT INTO disttab VALUES(34,34);
+INSERT INTO disttab VALUES(35,35);
+INSERT INTO disttab VALUES(36,36);
+INSERT INTO disttab VALUES(37,37);
+INSERT INTO disttab VALUES(38,38);
+INSERT INTO disttab VALUES(39,39);
+INSERT INTO disttab VALUES(40,40);
+PREPARE TRANSACTION 'foo2';
diff --git a/contrib/pgxc_ctl/t/prep_tx3.sql b/contrib/pgxc_ctl/t/prep_tx3.sql
new file mode 100755
index 0000000000..0278d39553
--- /dev/null
+++ b/contrib/pgxc_ctl/t/prep_tx3.sql
@@ -0,0 +1,23 @@
+BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+SELECT pg_sleep(3);
+INSERT INTO disttab VALUES(21,21);
+INSERT INTO disttab VALUES(22,22);
+INSERT INTO disttab VALUES(23,23);
+INSERT INTO disttab VALUES(24,24);
+INSERT INTO disttab VALUES(25,25);
+INSERT INTO disttab VALUES(26,26);
+INSERT INTO disttab VALUES(27,27);
+INSERT INTO disttab VALUES(28,28);
+INSERT INTO disttab VALUES(29,29);
+INSERT INTO disttab VALUES(30,30);
+INSERT INTO disttab VALUES(31,31);
+INSERT INTO disttab VALUES(32,32);
+INSERT INTO disttab VALUES(33,33);
+INSERT INTO disttab VALUES(34,34);
+INSERT INTO disttab VALUES(35,35);
+INSERT INTO disttab VALUES(36,36);
+INSERT INTO disttab VALUES(37,37);
+INSERT INTO disttab VALUES(38,38);
+INSERT INTO disttab VALUES(39,39);
+INSERT INTO disttab VALUES(40,40);
+PREPARE TRANSACTION 'foo3';
diff --git a/contrib/pgxc_ctl/t/prep_tx4.sql b/contrib/pgxc_ctl/t/prep_tx4.sql
new file mode 100755
index 0000000000..1f328170eb
--- /dev/null
+++ b/contrib/pgxc_ctl/t/prep_tx4.sql
@@ -0,0 +1,23 @@
+BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+SELECT pg_sleep(3);
+INSERT INTO disttab VALUES(21,21);
+INSERT INTO disttab VALUES(22,22);
+INSERT INTO disttab VALUES(23,23);
+INSERT INTO disttab VALUES(24,24);
+INSERT INTO disttab VALUES(25,25);
+INSERT INTO disttab VALUES(26,26);
+INSERT INTO disttab VALUES(27,27);
+INSERT INTO disttab VALUES(28,28);
+INSERT INTO disttab VALUES(29,29);
+INSERT INTO disttab VALUES(30,30);
+INSERT INTO disttab VALUES(31,31);
+INSERT INTO disttab VALUES(32,32);
+INSERT INTO disttab VALUES(33,33);
+INSERT INTO disttab VALUES(34,34);
+INSERT INTO disttab VALUES(35,35);
+INSERT INTO disttab VALUES(36,36);
+INSERT INTO disttab VALUES(37,37);
+INSERT INTO disttab VALUES(38,38);
+INSERT INTO disttab VALUES(39,39);
+INSERT INTO disttab VALUES(40,40);
+PREPARE TRANSACTION 'foo4';
diff --git a/contrib/pgxc_ctl/t/role_recreate.sql b/contrib/pgxc_ctl/t/role_recreate.sql
new file mode 100755
index 0000000000..2a68a26972
--- /dev/null
+++ b/contrib/pgxc_ctl/t/role_recreate.sql
@@ -0,0 +1,1200 @@
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
+CREATE ROLE test_user;
+DROP ROLE test_user;
+CREATE ROLE test_superuser WITH SUPERUSER;
+DROP ROLE test_superuser;
diff --git a/contrib/pgxc_ctl/utils.c b/contrib/pgxc_ctl/utils.c
new file mode 100644
index 0000000000..501e7f83ea
--- /dev/null
+++ b/contrib/pgxc_ctl/utils.c
@@ -0,0 +1,462 @@
+/*-------------------------------------------------------------------------
+ *
+ * utils.c
+ *
+ * Utility module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+/*
+ * Various small utility routines.
+ */
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <dirent.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdio.h>
+#include <time.h>
+
+#include "../../src/interfaces/libpq/libpq-fe.h"
+#include "utils.h"
+#include "pgxc_ctl.h"
+#include "pgxc_ctl_log.h"
+#include "do_shell.h"
+#include "config.h"
+#include "variables.h"
+#include "varnames.h"
+#include "c.h"
+
+
+static int Malloc_ed = 0;
+static int Strdup_ed = 0;
+static int Freed = 0;
+
+void *Malloc(size_t size)
+{
+ void *rv = malloc(size);
+
+ Malloc_ed++;
+ if (rv == NULL)
+ {
+ elog(PANIC, "PANIC: No more memory. See core file for details.\n");
+ abort();
+ }
+ return(rv);
+}
+
+char **addToList(char **List, char *val)
+{
+	char **rv;
+	int ii;
+
+	/* Count the current entries; List is NULL-terminated */
+	for (ii = 0; List[ii]; ii++);
+	/* Grow by one slot for val plus one for the terminator */
+	rv = Realloc(List, sizeof(char *) * (ii + 2));
+	rv[ii] = Strdup(val);
+	rv[ii + 1] = NULL;
+	return rv;
+}
+
+void *Malloc0(size_t size)
+{
+ void *rv = malloc(size);
+
+ Malloc_ed++;
+ if (rv == NULL)
+ {
+ elog(PANIC, "PANIC: No more memory. See core file for details.\n");
+ abort();
+ }
+ memset(rv, 0, size);
+ return(rv);
+}
+
+void *Realloc(void *ptr, size_t size)
+{
+ void *rv = realloc(ptr, size);
+
+ if (rv == NULL)
+ {
+ elog(PANIC, "PANIC: No more memory. See core file for details.\n");
+ abort();
+ }
+ return(rv);
+}
+
+void Free(void *ptr)
+{
+ Freed++;
+ if (ptr)
+ free(ptr);
+}
+
+/*
+ * If chdir fails, exit(1) when flag is TRUE; otherwise return -1.
+ */
+int Chdir(char *path, int flag)
+{
+ if (chdir(path))
+ {
+ elog(ERROR, "ERROR: Could not change work directory to \"%s\". %s%s\n",
+ path,
+ flag == TRUE ? "Exiting. " : "",
+ strerror(errno));
+ if (flag == TRUE)
+ exit(1);
+ else
+ return -1;
+ }
+ return 0;
+}
+
+FILE *Fopen(char *path, char *mode)
+{
+ FILE *rv;
+
+ if ((rv = fopen(path, mode)) == NULL)
+ elog(ERROR, "ERROR: Could not open the file \"%s\" in \"%s\", %s\n", path, mode, strerror(errno));
+ return(rv);
+}
+
+
+char *Strdup(const char *s)
+{
+ char *rv;
+
+ Strdup_ed++;
+ rv = strdup(s);
+ if (rv == NULL)
+ {
+ elog(PANIC, "PANIC: No more memory. See core file for details.\n");
+ abort();
+ }
+ return(rv);
+}
+
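+/*
+ * Append the contents of every file in the NULL-terminated fileList to f,
+ * skipping "none" placeholders.
+ */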
+void appendFiles(FILE *f, char **fileList)
+{
+ FILE *src;
+ int ii;
+ char buf[MAXLINE+1];
+
+ if (fileList)
+ for (ii = 0; fileList[ii]; ii++)
+ {
+ if (!is_none(fileList[ii]))
+ {
+ if ((src = fopen(fileList[ii], "r")) == 0)
+ {
+ elog(ERROR, "ERROR: could not open file %s for read, %s\n", fileList[ii], strerror(errno));
+ continue;
+ }
+ while (fgets(buf, MAXLINE, src))
+ fputs(buf, f);
+ fclose(src);
+ }
+ }
+}
+
+FILE *prepareLocalStdin(char *buf, int len, char **fileList)
+{
+ FILE *f;
+ if ((f = fopen(createLocalFileName(STDIN, buf, len), "w")) == NULL)
+ {
+ elog(ERROR, "ERROR: could not open file %s for write, %s\n", buf, strerror(errno));
+ return(NULL);
+ }
+ appendFiles(f, fileList);
+ return(f);
+}
+
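+/*
+ * Fill buf with a local timestamp such as "20130405_09:30:15" and return buf.
+ */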
+char *timeStampString(char *buf, int len)
+{
+ time_t nowTime;
+ struct tm nowTm;
+
+ nowTime = time(NULL);
+ localtime_r(&nowTime, &nowTm);
+
+ snprintf(buf, len, "%04d%02d%02d_%02d:%02d:%02d",
+ nowTm.tm_year+1900, nowTm.tm_mon+1, nowTm.tm_mday,
+ nowTm.tm_hour, nowTm.tm_min, nowTm.tm_sec);
+ return(buf);
+}
+
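+/*
+ * Build a NULL-terminated copy of nodeList with "none" placeholders removed.
+ * Each entry is Strdup'ed; release the result with clean_array().
+ */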
+char **makeActualNodeList(char **nodeList)
+{
+ char **actualNodeList;
+ int ii, jj;
+
+ for (ii = 0, jj = 0; nodeList[ii]; ii++)
+ {
+ if (!is_none(nodeList[ii]))
+ jj++;
+ }
+ actualNodeList = Malloc0(sizeof(char *) * (jj + 1));
+ for (ii = 0, jj = 0; nodeList[ii]; ii++)
+ {
+ if (!is_none(nodeList[ii]))
+ {
+ actualNodeList[jj] = Strdup(nodeList[ii]);
+ jj++;
+ }
+ }
+ return actualNodeList;
+}
+
+int gtmProxyIdx(char *gtmProxyName)
+{
+ int ii;
+
+ for (ii = 0; aval(VAR_gtmProxyNames)[ii]; ii++)
+ {
+ if (strcmp(aval(VAR_gtmProxyNames)[ii], gtmProxyName) == 0)
+ return ii;
+ }
+ return -1;
+}
+
+int coordIdx(char *coordName)
+{
+ int ii;
+
+ if (is_none(coordName))
+ return -1;
+ for (ii = 0; aval(VAR_coordNames)[ii]; ii++)
+ {
+ if (strcmp(aval(VAR_coordNames)[ii], coordName) == 0)
+ return ii;
+ }
+ return -1;
+}
+
+int datanodeIdx(char *datanodeName)
+{
+ int ii;
+
+ if (is_none(datanodeName))
+ return -1;
+ for (ii = 0; aval(VAR_datanodeNames)[ii]; ii++)
+ {
+ if (strcmp(aval(VAR_datanodeNames)[ii], datanodeName) == 0)
+ return ii;
+ }
+ return -1;
+}
+
+int getEffectiveGtmProxyIdxFromServerName(char *serverName)
+{
+ int ii;
+
+ if (serverName == NULL)
+ return (-1);
+ for (ii = 0; aval(VAR_gtmProxyNames)[ii]; ii++)
+ {
+ if (strcmp(aval(VAR_gtmProxyServers)[ii], serverName) == 0)
+ return ii;
+ }
+ return -1;
+}
+
+/*
+ * We rely on the PID file created in the data directory of Postgres, GTM and
+ * friends to fetch the PID.  The first line of each such file holds the PID.
+ */
+pid_t get_prog_pid(char *host, char *pidfile, char *dir)
+{
+ char cmd[MAXLINE+1];
+ char pid_s[MAXLINE+1];
+ FILE *wkf;
+
+ snprintf(cmd, MAXLINE,
+ "ssh %s@%s "
+ "\"cat %s/%s.pid\"",
+ sval(VAR_pgxcUser), host, dir, pidfile);
+ wkf = popen(cmd, "r");
+ if (wkf == NULL)
+ {
+ elog(ERROR, "ERROR: cannot obtain pid value of the remote server process, host \"%s\" dir \"%s\", %s\n",
+ host, dir, strerror(errno));
+ return(-1);
+ }
+
+ if (fgets(pid_s, MAXLINE, wkf) == NULL)
+ {
+ elog(ERROR, "ERROR: fgets failed to get pid of remote server process, host \"%s\" dir \"%s\", %d\n",
+ host, dir, ferror(wkf));
+ pclose(wkf);
+ return(-1);
+ }
+
+ pclose(wkf);
+ return(atoi(pid_s));
+}
+
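+/*
+ * Ping a node with PQping().  The connection string is assembled from the
+ * optional host and port plus "dbname = postgres", e.g.
+ * "host = 'node1' port = 5432 dbname = postgres".  Returns 0 when the
+ * server answers, 1 when it does not.
+ */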
+int pingNode(char *host, char *port)
+{
+ PGPing status;
+ char conninfo[MAXLINE+1];
+ char editBuf[MAXPATH+1];
+
+ conninfo[0] = 0;
+ if (host)
+ {
+ snprintf(editBuf, MAXPATH, "host = '%s' ", host);
+ strncat(conninfo, editBuf, MAXLINE);
+ }
+ if (port)
+ {
+ snprintf(editBuf, MAXPATH, "port = %d ", atoi(port));
+ strncat(conninfo, editBuf, MAXLINE);
+ }
+
+ strncat(conninfo, "dbname = postgres ", MAXLINE);
+
+ if (conninfo[0])
+ {
+ status = PQping(conninfo);
+ if (status == PQPING_OK)
+ return 0;
+ else
+ return 1;
+ }
+ else
+ return -1;
+}
+
+/*
+ * A different mechanism to ping datanode and coordinator slaves since these
+ * nodes currently do not accept connections and hence won't respond to PQping
+ * requests. Instead we rely on "pg_ctl status", which must be run via ssh on
+ * the remote machine.
+ */
+int pingNodeSlave(char *host, char *datadir)
+{
+ FILE *wkf;
+ char cmd[MAXLINE+1];
+ char line[MAXLINE+1];
+ int rv;
+
+ snprintf(cmd, MAXLINE, "ssh %s@%s pg_ctl -D %s status > /dev/null 2>&1; echo $?",
+ sval(VAR_pgxcUser), host, datadir);
+ wkf = popen(cmd, "r");
+ if (wkf == NULL)
+ return -1;
+ if (fgets(line, MAXLINE, wkf))
+ {
+ trimNl(line);
+ rv = atoi(line);
+ }
+ else
+ rv = -1;
+ pclose(wkf);
+ return rv;
+}
+
+void trimNl(char *s)
+{
+ for (;*s && *s != '\n'; s++);
+ *s = 0;
+}
+
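+/*
+ * Return a Malloc'ed, space-separated list of the children of ppid on host,
+ * obtained by running "pgrep -P" through ssh.  The caller should Free() it.
+ */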
+char *getChPidList(char *host, pid_t ppid)
+{
+ FILE *wkf;
+ char cmd[MAXLINE+1];
+ char line[MAXLINE+1];
+ char *rv = Malloc(MAXLINE+1);
+
+ rv[0] = 0;
+ snprintf(cmd, MAXLINE, "ssh %s@%s pgrep -P %d",
+ sval(VAR_pgxcUser), host, ppid);
+ wkf = popen(cmd, "r");
+ if (wkf == NULL)
+ return NULL;
+ while (fgets(line, MAXLINE, wkf))
+ {
+ trimNl(line);
+ strncat(rv, line, MAXLINE);
+ strncat(rv, " ", MAXLINE);
+ }
+ pclose(wkf);
+ return rv;
+}
+
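+/*
+ * Resolve hostName to an IP address by parsing the first line of
+ * "ping -c1" output (the address between the parentheses); crude, but it
+ * avoids a direct dependency on the resolver API.
+ */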
+char *getIpAddress(char *hostName)
+{
+ char command[MAXLINE+1];
+ char *ipAddr;
+ FILE *f;
+
+ snprintf(command, MAXLINE, "ping -c1 %s | head -n 1 | sed 's/^[^(]*(\\([^)]*\\).*$/\\1/'", hostName);
+ if ((f = popen(command, "r")) == NULL)
+ {
+ elog(ERROR, "ERROR: could not open the command, \"%s\", %s\n", command, strerror(errno));
+ return NULL;
+ }
+ ipAddr = Malloc(MAXTOKEN+1);
+	if (fgets(ipAddr, MAXTOKEN, f) == NULL)
+		ipAddr[0] = 0;
+ pclose(f);
+ trimNl(ipAddr);
+ return ipAddr;
+}
+
+
+/*
+ * Test to see if a directory exists and is empty or not.
+ *
+ * Returns:
+ * 0 if nonexistent
+ * 1 if exists and empty
+ * 2 if exists and not empty
+ * -1 if trouble accessing directory (errno reflects the error)
+ */
+int
+pgxc_check_dir(const char *dir)
+{
+ int result = 1;
+ DIR *chkdir;
+ struct dirent *file;
+
+ errno = 0;
+
+ chkdir = opendir(dir);
+
+ if (chkdir == NULL)
+ return (errno == ENOENT) ? 0 : -1;
+
+ while ((file = readdir(chkdir)) != NULL)
+ {
+ if (strcmp(".", file->d_name) == 0 ||
+ strcmp("..", file->d_name) == 0)
+ {
+ /* skip this and parent directory */
+ continue;
+ }
+ else
+ {
+ result = 2; /* not empty */
+ break;
+ }
+ }
+
+#ifdef WIN32
+
+ /*
+ * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but not in
+ * released version
+ */
+ if (GetLastError() == ERROR_NO_MORE_FILES)
+ errno = 0;
+#endif
+
+ closedir(chkdir);
+
+ if (errno != 0)
+ result = -1; /* some kind of I/O error? */
+
+ return result;
+}
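+
+/*
+ * A minimal usage sketch (hypothetical caller; "dir" stands for any data
+ * directory path):
+ *
+ *	switch (pgxc_check_dir(dir))
+ *	{
+ *		case 0:		create the directory;		break;
+ *		case 1:		empty, safe to initialize;	break;
+ *		case 2:		refuse, directory in use;	break;
+ *		default:	report strerror(errno);
+ *	}
+ */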
diff --git a/contrib/pgxc_ctl/utils.h b/contrib/pgxc_ctl/utils.h
new file mode 100644
index 0000000000..18cc0ec9fe
--- /dev/null
+++ b/contrib/pgxc_ctl/utils.h
@@ -0,0 +1,51 @@
+/*-------------------------------------------------------------------------
+ *
+ * utils.h
+ *
+ * Utility module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/types.h>
+
+extern void *Malloc(size_t size);
+extern void *Malloc0(size_t size);
+extern void *Realloc(void *ptr, size_t size);
+extern void Free(void *ptr);
+extern int Chdir(char *path, int flag);
+extern FILE *Fopen(char *path, char *mode);
+extern char *Strdup(const char *s);
+extern char **addToList(char **List, char *val);
+extern void appendFiles(FILE *f, char **fileList);
+extern FILE *prepareLocalStdin(char *buf, int len, char **fileList);
+extern char *timeStampString(char *buf, int len);
+extern char **makeActualNodeList(char **nodeList);
+extern int gtmProxyIdx(char *gtmProxyName);
+extern int coordIdx(char *coordName);
+extern int datanodeIdx(char *datanodeName);
+extern int getEffectiveGtmProxyIdxFromServerName(char *serverName);
+extern pid_t get_prog_pid(char *host, char *pidfile, char *dir);
+extern int pingNode(char *host, char *port);
+extern int pingNodeSlave(char *host, char *datadir);
+extern void trimNl(char *s);
+extern char *getChPidList(char *host, pid_t ppid);
+extern char *getIpAddress(char *hostName);
+extern int pgxc_check_dir(const char *dir);
+
+#define get_postmaster_pid(host, dir) get_prog_pid(host, "postmaster", dir)
+#define get_gtm_pid(host, dir) get_prog_pid(host, "gtm", dir)
+#define get_gtmProxy_pid(host, dir) get_prog_pid(host, "gtm_proxy", dir)
+#define freeAndReset(x) do{Free(x);(x)=NULL;}while(0)
+#define myWEXITSTATUS(rc) ((rc) & 0x000000FF)
+
+/* Print variables out in bash format */
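+/*
+ * e.g. fprintSval(f, VAR_pgxcUser) writes a line like "pgxcUser=postgres";
+ * fprintAval(f, VAR_coordNames) writes "coordNames=( coord1 coord2 )"
+ * (the values shown are illustrative).
+ */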
+#define svalFormat "%s=%s\n"
+#define expandSval(name) name, sval(name)
+#define avalFormat "%s=( %s )\n"
+#define expandAval(name) name, listValue(name)
+#define fprintAval(f, name) do{fprintf(f, avalFormat, expandAval(name));}while(0)
+#define fprintSval(f, name) do{fprintf(f, svalFormat, expandSval(name));}while(0)
diff --git a/contrib/pgxc_ctl/variables.c b/contrib/pgxc_ctl/variables.c
new file mode 100644
index 0000000000..b946dfb2f4
--- /dev/null
+++ b/contrib/pgxc_ctl/variables.c
@@ -0,0 +1,473 @@
+/*-------------------------------------------------------------------------
+ *
+ * variables.c
+ *
+ * Variable handling module of Postgres-XC configuration and operation tool.
+ *
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#include <stdlib.h>
+#include <string.h>
+#include "variables.h"
+#include "utils.h"
+#include "pgxc_ctl_log.h"
+
+pgxc_ctl_var *var_head = NULL;
+pgxc_ctl_var *var_tail = NULL;
+
+static void clear_var(pgxc_ctl_var *var);
+/*
+ * Additive hash: sum of the name's bytes (in an unsigned char) modulo
+ * NUM_HASH_BUCKET.
+ */
+static int hash_val(char *name)
+{
+ unsigned char *name_u = (unsigned char *)name;
+ unsigned char v;
+
+ for(v = 0; *name_u; name_u++)
+ v += *name_u;
+ return (v%NUM_HASH_BUCKET);
+}
+
+#define LIMIT_TO_DOUBLE 128
+#define INCR_OVER_DOUBLE 10
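+/*
+ * Growth policy for value arrays: double while at most LIMIT_TO_DOUBLE,
+ * then grow linearly, e.g. 1, 2, 4, ..., 128, 256, 266, 276, ...
+ */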
+static int next_size(int sz)
+{
+ if (sz <= 0)
+ return 1;
+ if (sz <= LIMIT_TO_DOUBLE)
+ return sz*2;
+ else
+ return sz + INCR_OVER_DOUBLE;
+}
+
+void init_var_hash()
+{
+ int i;
+
+ for (i = 0; i < NUM_HASH_BUCKET; i++)
+ {
+ var_hash[i].el_size = 1;
+ var_hash[i].el_used = 0;
+ var_hash[i].el = (pgxc_ctl_var **)Malloc(sizeof(pgxc_ctl_var *));
+ var_hash[i].el[0] = NULL;
+ }
+}
+
+static void remove_from_hash(pgxc_ctl_var *var)
+{
+ int hash_v = hash_val(var->varname);
+ int ii, jj;
+
+ for(ii = 0; var_hash[hash_v].el[ii]; ii++)
+ {
+ if (var_hash[hash_v].el[ii] != var)
+ continue;
+ else
+ {
+ for(jj = ii; var_hash[hash_v].el[jj]; jj++)
+ var_hash[hash_v].el[jj] = var_hash[hash_v].el[jj + 1];
+ var_hash[hash_v].el_used--;
+ return;
+ }
+ }
+ return;
+}
+
+void add_var_hash(pgxc_ctl_var *var)
+{
+ int hash_v = hash_val(var->varname);
+ if (var_hash[hash_v].el_used + 1 >= var_hash[hash_v].el_size)
+ {
+ var_hash[hash_v].el_size = next_size(var_hash[hash_v].el_size);
+ var_hash[hash_v].el = (pgxc_ctl_var **)Realloc(var_hash[hash_v].el, sizeof(pgxc_ctl_var *) * var_hash[hash_v].el_size);
+ }
+ var_hash[hash_v].el[var_hash[hash_v].el_used++] = var;
+ var_hash[hash_v].el[var_hash[hash_v].el_used] = NULL;
+}
+
+pgxc_ctl_var *new_var(char *name)
+{
+ pgxc_ctl_var *newv;
+
+ if (find_var(name))
+ {
+ elog(ERROR, "ERROR: Variable %s already defined. Check your configuration.\n", name);
+ return NULL;
+ }
+
+ newv = (pgxc_ctl_var *)Malloc(sizeof(pgxc_ctl_var));
+ if (var_head == NULL)
+ {
+ var_head = var_tail = newv;
+ newv->prev = NULL;
+ }
+ else
+ {
+ newv->prev = var_tail;
+ var_tail->next = newv;
+ var_tail = newv;
+ }
+ newv->next = NULL;
+ newv->varname = Strdup(name);
+ newv->val_size = 1;
+ newv->val_used = 0;
+ newv->val = (char **)Malloc(sizeof(char *));
+ newv->val[0] = NULL;
+ add_var_hash(newv);
+ return(newv);
+}
+
+void remove_var(pgxc_ctl_var *var)
+{
+ if ((var_head == var_tail) && (var_head == var))
+ var_head = var_tail = NULL;
+ else if (var_head == var)
+ {
+ var_head = var_head->next;
+ var_head->prev = NULL;
+ }
+ else if (var_tail == var)
+ {
+ var_tail->next = NULL;
+ var_tail = var_tail->prev;
+ }
+ else
+ {
+ var->prev->next = var->next;
+ var->next->prev = var->prev;
+ }
+ clear_var(var);
+}
+
+static void clear_var(pgxc_ctl_var *var)
+{
+ int ii;
+
+ remove_from_hash(var);
+	for (ii = 0; var->val[ii]; ii++)
+		free(var->val[ii]);
+	free(var->val);
+	free(var->varname);
+	free(var);
+}
+
+void add_val(pgxc_ctl_var *var, char *val)
+{
+ if (var->val_size <= var->val_used+1)
+ {
+ var->val_size = next_size(var->val_size);
+ var->val = (char **)Realloc(var->val, sizeof(char *)*var->val_size);
+ }
+ var->val[var->val_used++] = Strdup(val);
+ var->val[var->val_used] = NULL;
+}
+
+void add_val_name(char *name, char *val)
+{
+ pgxc_ctl_var *var;
+ if (!(var = find_var(name)))
+ return;
+	add_val(var, val);
+ return;
+}
+
+
+pgxc_ctl_var *find_var(char *name)
+{
+ pgxc_var_hash *hash = &var_hash[hash_val(name)];
+ int i;
+
+ for (i = 0; i < hash->el_used; i++)
+ {
+ if (strcmp(hash->el[i]->varname, name) == 0)
+ return hash->el[i];
+ }
+ return NULL;
+}
+
+char *sval(char *name)
+{
+ pgxc_ctl_var *var = find_var(name);
+ if (!var)
+ return NULL;
+ return var->val[0];
+}
+
+char **aval(char *name)
+{
+ pgxc_ctl_var *var = find_var(name);
+ if (!var)
+ return NULL;
+ return var->val;
+}
+
+void reset_value(pgxc_ctl_var *var)
+{
+ int i;
+ for (i = 0; var->val[i]; i++)
+ {
+ Free (var->val[i]);
+ var->val[i] = NULL;
+ }
+ var->val_used = 0;
+}
+
+void assign_val(char *destName, char *srcName)
+{
+	pgxc_ctl_var *dest = find_var(destName);
+	pgxc_ctl_var *src = find_var(srcName);
+	int ii;
+
+	if (dest == NULL || src == NULL)
+		return;
+	reset_value(dest);
+	for (ii = 0; ii < src->val_used; ii++)
+		add_val(dest, src->val[ii]);
+}
+
+void assign_sval(char *destName, char *val)
+{
+	pgxc_ctl_var *dest = find_var(destName);
+
+	if (dest == NULL)
+		return;
+	reset_value(dest);
+	add_val(dest, val);
+}
+
+void reset_var(char *name)
+{
+ confirm_var(name);
+ reset_value(find_var(name));
+}
+
+void reset_var_val(char *name, char *val)
+{
+ reset_var(name);
+ add_val(find_var(name), val);
+}
+
+pgxc_ctl_var *confirm_var(char *name)
+{
+ pgxc_ctl_var *rc;
+ if ((rc = find_var(name)))
+ return rc;
+ return new_var(name);
+}
+
+void print_vars(void)
+{
+ pgxc_ctl_var *cur;
+
+ lockLogFile();
+ for(cur = var_head; cur; cur=cur->next)
+ print_var(cur->varname);
+ unlockLogFile();
+}
+
+void print_var(char *vname)
+{
+ pgxc_ctl_var *var;
+ char outBuf[MAXLINE + 1];
+
+ outBuf[0] = 0;
+ if ((var = find_var(vname)) == NULL)
+ {
+ elog(ERROR, "ERROR: Variable %s not found.\n", vname);
+ return;
+ }
+ else
+ {
+ char **curv;
+ char editbuf[MAXPATH];
+
+ snprintf(editbuf, MAXPATH, "%s (", vname);
+ strncat(outBuf, editbuf, MAXLINE);
+ for (curv=var->val; *curv; curv++)
+ {
+ snprintf(editbuf, MAXPATH, " \"%s\" ", *curv);
+ strncat(outBuf, editbuf, MAXLINE);
+ }
+ strncat(outBuf, ")", MAXLINE);
+ elog(NOTICE, "%s\n", outBuf);
+ }
+
+}
+
+void log_var(char *varname)
+{
+ if (logFile)
+ print_var(varname);
+}
+
+int arraySizeName(char *name)
+{
+ pgxc_ctl_var *var;
+
+ if ((var = find_var(name)) == NULL)
+ return -1;
+ return(arraySize(var));
+}
+
+int arraySize(pgxc_ctl_var *var)
+{
+ return var->val_used;
+}
+
+char **add_member(char **array, char *val)
+{
+ char **rv;
+ int ii;
+
+ for (ii = 0; array[ii]; ii++);
+ rv = Realloc(array, sizeof(char *) * (ii + 2));
+ rv[ii] = Strdup(val);
+ rv[ii+1] = NULL;
+ return(rv);
+}
+
+void clean_array(char **array)
+{
+ int ii;
+ if (array)
+ {
+ for(ii = 0; array[ii]; ii++)
+ Free(array[ii]);
+ Free(array);
+ }
+}
+
+void var_assign(char **dest, char *src)
+{
+ Free(*dest);
+ *dest = src;
+}
+
+char *listValue(char *name)
+{
+ pgxc_ctl_var *dest;
+ int ii;
+ char *buf;
+
+ if ((dest = find_var(name)) == NULL)
+ return Strdup("");
+ buf = Malloc(MAXLINE+1);
+ buf[0]=0;
+ for(ii = 0; ii < dest->val_used; ii++)
+ {
+ strncat(buf, dest->val[ii], MAXLINE);
+ strncat(buf, " ", MAXLINE);
+ }
+ return buf;
+}
+
+int ifExists(char *name, char *value)
+{
+ pgxc_ctl_var *var = find_var(name);
+ int ii;
+
+ if (!var)
+ return FALSE;
+ for (ii = 0; ii < var->val_used; ii++)
+ if (strcmp((var->val)[ii], value) == 0)
+ return TRUE;
+ return FALSE;
+}
+
+int IfExists(char *name, char *value)
+{
+ pgxc_ctl_var *var = find_var(name);
+ int ii;
+
+ if (!var)
+ return FALSE;
+ for (ii = 0; ii < var->val_used; ii++)
+ if (strcasecmp((var->val)[ii], value) == 0)
+ return TRUE;
+ return FALSE;
+}
+
+/*
+ * Extend the variable's value array to newSize elements (plus one slot to
+ * store the end-of-array NULL marker).
+ */
+int extendVar(char *name, int newSize, char *def_value)
+{
+ pgxc_ctl_var *target;
+ char **old_val;
+ int old_size;
+ int ii;
+
+ if ((target = find_var(name)) == NULL)
+ return -1;
+ if (def_value == NULL)
+ def_value = "none";
+
+ /*
+ * If the allocated array is not already big enough to store newSize + 1
+ * elements, we must extend it newSize + 1
+ */
+ if (target->val_size <= newSize)
+ {
+ old_val = target->val;
+ old_size = target->val_size;
+ target->val = Malloc0(sizeof(char *) * (newSize + 1));
+ memcpy(target->val, old_val, sizeof(char *) * old_size);
+ target->val_size = newSize + 1;
+ Free(old_val);
+ }
+
+ for (ii = target->val_used; ii < newSize; ii++)
+ (target->val)[ii] = Strdup(def_value);
+
+ /* Store NULL in the last element to mark the end-of-array */
+ (target->val)[newSize] = NULL;
+ if (target->val_used < newSize)
+ target->val_used = newSize;
+
+ return 0;
+}
+
+
+/*
+ * Assign val to element idx of variable name.  If pad is NULL, "none" is
+ * used for padding; when extend is TRUE the array is grown so that idx
+ * becomes valid.
+ */
+void assign_arrayEl_internal(char *name, int idx, char *val, char *pad,
+ int extend)
+{
+ pgxc_ctl_var *var = confirm_var(name);
+
+ if (pad == NULL)
+ pad = "none";
+ /*
+ * Pad if needed
+ */
+ if (extend)
+ extendVar(name, idx+1, pad);
+ Free(var->val[idx]);
+ var->val[idx] = Strdup(val);
+}
+
+void assign_arrayEl(char *name, int idx, char *val, char *pad)
+{
+	assign_arrayEl_internal(name, idx, val, pad, TRUE);
+}
+
+void replace_arrayEl(char *name, int idx, char *val, char *pad)
+{
+	assign_arrayEl_internal(name, idx, val, pad, FALSE);
+}
+
+int doesExist(char *name, int idx)
+{
+ pgxc_ctl_var *var;
+
+ if (name == NULL)
+ return 0;
+ if ((var = find_var(name)) == NULL)
+ return 0;
+ if (var->val_used <= idx)
+ return 0;
+ return 1;
+}
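+
+/*
+ * A minimal sketch of the variable API ("myVar" and the values are
+ * illustrative):
+ *
+ *	init_var_hash();
+ *	reset_var_val("myVar", "value0");
+ *	assign_arrayEl("myVar", 2, "value2", NULL);	pads index 1 with "none"
+ *	elog(NOTICE, "%s\n", aval("myVar")[2]);		prints "value2"
+ */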
diff --git a/contrib/pgxc_ctl/variables.h b/contrib/pgxc_ctl/variables.h
new file mode 100644
index 0000000000..d408c86fe1
--- /dev/null
+++ b/contrib/pgxc_ctl/variables.h
@@ -0,0 +1,88 @@
+/*-------------------------------------------------------------------------
+ *
+ * variables.h
+ *
+ * Variable handling module of Postgres-XC configuration and operation tool.
+ *
+ * Copyright (c) 2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef VARIABLES_H
+#define VARIABLES_H
+
+#include <stdio.h>
+#include <stdlib.h>
+#define NUM_HASH_BUCKET 128
+
+typedef struct pgxc_ctl_var {
+ struct pgxc_ctl_var *next;
+ struct pgxc_ctl_var *prev;
+ char *varname;
+ int val_size; /*
+ * current size of the allocated array including
+ * place to store the NULL pointer as an
+ * end-of-array marker
+ */
+
+ int val_used; /* currently used values */
+
+ char **val; /*
+ * max (val_size - 1) values and NULL as the last
+ * element
+ */
+} pgxc_ctl_var;
+
+
+extern pgxc_ctl_var *var_head;
+extern pgxc_ctl_var *var_tail;
+
+typedef struct pgxc_var_hash {
+ int el_size;
+ int el_used;
+ pgxc_ctl_var **el;
+} pgxc_var_hash;
+
+
+pgxc_var_hash var_hash[NUM_HASH_BUCKET];
+
+void init_var_hash(void);
+void add_var_hash(pgxc_ctl_var *var);
+pgxc_ctl_var *new_var(char *name);
+void add_val(pgxc_ctl_var *var, char *val);
+void add_val_name(char *name, char *val);
+pgxc_ctl_var *find_var(char *name);
+char *sval(char *name);
+char **aval(char *name);
+int arraySizeName(char *name);
+int arraySize(pgxc_ctl_var *var);
+void print_vars(void);
+void print_var(char *vname);
+void reset_value(pgxc_ctl_var *var);
+void assign_val(char *dest, char *src);
+void assign_sval(char *name, char *val);
+void assign_arrayEl(char *name, int idx, char *val, char *pad);
+void replace_arrayEl(char *name, int idx, char *val, char *pad);
+pgxc_ctl_var *confirm_var(char *name);
+void reset_var_val(char *name, char *val);
+void reset_var(char *name);
+void remove_var(pgxc_ctl_var *var);
+void log_var(char *name);
+char **add_member(char **array, char *val);
+void var_assign(char **dest, char *src);
+char *listValue(char *name);
+int extendVar(char *name, int newSize, char *def_value);
+int doesExist(char *name, int idx);
+void assign_arrayEl_internal(char *name, int idx, char *val, char *pad,
+ int extend);
+
+#define AddMember(a, b) do{if((a) == NULL) (a) = Malloc0(sizeof(char *)); (a) = add_member((a), (b));}while(0)
+void clean_array(char **array);
+#define CleanArray(a) do{clean_array(a); (a) = NULL;}while(0)
+#define VAR(a) find_var(a)
+
+int ifExists(char *name, char *value);
+int IfExists(char *name, char *value);
+
+#endif /* VARIABLES_H */
diff --git a/contrib/pgxc_ctl/varnames.h b/contrib/pgxc_ctl/varnames.h
new file mode 100644
index 0000000000..f3e65a4420
--- /dev/null
+++ b/contrib/pgxc_ctl/varnames.h
@@ -0,0 +1,152 @@
+/*-------------------------------------------------------------------------
+ *
+ * varnames.h
+ *
+ * Variable name definition of Postgres-XC configuration and operation tool.
+ *
+ *
+ * Portions Copyright (c) 1996-2009, PostgreSQL Global Development Group
+ * Portions Copyright (c) 2010-2013 Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef VARNAMES_H
+#define VARNAMES_H
+
+/* Install Directory */
+#define VAR_pgxcInstallDir "pgxcInstallDir" /* Not mandatory */
+
+/* Overall */
+#define VAR_pgxcOwner "pgxcOwner"
+#define VAR_pgxcUser "pgxcUser"
+#define VAR_tmpDir "tmpDir"
+#define VAR_localTmpDir "localTmpDir"
+#define VAR_logOpt "logOpt"
+#define VAR_logDir "logDir"
+#define VAR_configBackup "configBackup"
+#define VAR_configBackupHost "configBackupHost"
+#define VAR_configBackupDir "configBackupDir"
+#define VAR_configBackupFile "configBackupFile"
+#define VAR_allServers "allServers"
+
+
+/* GTM master */
+#define VAR_gtmName "gtmName"
+#define VAR_gtmMasterServer "gtmMasterServer"
+#define VAR_gtmMasterPort "gtmMasterPort"
+#define VAR_gtmMasterDir "gtmMasterDir"
+#define VAR_gtmExtraConfig "gtmExtraConfig"
+#define VAR_gtmMasterSpecificExtraConfig "gtmMasterSpecificExtraConfig"
+
+/* GTM slave */
+#define VAR_gtmSlave "gtmSlave"
+#define VAR_gtmSlaveName "gtmSlaveName"
+#define VAR_gtmSlaveServer "gtmSlaveServer"
+#define VAR_gtmSlavePort "gtmSlavePort"
+#define VAR_gtmSlaveDir "gtmSlaveDir"
+#define VAR_gtmSlaveSpecificExtraConfig "gtmSlaveSpecificExtraConfig"
+
+/* GTM Proxy */
+#define VAR_gtmProxy "gtmProxy"
+#define VAR_gtmProxyNames "gtmProxyNames"
+#define VAR_gtmProxyServers "gtmProxyServers"
+#define VAR_gtmProxyPorts "gtmProxyPorts"
+#define VAR_gtmProxyDirs "gtmProxyDirs"
+#define VAR_gtmPxyExtraConfig "gtmPxyExtraConfig"
+#define VAR_gtmPxySpecificExtraConfig "gtmPxySpecificExtraConfig"
+
+/* Coordinators overall */
+#define VAR_coordNames "coordNames"
+#define VAR_coordPorts "coordPorts"
+#define VAR_poolerPorts "poolerPorts"
+#define VAR_coordPgHbaEntries "coordPgHbaEntries"
+
+/* Coordinators master */
+#define VAR_coordMasterServers "coordMasterServers"
+#define VAR_coordMasterDirs "coordMasterDirs"
+#define VAR_coordMaxWALSenders "coordMaxWALSenders"
+
+/* Coordinators slave */
+#define VAR_coordSlave "coordSlave"
+#define VAR_coordSlaveServers "coordSlaveServers"
+#define VAR_coordSlavePorts "coordSlavePorts"
+#define VAR_coordSlavePoolerPorts "coordSlavePoolerPorts"
+#define VAR_coordSlaveSync "coordSlaveSync"
+#define VAR_coordSlaveDirs "coordSlaveDirs"
+#define VAR_coordArchLogDirs "coordArchLogDirs"
+
+/* Coordinator configuration files */
+#define VAR_coordExtraConfig "coordExtraConfig"
+#define VAR_coordSpecificExtraConfig "coordSpecificExtraConfig"
+#define VAR_coordExtraPgHba "coordExtraPgHba"
+#define VAR_coordSpecificExtraPgHba "coordSpecificExtraPgHba"
+
+/* Coordinators additional slaves */
+/* Actual additional slave configuration will be obtained from coordAdditionalSlaveSet */
+#define VAR_coordAdditionalSlaves "coordAdditionalSlaves"
+#define VAR_coordAdditionalSlaveSet "coordAdditionalSlaveSet"
+
+
+/* Datanodes overall */
+#define VAR_datanodeNames "datanodeNames"
+#define VAR_datanodePorts "datanodePorts"
+#define VAR_datanodePoolerPorts "datanodePoolerPorts"
+#define VAR_datanodePgHbaEntries "datanodePgHbaEntries"
+#define VAR_primaryDatanode "primaryDatanode"
+
+/* Datanode masters */
+#define VAR_datanodeMasterServers "datanodeMasterServers"
+#define VAR_datanodeMasterDirs "datanodeMasterDirs"
+#define VAR_datanodeMasterWALDirs "datanodeMasterWALDirs"
+#define VAR_datanodeMaxWALSenders "datanodeMaxWALSenders"
+
+/* Datanode slaves */
+#define VAR_datanodeSlave "datanodeSlave"
+#define VAR_datanodeSlaveServers "datanodeSlaveServers"
+#define VAR_datanodeSlavePorts "datanodeSlavePorts"
+#define VAR_datanodeSlavePoolerPorts "datanodeSlavePoolerPorts"
+#define VAR_datanodeSlaveSync "datanodeSlaveSync"
+#define VAR_datanodeSlaveDirs "datanodeSlaveDirs"
+#define VAR_datanodeSlaveWALDirs "datanodeSlaveWALDirs"
+#define VAR_datanodeArchLogDirs "datanodeArchLogDirs"
+
+/* Datanode configuration files */
+#define VAR_datanodeExtraConfig "datanodeExtraConfig"
+#define VAR_datanodeSpecificExtraConfig "datanodeSpecificExtraConfig"
+#define VAR_datanodeExtraPgHba "datanodeExtraPgHba"
+#define VAR_datanodeSpecificExtraPgHba "datanodeSpecificExtraPgHba"
+
+/* Datanode additional slaves */
+/* Actual additional slave configuration will be obtained from datanodeAdditionalSlaveSet */
+#define VAR_datanodeAdditionalSlaves "datanodeAdditionalSlaves"
+#define VAR_datanodeAdditionalSlaveSet "datanodeAdditionalSlaveSet"
+
+/* WAL Archives */
+/* Actual wal archive will be obtained from walArchiveSet */
+#define VAR_walArchive "walArchive"
+#define VAR_walArchiveSet "walArchiveSet"
+
+/* Connection to datanode/coordinator */
+
+#define VAR_pgxcCtlName "pgxcCtlName"
+#define VAR_defaultDatabase "defaultDatabase"
+
+/* Other Options */
+
+#define VAR_pgxc_ctl_home "pgxc_ctl_home"
+#define VAR_xc_prompt "xc_prompt"
+#define VAR_verbose "verbose"
+#define VAR_logDir "logDir"
+#define VAR_logFile "logFile"
+#define VAR_tmpDir "tmpDir"
+#define VAR_localTmpDir "localTmpDir"
+#define VAR_configFile "configFile"
+#define VAR_echoAll "echoAll"
+#define VAR_debug "debug"
+#define VAR_logMessage "logMessage"
+#define VAR_printMessage "printMessage"
+#define VAR_logLocation "logLocation"
+#define VAR_printLocation "printLocation"
+
+#endif /* VARNAMES_H */
diff --git a/contrib/pgxc_ddl/README b/contrib/pgxc_ddl/README
new file mode 100644
index 0000000000..0a2c440b44
--- /dev/null
+++ b/contrib/pgxc_ddl/README
@@ -0,0 +1,47 @@
+Postgres-XC - pgxc_ddl
+=====================================
+
+This directory contains pgxc_ddl, an application used to perform a cold synchronization of DDL
+in a Postgres-XC cluster by launching DDL and then copying Coordinator catalog file
+data from the remote Coordinator (where the DDL has been launched) to the other Coordinators.
+
+pgxc_ddl can also be used to synchronize catalog files.
+
+pgxc_ddl was part of the default installation before DDL synchronization was implemented
+(prior to Postgres-XC 0.9.3).
+
+pgxc_ddl can be used with a configuration file called pgxc.conf.
+This file is kept under the name pgxc.conf.sample to stay consistent with the PostgreSQL format.
+Up to v0.9.3, pgxc.conf was installed by default by initdb in the data folder,
+but this is no longer necessary since DDL synchronization is implemented in Postgres-XC.
+
+So it is kept in a separate directory, src/pgxc/bin/pgxc_ddl/.
+
+=====================================
+pgxc_ddl
+=====================================
+This script uses the following options:
+- D to locate the data folder, necessary to find pgxc.conf,
+  which contains the characteristics of all the Coordinators
+- l to locate the folder where the applications are
+- f for a DDL file used as input
+- d for the name of the database on which to launch the DDL
+- n for the number of the Coordinator on which to launch the DDL,
+  based on the numbering written in pgxc.conf
+- t for the base name of the folder where configuration files are saved,
+  /tmp/pgxc_config_files by default, completed by $$ (the process number, for folder name uniqueness)
+
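+For example, a hypothetical invocation (paths and names below are placeholders)
+that launches the DDL file ddl.sql on database postgres through Coordinator 1:
+
+    pgxc_ddl -D /pgxc/data_coord1 -l /usr/local/pgxc/bin -n 1 -d postgres -f ddl.sql
+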
+=====================================
+pgxc.conf.sample
+=====================================
+The same format as for GUC files is used.
+
+This configuration file contains the following parameters:
+- coordinator_hosts, list of Coordinator hosts
+  This is an array; the format is 'host_name_1,host_name_2'.
+- coordinator_ports, list of Coordinator ports
+  This is an array; the format is 'port_1,port_2'.
+- coordinator_folders, list of Coordinator data folders
+  This is an array; the format is 'data_folder_1,data_folder_2'.
+
+All the arrays need to have the same size, equal to the number of Coordinators.
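+
+For instance, a hypothetical two-Coordinator setup (host names and folders are
+placeholders) could be described as:
+
+    coordinator_hosts = 'coord1.example.com,coord2.example.com'
+    coordinator_ports = '5451,5452'
+    coordinator_folders = '/pgxc/data1,/pgxc/data2'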
diff --git a/contrib/pgxc_ddl/pgxc.conf.sample b/contrib/pgxc_ddl/pgxc.conf.sample
new file mode 100644
index 0000000000..9dcc0c7a2d
--- /dev/null
+++ b/contrib/pgxc_ddl/pgxc.conf.sample
@@ -0,0 +1,20 @@
+# -----------------------------
+# Postgres-XC configuration file
+# -----------------------------
+#
+# This file consists of lines of the form:
+#
+# name = value
+#
+# It describes the list of coordinators used in the cluster
+
+#------------------------------------------------------------------------------
+# POSTGRES-XC COORDINATORS
+#------------------------------------------------------------------------------
+
+#coordinator_hosts = 'localhost'	# Host names or addresses of Coordinators
+ # (change requires restart)
+#coordinator_ports = '5451,5452' # Port numbers of coordinators
+ # (change requires restart)
+#coordinator_folders = '/pgxc/data' # List of Data folders of coordinators
+					# (change requires restart)
\ No newline at end of file
diff --git a/contrib/pgxc_ddl/pgxc_ddl b/contrib/pgxc_ddl/pgxc_ddl
new file mode 100644
index 0000000000..b56a8ff86a
--- /dev/null
+++ b/contrib/pgxc_ddl/pgxc_ddl
@@ -0,0 +1,443 @@
+#!/bin/bash
+# Copyright (c) 2010-2012 Postgres-XC Development Group
+
+#Script to launch DDL in a PGXC cluster using a cold backup method
+#Be sure to have set up a correct ssh environment on all the servers of the cluster
+
+#This script uses pgxc.conf as a base to find the settings of all the coordinators
+
+#Options possible to use for this script
+# -D to locate the data folder, necessary to find pgxc.conf, containing the characteristics of all the coordinators
+# -l to locate the folder where applications are
+# -f for a DDL file
+# -d for a Database name
+# -n coordinator number where to launch DDL, number based on the one written in pgxc.conf
+# -t base name of folder where to save configuration files, by default /tmp/pgxc_config_files, completed by $$
+
+count=0
+
+#Default options
+#local folder used to temporarily save the configuration files of the coordinators whose data folders are erased
+CONFIG_FOLDER=/tmp/pgxc_config_files.$$
+PGXC_BASE=
+#options to launch the coordinator
+#don't forget to add -i as we are in a cluster :)
+COORD_OPTIONS="-C -i"
+
+#-----------------------------------------------------------------------
+# Option Management
+#-----------------------------------------------------------------------
+while getopts 'f:d:D:l:hn:t:' OPTION
+do
+ count=$((count +2))
+ case $OPTION in
+ d) #for a database name
+ DB_NAME="$OPTARG"
+ ;;
+
+ D) #for a data folder, to find pgxc.conf
+ DATA_FOLDER="$OPTARG"
+ ;;
+
+ f) #for a DDL file
+ DDL_FILE_NAME="$OPTARG"
+ ;;
+
+ l) #To define folder where applications are if necessary
+ PGXC_BASE="$OPTARG"/
+ ;;
+
+ n) #for a coordinator number
+ COORD_NUM_ORIGIN="$OPTARG"
+ ;;
+
+ h) printf "Usage: %s: [-d dbname] [-l bin folder] [-D data folder] [-n coord number] [-f ddl file] [-t save folder name in /tmp/]\n" $(basename $0) >&2
+ exit 0
+ ;;
+	t) #to set the name of the folder where conf files are saved. Everything is saved under /tmp
+ CONFIG_FOLDER=/tmp/"$OPTARG"
+ ;;
+
+ ?) printf "Usage: %s: [-d dbname] [-l bin folder] [-D data folder] [-n coord number] [-f ddl file] [-t save folder name in /tmp/]\n" $(basename $0) >&2
+ exit 0
+ ;;
+ esac
+done
+
+if [ $# -lt "1" ]
+then
+ echo "No arguments defined, you should try help -h"
+ exit 2
+fi
+
+#A couple of option checks
+if [ "$count" -ne "$#" ]
+then
+ echo "Arguments not correctly set, try -h for help"
+ exit 2
+fi
+
+if [ -z $COORD_NUM_ORIGIN ]
+then
+ echo "Coordinator number not defined, mandatory -n argument missing"
+ exit 2
+fi
+if [ -z $DATA_FOLDER ]
+then
+ echo "Data folder not defined, mandatory -D argument missing"
+ exit 2
+fi
+
+#Check if Argument of -n is an integer
+if [ ! $(echo "$COORD_NUM_ORIGIN" | grep -E "^[0-9]+$") ]
+ then
+ echo "Argument -n is not a valid integer"
+ exit 2
+fi
+
+#Check if DDL file exists
+if [ "$DDL_FILE_NAME" != "" ]
+then
+ if [ ! -e $DDL_FILE_NAME ]
+ then
+		echo "DDL file does not exist"
+ exit 2
+ fi
+ if [ -z $DB_NAME ]
+ then
+ echo "Dbname not defined, mandatory -d argument missing when using a ddl file"
+ exit 2
+ fi
+fi
+
+#-----------------------------------------------------------------------
+# Begin to read the pgxc.conf to get coordinator characteristics
+#-----------------------------------------------------------------------
+PGXC_CONF=$DATA_FOLDER/pgxc.conf
+
+if [ ! -e $PGXC_CONF ]
+then
+	echo "pgxc.conf not found in the directory defined by -D"
+ exit 2
+fi
+
+#Find parameters
+hosts=`cat $PGXC_CONF | grep coordinator_hosts | cut -d "'" -f 2`
+ports=`cat $PGXC_CONF | grep coordinator_ports | cut -d "'" -f 2`
+folders=`cat $PGXC_CONF | grep coordinator_folders | cut -d "'" -f 2`
+if [ "$hosts" = "" ]
+then
+ echo "coordinator_hosts not defined in pgxc.conf"
+ exit 2
+fi
+if [ "$ports" = "" ]
+then
+ echo "coordinator_ports not defined in pgxc.conf"
+ exit 2
+fi
+if [ "$folders" = "" ]
+then
+ echo "coordinator_folders not defined in pgxc.conf"
+ exit 2
+fi
+
+#Check if the strings are using commas as separators
+hosts_sep="${hosts//[^,]/}"
+ports_sep="${ports//[^,]/}"
+folders_sep="${folders//[^,]/}"
+if [ "$hosts_sep" = "" ]
+then
+ echo "coordinator_hosts should use commas as a separator"
+ exit 2
+fi
+if [ "$ports_sep" = "" ]
+then
+ echo "coordinator_ports should use commas as a separator"
+ exit 2
+fi
+if [ "$folders_sep" = "" ]
+then
+ echo "coordinator_folders should use commas as a separator"
+ exit 2
+fi
+
+
+#-----------------------------------------------------------------------
+# Fill in Arrays that are used for the process from pgxc configuration file
+#-----------------------------------------------------------------------
+
+count=1
+#Coordinator list
+host_local=`echo $hosts | cut -d "," -f $count`
+while [ "$host_local" != "" ]
+do
+ COORD_HOSTNAMES[$((count -1))]=`echo $host_local`
+ count=$((count +1))
+ host_local=`echo $hosts | cut -d "," -f $count`
+done
+COORD_COUNT=${#COORD_HOSTNAMES[*]}
+
+#Port list corresponding to the coordinators
+#If all the coordinators use the same port on different servers,
+#it is possible to define that with a unique element array.
+count=1
+port_local=`echo $ports | cut -d "," -f $count`
+while [ "$port_local" != "" ]
+do
+ COORD_PORTS[$((count -1))]=$port_local
+ count=$((count +1))
+ port_local=`echo $ports | cut -d "," -f $count`
+done
+COORD_PORTS_COUNT=${#COORD_PORTS[*]}
+
+#Data folder list corresponding to the coordinators
+#If all the coordinators use the same data folder name on different servers,
+#it is possible to define that with a unique element array.
+count=1
+folder_local=`echo $folders | cut -d "," -f $count`
+
+while [ "$folder_local" != "" ]
+do
+ COORD_PGDATA[$((count -1))]=$folder_local
+ count=$((count +1))
+ folder_local=`echo $folders | cut -d "," -f $count`
+done
+COORD_PGDATA_COUNT=${#COORD_PGDATA[*]}
+
+
+#-----------------------------------------------------------------------
+# Start DDL process
+#-----------------------------------------------------------------------
+
+#It is supposed that the same bin folders are used among the servers
+#to call postgres processes
+#This can be customized by the user with option -l
+COORD_SERVER_PROCESS=postgres
+PGCTL_SERVER_PROCESS=pg_ctl
+PSQL_CLIENT_PROCESS=psql
+
+COORD_SERVER=$PGXC_BASE$COORD_SERVER_PROCESS
+PGCTL_SERVER=$PGXC_BASE$PGCTL_SERVER_PROCESS
+PSQL_CLIENT=$PGXC_BASE$PSQL_CLIENT_PROCESS
+
+#readjust coordinator number to index number
+COORD_NUM_ORIGIN=$((COORD_NUM_ORIGIN -1))
+
+#check data validity
+#Note: Add other checks here
+
+if [ $COORD_COUNT -eq "1" ]
+then
+	echo "Are you sure you want to use this utility with only one coordinator?"
+ exit 2
+fi
+
+if [ $COORD_PGDATA_COUNT -ne $COORD_COUNT ]
+then
+ echo "Number of pgdata folders must be the same as coordinator server number"
+ exit 2
+fi
+
+if [ $COORD_PORTS_COUNT -ne $COORD_COUNT ]
+then
+ echo "Number of coordinator ports defined must be the same as coordinator server number"
+ exit 2
+fi
+
+#Check if coordinator number is not outbounds
+if [ $COORD_NUM_ORIGIN -gt $((COORD_COUNT -1)) ]
+then
+ echo "coordinator number is out of bounds"
+ exit 2
+fi
+COORD_ORIG_INDEX=$COORD_NUM_ORIGIN
+
+#Check if the data folders are defined
+for index in ${!COORD_HOSTNAMES[*]}
+do
+ targethost=${COORD_HOSTNAMES[$index]}
+ targetdata=${COORD_PGDATA[$index]}
+ if [[ `ssh $targethost test -d $targetdata && echo exists` ]]
+ then
+ echo "defined directory exists for "$targethost
+ else
+ echo "defined directory does not exist for "$targethost
+ exit 2
+ fi
+done
+
+#Has the origin Coordinator index been found?
+if [ -z $COORD_ORIG_INDEX ]
+then
+ echo "origin coordinator is not in the coordinator list"
+ exit 2
+fi
+
+#Main process begins
+
+#Check if the database is defined. Otherwise coordinators could be stopped uselessly
+if [ "$DB_NAME" != "" ]
+then
+	#Simply launch a dummy SQL command on the wanted database
+ $PSQL_CLIENT -h ${COORD_HOSTNAMES[$COORD_ORIG_INDEX]} -p ${COORD_PORTS[$COORD_ORIG_INDEX]} -c 'select now()' -d $DB_NAME; err=$?
+ if [ $err -gt "0" ]
+ then
+ echo "Database not defined"
+ exit 2
+ fi
+fi
+
+#1) stop all the coordinators
+echo "Stopping all the coordinators"
+for index in ${!COORD_HOSTNAMES[*]}
+do
+ targethost=${COORD_HOSTNAMES[$index]}
+ targetdata=${COORD_PGDATA[$index]}
+ echo ssh $targethost $PGCTL_SERVER stop -D $targetdata
+ ssh $targethost $PGCTL_SERVER stop -D $targetdata; err=$?
+ if [ $err -gt "0" ]
+ then
+		echo "pg_ctl couldn't stop server"
+ exit 2
+ fi
+done
+
+#If a DDL file is not set by the user, just synchronize the catalogs with the catalog of the chosen coordinator
+if [ "$DDL_FILE_NAME" != "" ]
+then
+ echo "-f activated, DDL being launched"
+
+ #2) restart the one we want to launch DDL to...
+ echo ssh ${COORD_HOSTNAMES[$COORD_ORIG_INDEX]} $COORD_SERVER $COORD_OPTIONS -p ${COORD_PORTS[$COORD_ORIG_INDEX]} -D ${COORD_PGDATA[$COORD_ORIG_INDEX]}
+ ssh ${COORD_HOSTNAMES[$COORD_ORIG_INDEX]} $COORD_SERVER $COORD_OPTIONS -p ${COORD_PORTS[$COORD_ORIG_INDEX]} -D ${COORD_PGDATA[$COORD_ORIG_INDEX]} &
+
+ #wait a little bit to be sure it switched on
+ sleep 3
+
+ #3) launch the DDL
+ #This has to be done depending on if the user has defined a file or a command
+ echo $PSQL_CLIENT -h ${COORD_HOSTNAMES[$COORD_ORIG_INDEX]} -p ${COORD_PORTS[$COORD_ORIG_INDEX]} -f $DDL_FILE_NAME -d $DB_NAME
+ $PSQL_CLIENT -h ${COORD_HOSTNAMES[$COORD_ORIG_INDEX]} -p ${COORD_PORTS[$COORD_ORIG_INDEX]} -f $DDL_FILE_NAME -d $DB_NAME; err=$?
+ if [ $err -gt "0" ]
+ then
+ echo "psql error, is Database defined?"
+ exit 2
+ fi
+
+ #4) Stop again the origin coordinator as we cannot copy the lock files to other coordinators
+ echo ssh ${COORD_HOSTNAMES[$COORD_ORIG_INDEX]} $PGCTL_SERVER stop -D ${COORD_PGDATA[$COORD_ORIG_INDEX]}
+ ssh ${COORD_HOSTNAMES[$COORD_ORIG_INDEX]} $PGCTL_SERVER stop -D ${COORD_PGDATA[$COORD_ORIG_INDEX]}; err=$?
+ if [ $err -gt "0" ]
+ then
+		echo "pg_ctl couldn't stop server"
+ exit 2
+ fi
+fi
+
+#5) before copying the catalogs, save the configuration files or they will be erased by the catalog copy
+#make a copy of them in a folder under /tmp (/tmp/pgxc_config_files.$$ by default)
+if [ -d $CONFIG_FOLDER ]
+then
+ rm -rf $CONFIG_FOLDER
+fi
+mkdir $CONFIG_FOLDER
+
+for index in ${!COORD_HOSTNAMES[*]}
+do
+ if [ $index -ne $COORD_ORIG_INDEX ]
+ then
+ targethost=${COORD_HOSTNAMES[$index]}
+ targetdata=${COORD_PGDATA[$index]}
+ echo scp -pr $targethost:$targetdata/postgresql.conf $CONFIG_FOLDER/postgresql.conf.$index
+ echo scp -pr $targethost:$targetdata/pg_hba.conf $CONFIG_FOLDER/pg_hba.conf.$index
+ scp -pr $targethost:$targetdata/postgresql.conf $CONFIG_FOLDER/postgresql.conf.$index; err=$?
+ if [ $err -gt "0" ]
+ then
+ echo "deleting saved configuration files"
+ rm -rf $CONFIG_FOLDER
+ echo "scp failed with "$targethost
+ exit 2
+ fi
+ scp -pr $targethost:$targetdata/pg_hba.conf $CONFIG_FOLDER/pg_hba.conf.$index; err=$?
+ if [ $err -gt "0" ]
+ then
+ echo "deleting saved configuration files"
+ rm -rf $CONFIG_FOLDER
+ echo "scp failed with "$targethost
+ exit 2
+ fi
+ fi
+done
+
+#6) copy catalog files to all coordinators but not to the origin one
+for index in ${!COORD_HOSTNAMES[*]}
+do
+ if [ $index -ne $COORD_ORIG_INDEX ]
+ then
+ srchost=${COORD_HOSTNAMES[$COORD_ORIG_INDEX]}
+ srcdata=${COORD_PGDATA[$COORD_ORIG_INDEX]}
+ targethost=${COORD_HOSTNAMES[$index]}
+ targetdata=${COORD_PGDATA[$index]}
+ #First erase the data to have a nice cleanup
+ echo ssh $targethost rm -rf $targetdata
+ ssh $targethost rm -rf $targetdata
+
+ #Just to be sure that catalog files of origin coordinator are copied well
+ echo scp -pr $srchost:$srcdata $targethost:$targetdata
+ scp -pr $srchost:$srcdata $targethost:$targetdata; err=$?
+ if [ $err -gt "0" ]
+ then
+ echo "deleting saved configuration files"
+ rm -rf $CONFIG_FOLDER
+ echo "scp failed with "$targethost
+ exit 2
+ fi
+ fi
+done
+
+#7) copy back the configuration files to the corresponding fresh folders
+#but not the configuration files of the origin coordinator
+for index in ${!COORD_HOSTNAMES[*]}
+do
+ if [ $index -ne $COORD_ORIG_INDEX ]
+ then
+ targethost=${COORD_HOSTNAMES[$index]}
+ targetdata=${COORD_PGDATA[$index]}
+ echo scp -pr $CONFIG_FOLDER/postgresql.conf.$index $targethost:$targetdata/postgresql.conf
+ echo scp -pr $CONFIG_FOLDER/pg_hba.conf.$index $targethost:$targetdata/pg_hba.conf
+ scp -pr $CONFIG_FOLDER/postgresql.conf.$index $targethost:$targetdata/postgresql.conf; err=$?
+ if [ $err -gt "0" ]
+ then
+ echo "deleting saved configuration files"
+ rm -rf $CONFIG_FOLDER
+ echo "scp failed with "$targethost
+ exit 2
+ fi
+ scp -pr $CONFIG_FOLDER/pg_hba.conf.$index $targethost:$targetdata/pg_hba.conf; err=$?
+ if [ $err -gt "0" ]
+ then
+ echo "deleting saved configuration files"
+ rm -rf $CONFIG_FOLDER
+ echo "scp failed with "$targethost
+ exit 2
+ fi
+ fi
+done
+
+#8) wait a little bit...
+sleep 1
+
+#9) restart all the other coordinators, origin coordinator has been stopped after DDL run
+for index in ${!COORD_HOSTNAMES[*]}
+do
+ echo ssh ${COORD_HOSTNAMES[$index]} $COORD_SERVER $COORD_OPTIONS -p ${COORD_PORTS[$index]} -D ${COORD_PGDATA[$index]} &
+ ssh ${COORD_HOSTNAMES[$index]} $COORD_SERVER $COORD_OPTIONS -p ${COORD_PORTS[$index]} -D ${COORD_PGDATA[$index]} &
+done
+
+sleep 2
+
+#Clean also the folder in tmp keeping the configuration files
+rm -rf $CONFIG_FOLDER
+
+#10) finished :p
+exit
\ No newline at end of file
diff --git a/contrib/pgxc_monitor/.gitignore b/contrib/pgxc_monitor/.gitignore
new file mode 100644
index 0000000000..6a0845ba6a
--- /dev/null
+++ b/contrib/pgxc_monitor/.gitignore
@@ -0,0 +1 @@
+/pgxc_monitor
diff --git a/contrib/pgxc_monitor/Makefile b/contrib/pgxc_monitor/Makefile
new file mode 100644
index 0000000000..5feaabd643
--- /dev/null
+++ b/contrib/pgxc_monitor/Makefile
@@ -0,0 +1,35 @@
+#-------------------------------------------------------------------------
+#
+# Makefile for contrib/pgxc_monitor
+#
+# Portions Copyright (c) 2011-2012 Postgres-XC Development Group
+#
+# $PostgreSQL$
+#
+#-------------------------------------------------------------------------
+
+PGFILEDESC = "pgxc_monitor - Monitor if a Postgres-XC node is running"
+PGAPPICON = win32
+
+PROGRAM= pgxc_monitor
+OBJS= pgxc_monitor.o mcxt.o
+
+#Include GTM objects
+gtm_builddir = $(top_builddir)/src/gtm
+EX_OBJS = $(gtm_builddir)/common/assert.o \
+ $(gtm_builddir)/client/libgtmclient.a \
+ $(gtm_builddir)/common/gtm_serialize.o
+
+PG_CPPFLAGS = -DFRONTEND -DDLSUFFIX=\"$(DLSUFFIX)\" -I$(srcdir) -I$(libpq_srcdir)
+PG_LIBS = $(libpq_pgport) $(PTHREAD_LIBS) $(EX_OBJS)
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = contrib/pgxc_monitor
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/contrib/pgxc_monitor/mcxt.c b/contrib/pgxc_monitor/mcxt.c
new file mode 100644
index 0000000000..6d9c2170a8
--- /dev/null
+++ b/contrib/pgxc_monitor/mcxt.c
@@ -0,0 +1,77 @@
+/*----------------------------------------------------------------------------------
+ *
+ * mcxt.c
+ *		Postgres-XC memory context management code for applications.
+ *
+ * This module is for Postgres-XC application/utility programs. Sometimes,
+ * applications/utilities may need Postgres-XC internal functions which
+ * depend upon mcxt.c of GTM or Postgres.
+ *
+ * This module "virtualizes" such module-dependent memory management.
+ *
+ * This code is for general use and depends only upon conventional
+ * memory management functions.
+ *
+ * Copyright (c) 2012, Postgres-XC Development Group
+ *
+ *-------------------------------------------------------------------------------------
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include "gen_alloc.h"
+
+static void *current_cxt;
+
+static void *memCxtAlloc(void *, size_t);
+static void *memCxtRealloc(void *, size_t);
+static void *memCxtAlloc0(void *, size_t);
+static void memCxtFree(void *);
+static void *memCxtAllocTop(size_t);
+static void *memCxtCurrentContext(void);
+
+
+static void *memCxtAlloc(void* current, size_t needed)
+{
+ return(malloc(needed));
+}
+
+static void *memCxtRealloc(void *addr, size_t needed)
+{
+ return(realloc(addr, needed));
+}
+
+static void *memCxtAlloc0(void *current, size_t needed)
+{
+ void *allocated;
+
+ allocated = malloc(needed);
+ if (allocated == NULL)
+ return(NULL);
+ memset(allocated, 0, needed);
+ return(allocated);
+}
+
+static void memCxtFree(void *addr)
+{
+ free(addr);
+ return;
+}
+
+static void *memCxtCurrentContext()
+{
+ return((void *)&current_cxt);
+}
+
+static void *memCxtAllocTop(size_t needed)
+{
+ return(malloc(needed));
+}
+
+
+Gen_Alloc genAlloc_class = {(void *)memCxtAlloc,
+ (void *)memCxtAlloc0,
+ (void *)memCxtRealloc,
+ (void *)memCxtFree,
+ (void *)memCxtCurrentContext,
+ (void *)memCxtAllocTop};
diff --git a/contrib/pgxc_monitor/pgxc_monitor.c b/contrib/pgxc_monitor/pgxc_monitor.c
new file mode 100644
index 0000000000..155cb1718d
--- /dev/null
+++ b/contrib/pgxc_monitor/pgxc_monitor.c
@@ -0,0 +1,279 @@
+/*
+ * -----------------------------------------------------------------------------
+ *
+ * pgxc_monitor utility
+ *
+ * Monitors if a given node is running or not.
+ *
+ * Command syntax:
+ *
+ * pgxc_monitor [ options ]
+ *
+ * Options are:
+ * -Z nodetype What node type to monitor, gtm or node.
+ * gtm tests gtm, gtm_standby or gtm_proxy.
+ * node tests Coordinator or Datanode.
+ * -p port Port number of the monitored node.
+ * -h host Host name or IP address of the monitored node.
+ * -n nodename Specifies pgxc_monitor node name. Default is "pgxc_monitor"
+ * -q Run in quiet mode. Default is quiet mode.
+ * -v Run in verbose mode.
+ * -d database Database name to connect to.
+ * -U username Connect as specified database user.
+ * --help Prints the help message and exits with 0.
+ *
+ * When monitoring Coordinator or Datanode, -p and -h options can be
+ * supplied via .pgpass file. If you use non-default target database name
+ * and username, they must also be supplied by .pgpass file.
+ * If password is needed, it must also be supplied by .pgpass file.
+ *
+ * Monitoring a Coordinator or Datanode uses the system(3) function. Therefore,
+ * you should not use the set-user-ID or set-group-ID bit. Also, because
+ * this uses the psql command, psql must be in your PATH.
+ *
+ * When testing a Coordinator/Datanode, you must set up a .pgpass file if you
+ * need to supply a password, as well as a non-default database name and username.
+ *
+ * The username and database name can also be specified via command line
+ * options. If a password is needed, it must be supplied via the .pgpass file though.
+ *
+ * If invalid parameters are given, an error message will be printed even if
+ * -q is specified.
+ *
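+ * Example invocations (host names and port numbers below are placeholders):
+ *
+ *     pgxc_monitor -Z gtm -h gtmhost -p 6666
+ *     pgxc_monitor -Z node -h coordhost -p 5432 -d postgres -U pgxcowner
+ *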
+ * --------------------------------------------------------------------------
+ */
+
+
+#include "gtm/gtm_client.h"
+#include "gtm/libpq-fe.h"
+
+#include <stdlib.h>
+#include <getopt.h>
+
+/* Define all the node types */
+typedef enum
+{
+ NONE = 0,
+ GTM, /* GTM or GTM-proxy */
+ NODE /* Coordinator or Datanode */
+} nodetype_t;
+
+static char *progname;
+
+#define Free(x) do{if((x)) free((x)); x = NULL;} while(0)
+
+static void usage(void);
+static int do_gtm_ping(char *host, char *port, nodetype_t nodetype, char *nodename, bool verbose);
+static int do_node_ping(char *host, char *port, char *username, char *database, bool verbose);
+
+int
+main(int ac, char *av[])
+{
+ int opt;
+ nodetype_t nodetype = NONE;
+ char *port = NULL;
+ char *host = NULL;
+ char *nodename = NULL;
+ bool verbose = false;
+ char *username = NULL;
+ char *database = NULL;
+
+ progname = strdup(av[0]);
+
+ /* Print help if necessary */
+ if (ac > 1)
+ {
+ if (strcmp(av[1], "--help") == 0 || strcmp(av[1], "-?") == 0)
+ {
+ usage();
+ exit(0);
+ }
+ }
+
+ /* Scan options */
+ while ((opt = getopt(ac, av, "Z:U:d:h:n:p:qv")) != -1)
+ {
+ switch(opt)
+ {
+ case 'Z':
+ if (strcmp(optarg, "gtm") == 0)
+ nodetype = GTM;
+ else if (strcmp(optarg, "node") == 0)
+ nodetype = NODE;
+ else
+ {
+ fprintf(stderr, "%s: invalid -Z option value.\n", progname);
+ exit(3);
+ }
+ break;
+ case 'h':
+ Free(host);
+ host = strdup(optarg);
+ break;
+ case 'n':
+ Free(nodename);
+ nodename = strdup(optarg);
+ break;
+ case 'p':
+ Free(port);
+ port = strdup(optarg);
+ break;
+ case 'q':
+ verbose = false;
+ break;
+ case 'v':
+ verbose = true;
+ break;
+ case 'U':
+ username = strdup(optarg);
+ break;
+ case 'd':
+ database = strdup(optarg);
+ break;
+ default:
+			fprintf(stderr, "%s: unknown option %c.\n", progname, opt);
+ exit(3);
+ }
+ }
+
+ /* If no types are defined, well there is nothing to be done */
+ if (nodetype == NONE)
+ {
+ fprintf(stderr, "%s: -Z option is missing, it is mandatory.\n", progname);
+ usage();
+ exit(3);
+ }
+
+ switch(nodetype)
+ {
+ case GTM:
+ exit(do_gtm_ping(host, port, nodetype, nodename, verbose));
+ case NODE:
+ exit(do_node_ping(host, port, username, database, verbose));
+ case NONE:
+ default:
+ break;
+ }
+
+ /* Should not happen */
+ fprintf(stderr, "%s: internal error.\n", progname);
+ exit(3);
+}
+
+/*
+ * Ping a given GTM or GTM-proxy
+ */
+static int
+do_gtm_ping(char *host, char* port, nodetype_t nodetype, char *nodename, bool verbose)
+{
+ char connect_str[256];
+ GTM_Conn *conn;
+
+ if (host == NULL)
+ {
+ fprintf(stderr, "%s: -h is mandatory for -Z gtm or -Z gtm_proxy\n", progname);
+ exit(3);
+ }
+ if (port == NULL)
+ {
+ fprintf(stderr, "%s: -p is mandatory for -Z gtm or -Z gtm_proxy\n", progname);
+ exit(3);
+ }
+ /* Use 60s as connection timeout */
+ sprintf(connect_str, "host=%s port=%s node_name=%s remote_type=%d postmaster=0 connect_timeout=60",
+ host, port, nodename ? nodename : "pgxc_monitor", GTM_NODE_COORDINATOR);
+ if ((conn = PQconnectGTM(connect_str)) == NULL)
+ {
+ if (verbose)
+ fprintf(stderr, "%s: Could not connect to %s\n", progname, "GTM");
+ exit(1);
+ }
+ GTMPQfinish(conn);
+ if (verbose)
+ printf("Running\n");
+ return 0;
+}
+
+/*
+ * Ping a given node
+ */
+static int
+do_node_ping(char *host, char *port, char *username, char *database, bool verbose)
+{
+ int rc;
+ int exitStatus;
+ char command_line[1024];
+ char *quiet_out = " > /dev/null 2> /dev/null";
+ char *verbose_out = "";
+ char *out = verbose ? verbose_out : quiet_out;
+
+ /* Build psql command launched to node */
+ sprintf(command_line, "psql -w -q -c \"select 1 a\"");
+
+ /* Then add options if necessary */
+ if (username)
+ {
+ strcat(command_line, " -U ");
+ strcat(command_line, username);
+ }
+
+ /* Add database name, default is "postgres" */
+ if (database)
+ {
+ strcat(command_line, " -d ");
+ strcat(command_line, database);
+ }
+ else
+ strcat(command_line, " -d postgres ");
+
+ if (host)
+ {
+ strcat(command_line, " -h ");
+ strcat(command_line, host);
+ }
+
+ if (port)
+ {
+ strcat(command_line, " -p ");
+ strcat(command_line, port);
+ }
+
+ strcat(command_line, out);
+
+ /* Launch the command and output result if necessary */
+ rc = system(command_line);
+ exitStatus = WEXITSTATUS(rc);
+ if (verbose)
+ {
+ if (exitStatus == 0)
+ printf("Running\n");
+ else
+ printf("Not running\n");
+ }
+
+ return exitStatus;
+}
+
+/*
+ * Show help information
+ */
+static void
+usage(void)
+{
+ printf("pgxc_monitor -Z nodetype -p port -h host\n\n");
+ printf("Options are:\n");
+ printf(" -Z nodetype What node type to monitor, GTM, GTM-Proxy,\n");
+ printf(" Coordinator, or Datanode.\n");
+ printf(" Use \"gtm\" for GTM and GTM-proxy, \"node\" for Coordinator and Datanode.\n");
+ printf(" -h host Host name or IP address of the monitored node.\n");
+ printf(" Mandatory for -Z gtm\n");
+ printf(" -n nodename Nodename of this pgxc_monitor.\n");
+ printf(" Only for -Z gtm. Default is pgxc_monitor\n");
+	printf("                 This identifies the name of the component connecting to GTM.\n");
+ printf(" -p port Port number of the monitored node. Mandatory for -Z gtm\n");
+ printf(" -d database Database name to connect to. Default is \"postgres\". \n");
+ printf(" -U username Connect as specified database user. \n");
+ printf(" -q Quiet mode.\n");
+ printf(" -v Verbose mode.\n");
+ printf(" --help Prints the help message and exits with 0.\n");
+}
diff --git a/contrib/sepgsql/hooks.c b/contrib/sepgsql/hooks.c
index c4b978b48f..dadf99e74b 100644
--- a/contrib/sepgsql/hooks.c
+++ b/contrib/sepgsql/hooks.c
@@ -304,6 +304,9 @@ sepgsql_utility_command(PlannedStmt *pstmt,
ParamListInfo params,
QueryEnvironment *queryEnv,
DestReceiver *dest,
+#ifdef PGXC
+ bool sentToRemote,
+#endif /* PGXC */
char *completionTag)
{
Node *parsetree = pstmt->utilityStmt;
@@ -367,11 +370,15 @@ sepgsql_utility_command(PlannedStmt *pstmt,
if (next_ProcessUtility_hook)
(*next_ProcessUtility_hook) (pstmt, queryString,
context, params, queryEnv,
- dest, completionTag);
+ dest,
+ sentToRemote,
+ completionTag);
else
standard_ProcessUtility(pstmt, queryString,
context, params, queryEnv,
- dest, completionTag);
+ dest,
+ sentToRemote,
+ completionTag);
}
PG_CATCH();
{
diff --git a/contrib/stormstats/Makefile b/contrib/stormstats/Makefile
new file mode 100644
index 0000000000..961489a501
--- /dev/null
+++ b/contrib/stormstats/Makefile
@@ -0,0 +1,15 @@
+MODULE_big = stormstats
+OBJS = stormstats.o
+
+EXTENSION = stormstats
+DATA = stormstats--1.0.sql stormstats--unpackaged--1.0.sql
+
+ifdef USE_PGXS
+PGXS := $(shell pg_config --pgxs)
+include $(PGXS)
+else
+subdir = contrib/stormstats
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/contrib/stormstats/stormstats--1.0.sql b/contrib/stormstats/stormstats--1.0.sql
new file mode 100644
index 0000000000..2ea2b32a6e
--- /dev/null
+++ b/contrib/stormstats/stormstats--1.0.sql
@@ -0,0 +1,17 @@
+CREATE FUNCTION storm_database_stats(
+ OUT datname text,
+ OUT conn_cnt int8,
+ OUT select_cnt int8,
+ OUT insert_cnt int8,
+ OUT update_cnt int8,
+ OUT delete_cnt int8,
+ OUT ddl_cnt int8
+)
+RETURNS SETOF record
+AS 'MODULE_PATHNAME'
+LANGUAGE C;
+
+-- Register a view on the function for ease of use.
+CREATE VIEW storm_database_stats AS
+ SELECT * FROM storm_database_stats();
+
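+-- Example usage (a sketch; assumes the stormstats library was loaded via
+-- shared_preload_libraries and the extension has been created):
+--   SELECT * FROM storm_database_stats ORDER BY dbname;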
diff --git a/contrib/stormstats/stormstats--unpackaged--1.0.sql b/contrib/stormstats/stormstats--unpackaged--1.0.sql
new file mode 100644
index 0000000000..df9f3a033d
--- /dev/null
+++ b/contrib/stormstats/stormstats--unpackaged--1.0.sql
@@ -0,0 +1,5 @@
+/* contrib/stormstats/stormstats--unpackaged--1.0.sql */
+
+ALTER EXTENSION stormstats ADD function storm_database_stats();
+ALTER EXTENSION stormstats ADD view storm_database_stats;
+
diff --git a/contrib/stormstats/stormstats.c b/contrib/stormstats/stormstats.c
new file mode 100644
index 0000000000..ee13bc7ff7
--- /dev/null
+++ b/contrib/stormstats/stormstats.c
@@ -0,0 +1,897 @@
+#include "postgres.h"
+
+#include <unistd.h>
+
+#include "catalog/pg_type.h"
+#include "executor/spi.h"
+#include "miscadmin.h"
+#include "storage/ipc.h"
+#include "storage/spin.h"
+#include "access/hash.h"
+
+#include "tcop/utility.h"
+#include "commands/dbcommands.h"
+#include "utils/builtins.h"
+#include "utils/syscache.h"
+#include "utils/snapmgr.h"
+#include "libpq/auth.h"
+#include "optimizer/planner.h"
+#include "nodes/makefuncs.h"
+#include "funcapi.h"
+#include "stormstats.h"
+#include "storage/fd.h"
+
+#include "pgxc/pgxc.h"
+#include "pgxc/pgxcnode.h"
+#include "pgxc/planner.h"
+#include "pgxc/execRemote.h"
+
+/* mark this dynamic library to be compatible with PG */
+PG_MODULE_MAGIC;
+
+/* Location of stats file */
+#define STORM_DUMP_FILE "global/storm.stat"
+
+/* This constant defines the magic number in the stats file header */
+static const uint32 STORM_FILE_HEADER = 0x20120229;
+
+#define STORM_STATS_COLS 7
+
+typedef struct ssHashKey
+{
+ int dbname_len;
+ const char *dbname_ptr;
+} ssHashKey;
+
+typedef struct EventCounters
+{
+ int64 conn_cnt;
+ int64 select_cnt;
+ int64 insert_cnt;
+ int64 update_cnt;
+ int64 delete_cnt;
+ int64 ddl_cnt;
+} EventCounters;
+
+typedef struct StormStatsEntry
+{
+ ssHashKey key; /* hash key of entry - MUST BE FIRST */
+ EventCounters counters;
+ slock_t mutex;
+ char dbname[1]; /* VARIABLE LENGTH ARRAY - MUST BE LAST */
+
+} StormStatsEntry;
+
+/* Local hash table entry, no mutex needed */
+typedef struct LocalStatsEntry
+{
+ ssHashKey key; /* hash key of entry */
+ EventCounters counters;
+ char dbname[NAMEDATALEN];
+} LocalStatsEntry;
+
+typedef struct StormSharedState
+{
+ LWLockId lock;
+} StormSharedState;
+
+static bool sp_save; /* whether to save stats across shutdown */
+
+extern PlannedStmt *planner_callback(Query *parse, int cursorOptions, ParamListInfo boundParams);
+extern void auth_check(Port *port, int status);
+
+static void sp_shmem_startup(void);
+static void sp_shmem_shutdown(int code, Datum arg);
+static Size hash_memsize(void);
+
+static uint32 ss_hash_fn(const void *key, Size keysize);
+static int ss_match_fn(const void *key1, const void *key2, Size keysize);
+static void stats_store(const char *dbname, CmdType c, bool isConnEvent, bool isUtilEvent);
+
+static StormStatsEntry *alloc_event_entry(ssHashKey *key);
+
+/* Functions */
+Datum storm_database_stats(PG_FUNCTION_ARGS);
+
+PG_FUNCTION_INFO_V1(storm_database_stats);
+
+/* Shared Memory Objects */
+static HTAB *StatsEntryHash = NULL;
+static StormSharedState *shared_state = NULL;
+
+/* Session level objects */
+static shmem_startup_hook_type prev_shmem_startup_hook = NULL;
+
+static ClientAuthentication_hook_type original_client_auth_hook = NULL;
+
+static ProcessUtility_hook_type prev_ProcessUtility = NULL;
+
+static int max_tracked_dbs;
+
+static void
+ProcessUtility_callback(Node *parsetree,
+ const char *queryString,
+ ProcessUtilityContext context,
+ ParamListInfo params,
+ DestReceiver *dest,
+#ifdef PGXC
+ bool sentToRemote,
+#endif /* PGXC */
+ char *completionTag)
+{
+ elog( DEBUG1, "STORMSTATS: using plugin." );
+
+ standard_ProcessUtility(parsetree, queryString, context, params, dest,
+#ifdef PGXC
+ sentToRemote,
+#endif /* PGXC */
+ completionTag);
+
+ stats_store(get_database_name(MyDatabaseId), CMD_UNKNOWN, false, true);
+
+ /*
+ * Check if it's a CREATE/DROP DATABASE command. Update entries in the
+ * shared hash table accordingly.
+ */
+ switch (nodeTag(parsetree))
+ {
+ case T_CreatedbStmt:
+ {
+ ssHashKey key;
+ StormStatsEntry *entry;
+ CreatedbStmt *stmt = (CreatedbStmt *)parsetree;
+
+ /* Set up key for hashtable search */
+ key.dbname_len = strlen(stmt->dbname);
+ key.dbname_ptr = stmt->dbname;
+
+ /*
+ * Lookup the hash table entry with exclusive lock. We have to
+			 * manipulate the entries immediately anyway.
+ */
+ LWLockAcquire(shared_state->lock, LW_EXCLUSIVE);
+
+ entry = (StormStatsEntry *) hash_search(StatsEntryHash, &key, HASH_FIND, NULL);
+
+ /* What do we do if we find an entry already? We WARN for now */
+ if (!entry)
+ entry = alloc_event_entry(&key);
+ else
+ ereport(WARNING,
+ (errmsg("entry exists already for database %s!",
+ entry->dbname)));
+ LWLockRelease(shared_state->lock);
+ break;
+ }
+ case T_DropdbStmt:
+ {
+ ssHashKey key;
+ StormStatsEntry *entry;
+ DropdbStmt *stmt = (DropdbStmt *)parsetree;
+
+ /* Set up key for hashtable search */
+ key.dbname_len = strlen(stmt->dbname);
+ key.dbname_ptr = stmt->dbname;
+
+ /*
+ * Lookup the hash table entry with exclusive lock. We have to
+			 * manipulate the entries immediately anyway.
+ */
+ LWLockAcquire(shared_state->lock, LW_EXCLUSIVE);
+
+ entry = (StormStatsEntry *) hash_search(StatsEntryHash, &key, HASH_REMOVE, NULL);
+
+ /* What do we do if we do not find an entry? We WARN for now */
+ if (!entry && !stmt->missing_ok)
+ ereport(WARNING,
+ (errmsg("entry does not exist for database %s!",
+ stmt->dbname)));
+ LWLockRelease(shared_state->lock);
+ break;
+ }
+ default:
+ /* Nothing */;
+ }
+}
+
+void
+_PG_init(void)
+{
+ if (!process_shared_preload_libraries_in_progress)
+ return;
+
+ DefineCustomIntVariable("storm_stats.max_tracked_databases",
+ "Sets the maximum number of databases tracked.",
+ NULL,
+ &max_tracked_dbs,
+ 1000,
+ 1,
+ INT_MAX,
+ PGC_POSTMASTER,
+ 0,
+ NULL,
+ NULL,
+ NULL);
+
+ DefineCustomBoolVariable("storm_stats.save",
+ "Save statistics across server shutdowns.",
+ NULL,
+ &sp_save,
+ true,
+ PGC_SIGHUP,
+ 0,
+ NULL,
+ NULL,
+ NULL);
+
+ EmitWarningsOnPlaceholders("storm_stats");
+
+ RequestAddinShmemSpace(hash_memsize());
+ RequestNamedLWLockTranche("storm_stats", 1);
+
+ prev_shmem_startup_hook = shmem_startup_hook;
+ shmem_startup_hook = sp_shmem_startup;
+ planner_hook = planner_callback;
+
+ original_client_auth_hook = ClientAuthentication_hook;
+ ClientAuthentication_hook = auth_check;
+
+ prev_ProcessUtility = ProcessUtility_hook;
+ ProcessUtility_hook = ProcessUtility_callback;
+
+ elog( DEBUG1, "STORMSTATS: plugin loaded" );
+}
+
+void
+_PG_fini(void)
+{
+ shmem_startup_hook = prev_shmem_startup_hook;
+ planner_hook = NULL;
+ ProcessUtility_hook = prev_ProcessUtility;
+
+ elog( DEBUG1, "STORMSTATS: plugin unloaded." );
+}
+
+static void sp_shmem_startup(void)
+{
+ HASHCTL event_ctl;
+ bool found;
+ FILE *file;
+ uint32 header;
+ int32 num;
+ int32 i;
+ int buffer_size;
+ char *buffer = NULL;
+
+ if (prev_shmem_startup_hook)
+ prev_shmem_startup_hook();
+
+ /*
+ * Create or attach to the shared memory state, including hash table
+ */
+ LWLockAcquire(AddinShmemInitLock, LW_EXCLUSIVE);
+
+ shared_state = ShmemInitStruct("storm_stats state", sizeof(StormSharedState), &found);
+ if (!shared_state)
+ elog(ERROR, "out of shared memory");
+
+ if (!found)
+ shared_state->lock = &(GetNamedLWLockTranche("storm_stats"))->lock;
+
+ memset(&event_ctl, 0, sizeof(event_ctl));
+
+ event_ctl.keysize = sizeof(ssHashKey);
+ event_ctl.entrysize = sizeof(StormStatsEntry) + NAMEDATALEN;
+ event_ctl.hash = ss_hash_fn;
+ event_ctl.match = ss_match_fn;
+
+ StatsEntryHash = ShmemInitHash("storm_stats event hash", max_tracked_dbs,
+ max_tracked_dbs, &event_ctl,
+ HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);
+ if (!StatsEntryHash)
+ elog(ERROR, "out of shared memory");
+
+ LWLockRelease(AddinShmemInitLock);
+
+ /*
+ * If we're in the postmaster (or a standalone backend...), set up a shmem
+ * exit hook to dump the statistics to disk.
+ */
+ if (!IsUnderPostmaster)
+ on_shmem_exit(sp_shmem_shutdown, (Datum) 0);
+
+ /*
+ * Attempt to load old statistics from the dump file, if this is the first
+ * time through and we weren't told not to.
+ */
+ if (found || !sp_save)
+ return;
+
+ /*
+ * Note: we don't bother with locks here, because there should be no other
+ * processes running when this code is reached.
+ */
+ file = AllocateFile(STORM_DUMP_FILE, PG_BINARY_R);
+ if (file == NULL)
+ {
+ if (errno == ENOENT)
+ return; /* ignore not-found error */
+ goto error;
+ }
+
+ buffer_size = NAMEDATALEN;
+ buffer = (char *) palloc(buffer_size);
+
+ if (fread(&header, sizeof(uint32), 1, file) != 1 ||
+ header != STORM_FILE_HEADER ||
+ fread(&num, sizeof(int32), 1, file) != 1)
+ goto error;
+
+ for (i = 0; i < num; i++)
+ {
+ StormStatsEntry temp;
+ StormStatsEntry *entry;
+
+ if (fread(&temp, offsetof(StormStatsEntry, mutex), 1, file) != 1)
+ goto error;
+
+ if (temp.key.dbname_len >= buffer_size)
+ {
+ buffer = (char *) repalloc(buffer, temp.key.dbname_len + 1);
+ buffer_size = temp.key.dbname_len + 1;
+ }
+
+ if (fread(buffer, 1, temp.key.dbname_len, file) != temp.key.dbname_len)
+ goto error;
+ buffer[temp.key.dbname_len] = '\0';
+
+ temp.key.dbname_ptr = buffer;
+
+ /* make the hashtable entry (discards old entries if too many) */
+ entry = alloc_event_entry(&temp.key);
+
+ /* copy in the actual stats */
+ entry->counters = temp.counters;
+ }
+
+ pfree(buffer);
+ FreeFile(file);
+ return;
+
+error:
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not read stormstats file \"%s\": %m",
+ STORM_DUMP_FILE)));
+ if (buffer)
+ pfree(buffer);
+ if (file)
+ FreeFile(file);
+ /* If possible, throw away the bogus file; ignore any error */
+ unlink(STORM_DUMP_FILE);
+}
+
+/*
+ * shmem_shutdown hook: Dump statistics into file.
+ *
+ * Note: we don't bother with acquiring lock, because there should be no
+ * other processes running when this is called.
+ */
+static void
+sp_shmem_shutdown(int code, Datum arg)
+{
+ FILE *file;
+ HASH_SEQ_STATUS hash_seq;
+ int32 num_entries;
+ StormStatsEntry *entry;
+
+ /* Don't try to dump during a crash. */
+ if (code)
+ return;
+
+ /* Safety check ... shouldn't get here unless shmem is set up. */
+ if (!shared_state || !StatsEntryHash)
+ return;
+
+ /* Don't dump if told not to. */
+ if (!sp_save)
+ return;
+
+ file = AllocateFile(STORM_DUMP_FILE, PG_BINARY_W);
+ if (file == NULL)
+ goto error;
+
+ if (fwrite(&STORM_FILE_HEADER, sizeof(uint32), 1, file) != 1)
+ goto error;
+ num_entries = hash_get_num_entries(StatsEntryHash);
+ if (fwrite(&num_entries, sizeof(int32), 1, file) != 1)
+ goto error;
+
+ hash_seq_init(&hash_seq, StatsEntryHash);
+ while ((entry = hash_seq_search(&hash_seq)) != NULL)
+ {
+ int len = entry->key.dbname_len;
+
+ if (fwrite(entry, offsetof(StormStatsEntry, mutex), 1, file) != 1 ||
+ fwrite(entry->dbname, 1, len, file) != len)
+ goto error;
+ }
+
+ if (FreeFile(file))
+ {
+ file = NULL;
+ goto error;
+ }
+
+ return;
+
+error:
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("could not write stormstats file \"%s\": %m",
+ STORM_DUMP_FILE)));
+
+ if (file)
+ FreeFile(file);
+ unlink(STORM_DUMP_FILE);
+}
+
+PlannedStmt *planner_callback(Query *parse, int cursorOptions, ParamListInfo boundParams)
+{
+ PlannedStmt *plan;
+
+ elog( DEBUG1, "STORMSTATS: using plugin." );
+
+ /* Generate a plan */
+ plan = standard_planner(parse, cursorOptions, boundParams);
+
+ stats_store(get_database_name(MyDatabaseId), parse->commandType, false, false);
+
+ return plan;
+}
+
+void auth_check(Port *port, int status)
+{
+ elog( DEBUG1, "STORMSTATS: using plugin." );
+
+ /*
+ * Any other plugins which use ClientAuthentication_hook.
+ */
+ if (original_client_auth_hook)
+ original_client_auth_hook(port, status);
+
+ if (status == STATUS_OK)
+ {
+ stats_store(port->database_name, CMD_UNKNOWN, true, false);
+ }
+}
+
+static Size hash_memsize(void)
+{
+ Size size;
+ Size events_size;
+ Size state_size;
+
+ events_size = hash_estimate_size(max_tracked_dbs, MAXALIGN(sizeof(StormStatsEntry)));
+ state_size = MAXALIGN(sizeof(StormSharedState));
+
+ size = add_size(events_size, state_size);
+
+ return size;
+}
+
+static StormStatsEntry *alloc_event_entry(ssHashKey *key)
+{
+ StormStatsEntry *entry;
+ bool found;
+
+ if (hash_get_num_entries(StatsEntryHash) >= max_tracked_dbs)
+ {
+		elog(ERROR, "STORMSTATS: The maximum number of tracked databases has been reached");
+ return NULL;
+ }
+
+ /* Find or create an entry with desired hash code */
+ entry = (StormStatsEntry *) hash_search(StatsEntryHash, key, HASH_ENTER, &found);
+
+ if (!found)
+ {
+ entry->key.dbname_ptr = entry->dbname;
+ memset(&entry->counters, 0, sizeof(EventCounters));
+ SpinLockInit(&entry->mutex);
+
+ memcpy(entry->dbname, key->dbname_ptr, key->dbname_len);
+ entry->dbname[key->dbname_len] = '\0';
+ }
+
+ return entry;
+}
+
+/*
+ * Calculate hash value for a key
+ */
+static uint32
+ss_hash_fn(const void *key, Size keysize)
+{
+ const ssHashKey *k = (const ssHashKey *) key;
+
+ /* we don't bother to include encoding in the hash */
+ return DatumGetUInt32(hash_any((const unsigned char *) k->dbname_ptr,
+ k->dbname_len));
+}
+
+/*
+ * Compare two keys - zero means match
+ */
+static int
+ss_match_fn(const void *key1, const void *key2, Size keysize)
+{
+ const ssHashKey *k1 = (const ssHashKey *) key1;
+ const ssHashKey *k2 = (const ssHashKey *) key2;
+
+ if (k1->dbname_len == k2->dbname_len &&
+ memcmp(k1->dbname_ptr, k2->dbname_ptr, k1->dbname_len) == 0)
+ return 0;
+ else
+ return 1;
+}
+
+static void
+stats_store(const char *dbname, CmdType c, bool isConnEvent, bool isUtilEvent)
+{
+ ssHashKey key;
+ StormStatsEntry *entry;
+
+ if (!shared_state || !StatsEntryHash)
+ return;
+
+ /* Set up key for hashtable search */
+ key.dbname_len = strlen(dbname);
+ key.dbname_ptr = dbname;
+
+ /* Lookup the hash table entry with shared lock. */
+ LWLockAcquire(shared_state->lock, LW_SHARED);
+
+ entry = (StormStatsEntry *) hash_search(StatsEntryHash, &key, HASH_FIND, NULL);
+ if (!entry)
+ {
+ /* Must acquire exclusive lock to add a new entry. */
+ LWLockRelease(shared_state->lock);
+ LWLockAcquire(shared_state->lock, LW_EXCLUSIVE);
+ entry = alloc_event_entry(&key);
+ }
+
+ /* Grab the spinlock while updating the counters. */
+ {
+ volatile StormStatsEntry *e = (volatile StormStatsEntry *) entry;
+
+ SpinLockAcquire(&e->mutex);
+
+ if (isConnEvent) {
+ e->counters.conn_cnt += 1;
+ } else if (isUtilEvent) {
+ e->counters.ddl_cnt += 1;
+ } else {
+ switch (c)
+ {
+ case CMD_SELECT:
+ e->counters.select_cnt += 1;
+ break;
+ case CMD_INSERT:
+ e->counters.insert_cnt += 1;
+ break;
+ case CMD_UPDATE:
+ e->counters.update_cnt += 1;
+ break;
+ case CMD_DELETE:
+ e->counters.delete_cnt += 1;
+ break;
+ case CMD_UTILITY:
+ case CMD_UNKNOWN:
+ case CMD_NOTHING:
+ break;
+ }
+ }
+ SpinLockRelease(&e->mutex);
+ }
+
+ LWLockRelease(shared_state->lock);
+}
+
+/*
+ * Gather statistics from remote coordinators
+ */
+static HTAB *
+storm_gather_remote_coord_info(Oid funcid)
+{
+ bool found;
+ EState *estate;
+ TupleTableSlot *result;
+ RemoteQuery *step;
+ RemoteQueryState *node;
+ int i, ncolumns;
+ HeapTuple tp;
+ TupleDesc tupdesc;
+ MemoryContext oldcontext;
+ HTAB *LocalStatsHash;
+ HASHCTL event_ctl;
+
+ /*
+ * We will sort output by database name, should make adding up info from
+ * multiple remote coordinators easier
+ */
+ char *query = "SELECT * FROM storm_database_stats() ORDER BY datname";
+
+ /* Build up RemoteQuery */
+ step = makeNode(RemoteQuery);
+
+ step->combine_type = COMBINE_TYPE_NONE;
+ step->exec_nodes = NULL;
+ step->sql_statement = query;
+ step->force_autocommit = false;
+ step->read_only = true;
+ step->exec_type = EXEC_ON_COORDS;
+
+ /* Build a local hash table to contain info from remote nodes */
+ memset(&event_ctl, 0, sizeof(event_ctl));
+
+ event_ctl.keysize = sizeof(ssHashKey);
+ event_ctl.entrysize = sizeof(LocalStatsEntry);
+ event_ctl.hash = ss_hash_fn;
+ event_ctl.match = ss_match_fn;
+
+ LocalStatsHash = hash_create("storm_stats local hash", max_tracked_dbs,
+ &event_ctl,
+ HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);
+ if (!LocalStatsHash)
+ elog(ERROR, "out of memory");
+
+ /*
+ * Add targetlist entries. We use the proc oid to get the tupledesc for
+ * this. We could have hardcoded the types of existing set of columns, but
+ * if we change the columns later for whatever reasons, this keeps us sane
+ */
+ tp = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
+
+ /* Build a tupdesc of all the OUT parameters */
+ tupdesc = build_function_result_tupdesc_t(tp);
+ ncolumns = tupdesc->natts;
+
+ for (i = 0; i < ncolumns; ++i)
+ {
+ Var *var;
+ TargetEntry *tle;
+
+ var = makeVar(1,
+ tupdesc->attrs[i]->attnum,
+ tupdesc->attrs[i]->atttypid,
+ tupdesc->attrs[i]->atttypmod,
+ InvalidOid,
+ 0);
+
+ tle = makeTargetEntry((Expr *) var, tupdesc->attrs[i]->attnum, NULL, false);
+ step->scan.plan.targetlist = lappend(step->scan.plan.targetlist, tle);
+ }
+ ReleaseSysCache(tp);
+
+ /* Execute query on the data nodes */
+ estate = CreateExecutorState();
+
+ oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
+
+ estate->es_snapshot = GetActiveSnapshot();
+
+ node = ExecInitRemoteQuery(step, estate, 0);
+ MemoryContextSwitchTo(oldcontext);
+ /* get ready to combine results */
+ result = ExecRemoteQuery(node);
+ while (result != NULL && !TupIsNull(result))
+ {
+ Datum value;
+ bool isnull;
+ ssHashKey key;
+ LocalStatsEntry *entry;
+ char *dbname;
+
+ /* Process statistics from the coordinator nodes */
+ value = slot_getattr(result, 1, &isnull); /* datname */
+ if (isnull)
+ ereport(ERROR,
+ (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
+ errmsg("database name must not be null")));
+
+ dbname = TextDatumGetCString(value);
+
+ /* Set up key for hashtable search */
+ key.dbname_len = strlen(dbname);
+ key.dbname_ptr = dbname;
+
+ /* Find or create an entry with desired hash code */
+ entry = (LocalStatsEntry *) hash_search(LocalStatsHash, &key, HASH_ENTER, &found);
+ if (!found)
+ {
+ entry->key.dbname_ptr = entry->dbname;
+ memset(&entry->counters, 0, sizeof(EventCounters));
+ memcpy(entry->dbname, key.dbname_ptr, key.dbname_len);
+ entry->dbname[key.dbname_len] = '\0';
+ }
+
+ value = slot_getattr(result, 2, &isnull); /* conn_cnt */
+ if (!isnull)
+ entry->counters.conn_cnt += DatumGetInt64(value);
+
+ value = slot_getattr(result, 3, &isnull); /* select_cnt */
+ if (!isnull)
+ entry->counters.select_cnt += DatumGetInt64(value);
+
+ value = slot_getattr(result, 4, &isnull); /* insert_cnt */
+ if (!isnull)
+ entry->counters.insert_cnt += DatumGetInt64(value);
+
+ value = slot_getattr(result, 5, &isnull); /* update_cnt */
+ if (!isnull)
+ entry->counters.update_cnt += DatumGetInt64(value);
+
+ value = slot_getattr(result, 6, &isnull); /* delete_cnt */
+ if (!isnull)
+ entry->counters.delete_cnt += DatumGetInt64(value);
+
+ value = slot_getattr(result, 7, &isnull); /* ddl_cnt */
+ if (!isnull)
+ entry->counters.ddl_cnt += DatumGetInt64(value);
+
+ /* fetch next */
+ result = ExecRemoteQuery(node);
+ }
+ ExecEndRemoteQuery(node);
+
+ return LocalStatsHash;
+}
+
+Datum storm_database_stats(PG_FUNCTION_ARGS)
+{
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ TupleDesc tupdesc;
+ Tuplestorestate *tupstore;
+ MemoryContext per_query_ctx;
+ MemoryContext oldcontext;
+ HASH_SEQ_STATUS hash_seq;
+ StormStatsEntry *entry;
+ HTAB *LocalStatsHash = NULL;
+
+ if (IS_PGXC_DATANODE)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("invalid invocation on data node")));
+
+ if (!shared_state || !StatsEntryHash)
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("storm_stats must be loaded via shared_preload_libraries")));
+
+ /* check to see if caller supports us returning a tuplestore */
+ if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("set-valued function called in context that cannot accept a set")));
+ if (!(rsinfo->allowedModes & SFRM_Materialize))
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("materialize mode required, but it is not " \
+ "allowed in this context")));
+
+ per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
+ oldcontext = MemoryContextSwitchTo(per_query_ctx);
+
+ /*
+ * Query the rest of the coordinators and get their stats. Do this only if
+	 * you are the query originator. Otherwise just provide your local info and
+	 * return.
+ */
+ if (IsConnFromApp())
+ LocalStatsHash = storm_gather_remote_coord_info(fcinfo->flinfo->fn_oid);
+
+ tupdesc = CreateTemplateTupleDesc(STORM_STATS_COLS, false);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 1, "dbname", TEXTOID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 2, "conn_cnt", INT8OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 3, "select_cnt", INT8OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 4, "insert_cnt", INT8OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 5, "update_cnt", INT8OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 6, "delete_cnt", INT8OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 7, "ddl_cnt", INT8OID, -1, 0);
+
+ tupstore = tuplestore_begin_heap(true, false, work_mem);
+ rsinfo->returnMode = SFRM_Materialize;
+ rsinfo->setResult = tupstore;
+ rsinfo->setDesc = tupdesc;
+
+ LWLockAcquire(shared_state->lock, LW_SHARED);
+
+ hash_seq_init(&hash_seq, StatsEntryHash);
+ while ((entry = hash_seq_search(&hash_seq)) != NULL)
+ {
+ Datum values[STORM_STATS_COLS];
+ bool nulls[STORM_STATS_COLS];
+ int i = 0;
+ EventCounters tmp, lcl;
+
+ /* generate junk in short-term context */
+ MemoryContextSwitchTo(oldcontext);
+
+ memset(values, 0, sizeof(values));
+ memset(nulls, 0, sizeof(nulls));
+ memset(&lcl, 0, sizeof(lcl));
+
+ values[i++] = CStringGetTextDatum(entry->dbname);
+
+ /* copy counters to a local variable to keep locking time short */
+ {
+ volatile StormStatsEntry *e = (volatile StormStatsEntry *) entry;
+
+ SpinLockAcquire(&e->mutex);
+ tmp = e->counters;
+ SpinLockRelease(&e->mutex);
+ }
+
+ /* See if LocalStatsHash has additional info to provide */
+ if (LocalStatsHash)
+ {
+ ssHashKey key;
+ LocalStatsEntry *le;
+ bool found;
+
+ /* Set up key for hashtable search */
+ key.dbname_len = strlen(entry->dbname);
+ key.dbname_ptr = entry->dbname;
+
+ /* Find an entry with desired hash code */
+ le = (LocalStatsEntry *) hash_search(LocalStatsHash, &key, HASH_FIND, &found);
+
+ /*
+ * What should we do if entry is not found on the other
+ * coordinators? WARN for now..
+ */
+ if (!found)
+ {
+ ereport(WARNING,
+ (errmsg("no stats collected from remote coordinators for database %s!",
+ entry->dbname)));
+ }
+ else
+ {
+ tmp.ddl_cnt += le->counters.ddl_cnt;
+ tmp.conn_cnt += le->counters.conn_cnt;
+ tmp.select_cnt += le->counters.select_cnt;
+ tmp.insert_cnt += le->counters.insert_cnt;
+ tmp.update_cnt += le->counters.update_cnt;
+ tmp.delete_cnt += le->counters.delete_cnt;
+ }
+ }
+
+ values[i++] = Int64GetDatumFast(tmp.conn_cnt);
+ values[i++] = Int64GetDatumFast(tmp.select_cnt);
+ values[i++] = Int64GetDatumFast(tmp.insert_cnt);
+ values[i++] = Int64GetDatumFast(tmp.update_cnt);
+ values[i++] = Int64GetDatumFast(tmp.delete_cnt);
+ values[i++] = Int64GetDatumFast(tmp.ddl_cnt);
+
+ Assert(i == STORM_STATS_COLS);
+
+ /* switch to appropriate context while storing the tuple */
+ MemoryContextSwitchTo(per_query_ctx);
+ tuplestore_putvalues(tupstore, tupdesc, values, nulls);
+ }
+
+ LWLockRelease(shared_state->lock);
+
+ /* clean up and return the tuplestore */
+ tuplestore_donestoring(tupstore);
+
+ /* destroy local hash table */
+ if (LocalStatsHash)
+ hash_destroy(LocalStatsHash);
+
+ MemoryContextSwitchTo(oldcontext);
+
+ return (Datum) 0;
+}
diff --git a/contrib/stormstats/stormstats.control b/contrib/stormstats/stormstats.control
new file mode 100644
index 0000000000..b7816feef9
--- /dev/null
+++ b/contrib/stormstats/stormstats.control
@@ -0,0 +1,5 @@
+# stormstats extension
+comment = 'collect deeper database stats for StormDB'
+default_version = '1.0'
+module_pathname = '$libdir/stormstats'
+relocatable = true
diff --git a/contrib/stormstats/stormstats.h b/contrib/stormstats/stormstats.h
new file mode 100644
index 0000000000..c11846d0a5
--- /dev/null
+++ b/contrib/stormstats/stormstats.h
@@ -0,0 +1,9 @@
+#ifndef STORMSTATS_H
+#define STORMSTATS_H
+
+#include "postgres.h"
+
+extern void _PG_init(void);
+extern void _PG_fini(void);
+
+#endif /* STORMSTATS_H */
diff --git a/contrib/tsm_system_time/expected/tsm_system_time.out b/contrib/tsm_system_time/expected/tsm_system_time.out
index ac44f30be9..c9557713a1 100644
--- a/contrib/tsm_system_time/expected/tsm_system_time.out
+++ b/contrib/tsm_system_time/expected/tsm_system_time.out
@@ -11,21 +11,16 @@ SELECT count(*) FROM test_tablesample TABLESAMPLE system_time (0);
0
(1 row)
--- ... and we assume that this will finish before running out of time:
-SELECT count(*) FROM test_tablesample TABLESAMPLE system_time (100000);
- count
--------
- 31
-(1 row)
-
-- bad parameters should get through planning, but not execution:
EXPLAIN (COSTS OFF)
SELECT id FROM test_tablesample TABLESAMPLE system_time (-1);
- QUERY PLAN
---------------------------------------------------
- Sample Scan on test_tablesample
- Sampling: system_time ('-1'::double precision)
-(2 rows)
+ QUERY PLAN
+--------------------------------------------------------
+ Remote Fast Query Execution
+ Node/s: datanode_1, datanode_2
+ -> Sample Scan on test_tablesample
+ Sampling: system_time ('-1'::double precision)
+(4 rows)
SELECT id FROM test_tablesample TABLESAMPLE system_time (-1);
ERROR: sample collection time must not be negative
@@ -40,15 +35,17 @@ SELECT * FROM
(VALUES (0),(100000)) v(time),
LATERAL (SELECT COUNT(*) FROM test_tablesample
TABLESAMPLE system_time (100000)) ss;
- QUERY PLAN
-------------------------------------------------------------------------
+ QUERY PLAN
+------------------------------------------------------------------------------------
Nested Loop
-> Aggregate
- -> Materialize
- -> Sample Scan on test_tablesample
- Sampling: system_time ('100000'::double precision)
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Aggregate
+ -> Materialize
+ -> Sample Scan on test_tablesample
+ Sampling: system_time ('100000'::double precision)
-> Values Scan on "*VALUES*"
-(6 rows)
+(8 rows)
SELECT * FROM
(VALUES (0),(100000)) v(time),
@@ -65,36 +62,32 @@ SELECT * FROM
(VALUES (0),(100000)) v(time),
LATERAL (SELECT COUNT(*) FROM test_tablesample
TABLESAMPLE system_time (time)) ss;
- QUERY PLAN
-----------------------------------------------------------------
+ QUERY PLAN
+----------------------------------------------------------------------------
Nested Loop
-> Values Scan on "*VALUES*"
-> Aggregate
- -> Materialize
- -> Sample Scan on test_tablesample
- Sampling: system_time ("*VALUES*".column1)
-(6 rows)
-
-SELECT * FROM
- (VALUES (0),(100000)) v(time),
- LATERAL (SELECT COUNT(*) FROM test_tablesample
- TABLESAMPLE system_time (time)) ss;
- time | count
---------+-------
- 0 | 0
- 100000 | 31
-(2 rows)
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Aggregate
+ -> Materialize
+ -> Sample Scan on test_tablesample
+ Sampling: system_time ("*VALUES*".column1)
+(8 rows)
CREATE VIEW vv AS
SELECT * FROM test_tablesample TABLESAMPLE system_time (20);
EXPLAIN (COSTS OFF) SELECT * FROM vv;
- QUERY PLAN
---------------------------------------------------
- Sample Scan on test_tablesample
- Sampling: system_time ('20'::double precision)
-(2 rows)
+ QUERY PLAN
+--------------------------------------------------------
+ Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Sample Scan on test_tablesample
+ Sampling: system_time ('20'::double precision)
+(3 rows)
DROP EXTENSION tsm_system_time; -- fail, view depends on extension
ERROR: cannot drop extension tsm_system_time because other objects depend on it
DETAIL: view vv depends on function system_time(internal)
HINT: Use DROP ... CASCADE to drop the dependent objects too.
+DROP VIEW vv;
+DROP TABLE test_tablesample;
+DROP EXTENSION tsm_system_time;
diff --git a/contrib/tsm_system_time/expected/xl_known_bugs.out b/contrib/tsm_system_time/expected/xl_known_bugs.out
new file mode 100644
index 0000000000..69998b0dea
--- /dev/null
+++ b/contrib/tsm_system_time/expected/xl_known_bugs.out
@@ -0,0 +1,33 @@
+CREATE EXTENSION tsm_system_time;
+CREATE TABLE test_tablesample (id int, name text);
+INSERT INTO test_tablesample SELECT i, repeat(i::text, 1000)
+ FROM generate_series(0, 30) s(i);
+EXPLAIN (COSTS OFF)
+SELECT * FROM
+ (VALUES (0),(100000)) v(time),
+ LATERAL (SELECT COUNT(*) FROM test_tablesample
+ TABLESAMPLE system_time (time)) ss;
+ QUERY PLAN
+----------------------------------------------------------------------------
+ Nested Loop
+ -> Values Scan on "*VALUES*"
+ -> Aggregate
+ -> Remote Subquery Scan on all (datanode_1,datanode_2)
+ -> Aggregate
+ -> Materialize
+ -> Sample Scan on test_tablesample
+ Sampling: system_time ("*VALUES*".column1)
+(8 rows)
+
+SELECT * FROM
+ (VALUES (0),(100000)) v(time),
+ LATERAL (SELECT COUNT(*) FROM test_tablesample
+ TABLESAMPLE system_time (time)) ss;
+ time | count
+--------+-------
+ 0 | 0
+ 100000 | 31
+(2 rows)
+
+DROP TABLE test_tablesample;
+DROP EXTENSION tsm_system_time;
diff --git a/contrib/tsm_system_time/sql/xl_known_bugs.sql b/contrib/tsm_system_time/sql/xl_known_bugs.sql
new file mode 100644
index 0000000000..2778b70f32
--- /dev/null
+++ b/contrib/tsm_system_time/sql/xl_known_bugs.sql
@@ -0,0 +1,18 @@
+CREATE EXTENSION tsm_system_time;
+CREATE TABLE test_tablesample (id int, name text);
+INSERT INTO test_tablesample SELECT i, repeat(i::text, 1000)
+ FROM generate_series(0, 30) s(i);
+
+EXPLAIN (COSTS OFF)
+SELECT * FROM
+ (VALUES (0),(100000)) v(time),
+ LATERAL (SELECT COUNT(*) FROM test_tablesample
+ TABLESAMPLE system_time (time)) ss;
+
+SELECT * FROM
+ (VALUES (0),(100000)) v(time),
+ LATERAL (SELECT COUNT(*) FROM test_tablesample
+ TABLESAMPLE system_time (time)) ss;
+
+DROP TABLE test_tablesample;
+DROP EXTENSION tsm_system_time;
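
For reference, the exact count asserted above (31 rows sampled with system_time (100000), i.e. a 100-second budget) is stable only because the table holds just the 31 rows produced by generate_series(0, 30) and the scan is assumed to finish within the budget. A timing-robust variant of such a test would assert bounds instead of an exact count; a minimal sketch, reusing the same table definition:

    SELECT count(*) BETWEEN 0 AND 31 AS ok
      FROM test_tablesample TABLESAMPLE system_time (100000);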