| author | Michael Paquier | 2012-05-31 04:30:20 +0000 |
|---|---|---|
| committer | Michael Paquier | 2012-05-31 04:33:08 +0000 |
| commit | af488de7241de630c68c2f8a7a5ec6efb68ad083 (patch) | |
| tree | 1b9d6e14f34864e1fb1e49f3faebf8281fe13208 /src | |
| parent | 68032614b0a99f4b0ba666b52cbbf4dd30566e70 (diff) | |
Move pgxc_clean and pgxc_ddl to contrib modules
Neither utility is directly related to the core features,
so it is better to move them to contrib.
pgxc_clean can be installed in the same way as a normal
contrib module. pgxc_ddl is not installed by default,
but is kept for future development.
The documentation is moved in the same way.
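As a rough sketch of the "normal contrib module" installation mentioned above (the contrib/pgxc_clean location and the install target are assumptions based on standard PostgreSQL contrib practice, not details confirmed by this commit):

```sh
# Build and install pgxc_clean like any other contrib module.
# contrib/pgxc_clean is an assumed path after this move; adjust to the actual tree layout.
cd contrib/pgxc_clean
make
make install    # installs the pgxc_clean binary into the configured bindir
```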
Diffstat (limited to 'src')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | src/backend/access/transam/twophase.c | 9 |
| -rw-r--r-- | src/backend/pgxc/pool/execRemote.c | 4 |
| -rw-r--r-- | src/pgxc/Makefile | 2 |
| -rw-r--r-- | src/pgxc/bin/Makefile | 18 |
| -rw-r--r-- | src/pgxc/bin/pgxc_clean/Makefile | 43 |
| -rw-r--r-- | src/pgxc/bin/pgxc_clean/pgxc_clean.c | 1053 |
| -rw-r--r-- | src/pgxc/bin/pgxc_clean/pgxc_clean.h | 13 |
| -rw-r--r-- | src/pgxc/bin/pgxc_clean/pgxc_clean_test.sh | 85 |
| -rw-r--r-- | src/pgxc/bin/pgxc_clean/txninfo.c | 338 |
| -rw-r--r-- | src/pgxc/bin/pgxc_clean/txninfo.h | 83 |
| -rw-r--r-- | src/pgxc/bin/pgxc_ddl/README | 47 |
| -rw-r--r-- | src/pgxc/bin/pgxc_ddl/pgxc.conf.sample | 20 |
| -rw-r--r-- | src/pgxc/bin/pgxc_ddl/pgxc_ddl | 443 |
13 files changed, 3 insertions, 2155 deletions
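For reference, the removed pgxc_clean utility is run from the command line against a Coordinator. The sketch below uses options taken from the usage text in the removed pgxc_clean.c; the host, port, user, and database names are placeholders:

```sh
# Dry run: report outstanding prepared (2PC) transactions in all databases without cleaning them
pgxc_clean -h coord_host -p 5432 -U postgres -a -N -v

# Commit or abort the outstanding prepared transactions of a single database
pgxc_clean -h coord_host -p 5432 -U postgres -d mydb -v
```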
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c index 411e4b2267..78755c2386 100644 --- a/src/backend/access/transam/twophase.c +++ b/src/backend/access/transam/twophase.c @@ -1257,15 +1257,6 @@ StandbyTransactionIdIsPrepared(TransactionId xid) /* * FinishPreparedTransaction: execute COMMIT PREPARED or ROLLBACK PREPARED - * - *(The following comment is only for Postgres-XC) - * - * With regard to xc_maintenance_mode related to pgxc_clean, COMMIT/ROLLBACK PREPARED - * might be called in the node where the transaction with given gid does not exist. - * This may happen at the originating Coordinator. In this case, we should - * skip to handle two-phase file. - * - * Please note that we don't have to write commit/abort log to WAL in this case. */ void FinishPreparedTransaction(const char *gid, bool isCommit) diff --git a/src/backend/pgxc/pool/execRemote.c b/src/backend/pgxc/pool/execRemote.c index 3bae9570d0..c8aef7cbaf 100644 --- a/src/backend/pgxc/pool/execRemote.c +++ b/src/backend/pgxc/pool/execRemote.c @@ -4516,8 +4516,7 @@ FinishRemotePreparedTransaction(char *prepareGID, bool commit) /* * Please note that with xc_maintenance_mode = on, COMMIT/ROLLBACK PREPARED will not - * propagate to remote nodes. Only GTM status is cleaned up. Prepared transaction - * on remote nodes will be cleaned up by pgxc_clean using EXECUTE DIRECT. + * propagate to remote nodes. Only GTM status is cleaned up. */ if (xc_maintenance_mode) { @@ -4534,6 +4533,7 @@ FinishRemotePreparedTransaction(char *prepareGID, bool commit) } return false; } + /* * Get the list of nodes involved in this transaction. * diff --git a/src/pgxc/Makefile b/src/pgxc/Makefile index eeed4ebb66..553e5deb73 100644 --- a/src/pgxc/Makefile +++ b/src/pgxc/Makefile @@ -12,6 +12,6 @@ subdir = src/pgxc top_builddir = ../.. include $(top_builddir)/src/Makefile.global -SUBDIRS = bin tools +SUBDIRS = tools $(recurse) diff --git a/src/pgxc/bin/Makefile b/src/pgxc/bin/Makefile deleted file mode 100644 index 4f04bc8984..0000000000 --- a/src/pgxc/bin/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -#---------------------------------------------------------------------------- -# -# Postgres-XC documentation tool makefile -# -# Copyright (c) 2010-2012 Postgres-XC Development Group -# -# doc-xc/Makefile -# -#---------------------------------------------------------------------------- - -subdir = src/pgxc/bin -top_builddir = ../../.. -include $(top_builddir)/src/Makefile.global - -SUBDIRS = pgxc_clean - -all distprep html man install installdirs uninstall clean distclean maintainer-clean maintainer-check: - $(MAKE) -C $(SUBDIRS) $@ diff --git a/src/pgxc/bin/pgxc_clean/Makefile b/src/pgxc/bin/pgxc_clean/Makefile deleted file mode 100644 index 12556fced7..0000000000 --- a/src/pgxc/bin/pgxc_clean/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -#------------------------------------------------------------------------- -# -# Makefile for src/pgxc/bin/pgxc_clean -# -# Portions Copyright (c) 2011 Postgres-XC Development Group -# -# $PostgreSQL$ -# -#------------------------------------------------------------------------- - -PGFILEDESC = "pgxc_clean - Abort prepared transaction for a Postgres-XC Coordinator" -subdir = src/pgxc/bin/pgxc_clean -top_builddir = ../../../.. 
-include $(top_builddir)/src/Makefile.global - -override CPPFLAGS := -I$(libpq_srcdir) $(CPPFLAGS) - -gtm_builddir = $(top_builddir)/src/gtm - -OBJS= pgxc_clean.o txninfo.o -EX_OBJS = $(gtm_builddir)/common/assert.o \ - $(gtm_builddir)/client/libgtmclient.a - -override LDFLAGS := -L$(top_builddir)/src/gtm/client $(LDFLAGS) - -LIBS= -lpthread - -pgxc_clean: $(OBJS) - $(CC) $(CFLAGS) $(EX_OBJS) $(OBJS) $(libpq_pgport) $(LDFLAGS) $(LIBS) -o $@$(X) - -all: pgxc_clean - -install: $(all) - $(INSTALL_PROGRAM) pgxc_clean$(X) '$(DESTDIR)$(bindir)/pgxc_clean$(X)' - -installdirs: - $(mkinstalldirs) '$(DESTDIR)$(bindir)' - -uninstall: - rm -f $(addprefix '$(DESTDIR)$(bindir)'/, pgxc_clean$(X)) - -clean distclean maintainer-clean: - rm -f pgxc_clean$(X) $(OBJS) diff --git a/src/pgxc/bin/pgxc_clean/pgxc_clean.c b/src/pgxc/bin/pgxc_clean/pgxc_clean.c deleted file mode 100644 index c72600d1c5..0000000000 --- a/src/pgxc/bin/pgxc_clean/pgxc_clean.c +++ /dev/null @@ -1,1053 +0,0 @@ -/* - * ------------------------------------------------------------------------ - * - * pgxc_clean utility - * - * Recovers outstanding 2PC when after crashed nodes or entire cluster - * is recovered. - * - * Depending upon how nodes/XC cluster fail, there could be outstanding - * 2PC transactions which are partly prepared and partly commited/borted. - * Such transactions must be commited/aborted to remove them from the - * snapshot. - * - * This utility checks if there's such outstanding transactions and - * cleans them up. - * - * Command syntax - * - * pgxc_clean [option ... ] [database] [user] - * - * Options are: - * - * -a, --all cleanup all the database avilable - * -d, --dbname=DBNAME database name to clean up. Multiple -d option - * can be specified. - * -h, --host=HOSTNAME Coordinator hostname to connect to. - * -N, --no-clean only test. no cleanup actually. - * -o, --output=FILENAME output file name. - * -p, --port=PORT Coordinator port number. - * -q, --quiet do not print messages except for error, default. - * -s, --status prints out 2PC status. - * -U, --username=USERNAME database user name - * -v, --verbose same as -s, plus prints result of each cleanup. - * -V, --version prints out the version, - * -w, --no-password never prompt for the password. 
- * -W, --password prompt for the password, - * -?, --help prints help message - * - * ------------------------------------------------------------------------ - */ - -#include <sys/types.h> -#include <unistd.h> -#include <stdio.h> -#include <pwd.h> -#include <errno.h> -#include "libpq-fe.h" -#include "pg_config.h" -#include "getopt_long.h" -#include "pgxc_clean.h" -#include "txninfo.h" -#include "port.h" - -/* Who I am */ -const char *progname; -char *my_nodename; -int my_nodeidx = -1; /* Index in pgxc_clean_node_info */ - -/* Databases to clean */ -bool clean_all_databases = false; /* "--all" overrides specific database specification */ - -database_names *head_database_names = NULL; -database_names *last_database_names = NULL; - -/* Coordinator to connect to */ -char *coordinator_host = NULL; -int coordinator_port = -1; - -typedef enum passwd_opt -{ - TRI_DEFAULT, - TRI_YES, - TRI_NO -} passwd_opt; - -/* Miscellaneous */ -char *output_filename = NULL; -char *username = NULL; -bool version_opt = false; -passwd_opt try_password_opt = TRI_DEFAULT; -bool status_opt = false; -bool no_clean_opt = false; -bool verbose_opt = false; -FILE *outf; -FILE *errf; - -/* Global variables */ -node_info *pgxc_clean_node_info; -int pgxc_clean_node_count; - -database_info *head_database_info; -database_info *last_database_info; - -static char *password = NULL; -static char password_prompt[256]; - -/* Funcs */ -static void add_to_database_list(char *dbname); -static void parse_pgxc_clean_options(int argc, char *argv[]); -static void usage(void); -static char *GetUserName(void); -static void showVersion(void); -static PGconn *loginDatabase(char *host, int port, char *user, char *password, - char *dbname, const char *progname, char *encoding, char *password_prompt); -static void getMyNodename(PGconn *conn); -static void recover2PCForDatabase(database_info *db_info); -static void recover2PC(PGconn *conn, txn_info *txn); -static void getDatabaseList(PGconn *conn); -static void getNodeList(PGconn *conn); -static void showVersion(void); -static void add_to_database_list(char *dbname); -static void parse_pgxc_clean_options(int argc, char *argv[]); -static void usage(void); -static void getPreparedTxnList(PGconn *conn); -static void getTxnInfoOnOtherNodesAll(PGconn *conn); -static void do_commit(PGconn *conn, txn_info *txn); -static void do_abort(PGconn *conn, txn_info *txn); -static void do_commit_abort(PGconn *conn, txn_info *txn, bool is_commit); -static bool setMaintenanceMode(PGconn *conn); - -/* - * Connection to the Coordinator - */ -PGconn *coord_conn; - -/* - * - * Main - * - */ -int main(int argc, char *argv[]) -{ - - /* Should setup pglocale when it is supported by XC core */ - - if (argc > 1) - { - if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0) - { - usage(); - exit(0); - } - if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0) - { - showVersion(); - exit(0); - } - } - parse_pgxc_clean_options(argc, argv); - - /* - * Check missing arguments - */ - if (clean_all_databases == false && head_database_names == NULL) - { - fprintf(stderr, "%s: you must specify -a or -d option.\n", progname); - exit(1); - } - - /* - * Arrange my environment - */ - if (output_filename) - { - /* Prepare output filename */ - outf = fopen(output_filename, "w"); - if (outf == NULL) - { - fprintf(stderr, "%s: Cannot ope output file %s (%s)\n", progname, output_filename, strerror(errno)); - exit(1); - } - errf = outf; - } - else - { - outf = stdout; - errf = stderr; - } - if (coordinator_host == 
NULL) - { - /* Default Coordinator host */ - if ((coordinator_host = getenv("PGHOST")) == NULL) - coordinator_host = "localhost"; - } - if (coordinator_port == -1) - { - /* Default Coordinator port */ - char *pgport; - - if ((pgport = getenv("PGPORT")) == NULL) - coordinator_port = DEF_PGPORT; /* pg_config.h */ - else - coordinator_port = atoi(pgport); - } - if (username == NULL) - strcpy(password_prompt, "Password: "); - else - sprintf(password_prompt, "Password for user %s: ", username); - if (try_password_opt == TRI_YES) - password = simple_prompt(password_prompt, 100, false); - - if (verbose_opt) - { - /* Print environments */ - fprintf(outf, "%s (%s): Cleanup outstanding 2PCs.\n", progname, PG_VERSION); - /* Target databaess */ - fprintf(outf, "Target databases:"); - if (clean_all_databases) - fprintf(outf, "(ALL)\n"); - else - { - database_names *cur_name; - - for(cur_name = head_database_names; cur_name; cur_name = cur_name->next) - fprintf(outf, " %s", cur_name->database_name); - fprintf(outf, "\n"); - } - /* Username to use */ - fprintf(outf, "Username: %s\n", username ? username : "default"); - /* Status opt */ - fprintf(outf, "Status opt: %s\n", status_opt ? "on" : "off"); - /* No-dlean opt */ - fprintf(outf, "no-clean: %s\n", no_clean_opt ? "on" : "off"); - } - - /* Connect to XC server */ - if (verbose_opt) - { - fprintf(outf, "%s: connecting to database \"%s\", host: \"%s\", port: %d\n", - progname, - clean_all_databases ? "postgres" : head_database_names->database_name, - coordinator_host, coordinator_port); - } - coord_conn = loginDatabase(coordinator_host, coordinator_port, username, password, - clean_all_databases ? "postgres" : head_database_names->database_name, - progname, "auto", password_prompt); - if (verbose_opt) - { - fprintf(outf, "%s: connected successfully\n", progname); - } - - /* - * Get my nodename (connected Coordinator) - */ - getMyNodename(coord_conn); - if (verbose_opt) - { - fprintf(outf, "%s: Connected to the node \"%s\"\n", progname, my_nodename); - } - - /* - * Get available databases - * - * pgxc_clean assumes that all the database are available from the connecting Coordinator. - * Some (expert) DBA can create a database local to subset of the node by EXECUTE DIRECT. - * In this case, DBA may have to clean outstanding 2PC transactions manually or clean - * 2PC transactions by connecting pgxc_clean to different Coordinators. - * - * If such node-subset database is found to be used widely, pgxc_clean may need - * an extension to deal with this case. - */ - getDatabaseList(coord_conn); - if (verbose_opt) - { - database_info *cur_database; - - fprintf(outf, "%s: Databases visible from the node \"%s\": ", progname, my_nodename); - - for (cur_database = head_database_info; cur_database; cur_database = cur_database->next) - { - fprintf(outf, " \"%s\"", cur_database->database_name); - } - fputc('\n', outf); - } - - /* - * Get list of Coordinators - * - * As in the case of database, we clean transactions in visible nodes from the - * connecting Coordinator. DBA can also setup different node configuration - * at different Coordinators. In this case, DBA should be careful to choose - * appropriate Coordinator to clean up transactions. 
- */ - getNodeList(coord_conn); - if (verbose_opt) - { - int ii; - - fprintf(outf, "%s: Node list visible from the node \"%s\"\n", progname, my_nodename); - - for (ii = 0; ii < pgxc_clean_node_count; ii++) - { - fprintf(outf, "Name: %s, host: %s, port: %d, type: %s\n", - pgxc_clean_node_info[ii].node_name, - pgxc_clean_node_info[ii].host, - pgxc_clean_node_info[ii].port, - pgxc_clean_node_info[ii].type == NODE_TYPE_COORD ? "coordinator" : "datanode"); - } - } - - /* - * Get list of prepared statement - */ - getPreparedTxnList(coord_conn); - - /* - * Check if there're any 2PC candidate to recover - */ - if (!check2PCExists()) - { - fprintf(errf, "%s: There's no prepared 2PC in this cluster. Exiting.\n", progname); - exit(0); - } - - - /* - * Check status of each prepared transaction. To do this, look into - * nodes where the transaction is not recorded as "prepared". - * Possible status are unknown (prepare has not been issued), committed or - * aborted. - */ - getTxnInfoOnOtherNodesAll(coord_conn); - if (verbose_opt) - { - /* Print all the prepared transaction list */ - database_info *cur_db; - - fprintf(outf, "%s: 2PC transaction list.\n", progname); - for (cur_db = head_database_info; cur_db; cur_db = cur_db->next) - { - txn_info *txn; - - fprintf(outf, "Database: \"%s\":\n", cur_db->database_name); - - for (txn = cur_db->head_txn_info; txn; txn = txn->next) - { - int ii; - - fprintf(outf, " gxid: %d, xid: \"%s\", owner: %s\n", txn->gxid, txn->xid, txn->owner); - for (ii = 0; ii < pgxc_clean_node_count; ii++) - { - fprintf(outf, " node: %s, status: %s\n", - pgxc_clean_node_info[ii].node_name, - str_txn_stat(txn->txn_stat[ii])); - } - } - } - } - - /* - * Then disconnect from the database. - * I need to login to specified databases which 2PC is issued for. Again, we assume - * that all the prepare is issued against the same database in each node, which - * current Coordinator does and there seems to be no way to violate this assumption. - */ - if (verbose_opt) - { - fprintf(outf, "%s: disconnecting\n", progname); - } - PQfinish(coord_conn); - - /* - * If --no-clean option is specified, we exit here. - */ - if (no_clean_opt) - { - fprintf(outf, "--no-clean opt is specified. 
Exiting.\n"); - exit(0); - } - - /* - * Recover 2PC for specified databases - */ - if (clean_all_databases) - { - database_info *cur_database_info; - - for(cur_database_info = head_database_info; cur_database_info; cur_database_info = cur_database_info->next) - { - recover2PCForDatabase(cur_database_info); - } - } - else - { - database_info *cur_database_info; - database_names *cur_database_name; - - for(cur_database_name = head_database_names; cur_database_name; cur_database_name = cur_database_name->next) - { - cur_database_info = find_database_info(cur_database_name->database_name); - if (cur_database_info) - { - recover2PCForDatabase(cur_database_info); - } - } - } - exit(0); -} - -static void -getMyNodename(PGconn *conn) -{ - static const char *stmt = "SELECT pgxc_node_str()"; - PGresult *res; - - res = PQexec(conn, stmt); - /* Error handling here */ - my_nodename = strdup(PQgetvalue(res, 0, 0)); - PQclear(res); -} - -static void -recover2PCForDatabase(database_info *db_info) -{ - PGconn *coord_conn; - txn_info *cur_txn; - - if (verbose_opt) - fprintf(outf, "%s: recovering 2PC for database \"%s\"\n", progname, db_info->database_name); - coord_conn = loginDatabase(coordinator_host, coordinator_port, username, password, db_info->database_name, - progname, "auto", password_prompt); - if (coord_conn == NULL) - { - fprintf(errf, "Could not connect to the database %s.\n", db_info->database_name); - return; - } - if (!setMaintenanceMode(coord_conn)) - { - /* Cannot recover */ - fprintf(errf, "Skipping database %s.\n", db_info->database_name); - PQfinish(coord_conn); - return; - } - if (verbose_opt) - fprintf(outf, "%s: connected to the database \"%s\"\n", progname, db_info->database_name); - for(cur_txn = db_info->head_txn_info; cur_txn; cur_txn = cur_txn->next) - { - recover2PC(coord_conn, cur_txn); - } - PQfinish(coord_conn); -} - -static void -recover2PC(PGconn *conn, txn_info *txn) -{ - TXN_STATUS txn_stat; - - txn_stat = check_txn_global_status(txn); - if (verbose_opt) - { - fprintf(outf, " Recovering TXN: gxid: %d, xid: \"%s\", owner: \"%s\", global status: %s\n", - txn->gxid, txn->xid, txn->owner, str_txn_stat(txn_stat)); - } - switch (txn_stat) - { - case TXN_STATUS_FAILED: - if (verbose_opt) - fprintf(outf, " Recovery not needed.\n"); - return; - case TXN_STATUS_PREPARED: - if (verbose_opt) - fprintf(outf, " Recovery not needed.\n"); - return; - case TXN_STATUS_COMMITTED: - do_commit(conn, txn); - return; - case TXN_STATUS_ABORTED: - do_abort(conn, txn); - return; - default: - fprintf(stderr, " Unknown TXN status, pgxc_clean error.\n"); - exit(1); - } - return; -} - -static void -do_commit(PGconn *conn, txn_info *txn) -{ - do_commit_abort(conn, txn, true); -} - -static void -do_abort(PGconn *conn, txn_info *txn) -{ - do_commit_abort(conn, txn, false); -} - -static void -do_commit_abort(PGconn *conn, txn_info *txn, bool is_commit) -{ - int ii; - static const char *EXEC_DIRECT_STMT_FMT = "EXECUTE DIRECT ON %s '%s PREPARED ''%s'';';"; - static const char *GLOBAL_STMT_FMT = "%s PREPARED '%s';"; - char stmt[1024]; - PGresult *res; - ExecStatusType res_status; - - if (verbose_opt) - fprintf(outf, " %s... ", is_commit ? "committing" : "aborting"); - for (ii = 0; ii < pgxc_clean_node_count; ii++) - { - if (txn->txn_stat[ii] == TXN_STATUS_PREPARED && ii != my_nodeidx) - { - - sprintf(stmt, EXEC_DIRECT_STMT_FMT, - pgxc_clean_node_info[ii].node_name, - is_commit ? 
"COMMIT" : "ROLLBACK", - txn->xid); - res = PQexec(conn, stmt); - res_status = PQresultStatus(res); - if (verbose_opt) - { - if (res_status == PGRES_COMMAND_OK || res_status == PGRES_TUPLES_OK) - fprintf(outf, "succeeded (%s), ", pgxc_clean_node_info[ii].node_name); - else - fprintf(outf, "failed (%s: %s), ", - pgxc_clean_node_info[ii].node_name, - PQresultErrorMessage(res)); - } - else - { - if (res_status != PGRES_COMMAND_OK && res_status != PGRES_TUPLES_OK) - { - fprintf(errf, "Failed to recover TXN, gxid: %d, xid: \"%s\", owner: \"%s\", node: \"%s\" (%s)\n", - txn->gxid, txn->xid, txn->owner, pgxc_clean_node_info[ii].node_name, - PQresultErrorMessage(res)); - } - } - PQclear(res); - } - } - /* Issue global statment */ - sprintf(stmt, GLOBAL_STMT_FMT, - is_commit ? "COMMIT" : "ROLLBACK", - txn->xid); - res = PQexec(conn, stmt); - res_status = PQresultStatus(res); - if (verbose_opt) - { - if (res_status == PGRES_COMMAND_OK || res_status == PGRES_TUPLES_OK) - fprintf(outf, "succeeded (%s)\n", my_nodename); - else - fprintf(outf, "failed (%s: %s)\n", - my_nodename, - PQresultErrorMessage(res)); - } - else if (res_status != PGRES_COMMAND_OK && res_status != PGRES_TUPLES_OK) - { - fprintf(errf, "Failed to recover TXN, gxid: %d, xid: \"%s\", owner: \"%s\", node: \"%s\" (%s)\n", - txn->gxid, txn->xid, txn->owner, my_nodename, PQresultErrorMessage(res)); - } - PQclear(res); -} - -#if 0 -static database_info * -find_database_info(char *dbname) -{ - database_info *cur_database_info; - - for(cur_database_info = head_database_info; cur_database_info; cur_database_info = cur_database_info->next) - { - if (strcmp(cur_database_info->database_name, dbname) == 0) - return(cur_database_info); - } - return(NULL); -} -#endif - - -static PGconn * -loginDatabase(char *host, int port, char *user, char *password, char *dbname, const char *progname, char *encoding, char *password_prompt) -{ - bool new_pass = false; - PGconn *coord_conn; - char port_s[32]; - - sprintf(port_s, "%d", port); - - /* Loop until we have a password if requested by backend */ - do - { -#define PARAMS_ARRAY_SIZE 8 - const char **keywords = malloc(PARAMS_ARRAY_SIZE * sizeof(*keywords)); - const char **values = malloc(PARAMS_ARRAY_SIZE * sizeof(*values)); - - if (keywords == NULL || values == NULL) - { - fprintf(stderr, "No more memory.\n"); - exit(1); - } - - - keywords[0] = "host"; - values[0] = host; - keywords[1] = "port"; - values[1] = port_s; - keywords[2] = "user"; - values[2] = user; - keywords[3] = "password"; - values[3] = password; - keywords[4] = "dbname"; - values[4] = dbname; - keywords[5] = "fallback_application_name"; - values[5] = progname; - keywords[6] = "client_encoding"; - values[6] = encoding; - keywords[7] = NULL; - values[7] = NULL; - - new_pass = false; - coord_conn = PQconnectdbParams(keywords, values, true); - free(keywords); - free(values); - - if (PQstatus(coord_conn) == CONNECTION_BAD && - PQconnectionNeedsPassword(coord_conn) && - password == NULL && - try_password_opt != TRI_NO) - { - PQfinish(coord_conn); - password = simple_prompt(password_prompt, 100, false); - new_pass = true; - } - } while (new_pass); - - return(coord_conn); -} - - -static TXN_STATUS -getTxnStatus(PGconn *conn, GlobalTransactionId gxid, int node_idx) -{ - char *node_name; - char stmt[1024]; - PGresult *res; - char *res_s; - - static const char *STMT_FORM = "EXECUTE DIRECT ON %s 'SELECT pgxc_is_committed(''%d''::xid);'"; - - node_name = pgxc_clean_node_info[node_idx].node_name; - sprintf(stmt, STMT_FORM, node_name, gxid); - - res = 
PQexec(conn, stmt); - if (res == NULL || PQresultStatus(res) != PGRES_TUPLES_OK) - { - fprintf(stderr, "Could not obtain transaction status for node %s, gxid %d\n", node_name, gxid); - exit(1); - } - if (PQgetisnull(res, 0, 0)) - return TXN_STATUS_UNKNOWN; - res_s = PQgetvalue(res, 0, 0); - if (strcmp(res_s, "t") == 0) - return TXN_STATUS_COMMITTED; - else - return TXN_STATUS_ABORTED; -} - -static void -getTxnInfoOnOtherNodes(PGconn *conn, txn_info *txn) -{ - int ii; - - for (ii = 0; ii < pgxc_clean_node_count; ii++) - { - if (txn->txn_stat[ii] == TXN_STATUS_INITIAL) - txn->txn_stat[ii] = getTxnStatus(conn, txn->gxid, ii); - } -} - - -static void -getTxnInfoOnOtherNodesForDatabase(PGconn *conn, database_info *database) -{ - txn_info *cur_txn; - - for (cur_txn = database->head_txn_info; cur_txn; cur_txn = cur_txn->next) - { - getTxnInfoOnOtherNodes(conn, cur_txn); - } -} - - -static void -getTxnInfoOnOtherNodesAll(PGconn *conn) -{ - database_info *cur_database; - - for (cur_database = head_database_info; cur_database; cur_database = cur_database->next) - { - getTxnInfoOnOtherNodesForDatabase(conn, cur_database); - } -} - - - -static void -getPreparedTxnListOfNode(PGconn *conn, int idx) -{ - int prep_txn_count; - int ii; - PGresult *res; - ExecStatusType pq_status; - -#define MAX_STMT_LEN 1024 - - /* SQL Statement */ - static const char *STMT_GET_PREP_TXN_ON_NODE - = "EXECUTE DIRECT ON %s 'SELECT TRANSACTION, GID, OWNER, DATABASE FROM PG_PREPARED_XACTS;'"; - char stmt[MAX_STMT_LEN]; - - sprintf(stmt, STMT_GET_PREP_TXN_ON_NODE, - pgxc_clean_node_info[idx].node_name); - - res = PQexec(conn, stmt); - if (res == NULL || (pq_status = PQresultStatus(res)) != PGRES_TUPLES_OK) - { - fprintf(stderr, "Could not obtain prepared transaction list for node %s.(%s)\n", - pgxc_clean_node_info[idx].node_name, res ? 
PQresultErrorMessage(res) : ""); - PQclear(res); - exit (1); - } - prep_txn_count = PQntuples(res); - for (ii = 0; ii < prep_txn_count; ii++) - { - GlobalTransactionId gxid; - char *xid; - char *owner; - char *database_name; - - gxid = atoi(PQgetvalue(res, ii, 0)); - xid = strdup(PQgetvalue(res, ii, 1)); - owner = strdup(PQgetvalue(res, ii, 2)); - database_name = strdup(PQgetvalue(res, ii, 3)); - - add_txn_info(database_name, pgxc_clean_node_info[idx].node_name, gxid, xid, owner, - TXN_STATUS_PREPARED); - free(xid); - free(owner); - free(database_name); - } - PQclear(res); -} - -static void -getPreparedTxnList(PGconn *conn) -{ - int ii; - - for (ii = 0; ii < pgxc_clean_node_count; ii++) - { - getPreparedTxnListOfNode(conn, ii); - } -} - -static void -getDatabaseList(PGconn *conn) -{ - int database_count; - int ii; - PGresult *res; - - /* SQL Statement */ - static const char *STMT_GET_DATABASE_LIST = "SELECT DATNAME FROM PG_DATABASE;"; - - /* - * Get database list - */ - res = PQexec(conn, STMT_GET_DATABASE_LIST); - if (res == NULL || PQresultStatus(res) != PGRES_TUPLES_OK) - { - fprintf(stderr, "Could not obtain database list.\n"); - PQclear(res); - exit (1); - } - database_count = PQntuples(res); - for(ii = 0; ii < database_count; ii++) - add_database_info(PQgetvalue(res, ii, 0)); - PQclear(res); -} - -static void -getNodeList(PGconn *conn) -{ - int ii; - PGresult *res; - - /* SQL Statement */ - static const char *STMT_GET_NODE_INFO = "SELECT NODE_NAME, NODE_TYPE, NODE_PORT, NODE_HOST FROM PGXC_NODE;"; - - res = PQexec(conn, STMT_GET_NODE_INFO); - if (res == NULL || PQresultStatus(res) != PGRES_TUPLES_OK) - { - fprintf(stderr, "Could not obtain node list.\n"); - PQclear(res); - exit (1); - } - pgxc_clean_node_count = PQntuples(res); - pgxc_clean_node_info = (node_info *)malloc(sizeof(node_info) * pgxc_clean_node_count); - if (pgxc_clean_node_info == NULL) - { - fprintf(stderr, "No more memory.\n"); - exit(1); - } - - for (ii = 0; ii < pgxc_clean_node_count; ii++) - { - char *node_name; - char *node_type_c; - NODE_TYPE node_type; - int port; - char *host; - - node_name = strdup(PQgetvalue(res, ii, 0)); - node_type_c = strdup(PQgetvalue(res, ii, 1)); - switch (node_type_c[0]) - { - case 'C': - /* pgxc_clean has to connect to the Coordinator */ - node_type = NODE_TYPE_COORD; - if (strcmp(node_name, my_nodename) == 0) - my_nodeidx = ii; - break; - case 'D': - node_type = NODE_TYPE_DATANODE; - break; - default: - fprintf(stderr, "Invalid catalog data (node_type), node_name: %s, node_type: %s\n", node_name, node_type_c); - exit(1); - } - port = atoi(PQgetvalue(res, ii, 2)); - host = strdup(PQgetvalue(res, ii, 3)); - set_node_info(node_name, port, host, node_type, ii); - free(node_name); - free(node_type_c); - free(host); - } - /* Check if local Coordinator has been found */ - if (my_nodeidx == -1) - { - fprintf(stderr, "Failed to identify the coordinator which %s is connecting to. 
", progname); - fprintf(stderr, "Connecting to a wrong node.\n"); - exit(1); - } -} - - - -static void -showVersion(void) -{ - puts("pgxc_clean (Postgres-XC) " PGXC_VERSION); -} - -static void -add_to_database_list(char *dbname) -{ - if (head_database_names == NULL) - { - head_database_names = last_database_names = (database_names *)malloc(sizeof(database_names)); - if (head_database_names == NULL) - { - fprintf(stderr, "No more memory, FILE:%s, LINE:%d.\n", __FILE__, __LINE__); - exit(1); - } - } - else - { - last_database_names->next = (database_names *)malloc(sizeof(database_names)); - if (last_database_names->next == NULL) - { - fprintf(stderr, "No more memory, FILE:%s, LINE:%d.\n", __FILE__, __LINE__); - exit(1); - } - last_database_names = last_database_names->next; - } - last_database_names->next = NULL; - last_database_names->database_name = dbname; -} - -static void -parse_pgxc_clean_options(int argc, char *argv[]) -{ - static struct option long_options[] = - { - {"all", no_argument, NULL, 'a'}, - {"dbname", required_argument, NULL, 'd'}, - {"host", required_argument, NULL, 'h'}, - {"no-clean", no_argument, NULL, 'N'}, - {"output", required_argument, NULL, 'o'}, - {"port", required_argument, NULL, 'p'}, - {"quiet", no_argument, NULL, 'q'}, - {"username", required_argument, NULL, 'U'}, - {"verbose", no_argument, NULL, 'v'}, - {"version", no_argument, NULL, 'V'}, - {"no-password", no_argument, NULL, 'w'}, - {"password", no_argument, NULL, 'W'}, - {"help", no_argument, NULL, '?'}, - {"status", no_argument, NULL, 's'}, - {NULL, 0, NULL, 0} - }; - - int optindex; - extern char *optarg; - extern int optind; - int c; - - progname = get_progname(argv[0]); /* Should be more fancy */ - - while ((c = getopt_long(argc, argv, "ad:h:No:p:qU:vVwWs?", long_options, &optindex)) != -1) - { - switch(c) - { - case 'a': - clean_all_databases = true; - break; - case 'd': - add_to_database_list(optarg); - break; - case 'h': - coordinator_host = optarg; - break; - case 'N': - no_clean_opt = true; - break; - case 'o': - output_filename = optarg; - break; - case 'p': - coordinator_port = atoi(optarg); - break; - case 'q': - verbose_opt = false; - break; - case 'U': - username = optarg; - break; - case 'V': - version_opt = 0; - break; - case 'v': - verbose_opt = true; - break; - case 'w': - try_password_opt = TRI_NO; - break; - case 'W': - try_password_opt = TRI_YES; - break; - case 's': - status_opt = true; - break; - case '?': - if (strcmp(argv[optind - 1], "-?") == 0 || strcmp(argv[optind - 1], "--help") == 0) - { - usage(); - exit(0); - } - else - { - fprintf(stderr, "Try \"%s --help\" for more information.\n", progname); - exit(1); - } - break; - default: - fprintf(stderr, "Try \"%s --help\" for more information.\n", progname); - exit(1); - break; - } - } - - while (argc - optind >= 1) - { - if (head_database_names == NULL) - add_to_database_list(argv[optind]); - if (username == NULL) - username = argv[optind]; - else - fprintf(stderr, "%s: warning: extra command-line argument \"%s\" ignored\n", - progname, argv[optind]); - optind++; - } -} - -static bool setMaintenanceMode(PGconn *conn) -{ - static const char *SetMaintenanceModeCommand = "SET xc_maintenance_mode = on;"; - PGresult *res; - ExecStatusType res_status; - - res = PQexec(conn, SetMaintenanceModeCommand); - res_status = PQresultStatus(res); - if (res_status == PGRES_COMMAND_OK || res_status == PGRES_TUPLES_OK) - return true; - else - fprintf(errf, "Failed to set xc_maintenance_mode. 
(%s)\n", - PQresultErrorMessage(res)); - return false; -} - -static char *GetUserName(void) -{ - struct passwd *passwd; - - passwd = getpwuid(getuid()); - if (passwd) - return(strdup(passwd->pw_name)); - else - { - fprintf(stderr, "%s: could not get current user name: %s\n", progname, strerror(errno)); - exit(1); - } - return NULL; -} - -static void usage(void) -{ - char *env; - char *user; - - user = getenv("PGUSER"); - if (!user) - user = GetUserName(); - - printf("pgxc_clean cleans up outstanding 2PCs after failed node is recovered.\n" - "Usage:\n" - "pgxc_clean [OPTION ...] [DBNAME [USERNAME]]\n\n" - "Options:\n"); - - env = getenv("PGDATABASE"); - if (!env) - env = user; - printf(" -a, --all cleanup all the databases available.\n"); - printf(" -d, --dbname=DBNAME database name to clean up (default: \"%s\")\n", env); - env = getenv("PGHOST"); - printf(" -h, --host=HOSTNAME target coordinator host address, (default: \"%s\")\n", env ? env : "local socket"); - printf(" -N, no-clean only collect 2PC information. Do not recover them\n"); - printf(" -o, --output=FILENAME output file name.\n"); - env = getenv("PGPORT"); - printf(" -p, --port=PORT port number of the coordinator (default: \"%s\")\n", env ? env : DEF_PGPORT_STR); - printf(" -q, --quiet quiet mode. do not print anything but error information.\n"); - printf(" -s, --status prints out 2PC status\n"); - env = getenv("PGUSER"); - if (!env) - env = user; - printf(" -U, --username=USERNAME database user name (default: \"%s\")\n", env); - printf(" -v, --verbose print recovery information.\n"); - printf(" -V, --version prints out the version.\n"); - printf(" -w, --no-password never prompt for the password.\n"); - printf(" -W, --password prompt for the password.\n"); - printf(" -?, --help print this message.\n"); -} diff --git a/src/pgxc/bin/pgxc_clean/pgxc_clean.h b/src/pgxc/bin/pgxc_clean/pgxc_clean.h deleted file mode 100644 index cc3def7c7f..0000000000 --- a/src/pgxc/bin/pgxc_clean/pgxc_clean.h +++ /dev/null @@ -1,13 +0,0 @@ -#ifndef PGXC_CLEAN -#define PGXC_CLEAN - -typedef struct database_names -{ - struct database_names *next; - char *database_name; -} database_names; - -extern FILE *outf; -extern FILE *errf; - -#endif /* PGXC_CLEAN */ diff --git a/src/pgxc/bin/pgxc_clean/pgxc_clean_test.sh b/src/pgxc/bin/pgxc_clean/pgxc_clean_test.sh deleted file mode 100644 index 39c9b98def..0000000000 --- a/src/pgxc/bin/pgxc_clean/pgxc_clean_test.sh +++ /dev/null @@ -1,85 +0,0 @@ -#!/bin/bash -# -# This script sets up test environment for pgxc_clean. -# Please note that all the prepared transactions are -# partially committed or aborted. -# -# You should configure PGPORT and PGHOST to connect, as -# well as node names for your test environment. -# -# Before you run this script, XC should be up and ready. -# Also, this may try to drop test databases. You may need -# to run CLEAN CONNECTION satement for each coordinator in -# advance. 
-# - -if [ $# -le 0 ] -then - create=no -else - if [ $1 = create ] - then - create=yes - else - create=no - fi -fi - -export PGPORT=20004 -exprot PGHOST=localhost -sourcedb=postgres - -if [ $create = yes ] -then -psql -e $sourcedb <<EOF -drop database if exists test1; -drop database if exists test2; -drop database if exists test3; -create database test1; -create database test2; -create database test3; -\q -EOF -fi - -psql -e test1 <<EOF -drop table if exists t; -begin; -create table t (a int); -prepare transaction 'test1_1'; -\q -EOF - -psql -e test2 <<EOF -drop table if exists t; -begin; -create table t (a int); -prepare transaction 'test2_1'; -\q -EOF - -psql -e test3 <<EOF -drop table if exists t; -begin; -create table t (a int); -prepare transaction 'test3_1'; -\q -EOF - -psql -e test1 <<EOF -set xc_maintenance_mode = on; -execute direct on node1 'commit prepared ''test1_1'' '; -\q -EOF - -psql -e test2 <<EOF -set xc_maintenance_mode = on; -execute direct on node2 'commit prepared ''test2_1'' '; -\q -EOF - -psql -e test3 <<EOF -set xc_maintenance_mode = on; -execute direct on node1 'rollback prepared ''test3_1'' '; -\q -EOF
\ No newline at end of file diff --git a/src/pgxc/bin/pgxc_clean/txninfo.c b/src/pgxc/bin/pgxc_clean/txninfo.c deleted file mode 100644 index 29a96ea92d..0000000000 --- a/src/pgxc/bin/pgxc_clean/txninfo.c +++ /dev/null @@ -1,338 +0,0 @@ -#include "txninfo.h" - -static int check_xid_is_implicit(char *xid); -static txn_info *find_txn(TransactionId gxid); -static txn_info *make_txn_info(char *dbname, TransactionId gxid, char *xid, char *owner); - -database_info *find_database_info(char *database_name) -{ - database_info *cur_database_info = head_database_info; - - for (;cur_database_info; cur_database_info = cur_database_info->next) - { - if(strcmp(cur_database_info->database_name, database_name) == 0) - return(cur_database_info); - } - return(NULL); -} - -database_info *add_database_info(char *database_name) -{ - database_info *rv; - - if ((rv = find_database_info(database_name)) != NULL) - return rv; /* Already in the list */ - rv = malloc(sizeof(database_info)); - if (rv == NULL) - return NULL; - rv->next = NULL; - rv->database_name = strdup(database_name); - if (rv->database_name == NULL) - { - free(rv); - return NULL; - } - rv->head_txn_info = NULL; - rv->last_txn_info = NULL; - if (head_database_info == NULL) - { - head_database_info = last_database_info = rv; - return rv; - } - else - { - last_database_info->next = rv; - last_database_info = rv; - return rv; - } -} - -int set_node_info(char *node_name, int port, char *host, NODE_TYPE type, int index) -{ - node_info *cur_node_info; - - if (index >= pgxc_clean_node_count) - return -1; - cur_node_info = &pgxc_clean_node_info[index]; - if (cur_node_info->node_name) - free(cur_node_info->node_name); - if (cur_node_info->host) - free(cur_node_info->host); - cur_node_info->node_name = strdup(node_name); - if (cur_node_info->node_name == NULL) - return -1; - cur_node_info->port = port; - cur_node_info->host = strdup(host); - if (cur_node_info->host == NULL) - return -1; - cur_node_info->type = type; - return 0; -} - -node_info *find_node_info(char *node_name) -{ - int i; - for (i = 0; i < pgxc_clean_node_count; i++) - { - if (pgxc_clean_node_info[i].node_name == NULL) - continue; - if (strcmp(pgxc_clean_node_info[i].node_name, node_name) == 0) - return &pgxc_clean_node_info[i]; - } - return(NULL); -} - -int find_node_index(char *node_name) -{ - int i; - for (i = 0; i < pgxc_clean_node_count; i++) - { - if (pgxc_clean_node_info[i].node_name == NULL) - continue; - if (strcmp(pgxc_clean_node_info[i].node_name, node_name) == 0) - return i; - } - return -1; -} - -int add_txn_info(char *dbname, char *node_name, TransactionId gxid, char *xid, char *owner, TXN_STATUS status) -{ - txn_info *txn; - int nodeidx; - - if ((txn = find_txn(gxid)) == NULL) - { - txn = make_txn_info(dbname, gxid, xid, owner); - if (txn == NULL) - { - fprintf(stderr, "No more memory.\n"); - exit(1); - } - } - nodeidx = find_node_index(node_name); - txn->txn_stat[nodeidx] = status; - return 1; -} - - -static txn_info * -make_txn_info(char *dbname, TransactionId gxid, char *xid, char *owner) -{ - database_info *dbinfo; - txn_info *txn; - - if ((dbinfo = find_database_info(dbname)) == NULL) - dbinfo = add_database_info(dbname); - txn = (txn_info *)malloc(sizeof(txn_info)); - if (txn == NULL) - return NULL; - memset(txn, 0, sizeof(txn_info)); - txn->gxid = gxid; - txn->xid = strdup(xid); - if (txn->xid == NULL) - { - free(txn); - return NULL; - } - txn->owner = strdup(owner); - if (txn->owner == NULL) - { - free(txn); - return NULL; - } - if (dbinfo->head_txn_info == NULL) - { - 
dbinfo->head_txn_info = dbinfo->last_txn_info = txn; - } - else - { - dbinfo->last_txn_info->next = txn; - dbinfo->last_txn_info = txn; - } - txn->txn_stat = (TXN_STATUS *)malloc(sizeof(TXN_STATUS) * pgxc_clean_node_count); - if (txn->txn_stat == NULL) - return(NULL); - memset(txn->txn_stat, sizeof(TXN_STATUS) * pgxc_clean_node_count, 0); - return txn; -} - - -/* Ugly ---> Remove this */ -txn_info *init_txn_info(char *database_name, TransactionId gxid) -{ - database_info *database; - txn_info *cur_txn_info; - - if ((database = find_database_info(database_name)) == NULL) - return NULL; - - if (database->head_txn_info == NULL) - { - database->head_txn_info = database->last_txn_info = (txn_info *)malloc(sizeof(txn_info)); - if (database->head_txn_info == NULL) - return NULL; - memset(database->head_txn_info, sizeof(txn_info), 0); - return database->head_txn_info; - } - for(cur_txn_info = database->head_txn_info; cur_txn_info; cur_txn_info = cur_txn_info->next) - { - if (cur_txn_info->gxid == gxid) - return(cur_txn_info); - } - cur_txn_info->next = database->last_txn_info = (txn_info *)malloc(sizeof(txn_info)); - if (cur_txn_info->next == NULL) - return(NULL); - memset(cur_txn_info->next, sizeof(txn_info), 0); - if ((cur_txn_info->next->txn_stat = (TXN_STATUS *)malloc(sizeof(TXN_STATUS) * pgxc_clean_node_count)) == NULL) - return(NULL); - memset(cur_txn_info->next->txn_stat, sizeof(TXN_STATUS) * pgxc_clean_node_count, 0); - return cur_txn_info->next; -} - - -static txn_info *find_txn(TransactionId gxid) -{ - database_info *cur_db; - txn_info *cur_txn; - - for (cur_db = head_database_info; cur_db; cur_db = cur_db->next) - { - for (cur_txn = cur_db->head_txn_info; cur_txn; cur_txn = cur_txn->next) - { - if (cur_txn->gxid == gxid) - return cur_txn; - } - } - return NULL; -} - -int set_txn_status(TransactionId gxid, char *node_name, TXN_STATUS status) -{ - txn_info *txn; - int node_idx; - - txn = find_txn(gxid); - if (txn == NULL) - return -1; - - node_idx = find_node_index(node_name); - if (node_idx < 0) - return -1; - - txn->txn_stat[node_idx] = status; - return 0; -} - -/* - * This function should be called "after" all the 2PC info - * has been collected. - * - * To determine if a prepared transaction is implicit or explicit, - * we use gxid. If gxid ~ '__XC[0-9]+', it is implicit 2PC. - */ - -TXN_STATUS check_txn_global_status_gxid(TransactionId gxid) -{ - return(check_txn_global_status(find_txn(gxid))); -} - -TXN_STATUS check_txn_global_status(txn_info *txn) -{ -#define TXN_PREPARED 0x0001 -#define TXN_COMMITTED 0x0002 -#define TXN_ABORTED 0x0004 - - int ii; - int check_flag = 0; - - if (txn == NULL) - return TXN_STATUS_INITIAL; - for (ii = 0; ii < pgxc_clean_node_count; ii++) - { - if (txn->txn_stat[ii] == TXN_STATUS_INITIAL) - continue; - else if (txn->txn_stat[ii] == TXN_STATUS_PREPARED) - check_flag |= TXN_PREPARED; - else if (txn->txn_stat[ii] == TXN_STATUS_COMMITTED) - check_flag |= TXN_COMMITTED; - else if (txn->txn_stat[ii] == TXN_STATUS_ABORTED) - check_flag |= TXN_ABORTED; - else - return TXN_STATUS_FAILED; - } - if ((check_flag & TXN_PREPARED) == 0) - /* Should be at least one "prepared statement" in nodes */ - return TXN_STATUS_FAILED; - if ((check_flag & TXN_COMMITTED) && (check_flag & TXN_ABORTED)) - /* Mix of committed and aborted. This should not happen. */ - return TXN_STATUS_FAILED; - if (check_flag & TXN_COMMITTED) - /* Some 2PC transactions are committed. Need to commit others. 
*/ - return TXN_STATUS_COMMITTED; - if (check_flag & TXN_ABORTED) - /* Some 2PC transactions are aborted. Need to abort others. */ - return TXN_STATUS_ABORTED; - /* All the transactions remain prepared. No need to recover. */ - if (check_xid_is_implicit(txn->xid)) - return TXN_STATUS_COMMITTED; - else - return TXN_STATUS_PREPARED; -} - - -/* - * Returns 1 if implicit, 0 otherwise. - * - * Should this be replaced with regexp calls? - */ -static int check_xid_is_implicit(char *xid) -{ -#define XIDPREFIX "_$XC$" - - if (strncmp(xid, XIDPREFIX, strlen(XIDPREFIX)) != 0) - return 0; - for(xid += strlen(XIDPREFIX); *xid; xid++) - { - if (*xid < '0' || *xid > '9') - return 0; - } - return 1; -} - -bool check2PCExists(void) -{ - database_info *cur_db; - - for (cur_db = head_database_info; cur_db; cur_db = cur_db->next) - { - txn_info *cur_txn; - - for (cur_txn = cur_db->head_txn_info; cur_txn; cur_txn = cur_txn->next) - { - return (true); - } - } - return (false); -} - -char *str_txn_stat(TXN_STATUS status) -{ - switch(status) - { - case TXN_STATUS_INITIAL: - return("initial"); - case TXN_STATUS_UNKNOWN: - return("unknown"); - case TXN_STATUS_PREPARED: - return("prepared"); - case TXN_STATUS_COMMITTED: - return("committed"); - case TXN_STATUS_ABORTED: - return("aborted"); - case TXN_STATUS_FAILED: - return("failed"); - default: - return("undefined status"); - } - return("undefined status"); -} diff --git a/src/pgxc/bin/pgxc_clean/txninfo.h b/src/pgxc/bin/pgxc_clean/txninfo.h deleted file mode 100644 index 10798712ee..0000000000 --- a/src/pgxc/bin/pgxc_clean/txninfo.h +++ /dev/null @@ -1,83 +0,0 @@ -/*------------------------------------------------------------------------- - * - * txninfo.h - * Prepared transaction info - * - * Portions Copyright (c) 2012 Postgres-XC Development Group - * - * $Postgres-XC$ - * - *------------------------------------------------------------------------- - */ - -#ifndef TXNINFO_H -#define TXNINFO_H - -#include "gtm/gtm_c.h" - -typedef enum TXN_STATUS -{ - TXN_STATUS_INITIAL = 0, /* Initial */ - TXN_STATUS_UNKNOWN, /* Unknown: Frozen, running, or not started */ - TXN_STATUS_PREPARED, - TXN_STATUS_COMMITTED, - TXN_STATUS_ABORTED, - TXN_STATUS_FAILED /* Error detected while interacting with the node */ -} TXN_STATUS; - -typedef enum NODE_TYPE -{ - NODE_TYPE_COORD = 1, - NODE_TYPE_DATANODE -} NODE_TYPE; - - -typedef struct node_info -{ - char *node_name; - int port; - char *host; - NODE_TYPE type; -} node_info; - -extern node_info *pgxc_clean_node_info; -extern int pgxc_clean_node_count; - -typedef struct txn_info -{ - struct txn_info *next; - TransactionId gxid; - char *xid; /* xid used in prepare */ - char *owner; - TXN_STATUS *txn_stat; /* Array for each nodes */ - char *msg; /* Notice message for this txn. 
*/ -} txn_info; - -typedef struct database_info -{ - struct database_info *next; - char *database_name; - txn_info *head_txn_info; - txn_info *last_txn_info; -} database_info; - -extern database_info *head_database_info; -extern database_info *last_database_info; - -/* Functions */ - -extern txn_info *init_txn_info(char *database_name, TransactionId gxid); -extern int add_txn_info(char *database, char *node, TransactionId gxid, char *xid, char *owner, TXN_STATUS status); -extern txn_info *find_txn_info(TransactionId gxid); -extern int set_txn_status(TransactionId gxid, char *node_name, TXN_STATUS status); -extern database_info *find_database_info(char *database_name); -extern database_info *add_database_info(char *database_name); -extern node_info *find_node_info(char *node_name); -extern int find_node_index(char *node_name); -extern int set_node_info(char *node_name, int port, char *host, NODE_TYPE type, int index); -extern TXN_STATUS check_txn_global_status(txn_info *txn); -extern TXN_STATUS check_txn_global_status_gxid(TransactionId gxid); -extern bool check2PCExists(void); -extern char *str_txn_stat(TXN_STATUS status); - -#endif /* TXNINFO_H */ diff --git a/src/pgxc/bin/pgxc_ddl/README b/src/pgxc/bin/pgxc_ddl/README deleted file mode 100644 index 0a2c440b44..0000000000 --- a/src/pgxc/bin/pgxc_ddl/README +++ /dev/null @@ -1,47 +0,0 @@ -Postgres-XC - pgxc_ddl -===================================== - -This directory contains pgxc_ddl, an application used to make a cold synchronization of DDL -in a Postgres-XC cluster by launching DDL and then copy Coordinator catalog file -data from a remote Coordinator (where DDL has been launched) to other Coordinators. - -pgxc_ddl can also be used to synchronize catalog files. - -pgxc_ddl was put in the default install repository before DDL synchronizing was implemented -(prior to version Postgres-XC 0.9.3). - -pgxc_ddl can be used with a configuration file called pgxc.conf. -This file is kept with the name pgxc.conf.sample to stick with PostgreSQL format. -Up to v0.9.3, pgxc.conf was by default installed by initdb in a data folder, -but this is not really necessary since DDL Synchronization is implemented in Postgres-XC. - -So it is kept in a separate repository src/pgxc/bin/pgxc_ddl/. - -===================================== -pgxc_ddl -===================================== -This script uses the following options: -- D to locate the data folder, necessary to find pgxc.conf, - containing the characteristics of all the coordinators -- l to locate the folder where applications are -- f for a DDL file used as input -- d for a Database name on which to launch DDL -- n coordinator number where to launch DDL, - number based on the one written in pgxc.conf -- t base name of folder where to save configuration files, - by default /tmp/pgxc_config, completed by $$ (process number for folder name uniqueness) - -===================================== -pgxc.conf.sample -===================================== -Same format as for GUC files is used. - -This configuration file contains the list of following parameters: -- coordinator_hosts, list of Coordinator hosts - This is an array and format is 'host_name_1,host_name_2'. -- coordinator_ports, list of Coordinator ports - This is an array and format is 'port_1,port_2' -- coordinator_folders - This is an array and format is 'data_folder_1,data_folder_2' - -All the arrays need to have the same size equal to the number of Coordinators. 
diff --git a/src/pgxc/bin/pgxc_ddl/pgxc.conf.sample b/src/pgxc/bin/pgxc_ddl/pgxc.conf.sample deleted file mode 100644 index 9dcc0c7a2d..0000000000 --- a/src/pgxc/bin/pgxc_ddl/pgxc.conf.sample +++ /dev/null @@ -1,20 +0,0 @@ -# ----------------------------- -# Postgres-XC configuration file -# ----------------------------- -# -# This file consists of lines of the form: -# -# name = value -# -# It describes the list of coordinators used in the cluster - -#------------------------------------------------------------------------------ -# POSTGRES-XC COORDINATORS -#------------------------------------------------------------------------------ - -#coordinator_hosts = 'localhost' # Host names or addresses of data nodes - # (change requires restart) -#coordinator_ports = '5451,5452' # Port numbers of coordinators - # (change requires restart) -#coordinator_folders = '/pgxc/data' # List of Data folders of coordinators - # (change require restart)
\ No newline at end of file diff --git a/src/pgxc/bin/pgxc_ddl/pgxc_ddl b/src/pgxc/bin/pgxc_ddl/pgxc_ddl deleted file mode 100644 index b56a8ff86a..0000000000 --- a/src/pgxc/bin/pgxc_ddl/pgxc_ddl +++ /dev/null @@ -1,443 +0,0 @@ -#!/bin/bash -# Copyright (c) 2010-2012 Postgres-XC Development Group - -#Scripts to launch DDL in PGXC cluster using a cold_backup method -#Be sure to have set a correct ssl environment in all the servers of the cluster - -#This script uses pgxc.conf as a base to find the settings of all the coordinators - -#Options possible to use for this script -# -D to locate the data folder, necessary to find pgxc.conf, containing the characteristics of all the coordinators -# -l to locate the folder where applications are -# -f for a DDL file -# -d for a Database name -# -n coordinator number where to launch DDl, number based on the one written in pgxc.conf -# -t base name of folder where to save configuration files, by default /tmp/pgxc_config, completed by $$ - -count=0 - -#Default options -#local folder used to save temporary the configuration files of coordinator's data folder being erased -CONFIG_FOLDER=/tmp/pgxc_config_files.$$ -PGXC_BASE= -#options to launch the coordinator -#don't forget to add -i as we are in a cluster :) -COORD_OPTIONS="-C -i" - -#----------------------------------------------------------------------- -# Option Management -#----------------------------------------------------------------------- -while getopts 'f:d:D:l:hn:t:' OPTION -do - count=$((count +2)) - case $OPTION in - d) #for a database name - DB_NAME="$OPTARG" - ;; - - D) #for a data folder, to find pgxc.conf - DATA_FOLDER="$OPTARG" - ;; - - f) #for a DDL file - DDL_FILE_NAME="$OPTARG" - ;; - - l) #To define folder where applications are if necessary - PGXC_BASE="$OPTARG"/ - ;; - - n) #for a coordinator number - COORD_NUM_ORIGIN="$OPTARG" - ;; - - h) printf "Usage: %s: [-d dbname] [-l bin folder] [-D data folder] [-n coord number] [-f ddl file] [-t save folder name in /tmp/]\n" $(basename $0) >&2 - exit 0 - ;; - t) #to set the name of the folder where to save conf files. All is mandatory saved in /tmp - CONFIG_FOLDER=/tmp/"$OPTARG" - ;; - - ?) printf "Usage: %s: [-d dbname] [-l bin folder] [-D data folder] [-n coord number] [-f ddl file] [-t save folder name in /tmp/]\n" $(basename $0) >&2 - exit 0 - ;; - esac -done - -if [ $# -lt "1" ] -then - echo "No arguments defined, you should try help -h" - exit 2 -fi - -#A couple of option checks -if [ "$count" -ne "$#" ] -then - echo "Arguments not correctly set, try -h for help" - exit 2 -fi - -if [ -z $COORD_NUM_ORIGIN ] -then - echo "Coordinator number not defined, mandatory -n argument missing" - exit 2 -fi -if [ -z $DATA_FOLDER ] -then - echo "Data folder not defined, mandatory -D argument missing" - exit 2 -fi - -#Check if Argument of -n is an integer -if [ ! $(echo "$COORD_NUM_ORIGIN" | grep -E "^[0-9]+$") ] - then - echo "Argument -n is not a valid integer" - exit 2 -fi - -#Check if DDL file exists -if [ "$DDL_FILE_NAME" != "" ] -then - if [ ! -e $DDL_FILE_NAME ] - then - echo "DDL file not defined" - exit 2 - fi - if [ -z $DB_NAME ] - then - echo "Dbname not defined, mandatory -d argument missing when using a ddl file" - exit 2 - fi -fi - -#----------------------------------------------------------------------- -# Begin to read the pgxc.conf to get coordinator characteristics -#----------------------------------------------------------------------- -PGXC_CONF=$DATA_FOLDER/pgxc.conf - -if [ ! 
-e $PGXC_CONF ] -then - echo "pgxc.conf not defined in the directory defined by -D" - exit 2 -fi - -#Find parameters -hosts=`cat $PGXC_CONF | grep coordinator_hosts | cut -d "'" -f 2` -ports=`cat $PGXC_CONF | grep coordinator_ports | cut -d "'" -f 2` -folders=`cat $PGXC_CONF | grep coordinator_folders | cut -d "'" -f 2` -if [ "$hosts" = "" ] -then - echo "coordinator_hosts not defined in pgxc.conf" - exit 2 -fi -if [ "$ports" = "" ] -then - echo "coordinator_ports not defined in pgxc.conf" - exit 2 -fi -if [ "$folders" = "" ] -then - echo "coordinator_folders not defined in pgxc.conf" - exit 2 -fi - -#Check if the strings are using commas as separators -hosts_sep="${hosts//[^,]/}" -ports_sep="${ports//[^,]/}" -folders_sep="${folders//[^,]/}" -if [ "$hosts_sep" = "" ] -then - echo "coordinator_hosts should use commas as a separator" - exit 2 -fi -if [ "$ports_sep" = "" ] -then - echo "coordinator_ports should use commas as a separator" - exit 2 -fi -if [ "$folders_sep" = "" ] -then - echo "coordinator_folders should use commas as a separator" - exit 2 -fi - - -#----------------------------------------------------------------------- -# Fill in Arrays that are used for the process from pgxc configuration file -#----------------------------------------------------------------------- - -count=1 -#Coordinator list -host_local=`echo $hosts | cut -d "," -f $count` -while [ "$host_local" != "" ] -do - COORD_HOSTNAMES[$((count -1))]=`echo $host_local` - count=$((count +1)) - host_local=`echo $hosts | cut -d "," -f $count` -done -COORD_COUNT=${#COORD_HOSTNAMES[*]} - -#Port list corresponding to the coordinators -#If all the coordinators use the same port on different servers, -#it is possible to define that with a unique element array. -count=1 -port_local=`echo $ports | cut -d "," -f $count` -while [ "$port_local" != "" ] -do - COORD_PORTS[$((count -1))]=$port_local - count=$((count +1)) - port_local=`echo $ports | cut -d "," -f $count` -done -COORD_PORTS_COUNT=${#COORD_PORTS[*]} - -#Data folder list corresponding to the coordinators -#If all the coordinators use the same data folder name on different servers, -#it is possible to define that with a unique element array. -count=1 -folder_local=`echo $folders | cut -d "," -f $count` - -while [ "$folder_local" != "" ] -do - COORD_PGDATA[$((count -1))]=$folder_local - count=$((count +1)) - folder_local=`echo $folders | cut -d "," -f $count` -done -COORD_PGDATA_COUNT=${#COORD_PGDATA[*]} - - -#----------------------------------------------------------------------- -# Start DDL process -#----------------------------------------------------------------------- - -#It is supposed that the same bin folders are used among the servers -#to call postgres processes -#This can be customized by the user with option -l -COORD_SERVER_PROCESS=postgres -PGCTL_SERVER_PROCESS=pg_ctl -PSQL_CLIENT_PROCESS=psql - -COORD_SERVER=$PGXC_BASE$COORD_SERVER_PROCESS -PGCTL_SERVER=$PGXC_BASE$PGCTL_SERVER_PROCESS -PSQL_CLIENT=$PGXC_BASE$PSQL_CLIENT_PROCESS - -#reajust coord number with index number -COORD_NUM_ORIGIN=$((COORD_NUM_ORIGIN -1)) - -#check data validity -#Note: Add other checks here - -if [ $COORD_COUNT -eq "1" ] -then - echo "Are you sure you want to use this utility with one only coordinator??" 
- exit 2 -fi - -if [ $COORD_PGDATA_COUNT -ne $COORD_COUNT ] -then - echo "Number of pgdata folders must be the same as coordinator server number" - exit 2 -fi - -if [ $COORD_PORTS_COUNT -ne $COORD_COUNT ] -then - echo "Number of coordinator ports defined must be the same as coordinator server number" - exit 2 -fi - -#Check if coordinator number is not outbounds -if [ $COORD_NUM_ORIGIN -gt $((COORD_COUNT -1)) ] -then - echo "coordinator number is out of bounds" - exit 2 -fi -COORD_ORIG_INDEX=$COORD_NUM_ORIGIN - -#Check if the data folders are defined -for index in ${!COORD_HOSTNAMES[*]} -do - targethost=${COORD_HOSTNAMES[$index]} - targetdata=${COORD_PGDATA[$index]} - if [[ `ssh $targethost test -d $targetdata && echo exists` ]] - then - echo "defined directory exists for "$targethost - else - echo "defined directory does not exist for "$targethost - exit 2 - fi -done - -#Origin Coordinator Index has been found? -if [ -z $COORD_ORIG_INDEX ] -then - echo "origin coordinator is not in the coordinator list" - exit 2 -fi - -#Main process begins - -#Check if the database is defined, This could lead to coordinator being stopped uselessly -if [ "$DB_NAME" != "" ] -then - #Simply launch a fake SQL on the Database wanted - $PSQL_CLIENT -h ${COORD_HOSTNAMES[$COORD_ORIG_INDEX]} -p ${COORD_PORTS[$COORD_ORIG_INDEX]} -c 'select now()' -d $DB_NAME; err=$? - if [ $err -gt "0" ] - then - echo "Database not defined" - exit 2 - fi -fi - -#1) stop all the coordinators -echo "Stopping all the coordinators" -for index in ${!COORD_HOSTNAMES[*]} -do - targethost=${COORD_HOSTNAMES[$index]} - targetdata=${COORD_PGDATA[$index]} - echo ssh $targethost $PGCTL_SERVER stop -D $targetdata - ssh $targethost $PGCTL_SERVER stop -D $targetdata; err=$? - if [ $err -gt "0" ] - then - "pg_ctl couldn't stop server" - exit 2 - fi -done - -#If a DDL file is not set by the user, just synchronize the catalogs with the catalog of the chosen coordinator -if [ "$DDL_FILE_NAME" != "" ] -then - echo "-f activated, DDL being launched" - - #2) restart the one we want to launch DDL to... - echo ssh ${COORD_HOSTNAMES[$COORD_ORIG_INDEX]} $COORD_SERVER $COORD_OPTIONS -p ${COORD_PORTS[$COORD_ORIG_INDEX]} -D ${COORD_PGDATA[$COORD_ORIG_INDEX]} - ssh ${COORD_HOSTNAMES[$COORD_ORIG_INDEX]} $COORD_SERVER $COORD_OPTIONS -p ${COORD_PORTS[$COORD_ORIG_INDEX]} -D ${COORD_PGDATA[$COORD_ORIG_INDEX]} & - - #wait a little bit to be sure it switched on - sleep 3 - - #3) launch the DDL - #This has to be done depending on if the user has defined a file or a command - echo $PSQL_CLIENT -h ${COORD_HOSTNAMES[$COORD_ORIG_INDEX]} -p ${COORD_PORTS[$COORD_ORIG_INDEX]} -f $DDL_FILE_NAME -d $DB_NAME - $PSQL_CLIENT -h ${COORD_HOSTNAMES[$COORD_ORIG_INDEX]} -p ${COORD_PORTS[$COORD_ORIG_INDEX]} -f $DDL_FILE_NAME -d $DB_NAME; err=$? - if [ $err -gt "0" ] - then - echo "psql error, is Database defined?" - exit 2 - fi - - #4) Stop again the origin coordinator as we cannot copy the lock files to other coordinators - echo ssh ${COORD_HOSTNAMES[$COORD_ORIG_INDEX]} $PGCTL_SERVER stop -D ${COORD_PGDATA[$COORD_ORIG_INDEX]} - ssh ${COORD_HOSTNAMES[$COORD_ORIG_INDEX]} $PGCTL_SERVER stop -D ${COORD_PGDATA[$COORD_ORIG_INDEX]}; err=$? 
- if [ $err -gt "0" ] - then - "pg_ctl couldn't stop server" - exit 2 - fi -fi - -#5) before copying the catalogs, save the configuration files or they are erased by the catalog copy -#make a copy of them in a folder in /tmp/pgxc_conf (default folder) -if [ -d $CONFIG_FOLDER ] -then - rm -rf $CONFIG_FOLDER -fi -mkdir $CONFIG_FOLDER - -for index in ${!COORD_HOSTNAMES[*]} -do - if [ $index -ne $COORD_ORIG_INDEX ] - then - targethost=${COORD_HOSTNAMES[$index]} - targetdata=${COORD_PGDATA[$index]} - echo scp -pr $targethost:$targetdata/postgresql.conf $CONFIG_FOLDER/postgresql.conf.$index - echo scp -pr $targethost:$targetdata/pg_hba.conf $CONFIG_FOLDER/pg_hba.conf.$index - scp -pr $targethost:$targetdata/postgresql.conf $CONFIG_FOLDER/postgresql.conf.$index; err=$? - if [ $err -gt "0" ] - then - echo "deleting saved configuration files" - rm -rf $CONFIG_FOLDER - echo "scp failed with "$targethost - exit 2 - fi - scp -pr $targethost:$targetdata/pg_hba.conf $CONFIG_FOLDER/pg_hba.conf.$index; err=$? - if [ $err -gt "0" ] - then - echo "deleting saved configuration files" - rm -rf $CONFIG_FOLDER - echo "scp failed with "$targethost - exit 2 - fi - fi -done - -#6) copy catalog files to all coordinators but not to the origin one -for index in ${!COORD_HOSTNAMES[*]} -do - if [ $index -ne $COORD_ORIG_INDEX ] - then - srchost=${COORD_HOSTNAMES[$COORD_ORIG_INDEX]} - srcdata=${COORD_PGDATA[$COORD_ORIG_INDEX]} - targethost=${COORD_HOSTNAMES[$index]} - targetdata=${COORD_PGDATA[$index]} - #First erase the data to have a nice cleanup - echo ssh $targethost rm -rf $targetdata - ssh $targethost rm -rf $targetdata - - #Just to be sure that catalog files of origin coordinator are copied well - echo scp -pr $srchost:$srcdata $targethost:$targetdata - scp -pr $srchost:$srcdata $targethost:$targetdata; err=$? - if [ $err -gt "0" ] - then - echo "deleting saved configuration files" - rm -rf $CONFIG_FOLDER - echo "scp failed with "$targethost - exit 2 - fi - fi -done - -#7) copy back the configuration files to the corresponding fresh folders -#but not the configuration files of the origin coordinator -for index in ${!COORD_HOSTNAMES[*]} -do - if [ $index -ne $COORD_ORIG_INDEX ] - then - targethost=${COORD_HOSTNAMES[$index]} - targetdata=${COORD_PGDATA[$index]} - echo scp -pr $CONFIG_FOLDER/postgresql.conf.$index $targethost:$targetdata/postgresql.conf - echo scp -pr $CONFIG_FOLDER/pg_hba.conf.$index $targethost:$targetdata/pg_hba.conf - scp -pr $CONFIG_FOLDER/postgresql.conf.$index $targethost:$targetdata/postgresql.conf; err=$? - if [ $err -gt "0" ] - then - echo "deleting saved configuration files" - rm -rf $CONFIG_FOLDER - echo "scp failed with "$targethost - exit 2 - fi - scp -pr $CONFIG_FOLDER/pg_hba.conf.$index $targethost:$targetdata/pg_hba.conf; err=$? - if [ $err -gt "0" ] - then - echo "deleting saved configuration files" - rm -rf $CONFIG_FOLDER - echo "scp failed with "$targethost - exit 2 - fi - fi -done - -#8) wait a little bit... -sleep 1 - -#9) restart all the other coordinators, origin coordinator has been stopped after DDL run -for index in ${!COORD_HOSTNAMES[*]} -do - echo ssh ${COORD_HOSTNAMES[$index]} $COORD_SERVER $COORD_OPTIONS -p ${COORD_PORTS[$index]} -D ${COORD_PGDATA[$index]} & - ssh ${COORD_HOSTNAMES[$index]} $COORD_SERVER $COORD_OPTIONS -p ${COORD_PORTS[$index]} -D ${COORD_PGDATA[$index]} & -done - -sleep 2 - -#Clean also the folder in tmp keeping the configuration files -rm -rf $CONFIG_FOLDER - -#10) finished :p -exit
\ No newline at end of file |
