-rw-r--r-- contrib/pgxc_ctl/config.c | 17
-rw-r--r-- contrib/pgxc_ctl/coord_cmd.c | 21
-rw-r--r-- contrib/pgxc_ctl/datanode_cmd.c | 107
-rw-r--r-- contrib/pgxc_ctl/do_command.c | 52
-rw-r--r-- contrib/pgxc_ctl/pgxc_ctl.c | 6
-rw-r--r-- contrib/pgxc_ctl/varnames.h | 4
-rw-r--r-- src/backend/access/common/heaptuple.c | 46
-rw-r--r-- src/backend/access/common/printtup.c | 8
-rw-r--r-- src/backend/access/transam/clog.c | 7
-rw-r--r-- src/backend/access/transam/gtm.c | 76
-rw-r--r-- src/backend/access/transam/varsup.c | 4
-rw-r--r-- src/backend/access/transam/xact.c | 148
-rw-r--r-- src/backend/catalog/dependency.c | 11
-rw-r--r-- src/backend/catalog/pg_aggregate.c | 30
-rw-r--r-- src/backend/catalog/pg_proc.c | 15
-rw-r--r-- src/backend/commands/copy.c | 85
-rw-r--r-- src/backend/commands/explain.c | 11
-rw-r--r-- src/backend/commands/indexcmds.c | 8
-rw-r--r-- src/backend/commands/schemacmds.c | 5
-rw-r--r-- src/backend/commands/sequence.c | 42
-rw-r--r-- src/backend/commands/tablecmds.c | 10
-rw-r--r-- src/backend/commands/view.c | 8
-rw-r--r-- src/backend/executor/execAmi.c | 6
-rw-r--r-- src/backend/executor/execCurrent.c | 10
-rw-r--r-- src/backend/executor/execMain.c | 16
-rw-r--r-- src/backend/executor/execTuples.c | 193
-rw-r--r-- src/backend/executor/execUtils.c | 5
-rw-r--r-- src/backend/executor/nodeAgg.c | 177
-rw-r--r-- src/backend/executor/nodeModifyTable.c | 139
-rw-r--r-- src/backend/libpq/be-fsstubs.c | 105
-rw-r--r-- src/backend/nodes/copyfuncs.c | 31
-rw-r--r-- src/backend/nodes/equalfuncs.c | 19
-rw-r--r-- src/backend/nodes/outfuncs.c | 25
-rw-r--r-- src/backend/nodes/readfuncs.c | 53
-rw-r--r-- src/backend/optimizer/path/allpaths.c | 29
-rw-r--r-- src/backend/optimizer/plan/createplan.c | 2508
-rw-r--r-- src/backend/optimizer/plan/planner.c | 137
-rw-r--r-- src/backend/optimizer/plan/setrefs.c | 178
-rw-r--r-- src/backend/optimizer/util/pathnode.c | 27
-rw-r--r-- src/backend/optimizer/util/plancat.c | 25
-rw-r--r-- src/backend/parser/analyze.c | 184
-rw-r--r-- src/backend/parser/parse_agg.c | 30
-rw-r--r-- src/backend/parser/parse_relation.c | 21
-rw-r--r-- src/backend/parser/parse_utilcmd.c | 209
-rw-r--r-- src/backend/pgxc/copy/remotecopy.c | 62
-rw-r--r-- src/backend/pgxc/locator/locator.c | 484
-rw-r--r-- src/backend/pgxc/locator/redistrib.c | 114
-rw-r--r-- src/backend/pgxc/nodemgr/nodemgr.c | 28
-rw-r--r-- src/backend/pgxc/plan/planner.c | 2134
-rw-r--r-- src/backend/pgxc/pool/execRemote.c | 3359
-rw-r--r-- src/backend/pgxc/pool/pgxcnode.c | 368
-rw-r--r-- src/backend/pgxc/pool/poolmgr.c | 676
-rw-r--r-- src/backend/pgxc/pool/poolutils.c | 113
-rw-r--r-- src/backend/pgxc/pool/postgresql_fdw.c | 37
-rw-r--r-- src/backend/postmaster/postmaster.c | 146
-rw-r--r-- src/backend/storage/ipc/procarray.c | 3
-rw-r--r-- src/backend/tcop/postgres.c | 41
-rw-r--r-- src/backend/tcop/pquery.c | 33
-rw-r--r-- src/backend/tcop/utility.c | 122
-rw-r--r-- src/backend/utils/adt/arrayfuncs.c | 14
-rw-r--r-- src/backend/utils/adt/dbsize.c | 9
-rw-r--r-- src/backend/utils/adt/ri_triggers.c | 15
-rw-r--r-- src/backend/utils/adt/ruleutils.c | 86
-rw-r--r-- src/backend/utils/adt/version.c | 9
-rw-r--r-- src/backend/utils/cache/plancache.c | 57
-rw-r--r-- src/backend/utils/cache/relcache.c | 6
-rw-r--r-- src/backend/utils/misc/guc.c | 85
-rw-r--r-- src/backend/utils/sort/tuplesort.c | 170
-rw-r--r-- src/backend/utils/time/snapmgr.c | 4
-rw-r--r-- src/bin/initdb/initdb.c | 16
-rw-r--r-- src/bin/initgtm/initgtm.c | 8
-rw-r--r-- src/bin/pg_ctl/pg_ctl.c | 4
-rw-r--r-- src/bin/psql/command.c | 4
-rw-r--r-- src/bin/psql/startup.c | 4
-rw-r--r-- src/gtm/client/fe-protocol.c | 5
-rw-r--r-- src/gtm/client/gtm_client.c | 99
-rw-r--r-- src/gtm/common/gtm_serialize.c | 39
-rw-r--r-- src/gtm/common/gtm_utils.c | 2
-rw-r--r-- src/gtm/main/gtm_seq.c | 254
-rw-r--r-- src/gtm/main/gtm_txn.c | 22
-rw-r--r-- src/gtm/main/main.c | 41
-rw-r--r-- src/gtm/proxy/proxy_main.c | 26
-rw-r--r-- src/gtm/recovery/register_common.c | 96
-rw-r--r-- src/gtm/recovery/register_gtm.c | 3
-rw-r--r-- src/include/access/gtm.h | 6
-rw-r--r-- src/include/access/xact.h | 4
-rw-r--r-- src/include/executor/tuptable.h | 19
-rw-r--r-- src/include/gtm/gtm_client.h | 12
-rw-r--r-- src/include/gtm/gtm_msg.h | 4
-rw-r--r-- src/include/gtm/gtm_seq.h | 16
-rw-r--r-- src/include/gtm/gtm_serialize.h | 4
-rw-r--r-- src/include/gtm/register.h | 7
-rw-r--r-- src/include/nodes/execnodes.h | 7
-rw-r--r-- src/include/nodes/parsenodes.h | 19
-rw-r--r-- src/include/nodes/plannodes.h | 12
-rw-r--r-- src/include/nodes/primnodes.h | 6
-rw-r--r-- src/include/nodes/relation.h | 17
-rw-r--r-- src/include/optimizer/pathnode.h | 5
-rw-r--r-- src/include/optimizer/planmain.h | 10
-rw-r--r-- src/include/pgxc/execRemote.h | 94
-rw-r--r-- src/include/pgxc/locator.h | 15
-rw-r--r-- src/include/pgxc/pgxc.h | 4
-rw-r--r-- src/include/pgxc/pgxcnode.h | 63
-rw-r--r-- src/include/pgxc/planner.h | 34
-rw-r--r-- src/include/pgxc/poolmgr.h | 111
-rw-r--r-- src/include/pgxc/remotecopy.h | 12
-rw-r--r-- src/include/storage/lwlock.h | 2
-rw-r--r-- src/include/utils/builtins.h | 9
-rw-r--r-- src/include/utils/tuplesort.h | 4
-rw-r--r-- src/pl/plpgsql/src/pl_exec.c | 18
110 files changed, 70 insertions, 14090 deletions
diff --git a/contrib/pgxc_ctl/config.c b/contrib/pgxc_ctl/config.c
index 694548ddbc..414046e21d 100644
--- a/contrib/pgxc_ctl/config.c
+++ b/contrib/pgxc_ctl/config.c
@@ -349,9 +349,7 @@ static void emptyDatanodeSlaves()
reset_var_val(VAR_datanodeSlave, "n");
reset_var(VAR_datanodeSlaveServers);
reset_var(VAR_datanodeSlavePorts);
-#ifdef XCP
reset_var(VAR_datanodeSlavePoolerPorts);
-#endif
reset_var(VAR_datanodeSlaveDirs);
reset_var(VAR_datanodeArchLogDirs);
for (ii = 0; ii < arraySizeName(VAR_datanodeSlaveServers); ii++)
@@ -837,9 +835,7 @@ static void verifyResource(void)
#endif
char *datanodeMasterVars[] = {VAR_datanodeNames,
VAR_datanodePorts,
-#ifdef XCP
VAR_datanodePoolerPorts,
-#endif
VAR_datanodeMasterServers,
VAR_datanodeMasterDirs,
VAR_datanodeMaxWALSenders,
@@ -847,9 +843,7 @@ static void verifyResource(void)
char *datanodeSlaveVars[] = {VAR_datanodeNames,
VAR_datanodeSlaveServers,
VAR_datanodeSlavePorts,
-#ifdef XCP
- VAR_datanodeSlavePoolerPorts,
-#endif
+ VAR_datanodeSlavePoolerPorts,
VAR_datanodeSlaveDirs,
VAR_datanodeArchLogDirs,
NULL};
@@ -1059,18 +1053,9 @@ void check_configuration(void)
!find_var(VAR_coordMasterServers) || !find_var(VAR_coordMasterDirs))
elog(ERROR, "ERROR: Coordinator master configuration is missing. coordNames, coodPorts, poolerPorts, coordMasterPorts or coordMasterDirs\n");
/* Datanode Master */
-#ifdef XCP
if (!find_var(VAR_datanodeNames) || !find_var(VAR_datanodePorts) || !find_var(VAR_datanodeMasterServers) ||
-#else
- if (!find_var(VAR_datanodeNames) || !find_var(VAR_datanodePorts) || !find_var(VAR_datanodePoolerPorts) || !find_var(VAR_datanodeMasterServers) ||
-#endif
-
!find_var(VAR_datanodeMasterDirs))
-#ifdef XCP
elog(ERROR, "ERROR: Datanode master configuration is missing. datanodeNames, datanodePorts, datanodePoolerPorts, datanodeMasterPorts or datanodeMasterDirs\n");
-#else
- elog(ERROR, "ERROR: Datanode master configuration is missing. datanodeNames, datanodePorts, datanodeMasterPorts or datanodeMasterDirs\n");
-#endif
handle_no_slaves();
verifyResource();
makeServerList();
diff --git a/contrib/pgxc_ctl/coord_cmd.c b/contrib/pgxc_ctl/coord_cmd.c
index c809047934..480f7f15b5 100644
--- a/contrib/pgxc_ctl/coord_cmd.c
+++ b/contrib/pgxc_ctl/coord_cmd.c
@@ -2019,12 +2019,9 @@ static int failover_oneCoordinator(int coordIdx)
char *gtmPort;
FILE *f;
char timestamp[MAXTOKEN+1];
-
-#ifdef XCP
char cmd[MAXLINE];
int cmdlen;
bool dnReconfigured;
-#endif
#define checkRc() do{if(WEXITSTATUS(rc_local) > rc) rc = WEXITSTATUS(rc_local);}while(0)
@@ -2041,11 +2038,6 @@ static int failover_oneCoordinator(int coordIdx)
elog(NOTICE, "Failover coordinator %s using GTM itself\n",
aval(VAR_coordNames)[coordIdx]);
-#ifndef XCP
- /* Unregister the coordinator from GTM */
- unregister_coordinator(aval(VAR_coordNames)[coordIdx]);
-#endif
-
/* Promote the slave */
rc_local = doImmediate(aval(VAR_coordSlaveServers)[coordIdx], NULL,
"pg_ctl promote -Z coordinator -D %s",
@@ -2122,7 +2114,6 @@ static int failover_oneCoordinator(int coordIdx)
checkRc();
}
-#ifdef XCP
cmdlen = 0;
cmd[0] = '\0';
/*
@@ -2158,7 +2149,6 @@ static int failover_oneCoordinator(int coordIdx)
cmdlen += len;
}
dnReconfigured = false;
-#endif
/*
* Reconfigure coordinators with new coordinator
@@ -2187,15 +2177,12 @@ static int failover_oneCoordinator(int coordIdx)
fprintf(f,
"ALTER NODE %s WITH (HOST='%s', PORT=%s);\n"
"select pgxc_pool_reload();\n"
-#ifdef XCP
"%s"
-#endif
"\\q\n",
- aval(VAR_coordNames)[coordIdx], aval(VAR_coordMasterServers)[coordIdx], aval(VAR_coordPorts)[coordIdx]
-#ifdef XCP
- ,dnReconfigured ? "" : cmd
-#endif
- );
+ aval(VAR_coordNames)[coordIdx],
+ aval(VAR_coordMasterServers)[coordIdx],
+ aval(VAR_coordPorts)[coordIdx],
+ dnReconfigured ? "" : cmd);
pclose(f);
}
return(rc);
diff --git a/contrib/pgxc_ctl/datanode_cmd.c b/contrib/pgxc_ctl/datanode_cmd.c
index 5defe8a7e8..304181edca 100644
--- a/contrib/pgxc_ctl/datanode_cmd.c
+++ b/contrib/pgxc_ctl/datanode_cmd.c
@@ -113,15 +113,11 @@ cmd_t *prepare_initDatanodeMaster(char *nodeName)
gtmPort = (gtmIdx < 0) ? sval(VAR_gtmMasterPort) : aval(VAR_gtmProxyPorts)[gtmIdx];
fprintf(f,
"port = %s\n"
-#ifdef XCP
"pooler_port = %s\n"
-#endif
"gtm_host = '%s'\n"
"gtm_port = %s\n",
aval(VAR_datanodePorts)[idx],
-#ifdef XCP
aval(VAR_datanodePoolerPorts)[idx],
-#endif
gtmHost, gtmPort);
fclose(f);
@@ -729,11 +725,9 @@ static int failover_oneDatanode(int datanodeIdx)
FILE *f;
char timestamp[MAXTOKEN+1];
-#ifdef XCP
char cmd[MAXLINE];
int cmdlen;
bool dnReconfigured;
-#endif
# define checkRc() do{if(WEXITSTATUS(rc_local) > rc) rc = WEXITSTATUS(rc_local);}while(0)
@@ -750,11 +744,6 @@ static int failover_oneDatanode(int datanodeIdx)
elog(NOTICE, "Failover datanode %s using GTM itself\n",
aval(VAR_datanodeNames)[datanodeIdx]);
-#ifndef XCP
- /* Unregister the datanode */
- unregister_datanode(aval(VAR_datanodeNames)[datanodeIdx]);
-#endif
-
/* Promote the slave */
rc_local = doImmediate(aval(VAR_datanodeSlaveServers)[datanodeIdx], NULL,
"pg_ctl promote -Z datanode -D %s",
@@ -792,13 +781,9 @@ static int failover_oneDatanode(int datanodeIdx)
var_assign(&(aval(VAR_datanodeMasterDirs)[datanodeIdx]), Strdup(aval(VAR_datanodeSlaveDirs)[datanodeIdx]));
var_assign(&(aval(VAR_datanodeSlaveDirs)[datanodeIdx]), Strdup("none"));
var_assign(&(aval(VAR_datanodePorts)[datanodeIdx]), Strdup(aval(VAR_datanodeSlavePorts)[datanodeIdx]));
-#ifdef XCP
var_assign(&(aval(VAR_datanodePoolerPorts)[datanodeIdx]), Strdup(aval(VAR_datanodeSlavePoolerPorts)[datanodeIdx]));
-#endif
var_assign(&(aval(VAR_datanodeSlavePorts)[datanodeIdx]), Strdup("none"));
-#ifdef XCP
var_assign(&(aval(VAR_datanodeSlavePoolerPorts)[datanodeIdx]), Strdup("none"));
-#endif
/*
* Update the configuration file
*/
@@ -812,29 +797,21 @@ static int failover_oneDatanode(int datanodeIdx)
"# Updated due to the datanode failover, %s, %s\n"
"datanodeMasterServers=( %s )\n"
"datanodePorts=( %s )\n"
-#ifdef XCP
"datanodePoolerPorts=( %s )\n"
-#endif
"datanodeMasterDirs=( %s )\n"
"datanodeSlaveServers=( %s )\n"
"datanodeSlavePorts=( %s )\n"
-#ifdef XCP
"datanodeSlavePoolerPorts=( %s )\n"
-#endif
"datanodeSlaveDirs=( %s )\n"
"# End of the update\n",
aval(VAR_datanodeNames)[datanodeIdx], timeStampString(timestamp, MAXTOKEN),
listValue(VAR_datanodeMasterServers),
listValue(VAR_datanodePorts),
-#ifdef XCP
listValue(VAR_datanodePoolerPorts),
-#endif
listValue(VAR_datanodeMasterDirs),
listValue(VAR_datanodeSlaveServers),
listValue(VAR_datanodeSlavePorts),
-#ifdef XCP
listValue(VAR_datanodeSlavePoolerPorts),
-#endif
listValue(VAR_datanodeSlaveDirs));
fclose(f);
@@ -845,7 +822,6 @@ static int failover_oneDatanode(int datanodeIdx)
checkRc();
}
-#ifdef XCP
cmdlen = 0;
cmd[0] = '\0';
/*
@@ -881,7 +857,6 @@ static int failover_oneDatanode(int datanodeIdx)
cmdlen += len;
}
dnReconfigured = false;
-#endif
/*
* Reconfigure coordinators with new datanode
*/
@@ -909,16 +884,12 @@ static int failover_oneDatanode(int datanodeIdx)
fprintf(f,
"ALTER NODE %s WITH (HOST='%s', PORT=%s);\n"
"select pgxc_pool_reload();\n"
-#ifdef XCP
"%s"
-#endif
"\\q\n",
aval(VAR_datanodeNames)[datanodeIdx],
aval(VAR_datanodeMasterServers)[datanodeIdx],
- aval(VAR_datanodePorts)[datanodeIdx]
-#ifdef XCP
- ,dnReconfigured ? "" : cmd
-#endif
+ aval(VAR_datanodePorts)[datanodeIdx],
+ dnReconfigured ? "" : cmd
);
dnReconfigured = true;
pclose(f);
@@ -936,20 +907,13 @@ static int failover_oneDatanode(int datanodeIdx)
* Add command
*
*-----------------------------------------------------------------------*/
-#ifdef XCP
int add_datanodeMaster(char *name, char *host, int port, int pooler, char *dir,
char *restore_dname, char *extraConf, char *extraPgHbaConf)
-#else
-int add_datanodeMaster(char *name, char *host, int port, char *dir,
- char *restore_dname, char *extraConf, char *extraPgHbaConf)
-#endif
{
FILE *f, *lockf;
int size, idx;
char port_s[MAXTOKEN+1];
-#ifdef XCP
char pooler_s[MAXTOKEN+1];
-#endif
int gtmPxyIdx;
char *gtmHost;
char *gtmPort;
@@ -972,19 +936,11 @@ int add_datanodeMaster(char *name, char *host, int port, char *dir,
elog(ERROR, "ERROR: Node name %s duplicate.\n", name);
return 1;
}
-#ifdef XCP
if (checkPortConflict(host, port) || checkPortConflict(host, pooler))
{
elog(ERROR, "ERROR: port numbrer (%d) or pooler port (%d) at host %s conflicts.\n", port, pooler, host);
return 1;
}
-#else
- if (checkPortConflict(host, port))
- {
- elog(ERROR, "ERROR: port numbrer (%d) at host %s conflicts.\n", port, host);
- return 1;
- }
-#endif
if (checkDirConflict(host, dir))
{
elog(ERROR, "ERROR: directory \"%s\" conflicts at host %s.\n", dir, host);
@@ -995,9 +951,7 @@ int add_datanodeMaster(char *name, char *host, int port, char *dir,
*/
idx = size = arraySizeName(VAR_datanodeNames);
if ((arraySizeName(VAR_datanodePorts) != size) ||
-#ifdef XCP
(arraySizeName(VAR_datanodePoolerPorts) != size) ||
-#endif
(arraySizeName(VAR_datanodeMasterServers) != size) ||
(arraySizeName(VAR_datanodeMasterDirs) != size) ||
(arraySizeName(VAR_datanodeMaxWALSenders) != size) ||
@@ -1011,16 +965,12 @@ int add_datanodeMaster(char *name, char *host, int port, char *dir,
if ((extendVar(VAR_datanodeNames, idx + 1, "none") != 0) ||
(extendVar(VAR_datanodeMasterServers, idx + 1, "none") != 0) ||
(extendVar(VAR_datanodePorts, idx + 1, "none") != 0) ||
-#ifdef XCP
(extendVar(VAR_datanodePoolerPorts, idx + 1, "none") != 0) ||
-#endif
(extendVar(VAR_datanodeMasterDirs, idx + 1, "none") != 0) ||
(extendVar(VAR_datanodeMaxWALSenders, idx + 1, "none") != 0) ||
(extendVar(VAR_datanodeSlaveServers, idx + 1, "none") != 0) ||
(extendVar(VAR_datanodeSlavePorts, idx + 1, "none") != 0) ||
-#ifdef XCP
(extendVar(VAR_datanodeSlavePoolerPorts, idx + 1, "none") != 0) ||
-#endif
(extendVar(VAR_datanodeSlaveDirs, idx + 1, "none") != 0) ||
(extendVar(VAR_datanodeArchLogDirs, idx + 1, "none") != 0) ||
(extendVar(VAR_datanodeSpecificExtraConfig, idx + 1, "none") != 0) ||
@@ -1038,22 +988,16 @@ int add_datanodeMaster(char *name, char *host, int port, char *dir,
* 000 We need another way to configure specific pg_hba.conf and max_wal_senders.
*/
snprintf(port_s, MAXTOKEN, "%d", port);
-#ifdef XCP
snprintf(pooler_s, MAXTOKEN, "%d", pooler);
-#endif
assign_arrayEl(VAR_datanodeNames, idx, name, NULL);
assign_arrayEl(VAR_datanodeMasterServers, idx, host, NULL);
assign_arrayEl(VAR_datanodePorts, idx, port_s, "-1");
-#ifdef XCP
assign_arrayEl(VAR_datanodePoolerPorts, idx, pooler_s, "-1");
-#endif
assign_arrayEl(VAR_datanodeMasterDirs, idx, dir, NULL);
assign_arrayEl(VAR_datanodeMaxWALSenders, idx, aval(VAR_datanodeMaxWALSenders)[0], NULL); /* Could be vulnerable */
assign_arrayEl(VAR_datanodeSlaveServers, idx, "none", NULL);
assign_arrayEl(VAR_datanodeSlavePorts, idx, "-1", NULL);
-#ifdef XCP
assign_arrayEl(VAR_datanodeSlavePoolerPorts, idx, "-1", NULL);
-#endif
assign_arrayEl(VAR_datanodeSlaveDirs, idx, "none", NULL);
assign_arrayEl(VAR_datanodeArchLogDirs, idx, "none", NULL);
assign_arrayEl(VAR_datanodeSpecificExtraConfig, idx, extraConf, NULL);
@@ -1093,16 +1037,12 @@ int add_datanodeMaster(char *name, char *host, int port, char *dir,
fprintAval(f, VAR_datanodeNames);
fprintAval(f, VAR_datanodeMasterServers);
fprintAval(f, VAR_datanodePorts);
-#ifdef XCP
fprintAval(f, VAR_datanodePoolerPorts);
-#endif
fprintAval(f, VAR_datanodeMasterDirs);
fprintAval(f, VAR_datanodeMaxWALSenders);
fprintAval(f, VAR_datanodeSlaveServers);
fprintAval(f, VAR_datanodeSlavePorts);
-#ifdef XCP
fprintAval(f, VAR_datanodeSlavePoolerPorts);
-#endif
fprintAval(f, VAR_datanodeSlaveDirs);
fprintAval(f, VAR_datanodeArchLogDirs);
fprintAval(f, VAR_datanodeSpecificExtraConfig);
@@ -1128,18 +1068,12 @@ int add_datanodeMaster(char *name, char *host, int port, char *dir,
"#===========================================\n"
"# Added at initialization. %s\n"
"port = %d\n"
-#ifdef XCP
"pooler_port = %d\n"
-#endif
"gtm_host = '%s'\n"
"gtm_port = %s\n"
"# End of Additon\n",
timeStampString(date, MAXTOKEN+1),
-#ifdef XCP
port, pooler, gtmHost, gtmPort);
-#else
- port, gtmHost, gtmPort);
-#endif
pclose(f);
}
CleanArray(confFiles);
@@ -1272,9 +1206,7 @@ int add_datanodeSlave(char *name, char *host, int port, int pooler, char *dir, c
int idx;
FILE *f;
char port_s[MAXTOKEN+1];
-#ifdef XCP
char pooler_s[MAXTOKEN+1];
-#endif
int kk;
/* Check if the name is valid datanode */
@@ -1367,9 +1299,7 @@ int add_datanodeSlave(char *name, char *host, int port, int pooler, char *dir, c
/* Need an API to expand the array to desired size */
if ((extendVar(VAR_datanodeSlaveServers, idx + 1, "none") != 0) ||
(extendVar(VAR_datanodeSlavePorts, idx + 1, "none") != 0) ||
-#ifdef XCP
(extendVar(VAR_datanodeSlavePoolerPorts, idx + 1, "none") != 0) ||
-#endif
(extendVar(VAR_datanodeSlaveDirs, idx + 1, "none") != 0) ||
(extendVar(VAR_datanodeArchLogDirs, idx + 1, "none") != 0))
{
@@ -1379,17 +1309,13 @@ int add_datanodeSlave(char *name, char *host, int port, int pooler, char *dir, c
/* Reconfigure pgxc_ctl configuration with the new slave */
snprintf(port_s, MAXTOKEN, "%d", port);
-#ifdef XCP
snprintf(pooler_s, MAXTOKEN, "%d", pooler);
-#endif
if (!isVarYes(VAR_datanodeSlave))
assign_sval(VAR_datanodeSlave, "y");
assign_arrayEl(VAR_datanodeSlaveServers, idx, host, NULL);
assign_arrayEl(VAR_datanodeSlavePorts, idx, port_s, NULL);
-#ifdef XCP
assign_arrayEl(VAR_datanodeSlavePoolerPorts, idx, pooler_s, NULL);
-#endif
assign_arrayEl(VAR_datanodeSlaveDirs, idx, dir, NULL);
assign_arrayEl(VAR_datanodeArchLogDirs, idx, archDir, NULL);
/* Update the configuration file and backup it */
@@ -1407,9 +1333,7 @@ int add_datanodeSlave(char *name, char *host, int port, int pooler, char *dir, c
fprintSval(f, VAR_datanodeSlave);
fprintAval(f, VAR_datanodeSlaveServers);
fprintAval(f, VAR_datanodeSlavePorts);
-#ifdef XCP
fprintAval(f, VAR_datanodeSlavePoolerPorts);
-#endif
fprintAval(f, VAR_datanodeArchLogDirs);
fprintAval(f, VAR_datanodeSlaveDirs);
fprintf(f, "%s", "#----End of reconfiguration -------------------------\n");
@@ -1446,19 +1370,13 @@ int add_datanodeSlave(char *name, char *host, int port, int pooler, char *dir, c
"# Added to initialize the slave, %s\n"
"hot_standby = off\n"
"port = %s\n"
-#ifdef XCP
"pooler_port = %s\n"
-#endif
"wal_level = minimal\n" /* WAL level --- minimal. No cascade slave so far. */
"archive_mode = off\n" /* No archive mode */
"archive_command = ''\n" /* No archive mode */
"max_wal_senders = 0\n" /* Minimum WAL senders */
"# End of Addition\n",
-#ifdef XCP
timeStampString(date, MAXTOKEN), aval(VAR_datanodeSlavePorts)[idx], aval(VAR_datanodeSlavePoolerPorts)[idx]);
-#else
- timeStampString(date, MAXTOKEN), aval(VAR_datanodeSlavePorts)[idx]);
-#endif
pclose(f);
/* Update the slave recovery.conf */
if ((f = pgxc_popen_w(host, "cat >> %s/recovery.conf", dir)) == NULL)
@@ -1604,7 +1522,6 @@ int remove_datanodeMaster(char *name, int clean_opt)
pclose(f);
}
}
-#if 1
/* Stop the datanode master if running */
if (pingNode(aval(VAR_datanodeMasterServers)[idx], aval(VAR_datanodePorts)[idx]) == 0)
{
@@ -1615,14 +1532,11 @@ int remove_datanodeMaster(char *name, int clean_opt)
/* Cleanup the datanode master resource if specified */
if (clean_opt)
doImmediate(aval(VAR_datanodeMasterServers)[idx], NULL, "rm -rf %s", aval(VAR_datanodeMasterDirs)[idx]);
-#endif
/* Update configuration and backup --> should cleanup "none" entries here */
assign_arrayEl(VAR_datanodeNames, idx, "none", NULL);
assign_arrayEl(VAR_datanodeMasterDirs, idx, "none", NULL);
assign_arrayEl(VAR_datanodePorts, idx, "-1", "-1");
-#ifdef XCP
assign_arrayEl(VAR_datanodePoolerPorts, idx, "-1", "-1");
-#endif
assign_arrayEl(VAR_datanodeMasterServers, idx, "none", NULL);
assign_arrayEl(VAR_datanodeMaxWALSenders, idx, "0", "0");
assign_arrayEl(VAR_datanodeSlaveServers, idx, "none", NULL);
@@ -1648,9 +1562,7 @@ int remove_datanodeMaster(char *name, int clean_opt)
fprintAval(f, VAR_datanodeNames);
fprintAval(f, VAR_datanodeMasterDirs);
fprintAval(f, VAR_datanodePorts);
-#ifdef XCP
fprintAval(f, VAR_datanodePoolerPorts);
-#endif
fprintAval(f, VAR_datanodeMasterServers);
fprintAval(f, VAR_datanodeMaxWALSenders);
fprintAval(f, VAR_datanodeSlaveServers);
@@ -1758,13 +1670,8 @@ cmd_t *prepare_cleanDatanodeMaster(char *nodeName)
}
cmd = initCmd(aval(VAR_datanodeMasterServers)[idx]);
snprintf(newCommand(cmd), MAXLINE,
-#ifdef XCP
"rm -rf %s; mkdir -p %s; chmod 0700 %s; rm -f /tmp/.s.*%d*",
aval(VAR_datanodeMasterDirs)[idx], aval(VAR_datanodeMasterDirs)[idx], aval(VAR_datanodeMasterDirs)[idx], atoi(aval(VAR_datanodePoolerPorts)[idx]));
-#else
- "rm -rf %s; mkdir -p %s; chmod 0700 %s*",
- aval(VAR_datanodeMasterDirs)[idx], aval(VAR_datanodeMasterDirs)[idx]);
-#endif
return(cmd);
}
@@ -1870,13 +1777,8 @@ int show_config_datanodeMaster(int flag, int idx, char *hostname)
lockLogFile();
if (outBuf[0])
elog(NOTICE, "%s", outBuf);
-#ifdef XCP
elog(NOTICE, " Nodename: '%s', port: %s, pooler port %s\n",
aval(VAR_datanodeNames)[idx], aval(VAR_datanodePorts)[idx], aval(VAR_poolerPorts)[idx]);
-#else
- elog(NOTICE, " Nodename: '%s', port: %s\n",
- aval(VAR_datanodeNames)[idx], aval(VAR_datanodePorts)[idx]);
-#endif
elog(NOTICE, " MaxWALSenders: %s, Dir: '%s'\n",
aval(VAR_datanodeMaxWALSenders)[idx], aval(VAR_datanodeMasterDirs)[idx]);
elog(NOTICE, " ExtraConfig: '%s', Specific Extra Config: '%s'\n",
@@ -1912,13 +1814,8 @@ int show_config_datanodeSlave(int flag, int idx, char *hostname)
lockLogFile();
if (outBuf[0])
elog(NOTICE, "%s", outBuf);
-#ifdef XCP
elog(NOTICE, " Nodename: '%s', port: %s, pooler port: %s\n",
aval(VAR_datanodeNames)[idx], aval(VAR_datanodeSlavePorts)[idx], aval(VAR_poolerPorts)[idx]);
-#else
- elog(NOTICE, " Nodename: '%s', port: %s\n",
- aval(VAR_datanodeNames)[idx], aval(VAR_datanodeSlavePorts)[idx]);
-#endif
elog(NOTICE," Dir: '%s', Archive Log Dir: '%s'\n",
aval(VAR_datanodeSlaveDirs)[idx], aval(VAR_datanodeArchLogDirs)[idx]);
unlockLogFile();
diff --git a/contrib/pgxc_ctl/do_command.c b/contrib/pgxc_ctl/do_command.c
index e8e1c8ac14..d2d8333baf 100644
--- a/contrib/pgxc_ctl/do_command.c
+++ b/contrib/pgxc_ctl/do_command.c
@@ -131,20 +131,12 @@ static void do_deploy(char *line)
}
if (TestToken("all"))
{
-#ifdef XCP
elog(NOTICE, "Deploying Postgres-XL components to all the target servers.\n");
-#else
- elog(NOTICE, "Deploying Postgres-XC materials to all the target servers.\n");
-#endif
deploy_xc(aval(VAR_allServers));
}
else
{
-#ifdef XCP
elog(NOTICE, "Deploying Postgres-XL components.\n");
-#else
- elog(NOTICE, "Deploying Postgres-XC materials.\n");
-#endif
/*
* Please note that the following code does not check if the specified nost
* appears in the configuration file.
@@ -979,26 +971,17 @@ static void do_add_command(char *line)
GetAndSet(name, "ERROR: please specify the name of the datanode master\n");
GetAndSet(host, "ERROR: please specify the host for the datanode masetr\n");
GetAndSet(port, "ERROR: please specify the port number for the datanode master\n");
-#ifdef XCP
GetAndSet(pooler, "ERROR: please specify the pooler port number for the datanode master.\n");
-#endif
GetAndSet(dir, "ERROR: please specify the working director for the datanode master\n");
GetAndSet(dnode, "ERROR: please specify name of existing datanode of which this will be a copy of. Specify 'none' for a bare datanode\n");
GetAndSet(extraConf, "ERROR: please specify file to read extra configuration. Specify 'none' if nothig extra to be added.\n");
GetAndSet(extraPgHbaConf, "ERROR: please specify file to read extra pg_hba configuration. Specify 'none' if nothig extra to be added.\n");
-#ifdef XCP
add_datanodeMaster(name, host, atoi(port), atoi(pooler), dir,
dnode, extraConf, extraPgHbaConf);
-#else
- add_datanodeMaster(name, host, atoi(port), dir, dnode, extraConf,
- extraPgHbaConf);
-#endif
freeAndReset(name);
freeAndReset(host);
freeAndReset(port);
-#ifdef XCP
freeAndReset(pooler);
-#endif
freeAndReset(dir);
}
else
@@ -1006,23 +989,15 @@ static void do_add_command(char *line)
GetAndSet(name, "ERROR: please specify the name of the datanode slave\n");
GetAndSet(host, "ERROR: please specify the host for the datanode slave\n");
GetAndSet(port, "ERROR: please specify the port number for the datanode slave\n");
-#ifdef XCP
GetAndSet(pooler, "ERROR: please specify the pooler port number for the datanode slave.\n");
-#endif
GetAndSet(dir, "ERROR: please specify the working director for datanode slave\n");
GetAndSet(archDir, "ERROR: please specify WAL archive directory for datanode slave\n");
-#ifdef XCP
add_datanodeSlave(name, host, atoi(port), atoi(pooler), dir, archDir);
-#else
- add_datanodeSlave(name, host, atoi(port), dir, archDir);
-#endif
freeAndReset(name);
freeAndReset(host);
freeAndReset(port);
-#ifdef XCP
freeAndReset(pooler);
-#endif
freeAndReset(dir);
}
}
@@ -1500,7 +1475,6 @@ static void show_config_servers(char **hostList)
*/
static void show_basicConfig(void)
{
-#ifdef XCP
elog(NOTICE, "========= Postgres-XL configuration Common Info ========================\n");
elog(NOTICE, "=== Overall ===\n");
elog(NOTICE, "Postgres-XL owner: %s\n", sval(VAR_pgxcOwner));
@@ -1515,22 +1489,6 @@ static void show_basicConfig(void)
elog(NOTICE, "pgxc_ctl configBackupHost: %s\n", isVarYes(VAR_configBackup) ? sval(VAR_configBackupHost) : "none");
elog(NOTICE, "pgxc_ctl configBackupFile: %s\n", isVarYes(VAR_configBackup) ? sval(VAR_configBackupFile) : "none");
elog(NOTICE, "========= Postgres-XL configuration End Common Info ===================\n");
-#else
- elog(NOTICE, "========= Postgres-XC configuration Common Info ========================\n");
- elog(NOTICE, "=== Overall ===\n");
- elog(NOTICE, "Postgres-XC owner: %s\n", sval(VAR_pgxcOwner));
- elog(NOTICE, "Postgres-XC user: %s\n", sval(VAR_pgxcUser));
- elog(NOTICE, "Postgres-XC install directory: %s\n", sval(VAR_pgxcInstallDir));
- elog(NOTICE, "pgxc_ctl home: %s\n", pgxc_ctl_home);
- elog(NOTICE, "pgxc_ctl configuration file: %s\n", pgxc_ctl_config_path);
- elog(NOTICE, "pgxc_ctl tmpDir: %s\n", sval(VAR_tmpDir));
- elog(NOTICE, "pgxc_ctl localTempDir: %s\n", sval(VAR_localTmpDir));
- elog(NOTICE, "pgxc_ctl log file: %s\n", logFileName);
- elog(NOTICE, "pgxc_ctl configBackup: %s\n", isVarYes(VAR_configBackup) ? "y" : "n");
- elog(NOTICE, "pgxc_ctl configBackupHost: %s\n", isVarYes(VAR_configBackup) ? sval(VAR_configBackupHost) : "none");
- elog(NOTICE, "pgxc_ctl configBackupFile: %s\n", isVarYes(VAR_configBackup) ? sval(VAR_configBackupFile) : "none");
- elog(NOTICE, "========= Postgres-XC configuration End Common Info ===================\n");
-#endif
}
@@ -2621,16 +2579,6 @@ int do_singleLine(char *buf, char *wkline)
Free(cmdLine);
return 0;
}
-#ifndef XCP
- else if (TestToken("unregister"))
- {
- /*
- * unregiseter [-n myname] -Z nodetype nodename
- */
- unregisterFromGtm(line);
- return 0;
- }
-#endif
else if (TestToken("test"))
{
do_test(line);
diff --git a/contrib/pgxc_ctl/pgxc_ctl.c b/contrib/pgxc_ctl/pgxc_ctl.c
index 81ddd298f2..eaad8a0b8a 100644
--- a/contrib/pgxc_ctl/pgxc_ctl.c
+++ b/contrib/pgxc_ctl/pgxc_ctl.c
@@ -67,11 +67,7 @@ char pgxc_ctl_config_path[MAXPATH+1];
char progname[MAXPATH+1];
char *myName;
char *defaultDatabase;
-#ifdef XCP
#define versionString "V9.2 for Postgres-XL 9.2"
-#else
-#define versionString "V1.0 for Postgres-XC 1.1"
-#endif
FILE *inF;
FILE *outF;
@@ -453,7 +449,6 @@ int main(int argc, char *argv[])
{0, 0, 0, 0}
};
-#ifdef XCP
int is_bash_exist = system("command -v bash");
if ( is_bash_exist != 0 )
@@ -462,7 +457,6 @@ int main(int argc, char *argv[])
"installed and available in the PATH\n");
exit(2);
}
-#endif
strcpy(progname, argv[0]);
init_var_hash();
diff --git a/contrib/pgxc_ctl/varnames.h b/contrib/pgxc_ctl/varnames.h
index 4298f66ae7..ca545f8790 100644
--- a/contrib/pgxc_ctl/varnames.h
+++ b/contrib/pgxc_ctl/varnames.h
@@ -91,9 +91,7 @@
#define VAR_coordAdditionalSlaveSet "coordAdditionalSlaveSet"
#define VAR_datanodeNames "datanodeNames"
#define VAR_datanodePorts "datanodePorts"
-#ifdef XCP
#define VAR_datanodePoolerPorts "datanodePoolerPorts"
-#endif
#define VAR_datanodePgHbaEntries "datanodePgHbaEntries"
#define VAR_primaryDatanode "primaryDatanode"
@@ -106,9 +104,7 @@
#define VAR_datanodeSlave "datanodeSlave"
#define VAR_datanodeSlaveServers "datanodeSlaveServers"
#define VAR_datanodeSlavePorts "datanodeSlavePorts"
-#ifdef XCP
#define VAR_datanodeSlavePoolerPorts "datanodeSlavePoolerPorts"
-#endif
#define VAR_datanodeSlaveSync "datanodeSlaveSync"
#define VAR_datanodeSlaveDirs "datanodeSlaveDirs"
#define VAR_datanodeArchLogDirs "datanodeArchLogDirs"
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 9a03496a32..5e91e7c4e8 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -1215,23 +1215,14 @@ slot_deform_datarow(TupleTableSlot *slot)
int attnum;
int i;
int col_count;
-#ifdef XCP
char *cur = slot->tts_datarow->msg;
-#else
- char *cur = slot->tts_dataRow;
-#endif
StringInfo buffer;
uint16 n16;
uint32 n32;
MemoryContext oldcontext;
-#ifdef XCP
if (slot->tts_tupleDescriptor == NULL || slot->tts_datarow == NULL)
return;
-#else
- if (slot->tts_tupleDescriptor == NULL || slot->tts_dataRow == NULL)
- return;
-#endif
attnum = slot->tts_tupleDescriptor->natts;
@@ -1239,11 +1230,6 @@ slot_deform_datarow(TupleTableSlot *slot)
if (slot->tts_nvalid == attnum)
return;
-#ifndef XCP
- /* XCP: Can not happen, we return earlier if condition not true */
- Assert(slot->tts_dataRow);
-#endif
-
memcpy(&n16, cur, 2);
cur += 2;
col_count = ntohs(n16);
@@ -1253,7 +1239,6 @@ slot_deform_datarow(TupleTableSlot *slot)
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("Tuple does not match the descriptor")));
-#ifdef XCP
if (slot->tts_attinmeta == NULL)
{
/*
@@ -1276,16 +1261,6 @@ slot_deform_datarow(TupleTableSlot *slot)
ALLOCSET_DEFAULT_INITSIZE,
ALLOCSET_DEFAULT_MAXSIZE);
}
-#else
- /*
- * Ensure info about input functions is available as long as slot lives
- * as well as deformed values
- */
- oldcontext = MemoryContextSwitchTo(slot->tts_mcxt);
-
- if (slot->tts_attinmeta == NULL)
- slot->tts_attinmeta = TupleDescGetAttInMetadata(slot->tts_tupleDescriptor);
-#endif
buffer = makeStringInfo();
for (i = 0; i < attnum; i++)
@@ -1317,7 +1292,6 @@ slot_deform_datarow(TupleTableSlot *slot)
resetStringInfo(buffer);
-#ifdef XCP
/*
* The input function was executed in caller's memory context,
* because it may be allocating working memory, and caller may
@@ -1357,7 +1331,6 @@ slot_deform_datarow(TupleTableSlot *slot)
memcpy(data, val, data_length);
slot->tts_values[i] = (Datum) data;
}
-#endif
}
}
pfree(buffer->data);
@@ -1365,9 +1338,6 @@ slot_deform_datarow(TupleTableSlot *slot)
slot->tts_nvalid = attnum;
-#ifndef XCP
- MemoryContextSwitchTo(oldcontext);
-#endif
}
#endif
@@ -1422,11 +1392,7 @@ slot_getattr(TupleTableSlot *slot, int attnum, bool *isnull)
#ifdef PGXC
/* If it is a data row tuple extract all and return requested */
-#ifdef XCP
if (slot->tts_datarow)
-#else
- if (slot->tts_dataRow)
-#endif
{
slot_deform_datarow(slot);
*isnull = slot->tts_isnull[attnum - 1];
@@ -1506,11 +1472,7 @@ slot_getallattrs(TupleTableSlot *slot)
#ifdef PGXC
/* Handle the DataRow tuple case */
-#ifdef XCP
if (slot->tts_datarow)
-#else
- if (slot->tts_dataRow)
-#endif
{
slot_deform_datarow(slot);
return;
@@ -1562,11 +1524,7 @@ slot_getsomeattrs(TupleTableSlot *slot, int attnum)
#ifdef PGXC
/* Handle the DataRow tuple case */
-#ifdef XCP
if (slot->tts_datarow)
-#else
- if (slot->tts_dataRow)
-#endif
{
slot_deform_datarow(slot);
return;
@@ -1642,11 +1600,7 @@ slot_attisnull(TupleTableSlot *slot, int attnum)
#ifdef PGXC
/* If it is a data row tuple extract all and return requested */
-#ifdef XCP
if (slot->tts_datarow)
-#else
- if (slot->tts_dataRow)
-#endif
{
slot_deform_datarow(slot);
return slot->tts_isnull[attnum - 1];
diff --git a/src/backend/access/common/printtup.c b/src/backend/access/common/printtup.c
index f0b6f2409e..c3606cd0c8 100644
--- a/src/backend/access/common/printtup.c
+++ b/src/backend/access/common/printtup.c
@@ -337,19 +337,11 @@ printtup(TupleTableSlot *slot, DestReceiver *self)
* values, just send over the DataRow message as we received it from the
* Datanode
*/
-#ifdef XCP
if (slot->tts_datarow)
{
pq_putmessage('D', slot->tts_datarow->msg, slot->tts_datarow->msglen);
return;
}
-#else
- if (slot->tts_dataRow)
- {
- pq_putmessage('D', slot->tts_dataRow, slot->tts_dataLen);
- return;
- }
-#endif
#endif
/* Set or update my derived attribute info, if needed */
diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c
index c3f63003a1..df4e398160 100644
--- a/src/backend/access/transam/clog.c
+++ b/src/backend/access/transam/clog.c
@@ -361,7 +361,6 @@ TransactionIdSetStatusBit(TransactionId xid, XidStatus status, XLogRecPtr lsn, i
* Current state change should be from 0 or subcommitted to target state
* or we should already be there when replaying changes during recovery.
*/
-#ifdef XCP
if (!(curval == 0 ||
(curval == TRANSACTION_STATUS_SUB_COMMITTED &&
status != TRANSACTION_STATUS_IN_PROGRESS) ||
@@ -370,12 +369,6 @@ TransactionIdSetStatusBit(TransactionId xid, XidStatus status, XLogRecPtr lsn, i
elog(WARNING, "Unexpected clog condition. curval = %d, status = %d",
curval, status);
}
-#else
- Assert(curval == 0 ||
- (curval == TRANSACTION_STATUS_SUB_COMMITTED &&
- status != TRANSACTION_STATUS_IN_PROGRESS) ||
- curval == status);
-#endif
/* note this assumes exclusive access to the clog page */
byteval = *byteptr;
diff --git a/src/backend/access/transam/gtm.c b/src/backend/access/transam/gtm.c
index 32e343792c..4b6e3d591f 100644
--- a/src/backend/access/transam/gtm.c
+++ b/src/backend/access/transam/gtm.c
@@ -31,9 +31,7 @@
char *GtmHost = "localhost";
int GtmPort = 6666;
static int GtmConnectTimeout = 60;
-#ifdef XCP
bool IsXidFromGTM = false;
-#endif
bool gtm_backup_barrier = false;
extern bool FirstSnapshotSet;
@@ -115,10 +113,8 @@ InitGTM(void)
CloseGTM();
}
-#ifdef XCP
else if (IS_PGXC_COORDINATOR)
register_session(conn, PGXCNodeName, MyProcPid, MyBackendId);
-#endif
}
void
@@ -158,10 +154,8 @@ BeginTranGTM(GTM_Timestamp *timestamp)
if (conn)
xid = begin_transaction(conn, GTM_ISOLATION_RC, timestamp);
}
-#ifdef XCP
if (xid)
IsXidFromGTM = true;
-#endif
currentGxid = xid;
return xid;
}
@@ -200,10 +194,8 @@ CommitTranGTM(GlobalTransactionId gxid, int waited_xid_count,
if (!GlobalTransactionIdIsValid(gxid))
return 0;
CheckConnection();
-#ifdef XCP
ret = -1;
if (conn)
-#endif
ret = commit_transaction(conn, gxid, waited_xid_count, waited_xids);
/*
@@ -215,10 +207,8 @@ CommitTranGTM(GlobalTransactionId gxid, int waited_xid_count,
{
CloseGTM();
InitGTM();
-#ifdef XCP
if (conn)
ret = commit_transaction(conn, gxid, waited_xid_count, waited_xids);
-#endif
}
/* Close connection in case commit is done by autovacuum worker or launcher */
@@ -243,11 +233,9 @@ CommitPreparedTranGTM(GlobalTransactionId gxid,
if (!GlobalTransactionIdIsValid(gxid) || !GlobalTransactionIdIsValid(prepared_gxid))
return ret;
CheckConnection();
-#ifdef XCP
ret = -1;
if (conn)
-#endif
- ret = commit_prepared_transaction(conn, gxid, prepared_gxid,
+ ret = commit_prepared_transaction(conn, gxid, prepared_gxid,
waited_xid_count, waited_xids);
/*
@@ -260,11 +248,9 @@ CommitPreparedTranGTM(GlobalTransactionId gxid,
{
CloseGTM();
InitGTM();
-#ifdef XCP
if (conn)
ret = commit_prepared_transaction(conn, gxid, prepared_gxid,
waited_xid_count, waited_xids);
-#endif
}
currentGxid = InvalidGlobalTransactionId;
return ret;
@@ -291,10 +277,8 @@ RollbackTranGTM(GlobalTransactionId gxid)
{
CloseGTM();
InitGTM();
-#ifdef XCP
if (conn)
ret = abort_transaction(conn, gxid);
-#endif
}
currentGxid = InvalidGlobalTransactionId;
@@ -312,11 +296,9 @@ StartPreparedTranGTM(GlobalTransactionId gxid,
return 0;
CheckConnection();
-#ifdef XCP
ret = -1;
if (conn)
-#endif
- ret = start_prepared_transaction(conn, gxid, gid, nodestring);
+ ret = start_prepared_transaction(conn, gxid, gid, nodestring);
/*
* If something went wrong (timeout), try and reset GTM connection.
@@ -327,10 +309,8 @@ StartPreparedTranGTM(GlobalTransactionId gxid,
{
CloseGTM();
InitGTM();
-#ifdef XCP
if (conn)
ret = start_prepared_transaction(conn, gxid, gid, nodestring);
-#endif
}
return ret;
@@ -344,11 +324,9 @@ PrepareTranGTM(GlobalTransactionId gxid)
if (!GlobalTransactionIdIsValid(gxid))
return 0;
CheckConnection();
-#ifdef XCP
ret = -1;
if (conn)
-#endif
- ret = prepare_transaction(conn, gxid);
+ ret = prepare_transaction(conn, gxid);
/*
* If something went wrong (timeout), try and reset GTM connection.
@@ -359,10 +337,8 @@ PrepareTranGTM(GlobalTransactionId gxid)
{
CloseGTM();
InitGTM();
-#ifdef XCP
if (conn)
ret = prepare_transaction(conn, gxid);
-#endif
}
currentGxid = InvalidGlobalTransactionId;
return ret;
@@ -378,11 +354,9 @@ GetGIDDataGTM(char *gid,
int ret = 0;
CheckConnection();
-#ifdef XCP
ret = -1;
if (conn)
-#endif
- ret = get_gid_data(conn, GTM_ISOLATION_RC, gid, gxid,
+ ret = get_gid_data(conn, GTM_ISOLATION_RC, gid, gxid,
prepared_gxid, nodestring);
/*
@@ -394,11 +368,9 @@ GetGIDDataGTM(char *gid,
{
CloseGTM();
InitGTM();
-#ifdef XCP
if (conn)
ret = get_gid_data(conn, GTM_ISOLATION_RC, gid, gxid,
prepared_gxid, nodestring);
-#endif
}
return ret;
@@ -415,10 +387,8 @@ GetSnapshotGTM(GlobalTransactionId gxid, bool canbe_grouped)
{
CloseGTM();
InitGTM();
-#ifdef XCP
if (conn)
ret_snapshot = get_snapshot(conn, gxid, canbe_grouped);
-#endif
}
return ret_snapshot;
}
@@ -463,16 +433,14 @@ GetCurrentValGTM(char *seqname)
{
GTM_Sequence ret = -1;
GTM_SequenceKeyData seqkey;
-#ifdef XCP
char *coordName = IS_PGXC_COORDINATOR ? PGXCNodeName : MyCoordName;
int coordPid = IS_PGXC_COORDINATOR ? MyProcPid : MyCoordPid;
int status;
-#endif
+
CheckConnection();
seqkey.gsk_keylen = strlen(seqname) + 1;
seqkey.gsk_key = seqname;
-#ifdef XCP
if (conn)
status = get_current(conn, &seqkey, coordName, coordPid, &ret);
else
@@ -490,16 +458,6 @@ GetCurrentValGTM(char *seqname)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("%s", GTMPQerrorMessage(conn))));
-#else
- if (conn)
- ret = get_current(conn, &seqkey);
-
- if (ret < 0)
- {
- CloseGTM();
- InitGTM();
- }
-#endif
return ret;
}
@@ -507,24 +465,18 @@ GetCurrentValGTM(char *seqname)
* Get the next sequence value
*/
GTM_Sequence
-#ifdef XCP
GetNextValGTM(char *seqname, GTM_Sequence range, GTM_Sequence *rangemax)
-#else
-GetNextValGTM(char *seqname)
-#endif
{
GTM_Sequence ret = -1;
GTM_SequenceKeyData seqkey;
-#ifdef XCP
char *coordName = IS_PGXC_COORDINATOR ? PGXCNodeName : MyCoordName;
int coordPid = IS_PGXC_COORDINATOR ? MyProcPid : MyCoordPid;
int status;
-#endif
+
CheckConnection();
seqkey.gsk_keylen = strlen(seqname) + 1;
seqkey.gsk_key = seqname;
-#ifdef XCP
if (conn)
status = get_next(conn, &seqkey, coordName,
coordPid, range, &ret, rangemax);
@@ -544,15 +496,6 @@ GetNextValGTM(char *seqname)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("%s", GTMPQerrorMessage(conn))));
-#else
- if (conn)
- ret = get_next(conn, &seqkey);
- if (ret < 0)
- {
- CloseGTM();
- InitGTM();
- }
-#endif
return ret;
}
@@ -563,19 +506,14 @@ int
SetValGTM(char *seqname, GTM_Sequence nextval, bool iscalled)
{
GTM_SequenceKeyData seqkey;
-#ifdef XCP
char *coordName = IS_PGXC_COORDINATOR ? PGXCNodeName : MyCoordName;
int coordPid = IS_PGXC_COORDINATOR ? MyProcPid : MyCoordPid;
-#endif
+
CheckConnection();
seqkey.gsk_keylen = strlen(seqname) + 1;
seqkey.gsk_key = seqname;
-#ifdef XCP
return conn ? set_val(conn, &seqkey, coordName, coordPid, nextval, iscalled) : -1;
-#else
- return conn ? set_val(conn, &seqkey, nextval, iscalled) : -1;
-#endif
}
/*
diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c
index 21c3dadc47..40432be15f 100644
--- a/src/backend/access/transam/varsup.c
+++ b/src/backend/access/transam/varsup.c
@@ -80,7 +80,6 @@ GetForceXidFromGTM(void)
#endif /* PGXC */
-#ifdef XCP
/*
* Check if GlobalTransactionId associated with the current distributed session
* equals to specified xid.
@@ -103,7 +102,6 @@ GetNextTransactionId(void)
{
return next_xid;
}
-#endif
/*
@@ -127,10 +125,8 @@ GetNewTransactionId(bool isSubXact)
#ifdef PGXC
bool increment_xid = true;
*timestamp_received = false;
-#ifdef XCP
/* Will be set if we obtain from GTM */
IsXidFromGTM = false;
-#endif
#endif /* PGXC */
/*
* Workers synchronize transaction state at the beginning of each parallel
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index c72dd98d92..636466560d 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -32,9 +32,7 @@
/* PGXC_COORD */
#include "gtm/gtm_c.h"
#include "pgxc/execRemote.h"
-#ifdef XCP
#include "pgxc/pause.h"
-#endif
/* PGXC_DATANODE */
#include "postmaster/autovacuum.h"
#include "libpq/pqformat.h"
@@ -200,10 +198,6 @@ typedef struct TransactionStateData
GlobalTransactionId transactionId;
GlobalTransactionId topGlobalTransansactionId;
GlobalTransactionId auxilliaryTransactionId;
-#ifndef XCP
- bool isLocalParameterUsed; /* Check if a local parameter is active
- * in transaction block (SET LOCAL, DEFERRED) */
-#endif
#else
TransactionId transactionId; /* my XID, or Invalid if none */
#endif
@@ -244,9 +238,6 @@ static TransactionStateData TopTransactionStateData = {
0, /* global transaction id */
0, /* prepared global transaction id */
0, /* commit prepared global transaction id */
-#ifndef XCP
- false, /* isLocalParameterUsed */
-#endif
#else
0, /* transaction id */
#endif
@@ -599,33 +590,6 @@ GetStableLatestTransactionId(void)
return stablexid;
}
-#ifdef PGXC
-#ifndef XCP
-/*
- * GetCurrentLocalParamStatus
- *
- * This will return if current sub xact is using local parameters
- * that may involve pooler session related parameters (SET LOCAL).
- */
-bool
-GetCurrentLocalParamStatus(void)
-{
- return CurrentTransactionState->isLocalParameterUsed;
-}
-
-/*
- * SetCurrentLocalParamStatus
- *
- * This sets local parameter usage for current sub xact.
- */
-void
-SetCurrentLocalParamStatus(bool status)
-{
- CurrentTransactionState->isLocalParameterUsed = status;
-}
-#endif
-#endif
-
/*
* AssignTransactionId
*
@@ -882,11 +846,7 @@ GetCurrentCommandId(bool used)
{
#ifdef PGXC
/* If coordinator has sent a command id, remote node should use it */
-#ifdef XCP
if (isCommandIdReceived)
-#else
- if (IsConnFromCoord() && isCommandIdReceived)
-#endif
{
/*
* Indicate to successive calls of this function that the sent command id has
@@ -1504,11 +1464,7 @@ RecordTransactionCommit(void)
SetCurrentTransactionStopTimestamp();
-#ifdef XCP
XactLogCommitRecord(xactStopTimestamp + GTMdeltaTimestamp,
-#else
- XactLogCommitRecord(xactStopTimestamp,
-#endif
nchildren, children, nrels, rels,
nmsgs, invalMessages,
RelcacheInitFileInval, forceSyncCommit,
@@ -1872,11 +1828,7 @@ RecordTransactionAbort(bool isSubXact)
else
{
SetCurrentTransactionStopTimestamp();
-#ifdef XCP
xact_time = xactStopTimestamp + GTMdeltaTimestamp;
-#else
- xact_time = xactStopTimestamp;
-#endif
}
XactLogAbortRecord(xact_time,
@@ -2121,11 +2073,6 @@ StartTransaction(void)
* start processing
*/
s->state = TRANS_START;
-#ifdef PGXC
-#ifndef XCP
- s->isLocalParameterUsed = false;
-#endif
-#endif
s->transactionId = InvalidTransactionId; /* until assigned */
/*
* Make sure we've reset xact state variables
@@ -2339,45 +2286,18 @@ CommitTransaction(void)
saveNodeString = NULL;
}
#endif
-
-#ifndef XCP
- /*
- * Check if there are any ON COMMIT actions or if temporary objects are in use.
- * If session is set-up to enforce 2PC for such transactions, return an error.
- * If not, simply enforce autocommit on each remote node.
- */
- if (IsOnCommitActions() || ExecIsTempObjectIncluded())
- {
- if (!EnforceTwoPhaseCommit)
- ExecSetTempObjectIncluded();
- else
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot PREPARE a transaction that has operated on temporary tables"),
- errdetail("Disabling enforce_two_phase_commit is recommended to enforce COMMIT")));
- }
-#endif
-
/*
* If the local node has done some write activity, prepare the local node
* first. If that fails, the transaction is aborted on all the remote
* nodes
*/
-#ifdef XCP
/*
* Fired OnCommit actions would fail 2PC process
*/
if (!IsOnCommitActions() && IsTwoPhaseCommitRequired(XactWriteLocalNode))
-#else
- if (IsTwoPhaseCommitRequired(XactWriteLocalNode))
-#endif
{
prepareGID = MemoryContextAlloc(TopTransactionContext, 256);
-#ifdef XCP
sprintf(prepareGID, implicit2PC_head"%u", GetTopTransactionId());
-#else
- sprintf(prepareGID, "T%u", GetTopTransactionId());
-#endif
savePrepareGID = MemoryContextStrdup(TopMemoryContext, prepareGID);
@@ -2405,14 +2325,10 @@ CommitTransaction(void)
s->auxilliaryTransactionId = GetTopTransactionId();
}
else
-#ifdef XCP
{
s->auxilliaryTransactionId = InvalidGlobalTransactionId;
PrePrepare_Remote(prepareGID, false, true);
}
-#else
- s->auxilliaryTransactionId = InvalidGlobalTransactionId;
-#endif
}
}
#endif
@@ -2462,21 +2378,13 @@ CommitTransaction(void)
PreCommit_Notify();
#ifdef PGXC
-#ifdef XCP
if (IS_PGXC_DATANODE || !IsConnFromCoord())
-#else
- if (IS_PGXC_LOCAL_COORDINATOR)
-#endif
{
/*
* Now run 2PC on the remote nodes. Any errors will be reported via
* ereport and we will run error recovery as part of AbortTransaction
*/
-#ifdef XCP
PreCommit_Remote(savePrepareGID, saveNodeString, XactLocalNodePrepared);
-#else
- PreCommit_Remote(savePrepareGID, XactLocalNodePrepared);
-#endif
/*
* Now that all the remote nodes have successfully prepared and
* commited, commit the local transaction as well. Remember, any errors
@@ -2675,9 +2583,6 @@ CommitTransaction(void)
s->maxChildXids = 0;
#ifdef PGXC
-#ifndef XCP
- s->isLocalParameterUsed = false;
-#endif
ForgetTransactionLocalNode();
/*
@@ -2753,7 +2658,6 @@ AtEOXact_GlobalTxn(bool commit)
RollbackTranGTM(s->topGlobalTransansactionId);
}
}
-#ifdef XCP
/*
* If GTM is connected the current gxid is acquired from GTM directly.
* So directly report transaction end. However this applies only if
@@ -2773,27 +2677,6 @@ AtEOXact_GlobalTxn(bool commit)
CloseGTM();
}
}
-#else
- else if (IS_PGXC_DATANODE || IsConnFromCoord())
- {
- /* If we are autovacuum, commit on GTM */
- if ((IsAutoVacuumWorkerProcess() || GetForceXidFromGTM())
- && IsGTMConnected())
- {
- if (commit)
- CommitTranGTM(s->topGlobalTransansactionId);
- else
- RollbackTranGTM(s->topGlobalTransansactionId);
- }
- else if (GlobalTransactionIdIsValid(currentGxid))
- {
- if (commit)
- CommitTranGTM(currentGxid);
- else
- RollbackTranGTM(currentGxid);
- }
- }
-#endif
s->topGlobalTransansactionId = InvalidGlobalTransactionId;
s->auxilliaryTransactionId = InvalidGlobalTransactionId;
@@ -2828,9 +2711,6 @@ PrepareTransaction(void)
TimestampTz prepared_at;
#ifdef PGXC
bool isImplicit = !(s->blockState == TBLOCK_PREPARE);
-#ifndef XCP
- char *nodestring = NULL;
-#endif
#endif
Assert(!IsInParallelMode());
@@ -2845,25 +2725,6 @@ PrepareTransaction(void)
TransStateAsString(s->state));
Assert(s->parent == NULL);
-#ifdef PGXC
-#ifndef XCP
- if (IS_PGXC_LOCAL_COORDINATOR)
- {
- if (savePrepareGID)
- pfree(savePrepareGID);
- savePrepareGID = MemoryContextStrdup(TopMemoryContext, prepareGID);
- nodestring = PrePrepare_Remote(savePrepareGID, XactWriteLocalNode, isImplicit);
- s->topGlobalTransansactionId = s->transactionId;
-
- /*
- * Callback on GTM if necessary, this needs to be done before HOLD_INTERRUPTS
- * as this is not a part of the end of transaction processing involving clean up.
- */
- CallGTMCallbacks(GTM_EVENT_PREPARE);
- }
-#endif
-#endif
-
/*
* Do pre-commit processing that involves calling user-defined code, such
* as triggers. Since closing cursors could queue trigger actions,
@@ -3164,11 +3025,7 @@ PrepareTransaction(void)
*/
if (IS_PGXC_LOCAL_COORDINATOR)
{
-#ifdef XCP
PostPrepare_Remote(savePrepareGID, isImplicit);
-#else
- PostPrepare_Remote(savePrepareGID, nodestring, isImplicit);
-#endif
if (!isImplicit)
s->topGlobalTransansactionId = InvalidGlobalTransactionId;
ForgetTransactionLocalNode();
@@ -6587,9 +6444,7 @@ IsTransactionLocalNode(bool write)
bool
IsXidImplicit(const char *xid)
{
-#ifndef XCP
#define implicit2PC_head "_$XC$"
-#endif
const size_t implicit2PC_head_len = strlen(implicit2PC_head);
if (strncmp(xid, implicit2PC_head, implicit2PC_head_len))
@@ -6611,9 +6466,6 @@ SaveReceivedCommandId(CommandId cid)
* Change command ID information status to report any changes in remote ID
* for a remote node. A new command ID has also been received.
*/
-#ifndef XCP
- if (IsConnFromCoord())
-#endif
{
SetSendCommandId(true);
isCommandIdReceived = true;
diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c
index bf809861ad..1f1a28da2d 100644
--- a/src/backend/catalog/dependency.c
+++ b/src/backend/catalog/dependency.c
@@ -1305,17 +1305,6 @@ doDeletion(const ObjectAddress *object, int flags)
break;
case RELKIND_RELATION:
case RELKIND_VIEW:
-#ifndef XCP
- /*
- * Flag temporary objects in use in case a temporary table or view
- * is dropped by dependency. This check is particularly useful with
- * CASCADE when temporary objects are removed by dependency in order
- * to avoid implicit 2PC would result in an error as temporary
- * objects cannot be prepared.
- */
- if (IsTempTable(object->objectId))
- ExecSetTempObjectIncluded();
-#endif
break;
default:
break;
diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c
index 51de82be64..202c198e60 100644
--- a/src/backend/catalog/pg_aggregate.c
+++ b/src/backend/catalog/pg_aggregate.c
@@ -137,16 +137,6 @@ AggregateCreate(const char *aggName,
FUNC_MAX_ARGS - 1,
FUNC_MAX_ARGS - 1)));
-#ifdef PGXC
-#ifndef XCP
-
- if (aggTransType == INTERNALOID)
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("unsafe use of pseudo-type \"internal\""),
- errdetail("Transition type can not be \"internal\".")));
-#endif
-#endif
/* check for polymorphic and INTERNAL arguments */
hasPolyArg = false;
hasInternalArg = false;
@@ -288,7 +278,6 @@ AggregateCreate(const char *aggName,
ReleaseSysCache(tup);
#ifdef PGXC
-#ifdef XCP
if (aggcollectfnName)
{
/*
@@ -308,25 +297,6 @@ AggregateCreate(const char *aggName,
format_type_be(aggCollectType)
)));
}
-#else
- if (aggcollectfnName)
- {
- /*
- * Collection function must be of two arguments, both of type aggTransType
- * and return type is also aggTransType
- */
- fnArgs[0] = aggTransType;
- fnArgs[1] = aggTransType;
- collectfn = lookup_agg_function(aggcollectfnName, 2, fnArgs, variadicArgType,
- &rettype);
- if (rettype != aggTransType)
- ereport(ERROR,
- (errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("return type of collection function %s is not %s",
- NameListToString(aggcollectfnName),
- format_type_be(aggTransType))));
- }
-#endif
#endif
/* handle moving-aggregate transfn, if supplied */
diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c
index b148e35915..5628d4271a 100644
--- a/src/backend/catalog/pg_proc.c
+++ b/src/backend/catalog/pg_proc.c
@@ -958,21 +958,6 @@ fmgr_sql_validator(PG_FUNCTION_ARGS)
(ParserSetupHook) sql_fn_parser_setup,
pinfo);
-#ifdef PGXC
-#ifndef XCP
- /* Check if the list of queries contains temporary objects */
- if (IS_PGXC_LOCAL_COORDINATOR)
- {
- if (pgxc_query_contains_utility(querytree_sublist))
- ereport(ERROR,
- (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("In XC, SQL functions cannot contain utility statements")));
-
- if (pgxc_query_contains_temp_tables(querytree_sublist))
- ExecSetTempObjectIncluded();
- }
-#endif
-#endif
querytree_list = list_concat(querytree_list,
querytree_sublist);
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index c76814c17f..a356229140 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -893,14 +893,6 @@ DoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed)
rte->requiredPerms = required_access;
range_table = list_make1(rte);
-#ifdef PGXC
-#ifndef XCP
- /* In case COPY is used on a temporary table, never use 2PC for implicit commits */
- if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP)
- ExecSetTempObjectIncluded();
-#endif
-#endif
-
tupDesc = RelationGetDescr(rel);
attnums = CopyGetAttnums(tupDesc, rel, stmt->attlist);
foreach(cur, attnums)
@@ -1722,15 +1714,8 @@ BeginCopy(bool is_from,
*/
if (remoteCopyState && remoteCopyState->rel_loc)
{
-#ifdef XCP
DataNodeCopyBegin(remoteCopyState);
if (!remoteCopyState->locator)
-#else
- remoteCopyState->connections = DataNodeCopyBegin(remoteCopyState->query_buf.data,
- remoteCopyState->exec_nodes->nodeList,
- GetActiveSnapshot());
- if (!remoteCopyState->connections)
-#endif
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_EXCEPTION),
errmsg("Failed to initialize Datanodes for COPY")));
@@ -2073,33 +2058,10 @@ CopyTo(CopyState cstate)
cstate->remoteCopyState->rel_loc)
{
RemoteCopyData *rcstate = cstate->remoteCopyState;
-#ifdef XCP
processed = DataNodeCopyOut(
(PGXCNodeHandle **) getLocatorNodeMap(rcstate->locator),
getLocatorNodeCount(rcstate->locator),
cstate->copy_dest == COPY_FILE ? cstate->copy_file : NULL);
-#else
- RemoteCopyType remoteCopyType;
-
- /* Set up remote COPY to correct operation */
- if (cstate->copy_dest == COPY_FILE)
- remoteCopyType = REMOTE_COPY_FILE;
- else
- remoteCopyType = REMOTE_COPY_STDOUT;
-
- /*
- * We don't know the value of the distribution column value, so need to
- * read from all nodes. Hence indicate that the value is NULL.
- */
- processed = DataNodeCopyOut(GetRelationNodes(remoteCopyState->rel_loc, 0,
- true, UNKNOWNOID,
- RELATION_ACCESS_READ),
- remoteCopyState->connections,
- NULL,
- cstate->copy_file,
- NULL,
- remoteCopyType);
-#endif
}
else
{
@@ -2596,7 +2558,6 @@ CopyFrom(CopyState cstate)
*/
if (IS_PGXC_COORDINATOR && cstate->remoteCopyState->rel_loc)
{
-#ifdef XCP
Datum value = (Datum) 0;
bool isnull = true;
RemoteCopyData *rcstate = cstate->remoteCopyState;
@@ -2616,40 +2577,6 @@ CopyFrom(CopyState cstate)
(errcode(ERRCODE_CONNECTION_EXCEPTION),
errmsg("Copy failed on a data node")));
processed++;
-#else
- Form_pg_attribute *attr = tupDesc->attrs;
- Datum dist_col_value;
- bool dist_col_is_null;
- Oid dist_col_type;
- RemoteCopyData *remoteCopyState = cstate->remoteCopyState;
-
- if (remoteCopyState->idx_dist_by_col >= 0)
- {
- dist_col_value = values[remoteCopyState->idx_dist_by_col];
- dist_col_is_null = nulls[remoteCopyState->idx_dist_by_col];
- dist_col_type = attr[remoteCopyState->idx_dist_by_col]->atttypid;
- }
- else
- {
- /* We really don't care, since the table is not distributed */
- dist_col_value = (Datum) 0;
- dist_col_is_null = true;
- dist_col_type = UNKNOWNOID;
- }
-
- if (DataNodeCopyIn(cstate->line_buf.data,
- cstate->line_buf.len,
- GetRelationNodes(remoteCopyState->rel_loc,
- dist_col_value,
- dist_col_is_null,
- dist_col_type,
- RELATION_ACCESS_INSERT),
- remoteCopyState->connections))
- ereport(ERROR,
- (errcode(ERRCODE_CONNECTION_EXCEPTION),
- errmsg("Copy failed on a Datanode")));
- processed++;
-#endif
}
else
{
@@ -3169,13 +3096,9 @@ BeginCopyFrom(Relation rel,
tmp = htonl(tmp);
appendBinaryStringInfo(&cstate->line_buf, (char *) &tmp, 4);
-#ifdef XCP
if (DataNodeCopyInBinaryForAll(cstate->line_buf.data, 19,
getLocatorNodeCount(remoteCopyState->locator),
(PGXCNodeHandle **) getLocatorNodeMap(remoteCopyState->locator)))
-#else
- if (DataNodeCopyInBinaryForAll(cstate->line_buf.data, 19, remoteCopyState->connections))
-#endif
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
errmsg("invalid COPY file header (COPY SEND)")));
@@ -3646,16 +3569,8 @@ EndCopyFrom(CopyState cstate)
/* For PGXC related COPY, free also relation location data */
if (IS_PGXC_COORDINATOR && remoteCopyState->rel_loc)
{
-#ifdef XCP
DataNodeCopyFinish(getLocatorNodeCount(remoteCopyState->locator),
(PGXCNodeHandle **) getLocatorNodeMap(remoteCopyState->locator));
-#else
- bool replicated = remoteCopyState->rel_loc->locatorType == LOCATOR_TYPE_REPLICATED;
- DataNodeCopyFinish(
- remoteCopyState->connections,
- replicated ? PGXCNodeGetNodeId(primary_data_node, PGXC_NODE_DATANODE) : -1,
- replicated ? COMBINE_TYPE_SAME : COMBINE_TYPE_SUM);
-#endif
FreeRemoteCopyData(remoteCopyState);
}
#endif
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index c350737e76..2b1aa00f33 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -1466,17 +1466,6 @@ ExplainNode(PlanState *planstate, List *ancestors,
"Index Cond", planstate, ancestors, es);
break;
#ifdef PGXC
-#ifndef XCP
- case T_ModifyTable:
- {
- /* Remote query planning on DMLs */
- ModifyTable *mt = (ModifyTable *)plan;
- ListCell *elt;
- foreach(elt, mt->remote_plans)
- ExplainRemoteQuery((RemoteQuery *) lfirst(elt), planstate, ancestors, es);
- }
- break;
-#endif
case T_RemoteQuery:
/* Remote query */
ExplainRemoteQuery((RemoteQuery *)plan, planstate, ancestors, es);
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 7c55ccd68e..5ed6b47a3c 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -551,13 +551,11 @@ DefineIndex(Oid relationId,
{
IndexElem *key = (IndexElem *) lfirst(elem);
-#ifdef XCP
if (rel->rd_locator_info == NULL)
{
isSafe = true;
break;
}
-#endif
if (CheckLocalIndexColumn(rel->rd_locator_info->locatorType,
rel->rd_locator_info->partAttrName, key->name))
@@ -567,7 +565,6 @@ DefineIndex(Oid relationId,
}
}
if (!isSafe)
-#ifdef XCP
{
if (loose_constraints)
{
@@ -583,11 +580,6 @@ DefineIndex(Oid relationId,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
errmsg("Unique index of partitioned table must contain the hash/modulo distribution column.")));
}
-#else
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("Unique index of partitioned table must contain the hash/modulo distribution column.")));
-#endif
}
#endif
/*
diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c
index 948eab4803..61fe624894 100644
--- a/src/backend/commands/schemacmds.c
+++ b/src/backend/commands/schemacmds.c
@@ -182,13 +182,8 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString)
* if not done already.
*/
if (!sentToRemote)
-#ifdef XCP
parsetree_list = AddRemoteQueryNode(parsetree_list, queryString,
EXEC_ON_ALL_NODES);
-#else
- parsetree_list = AddRemoteQueryNode(parsetree_list, queryString,
- EXEC_ON_ALL_NODES, false);
-#endif
#endif
/*
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index 321aef818e..83056ae98d 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -747,25 +747,17 @@ nextval_internal(Oid relid)
page = BufferGetPage(buf);
#ifdef PGXC /* PGXC_COORD */
-#ifdef XCP
/* Allow nextval executed on datanodes */
if (!is_temp)
-#else
- if (IS_PGXC_COORDINATOR && !is_temp)
-#endif
{
-#ifdef XCP
int64 range = seq->cache_value; /* how many values to ask from GTM? */
int64 rangemax; /* the max value returned from the GTM for our request */
-#endif
char *seqname = GetGlobalSeqName(seqrel, NULL, NULL);
/*
* Above, we still use the page as a locking mechanism to handle
* concurrency
- */
-#ifdef XCP
- /*
+ *
* If the user has set a CACHE parameter, we use that. Else we pass in
* the SequenceRangeVal value
*/
@@ -823,9 +815,6 @@ nextval_internal(Oid relid)
}
result = (int64) GetNextValGTM(seqname, range, &rangemax);
-#else
- result = (int64) GetNextValGTM(seqname);
-#endif
pfree(seqname);
/* Update the on-disk data */
@@ -834,11 +823,7 @@ nextval_internal(Oid relid)
/* save info in local cache */
elm->last = result; /* last returned number */
-#ifdef XCP
elm->cached = rangemax; /* last fetched range max limit */
-#else
- elm->cached = result; /* last fetched number */
-#endif
elm->last_valid = true;
last_used_seq = elm;
@@ -1101,27 +1086,6 @@ currval_oid(PG_FUNCTION_ARGS)
}
#endif
-#ifndef XCP
-#ifdef PGXC
- if (IS_PGXC_COORDINATOR &&
- seqrel->rd_backend != MyBackendId)
- {
- char *seqname = GetGlobalSeqName(seqrel, NULL, NULL);
-
- result = (int64) GetCurrentValGTM(seqname);
- if (result < 0)
- ereport(ERROR,
- (errcode(ERRCODE_CONNECTION_FAILURE),
- errmsg("GTM error, could not obtain sequence value")));
- pfree(seqname);
- }
- else {
-#endif
- result = elm->last;
-#ifdef PGXC
- }
-#endif
-#endif
relation_close(seqrel, NoLock);
PG_RETURN_INT64(result);
@@ -1230,12 +1194,8 @@ do_setval(Oid relid, int64 next, bool iscalled)
}
#ifdef PGXC
-#ifdef XCP
/* Allow to execute on datanodes */
if (!is_temp)
-#else
- if (IS_PGXC_COORDINATOR && !is_temp)
-#endif
{
char *seqname = GetGlobalSeqName(seqrel, NULL, NULL);
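/*
 * A minimal, self-contained sketch of the sequence range caching shown in
 * nextval_internal() above: GetNextValGTM(seqname, range, &rangemax) appears
 * to request up to "range" values from the GTM, returning the first value
 * with rangemax set to the top of the granted range; later nextval calls are
 * then served from the local cache (elm->last / elm->cached) until it runs
 * out.  The allocator below is only a stand-in for the GTM, and all names
 * here are illustrative, not part of the source.
 */
#include <stdint.h>

typedef struct SeqCache
{
	int64_t		last;		/* last value handed out locally */
	int64_t		cached;		/* top of the range granted by the allocator */
	int			valid;
} SeqCache;

/* stand-in for GetNextValGTM(): grant a contiguous range from a global counter */
static int64_t
global_alloc_range(int64_t *global_next, int64_t range, int64_t *rangemax)
{
	int64_t		first = *global_next;

	*rangemax = first + range - 1;
	*global_next = *rangemax + 1;
	return first;
}

static int64_t
local_nextval(SeqCache *elm, int64_t *global_next, int64_t cache_size)
{
	if (!elm->valid || elm->last >= elm->cached)
	{
		/* local cache exhausted: fetch a fresh range, keep its upper bound */
		elm->last = global_alloc_range(global_next, cache_size, &elm->cached);
		elm->valid = 1;
	}
	else
		elm->last++;

	return elm->last;
}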
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index b4d44bc52b..4291106a28 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -728,12 +728,8 @@ DefineRelation(CreateStmt *stmt, char relkind, Oid ownerId,
* DISTRIBUTE BY clause is missing in the statement, the system
* should not try to find out the node list itself.
*/
-#ifdef XCP
if ((IS_PGXC_COORDINATOR && stmt->distributeby) ||
(isRestoreMode && stmt->distributeby != NULL))
-#else
- if (IS_PGXC_COORDINATOR && relkind == RELKIND_RELATION)
-#endif
{
AddRelationDistribution(relationId, stmt->distributeby,
stmt->subcluster, inheritOids, descriptor);
@@ -11601,15 +11597,9 @@ BuildRedistribCommands(Oid relid, List *subCmds)
/* Build relation node list for new locator info */
for (i = 0; i < new_num; i++)
-#ifdef XCP
newLocInfo->nodeList = lappend_int(newLocInfo->nodeList,
PGXCNodeGetNodeId(new_oid_array[i],
&node_type));
-#else
- newLocInfo->nodeList = lappend_int(newLocInfo->nodeList,
- PGXCNodeGetNodeId(new_oid_array[i],
- PGXC_NODE_DATANODE));
-#endif
/* Build the command tree for table redistribution */
PGXCRedistribCreateCommandList(redistribState, newLocInfo);
diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c
index a39005ddd8..7e040d9685 100644
--- a/src/backend/commands/view.c
+++ b/src/backend/commands/view.c
@@ -550,14 +550,6 @@ DefineView(ViewStmt *stmt, const char *queryString)
view->relname)));
}
-#ifdef PGXC
-#ifndef XCP
- /* In case view is temporary, be sure not to use 2PC on such relations */
- if (view->relpersistence == RELPERSISTENCE_TEMP)
- ExecSetTempObjectIncluded();
-#endif
-#endif
-
/*
* Create the view relation
*
diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c
index d66546b6da..ae1e2c9290 100644
--- a/src/backend/executor/execAmi.c
+++ b/src/backend/executor/execAmi.c
@@ -213,15 +213,9 @@ ExecReScan(PlanState *node)
break;
#ifdef PGXC
-#ifdef XCP
case T_RemoteSubplanState:
ExecReScanRemoteSubplan((RemoteSubplanState *) node);
break;
-#else
- case T_RemoteQueryState:
- ExecRemoteQueryReScan((RemoteQueryState *) node, node->ps_ExprContext);
- break;
-#endif
#endif
case T_CustomScanState:
ExecReScanCustomScan((CustomScanState *) node);
diff --git a/src/backend/executor/execCurrent.c b/src/backend/executor/execCurrent.c
index dcb778ad07..3dffcbeb00 100644
--- a/src/backend/executor/execCurrent.c
+++ b/src/backend/executor/execCurrent.c
@@ -272,16 +272,6 @@ search_plan_tree(PlanState *node, Oid table_oid)
return NULL;
switch (nodeTag(node))
{
-#ifdef PGXC
-#ifndef XCP
- case T_RemoteQueryState:
- {
- RemoteQueryState *rqs = (RemoteQueryState *) node;
- ScanState *sstate = &(rqs->ss);
- return sstate;
- }
-#endif
-#endif
/*
* Relation scan nodes can all be treated alike
*/
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index bae0792b4a..6cd78c6188 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -874,11 +874,6 @@ InitPlan(QueryDesc *queryDesc, int eflags)
estate->es_num_result_relations = numResultRelations;
/* es_result_relation_info is NULL except when within ModifyTable */
estate->es_result_relation_info = NULL;
-#ifdef PGXC
-#ifndef XCP
- estate->es_result_remoterel = NULL;
-#endif
-#endif
}
else
{
@@ -888,11 +883,6 @@ InitPlan(QueryDesc *queryDesc, int eflags)
estate->es_result_relations = NULL;
estate->es_num_result_relations = 0;
estate->es_result_relation_info = NULL;
-#ifdef PGXC
-#ifndef XCP
-estate->es_result_remoterel = NULL;
-#endif
-#endif
}
/*
@@ -2818,12 +2808,6 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
numResultRelations * sizeof(ResultRelInfo));
estate->es_result_relations = resultRelInfos;
estate->es_num_result_relations = numResultRelations;
-#ifdef PGXC
-#ifndef XCP
- /* XXX Check if this is OK */
- estate->es_result_remoterel = parentestate->es_result_remoterel;
-#endif
-#endif
}
/* es_result_relation_info must NOT be copied */
/* es_trig_target_relations must NOT be copied */
diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c
index 517ddce462..a4caae147c 100644
--- a/src/backend/executor/execTuples.c
+++ b/src/backend/executor/execTuples.c
@@ -128,13 +128,8 @@ MakeTupleTableSlot(void)
slot->tts_tupleDescriptor = NULL;
#ifdef PGXC
slot->tts_shouldFreeRow = false;
-#ifdef XCP
slot->tts_datarow = NULL;
slot->tts_drowcxt = NULL;
-#else
- slot->tts_dataRow = NULL;
- slot->tts_dataLen = -1;
-#endif
slot->tts_attinmeta = NULL;
#endif
slot->tts_mcxt = CurrentMemoryContext;
@@ -368,25 +363,15 @@ ExecStoreTuple(HeapTuple tuple,
if (slot->tts_shouldFreeMin)
heap_free_minimal_tuple(slot->tts_mintuple);
#ifdef PGXC
-#ifdef XCP
if (slot->tts_shouldFreeRow)
{
pfree(slot->tts_datarow);
if (slot->tts_drowcxt)
MemoryContextReset(slot->tts_drowcxt);
}
-#else
- if (slot->tts_shouldFreeRow)
- pfree(slot->tts_dataRow);
-#endif
slot->tts_shouldFreeRow = false;
-#ifdef XCP
slot->tts_datarow = NULL;
-#else
- slot->tts_dataRow = NULL;
- slot->tts_dataLen = -1;
-#endif
#endif
/*
@@ -450,25 +435,15 @@ ExecStoreMinimalTuple(MinimalTuple mtup,
if (slot->tts_shouldFreeMin)
heap_free_minimal_tuple(slot->tts_mintuple);
#ifdef PGXC
-#ifdef XCP
if (slot->tts_shouldFreeRow)
{
pfree(slot->tts_datarow);
if (slot->tts_drowcxt)
MemoryContextReset(slot->tts_drowcxt);
}
-#else
- if (slot->tts_shouldFreeRow)
- pfree(slot->tts_dataRow);
-#endif
slot->tts_shouldFreeRow = false;
-#ifdef XCP
slot->tts_datarow = NULL;
-#else
- slot->tts_dataRow = NULL;
- slot->tts_dataLen = -1;
-#endif
#endif
/*
@@ -522,21 +497,11 @@ ExecClearTuple(TupleTableSlot *slot) /* slot in which to store tuple */
if (slot->tts_shouldFreeMin)
heap_free_minimal_tuple(slot->tts_mintuple);
#ifdef PGXC
-#ifdef XCP
if (slot->tts_shouldFreeRow)
pfree(slot->tts_datarow);
-#else
- if (slot->tts_shouldFreeRow)
- pfree(slot->tts_dataRow);
-#endif
slot->tts_shouldFreeRow = false;
-#ifdef XCP
slot->tts_datarow = NULL;
-#else
- slot->tts_dataRow = NULL;
- slot->tts_dataLen = -1;
-#endif
#endif
slot->tts_tuple = NULL;
@@ -649,13 +614,8 @@ ExecCopySlotTuple(TupleTableSlot *slot)
/*
* Ensure values are extracted from data row to the Datum array
*/
-#ifdef XCP
if (slot->tts_datarow)
slot_getallattrs(slot);
-#else
- if (slot->tts_dataRow)
- slot_getallattrs(slot);
-#endif
#endif
/*
* Otherwise we need to build a tuple from the Datum array.
@@ -693,13 +653,8 @@ ExecCopySlotMinimalTuple(TupleTableSlot *slot)
/*
* Ensure values are extracted from data row to the Datum array
*/
-#ifdef XCP
if (slot->tts_datarow)
slot_getallattrs(slot);
-#else
- if (slot->tts_dataRow)
- slot_getallattrs(slot);
-#endif
#endif
/*
* Otherwise we need to build a tuple from the Datum array.
@@ -710,7 +665,6 @@ ExecCopySlotMinimalTuple(TupleTableSlot *slot)
}
#ifdef PGXC
-#ifdef XCP
/* --------------------------------
* ExecCopySlotDatarow
* Obtain a copy of a slot's data row. The copy is
@@ -810,89 +764,6 @@ ExecCopySlotDatarow(TupleTableSlot *slot, MemoryContext tmpcxt)
return datarow;
}
}
-#else
-/* --------------------------------
- * ExecCopySlotDatarow
- * Obtain a copy of a slot's data row. The copy is
- * palloc'd in the current memory context.
- * Pointer to the datarow is returned as a var parameter, function
- * returns the length of the data row
- * The slot itself is undisturbed
- * --------------------------------
- */
-int
-ExecCopySlotDatarow(TupleTableSlot *slot, char **datarow)
-{
- Assert(datarow);
-
- if (slot->tts_dataRow)
- {
- /* if we already have datarow make a copy */
- *datarow = (char *)palloc(slot->tts_dataLen);
- memcpy(*datarow, slot->tts_dataRow, slot->tts_dataLen);
- return slot->tts_dataLen;
- }
- else
- {
- TupleDesc tdesc = slot->tts_tupleDescriptor;
- StringInfoData buf;
- uint16 n16;
- int i;
-
- initStringInfo(&buf);
- /* Number of parameter values */
- n16 = htons(tdesc->natts);
- appendBinaryStringInfo(&buf, (char *) &n16, 2);
-
- /* ensure we have all values */
- slot_getallattrs(slot);
- for (i = 0; i < tdesc->natts; i++)
- {
- uint32 n32;
-
- if (slot->tts_isnull[i])
- {
- n32 = htonl(-1);
- appendBinaryStringInfo(&buf, (char *) &n32, 4);
- }
- else
- {
- Form_pg_attribute attr = tdesc->attrs[i];
- Oid typOutput;
- bool typIsVarlena;
- Datum pval;
- char *pstring;
- int len;
-
- /* Get info needed to output the value */
- getTypeOutputInfo(attr->atttypid, &typOutput, &typIsVarlena);
- /*
- * If we have a toasted datum, forcibly detoast it here to avoid
- * memory leakage inside the type's output routine.
- */
- if (typIsVarlena)
- pval = PointerGetDatum(PG_DETOAST_DATUM(slot->tts_values[i]));
- else
- pval = slot->tts_values[i];
-
- /* Convert Datum to string */
- pstring = OidOutputFunctionCall(typOutput, pval);
-
- /* copy data to the buffer */
- len = strlen(pstring);
- n32 = htonl(len);
- appendBinaryStringInfo(&buf, (char *) &n32, 4);
- appendBinaryStringInfo(&buf, pstring, len);
- }
- }
- /* copy data to the buffer */
- *datarow = palloc(buf.len);
- memcpy(*datarow, buf.data, buf.len);
- pfree(buf.data);
- return buf.len;
- }
-}
-#endif
#endif
/* --------------------------------
@@ -1076,14 +947,7 @@ ExecMaterializeSlot(TupleTableSlot *slot)
#ifdef PGXC
if (!slot->tts_shouldFreeRow)
- {
-#ifdef XCP
slot->tts_datarow = NULL;
-#else
- slot->tts_dataRow = NULL;
- slot->tts_dataLen = -1;
-#endif
- }
#endif
return slot->tts_tuple;
@@ -1676,7 +1540,6 @@ end_tup_output(TupOutputState *tstate)
*
* --------------------------------
*/
-#ifdef XCP
TupleTableSlot *
ExecStoreDataRowTuple(RemoteDataRow datarow,
TupleTableSlot *slot,
@@ -1727,60 +1590,4 @@ ExecStoreDataRowTuple(RemoteDataRow datarow,
return slot;
}
-#else
-TupleTableSlot *
-ExecStoreDataRowTuple(char *msg, size_t len, TupleTableSlot *slot,
- bool shouldFree)
-{
- /*
- * sanity checks
- */
- Assert(msg != NULL);
- Assert(len > 0);
- Assert(slot != NULL);
- Assert(slot->tts_tupleDescriptor != NULL);
-
- /*
- * Free any old physical tuple belonging to the slot.
- */
- if (slot->tts_shouldFree)
- heap_freetuple(slot->tts_tuple);
- if (slot->tts_shouldFreeMin)
- heap_free_minimal_tuple(slot->tts_mintuple);
- /*
- * if msg == slot->tts_dataRow then we would
-	 * free the dataRow in the slot, losing the contents in msg. It is safe
- * to reset shouldFreeRow, since it will be overwritten just below.
- */
- if (msg == slot->tts_dataRow)
- slot->tts_shouldFreeRow = false;
- if (slot->tts_shouldFreeRow)
- pfree(slot->tts_dataRow);
-
- /*
- * Drop the pin on the referenced buffer, if there is one.
- */
- if (BufferIsValid(slot->tts_buffer))
- ReleaseBuffer(slot->tts_buffer);
-
- slot->tts_buffer = InvalidBuffer;
-
- /*
- * Store the new tuple into the specified slot.
- */
- slot->tts_isempty = false;
- slot->tts_shouldFree = false;
- slot->tts_shouldFreeMin = false;
- slot->tts_shouldFreeRow = shouldFree;
- slot->tts_tuple = NULL;
- slot->tts_mintuple = NULL;
- slot->tts_dataRow = msg;
- slot->tts_dataLen = len;
-
- /* Mark extracted state invalid */
- slot->tts_nvalid = 0;
-
- return slot;
-}
-#endif
#endif
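/*
 * A minimal standalone sketch of the "data row" wire format visible in the
 * removed ExecCopySlotDatarow() variant above: a 2-byte attribute count in
 * network byte order, then for each attribute a 4-byte length (-1 for NULL)
 * followed by the textual value.  Plain malloc and C strings replace the
 * backend's StringInfo and type-output machinery here; the helper name is
 * illustrative, not part of the source.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <arpa/inet.h>

static char *
serialize_datarow(char **values, int natts, size_t *out_len)
{
	size_t		len = 2;
	char	   *buf,
			   *p;
	uint16_t	n16;
	int			i;

	for (i = 0; i < natts; i++)
		len += 4 + (values[i] ? strlen(values[i]) : 0);

	if ((buf = malloc(len)) == NULL)
		return NULL;

	p = buf;
	n16 = htons((uint16_t) natts);	/* number of attributes */
	memcpy(p, &n16, 2);
	p += 2;

	for (i = 0; i < natts; i++)
	{
		uint32_t	n32;

		if (values[i] == NULL)
		{
			n32 = htonl((uint32_t) -1);	/* NULL marker, as in the code above */
			memcpy(p, &n32, 4);
			p += 4;
		}
		else
		{
			size_t		vlen = strlen(values[i]);

			n32 = htonl((uint32_t) vlen);
			memcpy(p, &n32, 4);
			p += 4;
			memcpy(p, values[i], vlen);
			p += vlen;
		}
	}
	*out_len = len;
	return buf;
}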
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 5138640e24..985bbab9f7 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -113,11 +113,6 @@ CreateExecutorState(void)
estate->es_result_relations = NULL;
estate->es_num_result_relations = 0;
estate->es_result_relation_info = NULL;
-#ifdef PGXC
-#ifndef XCP
- estate->es_result_remoterel = NULL;
-#endif
-#endif
estate->es_trig_target_relations = NIL;
estate->es_trig_tuple_slot = NULL;
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 1e2d9c54d8..5404b79dd3 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -783,115 +783,6 @@ advance_transition_function(AggState *aggstate,
MemoryContextSwitchTo(oldContext);
}
-#ifdef PGXC
-#ifndef XCP
-/*
- * Given new input value(s), advance the collection function of an aggregate.
- *
- * The new values (and null flags) have been preloaded into argument positions
- * 1 and up in fcinfo, so that we needn't copy them again to pass to the
- * collection function. No other fields of fcinfo are assumed valid.
- *
- * It doesn't matter which memory context this is called in.
- */
-static void
-advance_collection_function(AggState *aggstate,
- AggStatePerAgg peraggstate,
- AggStatePerGroup pergroupstate,
- FunctionCallInfoData *fcinfo)
-{
- int numArguments = peraggstate->numArguments;
- Datum newVal;
- MemoryContext oldContext;
-
- Assert(OidIsValid(peraggstate->collectfn.fn_oid));
-
- /*
- * numArgument has to be one, since each Datanode is going to send a single
- * transition value
- */
- Assert(numArguments == 1);
- if (peraggstate->collectfn.fn_strict)
- {
- int cntArgs;
- /*
- * For a strict collectfn, nothing happens when there's a NULL input; we
- * just keep the prior transition value, transValue.
- */
- for (cntArgs = 1; cntArgs <= numArguments; cntArgs++)
- {
- if (fcinfo->argnull[cntArgs])
- return;
- }
- if (pergroupstate->noCollectValue)
- {
- /*
- * collection result has not been initialized. This is the first non-NULL
- * transition value. We use it as the initial value for collectValue.
- * Aggregate's transition and collection type are same
- * We must copy the datum into result if it is pass-by-ref. We
- * do not need to pfree the old result, since it's NULL.
- */
- oldContext = MemoryContextSwitchTo(aggstate->aggcontext);
- pergroupstate->collectValue = datumCopy(fcinfo->arg[1],
- peraggstate->transtypeByVal,
- peraggstate->transtypeLen);
- pergroupstate->collectValueIsNull = false;
- pergroupstate->noCollectValue = false;
- MemoryContextSwitchTo(oldContext);
- return;
- }
- if (pergroupstate->collectValueIsNull)
- {
- /*
- * Don't call a strict function with NULL inputs. Note it is
- * possible to get here despite the above tests, if the collectfn is
- * strict *and* returned a NULL on a prior cycle. If that happens
- * we will propagate the NULL all the way to the end.
- */
- return;
- }
- }
-
- /* We run the collection functions in per-input-tuple memory context */
- oldContext = MemoryContextSwitchTo(aggstate->tmpcontext->ecxt_per_tuple_memory);
-
- /*
- * OK to call the collection function
- */
- InitFunctionCallInfoData(*fcinfo, &(peraggstate->collectfn), 2,
- peraggstate->aggCollation, (void *)aggstate, NULL);
- fcinfo->arg[0] = pergroupstate->collectValue;
- fcinfo->argnull[0] = pergroupstate->collectValueIsNull;
- newVal = FunctionCallInvoke(fcinfo);
-
- /*
- * If pass-by-ref datatype, must copy the new value into aggcontext and
- * pfree the prior transValue. But if collectfn returned a pointer to its
- * first input, we don't need to do anything.
- */
- if (!peraggstate->transtypeByVal &&
- DatumGetPointer(newVal) != DatumGetPointer(pergroupstate->collectValue))
- {
- if (!fcinfo->isnull)
- {
- MemoryContextSwitchTo(aggstate->aggcontext);
- newVal = datumCopy(newVal,
- peraggstate->transtypeByVal,
- peraggstate->transtypeLen);
- }
- if (!pergroupstate->collectValueIsNull)
- pfree(DatumGetPointer(pergroupstate->collectValue));
- }
-
- pergroupstate->collectValue = newVal;
- pergroupstate->collectValueIsNull = fcinfo->isnull;
-
- MemoryContextSwitchTo(oldContext);
-}
-#endif /* XCP */
-#endif /* PGXC */
-
/*
* Advance all the aggregates for one input tuple. The input tuple
* has been stored in tmpcontext->ecxt_outertuple, so that it is accessible
@@ -1241,18 +1132,6 @@ finalize_aggregate(AggState *aggstate,
value = pergroupstate->transValue;
isnull = pergroupstate->transValueIsNull;
}
-#else
-#ifdef PGXC
- /*
- * if we skipped the transition phase, we have the collection result in the
- * collectValue, move it to transValue for finalization to work on
- */
- if (aggstate->skip_trans)
- {
- pergroupstate->transValue = pergroupstate->collectValue;
- pergroupstate->transValueIsNull = pergroupstate->collectValueIsNull;
- }
-#endif /* PGXC */
#endif /* XCP */
/*
@@ -2223,9 +2102,6 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
aggstate->pergroup = NULL;
aggstate->grp_firstTuple = NULL;
aggstate->hashtable = NULL;
-#ifndef XCP
- aggstate->skip_trans = node->skip_trans;
-#endif
aggstate->sort_in = NULL;
aggstate->sort_out = NULL;
@@ -2566,7 +2442,6 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
peraggstate->finalfn_oid = finalfn_oid = aggform->aggfinalfn;
#ifdef PGXC
peraggstate->collectfn_oid = collectfn_oid = aggform->aggcollectfn;
-#ifdef XCP
/*
* If preparing PHASE1 skip finalization step and return transmission
* value to be collected and finalized on master node.
@@ -2592,17 +2467,6 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
aggref->aggfilter = NULL;
aggrefstate->aggfilter = NULL;
}
-#else
- /*
- * For PGXC final and collection functions are used to combine results at Coordinator,
- * disable those for Datanode
- */
- if (IS_PGXC_DATANODE)
- {
- peraggstate->finalfn_oid = finalfn_oid = InvalidOid;
- peraggstate->collectfn_oid = collectfn_oid = InvalidOid;
- }
-#endif /* XCP */
#endif /* PGXC */
/* Check that aggregate owner has permission to call component fns */
{
@@ -2717,35 +2581,6 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
&collectfnexpr,
#endif
&finalfnexpr);
-#ifdef PGXC
-#ifndef XCP
- if (OidIsValid(collectfn_oid))
- {
- /* we expect final function expression to be NULL in call to
- * build_aggregate_fnexprs below, since InvalidOid is passed for
- * finalfn_oid argument. Use a dummy expression to accept that.
- */
- Expr *dummyexpr;
- /*
- * for XC, we need to setup the collection function expression as well.
- * Use build_aggregate_fnexpr() with invalid final function oid, and collection
- * function information instead of transition function information.
- * PGXCTODO: we should really be adding this step inside
- * build_aggregate_fnexprs() but this way it becomes easy to merge.
- */
- build_aggregate_fnexprs(&aggtranstype,
- 1,
- aggtranstype,
- aggref->aggtype,
- aggref->inputcollid,
- collectfn_oid,
- InvalidOid,
- &collectfnexpr,
- &dummyexpr);
- Assert(!dummyexpr);
- }
-#endif /* XCP */
-#endif /* PGXC */
/* set up infrastructure for calling the transfn and finalfn */
fmgr_info(transfn_oid, &peraggstate->transfn);
@@ -2816,7 +2651,6 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
* access it as a struct field. Must do it the hard way with
* SysCacheGetAttr.
*/
-#ifdef XCP
if (OidIsValid(aggcollecttype))
{
textInitVal = SysCacheGetAttr(AGGFNOID, aggTuple,
@@ -2843,17 +2677,6 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
aggref->aggfnoid)));
}
}
-#else
- textInitVal = SysCacheGetAttr(AGGFNOID, aggTuple,
- Anum_pg_aggregate_agginitcollect,
- &peraggstate->initCollectValueIsNull);
-
- if (peraggstate->initCollectValueIsNull)
- peraggstate->initCollectValue = (Datum) 0;
- else
- peraggstate->initCollectValue = GetAggInitVal(textInitVal,
- aggtranstype);
-#endif /* XCP */
#endif /* PGXC */
/*
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 101fdbddc7..81a18b315c 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -50,12 +50,6 @@
#include "foreign/fdwapi.h"
#include "miscadmin.h"
#include "nodes/nodeFuncs.h"
-#ifdef PGXC
-#ifndef XCP
-#include "pgxc/execRemote.h"
-#include "pgxc/pgxc.h"
-#endif
-#endif
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "utils/builtins.h"
@@ -240,11 +234,6 @@ ExecInsert(ModifyTableState *mtstate,
Relation resultRelationDesc;
Oid newId;
List *recheckIndexes = NIL;
-#ifdef PGXC
-#ifndef XCP
- PlanState *resultRemoteRel = NULL;
-#endif
-#endif
/*
* get the heap tuple out of the tuple table slot, making sure we have a
@@ -257,11 +246,6 @@ ExecInsert(ModifyTableState *mtstate,
*/
resultRelInfo = estate->es_result_relation_info;
resultRelationDesc = resultRelInfo->ri_RelationDesc;
-#ifdef PGXC
-#ifndef XCP
- resultRemoteRel = estate->es_result_remoterel;
-#endif
-#endif
/*
* If the result relation has OIDs, force the tuple's OID to zero so that
* heap_insert will assign a fresh OID. Usually the OID already will be
@@ -548,22 +532,12 @@ ExecDelete(ItemPointer tupleid,
HTSU_Result result;
HeapUpdateFailureData hufd;
TupleTableSlot *slot = NULL;
-#ifdef PGXC
-#ifndef XCP
- PlanState *resultRemoteRel = NULL;
-#endif
-#endif
/*
* get information on the (current) result relation
*/
resultRelInfo = estate->es_result_relation_info;
resultRelationDesc = resultRelInfo->ri_RelationDesc;
-#ifdef PGXC
-#ifndef XCP
- resultRemoteRel = estate->es_result_remoterel;
-#endif
-#endif
/* BEFORE ROW DELETE Triggers */
if (resultRelInfo->ri_TrigDesc &&
@@ -623,16 +597,6 @@ ExecDelete(ItemPointer tupleid,
* mode transactions.
*/
ldelete:;
-#ifdef PGXC
-#ifndef XCP
- if (IS_PGXC_COORDINATOR && resultRemoteRel)
- {
- ExecRemoteQueryStandard(resultRelationDesc, (RemoteQueryState *)resultRemoteRel, planSlot);
- }
- else
- {
-#endif
-#endif
result = heap_delete(resultRelationDesc, tupleid,
estate->es_output_cid,
estate->es_crosscheck_snapshot,
@@ -717,24 +681,11 @@ ldelete:;
* anyway, since the tuple is still visible to other transactions.
*/
-#ifdef PGXC
-#ifndef XCP
- }
-#endif
-#endif
}
if (canSetTag)
(estate->es_processed)++;
-#ifdef PGXC
-#ifndef XCP
- /*
- * Do not fire triggers on remote relation, it would not find old tuple
- */
- if (resultRemoteRel == NULL)
-#endif
-#endif
/* AFTER ROW DELETE Triggers */
ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple);
@@ -832,11 +783,6 @@ ExecUpdate(ItemPointer tupleid,
HTSU_Result result;
HeapUpdateFailureData hufd;
List *recheckIndexes = NIL;
-#ifdef PGXC
-#ifndef XCP
- PlanState *resultRemoteRel = NULL;
-#endif
-#endif
/*
* abort the operation if not running transactions
@@ -855,11 +801,6 @@ ExecUpdate(ItemPointer tupleid,
*/
resultRelInfo = estate->es_result_relation_info;
resultRelationDesc = resultRelInfo->ri_RelationDesc;
-#ifdef PGXC
-#ifndef XCP
- resultRemoteRel = estate->es_result_remoterel;
-#endif
-#endif
/* BEFORE ROW UPDATE Triggers */
if (resultRelInfo->ri_TrigDesc &&
@@ -937,16 +878,6 @@ lreplace:;
if (resultRelationDesc->rd_att->constr)
ExecConstraints(resultRelInfo, slot, estate);
-#ifdef PGXC
-#ifndef XCP
- if (IS_PGXC_COORDINATOR && resultRemoteRel)
- {
- ExecRemoteQueryStandard(resultRelationDesc, (RemoteQueryState *)resultRemoteRel, planSlot);
- }
- else
- {
-#endif
-#endif
/*
* replace the heap tuple
*
@@ -1056,14 +987,6 @@ lreplace:;
if (canSetTag)
(estate->es_processed)++;
-#ifdef PGXC
-#ifndef XCP
- /*
- * Do not fire triggers on remote relation, it would not find old tuple
- */
- if (resultRemoteRel == NULL)
-#endif
-#endif
/* AFTER ROW UPDATE Triggers */
ExecARUpdateTriggers(estate, resultRelInfo, tupleid, oldtuple, tuple,
recheckIndexes);
@@ -1343,12 +1266,6 @@ ExecModifyTable(ModifyTableState *node)
ResultRelInfo *saved_resultRelInfo;
ResultRelInfo *resultRelInfo;
PlanState *subplanstate;
-#ifdef PGXC
-#ifndef XCP
- PlanState *remoterelstate;
- PlanState *saved_resultRemoteRel;
-#endif
-#endif
JunkFilter *junkfilter;
TupleTableSlot *slot;
TupleTableSlot *planSlot;
@@ -1390,11 +1307,6 @@ ExecModifyTable(ModifyTableState *node)
/* Preload local variables */
resultRelInfo = node->resultRelInfo + node->mt_whichplan;
subplanstate = node->mt_plans[node->mt_whichplan];
-#ifdef PGXC
-#ifndef XCP
- remoterelstate = node->mt_remoterels[node->mt_whichplan];
-#endif
-#endif
junkfilter = resultRelInfo->ri_junkFilter;
/*
@@ -1405,18 +1317,8 @@ ExecModifyTable(ModifyTableState *node)
* CTE). So we have to save and restore the caller's value.
*/
saved_resultRelInfo = estate->es_result_relation_info;
-#ifdef PGXC
-#ifndef XCP
- saved_resultRemoteRel = estate->es_result_remoterel;
-#endif
-#endif
estate->es_result_relation_info = resultRelInfo;
-#ifdef PGXC
-#ifndef XCP
- estate->es_result_remoterel = remoterelstate;
-#endif
-#endif
/*
* Fetch rows from subplan(s), and execute the required table modification
@@ -1442,13 +1344,6 @@ ExecModifyTable(ModifyTableState *node)
{
resultRelInfo++;
subplanstate = node->mt_plans[node->mt_whichplan];
-#ifdef PGXC
-#ifndef XCP
- /* Move to next remote plan */
- estate->es_result_remoterel = node->mt_remoterels[node->mt_whichplan];
- remoterelstate = node->mt_plans[node->mt_whichplan];
-#endif
-#endif
junkfilter = resultRelInfo->ri_junkFilter;
estate->es_result_relation_info = resultRelInfo;
EvalPlanQualSetPlan(&node->mt_epqstate, subplanstate->plan,
@@ -1568,11 +1463,6 @@ ExecModifyTable(ModifyTableState *node)
/* Restore es_result_relation_info before exiting */
estate->es_result_relation_info = saved_resultRelInfo;
-#ifdef PGXC
-#ifndef XCP
- estate->es_result_remoterel = saved_resultRemoteRel;
-#endif
-#endif
/*
* We're done, but fire AFTER STATEMENT triggers before exiting.
@@ -1617,11 +1507,6 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
mtstate->mt_done = false;
mtstate->mt_plans = (PlanState **) palloc0(sizeof(PlanState *) * nplans);
-#ifdef PGXC
-#ifndef XCP
- mtstate->mt_remoterels = (PlanState **) palloc0(sizeof(PlanState *) * nplans);
-#endif
-#endif
mtstate->resultRelInfo = estate->es_result_relations + node->resultRelIndex;
mtstate->mt_arowmarks = (List **) palloc0(sizeof(List *) * nplans);
mtstate->mt_nplans = nplans;
@@ -1646,21 +1531,9 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
i = 0;
foreach(l, node->plans)
{
-#ifdef PGXC
-#ifndef XCP
- Plan *remoteplan = NULL;
-#endif
-#endif
subplan = (Plan *) lfirst(l);
-#ifdef PGXC
-#ifndef XCP
- if (node->remote_plans)
- remoteplan = list_nth(node->remote_plans, i);
-#endif
-#endif
-
/*
* Verify result relation is a valid target for the current operation
*/
@@ -1684,18 +1557,6 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
estate->es_result_relation_info = resultRelInfo;
mtstate->mt_plans[i] = ExecInitNode(subplan, estate, eflags);
-#ifdef PGXC
-#ifndef XCP
- if (remoteplan)
- {
- /*
- * Init the plan for the remote execution for this result rel. This is
- * used to execute data modification queries on the remote nodes
- */
- mtstate->mt_remoterels[i] = ExecInitNode(remoteplan, estate, eflags);
- }
-#endif
-#endif
/* Also let FDWs init themselves for foreign-table result rels */
if (resultRelInfo->ri_FdwRoutine != NULL &&
resultRelInfo->ri_FdwRoutine->BeginForeignModify != NULL)
diff --git a/src/backend/libpq/be-fsstubs.c b/src/backend/libpq/be-fsstubs.c
index 65de91a9cc..29aeaf5891 100644
--- a/src/backend/libpq/be-fsstubs.c
+++ b/src/backend/libpq/be-fsstubs.c
@@ -103,17 +103,10 @@ lo_open(PG_FUNCTION_ARGS)
int fd;
#ifdef PGXC
-#ifdef XCP
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("Postgres-XL does not yet support large objects"),
errdetail("The feature is not currently supported")));
-#else
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("Postgres-XC does not support large object yet"),
- errdetail("The feature is not currently supported")));
-#endif
#endif
#if FSDB
@@ -143,17 +136,10 @@ lo_close(PG_FUNCTION_ARGS)
int32 fd = PG_GETARG_INT32(0);
#ifdef PGXC
-#ifdef XCP
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("Postgres-XL does not yet support large objects"),
errdetail("The feature is not currently supported")));
-#else
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("Postgres-XC does not support large object yet"),
- errdetail("The feature is not currently supported")));
-#endif
#endif
if (fd < 0 || fd >= cookies_size || cookies[fd] == NULL)
@@ -188,17 +174,10 @@ lo_read(int fd, char *buf, int len)
LargeObjectDesc *lobj;
#ifdef PGXC
-#ifdef XCP
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("Postgres-XL does not yet support large objects"),
errdetail("The feature is not currently supported")));
-#else
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("Postgres-XC does not support large object yet"),
- errdetail("The feature is not currently supported")));
-#endif
#endif
if (fd < 0 || fd >= cookies_size || cookies[fd] == NULL)
@@ -236,17 +215,10 @@ lo_write(int fd, const char *buf, int len)
LargeObjectDesc *lobj;
#ifdef PGXC
-#ifdef XCP
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("Postgres-XL does not yet support large objects"),
errdetail("The feature is not currently supported")));
-#else
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("Postgres-XC does not support large object yet"),
- errdetail("The feature is not currently supported")));
-#endif
#endif
if (fd < 0 || fd >= cookies_size || cookies[fd] == NULL)
@@ -315,17 +287,10 @@ lo_lseek64(PG_FUNCTION_ARGS)
int64 status;
#ifdef PGXC
-#ifdef XCP
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("Postgres-XL does not yet support large objects"),
errdetail("The feature is not currently supported")));
-#else
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("Postgres-XC does not support large object yet"),
- errdetail("The feature is not currently supported")));
-#endif
#endif
if (fd < 0 || fd >= cookies_size || cookies[fd] == NULL)
@@ -344,17 +309,10 @@ lo_creat(PG_FUNCTION_ARGS)
Oid lobjId;
#ifdef PGXC
-#ifdef XCP
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("Postgres-XL does not yet support large objects"),
errdetail("The feature is not currently supported")));
-#else
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("Postgres-XC does not support large object yet"),
- errdetail("The feature is not currently supported")));
-#endif
#endif
/*
@@ -374,17 +332,10 @@ lo_create(PG_FUNCTION_ARGS)
Oid lobjId = PG_GETARG_OID(0);
#ifdef PGXC
-#ifdef XCP
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("Postgres-XL does not yet support large objects"),
errdetail("The feature is not currently supported")));
-#else
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("Postgres-XC does not support large object yet"),
- errdetail("The feature is not currently supported")));
-#endif
#endif
/*
@@ -405,17 +356,10 @@ lo_tell(PG_FUNCTION_ARGS)
int64 offset;
#ifdef PGXC
-#ifdef XCP
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("Postgres-XL does not yet support large objects"),
errdetail("The feature is not currently supported")));
-#else
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("Postgres-XC does not support large object yet"),
- errdetail("The feature is not currently supported")));
-#endif
#endif
if (fd < 0 || fd >= cookies_size || cookies[fd] == NULL)
@@ -457,17 +401,10 @@ lo_unlink(PG_FUNCTION_ARGS)
Oid lobjId = PG_GETARG_OID(0);
#ifdef PGXC
-#ifdef XCP
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("Postgres-XL does not yet support large objects"),
errdetail("The feature is not currently supported")));
-#else
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("Postgres-XC does not support large object yet"),
- errdetail("The feature is not currently supported")));
-#endif
#endif
/* Must be owner of the largeobject */
@@ -514,17 +451,10 @@ loread(PG_FUNCTION_ARGS)
int totalread;
#ifdef PGXC
-#ifdef XCP
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("Postgres-XL does not yet support large objects"),
errdetail("The feature is not currently supported")));
-#else
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("Postgres-XC does not support large object yet"),
- errdetail("The feature is not currently supported")));
-#endif
#endif
if (len < 0)
@@ -546,17 +476,10 @@ lowrite(PG_FUNCTION_ARGS)
int totalwritten;
#ifdef PGXC
-#ifdef XCP
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("Postgres-XL does not yet support large objects"),
errdetail("The feature is not currently supported")));
-#else
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("Postgres-XC does not support large object yet"),
- errdetail("The feature is not currently supported")));
-#endif
#endif
bytestowrite = VARSIZE(wbuf) - VARHDRSZ;
@@ -578,17 +501,10 @@ lo_import(PG_FUNCTION_ARGS)
text *filename = PG_GETARG_TEXT_PP(0);
#ifdef PGXC
-#ifdef XCP
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("Postgres-XL does not yet support large objects"),
errdetail("The feature is not currently supported")));
-#else
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("Postgres-XC does not support large object yet"),
- errdetail("The feature is not currently supported")));
-#endif
#endif
PG_RETURN_OID(lo_import_internal(filename, InvalidOid));
@@ -605,17 +521,10 @@ lo_import_with_oid(PG_FUNCTION_ARGS)
Oid oid = PG_GETARG_OID(1);
#ifdef PGXC
-#ifdef XCP
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("Postgres-XL does not yet support large objects"),
errdetail("The feature is not currently supported")));
-#else
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("Postgres-XC does not support large object yet"),
- errdetail("The feature is not currently supported")));
-#endif
#endif
PG_RETURN_OID(lo_import_internal(filename, oid));
@@ -699,17 +608,10 @@ lo_export(PG_FUNCTION_ARGS)
mode_t oumask;
#ifdef PGXC
-#ifdef XCP
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("Postgres-XL does not yet support large objects"),
errdetail("The feature is not currently supported")));
-#else
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("Postgres-XC does not support large object yet"),
- errdetail("The feature is not currently supported")));
-#endif
#endif
#ifndef ALLOW_DANGEROUS_LO_FUNCTIONS
@@ -774,17 +676,10 @@ lo_truncate_internal(int32 fd, int64 len)
LargeObjectDesc *lobj;
#ifdef PGXC
-#ifdef XCP
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("Postgres-XL does not yet support large objects"),
errdetail("The feature is not currently supported")));
-#else
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("Postgres-XC does not support large object yet"),
- errdetail("The feature is not currently supported")));
-#endif
#endif
if (fd < 0 || fd >= cookies_size || cookies[fd] == NULL)
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index 1be12a7c10..efd7088003 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -208,11 +208,6 @@ _copyModifyTable(const ModifyTable *from)
COPY_NODE_FIELD(fdwPrivLists);
COPY_NODE_FIELD(rowMarks);
COPY_SCALAR_FIELD(epqParam);
-#ifdef PGXC
-#ifndef XCP
- COPY_NODE_FIELD(remote_plans);
-#endif
-#endif
COPY_SCALAR_FIELD(onConflictAction);
COPY_NODE_FIELD(arbiterIndexes);
COPY_NODE_FIELD(onConflictSet);
@@ -1118,9 +1113,6 @@ _copyRemoteQuery(const RemoteQuery *from)
COPY_POINTER_FIELD(remote_param_types,
sizeof(from->remote_param_types[0]) * from->remote_num_params);
COPY_SCALAR_FIELD(exec_type);
-#ifndef XCP
- COPY_SCALAR_FIELD(is_temp);
-#endif
COPY_SCALAR_FIELD(reduce_level);
COPY_NODE_FIELD(base_tlist);
@@ -1382,12 +1374,6 @@ _copyAggref(const Aggref *from)
COPY_SCALAR_FIELD(aggfnoid);
COPY_SCALAR_FIELD(aggtype);
-#ifdef PGXC
-#ifndef XCP
- COPY_SCALAR_FIELD(aggtrantype);
- COPY_SCALAR_FIELD(agghas_collectfn);
-#endif /* XCP */
-#endif /* PGXC */
COPY_SCALAR_FIELD(aggcollid);
COPY_SCALAR_FIELD(inputcollid);
COPY_NODE_FIELD(aggdirectargs);
@@ -2290,12 +2276,6 @@ _copyRangeTblEntry(const RangeTblEntry *from)
COPY_SCALAR_FIELD(rtekind);
-#ifdef PGXC
-#ifndef XCP
- COPY_STRING_FIELD(relname);
-#endif
-#endif
-
COPY_SCALAR_FIELD(relid);
COPY_SCALAR_FIELD(relkind);
COPY_NODE_FIELD(tablesample);
@@ -2917,12 +2897,6 @@ _copyQuery(const Query *from)
COPY_NODE_FIELD(rowMarks);
COPY_NODE_FIELD(setOperations);
COPY_NODE_FIELD(constraintDeps);
-#ifdef PGXC
-#ifndef XCP
- COPY_STRING_FIELD(sql_statement);
- COPY_SCALAR_FIELD(is_ins_child_sel_parent);
-#endif
-#endif
return newnode;
}
@@ -4425,7 +4399,6 @@ _copyBarrierStmt(const BarrierStmt *from)
return newnode;
}
-#ifdef XCP
static PauseClusterStmt *
_copyPauseClusterStmt(const PauseClusterStmt *from)
{
@@ -4435,7 +4408,7 @@ _copyPauseClusterStmt(const PauseClusterStmt *from)
return newnode;
}
-#endif
+
/* ****************************************************************
* nodemgr.h copy functions
* ****************************************************************
@@ -5169,11 +5142,9 @@ copyObject(const void *from)
case T_BarrierStmt:
retval = _copyBarrierStmt(from);
break;
-#ifdef XCP
case T_PauseClusterStmt:
retval = _copyPauseClusterStmt(from);
break;
-#endif
case T_AlterNodeStmt:
retval = _copyAlterNodeStmt(from);
break;
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index ab88afbc28..bf5fc96ffb 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -196,17 +196,6 @@ static bool
_equalAggref(const Aggref *a, const Aggref *b)
{
COMPARE_SCALAR_FIELD(aggfnoid);
-#ifndef XCP
- /*
- * In XCP ignore aggtype difference because Phase 1 of aggregate have
- * aggtype set to aggtrantype
- */
- COMPARE_SCALAR_FIELD(aggtype);
-#ifdef PGXC
- COMPARE_SCALAR_FIELD(aggtrantype);
- COMPARE_SCALAR_FIELD(agghas_collectfn);
-#endif /* PGXC */
-#endif /* XCP */
COMPARE_SCALAR_FIELD(aggcollid);
COMPARE_SCALAR_FIELD(inputcollid);
COMPARE_NODE_FIELD(aggdirectargs);
@@ -937,12 +926,6 @@ _equalQuery(const Query *a, const Query *b)
COMPARE_NODE_FIELD(setOperations);
COMPARE_NODE_FIELD(constraintDeps);
-#ifdef PGXC
-#ifndef XCP
- COMPARE_SCALAR_FIELD(is_ins_child_sel_parent);
-#endif
-#endif
-
return true;
}
@@ -3292,11 +3275,9 @@ equal(const void *a, const void *b)
case T_BarrierStmt:
retval = _equalBarrierStmt(a, b);
break;
-#ifdef XCP
case T_PauseClusterStmt:
retval = _equalPauseClusterStmt(a, b);
break;
-#endif
case T_AlterNodeStmt:
retval = _equalAlterNodeStmt(a, b);
break;
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index 7c789ba7ad..84e25e16fd 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -539,11 +539,6 @@ _outModifyTable(StringInfo str, const ModifyTable *node)
WRITE_NODE_FIELD(fdwPrivLists);
WRITE_NODE_FIELD(rowMarks);
WRITE_INT_FIELD(epqParam);
-#ifdef PGXC
-#ifndef XCP
- WRITE_NODE_FIELD(remote_plans);
-#endif
-#endif
WRITE_ENUM_FIELD(onConflictAction, OnConflictAction);
#ifdef XCP
if (portable_output)
@@ -774,9 +769,6 @@ _outRemoteQuery(StringInfo str, const RemoteQuery *node)
appendStringInfo(str, " %d", node->remote_param_types[i]);
WRITE_ENUM_FIELD(exec_type, RemoteQueryExecType);
-#ifndef XCP
- WRITE_BOOL_FIELD(is_temp);
-#endif
WRITE_BOOL_FIELD(has_row_marks);
WRITE_BOOL_FIELD(has_ins_child_sel_parent);
}
@@ -1739,12 +1731,6 @@ _outAggref(StringInfo str, const Aggref *node)
else
#endif
WRITE_OID_FIELD(aggtype);
-#ifdef PGXC
-#ifndef XCP
- WRITE_OID_FIELD(aggtrantype);
- WRITE_BOOL_FIELD(agghas_collectfn);
-#endif /* XCP */
-#endif /* PGXC */
#ifdef XCP
if (portable_output)
WRITE_COLLID_FIELD(aggcollid);
@@ -2907,12 +2893,6 @@ _outPlannerInfo(StringInfo str, const PlannerInfo *node)
WRITE_BOOL_FIELD(hasHavingQual);
WRITE_BOOL_FIELD(hasPseudoConstantQuals);
WRITE_BOOL_FIELD(hasRecursion);
-#ifdef PGXC
-#ifndef XCP
- WRITE_INT_FIELD(rs_alias_index);
- WRITE_NODE_FIELD(xc_rowMarks);
-#endif /* XCP */
-#endif /* PGXC */
WRITE_INT_FIELD(wt_param_id);
WRITE_BITMAPSET_FIELD(curOuterRels);
WRITE_NODE_FIELD(curOuterParams);
@@ -3669,11 +3649,6 @@ _outRangeTblEntry(StringInfo str, const RangeTblEntry *node)
WRITE_NODE_FIELD(alias);
WRITE_NODE_FIELD(eref);
WRITE_ENUM_FIELD(rtekind, RTEKind);
-#ifdef PGXC
-#ifndef XCP
- WRITE_STRING_FIELD(relname);
-#endif
-#endif
switch (node->rtekind)
{
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index fae88fe109..36fe01dc39 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -926,12 +926,6 @@ _readAggref(void)
else
#endif
READ_OID_FIELD(aggtype);
-#ifdef PGXC
-#ifndef XCP
- READ_OID_FIELD(aggtrantype);
- READ_BOOL_FIELD(agghas_collectfn);
-#endif /* XCP */
-#endif /* PGXC */
#ifdef XCP
if (portable_input)
READ_COLLID_FIELD(aggcollid);
@@ -1130,18 +1124,6 @@ _readOpExpr(void)
#endif
READ_OID_FIELD(opfuncid);
-#ifndef XCP
- /*
- * The opfuncid is stored in the textual format primarily for debugging
- * and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
- * stored rules don't have hidden dependencies on operators' functions.
- * (We don't currently support an ALTER OPERATOR command, but might
- * someday.)
- */
- local_node->opfuncid = InvalidOid;
-#endif
-
#ifdef XCP
if (portable_input)
READ_TYPID_FIELD(opresulttype);
@@ -1188,18 +1170,6 @@ _readDistinctExpr(void)
#endif
READ_OID_FIELD(opfuncid);
-#ifndef XCP
- /*
- * The opfuncid is stored in the textual format primarily for debugging
- * and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
- * stored rules don't have hidden dependencies on operators' functions.
- * (We don't currently support an ALTER OPERATOR command, but might
- * someday.)
- */
- local_node->opfuncid = InvalidOid;
-#endif
-
#ifdef XCP
if (portable_input)
READ_TYPID_FIELD(opresulttype);
@@ -1305,18 +1275,6 @@ _readScalarArrayOpExpr(void)
else
#endif
READ_OID_FIELD(opfuncid);
-#ifndef XCP
- /*
- * The opfuncid is stored in the textual format primarily for debugging
- * and documentation reasons. We want to always read it as zero to force
- * it to be re-looked-up in the pg_operator entry. This ensures that
- * stored rules don't have hidden dependencies on operators' functions.
- * (We don't currently support an ALTER OPERATOR command, but might
- * someday.)
- */
- local_node->opfuncid = InvalidOid;
-#endif
-
READ_BOOL_FIELD(useOr);
#ifdef XCP
if (portable_input)
@@ -2057,11 +2015,6 @@ _readRangeTblEntry(void)
READ_NODE_FIELD(alias);
READ_NODE_FIELD(eref);
READ_ENUM_FIELD(rtekind, RTEKind);
-#ifdef PGXC
-#ifndef XCP
- READ_STRING_FIELD(relname);
-#endif
-#endif
switch (local_node->rtekind)
{
@@ -2200,12 +2153,6 @@ _readModifyTable(void)
READ_NODE_FIELD(fdwPrivLists);
READ_NODE_FIELD(rowMarks);
READ_INT_FIELD(epqParam);
-#ifdef PGXC
-#ifndef XCP
- READ_NODE_FIELD(remote_plans);
-#endif
-#endif
-
READ_ENUM_FIELD(onConflictAction, OnConflictAction);
#ifdef XCP
if (portable_input)
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index 5dab4d7a1c..91797b8f68 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -45,14 +45,8 @@
#include "parser/parse_clause.h"
#include "parser/parsetree.h"
#ifdef PGXC
-#ifdef XCP
#include "nodes/makefuncs.h"
#include "miscadmin.h"
-#else
-#include "catalog/pg_namespace.h"
-#include "catalog/pg_class.h"
-#include "pgxc/pgxc.h"
-#endif /* XCP */
#endif /* PGXC */
#include "rewrite/rewriteManip.h"
#include "utils/lsyscache.h"
@@ -478,23 +472,6 @@ set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
{
Relids required_outer;
-#ifdef PGXC
-#ifndef XCP
- /*
- * If we are on the Coordinator, we always want to use
- * the remote query path unless it is a pg_catalog table
- * or a sequence relation.
- */
- if (IS_PGXC_LOCAL_COORDINATOR &&
- get_rel_namespace(rte->relid) != PG_CATALOG_NAMESPACE &&
- get_rel_relkind(rte->relid) != RELKIND_SEQUENCE &&
- !root->parse->is_local)
- add_path(rel, create_remotequery_path(root, rel));
- else
- {
-#endif /* XCP */
-#endif /* PGXC */
-
/*
* We don't support pushing join clauses into the quals of a seqscan, but
* it could still have required parameterization due to LATERAL refs in
@@ -510,12 +487,6 @@ set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
/* Consider TID scans */
create_tidscan_paths(root, rel);
-#ifdef PGXC
-#ifndef XCP
- }
-#endif /* XCP */
-#endif /* PGXC */
-
}
/*
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index 778554ffbb..acb603f4de 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -61,13 +61,9 @@
#include "catalog/pg_proc.h"
#include "catalog/pg_type.h"
#include "executor/executor.h"
-#ifdef XCP
#include "access/gtm.h"
#include "catalog/pg_aggregate.h"
#include "parser/parse_coerce.h"
-#else
-#include "rewrite/rewriteManip.h"
-#endif /* XCP */
#include "commands/prepare.h"
#include "commands/tablecmds.h"
#endif /* PGXC */
@@ -116,32 +112,6 @@ static CteScan *create_ctescan_plan(PlannerInfo *root, Path *best_path,
List *tlist, List *scan_clauses);
static WorkTableScan *create_worktablescan_plan(PlannerInfo *root, Path *best_path,
List *tlist, List *scan_clauses);
-#ifdef PGXC
-#ifndef XCP
-static RowMarkClause *mk_row_mark_clause(PlanRowMark *prm);
-static bool compare_alias(Alias *a1, Alias *a2);
-static Plan *create_remotequery_plan(PlannerInfo *root, Path *best_path,
- List *tlist, List *scan_clauses);
-static Plan *create_remotejoin_plan(PlannerInfo *root, JoinPath *best_path,
- Plan *parent, Plan *outer_plan, Plan *inner_plan);
-static List *create_remote_target_list(PlannerInfo *root,
- StringInfo targets, List *out_tlist, List *in_tlist,
- char *out_alias, int out_index,
- char *in_alias, int in_index);
-static Alias *generate_remote_rte_alias(RangeTblEntry *rte, int varno,
- char *aliasname, int reduce_level);
-static void pgxc_locate_grouping_columns(PlannerInfo *root, List *tlist,
- AttrNumber *grpColIdx);
-static List *pgxc_process_grouping_targetlist(PlannerInfo *root,
- List **local_tlist);
-static List *pgxc_process_having_clause(PlannerInfo *root, List *remote_tlist,
- Node *havingQual, List **local_qual,
- List **remote_qual, bool *reduce_plan);
-static Expr *pgxc_set_en_expr(Oid tableoid, Index resultRelationIndex);
-static int pgxc_count_rowmarks_entries(List *rowMarks);
-static Oid *pgxc_build_rowmark_entries(List *rowMarks, List *rtable, Oid *types, int prepparams, int totparams);
-#endif /* XCP */
-#endif /* PGXC */
static ForeignScan *create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
List *tlist, List *scan_clauses);
static CustomScan *create_customscan_plan(PlannerInfo *root,
@@ -194,12 +164,6 @@ static CteScan *make_ctescan(List *qptlist, List *qpqual,
Index scanrelid, int ctePlanId, int cteParam);
static WorkTableScan *make_worktablescan(List *qptlist, List *qpqual,
Index scanrelid, int wtParam);
-#ifdef PGXC
-#ifndef XCP
-static RemoteQuery *make_remotequery(List *qptlist, List *qpqual,
- Index scanrelid);
-#endif
-#endif
static BitmapAnd *make_bitmap_and(List *bitmapplans);
static BitmapOr *make_bitmap_or(List *bitmapplans);
static NestLoop *make_nestloop(List *tlist,
@@ -245,16 +209,6 @@ static EquivalenceMember *find_ec_member_for_tle(EquivalenceClass *ec,
Relids relids);
static Material *make_material(Plan *lefttree);
-#ifdef PGXC
-#ifndef XCP
-static void findReferencedVars(List *parent_vars, RemoteQuery *plan, List **out_tlist, Relids *out_relids);
-static void create_remote_clause_expr(PlannerInfo *root, Plan *parent, StringInfo clauses,
- List *qual, RemoteQuery *scan);
-static void create_remote_expr(PlannerInfo *root, Plan *parent, StringInfo expr,
- Node *node, RemoteQuery *scan);
-#endif /* XCP */
-#endif /* PGXC */
-
#ifdef XCP
static int add_sort_column(AttrNumber colIdx, Oid sortOp, Oid coll,
bool nulls_first,int numCols, AttrNumber *sortColIdx,
@@ -332,11 +286,6 @@ create_plan_recurse(PlannerInfo *root, Path *best_path)
case T_CteScan:
case T_WorkTableScan:
case T_ForeignScan:
-#ifdef PGXC
-#ifndef XCP
- case T_RemoteQuery:
-#endif /* XCP */
-#endif /* PGXC */
case T_CustomScan:
plan = create_scan_plan(root, best_path);
break;
@@ -520,19 +469,6 @@ create_scan_plan(PlannerInfo *root, Path *best_path)
scan_clauses);
break;
-#ifdef PGXC
-#ifndef XCP
- case T_RemoteQuery:
- /* For RemoteQuery path always use relation tlist */
- tlist = build_relation_tlist(rel);
- plan = (Plan *) create_remotequery_plan(root,
- best_path,
- tlist,
- scan_clauses);
- break;
-#endif /* XCP */
-#endif /* PGXC */
-
case T_ForeignScan:
plan = (Plan *) create_foreignscan_plan(root,
(ForeignPath *) best_path,
@@ -803,641 +739,9 @@ create_join_plan(PlannerInfo *root, JoinPath *best_path)
get_actual_clauses(get_loc_restrictinfo(best_path))));
#endif
-#ifdef PGXC
-#ifndef XCP
- /*
- * Check if this join can be reduced to an equiv. remote scan node
- * This can only be executed on a remote Coordinator
- */
- if (IS_PGXC_LOCAL_COORDINATOR)
- plan = create_remotejoin_plan(root, best_path, plan, outer_plan, inner_plan);
-#endif /* XCP */
-#endif /* PGXC */
-
return plan;
}
-
-#ifdef PGXC
-#ifndef XCP
-/*
- * create_remotejoin_plan
- * check if the children plans involve remote entities from the same remote
- * node. If so, this join can be reduced to an equivalent remote scan plan
- * node
- *
- * RULES:
- *
- * * provide unique aliases to both inner and outer nodes to represent their
- * corresponding subqueries
- *
- * * identify target entries from both inner and outer that appear in the join
- * targetlist, only those need to be selected from these aliased subqueries
- *
- * * a join node has a joinqual list which represents the join condition. E.g.
- * SELECT * from emp e LEFT JOIN emp2 d ON e.x = d.x
- * Here the joinqual contains "e.x = d.x". If the joinqual itself has a local
- * dependency, e.g "e.x = localfunc(d.x)", then this join cannot be reduced
- *
- * * other than the joinqual, the join node can contain additional quals. Even
- * if they have any local dependencies, we can reduce the join and just
- * append these quals into the reduced remote scan node. We DO do a pass to
- * identify remote quals and ship those in the squery though
- *
- * * these quals (both joinqual and normal quals with no local dependencies)
- * need to be converted into expressions referring to the aliases assigned to
- * the nodes. These expressions will eventually become part of the squery of
- * the reduced remote scan node
- *
- * * the children remote scan nodes themselves can have local dependencies in
- * their quals (the remote ones are already part of the squery). We can still
- * reduce the join and just append these quals into the reduced remote scan
- * node
- *
- * * if we reached successfully so far, generate a new remote scan node with
- * this new squery generated using the aliased references
- *
- * One important point to note here about targetlists is that this function
- * does not set any DUMMY var references in the Var nodes appearing in it. It
- * follows the standard mechanism as is followed by other nodes. Similar to the
- * existing nodes, the references which point to DUMMY vars is done in
- * set_remote_references() function in set_plan_references phase at the fag
- * end. Avoiding such DUMMY references manipulations till the end also makes
- * this code a lot much readable and easier.
- */
-static Plan *
-create_remotejoin_plan(PlannerInfo *root, JoinPath *best_path, Plan *parent, Plan *outer_plan, Plan *inner_plan)
-{
- NestLoop *nest_parent;
- ExecNodes *join_exec_nodes;
- RemoteQuery *outer;
- RemoteQuery *inner;
-
- if (!enable_remotejoin)
- return parent;
-
- /* meh, what are these for :( */
- if (root->hasPseudoConstantQuals)
- return parent;
-
- /* do not optimize CURSOR based select statements */
- if (root->parse->rowMarks != NIL)
- return parent;
-
- /*
- * optimize only simple NestLoop joins for now. Other joins like Merge and
- * Hash can be reduced too. But they involve additional intermediate nodes
- * and we need to understand them a bit more as yet
- */
- if (!IsA(parent, NestLoop))
- return parent;
- else
- nest_parent = (NestLoop *)parent;
-
- if (!IsA(outer_plan, RemoteQuery) || !IsA(inner_plan, RemoteQuery))
- return parent;
-
- outer = (RemoteQuery *)outer_plan;
- inner = (RemoteQuery *)inner_plan;
-
- /* check if both the nodes qualify for reduction */
- if (!outer->scan.plan.qual && !inner->scan.plan.qual)
- {
- int i;
- List *rtable_list = NIL;
- List *parent_vars, *out_tlist = NIL, *in_tlist = NIL, *base_tlist;
- Relids out_relids = NULL, in_relids = NULL;
-
- /*
- * Check if both these plans are from the same remote node. If yes,
- * replace this JOIN along with it's two children with one equivalent
- * remote node
- */
-
- /*
- * Build up rtable for XC Walker
- * (was not sure I could trust this, but it seems to work in various cases)
- */
- for (i = 0; i < root->simple_rel_array_size; i++)
- {
- RangeTblEntry *rte = root->simple_rte_array[i];
-
- /* Check for NULL first, sometimes it is NULL at position 0 */
- if (rte)
- rtable_list = lappend(rtable_list, root->simple_rte_array[i]);
- }
- /*
- * Walk the left, right trees and identify which vars appear in the
- * parent targetlist, only those need to be selected. Note that
- * depending on whether the parent targetlist is top-level or
- * intermediate, the children vars may or may not be referenced
- * multiple times in it.
- */
- parent_vars = pull_var_clause((Node *)parent->targetlist,
- PVC_RECURSE_AGGREGATES,
- PVC_RECURSE_PLACEHOLDERS);
-
- findReferencedVars(parent_vars, outer, &out_tlist, &out_relids);
- findReferencedVars(parent_vars, inner, &in_tlist, &in_relids);
-
- join_exec_nodes = IsJoinReducible(inner, outer, in_relids, out_relids,
- &(nest_parent->join),
- best_path, root->parse->rtable);
- /* XXX Check if the join optimization is possible */
- if (join_exec_nodes)
- {
- RemoteQuery *result;
- Plan *result_plan;
- StringInfoData targets, clauses, scan_clauses, fromlist, join_condition;
- StringInfoData squery;
- ListCell *l;
- char in_alias[15], out_alias[15];
- bool use_where = false;
- Index dummy_rtindex;
- RangeTblEntry *dummy_rte;
- List *local_scan_clauses = NIL, *remote_scan_clauses = NIL;
- char *pname;
- List *colnames;
-
-
- /* KISS! As long as distinct aliases are provided for all the objects
- * involved in the query, the remote server should not complain. */
- sprintf(out_alias, "out_%d", root->rs_alias_index);
- sprintf(in_alias, "in_%d", root->rs_alias_index);
-
- /*
- * If the JOIN ON clause has a local dependency then we cannot ship
- * the join to the remote side at all, bail out immediately.
- */
- if (!pgxc_is_expr_shippable((Expr *)nest_parent->join.joinqual, NULL))
- {
- elog(DEBUG1, "cannot reduce: local dependencies in the joinqual");
- return parent;
- }
-
- /*
- * If the normal plan qual has local dependencies, the join can
- * still be shipped. Try harder to ship remote clauses out of the
- * entire list. These local quals will become part of the quals
- * list of the reduced remote scan node down later.
- */
- if (!pgxc_is_expr_shippable((Expr *)nest_parent->join.plan.qual, NULL))
- {
- elog(DEBUG1, "local dependencies in the join plan qual");
-
- /*
- * trawl through each entry and come up with remote and local
- * clauses... sigh
- */
- foreach(l, nest_parent->join.plan.qual)
- {
- Node *clause = lfirst(l);
-
- /*
- * Clauses that are shippable go into the remote scan clauses;
- * anything with a local dependency stays behind as a local
- * scan clause.
- */
- if (!pgxc_is_expr_shippable((Expr *)clause, NULL))
- local_scan_clauses = lappend(local_scan_clauses, clause);
- else
- remote_scan_clauses = lappend(remote_scan_clauses, clause);
- }
- }
- else
- {
- /*
- * there is no local bound clause, all the clauses are remote
- * scan clauses
- */
- remote_scan_clauses = nest_parent->join.plan.qual;
- }
-
- /* generate the tlist for the new RemoteScan node using out_tlist, in_tlist */
- initStringInfo(&targets);
- colnames = create_remote_target_list(root, &targets, out_tlist, in_tlist,
- out_alias, outer->reduce_level, in_alias, inner->reduce_level);
-
- /*
- * generate the fromlist now. The code has to appropriately mention
- * the JOIN type in the string being generated.
- */
- initStringInfo(&fromlist);
- appendStringInfo(&fromlist, " (%s) %s ",
- outer->sql_statement, quote_identifier(out_alias));
-
- use_where = false;
- switch (nest_parent->join.jointype)
- {
- case JOIN_INNER:
- pname = ", ";
- use_where = true;
- break;
- case JOIN_LEFT:
- pname = "LEFT JOIN";
- break;
- case JOIN_FULL:
- pname = "FULL JOIN";
- break;
- case JOIN_RIGHT:
- pname = "RIGHT JOIN";
- break;
- case JOIN_SEMI:
- case JOIN_ANTI:
- default:
- return parent;
- }
-
- /*
- * splendid! we can actually replace this join hierarchy with a
- * single RemoteScan node now. Start off by constructing the
- * appropriate new tlist and tupdescriptor
- */
- result = makeNode(RemoteQuery);
-
- /*
- * Save various information about the inner and the outer plans. We
- * may need this information later if more entries are added to it
- * as part of the remote expression optimization
- */
- result->read_only = true;
- result->inner_alias = pstrdup(in_alias);
- result->outer_alias = pstrdup(out_alias);
- result->inner_reduce_level = inner->reduce_level;
- result->outer_reduce_level = outer->reduce_level;
- result->inner_relids = in_relids;
- result->outer_relids = out_relids;
- result->inner_statement = pstrdup(inner->sql_statement);
- result->outer_statement = pstrdup(outer->sql_statement);
- result->join_condition = NULL;
- result->exec_nodes = join_exec_nodes;
- result->is_temp = inner->is_temp || outer->is_temp;
-
- appendStringInfo(&fromlist, " %s (%s) %s",
- pname, inner->sql_statement, quote_identifier(in_alias));
-
- /* generate join.joinqual remote clause string representation */
- initStringInfo(&clauses);
- if (nest_parent->join.joinqual != NIL)
- {
- create_remote_clause_expr(root, parent, &clauses,
- nest_parent->join.joinqual, result);
- }
-
- /* generate join.plan.qual remote clause string representation */
- initStringInfo(&scan_clauses);
- if (remote_scan_clauses != NIL)
- {
- create_remote_clause_expr(root, parent, &scan_clauses,
- remote_scan_clauses, result);
- }
-
- /*
- * set the base tlist of the involved base relations, useful in
- * set_plan_refs later. Additionally the tupledescs should be
- * generated using this base_tlist and not the parent targetlist.
- * This is because we want to take into account any additional
- * column references from the scan clauses too
- */
- base_tlist = add_to_flat_tlist(NIL, list_concat(out_tlist, in_tlist));
-
- /*
- * Create and append the dummy range table entry to the range table.
- * Note that this modifies the master copy the caller passed us, otherwise
- * e.g EXPLAIN VERBOSE will fail to find the rte the Vars built below refer
- * to.
- */
- dummy_rte = make_dummy_remote_rte("__REMOTE_JOIN_QUERY__",
- makeAlias("__REMOTE_JOIN_QUERY__", colnames));
- root->parse->rtable = lappend(root->parse->rtable, dummy_rte);
- dummy_rtindex = list_length(root->parse->rtable);
-
- result_plan = &result->scan.plan;
-
- /* The reduced node takes over the parent join's targetlist */
- result_plan->targetlist = parent->targetlist;
- result_plan->lefttree = NULL;
- result_plan->righttree = NULL;
- result->scan.scanrelid = dummy_rtindex;
-
- /* generate the squery for this node */
-
- /* NOTE: it's assumed that the remote_paramNums array is
- * filled in the same order as we create the query here.
- *
- * TODO: we need some way to ensure that the remote_paramNums
- * is filled in the same order as the order in which the clauses
- * are added in the query below.
- */
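- /*
- * For illustration only (aliases and columns hypothetical): with alias
- * index N the generated statement roughly takes the shape
- *   SELECT <aliased columns> FROM (<outer sql>) out_N, (<inner sql>) in_N
- *   WHERE <joinqual> AND <remote scan clauses>
- * with an explicit LEFT/RIGHT/FULL JOIN ... ON form used for outer joins.
- */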
- initStringInfo(&squery);
- appendStringInfo(&squery, "SELECT %s FROM %s", targets.data, fromlist.data);
-
- initStringInfo(&join_condition);
- if (clauses.data[0] != '\0')
- appendStringInfo(&join_condition, " %s %s", use_where? " WHERE " : " ON ", clauses.data);
-
- if (scan_clauses.data[0] != '\0')
- appendStringInfo(&join_condition, " %s %s", use_where? " AND " : " WHERE ", scan_clauses.data);
-
- if (join_condition.data[0] != '\0')
- appendStringInfoString(&squery, join_condition.data);
-
- result->sql_statement = squery.data;
- result->join_condition = join_condition.data;
- /* don't forget to increment the index for the next time around! */
- result->reduce_level = root->rs_alias_index++;
-
-
- /* set_plan_refs needs this later */
- result->base_tlist = base_tlist;
-
- /*
- * If there were any local scan clauses, attach them here. They can
- * come from the join node or from the remote scan nodes themselves.
- * Because of the processing done earlier in create_remotescan_plan,
- * any clauses still present are local ones and can therefore be
- * appended to result_plan->qual without re-checking for shippability.
- */
- result_plan->qual = list_concat(result_plan->qual, outer_plan->qual);
- result_plan->qual = list_concat(result_plan->qual, inner_plan->qual);
- result_plan->qual = list_concat(result_plan->qual, local_scan_clauses);
-
- /* we actually need not worry about costs since this is the final plan */
- result_plan->startup_cost = outer_plan->startup_cost;
- result_plan->total_cost = outer_plan->total_cost;
- result_plan->plan_rows = outer_plan->plan_rows;
- result_plan->plan_width = outer_plan->plan_width;
-
- return (Plan *)result_plan;
- }
- }
-
- return parent;
-}
-
-/*
- * Generate aliases for columns of remote tables using the
- * colname_varno_varattno_reduce_level nomenclature
- */
-static Alias *
-generate_remote_rte_alias(RangeTblEntry *rte, int varno, char *aliasname, int reduce_level)
-{
- int maxattrs;
- int varattno;
- List *colnames = NIL;
- StringInfo attr = makeStringInfo();
- Relation relation;
-
- if (rte->rtekind != RTE_RELATION)
- elog(ERROR, "called in improper context");
-
- relation = heap_open(rte->relid, AccessShareLock);
-
- maxattrs = RelationGetNumberOfAttributes(relation);
-
- for (varattno = 0; varattno < maxattrs; varattno++)
- {
- char *attname = get_rte_attribute_name(rte, varattno + 1);
-
- if (reduce_level == 0)
- {
- /*
- * Even if the reduce level is 0, we still need to copy column aliases
- * from the rte because we don't want to lose any user-supplied table
- * column aliases, if any are present.
- */
- colnames = lappend(colnames, makeString(pstrdup((attname))));
- }
- else
- {
- resetStringInfo(attr);
- appendStringInfo(attr, "%s_%d_%d_%d",
- attname, varno, varattno + 1, reduce_level);
- colnames = lappend(colnames, makeString(pstrdup(attr->data)));
- }
-
- }
-
- heap_close(relation, AccessShareLock);
-
- return makeAlias(aliasname, colnames);
-}
-
-/* create_remote_target_list
- * generate a targetlist using out_alias and in_alias appropriately. It is
- * possible that in case of multiple-hierarchy reduction, both sides can have
- * columns with the same name. E.g. consider the following:
- *
- * select * from emp e join emp f on e.x = f.x, emp g;
- *
- * If we simply used new_alias.columnname, it could easily clash with a
- * column of the same name from the same side of an already reduced join.
- * To avoid this, we generate unique column aliases using the following
- * convention:
- * colname_varno_varattno_reduce_level_index
- *
- * Each RemoteScan node carries its reduce_level index to indicate the
- * convention to adopt while referring to its columns. If the level is 0,
- * plain column names can be used because they will never clash at the
- * join level.
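- *
- * For example (illustrative values), column "x" of varno 2, varattno 3 at
- * reduce level 1 is referenced as "x_2_3_1".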
- */
-static List *
-create_remote_target_list(PlannerInfo *root, StringInfo targets, List *out_tlist, List *in_tlist,
- char *out_alias, int out_index, char *in_alias, int in_index)
-{
- int i = 0;
- ListCell *l;
- StringInfo attrname = makeStringInfo();
- bool add_null_target = true;
- List *colnames = NIL;
-
- foreach(l, out_tlist)
- {
- Var *var = (Var *) lfirst(l);
- RangeTblEntry *rte = planner_rt_fetch(var->varno, root);
- char *attname;
-
-
- if (i++ > 0)
- appendStringInfo(targets, ", ");
-
- attname = get_rte_attribute_name(rte, var->varattno);
-
- if (out_index)
- {
- resetStringInfo(attrname);
- /* varattno can be negative for sys attributes, hence the abs! */
- appendStringInfo(attrname, "%s_%d_%d_%d",
- attname, var->varno, abs(var->varattno), out_index);
- appendStringInfo(targets, "%s.%s",
- quote_identifier(out_alias), quote_identifier(attrname->data));
- }
- else
- appendStringInfo(targets, "%s.%s",
- quote_identifier(out_alias), quote_identifier(attname));
-
- /* generate the new alias now using root->rs_alias_index */
- resetStringInfo(attrname);
- appendStringInfo(attrname, "%s_%d_%d_%d",
- attname, var->varno, abs(var->varattno), root->rs_alias_index);
- appendStringInfo(targets, " AS %s", quote_identifier(attrname->data));
- colnames = lappend(colnames, makeString(pstrdup(attrname->data)));
- add_null_target = false;
- }
-
- foreach(l, in_tlist)
- {
- Var *var = (Var *) lfirst(l);
- RangeTblEntry *rte = planner_rt_fetch(var->varno, root);
- char *attname;
-
- if (i++ > 0)
- appendStringInfo(targets, ", ");
-
- attname = get_rte_attribute_name(rte, var->varattno);
-
- if (in_index)
- {
- resetStringInfo(attrname);
- /* varattno can be negative for sys attributes, hence the abs! */
- appendStringInfo(attrname, "%s_%d_%d_%d",
- attname, var->varno, abs(var->varattno), in_index);
- appendStringInfo(targets, "%s.%s",
- quote_identifier(in_alias), quote_identifier(attrname->data));
- }
- else
- appendStringInfo(targets, "%s.%s",
- quote_identifier(in_alias), quote_identifier(attname));
-
- /* generate the new alias now using root->rs_alias_index */
- resetStringInfo(attrname);
- appendStringInfo(attrname, "%s_%d_%d_%d",
- attname, var->varno, abs(var->varattno), root->rs_alias_index);
- appendStringInfo(targets, " AS %s", quote_identifier(attrname->data));
- colnames = lappend(colnames, makeString(pstrdup(attrname->data)));
- add_null_target = false;
- }
-
- /*
- * It's possible that in some cases, the targetlist might not refer to any
- * vars from the joined relations, eg.
- * select count(*) from t1, t2; select const from t1, t2; etc
- * For such cases just add a NULL selection into this targetlist
- */
- if (add_null_target)
- appendStringInfo(targets, " NULL ");
- return colnames;
-}
-
-/*
- * create_remote_clause_expr
- * generate a string to represent the clause list expression using out_alias
- * and in_alias references. This function does a cute hack by temporarily
- * modifying the rte->eref entries of the involved relations to point to
- * out_alias and in_alias appropriately. The deparse_expression call then
- * generates a string using these erefs which is exactly what is desired here.
- *
- * Additionally it creates aliases for the column references based on the
- * reduce_level values. This handles the case where both sides have
- * same-named columns.
- *
- * The function restores the eref and alias values to their original
- * contents after use.
- */
-static void
-create_remote_clause_expr(PlannerInfo *root, Plan *parent, StringInfo clauses,
- List *qual, RemoteQuery *scan)
-{
- Node *node = (Node *) make_ands_explicit(qual);
-
- return create_remote_expr(root, parent, clauses, node, scan);
-}
-
-static void
-create_remote_expr(PlannerInfo *root, Plan *parent, StringInfo expr,
- Node *node, RemoteQuery *scan)
-{
- List *context;
- List *leref = NIL;
- ListCell *cell;
- char *exprstr;
- int rtindex;
- Relids tmprelids, relids;
-
- relids = pull_varnos((Node *)node);
-
- tmprelids = bms_copy(relids);
-
- while ((rtindex = bms_first_member(tmprelids)) >= 0)
- {
- RangeTblEntry *rte = planner_rt_fetch(rtindex, root);
-
- /*
- * This rtindex should be a member of either out_relids or
- * in_relids and never both
- */
- if (bms_is_member(rtindex, scan->outer_relids) &&
- bms_is_member(rtindex, scan->inner_relids))
- elog(ERROR, "improper relid references in the join clause list");
-
- /*
- * save the current rte->eref and rte->alias values and stick in a new
- * one in the rte with the proper inner or outer alias
- */
- leref = lappend(leref, rte->eref);
- leref = lappend(leref, rte->alias);
-
- if (bms_is_member(rtindex, scan->outer_relids))
- {
- rte->eref = makeAlias(scan->outer_alias, NIL);
-
- /* attach proper column aliases.. */
- rte->alias = generate_remote_rte_alias(rte, rtindex,
- scan->outer_alias, scan->outer_reduce_level);
- }
- if (bms_is_member(rtindex, scan->inner_relids))
- {
- rte->eref = makeAlias(scan->inner_alias, NIL);
-
- /* attach proper column aliases.. */
- rte->alias = generate_remote_rte_alias(rte, rtindex,
- scan->inner_alias, scan->inner_reduce_level);
- }
- }
- bms_free(tmprelids);
-
- /* Set up deparsing context */
- context = deparse_context_for_plan((Node *) parent,
- NULL,
- root->parse->rtable);
-
- exprstr = deparse_expression(node, context, true, false);
-
- /* revert back the saved eref entries in the same order now! */
- cell = list_head(leref);
- tmprelids = bms_copy(relids);
- while ((rtindex = bms_first_member(tmprelids)) >= 0)
- {
- RangeTblEntry *rte = planner_rt_fetch(rtindex, root);
-
- Assert(cell != NULL);
-
- rte->eref = lfirst(cell);
- cell = lnext(cell);
-
- rte->alias = lfirst(cell);
- cell = lnext(cell);
- }
- bms_free(tmprelids);
-
- appendStringInfo(expr, " %s", exprstr);
- return;
-}
-#endif /* XCP */
-#endif /* PGXC */
-
/*
* create_append_plan
* Create an Append plan for 'best_path' and (recursively) plans
@@ -2968,335 +2272,6 @@ create_worktablescan_plan(PlannerInfo *root, Path *best_path,
return scan_plan;
}
-
-#ifdef PGXC
-#ifndef XCP
-/*
- * mk_row_mark_clause
- * Given a PlanRowMark, create a corresponding RowMarkClause
- */
-static RowMarkClause *
-mk_row_mark_clause(PlanRowMark *prm)
-{
- RowMarkClause *rmc;
-
- if (prm == NULL)
- return NULL;
-
- /* We are interested in either FOR UPDATE or FOR SHARE */
- if (prm->markType != ROW_MARK_EXCLUSIVE && prm->markType != ROW_MARK_SHARE)
- return NULL;
-
- rmc = makeNode(RowMarkClause);
-
- /* Copy rti as is from the PlanRowMark */
- rmc->rti = prm->rti;
-
- /* Assume FOR SHARE unless compelled FOR UPDATE */
- rmc->forUpdate = false;
- if (prm->markType == ROW_MARK_EXCLUSIVE)
- rmc->forUpdate = true;
-
- /* Copy noWait as is from the PlanRowMark */
- rmc->noWait = prm->noWait;
-
- /* true or false does not matter since we will use the result only while deparsing */
- rmc->pushedDown = false;
-
- return rmc;
-}
-
-/*
- * compare_alias
- * Compare two aliases
- */
-static bool
-compare_alias(Alias *a1, Alias *a2)
-{
- if (a1 == NULL && a2 == NULL)
- return true;
-
- if (a1 == NULL && a2 != NULL)
- return false;
-
- if (a2 == NULL && a1 != NULL)
- return false;
-
- if (strcmp(a1->aliasname, a2->aliasname) == 0)
- return true;
-
- return false;
-}
-
-/*
- * contains_only_vars(tlist)
- * Return true only if each element of tlist is a target entry having Var node
- * as its containing expression.
- */
-static bool
-contains_only_vars(List *tlist)
-{
- ListCell *l;
-
- foreach(l, (List *) tlist)
- {
- Node *tle = lfirst(l);
- if (nodeTag(tle) != T_TargetEntry)
- return false;
- else
- {
- Expr *expr = ((TargetEntry *) tle)->expr;
- if (nodeTag(expr) != T_Var)
- return false;
- }
- }
- return true;
-}
-
-/*
- * create_remotequery_plan
- * Returns a remotequery plan for the base relation scanned by 'best_path'
- * with restriction clauses 'scan_clauses' and targetlist 'tlist'.
- */
-static Plan *
-create_remotequery_plan(PlannerInfo *root, Path *best_path,
- List *tlist, List *scan_clauses)
-{
- RemoteQuery *scan_plan;
- Index scan_relid = best_path->parent->relid;
- RangeTblEntry *rte;
- List *remote_scan_clauses = NIL;
- List *local_scan_clauses = NIL;
- StringInfoData sql;
- Query *query;
- RangeTblRef *rtr;
- List *varlist;
- ListCell *varcell;
- Node *tmp_node;
- List *rmlist;
- List *tvarlist;
- bool tlist_is_simple;
- List *base_tlist; /* the target list representing the
- * result obtained from datanode
- */
- RangeTblEntry *dummy_rte; /* RTE for the remote query node being
- * added.
- */
- Index dummy_rtindex;
-
- Assert(scan_relid > 0);
- Assert(best_path->parent->rtekind == RTE_RELATION);
-
- /* Sort clauses into best execution order */
- scan_clauses = order_qual_clauses(root, scan_clauses);
- /* Reduce RestrictInfo list to bare expressions; ignore pseudoconstants */
- scan_clauses = extract_actual_clauses(scan_clauses, false);
-
- if (scan_clauses)
- {
- ListCell *l;
-
- foreach(l, (List *)scan_clauses)
- {
- Node *clause = lfirst(l);
-
- if (pgxc_is_expr_shippable((Expr *)clause, NULL))
- remote_scan_clauses = lappend(remote_scan_clauses, clause);
- else
- local_scan_clauses = lappend(local_scan_clauses, clause);
- }
- }
-
- /*
- * The target list passed in may not contain the Vars required for
- * evaluating the local quals. Add those Vars to the targetlist.
- */
- tlist = add_to_flat_tlist(tlist, copyObject(pull_var_clause((Node *)local_scan_clauses,
- PVC_RECURSE_AGGREGATES,
- PVC_RECURSE_PLACEHOLDERS)));
- tlist_is_simple = contains_only_vars(tlist);
-
- /*
- * Construct a Query structure for the query to be fired on the Datanodes
- * and deparse it. Fields not set remain memzero'ed as set by makeNode.
- */
- rte = rt_fetch(scan_relid, root->parse->rtable);
- Assert(rte->rtekind == RTE_RELATION);
- /* Make a copy of RTE to be included in the new query structure */
- rte = copyObject(rte);
- /* This RTE should appear in FROM clause of the SQL statement constructed */
- rte->inFromCl = true;
-
- query = makeNode(Query);
- query->commandType = CMD_SELECT;
- query->rtable = list_make1(rte);
- query->jointree = makeNode(FromExpr);
-
- rtr = makeNode(RangeTblRef);
- rtr->rtindex = list_length(query->rtable);
- /* There can be only one table */
- Assert(rtr->rtindex == 1);
-
- query->jointree->fromlist = list_make1(rtr);
- query->jointree->quals = (Node *)make_ands_explicit(copyObject(remote_scan_clauses));
-
- /*
- * RemoteQuery node cannot handle arbitrary expressions in the target list.
- * So if the target list has any elements that are not plain Vars, we need
- * to create a Result node above RemoteQuery, and assign a plain var tlist
- * in RemoteQuery node, and Result node will handle the expressions. So if
- * the passed-in tlist is not a simple vars tlist, derive one out of the
- * tlist.
- */
- if (tlist_is_simple)
- query->targetList = copyObject(tlist);
- else
- {
- tvarlist = copyObject(pull_var_clause((Node *)tlist,
- PVC_RECURSE_AGGREGATES,
- PVC_RECURSE_PLACEHOLDERS));
- query->targetList = add_to_flat_tlist(NIL, copyObject(tvarlist));
- }
-
- /*
- * We are going to change the Var nodes in the target list to be sent to the
- * datanode. We need the original tlist to establish the mapping of result
- * obtained from the datanode in this plan. It will be saved in
- * RemoteQuery->base_tlist. So, copy the target list before modifying it
- */
- base_tlist = copyObject(query->targetList);
-
- /*
- * Change the varno in Var nodes in the targetlist of the query to be shipped to the
- * Datanode to 1, to match the rtable in the query. Do the same for Var
- * nodes in quals.
- */
- varlist = list_concat(pull_var_clause((Node *)query->targetList,
- PVC_RECURSE_AGGREGATES,
- PVC_RECURSE_PLACEHOLDERS),
- pull_var_clause((Node *)query->jointree->quals,
- PVC_RECURSE_AGGREGATES,
- PVC_RECURSE_PLACEHOLDERS));
-
- foreach(varcell, varlist)
- {
- Var *var = lfirst(varcell);
- if (var->varno != scan_relid)
- elog(ERROR, "Single table scan can not handle vars from more than one relation");
- var->varno = rtr->rtindex;
- }
- list_free(varlist);
-
- /*
- * Call fix_scan_expr to fix the PlaceHolderVars. This step is not needed if
- * we construct the query at the time of execution.
- */
- tmp_node = pgxc_fix_scan_expr(root, (Node *)query->targetList, 0);
- Assert(!tmp_node || IsA(tmp_node, List));
- query->targetList = (List *)tmp_node;
- tmp_node = pgxc_fix_scan_expr(root, (Node *)query->jointree->quals, 0);
- query->jointree->quals = tmp_node;
-
- /*
- * Before deparsing the query we need to check whether there are any FOR UPDATE/SHARE clauses
- * in the query that we need to propagate to Datanodes
- */
- rmlist = NULL;
- if (root->xc_rowMarks != NULL)
- {
- ListCell *rmcell;
-
- foreach(rmcell, root->xc_rowMarks)
- {
- PlanRowMark *prm = lfirst(rmcell);
- RangeTblEntry *rte_in_rm;
-
- /*
- * One remote query node contains one table only, check to make sure that
- * this row mark clause is referring to the same table that this remote
- * query node is targeting.
- */
- rte_in_rm = rt_fetch(prm->rti, root->parse->rtable);
- if (rte_in_rm->relid == rte->relid && compare_alias(rte->alias, rte_in_rm->alias))
- {
- RowMarkClause *rmc;
-
- /*
- * Change the range table index in the row mark clause to 1
- * to match the rtable in the query
- */
- prm->rti = 1;
-
- /* Come up with a Row Mark Clause given a Plan Row Mark */
- rmc = mk_row_mark_clause(prm);
-
- if (rmc != NULL)
- {
- /* Add this row mark clause to the list to be added in the query to deparse */
- rmlist = lappend(rmlist, rmc);
-
- /*
- * Although a single table can have multiple row mark clauses, here we
- * will have only one plan row mark clause per table. The reason is that
- * we only deal with FOR UPDATE and FOR SHARE: if both are specified for
- * the same table, FOR UPDATE takes priority over FOR SHARE, so in effect
- * only one clause remains.
- */
- break;
- }
- }
- }
-
- /* copy the row mark clause list in the query to deparse */
- query->rowMarks = rmlist;
-
- /* If there is a row mark clause, set the flag for deparsing of the row mark clause */
- if (rmlist != NULL)
- query->hasForUpdate = true;
- }
- initStringInfo(&sql);
- deparse_query(query, &sql, NIL);
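-
- /*
- * At this point sql holds the deparsed single-table statement, e.g.
- * (hypothetical table and columns)
- *   SELECT a, b FROM public.t1 WHERE a = 1 FOR UPDATE
- * with the FOR UPDATE/SHARE part present only when row marks were propagated.
- */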
-
- if (rmlist != NULL)
- list_free_deep(rmlist);
-
- /*
- * Create and append the dummy range table entry to the range table.
- * Note that this modifies the master copy the caller passed us, otherwise
- * e.g EXPLAIN VERBOSE will fail to find the rte the Vars built below refer
- * to.
- */
- dummy_rte = make_dummy_remote_rte(get_rel_name(rte->relid),
- makeAlias("_REMOTE_TABLE_QUERY_", NIL));
- root->parse->rtable = lappend(root->parse->rtable, dummy_rte);
- dummy_rtindex = list_length(root->parse->rtable);
-
- scan_plan = make_remotequery(tlist, local_scan_clauses, dummy_rtindex);
-
- /* Track if the remote query involves a temporary object */
- scan_plan->is_temp = IsTempTable(rte->relid);
- scan_plan->read_only = (query->commandType == CMD_SELECT && !query->hasForUpdate);
- scan_plan->has_row_marks = query->hasForUpdate;
- scan_plan->sql_statement = sql.data;
- scan_plan->base_tlist = base_tlist;
- scan_plan->exec_nodes = GetRelationNodesByQuals(rte->relid, rtr->rtindex,
- query->jointree->quals,
- RELATION_ACCESS_READ);
- if (!scan_plan->exec_nodes)
- elog(ERROR, "No distribution information found for relid %d", rte->relid);
-
- copy_path_costsize(&scan_plan->scan.plan, best_path);
-
- /* PGXCTODO - get better estimates */
- scan_plan->scan.plan.plan_rows = 1000;
-
- scan_plan->has_ins_child_sel_parent = root->parse->is_ins_child_sel_parent;
-
- return (Plan *)scan_plan;
-}
-#endif /* XCP */
-#endif /* PGXC */
-
/*
* create_foreignscan_plan
* Returns a foreignscan plan for the relation scanned by 'best_path'
@@ -4967,29 +3942,6 @@ make_worktablescan(List *qptlist,
return node;
}
-#ifdef PGXC
-#ifndef XCP
-static RemoteQuery *
-make_remotequery(List *qptlist, List *qpqual, Index scanrelid)
-{
- RemoteQuery *node = makeNode(RemoteQuery);
- Plan *plan = &node->scan.plan;
-
- /* cost should be inserted by caller */
- plan->targetlist = qptlist;
- plan->qual = qpqual;
- plan->lefttree = NULL;
- plan->righttree = NULL;
- node->scan.scanrelid = scanrelid;
- node->read_only = true;
- node->has_row_marks = false;
-
- return node;
-}
-#endif /* XCP */
-#endif /* PGXC */
-
-
#ifdef XCP
/*
* make_remotesubplan
@@ -7407,1463 +6359,3 @@ get_internal_cursor(void)
}
#endif
-
-#ifdef PGXC
-#ifndef XCP
-/*
- * findReferencedVars()
- *
- * Constructs a list of those Vars in targetlist which are found in
- * parent_vars (in other words, the intersection of targetlist and
- * parent_vars). Returns a new list in *out_tlist and a bitmap of
- * those relids found in the result.
- *
- * Additionally, look at the qual references to other vars; those
- * also need to be selected.
- */
-static void
-findReferencedVars(List *parent_vars, RemoteQuery *plan, List **out_tlist, Relids *out_relids)
-{
- List *vars;
- Relids relids = NULL;
- List *tlist = NIL;
- ListCell *l;
-
- /* Pull vars from both the targetlist and the clauses attached to this plan */
- vars = pull_var_clause((Node *)plan->base_tlist,
- PVC_RECURSE_AGGREGATES,
- PVC_REJECT_PLACEHOLDERS);
-
- foreach(l, vars)
- {
- Var *var = lfirst(l);
-
- if (search_tlist_for_var(var, parent_vars))
- tlist = lappend(tlist, var);
-
- if (!bms_is_member(var->varno, relids))
- relids = bms_add_member(relids, var->varno);
- }
-
- /* Now consider the local quals */
- vars = pull_var_clause((Node *)plan->scan.plan.qual,
- PVC_RECURSE_AGGREGATES,
- PVC_REJECT_PLACEHOLDERS);
-
- foreach(l, vars)
- {
- Var *var = lfirst(l);
-
- if (search_tlist_for_var(var, tlist) == NULL)
- tlist = lappend(tlist, var);
-
- if (!bms_is_member(var->varno, relids))
- relids = bms_add_member(relids, var->varno);
- }
-
- *out_tlist = tlist;
- *out_relids = relids;
-}
-
-/*
- * create_remoteinsert_plan()
- *
- * For every target relation, add a remote query node to carry out remote
- * operations.
- */
-Plan *
-create_remoteinsert_plan(PlannerInfo *root, Plan *topplan)
-{
- ModifyTable *mt = (ModifyTable *)topplan;
- ListCell *l;
-
- /* We expect to work only on ModifyTable node */
- if (!IsA(topplan, ModifyTable))
- elog(ERROR, "Unexpected node type: %d", topplan->type);
-
- /*
- * For every result relation, build a remote plan to execute remote insert.
- */
- foreach(l, mt->resultRelations)
- {
- Index resultRelationIndex = lfirst_int(l);
- RangeTblEntry *ttab;
- RelationLocInfo *rel_loc_info;
- StringInfo buf, buf2;
- RemoteQuery *fstep;
- Oid nspid;
- char *nspname;
- int natts, att;
- Oid *att_types;
- char *relname;
- bool first_att_printed = false;
-
- ttab = rt_fetch(resultRelationIndex, root->parse->rtable);
-
- /* Bad relation ? */
- if (ttab == NULL || ttab->rtekind != RTE_RELATION)
- continue;
-
- /* Get location info of the target table */
- rel_loc_info = GetRelationLocInfo(ttab->relid);
- if (rel_loc_info == NULL)
- continue;
-
- /* For main string */
- buf = makeStringInfo();
- /* For values */
- buf2 = makeStringInfo();
-
- /* Compose INSERT FROM target_table */
- nspid = get_rel_namespace(ttab->relid);
- nspname = get_namespace_name(nspid);
- relname = get_rel_name(ttab->relid);
-
- /*
- * Do not qualify with namespace for TEMP tables. The schema name may
- * vary on each node
- */
- if (IsTempTable(ttab->relid))
- appendStringInfo(buf, "INSERT INTO %s (",
- quote_identifier(relname));
- else
- appendStringInfo(buf, "INSERT INTO %s.%s (", quote_identifier(nspname),
- quote_identifier(relname));
-
- fstep = make_remotequery(NIL, NIL, resultRelationIndex);
- fstep->is_temp = IsTempTable(ttab->relid);
-
- natts = get_relnatts(ttab->relid);
- att_types = (Oid *) palloc0 (sizeof (Oid) * natts);
-
- /*
- * Populate the column information
- */
- for (att = 1; att <= natts; att++)
- {
- HeapTuple tp;
-
- tp = SearchSysCache(ATTNUM,
- ObjectIdGetDatum(ttab->relid),
- Int16GetDatum(att),
- 0, 0);
- if (HeapTupleIsValid(tp))
- {
- Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp);
-
- /* Bypass dropped attributes in query */
- if (att_tup->attisdropped)
- {
- /* Dropped attributes are casted as int4 in prepared parameters */
- att_types[att - 1] = INT4OID;
- }
- else
- {
- /* Add comma before all except first attributes */
- if (first_att_printed)
- appendStringInfoString(buf, ", ");
-
- /* Build the value part, parameters are filled at run time */
- if (first_att_printed)
- appendStringInfoString(buf2, ", ");
-
- first_att_printed = true;
-
- /* Append column name */
- appendStringInfoString(buf, quote_identifier(NameStr(att_tup->attname)));
-
- /* Append value in string */
- appendStringInfo(buf2, "$%d", att);
-
- /* Assign parameter type */
- att_types[att - 1] = att_tup->atttypid;
- }
-
- ReleaseSysCache(tp);
- }
- else
- elog(ERROR, "cache lookup failed for attribute %d of relation %u",
- att, ttab->relid);
- }
-
- /* Gather the two strings */
- appendStringInfo(buf, ") VALUES (%s)", buf2->data);
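-
- /*
- * e.g. for a hypothetical relation the generated statement looks like
- *   INSERT INTO public.t1 (a, b) VALUES ($1, $2)
- * with the parameters bound at execution time.
- */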
-
- fstep->sql_statement = pstrdup(buf->data);
-
- /* Processed rows are counted by the main planner */
- fstep->combine_type = COMBINE_TYPE_NONE;
-
- fstep->read_only = false;
- fstep->exec_nodes = makeNode(ExecNodes);
- fstep->exec_nodes->baselocatortype = rel_loc_info->locatorType;
- fstep->exec_nodes->primarynodelist = NULL;
- fstep->exec_nodes->nodeList = rel_loc_info->nodeList;
- fstep->exec_nodes->en_relid = ttab->relid;
- fstep->exec_nodes->accesstype = RELATION_ACCESS_INSERT;
- fstep->exec_nodes->en_expr = pgxc_set_en_expr(ttab->relid, resultRelationIndex);
-
- SetRemoteStatementName((Plan *) fstep, NULL, natts, att_types, 0);
-
- /* Free everything */
- pfree(buf->data);
- pfree(buf);
- pfree(buf2->data);
- pfree(buf2);
-
- mt->remote_plans = lappend(mt->remote_plans, fstep);
- }
-
- return topplan;
-}
-
-
-/*
- * create_remoteupdate_plan()
- *
- * For every target relation, add a remote query node to carry out remote
- * operations.
- * WHERE and SET clauses are populated with the relation attributes.
- * The target list is used for the SET clause, completed with the expressions
- * already given (the non-junk expressions in the parse tree's target list).
- * The WHERE clause is completed with the other expressions in the target list,
- * those marked as junk during target list rewriting so that tuples can be
- * identified consistently on remote Coordinators. This target list is based on
- * information obtained from the inner plan, which should be generated by
- * create_remotequery_plan.
- */
-Plan *
-create_remoteupdate_plan(PlannerInfo *root, Plan *topplan)
-{
- ModifyTable *mt = (ModifyTable *)topplan;
- ListCell *l;
-
- /* We expect to work only on ModifyTable node */
- if (!IsA(topplan, ModifyTable))
- elog(ERROR, "Unexpected node type: %d", topplan->type);
-
- /*
- * For every result relation, build a remote plan to execute remote update.
- */
- foreach(l, mt->resultRelations)
- {
- Index resultRelationIndex = lfirst_int(l);
- Query *parse = root->parse;
- RangeTblEntry *ttab;
- RelationLocInfo *rel_loc_info;
- StringInfo buf, buf2;
- Oid nspid; /* Relation namespace Oid */
- char *nspname; /* Relation namespace name */
- Oid *param_types; /* Types of query parameters */
- bool is_set_printed = false; /* Control of SET generation */
- bool is_where_printed = false; /* Control of WHERE generation */
- RemoteQuery *fstep; /* Plan step generated */
- ListCell *elt;
- int count = 0, where_count = 1;
- int natts, count_prepparams, tot_prepparams;
- char *relname;
-
- ttab = rt_fetch(resultRelationIndex, parse->rtable);
-
- /* Bad relation ? */
- if (ttab == NULL || ttab->rtekind != RTE_RELATION)
- continue;
-
- relname = get_rel_name(ttab->relid);
-
- /* Get location info of the target table */
- rel_loc_info = GetRelationLocInfo(ttab->relid);
- if (rel_loc_info == NULL)
- continue;
-
- /* Create query buffers */
- buf = makeStringInfo(); /* For SET clause */
- buf2 = makeStringInfo(); /* For WHERE clause */
-
- /* Compose UPDATE target_table */
- natts = get_relnatts(ttab->relid);
- nspid = get_rel_namespace(ttab->relid);
- nspname = get_namespace_name(nspid);
-
- /*
- * Do not qualify with namespace for TEMP tables. The schema name may
- * vary on each node
- */
- if (IsTempTable(ttab->relid))
- appendStringInfo(buf, "UPDATE ONLY %s SET ",
- quote_identifier(relname));
- else
- appendStringInfo(buf, "UPDATE ONLY %s.%s SET ", quote_identifier(nspname),
- quote_identifier(relname));
-
- /*
- * Count the number of junk entries before setting the parameter type list.
- * This helps to know how many parameters belonging to the WHERE clause need
- * to be sent down via the extended query protocol.
- */
- foreach(elt, parse->targetList)
- {
- TargetEntry *tle = lfirst(elt);
- if (tle->resjunk)
- count++;
- }
- count_prepparams = natts + count;
- /* Count entries related to Rowmarks */
- tot_prepparams = count_prepparams + pgxc_count_rowmarks_entries(root->rowMarks);
-
- /* Then allocate the array for this purpose */
- param_types = (Oid *) palloc0(sizeof (Oid) * tot_prepparams);
-
- /*
- * Now build the query based on the target list. The SET clause is completed
- * by non-junk entries and the WHERE clause by junk entries used to uniquely
- * identify tuples on remote nodes.
- */
- foreach(elt, parse->targetList)
- {
- TargetEntry *tle = lfirst(elt);
-
- if (!tle->resjunk)
- {
- int attno = 0;
- int i;
-
- /* Add target list element to SET clause */
-
- /* Add comma before all except first attributes */
- if (!is_set_printed)
- is_set_printed = true;
- else
- appendStringInfoString(buf, ", ");
-
- /* We need first to find the position of this element in attribute list */
- for (i = 0; i < natts; i++)
- {
- if (strcmp(tle->resname,
- get_relid_attribute_name(ttab->relid, i + 1)) == 0)
- {
- attno = i + 1;
- break;
- }
- }
-
- /* Complete string */
- appendStringInfo(buf, "%s = $%d",
- tle->resname,
- attno);
-
- /* Set parameter type correctly */
- param_types[attno - 1] = exprType((Node *) tle->expr);
- }
- else
- {
- /* Set parameter type */
- param_types[natts + where_count - 1] = exprType((Node *) tle->expr);
- where_count++;
-
- /*
- * ctid and xc_node_id are sufficient to identify
- * remote tuple.
- */
- if (strcmp(tle->resname, "xc_node_id") != 0 &&
- strcmp(tle->resname, "ctid") != 0)
- continue;
-
- /* Set the clause if necessary */
- if (!is_where_printed)
- {
- is_where_printed = true;
- appendStringInfoString(buf2, " WHERE ");
- }
- else
- appendStringInfoString(buf2, "AND ");
-
- /* Complete string */
- appendStringInfo(buf2, "%s = $%d ",
- tle->resname,
- natts + where_count - 1);
- }
- }
-
- /*
- * Before finalizing the query, make sure there are no missing entries for
- * attributes. If there are, fill in the remaining holes; they are required to
- * ensure that the update is executed consistently.
- */
- for (count = 1; count <= natts; count++)
- {
- if (param_types[count - 1] == 0)
- {
- HeapTuple tp;
-
- tp = SearchSysCache(ATTNUM,
- ObjectIdGetDatum(ttab->relid),
- Int16GetDatum(count),
- 0, 0);
-
- if (HeapTupleIsValid(tp))
- {
- Form_pg_attribute att_saved = (Form_pg_attribute) GETSTRUCT(tp);
-
- /*
- * Set parameter type of attribute
- * Dropped columns are casted as int4
- */
- if (att_saved->attisdropped)
- param_types[count - 1] = INT4OID;
- else
- param_types[count - 1] = att_saved->atttypid;
- ReleaseSysCache(tp);
- }
- else
- elog(ERROR, "cache lookup failed for attribute %d of relation %u",
- count, ttab->relid);
- }
- }
-
- /*
- * The query needs to be completed by nullifying the non-parent entries
- * defined in RowMarks. This is essential for UPDATE queries running with child
- * entries as we need to bypass them correctly at executor level.
- */
- param_types = pgxc_build_rowmark_entries(root->rowMarks, parse->rtable, param_types,
- count_prepparams, tot_prepparams);
-
- /* Finish building the query by gathering SET and WHERE clauses */
- appendStringInfo(buf, "%s", buf2->data);
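-
- /*
- * e.g. for a hypothetical two-column relation the statement built here
- * looks like
- *   UPDATE ONLY public.t1 SET a = $1, b = $2 WHERE ctid = $3 AND xc_node_id = $4
- */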
-
- /* Finally build the final UPDATE step */
- fstep = make_remotequery(parse->targetList, NIL, resultRelationIndex);
- fstep->is_temp = IsTempTable(ttab->relid);
- fstep->sql_statement = pstrdup(buf->data);
- fstep->combine_type = COMBINE_TYPE_NONE;
-
- fstep->read_only = false;
- /*
- * Get the nodes to execute the query on. We will execute this query on
- * all nodes; the WHERE condition takes care of restricting the update to
- * the right tuples on each node.
- */
- fstep->exec_nodes = GetRelationNodes(rel_loc_info, 0, true, UNKNOWNOID, RELATION_ACCESS_UPDATE);
- fstep->exec_nodes->baselocatortype = rel_loc_info->locatorType;
- fstep->exec_nodes->en_relid = ttab->relid;
- fstep->exec_nodes->nodeList = rel_loc_info->nodeList;
- fstep->exec_nodes->accesstype = RELATION_ACCESS_UPDATE;
- fstep->exec_nodes->en_expr = pgxc_set_en_expr(ttab->relid, resultRelationIndex);
- SetRemoteStatementName((Plan *) fstep, NULL, tot_prepparams, param_types, 0);
- pfree(buf->data);
- pfree(buf2->data);
- pfree(buf);
- pfree(buf2);
-
- mt->remote_plans = lappend(mt->remote_plans, fstep);
- }
-
- return topplan;
-}
-
-/*
- * create_remotedelete_plan()
- *
- * For every target relation, add a remote query node to carry out remote
- * operations. The tuple to be deleted is selected based on the target
- * list of the given plan, generating a parameterized WHERE clause accordingly.
- */
-Plan *
-create_remotedelete_plan(PlannerInfo *root, Plan *topplan)
-{
- ModifyTable *mt = (ModifyTable *)topplan;
- ListCell *l;
-
- /* We expect to work only on ModifyTable node */
- if (!IsA(topplan, ModifyTable))
- elog(ERROR, "Unexpected node type: %d", topplan->type);
-
- /*
- * For every result relation, build a remote plan to execute remote delete.
- */
- foreach(l, mt->resultRelations)
- {
- Index resultRelationIndex = lfirst_int(l);
- Query *parse = root->parse;
- RangeTblEntry *ttab;
- RelationLocInfo *rel_loc_info;
- StringInfo buf;
- Oid nspid; /* Relation namespace Oid */
- char *nspname; /* Relation namespace name */
- int count_prepparams, tot_prepparams; /* Attribute used is CTID */
- Oid *param_types; /* Types of query parameters */
- RemoteQuery *fstep; /* Plan step generated */
- bool is_where_created = false;
- ListCell *elt;
- int count = 1;
- char *relname;
-
- ttab = rt_fetch(resultRelationIndex, parse->rtable);
-
- /* Bad relation ? */
- if (ttab == NULL || ttab->rtekind != RTE_RELATION)
- continue;
-
- /* Get location info of the target table */
- rel_loc_info = GetRelationLocInfo(ttab->relid);
- if (rel_loc_info == NULL)
- continue;
-
- /* Create query buffers */
- buf = makeStringInfo();
-
- /* Compose DELETE target_table */
- nspid = get_rel_namespace(ttab->relid);
- nspname = get_namespace_name(nspid);
- relname = get_rel_name(ttab->relid);
-
- /* Parameters are defined by target list */
- count_prepparams = list_length(parse->targetList);
-
- /* Count entries related to Rowmarks only if there are child relations here */
- if (list_length(mt->resultRelations) != 1)
- tot_prepparams = count_prepparams + pgxc_count_rowmarks_entries(root->rowMarks);
- else
- tot_prepparams = count_prepparams;
-
- param_types = (Oid *) palloc0(sizeof(Oid) * tot_prepparams);
-
- /*
- * Do not qualify with namespace for TEMP tables. The schema name may
- * vary on each node.
- */
- if (IsTempTable(ttab->relid))
- appendStringInfo(buf, "DELETE FROM ONLY %s ",
- quote_identifier(relname));
- else
- appendStringInfo(buf, "DELETE FROM ONLY %s.%s ", quote_identifier(nspname),
- quote_identifier(relname));
-
- /* Generate WHERE clause for each target list item */
- foreach(elt, parse->targetList)
- {
- TargetEntry *tle = lfirst(elt);
-
- /* Set up the parameter type */
- param_types[count - 1] = exprType((Node *) tle->expr);
- count++;
-
- /*
- * In WHERE clause, ctid and xc_node_id are
- * sufficient to fetch a tuple from remote node.
- */
- if (strcmp(tle->resname, "xc_node_id") != 0 &&
- strcmp(tle->resname, "ctid") != 0)
- continue;
-
- /* Set the clause if necessary */
- if (!is_where_created)
- {
- is_where_created = true;
- appendStringInfoString(buf, "WHERE ");
- }
- else
- appendStringInfoString(buf, "AND ");
-
- appendStringInfo(buf, "%s = $%d ",
- quote_identifier(tle->resname),
- count - 1);
- }
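-
- /*
- * e.g. for a hypothetical relation the statement built so far looks like
- *   DELETE FROM ONLY public.t1 WHERE ctid = $1 AND xc_node_id = $2
- */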
-
- /*
- * The query needs to be completed by nullifying the non-parent entries
- * defined in RowMarks. This is essential for DELETE queries running with child
- * entries, as we need to bypass them correctly at executor level.
- */
- param_types = pgxc_build_rowmark_entries(root->rowMarks, parse->rtable, param_types,
- count_prepparams, tot_prepparams);
-
- /* Finish by building the plan step */
- fstep = make_remotequery(parse->targetList, NIL, resultRelationIndex);
- fstep->is_temp = IsTempTable(ttab->relid);
- fstep->sql_statement = pstrdup(buf->data);
- fstep->combine_type = COMBINE_TYPE_NONE;
-
- fstep->read_only = false;
- /*
- * Get the nodes to execute the query on. We will execute this query on
- * all nodes; the WHERE condition takes care of restricting the delete to
- * the right tuples on each node.
- */
- fstep->exec_nodes = GetRelationNodes(rel_loc_info, 0, true, UNKNOWNOID,
- RELATION_ACCESS_UPDATE);
- fstep->exec_nodes->baselocatortype = rel_loc_info->locatorType;
- fstep->exec_nodes->en_relid = ttab->relid;
- fstep->exec_nodes->nodeList = rel_loc_info->nodeList;
- fstep->exec_nodes->accesstype = RELATION_ACCESS_UPDATE;
- SetRemoteStatementName((Plan *) fstep, NULL, tot_prepparams, param_types, 0);
- pfree(buf->data);
- pfree(buf);
-
- mt->remote_plans = lappend(mt->remote_plans, fstep);
- }
-
- return topplan;
-}
-
-
-/*
- * create_remotegrouping_plan
- * Check if the grouping and aggregates can be pushed down to the
- * Datanodes.
- * Right now we can push with following restrictions
- * 1. there are plain aggregates (no expressions involving aggregates) and/or
- * expressions in group by clauses
- * 2. No distinct or order by clauses
- * 3. No windowing clause
- * 4. No having clause
- *
- * Inputs
- * root - PlannerInfo root for this query
- * local_plan - local grouping plan produced by grouping_planner()
- *
- * PGXCTODO: work on reducing these restrictions as much as possible, or document
- * the reasons why we need them in these comments themselves. In the case of
- * replicated tables, we should be able to push the whole query to the Datanode
- * when there are no local clauses.
- */
-Plan *
-create_remotegrouping_plan(PlannerInfo *root, Plan *local_plan)
-{
- Query *query = root->parse;
- Sort *sort_plan;
- RemoteQuery *remote_scan; /* remote query in the passed in plan */
- RemoteQuery *remote_group; /* remote query after optimization */
- Plan *remote_group_plan; /* plan portion of remote_group */
- Plan *temp_plan;
- List *temp_vars; /* temporarily hold the VARs */
- List *temp_vartlist; /* temporarily hold tlist of VARs */
- ListCell *temp;
- StringInfo remote_targetlist;/* SELECT clause of remote query */
- StringInfo remote_sql_stmt;
- StringInfo groupby_clause; /* remote query GROUP BY */
- StringInfo orderby_clause; /* remote query ORDER BY */
- StringInfo remote_fromlist; /* remote query FROM */
- StringInfo in_alias;
- StringInfo having_clause; /* remote query HAVING clause */
- Relids in_relids; /* the list of Relids referenced by lefttree */
- Index dummy_rtindex;
- List *base_tlist;
- RangeTblEntry *dummy_rte;
- int numGroupCols;
- AttrNumber *grpColIdx;
- bool reduce_plan;
- List *remote_qual;
- List *local_qual;
-
- /* Remote grouping is not enabled, don't do anything */
- if (!enable_remotegroup)
- return local_plan;
- /*
- * We don't push aggregation and grouping to Datanodes, in case there are
- * windowing aggregates, distinct, having clause or sort clauses.
- */
- if (query->hasWindowFuncs ||
- query->distinctClause ||
- query->sortClause)
- return local_plan;
-
- /* for now only Agg/Group plans */
- if (local_plan && IsA(local_plan, Agg))
- {
- numGroupCols = ((Agg *)local_plan)->numCols;
- grpColIdx = ((Agg *)local_plan)->grpColIdx;
- }
- else if (local_plan && IsA(local_plan, Group))
- {
- numGroupCols = ((Group *)local_plan)->numCols;
- grpColIdx = ((Group *)local_plan)->grpColIdx;
- }
- else
- return local_plan;
-
- /*
- * We expect the plan tree to be Group/Agg->Sort->Result->Material->RemoteQuery;
- * the Result and Material nodes are optional. Sort is compulsory for Group but
- * not for Agg. Anything else is not handled right now.
- */
- temp_plan = local_plan->lefttree;
- remote_scan = NULL;
- sort_plan = NULL;
- if (temp_plan && IsA(temp_plan, Sort))
- {
- sort_plan = (Sort *)temp_plan;
- temp_plan = temp_plan->lefttree;
- }
- if (temp_plan && IsA(temp_plan, Result))
- temp_plan = temp_plan->lefttree;
- if (temp_plan && IsA(temp_plan, Material))
- temp_plan = temp_plan->lefttree;
- if (temp_plan && IsA(temp_plan, RemoteQuery))
- remote_scan = (RemoteQuery *)temp_plan;
-
- if (!remote_scan)
- return local_plan;
- /*
- * for Group plan we expect Sort under the Group, which is always the case,
- * the condition below is really for some possibly non-existent case.
- */
- if (IsA(local_plan, Group) && !sort_plan)
- return local_plan;
- /*
- * If the remote_scan has any quals on it, those need to be executed before
- * doing anything. Hence we won't be able to push any aggregates or grouping
- * to the Datanode.
- * If it has any SimpleSort in it, then sorting is intended to be applied
- * before doing anything. Hence can not push any aggregates or grouping to
- * the Datanode.
- */
- if (remote_scan->scan.plan.qual || remote_scan->sort)
- return local_plan;
-
- /*
- * grouping_planner may add a Sort node to sort the rows
- * based on the columns in the GROUP BY clause. Hence the columns in the Sort
- * node and those in the Group node should be the same. The columns are usually
- * in the same order in both nodes, hence check the equality in order. If this
- * condition fails, we cannot handle this plan for now.
- */
- if (sort_plan)
- {
- int cntCols;
- if (sort_plan->numCols != numGroupCols)
- return local_plan;
- for (cntCols = 0; cntCols < numGroupCols; cntCols++)
- {
- if (sort_plan->sortColIdx[cntCols] != grpColIdx[cntCols])
- return local_plan;
- }
- }
-
- /*
- * At last we find the plan underneath is reducible into a single
- * RemoteQuery node.
- */
-
- /* find all the relations referenced by targetlist of Grouping node */
- temp_vars = pull_var_clause((Node *)local_plan->targetlist,
- PVC_RECURSE_AGGREGATES,
- PVC_REJECT_PLACEHOLDERS);
- findReferencedVars(temp_vars, remote_scan, &temp_vartlist, &in_relids);
-
- /*
- * process the targetlist of the grouping plan, also construct the
- * targetlist of the query to be shipped to the remote side
- */
- base_tlist = pgxc_process_grouping_targetlist(root, &(local_plan->targetlist));
- /*
- * If we cannot construct a targetlist shippable to the Datanode, resort to
- * the plan created by grouping_planner().
- */
- if (!base_tlist)
- return local_plan;
-
- base_tlist = pgxc_process_having_clause(root, base_tlist, query->havingQual,
- &local_qual, &remote_qual, &reduce_plan);
- /*
- * Because of HAVING clause, we can not push the aggregates and GROUP BY
- * clause to the Datanode. Resort to the plan created by grouping planner.
- */
- if (!reduce_plan)
- return local_plan;
- Assert(base_tlist);
-
- /*
- * We are now ready to create the RemoteQuery node to push the query to
- * Datanode.
- * 1. Create a remote query node reflecting the query to be pushed to the
- * Datanode.
- * 2. Modify the Grouping node passed in, to accept the results sent by the
- * Datanodes, then group and aggregate them, if needed.
- */
- remote_targetlist = makeStringInfo();
- remote_sql_stmt = makeStringInfo();
- groupby_clause = makeStringInfo();
- orderby_clause = makeStringInfo();
- remote_fromlist = makeStringInfo();
- in_alias = makeStringInfo();
- having_clause = makeStringInfo();
-
- appendStringInfo(in_alias, "%s_%d", "group", root->rs_alias_index);
-
- /*
- * Build partial RemoteQuery node to be used for creating the Select clause
- * to be sent to the remote node. Rest of the node will be built later
- */
- remote_group = makeNode(RemoteQuery);
-
- /*
- * Save information about the plan we are reducing.
- * We may need this information later if more entries are added to it
- * as part of the remote expression optimization.
- */
- remote_group->inner_alias = pstrdup(in_alias->data);
- remote_group->inner_reduce_level = remote_scan->reduce_level;
- remote_group->inner_relids = in_relids;
- remote_group->inner_statement = pstrdup(remote_scan->sql_statement);
- remote_group->exec_nodes = remote_scan->exec_nodes;
- /* Don't forget to increment the index for the next time around! */
- remote_group->reduce_level = root->rs_alias_index++;
- /* Remember if the remote query is accessing a temporary object */
- remote_group->is_temp = remote_scan->is_temp;
-
- /* Generate the select clause of the remote query */
- appendStringInfoString(remote_targetlist, "SELECT");
- foreach (temp, base_tlist)
- {
- TargetEntry *tle = lfirst(temp);
- Node *expr = (Node *)tle->expr;
-
- create_remote_expr(root, local_plan, remote_targetlist, expr, remote_group);
-
- /* If this is not last target entry, add a comma */
- if (lnext(temp))
- appendStringInfoString(remote_targetlist, ",");
- }
-
- /* Generate the from clause of the remote query */
- appendStringInfo(remote_fromlist, " FROM (%s) %s",
- remote_group->inner_statement, remote_group->inner_alias);
-
- /*
- * Generate group by clause for the remote query and recompute the group by
- * column locations. We want the tuples from remote node to be ordered by
- * the grouping columns so that ExecGroup can work without any modification,
- * hence create a SimpleSort structure to be added to RemoteQuery (which
- * will merge the sorted results and present them to the Group node in
- * sorted order).
- */
- if (query->groupClause)
- {
- int cntCols;
- char *sep;
-
- /*
- * Recompute the column ids of the grouping columns: the group column
- * indexes computed earlier point into the targetlists of the scan plans
- * under this node, but now they will point into the targetlist of the new
- * RemoteQuery, hence they need to be recomputed.
- */
- pgxc_locate_grouping_columns(root, base_tlist, grpColIdx);
-
- appendStringInfoString(groupby_clause, "GROUP BY ");
- sep = "";
- for (cntCols = 0; cntCols < numGroupCols; cntCols++)
- {
- appendStringInfo(groupby_clause, "%s%d", sep, grpColIdx[cntCols]);
- sep = ", ";
- }
- if (sort_plan)
- {
- SimpleSort *remote_sort = makeNode(SimpleSort);
- /*
- * reuse the arrays allocated in sort_plan to create SimpleSort
- * structure. sort_plan is useless henceforth.
- */
- remote_sort->numCols = sort_plan->numCols;
- remote_sort->sortColIdx = sort_plan->sortColIdx;
- remote_sort->sortOperators = sort_plan->sortOperators;
- remote_sort->sortCollations = sort_plan->collations;
- remote_sort->nullsFirst = sort_plan->nullsFirst;
- appendStringInfoString(orderby_clause, "ORDER BY ");
- sep = "";
- for (cntCols = 0; cntCols < remote_sort->numCols; cntCols++)
- {
- remote_sort->sortColIdx[cntCols] = grpColIdx[cntCols];
- appendStringInfo(orderby_clause, "%s%d", sep,
- remote_sort->sortColIdx[cntCols]);
- sep = ", ";
- }
- remote_group->sort = remote_sort;
- }
- }
-
- if (remote_qual)
- {
- appendStringInfoString(having_clause, "HAVING ");
- create_remote_clause_expr(root, local_plan, having_clause, remote_qual,
- remote_group);
- }
-
- /* Generate the remote sql statement from the pieces */
- appendStringInfo(remote_sql_stmt, "%s %s %s %s %s", remote_targetlist->data,
- remote_fromlist->data, groupby_clause->data,
- orderby_clause->data, having_clause->data);
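- /*
- * For illustration (hypothetical columns): the assembled statement looks like
- *   SELECT sum(a_1_1_1), b_1_2_1 FROM (<scan sql>) group_1 GROUP BY 2 ORDER BY 2
- * with a HAVING clause appended when a shippable one exists.
- */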
- /*
- * Create a dummy RTE for the remote query being created. Append the dummy
- * range table entry to the range table. Note that this modifies the master
- * copy the caller passed us, otherwise e.g EXPLAIN VERBOSE will fail to
- * find the rte the Vars built below refer to. Also create the tuple
- * descriptor for the result of this query from the base_tlist (targetlist
- * we used to generate the remote node query).
- */
- dummy_rte = make_dummy_remote_rte("__REMOTE_GROUP_QUERY__",
- makeAlias("__REMOTE_GROUP_QUERY__", NIL));
- /* Rest will be zeroed out in makeNode() */
- root->parse->rtable = lappend(root->parse->rtable, dummy_rte);
- dummy_rtindex = list_length(root->parse->rtable);
-
- /* Build rest of the RemoteQuery node and the plan there */
- remote_group_plan = &remote_group->scan.plan;
-
- /* The join targetlist becomes this node's tlist */
- remote_group_plan->targetlist = base_tlist;
- remote_group_plan->lefttree = NULL;
- remote_group_plan->righttree = NULL;
- remote_group->scan.scanrelid = dummy_rtindex;
- remote_group->sql_statement = remote_sql_stmt->data;
-
- /* set_plan_refs needs this later */
- remote_group->read_only = (query->commandType == CMD_SELECT && !query->hasForUpdate);
- remote_group->has_row_marks = query->hasForUpdate;
- remote_group->base_tlist = base_tlist;
-
- /* we actually need not worry about costs since this is the final plan */
- remote_group_plan->startup_cost = remote_scan->scan.plan.startup_cost;
- remote_group_plan->total_cost = remote_scan->scan.plan.total_cost;
- remote_group_plan->plan_rows = remote_scan->scan.plan.plan_rows;
- remote_group_plan->plan_width = remote_scan->scan.plan.plan_width;
-
- /*
- * Modify the passed in grouping plan according to the remote query we built
- * Materialization is always needed for RemoteQuery in case we need to restart
- * the scan.
- */
- local_plan->lefttree = remote_group_plan;
- local_plan->qual = local_qual;
- /* indicate that we should apply collection function directly */
- if (IsA(local_plan, Agg))
- ((Agg *)local_plan)->skip_trans = true;
-
- return local_plan;
-}
-
-/*
- * pgxc_locate_grouping_columns
- * Locates the grouping clauses in the given target list. This is very similar
- * to locate_grouping_columns except that there is only one target list to
- * search into.
- * PGXCTODO: Can we reuse locate_grouping_columns() instead of writing this
- * function? But this function is optimized to search in the same target list.
- */
-static void
-pgxc_locate_grouping_columns(PlannerInfo *root, List *tlist,
- AttrNumber *groupColIdx)
-{
- int keyno = 0;
- ListCell *gl;
-
- /*
- * No work unless grouping.
- */
- if (!root->parse->groupClause)
- {
- Assert(groupColIdx == NULL);
- return;
- }
- Assert(groupColIdx != NULL);
-
- foreach(gl, root->parse->groupClause)
- {
- SortGroupClause *grpcl = (SortGroupClause *) lfirst(gl);
- TargetEntry *te = get_sortgroupclause_tle(grpcl, tlist);
- if (!te)
- elog(ERROR, "failed to locate grouping columns");
- groupColIdx[keyno++] = te->resno;
- }
-}
-
-/*
- * pgxc_add_node_to_grouping_tlist
- * Add the given node to the target list to be sent to the Datanode. If it's
- * Aggref node, also change the passed in node to point to the Aggref node in
- * the Datanode's target list
- */
-static List *
-pgxc_add_node_to_grouping_tlist(List *remote_tlist, Node *expr, Index ressortgroupref)
-{
- TargetEntry *remote_tle;
- Oid saved_aggtype = InvalidOid;
-
- /*
- * When we add an aggregate to the remote targetlist the aggtype of such
- * an Aggref node is changed to aggtrantype. Hence while searching for a given
- * Aggref in the remote targetlist, we need to change the aggtype accordingly
- * and then switch it back.
- */
- if (IsA(expr, Aggref))
- {
- Aggref *aggref = (Aggref *)expr;
- saved_aggtype = aggref->aggtype;
- aggref->aggtype = aggref->aggtrantype;
- }
- remote_tle = tlist_member(expr, remote_tlist);
- if (IsA(expr, Aggref))
- ((Aggref *)expr)->aggtype = saved_aggtype;
-
- if (!remote_tle)
- {
- remote_tle = makeTargetEntry(copyObject(expr),
- list_length(remote_tlist) + 1,
- NULL,
- false);
- /* Copy the GROUP BY/SORT BY reference, for locating the GROUP BY columns later */
- remote_tle->ressortgroupref = ressortgroupref;
- remote_tlist = lappend(remote_tlist, remote_tle);
- }
- else
- {
- if (remote_tle->ressortgroupref == 0)
- remote_tle->ressortgroupref = ressortgroupref;
- else if (ressortgroupref == 0)
- {
- /* do nothing; remote_tle->ressortgroupref has the right value */
- }
- else
- {
- /*
- * if the expression's TLE already has a Sorting/Grouping reference,
- * and the caller has passed a non-zero one as well, both of them
- * had better be the same
- */
- Assert(remote_tle->ressortgroupref == ressortgroupref);
- }
- }
-
- /*
- * Replace the args of the local Aggref with Aggref node to be
- * included in RemoteQuery node, so that set_plan_refs can convert
- * the args into VAR pointing to the appropriate result in the tuple
- * coming from RemoteQuery node
- * PGXCTODO: should we push this change in targetlists of plans
- * above?
- */
- if (IsA(expr, Aggref))
- {
- Aggref *local_aggref = (Aggref *)expr;
- Aggref *remote_aggref = (Aggref *)remote_tle->expr;
- Assert(IsA(remote_tle->expr, Aggref));
- remote_aggref->aggtype = remote_aggref->aggtrantype;
- /* Is copyObject() needed here? probably yes */
- local_aggref->args = list_make1(makeTargetEntry(copyObject(remote_tle->expr),
- 1, NULL,
- false));
- }
- return remote_tlist;
-}
-/*
- * pgxc_process_grouping_targetlist
- * The function scans the targetlist to check if we can push anything
- * from the targetlist to the Datanode. Following rules govern the choice
- * 1. Either all of the aggregates are pushed to the Datanode or none is pushed
- * 2. If there are no aggregates, the targetlist is good to be shipped as is
- * 3. If aggregates are involved in expressions, we push the aggregates to the
- * Datanodes but not the involving expressions.
- *
- * The function constructs the targetlist for the query to be pushed to the
- * Datanode. It modifies the local targetlist to point to the expressions in
- * remote targetlist wherever necessary (e.g. aggregates)
- *
- * PGXCTODO: we should be careful while pushing the function expressions, it's
- * better to push functions like strlen() which can be evaluated at the
- * Datanode, but we should avoid pushing functions which can only be evaluated
- * at the Coordinator.
- */
-static List *
-pgxc_process_grouping_targetlist(PlannerInfo *root, List **local_tlist)
-{
- bool shippable_remote_tlist = true;
- List *remote_tlist = NIL;
- List *orig_local_tlist = NIL;/* Copy original local_tlist, in case it changes */
- ListCell *temp;
-
- /*
- * Walk through the target list and find out whether we can push the
- * aggregates and grouping to Datanodes. Also while doing so, create the
- * targetlist for the query to be shipped to the Datanode. Adjust the local
- * targetlist accordingly.
- */
- foreach(temp, *local_tlist)
- {
- TargetEntry *local_tle = lfirst(temp);
- Node *expr = (Node *)local_tle->expr;
- bool has_aggs;
-
- /*
- * If the expression is not an Aggref but involves aggregates (has Aggref
- * nodes in the expression tree), we cannot push the entire expression
- * to the Datanode, but push those aggregates to the Datanode, if those
- * aggregates can be evaluated at the Datanodes (if is_foreign_expr
- * returns true for entire expression). To evaluate the rest of the
- * expression, we need to fetch the values of VARs participating in the
- * expression. But, if we include the VARs under the aggregate nodes,
- * they may not be part of GROUP BY clause, thus generating an invalid
- * query. Hence, is_foreign_expr() wouldn't collect VARs under the
- * expression tree rooted under Aggref node.
- * For example, the original query is
- * SELECT sum(val) * val2 FROM tab1 GROUP BY val2;
- * the query pushed to the Datanode is
- * SELECT sum(val), val2 FROM tab1 GROUP BY val2;
- * Notice that, if we include val in the query, it will become invalid.
- */
- if (!pgxc_is_expr_shippable((Expr *)expr, &has_aggs))
- {
- shippable_remote_tlist = false;
- break;
- }
-
- /*
- * We are about to change the local_tlist, check if we have already
- * copied the original local_tlist; if not, take a copy
- */
- if (!orig_local_tlist && has_aggs)
- orig_local_tlist = copyObject(*local_tlist);
-
- /*
- * If there are aggregates involved in the expression, the whole expression
- * cannot be pushed to the Datanode. Pick up the aggregates and the
- * VAR nodes not covered by aggregates.
- */
- if (has_aggs)
- {
- ListCell *lcell;
- List *aggs_n_vars;
- /*
- * This expression is not going to be pushed as a whole, thus other
- * clauses won't be able to find this TLE in the results
- * obtained from the Datanode. Hence we can't optimize this query.
- * PGXCTODO: with projection support in RemoteQuery node, this
- * condition can be worked around, please check.
- */
- if (local_tle->ressortgroupref > 0)
- {
- shippable_remote_tlist = false;
- break;
- }
-
- aggs_n_vars = pull_var_clause(expr, PVC_INCLUDE_AGGREGATES,
- PVC_RECURSE_PLACEHOLDERS);
- /* copy the aggregates into the remote target list */
- foreach (lcell, aggs_n_vars)
- {
- Assert(IsA(lfirst(lcell), Aggref) || IsA(lfirst(lcell), Var));
- remote_tlist = pgxc_add_node_to_grouping_tlist(remote_tlist, lfirst(lcell),
- 0);
- }
- }
- /* Expression doesn't contain any aggregate */
- else
- remote_tlist = pgxc_add_node_to_grouping_tlist(remote_tlist, expr,
- local_tle->ressortgroupref);
- }
-
- if (!shippable_remote_tlist)
- {
- /*
- * If local_tlist has changed but we didn't find anything shippable to
- * the Datanode, we need to restore the local_tlist to its original state.
- */
- if (orig_local_tlist)
- *local_tlist = orig_local_tlist;
- if (remote_tlist)
- list_free_deep(remote_tlist);
- remote_tlist = NIL;
- }
- else if (orig_local_tlist)
- {
- /*
- * If we have changed the targetlist passed, we need to pass back the
- * changed targetlist. Free the copy that has been created.
- */
- list_free_deep(orig_local_tlist);
- }
-
- return remote_tlist;
-}
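For the example in the comment above (SELECT sum(val) * val2 FROM tab1 GROUP BY val2), the net effect of the rewrite is that each Datanode computes sum(val) per group and the Coordinator combines the partial sums and evaluates the "* val2" part that stays local. A conceptual, self-contained simulation of that split over made-up data (it does not use the real transition/collection function machinery):

#include <stdio.h>

int main(void)
{
    /* Hypothetical "val" values for one group (val2 = 3), split across two Datanodes */
    int  node1_vals[3] = {1, 2, 3};
    int  node2_vals[2] = {4, 5};
    int  val2 = 3;
    long partial1 = 0, partial2 = 0, total;
    int  i;

    /* Phase 1: each Datanode evaluates the pushed-down sum(val) on its own rows */
    for (i = 0; i < 3; i++)
        partial1 += node1_vals[i];
    for (i = 0; i < 2; i++)
        partial2 += node2_vals[i];

    /* Phase 2: the Coordinator combines the partials and applies the local "* val2" */
    total = (partial1 + partial2) * val2;
    printf("sum(val) * val2 = %ld\n", total);   /* (6 + 9) * 3 = 45 */
    return 0;
}
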
-
-/*
- * pgxc_process_having_clause
- * For every expression in the havingQual take the following action:
- * 1. If it has aggregates, which can be evaluated at the Datanodes, add those
- * aggregates to the targetlist and modify the local aggregate expressions to
- * point to the aggregate expressions being pushed to the Datanode. Add this
- * expression to the local qual to be evaluated locally.
- * 2. If the expression does not have aggregates and the whole expression can be
- * evaluated at the Datanode, add the expression to the remote qual to be
- * evaluated at the Datanode.
- * 3. If the qual contains an expression which cannot be evaluated at the
- * Datanode, the parent group plan cannot be reduced to a remote_query.
- */
-static List *
-pgxc_process_having_clause(PlannerInfo *root, List *remote_tlist, Node *havingQual,
- List **local_qual, List **remote_qual,
- bool *reduce_plan)
-{
- List *qual;
- ListCell *temp;
-
- *reduce_plan = true;
- *remote_qual = NIL;
- *local_qual = NIL;
-
- if (!havingQual)
- return remote_tlist;
- /*
- * PGXCTODO: we expect the quals in the form of List only. Is there a
- * possibility that the quals will be in another form?
- */
- if (!IsA(havingQual, List))
- {
- *reduce_plan = false;
- return remote_tlist;
- }
- /*
- * Copy the havingQual so that the copy can be modified later. In case we
- * back out in between, the original expression remains intact.
- */
- qual = copyObject(havingQual);
- foreach(temp, qual)
- {
- Node *expr = lfirst(temp);
- bool has_aggs;
- List *vars_n_aggs;
-
- if (!pgxc_is_expr_shippable((Expr *)expr, &has_aggs))
- {
- *reduce_plan = false;
- break;
- }
-
- if (has_aggs)
- {
- ListCell *lcell;
-
- /* Pull the aggregates and var nodes from the quals */
- vars_n_aggs = pull_var_clause(expr, PVC_INCLUDE_AGGREGATES,
- PVC_RECURSE_PLACEHOLDERS);
- /* copy the aggregates into the remote target list */
- foreach (lcell, vars_n_aggs)
- {
- Assert(IsA(lfirst(lcell), Aggref) || IsA(lfirst(lcell), Var));
- remote_tlist = pgxc_add_node_to_grouping_tlist(remote_tlist, lfirst(lcell),
- 0);
- }
- *local_qual = lappend(*local_qual, expr);
- }
- else
- *remote_qual = lappend(*remote_qual, expr);
- }
-
- if (!(*reduce_plan))
- list_free_deep(qual);
-
- return remote_tlist;
-}
-
-/*
- * pgxc_set_en_expr
- * Try to find the expression for the distribution column, used to determine the target node at execution time
- */
-static Expr *
-pgxc_set_en_expr(Oid tableoid, Index resultRelationIndex)
-{
- HeapTuple tp;
- Form_pg_attribute partAttrTup;
- Var *var;
- RelationLocInfo *rel_loc_info;
-
- /* Get location info of the target table */
- rel_loc_info = GetRelationLocInfo(tableoid);
- if (rel_loc_info == NULL)
- return NULL;
-
- /*
- * For hash/modulo distributed tables, the target node must be selected
- * at execution time based on the partition column value.
- *
- * For round robin distributed tables, tuples must be divided equally
- * between the nodes.
- *
- * For replicated tables, tuples must be inserted into all the Datanodes.
- *
- * XXX Need further testing for replicated and round-robin tables
- */
- if (rel_loc_info->locatorType != LOCATOR_TYPE_HASH &&
- rel_loc_info->locatorType != LOCATOR_TYPE_MODULO)
- return NULL;
-
- tp = SearchSysCache(ATTNUM,
- ObjectIdGetDatum(tableoid),
- Int16GetDatum(rel_loc_info->partAttrNum),
- 0, 0);
- partAttrTup = (Form_pg_attribute) GETSTRUCT(tp);
-
- /*
- * Create a Var for the distribution column and set it for
- * execution time evaluation of target node. ExecEvalVar() picks
- * up values from ecxt_scantuple if Var does not refer either OUTER
- * or INNER varno. We utilize that mechanism to pick up values from
- * the tuple returned by the current plan node
- */
- var = makeVar(resultRelationIndex,
- rel_loc_info->partAttrNum,
- partAttrTup->atttypid,
- partAttrTup->atttypmod,
- partAttrTup->attcollation,
- 0);
- ReleaseSysCache(tp);
-
- return (Expr *) var;
-}
-
-/*
- * pgxc_count_rowmarks_entries
- * Count the number of rowmarks that need to be added as prepared parameters
- * for a remote DML plan
- */
-static int
-pgxc_count_rowmarks_entries(List *rowMarks)
-{
- int res = 0;
- ListCell *elt;
-
- foreach(elt, rowMarks)
- {
- PlanRowMark *rc = (PlanRowMark *) lfirst(elt);
-
- /* RowMarks with different parent are not needed */
- if (rc->rti != rc->prti)
- continue;
-
- /*
- * Count the entry and move to next element
- * For a non-parent rowmark, only ctid is used.
- * For a parent rowmark, ctid and tableoid are used.
- */
- if (!rc->isParent)
- res++;
- else
- res = res + 2;
- }
-
- return res;
-}
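The counting rule above is: skip rowmarks whose rti differs from prti, then add one parameter (ctid) per remaining rowmark, or two (ctid plus tableoid) when it is a parent. A self-contained sketch of the same rule over a toy rowmark array; the struct only mimics the PlanRowMark fields the removed code reads:

#include <stdio.h>
#include <stdbool.h>

typedef struct
{
    int  rti;        /* range table index */
    int  prti;       /* parent range table index */
    bool isParent;
} ToyRowMark;

static int
count_rowmark_params(const ToyRowMark *marks, int n)
{
    int res = 0;

    for (int i = 0; i < n; i++)
    {
        /* Rowmarks with a different parent are skipped, as in the original */
        if (marks[i].rti != marks[i].prti)
            continue;
        /* ctid only for plain entries; ctid + tableoid for parents */
        res += marks[i].isParent ? 2 : 1;
    }
    return res;
}

int main(void)
{
    ToyRowMark marks[3] = {
        {1, 1, false},   /* plain entry: ctid            -> 1 parameter  */
        {2, 2, true},    /* parent:      ctid + tableoid -> 2 parameters */
        {3, 1, false},   /* child of another rel         -> skipped      */
    };

    printf("%d\n", count_rowmark_params(marks, 3));   /* prints 3 */
    return 0;
}
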
-
-/*
- * pgxc_build_rowmark_entries
- * Complete the type array for SetRemoteStatementName based on the given RowMarks list.
- * The total number of parameters is calculated from the current number of prepared
- * parameters and the rowmark list.
- */
-static Oid *
-pgxc_build_rowmark_entries(List *rowMarks, List *rtable, Oid *types, int prepparams, int totparams)
-{
- Oid *newtypes = types;
- int rowmark_entry_num;
- int count = prepparams;
- ListCell *elt;
-
- /* No modifications if list is empty */
- if (rowMarks == NIL)
- return newtypes;
-
- /* Nothing to do, total number of parameters is already correct */
- if (prepparams == totparams)
- return newtypes;
-
- /* Fetch number of extra entries related to Rowmarks */
- rowmark_entry_num = pgxc_count_rowmarks_entries(rowMarks);
-
- /* Nothing to do */
- if (rowmark_entry_num == 0)
- return newtypes;
-
- /* This needs to be absolutely verified */
- Assert(totparams == (prepparams + rowmark_entry_num));
-
- foreach(elt, rowMarks)
- {
- PlanRowMark *rc = (PlanRowMark *) lfirst(elt);
-
- /* RowMarks with different parent are not needed */
- if (rc->rti != rc->prti)
- continue;
-
- /* Determine the correct parameter type */
- switch (rc->markType)
- {
- case ROW_MARK_COPY:
- {
- RangeTblEntry *rte = rt_fetch(rc->prti, rtable);
-
- /*
- * PGXCTODO: We still need to determine the rowtype
- * in case relation involved here is a view (see inherit.sql).
- */
- if (!OidIsValid(rte->relid))
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("Cannot generate remote query plan"),
- errdetail("This relation rowtype cannot be fetched")));
-
- /*
- * This is the complete copy of a row, so it is necessary
- * to set the parameter as a rowtype
- */
- count++;
- newtypes[count - 1] = get_rel_type_id(rte->relid);
- }
- break;
-
- case ROW_MARK_REFERENCE:
- /* Here we have a ctid for sure */
- count++;
- newtypes[count - 1] = TIDOID;
-
- if (rc->isParent)
- {
- /* For a parent table, tableoid is also necessary */
- count++;
- /* Set parameter type */
- newtypes[count - 1] = OIDOID;
- }
- break;
-
- /* Ignore other entries */
- case ROW_MARK_SHARE:
- case ROW_MARK_EXCLUSIVE:
- default:
- break;
- }
- }
-
- /* This should not happen */
- if (count != totparams)
- ereport(ERROR,
- (errcode(ERRCODE_DATA_EXCEPTION),
- errmsg("Error when generating remote query plan")));
-
- return newtypes;
-}
-
-static RangeTblEntry *
-make_dummy_remote_rte(char *relname, Alias *alias)
-{
- RangeTblEntry *dummy_rte = makeNode(RangeTblEntry);
- dummy_rte->rtekind = RTE_REMOTE_DUMMY;
-
- /* use a dummy relname... */
- dummy_rte->relname = relname;
- dummy_rte->eref = alias;
-
- return dummy_rte;
-}
-#endif /* XCP */
-#endif /* PGXC */
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 207da46605..eafa4a652d 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -139,11 +139,6 @@ static Plan *grouping_distribution(PlannerInfo *root, Plan *plan,
static bool equal_distributions(PlannerInfo *root, Distribution *dst1,
Distribution *dst2);
#endif
-#ifdef PGXC
-#ifndef XCP
-static void separate_rowmarks(PlannerInfo *root);
-#endif
-#endif
static Plan *build_grouping_chain(PlannerInfo *root,
Query *parse,
List *tlist,
@@ -176,18 +171,7 @@ planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
if (planner_hook)
result = (*planner_hook) (parse, cursorOptions, boundParams);
else
-#ifdef PGXC
-#ifndef XCP
- /*
- * A Coordinator receiving a query from another Coordinator
- * is not allowed to go into PGXC planner.
- */
- if (IS_PGXC_LOCAL_COORDINATOR)
- result = pgxc_planner(parse, cursorOptions, boundParams);
- else
-#endif /* XCP */
-#endif /* PGXC */
- result = standard_planner(parse, cursorOptions, boundParams);
+ result = standard_planner(parse, cursorOptions, boundParams);
return result;
}
@@ -301,35 +285,6 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
lfirst(lp) = set_plan_references(subroot, subplan);
}
-#ifdef PGXC
-#ifndef XCP
- /*
- * PGXC should apply INSERT/UPDATE/DELETE to a Datanode. We are overriding
- * normal Postgres behavior by modifying the final plan or by adding a node on
- * top of it.
- * If the optimizer finds out that there is nothing to UPDATE/INSERT/DELETE
- * in the table(s) (say using constraint exclusion), it does not add a
- * ModifyTable plan on top. We should send queries to the remote nodes only
- * when there is something to modify.
- */
- if (IS_PGXC_COORDINATOR && IsA(top_plan, ModifyTable))
- switch (parse->commandType)
- {
- case CMD_INSERT:
- top_plan = create_remoteinsert_plan(root, top_plan);
- break;
- case CMD_UPDATE:
- top_plan = create_remoteupdate_plan(root, top_plan);
- break;
- case CMD_DELETE:
- top_plan = create_remotedelete_plan(root, top_plan);
- break;
- default:
- break;
- }
-#endif /* XCP */
-#endif
-
/* build the PlannedStmt result */
result = makeNode(PlannedStmt);
@@ -419,11 +374,6 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
root->hasInheritedTarget = false;
root->grouping_map = NULL;
-#ifdef PGXC
-#ifndef XCP
- root->rs_alias_index = 1;
-#endif /* XCP */
-#endif /* PGXC */
root->hasRecursion = hasRecursion;
if (hasRecursion)
root->wt_param_id = SS_assign_special_param(root);
@@ -501,27 +451,6 @@ subquery_planner(PlannerGlobal *glob, Query *parse,
*/
preprocess_rowmarks(root);
-#ifdef PGXC
-#ifndef XCP
- /*
- * On Coordinators we separate row marks into two groups:
- * one comprises row marks of types ROW_MARK_EXCLUSIVE & ROW_MARK_SHARE
- * and the other contains the remaining types of row marks.
- * The former is handled on the Coordinator in such a way that
- * FOR UPDATE/SHARE gets added to the remote query, whereas
- * the latter needs to be handled the way PG does.
- *
- * PGXCTODO : This is not a very efficient way of handling row marks.
- * Consider this join query
- * select * from t1, t2 where t1.val = t2.val for update
- * It results in this query being fired at the Datanodes
- * SELECT val, val2, ctid FROM ONLY t2 WHERE true FOR UPDATE OF t2
- * We are locking the complete table whereas we should have locked
- * only the rows where t1.val = t2.val is met.
- */
- separate_rowmarks(root);
-#endif
-#endif
/*
* Expand any rangetable entries that are inheritance sets into "append
* relations". This can add entries to the rangetable, but they must be
@@ -2146,18 +2075,6 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
NIL,
numGroups,
result_plan);
-#ifdef PGXC
-#ifndef XCP
- /*
- * Grouping will certainly not increase the number of rows the
- * Coordinator fetches from the Datanode; in fact it's expected to
- * reduce the number drastically. Hence, try pushing GROUP BY
- * clauses and aggregates to the datanode, thus saving bandwidth.
- */
- if (IS_PGXC_LOCAL_COORDINATOR)
- result_plan = create_remoteagg_plan(root, result_plan);
-#endif /* XCP */
-#endif /* PGXC */
/* Hashed aggregation produces randomly-ordered results */
current_pathkeys = NIL;
}
@@ -2277,19 +2194,6 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
result_plan = (Plan *) make_append(plans, tlist);
}
}
-#ifdef PGXC
-#ifndef XCP
- /*
- * Grouping will certainly not increase the number of rows
- * the Coordinator fetches from the Datanode; in fact it's expected to
- * reduce the number drastically. Hence, try pushing GROUP BY
- * clauses and aggregates to the Datanode, thus saving bandwidth.
- */
- if (IS_PGXC_LOCAL_COORDINATOR)
- result_plan = create_remotegrouping_plan(root, result_plan);
-#endif /* XCP */
-#endif /* PGXC */
-
} /* end of non-minmax-aggregate case */
/*
@@ -3255,45 +3159,6 @@ preprocess_rowmarks(PlannerInfo *root)
root->rowMarks = prowmarks;
}
-#ifdef PGXC
-#ifndef XCP
-/*
- * separate_rowmarks - In XC, Coordinators are supposed to skip handling
- * of row marks of type ROW_MARK_EXCLUSIVE & ROW_MARK_SHARE.
- * In order to do that we simply remove such types
- * of row marks from the list. Instead they are saved
- * in another list that is then used to add
- * FOR UPDATE/SHARE to the remote query
- * in the function create_remotequery_plan.
- */
-static void
-separate_rowmarks(PlannerInfo *root)
-{
- List *rml_1, *rml_2;
- ListCell *rm;
-
- if (IS_PGXC_DATANODE || IsConnFromCoord() || root->rowMarks == NULL)
- return;
-
- rml_1 = NULL;
- rml_2 = NULL;
-
- foreach(rm, root->rowMarks)
- {
- PlanRowMark *prm = (PlanRowMark *) lfirst(rm);
-
- if (prm->markType == ROW_MARK_EXCLUSIVE || prm->markType == ROW_MARK_SHARE)
- rml_1 = lappend(rml_1, prm);
- else
- rml_2 = lappend(rml_2, prm);
- }
- list_free(root->rowMarks);
- root->rowMarks = rml_2;
- root->xc_rowMarks = rml_1;
-}
-#endif /*XCP*/
-#endif /*PGXC*/
-
/*
* Select RowMarkType to use for a given table
*/
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index f4c6cf874a..538235572a 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -168,19 +168,6 @@ static bool fix_opfuncids_walker(Node *node, void *context);
static bool extract_query_dependencies_walker(Node *node,
PlannerInfo *context);
-#ifdef PGXC
-#ifndef XCP
-/* References for remote plans */
-static List * fix_remote_expr(PlannerInfo *root,
- List *clauses,
- indexed_tlist *base_itlist,
- Index newrelid,
- int rtoffset);
-static Node *fix_remote_expr_mutator(Node *node,
- fix_remote_expr_context *context);
-static void set_remote_references(PlannerInfo *root, RemoteQuery *rscan, int rtoffset);
-#endif
-#endif
#ifdef XCP
static void set_remotesubplan_references(PlannerInfo *root, Plan *plan, int rtoffset);
#endif
@@ -625,29 +612,6 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
fix_scan_list(root, splan->scan.plan.qual, rtoffset);
}
break;
-#ifdef PGXC
-#ifndef XCP
- case T_RemoteQuery:
- {
- RemoteQuery *splan = (RemoteQuery *) plan;
-
- /*
- * If base_tlist is set, it means that we have a reduced remote
- * query plan. So need to set the var references accordingly.
- */
- if (splan->base_tlist)
- set_remote_references(root, splan, rtoffset);
- splan->scan.plan.targetlist =
- fix_scan_list(root, splan->scan.plan.targetlist, rtoffset);
- splan->scan.plan.qual =
- fix_scan_list(root, splan->scan.plan.qual, rtoffset);
- splan->base_tlist =
- fix_scan_list(root, splan->base_tlist, rtoffset);
- splan->scan.scanrelid += rtoffset;
- }
- break;
-#endif
-#endif
case T_ForeignScan:
set_foreignscan_references(root, (ForeignScan *) plan, rtoffset);
break;
@@ -2056,34 +2020,6 @@ search_indexed_tlist_for_non_var(Node *node,
return NULL; /* no match */
}
-#ifdef PGXC
-#ifndef XCP
-/*
- * search_tlist_for_var --- find a Var in the provided tlist. This does a
- * basic scan through the list. So not very efficient...
- *
- * If no match, return NULL.
- *
- */
-Var *
-search_tlist_for_var(Var *var, List *jtlist)
-{
- Index varno = var->varno;
- AttrNumber varattno = var->varattno;
- ListCell *l;
-
- foreach(l, jtlist)
- {
- Var *listvar = (Var *) lfirst(l);
-
- if (listvar->varno == varno && listvar->varattno == varattno)
- return var;
- }
- return NULL; /* no match */
-}
-#endif
-#endif
-
/*
* search_indexed_tlist_for_sortgroupref --- find a sort/group expression
* (which is assumed not to be just a Var)
@@ -2635,120 +2571,6 @@ extract_query_dependencies_walker(Node *node, PlannerInfo *context)
(void *) context);
}
-
-#ifdef PGXC
-#ifndef XCP
-/*
- * fix_remote_expr
- * Create a new set of targetlist entries or qual clauses by
- * changing the varno/varattno values of variables in the clauses
- * to reference target list values from the base
- * relation target lists. Also perform opcode lookup and add
- * regclass OIDs to glob->relationOids.
- *
- * 'clauses' is the targetlist or list of clauses
- * 'base_itlist' is the indexed target list of the base referenced relations
- *
- * Returns the new expression tree. The original clause structure is
- * not modified.
- */
-static List *
-fix_remote_expr(PlannerInfo *root,
- List *clauses,
- indexed_tlist *base_itlist,
- Index newrelid,
- int rtoffset)
-{
- fix_remote_expr_context context;
-
- context.glob = root->glob;
- context.base_itlist = base_itlist;
- context.relid = newrelid;
- context.rtoffset = rtoffset;
-
- return (List *) fix_remote_expr_mutator((Node *) clauses, &context);
-}
-
-static Node *
-fix_remote_expr_mutator(Node *node, fix_remote_expr_context *context)
-{
- Var *newvar;
-
- if (node == NULL)
- return NULL;
-
- if (IsA(node, Var))
- {
- Var *var = (Var *) node;
-
- /* First look for the var in the input base tlists */
- newvar = search_indexed_tlist_for_var(var,
- context->base_itlist,
- context->relid,
- context->rtoffset);
- if (newvar)
- return (Node *) newvar;
-
- /* No reference found for Var */
- elog(ERROR, "variable not found in base remote scan target lists");
- }
- /* Try matching more complex expressions too, if tlists have any */
- if (context->base_itlist->has_non_vars)
- {
- newvar = search_indexed_tlist_for_non_var(node,
- context->base_itlist,
- context->relid);
- if (newvar)
- return (Node *) newvar;
- }
-
- return expression_tree_mutator(node, fix_remote_expr_mutator, context);
-}
-
-/*
- * set_remote_references
- *
- * Modify the target list and quals of a remote scan node to reference its
- * base rels, by setting the varnos to DUMMY (even OUTER is fine) and setting attno
- * values to the result domain number of the base rels.
- * Also perform opcode lookup for these expressions and add regclass
- * OIDs to glob->relationOids.
- */
-static void
-set_remote_references(PlannerInfo *root, RemoteQuery *rscan, int rtoffset)
-{
- indexed_tlist *base_itlist;
-
- if (!rscan->base_tlist)
- return;
-
- base_itlist = build_tlist_index(rscan->base_tlist);
-
- /* All remotescan plans have tlist, and quals */
- rscan->scan.plan.targetlist = fix_remote_expr(root ,
- rscan->scan.plan.targetlist,
- base_itlist,
- rscan->scan.scanrelid,
- rtoffset);
-
- rscan->scan.plan.qual = fix_remote_expr(root ,
- rscan->scan.plan.qual,
- base_itlist,
- rscan->scan.scanrelid,
- rtoffset);
-
- pfree(base_itlist);
-}
-
-Node *
-pgxc_fix_scan_expr(PlannerInfo *root, Node *node, int rtoffset)
-{
- return fix_scan_expr(root, node, rtoffset);
-}
-#endif /* XCP */
-#endif /* PGXC */
-
-
#ifdef XCP
/*
* set_remotesubplan_references
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 58093aa45b..027d28e261 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -2630,33 +2630,6 @@ create_worktablescan_path(PlannerInfo *root, RelOptInfo *rel,
return pathnode;
}
-
-#ifdef PGXC
-#ifndef XCP
-/*
- * create_remotequery_path
- * Creates a path corresponding to a scan of a remote query,
- * returning the pathnode.
- */
-Path *
-create_remotequery_path(PlannerInfo *root, RelOptInfo *rel)
-{
- Path *pathnode = makeNode(Path);
-
- pathnode->pathtype = T_RemoteQuery;
- pathnode->parent = rel;
- pathnode->param_info = NULL; /* never parameterized at present */
- pathnode->pathkeys = NIL; /* result is always unordered */
-
- /* PGXCTODO - set cost properly */
- cost_seqscan(pathnode, root, rel, pathnode->param_info);
-
- return pathnode;
-}
-#endif /* XCP */
-#endif /* PGXC */
-
-
/*
* create_foreignscan_path
* Creates a path corresponding to a scan of a foreign table,
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index 76bd1a74d7..04d1be526e 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -802,31 +802,6 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
switch (rel->rd_rel->relkind)
{
case RELKIND_RELATION:
-#ifdef PGXC
-#ifndef XCP
- /*
- * This is a remote table... we have no idea how many pages/rows
- * we may get from a scan of this table. However, we should set the
- * costs in such a manner that the cheapest paths pick up the
- * ones involving these remote rels.
- *
- * These allow for maximum query shipping to the remote
- * side later during the planning phase
- *
- * This has to be set on a remote Coordinator only
- * as it hugely penalizes performance on backend Nodes.
- *
- * Override the estimates only for remote tables (currently
- * identified by non-NULL rd_locator_info)
- */
- if (IS_PGXC_LOCAL_COORDINATOR && rel->rd_locator_info)
- {
- *pages = 10;
- *tuples = 10;
- break;
- }
-#endif
-#endif
case RELKIND_INDEX:
case RELKIND_MATVIEW:
case RELKIND_TOASTVALUE:
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index 3e72b0fdbc..83fb925191 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -104,11 +104,6 @@ static Query *transformCreateTableAsStmt(ParseState *pstate,
CreateTableAsStmt *stmt);
#ifdef PGXC
static Query *transformExecDirectStmt(ParseState *pstate, ExecDirectStmt *stmt);
-#ifndef XCP
-static bool IsExecDirectUtilityStmt(Node *node);
-static bool is_relation_child(RangeTblEntry *child_rte, List *rtable);
-static bool is_rel_child_of_rel(RangeTblEntry *child_rte, RangeTblEntry *parent_rte);
-#endif
#endif
static void transformLockingClause(ParseState *pstate, Query *qry,
@@ -581,11 +576,6 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
*/
ParseState *sub_pstate = make_parsestate(pstate);
Query *selectQuery;
-#ifdef PGXC
-#ifndef XCP
- RangeTblEntry *target_rte;
-#endif
-#endif
/*
* Process the source SELECT.
@@ -618,24 +608,6 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
makeAlias("*SELECT*", NIL),
false,
false);
-#ifdef PGXC
-#ifndef XCP
- /*
- * For an INSERT SELECT involving INSERT on a child after scanning
- * the parent, set a flag to send command ID communication to remote
- * nodes in order to maintain global data visibility.
- */
- if (IS_PGXC_LOCAL_COORDINATOR)
- {
- target_rte = rt_fetch(qry->resultRelation, pstate->p_rtable);
- if (is_relation_child(target_rte, selectQuery->rtable))
- {
- qry->is_ins_child_sel_parent = true;
- SetSendCommandId(true);
- }
- }
-#endif
-#endif
rtr = makeNode(RangeTblRef);
/* assume new rte is at end */
rtr->rtindex = list_length(pstate->p_rtable);
@@ -2469,9 +2441,6 @@ transformExecDirectStmt(ParseState *pstate, ExecDirectStmt *stmt)
List *raw_parsetree_list;
ListCell *raw_parsetree_item;
char *nodename;
-#ifndef XCP
- Oid nodeoid;
-#endif
int nodeIndex;
char nodetype;
@@ -2536,11 +2505,6 @@ transformExecDirectStmt(ParseState *pstate, ExecDirectStmt *stmt)
result = parse_analyze(parsetree, query, NULL, 0);
}
-#ifndef XCP
- /* Needed by planner */
- result->sql_statement = pstrdup(query);
-#endif
-
/* Default list of parameters to set */
step->sql_statement = NULL;
step->exec_nodes = makeNode(ExecNodes);
@@ -2607,41 +2571,9 @@ transformExecDirectStmt(ParseState *pstate, ExecDirectStmt *stmt)
}
}
-#ifndef XCP
- /*
- * Features not yet supported
- * DML can be launched without errors but this could compromise data
- * consistency, so block it.
- */
- if (step->exec_direct_type == EXEC_DIRECT_DELETE
- || step->exec_direct_type == EXEC_DIRECT_UPDATE
- || step->exec_direct_type == EXEC_DIRECT_INSERT)
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("EXECUTE DIRECT cannot execute DML queries")));
- else if (step->exec_direct_type == EXEC_DIRECT_UTILITY &&
- !IsExecDirectUtilityStmt(result->utilityStmt) && !xc_maintenance_mode)
- {
- /* In case this statement is a utility, check if it is authorized */
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("EXECUTE DIRECT cannot execute this utility query")));
- }
- else if (step->exec_direct_type == EXEC_DIRECT_LOCAL_UTILITY && !xc_maintenance_mode)
- {
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("EXECUTE DIRECT cannot execute locally this utility query")));
- }
-#endif
-
/* Build Execute Node list, there is a unique node for the time being */
step->exec_nodes->nodeList = lappend_int(step->exec_nodes->nodeList, nodeIndex);
- /* Associate newly-created RemoteQuery node to the returned Query result */
-#ifndef XCP
- result->is_local = is_local;
-#endif
if (!is_local)
result->utilityStmt = (Node *) step;
@@ -2653,122 +2585,6 @@ transformExecDirectStmt(ParseState *pstate, ExecDirectStmt *stmt)
return result;
}
-#ifndef XCP
-/*
- * Check if the given node is authorized to go through EXECUTE DIRECT
- */
-static bool
-IsExecDirectUtilityStmt(Node *node)
-{
- bool res = true;
-
- if (!node)
- return res;
-
- switch(nodeTag(node))
- {
- /*
- * CREATE/DROP TABLESPACE are authorized to control
- * tablespace at single node level.
- */
- case T_CreateTableSpaceStmt:
- case T_DropTableSpaceStmt:
- res = true;
- break;
- default:
- res = false;
- break;
- }
-
- return res;
-}
-
-/*
- * Returns whether or not the rtable (and its subqueries)
- * contains any relation that is the parent of
- * the passed relation.
- */
-static bool
-is_relation_child(RangeTblEntry *child_rte, List *rtable)
-{
- ListCell *item;
-
- if (child_rte == NULL || rtable == NULL)
- return false;
-
- if (child_rte->rtekind != RTE_RELATION)
- return false;
-
- foreach(item, rtable)
- {
- RangeTblEntry *rte = (RangeTblEntry *) lfirst(item);
-
- if (rte->rtekind == RTE_RELATION)
- {
- if (is_rel_child_of_rel(child_rte, rte))
- return true;
- }
- else if (rte->rtekind == RTE_SUBQUERY)
- {
- return is_relation_child(child_rte, rte->subquery->rtable);
- }
- }
- return false;
-}
-
-/*
- * Returns whether the passed RTEs have a parent-child relationship
- */
-static bool
-is_rel_child_of_rel(RangeTblEntry *child_rte, RangeTblEntry *parent_rte)
-{
- Oid parentOID;
- bool res;
- Relation relation;
- SysScanDesc scan;
- ScanKeyData key[1];
- HeapTuple inheritsTuple;
- Oid inhrelid;
-
- /* Does parent RT entry allow inheritance? */
- if (!parent_rte->inh)
- return false;
-
- /* Ignore any already-expanded UNION ALL nodes */
- if (parent_rte->rtekind != RTE_RELATION)
- return false;
-
- /* Fast path for common case of childless table */
- parentOID = parent_rte->relid;
- if (!has_subclass(parentOID))
- return false;
-
- /* Assume we did not find any match */
- res = false;
-
- /* Scan pg_inherits and get all the subclass OIDs one by one. */
- relation = heap_open(InheritsRelationId, AccessShareLock);
- ScanKeyInit(&key[0], Anum_pg_inherits_inhparent, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(parentOID));
- scan = systable_beginscan(relation, InheritsParentIndexId, true, SnapshotNow, 1, key);
-
- while ((inheritsTuple = systable_getnext(scan)) != NULL)
- {
- inhrelid = ((Form_pg_inherits) GETSTRUCT(inheritsTuple))->inhrelid;
-
- /* Did we find the Oid of the passed RTE in one of the children? */
- if (child_rte->relid == inhrelid)
- {
- res = true;
- break;
- }
- }
-
- systable_endscan(scan);
- heap_close(relation, AccessShareLock);
- return res;
-}
-
-#endif
#endif
/*
diff --git a/src/backend/parser/parse_agg.c b/src/backend/parser/parse_agg.c
index 68f870f488..e558543d42 100644
--- a/src/backend/parser/parse_agg.c
+++ b/src/backend/parser/parse_agg.c
@@ -117,12 +117,6 @@ transformAggregateCall(ParseState *pstate, Aggref *agg,
AttrNumber attno = 1;
int save_next_resno;
ListCell *lc;
-#ifdef PGXC
-#ifndef XCP
- HeapTuple aggTuple;
- Form_pg_aggregate aggform;
-#endif /* XCP */
-#endif /* PGXC */
const char *err;
bool errkind;
@@ -335,30 +329,6 @@ check_agglevels_and_constraints(ParseState *pstate, Node *expr)
while (min_varlevel-- > 0)
pstate = pstate->parentParseState;
pstate->p_hasAggs = true;
-#ifdef PGXC
-#ifndef XCP
- /*
- * The return data type of a PGXC Datanode's aggregate should always be the
- * result of the transition function, which is what the collection function
- * on the Coordinator expects.
- * Look up the aggregate definition and replace agg->aggtype.
- */
-
- aggTuple = SearchSysCache(AGGFNOID,
- ObjectIdGetDatum(agg->aggfnoid),
- 0, 0, 0);
- if (!HeapTupleIsValid(aggTuple))
- elog(ERROR, "cache lookup failed for aggregate %u",
- agg->aggfnoid);
- aggform = (Form_pg_aggregate) GETSTRUCT(aggTuple);
- agg->aggtrantype = aggform->aggtranstype;
- agg->agghas_collectfn = OidIsValid(aggform->aggcollectfn);
- if (IS_PGXC_DATANODE)
- agg->aggtype = agg->aggtrantype;
-
- ReleaseSysCache(aggTuple);
-#endif
-#endif
/*
* Check to see if the aggregate function is in an invalid place within
diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c
index 735ffc485f..fb6168c835 100644
--- a/src/backend/parser/parse_relation.c
+++ b/src/backend/parser/parse_relation.c
@@ -1267,12 +1267,6 @@ addRangeTableEntry(ParseState *pstate,
rte->relkind = rel->rd_rel->relkind;
-#ifdef PGXC
-#ifndef XCP
- rte->relname = RelationGetRelationName(rel);
-#endif
-#endif
-
/*
* Build the list of effective column names using user-supplied aliases
* and/or actual column names.
@@ -1354,12 +1348,6 @@ addRangeTableEntryForRelation(ParseState *pstate,
rte->relid = RelationGetRelid(rel);
rte->relkind = rel->rd_rel->relkind;
-#ifdef PGXC
-#ifndef XCP
- rte->relname = RelationGetRelationName(rel);
-#endif
-#endif
-
/*
* Build the list of effective column names using user-supplied aliases
* and/or actual column names.
@@ -1921,15 +1909,6 @@ addRangeTableEntryForCTE(ParseState *pstate,
errmsg("WITH query \"%s\" does not have a RETURNING clause",
cte->ctename),
parser_errposition(pstate, rv->location)));
-
-#ifdef PGXC
-#ifndef XCP
- if (ctequery->returningList != NIL)
- ereport(ERROR,
- (errcode(ERRCODE_STATEMENT_TOO_COMPLEX),
- (errmsg("RETURNING clause not yet supported"))));
-#endif
-#endif
}
rte->ctecoltypes = cte->ctecoltypes;
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index a0bb4dd893..41ed933950 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -112,12 +112,8 @@ typedef struct
* the table */
IndexStmt *pkey; /* PRIMARY KEY index, if any */
#ifdef PGXC
-#ifdef XCP
FallbackSrc fallback_source;
List *fallback_dist_cols;
-#else
- char *fallback_dist_col; /* suggested column to distribute on */
-#endif
DistributeBy *distributeby; /* original distribute by column of CREATE TABLE */
PGXCSubCluster *subcluster; /* original subcluster option of CREATE TABLE */
#endif
@@ -276,12 +272,8 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
cxt.alist = NIL;
cxt.pkey = NULL;
#ifdef PGXC
-#ifdef XCP
cxt.fallback_source = FBS_NONE;
cxt.fallback_dist_cols = NIL;
-#else
- cxt.fallback_dist_col = NULL;
-#endif
cxt.distributeby = stmt->distributeby;
cxt.subcluster = stmt->subcluster;
#endif
@@ -366,7 +358,6 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
* If the user did not specify any distribution clause and there is no
* inherits clause, try and use PK or unique index
*/
-#ifdef XCP
if (IS_PGXC_COORDINATOR && autodistribute && !stmt->distributeby)
{
/* always apply suggested subcluster */
@@ -449,14 +440,6 @@ transformCreateStmt(CreateStmt *stmt, const char *queryString)
stmt->distributeby->colname = NULL;
}
}
-#else
- if (!stmt->distributeby && !stmt->inhRelations && cxt.fallback_dist_col)
- {
- stmt->distributeby = (DistributeBy *) palloc0(sizeof(DistributeBy));
- stmt->distributeby->disttype = DISTTYPE_HASH;
- stmt->distributeby->colname = cxt.fallback_dist_col;
- }
-#endif
#endif
return result;
@@ -897,16 +880,6 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
cancel_parser_errposition_callback(&pcbstate);
#ifdef PGXC
-#ifndef XCP
- /*
- * Check if relation is temporary and assign correct flag.
- * This will override transaction direct commit as no 2PC
- * can be used for transactions involving temporary objects.
- */
- if (IsTempTable(RelationGetRelid(relation)))
- ExecSetTempObjectIncluded();
-#endif
-
/*
* Block the creation of tables using views in their LIKE clause.
* Views are not created on Datanodes, so this will result in an error
@@ -920,11 +893,7 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
if (relation->rd_rel->relkind == RELKIND_VIEW)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-#ifdef XCP
errmsg("Postgres-XL does not support VIEW in LIKE clauses"),
-#else
- errmsg("Postgres-XC does not support VIEW in LIKE clauses"),
-#endif
errdetail("The feature is not currently supported")));
#endif
@@ -1988,24 +1957,6 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
if (strcmp(column->colname, key) == 0)
{
found = true;
-
-#ifdef PGXC
-#ifndef XCP
- /*
- * Only allow locally enforceable constraints.
- * See if it is a distribution column
- * If not set, set it to first column in index.
- * If primary key, we prefer that over a unique constraint.
- */
- if (IS_PGXC_COORDINATOR && !isLocalSafe)
- {
- if (cxt->distributeby)
- isLocalSafe = CheckLocalIndexColumn (
- ConvertToLocatorType(cxt->distributeby->disttype),
- cxt->distributeby->colname, key);
- }
-#endif
-#endif
break;
}
}
@@ -2125,7 +2076,6 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
#ifdef PGXC
if (IS_PGXC_COORDINATOR)
{
-#ifdef XCP
/*
* Check if index can be enforced locally
*/
@@ -2166,23 +2116,6 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
}
}
}
-#else
- /*
- * Set fallback distribution column.
- * If not set, set it to first column in index.
- * If primary key, we prefer that over a unique constraint.
- */
- if (index->indexParams == NIL
- && (index->primary || !cxt->fallback_dist_col))
- {
- cxt->fallback_dist_col = pstrdup(key);
- }
-
- /* Existing table, check if it is safe */
- if (cxt->isalter && !cxt->distributeby && !isLocalSafe)
- isLocalSafe = CheckLocalIndexColumn (
- cxt->rel->rd_locator_info->locatorType, cxt->rel->rd_locator_info->partAttrName, key);
-#endif
}
#endif
@@ -2198,7 +2131,6 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
index->indexParams = lappend(index->indexParams, iparam);
}
#ifdef PGXC
-#ifdef XCP
if (IS_PGXC_COORDINATOR && !isLocalSafe)
{
if (cxt->distributeby || cxt->isalter)
@@ -2242,15 +2174,6 @@ transformIndexConstraint(Constraint *constraint, CreateStmtContext *cxt)
}
}
}
-#else
- if (IS_PGXC_COORDINATOR && cxt->distributeby
- && (cxt->distributeby->disttype == DISTTYPE_HASH ||
- cxt->distributeby->disttype == DISTTYPE_MODULO)
- && !isLocalSafe)
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("Unique index of partitioned table must contain the hash distribution column.")));
-#endif
#endif
return index;
@@ -2301,28 +2224,6 @@ transformFKConstraints(CreateStmtContext *cxt,
constraint->skip_validation = true;
constraint->initially_valid = true;
-#ifdef PGXC
-#ifndef XCP
- /*
- * Set fallback distribution column.
- * If not yet set, set it to first column in FK constraint
- * if it references a partitioned table
- */
- if (IS_PGXC_COORDINATOR && !cxt->fallback_dist_col)
- {
- Oid pk_rel_id = RangeVarGetRelid(constraint->pktable, NoLock, false);
-
- /* make sure it is a partitioned column */
- if (list_length(constraint->pk_attrs) != 0
- && IsHashColumnForRelId(pk_rel_id, strVal(list_nth(constraint->pk_attrs,0))))
- {
- /* take first column */
- char *colstr = strdup(strVal(list_nth(constraint->fk_attrs,0)));
- cxt->fallback_dist_col = pstrdup(colstr);
- }
- }
-#endif
-#endif
}
}
@@ -2861,12 +2762,8 @@ transformAlterTableStmt(Oid relid, AlterTableStmt *stmt,
cxt.alist = NIL;
cxt.pkey = NULL;
#ifdef PGXC
-#ifdef XCP
cxt.fallback_source = FBS_NONE;
cxt.fallback_dist_cols = NIL;
-#else
- cxt.fallback_dist_col = NULL;
-#endif
cxt.distributeby = NULL;
cxt.subcluster = NULL;
#endif
@@ -3348,11 +3245,7 @@ setSchemaName(char *context_schema, char **stmt_schema_name)
bool
CheckLocalIndexColumn (char loctype, char *partcolname, char *indexcolname)
{
-#ifdef XCP
if (IsLocatorReplicated(loctype))
-#else
- if (loctype == LOCATOR_TYPE_REPLICATED)
-#endif
/* always safe */
return true;
if (loctype == LOCATOR_TYPE_RROBIN)
@@ -3367,8 +3260,6 @@ CheckLocalIndexColumn (char loctype, char *partcolname, char *indexcolname)
return false;
}
-
-#ifdef XCP
/*
* Given relation, find the index of the attribute in the primary key,
* which is the distribution key. Returns -1 if table is not a Hash/Modulo
@@ -3434,8 +3325,6 @@ find_relation_pk_dist_index(Relation rel)
return result;
}
-#endif
-
/*
* check to see if the constraint can be enforced locally
@@ -3445,22 +3334,16 @@ static void
checkLocalFKConstraints(CreateStmtContext *cxt)
{
ListCell *fkclist;
-#ifdef XCP
List *nodelist = NIL;
if (cxt->subcluster)
nodelist = transformSubclusterNodes(cxt->subcluster);
-#endif
+
foreach(fkclist, cxt->fkconstraints)
{
Constraint *constraint;
Oid pk_rel_id;
-#ifdef XCP
RelationLocInfo *rel_loc_info;
-#else
- char refloctype;
- char *checkcolname = NULL;
-#endif
constraint = (Constraint *) lfirst(fkclist);
/*
@@ -3496,7 +3379,6 @@ checkLocalFKConstraints(CreateStmtContext *cxt)
if (fkcon_schemaname &&
strcmp(fkcon_schemaname,
cxt->relation->schemaname) == 0)
-#ifdef XCP
{
/* check if bad distribution is already defined */
if ((cxt->distributeby && cxt->distributeby->disttype != DISTTYPE_REPLICATION) ||
@@ -3513,13 +3395,9 @@ checkLocalFKConstraints(CreateStmtContext *cxt)
}
continue;
}
-#else
- continue;
-#endif
}
pk_rel_id = RangeVarGetRelid(constraint->pktable, NoLock, false);
-#ifdef XCP
rel_loc_info = GetRelationLocInfo(pk_rel_id);
/* If referenced table is replicated, the constraint is safe */
if (rel_loc_info == NULL || IsLocatorReplicated(rel_loc_info->locatorType))
@@ -3798,97 +3676,12 @@ checkLocalFKConstraints(CreateStmtContext *cxt)
errmsg("Cannot reference a table with distribution type \"%c\"",
rel_loc_info->locatorType)));
}
-#else
- refloctype = GetLocatorType(pk_rel_id);
- /* If referenced table is replicated, the constraint is safe */
- if (refloctype == LOCATOR_TYPE_REPLICATED)
- continue;
- else if (refloctype == LOCATOR_TYPE_RROBIN)
- {
- ereport(ERROR,
- (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("Cannot reference a round robin table in a foreign key constraint")));
- }
- /*
- * See if we are hash or modulo partitioned and the column appears in the
- * constraint, and it corresponds to the position in the referenced table.
- */
- if (cxt->isalter)
- {
- if (cxt->rel->rd_locator_info->locatorType == LOCATOR_TYPE_HASH ||
- cxt->rel->rd_locator_info->locatorType == LOCATOR_TYPE_MODULO)
- {
- checkcolname = cxt->rel->rd_locator_info->partAttrName;
- }
- }
- else
- {
- if (cxt->distributeby)
- {
- if (cxt->distributeby->disttype == DISTTYPE_HASH ||
- cxt->distributeby->disttype == DISTTYPE_MODULO)
- checkcolname = cxt->distributeby->colname;
- }
- else
- {
- if (cxt->fallback_dist_col)
- checkcolname = cxt->fallback_dist_col;
- }
- }
- if (checkcolname)
- {
- int pos = 0;
-
- ListCell *attritem;
-
- foreach(attritem, constraint->fk_attrs)
- {
- char *attrname = (char *) strVal(lfirst(attritem));
-
- if (strcmp(checkcolname, attrname) == 0)
- {
- /* Found the ordinal position in constraint */
- break;
- }
- pos++;
- }
-
- if (pos >= list_length(constraint->fk_attrs))
- ereport(ERROR,
- (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("Hash/Modulo distributed table must include distribution column in index")));
-
- /*
- * The check that the referenced column in the PK table is the same
- * as the one used to distribute it makes sense only when the user
- * supplies the name of the referenced column while adding the constraint,
- * because if the user did not specify it the system will choose the PK column,
- * which will obviously be the one used to distribute it given the
- * existing constraints in XC.
- * This is required to make sure that both
- * alter table dtab add foreign key (b) references rtab(a);
- * and
- * alter table dtab add foreign key (b) references rtab;
- * behave similarly
- */
- if (constraint->pk_attrs != NULL)
- {
- /* Verify that the referenced table is partitioned at the same position in the index */
- if (!IsDistColumnForRelId(pk_rel_id, strVal(list_nth(constraint->pk_attrs,pos))))
- ereport(ERROR,
- (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("Hash/Modulo distribution column does not refer to hash/modulo distribution column in referenced table.")));
- }
- }
-#endif
}
-#ifdef XCP
/*
* If presence of a foreign constraint suggested a set of nodes, fix it here
*/
if (nodelist && cxt->subcluster == NULL)
cxt->subcluster = makeSubCluster(nodelist);
-#endif
}
#endif
diff --git a/src/backend/pgxc/copy/remotecopy.c b/src/backend/pgxc/copy/remotecopy.c
index b5256f219c..0700af150b 100644
--- a/src/backend/pgxc/copy/remotecopy.c
+++ b/src/backend/pgxc/copy/remotecopy.c
@@ -46,10 +46,6 @@ RemoteCopy_GetRelationLoc(RemoteCopyData *state,
Relation rel,
List *attnums)
{
-#ifndef XCP
- ExecNodes *exec_nodes = makeNode(ExecNodes);
-#endif
-
/*
* If the target table does not exist on nodes (e.g. system table)
* the location info returned is NULL. This is the criteria, when
@@ -57,7 +53,6 @@ RemoteCopy_GetRelationLoc(RemoteCopyData *state,
*/
state->rel_loc = GetRelationLocInfo(RelationGetRelid(rel));
-#ifdef XCP
if (state->rel_loc &&
AttributeNumberIsValid(state->rel_loc->partAttrNum))
{
@@ -73,58 +68,6 @@ RemoteCopy_GetRelationLoc(RemoteCopyData *state,
state->dist_type = InvalidOid;
state->locator = NULL;
-#else
- if (state->rel_loc)
- {
- /*
- * Pick up one node only
- * This case corresponds to a replicated table with COPY TO
- *
- */
- exec_nodes = makeNode(ExecNodes);
- if (!state->is_from &&
- IsLocatorReplicated(state->rel_loc->locatorType))
- exec_nodes->nodeList = GetPreferredReplicationNode(state->rel_loc->nodeList);
- else
- {
- /* All nodes necessary */
- exec_nodes->nodeList = list_concat(exec_nodes->nodeList, state->rel_loc->nodeList);
- }
- }
-
- state->idx_dist_by_col = -1;
- if (state->rel_loc && state->rel_loc->partAttrNum != 0)
- {
- /*
- * Find the column used as key for data distribution.
- * First scan attributes of tuple descriptor with the list
- * of attributes used in COPY if any list is specified.
- * If no list is specified, set this value to the one of
- * locator information.
- */
- if (attnums != NIL)
- {
- ListCell *cur;
- foreach(cur, attnums)
- {
- int attnum = lfirst_int(cur);
-
- if (state->rel_loc->partAttrNum == attnum)
- {
- state->idx_dist_by_col = attnum - 1;
- break;
- }
- }
- }
- else
- {
- state->idx_dist_by_col = state->rel_loc->partAttrNum - 1;
- }
- }
-
- /* Then save obtained result */
- state->exec_nodes = exec_nodes;
-#endif
}
/*
@@ -347,13 +290,8 @@ FreeRemoteCopyData(RemoteCopyData *state)
/* Leave if nothing */
if (state == NULL)
return;
-#ifdef XCP
if (state->locator)
freeLocator(state->locator);
-#else
- if (state->connections)
- pfree(state->connections);
-#endif
if (state->query_buf.data)
pfree(state->query_buf.data);
FreeRelationLocInfo(state->rel_loc);
diff --git a/src/backend/pgxc/locator/locator.c b/src/backend/pgxc/locator/locator.c
index 9b98c38f05..bea916408c 100644
--- a/src/backend/pgxc/locator/locator.c
+++ b/src/backend/pgxc/locator/locator.c
@@ -85,11 +85,6 @@ struct _Locator
};
#endif
-#ifndef XCP
-static Expr *pgxc_find_distcol_expr(Index varno, PartAttrNumber partAttrNum,
- Node *quals);
-#endif
-
Oid primary_data_node = InvalidOid;
int num_preferred_data_nodes = 0;
Oid preferred_data_node[MAX_PREFERRED_NODES];
@@ -174,8 +169,6 @@ static const unsigned int xc_mod_r[][6] =
{0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff}
};
-
-#ifdef XCP
/*
* GetAnyDataNode
* Pick any data node from given set, but try a preferred node
@@ -226,47 +219,6 @@ GetAnyDataNode(Bitmapset *nodes)
*/
return members[((unsigned int) random()) % nmembers];
}
-#else
-/*
- * GetPreferredReplicationNode
- * Pick any Datanode from the given list; however, fetch a preferred node first.
- */
-List *
-GetPreferredReplicationNode(List *relNodes)
-{
- /*
- * Try to find the first node in given list relNodes
- * that is in the list of preferred nodes
- */
- if (num_preferred_data_nodes != 0)
- {
- ListCell *item;
- foreach(item, relNodes)
- {
- int relation_nodeid = lfirst_int(item);
- int i;
- for (i = 0; i < num_preferred_data_nodes; i++)
- {
-#ifdef XCP
- char nodetype = PGXC_NODE_DATANODE;
- int nodeid = PGXCNodeGetNodeId(preferred_data_node[i],
- &nodetype);
-#else
- int nodeid = PGXCNodeGetNodeId(preferred_data_node[i], PGXC_NODE_DATANODE);
-#endif
-
- /* OK, found one */
- if (nodeid == relation_nodeid)
- return lappend_int(NULL, nodeid);
- }
- }
- }
-
- /* Nothing found? Return the first one in relation node list */
- return lappend_int(NULL, linitial_int(relNodes));
-}
-#endif
-
/*
* compute_modulo
@@ -321,23 +273,6 @@ compute_modulo(unsigned int numerator, unsigned int denominator)
return numerator % denominator;
}
-#ifndef XCP
-/*
- * get_node_from_modulo - determine node based on modulo
- *
- * compute_modulo
- */
-static int
-get_node_from_modulo(int modulo, List *nodeList)
-{
- if (nodeList == NIL || modulo >= list_length(nodeList) || modulo < 0)
- ereport(ERROR, (errmsg("Modulo value out of range\n")));
-
- return list_nth_int(nodeList, modulo);
-}
-#endif
-
-
/*
* GetRelationDistColumn - Returns the name of the hash or modulo distribution column
* First hash distribution is checked
@@ -364,39 +299,7 @@ char *pColName;
bool
IsTypeHashDistributable(Oid col_type)
{
-#ifdef XCP
return (hash_func_ptr(col_type) != NULL);
-#else
- if(col_type == INT8OID
- || col_type == INT2OID
- || col_type == OIDOID
- || col_type == INT4OID
- || col_type == BOOLOID
- || col_type == CHAROID
- || col_type == NAMEOID
- || col_type == INT2VECTOROID
- || col_type == TEXTOID
- || col_type == OIDVECTOROID
- || col_type == FLOAT4OID
- || col_type == FLOAT8OID
- || col_type == ABSTIMEOID
- || col_type == RELTIMEOID
- || col_type == CASHOID
- || col_type == BPCHAROID
- || col_type == BYTEAOID
- || col_type == VARCHAROID
- || col_type == DATEOID
- || col_type == TIMEOID
- || col_type == TIMESTAMPOID
- || col_type == TIMESTAMPTZOID
- || col_type == INTERVALOID
- || col_type == TIMETZOID
- || col_type == NUMERICOID
- )
- return true;
-
- return false;
-#endif
}
/*
@@ -483,39 +386,7 @@ IsDistColumnForRelId(Oid relid, char *part_col_name)
bool
IsTypeModuloDistributable(Oid col_type)
{
-#ifdef XCP
return (modulo_value_len(col_type) != -1);
-#else
- if(col_type == INT8OID
- || col_type == INT2OID
- || col_type == OIDOID
- || col_type == INT4OID
- || col_type == BOOLOID
- || col_type == CHAROID
- || col_type == NAMEOID
- || col_type == INT2VECTOROID
- || col_type == TEXTOID
- || col_type == OIDVECTOROID
- || col_type == FLOAT4OID
- || col_type == FLOAT8OID
- || col_type == ABSTIMEOID
- || col_type == RELTIMEOID
- || col_type == CASHOID
- || col_type == BPCHAROID
- || col_type == BYTEAOID
- || col_type == VARCHAROID
- || col_type == DATEOID
- || col_type == TIMEOID
- || col_type == TIMESTAMPOID
- || col_type == TIMESTAMPTZOID
- || col_type == INTERVALOID
- || col_type == TIMETZOID
- || col_type == NUMERICOID
- )
- return true;
-
- return false;
-#endif
}
/*
@@ -586,13 +457,8 @@ GetRoundRobinNode(Oid relid)
int ret_node;
Relation rel = relation_open(relid, AccessShareLock);
-#ifdef XCP
Assert (IsLocatorReplicated(rel->rd_locator_info->locatorType) ||
rel->rd_locator_info->locatorType == LOCATOR_TYPE_RROBIN);
-#else
- Assert (rel->rd_locator_info->locatorType == LOCATOR_TYPE_REPLICATED ||
- rel->rd_locator_info->locatorType == LOCATOR_TYPE_RROBIN);
-#endif
ret_node = lfirst_int(rel->rd_locator_info->roundRobinNode);
@@ -625,14 +491,9 @@ IsTableDistOnPrimary(RelationLocInfo *rel_loc_info)
foreach(item, rel_loc_info->nodeList)
{
-#ifdef XCP
char ntype = PGXC_NODE_DATANODE;
if (PGXCNodeGetNodeId(primary_data_node, &ntype) == lfirst_int(item))
return true;
-#else
- if (PGXCNodeGetNodeId(primary_data_node, PGXC_NODE_DATANODE) == lfirst_int(item))
- return true;
-#endif
}
return false;
}
@@ -672,241 +533,6 @@ IsLocatorInfoEqual(RelationLocInfo *rel_loc_info1, RelationLocInfo *rel_loc_info
return true;
}
-
-#ifndef XCP
-/*
- * GetRelationNodes
- *
- * Get list of relation nodes
- * If the table is replicated and we are reading, we can just pick one.
- * If the table is partitioned, we apply partitioning column value, if possible.
- *
- * If the relation is partitioned, partValue will be applied if present
- * (indicating a value appears for partitioning column), otherwise it
- * is ignored.
- *
- * preferredNodes is only used for replicated tables. If set, it will
- * use one of the nodes specified if the table is replicated on it.
- * This helps optimize for avoiding introducing additional nodes into the
- * transaction.
- *
- * The returned List is a copy, so it should be freed when finished.
- */
-ExecNodes *
-GetRelationNodes(RelationLocInfo *rel_loc_info, Datum valueForDistCol,
- bool isValueNull, Oid typeOfValueForDistCol,
- RelationAccessType accessType)
-{
- ExecNodes *exec_nodes;
- long hashValue;
- int modulo;
- int nodeIndex;
- int k;
-
- if (rel_loc_info == NULL)
- return NULL;
-
- exec_nodes = makeNode(ExecNodes);
- exec_nodes->baselocatortype = rel_loc_info->locatorType;
-
- switch (rel_loc_info->locatorType)
- {
- case LOCATOR_TYPE_REPLICATED:
-
- if (accessType == RELATION_ACCESS_UPDATE || accessType == RELATION_ACCESS_INSERT)
- {
- /* we need to write to all synchronously */
- exec_nodes->nodeList = list_concat(exec_nodes->nodeList, rel_loc_info->nodeList);
-
- /*
- * Write to primary node first, to reduce chance of a deadlock
- * on replicated tables. If -1, do not use primary copy.
- */
- if (IsTableDistOnPrimary(rel_loc_info)
- && exec_nodes->nodeList
- && list_length(exec_nodes->nodeList) > 1) /* make sure more than 1 */
- {
- exec_nodes->primarynodelist = lappend_int(NULL,
- PGXCNodeGetNodeId(primary_data_node, PGXC_NODE_DATANODE));
- list_delete_int(exec_nodes->nodeList,
- PGXCNodeGetNodeId(primary_data_node, PGXC_NODE_DATANODE));
- }
- }
- else
- {
- /*
- * In case there are nodes defined in location info, initialize node list
- * with a default node being the first node in list.
- * This node list may be changed if a better one is found afterwards.
- */
- if (rel_loc_info->nodeList)
- exec_nodes->nodeList = lappend_int(NULL,
- linitial_int(rel_loc_info->nodeList));
-
- if (accessType == RELATION_ACCESS_READ_FOR_UPDATE &&
- IsTableDistOnPrimary(rel_loc_info))
- {
- /*
- * We should ensure row is locked on the primary node to
- * avoid distributed deadlock if updating the same row
- * concurrently
- */
- exec_nodes->nodeList = lappend_int(NULL,
- PGXCNodeGetNodeId(primary_data_node, PGXC_NODE_DATANODE));
- }
- else if (num_preferred_data_nodes > 0)
- {
- ListCell *item;
-
- foreach(item, rel_loc_info->nodeList)
- {
- for (k = 0; k < num_preferred_data_nodes; k++)
- {
- if (PGXCNodeGetNodeId(preferred_data_node[k],
- PGXC_NODE_DATANODE) == lfirst_int(item))
- {
- exec_nodes->nodeList = lappend_int(NULL,
- lfirst_int(item));
- break;
- }
- }
- }
- }
-
- /* If nothing found just read from one of them. Use round robin mechanism */
- if (exec_nodes->nodeList == NULL)
- exec_nodes->nodeList = lappend_int(NULL,
- GetRoundRobinNode(rel_loc_info->relid));
- }
- break;
-
- case LOCATOR_TYPE_HASH:
- case LOCATOR_TYPE_MODULO:
- if (!isValueNull)
- {
- hashValue = compute_hash(typeOfValueForDistCol, valueForDistCol,
- rel_loc_info->locatorType);
- modulo = compute_modulo(abs(hashValue), list_length(rel_loc_info->nodeList));
- nodeIndex = get_node_from_modulo(modulo, rel_loc_info->nodeList);
- exec_nodes->nodeList = lappend_int(NULL, nodeIndex);
- }
- else
- {
- if (accessType == RELATION_ACCESS_INSERT)
- /* Insert NULL to first node*/
- exec_nodes->nodeList = lappend_int(NULL, linitial_int(rel_loc_info->nodeList));
- else
- exec_nodes->nodeList = list_concat(exec_nodes->nodeList, rel_loc_info->nodeList);
- }
- break;
-
- case LOCATOR_TYPE_SINGLE:
- /* just return first (there should only be one) */
- exec_nodes->nodeList = list_concat(exec_nodes->nodeList,
- rel_loc_info->nodeList);
- break;
-
- case LOCATOR_TYPE_RROBIN:
- /* round robin, get next one */
- if (accessType == RELATION_ACCESS_INSERT)
- {
- /* write to just one of them */
- exec_nodes->nodeList = lappend_int(NULL, GetRoundRobinNode(rel_loc_info->relid));
- }
- else
- {
- /* we need to read from all */
- exec_nodes->nodeList = list_concat(exec_nodes->nodeList,
- rel_loc_info->nodeList);
- }
- break;
-
- /* PGXCTODO case LOCATOR_TYPE_RANGE: */
- /* PGXCTODO case LOCATOR_TYPE_CUSTOM: */
- default:
- ereport(ERROR, (errmsg("Error: no such supported locator type: %c\n",
- rel_loc_info->locatorType)));
- break;
- }
-
- return exec_nodes;
-}
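
The hash/modulo branch removed above narrows a distribution-column value down to a single node index (abs of the hash, modulo the node-list length, then an index into the node list). The following is a minimal standalone sketch of that step, illustrative only; sketch_pick_node, the fixed node array and the literal hash value are assumptions, not code from locator.c.

#include <stdio.h>
#include <stdlib.h>

static int
sketch_pick_node(long hash_value, const int *node_indexes, int node_count)
{
	/* abs(hash) modulo the number of nodes, then index into the node list */
	int modulo = (int) (labs(hash_value) % node_count);

	return node_indexes[modulo];
}

int
main(void)
{
	int  nodes[] = {0, 1, 2, 3};   /* four Datanodes */
	long hash = -7;                /* pretend the hash function returned this */

	printf("row goes to node %d\n", sketch_pick_node(hash, nodes, 4));
	return 0;
}
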
-
-/*
- * GetRelationNodesByQuals
- * A wrapper around GetRelationNodes to reduce the node list by looking at the
- * quals. varno is assumed to be the varno of reloid inside the quals. No check
- * is made to see if that's correct.
- */
-ExecNodes *
-GetRelationNodesByQuals(Oid reloid, Index varno, Node *quals,
- RelationAccessType relaccess)
-{
- RelationLocInfo *rel_loc_info = GetRelationLocInfo(reloid);
- Expr *distcol_expr = NULL;
- ExecNodes *exec_nodes;
- Datum distcol_value;
- bool distcol_isnull;
- Oid distcol_type;
-
- if (!rel_loc_info)
- return NULL;
- /*
-	 * If the table is distributed by value, check if we can reduce the Datanodes
- * by looking at the qualifiers for this relation
- */
- if (IsLocatorDistributedByValue(rel_loc_info->locatorType))
- {
- Oid disttype = get_atttype(reloid, rel_loc_info->partAttrNum);
- int32 disttypmod = get_atttypmod(reloid, rel_loc_info->partAttrNum);
- distcol_expr = pgxc_find_distcol_expr(varno, rel_loc_info->partAttrNum,
- quals);
- /*
-		 * If the type of the expression used to find the Datanode is not the
-		 * same as the distribution column type, try casting it. This is the
-		 * same as what would happen when inserting that type of expression
-		 * value as the distribution column value.
- */
- if (distcol_expr)
- {
- distcol_expr = (Expr *)coerce_to_target_type(NULL,
- (Node *)distcol_expr,
- exprType((Node *)distcol_expr),
- disttype, disttypmod,
- COERCION_ASSIGNMENT,
- COERCE_IMPLICIT_CAST, -1);
- /*
- * PGXC_FQS_TODO: We should set the bound parameters here, but we don't have
- * PlannerInfo struct and we don't handle them right now.
- * Even if constant expression mutator changes the expression, it will
- * only simplify it, keeping the semantics same
- */
- distcol_expr = (Expr *)eval_const_expressions(NULL,
- (Node *)distcol_expr);
- }
- }
-
- if (distcol_expr && IsA(distcol_expr, Const))
- {
- Const *const_expr = (Const *)distcol_expr;
- distcol_value = const_expr->constvalue;
- distcol_isnull = const_expr->constisnull;
- distcol_type = const_expr->consttype;
- }
- else
- {
- distcol_value = (Datum) 0;
- distcol_isnull = true;
- distcol_type = InvalidOid;
- }
-
- exec_nodes = GetRelationNodes(rel_loc_info, distcol_value,
- distcol_isnull, distcol_type,
- relaccess);
- return exec_nodes;
-}
-#endif
-
/*
* ConvertToLocatorType
* get locator distribution type
@@ -1053,19 +679,12 @@ RelationBuildLocator(Relation rel)
relationLocInfo->nodeList = NIL;
-#ifdef XCP
for (j = 0; j < pgxc_class->nodeoids.dim1; j++)
{
char ntype = PGXC_NODE_DATANODE;
int nid = PGXCNodeGetNodeId(pgxc_class->nodeoids.values[j], &ntype);
relationLocInfo->nodeList = lappend_int(relationLocInfo->nodeList, nid);
}
-#else
- for (j = 0; j < pgxc_class->nodeoids.dim1; j++)
- relationLocInfo->nodeList = lappend_int(relationLocInfo->nodeList,
- PGXCNodeGetNodeId(pgxc_class->nodeoids.values[j],
- PGXC_NODE_DATANODE));
-#endif
/*
* If the locator type is round robin, we set a node to
@@ -1073,11 +692,7 @@ RelationBuildLocator(Relation rel)
* we choose a node to use for balancing reads.
*/
if (relationLocInfo->locatorType == LOCATOR_TYPE_RROBIN
-#ifdef XCP
|| IsLocatorReplicated(relationLocInfo->locatorType))
-#else
- || relationLocInfo->locatorType == LOCATOR_TYPE_REPLICATED)
-#endif
{
int offset;
/*
@@ -1878,102 +1493,3 @@ getLocatorNodeCount(Locator *self)
return self->nodeCount;
}
#endif
-
-
-#ifndef XCP
-/*
- * pgxc_find_distcol_expr
- * Search through the quals provided and find out an expression which will give
- * us value of distribution column if exists in the quals. Say for a table
- * tab1 (val int, val2 int) distributed by hash(val), a query "SELECT * FROM
- * tab1 WHERE val = fn(x, y, z) and val2 = 3", fn(x,y,z) is the expression which
- * decides the distribution column value in the rows qualified by this query.
- * Hence return fn(x, y, z). But for a query "SELECT * FROM tab1 WHERE val =
- * fn(x, y, z) || val2 = 3", there is no expression which decides the values
- * distribution column val can take in the qualified rows. So, in such cases
- * this function returns NULL.
- */
-static Expr *
-pgxc_find_distcol_expr(Index varno, PartAttrNumber partAttrNum,
- Node *quals)
-{
- /* Convert the qualification into list of arguments of AND */
- List *lquals = make_ands_implicit((Expr *)quals);
- ListCell *qual_cell;
- /*
- * For every ANDed expression, check if that expression is of the form
- * <distribution_col> = <expr>. If so return expr.
- */
- foreach(qual_cell, lquals)
- {
- Expr *qual_expr = (Expr *)lfirst(qual_cell);
- OpExpr *op;
- Expr *lexpr;
- Expr *rexpr;
- Var *var_expr;
- Expr *distcol_expr;
-
- if (!IsA(qual_expr, OpExpr))
- continue;
- op = (OpExpr *)qual_expr;
- /* If not a binary operator, it can not be '='. */
- if (list_length(op->args) != 2)
- continue;
-
- lexpr = linitial(op->args);
- rexpr = lsecond(op->args);
-
- /*
- * If either of the operands is a RelabelType, extract the Var in the RelabelType.
- * A RelabelType represents a "dummy" type coercion between two binary compatible datatypes.
-		 * A RelabelType represents a "dummy" type coercion between two binary-compatible datatypes.
-		 * If we do not handle these, the optimization does not work for varchar columns.
-		 * For example, if col is of type varchar and is the distribution key, then
-		 *     select * from vc_tab where col = 'abcdefghijklmnopqrstuvwxyz';
-		 * should be shipped to only one of the nodes.
- if (IsA(lexpr, RelabelType))
- lexpr = ((RelabelType*)lexpr)->arg;
- if (IsA(rexpr, RelabelType))
- rexpr = ((RelabelType*)rexpr)->arg;
-
- /*
- * If either of the operands is a Var expression, assume the other
- * one is distribution column expression. If none is Var check next
- * qual.
- */
- if (IsA(lexpr, Var))
- {
- var_expr = (Var *)lexpr;
- distcol_expr = rexpr;
- }
- else if (IsA(rexpr, Var))
- {
- var_expr = (Var *)rexpr;
- distcol_expr = lexpr;
- }
- else
- continue;
- /*
- * If Var found is not the distribution column of required relation,
- * check next qual
- */
- if (var_expr->varno != varno || var_expr->varattno != partAttrNum)
- continue;
- /*
- * If the operator is not an assignment operator, check next
- * constraint. An operator is an assignment operator if it's
- * mergejoinable or hashjoinable. Beware that not every assignment
- * operator is mergejoinable or hashjoinable, so we might leave some
-		 * opportunity. But then we would have to rely on the opname, which may
-		 * not be something we know to be an equality operator.
- */
- if (!op_mergejoinable(op->opno, exprType((Node *)lexpr)) &&
- !op_hashjoinable(op->opno, exprType((Node *)lexpr)))
- continue;
- /* Found the distribution column expression return it */
- return distcol_expr;
- }
- /* Exhausted all quals, but no distribution column expression */
- return NULL;
-}
-#endif
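
Before the diff moves on to the next file: pgxc_find_distcol_expr above essentially scans the implicitly ANDed quals for an equality on the distribution column and hands back the expression on the other side. A toy sketch of that pattern follows; the SketchQual struct and helper are hypothetical stand-ins for the OpExpr/Var handling, not Postgres-XC types.

#include <stdio.h>
#include <string.h>

typedef struct SketchQual
{
	int         attno;   /* column the qual refers to */
	const char *op;      /* operator name, e.g. "=" */
	int         value;   /* constant on the other side */
} SketchQual;

/* Return the constant paired with the distribution column, or -1 if none. */
static int
sketch_find_distcol_value(const SketchQual *quals, int nquals, int dist_attno)
{
	int i;

	for (i = 0; i < nquals; i++)
	{
		/* only an equality qual on the distribution column qualifies */
		if (quals[i].attno == dist_attno && strcmp(quals[i].op, "=") == 0)
			return quals[i].value;
	}
	return -1;
}

int
main(void)
{
	/* WHERE val = 42 AND val2 = 3, table distributed by hash(val) => attno 1 */
	SketchQual quals[] = { {1, "=", 42}, {2, "=", 3} };

	printf("distribution value: %d\n",
		   sketch_find_distcol_value(quals, 2, 1));
	return 0;
}
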
diff --git a/src/backend/pgxc/locator/redistrib.c b/src/backend/pgxc/locator/redistrib.c
index 7385290b24..49a2665694 100644
--- a/src/backend/pgxc/locator/redistrib.c
+++ b/src/backend/pgxc/locator/redistrib.c
@@ -410,7 +410,6 @@ distrib_copy_to(RedistribState *distribState)
get_namespace_name(RelationGetNamespace(rel)),
RelationGetRelationName(rel))));
-#ifdef XCP
/* Begin the COPY process */
DataNodeCopyBegin(copyState);
@@ -421,23 +420,6 @@ distrib_copy_to(RedistribState *distribState)
DataNodeCopyStore(
(PGXCNodeHandle **) getLocatorNodeMap(copyState->locator),
getLocatorNodeCount(copyState->locator), store);
-#else
- /* Begin the COPY process */
- copyState->connections = DataNodeCopyBegin(copyState->query_buf.data,
- copyState->exec_nodes->nodeList,
- GetActiveSnapshot());
-
- /* Create tuplestore storage */
- store = tuplestore_begin_heap(true, false, work_mem);
-
- /* Then get rows and copy them to the tuplestore used for redistribution */
- DataNodeCopyOut(copyState->exec_nodes,
- copyState->connections,
- RelationGetDescr(rel), /* Need also to set up the tuple descriptor */
- NULL,
- store, /* Tuplestore used for redistribution */
- REMOTE_COPY_TUPLESTORE);
-#endif
/* Do necessary clean-up */
FreeRemoteCopyOptions(options);
@@ -463,17 +445,12 @@ distrib_copy_from(RedistribState *distribState, ExecNodes *exec_nodes)
Relation rel;
RemoteCopyOptions *options;
RemoteCopyData *copyState;
-#ifndef XCP
- bool replicated, contains_tuple = true;
-#endif
TupleDesc tupdesc;
-#ifdef XCP
/* May be needed to decode partitioning value */
int partIdx = -1;
FmgrInfo in_function;
Oid typioparam;
int typmod = 0;
-#endif
/* Nothing to do if on remote node */
if (IS_PGXC_DATANODE || IsConnFromCoord())
@@ -494,28 +471,14 @@ distrib_copy_from(RedistribState *distribState, ExecNodes *exec_nodes)
RemoteCopy_GetRelationLoc(copyState, rel, NIL);
RemoteCopy_BuildStatement(copyState, rel, options, NIL, NIL);
-#ifdef XCP
/* Modify relation location as requested */
if (exec_nodes)
{
if (exec_nodes->nodeList)
copyState->rel_loc->nodeList = exec_nodes->nodeList;
}
-#else
- /*
- * When building COPY FROM command in redistribution list,
- * use the list of nodes that has been calculated there.
- * It might be possible that this COPY is done only on a portion of nodes.
- */
- if (exec_nodes && exec_nodes->nodeList != NIL)
- {
- copyState->exec_nodes->nodeList = exec_nodes->nodeList;
- copyState->rel_loc->nodeList = exec_nodes->nodeList;
- }
-#endif
tupdesc = RelationGetDescr(rel);
-#ifdef XCP
if (AttributeNumberIsValid(copyState->rel_loc->partAttrNum))
{
Oid in_func_oid;
@@ -542,7 +505,6 @@ distrib_copy_from(RedistribState *distribState, ExecNodes *exec_nodes)
}
partIdx -= dropped;
}
-#endif
/* Inform client of operation being done */
ereport(DEBUG1,
@@ -550,7 +512,6 @@ distrib_copy_from(RedistribState *distribState, ExecNodes *exec_nodes)
get_namespace_name(RelationGetNamespace(rel)),
RelationGetRelationName(rel))));
-#ifdef XCP
DataNodeCopyBegin(copyState);
/* Send each COPY message stored to remote nodes */
@@ -602,78 +563,6 @@ distrib_copy_from(RedistribState *distribState, ExecNodes *exec_nodes)
}
DataNodeCopyFinish(getLocatorNodeCount(copyState->locator),
(PGXCNodeHandle **) getLocatorNodeMap(copyState->locator));
-#else
- /* Begin redistribution on remote nodes */
- copyState->connections = DataNodeCopyBegin(copyState->query_buf.data,
- copyState->exec_nodes->nodeList,
- GetActiveSnapshot());
-
- /* Transform each tuple stored into a COPY message and send it to remote nodes */
- while (contains_tuple)
- {
- char *data;
- int len;
- Form_pg_attribute *attr = tupdesc->attrs;
- Datum dist_col_value = (Datum) 0;
- bool dist_col_is_null = true;
- Oid dist_col_type = UNKNOWNOID;
- TupleTableSlot *slot;
- ExecNodes *local_execnodes;
-
- /* Build table slot for this relation */
- slot = MakeSingleTupleTableSlot(tupdesc);
-
- /* Get tuple slot from the tuplestore */
- contains_tuple = tuplestore_gettupleslot(store, true, false, slot);
- if (!contains_tuple)
- {
- ExecDropSingleTupleTableSlot(slot);
- break;
- }
-
- /* Make sure the tuple is fully deconstructed */
- slot_getallattrs(slot);
-
- /* Find value of distribution column if necessary */
- if (copyState->idx_dist_by_col >= 0)
- {
- dist_col_value = slot->tts_values[copyState->idx_dist_by_col];
- dist_col_is_null = slot->tts_isnull[copyState->idx_dist_by_col];
- dist_col_type = attr[copyState->idx_dist_by_col]->atttypid;
- }
-
- /* Build message to be sent to Datanodes */
- data = CopyOps_BuildOneRowTo(tupdesc, slot->tts_values, slot->tts_isnull, &len);
-
- /* Build relation node list */
- local_execnodes = GetRelationNodes(copyState->rel_loc,
- dist_col_value,
- dist_col_is_null,
- dist_col_type,
- RELATION_ACCESS_INSERT);
- /* Take a copy of the node lists so as not to interfere with locator info */
- local_execnodes->primarynodelist = list_copy(local_execnodes->primarynodelist);
- local_execnodes->nodeList = list_copy(local_execnodes->nodeList);
-
- /* Process data to Datanodes */
- DataNodeCopyIn(data,
- len,
- local_execnodes,
- copyState->connections);
-
- /* Clean up */
- pfree(data);
- FreeExecNodes(&local_execnodes);
- ExecClearTuple(slot);
- ExecDropSingleTupleTableSlot(slot);
- }
-
- /* Finish the redistribution process */
- replicated = copyState->rel_loc->locatorType == LOCATOR_TYPE_REPLICATED;
- DataNodeCopyFinish(copyState->connections,
- replicated ? PGXCNodeGetNodeId(primary_data_node, PGXC_NODE_DATANODE) : -1,
- replicated ? COMBINE_TYPE_SAME : COMBINE_TYPE_SUM);
-#endif
/* Lock is maintained until transaction commits */
relation_close(rel, NoLock);
@@ -974,9 +863,6 @@ distrib_execute_query(char *sql, bool is_temp, ExecNodes *exec_nodes)
/* Redistribution operations only concern Datanodes */
step->exec_type = EXEC_ON_DATANODES;
-#ifndef XCP
- step->is_temp = is_temp;
-#endif
ExecRemoteUtility(step);
pfree(step->sql_statement);
pfree(step);
diff --git a/src/backend/pgxc/nodemgr/nodemgr.c b/src/backend/pgxc/nodemgr/nodemgr.c
index 7ff9fdabac..d8f8dc006c 100644
--- a/src/backend/pgxc/nodemgr/nodemgr.c
+++ b/src/backend/pgxc/nodemgr/nodemgr.c
@@ -609,9 +609,6 @@ PgxcNodeAlter(AlterNodeStmt *stmt)
const char *node_name = stmt->node_name;
char *node_host;
char node_type;
-#ifndef XCP
- char node_type_old;
-#endif
int node_port;
bool is_preferred;
bool is_primary;
@@ -653,9 +650,6 @@ PgxcNodeAlter(AlterNodeStmt *stmt)
is_preferred = is_pgxc_nodepreferred(nodeOid);
is_primary = is_pgxc_nodeprimary(nodeOid);
node_type = get_pgxc_nodetype(nodeOid);
-#ifndef XCP
- node_type_old = node_type;
-#endif
node_id = get_pgxc_node_id(nodeOid);
/* Filter options */
@@ -676,28 +670,6 @@ PgxcNodeAlter(AlterNodeStmt *stmt)
errmsg("PGXC node %s: two nodes cannot be primary",
node_name)));
- /* Check type dependency */
-#ifndef XCP
- /*
- * XCP:
-	 * Initially a node identifies itself as a Coordinator and this should be
-	 * changed for Datanodes. In general, it should be safe to turn a
-	 * Coordinator into a Datanode and back
- */
- if (node_type_old == PGXC_NODE_COORDINATOR &&
- node_type == PGXC_NODE_DATANODE)
- ereport(ERROR,
- (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("PGXC node %s: cannot alter Coordinator to Datanode",
- node_name)));
- else if (node_type_old == PGXC_NODE_DATANODE &&
- node_type == PGXC_NODE_COORDINATOR)
- ereport(ERROR,
- (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("PGXC node %s: cannot alter Datanode to Coordinator",
- node_name)));
-#endif
-
/* Update values for catalog entry */
MemSet(new_record, 0, sizeof(new_record));
MemSet(new_record_nulls, false, sizeof(new_record_nulls));
diff --git a/src/backend/pgxc/plan/planner.c b/src/backend/pgxc/plan/planner.c
index dbe16c49f8..54cc67f8bc 100644
--- a/src/backend/pgxc/plan/planner.c
+++ b/src/backend/pgxc/plan/planner.c
@@ -66,2060 +66,6 @@
/* Forbid unsafe SQL statements */
bool StrictStatementChecking = true;
-#ifndef XCP
-/* fast query shipping is enabled by default */
-bool enable_fast_query_shipping = true;
-
-static RemoteQuery *makeRemoteQuery(void);
-static void validate_part_col_updatable(const Query *query);
-static bool contains_temp_tables(List *rtable);
-static bool contains_only_pg_catalog(List *rtable);
-static void pgxc_handle_unsupported_stmts(Query *query);
-static PlannedStmt *pgxc_FQS_planner(Query *query, int cursorOptions,
- ParamListInfo boundParams);
-static bool pgxc_query_needs_coord(Query *query);
-static ExecNodes *pgxc_is_query_shippable(Query *query, int query_level);
-static void pgxc_FQS_find_datanodes(Shippability_context *sc_context);
-static ExecNodes *pgxc_merge_exec_nodes(ExecNodes *exec_nodes1,
- ExecNodes *exec_nodes2,
- bool merge_dist_equijoin,
- bool merge_replicated_only);
-static PlannedStmt *pgxc_handle_exec_direct(Query *query, int cursorOptions,
- ParamListInfo boundParams);
-static RemoteQuery *pgxc_FQS_create_remote_plan(Query *query,
- ExecNodes *exec_nodes,
- bool is_exec_direct);
-static void pgxc_set_remote_parameters(PlannedStmt *plan, ParamListInfo boundParams);
-static ExecNodes *pgxc_FQS_get_relation_nodes(RangeTblEntry *rte, Index varno,
- Query *query);
-static bool pgxc_qual_hash_dist_equijoin(Relids varnos_1, Relids varnos_2,
- Oid distcol_type, Node *quals,
- List *rtable);
-static bool VarAttrIsPartAttr(Var *var, List *rtable);
-static void pgxc_set_shippability_reason(Shippability_context *context, ShippabilityStat reason);
-
-/*
- * make_ctid_col_ref
- *
- * creates a Var for a column referring to ctid
- */
-
-static Var *
-make_ctid_col_ref(Query *qry)
-{
- ListCell *lc1, *lc2;
- RangeTblEntry *rte1, *rte2;
- int tableRTEs, firstTableRTENumber;
- RangeTblEntry *rte_in_query = NULL;
- AttrNumber attnum;
- Oid vartypeid;
- int32 type_mod;
- Oid varcollid;
-
- /*
-	 * If the query has more than one table RTE and they refer to different tables, we cannot add ctid to the query target list.
-	 * In that case we skip adding it, and WHERE CURRENT OF will then
-	 * fail, saying the query is not a simply updatable scan of the table.
- */
-
- tableRTEs = 0;
- foreach(lc1, qry->rtable)
- {
- rte1 = (RangeTblEntry *) lfirst(lc1);
-
- if (rte1->rtekind == RTE_RELATION)
- {
- tableRTEs++;
- if (tableRTEs > 1)
- {
- /*
- * See if we get two RTEs in case we have two references
- * to the same table with different aliases
- */
- foreach(lc2, qry->rtable)
- {
- rte2 = (RangeTblEntry *) lfirst(lc2);
-
- if (rte2->rtekind == RTE_RELATION)
- {
- if (rte2->relid != rte1->relid)
- {
- return NULL;
- }
- }
- }
- continue;
- }
- rte_in_query = rte1;
- }
- }
-
- if (tableRTEs > 1)
- {
- firstTableRTENumber = 0;
- foreach(lc1, qry->rtable)
- {
- rte1 = (RangeTblEntry *) lfirst(lc1);
- firstTableRTENumber++;
- if (rte1->rtekind == RTE_RELATION)
- {
- break;
- }
- }
- }
- else
- {
- firstTableRTENumber = 1;
- }
-
- attnum = specialAttNum("ctid");
- Assert(rte_in_query);
- get_rte_attribute_type(rte_in_query, attnum, &vartypeid, &type_mod, &varcollid);
- return makeVar(firstTableRTENumber, attnum, vartypeid, type_mod, varcollid, 0);
-}
-
-/*
- * Returns whether or not the rtable (and its subqueries)
- * only contain pg_catalog entries.
- */
-static bool
-contains_only_pg_catalog(List *rtable)
-{
- ListCell *item;
-
- /* May be complicated. Before giving up, just check for pg_catalog usage */
- foreach(item, rtable)
- {
- RangeTblEntry *rte = (RangeTblEntry *) lfirst(item);
-
- if (rte->rtekind == RTE_RELATION)
- {
- if (get_rel_namespace(rte->relid) != PG_CATALOG_NAMESPACE)
- return false;
- }
- else if (rte->rtekind == RTE_SUBQUERY &&
- !contains_only_pg_catalog(rte->subquery->rtable))
- return false;
- }
- return true;
-}
-
-
-/*
- * Returns true if at least one temporary table is in use
- * in query (and its subqueries)
- */
-static bool
-contains_temp_tables(List *rtable)
-{
- ListCell *item;
-
- foreach(item, rtable)
- {
- RangeTblEntry *rte = (RangeTblEntry *) lfirst(item);
-
- if (rte->rtekind == RTE_RELATION)
- {
- if (IsTempTable(rte->relid))
- return true;
- }
- else if (rte->rtekind == RTE_SUBQUERY &&
- contains_temp_tables(rte->subquery->rtable))
- return true;
- }
-
- return false;
-}
-
-/*
- * Create an instance of RemoteQuery and initialize fields
- */
-static RemoteQuery *
-makeRemoteQuery(void)
-{
- RemoteQuery *result = makeNode(RemoteQuery);
- result->combine_type = COMBINE_TYPE_NONE;
- result->exec_type = EXEC_ON_DATANODES;
- result->exec_direct_type = EXEC_DIRECT_NONE;
-
- return result;
-}
-
-/*
- * get_plan_combine_type - determine combine type
- *
- * COMBINE_TYPE_SAME - for replicated updates
- * COMBINE_TYPE_SUM - for hash and round robin updates
- * COMBINE_TYPE_NONE - for operations where row_count is not applicable
- *
- * Return COMBINE_TYPE_NONE when combining row counts does not apply.
- */
-static CombineType
-get_plan_combine_type(Query *query, char baselocatortype)
-{
-
- switch (query->commandType)
- {
- case CMD_INSERT:
- case CMD_UPDATE:
- case CMD_DELETE:
- return baselocatortype == LOCATOR_TYPE_REPLICATED ?
- COMBINE_TYPE_SAME : COMBINE_TYPE_SUM;
-
- default:
- return COMBINE_TYPE_NONE;
- }
- /* quiet compiler warning */
- return COMBINE_TYPE_NONE;
-}
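
To make the two interesting combine types above concrete, here is a small hedged sketch of how per-node row counts would be merged at the Coordinator; the enum and helper are simplified stand-ins, not the actual CombineType handling elsewhere in the tree.

#include <stdio.h>

typedef enum { SKETCH_COMBINE_SAME, SKETCH_COMBINE_SUM } SketchCombineType;

static long
sketch_combine_row_counts(SketchCombineType type, const long *counts, int n)
{
	long result = 0;
	int  i;

	for (i = 0; i < n; i++)
	{
		if (type == SKETCH_COMBINE_SAME)
			result = counts[i];   /* replicated: every node reports the same count */
		else
			result += counts[i];  /* distributed: counts add up across nodes */
	}
	return result;
}

int
main(void)
{
	long counts[] = {1, 1, 1};    /* three Datanodes each touched one row */

	printf("replicated UPDATE reports %ld row(s)\n",
		   sketch_combine_row_counts(SKETCH_COMBINE_SAME, counts, 3));
	printf("hash-distributed UPDATE reports %ld row(s)\n",
		   sketch_combine_row_counts(SKETCH_COMBINE_SUM, counts, 3));
	return 0;
}
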
-
-/*
- * get oid of the function whose name is passed as argument
- */
-
-static Oid
-get_fn_oid(char *fn_name, Oid *p_rettype)
-{
- Value *fn_nm;
- List *fn_name_list;
- FuncDetailCode fdc;
- bool retset;
- int nvargs;
- Oid *true_typeids;
- Oid func_oid;
-
- fn_nm = makeString(fn_name);
- fn_name_list = list_make1(fn_nm);
-
- fdc = func_get_detail(fn_name_list,
- NULL, /* argument expressions */
- NULL, /* argument names */
- 0, /* argument numbers */
- NULL, /* argument types */
-						false,	/* expand variable number of args */
- false, /* expand defaults */
- &func_oid, /* oid of the function - returned detail*/
- p_rettype, /* function return type - returned detail */
- &retset, /* - returned detail*/
- &nvargs, /* - returned detail*/
- &true_typeids, /* - returned detail */
-						NULL	/* argument defaults returned */
- );
-
- pfree(fn_name_list);
- if (fdc == FUNCDETAIL_NORMAL)
- {
- return func_oid;
- }
- return InvalidOid;
-}
-
-/*
- * Append ctid to the field list of step queries to support update
- * WHERE CURRENT OF. The ctid is not sent down to client but used as a key
- * to find target tuple.
- * PGXCTODO: Bug
- * This function modifies the original query to add ctid
- * and nodename in the targetlist. It should rather modify the targetlist of the
- * query to be shipped by the RemoteQuery node.
- */
-static void
-fetch_ctid_of(Plan *subtree, Query *query)
-{
- /* recursively process subnodes */
- if (innerPlan(subtree))
- fetch_ctid_of(innerPlan(subtree), query);
- if (outerPlan(subtree))
- fetch_ctid_of(outerPlan(subtree), query);
-
- /* we are only interested in RemoteQueries */
- if (IsA(subtree, RemoteQuery))
- {
- RemoteQuery *step = (RemoteQuery *) subtree;
- TargetEntry *te1;
- Query *temp_qry;
- FuncExpr *func_expr;
- AttrNumber resno;
- Oid funcid;
- Oid rettype;
- Var *ctid_expr;
- MemoryContext oldcontext;
- MemoryContext tmpcontext;
-
- tmpcontext = AllocSetContextCreate(CurrentMemoryContext,
- "Temp Context",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
- oldcontext = MemoryContextSwitchTo(tmpcontext);
-
- /* Copy the query tree to make changes to the target list */
- temp_qry = copyObject(query);
- /* Get the number of entries in the target list */
- resno = list_length(temp_qry->targetList);
-
- /* Make a ctid column ref expr to add in target list */
- ctid_expr = make_ctid_col_ref(temp_qry);
- if (ctid_expr == NULL)
- {
- MemoryContextSwitchTo(oldcontext);
- MemoryContextDelete(tmpcontext);
- return;
- }
-
- te1 = makeTargetEntry((Expr *)ctid_expr, resno+1, NULL, false);
-
- /* add the target entry to the query target list */
- temp_qry->targetList = lappend(temp_qry->targetList, te1);
-
- /* PGXCTODO We can take this call in initialization rather than getting it always */
-
- /* Get the Oid of the function */
- funcid = get_fn_oid("pgxc_node_str", &rettype);
- if (OidIsValid(funcid))
- {
- StringInfoData deparsed_qry;
- TargetEntry *te2;
-
- /* create a function expression */
- func_expr = makeFuncExpr(funcid, rettype, NULL, InvalidOid, InvalidOid, COERCE_DONTCARE);
- /* make a target entry for function call */
- te2 = makeTargetEntry((Expr *)func_expr, resno+2, NULL, false);
- /* add the target entry to the query target list */
- temp_qry->targetList = lappend(temp_qry->targetList, te2);
-
- initStringInfo(&deparsed_qry);
- deparse_query(temp_qry, &deparsed_qry, NIL);
-
- MemoryContextSwitchTo(oldcontext);
-
- if (step->sql_statement != NULL)
- pfree(step->sql_statement);
-
- step->sql_statement = pstrdup(deparsed_qry.data);
-
- MemoryContextDelete(tmpcontext);
- }
- else
- {
- MemoryContextSwitchTo(oldcontext);
- MemoryContextDelete(tmpcontext);
- }
- }
-}
-
-/*
- * Build up a QueryPlan to execute on.
- *
- * This function tries to find out whether
- * 1. The statement can be shipped to the Datanode and Coordinator is needed
- * only as a proxy - in which case, it creates a single node plan.
- * 2. The statement can be evaluated on the Coordinator completely - thus no
- * query shipping is involved and standard_planner() is invoked to plan the
- * statement
- * 3. The statement needs Coordinator as well as Datanode for evaluation -
- * again we use standard_planner() to plan the statement.
- *
- * The plan generated in either of the above cases is returned.
- */
-PlannedStmt *
-pgxc_planner(Query *query, int cursorOptions, ParamListInfo boundParams)
-{
- PlannedStmt *result;
-
- /* handle the un-supported statements, obvious errors etc. */
- pgxc_handle_unsupported_stmts(query);
-
- result = pgxc_handle_exec_direct(query, cursorOptions, boundParams);
- if (result)
- return result;
-
- /* see if can ship the query completely */
- result = pgxc_FQS_planner(query, cursorOptions, boundParams);
- if (result)
- return result;
-
- /* we need Coordinator for evaluation, invoke standard planner */
- result = standard_planner(query, cursorOptions, boundParams);
- pgxc_set_remote_parameters(result, boundParams);
- return result;
-}
-
-static PlannedStmt *
-pgxc_handle_exec_direct(Query *query, int cursorOptions,
- ParamListInfo boundParams)
-{
- PlannedStmt *result = NULL;
- PlannerGlobal *glob;
- PlannerInfo *root;
- /*
- * if the query has its utility set, it could be an EXEC_DIRECT statement,
- * check if it needs to be executed on Coordinator
- */
- if (query->utilityStmt &&
- IsA(query->utilityStmt, RemoteQuery))
- {
- RemoteQuery *node = (RemoteQuery *)query->utilityStmt;
- /* EXECUTE DIRECT statements on remote nodes don't need Coordinator */
- if (node->exec_direct_type != EXEC_DIRECT_NONE &&
- node->exec_direct_type != EXEC_DIRECT_LOCAL &&
- node->exec_direct_type != EXEC_DIRECT_LOCAL_UTILITY)
- {
- glob = makeNode(PlannerGlobal);
- glob->boundParams = boundParams;
- /* Create a PlannerInfo data structure, usually it is done for a subquery */
- root = makeNode(PlannerInfo);
- root->parse = query;
- root->glob = glob;
- root->query_level = 1;
- root->planner_cxt = CurrentMemoryContext;
- root->recursiveOk = true;
- /* build the PlannedStmt result */
- result = makeNode(PlannedStmt);
- /* Try and set what we can, rest must have been zeroed out by makeNode() */
- result->commandType = query->commandType;
- result->canSetTag = query->canSetTag;
- /* Set result relations */
- if (query->commandType != CMD_SELECT)
- result->resultRelations = list_make1_int(query->resultRelation);
-
- result->planTree = (Plan *)pgxc_FQS_create_remote_plan(query, NULL, true);
- result->rtable = query->rtable;
- /*
- * We need to save plan dependencies, so that dropping objects will
- * invalidate the cached plan if it depends on those objects. Table
- * dependencies are available in glob->relationOids and all other
- * dependencies are in glob->invalItems. These fields can be retrieved
- * through set_plan_references().
- */
- result->planTree = set_plan_references(root, result->planTree);
- result->relationOids = glob->relationOids;
- result->invalItems = glob->invalItems;
- }
- }
-
- /* Set existing remote parameters */
- pgxc_set_remote_parameters(result, boundParams);
-
- return result;
-}
-/*
- * pgxc_handle_unsupported_stmts
- * Throw error for the statements that can not be handled in XC
- */
-static void
-pgxc_handle_unsupported_stmts(Query *query)
-{
- /*
- * PGXCTODO: This validation will not be removed
- * until we support moving tuples from one node to another
- * when the partition column of a table is updated
- */
- if (query->commandType == CMD_UPDATE)
- validate_part_col_updatable(query);
-
- if (query->returningList)
- ereport(ERROR,
- (errcode(ERRCODE_STATEMENT_TOO_COMPLEX),
- (errmsg("RETURNING clause not yet supported"))));
-}
-
-/*
- * pgxc_FQS_planner
- * The routine tries to see if the statement can be completely evaluated on the
- * Datanodes. In such cases Coordinator is not needed to evaluate the statement,
- * and just acts as a proxy. A statement can be completely shipped to the remote
- * node if every row of the result can be evaluated on a single Datanode.
- * For example:
- *
- * 1. SELECT * FROM tab1; where tab1 is a distributed table - Every row of the
- * result set can be evaluated at a single Datanode. Hence this statement is
- * completely shippable even though many Datanodes are involved in evaluating
- * complete result set. In such a case the Coordinator will be able to gather rows
- * arising from individual Datanodes and proxy the result to the client.
- *
- * 2. SELECT count(*) FROM tab1; where tab1 is a distributed table - there is
- * only one row in the result but it needs input from all the Datanodes. Hence
- * this is not completely shippable.
- *
- * 3. SELECT count(*) FROM tab1; where tab1 is replicated table - since result
- * can be obtained from a single Datanode, this is a completely shippable
- * statement.
- *
- * fqs in the name of function is acronym for fast query shipping.
- */
-static PlannedStmt *
-pgxc_FQS_planner(Query *query, int cursorOptions, ParamListInfo boundParams)
-{
- PlannedStmt *result;
- PlannerGlobal *glob;
- PlannerInfo *root;
- ExecNodes *exec_nodes;
- Plan *top_plan;
-
- /* Try by-passing standard planner, if fast query shipping is enabled */
- if (!enable_fast_query_shipping)
- return NULL;
-
- /* Cursor options may come from caller or from DECLARE CURSOR stmt */
- if (query->utilityStmt &&
- IsA(query->utilityStmt, DeclareCursorStmt))
- cursorOptions |= ((DeclareCursorStmt *) query->utilityStmt)->options;
- /*
- * If the query can not be or need not be shipped to the Datanodes, don't
- * create any plan here. standard_planner() will take care of it.
- */
- exec_nodes = pgxc_is_query_shippable(query, 0);
- if (exec_nodes == NULL)
- return NULL;
-
- glob = makeNode(PlannerGlobal);
- glob->boundParams = boundParams;
- /* Create a PlannerInfo data structure, usually it is done for a subquery */
- root = makeNode(PlannerInfo);
- root->parse = query;
- root->glob = glob;
- root->query_level = 1;
- root->planner_cxt = CurrentMemoryContext;
- root->recursiveOk = true;
-
- /*
- * We decided to ship the query to the Datanode/s, create a RemoteQuery node
- * for the same.
- */
- top_plan = (Plan *)pgxc_FQS_create_remote_plan(query, exec_nodes, false);
- /*
- * If creating a plan for a scrollable cursor, make sure it can run
- * backwards on demand. Add a Material node at the top at need.
- */
- if (cursorOptions & CURSOR_OPT_SCROLL)
- {
- if (!ExecSupportsBackwardScan(top_plan))
- top_plan = materialize_finished_plan(top_plan);
- }
-
- /*
- * Just before creating the PlannedStmt, do some final cleanup
- * We need to save plan dependencies, so that dropping objects will
- * invalidate the cached plan if it depends on those objects. Table
- * dependencies are available in glob->relationOids and all other
- * dependencies are in glob->invalItems. These fields can be retrieved
- * through set_plan_references().
- */
- top_plan = set_plan_references(root, top_plan);
-
- /* build the PlannedStmt result */
- result = makeNode(PlannedStmt);
- /* Try and set what we can, rest must have been zeroed out by makeNode() */
- result->commandType = query->commandType;
- result->canSetTag = query->canSetTag;
- result->utilityStmt = query->utilityStmt;
-
- /* Set result relations */
- if (query->commandType != CMD_SELECT)
- result->resultRelations = list_make1_int(query->resultRelation);
- result->planTree = top_plan;
- result->rtable = query->rtable;
- result->relationOids = glob->relationOids;
- result->invalItems = glob->invalItems;
-
- /*
-	 * If the query is DECLARE CURSOR, fetch CTIDs and node names from the remote node.
- * Use CTID as a key to update/delete tuples on remote nodes when handling
- * WHERE CURRENT OF.
- */
- if (query->utilityStmt && IsA(query->utilityStmt, DeclareCursorStmt))
- fetch_ctid_of(result->planTree, query);
-
- /* Set existing remote parameters */
- pgxc_set_remote_parameters(result, boundParams);
-
- return result;
-}
-
-static RemoteQuery *
-pgxc_FQS_create_remote_plan(Query *query, ExecNodes *exec_nodes, bool is_exec_direct)
-{
- RemoteQuery *query_step;
- StringInfoData buf;
- RangeTblEntry *dummy_rte;
-
- /* EXECUTE DIRECT statements have their RemoteQuery node already built when analyzing */
- if (is_exec_direct)
- {
- Assert(IsA(query->utilityStmt, RemoteQuery));
- query_step = (RemoteQuery *)query->utilityStmt;
- query->utilityStmt = NULL;
- }
- else
- {
- query_step = makeRemoteQuery();
- query_step->exec_nodes = exec_nodes;
- }
-
- Assert(query_step->exec_nodes);
-
- /* Datanodes should finalise the results of this query */
- query->qry_finalise_aggs = true;
-
- /* Deparse query tree to get step query. */
- if ( query_step->sql_statement == NULL )
- {
- initStringInfo(&buf);
- deparse_query(query, &buf, NIL);
- query_step->sql_statement = pstrdup(buf.data);
- pfree(buf.data);
- }
- /*
- * PGXCTODO: we may route this same Query structure through
- * standard_planner, where we don't want Datanodes to finalise the results.
-	 * Turn it off. At some point, we will avoid routing the same query
-	 * structure through standard_planner by modifying it only when it is not
-	 * going to be routed through standard_planner.
- */
- query->qry_finalise_aggs = false;
- /* Optimize multi-node handling */
- query_step->read_only = (query->commandType == CMD_SELECT && !query->hasForUpdate);
- query_step->has_row_marks = query->hasForUpdate;
-
- /* Check if temporary tables are in use in query */
- /* PGXC_FQS_TODO: scanning the rtable again for the queries should not be
- * needed. We should be able to find out if the query has a temporary object
- * while finding nodes for the objects. But there is no way we can convey
- * that information here. Till such a connection is available, this is it.
- */
- if (contains_temp_tables(query->rtable))
- query_step->is_temp = true;
-
- /*
- * We need to evaluate some expressions like the ExecNodes->en_expr at
- * Coordinator, prepare those for evaluation. Ideally we should call
- * preprocess_expression, but it needs PlannerInfo structure for the same
- */
- fix_opfuncids((Node *)(query_step->exec_nodes->en_expr));
- /*
- * PGXCTODO
-	 * When Postgres runs insert into t (a) values (1); against a table
-	 * defined as create table t (a int, b int); the plan looks
-	 * like insert into t (a,b) values (1,null);
-	 * Later the executor verifies the plan, to make sure the table has not
-	 * been altered since the plan was created, by comparing the table
-	 * definition with the plan target list, and reports an error if they do
-	 * not match.
-	 * I could not find a better way to generate the targetList for the pgxc
-	 * plan than to call the standard planner and take the targetList from the
-	 * plan generated by Postgres.
- */
- query_step->combine_type = get_plan_combine_type(
- query, query_step->exec_nodes->baselocatortype);
-
- /*
- * Create a dummy RTE for the remote query being created. Append the dummy
- * range table entry to the range table. Note that this modifies the master
- * copy the caller passed us, otherwise e.g EXPLAIN VERBOSE will fail to
- * find the rte the Vars built below refer to. Also create the tuple
- * descriptor for the result of this query from the base_tlist (targetlist
- * we used to generate the remote node query).
- */
- dummy_rte = makeNode(RangeTblEntry);
- dummy_rte->rtekind = RTE_REMOTE_DUMMY;
- /* Use a dummy relname... */
- if (is_exec_direct)
- dummy_rte->relname = "__EXECUTE_DIRECT__";
- else
- dummy_rte->relname = "__REMOTE_FQS_QUERY__";
- dummy_rte->eref = makeAlias("__REMOTE_FQS_QUERY__", NIL);
- /* Rest will be zeroed out in makeNode() */
-
- query->rtable = lappend(query->rtable, dummy_rte);
- query_step->scan.scanrelid = list_length(query->rtable);
- query_step->scan.plan.targetlist = query->targetList;
- query_step->base_tlist = query->targetList;
-
- return query_step;
-}
-
-/*
- * pgxc_query_needs_coord
- * Check if the query needs Coordinator for evaluation or it can be completely
- * evaluated on Coordinator. Return true if so, otherwise return false.
- */
-static bool
-pgxc_query_needs_coord(Query *query)
-{
- /*
- * If the query is an EXEC DIRECT on the same Coordinator where it's fired,
- * it should not be shipped
- */
- if (query->is_local)
- return true;
- /*
- * If the query involves just the catalog tables, and is not an EXEC DIRECT
- * statement, it can be evaluated completely on the Coordinator. No need to
- * involve Datanodes.
- */
- if (contains_only_pg_catalog(query->rtable))
- return true;
-
-
- /* Allow for override */
- if (query->commandType != CMD_SELECT &&
- query->commandType != CMD_INSERT &&
- query->commandType != CMD_UPDATE &&
- query->commandType != CMD_DELETE)
- {
- if (StrictStatementChecking)
- ereport(ERROR,
- (errcode(ERRCODE_STATEMENT_TOO_COMPLEX),
- (errmsg("This command is not yet supported."))));
-
- return true;
- }
-
- return false;
-}
-
-/*
- * Set the given reason in Shippability_context indicating why the query can not be
- * shipped directly to the Datanodes.
- */
-static void
-pgxc_set_shippability_reason(Shippability_context *context, ShippabilityStat reason)
-{
- context->sc_shippability = bms_add_member(context->sc_shippability, reason);
-}
-
-/*
- * See if a given reason is why the query can not be shipped directly
- * to the Datanodes.
- */
-bool
-pgxc_test_shippability_reason(Shippability_context *context, ShippabilityStat reason)
-{
- return bms_is_member(reason, context->sc_shippability);
-}
-
-/*
- * pgxc_is_query_shippable
- * This function calls the query walker to analyse the query to gather
- * information like Constraints under which the query can be shippable, nodes
- * on which the query is going to be executed etc.
- * Based on the information gathered, it decides whether the query can be
- * executed on Datanodes directly without involving Coordinator.
- * If the query is shippable this routine also returns the nodes where the query
- * should be shipped. If the query is not shippable, it returns NULL.
- */
-static ExecNodes *
-pgxc_is_query_shippable(Query *query, int query_level)
-{
- Shippability_context sc_context;
- ExecNodes *exec_nodes;
- bool canShip = true;
- Bitmapset *shippability;
-
- memset(&sc_context, 0, sizeof(sc_context));
- /* let's assume that by default query is shippable */
- sc_context.sc_query = query;
- sc_context.sc_query_level = query_level;
- sc_context.sc_for_expr = false;
-
- /*
- * We might have already decided not to ship the query to the Datanodes, but
- * still walk it anyway to find out if there are any subqueries which can be
- * shipped.
- */
- pgxc_shippability_walker((Node *)query, &sc_context);
- /*
- * We have merged the nodelists and distributions of all subqueries seen in
- * the query tree, merge it with the same obtained for the relations
- * involved in the query.
- * PGXC_FQS_TODO:
- * Merge the subquery ExecNodes if both of them are replicated.
- * The logic to merge node lists with other distribution
- * strategy is not clear yet.
- */
- exec_nodes = sc_context.sc_exec_nodes;
- if (exec_nodes)
- exec_nodes = pgxc_merge_exec_nodes(exec_nodes,
- sc_context.sc_subquery_en, false,
- true);
-
- /*
- * Look at the information gathered by the walker in Shippability_context and that
- * in the Query structure to decide whether we should ship this query
- * directly to the Datanode or not
- */
-
- /*
-	 * If the planner was not able to find the Datanodes to execute the
- * query, the query is not completely shippable. So, return NULL
- */
- if (!exec_nodes)
- return NULL;
-
- /* Copy the shippability reasons. We modify the copy for easier handling.
- * The original can be saved away */
- shippability = bms_copy(sc_context.sc_shippability);
-
- /*
- * If the query has an expression which renders the shippability to single
- * node, and query needs to be shipped to more than one node, it can not be
- * shipped
- */
- if (bms_is_member(SS_NEED_SINGLENODE, shippability))
- {
- /* We handled the reason here, reset it */
- shippability = bms_del_member(shippability, SS_NEED_SINGLENODE);
-		/* if nodeList has no nodes, the ExecNodes will have other means to know
-		 * the nodes where to execute, like a distribution column expression. We
- * can't tell how many nodes the query will be executed on, hence treat
- * that as multiple nodes.
- */
- if (list_length(exec_nodes->nodeList) != 1)
- canShip = false;
- }
-	/* We have dealt with aggregates as well, delete the has-aggregates status */
- shippability = bms_del_member(shippability, SS_HAS_AGG_EXPR);
-
- /* Can not ship the query for some reason */
- if (!bms_is_empty(shippability))
- canShip = false;
-
- /* Always keep this at the end before checking canShip and return */
- if (!canShip && exec_nodes)
- FreeExecNodes(&exec_nodes);
- /* If query is to be shipped, we should know where to execute the query */
- Assert (!canShip || exec_nodes);
-
- bms_free(shippability);
- shippability = NULL;
-
- return exec_nodes;
-}
-
-/*
- * pgxc_merge_exec_nodes
- * The routine combines the two exec_nodes passed such that the resultant
- * exec_node corresponds to the JOIN of respective relations.
- * If both exec_nodes can not be merged, it returns NULL.
- */
-static ExecNodes *
-pgxc_merge_exec_nodes(ExecNodes *en1, ExecNodes *en2, bool merge_dist_equijoin,
- bool merge_replicated_only)
-{
- ExecNodes *merged_en = makeNode(ExecNodes);
- ExecNodes *tmp_en;
-
- /* If either of exec_nodes are NULL, return the copy of other one */
- if (!en1)
- {
- tmp_en = copyObject(en2);
- return tmp_en;
- }
- if (!en2)
- {
- tmp_en = copyObject(en1);
- return tmp_en;
- }
-
- /* Following cases are not handled in this routine */
- /* PGXC_FQS_TODO how should we handle table usage type? */
- if (en1->primarynodelist || en2->primarynodelist ||
- en1->en_expr || en2->en_expr ||
- OidIsValid(en1->en_relid) || OidIsValid(en2->en_relid) ||
- en1->accesstype != RELATION_ACCESS_READ || en2->accesstype != RELATION_ACCESS_READ)
- return NULL;
-
- if (IsLocatorReplicated(en1->baselocatortype) &&
- IsLocatorReplicated(en2->baselocatortype))
- {
- /*
- * Replicated/replicated join case
- * Check that replicated relation is not disjoint
- * with initial relation which is also replicated.
- * If there is a common portion of the node list between
- * the two relations, other rtables have to be checked on
- * this restricted list.
- */
- merged_en->nodeList = list_intersection_int(en1->nodeList,
- en2->nodeList);
- merged_en->baselocatortype = LOCATOR_TYPE_REPLICATED;
- /* No intersection, so has to go though standard planner... */
- if (!merged_en->nodeList)
- FreeExecNodes(&merged_en);
- return merged_en;
- }
-
- /*
- * We are told to merge the nodelists if both the distributions are
- * replicated. We checked that above, so bail out
- */
- if (merge_replicated_only)
- {
- FreeExecNodes(&merged_en);
- return merged_en;
- }
-
- if (IsLocatorReplicated(en1->baselocatortype) &&
- IsLocatorColumnDistributed(en2->baselocatortype))
- {
- List *diff_nodelist = NULL;
- /*
- * Replicated/distributed join case.
- * Node list of distributed table has to be included
- * in node list of replicated table.
- */
- diff_nodelist = list_difference_int(en2->nodeList, en1->nodeList);
- /*
- * If the difference list is not empty, this means that node list of
- * distributed table is not completely mapped by node list of replicated
- * table, so go through standard planner.
- */
- if (diff_nodelist)
- FreeExecNodes(&merged_en);
- else
- {
- merged_en->nodeList = list_copy(en2->nodeList);
- merged_en->baselocatortype = LOCATOR_TYPE_DISTRIBUTED;
- }
- return merged_en;
- }
-
- if (IsLocatorColumnDistributed(en1->baselocatortype) &&
- IsLocatorReplicated(en2->baselocatortype))
- {
- List *diff_nodelist = NULL;
- /*
- * Distributed/replicated join case.
- * Node list of distributed table has to be included
- * in node list of replicated table.
- */
- diff_nodelist = list_difference_int(en1->nodeList, en2->nodeList);
-
- /*
- * If the difference list is not empty, this means that node list of
- * distributed table is not completely mapped by node list of replicated
- * table, so go through standard planner.
- */
- if (diff_nodelist)
- FreeExecNodes(&merged_en);
- else
- {
- merged_en->nodeList = list_copy(en1->nodeList);
- merged_en->baselocatortype = LOCATOR_TYPE_DISTRIBUTED;
- }
- return merged_en;
- }
-
- if (IsLocatorColumnDistributed(en1->baselocatortype) &&
- IsLocatorColumnDistributed(en2->baselocatortype))
- {
- /*
- * Distributed/distributed case
- * If the caller has suggested that this is an equi-join between two
- * distributed results, check if both are distributed by the same
- * distribution strategy, and have the same nodes in the distribution
- * node list. The caller should have made sure that distribution column
- * type is same.
- */
- if (merge_dist_equijoin &&
- en1->baselocatortype == en2->baselocatortype &&
- !list_difference_int(en1->nodeList, en2->nodeList) &&
- !list_difference_int(en2->nodeList, en1->nodeList))
- {
- merged_en->nodeList = list_copy(en1->nodeList);
- merged_en->baselocatortype = en1->baselocatortype;
- }
- else if (list_length(en1->nodeList) == 1 && list_length(en2->nodeList) == 1)
- {
- merged_en->nodeList = list_intersection_int(en1->nodeList,
- en2->nodeList);
- merged_en->baselocatortype = LOCATOR_TYPE_DISTRIBUTED;
- }
- else
- FreeExecNodes(&merged_en);
- return merged_en;
- }
-
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-#ifdef XCP
- errmsg("Postgres-XL does not support this distribution type yet"),
-#else
- errmsg("Postgres-XC does not support this distribution type yet"),
-#endif
- errdetail("The feature is not currently supported")));
-
- /* Keep compiler happy */
- return NULL;
-}
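
The merging rules above are essentially set operations on node lists. A minimal sketch using plain int arrays (an assumption, replacing the backend List API) shows the replicated/replicated case, which keeps the intersection of the two lists; the distributed/replicated cases instead require the distributed list to be a subset of the replicated one.

#include <stdbool.h>
#include <stdio.h>

static bool
sketch_contains(const int *list, int len, int node)
{
	int i;

	for (i = 0; i < len; i++)
		if (list[i] == node)
			return true;
	return false;
}

/* Write the intersection of a and b into out, return its length. */
static int
sketch_intersect(const int *a, int alen, const int *b, int blen, int *out)
{
	int i, n = 0;

	for (i = 0; i < alen; i++)
		if (sketch_contains(b, blen, a[i]))
			out[n++] = a[i];
	return n;
}

int
main(void)
{
	int repl1[] = {0, 1, 2};
	int repl2[] = {1, 2, 3};
	int out[3];
	int n = sketch_intersect(repl1, 3, repl2, 3, out);
	int i;

	printf("replicated/replicated join can run on:");
	for (i = 0; i < n; i++)
		printf(" %d", out[i]);
	printf("\n");
	return 0;
}
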
-
-static void
-pgxc_FQS_find_datanodes(Shippability_context *sc_context)
-{
- Query *query = sc_context->sc_query;
- ListCell *rt;
- ExecNodes *exec_nodes = NULL;
- bool canShip = true;
- Index varno = 0;
-
- /* No query, no nodes to execute! */
- if (!query)
- {
- sc_context->sc_exec_nodes = NULL;
- return;
- }
-
- /*
- * For every range table entry,
- * 1. Find out the Datanodes needed for that range table
- * 2. Merge these Datanodes with the already available Datanodes
- * 3. If the merge is unsuccessful, we can not ship this query directly to
- * the Datanode/s
- */
- foreach(rt, query->rtable)
- {
- RangeTblEntry *rte = (RangeTblEntry *) lfirst(rt);
- Oid distcol_type; /* TODO mostly this is not needed */
- Relids dist_varnos;
-
- varno++;
- switch (rte->rtekind)
- {
- case RTE_RELATION:
- {
- ExecNodes *rel_exec_nodes;
- ExecNodes *tmp_en;
- bool merge_dist_equijoin = false;
- /*
- * In case of inheritance, child tables can have completely different
- * Datanode distribution than parent. To handle inheritance we need
- * to merge the Datanodes of the children table as well. The inheritance
- * is resolved during planning(?), so we may not have the RTEs of the
- * children here. Also, the exact method of merging Datanodes of the
- * children is not known yet. So, when inheritance is requested, query
- * can not be shipped.
- */
- if (rte->inh)
- {
- /*
- * See prologue of has_subclass, we might miss on the
- * optimization because has_subclass can return true
- * even if there aren't any subclasses, but it's ok
- */
- if (has_subclass(rte->relid))
- {
- canShip = false;
- break;
- }
- }
-
- if (rte->relkind != RELKIND_RELATION)
- {
- canShip = false;
- break;
- }
- rel_exec_nodes = pgxc_FQS_get_relation_nodes(rte,varno, query);
- if (!rel_exec_nodes)
- {
- /*
- * No information about the location of relation in XC,
- * a local table OR system catalog. The query can not be
- * pushed.
- */
- canShip = false;
- break;
- }
- if (varno == 1)
- {
- if (IsLocatorColumnDistributed(rel_exec_nodes->baselocatortype))
- {
- RelationLocInfo *rel_loc_info = GetRelationLocInfo(rte->relid);
- distcol_type = get_atttype(rte->relid,
- rel_loc_info->partAttrNum);
- dist_varnos = bms_make_singleton(varno);
- }
- else
- {
- distcol_type = InvalidOid;
- dist_varnos = NULL;
- }
- }
- if (exec_nodes &&
- IsLocatorDistributedByValue(exec_nodes->baselocatortype) &&
- OidIsValid(distcol_type) && bms_num_members(dist_varnos) > 0 &&
- exec_nodes->baselocatortype == rel_exec_nodes->baselocatortype)
- {
- /*
-					 * If the already-reduced JOIN is distributed the same way
-					 * as the current relation, check whether there exists an
-					 * equi-join condition between the relations and whether the
-					 * data type of the distribution column involved is the same
-					 * for both relations
- */
- if (pgxc_qual_hash_dist_equijoin(dist_varnos,
- bms_make_singleton(varno),
- distcol_type,
- query->jointree->quals,
- query->rtable))
- merge_dist_equijoin = true;
- }
-
- /* Save the current exec_nodes to be freed later */
- tmp_en = exec_nodes;
- exec_nodes = pgxc_merge_exec_nodes(exec_nodes, rel_exec_nodes,
- merge_dist_equijoin,
- false);
- /*
- * The JOIN is equijoin between distributed tables, and we could
- * obtain the nodelist for pushing this JOIN, so add the current
- * relation to the list of relations already JOINed in the same
- * fashion.
- */
- if (exec_nodes && merge_dist_equijoin)
- dist_varnos = bms_add_member(dist_varnos, varno);
- FreeExecNodes(&tmp_en);
- }
- break;
-
- case RTE_JOIN:
- /* Is information here useful in some or other way? */
- break;
- case RTE_CTE:
- case RTE_SUBQUERY:
- case RTE_FUNCTION:
- case RTE_VALUES:
- default:
- canShip = false;
- }
-
- if (!canShip || !exec_nodes)
- break;
- }
-
- /*
- * If we didn't find the Datanodes to ship the query to, we shouldn't ship
- * the query :)
- */
- if (!exec_nodes || !(exec_nodes->nodeList || exec_nodes->en_expr))
- canShip = false;
-
- if (canShip)
- {
- /*
- * If relations involved in the query are such that ultimate JOIN is
- * replicated JOIN, choose only one of them. If one of them is a
- * preferred node choose that one, otherwise choose the first one.
- */
- if (IsLocatorReplicated(exec_nodes->baselocatortype) &&
- exec_nodes->accesstype == RELATION_ACCESS_READ)
- {
- List *tmp_list = exec_nodes->nodeList;
- ListCell *item;
- int nodeid = -1;
- foreach(item, exec_nodes->nodeList)
- {
- int cnt_nodes;
- for (cnt_nodes = 0;
- cnt_nodes < num_preferred_data_nodes && nodeid < 0;
- cnt_nodes++)
- {
- if (PGXCNodeGetNodeId(preferred_data_node[cnt_nodes],
- PGXC_NODE_DATANODE) == lfirst_int(item))
- nodeid = lfirst_int(item);
- }
- if (nodeid >= 0)
- break;
- }
- if (nodeid < 0)
- exec_nodes->nodeList = list_make1_int(linitial_int(exec_nodes->nodeList));
- else
- exec_nodes->nodeList = list_make1_int(nodeid);
- list_free(tmp_list);
- }
- sc_context->sc_exec_nodes = exec_nodes;
- }
- else if (exec_nodes)
- {
- FreeExecNodes(&exec_nodes);
- }
- return;
-}
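
The final step of pgxc_FQS_find_datanodes above collapses a replicated read to a single node, picking a preferred node when one appears in the list and the first entry otherwise. A small sketch of that choice, with hypothetical int arrays standing in for the Oid/List machinery:

#include <stdio.h>

static int
sketch_pick_replicated_node(const int *nodes, int nnodes,
							const int *preferred, int npreferred)
{
	int i, j;

	for (i = 0; i < nnodes; i++)
		for (j = 0; j < npreferred; j++)
			if (nodes[i] == preferred[j])
				return nodes[i];   /* found a preferred node in the list */

	return nodes[0];               /* fall back to the first node */
}

int
main(void)
{
	int nodes[] = {0, 1, 2};
	int preferred[] = {2};

	printf("replicated read goes to node %d\n",
		   sketch_pick_replicated_node(nodes, 3, preferred, 1));
	return 0;
}
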
-
-static bool
-pgxc_qual_hash_dist_equijoin(Relids varnos_1, Relids varnos_2, Oid distcol_type,
- Node *quals, List *rtable)
-{
- List *lquals;
- ListCell *qcell;
-
- /*
- * Make a copy of the argument bitmaps, it will be modified by
- * bms_first_member().
- */
- varnos_1 = bms_copy(varnos_1);
- varnos_2 = bms_copy(varnos_2);
-
- lquals = make_ands_implicit((Expr *)quals);
- foreach(qcell, lquals)
- {
- Expr *qual_expr = (Expr *)lfirst(qcell);
- OpExpr *op;
- Var *lvar;
- Var *rvar;
-
- if (!IsA(qual_expr, OpExpr))
- continue;
- op = (OpExpr *)qual_expr;
- /* If not a binary operator, it can not be '='. */
- if (list_length(op->args) != 2)
- continue;
-
- /*
- * Check if both operands are Vars, if not check next expression */
- if (IsA(linitial(op->args), Var) && IsA(lsecond(op->args), Var))
- {
- lvar = (Var *)linitial(op->args);
- rvar = (Var *)lsecond(op->args);
- }
- else
- continue;
-
- /*
-		 * If the data types of the two columns are not the same, continue. Hash
-		 * and Modulo of the same bytes will be the same only if the data types
-		 * are the same. So, only when the data types of the columns match can
-		 * we ship a distributed JOIN to the Datanodes
- */
- if (exprType((Node *)lvar) != exprType((Node *)rvar))
- continue;
-
- /* if the vars do not correspond to the required varnos, continue. */
- if ((bms_is_member(lvar->varno, varnos_1) && bms_is_member(rvar->varno, varnos_2)) ||
- (bms_is_member(lvar->varno, varnos_2) && bms_is_member(rvar->varno, varnos_1)))
- {
- if (!VarAttrIsPartAttr(lvar, rtable) ||
- !VarAttrIsPartAttr(rvar, rtable))
- continue;
- }
- else
- continue;
- /*
- * If the operator is not an assignment operator, check next
- * constraint. An operator is an assignment operator if it's
- * mergejoinable or hashjoinable. Beware that not every assignment
- * operator is mergejoinable or hashjoinable, so we might leave some
-		 * opportunity. But then we would have to rely on the opname, which may
-		 * not be something we know to be an equality operator.
- */
- if (!op_mergejoinable(op->opno, exprType((Node *)lvar)) &&
- !op_hashjoinable(op->opno, exprType((Node *)lvar)))
- continue;
- /* Found equi-join condition on distribution columns */
- return true;
- }
- return false;
-}
-
-static bool VarAttrIsPartAttr(Var *var, List *rtable)
-{
- RangeTblEntry *rte = rt_fetch(var->varno, rtable);
- RelationLocInfo *rel_loc_info;
- /* distribution column only applies to the relations */
- if (rte->rtekind != RTE_RELATION ||
- rte->relkind != RELKIND_RELATION)
- return false;
- rel_loc_info = GetRelationLocInfo(rte->relid);
- if (!rel_loc_info)
- return false;
- if (var->varattno == rel_loc_info->partAttrNum)
- return true;
- return false;
-}
-/*
- * pgxc_FQS_get_relation_nodes
- * For FQS return ExecNodes structure so as to decide which Datanodes the query
- * should execute on. If it is possible to set the node list directly, set it.
- * Otherwise set the appropriate distribution column expression or relid in
- * ExecNodes structure.
- */
-static ExecNodes *
-pgxc_FQS_get_relation_nodes(RangeTblEntry *rte, Index varno, Query *query)
-{
- CmdType command_type = query->commandType;
- bool for_update = query->rowMarks ? true : false;
- ExecNodes *rel_exec_nodes;
- RelationAccessType rel_access = RELATION_ACCESS_READ;
- RelationLocInfo *rel_loc_info;
-
- Assert(rte == rt_fetch(varno, (query->rtable)));
-
- switch (command_type)
- {
- case CMD_SELECT:
- if (for_update)
- rel_access = RELATION_ACCESS_READ_FOR_UPDATE;
- else
- rel_access = RELATION_ACCESS_READ;
- break;
-
- case CMD_UPDATE:
- case CMD_DELETE:
- rel_access = RELATION_ACCESS_UPDATE;
- break;
-
- case CMD_INSERT:
- rel_access = RELATION_ACCESS_INSERT;
- break;
-
- default:
- /* should not happen, but */
- elog(ERROR, "Unrecognised command type %d", command_type);
- break;
- }
-
-
- rel_loc_info = GetRelationLocInfo(rte->relid);
- /* If we don't know about the distribution of relation, bail out */
- if (!rel_loc_info)
- return NULL;
-
- /*
- * Find out the datanodes to execute this query on.
- * PGXC_FQS_TODO: for now, we apply node reduction only when there is only
- * one relation involved in the query. If there are multiple distributed
- * tables in the query and we apply node reduction here, we may fail to ship
- * the entire join. We should apply node reduction transitively.
- */
- if (list_length(query->rtable) == 1)
- rel_exec_nodes = GetRelationNodesByQuals(rte->relid, varno,
- query->jointree->quals, rel_access);
- else
- rel_exec_nodes = GetRelationNodes(rel_loc_info, (Datum) 0,
- true, InvalidOid, rel_access);
-
- if (!rel_exec_nodes)
- return NULL;
- rel_exec_nodes->accesstype = rel_access;
- /*
- * If we are reading a replicated table, pick all the nodes where it
- * resides. If the query has JOIN, it helps picking up a matching set of
- * Datanodes for that JOIN. FQS planner will ultimately pick up one node if
- * the JOIN is replicated.
- */
- if (rel_access == RELATION_ACCESS_READ &&
- IsLocatorReplicated(rel_loc_info->locatorType))
- {
- list_free(rel_exec_nodes->nodeList);
- rel_exec_nodes->nodeList = list_copy(rel_loc_info->nodeList);
- }
- else if (rel_access == RELATION_ACCESS_INSERT &&
- IsLocatorDistributedByValue(rel_loc_info->locatorType))
- {
- ListCell *lc;
- TargetEntry *tle;
- /*
- * If the INSERT is happening on a table distributed by the value of a
- * column, find the expression for the distribution column in the
- * targetlist, stick it in ExecNodes, and clear the node list.
- * Execution will then find out where to insert the row.
- */
- /* It is a partitioned table, get value by looking in targetList */
- foreach(lc, query->targetList)
- {
- tle = (TargetEntry *) lfirst(lc);
-
- if (tle->resjunk)
- continue;
- if (strcmp(tle->resname, rel_loc_info->partAttrName) == 0)
- break;
- }
- /* Not found, bail out */
- if (!lc)
- return NULL;
-
- Assert(tle);
- /* We found the TargetEntry for the partition column */
- list_free(rel_exec_nodes->primarynodelist);
- rel_exec_nodes->primarynodelist = NULL;
- list_free(rel_exec_nodes->nodeList);
- rel_exec_nodes->nodeList = NULL;
- rel_exec_nodes->en_expr = tle->expr;
- rel_exec_nodes->en_relid = rel_loc_info->relid;
- }
- return rel_exec_nodes;
-}
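
For an INSERT into a table distributed by column value, the node list is left empty above and en_expr/en_relid carry the distribution column expression instead. A rough sketch of how the executor side could turn that expression back into a node list at run time, assuming the five-argument GetRelationNodes() signature used above and that its fourth argument is the type of the value (resolve_insert_nodes is a hypothetical helper):

static ExecNodes *
resolve_insert_nodes(ExecNodes *en, Datum value, bool isnull)
{
	RelationLocInfo *rel_loc_info = GetRelationLocInfo(en->en_relid);

	if (!rel_loc_info)
		return NULL;

	/* value/isnull are assumed to come from evaluating en->en_expr */
	return GetRelationNodes(rel_loc_info, value, isnull,
							exprType((Node *) en->en_expr),
							RELATION_ACCESS_INSERT);
}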
-/*
- * pgxc_shippability_walker
- * walks the query/expression tree rooted at the node passed in, gathering
- * information which will help decide whether the query to which this node
- * belongs is shippable to the Datanodes.
- *
- * The function should try to walk the entire tree analysing each subquery for
- * shippability. If a subquery is shippable but not the whole query, we would be
- * able to create a RemoteQuery node for that subquery, shipping it to the
- * Datanode.
- *
- * Return value of this function is governed by the same rules as
- * expression_tree_walker(), see prologue of that function for details.
- */
-bool
-pgxc_shippability_walker(Node *node, Shippability_context *sc_context)
-{
- if (node == NULL)
- return false;
-
- /* Below is the list of nodes that can appear in a query. Examine each
- * kind of node and find out under what conditions a query with this node
- * can be shippable. For each node, update the context (add fields if
- * necessary) so that the decision whether to FQS the query or not can be made.
- */
- switch(nodeTag(node))
- {
- /* Constants are always shippable */
- case T_Const:
- break;
-
- /*
- * For placeholder nodes the shippability of the node depends upon the
- * expression they refer to. It will be checked separately, when
- * that expression is encountered.
- */
- case T_CaseTestExpr:
- break;
-
- /*
- * The record_in() function throws an error, so requesting a result in the
- * form of an anonymous record from a Datanode fails. Hence, if the
- * top expression of a target entry is ROW(), it's not shippable.
- */
- case T_TargetEntry:
- {
- TargetEntry *tle = (TargetEntry *)node;
- if (tle->expr)
- {
- char typtype = get_typtype(exprType((Node *)tle->expr));
- if (!typtype || typtype == TYPTYPE_PSEUDO)
- pgxc_set_shippability_reason(sc_context, SS_UNSHIPPABLE_EXPR);
- }
- }
- break;
-
- case T_SortGroupClause:
- if (sc_context->sc_for_expr)
- pgxc_set_shippability_reason(sc_context, SS_UNSUPPORTED_EXPR);
- break;
-
- /*
- * Nodes, which are shippable if the tree rooted under these nodes is
- * shippable
- */
- case T_List:
- case T_CoerceToDomainValue:
- /*
- * PGXCTODO: mostly, CoerceToDomainValue node appears in DDLs,
- * do we handle DDLs here?
- */
- case T_FieldSelect:
- case T_RangeTblRef:
- case T_NamedArgExpr:
- case T_BoolExpr:
- /*
- * PGXCTODO: we might need to take into account the kind of boolean
- * operator we have in the quals and see if the corresponding
- * function is immutable.
- */
- case T_RelabelType:
- case T_CoerceViaIO:
- case T_ArrayCoerceExpr:
- case T_ConvertRowtypeExpr:
- case T_CaseExpr:
- case T_ArrayExpr:
- case T_RowExpr:
- case T_CollateExpr:
- case T_CoalesceExpr:
- case T_XmlExpr:
- case T_NullTest:
- case T_BooleanTest:
- case T_CoerceToDomain:
- break;
-
- case T_ArrayRef:
- /*
- * When multiple values of an array are updated at once, the
- * FQS planner cannot yet handle the SQL representation correctly.
- * So disable FQS in this case and let the standard planner manage it.
- */
- pgxc_set_shippability_reason(sc_context, SS_UNSUPPORTED_EXPR);
- break;
-
- case T_FieldStore:
- /*
- * PostgreSQL deparsing logic does not handle the FieldStore
- * for more than one field (see processIndirection()). So, let's
- * handle it through the standard planner, where the whole row will be
- * constructed.
- */
- pgxc_set_shippability_reason(sc_context, SS_UNSUPPORTED_EXPR);
- break;
-
- case T_SetToDefault:
- /*
- * PGXCTODO: we should actually check whether the default value to
- * be substituted is shippable to the Datanode. Some cases like
- * nextval() of a sequence can not be shipped to the Datanode, hence
- * for now default values can not be shipped to the Datanodes
- */
- pgxc_set_shippability_reason(sc_context, SS_UNSUPPORTED_EXPR);
- break;
-
- case T_Var:
- {
- Var *var = (Var *)node;
- /*
- * If a subquery references an upper-level variable, that query is
- * not shippable when shipped alone.
- */
- if (var->varlevelsup > sc_context->sc_max_varlevelsup)
- sc_context->sc_max_varlevelsup = var->varlevelsup;
- }
- break;
-
- case T_Param:
- {
- Param *param = (Param *)node;
- /* PGXCTODO: Can we handle internally generated parameters? */
- if (param->paramkind != PARAM_EXTERN)
- pgxc_set_shippability_reason(sc_context, SS_UNSUPPORTED_EXPR);
- }
- break;
-
- case T_CurrentOfExpr:
- {
- /*
- * Ideally we should not see CurrentOf expression here, it
- * should have been replaced by the CTID = ? expression. But
- * still, no harm in shipping it as is.
- */
- }
- break;
-
- case T_Aggref:
- {
- Aggref *aggref = (Aggref *)node;
- /*
- * An aggregate is completely shippable to the Datanode, if the
- * whole group resides on that Datanode. This will be clear when
- * we see the GROUP BY clause.
- * agglevelsup is minimum of variable's varlevelsup, so we will
- * set the sc_max_varlevelsup when we reach the appropriate
- * VARs in the tree.
- */
- pgxc_set_shippability_reason(sc_context, SS_HAS_AGG_EXPR);
- /*
- * A stand-alone expression can not be shipped to the Datanodes if it is
- * 1. an aggregate with ORDER BY or DISTINCT directives, since it needs
- * all the qualifying rows,
- * 2. an aggregate without a collection function, or
- * 3. (PGXCTODO:) an aggregate with a polymorphic transition type, since
- * the transition type needs to be resolved to correctly interpret
- * the transition results from the Datanodes.
- */
- if (aggref->aggorder ||
- aggref->aggdistinct ||
- aggref->agglevelsup ||
- !aggref->agghas_collectfn ||
- IsPolymorphicType(aggref->aggtrantype))
- pgxc_set_shippability_reason(sc_context, SS_NEED_SINGLENODE);
- }
- break;
-
- case T_FuncExpr:
- {
- FuncExpr *funcexpr = (FuncExpr *)node;
- /*
- * PGXC_FQS_TODO: it's too restrictive not to ship non-immutable
- * functions to the Datanode. We need a better way to see what
- * can be shipped to the Datanode and what can not be.
- */
- if (!is_immutable_func(funcexpr->funcid))
- pgxc_set_shippability_reason(sc_context, SS_UNSHIPPABLE_EXPR);
- }
- break;
-
- case T_OpExpr:
- case T_DistinctExpr: /* struct-equivalent to OpExpr */
- case T_NullIfExpr: /* struct-equivalent to OpExpr */
- {
- /*
- * All of these three are structurally equivalent to OpExpr, so
- * cast the node to OpExpr and check if the operator function is
- * immutable. See PGXC_FQS_TODO item for FuncExpr.
- */
- OpExpr *op_expr = (OpExpr *)node;
- Oid opfuncid = OidIsValid(op_expr->opfuncid) ?
- op_expr->opfuncid : get_opcode(op_expr->opno);
- if (!OidIsValid(opfuncid) || !is_immutable_func(opfuncid))
- pgxc_set_shippability_reason(sc_context, SS_UNSHIPPABLE_EXPR);
- }
- break;
-
- case T_ScalarArrayOpExpr:
- {
- /*
- * Check if the operator function is shippable to the Datanode
- * PGXC_FQS_TODO: see immutability note for FuncExpr above
- */
- ScalarArrayOpExpr *sao_expr = (ScalarArrayOpExpr *)node;
- Oid opfuncid = OidIsValid(sao_expr->opfuncid) ?
- sao_expr->opfuncid : get_opcode(sao_expr->opno);
- if (!OidIsValid(opfuncid) || !is_immutable_func(opfuncid))
- pgxc_set_shippability_reason(sc_context, SS_UNSHIPPABLE_EXPR);
- }
- break;
-
- case T_RowCompareExpr:
- case T_MinMaxExpr:
- {
- /*
- * PGXCTODO: should we be checking the comparison operator
- * functions as well, as we did for OpExpr, or is that check
- * unnecessary because operator functions are always shippable?
- * Otherwise this node should be treated similarly to other
- * "shell" nodes.
- */
- }
- break;
-
- case T_Query:
- {
- Query *query = (Query *)node;
-
- /* A stand-alone expression containing Query is not shippable */
- if (sc_context->sc_for_expr)
- {
- pgxc_set_shippability_reason(sc_context, SS_UNSHIPPABLE_EXPR);
- break;
- }
- /* We are checking shippability of whole query, go ahead */
-
- /* CREATE TABLE AS is not supported in FQS */
- if (query->commandType == CMD_UTILITY &&
- IsA(query->utilityStmt, CreateTableAsStmt))
- pgxc_set_shippability_reason(sc_context, SS_UNSUPPORTED_EXPR);
-
- if (query->hasRecursive)
- pgxc_set_shippability_reason(sc_context, SS_UNSUPPORTED_EXPR);
- /*
- * If the query needs Coordinator for evaluation or the query can be
- * completed on Coordinator itself, we don't ship it to the Datanode
- */
- if (pgxc_query_needs_coord(query))
- pgxc_set_shippability_reason(sc_context, SS_NEEDS_COORD);
-
- /* PGXC_FQS_TODO: It should be possible to look at the Query and find out
- * whether it can be completely evaluated on the Datanode just like SELECT
- * queries. But we need to be careful while finding out the Datanodes to
- * execute the query on, esp. for the result relations. If one happens to
- * remove/change this restriction, make sure you change
- * pgxc_FQS_get_relation_nodes appropriately.
- * For now DMLs with single rtable entry are candidates for FQS
- */
- if (query->commandType != CMD_SELECT && list_length(query->rtable) > 1)
- pgxc_set_shippability_reason(sc_context, SS_UNSUPPORTED_EXPR);
-
- /*
- * Under the following conditions the query is shippable only when there
- * is a single Datanode involved:
- * 1. the query has aggregates
- * 2. the query has window functions
- * 3. the query has an ORDER BY clause
- * 4. the query has a DISTINCT clause
- * 5. the query has LIMIT and OFFSET clauses
- *
- * PGXC_FQS_TODO: Condition 1 above really depends upon the GROUP BY clause.
- * If all rows in each group reside on the same Datanode, aggregates can be
- * evaluated on that Datanode, so condition 1 really is "has aggregates and
- * the rows of some group reside on multiple Datanodes".
- * PGXC_FQS_TODO: Condition 4 above really depends upon whether the DISTINCT
- * clause contains the distribution column. If it does, we can ship the
- * DISTINCT clause to the Datanodes.
- */
- if (query->hasAggs || query->hasWindowFuncs || query->sortClause ||
- query->distinctClause || query->groupClause || query->havingQual ||
- query->limitOffset || query->limitCount)
- pgxc_set_shippability_reason(sc_context, SS_NEED_SINGLENODE);
-
- /* walk the entire query tree to analyse the query */
- if (query_tree_walker(query, pgxc_shippability_walker, sc_context, 0))
- return true;
-
- /*
- * PGXC_FQS_TODO:
- * There is a subquery in this query, which references Vars in the upper
- * query. For now stop shipping such queries. We should get rid of this
- * condition.
- */
- if (sc_context->sc_max_varlevelsup != 0)
- pgxc_set_shippability_reason(sc_context, SS_VARLEVEL);
-
- /*
- * Walk the RangeTableEntries of the query and find the
- * Datanodes needed for evaluating this query
- */
- pgxc_FQS_find_datanodes(sc_context);
- }
- break;
-
- case T_FromExpr:
- {
- /* We don't expect FromExpr in a stand-alone expression */
- if (sc_context->sc_for_expr)
- pgxc_set_shippability_reason(sc_context, SS_UNSUPPORTED_EXPR);
-
- /*
- * We will be examining the range table entries separately, and
- * Join expressions are not candidates for FQS.
- * If this is an INSERT query with quals, resulting from, say, a
- * conditional rule, we can not handle those in FQS, since there is
- * no SQL representation for such quals.
- */
- if (sc_context->sc_query->commandType == CMD_INSERT &&
- ((FromExpr *)node)->quals)
- pgxc_set_shippability_reason(sc_context, SS_UNSUPPORTED_EXPR);
-
- }
- break;
-
- case T_WindowFunc:
- {
- WindowFunc *winf = (WindowFunc *)node;
- /*
- * A window function can be evaluated on a Datanode if there is
- * only one Datanode involved.
- */
- pgxc_set_shippability_reason(sc_context, SS_NEED_SINGLENODE);
-
- /*
- * A window function is not shippable as part of a stand-alone
- * expression. If the window function is non-immutable, it can not
- * be shipped to the datanodes.
- */
- if (sc_context->sc_for_expr ||
- !is_immutable_func(winf->winfnoid))
- pgxc_set_shippability_reason(sc_context, SS_UNSHIPPABLE_EXPR);
- }
- break;
-
- case T_WindowClause:
- {
- /*
- * A window function can be evaluated on a Datanode if there is
- * only one Datanode involved.
- */
- pgxc_set_shippability_reason(sc_context, SS_NEED_SINGLENODE);
-
- /*
- * A window function is not shippable as part of a stand-alone
- * expression
- */
- if (sc_context->sc_for_expr)
- pgxc_set_shippability_reason(sc_context, SS_UNSHIPPABLE_EXPR);
- }
- break;
-
- case T_JoinExpr:
- /* We don't expect JoinExpr in a stand-alone expression */
- if (sc_context->sc_for_expr)
- pgxc_set_shippability_reason(sc_context, SS_UNSUPPORTED_EXPR);
-
- /*
- * For JoinExpr in a Query
- * The compatibility of joining ranges will be deduced while
- * examining the range table of the query. Nothing to do here
- */
- break;
-
- case T_SubLink:
- {
- SubLink *sublink = (SubLink *)node;
- ExecNodes *sublink_en;
- /*
- * Walk the query and find the nodes where the query should be
- * executed and node distribution. Merge this with the existing
- * node list obtained for other subqueries. If merging fails, we
- * can not ship the whole query.
- */
- if (IsA(sublink->subselect, Query))
- sublink_en = pgxc_is_query_shippable((Query *)(sublink->subselect),
- sc_context->sc_query_level);
- else
- sublink_en = NULL;
-
- /* PGXCTODO free the old sc_subquery_en. */
- /* If we already know that this query does not have a set of nodes
- * to evaluate on, don't bother to merge again.
- */
- if (!pgxc_test_shippability_reason(sc_context, SS_NO_NODES))
- {
- sc_context->sc_subquery_en = pgxc_merge_exec_nodes(sublink_en,
- sc_context->sc_subquery_en,
- false,
- true);
- if (!sc_context->sc_subquery_en)
- pgxc_set_shippability_reason(sc_context, SS_NO_NODES);
- }
- }
- break;
-
- case T_SubPlan:
- case T_AlternativeSubPlan:
- case T_CommonTableExpr:
- case T_SetOperationStmt:
- case T_PlaceHolderVar:
- case T_AppendRelInfo:
- case T_PlaceHolderInfo:
- {
- /* PGXCTODO: till we exhaust this list */
- pgxc_set_shippability_reason(sc_context, SS_UNSUPPORTED_EXPR);
- }
- break;
-
- default:
- elog(ERROR, "unrecognized node type: %d",
- (int) nodeTag(node));
- break;
- }
- return expression_tree_walker(node, pgxc_shippability_walker, (void *)sc_context);
-}
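
A typical caller would zero a Shippability_context, point it at the query, run the walker, and then test the collected reasons. A minimal sketch along those lines, using only the fields and helpers visible in this file (other context fields that the real caller initializes are omitted, and query_is_fqs_candidate is a hypothetical name):

static bool
query_is_fqs_candidate(Query *query)
{
	Shippability_context sc_context;

	memset(&sc_context, 0, sizeof(sc_context));
	sc_context.sc_query = query;		/* query being checked */
	sc_context.sc_query_level = 0;		/* top-level query */
	sc_context.sc_for_expr = false;		/* whole query, not a stand-alone expression */

	(void) pgxc_shippability_walker((Node *) query, &sc_context);

	/* any of these reasons forces the standard planner */
	return !(pgxc_test_shippability_reason(&sc_context, SS_UNSHIPPABLE_EXPR) ||
			 pgxc_test_shippability_reason(&sc_context, SS_UNSUPPORTED_EXPR) ||
			 pgxc_test_shippability_reason(&sc_context, SS_NEEDS_COORD));
}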
-
-/*
- * See if we can reduce the passed in RemoteQuery nodes to a single step.
- *
- * We need to check when we can further collapse already collapsed nodes.
- * We cannot always collapse; we do not want to allow a replicated table
- * to be used twice. That is, if we have
- *
- * partitioned_1 -- replicated -- partitioned_2
- *
- * partitioned_1 and partitioned_2 cannot (usually) be safely joined
- * locally.
- * We can do this by checking (may need tracking) what type it is,
- * and looking at context->conditions->replicated_joins
- *
- * The following cases are possible, and whether or not it is ok
- * to reduce.
- *
- * If the join between the two RemoteQuery nodes is replicated
- *
- * Node 1 Node 2
- * rep-part folded rep-part folded ok to reduce?
- * 0 0 0 1 1
- * 0 0 1 1 1
- * 0 1 0 1 1
- * 0 1 1 1 1
- * 1 1 1 1 0
- *
- *
- * If the join between the two RemoteQuery nodes is replicated - partitioned
- *
- * Node 1 Node 2
- * rep-part folded rep-part folded ok to reduce?
- * 0 0 0 1 1
- * 0 0 1 1 0
- * 0 1 0 1 1
- * 0 1 1 1 0
- * 1 1 1 1 0
- *
- *
- * If the join between the two RemoteQuery nodes is partitioned - partitioned
- * it is always safely reducible.
- *
- * RemoteQuery *innernode - the inner node
- * RemoteQuery *outernode - the outer node
- * List *rtable_list - rtables
- * JoinPath *join_path - used to examine join restrictions
- * PGXCJoinInfo *join_info - contains info about the join reduction
- * join_info->partitioned_replicated is set to true if we have a partitioned-replicated
- * join. We want to use replicated tables with non-replicated
- * tables only once. Only use this value if this function
- * returns true.
- */
-ExecNodes *
-IsJoinReducible(RemoteQuery *innernode, RemoteQuery *outernode, Relids in_relids, Relids out_relids,
- Join *join, JoinPath *join_path, List *rtables)
-{
- ExecNodes *join_exec_nodes;
- bool merge_dist_equijoin = false;
- bool merge_replicated_only;
- ListCell *cell;
- ExecNodes *inner_en = innernode->exec_nodes;
- ExecNodes *outer_en = outernode->exec_nodes;
- List *quals = join->joinqual;
-
- /*
- * When join type is other than INNER, we will get the unmatched rows on
- * either side. The result will be correct only in case both the sides of
- * join are replicated. In case one of the sides is replicated, and the
- * unmatched results are not coming from that side, it might be possible to
- * ship such join, but this needs to be validated from correctness
- * perspective.
- */
- merge_replicated_only = (join->jointype != JOIN_INNER);
-
- /*
- * If both the relations are distributed with similar distribution strategy
- * walk through the restriction info for this JOIN to find if there is an
- * equality condition on the distributed columns of both the relations. In
- * such case, we can reduce the JOIN if the distribution nodelist is also
- * same.
- */
- if (IsLocatorDistributedByValue(inner_en->baselocatortype) &&
- inner_en->baselocatortype == outer_en->baselocatortype &&
- !merge_replicated_only)
- {
- foreach(cell, quals)
- {
- Node *qual = (Node *)lfirst(cell);
- if (pgxc_qual_hash_dist_equijoin(in_relids, out_relids, InvalidOid,
- qual, rtables))
- {
- merge_dist_equijoin = true;
- break;
- }
- }
- }
- /*
- * If the ExecNodes of inner and outer nodes can be merged, the JOIN is
- * shippable
- * PGXCTODO: Can we take into consideration the JOIN conditions to optimize
- * further?
- */
- join_exec_nodes = pgxc_merge_exec_nodes(inner_en, outer_en,
- merge_dist_equijoin,
- merge_replicated_only);
- return join_exec_nodes;
-}
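
To make the reducibility tables above concrete: for an inner join of two hash-distributed tables on their distribution columns, the call below is made with merge_dist_equijoin = true and succeeds when both sides cover the same node list; for an outer join only replicated inputs can be merged. A NULL result simply means the two RemoteQuery steps are kept separate (illustrative fragment, not a full caller):

	ExecNodes *merged = pgxc_merge_exec_nodes(inner_en, outer_en,
											  true,		/* merge_dist_equijoin */
											  false);	/* merge_replicated_only */
	if (merged == NULL)
		;	/* not reducible: keep two separate RemoteQuery steps */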
-
-/*
- * validate whether partition column of a table is being updated
- */
-static void
-validate_part_col_updatable(const Query *query)
-{
- RangeTblEntry *rte;
- RelationLocInfo *rel_loc_info;
- ListCell *lc;
-
- /* Make sure there is one table at least */
- if (query->rtable == NULL)
- return;
-
- rte = (RangeTblEntry *) list_nth(query->rtable, query->resultRelation - 1);
-
-
- if (rte != NULL && rte->relkind != RELKIND_RELATION)
- /* Bad relation type */
- return;
-
- /* See if we have the partitioned case. */
- rel_loc_info = GetRelationLocInfo(rte->relid);
-
- /* Any column update on local relations is fine */
- if (!rel_loc_info)
- return;
-
-
- /* Only LOCATOR_TYPE_HASH & LOCATOR_TYPE_MODULO should be checked */
- if ( (rel_loc_info->partAttrName != NULL) &&
- ( (rel_loc_info->locatorType == LOCATOR_TYPE_HASH) || (rel_loc_info->locatorType == LOCATOR_TYPE_MODULO) ) )
- {
- /* It is a partitioned table, check partition column in targetList */
- foreach(lc, query->targetList)
- {
- TargetEntry *tle = (TargetEntry *) lfirst(lc);
-
- if (tle->resjunk)
- continue;
-
- /*
- * See if we have a constant expression comparing against the
- * designated partitioned column
- */
- if (strcmp(tle->resname, rel_loc_info->partAttrName) == 0)
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- (errmsg("Partition column can't be updated in current version"))));
- }
- }
-}
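
A concrete example of what this check rejects, assuming the usual DISTRIBUTE BY HASH syntax for creating the table:

/*
 * CREATE TABLE t (id int, v text) DISTRIBUTE BY HASH (id);
 *
 * UPDATE t SET v = 'x';		-- passes: "v" is not the distribution column
 * UPDATE t SET id = id + 1;	-- rejected: the non-junk target entry named "id"
 *								-- matches partAttrName, so the loop above raises
 *								-- "Partition column can't be updated in current version"
 */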
-
-/*
- * AddRemoteQueryNode
- *
- * Add a Remote Query node to launch on Datanodes.
- * This can only be done for a query at the Top Level to avoid
- * duplicated queries on Datanodes.
- */
-List *
-AddRemoteQueryNode(List *stmts, const char *queryString, RemoteQueryExecType remoteExecType, bool is_temp)
-{
- List *result = stmts;
-
- /* If node is applied on EXEC_ON_NONE, simply return the list unchanged */
- if (remoteExecType == EXEC_ON_NONE)
- return result;
-
- /* Only a remote Coordinator is allowed to send a query to backend nodes */
- if (IS_PGXC_LOCAL_COORDINATOR)
- {
- RemoteQuery *step = makeNode(RemoteQuery);
- step->combine_type = COMBINE_TYPE_SAME;
- step->sql_statement = (char *) queryString;
- step->exec_type = remoteExecType;
- step->is_temp = is_temp;
- result = lappend(result, step);
- }
-
- return result;
-}
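
A sketch of the intended call pattern, as seen from utility processing: append the original statement, then the remote step. EXEC_ON_DATANODES is assumed here to be one of the RemoteQueryExecType members alongside EXEC_ON_NONE:

	List *stmts = NIL;

	stmts = lappend(stmts, parsetree);
	stmts = AddRemoteQueryNode(stmts, queryString, EXEC_ON_DATANODES, false);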
-
-/*
- * pgxc_query_contains_temp_tables
- *
- * Check if there is any temporary object used in given list of queries.
- */
-bool
-pgxc_query_contains_temp_tables(List *queries)
-{
- ListCell *elt;
-
- foreach(elt, queries)
- {
- Query *query = (Query *) lfirst(elt);
-
- if (!query)
- continue;
-
- switch(query->commandType)
- {
- case CMD_SELECT:
- case CMD_UPDATE:
- case CMD_INSERT:
- case CMD_DELETE:
- if (contains_temp_tables(query->rtable))
- return true;
- default:
- break;
- }
- }
-
- return false;
-}
-#endif
-
#ifdef XCP
/*
@@ -2198,88 +144,8 @@ pgxc_direct_planner(Query *query, int cursorOptions, ParamListInfo boundParams)
result->planTree = (Plan *) query_step;
-#ifndef XCP
- query->qry_finalise_aggs = false;
-#endif
query_step->scan.plan.targetlist = query->targetList;
return result;
}
-#ifndef XCP
-/*
- * pgxc_query_contains_utility
- *
- * Check if there is any utility statement in given list of queries.
- */
-bool
-pgxc_query_contains_utility(List *queries)
-{
- ListCell *elt;
-
- foreach(elt, queries)
- {
- Query *query = (Query *) lfirst(elt);
-
- if (!query)
- continue;
-
- if (query->commandType == CMD_UTILITY)
- return true;
- }
-
- return false;
-}
-
-
-/*
- * pgxc_set_remote_parameters
- *
- * Set the list of remote parameters for remote plan
- */
-static void
-pgxc_set_remote_parameters(PlannedStmt *plan, ParamListInfo boundParams)
-{
- Oid *param_types;
- int cntParam, i;
-
- /* Leave if no plan */
- if (!plan)
- return;
-
- /* Leave if no parameters */
- if (!boundParams)
- return;
-
- /*
- * Count the number of remote parameters available.
- * We need to take into account all the parameters
- * up to the last one with a valid type. This ensures
- * that the remote node will not complain about an incorrect
- * number of parameters. Parameters with no type that fall
- * within this range are considered as NULL entries.
- */
- cntParam = 0;
- for (i = 0; i < boundParams->numParams; i++)
- {
- if (OidIsValid(boundParams->params[i].ptype))
- cntParam = i + 1;
- }
-
- /* If there are no parameters available, simply leave */
- if (cntParam == 0)
- return;
-
- param_types = (Oid *) palloc(sizeof(Oid) * cntParam);
-
- /* Then fill the array of types */
- for (i = 0; i < cntParam; i++)
- param_types[i] = boundParams->params[i].ptype;
-
- /* Finally save the parameters in plan */
- SetRemoteStatementName(plan->planTree, NULL,
- cntParam, param_types, 0);
-
- return;
-}
-#endif
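
A worked example of the counting rule in pgxc_set_remote_parameters() above (values are illustrative):

/*
 * boundParams->params[i].ptype :  int4  InvalidOid  text  InvalidOid
 *                           i   :   0        1        2        3
 * The last i with a valid type is 2, so cntParam = 3; param_types[] becomes
 * { int4, InvalidOid, text }, the untyped $2 is sent as a NULL-typed entry,
 * and the trailing untyped $4 is dropped.
 */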
diff --git a/src/backend/pgxc/pool/execRemote.c b/src/backend/pgxc/pool/execRemote.c
index 8d7d4da593..ec7d57d8d0 100644
--- a/src/backend/pgxc/pool/execRemote.c
+++ b/src/backend/pgxc/pool/execRemote.c
@@ -36,11 +36,9 @@
#include "libpq/libpq.h"
#include "miscadmin.h"
#include "pgxc/execRemote.h"
-#ifdef XCP
#include "executor/nodeSubplan.h"
#include "nodes/nodeFuncs.h"
#include "pgstat.h"
-#endif
#include "nodes/nodes.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/var.h"
@@ -72,70 +70,12 @@ bool EnforceTwoPhaseCommit = true;
 * seconds while it is being cleaned up when, for example, cancelling a query.
*/
#define END_QUERY_TIMEOUT 20
-#ifndef XCP
-#define ROLLBACK_RESP_LEN 9
-
-typedef enum RemoteXactNodeStatus
-{
- RXACT_NODE_NONE, /* Initial state */
- RXACT_NODE_PREPARE_SENT, /* PREPARE request sent */
- RXACT_NODE_PREPARE_FAILED, /* PREPARE failed on the node */
- RXACT_NODE_PREPARED, /* PREPARED successfully on the node */
- RXACT_NODE_COMMIT_SENT, /* COMMIT sent successfully */
- RXACT_NODE_COMMIT_FAILED, /* failed to COMMIT on the node */
- RXACT_NODE_COMMITTED, /* COMMITTed successfully on the node */
- RXACT_NODE_ABORT_SENT, /* ABORT sent successfully */
- RXACT_NODE_ABORT_FAILED, /* failed to ABORT on the node */
- RXACT_NODE_ABORTED /* ABORTed successfully on the node */
-} RemoteXactNodeStatus;
-
-typedef enum RemoteXactStatus
-{
- RXACT_NONE, /* Initial state */
- RXACT_PREPARE_FAILED, /* PREPARE failed */
- RXACT_PREPARED, /* PREPARED succeeded on all nodes */
- RXACT_COMMIT_FAILED, /* COMMIT failed on all the nodes */
- RXACT_PART_COMMITTED, /* COMMIT failed on some and succeeded on other nodes */
- RXACT_COMMITTED, /* COMMIT succeeded on all the nodes */
- RXACT_ABORT_FAILED, /* ABORT failed on all the nodes */
- RXACT_PART_ABORTED, /* ABORT failed on some and succeeded on other nodes */
- RXACT_ABORTED /* ABORT succeeded on all the nodes */
-} RemoteXactStatus;
-
-typedef struct RemoteXactState
-{
- /* Current status of the remote 2PC */
- RemoteXactStatus status;
-
- /*
- * Information about all the nodes involved in the transaction. We track
- * the number of writers and readers. The first numWriteRemoteNodes entries
- * in the remoteNodeHandles and remoteNodeStatus correspond to the writer
- * connections and rest correspond to the reader connections.
- */
- int numWriteRemoteNodes;
- int numReadRemoteNodes;
- int maxRemoteNodes;
- PGXCNodeHandle **remoteNodeHandles;
- RemoteXactNodeStatus *remoteNodeStatus;
-
- GlobalTransactionId commitXid;
-
- bool preparedLocalNode;
-
- char prepareGID[256]; /* GID used for internal 2PC */
-} RemoteXactState;
-static RemoteXactState remoteXactState;
-#endif
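
The per-node status array above is what drives the 2PC bookkeeping in the pre-XCP code path. A small sketch of how a PREPARE outcome could be folded into it (record_prepare_outcome is a hypothetical helper; the real flow also updates commitXid and the writer/reader node bookkeeping):

static void
record_prepare_outcome(RemoteXactState *state, int node_idx, bool succeeded)
{
	/* node_idx indexes remoteNodeHandles and remoteNodeStatus in parallel */
	state->remoteNodeStatus[node_idx] = succeeded
		? RXACT_NODE_PREPARED
		: RXACT_NODE_PREPARE_FAILED;

	if (!succeeded)
		state->status = RXACT_PREPARE_FAILED;
}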
-
-#ifdef PGXC
typedef struct
{
xact_callback function;
void *fparams;
} abort_callback_type;
-#endif
/*
* Buffer size does not affect performance significantly, just do not allow
@@ -144,79 +84,35 @@ typedef struct
#define COPY_BUFFER_SIZE 8192
#define PRIMARY_NODE_WRITEAHEAD 1024 * 1024
-#ifndef XCP
-/*
- * List of PGXCNodeHandle to track readers and writers involved in the
- * current transaction
- */
-static List *XactWriteNodes;
-static List *XactReadNodes;
-static char *preparedNodes;
-#endif
-
/*
* Flag to track if a temporary object is accessed by the current transaction
*/
static bool temp_object_included = false;
-
-#ifdef PGXC
static abort_callback_type dbcleanup_info = { NULL, NULL };
-#endif
static int pgxc_node_begin(int conn_count, PGXCNodeHandle ** connections,
GlobalTransactionId gxid, bool need_tran_block,
bool readOnly, char node_type);
-#ifdef XCP
static PGXCNodeAllHandles *get_exec_connections(RemoteQueryState *planstate,
ExecNodes *exec_nodes,
RemoteQueryExecType exec_type,
bool is_global_session);
-#else
-static PGXCNodeAllHandles * get_exec_connections(RemoteQueryState *planstate,
- ExecNodes *exec_nodes,
- RemoteQueryExecType exec_type);
-#endif
-
-#ifndef XCP
-static void close_node_cursors(PGXCNodeHandle **connections, int conn_count, char *cursor);
-static int pgxc_get_transaction_nodes(PGXCNodeHandle *connections[], int size, bool writeOnly);
-static int pgxc_get_connections(PGXCNodeHandle *connections[], int size, List *connlist);
-#endif
static bool pgxc_start_command_on_connection(PGXCNodeHandle *connection,
RemoteQueryState *remotestate, Snapshot snapshot);
-#ifndef XCP
-static TupleTableSlot * RemoteQueryNext(ScanState *node);
-static bool RemoteQueryRecheck(RemoteQueryState *node, TupleTableSlot *slot);
-static char *generate_begin_command(void);
-#endif
-
-#ifdef XCP
static char *pgxc_node_remote_prepare(char *prepareGID, bool localNode);
static bool pgxc_node_remote_finish(char *prepareGID, bool commit,
char *nodestring, GlobalTransactionId gxid,
GlobalTransactionId prepare_gxid);
-#else
-static bool pgxc_node_remote_prepare(char *prepareGID, bool localNode);
-static char *pgxc_node_get_nodelist(bool localNode);
-#endif
static void pgxc_node_remote_commit(void);
static void pgxc_node_remote_abort(void);
-#ifdef XCP
static void pgxc_connections_cleanup(ResponseCombiner *combiner);
static void pgxc_node_report_error(ResponseCombiner *combiner);
-#else
-static void ExecClearTempObjectIncluded(void);
-static void init_RemoteXactState(bool preparedLocalNode);
-static void clear_RemoteXactState(void);
-static void pgxc_node_report_error(RemoteQueryState *combiner);
-#endif
-#ifdef XCP
#define REMOVE_CURR_CONN(combiner) \
if ((combiner)->current_conn < --((combiner)->conn_count)) \
{ \
@@ -225,7 +121,6 @@ static void pgxc_node_report_error(RemoteQueryState *combiner);
} \
else \
(combiner)->current_conn = 0
-#endif
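
The macro above keeps the connections array dense by moving the last element into the vacated slot, the same pattern that appears inline further down in this file:

/*
 * e.g. conn_count = 3, current_conn = 1:
 *   after REMOVE_CURR_CONN(combiner) -> conn_count = 2 and
 *   connections[1] holds what used to be connections[2];
 * if the removed entry was the last one, current_conn wraps back to 0.
 */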
#define MAX_STATEMENTS_PER_TRAN 10
@@ -279,21 +174,6 @@ stat_transaction(int node_count)
}
-#ifdef NOT_USED
-/*
- * To collect statistics: count a two-phase commit on nodes
- */
-static void
-stat_2pc()
-{
- if (autocommit)
- autocommit_2pc++;
- else
- nonautocommit_2pc++;
-}
-#endif
-
-
/*
* Output collected statistics to the log
*/
@@ -331,26 +211,10 @@ stat_log()
* Create a structure to store parameters needed to combine responses from
* multiple connections as well as state information
*/
-#ifdef XCP
void
InitResponseCombiner(ResponseCombiner *combiner, int node_count,
CombineType combine_type)
-#else
-static RemoteQueryState *
-CreateResponseCombiner(int node_count, CombineType combine_type)
-#endif
{
-#ifndef XCP
- RemoteQueryState *combiner;
-
- /* ResponseCombiner is a typedef for pointer to ResponseCombinerData */
- combiner = makeNode(RemoteQueryState);
- if (combiner == NULL)
- {
- /* Out of memory */
- return combiner;
- }
-#endif
combiner->node_count = node_count;
combiner->connections = NULL;
combiner->conn_count = 0;
@@ -365,18 +229,11 @@ CreateResponseCombiner(int node_count, CombineType combine_type)
combiner->errorDetail = NULL;
combiner->errorHint = NULL;
combiner->tuple_desc = NULL;
-#ifdef XCP
combiner->probing_primary = false;
combiner->returning_node = InvalidOid;
combiner->currentRow = NULL;
-#else
- combiner->currentRow.msg = NULL;
- combiner->currentRow.msglen = 0;
- combiner->currentRow.msgnode = 0;
-#endif
combiner->rowBuffer = NIL;
combiner->tapenodes = NULL;
-#ifdef XCP
combiner->merge_sort = false;
combiner->extended_query = false;
combiner->tapemarks = NULL;
@@ -386,14 +243,6 @@ CreateResponseCombiner(int node_count, CombineType combine_type)
combiner->cursor_count = 0;
combiner->cursor_connections = NULL;
combiner->remoteCopyType = REMOTE_COPY_NONE;
-#else
- combiner->initAggregates = true;
- combiner->query_Done = false;
- combiner->copy_file = NULL;
- combiner->rqs_cmd_id = FirstCommandId;
-
- return combiner;
-#endif
}
@@ -492,16 +341,10 @@ create_tuple_desc(char *msg_body, size_t len)
* Handle CopyOutCommandComplete ('c') message from a Datanode connection
*/
static void
-#ifdef XCP
HandleCopyOutComplete(ResponseCombiner *combiner)
-#else
-HandleCopyOutComplete(RemoteQueryState *combiner)
-#endif
{
-#ifdef XCP
if (combiner->request_type == REQUEST_TYPE_ERROR)
return;
-#endif
if (combiner->request_type == REQUEST_TYPE_NOT_DEFINED)
combiner->request_type = REQUEST_TYPE_COPY_OUT;
if (combiner->request_type != REQUEST_TYPE_COPY_OUT)
@@ -517,11 +360,7 @@ HandleCopyOutComplete(RemoteQueryState *combiner)
* Handle CommandComplete ('C') message from a Datanode connection
*/
static void
-#ifdef XCP
HandleCommandComplete(ResponseCombiner *combiner, char *msg_body, size_t len, PGXCNodeHandle *conn)
-#else
-HandleCommandComplete(RemoteQueryState *combiner, char *msg_body, size_t len, PGXCNodeHandle *conn)
-#endif
{
int digits = 0;
EState *estate = combiner->ss.ps.state;
@@ -543,7 +382,6 @@ HandleCommandComplete(RemoteQueryState *combiner, char *msg_body, size_t len, PG
{
if (combiner->command_complete_count)
{
-#ifdef XCP
/*
 * Replicated command may succeed on one node and fail on
* another. The example is if distributed table referenced
@@ -556,9 +394,6 @@ HandleCommandComplete(RemoteQueryState *combiner, char *msg_body, size_t len, PG
 * not the scary data corruption message.
*/
if (combiner->errorMessage == NULL && rowcount != estate->es_processed)
-#else
- if (rowcount != estate->es_processed)
-#endif
/* There is a consistency issue in the database with the replicated table */
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
@@ -576,7 +411,6 @@ HandleCommandComplete(RemoteQueryState *combiner, char *msg_body, size_t len, PG
}
 /* If response checking is enabled, only then do further processing */
-#ifdef XCP
if (conn->ck_resp_rollback)
{
if (strcmp(msg_body, "ROLLBACK") == 0)
@@ -614,18 +448,6 @@ HandleCommandComplete(RemoteQueryState *combiner, char *msg_body, size_t len, PG
}
}
}
-#else
- if (conn->ck_resp_rollback == RESP_ROLLBACK_CHECK)
- {
- conn->ck_resp_rollback = RESP_ROLLBACK_NOT_RECEIVED;
- if (len == ROLLBACK_RESP_LEN) /* No need to do string comparison otherwise */
- {
- if (strcmp(msg_body, "ROLLBACK") == 0)
- conn->ck_resp_rollback = RESP_ROLLBACK_RECEIVED;
- }
- }
-#endif
-
combiner->command_complete_count++;
}
@@ -633,16 +455,10 @@ HandleCommandComplete(RemoteQueryState *combiner, char *msg_body, size_t len, PG
* Handle RowDescription ('T') message from a Datanode connection
*/
static bool
-#ifdef XCP
HandleRowDescription(ResponseCombiner *combiner, char *msg_body, size_t len)
-#else
-HandleRowDescription(RemoteQueryState *combiner, char *msg_body, size_t len)
-#endif
{
-#ifdef XCP
if (combiner->request_type == REQUEST_TYPE_ERROR)
return false;
-#endif
if (combiner->request_type == REQUEST_TYPE_NOT_DEFINED)
combiner->request_type = REQUEST_TYPE_QUERY;
if (combiner->request_type != REQUEST_TYPE_QUERY)
@@ -662,44 +478,14 @@ HandleRowDescription(RemoteQueryState *combiner, char *msg_body, size_t len)
}
-#ifdef NOT_USED
-/*
- * Handle ParameterStatus ('S') message from a Datanode connection (SET command)
- */
-static void
-HandleParameterStatus(RemoteQueryState *combiner, char *msg_body, size_t len)
-{
- if (combiner->request_type == REQUEST_TYPE_NOT_DEFINED)
- combiner->request_type = REQUEST_TYPE_QUERY;
- if (combiner->request_type != REQUEST_TYPE_QUERY)
- {
- /* Inconsistent responses */
- ereport(ERROR,
- (errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("Unexpected response from the Datanodes for 'S' message, current request type %d", combiner->request_type)));
- }
- /* Proxy last */
- if (++combiner->description_count == combiner->node_count)
- {
- pq_putmessage('S', msg_body, len);
- }
-}
-#endif
-
/*
* Handle CopyInResponse ('G') message from a Datanode connection
*/
static void
-#ifdef XCP
HandleCopyIn(ResponseCombiner *combiner)
-#else
-HandleCopyIn(RemoteQueryState *combiner)
-#endif
{
-#ifdef XCP
if (combiner->request_type == REQUEST_TYPE_ERROR)
return;
-#endif
if (combiner->request_type == REQUEST_TYPE_NOT_DEFINED)
combiner->request_type = REQUEST_TYPE_COPY_IN;
if (combiner->request_type != REQUEST_TYPE_COPY_IN)
@@ -720,16 +506,10 @@ HandleCopyIn(RemoteQueryState *combiner)
* Handle CopyOutResponse ('H') message from a Datanode connection
*/
static void
-#ifdef XCP
HandleCopyOut(ResponseCombiner *combiner)
-#else
-HandleCopyOut(RemoteQueryState *combiner)
-#endif
{
-#ifdef XCP
if (combiner->request_type == REQUEST_TYPE_ERROR)
return;
-#endif
if (combiner->request_type == REQUEST_TYPE_NOT_DEFINED)
combiner->request_type = REQUEST_TYPE_COPY_OUT;
if (combiner->request_type != REQUEST_TYPE_COPY_OUT)
@@ -750,16 +530,10 @@ HandleCopyOut(RemoteQueryState *combiner)
* Handle CopyOutDataRow ('d') message from a Datanode connection
*/
static void
-#ifdef XCP
HandleCopyDataRow(ResponseCombiner *combiner, char *msg_body, size_t len)
-#else
-HandleCopyDataRow(RemoteQueryState *combiner, char *msg_body, size_t len)
-#endif
{
-#ifdef XCP
if (combiner->request_type == REQUEST_TYPE_ERROR)
return;
-#endif
if (combiner->request_type == REQUEST_TYPE_NOT_DEFINED)
combiner->request_type = REQUEST_TYPE_COPY_OUT;
@@ -784,90 +558,12 @@ HandleCopyDataRow(RemoteQueryState *combiner, char *msg_body, size_t len)
pq_putmessage('d', msg_body, len);
break;
case REMOTE_COPY_TUPLESTORE:
-#ifdef XCP
/*
* Do not store trailing \n character.
* When tuplestore data are loaded to a table it automatically
* inserts line ends.
*/
tuplestore_putmessage(combiner->tuplestorestate, len-1, msg_body);
-#else
- {
- Datum *values;
- bool *nulls;
- TupleDesc tupdesc = combiner->tuple_desc;
- int i, dropped;
- Form_pg_attribute *attr = tupdesc->attrs;
- FmgrInfo *in_functions;
- Oid *typioparams;
- char **fields;
-
- values = (Datum *) palloc(tupdesc->natts * sizeof(Datum));
- nulls = (bool *) palloc(tupdesc->natts * sizeof(bool));
- in_functions = (FmgrInfo *) palloc(tupdesc->natts * sizeof(FmgrInfo));
- typioparams = (Oid *) palloc(tupdesc->natts * sizeof(Oid));
-
- /* Calculate the Oids of input functions */
- for (i = 0; i < tupdesc->natts; i++)
- {
- Oid in_func_oid;
-
- /* Do not need any information for dropped attributes */
- if (attr[i]->attisdropped)
- continue;
-
- getTypeInputInfo(attr[i]->atttypid,
- &in_func_oid, &typioparams[i]);
- fmgr_info(in_func_oid, &in_functions[i]);
- }
-
- /*
- * Convert message into an array of fields.
- * Last \n is not included in converted message.
- */
- fields = CopyOps_RawDataToArrayField(tupdesc, msg_body, len - 1);
-
- /* Fill in the array values */
- dropped = 0;
- for (i = 0; i < tupdesc->natts; i++)
- {
- char *string = fields[i - dropped];
- /* Do not need any information for dropped attributes */
- if (attr[i]->attisdropped)
- {
- dropped++;
- nulls[i] = true; /* Consider dropped parameter as NULL */
- continue;
- }
-
- /* Find value */
- values[i] = InputFunctionCall(&in_functions[i],
- string,
- typioparams[i],
- attr[i]->atttypmod);
- /* Setup value with NULL flag if necessary */
- if (string == NULL)
- nulls[i] = true;
- else
- nulls[i] = false;
- }
-
- /* Then insert the values into tuplestore */
- tuplestore_putvalues(combiner->tuplestorestate,
- combiner->tuple_desc,
- values,
- nulls);
-
- /* Clean up everything */
- if (*fields)
- pfree(*fields);
- pfree(fields);
- pfree(values);
- pfree(nulls);
- pfree(in_functions);
- pfree(typioparams);
- }
-#endif
break;
case REMOTE_COPY_NONE:
default:
@@ -880,7 +576,6 @@ HandleCopyDataRow(RemoteQueryState *combiner, char *msg_body, size_t len)
* The function returns true if data row is accepted and successfully stored
* within the combiner.
*/
-#ifdef XCP
static bool
HandleDataRow(ResponseCombiner *combiner, char *msg_body, size_t len, Oid node)
{
@@ -937,54 +632,12 @@ HandleDataRow(ResponseCombiner *combiner, char *msg_body, size_t len, Oid node)
return true;
}
-#else
-static void
-HandleDataRow(RemoteQueryState *combiner, char *msg_body, size_t len, int nid)
-{
- /* We expect previous message is consumed */
- Assert(combiner->currentRow.msg == NULL);
-
- if (nid < 0)
- ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("invalid node id %d",
- nid)));
-
- if (combiner->request_type != REQUEST_TYPE_QUERY)
- {
- /* Inconsistent responses */
- ereport(ERROR,
- (errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("Unexpected response from the Datanodes for 'D' message, current request type %d", combiner->request_type)));
- }
-
- /*
- * If we got an error already ignore incoming data rows from other nodes
- * Still we want to continue reading until get CommandComplete
- */
- if (combiner->errorMessage)
- return;
-
- /*
- * We are copying message because it points into connection buffer, and
- * will be overwritten on next socket read
- */
- combiner->currentRow.msg = (char *) palloc(len);
- memcpy(combiner->currentRow.msg, msg_body, len);
- combiner->currentRow.msglen = len;
- combiner->currentRow.msgnode = nid;
-}
-#endif
/*
* Handle ErrorResponse ('E') message from a Datanode connection
*/
static void
-#ifdef XCP
HandleError(ResponseCombiner *combiner, char *msg_body, size_t len, PGXCNodeHandle *conn)
-#else
-HandleError(RemoteQueryState *combiner, char *msg_body, size_t len)
-#endif
{
/* parse error message */
char *code = NULL;
@@ -1040,9 +693,7 @@ HandleError(RemoteQueryState *combiner, char *msg_body, size_t len)
* because we should read from this and other connections until
* ReadyForQuery is received, so we just store the error message.
* If multiple connections return errors only first one is reported.
- */
-#ifdef XCP
- /*
+ *
* The producer error may be hiding primary error, so if previously received
* error is a producer error allow it to be overwritten.
*/
@@ -1072,21 +723,6 @@ HandleError(RemoteQueryState *combiner, char *msg_body, size_t len)
if (conn->ck_resp_rollback)
conn->ck_resp_rollback = false;
-#else
- if (!combiner->errorMessage)
- {
- combiner->errorMessage = pstrdup(message);
- /* Error Code is exactly 5 significant bytes */
- if (code)
- memcpy(combiner->errorCode, code, 5);
- }
-
- if (!combiner->errorDetail && detail != NULL)
- {
- combiner->errorDetail = pstrdup(detail);
- }
-#endif
-
/*
* If Datanode have sent ErrorResponse it will never send CommandComplete.
* Increment the counter to prevent endless waiting for it.
@@ -1173,13 +809,8 @@ HandleCmdComplete(CmdType commandType, CombineTag *combine,
/*
* HandleDatanodeCommandId ('M') message from a Datanode connection
*/
-#ifdef XCP
static void
HandleDatanodeCommandId(ResponseCombiner *combiner, char *msg_body, size_t len)
-#else
-static void
-HandleDatanodeCommandId(RemoteQueryState *combiner, char *msg_body, size_t len)
-#endif
{
uint32 n32;
CommandId cid;
@@ -1226,11 +857,7 @@ HandleWaitXids(char *msg_body, size_t len)
* successfully
*/
static bool
-#ifdef XCP
validate_combiner(ResponseCombiner *combiner)
-#else
-validate_combiner(RemoteQueryState *combiner)
-#endif
{
/* There was error message while combining */
if (combiner->errorMessage)
@@ -1269,7 +896,6 @@ validate_combiner(RemoteQueryState *combiner)
/*
* Close combiner and free allocated memory, if it is not needed
*/
-#ifdef XCP
void
CloseCombiner(ResponseCombiner *combiner)
{
@@ -1290,46 +916,12 @@ CloseCombiner(ResponseCombiner *combiner)
if (combiner->tapemarks)
pfree(combiner->tapemarks);
}
-#else
-static void
-CloseCombiner(RemoteQueryState *combiner)
-{
- if (combiner)
- {
- if (combiner->connections)
- pfree(combiner->connections);
- if (combiner->tuple_desc)
- {
- /*
- * In the case of a remote COPY with tuplestore, combiner is not
- * responsible for freeing the tuple store. This is done at an upper
- * level once data redistribution is completed.
- */
- if (combiner->remoteCopyType != REMOTE_COPY_TUPLESTORE)
- FreeTupleDesc(combiner->tuple_desc);
- }
- if (combiner->errorMessage)
- pfree(combiner->errorMessage);
- if (combiner->errorDetail)
- pfree(combiner->errorDetail);
- if (combiner->cursor_connections)
- pfree(combiner->cursor_connections);
- if (combiner->tapenodes)
- pfree(combiner->tapenodes);
- pfree(combiner);
- }
-}
-#endif
/*
* Validate combiner and release storage freeing allocated memory
*/
static bool
-#ifdef XCP
ValidateAndCloseCombiner(ResponseCombiner *combiner)
-#else
-ValidateAndCloseCombiner(RemoteQueryState *combiner)
-#endif
{
bool valid = validate_combiner(combiner);
@@ -1354,7 +946,6 @@ ValidateAndCloseCombiner(RemoteQueryState *combiner)
* points to the original RemoteQueryState. If combiner differs from "this" the
* connection should be buffered.
*/
-#ifdef XCP
void
BufferConnection(PGXCNodeHandle *conn)
{
@@ -1520,111 +1111,11 @@ BufferConnection(PGXCNodeHandle *conn)
MemoryContextSwitchTo(oldcontext);
conn->combiner = NULL;
}
-#else
-void
-BufferConnection(PGXCNodeHandle *conn)
-{
- RemoteQueryState *combiner = conn->combiner;
- MemoryContext oldcontext;
-
- if (combiner == NULL || conn->state != DN_CONNECTION_STATE_QUERY)
- return;
-
- /*
- * When BufferConnection is invoked CurrentContext is related to other
- * portal, which is trying to control the connection.
- * TODO See if we can find better context to switch to
- */
- oldcontext = MemoryContextSwitchTo(combiner->ss.ss_ScanTupleSlot->tts_mcxt);
-
- /* Verify the connection is in use by the combiner */
- combiner->current_conn = 0;
- while (combiner->current_conn < combiner->conn_count)
- {
- if (combiner->connections[combiner->current_conn] == conn)
- break;
- combiner->current_conn++;
- }
- Assert(combiner->current_conn < combiner->conn_count);
-
- /*
- * Buffer data rows until Datanode return number of rows specified by the
- * fetch_size parameter of last Execute message (PortalSuspended message)
- * or end of result set is reached (CommandComplete message)
- */
- while (conn->state == DN_CONNECTION_STATE_QUERY)
- {
- int res;
-
- /* Move to buffer currentRow (received from the Datanode) */
- if (combiner->currentRow.msg)
- {
- RemoteDataRow dataRow = (RemoteDataRow) palloc(sizeof(RemoteDataRowData));
- *dataRow = combiner->currentRow;
- combiner->currentRow.msg = NULL;
- combiner->currentRow.msglen = 0;
- combiner->currentRow.msgnode = 0;
- combiner->rowBuffer = lappend(combiner->rowBuffer, dataRow);
- }
-
- res = handle_response(conn, combiner);
- /*
- * If response message is a DataRow it will be handled on the next
- * iteration.
- * PortalSuspended will cause a connection state change and break the loop.
- * The same holds for CommandComplete, but we need additional handling:
- * remove the connection from the list of active connections.
- * We may need to add handling of error responses.
- */
- if (res == RESPONSE_EOF)
- {
- /* incomplete message, read more */
- if (pgxc_node_receive(1, &conn, NULL))
- {
- conn->state = DN_CONNECTION_STATE_ERROR_FATAL;
- add_error_message(conn, "Failed to fetch from Datanode");
- }
- }
- else if (res == RESPONSE_COMPLETE)
- {
- /*
- * End of result set is reached, so either set the pointer to the
- * connection to NULL (step with sort) or remove it from the list
- * (step without sort)
- */
- if (combiner->tuplesortstate)
- {
- combiner->connections[combiner->current_conn] = NULL;
- if (combiner->tapenodes == NULL)
- combiner->tapenodes = (int*) palloc0(NumDataNodes * sizeof(int));
- combiner->tapenodes[combiner->current_conn] =
- PGXCNodeGetNodeId(conn->nodeoid,
- PGXC_NODE_DATANODE);
- }
- else
- /* Remove current connection, move last in-place, adjust current_conn */
- if (combiner->current_conn < --combiner->conn_count)
- combiner->connections[combiner->current_conn] = combiner->connections[combiner->conn_count];
- else
- combiner->current_conn = 0;
- }
- /*
- * Before returning RESPONSE_COMPLETE or PORTAL_SUSPENDED handle_response()
- * changes the connection state to DN_CONNECTION_STATE_IDLE, breaking the
- * loop. We do not need to do anything specific in case of
- * PORTAL_SUSPENDED, so we skip the "else if" block for that case.
- */
- }
- MemoryContextSwitchTo(oldcontext);
- conn->combiner = NULL;
-}
-#endif
/*
* copy the datarow from combiner to the given slot, in the slot's memory
* context
*/
-#ifdef XCP
static void
CopyDataRowTupleToSlot(ResponseCombiner *combiner, TupleTableSlot *slot)
{
@@ -1640,26 +1131,8 @@ CopyDataRowTupleToSlot(ResponseCombiner *combiner, TupleTableSlot *slot)
combiner->currentRow = NULL;
MemoryContextSwitchTo(oldcontext);
}
-#else
-static void
-CopyDataRowTupleToSlot(RemoteQueryState *combiner, TupleTableSlot *slot)
-{
- char *msg;
- MemoryContext oldcontext;
- oldcontext = MemoryContextSwitchTo(slot->tts_mcxt);
- msg = (char *)palloc(combiner->currentRow.msglen);
- memcpy(msg, combiner->currentRow.msg, combiner->currentRow.msglen);
- ExecStoreDataRowTuple(msg, combiner->currentRow.msglen, slot, true);
- pfree(combiner->currentRow.msg);
- combiner->currentRow.msg = NULL;
- combiner->currentRow.msglen = 0;
- combiner->currentRow.msgnode = 0;
- MemoryContextSwitchTo(oldcontext);
-}
-#endif
-#ifdef XCP
/*
* FetchTuple
*
@@ -2028,167 +1501,14 @@ FetchTuple(ResponseCombiner *combiner)
return NULL;
}
-#else
-/*
- * Get next data row from the combiner's buffer into the provided slot.
- * Just clear the slot and return false if the buffer is empty; that means
- * the end of the result set is reached.
- */
-bool
-FetchTuple(RemoteQueryState *combiner, TupleTableSlot *slot)
-{
- bool have_tuple = false;
-
- /* If we have message in the buffer, consume it */
- if (combiner->currentRow.msg)
- {
- CopyDataRowTupleToSlot(combiner, slot);
- have_tuple = true;
- }
-
- /*
- * If this is an ordered fetch we can not know which node to handle
- * next, so the sorter will choose the next one itself and set it as
- * currentRow to have it consumed on the next call to FetchTuple.
- * Otherwise allow prefetching the next tuple.
- */
- if (((RemoteQuery *)combiner->ss.ps.plan)->sort)
- return have_tuple;
-
- /*
- * Note: If we are fetching not sorted results we can not have both
- * currentRow and buffered rows. When connection is buffered currentRow
- * is moved to buffer, and then it is cleaned after buffering is
- * completed. Afterwards rows will be taken from the buffer bypassing
- * currentRow until buffer is empty, and only after that data are read
- * from a connection.
- * PGXCTODO: the message should be allocated in the same memory context as
- * that of the slot. Are we sure of that in the call to
- * ExecStoreDataRowTuple below? If one fixes this memory issue, please
- * consider using CopyDataRowTupleToSlot() for the same.
- */
- if (list_length(combiner->rowBuffer) > 0)
- {
- RemoteDataRow dataRow = (RemoteDataRow) linitial(combiner->rowBuffer);
- combiner->rowBuffer = list_delete_first(combiner->rowBuffer);
- ExecStoreDataRowTuple(dataRow->msg, dataRow->msglen, slot, true);
- pfree(dataRow);
- return true;
- }
-
- while (combiner->conn_count > 0)
- {
- int res;
- PGXCNodeHandle *conn = combiner->connections[combiner->current_conn];
-
- /* Going to use a connection, buffer it if needed */
- if (conn->state == DN_CONNECTION_STATE_QUERY && conn->combiner != NULL
- && conn->combiner != combiner)
- BufferConnection(conn);
-
- /*
- * If current connection is idle it means portal on the Datanode is
- * suspended. If we have a tuple do not hurry to request more rows,
- * leave connection clean for other RemoteQueries.
- * If we do not have, request more and try to get it
- */
- if (conn->state == DN_CONNECTION_STATE_IDLE)
- {
- /*
- * If we have tuple to return do not hurry to request more, keep
- * connection clean
- */
- if (have_tuple)
- return true;
- else
- {
- if (pgxc_node_send_execute(conn, combiner->cursor, 1) != 0)
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Failed to fetch from Datanode")));
- if (pgxc_node_send_sync(conn) != 0)
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Failed to fetch from Datanode")));
- if (pgxc_node_receive(1, &conn, NULL))
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Failed to fetch from Datanode")));
- conn->combiner = combiner;
- }
- }
-
- /* read messages */
- res = handle_response(conn, combiner);
- if (res == RESPONSE_EOF)
- {
- /* incomplete message, read more */
- if (pgxc_node_receive(1, &conn, NULL))
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Failed to fetch from Datanode")));
- continue;
- }
- else if (res == RESPONSE_SUSPENDED)
- {
- /* Make next connection current */
- if (++combiner->current_conn >= combiner->conn_count)
- combiner->current_conn = 0;
- }
- else if (res == RESPONSE_COMPLETE)
- {
- /* Remove current connection, move last in-place, adjust current_conn */
- if (combiner->current_conn < --combiner->conn_count)
- combiner->connections[combiner->current_conn] = combiner->connections[combiner->conn_count];
- else
- combiner->current_conn = 0;
- }
- else if (res == RESPONSE_DATAROW && have_tuple)
- {
- /*
- * We already have a tuple and received another one, leave it till
- * next fetch
- */
- return true;
- }
-
- /* If we have message in the buffer, consume it */
- if (combiner->currentRow.msg)
- {
- CopyDataRowTupleToSlot(combiner, slot);
- have_tuple = true;
- }
-
- /*
- * If this is an ordered fetch we can not know which node to handle
- * next, so the sorter will choose the next one itself and set it as
- * currentRow to have it consumed on the next call to FetchTuple.
- * Otherwise allow prefetching the next tuple.
- */
- if (((RemoteQuery *)combiner->ss.ps.plan)->sort)
- return have_tuple;
- }
-
- /* report end of data to the caller */
- if (!have_tuple)
- ExecClearTuple(slot);
-
- return have_tuple;
-}
-#endif
/*
* Handle responses from the Datanode connections
*/
static int
-#ifdef XCP
pgxc_node_receive_responses(const int conn_count, PGXCNodeHandle ** connections,
struct timeval * timeout, ResponseCombiner *combiner)
-#else
-pgxc_node_receive_responses(const int conn_count, PGXCNodeHandle ** connections,
- struct timeval * timeout, RemoteQueryState *combiner)
-#endif
{
int count = conn_count;
PGXCNodeHandle *to_receive[conn_count];
@@ -2217,14 +1537,12 @@ pgxc_node_receive_responses(const int conn_count, PGXCNodeHandle ** connections,
i++;
break;
case RESPONSE_COMPLETE:
-#ifdef XCP
if (to_receive[i]->state != DN_CONNECTION_STATE_ERROR_FATAL)
/* Continue read until ReadyForQuery */
break;
/* fallthru */
case RESPONSE_READY:
/* fallthru */
-#endif
case RESPONSE_COPY:
/* Handling is done, do not track this connection */
count--;
@@ -2232,14 +1550,12 @@ pgxc_node_receive_responses(const int conn_count, PGXCNodeHandle ** connections,
if (i < count)
to_receive[i] = to_receive[count];
break;
-#ifdef XCP
case RESPONSE_ERROR:
/* no handling needed, just wait for ReadyForQuery */
break;
case RESPONSE_WAITXIDS:
break;
-#endif
default:
/* Inconsistent responses */
add_error_message(to_receive[i], "Unexpected response from the Datanodes");
@@ -2251,14 +1567,10 @@ pgxc_node_receive_responses(const int conn_count, PGXCNodeHandle ** connections,
}
}
}
-#ifndef XCP
- pgxc_node_report_error(combiner);
-#endif
return 0;
}
-#ifdef XCP
/*
* Read next message from the connection and update the combiner
* and connection state accordingly
@@ -2402,157 +1714,9 @@ handle_response(PGXCNodeHandle *conn, ResponseCombiner *combiner)
return RESPONSE_BARRIER_OK;
case 'I': /* EmptyQuery */
return RESPONSE_COMPLETE;
-#ifdef XCP
case 'W':
HandleWaitXids(msg, msg_len);
return RESPONSE_WAITXIDS;
-#endif
- default:
- /* sync lost? */
- elog(WARNING, "Received unsupported message type: %c", msg_type);
- conn->state = DN_CONNECTION_STATE_ERROR_FATAL;
- /* stop reading */
- return RESPONSE_COMPLETE;
- }
- }
- /* never happen, but keep compiler quiet */
- return RESPONSE_EOF;
-}
-#else
-/*
- * Read the next message from the connection and update the combiner accordingly.
- * If we are in an error state we just consume the messages and do not proxy.
- * Long term, we should look into cancelling executing statements
- * and closing the connections.
- * Return values:
- * RESPONSE_EOF - need to receive more data for the connection
- * RESPONSE_COMPLETE - done with the connection
- * RESPONSE_TUPLEDESC - got tuple description
- * RESPONSE_DATAROW - got data row
- * RESPONSE_COPY - got copy response
- * RESPONSE_BARRIER_OK - barrier command completed successfully
- */
-int
-handle_response(PGXCNodeHandle * conn, RemoteQueryState *combiner)
-{
- char *msg;
- int msg_len;
- char msg_type;
-
- for (;;)
- {
- Assert(conn->state != DN_CONNECTION_STATE_IDLE);
-
- /*
- * If we are in the process of shutting down, we
- * may be rolling back, and the buffer may contain other messages.
- * We want to avoid a procarray exception
- * as well as an error stack overflow.
- */
- if (proc_exit_inprogress)
- conn->state = DN_CONNECTION_STATE_ERROR_FATAL;
-
- /* don't read from the connection if there is a fatal error */
- if (conn->state == DN_CONNECTION_STATE_ERROR_FATAL)
- return RESPONSE_COMPLETE;
-
- /* No data available, exit */
- if (!HAS_MESSAGE_BUFFERED(conn))
- return RESPONSE_EOF;
-
- Assert(conn->combiner == combiner || conn->combiner == NULL);
-
- /* TODO handle other possible responses */
- msg_type = get_message(conn, &msg_len, &msg);
- switch (msg_type)
- {
- case '\0': /* Not enough data in the buffer */
- return RESPONSE_EOF;
- case 'c': /* CopyToCommandComplete */
- HandleCopyOutComplete(combiner);
- break;
- case 'C': /* CommandComplete */
- HandleCommandComplete(combiner, msg, msg_len);
- break;
- case 'T': /* RowDescription */
-#ifdef DN_CONNECTION_DEBUG
- Assert(!conn->have_row_desc);
- conn->have_row_desc = true;
-#endif
- if (HandleRowDescription(combiner, msg, msg_len))
- return RESPONSE_TUPDESC;
- break;
- case 'D': /* DataRow */
-#ifdef DN_CONNECTION_DEBUG
- Assert(conn->have_row_desc);
-#endif
- HandleDataRow(combiner, msg, msg_len, PGXCNodeGetNodeId(conn->nodeoid,
- PGXC_NODE_DATANODE));
- return RESPONSE_DATAROW;
- case 's': /* PortalSuspended */
- suspended = true;
- break;
- case '1': /* ParseComplete */
- case '2': /* BindComplete */
- case '3': /* CloseComplete */
- case 'n': /* NoData */
- /* simple notifications, continue reading */
- break;
- case 'G': /* CopyInResponse */
- conn->state = DN_CONNECTION_STATE_COPY_IN;
- HandleCopyIn(combiner);
- /* Done, return to caller to let it know the data can be passed in */
- return RESPONSE_COPY;
- case 'H': /* CopyOutResponse */
- conn->state = DN_CONNECTION_STATE_COPY_OUT;
- HandleCopyOut(combiner);
- return RESPONSE_COPY;
- case 'd': /* CopyOutDataRow */
- conn->state = DN_CONNECTION_STATE_COPY_OUT;
- HandleCopyDataRow(combiner, msg, msg_len);
- break;
- case 'E': /* ErrorResponse */
- HandleError(combiner, msg, msg_len);
- add_error_message(conn, combiner->errorMessage);
- /*
- * Do not return with an error, we still need to consume Z,
- * ready-for-query
- */
- break;
- case 'A': /* NotificationResponse */
- case 'N': /* NoticeResponse */
- case 'S': /* SetCommandComplete */
- /*
- * Ignore these to prevent multiple messages, one from each
- * node. Coordinator will send one for DDL anyway
- */
- break;
- case 'Z': /* ReadyForQuery */
- {
- /*
- * The return value depends on the previous connection state.
- * If it was PORTAL_SUSPENDED the coordinator wants to send down
- * another EXECUTE to fetch more rows, otherwise it is done
- * with the connection
- */
- int result = suspended ? RESPONSE_SUSPENDED : RESPONSE_COMPLETE;
- conn->transaction_status = msg[0];
- conn->state = DN_CONNECTION_STATE_IDLE;
- conn->combiner = NULL;
-#ifdef DN_CONNECTION_DEBUG
- conn->have_row_desc = false;
-#endif
- return result;
- }
- case 'M': /* Command Id */
- HandleDatanodeCommandId(combiner, msg, msg_len);
- break;
- case 'b':
- {
- conn->state = DN_CONNECTION_STATE_IDLE;
- return RESPONSE_BARRIER_OK;
- }
- case 'I': /* EmptyQuery */
default:
/* sync lost? */
elog(WARNING, "Received unsupported message type: %c", msg_type);
@@ -2564,8 +1728,6 @@ handle_response(PGXCNodeHandle * conn, RemoteQueryState *combiner)
/* never happen, but keep compiler quiet */
return RESPONSE_EOF;
}
-#endif
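-/*
- * A minimal caller sketch (illustration only): drive handle_response() in a
- * loop, reading more data from the socket whenever it returns RESPONSE_EOF
- * and stopping on RESPONSE_COMPLETE, in the same way do_query() below handles
- * the primary connection:
- *
- *     for (;;)
- *     {
- *         int res;
- *
- *         if (pgxc_node_receive(1, &conn, NULL))
- *             break;
- *         res = handle_response(conn, combiner);
- *         if (res == RESPONSE_COMPLETE)
- *             break;
- *         else if (res == RESPONSE_EOF)
- *             continue;
- *     }
- */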
-
/*
* Has the data node sent Ready For Query
@@ -2617,40 +1779,6 @@ is_data_node_ready(PGXCNodeHandle * conn)
}
-#ifndef XCP
-/*
- * Construct a BEGIN TRANSACTION command after taking into account the
- * current options. The returned string is not palloced and is valid only until
- * the next call to the function.
- */
-static char *
-generate_begin_command(void)
-{
- static char begin_cmd[1024];
- const char *read_only;
- const char *isolation_level;
-
- /*
- * First get the READ ONLY status because the next call to GetConfigOption
- * will overwrite the return buffer
- */
- if (strcmp(GetConfigOption("transaction_read_only", false, false), "on") == 0)
- read_only = "READ ONLY";
- else
- read_only = "READ WRITE";
-
- /* Now get the isolation_level for the transaction */
- isolation_level = GetConfigOption("transaction_isolation", false, false);
- if (strcmp(isolation_level, "default") == 0)
- isolation_level = GetConfigOption("default_transaction_isolation", false, false);
-
- /* Finally build a START TRANSACTION command */
- sprintf(begin_cmd, "START TRANSACTION ISOLATION LEVEL %s %s", isolation_level, read_only);
-
- return begin_cmd;
-}
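-/*
- * For illustration (a sketch): with the default settings
- * transaction_isolation = 'read committed' and transaction_read_only = off,
- * the command built above would be
- *
- *     START TRANSACTION ISOLATION LEVEL read committed READ WRITE
- */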
-#endif
-
/*
* Send BEGIN command to the Datanodes or Coordinators and receive responses.
* Also send the GXID for the transaction.
@@ -2662,20 +1790,11 @@ pgxc_node_begin(int conn_count, PGXCNodeHandle **connections,
{
int i;
struct timeval *timeout = NULL;
-#ifdef XCP
ResponseCombiner combiner;
-#else
- RemoteQueryState *combiner;
-#endif
TimestampTz timestamp = GetCurrentGTMStartTimestamp();
PGXCNodeHandle *new_connections[conn_count];
int new_count = 0;
-#ifdef XCP
char *init_str;
-#else
- int con[conn_count];
- int j = 0;
-#endif
/*
* If no remote connections, we don't have anything to do
@@ -2685,27 +1804,8 @@ pgxc_node_begin(int conn_count, PGXCNodeHandle **connections,
for (i = 0; i < conn_count; i++)
{
-#ifdef XCP
if (!readOnly && !IsConnFromDatanode())
- {
connections[i]->read_only = false;
- }
-#else
- /*
- * If the node is already a participant in the transaction, skip it
- */
- if (list_member(XactReadNodes, connections[i]) ||
- list_member(XactWriteNodes, connections[i]))
- {
- /*
- * If we are doing a write operation, we may need to shift the node
- * to the write-list. RegisterTransactionNodes does that for us
- */
- if (!readOnly)
- RegisterTransactionNodes(1, (void **)&connections[i], true);
- continue;
- }
-#endif
/*
* PGXC TODO - A connection should not be in DN_CONNECTION_STATE_QUERY
* state when we are about to send a BEGIN TRANSACTION command to the
@@ -2723,42 +1823,17 @@ pgxc_node_begin(int conn_count, PGXCNodeHandle **connections,
if (GlobalTimestampIsValid(timestamp) && pgxc_node_send_timestamp(connections[i], timestamp))
return EOF;
-#ifdef XCP
if (IS_PGXC_DATANODE && GlobalTransactionIdIsValid(gxid))
need_tran_block = true;
else if (IS_PGXC_REMOTE_COORDINATOR)
need_tran_block = false;
/* Send BEGIN if not already in transaction */
if (need_tran_block && connections[i]->transaction_status == 'I')
-#else
- /* Send BEGIN */
- if (need_tran_block)
-#endif
{
/* Send the BEGIN TRANSACTION command and check for errors */
-#ifdef XCP
if (pgxc_node_send_query(connections[i], "BEGIN"))
return EOF;
-#else
- if (pgxc_node_send_query(connections[i], generate_begin_command()))
- return EOF;
-#endif
-#ifndef XCP
- con[j++] = PGXCNodeGetNodeId(connections[i]->nodeoid, node_type);
- /*
- * Register the node as a participant in the transaction. The
- * caller should tell us if the node may do any write activity
- *
- * XXX This is a bit tricky since it would be difficult to know if a
- * statement has any side effect on the Datanode. So a SELECT
- * statement may invoke a function on the Datanode which may end up
- * modifying the data at the Datanode. We can possibly rely on the
- * function qualification to decide if a statement is a read-only or a
- * read-write statement.
- */
- RegisterTransactionNodes(1, (void **)&connections[i], !readOnly);
-#endif
new_connections[new_count++] = connections[i];
}
}
@@ -2770,7 +1845,6 @@ pgxc_node_begin(int conn_count, PGXCNodeHandle **connections,
if (new_count == 0)
return 0;
-#ifdef XCP
InitResponseCombiner(&combiner, new_count, COMBINE_TYPE_NONE);
/*
* Make sure there are zeroes in unused fields
@@ -2794,41 +1868,12 @@ pgxc_node_begin(int conn_count, PGXCNodeHandle **connections,
pgxc_node_set_query(new_connections[i], init_str);
}
}
-#else
- combiner = CreateResponseCombiner(new_count, COMBINE_TYPE_NONE);
-
- /* Receive responses */
- if (pgxc_node_receive_responses(new_count, new_connections, timeout, combiner))
- return EOF;
-
- /* Verify status */
- if (!ValidateAndCloseCombiner(combiner))
- return EOF;
-
- /*
- * Ask the pooler to send commands (if any) to nodes involved in the transaction to alter the
- * behavior of the current transaction. This fires all transaction-level commands before
- * issuing any DDL, DML or SELECT within the current transaction block.
- */
- if (GetCurrentLocalParamStatus())
- {
- int res;
- if (node_type == PGXC_NODE_DATANODE)
- res = PoolManagerSendLocalCommand(j, con, 0, NULL);
- else
- res = PoolManagerSendLocalCommand(0, NULL, j, con);
-
- if (res != 0)
- return EOF;
- }
-#endif
/* No problem, let's get going */
return 0;
}
-#ifdef XCP
/*
* Execute the DISCARD ALL command on all allocated nodes to remove all
* session-specific state before releasing them to the pool for reuse by other sessions.
@@ -3528,524 +2573,10 @@ pgxc_node_remote_abort(void)
pfree_pgxc_all_handles(handles);
}
-#else
-
-/*
- * Prepare all remote nodes involved in this transaction. The local node is
- * handled separately and prepared first in xact.c. If there is any error
- * during this phase, it will be reported via ereport() and the transaction
- * will be aborted on the local as well as remote nodes
- *
- * prepareGID is created and passed from xact.c
- */
-static bool
-pgxc_node_remote_prepare(char *prepareGID)
-{
- int result = 0;
- int write_conn_count = remoteXactState.numWriteRemoteNodes;
- char prepare_cmd[256];
- int i;
- PGXCNodeHandle **connections = remoteXactState.remoteNodeHandles;
- RemoteQueryState *combiner = NULL;
-
- /*
- * If there is NO write activity or the caller does not want us to run a
- * 2PC protocol, we don't need to do anything special
- */
- if ((write_conn_count == 0) || (prepareGID == NULL))
- return false;
-
- SetSendCommandId(false);
-
- /* Save the prepareGID in the global state information */
- sprintf(remoteXactState.prepareGID, "%s", prepareGID);
-
- /* Generate the PREPARE TRANSACTION command */
- sprintf(prepare_cmd, "PREPARE TRANSACTION '%s'", remoteXactState.prepareGID);
-
- for (i = 0; i < write_conn_count; i++)
- {
- /*
- * PGXCTODO - We should actually make sure that the connection state is
- * IDLE when we reach here. The executor should have guaranteed that
- * before the transaction gets to the commit point. For now, consume
- * the pending data on the connection
- */
- if (connections[i]->state != DN_CONNECTION_STATE_IDLE)
- BufferConnection(connections[i]);
-
- /* Clean the previous errors, if any */
- connections[i]->error = NULL;
-
- /*
- * Now we are ready to PREPARE the transaction. Any error at this point
- * can be safely ereport-ed and the transaction will be aborted.
- */
- if (pgxc_node_send_query(connections[i], prepare_cmd))
- {
- remoteXactState.remoteNodeStatus[i] = RXACT_NODE_PREPARE_FAILED;
- remoteXactState.status = RXACT_PREPARE_FAILED;
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("failed to send PREPARE TRANSACTION command to "
- "the node %u", connections[i]->nodeoid)));
- }
- else
- {
- remoteXactState.remoteNodeStatus[i] = RXACT_NODE_PREPARE_SENT;
- /* Let HandleCommandComplete know response checking is enabled */
- connections[i]->ck_resp_rollback = RESP_ROLLBACK_CHECK;
- }
- }
-
- /*
- * Receive and check for any errors. In case of errors, we don't bail out
- * just yet. We first go through the list of connections and look for
- * errors on each connection. This is important to ensure that we run
- * an appropriate ROLLBACK command later on (prepared transactions must be
- * rolled back with ROLLBACK PREPARED commands).
- *
- * PGXCTODO - There doesn't seem to be a solid mechanism to track errors on
- * individual connections. The transaction_status field doesn't get set
- * every time there is an error on the connection. The combiner mechanism is
- * good for parallel processing, but I think we should have a leak-proof
- * mechanism to track connection status
- */
- if (write_conn_count)
- {
- combiner = CreateResponseCombiner(write_conn_count, COMBINE_TYPE_NONE);
- /* Receive responses */
- result = pgxc_node_receive_responses(write_conn_count, connections, NULL, combiner);
- if (result || !validate_combiner(combiner))
- result = EOF;
- else
- {
- CloseCombiner(combiner);
- combiner = NULL;
- }
-
- for (i = 0; i < write_conn_count; i++)
- {
- if (remoteXactState.remoteNodeStatus[i] == RXACT_NODE_PREPARE_SENT)
- {
- if (connections[i]->error)
- {
- remoteXactState.remoteNodeStatus[i] = RXACT_NODE_PREPARE_FAILED;
- remoteXactState.status = RXACT_PREPARE_FAILED;
- }
- else
- {
- /* Did we receive ROLLBACK in response to PREPARE TRANSACTION? */
- if (connections[i]->ck_resp_rollback == RESP_ROLLBACK_RECEIVED)
- {
- /* If yes, it means PREPARE TRANSACTION failed */
- remoteXactState.remoteNodeStatus[i] = RXACT_NODE_PREPARE_FAILED;
- remoteXactState.status = RXACT_PREPARE_FAILED;
- result = 0;
- }
- else
- {
- remoteXactState.remoteNodeStatus[i] = RXACT_NODE_PREPARED;
- }
- }
- }
- }
- }
-
- /*
- * If we failed to PREPARE on one or more nodes, report an error and let
- * the normal abort processing take charge of aborting the transaction
- */
- if (result)
- {
- remoteXactState.status = RXACT_PREPARE_FAILED;
- if (combiner)
- pgxc_node_report_error(combiner);
- else
- elog(ERROR, "failed to PREPARE transaction on one or more nodes");
- }
-
- if (remoteXactState.status == RXACT_PREPARE_FAILED)
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Failed to PREPARE the transaction on one or more nodes")));
-
- /* Everything went OK. */
- remoteXactState.status = RXACT_PREPARED;
- return result;
-}
-
-/*
- * Commit a running or a previously PREPARED transaction on the remote nodes.
- * The local transaction is handled separately in xact.c
- *
- * Once a COMMIT command is sent to any node, the transaction must finally
- * be committed. But we still report errors via ereport and let
- * AbortTransaction take care of handling partly committed transactions.
- *
- * For 2PC transactions: If the local node is involved in the transaction, it's
- * already prepared locally and we are in a context of a different transaction
- * (we call it the auxiliary transaction) already. So AbortTransaction will
- * actually abort the auxiliary transaction, which is OK. OTOH if the local
- * node is not involved in the main transaction, then we don't care much if it's
- * rolled back on the local node as part of abort processing.
- *
- * When 2PC is not used for reasons such as the transaction having accessed
- * some temporary objects, we are already exposed to the risk of committing it
- * on one node and aborting on some other node. So such cases need not get more
- * attention.
- */
-static void
-pgxc_node_remote_commit(void)
-{
- int result = 0;
- char commitPrepCmd[256];
- char commitCmd[256];
- int write_conn_count = remoteXactState.numWriteRemoteNodes;
- int read_conn_count = remoteXactState.numReadRemoteNodes;
- PGXCNodeHandle **connections = remoteXactState.remoteNodeHandles;
- PGXCNodeHandle *new_connections[write_conn_count + read_conn_count];
- int new_conn_count = 0;
- int i;
- RemoteQueryState *combiner = NULL;
-
- /*
- * We must handle reader and writer connections both since the transaction
- * must be closed even on a read-only node
- */
- if (read_conn_count + write_conn_count == 0)
- return;
-
- SetSendCommandId(false);
-
- /*
- * Barrier:
- *
- * We should acquire the BarrierLock in SHARE mode here to ensure that
- * there are no in-progress barriers at this point. This mechanism would
- * work as long as the LWLock mechanism does not starve an EXCLUSIVE lock
- * requester
- */
- LWLockAcquire(BarrierLock, LW_SHARED);
-
- /*
- * The readers can be committed with a simple COMMIT command. We still need
- * this to close the transaction block
- */
- sprintf(commitCmd, "COMMIT TRANSACTION");
-
- /*
- * If we are running 2PC, construct a COMMIT command to commit the prepared
- * transactions
- */
- if (remoteXactState.status == RXACT_PREPARED)
- {
- sprintf(commitPrepCmd, "COMMIT PREPARED '%s'", remoteXactState.prepareGID);
- /*
- * If the local node is involved in the transaction, we would have
- * already prepared it and started a new transaction. We can use the
- * GXID of the new transaction to run the COMMIT PREPARED commands.
- * So get an auxiliary GXID only if the local node is not involved
- */
-
- if (!GlobalTransactionIdIsValid(remoteXactState.commitXid))
- remoteXactState.commitXid = GetAuxilliaryTransactionId();
- }
-
- /*
- * First send GXID if necessary. If there is an error at this stage, the
- * transaction can be aborted safely because we haven't yet sent COMMIT
- * command to any participant
- */
- for (i = 0; i < write_conn_count + read_conn_count; i++)
- {
- if (remoteXactState.remoteNodeStatus[i] == RXACT_NODE_PREPARED)
- {
- Assert(GlobalTransactionIdIsValid(remoteXactState.commitXid));
- if (pgxc_node_send_gxid(connections[i], remoteXactState.commitXid))
- {
- remoteXactState.status = RXACT_COMMIT_FAILED;
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("failed to send GXID for COMMIT PREPARED "
- "command")));
- }
- }
- }
-
- /*
- * Now send the COMMIT command to all the participants
- */
- for (i = 0; i < write_conn_count + read_conn_count; i++)
- {
- const char *command;
-
- Assert(remoteXactState.remoteNodeStatus[i] == RXACT_NODE_PREPARED ||
- remoteXactState.remoteNodeStatus[i] == RXACT_NODE_NONE);
-
- if (remoteXactState.remoteNodeStatus[i] == RXACT_NODE_PREPARED)
- command = commitPrepCmd;
- else
- command = commitCmd;
-
- /* Clean the previous errors, if any */
- connections[i]->error = NULL;
-
- if (pgxc_node_send_query(connections[i], command))
- {
- remoteXactState.remoteNodeStatus[i] = RXACT_NODE_COMMIT_FAILED;
- remoteXactState.status = RXACT_COMMIT_FAILED;
-
- /*
- * If the error occurred on the first connection, we still have a
- * chance to abort the whole transaction. We prefer that because
- * it reduces the need for any manual intervention at least until
- * we have an automatic mechanism to resolve in-doubt transactions
- *
- * XXX We can ideally check for first writer connection, but keep
- * it simple for now
- */
- if (i == 0)
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("failed to send COMMIT command to node")));
- else
- add_error_message(connections[i], "failed to send COMMIT "
- "command to node");
- }
- else
- {
- remoteXactState.remoteNodeStatus[i] = RXACT_NODE_COMMIT_SENT;
- new_connections[new_conn_count++] = connections[i];
- }
- }
-
- /*
- * Release the BarrierLock.
- */
- LWLockRelease(BarrierLock);
-
- if (new_conn_count)
- {
- combiner = CreateResponseCombiner(new_conn_count, COMBINE_TYPE_NONE);
- /* Receive responses */
- result = pgxc_node_receive_responses(new_conn_count, new_connections, NULL, combiner);
- if (result || !validate_combiner(combiner))
- result = EOF;
- else
- {
- CloseCombiner(combiner);
- combiner = NULL;
- }
- /*
- * Even if the command failed on some node, don't throw an error just
- * yet. That gives a chance to look for individual connection status
- * and record appropriate information for later recovery
- *
- * XXX A node once prepared must be able to either COMMIT or ABORT. So a
- * COMMIT can fail only because of either communication error or because
- * the node went down. Even if one node commits, the transaction must be
- * eventually committed on all the nodes.
- */
-
- /* At this point, we must be in one of the following states */
- Assert(remoteXactState.status == RXACT_COMMIT_FAILED ||
- remoteXactState.status == RXACT_PREPARED ||
- remoteXactState.status == RXACT_NONE);
-
- /*
- * Go through every connection and check if COMMIT succeeded or failed on
- * that connection. If the COMMIT has failed on one node, but succeeded on
- * some other, such transactions need special attention (by the
- * administrator for now)
- */
- for (i = 0; i < write_conn_count + read_conn_count; i++)
- {
- if (remoteXactState.remoteNodeStatus[i] == RXACT_NODE_COMMIT_SENT)
- {
- if (connections[i]->error)
- {
- remoteXactState.remoteNodeStatus[i] = RXACT_NODE_COMMIT_FAILED;
- if (remoteXactState.status != RXACT_PART_COMMITTED)
- remoteXactState.status = RXACT_COMMIT_FAILED;
- }
- else
- {
- remoteXactState.remoteNodeStatus[i] = RXACT_NODE_COMMITTED;
- if (remoteXactState.status == RXACT_COMMIT_FAILED)
- remoteXactState.status = RXACT_PART_COMMITTED;
- }
- }
- }
- }
-
- stat_transaction(write_conn_count + read_conn_count);
-
- if (result)
- {
- if (combiner)
- pgxc_node_report_error(combiner);
- else
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Failed to COMMIT the transaction on one or more nodes")));
- }
-
- if (remoteXactState.status == RXACT_COMMIT_FAILED ||
- remoteXactState.status == RXACT_PART_COMMITTED)
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Failed to COMMIT the transaction on one or more nodes")));
-
- remoteXactState.status = RXACT_COMMITTED;
-}
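-/*
- * Status flow implemented above, summarized for reference: nodes start as
- * RXACT_NODE_PREPARED (2PC) or RXACT_NODE_NONE, move to
- * RXACT_NODE_COMMIT_SENT once the COMMIT (PREPARED) command is written, and
- * end up RXACT_NODE_COMMITTED or RXACT_NODE_COMMIT_FAILED. The overall
- * transaction becomes RXACT_COMMITTED only if every node committed; a mix of
- * committed and failed nodes leaves it RXACT_PART_COMMITTED.
- */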
-
-/*
- * Abort the current transaction on the local and remote nodes. If the
- * transaction is prepared on the remote node, we send a ROLLBACK PREPARED
- * command, otherwise a ROLLBACK command is sent.
- *
- * Note that if the local node was involved and prepared successfully, we are
- * running in a separate transaction context right now
- */
-static void
-pgxc_node_remote_abort(void)
-{
- int result = 0;
- char *rollbackCmd = "ROLLBACK TRANSACTION";
- char rollbackPrepCmd[256];
- int write_conn_count = remoteXactState.numWriteRemoteNodes;
- int read_conn_count = remoteXactState.numReadRemoteNodes;
- int i;
- PGXCNodeHandle **connections = remoteXactState.remoteNodeHandles;
- PGXCNodeHandle *new_connections[remoteXactState.numWriteRemoteNodes + remoteXactState.numReadRemoteNodes];
- int new_conn_count = 0;
- RemoteQueryState *combiner = NULL;
-
- SetSendCommandId(false);
-
- /* Send COMMIT/ROLLBACK PREPARED TRANSACTION to the remote nodes */
- for (i = 0; i < write_conn_count + read_conn_count; i++)
- {
- RemoteXactNodeStatus status = remoteXactState.remoteNodeStatus[i];
-
- /* Clean the previous errors, if any */
- connections[i]->error = NULL;
-
- if ((status == RXACT_NODE_PREPARED) ||
- (status == RXACT_NODE_PREPARE_SENT))
- {
- sprintf(rollbackPrepCmd, "ROLLBACK PREPARED '%s'", remoteXactState.prepareGID);
-
- if (!GlobalTransactionIdIsValid(remoteXactState.commitXid))
- remoteXactState.commitXid = GetAuxilliaryTransactionId();
-
- if (pgxc_node_send_gxid(connections[i], remoteXactState.commitXid))
- {
- add_error_message(connections[i], "failed to send GXID for "
- "ROLLBACK PREPARED command");
- remoteXactState.remoteNodeStatus[i] = RXACT_NODE_ABORT_FAILED;
- remoteXactState.status = RXACT_ABORT_FAILED;
-
- }
- else if (pgxc_node_send_query(connections[i], rollbackPrepCmd))
- {
- add_error_message(connections[i], "failed to send ROLLBACK PREPARED "
- "TRANSACTION command to node");
- remoteXactState.remoteNodeStatus[i] = RXACT_NODE_ABORT_FAILED;
- remoteXactState.status = RXACT_ABORT_FAILED;
- }
- else
- {
- remoteXactState.remoteNodeStatus[i] = RXACT_NODE_ABORT_SENT;
- new_connections[new_conn_count++] = connections[i];
- }
- }
- else
- {
- if (pgxc_node_send_query(connections[i], rollbackCmd))
- {
- add_error_message(connections[i], "failed to send ROLLBACK "
- "TRANSACTION command to node");
- remoteXactState.remoteNodeStatus[i] = RXACT_NODE_ABORT_FAILED;
- remoteXactState.status = RXACT_ABORT_FAILED;
- }
- else
- {
- remoteXactState.remoteNodeStatus[i] = RXACT_NODE_ABORT_SENT;
- new_connections[new_conn_count++] = connections[i];
- }
- }
- }
-
- if (new_conn_count)
- {
- combiner = CreateResponseCombiner(new_conn_count, COMBINE_TYPE_NONE);
- /* Receive responses */
- result = pgxc_node_receive_responses(new_conn_count, new_connections, NULL, combiner);
- if (result || !validate_combiner(combiner))
- result = EOF;
- else
- {
- CloseCombiner(combiner);
- combiner = NULL;
- }
-
- for (i = 0; i < write_conn_count + read_conn_count; i++)
- {
- if (remoteXactState.remoteNodeStatus[i] == RXACT_NODE_ABORT_SENT)
- {
- if (connections[i]->error)
- {
- remoteXactState.remoteNodeStatus[i] = RXACT_NODE_ABORT_FAILED;
- if (remoteXactState.status != RXACT_PART_ABORTED)
- remoteXactState.status = RXACT_ABORT_FAILED;
- elog(LOG, "Failed to ABORT at node %d\nDetail: %s",
- connections[i]->nodeoid, connections[i]->error);
- }
- else
- {
- remoteXactState.remoteNodeStatus[i] = RXACT_NODE_ABORTED;
- if (remoteXactState.status == RXACT_ABORT_FAILED)
- remoteXactState.status = RXACT_PART_ABORTED;
- }
- }
- }
- }
-
- if (result)
- {
- if (combiner)
- pgxc_node_report_error(combiner);
- else
- elog(LOG, "Failed to ABORT an implicitly PREPARED "
- "transaction - result %d", result);
- }
-
- /*
- * Don't ereport because we might already be in abort processing and any
- * error at this point can lead to infinite recursion
- *
- * XXX How do we handle errors reported by internal functions used to
- * communicate with remote nodes ?
- */
- if (remoteXactState.status == RXACT_ABORT_FAILED ||
- remoteXactState.status == RXACT_PART_ABORTED)
- elog(LOG, "Failed to ABORT an implicitly PREPARED transaction "
- "status - %d", remoteXactState.status);
- else
- remoteXactState.status = RXACT_ABORTED;
-
- return;
-}
-#endif
-
-
/*
* Begin COPY command
* The copy_connections array must have room for NumDataNodes items
*/
-#ifdef XCP
void
DataNodeCopyBegin(RemoteCopyData *rcstate)
{
@@ -4164,119 +2695,11 @@ DataNodeCopyBegin(RemoteCopyData *rcstate)
}
pfree(connections);
}
-#else
-PGXCNodeHandle**
-DataNodeCopyBegin(const char *query, List *nodelist, Snapshot snapshot)
-{
- int i;
- int conn_count = list_length(nodelist) == 0 ? NumDataNodes : list_length(nodelist);
- struct timeval *timeout = NULL;
- PGXCNodeAllHandles *pgxc_handles;
- PGXCNodeHandle **connections;
- PGXCNodeHandle **copy_connections;
- ListCell *nodeitem;
- bool need_tran_block;
- GlobalTransactionId gxid;
- RemoteQueryState *combiner;
-
- if (conn_count == 0)
- return NULL;
-
- /* Get needed Datanode connections */
- pgxc_handles = get_handles(nodelist, NULL, false);
- connections = pgxc_handles->datanode_handles;
-
- if (!connections)
- return NULL;
-
- /*
- * If more than one node is involved or if we are already in a
- * transaction block, we must run the remote statements in a transaction block
- */
- need_tran_block = (conn_count > 1) || (TransactionBlockStatusCode() == 'T');
-
- elog(DEBUG1, "conn_count = %d, need_tran_block = %s", conn_count,
- need_tran_block ? "true" : "false");
-
- /*
- * We need to be able to quickly find a connection handle for a specified node number,
- * so store connections in an array where the index is node-1.
- * Unused items in the array should be NULL
- */
- copy_connections = (PGXCNodeHandle **) palloc0(NumDataNodes * sizeof(PGXCNodeHandle *));
- i = 0;
- foreach(nodeitem, nodelist)
- copy_connections[lfirst_int(nodeitem)] = connections[i++];
-
- /* Gather statistics */
- stat_statement();
- stat_transaction(conn_count);
-
- gxid = GetCurrentTransactionId();
-
- if (!GlobalTransactionIdIsValid(gxid))
- {
- pfree_pgxc_all_handles(pgxc_handles);
- pfree(copy_connections);
- return NULL;
- }
-
- /* Start transaction on connections where it is not started */
- if (pgxc_node_begin(conn_count, connections, gxid, need_tran_block, false, PGXC_NODE_DATANODE))
- {
- pfree_pgxc_all_handles(pgxc_handles);
- pfree(copy_connections);
- return NULL;
- }
-
- /* Send query to nodes */
- for (i = 0; i < conn_count; i++)
- {
- if (connections[i]->state == DN_CONNECTION_STATE_QUERY)
- BufferConnection(connections[i]);
-
- if (snapshot && pgxc_node_send_snapshot(connections[i], snapshot))
- {
- add_error_message(connections[i], "Can not send request");
- pfree_pgxc_all_handles(pgxc_handles);
- pfree(copy_connections);
- return NULL;
- }
- if (pgxc_node_send_query(connections[i], query) != 0)
- {
- add_error_message(connections[i], "Can not send request");
- pfree_pgxc_all_handles(pgxc_handles);
- pfree(copy_connections);
- return NULL;
- }
- }
-
- /*
- * We are expecting a CopyIn response, but do not want to send it to the client;
- * the caller should take care of this, because here we do not know if the
- * client runs a console or file copy
- */
- combiner = CreateResponseCombiner(conn_count, COMBINE_TYPE_NONE);
-
- /* Receive responses */
- if (pgxc_node_receive_responses(conn_count, connections, timeout, combiner)
- || !ValidateAndCloseCombiner(combiner))
- {
- DataNodeCopyFinish(connections, -1, COMBINE_TYPE_NONE);
- pfree(connections);
- pfree(copy_connections);
- return NULL;
- }
- pfree(connections);
- return copy_connections;
-}
-#endif
/*
* Send a data row to the specified nodes
*/
-#ifdef XCP
int
DataNodeCopyIn(char *data_row, int len, int conn_count, PGXCNodeHandle** copy_connections)
{
@@ -4354,162 +2777,7 @@ DataNodeCopyIn(char *data_row, int len, int conn_count, PGXCNodeHandle** copy_co
}
return 0;
}
-#else
-int
-DataNodeCopyIn(char *data_row, int len, ExecNodes *exec_nodes, PGXCNodeHandle** copy_connections)
-{
- PGXCNodeHandle *primary_handle = NULL;
- ListCell *nodeitem;
- /* size + data row + \n */
- int msgLen = 4 + len + 1;
- int nLen = htonl(msgLen);
-
- if (exec_nodes->primarynodelist)
- {
- primary_handle = copy_connections[lfirst_int(list_head(exec_nodes->primarynodelist))];
- }
-
- if (primary_handle)
- {
- if (primary_handle->state == DN_CONNECTION_STATE_COPY_IN)
- {
- /* precalculate to speed up access */
- int bytes_needed = primary_handle->outEnd + 1 + msgLen;
- /* flush buffer if it is almost full */
- if (bytes_needed > COPY_BUFFER_SIZE)
- {
- /* First look if the Datanode has sent an error message */
- int read_status = pgxc_node_read_data(primary_handle, true);
- if (read_status == EOF || read_status < 0)
- {
- add_error_message(primary_handle, "failed to read data from Datanode");
- return EOF;
- }
-
- if (primary_handle->inStart < primary_handle->inEnd)
- {
- RemoteQueryState *combiner = CreateResponseCombiner(1, COMBINE_TYPE_NONE);
- handle_response(primary_handle, combiner);
- if (!ValidateAndCloseCombiner(combiner))
- return EOF;
- }
-
- if (DN_CONNECTION_STATE_ERROR(primary_handle))
- return EOF;
-
- if (send_some(primary_handle, primary_handle->outEnd) < 0)
- {
- add_error_message(primary_handle, "failed to send data to Datanode");
- return EOF;
- }
- }
-
- if (ensure_out_buffer_capacity(bytes_needed, primary_handle) != 0)
- {
- ereport(ERROR,
- (errcode(ERRCODE_OUT_OF_MEMORY),
- errmsg("out of memory")));
- }
-
- primary_handle->outBuffer[primary_handle->outEnd++] = 'd';
- memcpy(primary_handle->outBuffer + primary_handle->outEnd, &nLen, 4);
- primary_handle->outEnd += 4;
- memcpy(primary_handle->outBuffer + primary_handle->outEnd, data_row, len);
- primary_handle->outEnd += len;
- primary_handle->outBuffer[primary_handle->outEnd++] = '\n';
- }
- else
- {
- add_error_message(primary_handle, "Invalid Datanode connection");
- return EOF;
- }
- }
-
- foreach(nodeitem, exec_nodes->nodeList)
- {
- PGXCNodeHandle *handle = copy_connections[lfirst_int(nodeitem)];
- if (handle && handle->state == DN_CONNECTION_STATE_COPY_IN)
- {
- /* precalculate to speed up access */
- int bytes_needed = handle->outEnd + 1 + msgLen;
-
- /* flush buffer if it is almost full */
- if ((primary_handle && bytes_needed > PRIMARY_NODE_WRITEAHEAD)
- || (!primary_handle && bytes_needed > COPY_BUFFER_SIZE))
- {
- int to_send = handle->outEnd;
-
- /* First look if the Datanode has sent an error message */
- int read_status = pgxc_node_read_data(handle, true);
- if (read_status == EOF || read_status < 0)
- {
- add_error_message(handle, "failed to read data from Datanode");
- return EOF;
- }
-
- if (handle->inStart < handle->inEnd)
- {
- RemoteQueryState *combiner = CreateResponseCombiner(1, COMBINE_TYPE_NONE);
- handle_response(handle, combiner);
- if (!ValidateAndCloseCombiner(combiner))
- return EOF;
- }
-
- if (DN_CONNECTION_STATE_ERROR(handle))
- return EOF;
-
- /*
- * Allow primary node to write out data before others.
- * If primary node was blocked it would not accept copy data.
- * So buffer at least PRIMARY_NODE_WRITEAHEAD at the other nodes.
- * If primary node is blocked and is buffering, other buffers will
- * grow accordingly.
- */
- if (primary_handle)
- {
- if (primary_handle->outEnd + PRIMARY_NODE_WRITEAHEAD < handle->outEnd)
- to_send = handle->outEnd - primary_handle->outEnd - PRIMARY_NODE_WRITEAHEAD;
- else
- to_send = 0;
- }
-
- /*
- * Try to send down buffered data if we have any
- */
- if (to_send && send_some(handle, to_send) < 0)
- {
- add_error_message(handle, "failed to send data to Datanode");
- return EOF;
- }
- }
-
- if (ensure_out_buffer_capacity(bytes_needed, handle) != 0)
- {
- ereport(ERROR,
- (errcode(ERRCODE_OUT_OF_MEMORY),
- errmsg("out of memory")));
- }
-
- handle->outBuffer[handle->outEnd++] = 'd';
- memcpy(handle->outBuffer + handle->outEnd, &nLen, 4);
- handle->outEnd += 4;
- memcpy(handle->outBuffer + handle->outEnd, data_row, len);
- handle->outEnd += len;
- handle->outBuffer[handle->outEnd++] = '\n';
- }
- else
- {
- add_error_message(handle, "Invalid Datanode connection");
- return EOF;
- }
- }
- return 0;
-}
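-/*
- * Wire format of the CopyData message buffered above, for reference: a 1-byte
- * type 'd', a 4-byte big-endian length that covers the length field itself,
- * the row and the trailing newline (4 + len + 1), then the len bytes of the
- * data row and a '\n' terminator.
- */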
-#endif
-
-
-#ifdef XCP
uint64
DataNodeCopyOut(PGXCNodeHandle** copy_connections,
int conn_count, FILE* copy_file)
@@ -4580,133 +2848,17 @@ DataNodeCopyStore(PGXCNodeHandle** copy_connections,
return processed;
}
-#else
-uint64
-DataNodeCopyOut(ExecNodes *exec_nodes,
- PGXCNodeHandle** copy_connections,
- TupleDesc tupleDesc,
- FILE* copy_file,
- Tuplestorestate *store,
- RemoteCopyType remoteCopyType)
-{
- RemoteQueryState *combiner;
- int conn_count = list_length(exec_nodes->nodeList) == 0 ? NumDataNodes : list_length(exec_nodes->nodeList);
- ListCell *nodeitem;
- uint64 processed;
-
- combiner = CreateResponseCombiner(conn_count, COMBINE_TYPE_SUM);
- combiner->processed = 0;
- combiner->remoteCopyType = remoteCopyType;
-
- /*
- * If there is an existing file where to copy data,
- * pass it to combiner when remote COPY output is sent back to file.
- */
- if (copy_file && remoteCopyType == REMOTE_COPY_FILE)
- combiner->copy_file = copy_file;
- if (store && remoteCopyType == REMOTE_COPY_TUPLESTORE)
- {
- combiner->tuplestorestate = store;
- combiner->tuple_desc = tupleDesc;
- }
-
- foreach(nodeitem, exec_nodes->nodeList)
- {
- PGXCNodeHandle *handle = copy_connections[lfirst_int(nodeitem)];
- int read_status = 0;
-
- Assert(handle && handle->state == DN_CONNECTION_STATE_COPY_OUT);
-
- /*
- * H message has been consumed, continue to manage data row messages.
- * Continue to read as long as there is data.
- */
- while (read_status >= 0 && handle->state == DN_CONNECTION_STATE_COPY_OUT)
- {
- if (handle_response(handle,combiner) == RESPONSE_EOF)
- {
- /* read some extra-data */
- read_status = pgxc_node_read_data(handle, true);
- if (read_status < 0)
- ereport(ERROR,
- (errcode(ERRCODE_CONNECTION_FAILURE),
- errmsg("unexpected EOF on datanode connection")));
- else
- /*
- * Set proper connection status - handle_response
- * has changed it to DN_CONNECTION_STATE_QUERY
- */
- handle->state = DN_CONNECTION_STATE_COPY_OUT;
- }
- /* There is no more data that can be read from connection */
- }
- }
-
- processed = combiner->processed;
-
- if (!ValidateAndCloseCombiner(combiner))
- {
- if (!PersistentConnections)
- release_handles();
- pfree(copy_connections);
- ereport(ERROR,
- (errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("Unexpected response from the Datanodes when combining, request type %d", combiner->request_type)));
- }
-
- return processed;
-}
-#endif
/*
* Finish copy process on all connections
*/
-#ifdef XCP
void
DataNodeCopyFinish(int conn_count, PGXCNodeHandle** connections)
-#else
-void
-DataNodeCopyFinish(PGXCNodeHandle** copy_connections, int primary_dn_index, CombineType combine_type)
-#endif
{
int i;
-#ifdef XCP
ResponseCombiner combiner;
-#else
- RemoteQueryState *combiner = NULL;
-#endif
bool error = false;
-#ifndef XCP
- struct timeval *timeout = NULL; /* wait forever */
- PGXCNodeHandle *connections[NumDataNodes];
- PGXCNodeHandle *primary_handle = NULL;
- int conn_count = 0;
-
- for (i = 0; i < NumDataNodes; i++)
- {
- PGXCNodeHandle *handle = copy_connections[i];
-
- if (!handle)
- continue;
-
- if (i == primary_dn_index)
- primary_handle = handle;
- else
- connections[conn_count++] = handle;
- }
-
- if (primary_handle)
- {
- error = true;
- if (primary_handle->state == DN_CONNECTION_STATE_COPY_IN || primary_handle->state == DN_CONNECTION_STATE_COPY_OUT)
- error = DataNodeCopyEnd(primary_handle, false);
-
- combiner = CreateResponseCombiner(conn_count + 1, combine_type);
- error = (pgxc_node_receive_responses(1, &primary_handle, timeout, combiner) != 0) || error;
- }
-#endif
-
for (i = 0; i < conn_count; i++)
{
PGXCNodeHandle *handle = connections[i];
@@ -4716,7 +2868,6 @@ DataNodeCopyFinish(PGXCNodeHandle** copy_connections, int primary_dn_index, Comb
error = DataNodeCopyEnd(handle, false);
}
-#ifdef XCP
InitResponseCombiner(&combiner, conn_count, COMBINE_TYPE_NONE);
/*
* Make sure there are zeroes in unused fields
@@ -4735,16 +2886,6 @@ DataNodeCopyFinish(PGXCNodeHandle** copy_connections, int primary_dn_index, Comb
}
else
CloseCombiner(&combiner);
-#else
- if (!combiner)
- combiner = CreateResponseCombiner(conn_count, combine_type);
- error = (pgxc_node_receive_responses(conn_count, connections, timeout, combiner) != 0) || error;
-
- if (!ValidateAndCloseCombiner(combiner) || error)
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Error while running COPY")));
-#endif
}
/*
@@ -4778,97 +2919,15 @@ DataNodeCopyEnd(PGXCNodeHandle *handle, bool is_error)
}
-#ifndef XCP
-RemoteQueryState *
-ExecInitRemoteQuery(RemoteQuery *node, EState *estate, int eflags)
-{
- RemoteQueryState *remotestate;
- TupleDesc scan_type;
-
- /* RemoteQuery node is the leaf node in the plan tree, just like seqscan */
- Assert(innerPlan(node) == NULL);
- Assert(outerPlan(node) == NULL);
-
- remotestate = CreateResponseCombiner(0, node->combine_type);
- remotestate->ss.ps.plan = (Plan *) node;
- remotestate->ss.ps.state = estate;
-
- /*
- * Miscellaneous initialisation
- *
- * create expression context for node
- */
- ExecAssignExprContext(estate, &remotestate->ss.ps);
-
- /* Initialise child expressions */
- remotestate->ss.ps.targetlist = (List *)
- ExecInitExpr((Expr *) node->scan.plan.targetlist,
- (PlanState *) remotestate);
- remotestate->ss.ps.qual = (List *)
- ExecInitExpr((Expr *) node->scan.plan.qual,
- (PlanState *) remotestate);
-
- /* check for unsupported flags */
- Assert(!(eflags & (EXEC_FLAG_MARK)));
-
- /* Extract the eflags bits that are relevant for tuplestorestate */
- remotestate->eflags = (eflags & (EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD));
-
- /* We have to support REWIND for ReScan anyway */
- remotestate->eflags |= EXEC_FLAG_REWIND;
-
- remotestate->eof_underlying = false;
- remotestate->tuplestorestate = NULL;
-
- ExecInitResultTupleSlot(estate, &remotestate->ss.ps);
- ExecInitScanTupleSlot(estate, &remotestate->ss);
- scan_type = ExecTypeFromTL(node->base_tlist, false);
- ExecAssignScanType(&remotestate->ss, scan_type);
-
- remotestate->ss.ps.ps_TupFromTlist = false;
-
- /*
- * If there are parameters supplied, get them into a form to be sent to the
- * Datanodes with a bind message. We should not have done this before.
- */
- if (estate->es_param_list_info)
- {
- Assert(!remotestate->paramval_data);
- remotestate->paramval_len = ParamListToDataRow(estate->es_param_list_info,
- &remotestate->paramval_data);
- }
-
- /*
- * Initialize result tuple type and projection info.
- */
- ExecAssignResultTypeFromTL(&remotestate->ss.ps);
- ExecAssignScanProjectionInfo(&remotestate->ss);
-
- if (node->has_ins_child_sel_parent)
- {
- /* Save command id of the insert-select query */
- remotestate->rqs_cmd_id = GetCurrentCommandId(false);
- }
-
- return remotestate;
-}
-#endif
-
/*
* Get Node connections depending on the connection type:
* Datanodes Only, Coordinators only or both types
*/
static PGXCNodeAllHandles *
-#ifdef XCP
get_exec_connections(RemoteQueryState *planstate,
ExecNodes *exec_nodes,
RemoteQueryExecType exec_type,
bool is_global_session)
-#else
-get_exec_connections(RemoteQueryState *planstate,
- ExecNodes *exec_nodes,
- RemoteQueryExecType exec_type)
-#endif
{
List *nodelist = NIL;
List *primarynode = NIL;
@@ -4885,69 +2944,11 @@ get_exec_connections(RemoteQueryState *planstate,
if (exec_type == EXEC_ON_COORDS)
is_query_coord_only = true;
-#ifdef XCP
if (exec_type == EXEC_ON_CURRENT)
return get_current_handles();
-#endif
if (exec_nodes)
{
-#ifndef XCP
- if (exec_nodes->en_expr)
- {
- /* execution time determining of target Datanodes */
- bool isnull;
- ExprState *estate = ExecInitExpr(exec_nodes->en_expr,
- (PlanState *) planstate);
- Datum partvalue = ExecEvalExpr(estate,
- planstate->ss.ps.ps_ExprContext,
- &isnull,
- NULL);
- RelationLocInfo *rel_loc_info = GetRelationLocInfo(exec_nodes->en_relid);
- /* PGXCTODO what is the type of partvalue here */
- ExecNodes *nodes = GetRelationNodes(rel_loc_info,
- partvalue,
- isnull,
- exprType((Node *) exec_nodes->en_expr),
- exec_nodes->accesstype);
- if (nodes)
- {
- nodelist = nodes->nodeList;
- primarynode = nodes->primarynodelist;
- pfree(nodes);
- }
- FreeRelationLocInfo(rel_loc_info);
- }
- else if (OidIsValid(exec_nodes->en_relid))
- {
- RelationLocInfo *rel_loc_info = GetRelationLocInfo(exec_nodes->en_relid);
- ExecNodes *nodes = GetRelationNodes(rel_loc_info, 0, true, InvalidOid, exec_nodes->accesstype);
-
- /* Use the obtained list for given table */
- if (nodes)
- nodelist = nodes->nodeList;
-
- /*
- * Special handling for ROUND ROBIN distributed tables. The target
- * node must be determined at the execution time
- */
- if (rel_loc_info->locatorType == LOCATOR_TYPE_RROBIN && nodes)
- {
- nodelist = nodes->nodeList;
- primarynode = nodes->primarynodelist;
- }
- else if (nodes)
- {
- if (exec_type == EXEC_ON_DATANODES || exec_type == EXEC_ON_ALL_NODES)
- nodelist = exec_nodes->nodeList;
- }
-
- if (nodes)
- pfree(nodes);
- FreeRelationLocInfo(rel_loc_info);
- }
- else
-#endif
{
if (exec_type == EXEC_ON_DATANODES || exec_type == EXEC_ON_ALL_NODES)
nodelist = exec_nodes->nodeList;
@@ -4995,11 +2996,7 @@ get_exec_connections(RemoteQueryState *planstate,
}
/* Get other connections (non-primary) */
-#ifdef XCP
pgxc_handles = get_handles(nodelist, coordlist, is_query_coord_only, is_global_session);
-#else
- pgxc_handles = get_handles(nodelist, coordlist, is_query_coord_only);
-#endif
if (!pgxc_handles)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
@@ -5010,11 +3007,7 @@ get_exec_connections(RemoteQueryState *planstate,
{
/* Let's assume primary connection is always a Datanode connection for the moment */
PGXCNodeAllHandles *pgxc_conn_res;
-#ifdef XCP
pgxc_conn_res = get_handles(primarynode, NULL, false, is_global_session);
-#else
- pgxc_conn_res = get_handles(primarynode, NULL, false);
-#endif
/* primary connection is unique */
primaryconnection = pgxc_conn_res->datanode_handles[0];
@@ -5042,45 +3035,16 @@ pgxc_start_command_on_connection(PGXCNodeHandle *connection,
Snapshot snapshot)
{
CommandId cid;
-#ifdef XCP
ResponseCombiner *combiner = (ResponseCombiner *) remotestate;
RemoteQuery *step = (RemoteQuery *) combiner->ss.ps.plan;
CHECK_OWNERSHIP(connection, combiner);
-#else
- RemoteQuery *step = (RemoteQuery *) remotestate->ss.ps.plan;
- if (connection->state == DN_CONNECTION_STATE_QUERY)
- BufferConnection(connection);
-#endif
/*
* The scan descriptor would be valid and would contain a valid snapshot
* in cases when we need to send an out-of-order command id to the data node,
* e.g. in case of a fetch
*/
-#ifdef XCP
cid = GetCurrentCommandId(false);
-#else
- if (remotestate->cursor != NULL &&
- remotestate->cursor[0] != '\0' &&
- remotestate->ss.ss_currentScanDesc != NULL &&
- remotestate->ss.ss_currentScanDesc->rs_snapshot != NULL)
- cid = remotestate->ss.ss_currentScanDesc->rs_snapshot->curcid;
- else
- {
- /*
- * An insert into a child by selecting from its parent gets translated
- * into a multi-statement transaction in which first we select from parent
- * and then insert into child, then select from child and insert into child.
- * The select from child should not see the just inserted rows.
- * The command id of the select from child is therefore set to
- * the command id of the insert-select query saved earlier.
- */
- if (step->exec_nodes->accesstype == RELATION_ACCESS_READ && step->has_ins_child_sel_parent)
- cid = remotestate->rqs_cmd_id;
- else
- cid = GetCurrentCommandId(false);
- }
-#endif
if (pgxc_node_send_cmd_id(connection, cid) < 0 )
return false;
@@ -5093,13 +3057,6 @@ pgxc_start_command_on_connection(PGXCNodeHandle *connection,
int fetch = 0;
bool prepared = false;
-#ifndef XCP
- /* if a prepared statement is referenced, see if it already exists */
- if (step->statement)
- prepared = ActivateDatanodeStatementOnNode(step->statement,
- PGXCNodeGetNodeId(connection->nodeoid,
- PGXC_NODE_DATANODE));
-#endif
/*
* execute and fetch rows only if they will be consumed
* immediately by the sorter
@@ -5107,9 +3064,7 @@ pgxc_start_command_on_connection(PGXCNodeHandle *connection,
if (step->cursor)
fetch = 1;
-#ifdef XCP
combiner->extended_query = true;
-#endif
if (pgxc_node_send_query_extended(connection,
prepared ? NULL : step->sql_statement,
@@ -5131,607 +3086,6 @@ pgxc_start_command_on_connection(PGXCNodeHandle *connection,
return true;
}
-
-#ifndef XCP
-static void
-do_query(RemoteQueryState *node)
-{
- RemoteQuery *step = (RemoteQuery *) node->ss.ps.plan;
- TupleTableSlot *scanslot = node->ss.ss_ScanTupleSlot;
- bool force_autocommit = step->force_autocommit;
- bool is_read_only = step->read_only;
- GlobalTransactionId gxid = InvalidGlobalTransactionId;
- Snapshot snapshot = GetActiveSnapshot();
- PGXCNodeHandle **connections = NULL;
- PGXCNodeHandle *primaryconnection = NULL;
- int i;
- int regular_conn_count;
- int total_conn_count;
- bool need_tran_block;
- PGXCNodeAllHandles *pgxc_connections;
-
- /*
- * Remember if the remote query is accessing a temp object
- *
- * !! PGXC TODO Check if the is_temp flag is propagated correctly when a
- * remote join is reduced
- */
- if (step->is_temp)
- ExecSetTempObjectIncluded();
-
- /*
- * Get connections for Datanodes only, utilities and DDLs
- * are launched in ExecRemoteUtility
- */
- pgxc_connections = get_exec_connections(node, step->exec_nodes, step->exec_type);
-
- if (step->exec_type == EXEC_ON_DATANODES)
- {
- connections = pgxc_connections->datanode_handles;
- total_conn_count = regular_conn_count = pgxc_connections->dn_conn_count;
- }
- else if (step->exec_type == EXEC_ON_COORDS)
- {
- connections = pgxc_connections->coord_handles;
- total_conn_count = regular_conn_count = pgxc_connections->co_conn_count;
- }
-
- primaryconnection = pgxc_connections->primary_handle;
-
- /*
- * Primary connection is counted separately but is included in total_conn_count if used.
- */
- if (primaryconnection)
- regular_conn_count--;
-
- pfree(pgxc_connections);
-
- /*
- * We save only regular connections; by the time we exit the function
- * we have finished with the primary connection and deal only with regular
- * connections on subsequent invocations
- */
- node->node_count = regular_conn_count;
-
- if (force_autocommit || is_read_only)
- need_tran_block = false;
- else
- need_tran_block = true;
- /*
- * XXX We are forcing a transaction block for every non-read-only remote query. We can
- * get smarter here and avoid a transaction block if all of the following
- * conditions are true:
- *
- * - there is only one writer node involved in the transaction (including
- * the local node)
- * - the statement being executed on the remote writer node is a single
- * step statement. IOW, Coordinator must not send multiple queries to the
- * remote node.
- *
- * Once we have leak-proof mechanism to enforce these constraints, we
- * should relax the transaction block requirement.
- *
- need_tran_block = (!is_read_only && total_conn_count > 1) ||
- (TransactionBlockStatusCode() == 'T');
- */
-
- elog(DEBUG1, "has primary = %s, regular_conn_count = %d, "
- "need_tran_block = %s", primaryconnection ? "true" : "false",
- regular_conn_count, need_tran_block ? "true" : "false");
-
- stat_statement();
- stat_transaction(total_conn_count);
-
- gxid = GetCurrentTransactionId();
-
- if (!GlobalTransactionIdIsValid(gxid))
- {
- if (primaryconnection)
- pfree(primaryconnection);
- pfree(connections);
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Failed to get next transaction ID")));
- }
-
- /* See if we have a primary node, execute on it first before the others */
- if (primaryconnection)
- {
- if (pgxc_node_begin(1, &primaryconnection, gxid, need_tran_block,
- is_read_only, PGXC_NODE_DATANODE))
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Could not begin transaction on primary Datanode.")));
-
- if (!pgxc_start_command_on_connection(primaryconnection, node, snapshot))
- {
- pgxc_node_remote_abort();
- pfree(connections);
- pfree(primaryconnection);
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Failed to send command to Datanodes")));
- }
- Assert(node->combine_type == COMBINE_TYPE_SAME);
-
- /* Make sure the command is completed on the primary node */
- while (true)
- {
- int res;
- if (pgxc_node_receive(1, &primaryconnection, NULL))
- break;
-
- res = handle_response(primaryconnection, node);
- if (res == RESPONSE_COMPLETE)
- break;
- else if (res == RESPONSE_EOF)
- continue;
- else
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Unexpected response from Datanode")));
- }
- /* report error if any */
- pgxc_node_report_error(node);
- }
-
- for (i = 0; i < regular_conn_count; i++)
- {
- if (pgxc_node_begin(1, &connections[i], gxid, need_tran_block,
- is_read_only, PGXC_NODE_DATANODE))
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Could not begin transaction on Datanodes.")));
-
- if (!pgxc_start_command_on_connection(connections[i], node, snapshot))
- {
- pgxc_node_remote_abort();
- pfree(connections);
- if (primaryconnection)
- pfree(primaryconnection);
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Failed to send command to Datanodes")));
- }
- connections[i]->combiner = node;
- }
-
- if (step->cursor)
- {
- node->cursor_count = regular_conn_count;
- node->cursor_connections = (PGXCNodeHandle **) palloc(regular_conn_count * sizeof(PGXCNodeHandle *));
- memcpy(node->cursor_connections, connections, regular_conn_count * sizeof(PGXCNodeHandle *));
- }
-
- /*
- * Stop if all commands are completed or we got a data row and
- * initialized state node for subsequent invocations
- */
- while (regular_conn_count > 0 && node->connections == NULL)
- {
- int i = 0;
-
- if (pgxc_node_receive(regular_conn_count, connections, NULL))
- {
- pfree(connections);
- if (primaryconnection)
- pfree(primaryconnection);
- if (node->cursor_connections)
- pfree(node->cursor_connections);
-
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Failed to read response from Datanodes")));
- }
- /*
- * Handle input from the Datanodes.
- * If we got a RESPONSE_DATAROW we can break handling to wrap
- * it into a tuple and return. Handling will be continued upon
- * subsequent invocations.
- * If we got 0, we exclude the connection from the list. We do not
- * expect more input from it. In case of a non-SELECT query we quit
- * the loop when all nodes finish their work and send ReadyForQuery
- * with an empty connections array.
- * If we got EOF, move to the next connection; we will receive more
- * data on the next iteration.
- */
- while (i < regular_conn_count)
- {
- int res = handle_response(connections[i], node);
- if (res == RESPONSE_EOF)
- {
- i++;
- }
- else if (res == RESPONSE_COMPLETE)
- {
- if (i < --regular_conn_count)
- connections[i] = connections[regular_conn_count];
- }
- else if (res == RESPONSE_TUPDESC)
- {
- ExecSetSlotDescriptor(scanslot, node->tuple_desc);
- /*
- * Now tuple table slot is responsible for freeing the
- * descriptor
- */
- node->tuple_desc = NULL;
- if (step->sort)
- {
- SimpleSort *sort = step->sort;
-
- node->connections = connections;
- node->conn_count = regular_conn_count;
- /*
- * The first message is already in the buffer.
- * Further fetches will be under tuplesort control.
- * If the query does not produce rows, tuplesort will not
- * be initialized
- */
- node->tuplesortstate = tuplesort_begin_merge(
- scanslot->tts_tupleDescriptor,
- sort->numCols,
- sort->sortColIdx,
- sort->sortOperators,
- sort->sortCollations,
- sort->nullsFirst,
- node,
- work_mem);
- /*
- * Break the loop; do not wait for the first row.
- * The tuplesort module wants to control which node it is
- * fetching rows from, while in this loop the first
- * row would come from a random node
- */
- break;
- }
- else
- {
- /*
- * RemoteQuery node doesn't support backward scan, so
- * randomAccess is false, nor do we want this tuple store
- * to persist across transactions.
- */
- node->tuplestorestate = tuplestore_begin_heap(false, false, work_mem);
- tuplestore_set_eflags(node->tuplestorestate, node->eflags);
- }
- }
- else if (res == RESPONSE_DATAROW)
- {
- /*
- * Got first data row, quit the loop
- */
- node->connections = connections;
- node->conn_count = regular_conn_count;
- node->current_conn = i;
- break;
- }
- else
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Unexpected response from Datanode")));
- }
- /* report error if any */
- pgxc_node_report_error(node);
- }
-
- if (node->cursor_count)
- {
- node->conn_count = node->cursor_count;
- memcpy(connections, node->cursor_connections, node->cursor_count * sizeof(PGXCNodeHandle *));
- node->connections = connections;
- }
-}
-
-/*
- * ExecRemoteQuery
- * Wrapper around the main RemoteQueryNext() function. This
- * wrapper provides materialization of the result returned by
- * RemoteQueryNext
- */
-
-TupleTableSlot *
-ExecRemoteQuery(RemoteQueryState *node)
-{
- return ExecScan(&(node->ss),
- (ExecScanAccessMtd) RemoteQueryNext,
- (ExecScanRecheckMtd) RemoteQueryRecheck);
-}
-
-/*
- * RemoteQueryRecheck -- remote query routine to recheck a tuple in EvalPlanQual
- */
-static bool
-RemoteQueryRecheck(RemoteQueryState *node, TupleTableSlot *slot)
-{
- /*
- * Note that unlike IndexScan, RemoteQueryScan never uses keys in heap_beginscan
- * (and this is very bad) - so here we do not check whether the keys are ok or not.
- */
- return true;
-}
-/*
- * Execute a step of the PGXC plan.
- * The step specifies a command to be executed on the specified nodes.
- * On the first invocation connections to the Datanodes are initialized and
- * the command is executed. Then, within this and subsequent invocations,
- * responses are received until the step is completed or there is a tuple to emit.
- * If there is a tuple it is returned, otherwise NULL is returned. A NULL result
- * from the function indicates a completed step.
- * The function returns at most one tuple per invocation.
- */
-static TupleTableSlot *
-RemoteQueryNext(ScanState *scan_node)
-{
- RemoteQueryState *node = (RemoteQueryState *)scan_node;
- TupleTableSlot *scanslot = scan_node->ss_ScanTupleSlot;
-
- if (!node->query_Done)
- {
- do_query(node);
- node->query_Done = true;
- }
-
- if (node->update_cursor)
- {
- PGXCNodeAllHandles *all_dn_handles = get_exec_connections(node, NULL, EXEC_ON_DATANODES);
- close_node_cursors(all_dn_handles->datanode_handles,
- all_dn_handles->dn_conn_count,
- node->update_cursor);
- pfree(node->update_cursor);
- node->update_cursor = NULL;
- pfree_pgxc_all_handles(all_dn_handles);
- }
-
- /* We can't have both tuplesortstate and tuplestorestate */
- Assert(!(node->tuplesortstate && node->tuplestorestate));
-
- if (node->tuplesortstate)
- tuplesort_gettupleslot((Tuplesortstate *) node->tuplesortstate,
- true, scanslot);
- else if(node->tuplestorestate)
- {
- /*
- * If we are not at the end of the tuplestore, try
- * to fetch a tuple from tuplestore.
- */
- Tuplestorestate *tuplestorestate = node->tuplestorestate;
- bool eof_tuplestore = tuplestore_ateof(tuplestorestate);
-
- /*
- * If we can fetch another tuple from the tuplestore, return it.
- */
- if (!eof_tuplestore)
- {
- /* RemoteQuery node doesn't support backward scans */
- if(!tuplestore_gettupleslot(tuplestorestate, true, false, scanslot))
- eof_tuplestore = true;
- }
-
- if (eof_tuplestore && !node->eof_underlying)
- {
- /*
- * If tuplestore has reached its end but the underlying RemoteQueryNext() hasn't
- * finished yet, try to fetch another row.
- */
- if (FetchTuple(node, scanslot))
- {
- /*
- * Append a copy of the returned tuple to tuplestore. NOTE: because
- * the tuplestore is certainly in EOF state, its read position will
- * move forward over the added tuple. This is what we want.
- */
- if (tuplestorestate && !TupIsNull(scanslot))
- tuplestore_puttupleslot(tuplestorestate, scanslot);
- }
- else
- node->eof_underlying = true;
- }
-
- if (eof_tuplestore && node->eof_underlying)
- ExecClearTuple(scanslot);
- }
- /* No tuple store whatsoever, no result from the datanode */
- else
- ExecClearTuple(scanslot);
-
- /* report error if any */
- pgxc_node_report_error(node);
-
- return scanslot;
-}
-
-/*
- * End the remote query
- */
-void
-ExecEndRemoteQuery(RemoteQueryState *node)
-{
- ListCell *lc;
-
- /* clean up the buffer */
- foreach(lc, node->rowBuffer)
- {
- RemoteDataRow dataRow = (RemoteDataRow) lfirst(lc);
- pfree(dataRow->msg);
- }
- list_free_deep(node->rowBuffer);
-
- node->current_conn = 0;
- while (node->conn_count > 0)
- {
- int res;
- PGXCNodeHandle *conn = node->connections[node->current_conn];
-
- /* throw away message */
- if (node->currentRow.msg)
- {
- pfree(node->currentRow.msg);
- node->currentRow.msg = NULL;
- }
-
- if (conn == NULL)
- {
- node->conn_count--;
- continue;
- }
-
- /* no data is expected */
- if (conn->state == DN_CONNECTION_STATE_IDLE ||
- conn->state == DN_CONNECTION_STATE_ERROR_FATAL)
- {
- if (node->current_conn < --node->conn_count)
- node->connections[node->current_conn] = node->connections[node->conn_count];
- continue;
- }
- res = handle_response(conn, node);
- if (res == RESPONSE_EOF)
- {
- struct timeval timeout;
-#ifdef XCP
- timeout.tv_sec = END_QUERY_TIMEOUT / 1000;
- timeout.tv_usec = (END_QUERY_TIMEOUT % 1000) * 1000;
-#else
- timeout.tv_sec = END_QUERY_TIMEOUT;
- timeout.tv_usec = 0;
-#endif
- if (pgxc_node_receive(1, &conn, &timeout))
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Failed to read response from Datanodes when ending query")));
- }
- }
-
- if (node->tuplesortstate != NULL || node->tuplestorestate != NULL)
- ExecClearTuple(node->ss.ss_ScanTupleSlot);
- /*
- * Release tuplesort resources
- */
- if (node->tuplesortstate != NULL)
- tuplesort_end((Tuplesortstate *) node->tuplesortstate);
- node->tuplesortstate = NULL;
- /*
- * Release tuplestore resources
- */
- if (node->tuplestorestate != NULL)
- tuplestore_end(node->tuplestorestate);
- node->tuplestorestate = NULL;
-
- /*
- * If there are active cursors close them
- */
- if (node->cursor || node->update_cursor)
- {
- PGXCNodeAllHandles *all_handles = NULL;
- PGXCNodeHandle **cur_handles;
- bool bFree = false;
- int nCount;
- int i;
-
- cur_handles = node->cursor_connections;
- nCount = node->cursor_count;
-
- for(i=0;i<node->cursor_count;i++)
- {
- if (node->cursor_connections == NULL || node->cursor_connections[i]->sock == -1)
- {
- bFree = true;
- all_handles = get_exec_connections(node, NULL, EXEC_ON_DATANODES);
- cur_handles = all_handles->datanode_handles;
- nCount = all_handles->dn_conn_count;
- break;
- }
- }
-
- if (node->cursor)
- {
- close_node_cursors(cur_handles, nCount, node->cursor);
- pfree(node->cursor);
- node->cursor = NULL;
- }
-
- if (node->update_cursor)
- {
- close_node_cursors(cur_handles, nCount, node->update_cursor);
- pfree(node->update_cursor);
- node->update_cursor = NULL;
- }
-
- if (bFree)
- pfree_pgxc_all_handles(all_handles);
- }
-
- /*
- * Clean up parameters if they were set
- */
- if (node->paramval_data)
- {
- pfree(node->paramval_data);
- node->paramval_data = NULL;
- node->paramval_len = 0;
- }
-
- if (node->ss.ss_currentRelation)
- ExecCloseScanRelation(node->ss.ss_currentRelation);
-
- if (node->tmp_ctx)
- MemoryContextDelete(node->tmp_ctx);
-
- CloseCombiner(node);
-}
-
-static void
-close_node_cursors(PGXCNodeHandle **connections, int conn_count, char *cursor)
-{
- int i;
- RemoteQueryState *combiner;
-
- for (i = 0; i < conn_count; i++)
- {
- if (connections[i]->state == DN_CONNECTION_STATE_QUERY)
- BufferConnection(connections[i]);
- if (pgxc_node_send_close(connections[i], false, cursor) != 0)
- ereport(WARNING,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Failed to close Datanode cursor")));
- if (pgxc_node_send_sync(connections[i]) != 0)
- ereport(WARNING,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Failed to close Datanode cursor")));
- }
-
- combiner = CreateResponseCombiner(conn_count, COMBINE_TYPE_NONE);
-
- while (conn_count > 0)
- {
- if (pgxc_node_receive(conn_count, connections, NULL))
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Failed to close Datanode cursor")));
- i = 0;
- while (i < conn_count)
- {
- int res = handle_response(connections[i], combiner);
- if (res == RESPONSE_EOF)
- {
- i++;
- }
- else if (res == RESPONSE_COMPLETE)
- {
- if (--conn_count > i)
- connections[i] = connections[conn_count];
- }
- else
- {
- /* Unexpected response, ignore? */
- }
- }
- }
-
- ValidateAndCloseCombiner(combiner);
-}
-#endif
-
-
/*
* Encode parameter values to format of DataRow message (the same format is
* used in Bind) to prepare for sending down to Datanodes.
@@ -5839,41 +3193,6 @@ ParamListToDataRow(ParamListInfo params, char** result)
return buf.len;
}
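
As the comment above says, the parameters are packed in the same layout that the Bind and DataRow protocol messages use: a 16-bit parameter count followed by, for each parameter, a 32-bit length (-1 for NULL) and the text output of the value. The sketch below is a simplified restatement of what ParamListToDataRow() does (it omits the handling of unset parameters and buffer ownership, and assumes the usual lib/stringinfo.h and utils/lsyscache.h facilities):

static int
param_list_to_datarow_sketch(ParamListInfo params, StringInfo buf)
{
	uint16		n16 = htons((uint16) params->numParams);
	int			i;

	appendBinaryStringInfo(buf, (char *) &n16, 2);
	for (i = 0; i < params->numParams; i++)
	{
		ParamExternData *prm = &params->params[i];
		uint32		n32;

		if (prm->isnull || !OidIsValid(prm->ptype))
		{
			n32 = htonl(-1);	/* NULL is sent as length -1 with no value bytes */
			appendBinaryStringInfo(buf, (char *) &n32, 4);
		}
		else
		{
			Oid			typoutput;
			bool		typisvarlena;
			char	   *pstring;

			getTypeOutputInfo(prm->ptype, &typoutput, &typisvarlena);
			pstring = OidOutputFunctionCall(typoutput, prm->value);
			n32 = htonl((uint32) strlen(pstring));
			appendBinaryStringInfo(buf, (char *) &n32, 4);
			appendBinaryStringInfo(buf, pstring, strlen(pstring));
		}
	}
	return buf->len;
}
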
-
-#ifndef XCP
-/* ----------------------------------------------------------------
- * ExecRemoteQueryReScan
- *
- * Rescans the relation.
- * ----------------------------------------------------------------
- */
-void
-ExecRemoteQueryReScan(RemoteQueryState *node, ExprContext *exprCtxt)
-{
- /*
- * If the materialized store is not empty, just rewind the stored output.
- */
- ExecClearTuple(node->ss.ps.ps_ResultTupleSlot);
-
- if (((RemoteQuery *) node->ss.ps.plan)->sort)
- {
- if (!node->tuplesortstate)
- return;
-
- tuplesort_rescan(node->tuplesortstate);
- }
- else
- {
- if (!node->tuplestorestate)
- return;
-
- tuplestore_rescan(node->tuplestorestate);
- }
-
-}
-#endif
-
-
/*
* Execute utility statement on multiple Datanodes
* It does approximately the same as
@@ -5889,31 +3208,22 @@ void
ExecRemoteUtility(RemoteQuery *node)
{
RemoteQueryState *remotestate;
-#ifdef XCP
ResponseCombiner *combiner;
-#endif
bool force_autocommit = node->force_autocommit;
RemoteQueryExecType exec_type = node->exec_type;
GlobalTransactionId gxid = InvalidGlobalTransactionId;
-#ifdef XCP
Snapshot snapshot = NULL;
-#else
- Snapshot snapshot = GetActiveSnapshot();
-#endif
PGXCNodeAllHandles *pgxc_connections;
int co_conn_count;
int dn_conn_count;
bool need_tran_block;
ExecDirectType exec_direct_type = node->exec_direct_type;
int i;
-#ifdef XCP
CommandId cid = GetCurrentCommandId(true);
-#endif
if (!force_autocommit)
RegisterTransactionLocalNode(true);
-#ifdef XCP
remotestate = makeNode(RemoteQueryState);
combiner = (ResponseCombiner *)remotestate;
InitResponseCombiner(combiner, 0, node->combine_type);
@@ -5924,29 +3234,15 @@ ExecRemoteUtility(RemoteQuery *node)
*/
pgxc_connections = get_exec_connections(NULL, node->exec_nodes, exec_type,
exec_direct_type != EXEC_DIRECT_UTILITY);
-#else
- /*
- * It is possible to invoke create table with inheritance on
- * temporary objects. Remember that we might have accessed a temp object
- */
- if (node->is_temp)
- ExecSetTempObjectIncluded();
-
- remotestate = CreateResponseCombiner(0, node->combine_type);
-
- pgxc_connections = get_exec_connections(NULL, node->exec_nodes, exec_type);
-#endif
dn_conn_count = pgxc_connections->dn_conn_count;
co_conn_count = pgxc_connections->co_conn_count;
-#ifdef XCP
/* exit right away if no nodes to run command on */
if (dn_conn_count == 0 && co_conn_count == 0)
{
pfree_pgxc_all_handles(pgxc_connections);
return;
}
-#endif
if (force_autocommit)
need_tran_block = false;
@@ -5966,18 +3262,13 @@ ExecRemoteUtility(RemoteQuery *node)
}
gxid = GetCurrentTransactionId();
-#ifdef XCP
if (ActiveSnapshotSet())
snapshot = GetActiveSnapshot();
-#endif
if (!GlobalTransactionIdIsValid(gxid))
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("Failed to get next transaction ID")));
-#ifndef XCP
- if (exec_type == EXEC_ON_ALL_NODES || exec_type == EXEC_ON_DATANODES)
-#endif
{
if (pgxc_node_begin(dn_conn_count, pgxc_connections->datanode_handles,
gxid, need_tran_block, false, PGXC_NODE_DATANODE))
@@ -6012,9 +3303,6 @@ ExecRemoteUtility(RemoteQuery *node)
}
}
-#ifndef XCP
- if (exec_type == EXEC_ON_ALL_NODES || exec_type == EXEC_ON_COORDS)
-#endif
{
if (pgxc_node_begin(co_conn_count, pgxc_connections->coord_handles,
gxid, need_tran_block, false, PGXC_NODE_COORDINATOR))
@@ -6050,10 +3338,6 @@ ExecRemoteUtility(RemoteQuery *node)
* Stop if all commands are completed or we got a data row and
* initialized state node for subsequent invocations
*/
-#ifndef XCP
- if (exec_type == EXEC_ON_ALL_NODES ||
- exec_type == EXEC_ON_DATANODES)
-#endif
{
while (dn_conn_count > 0)
{
@@ -6071,17 +3355,12 @@ ExecRemoteUtility(RemoteQuery *node)
while (i < dn_conn_count)
{
PGXCNodeHandle *conn = pgxc_connections->datanode_handles[i];
-#ifdef XCP
int res = handle_response(conn, combiner);
-#else
- int res = handle_response(conn, remotestate);
-#endif
if (res == RESPONSE_EOF)
{
i++;
}
else if (res == RESPONSE_COMPLETE)
-#ifdef XCP
{
/* Ignore, wait for ReadyForQuery */
}
@@ -6090,7 +3369,6 @@ ExecRemoteUtility(RemoteQuery *node)
/* Ignore, wait for ReadyForQuery */
}
else if (res == RESPONSE_READY)
-#endif
{
if (i < --dn_conn_count)
pgxc_connections->datanode_handles[i] =
@@ -6113,10 +3391,6 @@ ExecRemoteUtility(RemoteQuery *node)
}
 /* Do the same for the Coordinators */
-#ifndef XCP
- if (exec_type == EXEC_ON_ALL_NODES ||
- exec_type == EXEC_ON_COORDS)
-#endif
{
while (co_conn_count > 0)
{
@@ -6127,17 +3401,12 @@ ExecRemoteUtility(RemoteQuery *node)
while (i < co_conn_count)
{
-#ifdef XCP
int res = handle_response(pgxc_connections->coord_handles[i], combiner);
-#else
- int res = handle_response(pgxc_connections->coord_handles[i], remotestate);
-#endif
if (res == RESPONSE_EOF)
{
i++;
}
else if (res == RESPONSE_COMPLETE)
-#ifdef XCP
{
/* Ignore, wait for ReadyForQuery */
}
@@ -6146,7 +3415,6 @@ ExecRemoteUtility(RemoteQuery *node)
/* Ignore, wait for ReadyForQuery */
}
else if (res == RESPONSE_READY)
-#endif
{
if (i < --co_conn_count)
pgxc_connections->coord_handles[i] =
@@ -6173,12 +3441,8 @@ ExecRemoteUtility(RemoteQuery *node)
* error message pending we can report it. All connections should be in
* consistent state now and so they can be released to the pool after ROLLBACK.
*/
-#ifdef XCP
pfree_pgxc_all_handles(pgxc_connections);
pgxc_node_report_error(combiner);
-#else
- pgxc_node_report_error(remotestate);
-#endif
}
@@ -6188,13 +3452,6 @@ ExecRemoteUtility(RemoteQuery *node)
void
PGXCNodeCleanAndRelease(int code, Datum arg)
{
-#ifndef XCP
- /* Clean up prepared transactions before releasing connections */
- DropAllPreparedStatements();
-
- /* Release Datanode connections */
- release_handles();
-#endif
 /* Disconnect from the Pooler; if any connection is still held, the Pooler closes it */
PoolManagerDisconnect();
@@ -6206,44 +3463,12 @@ PGXCNodeCleanAndRelease(int code, Datum arg)
stat_log();
}
-
-#ifndef XCP
-static int
-pgxc_get_connections(PGXCNodeHandle *connections[], int size, List *connlist)
-{
- ListCell *lc;
- int count = 0;
-
- foreach(lc, connlist)
- {
- PGXCNodeHandle *conn = (PGXCNodeHandle *) lfirst(lc);
- Assert (count < size);
- connections[count++] = conn;
- }
- return count;
-}
-/*
- * Get all connections for which we have an open transaction,
- * for both Datanodes and Coordinators
- */
-static int
-pgxc_get_transaction_nodes(PGXCNodeHandle *connections[], int size, bool write)
-{
- return pgxc_get_connections(connections, size, write ? XactWriteNodes : XactReadNodes);
-}
-#endif
-
-
void
ExecCloseRemoteStatement(const char *stmt_name, List *nodelist)
{
PGXCNodeAllHandles *all_handles;
PGXCNodeHandle **connections;
-#ifdef XCP
ResponseCombiner combiner;
-#else
- RemoteQueryState *combiner;
-#endif
int conn_count;
int i;
@@ -6252,11 +3477,7 @@ ExecCloseRemoteStatement(const char *stmt_name, List *nodelist)
return;
/* get needed Datanode connections */
-#ifdef XCP
all_handles = get_handles(nodelist, NIL, false, true);
-#else
- all_handles = get_handles(nodelist, NIL, false);
-#endif
conn_count = all_handles->dn_conn_count;
connections = all_handles->datanode_handles;
@@ -6285,15 +3506,11 @@ ExecCloseRemoteStatement(const char *stmt_name, List *nodelist)
}
}
-#ifdef XCP
InitResponseCombiner(&combiner, conn_count, COMBINE_TYPE_NONE);
/*
* Make sure there are zeroes in unused fields
*/
memset(&combiner, 0, sizeof(ScanState));
-#else
- combiner = CreateResponseCombiner(conn_count, COMBINE_TYPE_NONE);
-#endif
while (conn_count > 0)
{
@@ -6309,39 +3526,21 @@ ExecCloseRemoteStatement(const char *stmt_name, List *nodelist)
i = 0;
while (i < conn_count)
{
-#ifdef XCP
int res = handle_response(connections[i], &combiner);
-#else
- int res = handle_response(connections[i], combiner);
-#endif
if (res == RESPONSE_EOF)
{
i++;
}
-#ifdef XCP
else if (res == RESPONSE_READY ||
connections[i]->state == DN_CONNECTION_STATE_ERROR_FATAL)
-#else
- else if (res == RESPONSE_COMPLETE)
-#endif
{
if (--conn_count > i)
connections[i] = connections[conn_count];
}
-#ifndef XCP
- else
- {
- connections[i]->state = DN_CONNECTION_STATE_ERROR_FATAL;
- }
-#endif
}
}
-#ifdef XCP
ValidateAndCloseCombiner(&combiner);
-#else
- ValidateAndCloseCombiner(combiner);
-#endif
pfree_pgxc_all_handles(all_handles);
}
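
For illustration, closing a statement previously prepared on the first and third Datanodes could look like the sketch below; the statement name and node indexes are hypothetical:

static void
close_my_remote_stmt_sketch(void)
{
	/* node indexes 0 and 2 and the statement name are made up for the example */
	ExecCloseRemoteStatement("my_remote_stmt", list_make2_int(0, 2));
}
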
@@ -6350,34 +3549,14 @@ ExecCloseRemoteStatement(const char *stmt_name, List *nodelist)
*
 * In a COPY TO in binary mode, send the PG_HEADER to all Datanodes.
*/
-#ifdef XCP
int
DataNodeCopyInBinaryForAll(char *msg_buf, int len, int conn_count,
PGXCNodeHandle** connections)
-#else
-int DataNodeCopyInBinaryForAll(char *msg_buf, int len, PGXCNodeHandle** copy_connections)
-#endif
{
int i;
-#ifndef XCP
- int conn_count = 0;
- PGXCNodeHandle *connections[NumDataNodes];
-#endif
int msgLen = 4 + len + 1;
int nLen = htonl(msgLen);
-#ifndef XCP
- for (i = 0; i < NumDataNodes; i++)
- {
- PGXCNodeHandle *handle = copy_connections[i];
-
- if (!handle)
- continue;
-
- connections[conn_count++] = handle;
- }
-#endif
-
for (i = 0; i < conn_count; i++)
{
PGXCNodeHandle *handle = connections[i];
@@ -6408,129 +3587,15 @@ int DataNodeCopyInBinaryForAll(char *msg_buf, int len, PGXCNodeHandle** copy_con
return 0;
}
-#ifndef XCP
-/*
- * ExecSetTempObjectIncluded
- *
- * Remember that we have accessed a temporary object.
- */
-void
-ExecSetTempObjectIncluded(void)
-{
- temp_object_included = true;
-}
-
-/*
- * ExecClearTempObjectIncluded
- *
- * Forget about temporary objects
- */
-static void
-ExecClearTempObjectIncluded(void)
-{
- temp_object_included = false;
-}
-
-/* ExecIsTempObjectIncluded
- *
- * Check if a temporary object has been accessed
- */
-bool
-ExecIsTempObjectIncluded(void)
-{
- return temp_object_included;
-}
-
-/*
- * Execute given tuple in the remote relation. We use extended query protocol
- * to avoid repeated planning of the query. So we must pass the column values
- * as parameters while executing the query.
- * This is used by queries using a remote query planning of standard planner.
- */
-void
-ExecRemoteQueryStandard(Relation resultRelationDesc,
- RemoteQueryState *resultRemoteRel,
- TupleTableSlot *slot)
-{
- ExprContext *econtext = resultRemoteRel->ss.ps.ps_ExprContext;
-
- /*
- * Use data row returned by the previous step as a parameters for
- * the main query.
- */
- if (!TupIsNull(slot))
- {
- resultRemoteRel->paramval_len = ExecCopySlotDatarow(slot,
- &resultRemoteRel->paramval_data);
-
- /*
- * The econtext is set only when en_expr is set for execution time
- * evaluation of the target node.
- */
- if (econtext)
- econtext->ecxt_scantuple = slot;
- do_query(resultRemoteRel);
- }
-}
-
-
-void
-RegisterTransactionNodes(int count, void **connections, bool write)
-{
- int i;
- MemoryContext oldcontext = MemoryContextSwitchTo(TopMemoryContext);
-
- for (i = 0; i < count; i++)
- {
- /*
- * Add the node to either read or write participants. If a node is
- * already in the write participant's list, don't add it to the read
- * participant's list. OTOH if a node is currently in the read
- * participant's list, but we are now initiating a write operation on
- * the node, move it to the write participant's list
- */
- if (write)
- {
- XactWriteNodes = list_append_unique(XactWriteNodes, connections[i]);
- XactReadNodes = list_delete(XactReadNodes, connections[i]);
- }
- else
- {
- if (!list_member(XactWriteNodes, connections[i]))
- XactReadNodes = list_append_unique(XactReadNodes, connections[i]);
- }
- }
-
- MemoryContextSwitchTo(oldcontext);
-}
-
-void
-ForgetTransactionNodes(void)
-{
- list_free(XactReadNodes);
- XactReadNodes = NIL;
-
- list_free(XactWriteNodes);
- XactWriteNodes = NIL;
-}
-#endif
-
/*
* Clear per transaction remote information
*/
void
AtEOXact_Remote(void)
{
-#ifdef XCP
PGXCNodeResetParams(true);
-#else
- ExecClearTempObjectIncluded();
- ForgetTransactionNodes();
- clear_RemoteXactState();
-#endif
}
-#ifdef XCP
/*
 * Invoked when the local transaction is about to be committed.
 * If nodestring is specified, commit the specified prepared transaction on the remote
@@ -6571,73 +3636,6 @@ PreCommit_Remote(char *prepareGID, char *nodestring, bool preparedLocalNode)
else
pgxc_node_remote_commit();
}
-#else
-/*
- * Do pre-commit processing for remote nodes which includes Datanodes and
- * Coordinators. If more than one nodes are involved in the transaction write
- * activity, then we must run 2PC. For 2PC, we do the following steps:
- *
- * 1. PREPARE the transaction locally if the local node is involved in the
- * transaction. If local node is not involved, skip this step and go to the
- * next step
- * 2. PREPARE the transaction on all the remote nodes. If any node fails to
- * PREPARE, directly go to step 6
- * 3. Now that all the involved nodes are PREPAREd, we can commit the
- * transaction. We first inform the GTM that the transaction is fully
- * PREPARED and also supply the list of the nodes involved in the
- * transaction
- * 4. COMMIT PREPARED the transaction on all the remote nodes and then
- * finally COMMIT PREPARED on the local node if its involved in the
- * transaction and start a new transaction so that normal commit processing
- * works unchanged. Go to step 5.
- * 5. Return and let the normal commit processing resume
- * 6. Abort by ereporting the error and let normal abort-processing take
- * charge.
- */
-void
-PreCommit_Remote(char *prepareGID, bool preparedLocalNode)
-{
- if (!preparedLocalNode)
- PrePrepare_Remote(prepareGID, preparedLocalNode, false);
-
- /*
- * OK, everything went fine. At least one remote node is in PREPARED state
- * and the transaction is successfully prepared on all the involved nodes.
- * Now we are ready to commit the transaction. We need a new GXID to send
- * down the remote nodes to execute the forthcoming COMMIT PREPARED
- * command. So grab one from the GTM and track it. It will be closed along
- * with the main transaction at the end.
- */
- pgxc_node_remote_commit();
-
- /*
- * If the transaction is not committed successfully on all the involved
- * nodes, it will remain in PREPARED state on those nodes. Such transaction
- * should be reported as live in the snapshots. So we must not close the
- * transaction on the GTM. We just record the state of the transaction in
- * the GTM and flag a warning for applications to take care of such
- * in-doubt transactions
- */
- if (remoteXactState.status == RXACT_PART_COMMITTED)
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Failed to commit the transaction on one or more nodes")));
-
-
- Assert(remoteXactState.status == RXACT_COMMITTED ||
- remoteXactState.status == RXACT_NONE);
-
- clear_RemoteXactState();
-
- /*
- * The transaction is now successfully committed on all the remote nodes.
- * (XXX How about the local node ?). It can now be cleaned up from the GTM
- * as well
- */
- if (!PersistentConnections)
- release_handles();
-}
-#endif
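
The removed comment above spells out the two-phase commit sequence step by step. Condensed into code, the commit-side decision looks roughly like the sketch below. This is an illustration only: the function names are the ones used elsewhere in this file, but the real control flow is driven from the transaction machinery in xact.c and handles more states than shown here.

static void
remote_commit_sketch(char *prepareGID, bool localWrite)
{
	if (IsTwoPhaseCommitRequired(localWrite))
	{
		/* Steps 1-2: PREPARE on every node that wrote data */
		char	   *nodestring = PrePrepare_Remote(prepareGID, localWrite, true);

		/* Steps 3-5: record the prepared nodes, then COMMIT PREPARED them */
		PreCommit_Remote(prepareGID, nodestring, localWrite);
	}
	else
	{
		/* Single write node: a plain remote commit is sufficient */
		pgxc_node_remote_commit();
	}
}
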
/*
* Do abort processing for the transaction. We must abort the transaction on
@@ -6655,7 +3653,6 @@ PreCommit_Remote(char *prepareGID, bool preparedLocalNode)
bool
PreAbort_Remote(void)
{
-#ifdef XCP
/*
* We are about to abort current transaction, and there could be an
* unexpected error leaving the node connection in some state requiring
@@ -6757,60 +3754,6 @@ PreAbort_Remote(void)
}
pfree_pgxc_all_handles(all_handles);
-#else
- if (IS_PGXC_LOCAL_COORDINATOR)
- {
- cancel_query();
- clear_all_data();
- }
-
- if (remoteXactState.status == RXACT_COMMITTED)
- return false;
-
- if (remoteXactState.status == RXACT_PART_COMMITTED)
- {
- /*
- * In this case transaction is partially committed, pick up the list of nodes
- * prepared and not committed and register them on GTM as if it is an explicit 2PC.
- * This permits to keep the transaction alive in snapshot and other transaction
- * don't have any side effects with partially committed transactions
- */
- char *nodestring = NULL;
-
- /*
- * Get the list of nodes in prepared state; such nodes have not
- * committed successfully
- */
- nodestring = pgxc_node_get_nodelist(remoteXactState.preparedLocalNode);
- Assert(nodestring);
-
- /* Save the node list and gid on GTM. */
- StartPreparedTranGTM(GetTopGlobalTransactionId(),
- remoteXactState.prepareGID,
- nodestring);
-
- /* Finish to prepare the transaction. */
- PrepareTranGTM(GetTopGlobalTransactionId());
- clear_RemoteXactState();
- return false;
- }
- else
- {
- /*
- * The transaction is neither partially nor fully committed. We can safely
- * abort such a transaction
- */
- if (remoteXactState.status == RXACT_NONE)
- init_RemoteXactState(false);
-
- pgxc_node_remote_abort();
- }
-
- clear_RemoteXactState();
-
- if (!PersistentConnections)
- release_handles();
-#endif
return true;
}
@@ -6831,7 +3774,6 @@ PreAbort_Remote(void)
char *
PrePrepare_Remote(char *prepareGID, bool localNode, bool implicit)
{
-#ifdef XCP
/* Always include local node if running explicit prepare */
char *nodestring;
@@ -6865,27 +3807,8 @@ PrePrepare_Remote(char *prepareGID, bool localNode, bool implicit)
return NULL;
}
return nodestring;
-#else
- init_RemoteXactState(false);
- /*
- * PREPARE the transaction on all nodes including remote nodes as well as
- * local node. Any errors will be reported via ereport and the transaction
- * will be aborted accordingly.
- */
- pgxc_node_remote_prepare(prepareGID);
-
- if (preparedNodes)
- pfree(preparedNodes);
- preparedNodes = NULL;
-
- if (!implicit)
- preparedNodes = pgxc_node_get_nodelist(true);
-
- return preparedNodes;
-#endif
}
-#ifdef XCP
/*
* Invoked immediately after local node is prepared.
* Notify GTM about completed prepare.
@@ -6896,87 +3819,7 @@ PostPrepare_Remote(char *prepareGID, bool implicit)
if (!implicit)
PrepareTranGTM(GetTopGlobalTransactionId());
}
-#else
-void
-PostPrepare_Remote(char *prepareGID, char *nodestring, bool implicit)
-{
- remoteXactState.preparedLocalNode = true;
-
- /*
- * If this is an explicit PREPARE request by the client, we must also save
- * the list of nodes involved in this transaction on the GTM for later use
- */
- if (!implicit)
- {
- /* Save the node list and gid on GTM. */
- StartPreparedTranGTM(GetTopGlobalTransactionId(),
- prepareGID,
- nodestring);
-
- /* Finish to prepare the transaction. */
- PrepareTranGTM(GetTopGlobalTransactionId());
- clear_RemoteXactState();
- }
-
- /* Now forget the transaction nodes */
- ForgetTransactionNodes();
-}
-#endif
-
-#ifndef XCP
-/*
- * Return the list of nodes where the prepared transaction is not yet committed
- */
-static char *
-pgxc_node_get_nodelist(bool localNode)
-{
- int i;
- char *nodestring = NULL, *nodename;
-
- for (i = 0; i < remoteXactState.numWriteRemoteNodes; i++)
- {
- RemoteXactNodeStatus status = remoteXactState.remoteNodeStatus[i];
- PGXCNodeHandle *conn = remoteXactState.remoteNodeHandles[i];
-
- if (status != RXACT_NODE_COMMITTED)
- {
- nodename = get_pgxc_nodename(conn->nodeoid);
- if (!nodestring)
- {
- nodestring = (char *) MemoryContextAlloc(TopMemoryContext, strlen(nodename) + 1);
- sprintf(nodestring, "%s", nodename);
- }
- else
- {
- nodestring = (char *) repalloc(nodestring,
- strlen(nodename) + strlen(nodestring) + 2);
- sprintf(nodestring, "%s,%s", nodestring, nodename);
- }
- }
- }
-
- /* Case of a single Coordinator */
- if (localNode && PGXCNodeId >= 0)
- {
- if (!nodestring)
- {
- nodestring = (char *) MemoryContextAlloc(TopMemoryContext, strlen(PGXCNodeName) + 1);
- sprintf(nodestring, "%s", PGXCNodeName);
- }
- else
- {
- nodestring = (char *) repalloc(nodestring,
- strlen(PGXCNodeName) + strlen(nodestring) + 2);
- sprintf(nodestring, "%s,%s", nodestring, PGXCNodeName);
- }
- }
-
- return nodestring;
-}
-#endif
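
A side note on the removed pgxc_node_get_nodelist(): it grows the string with sprintf(nodestring, "%s,%s", nodestring, ...), where source and destination overlap, which is undefined behaviour in C. A StringInfo-based equivalent avoids the overlap and the repeated repalloc; the sketch below is illustrative only and is not part of the patch:

static char *
build_nodelist_sketch(List *nodenames)
{
	StringInfoData buf;
	ListCell   *lc;

	initStringInfo(&buf);
	foreach(lc, nodenames)
	{
		/* comma-separate the node names, e.g. "dn1,dn2,coord1" */
		if (buf.len > 0)
			appendStringInfoChar(&buf, ',');
		appendStringInfoString(&buf, (const char *) lfirst(lc));
	}
	return buf.data;
}
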
-
-#ifdef XCP
/*
* Returns true if 2PC is required for consistent commit: if there was write
* activity on two or more nodes within current transaction.
@@ -7027,84 +3870,6 @@ IsTwoPhaseCommitRequired(bool localWrite)
}
return false;
}
-#else
-bool
-IsTwoPhaseCommitRequired(bool localWrite)
-{
-
- if ((list_length(XactWriteNodes) > 1) ||
- ((list_length(XactWriteNodes) == 1) && localWrite))
- {
- if (ExecIsTempObjectIncluded())
- {
- elog(DEBUG1, "Transaction accessed temporary objects - "
- "2PC will not be used and that can lead to data inconsistencies "
- "in case of failures");
- return false;
- }
- return true;
- }
- else
- return false;
-}
-
-static void
-clear_RemoteXactState(void)
-{
- /* Clear the previous state */
- remoteXactState.numWriteRemoteNodes = 0;
- remoteXactState.numReadRemoteNodes = 0;
- remoteXactState.status = RXACT_NONE;
- remoteXactState.commitXid = InvalidGlobalTransactionId;
- remoteXactState.prepareGID[0] = '\0';
-
- if ((remoteXactState.remoteNodeHandles == NULL) ||
- (remoteXactState.maxRemoteNodes < (NumDataNodes + NumCoords)))
- {
- remoteXactState.remoteNodeHandles = (PGXCNodeHandle **)
- realloc (remoteXactState.remoteNodeHandles,
- sizeof (PGXCNodeHandle *) * (NumDataNodes + NumCoords));
- remoteXactState.remoteNodeStatus = (RemoteXactNodeStatus *)
- realloc (remoteXactState.remoteNodeStatus,
- sizeof (RemoteXactNodeStatus) * (NumDataNodes + NumCoords));
- remoteXactState.maxRemoteNodes = NumDataNodes + NumCoords;
- }
-
- if (remoteXactState.remoteNodeHandles)
- memset(remoteXactState.remoteNodeHandles, 0,
- sizeof (PGXCNodeHandle *) * (NumDataNodes + NumCoords));
- if (remoteXactState.remoteNodeStatus)
- memset(remoteXactState.remoteNodeStatus, 0,
- sizeof (RemoteXactNodeStatus) * (NumDataNodes + NumCoords));
-}
-
-static void
-init_RemoteXactState(bool preparedLocalNode)
-{
- int write_conn_count, read_conn_count;
- PGXCNodeHandle **connections;
-
- clear_RemoteXactState();
-
- remoteXactState.preparedLocalNode = preparedLocalNode;
- connections = remoteXactState.remoteNodeHandles;
-
- Assert(connections);
-
- /*
- * First get information about all the nodes involved in this transaction
- */
- write_conn_count = pgxc_get_transaction_nodes(connections,
- NumDataNodes + NumCoords, true);
- remoteXactState.numWriteRemoteNodes = write_conn_count;
-
- read_conn_count = pgxc_get_transaction_nodes(connections + write_conn_count,
- NumDataNodes + NumCoords - write_conn_count, false);
- remoteXactState.numReadRemoteNodes = read_conn_count;
-
-}
-#endif
-
/*
* Execute COMMIT/ABORT PREPARED issued by the remote client on remote nodes.
@@ -7115,18 +3880,9 @@ init_RemoteXactState(bool preparedLocalNode)
bool
FinishRemotePreparedTransaction(char *prepareGID, bool commit)
{
-#ifdef XCP
char *nodestring;
GlobalTransactionId gxid, prepare_gxid;
bool prepared_local = false;
-#else
- char *nodename, *nodestring;
- List *nodelist = NIL, *coordlist = NIL;
- GlobalTransactionId gxid, prepare_gxid;
- PGXCNodeAllHandles *pgxc_handles;
- bool prepared_local = false;
- int i;
-#endif
/*
* Please note that with xc_maintenance_mode = on, COMMIT/ROLLBACK PREPARED will not
@@ -7166,7 +3922,6 @@ FinishRemotePreparedTransaction(char *prepareGID, bool commit)
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("prepared transaction with identifier \"%s\" does not exist",
prepareGID)));
-#ifdef XCP
prepared_local = pgxc_node_remote_finish(prepareGID, commit, nodestring,
gxid, prepare_gxid);
@@ -7208,7 +3963,6 @@ pgxc_node_remote_finish(char *prepareGID, bool commit,
List *nodelist = NIL;
List *coordlist = NIL;
int i;
-#endif
/*
* Now based on the nodestring, run COMMIT/ROLLBACK PREPARED command on the
 * remote nodes and also finish the transaction locally if required
@@ -7216,7 +3970,6 @@ pgxc_node_remote_finish(char *prepareGID, bool commit,
nodename = strtok(nodestring, ",");
while (nodename != NULL)
{
-#ifdef XCP
int nodeIndex;
char nodetype;
@@ -7228,23 +3981,6 @@ pgxc_node_remote_finish(char *prepareGID, bool commit,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("PGXC Node %s: object not defined",
nodename)));
-#else
- Oid nodeoid;
- int nodeIndex;
- char nodetype;
-
- nodeoid = get_pgxc_nodeoid(nodename);
-
- if (!OidIsValid(nodeoid))
- ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("PGXC Node %s: object not defined",
- nodename)));
-
- /* Get node type and index */
- nodetype = get_pgxc_nodetype(nodeoid);
- nodeIndex = PGXCNodeGetNodeId(nodeoid, get_pgxc_nodetype(nodeoid));
-#endif
 /* Check whether the requested node is the self-node or not */
if (nodetype == PGXC_NODE_COORDINATOR)
@@ -7260,7 +3996,6 @@ pgxc_node_remote_finish(char *prepareGID, bool commit,
nodename = strtok(NULL, ",");
}
-#ifdef XCP
if (nodelist == NIL && coordlist == NIL)
return prepared_local;
@@ -7357,77 +4092,10 @@ pgxc_node_remote_finish(char *prepareGID, bool commit,
}
pfree_pgxc_all_handles(pgxc_handles);
-#else
- /*
- * Now get handles for all the involved Datanodes and the Coordinators
- */
- pgxc_handles = get_handles(nodelist, coordlist, false);
-
- /*
- * Send GXID (as received above) to the remote nodes.
- if (pgxc_node_begin(pgxc_handles->dn_conn_count,
- pgxc_handles->datanode_handles,
- gxid, false, false))
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Could not begin transaction on Datanodes")));
- */
- RegisterTransactionNodes(pgxc_handles->dn_conn_count,
- (void **) pgxc_handles->datanode_handles, true);
-
- /*
- if (pgxc_node_begin(pgxc_handles->co_conn_count,
- pgxc_handles->coord_handles,
- gxid, false, false))
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Could not begin transaction on coordinators")));
- */
- RegisterTransactionNodes(pgxc_handles->co_conn_count,
- (void **) pgxc_handles->coord_handles, true);
-
- /*
- * Initialize the remoteXactState so that we can use the APIs to take care
- * of commit/abort.
- */
- init_RemoteXactState(prepared_local);
- remoteXactState.commitXid = gxid;
-
- /*
- * At this point, most of the things are set up in remoteXactState except
- * the state information for all the involved nodes. Force that now and we
- * are ready to call the commit/abort API
- */
- strcpy(remoteXactState.prepareGID, prepareGID);
- for (i = 0; i < remoteXactState.numWriteRemoteNodes; i++)
- remoteXactState.remoteNodeStatus[i] = RXACT_NODE_PREPARED;
- remoteXactState.status = RXACT_PREPARED;
-
- if (commit)
- {
- pgxc_node_remote_commit();
- CommitPreparedTranGTM(prepare_gxid, gxid);
- }
- else
- {
- pgxc_node_remote_abort();
- RollbackTranGTM(prepare_gxid);
- RollbackTranGTM(gxid);
- }
-
- /*
- * The following is also only for usual operation. With xc_maintenance_mode = on,
- * no remote operation will be done here and no post-operation work is needed.
- */
- clear_RemoteXactState();
- ForgetTransactionNodes();
-#endif
return prepared_local;
}
-
-#ifdef XCP
/*****************************************************************************
*
* Simplified versions of ExecInitRemoteQuery, ExecRemoteQuery and
@@ -7518,14 +4186,9 @@ ExecRemoteQuery(RemoteQueryState *node)
* Get connections for Datanodes only, utilities and DDLs
* are launched in ExecRemoteUtility
*/
-#ifdef XCP
pgxc_connections = get_exec_connections(node, step->exec_nodes,
step->exec_type,
true);
-#else
- pgxc_connections = get_exec_connections(node, step->exec_nodes,
- step->exec_type);
-#endif
if (step->exec_type == EXEC_ON_DATANODES)
{
@@ -7764,13 +4427,8 @@ pgxc_connections_cleanup(ResponseCombiner *combiner)
if (res == RESPONSE_EOF)
{
struct timeval timeout;
-#ifdef XCP
timeout.tv_sec = END_QUERY_TIMEOUT / 1000;
timeout.tv_usec = (END_QUERY_TIMEOUT % 1000) * 1000;
-#else
- timeout.tv_sec = END_QUERY_TIMEOUT;
- timeout.tv_usec = 0;
-#endif
if (pgxc_node_receive(1, &conn, &timeout))
ereport(ERROR,
@@ -8526,11 +5184,7 @@ ExecFinishInitRemoteSubplan(RemoteSubplanState *node)
if (node->execOnAll)
{
PGXCNodeAllHandles *pgxc_connections;
-#ifdef XCP
pgxc_connections = get_handles(node->execNodes, NIL, false, true);
-#else
- pgxc_connections = get_handles(node->execNodes, NIL, false);
-#endif
combiner->conn_count = pgxc_connections->dn_conn_count;
combiner->connections = pgxc_connections->datanode_handles;
combiner->current_conn = 0;
@@ -9222,20 +5876,13 @@ ExecEndRemoteSubplan(RemoteSubplanState *node)
ValidateAndCloseCombiner(combiner);
pfree(node);
}
-#endif
-
/*
* pgxc_node_report_error
* Throw error from Datanode if any.
*/
-#ifdef XCP
static void
pgxc_node_report_error(ResponseCombiner *combiner)
-#else
-static void
-pgxc_node_report_error(RemoteQueryState *combiner)
-#endif
{
/* If no combiner, nothing to do */
if (!combiner)
@@ -9310,11 +5957,7 @@ get_success_nodes(int node_count, PGXCNodeHandle **handles, char node_type, Stri
void
pgxc_all_success_nodes(ExecNodes **d_nodes, ExecNodes **c_nodes, char **failednodes_msg)
{
-#ifdef XCP
PGXCNodeAllHandles *connections = get_exec_connections(NULL, NULL, EXEC_ON_ALL_NODES, true);
-#else
- PGXCNodeAllHandles *connections = get_exec_connections(NULL, NULL, EXEC_ON_ALL_NODES);
-#endif
StringInfoData failednodes;
initStringInfo(&failednodes);
diff --git a/src/backend/pgxc/pool/pgxcnode.c b/src/backend/pgxc/pool/pgxcnode.c
index b4270b2ba6..cc92b1fc27 100644
--- a/src/backend/pgxc/pool/pgxcnode.c
+++ b/src/backend/pgxc/pool/pgxcnode.c
@@ -221,7 +221,6 @@ InitMultinodeExecutor(bool is_force)
coord_count = 0;
PGXCNodeId = 0;
-#ifdef XCP
MemoryContextSwitchTo(oldcontext);
if (IS_PGXC_COORDINATOR)
@@ -242,38 +241,14 @@ InitMultinodeExecutor(bool is_force)
PGXCNodeId = count + 1;
}
}
-#else
- /* Finally determine which is the node-self */
- for (count = 0; count < NumCoords; count++)
- {
- if (pg_strcasecmp(PGXCNodeName,
- get_pgxc_nodename(co_handles[count].nodeoid)) == 0)
- PGXCNodeId = count + 1;
- }
-
- /*
- * No node-self?
- * PGXCTODO: Change error code
- */
- if (PGXCNodeId == 0)
- ereport(ERROR,
- (errcode(ERRCODE_DATA_EXCEPTION),
- errmsg("Coordinator cannot identify itself")));
-#endif
}
-
/*
* Builds up a connection string
*/
char *
-#ifdef XCP
PGXCNodeConnStr(char *host, int port, char *dbname,
char *user, char *pgoptions, char *remote_type, char *parent_node)
-#else
-PGXCNodeConnStr(char *host, int port, char *dbname,
- char *user, char *pgoptions, char *remote_type)
-#endif
{
char *out,
connstr[1024];
@@ -283,16 +258,10 @@ PGXCNodeConnStr(char *host, int port, char *dbname,
* Build up connection string
* remote type can be Coordinator, Datanode or application.
*/
-#ifdef XCP
num = snprintf(connstr, sizeof(connstr),
"host=%s port=%d dbname=%s user=%s application_name=pgxc sslmode=disable options='-c remotetype=%s -c parentnode=%s %s'",
host, port, dbname, user, remote_type, parent_node,
pgoptions);
-#else
- num = snprintf(connstr, sizeof(connstr),
- "host=%s port=%d dbname=%s user=%s application_name=pgxc options='-c remotetype=%s %s'",
- host, port, dbname, user, remote_type, pgoptions);
-#endif
/* Check for overflow */
if (num > 0 && num < sizeof(connstr))
@@ -332,32 +301,6 @@ PGXCNodeClose(NODE_CONNECTION *conn)
PQfinish((PGconn *) conn);
}
-
-#ifndef XCP
-/*
- * Send SET query to given connection.
- * Query is sent asynchronously and results are consumed
- */
-int
-PGXCNodeSendSetQuery(NODE_CONNECTION *conn, const char *sql_command)
-{
- PGresult *result;
-
- if (!PQsendQuery((PGconn *) conn, sql_command))
- return -1;
-
- /* Consume results from SET commands */
- while ((result = PQgetResult((PGconn *) conn)) != NULL)
- {
- /* TODO: Check that results are of type 'S' */
- PQclear(result);
- }
-
- return 0;
-}
-#endif
-
-
/*
* Checks if connection active
*/
@@ -427,9 +370,7 @@ pgxc_node_all_free(void)
co_handles = NULL;
dn_handles = NULL;
-#ifdef XCP
HandlesInvalidatePending = false;
-#endif
}
/*
@@ -438,23 +379,15 @@ pgxc_node_all_free(void)
* Structure stores state info and I/O buffers
*/
static void
-#ifdef XCP
pgxc_node_init(PGXCNodeHandle *handle, int sock, bool global_session)
-#else
-pgxc_node_init(PGXCNodeHandle *handle, int sock)
-#endif
{
-#ifdef XCP
char *init_str;
-#endif
handle->sock = sock;
handle->transaction_status = 'I';
handle->state = DN_CONNECTION_STATE_IDLE;
-#ifdef XCP
handle->read_only = true;
handle->ck_resp_rollback = false;
-#endif
handle->combiner = NULL;
#ifdef DN_CONNECTION_DEBUG
handle->have_row_desc = false;
@@ -464,7 +397,6 @@ pgxc_node_init(PGXCNodeHandle *handle, int sock)
handle->inStart = 0;
handle->inEnd = 0;
handle->inCursor = 0;
-#ifdef XCP
/*
 * We got a new connection; set the session parameters on the remote node
 * if any are defined. The transaction parameter should be sent after BEGIN
@@ -477,7 +409,6 @@ pgxc_node_init(PGXCNodeHandle *handle, int sock)
pgxc_node_set_query(handle, init_str);
}
}
-#endif
}
@@ -540,9 +471,7 @@ pgxc_node_receive(const int conn_count,
}
retry:
-#ifdef XCP
CHECK_FOR_INTERRUPTS();
-#endif
res_select = select(nfds + 1, &readfds, NULL, NULL, timeout);
if (res_select < 0)
{
@@ -564,10 +493,8 @@ retry:
{
/* Handle timeout */
elog(DEBUG1, "timeout while waiting for response");
-#ifdef XCP
for (i = 0; i < conn_count; i++)
connections[i]->state = DN_CONNECTION_STATE_ERROR_FATAL;
-#endif
return NO_ERROR_OCCURED;
}
@@ -678,11 +605,6 @@ retry:
if (nread < 0)
{
-#ifndef XCP
- /* too noisy */
- if (close_if_error)
- elog(DEBUG1, "dnrd errno = %d", errno);
-#endif
if (errno == EINTR)
goto retry;
/* Some systems return EAGAIN/EWOULDBLOCK for no data */
@@ -867,12 +789,9 @@ get_message(PGXCNodeHandle *conn, int *len, char **msg)
void
release_handles(void)
{
-#ifdef XCP
bool destroy = false;
-#endif
int i;
-#ifdef XCP
if (HandlesInvalidatePending)
{
DoInvalidateRemoteHandles();
@@ -881,10 +800,7 @@ release_handles(void)
/* don't free connection if holding a cluster lock */
if (cluster_ex_lock_held)
- {
return;
- }
-#endif
if (datanode_count == 0 && coord_count == 0)
return;
@@ -900,7 +816,6 @@ release_handles(void)
if (handle->sock != NO_SOCKET)
{
-#ifdef XCP
/*
* Connections at this point should be completely inactive,
 * otherwise abandon them. We cannot allow a connection that has not been
 * cleaned up to be returned to the pool.
elog(DEBUG1, "Connection to Datanode %d has unexpected state %d and will be dropped",
handle->nodeoid, handle->state);
}
-#else
- if (handle->state != DN_CONNECTION_STATE_IDLE)
- elog(DEBUG1, "Connection to Datanode %d has unexpected state %d and will be dropped",
- handle->nodeoid, handle->state);
-#endif
pgxc_node_free(handle);
}
}
-#ifdef XCP
if (IS_PGXC_COORDINATOR)
{
-#endif
- /* Collect Coordinator handles */
- for (i = 0; i < NumCoords; i++)
- {
- PGXCNodeHandle *handle = &co_handles[i];
-
- if (handle->sock != NO_SOCKET)
+ /* Collect Coordinator handles */
+ for (i = 0; i < NumCoords; i++)
{
-#ifdef XCP
- /*
- * Connections at this point should be completely inactive,
- * otherwise abaandon them. We can not allow not cleaned up
- * connection is returned to pool.
- */
- if (handle->state != DN_CONNECTION_STATE_IDLE ||
- handle->transaction_status != 'I')
+ PGXCNodeHandle *handle = &co_handles[i];
+
+ if (handle->sock != NO_SOCKET)
{
- destroy = true;
- elog(DEBUG1, "Connection to Coordinator %d has unexpected state %d and will be dropped",
- handle->nodeoid, handle->state);
+ /*
+ * Connections at this point should be completely inactive,
+ * otherwise abandon them. We cannot allow a connection that has not been
+ * cleaned up to be returned to the pool.
+ */
+ if (handle->state != DN_CONNECTION_STATE_IDLE ||
+ handle->transaction_status != 'I')
+ {
+ destroy = true;
+ elog(DEBUG1, "Connection to Coordinator %d has unexpected state %d and will be dropped",
+ handle->nodeoid, handle->state);
+ }
+ pgxc_node_free(handle);
}
-#else
- if (handle->state != DN_CONNECTION_STATE_IDLE)
- elog(DEBUG1, "Connection to Coordinator %d has unexpected state %d and will be dropped",
- handle->nodeoid, handle->state);
-#endif
- pgxc_node_free(handle);
}
}
-#ifdef XCP
- }
-#endif
/* And finally release all the connections on pooler */
-#ifdef XCP
PoolManagerReleaseConnections(destroy);
-#else
- PoolManagerReleaseConnections();
-#endif
datanode_count = 0;
coord_count = 0;
}
-#ifndef XCP
-/*
- * cancel a running query due to error while processing rows
- */
-void
-cancel_query(void)
-{
- int i;
- int dn_cancel[NumDataNodes];
- int co_cancel[NumCoords];
- int dn_count = 0;
- int co_count = 0;
-
- if (datanode_count == 0 && coord_count == 0)
- return;
-
- /* Collect Datanodes handles */
- for (i = 0; i < NumDataNodes; i++)
- {
- PGXCNodeHandle *handle = &dn_handles[i];
-
- if (handle->sock != NO_SOCKET)
- {
- if (handle->state == DN_CONNECTION_STATE_COPY_IN ||
- handle->state == DN_CONNECTION_STATE_COPY_OUT)
- {
- DataNodeCopyEnd(handle, true);
- }
- else
- {
- if (handle->state != DN_CONNECTION_STATE_IDLE)
- {
- dn_cancel[dn_count++] = PGXCNodeGetNodeId(handle->nodeoid,
- PGXC_NODE_DATANODE);
- }
- }
- }
- }
-
- /* Collect Coordinator handles */
- for (i = 0; i < NumCoords; i++)
- {
- PGXCNodeHandle *handle = &co_handles[i];
-
- if (handle->sock != NO_SOCKET)
- {
- if (handle->state == DN_CONNECTION_STATE_COPY_IN ||
- handle->state == DN_CONNECTION_STATE_COPY_OUT)
- {
- DataNodeCopyEnd(handle, true);
- }
- else
- {
- if (handle->state != DN_CONNECTION_STATE_IDLE)
- {
- co_cancel[dn_count++] = PGXCNodeGetNodeId(handle->nodeoid,
- PGXC_NODE_COORDINATOR);
- }
- }
- }
- }
-
- PoolManagerCancelQuery(dn_count, dn_cancel, co_count, co_cancel);
-
- /*
- * Read responses from the nodes to whom we sent the cancel command. This
- * ensures that there are no pending messages left on the connection
- */
- for (i = 0; i < NumDataNodes; i++)
- {
- PGXCNodeHandle *handle = &dn_handles[i];
-
- if ((handle->sock != NO_SOCKET) && (handle->state != DN_CONNECTION_STATE_IDLE))
- {
- pgxc_node_flush_read(handle);
- handle->state = DN_CONNECTION_STATE_IDLE;
- }
- }
-
- for (i = 0; i < NumCoords; i++)
- {
- PGXCNodeHandle *handle = &co_handles[i];
-
- if (handle->sock != NO_SOCKET && handle->state != DN_CONNECTION_STATE_IDLE)
- {
- pgxc_node_flush_read(handle);
- handle->state = DN_CONNECTION_STATE_IDLE;
- }
- }
-}
-/*
- * This method won't return until all network buffers are empty
- * To ensure all data in all network buffers is read and wasted
- */
-void
-clear_all_data(void)
-{
- int i;
-
- if (datanode_count == 0 && coord_count == 0)
- return;
-
- /* Collect Datanodes handles */
- for (i = 0; i < NumDataNodes; i++)
- {
- PGXCNodeHandle *handle = &dn_handles[i];
-
- if (handle->sock != NO_SOCKET && handle->state != DN_CONNECTION_STATE_IDLE)
- {
- pgxc_node_flush_read(handle);
- handle->state = DN_CONNECTION_STATE_IDLE;
- }
- /* Clear any previous error messages */
- handle->error = NULL;
- }
-
- /* Collect Coordinator handles */
- for (i = 0; i < NumCoords; i++)
- {
- PGXCNodeHandle *handle = &co_handles[i];
-
- if (handle->sock != NO_SOCKET && handle->state != DN_CONNECTION_STATE_IDLE)
- {
- pgxc_node_flush_read(handle);
- handle->state = DN_CONNECTION_STATE_IDLE;
- }
- /* Clear any previous error messages */
- handle->error = NULL;
- }
-}
-#endif
-
/*
 * Ensure the specified amount of data can fit into the incoming buffer and
* increase it if necessary
@@ -1389,8 +1153,6 @@ pgxc_node_send_parse(PGXCNodeHandle * handle, const char* statement,
return 0;
}
-
-#ifdef XCP
/*
* Send PLAN message down to the Data node
*/
@@ -1470,8 +1232,6 @@ pgxc_node_send_plan(PGXCNodeHandle * handle, const char *statement,
return 0;
}
-#endif
-
/*
* Send BIND message down to the Datanode
@@ -1758,13 +1518,8 @@ pgxc_node_send_query_extended(PGXCNodeHandle *handle, const char *query,
if (fetch_size >= 0)
if (pgxc_node_send_execute(handle, portal, fetch_size))
return EOF;
-#ifdef XCP
if (pgxc_node_send_flush(handle))
return EOF;
-#else
- if (pgxc_node_send_sync(handle))
- return EOF;
-#endif
return 0;
}
@@ -1801,13 +1556,11 @@ pgxc_node_flush_read(PGXCNodeHandle *handle)
if (handle == NULL)
return;
-#ifdef XCP
/*
* Before reading input send Sync to make sure
* we will eventually receive ReadyForQuery
*/
pgxc_node_send_sync(handle);
-#endif
while(true)
{
read_result = pgxc_node_read_data(handle, false);
@@ -2030,9 +1783,7 @@ pgxc_node_send_timestamp(PGXCNodeHandle *handle, TimestampTz timestamp)
void
add_error_message(PGXCNodeHandle *handle, const char *message)
{
-#ifdef XCP
elog(LOG, "Connection error %s", message);
-#endif
handle->transaction_status = 'E';
if (handle->error)
{
@@ -2042,8 +1793,6 @@ add_error_message(PGXCNodeHandle *handle, const char *message)
handle->error = pstrdup(message);
}
-
-#ifdef XCP
static int load_balancer = 0;
/*
 * Get one of the specified nodes to query a replicated data source.
@@ -2116,11 +1865,7 @@ get_any_handle(List *datanodelist)
(errcode(ERRCODE_INSUFFICIENT_RESOURCES),
errmsg("Failed to get pooled connections")));
}
-#ifdef XCP
pgxc_node_init(&dn_handles[node], fds[0], true);
-#else
- pgxc_node_init(&dn_handles[node], fds[0]);
-#endif
datanode_count++;
/*
@@ -2138,8 +1883,6 @@ get_any_handle(List *datanodelist)
/* Keep compiler quiet */
return NULL;
}
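
The load_balancer counter declared above implements a simple round-robin choice among the replicas of a replicated table. Reduced to its core, the selection looks like the sketch below; the real get_any_handle() additionally has to deal with pooled connections and error handling, so this is illustrative only:

static int
pick_replica_sketch(const int *candidates, int ncandidates)
{
	/* start from a rotating offset so reads are spread over the replicas */
	int			chosen = candidates[load_balancer % ncandidates];

	load_balancer++;			/* next call starts from the following node */
	return chosen;
}
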
-#endif
-
/*
 * For the specified list, return an array of PGXCNodeHandles
@@ -2151,11 +1894,7 @@ get_any_handle(List *datanodelist)
 * Coordinator fds are returned only if the transaction uses DDL
*/
PGXCNodeAllHandles *
-#ifdef XCP
get_handles(List *datanodelist, List *coordlist, bool is_coord_only_query, bool is_global_session)
-#else
-get_handles(List *datanodelist, List *coordlist, bool is_coord_only_query)
-#endif
{
PGXCNodeAllHandles *result;
ListCell *node_list_item;
@@ -2166,13 +1905,11 @@ get_handles(List *datanodelist, List *coordlist, bool is_coord_only_query)
/* index of the result array */
int i = 0;
-#ifdef XCP
if (HandlesInvalidatePending)
if (DoInvalidateRemoteHandles())
ereport(ERROR,
(errcode(ERRCODE_QUERY_CANCELED),
errmsg("canceling transaction due to cluster configuration reset by administrator command")));
-#endif
result = (PGXCNodeAllHandles *) palloc(sizeof(PGXCNodeAllHandles));
if (!result)
@@ -2364,11 +2101,7 @@ get_handles(List *datanodelist, List *coordlist, bool is_coord_only_query)
}
node_handle = &dn_handles[node];
-#ifdef XCP
pgxc_node_init(node_handle, fdsock, is_global_session);
-#else
- pgxc_node_init(node_handle, fdsock);
-#endif
dn_handles[node] = *node_handle;
datanode_count++;
}
@@ -2389,11 +2122,7 @@ get_handles(List *datanodelist, List *coordlist, bool is_coord_only_query)
}
node_handle = &co_handles[node];
-#ifdef XCP
pgxc_node_init(node_handle, fdsock, is_global_session);
-#else
- pgxc_node_init(node_handle, fdsock);
-#endif
co_handles[node] = *node_handle;
coord_count++;
}
@@ -2410,8 +2139,6 @@ get_handles(List *datanodelist, List *coordlist, bool is_coord_only_query)
return result;
}
-
-#ifdef XCP
PGXCNodeAllHandles *
get_current_handles(void)
{
@@ -2465,8 +2192,6 @@ get_current_handles(void)
return result;
}
-#endif
-
/* Free PGXCNodeAllHandles structure */
void
@@ -2485,7 +2210,6 @@ pfree_pgxc_all_handles(PGXCNodeAllHandles *pgxc_handles)
pfree(pgxc_handles);
}
-#ifdef XCP
/*
* PGXCNode_getNodeId
* Look at the data cached for handles and return node position
@@ -2530,46 +2254,6 @@ PGXCNodeGetNodeId(Oid nodeoid, char *node_type)
*node_type = PGXC_NODE_NONE;
return -1;
}
-#else
-/*
- * PGXCNode_getNodeId
- * Look at the data cached for handles and return node position
- */
-int
-PGXCNodeGetNodeId(Oid nodeoid, char node_type)
-{
- PGXCNodeHandle *handles;
- int num_nodes, i;
- int res = 0;
-
- switch (node_type)
- {
- case PGXC_NODE_COORDINATOR:
- num_nodes = NumCoords;
- handles = co_handles;
- break;
- case PGXC_NODE_DATANODE:
- num_nodes = NumDataNodes;
- handles = dn_handles;
- break;
- default:
- /* Should not happen */
- Assert(0);
- return res;
- }
-
- /* Look into the handles and return correct position in array */
- for (i = 0; i < num_nodes; i++)
- {
- if (handles[i].nodeoid == nodeoid)
- {
- res = i;
- break;
- }
- }
- return res;
-}
-#endif
/*
* PGXCNode_getNodeOid
@@ -2613,47 +2297,32 @@ pgxc_node_str(PG_FUNCTION_ARGS)
* Return node position in handles array
*/
int
-#ifdef XCP
PGXCNodeGetNodeIdFromName(char *node_name, char *node_type)
-#else
-PGXCNodeGetNodeIdFromName(char *node_name, char node_type)
-#endif
{
char *nm;
Oid nodeoid;
if (node_name == NULL)
-#ifdef XCP
{
if (node_type)
*node_type = PGXC_NODE_NONE;
return -1;
}
-#else
- return -1;
-#endif
nm = str_tolower(node_name, strlen(node_name), DEFAULT_COLLATION_OID);
nodeoid = get_pgxc_nodeoid(nm);
pfree(nm);
if (!OidIsValid(nodeoid))
-#ifdef XCP
{
if (node_type)
*node_type = PGXC_NODE_NONE;
return -1;
}
-#else
- return -1;
-#endif
return PGXCNodeGetNodeId(nodeoid, node_type);
}
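
Intended use of the lookup above: a negative result together with PGXC_NODE_NONE signals an unknown node name. A short sketch, with a hypothetical node name:

static void
lookup_node_sketch(void)
{
	char		nodetype = PGXC_NODE_DATANODE;
	char		name[] = "dn_example";	/* hypothetical node name */
	int			idx = PGXCNodeGetNodeIdFromName(name, &nodetype);

	if (idx < 0 || nodetype == PGXC_NODE_NONE)
		elog(ERROR, "PGXC node \"%s\" is not defined", name);
}
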
-
-#ifdef XCP
-
static List *
paramlist_delete_param(List *param_list, const char *name)
{
@@ -2983,4 +2652,3 @@ DoInvalidateRemoteHandles(void)
return result;
}
-#endif
diff --git a/src/backend/pgxc/pool/poolmgr.c b/src/backend/pgxc/pool/poolmgr.c
index c02803634b..35c00d64c2 100644
--- a/src/backend/pgxc/pool/poolmgr.c
+++ b/src/backend/pgxc/pool/poolmgr.c
@@ -66,18 +66,12 @@
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
-#ifdef XCP
#include "pgxc/pause.h"
#include "storage/procarray.h"
-#endif
/* Configuration options */
-#ifdef XCP
int PoolConnKeepAlive = 600;
int PoolMaintenanceTimeout = 30;
-#else
-int MinPoolSize = 1;
-#endif
int MaxPoolSize = 100;
int PoolerPort = 6667;
@@ -94,14 +88,12 @@ typedef struct
int port;
} PGXCNodeConnectionInfo;
-#ifdef XCP
/* Handle to the pool manager (Session's side) */
typedef struct
{
/* communication channel */
PoolPort port;
} PoolHandle;
-#endif
/* The root memory context */
static MemoryContext PoolerMemoryContext = NULL;
@@ -133,15 +125,6 @@ static void agent_init(PoolAgent *agent, const char *database, const char *user_
static void agent_destroy(PoolAgent *agent);
static void agent_create(void);
static void agent_handle_input(PoolAgent *agent, StringInfo s);
-#ifndef XCP
-static int agent_session_command(PoolAgent *agent,
- const char *set_command,
- PoolCommandType command_type);
-static int agent_set_command(PoolAgent *agent,
- const char *set_command,
- PoolCommandType command_type);
-static int agent_temp_command(PoolAgent *agent);
-#endif
static DatabasePool *create_database_pool(const char *database, const char *user_name, const char *pgoptions);
static void insert_database_pool(DatabasePool *pool);
static int destroy_database_pool(const char *database, const char *user_name);
@@ -149,15 +132,9 @@ static void reload_database_pools(PoolAgent *agent);
static DatabasePool *find_database_pool(const char *database, const char *user_name, const char *pgoptions);
static DatabasePool *remove_database_pool(const char *database, const char *user_name);
static int *agent_acquire_connections(PoolAgent *agent, List *datanodelist, List *coordlist);
-#ifndef XCP
-static int send_local_commands(PoolAgent *agent, List *datanodelist, List *coordlist);
-#endif
static int cancel_query_on_connections(PoolAgent *agent, List *datanodelist, List *coordlist);
static PGXCNodePoolSlot *acquire_connection(DatabasePool *dbPool, Oid node);
static void agent_release_connections(PoolAgent *agent, bool force_destroy);
-#ifndef XCP
-static void agent_reset_session(PoolAgent *agent);
-#endif
static void release_connection(DatabasePool *dbPool, PGXCNodePoolSlot *slot,
Oid node, bool force_destroy);
static void destroy_slot(PGXCNodePoolSlot *slot);
@@ -175,19 +152,15 @@ static char *build_node_conn_str(Oid node, DatabasePool *dbPool);
/* Signal handlers */
static void pooler_die(SIGNAL_ARGS);
static void pooler_quickdie(SIGNAL_ARGS);
-#ifdef XCP
static void PoolManagerConnect(const char *database, const char *user_name,
const char *pgoptions);
static void pooler_sighup(SIGNAL_ARGS);
static bool shrink_pool(DatabasePool *pool);
static void pools_maintenance(void);
-#endif
/*
* Flags set by interrupt handlers for later service in the main loop.
*/
-#ifdef XCP
static volatile sig_atomic_t got_SIGHUP = false;
-#endif
static volatile sig_atomic_t shutdown_requested = false;
void
@@ -237,11 +210,7 @@ PoolManagerInit()
pqsignal(SIGINT, pooler_die);
pqsignal(SIGTERM, pooler_die);
pqsignal(SIGQUIT, pooler_quickdie);
-#ifdef XCP
pqsignal(SIGHUP, pooler_sighup);
-#else
- pqsignal(SIGHUP, SIG_IGN);
-#endif
/* TODO other signal handlers */
/* We allow SIGQUIT (quickdie) at all times */
@@ -363,30 +332,18 @@ PoolManagerDestroy(void)
return status;
}
-
-#ifdef XCP
/*
* Connect to the pooler process
*/
static void
-#else
-/*
- * Get handle to pool manager
- * Invoked from Postmaster's main loop just before forking off new session
- * Returned PoolHandle structure will be inherited by session process
- */
-PoolHandle *
-#endif
GetPoolManagerHandle(void)
{
PoolHandle *handle;
int fdsock;
-#ifdef XCP
if (poolHandle)
/* already connected */
return;
-#endif
#ifdef HAVE_UNIX_SOCKETS
if (Unix_socket_directories)
@@ -453,9 +410,6 @@ GetPoolManagerHandle(void)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
-#ifndef XCP
- return NULL;
-#endif
}
handle->port.fdsock = fdsock;
@@ -463,26 +417,8 @@ GetPoolManagerHandle(void)
handle->port.RecvPointer = 0;
handle->port.SendPointer = 0;
-#ifdef XCP
poolHandle = handle;
-#else
- return handle;
-#endif
-}
-
-
-#ifndef XCP
-/*
- * XXX May create on_proc_exit callback instead
- */
-void
-PoolManagerCloseHandle(PoolHandle *handle)
-{
- close(Socket(handle->port));
- free(handle);
- handle = NULL;
}
-#endif
/*
* Create agent
@@ -535,11 +471,6 @@ agent_create(void)
agent->coord_conn_oids = NULL;
agent->dn_connections = NULL;
agent->coord_connections = NULL;
-#ifndef XCP
- agent->session_params = NULL;
- agent->local_params = NULL;
- agent->is_temp = false;
-#endif
agent->pid = 0;
/* Append new agent to the list */
@@ -608,7 +539,6 @@ char *session_options(void)
* Associate session with specified database and respective connection pool
* Invoked from Session process
*/
-#ifdef XCP
static void
PoolManagerConnect(const char *database, const char *user_name,
const char *pgoptions)
@@ -690,59 +620,6 @@ PoolManagerConnect(const char *database, const char *user_name,
pool_putbytes(&poolHandle->port, "\0", 1);
pool_flush(&poolHandle->port);
}
-#else
-void
-PoolManagerConnect(PoolHandle *handle,
- const char *database, const char *user_name,
- char *pgoptions)
-{
- int n32;
- char msgtype = 'c';
-
- Assert(handle);
- Assert(database);
- Assert(user_name);
-
- /* Save the handle */
- poolHandle = handle;
-
- /* Message type */
- pool_putbytes(&handle->port, &msgtype, 1);
-
- /* Message length */
- n32 = htonl(strlen(database) + strlen(user_name) + strlen(pgoptions) + 23);
- pool_putbytes(&handle->port, (char *) &n32, 4);
-
- /* PID number */
- n32 = htonl(MyProcPid);
- pool_putbytes(&handle->port, (char *) &n32, 4);
-
- /* Length of Database string */
- n32 = htonl(strlen(database) + 1);
- pool_putbytes(&handle->port, (char *) &n32, 4);
-
- /* Send database name followed by \0 terminator */
- pool_putbytes(&handle->port, database, strlen(database) + 1);
- pool_flush(&handle->port);
-
- /* Length of user name string */
- n32 = htonl(strlen(user_name) + 1);
- pool_putbytes(&handle->port, (char *) &n32, 4);
-
- /* Send user name followed by \0 terminator */
- pool_putbytes(&handle->port, user_name, strlen(user_name) + 1);
- pool_flush(&handle->port);
-
- /* Length of pgoptions string */
- n32 = htonl(strlen(pgoptions) + 1);
- pool_putbytes(&handle->port, (char *) &n32, 4);
-
- /* Send pgoptions followed by \0 terminator */
- pool_putbytes(&handle->port, pgoptions, strlen(pgoptions) + 1);
- pool_flush(&handle->port);
-
-}
-#endif
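
The variant of PoolManagerConnect() removed above frames its 'c' (connect) message as: a one-byte message type, a 4-byte total length, the backend PID, and then each string sent as a 4-byte length (including the terminating '\0') followed by the bytes. A small helper capturing the per-string part might look like the sketch below; the real code inlines these calls, so this is illustrative only:

static void
pool_put_counted_string_sketch(PoolPort *port, const char *s)
{
	/* 4-byte network-order length (including the '\0'), then the bytes */
	int			n32 = htonl(strlen(s) + 1);

	pool_putbytes(port, (char *) &n32, 4);
	pool_putbytes(port, s, strlen(s) + 1);
}
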
/*
* Reconnect to pool manager
@@ -751,142 +628,15 @@ PoolManagerConnect(PoolHandle *handle,
void
PoolManagerReconnect(void)
{
-#ifdef XCP
/* Connected, disconnect */
if (poolHandle)
PoolManagerDisconnect();
PoolManagerConnect(get_database_name(MyDatabaseId), GetClusterUserName(),
session_options());
-#else
- PoolHandle *handle;
-
- Assert(poolHandle);
-
- PoolManagerDisconnect();
- handle = GetPoolManagerHandle();
- PoolManagerConnect(handle,
- get_database_name(MyDatabaseId),
- GetUserNameFromId(GetUserId()),
- session_options());
-#endif
-}
-
-
-#ifndef XCP
-int
-PoolManagerSetCommand(PoolCommandType command_type, const char *set_command)
-{
- int n32, res;
- char msgtype = 's';
-
- Assert(poolHandle);
-
- /*
- * If SET LOCAL is in use, flag current transaction as using
- * transaction-block related parameters with pooler agent.
- */
- if (command_type == POOL_CMD_LOCAL_SET)
- SetCurrentLocalParamStatus(true);
-
- /* Message type */
- pool_putbytes(&poolHandle->port, &msgtype, 1);
-
- /* Message length */
- if (set_command)
- n32 = htonl(strlen(set_command) + 13);
- else
- n32 = htonl(12);
-
- pool_putbytes(&poolHandle->port, (char *) &n32, 4);
-
- /* LOCAL or SESSION parameter ? */
- n32 = htonl(command_type);
- pool_putbytes(&poolHandle->port, (char *) &n32, 4);
-
- if (set_command)
- {
- /* Length of SET command string */
- n32 = htonl(strlen(set_command) + 1);
- pool_putbytes(&poolHandle->port, (char *) &n32, 4);
-
- /* Send command string followed by \0 terminator */
- pool_putbytes(&poolHandle->port, set_command, strlen(set_command) + 1);
- }
- else
- {
- /* Send empty command */
- n32 = htonl(0);
- pool_putbytes(&poolHandle->port, (char *) &n32, 4);
- }
-
- pool_flush(&poolHandle->port);
-
- /* Get result */
- res = pool_recvres(&poolHandle->port);
-
- return res;
}
/*
- * Send commands to alter the behavior of current transaction and update begin sent status
- */
-
-int
-PoolManagerSendLocalCommand(int dn_count, int* dn_list, int co_count, int* co_list)
-{
- uint32 n32;
- /*
- * Buffer contains the list of both Coordinator and Datanodes, as well
- * as the number of connections
- */
- uint32 buf[2 + dn_count + co_count];
- int i;
-
- if (poolHandle == NULL)
- return EOF;
-
- if (dn_count == 0 && co_count == 0)
- return EOF;
-
- if (dn_count != 0 && dn_list == NULL)
- return EOF;
-
- if (co_count != 0 && co_list == NULL)
- return EOF;
-
- /* Insert the list of Datanodes in buffer */
- n32 = htonl((uint32) dn_count);
- buf[0] = n32;
-
- for (i = 0; i < dn_count;)
- {
- n32 = htonl((uint32) dn_list[i++]);
- buf[i] = n32;
- }
-
- /* Insert the list of Coordinators in buffer */
- n32 = htonl((uint32) co_count);
- buf[dn_count + 1] = n32;
-
- /* Not necessary to send to pooler a request if there is no Coordinator */
- if (co_count != 0)
- {
- for (i = dn_count + 1; i < (dn_count + co_count + 1);)
- {
- n32 = htonl((uint32) co_list[i - (dn_count + 1)]);
- buf[++i] = n32;
- }
- }
- pool_putmessage(&poolHandle->port, 'b', (char *) buf, (2 + dn_count + co_count) * sizeof(uint32));
- pool_flush(&poolHandle->port);
-
- /* Get result */
- return pool_recvres(&poolHandle->port);
-}
-#endif
-
-/*
* Lock/unlock pool manager
* During locking, the only operations not permitted are abort, connection and
* connection obtention.
@@ -897,13 +647,9 @@ PoolManagerLock(bool is_lock)
char msgtype = 'o';
int n32;
int msglen = 8;
-#ifdef XCP
if (poolHandle == NULL)
PoolManagerConnect(get_database_name(MyDatabaseId),
GetClusterUserName(), "");
-#else
- Assert(poolHandle);
-#endif
/* Message type */
pool_putbytes(&poolHandle->port, &msgtype, 1);
@@ -972,25 +718,11 @@ agent_destroy(PoolAgent *agent)
/* Discard connections if any remaining */
if (agent->pool)
{
-#ifdef XCP
/*
		 * If the session is disconnecting while there are active connections,
		 * we cannot know whether they are clean or not, so force-destroy them
*/
agent_release_connections(agent, true);
-#else
- /*
- * Agent is being destroyed, so reset session parameters
- * before putting back connections to pool.
- */
- agent_reset_session(agent);
-
- /*
- * Release them all.
- * Force disconnection if there are temporary objects on agent.
- */
- agent_release_connections(agent, agent->is_temp);
-#endif
}
/* find agent in the list */
@@ -1018,20 +750,14 @@ agent_destroy(PoolAgent *agent)
void
PoolManagerDisconnect(void)
{
-#ifdef XCP
if (!poolHandle)
return; /* not even connected */
-#else
- Assert(poolHandle);
-#endif
pool_putmessage(&poolHandle->port, 'd', NULL, 0);
pool_flush(&poolHandle->port);
close(Socket(poolHandle->port));
-#ifdef XCP
free(poolHandle);
-#endif
poolHandle = NULL;
}
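The surviving XCP client calls above (PoolManagerConnect, PoolManagerLock, PoolManagerDisconnect) share one wire convention: a one-byte message type, a 4-byte network-order length, then length-prefixed strings that keep their trailing NUL. The following is a minimal standalone C sketch of that framing only; the helper names, the total-length formula and the exact field order are illustrative assumptions, not the pooler's actual API.

/*
 * Framing sketch: one type byte, a 4-byte network-order length, then
 * length-prefixed strings including their \0, mirroring the
 * n32 = htonl(...); pool_putbytes(...) sequence in the hunks above.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t put_u32(unsigned char *buf, size_t off, uint32_t v)
{
	uint32_t n32 = htonl(v);	/* network byte order */
	memcpy(buf + off, &n32, 4);
	return off + 4;
}

static size_t put_string(unsigned char *buf, size_t off, const char *s)
{
	size_t len = strlen(s) + 1;	/* include the \0 terminator */
	off = put_u32(buf, off, (uint32_t) len);
	memcpy(buf + off, s, len);
	return off + len;
}

int main(void)
{
	unsigned char buf[256];
	size_t off = 0;
	const char *database = "postgres";
	const char *user_name = "xl";

	buf[off++] = 'c';		/* message type: connect */
	/* overall length field; the real formula is a protocol detail, this is a stand-in */
	off = put_u32(buf, off, (uint32_t) (4 + 4 + strlen(database) + 1 + 4 + strlen(user_name) + 1));
	off = put_u32(buf, off, 12345);	/* stand-in for MyProcPid */
	off = put_string(buf, off, database);
	off = put_string(buf, off, user_name);

	for (size_t i = 0; i < off; i++)
		printf("%02x%c", buf[i], (i + 1) % 16 == 0 ? '\n' : ' ');
	printf("\n");
	return 0;
}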
@@ -1048,13 +774,9 @@ PoolManagerGetConnections(List *datanodelist, List *coordlist)
int totlen = list_length(datanodelist) + list_length(coordlist);
int nodes[totlen + 2];
-#ifdef XCP
if (poolHandle == NULL)
PoolManagerConnect(get_database_name(MyDatabaseId),
GetClusterUserName(), session_options());
-#else
- Assert(poolHandle);
-#endif
/*
	 * Prepare and send message to pool manager.
@@ -1114,7 +836,6 @@ PoolManagerAbortTransactions(char *dbname, char *username, int **proc_pids)
int dblen = dbname ? strlen(dbname) + 1 : 0;
int userlen = username ? strlen(username) + 1 : 0;
-#ifdef XCP
/*
* New connection may be established to clean connections to
* specified nodes and databases.
@@ -1122,9 +843,6 @@ PoolManagerAbortTransactions(char *dbname, char *username, int **proc_pids)
if (poolHandle == NULL)
PoolManagerConnect(get_database_name(MyDatabaseId),
GetClusterUserName(), session_options());
-#else
- Assert(poolHandle);
-#endif
/* Message type */
pool_putbytes(&poolHandle->port, &msgtype, 1);
@@ -1173,7 +891,6 @@ PoolManagerCleanConnection(List *datanodelist, List *coordlist, char *dbname, ch
int userlen = username ? strlen(username) + 1 : 0;
int dblen = dbname ? strlen(dbname) + 1 : 0;
-#ifdef XCP
/*
* New connection may be established to clean connections to
* specified nodes and databases.
@@ -1181,7 +898,6 @@ PoolManagerCleanConnection(List *datanodelist, List *coordlist, char *dbname, ch
if (poolHandle == NULL)
PoolManagerConnect(get_database_name(MyDatabaseId),
GetClusterUserName(), session_options());
-#endif
nodes[0] = htonl(list_length(datanodelist));
i = 1;
@@ -1247,7 +963,6 @@ PoolManagerCheckConnectionInfo(void)
{
int res;
-#ifdef XCP
/*
* New connection may be established to clean connections to
* specified nodes and databases.
@@ -1255,9 +970,6 @@ PoolManagerCheckConnectionInfo(void)
if (poolHandle == NULL)
PoolManagerConnect(get_database_name(MyDatabaseId),
GetClusterUserName(), session_options());
-#else
- Assert(poolHandle);
-#endif
PgxcNodeListAndCount();
pool_putmessage(&poolHandle->port, 'q', NULL, 0);
pool_flush(&poolHandle->port);
@@ -1301,9 +1013,6 @@ agent_handle_input(PoolAgent * agent, StringInfo s)
const char *database = NULL;
const char *user_name = NULL;
const char *pgoptions = NULL;
-#ifndef XCP
- PoolCommandType command_type;
-#endif
int datanodecount;
int coordcount;
List *nodelist = NIL;
@@ -1345,34 +1054,6 @@ agent_handle_input(PoolAgent * agent, StringInfo s)
if (pids)
pfree(pids);
break;
-#ifndef XCP
- case 'b': /* Fire transaction-block commands on given nodes */
- /*
- * Length of message is caused by:
- * - Message header = 4bytes
- * - Number of Datanodes sent = 4bytes
- * - List of Datanodes = NumPoolDataNodes * 4bytes (max)
- * - Number of Coordinators sent = 4bytes
- * - List of Coordinators = NumPoolCoords * 4bytes (max)
- */
- pool_getmessage(&agent->port, s, 4 * agent->num_dn_connections + 4 * agent->num_coord_connections + 12);
- datanodecount = pq_getmsgint(s, 4);
- for (i = 0; i < datanodecount; i++)
- datanodelist = lappend_int(datanodelist, pq_getmsgint(s, 4));
- coordcount = pq_getmsgint(s, 4);
- /* It is possible that no Coordinators are involved in the transaction */
- for (i = 0; i < coordcount; i++)
- coordlist = lappend_int(coordlist, pq_getmsgint(s, 4));
- pq_getmsgend(s);
- /* Send local commands if any to the nodes involved in the transaction */
- res = send_local_commands(agent, datanodelist, coordlist);
- /* Send result */
- pool_sendres(&agent->port, res);
-
- list_free(datanodelist);
- list_free(coordlist);
- break;
-#endif
case 'c': /* CONNECT */
pool_getmessage(&agent->port, s, 0);
agent->pid = pq_getmsgint(s, 4);
@@ -1531,7 +1212,6 @@ agent_handle_input(PoolAgent * agent, StringInfo s)
pool_sendres(&agent->port, res);
break;
case 'r': /* RELEASE CONNECTIONS */
-#ifdef XCP
{
bool destroy;
@@ -1540,31 +1220,7 @@ agent_handle_input(PoolAgent * agent, StringInfo s)
pq_getmsgend(s);
agent_release_connections(agent, destroy);
}
-#else
- pool_getmessage(&agent->port, s, 4);
- pq_getmsgend(s);
- agent_release_connections(agent, false);
-#endif
break;
-#ifndef XCP
- case 's': /* Session-related COMMAND */
- pool_getmessage(&agent->port, s, 0);
- /* Determine if command is local or session */
- command_type = (PoolCommandType) pq_getmsgint(s, 4);
- /* Get the SET command if necessary */
- len = pq_getmsgint(s, 4);
- if (len != 0)
- set_command = pq_getmsgbytes(s, len);
-
- pq_getmsgend(s);
-
- /* Manage command depending on its type */
- res = agent_session_command(agent, set_command, command_type);
-
- /* Send success result */
- pool_sendres(&agent->port, res);
- break;
-#endif
default: /* EOF or protocol violation */
agent_destroy(agent);
return;
@@ -1575,120 +1231,6 @@ agent_handle_input(PoolAgent * agent, StringInfo s)
}
}
-#ifndef XCP
-/*
- * Manage a session command for pooler
- */
-static int
-agent_session_command(PoolAgent *agent, const char *set_command, PoolCommandType command_type)
-{
- int res;
-
- switch (command_type)
- {
- case POOL_CMD_LOCAL_SET:
- case POOL_CMD_GLOBAL_SET:
- res = agent_set_command(agent, set_command, command_type);
- break;
- case POOL_CMD_TEMP:
- res = agent_temp_command(agent);
- break;
- default:
- res = -1;
- break;
- }
-
- return res;
-}
-
-/*
- * Set agent flag that a temporary object is in use.
- */
-static int
-agent_temp_command(PoolAgent *agent)
-{
- agent->is_temp = true;
- return 0;
-}
-
-/*
- * Save a SET command and distribute it to the agent connections
- * already in use.
- */
-static int
-agent_set_command(PoolAgent *agent, const char *set_command, PoolCommandType command_type)
-{
- char *params_string;
- int i;
- int res = 0;
-
- Assert(agent);
- Assert(set_command);
- Assert(command_type == POOL_CMD_LOCAL_SET || command_type == POOL_CMD_GLOBAL_SET);
-
- if (command_type == POOL_CMD_LOCAL_SET)
- params_string = agent->local_params;
- else if (command_type == POOL_CMD_GLOBAL_SET)
- params_string = agent->session_params;
- else
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Set command process failed")));
-
- /* First command recorded */
- if (!params_string)
- {
- params_string = pstrdup(set_command);
- if (!params_string)
- ereport(ERROR,
- (errcode(ERRCODE_OUT_OF_MEMORY),
- errmsg("out of memory")));
- }
- else
- {
- /*
- * Second command or more recorded.
- * Commands are saved with format 'SET param1 TO value1;...;SET paramN TO valueN'
- */
- params_string = (char *) repalloc(params_string,
- strlen(params_string) + strlen(set_command) + 2);
- if (!params_string)
- ereport(ERROR,
- (errcode(ERRCODE_OUT_OF_MEMORY),
- errmsg("out of memory")));
-
- sprintf(params_string, "%s;%s", params_string, set_command);
- }
-
- /*
- * Launch the new command to all the connections already hold by the agent
- * It does not matter if command is local or global as this has explicitely been sent
- * by client. PostgreSQL backend also cannot send to its pooler agent SET LOCAL if current
- * transaction is not in a transaction block. This has also no effect on local Coordinator
- * session.
- */
- for (i = 0; i < agent->num_dn_connections; i++)
- {
- if (agent->dn_connections[i])
- res |= PGXCNodeSendSetQuery(agent->dn_connections[i]->conn, set_command);
- }
-
- for (i = 0; i < agent->num_coord_connections; i++)
- {
- if (agent->coord_connections[i])
- res |= PGXCNodeSendSetQuery(agent->coord_connections[i]->conn, set_command);
- }
-
- /* Save the latest string */
- if (command_type == POOL_CMD_LOCAL_SET)
- agent->local_params = params_string;
- else if (command_type == POOL_CMD_GLOBAL_SET)
- agent->session_params = params_string;
-
- return res;
-}
-#endif
-
/*
* acquire connection
*/
@@ -1759,10 +1301,6 @@ agent_acquire_connections(PoolAgent *agent, List *datanodelist, List *coordlist)
* Local parameters are fired only once BEGIN has been launched on
* remote nodes.
*/
-#ifndef XCP
- if (agent->session_params)
- PGXCNodeSendSetQuery(slot->conn, agent->session_params);
-#endif
}
result[i++] = PQsocket((PGconn *) agent->dn_connections[node]->conn);
@@ -1794,10 +1332,6 @@ agent_acquire_connections(PoolAgent *agent, List *datanodelist, List *coordlist)
* Local parameters are fired only once BEGIN has been launched on
* remote nodes.
*/
-#ifndef XCP
- if (agent->session_params)
- PGXCNodeSendSetQuery(slot->conn, agent->session_params);
-#endif
}
result[i++] = PQsocket((PGconn *) agent->coord_connections[node]->conn);
@@ -1808,80 +1342,6 @@ agent_acquire_connections(PoolAgent *agent, List *datanodelist, List *coordlist)
return result;
}
-#ifndef XCP
-/*
- * send transaction local commands if any, set the begin sent status in any case
- */
-static int
-send_local_commands(PoolAgent *agent, List *datanodelist, List *coordlist)
-{
- int tmp;
- int res;
- ListCell *nodelist_item;
- PGXCNodePoolSlot *slot;
-
- Assert(agent);
-
- res = 0;
-
- if (datanodelist != NULL)
- {
- res = list_length(datanodelist);
- if (res > 0 && agent->dn_connections == NULL)
- return 0;
-
- foreach(nodelist_item, datanodelist)
- {
- int node = lfirst_int(nodelist_item);
-
- if(node < 0 || node >= agent->num_dn_connections)
- continue;
-
- slot = agent->dn_connections[node];
-
- if (slot == NULL)
- continue;
-
- if (agent->local_params != NULL)
- {
- tmp = PGXCNodeSendSetQuery(slot->conn, agent->local_params);
- res = res + tmp;
- }
- }
- }
-
- if (coordlist != NULL)
- {
- res = list_length(coordlist);
- if (res > 0 && agent->coord_connections == NULL)
- return 0;
-
- foreach(nodelist_item, coordlist)
- {
- int node = lfirst_int(nodelist_item);
-
- if(node < 0 || node >= agent->num_coord_connections)
- continue;
-
- slot = agent->coord_connections[node];
-
- if (slot == NULL)
- continue;
-
- if (agent->local_params != NULL)
- {
- tmp = PGXCNodeSendSetQuery(slot->conn, agent->local_params);
- res = res + tmp;
- }
- }
- }
-
- if (res < 0)
- return -res;
- return 0;
-}
-#endif
-
/*
* Cancel query
*/
@@ -1940,7 +1400,6 @@ cancel_query_on_connections(PoolAgent *agent, List *datanodelist, List *coordlis
/*
* Return connections back to the pool
*/
-#ifdef XCP
void
PoolManagerReleaseConnections(bool force)
{
@@ -1964,16 +1423,6 @@ PoolManagerReleaseConnections(bool force)
pool_putbytes(&poolHandle->port, (char *) &n32, 4);
pool_flush(&poolHandle->port);
}
-#else
-void
-PoolManagerReleaseConnections(void)
-{
- Assert(poolHandle);
- pool_putmessage(&poolHandle->port, 'r', NULL, 0);
- pool_flush(&poolHandle->port);
-}
-#endif
-
/*
* Cancel Query
@@ -2039,30 +1488,11 @@ agent_release_connections(PoolAgent *agent, bool force_destroy)
if (!agent->dn_connections && !agent->coord_connections)
return;
-#ifdef XCP
if (!force_destroy && cluster_ex_lock_held)
{
elog(LOG, "Not releasing connection with cluster lock");
return;
}
-#endif
-
-#ifndef XCP
- /*
- * If there are some session parameters or temporary objects,
- * do not put back connections to pool.
- * Disconnection will be made when session is cut for this user.
- * Local parameters are reset when transaction block is finished,
- * so don't do anything for them, but just reset their list.
- */
- if (agent->local_params)
- {
- pfree(agent->local_params);
- agent->local_params = NULL;
- }
- if ((agent->session_params || agent->is_temp) && !force_destroy)
- return;
-#endif
/*
* There are possible memory allocations in the core pooler, we want
@@ -2100,7 +1530,6 @@ agent_release_connections(PoolAgent *agent, bool force_destroy)
agent->coord_connections[i] = NULL;
}
-#ifdef XCP
/*
* Released connections are now in the pool and we may want to close
* them eventually. Update the oldest_idle value to reflect the latest
@@ -2108,68 +1537,10 @@ agent_release_connections(PoolAgent *agent, bool force_destroy)
*/
if (!force_destroy && agent->pool->oldest_idle == (time_t) 0)
agent->pool->oldest_idle = time(NULL);
-#endif
MemoryContextSwitchTo(oldcontext);
}
-
-#ifndef XCP
-/*
- * Reset session parameters for given connections in the agent.
- * This is done before putting back to pool connections that have been
- * modified by session parameters.
- */
-static void
-agent_reset_session(PoolAgent *agent)
-{
- int i;
-
- if (!agent->session_params && !agent->local_params)
- return;
-
- /* Reset connection params */
- /* Check agent slot for each Datanode */
- if (agent->dn_connections)
- {
- for (i = 0; i < agent->num_dn_connections; i++)
- {
- PGXCNodePoolSlot *slot = agent->dn_connections[i];
-
- /* Reset given slot with parameters */
- if (slot)
- PGXCNodeSendSetQuery(slot->conn, "SET SESSION AUTHORIZATION DEFAULT;RESET ALL;SET GLOBAL_SESSION TO NONE;");
- }
- }
-
- if (agent->coord_connections)
- {
- /* Check agent slot for each Coordinator */
- for (i = 0; i < agent->num_coord_connections; i++)
- {
- PGXCNodePoolSlot *slot = agent->coord_connections[i];
-
- /* Reset given slot with parameters */
- if (slot)
- PGXCNodeSendSetQuery(slot->conn, "SET SESSION AUTHORIZATION DEFAULT;RESET ALL;SET GLOBAL_SESSION TO NONE;");
- }
- }
-
- /* Parameters are reset, so free commands */
- if (agent->session_params)
- {
- pfree(agent->session_params);
- agent->session_params = NULL;
- }
- if (agent->local_params)
- {
- pfree(agent->local_params);
- agent->local_params = NULL;
- }
-}
-#endif
-
-
/*
* Create new empty pool for a database.
* By default Database Pools have a size null so as to avoid interactions
@@ -2209,10 +1580,8 @@ create_database_pool(const char *database, const char *user_name, const char *pg
databasePool->database = pstrdup(database);
/* Copy the user name */
databasePool->user_name = pstrdup(user_name);
-#ifdef XCP
/* Reset the oldest_idle value */
databasePool->oldest_idle = (time_t) 0;
-#endif
/* Copy the pgoptions */
databasePool->pgoptions = pstrdup(pgoptions);
@@ -2513,9 +1882,7 @@ release_connection(DatabasePool *dbPool, PGXCNodePoolSlot *slot,
{
/* Insert the slot into the array and increase pool size */
nodePool->slot[(nodePool->freeSize)++] = slot;
-#ifdef XCP
slot->released = time(NULL);
-#endif
}
else
{
@@ -2535,10 +1902,8 @@ release_connection(DatabasePool *dbPool, PGXCNodePoolSlot *slot,
static PGXCNodePool *
grow_pool(DatabasePool *dbPool, Oid node)
{
-#ifdef XCP
/* if error try to release idle connections and try again */
bool tryagain = true;
-#endif
PGXCNodePool *nodePool;
bool found;
@@ -2567,11 +1932,7 @@ grow_pool(DatabasePool *dbPool, Oid node)
nodePool->size = 0;
}
-#ifdef XCP
while (nodePool->freeSize == 0 && nodePool->size < MaxPoolSize)
-#else
- while (nodePool->size < MinPoolSize || (nodePool->freeSize == 0 && nodePool->size < MaxPoolSize))
-#endif
{
PGXCNodePoolSlot *slot;
@@ -2595,7 +1956,6 @@ grow_pool(DatabasePool *dbPool, Oid node)
ereport(LOG,
(errcode(ERRCODE_CONNECTION_FAILURE),
errmsg("failed to connect to Datanode")));
-#ifdef XCP
/*
			 * If we failed to connect, the number of connections on the
			 * target node probably reached max_connections. Try and release idle
@@ -2612,16 +1972,13 @@ grow_pool(DatabasePool *dbPool, Oid node)
tryagain = false;
continue;
}
-#endif
break;
}
slot->xc_cancelConn = (NODE_CANCEL *) PQgetCancel((PGconn *)slot->conn);
-#ifdef XCP
slot->released = time(NULL);
if (dbPool->oldest_idle == (time_t) 0)
dbPool->oldest_idle = slot->released;
-#endif
/* Insert at the end of the pool */
nodePool->slot[(nodePool->freeSize)++] = slot;
@@ -2688,9 +2045,7 @@ static void
PoolerLoop(void)
{
StringInfoData input_message;
-#ifdef XCP
time_t last_maintenance = (time_t) 0;
-#endif
#ifdef HAVE_UNIX_SOCKETS
if (Unix_socket_directories)
@@ -2773,7 +2128,6 @@ PoolerLoop(void)
nfds = Max(nfds, sockfd);
}
-#ifdef XCP
if (PoolMaintenanceTimeout > 0)
{
struct timeval maintenance_timeout;
@@ -2801,9 +2155,7 @@ PoolerLoop(void)
retval = select(nfds + 1, &rfds, NULL, NULL, &maintenance_timeout);
}
else
-#endif
retval = select(nfds + 1, &rfds, NULL, NULL, NULL);
-#ifdef XCP
/*
* Emergency bailout if postmaster has died. This is to avoid the
* necessity for manual cleanup of all postmaster children.
@@ -2819,7 +2171,6 @@ PoolerLoop(void)
got_SIGHUP = false;
ProcessConfigFile(PGC_SIGHUP);
}
-#endif
if (shutdown_requested)
{
for (i = agentCount - 1; i >= 0; i--)
@@ -2853,14 +2204,12 @@ PoolerLoop(void)
if (FD_ISSET(server_fd, &rfds))
agent_create();
}
-#ifdef XCP
else if (retval == 0)
{
/* maintenance timeout */
pools_maintenance();
last_maintenance = time(NULL);
}
-#endif
}
}
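The PoolerLoop hunks above keep the XCP behaviour where, when PoolMaintenanceTimeout is set, select() runs with a timeout and a zero return value triggers pools_maintenance(). Below is a self-contained sketch of that pattern against stdin; the interval constant and the "maintenance" body are placeholders, not the pooler's real values.

#include <stdio.h>
#include <sys/select.h>
#include <time.h>
#include <unistd.h>

#define MAINTENANCE_INTERVAL 5		/* seconds; stand-in for PoolMaintenanceTimeout */

int main(void)
{
	time_t	last_maintenance = time(NULL);

	for (;;)
	{
		fd_set			rfds;
		struct timeval	tv;
		double			elapsed;
		int				retval;

		FD_ZERO(&rfds);
		FD_SET(STDIN_FILENO, &rfds);	/* stand-in for the pooler's listen sockets */

		/* sleep only until the next maintenance window is due */
		elapsed = difftime(time(NULL), last_maintenance);
		tv.tv_sec = (elapsed >= MAINTENANCE_INTERVAL) ? 0 : (time_t) (MAINTENANCE_INTERVAL - elapsed);
		tv.tv_usec = 0;

		retval = select(STDIN_FILENO + 1, &rfds, NULL, NULL, &tv);
		if (retval < 0)
		{
			perror("select");
			return 1;
		}
		if (retval == 0)
		{
			/* timeout: run the periodic maintenance, as pools_maintenance() does */
			printf("maintenance tick\n");
			last_maintenance = time(NULL);
			continue;
		}
		if (FD_ISSET(STDIN_FILENO, &rfds))
		{
			char	line[128];

			if (!fgets(line, sizeof(line), stdin))
				return 0;		/* EOF: shut down */
			printf("request: %s", line);
		}
	}
}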
@@ -2992,24 +2341,11 @@ pooler_quickdie(SIGNAL_ARGS)
}
-#ifdef XCP
static void
pooler_sighup(SIGNAL_ARGS)
{
got_SIGHUP = true;
}
-#endif
-
-
-#ifndef XCP
-bool
-IsPoolHandle(void)
-{
- if (poolHandle == NULL)
- return false;
- return true;
-}
-#endif
/*
* Given node identifier, dbname and user name build connection string.
@@ -3028,7 +2364,6 @@ build_node_conn_str(Oid node, DatabasePool *dbPool)
return NULL;
}
-#ifdef XCP
connstr = PGXCNodeConnStr(NameStr(nodeDef->nodehost),
nodeDef->nodeport,
dbPool->database,
@@ -3036,21 +2371,11 @@ build_node_conn_str(Oid node, DatabasePool *dbPool)
dbPool->pgoptions,
IS_PGXC_COORDINATOR ? "coordinator" : "datanode",
PGXCNodeName);
-#else
- connstr = PGXCNodeConnStr(NameStr(nodeDef->nodehost),
- nodeDef->nodeport,
- dbPool->database,
- dbPool->user_name,
- dbPool->pgoptions,
- IS_PGXC_COORDINATOR ? "coordinator" : "datanode");
-#endif
pfree(nodeDef);
return connstr;
}
-
-#ifdef XCP
/*
 * Check all pooled connections, and close those that have been released more than
 * PooledConnKeepAlive seconds ago.
@@ -3170,4 +2495,3 @@ pools_maintenance(void)
elog(DEBUG1, "Pool maintenance, done in %f seconds, removed %d pools",
difftime(time(NULL), now), count);
}
-#endif
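pools_maintenance(), described just above, closes pooled connections whose released timestamp is older than PooledConnKeepAlive; that timestamp is the same slot->released = time(NULL) the retained code sets when a connection goes back to the pool. Here is a minimal standalone sketch of that sweep, with an illustrative slot struct and threshold rather than the pooler's real definitions.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define KEEPALIVE_SECONDS 600		/* stand-in for PooledConnKeepAlive */

typedef struct
{
	bool	in_use;			/* currently handed out to a session */
	time_t	released;		/* when the slot last went back to the free list */
} IdleSlot;

static int
sweep_idle_slots(IdleSlot *slots, int nslots, time_t now)
{
	int		closed = 0;
	int		i;

	for (i = 0; i < nslots; i++)
	{
		if (slots[i].in_use || slots[i].released == 0)
			continue;		/* busy, or already closed */
		if (difftime(now, slots[i].released) > KEEPALIVE_SECONDS)
		{
			/* a real pool would PQfinish() the connection and compact the slot array */
			slots[i].released = 0;
			closed++;
		}
	}
	return closed;
}

int main(void)
{
	time_t	now = time(NULL);
	IdleSlot slots[3] = {
		{ false, now - 1200 },	/* idle for 20 minutes: closed */
		{ false, now - 30 },	/* recently released: kept */
		{ true,  0 },			/* in use by a session: skipped */
	};

	printf("closed %d idle slot(s)\n", sweep_idle_slots(slots, 3, now));
	return 0;
}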
diff --git a/src/backend/pgxc/pool/poolutils.c b/src/backend/pgxc/pool/poolutils.c
index 9b4cbd7601..fad39cda9e 100644
--- a/src/backend/pgxc/pool/poolutils.c
+++ b/src/backend/pgxc/pool/poolutils.c
@@ -35,9 +35,7 @@
#include "catalog/pgxc_node.h"
#include "commands/dbcommands.h"
#include "commands/prepare.h"
-#ifdef XCP
#include "storage/ipc.h"
-#endif
#include "storage/procarray.h"
#include "utils/acl.h"
#include "utils/builtins.h"
@@ -98,21 +96,11 @@ pgxc_pool_check(PG_FUNCTION_ARGS)
Datum
pgxc_pool_reload(PG_FUNCTION_ARGS)
{
-#ifndef XCP
- MemoryContext old_context;
-
- if (!superuser())
- ereport(ERROR,
- (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- (errmsg("must be superuser to manage pooler"))));
-#endif
-
if (IsTransactionBlock())
ereport(ERROR,
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
errmsg("pgxc_pool_reload cannot run inside a transaction block")));
-#ifdef XCP
/* Session is being reloaded, drop prepared and temporary objects */
DropAllPreparedStatements();
@@ -129,53 +117,6 @@ pgxc_pool_reload(PG_FUNCTION_ARGS)
/* Signal other sessions to reconnect to pooler if have privileges */
if (superuser())
ReloadConnInfoOnBackends();
-#else
- /* A Datanode has no pooler active, so do not bother about that */
- if (IS_PGXC_DATANODE)
- PG_RETURN_BOOL(true);
-
- /* Take a lock on pooler to forbid any action during reload */
- PoolManagerLock(true);
-
- /* No need to reload, node information is consistent */
- if (PoolManagerCheckConnectionInfo())
- {
- /* Release the lock on pooler */
- PoolManagerLock(false);
- PG_RETURN_BOOL(true);
- }
-
- /* Reload connection information in pooler */
- PoolManagerReloadConnectionInfo();
-
- /* Be sure it is done consistently */
- if (!PoolManagerCheckConnectionInfo())
- {
- /* Release the lock on pooler */
- PoolManagerLock(false);
- PG_RETURN_BOOL(false);
- }
-
- /* Now release the lock on pooler */
- PoolManagerLock(false);
-
- /* Signal other sessions to reconnect to pooler */
- ReloadConnInfoOnBackends();
-
- /* Session is being reloaded, drop prepared and temporary objects */
- DropAllPreparedStatements();
-
- /* Now session information is reset in correct memory context */
- old_context = MemoryContextSwitchTo(TopMemoryContext);
-
- /* Reinitialize session, while old pooler connection is active */
- InitMultinodeExecutor(true);
-
- /* And reconnect to pool manager */
- PoolManagerReconnect();
-
- MemoryContextSwitchTo(old_context);
-#endif
PG_RETURN_BOOL(true);
}
@@ -320,7 +261,6 @@ CleanConnection(CleanConnStmt *stmt)
foreach(nodelist_item, stmt->nodes)
{
char *node_name = strVal(lfirst(nodelist_item));
-#ifdef XCP
char node_type = PGXC_NODE_NONE;
stmt_nodes = lappend_int(stmt_nodes,
PGXCNodeGetNodeIdFromName(node_name,
@@ -330,18 +270,6 @@ CleanConnection(CleanConnStmt *stmt)
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("PGXC Node %s: object not defined",
node_name)));
-#else
- Oid nodeoid = get_pgxc_nodeoid(node_name);
-
- if (!OidIsValid(nodeoid))
- ereport(ERROR,
- (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("PGXC Node %s: object not defined",
- node_name)));
-
- stmt_nodes = lappend_int(stmt_nodes,
- PGXCNodeGetNodeId(nodeoid, get_pgxc_nodetype(nodeoid)));
-#endif
}
/* Build lists to be sent to Pooler Manager */
@@ -412,7 +340,6 @@ DropDBCleanConnection(char *dbname)
void
HandlePoolerReload(void)
{
-#ifdef XCP
if (proc_exit_inprogress)
return;
@@ -425,44 +352,4 @@ HandlePoolerReload(void)
	/* Prevent use of cached connections to remote nodes */
RequestInvalidateRemoteHandles();
-#else
- MemoryContext old_context;
-
- /* A Datanode has no pooler active, so do not bother about that */
- if (IS_PGXC_DATANODE)
- return;
-
- /* Abort existing xact if any */
- AbortCurrentTransaction();
-
- /* Session is being reloaded, drop prepared and temporary objects */
- DropAllPreparedStatements();
-
- /* Now session information is reset in correct memory context */
- old_context = MemoryContextSwitchTo(TopMemoryContext);
-
- /* Need to be able to look into catalogs */
- CurrentResourceOwner = ResourceOwnerCreate(NULL, "ForPoolerReload");
-
- /* Reinitialize session, while old pooler connection is active */
- InitMultinodeExecutor(true);
-
- /* And reconnect to pool manager */
- PoolManagerReconnect();
-
- /* Send a message back to client regarding session being reloaded */
- ereport(WARNING,
- (errcode(ERRCODE_OPERATOR_INTERVENTION),
- errmsg("session has been reloaded due to a cluster configuration modification"),
- errdetail("Temporary and prepared objects hold by session have been"
- " dropped and current transaction has been aborted.")));
-
- /* Release everything */
- ResourceOwnerRelease(CurrentResourceOwner, RESOURCE_RELEASE_BEFORE_LOCKS, true, true);
- ResourceOwnerRelease(CurrentResourceOwner, RESOURCE_RELEASE_LOCKS, true, true);
- ResourceOwnerRelease(CurrentResourceOwner, RESOURCE_RELEASE_AFTER_LOCKS, true, true);
- CurrentResourceOwner = NULL;
-
- MemoryContextSwitchTo(old_context);
-#endif
}
diff --git a/src/backend/pgxc/pool/postgresql_fdw.c b/src/backend/pgxc/pool/postgresql_fdw.c
index e6e80805a9..fe9db3a598 100644
--- a/src/backend/pgxc/pool/postgresql_fdw.c
+++ b/src/backend/pgxc/pool/postgresql_fdw.c
@@ -91,42 +91,5 @@ is_immutable_func(Oid funcid)
bool
pgxc_is_expr_shippable(Expr *node, bool *has_aggs)
{
-#ifdef XCP
return false;
-#else
- Shippability_context sc_context;
-
- /* Create the FQS context */
- memset(&sc_context, 0, sizeof(sc_context));
- sc_context.sc_query = NULL;
- sc_context.sc_query_level = 0;
- sc_context.sc_for_expr = true;
-
- /* Walk the expression to check its shippability */
- pgxc_shippability_walker((Node *)node, &sc_context);
-
- /*
- * If caller is interested in knowing, whether the expression has aggregets
- * let the caller know about it. The caller is capable of handling such
- * expressions. Otherwise assume such an expression as unshippable.
- */
- if (has_aggs)
- *has_aggs = pgxc_test_shippability_reason(&sc_context, SS_HAS_AGG_EXPR);
- else if (pgxc_test_shippability_reason(&sc_context, SS_HAS_AGG_EXPR))
- return false;
-
- /*
- * If the expression unshippable or unsupported by expression shipping
- * algorithm, return false. We don't have information about the number of
- * nodes involved in expression evaluation, hence even if the expression can
- * be evaluated only on single node, return false.
- */
- if (pgxc_test_shippability_reason(&sc_context, SS_UNSUPPORTED_EXPR) ||
- pgxc_test_shippability_reason(&sc_context, SS_UNSHIPPABLE_EXPR) ||
- pgxc_test_shippability_reason(&sc_context, SS_NEED_SINGLENODE))
- return false;
-
- /* If nothing wrong found, the expression is shippable */
- return true;
-#endif
}
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index 1d4fc3d1ff..58d9c9dfdc 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -382,21 +382,13 @@ static DNSServiceRef bonjour_sdref = NULL;
#ifdef PGXC
char *PGXCNodeName = NULL;
-#ifdef XCP
int PGXCNodeId = 0;
-#else
-int PGXCNodeId = -1;
-#endif
/*
* When a particular node starts up, store the node identifier in this variable
 * so that we don't have to recalculate it or search the cache anywhere else.
 * This will have minimal impact on performance.
*/
uint32 PGXCNodeIdentifier = 0;
-
-#ifndef XCP
-static bool isNodeRegistered = false;
-#endif
#endif
/*
@@ -897,11 +889,7 @@ PostmasterMain(int argc, char *argv[])
#ifdef PGXC
if (!IS_PGXC_COORDINATOR && !IS_PGXC_DATANODE)
{
-#ifdef XCP
write_stderr("%s: Postgres-XL: must start as either a Coordinator (--coordinator) or Data Node (--datanode)\n",
-#else
- write_stderr("%s: Postgres-XC: must start as either a Coordinator (--coordinator) or Data Node (--datanode)\n",
-#endif
progname);
ExitPostmaster(1);
}
@@ -1362,7 +1350,6 @@ PostmasterMain(int argc, char *argv[])
pmState = PM_STARTUP;
#ifdef PGXC /* PGXC_COORD */
-#ifdef XCP
oldcontext = MemoryContextSwitchTo(TopMemoryContext);
/*
@@ -1371,19 +1358,6 @@ PostmasterMain(int argc, char *argv[])
PgPoolerPID = StartPoolManager();
MemoryContextSwitchTo(oldcontext);
-#else
- if (IS_PGXC_COORDINATOR)
- {
- oldcontext = MemoryContextSwitchTo(TopMemoryContext);
-
- /*
- * Initialize the Data Node connection pool
- */
- PgPoolerPID = StartPoolManager();
-
- MemoryContextSwitchTo(oldcontext);
- }
-#endif /* XCP */
#endif /* PGXC */
/* Some workers may be scheduled to start now */
maybe_start_bgworker();
@@ -1803,11 +1777,7 @@ ServerLoop(void)
#ifdef PGXC
/* If we have lost the pooler, try to start a new one */
-#ifdef XCP
if (PgPoolerPID == 0 && pmState == PM_RUN)
-#else
- if (IS_PGXC_COORDINATOR && PgPoolerPID == 0 && pmState == PM_RUN)
-#endif /* XCP */
PgPoolerPID = StartPoolManager();
#endif /* PGXC */
@@ -2509,11 +2479,7 @@ SIGHUP_handler(SIGNAL_ARGS)
if (StartupPID != 0)
signal_child(StartupPID, SIGHUP);
#ifdef PGXC /* PGXC_COORD */
-#ifdef XCP
if (PgPoolerPID != 0)
-#else
- if (IS_PGXC_COORDINATOR && PgPoolerPID != 0)
-#endif /* XCP */
signal_child(PgPoolerPID, SIGHUP);
#endif /* PGXC */
if (BgWriterPID != 0)
@@ -2603,23 +2569,9 @@ pmdie(SIGNAL_ARGS)
#ifdef PGXC /* PGXC_COORD */
/* and the pool manager too */
-#ifdef XCP
if (PgPoolerPID != 0)
-#else
- if (IS_PGXC_COORDINATOR && PgPoolerPID != 0)
-#endif
signal_child(PgPoolerPID, SIGTERM);
-#ifndef XCP
- /* Unregister Node on GTM */
- if (isNodeRegistered)
- {
- if (IS_PGXC_COORDINATOR)
- UnregisterGTM(GTM_NODE_COORDINATOR);
- else if (IS_PGXC_DATANODE)
- UnregisterGTM(GTM_NODE_DATANODE);
- }
-#endif
#endif
/*
@@ -2697,22 +2649,6 @@ pmdie(SIGNAL_ARGS)
/* and the walwriter too */
if (WalWriterPID != 0)
signal_child(WalWriterPID, SIGTERM);
-#ifdef PGXC
-#ifndef XCP
- /* and the pool manager too */
- if (IS_PGXC_COORDINATOR && PgPoolerPID != 0)
- signal_child(PgPoolerPID, SIGTERM);
-
- /* Unregister Node on GTM */
- if (isNodeRegistered)
- {
- if (IS_PGXC_COORDINATOR)
- UnregisterGTM(GTM_NODE_COORDINATOR);
- else if (IS_PGXC_DATANODE)
- UnregisterGTM(GTM_NODE_DATANODE);
- }
-#endif /* XCP */
-#endif /* PGXC */
pmState = PM_WAIT_BACKENDS;
}
@@ -2867,11 +2803,7 @@ reaper(SIGNAL_ARGS)
if (PgStatPID == 0)
PgStatPID = pgstat_start();
#ifdef PGXC
-#ifdef XCP
if (PgPoolerPID == 0)
-#else
- if (IS_PGXC_COORDINATOR && PgPoolerPID == 0)
-#endif /* XCP */
PgPoolerPID = StartPoolManager();
#endif /* PGXC */
@@ -3049,11 +2981,7 @@ reaper(SIGNAL_ARGS)
* Was it the pool manager? TODO decide how to handle
* Probably we should restart the system
*/
-#ifdef XCP
if (pid == PgPoolerPID)
-#else
- if (IS_PGXC_COORDINATOR && pid == PgPoolerPID)
-#endif /* XCP */
{
PgPoolerPID = 0;
if (!EXIT_STATUS_0(exitstatus))
@@ -3486,7 +3414,6 @@ HandleChildCrash(int pid, int exitstatus, const char *procname)
#ifdef PGXC
/* Take care of the pool manager too */
-#ifdef XCP
if (pid == PgPoolerPID)
PgPoolerPID = 0;
else if (PgPoolerPID != 0 && !FatalError)
@@ -3497,21 +3424,6 @@ HandleChildCrash(int pid, int exitstatus, const char *procname)
(int) PgPoolerPID)));
signal_child(PgPoolerPID, (SendStop ? SIGSTOP : SIGQUIT));
}
-#else
- if (IS_PGXC_COORDINATOR)
- {
- if (pid == PgPoolerPID)
- PgPoolerPID = 0;
- else if (PgPoolerPID != 0 && !FatalError)
- {
- ereport(DEBUG2,
- (errmsg_internal("sending %s to process %d",
- (SendStop ? "SIGSTOP" : "SIGQUIT"),
- (int) PgPoolerPID)));
- signal_child(PgPoolerPID, (SendStop ? SIGSTOP : SIGQUIT));
- }
- }
-#endif /* XCP */
#endif /* PGXC */
/*
@@ -4008,13 +3920,8 @@ TerminateChildren(int signal)
if (StartupPID != 0)
signal_child(StartupPID, signal);
#ifdef PGXC /* PGXC_COORD */
-#ifdef XCP
- if (PgPoolerPID != 0)
-#else
- if (IS_PGXC_COORDINATOR && PgPoolerPID != 0)
-#endif /* XCP */
- signal_child(PgPoolerPID, SIGQUIT);
-
+ if (PgPoolerPID != 0)
+ signal_child(PgPoolerPID, SIGQUIT);
#endif
if (BgWriterPID != 0)
signal_child(BgWriterPID, signal);
@@ -5121,55 +5028,6 @@ sigusr1_handler(SIGNAL_ARGS)
StartWorkerNeeded = true;
}
-#ifdef PGXC
-#ifndef XCP
- /*
- * Register node to GTM.
- * A node can only be registered if it has reached a stable recovery state
- * and if is a master node.
- * A standby node is created from a hot backup of master so master and slave
- * nodes will normally share the same node name. Having master and slave share
- * the same node name is convenient for slave promotion, and this makes master
- * and slave nodes being seen as equal by GTM in cluster. As two nodes cannot
- * register on GTM with the same name, it looks normal to let only master
- * register and have slave nodes bypass this process.
- */
- if (pmState == PM_RUN &&
- !isNodeRegistered)
- {
- isNodeRegistered = true;
-
- /* Register node on GTM during Postmaster Startup. */
- if (IS_PGXC_COORDINATOR)
- {
- if (RegisterGTM(GTM_NODE_COORDINATOR, PostPortNumber, data_directory) < 0)
- {
- UnregisterGTM(GTM_NODE_COORDINATOR);
- if (RegisterGTM(GTM_NODE_COORDINATOR, PostPortNumber, data_directory) < 0)
- {
- ereport(FATAL,
- (errcode(ERRCODE_IO_ERROR),
- errmsg("Can not register Coordinator on GTM")));
- }
- }
- }
- if (IS_PGXC_DATANODE)
- {
- if (RegisterGTM(GTM_NODE_DATANODE, PostPortNumber, data_directory) < 0)
- {
- UnregisterGTM(GTM_NODE_DATANODE);
- if (RegisterGTM(GTM_NODE_DATANODE, PostPortNumber, data_directory) < 0)
- {
- ereport(FATAL,
- (errcode(ERRCODE_IO_ERROR),
- errmsg("Can not register Datanode on GTM")));
- }
- }
- }
- }
-#endif
-#endif
-
if (StartWorkerNeeded || HaveCrashedWorker)
maybe_start_bgworker();
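The postmaster hunks above drop the coordinator-only checks, so the pool manager child is supervised unconditionally: the reaper clears PgPoolerPID when it collects the child, and ServerLoop starts a replacement while the server is in its running state. The following is a rough standalone sketch of that restart pattern, with a dummy worker body and a bounded restart count (both illustrative) so the sketch terminates.

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static pid_t start_pool_manager(void)
{
	pid_t	pid = fork();

	if (pid == 0)
	{
		/* child: stand in for the pool manager for a moment, then exit */
		sleep(1);
		_exit(0);
	}
	return pid;			/* parent keeps the PID; -1 on fork failure */
}

int main(void)
{
	pid_t	pooler_pid = start_pool_manager();
	int		restarts = 0;

	while (restarts < 3)		/* bounded so the sketch terminates */
	{
		int		status;
		pid_t	dead = waitpid(-1, &status, 0);

		if (dead == pooler_pid)
		{
			printf("pool manager %d exited, restarting\n", (int) dead);
			pooler_pid = 0;	/* as the reaper does: forget the PID */
		}

		/* as in ServerLoop: if we have lost the pooler, start a new one */
		if (pooler_pid == 0)
		{
			pooler_pid = start_pool_manager();
			restarts++;
		}
	}
	return 0;
}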
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index 604e96a6ac..e26ff6c102 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -1441,7 +1441,6 @@ GetSnapshotData(Snapshot snapshot)
*/
if (GetPGXCSnapshotData(snapshot))
return snapshot;
-#ifdef XCP
/*
* We only make one exception for using local snapshot and that's the
* initdb time. When IsPostmasterEnvironment is true, snapshots must either
@@ -1453,8 +1452,6 @@ GetSnapshotData(Snapshot snapshot)
*/
if (IsPostmasterEnvironment && !useLocalXid)
elog(ERROR, "Was unable to obtain a snapshot from GTM.");
-#else
-#endif
#endif
/*
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index d09945264c..1a40987132 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -832,23 +832,6 @@ pg_analyze_and_rewrite(Node *parsetree, const char *query_string,
*/
querytree_list = pg_rewrite_query(query);
-#ifdef PGXC
-#ifndef XCP
- if (IS_PGXC_LOCAL_COORDINATOR)
- {
- ListCell *lc;
-
- foreach(lc, querytree_list)
- {
- Query *query = (Query *) lfirst(lc);
-
- if (query->sql_statement == NULL)
- query->sql_statement = pstrdup(query_string);
- }
- }
-#endif
-#endif
-
TRACE_POSTGRESQL_QUERY_REWRITE_DONE(query_string);
return querytree_list;
@@ -1627,22 +1610,6 @@ exec_parse_message(const char *query_string, /* string to execute */
ShowUsage("PARSE ANALYSIS STATISTICS");
querytree_list = pg_rewrite_query(query);
-#ifdef PGXC
-#ifndef XCP
- if (IS_PGXC_LOCAL_COORDINATOR)
- {
- ListCell *lc;
-
- foreach(lc, querytree_list)
- {
- Query *query = (Query *) lfirst(lc);
-
- if (query->sql_statement == NULL)
- query->sql_statement = pstrdup(query_string);
- }
- }
-#endif
-#endif
/* Done with the snapshot used for parsing */
if (snapshot_set)
@@ -3973,11 +3940,7 @@ process_postgres_switches(int argc, char *argv[], GucContext ctx,
{
ereport(FATAL,
(errcode(ERRCODE_SYNTAX_ERROR),
-#ifdef XCP
errmsg("Postgres-XL: must start as either a Coordinator (--coordinator) or Datanode (-datanode)\n")));
-#else
- errmsg("Postgres-XC: must start as either a Coordinator (--coordinator) or Datanode (-datanode)\n")));
-#endif
}
if (!IsPostmasterEnvironment)
@@ -4052,12 +4015,10 @@ PostgresMain(int argc, char *argv[],
TransactionId *xip;
/* Timestamp info */
TimestampTz timestamp;
-#ifndef XCP
- PoolHandle *pool_handle;
-#endif
remoteConnType = REMOTE_CONN_APP;
#endif
+
#ifdef XCP
parentPGXCNode = NULL;
parentPGXCNodeId = -1;
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index fac44220d0..a0d7cc2b4d 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -1406,35 +1406,6 @@ PortalRunSelect(Portal portal,
{
PushActiveSnapshot(queryDesc->snapshot);
-#ifdef PGXC
-#ifndef XCP
- if (portal->name != NULL &&
- portal->name[0] != '\0' &&
- IsA(queryDesc->planstate, RemoteQueryState))
- {
- /*
- * The snapshot in the query descriptor contains the
- * command id of the command creating the cursor. We copy
- * that snapshot in RemoteQueryState so that the do_query
- * function knows while sending the select (resulting from
- * a fetch) to the corresponding remote node with the command
- * id of the command that created the cursor.
- */
- HeapScanDesc scan;
- RemoteQueryState *rqs = (RemoteQueryState *)queryDesc->planstate;
-
- /* Allocate and initialize scan descriptor */
- scan = (HeapScanDesc) palloc0(sizeof(HeapScanDescData));
- /* Copy snap shot into the scan descriptor */
- scan->rs_snapshot = queryDesc->snapshot;
- /* Copy scan descriptor in remote query state */
- rqs->ss.ss_currentScanDesc = scan;
-
- rqs->cursor = pstrdup(portal->name);
- }
-#endif
-#endif
-
ExecutorRun(queryDesc, direction, count);
nprocessed = queryDesc->estate->es_processed;
PopActiveSnapshot();
@@ -1669,14 +1640,10 @@ PortalRunUtility(Portal portal, Node *utilityStmt, bool isTopLevel,
IsA(utilityStmt, NotifyStmt) ||
IsA(utilityStmt, UnlistenStmt) ||
#ifdef PGXC
-#ifdef XCP
IsA(utilityStmt, PauseClusterStmt) ||
IsA(utilityStmt, BarrierStmt) ||
(IsA(utilityStmt, CheckPointStmt) && IS_PGXC_DATANODE)))
#else
- (IsA(utilityStmt, CheckPointStmt) && IS_PGXC_DATANODE)))
-#endif
-#else
IsA(utilityStmt, CheckPointStmt)))
#endif
{
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index ba78dc3914..90a68c7e64 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -813,11 +813,7 @@ standard_ProcessUtility(Node *parsetree,
/* Clean also remote Coordinators */
sprintf(query, "CLEAN CONNECTION TO ALL FOR DATABASE %s;", stmt->dbname);
-#ifdef XCP
ExecUtilityStmtOnNodes(query, NULL, sentToRemote, true, EXEC_ON_ALL_NODES, false);
-#else
- ExecUtilityStmtOnNodes(query, NULL, sentToRemote, true, EXEC_ON_COORDS, false);
-#endif
}
#endif
@@ -924,32 +920,6 @@ standard_ProcessUtility(Node *parsetree,
case T_VariableSetStmt:
ExecSetVariableStmt((VariableSetStmt *) parsetree, isTopLevel);
-#ifdef PGXC
-#ifndef XCP
- /* Let the pooler manage the statement */
- if (IS_PGXC_LOCAL_COORDINATOR)
- {
- VariableSetStmt *stmt = (VariableSetStmt *) parsetree;
- /*
- * If command is local and we are not in a transaction block do NOT
- * send this query to backend nodes, it is just bypassed by the backend.
- */
- if (stmt->is_local)
- {
- if (IsTransactionBlock())
- {
- if (PoolManagerSetCommand(POOL_CMD_LOCAL_SET, queryString) < 0)
- elog(ERROR, "Postgres-XC: ERROR SET query");
- }
- }
- else
- {
- if (PoolManagerSetCommand(POOL_CMD_GLOBAL_SET, queryString) < 0)
- elog(ERROR, "Postgres-XC: ERROR SET query");
- }
- }
-#endif
-#endif
break;
case T_VariableShowStmt:
@@ -1057,7 +1027,6 @@ standard_ProcessUtility(Node *parsetree,
WarnNoTransactionChain(isTopLevel, "SET CONSTRAINTS");
AfterTriggerSetState((ConstraintsSetStmt *) parsetree);
#ifdef PGXC
-#ifdef XCP
/*
* Just send statement to all the datanodes. It is effectively noop
* if no transaction, because transaction will be committed and
@@ -1068,18 +1037,6 @@ standard_ProcessUtility(Node *parsetree,
*/
ExecUtilityStmtOnNodes(queryString, NULL, sentToRemote, false,
EXEC_ON_DATANODES, false);
-#else
- /*
- * Let the pooler manage the statement, SET CONSTRAINT can just be used
- * inside a transaction block, hence it has no effect outside that, so use
- * it as a local one.
- */
- if (IS_PGXC_LOCAL_COORDINATOR && IsTransactionBlock())
- {
- if (PoolManagerSetCommand(POOL_CMD_LOCAL_SET, queryString) < 0)
- elog(ERROR, "Postgres-XC: ERROR SET query");
- }
-#endif
#endif
break;
@@ -1448,7 +1405,6 @@ standard_ProcessUtility(Node *parsetree,
case T_CleanConnStmt:
-#ifdef XCP
/*
			 * First send the command to other nodes over any existing
			 * connections, then clean the local pooler
@@ -1456,13 +1412,6 @@ standard_ProcessUtility(Node *parsetree,
if (IS_PGXC_COORDINATOR)
ExecUtilityStmtOnNodes(queryString, NULL, sentToRemote, true, EXEC_ON_ALL_NODES, false);
CleanConnection((CleanConnStmt *) parsetree);
-#else
- Assert(IS_PGXC_COORDINATOR);
- CleanConnection((CleanConnStmt *) parsetree);
-
- if (IS_PGXC_COORDINATOR)
- ExecUtilityStmtOnNodes(queryString, NULL, sentToRemote, true, EXEC_ON_COORDS, false);
-#endif
break;
#endif
case T_CommentStmt:
@@ -1661,13 +1610,9 @@ ProcessUtilitySlow(Node *parsetree,
* Coordinator, if not already done so
*/
if (!sentToRemote)
-#ifdef XCP
stmts = AddRemoteQueryNode(stmts, queryString, is_local
? EXEC_ON_NONE
: (is_temp ? EXEC_ON_DATANODES : EXEC_ON_ALL_NODES));
-#else
- stmts = AddRemoteQueryNode(stmts, queryString, EXEC_ON_ALL_NODES, is_temp);
-#endif
#endif
/* ... and do it */
@@ -1679,13 +1624,6 @@ ProcessUtilitySlow(Node *parsetree,
{
Datum toast_options;
static char *validnsps[] = HEAP_RELOPT_NAMESPACES;
-#ifdef PGXC
-#ifndef XCP
- /* Set temporary object object flag in pooler */
- if (is_temp)
- PoolManagerSetCommand(POOL_CMD_TEMP, NULL);
-#endif
-#endif
/* Create the table itself */
address = DefineRelation((CreateStmt *) stmt,
@@ -1801,11 +1739,7 @@ ProcessUtilitySlow(Node *parsetree,
relid,
&is_temp);
-#ifdef XCP
stmts = AddRemoteQueryNode(stmts, queryString, exec_type);
-#else
- stmts = AddRemoteQueryNode(stmts, queryString, exec_type, is_temp);
-#endif
}
}
#endif
@@ -2100,11 +2034,7 @@ ProcessUtilitySlow(Node *parsetree,
#ifdef PGXC
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-#ifdef XCP
errmsg("Postgres-XL does not support FOREIGN DATA WRAPPER yet"),
-#else
- errmsg("Postgres-XC does not support FOREIGN DATA WRAPPER yet"),
-#endif
errdetail("The feature is not currently supported")));
#endif
address = CreateForeignDataWrapper((CreateFdwStmt *) parsetree);
@@ -2118,11 +2048,7 @@ ProcessUtilitySlow(Node *parsetree,
#ifdef PGXC
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-#ifdef XCP
errmsg("Postgres-XL does not support SERVER yet"),
-#else
- errmsg("Postgres-XC does not support SERVER yet"),
-#endif
errdetail("The feature is not currently supported")));
#endif
address = CreateForeignServer((CreateForeignServerStmt *) parsetree);
@@ -2136,11 +2062,7 @@ ProcessUtilitySlow(Node *parsetree,
#ifdef PGXC
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-#ifdef XCP
errmsg("Postgres-XL does not support USER MAPPING yet"),
-#else
- errmsg("Postgres-XC does not support USER MAPPING yet"),
-#endif
errdetail("The feature is not currently supported")));
#endif
address = CreateUserMapping((CreateUserMappingStmt *) parsetree);
@@ -2210,13 +2132,9 @@ ProcessUtilitySlow(Node *parsetree,
#ifdef PGXC
if (IS_PGXC_LOCAL_COORDINATOR)
{
-#ifdef XCP
ViewStmt *stmt = (ViewStmt *) parsetree;
if (stmt->view->relpersistence != RELPERSISTENCE_TEMP)
-#else
- if (!ExecIsTempObjectIncluded())
-#endif
ExecUtilityStmtOnNodes(queryString, NULL, sentToRemote, false, EXEC_ON_COORDS, false);
}
#endif
@@ -2268,13 +2186,6 @@ ProcessUtilitySlow(Node *parsetree,
if (!stmt->is_serial)
{
bool is_temp = stmt->sequence->relpersistence == RELPERSISTENCE_TEMP;
-
-#ifndef XCP
- /* Set temporary object flag in pooler */
- if (is_temp)
- PoolManagerSetCommand(POOL_CMD_TEMP, NULL);
-#endif
-
ExecUtilityStmtOnNodes(queryString, NULL, sentToRemote, false, EXEC_ON_ALL_NODES, is_temp);
}
}
@@ -2365,11 +2276,7 @@ ProcessUtilitySlow(Node *parsetree,
			/* Postgres-XC does not yet support triggers */
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-#ifdef XCP
errmsg("Postgres-XL does not support TRIGGER yet"),
-#else
- errmsg("Postgres-XC does not support TRIGGER yet"),
-#endif
errdetail("The feature is not currently supported")));
if (IS_PGXC_LOCAL_COORDINATOR)
@@ -4331,9 +4238,6 @@ ExecUtilityStmtOnNodes(const char *queryString, ExecNodes *nodes, bool sentToRem
step->sql_statement = pstrdup(queryString);
step->force_autocommit = force_autocommit;
step->exec_type = exec_type;
-#ifndef XCP
- step->is_temp = is_temp;
-#endif
ExecRemoteUtility(step);
pfree(step->sql_statement);
pfree(step);
@@ -4436,29 +4340,18 @@ ExecUtilityFindNodesRelkind(Oid relid, bool *is_temp)
switch (relkind_str)
{
case RELKIND_SEQUENCE:
-#ifndef XCP
- *is_temp = IsTempTable(relid);
- exec_type = EXEC_ON_ALL_NODES;
- break;
-#endif
case RELKIND_RELATION:
-#ifdef XCP
- if ((*is_temp = IsTempTable(relid)))
- {
- if (IsLocalTempTable(relid))
- exec_type = EXEC_ON_NONE;
- else
- exec_type = EXEC_ON_DATANODES;
- }
+ if ((*is_temp = IsTempTable(relid)))
+ {
+ if (IsLocalTempTable(relid))
+ exec_type = EXEC_ON_NONE;
else
- exec_type = EXEC_ON_ALL_NODES;
-#else
- *is_temp = IsTempTable(relid);
+ exec_type = EXEC_ON_DATANODES;
+ }
+ else
exec_type = EXEC_ON_ALL_NODES;
-#endif
break;
-#ifdef XCP
case RELKIND_INDEX:
{
HeapTuple tuple;
@@ -4481,7 +4374,6 @@ ExecUtilityFindNodesRelkind(Oid relid, bool *is_temp)
}
}
break;
-#endif
case RELKIND_VIEW:
if ((*is_temp = IsTempTable(relid)))
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index b49ff0ab18..77f7fbe4c1 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -269,20 +269,6 @@ array_in(PG_FUNCTION_ARGS)
typdelim = my_extra->typdelim;
typioparam = my_extra->typioparam;
-#ifndef XCP
- /* Make a modifiable copy of the input */
- string_save = pstrdup(string);
-
- /*
- * If the input string starts with dimension info, read and use that.
- * Otherwise, we require the input to be in curly-brace style, and we
- * prescan the input to determine dimensions.
- *
- * Dimension info takes the form of one or more [n] or [m:n] items. The
- * outer loop iterates once per dimension item.
- */
- p = string_save;
-#endif
ndim = 0;
for (;;)
{
diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c
index 1bfc3fe58e..c36401b128 100644
--- a/src/backend/utils/adt/dbsize.c
+++ b/src/backend/utils/adt/dbsize.c
@@ -1021,18 +1021,9 @@ pgxc_database_size(Oid dbOid)
Datum
pgxc_execute_on_nodes(int numnodes, Oid *nodelist, char *query)
{
-#ifndef XCP
- StringInfoData buf;
- int ret;
- TupleDesc spi_tupdesc;
-#endif
int i;
int64 total_size = 0;
int64 size = 0;
-#ifndef XCP
- bool isnull;
- char *nodename;
-#endif
Datum datum = (Datum) 0;
#ifdef XCP
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 07be6f24a3..4e697fb9ff 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -2282,9 +2282,6 @@ RI_Initial_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel)
const char *sep;
int i;
int save_nestlevel;
-#ifndef XCP
- char workmembuf[32];
-#endif
int spi_result;
SPIPlanPtr qplan;
@@ -2440,18 +2437,6 @@ RI_Initial_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel)
*/
save_nestlevel = NewGUCNestLevel();
-#ifndef XCP
- /*
- * In multitenant extension we restrict permission on work_mem.
- * This code may be executed by ordinary user, so skip this optimization.
- * XXX look for workaround
- */
- snprintf(workmembuf, sizeof(workmembuf), "%d", maintenance_work_mem);
- (void) set_config_option("work_mem", workmembuf,
- PGC_USERSET, PGC_S_SESSION,
- GUC_ACTION_SAVE, true, 0, false);
-#endif
-
if (SPI_connect() != SPI_OK_CONNECT)
elog(ERROR, "SPI_connect failed");
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 982af02591..c8414c8dcc 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -125,11 +125,6 @@ typedef struct
int wrapColumn; /* max line length, or -1 for no limit */
int indentLevel; /* current indent level for prettyprint */
bool varprefix; /* TRUE to print prefixes on Vars */
-#ifdef PGXC
-#ifndef XCP
- bool finalise_aggs; /* should Datanode finalise the aggregates? */
-#endif /* XCP */
-#endif /* PGXC */
ParseExprKind special_exprkind; /* set only for exprkinds needing
* special handling */
} deparse_context;
@@ -912,11 +907,6 @@ pg_get_triggerdef_worker(Oid trigid, bool pretty)
context.windowClause = NIL;
context.windowTList = NIL;
context.varprefix = true;
-#ifdef PGXC
-#ifndef XCP
- context.finalise_aggs = false;
-#endif /* XCP */
-#endif /* PGXC */
context.prettyFlags = pretty ? PRETTYFLAG_PAREN | PRETTYFLAG_INDENT : PRETTYFLAG_INDENT;
context.wrapColumn = WRAP_COLUMN_DEFAULT;
context.indentLevel = PRETTYINDENT_STD;
@@ -2545,11 +2535,6 @@ deparse_expression_pretty(Node *expr, List *dpcontext,
context.windowTList = NIL;
context.varprefix = forceprefix;
context.prettyFlags = prettyFlags;
-#ifdef PGXC
-#ifndef XCP
- context.finalise_aggs = false;
-#endif /* XCP */
-#endif /* PGXC */
context.wrapColumn = WRAP_COLUMN_DEFAULT;
context.indentLevel = startIndent;
context.special_exprkind = EXPR_KIND_NONE;
@@ -4297,11 +4282,6 @@ make_ruledef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc,
context.prettyFlags = prettyFlags;
context.wrapColumn = WRAP_COLUMN_DEFAULT;
context.indentLevel = PRETTYINDENT_STD;
-#ifdef PGXC
-#ifndef XCP
- context.finalise_aggs = false;
-#endif /* XCP */
-#endif /* PGXC */
context.special_exprkind = EXPR_KIND_NONE;
set_deparse_for_query(&dpns, query, NIL);
@@ -4504,9 +4484,6 @@ get_query_def_from_valuesList(Query *query, StringInfo buf)
context.prettyFlags = 0;
context.indentLevel = 0;
context.wrapColumn = 0;
-#ifndef XCP
- context.finalise_aggs = query->qry_finalise_aggs;
-#endif
dpns.rtable = query->rtable;
dpns.ctes = query->cteList;
@@ -4671,11 +4648,6 @@ get_query_def(Query *query, StringInfo buf, List *parentnamespace,
context.prettyFlags = prettyFlags;
context.wrapColumn = wrapColumn;
context.indentLevel = startIndent;
-#ifdef PGXC
-#ifndef XCP
- context.finalise_aggs = query->qry_finalise_aggs;
-#endif /* XCP */
-#endif /* PGXC */
context.special_exprkind = EXPR_KIND_NONE;
set_deparse_for_query(&dpns, query, parentnamespace);
@@ -5734,19 +5706,6 @@ get_insert_query_def(Query *query, deparse_context *context)
/* Insert the WITH clause if given */
get_with_clause(query, context);
-#ifdef PGXC
-#ifndef XCP
- /*
- * In the case of "INSERT ... DEFAULT VALUES" analyzed in pgxc planner,
- * return the sql statement directly if the table has no default values.
- */
- if (IS_PGXC_LOCAL_COORDINATOR && !query->targetList)
- {
- appendStringInfo(buf, "%s", query->sql_statement);
- return;
- }
-#endif
-#endif
/*
* If it's an INSERT ... SELECT or multi-row VALUES, there will be a
@@ -5773,20 +5732,6 @@ get_insert_query_def(Query *query, deparse_context *context)
if (select_rte && values_rte)
elog(ERROR, "both subquery and values RTEs in INSERT");
-#ifdef PGXC
-#ifndef XCP
- /*
- * If it's an INSERT ... SELECT or VALUES (...), (...), ...
- * sql_statement is rewritten and assigned in RewriteQuery.
- * Just return it here.
- */
- if (IS_PGXC_LOCAL_COORDINATOR && values_rte != NULL)
- {
- appendStringInfo(buf, "%s", query->sql_statement);
- return;
- }
-#endif
-#endif
/*
* Start the query with INSERT INTO relname
*/
@@ -8773,37 +8718,6 @@ get_agg_expr(Aggref *aggref, deparse_context *context)
/* Extract the argument types as seen by the parser */
nargs = get_aggregate_argtypes(aggref, argtypes);
-#ifdef PGXC
-#ifndef XCP
- /*
- * Datanode should send finalised aggregate results. Datanodes evaluate only
- * transition results. In order to get the finalised aggregate, we enclose
- * the aggregate call inside final function call, so as to get finalised
- * results at the Coordinator
- */
- if (context->finalise_aggs)
- {
- HeapTuple aggTuple;
- Form_pg_aggregate aggform;
- aggTuple = SearchSysCache(AGGFNOID,
- ObjectIdGetDatum(aggref->aggfnoid),
- 0, 0, 0);
- if (!HeapTupleIsValid(aggTuple))
- elog(ERROR, "cache lookup failed for aggregate %u",
- aggref->aggfnoid);
- aggform = (Form_pg_aggregate) GETSTRUCT(aggTuple);
-
- if (OidIsValid(aggform->aggfinalfn))
- {
- appendStringInfo(buf, "%s(", generate_function_name(aggform->aggfinalfn, 0,
- NULL, NULL, NULL));
- added_finalfn = true;
- }
- ReleaseSysCache(aggTuple);
- }
-#endif /* XCP */
-#endif /* PGXC */
-
/* Print the aggregate name, schema-qualified if needed */
appendStringInfo(buf, "%s(%s",
generate_function_name(aggref->aggfnoid, nargs,
diff --git a/src/backend/utils/adt/version.c b/src/backend/utils/adt/version.c
index 3ba52d9c7b..e7072adb8b 100644
--- a/src/backend/utils/adt/version.c
+++ b/src/backend/utils/adt/version.c
@@ -28,12 +28,3 @@ pgsql_version(PG_FUNCTION_ARGS)
PG_RETURN_TEXT_P(cstring_to_text(PG_VERSION_STR));
}
-#ifdef PGXC
-#ifndef XCP
-Datum
-pgxc_version(PG_FUNCTION_ARGS)
-{
- PG_RETURN_TEXT_P(cstring_to_text(PGXC_VERSION_STR));
-}
-#endif
-#endif
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index 837e19eb3a..211ece0e99 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -81,11 +81,6 @@
#include "pgxc/squeue.h"
#endif
#include "pgxc/pgxc.h"
-
-
-#ifndef XCP
-static void drop_datanode_statements(Plan *plannode);
-#endif
#endif
@@ -439,11 +434,6 @@ CompleteCachedPlan(CachedPlanSource *plansource,
plansource->parserSetupArg = parserSetupArg;
plansource->cursor_options = cursor_options;
plansource->fixed_result = fixed_result;
-#ifdef PGXC
-#ifndef XCP
- plansource->stmt_name = NULL;
-#endif
-#endif
plansource->resultDesc = PlanCacheComputeResultDesc(querytree_list);
MemoryContextSwitchTo(oldcxt);
@@ -563,28 +553,6 @@ ReleaseGenericPlan(CachedPlanSource *plansource)
{
CachedPlan *plan = plansource->gplan;
-#ifdef PGXC
-#ifndef XCP
- /* Drop this plan on remote nodes */
- if (plan)
- {
- ListCell *lc;
-
- /* Close any active planned Datanode statements */
- foreach (lc, plan->stmt_list)
- {
- Node *node = lfirst(lc);
-
- if (IsA(node, PlannedStmt))
- {
- PlannedStmt *ps = (PlannedStmt *)node;
- drop_datanode_statements(ps->planTree);
- }
- }
- }
-#endif
-#endif
-
#ifdef XCP
/* Release SharedQueue if still held */
if (IsConnFromDatanode() && plan && list_length(plan->stmt_list) == 1)
@@ -1354,31 +1322,6 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
}
/*
- * Find and release all Datanode statements referenced by the plan node and subnodes
- */
-#ifdef PGXC
-#ifndef XCP
-static void
-drop_datanode_statements(Plan *plannode)
-{
- if (IsA(plannode, RemoteQuery))
- {
- RemoteQuery *step = (RemoteQuery *) plannode;
-
- if (step->statement)
- DropDatanodeStatement(step->statement);
- }
-
- if (innerPlan(plannode))
- drop_datanode_statements(innerPlan(plannode));
-
- if (outerPlan(plannode))
- drop_datanode_statements(outerPlan(plannode));
-}
-#endif
-#endif
-
-/*
* ReleaseCachedPlan: release active use of a cached plan.
*
* This decrements the reference count, and frees the plan if the count
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 897c8251c7..85bde4d0b9 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -1060,14 +1060,8 @@ RelationBuildDesc(Oid targetRelId, bool insertIt)
relation->trigdesc = NULL;
#ifdef PGXC
-#ifdef XCP
if (IS_PGXC_COORDINATOR &&
relation->rd_id >= FirstNormalObjectId)
-#else
- if (IS_PGXC_COORDINATOR &&
- relation->rd_id >= FirstNormalObjectId &&
- !IsAutoVacuumWorkerProcess())
-#endif
RelationBuildLocator(relation);
#endif
if (relation->rd_rel->relrowsecurity)
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 93b64563a7..aed3037462 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -937,35 +937,6 @@ static struct config_bool ConfigureNamesBool[] =
NULL, NULL, NULL
},
#ifdef PGXC
-#ifndef XCP
- {
- {"enable_remotejoin", PGC_USERSET, QUERY_TUNING_METHOD,
- gettext_noop("Enables the planner's use of remote join plans."),
- NULL
- },
- &enable_remotejoin,
- true,
- NULL, NULL, NULL
- },
- {
- {"enable_fast_query_shipping", PGC_USERSET, QUERY_TUNING_METHOD,
- gettext_noop("Enables the planner's use of fast query shipping to ship query directly to datanode."),
- NULL
- },
- &enable_fast_query_shipping,
- true,
- NULL, NULL, NULL
- },
- {
- {"enable_remotegroup", PGC_USERSET, QUERY_TUNING_METHOD,
- gettext_noop("Enables the planner's use of remote group plans."),
- NULL
- },
- &enable_remotegroup,
- true,
- NULL, NULL, NULL
- },
-#else
{
{"loose_constraints", PGC_USERSET, COORDINATORS,
gettext_noop("Relax enforcing of constraints"),
@@ -988,7 +959,6 @@ static struct config_bool ConfigureNamesBool[] =
NULL, NULL, NULL
},
#endif
-#endif
{
{"geqo", PGC_USERSET, QUERY_TUNING_GEQO,
gettext_noop("Enables genetic query optimization."),
@@ -2844,7 +2814,6 @@ static struct config_int ConfigureNamesInt[] =
NULL, NULL, NULL
},
#ifdef PGXC
-#ifdef XCP
{
{"sequence_range", PGC_USERSET, COORDINATORS,
gettext_noop("The range of values to ask from GTM for sequences. "
@@ -2888,29 +2857,6 @@ static struct config_int ConfigureNamesInt[] =
100, 1, 65535,
NULL, NULL, NULL
},
-#else
- {
- {"min_pool_size", PGC_POSTMASTER, DATA_NODES,
- gettext_noop("Initial pool size."),
- gettext_noop("If number of active connections decreased below this value, "
- "new connections are established")
- },
- &MinPoolSize,
- 1, 1, 65535,
- NULL, NULL, NULL
- },
-
- {
- {"max_pool_size", PGC_POSTMASTER, DATA_NODES,
- gettext_noop("Max pool size."),
- gettext_noop("If number of active connections reaches this value, "
- "other connection requests will be refused")
- },
- &MaxPoolSize,
- 100, 1, 65535,
- NULL, NULL, NULL
- },
-#endif
{
{"pooler_port", PGC_POSTMASTER, DATA_NODES,
@@ -4039,11 +3985,7 @@ static struct config_enum ConfigureNamesEnum[] =
#ifdef PGXC
{
{"remotetype", PGC_BACKEND, CONN_AUTH,
-#ifdef XCP
gettext_noop("Sets the type of Postgres-XL remote connection"),
-#else
- gettext_noop("Sets the type of Postgres-XC remote connection"),
-#endif
NULL
},
&remoteConnType,
@@ -7729,33 +7671,6 @@ set_config_by_name(PG_FUNCTION_ARGS)
/* get the new current value */
new_value = GetConfigOptionByName(name, NULL);
-
-#ifdef PGXC
-#ifndef XCP
- /*
- * Convert this to SET statement and pass it to pooler.
- * If command is local and we are not in a transaction block do NOT
- * send this query to backend nodes, it is just bypassed by the backend.
- */
- if (IS_PGXC_LOCAL_COORDINATOR
- && (!is_local || IsTransactionBlock()))
- {
- PoolCommandType poolcmdType = (is_local ? POOL_CMD_LOCAL_SET : POOL_CMD_GLOBAL_SET);
- StringInfoData poolcmd;
-
- initStringInfo(&poolcmd);
- appendStringInfo(&poolcmd, "SET %s %s TO %s",
- (is_local ? "LOCAL" : ""),
- name,
- (value ? value : "DEFAULT"));
-
- if (PoolManagerSetCommand(poolcmdType, poolcmd.data) < 0)
- elog(ERROR, "Postgres-XC: ERROR SET query");
-
- }
-#endif
-#endif
-
/* Convert return string to text */
PG_RETURN_TEXT_P(cstring_to_text(new_value));
}
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index 2984ce7b7c..d8e84dad80 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -238,11 +238,7 @@ struct Tuplesortstate
MemoryContext sortcontext; /* memory context holding all sort data */
LogicalTapeSet *tapeset; /* logtape.c object for tapes in a temp file */
#ifdef PGXC
-#ifdef XCP
ResponseCombiner *combiner; /* tuple source, alternate to tapeset */
-#else
- RemoteQueryState *combiner; /* tuple source, alternate to tapeset */
-#endif /* XCP */
#endif /* PGXC */
/*
@@ -1008,11 +1004,7 @@ Tuplesortstate *
tuplesort_begin_merge(TupleDesc tupDesc,
int nkeys, AttrNumber *attNums,
Oid *sortOperators, Oid *sortCollations, bool *nullsFirstFlags,
-#ifdef XCP
ResponseCombiner *combiner,
-#else
- RemoteQueryState *combiner,
-#endif
int workMem)
{
Tuplesortstate *state = tuplesort_begin_common(workMem, false);
@@ -3393,7 +3385,6 @@ readtup_heap(Tuplesortstate *state, SortTuple *stup,
}
#ifdef PGXC
-#ifdef XCP
static unsigned int
getlen_datanode(Tuplesortstate *state, int tapenum, bool eofOK)
{
@@ -3439,167 +3430,6 @@ readtup_datanode(Tuplesortstate *state, SortTuple *stup,
state->tupDesc,
&stup->isnull1);
}
-#else
-static unsigned int
-getlen_datanode(Tuplesortstate *state, int tapenum, bool eofOK)
-{
- RemoteQueryState *combiner = state->combiner;
- PGXCNodeHandle *conn = combiner->connections[tapenum];
- /*
- * If connection is active (potentially has data to read) we can get node
- * number from the connection. If connection is not active (we have read all
- * available data rows) and if we have buffered data from that connection
- * the node number is stored in combiner->tapenodes[tapenum].
- * If connection is inactive and no buffered data we have EOF condition
- */
- int nid;
- unsigned int len = 0;
- ListCell *lc;
- ListCell *prev = NULL;
-
- /* May it ever happen ?! */
- if (!conn && !combiner->tapenodes)
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Failed to fetch from data node cursor")));
-
- nid = conn ? PGXCNodeGetNodeId(conn->nodeoid, PGXC_NODE_DATANODE) : combiner->tapenodes[tapenum];
-
- if (nid < 0)
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Node id %d is incorrect", nid)));
-
- /*
- * If there are buffered rows iterate over them and get first from
- * the requested tape
- */
- foreach (lc, combiner->rowBuffer)
- {
- RemoteDataRow dataRow = (RemoteDataRow) lfirst(lc);
- if (dataRow->msgnode == nid)
- {
- combiner->currentRow = *dataRow;
- combiner->rowBuffer = list_delete_cell(combiner->rowBuffer, lc, prev);
- return dataRow->msglen;
- }
- prev = lc;
- }
-
- /* Nothing is found in the buffer, check for EOF */
- if (conn == NULL)
- {
- if (eofOK)
- return 0;
- else
- elog(ERROR, "unexpected end of data");
- }
-
- /* Going to get data from connection, buffer if needed */
- if (conn->state == DN_CONNECTION_STATE_QUERY && conn->combiner != combiner)
- BufferConnection(conn);
-
- /* Request more rows if needed */
- if (conn->state == DN_CONNECTION_STATE_IDLE)
- {
- Assert(combiner->cursor);
- if (pgxc_node_send_execute(conn, combiner->cursor, 1000) != 0)
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Failed to fetch from data node cursor")));
- if (pgxc_node_send_sync(conn) != 0)
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Failed to fetch from data node cursor")));
- conn->state = DN_CONNECTION_STATE_QUERY;
- conn->combiner = combiner;
- }
- /* Read data from the connection until get a row or EOF */
- for (;;)
- {
- switch (handle_response(conn, combiner))
- {
- case RESPONSE_SUSPENDED:
- /* Send Execute to request next row */
- Assert(combiner->cursor);
- if (len)
- return len;
- if (pgxc_node_send_execute(conn, combiner->cursor, 1000) != 0)
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Failed to fetch from data node cursor")));
- if (pgxc_node_send_sync(conn) != 0)
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Failed to fetch from data node cursor")));
- conn->state = DN_CONNECTION_STATE_QUERY;
- conn->combiner = combiner;
- /* fallthru */
- case RESPONSE_EOF:
- /* receive more data */
- if (pgxc_node_receive(1, &conn, NULL))
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg(conn->error)));
- break;
- case RESPONSE_COMPLETE:
- /* EOF encountered, close the tape and report EOF */
- if (combiner->cursor)
- {
- combiner->connections[tapenum] = NULL;
- if (len)
- return len;
- }
- if (eofOK)
- return 0;
- else
- elog(ERROR, "unexpected end of data");
- break;
- case RESPONSE_DATAROW:
- Assert(len == 0);
- if (state->combiner->cursor)
- {
- /*
- * We fetching one row at a time when running EQP
- * so read following PortalSuspended or ResponseComplete
- * to leave connection clean between the calls
- */
- len = state->combiner->currentRow.msglen;
- break;
- }
- else
- return state->combiner->currentRow.msglen;
- default:
- ereport(ERROR,
- (errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("Unexpected response from the data nodes")));
- }
- }
-}
-
-static void
-readtup_datanode(Tuplesortstate *state, SortTuple *stup,
- int tapenum, unsigned int len)
-{
- TupleTableSlot *slot = state->combiner->ss.ss_ScanTupleSlot;
- MinimalTuple tuple;
- HeapTupleData htup;
-
- FetchTuple(state->combiner, slot);
-
- /* copy the tuple into sort storage */
- tuple = ExecCopySlotMinimalTuple(slot);
- stup->tuple = (void *) tuple;
- USEMEM(state, GetMemoryChunkSpace(tuple));
- /* set up first-column key value */
- htup.t_len = tuple->t_len + MINIMAL_TUPLE_OFFSET;
- htup.t_data = (HeapTupleHeader) ((char *) tuple - MINIMAL_TUPLE_OFFSET);
- stup->datum1 = heap_getattr(&htup,
- state->sortKeys[0].ssup_attno,
- state->tupDesc,
- &stup->isnull1);
-}
-#endif /* XCP */
#endif /* PGXC */
/*
diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c
index 959e63c993..3667e2f7cc 100644
--- a/src/backend/utils/time/snapmgr.c
+++ b/src/backend/utils/time/snapmgr.c
@@ -269,11 +269,7 @@ GetTransactionSnapshot(void)
* The command id should therefore be updated in the
* current snapshot.
*/
-#ifdef XCP
if (IsConnFromCoord() || IsConnFromDatanode())
-#else
- if (IsConnFromCoord())
-#endif
SnapshotSetCommandId(GetCurrentCommandId(false));
#endif
return CurrentSnapshot;
diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c
index a67562d816..4a1dda87a1 100644
--- a/src/bin/initdb/initdb.c
+++ b/src/bin/initdb/initdb.c
@@ -2897,11 +2897,7 @@ usage(const char *progname)
printf(_(" --auth-local=METHOD default authentication method for local-socket connections\n"));
printf(_(" [-D, --pgdata=]DATADIR location for this database cluster\n"));
#ifdef PGXC
-#ifdef XCP
printf(_(" --nodename=NODENAME name of Postgres-XL node initialized\n"));
-#else
- printf(_(" --nodename=NODENAME name of Postgres-XC node initialized\n"));
-#endif
#endif
printf(_(" -E, --encoding=ENCODING set default encoding for new databases\n"));
printf(_(" --locale=LOCALE set default locale for new databases\n"));
@@ -3766,11 +3762,7 @@ main(int argc, char *argv[])
#ifdef PGXC
if (!nodename)
{
-#ifdef XCP
fprintf(stderr, _("%s: Postgres-XL node name is mandatory\n"), progname);
-#else
- fprintf(stderr, _("%s: Postgres-XC node name is mandatory\n"), progname);
-#endif
fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
progname);
exit(1);
@@ -3839,19 +3831,11 @@ main(int argc, char *argv[])
#ifdef PGXC
-#ifdef XCP
printf(_("\nSuccess.\n You can now start the database server of the Postgres-XL coordinator using:\n\n"
-#else
- printf(_("\nSuccess.\n You can now start the database server of the Postgres-XC coordinator using:\n\n"
-#endif
" %s%s%spostgres%s --coordinator -D %s%s%s\n"
"or\n"
" %s%s%spg_ctl%s start -D %s%s%s -Z coordinator -l logfile\n\n"
-#ifdef XCP
" You can now start the database server of the Postgres-XL datanode using:\n\n"
-#else
- " You can now start the database server of the Postgres-XC datanode using:\n\n"
-#endif
" %s%s%spostgres%s --datanode -D %s%s%s\n"
"or \n"
" %s%s%spg_ctl%s start -D %s%s%s -Z datanode -l logfile\n\n"),
diff --git a/src/bin/initgtm/initgtm.c b/src/bin/initgtm/initgtm.c
index dd451d66e3..f78f1f5d17 100644
--- a/src/bin/initgtm/initgtm.c
+++ b/src/bin/initgtm/initgtm.c
@@ -744,11 +744,7 @@ CreateRestrictedProcess(char *cmd, PROCESS_INFORMATION *processInfo)
static void
usage(const char *progname)
{
-#ifdef XCP
printf(_("%s initializes GTM for a Postgres-XL database cluster.\n\n"), progname);
-#else
- printf(_("%s initializes a GTM for Postgres-XC database cluster.\n\n"), progname);
-#endif
printf(_("Usage:\n"));
printf(_(" %s [NODE-TYPE] [OPTION]... [DATADIR]\n"), progname);
printf(_("\nOptions:\n"));
@@ -799,11 +795,7 @@ main(int argc, char *argv[])
}
if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
{
-#ifdef XCP
puts("initgtm (Postgres-XL) " PGXC_VERSION);
-#else
- puts("initgtm (Postgres-XC) " PGXC_VERSION);
-#endif
exit(0);
}
}
diff --git a/src/bin/pg_ctl/pg_ctl.c b/src/bin/pg_ctl/pg_ctl.c
index d68aeb1972..816bc7ef2a 100644
--- a/src/bin/pg_ctl/pg_ctl.c
+++ b/src/bin/pg_ctl/pg_ctl.c
@@ -1940,11 +1940,7 @@ do_help(void)
printf(_(" -w wait until operation completes\n"));
printf(_(" -W do not wait until operation completes\n"));
#ifdef PGXC
-#ifdef XCP
printf(_(" -Z NODE-TYPE can be \"coordinator\" or \"datanode\" (Postgres-XL)\n"));
-#else
- printf(_(" -Z NODE-TYPE can be \"coordinator\" or \"datanode\" (Postgres-XC)\n"));
-#endif
#endif
printf(_(" -?, --help show this help, then exit\n"));
printf(_("(The default is to wait for shutdown, but not for start or restart.)\n\n"));
diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c
index 95c8fd371e..a25ac3c585 100644
--- a/src/bin/psql/command.c
+++ b/src/bin/psql/command.c
@@ -1824,12 +1824,8 @@ connection_warnings(bool in_startup)
/* For version match, only print psql banner on startup. */
else if (in_startup)
#ifdef PGXC
-#ifdef XCP
printf("%s (PGXL %s, based on PG %s)\n", pset.progname, PGXC_VERSION, PG_VERSION);
#else
- printf("%s (PGXC %s, based on PG %s)\n", pset.progname, PGXC_VERSION, PG_VERSION);
-#endif
-#else
printf("%s (%s)\n", pset.progname, PG_VERSION);
#endif
diff --git a/src/bin/psql/startup.c b/src/bin/psql/startup.c
index 46f3ea0716..d7fb322389 100644
--- a/src/bin/psql/startup.c
+++ b/src/bin/psql/startup.c
@@ -693,11 +693,7 @@ static void
showVersion(void)
{
#ifdef PGXC
-#ifdef XCP
puts("psql (Postgres-XL) " PGXC_VERSION);
-#else
- puts("psql (Postgres-XC) " PGXC_VERSION);
-#endif
puts("(based on PostgreSQL) " PG_VERSION);
#else
puts("psql (PostgreSQL) " PG_VERSION);
diff --git a/src/gtm/client/fe-protocol.c b/src/gtm/client/fe-protocol.c
index 3e43036140..64a88cb540 100644
--- a/src/gtm/client/fe-protocol.c
+++ b/src/gtm/client/fe-protocol.c
@@ -774,7 +774,6 @@ gtmpqParseSuccess(GTM_Conn *conn, GTM_Result *result)
result->gr_status = GTM_RESULT_ERROR;
break;
}
-#ifdef XCP
if (!gtm_deserialize_pgxcnodeinfo(data, buf, size, &conn->errorMessage))
{
result->gr_status = GTM_RESULT_ERROR;
@@ -784,10 +783,6 @@ gtmpqParseSuccess(GTM_Conn *conn, GTM_Result *result)
{
result->gr_resdata.grd_node_list.nodeinfo[i] = data;
}
-#else
- gtm_deserialize_pgxcnodeinfo(data, buf, size, &conn->errorMessage);
- result->gr_resdata.grd_node_list.nodeinfo[i] = data;
-#endif
}
break;
diff --git a/src/gtm/client/gtm_client.c b/src/gtm/client/gtm_client.c
index 8808735c6c..5058e73b0e 100644
--- a/src/gtm/client/gtm_client.c
+++ b/src/gtm/client/gtm_client.c
@@ -60,17 +60,12 @@ static int abort_transaction_multi_internal(GTM_Conn *conn, int txn_count, Globa
static int open_sequence_internal(GTM_Conn *conn, GTM_SequenceKey key, GTM_Sequence increment,
GTM_Sequence minval, GTM_Sequence maxval,
GTM_Sequence startval, bool cycle, bool is_backup);
-#ifdef XCP
static int get_next_internal(GTM_Conn *conn, GTM_SequenceKey key,
char *coord_name, int coord_procid, GTM_Sequence range,
GTM_Sequence *result, GTM_Sequence *rangemax, bool is_backup);
static int set_val_internal(GTM_Conn *conn, GTM_SequenceKey key,
char *coord_name, int coord_procid, GTM_Sequence nextval,
bool iscalled, bool is_backup);
-#else
-static GTM_Sequence get_next_internal(GTM_Conn *conn, GTM_SequenceKey key, bool is_backup);
-static int set_val_internal(GTM_Conn *conn, GTM_SequenceKey key, GTM_Sequence nextval, bool iscalled, bool is_backup);
-#endif
static int reset_sequence_internal(GTM_Conn *conn, GTM_SequenceKey key, bool is_backup);
static int commit_transaction_internal(GTM_Conn *conn, GlobalTransactionId gxid,
int waited_xid_count,
@@ -1348,7 +1343,6 @@ send_failed:
return -1;
}
-#ifdef XCP
/*
* Request from GTM current value of the specified sequence in the specified
* distributed session.
@@ -1360,14 +1354,9 @@ send_failed:
int
get_current(GTM_Conn *conn, GTM_SequenceKey key,
char *coord_name, int coord_procid, GTM_Sequence *result)
-#else
-GTM_Sequence
-get_current(GTM_Conn *conn, GTM_SequenceKey key)
-#endif
{
GTM_Result *res = NULL;
time_t finish_time;
-#ifdef XCP
int coord_namelen = coord_name ? strlen(coord_name) : 0;
/* Start the message. */
@@ -1379,14 +1368,6 @@ get_current(GTM_Conn *conn, GTM_SequenceKey key)
(coord_namelen > 0 && gtmpqPutnchar(coord_name, coord_namelen, conn)) ||
gtmpqPutInt(coord_procid, 4, conn))
goto send_failed;
-#else
- /* Start the message. */
- if (gtmpqPutMsgStart('C', true, conn) ||
- gtmpqPutInt(MSG_SEQUENCE_GET_CURRENT, sizeof (GTM_MessageType), conn) ||
- gtmpqPutInt(key->gsk_keylen, 4, conn) ||
- gtmpqPutnchar(key->gsk_key, key->gsk_keylen, conn))
- goto send_failed;
-#endif
/* Finish the message. */
if (gtmpqPutMsgEnd(conn))
@@ -1404,30 +1385,18 @@ get_current(GTM_Conn *conn, GTM_SequenceKey key)
if ((res = GTMPQgetResult(conn)) == NULL)
goto receive_failed;
-#ifdef XCP
if (res->gr_status == GTM_RESULT_OK)
*result = res->gr_resdata.grd_seq.seqval;
return res->gr_status;
-#else
- if (res->gr_status == GTM_RESULT_OK)
- return res->gr_resdata.grd_seq.seqval;
- else
- return InvalidSequenceValue;
-#endif
receive_failed:
send_failed:
conn->result = makeEmptyResultIfIsNull(conn->result);
conn->result->gr_status = GTM_RESULT_COMM_ERROR;
-#ifdef XCP
return GTM_RESULT_COMM_ERROR;
-#else
- return -1;
-#endif
}
-#ifdef XCP
/*
* Submit to GTM new next value of the specified sequence in the specified
* distributed session. The nextval parameter is the new value, if is called
@@ -1452,46 +1421,24 @@ bkup_set_val(GTM_Conn *conn, GTM_SequenceKey key, char *coord_name,
return set_val_internal(conn, key, coord_name, coord_procid, nextval,
iscalled, true);
}
-#else
-int
-set_val(GTM_Conn *conn, GTM_SequenceKey key, GTM_Sequence nextval, bool iscalled)
-{
- return set_val_internal(conn, key, nextval, iscalled, false);
-}
-int
-bkup_set_val(GTM_Conn *conn, GTM_SequenceKey key, GTM_Sequence nextval, bool iscalled)
-{
- return set_val_internal(conn, key, nextval, iscalled, true);
-}
-#endif
-
-#ifdef XCP
static int
set_val_internal(GTM_Conn *conn, GTM_SequenceKey key,
char *coord_name, int coord_procid, GTM_Sequence nextval,
bool iscalled, bool is_backup)
-#else
-static int
-set_val_internal(GTM_Conn *conn, GTM_SequenceKey key, GTM_Sequence nextval, bool iscalled, bool is_backup)
-#endif
{
GTM_Result *res = NULL;
time_t finish_time;
-#ifdef XCP
int coord_namelen = coord_name ? strlen(coord_name) : 0;
-#endif
/* Start the message. */
if (gtmpqPutMsgStart('C', true, conn) ||
gtmpqPutInt(is_backup ? MSG_BKUP_SEQUENCE_SET_VAL : MSG_SEQUENCE_SET_VAL, sizeof (GTM_MessageType), conn) ||
gtmpqPutInt(key->gsk_keylen, 4, conn) ||
gtmpqPutnchar(key->gsk_key, key->gsk_keylen, conn) ||
-#ifdef XCP
gtmpqPutInt(coord_namelen, 4, conn) ||
(coord_namelen > 0 && gtmpqPutnchar(coord_name, coord_namelen, conn)) ||
gtmpqPutInt(coord_procid, 4, conn) ||
-#endif
gtmpqPutnchar((char *)&nextval, sizeof (GTM_Sequence), conn) ||
gtmpqPutc(iscalled, conn))
goto send_failed;
@@ -1522,14 +1469,9 @@ receive_failed:
send_failed:
conn->result = makeEmptyResultIfIsNull(conn->result);
conn->result->gr_status = GTM_RESULT_COMM_ERROR;
-#ifdef XCP
return GTM_RESULT_COMM_ERROR;
-#else
- return -1;
-#endif
}
-#ifdef XCP
/*
* Request from GTM the next value of the specified sequence.
* Function returns GTM_RESULT_OK if it succeeds; it sets the *result parameter
@@ -1554,33 +1496,14 @@ bkup_get_next(GTM_Conn *conn, GTM_SequenceKey key,
return get_next_internal(conn, key, coord_name, coord_procid,
range, result, rangemax, true);
}
-#else
-GTM_Sequence
-get_next(GTM_Conn *conn, GTM_SequenceKey key)
-{
- return get_next_internal(conn, key, false);
-}
-GTM_Sequence
-bkup_get_next(GTM_Conn *conn, GTM_SequenceKey key)
-{
- return get_next_internal(conn, key, true);
-}
-#endif
-
-#ifdef XCP
static int
get_next_internal(GTM_Conn *conn, GTM_SequenceKey key,
char *coord_name, int coord_procid, GTM_Sequence range,
GTM_Sequence *result, GTM_Sequence *rangemax, bool is_backup)
-#else
-static GTM_Sequence
-get_next_internal(GTM_Conn *conn, GTM_SequenceKey key, bool is_backup)
-#endif
{
GTM_Result *res = NULL;
time_t finish_time;
-#ifdef XCP
int coord_namelen = coord_name ? strlen(coord_name) : 0;
/* Start the message. */
@@ -1593,14 +1516,6 @@ get_next_internal(GTM_Conn *conn, GTM_SequenceKey key, bool is_backup)
gtmpqPutInt(coord_procid, 4, conn) ||
gtmpqPutnchar((char *)&range, sizeof (GTM_Sequence), conn))
goto send_failed;
-#else
- /* Start the message. */
- if (gtmpqPutMsgStart('C', true, conn) ||
- gtmpqPutInt(is_backup ? MSG_BKUP_SEQUENCE_GET_NEXT : MSG_SEQUENCE_GET_NEXT, sizeof (GTM_MessageType), conn) ||
- gtmpqPutInt(key->gsk_keylen, 4, conn) ||
- gtmpqPutnchar(key->gsk_key, key->gsk_keylen, conn))
- goto send_failed;
-#endif
/* Finish the message. */
if (gtmpqPutMsgEnd(conn))
@@ -1620,19 +1535,12 @@ get_next_internal(GTM_Conn *conn, GTM_SequenceKey key, bool is_backup)
if ((res = GTMPQgetResult(conn)) == NULL)
goto receive_failed;
-#ifdef XCP
if (res->gr_status == GTM_RESULT_OK)
{
*result = res->gr_resdata.grd_seq.seqval;
*rangemax = res->gr_resdata.grd_seq.rangemax;
}
return res->gr_status;
-#else
- if (res->gr_status == GTM_RESULT_OK)
- return res->gr_resdata.grd_seq.seqval;
- else
- return InvalidSequenceValue;
-#endif
}
return GTM_RESULT_OK;
@@ -1640,11 +1548,7 @@ receive_failed:
send_failed:
conn->result = makeEmptyResultIfIsNull(conn->result);
conn->result->gr_status = GTM_RESULT_COMM_ERROR;
-#ifdef XCP
return GTM_RESULT_COMM_ERROR;
-#else
- return -1;
-#endif
}
int
@@ -2466,8 +2370,6 @@ send_failed:
return -1;
}
-
-#ifdef XCP
/*
* Submit to GTM information about started distributed session.
* The information is the session identifier consisting of coordinator name and
@@ -2528,4 +2430,3 @@ send_failed:
conn->result->gr_status = GTM_RESULT_COMM_ERROR;
return -1;
}
-#endif
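With the XC-only variants removed above, the sequence client calls always carry the distributed-session identity (coordinator name and backend PID) and report status through their int return value instead of a sentinel GTM_Sequence. A minimal caller sketch using only the get_current() signature shown in this file; the wrapper name, the key-length convention and the string.h include are illustrative assumptions:

#include <string.h>
#include "gtm/gtm_client.h"   /* GTM_Conn, GTM_SequenceKeyData, get_current() */

/* Sketch: fetch the current value of a sequence for this coordinator session. */
static int
fetch_current_seq_value(GTM_Conn *conn, char *seqname,
                        char *coord_name, int coord_procid,
                        GTM_Sequence *value)
{
    GTM_SequenceKeyData seqkey;

    seqkey.gsk_keylen = strlen(seqname) + 1;   /* assumed: key length includes the terminator */
    seqkey.gsk_key = seqname;

    /* GTM_RESULT_OK on success, GTM_RESULT_COMM_ERROR if the link breaks */
    return get_current(conn, &seqkey, coord_name, coord_procid, value);
}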
diff --git a/src/gtm/common/gtm_serialize.c b/src/gtm/common/gtm_serialize.c
index 6f9f97aae4..caa1c1c0f2 100644
--- a/src/gtm/common/gtm_serialize.c
+++ b/src/gtm/common/gtm_serialize.c
@@ -671,12 +671,10 @@ gtm_get_pgxcnodeinfo_size(GTM_PGXCNodeInfo *data)
len += sizeof(GTM_PGXCNodeStatus); /* status */
-#ifdef XCP
len += sizeof(uint32); /* max_sessions */
len += sizeof(uint32); /* num_sessions */
if (data->num_sessions > 0) /* sessions */
len += (data->num_sessions * sizeof(GTM_PGXCSession));
-#endif
return len;
}
@@ -764,7 +762,6 @@ gtm_serialize_pgxcnodeinfo(GTM_PGXCNodeInfo *data, char *buf, size_t buflen)
memcpy(buf + len, &(data->status), sizeof(GTM_PGXCNodeStatus));
len += sizeof(GTM_PGXCNodeStatus);
-#ifdef XCP
/* GTM_PGXCNodeInfo.sessions */
len_wk = data->max_sessions;
memcpy(buf + len, &len_wk, sizeof(uint32));
@@ -777,7 +774,6 @@ gtm_serialize_pgxcnodeinfo(GTM_PGXCNodeInfo *data, char *buf, size_t buflen)
memcpy(buf + len, data->sessions, len_wk * sizeof(GTM_PGXCSession));
len += len_wk * sizeof(GTM_PGXCSession);
}
-#endif
/* NOTE: nothing to be done for node_lock */
return len;
@@ -787,25 +783,18 @@ gtm_serialize_pgxcnodeinfo(GTM_PGXCNodeInfo *data, char *buf, size_t buflen)
/*
* Return the number of bytes deserialized for the PGXC node information
*/
-#ifdef XCP
size_t
gtm_deserialize_pgxcnodeinfo(GTM_PGXCNodeInfo *data, const char *buf, size_t buflen, PQExpBuffer errorbuf)
-#else
-size_t
-gtm_deserialize_pgxcnodeinfo(GTM_PGXCNodeInfo *data, const char *buf, size_t buflen)
-#endif
{
size_t len = 0;
uint32 len_wk;
/* GTM_PGXCNodeInfo.type */
-#ifdef XCP
if (len + sizeof(GTM_PGXCNodeType) > buflen)
{
printfGTMPQExpBuffer(errorbuf, "Buffer length error in deserialization of node info. buflen = %d", (int) buflen);
return (size_t) 0;
}
-#endif
memcpy(&(data->type), buf + len, sizeof(GTM_PGXCNodeType));
len += sizeof(GTM_PGXCNodeType);
@@ -819,13 +808,11 @@ gtm_deserialize_pgxcnodeinfo(GTM_PGXCNodeInfo *data, const char *buf, size_t buf
}
else
{
-#ifdef XCP
if (len + len_wk > buflen)
{
printfGTMPQExpBuffer(errorbuf, "Buffer length error in deserialization of node name");
return (size_t) 0;
}
-#endif
/* PGXCTODO: free memory */
data->nodename = (char *)genAlloc(len_wk + 1);
@@ -844,13 +831,11 @@ gtm_deserialize_pgxcnodeinfo(GTM_PGXCNodeInfo *data, const char *buf, size_t buf
}
else
{
-#ifdef XCP
if (len + len_wk > buflen)
{
printfGTMPQExpBuffer(errorbuf, "Buffer length error in deserialization of node info after proxy name");
return (size_t) 0;
}
-#endif
/* PGXCTODO: free memory */
data->proxyname = (char *)genAlloc(len_wk + 1);
memcpy(data->proxyname, buf + len, (size_t)len_wk);
@@ -859,13 +844,11 @@ gtm_deserialize_pgxcnodeinfo(GTM_PGXCNodeInfo *data, const char *buf, size_t buf
}
/* GTM_PGXCNodeInfo.port */
-#ifdef XCP
if (len + sizeof(GTM_PGXCNodePort) > buflen)
{
printfGTMPQExpBuffer(errorbuf, "Buffer length error in deserialization of node port");
return (size_t) 0;
}
-#endif
memcpy(&(data->port), buf + len, sizeof(GTM_PGXCNodePort));
len += sizeof(GTM_PGXCNodePort);
@@ -878,13 +861,11 @@ gtm_deserialize_pgxcnodeinfo(GTM_PGXCNodeInfo *data, const char *buf, size_t buf
}
else
{
-#ifdef XCP
if (len + len_wk > buflen)
{
printfGTMPQExpBuffer(errorbuf, "Buffer length error in deserialization of ipaddress");
return (size_t) 0;
}
-#endif
data->ipaddress = (char *)genAlloc(len_wk + 1);
memcpy(data->ipaddress, buf + len, (size_t)len_wk);
data->ipaddress[len_wk] = 0; /* null_terminate */
@@ -900,13 +881,11 @@ gtm_deserialize_pgxcnodeinfo(GTM_PGXCNodeInfo *data, const char *buf, size_t buf
}
else
{
-#ifdef XCP
if (len + len_wk > buflen)
{
printfGTMPQExpBuffer(errorbuf, "Buffer length error in deserialization of node info after data folder");
return (size_t) 0;
}
-#endif
data->datafolder = (char *)genAlloc(len_wk + 1);
memcpy(data->datafolder, buf + len, (size_t)len_wk);
data->datafolder[len_wk] = 0; /* null_terminate */
@@ -914,17 +893,14 @@ gtm_deserialize_pgxcnodeinfo(GTM_PGXCNodeInfo *data, const char *buf, size_t buf
}
/* GTM_PGXCNodeInfo.status */
-#ifdef XCP
if (len + sizeof(GTM_PGXCNodeStatus) > buflen)
{
printfGTMPQExpBuffer(errorbuf, "Buffer length error in deserialization of node info after status");
return (size_t) 0;
}
-#endif
memcpy(&(data->status), buf + len, sizeof(GTM_PGXCNodeStatus));
len += sizeof(GTM_PGXCNodeStatus);
-#ifdef XCP
/* GTM_PGXCNodeInfo.sessions */
memcpy(&len_wk, buf + len, sizeof(uint32));
len += sizeof(uint32);
@@ -945,7 +921,6 @@ gtm_deserialize_pgxcnodeinfo(GTM_PGXCNodeInfo *data, const char *buf, size_t buf
memcpy(data->sessions, buf + len, len_wk * sizeof(GTM_PGXCSession));
len += len_wk * sizeof(GTM_PGXCSession);
}
-#endif
/* NOTE: nothing to be done for node_lock */
@@ -966,13 +941,9 @@ gtm_get_sequence_size(GTM_SeqInfo *seq)
len += sizeof(GTM_SequenceKeyType); /* gs_key.gsk_type */
len += sizeof(GTM_Sequence); /* gs_value */
len += sizeof(GTM_Sequence); /* gs_init_value */
-#ifdef XCP
len += sizeof(uint32); /* gs_max_lastvals */
len += sizeof(uint32); /* gs_lastval_count */
len += seq->gs_lastval_count * sizeof(GTM_SeqLastVal); /* gs_last_values */
-#else
- len += sizeof(GTM_Sequence); /* gs_last_value */
-#endif
len += sizeof(GTM_Sequence); /* gs_increment_by */
len += sizeof(GTM_Sequence); /* gs_min_value */
len += sizeof(GTM_Sequence); /* gs_max_value */
@@ -1013,7 +984,6 @@ gtm_serialize_sequence(GTM_SeqInfo *s, char *buf, size_t buflen)
memcpy(buf + len, &s->gs_init_value, sizeof(GTM_Sequence));
len += sizeof(GTM_Sequence); /* gs_init_value */
-#ifdef XCP
memcpy(buf + len, &s->gs_max_lastvals, sizeof(uint32));
len += sizeof(uint32); /* gs_max_lastvals */
memcpy(buf + len, &s->gs_lastval_count, sizeof(uint32));
@@ -1021,10 +991,6 @@ gtm_serialize_sequence(GTM_SeqInfo *s, char *buf, size_t buflen)
memcpy(buf + len, s->gs_last_values,
s->gs_lastval_count * sizeof(GTM_SeqLastVal));
len += s->gs_lastval_count * sizeof(GTM_SeqLastVal); /* gs_last_values */
-#else
- memcpy(buf + len, &s->gs_last_value, sizeof(GTM_Sequence));
- len += sizeof(GTM_Sequence); /* gs_last_value */
-#endif
memcpy(buf + len, &s->gs_increment_by, sizeof(GTM_Sequence));
len += sizeof(GTM_Sequence); /* gs_increment_by */
@@ -1076,7 +1042,6 @@ gtm_deserialize_sequence(GTM_SeqInfo *seq, const char *buf, size_t buflen)
memcpy(&seq->gs_init_value, buf + len, sizeof(GTM_Sequence));
len += sizeof(GTM_Sequence); /* gs_init_value */
-#ifdef XCP
memcpy(&seq->gs_max_lastvals, buf + len, sizeof(uint32));
len += sizeof(uint32); /* gs_max_lastvals */
if (seq->gs_max_lastvals > 0)
@@ -1090,10 +1055,6 @@ gtm_deserialize_sequence(GTM_SeqInfo *seq, const char *buf, size_t buflen)
seq->gs_lastval_count * sizeof(GTM_SeqLastVal));
len += seq->gs_lastval_count * sizeof(GTM_SeqLastVal); /* gs_last_values */
}
-#else
- memcpy(&seq->gs_last_value, buf + len, sizeof(GTM_Sequence));
- len += sizeof(GTM_Sequence); /* gs_last_value */
-#endif
memcpy(&seq->gs_increment_by, buf + len, sizeof(GTM_Sequence));
len += sizeof(GTM_Sequence); /* gs_increment_by */
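Since the session array is now always part of the serialized form, the size/serialize/deserialize trio for node info stays in lockstep, and a length problem during deserialization is reported through the PQExpBuffer argument with a zero return. A round-trip sketch built only from the three signatures visible in this file; the helper name is illustrative and errorbuf is assumed to be an already-initialized buffer (fe-protocol.c passes conn->errorMessage):

/* Sketch: serialize a node-info record and read it back, surfacing length errors. */
static bool
roundtrip_nodeinfo(GTM_PGXCNodeInfo *src, GTM_PGXCNodeInfo *dst, PQExpBuffer errorbuf)
{
    size_t need = gtm_get_pgxcnodeinfo_size(src);
    char  *buf = (char *) genAlloc(need);
    size_t written = gtm_serialize_pgxcnodeinfo(src, buf, need);

    /* gtm_deserialize_pgxcnodeinfo() returns 0 and fills errorbuf on a length error */
    return gtm_deserialize_pgxcnodeinfo(dst, buf, written, errorbuf) != 0;
}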
diff --git a/src/gtm/common/gtm_utils.c b/src/gtm/common/gtm_utils.c
index a9c79cf301..96c6e95b20 100644
--- a/src/gtm/common/gtm_utils.c
+++ b/src/gtm/common/gtm_utils.c
@@ -36,9 +36,7 @@ static struct enum_name message_name_tab[] =
{MSG_BKUP_NODE_REGISTER, "MSG_BKUP_NODE_REGISTER"},
{MSG_NODE_UNREGISTER, "MSG_NODE_UNREGISTER"},
{MSG_BKUP_NODE_UNREGISTER, "MSG_BKUP_NODE_UNREGISTER"},
-#ifdef XCP
{MSG_REGISTER_SESSION, "MSG_REGISTER_SESSION"},
-#endif
{MSG_NODE_LIST, "MSG_NODE_LIST"},
{MSG_NODE_BEGIN_REPLICATION_INIT, "MSG_NODE_BEGIN_REPLICATION_INIT"},
{MSG_NODE_END_REPLICATION_INIT, "MSG_NODE_END_REPLICATION_INIT"},
diff --git a/src/gtm/main/gtm_seq.c b/src/gtm/main/gtm_seq.c
index 121f92c915..e8d4e128e2 100644
--- a/src/gtm/main/gtm_seq.c
+++ b/src/gtm/main/gtm_seq.c
@@ -55,9 +55,7 @@ static GTM_SequenceKey seq_copy_key(GTM_SequenceKey key);
static int seq_drop_with_dbkey(GTM_SequenceKey nsp);
static bool GTM_NeedSeqRestoreUpdateInternal(GTM_SeqInfo *seqinfo);
-#ifdef XCP
static GTM_Sequence get_rangemax(GTM_SeqInfo *seqinfo, GTM_Sequence range);
-#endif
/*
* Get the hash value given the sequence key
@@ -342,14 +340,9 @@ GTM_SeqOpen(GTM_SequenceKey seqkey,
*/
seqinfo->gs_cycle = cycle;
-#ifdef XCP
seqinfo->gs_max_lastvals = 0;
seqinfo->gs_lastval_count = 0;
seqinfo->gs_last_values = NULL;
-#else
- /* Set the last value in case of a future restart */
- seqinfo->gs_last_value = seqinfo->gs_init_value;
-#endif
seqinfo->gs_backedUpValue = seqinfo->gs_value;
@@ -407,23 +400,10 @@ int GTM_SeqAlter(GTM_SequenceKey seqkey,
{
/* Restart command has been used, reset the sequence */
seqinfo->gs_called = false;
-#ifdef XCP
seqinfo->gs_value = lastval;
-#else
- seqinfo->gs_init_value = seqinfo->gs_last_value = lastval;
-#endif
}
-#ifdef XCP
if (seqinfo->gs_init_value != startval)
seqinfo->gs_init_value = startval;
-#else
- else
- {
- /* Start has been used, reinitialize init value */
- if (seqinfo->gs_init_value != startval)
- seqinfo->gs_init_value = seqinfo->gs_last_value = startval;
- }
-#endif
/* Remove the old key with the old name */
GTM_RWLockRelease(&seqinfo->gs_lock);
@@ -463,14 +443,10 @@ GTM_SeqRestore(GTM_SequenceKey seqkey,
seqinfo->gs_min_value = minval;
seqinfo->gs_max_value = maxval;
-#ifdef XCP
seqinfo->gs_init_value = startval;
seqinfo->gs_max_lastvals = 0;
seqinfo->gs_lastval_count = 0;
seqinfo->gs_last_values = NULL;
-#else
- seqinfo->gs_init_value = seqinfo->gs_last_value = startval;
-#endif
seqinfo->gs_value = curval;
seqinfo->gs_backedUpValue = seqinfo->gs_value;
@@ -664,7 +640,6 @@ GTM_SeqRename(GTM_SequenceKey seqkey, GTM_SequenceKey newseqkey)
newseqinfo->gs_cycle = seqinfo->gs_cycle;
newseqinfo->gs_state = seqinfo->gs_state;
-#ifdef XCP
newseqinfo->gs_max_lastvals = seqinfo->gs_max_lastvals;
newseqinfo->gs_lastval_count = seqinfo->gs_lastval_count;
newseqinfo->gs_last_values = (GTM_SeqLastVal *)
@@ -672,9 +647,6 @@ GTM_SeqRename(GTM_SequenceKey seqkey, GTM_SequenceKey newseqkey)
newseqinfo->gs_max_lastvals * sizeof(GTM_SeqLastVal));
memcpy(newseqinfo->gs_last_values, seqinfo->gs_last_values,
newseqinfo->gs_max_lastvals * sizeof(GTM_SeqLastVal));
-#else
- newseqinfo->gs_last_value = seqinfo->gs_last_value;
-#endif
/* Add the copy to the list */
if ((errcode = seq_add_seqinfo(newseqinfo))) /* a lock is taken here for the new sequence */
@@ -697,7 +669,6 @@ GTM_SeqRename(GTM_SequenceKey seqkey, GTM_SequenceKey newseqkey)
return errcode;
}
-#ifdef XCP
/*
* Get current value for the sequence without incrementing it
*/
@@ -998,157 +969,6 @@ get_rangemax(GTM_SeqInfo *seqinfo, GTM_Sequence range)
}
return rangemax;
}
-#else
-/*
- * Get current value for the sequence without incrementing it
- */
-GTM_Sequence
-GTM_SeqGetCurrent(GTM_SequenceKey seqkey)
-{
- GTM_SeqInfo *seqinfo = seq_find_seqinfo(seqkey);
- GTM_Sequence value;
-
- if (seqinfo == NULL)
- {
- ereport(LOG,
- (EINVAL,
- errmsg("The sequence with the given key does not exist")));
- return InvalidSequenceValue;
- }
-
- GTM_RWLockAcquire(&seqinfo->gs_lock, GTM_LOCKMODE_WRITE);
-
- value = seqinfo->gs_last_value;
-
- GTM_RWLockRelease(&seqinfo->gs_lock);
- seq_release_seqinfo(seqinfo);
- return value;
-}
-
-/*
- * Set values for the sequence
- */
-int
-GTM_SeqSetVal(GTM_SequenceKey seqkey, GTM_Sequence nextval, bool iscalled)
-{
- GTM_SeqInfo *seqinfo = seq_find_seqinfo(seqkey);
-
- if (seqinfo == NULL)
- {
- ereport(LOG,
- (EINVAL,
- errmsg("The sequence with the given key does not exist")));
-
- return EINVAL;
- }
-
- GTM_RWLockAcquire(&seqinfo->gs_lock, GTM_LOCKMODE_WRITE);
-
- seqinfo->gs_last_value = seqinfo->gs_value;
-
- if (seqinfo->gs_value != nextval)
- seqinfo->gs_value = nextval;
-
- seqinfo->gs_called = iscalled;
-
- /* If sequence is not called, reset the init value to the value set */
- if (!iscalled)
- seqinfo->gs_init_value = nextval;
-
- /* Remove the old key with the old name */
- GTM_RWLockRelease(&seqinfo->gs_lock);
- GTM_SetNeedBackup();
- seq_release_seqinfo(seqinfo);
-
- return 0;
-}
-
-/*
- * Get next value for the sequence
- */
-GTM_Sequence
-GTM_SeqGetNext(GTM_SequenceKey seqkey)
-{
- GTM_SeqInfo *seqinfo = seq_find_seqinfo(seqkey);
- GTM_Sequence value;
-
- if (seqinfo == NULL)
- {
- ereport(LOG,
- (EINVAL,
- errmsg("The sequence with the given key does not exist")));
- return InvalidSequenceValue;
- }
-
- GTM_RWLockAcquire(&seqinfo->gs_lock, GTM_LOCKMODE_WRITE);
-
- /*
- * If the sequence is called for the first time, initialize the value and
- * return the start value
- */
- if (!SEQ_IS_CALLED(seqinfo))
- {
- value = seqinfo->gs_last_value = seqinfo->gs_value = seqinfo->gs_init_value;
- seqinfo->gs_called = true;
- GTM_RWLockRelease(&seqinfo->gs_lock);
- seq_release_seqinfo(seqinfo);
- return value;
- }
-
- if (SEQ_IS_ASCENDING(seqinfo))
- {
- /*
- * Check if the sequence is about to wrap-around. If the sequence does
- * not support wrap-around, throw an error and return
- * InvalidSequenceValue
- */
- if (seqinfo->gs_max_value - seqinfo->gs_increment_by >= seqinfo->gs_value)
- value = seqinfo->gs_last_value = seqinfo->gs_value = seqinfo->gs_value + seqinfo->gs_increment_by;
- else if (SEQ_IS_CYCLE(seqinfo))
- value = seqinfo->gs_last_value = seqinfo->gs_value = seqinfo->gs_min_value;
- else
- {
- GTM_RWLockRelease(&seqinfo->gs_lock);
- seq_release_seqinfo(seqinfo);
- ereport(LOG,
- (ERANGE,
- errmsg("Sequence reached maximum value")));
- return InvalidSequenceValue;
- }
- }
- else
- {
- /*
- * Check if the sequence is about to wrap-around. If the sequence does
- * not support wrap-around, throw an error and return
- * InvalidSequenceValue, otherwise wrap around the sequence and reset
- * it to the max value.
- *
- * Note: The gs_increment_by is a signed integer and is negative for
- * descending sequences. So we don't need special handling below
- */
- if (seqinfo->gs_min_value - seqinfo->gs_increment_by <= seqinfo->gs_value)
- value = seqinfo->gs_value = seqinfo->gs_last_value = seqinfo->gs_value + seqinfo->gs_increment_by;
- else if (SEQ_IS_CYCLE(seqinfo))
- value = seqinfo->gs_value = seqinfo->gs_last_value = seqinfo->gs_max_value;
- else
- {
- GTM_RWLockRelease(&seqinfo->gs_lock);
- seq_release_seqinfo(seqinfo);
- ereport(LOG,
- (ERANGE,
- errmsg("Sequence reached minimum value")));
- return InvalidSequenceValue;
- }
-
- }
- GTM_RWLockRelease(&seqinfo->gs_lock);
- if (GTM_NeedSeqRestoreUpdateInternal(seqinfo))
- GTM_SetNeedBackup();
- seq_release_seqinfo(seqinfo);
- return value;
-}
-#endif
/*
* Reset the sequence
@@ -1167,11 +987,7 @@ GTM_SeqReset(GTM_SequenceKey seqkey)
}
GTM_RWLockAcquire(&seqinfo->gs_lock, GTM_LOCKMODE_WRITE);
-#ifdef XCP
seqinfo->gs_value = seqinfo->gs_backedUpValue = seqinfo->gs_init_value;
-#else
- seqinfo->gs_value = seqinfo->gs_last_value = seqinfo->gs_init_value;
-#endif
GTM_RWLockRelease(&gtm_bkup_lock);
@@ -1275,10 +1091,8 @@ ProcessSequenceInitCommand(Port *myport, StringInfo message, bool is_backup)
elog(DEBUG1, "open_sequence() returns rc %d.", rc);
}
-#ifdef XCP
/* Save control file with new seq info */
SaveControlInfo();
-#endif
/*
* Send a SUCCESS message back to the client
*/
@@ -1393,10 +1207,9 @@ ProcessSequenceAlterCommand(Port *myport, StringInfo message, bool is_backup)
elog(DEBUG1, "alter_sequence() returns rc %d.", rc);
}
-#ifdef XCP
/* Save control file info */
SaveControlInfo();
-#endif
+
pq_beginmessage(&buf, 'S');
pq_sendint(&buf, SEQUENCE_ALTER_RESULT, 4);
if (myport->remote_type == GTM_NODE_GTM_PROXY)
@@ -1553,15 +1366,13 @@ ProcessSequenceGetCurrentCommand(Port *myport, StringInfo message)
GTM_SequenceKeyData seqkey;
StringInfoData buf;
GTM_Sequence seqval;
-#ifdef XCP
uint32 coord_namelen;
char *coord_name;
uint32 coord_procid;
-#endif
seqkey.gsk_keylen = pq_getmsgint(message, sizeof (seqkey.gsk_keylen));
seqkey.gsk_key = (char *)pq_getmsgbytes(message, seqkey.gsk_keylen);
-#ifdef XCP
+
coord_namelen = pq_getmsgint(message, sizeof(coord_namelen));
if (coord_namelen > 0)
coord_name = (char *)pq_getmsgbytes(message, coord_namelen);
@@ -1570,13 +1381,6 @@ ProcessSequenceGetCurrentCommand(Port *myport, StringInfo message)
coord_procid = pq_getmsgint(message, sizeof(coord_procid));
GTM_SeqGetCurrent(&seqkey, coord_name, coord_procid, &seqval);
-#else
- seqval = GTM_SeqGetCurrent(&seqkey);
- if (!SEQVAL_IS_VALID(seqval))
- ereport(ERROR,
- (ERANGE,
- errmsg("Can not get current value of the sequence")));
-#endif
elog(DEBUG1, "Getting current value %ld for sequence %s", seqval, seqkey.gsk_key);
@@ -1634,17 +1438,15 @@ ProcessSequenceGetNextCommand(Port *myport, StringInfo message, bool is_backup)
GTM_SequenceKeyData seqkey;
StringInfoData buf;
GTM_Sequence seqval;
-#ifdef XCP
GTM_Sequence range;
GTM_Sequence rangemax;
uint32 coord_namelen;
char *coord_name;
uint32 coord_procid;
-#endif
seqkey.gsk_keylen = pq_getmsgint(message, sizeof (seqkey.gsk_keylen));
seqkey.gsk_key = (char *)pq_getmsgbytes(message, seqkey.gsk_keylen);
-#ifdef XCP
+
coord_namelen = pq_getmsgint(message, sizeof(coord_namelen));
if (coord_namelen > 0)
coord_name = (char *)pq_getmsgbytes(message, coord_namelen);
@@ -1659,13 +1461,6 @@ ProcessSequenceGetNextCommand(Port *myport, StringInfo message, bool is_backup)
ereport(ERROR,
(ERANGE,
errmsg("Can not get current value of the sequence")));
-#else
- seqval = GTM_SeqGetNext(&seqkey);
- if (!SEQVAL_IS_VALID(seqval))
- ereport(ERROR,
- (ERANGE,
- errmsg("Can not get current value of the sequence")));
-#endif
elog(DEBUG1, "Getting next value %ld for sequence %s", seqval, seqkey.gsk_key);
@@ -1681,13 +1476,9 @@ ProcessSequenceGetNextCommand(Port *myport, StringInfo message, bool is_backup)
elog(DEBUG1, "calling get_next() for standby GTM %p.", GetMyThreadInfo->thr_conn->standby);
retry:
-#ifdef XCP
bkup_get_next(GetMyThreadInfo->thr_conn->standby, &seqkey,
coord_name, coord_procid,
range, &loc_seq, &rangemax);
-#else
- loc_seq = bkup_get_next(GetMyThreadInfo->thr_conn->standby, &seqkey);
-#endif
if (gtm_standby_check_communication_error(&count, oldconn))
goto retry;
@@ -1698,10 +1489,9 @@ ProcessSequenceGetNextCommand(Port *myport, StringInfo message, bool is_backup)
elog(DEBUG1, "get_next() returns GTM_Sequence %ld.", loc_seq);
}
-#ifdef XCP
/* Save control file info */
SaveControlInfo();
-#endif
+
/* Respond to the client */
pq_beginmessage(&buf, 'S');
pq_sendint(&buf, SEQUENCE_GET_NEXT_RESULT, 4);
@@ -1714,9 +1504,7 @@ ProcessSequenceGetNextCommand(Port *myport, StringInfo message, bool is_backup)
pq_sendint(&buf, seqkey.gsk_keylen, 4);
pq_sendbytes(&buf, seqkey.gsk_key, seqkey.gsk_keylen);
pq_sendbytes(&buf, (char *)&seqval, sizeof (GTM_Sequence));
-#ifdef XCP
pq_sendbytes(&buf, (char *)&rangemax, sizeof (GTM_Sequence));
-#endif
pq_endmessage(myport, &buf);
if (myport->remote_type != GTM_NODE_GTM_PROXY)
@@ -1745,25 +1533,22 @@ ProcessSequenceSetValCommand(Port *myport, StringInfo message, bool is_backup)
StringInfoData buf;
bool iscalled;
int errcode;
-#ifdef XCP
uint32 coord_namelen;
char *coord_name;
uint32 coord_procid;
-#endif
/*
* Get the sequence key
*/
seqkey.gsk_keylen = pq_getmsgint(message, sizeof (seqkey.gsk_keylen));
seqkey.gsk_key = (char *)pq_getmsgbytes(message, seqkey.gsk_keylen);
-#ifdef XCP
+
coord_namelen = pq_getmsgint(message, sizeof(coord_namelen));
if (coord_namelen > 0)
coord_name = (char *)pq_getmsgbytes(message, coord_namelen);
else
coord_name = NULL;
coord_procid = pq_getmsgint(message, sizeof(coord_procid));
-#endif
/* Read parameters to be set */
memcpy(&nextval, pq_getmsgbytes(message, sizeof (GTM_Sequence)),
@@ -1780,17 +1565,10 @@ ProcessSequenceSetValCommand(Port *myport, StringInfo message, bool is_backup)
elog(DEBUG1, "Setting new value %ld for sequence %s", nextval, seqkey.gsk_key);
-#ifdef XCP
if ((errcode = GTM_SeqSetVal(&seqkey, coord_name, coord_procid, nextval, iscalled)))
ereport(ERROR,
(errcode,
errmsg("Failed to set values of sequence")));
-#else
- if ((errcode = GTM_SeqSetVal(&seqkey, nextval, iscalled)))
- ereport(ERROR,
- (errcode,
- errmsg("Failed to set values of sequence")));
-#endif
MemoryContextSwitchTo(oldContext);
@@ -1808,19 +1586,12 @@ ProcessSequenceSetValCommand(Port *myport, StringInfo message, bool is_backup)
elog(DEBUG1, "calling set_val() for standby GTM %p.", GetMyThreadInfo->thr_conn->standby);
retry:
-#ifdef XCP
rc = bkup_set_val(GetMyThreadInfo->thr_conn->standby,
&seqkey,
coord_name,
coord_procid,
nextval,
iscalled);
-#else
- rc = bkup_set_val(GetMyThreadInfo->thr_conn->standby,
- &seqkey,
- nextval,
- iscalled);
-#endif
if (gtm_standby_check_communication_error(&count, oldconn))
goto retry;
@@ -1831,10 +1602,9 @@ ProcessSequenceSetValCommand(Port *myport, StringInfo message, bool is_backup)
elog(DEBUG1, "set_val() returns rc %d.", rc);
}
-#ifdef XCP
/* Save control file info */
SaveControlInfo();
-#endif
+
/* Respond to the client */
pq_beginmessage(&buf, 'S');
pq_sendint(&buf, SEQUENCE_SET_VAL_RESULT, 4);
@@ -1905,10 +1675,9 @@ ProcessSequenceResetCommand(Port *myport, StringInfo message, bool is_backup)
elog(DEBUG1, "reset_sequence() returns rc %d.", rc);
}
-#ifdef XCP
/* Save control file info */
SaveControlInfo();
-#endif
+
/* Respond to the client */
pq_beginmessage(&buf, 'S');
pq_sendint(&buf, SEQUENCE_RESET_RESULT, 4);
@@ -1981,10 +1750,9 @@ ProcessSequenceCloseCommand(Port *myport, StringInfo message, bool is_backup)
elog(DEBUG1, "close_sequence() returns rc %d.", rc);
}
-#ifdef XCP
/* Save control file info */
SaveControlInfo();
-#endif
+
/* Respond to the client */
pq_beginmessage(&buf, 'S');
pq_sendint(&buf, SEQUENCE_CLOSE_RESULT, 4);
@@ -2072,10 +1840,9 @@ ProcessSequenceRenameCommand(Port *myport, StringInfo message, bool is_backup)
elog(DEBUG1, "rename_sequence() returns rc %d.", rc);
}
-#ifdef XCP
/* Save control file info */
SaveControlInfo();
-#endif
+
/* Send a SUCCESS message back to the client */
pq_beginmessage(&buf, 'S');
pq_sendint(&buf, SEQUENCE_RENAME_RESULT, 4);
@@ -2461,8 +2228,6 @@ GTM_RestoreSeqInfo(FILE *ctlf)
}
}
-
-#ifdef XCP
/*
* Remove all current values allocated for the specified session from all
* sequences.
@@ -2523,4 +2288,3 @@ GTM_CleanupSeqSession(char *coord_name, int coord_procid)
GTM_RWLockRelease(&bucket->shb_lock);
}
}
-#endif
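With the XC paths gone, ProcessSequenceGetNextCommand() always reads the session identity and a range from the request, and always answers with both the allocated value and the top of the reserved range. Pieced together from the pq_getmsg*/pq_send* calls above and the gtmpqPut* calls in gtm_client.c, the exchange is roughly as follows (framing and message-type fields are handled by the existing helpers):

/*
 * MSG_SEQUENCE_GET_NEXT request body:
 *   int32         gsk_keylen
 *   bytes         gsk_key         -- sequence key
 *   int32         coord_namelen
 *   bytes         coord_name      -- omitted when coord_namelen == 0
 *   int32         coord_procid
 *   GTM_Sequence  range           -- how many values to reserve
 *
 * SEQUENCE_GET_NEXT_RESULT response body:
 *   int32         gsk_keylen
 *   bytes         gsk_key
 *   GTM_Sequence  seqval          -- next value handed to the caller
 *   GTM_Sequence  rangemax        -- last value of the reserved range
 */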
diff --git a/src/gtm/main/gtm_txn.c b/src/gtm/main/gtm_txn.c
index 871021cc6a..249c74d189 100644
--- a/src/gtm/main/gtm_txn.c
+++ b/src/gtm/main/gtm_txn.c
@@ -47,9 +47,7 @@ static void init_GTM_TransactionInfo(GTM_TransactionInfo *gtm_txninfo,
bool readonly);
static void clean_GTM_TransactionInfo(GTM_TransactionInfo *gtm_txninfo);
-#ifdef XCP
GlobalTransactionId ControlXid; /* last one written to control file */
-#endif
GTM_Transactions GTMTransactions;
void
@@ -111,9 +109,7 @@ GTM_InitTxnManager(void)
GTMTransactions.gt_gtm_state = GTM_STARTING;
-#ifdef XCP
ControlXid = FirstNormalGlobalTransactionId;
-#endif
return;
}
@@ -575,9 +571,7 @@ GTM_GetGlobalTransactionIdMulti(GTM_TransactionHandle handle[], int txn_count)
GlobalTransactionId start_xid = InvalidGlobalTransactionId;
GTM_TransactionInfo *gtm_txninfo = NULL;
int ii;
-#ifdef XCP
bool save_control = false;
-#endif
if (Recovery_IsStandby())
{
@@ -656,7 +650,6 @@ GTM_GetGlobalTransactionIdMulti(GTM_TransactionHandle handle[], int txn_count)
gtm_txninfo->gti_gxid = xid;
}
-#ifdef XCP
/* Periodically write the xid and sequence info out to the control file.
* Try and handle wrapping, too.
*/
@@ -666,17 +659,14 @@ GTM_GetGlobalTransactionIdMulti(GTM_TransactionHandle handle[], int txn_count)
save_control = true;
ControlXid = xid;
}
-#endif
if (GTM_NeedXidRestoreUpdate())
GTM_SetNeedBackup();
GTM_RWLockRelease(&GTMTransactions.gt_XidGenLock);
-#ifdef XCP
/* save control info when not holding the XidGenLock */
if (save_control)
SaveControlInfo();
-#endif
return start_xid;
}
@@ -1395,10 +1385,9 @@ GTM_BkupBeginTransactionGetGXIDMulti(GlobalTransactionId *gxid,
int count;
MemoryContext oldContext;
-#ifdef XCP
bool save_control = false;
GlobalTransactionId xid = InvalidGlobalTransactionId;
-#endif
+
oldContext = MemoryContextSwitchTo(TopMostMemoryContext);
count = GTM_BeginTransactionMulti(isolevel, readonly, connid,
@@ -1430,8 +1419,6 @@ GTM_BkupBeginTransactionGetGXIDMulti(GlobalTransactionId *gxid,
xid = GTMTransactions.gt_nextXid;
}
-
-#ifdef XCP
/* Periodically write the xid and sequence info out to the control file.
* Try and handle wrapping, too.
*/
@@ -1441,15 +1428,12 @@ GTM_BkupBeginTransactionGetGXIDMulti(GlobalTransactionId *gxid,
save_control = true;
ControlXid = xid;
}
-#endif
GTM_RWLockRelease(&GTMTransactions.gt_TransArrayLock);
-#ifdef XCP
/* save control info when not holding the XidGenLock */
if (save_control)
SaveControlInfo();
-#endif
MemoryContextSwitchTo(oldContext);
}
@@ -2724,15 +2708,11 @@ GTM_RestoreTxnInfo(FILE *ctlf, GlobalTransactionId next_gxid)
(!GlobalTransactionIdIsValid(next_gxid)))
next_gxid = InitialGXIDValue_Default;
else if (!GlobalTransactionIdIsValid(next_gxid))
-#ifdef XCP
{
/* Add in extra amount in case we had not gracefully stopped */
next_gxid = saved_gxid + CONTROL_INTERVAL;
ControlXid = next_gxid;
}
-#else
- next_gxid = saved_gxid;
-#endif
}
else if (!GlobalTransactionIdIsValid(next_gxid))
next_gxid = InitialGXIDValue_Default;
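Both GXID allocation paths above now follow the same discipline: note under gt_XidGenLock whether the CONTROL_INTERVAL threshold was crossed, and write the control file only after the lock is released. A condensed sketch of that pattern; the function name and the exact form of the threshold test are assumptions (the real code also copes with GXID wrap-around):

/* Sketch: checkpoint the control file periodically, but never under the XidGenLock. */
static void
maybe_save_control(GlobalTransactionId xid)
{
    bool save_control = false;

    GTM_RWLockAcquire(&GTMTransactions.gt_XidGenLock, GTM_LOCKMODE_WRITE);
    if (xid - ControlXid > CONTROL_INTERVAL)   /* assumed threshold test */
    {
        save_control = true;
        ControlXid = xid;
    }
    GTM_RWLockRelease(&GTMTransactions.gt_XidGenLock);

    /* SaveControlInfo() serializes on control_lock internally (see main.c) */
    if (save_control)
        SaveControlInfo();
}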
diff --git a/src/gtm/main/main.c b/src/gtm/main/main.c
index c55a57363e..a995fe9beb 100644
--- a/src/gtm/main/main.c
+++ b/src/gtm/main/main.c
@@ -78,11 +78,9 @@ int tcp_keepalives_count;
char *error_reporter;
char *status_reader;
bool isStartUp;
-#ifdef XCP
GTM_MutexLock control_lock;
char GTMControlFileTmp[GTM_MAX_PATH];
#define GTM_CONTROL_FILE_TMP "gtm.control.tmp"
-#endif
/* If this is GTM or not */
/*
@@ -209,9 +207,7 @@ InitGTMProcess()
fflush(stdout);
fflush(stderr);
}
-#ifdef XCP
GTM_MutexLockInit(&control_lock);
-#endif
}
static void
@@ -223,9 +219,7 @@ BaseInit()
CreateDataDirLockFile();
sprintf(GTMControlFile, "%s/%s", GTMDataDir, GTM_CONTROL_FILE);
-#ifdef XCP
sprintf(GTMControlFileTmp, "%s/%s", GTMDataDir, GTM_CONTROL_FILE_TMP);
-#endif
if (GTMLogFile == NULL)
{
GTMLogFile = (char *) malloc(GTM_MAX_PATH);
@@ -314,7 +308,6 @@ gtm_status()
exit(0);
}
-#ifdef XCP
/*
* Save control file info
*/
@@ -344,7 +337,6 @@ SaveControlInfo(void)
GTM_MutexLockRelease(&control_lock);
}
-#endif
int
main(int argc, char *argv[])
@@ -658,9 +650,7 @@ main(int argc, char *argv[])
}
else
{
-#ifdef XCP
GTM_MutexLockAcquire(&control_lock);
-#endif
ctlf = fopen(GTMControlFile, "r");
GTM_RestoreTxnInfo(ctlf, next_gxid);
@@ -668,9 +658,7 @@ main(int argc, char *argv[])
if (ctlf)
fclose(ctlf);
-#ifdef XCP
GTM_MutexLockRelease(&control_lock);
-#endif
}
if (Recovery_IsStandby())
@@ -694,11 +682,7 @@ main(int argc, char *argv[])
elog(LOG, "Restoring node information from the active-GTM succeeded.");
}
else
- {
- /* Recover Data of Registered nodes. */
- Recovery_RestoreRegisterInfo();
elog(LOG, "Started to run as GTM-Active.");
- }
/*
* Establish input sockets.
@@ -871,10 +855,6 @@ ServerLoop(void)
if (GTMAbortPending)
{
-#ifndef XCP
- FILE *ctlf;
-#endif
-
/*
* XXX We should do a clean shutdown here. For the time being, just
* write the next GXID to be issued in the control file and exit
@@ -888,19 +868,7 @@ ServerLoop(void)
*/
GTM_SetShuttingDown();
-#ifdef XCP
SaveControlInfo();
-#else
- ctlf = fopen(GTMControlFile, "w");
- if (ctlf == NULL)
- {
- fprintf(stderr, "Failed to create/open the control file\n");
- exit(2);
- }
-
- GTM_SaveTxnInfo(ctlf);
- GTM_SaveSeqInfo(ctlf);
-#endif
#if 0
/*
@@ -915,11 +883,6 @@ ServerLoop(void)
gtm_standby_finishActiveConn();
}
#endif
-
-#ifndef XCP
- fclose(ctlf);
-#endif
-
exit(1);
}
@@ -1336,9 +1299,7 @@ ProcessCommand(Port *myport, StringInfo input_message)
case MSG_NODE_UNREGISTER:
case MSG_BKUP_NODE_UNREGISTER:
case MSG_NODE_LIST:
-#ifdef XCP
case MSG_REGISTER_SESSION:
-#endif
ProcessPGXCNodeCommand(myport, mtype, input_message);
break;
case MSG_BEGIN_BACKUP:
@@ -1572,11 +1533,9 @@ ProcessPGXCNodeCommand(Port *myport, GTM_MessageType mtype, StringInfo message)
ProcessPGXCNodeList(myport, message);
break;
-#ifdef XCP
case MSG_REGISTER_SESSION:
ProcessPGXCRegisterSession(myport, message);
break;
-#endif
default:
Assert(0); /* Shouldn't come here... keep compiler quiet */
diff --git a/src/gtm/proxy/proxy_main.c b/src/gtm/proxy/proxy_main.c
index a8f871d780..041c5be392 100644
--- a/src/gtm/proxy/proxy_main.c
+++ b/src/gtm/proxy/proxy_main.c
@@ -812,9 +812,6 @@ main(int argc, char *argv[])
elog(LOG, "Starting GTM proxy at (%s:%d)", ListenAddresses, GTMProxyPortNumber);
- /* Recover Data of Registered nodes. */
- Recovery_RestoreRegisterInfo();
-
/*
* Establish input sockets.
*/
@@ -1597,9 +1594,7 @@ ProcessCommand(GTMProxy_ConnectionInfo *conninfo, GTM_Conn *gtm_conn,
case MSG_SEQUENCE_ALTER:
case MSG_BARRIER:
case MSG_TXN_COMMIT:
-#ifdef XCP
case MSG_REGISTER_SESSION:
-#endif
GTMProxy_ProxyCommand(conninfo, gtm_conn, mtype, input_message);
break;
@@ -1744,9 +1739,7 @@ IsProxiedMessage(GTM_MessageType mtype)
case MSG_TXN_GET_GID_DATA:
case MSG_NODE_REGISTER:
case MSG_NODE_UNREGISTER:
-#ifdef XCP
case MSG_REGISTER_SESSION:
-#endif
case MSG_SNAPSHOT_GXID_GET:
case MSG_SEQUENCE_INIT:
case MSG_SEQUENCE_GET_CURRENT:
@@ -1949,9 +1942,7 @@ ProcessResponse(GTMProxy_ThreadInfo *thrinfo, GTMProxy_CommandInfo *cmdinfo,
case MSG_TXN_GET_GID_DATA:
case MSG_NODE_REGISTER:
case MSG_NODE_UNREGISTER:
-#ifdef XCP
case MSG_REGISTER_SESSION:
-#endif
case MSG_SNAPSHOT_GXID_GET:
case MSG_SEQUENCE_INIT:
case MSG_SEQUENCE_GET_CURRENT:
@@ -2256,7 +2247,6 @@ ProcessPGXCNodeCommand(GTMProxy_ConnectionInfo *conninfo, GTM_Conn *gtm_conn,
/* Unregistering has to be saved in a place where it can be seen by all the threads */
oldContext = MemoryContextSwitchTo(TopMostMemoryContext);
-#ifdef XCP
/*
* Unregister node. Ignore any error here, otherwise we enter
* endless loop trying to execute command again and again
@@ -2265,18 +2255,6 @@ ProcessPGXCNodeCommand(GTMProxy_ConnectionInfo *conninfo, GTM_Conn *gtm_conn,
cmd_data.cd_reg.nodename,
false,
conninfo->con_port->sock);
-#else
- /* Unregister Node also on Proxy */
- if (Recovery_PGXCNodeUnregister(cmd_data.cd_reg.type,
- cmd_data.cd_reg.nodename,
- false,
- conninfo->con_port->sock))
- {
- ereport(ERROR,
- (EINVAL,
- errmsg("Failed to Unregister node")));
- }
-#endif
MemoryContextSwitchTo(oldContext);
GTMProxy_ProxyPGXCNodeCommand(conninfo, gtm_conn, mtype, cmd_data);
@@ -2506,9 +2484,7 @@ GTMProxy_CommandPending(GTMProxy_ConnectionInfo *conninfo, GTM_MessageType mtype
GTMProxy_CommandInfo *cmdinfo;
GTMProxy_ThreadInfo *thrinfo = GetMyThreadInfo;
-#ifdef XCP
MemoryContext oldContext = MemoryContextSwitchTo(TopMemoryContext);
-#endif
/*
* Add the message to the pending command list
@@ -2520,9 +2496,7 @@ GTMProxy_CommandPending(GTMProxy_ConnectionInfo *conninfo, GTM_MessageType mtype
cmdinfo->ci_data = cmd_data;
thrinfo->thr_pending_commands[mtype] = gtm_lappend(thrinfo->thr_pending_commands[mtype], cmdinfo);
-#ifdef XCP
MemoryContextSwitchTo(oldContext);
-#endif
return;
}
diff --git a/src/gtm/recovery/register_common.c b/src/gtm/recovery/register_common.c
index 8f4f05042b..50c0ee3538 100644
--- a/src/gtm/recovery/register_common.c
+++ b/src/gtm/recovery/register_common.c
@@ -357,18 +357,15 @@ Recovery_PGXCNodeUnregister(GTM_PGXCNodeType type, char *node_name, bool in_reco
Recovery_RecordRegisterInfo(nodeinfo, false);
pfree(nodeinfo->nodename);
-#ifdef XCP
if (nodeinfo->ipaddress)
-#endif
- pfree(nodeinfo->ipaddress);
-#ifdef XCP
+ pfree(nodeinfo->ipaddress);
+
if (nodeinfo->datafolder)
-#endif
- pfree(nodeinfo->datafolder);
-#ifdef XCP
+ pfree(nodeinfo->datafolder);
+
if (nodeinfo->sessions)
pfree(nodeinfo->sessions);
-#endif
+
pfree(nodeinfo);
}
else
@@ -391,11 +388,7 @@ Recovery_PGXCNodeRegister(GTM_PGXCNodeType type,
GTM_PGXCNodeInfo *nodeinfo = NULL;
int errcode = 0;
-#ifdef XCP
nodeinfo = (GTM_PGXCNodeInfo *) palloc0(sizeof(GTM_PGXCNodeInfo));
-#else
- nodeinfo = (GTM_PGXCNodeInfo *) palloc(sizeof (GTM_PGXCNodeInfo));
-#endif
if (nodeinfo == NULL)
ereport(ERROR, (ENOMEM, errmsg("Out of memory")));
@@ -653,81 +646,6 @@ Recovery_RecordRegisterInfo(GTM_PGXCNodeInfo *nodeinfo, bool is_register)
}
void
-Recovery_RestoreRegisterInfo(void)
-{
-#ifndef XCP
- int magic;
- int ctlfd;
-
- /* This is made when GTM/Proxy restarts, so it is not necessary to take a lock */
- ctlfd = open(GTMPGXCNodeFile, O_RDONLY);
-
- if (ctlfd == -1)
- return;
-
- while (read(ctlfd, &magic, sizeof (NodeRegisterMagic)) == sizeof (NodeRegisterMagic))
- {
- GTM_PGXCNodeType type;
- GTM_PGXCNodePort port;
- GTM_PGXCNodeStatus status;
- char *ipaddress, *datafolder, *nodename, *proxyname;
- int len;
-
- if (magic != NodeRegisterMagic && magic != NodeUnregisterMagic)
- {
- elog(WARNING, "Start magic mismatch %x", magic);
- break;
- }
-
- read(ctlfd, &type, sizeof (GTM_PGXCNodeType));
- /* Read size of nodename string */
- read(ctlfd, &len, sizeof (uint32));
- nodename = (char *) palloc(len);
- read(ctlfd, nodename, len);
-
- if (magic == NodeRegisterMagic)
- {
- read(ctlfd, &port, sizeof (GTM_PGXCNodePort));
-
- /* Read size of proxyname string */
- read(ctlfd, &len, sizeof (uint32));
- proxyname = (char *) palloc(len);
- read(ctlfd, proxyname, len);
-
- read(ctlfd, &status, sizeof (GTM_PGXCNodeStatus));
-
- /* Read size of ipaddress string */
- read(ctlfd, &len, sizeof (uint32));
- ipaddress = (char *) palloc(len);
- read(ctlfd, ipaddress, len);
-
- /* Read size of datafolder string */
- read(ctlfd, &len, sizeof (uint32));
- datafolder = (char *) palloc(len);
- read(ctlfd, datafolder, len);
- }
-
- /* Rebuild based on the records */
- if (magic == NodeRegisterMagic)
- Recovery_PGXCNodeRegister(type, nodename, port, proxyname, status,
- ipaddress, datafolder, true, 0);
- else
- Recovery_PGXCNodeUnregister(type, nodename, true, 0);
-
- read(ctlfd, &magic, sizeof(NodeEndMagic));
-
- if (magic != NodeEndMagic)
- {
- elog(WARNING, "Corrupted control file");
- return;
- }
- }
-
- close(ctlfd);
-#endif
-}
-
-void
Recovery_SaveRegisterFileName(char *dir)
{
if (!dir)
@@ -809,8 +727,6 @@ Recovery_PGXCNodeBackendDisconnect(GTM_PGXCNodeType type, char *nodename, int so
return errcode;
}
-
-#ifdef XCP
/*
* Register active distributed session. If another session with specified
* BackendId already exists return the PID of the session, so caller could clean
@@ -893,8 +809,6 @@ Recovery_PGXCNodeRegisterCoordProcess(char *coord_node, int coord_procid,
return 0;
}
-#endif
-
/*
* Process MSG_BACKEND_DISCONNECT
diff --git a/src/gtm/recovery/register_gtm.c b/src/gtm/recovery/register_gtm.c
index 72ebf92e9a..371b4c97f8 100644
--- a/src/gtm/recovery/register_gtm.c
+++ b/src/gtm/recovery/register_gtm.c
@@ -491,8 +491,6 @@ finishStandbyConn(GTM_ThreadInfo *thrinfo)
}
}
-
-#ifdef XCP
/*
* Process MSG_REGISTER_SESSION message
*/
@@ -592,4 +590,3 @@ ProcessPGXCRegisterSession(Port *myport, StringInfo message)
pq_flush(myport);
}
}
-#endif
diff --git a/src/include/access/gtm.h b/src/include/access/gtm.h
index 9a376d3bdc..548441ce65 100644
--- a/src/include/access/gtm.h
+++ b/src/include/access/gtm.h
@@ -17,9 +17,7 @@ extern char *GtmHost;
extern int GtmPort;
extern bool gtm_backup_barrier;
-#ifdef XCP
extern bool IsXidFromGTM;
-#endif
extern GlobalTransactionId currentGxid;
extern bool IsGTMConnected(void);
@@ -51,12 +49,8 @@ extern int UnregisterGTM(GTM_PGXCNodeType type);
/* Sequence interface APIs with GTM */
extern GTM_Sequence GetCurrentValGTM(char *seqname);
-#ifdef XCP
extern GTM_Sequence GetNextValGTM(char *seqname,
GTM_Sequence range, GTM_Sequence *rangemax);
-#else
-extern GTM_Sequence GetNextValGTM(char *seqname);
-#endif
extern int SetValGTM(char *seqname, GTM_Sequence nextval, bool iscalled);
extern int CreateSequenceGTM(char *seqname, GTM_Sequence increment,
GTM_Sequence minval, GTM_Sequence maxval, GTM_Sequence startval,
diff --git a/src/include/access/xact.h b/src/include/access/xact.h
index 98cfdd94e3..caa9d37137 100644
--- a/src/include/access/xact.h
+++ b/src/include/access/xact.h
@@ -332,10 +332,6 @@ extern TransactionId GetTopTransactionIdIfAny(void);
extern TransactionId GetCurrentTransactionId(void);
extern TransactionId GetCurrentTransactionIdIfAny(void);
#ifdef PGXC /* PGXC_COORD */
-#ifndef XCP
-extern bool GetCurrentLocalParamStatus(void);
-extern void SetCurrentLocalParamStatus(bool status);
-#endif
extern GlobalTransactionId GetAuxilliaryTransactionId(void);
extern GlobalTransactionId GetTopGlobalTransactionId(void);
extern void SetAuxilliaryTransactionId(GlobalTransactionId gxid);
diff --git a/src/include/executor/tuptable.h b/src/include/executor/tuptable.h
index 5e1d51e13a..6520cc4653 100644
--- a/src/include/executor/tuptable.h
+++ b/src/include/executor/tuptable.h
@@ -124,16 +124,8 @@ typedef struct TupleTableSlot
bool tts_slow; /* saved state for slot_deform_tuple */
HeapTuple tts_tuple; /* physical tuple, or NULL if virtual */
#ifdef PGXC
-#ifdef XCP
RemoteDataRow tts_datarow; /* Tuple data in DataRow format */
MemoryContext tts_drowcxt; /* Context to store deformed */
-#else
- /*
- * PGXC extension to support tuples sent from remote Datanode.
- */
- char *tts_dataRow; /* Tuple data in DataRow format */
- int tts_dataLen; /* Actual length of the data row */
-#endif
bool tts_shouldFreeRow; /* should pfree tts_dataRow? */
struct AttInMetadata *tts_attinmeta; /* store here info to extract values from the DataRow */
#endif
@@ -172,16 +164,9 @@ extern TupleTableSlot *ExecStoreMinimalTuple(MinimalTuple mtup,
TupleTableSlot *slot,
bool shouldFree);
#ifdef PGXC
-#ifdef XCP
extern TupleTableSlot *ExecStoreDataRowTuple(RemoteDataRow datarow,
TupleTableSlot *slot,
bool shouldFree);
-#else
-extern TupleTableSlot *ExecStoreDataRowTuple(char *msg,
- size_t len,
- TupleTableSlot *slot,
- bool shouldFree);
-#endif
#endif
extern TupleTableSlot *ExecClearTuple(TupleTableSlot *slot);
extern TupleTableSlot *ExecStoreVirtualTuple(TupleTableSlot *slot);
@@ -189,12 +174,8 @@ extern TupleTableSlot *ExecStoreAllNullTuple(TupleTableSlot *slot);
extern HeapTuple ExecCopySlotTuple(TupleTableSlot *slot);
extern MinimalTuple ExecCopySlotMinimalTuple(TupleTableSlot *slot);
#ifdef PGXC
-#ifdef XCP
extern RemoteDataRow ExecCopySlotDatarow(TupleTableSlot *slot,
MemoryContext tmpcxt);
-#else
-extern int ExecCopySlotDatarow(TupleTableSlot *slot, char **datarow);
-#endif
#endif
extern HeapTuple ExecFetchSlotTuple(TupleTableSlot *slot);
extern MinimalTuple ExecFetchSlotMinimalTuple(TupleTableSlot *slot);
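
After this hunk only the RemoteDataRow-based variants remain: ExecCopySlotDatarow() materializes a slot's contents as a RemoteDataRow in a caller-supplied context, and ExecStoreDataRowTuple() stores such a row back into a slot. A short sketch of how the two kept declarations fit together (copy_slot_as_datarow() is illustrative only):

	#include "postgres.h"
	#include "executor/tuptable.h"	/* ExecCopySlotDatarow(), ExecStoreDataRowTuple() */

	/*
	 * Illustrative only: re-store the contents of 'src' into 'dst' in DataRow
	 * form, using 'tmpcxt' as the work context for the copy; shouldFree = true
	 * hands ownership of the datarow to the destination slot.
	 */
	static void
	copy_slot_as_datarow(TupleTableSlot *src, TupleTableSlot *dst,
						 MemoryContext tmpcxt)
	{
		RemoteDataRow datarow = ExecCopySlotDatarow(src, tmpcxt);

		ExecStoreDataRowTuple(datarow, dst, true);
	}
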
diff --git a/src/include/gtm/gtm_client.h b/src/include/gtm/gtm_client.h
index 7a6c61d811..b21f253143 100644
--- a/src/include/gtm/gtm_client.h
+++ b/src/include/gtm/gtm_client.h
@@ -63,9 +63,7 @@ typedef union GTM_ResultData
{
GTM_SequenceKeyData seqkey;
GTM_Sequence seqval;
-#ifdef XCP
GTM_Sequence rangemax;
-#endif
} grd_seq; /* SEQUENCE_GET_CURRENT
* SEQUENCE_GET_NEXT */
struct
@@ -271,10 +269,8 @@ int node_unregister(GTM_Conn *conn, GTM_PGXCNodeType type, const char *node_name
int bkup_node_unregister(GTM_Conn *conn, GTM_PGXCNodeType type, const char * node_name);
int backend_disconnect(GTM_Conn *conn, bool is_postmaster, GTM_PGXCNodeType type, char *node_name);
char *node_get_local_addr(GTM_Conn *conn, char *buf, size_t buflen, int *rc);
-#ifdef XCP
int register_session(GTM_Conn *conn, const char *coord_name, int coord_procid,
int coord_backendid);
-#endif
/*
* Sequence Management API
@@ -295,7 +291,6 @@ int close_sequence(GTM_Conn *conn, GTM_SequenceKey key);
int bkup_close_sequence(GTM_Conn *conn, GTM_SequenceKey key);
int rename_sequence(GTM_Conn *conn, GTM_SequenceKey key, GTM_SequenceKey newkey);
int bkup_rename_sequence(GTM_Conn *conn, GTM_SequenceKey key, GTM_SequenceKey newkey);
-#ifdef XCP
int get_current(GTM_Conn *conn, GTM_SequenceKey key,
char *coord_name, int coord_procid, GTM_Sequence *result);
int get_next(GTM_Conn *conn, GTM_SequenceKey key,
@@ -308,13 +303,6 @@ int set_val(GTM_Conn *conn, GTM_SequenceKey key, char *coord_name,
int coord_procid, GTM_Sequence nextval, bool iscalled);
int bkup_set_val(GTM_Conn *conn, GTM_SequenceKey key, char *coord_name,
int coord_procid, GTM_Sequence nextval, bool iscalled);
-#else
-GTM_Sequence get_current(GTM_Conn *conn, GTM_SequenceKey key);
-GTM_Sequence get_next(GTM_Conn *conn, GTM_SequenceKey key);
-GTM_Sequence bkup_get_next(GTM_Conn *conn, GTM_SequenceKey key);
-int set_val(GTM_Conn *conn, GTM_SequenceKey key, GTM_Sequence nextval, bool is_called);
-int bkup_set_val(GTM_Conn *conn, GTM_SequenceKey key, GTM_Sequence nextval, bool is_called);
-#endif
int reset_sequence(GTM_Conn *conn, GTM_SequenceKey key);
int bkup_reset_sequence(GTM_Conn *conn, GTM_SequenceKey key);
diff --git a/src/include/gtm/gtm_msg.h b/src/include/gtm/gtm_msg.h
index 6bf56ad972..f4854cbf5d 100644
--- a/src/include/gtm/gtm_msg.h
+++ b/src/include/gtm/gtm_msg.h
@@ -32,9 +32,7 @@ typedef enum GTM_MessageType
MSG_BKUP_NODE_REGISTER, /* Backup of MSG_NODE_REGISTER */
MSG_NODE_UNREGISTER, /* Unregister a PGXC Node with GTM */
MSG_BKUP_NODE_UNREGISTER, /* Backup of MSG_NODE_UNREGISTER */
-#ifdef XCP
MSG_REGISTER_SESSION, /* Register distributed session with GTM */
-#endif
MSG_NODE_LIST, /* Get node list */
MSG_NODE_BEGIN_REPLICATION_INIT,
MSG_NODE_END_REPLICATION_INIT,
@@ -110,9 +108,7 @@ typedef enum GTM_ResultType
SYNC_STANDBY_RESULT,
NODE_REGISTER_RESULT,
NODE_UNREGISTER_RESULT,
-#ifdef XCP
REGISTER_SESSION_RESULT,
-#endif
NODE_LIST_RESULT,
NODE_BEGIN_REPLICATION_INIT_RESULT,
NODE_END_REPLICATION_INIT_RESULT,
diff --git a/src/include/gtm/gtm_seq.h b/src/include/gtm/gtm_seq.h
index 10ba3d83c0..75de008b8f 100644
--- a/src/include/gtm/gtm_seq.h
+++ b/src/include/gtm/gtm_seq.h
@@ -25,16 +25,12 @@
/* Global sequence related structures */
-
-#ifdef XCP
typedef struct GTM_SeqLastVal
{
char gs_coord_name[SP_NODE_NAME];
int32 gs_coord_procid;
GTM_Sequence gs_last_value;
} GTM_SeqLastVal;
-#endif
-
typedef struct GTM_SeqInfo
{
@@ -42,13 +38,9 @@ typedef struct GTM_SeqInfo
GTM_Sequence gs_value;
GTM_Sequence gs_backedUpValue;
GTM_Sequence gs_init_value;
-#ifdef XCP
int32 gs_max_lastvals;
int32 gs_lastval_count;
GTM_SeqLastVal *gs_last_values;
-#else
- GTM_Sequence gs_last_value;
-#endif
GTM_Sequence gs_increment_by;
GTM_Sequence gs_min_value;
GTM_Sequence gs_max_value;
@@ -93,7 +85,6 @@ int GTM_SeqAlter(GTM_SequenceKey seqkey,
bool is_restart);
int GTM_SeqClose(GTM_SequenceKey seqkey);
int GTM_SeqRename(GTM_SequenceKey seqkey, GTM_SequenceKey newseqkey);
-#ifdef XCP
int GTM_SeqGetNext(GTM_SequenceKey seqkey, char *coord_name,
int coord_procid, GTM_Sequence range,
GTM_Sequence *result, GTM_Sequence *rangemax);
@@ -101,11 +92,6 @@ void GTM_SeqGetCurrent(GTM_SequenceKey seqkey, char *coord_name,
int coord_procid, GTM_Sequence *result);
int GTM_SeqSetVal(GTM_SequenceKey seqkey, char *coord_name,
int coord_procid, GTM_Sequence nextval, bool iscalled);
-#else
-GTM_Sequence GTM_SeqGetNext(GTM_SequenceKey seqkey);
-GTM_Sequence GTM_SeqGetCurrent(GTM_SequenceKey seqkey);
-int GTM_SeqSetVal(GTM_SequenceKey seqkey, GTM_Sequence nextval, bool iscalled);
-#endif
int GTM_SeqReset(GTM_SequenceKey seqkey);
void ProcessSequenceInitCommand(Port *myport, StringInfo message, bool is_backup);
@@ -131,9 +117,7 @@ int GTM_SeqRestore(GTM_SequenceKey seqkey,
bool cycle,
bool called);
-#ifdef XCP
void GTM_CleanupSeqSession(char *coord_name, int coord_procid);
-#endif
bool GTM_NeedSeqRestoreUpdate(GTM_SequenceKey seqkey);
void GTM_WriteRestorePointSeq(FILE *f);
diff --git a/src/include/gtm/gtm_serialize.h b/src/include/gtm/gtm_serialize.h
index 8d4077500e..4f8cecdf5c 100644
--- a/src/include/gtm/gtm_serialize.h
+++ b/src/include/gtm/gtm_serialize.h
@@ -36,11 +36,7 @@ size_t gtm_deserialize_transactions(GTM_Transactions *, const char *, size_t);
size_t gtm_get_pgxcnodeinfo_size(GTM_PGXCNodeInfo *);
size_t gtm_serialize_pgxcnodeinfo(GTM_PGXCNodeInfo *, char *, size_t);
-#ifdef XCP
size_t gtm_deserialize_pgxcnodeinfo(GTM_PGXCNodeInfo *, const char *, size_t, PQExpBuffer);
-#else
-size_t gtm_deserialize_pgxcnodeinfo(GTM_PGXCNodeInfo *, const char *, size_t);
-#endif
size_t gtm_get_sequence_size(GTM_SeqInfo *);
size_t gtm_serialize_sequence(GTM_SeqInfo *, char *, size_t);
diff --git a/src/include/gtm/register.h b/src/include/gtm/register.h
index b9cc089952..8fbaac19c4 100644
--- a/src/include/gtm/register.h
+++ b/src/include/gtm/register.h
@@ -43,13 +43,11 @@ typedef enum GTM_PGXCNodeStatus
NODE_DISCONNECTED
} GTM_PGXCNodeStatus;
-#ifdef XCP
typedef struct GTM_PGXCSession
{
int gps_coord_proc_id;
int gps_coord_backend_id;
} GTM_PGXCSession;
-#endif
typedef struct GTM_PGXCNodeInfo
{
@@ -60,11 +58,9 @@ typedef struct GTM_PGXCNodeInfo
char *ipaddress; /* IP address of the nodes */
char *datafolder; /* Data folder of the node */
GTM_PGXCNodeStatus status; /* Node status */
-#ifdef XCP
int max_sessions;
int num_sessions;
GTM_PGXCSession *sessions;
-#endif
GTM_RWLock node_lock; /* Lock on this structure */
int socket; /* socket number used for registration */
} GTM_PGXCNodeInfo;
@@ -92,15 +88,12 @@ int Recovery_PGXCNodeUnregister(GTM_PGXCNodeType type,
int Recovery_PGXCNodeBackendDisconnect(GTM_PGXCNodeType type, char *nodename, int socket);
void Recovery_RecordRegisterInfo(GTM_PGXCNodeInfo *nodeinfo, bool is_register);
-void Recovery_RestoreRegisterInfo(void);
void Recovery_SaveRegisterInfo(void);
void Recovery_PGXCNodeDisconnect(Port *myport);
void Recovery_SaveRegisterFileName(char *dir);
-#ifdef XCP
int Recovery_PGXCNodeRegisterCoordProcess(char *coord_node, int coord_procid,
int coord_backendid);
void ProcessPGXCRegisterSession(Port *myport, StringInfo message);
-#endif
void ProcessPGXCNodeRegister(Port *myport, StringInfo message, bool is_backup);
void ProcessPGXCNodeUnregister(Port *myport, StringInfo message, bool is_backup);
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 6c05c33d1a..01c683e8de 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -28,9 +28,7 @@
#include "utils/reltrigger.h"
#include "utils/sortsupport.h"
#include "utils/tuplestore.h"
-#ifdef XCP
#include "pgxc/squeue.h"
-#endif
#include "utils/tuplesort.h"
@@ -1870,11 +1868,6 @@ typedef struct AggState
List *hash_needed; /* list of columns needed in hash table */
bool table_filled; /* hash table filled yet? */
TupleHashIterator hashiter; /* for iterating through hash table */
-#ifdef PGXC
-#ifndef XCP
- bool skip_trans; /* skip the transition step for aggregates */
-#endif /* XCP */
-#endif /* PGXC */
} AggState;
/* ----------------
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index b5cb749336..99ab49dd1c 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -168,19 +168,6 @@ typedef struct Query
List *constraintDeps; /* a list of pg_constraint OIDs that the query
* depends on to be semantically valid */
-#ifdef PGXC
-#ifndef XCP
- /* need this info for PGXC Planner, may be temporary */
- char *sql_statement; /* original query */
- bool qry_finalise_aggs; /* used for queries intended for Datanodes,
- * should Datanode finalise the aggregates? */
- bool is_local; /* enforce query execution on local node
- * this is used by EXECUTE DIRECT especially. */
- bool is_ins_child_sel_parent;/* true if the query is such an INSERT SELECT that
- * inserts into a child by selecting from its parent */
- bool recursiveOK; /* does query support WITH RECURSIVE */
-#endif
-#endif
} Query;
@@ -831,12 +818,6 @@ typedef struct RangeTblEntry
* code that is being actively worked on. FIXME someday.
*/
-#ifdef PGXC
-#ifndef XCP
- char *relname;
-#endif
-#endif
-
/*
* Fields valid for a plain relation RTE (else zero):
*/
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index a2b04518a2..009e49e55c 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -200,11 +200,6 @@ typedef struct ModifyTable
List *fdwPrivLists; /* per-target-table FDW private data lists */
List *rowMarks; /* PlanRowMarks (non-locking only) */
int epqParam; /* ID of Param for EvalPlanQual re-eval */
-#ifdef PGXC
-#ifndef XCP
- List *remote_plans; /* per-target-table remote node */
-#endif
-#endif
OnConflictAction onConflictAction; /* ON CONFLICT action */
List *arbiterIndexes; /* List of ON CONFLICT arbiter index OIDs */
List *onConflictSet; /* SET for INSERT ON CONFLICT DO UPDATE */
@@ -746,13 +741,6 @@ typedef struct Agg
AttrNumber *grpColIdx; /* their indexes in the target list */
Oid *grpOperators; /* equality operators to compare with */
long numGroups; /* estimated number of groups in input */
-#ifdef PGXC
-#ifndef XCP
- bool skip_trans; /* apply collection directly on the data received
- * from remote Datanodes
- */
-#endif /* XCP */
-#endif /* PGXC */
List *groupingSets; /* grouping sets to use */
List *chain; /* chained Agg/Sort nodes */
} Agg;
diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h
index ffd05bb64f..4338a16d54 100644
--- a/src/include/nodes/primnodes.h
+++ b/src/include/nodes/primnodes.h
@@ -268,12 +268,6 @@ typedef struct Aggref
Oid aggtype; /* type Oid of result of the aggregate */
Oid aggcollid; /* OID of collation of result */
Oid inputcollid; /* OID of collation that function should use */
-#ifdef PGXC
-#ifndef XCP
- Oid aggtrantype; /* type Oid of transition results */
- bool agghas_collectfn; /* is collection function available */
-#endif /* XCP */
-#endif /* PGXC */
List *aggdirectargs; /* direct arguments, if an ordered-set agg */
List *args; /* aggregated arguments and sort expressions */
List *aggorder; /* ORDER BY (list of SortGroupClause) */
diff --git a/src/include/nodes/relation.h b/src/include/nodes/relation.h
index f7df205fe1..5cf9a3b843 100644
--- a/src/include/nodes/relation.h
+++ b/src/include/nodes/relation.h
@@ -274,23 +274,6 @@ typedef struct PlannerInfo
* pseudoconstant = true */
bool hasRecursion; /* true if planning a recursive WITH item */
-#ifdef PGXC
-#ifndef XCP
- /* This field is used only when RemoteScan nodes are involved */
- int rs_alias_index; /* used to build the alias reference */
-
- /*
- * In Postgres-XC Coordinators are supposed to skip the handling of
- * row marks of type ROW_MARK_EXCLUSIVE & ROW_MARK_SHARE.
- * In order to do that we simply remove such type
- * of row marks from the list rowMarks. Instead they are saved
- * in xc_rowMarks list that is then handeled to add
- * FOR UPDATE/SHARE in the remote query
- */
- List *xc_rowMarks; /* list of PlanRowMarks of type ROW_MARK_EXCLUSIVE & ROW_MARK_SHARE */
-#endif
-#endif
-
/* These fields are used only when hasRecursion is true: */
int wt_param_id; /* PARAM_EXEC ID for the work table */
struct Plan *non_recursive_plan; /* plan for non-recursive term */
diff --git a/src/include/optimizer/pathnode.h b/src/include/optimizer/pathnode.h
index 3119b71cd4..bdadb3f2cf 100644
--- a/src/include/optimizer/pathnode.h
+++ b/src/include/optimizer/pathnode.h
@@ -95,11 +95,6 @@ extern ForeignPath *create_foreignscan_path(PlannerInfo *root, RelOptInfo *rel,
List *pathkeys,
Relids required_outer,
List *fdw_private);
-#ifdef PGXC
-#ifndef XCP
-extern Path *create_remotequery_path(PlannerInfo *root, RelOptInfo *rel);
-#endif
-#endif
extern Relids calc_nestloop_required_outer(Path *outer_path, Path *inner_path);
extern Relids calc_non_nestloop_required_outer(Path *outer_path, Path *inner_path);
diff --git a/src/include/optimizer/planmain.h b/src/include/optimizer/planmain.h
index 2deb4cf98f..fc50375703 100644
--- a/src/include/optimizer/planmain.h
+++ b/src/include/optimizer/planmain.h
@@ -150,7 +150,6 @@ extern void extract_query_dependencies(Node *query,
bool *hasRowSecurity);
#ifdef PGXC
-#ifdef XCP
extern RemoteSubplan *find_push_down_plan(Plan *plan, bool force);
extern RemoteSubplan *find_delete_push_down_plan(PlannerInfo *root, Plan *plan,
bool force, Plan **parent);
@@ -159,15 +158,6 @@ extern RemoteSubplan *make_remotesubplan(PlannerInfo *root,
Distribution *resultDistribution,
Distribution *execDistribution,
List *pathkeys);
-#else
-extern Var *search_tlist_for_var(Var *var, List *jtlist);
-extern Plan *create_remoteinsert_plan(PlannerInfo *root, Plan *topplan);
-extern Plan *create_remoteupdate_plan(PlannerInfo *root, Plan *topplan);
-extern Plan *create_remotedelete_plan(PlannerInfo *root, Plan *topplan);
-extern Plan *create_remotegrouping_plan(PlannerInfo *root, Plan *local_plan);
-/* Expose fix_scan_expr to create_remotequery_plan() */
-extern Node *pgxc_fix_scan_expr(PlannerInfo *root, Node *node, int rtoffset);
-#endif /* XCP */
#endif /* PGXC */
#endif /* PLANMAIN_H */
diff --git a/src/include/pgxc/execRemote.h b/src/include/pgxc/execRemote.h
index 87fc6fdedc..2a26394144 100644
--- a/src/include/pgxc/execRemote.h
+++ b/src/include/pgxc/execRemote.h
@@ -59,11 +59,8 @@ typedef enum
REQUEST_TYPE_COMMAND, /* OK or row count response */
REQUEST_TYPE_QUERY, /* Row description response */
REQUEST_TYPE_COPY_IN, /* Copy In response */
- REQUEST_TYPE_COPY_OUT /* Copy Out response */
-#ifdef XCP
- ,
+ REQUEST_TYPE_COPY_OUT, /* Copy Out response */
REQUEST_TYPE_ERROR /* Error, ignore responses */
-#endif
} RequestType;
/*
@@ -84,32 +81,12 @@ typedef struct CombineTag
char data[COMPLETION_TAG_BUFSIZE]; /* execution result combination data */
} CombineTag;
-
-#ifndef XCP
-/*
- * Represents a DataRow message received from a remote node.
- * Contains originating node number and message body in DataRow format without
- * message code and length. Length is separate field
- */
-typedef struct RemoteDataRowData
-{
- char *msg; /* last data row message */
- int msglen; /* length of the data row message */
- int msgnode; /* node number of the data row message */
-} RemoteDataRowData;
-typedef RemoteDataRowData *RemoteDataRow;
-#endif
-
-#ifdef XCP
/*
* Common part for all plan state nodes needed to access remote datanodes
* ResponseCombiner must be the first field of the plan state node so we can
* typecast
*/
typedef struct ResponseCombiner
-#else
-typedef struct RemoteQueryState
-#endif
{
ScanState ss; /* its first field is NodeTag */
int node_count; /* total count of participating nodes */
@@ -129,16 +106,11 @@ typedef struct RemoteQueryState
char *errorMessage; /* error message to send back to client */
char *errorDetail; /* error detail to send back to client */
char *errorHint; /* error hint to send back to client */
-#ifdef XCP
Oid returning_node; /* returning replicated node */
RemoteDataRow currentRow; /* next data ro to be wrapped into a tuple */
-#else
- RemoteDataRowData currentRow; /* next data ro to be wrapped into a tuple */
-#endif
/* TODO use a tuplestore as a rowbuffer */
List *rowBuffer; /* buffer where rows are stored when connection
* should be cleaned for reuse by other RemoteQuery */
-#ifdef XCP
/*
* To handle special case - if there is a simple sort and sort connection
* is buffered. If EOF is reached on a connection it should be removed from
@@ -156,16 +128,6 @@ typedef struct RemoteQueryState
bool merge_sort; /* perform mergesort of node tuples */
bool extended_query; /* running extended query protocol */
bool probing_primary; /* trying replicated on primary node */
-#else
- /*
- * To handle special case - if there is a simple sort and sort connection
- * is buffered. If EOF is reached on a connection it should be removed from
- * the array, but we need to know node number of the connection to find
- * messages in the buffer. So we store nodenum to that array if reach EOF
- * when buffering
- */
- int *tapenodes;
-#endif
void *tuplesortstate; /* for merge sort */
/* COPY support */
RemoteCopyType remoteCopyType;
@@ -175,13 +137,11 @@ typedef struct RemoteQueryState
char *update_cursor; /* throw this cursor current tuple can be updated */
int cursor_count; /* total count of participating nodes */
PGXCNodeHandle **cursor_connections;/* data node connections being combined */
-#ifdef XCP
} ResponseCombiner;
typedef struct RemoteQueryState
{
ResponseCombiner combiner; /* see ResponseCombiner struct */
-#endif
bool query_Done; /* query has been sent down to Datanodes */
/*
* While we are not supporting grouping use this flag to indicate we need
@@ -197,13 +157,8 @@ typedef struct RemoteQueryState
int eflags; /* capability flags to pass to tuplestore */
bool eof_underlying; /* reached end of underlying plan? */
-#ifndef XCP
- CommandId rqs_cmd_id; /* Cmd id to use in some special cases */
-#endif
} RemoteQueryState;
-
-#ifdef XCP
typedef struct RemoteParam
{
ParamKind paramkind; /* kind of parameter */
@@ -270,24 +225,10 @@ typedef struct RemoteStmt
List *distributionRestrict;
} RemoteStmt;
-#endif
typedef void (*xact_callback) (bool isCommit, void *args);
-#ifndef XCP
-/* Multinode Executor */
-extern void PGXCNodeBegin(void);
-extern void PGXCNodeSetBeginQuery(char *query_string);
-extern void PGXCNodeCommit(bool bReleaseHandles);
-extern int PGXCNodeRollback(void);
-extern bool PGXCNodePrepare(char *gid);
-extern bool PGXCNodeRollbackPrepared(char *gid);
-extern void PGXCNodeCommitPrepared(char *gid);
-#endif
-
-
/* Copy command just involves Datanodes */
-#ifdef XCP
extern void DataNodeCopyBegin(RemoteCopyData *rcstate);
extern int DataNodeCopyIn(char *data_row, int len, int conn_count,
PGXCNodeHandle** copy_connections);
@@ -298,43 +239,25 @@ extern uint64 DataNodeCopyStore(PGXCNodeHandle** copy_connections,
extern void DataNodeCopyFinish(int conn_count, PGXCNodeHandle** connections);
extern int DataNodeCopyInBinaryForAll(char *msg_buf, int len, int conn_count,
PGXCNodeHandle** connections);
-#else
-extern PGXCNodeHandle** DataNodeCopyBegin(const char *query, List *nodelist, Snapshot snapshot);
-extern int DataNodeCopyIn(char *data_row, int len, ExecNodes *exec_nodes, PGXCNodeHandle** copy_connections);
-extern uint64 DataNodeCopyOut(ExecNodes *exec_nodes, PGXCNodeHandle** copy_connections, TupleDesc tupleDesc,
- FILE* copy_file, Tuplestorestate *store, RemoteCopyType remoteCopyType);
-extern void DataNodeCopyFinish(PGXCNodeHandle** copy_connections, int primary_dn_index, CombineType combine_type);
-extern int DataNodeCopyInBinaryForAll(char *msg_buf, int len, PGXCNodeHandle** copy_connections);
-#endif
extern bool DataNodeCopyEnd(PGXCNodeHandle *handle, bool is_error);
-#ifndef XCP
-extern int ExecCountSlotsRemoteQuery(RemoteQuery *node);
-#endif
extern RemoteQueryState *ExecInitRemoteQuery(RemoteQuery *node, EState *estate, int eflags);
extern TupleTableSlot* ExecRemoteQuery(RemoteQueryState *step);
extern void ExecEndRemoteQuery(RemoteQueryState *step);
-#ifdef XCP
extern void RemoteSubplanMakeUnique(Node *plan, int unique);
extern RemoteSubplanState *ExecInitRemoteSubplan(RemoteSubplan *node, EState *estate, int eflags);
extern void ExecFinishInitRemoteSubplan(RemoteSubplanState *node);
extern TupleTableSlot* ExecRemoteSubplan(RemoteSubplanState *node);
extern void ExecEndRemoteSubplan(RemoteSubplanState *node);
extern void ExecReScanRemoteSubplan(RemoteSubplanState *node);
-#endif
extern void ExecRemoteUtility(RemoteQuery *node);
extern bool is_data_node_ready(PGXCNodeHandle * conn);
-#ifdef XCP
extern int handle_response(PGXCNodeHandle *conn, ResponseCombiner *combiner);
-#else
-extern int handle_response(PGXCNodeHandle *conn, RemoteQueryState *combiner);
-#endif
extern void HandleCmdComplete(CmdType commandType, CombineTag *combine, const char *msg_body,
size_t len);
-#ifdef XCP
#define CHECK_OWNERSHIP(conn, node) \
do { \
if ((conn)->state == DN_CONNECTION_STATE_QUERY && \
@@ -348,9 +271,6 @@ extern TupleTableSlot *FetchTuple(ResponseCombiner *combiner);
extern void InitResponseCombiner(ResponseCombiner *combiner, int node_count,
CombineType combine_type);
extern void CloseCombiner(ResponseCombiner *combiner);
-#else
-extern bool FetchTuple(RemoteQueryState *combiner, TupleTableSlot *slot);
-#endif
extern void BufferConnection(PGXCNodeHandle *conn);
extern void ExecRemoteQueryReScan(RemoteQueryState *node, ExprContext *exprCtxt);
@@ -359,25 +279,13 @@ extern int ParamListToDataRow(ParamListInfo params, char** result);
extern void ExecCloseRemoteStatement(const char *stmt_name, List *nodelist);
extern char *PrePrepare_Remote(char *prepareGID, bool localNode, bool implicit);
-#ifdef XCP
extern void PostPrepare_Remote(char *prepareGID, bool implicit);
extern void PreCommit_Remote(char *prepareGID, char *nodestring, bool preparedLocalNode);
-#else
-extern void PostPrepare_Remote(char *prepareGID, char *nodestring, bool implicit);
-extern void PreCommit_Remote(char *prepareGID, bool preparedLocalNode);
-#endif
extern bool PreAbort_Remote(void);
extern void AtEOXact_Remote(void);
extern bool IsTwoPhaseCommitRequired(bool localWrite);
extern bool FinishRemotePreparedTransaction(char *prepareGID, bool commit);
-#ifndef XCP
-/* Flags related to temporary objects included in query */
-extern void ExecSetTempObjectIncluded(void);
-extern bool ExecIsTempObjectIncluded(void);
-extern void ExecRemoteQueryStandard(Relation resultRelationDesc, RemoteQueryState *resultRemoteRel, TupleTableSlot *slot);
-#endif
-
extern void pgxc_all_success_nodes(ExecNodes **d_nodes, ExecNodes **c_nodes, char **failednodes_msg);
extern void AtEOXact_DBCleanup(bool isCommit);
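
The execRemote.h changes above fold the old RemoteQueryState response fields into the shared ResponseCombiner, and the retained comment requires ResponseCombiner to be the first field of any remote plan state node so the two can be typecast. A sketch of the cast pattern implied by the single handle_response() prototype kept here (drain_connection() is illustrative only):

	#include "postgres.h"
	#include "pgxc/execRemote.h"	/* ResponseCombiner, RemoteQueryState, handle_response() */

	/*
	 * Illustrative only: because ResponseCombiner is the first member of
	 * RemoteQueryState, a pointer to the state node can be treated as a
	 * pointer to its combiner when reading responses from a connection.
	 */
	static int
	drain_connection(PGXCNodeHandle *conn, RemoteQueryState *node)
	{
		ResponseCombiner *combiner = (ResponseCombiner *) node;

		return handle_response(conn, combiner);
	}
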
diff --git a/src/include/pgxc/locator.h b/src/include/pgxc/locator.h
index 145028f962..8b57dc3774 100644
--- a/src/include/pgxc/locator.h
+++ b/src/include/pgxc/locator.h
@@ -13,9 +13,7 @@
#ifndef LOCATOR_H
#define LOCATOR_H
-#ifdef XCP
#include "fmgr.h"
-#endif
#define LOCATOR_TYPE_REPLICATED 'R'
#define LOCATOR_TYPE_HASH 'H'
#define LOCATOR_TYPE_RANGE 'G'
@@ -89,7 +87,6 @@ typedef struct
} ExecNodes;
-#ifdef XCP
typedef enum
{
LOCATOR_LIST_NONE, /* locator returns integers in range 0..NodeCount-1,
@@ -141,7 +138,6 @@ extern int GET_NODES(Locator *self, Datum value, bool isnull, bool *hasprimary);
extern void *getLocatorResults(Locator *self);
extern void *getLocatorNodeMap(Locator *self);
extern int getLocatorNodeCount(Locator *self);
-#endif
/* Extern variables related to locations */
extern Oid primary_data_node;
@@ -158,13 +154,6 @@ extern RelationLocInfo *CopyRelationLocInfo(RelationLocInfo *src_info);
extern char GetRelationLocType(Oid relid);
extern bool IsTableDistOnPrimary(RelationLocInfo *rel_loc_info);
extern bool IsLocatorInfoEqual(RelationLocInfo *rel_loc_info1, RelationLocInfo *rel_loc_info2);
-#ifndef XCP
-extern ExecNodes *GetRelationNodes(RelationLocInfo *rel_loc_info, Datum valueForDistCol,
- bool isValueNull, Oid typeOfValueForDistCol,
- RelationAccessType accessType);
-extern ExecNodes *GetRelationNodesByQuals(Oid reloid, Index varno, Node *quals,
- RelationAccessType relaccess);
-#endif
extern bool IsHashColumn(RelationLocInfo *rel_loc_info, char *part_col_name);
extern bool IsHashColumnForRelId(Oid relid, char *part_col_name);
extern int GetRoundRobinNode(Oid relid);
@@ -172,11 +161,7 @@ extern int GetRoundRobinNode(Oid relid);
extern bool IsTypeHashDistributable(Oid col_type);
extern List *GetAllDataNodes(void);
extern List *GetAllCoordNodes(void);
-#ifdef XCP
extern int GetAnyDataNode(Bitmapset *nodes);
-#else
-extern List *GetPreferredReplicationNode(List *relNodes);
-#endif
extern void RelationBuildLocator(Relation rel);
extern void FreeRelationLocInfo(RelationLocInfo *relationLocInfo);
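
The Locator interface kept above is value-driven: GET_NODES() maps one distribution-column Datum onto target nodes and returns how many were chosen, and getLocatorResults() exposes the result buffer whose element type depends on the LOCATOR_LIST_* mode the locator was built with. A sketch of a routing loop over those two calls (route_value() is illustrative; it assumes a locator already created in LOCATOR_LIST_NONE mode, i.e. integer node indexes 0..NodeCount-1):

	#include "postgres.h"
	#include "pgxc/locator.h"	/* Locator, GET_NODES(), getLocatorResults() */

	/*
	 * Illustrative only: route one value through an already-created Locator.
	 * Assumes the locator was built in LOCATOR_LIST_NONE mode, so the result
	 * buffer holds int indexes in the range 0..NodeCount-1.
	 */
	static void
	route_value(Locator *locator, Datum value, bool isnull)
	{
		bool	hasprimary;
		int		count = GET_NODES(locator, value, isnull, &hasprimary);
		int	   *nodes = (int *) getLocatorResults(locator);
		int		i;

		for (i = 0; i < count; i++)
			elog(DEBUG1, "value maps to node index %d", nodes[i]);
	}
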
diff --git a/src/include/pgxc/pgxc.h b/src/include/pgxc/pgxc.h
index a1d67861b9..9cd3995b2f 100644
--- a/src/include/pgxc/pgxc.h
+++ b/src/include/pgxc/pgxc.h
@@ -25,11 +25,9 @@
extern bool isPGXCCoordinator;
extern bool isPGXCDataNode;
extern bool isRestoreMode;
-#ifdef XCP
extern char *parentPGXCNode;
extern int parentPGXCNodeId;
extern char parentPGXCNodeType;
-#endif
typedef enum
{
@@ -59,11 +57,9 @@ extern Datum xc_lockForBackupKey2;
#define IS_PGXC_REMOTE_COORDINATOR \
(IS_PGXC_COORDINATOR && IsConnFromCoord())
-#ifdef XCP
#define PGXC_PARENT_NODE parentPGXCNode
#define PGXC_PARENT_NODE_ID parentPGXCNodeId
#define PGXC_PARENT_NODE_TYPE parentPGXCNodeType
-#endif
#define REMOTE_CONN_TYPE remoteConnType
#define IsConnFromApp() (remoteConnType == REMOTE_CONN_APP)
diff --git a/src/include/pgxc/pgxcnode.h b/src/include/pgxc/pgxcnode.h
index bfb6bda0b8..1cd4162a34 100644
--- a/src/include/pgxc/pgxcnode.h
+++ b/src/include/pgxc/pgxcnode.h
@@ -51,27 +51,6 @@ typedef enum
HANDLE_DEFAULT
} PGXCNode_HandleRequested;
-#ifndef XCP
-/*
- * Enumeration for two purposes
- * 1. To indicate to the HandleCommandComplete function whether response checking is required or not
- * 2. To enable HandleCommandComplete function to indicate whether the response was a ROLLBACK or not
- * Response checking is required in case of PREPARE TRANSACTION and should not be done for the rest
- * of the cases for performance reasons, hence we have an option to ignore response checking.
- * The problem with PREPARE TRANSACTION is that it can result in a ROLLBACK response
- * yet Coordinator would think it got done on all nodes.
- * If we ignore ROLLBACK response then we would try to COMMIT a transaction that
- * never got prepared, which in an incorrect behavior.
- */
-typedef enum
-{
- RESP_ROLLBACK_IGNORE, /* Ignore response checking */
- RESP_ROLLBACK_CHECK, /* Check whether response was ROLLBACK */
- RESP_ROLLBACK_RECEIVED, /* Response is ROLLBACK */
- RESP_ROLLBACK_NOT_RECEIVED /* Response is NOT ROLLBACK */
-}RESP_ROLLBACK;
-#endif
-
#define DN_CONNECTION_STATE_ERROR(dnconn) \
((dnconn)->state == DN_CONNECTION_STATE_ERROR_FATAL \
|| (dnconn)->transaction_status == 'E')
@@ -89,12 +68,8 @@ struct pgxc_node_handle
/* Connection state */
char transaction_status;
DNConnectionState state;
-#ifdef XCP
bool read_only;
struct ResponseCombiner *combiner;
-#else
- struct RemoteQueryState *combiner;
-#endif
#ifdef DN_CONNECTION_DEBUG
bool have_row_desc;
#endif
@@ -115,11 +90,7 @@ struct pgxc_node_handle
*
* For details see comments of RESP_ROLLBACK
*/
-#ifdef XCP
bool ck_resp_rollback;
-#else
- RESP_ROLLBACK ck_resp_rollback;
-#endif
};
typedef struct pgxc_node_handle PGXCNodeHandle;
@@ -136,52 +107,27 @@ typedef struct
extern void InitMultinodeExecutor(bool is_force);
/* Open/close connection routines (invoked from Pool Manager) */
-#ifdef XCP
extern char *PGXCNodeConnStr(char *host, int port, char *dbname, char *user,
char *pgoptions,
char *remote_type, char *parent_node);
-#else
-extern char *PGXCNodeConnStr(char *host, int port, char *dbname, char *user,
- char *pgoptions, char *remote_type);
-#endif
extern NODE_CONNECTION *PGXCNodeConnect(char *connstr);
-#ifndef XCP
-extern int PGXCNodeSendSetQuery(NODE_CONNECTION *conn, const char *sql_command);
-#endif
extern void PGXCNodeClose(NODE_CONNECTION * conn);
extern int PGXCNodeConnected(NODE_CONNECTION * conn);
extern int PGXCNodeConnClean(NODE_CONNECTION * conn);
extern void PGXCNodeCleanAndRelease(int code, Datum arg);
-#ifdef XCP
extern PGXCNodeHandle *get_any_handle(List *datanodelist);
-#endif
/* Look at information cached in node handles */
-#ifdef XCP
extern int PGXCNodeGetNodeId(Oid nodeoid, char *node_type);
extern int PGXCNodeGetNodeIdFromName(char *node_name, char *node_type);
-#else
-extern int PGXCNodeGetNodeId(Oid nodeoid, char node_type);
-extern int PGXCNodeGetNodeIdFromName(char *node_name, char node_type);
-#endif
extern Oid PGXCNodeGetNodeOid(int nodeid, char node_type);
-#ifdef XCP
extern PGXCNodeAllHandles *get_handles(List *datanodelist, List *coordlist, bool is_query_coord_only, bool is_global_session);
-#else
-extern PGXCNodeAllHandles *get_handles(List *datanodelist, List *coordlist, bool is_query_coord_only);
-#endif
-#ifdef XCP
extern PGXCNodeAllHandles *get_current_handles(void);
-#endif
extern void pfree_pgxc_all_handles(PGXCNodeAllHandles *handles);
extern void release_handles(void);
-#ifndef XCP
-extern void cancel_query(void);
-extern void clear_all_data(void);
-#endif
extern int get_transaction_nodes(PGXCNodeHandle ** connections,
char client_conn_type,
@@ -210,11 +156,9 @@ extern int pgxc_node_send_query_extended(PGXCNodeHandle *handle, const char *que
int num_params, Oid *param_types,
int paramlen, char *params,
bool send_describe, int fetch_size);
-#ifdef XCP
extern int pgxc_node_send_plan(PGXCNodeHandle * handle, const char *statement,
const char *query, const char *planstr,
short num_params, Oid *param_types);
-#endif
extern int pgxc_node_send_gxid(PGXCNodeHandle * handle, GlobalTransactionId gxid);
extern int pgxc_node_send_cmd_id(PGXCNodeHandle *handle, CommandId cid);
extern int pgxc_node_send_snapshot(PGXCNodeHandle * handle, Snapshot snapshot);
@@ -229,24 +173,17 @@ extern int send_some(PGXCNodeHandle * handle, int len);
extern int pgxc_node_flush(PGXCNodeHandle *handle);
extern void pgxc_node_flush_read(PGXCNodeHandle *handle);
-#ifndef XCP
-extern int pgxc_all_handles_send_gxid(PGXCNodeAllHandles *pgxc_handles, GlobalTransactionId gxid, bool stop_at_error);
-extern int pgxc_all_handles_send_query(PGXCNodeAllHandles *pgxc_handles, const char *buffer, bool stop_at_error);
-#endif
-
extern char get_message(PGXCNodeHandle *conn, int *len, char **msg);
extern void add_error_message(PGXCNodeHandle * handle, const char *message);
extern Datum pgxc_execute_on_nodes(int numnodes, Oid *nodelist, char *query);
-#ifdef XCP
extern void PGXCNodeSetParam(bool local, const char *name, const char *value);
extern void PGXCNodeResetParams(bool only_local);
extern char *PGXCNodeGetSessionParamStr(void);
extern char *PGXCNodeGetTransactionParamStr(void);
extern void pgxc_node_set_query(PGXCNodeHandle *handle, const char *set_query);
extern void RequestInvalidateRemoteHandles(void);
-#endif
#endif /* PGXCNODE_H */
diff --git a/src/include/pgxc/planner.h b/src/include/pgxc/planner.h
index 9ff0de50f4..9c59494fb6 100644
--- a/src/include/pgxc/planner.h
+++ b/src/include/pgxc/planner.h
@@ -59,9 +59,7 @@ typedef struct
*/
typedef enum
{
-#ifdef XCP
EXEC_ON_CURRENT,
-#endif
EXEC_ON_DATANODES,
EXEC_ON_COORDS,
EXEC_ON_ALL_NODES,
@@ -103,10 +101,6 @@ typedef struct
* plan. So, don't change this once set.
*/
RemoteQueryExecType exec_type;
-#ifndef XCP
- bool is_temp; /* determine if this remote node is based
- * on a temporary objects (no 2PC) */
-#endif
int reduce_level; /* in case of reduced JOIN, it's level */
List *base_tlist; /* in case of isReduced, the base tlist */
char *outer_alias;
@@ -123,8 +117,6 @@ typedef struct
* inserts into child by selecting from its parent */
} RemoteQuery;
-
-#ifdef XCP
/*
* Going to be a RemoteQuery replacement.
* Submit left subplan to the nodes defined by the Distribution and combine
@@ -143,8 +135,6 @@ typedef struct
char *cursor;
int unique;
} RemoteSubplan;
-#endif
-
/*
* FQS_context
@@ -202,36 +192,12 @@ typedef enum
/* forbid SQL if unsafe, useful to turn off for development */
extern bool StrictStatementChecking;
-#ifndef XCP
-/* global variable corresponding to the GUC with same name */
-extern bool enable_fast_query_shipping;
-
-/* forbid SELECT even multi-node ORDER BY */
-extern bool StrictSelectChecking;
-
-extern PlannedStmt *pgxc_planner(Query *query, int cursorOptions,
- ParamListInfo boundParams);
-extern bool IsHashDistributable(Oid col_type);
-
-extern ExecNodes *IsJoinReducible(RemoteQuery *innernode, RemoteQuery *outernode,
- Relids in_relids, Relids out_relids,
- Join *join, JoinPath *join_path, List *rtable);
-
-extern List *AddRemoteQueryNode(List *stmts, const char *queryString,
- RemoteQueryExecType remoteExecType, bool is_temp);
-extern bool pgxc_query_contains_temp_tables(List *queries);
-extern Expr *pgxc_find_distcol_expr(Index varno, PartAttrNumber partAttrNum,
- Node *quals);
-extern bool pgxc_query_contains_utility(List *queries);
-#endif
extern bool pgxc_shippability_walker(Node *node, Shippability_context *sc_context);
extern bool pgxc_test_shippability_reason(Shippability_context *context,
ShippabilityStat reason);
-
-#ifdef XCP
extern PlannedStmt *pgxc_direct_planner(Query *query, int cursorOptions,
ParamListInfo boundParams);
extern List *AddRemoteQueryNode(List *stmts, const char *queryString,
RemoteQueryExecType remoteExecType);
-#endif
#endif /* PGXCPLANNER_H */
diff --git a/src/include/pgxc/poolmgr.h b/src/include/pgxc/poolmgr.h
index 7c9bf09ae0..f81cd71b7e 100644
--- a/src/include/pgxc/poolmgr.h
+++ b/src/include/pgxc/poolmgr.h
@@ -29,50 +29,10 @@
#define MAX_IDLE_TIME 60
-#ifndef XCP
-/*
- * List of flags related to pooler connection clean up when disconnecting
- * a session or relaeasing handles.
- * When Local SET commands (POOL_CMD_LOCAL_SET) are used, local parameter
- * string is cleaned by the node commit itself.
- * When global SET commands (POOL_CMD_GLOBAL_SET) are used, "RESET ALL"
- * command is sent down to activated nodes to at session end. At the end
- * of a transaction, connections using global SET commands are not sent
- * back to pool.
- * When temporary object commands are used (POOL_CMD_TEMP), "DISCARD ALL"
- * query is sent down to nodes whose connection is activated at the end of
- * a session.
- * At the end of a transaction, a session using either temporary objects
- * or global session parameters has its connections not sent back to pool.
- *
- * Local parameters are used to change within current transaction block.
- * They are sent to remote nodes invloved in the transaction after sending
- * BEGIN TRANSACTION using a special firing protocol.
- * They cannot be sent when connections are obtained, making them having no
- * effect as BEGIN is sent by backend after connections are obtained and
- * obtention confirmation has been sent back to backend.
- * SET CONSTRAINT, SET LOCAL commands are in this category.
- *
- * Global parmeters are used to change the behavior of current session.
- * They are sent to the nodes when the connections are obtained.
- * SET GLOBAL, general SET commands are in this category.
- */
-typedef enum
-{
- POOL_CMD_TEMP, /* Temporary object flag */
- POOL_CMD_LOCAL_SET, /* Local SET flag, current transaction block only */
- POOL_CMD_GLOBAL_SET /* Global SET flag */
-} PoolCommandType;
-#endif
-
/* Connection pool entry */
typedef struct
{
-#ifdef XCP
time_t released;
-#else
- struct timeval released;
-#endif
NODE_CONNECTION *conn;
NODE_CANCEL *xc_cancelConn;
} PGXCNodePoolSlot;
@@ -97,9 +57,7 @@ typedef struct databasepool
* Coordinator or DataNode */
MemoryContext mcxt;
struct databasepool *next; /* Reference to next to organize linked list */
-#ifdef XCP
time_t oldest_idle;
-#endif
} DatabasePool;
/*
@@ -121,28 +79,11 @@ typedef struct
Oid *coord_conn_oids; /* one for each Coordinator */
PGXCNodePoolSlot **dn_connections; /* one for each Datanode */
PGXCNodePoolSlot **coord_connections; /* one for each Coordinator */
-#ifndef XCP
- char *session_params;
- char *local_params;
- bool is_temp; /* Temporary objects used for this pool session? */
-#endif
} PoolAgent;
-#ifndef XCP
-/* Handle to the pool manager (Session's side) */
-typedef struct
-{
- /* communication channel */
- PoolPort port;
-} PoolHandle;
-#endif
-#ifdef XCP
extern int PoolConnKeepAlive;
extern int PoolMaintenanceTimeout;
-#else
-extern int MinPoolSize;
-#endif
extern int MaxPoolSize;
extern int PoolerPort;
@@ -158,58 +99,18 @@ extern int PoolManagerInit(void);
/* Destroy internal structures */
extern int PoolManagerDestroy(void);
-#ifndef XCP
-/*
- * Get handle to pool manager. This function should be called just before
- * forking off new session. It creates PoolHandle, PoolAgent and a pipe between
- * them. PoolAgent is stored within Postmaster's memory context and Session
- * closes it later. PoolHandle is returned and should be store in a local
- * variable. After forking off it can be stored in global memory, so it will
- * only be accessible by the process running the session.
- */
-extern PoolHandle *GetPoolManagerHandle(void);
-
-/*
- * Called from Postmaster(Coordinator) after fork. Close one end of the pipe and
- * free memory occupied by PoolHandler
- */
-extern void PoolManagerCloseHandle(PoolHandle *handle);
-#endif
-
/*
* Gracefully close connection to the PoolManager
*/
extern void PoolManagerDisconnect(void);
extern char *session_options(void);
-#ifndef XCP
-/*
- * Called from Session process after fork(). Associate handle with session
- * for subsequent calls. Associate session with specified database and
- * initialize respective connection pool
- */
-extern void PoolManagerConnect(PoolHandle *handle,
- const char *database, const char *user_name,
- char *pgoptions);
-#endif
-
/*
* Reconnect to pool manager
* This simply does a disconnection followed by a reconnection.
*/
extern void PoolManagerReconnect(void);
-
-#ifndef XCP
-/*
- * Save a SET command in Pooler.
- * This command is run on existent agent connections
- * and stored in pooler agent to be replayed when new connections
- * are requested.
- */
-extern int PoolManagerSetCommand(PoolCommandType command_type, const char *set_command);
-#endif
-
/* Get pooled connections */
extern int *PoolManagerGetConnections(List *datanodelist, List *coordlist);
@@ -226,11 +127,7 @@ extern void PoolManagerReloadConnectionInfo(void);
extern int PoolManagerAbortTransactions(char *dbname, char *username, int **proc_pids);
/* Return connections back to the pool, for both Coordinator and Datanode connections */
-#ifdef XCP
extern void PoolManagerReleaseConnections(bool destroy);
-#else
-extern void PoolManagerReleaseConnections(void);
-#endif
/* Cancel a running query on Datanodes as well as on other Coordinators */
extern void PoolManagerCancelQuery(int dn_count, int* dn_list, int co_count, int* co_list);
@@ -238,12 +135,4 @@ extern void PoolManagerCancelQuery(int dn_count, int* dn_list, int co_count, int
/* Lock/unlock pool manager */
extern void PoolManagerLock(bool is_lock);
-#ifndef XCP
-/* Check if pool has a handle */
-extern bool IsPoolHandle(void);
-
-/* Send commands to alter the behavior of current transaction */
-extern int PoolManagerSendLocalCommand(int dn_count, int* dn_list, int co_count, int* co_list);
-#endif
-
#endif
diff --git a/src/include/pgxc/remotecopy.h b/src/include/pgxc/remotecopy.h
index 6adb386306..01ce7dbb07 100644
--- a/src/include/pgxc/remotecopy.h
+++ b/src/include/pgxc/remotecopy.h
@@ -16,9 +16,7 @@
#define REMOTECOPY_H
#include "nodes/parsenodes.h"
-#ifdef XCP
#include "pgxc/locator.h"
-#endif
/*
* This contains the set of data necessary for remote COPY control.
@@ -35,21 +33,11 @@ typedef struct RemoteCopyData {
* as copy source or destination
*/
StringInfoData query_buf;
-#ifdef XCP
Locator *locator; /* the locator object */
Oid dist_type; /* data type of the distribution column */
-#else
- /* Execution nodes for COPY */
- ExecNodes *exec_nodes;
-#endif
/* Locator information */
RelationLocInfo *rel_loc; /* the locator key */
-#ifndef XCP
- int idx_dist_by_col; /* index of the distributed by column */
-
- PGXCNodeHandle **connections; /* Involved Datanode connections */
-#endif
} RemoteCopyData;
/*
diff --git a/src/include/storage/lwlock.h b/src/include/storage/lwlock.h
index eb857e0f19..b705a593ee 100644
--- a/src/include/storage/lwlock.h
+++ b/src/include/storage/lwlock.h
@@ -140,8 +140,6 @@ extern PGDLLIMPORT LWLockPadded *MainLWLockArray;
#ifdef PGXC
#define BarrierLock (&MainLWLockArray[38].lock)
#define NodeTableLock (&MainLWLockArray[39].lock)
-#endif
-#ifdef XCP
#define SQueuesLock (&MainLWLockArray[40].lock)
#endif
#define CommitTsControlLock (&MainLWLockArray[41].lock)
diff --git a/src/include/utils/builtins.h b/src/include/utils/builtins.h
index 27f73592a1..dab93a06bf 100644
--- a/src/include/utils/builtins.h
+++ b/src/include/utils/builtins.h
@@ -878,11 +878,6 @@ extern Datum text_format_nv(PG_FUNCTION_ARGS);
/* version.c */
extern Datum pgsql_version(PG_FUNCTION_ARGS);
-#ifdef PGXC
-#ifndef XCP
-extern Datum pgxc_version(PG_FUNCTION_ARGS);
-#endif
-#endif
/* xid.c */
extern Datum xidin(PG_FUNCTION_ARGS);
@@ -1312,14 +1307,10 @@ extern Datum pg_cursor(PG_FUNCTION_ARGS);
extern Datum pgxc_pool_check(PG_FUNCTION_ARGS);
extern Datum pgxc_pool_reload(PG_FUNCTION_ARGS);
-#ifdef XCP
/* backend/pgxc/cluster/stormutils.c */
extern Datum stormdb_promote_standby(PG_FUNCTION_ARGS);
-#endif
-#endif
/* backend/access/transam/transam.c */
-#ifdef PGXC
extern Datum pgxc_is_committed(PG_FUNCTION_ARGS);
#endif
diff --git a/src/include/utils/tuplesort.h b/src/include/utils/tuplesort.h
index 8d20eecb8c..62b464e8f5 100644
--- a/src/include/utils/tuplesort.h
+++ b/src/include/utils/tuplesort.h
@@ -90,11 +90,7 @@ extern Tuplesortstate *tuplesort_begin_datum(Oid datumType,
extern Tuplesortstate *tuplesort_begin_merge(TupleDesc tupDesc,
int nkeys, AttrNumber *attNums,
Oid *sortOperators, Oid *sortCollations, bool *nullsFirstFlags,
-#ifdef XCP
struct ResponseCombiner *combiner,
-#else
- RemoteQueryState *combiner,
-#endif
int workMem);
#endif
diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c
index abb92ff1a8..e803c2fe72 100644
--- a/src/pl/plpgsql/src/pl_exec.c
+++ b/src/pl/plpgsql/src/pl_exec.c
@@ -3476,23 +3476,19 @@ exec_stmt_execsql(PLpgSQL_execstate *estate,
q->commandType == CMD_DELETE)
stmt->mod_stmt = true;
/* PGXCTODO: Support a better parameter interface for XC with DMLs */
+ if
#ifdef XCP
- if (IS_PGXC_DATANODE && (q->commandType == CMD_INSERT ||
-#else
- if (q->commandType == CMD_INSERT ||
+ (IS_PGXC_DATANODE &&
#endif
- q->commandType == CMD_UPDATE ||
- q->commandType == CMD_DELETE)
+ (q->commandType == CMD_INSERT ||
+ q->commandType == CMD_UPDATE ||
+ q->commandType == CMD_DELETE)
#ifdef XCP
- )
+ )
#endif
- ereport(ERROR,
+ ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-#ifdef XCP
errmsg("Postgres-XL does not support DML queries in PL/pgSQL on Datanodes")));
-#else
- errmsg("Postgres-XC does not support DML queries in PL/pgSQL")));
-#endif
}
}
}
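
The interleaved #ifdef edits in the pl_exec.c hunk are hard to follow in diff form. Reconstructed from the '+' and unchanged lines above, the check in exec_stmt_execsql() reads roughly as follows after the patch (indentation is illustrative):

	/* PGXCTODO: Support a better parameter interface for XC with DMLs */
	if
	#ifdef XCP
		(IS_PGXC_DATANODE &&
	#endif
		(q->commandType == CMD_INSERT ||
		 q->commandType == CMD_UPDATE ||
		 q->commandType == CMD_DELETE)
	#ifdef XCP
		)
	#endif
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("Postgres-XL does not support DML queries in PL/pgSQL on Datanodes")));

Hoisting the bare if above the #ifdef lets the same text compile both ways: an XCP build gains the IS_PGXC_DATANODE guard and its extra closing parenthesis, while a non-XCP build tests only the parenthesised command-type list.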