author | Pavan Deolasee | 2015-11-19 10:44:48 +0000
---|---|---
committer | Pavan Deolasee | 2015-11-19 10:44:48 +0000
commit | 6fc00e3f9e52b5587b5f8c94393ec77cec6818a4 |
tree | 030275de3f61d9d2aaf68be247e710928116dd5e |
parent | 3c0155c4c5b70da05f43b081337b564ca3b7ec86 |
Do not use READ ONLY transaction while dumping data using pg_dump
We use nextval(sequence) to get a consistent sequence value directly from the
GTM, since sequence values could be cached at different coordinators. But that
requires an RW transaction. It's not ideal for pg_dump to use an RW transaction,
but it's not terrible either, given that it runs in a very controlled manner. So
change it that way until we find a more elegant solution.
Also fix some assorted issues with pg_dump. It now seems to pass on the
"regression" database after a round of regression runs.
-rw-r--r-- | src/backend/libpq/be-fsstubs.c | 28
-rw-r--r-- | src/bin/pg_dump/pg_dump.c | 14
2 files changed, 39 insertions, 3 deletions
diff --git a/src/backend/libpq/be-fsstubs.c b/src/backend/libpq/be-fsstubs.c
index 29aeaf5891..c5959af0be 100644
--- a/src/backend/libpq/be-fsstubs.c
+++ b/src/backend/libpq/be-fsstubs.c
@@ -940,6 +940,13 @@ lo_get(PG_FUNCTION_ARGS)
 	Oid			loOid = PG_GETARG_OID(0);
 	bytea	   *result;
 
+#ifdef PGXC
+	ereport(ERROR,
+			(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+			 errmsg("Postgres-XL does not yet support large objects"),
+			 errdetail("The feature is not currently supported")));
+#endif
+
 	result = lo_get_fragment_internal(loOid, 0, -1);
 
 	PG_RETURN_BYTEA_P(result);
@@ -956,6 +963,13 @@ lo_get_fragment(PG_FUNCTION_ARGS)
 	int32		nbytes = PG_GETARG_INT32(2);
 	bytea	   *result;
 
+#ifdef PGXC
+	ereport(ERROR,
+			(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+			 errmsg("Postgres-XL does not yet support large objects"),
+			 errdetail("The feature is not currently supported")));
+#endif
+
 	if (nbytes < 0)
 		ereport(ERROR,
 				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -977,6 +991,13 @@ lo_from_bytea(PG_FUNCTION_ARGS)
 	LargeObjectDesc *loDesc;
 	int			written PG_USED_FOR_ASSERTS_ONLY;
 
+#ifdef PGXC
+	ereport(ERROR,
+			(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+			 errmsg("Postgres-XL does not yet support large objects"),
+			 errdetail("The feature is not currently supported")));
+#endif
+
 	CreateFSContext();
 
 	loOid = inv_create(loOid);
@@ -1000,6 +1021,13 @@ lo_put(PG_FUNCTION_ARGS)
 	LargeObjectDesc *loDesc;
 	int			written PG_USED_FOR_ASSERTS_ONLY;
 
+#ifdef PGXC
+	ereport(ERROR,
+			(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+			 errmsg("Postgres-XL does not yet support large objects"),
+			 errdetail("The feature is not currently supported")));
+#endif
+
 	CreateFSContext();
 
 	loDesc = inv_open(loOid, INV_WRITE, fscxt);
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 3fb8a53993..8a33d9c1b4 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -1044,11 +1044,19 @@ setup_connection(Archive *AH, DumpOptions *dopt, const char *dumpencoding,
 		if (dopt->serializable_deferrable && AH->sync_snapshot_id == NULL)
 			ExecuteSqlStatement(AH,
 								"SET TRANSACTION ISOLATION LEVEL "
-								"SERIALIZABLE, READ ONLY, DEFERRABLE");
+								"SERIALIZABLE, "
+#ifndef XCP
+								"READ ONLY, "
+#endif
+								"DEFERRABLE");
 		else
 			ExecuteSqlStatement(AH,
 								"SET TRANSACTION ISOLATION LEVEL "
-								"REPEATABLE READ, READ ONLY");
+								"REPEATABLE READ"
+#ifndef XCP
+								", READ ONLY"
+#endif
+								);
 	}
 	else if (AH->remoteVersion >= 70400)
 	{
@@ -15135,7 +15143,7 @@ dumpSequenceData(Archive *fout, TableDataInfo *tdinfo)
 	 * obtained from GTM.
 	 */
 	resetPQExpBuffer(query);
-	appendPQExpBufferStr(query, "SELECT pg_catalog.setval(");
+	appendPQExpBufferStr(query, "SELECT pg_catalog.nextval(");
 	appendStringLiteralAH(query, fmtId(tbinfo->dobj.name), fout);
 	appendPQExpBuffer(query, ");\n");
 	res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);