my ($relname) = @_;
my $pgdata = $node->data_dir;
- my $rel = $node->safe_psql('postgres',
+ my $rel = $node->safe_psql('postgres',
qq(SELECT pg_relation_filepath('$relname')));
die "path not found for relation $relname" unless defined $rel;
return "$pgdata/$rel";
for my $endblock (qw(NULL 0))
{
my $opts =
- "on_error_stop := $stop, "
+ "on_error_stop := $stop, "
. "check_toast := $check_toast, "
. "skip := $skip, "
. "startblock := $startblock, "
my $main_h = $node->background_psql('postgres');
-$main_h->query_safe(q(
+$main_h->query_safe(
+ q(
BEGIN;
INSERT INTO tbl VALUES(0);
));
my $cic_h = $node->background_psql('postgres');
-$cic_h->query_until(qr/start/, q(
+$cic_h->query_until(
+ qr/start/, q(
\echo start
CREATE INDEX CONCURRENTLY idx ON tbl(i);
));
-$main_h->query_safe(q(
+$main_h->query_safe(
+ q(
PREPARE TRANSACTION 'a';
));
-$main_h->query_safe(q(
+$main_h->query_safe(
+ q(
BEGIN;
INSERT INTO tbl VALUES(0);
));
$node->safe_psql('postgres', q(COMMIT PREPARED 'a';));
-$main_h->query_safe(q(
+$main_h->query_safe(
+ q(
PREPARE TRANSACTION 'b';
BEGIN;
INSERT INTO tbl VALUES(0);
));
$node->safe_psql('postgres', q(COMMIT PREPARED 'b';));
-$main_h->query_safe(q(
+$main_h->query_safe(
+ q(
PREPARE TRANSACTION 'c';
COMMIT PREPARED 'c';
));
$node->restart;
my $reindex_h = $node->background_psql('postgres');
-$reindex_h->query_until(qr/start/, q(
+$reindex_h->query_until(
+ qr/start/, q(
\echo start
DROP INDEX CONCURRENTLY idx;
CREATE INDEX CONCURRENTLY idx ON tbl(i);
OffsetNumber successor[MaxOffsetNumber];
bool lp_valid[MaxOffsetNumber];
bool xmin_commit_status_ok[MaxOffsetNumber];
- XidCommitStatus xmin_commit_status[MaxOffsetNumber];
+ XidCommitStatus xmin_commit_status[MaxOffsetNumber];
CHECK_FOR_INTERRUPTS();
for (ctx.offnum = FirstOffsetNumber; ctx.offnum <= maxoff;
ctx.offnum = OffsetNumberNext(ctx.offnum))
{
- BlockNumber nextblkno;
+ BlockNumber nextblkno;
OffsetNumber nextoffnum;
successor[ctx.offnum] = InvalidOffsetNumber;
/*
* Since we've checked that this redirect points to a line
- * pointer between FirstOffsetNumber and maxoff, it should
- * now be safe to fetch the referenced line pointer. We expect
- * it to be LP_NORMAL; if not, that's corruption.
+ * pointer between FirstOffsetNumber and maxoff, it should now
+ * be safe to fetch the referenced line pointer. We expect it
+ * to be LP_NORMAL; if not, that's corruption.
*/
rditem = PageGetItemId(ctx.page, rdoffnum);
if (!ItemIdIsUsed(rditem))
{
/*
* We should not have set successor[ctx.offnum] to a value
- * other than InvalidOffsetNumber unless that line pointer
- * is LP_NORMAL.
+ * other than InvalidOffsetNumber unless that line pointer is
+ * LP_NORMAL.
*/
Assert(ItemIdIsNormal(next_lp));
}
/*
- * If the next line pointer is a redirect, or if it's a tuple
- * but the XMAX of this tuple doesn't match the XMIN of the next
+ * If the next line pointer is a redirect, or if it's a tuple but
+ * the XMAX of this tuple doesn't match the XMIN of the next
* tuple, then the two aren't part of the same update chain and
* there is nothing more to do.
*/
}
/*
- * This tuple and the tuple to which it points seem to be part
- * of an update chain.
+ * This tuple and the tuple to which it points seem to be part of
+ * an update chain.
*/
predecessor[nextoffnum] = ctx.offnum;
}
/*
- * If the current tuple's xmin is aborted but the successor tuple's
- * xmin is in-progress or committed, that's corruption.
+ * If the current tuple's xmin is aborted but the successor
+ * tuple's xmin is in-progress or committed, that's corruption.
*/
if (xmin_commit_status_ok[ctx.offnum] &&
xmin_commit_status[ctx.offnum] == XID_ABORTED &&
HeapTupleHeader tuphdr = ctx->tuphdr;
ctx->tuple_could_be_pruned = true; /* have not yet proven otherwise */
- *xmin_commit_status_ok = false; /* have not yet proven otherwise */
+ *xmin_commit_status_ok = false; /* have not yet proven otherwise */
/* If xmin is normal, it should be within valid range */
xmin = HeapTupleHeaderGetXmin(tuphdr);
* therefore cannot check it.
*/
if (!check_tuple_visibility(ctx, xmin_commit_status_ok,
- xmin_commit_status))
+ xmin_commit_status))
return;
/*
diff = (int32) (ctx->next_xid - xid);
/*
- * In cases of corruption we might see a 32bit xid that is before epoch
- * 0. We can't represent that as a 64bit xid, due to 64bit xids being
+ * In cases of corruption we might see a 32bit xid that is before epoch 0.
+ * We can't represent that as a 64bit xid, due to 64bit xids being
* unsigned integers, without the modulo arithmetic of 32bit xid. There's
* no really nice way to deal with that, but it works ok enough to use
* FirstNormalFullTransactionId in that case, as a freshly initdb'd
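/*
 * A minimal illustrative sketch (editor's addition, not the surrounding
 * verify_heapam code): widening a possibly wrapped-around 32-bit xid to
 * 64 bits relative to a known 64-bit next-xid, clamping anything that
 * would land before epoch 0 to FirstNormalFullTransactionId (3), as the
 * comment above describes. Special xids (bootstrap/frozen) are ignored.
 */
#include <stdint.h>

static uint64_t
widen_xid(uint32_t xid, uint64_t next_fxid)
{
	int32_t		diff = (int32_t) ((uint32_t) next_fxid - xid);

	if (diff > 0 && (uint64_t) diff > next_fxid)
		return 3;				/* assumed FirstNormalFullTransactionId */
	return next_fxid - (int64_t) diff;	/* negative diff adds, per modulo */
}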
local $ENV{PGOPTIONS} = join " ",
map { "-c $_=$params->{$_}" } keys %$params;
- my $log = $node->logfile();
+ my $log = $node->logfile();
my $offset = -s $log;
$node->safe_psql("postgres", $sql);
"SELECT * FROM pg_class;",
{
"auto_explain.log_verbose" => "on",
- "compute_query_id" => "on"
+ "compute_query_id" => "on"
});
like(
"SELECT * FROM pg_class;",
{
"auto_explain.log_verbose" => "on",
- "compute_query_id" => "regress"
+ "compute_query_id" => "regress"
});
unlike(
# This is only needed on Windows machines that don't use UNIX sockets.
$node->init(
'allows_streaming' => 1,
- 'auth_extra' => [ '--create-role', 'backupuser' ]);
+ 'auth_extra' => [ '--create-role', 'backupuser' ]);
$node->append_conf('postgresql.conf',
"shared_preload_libraries = 'basebackup_to_shell'");
'fails if basebackup_to_shell.command is not set');
# Configure basebackup_to_shell.command and reload the configuration file.
-my $backup_path = PostgreSQL::Test::Utils::tempdir;
+my $backup_path = PostgreSQL::Test::Utils::tempdir;
my $escaped_backup_path = $backup_path;
$escaped_backup_path =~ s{\\}{\\\\}g
if ($PostgreSQL::Test::Utils::windows_os);
MemoryContext basic_archive_context;
/*
- * If we didn't get to storing the pointer to our allocated state, we don't
- * have anything to clean up.
+ * If we didn't get to storing the pointer to our allocated state, we
+ * don't have anything to clean up.
*/
if (data == NULL)
return;
if (astate)
PG_RETURN_DATUM(makeArrayResult(astate,
- CurrentMemoryContext));
+ CurrentMemoryContext));
else
PG_RETURN_NULL();
}
$outf = ($opt{u}) ? 'distinct( message.mid )' : 'message.mid';
}
my $sql =
- "select $outf from "
+ "select $outf from "
. join(', ', keys %table)
. " where "
. join(' AND ', @where) . ';';
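# Hedged illustration (hypothetical inputs): with %table = ('message' => 1)
# and @where = ('message.mid = 5'), the concatenation above produces:
#   select message.mid from message where message.mid = 5;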
print @plan;
}
-my $t0 = [gettimeofday];
+my $t0 = [gettimeofday];
my $count = 0;
-my $b = $opt{b};
+my $b = $opt{b};
$b ||= 1;
my @a;
foreach (1 .. $b)
EOT
-open(my $msg, '>', "message.tmp") || die;
+open(my $msg, '>', "message.tmp") || die;
open(my $map, '>', "message_section_map.tmp") || die;
srand(1);
ltree *left, ltree *right)
{
int32 size = LTG_HDRSIZE + (isalltrue ? 0 : siglen) +
- (left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0);
+ (left ? VARSIZE(left) + (right ? VARSIZE(right) : 0) : 0);
ltree_gist *result = palloc(size);
SET_VARSIZE(result, size);
ltree_in(PG_FUNCTION_ARGS)
{
char *buf = (char *) PG_GETARG_POINTER(0);
- ltree *res;
+ ltree *res;
if ((res = parse_ltree(buf, fcinfo->context)) == NULL)
PG_RETURN_NULL();
*/
static bool
finish_nodeitem(nodeitem *lptr, const char *ptr, bool is_lquery, int pos,
- struct Node *escontext)
+ struct Node *escontext)
{
if (is_lquery)
{
lquery_in(PG_FUNCTION_ARGS)
{
char *buf = (char *) PG_GETARG_POINTER(0);
- lquery *res;
+ lquery *res;
if ((res = parse_lquery(buf, fcinfo->context)) == NULL)
PG_RETURN_NULL();
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("word is too long")));
- if (! pushquery(state, type, ltree_crc32_sz(strval, lenval),
- state->curop - state->op, lenval, flag))
+ if (!pushquery(state, type, ltree_crc32_sz(strval, lenval),
+ state->curop - state->op, lenval, flag))
return false;
while (state->curop - state->op + lenval + 1 >= state->lenop)
Datum
ltxtq_in(PG_FUNCTION_ARGS)
{
- ltxtquery *res;
+ ltxtquery *res;
if ((res = queryin((char *) PG_GETARG_POINTER(0), fcinfo->context)) == NULL)
PG_RETURN_NULL();
# setup
$node->safe_psql("postgres",
- "CREATE EXTENSION pg_prewarm;\n"
+ "CREATE EXTENSION pg_prewarm;\n"
. "CREATE TABLE test(c1 int);\n"
. "INSERT INTO test SELECT generate_series(1, 100);");
int block_id;
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
RmgrData desc;
- const char *record_type;
- StringInfoData rec_desc;
+ const char *record_type;
+ StringInfoData rec_desc;
Assert(XLogRecHasAnyBlockRefs(record));
bool have_error; /* have any subxacts aborted in this xact? */
bool changing_xact_state; /* xact state change in process */
bool parallel_commit; /* do we commit (sub)xacts in parallel? */
- bool parallel_abort; /* do we abort (sub)xacts in parallel? */
+ bool parallel_abort; /* do we abort (sub)xacts in parallel? */
bool invalidated; /* true if reconnect is pending */
bool keep_connections; /* setting value of keep_connections
* server option */
/*
* Should never get called when the insert is being performed on a table
- * that is also among the target relations of an UPDATE operation,
- * because postgresBeginForeignInsert() currently rejects such insert
- * attempts.
+ * that is also among the target relations of an UPDATE operation, because
+ * postgresBeginForeignInsert() currently rejects such insert attempts.
*/
Assert(fmstate == NULL || fmstate->aux_fmstate == NULL);
*/
if (method != ANALYZE_SAMPLE_OFF)
{
- bool can_tablesample;
+ bool can_tablesample;
reltuples = postgresGetAnalyzeInfoForForeignTable(relation,
&can_tablesample);
/*
- * Make sure we're not choosing TABLESAMPLE when the remote relation does
- * not support that. But only do this for "auto" - if the user explicitly
- * requested BERNOULLI/SYSTEM, it's better to fail.
+ * Make sure we're not choosing TABLESAMPLE when the remote relation
+ * does not support that. But only do this for "auto" - if the user
+ * explicitly requested BERNOULLI/SYSTEM, it's better to fail.
*/
if (!can_tablesample && (method == ANALYZE_SAMPLE_AUTO))
method = ANALYZE_SAMPLE_RANDOM;
else
{
/*
- * All supported sampling methods require sampling rate,
- * not target rows directly, so we calculate that using
- * the remote reltuples value. That's imperfect, because
- * it might be off a good deal, but that's not something
- * we can (or should) address here.
+ * All supported sampling methods require sampling rate, not
+ * target rows directly, so we calculate that using the remote
+ * reltuples value. That's imperfect, because it might be off a
+ * good deal, but that's not something we can (or should) address
+ * here.
*
- * If reltuples is too low (i.e. when table grew), we'll
- * end up sampling more rows - but then we'll apply the
- * local sampling, so we get the expected sample size.
- * This is the same outcome as without remote sampling.
+ * If reltuples is too low (i.e. when table grew), we'll end up
+ * sampling more rows - but then we'll apply the local sampling,
+ * so we get the expected sample size. This is the same outcome as
+ * without remote sampling.
*
- * If reltuples is too high (e.g. after bulk DELETE), we
- * will end up sampling too few rows.
+ * If reltuples is too high (e.g. after bulk DELETE), we will end
+ * up sampling too few rows.
*
- * We can't really do much better here - we could try
- * sampling a bit more rows, but we don't know how off
- * the reltuples value is so how much is "a bit more"?
+ * We can't really do much better here - we could try sampling a
+ * bit more rows, but we don't know how off the reltuples value is
+ * so how much is "a bit more"?
*
- * Furthermore, the targrows value for partitions is
- * determined based on table size (relpages), which can
- * be off in different ways too. Adjusting the sampling
- * rate here might make the issue worse.
+ * Furthermore, the targrows value for partitions is determined
+ * based on table size (relpages), which can be off in different
+ * ways too. Adjusting the sampling rate here might make the issue
+ * worse.
*/
sample_frac = targrows / reltuples;
/*
* We should never get sampling rate outside the valid range
- * (between 0.0 and 1.0), because those cases should be covered
- * by the previous branch that sets ANALYZE_SAMPLE_OFF.
+ * (between 0.0 and 1.0), because those cases should be covered by
+ * the previous branch that sets ANALYZE_SAMPLE_OFF.
*/
Assert(sample_frac >= 0.0 && sample_frac <= 1.0);
}
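/*
 * Illustrative sketch only (assumed names, not postgres_fdw's code): how a
 * sampling fraction could be derived from a targrows target and the remote
 * reltuples estimate, falling back to "sample everything" when the estimate
 * is missing or already smaller than the target, so the result stays within
 * the (0.0, 1.0] range the Assert above expects.
 */
static double
derive_sample_frac(double targrows, double reltuples)
{
	if (reltuples <= 0 || targrows >= reltuples)
		return 1.0;				/* no point in remote sampling */
	return targrows / reltuples;	/* strictly between 0.0 and 1.0 here */
}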
/* See if we already cached the result. */
entry = (ShippableCacheEntry *)
- hash_search(ShippableCacheHash, &key, HASH_FIND, NULL);
+ hash_search(ShippableCacheHash, &key, HASH_FIND, NULL);
if (!entry)
{
* cache invalidation.
*/
entry = (ShippableCacheEntry *)
- hash_search(ShippableCacheHash, &key, HASH_ENTER, NULL);
+ hash_search(ShippableCacheHash, &key, HASH_ENTER, NULL);
entry->shippable = shippable;
}
use warnings;
my $integer = '[+-]?[0-9]+';
-my $real = '[+-]?[0-9]+\.[0-9]+';
+my $real = '[+-]?[0-9]+\.[0-9]+';
-my $RANGE = '(\.\.)(\.)?';
-my $PLUMIN = q(\'\+\-\');
-my $FLOAT = "(($integer)|($real))([eE]($integer))?";
+my $RANGE = '(\.\.)(\.)?';
+my $PLUMIN = q(\'\+\-\');
+my $FLOAT = "(($integer)|($real))([eE]($integer))?";
my $EXTENSION = '<|>|~';
-my $boundary = "($EXTENSION)?$FLOAT";
+my $boundary = "($EXTENSION)?$FLOAT";
my $deviation = $FLOAT;
my $rule_1 = $boundary . $PLUMIN . $deviation;
# replication statistics data is fine after restart.
$node->stop;
-my $datadir = $node->data_dir;
+my $datadir = $node->data_dir;
my $slot3_replslotdir = "$datadir/pg_replslot/regression_slot3";
rmtree($slot3_replslotdir);
{
TestDecodingData *data = ctx->output_plugin_private;
TestDecodingTxnData *txndata =
- MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
+ MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
txndata->xact_wrote_changes = false;
txn->output_plugin_private = txndata;
{
TestDecodingData *data = ctx->output_plugin_private;
TestDecodingTxnData *txndata =
- MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
+ MemoryContextAllocZero(ctx->context, sizeof(TestDecodingTxnData));
txndata->xact_wrote_changes = false;
txn->output_plugin_private = txndata;
while (<$feat>)
{
chomp;
- my ($feature_id, $feature_name, $subfeature_id,
+ my ($feature_id, $feature_name, $subfeature_id,
$subfeature_name, $is_supported, $comments) = split /\t/;
$is_supported eq $yesno || next;
}
/*
- * If we found a scan key eliminating the range, no need to
- * check additional ones.
+ * If we found a scan key eliminating the range, no need
+ * to check additional ones.
*/
if (!addrange)
break;
* Obtain BrinOpcInfo for each indexed column. While at it, accumulate
* the number of columns stored, since the number is opclass-defined.
*/
- opcinfo = palloc_array(BrinOpcInfo*, tupdesc->natts);
+ opcinfo = palloc_array(BrinOpcInfo *, tupdesc->natts);
for (keyno = 0; keyno < tupdesc->natts; keyno++)
{
FmgrInfo *opcInfoFn;
bval = &dtup->bt_columns[keyno];
/*
- * Does the range have actual NULL values? Either of the flags can
- * be set, but we ignore the state before adding first row.
+ * Does the range have actual NULL values? Either of the flags can be
+ * set, but we ignore the state before adding first row.
*
* We have to remember this, because we'll modify the flags and we
* need to know if the range started as empty.
/*
* If the range had actual NULL values (i.e. did not start empty),
- * make sure we don't forget about the NULL values. Either the allnulls
- * flag is still set to true, or (if the opclass cleared it) we need to
- * set hasnulls=true.
+ * make sure we don't forget about the NULL values. Either the
+ * allnulls flag is still set to true, or (if the opclass cleared it)
+ * we need to set hasnulls=true.
*
- * XXX This can only happen when the opclass modified the tuple, so the
- * modified flag should be set.
+ * XXX This can only happen when the opclass modified the tuple, so
+ * the modified flag should be set.
*/
if (has_nulls && !(bval->bv_hasnulls || bval->bv_allnulls))
{
/*
* After updating summaries for all the keys, mark it as not empty.
*
- * If we're actually changing the flag value (i.e. tuple started as empty),
- * we should have modified the tuple. So we should not see empty range that
- * was not modified.
+ * If we're actually changing the flag value (i.e. tuple started as
+ * empty), we should have modified the tuple. So we should not see empty
+ * range that was not modified.
*/
Assert(!dtup->bt_empty_range || modified);
dtup->bt_empty_range = false;
if (optstr->fill_cb)
{
const char *val = optval->isset ? optval->values.string_val :
- optstr->default_isnull ? NULL : optstr->default_val;
+ optstr->default_isnull ? NULL : optstr->default_val;
size += optstr->fill_cb(val, NULL);
}
if (optstring->fill_cb)
{
Size size =
- optstring->fill_cb(string_val,
- (char *) rdopts + offset);
+ optstring->fill_cb(string_val,
+ (char *) rdopts + offset);
if (size)
{
for (offset = FirstOffsetNumber; offset <= maxoff; offset = OffsetNumberNext(offset))
{
IndexTuple ituple = (IndexTuple)
- PageGetItem(page, PageGetItemId(page, offset));
+ PageGetItem(page, PageGetItemId(page, offset));
if (downlink == NULL)
downlink = CopyIndexTuple(ituple);
{
GISTPageSplitInfo *si = (GISTPageSplitInfo *) lfirst(lc);
GISTNodeBuffer *newNodeBuffer;
- int i = foreach_current_index(lc);
+ int i = foreach_current_index(lc);
/* Decompress parent index tuple of node buffer page. */
gistDeCompressAtt(giststate, r,
if (so->killedItems == NULL)
{
MemoryContext oldCxt =
- MemoryContextSwitchTo(so->giststate->scanCxt);
+ MemoryContextSwitchTo(so->giststate->scanCxt);
so->killedItems =
(OffsetNumber *) palloc(MaxIndexTuplesPerPage
if (so->killedItems == NULL)
{
MemoryContext oldCxt =
- MemoryContextSwitchTo(so->giststate->scanCxt);
+ MemoryContextSwitchTo(so->giststate->scanCxt);
so->killedItems =
(OffsetNumber *) palloc(MaxIndexTuplesPerPage
if (data - begin < datalen)
{
OffsetNumber off = (PageIsEmpty(page)) ? FirstOffsetNumber :
- OffsetNumberNext(PageGetMaxOffsetNumber(page));
+ OffsetNumberNext(PageGetMaxOffsetNumber(page));
while (data - begin < datalen)
{
}
else
{
- Size bsize, rsize;
+ Size bsize,
+ rsize;
char *buf;
const char *keydata = VARDATA_ANY(key);
size_t keylen = VARSIZE_ANY_EXHDR(key);
/*
* In principle, there's no reason to include the terminating NUL
- * character in the hash, but it was done before and the behavior
- * must be preserved.
+ * character in the hash, but it was done before and the behavior must
+ * be preserved.
*/
result = hash_any((uint8_t *) buf, bsize + 1);
}
else
{
- Size bsize, rsize;
+ Size bsize,
+ rsize;
char *buf;
const char *keydata = VARDATA_ANY(key);
size_t keylen = VARSIZE_ANY_EXHDR(key);
/*
* In principle, there's no reason to include the terminating NUL
- * character in the hash, but it was done before and the behavior
- * must be preserved.
+ * character in the hash, but it was done before and the behavior must
+ * be preserved.
*/
result = hash_any_extended((uint8_t *) buf, bsize + 1,
PG_GETARG_INT64(1));
xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
{
const uint16 interesting =
- HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
+ HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_LOCK_MASK;
if ((new_infomask & interesting) != (old_infomask & interesting))
return true;
* Note: heap_update returns the tid (location) of the new tuple in the
* t_self field.
*
- * If the update is not HOT, we must update all indexes. If the update
- * is HOT, it could be that we updated summarized columns, so we either
+ * If the update is not HOT, we must update all indexes. If the update is
+ * HOT, it could be that we updated summarized columns, so we either
* update only summarized indexes, or none at all.
*/
if (result != TM_Ok)
if (use_fsm && i >= not_in_fsm_pages)
{
Size freespace = BufferGetPageSize(victim_buffers[i]) -
- SizeOfPageHeaderData;
+ SizeOfPageHeaderData;
RecordPageWithFreeSpace(relation, curBlock, freespace);
}
if (!TransactionIdIsValid(prstate->old_snap_xmin))
{
TransactionId horizon =
- GlobalVisTestNonRemovableHorizon(prstate->vistest);
+ GlobalVisTestNonRemovableHorizon(prstate->vistest);
TransactionIdLimitedForOldSnapshots(horizon, prstate->rel,
&prstate->old_snap_xmin,
Assert(params->index_cleanup != VACOPTVALUE_UNSPECIFIED);
Assert(params->truncate != VACOPTVALUE_UNSPECIFIED &&
params->truncate != VACOPTVALUE_AUTO);
+
/*
* While VacuumFailSafeActive is reset to false before calling this, we
* still need to reset it here due to recursive calls.
{
/*
* We have no freeze plans to execute, so there's no added cost
- * from following the freeze path. That's why it was chosen.
- * This is important in the case where the page only contains
- * totally frozen tuples at this point (perhaps only following
- * pruning). Such pages can be marked all-frozen in the VM by our
- * caller, even though none of its tuples were newly frozen here
- * (note that the "no freeze" path never sets pages all-frozen).
+ * from following the freeze path. That's why it was chosen. This
+ * is important in the case where the page only contains totally
+ * frozen tuples at this point (perhaps only following pruning).
+ * Such pages can be marked all-frozen in the VM by our caller,
+ * even though none of its tuples were newly frozen here (note
+ * that the "no freeze" path never sets pages all-frozen).
*
* We never increment the frozen_pages instrumentation counter
* here, since it only counts pages with newly frozen tuples
{
int64 max_items;
int vac_work_mem = IsAutoVacuumWorkerProcess() &&
- autovacuum_work_mem != -1 ?
- autovacuum_work_mem : maintenance_work_mem;
+ autovacuum_work_mem != -1 ?
+ autovacuum_work_mem : maintenance_work_mem;
if (vacrel->nindexes > 0)
{
static Buffer
vm_extend(Relation rel, BlockNumber vm_nblocks)
{
- Buffer buf;
+ Buffer buf;
buf = ExtendBufferedRelTo(EB_REL(rel), VISIBILITYMAP_FORKNUM, NULL,
EB_CREATE_FORK_IF_NEEDED |
_bt_pendingfsm_finalize(Relation rel, BTVacState *vstate)
{
IndexBulkDeleteResult *stats = vstate->stats;
- Relation heaprel = vstate->info->heaprel;
+ Relation heaprel = vstate->info->heaprel;
Assert(stats->pages_newly_deleted >= vstate->npendingpages);
if (vstate->npendingpages > 0)
{
FullTransactionId lastsafexid =
- vstate->pendingpages[vstate->npendingpages - 1].safexid;
+ vstate->pendingpages[vstate->npendingpages - 1].safexid;
Assert(FullTransactionIdFollowsOrEquals(safexid, lastsafexid));
}
if (info == XLOG_DBASE_CREATE_FILE_COPY)
{
xl_dbase_create_file_copy_rec *xlrec =
- (xl_dbase_create_file_copy_rec *) rec;
+ (xl_dbase_create_file_copy_rec *) rec;
appendStringInfo(buf, "copy dir %u/%u to %u/%u",
xlrec->src_tablespace_id, xlrec->src_db_id,
else if (info == XLOG_DBASE_CREATE_WAL_LOG)
{
xl_dbase_create_wal_log_rec *xlrec =
- (xl_dbase_create_wal_log_rec *) rec;
+ (xl_dbase_create_wal_log_rec *) rec;
appendStringInfo(buf, "create dir %u/%u",
xlrec->tablespace_id, xlrec->db_id);
else
{
ginxlogInsertDataInternal *insertData =
- (ginxlogInsertDataInternal *) payload;
+ (ginxlogInsertDataInternal *) payload;
appendStringInfo(buf, " pitem: %u-%u/%u",
PostingItemGetBlockNumber(&insertData->newitem),
else
{
ginxlogVacuumDataLeafPage *xlrec =
- (ginxlogVacuumDataLeafPage *) XLogRecGetBlockData(record, 0, NULL);
+ (ginxlogVacuumDataLeafPage *) XLogRecGetBlockData(record, 0, NULL);
desc_recompress_leaf(buf, &xlrec->data);
}
{
/* allocate distance array only for non-NULL items */
SpGistSearchItem *item =
- palloc(SizeOfSpGistSearchItem(isnull ? 0 : so->numberOfNonNullOrderBys));
+ palloc(SizeOfSpGistSearchItem(isnull ? 0 : so->numberOfNonNullOrderBys));
item->isNull = isnull;
spgAddStartItem(SpGistScanOpaque so, bool isnull)
{
SpGistSearchItem *startEntry =
- spgAllocSearchItem(so, isnull, so->zeroDistances);
+ spgAllocSearchItem(so, isnull, so->zeroDistances);
ItemPointerSet(&startEntry->heapPtr,
isnull ? SPGIST_NULL_BLKNO : SPGIST_ROOT_BLKNO,
storeRes_func storeRes)
{
SpGistLeafTuple leafTuple = (SpGistLeafTuple)
- PageGetItem(page, PageGetItemId(page, offset));
+ PageGetItem(page, PageGetItemId(page, offset));
if (leafTuple->tupstate != SPGIST_LIVE)
{
else /* page is inner */
{
SpGistInnerTuple innerTuple = (SpGistInnerTuple)
- PageGetItem(page, PageGetItemId(page, offset));
+ PageGetItem(page, PageGetItemId(page, offset));
if (innerTuple->tupstate != SPGIST_LIVE)
{
else
{
IndexOrderByDistance *distances =
- palloc(sizeof(distances[0]) * so->numberOfOrderBys);
+ palloc(sizeof(distances[0]) * so->numberOfOrderBys);
int i;
for (i = 0; i < so->numberOfOrderBys; i++)
table_beginscan_catalog(Relation relation, int nkeys, struct ScanKeyData *key)
{
uint32 flags = SO_TYPE_SEQSCAN |
- SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
+ SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
Oid relid = RelationGetRelid(relation);
Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
{
Snapshot snapshot;
uint32 flags = SO_TYPE_SEQSCAN |
- SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
+ SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
Assert(RelationGetRelid(relation) == pscan->phs_relid);
else if (info == XLOG_MULTIXACT_CREATE_ID)
{
xl_multixact_create *xlrec =
- (xl_multixact_create *) XLogRecGetData(record);
+ (xl_multixact_create *) XLogRecGetData(record);
TransactionId max_xid;
int i;
shm_toc_insert(pcxt->toc, PARALLEL_KEY_COMBO_CID, combocidspace);
/*
- * Serialize the transaction snapshot if the transaction
- * isolation level uses a transaction snapshot.
+ * Serialize the transaction snapshot if the transaction isolation
+ * level uses a transaction snapshot.
*/
if (IsolationUsesXactSnapshot())
{
RestoreClientConnectionInfo(clientconninfospace);
/*
- * Initialize SystemUser now that MyClientConnectionInfo is restored.
- * Also ensure that auth_method is actually valid, aka authn_id is not NULL.
+ * Initialize SystemUser now that MyClientConnectionInfo is restored. Also
+ * ensure that auth_method is actually valid, aka authn_id is not NULL.
*/
if (MyClientConnectionInfo.authn_id)
InitializeSystemUser(MyClientConnectionInfo.authn_id,
break;
/*
- * The user issued a SAVEPOINT inside a transaction block.
- * Start a subtransaction. (DefineSavepoint already did
- * PushTransaction, so as to have someplace to put the SUBBEGIN
- * state.)
+ * The user issued a SAVEPOINT inside a transaction block. Start a
+ * subtransaction. (DefineSavepoint already did PushTransaction,
+ * so as to have someplace to put the SUBBEGIN state.)
*/
case TBLOCK_SUBBEGIN:
StartSubTransaction();
s = CurrentTransactionState; /* changed by pop */
Assert(s->blockState == TBLOCK_SUBINPROGRESS ||
- s->blockState == TBLOCK_INPROGRESS ||
- s->blockState == TBLOCK_IMPLICIT_INPROGRESS ||
- s->blockState == TBLOCK_STARTED);
+ s->blockState == TBLOCK_INPROGRESS ||
+ s->blockState == TBLOCK_IMPLICIT_INPROGRESS ||
+ s->blockState == TBLOCK_STARTED);
}
/*
missingContrecPtr = endOfRecoveryInfo->missingContrecPtr;
/*
- * Reset ps status display, so as no information related to recovery
- * shows up.
+ * Reset ps status display, so as no information related to recovery shows
+ * up.
*/
set_ps_display("");
if (!XLogRecPtrIsInvalid(missingContrecPtr))
{
/*
- * We should only have a missingContrecPtr if we're not switching to
- * a new timeline. When a timeline switch occurs, WAL is copied from
- * the old timeline to the new only up to the end of the last complete
+ * We should only have a missingContrecPtr if we're not switching to a
+ * new timeline. When a timeline switch occurs, WAL is copied from the
+ * old timeline to the new only up to the end of the last complete
* record, so there can't be an incomplete WAL record that we need to
* disregard.
*/
*/
if (rllen > datadirpathlen &&
strncmp(linkpath, DataDir, datadirpathlen) == 0 &&
- IS_DIR_SEP(linkpath[datadirpathlen]))
+ IS_DIR_SEP(linkpath[datadirpathlen]))
relpath = pstrdup(linkpath + datadirpathlen + 1);
/*
*
* XLogReader machinery is only able to handle records up to a certain
* size (ignoring machine resource limitations), so make sure that we will
- * not emit records larger than the sizes advertised to be supported.
- * This cap is based on DecodeXLogRecordRequiredSpace().
+ * not emit records larger than the sizes advertised to be supported. This
+ * cap is based on DecodeXLogRecordRequiredSpace().
*/
if (total_len >= XLogRecordMaxSize)
ereport(ERROR,
if (record_type == XLOG_DBASE_CREATE_FILE_COPY)
{
xl_dbase_create_file_copy_rec *xlrec =
- (xl_dbase_create_file_copy_rec *) record->main_data;
+ (xl_dbase_create_file_copy_rec *) record->main_data;
RelFileLocator rlocator =
{InvalidOid, xlrec->db_id, InvalidRelFileNumber};
if (record_type == XLOG_SMGR_CREATE)
{
xl_smgr_create *xlrec = (xl_smgr_create *)
- record->main_data;
+ record->main_data;
if (xlrec->forkNum == MAIN_FORKNUM)
{
else if (record_type == XLOG_SMGR_TRUNCATE)
{
xl_smgr_truncate *xlrec = (xl_smgr_truncate *)
- record->main_data;
+ record->main_data;
/*
* Don't consider prefetching anything in the truncated
XLogReleasePreviousRecord(XLogReaderState *state)
{
DecodedXLogRecord *record;
- XLogRecPtr next_lsn;
+ XLogRecPtr next_lsn;
if (!state->record)
return InvalidXLogRecPtr;
XLogRecPtr targetRecPtr, char *readBuf)
{
XLogPageReadPrivate *private =
- (XLogPageReadPrivate *) xlogreader->private_data;
+ (XLogPageReadPrivate *) xlogreader->private_data;
int emode = private->emode;
uint32 targetPageOff;
XLogSegNo targetSegNo PG_USED_FOR_ASSERTS_ONLY;
*
* There's no guarantee that this will actually
* happen, though: the torn write could take an
- * arbitrarily long time to complete. Retrying multiple
- * times wouldn't fix this problem, either, though
- * it would reduce the chances of it happening in
- * practice. The only real fix here seems to be to
+ * arbitrarily long time to complete. Retrying
+ * multiple times wouldn't fix this problem, either,
+ * though it would reduce the chances of it happening
+ * in practice. The only real fix here seems to be to
* have some kind of interlock that allows us to wait
* until we can be certain that no write to the block
* is in progress. Since we don't have any such thing
tupdesc = CreateTemplateTupleDesc(2);
TupleDescInitBuiltinEntry(tupdesc, (AttrNumber) 1, "recptr", TEXTOID, -1, 0);
+
/*
* int8 may seem like a surprising data type for this, but in theory int4
* would not be wide enough for this, as TimeLineID is unsigned.
tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual);
/* Data row */
- values[0]= CStringGetTextDatum(psprintf("%X/%X", LSN_FORMAT_ARGS(ptr)));
+ values[0] = CStringGetTextDatum(psprintf("%X/%X", LSN_FORMAT_ARGS(ptr)));
values[1] = Int64GetDatum(tli);
do_tup_output(tstate, values, nulls);
# There are a few types which are given one name in the C source, but a
# different name at the SQL level. These are enumerated here.
my %RENAME_ATTTYPE = (
- 'int16' => 'int2',
- 'int32' => 'int4',
- 'int64' => 'int8',
- 'Oid' => 'oid',
- 'NameData' => 'name',
+ 'int16' => 'int2',
+ 'int32' => 'int4',
+ 'int64' => 'int8',
+ 'Oid' => 'oid',
+ 'NameData' => 'name',
'TransactionId' => 'xid',
- 'XLogRecPtr' => 'pg_lsn');
+ 'XLogRecPtr' => 'pg_lsn');
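# Hedged example (editor's addition): how a C attribute type would be
# folded to its SQL-level name via the map above.
my $atttype = 'int32';
$atttype = $RENAME_ATTTYPE{$atttype}
  if exists $RENAME_ATTTYPE{$atttype};    # now 'int4'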
my %catalog;
my $declaring_attributes = 0;
- my $is_varlen = 0;
- my $is_client_code = 0;
+ my $is_varlen = 0;
+ my $is_client_code = 0;
- $catalog{columns} = [];
- $catalog{toasting} = [];
- $catalog{indexing} = [];
- $catalog{other_oids} = [];
+ $catalog{columns} = [];
+ $catalog{toasting} = [];
+ $catalog{indexing} = [];
+ $catalog{other_oids} = [];
$catalog{foreign_keys} = [];
- $catalog{client_code} = [];
+ $catalog{client_code} = [];
open(my $ifh, '<', $input_file) || die "$input_file: $!";
{
push @{ $catalog{toasting} },
{
- parent_table => $1,
- toast_oid => $2,
- toast_index_oid => $3,
- toast_oid_macro => $4,
+ parent_table => $1,
+ toast_oid => $2,
+ toast_index_oid => $3,
+ toast_oid_macro => $4,
toast_index_oid_macro => $5
};
}
push @{ $catalog{indexing} },
{
is_unique => $1 ? 1 : 0,
- is_pkey => $2 ? 1 : 0,
- index_name => $3,
- index_oid => $4,
+ is_pkey => $2 ? 1 : 0,
+ index_name => $3,
+ index_oid => $4,
index_oid_macro => $5,
- index_decl => $6
+ index_decl => $6
};
}
elsif (/^DECLARE_OID_DEFINING_MACRO\(\s*(\w+),\s*(\d+)\)/)
push @{ $catalog{other_oids} },
{
other_name => $1,
- other_oid => $2
+ other_oid => $2
};
}
elsif (
push @{ $catalog{foreign_keys} },
{
is_array => $1 ? 1 : 0,
- is_opt => $2 ? 1 : 0,
- fk_cols => $3,
+ is_opt => $2 ? 1 : 0,
+ fk_cols => $3,
pk_table => $4,
- pk_cols => $5
+ pk_cols => $5
};
}
elsif (/^CATALOG\((\w+),(\d+),(\w+)\)/)
{
- $catalog{catname} = $1;
- $catalog{relation_oid} = $2;
+ $catalog{catname} = $1;
+ $catalog{relation_oid} = $2;
$catalog{relation_oid_macro} = $3;
$catalog{bootstrap} = /BKI_BOOTSTRAP/ ? ' bootstrap' : '';
$catalog{shared_relation} =
/BKI_SHARED_RELATION/ ? ' shared_relation' : '';
if (/BKI_ROWTYPE_OID\((\d+),(\w+)\)/)
{
- $catalog{rowtype_oid} = $1;
+ $catalog{rowtype_oid} = $1;
$catalog{rowtype_oid_clause} = " rowtype_oid $1";
- $catalog{rowtype_oid_macro} = $2;
+ $catalog{rowtype_oid_macro} = $2;
}
else
{
- $catalog{rowtype_oid} = '';
+ $catalog{rowtype_oid} = '';
$catalog{rowtype_oid_clause} = '';
- $catalog{rowtype_oid_macro} = '';
+ $catalog{rowtype_oid_macro} = '';
}
$catalog{schema_macro} = /BKI_SCHEMA_MACRO/ ? 1 : 0;
$declaring_attributes = 1;
$atttype = '_' . $atttype;
}
- $column{type} = $atttype;
- $column{name} = $attname;
+ $column{type} = $atttype;
+ $column{name} = $attname;
$column{is_varlen} = 1 if $is_varlen;
foreach my $attopt (@attopts)
# BKI_LOOKUP implicitly makes an FK reference
push @{ $catalog{foreign_keys} },
{
- is_array =>
- ($atttype eq 'oidvector' || $atttype eq '_oid')
+ is_array => (
+ $atttype eq 'oidvector' || $atttype eq '_oid')
? 1
: 0,
- is_opt => $column{lookup_opt},
- fk_cols => $attname,
+ is_opt => $column{lookup_opt},
+ fk_cols => $attname,
pk_table => $column{lookup},
- pk_cols => 'oid'
+ pk_cols => 'oid'
};
}
else
$input_file =~ /(\w+)\.dat$/
or die "Input file $input_file needs to be a .dat file.\n";
my $catname = $1;
- my $data = [];
+ my $data = [];
if ($preserve_formatting)
{
sub GenerateArrayTypes
{
my $pgtype_schema = shift;
- my $types = shift;
+ my $types = shift;
my @array_types;
foreach my $elem_type (@$types)
my %array_type;
# Set up metadata fields for array type.
- $array_type{oid} = $elem_type->{array_type_oid};
+ $array_type{oid} = $elem_type->{array_type_oid};
$array_type{autogenerated} = 1;
- $array_type{line_number} = $elem_type->{line_number};
+ $array_type{line_number} = $elem_type->{line_number};
# Set up column values derived from the element type.
$array_type{typname} = '_' . $elem_type->{typname};
sub RenameTempFile
{
my $final_name = shift;
- my $extension = shift;
- my $temp_name = $final_name . $extension;
+ my $extension = shift;
+ my $temp_name = $final_name . $extension;
if (-f $final_name
&& compare($temp_name, $final_name) == 0)
result |= (mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE));
/*
- * Check if ACL_MAINTAIN is being checked and, if so, and not already set as
- * part of the result, then check if the user is a member of the
+ * Check if ACL_MAINTAIN is being checked and, if so, and not already set
+ * as part of the result, then check if the user is a member of the
* pg_maintain role, which allows VACUUM, ANALYZE, CLUSTER, REFRESH
* MATERIALIZED VIEW, and REINDEX on all relations.
*/
my $num_errors = 0;
GetOptions(
- 'output:s' => \$output_path,
- 'set-version:s' => \$major_version,
+ 'output:s' => \$output_path,
+ 'set-version:s' => \$major_version,
'include-path:s' => \$include_path) || usage();
# Sanity check arguments.
-die "No input files.\n" unless @ARGV;
+die "No input files.\n" unless @ARGV;
die "--set-version must be specified.\n" unless $major_version;
die "Invalid version string: $major_version\n"
unless $major_version =~ /^\d+$/;
my $catalog = Catalog::ParseHeader($header);
my $catname = $catalog->{catname};
- my $schema = $catalog->{columns};
+ my $schema = $catalog->{columns};
if (defined $catname)
{
if (defined $row->{descr})
{
my %descr = (
- objoid => $row->{oid},
- classoid => $catalog->{relation_oid},
- objsubid => 0,
+ objoid => $row->{oid},
+ classoid => $catalog->{relation_oid},
+ objsubid => 0,
description => $row->{descr});
if ($catalog->{shared_relation})
# We're parsing an enum, so start with 0 and increment
# every time we find an enum member.
-my $encid = 0;
+my $encid = 0;
my $collect_encodings = 0;
while (<$ef>)
{
# Map lookup name to the corresponding hash table.
my %lookup_kind = (
- pg_am => \%amoids,
- pg_authid => \%authidoids,
- pg_class => \%classoids,
- pg_collation => \%collationoids,
- pg_language => \%langoids,
- pg_namespace => \%namespaceoids,
- pg_opclass => \%opcoids,
- pg_operator => \%operoids,
- pg_opfamily => \%opfoids,
- pg_proc => \%procoids,
- pg_tablespace => \%tablespaceoids,
- pg_ts_config => \%tsconfigoids,
- pg_ts_dict => \%tsdictoids,
- pg_ts_parser => \%tsparseroids,
+ pg_am => \%amoids,
+ pg_authid => \%authidoids,
+ pg_class => \%classoids,
+ pg_collation => \%collationoids,
+ pg_language => \%langoids,
+ pg_namespace => \%namespaceoids,
+ pg_opclass => \%opcoids,
+ pg_operator => \%operoids,
+ pg_opfamily => \%opfoids,
+ pg_proc => \%procoids,
+ pg_tablespace => \%tablespaceoids,
+ pg_ts_config => \%tsconfigoids,
+ pg_ts_dict => \%tsdictoids,
+ pg_ts_parser => \%tsparseroids,
pg_ts_template => \%tstemplateoids,
- pg_type => \%typeoids,
- encoding => \%encids);
+ pg_type => \%typeoids,
+ encoding => \%encids);
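# Hedged sketch (assumed map contents): a BKI_LOOKUP rule picks one of the
# OID hashes above, letting a symbolic name be resolved to a numeric OID.
my $lookup = $lookup_kind{'pg_type'};    # i.e. \%typeoids
my $oid = $lookup->{'bool'};             # hypothetical symbol lookup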
# Open temp files
-my $tmpext = ".tmp$$";
+my $tmpext = ".tmp$$";
my $bkifile = $output_path . 'postgres.bki';
open my $bki, '>', $bkifile . $tmpext
or die "can't open $bkifile$tmpext: $!";
# each element of the array as per the lookup rule.
if ($column->{lookup})
{
- my $lookup = $lookup_kind{ $column->{lookup} };
+ my $lookup = $lookup_kind{ $column->{lookup} };
my $lookup_opt = $column->{lookup_opt};
my @lookupnames;
my @lookupoids;
printf $fk_info
"\t{ /* %s */ %s, /* %s */ %s, \"{%s}\", \"{%s}\", %s, %s},\n",
- $catname, $catalog->{relation_oid},
+ $catname, $catalog->{relation_oid},
$pktabname, $catalogs{$pktabname}->{relation_oid},
$fkinfo->{fk_cols},
$fkinfo->{pk_cols},
close $constraints;
# Finally, rename the completed files into place.
-Catalog::RenameTempFile($bkifile, $tmpext);
-Catalog::RenameTempFile($schemafile, $tmpext);
-Catalog::RenameTempFile($fk_info_file, $tmpext);
+Catalog::RenameTempFile($bkifile, $tmpext);
+Catalog::RenameTempFile($schemafile, $tmpext);
+Catalog::RenameTempFile($fk_info_file, $tmpext);
Catalog::RenameTempFile($constraints_file, $tmpext);
exit($num_errors != 0 ? 1 : 0);
push @tables_needing_macros, $table_name;
# Generate entries for user attributes.
- my $attnum = 0;
+ my $attnum = 0;
my $priorfixedwidth = 1;
foreach my $attr (@{ $table->{columns} })
{
$attnum++;
my %row;
- $row{attnum} = $attnum;
+ $row{attnum} = $attnum;
$row{attrelid} = $table->{relation_oid};
morph_row_for_pgattr(\%row, $schema, $attr, $priorfixedwidth);
{
$attnum = 0;
my @SYS_ATTRS = (
- { name => 'ctid', type => 'tid' },
- { name => 'xmin', type => 'xid' },
- { name => 'cmin', type => 'cid' },
- { name => 'xmax', type => 'xid' },
- { name => 'cmax', type => 'cid' },
+ { name => 'ctid', type => 'tid' },
+ { name => 'xmin', type => 'xid' },
+ { name => 'cmin', type => 'cid' },
+ { name => 'xmax', type => 'xid' },
+ { name => 'cmax', type => 'cid' },
{ name => 'tableoid', type => 'oid' });
foreach my $attr (@SYS_ATTRS)
{
$attnum--;
my %row;
- $row{attnum} = $attnum;
- $row{attrelid} = $table->{relation_oid};
+ $row{attnum} = $attnum;
+ $row{attrelid} = $table->{relation_oid};
$row{attstattarget} = '0';
morph_row_for_pgattr(\%row, $schema, $attr, 1);
# Copy the type data from pg_type, and add some type-dependent items
my $type = $types{$atttype};
- $row->{atttypid} = $type->{oid};
- $row->{attlen} = $type->{typlen};
- $row->{attbyval} = $type->{typbyval};
- $row->{attalign} = $type->{typalign};
+ $row->{atttypid} = $type->{oid};
+ $row->{attlen} = $type->{typlen};
+ $row->{attbyval} = $type->{typbyval};
+ $row->{attalign} = $type->{typalign};
$row->{attstorage} = $type->{typstorage};
# set attndims if it's an array type
# At this point the width of type name is still symbolic,
# so we need a special test.
$row->{attnotnull} =
- $row->{attlen} eq 'NAMEDATALEN' ? 't'
+ $row->{attlen} eq 'NAMEDATALEN' ? 't'
: $row->{attlen} > 0 ? 't'
: 'f';
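# (attnotnull is forced true for fixed-width types, including the symbolic
# NAMEDATALEN width; varlena types with attlen -1 fall through to 'f'.)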
}
# Write an entry to postgres.bki.
sub print_bki_insert
{
- my $row = shift;
+ my $row = shift;
my $schema = shift;
my @bki_values;
foreach my $column (@$schema)
{
- my $attname = $column->{name};
- my $atttype = $column->{type};
+ my $attname = $column->{name};
+ my $atttype = $column->{type};
my $bki_value = $row->{$attname};
# Fold backslash-zero to empty string if it's the entire string,
# quite identical, to the corresponding values in postgres.bki.
sub morph_row_for_schemapg
{
- my $row = shift;
+ my $row = shift;
my $pgattr_schema = shift;
foreach my $column (@$pgattr_schema)
# don't change.
elsif ($atttype eq 'bool')
{
- $row->{$attname} = 'true' if $row->{$attname} eq 't';
+ $row->{$attname} = 'true' if $row->{$attname} eq 't';
$row->{$attname} = 'false' if $row->{$attname} eq 'f';
}
# Skip for rowtypes of bootstrap catalogs, since they have their
# own naming convention defined elsewhere.
return
- if $typename eq 'pg_type'
+ if $typename eq 'pg_type'
or $typename eq 'pg_proc'
or $typename eq 'pg_attribute'
or $typename eq 'pg_class';
#endif /* USE_ASSERT_CHECKING */
/*
- * Skip insertions into non-summarizing indexes if we only need
- * to update summarizing indexes.
+ * Skip insertions into non-summarizing indexes if we only need to
+ * update summarizing indexes.
*/
if (onlySummarized && !indexInfo->ii_Summarizing)
continue;
if (OidIsValid(namespaceId) &&
!list_member_oid(oidlist, namespaceId) &&
object_aclcheck(NamespaceRelationId, namespaceId, roleid,
- ACL_USAGE) == ACLCHECK_OK &&
+ ACL_USAGE) == ACLCHECK_OK &&
InvokeNamespaceSearchHook(namespaceId, false))
oidlist = lappend_oid(oidlist, namespaceId);
}
if (OidIsValid(namespaceId) &&
!list_member_oid(oidlist, namespaceId) &&
object_aclcheck(NamespaceRelationId, namespaceId, roleid,
- ACL_USAGE) == ACLCHECK_OK &&
+ ACL_USAGE) == ACLCHECK_OK &&
InvokeNamespaceSearchHook(namespaceId, false))
oidlist = lappend_oid(oidlist, namespaceId);
}
* temp table creation request is made by someone with appropriate rights.
*/
if (object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(),
- ACL_CREATE_TEMP) != ACLCHECK_OK)
+ ACL_CREATE_TEMP) != ACLCHECK_OK)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied to create temporary tables in database \"%s\"",
/* not in catalogs, different from operator, so make shell */
aclresult = object_aclcheck(NamespaceRelationId, otherNamespace, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(otherNamespace));
/* FALLTHROUGH */
case SHARED_DEPENDENCY_OWNER:
+
/*
* Save it for deletion below, if it's a local object or a
* role grant. Other shared objects, such as databases,
if (OidIsValid(namespaceId))
{
aclresult = object_aclcheck(NamespaceRelationId, namespaceId, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(namespaceId));
AclResult aclresult;
aclresult = object_aclcheck(NamespaceRelationId, namespaceId, new_ownerId,
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(namespaceId));
*/
if (!IsBinaryUpgrade)
{
- char *langtag = icu_language_tag(colliculocale,
- icu_validation_level);
+ char *langtag = icu_language_tag(colliculocale,
+ icu_validation_level);
if (langtag && strcmp(colliculocale, langtag) != 0)
{
Datum
pg_collation_actual_version(PG_FUNCTION_ARGS)
{
- Oid collid = PG_GETARG_OID(0);
- char provider;
- char *locale;
- char *version;
- Datum datum;
+ Oid collid = PG_GETARG_OID(0);
+ char provider;
+ char *locale;
+ char *version;
+ Datum datum;
if (collid == DEFAULT_COLLATION_OID)
{
/* retrieve from pg_database */
HeapTuple dbtup = SearchSysCache1(DATABASEOID, ObjectIdGetDatum(MyDatabaseId));
+
if (!HeapTupleIsValid(dbtup))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
{
/* retrieve from pg_collation */
- HeapTuple colltp = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid));
+ HeapTuple colltp = SearchSysCache1(COLLOID, ObjectIdGetDatum(collid));
+
if (!HeapTupleIsValid(colltp))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
Oid collid;
/*
- * Some systems have locale names that don't consist entirely of
- * ASCII letters (such as "bokmål" or "français").
- * This is pretty silly, since we need the locale itself to
- * interpret the non-ASCII characters. We can't do much with
- * those, so we filter them out.
+ * Some systems have locale names that don't consist entirely of ASCII
+ * letters (such as "bokmål" or "français"). This is pretty
+ * silly, since we need the locale itself to interpret the non-ASCII
+ * characters. We can't do much with those, so we filter them out.
*/
if (!pg_is_ascii(locale))
{
return -1;
}
if (enc == PG_SQL_ASCII)
- return -1; /* C/POSIX are already in the catalog */
+ return -1; /* C/POSIX are already in the catalog */
/* count valid locales found in operating system */
(*nvalidp)++;
/*
- * Create a collation named the same as the locale, but quietly
- * doing nothing if it already exists. This is the behavior we
- * need even at initdb time, because some versions of "locale -a"
- * can report the same locale name more than once. And it's
- * convenient for later import runs, too, since you just about
- * always want to add on new locales without a lot of chatter
- * about existing ones.
+ * Create a collation named the same as the locale, but quietly doing
+ * nothing if it already exists. This is the behavior we need even at
+ * initdb time, because some versions of "locale -a" can report the same
+ * locale name more than once. And it's convenient for later import runs,
+ * too, since you just about always want to add on new locales without a
+ * lot of chatter about existing ones.
*/
collid = CollationCreate(locale, nspid, GetUserId(),
COLLPROVIDER_LIBC, true, enc,
param.nvalidp = &nvalid;
/*
- * Enumerate the locales that are either installed on or supported
- * by the OS.
+ * Enumerate the locales that are either installed on or supported by
+ * the OS.
*/
if (!EnumSystemLocalesEx(win32_read_locale, LOCALE_ALL,
(LPARAM) &param, NULL))
List *rlocatorlist = NIL;
LockRelId relid;
Snapshot snapshot;
- SMgrRelation smgr;
+ SMgrRelation smgr;
BufferAccessStrategy bstrategy;
/* Get pg_class relfilenumber. */
*/
if (!IsBinaryUpgrade && dbiculocale != src_iculocale)
{
- char *langtag = icu_language_tag(dbiculocale,
- icu_validation_level);
+ char *langtag = icu_language_tag(dbiculocale,
+ icu_validation_level);
if (langtag && strcmp(dbiculocale, langtag) != 0)
{
dst_deftablespace = get_tablespace_oid(tablespacename, false);
/* check permissions */
aclresult = object_aclcheck(TableSpaceRelationId, dst_deftablespace, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE,
tablespacename);
* If we're going to be reading data for the to-be-created database into
* shared_buffers, take a lock on it. Nobody should know that this
* database exists yet, but it's good to maintain the invariant that an
- * AccessExclusiveLock on the database is sufficient to drop all
- * of its buffers without worrying about more being read later.
+ * AccessExclusiveLock on the database is sufficient to drop all of its
+ * buffers without worrying about more being read later.
*
* Note that we need to do this before entering the
* PG_ENSURE_ERROR_CLEANUP block below, because createdb_failure_callback
* Permission checks
*/
aclresult = object_aclcheck(TableSpaceRelationId, dst_tblspcoid, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE,
tblspcname);
if (info == XLOG_DBASE_CREATE_FILE_COPY)
{
xl_dbase_create_file_copy_rec *xlrec =
- (xl_dbase_create_file_copy_rec *) XLogRecGetData(record);
+ (xl_dbase_create_file_copy_rec *) XLogRecGetData(record);
char *src_path;
char *dst_path;
char *parent_path;
else if (info == XLOG_DBASE_CREATE_WAL_LOG)
{
xl_dbase_create_wal_log_rec *xlrec =
- (xl_dbase_create_wal_log_rec *) XLogRecGetData(record);
+ (xl_dbase_create_wal_log_rec *) XLogRecGetData(record);
char *dbpath;
char *parent_path;
case OBJECT_TABLE:
case OBJECT_TABLESPACE:
case OBJECT_VIEW:
+
/*
* These are handled elsewhere, so if someone gets here the code
* is probably wrong or should be revisited.
{
BitmapIndexScan *bitmapindexscan = (BitmapIndexScan *) plan;
const char *indexname =
- explain_get_index_name(bitmapindexscan->indexid);
+ explain_get_index_name(bitmapindexscan->indexid);
if (es->format == EXPLAIN_FORMAT_TEXT)
appendStringInfo(es->str, " on %s",
for (n = 0; n < incrsortstate->shared_info->num_workers; n++)
{
IncrementalSortInfo *incsort_info =
- &incrsortstate->shared_info->sinfo[n];
+ &incrsortstate->shared_info->sinfo[n];
/*
* If a worker hasn't processed any sort groups at all, then
{
ListCell *cell;
const char *label =
- (list_length(css->custom_ps) != 1 ? "children" : "child");
+ (list_length(css->custom_ps) != 1 ? "children" : "child");
foreach(cell, css->custom_ps)
ExplainNode((PlanState *) lfirst(cell), ancestors, label, NULL, es);
namespaceId = QualifiedNameGetCreationNamespace(returnType->names,
&typname);
aclresult = object_aclcheck(NamespaceRelationId, namespaceId, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(namespaceId));
AclResult aclresult;
aclresult = object_aclcheck(LanguageRelationId, codeblock->langOid, GetUserId(),
- ACL_USAGE);
+ ACL_USAGE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_LANGUAGE,
NameStr(languageStruct->lanname));
AclResult aclresult;
aclresult = object_aclcheck(NamespaceRelationId, namespaceId, root_save_userid,
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(namespaceId));
AclResult aclresult;
aclresult = object_aclcheck(TableSpaceRelationId, tablespaceId, root_save_userid,
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE,
get_tablespace_name(tablespaceId));
AclResult aclresult;
aclresult = object_aclcheck(TableSpaceRelationId, params.tablespaceOid,
- GetUserId(), ACL_CREATE);
+ GetUserId(), ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE,
get_tablespace_name(params.tablespaceOid));
/*
* The table can be reindexed if the user has been granted MAINTAIN on
* the table or one of its partition ancestors or the user is a
- * superuser, the table owner, or the database/schema owner (but in the
- * latter case, only if it's not a shared relation). pg_class_aclcheck
- * includes the superuser case, and depending on objectKind we already
- * know that the user has permission to run REINDEX on this database or
- * schema per the permission checks at the beginning of this routine.
+ * superuser, the table owner, or the database/schema owner (but in
+ * the latter case, only if it's not a shared relation).
+ * pg_class_aclcheck includes the superuser case, and depending on
+ * objectKind we already know that the user has permission to run
+ * REINDEX on this database or schema per the permission checks at the
+ * beginning of this routine.
*/
if (classtuple->relisshared &&
pg_class_aclcheck(relid, GetUserId(), ACL_MAINTAIN) != ACLCHECK_OK &&
AclResult aclresult;
aclresult = object_aclcheck(TableSpaceRelationId, params->tablespaceOid,
- GetUserId(), ACL_CREATE);
+ GetUserId(), ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE,
get_tablespace_name(params->tablespaceOid));
* no special case for them.
*/
aclresult = object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_DATABASE,
get_database_name(MyDatabaseId));
PreventInTransactionBlock(isTopLevel, "CREATE SUBSCRIPTION ... WITH (create_slot = true)");
/*
- * We don't want to allow unprivileged users to be able to trigger attempts
- * to access arbitrary network destinations, so require the user to have
- * been specifically authorized to create subscriptions.
+ * We don't want to allow unprivileged users to be able to trigger
+ * attempts to access arbitrary network destinations, so require the user
+ * to have been specifically authorized to create subscriptions.
*/
if (!has_privs_of_role(owner, ROLE_PG_CREATE_SUBSCRIPTION))
ereport(ERROR,
* exempt a subscription from this requirement.
*/
if (!opts.passwordrequired && !superuser_arg(owner))
- ereport(ERROR,
- (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("password_required=false is superuser-only"),
- errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("password_required=false is superuser-only"),
+ errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
/*
* If built with appropriate switch, whine when regression-testing
if (!sub->passwordrequired && !superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("password_required=false is superuser-only"),
- errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
+ errmsg("password_required=false is superuser-only"),
+ errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
/* Lock the subscription so nobody else can do anything with it. */
LockSharedObject(SubscriptionRelationId, subid, 0, AccessExclusiveLock);
if (!form->subpasswordrequired && !superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("password_required=false is superuser-only"),
- errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
+ errmsg("password_required=false is superuser-only"),
+ errhint("Subscriptions with the password_required option set to false may only be created or modified by the superuser.")));
/* Must be able to become new owner */
check_can_set_role(GetUserId(), newOwnerId);
* current owner must have CREATE on database
*
* This is consistent with how ALTER SCHEMA ... OWNER TO works, but some
- * other object types behave differently (e.g. you can't give a table to
- * a user who lacks CREATE privileges on a schema).
+ * other object types behave differently (e.g. you can't give a table to a
+ * user who lacks CREATE privileges on a schema).
*/
aclresult = object_aclcheck(DatabaseRelationId, MyDatabaseId,
GetUserId(), ACL_CREATE);
AclResult aclresult;
aclresult = object_aclcheck(TableSpaceRelationId, tablespaceId, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE,
get_tablespace_name(tablespaceId));
resultRelInfo = resultRelInfos;
foreach(cell, rels)
{
- UserContext ucxt;
+ UserContext ucxt;
if (run_as_table_owner)
SwitchToUntrustedUser(resultRelInfo->ri_RelationDesc->rd_rel->relowner,
resultRelInfo = resultRelInfos;
foreach(cell, rels)
{
- UserContext ucxt;
+ UserContext ucxt;
if (run_as_table_owner)
SwitchToUntrustedUser(resultRelInfo->ri_RelationDesc->rd_rel->relowner,
if (CompressionMethodIsValid(attribute->attcompression))
{
const char *compression =
- GetCompressionMethodName(attribute->attcompression);
+ GetCompressionMethodName(attribute->attcompression);
if (def->compression == NULL)
def->compression = pstrdup(compression);
/* New owner must have CREATE privilege on namespace */
aclresult = object_aclcheck(NamespaceRelationId, namespaceOid, newOwnerId,
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(namespaceOid));
if (check_option)
{
const char *view_updatable_error =
- view_query_is_auto_updatable(view_query, true);
+ view_query_is_auto_updatable(view_query, true);
if (view_updatable_error)
ereport(ERROR,
AclResult aclresult;
aclresult = object_aclcheck(TableSpaceRelationId, new_tablespaceoid, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE,
get_tablespace_name(new_tablespaceoid));
if (IsA(stmt, RenameStmt))
{
aclresult = object_aclcheck(NamespaceRelationId, classform->relnamespace,
- GetUserId(), ACL_CREATE);
+ GetUserId(), ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(classform->relnamespace));
/* Check permissions, similarly complaining only if interactive */
aclresult = object_aclcheck(TableSpaceRelationId, curoid, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
{
if (source >= PGC_S_INTERACTIVE)
/* Check permissions similarly */
aclresult = object_aclcheck(TableSpaceRelationId, curoid, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
continue;
/* Check we have creation rights in target namespace */
aclresult = object_aclcheck(NamespaceRelationId, domainNamespace, GetUserId(),
- ACL_CREATE);
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(domainNamespace));
/* New owner must have CREATE privilege on namespace */
aclresult = object_aclcheck(NamespaceRelationId, typTup->typnamespace,
- newOwnerId,
- ACL_CREATE);
+ newOwnerId,
+ ACL_CREATE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(typTup->typnamespace));
int Password_encryption = PASSWORD_TYPE_SCRAM_SHA_256;
char *createrole_self_grant = "";
bool createrole_self_grant_enabled = false;
-GrantRoleOptions createrole_self_grant_options;
+GrantRoleOptions createrole_self_grant_options;
/* Hook to check passwords in CreateRole() and AlterRole() */
check_password_hook_type check_password_hook = NULL;
DefElem *dadminmembers = NULL;
DefElem *dvalidUntil = NULL;
DefElem *dbypassRLS = NULL;
- GrantRoleOptions popt;
+ GrantRoleOptions popt;
/* The defaults can vary depending on the original statement type */
switch (stmt->stmt_type)
*
* The grantor of record for this implicit grant is the bootstrap
* superuser, which means that the CREATEROLE user cannot revoke the
- * grant. They can however grant the created role back to themselves
- * with different options, since they enjoy ADMIN OPTION on it.
+ * grant. They can however grant the created role back to themselves with
+ * different options, since they enjoy ADMIN OPTION on it.
*/
if (!superuser())
{
BOOTSTRAP_SUPERUSERID, &poptself);
/*
- * We must make the implicit grant visible to the code below, else
- * the additional grants will fail.
+ * We must make the implicit grant visible to the code below, else the
+ * additional grants will fail.
*/
CommandCounterIncrement();
* Add the specified members to this new role. adminmembers get the admin
* option, rolemembers don't.
*
- * NB: No permissions check is required here. If you have enough rights
- * to create a role, you can add any members you like.
+ * NB: No permissions check is required here. If you have enough rights to
+ * create a role, you can add any members you like.
*/
AddRoleMems(currentUserId, stmt->role, roleid,
rolemembers, roleSpecsToIds(rolemembers),
DefElem *dbypassRLS = NULL;
Oid roleid;
Oid currentUserId = GetUserId();
- GrantRoleOptions popt;
+ GrantRoleOptions popt;
check_rolespec_name(stmt->role,
_("Cannot alter reserved roles."));
*/
if (dissuper)
{
- bool should_be_super = boolVal(dissuper->arg);
+ bool should_be_super = boolVal(dissuper->arg);
if (!should_be_super && roleid == BOOTSTRAP_SUPERUSERID)
ereport(ERROR,
shdepLockAndCheckObject(AuthIdRelationId, roleid);
/*
- * To mess with a superuser you gotta be superuser; otherwise you
- * need CREATEROLE plus admin option on the target role; unless you're
- * just trying to change your own settings
+ * To mess with a superuser you gotta be superuser; otherwise you need
+ * CREATEROLE plus admin option on the target role; unless you're just
+ * trying to change your own settings
*/
if (roleform->rolsuper)
{
else
{
if ((!have_createrole_privilege() ||
- !is_admin_of_role(GetUserId(), roleid))
+ !is_admin_of_role(GetUserId(), roleid))
&& roleid != GetUserId())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
Oid grantor;
List *grantee_ids;
ListCell *item;
- GrantRoleOptions popt;
+ GrantRoleOptions popt;
Oid currentUserId = GetUserId();
/* Parse options list. */
InitGrantRoleOptions(&popt);
foreach(item, stmt->opt)
{
- DefElem *opt = (DefElem *) lfirst(item);
+ DefElem *opt = (DefElem *) lfirst(item);
char *optval = defGetString(opt);
if (strcmp(opt->defname, "admin") == 0)
/*
* Step through all of the granted roles and add, update, or remove
* entries in pg_auth_members as appropriate. If stmt->is_grant is true,
- * we are adding new grants or, if they already exist, updating options
- * on those grants. If stmt->is_grant is false, we are revoking grants or
+ * we are adding new grants or, if they already exist, updating options on
+ * those grants. If stmt->is_grant is false, we are revoking grants or
* removing options from them.
*/
foreach(item, stmt->granted_roles)
ObjectIdGetDatum(grantorId));
/*
- * If we found a tuple, update it with new option values, unless
- * there are no changes, in which case issue a WARNING.
+ * If we found a tuple, update it with new option values, unless there
+ * are no changes, in which case issue a WARNING.
*
* If we didn't find a tuple, just insert one.
*/
popt->inherit;
else
{
- HeapTuple mrtup;
- Form_pg_authid mrform;
+ HeapTuple mrtup;
+ Form_pg_authid mrform;
mrtup = SearchSysCache1(AUTHOID, memberid);
if (!HeapTupleIsValid(mrtup))
/*
* If popt.specified == 0, we're revoking the grant entirely; otherwise,
* we expect just one bit to be set, and we're revoking the corresponding
- * option. As of this writing, there's no syntax that would allow for
- * an attempt to revoke multiple options at once, and the logic below
+ * option. As of this writing, there's no syntax that would allow for an
+ * attempt to revoke multiple options at once, and the logic below
* wouldn't work properly if such syntax were added, so assert that our
* caller isn't trying to do that.
*/
}
else
{
- bool revoke_admin_option_only;
+ bool revoke_admin_option_only;
/*
* Revoking the grant entirely, or ADMIN option on a grant,
void
assign_createrole_self_grant(const char *newval, void *extra)
{
- unsigned options = * (unsigned *) extra;
+ unsigned options = *(unsigned *) extra;
createrole_self_grant_enabled = (options != 0);
createrole_self_grant_options.specified = GRANT_ROLE_SPECIFIED_ADMIN
if (check_option)
{
const char *view_updatable_error =
- view_query_is_auto_updatable(viewParse, true);
+ view_query_is_auto_updatable(viewParse, true);
if (view_updatable_error)
ereport(ERROR,
/* Check permission to call function */
aclresult = object_aclcheck(ProcedureRelationId, cmpfuncid,
- GetUserId(),
- ACL_EXECUTE);
+ GetUserId(),
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(cmpfuncid));
if (OidIsValid(opexpr->hashfuncid))
{
aclresult = object_aclcheck(ProcedureRelationId, opexpr->hashfuncid,
- GetUserId(),
- ACL_EXECUTE);
+ GetUserId(),
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(opexpr->hashfuncid));
* column sorted on.
*/
TargetEntry *source_tle =
- (TargetEntry *) linitial(pertrans->aggref->args);
+ (TargetEntry *) linitial(pertrans->aggref->args);
Assert(list_length(pertrans->aggref->args) == 1);
{
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerGroup pergroup_allaggs =
- aggstate->all_pergroups[op->d.agg_plain_pergroup_nullcheck.setoff];
+ aggstate->all_pergroups[op->d.agg_plain_pergroup_nullcheck.setoff];
if (pergroup_allaggs == NULL)
EEO_JUMP(op->d.agg_plain_pergroup_nullcheck.jumpnull);
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup =
- &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal);
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup =
- &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal);
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup =
- &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(pertrans->transtypeByVal);
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup =
- &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal);
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup =
- &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal);
AggState *aggstate = castNode(AggState, state->parent);
AggStatePerTrans pertrans = op->d.agg_trans.pertrans;
AggStatePerGroup pergroup =
- &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
+ &aggstate->all_pergroups[op->d.agg_trans.setoff][op->d.agg_trans.transno];
Assert(!pertrans->transtypeByVal);
continue;
/*
- * Skip processing of non-summarizing indexes if we only
- * update summarizing indexes
+ * Skip processing of non-summarizing indexes if we only update
+ * summarizing indexes
*/
if (onlySummarizing && !indexInfo->ii_Summarizing)
continue;
if (first_time)
{
MemoryContext oldcontext =
- MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
+ MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
tupstore = tuplestore_begin_heap(randomAccess, false, work_mem);
rsinfo.setResult = tupstore;
if (tupdesc == NULL)
{
MemoryContext oldcontext =
- MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
+ MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
/*
* This is the first non-NULL result from the
if (rsinfo.setResult == NULL)
{
MemoryContext oldcontext =
- MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
+ MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
tupstore = tuplestore_begin_heap(randomAccess, false, work_mem);
rsinfo.setResult = tupstore;
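The set-returning-function idiom above, sketched with the switch-back that the trimmed context omits (randomAccess and work_mem are assumed from the surrounding scope; this is not the exact surrounding code):

	if (rsinfo.setResult == NULL)
	{
		MemoryContext oldcontext =
			MemoryContextSwitchTo(econtext->ecxt_per_query_memory);

		/* create in per-query memory so it outlives the per-tuple context */
		rsinfo.setResult = tuplestore_begin_heap(randomAccess, false, work_mem);
		MemoryContextSwitchTo(oldcontext);
	}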
/* Check permission to call aggregate function */
aclresult = object_aclcheck(ProcedureRelationId, aggref->aggfnoid, GetUserId(),
- ACL_EXECUTE);
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_AGGREGATE,
get_func_name(aggref->aggfnoid));
if (OidIsValid(finalfn_oid))
{
aclresult = object_aclcheck(ProcedureRelationId, finalfn_oid, aggOwner,
- ACL_EXECUTE);
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(finalfn_oid));
if (OidIsValid(serialfn_oid))
{
aclresult = object_aclcheck(ProcedureRelationId, serialfn_oid, aggOwner,
- ACL_EXECUTE);
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(serialfn_oid));
if (OidIsValid(deserialfn_oid))
{
aclresult = object_aclcheck(ProcedureRelationId, deserialfn_oid, aggOwner,
- ACL_EXECUTE);
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(deserialfn_oid));
else
{
size_t tuple_size =
- MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
+ MAXALIGN(HJTUPLE_OVERHEAD + tuple->t_len);
/* It belongs in a later batch. */
hashtable->batches[batchno].estimated_size += tuple_size;
for (i = 1; i < old_nbatch; ++i)
{
ParallelHashJoinBatch *shared =
- NthParallelHashJoinBatch(old_batches, i);
+ NthParallelHashJoinBatch(old_batches, i);
old_inner_tuples[i] = sts_attach(ParallelHashJoinBatchInner(shared),
ParallelWorkerNumber + 1,
while (DsaPointerIsValid(batch->chunks))
{
HashMemoryChunk chunk =
- dsa_get_address(hashtable->area, batch->chunks);
+ dsa_get_address(hashtable->area, batch->chunks);
dsa_pointer next = chunk->next.shared;
dsa_free(hashtable->area, batch->chunks);
{
SharedTuplestoreAccessor *inner_tuples;
Barrier *batch_barrier =
- &hashtable->batches[batchno].shared->batch_barrier;
+ &hashtable->batches[batchno].shared->batch_barrier;
switch (BarrierAttach(batch_barrier))
{
BufFile *file = *fileptr;
/*
- * The batch file is lazily created. If this is the first tuple
- * written to this batch, the batch file is created and its buffer is
- * allocated in the spillCxt context, NOT in the batchCxt.
+ * The batch file is lazily created. If this is the first tuple written to
+ * this batch, the batch file is created and its buffer is allocated in
+ * the spillCxt context, NOT in the batchCxt.
*
- * During the build phase, buffered files are created for inner
- * batches. Each batch's buffered file is closed (and its buffer freed)
- * after the batch is loaded into memory during the outer side scan.
- * Therefore, it is necessary to allocate the batch file buffer in a
- * memory context which outlives the batch itself.
+ * During the build phase, buffered files are created for inner batches.
+ * Each batch's buffered file is closed (and its buffer freed) after the
+ * batch is loaded into memory during the outer side scan. Therefore, it
+ * is necessary to allocate the batch file buffer in a memory context
+ * which outlives the batch itself.
*
- * Also, we use spillCxt instead of hashCxt for a better accounting of
- * the spilling memory consumption.
+ * Also, we use spillCxt instead of hashCxt for a better accounting of the
+ * spilling memory consumption.
*/
if (file == NULL)
{
- MemoryContext oldctx = MemoryContextSwitchTo(hashtable->spillCxt);
+ MemoryContext oldctx = MemoryContextSwitchTo(hashtable->spillCxt);
file = BufFileCreateTemp(false);
*fileptr = file;
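A self-contained sketch of the lazy-creation pattern described in the comment above; spill_tuple and its parameters are illustrative, not the surrounding function:

	static void
	spill_tuple(BufFile **fileptr, MemoryContext spillCxt,
				const void *tuple, size_t len)
	{
		if (*fileptr == NULL)
		{
			/* file buffer is allocated in spillCxt, which outlives the batch */
			MemoryContext oldctx = MemoryContextSwitchTo(spillCxt);

			*fileptr = BufFileCreateTemp(false);
			MemoryContextSwitchTo(oldctx);
		}
		BufFileWrite(*fileptr, tuple, len);
	}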
{
int plan_node_id = state->js.ps.plan->plan_node_id;
ParallelHashJoinState *pstate =
- shm_toc_lookup(pcxt->toc, plan_node_id, false);
+ shm_toc_lookup(pcxt->toc, plan_node_id, false);
/*
* It would be possible to reuse the shared hash table in single-batch
HashState *hashNode;
int plan_node_id = state->js.ps.plan->plan_node_id;
ParallelHashJoinState *pstate =
- shm_toc_lookup(pwcxt->toc, plan_node_id, false);
+ shm_toc_lookup(pwcxt->toc, plan_node_id, false);
/* Attach to the space for shared temporary files. */
SharedFileSetAttach(&pstate->fileset, pwcxt->seg);
if (incrsortstate->ss.ps.instrument != NULL)
{
IncrementalSortGroupInfo *fullsortGroupInfo =
- &incrsortstate->incsort_info.fullsortGroupInfo;
+ &incrsortstate->incsort_info.fullsortGroupInfo;
IncrementalSortGroupInfo *prefixsortGroupInfo =
- &incrsortstate->incsort_info.prefixsortGroupInfo;
+ &incrsortstate->incsort_info.prefixsortGroupInfo;
fullsortGroupInfo->groupCount = 0;
fullsortGroupInfo->maxDiskSpaceUsed = 0;
{
bool updated; /* did UPDATE actually occur? */
bool crossPartUpdate; /* was it a cross-partition update? */
- TU_UpdateIndexes updateIndexes; /* Which index updates are required? */
+ TU_UpdateIndexes updateIndexes; /* Which index updates are required? */
/*
* Lock mode to acquire on the latest tuple version before performing
{
TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor);
TupleDesc plan_tdesc =
- CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
+ CreateTupleDescCopy(planSlot->tts_tupleDescriptor);
resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] =
MakeSingleTupleTableSlot(tdesc, slot->tts_ops);
int colno;
Datum value;
int ordinalitycol =
- ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol;
+ ((TableFuncScan *) (tstate->ss.ps.plan))->tablefunc->ordinalitycol;
/*
* Install the document as a possibly-toasted Datum into the tablefunc
/* Check permission to call window function */
aclresult = object_aclcheck(ProcedureRelationId, wfunc->winfnoid, GetUserId(),
- ACL_EXECUTE);
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(wfunc->winfnoid));
if (!OidIsValid(aggform->aggminvtransfn))
use_ma_code = false; /* sine qua non */
else if (aggform->aggmfinalmodify == AGGMODIFY_READ_ONLY &&
- aggform->aggfinalmodify != AGGMODIFY_READ_ONLY)
+ aggform->aggfinalmodify != AGGMODIFY_READ_ONLY)
use_ma_code = true; /* decision forced by safety */
else if (winstate->frameOptions & FRAMEOPTION_START_UNBOUNDED_PRECEDING)
use_ma_code = false; /* non-moving frame head */
ReleaseSysCache(procTuple);
aclresult = object_aclcheck(ProcedureRelationId, transfn_oid, aggOwner,
- ACL_EXECUTE);
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(transfn_oid));
if (OidIsValid(invtransfn_oid))
{
aclresult = object_aclcheck(ProcedureRelationId, invtransfn_oid, aggOwner,
- ACL_EXECUTE);
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(invtransfn_oid));
if (OidIsValid(finalfn_oid))
{
aclresult = object_aclcheck(ProcedureRelationId, finalfn_oid, aggOwner,
- ACL_EXECUTE);
+ ACL_EXECUTE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_FUNCTION,
get_func_name(finalfn_oid));
if (tdata->tg_newtable)
{
EphemeralNamedRelation enr =
- palloc(sizeof(EphemeralNamedRelationData));
+ palloc(sizeof(EphemeralNamedRelationData));
int rc;
enr->md.name = tdata->tg_trigger->tgnewtable;
if (tdata->tg_oldtable)
{
EphemeralNamedRelation enr =
- palloc(sizeof(EphemeralNamedRelationData));
+ palloc(sizeof(EphemeralNamedRelationData));
int rc;
enr->md.name = tdata->tg_trigger->tgoldtable;
LLVMInitializeNativeAsmParser();
/*
- * When targeting an LLVM version with opaque pointers enabled by
- * default, turn them off for the context we build our code in. We don't
- * need to do so for other contexts (e.g. llvm_ts_context). Once the IR is
+ * When targeting an LLVM version with opaque pointers enabled by default,
+ * turn them off for the context we build our code in. We don't need to
+ * do so for other contexts (e.g. llvm_ts_context). Once the IR is
* generated, it carries the necessary information.
*/
#if LLVM_VERSION_MAJOR > 14
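The guarded call itself is trimmed from this hunk; judging by the comment and the LLVM-C API it is presumably of this shape (an assumption, not verbatim):

#if LLVM_VERSION_MAJOR > 14
	/* assumed reconstruction: keep typed pointers in the IR-building context */
	LLVMContextSetOpaquePointers(LLVMGetGlobalContext(), false);
#endif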
static LLVMErrorRef
llvm_resolve_symbols(LLVMOrcDefinitionGeneratorRef GeneratorObj, void *Ctx,
- LLVMOrcLookupStateRef * LookupState, LLVMOrcLookupKind Kind,
+ LLVMOrcLookupStateRef *LookupState, LLVMOrcLookupKind Kind,
LLVMOrcJITDylibRef JD, LLVMOrcJITDylibLookupFlags JDLookupFlags,
LLVMOrcCLookupSet LookupSet, size_t LookupSetSize)
{
llvm_create_object_layer(void *Ctx, LLVMOrcExecutionSessionRef ES, const char *Triple)
{
LLVMOrcObjectLayerRef objlayer =
- LLVMOrcCreateRTDyldObjectLinkingLayerWithSectionMemoryManager(ES);
+ LLVMOrcCreateRTDyldObjectLinkingLayerWithSectionMemoryManager(ES);
#if defined(HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER) && HAVE_DECL_LLVMCREATEGDBREGISTRATIONLISTENER
if (jit_debugging_support)
{
LLVMValueRef v_tmp_loaddata;
LLVMTypeRef vartypep =
- LLVMPointerType(LLVMIntType(att->attlen * 8), 0);
+ LLVMPointerType(LLVMIntType(att->attlen * 8), 0);
v_tmp_loaddata =
LLVMBuildPointerCast(b, v_attdatap, vartypep, "");
else
{
LLVMValueRef v_value =
- LLVMBuildLoad(b, v_resvaluep, "");
+ LLVMBuildLoad(b, v_resvaluep, "");
v_value = LLVMBuildZExt(b,
LLVMBuildICmp(b, LLVMIntEQ,
/*
* pergroup = &aggstate->all_pergroups
- * [op->d.agg_trans.setoff]
- * [op->d.agg_trans.transno];
+ * [op->d.agg_trans.setoff] [op->d.agg_trans.transno];
*/
v_allpergroupsp =
l_load_struct_gep(b, v_aggstatep,
/*
* Use the configured keytab, if there is one. As we now require MIT
- * Kerberos, we might consider using the credential store extensions in the
- * future instead of the environment variable.
+ * Kerberos, we might consider using the credential store extensions in
+ * the future instead of the environment variable.
*/
if (pg_krb_server_keyfile != NULL && pg_krb_server_keyfile[0] != '\0')
{
if (namelen > MAXLEN)
{
/*
- * Keep the end of the name, not the beginning, since the most specific
- * field is likely to give users the most information.
+ * Keep the end of the name, not the beginning, since the most
+ * specific field is likely to give users the most information.
*/
truncated = name + namelen - MAXLEN;
truncated[0] = truncated[1] = truncated[2] = '.';
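A worked sketch of the tail-keeping truncation above (buffer handling simplified; not the actual code around prepare_cert_name()):

	/*
	 * With maxlen = 16, "CN=very-long-server-name" (24 bytes) keeps its last
	 * 16 bytes and marks the cut: "...g-server-name".
	 */
	static const char *
	truncate_keep_tail(const char *name, size_t namelen, size_t maxlen)
	{
		static char buf[64];	/* assumes maxlen < sizeof(buf) */

		if (namelen <= maxlen)
			return name;
		memcpy(buf, name + namelen - maxlen, maxlen + 1);	/* include NUL */
		buf[0] = buf[1] = buf[2] = '.';
		return buf;
	}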
/*
* Get the Subject and Issuer for logging, but don't let maliciously
- * huge certs flood the logs, and don't reflect non-ASCII bytes into it
- * either.
+ * huge certs flood the logs, and don't reflect non-ASCII bytes into
+ * it either.
*/
subject = X509_NAME_to_cstring(X509_get_subject_name(cert));
sub_prepared = prepare_cert_name(subject);
if (!ok)
{
/*
- * File contained one or more errors, so bail out. MemoryContextDelete
- * is enough to clean up everything, including regexes.
+ * File contained one or more errors, so bail out.
+ * MemoryContextDelete is enough to clean up everything, including
+ * regexes.
*/
MemoryContextDelete(hbacxt);
return false;
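The cleanup idiom both of these hunks rely on, sketched: do all parsing allocations in one private memory context, so a single delete releases everything on failure (parse_everything is a hypothetical stand-in):

	MemoryContext parsecxt = AllocSetContextCreate(CurrentMemoryContext,
												   "parse file",
												   ALLOCSET_DEFAULT_SIZES);
	MemoryContext oldcxt = MemoryContextSwitchTo(parsecxt);
	bool		ok = parse_everything();	/* hypothetical parse step */

	MemoryContextSwitchTo(oldcxt);
	if (!ok)
	{
		/* one call frees every parse-time allocation, regexes included */
		MemoryContextDelete(parsecxt);
		return false;
	}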
if (!ok)
{
/*
- * File contained one or more errors, so bail out. MemoryContextDelete
- * is enough to clean up everything, including regexes.
+ * File contained one or more errors, so bail out.
+ * MemoryContextDelete is enough to clean up everything, including
+ * regexes.
*/
MemoryContextDelete(ident_context);
return false;
# In HEAD, these variables should be left undef, since we don't promise
# ABI stability during development.
-my $last_nodetag = undef;
+my $last_nodetag = undef;
my $last_nodetag_no = undef;
# output file names
# (Ideally we'd mark List as "special copy/equal" not "no copy/equal".
# But until there's other use-cases for that, just hot-wire the tests
# that would need to distinguish.)
-push @no_copy, qw(List);
-push @no_equal, qw(List);
-push @no_query_jumble, qw(List);
+push @no_copy, qw(List);
+push @no_equal, qw(List);
+push @no_query_jumble, qw(List);
push @special_read_write, qw(List);
# Nodes with custom copy/equal implementations are skipped from
}
$file_content .= $raw_file_content;
- my $lineno = 0;
+ my $lineno = 0;
my $prevline = '';
foreach my $line (split /\n/, $file_content)
{
if ($line =~ /;$/)
{
# found the end, re-attach any previous line(s)
- $line = $prevline . $line;
+ $line = $prevline . $line;
$prevline = '';
}
elsif ($prevline eq ''
if ($subline == 1)
{
$is_node_struct = 0;
- $supertype = undef;
+ $supertype = undef;
next if $line eq '{';
die "$infile:$lineno: expected opening brace\n";
}
elsif ($subline == 2
&& $line =~ /^\s*pg_node_attr\(([\w(), ]*)\)$/)
{
- $node_attrs = $1;
+ $node_attrs = $1;
$node_attrs_lineno = $lineno;
# hack: don't count the line
$subline--;
}
elsif ($line =~ /\s*(\w+)\s+(\w+);/ and elem $1, @node_types)
{
- $is_node_struct = 1;
- $supertype = $1;
+ $is_node_struct = 1;
+ $supertype = $1;
$supertype_field = $2;
next;
}
}
elsif ($attr eq 'no_copy_equal')
{
- push @no_copy, $in_struct;
+ push @no_copy, $in_struct;
push @no_equal, $in_struct;
}
elsif ($attr eq 'no_query_jumble')
push @node_types, $in_struct;
# field names, types, attributes
- my @f = @my_fields;
+ my @f = @my_fields;
my %ft = %my_field_types;
my %fa = %my_field_attrs;
unshift @f, @superfields;
}
# save in global info structure
- $node_type_info{$in_struct}->{fields} = \@f;
+ $node_type_info{$in_struct}->{fields} = \@f;
$node_type_info{$in_struct}->{field_types} = \%ft;
$node_type_info{$in_struct}->{field_attrs} = \%fa;
}
# start new cycle
- $in_struct = undef;
- $node_attrs = '';
- @my_fields = ();
+ $in_struct = undef;
+ $node_attrs = '';
+ @my_fields = ();
%my_field_types = ();
%my_field_attrs = ();
}
{
if ($is_node_struct)
{
- my $type = $1;
- my $name = $2;
+ my $type = $1;
+ my $name = $2;
my $array_size = $3;
- my $attrs = $4;
+ my $attrs = $4;
# strip "const"
$type =~ s/^const\s*//;
{
if ($is_node_struct)
{
- my $type = $1;
- my $name = $2;
- my $args = $3;
+ my $type = $1;
+ my $name = $2;
+ my $args = $3;
my $attrs = $4;
my @attrs;
if ($line =~ /^(?:typedef )?struct (\w+)$/ && $1 ne 'Node')
{
$in_struct = $1;
- $subline = 0;
+ $subline = 0;
}
# one node type typedef'ed directly from another
elsif ($line =~ /^typedef (\w+) (\w+);$/ and elem $1, @node_types)
{
my $alias_of = $1;
- my $n = $2;
+ my $n = $2;
# copy everything over
push @node_types, $n;
- my @f = @{ $node_type_info{$alias_of}->{fields} };
+ my @f = @{ $node_type_info{$alias_of}->{fields} };
my %ft = %{ $node_type_info{$alias_of}->{field_types} };
my %fa = %{ $node_type_info{$alias_of}->{field_attrs} };
- $node_type_info{$n}->{fields} = \@f;
+ $node_type_info{$n}->{fields} = \@f;
$node_type_info{$n}->{field_types} = \%ft;
$node_type_info{$n}->{field_attrs} = \%fa;
}
printf $nt $header_comment, 'nodetags.h';
-my $tagno = 0;
+my $tagno = 0;
my $last_tag = undef;
foreach my $n (@node_types, @extra_tags)
{
{
next if elem $n, @abstract_types;
next if elem $n, @nodetag_only;
- my $struct_no_copy = (elem $n, @no_copy);
+ my $struct_no_copy = (elem $n, @no_copy);
my $struct_no_equal = (elem $n, @no_equal);
next if $struct_no_copy && $struct_no_equal;
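For orientation, the per-field print statements below emit C of roughly this shape into copyfuncs.funcs.c (illustrative; the real generated function may differ in detail):

	static RangeVar *
	_copyRangeVar(const RangeVar *from)
	{
		RangeVar   *newnode = makeNode(RangeVar);

		COPY_STRING_FIELD(catalogname);
		COPY_STRING_FIELD(schemaname);
		COPY_STRING_FIELD(relname);
		COPY_SCALAR_FIELD(inh);
		COPY_SCALAR_FIELD(relpersistence);
		COPY_NODE_FIELD(alias);
		COPY_LOCATION_FIELD(location);

		return newnode;
	}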
# print instructions for each field
foreach my $f (@{ $node_type_info{$n}->{fields} })
{
- my $t = $node_type_info{$n}->{field_types}{$f};
- my @a = @{ $node_type_info{$n}->{field_attrs}{$f} };
- my $copy_ignore = $struct_no_copy;
+ my $t = $node_type_info{$n}->{field_types}{$f};
+ my @a = @{ $node_type_info{$n}->{field_attrs}{$f} };
+ my $copy_ignore = $struct_no_copy;
my $equal_ignore = $struct_no_equal;
# extract per-field attributes
my $array_size_field;
my $copy_as_field;
- my $copy_as_scalar = 0;
+ my $copy_as_scalar = 0;
my $equal_as_scalar = 0;
foreach my $a (@a)
{
# select instructions by field type
if ($t eq 'char*')
{
- print $cff "\tCOPY_STRING_FIELD($f);\n" unless $copy_ignore;
+ print $cff "\tCOPY_STRING_FIELD($f);\n" unless $copy_ignore;
print $eff "\tCOMPARE_STRING_FIELD($f);\n" unless $equal_ignore;
}
elsif ($t eq 'Bitmapset*' || $t eq 'Relids')
}
elsif ($t eq 'int' && $f =~ 'location$')
{
- print $cff "\tCOPY_LOCATION_FIELD($f);\n" unless $copy_ignore;
+ print $cff "\tCOPY_LOCATION_FIELD($f);\n" unless $copy_ignore;
print $eff "\tCOMPARE_LOCATION_FIELD($f);\n" unless $equal_ignore;
}
elsif (elem $t, @scalar_types or elem $t, @enum_types)
elsif ($t eq 'function pointer')
{
# we can copy and compare as a scalar
- print $cff "\tCOPY_SCALAR_FIELD($f);\n" unless $copy_ignore;
+ print $cff "\tCOPY_SCALAR_FIELD($f);\n" unless $copy_ignore;
print $eff "\tCOMPARE_SCALAR_FIELD($f);\n" unless $equal_ignore;
}
# node type
and $1 ne 'List'
and !$equal_ignore;
- print $cff "\tCOPY_NODE_FIELD($f);\n" unless $copy_ignore;
+ print $cff "\tCOPY_NODE_FIELD($f);\n" unless $copy_ignore;
print $eff "\tCOMPARE_NODE_FIELD($f);\n" unless $equal_ignore;
}
# array (inline)
elsif ($t =~ /^\w+\[\w+\]$/)
{
- print $cff "\tCOPY_ARRAY_FIELD($f);\n" unless $copy_ignore;
+ print $cff "\tCOPY_ARRAY_FIELD($f);\n" unless $copy_ignore;
print $eff "\tCOMPARE_ARRAY_FIELD($f);\n" unless $equal_ignore;
}
elsif ($t eq 'struct CustomPathMethods*'
# Fields of these types are required to be a pointer to a
# static table of callback functions. So we don't copy
# the table itself, just reference the original one.
- print $cff "\tCOPY_SCALAR_FIELD($f);\n" unless $copy_ignore;
+ print $cff "\tCOPY_SCALAR_FIELD($f);\n" unless $copy_ignore;
print $eff "\tCOMPARE_SCALAR_FIELD($f);\n" unless $equal_ignore;
}
else
{
print $off "\tWRITE_FLOAT_FIELD($f.startup);\n";
print $off "\tWRITE_FLOAT_FIELD($f.per_tuple);\n";
- print $rff "\tREAD_FLOAT_FIELD($f.startup);\n" unless $no_read;
+ print $rff "\tREAD_FLOAT_FIELD($f.startup);\n" unless $no_read;
print $rff "\tREAD_FLOAT_FIELD($f.per_tuple);\n" unless $no_read;
}
elsif ($t eq 'Selectivity')
# print instructions for each field
foreach my $f (@{ $node_type_info{$n}->{fields} })
{
- my $t = $node_type_info{$n}->{field_types}{$f};
- my @a = @{ $node_type_info{$n}->{field_attrs}{$f} };
+ my $t = $node_type_info{$n}->{field_types}{$f};
+ my @a = @{ $node_type_info{$n}->{field_attrs}{$f} };
my $query_jumble_ignore = $struct_no_query_jumble;
my $query_jumble_location = 0;
{
PathKey *key = (PathKey *) lfirst(l);
EquivalenceMember *member = (EquivalenceMember *)
- linitial(key->pk_eclass->ec_members);
+ linitial(key->pk_eclass->ec_members);
/*
* Check if the expression contains Var with "varno 0" so that we
if (leaf_relid)
{
RowIdentityVarInfo *ridinfo = (RowIdentityVarInfo *)
- list_nth(context->root->row_identity_vars, var->varattno - 1);
+ list_nth(context->root->row_identity_vars, var->varattno - 1);
if (bms_is_member(leaf_relid, ridinfo->rowidrels))
{
{
/* UPDATE/DELETE/MERGE row identity vars are always needed */
RowIdentityVarInfo *ridinfo = (RowIdentityVarInfo *)
- list_nth(root->row_identity_vars, var->varattno - 1);
+ list_nth(root->row_identity_vars, var->varattno - 1);
/* Update reltarget width estimate from RowIdentityVarInfo */
joinrel->reltarget->width += ridinfo->rowidwidth;
use strict;
use warnings;
-my $gram_filename = $ARGV[0];
+my $gram_filename = $ARGV[0];
my $kwlist_filename = $ARGV[1];
my $errors = 0;
$\ = "\n"; # set output record separator
my %keyword_categories;
-$keyword_categories{'unreserved_keyword'} = 'UNRESERVED_KEYWORD';
-$keyword_categories{'col_name_keyword'} = 'COL_NAME_KEYWORD';
+$keyword_categories{'unreserved_keyword'} = 'UNRESERVED_KEYWORD';
+$keyword_categories{'col_name_keyword'} = 'COL_NAME_KEYWORD';
$keyword_categories{'type_func_name_keyword'} = 'TYPE_FUNC_NAME_KEYWORD';
-$keyword_categories{'reserved_keyword'} = 'RESERVED_KEYWORD';
+$keyword_categories{'reserved_keyword'} = 'RESERVED_KEYWORD';
open(my $gram, '<', $gram_filename) || die("Could not open : $gram_filename");
if ($line =~ /^PG_KEYWORD\(\"(.*)\", (.*), (.*), (.*)\)/)
{
my ($kwstring) = $1;
- my ($kwname) = $2;
+ my ($kwname) = $2;
my ($kwcat_id) = $3;
my ($collabel) = $4;
if (format->format_type == JS_FORMAT_JSON)
{
JsonEncoding enc = format->encoding != JS_ENC_DEFAULT ?
- format->encoding : JS_ENC_UTF8;
+ format->encoding : JS_ENC_UTF8;
if (targettype != BYTEAOID &&
format->encoding != JS_ENC_DEFAULT)
/*
* Set up the MERGE target table. The target table is added to the
- * namespace below and to joinlist in transform_MERGE_to_join, so don't
- * do it here.
+ * namespace below and to joinlist in transform_MERGE_to_join, so don't do
+ * it here.
*/
qry->resultRelation = setTargetTable(pstate, stmt->relation,
stmt->relation->inh,
if (relation->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
{
aclresult = object_aclcheck(TypeRelationId, relation->rd_rel->reltype, GetUserId(),
- ACL_USAGE);
+ ACL_USAGE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TYPE,
RelationGetRelationName(relation));
* mentioned above.
*/
Datum attoptions =
- get_attoptions(RelationGetRelid(index_rel), i + 1);
+ get_attoptions(RelationGetRelid(index_rel), i + 1);
defopclass = GetDefaultOpClass(attform->atttypid,
index_rel->rd_rel->relam);
/*
* The default partitions have to be joined with each other, so merge
* them. Note that each of the default partitions isn't merged yet
- * (see, process_outer_partition()/process_inner_partition()), so
- * they should be merged successfully. The merged partition will act
- * as the default partition of the join relation.
+ * (see, process_outer_partition()/process_inner_partition()), so they
+ * should be merged successfully. The merged partition will act as
+ * the default partition of the join relation.
*/
Assert(outer_merged_index == -1);
Assert(inner_merged_index == -1);
* datums list.
*/
PartitionRangeDatum *datum =
- list_nth(spec->upperdatums, abs(cmpval) - 1);
+ list_nth(spec->upperdatums, abs(cmpval) - 1);
/*
* The new partition overlaps with the
/*
* We start postmaster children with signals blocked. This allows them to
* install their own handlers before unblocking, to avoid races where they
- * might run the postmaster's handler and miss an important control signal.
- * With more analysis this could potentially be relaxed.
+ * might run the postmaster's handler and miss an important control
+ * signal. With more analysis this could potentially be relaxed.
*/
sigprocmask(SIG_SETMASK, &BlockSig, &save_mask);
result = fork();
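The block-then-fork pattern from the comment above, as a minimal sketch (error handling omitted; BlockSig is PostgreSQL's usual "all blockable signals" set):

	sigset_t	save_mask;
	pid_t		pid;

	sigprocmask(SIG_SETMASK, &BlockSig, &save_mask);
	pid = fork();
	if (pid == 0)
	{
		/* child: install its own handlers first, then unblock */
		sigprocmask(SIG_SETMASK, &save_mask, NULL);
	}
	else if (pid > 0)
		sigprocmask(SIG_SETMASK, &save_mask, NULL); /* parent: restore */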
RETV(PLAIN, c);
break;
default:
+
/*
* Throw an error for unrecognized ASCII alpha escape sequences,
* which reserves them for future use if needed.
if (must_use_password)
{
- bool uses_password = false;
+ bool uses_password = false;
for (opt = opts; opt->keyword != NULL; ++opt)
{
case XLOG_PARAMETER_CHANGE:
{
xl_parameter_change *xlrec =
- (xl_parameter_change *) XLogRecGetData(buf->record);
+ (xl_parameter_change *) XLogRecGetData(buf->record);
/*
* If wal_level on the primary is reduced to less than
* invalidated when this WAL record is replayed; and further,
* slot creation fails when wal_level is not sufficient; but
* all these operations are not synchronized, so a logical
- * slot may creep in while the wal_level is being
- * reduced. Hence this extra check.
+ * slot may creep in while the wal_level is being reduced.
+ * Hence this extra check.
*/
if (xlrec->wal_level < WAL_LEVEL_LOGICAL)
{
SnapBuild *builder = ctx->snapshot_builder;
XLogRecPtr origin_lsn = parsed->origin_lsn;
TimestampTz prepare_time = parsed->xact_time;
- RepOriginId origin_id = XLogRecGetOrigin(buf->record);
+ RepOriginId origin_id = XLogRecGetOrigin(buf->record);
int i;
TransactionId xid = parsed->twophase_xid;
int i;
XLogRecPtr origin_lsn = InvalidXLogRecPtr;
TimestampTz abort_time = parsed->xact_time;
- RepOriginId origin_id = XLogRecGetOrigin(buf->record);
+ RepOriginId origin_id = XLogRecGetOrigin(buf->record);
bool skip_xact;
if (parsed->xinfo & XACT_XINFO_HAS_ORIGIN)
MemoryContext old_context;
/*
- * On a standby, this check is also required while creating the
- * slot. Check the comments in the function.
+ * On a standby, this check is also required while creating the slot.
+ * Check the comments in the function.
*/
CheckLogicalDecodingRequirements();
case XLOG_REPLORIGIN_SET:
{
xl_replorigin_set *xlrec =
- (xl_replorigin_set *) XLogRecGetData(record);
+ (xl_replorigin_set *) XLogRecGetData(record);
replorigin_advance(xlrec->node_id,
xlrec->remote_lsn, record->EndRecPtr,
{
dlist_node *next = dlist_next_node(&entry->txn->changes, &change->node);
ReorderBufferChange *next_change =
- dlist_container(ReorderBufferChange, node, next);
+ dlist_container(ReorderBufferChange, node, next);
/* txn stays the same */
state->entries[off].lsn = next_change->lsn;
{
/* successfully restored changes from disk */
ReorderBufferChange *next_change =
- dlist_head_element(ReorderBufferChange, node,
- &entry->txn->changes);
+ dlist_head_element(ReorderBufferChange, node,
+ &entry->txn->changes);
elog(DEBUG2, "restored %u/%u changes from disk",
(uint32) entry->txn->nentries_mem,
dclist_delete_from(&rb->catchange_txns, &txn->catchange_node);
/* now remove reference from buffer */
- hash_search(rb->by_txn, &txn->xid, HASH_REMOVE, &found);
+ hash_search(rb->by_txn, &txn->xid, HASH_REMOVE, &found);
Assert(found);
/* remove entries spilled to disk */
ReorderBufferTXN *txn;
/*
- * Bail out if logical_replication_mode is buffered and we haven't exceeded
- * the memory limit.
+ * Bail out if logical_replication_mode is buffered and we haven't
+ * exceeded the memory limit.
*/
if (logical_replication_mode == LOGICAL_REP_MODE_BUFFERED &&
rb->size < logical_decoding_work_mem * 1024L)
{
char *data;
Size inval_size = sizeof(SharedInvalidationMessage) *
- change->data.inval.ninvalidations;
+ change->data.inval.ninvalidations;
sz += inval_size;
* After that we need to reuse the snapshot from the previous run.
*
* Unlike DecodeCommit which adds xids of all the subtransactions in
- * snapshot's xip array via SnapBuildCommitTxn, we can't do that here
- * but we do add them to subxip array instead via ReorderBufferCopySnap.
- * This allows the catalog changes made in subtransactions decoded till
- * now to be visible.
+ * snapshot's xip array via SnapBuildCommitTxn, we can't do that here but
+ * we do add them to subxip array instead via ReorderBufferCopySnap. This
+ * allows the catalog changes made in subtransactions decoded till now to
+ * be visible.
*/
if (txn->snapshot_now == NULL)
{
dlist_foreach_modify(cleanup_iter, &txn->changes)
{
ReorderBufferChange *cleanup =
- dlist_container(ReorderBufferChange, node, cleanup_iter.cur);
+ dlist_container(ReorderBufferChange, node, cleanup_iter.cur);
dlist_delete(&cleanup->node);
ReorderBufferReturnChange(rb, cleanup, true);
case REORDER_BUFFER_CHANGE_INVALIDATION:
{
Size inval_size = sizeof(SharedInvalidationMessage) *
- change->data.inval.ninvalidations;
+ change->data.inval.ninvalidations;
change->data.inval.invalidations =
MemoryContextAlloc(rb->context, inval_size);
dlist_foreach_modify(it, &ent->chunks)
{
ReorderBufferChange *change =
- dlist_container(ReorderBufferChange, node, it.cur);
+ dlist_container(ReorderBufferChange, node, it.cur);
dlist_delete(&change->node);
ReorderBufferReturnChange(rb, change, true);
Assert(builder->building_full_snapshot);
/* don't allow older snapshots */
- InvalidateCatalogSnapshot(); /* about to overwrite MyProc->xmin */
+ InvalidateCatalogSnapshot(); /* about to overwrite MyProc->xmin */
if (HaveRegisteredOrActiveSnapshot())
elog(ERROR, "cannot build an initial slot snapshot when snapshots exist");
Assert(!HistoricSnapshotActive());
*/
/*
- * xl_running_xacts record is older than what we can use, we might not have
- * all necessary catalog rows anymore.
+ * xl_running_xacts record is older than what we can use, we might not
+ * have all necessary catalog rows anymore.
*/
if (TransactionIdIsNormal(builder->initial_xmin_horizon) &&
NormalTransactionIdPrecedes(running->oldestRunningXid,
* the lock.
*/
int nsyncworkers =
- logicalrep_sync_worker_count(MyLogicalRepWorker->subid);
+ logicalrep_sync_worker_count(MyLogicalRepWorker->subid);
/* Now safe to release the LWLock */
LWLockRelease(LogicalRepWorkerLock);
LogicalRepRelMapEntry *rel;
LogicalRepTupleData newtup;
LogicalRepRelId relid;
- UserContext ucxt;
+ UserContext ucxt;
ApplyExecutionData *edata;
EState *estate;
TupleTableSlot *remoteslot;
{
LogicalRepRelMapEntry *rel;
LogicalRepRelId relid;
- UserContext ucxt;
+ UserContext ucxt;
ApplyExecutionData *edata;
EState *estate;
LogicalRepTupleData oldtup;
LogicalRepRelMapEntry *rel;
LogicalRepTupleData oldtup;
LogicalRepRelId relid;
- UserContext ucxt;
+ UserContext ucxt;
ApplyExecutionData *edata;
EState *estate;
TupleTableSlot *remoteslot;
if (map)
{
TupleConversionMap *PartitionToRootMap =
- convert_tuples_by_name(RelationGetDescr(partrel),
- RelationGetDescr(parentrel));
+ convert_tuples_by_name(RelationGetDescr(partrel),
+ RelationGetDescr(parentrel));
remoteslot =
execute_attr_map_slot(PartitionToRootMap->attrMap,
dlist_foreach_modify(iter, &lsn_mapping)
{
FlushPosition *pos =
- dlist_container(FlushPosition, node, iter.cur);
+ dlist_container(FlushPosition, node, iter.cur);
*write = pos->remote_end;
ereport(DEBUG1,
(errmsg_internal("logical replication apply worker for subscription \"%s\" two_phase is %s",
- MySubscription->name,
- MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_DISABLED ? "DISABLED" :
- MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_PENDING ? "PENDING" :
- MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_ENABLED ? "ENABLED" :
- "?")));
+ MySubscription->name,
+ MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_DISABLED ? "DISABLED" :
+ MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_PENDING ? "PENDING" :
+ MySubscription->twophasestate == LOGICALREP_TWOPHASE_STATE_ENABLED ? "ENABLED" :
+ "?")));
}
else
{
}
/*
- * If we are processing this transaction using a parallel apply worker then
- * either we send the changes to the parallel worker or if the worker is busy
- * then serialize the changes to the file which will later be processed by
- * the parallel worker.
+ * If we are processing this transaction using a parallel apply worker
+ * then either we send the changes to the parallel worker or if the worker
+ * is busy then serialize the changes to the file which will later be
+ * processed by the parallel worker.
*/
*winfo = pa_find_worker(xid);
}
/*
- * If there is no parallel worker involved to process this transaction then
- * we either directly apply the change or serialize it to a file which will
- * later be applied when the transaction finish message is processed.
+ * If there is no parallel worker involved to process this transaction
+ * then we either directly apply the change or serialize it to a file
+ * which will later be applied when the transaction finish message is
+ * processed.
*/
else if (in_streamed_transaction)
{
* are multiple lists (one for each operation) to which row filters will
* be appended.
*
- * FOR ALL TABLES and FOR TABLES IN SCHEMA implies "don't use row
- * filter expression" so it takes precedence.
+ * FOR ALL TABLES and FOR TABLES IN SCHEMA implies "don't use row filter
+ * expression" so it takes precedence.
*/
foreach(lc, publications)
{
SyncRepQueueInsert(int mode)
{
dlist_head *queue;
- dlist_iter iter;
+ dlist_iter iter;
Assert(mode >= 0 && mode < NUM_SYNC_REP_WAIT_MODE);
queue = &WalSndCtl->SyncRepQueue[mode];
dlist_foreach_modify(iter, &WalSndCtl->SyncRepQueue[mode])
{
- PGPROC *proc = dlist_container(PGPROC, syncRepLinks, iter.cur);
+ PGPROC *proc = dlist_container(PGPROC, syncRepLinks, iter.cur);
/*
* Assume the queue is ordered by LSN
if (parsetree->withCheckOptions != NIL)
{
WithCheckOption *parent_wco =
- (WithCheckOption *) linitial(parsetree->withCheckOptions);
+ (WithCheckOption *) linitial(parsetree->withCheckOptions);
if (parent_wco->cascaded)
{
if (row_security_policy_hook_restrictive)
{
List *hook_policies =
- (*row_security_policy_hook_restrictive) (cmd, relation);
+ (*row_security_policy_hook_restrictive) (cmd, relation);
/*
* As with built-in restrictive policies, we sort any hook-provided
if (row_security_policy_hook_permissive)
{
List *hook_policies =
- (*row_security_policy_hook_permissive) (cmd, relation);
+ (*row_security_policy_hook_permissive) (cmd, relation);
foreach(item, hook_policies)
{
my $depfile;
our @languages = qw(
- arabic
- armenian
- basque
- catalan
- danish
- dutch
- english
- finnish
- french
- german
- greek
- hindi
- hungarian
- indonesian
- irish
- italian
- lithuanian
- nepali
- norwegian
- portuguese
- romanian
- russian
- serbian
- spanish
- swedish
- tamil
- turkish
- yiddish
+ arabic
+ armenian
+ basque
+ catalan
+ danish
+ dutch
+ english
+ finnish
+ french
+ german
+ greek
+ hindi
+ hungarian
+ indonesian
+ irish
+ italian
+ lithuanian
+ nepali
+ norwegian
+ portuguese
+ romanian
+ russian
+ serbian
+ spanish
+ swedish
+ tamil
+ turkish
+ yiddish
);
# Names of alternative dictionaries for all-ASCII words. If not
our %ascii_languages = (
'hindi' => 'english',
- 'russian' => 'english',
-);
+ 'russian' => 'english',);
GetOptions(
- 'depfile' => \$depfile,
- 'outdir:s' => \$outdir_path,
- 'input:s' => \$input_path) || usage();
+ 'depfile' => \$depfile,
+ 'outdir:s' => \$outdir_path,
+ 'input:s' => \$input_path) || usage();
# Make sure input_path ends in a slash if needed.
if ($input_path ne '' && substr($input_path, -1) ne '/')
foreach my $lang (@languages)
{
my $asclang = $ascii_languages{$lang} || $lang;
- my $txt = $tmpl;
- my $stop = '';
+ my $txt = $tmpl;
+ my $stop = '';
my $stopword_path = "$input_path/stopwords/$lang.stop";
if (-s "$stopword_path")
if (tcnt > 0)
{
AttributeOpts *aopt =
- get_attribute_options(stats->attr->attrelid,
- stats->attr->attnum);
+ get_attribute_options(stats->attr->attrelid,
+ stats->attr->attnum);
stats->exprvals = exprvals;
stats->exprnulls = exprnulls;
{
BufferDesc *bufHdr = NULL;
CkptTsStatus *ts_stat = (CkptTsStatus *)
- DatumGetPointer(binaryheap_first(ts_heap));
+ DatumGetPointer(binaryheap_first(ts_heap));
buf_id = CkptBufferIds[ts_stat->index].buf_id;
Assert(buf_id != -1);
/*
* XXX Should ideally us PGIOAlignedBlock, but might need a way to avoid
- * wasting per-file alignment padding when some users create many
- * files.
+ * wasting per-file alignment padding when some users create many files.
*/
PGAlignedBlock buffer;
};
/*
* Block all blockable signals, except SIGQUIT. posix_fallocate() can run
* for quite a long time, and is an all-or-nothing operation. If we
- * allowed SIGUSR1 to interrupt us repeatedly (for example, due to recovery
- * conflicts), the retry loop might never succeed.
+ * allowed SIGUSR1 to interrupt us repeatedly (for example, due to
+ * recovery conflicts), the retry loop might never succeed.
*/
if (IsUnderPostmaster)
sigprocmask(SIG_SETMASK, &BlockSig, &save_sigmask);
pgstat_report_wait_start(WAIT_EVENT_DSM_ALLOCATE);
#if defined(HAVE_POSIX_FALLOCATE) && defined(__linux__)
+
/*
* On Linux, a shm_open fd is backed by a tmpfs file. If we were to use
* ftruncate, the file would contain a hole. Accessing memory backed by a
* SIGBUS later.
*
* We still use a traditional EINTR retry loop to handle SIGCONT.
- * posix_fallocate() doesn't restart automatically, and we don't want
- * this to fail if you attach a debugger.
+ * posix_fallocate() doesn't restart automatically, and we don't want this
+ * to fail if you attach a debugger.
*/
do
{
} while (rc == EINTR);
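The loop body is trimmed from this hunk; per the surrounding comments it is presumably a single posix_fallocate() call (a hedged reconstruction; rc, fd, and size come from the caller):

	do
	{
		/* returns an error number directly; does not set errno */
		rc = posix_fallocate(fd, 0, size);
	} while (rc == EINTR);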
/*
- * The caller expects errno to be set, but posix_fallocate() doesn't
- * set it. Instead it returns error numbers directly. So set errno,
- * even though we'll also return rc to indicate success or failure.
+ * The caller expects errno to be set, but posix_fallocate() doesn't set
+ * it. Instead it returns error numbers directly. So set errno, even
+ * though we'll also return rc to indicate success or failure.
*/
errno = rc;
#else
my $output_path = '.';
my $lastlockidx = -1;
-my $continue = "\n";
+my $continue = "\n";
-GetOptions(
- 'outdir:s' => \$output_path);
+GetOptions('outdir:s' => \$output_path);
open my $lwlocknames, '<', $ARGV[0] or die;
$trimmedlockname =~ s/Lock$//;
die "lock names must end with 'Lock'" if $trimmedlockname eq $lockname;
- die "lwlocknames.txt not in order" if $lockidx < $lastlockidx;
+ die "lwlocknames.txt not in order" if $lockidx < $lastlockidx;
die "lwlocknames.txt has duplicates" if $lockidx == $lastlockidx;
while ($lastlockidx < $lockidx - 1)
}
printf $c "%s \"%s\"", $continue, $trimmedlockname;
$lastlockidx = $lockidx;
- $continue = ",\n";
+ $continue = ",\n";
print $h "#define $lockname (&MainLWLockArray[$lockidx].lock)\n";
}
close $h;
close $c;
-rename($htmp, "$output_path/lwlocknames.h") || die "rename: $htmp to $output_path/lwlocknames.h: $!";
+rename($htmp, "$output_path/lwlocknames.h")
+ || die "rename: $htmp to $output_path/lwlocknames.h: $!";
rename($ctmp, "$output_path/lwlocknames.c") || die "rename: $ctmp: $!";
close $lwlocknames;
dclist_foreach(proc_iter, waitQueue)
{
PGPROC *queued_proc = dlist_container(PGPROC, links, proc_iter.cur);
+
if (queued_proc == blocked_proc)
break;
data->waiter_pids[data->npids++] = queued_proc->pid;
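The container idiom used throughout these hunks, sketched in isolation: dlist_container() maps an embedded list node back to its enclosing struct (PostgreSQL's container_of). MyItem, my_list, and handle() are illustrative:

	typedef struct MyItem
	{
		int			value;
		dlist_node	node;		/* embedded list link */
	} MyItem;

	static dlist_head my_list = DLIST_STATIC_INIT(my_list);

	static void
	walk_items(void)
	{
		dlist_iter	iter;

		dlist_foreach(iter, &my_list)
		{
			MyItem	   *item = dlist_container(MyItem, node, iter.cur);

			handle(item->value);	/* hypothetical consumer */
		}
	}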
LWLockWaitListLock(lock);
/*
- * Remove ourselves from the waitlist, unless we've already been
- * removed. The removal happens with the wait list lock held, so there's
- * no race in this check.
+ * Remove ourselves from the waitlist, unless we've already been removed.
+ * The removal happens with the wait list lock held, so there's no race in
+ * this check.
*/
on_waitlist = MyProc->lwWaiting == LW_WS_WAITING;
if (on_waitlist)
dlist_foreach(iter, &unconstify(SERIALIZABLEXACT *, reader)->outConflicts)
{
RWConflict conflict =
- dlist_container(RWConflictData, outLink, iter.cur);
+ dlist_container(RWConflictData, outLink, iter.cur);
if (conflict->sxactIn == writer)
return true;
dlist_foreach_modify(iter, &sxact->possibleUnsafeConflicts)
{
RWConflict conflict =
- dlist_container(RWConflictData, inLink, iter.cur);
+ dlist_container(RWConflictData, inLink, iter.cur);
Assert(!SxactIsReadOnly(conflict->sxactOut));
Assert(sxact == conflict->sxactIn);
dlist_foreach(iter, &blocking_sxact->possibleUnsafeConflicts)
{
RWConflict possibleUnsafeConflict =
- dlist_container(RWConflictData, inLink, iter.cur);
+ dlist_container(RWConflictData, inLink, iter.cur);
output[num_written++] = possibleUnsafeConflict->sxactOut->pid;
/*
* If we didn't find any possibly unsafe conflicts because every
* uncommitted writable transaction turned out to be doomed, then we
- * can "opt out" immediately. See comments above the earlier check for
- * PredXact->WritableSxactCount == 0.
+ * can "opt out" immediately. See comments above the earlier check
+ * for PredXact->WritableSxactCount == 0.
*/
if (dlist_is_empty(&sxact->possibleUnsafeConflicts))
{
dlist_foreach_modify(iter, &target->predicateLocks)
{
PREDICATELOCK *predlock =
- dlist_container(PREDICATELOCK, targetLink, iter.cur);
+ dlist_container(PREDICATELOCK, targetLink, iter.cur);
bool found;
dlist_delete(&(predlock->xactLink));
dlist_foreach_modify(iter, &oldtarget->predicateLocks)
{
PREDICATELOCK *oldpredlock =
- dlist_container(PREDICATELOCK, targetLink, iter.cur);
+ dlist_container(PREDICATELOCK, targetLink, iter.cur);
PREDICATELOCK *newpredlock;
SerCommitSeqNo oldCommitSeqNo = oldpredlock->commitSeqNo;
dlist_foreach_modify(iter, &oldtarget->predicateLocks)
{
PREDICATELOCK *oldpredlock =
- dlist_container(PREDICATELOCK, targetLink, iter.cur);
+ dlist_container(PREDICATELOCK, targetLink, iter.cur);
PREDICATELOCK *newpredlock;
SerCommitSeqNo oldCommitSeqNo;
SERIALIZABLEXACT *oldXact;
dlist_foreach(iter, &PredXact->activeList)
{
SERIALIZABLEXACT *sxact =
- dlist_container(SERIALIZABLEXACT, xactLink, iter.cur);
+ dlist_container(SERIALIZABLEXACT, xactLink, iter.cur);
if (!SxactIsRolledBack(sxact)
&& !SxactIsCommitted(sxact)
dlist_foreach_modify(iter, &MySerializableXact->possibleUnsafeConflicts)
{
RWConflict possibleUnsafeConflict =
- dlist_container(RWConflictData, inLink, iter.cur);
+ dlist_container(RWConflictData, inLink, iter.cur);
Assert(!SxactIsReadOnly(possibleUnsafeConflict->sxactOut));
Assert(MySerializableXact == possibleUnsafeConflict->sxactIn);
dlist_foreach_modify(iter, &MySerializableXact->outConflicts)
{
RWConflict conflict =
- dlist_container(RWConflictData, outLink, iter.cur);
+ dlist_container(RWConflictData, outLink, iter.cur);
if (isCommit
&& !SxactIsReadOnly(MySerializableXact)
dlist_foreach_modify(iter, &MySerializableXact->inConflicts)
{
RWConflict conflict =
- dlist_container(RWConflictData, inLink, iter.cur);
+ dlist_container(RWConflictData, inLink, iter.cur);
if (!isCommit
|| SxactIsCommitted(conflict->sxactOut)
dlist_foreach_modify(iter, &MySerializableXact->possibleUnsafeConflicts)
{
RWConflict possibleUnsafeConflict =
- dlist_container(RWConflictData, outLink, iter.cur);
+ dlist_container(RWConflictData, outLink, iter.cur);
roXact = possibleUnsafeConflict->sxactIn;
Assert(MySerializableXact == possibleUnsafeConflict->sxactOut);
* xmin and purge any transactions which finished before this transaction
* was launched.
*
- * For parallel queries in read-only transactions, it might run twice.
- * We only release the reference on the first call.
+ * For parallel queries in read-only transactions, it might run twice. We
+ * only release the reference on the first call.
*/
needToClear = false;
if ((partiallyReleasing ||
dlist_foreach_modify(iter, FinishedSerializableTransactions)
{
SERIALIZABLEXACT *finishedSxact =
- dlist_container(SERIALIZABLEXACT, finishedLink, iter.cur);
+ dlist_container(SERIALIZABLEXACT, finishedLink, iter.cur);
if (!TransactionIdIsValid(PredXact->SxactGlobalXmin)
|| TransactionIdPrecedesOrEquals(finishedSxact->finishedBefore,
dlist_foreach_modify(iter, &OldCommittedSxact->predicateLocks)
{
PREDICATELOCK *predlock =
- dlist_container(PREDICATELOCK, xactLink, iter.cur);
+ dlist_container(PREDICATELOCK, xactLink, iter.cur);
bool canDoPartialCleanup;
LWLockAcquire(SerializableXactHashLock, LW_SHARED);
dlist_foreach_modify(iter, &sxact->predicateLocks)
{
PREDICATELOCK *predlock =
- dlist_container(PREDICATELOCK, xactLink, iter.cur);
+ dlist_container(PREDICATELOCK, xactLink, iter.cur);
PREDICATELOCKTAG tag;
PREDICATELOCKTARGET *target;
PREDICATELOCKTARGETTAG targettag;
dlist_foreach_modify(iter, &sxact->outConflicts)
{
RWConflict conflict =
- dlist_container(RWConflictData, outLink, iter.cur);
+ dlist_container(RWConflictData, outLink, iter.cur);
if (summarize)
conflict->sxactIn->flags |= SXACT_FLAG_SUMMARY_CONFLICT_IN;
dlist_foreach_modify(iter, &sxact->inConflicts)
{
RWConflict conflict =
- dlist_container(RWConflictData, inLink, iter.cur);
+ dlist_container(RWConflictData, inLink, iter.cur);
if (summarize)
conflict->sxactOut->flags |= SXACT_FLAG_SUMMARY_CONFLICT_OUT;
dlist_foreach_modify(iter, &target->predicateLocks)
{
PREDICATELOCK *predlock =
- dlist_container(PREDICATELOCK, targetLink, iter.cur);
+ dlist_container(PREDICATELOCK, targetLink, iter.cur);
SERIALIZABLEXACT *sxact = predlock->tag.myXact;
if (sxact == MySerializableXact)
dlist_foreach_modify(iter, &target->predicateLocks)
{
PREDICATELOCK *predlock =
- dlist_container(PREDICATELOCK, targetLink, iter.cur);
+ dlist_container(PREDICATELOCK, targetLink, iter.cur);
if (predlock->tag.myXact != MySerializableXact
&& !RWConflictExists(predlock->tag.myXact, MySerializableXact))
dlist_foreach(iter, &writer->outConflicts)
{
RWConflict conflict =
- dlist_container(RWConflictData, outLink, iter.cur);
+ dlist_container(RWConflictData, outLink, iter.cur);
SERIALIZABLEXACT *t2 = conflict->sxactIn;
if (SxactIsPrepared(t2)
dlist_foreach(iter, &unconstify(SERIALIZABLEXACT *, reader)->inConflicts)
{
const RWConflict conflict =
- dlist_container(RWConflictData, inLink, iter.cur);
+ dlist_container(RWConflictData, inLink, iter.cur);
const SERIALIZABLEXACT *t0 = conflict->sxactOut;
if (!SxactIsDoomed(t0)
dlist_foreach(near_iter, &MySerializableXact->inConflicts)
{
RWConflict nearConflict =
- dlist_container(RWConflictData, inLink, near_iter.cur);
+ dlist_container(RWConflictData, inLink, near_iter.cur);
if (!SxactIsCommitted(nearConflict->sxactOut)
&& !SxactIsDoomed(nearConflict->sxactOut))
dlist_foreach(far_iter, &nearConflict->sxactOut->inConflicts)
{
RWConflict farConflict =
- dlist_container(RWConflictData, inLink, far_iter.cur);
+ dlist_container(RWConflictData, inLink, far_iter.cur);
if (farConflict->sxactOut == MySerializableXact
|| (!SxactIsCommitted(farConflict->sxactOut)
dlist_foreach(iter, &sxact->predicateLocks)
{
PREDICATELOCK *predlock =
- dlist_container(PREDICATELOCK, xactLink, iter.cur);
+ dlist_container(PREDICATELOCK, xactLink, iter.cur);
record.type = TWOPHASEPREDICATERECORD_LOCK;
lockRecord->target = predlock->tag.myTarget->tag;
{
Size size = 0;
Size TotalProcs =
- add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
+ add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));
/* ProcGlobal */
size = add_size(size, sizeof(PROC_HDR));
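A sketch of the overflow-checked sizing idiom above: add_size()/mul_size() error out on overflow instead of silently wrapping, so shared-memory size requests fail cleanly (TotalProcs mirrors the declaration above):

	Size		size = 0;
	Size		TotalProcs =
		add_size(MaxBackends, add_size(NUM_AUXILIARY_PROCS, max_prepared_xacts));

	size = add_size(size, sizeof(PROC_HDR));
	size = add_size(size, mul_size(TotalProcs, sizeof(PGPROC)));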
if (!dlist_is_empty(procgloballist))
{
- MyProc = (PGPROC*) dlist_pop_head_node(procgloballist);
+ MyProc = (PGPROC *) dlist_pop_head_node(procgloballist);
SpinLockRelease(ProcStructLock);
}
else
uint32 hashcode = locallock->hashcode;
LWLock *partitionLock = LockHashPartitionLock(hashcode);
dclist_head *waitQueue = &lock->waitProcs;
- PGPROC *insert_before = NULL;
+ PGPROC *insert_before = NULL;
LOCKMASK myHeldLocks = MyProc->heldLocks;
TimestampTz standbyWaitStart = 0;
bool early_deadlock = false;
if (InHotStandby)
{
bool maybe_log_conflict =
- (standbyWaitStart != 0 && !logged_recovery_conflict);
+ (standbyWaitStart != 0 && !logged_recovery_conflict);
/* Set a timer and wait for that or for the lock to be granted */
ResolveRecoveryConflictWithLock(locallock->tag.lock,
while (remblocks > 0)
{
- BlockNumber segstartblock = curblocknum % ((BlockNumber) RELSEG_SIZE);
+ BlockNumber segstartblock = curblocknum % ((BlockNumber) RELSEG_SIZE);
off_t seekpos = (off_t) BLCKSZ * segstartblock;
int numblocks;
/*
* Even if we don't want to use fallocate, we can still extend a
* bit more efficiently than writing each 8kB block individually.
- * pg_pwrite_zeros() (via FileZero()) uses
- * pg_pwritev_with_retry() to avoid multiple writes or needing a
- * zeroed buffer for the whole length of the extension.
+ * pg_pwrite_zeros() (via FileZero()) uses pg_pwritev_with_retry()
+ * to avoid multiple writes or needing a zeroed buffer for the
+ * whole length of the extension.
*/
ret = FileZero(v->mdfd_vfd,
seekpos, (off_t) BLCKSZ * numblocks,
{
/* prefix success */
char *ff = (prefix->aff[j]->flagflags & suffix->aff[i]->flagflags & FF_CROSSPRODUCT) ?
- VoidString : prefix->aff[j]->flag;
+ VoidString : prefix->aff[j]->flag;
if (FindWord(Conf, pnewword, ff, flag))
cur += addToResult(forms, cur, pnewword);
# Initialize.
openARGV();
- $Hold = '';
+ $Hold = '';
$CondReg = 0;
$doPrint = $doAutoPrint;
CYCLE:
my $include_path;
GetOptions(
- 'output:s' => \$output_path,
+ 'output:s' => \$output_path,
'include-path:s' => \$include_path) || usage();
# Make sure output_path ends in a slash.
}
# Sanity check arguments.
-die "No input files.\n" unless @ARGV;
+die "No input files.\n" unless @ARGV;
die "--include-path must be specified.\n" unless $include_path;
# Read all the input files into internal data structures.
my $catalog = Catalog::ParseHeader($header);
my $catname = $catalog->{catname};
- my $schema = $catalog->{columns};
+ my $schema = $catalog->{columns};
$catalogs{$catname} = $catalog;
$catalog_data{$catname} = Catalog::ParseData($datfile, $schema, 0);
push @fmgr,
{
- oid => $bki_values{oid},
- name => $bki_values{proname},
- lang => $bki_values{prolang},
- kind => $bki_values{prokind},
+ oid => $bki_values{oid},
+ name => $bki_values{proname},
+ lang => $bki_values{prolang},
+ kind => $bki_values{prokind},
strict => $bki_values{proisstrict},
retset => $bki_values{proretset},
- nargs => $bki_values{pronargs},
- args => $bki_values{proargtypes},
+ nargs => $bki_values{pronargs},
+ args => $bki_values{proargtypes},
prosrc => $bki_values{prosrc},
};
}
# Emit headers for both files
-my $tmpext = ".tmp$$";
-my $oidsfile = $output_path . 'fmgroids.h';
+my $tmpext = ".tmp$$";
+my $oidsfile = $output_path . 'fmgroids.h';
my $protosfile = $output_path . 'fmgrprotos.h';
-my $tabfile = $output_path . 'fmgrtab.c';
+my $tabfile = $output_path . 'fmgrtab.c';
open my $ofh, '>', $oidsfile . $tmpext
or die "Could not open $oidsfile$tmpext: $!";
$bmap{'f'} = 'false';
my @fmgr_builtin_oid_index;
my $last_builtin_oid = 0;
-my $fmgr_count = 0;
+my $fmgr_count = 0;
+
foreach my $s (sort { $a->{oid} <=> $b->{oid} } @fmgr)
{
next if $s->{lang} ne 'internal';
close($tfh);
# Finally, rename the completed files into place.
-Catalog::RenameTempFile($oidsfile, $tmpext);
+Catalog::RenameTempFile($oidsfile, $tmpext);
Catalog::RenameTempFile($protosfile, $tmpext);
-Catalog::RenameTempFile($tabfile, $tmpext);
+Catalog::RenameTempFile($tabfile, $tmpext);
sub usage
{
while (cur)
{
PgStat_EntryRef *entry_ref =
- dlist_container(PgStat_EntryRef, pending_node, cur);
+ dlist_container(PgStat_EntryRef, pending_node, cur);
PgStat_HashKey key = entry_ref->shared_entry->key;
PgStat_Kind kind = key.kind;
const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind);
if (pgStatEntryRefHash)
{
PgStat_EntryRefHashEntry *lohashent =
- pgstat_entry_ref_hash_lookup(pgStatEntryRefHash, key);
+ pgstat_entry_ref_hash_lookup(pgStatEntryRefHash, key);
if (lohashent)
pgstat_release_entry_ref(lohashent->key, lohashent->entry_ref,
dclist_foreach_modify(iter, &xact_state->pending_drops)
{
PgStat_PendingDroppedStatsItem *pending =
- dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
+ dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
xl_xact_stats_item *it = &pending->item;
if (isCommit && !pending->is_create)
dclist_foreach_modify(iter, &xact_state->pending_drops)
{
PgStat_PendingDroppedStatsItem *pending =
- dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
+ dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
xl_xact_stats_item *it = &pending->item;
dclist_delete_from(&xact_state->pending_drops, &pending->node);
dclist_foreach(iter, &xact_state->pending_drops)
{
PgStat_PendingDroppedStatsItem *pending =
- dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
+ dclist_container(PgStat_PendingDroppedStatsItem, node, iter.cur);
if (isCommit && pending->is_create)
continue;
int nest_level = GetCurrentTransactionNestLevel();
PgStat_SubXactStatus *xact_state;
PgStat_PendingDroppedStatsItem *drop = (PgStat_PendingDroppedStatsItem *)
- MemoryContextAlloc(TopTransactionContext, sizeof(PgStat_PendingDroppedStatsItem));
+ MemoryContextAlloc(TopTransactionContext, sizeof(PgStat_PendingDroppedStatsItem));
xact_state = pgstat_get_xact_stack_level(nest_level);
case INTSTYLE_SQL_STANDARD:
{
bool has_negative = year < 0 || mon < 0 ||
- mday < 0 || hour < 0 ||
- min < 0 || sec < 0 || fsec < 0;
+ mday < 0 || hour < 0 ||
+ min < 0 || sec < 0 || fsec < 0;
bool has_positive = year > 0 || mon > 0 ||
- mday > 0 || hour > 0 ||
- min > 0 || sec > 0 || fsec > 0;
+ mday > 0 || hour > 0 ||
+ min > 0 || sec > 0 || fsec > 0;
bool has_year_month = year != 0 || mon != 0;
bool has_day_time = mday != 0 || hour != 0 ||
- min != 0 || sec != 0 || fsec != 0;
+ min != 0 || sec != 0 || fsec != 0;
bool has_day = mday != 0;
bool sql_standard_value = !(has_negative && has_positive) &&
- !(has_year_month && has_day_time);
+ !(has_year_month && has_day_time);
/*
* SQL Standard wants only 1 "<sign>" preceding the whole
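The booleans above decide whether an interval can be printed in SQL standard form at all: the standard allows only a single leading sign and forbids mixing year-month with day-time fields. A tiny self-contained check mirroring that logic (fsec folded into an int for brevity), showing why something like INTERVAL '1 year -1 day' fails both tests:

    #include <stdbool.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* e.g. INTERVAL '1 year -1 day': year = 1, mday = -1 */
        int year = 1, mon = 0, mday = -1, hour = 0, min = 0, sec = 0, fsec = 0;

        bool has_negative = year < 0 || mon < 0 || mday < 0 ||
            hour < 0 || min < 0 || sec < 0 || fsec < 0;
        bool has_positive = year > 0 || mon > 0 || mday > 0 ||
            hour > 0 || min > 0 || sec > 0 || fsec > 0;
        bool has_year_month = year != 0 || mon != 0;
        bool has_day_time = mday != 0 || hour != 0 ||
            min != 0 || sec != 0 || fsec != 0;
        bool sql_standard_value = !(has_negative && has_positive) &&
            !(has_year_month && has_day_time);

        /* prints "false": mixed signs, and year-month mixed with day-time */
        printf("%s\n", sql_standard_value ? "true" : "false");
        return 0;
    }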
/*
* endptr points to the first character _after_ the sequence we recognized
* as a valid floating point number. orig_string points to the original
- * input
- * string.
+ * input string.
*/
/* skip leading whitespace */
allocate_record_info(MemoryContext mcxt, int ncolumns)
{
RecordIOData *data = (RecordIOData *)
- MemoryContextAlloc(mcxt,
- offsetof(RecordIOData, columns) +
- ncolumns * sizeof(ColumnIOData));
+ MemoryContextAlloc(mcxt,
+ offsetof(RecordIOData, columns) +
+ ncolumns * sizeof(ColumnIOData));
data->record_type = InvalidOid;
data->record_typmod = 0;
static Datum jsonPathFromCstring(char *in, int len, struct Node *escontext);
static char *jsonPathToCstring(StringInfo out, JsonPath *in,
int estimated_len);
-static bool flattenJsonPathParseItem(StringInfo buf, int *result,
+static bool flattenJsonPathParseItem(StringInfo buf, int *result,
struct Node *escontext,
JsonPathParseItem *item,
int nestingLevel, bool insideArraySubscript);
* children into a binary representation.
*/
static bool
-flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext,
+flattenJsonPathParseItem(StringInfo buf, int *result, struct Node *escontext,
JsonPathParseItem *item, int nestingLevel,
bool insideArraySubscript)
{
if (!item->value.args.left)
chld = pos;
- else if (! flattenJsonPathParseItem(buf, &chld, escontext,
- item->value.args.left,
- nestingLevel + argNestingLevel,
- insideArraySubscript))
+ else if (!flattenJsonPathParseItem(buf, &chld, escontext,
+ item->value.args.left,
+ nestingLevel + argNestingLevel,
+ insideArraySubscript))
return false;
*(int32 *) (buf->data + left) = chld - pos;
if (!item->value.args.right)
chld = pos;
- else if (! flattenJsonPathParseItem(buf, &chld, escontext,
- item->value.args.right,
- nestingLevel + argNestingLevel,
- insideArraySubscript))
+ else if (!flattenJsonPathParseItem(buf, &chld, escontext,
+ item->value.args.right,
+ nestingLevel + argNestingLevel,
+ insideArraySubscript))
return false;
*(int32 *) (buf->data + right) = chld - pos;
}
item->value.like_regex.patternlen);
appendStringInfoChar(buf, '\0');
- if (! flattenJsonPathParseItem(buf, &chld, escontext,
- item->value.like_regex.expr,
- nestingLevel,
- insideArraySubscript))
+ if (!flattenJsonPathParseItem(buf, &chld, escontext,
+ item->value.like_regex.expr,
+ nestingLevel,
+ insideArraySubscript))
return false;
*(int32 *) (buf->data + offs) = chld - pos;
}
if (!item->value.arg)
chld = pos;
- else if (! flattenJsonPathParseItem(buf, &chld, escontext,
- item->value.arg,
- nestingLevel + argNestingLevel,
- insideArraySubscript))
+ else if (!flattenJsonPathParseItem(buf, &chld, escontext,
+ item->value.arg,
+ nestingLevel + argNestingLevel,
+ insideArraySubscript))
return false;
*(int32 *) (buf->data + arg) = chld - pos;
}
int32 topos;
int32 frompos;
- if (! flattenJsonPathParseItem(buf, &frompos, escontext,
- item->value.array.elems[i].from,
- nestingLevel, true))
+ if (!flattenJsonPathParseItem(buf, &frompos, escontext,
+ item->value.array.elems[i].from,
+ nestingLevel, true))
return false;
frompos -= pos;
if (item->value.array.elems[i].to)
{
- if (! flattenJsonPathParseItem(buf, &topos, escontext,
- item->value.array.elems[i].to,
- nestingLevel, true))
+ if (!flattenJsonPathParseItem(buf, &topos, escontext,
+ item->value.array.elems[i].to,
+ nestingLevel, true))
return false;
topos -= pos;
}
if (item->next)
{
- if (! flattenJsonPathParseItem(buf, &chld, escontext,
- item->next, nestingLevel,
- insideArraySubscript))
+ if (!flattenJsonPathParseItem(buf, &chld, escontext,
+ item->next, nestingLevel,
+ insideArraySubscript))
return false;
chld -= pos;
*(int32 *) (buf->data + next) = chld;
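All of these call sites share one pattern: each flattened node records its children as int32 offsets relative to its own position (chld - pos), which keeps the serialized jsonpath position-independent. A toy illustration of that relative-offset flattening with a plain binary tree; the Node type, flatten(), and the 12-byte node layout are inventions for the example, not the JsonPathParseItem format:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    typedef struct Node
    {
        int32_t      value;
        struct Node *left;
        struct Node *right;
    } Node;

    static int32_t
    flatten(char *buf, int32_t *used, const Node *n)
    {
        int32_t pos = *used;        /* where this node starts */
        int32_t left_slot = pos + (int32_t) sizeof(int32_t);
        int32_t right_slot = left_slot + (int32_t) sizeof(int32_t);

        memcpy(buf + pos, &n->value, sizeof(int32_t));
        *used = right_slot + (int32_t) sizeof(int32_t);

        /* reserve the slots above, recurse, then patch in chld - pos */
        int32_t rel = 0;
        if (n->left)
            rel = flatten(buf, used, n->left) - pos;
        memcpy(buf + left_slot, &rel, sizeof(int32_t));

        rel = 0;
        if (n->right)
            rel = flatten(buf, used, n->right) - pos;
        memcpy(buf + right_slot, &rel, sizeof(int32_t));

        return pos;
    }

    int
    main(void)
    {
        Node    b = {2, NULL, NULL}, c = {3, NULL, NULL}, a = {1, &b, &c};
        char    buf[256];
        int32_t used = 0;

        flatten(buf, &used, &a);
        printf("flattened %d bytes\n", used);   /* 3 nodes x 12 bytes = 36 */
        return 0;
    }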
*/
JsonValueList vals = {0};
JsonPathExecResult res =
- executeItemOptUnwrapResultNoThrow(cxt, &larg, jb,
- false, &vals);
+ executeItemOptUnwrapResultNoThrow(cxt, &larg, jb,
+ false, &vals);
if (jperIsError(res))
return jpbUnknown;
else
{
JsonPathExecResult res =
- executeItemOptUnwrapResultNoThrow(cxt, &larg, jb,
- false, NULL);
+ executeItemOptUnwrapResultNoThrow(cxt, &larg, jb,
+ false, NULL);
if (jperIsError(res))
return jpbUnknown;
if (!fmt_txt[i])
{
MemoryContext oldcxt =
- MemoryContextSwitchTo(TopMemoryContext);
+ MemoryContextSwitchTo(TopMemoryContext);
fmt_txt[i] = cstring_to_text(fmt_str[i]);
MemoryContextSwitchTo(oldcxt);
char *val;
int len;
int total;
-} JsonPathString;
+} JsonPathString;
#include "utils/jsonpath.h"
#include "jsonpath_gram.h"
JsonPathParseResult **result, \
struct Node *escontext)
YY_DECL;
-extern int jsonpath_yyparse(JsonPathParseResult **result,
- struct Node *escontext);
+extern int jsonpath_yyparse(JsonPathParseResult **result,
+ struct Node *escontext);
extern void jsonpath_yyerror(JsonPathParseResult **result,
struct Node *escontext,
const char *message);
else
#endif
result = wcscoll((LPWSTR) a1p, (LPWSTR) a2p);
- if (result == 2147483647) /* _NLSCMPERROR; missing from mingw
- * headers */
+ if (result == 2147483647) /* _NLSCMPERROR; missing from mingw headers */
ereport(ERROR,
(errmsg("could not compare Unicode strings: %m")));
static int
pg_strcoll_libc(const char *arg1, const char *arg2, pg_locale_t locale)
{
- int result;
+ int result;
Assert(!locale || locale->provider == COLLPROVIDER_LIBC);
#ifdef WIN32
if (GetDatabaseEncoding() == PG_UTF8)
{
- size_t len1 = strlen(arg1);
- size_t len2 = strlen(arg2);
+ size_t len1 = strlen(arg1);
+ size_t len2 = strlen(arg2);
+
result = pg_strncoll_libc_win32_utf8(arg1, len1, arg2, len2, locale);
}
else
pg_strncoll_libc(const char *arg1, size_t len1, const char *arg2, size_t len2,
pg_locale_t locale)
{
- char sbuf[TEXTBUFLEN];
- char *buf = sbuf;
- size_t bufsize1 = len1 + 1;
- size_t bufsize2 = len2 + 1;
- char *arg1n;
- char *arg2n;
- int result;
+ char sbuf[TEXTBUFLEN];
+ char *buf = sbuf;
+ size_t bufsize1 = len1 + 1;
+ size_t bufsize2 = len2 + 1;
+ char *arg1n;
+ char *arg2n;
+ int result;
Assert(!locale || locale->provider == COLLPROVIDER_LIBC);
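pg_strncoll_libc() above shows a recurring buffer idiom: declare a small stack array (sbuf[TEXTBUFLEN]), and only when the NUL-terminated copies of both arguments don't fit, point buf at a single heap allocation that holds them back-to-back. A hedged standalone rendering of that idiom, with strcoll() and malloc() standing in for the locale-aware comparison and palloc() (error handling omitted; names illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define TEXTBUFLEN 1024

    static int
    coll_compare(const char *arg1, size_t len1, const char *arg2, size_t len2)
    {
        char    sbuf[TEXTBUFLEN];
        char   *buf = sbuf;
        size_t  bufsize1 = len1 + 1;
        size_t  bufsize2 = len2 + 1;
        int     result;

        /* fall back to one heap allocation only when the copies don't fit */
        if (bufsize1 + bufsize2 > TEXTBUFLEN)
            buf = malloc(bufsize1 + bufsize2);

        char *arg1n = buf;
        char *arg2n = buf + bufsize1;

        memcpy(arg1n, arg1, len1);
        arg1n[len1] = '\0';
        memcpy(arg2n, arg2, len2);
        arg2n[len2] = '\0';

        result = strcoll(arg1n, arg2n);

        if (buf != sbuf)
            free(buf);
        return result;
    }

    int
    main(void)
    {
        printf("%d\n", coll_compare("abc", 3, "abd", 3) < 0);  /* 1 in C locale */
        return 0;
    }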
pg_strncoll_icu_no_utf8(const char *arg1, int32_t len1,
const char *arg2, int32_t len2, pg_locale_t locale)
{
- char sbuf[TEXTBUFLEN];
- char *buf = sbuf;
- int32_t ulen1;
- int32_t ulen2;
- size_t bufsize1;
- size_t bufsize2;
- UChar *uchar1,
- *uchar2;
- int result;
+ char sbuf[TEXTBUFLEN];
+ char *buf = sbuf;
+ int32_t ulen1;
+ int32_t ulen2;
+ size_t bufsize1;
+ size_t bufsize2;
+ UChar *uchar1,
+ *uchar2;
+ int result;
Assert(locale->provider == COLLPROVIDER_ICU);
#ifdef HAVE_UCOL_STRCOLLUTF8
pg_strncoll_icu(const char *arg1, int32_t len1, const char *arg2, int32_t len2,
pg_locale_t locale)
{
- int result;
+ int result;
Assert(locale->provider == COLLPROVIDER_ICU);
pg_strncoll(const char *arg1, size_t len1, const char *arg2, size_t len2,
pg_locale_t locale)
{
- int result;
+ int result;
if (!locale || locale->provider == COLLPROVIDER_LIBC)
result = pg_strncoll_libc(arg1, len1, arg2, len2, locale);
#else
/* shouldn't happen */
elog(ERROR, "unsupported collprovider: %c", locale->provider);
- return 0; /* keep compiler quiet */
+ return 0; /* keep compiler quiet */
#endif
}
pg_strnxfrm_libc(char *dest, const char *src, size_t srclen, size_t destsize,
pg_locale_t locale)
{
- char sbuf[TEXTBUFLEN];
- char *buf = sbuf;
- size_t bufsize = srclen + 1;
- size_t result;
+ char sbuf[TEXTBUFLEN];
+ char *buf = sbuf;
+ size_t bufsize = srclen + 1;
+ size_t result;
Assert(!locale || locale->provider == COLLPROVIDER_LIBC);
pg_strnxfrm_icu(char *dest, const char *src, int32_t srclen, int32_t destsize,
pg_locale_t locale)
{
- char sbuf[TEXTBUFLEN];
- char *buf = sbuf;
- UChar *uchar;
- int32_t ulen;
- size_t uchar_bsize;
- Size result_bsize;
+ char sbuf[TEXTBUFLEN];
+ char *buf = sbuf;
+ UChar *uchar;
+ int32_t ulen;
+ size_t uchar_bsize;
+ Size result_bsize;
Assert(locale->provider == COLLPROVIDER_ICU);
pg_strnxfrm_prefix_icu_no_utf8(char *dest, const char *src, int32_t srclen,
int32_t destsize, pg_locale_t locale)
{
- char sbuf[TEXTBUFLEN];
- char *buf = sbuf;
- UCharIterator iter;
- uint32_t state[2];
- UErrorCode status;
- int32_t ulen = -1;
- UChar *uchar = NULL;
- size_t uchar_bsize;
- Size result_bsize;
+ char sbuf[TEXTBUFLEN];
+ char *buf = sbuf;
+ UCharIterator iter;
+ uint32_t state[2];
+ UErrorCode status;
+ int32_t ulen = -1;
+ UChar *uchar = NULL;
+ size_t uchar_bsize;
+ Size result_bsize;
Assert(locale->provider == COLLPROVIDER_ICU);
Assert(GetDatabaseEncoding() != PG_UTF8);
pg_strnxfrm_prefix_icu(char *dest, const char *src, int32_t srclen,
int32_t destsize, pg_locale_t locale)
{
- size_t result;
+ size_t result;
Assert(locale->provider == COLLPROVIDER_ICU);
/* shouldn't happen */
elog(ERROR, "unsupported collprovider: %c", locale->provider);
- return false; /* keep compiler quiet */
+ return false; /* keep compiler quiet */
}
/*
size_t
pg_strxfrm(char *dest, const char *src, size_t destsize, pg_locale_t locale)
{
- size_t result = 0; /* keep compiler quiet */
+ size_t result = 0; /* keep compiler quiet */
if (!locale || locale->provider == COLLPROVIDER_LIBC)
result = pg_strxfrm_libc(dest, src, destsize, locale);
pg_strnxfrm(char *dest, size_t destsize, const char *src, size_t srclen,
pg_locale_t locale)
{
- size_t result = 0; /* keep compiler quiet */
+ size_t result = 0; /* keep compiler quiet */
if (!locale || locale->provider == COLLPROVIDER_LIBC)
result = pg_strnxfrm_libc(dest, src, srclen, destsize, locale);
/* shouldn't happen */
elog(ERROR, "unsupported collprovider: %c", locale->provider);
- return false; /* keep compiler quiet */
+ return false; /* keep compiler quiet */
}
/*
pg_strxfrm_prefix(char *dest, const char *src, size_t destsize,
pg_locale_t locale)
{
- size_t result = 0; /* keep compiler quiet */
+ size_t result = 0; /* keep compiler quiet */
if (!locale || locale->provider == COLLPROVIDER_LIBC)
elog(ERROR, "collprovider '%c' does not support pg_strxfrm_prefix()",
pg_strnxfrm_prefix(char *dest, size_t destsize, const char *src,
size_t srclen, pg_locale_t locale)
{
- size_t result = 0; /* keep compiler quiet */
+ size_t result = 0; /* keep compiler quiet */
if (!locale || locale->provider == COLLPROVIDER_LIBC)
elog(ERROR, "collprovider '%c' does not support pg_strnxfrm_prefix()",
collator = ucol_open(loc_str, &status);
if (U_FAILURE(status))
ereport(ERROR,
- /* use original string for error report */
+ /* use original string for error report */
(errmsg("could not open collator for locale \"%s\": %s",
orig_str, u_errorName(status))));
{
UErrorCode status = U_ZERO_ERROR;
int32_t ulen;
+
ulen = ucnv_toUChars(converter, NULL, 0, str, len, &status);
if (U_FAILURE(status) && status != U_BUFFER_OVERFLOW_ERROR)
ereport(ERROR,
{
UErrorCode status = U_ZERO_ERROR;
int32_t ulen;
+
status = U_ZERO_ERROR;
ulen = ucnv_toUChars(converter, dest, destlen, src, srclen, &status);
if (U_FAILURE(status))
int32_t
icu_to_uchar(UChar **buff_uchar, const char *buff, size_t nbytes)
{
- int32_t len_uchar;
+ int32_t len_uchar;
init_icu_converter();
icu_language_tag(const char *loc_str, int elevel)
{
#ifdef USE_ICU
- UErrorCode status;
- char lang[ULOC_LANG_CAPACITY];
- char *langtag;
- size_t buflen = 32; /* arbitrary starting buffer size */
- const bool strict = true;
+ UErrorCode status;
+ char lang[ULOC_LANG_CAPACITY];
+ char *langtag;
+ size_t buflen = 32; /* arbitrary starting buffer size */
+ const bool strict = true;
status = U_ZERO_ERROR;
uloc_getLanguage(loc_str, lang, ULOC_LANG_CAPACITY, &status);
return pstrdup("en-US-u-va-posix");
/*
- * A BCP47 language tag doesn't have a clearly-defined upper limit
- * (cf. RFC5646 section 4.4). Additionally, in older ICU versions,
+ * A BCP47 language tag doesn't have a clearly-defined upper limit (cf.
+ * RFC5646 section 4.4). Additionally, in older ICU versions,
* uloc_toLanguageTag() doesn't always return the ultimate length on the
* first call, necessitating a loop.
*/
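This comment (repeated in initdb's copy of the function later in the diff) describes a grow-and-retry loop. A sketch of that loop against the public ICU API, with malloc()/realloc() in place of the PostgreSQL allocators and unbounded doubling for brevity (error paths trimmed; an assumption-laden sketch, not the backend code):

    #include <stdlib.h>
    #include <unicode/uloc.h>

    static char *
    to_language_tag(const char *loc_str)
    {
        int32_t     buflen = 32;    /* arbitrary starting buffer size */
        char       *langtag = malloc(buflen);
        UErrorCode  status;

        for (;;)
        {
            status = U_ZERO_ERROR;
            uloc_toLanguageTag(loc_str, langtag, buflen, 1 /* strict */, &status);

            /* grow and retry while ICU reports the buffer as too small */
            if (status == U_BUFFER_OVERFLOW_ERROR ||
                status == U_STRING_NOT_TERMINATED_WARNING)
            {
                buflen *= 2;
                langtag = realloc(langtag, buflen);
                continue;
            }
            break;
        }

        if (U_FAILURE(status))
        {
            free(langtag);
            return NULL;
        }
        return langtag;
    }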
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("ICU is not supported in this build")));
- return NULL; /* keep compiler quiet */
+ return NULL; /* keep compiler quiet */
#endif /* not USE_ICU */
}
icu_validate_locale(const char *loc_str)
{
#ifdef USE_ICU
- UCollator *collator;
- UErrorCode status;
- char lang[ULOC_LANG_CAPACITY];
- bool found = false;
- int elevel = icu_validation_level;
+ UCollator *collator;
+ UErrorCode status;
+ char lang[ULOC_LANG_CAPACITY];
+ bool found = false;
+ int elevel = icu_validation_level;
/* no validation */
if (elevel < 0)
/* search for matching language within ICU */
for (int32_t i = 0; !found && i < uloc_countAvailable(); i++)
{
- const char *otherloc = uloc_getAvailable(i);
- char otherlang[ULOC_LANG_CAPACITY];
+ const char *otherloc = uloc_getAvailable(i);
+ char otherlang[ULOC_LANG_CAPACITY];
status = U_ZERO_ERROR;
uloc_getLanguage(otherloc, otherlang, ULOC_LANG_CAPACITY, &status);
foreach(cell, bound_datums)
{
PartitionRangeDatum *datum =
- lfirst_node(PartitionRangeDatum, cell);
+ lfirst_node(PartitionRangeDatum, cell);
appendStringInfoString(buf, sep);
if (datum->kind == PARTITION_RANGE_DATUM_MINVALUE)
tsquery_phrase(PG_FUNCTION_ARGS)
{
PG_RETURN_DATUM(DirectFunctionCall3(tsquery_phrase_distance,
- PG_GETARG_DATUM(0),
- PG_GETARG_DATUM(1),
- Int32GetDatum(1)));
+ PG_GETARG_DATUM(0),
+ PG_GETARG_DATUM(1),
+ Int32GetDatum(1)));
}
Datum
if (arrin[i].haspos)
{
int len = POSDATALEN(tsv, arrin + i) * sizeof(WordEntryPos)
- + sizeof(uint16);
+ + sizeof(uint16);
curoff = SHORTALIGN(curoff);
memcpy(dataout + curoff,
}
else
{
- Size bsize, rsize;
+ Size bsize,
+ rsize;
char *buf;
bsize = pg_strnxfrm(NULL, 0, keydata, keylen, mylocale);
/*
* In principle, there's no reason to include the terminating NUL
- * character in the hash, but it was done before and the behavior
- * must be preserved.
+ * character in the hash, but it was done before and the behavior must
+ * be preserved.
*/
result = hash_any((uint8_t *) buf, bsize + 1);
}
else
{
- Size bsize, rsize;
+ Size bsize,
+ rsize;
char *buf;
bsize = pg_strnxfrm(NULL, 0, keydata, keylen, mylocale);
/*
* In principle, there's no reason to include the terminating NUL
- * character in the hash, but it was done before and the behavior
- * must be preserved.
+ * character in the hash, but it was done before and the behavior must
+ * be preserved.
*/
result = hash_any_extended((uint8_t *) buf, bsize + 1,
PG_GETARG_INT64(1));
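Both hashing paths above deliberately feed bsize + 1 bytes, i.e. the terminating NUL, into the hash. A tiny demonstration with FNV-1a (not PostgreSQL's hash_any()) that the extra byte changes the result, which is why the quirk must be preserved once hash values have been persisted:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t
    fnv1a(const uint8_t *k, size_t len)
    {
        uint32_t h = 2166136261u;

        for (size_t i = 0; i < len; i++)
        {
            h ^= k[i];
            h *= 16777619u;
        }
        return h;
    }

    int
    main(void)
    {
        const uint8_t buf[] = "abc";    /* 3 chars + terminating NUL */

        /* hashing 3 vs 4 bytes yields different values */
        printf("%08x vs %08x\n", fnv1a(buf, 3), fnv1a(buf, 4));
        return 0;
    }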
memcpy(sss->buf1, authoritative_data, len);
/*
- * pg_strxfrm() and pg_strxfrm_prefix expect NUL-terminated
- * strings.
+ * pg_strxfrm() and pg_strxfrm_prefix expect NUL-terminated strings.
*/
sss->buf1[len] = '\0';
sss->last_len1 = len;
PG_RETURN_ARRAYTYPE_P(construct_empty_array(TEXTOID));
PG_RETURN_DATUM(makeArrayResult(tstate.astate,
- CurrentMemoryContext));
+ CurrentMemoryContext));
}
/*
for (i = 0; i < nxip; i++)
{
FullTransactionId cur =
- FullTransactionIdFromU64((uint64) pq_getmsgint64(buf));
+ FullTransactionIdFromU64((uint64) pq_getmsgint64(buf));
if (FullTransactionIdPrecedes(cur, last) ||
FullTransactionIdPrecedes(cur, xmin) ||
XmlOptionType parsed_xmloptiontype;
xmlNodePtr content_nodes;
volatile xmlBufferPtr buf = NULL;
- volatile xmlSaveCtxtPtr ctxt = NULL;
+ volatile xmlSaveCtxtPtr ctxt = NULL;
ErrorSaveContext escontext = {T_ErrorSaveContext};
PgXmlErrorContext *xmlerrcxt;
#endif
get_publication_name(Oid pubid, bool missing_ok)
{
HeapTuple tup;
- char *pubname;
+ char *pubname;
Form_pg_publication pubform;
tup = SearchSysCache1(PUBLICATIONOID, ObjectIdGetDatum(pubid));
* return InvalidOid.
*/
Oid
-get_subscription_oid(const char* subname, bool missing_ok)
+get_subscription_oid(const char *subname, bool missing_ok)
{
Oid oid;
oid = GetSysCacheOid2(SUBSCRIPTIONNAME, Anum_pg_subscription_oid,
- MyDatabaseId, CStringGetDatum(subname));
+ MyDatabaseId, CStringGetDatum(subname));
if (!OidIsValid(oid) && !missing_ok)
ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("subscription \"%s\" does not exist", subname)));
+ (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("subscription \"%s\" does not exist", subname)));
return oid;
}
get_subscription_name(Oid subid, bool missing_ok)
{
HeapTuple tup;
- char* subname;
+ char *subname;
Form_pg_subscription subform;
tup = SearchSysCache1(SUBSCRIPTIONOID, ObjectIdGetDatum(subid));
AssertPendingSyncConsistency(Relation relation)
{
bool relcache_verdict =
- RelationIsPermanent(relation) &&
- ((relation->rd_createSubid != InvalidSubTransactionId &&
- RELKIND_HAS_STORAGE(relation->rd_rel->relkind)) ||
- relation->rd_firstRelfilelocatorSubid != InvalidSubTransactionId);
+ RelationIsPermanent(relation) &&
+ ((relation->rd_createSubid != InvalidSubTransactionId &&
+ RELKIND_HAS_STORAGE(relation->rd_rel->relkind)) ||
+ relation->rd_firstRelfilelocatorSubid != InvalidSubTransactionId);
Assert(relcache_verdict == RelFileLocatorSkippingWAL(relation->rd_locator));
*/
if (IsBinaryUpgrade)
{
- SMgrRelation srel;
+ SMgrRelation srel;
/*
* During a binary upgrade, we use this code path to ensure that
- * pg_largeobject and its index have the same relfilenumbers as in
- * the old cluster. This is necessary because pg_upgrade treats
+ * pg_largeobject and its index have the same relfilenumbers as in the
+ * old cluster. This is necessary because pg_upgrade treats
* pg_largeobject like a user table, not a system table. It is however
* possible that a table or index may need to end up with the same
* relfilenumber in the new cluster as what it had in the old cluster.
Bitmapset *uindexattrs; /* columns in unique indexes */
Bitmapset *pkindexattrs; /* columns in the primary index */
Bitmapset *idindexattrs; /* columns in the replica identity */
- Bitmapset *hotblockingattrs; /* columns with HOT blocking indexes */
- Bitmapset *summarizedattrs; /* columns with summarizing indexes */
+ Bitmapset *hotblockingattrs; /* columns with HOT blocking indexes */
+ Bitmapset *summarizedattrs; /* columns with summarizing indexes */
List *indexoidlist;
List *newindexoidlist;
Oid relpkindex;
* when the column value changes, thus require a separate
* attribute bitmapset.
*
- * Obviously, non-key columns couldn't be referenced by
- * foreign key or identity key. Hence we do not include them into
+ * Obviously, non-key columns couldn't be referenced by foreign
+ * key or identity key. Hence we do not include them into
* uindexattrs, pkindexattrs and idindexattrs bitmaps.
*/
if (attrnum != 0)
/*
* Open the target file.
*
- * Because Windows isn't happy about the idea of renaming over a file
- * that someone has open, we only open this file after acquiring the lock,
- * and for the same reason, we close it before releasing the lock. That
- * way, by the time write_relmap_file() acquires an exclusive lock, no
- * one else will have it open.
+ * Because Windows isn't happy about the idea of renaming over a file that
+ * someone has open, we only open this file after acquiring the lock, and
+ * for the same reason, we close it before releasing the lock. That way,
+ * by the time write_relmap_file() acquires an exclusive lock, no one else
+ * will have it open.
*/
snprintf(mapfilename, sizeof(mapfilename), "%s/%s", dbpath,
RELMAPPER_FILENAME);
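The relmapper comment above encodes a small protocol: open the target only after acquiring the lock and close it before releasing, readers and writers alike, so whoever holds the exclusive lock can rename() over the file while nobody else has it open. A standalone sketch of that ordering, with flock() on a lock file standing in for the LWLock (purely an assumption for illustration; error handling omitted):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/file.h>
    #include <unistd.h>

    int
    main(void)
    {
        const char *data = "map contents\n";
        int         lockfd = open("map.lock", O_RDWR | O_CREAT, 0600);
        int         fd;

        flock(lockfd, LOCK_EX);             /* 1. take the lock first */

        fd = open("map.tmp", O_WRONLY | O_CREAT | O_TRUNC, 0600);
        write(fd, data, strlen(data));
        fsync(fd);
        close(fd);                          /* 2. close before rename/unlock */

        /* 3. safe: readers open "map" only while holding the lock */
        rename("map.tmp", "map");

        flock(lockfd, LOCK_UN);             /* 4. release last */
        close(lockfd);
        return 0;
    }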
/* first validate that we have permissions to use the language */
aclresult = object_aclcheck(LanguageRelationId, procStruct->prolang, GetUserId(),
- ACL_USAGE);
+ ACL_USAGE);
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_LANGUAGE,
NameStr(langStruct->lanname));
use warnings;
use Getopt::Long;
-my $outfile = '';
+my $outfile = '';
-GetOptions(
- 'outfile=s' => \$outfile) or die "$0: wrong arguments";
+GetOptions('outfile=s' => \$outfile) or die "$0: wrong arguments";
open my $errcodes, '<', $ARGV[0]
or die "$0: could not open input file '$ARGV[0]': $!\n";
*/
if (!am_superuser &&
object_aclcheck(DatabaseRelationId, MyDatabaseId, GetUserId(),
- ACL_CONNECT) != ACLCHECK_OK)
+ ACL_CONNECT) != ACLCHECK_OK)
ereport(FATAL,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied for database \"%s\"", name),
}
/*
- * The last few connection slots are reserved for superusers and roles with
- * privileges of pg_use_reserved_connections. Replication connections are
- * drawn from slots reserved with max_wal_senders and are not limited by
- * max_connections, superuser_reserved_connections, or
+ * The last few connection slots are reserved for superusers and roles
+ * with privileges of pg_use_reserved_connections. Replication
+ * connections are drawn from slots reserved with max_wal_senders and are
+ * not limited by max_connections, superuser_reserved_connections, or
* reserved_connections.
*
* Note: At this point, the new backend has already claimed a proc struct,
}
else
{
- int sec_context = context->save_sec_context;
+ int sec_context = context->save_sec_context;
/*
* This user can SET ROLE to the target user, but not the other way
* around, so protect ourselves against the target user by setting
* SECURITY_RESTRICTED_OPERATION to prevent certain changes to the
- * session state. Also set up a new GUC nest level, so that we can roll
- * back any GUC changes that may be made by code running as the target
- * user, inasmuch as they could be malicious.
+ * session state. Also set up a new GUC nest level, so that we can
+ * roll back any GUC changes that may be made by code running as the
+ * target user, inasmuch as they could be malicious.
*/
sec_context |= SECURITY_RESTRICTED_OPERATION;
SetUserIdAndSecContext(userid, sec_context);
foreach my $i (@$cp950txt)
{
my $code = $i->{code};
- my $ucs = $i->{ucs};
+ my $ucs = $i->{ucs};
# Pick only the ETEN extended characters in the range 0xf9d6 - 0xf9dc
# from CP950.TXT
{
push @$all,
{
- code => $code,
- ucs => $ucs,
- comment => $i->{comment},
+ code => $code,
+ ucs => $ucs,
+ comment => $i->{comment},
direction => BOTH,
- f => $i->{f},
- l => $i->{l}
+ f => $i->{f},
+ l => $i->{l}
};
}
}
foreach my $i (@$all)
{
my $code = $i->{code};
- my $ucs = $i->{ucs};
+ my $ucs = $i->{ucs};
# BIG5.TXT maps several BIG5 characters to U+FFFD. The UTF-8 to BIG5 mapping can
# contain only one of them. XXX: Doesn't really make sense to include any of them,
next if (!m/<a u="([0-9A-F]+)" b="([0-9A-F ]+)"/);
my ($u, $c) = ($1, $2);
$c =~ s/ //g;
- my $ucs = hex($u);
+ my $ucs = hex($u);
my $code = hex($c);
# The GB-18030 character set, which we use as the source, contains
push @mapping,
{
- ucs => $ucs,
- code => $code,
+ ucs => $ucs,
+ code => $code,
direction => BOTH,
- f => $in_file,
- l => $.
+ f => $in_file,
+ l => $.
};
}
close($in);
push @all,
{
- direction => BOTH,
- ucs => $ucs1,
+ direction => BOTH,
+ ucs => $ucs1,
ucs_second => $ucs2,
- code => $code,
- comment => $rest,
- f => $in_file,
- l => $.
+ code => $code,
+ comment => $rest,
+ f => $in_file,
+ l => $.
};
}
elsif ($line =~ /^0x(\w+)\s*U\+(\w+)\s*#\s*(\S.*)?\s*$/)
# non-combined characters
my ($c, $u, $rest) = ($1, $2, "U+" . $2 . $3);
- my $ucs = hex($u);
+ my $ucs = hex($u);
my $code = hex($c);
next if ($code < 0x80 && $ucs < 0x80);
push @all,
{
direction => BOTH,
- ucs => $ucs,
- code => $code,
- comment => $rest,
- f => $in_file,
- l => $.
+ ucs => $ucs,
+ code => $code,
+ comment => $rest,
+ f => $in_file,
+ l => $.
};
}
}
push @mapping, (
{
direction => BOTH,
- ucs => 0x4efc,
- code => 0x8ff4af,
- comment => '# CJK(4EFC)'
+ ucs => 0x4efc,
+ code => 0x8ff4af,
+ comment => '# CJK(4EFC)'
},
{
direction => BOTH,
- ucs => 0x50f4,
- code => 0x8ff4b0,
- comment => '# CJK(50F4)'
+ ucs => 0x50f4,
+ code => 0x8ff4b0,
+ comment => '# CJK(50F4)'
},
{
direction => BOTH,
- ucs => 0x51EC,
- code => 0x8ff4b1,
- comment => '# CJK(51EC)'
+ ucs => 0x51EC,
+ code => 0x8ff4b1,
+ comment => '# CJK(51EC)'
},
{
direction => BOTH,
- ucs => 0x5307,
- code => 0x8ff4b2,
- comment => '# CJK(5307)'
+ ucs => 0x5307,
+ code => 0x8ff4b2,
+ comment => '# CJK(5307)'
},
{
direction => BOTH,
- ucs => 0x5324,
- code => 0x8ff4b3,
- comment => '# CJK(5324)'
+ ucs => 0x5324,
+ code => 0x8ff4b3,
+ comment => '# CJK(5324)'
},
{
direction => BOTH,
- ucs => 0x548A,
- code => 0x8ff4b5,
- comment => '# CJK(548A)'
+ ucs => 0x548A,
+ code => 0x8ff4b5,
+ comment => '# CJK(548A)'
},
{
direction => BOTH,
- ucs => 0x5759,
- code => 0x8ff4b6,
- comment => '# CJK(5759)'
+ ucs => 0x5759,
+ code => 0x8ff4b6,
+ comment => '# CJK(5759)'
},
{
direction => BOTH,
- ucs => 0x589E,
- code => 0x8ff4b9,
- comment => '# CJK(589E)'
+ ucs => 0x589E,
+ code => 0x8ff4b9,
+ comment => '# CJK(589E)'
},
{
direction => BOTH,
- ucs => 0x5BEC,
- code => 0x8ff4ba,
- comment => '# CJK(5BEC)'
+ ucs => 0x5BEC,
+ code => 0x8ff4ba,
+ comment => '# CJK(5BEC)'
},
{
direction => BOTH,
- ucs => 0x5CF5,
- code => 0x8ff4bb,
- comment => '# CJK(5CF5)'
+ ucs => 0x5CF5,
+ code => 0x8ff4bb,
+ comment => '# CJK(5CF5)'
},
{
direction => BOTH,
- ucs => 0x5D53,
- code => 0x8ff4bc,
- comment => '# CJK(5D53)'
+ ucs => 0x5D53,
+ code => 0x8ff4bc,
+ comment => '# CJK(5D53)'
},
{
direction => BOTH,
- ucs => 0x5FB7,
- code => 0x8ff4be,
- comment => '# CJK(5FB7)'
+ ucs => 0x5FB7,
+ code => 0x8ff4be,
+ comment => '# CJK(5FB7)'
},
{
direction => BOTH,
- ucs => 0x6085,
- code => 0x8ff4bf,
- comment => '# CJK(6085)'
+ ucs => 0x6085,
+ code => 0x8ff4bf,
+ comment => '# CJK(6085)'
},
{
direction => BOTH,
- ucs => 0x6120,
- code => 0x8ff4c0,
- comment => '# CJK(6120)'
+ ucs => 0x6120,
+ code => 0x8ff4c0,
+ comment => '# CJK(6120)'
},
{
direction => BOTH,
- ucs => 0x654E,
- code => 0x8ff4c1,
- comment => '# CJK(654E)'
+ ucs => 0x654E,
+ code => 0x8ff4c1,
+ comment => '# CJK(654E)'
},
{
direction => BOTH,
- ucs => 0x663B,
- code => 0x8ff4c2,
- comment => '# CJK(663B)'
+ ucs => 0x663B,
+ code => 0x8ff4c2,
+ comment => '# CJK(663B)'
},
{
direction => BOTH,
- ucs => 0x6665,
- code => 0x8ff4c3,
- comment => '# CJK(6665)'
+ ucs => 0x6665,
+ code => 0x8ff4c3,
+ comment => '# CJK(6665)'
},
{
direction => BOTH,
- ucs => 0x6801,
- code => 0x8ff4c6,
- comment => '# CJK(6801)'
+ ucs => 0x6801,
+ code => 0x8ff4c6,
+ comment => '# CJK(6801)'
},
{
direction => BOTH,
- ucs => 0x6A6B,
- code => 0x8ff4c9,
- comment => '# CJK(6A6B)'
+ ucs => 0x6A6B,
+ code => 0x8ff4c9,
+ comment => '# CJK(6A6B)'
},
{
direction => BOTH,
- ucs => 0x6AE2,
- code => 0x8ff4ca,
- comment => '# CJK(6AE2)'
+ ucs => 0x6AE2,
+ code => 0x8ff4ca,
+ comment => '# CJK(6AE2)'
},
{
direction => BOTH,
- ucs => 0x6DF2,
- code => 0x8ff4cc,
- comment => '# CJK(6DF2)'
+ ucs => 0x6DF2,
+ code => 0x8ff4cc,
+ comment => '# CJK(6DF2)'
},
{
direction => BOTH,
- ucs => 0x6DF8,
- code => 0x8ff4cb,
- comment => '# CJK(6DF8)'
+ ucs => 0x6DF8,
+ code => 0x8ff4cb,
+ comment => '# CJK(6DF8)'
},
{
direction => BOTH,
- ucs => 0x7028,
- code => 0x8ff4cd,
- comment => '# CJK(7028)'
+ ucs => 0x7028,
+ code => 0x8ff4cd,
+ comment => '# CJK(7028)'
},
{
direction => BOTH,
- ucs => 0x70BB,
- code => 0x8ff4ae,
- comment => '# CJK(70BB)'
+ ucs => 0x70BB,
+ code => 0x8ff4ae,
+ comment => '# CJK(70BB)'
},
{
direction => BOTH,
- ucs => 0x7501,
- code => 0x8ff4d0,
- comment => '# CJK(7501)'
+ ucs => 0x7501,
+ code => 0x8ff4d0,
+ comment => '# CJK(7501)'
},
{
direction => BOTH,
- ucs => 0x7682,
- code => 0x8ff4d1,
- comment => '# CJK(7682)'
+ ucs => 0x7682,
+ code => 0x8ff4d1,
+ comment => '# CJK(7682)'
},
{
direction => BOTH,
- ucs => 0x769E,
- code => 0x8ff4d2,
- comment => '# CJK(769E)'
+ ucs => 0x769E,
+ code => 0x8ff4d2,
+ comment => '# CJK(769E)'
},
{
direction => BOTH,
- ucs => 0x7930,
- code => 0x8ff4d4,
- comment => '# CJK(7930)'
+ ucs => 0x7930,
+ code => 0x8ff4d4,
+ comment => '# CJK(7930)'
},
{
direction => BOTH,
- ucs => 0x7AE7,
- code => 0x8ff4d9,
- comment => '# CJK(7AE7)'
+ ucs => 0x7AE7,
+ code => 0x8ff4d9,
+ comment => '# CJK(7AE7)'
},
{
direction => BOTH,
- ucs => 0x7DA0,
- code => 0x8ff4dc,
- comment => '# CJK(7DA0)'
+ ucs => 0x7DA0,
+ code => 0x8ff4dc,
+ comment => '# CJK(7DA0)'
},
{
direction => BOTH,
- ucs => 0x7DD6,
- code => 0x8ff4dd,
- comment => '# CJK(7DD6)'
+ ucs => 0x7DD6,
+ code => 0x8ff4dd,
+ comment => '# CJK(7DD6)'
},
{
direction => BOTH,
- ucs => 0x8362,
- code => 0x8ff4df,
- comment => '# CJK(8362)'
+ ucs => 0x8362,
+ code => 0x8ff4df,
+ comment => '# CJK(8362)'
},
{
direction => BOTH,
- ucs => 0x85B0,
- code => 0x8ff4e1,
- comment => '# CJK(85B0)'
+ ucs => 0x85B0,
+ code => 0x8ff4e1,
+ comment => '# CJK(85B0)'
},
{
direction => BOTH,
- ucs => 0x8807,
- code => 0x8ff4e4,
- comment => '# CJK(8807)'
+ ucs => 0x8807,
+ code => 0x8ff4e4,
+ comment => '# CJK(8807)'
},
{
direction => BOTH,
- ucs => 0x8B7F,
- code => 0x8ff4e6,
- comment => '# CJK(8B7F)'
+ ucs => 0x8B7F,
+ code => 0x8ff4e6,
+ comment => '# CJK(8B7F)'
},
{
direction => BOTH,
- ucs => 0x8CF4,
- code => 0x8ff4e7,
- comment => '# CJK(8CF4)'
+ ucs => 0x8CF4,
+ code => 0x8ff4e7,
+ comment => '# CJK(8CF4)'
},
{
direction => BOTH,
- ucs => 0x8D76,
- code => 0x8ff4e8,
- comment => '# CJK(8D76)'
+ ucs => 0x8D76,
+ code => 0x8ff4e8,
+ comment => '# CJK(8D76)'
},
{
direction => BOTH,
- ucs => 0x90DE,
- code => 0x8ff4ec,
- comment => '# CJK(90DE)'
+ ucs => 0x90DE,
+ code => 0x8ff4ec,
+ comment => '# CJK(90DE)'
},
{
direction => BOTH,
- ucs => 0x9115,
- code => 0x8ff4ee,
- comment => '# CJK(9115)'
+ ucs => 0x9115,
+ code => 0x8ff4ee,
+ comment => '# CJK(9115)'
},
{
direction => BOTH,
- ucs => 0x9592,
- code => 0x8ff4f1,
- comment => '# CJK(9592)'
+ ucs => 0x9592,
+ code => 0x8ff4f1,
+ comment => '# CJK(9592)'
},
{
direction => BOTH,
- ucs => 0x973B,
- code => 0x8ff4f4,
- comment => '# CJK(973B)'
+ ucs => 0x973B,
+ code => 0x8ff4f4,
+ comment => '# CJK(973B)'
},
{
direction => BOTH,
- ucs => 0x974D,
- code => 0x8ff4f5,
- comment => '# CJK(974D)'
+ ucs => 0x974D,
+ code => 0x8ff4f5,
+ comment => '# CJK(974D)'
},
{
direction => BOTH,
- ucs => 0x9751,
- code => 0x8ff4f6,
- comment => '# CJK(9751)'
+ ucs => 0x9751,
+ code => 0x8ff4f6,
+ comment => '# CJK(9751)'
},
{
direction => BOTH,
- ucs => 0x999E,
- code => 0x8ff4fa,
- comment => '# CJK(999E)'
+ ucs => 0x999E,
+ code => 0x8ff4fa,
+ comment => '# CJK(999E)'
},
{
direction => BOTH,
- ucs => 0x9AD9,
- code => 0x8ff4fb,
- comment => '# CJK(9AD9)'
+ ucs => 0x9AD9,
+ code => 0x8ff4fb,
+ comment => '# CJK(9AD9)'
},
{
direction => BOTH,
- ucs => 0x9B72,
- code => 0x8ff4fc,
- comment => '# CJK(9B72)'
+ ucs => 0x9B72,
+ code => 0x8ff4fc,
+ comment => '# CJK(9B72)'
},
{
direction => BOTH,
- ucs => 0x9ED1,
- code => 0x8ff4fe,
- comment => '# CJK(9ED1)'
+ ucs => 0x9ED1,
+ code => 0x8ff4fe,
+ comment => '# CJK(9ED1)'
},
{
direction => BOTH,
- ucs => 0xF929,
- code => 0x8ff4c5,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-F929'
+ ucs => 0xF929,
+ code => 0x8ff4c5,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-F929'
},
{
direction => BOTH,
- ucs => 0xF9DC,
- code => 0x8ff4f2,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-F9DC'
+ ucs => 0xF9DC,
+ code => 0x8ff4f2,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-F9DC'
},
{
direction => BOTH,
- ucs => 0xFA0E,
- code => 0x8ff4b4,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA0E'
+ ucs => 0xFA0E,
+ code => 0x8ff4b4,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA0E'
},
{
direction => BOTH,
- ucs => 0xFA0F,
- code => 0x8ff4b7,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA0F'
+ ucs => 0xFA0F,
+ code => 0x8ff4b7,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA0F'
},
{
direction => BOTH,
- ucs => 0xFA10,
- code => 0x8ff4b8,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA10'
+ ucs => 0xFA10,
+ code => 0x8ff4b8,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA10'
},
{
direction => BOTH,
- ucs => 0xFA11,
- code => 0x8ff4bd,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA11'
+ ucs => 0xFA11,
+ code => 0x8ff4bd,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA11'
},
{
direction => BOTH,
- ucs => 0xFA12,
- code => 0x8ff4c4,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA12'
+ ucs => 0xFA12,
+ code => 0x8ff4c4,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA12'
},
{
direction => BOTH,
- ucs => 0xFA13,
- code => 0x8ff4c7,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA13'
+ ucs => 0xFA13,
+ code => 0x8ff4c7,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA13'
},
{
direction => BOTH,
- ucs => 0xFA14,
- code => 0x8ff4c8,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA14'
+ ucs => 0xFA14,
+ code => 0x8ff4c8,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA14'
},
{
direction => BOTH,
- ucs => 0xFA15,
- code => 0x8ff4ce,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA15'
+ ucs => 0xFA15,
+ code => 0x8ff4ce,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA15'
},
{
direction => BOTH,
- ucs => 0xFA16,
- code => 0x8ff4cf,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA16'
+ ucs => 0xFA16,
+ code => 0x8ff4cf,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA16'
},
{
direction => BOTH,
- ucs => 0xFA17,
- code => 0x8ff4d3,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA17'
+ ucs => 0xFA17,
+ code => 0x8ff4d3,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA17'
},
{
direction => BOTH,
- ucs => 0xFA18,
- code => 0x8ff4d5,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA18'
+ ucs => 0xFA18,
+ code => 0x8ff4d5,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA18'
},
{
direction => BOTH,
- ucs => 0xFA19,
- code => 0x8ff4d6,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA19'
+ ucs => 0xFA19,
+ code => 0x8ff4d6,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA19'
},
{
direction => BOTH,
- ucs => 0xFA1A,
- code => 0x8ff4d7,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1A'
+ ucs => 0xFA1A,
+ code => 0x8ff4d7,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1A'
},
{
direction => BOTH,
- ucs => 0xFA1B,
- code => 0x8ff4d8,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1B'
+ ucs => 0xFA1B,
+ code => 0x8ff4d8,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1B'
},
{
direction => BOTH,
- ucs => 0xFA1C,
- code => 0x8ff4da,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1C'
+ ucs => 0xFA1C,
+ code => 0x8ff4da,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1C'
},
{
direction => BOTH,
- ucs => 0xFA1D,
- code => 0x8ff4db,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1D'
+ ucs => 0xFA1D,
+ code => 0x8ff4db,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1D'
},
{
direction => BOTH,
- ucs => 0xFA1E,
- code => 0x8ff4de,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1E'
+ ucs => 0xFA1E,
+ code => 0x8ff4de,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1E'
},
{
direction => BOTH,
- ucs => 0xFA1F,
- code => 0x8ff4e0,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1F'
+ ucs => 0xFA1F,
+ code => 0x8ff4e0,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1F'
},
{
direction => BOTH,
- ucs => 0xFA20,
- code => 0x8ff4e2,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA20'
+ ucs => 0xFA20,
+ code => 0x8ff4e2,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA20'
},
{
direction => BOTH,
- ucs => 0xFA21,
- code => 0x8ff4e3,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA21'
+ ucs => 0xFA21,
+ code => 0x8ff4e3,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA21'
},
{
direction => BOTH,
- ucs => 0xFA22,
- code => 0x8ff4e5,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA22'
+ ucs => 0xFA22,
+ code => 0x8ff4e5,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA22'
},
{
direction => BOTH,
- ucs => 0xFA23,
- code => 0x8ff4e9,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA23'
+ ucs => 0xFA23,
+ code => 0x8ff4e9,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA23'
},
{
direction => BOTH,
- ucs => 0xFA24,
- code => 0x8ff4ea,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA24'
+ ucs => 0xFA24,
+ code => 0x8ff4ea,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA24'
},
{
direction => BOTH,
- ucs => 0xFA25,
- code => 0x8ff4eb,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA25'
+ ucs => 0xFA25,
+ code => 0x8ff4eb,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA25'
},
{
direction => BOTH,
- ucs => 0xFA26,
- code => 0x8ff4ed,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA26'
+ ucs => 0xFA26,
+ code => 0x8ff4ed,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA26'
},
{
direction => BOTH,
- ucs => 0xFA27,
- code => 0x8ff4ef,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA27'
+ ucs => 0xFA27,
+ code => 0x8ff4ef,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA27'
},
{
direction => BOTH,
- ucs => 0xFA28,
- code => 0x8ff4f0,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA28'
+ ucs => 0xFA28,
+ code => 0x8ff4f0,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA28'
},
{
direction => BOTH,
- ucs => 0xFA29,
- code => 0x8ff4f3,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA29'
+ ucs => 0xFA29,
+ code => 0x8ff4f3,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA29'
},
{
direction => BOTH,
- ucs => 0xFA2A,
- code => 0x8ff4f7,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2A'
+ ucs => 0xFA2A,
+ code => 0x8ff4f7,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2A'
},
{
direction => BOTH,
- ucs => 0xFA2B,
- code => 0x8ff4f8,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2B'
+ ucs => 0xFA2B,
+ code => 0x8ff4f8,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2B'
},
{
direction => BOTH,
- ucs => 0xFA2C,
- code => 0x8ff4f9,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2C'
+ ucs => 0xFA2C,
+ code => 0x8ff4f9,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2C'
},
{
direction => BOTH,
- ucs => 0xFA2D,
- code => 0x8ff4fd,
- comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2D'
+ ucs => 0xFA2D,
+ code => 0x8ff4fd,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2D'
},
{
direction => BOTH,
- ucs => 0xFF07,
- code => 0x8ff4a9,
- comment => '# FULLWIDTH APOSTROPHE'
+ ucs => 0xFF07,
+ code => 0x8ff4a9,
+ comment => '# FULLWIDTH APOSTROPHE'
},
{
direction => BOTH,
- ucs => 0xFFE4,
- code => 0x8fa2c3,
- comment => '# FULLWIDTH BROKEN BAR'
+ ucs => 0xFFE4,
+ code => 0x8fa2c3,
+ comment => '# FULLWIDTH BROKEN BAR'
},
# additional conversions for EUC_JP -> UTF-8 conversion
{
direction => TO_UNICODE,
- ucs => 0x2116,
- code => 0x8ff4ac,
- comment => '# NUMERO SIGN'
+ ucs => 0x2116,
+ code => 0x8ff4ac,
+ comment => '# NUMERO SIGN'
},
{
direction => TO_UNICODE,
- ucs => 0x2121,
- code => 0x8ff4ad,
- comment => '# TELEPHONE SIGN'
+ ucs => 0x2121,
+ code => 0x8ff4ad,
+ comment => '# TELEPHONE SIGN'
},
{
direction => TO_UNICODE,
- ucs => 0x3231,
- code => 0x8ff4ab,
- comment => '# PARENTHESIZED IDEOGRAPH STOCK'
+ ucs => 0x3231,
+ code => 0x8ff4ab,
+ comment => '# PARENTHESIZED IDEOGRAPH STOCK'
});
print_conversion_tables($this_script, "EUC_JP", \@mapping);
push @$mapping,
( {
direction => BOTH,
- ucs => 0x20AC,
- code => 0xa2e6,
- comment => '# EURO SIGN',
- f => $this_script,
- l => __LINE__
+ ucs => 0x20AC,
+ code => 0xa2e6,
+ comment => '# EURO SIGN',
+ f => $this_script,
+ l => __LINE__
},
{
direction => BOTH,
- ucs => 0x00AE,
- code => 0xa2e7,
- comment => '# REGISTERED SIGN',
- f => $this_script,
- l => __LINE__
+ ucs => 0x00AE,
+ code => 0xa2e7,
+ comment => '# REGISTERED SIGN',
+ f => $this_script,
+ l => __LINE__
},
{
direction => BOTH,
- ucs => 0x327E,
- code => 0xa2e8,
- comment => '# CIRCLED HANGUL IEUNG U',
- f => $this_script,
- l => __LINE__
+ ucs => 0x327E,
+ code => 0xa2e8,
+ comment => '# CIRCLED HANGUL IEUNG U',
+ f => $this_script,
+ l => __LINE__
});
print_conversion_tables($this_script, "EUC_KR", $mapping);
foreach my $i (@$mapping)
{
- my $ucs = $i->{ucs};
- my $code = $i->{code};
+ my $ucs = $i->{ucs};
+ my $code = $i->{code};
my $origcode = $i->{code};
my $plane = ($code & 0x1f0000) >> 16;
{
push @extras,
{
- ucs => $i->{ucs},
- code => ($i->{code} + 0x8ea10000),
- rest => $i->{rest},
+ ucs => $i->{ucs},
+ code => ($i->{code} + 0x8ea10000),
+ rest => $i->{rest},
direction => TO_UNICODE,
- f => $i->{f},
- l => $i->{l}
+ f => $i->{f},
+ l => $i->{l}
};
}
}
next if (!m/<a u="([0-9A-F]+)" b="([0-9A-F ]+)"/);
my ($u, $c) = ($1, $2);
$c =~ s/ //g;
- my $ucs = hex($u);
+ my $ucs = hex($u);
my $code = hex($c);
if ($code >= 0x80 && $ucs >= 0x0080)
{
push @mapping,
{
- ucs => $ucs,
- code => $code,
+ ucs => $ucs,
+ code => $code,
direction => BOTH,
- f => $in_file,
- l => $.
+ f => $in_file,
+ l => $.
};
}
}
push @$mapping,
( {
direction => BOTH,
- ucs => 0x20AC,
- code => 0xd9e6,
- comment => '# EURO SIGN',
- f => $this_script,
- l => __LINE__
+ ucs => 0x20AC,
+ code => 0xd9e6,
+ comment => '# EURO SIGN',
+ f => $this_script,
+ l => __LINE__
},
{
direction => BOTH,
- ucs => 0x00AE,
- code => 0xd9e7,
- comment => '# REGISTERED SIGN',
- f => $this_script,
- l => __LINE__
+ ucs => 0x00AE,
+ code => 0xd9e7,
+ comment => '# REGISTERED SIGN',
+ f => $this_script,
+ l => __LINE__
},
{
direction => BOTH,
- ucs => 0x327E,
- code => 0xd9e8,
- comment => '# CIRCLED HANGUL IEUNG U',
- f => $this_script,
- l => __LINE__
+ ucs => 0x327E,
+ code => 0xd9e8,
+ comment => '# CIRCLED HANGUL IEUNG U',
+ f => $this_script,
+ l => __LINE__
});
print_conversion_tables($this_script, "JOHAB", $mapping);
push @mapping,
{
- code => $code,
- ucs => $ucs1,
+ code => $code,
+ ucs => $ucs1,
ucs_second => $ucs2,
- comment => $rest,
- direction => BOTH,
- f => $in_file,
- l => $.
+ comment => $rest,
+ direction => BOTH,
+ f => $in_file,
+ l => $.
};
}
elsif ($line =~ /^0x(\w+)\s*U\+(\w+)\s*#\s*(\S.*)?\s*$/)
# non-combined characters
my ($c, $u, $rest) = ($1, $2, "U+" . $2 . $3);
- my $ucs = hex($u);
+ my $ucs = hex($u);
my $code = hex($c);
my $direction;
push @mapping,
{
- code => $code,
- ucs => $ucs,
- comment => $rest,
+ code => $code,
+ ucs => $ucs,
+ comment => $rest,
direction => $direction,
- f => $in_file,
- l => $.
+ f => $in_file,
+ l => $.
};
}
}
# Drop these SJIS codes from the source for UTF8=>SJIS conversion
my @reject_sjis = (
0xed40 .. 0xeefc, 0x8754 .. 0x875d, 0x878a, 0x8782,
- 0x8784, 0xfa5b, 0xfa54, 0x8790 .. 0x8792,
+ 0x8784, 0xfa5b, 0xfa54, 0x8790 .. 0x8792,
0x8795 .. 0x8797, 0x879a .. 0x879c);
foreach my $i (@$mapping)
{
my $code = $i->{code};
- my $ucs = $i->{ucs};
+ my $ucs = $i->{ucs};
if (grep { $code == $_ } @reject_sjis)
{
push @$mapping,
( {
direction => FROM_UNICODE,
- ucs => 0x00a2,
- code => 0x8191,
- comment => '# CENT SIGN',
- f => $this_script,
- l => __LINE__
+ ucs => 0x00a2,
+ code => 0x8191,
+ comment => '# CENT SIGN',
+ f => $this_script,
+ l => __LINE__
},
{
direction => FROM_UNICODE,
- ucs => 0x00a3,
- code => 0x8192,
- comment => '# POUND SIGN',
- f => $this_script,
- l => __LINE__
+ ucs => 0x00a3,
+ code => 0x8192,
+ comment => '# POUND SIGN',
+ f => $this_script,
+ l => __LINE__
},
{
direction => FROM_UNICODE,
- ucs => 0x00a5,
- code => 0x5c,
- comment => '# YEN SIGN',
- f => $this_script,
- l => __LINE__
+ ucs => 0x00a5,
+ code => 0x5c,
+ comment => '# YEN SIGN',
+ f => $this_script,
+ l => __LINE__
},
{
direction => FROM_UNICODE,
- ucs => 0x00ac,
- code => 0x81ca,
- comment => '# NOT SIGN',
- f => $this_script,
- l => __LINE__
+ ucs => 0x00ac,
+ code => 0x81ca,
+ comment => '# NOT SIGN',
+ f => $this_script,
+ l => __LINE__
},
{
direction => FROM_UNICODE,
- ucs => 0x2016,
- code => 0x8161,
- comment => '# DOUBLE VERTICAL LINE',
- f => $this_script,
- l => __LINE__
+ ucs => 0x2016,
+ code => 0x8161,
+ comment => '# DOUBLE VERTICAL LINE',
+ f => $this_script,
+ l => __LINE__
},
{
direction => FROM_UNICODE,
- ucs => 0x203e,
- code => 0x7e,
- comment => '# OVERLINE',
- f => $this_script,
- l => __LINE__
+ ucs => 0x203e,
+ code => 0x7e,
+ comment => '# OVERLINE',
+ f => $this_script,
+ l => __LINE__
},
{
direction => FROM_UNICODE,
- ucs => 0x2212,
- code => 0x817c,
- comment => '# MINUS SIGN',
- f => $this_script,
- l => __LINE__
+ ucs => 0x2212,
+ code => 0x817c,
+ comment => '# MINUS SIGN',
+ f => $this_script,
+ l => __LINE__
},
{
direction => FROM_UNICODE,
- ucs => 0x301c,
- code => 0x8160,
- comment => '# WAVE DASH',
- f => $this_script,
- l => __LINE__
+ ucs => 0x301c,
+ code => 0x8160,
+ comment => '# WAVE DASH',
+ f => $this_script,
+ l => __LINE__
});
print_conversion_tables($this_script, "SJIS", $mapping);
next if (!m/<a u="([0-9A-F]+)" b="([0-9A-F ]+)"/);
my ($u, $c) = ($1, $2);
$c =~ s/ //g;
- my $ucs = hex($u);
+ my $ucs = hex($u);
my $code = hex($c);
next if ($code == 0x0080 || $code == 0x00FF);
{
push @mapping,
{
- ucs => $ucs,
- code => $code,
+ ucs => $ucs,
+ code => $code,
direction => BOTH,
- f => $in_file,
- l => $.
+ f => $in_file,
+ l => $.
};
}
}
push @mapping,
{
direction => BOTH,
- code => 0xa2e8,
- ucs => 0x327e,
- comment => 'CIRCLED HANGUL IEUNG U',
- f => $this_script,
- l => __LINE__
+ code => 0xa2e8,
+ ucs => 0x327e,
+ comment => 'CIRCLED HANGUL IEUNG U',
+ f => $this_script,
+ l => __LINE__
};
print_conversion_tables($this_script, "UHC", \@mapping);
my $this_script = 'src/backend/utils/mb/Unicode/UCS_to_most.pl';
my %filename = (
- 'WIN866' => 'CP866.TXT',
- 'WIN874' => 'CP874.TXT',
- 'WIN1250' => 'CP1250.TXT',
- 'WIN1251' => 'CP1251.TXT',
- 'WIN1252' => 'CP1252.TXT',
- 'WIN1253' => 'CP1253.TXT',
- 'WIN1254' => 'CP1254.TXT',
- 'WIN1255' => 'CP1255.TXT',
- 'WIN1256' => 'CP1256.TXT',
- 'WIN1257' => 'CP1257.TXT',
- 'WIN1258' => 'CP1258.TXT',
- 'ISO8859_2' => '8859-2.TXT',
- 'ISO8859_3' => '8859-3.TXT',
- 'ISO8859_4' => '8859-4.TXT',
- 'ISO8859_5' => '8859-5.TXT',
- 'ISO8859_6' => '8859-6.TXT',
- 'ISO8859_7' => '8859-7.TXT',
- 'ISO8859_8' => '8859-8.TXT',
- 'ISO8859_9' => '8859-9.TXT',
+ 'WIN866' => 'CP866.TXT',
+ 'WIN874' => 'CP874.TXT',
+ 'WIN1250' => 'CP1250.TXT',
+ 'WIN1251' => 'CP1251.TXT',
+ 'WIN1252' => 'CP1252.TXT',
+ 'WIN1253' => 'CP1253.TXT',
+ 'WIN1254' => 'CP1254.TXT',
+ 'WIN1255' => 'CP1255.TXT',
+ 'WIN1256' => 'CP1256.TXT',
+ 'WIN1257' => 'CP1257.TXT',
+ 'WIN1258' => 'CP1258.TXT',
+ 'ISO8859_2' => '8859-2.TXT',
+ 'ISO8859_3' => '8859-3.TXT',
+ 'ISO8859_4' => '8859-4.TXT',
+ 'ISO8859_5' => '8859-5.TXT',
+ 'ISO8859_6' => '8859-6.TXT',
+ 'ISO8859_7' => '8859-7.TXT',
+ 'ISO8859_8' => '8859-8.TXT',
+ 'ISO8859_9' => '8859-9.TXT',
'ISO8859_10' => '8859-10.TXT',
'ISO8859_13' => '8859-13.TXT',
'ISO8859_14' => '8859-14.TXT',
'ISO8859_15' => '8859-15.TXT',
'ISO8859_16' => '8859-16.TXT',
- 'KOI8R' => 'KOI8-R.TXT',
- 'KOI8U' => 'KOI8-U.TXT',
- 'GBK' => 'CP936.TXT');
+ 'KOI8R' => 'KOI8-R.TXT',
+ 'KOI8U' => 'KOI8-U.TXT',
+ 'GBK' => 'CP936.TXT');
# make maps for all encodings if not specified
my @charsets = (scalar(@ARGV) > 0) ? @ARGV : sort keys(%filename);
# Constants used in the 'direction' field of the character maps
use constant {
- NONE => 0,
- TO_UNICODE => 1,
+ NONE => 0,
+ TO_UNICODE => 1,
FROM_UNICODE => 2,
- BOTH => 3
+ BOTH => 3
};
#######################################################################
exit;
}
my $out = {
- code => hex($1),
- ucs => hex($2),
- comment => $4,
+ code => hex($1),
+ ucs => hex($2),
+ comment => $4,
direction => BOTH,
- f => $fname,
- l => $.
+ f => $fname,
+ l => $.
};
# Ignore pure ASCII mappings. PostgreSQL character conversion code
my $tblname;
if ($direction == TO_UNICODE)
{
- $fname = lc("${csname}_to_utf8.map");
+ $fname = lc("${csname}_to_utf8.map");
$tblname = lc("${csname}_to_unicode_tree");
print "- Writing ${csname}=>UTF8 conversion table: $fname\n";
}
else
{
- $fname = lc("utf8_to_${csname}.map");
+ $fname = lc("utf8_to_${csname}.map");
$tblname = lc("${csname}_from_unicode_tree");
print "- Writing UTF8=>${csname} conversion table: $fname\n";
unshift @segments,
{
- header => "Dummy map, for invalid values",
+ header => "Dummy map, for invalid values",
min_idx => 0,
max_idx => $widest_range,
- label => "dummy map"
+ label => "dummy map"
};
###
###
for (my $j = 0; $j < $#segments - 1; $j++)
{
- my $seg = $segments[$j];
+ my $seg = $segments[$j];
my $nextseg = $segments[ $j + 1 ];
# Count the number of zero values at the end of this segment.
if ($max_val <= 0xffff)
{
$vals_per_line = 8;
- $colwidth = 4;
+ $colwidth = 4;
}
elsif ($max_val <= 0xffffff)
{
$vals_per_line = 4;
- $colwidth = 6;
+ $colwidth = 6;
}
else
{
$vals_per_line = 4;
- $colwidth = 8;
+ $colwidth = 8;
}
###
# Print the next line's worth of values.
# XXX pad to begin at a nice boundary
printf $out " /* %02x */ ", $i;
- for (my $j = 0;
- $j < $vals_per_line && $i <= $seg->{max_idx}; $j++)
+ for (
+ my $j = 0;
+ $j < $vals_per_line && $i <= $seg->{max_idx};
+ $j++)
{
# missing values represent zero.
my $val = $seg->{values}->{$i} || 0;
push @segments,
{
header => $header . ", leaf: ${path}xx",
- label => $label,
- level => $level,
- depth => $depth,
- path => $path,
+ label => $label,
+ level => $level,
+ depth => $depth,
+ path => $path,
values => $map
};
}
push @segments,
{
header => $header . ", byte #$level: ${path}xx",
- label => $label,
- level => $level,
- depth => $depth,
- path => $path,
+ label => $label,
+ level => $level,
+ depth => $depth,
+ path => $path,
values => \%children
};
}
if (defined $c->{ucs_second})
{
my $entry = {
- utf8 => ucs2utf($c->{ucs}),
+ utf8 => ucs2utf($c->{ucs}),
utf8_second => ucs2utf($c->{ucs_second}),
- code => $c->{code},
- comment => $c->{comment},
- f => $c->{f},
- l => $c->{l}
+ code => $c->{code},
+ comment => $c->{comment},
+ f => $c->{f},
+ l => $c->{l}
};
push @combined, $entry;
}
/* Flag combinations */
/*
- * GUC_NO_SHOW_ALL requires GUC_NOT_IN_SAMPLE, as a parameter not part
- * of SHOW ALL should not be hidden in postgresql.conf.sample.
+ * GUC_NO_SHOW_ALL requires GUC_NOT_IN_SAMPLE, as a parameter not part of
+ * SHOW ALL should not be hidden in postgresql.conf.sample.
*/
if ((gconf->flags & GUC_NO_SHOW_ALL) &&
!(gconf->flags & GUC_NOT_IN_SAMPLE))
{
{"icu_validation_level", PGC_USERSET, CLIENT_CONN_LOCALE,
- gettext_noop("Log level for reporting invalid ICU locale strings."),
- NULL
+ gettext_noop("Log level for reporting invalid ICU locale strings."),
+ NULL
},
&icu_validation_level,
WARNING, icu_validation_level_options,
if (DsaPointerIsValid(pool->spans[1]))
{
dsa_area_span *head = (dsa_area_span *)
- dsa_get_address(area, pool->spans[1]);
+ dsa_get_address(area, pool->spans[1]);
head->prevspan = span_pointer;
}
if (segment_map->header->next != DSA_SEGMENT_INDEX_NONE)
{
dsa_segment_map *next =
- get_segment_by_index(area, segment_map->header->next);
+ get_segment_by_index(area, segment_map->header->next);
Assert(next->header->bin == segment_map->header->bin);
next->header->prev = new_index;
if (!relptr_is_null(fpm->freelist[list]))
{
FreePageSpanLeader *candidate =
- relptr_access(base, fpm->freelist[list]);
+ relptr_access(base, fpm->freelist[list]);
do
{
*
* We don't buffer the information about all memory contexts in a
* backend into StringInfo and log it as one message. That would
- * require the buffer to be enlarged, risking an OOM as there could
- * be a large number of memory contexts in a backend. Instead, we
- * log one message per memory context.
+ * require the buffer to be enlarged, risking an OOM as there could be
+ * a large number of memory contexts in a backend. Instead, we log
+ * one message per memory context.
*/
ereport(LOG_SERVER_ONLY,
(errhidestmt(true),
while (ResourceArrayGetAny(&(owner->cryptohasharr), &foundres))
{
pg_cryptohash_ctx *context =
- (pg_cryptohash_ctx *) DatumGetPointer(foundres);
+ (pg_cryptohash_ctx *) DatumGetPointer(foundres);
if (isCommit)
PrintCryptoHashLeakWarning(foundres);
/*
* We were able to accumulate all the tuples required for output
* in memory, using a heap to eliminate excess tuples. Now we
- * have to transform the heap to a properly-sorted array.
- * Note that sort_bounded_heap sets the correct state->status.
+ * have to transform the heap to a properly-sorted array. Note
+ * that sort_bounded_heap sets the correct state->status.
*/
sort_bounded_heap(state);
state->current = 0;
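sort_bounded_heap() finishes a bounded (top-k) sort: during input the k best tuples are kept in a heap that evicts excess values, and at the end the heap is unwound into a fully sorted array. A self-contained toy version of both phases on plain ints, using a max-heap of the k smallest values (illustrative only, not tuplesort's code):

    #include <stdio.h>

    static void
    sift_down(int *h, int n, int i)
    {
        for (;;)
        {
            int l = 2 * i + 1, r = l + 1, big = i, tmp;

            if (l < n && h[l] > h[big]) big = l;
            if (r < n && h[r] > h[big]) big = r;
            if (big == i) return;
            tmp = h[i]; h[i] = h[big]; h[big] = tmp;
            i = big;
        }
    }

    int
    main(void)
    {
        int input[] = {9, 1, 8, 2, 7, 3, 6, 4, 5};
        int k = 4, heap[4], n = 0;

        /* phase 1: keep the k smallest values in a max-heap */
        for (int i = 0; i < 9; i++)
        {
            if (n < k)
            {
                heap[n++] = input[i];
                for (int j = n / 2 - 1; j >= 0; j--) sift_down(heap, n, j);
            }
            else if (input[i] < heap[0])
            {
                heap[0] = input[i];         /* evict current max */
                sift_down(heap, n, 0);
            }
        }
        /* phase 2: transform heap to sorted array, pulling max to the back */
        for (int end = n - 1; end > 0; end--)
        {
            int tmp = heap[0]; heap[0] = heap[end]; heap[end] = tmp;
            sift_down(heap, end, 0);
        }
        for (int i = 0; i < n; i++) printf("%d ", heap[i]);    /* 1 2 3 4 */
        printf("\n");
        return 0;
    }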
int bucket = (oldSnapshotControl->head_offset
+ ((ts - oldSnapshotControl->head_timestamp)
/ USECS_PER_MINUTE))
- % OLD_SNAPSHOT_TIME_MAP_ENTRIES;
+ % OLD_SNAPSHOT_TIME_MAP_ENTRIES;
if (TransactionIdPrecedes(oldSnapshotControl->xid_by_minute[bucket], xmin))
oldSnapshotControl->xid_by_minute[bucket] = xmin;
/* Extend map to unused entry. */
int new_tail = (oldSnapshotControl->head_offset
+ oldSnapshotControl->count_used)
- % OLD_SNAPSHOT_TIME_MAP_ENTRIES;
+ % OLD_SNAPSHOT_TIME_MAP_ENTRIES;
oldSnapshotControl->count_used++;
oldSnapshotControl->xid_by_minute[new_tail] = xmin;
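Both expressions above are circular-buffer arithmetic: a minute offset from head_timestamp, or count_used, is added to head_offset and wrapped with the modulo so the map reuses its fixed array of slots. A worked example with a 10-entry map (all numbers invented):

    #include <stdio.h>

    #define ENTRIES 10

    int
    main(void)
    {
        int  head_offset = 7;   /* oldest entry lives at slot 7 */
        long minutes = 5;       /* (ts - head_timestamp) / USECS_PER_MINUTE */
        int  count_used = 8;

        int bucket = (head_offset + minutes) % ENTRIES;
        int new_tail = (head_offset + count_used) % ENTRIES;

        /* prints "bucket=2 new_tail=5": both wrap past the array end */
        printf("bucket=%d new_tail=%d\n", bucket, new_tail);
        return 0;
    }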
if (serialized_snapshot.subxcnt > 0)
{
Size subxipoff = sizeof(SerializedSnapshotData) +
- snapshot->xcnt * sizeof(TransactionId);
+ snapshot->xcnt * sizeof(TransactionId);
memcpy((TransactionId *) (start_address + subxipoff),
snapshot->subxip, snapshot->subxcnt * sizeof(TransactionId));
setup_auth(FILE *cmdfd)
{
/*
- * The authid table shouldn't be readable except through views, to
- * ensure passwords are not publicly visible.
+ * The authid table shouldn't be readable except through views, to ensure
+ * passwords are not publicly visible.
*/
PG_CMD_PUTS("REVOKE ALL ON pg_authid FROM public;\n\n");
" STRATEGY = file_copy;\n\n");
/*
- * template0 shouldn't have any collation-dependent objects, so unset
- * the collation version. This disables collation version checks when
- * making a new database from it.
+ * template0 shouldn't have any collation-dependent objects, so unset the
+ * collation version. This disables collation version checks when making
+ * a new database from it.
*/
PG_CMD_PUTS("UPDATE pg_database SET datcollversion = NULL WHERE datname = 'template0';\n\n");
PG_CMD_PUTS("UPDATE pg_database SET datcollversion = pg_database_collation_actual_version(oid) WHERE datname = 'template1';\n\n");
/*
- * Explicitly revoke public create-schema and create-temp-table
- * privileges in template1 and template0; else the latter would be on
- * by default
+ * Explicitly revoke public create-schema and create-temp-table privileges
+ * in template1 and template0; else the latter would be on by default
*/
PG_CMD_PUTS("REVOKE CREATE,TEMPORARY ON DATABASE template1 FROM public;\n\n");
PG_CMD_PUTS("REVOKE CREATE,TEMPORARY ON DATABASE template0 FROM public;\n\n");
icu_language_tag(const char *loc_str)
{
#ifdef USE_ICU
- UErrorCode status;
- char lang[ULOC_LANG_CAPACITY];
- char *langtag;
- size_t buflen = 32; /* arbitrary starting buffer size */
- const bool strict = true;
+ UErrorCode status;
+ char lang[ULOC_LANG_CAPACITY];
+ char *langtag;
+ size_t buflen = 32; /* arbitrary starting buffer size */
+ const bool strict = true;
status = U_ZERO_ERROR;
uloc_getLanguage(loc_str, lang, ULOC_LANG_CAPACITY, &status);
return pstrdup("en-US-u-va-posix");
/*
- * A BCP47 language tag doesn't have a clearly-defined upper limit
- * (cf. RFC5646 section 4.4). Additionally, in older ICU versions,
+ * A BCP47 language tag doesn't have a clearly-defined upper limit (cf.
+ * RFC5646 section 4.4). Additionally, in older ICU versions,
* uloc_toLanguageTag() doesn't always return the ultimate length on the
* first call, necessitating a loop.
*/
return langtag;
#else
pg_fatal("ICU is not supported in this build");
- return NULL; /* keep compiler quiet */
+ return NULL; /* keep compiler quiet */
#endif
}
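The growth loop the comment refers to is elided from this hunk; below is a hedged sketch of the usual pattern around uloc_toLanguageTag(), which reports the length it needs so the caller can retry. The doubling policy here is an assumption, not the literal initdb code.

	langtag = pg_malloc(buflen);
	for (;;)
	{
		int32_t		len;

		status = U_ZERO_ERROR;
		len = uloc_toLanguageTag(loc_str, langtag, buflen, strict, &status);

		/* older ICU versions may under-report the length on the first call */
		if (status == U_BUFFER_OVERFLOW_ERROR ||
			(U_SUCCESS(status) && len >= buflen))
		{
			buflen = (buflen * 2 > len + 1) ? buflen * 2 : len + 1;
			langtag = pg_realloc(langtag, buflen);
			continue;
		}
		break;
	}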
icu_validate_locale(const char *loc_str)
{
#ifdef USE_ICU
- UErrorCode status;
- char lang[ULOC_LANG_CAPACITY];
- bool found = false;
+ UErrorCode status;
+ char lang[ULOC_LANG_CAPACITY];
+ bool found = false;
/* validate that we can extract the language */
status = U_ZERO_ERROR;
/* search for matching language within ICU */
for (int32_t i = 0; !found && i < uloc_countAvailable(); i++)
{
- const char *otherloc = uloc_getAvailable(i);
- char otherlang[ULOC_LANG_CAPACITY];
+ const char *otherloc = uloc_getAvailable(i);
+ char otherlang[ULOC_LANG_CAPACITY];
status = U_ZERO_ERROR;
uloc_getLanguage(otherloc, otherlang, ULOC_LANG_CAPACITY, &status);
default_icu_locale(void)
{
#ifdef USE_ICU
- UCollator *collator;
- UErrorCode status;
- const char *valid_locale;
- char *default_locale;
+ UCollator *collator;
+ UErrorCode status;
+ const char *valid_locale;
+ char *default_locale;
status = U_ZERO_ERROR;
collator = ucol_open(NULL, &status);
if (locale_provider == COLLPROVIDER_ICU)
{
- char *langtag;
+ char *langtag;
/* acquire default locale from the environment, if not specified */
if (icu_locale == NULL)
{
command_ok(
[
- 'initdb', '--no-sync',
+ 'initdb', '--no-sync',
'--locale-provider=icu', '--icu-locale=en',
"$tempdir/data3"
],
command_fails_like(
[
- 'initdb', '--no-sync',
+ 'initdb', '--no-sync',
'--locale-provider=icu', '--icu-locale=@colNumeric=lower',
"$tempdir/dataX"
],
command_fails_like(
[
- 'initdb', '--no-sync',
+ 'initdb', '--no-sync',
'--locale-provider=icu', '--encoding=SQL_ASCII',
'--icu-locale=en', "$tempdir/dataX"
],
command_fails_like(
[
- 'initdb', '--no-sync',
- '--locale-provider=icu',
- '--icu-locale=nonsense-nowhere', "$tempdir/dataX"
+ 'initdb', '--no-sync',
+ '--locale-provider=icu', '--icu-locale=nonsense-nowhere',
+ "$tempdir/dataX"
],
qr/error: locale "nonsense-nowhere" has unknown language "nonsense"/,
'fails for nonsense language');
command_fails_like(
[
- 'initdb', '--no-sync',
- '--locale-provider=icu',
- '--icu-locale=@colNumeric=lower', "$tempdir/dataX"
+ 'initdb', '--no-sync',
+ '--locale-provider=icu', '--icu-locale=@colNumeric=lower',
+ "$tempdir/dataX"
],
qr/could not open collator for locale "und-u-kn-lower": U_ILLEGAL_ARGUMENT_ERROR/,
'fails for invalid collation argument');
command_fails(
[
- 'initdb', '--no-sync',
+ 'initdb', '--no-sync',
'--locale-provider=libc', '--icu-locale=en',
"$tempdir/dataX"
],
$node->command_checks_all(
[
'pg_amcheck', '--no-strict-names',
- '-t', 'this.is.a.really.long.dotted.string'
+ '-t', 'this.is.a.really.long.dotted.string'
],
2,
[qr/^$/],
$node->command_checks_all(
[
'pg_amcheck', '--no-strict-names',
- '-t', 'no_such_table',
- '-t', 'no*such*table',
- '-i', 'no_such_index',
- '-i', 'no*such*index',
- '-r', 'no_such_relation',
- '-r', 'no*such*relation',
- '-d', 'no_such_database',
- '-d', 'no*such*database',
- '-r', 'none.none',
- '-r', 'none.none.none',
- '-r', 'postgres.none.none',
- '-r', 'postgres.pg_catalog.none',
- '-r', 'postgres.none.pg_class',
- '-t', 'postgres.pg_catalog.pg_class', # This exists
+ '-t', 'no_such_table',
+ '-t', 'no*such*table',
+ '-i', 'no_such_index',
+ '-i', 'no*such*index',
+ '-r', 'no_such_relation',
+ '-r', 'no*such*relation',
+ '-d', 'no_such_database',
+ '-d', 'no*such*database',
+ '-r', 'none.none',
+ '-r', 'none.none.none',
+ '-r', 'postgres.none.none',
+ '-r', 'postgres.pg_catalog.none',
+ '-r', 'postgres.none.pg_class',
+ '-t', 'postgres.pg_catalog.pg_class', # This exists
],
0,
[qr/^$/],
$node->command_checks_all(
[
'pg_amcheck', '-d',
- 'postgres', '--no-strict-names',
- '-t', 'template1.public.foo',
- '-t', 'another_db.public.foo',
- '-t', 'no_such_database.public.foo',
- '-i', 'template1.public.foo_idx',
- '-i', 'another_db.public.foo_idx',
- '-i', 'no_such_database.public.foo_idx',
+ 'postgres', '--no-strict-names',
+ '-t', 'template1.public.foo',
+ '-t', 'another_db.public.foo',
+ '-t', 'no_such_database.public.foo',
+ '-i', 'template1.public.foo_idx',
+ '-i', 'another_db.public.foo_idx',
+ '-i', 'no_such_database.public.foo_idx',
],
1,
[qr/^$/],
$node->command_checks_all(
[
'pg_amcheck', '--all', '--no-strict-names', '-S',
- 'public', '-S', 'pg_catalog', '-S',
- 'pg_toast', '-S', 'information_schema',
+ 'public', '-S', 'pg_catalog', '-S',
+ 'pg_toast', '-S', 'information_schema',
],
1,
[qr/^$/],
# Check with schema exclusion patterns overriding relation and schema inclusion patterns
$node->command_checks_all(
[
- 'pg_amcheck', '--all', '--no-strict-names', '-s',
- 'public', '-s', 'pg_catalog', '-s',
- 'pg_toast', '-s', 'information_schema', '-t',
+ 'pg_amcheck', '--all', '--no-strict-names', '-s',
+ 'public', '-s', 'pg_catalog', '-s',
+ 'pg_toast', '-s', 'information_schema', '-t',
'pg_catalog.pg_class', '-S*'
],
1,
my @cmd = ('pg_amcheck', '-p', $port);
# Regular expressions to match the various expected outputs
-my $no_output_re = qr/^$/;
+my $no_output_re = qr/^$/;
my $line_pointer_corruption_re = qr/line pointer/;
my $missing_file_re = qr/could not open file ".*": No such file or directory/;
my $index_missing_relation_fork_re =
@_ = unpack(HEAPTUPLE_PACK_CODE, $buffer);
%tup = (
- t_xmin => shift,
- t_xmax => shift,
- t_field3 => shift,
- bi_hi => shift,
- bi_lo => shift,
- ip_posid => shift,
- t_infomask2 => shift,
- t_infomask => shift,
- t_hoff => shift,
- t_bits => shift,
- a_1 => shift,
- a_2 => shift,
- b_header => shift,
- b_body1 => shift,
- b_body2 => shift,
- b_body3 => shift,
- b_body4 => shift,
- b_body5 => shift,
- b_body6 => shift,
- b_body7 => shift,
- c_va_header => shift,
- c_va_vartag => shift,
- c_va_rawsize => shift,
- c_va_extinfo => shift,
- c_va_valueid => shift,
+ t_xmin => shift,
+ t_xmax => shift,
+ t_field3 => shift,
+ bi_hi => shift,
+ bi_lo => shift,
+ ip_posid => shift,
+ t_infomask2 => shift,
+ t_infomask => shift,
+ t_hoff => shift,
+ t_bits => shift,
+ a_1 => shift,
+ a_2 => shift,
+ b_header => shift,
+ b_body1 => shift,
+ b_body2 => shift,
+ b_body3 => shift,
+ b_body4 => shift,
+ b_body5 => shift,
+ b_body6 => shift,
+ b_body7 => shift,
+ c_va_header => shift,
+ c_va_vartag => shift,
+ c_va_rawsize => shift,
+ c_va_extinfo => shift,
+ c_va_valueid => shift,
c_va_toastrelid => shift);
# Stitch together the text for column 'b'
$tup{b} = join('', map { chr($tup{"b_body$_"}) } (1 .. 7));
my ($fh, $offset, $tup) = @_;
my $buffer = pack(
HEAPTUPLE_PACK_CODE,
- $tup->{t_xmin}, $tup->{t_xmax},
- $tup->{t_field3}, $tup->{bi_hi},
- $tup->{bi_lo}, $tup->{ip_posid},
- $tup->{t_infomask2}, $tup->{t_infomask},
- $tup->{t_hoff}, $tup->{t_bits},
- $tup->{a_1}, $tup->{a_2},
- $tup->{b_header}, $tup->{b_body1},
- $tup->{b_body2}, $tup->{b_body3},
- $tup->{b_body4}, $tup->{b_body5},
- $tup->{b_body6}, $tup->{b_body7},
- $tup->{c_va_header}, $tup->{c_va_vartag},
+ $tup->{t_xmin}, $tup->{t_xmax},
+ $tup->{t_field3}, $tup->{bi_hi},
+ $tup->{bi_lo}, $tup->{ip_posid},
+ $tup->{t_infomask2}, $tup->{t_infomask},
+ $tup->{t_hoff}, $tup->{t_bits},
+ $tup->{a_1}, $tup->{a_2},
+ $tup->{b_header}, $tup->{b_body1},
+ $tup->{b_body2}, $tup->{b_body3},
+ $tup->{b_body4}, $tup->{b_body5},
+ $tup->{b_body6}, $tup->{b_body7},
+ $tup->{c_va_header}, $tup->{c_va_vartag},
$tup->{c_va_rawsize}, $tup->{c_va_extinfo},
$tup->{c_va_valueid}, $tup->{c_va_toastrelid});
sysseek($fh, $offset, 0)
# Start the node and load the extensions. We depend on both
# amcheck and pageinspect for this test.
$node->start;
-my $port = $node->port;
+my $port = $node->port;
my $pgdata = $node->data_dir;
$node->safe_psql('postgres', "CREATE EXTENSION amcheck");
$node->safe_psql('postgres', "CREATE EXTENSION pageinspect");
my $ENDIANNESS;
for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++)
{
- my $offnum = $tupidx + 1; # offnum is 1-based, not zero-based
+ my $offnum = $tupidx + 1; # offnum is 1-based, not zero-based
my $offset = $lp_off[$tupidx];
- next if $offset == -1; # ignore redirect line pointers
+ next if $offset == -1; # ignore redirect line pointers
my $tup = read_tuple($file, $offset);
# Sanity-check that the data appears on the page where we expect.
my $a_1 = $tup->{a_1};
my $a_2 = $tup->{a_2};
- my $b = $tup->{b};
+ my $b = $tup->{b};
if ($a_1 != 0xDEADF9F9 || $a_2 != 0xDEADF9F9 || $b ne 'abcdefg')
{
close($file); # ignore errors on close; we're exiting anyway
$node->clean_node;
plan skip_all =>
sprintf(
- "Page layout of index %d differs from our expectations: expected (%x, %x, \"%s\"), got (%x, %x, \"%s\")", $tupidx,
- 0xDEADF9F9, 0xDEADF9F9, "abcdefg", $a_1, $a_2, $b);
+ "Page layout of index %d differs from our expectations: expected (%x, %x, \"%s\"), got (%x, %x, \"%s\")",
+ $tupidx, 0xDEADF9F9, 0xDEADF9F9, "abcdefg", $a_1, $a_2, $b);
exit;
}
$node->stop;
# Some #define constants from access/htup_details.h for use while corrupting.
-use constant HEAP_HASNULL => 0x0001;
+use constant HEAP_HASNULL => 0x0001;
use constant HEAP_XMAX_LOCK_ONLY => 0x0080;
use constant HEAP_XMIN_COMMITTED => 0x0100;
-use constant HEAP_XMIN_INVALID => 0x0200;
+use constant HEAP_XMIN_INVALID => 0x0200;
use constant HEAP_XMAX_COMMITTED => 0x0400;
-use constant HEAP_XMAX_INVALID => 0x0800;
-use constant HEAP_NATTS_MASK => 0x07FF;
-use constant HEAP_XMAX_IS_MULTI => 0x1000;
-use constant HEAP_KEYS_UPDATED => 0x2000;
-use constant HEAP_HOT_UPDATED => 0x4000;
-use constant HEAP_ONLY_TUPLE => 0x8000;
-use constant HEAP_UPDATED => 0x2000;
+use constant HEAP_XMAX_INVALID => 0x0800;
+use constant HEAP_NATTS_MASK => 0x07FF;
+use constant HEAP_XMAX_IS_MULTI => 0x1000;
+use constant HEAP_KEYS_UPDATED => 0x2000;
+use constant HEAP_HOT_UPDATED => 0x4000;
+use constant HEAP_ONLY_TUPLE => 0x8000;
+use constant HEAP_UPDATED => 0x2000;
# Helper function to generate a regular expression matching the header we
# expect verify_heapam() to return given which fields we expect to be non-null.
for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++)
{
- my $offnum = $tupidx + 1; # offnum is 1-based, not zero-based
+ my $offnum = $tupidx + 1; # offnum is 1-based, not zero-based
my $offset = $lp_off[$tupidx];
my $header = header(0, $offnum, undef);
# Corrupt the tuple to look like it has lots of attributes, some of
# them null. This falsely creates the impression that the t_bits
# array is longer than just one byte, but t_hoff still says otherwise.
- $tup->{t_infomask} |= HEAP_HASNULL;
+ $tup->{t_infomask} |= HEAP_HASNULL;
$tup->{t_infomask2} |= HEAP_NATTS_MASK;
$tup->{t_bits} = 0xAA;
elsif ($offnum == 11)
{
# Same as above, but this time t_hoff plays along
- $tup->{t_infomask} |= HEAP_HASNULL;
+ $tup->{t_infomask} |= HEAP_HASNULL;
$tup->{t_infomask2} |= (HEAP_NATTS_MASK & 0x40);
$tup->{t_bits} = 0xAA;
$tup->{t_hoff} = 32;
# bytes with 0xFF using 0x3FFFFFFF.
#
$tup->{b_header} = $ENDIANNESS eq 'little' ? 0xFC : 0x3F;
- $tup->{b_body1} = 0xFF;
- $tup->{b_body2} = 0xFF;
- $tup->{b_body3} = 0xFF;
+ $tup->{b_body1} = 0xFF;
+ $tup->{b_body2} = 0xFF;
+ $tup->{b_body3} = 0xFF;
$header = header(0, $offnum, 1);
push @expected,
# at offnum 19 we will unset HEAP_ONLY_TUPLE flag
die "offnum $offnum should be a redirect" if defined $tup;
push @expected,
- qr/${header}redirected line pointer points to a non-heap-only tuple at offset \d+/;
+ qr/${header}redirected line pointer points to a non-heap-only tuple at offset \d+/;
}
elsif ($offnum == 18)
{
die "offnum $offnum should be a redirect" if defined $tup;
sysseek($file, 92, 0) or BAIL_OUT("sysseek failed: $!");
syswrite($file,
- pack("L", $ENDIANNESS eq 'little' ? 0x00010011 : 0x00230000))
- or BAIL_OUT("syswrite failed: $!");
+ pack("L", $ENDIANNESS eq 'little' ? 0x00010011 : 0x00230000))
+ or BAIL_OUT("syswrite failed: $!");
push @expected,
qr/${header}redirected line pointer points to another redirected line pointer at offset \d+/;
}
# rewrite line pointer with lp.off = 25, lp_flags = 2, lp_len = 0
sysseek($file, 108, 0) or BAIL_OUT("sysseek failed: $!");
syswrite($file,
- pack("L", $ENDIANNESS eq 'little' ? 0x00010019 : 0x00330000))
- or BAIL_OUT("syswrite failed: $!");
+ pack("L", $ENDIANNESS eq 'little' ? 0x00010019 : 0x00330000))
+ or BAIL_OUT("syswrite failed: $!");
push @expected,
qr/${header}redirect line pointer points to offset \d+, but offset \d+ also points there/;
}
[ 'pg_amcheck', '--no-dependent-indexes', '-p', $port, 'postgres' ],
2, [@expected], [], 'Expected corruption message output');
$node->safe_psql(
- 'postgres', qq(
+ 'postgres', qq(
COMMIT PREPARED 'in_progress_tx';
));
my @walfiles = (
'00000001000000370000000C.gz', '00000001000000370000000D',
- '00000001000000370000000E', '00000001000000370000000F.partial',);
+ '00000001000000370000000E', '00000001000000370000000F.partial',);
sub create_files
{
{
# like command_like but checking stderr
my $stderr;
- my $result = IPC::Run::run [ 'pg_archivecleanup', '-d', '-n', $tempdir,
- $walfiles[2] ], '2>', \$stderr;
+ my $result =
+ IPC::Run::run [ 'pg_archivecleanup', '-d', '-n', $tempdir,
+ $walfiles[2] ],
+ '2>', \$stderr;
ok($result, "pg_archivecleanup dry run: exit code 0");
like(
$stderr,
return;
}
-run_check('', 'pg_archivecleanup');
-run_check('.partial', 'pg_archivecleanup with .partial file');
+run_check('', 'pg_archivecleanup');
+run_check('.partial', 'pg_archivecleanup with .partial file');
run_check('.00000020.backup', 'pg_archivecleanup with .backup file');
done_testing();
/*
* All tablespaces are created with absolute directories, so specifying a
- * non-absolute path here would just never match, possibly confusing users.
- * Since we don't know whether the remote side is Windows or not, and it
- * might be different than the local side, permit any path that could be
- * absolute under either set of rules.
+ * non-absolute path here would just never match, possibly confusing
+ * users. Since we don't know whether the remote side is Windows or not,
+ * and it might be different than the local side, permit any path that
+ * could be absolute under either set of rules.
*
* (There is little practical risk of confusion here, because someone
* running entirely on Linux isn't likely to have a relative path that
* begins with a backslash or something that looks like a drive
- * specification. If they do, and they also incorrectly believe that
- * a relative path is acceptable here, we'll silently fail to warn them
- * of their mistake, and the -T option will just not get applied, same
- * as if they'd specified -T for a nonexistent tablespace.)
+ * specification. If they do, and they also incorrectly believe that a
+ * relative path is acceptable here, we'll silently fail to warn them of
+ * their mistake, and the -T option will just not get applied, same as if
+ * they'd specified -T for a nonexistent tablespace.)
*/
if (!is_nonwindows_absolute_path(cell->old_dir) &&
!is_windows_absolute_path(cell->old_dir))
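For reference, a hedged sketch of what the two predicates in this condition plausibly test; the real helpers in pg_basebackup may differ in detail:

	#include <ctype.h>
	#include <stdbool.h>

	/* POSIX rule: absolute iff the path begins with a slash */
	static bool
	is_nonwindows_absolute_path(const char *path)
	{
		return path[0] == '/';
	}

	/* Windows rule: a drive letter plus colon, or a rooted (back)slash */
	static bool
	is_windows_absolute_path(const char *path)
	{
		if (isalpha((unsigned char) path[0]) && path[1] == ':')
			return true;
		return path[0] == '/' || path[0] == '\\';
	}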
static char *basedir = NULL;
static int verbose = 0;
static int compresslevel = 0;
-static bool noloop = false;
+static bool noloop = false;
static int standby_message_timeout = 10 * 1000; /* 10 sec = default */
static volatile sig_atomic_t time_to_stop = false;
static bool do_create_slot = false;
use strict;
use warnings;
use File::Basename qw(basename dirname);
-use File::Path qw(rmtree);
+use File::Path qw(rmtree);
use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;
# Initialize node without replication settings
$node->init(
- extra => ['--data-checksums'],
+ extra => ['--data-checksums'],
auth_extra => [ '--create-role', 'backupuser' ]);
$node->start;
my $pgdata = $node->data_dir;
'gzip:long',
'invalid compression specification: compression algorithm "gzip" does not support long-distance mode',
'failure on long mode for gzip'
- ],
- );
+ ],);
for my $cft (@compression_failure_tests)
{
my $sfail = quotemeta($server_fails . $cft->[1]);
$node->command_fails_like(
[
- 'pg_basebackup', '-D',
+ 'pg_basebackup', '-D',
"$tempdir/backup", '--compress',
$cft->[0]
],
'client ' . $cft->[2]);
$node->command_fails_like(
[
- 'pg_basebackup', '-D',
+ 'pg_basebackup', '-D',
"$tempdir/backup", '--compress',
'server-' . $cft->[0]
],
# Make sure main and init forks exist
ok(-f "$pgdata/${baseUnloggedPath}_init", 'unlogged init fork in base');
-ok(-f "$pgdata/$baseUnloggedPath", 'unlogged main fork in base');
+ok(-f "$pgdata/$baseUnloggedPath", 'unlogged main fork in base');
# Create files that look like temporary relations to ensure they are ignored.
my $postgresOid = $node->safe_psql('postgres',
$node->command_ok(
[ @pg_basebackup_defs, '-D', "$tempdir/backup", '-X', 'none' ],
'pg_basebackup runs');
-ok(-f "$tempdir/backup/PG_VERSION", 'backup was created');
+ok(-f "$tempdir/backup/PG_VERSION", 'backup was created');
ok(-f "$tempdir/backup/backup_manifest", 'backup manifest included');
# Permissions on backup should be default
$node->command_ok(
[
@pg_basebackup_defs, '-D',
- "$tempdir/backup2", '--no-manifest',
- '--waldir', "$tempdir/xlog2"
+ "$tempdir/backup2", '--no-manifest',
+ '--waldir', "$tempdir/xlog2"
],
'separate xlog directory');
-ok(-f "$tempdir/backup2/PG_VERSION", 'backup was created');
+ok(-f "$tempdir/backup2/PG_VERSION", 'backup was created');
ok(!-f "$tempdir/backup2/backup_manifest", 'manifest was suppressed');
-ok(-d "$tempdir/xlog2/", 'xlog directory was created');
+ok(-d "$tempdir/xlog2/", 'xlog directory was created');
rmtree("$tempdir/backup2");
rmtree("$tempdir/xlog2");
# to our physical temp location. That way we can use shorter names
# for the tablespace directories, which hopefully won't run afoul of
# the 99 character length limit.
-my $sys_tempdir = PostgreSQL::Test::Utils::tempdir_short;
+my $sys_tempdir = PostgreSQL::Test::Utils::tempdir_short;
my $real_sys_tempdir = "$sys_tempdir/tempdir";
dir_symlink "$tempdir", $real_sys_tempdir;
$node->safe_psql('postgres',
"CREATE TABLESPACE tblspc1 LOCATION '$realTsDir';");
$node->safe_psql('postgres',
- "CREATE TABLE test1 (a int) TABLESPACE tblspc1;"
+ "CREATE TABLE test1 (a int) TABLESPACE tblspc1;"
. "INSERT INTO test1 VALUES (1234);");
$node->backup('tarbackup2', backup_options => ['-Ft']);
# empty test1, just so that it's different from the to-be-restored data
# basic checks on the output
my $backupdir = $node->backup_dir . '/tarbackup2';
-ok(-f "$backupdir/base.tar", 'backup tar was created');
+ok(-f "$backupdir/base.tar", 'backup tar was created');
ok(-f "$backupdir/pg_wal.tar", 'WAL tar was created');
my @tblspc_tars = glob "$backupdir/[0-9]*.tar";
is(scalar(@tblspc_tars), 1, 'one tablespace tar was created');
$node2->init_from_backup($node, 'tarbackup2', tar_program => $tar);
# Recover tablespace into a new directory (not where it was!)
- my $repTsDir = "$tempdir/tblspc1replica";
+ my $repTsDir = "$tempdir/tblspc1replica";
my $realRepTsDir = "$real_sys_tempdir/tblspc1replica";
mkdir $repTsDir;
PostgreSQL::Test::Utils::system_or_bail($tar, 'xf', $tblspc_tars[0],
# Update tablespace map to point to new directory.
# XXX Ideally pg_basebackup would handle this.
$tblspc_tars[0] =~ m|/([0-9]*)\.tar$|;
- my $tblspcoid = $1;
+ my $tblspcoid = $1;
my $escapedRepTsDir = $realRepTsDir;
$escapedRepTsDir =~ s/\\/\\\\/g;
open my $mapfile, '>', $node2->data_dir . '/tablespace_map';
$node->command_ok(
[
@pg_basebackup_defs, '-D',
- "$tempdir/backup1", '-Fp',
+ "$tempdir/backup1", '-Fp',
"-T$realTsDir=$tempdir/tbackup/tblspc1",
],
'plain format with tablespaces succeeds with tablespace mapping');
$node->command_ok(
[
@pg_basebackup_defs, '-D',
- "$tempdir/backup3", '-Fp',
+ "$tempdir/backup3", '-Fp',
"-T$realTsDir=$tempdir/tbackup/tbl\\=spc2",
],
'mapping tablespace with = sign in path');
$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/backupR", '-R' ],
'pg_basebackup -R runs');
ok(-f "$tempdir/backupR/postgresql.auto.conf", 'postgresql.auto.conf exists');
-ok(-f "$tempdir/backupR/standby.signal", 'standby.signal was created');
+ok(-f "$tempdir/backupR/standby.signal", 'standby.signal was created');
my $recovery_conf = slurp_file "$tempdir/backupR/postgresql.auto.conf";
rmtree("$tempdir/backupR");
rmtree("$tempdir/backupxst");
$node->command_ok(
[
- @pg_basebackup_defs, '-D',
+ @pg_basebackup_defs, '-D',
"$tempdir/backupnoslot", '-X',
- 'stream', '--no-slot'
+ 'stream', '--no-slot'
],
'pg_basebackup -X stream runs with --no-slot');
rmtree("$tempdir/backupnoslot");
$node->command_fails_like(
[
@pg_basebackup_defs, '--target', 'blackhole', '-X',
- 'none', '-D', "$tempdir/blackhole"
+ 'none', '-D', "$tempdir/blackhole"
],
qr/cannot specify both output directory and backup target/,
'backup target and output directory');
'backup target blackhole');
$node->command_ok(
[
- @pg_basebackup_defs, '--target',
+ @pg_basebackup_defs, '--target',
"server:$tempdir/backuponserver", '-X',
'none'
],
$node->command_fails(
[
- @pg_basebackup_defs, '-D',
+ @pg_basebackup_defs, '-D',
"$tempdir/backupxs_sl_fail", '-X',
- 'stream', '-S',
+ 'stream', '-S',
'slot0'
],
'pg_basebackup fails with nonexistent replication slot');
$node->command_fails(
[
- @pg_basebackup_defs, '-D',
+ @pg_basebackup_defs, '-D',
"$tempdir/backupxs_slot", '-C',
- '-S', 'slot0',
+ '-S', 'slot0',
'--no-slot'
],
'pg_basebackup fails with -C -S --no-slot');
$node->command_fails(
[
- @pg_basebackup_defs, '-D',
+ @pg_basebackup_defs, '-D',
"$tempdir/backupxs_sl_fail", '-X',
- 'stream', '-S',
+ 'stream', '-S',
'slot0'
],
'pg_basebackup fails with nonexistent replication slot');
$node->command_fails(
[
- @pg_basebackup_defs, '-D',
+ @pg_basebackup_defs, '-D',
"$tempdir/backupxs_slot", '-C',
- '-S', 'slot0',
+ '-S', 'slot0',
'--no-slot'
],
'pg_basebackup fails with -C -S --no-slot');
$node->command_ok(
[
- @pg_basebackup_defs, '-D',
+ @pg_basebackup_defs, '-D',
"$tempdir/backupxs_slot", '-C',
- '-S', 'slot0'
+ '-S', 'slot0'
],
'pg_basebackup -C runs');
rmtree("$tempdir/backupxs_slot");
$node->command_fails(
[
- @pg_basebackup_defs, '-D',
+ @pg_basebackup_defs, '-D',
"$tempdir/backupxs_slot1", '-C',
- '-S', 'slot0'
+ '-S', 'slot0'
],
'pg_basebackup fails with -C -S and a previously existing slot');
$node->command_fails(
[
@pg_basebackup_defs, '-D', "$tempdir/fail", '-S',
- 'slot1', '-X', 'none'
+ 'slot1', '-X', 'none'
],
'pg_basebackup with replication slot fails without WAL streaming');
$node->command_ok(
[
@pg_basebackup_defs, '-D', "$tempdir/backupxs_sl", '-X',
- 'stream', '-S', 'slot1'
+ 'stream', '-S', 'slot1'
],
'pg_basebackup -X stream with replication slot runs');
$lsn = $node->safe_psql('postgres',
$node->command_ok(
[
@pg_basebackup_defs, '-D', "$tempdir/backupxs_sl_R", '-X',
- 'stream', '-S', 'slot1', '-R',
+ 'stream', '-S', 'slot1', '-R',
],
'pg_basebackup with replication slot and -R runs');
like(
# do not verify checksums, should return ok
$node->command_ok(
[
- @pg_basebackup_defs, '-D',
+ @pg_basebackup_defs, '-D',
"$tempdir/backup_corrupt4", '--no-verify-checksums',
],
'pg_basebackup with -k does not report checksum mismatch');
$node->command_ok(
[
- @pg_basebackup_defs, '-D',
+ @pg_basebackup_defs, '-D',
"$tempdir/backup_gzip", '--compress',
- '1', '--format',
+ '1', '--format',
't'
],
'pg_basebackup with --compress');
$node->command_ok(
[
- @pg_basebackup_defs, '-D',
+ @pg_basebackup_defs, '-D',
"$tempdir/backup_gzip2", '--gzip',
- '--format', 't'
+ '--format', 't'
],
'pg_basebackup with --gzip');
$node->command_ok(
[
- @pg_basebackup_defs, '-D',
+ @pg_basebackup_defs, '-D',
"$tempdir/backup_gzip3", '--compress',
- 'gzip:1', '--format',
+ 'gzip:1', '--format',
't'
],
'pg_basebackup with --compress=gzip:1');
my $sigchld_bb = IPC::Run::start(
[
@pg_basebackup_defs, '--wal-method=stream',
- '-D', "$tempdir/sigchld",
- '--max-rate=32', '-d',
+ '-D', "$tempdir/sigchld",
+ '--max-rate=32', '-d',
$node->connstr('postgres')
],
'<',
"Walsender killed");
ok( pump_until(
- $sigchld_bb, $sigchld_bb_timeout,
+ $sigchld_bb, $sigchld_bb_timeout,
\$sigchld_bb_stderr, qr/background process terminated unexpectedly/),
'background process exit message');
$sigchld_bb->finish();
# Test that we can back up an in-place tablespace
$node->safe_psql('postgres',
- "SET allow_in_place_tablespaces = on; CREATE TABLESPACE tblspc2 LOCATION '';");
+ "SET allow_in_place_tablespaces = on; CREATE TABLESPACE tblspc2 LOCATION '';"
+);
$node->safe_psql('postgres',
- "CREATE TABLE test2 (a int) TABLESPACE tblspc2;"
+ "CREATE TABLE test2 (a int) TABLESPACE tblspc2;"
. "INSERT INTO test2 VALUES (1234);");
my $tblspc_oid = $node->safe_psql('postgres',
"SELECT oid FROM pg_tablespace WHERE spcname = 'tblspc2';");
# compression involved.
$primary->command_ok(
[
- 'pg_receivewal', '-D', $stream_dir, '--verbose',
- '--endpos', $nextlsn, '--synchronous', '--no-loop'
+ 'pg_receivewal', '-D', $stream_dir, '--verbose',
+ '--endpos', $nextlsn, '--synchronous', '--no-loop'
],
'streaming some WAL with --synchronous');
$primary->command_ok(
[
- 'pg_receivewal', '-D', $stream_dir, '--verbose',
- '--endpos', $nextlsn, '--compress', 'gzip:1',
+ 'pg_receivewal', '-D', $stream_dir, '--verbose',
+ '--endpos', $nextlsn, '--compress', 'gzip:1',
'--no-loop'
],
"streaming some WAL using ZLIB compression");
# Stream up to the given position.
$primary->command_ok(
[
- 'pg_receivewal', '-D', $stream_dir, '--verbose',
- '--endpos', $nextlsn, '--no-loop', '--compress',
+ 'pg_receivewal', '-D', $stream_dir, '--verbose',
+ '--endpos', $nextlsn, '--no-loop', '--compress',
'lz4'
],
'streaming some WAL using --compress=lz4');
$primary->psql('postgres', 'INSERT INTO test_table VALUES (4);');
$primary->command_ok(
[
- 'pg_receivewal', '-D', $stream_dir, '--verbose',
- '--endpos', $nextlsn, '--no-loop'
+ 'pg_receivewal', '-D', $stream_dir, '--verbose',
+ '--endpos', $nextlsn, '--no-loop'
],
"streaming some WAL");
# Check case where the slot does not exist.
$primary->command_fails_like(
[
- 'pg_receivewal', '-D', $slot_dir, '--slot',
+ 'pg_receivewal', '-D', $slot_dir, '--slot',
'nonexistentslot', '-n', '--no-sync', '--verbose',
- '--endpos', $nextlsn
+ '--endpos', $nextlsn
],
qr/pg_receivewal: error: replication slot "nonexistentslot" does not exist/,
'pg_receivewal fails with non-existing slot');
$primary->command_ok(
[
- 'pg_receivewal', '-D', $slot_dir, '--slot',
- $slot_name, '-n', '--no-sync', '--verbose',
- '--endpos', $nextlsn
+ 'pg_receivewal', '-D', $slot_dir, '--slot',
+ $slot_name, '-n', '--no-sync', '--verbose',
+ '--endpos', $nextlsn
],
"WAL streamed from the slot's restart_lsn");
ok(-e "$slot_dir/$walfile_streamed",
$primary->wait_for_catchup($standby);
# Get a WAL file name from before the promotion, to make sure it is
# archived after promotion.
-my $standby_slot = $standby->slot($archive_slot);
+my $standby_slot = $standby->slot($archive_slot);
my $replication_slot_lsn = $standby_slot->{'restart_lsn'};
# pg_walfile_name() is not supported while in recovery, so use the primary
$standby->command_ok(
[
- 'pg_receivewal', '-D', $timeline_dir, '--verbose',
- '--endpos', $nextlsn, '--slot', $archive_slot,
- '--no-sync', '-n'
+ 'pg_receivewal', '-D', $timeline_dir, '--verbose',
+ '--endpos', $nextlsn, '--slot', $archive_slot,
+ '--no-sync', '-n'
],
"Stream some wal after promoting, resuming from the slot's position");
ok(-e "$timeline_dir/$walfile_before_promotion",
'pg_recvlogical needs an action');
$node->command_fails(
[
- 'pg_recvlogical', '-S',
- 'test', '-d',
+ 'pg_recvlogical', '-S',
+ 'test', '-d',
$node->connstr('postgres'), '--start'
],
'no destination file');
$node->command_ok(
[
- 'pg_recvlogical', '-S',
- 'test', '-d',
+ 'pg_recvlogical', '-S',
+ 'test', '-d',
$node->connstr('postgres'), '--create-slot'
],
'slot created');
$node->command_ok(
[
- 'pg_recvlogical', '-S',
- 'test', '-d',
+ 'pg_recvlogical', '-S',
+ 'test', '-d',
$node->connstr('postgres'), '--drop-slot'
],
'slot dropped');
# Test with the two-phase option enabled
$node->command_ok(
[
- 'pg_recvlogical', '-S',
- 'test', '-d',
+ 'pg_recvlogical', '-S',
+ 'test', '-d',
$node->connstr('postgres'), '--create-slot',
'--two-phase'
],
$node->command_fails(
[
- 'pg_recvlogical', '-S',
- 'test', '-d',
+ 'pg_recvlogical', '-S',
+ 'test', '-d',
$node->connstr('postgres'), '--start',
- '--endpos', "$nextlsn",
- '--two-phase', '--no-loop',
- '-f', '-'
+ '--endpos', "$nextlsn",
+ '--two-phase', '--no-loop',
+ '-f', '-'
],
'incorrect usage');
const char *pathname,
const char *temp_suffix,
size_t pad_to_size);
-static int dir_close(Walfile *f, WalCloseMethod method);
+static int dir_close(Walfile *f, WalCloseMethod method);
static bool dir_existsfile(WalWriteMethod *wwmethod, const char *pathname);
static ssize_t dir_get_file_size(WalWriteMethod *wwmethod,
const char *pathname);
static char *dir_get_file_name(WalWriteMethod *wwmethod,
const char *pathname, const char *temp_suffix);
static ssize_t dir_write(Walfile *f, const void *buf, size_t count);
-static int dir_sync(Walfile *f);
+static int dir_sync(Walfile *f);
static bool dir_finish(WalWriteMethod *wwmethod);
static void dir_free(WalWriteMethod *wwmethod);
*/
typedef struct DirectoryMethodData
{
- WalWriteMethod base;
+ WalWriteMethod base;
char *basedir;
} DirectoryMethodData;
const char *pathname,
const char *temp_suffix,
size_t pad_to_size);
-static int tar_close(Walfile *f, WalCloseMethod method);
+static int tar_close(Walfile *f, WalCloseMethod method);
static bool tar_existsfile(WalWriteMethod *wwmethod, const char *pathname);
static ssize_t tar_get_file_size(WalWriteMethod *wwmethod,
const char *pathname);
static char *tar_get_file_name(WalWriteMethod *wwmethod,
const char *pathname, const char *temp_suffix);
static ssize_t tar_write(Walfile *f, const void *buf, size_t count);
-static int tar_sync(Walfile *f);
+static int tar_sync(Walfile *f);
static bool tar_finish(WalWriteMethod *wwmethod);
static void tar_free(WalWriteMethod *wwmethod);
typedef struct TarMethodData
{
- WalWriteMethod base;
+ WalWriteMethod base;
char *tarfilename;
int fd;
TarMethodFile *currentfile;
{
TarMethodData *wwmethod;
const char *suffix = (compression_algorithm == PG_COMPRESSION_GZIP) ?
- ".tar.gz" : ".tar";
+ ".tar.gz" : ".tar";
wwmethod = pg_malloc0(sizeof(TarMethodData));
*((const WalWriteMethodOps **) &wwmethod->base.ops) =
WalWriteMethod *wwmethod;
off_t currpos;
char *pathname;
+
/*
* MORE DATA FOLLOWS AT END OF STRUCT
*
- * Each WalWriteMethod is expected to embed this as the first member of
- * a larger struct with method-specific fields following.
+ * Each WalWriteMethod is expected to embed this as the first member of a
+ * larger struct with method-specific fields following.
*/
} Walfile;
* automatically renamed in close(). If pad_to_size is specified, the file
* will be padded with NUL up to that size, if supported by the Walmethod.
*/
- Walfile *(*open_for_write) (WalWriteMethod *wwmethod, const char *pathname, const char *temp_suffix, size_t pad_to_size);
+ Walfile *(*open_for_write) (WalWriteMethod *wwmethod, const char *pathname, const char *temp_suffix, size_t pad_to_size);
/*
* Close an open Walfile, using one or more methods for handling automatic
bool sync;
const char *lasterrstring; /* if set, takes precedence over lasterrno */
int lasterrno;
+
/*
* MORE DATA FOLLOWS AT END OF STRUCT
*
- * Each WalWriteMethod is expected to embed this as the first member of
- * a larger struct with method-specific fields following.
+ * Each WalWriteMethod is expected to embed this as the first member of a
+ * larger struct with method-specific fields following.
*/
};
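Both Walfile and WalWriteMethod use the standard C embedding idiom that these "MORE DATA FOLLOWS" comments flag: the shared header is the first member of a larger method-specific struct, so a pointer to the header can be cast to the containing type. A self-contained, generic illustration (all names here are made up):

	#include <stdio.h>

	typedef struct Base
	{
		int			kind;			/* shared header fields */
	} Base;

	typedef struct Derived
	{
		Base		base;			/* must be the first member */
		const char *basedir;		/* method-specific fields follow */
	} Derived;

	static const char *
	get_basedir(Base *b)
	{
		/* valid because 'base' sits at offset zero within Derived */
		return ((Derived *) b)->basedir;
	}

	int
	main(void)
	{
		Derived		d = {{1}, "/tmp/wal"};

		printf("%s\n", get_basedir(&d.base));
		return 0;
	}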
# at the end.
sub check_relation_corruption
{
- my $node = shift;
- my $table = shift;
+ my $node = shift;
+ my $table = shift;
my $tablespace = shift;
- my $pgdata = $node->data_dir;
+ my $pgdata = $node->data_dir;
# Create table and discover its filesystem location.
$node->safe_psql(
command_ok(
[
'pg_checksums', '--check',
- '-D', $pgdata,
- '--filenode', $relfilenode_corrupted
+ '-D', $pgdata,
+ '--filenode', $relfilenode_corrupted
],
"succeeds for single relfilenode on tablespace $tablespace with offline cluster"
);
$node->command_checks_all(
[
'pg_checksums', '--check',
- '-D', $pgdata,
- '--filenode', $relfilenode_corrupted
+ '-D', $pgdata,
+ '--filenode', $relfilenode_corrupted
],
1,
[qr/Bad checksums:.*1/],
'checksums disabled in control file');
# These are correct but empty files, so they should pass through.
-append_to_file "$pgdata/global/99999", "";
-append_to_file "$pgdata/global/99999.123", "";
-append_to_file "$pgdata/global/99999_fsm", "";
-append_to_file "$pgdata/global/99999_init", "";
-append_to_file "$pgdata/global/99999_vm", "";
+append_to_file "$pgdata/global/99999", "";
+append_to_file "$pgdata/global/99999.123", "";
+append_to_file "$pgdata/global/99999_fsm", "";
+append_to_file "$pgdata/global/99999_init", "";
+append_to_file "$pgdata/global/99999_vm", "";
append_to_file "$pgdata/global/99999_init.123", "";
-append_to_file "$pgdata/global/99999_fsm.123", "";
-append_to_file "$pgdata/global/99999_vm.123", "";
+append_to_file "$pgdata/global/99999_fsm.123", "";
+append_to_file "$pgdata/global/99999_vm.123", "";
# These are temporary files and folders with dummy contents, which
# should be ignored by the scan.
append_to_file "$pgdata/global/pgsql_tmp_123", "foo";
mkdir "$pgdata/global/pgsql_tmp";
-append_to_file "$pgdata/global/pgsql_tmp/1.1", "foo";
-append_to_file "$pgdata/global/pg_internal.init", "foo";
+append_to_file "$pgdata/global/pgsql_tmp/1.1", "foo";
+append_to_file "$pgdata/global/pg_internal.init", "foo";
append_to_file "$pgdata/global/pg_internal.init.123", "foo";
# Enable checksums.
check_relation_corruption($node, 'corrupt1', 'pg_default');
# Create tablespace to check corruptions in a non-default tablespace.
-my $basedir = $node->basedir;
+my $basedir = $node->basedir;
my $tablespace_dir = "$basedir/ts_corrupt_dir";
mkdir($tablespace_dir);
$node->safe_psql('postgres',
# correctly-named relation files filled with some corrupted data.
sub fail_corrupt
{
- my $node = shift;
- my $file = shift;
+ my $node = shift;
+ my $file = shift;
my $pgdata = $node->data_dir;
# Create the file with some dummy data in it.
# check with a corrupted pg_control
my $pg_control = $node->data_dir . '/global/pg_control';
-my $size = (stat($pg_control))[7];
+my $size = (stat($pg_control))[7];
open my $fh, '>', $pg_control or BAIL_OUT($!);
binmode $fh;
use PostgreSQL::Test::Utils;
use Test::More;
-my $tempdir = PostgreSQL::Test::Utils::tempdir;
+my $tempdir = PostgreSQL::Test::Utils::tempdir;
my $tempdir_short = PostgreSQL::Test::Utils::tempdir_short;
program_help_ok('pg_ctl');
sub fetch_file_name
{
my $logfiles = shift;
- my $format = shift;
- my @lines = split(/\n/, $logfiles);
+ my $format = shift;
+ my @lines = split(/\n/, $logfiles);
my $filename = undef;
foreach my $line (@lines)
{
{
local $Test::Builder::Level = $Test::Builder::Level + 1;
- my $format = shift;
+ my $format = shift;
my $logfiles = shift;
- my $pattern = shift;
- my $node = shift;
- my $lfname = fetch_file_name($logfiles, $format);
+ my $pattern = shift;
+ my $node = shift;
+ my $lfname = fetch_file_name($logfiles, $format);
my $max_attempts = 10 * $PostgreSQL::Test::Utils::timeout_default;
jsonlog log/postgresql-.*json$|,
'current_logfiles is sane');
-check_log_pattern('stderr', $current_logfiles, 'division by zero', $node);
-check_log_pattern('csvlog', $current_logfiles, 'division by zero', $node);
+check_log_pattern('stderr', $current_logfiles, 'division by zero', $node);
+check_log_pattern('csvlog', $current_logfiles, 'division by zero', $node);
check_log_pattern('jsonlog', $current_logfiles, 'division by zero', $node);
# Sleep 2 seconds and ask for log rotation; this should result in
# Verify that log output gets to this file, too
$node->psql('postgres', 'fee fi fo fum');
-check_log_pattern('stderr', $new_current_logfiles, 'syntax error', $node);
-check_log_pattern('csvlog', $new_current_logfiles, 'syntax error', $node);
+check_log_pattern('stderr', $new_current_logfiles, 'syntax error', $node);
+check_log_pattern('csvlog', $new_current_logfiles, 'syntax error', $node);
check_log_pattern('jsonlog', $new_current_logfiles, 'syntax error', $node);
$node->stop();
char *
supports_compression(const pg_compress_specification compression_spec)
{
- const pg_compress_algorithm algorithm = compression_spec.algorithm;
- bool supported = false;
+ const pg_compress_algorithm algorithm = compression_spec.algorithm;
+ bool supported = false;
if (algorithm == PG_COMPRESSION_NONE)
supported = true;
LZ4F_preferences_t prefs;
- LZ4F_compressionContext_t ctx;
- LZ4F_decompressionContext_t dtx;
+ LZ4F_compressionContext_t ctx;
+ LZ4F_decompressionContext_t dtx;
/*
* Used by the Stream API's lazy initialization.
char *outbuf;
char *readbuf;
LZ4F_decompressionContext_t ctx = NULL;
- LZ4F_decompressOptions_t dec_opt;
- LZ4F_errorCode_t status;
+ LZ4F_decompressOptions_t dec_opt;
+ LZ4F_errorCode_t status;
memset(&dec_opt, 0, sizeof(dec_opt));
status = LZ4F_createDecompressionContext(&ctx, LZ4F_VERSION);
return NULL;
/*
- * Our caller expects the return string to be NULL terminated
- * and we know that ret is greater than zero.
+ * Our caller expects the return string to be NULL terminated and we know
+ * that ret is greater than zero.
*/
ptr[ret - 1] = '\0';
if (compress.options & PG_COMPRESSION_OPTION_LONG_DISTANCE)
_Zstd_CCtx_setParam_or_die(cstream,
- ZSTD_c_enableLongDistanceMatching,
- compress.long_distance, "long");
+ ZSTD_c_enableLongDistanceMatching,
+ compress.long_distance, "long");
return cstream;
}
#include "compress_io.h"
extern void InitCompressorZstd(CompressorState *cs,
- const pg_compress_specification compression_spec);
+ const pg_compress_specification compression_spec);
extern void InitCompressFileHandleZstd(CompressFileHandle *CFH,
- const pg_compress_specification compression_spec);
+ const pg_compress_specification compression_spec);
-#endif /* COMPRESS_ZSTD_H */
+#endif /* COMPRESS_ZSTD_H */
{
if (te->hadDumper && (te->reqs & REQ_DATA) != 0)
{
- char *errmsg = supports_compression(AH->compression_spec);
+ char *errmsg = supports_compression(AH->compression_spec);
+
if (errmsg)
pg_fatal("cannot restore from compressed archive (%s)",
- errmsg);
+ errmsg);
else
break;
}
if (!te->hadDumper)
{
/*
- * Special Case: If 'SEQUENCE SET' or anything to do with LOs, then
- * it is considered a data entry. We don't need to check for the
- * BLOBS entry or old-style BLOB COMMENTS, because they will have
- * hadDumper = true ... but we do need to check new-style BLOB ACLs,
- * comments, etc.
+ * Special Case: If 'SEQUENCE SET' or anything to do with LOs, then it
+ * is considered a data entry. We don't need to check for the BLOBS
+ * entry or old-style BLOB COMMENTS, because they will have hadDumper
+ * = true ... but we do need to check new-style BLOB ACLs, comments,
+ * etc.
*/
if (strcmp(te->desc, "SEQUENCE SET") == 0 ||
strcmp(te->desc, "BLOB") == 0 ||
{
appendPQExpBuffer(buf, "LARGE OBJECT %s", te->tag);
}
+
/*
* These object types require additional decoration. Fortunately, the
* information needed is exactly what's in the DROP command.
initPQExpBuffer(&temp);
_getObjectDescription(&temp, te);
+
/*
* If _getObjectDescription() didn't fill the buffer, then there is no
* owner.
if (errmsg)
{
pg_log_warning("archive is compressed, but this installation does not support compression (%s) -- no data will be available",
- errmsg);
+ errmsg);
pg_free(errmsg);
}
tarClose(AH, th);
/*
- * Once we have found the first LO, stop at the first non-LO
- * entry (which will be 'blobs.toc'). This coding would eat all
- * the rest of the archive if there are no LOs ... but this
- * function shouldn't be called at all in that case.
+ * Once we have found the first LO, stop at the first non-LO entry
+ * (which will be 'blobs.toc'). This coding would eat all the
+ * rest of the archive if there are no LOs ... but this function
+ * shouldn't be called at all in that case.
*/
if (foundLO)
break;
pg_fatal("%s", error_detail);
/*
- * Disable support for zstd workers for now - these are based on threading,
- * and it's unclear how it interacts with parallel dumps on platforms where
- * that relies on threads too (e.g. Windows).
+ * Disable support for zstd workers for now - these are based on
+ * threading, and it's unclear how it interacts with parallel dumps on
+ * platforms where that relies on threads too (e.g. Windows).
*/
if (compression_spec.options & PG_COMPRESSION_OPTION_WORKERS)
pg_log_warning("compression option \"%s\" is not currently supported by pg_dump",
/*
* Dumping LOs is the default for dumps where an inclusion switch is not
* used (an "include everything" dump). -B can be used to exclude LOs
- * from those dumps. -b can be used to include LOs even when an
- * inclusion switch is used.
+ * from those dumps. -b can be used to include LOs even when an inclusion
+ * switch is used.
*
* -s means "schema only" and LOs are data, not schema, so we never
* include LOs when -s is used.
* data or the associated metadata that resides in the pg_largeobject and
* pg_largeobject_metadata tables, respectively.
*
- * However, we do need to collect LO information as there may be
- * comments or other information on LOs that we do need to dump out.
+ * However, we do need to collect LO information as there may be comments
+ * or other information on LOs that we do need to dump out.
*/
if (dopt.outputLOs || dopt.binary_upgrade)
getLOs(fout);
appendPQExpBufferStr(loOutQry, "\n-- For binary upgrade, preserve pg_largeobject and index relfilenodes\n");
for (int i = 0; i < PQntuples(lo_res); ++i)
{
- Oid oid;
- RelFileNumber relfilenumber;
+ Oid oid;
+ RelFileNumber relfilenumber;
appendPQExpBuffer(loHorizonQry, "UPDATE pg_catalog.pg_class\n"
"SET relfrozenxid = '%u', relminmxid = '%u'\n"
loinfo[i].dobj.components |= DUMP_COMPONENT_ACL;
/*
- * In binary-upgrade mode for LOs, we do *not* dump out the LO
- * data, as it will be copied by pg_upgrade, which simply copies the
+ * In binary-upgrade mode for LOs, we do *not* dump out the LO data,
+ * as it will be copied by pg_upgrade, which simply copies the
* pg_largeobject table. We *do* however dump out anything but the
* data, as pg_upgrade copies just pg_largeobject, but not
* pg_largeobject_metadata, after the dump is restored.
if (dopt->no_security_labels)
return;
- /* Security labels are schema not data ... except large object labels are data */
+ /*
+ * Security labels are schema not data ... except large object labels are
+ * data
+ */
if (strcmp(type, "LARGE OBJECT") != 0)
{
if (dopt->dataOnly)
if (tbinfo->dobj.dump & DUMP_COMPONENT_ACL)
{
const char *objtype =
- (tbinfo->relkind == RELKIND_SEQUENCE) ? "SEQUENCE" : "TABLE";
+ (tbinfo->relkind == RELKIND_SEQUENCE) ? "SEQUENCE" : "TABLE";
tableAclDumpId =
dumpACL(fout, tbinfo->dobj.dumpId, InvalidDumpId,
{
appendPQExpBufferStr(q,
coninfo->contype == 'p' ? "PRIMARY KEY" : "UNIQUE");
+
/*
* PRIMARY KEY constraints should not be using NULLS NOT DISTINCT
* indexes. Being able to create this was fixed, but we need to
- * make the index distinct in order to be able to restore the dump.
+ * make the index distinct in order to be able to restore the
+ * dump.
*/
if (indxinfo->indnullsnotdistinct && coninfo->contype != 'p')
appendPQExpBufferStr(q, " NULLS NOT DISTINCT");
TableInfo *configtbl;
Oid configtbloid = atooid(extconfigarray[j]);
bool dumpobj =
- curext->dobj.dump & DUMP_COMPONENT_DEFINITION;
+ curext->dobj.dump & DUMP_COMPONENT_DEFINITION;
configtbl = findTableByOid(configtbloid);
if (configtbl == NULL)
dumpRoleMembership(PGconn *conn)
{
PQExpBuffer buf = createPQExpBuffer();
- PQExpBuffer optbuf = createPQExpBuffer();
+ PQExpBuffer optbuf = createPQExpBuffer();
PGresult *res;
int start = 0,
end,
/*
* We can't dump these GRANT commands in arbitrary order, because a role
- * that is named as a grantor must already have ADMIN OPTION on the
- * role for which it is granting permissions, except for the bootstrap
+ * that is named as a grantor must already have ADMIN OPTION on the role
+ * for which it is granting permissions, except for the bootstrap
* superuser, who can always be named as the grantor.
*
* We handle this by considering these grants role by role. For each role,
* superuser. Every time we grant ADMIN OPTION on the role to some user,
* that user also becomes an allowable grantor. We make repeated passes
* over the grants for the role, each time dumping those whose grantors
- * are allowable and which we haven't done yet. Eventually this should
- * let us dump all the grants.
+ * are allowable and which we haven't done yet. Eventually this should let
+ * us dump all the grants.
*/
total = PQntuples(res);
while (start < total)
/* All memberships for a single role should be adjacent. */
for (end = start; end < total; ++end)
{
- char *otherrole;
+ char *otherrole;
otherrole = PQgetvalue(res, end, 0);
if (strcmp(role, otherrole) != 0)
appendPQExpBufferStr(optbuf, "ADMIN OPTION");
if (dump_grant_options)
{
- char *inherit_option;
+ char *inherit_option;
if (optbuf->data[0] != '\0')
appendPQExpBufferStr(optbuf, ", ");
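The grantor-ordering rule described in the comment earlier in this hunk amounts to a fixed-point loop. A minimal, self-contained model follows; Grant and the emitted SQL are simplified stand-ins for what dumpRoleMembership actually reads from pg_auth_members, not the real code:

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	/* Hypothetical, simplified representation of one membership grant. */
	typedef struct Grant
	{
		const char *grantee;
		const char *grantor;
		bool		admin_option;
		bool		dumped;
	} Grant;

	/* A grantor becomes allowable once a dumped grant gave it ADMIN OPTION. */
	static bool
	grantor_is_allowable(const Grant *g, int n, const char *who)
	{
		for (int i = 0; i < n; i++)
			if (g[i].dumped && g[i].admin_option &&
				strcmp(g[i].grantee, who) == 0)
				return true;
		return false;
	}

	/* Repeated passes: emit grants whose grantor is already allowable. */
	static void
	dump_grants_for_role(Grant *g, int n, const char *bootstrap_superuser)
	{
		bool		progress = true;

		while (progress)
		{
			progress = false;
			for (int i = 0; i < n; i++)
			{
				if (g[i].dumped)
					continue;
				if (strcmp(g[i].grantor, bootstrap_superuser) == 0 ||
					grantor_is_allowable(g, n, g[i].grantor))
				{
					printf("GRANT ... TO %s GRANTED BY %s;\n",
						   g[i].grantee, g[i].grantor);
					g[i].dumped = true;
					progress = true;
				}
			}
		}
	}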
# database and then pg_dump *that* database (or something along
# those lines) to validate that part of the process.
-my $supports_icu = ($ENV{with_icu} eq 'yes');
+my $supports_icu = ($ENV{with_icu} eq 'yes');
my $supports_gzip = check_pg_config("#define HAVE_LIBZ 1");
-my $supports_lz4 = check_pg_config("#define USE_LZ4 1");
-my $supports_zstd = check_pg_config("#define USE_ZSTD 1");
+my $supports_lz4 = check_pg_config("#define USE_LZ4 1");
+my $supports_zstd = check_pg_config("#define USE_ZSTD 1");
my %pgdump_runs = (
binary_upgrade => {
# Do not use --no-sync to give test coverage for data sync.
compression_gzip_custom => {
- test_key => 'compression',
+ test_key => 'compression',
compile_option => 'gzip',
- dump_cmd => [
- 'pg_dump', '--format=custom',
+ dump_cmd => [
+ 'pg_dump', '--format=custom',
'--compress=1', "--file=$tempdir/compression_gzip_custom.dump",
'postgres',
],
'pg_restore', '-l', "$tempdir/compression_gzip_custom.dump",
],
expected => qr/Compression: gzip/,
- name => 'data content is gzip-compressed'
+ name => 'data content is gzip-compressed'
},
},
# Do not use --no-sync to give test coverage for data sync.
compression_gzip_dir => {
- test_key => 'compression',
+ test_key => 'compression',
compile_option => 'gzip',
- dump_cmd => [
- 'pg_dump', '--jobs=2',
- '--format=directory', '--compress=gzip:1',
+ dump_cmd => [
+ 'pg_dump', '--jobs=2',
+ '--format=directory', '--compress=gzip:1',
"--file=$tempdir/compression_gzip_dir", 'postgres',
],
# Give coverage for manually compressed blob.toc files during
# restore.
compress_cmd => {
program => $ENV{'GZIP_PROGRAM'},
- args => [ '-f', "$tempdir/compression_gzip_dir/blobs.toc", ],
+ args => [ '-f', "$tempdir/compression_gzip_dir/blobs.toc", ],
},
# Verify that only data files were compressed
glob_patterns => [
},
compression_gzip_plain => {
- test_key => 'compression',
+ test_key => 'compression',
compile_option => 'gzip',
- dump_cmd => [
+ dump_cmd => [
'pg_dump', '--format=plain', '-Z1',
"--file=$tempdir/compression_gzip_plain.sql.gz", 'postgres',
],
# Decompress the generated file to run through the tests.
compress_cmd => {
program => $ENV{'GZIP_PROGRAM'},
- args => [ '-d', "$tempdir/compression_gzip_plain.sql.gz", ],
+ args => [ '-d', "$tempdir/compression_gzip_plain.sql.gz", ],
},
},
# Do not use --no-sync to give test coverage for data sync.
compression_lz4_custom => {
- test_key => 'compression',
+ test_key => 'compression',
compile_option => 'lz4',
- dump_cmd => [
- 'pg_dump', '--format=custom',
+ dump_cmd => [
+ 'pg_dump', '--format=custom',
'--compress=lz4', "--file=$tempdir/compression_lz4_custom.dump",
'postgres',
],
"$tempdir/compression_lz4_custom.dump",
],
command_like => {
- command => [
- 'pg_restore',
- '-l', "$tempdir/compression_lz4_custom.dump",
- ],
+ command =>
+ [ 'pg_restore', '-l', "$tempdir/compression_lz4_custom.dump", ],
expected => qr/Compression: lz4/,
name => 'data content is lz4 compressed'
},
# Do not use --no-sync to give test coverage for data sync.
compression_lz4_dir => {
- test_key => 'compression',
+ test_key => 'compression',
compile_option => 'lz4',
- dump_cmd => [
- 'pg_dump', '--jobs=2',
- '--format=directory', '--compress=lz4:1',
+ dump_cmd => [
+ 'pg_dump', '--jobs=2',
+ '--format=directory', '--compress=lz4:1',
"--file=$tempdir/compression_lz4_dir", 'postgres',
],
# Give coverage for manually compressed blob.toc files during
# restore.
compress_cmd => {
program => $ENV{'LZ4'},
- args => [
+ args => [
'-z', '-f', '--rm',
"$tempdir/compression_lz4_dir/blobs.toc",
"$tempdir/compression_lz4_dir/blobs.toc.lz4",
# Verify that data files were compressed
glob_patterns => [
"$tempdir/compression_lz4_dir/toc.dat",
- "$tempdir/compression_lz4_dir/*.dat.lz4",
+ "$tempdir/compression_lz4_dir/*.dat.lz4",
],
restore_cmd => [
'pg_restore', '--jobs=2',
},
compression_lz4_plain => {
- test_key => 'compression',
+ test_key => 'compression',
compile_option => 'lz4',
- dump_cmd => [
+ dump_cmd => [
'pg_dump', '--format=plain', '--compress=lz4',
"--file=$tempdir/compression_lz4_plain.sql.lz4", 'postgres',
],
# Decompress the generated file to run through the tests.
compress_cmd => {
program => $ENV{'LZ4'},
- args => [
+ args => [
'-d', '-f',
"$tempdir/compression_lz4_plain.sql.lz4",
"$tempdir/compression_lz4_plain.sql",
},
compression_zstd_custom => {
- test_key => 'compression',
+ test_key => 'compression',
compile_option => 'zstd',
- dump_cmd => [
- 'pg_dump', '--format=custom',
+ dump_cmd => [
+ 'pg_dump', '--format=custom',
'--compress=zstd', "--file=$tempdir/compression_zstd_custom.dump",
'postgres',
],
],
command_like => {
command => [
- 'pg_restore',
- '-l', "$tempdir/compression_zstd_custom.dump",
+ 'pg_restore', '-l', "$tempdir/compression_zstd_custom.dump",
],
expected => qr/Compression: zstd/,
name => 'data content is zstd compressed'
},
compression_zstd_dir => {
- test_key => 'compression',
+ test_key => 'compression',
compile_option => 'zstd',
- dump_cmd => [
- 'pg_dump', '--jobs=2',
- '--format=directory', '--compress=zstd:1',
+ dump_cmd => [
+ 'pg_dump', '--jobs=2',
+ '--format=directory', '--compress=zstd:1',
"--file=$tempdir/compression_zstd_dir", 'postgres',
],
# Give coverage for manually compressed blob.toc files during
# restore.
compress_cmd => {
program => $ENV{'ZSTD'},
- args => [
- '-z', '-f', '--rm',
- "$tempdir/compression_zstd_dir/blobs.toc",
+ args => [
+ '-z', '-f',
+ '--rm', "$tempdir/compression_zstd_dir/blobs.toc",
"-o", "$tempdir/compression_zstd_dir/blobs.toc.zst",
],
},
# Verify that data files were compressed
glob_patterns => [
- "$tempdir/compression_zstd_dir/toc.dat",
- "$tempdir/compression_zstd_dir/*.dat.zst",
+ "$tempdir/compression_zstd_dir/toc.dat",
+ "$tempdir/compression_zstd_dir/*.dat.zst",
],
restore_cmd => [
'pg_restore', '--jobs=2',
# Exercise long mode for test coverage
compression_zstd_plain => {
- test_key => 'compression',
+ test_key => 'compression',
compile_option => 'zstd',
- dump_cmd => [
+ dump_cmd => [
'pg_dump', '--format=plain', '--compress=zstd:long',
"--file=$tempdir/compression_zstd_plain.sql.zst", 'postgres',
],
# Decompress the generated file to run through the tests.
compress_cmd => {
program => $ENV{'ZSTD'},
- args => [
+ args => [
'-d', '-f',
- "$tempdir/compression_zstd_plain.sql.zst",
- "-o", "$tempdir/compression_zstd_plain.sql",
+ "$tempdir/compression_zstd_plain.sql.zst", "-o",
+ "$tempdir/compression_zstd_plain.sql",
],
},
},
},
column_inserts => {
dump_cmd => [
- 'pg_dump', '--no-sync',
+ 'pg_dump', '--no-sync',
"--file=$tempdir/column_inserts.sql", '-a',
- '--column-inserts', 'postgres',
+ '--column-inserts', 'postgres',
],
},
createdb => {
defaults => {
dump_cmd => [
'pg_dump', '--no-sync',
- '-f', "$tempdir/defaults.sql",
+ '-f', "$tempdir/defaults.sql",
'postgres',
],
},
command_like => {
command =>
[ 'pg_restore', '-l', "$tempdir/defaults_custom_format.dump", ],
- expected => $supports_gzip ?
- qr/Compression: gzip/ :
- qr/Compression: none/,
+ expected => $supports_gzip
+ ? qr/Compression: gzip/
+ : qr/Compression: none/,
name => 'data content is gzip-compressed by default if available',
},
},
defaults_dir_format => {
test_key => 'defaults',
dump_cmd => [
- 'pg_dump', '-Fd',
+ 'pg_dump', '-Fd',
"--file=$tempdir/defaults_dir_format", 'postgres',
],
restore_cmd => [
command_like => {
command =>
[ 'pg_restore', '-l', "$tempdir/defaults_dir_format", ],
- expected => $supports_gzip ?
- qr/Compression: gzip/ :
- qr/Compression: none/,
+ expected => $supports_gzip ? qr/Compression: gzip/
+ : qr/Compression: none/,
name => 'data content is gzip-compressed by default',
},
glob_patterns => [
"$tempdir/defaults_dir_format/toc.dat",
"$tempdir/defaults_dir_format/blobs.toc",
- $supports_gzip ?
- "$tempdir/defaults_dir_format/*.dat.gz" :
- "$tempdir/defaults_dir_format/*.dat",
+ $supports_gzip ? "$tempdir/defaults_dir_format/*.dat.gz"
+ : "$tempdir/defaults_dir_format/*.dat",
],
},
defaults_tar_format => {
test_key => 'defaults',
dump_cmd => [
- 'pg_dump', '-Ft',
+ 'pg_dump', '-Ft',
"--file=$tempdir/defaults_tar_format.tar", 'postgres',
],
restore_cmd => [
},
exclude_measurement => {
dump_cmd => [
- 'pg_dump', '--no-sync',
+ 'pg_dump',
+ '--no-sync',
"--file=$tempdir/exclude_measurement.sql",
'--exclude-table-and-children=dump_test.measurement',
'postgres',
},
inserts => {
dump_cmd => [
- 'pg_dump', '--no-sync',
+ 'pg_dump', '--no-sync',
"--file=$tempdir/inserts.sql", '-a',
- '--inserts', 'postgres',
+ '--inserts', 'postgres',
],
},
pg_dumpall_globals => {
},
no_large_objects => {
dump_cmd => [
- 'pg_dump', '--no-sync',
- "--file=$tempdir/no_large_objects.sql", '-B',
- 'postgres',
+ 'pg_dump', '--no-sync', "--file=$tempdir/no_large_objects.sql",
+ '-B', 'postgres',
],
},
no_privs => {
dump_cmd => [
- 'pg_dump', '--no-sync',
+ 'pg_dump', '--no-sync',
"--file=$tempdir/no_privs.sql", '-x',
'postgres',
],
},
no_owner => {
dump_cmd => [
- 'pg_dump', '--no-sync',
+ 'pg_dump', '--no-sync',
"--file=$tempdir/no_owner.sql", '-O',
'postgres',
],
},
schema_only => {
dump_cmd => [
- 'pg_dump', '--format=plain',
+ 'pg_dump', '--format=plain',
"--file=$tempdir/schema_only.sql", '--no-sync',
- '-s', 'postgres',
+ '-s', 'postgres',
],
},
section_pre_data => {
dump_cmd => [
- 'pg_dump', "--file=$tempdir/section_pre_data.sql",
+ 'pg_dump', "--file=$tempdir/section_pre_data.sql",
'--section=pre-data', '--no-sync',
'postgres',
],
},
section_data => {
dump_cmd => [
- 'pg_dump', "--file=$tempdir/section_data.sql",
+ 'pg_dump', "--file=$tempdir/section_data.sql",
'--section=data', '--no-sync',
'postgres',
],
# Tests which target the 'dump_test' schema, specifically.
my %dump_test_schema_runs = (
- only_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_test_schema => 1,
+ only_dump_measurement => 1,
test_schema_plus_large_objects => 1,);
# Tests which are considered 'full' dumps by pg_dump, but there
# are flags used to exclude specific items (ACLs, LOs, etc).
my %full_runs = (
- binary_upgrade => 1,
- clean => 1,
- clean_if_exists => 1,
- compression => 1,
- createdb => 1,
- defaults => 1,
+ binary_upgrade => 1,
+ clean => 1,
+ clean_if_exists => 1,
+ compression => 1,
+ createdb => 1,
+ defaults => 1,
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- exclude_test_table_data => 1,
- exclude_measurement => 1,
+ exclude_test_table => 1,
+ exclude_test_table_data => 1,
+ exclude_measurement => 1,
exclude_measurement_data => 1,
- no_toast_compression => 1,
- no_large_objects => 1,
- no_owner => 1,
- no_privs => 1,
- no_table_access_method => 1,
- pg_dumpall_dbprivs => 1,
- pg_dumpall_exclude => 1,
- schema_only => 1,);
+ no_toast_compression => 1,
+ no_large_objects => 1,
+ no_owner => 1,
+ no_privs => 1,
+ no_table_access_method => 1,
+ pg_dumpall_dbprivs => 1,
+ pg_dumpall_exclude => 1,
+ schema_only => 1,);
# This is where the actual tests are defined.
my %tests = (
'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role GRANT' => {
create_order => 14,
- create_sql => 'ALTER DEFAULT PRIVILEGES
+ create_sql => 'ALTER DEFAULT PRIVILEGES
FOR ROLE regress_dump_test_role IN SCHEMA dump_test
GRANT SELECT ON TABLES TO regress_dump_test_role;',
regexp => qr/^
{ %full_runs, %dump_test_schema_runs, section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_privs => 1,
- only_dump_measurement => 1,
+ no_privs => 1,
+ only_dump_measurement => 1,
},
},
'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role GRANT EXECUTE ON FUNCTIONS'
=> {
create_order => 15,
- create_sql => 'ALTER DEFAULT PRIVILEGES
+ create_sql => 'ALTER DEFAULT PRIVILEGES
FOR ROLE regress_dump_test_role IN SCHEMA dump_test
GRANT EXECUTE ON FUNCTIONS TO regress_dump_test_role;',
regexp => qr/^
{ %full_runs, %dump_test_schema_runs, section_post_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_privs => 1,
- only_dump_measurement => 1,
+ no_privs => 1,
+ only_dump_measurement => 1,
},
},
'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role REVOKE' => {
create_order => 55,
- create_sql => 'ALTER DEFAULT PRIVILEGES
+ create_sql => 'ALTER DEFAULT PRIVILEGES
FOR ROLE regress_dump_test_role
REVOKE EXECUTE ON FUNCTIONS FROM PUBLIC;',
regexp => qr/^
'ALTER DEFAULT PRIVILEGES FOR ROLE regress_dump_test_role REVOKE SELECT'
=> {
create_order => 56,
- create_sql => 'ALTER DEFAULT PRIVILEGES
+ create_sql => 'ALTER DEFAULT PRIVILEGES
FOR ROLE regress_dump_test_role
REVOKE SELECT ON TABLES FROM regress_dump_test_role;',
regexp => qr/^
\QNOREPLICATION NOBYPASSRLS;\E
/xm,
like => {
- pg_dumpall_dbprivs => 1,
- pg_dumpall_globals => 1,
+ pg_dumpall_dbprivs => 1,
+ pg_dumpall_globals => 1,
pg_dumpall_globals_clean => 1,
- pg_dumpall_exclude => 1,
+ pg_dumpall_exclude => 1,
},
},
'ALTER COLLATION test0 OWNER TO' => {
- regexp => qr/^\QALTER COLLATION public.test0 OWNER TO \E.+;/m,
+ regexp => qr/^\QALTER COLLATION public.test0 OWNER TO \E.+;/m,
collation => 1,
- like => { %full_runs, section_pre_data => 1, },
- unlike => { %dump_test_schema_runs, no_owner => 1, },
+ like => { %full_runs, section_pre_data => 1, },
+ unlike => { %dump_test_schema_runs, no_owner => 1, },
},
'ALTER FOREIGN DATA WRAPPER dummy OWNER TO' => {
regexp => qr/^ALTER FOREIGN DATA WRAPPER dummy OWNER TO .+;/m,
- like => { %full_runs, section_pre_data => 1, },
+ like => { %full_runs, section_pre_data => 1, },
unlike => { no_owner => 1, },
},
'ALTER SERVER s1 OWNER TO' => {
regexp => qr/^ALTER SERVER s1 OWNER TO .+;/m,
- like => { %full_runs, section_pre_data => 1, },
+ like => { %full_runs, section_pre_data => 1, },
unlike => { no_owner => 1, },
},
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_owner => 1,
- only_dump_measurement => 1,
+ no_owner => 1,
+ only_dump_measurement => 1,
},
},
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_owner => 1,
- only_dump_measurement => 1,
+ no_owner => 1,
+ only_dump_measurement => 1,
},
},
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_owner => 1,
- only_dump_measurement => 1,
+ no_owner => 1,
+ only_dump_measurement => 1,
},
},
'ALTER PUBLICATION pub1 OWNER TO' => {
regexp => qr/^ALTER PUBLICATION pub1 OWNER TO .+;/m,
- like => { %full_runs, section_post_data => 1, },
+ like => { %full_runs, section_post_data => 1, },
unlike => { no_owner => 1, },
},
'ALTER LARGE OBJECT ... OWNER TO' => {
regexp => qr/^ALTER LARGE OBJECT \d+ OWNER TO .+;/m,
- like => {
+ like => {
%full_runs,
- column_inserts => 1,
- data_only => 1,
- inserts => 1,
- section_pre_data => 1,
+ column_inserts => 1,
+ data_only => 1,
+ inserts => 1,
+ section_pre_data => 1,
test_schema_plus_large_objects => 1,
},
unlike => {
no_large_objects => 1,
- no_owner => 1,
+ no_owner => 1,
schema_only => 1,
},
},
'ALTER PROCEDURAL LANGUAGE pltestlang OWNER TO' => {
regexp => qr/^ALTER PROCEDURAL LANGUAGE pltestlang OWNER TO .+;/m,
- like => { %full_runs, section_pre_data => 1, },
+ like => { %full_runs, section_pre_data => 1, },
unlike => { no_owner => 1, },
},
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_owner => 1,
- only_dump_measurement => 1,
+ no_owner => 1,
+ only_dump_measurement => 1,
},
},
'ALTER SCHEMA dump_test_second_schema OWNER TO' => {
regexp => qr/^ALTER SCHEMA dump_test_second_schema OWNER TO .+;/m,
- like => {
+ like => {
%full_runs,
- role => 1,
+ role => 1,
section_pre_data => 1,
},
unlike => { no_owner => 1, },
create_sql =>
'ALTER SCHEMA public OWNER TO "regress_quoted \"" role";',
regexp => qr/^ALTER SCHEMA public OWNER TO .+;/m,
- like => {
+ like => {
%full_runs, section_pre_data => 1,
},
unlike => { no_owner => 1, },
},
'ALTER SCHEMA public OWNER TO (w/o ACL changes)' => {
- database => 'regress_public_owner',
+ database => 'regress_public_owner',
create_order => 100,
create_sql =>
'ALTER SCHEMA public OWNER TO "regress_quoted \"" role";',
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
- section_pre_data => 1,
+ section_pre_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- only_dump_measurement => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
},
},
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
- section_post_data => 1,
+ section_post_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- only_dump_measurement => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
},
},
'ALTER TABLE (partitioned) ADD CONSTRAINT ... FOREIGN KEY' => {
create_order => 4,
- create_sql => 'CREATE TABLE dump_test.test_table_fk (
+ create_sql => 'CREATE TABLE dump_test.test_table_fk (
col1 int references dump_test.test_table)
PARTITION BY RANGE (col1);
CREATE TABLE dump_test.test_table_fk_1
},
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
- section_pre_data => 1,
+ section_pre_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- only_dump_measurement => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
},
},
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
- section_pre_data => 1,
+ section_pre_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- only_dump_measurement => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
},
},
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
- section_pre_data => 1,
+ section_pre_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- only_dump_measurement => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
},
},
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
- section_pre_data => 1,
+ section_pre_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- only_dump_measurement => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
},
},
/xm,
like => {
%full_runs,
- role => 1,
+ role => 1,
section_pre_data => 1,
- binary_upgrade => 1,
+ binary_upgrade => 1,
only_dump_measurement => 1,
},
unlike => {
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
- section_post_data => 1,
+ section_post_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- only_dump_measurement => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
},
},
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_measurement => 1,
+ only_dump_measurement => 1,
},
},
'ALTER TABLE test_table OWNER TO' => {
regexp => qr/^\QALTER TABLE dump_test.test_table OWNER TO \E.+;/m,
- like => {
+ like => {
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
- section_pre_data => 1,
+ section_pre_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- only_dump_measurement => 1,
- no_owner => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
+ no_owner => 1,
},
},
'ALTER TABLE test_table ENABLE ROW LEVEL SECURITY' => {
create_order => 23,
- create_sql => 'ALTER TABLE dump_test.test_table
+ create_sql => 'ALTER TABLE dump_test.test_table
ENABLE ROW LEVEL SECURITY;',
regexp =>
qr/^\QALTER TABLE dump_test.test_table ENABLE ROW LEVEL SECURITY;\E/m,
%full_runs,
%dump_test_schema_runs,
only_dump_test_table => 1,
- section_post_data => 1,
+ section_post_data => 1,
},
unlike => {
exclude_dump_test_schema => 1,
- exclude_test_table => 1,
- only_dump_measurement => 1,
+ exclude_test_table => 1,
+ only_dump_measurement => 1,
},
},
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_owner => 1,
- only_dump_measurement => 1,
+ no_owner => 1,
+ only_dump_measurement => 1,
},
},
},
unlike => {
exclude_dump_test_schema => 1,
- no_owner => 1,
- exclude_measurement => 1,
+ no_owner => 1,
+ exclude_measurement => 1,
},
},
qr/^\QALTER TABLE dump_test_second_schema.measurement_y2006m2 OWNER TO \E.+;/m,
like => {
%full_runs,
- role => 1,
+ role => 1,
section_pre_data => 1,
only_dump_measurement => 1,
},
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_owner => 1,
- only_dump_measurement => 1,
+ no_owner => 1,
+ only_dump_measurement => 1,
},
},
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- no_owner => 1,
- only_dump_measurement => 1,
+ no_owner => 1,
+ only_dump_measurement => 1,
},
},
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
unlike => {
exclude_dump_test_schema => 1,
- only_dump_test_table => 1,
- no_owner => 1,
- role => 1,
- only_dump_measurement => 1,
+ only_dump_test_table => 1,
+ no_owner => 1,
+ role => 1,
+ only_dump_measurement => 1,
},
},
create_sql =>
'SELECT pg_catalog.lo_from_bytea(0, \'\\x310a320a330a340a350a360a370a380a390a\');',
regexp => qr/^SELECT pg_catalog\.lo_create\('\d+'\);/m,
- like => {
+ like => {
%full_runs,
- column_inserts => 1,
- data_only => 1,
- inserts => 1,
- section_pre_data => 1,
+ column_inserts => 1,
+ data_only => 1,
+ inserts => 1,
+ section_pre_data => 1,
test_schema_plus_large_objects => 1,
},
unlike => {
/xm,
like => {
%full_runs,
- column_inserts => 1,
- data_only => 1,
- inserts => 1,
- section_data => 1,
+ column_inserts => 1,
+ data_only => 1,
+ inserts => 1,
+ section_data => 1,
test_schema_plus_large_objects => 1,
},
unlike => {
binary_upgrade => 1,
no_large_objects => 1,
- schema_only => 1,
+ schema_only => 1,
},
},
'LO create (with no data)' => {
- create_sql =>
- 'SELECT pg_catalog.lo_create(0);',
+ create_sql => 'SELECT pg_catalog.lo_create(0);',
regexp => qr/^
\QSELECT pg_catalog.lo_open\E \('\d+',\ \d+\);\n
\QSELECT pg_catalog.lo_close(0);\E
/xm,
- like => {
+ like => {
%full_runs,
- column_inserts => 1,
- data_only => 1,
- inserts => 1,
- section_data => 1,
+ column_inserts => 1,
+ data_only => 1,
+ inserts => 1,
+ section_data => 1,
test_schema_plus_large_objects => 1,
},
unlike => {
- binary_upgrade => 1,
- no_large_objects => 1,
- schema_only => 1,
- section_pre_data => 1,
+ binary_upgrade => 1,
+ no_large_objects => 1,
+ schema_only => 1,
+ section_pre_data => 1,
},
},
},
'COMMENT ON SCHEMA public IS NULL' => {
- database => 'regress_public_owner',
+ database => 'regress_public_owner',
create_order => 100,
- create_sql => 'COMMENT ON SCHEMA public IS NULL;',
- regexp => qr/^COMMENT ON SCHEMA public IS '';/m,
- like => { defaults_public_owner => 1 },
+ create_sql => 'COMMENT ON SCHEMA public IS NULL;',
+ regexp => qr/^COMMENT ON SCHEMA public IS '';/m,
+ like => { defaults_public_owner => 1 },
},
'COMMENT ON TABLE dump_test.test_table' => {
create_order => 36,
- create_sql => 'COMMENT ON TABLE dump_test.test_table
+ create_sql => 'COMMENT ON TABLE dump_test.test_table
IS \'comment on table\';',
regexp =>
qr/^\QCOMMENT ON TABLE dump_test.test_table IS 'comment on table';\E/m,