PageSetChecksumInplace(metapage, BLOOM_METAPAGE_BLKNO);
smgrwrite(RelationGetSmgr(index), INIT_FORKNUM, BLOOM_METAPAGE_BLKNO,
(char *) metapage, true);
- log_newpage(&(RelationGetSmgr(index))->smgr_rnode.node, INIT_FORKNUM,
+ log_newpage(&(RelationGetSmgr(index))->smgr_rlocator.locator, INIT_FORKNUM,
BLOOM_METAPAGE_BLKNO, metapage, true);
/*
{
eary *tables;
eary *oids;
- eary *filenodes;
+ eary *filenumbers;
bool quiet;
bool systables;
my_opts->dbname = pg_strdup(optarg);
break;
- /* specify one filenode to show */
+ /* specify one filenumber to show */
case 'f':
- add_one_elt(optarg, my_opts->filenodes);
+ add_one_elt(optarg, my_opts->filenumbers);
break;
/* host to connect to */
}
/*
- * Show oid, filenode, name, schema and tablespace for each of the
+ * Show oid, filenumber, name, schema and tablespace for each of the
* given objects in the current database.
*/
void
char *qualifiers,
*ptr;
char *comma_oids,
- *comma_filenodes,
+ *comma_filenumbers,
*comma_tables;
bool written = false;
char *addfields = ",c.oid AS \"Oid\", nspname AS \"Schema\", spcname as \"Tablespace\" ";
- /* get tables qualifiers, whether names, filenodes, or OIDs */
+ /* get tables qualifiers, whether names, filenumbers, or OIDs */
comma_oids = get_comma_elts(opts->oids);
comma_tables = get_comma_elts(opts->tables);
- comma_filenodes = get_comma_elts(opts->filenodes);
+ comma_filenumbers = get_comma_elts(opts->filenumbers);
/* 80 extra chars for SQL expression */
qualifiers = (char *) pg_malloc(strlen(comma_oids) + strlen(comma_tables) +
- strlen(comma_filenodes) + 80);
+ strlen(comma_filenumbers) + 80);
ptr = qualifiers;
if (opts->oids->num > 0)
ptr += sprintf(ptr, "c.oid IN (%s)", comma_oids);
written = true;
}
- if (opts->filenodes->num > 0)
+ if (opts->filenumbers->num > 0)
{
if (written)
ptr += sprintf(ptr, " OR ");
- ptr += sprintf(ptr, "pg_catalog.pg_relation_filenode(c.oid) IN (%s)", comma_filenodes);
+ ptr += sprintf(ptr, "pg_catalog.pg_relation_filenode(c.oid) IN (%s)",
+ comma_filenumbers);
written = true;
}
if (opts->tables->num > 0)
}
free(comma_oids);
free(comma_tables);
- free(comma_filenodes);
+ free(comma_filenumbers);
/* now build the query */
todo = psprintf("SELECT pg_catalog.pg_relation_filenode(c.oid) as \"Filenode\", relname as \"Table Name\" %s\n"
my_opts->oids = (eary *) pg_malloc(sizeof(eary));
my_opts->tables = (eary *) pg_malloc(sizeof(eary));
- my_opts->filenodes = (eary *) pg_malloc(sizeof(eary));
+ my_opts->filenumbers = (eary *) pg_malloc(sizeof(eary));
my_opts->oids->num = my_opts->oids->alloc = 0;
my_opts->tables->num = my_opts->tables->alloc = 0;
- my_opts->filenodes->num = my_opts->filenodes->alloc = 0;
+ my_opts->filenumbers->num = my_opts->filenumbers->alloc = 0;
/* parse the opts */
get_opts(argc, argv, my_opts);
/* display the given elements in the database */
if (my_opts->oids->num > 0 ||
my_opts->tables->num > 0 ||
- my_opts->filenodes->num > 0)
+ my_opts->filenumbers->num > 0)
{
if (!my_opts->quiet)
printf("From database \"%s\":\n", my_opts->dbname);
typedef struct
{
uint32 bufferid;
- Oid relfilenode;
+ RelFileNumber relfilenumber;
Oid reltablespace;
Oid reldatabase;
ForkNumber forknum;
buf_state = LockBufHdr(bufHdr);
fctx->record[i].bufferid = BufferDescriptorGetBuffer(bufHdr);
- fctx->record[i].relfilenode = bufHdr->tag.rnode.relNode;
- fctx->record[i].reltablespace = bufHdr->tag.rnode.spcNode;
- fctx->record[i].reldatabase = bufHdr->tag.rnode.dbNode;
+ fctx->record[i].relfilenumber = bufHdr->tag.rlocator.relNumber;
+ fctx->record[i].reltablespace = bufHdr->tag.rlocator.spcOid;
+ fctx->record[i].reldatabase = bufHdr->tag.rlocator.dbOid;
fctx->record[i].forknum = bufHdr->tag.forkNum;
fctx->record[i].blocknum = bufHdr->tag.blockNum;
fctx->record[i].usagecount = BUF_STATE_GET_USAGECOUNT(buf_state);
}
else
{
- values[1] = ObjectIdGetDatum(fctx->record[i].relfilenode);
+ values[1] = ObjectIdGetDatum(fctx->record[i].relfilenumber);
nulls[1] = false;
values[2] = ObjectIdGetDatum(fctx->record[i].reltablespace);
nulls[2] = false;
#include "utils/guc.h"
#include "utils/memutils.h"
#include "utils/rel.h"
-#include "utils/relfilenodemap.h"
+#include "utils/relfilenumbermap.h"
#include "utils/resowner.h"
#define AUTOPREWARM_FILE "autoprewarm.blocks"
{
Oid database;
Oid tablespace;
- Oid filenode;
+ RelFileNumber filenumber;
ForkNumber forknum;
BlockNumber blocknum;
} BlockInfoRecord;
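For reference, each BlockInfoRecord is serialized in AUTOPREWARM_FILE as its five
fields in declaration order, comma-separated, matching the fscanf format just
below; a hypothetical line:

    16384,1663,16392,0,7

i.e. database OID, tablespace OID, filenumber, fork number, block number.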
unsigned forknum;
if (fscanf(file, "%u,%u,%u,%u,%u\n", &blkinfo[i].database,
- &blkinfo[i].tablespace, &blkinfo[i].filenode,
+ &blkinfo[i].tablespace, &blkinfo[i].filenumber,
&forknum, &blkinfo[i].blocknum) != 5)
ereport(ERROR,
(errmsg("autoprewarm block dump file is corrupted at line %d",
* relation. Note that rel will be NULL if try_relation_open failed
* previously; in that case, there is nothing to close.
*/
- if (old_blk != NULL && old_blk->filenode != blk->filenode &&
+ if (old_blk != NULL && old_blk->filenumber != blk->filenumber &&
rel != NULL)
{
relation_close(rel, AccessShareLock);
* Try to open each new relation, but only once, when we first
* encounter it. If it's been dropped, skip the associated blocks.
*/
- if (old_blk == NULL || old_blk->filenode != blk->filenode)
+ if (old_blk == NULL || old_blk->filenumber != blk->filenumber)
{
Oid reloid;
Assert(rel == NULL);
StartTransactionCommand();
- reloid = RelidByRelfilenode(blk->tablespace, blk->filenode);
+ reloid = RelidByRelfilenumber(blk->tablespace, blk->filenumber);
if (OidIsValid(reloid))
rel = try_relation_open(reloid, AccessShareLock);
/* Once per fork, check for fork existence and size. */
if (old_blk == NULL ||
- old_blk->filenode != blk->filenode ||
+ old_blk->filenumber != blk->filenumber ||
old_blk->forknum != blk->forknum)
{
/*
if (buf_state & BM_TAG_VALID &&
((buf_state & BM_PERMANENT) || dump_unlogged))
{
- block_info_array[num_blocks].database = bufHdr->tag.rnode.dbNode;
- block_info_array[num_blocks].tablespace = bufHdr->tag.rnode.spcNode;
- block_info_array[num_blocks].filenode = bufHdr->tag.rnode.relNode;
+ block_info_array[num_blocks].database = bufHdr->tag.rlocator.dbOid;
+ block_info_array[num_blocks].tablespace = bufHdr->tag.rlocator.spcOid;
+ block_info_array[num_blocks].filenumber = bufHdr->tag.rlocator.relNumber;
block_info_array[num_blocks].forknum = bufHdr->tag.forkNum;
block_info_array[num_blocks].blocknum = bufHdr->tag.blockNum;
++num_blocks;
ret = fprintf(file, "%u,%u,%u,%u,%u\n",
block_info_array[i].database,
block_info_array[i].tablespace,
- block_info_array[i].filenode,
+ block_info_array[i].filenumber,
(uint32) block_info_array[i].forknum,
block_info_array[i].blocknum);
if (ret < 0)
* We depend on all records for a particular database being consecutive
* in the dump file; each per-database worker will preload blocks until
* it sees a block for some other database. Sorting by tablespace,
- * filenode, forknum, and blocknum isn't critical for correctness, but
+ * filenumber, forknum, and blocknum isn't critical for correctness, but
* helps us get a sequential I/O pattern.
*/
static int
cmp_member_elem(database);
cmp_member_elem(tablespace);
- cmp_member_elem(filenode);
+ cmp_member_elem(filenumber);
cmp_member_elem(forknum);
cmp_member_elem(blocknum);
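cmp_member_elem presumably expands to a field-at-a-time comparison that returns
on the first inequality and falls through on a tie; a minimal sketch, assuming
the comparator receives BlockInfoRecord pointers a and b:

    /* Compare one member; keep going (fall through) if the values tie. */
    #define cmp_member_elem(fld) \
    do { \
        if (a->fld > b->fld) \
            return 1; \
        else if (a->fld < b->fld) \
            return -1; \
    } while (0)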
xl_smgr_truncate xlrec;
xlrec.blkno = 0;
- xlrec.rnode = rel->rd_node;
+ xlrec.rlocator = rel->rd_locator;
xlrec.flags = SMGR_TRUNCATE_VM;
XLogBeginInsert();
*/
typedef struct ss_scan_location_t
{
- RelFileNode relfilenode; /* identity of a relation */
+ RelFileLocator relfilelocator; /* identity of a relation */
BlockNumber location; /* last-reported location in the relation */
} ss_scan_location_t;
static ss_scan_locations_t *scan_locations;
/* prototypes for internal functions */
-static BlockNumber ss_search(RelFileNode relfilenode,
+static BlockNumber ss_search(RelFileLocator relfilelocator,
BlockNumber location, bool set);
* these invalid entries will fall off the LRU list and get
* replaced with real entries.
*/
- item->location.relfilenode.spcNode = InvalidOid;
- item->location.relfilenode.dbNode = InvalidOid;
- item->location.relfilenode.relNode = InvalidOid;
+ item->location.relfilelocator.spcOid = InvalidOid;
+ item->location.relfilelocator.dbOid = InvalidOid;
+ item->location.relfilelocator.relNumber = InvalidRelFileNumber;
item->location.location = InvalidBlockNumber;
item->prev = (i > 0) ?
/*
* ss_search --- search the scan_locations structure for an entry with the
- * given relfilenode.
+ * given relfilelocator.
*
* If "set" is true, the location is updated to the given location. If no
- * entry for the given relfilenode is found, it will be created at the head
+ * entry for the given relfilelocator is found, it will be created at the head
* of the list with the given location, even if "set" is false.
*
* In any case, the location after possible update is returned.
* data structure.
*/
static BlockNumber
-ss_search(RelFileNode relfilenode, BlockNumber location, bool set)
+ss_search(RelFileLocator relfilelocator, BlockNumber location, bool set)
{
ss_lru_item_t *item;
{
bool match;
- match = RelFileNodeEquals(item->location.relfilenode, relfilenode);
+ match = RelFileLocatorEquals(item->location.relfilelocator,
+ relfilelocator);
if (match || item->next == NULL)
{
*/
if (!match)
{
- item->location.relfilenode = relfilenode;
+ item->location.relfilelocator = relfilelocator;
item->location.location = location;
}
else if (set)
BlockNumber startloc;
LWLockAcquire(SyncScanLock, LW_EXCLUSIVE);
- startloc = ss_search(rel->rd_node, 0, false);
+ startloc = ss_search(rel->rd_locator, 0, false);
LWLockRelease(SyncScanLock);
/*
* ss_report_location --- update the current scan location
*
* Writes an entry into the shared Sync Scan state of the form
- * (relfilenode, blocknumber), overwriting any existing entry for the
- * same relfilenode.
+ * (relfilelocator, blocknumber), overwriting any existing entry for the
+ * same relfilelocator.
*/
void
ss_report_location(Relation rel, BlockNumber location)
{
if (LWLockConditionalAcquire(SyncScanLock, LW_EXCLUSIVE))
{
- (void) ss_search(rel->rd_node, location, true);
+ (void) ss_search(rel->rd_locator, location, true);
LWLockRelease(SyncScanLock);
}
#ifdef TRACE_SYNCSCAN
savedRightLink = GinPageGetOpaque(page)->rightlink;
/* Begin setting up WAL record */
- data.node = btree->index->rd_node;
+ data.locator = btree->index->rd_locator;
data.flags = xlflags;
if (BufferIsValid(childbuf))
{
needWal = RelationNeedsWAL(index);
- data.node = index->rd_node;
+ data.locator = index->rd_locator;
data.ntuples = 0;
data.newRightlink = data.prevTail = InvalidBlockNumber;
XLogRecPtr recptr;
ginxlogUpdateMeta data;
- data.node = index->rd_node;
+ data.locator = index->rd_locator;
data.ntuples = 0;
data.newRightlink = data.prevTail = InvalidBlockNumber;
memcpy(&data.metadata, metadata, sizeof(GinMetaPageData));
if (PageAddItem(page, (Item) itup, IndexTupleSize(itup), offset, false, false) == InvalidOffsetNumber)
{
- RelFileNode node;
+ RelFileLocator locator;
ForkNumber forknum;
BlockNumber blknum;
- BufferGetTag(buffer, &node, &forknum, &blknum);
+ BufferGetTag(buffer, &locator, &forknum, &blknum);
elog(ERROR, "failed to add item to index page in %u/%u/%u",
- node.spcNode, node.dbNode, node.relNode);
+ locator.spcOid, locator.dbOid, locator.relNumber);
}
}
smgrwrite(RelationGetSmgr(state->indexrel), MAIN_FORKNUM, GIST_ROOT_BLKNO,
levelstate->pages[0], true);
if (RelationNeedsWAL(state->indexrel))
- log_newpage(&state->indexrel->rd_node, MAIN_FORKNUM, GIST_ROOT_BLKNO,
+ log_newpage(&state->indexrel->rd_locator, MAIN_FORKNUM, GIST_ROOT_BLKNO,
levelstate->pages[0], true);
pfree(levelstate->pages[0]);
}
if (RelationNeedsWAL(state->indexrel))
- log_newpages(&state->indexrel->rd_node, MAIN_FORKNUM, state->ready_num_pages,
+ log_newpages(&state->indexrel->rd_locator, MAIN_FORKNUM, state->ready_num_pages,
state->ready_blknos, state->ready_pages, true);
for (int i = 0; i < state->ready_num_pages; i++)
*/
if (InHotStandby)
{
- RelFileNode rnode;
+ RelFileLocator rlocator;
- XLogRecGetBlockTag(record, 0, &rnode, NULL, NULL);
+ XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL);
- ResolveRecoveryConflictWithSnapshot(xldata->latestRemovedXid, rnode);
+ ResolveRecoveryConflictWithSnapshot(xldata->latestRemovedXid,
+ rlocator);
}
if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
*/
if (InHotStandby)
ResolveRecoveryConflictWithSnapshotFullXid(xlrec->latestRemovedFullXid,
- xlrec->node);
+ xlrec->locator);
}
void
*/
/* XLOG stuff */
- xlrec_reuse.node = rel->rd_node;
+ xlrec_reuse.locator = rel->rd_locator;
xlrec_reuse.block = blkno;
xlrec_reuse.latestRemovedFullXid = latestRemovedXid;
*/
if (InHotStandby)
{
- RelFileNode rnode;
+ RelFileLocator rlocator;
- XLogRecGetBlockTag(record, 0, &rnode, NULL, NULL);
- ResolveRecoveryConflictWithSnapshot(xldata->latestRemovedXid, rnode);
+ XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL);
+ ResolveRecoveryConflictWithSnapshot(xldata->latestRemovedXid, rlocator);
}
action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &buffer);
MarkBufferDirty(buf);
if (use_wal)
- log_newpage(&rel->rd_node,
+ log_newpage(&rel->rd_locator,
forkNum,
blkno,
BufferGetPage(buf),
ovflopaque->hasho_page_id = HASHO_PAGE_ID;
if (RelationNeedsWAL(rel))
- log_newpage(&rel->rd_node,
+ log_newpage(&rel->rd_locator,
MAIN_FORKNUM,
lastblock,
zerobuf.data,
* heap_buffer, if necessary.
*/
XLogRecPtr
-log_heap_visible(RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer,
+log_heap_visible(RelFileLocator rlocator, Buffer heap_buffer, Buffer vm_buffer,
TransactionId cutoff_xid, uint8 vmflags)
{
xl_heap_visible xlrec;
Assert(tup->t_tableOid != InvalidOid);
xlrec.top_xid = GetTopTransactionId();
- xlrec.target_node = relation->rd_node;
+ xlrec.target_locator = relation->rd_locator;
xlrec.target_tid = tup->t_self;
/*
XLogRecPtr lsn = record->EndRecPtr;
xl_heap_prune *xlrec = (xl_heap_prune *) XLogRecGetData(record);
Buffer buffer;
- RelFileNode rnode;
+ RelFileLocator rlocator;
BlockNumber blkno;
XLogRedoAction action;
- XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
+ XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno);
/*
* We're about to remove tuples. In Hot Standby mode, ensure that there's
* no queries running for which the removed tuples are still visible.
*/
if (InHotStandby)
- ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rnode);
+ ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rlocator);
/*
* If we have a full-page image, restore it (using a cleanup lock) and
* Do this regardless of a full-page image being applied, since the
* FSM data is not in the page anyway.
*/
- XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
+ XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
}
}
if (BufferIsValid(buffer))
{
Size freespace = PageGetHeapFreeSpace(BufferGetPage(buffer));
- RelFileNode rnode;
+ RelFileLocator rlocator;
- XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
+ XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno);
UnlockReleaseBuffer(buffer);
* Do this regardless of a full-page image being applied, since the
* FSM data is not in the page anyway.
*/
- XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
+ XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
}
}
Buffer vmbuffer = InvalidBuffer;
Buffer buffer;
Page page;
- RelFileNode rnode;
+ RelFileLocator rlocator;
BlockNumber blkno;
XLogRedoAction action;
- XLogRecGetBlockTag(record, 1, &rnode, NULL, &blkno);
+ XLogRecGetBlockTag(record, 1, &rlocator, NULL, &blkno);
/*
* If there are any Hot Standby transactions running that have an xmin
* rather than killing the transaction outright.
*/
if (InHotStandby)
- ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, rnode);
+ ResolveRecoveryConflictWithSnapshot(xlrec->cutoff_xid, rlocator);
/*
* Read the heap page, if it still exists. If the heap file has dropped or
* FSM data is not in the page anyway.
*/
if (xlrec->flags & VISIBILITYMAP_VALID_BITS)
- XLogRecordPageWithFreeSpace(rnode, blkno, space);
+ XLogRecordPageWithFreeSpace(rlocator, blkno, space);
}
/*
*/
LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);
- reln = CreateFakeRelcacheEntry(rnode);
+ reln = CreateFakeRelcacheEntry(rlocator);
visibilitymap_pin(reln, blkno, &vmbuffer);
/*
*/
if (InHotStandby)
{
- RelFileNode rnode;
+ RelFileLocator rlocator;
TransactionId latestRemovedXid = cutoff_xid;
TransactionIdRetreat(latestRemovedXid);
- XLogRecGetBlockTag(record, 0, &rnode, NULL, NULL);
- ResolveRecoveryConflictWithSnapshot(latestRemovedXid, rnode);
+ XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL);
+ ResolveRecoveryConflictWithSnapshot(latestRemovedXid, rlocator);
}
if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
ItemId lp = NULL;
HeapTupleHeader htup;
BlockNumber blkno;
- RelFileNode target_node;
+ RelFileLocator target_locator;
ItemPointerData target_tid;
- XLogRecGetBlockTag(record, 0, &target_node, NULL, &blkno);
+ XLogRecGetBlockTag(record, 0, &target_locator, NULL, &blkno);
ItemPointerSetBlockNumber(&target_tid, blkno);
ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
*/
if (xlrec->flags & XLH_DELETE_ALL_VISIBLE_CLEARED)
{
- Relation reln = CreateFakeRelcacheEntry(target_node);
+ Relation reln = CreateFakeRelcacheEntry(target_locator);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, blkno, &vmbuffer);
xl_heap_header xlhdr;
uint32 newlen;
Size freespace = 0;
- RelFileNode target_node;
+ RelFileLocator target_locator;
BlockNumber blkno;
ItemPointerData target_tid;
XLogRedoAction action;
- XLogRecGetBlockTag(record, 0, &target_node, NULL, &blkno);
+ XLogRecGetBlockTag(record, 0, &target_locator, NULL, &blkno);
ItemPointerSetBlockNumber(&target_tid, blkno);
ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
*/
if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
{
- Relation reln = CreateFakeRelcacheEntry(target_node);
+ Relation reln = CreateFakeRelcacheEntry(target_locator);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, blkno, &vmbuffer);
* totally accurate anyway.
*/
if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
- XLogRecordPageWithFreeSpace(target_node, blkno, freespace);
+ XLogRecordPageWithFreeSpace(target_locator, blkno, freespace);
}
/*
{
XLogRecPtr lsn = record->EndRecPtr;
xl_heap_multi_insert *xlrec;
- RelFileNode rnode;
+ RelFileLocator rlocator;
BlockNumber blkno;
Buffer buffer;
Page page;
*/
xlrec = (xl_heap_multi_insert *) XLogRecGetData(record);
- XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
+ XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno);
/* check that the mutually exclusive flags are not both set */
Assert(!((xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED) &&
*/
if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
{
- Relation reln = CreateFakeRelcacheEntry(rnode);
+ Relation reln = CreateFakeRelcacheEntry(rlocator);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, blkno, &vmbuffer);
* totally accurate anyway.
*/
if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
- XLogRecordPageWithFreeSpace(rnode, blkno, freespace);
+ XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
}
/*
{
XLogRecPtr lsn = record->EndRecPtr;
xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
- RelFileNode rnode;
+ RelFileLocator rlocator;
BlockNumber oldblk;
BlockNumber newblk;
ItemPointerData newtid;
oldtup.t_data = NULL;
oldtup.t_len = 0;
- XLogRecGetBlockTag(record, 0, &rnode, NULL, &newblk);
+ XLogRecGetBlockTag(record, 0, &rlocator, NULL, &newblk);
if (XLogRecGetBlockTagExtended(record, 1, NULL, NULL, &oldblk, NULL))
{
/* HOT updates are never done across pages */
*/
if (xlrec->flags & XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED)
{
- Relation reln = CreateFakeRelcacheEntry(rnode);
+ Relation reln = CreateFakeRelcacheEntry(rlocator);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, oldblk, &vmbuffer);
*/
if (xlrec->flags & XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED)
{
- Relation reln = CreateFakeRelcacheEntry(rnode);
+ Relation reln = CreateFakeRelcacheEntry(rlocator);
Buffer vmbuffer = InvalidBuffer;
visibilitymap_pin(reln, newblk, &vmbuffer);
* totally accurate anyway.
*/
if (newaction == BLK_NEEDS_REDO && !hot_update && freespace < BLCKSZ / 5)
- XLogRecordPageWithFreeSpace(rnode, newblk, freespace);
+ XLogRecordPageWithFreeSpace(rlocator, newblk, freespace);
}
static void
*/
if (xlrec->flags & XLH_LOCK_ALL_FROZEN_CLEARED)
{
- RelFileNode rnode;
+ RelFileLocator rlocator;
Buffer vmbuffer = InvalidBuffer;
BlockNumber block;
Relation reln;
- XLogRecGetBlockTag(record, 0, &rnode, NULL, &block);
- reln = CreateFakeRelcacheEntry(rnode);
+ XLogRecGetBlockTag(record, 0, &rlocator, NULL, &block);
+ reln = CreateFakeRelcacheEntry(rlocator);
visibilitymap_pin(reln, block, &vmbuffer);
visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN);
*/
if (xlrec->flags & XLH_LOCK_ALL_FROZEN_CLEARED)
{
- RelFileNode rnode;
+ RelFileLocator rlocator;
Buffer vmbuffer = InvalidBuffer;
BlockNumber block;
Relation reln;
- XLogRecGetBlockTag(record, 0, &rnode, NULL, &block);
- reln = CreateFakeRelcacheEntry(rnode);
+ XLogRecGetBlockTag(record, 0, &rlocator, NULL, &block);
+ reln = CreateFakeRelcacheEntry(rlocator);
visibilitymap_pin(reln, block, &vmbuffer);
visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN);
*/
static void
-heapam_relation_set_new_filenode(Relation rel,
- const RelFileNode *newrnode,
- char persistence,
- TransactionId *freezeXid,
- MultiXactId *minmulti)
+heapam_relation_set_new_filelocator(Relation rel,
+ const RelFileLocator *newrlocator,
+ char persistence,
+ TransactionId *freezeXid,
+ MultiXactId *minmulti)
{
SMgrRelation srel;
*/
*minmulti = GetOldestMultiXactId();
- srel = RelationCreateStorage(*newrnode, persistence, true);
+ srel = RelationCreateStorage(*newrlocator, persistence, true);
/*
* If required, set up an init fork for an unlogged table so that it can
rel->rd_rel->relkind == RELKIND_MATVIEW ||
rel->rd_rel->relkind == RELKIND_TOASTVALUE);
smgrcreate(srel, INIT_FORKNUM, false);
- log_smgrcreate(newrnode, INIT_FORKNUM);
+ log_smgrcreate(newrlocator, INIT_FORKNUM);
smgrimmedsync(srel, INIT_FORKNUM);
}
}
static void
-heapam_relation_copy_data(Relation rel, const RelFileNode *newrnode)
+heapam_relation_copy_data(Relation rel, const RelFileLocator *newrlocator)
{
SMgrRelation dstrel;
- dstrel = smgropen(*newrnode, rel->rd_backend);
+ dstrel = smgropen(*newrlocator, rel->rd_backend);
/*
* Since we copy the file directly without looking at the shared buffers,
* Create and copy all forks of the relation, and schedule unlinking of
* old physical files.
*
- * NOTE: any conflict in relfilenode value will be caught in
+ * NOTE: any conflict in relfilenumber value will be caught in
* RelationCreateStorage().
*/
- RelationCreateStorage(*newrnode, rel->rd_rel->relpersistence, true);
+ RelationCreateStorage(*newrlocator, rel->rd_rel->relpersistence, true);
/* copy main fork */
RelationCopyStorage(RelationGetSmgr(rel), dstrel, MAIN_FORKNUM,
if (RelationIsPermanent(rel) ||
(rel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED &&
forkNum == INIT_FORKNUM))
- log_smgrcreate(newrnode, forkNum);
+ log_smgrcreate(newrlocator, forkNum);
RelationCopyStorage(RelationGetSmgr(rel), dstrel, forkNum,
rel->rd_rel->relpersistence);
}
.tuple_satisfies_snapshot = heapam_tuple_satisfies_snapshot,
.index_delete_tuples = heap_index_delete_tuples,
- .relation_set_new_filenode = heapam_relation_set_new_filenode,
+ .relation_set_new_filelocator = heapam_relation_set_new_filelocator,
.relation_nontransactional_truncate = heapam_relation_nontransactional_truncate,
.relation_copy_data = heapam_relation_copy_data,
.relation_copy_for_cluster = heapam_relation_copy_for_cluster,
if (state->rs_buffer_valid)
{
if (RelationNeedsWAL(state->rs_new_rel))
- log_newpage(&state->rs_new_rel->rd_node,
+ log_newpage(&state->rs_new_rel->rd_locator,
MAIN_FORKNUM,
state->rs_blockno,
state->rs_buffer,
/* XLOG stuff */
if (RelationNeedsWAL(state->rs_new_rel))
- log_newpage(&state->rs_new_rel->rd_node,
+ log_newpage(&state->rs_new_rel->rd_locator,
MAIN_FORKNUM,
state->rs_blockno,
page,
* When doing logical decoding - which relies on using cmin/cmax of catalog
* tuples, via xl_heap_new_cid records - heap rewrites have to log enough
* information to allow the decoding backend to update its internal mapping
- * of (relfilenode,ctid) => (cmin, cmax) to be correct for the rewritten heap.
+ * of (relfilelocator,ctid) => (cmin, cmax) to be correct for the rewritten heap.
*
* For that, every time we find a tuple that's been modified in a catalog
* relation within the xmin horizon of any decoding slot, we log a mapping
return;
/* fill out mapping information */
- map.old_node = state->rs_old_rel->rd_node;
+ map.old_locator = state->rs_old_rel->rd_locator;
map.old_tid = old_tid;
- map.new_node = state->rs_new_rel->rd_node;
+ map.new_locator = state->rs_new_rel->rd_locator;
map.new_tid = new_tid;
/* ---
if (XLogRecPtrIsInvalid(recptr))
{
Assert(!InRecovery);
- recptr = log_heap_visible(rel->rd_node, heapBuf, vmBuf,
+ recptr = log_heap_visible(rel->rd_locator, heapBuf, vmBuf,
cutoff_xid, flags);
/*
* to keep checking for creation or extension of the file, which happens
* infrequently.
*/
- CacheInvalidateSmgr(reln->smgr_rnode);
+ CacheInvalidateSmgr(reln->smgr_rlocator);
UnlockRelationForExtension(rel, ExclusiveLock);
}
*/
/* XLOG stuff */
- xlrec_reuse.node = rel->rd_node;
+ xlrec_reuse.locator = rel->rd_locator;
xlrec_reuse.block = blkno;
xlrec_reuse.latestRemovedFullXid = safexid;
PageSetChecksumInplace(metapage, BTREE_METAPAGE);
smgrwrite(RelationGetSmgr(index), INIT_FORKNUM, BTREE_METAPAGE,
(char *) metapage, true);
- log_newpage(&RelationGetSmgr(index)->smgr_rnode.node, INIT_FORKNUM,
+ log_newpage(&RelationGetSmgr(index)->smgr_rlocator.locator, INIT_FORKNUM,
BTREE_METAPAGE, metapage, true);
/*
if (wstate->btws_use_wal)
{
/* We use the XLOG_FPI record type for this */
- log_newpage(&wstate->index->rd_node, MAIN_FORKNUM, blkno, page, true);
+ log_newpage(&wstate->index->rd_locator, MAIN_FORKNUM, blkno, page, true);
}
/*
*/
if (InHotStandby)
{
- RelFileNode rnode;
+ RelFileLocator rlocator;
- XLogRecGetBlockTag(record, 0, &rnode, NULL, NULL);
+ XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL);
- ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rnode);
+ ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid, rlocator);
}
/*
if (InHotStandby)
ResolveRecoveryConflictWithSnapshotFullXid(xlrec->latestRemovedFullXid,
- xlrec->node);
+ xlrec->locator);
}
void
#include "access/generic_xlog.h"
#include "lib/stringinfo.h"
-#include "storage/relfilenode.h"
+#include "storage/relfilelocator.h"
/*
* Description of generic xlog record: write page regions that this record
#include "access/ginxlog.h"
#include "access/xlogutils.h"
#include "lib/stringinfo.h"
-#include "storage/relfilenode.h"
+#include "storage/relfilelocator.h"
static void
desc_recompress_leaf(StringInfo buf, ginxlogRecompressDataLeaf *insertData)
#include "access/gistxlog.h"
#include "lib/stringinfo.h"
-#include "storage/relfilenode.h"
+#include "storage/relfilelocator.h"
static void
out_gistxlogPageUpdate(StringInfo buf, gistxlogPageUpdate *xlrec)
out_gistxlogPageReuse(StringInfo buf, gistxlogPageReuse *xlrec)
{
appendStringInfo(buf, "rel %u/%u/%u; blk %u; latestRemovedXid %u:%u",
- xlrec->node.spcNode, xlrec->node.dbNode,
- xlrec->node.relNode, xlrec->block,
+ xlrec->locator.spcOid, xlrec->locator.dbOid,
+ xlrec->locator.relNumber, xlrec->block,
EpochFromFullTransactionId(xlrec->latestRemovedFullXid),
XidFromFullTransactionId(xlrec->latestRemovedFullXid));
}
xl_heap_new_cid *xlrec = (xl_heap_new_cid *) rec;
appendStringInfo(buf, "rel %u/%u/%u; tid %u/%u",
- xlrec->target_node.spcNode,
- xlrec->target_node.dbNode,
- xlrec->target_node.relNode,
+ xlrec->target_locator.spcOid,
+ xlrec->target_locator.dbOid,
+ xlrec->target_locator.relNumber,
ItemPointerGetBlockNumber(&(xlrec->target_tid)),
ItemPointerGetOffsetNumber(&(xlrec->target_tid)));
appendStringInfo(buf, "; cmin: %u, cmax: %u, combo: %u",
xl_btree_reuse_page *xlrec = (xl_btree_reuse_page *) rec;
appendStringInfo(buf, "rel %u/%u/%u; latestRemovedXid %u:%u",
- xlrec->node.spcNode, xlrec->node.dbNode,
- xlrec->node.relNode,
+ xlrec->locator.spcOid, xlrec->locator.dbOid,
+ xlrec->locator.relNumber,
EpochFromFullTransactionId(xlrec->latestRemovedFullXid),
XidFromFullTransactionId(xlrec->latestRemovedFullXid));
break;
if (info == XLOG_SEQ_LOG)
appendStringInfo(buf, "rel %u/%u/%u",
- xlrec->node.spcNode, xlrec->node.dbNode,
- xlrec->node.relNode);
+ xlrec->locator.spcOid, xlrec->locator.dbOid,
+ xlrec->locator.relNumber);
}
const char *
if (info == XLOG_SMGR_CREATE)
{
xl_smgr_create *xlrec = (xl_smgr_create *) rec;
- char *path = relpathperm(xlrec->rnode, xlrec->forkNum);
+ char *path = relpathperm(xlrec->rlocator, xlrec->forkNum);
appendStringInfoString(buf, path);
pfree(path);
else if (info == XLOG_SMGR_TRUNCATE)
{
xl_smgr_truncate *xlrec = (xl_smgr_truncate *) rec;
- char *path = relpathperm(xlrec->rnode, MAIN_FORKNUM);
+ char *path = relpathperm(xlrec->rlocator, MAIN_FORKNUM);
appendStringInfo(buf, "%s to %u blocks flags %d", path,
xlrec->blkno, xlrec->flags);
data += parsed->nsubxacts * sizeof(TransactionId);
}
- if (parsed->xinfo & XACT_XINFO_HAS_RELFILENODES)
+ if (parsed->xinfo & XACT_XINFO_HAS_RELFILELOCATORS)
{
- xl_xact_relfilenodes *xl_relfilenodes = (xl_xact_relfilenodes *) data;
+ xl_xact_relfilelocators *xl_rellocators = (xl_xact_relfilelocators *) data;
- parsed->nrels = xl_relfilenodes->nrels;
- parsed->xnodes = xl_relfilenodes->xnodes;
+ parsed->nrels = xl_rellocators->nrels;
+ parsed->xlocators = xl_rellocators->xlocators;
- data += MinSizeOfXactRelfilenodes;
- data += xl_relfilenodes->nrels * sizeof(RelFileNode);
+ data += MinSizeOfXactRelfileLocators;
+ data += xl_rellocators->nrels * sizeof(RelFileLocator);
}
if (parsed->xinfo & XACT_XINFO_HAS_DROPPED_STATS)
data += parsed->nsubxacts * sizeof(TransactionId);
}
- if (parsed->xinfo & XACT_XINFO_HAS_RELFILENODES)
+ if (parsed->xinfo & XACT_XINFO_HAS_RELFILELOCATORS)
{
- xl_xact_relfilenodes *xl_relfilenodes = (xl_xact_relfilenodes *) data;
+ xl_xact_relfilelocators *xl_rellocators = (xl_xact_relfilelocators *) data;
- parsed->nrels = xl_relfilenodes->nrels;
- parsed->xnodes = xl_relfilenodes->xnodes;
+ parsed->nrels = xl_rellocators->nrels;
+ parsed->xlocators = xl_rellocators->xlocators;
- data += MinSizeOfXactRelfilenodes;
- data += xl_relfilenodes->nrels * sizeof(RelFileNode);
+ data += MinSizeOfXactRelfileLocators;
+ data += xl_rellocators->nrels * sizeof(RelFileLocator);
}
if (parsed->xinfo & XACT_XINFO_HAS_DROPPED_STATS)
parsed->subxacts = (TransactionId *) bufptr;
bufptr += MAXALIGN(xlrec->nsubxacts * sizeof(TransactionId));
- parsed->xnodes = (RelFileNode *) bufptr;
- bufptr += MAXALIGN(xlrec->ncommitrels * sizeof(RelFileNode));
+ parsed->xlocators = (RelFileLocator *) bufptr;
+ bufptr += MAXALIGN(xlrec->ncommitrels * sizeof(RelFileLocator));
- parsed->abortnodes = (RelFileNode *) bufptr;
- bufptr += MAXALIGN(xlrec->nabortrels * sizeof(RelFileNode));
+ parsed->abortlocators = (RelFileLocator *) bufptr;
+ bufptr += MAXALIGN(xlrec->nabortrels * sizeof(RelFileLocator));
parsed->stats = (xl_xact_stats_item *) bufptr;
bufptr += MAXALIGN(xlrec->ncommitstats * sizeof(xl_xact_stats_item));
static void
xact_desc_relations(StringInfo buf, char *label, int nrels,
- RelFileNode *xnodes)
+ RelFileLocator *xlocators)
{
int i;
appendStringInfo(buf, "; %s:", label);
for (i = 0; i < nrels; i++)
{
- char *path = relpathperm(xnodes[i], MAIN_FORKNUM);
+ char *path = relpathperm(xlocators[i], MAIN_FORKNUM);
appendStringInfo(buf, " %s", path);
pfree(path);
appendStringInfoString(buf, timestamptz_to_str(xlrec->xact_time));
- xact_desc_relations(buf, "rels", parsed.nrels, parsed.xnodes);
+ xact_desc_relations(buf, "rels", parsed.nrels, parsed.xlocators);
xact_desc_subxacts(buf, parsed.nsubxacts, parsed.subxacts);
xact_desc_stats(buf, "", parsed.nstats, parsed.stats);
appendStringInfoString(buf, timestamptz_to_str(xlrec->xact_time));
- xact_desc_relations(buf, "rels", parsed.nrels, parsed.xnodes);
+ xact_desc_relations(buf, "rels", parsed.nrels, parsed.xlocators);
xact_desc_subxacts(buf, parsed.nsubxacts, parsed.subxacts);
if (parsed.xinfo & XACT_XINFO_HAS_ORIGIN)
appendStringInfo(buf, "gid %s: ", parsed.twophase_gid);
appendStringInfoString(buf, timestamptz_to_str(parsed.xact_time));
- xact_desc_relations(buf, "rels(commit)", parsed.nrels, parsed.xnodes);
+ xact_desc_relations(buf, "rels(commit)", parsed.nrels, parsed.xlocators);
xact_desc_relations(buf, "rels(abort)", parsed.nabortrels,
- parsed.abortnodes);
+ parsed.abortlocators);
xact_desc_stats(buf, "commit ", parsed.nstats, parsed.stats);
xact_desc_stats(buf, "abort ", parsed.nabortstats, parsed.abortstats);
xact_desc_subxacts(buf, parsed.nsubxacts, parsed.subxacts);
for (block_id = 0; block_id <= XLogRecMaxBlockId(record); block_id++)
{
- RelFileNode rnode;
+ RelFileLocator rlocator;
ForkNumber forknum;
BlockNumber blk;
if (!XLogRecGetBlockTagExtended(record, block_id,
- &rnode, &forknum, &blk, NULL))
+ &rlocator, &forknum, &blk, NULL))
continue;
if (detailed_format)
appendStringInfo(buf,
"blkref #%d: rel %u/%u/%u fork %s blk %u",
block_id,
- rnode.spcNode, rnode.dbNode, rnode.relNode,
+ rlocator.spcOid, rlocator.dbOid, rlocator.relNumber,
forkNames[forknum],
blk);
appendStringInfo(buf,
", blkref #%d: rel %u/%u/%u fork %s blk %u",
block_id,
- rnode.spcNode, rnode.dbNode, rnode.relNode,
+ rlocator.spcOid, rlocator.dbOid, rlocator.relNumber,
forkNames[forknum],
blk);
}
appendStringInfo(buf,
", blkref #%d: rel %u/%u/%u blk %u",
block_id,
- rnode.spcNode, rnode.dbNode, rnode.relNode,
+ rlocator.spcOid, rlocator.dbOid, rlocator.relNumber,
blk);
}
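For reference, the compact form above (presumably used when the block is in the
main fork) renders with hypothetical values as ", blkref #0: rel
1663/16384/16392 blk 7"; the other two forms additionally print the fork name.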
PageSetChecksumInplace(page, SPGIST_METAPAGE_BLKNO);
smgrwrite(RelationGetSmgr(index), INIT_FORKNUM, SPGIST_METAPAGE_BLKNO,
(char *) page, true);
- log_newpage(&(RelationGetSmgr(index))->smgr_rnode.node, INIT_FORKNUM,
+ log_newpage(&(RelationGetSmgr(index))->smgr_rlocator.locator, INIT_FORKNUM,
SPGIST_METAPAGE_BLKNO, page, true);
/* Likewise for the root page. */
PageSetChecksumInplace(page, SPGIST_ROOT_BLKNO);
smgrwrite(RelationGetSmgr(index), INIT_FORKNUM, SPGIST_ROOT_BLKNO,
(char *) page, true);
- log_newpage(&(RelationGetSmgr(index))->smgr_rnode.node, INIT_FORKNUM,
+ log_newpage(&(RelationGetSmgr(index))->smgr_rlocator.locator, INIT_FORKNUM,
SPGIST_ROOT_BLKNO, page, true);
/* Likewise for the null-tuples root page. */
PageSetChecksumInplace(page, SPGIST_NULL_BLKNO);
smgrwrite(RelationGetSmgr(index), INIT_FORKNUM, SPGIST_NULL_BLKNO,
(char *) page, true);
- log_newpage(&(RelationGetSmgr(index))->smgr_rnode.node, INIT_FORKNUM,
+ log_newpage(&(RelationGetSmgr(index))->smgr_rlocator.locator, INIT_FORKNUM,
SPGIST_NULL_BLKNO, page, true);
/*
{
if (TransactionIdIsValid(xldata->newestRedirectXid))
{
- RelFileNode node;
+ RelFileLocator locator;
- XLogRecGetBlockTag(record, 0, &node, NULL, NULL);
+ XLogRecGetBlockTag(record, 0, &locator, NULL, NULL);
ResolveRecoveryConflictWithSnapshot(xldata->newestRedirectXid,
- node);
+ locator);
}
}
Assert(routine->tuple_update != NULL);
Assert(routine->tuple_lock != NULL);
- Assert(routine->relation_set_new_filenode != NULL);
+ Assert(routine->relation_set_new_filelocator != NULL);
Assert(routine->relation_nontransactional_truncate != NULL);
Assert(routine->relation_copy_data != NULL);
Assert(routine->relation_copy_for_cluster != NULL);
XLogRegisterBuffer adds information about a data block to the WAL record.
block_id is an arbitrary number used to identify this page reference in
the redo routine. The information needed to re-find the page at redo -
- relfilenode, fork, and block number - are included in the WAL record.
+ relfilelocator, fork, and block number - is included in the WAL record.
XLogInsert will automatically include a full copy of the page contents, if
this is the first modification of the buffer since the last checkpoint.
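For orientation, a typical caller follows roughly this pattern (a sketch only;
xlrec, SizeOfMyRecord, RM_MYRMGR_ID and XLOG_MY_OP are hypothetical names):

    XLogRecPtr  recptr;

    START_CRIT_SECTION();
    /* ... apply the change to the page held in 'buffer' ... */
    MarkBufferDirty(buffer);

    XLogBeginInsert();
    XLogRegisterData((char *) &xlrec, SizeOfMyRecord); /* record-specific data */
    XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);    /* block_id 0 */
    recptr = XLogInsert(RM_MYRMGR_ID, XLOG_MY_OP);     /* hypothetical rmgr/info */
    PageSetLSN(BufferGetPage(buffer), recptr);
    END_CRIT_SECTION();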
entry in pg_class, but that currently isn't done because of the possibility
of deleting data that is useful for forensic analysis of the crash.
Orphan files are harmless --- at worst they waste a bit of disk space ---
-because we check for on-disk collisions when allocating new relfilenode
+because we check for on-disk collisions when allocating new relfilenumber
OIDs. So cleaning up isn't really necessary.
3. Deleting a table, which requires an unlink() that could fail.
entry until we've successfully done the original action.
-Skipping WAL for New RelFileNode
+Skipping WAL for New RelFileLocator
-----------------------------------
-Under wal_level=minimal, if a change modifies a relfilenode that ROLLBACK
+Under wal_level=minimal, if a change modifies a relfilenumber that ROLLBACK
would unlink, in-tree access methods write no WAL for that change. Code that
writes WAL without calling RelationNeedsWAL() must check for this case. This
skipping is mandatory. If a WAL-writing change preceded a WAL-skipping change
method callbacks must not call functions that react to RelationNeedsWAL().
This applies only to WAL records whose replay would modify bytes stored in the
-new relfilenode. It does not apply to other records about the relfilenode,
+new relfilenumber. It does not apply to other records about the relfilenumber,
such as XLOG_SMGR_CREATE. Because it operates at the level of individual
-relfilenodes, RelationNeedsWAL() can differ for tightly-coupled relations.
+relfilenumbers, RelationNeedsWAL() can differ for tightly-coupled relations.
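In code, the skip is simply the guard used throughout this patch: WAL is
written only when RelationNeedsWAL() allows it; a minimal sketch, mirroring the
log_newpage() calls elsewhere in the patch:

    if (RelationNeedsWAL(rel))
        log_newpage(&rel->rd_locator, MAIN_FORKNUM, blkno,
                    BufferGetPage(buf), true);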
Consider "CREATE TABLE t (); BEGIN; ALTER TABLE t ADD c text; ..." in which
ALTER TABLE adds a TOAST relation. The TOAST relation will skip WAL, while
the table owning it will not. ALTER TABLE SET TABLESPACE will cause a table
advance of T1's commit, but we don't care since temp table contents don't
survive crashes anyway.
-Database writes that skip WAL for new relfilenodes are also safe. In these
+Database writes that skip WAL for new relfilenumbers are also safe. In these
cases it's entirely possible for the data to reach disk before T1's commit,
because T1 will fsync it down to disk without any sort of interlock. However,
all these paths are designed to write data that no other transaction can see
an index that is currently being rebuilt.
- Active relmapper.c mapping state. This is needed to allow consistent
- answers when fetching the current relfilenode for relation oids of
+ answers when fetching the current relfilenumber for relation oids of
mapped relations.
To prevent unprincipled deadlocks when running in parallel mode, this code
int nchildren,
TransactionId *children,
int nrels,
- RelFileNode *rels,
+ RelFileLocator *rels,
int nstats,
xl_xact_stats_item *stats,
int ninvalmsgs,
int nchildren,
TransactionId *children,
int nrels,
- RelFileNode *rels,
+ RelFileLocator *rels,
int nstats,
xl_xact_stats_item *stats,
const char *gid);
*
* 1. TwoPhaseFileHeader
* 2. TransactionId[] (subtransactions)
- * 3. RelFileNode[] (files to be deleted at commit)
- * 4. RelFileNode[] (files to be deleted at abort)
+ * 3. RelFileLocator[] (files to be deleted at commit)
+ * 4. RelFileLocator[] (files to be deleted at abort)
* 5. SharedInvalidationMessage[] (inval messages to be sent at commit)
* 6. TwoPhaseRecordOnDisk
* 7. ...
TransactionId xid = gxact->xid;
TwoPhaseFileHeader hdr;
TransactionId *children;
- RelFileNode *commitrels;
- RelFileNode *abortrels;
+ RelFileLocator *commitrels;
+ RelFileLocator *abortrels;
xl_xact_stats_item *abortstats = NULL;
xl_xact_stats_item *commitstats = NULL;
SharedInvalidationMessage *invalmsgs;
}
if (hdr.ncommitrels > 0)
{
- save_state_data(commitrels, hdr.ncommitrels * sizeof(RelFileNode));
+ save_state_data(commitrels, hdr.ncommitrels * sizeof(RelFileLocator));
pfree(commitrels);
}
if (hdr.nabortrels > 0)
{
- save_state_data(abortrels, hdr.nabortrels * sizeof(RelFileNode));
+ save_state_data(abortrels, hdr.nabortrels * sizeof(RelFileLocator));
pfree(abortrels);
}
if (hdr.ncommitstats > 0)
TwoPhaseFileHeader *hdr;
TransactionId latestXid;
TransactionId *children;
- RelFileNode *commitrels;
- RelFileNode *abortrels;
- RelFileNode *delrels;
+ RelFileLocator *commitrels;
+ RelFileLocator *abortrels;
+ RelFileLocator *delrels;
int ndelrels;
xl_xact_stats_item *commitstats;
xl_xact_stats_item *abortstats;
bufptr += MAXALIGN(hdr->gidlen);
children = (TransactionId *) bufptr;
bufptr += MAXALIGN(hdr->nsubxacts * sizeof(TransactionId));
- commitrels = (RelFileNode *) bufptr;
- bufptr += MAXALIGN(hdr->ncommitrels * sizeof(RelFileNode));
- abortrels = (RelFileNode *) bufptr;
- bufptr += MAXALIGN(hdr->nabortrels * sizeof(RelFileNode));
+ commitrels = (RelFileLocator *) bufptr;
+ bufptr += MAXALIGN(hdr->ncommitrels * sizeof(RelFileLocator));
+ abortrels = (RelFileLocator *) bufptr;
+ bufptr += MAXALIGN(hdr->nabortrels * sizeof(RelFileLocator));
commitstats = (xl_xact_stats_item *) bufptr;
bufptr += MAXALIGN(hdr->ncommitstats * sizeof(xl_xact_stats_item));
abortstats = (xl_xact_stats_item *) bufptr;
bufptr += MAXALIGN(hdr->gidlen);
subxids = (TransactionId *) bufptr;
bufptr += MAXALIGN(hdr->nsubxacts * sizeof(TransactionId));
- bufptr += MAXALIGN(hdr->ncommitrels * sizeof(RelFileNode));
- bufptr += MAXALIGN(hdr->nabortrels * sizeof(RelFileNode));
+ bufptr += MAXALIGN(hdr->ncommitrels * sizeof(RelFileLocator));
+ bufptr += MAXALIGN(hdr->nabortrels * sizeof(RelFileLocator));
bufptr += MAXALIGN(hdr->ncommitstats * sizeof(xl_xact_stats_item));
bufptr += MAXALIGN(hdr->nabortstats * sizeof(xl_xact_stats_item));
bufptr += MAXALIGN(hdr->ninvalmsgs * sizeof(SharedInvalidationMessage));
int nchildren,
TransactionId *children,
int nrels,
- RelFileNode *rels,
+ RelFileLocator *rels,
int nstats,
xl_xact_stats_item *stats,
int ninvalmsgs,
int nchildren,
TransactionId *children,
int nrels,
- RelFileNode *rels,
+ RelFileLocator *rels,
int nstats,
xl_xact_stats_item *stats,
const char *gid)
* wide, counter wraparound will occur eventually, and therefore it is unwise
* to assume they are unique unless precautions are taken to make them so.
* Hence, this routine should generally not be used directly. The only direct
- * callers should be GetNewOidWithIndex() and GetNewRelFileNode() in
+ * callers should be GetNewOidWithIndex() and GetNewRelFileNumber() in
* catalog/catalog.c.
*/
Oid
bool markXidCommitted = TransactionIdIsValid(xid);
TransactionId latestXid = InvalidTransactionId;
int nrels;
- RelFileNode *rels;
+ RelFileLocator *rels;
int nchildren;
TransactionId *children;
int ndroppedstats = 0;
TransactionId xid = GetCurrentTransactionIdIfAny();
TransactionId latestXid;
int nrels;
- RelFileNode *rels;
+ RelFileLocator *rels;
int ndroppedstats = 0;
xl_xact_stats_item *droppedstats = NULL;
int nchildren;
XLogRecPtr
XactLogCommitRecord(TimestampTz commit_time,
int nsubxacts, TransactionId *subxacts,
- int nrels, RelFileNode *rels,
+ int nrels, RelFileLocator *rels,
int ndroppedstats, xl_xact_stats_item *droppedstats,
int nmsgs, SharedInvalidationMessage *msgs,
bool relcacheInval,
xl_xact_xinfo xl_xinfo;
xl_xact_dbinfo xl_dbinfo;
xl_xact_subxacts xl_subxacts;
- xl_xact_relfilenodes xl_relfilenodes;
+ xl_xact_relfilelocators xl_relfilelocators;
xl_xact_stats_items xl_dropped_stats;
xl_xact_invals xl_invals;
xl_xact_twophase xl_twophase;
if (nrels > 0)
{
- xl_xinfo.xinfo |= XACT_XINFO_HAS_RELFILENODES;
- xl_relfilenodes.nrels = nrels;
+ xl_xinfo.xinfo |= XACT_XINFO_HAS_RELFILELOCATORS;
+ xl_relfilelocators.nrels = nrels;
info |= XLR_SPECIAL_REL_UPDATE;
}
nsubxacts * sizeof(TransactionId));
}
- if (xl_xinfo.xinfo & XACT_XINFO_HAS_RELFILENODES)
+ if (xl_xinfo.xinfo & XACT_XINFO_HAS_RELFILELOCATORS)
{
- XLogRegisterData((char *) (&xl_relfilenodes),
- MinSizeOfXactRelfilenodes);
+ XLogRegisterData((char *) (&xl_relfilelocators),
+ MinSizeOfXactRelfileLocators);
XLogRegisterData((char *) rels,
- nrels * sizeof(RelFileNode));
+ nrels * sizeof(RelFileLocator));
}
if (xl_xinfo.xinfo & XACT_XINFO_HAS_DROPPED_STATS)
XLogRecPtr
XactLogAbortRecord(TimestampTz abort_time,
int nsubxacts, TransactionId *subxacts,
- int nrels, RelFileNode *rels,
+ int nrels, RelFileLocator *rels,
int ndroppedstats, xl_xact_stats_item *droppedstats,
int xactflags, TransactionId twophase_xid,
const char *twophase_gid)
xl_xact_abort xlrec;
xl_xact_xinfo xl_xinfo;
xl_xact_subxacts xl_subxacts;
- xl_xact_relfilenodes xl_relfilenodes;
+ xl_xact_relfilelocators xl_relfilelocators;
xl_xact_stats_items xl_dropped_stats;
xl_xact_twophase xl_twophase;
xl_xact_dbinfo xl_dbinfo;
if (nrels > 0)
{
- xl_xinfo.xinfo |= XACT_XINFO_HAS_RELFILENODES;
- xl_relfilenodes.nrels = nrels;
+ xl_xinfo.xinfo |= XACT_XINFO_HAS_RELFILELOCATORS;
+ xl_relfilelocators.nrels = nrels;
info |= XLR_SPECIAL_REL_UPDATE;
}
nsubxacts * sizeof(TransactionId));
}
- if (xl_xinfo.xinfo & XACT_XINFO_HAS_RELFILENODES)
+ if (xl_xinfo.xinfo & XACT_XINFO_HAS_RELFILELOCATORS)
{
- XLogRegisterData((char *) (&xl_relfilenodes),
- MinSizeOfXactRelfilenodes);
+ XLogRegisterData((char *) (&xl_relfilelocators),
+ MinSizeOfXactRelfileLocators);
XLogRegisterData((char *) rels,
- nrels * sizeof(RelFileNode));
+ nrels * sizeof(RelFileLocator));
}
if (xl_xinfo.xinfo & XACT_XINFO_HAS_DROPPED_STATS)
XLogFlush(lsn);
/* Make sure files supposed to be dropped are dropped */
- DropRelationFiles(parsed->xnodes, parsed->nrels, true);
+ DropRelationFiles(parsed->xlocators, parsed->nrels, true);
}
if (parsed->nstats > 0)
*/
XLogFlush(lsn);
- DropRelationFiles(parsed->xnodes, parsed->nrels, true);
+ DropRelationFiles(parsed->xlocators, parsed->nrels, true);
}
if (parsed->nstats > 0)
{
bool in_use; /* is this slot in use? */
uint8 flags; /* REGBUF_* flags */
- RelFileNode rnode; /* identifies the relation and block */
+ RelFileLocator rlocator; /* identifies the relation and block */
ForkNumber forkno;
BlockNumber block;
Page page; /* page content */
regbuf = &registered_buffers[block_id];
- BufferGetTag(buffer, &regbuf->rnode, &regbuf->forkno, &regbuf->block);
+ BufferGetTag(buffer, &regbuf->rlocator, &regbuf->forkno, &regbuf->block);
regbuf->page = BufferGetPage(buffer);
regbuf->flags = flags;
regbuf->rdata_tail = (XLogRecData *) &regbuf->rdata_head;
if (i == block_id || !regbuf_old->in_use)
continue;
- Assert(!RelFileNodeEquals(regbuf_old->rnode, regbuf->rnode) ||
+ Assert(!RelFileLocatorEquals(regbuf_old->rlocator, regbuf->rlocator) ||
regbuf_old->forkno != regbuf->forkno ||
regbuf_old->block != regbuf->block);
}
* shared buffer pool (i.e. when you don't have a Buffer for it).
*/
void
-XLogRegisterBlock(uint8 block_id, RelFileNode *rnode, ForkNumber forknum,
+XLogRegisterBlock(uint8 block_id, RelFileLocator *rlocator, ForkNumber forknum,
BlockNumber blknum, Page page, uint8 flags)
{
registered_buffer *regbuf;
regbuf = &registered_buffers[block_id];
- regbuf->rnode = *rnode;
+ regbuf->rlocator = *rlocator;
regbuf->forkno = forknum;
regbuf->block = blknum;
regbuf->page = page;
if (i == block_id || !regbuf_old->in_use)
continue;
- Assert(!RelFileNodeEquals(regbuf_old->rnode, regbuf->rnode) ||
+ Assert(!RelFileLocatorEquals(regbuf_old->rlocator, regbuf->rlocator) ||
regbuf_old->forkno != regbuf->forkno ||
regbuf_old->block != regbuf->block);
}
rdt_datas_last = regbuf->rdata_tail;
}
- if (prev_regbuf && RelFileNodeEquals(regbuf->rnode, prev_regbuf->rnode))
+ if (prev_regbuf && RelFileLocatorEquals(regbuf->rlocator, prev_regbuf->rlocator))
{
samerel = true;
bkpb.fork_flags |= BKPBLOCK_SAME_REL;
}
if (!samerel)
{
- memcpy(scratch, &regbuf->rnode, sizeof(RelFileNode));
- scratch += sizeof(RelFileNode);
+ memcpy(scratch, &regbuf->rlocator, sizeof(RelFileLocator));
+ scratch += sizeof(RelFileLocator);
}
memcpy(scratch, &regbuf->block, sizeof(BlockNumber));
scratch += sizeof(BlockNumber);
int flags = 0;
PGAlignedBlock copied_buffer;
char *origdata = (char *) BufferGetBlock(buffer);
- RelFileNode rnode;
+ RelFileLocator rlocator;
ForkNumber forkno;
BlockNumber blkno;
if (buffer_std)
flags |= REGBUF_STANDARD;
- BufferGetTag(buffer, &rnode, &forkno, &blkno);
- XLogRegisterBlock(0, &rnode, forkno, blkno, copied_buffer.data, flags);
+ BufferGetTag(buffer, &rlocator, &forkno, &blkno);
+ XLogRegisterBlock(0, &rlocator, forkno, blkno, copied_buffer.data, flags);
recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI_FOR_HINT);
}
* the unused space to be left out from the WAL record, making it smaller.
*/
XLogRecPtr
-log_newpage(RelFileNode *rnode, ForkNumber forkNum, BlockNumber blkno,
+log_newpage(RelFileLocator *rlocator, ForkNumber forkNum, BlockNumber blkno,
Page page, bool page_std)
{
int flags;
flags |= REGBUF_STANDARD;
XLogBeginInsert();
- XLogRegisterBlock(0, rnode, forkNum, blkno, page, flags);
+ XLogRegisterBlock(0, rlocator, forkNum, blkno, page, flags);
recptr = XLogInsert(RM_XLOG_ID, XLOG_FPI);
/*
* because we can write multiple pages in a single WAL record.
*/
void
-log_newpages(RelFileNode *rnode, ForkNumber forkNum, int num_pages,
+log_newpages(RelFileLocator *rlocator, ForkNumber forkNum, int num_pages,
BlockNumber *blknos, Page *pages, bool page_std)
{
int flags;
nbatch = 0;
while (nbatch < XLR_MAX_BLOCK_ID && i < num_pages)
{
- XLogRegisterBlock(nbatch, rnode, forkNum, blknos[i], pages[i], flags);
+ XLogRegisterBlock(nbatch, rlocator, forkNum, blknos[i], pages[i], flags);
i++;
nbatch++;
}
log_newpage_buffer(Buffer buffer, bool page_std)
{
Page page = BufferGetPage(buffer);
- RelFileNode rnode;
+ RelFileLocator rlocator;
ForkNumber forkNum;
BlockNumber blkno;
/* Shared buffers should be modified in a critical section. */
Assert(CritSectionCount > 0);
- BufferGetTag(buffer, &rnode, &forkNum, &blkno);
+ BufferGetTag(buffer, &rlocator, &forkNum, &blkno);
- return log_newpage(&rnode, forkNum, blkno, page, page_std);
+ return log_newpage(&rlocator, forkNum, blkno, page, page_std);
}
/*
dlist_head filter_queue;
/* Book-keeping to avoid repeat prefetches. */
- RelFileNode recent_rnode[XLOGPREFETCHER_SEQ_WINDOW_SIZE];
+ RelFileLocator recent_rlocator[XLOGPREFETCHER_SEQ_WINDOW_SIZE];
BlockNumber recent_block[XLOGPREFETCHER_SEQ_WINDOW_SIZE];
int recent_idx;
*/
typedef struct XLogPrefetcherFilter
{
- RelFileNode rnode;
+ RelFileLocator rlocator;
XLogRecPtr filter_until_replayed;
BlockNumber filter_from_block;
dlist_node link;
} XLogPrefetchStats;
static inline void XLogPrefetcherAddFilter(XLogPrefetcher *prefetcher,
- RelFileNode rnode,
+ RelFileLocator rlocator,
BlockNumber blockno,
XLogRecPtr lsn);
static inline bool XLogPrefetcherIsFiltered(XLogPrefetcher *prefetcher,
- RelFileNode rnode,
+ RelFileLocator rlocator,
BlockNumber blockno);
static inline void XLogPrefetcherCompleteFilters(XLogPrefetcher *prefetcher,
XLogRecPtr replaying_lsn);
{
XLogPrefetcher *prefetcher;
static HASHCTL hash_table_ctl = {
- .keysize = sizeof(RelFileNode),
+ .keysize = sizeof(RelFileLocator),
.entrysize = sizeof(XLogPrefetcherFilter)
};
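Presumably the filter table is then created from this HASHCTL with binary
(blob) keys, along these lines; the table name and initial size here are
illustrative:

    prefetcher->filter_table = hash_create("XLogPrefetcherFilterTable",
                                           1024, /* illustrative initial size */
                                           &hash_table_ctl,
                                           HASH_ELEM | HASH_BLOBS);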
{
xl_dbase_create_file_copy_rec *xlrec =
(xl_dbase_create_file_copy_rec *) record->main_data;
- RelFileNode rnode = {InvalidOid, xlrec->db_id, InvalidOid};
+ RelFileLocator rlocator =
+ {InvalidOid, xlrec->db_id, InvalidRelFileNumber};
/*
* Don't try to prefetch anything in this database until
* it has been created, or we might confuse the blocks of
- * different generations, if a database OID or relfilenode
- * is reused. It's also more efficient than discovering
- * that relations don't exist on disk yet with ENOENT
- * errors.
+ * different generations, if a database OID or
+ * relfilenumber is reused. It's also more efficient than
+ * discovering that relations don't exist on disk yet with
+ * ENOENT errors.
*/
- XLogPrefetcherAddFilter(prefetcher, rnode, 0, record->lsn);
+ XLogPrefetcherAddFilter(prefetcher, rlocator, 0, record->lsn);
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
"suppressing prefetch in database %u until %X/%X is replayed due to raw file copy",
- rnode.dbNode,
+ rlocator.dbOid,
LSN_FORMAT_ARGS(record->lsn));
#endif
}
* Don't prefetch anything for this whole relation
* until it has been created. Otherwise we might
* confuse the blocks of different generations, if a
- * relfilenode is reused. This also avoids the need
+ * relfilenumber is reused. This also avoids the need
* to discover the problem via extra syscalls that
* report ENOENT.
*/
- XLogPrefetcherAddFilter(prefetcher, xlrec->rnode, 0,
+ XLogPrefetcherAddFilter(prefetcher, xlrec->rlocator, 0,
record->lsn);
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
"suppressing prefetch in relation %u/%u/%u until %X/%X is replayed, which creates the relation",
- xlrec->rnode.spcNode,
- xlrec->rnode.dbNode,
- xlrec->rnode.relNode,
+ xlrec->rlocator.spcOid,
+ xlrec->rlocator.dbOid,
+ xlrec->rlocator.relNumber,
LSN_FORMAT_ARGS(record->lsn));
#endif
}
* Don't consider prefetching anything in the truncated
* range until the truncation has been performed.
*/
- XLogPrefetcherAddFilter(prefetcher, xlrec->rnode,
+ XLogPrefetcherAddFilter(prefetcher, xlrec->rlocator,
xlrec->blkno,
record->lsn);
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
"suppressing prefetch in relation %u/%u/%u from block %u until %X/%X is replayed, which truncates the relation",
- xlrec->rnode.spcNode,
- xlrec->rnode.dbNode,
- xlrec->rnode.relNode,
+ xlrec->rlocator.spcOid,
+ xlrec->rlocator.dbOid,
+ xlrec->rlocator.relNumber,
xlrec->blkno,
LSN_FORMAT_ARGS(record->lsn));
#endif
}
/* Should we skip prefetching this block due to a filter? */
- if (XLogPrefetcherIsFiltered(prefetcher, block->rnode, block->blkno))
+ if (XLogPrefetcherIsFiltered(prefetcher, block->rlocator, block->blkno))
{
XLogPrefetchIncrement(&SharedStats->skip_new);
return LRQ_NEXT_NO_IO;
for (int i = 0; i < XLOGPREFETCHER_SEQ_WINDOW_SIZE; ++i)
{
if (block->blkno == prefetcher->recent_block[i] &&
- RelFileNodeEquals(block->rnode, prefetcher->recent_rnode[i]))
+ RelFileLocatorEquals(block->rlocator, prefetcher->recent_rlocator[i]))
{
/*
* XXX If we also remembered where it was, we could set
return LRQ_NEXT_NO_IO;
}
}
- prefetcher->recent_rnode[prefetcher->recent_idx] = block->rnode;
+ prefetcher->recent_rlocator[prefetcher->recent_idx] = block->rlocator;
prefetcher->recent_block[prefetcher->recent_idx] = block->blkno;
prefetcher->recent_idx =
(prefetcher->recent_idx + 1) % XLOGPREFETCHER_SEQ_WINDOW_SIZE;
* same relation (with some scheme to handle invalidations
* safely), but for now we'll call smgropen() every time.
*/
- reln = smgropen(block->rnode, InvalidBackendId);
+ reln = smgropen(block->rlocator, InvalidBackendId);
/*
* If the relation file doesn't exist on disk, for example because
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
"suppressing all prefetch in relation %u/%u/%u until %X/%X is replayed, because the relation does not exist on disk",
- reln->smgr_rnode.node.spcNode,
- reln->smgr_rnode.node.dbNode,
- reln->smgr_rnode.node.relNode,
+ reln->smgr_rlocator.locator.spcOid,
+ reln->smgr_rlocator.locator.dbOid,
+ reln->smgr_rlocator.locator.relNumber,
LSN_FORMAT_ARGS(record->lsn));
#endif
- XLogPrefetcherAddFilter(prefetcher, block->rnode, 0,
+ XLogPrefetcherAddFilter(prefetcher, block->rlocator, 0,
record->lsn);
XLogPrefetchIncrement(&SharedStats->skip_new);
return LRQ_NEXT_NO_IO;
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
"suppressing prefetch in relation %u/%u/%u from block %u until %X/%X is replayed, because the relation is too small",
- reln->smgr_rnode.node.spcNode,
- reln->smgr_rnode.node.dbNode,
- reln->smgr_rnode.node.relNode,
+ reln->smgr_rlocator.locator.spcOid,
+ reln->smgr_rlocator.locator.dbOid,
+ reln->smgr_rlocator.locator.relNumber,
block->blkno,
LSN_FORMAT_ARGS(record->lsn));
#endif
- XLogPrefetcherAddFilter(prefetcher, block->rnode, block->blkno,
+ XLogPrefetcherAddFilter(prefetcher, block->rlocator, block->blkno,
record->lsn);
XLogPrefetchIncrement(&SharedStats->skip_new);
return LRQ_NEXT_NO_IO;
*/
elog(ERROR,
"could not prefetch relation %u/%u/%u block %u",
- reln->smgr_rnode.node.spcNode,
- reln->smgr_rnode.node.dbNode,
- reln->smgr_rnode.node.relNode,
+ reln->smgr_rlocator.locator.spcOid,
+ reln->smgr_rlocator.locator.dbOid,
+ reln->smgr_rlocator.locator.relNumber,
block->blkno);
}
}
}
/*
- * Don't prefetch any blocks >= 'blockno' from a given 'rnode', until 'lsn'
+ * Don't prefetch any blocks >= 'blockno' from a given 'rlocator', until 'lsn'
* has been replayed.
*/
static inline void
-XLogPrefetcherAddFilter(XLogPrefetcher *prefetcher, RelFileNode rnode,
+XLogPrefetcherAddFilter(XLogPrefetcher *prefetcher, RelFileLocator rlocator,
BlockNumber blockno, XLogRecPtr lsn)
{
XLogPrefetcherFilter *filter;
bool found;
- filter = hash_search(prefetcher->filter_table, &rnode, HASH_ENTER, &found);
+ filter = hash_search(prefetcher->filter_table, &rlocator, HASH_ENTER, &found);
if (!found)
{
/*
else
{
/*
- * We were already filtering this rnode. Extend the filter's lifetime
- * to cover this WAL record, but leave the lower of the block numbers
- * there because we don't want to have to track individual blocks.
+ * We were already filtering this rlocator. Extend the filter's
+ * lifetime to cover this WAL record, but leave the lower of the block
+ * numbers there because we don't want to have to track individual
+ * blocks.
*/
filter->filter_until_replayed = lsn;
dlist_delete(&filter->link);
* Have we replayed any records that caused us to begin filtering a block
* range? That means that relations should have been created, extended or
* dropped as required, so we can stop filtering out accesses to a given
- * relfilenode.
+ * relfilenumber.
*/
static inline void
XLogPrefetcherCompleteFilters(XLogPrefetcher *prefetcher, XLogRecPtr replaying_lsn)
* Check if a given block should be skipped due to a filter.
*/
static inline bool
-XLogPrefetcherIsFiltered(XLogPrefetcher *prefetcher, RelFileNode rnode,
+XLogPrefetcherIsFiltered(XLogPrefetcher *prefetcher, RelFileLocator rlocator,
BlockNumber blockno)
{
/*
XLogPrefetcherFilter *filter;
/* See if the block range is filtered. */
- filter = hash_search(prefetcher->filter_table, &rnode, HASH_FIND, NULL);
+ filter = hash_search(prefetcher->filter_table, &rlocator, HASH_FIND, NULL);
if (filter && filter->filter_from_block <= blockno)
{
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
"prefetch of %u/%u/%u block %u suppressed; filtering until LSN %X/%X is replayed (blocks >= %u filtered)",
- rnode.spcNode, rnode.dbNode, rnode.relNode, blockno,
+ rlocator.spcOid, rlocator.dbOid, rlocator.relNumber, blockno,
LSN_FORMAT_ARGS(filter->filter_until_replayed),
filter->filter_from_block);
#endif
}
/* See if the whole database is filtered. */
- rnode.relNode = InvalidOid;
- rnode.spcNode = InvalidOid;
- filter = hash_search(prefetcher->filter_table, &rnode, HASH_FIND, NULL);
+ rlocator.relNumber = InvalidRelFileNumber;
+ rlocator.spcOid = InvalidOid;
+ filter = hash_search(prefetcher->filter_table, &rlocator, HASH_FIND, NULL);
if (filter)
{
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
"prefetch of %u/%u/%u block %u suppressed; filtering until LSN %X/%X is replayed (whole database)",
- rnode.spcNode, rnode.dbNode, rnode.relNode, blockno,
+ rlocator.spcOid, rlocator.dbOid, rlocator.relNumber, blockno,
LSN_FORMAT_ARGS(filter->filter_until_replayed));
#endif
return true;
char *out;
uint32 remaining;
uint32 datatotal;
- RelFileNode *rnode = NULL;
+ RelFileLocator *rlocator = NULL;
uint8 block_id;
decoded->header = *record;
}
if (!(fork_flags & BKPBLOCK_SAME_REL))
{
- COPY_HEADER_FIELD(&blk->rnode, sizeof(RelFileNode));
- rnode = &blk->rnode;
+ COPY_HEADER_FIELD(&blk->rlocator, sizeof(RelFileLocator));
+ rlocator = &blk->rlocator;
}
else
{
- if (rnode == NULL)
+ if (rlocator == NULL)
{
report_invalid_record(state,
"BKPBLOCK_SAME_REL set but no previous rel at %X/%X",
goto err;
}
- blk->rnode = *rnode;
+ blk->rlocator = *rlocator;
}
COPY_HEADER_FIELD(&blk->blkno, sizeof(BlockNumber));
}
*/
void
XLogRecGetBlockTag(XLogReaderState *record, uint8 block_id,
- RelFileNode *rnode, ForkNumber *forknum, BlockNumber *blknum)
+ RelFileLocator *rlocator, ForkNumber *forknum,
+ BlockNumber *blknum)
{
- if (!XLogRecGetBlockTagExtended(record, block_id, rnode, forknum, blknum,
- NULL))
+ if (!XLogRecGetBlockTagExtended(record, block_id, rlocator, forknum,
+ blknum, NULL))
{
#ifndef FRONTEND
elog(ERROR, "failed to locate backup block with ID %d in WAL record",
* Returns information about the block that a block reference refers to,
* optionally including the buffer that the block may already be in.
*
- * If the WAL record contains a block reference with the given ID, *rnode,
+ * If the WAL record contains a block reference with the given ID, *rlocator,
* *forknum, *blknum and *prefetch_buffer are filled in (if not NULL), and
* returns true. Otherwise returns false.
*/
bool
XLogRecGetBlockTagExtended(XLogReaderState *record, uint8 block_id,
- RelFileNode *rnode, ForkNumber *forknum,
+ RelFileLocator *rlocator, ForkNumber *forknum,
BlockNumber *blknum,
Buffer *prefetch_buffer)
{
return false;
bkpb = &record->record->blocks[block_id];
- if (rnode)
- *rnode = bkpb->rnode;
+ if (rlocator)
+ *rlocator = bkpb->rlocator;
if (forknum)
*forknum = bkpb->forknum;
if (blknum)
/* decode block references */
for (block_id = 0; block_id <= XLogRecMaxBlockId(record); block_id++)
{
- RelFileNode rnode;
+ RelFileLocator rlocator;
ForkNumber forknum;
BlockNumber blk;
if (!XLogRecGetBlockTagExtended(record, block_id,
- &rnode, &forknum, &blk, NULL))
+ &rlocator, &forknum, &blk, NULL))
continue;
if (forknum != MAIN_FORKNUM)
appendStringInfo(buf, "; blkref #%d: rel %u/%u/%u, fork %u, blk %u",
block_id,
- rnode.spcNode, rnode.dbNode, rnode.relNode,
+ rlocator.spcOid, rlocator.dbOid,
+ rlocator.relNumber,
forknum,
blk);
else
appendStringInfo(buf, "; blkref #%d: rel %u/%u/%u, blk %u",
block_id,
- rnode.spcNode, rnode.dbNode, rnode.relNode,
+ rlocator.spcOid, rlocator.dbOid,
+ rlocator.relNumber,
blk);
if (XLogRecHasBlockImage(record, block_id))
appendStringInfoString(buf, " FPW");
verifyBackupPageConsistency(XLogReaderState *record)
{
RmgrData rmgr = GetRmgr(XLogRecGetRmid(record));
- RelFileNode rnode;
+ RelFileLocator rlocator;
ForkNumber forknum;
BlockNumber blkno;
int block_id;
Page page;
if (!XLogRecGetBlockTagExtended(record, block_id,
- &rnode, &forknum, &blkno, NULL))
+ &rlocator, &forknum, &blkno, NULL))
{
/*
* WAL record doesn't contain a block reference with the given id.
* Read the contents from the current buffer and store it in a
* temporary page.
*/
- buf = XLogReadBufferExtended(rnode, forknum, blkno,
+ buf = XLogReadBufferExtended(rlocator, forknum, blkno,
RBM_NORMAL_NO_LOG,
InvalidBuffer);
if (!BufferIsValid(buf))
{
elog(FATAL,
"inconsistent page found, rel %u/%u/%u, forknum %u, blkno %u",
- rnode.spcNode, rnode.dbNode, rnode.relNode,
+ rlocator.spcOid, rlocator.dbOid, rlocator.relNumber,
forknum, blkno);
}
}
*/
typedef struct xl_invalid_page_key
{
- RelFileNode node; /* the relation */
+ RelFileLocator locator; /* the relation */
ForkNumber forkno; /* the fork number */
BlockNumber blkno; /* the page */
} xl_invalid_page_key;
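The invalid-page table hashes this key bytewise (HASH_BLOBS), so the struct must pack with no padding, as the comment further down notes. A hypothetical compile-time check of that assumption, sketched here rather than taken from the patch:

StaticAssertDecl(sizeof(xl_invalid_page_key) ==
				 sizeof(RelFileLocator) + sizeof(ForkNumber) + sizeof(BlockNumber),
				 "xl_invalid_page_key must not contain padding");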
/* Report a reference to an invalid page */
static void
-report_invalid_page(int elevel, RelFileNode node, ForkNumber forkno,
+report_invalid_page(int elevel, RelFileLocator locator, ForkNumber forkno,
BlockNumber blkno, bool present)
{
- char *path = relpathperm(node, forkno);
+ char *path = relpathperm(locator, forkno);
if (present)
elog(elevel, "page %u of relation %s is uninitialized",
/* Log a reference to an invalid page */
static void
-log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno,
+log_invalid_page(RelFileLocator locator, ForkNumber forkno, BlockNumber blkno,
bool present)
{
xl_invalid_page_key key;
*/
if (reachedConsistency)
{
- report_invalid_page(WARNING, node, forkno, blkno, present);
+ report_invalid_page(WARNING, locator, forkno, blkno, present);
elog(ignore_invalid_pages ? WARNING : PANIC,
"WAL contains references to invalid pages");
}
* something about the XLOG record that generated the reference).
*/
if (message_level_is_interesting(DEBUG1))
- report_invalid_page(DEBUG1, node, forkno, blkno, present);
+ report_invalid_page(DEBUG1, locator, forkno, blkno, present);
if (invalid_page_tab == NULL)
{
}
/* we currently assume xl_invalid_page_key contains no padding */
- key.node = node;
+ key.locator = locator;
key.forkno = forkno;
key.blkno = blkno;
hentry = (xl_invalid_page *)
/* Forget any invalid pages >= minblkno, because they've been dropped */
static void
-forget_invalid_pages(RelFileNode node, ForkNumber forkno, BlockNumber minblkno)
+forget_invalid_pages(RelFileLocator locator, ForkNumber forkno,
+ BlockNumber minblkno)
{
HASH_SEQ_STATUS status;
xl_invalid_page *hentry;
while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
{
- if (RelFileNodeEquals(hentry->key.node, node) &&
+ if (RelFileLocatorEquals(hentry->key.locator, locator) &&
hentry->key.forkno == forkno &&
hentry->key.blkno >= minblkno)
{
if (message_level_is_interesting(DEBUG2))
{
- char *path = relpathperm(hentry->key.node, forkno);
+ char *path = relpathperm(hentry->key.locator, forkno);
elog(DEBUG2, "page %u of relation %s has been dropped",
hentry->key.blkno, path);
while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
{
- if (hentry->key.node.dbNode == dbid)
+ if (hentry->key.locator.dbOid == dbid)
{
if (message_level_is_interesting(DEBUG2))
{
- char *path = relpathperm(hentry->key.node, hentry->key.forkno);
+ char *path = relpathperm(hentry->key.locator, hentry->key.forkno);
elog(DEBUG2, "page %u of relation %s has been dropped",
hentry->key.blkno, path);
*/
while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
{
- report_invalid_page(WARNING, hentry->key.node, hentry->key.forkno,
+ report_invalid_page(WARNING, hentry->key.locator, hentry->key.forkno,
hentry->key.blkno, hentry->present);
foundone = true;
}
Buffer *buf)
{
XLogRecPtr lsn = record->EndRecPtr;
- RelFileNode rnode;
+ RelFileLocator rlocator;
ForkNumber forknum;
BlockNumber blkno;
Buffer prefetch_buffer;
bool zeromode;
bool willinit;
- if (!XLogRecGetBlockTagExtended(record, block_id, &rnode, &forknum, &blkno,
+ if (!XLogRecGetBlockTagExtended(record, block_id, &rlocator, &forknum, &blkno,
&prefetch_buffer))
{
/* Caller specified a bogus block_id */
if (XLogRecBlockImageApply(record, block_id))
{
Assert(XLogRecHasBlockImage(record, block_id));
- *buf = XLogReadBufferExtended(rnode, forknum, blkno,
+ *buf = XLogReadBufferExtended(rlocator, forknum, blkno,
get_cleanup_lock ? RBM_ZERO_AND_CLEANUP_LOCK : RBM_ZERO_AND_LOCK,
prefetch_buffer);
page = BufferGetPage(*buf);
}
else
{
- *buf = XLogReadBufferExtended(rnode, forknum, blkno, mode, prefetch_buffer);
+ *buf = XLogReadBufferExtended(rlocator, forknum, blkno, mode, prefetch_buffer);
if (BufferIsValid(*buf))
{
if (mode != RBM_ZERO_AND_LOCK && mode != RBM_ZERO_AND_CLEANUP_LOCK)
* they will be invisible to tools that need to know which pages are modified.
*/
Buffer
-XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum,
+XLogReadBufferExtended(RelFileLocator rlocator, ForkNumber forknum,
BlockNumber blkno, ReadBufferMode mode,
Buffer recent_buffer)
{
/* Do we have a clue where the buffer might be already? */
if (BufferIsValid(recent_buffer) &&
mode == RBM_NORMAL &&
- ReadRecentBuffer(rnode, forknum, blkno, recent_buffer))
+ ReadRecentBuffer(rlocator, forknum, blkno, recent_buffer))
{
buffer = recent_buffer;
goto recent_buffer_fast_path;
}
/* Open the relation at smgr level */
- smgr = smgropen(rnode, InvalidBackendId);
+ smgr = smgropen(rlocator, InvalidBackendId);
/*
* Create the target file if it doesn't already exist. This lets us cope
if (blkno < lastblock)
{
/* page exists in file */
- buffer = ReadBufferWithoutRelcache(rnode, forknum, blkno,
+ buffer = ReadBufferWithoutRelcache(rlocator, forknum, blkno,
mode, NULL, true);
}
else
/* hm, page doesn't exist in file */
if (mode == RBM_NORMAL)
{
- log_invalid_page(rnode, forknum, blkno, false);
+ log_invalid_page(rlocator, forknum, blkno, false);
return InvalidBuffer;
}
if (mode == RBM_NORMAL_NO_LOG)
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
}
- buffer = ReadBufferWithoutRelcache(rnode, forknum,
+ buffer = ReadBufferWithoutRelcache(rlocator, forknum,
P_NEW, mode, NULL, true);
}
while (BufferGetBlockNumber(buffer) < blkno);
if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
- buffer = ReadBufferWithoutRelcache(rnode, forknum, blkno,
+ buffer = ReadBufferWithoutRelcache(rlocator, forknum, blkno,
mode, NULL, true);
}
}
if (PageIsNew(page))
{
ReleaseBuffer(buffer);
- log_invalid_page(rnode, forknum, blkno, true);
+ log_invalid_page(rlocator, forknum, blkno, true);
return InvalidBuffer;
}
}
* Caller must free the returned entry with FreeFakeRelcacheEntry().
*/
Relation
-CreateFakeRelcacheEntry(RelFileNode rnode)
+CreateFakeRelcacheEntry(RelFileLocator rlocator)
{
FakeRelCacheEntry fakeentry;
Relation rel;
rel = (Relation) fakeentry;
rel->rd_rel = &fakeentry->pgc;
- rel->rd_node = rnode;
+ rel->rd_locator = rlocator;
/*
* We will never be working with temp rels during recovery or while
/* It must be a permanent table here */
rel->rd_rel->relpersistence = RELPERSISTENCE_PERMANENT;
- /* We don't know the name of the relation; use relfilenode instead */
- sprintf(RelationGetRelationName(rel), "%u", rnode.relNode);
+ /* We don't know the name of the relation; use relfilenumber instead */
+ sprintf(RelationGetRelationName(rel), "%u", rlocator.relNumber);
/*
* We set up the lockRelId in case anything tries to lock the dummy
- * relation. Note that this is fairly bogus since relNode may be
+ * relation. Note that this is fairly bogus since relNumber may be
* different from the relation's OID. It shouldn't really matter though.
* In recovery, we are running by ourselves and can't have any lock
* conflicts. While syncing, we already hold AccessExclusiveLock.
*/
- rel->rd_lockInfo.lockRelId.dbId = rnode.dbNode;
- rel->rd_lockInfo.lockRelId.relId = rnode.relNode;
+ rel->rd_lockInfo.lockRelId.dbId = rlocator.dbOid;
+ rel->rd_lockInfo.lockRelId.relId = rlocator.relNumber;
rel->rd_smgr = NULL;
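A minimal usage sketch of the fake relcache entry, mirroring the redo-side callers that appear later in this patch (xlrec and the locals are illustrative):

	/* Redo code with no relcache can still get a Relation-shaped handle. */
	Relation	rel = CreateFakeRelcacheEntry(xlrec->rlocator);
	BlockNumber	nblocks = smgrnblocks(RelationGetSmgr(rel), MAIN_FORKNUM);

	FreeFakeRelcacheEntry(rel);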
* any open "invalid-page" records for the relation.
*/
void
-XLogDropRelation(RelFileNode rnode, ForkNumber forknum)
+XLogDropRelation(RelFileLocator rlocator, ForkNumber forknum)
{
- forget_invalid_pages(rnode, forknum, 0);
+ forget_invalid_pages(rlocator, forknum, 0);
}
/*
* We need to clean up any open "invalid-page" records for the dropped pages.
*/
void
-XLogTruncateRelation(RelFileNode rnode, ForkNumber forkNum,
+XLogTruncateRelation(RelFileLocator rlocator, ForkNumber forkNum,
BlockNumber nblocks)
{
- forget_invalid_pages(rnode, forkNum, nblocks);
+ forget_invalid_pages(rlocator, forkNum, nblocks);
}
/*
stmt->excludeOpNames = NIL;
stmt->idxcomment = NULL;
stmt->indexOid = InvalidOid;
- stmt->oldNode = InvalidOid;
+ stmt->oldNumber = InvalidRelFileNumber;
stmt->oldCreateSubid = InvalidSubTransactionId;
- stmt->oldFirstRelfilenodeSubid = InvalidSubTransactionId;
+ stmt->oldFirstRelfilelocatorSubid = InvalidSubTransactionId;
stmt->unique = false;
stmt->primary = false;
stmt->isconstraint = false;
stmt->excludeOpNames = NIL;
stmt->idxcomment = NULL;
stmt->indexOid = InvalidOid;
- stmt->oldNode = InvalidOid;
+ stmt->oldNumber = InvalidRelFileNumber;
stmt->oldCreateSubid = InvalidSubTransactionId;
- stmt->oldFirstRelfilenodeSubid = InvalidSubTransactionId;
+ stmt->oldFirstRelfilelocatorSubid = InvalidSubTransactionId;
stmt->unique = true;
stmt->primary = false;
stmt->isconstraint = false;
}
/*
- * GetNewRelFileNode
- * Generate a new relfilenode number that is unique within the
+ * GetNewRelFileNumber
+ * Generate a new relfilenumber that is unique within the
* database of the given tablespace.
*
- * If the relfilenode will also be used as the relation's OID, pass the
+ * If the relfilenumber will also be used as the relation's OID, pass the
* opened pg_class catalog, and this routine will guarantee that the result
* is also an unused OID within pg_class. If the result is to be used only
- * as a relfilenode for an existing relation, pass NULL for pg_class.
+ * as a relfilenumber for an existing relation, pass NULL for pg_class.
*
* As with GetNewOidWithIndex(), there is some theoretical risk of a race
* condition, but it doesn't seem worth worrying about.
* Note: we don't support using this in bootstrap mode. All relations
* created by bootstrap have preassigned OIDs, so there's no need.
*/
-Oid
-GetNewRelFileNode(Oid reltablespace, Relation pg_class, char relpersistence)
+RelFileNumber
+GetNewRelFileNumber(Oid reltablespace, Relation pg_class, char relpersistence)
{
- RelFileNodeBackend rnode;
+ RelFileLocatorBackend rlocator;
char *rpath;
bool collides;
BackendId backend;
/*
* If we ever get here during pg_upgrade, there's something wrong; all
- * relfilenode assignments during a binary-upgrade run should be
+ * relfilenumber assignments during a binary-upgrade run should be
* determined by commands in the dump script.
*/
Assert(!IsBinaryUpgrade);
break;
default:
elog(ERROR, "invalid relpersistence: %c", relpersistence);
- return InvalidOid; /* placate compiler */
+ return InvalidRelFileNumber; /* placate compiler */
}
/* This logic should match RelationInitPhysicalAddr */
- rnode.node.spcNode = reltablespace ? reltablespace : MyDatabaseTableSpace;
- rnode.node.dbNode = (rnode.node.spcNode == GLOBALTABLESPACE_OID) ? InvalidOid : MyDatabaseId;
+ rlocator.locator.spcOid = reltablespace ? reltablespace : MyDatabaseTableSpace;
+ rlocator.locator.dbOid =
+ (rlocator.locator.spcOid == GLOBALTABLESPACE_OID) ?
+ InvalidOid : MyDatabaseId;
/*
* The relpath will vary based on the backend ID, so we must initialize
* that properly here to make sure that any collisions based on filename
* are properly detected.
*/
- rnode.backend = backend;
+ rlocator.backend = backend;
do
{
/* Generate the OID */
if (pg_class)
- rnode.node.relNode = GetNewOidWithIndex(pg_class, ClassOidIndexId,
- Anum_pg_class_oid);
+ rlocator.locator.relNumber = GetNewOidWithIndex(pg_class, ClassOidIndexId,
+ Anum_pg_class_oid);
else
- rnode.node.relNode = GetNewObjectId();
+ rlocator.locator.relNumber = GetNewObjectId();
/* Check for existing file of same name */
- rpath = relpath(rnode, MAIN_FORKNUM);
+ rpath = relpath(rlocator, MAIN_FORKNUM);
if (access(rpath, F_OK) == 0)
{
pfree(rpath);
} while (collides);
- return rnode.node.relNode;
+ return rlocator.locator.relNumber;
}
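A sketch of a typical call for the case where an existing relation is given fresh storage and the result is not reused as a pg_class OID (rel is illustrative):

	RelFileNumber newNumber;

	/* Pass NULL for pg_class: the result is used only as a storage number. */
	newNumber = GetNewRelFileNumber(rel->rd_rel->reltablespace, NULL,
									rel->rd_rel->relpersistence);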
/*
/* Potentially set by pg_upgrade_support functions */
Oid binary_upgrade_next_heap_pg_class_oid = InvalidOid;
-Oid binary_upgrade_next_heap_pg_class_relfilenode = InvalidOid;
+RelFileNumber binary_upgrade_next_heap_pg_class_relfilenumber = InvalidRelFileNumber;
Oid binary_upgrade_next_toast_pg_class_oid = InvalidOid;
-Oid binary_upgrade_next_toast_pg_class_relfilenode = InvalidOid;
+RelFileNumber binary_upgrade_next_toast_pg_class_relfilenumber = InvalidRelFileNumber;
static void AddNewRelationTuple(Relation pg_class_desc,
Relation new_rel_desc,
* heap_create - Create an uncataloged heap relation
*
* Note API change: the caller must now always provide the OID
- * to use for the relation. The relfilenode may be (and in
+ * to use for the relation. The relfilenumber may be (and in
* the simplest cases is) left unspecified.
*
* create_storage indicates whether or not to create the storage.
Oid relnamespace,
Oid reltablespace,
Oid relid,
- Oid relfilenode,
+ RelFileNumber relfilenumber,
Oid accessmtd,
TupleDesc tupDesc,
char relkind,
else
{
/*
- * If relfilenode is unspecified by the caller then create storage
+ * If relfilenumber is unspecified by the caller then create storage
* with oid same as relid.
*/
- if (!OidIsValid(relfilenode))
- relfilenode = relid;
+ if (!RelFileNumberIsValid(relfilenumber))
+ relfilenumber = relid;
}
/*
tupDesc,
relid,
accessmtd,
- relfilenode,
+ relfilenumber,
reltablespace,
shared_relation,
mapped_relation,
if (create_storage)
{
if (RELKIND_HAS_TABLE_AM(rel->rd_rel->relkind))
- table_relation_set_new_filenode(rel, &rel->rd_node,
- relpersistence,
- relfrozenxid, relminmxid);
+ table_relation_set_new_filelocator(rel, &rel->rd_locator,
+ relpersistence,
+ relfrozenxid, relminmxid);
else if (RELKIND_HAS_STORAGE(rel->rd_rel->relkind))
- RelationCreateStorage(rel->rd_node, relpersistence, true);
+ RelationCreateStorage(rel->rd_locator, relpersistence, true);
else
Assert(false);
}
* relkind: relkind for new rel
* relpersistence: rel's persistence status (permanent, temp, or unlogged)
* shared_relation: true if it's to be a shared relation
- * mapped_relation: true if the relation will use the relfilenode map
+ * mapped_relation: true if the relation will use the relfilenumber map
* oncommit: ON COMMIT marking (only relevant if it's a temp table)
* reloptions: reloptions in Datum form, or (Datum) 0 if none
* use_user_acl: true if should look for user-defined default permissions;
Oid new_type_oid;
/* By default set to InvalidOid unless overridden by binary-upgrade */
- Oid relfilenode = InvalidOid;
+ RelFileNumber relfilenumber = InvalidRelFileNumber;
TransactionId relfrozenxid;
MultiXactId relminmxid;
/*
* Allocate an OID for the relation, unless we were told what to use.
*
- * The OID will be the relfilenode as well, so make sure it doesn't
+ * The OID will be the relfilenumber as well, so make sure it doesn't
* collide with either pg_class OIDs or existing physical files.
*/
if (!OidIsValid(relid))
{
- /* Use binary-upgrade override for pg_class.oid and relfilenode */
+ /* Use binary-upgrade override for pg_class.oid and relfilenumber */
if (IsBinaryUpgrade)
{
/*
relid = binary_upgrade_next_toast_pg_class_oid;
binary_upgrade_next_toast_pg_class_oid = InvalidOid;
- if (!OidIsValid(binary_upgrade_next_toast_pg_class_relfilenode))
+ if (!RelFileNumberIsValid(binary_upgrade_next_toast_pg_class_relfilenumber))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("toast relfilenode value not set when in binary upgrade mode")));
+ errmsg("toast relfilenumber value not set when in binary upgrade mode")));
- relfilenode = binary_upgrade_next_toast_pg_class_relfilenode;
- binary_upgrade_next_toast_pg_class_relfilenode = InvalidOid;
+ relfilenumber = binary_upgrade_next_toast_pg_class_relfilenumber;
+ binary_upgrade_next_toast_pg_class_relfilenumber = InvalidRelFileNumber;
}
}
else
if (RELKIND_HAS_STORAGE(relkind))
{
- if (!OidIsValid(binary_upgrade_next_heap_pg_class_relfilenode))
+ if (!RelFileNumberIsValid(binary_upgrade_next_heap_pg_class_relfilenumber))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("relfilenode value not set when in binary upgrade mode")));
+ errmsg("relfilenumber value not set when in binary upgrade mode")));
- relfilenode = binary_upgrade_next_heap_pg_class_relfilenode;
- binary_upgrade_next_heap_pg_class_relfilenode = InvalidOid;
+ relfilenumber = binary_upgrade_next_heap_pg_class_relfilenumber;
+ binary_upgrade_next_heap_pg_class_relfilenumber = InvalidRelFileNumber;
}
}
}
if (!OidIsValid(relid))
- relid = GetNewRelFileNode(reltablespace, pg_class_desc,
- relpersistence);
+ relid = GetNewRelFileNumber(reltablespace, pg_class_desc,
+ relpersistence);
}
/*
relnamespace,
reltablespace,
relid,
- relfilenode,
+ relfilenumber,
accessmtd,
tupdesc,
relkind,
/* Potentially set by pg_upgrade_support functions */
Oid binary_upgrade_next_index_pg_class_oid = InvalidOid;
-Oid binary_upgrade_next_index_pg_class_relfilenode = InvalidOid;
+RelFileNumber binary_upgrade_next_index_pg_class_relfilenumber =
+InvalidRelFileNumber;
/*
* Pointer-free representation of variables used when reindexing system
* parent index; otherwise InvalidOid.
* parentConstraintId: if creating a constraint on a partition, the OID
* of the constraint in the parent; otherwise InvalidOid.
- * relFileNode: normally, pass InvalidOid to get new storage. May be
- * nonzero to attach an existing valid build.
+ * relFileNumber: normally, pass InvalidRelFileNumber to get new storage.
+ * May be nonzero to attach an existing valid build.
* indexInfo: same info executor uses to insert into the index
* indexColNames: column names to use for index (List of char *)
* accessMethodObjectId: OID of index AM to use
Oid indexRelationId,
Oid parentIndexRelid,
Oid parentConstraintId,
- Oid relFileNode,
+ RelFileNumber relFileNumber,
IndexInfo *indexInfo,
List *indexColNames,
Oid accessMethodObjectId,
char relkind;
TransactionId relfrozenxid;
MultiXactId relminmxid;
- bool create_storage = !OidIsValid(relFileNode);
+ bool create_storage = !RelFileNumberIsValid(relFileNumber);
/* constraint flags can only be set when a constraint is requested */
Assert((constr_flags == 0) ||
/*
* The index will be in the same namespace as its parent table, and is
* shared across databases if and only if the parent is. Likewise, it
- * will use the relfilenode map if and only if the parent does; and it
+ * will use the relfilenumber map if and only if the parent does; and it
* inherits the parent's relpersistence.
*/
namespaceId = RelationGetNamespace(heapRelation);
/*
* Allocate an OID for the index, unless we were told what to use.
*
- * The OID will be the relfilenode as well, so make sure it doesn't
+ * The OID will be the relfilenumber as well, so make sure it doesn't
* collide with either pg_class OIDs or existing physical files.
*/
if (!OidIsValid(indexRelationId))
{
- /* Use binary-upgrade override for pg_class.oid and relfilenode */
+ /* Use binary-upgrade override for pg_class.oid and relfilenumber */
if (IsBinaryUpgrade)
{
if (!OidIsValid(binary_upgrade_next_index_pg_class_oid))
indexRelationId = binary_upgrade_next_index_pg_class_oid;
binary_upgrade_next_index_pg_class_oid = InvalidOid;
- /* Override the index relfilenode */
+ /* Override the index relfilenumber */
if ((relkind == RELKIND_INDEX) &&
- (!OidIsValid(binary_upgrade_next_index_pg_class_relfilenode)))
+ (!RelFileNumberIsValid(binary_upgrade_next_index_pg_class_relfilenumber)))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("index relfilenode value not set when in binary upgrade mode")));
- relFileNode = binary_upgrade_next_index_pg_class_relfilenode;
- binary_upgrade_next_index_pg_class_relfilenode = InvalidOid;
+ errmsg("index relfilenumber value not set when in binary upgrade mode")));
+ relFileNumber = binary_upgrade_next_index_pg_class_relfilenumber;
+ binary_upgrade_next_index_pg_class_relfilenumber = InvalidRelFileNumber;
/*
* Note that we want create_storage = true for binary upgrade. The
else
{
indexRelationId =
- GetNewRelFileNode(tableSpaceId, pg_class, relpersistence);
+ GetNewRelFileNumber(tableSpaceId, pg_class, relpersistence);
}
}
namespaceId,
tableSpaceId,
indexRelationId,
- relFileNode,
+ relFileNumber,
accessMethodObjectId,
indexTupDesc,
relkind,
InvalidOid, /* indexRelationId */
InvalidOid, /* parentIndexRelid */
InvalidOid, /* parentConstraintId */
- InvalidOid, /* relFileNode */
+ InvalidRelFileNumber, /* relFileNumber */
newInfo,
indexColNames,
indexRelation->rd_rel->relam,
* it -- but we must first check whether one already exists. If, for
* example, an unlogged relation is truncated in the transaction that
* created it, or truncated twice in a subsequent transaction, the
- * relfilenode won't change, and nothing needs to be done here.
+ * relfilenumber won't change, and nothing needs to be done here.
*/
if (indexRelation->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED &&
!smgrexists(RelationGetSmgr(indexRelation), INIT_FORKNUM))
* Schedule unlinking of the old index storage at transaction commit.
*/
RelationDropStorage(iRel);
- RelationAssumeNewRelfilenode(iRel);
+ RelationAssumeNewRelfilelocator(iRel);
/* Make sure the reltablespace change is visible */
CommandCounterIncrement();
SetReindexProcessing(heapId, indexId);
/* Create a new physical relation for the index */
- RelationSetNewRelfilenode(iRel, persistence);
+ RelationSetNewRelfilenumber(iRel, persistence);
/* Initialize the index and rebuild */
/* Note: we do not need to re-establish pkey setting */
int wal_skip_threshold = 2048; /* in kilobytes */
/*
- * We keep a list of all relations (represented as RelFileNode values)
+ * We keep a list of all relations (represented as RelFileLocator values)
* that have been created or deleted in the current transaction. When
* a relation is created, we create the physical file immediately, but
* remember it so that we can delete the file again if the current
typedef struct PendingRelDelete
{
- RelFileNode relnode; /* relation that may need to be deleted */
+ RelFileLocator rlocator; /* relation that may need to be deleted */
BackendId backend; /* InvalidBackendId if not a temp rel */
bool atCommit; /* T=delete at commit; F=delete at abort */
int nestLevel; /* xact nesting level of request */
typedef struct PendingRelSync
{
- RelFileNode rnode;
+ RelFileLocator rlocator;
bool is_truncated; /* Has the file experienced truncation? */
} PendingRelSync;
* Queue an at-commit fsync.
*/
static void
-AddPendingSync(const RelFileNode *rnode)
+AddPendingSync(const RelFileLocator *rlocator)
{
PendingRelSync *pending;
bool found;
{
HASHCTL ctl;
- ctl.keysize = sizeof(RelFileNode);
+ ctl.keysize = sizeof(RelFileLocator);
ctl.entrysize = sizeof(PendingRelSync);
ctl.hcxt = TopTransactionContext;
pendingSyncHash = hash_create("pending sync hash", 16, &ctl,
HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
}
- pending = hash_search(pendingSyncHash, rnode, HASH_ENTER, &found);
+ pending = hash_search(pendingSyncHash, rlocator, HASH_ENTER, &found);
Assert(!found);
pending->is_truncated = false;
}
* pass register_delete = false.
*/
SMgrRelation
-RelationCreateStorage(RelFileNode rnode, char relpersistence,
+RelationCreateStorage(RelFileLocator rlocator, char relpersistence,
bool register_delete)
{
SMgrRelation srel;
return NULL; /* placate compiler */
}
- srel = smgropen(rnode, backend);
+ srel = smgropen(rlocator, backend);
smgrcreate(srel, MAIN_FORKNUM, false);
if (needs_wal)
- log_smgrcreate(&srel->smgr_rnode.node, MAIN_FORKNUM);
+ log_smgrcreate(&srel->smgr_rlocator.locator, MAIN_FORKNUM);
/*
* Add the relation to the list of stuff to delete at abort, if we are
pending = (PendingRelDelete *)
MemoryContextAlloc(TopMemoryContext, sizeof(PendingRelDelete));
- pending->relnode = rnode;
+ pending->rlocator = rlocator;
pending->backend = backend;
pending->atCommit = false; /* delete if abort */
pending->nestLevel = GetCurrentTransactionNestLevel();
if (relpersistence == RELPERSISTENCE_PERMANENT && !XLogIsNeeded())
{
Assert(backend == InvalidBackendId);
- AddPendingSync(&rnode);
+ AddPendingSync(&rlocator);
}
return srel;
* Perform XLogInsert of an XLOG_SMGR_CREATE record to WAL.
*/
void
-log_smgrcreate(const RelFileNode *rnode, ForkNumber forkNum)
+log_smgrcreate(const RelFileLocator *rlocator, ForkNumber forkNum)
{
xl_smgr_create xlrec;
/*
* Make an XLOG entry reporting the file creation.
*/
- xlrec.rnode = *rnode;
+ xlrec.rlocator = *rlocator;
xlrec.forkNum = forkNum;
XLogBeginInsert();
/* Add the relation to the list of stuff to delete at commit */
pending = (PendingRelDelete *)
MemoryContextAlloc(TopMemoryContext, sizeof(PendingRelDelete));
- pending->relnode = rel->rd_node;
+ pending->rlocator = rel->rd_locator;
pending->backend = rel->rd_backend;
pending->atCommit = true; /* delete if commit */
pending->nestLevel = GetCurrentTransactionNestLevel();
* No-op if the relation is not among those scheduled for deletion.
*/
void
-RelationPreserveStorage(RelFileNode rnode, bool atCommit)
+RelationPreserveStorage(RelFileLocator rlocator, bool atCommit)
{
PendingRelDelete *pending;
PendingRelDelete *prev;
for (pending = pendingDeletes; pending != NULL; pending = next)
{
next = pending->next;
- if (RelFileNodeEquals(rnode, pending->relnode)
+ if (RelFileLocatorEquals(rlocator, pending->rlocator)
&& pending->atCommit == atCommit)
{
/* unlink and delete list entry */
xl_smgr_truncate xlrec;
xlrec.blkno = nblocks;
- xlrec.rnode = rel->rd_node;
+ xlrec.rlocator = rel->rd_locator;
xlrec.flags = SMGR_TRUNCATE_ALL;
XLogBeginInsert();
return;
pending = hash_search(pendingSyncHash,
- &(RelationGetSmgr(rel)->smgr_rnode.node),
+ &(RelationGetSmgr(rel)->smgr_rlocator.locator),
HASH_FIND, NULL);
if (pending)
pending->is_truncated = true;
* We need to log the copied data in WAL iff WAL archiving/streaming is
* enabled AND it's a permanent relation. This gives the same answer as
* "RelationNeedsWAL(rel) || copying_initfork", because we know the
- * current operation created a new relfilenode.
+ * current operation created new relation storage.
*/
use_wal = XLogIsNeeded() &&
(relpersistence == RELPERSISTENCE_PERMANENT || copying_initfork);
* (errcontext callbacks shouldn't be risking any such thing, but
* people have been known to forget that rule.)
*/
- char *relpath = relpathbackend(src->smgr_rnode.node,
- src->smgr_rnode.backend,
+ char *relpath = relpathbackend(src->smgr_rlocator.locator,
+ src->smgr_rlocator.backend,
forkNum);
ereport(ERROR,
* space.
*/
if (use_wal)
- log_newpage(&dst->smgr_rnode.node, forkNum, blkno, page, false);
+ log_newpage(&dst->smgr_rlocator.locator, forkNum, blkno, page, false);
PageSetChecksumInplace(page, blkno);
}
/*
- * RelFileNodeSkippingWAL
- * Check if a BM_PERMANENT relfilenode is using WAL.
+ * RelFileLocatorSkippingWAL
+ * Check if a BM_PERMANENT relfilelocator is using WAL.
*
- * Changes of certain relfilenodes must not write WAL; see "Skipping WAL for
- * New RelFileNode" in src/backend/access/transam/README. Though it is known
- * from Relation efficiently, this function is intended for the code paths not
- * having access to Relation.
+ * Changes to certain relations must not write WAL; see "Skipping WAL for
+ * New RelFileLocator" in src/backend/access/transam/README. Though this
+ * can be determined efficiently from a Relation, this function is intended
+ * for code paths that do not have a Relation at hand.
*/
bool
-RelFileNodeSkippingWAL(RelFileNode rnode)
+RelFileLocatorSkippingWAL(RelFileLocator rlocator)
{
if (!pendingSyncHash ||
- hash_search(pendingSyncHash, &rnode, HASH_FIND, NULL) == NULL)
+ hash_search(pendingSyncHash, &rlocator, HASH_FIND, NULL) == NULL)
return false;
return true;
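A sketch of the intended call pattern from code that has only an SMgrRelation, assuming an open handle named srel and locals blkno and page (illustrative, not taken from the patch):

	/* WAL-log the page only if this relfilelocator is not skipping WAL. */
	if (!RelFileLocatorSkippingWAL(srel->smgr_rlocator.locator))
		log_newpage(&srel->smgr_rlocator.locator, MAIN_FORKNUM, blkno, page,
					true);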
long entries;
entries = pendingSyncHash ? hash_get_num_entries(pendingSyncHash) : 0;
- return mul_size(1 + entries, sizeof(RelFileNode));
+ return mul_size(1 + entries, sizeof(RelFileLocator));
}
/*
HASH_SEQ_STATUS scan;
PendingRelSync *sync;
PendingRelDelete *delete;
- RelFileNode *src;
- RelFileNode *dest = (RelFileNode *) startAddress;
+ RelFileLocator *src;
+ RelFileLocator *dest = (RelFileLocator *) startAddress;
if (!pendingSyncHash)
goto terminate;
- /* Create temporary hash to collect active relfilenodes */
- ctl.keysize = sizeof(RelFileNode);
- ctl.entrysize = sizeof(RelFileNode);
+ /* Create temporary hash to collect active relfilelocators */
+ ctl.keysize = sizeof(RelFileLocator);
+ ctl.entrysize = sizeof(RelFileLocator);
ctl.hcxt = CurrentMemoryContext;
- tmphash = hash_create("tmp relfilenodes",
+ tmphash = hash_create("tmp relfilelocators",
hash_get_num_entries(pendingSyncHash), &ctl,
HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
- /* collect all rnodes from pending syncs */
+ /* collect all rlocators from pending syncs */
hash_seq_init(&scan, pendingSyncHash);
while ((sync = (PendingRelSync *) hash_seq_search(&scan)))
- (void) hash_search(tmphash, &sync->rnode, HASH_ENTER, NULL);
+ (void) hash_search(tmphash, &sync->rlocator, HASH_ENTER, NULL);
- /* remove deleted rnodes */
+ /* remove deleted rlocators */
for (delete = pendingDeletes; delete != NULL; delete = delete->next)
if (delete->atCommit)
- (void) hash_search(tmphash, (void *) &delete->relnode,
+ (void) hash_search(tmphash, (void *) &delete->rlocator,
HASH_REMOVE, NULL);
hash_seq_init(&scan, tmphash);
- while ((src = (RelFileNode *) hash_seq_search(&scan)))
+ while ((src = (RelFileLocator *) hash_seq_search(&scan)))
*dest++ = *src;
hash_destroy(tmphash);
terminate:
- MemSet(dest, 0, sizeof(RelFileNode));
+ MemSet(dest, 0, sizeof(RelFileLocator));
}
/*
* RestorePendingSyncs
* Restore syncs within a parallel worker.
*
- * RelationNeedsWAL() and RelFileNodeSkippingWAL() must offer the correct
+ * RelationNeedsWAL() and RelFileLocatorSkippingWAL() must offer the correct
* answer to parallel workers. Only smgrDoPendingSyncs() reads the
* is_truncated field, at end of transaction. Hence, don't restore it.
*/
void
RestorePendingSyncs(char *startAddress)
{
- RelFileNode *rnode;
+ RelFileLocator *rlocator;
Assert(pendingSyncHash == NULL);
- for (rnode = (RelFileNode *) startAddress; rnode->relNode != 0; rnode++)
- AddPendingSync(rnode);
+ for (rlocator = (RelFileLocator *) startAddress; rlocator->relNumber != 0;
+ rlocator++)
+ AddPendingSync(rlocator);
}
/*
{
SMgrRelation srel;
- srel = smgropen(pending->relnode, pending->backend);
+ srel = smgropen(pending->rlocator, pending->backend);
/* allocate the initial array, or extend it, if needed */
if (maxrels == 0)
- /* Skip syncing nodes that smgrDoPendingDeletes() will delete. */
+ /* Skip syncing relations that smgrDoPendingDeletes() will delete. */
for (pending = pendingDeletes; pending != NULL; pending = pending->next)
if (pending->atCommit)
- (void) hash_search(pendingSyncHash, (void *) &pending->relnode,
+ (void) hash_search(pendingSyncHash, (void *) &pending->rlocator,
HASH_REMOVE, NULL);
hash_seq_init(&scan, pendingSyncHash);
BlockNumber total_blocks = 0;
SMgrRelation srel;
- srel = smgropen(pendingsync->rnode, InvalidBackendId);
+ srel = smgropen(pendingsync->rlocator, InvalidBackendId);
/*
* We emit newpage WAL records for smaller relations.
* page including any unused space. ReadBufferExtended()
* counts some pgstat events; unfortunately, we discard them.
*/
- rel = CreateFakeRelcacheEntry(srel->smgr_rnode.node);
+ rel = CreateFakeRelcacheEntry(srel->smgr_rlocator.locator);
log_newpage_range(rel, fork, 0, n, false);
FreeFakeRelcacheEntry(rel);
}
* smgrGetPendingDeletes() -- Get a list of non-temp relations to be deleted.
*
* The return value is the number of relations scheduled for termination.
- * *ptr is set to point to a freshly-palloc'd array of RelFileNodes.
+ * *ptr is set to point to a freshly-palloc'd array of RelFileLocators.
* If there are no relations to be deleted, *ptr is set to NULL.
*
* Only non-temporary relations are included in the returned list. This is OK
* by upper-level transactions.
*/
int
-smgrGetPendingDeletes(bool forCommit, RelFileNode **ptr)
+smgrGetPendingDeletes(bool forCommit, RelFileLocator **ptr)
{
int nestLevel = GetCurrentTransactionNestLevel();
int nrels;
- RelFileNode *rptr;
+ RelFileLocator *rptr;
PendingRelDelete *pending;
nrels = 0;
*ptr = NULL;
return 0;
}
- rptr = (RelFileNode *) palloc(nrels * sizeof(RelFileNode));
+ rptr = (RelFileLocator *) palloc(nrels * sizeof(RelFileLocator));
*ptr = rptr;
for (pending = pendingDeletes; pending != NULL; pending = pending->next)
{
if (pending->nestLevel >= nestLevel && pending->atCommit == forCommit
&& pending->backend == InvalidBackendId)
{
- *rptr = pending->relnode;
+ *rptr = pending->rlocator;
rptr++;
}
}
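A sketch of the caller side, following the commit-time pattern (locals are illustrative):

	RelFileLocator *rels;
	int			nrels;

	nrels = smgrGetPendingDeletes(true, &rels); /* deletions at commit */
	/* ... include rels[0 .. nrels-1] in the commit record ... */
	if (nrels > 0)
		pfree(rels);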
xl_smgr_create *xlrec = (xl_smgr_create *) XLogRecGetData(record);
SMgrRelation reln;
- reln = smgropen(xlrec->rnode, InvalidBackendId);
+ reln = smgropen(xlrec->rlocator, InvalidBackendId);
smgrcreate(reln, xlrec->forkNum, true);
}
else if (info == XLOG_SMGR_TRUNCATE)
int nforks = 0;
bool need_fsm_vacuum = false;
- reln = smgropen(xlrec->rnode, InvalidBackendId);
+ reln = smgropen(xlrec->rlocator, InvalidBackendId);
/*
* Forcibly create relation if it doesn't exist (which suggests that
nforks++;
/* Also tell xlogutils.c about it */
- XLogTruncateRelation(xlrec->rnode, MAIN_FORKNUM, xlrec->blkno);
+ XLogTruncateRelation(xlrec->rlocator, MAIN_FORKNUM, xlrec->blkno);
}
/* Prepare for truncation of FSM and VM too */
- rel = CreateFakeRelcacheEntry(xlrec->rnode);
+ rel = CreateFakeRelcacheEntry(xlrec->rlocator);
if ((xlrec->flags & SMGR_TRUNCATE_FSM) != 0 &&
smgrexists(reln, FSM_FORKNUM))
* cluster_rel
*
* This clusters the table by creating a new, clustered table and
- * swapping the relfilenodes of the new table and the old table, so
+ * swapping the relfilenumbers of the new table and the old table, so
* the OID of the original table is preserved. Thus we do not lose
* GRANT, inheritance nor references to this table (this was a bug
* in releases through 7.3).
/*
* Swap the physical files of two given relations.
*
- * We swap the physical identity (reltablespace, relfilenode) while keeping the
- * same logical identities of the two relations. relpersistence is also
+ * We swap the physical identity (reltablespace, relfilenumber) while keeping
+ * the same logical identities of the two relations. relpersistence is also
* swapped, which is critical since it determines where buffers live for each
* relation.
*
reltup2;
Form_pg_class relform1,
relform2;
- Oid relfilenode1,
- relfilenode2;
- Oid swaptemp;
+ RelFileNumber relfilenumber1,
+ relfilenumber2;
+ RelFileNumber swaptemp;
char swptmpchr;
/* We need writable copies of both pg_class tuples. */
elog(ERROR, "cache lookup failed for relation %u", r2);
relform2 = (Form_pg_class) GETSTRUCT(reltup2);
- relfilenode1 = relform1->relfilenode;
- relfilenode2 = relform2->relfilenode;
+ relfilenumber1 = relform1->relfilenode;
+ relfilenumber2 = relform2->relfilenode;
- if (OidIsValid(relfilenode1) && OidIsValid(relfilenode2))
+ if (RelFileNumberIsValid(relfilenumber1) &&
+ RelFileNumberIsValid(relfilenumber2))
{
/*
- * Normal non-mapped relations: swap relfilenodes, reltablespaces,
+ * Normal non-mapped relations: swap relfilenumbers, reltablespaces,
* relpersistence
*/
Assert(!target_is_pg_class);
* Mapped-relation case. Here we have to swap the relation mappings
* instead of modifying the pg_class columns. Both must be mapped.
*/
- if (OidIsValid(relfilenode1) || OidIsValid(relfilenode2))
+ if (RelFileNumberIsValid(relfilenumber1) ||
+ RelFileNumberIsValid(relfilenumber2))
elog(ERROR, "cannot swap mapped relation \"%s\" with non-mapped relation",
NameStr(relform1->relname));
/*
* Fetch the mappings --- shouldn't fail, but be paranoid
*/
- relfilenode1 = RelationMapOidToFilenode(r1, relform1->relisshared);
- if (!OidIsValid(relfilenode1))
+ relfilenumber1 = RelationMapOidToFilenumber(r1, relform1->relisshared);
+ if (!RelFileNumberIsValid(relfilenumber1))
elog(ERROR, "could not find relation mapping for relation \"%s\", OID %u",
NameStr(relform1->relname), r1);
- relfilenode2 = RelationMapOidToFilenode(r2, relform2->relisshared);
- if (!OidIsValid(relfilenode2))
+ relfilenumber2 = RelationMapOidToFilenumber(r2, relform2->relisshared);
+ if (!RelFileNumberIsValid(relfilenumber2))
elog(ERROR, "could not find relation mapping for relation \"%s\", OID %u",
NameStr(relform2->relname), r2);
* Send replacement mappings to relmapper. Note these won't actually
* take effect until CommandCounterIncrement.
*/
- RelationMapUpdateMap(r1, relfilenode2, relform1->relisshared, false);
- RelationMapUpdateMap(r2, relfilenode1, relform2->relisshared, false);
+ RelationMapUpdateMap(r1, relfilenumber2, relform1->relisshared, false);
+ RelationMapUpdateMap(r2, relfilenumber1, relform2->relisshared, false);
/* Pass OIDs of mapped r2 tables back to caller */
*mapped_tables++ = r2;
}
/*
- * Recognize that rel1's relfilenode (swapped from rel2) is new in this
+ * Recognize that rel1's relfilenumber (swapped from rel2) is new in this
* subtransaction. The rel2 storage (swapped from rel1) may or may not be
* new.
*/
rel1 = relation_open(r1, NoLock);
rel2 = relation_open(r2, NoLock);
rel2->rd_createSubid = rel1->rd_createSubid;
- rel2->rd_newRelfilenodeSubid = rel1->rd_newRelfilenodeSubid;
- rel2->rd_firstRelfilenodeSubid = rel1->rd_firstRelfilenodeSubid;
- RelationAssumeNewRelfilenode(rel1);
+ rel2->rd_newRelfilelocatorSubid = rel1->rd_newRelfilelocatorSubid;
+ rel2->rd_firstRelfilelocatorSubid = rel1->rd_firstRelfilelocatorSubid;
+ RelationAssumeNewRelfilelocator(rel1);
relation_close(rel1, NoLock);
relation_close(rel2, NoLock);
}
table_close(relRelation, RowExclusiveLock);
}
- /* Destroy new heap with old filenode */
+ /* Destroy new heap with old filenumber */
object.classId = RelationRelationId;
object.objectId = OIDNewHeap;
object.objectSubId = 0;
*/
if (RELKIND_HAS_STORAGE(cstate->rel->rd_rel->relkind) &&
(cstate->rel->rd_createSubid != InvalidSubTransactionId ||
- cstate->rel->rd_firstRelfilenodeSubid != InvalidSubTransactionId))
+ cstate->rel->rd_firstRelfilelocatorSubid != InvalidSubTransactionId))
ti_options |= TABLE_INSERT_SKIP_FSM;
/*
- * Optimize if new relfilenode was created in this subxact or one of its
- * committed children and we won't see those rows later as part of an
+ * Optimize if new relation storage was created in this subxact or one of
+ * its committed children and we won't see those rows later as part of an
* earlier scan or command. The subxact test ensures that if this subxact
* aborts then the frozen rows won't be visible after xact cleanup. Note
* that the stronger test of exactly which subtransaction created it is
errmsg("cannot perform COPY FREEZE because of prior transaction activity")));
if (cstate->rel->rd_createSubid != GetCurrentSubTransactionId() &&
- cstate->rel->rd_newRelfilenodeSubid != GetCurrentSubTransactionId())
+ cstate->rel->rd_newRelfilelocatorSubid != GetCurrentSubTransactionId())
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("cannot perform COPY FREEZE because the table was not created or truncated in the current subtransaction")));
*/
typedef struct CreateDBRelInfo
{
- RelFileNode rnode; /* physical relation identifier */
+ RelFileLocator rlocator; /* physical relation identifier */
Oid reloid; /* relation oid */
bool permanent; /* relation is permanent or unlogged */
} CreateDBRelInfo;
static List *ScanSourceDatabasePgClass(Oid srctbid, Oid srcdbid, char *srcpath);
static List *ScanSourceDatabasePgClassPage(Page page, Buffer buf, Oid tbid,
Oid dbid, char *srcpath,
- List *rnodelist, Snapshot snapshot);
+ List *rlocatorlist, Snapshot snapshot);
static CreateDBRelInfo *ScanSourceDatabasePgClassTuple(HeapTupleData *tuple,
Oid tbid, Oid dbid,
char *srcpath);
{
char *srcpath;
char *dstpath;
- List *rnodelist = NULL;
+ List *rlocatorlist = NULL;
ListCell *cell;
LockRelId srcrelid;
LockRelId dstrelid;
- RelFileNode srcrnode;
- RelFileNode dstrnode;
+ RelFileLocator srcrlocator;
+ RelFileLocator dstrlocator;
CreateDBRelInfo *relinfo;
/* Get source and destination database paths. */
/* Copy relmap file from source database to the destination database. */
RelationMapCopy(dst_dboid, dst_tsid, srcpath, dstpath);
- /* Get list of relfilenodes to copy from the source database. */
- rnodelist = ScanSourceDatabasePgClass(src_tsid, src_dboid, srcpath);
- Assert(rnodelist != NIL);
+ /* Get list of relfilelocators to copy from the source database. */
+ rlocatorlist = ScanSourceDatabasePgClass(src_tsid, src_dboid, srcpath);
+ Assert(rlocatorlist != NIL);
/*
* Database IDs will be the same for all relations so set them before
srcrelid.dbId = src_dboid;
dstrelid.dbId = dst_dboid;
- /* Loop over our list of relfilenodes and copy each one. */
- foreach(cell, rnodelist)
+ /* Loop over our list of relfilelocators and copy each one. */
+ foreach(cell, rlocatorlist)
{
relinfo = lfirst(cell);
- srcrnode = relinfo->rnode;
+ srcrlocator = relinfo->rlocator;
/*
* If the relation is from the source db's default tablespace then we
* Otherwise, we need to create in the same tablespace as it is in the
* source database.
*/
- if (srcrnode.spcNode == src_tsid)
- dstrnode.spcNode = dst_tsid;
+ if (srcrlocator.spcOid == src_tsid)
+ dstrlocator.spcOid = dst_tsid;
else
- dstrnode.spcNode = srcrnode.spcNode;
+ dstrlocator.spcOid = srcrlocator.spcOid;
- dstrnode.dbNode = dst_dboid;
- dstrnode.relNode = srcrnode.relNode;
+ dstrlocator.dbOid = dst_dboid;
+ dstrlocator.relNumber = srcrlocator.relNumber;
/*
* Acquire locks on source and target relations before copying.
LockRelationId(&dstrelid, AccessShareLock);
/* Copy relation storage from source to the destination. */
- CreateAndCopyRelationData(srcrnode, dstrnode, relinfo->permanent);
+ CreateAndCopyRelationData(srcrlocator, dstrlocator, relinfo->permanent);
/* Release the relation locks. */
UnlockRelationId(&srcrelid, AccessShareLock);
pfree(srcpath);
pfree(dstpath);
- list_free_deep(rnodelist);
+ list_free_deep(rlocatorlist);
}
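The tablespace mapping applied in the copy loop above reduces to a one-line rule; a hypothetical helper stating it explicitly:

	/*
	 * Relations stored in the source database's default tablespace follow
	 * the new database to its default tablespace; any other tablespace is
	 * preserved as-is.
	 */
	static Oid
	map_dest_tablespace(Oid spcOid, Oid src_tsid, Oid dst_tsid)
	{
		return (spcOid == src_tsid) ? dst_tsid : spcOid;
	}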
/*
static List *
ScanSourceDatabasePgClass(Oid tbid, Oid dbid, char *srcpath)
{
- RelFileNode rnode;
+ RelFileLocator rlocator;
BlockNumber nblocks;
BlockNumber blkno;
Buffer buf;
- Oid relfilenode;
+ RelFileNumber relfilenumber;
Page page;
- List *rnodelist = NIL;
+ List *rlocatorlist = NIL;
LockRelId relid;
Relation rel;
Snapshot snapshot;
BufferAccessStrategy bstrategy;
- /* Get pg_class relfilenode. */
- relfilenode = RelationMapOidToFilenodeForDatabase(srcpath,
- RelationRelationId);
+ /* Get pg_class relfilenumber. */
+ relfilenumber = RelationMapOidToFilenumberForDatabase(srcpath,
+ RelationRelationId);
/* Don't read data into shared_buffers without holding a relation lock. */
relid.dbId = dbid;
relid.relId = RelationRelationId;
LockRelationId(&relid, AccessShareLock);
- /* Prepare a RelFileNode for the pg_class relation. */
- rnode.spcNode = tbid;
- rnode.dbNode = dbid;
- rnode.relNode = relfilenode;
+ /* Prepare a RelFileLocator for the pg_class relation. */
+ rlocator.spcOid = tbid;
+ rlocator.dbOid = dbid;
+ rlocator.relNumber = relfilenumber;
/*
* We can't use a real relcache entry for a relation in some other
* used the smgr layer directly, we would have to worry about
* invalidations.
*/
- rel = CreateFakeRelcacheEntry(rnode);
+ rel = CreateFakeRelcacheEntry(rlocator);
nblocks = smgrnblocks(RelationGetSmgr(rel), MAIN_FORKNUM);
FreeFakeRelcacheEntry(rel);
{
CHECK_FOR_INTERRUPTS();
- buf = ReadBufferWithoutRelcache(rnode, MAIN_FORKNUM, blkno,
+ buf = ReadBufferWithoutRelcache(rlocator, MAIN_FORKNUM, blkno,
RBM_NORMAL, bstrategy, false);
LockBuffer(buf, BUFFER_LOCK_SHARE);
continue;
}
- /* Append relevant pg_class tuples for current page to rnodelist. */
- rnodelist = ScanSourceDatabasePgClassPage(page, buf, tbid, dbid,
- srcpath, rnodelist,
- snapshot);
+ /* Append relevant pg_class tuples for current page to rlocatorlist. */
+ rlocatorlist = ScanSourceDatabasePgClassPage(page, buf, tbid, dbid,
+ srcpath, rlocatorlist,
+ snapshot);
UnlockReleaseBuffer(buf);
}
/* Release relation lock. */
UnlockRelationId(&relid, AccessShareLock);
- return rnodelist;
+ return rlocatorlist;
}
/*
* Scan one page of the source database's pg_class relation and add relevant
- * entries to rnodelist. The return value is the updated list.
+ * entries to rlocatorlist. The return value is the updated list.
*/
static List *
ScanSourceDatabasePgClassPage(Page page, Buffer buf, Oid tbid, Oid dbid,
- char *srcpath, List *rnodelist,
+ char *srcpath, List *rlocatorlist,
Snapshot snapshot)
{
BlockNumber blkno = BufferGetBlockNumber(buf);
relinfo = ScanSourceDatabasePgClassTuple(&tuple, tbid, dbid,
srcpath);
if (relinfo != NULL)
- rnodelist = lappend(rnodelist, relinfo);
+ rlocatorlist = lappend(rlocatorlist, relinfo);
}
}
- return rnodelist;
+ return rlocatorlist;
}
/*
{
CreateDBRelInfo *relinfo;
Form_pg_class classForm;
- Oid relfilenode = InvalidOid;
+ RelFileNumber relfilenumber = InvalidRelFileNumber;
classForm = (Form_pg_class) GETSTRUCT(tuple);
return NULL;
/*
- * If relfilenode is valid then directly use it. Otherwise, consult the
+ * If relfilenumber is valid then directly use it. Otherwise, consult the
* relmap.
*/
- if (OidIsValid(classForm->relfilenode))
- relfilenode = classForm->relfilenode;
+ if (RelFileNumberIsValid(classForm->relfilenode))
+ relfilenumber = classForm->relfilenode;
else
- relfilenode = RelationMapOidToFilenodeForDatabase(srcpath,
- classForm->oid);
+ relfilenumber = RelationMapOidToFilenumberForDatabase(srcpath,
+ classForm->oid);
- /* We must have a valid relfilenode oid. */
- if (!OidIsValid(relfilenode))
- elog(ERROR, "relation with OID %u does not have a valid relfilenode",
+ /* We must have a valid relfilenumber. */
+ if (!RelFileNumberIsValid(relfilenumber))
+ elog(ERROR, "relation with OID %u does not have a valid relfilenumber",
classForm->oid);
/* Prepare a rel info element and add it to the list. */
relinfo = (CreateDBRelInfo *) palloc(sizeof(CreateDBRelInfo));
if (OidIsValid(classForm->reltablespace))
- relinfo->rnode.spcNode = classForm->reltablespace;
+ relinfo->rlocator.spcOid = classForm->reltablespace;
else
- relinfo->rnode.spcNode = tbid;
+ relinfo->rlocator.spcOid = tbid;
- relinfo->rnode.dbNode = dbid;
- relinfo->rnode.relNode = relfilenode;
+ relinfo->rlocator.dbOid = dbid;
+ relinfo->rlocator.relNumber = relfilenumber;
relinfo->reloid = classForm->oid;
/* Temporary relations were rejected above. */
* try to remove that already-existing subdirectory during the cleanup in
* remove_dbtablespaces. Nuking existing files seems like a bad idea, so
* instead we make this extra check before settling on the OID of the new
- * database. This exactly parallels what GetNewRelFileNode() does for table
- * relfilenode values.
+ * database. This exactly parallels what GetNewRelFileNumber() does for table
+ * relfilenumber values.
*/
static bool
check_db_file_conflict(Oid db_id)
}
/*
- * A valid stmt->oldNode implies that we already have a built form of the
- * index. The caller should also decline any index build.
+ * A valid stmt->oldNumber implies that we already have a built form of
+ * the index. The caller should also decline any index build.
*/
- Assert(!OidIsValid(stmt->oldNode) || (skip_build && !concurrent));
+ Assert(!RelFileNumberIsValid(stmt->oldNumber) || (skip_build && !concurrent));
/*
* Make the catalog entries for the index, including constraints. This
indexRelationId =
index_create(rel, indexRelationName, indexRelationId, parentIndexId,
parentConstraintId,
- stmt->oldNode, indexInfo, indexColNames,
+ stmt->oldNumber, indexInfo, indexColNames,
accessMethodId, tablespaceId,
collationObjectId, classObjectId,
coloptions, reloptions,
* We can't use the same index name for the child index,
* so clear idxname to let the recursive invocation choose
* a new name. Likewise, the existing target relation
- * field is wrong, and if indexOid or oldNode are set,
+ * field is wrong, and if indexOid or oldNumber are set,
* they mustn't be applied to the child either.
*/
childStmt->idxname = NULL;
childStmt->relation = NULL;
childStmt->indexOid = InvalidOid;
- childStmt->oldNode = InvalidOid;
+ childStmt->oldNumber = InvalidRelFileNumber;
childStmt->oldCreateSubid = InvalidSubTransactionId;
- childStmt->oldFirstRelfilenodeSubid = InvalidSubTransactionId;
+ childStmt->oldFirstRelfilelocatorSubid = InvalidSubTransactionId;
/*
* Adjust any Vars (both in expressions and in the index's
* particular this eliminates all shared catalogs.).
*/
if (RELKIND_HAS_STORAGE(classtuple->relkind) &&
- !OidIsValid(classtuple->relfilenode))
+ !RelFileNumberIsValid(classtuple->relfilenode))
skip_rel = true;
/*
* ExecRefreshMatView -- execute a REFRESH MATERIALIZED VIEW command
*
* This refreshes the materialized view by creating a new table and swapping
- * the relfilenodes of the new table and the old materialized view, so the OID
- * of the original materialized view is preserved. Thus we do not lose GRANT
- * nor references to this materialized view.
+ * the relfilenumbers of the new table and the old materialized view, so the
+ * OID of the original materialized view is preserved. Thus we do not lose
+ * GRANT nor references to this materialized view.
*
typedef struct SeqTableData
{
Oid relid; /* pg_class OID of this sequence (hash key) */
- Oid filenode; /* last seen relfilenode of this sequence */
+ RelFileNumber filenumber; /* last seen relfilenumber of this sequence */
LocalTransactionId lxid; /* xact in which we last did a seq op */
bool last_valid; /* do we have a valid "last" value? */
int64 last; /* value last returned by nextval */
*
* The change is made transactionally, so that on failure of the current
* transaction, the sequence will be restored to its previous state.
- * We do that by creating a whole new relfilenode for the sequence; so this
+ * We do that by creating a whole new relfilenumber for the sequence; so this
* works much like the rewriting forms of ALTER TABLE.
*
* Caller is assumed to have acquired AccessExclusiveLock on the sequence,
/*
* Create a new storage file for the sequence.
*/
- RelationSetNewRelfilenode(seq_rel, seq_rel->rd_rel->relpersistence);
+ RelationSetNewRelfilenumber(seq_rel, seq_rel->rd_rel->relpersistence);
/*
* Ensure sequence's relfrozenxid is at 0, since it won't contain any
{
SMgrRelation srel;
- srel = smgropen(rel->rd_node, InvalidBackendId);
+ srel = smgropen(rel->rd_locator, InvalidBackendId);
smgrcreate(srel, INIT_FORKNUM, false);
- log_smgrcreate(&rel->rd_node, INIT_FORKNUM);
+ log_smgrcreate(&rel->rd_locator, INIT_FORKNUM);
fill_seq_fork_with_data(rel, tuple, INIT_FORKNUM);
FlushRelationBuffers(rel);
smgrclose(srel);
XLogBeginInsert();
XLogRegisterBuffer(0, buf, REGBUF_WILL_INIT);
- xlrec.node = rel->rd_node;
+ xlrec.locator = rel->rd_locator;
XLogRegisterData((char *) &xlrec, sizeof(xl_seq_rec));
XLogRegisterData((char *) tuple->t_data, tuple->t_len);
* Create a new storage file for the sequence, making the state
* changes transactional.
*/
- RelationSetNewRelfilenode(seqrel, seqrel->rd_rel->relpersistence);
+ RelationSetNewRelfilenumber(seqrel, seqrel->rd_rel->relpersistence);
/*
* Ensure sequence's relfrozenxid is at 0, since it won't contain any
GetTopTransactionId();
(void) read_seq_tuple(seqrel, &buf, &seqdatatuple);
- RelationSetNewRelfilenode(seqrel, newrelpersistence);
+ RelationSetNewRelfilenumber(seqrel, newrelpersistence);
fill_seq_with_data(seqrel, &seqdatatuple);
UnlockReleaseBuffer(buf);
seq->is_called = true;
seq->log_cnt = 0;
- xlrec.node = seqrel->rd_node;
+ xlrec.locator = seqrel->rd_locator;
XLogRegisterData((char *) &xlrec, sizeof(xl_seq_rec));
XLogRegisterData((char *) seqdatatuple.t_data, seqdatatuple.t_len);
XLogBeginInsert();
XLogRegisterBuffer(0, buf, REGBUF_WILL_INIT);
- xlrec.node = seqrel->rd_node;
+ xlrec.locator = seqrel->rd_locator;
XLogRegisterData((char *) &xlrec, sizeof(xl_seq_rec));
XLogRegisterData((char *) seqdatatuple.t_data, seqdatatuple.t_len);
if (!found)
{
/* relid already filled in */
- elm->filenode = InvalidOid;
+ elm->filenumber = InvalidRelFileNumber;
elm->lxid = InvalidLocalTransactionId;
elm->last_valid = false;
elm->last = elm->cached = 0;
* discard any cached-but-unissued values. We do not touch the currval()
* state, however.
*/
- if (seqrel->rd_rel->relfilenode != elm->filenode)
+ if (seqrel->rd_rel->relfilenode != elm->filenumber)
{
- elm->filenode = seqrel->rd_rel->relfilenode;
+ elm->filenumber = seqrel->rd_rel->relfilenode;
elm->cached = elm->last;
}
* changed. This allows ALTER SEQUENCE to behave transactionally. Currently,
* the only option that doesn't cause that is OWNED BY. It's *necessary* for
* ALTER SEQUENCE OWNED BY to not rewrite the sequence, because that would
- * break pg_upgrade by causing unwanted changes in the sequence's relfilenode.
+ * break pg_upgrade by causing unwanted changes in the sequence's
+ * relfilenumber.
*/
static void
init_params(ParseState *pstate, List *options, bool for_identity,
static ObjectAddress ATExecSetCompression(AlteredTableInfo *tab, Relation rel,
const char *column, Node *newValue, LOCKMODE lockmode);
-static void index_copy_data(Relation rel, RelFileNode newrnode);
+static void index_copy_data(Relation rel, RelFileLocator newrlocator);
static const char *storage_name(char c);
static void RangeVarCallbackForDropRelation(const RangeVar *rel, Oid relOid,
/*
* Normally, we need a transaction-safe truncation here. However, if
* the table was either created in the current (sub)transaction or has
- * a new relfilenode in the current (sub)transaction, then we can just
- * truncate it in-place, because a rollback would cause the whole
+ * a new relfilenumber in the current (sub)transaction, then we can
+ * just truncate it in-place, because a rollback would cause the whole
* table or the current physical file to be thrown away anyway.
*/
if (rel->rd_createSubid == mySubid ||
- rel->rd_newRelfilenodeSubid == mySubid)
+ rel->rd_newRelfilelocatorSubid == mySubid)
{
/* Immediate, non-rollbackable truncation is OK */
heap_truncate_one_rel(rel);
* Need the full transaction-safe pushups.
*
* Create a new empty storage file for the relation, and assign it
- * as the relfilenode value. The old storage file is scheduled for
- * deletion at commit.
+ * as the relfilenumber value. The old storage file is scheduled
+ * for deletion at commit.
*/
- RelationSetNewRelfilenode(rel, rel->rd_rel->relpersistence);
+ RelationSetNewRelfilenumber(rel, rel->rd_rel->relpersistence);
heap_relid = RelationGetRelid(rel);
Relation toastrel = relation_open(toast_relid,
AccessExclusiveLock);
- RelationSetNewRelfilenode(toastrel,
- toastrel->rd_rel->relpersistence);
+ RelationSetNewRelfilenumber(toastrel,
+ toastrel->rd_rel->relpersistence);
table_close(toastrel, NoLock);
}
/*
* SetRelationTableSpace
- * Set new reltablespace and relfilenode in pg_class entry.
+ * Set new reltablespace and relfilenumber in pg_class entry.
*
* newTableSpaceId is the new tablespace for the relation, and
- * newRelFileNode its new filenode. If newRelFileNode is InvalidOid,
- * this field is not updated.
+ * newRelFilenumber its new filenumber. If newRelFilenumber is
+ * InvalidRelFileNumber, this field is not updated.
*
* NOTE: The caller must hold AccessExclusiveLock on the relation.
*
void
SetRelationTableSpace(Relation rel,
Oid newTableSpaceId,
- Oid newRelFileNode)
+ RelFileNumber newRelFilenumber)
{
Relation pg_class;
HeapTuple tuple;
/* Update the pg_class row. */
rd_rel->reltablespace = (newTableSpaceId == MyDatabaseTableSpace) ?
InvalidOid : newTableSpaceId;
- if (OidIsValid(newRelFileNode))
- rd_rel->relfilenode = newRelFileNode;
+ if (RelFileNumberIsValid(newRelFilenumber))
+ rd_rel->relfilenode = newRelFilenumber;
CatalogTupleUpdate(pg_class, &tuple->t_self, tuple);
/*
* persistence: on one hand, we need to ensure that the buffers
* belonging to each of the two relations are marked with or without
* BM_PERMANENT properly. On the other hand, since rewriting creates
- * and assigns a new relfilenode, we automatically create or drop an
+ * and assigns a new relfilenumber, we automatically create or drop an
* init fork for the relation as appropriate.
*/
if (tab->rewrite > 0 && tab->relkind != RELKIND_SEQUENCE)
* Create transient table that will receive the modified data.
*
* Ensure it is marked correctly as logged or unlogged. We have
- * to do this here so that buffers for the new relfilenode will
+ * to do this here so that buffers for the new relfilenumber will
* have the right persistence set, and at the same time ensure
- * that the original filenode's buffers will get read in with the
- * correct setting (i.e. the original one). Otherwise a rollback
- * after the rewrite would possibly result with buffers for the
- * original filenode having the wrong persistence setting.
+ * that the original filenumber's buffers will get read in with
+ * the correct setting (i.e. the original one). Otherwise a
+ * rollback after the rewrite could leave buffers for the
+ * original filenumber with the wrong persistence setting.
*
* NB: This relies on swap_relation_files() also swapping the
* persistence. That wouldn't work for pg_class, but that can't be
/* suppress schema rights check when rebuilding existing index */
check_rights = !is_rebuild;
/* skip index build if phase 3 will do it or we're reusing an old one */
- skip_build = tab->rewrite > 0 || OidIsValid(stmt->oldNode);
+ skip_build = tab->rewrite > 0 || RelFileNumberIsValid(stmt->oldNumber);
/* suppress notices when rebuilding existing index */
quiet = is_rebuild;
quiet);
/*
- * If TryReuseIndex() stashed a relfilenode for us, we used it for the new
- * index instead of building from scratch. Restore associated fields.
+ * If TryReuseIndex() stashed a relfilenumber for us, we used it for the
+ * new index instead of building from scratch. Restore associated fields.
* This may store InvalidSubTransactionId in both fields, in which case
* relcache.c will assume it can rebuild the relcache entry. Hence, do
* this after the CCI that made catalog rows visible to any rebuild. The
* DROP of the old edition of this index will have scheduled the storage
* for deletion at commit, so cancel that pending deletion.
*/
- if (OidIsValid(stmt->oldNode))
+ if (RelFileNumberIsValid(stmt->oldNumber))
{
Relation irel = index_open(address.objectId, NoLock);
irel->rd_createSubid = stmt->oldCreateSubid;
- irel->rd_firstRelfilenodeSubid = stmt->oldFirstRelfilenodeSubid;
- RelationPreserveStorage(irel->rd_node, true);
+ irel->rd_firstRelfilelocatorSubid = stmt->oldFirstRelfilelocatorSubid;
+ RelationPreserveStorage(irel->rd_locator, true);
index_close(irel, NoLock);
}
/* If it's a partitioned index, there is no storage to share. */
if (irel->rd_rel->relkind != RELKIND_PARTITIONED_INDEX)
{
- stmt->oldNode = irel->rd_node.relNode;
+ stmt->oldNumber = irel->rd_locator.relNumber;
stmt->oldCreateSubid = irel->rd_createSubid;
- stmt->oldFirstRelfilenodeSubid = irel->rd_firstRelfilenodeSubid;
+ stmt->oldFirstRelfilelocatorSubid = irel->rd_firstRelfilelocatorSubid;
}
index_close(irel, NoLock);
}
{
Relation rel;
Oid reltoastrelid;
- Oid newrelfilenode;
- RelFileNode newrnode;
+ RelFileNumber newrelfilenumber;
+ RelFileLocator newrlocator;
List *reltoastidxids = NIL;
ListCell *lc;
}
/*
- * Relfilenodes are not unique in databases across tablespaces, so we need
- * to allocate a new one in the new tablespace.
+ * Relfilenumbers are not unique across tablespaces within a database, so
+ * we need to allocate a new one in the new tablespace.
*/
- newrelfilenode = GetNewRelFileNode(newTableSpace, NULL,
- rel->rd_rel->relpersistence);
+ newrelfilenumber = GetNewRelFileNumber(newTableSpace, NULL,
+ rel->rd_rel->relpersistence);
/* Open old and new relation */
- newrnode = rel->rd_node;
- newrnode.relNode = newrelfilenode;
- newrnode.spcNode = newTableSpace;
+ newrlocator = rel->rd_locator;
+ newrlocator.relNumber = newrelfilenumber;
+ newrlocator.spcOid = newTableSpace;
- /* hand off to AM to actually create the new filenode and copy the data */
+ /* hand off to AM to actually create new rel storage and copy the data */
if (rel->rd_rel->relkind == RELKIND_INDEX)
{
- index_copy_data(rel, newrnode);
+ index_copy_data(rel, newrlocator);
}
else
{
Assert(RELKIND_HAS_TABLE_AM(rel->rd_rel->relkind));
- table_relation_copy_data(rel, &newrnode);
+ table_relation_copy_data(rel, &newrlocator);
}
/*
* the updated pg_class entry), but that's forbidden with
* CheckRelationTableSpaceMove().
*/
- SetRelationTableSpace(rel, newTableSpace, newrelfilenode);
+ SetRelationTableSpace(rel, newTableSpace, newrelfilenumber);
InvokeObjectPostAlterHook(RelationRelationId, RelationGetRelid(rel), 0);
- RelationAssumeNewRelfilenode(rel);
+ RelationAssumeNewRelfilelocator(rel);
relation_close(rel, NoLock);
}
static void
-index_copy_data(Relation rel, RelFileNode newrnode)
+index_copy_data(Relation rel, RelFileLocator newrlocator)
{
SMgrRelation dstrel;
- dstrel = smgropen(newrnode, rel->rd_backend);
+ dstrel = smgropen(newrlocator, rel->rd_backend);
/*
* Since we copy the file directly without looking at the shared buffers,
* Create and copy all forks of the relation, and schedule unlinking of
* old physical files.
*
- * NOTE: any conflict in relfilenode value will be caught in
+ * NOTE: any conflict in relfilenumber value will be caught in
* RelationCreateStorage().
*/
- RelationCreateStorage(newrnode, rel->rd_rel->relpersistence, true);
+ RelationCreateStorage(newrlocator, rel->rd_rel->relpersistence, true);
/* copy main fork */
RelationCopyStorage(RelationGetSmgr(rel), dstrel, MAIN_FORKNUM,
if (RelationIsPermanent(rel) ||
(rel->rd_rel->relpersistence == RELPERSISTENCE_UNLOGGED &&
forkNum == INIT_FORKNUM))
- log_smgrcreate(&newrnode, forkNum);
+ log_smgrcreate(&newrlocator, forkNum);
RelationCopyStorage(RelationGetSmgr(rel), dstrel, forkNum,
rel->rd_rel->relpersistence);
}
* remove the possibility of having file name conflicts, we isolate
* files within a tablespace into database-specific subdirectories.
*
- * To support file access via the information given in RelFileNode, we
+ * To support file access via the information given in RelFileLocator, we
* maintain a symbolic-link map in $PGDATA/pg_tblspc. The symlinks are
* named by tablespace OIDs and point to the actual tablespace directories.
* There is also a per-cluster version directory in each tablespace.
* Thus the full path to an arbitrary file is
- * $PGDATA/pg_tblspc/spcoid/PG_MAJORVER_CATVER/dboid/relfilenode
+ * $PGDATA/pg_tblspc/spcoid/PG_MAJORVER_CATVER/dboid/relfilenumber
* e.g.
* $PGDATA/pg_tblspc/20981/PG_9.0_201002161/719849/83292814
*
* tables) and pg_default (for everything else). For backwards compatibility
* and to remain functional on platforms without symlinks, these tablespaces
* are accessed specially: they are respectively
- * $PGDATA/global/relfilenode
- * $PGDATA/base/dboid/relfilenode
+ * $PGDATA/global/relfilenumber
+ * $PGDATA/base/dboid/relfilenumber
*
* To allow CREATE DATABASE to give a new database a default tablespace
* that's different from the template database's default, we make the
* re-create a database subdirectory (of $PGDATA/base) during WAL replay.
*/
void
-TablespaceCreateDbspace(Oid spcNode, Oid dbNode, bool isRedo)
+TablespaceCreateDbspace(Oid spcOid, Oid dbOid, bool isRedo)
{
struct stat st;
char *dir;
* The global tablespace doesn't have per-database subdirectories, so
* nothing to do for it.
*/
- if (spcNode == GLOBALTABLESPACE_OID)
+ if (spcOid == GLOBALTABLESPACE_OID)
return;
- Assert(OidIsValid(spcNode));
- Assert(OidIsValid(dbNode));
+ Assert(OidIsValid(spcOid));
+ Assert(OidIsValid(dbOid));
- dir = GetDatabasePath(dbNode, spcNode);
+ dir = GetDatabasePath(dbOid, spcOid);
if (stat(dir, &st) < 0)
{
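For orientation, GetDatabasePath() (src/common/relpath.c) assembles the per-database directory described above from the two OIDs. A minimal sketch with hypothetical values:

	/* Hypothetical OIDs, for illustration only. */
	char	   *dir = GetDatabasePath(719849 /* dbOid */ , 20981 /* spcOid */ );

	/*
	 * For a user tablespace this yields something like
	 * "pg_tblspc/20981/PG_15_202209061/719849"; for pg_default it is just
	 * "base/719849", and for pg_global it is "global" (which, as noted
	 * above, has no per-database subdirectory).
	 */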
COPY_NODE_FIELD(excludeOpNames);
COPY_STRING_FIELD(idxcomment);
COPY_SCALAR_FIELD(indexOid);
- COPY_SCALAR_FIELD(oldNode);
+ COPY_SCALAR_FIELD(oldNumber);
COPY_SCALAR_FIELD(oldCreateSubid);
- COPY_SCALAR_FIELD(oldFirstRelfilenodeSubid);
+ COPY_SCALAR_FIELD(oldFirstRelfilelocatorSubid);
COPY_SCALAR_FIELD(unique);
COPY_SCALAR_FIELD(nulls_not_distinct);
COPY_SCALAR_FIELD(primary);
COMPARE_NODE_FIELD(excludeOpNames);
COMPARE_STRING_FIELD(idxcomment);
COMPARE_SCALAR_FIELD(indexOid);
- COMPARE_SCALAR_FIELD(oldNode);
+ COMPARE_SCALAR_FIELD(oldNumber);
COMPARE_SCALAR_FIELD(oldCreateSubid);
- COMPARE_SCALAR_FIELD(oldFirstRelfilenodeSubid);
+ COMPARE_SCALAR_FIELD(oldFirstRelfilelocatorSubid);
COMPARE_SCALAR_FIELD(unique);
COMPARE_SCALAR_FIELD(nulls_not_distinct);
COMPARE_SCALAR_FIELD(primary);
WRITE_NODE_FIELD(excludeOpNames);
WRITE_STRING_FIELD(idxcomment);
WRITE_OID_FIELD(indexOid);
- WRITE_OID_FIELD(oldNode);
+ WRITE_OID_FIELD(oldNumber);
WRITE_UINT_FIELD(oldCreateSubid);
- WRITE_UINT_FIELD(oldFirstRelfilenodeSubid);
+ WRITE_UINT_FIELD(oldFirstRelfilelocatorSubid);
WRITE_BOOL_FIELD(unique);
WRITE_BOOL_FIELD(nulls_not_distinct);
WRITE_BOOL_FIELD(primary);
n->excludeOpNames = NIL;
n->idxcomment = NULL;
n->indexOid = InvalidOid;
- n->oldNode = InvalidOid;
+ n->oldNumber = InvalidRelFileNumber;
n->oldCreateSubid = InvalidSubTransactionId;
- n->oldFirstRelfilenodeSubid = InvalidSubTransactionId;
+ n->oldFirstRelfilelocatorSubid = InvalidSubTransactionId;
n->primary = false;
n->isconstraint = false;
n->deferrable = false;
n->excludeOpNames = NIL;
n->idxcomment = NULL;
n->indexOid = InvalidOid;
- n->oldNode = InvalidOid;
+ n->oldNumber = InvalidRelFileNumber;
n->oldCreateSubid = InvalidSubTransactionId;
- n->oldFirstRelfilenodeSubid = InvalidSubTransactionId;
+ n->oldFirstRelfilelocatorSubid = InvalidSubTransactionId;
n->primary = false;
n->isconstraint = false;
n->deferrable = false;
index->excludeOpNames = NIL;
index->idxcomment = NULL;
index->indexOid = InvalidOid;
- index->oldNode = InvalidOid;
+ index->oldNumber = InvalidRelFileNumber;
index->oldCreateSubid = InvalidSubTransactionId;
- index->oldFirstRelfilenodeSubid = InvalidSubTransactionId;
+ index->oldFirstRelfilelocatorSubid = InvalidSubTransactionId;
index->unique = idxrec->indisunique;
index->nulls_not_distinct = idxrec->indnullsnotdistinct;
index->primary = idxrec->indisprimary;
index->excludeOpNames = NIL;
index->idxcomment = NULL;
index->indexOid = InvalidOid;
- index->oldNode = InvalidOid;
+ index->oldNumber = InvalidRelFileNumber;
index->oldCreateSubid = InvalidSubTransactionId;
- index->oldFirstRelfilenodeSubid = InvalidSubTransactionId;
+ index->oldFirstRelfilelocatorSubid = InvalidSubTransactionId;
index->transformed = false;
index->concurrent = false;
index->if_not_exists = false;
* We use the request struct directly as a hashtable key. This
* assumes that any padding bytes in the structs are consistently the
* same, which should be okay because we zeroed them in
- * CheckpointerShmemInit. Note also that RelFileNode had better
+ * CheckpointerShmemInit. Note also that RelFileLocator had better
* contain no pad bytes.
*/
request = &CheckpointerShmem->requests[n];
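If one wanted to enforce the no-pad-bytes assumption at compile time, a hypothetical guard (not part of this patch) could read:

	/*
	 * Hypothetical check: RelFileLocator is three 4-byte fields (spcOid,
	 * dbOid, relNumber), so its size should equal the sum of its members.
	 */
	StaticAssertDecl(sizeof(RelFileLocator) == 3 * sizeof(Oid),
					 "RelFileLocator must not contain padding bytes");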
XLogReaderState *r = buf->record;
xl_heap_insert *xlrec;
ReorderBufferChange *change;
- RelFileNode target_node;
+ RelFileLocator target_locator;
xlrec = (xl_heap_insert *) XLogRecGetData(r);
return;
/* only interested in our database */
- XLogRecGetBlockTag(r, 0, &target_node, NULL, NULL);
- if (target_node.dbNode != ctx->slot->data.database)
+ XLogRecGetBlockTag(r, 0, &target_locator, NULL, NULL);
+ if (target_locator.dbOid != ctx->slot->data.database)
return;
/* output plugin doesn't look for this origin, no need to queue */
change->action = REORDER_BUFFER_CHANGE_INTERNAL_SPEC_INSERT;
change->origin_id = XLogRecGetOrigin(r);
- memcpy(&change->data.tp.relnode, &target_node, sizeof(RelFileNode));
+ memcpy(&change->data.tp.rlocator, &target_locator, sizeof(RelFileLocator));
tupledata = XLogRecGetBlockData(r, 0, &datalen);
tuplelen = datalen - SizeOfHeapHeader;
xl_heap_update *xlrec;
ReorderBufferChange *change;
char *data;
- RelFileNode target_node;
+ RelFileLocator target_locator;
xlrec = (xl_heap_update *) XLogRecGetData(r);
/* only interested in our database */
- XLogRecGetBlockTag(r, 0, &target_node, NULL, NULL);
- if (target_node.dbNode != ctx->slot->data.database)
+ XLogRecGetBlockTag(r, 0, &target_locator, NULL, NULL);
+ if (target_locator.dbOid != ctx->slot->data.database)
return;
/* output plugin doesn't look for this origin, no need to queue */
change = ReorderBufferGetChange(ctx->reorder);
change->action = REORDER_BUFFER_CHANGE_UPDATE;
change->origin_id = XLogRecGetOrigin(r);
- memcpy(&change->data.tp.relnode, &target_node, sizeof(RelFileNode));
+ memcpy(&change->data.tp.rlocator, &target_locator, sizeof(RelFileLocator));
if (xlrec->flags & XLH_UPDATE_CONTAINS_NEW_TUPLE)
{
XLogReaderState *r = buf->record;
xl_heap_delete *xlrec;
ReorderBufferChange *change;
- RelFileNode target_node;
+ RelFileLocator target_locator;
xlrec = (xl_heap_delete *) XLogRecGetData(r);
/* only interested in our database */
- XLogRecGetBlockTag(r, 0, &target_node, NULL, NULL);
- if (target_node.dbNode != ctx->slot->data.database)
+ XLogRecGetBlockTag(r, 0, &target_locator, NULL, NULL);
+ if (target_locator.dbOid != ctx->slot->data.database)
return;
/* output plugin doesn't look for this origin, no need to queue */
change->origin_id = XLogRecGetOrigin(r);
- memcpy(&change->data.tp.relnode, &target_node, sizeof(RelFileNode));
+ memcpy(&change->data.tp.rlocator, &target_locator, sizeof(RelFileLocator));
/* old primary key stored */
if (xlrec->flags & XLH_DELETE_CONTAINS_OLD)
char *data;
char *tupledata;
Size tuplelen;
- RelFileNode rnode;
+ RelFileLocator rlocator;
xlrec = (xl_heap_multi_insert *) XLogRecGetData(r);
return;
/* only interested in our database */
- XLogRecGetBlockTag(r, 0, &rnode, NULL, NULL);
- if (rnode.dbNode != ctx->slot->data.database)
+ XLogRecGetBlockTag(r, 0, &rlocator, NULL, NULL);
+ if (rlocator.dbOid != ctx->slot->data.database)
return;
/* output plugin doesn't look for this origin, no need to queue */
change->action = REORDER_BUFFER_CHANGE_INSERT;
change->origin_id = XLogRecGetOrigin(r);
- memcpy(&change->data.tp.relnode, &rnode, sizeof(RelFileNode));
+ memcpy(&change->data.tp.rlocator, &rlocator, sizeof(RelFileLocator));
xlhdr = (xl_multi_insert_tuple *) SHORTALIGN(data);
data = ((char *) xlhdr) + SizeOfMultiInsertTuple;
{
XLogReaderState *r = buf->record;
ReorderBufferChange *change;
- RelFileNode target_node;
+ RelFileLocator target_locator;
/* only interested in our database */
- XLogRecGetBlockTag(r, 0, &target_node, NULL, NULL);
- if (target_node.dbNode != ctx->slot->data.database)
+ XLogRecGetBlockTag(r, 0, &target_locator, NULL, NULL);
+ if (target_locator.dbOid != ctx->slot->data.database)
return;
/* output plugin doesn't look for this origin, no need to queue */
change->action = REORDER_BUFFER_CHANGE_INTERNAL_SPEC_CONFIRM;
change->origin_id = XLogRecGetOrigin(r);
- memcpy(&change->data.tp.relnode, &target_node, sizeof(RelFileNode));
+ memcpy(&change->data.tp.rlocator, &target_locator, sizeof(RelFileLocator));
change->data.tp.clear_toast_afterwards = true;
#include "utils/memdebug.h"
#include "utils/memutils.h"
#include "utils/rel.h"
-#include "utils/relfilenodemap.h"
+#include "utils/relfilenumbermap.h"
/* entry for a hash table we use to map from xid to our transaction state */
ReorderBufferTXN *txn;
} ReorderBufferTXNByIdEnt;
-/* data structures for (relfilenode, ctid) => (cmin, cmax) mapping */
+/* data structures for (relfilelocator, ctid) => (cmin, cmax) mapping */
typedef struct ReorderBufferTupleCidKey
{
- RelFileNode relnode;
+ RelFileLocator rlocator;
ItemPointerData tid;
} ReorderBufferTupleCidKey;
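For context, the entry type paired with this key (whose cmin/cmax fields are read further below) is defined in reorderbuffer.c roughly as:

typedef struct ReorderBufferTupleCidEnt
{
	ReorderBufferTupleCidKey key;
	CommandId	cmin;
	CommandId	cmax;
	CommandId	combocid;		/* just for debugging */
} ReorderBufferTupleCidEnt;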
}
/*
- * Destroy the (relfilenode, ctid) hashtable, so that we don't leak any
+ * Destroy the (relfilelocator, ctid) hashtable, so that we don't leak any
* memory. We could also keep the hash table and update it with new ctid
* values, but this seems simpler and good enough for now.
*/
}
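The teardown itself is a single hash_destroy(). A minimal sketch, assuming the hash hangs off the transaction as txn->tuplecid_hash, as it does in reorderbuffer.c:

	if (txn->tuplecid_hash != NULL)
	{
		hash_destroy(txn->tuplecid_hash);
		txn->tuplecid_hash = NULL;	/* forces a rebuild next time */
	}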
/*
- * Build a hash with a (relfilenode, ctid) -> (cmin, cmax) mapping for use by
+ * Build a hash with a (relfilelocator, ctid) -> (cmin, cmax) mapping for use by
* HeapTupleSatisfiesHistoricMVCC.
*/
static void
/* be careful about padding */
memset(&key, 0, sizeof(ReorderBufferTupleCidKey));
- key.relnode = change->data.tuplecid.node;
+ key.rlocator = change->data.tuplecid.locator;
ItemPointerCopy(&change->data.tuplecid.tid,
&key.tid);
case REORDER_BUFFER_CHANGE_DELETE:
Assert(snapshot_now);
- reloid = RelidByRelfilenode(change->data.tp.relnode.spcNode,
- change->data.tp.relnode.relNode);
+ reloid = RelidByRelfilenumber(change->data.tp.rlocator.spcOid,
+ change->data.tp.rlocator.relNumber);
/*
* Mapped catalog tuple without data, emitted while
* catalog table was in the process of being rewritten. We
- * can fail to look up the relfilenode, because the
+ * can fail to look up the relfilenumber, because the
* relmapper has no "historic" view, in contrast to the
* normal catalog during decoding. Thus repeated rewrites
* can cause a lookup failure. That's OK because we do not
* decode catalog changes anyway. Normally such tuples
* would be skipped over below, but we can't identify
* whether the table should be logically logged without
- * mapping the relfilenode to the oid.
+ * mapping the relfilenumber to the oid.
*/
if (reloid == InvalidOid &&
change->data.tp.newtuple == NULL &&
change->data.tp.oldtuple == NULL)
goto change_done;
else if (reloid == InvalidOid)
- elog(ERROR, "could not map filenode \"%s\" to relation OID",
- relpathperm(change->data.tp.relnode,
+ elog(ERROR, "could not map filenumber \"%s\" to relation OID",
+ relpathperm(change->data.tp.rlocator,
MAIN_FORKNUM));
relation = RelationIdGetRelation(reloid);
if (!RelationIsValid(relation))
- elog(ERROR, "could not open relation with OID %u (for filenode \"%s\")",
+ elog(ERROR, "could not open relation with OID %u (for filenumber \"%s\")",
reloid,
- relpathperm(change->data.tp.relnode,
+ relpathperm(change->data.tp.rlocator,
MAIN_FORKNUM));
if (!RelationIsLogicallyLogged(relation))
}
/*
- * Add new (relfilenode, tid) -> (cmin, cmax) mappings.
+ * Add new (relfilelocator, tid) -> (cmin, cmax) mappings.
*
* We do not include this change type in memory accounting, because we
* keep CIDs in a separate list and do not evict them when reaching
*/
void
ReorderBufferAddNewTupleCids(ReorderBuffer *rb, TransactionId xid,
- XLogRecPtr lsn, RelFileNode node,
+ XLogRecPtr lsn, RelFileLocator locator,
ItemPointerData tid, CommandId cmin,
CommandId cmax, CommandId combocid)
{
txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true);
- change->data.tuplecid.node = node;
+ change->data.tuplecid.locator = locator;
change->data.tuplecid.tid = tid;
change->data.tuplecid.cmin = cmin;
change->data.tuplecid.cmax = cmax;
* need anymore.
*
* To resolve those problems we have a per-transaction hash of (cmin,
- * cmax) tuples keyed by (relfilenode, ctid) which contains the actual
+ * cmax) tuples keyed by (relfilelocator, ctid) which contains the actual
* (cmin, cmax) values. That also takes care of combo CIDs by simply
* not caring about them at all. As we have the real cmin/cmax values
* combo CIDs aren't interesting.
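A lookup against that hash mirrors the key construction shown earlier. A minimal sketch, with rlocator, tid, and tuplecid_data standing in for the caller's values and the per-transaction hash:

	ReorderBufferTupleCidKey key;
	ReorderBufferTupleCidEnt *ent;

	/* Zero the key first: padding bytes participate in hashing. */
	memset(&key, 0, sizeof(key));
	key.rlocator = rlocator;
	ItemPointerCopy(&tid, &key.tid);

	ent = (ReorderBufferTupleCidEnt *)
		hash_search(tuplecid_data, &key, HASH_FIND, NULL);
	if (ent != NULL)
	{
		/* ent->cmin and ent->cmax hold the real values; no combo CIDs here */
	}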
while ((ent = (ReorderBufferTupleCidEnt *) hash_seq_search(&hstat)) != NULL)
{
elog(DEBUG3, "mapping: node: %u/%u/%u tid: %u/%u cmin: %u, cmax: %u",
- ent->key.relnode.dbNode,
- ent->key.relnode.spcNode,
- ent->key.relnode.relNode,
+ ent->key.rlocator.dbOid,
+ ent->key.rlocator.spcOid,
+ ent->key.rlocator.relNumber,
ItemPointerGetBlockNumber(&ent->key.tid),
ItemPointerGetOffsetNumber(&ent->key.tid),
ent->cmin,
path, readBytes,
(int32) sizeof(LogicalRewriteMappingData))));
- key.relnode = map.old_node;
+ key.rlocator = map.old_locator;
ItemPointerCopy(&map.old_tid,
&key.tid);
if (!ent)
continue;
- key.relnode = map.new_node;
+ key.rlocator = map.new_locator;
ItemPointerCopy(&map.new_tid,
&key.tid);
Assert(!BufferIsLocal(buffer));
/*
- * get relfilenode from the buffer, no convenient way to access it other
- * than that.
+ * get relfilelocator from the buffer, no convenient way to access it
+ * other than that.
*/
- BufferGetTag(buffer, &key.relnode, &forkno, &blockno);
+ BufferGetTag(buffer, &key.rlocator, &forkno, &blockno);
/* tuples can only be in the main fork */
Assert(forkno == MAIN_FORKNUM);
ReorderBufferXidSetCatalogChanges(builder->reorder, xid, lsn);
ReorderBufferAddNewTupleCids(builder->reorder, xlrec->top_xid, lsn,
- xlrec->target_node, xlrec->target_tid,
+ xlrec->target_locator, xlrec->target_tid,
xlrec->cmin, xlrec->cmax,
xlrec->combocid);
* Type for array used to sort SMgrRelations
*
* FlushRelationsAllBuffers shares the same comparator function with
- * DropRelFileNodesAllBuffers. Pointer to this struct and RelFileNode must be
- * compatible.
+ * DropRelFileLocatorsAllBuffers. Pointer to this struct and RelFileLocator
+ * must be compatible.
*/
typedef struct SMgrSortArray
{
- RelFileNode rnode; /* This must be the first member */
+ RelFileLocator rlocator; /* This must be the first member */
SMgrRelation srel;
} SMgrSortArray;
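The "must be compatible" requirement works because rlocator is the first member: a pointer to an SMgrSortArray element is also a valid pointer to its embedded RelFileLocator, so rlocator_comparator, which only dereferences the leading RelFileLocator, can sort both array types despite their different element sizes. Both uses appear later in this patch:

	pg_qsort(srels, nrels, sizeof(SMgrSortArray), rlocator_comparator);
	pg_qsort(locators, n, sizeof(RelFileLocator), rlocator_comparator);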
BufferAccessStrategy strategy,
bool *foundPtr);
static void FlushBuffer(BufferDesc *buf, SMgrRelation reln);
-static void FindAndDropRelFileNodeBuffers(RelFileNode rnode,
- ForkNumber forkNum,
- BlockNumber nForkBlock,
- BlockNumber firstDelBlock);
+static void FindAndDropRelFileLocatorBuffers(RelFileLocator rlocator,
+ ForkNumber forkNum,
+ BlockNumber nForkBlock,
+ BlockNumber firstDelBlock);
static void RelationCopyStorageUsingBuffer(Relation src, Relation dst,
ForkNumber forkNum,
bool isunlogged);
static void AtProcExit_Buffers(int code, Datum arg);
static void CheckForBufferLeaks(void);
-static int rnode_comparator(const void *p1, const void *p2);
+static int rlocator_comparator(const void *p1, const void *p2);
static inline int buffertag_comparator(const BufferTag *a, const BufferTag *b);
static inline int ckpt_buforder_comparator(const CkptSortItem *a, const CkptSortItem *b);
static int ts_ckpt_progress_comparator(Datum a, Datum b, void *arg);
Assert(BlockNumberIsValid(blockNum));
/* create a tag so we can lookup the buffer */
- INIT_BUFFERTAG(newTag, smgr_reln->smgr_rnode.node,
+ INIT_BUFFERTAG(newTag, smgr_reln->smgr_rlocator.locator,
forkNum, blockNum);
/* determine its hash code and partition lock ID */
* tag. In that case, the buffer is pinned and the usage count is bumped.
*/
bool
-ReadRecentBuffer(RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum,
+ReadRecentBuffer(RelFileLocator rlocator, ForkNumber forkNum, BlockNumber blockNum,
Buffer recent_buffer)
{
BufferDesc *bufHdr;
ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
ReservePrivateRefCountEntry();
- INIT_BUFFERTAG(tag, rnode, forkNum, blockNum);
+ INIT_BUFFERTAG(tag, rlocator, forkNum, blockNum);
if (BufferIsLocal(recent_buffer))
{
* BackendId).
*/
Buffer
-ReadBufferWithoutRelcache(RelFileNode rnode, ForkNumber forkNum,
+ReadBufferWithoutRelcache(RelFileLocator rlocator, ForkNumber forkNum,
BlockNumber blockNum, ReadBufferMode mode,
BufferAccessStrategy strategy, bool permanent)
{
bool hit;
- SMgrRelation smgr = smgropen(rnode, InvalidBackendId);
+ SMgrRelation smgr = smgropen(rlocator, InvalidBackendId);
return ReadBuffer_common(smgr, permanent ? RELPERSISTENCE_PERMANENT :
RELPERSISTENCE_UNLOGGED, forkNum, blockNum,
isExtend = (blockNum == P_NEW);
TRACE_POSTGRESQL_BUFFER_READ_START(forkNum, blockNum,
- smgr->smgr_rnode.node.spcNode,
- smgr->smgr_rnode.node.dbNode,
- smgr->smgr_rnode.node.relNode,
- smgr->smgr_rnode.backend,
+ smgr->smgr_rlocator.locator.spcOid,
+ smgr->smgr_rlocator.locator.dbOid,
+ smgr->smgr_rlocator.locator.relNumber,
+ smgr->smgr_rlocator.backend,
isExtend);
/* Substitute proper block number if caller asked for P_NEW */
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("cannot extend relation %s beyond %u blocks",
- relpath(smgr->smgr_rnode, forkNum),
+ relpath(smgr->smgr_rlocator, forkNum),
P_NEW)));
}
VacuumCostBalance += VacuumCostPageHit;
TRACE_POSTGRESQL_BUFFER_READ_DONE(forkNum, blockNum,
- smgr->smgr_rnode.node.spcNode,
- smgr->smgr_rnode.node.dbNode,
- smgr->smgr_rnode.node.relNode,
- smgr->smgr_rnode.backend,
+ smgr->smgr_rlocator.locator.spcOid,
+ smgr->smgr_rlocator.locator.dbOid,
+ smgr->smgr_rlocator.locator.relNumber,
+ smgr->smgr_rlocator.backend,
isExtend,
found);
if (!PageIsNew((Page) bufBlock))
ereport(ERROR,
(errmsg("unexpected data beyond EOF in block %u of relation %s",
- blockNum, relpath(smgr->smgr_rnode, forkNum)),
+ blockNum, relpath(smgr->smgr_rlocator, forkNum)),
errhint("This has been seen to occur with buggy kernels; consider updating your system.")));
/*
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("invalid page in block %u of relation %s; zeroing out page",
blockNum,
- relpath(smgr->smgr_rnode, forkNum))));
+ relpath(smgr->smgr_rlocator, forkNum))));
MemSet((char *) bufBlock, 0, BLCKSZ);
}
else
(errcode(ERRCODE_DATA_CORRUPTED),
errmsg("invalid page in block %u of relation %s",
blockNum,
- relpath(smgr->smgr_rnode, forkNum))));
+ relpath(smgr->smgr_rlocator, forkNum))));
}
}
}
VacuumCostBalance += VacuumCostPageMiss;
TRACE_POSTGRESQL_BUFFER_READ_DONE(forkNum, blockNum,
- smgr->smgr_rnode.node.spcNode,
- smgr->smgr_rnode.node.dbNode,
- smgr->smgr_rnode.node.relNode,
- smgr->smgr_rnode.backend,
+ smgr->smgr_rlocator.locator.spcOid,
+ smgr->smgr_rlocator.locator.dbOid,
+ smgr->smgr_rlocator.locator.relNumber,
+ smgr->smgr_rlocator.backend,
isExtend,
found);
uint32 buf_state;
/* create a tag so we can lookup the buffer */
- INIT_BUFFERTAG(newTag, smgr->smgr_rnode.node, forkNum, blockNum);
+ INIT_BUFFERTAG(newTag, smgr->smgr_rlocator.locator, forkNum, blockNum);
/* determine its hash code and partition lock ID */
newHash = BufTableHashCode(&newTag);
/* OK, do the I/O */
TRACE_POSTGRESQL_BUFFER_WRITE_DIRTY_START(forkNum, blockNum,
- smgr->smgr_rnode.node.spcNode,
- smgr->smgr_rnode.node.dbNode,
- smgr->smgr_rnode.node.relNode);
+ smgr->smgr_rlocator.locator.spcOid,
+ smgr->smgr_rlocator.locator.dbOid,
+ smgr->smgr_rlocator.locator.relNumber);
FlushBuffer(buf, NULL);
LWLockRelease(BufferDescriptorGetContentLock(buf));
&buf->tag);
TRACE_POSTGRESQL_BUFFER_WRITE_DIRTY_DONE(forkNum, blockNum,
- smgr->smgr_rnode.node.spcNode,
- smgr->smgr_rnode.node.dbNode,
- smgr->smgr_rnode.node.relNode);
+ smgr->smgr_rlocator.locator.spcOid,
+ smgr->smgr_rlocator.locator.dbOid,
+ smgr->smgr_rlocator.locator.relNumber);
}
else
{
{
bufHdr = GetLocalBufferDescriptor(-buffer - 1);
if (bufHdr->tag.blockNum == blockNum &&
- RelFileNodeEquals(bufHdr->tag.rnode, relation->rd_node) &&
+ RelFileLocatorEquals(bufHdr->tag.rlocator, relation->rd_locator) &&
bufHdr->tag.forkNum == forkNum)
return buffer;
ResourceOwnerForgetBuffer(CurrentResourceOwner, buffer);
bufHdr = GetBufferDescriptor(buffer - 1);
/* we have pin, so it's ok to examine tag without spinlock */
if (bufHdr->tag.blockNum == blockNum &&
- RelFileNodeEquals(bufHdr->tag.rnode, relation->rd_node) &&
+ RelFileLocatorEquals(bufHdr->tag.rlocator, relation->rd_locator) &&
bufHdr->tag.forkNum == forkNum)
return buffer;
UnpinBuffer(bufHdr, true);
item = &CkptBufferIds[num_to_scan++];
item->buf_id = buf_id;
- item->tsId = bufHdr->tag.rnode.spcNode;
- item->relNode = bufHdr->tag.rnode.relNode;
+ item->tsId = bufHdr->tag.rlocator.spcOid;
+ item->relNumber = bufHdr->tag.rlocator.relNumber;
item->forkNum = bufHdr->tag.forkNum;
item->blockNum = bufHdr->tag.blockNum;
}
}
/* theoretically we should lock the bufhdr here */
- path = relpathbackend(buf->tag.rnode, backend, buf->tag.forkNum);
+ path = relpathbackend(buf->tag.rlocator, backend, buf->tag.forkNum);
buf_state = pg_atomic_read_u32(&buf->state);
elog(WARNING,
"buffer refcount leak: [%03d] "
/*
* BufferGetTag
- * Returns the relfilenode, fork number and block number associated with
+ * Returns the relfilelocator, fork number and block number associated with
* a buffer.
*/
void
-BufferGetTag(Buffer buffer, RelFileNode *rnode, ForkNumber *forknum,
+BufferGetTag(Buffer buffer, RelFileLocator *rlocator, ForkNumber *forknum,
BlockNumber *blknum)
{
BufferDesc *bufHdr;
bufHdr = GetBufferDescriptor(buffer - 1);
/* pinned, so OK to read tag without spinlock */
- *rnode = bufHdr->tag.rnode;
+ *rlocator = bufHdr->tag.rlocator;
*forknum = bufHdr->tag.forkNum;
*blknum = bufHdr->tag.blockNum;
}
/* Find smgr relation for buffer */
if (reln == NULL)
- reln = smgropen(buf->tag.rnode, InvalidBackendId);
+ reln = smgropen(buf->tag.rlocator, InvalidBackendId);
TRACE_POSTGRESQL_BUFFER_FLUSH_START(buf->tag.forkNum,
buf->tag.blockNum,
- reln->smgr_rnode.node.spcNode,
- reln->smgr_rnode.node.dbNode,
- reln->smgr_rnode.node.relNode);
+ reln->smgr_rlocator.locator.spcOid,
+ reln->smgr_rlocator.locator.dbOid,
+ reln->smgr_rlocator.locator.relNumber);
buf_state = LockBufHdr(buf);
TRACE_POSTGRESQL_BUFFER_FLUSH_DONE(buf->tag.forkNum,
buf->tag.blockNum,
- reln->smgr_rnode.node.spcNode,
- reln->smgr_rnode.node.dbNode,
- reln->smgr_rnode.node.relNode);
+ reln->smgr_rlocator.locator.spcOid,
+ reln->smgr_rlocator.locator.dbOid,
+ reln->smgr_rlocator.locator.relNumber);
/* Pop the error context stack */
error_context_stack = errcallback.previous;
}
/* ---------------------------------------------------------------------
- * DropRelFileNodeBuffers
+ * DropRelFileLocatorBuffers
*
* This function removes from the buffer pool all the pages of the
* specified relation forks that have block numbers >= firstDelBlock.
* --------------------------------------------------------------------
*/
void
-DropRelFileNodeBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum,
- int nforks, BlockNumber *firstDelBlock)
+DropRelFileLocatorBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum,
+ int nforks, BlockNumber *firstDelBlock)
{
int i;
int j;
- RelFileNodeBackend rnode;
+ RelFileLocatorBackend rlocator;
BlockNumber nForkBlock[MAX_FORKNUM];
uint64 nBlocksToInvalidate = 0;
- rnode = smgr_reln->smgr_rnode;
+ rlocator = smgr_reln->smgr_rlocator;
/* If it's a local relation, it's localbuf.c's problem. */
- if (RelFileNodeBackendIsTemp(rnode))
+ if (RelFileLocatorBackendIsTemp(rlocator))
{
- if (rnode.backend == MyBackendId)
+ if (rlocator.backend == MyBackendId)
{
for (j = 0; j < nforks; j++)
- DropRelFileNodeLocalBuffers(rnode.node, forkNum[j],
- firstDelBlock[j]);
+ DropRelFileLocatorLocalBuffers(rlocator.locator, forkNum[j],
+ firstDelBlock[j]);
}
return;
}
nBlocksToInvalidate < BUF_DROP_FULL_SCAN_THRESHOLD)
{
for (j = 0; j < nforks; j++)
- FindAndDropRelFileNodeBuffers(rnode.node, forkNum[j],
- nForkBlock[j], firstDelBlock[j]);
+ FindAndDropRelFileLocatorBuffers(rlocator.locator, forkNum[j],
+ nForkBlock[j], firstDelBlock[j]);
return;
}
* false positives are safe because we'll recheck after getting the
* buffer lock.
*
- * We could check forkNum and blockNum as well as the rnode, but the
- * incremental win from doing so seems small.
+ * We could check forkNum and blockNum as well as the rlocator, but
+ * the incremental win from doing so seems small.
*/
- if (!RelFileNodeEquals(bufHdr->tag.rnode, rnode.node))
+ if (!RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator.locator))
continue;
buf_state = LockBufHdr(bufHdr);
for (j = 0; j < nforks; j++)
{
- if (RelFileNodeEquals(bufHdr->tag.rnode, rnode.node) &&
+ if (RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator.locator) &&
bufHdr->tag.forkNum == forkNum[j] &&
bufHdr->tag.blockNum >= firstDelBlock[j])
{
}
/* ---------------------------------------------------------------------
- * DropRelFileNodesAllBuffers
+ * DropRelFileLocatorsAllBuffers
*
* This function removes from the buffer pool all the pages of all
* forks of the specified relations. It's equivalent to calling
- * DropRelFileNodeBuffers once per fork per relation with
+ * DropRelFileLocatorBuffers once per fork per relation with
* firstDelBlock = 0.
* --------------------------------------------------------------------
*/
void
-DropRelFileNodesAllBuffers(SMgrRelation *smgr_reln, int nnodes)
+DropRelFileLocatorsAllBuffers(SMgrRelation *smgr_reln, int nlocators)
{
int i;
int j;
SMgrRelation *rels;
BlockNumber (*block)[MAX_FORKNUM + 1];
uint64 nBlocksToInvalidate = 0;
- RelFileNode *nodes;
+ RelFileLocator *locators;
bool cached = true;
bool use_bsearch;
- if (nnodes == 0)
+ if (nlocators == 0)
return;
- rels = palloc(sizeof(SMgrRelation) * nnodes); /* non-local relations */
+ rels = palloc(sizeof(SMgrRelation) * nlocators); /* non-local relations */
/* If it's a local relation, it's localbuf.c's problem. */
- for (i = 0; i < nnodes; i++)
+ for (i = 0; i < nlocators; i++)
{
- if (RelFileNodeBackendIsTemp(smgr_reln[i]->smgr_rnode))
+ if (RelFileLocatorBackendIsTemp(smgr_reln[i]->smgr_rlocator))
{
- if (smgr_reln[i]->smgr_rnode.backend == MyBackendId)
- DropRelFileNodeAllLocalBuffers(smgr_reln[i]->smgr_rnode.node);
+ if (smgr_reln[i]->smgr_rlocator.backend == MyBackendId)
+ DropRelFileLocatorAllLocalBuffers(smgr_reln[i]->smgr_rlocator.locator);
}
else
rels[n++] = smgr_reln[i];
/*
* We can avoid scanning the entire buffer pool if we know the exact size
- * of each of the given relation forks. See DropRelFileNodeBuffers.
+ * of each of the given relation forks. See DropRelFileLocatorBuffers.
*/
for (i = 0; i < n && cached; i++)
{
continue;
/* drop all the buffers for a particular relation fork */
- FindAndDropRelFileNodeBuffers(rels[i]->smgr_rnode.node,
- j, block[i][j], 0);
+ FindAndDropRelFileLocatorBuffers(rels[i]->smgr_rlocator.locator,
+ j, block[i][j], 0);
}
}
}
pfree(block);
- nodes = palloc(sizeof(RelFileNode) * n); /* non-local relations */
+ locators = palloc(sizeof(RelFileLocator) * n); /* non-local relations */
for (i = 0; i < n; i++)
- nodes[i] = rels[i]->smgr_rnode.node;
+ locators[i] = rels[i]->smgr_rlocator.locator;
/*
* For low number of relations to drop just use a simple walk through, to
*/
use_bsearch = n > RELS_BSEARCH_THRESHOLD;
- /* sort the list of rnodes if necessary */
+ /* sort the list of rlocators if necessary */
if (use_bsearch)
- pg_qsort(nodes, n, sizeof(RelFileNode), rnode_comparator);
+ pg_qsort(locators, n, sizeof(RelFileLocator), rlocator_comparator);
for (i = 0; i < NBuffers; i++)
{
- RelFileNode *rnode = NULL;
+ RelFileLocator *rlocator = NULL;
BufferDesc *bufHdr = GetBufferDescriptor(i);
uint32 buf_state;
/*
- * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
- * and saves some cycles.
+ * As in DropRelFileLocatorBuffers, an unlocked precheck should be
+ * safe and saves some cycles.
*/
if (!use_bsearch)
for (j = 0; j < n; j++)
{
- if (RelFileNodeEquals(bufHdr->tag.rnode, nodes[j]))
+ if (RelFileLocatorEquals(bufHdr->tag.rlocator, locators[j]))
{
- rnode = &nodes[j];
+ rlocator = &locators[j];
break;
}
}
}
else
{
- rnode = bsearch((const void *) &(bufHdr->tag.rnode),
- nodes, n, sizeof(RelFileNode),
- rnode_comparator);
+ rlocator = bsearch((const void *) &(bufHdr->tag.rlocator),
+ locators, n, sizeof(RelFileLocator),
+ rlocator_comparator);
}
- /* buffer doesn't belong to any of the given relfilenodes; skip it */
- if (rnode == NULL)
+ /* buffer doesn't belong to any of the given relfilelocators; skip it */
+ if (rlocator == NULL)
continue;
buf_state = LockBufHdr(bufHdr);
- if (RelFileNodeEquals(bufHdr->tag.rnode, (*rnode)))
+ if (RelFileLocatorEquals(bufHdr->tag.rlocator, (*rlocator)))
InvalidateBuffer(bufHdr); /* releases spinlock */
else
UnlockBufHdr(bufHdr, buf_state);
}
- pfree(nodes);
+ pfree(locators);
pfree(rels);
}
/* ---------------------------------------------------------------------
- * FindAndDropRelFileNodeBuffers
+ * FindAndDropRelFileLocatorBuffers
*
* This function performs a lookup in the BufMapping table and removes from
* the buffer pool all pages of the specified relation fork that have block
* numbers >= firstDelBlock.
* --------------------------------------------------------------------
*/
static void
-FindAndDropRelFileNodeBuffers(RelFileNode rnode, ForkNumber forkNum,
- BlockNumber nForkBlock,
- BlockNumber firstDelBlock)
+FindAndDropRelFileLocatorBuffers(RelFileLocator rlocator, ForkNumber forkNum,
+ BlockNumber nForkBlock,
+ BlockNumber firstDelBlock)
{
BlockNumber curBlock;
uint32 buf_state;
/* create a tag so we can lookup the buffer */
- INIT_BUFFERTAG(bufTag, rnode, forkNum, curBlock);
+ INIT_BUFFERTAG(bufTag, rlocator, forkNum, curBlock);
/* determine its hash code and partition lock ID */
bufHash = BufTableHashCode(&bufTag);
*/
buf_state = LockBufHdr(bufHdr);
- if (RelFileNodeEquals(bufHdr->tag.rnode, rnode) &&
+ if (RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator) &&
bufHdr->tag.forkNum == forkNum &&
bufHdr->tag.blockNum >= firstDelBlock)
InvalidateBuffer(bufHdr); /* releases spinlock */
* bothering to write them out first. This is used when we destroy a
* database, to avoid trying to flush data to disk when the directory
* tree no longer exists. Implementation is pretty similar to
- * DropRelFileNodeBuffers() which is for destroying just one relation.
+ * DropRelFileLocatorBuffers() which is for destroying just one relation.
* --------------------------------------------------------------------
*/
void
uint32 buf_state;
/*
- * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
- * and saves some cycles.
+ * As in DropRelFileLocatorBuffers, an unlocked precheck should be
+ * safe and saves some cycles.
*/
- if (bufHdr->tag.rnode.dbNode != dbid)
+ if (bufHdr->tag.rlocator.dbOid != dbid)
continue;
buf_state = LockBufHdr(bufHdr);
- if (bufHdr->tag.rnode.dbNode == dbid)
+ if (bufHdr->tag.rlocator.dbOid == dbid)
InvalidateBuffer(bufHdr); /* releases spinlock */
else
UnlockBufHdr(bufHdr, buf_state);
"[%02d] (freeNext=%d, rel=%s, "
"blockNum=%u, flags=0x%x, refcount=%u %d)",
i, buf->freeNext,
- relpathbackend(buf->tag.rnode, InvalidBackendId, buf->tag.forkNum),
+ relpathbackend(buf->tag.rlocator, InvalidBackendId, buf->tag.forkNum),
buf->tag.blockNum, buf->flags,
buf->refcount, GetPrivateRefCount(b));
}
"[%02d] (freeNext=%d, rel=%s, "
"blockNum=%u, flags=0x%x, refcount=%u %d)",
i, buf->freeNext,
- relpathperm(buf->tag.rnode, buf->tag.forkNum),
+ relpathperm(buf->tag.rlocator, buf->tag.forkNum),
buf->tag.blockNum, buf->flags,
buf->refcount, GetPrivateRefCount(b));
}
uint32 buf_state;
bufHdr = GetLocalBufferDescriptor(i);
- if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
+ if (RelFileLocatorEquals(bufHdr->tag.rlocator, rel->rd_locator) &&
((buf_state = pg_atomic_read_u32(&bufHdr->state)) &
(BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
{
bufHdr = GetBufferDescriptor(i);
/*
- * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
- * and saves some cycles.
+ * As in DropRelFileLocatorBuffers, an unlocked precheck should be
+ * safe and saves some cycles.
*/
- if (!RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node))
+ if (!RelFileLocatorEquals(bufHdr->tag.rlocator, rel->rd_locator))
continue;
ReservePrivateRefCountEntry();
buf_state = LockBufHdr(bufHdr);
- if (RelFileNodeEquals(bufHdr->tag.rnode, rel->rd_node) &&
+ if (RelFileLocatorEquals(bufHdr->tag.rlocator, rel->rd_locator) &&
(buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
{
PinBuffer_Locked(bufHdr);
for (i = 0; i < nrels; i++)
{
- Assert(!RelFileNodeBackendIsTemp(smgrs[i]->smgr_rnode));
+ Assert(!RelFileLocatorBackendIsTemp(smgrs[i]->smgr_rlocator));
- srels[i].rnode = smgrs[i]->smgr_rnode.node;
+ srels[i].rlocator = smgrs[i]->smgr_rlocator.locator;
srels[i].srel = smgrs[i];
}
/*
* Save the bsearch overhead for low number of relations to sync. See
- * DropRelFileNodesAllBuffers for details.
+ * DropRelFileLocatorsAllBuffers for details.
*/
use_bsearch = nrels > RELS_BSEARCH_THRESHOLD;
/* sort the list of SMgrRelations if necessary */
if (use_bsearch)
- pg_qsort(srels, nrels, sizeof(SMgrSortArray), rnode_comparator);
+ pg_qsort(srels, nrels, sizeof(SMgrSortArray), rlocator_comparator);
/* Make sure we can handle the pin inside the loop */
ResourceOwnerEnlargeBuffers(CurrentResourceOwner);
uint32 buf_state;
/*
- * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
- * and saves some cycles.
+ * As in DropRelFileLocatorBuffers, an unlocked precheck should be
+ * safe and saves some cycles.
*/
if (!use_bsearch)
for (j = 0; j < nrels; j++)
{
- if (RelFileNodeEquals(bufHdr->tag.rnode, srels[j].rnode))
+ if (RelFileLocatorEquals(bufHdr->tag.rlocator, srels[j].rlocator))
{
srelent = &srels[j];
break;
}
else
{
- srelent = bsearch((const void *) &(bufHdr->tag.rnode),
+ srelent = bsearch((const void *) &(bufHdr->tag.rlocator),
srels, nrels, sizeof(SMgrSortArray),
- rnode_comparator);
+ rlocator_comparator);
}
- /* buffer doesn't belong to any of the given relfilenodes; skip it */
+ /* buffer doesn't belong to any of the given relfilelocators; skip it */
if (srelent == NULL)
continue;
ReservePrivateRefCountEntry();
buf_state = LockBufHdr(bufHdr);
- if (RelFileNodeEquals(bufHdr->tag.rnode, srelent->rnode) &&
+ if (RelFileLocatorEquals(bufHdr->tag.rlocator, srelent->rlocator) &&
(buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
{
PinBuffer_Locked(bufHdr);
CHECK_FOR_INTERRUPTS();
/* Read block from source relation. */
- srcBuf = ReadBufferWithoutRelcache(src->rd_node, forkNum, blkno,
+ srcBuf = ReadBufferWithoutRelcache(src->rd_locator, forkNum, blkno,
RBM_NORMAL, bstrategy_src,
permanent);
srcPage = BufferGetPage(srcBuf);
}
/* Use P_NEW to extend the destination relation. */
- dstBuf = ReadBufferWithoutRelcache(dst->rd_node, forkNum, P_NEW,
+ dstBuf = ReadBufferWithoutRelcache(dst->rd_locator, forkNum, P_NEW,
RBM_NORMAL, bstrategy_dst,
permanent);
LockBuffer(dstBuf, BUFFER_LOCK_EXCLUSIVE);
* --------------------------------------------------------------------
*/
void
-CreateAndCopyRelationData(RelFileNode src_rnode, RelFileNode dst_rnode,
- bool permanent)
+CreateAndCopyRelationData(RelFileLocator src_rlocator,
+ RelFileLocator dst_rlocator, bool permanent)
{
Relation src_rel;
Relation dst_rel;
* used the smgr layer directly, we would have to worry about
* invalidations.
*/
- src_rel = CreateFakeRelcacheEntry(src_rnode);
- dst_rel = CreateFakeRelcacheEntry(dst_rnode);
+ src_rel = CreateFakeRelcacheEntry(src_rlocator);
+ dst_rel = CreateFakeRelcacheEntry(dst_rlocator);
/*
* Create and copy all forks of the relation. During create database we
* directory. Therefore, each individual relation doesn't need to be
* registered for cleanup.
*/
- RelationCreateStorage(dst_rnode, relpersistence, false);
+ RelationCreateStorage(dst_rlocator, relpersistence, false);
/* copy main fork. */
RelationCopyStorageUsingBuffer(src_rel, dst_rel, MAIN_FORKNUM, permanent);
* init fork of an unlogged relation.
*/
if (permanent || forkNum == INIT_FORKNUM)
- log_smgrcreate(&dst_rnode, forkNum);
+ log_smgrcreate(&dst_rlocator, forkNum);
/* Copy a fork's data, block by block. */
RelationCopyStorageUsingBuffer(src_rel, dst_rel, forkNum,
bufHdr = GetBufferDescriptor(i);
/*
- * As in DropRelFileNodeBuffers, an unlocked precheck should be safe
- * and saves some cycles.
+ * As in DropRelFileLocatorBuffers, an unlocked precheck should be
+ * safe and saves some cycles.
*/
- if (bufHdr->tag.rnode.dbNode != dbid)
+ if (bufHdr->tag.rlocator.dbOid != dbid)
continue;
ReservePrivateRefCountEntry();
buf_state = LockBufHdr(bufHdr);
- if (bufHdr->tag.rnode.dbNode == dbid &&
+ if (bufHdr->tag.rlocator.dbOid == dbid &&
(buf_state & (BM_VALID | BM_DIRTY)) == (BM_VALID | BM_DIRTY))
{
PinBuffer_Locked(bufHdr);
(pg_atomic_read_u32(&bufHdr->state) & BM_PERMANENT))
{
/*
- * If we must not write WAL, due to a relfilenode-specific
+ * If we must not write WAL, due to a relfilelocator-specific
* condition or being in recovery, don't dirty the page. We can
* set the hint, just not dirty the page as a result so the hint
* is lost when we evict the page or shutdown.
* See src/backend/storage/page/README for longer discussion.
*/
if (RecoveryInProgress() ||
- RelFileNodeSkippingWAL(bufHdr->tag.rnode))
+ RelFileLocatorSkippingWAL(bufHdr->tag.rlocator))
return;
/*
/* Buffer is pinned, so we can read tag without spinlock */
char *path;
- path = relpathperm(buf->tag.rnode, buf->tag.forkNum);
+ path = relpathperm(buf->tag.rlocator, buf->tag.forkNum);
ereport(WARNING,
(errcode(ERRCODE_IO_ERROR),
errmsg("could not write block %u of %s",
/* Buffer is pinned, so we can read the tag without locking the spinlock */
if (bufHdr != NULL)
{
- char *path = relpathperm(bufHdr->tag.rnode, bufHdr->tag.forkNum);
+ char *path = relpathperm(bufHdr->tag.rlocator, bufHdr->tag.forkNum);
errcontext("writing block %u of relation %s",
bufHdr->tag.blockNum, path);
if (bufHdr != NULL)
{
- char *path = relpathbackend(bufHdr->tag.rnode, MyBackendId,
+ char *path = relpathbackend(bufHdr->tag.rlocator, MyBackendId,
bufHdr->tag.forkNum);
errcontext("writing block %u of relation %s",
}
/*
- * RelFileNode qsort/bsearch comparator; see RelFileNodeEquals.
+ * RelFileLocator qsort/bsearch comparator; see RelFileLocatorEquals.
*/
static int
-rnode_comparator(const void *p1, const void *p2)
+rlocator_comparator(const void *p1, const void *p2)
{
- RelFileNode n1 = *(const RelFileNode *) p1;
- RelFileNode n2 = *(const RelFileNode *) p2;
+ RelFileLocator n1 = *(const RelFileLocator *) p1;
+ RelFileLocator n2 = *(const RelFileLocator *) p2;
- if (n1.relNode < n2.relNode)
+ if (n1.relNumber < n2.relNumber)
return -1;
- else if (n1.relNode > n2.relNode)
+ else if (n1.relNumber > n2.relNumber)
return 1;
- if (n1.dbNode < n2.dbNode)
+ if (n1.dbOid < n2.dbOid)
return -1;
- else if (n1.dbNode > n2.dbNode)
+ else if (n1.dbOid > n2.dbOid)
return 1;
- if (n1.spcNode < n2.spcNode)
+ if (n1.spcOid < n2.spcOid)
return -1;
- else if (n1.spcNode > n2.spcNode)
+ else if (n1.spcOid > n2.spcOid)
return 1;
else
return 0;
{
int ret;
- ret = rnode_comparator(&ba->rnode, &bb->rnode);
+ ret = rlocator_comparator(&ba->rlocator, &bb->rlocator);
if (ret != 0)
return ret;
else if (a->tsId > b->tsId)
return 1;
/* compare relation */
- if (a->relNode < b->relNode)
+ if (a->relNumber < b->relNumber)
return -1;
- else if (a->relNode > b->relNode)
+ else if (a->relNumber > b->relNumber)
return 1;
/* compare fork */
else if (a->forkNum < b->forkNum)
next = &context->pending_writebacks[i + ahead + 1];
/* different file, stop */
- if (!RelFileNodeEquals(cur->tag.rnode, next->tag.rnode) ||
+ if (!RelFileLocatorEquals(cur->tag.rlocator, next->tag.rlocator) ||
cur->tag.forkNum != next->tag.forkNum)
break;
i += ahead;
/* and finally tell the kernel to write the data to storage */
- reln = smgropen(tag.rnode, InvalidBackendId);
+ reln = smgropen(tag.rlocator, InvalidBackendId);
smgrwriteback(reln, tag.forkNum, tag.blockNum, nblocks);
}
BufferTag newTag; /* identity of requested block */
LocalBufferLookupEnt *hresult;
- INIT_BUFFERTAG(newTag, smgr->smgr_rnode.node, forkNum, blockNum);
+ INIT_BUFFERTAG(newTag, smgr->smgr_rlocator.locator, forkNum, blockNum);
/* Initialize local buffers if first request in this session */
if (LocalBufHash == NULL)
bool found;
uint32 buf_state;
- INIT_BUFFERTAG(newTag, smgr->smgr_rnode.node, forkNum, blockNum);
+ INIT_BUFFERTAG(newTag, smgr->smgr_rlocator.locator, forkNum, blockNum);
/* Initialize local buffers if first request in this session */
if (LocalBufHash == NULL)
Assert(BUFFERTAGS_EQUAL(bufHdr->tag, newTag));
#ifdef LBDEBUG
fprintf(stderr, "LB ALLOC (%u,%d,%d) %d\n",
- smgr->smgr_rnode.node.relNode, forkNum, blockNum, -b - 1);
+ smgr->smgr_rlocator.locator.relNumber, forkNum, blockNum, -b - 1);
#endif
buf_state = pg_atomic_read_u32(&bufHdr->state);
#ifdef LBDEBUG
fprintf(stderr, "LB ALLOC (%u,%d,%d) %d\n",
- smgr->smgr_rnode.node.relNode, forkNum, blockNum,
+ smgr->smgr_rlocator.locator.relNumber, forkNum, blockNum,
-nextFreeLocalBuf - 1);
#endif
Page localpage = (char *) LocalBufHdrGetBlock(bufHdr);
/* Find smgr relation for buffer */
- oreln = smgropen(bufHdr->tag.rnode, MyBackendId);
+ oreln = smgropen(bufHdr->tag.rlocator, MyBackendId);
PageSetChecksumInplace(localpage, bufHdr->tag.blockNum);
}
/*
- * DropRelFileNodeLocalBuffers
+ * DropRelFileLocatorLocalBuffers
* This function removes from the buffer pool all the pages of the
* specified relation that have block numbers >= firstDelBlock.
* (In particular, with firstDelBlock = 0, all pages are removed.)
* out first. Therefore, this is NOT rollback-able, and so should be
* used only with extreme caution!
*
- * See DropRelFileNodeBuffers in bufmgr.c for more notes.
+ * See DropRelFileLocatorBuffers in bufmgr.c for more notes.
*/
void
-DropRelFileNodeLocalBuffers(RelFileNode rnode, ForkNumber forkNum,
- BlockNumber firstDelBlock)
+DropRelFileLocatorLocalBuffers(RelFileLocator rlocator, ForkNumber forkNum,
+ BlockNumber firstDelBlock)
{
int i;
buf_state = pg_atomic_read_u32(&bufHdr->state);
if ((buf_state & BM_TAG_VALID) &&
- RelFileNodeEquals(bufHdr->tag.rnode, rnode) &&
+ RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator) &&
bufHdr->tag.forkNum == forkNum &&
bufHdr->tag.blockNum >= firstDelBlock)
{
if (LocalRefCount[i] != 0)
elog(ERROR, "block %u of %s is still referenced (local %u)",
bufHdr->tag.blockNum,
- relpathbackend(bufHdr->tag.rnode, MyBackendId,
+ relpathbackend(bufHdr->tag.rlocator, MyBackendId,
bufHdr->tag.forkNum),
LocalRefCount[i]);
/* Remove entry from hashtable */
}
/*
- * DropRelFileNodeAllLocalBuffers
+ * DropRelFileLocatorAllLocalBuffers
* This function removes from the buffer pool all pages of all forks
* of the specified relation.
*
- * See DropRelFileNodesAllBuffers in bufmgr.c for more notes.
+ * See DropRelFileLocatorsAllBuffers in bufmgr.c for more notes.
*/
void
-DropRelFileNodeAllLocalBuffers(RelFileNode rnode)
+DropRelFileLocatorAllLocalBuffers(RelFileLocator rlocator)
{
int i;
buf_state = pg_atomic_read_u32(&bufHdr->state);
if ((buf_state & BM_TAG_VALID) &&
- RelFileNodeEquals(bufHdr->tag.rnode, rnode))
+ RelFileLocatorEquals(bufHdr->tag.rlocator, rlocator))
{
if (LocalRefCount[i] != 0)
elog(ERROR, "block %u of %s is still referenced (local %u)",
bufHdr->tag.blockNum,
- relpathbackend(bufHdr->tag.rnode, MyBackendId,
+ relpathbackend(bufHdr->tag.rlocator, MyBackendId,
bufHdr->tag.forkNum),
LocalRefCount[i]);
/* Remove entry from hashtable */
{
/*
* We shouldn't be holding any remaining pins; if we are, and assertions
- * aren't enabled, we'll fail later in DropRelFileNodeBuffers while trying
- * to drop the temp rels.
+ * aren't enabled, we'll fail later in DropRelFileLocatorBuffers while
+ * trying to drop the temp rels.
*/
CheckForLocalBufferLeaks();
}
* WAL replay
*/
void
-XLogRecordPageWithFreeSpace(RelFileNode rnode, BlockNumber heapBlk,
+XLogRecordPageWithFreeSpace(RelFileLocator rlocator, BlockNumber heapBlk,
Size spaceAvail)
{
int new_cat = fsm_space_avail_to_cat(spaceAvail);
blkno = fsm_logical_to_physical(addr);
/* If the page doesn't exist already, extend */
- buf = XLogReadBufferExtended(rnode, FSM_FORKNUM, blkno, RBM_ZERO_ON_ERROR,
- InvalidBuffer);
+ buf = XLogReadBufferExtended(rlocator, FSM_FORKNUM, blkno,
+ RBM_ZERO_ON_ERROR, InvalidBuffer);
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
page = BufferGetPage(buf);
*
* Fix the corruption and restart.
*/
- RelFileNode rnode;
+ RelFileLocator rlocator;
ForkNumber forknum;
BlockNumber blknum;
- BufferGetTag(buf, &rnode, &forknum, &blknum);
+ BufferGetTag(buf, &rlocator, &forknum, &blknum);
elog(DEBUG1, "fixing corrupt FSM block %u, relation %u/%u/%u",
- blknum, rnode.spcNode, rnode.dbNode, rnode.relNode);
+ blknum, rlocator.spcOid, rlocator.dbOid, rlocator.relNumber);
/* make sure we hold an exclusive lock */
if (!exclusive_lock_held)
}
void
-ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid, RelFileNode node)
+ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid, RelFileLocator locator)
{
VirtualTransactionId *backends;
return;
backends = GetConflictingVirtualXIDs(latestRemovedXid,
- node.dbNode);
+ locator.dbOid);
ResolveRecoveryConflictWithVirtualXIDs(backends,
PROCSIG_RECOVERY_CONFLICT_SNAPSHOT,
*/
void
ResolveRecoveryConflictWithSnapshotFullXid(FullTransactionId latestRemovedFullXid,
- RelFileNode node)
+ RelFileLocator locator)
{
/*
* ResolveRecoveryConflictWithSnapshot operates on 32-bit TransactionIds,
TransactionId latestRemovedXid;
latestRemovedXid = XidFromFullTransactionId(latestRemovedFullXid);
- ResolveRecoveryConflictWithSnapshot(latestRemovedXid, node);
+ ResolveRecoveryConflictWithSnapshot(latestRemovedXid, locator);
}
}
PREDICATELOCKTARGET *target;
SET_PREDICATELOCKTARGETTAG_PAGE(targettag,
- relation->rd_node.dbNode,
+ relation->rd_locator.dbOid,
relation->rd_id,
blkno);
return;
SET_PREDICATELOCKTARGETTAG_RELATION(tag,
- relation->rd_node.dbNode,
+ relation->rd_locator.dbOid,
relation->rd_id);
PredicateLockAcquire(&tag);
}
return;
SET_PREDICATELOCKTARGETTAG_PAGE(tag,
- relation->rd_node.dbNode,
+ relation->rd_locator.dbOid,
relation->rd_id,
blkno);
PredicateLockAcquire(&tag);
* level lock.
*/
SET_PREDICATELOCKTARGETTAG_RELATION(tag,
- relation->rd_node.dbNode,
+ relation->rd_locator.dbOid,
relation->rd_id);
if (PredicateLockExists(&tag))
return;
SET_PREDICATELOCKTARGETTAG_TUPLE(tag,
- relation->rd_node.dbNode,
+ relation->rd_locator.dbOid,
relation->rd_id,
ItemPointerGetBlockNumber(tid),
ItemPointerGetOffsetNumber(tid));
if (!PredicateLockingNeededForRelation(relation))
return;
- dbId = relation->rd_node.dbNode;
+ dbId = relation->rd_locator.dbOid;
relId = relation->rd_id;
if (relation->rd_index == NULL)
{
Assert(BlockNumberIsValid(newblkno));
SET_PREDICATELOCKTARGETTAG_PAGE(oldtargettag,
- relation->rd_node.dbNode,
+ relation->rd_locator.dbOid,
relation->rd_id,
oldblkno);
SET_PREDICATELOCKTARGETTAG_PAGE(newtargettag,
- relation->rd_node.dbNode,
+ relation->rd_locator.dbOid,
relation->rd_id,
newblkno);
if (tid != NULL)
{
SET_PREDICATELOCKTARGETTAG_TUPLE(targettag,
- relation->rd_node.dbNode,
+ relation->rd_locator.dbOid,
relation->rd_id,
ItemPointerGetBlockNumber(tid),
ItemPointerGetOffsetNumber(tid));
if (blkno != InvalidBlockNumber)
{
SET_PREDICATELOCKTARGETTAG_PAGE(targettag,
- relation->rd_node.dbNode,
+ relation->rd_locator.dbOid,
relation->rd_id,
blkno);
CheckTargetForConflictsIn(&targettag);
}
SET_PREDICATELOCKTARGETTAG_RELATION(targettag,
- relation->rd_node.dbNode,
+ relation->rd_locator.dbOid,
relation->rd_id);
CheckTargetForConflictsIn(&targettag);
}
Assert(relation->rd_index == NULL); /* not an index relation */
- dbId = relation->rd_node.dbNode;
+ dbId = relation->rd_locator.dbOid;
heapId = relation->rd_id;
LWLockAcquire(SerializablePredicateListLock, LW_EXCLUSIVE);
It is assumed that the main fork, fork number 0 or MAIN_FORKNUM, always
exists. Fork numbers are assigned in src/include/common/relpath.h.
Functions in smgr.c and md.c take an extra fork number argument, in addition
-to relfilenode and block number, to identify which relation fork you want to
+to relfilelocator and block number, to identify which relation fork you want to
access. Since most code wants to access the main fork, a shortcut version of
ReadBuffer that accesses MAIN_FORKNUM is provided in the buffer manager for
convenience.
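A minimal sketch (not part of the patch) of how the fork argument surfaces at
the buffer-manager level, assuming rel is an open Relation and blkno a valid
block number:

    /* shortcut form: always reads the main fork */
    Buffer main_buf = ReadBuffer(rel, blkno);

    /* explicit form: pick another fork, e.g. the free space map */
    Buffer fsm_buf = ReadBufferExtended(rel, FSM_FORKNUM, blkno,
                                        RBM_NORMAL, NULL);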
#include "storage/bufmgr.h"
#include "storage/fd.h"
#include "storage/md.h"
-#include "storage/relfilenode.h"
+#include "storage/relfilelocator.h"
#include "storage/smgr.h"
#include "storage/sync.h"
#include "utils/hsearch.h"
/* Populate a file tag describing an md.c segment file. */
-#define INIT_MD_FILETAG(a,xx_rnode,xx_forknum,xx_segno) \
+#define INIT_MD_FILETAG(a,xx_rlocator,xx_forknum,xx_segno) \
( \
memset(&(a), 0, sizeof(FileTag)), \
(a).handler = SYNC_HANDLER_MD, \
- (a).rnode = (xx_rnode), \
+ (a).rlocator = (xx_rlocator), \
(a).forknum = (xx_forknum), \
(a).segno = (xx_segno) \
)
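A sketch of how this macro is typically used, mirroring register_dirty_segment()
below (reln is assumed to be an open SMgrRelation):

    FileTag tag;

    /* identify segment 0 of the main fork and queue it for fsync */
    INIT_MD_FILETAG(tag, reln->smgr_rlocator.locator, MAIN_FORKNUM, 0);
    RegisterSyncRequest(&tag, SYNC_REQUEST, false /* retryOnError */ );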
/* local routines */
-static void mdunlinkfork(RelFileNodeBackend rnode, ForkNumber forkNum,
+static void mdunlinkfork(RelFileLocatorBackend rlocator, ForkNumber forkNum,
bool isRedo);
static MdfdVec *mdopenfork(SMgrRelation reln, ForkNumber forknum, int behavior);
static void register_dirty_segment(SMgrRelation reln, ForkNumber forknum,
MdfdVec *seg);
-static void register_unlink_segment(RelFileNodeBackend rnode, ForkNumber forknum,
+static void register_unlink_segment(RelFileLocatorBackend rlocator, ForkNumber forknum,
BlockNumber segno);
-static void register_forget_request(RelFileNodeBackend rnode, ForkNumber forknum,
+static void register_forget_request(RelFileLocatorBackend rlocator, ForkNumber forknum,
BlockNumber segno);
static void _fdvec_resize(SMgrRelation reln,
ForkNumber forknum,
* should be here and not in commands/tablespace.c? But that would imply
* importing a lot of stuff that smgr.c oughtn't know, either.
*/
- TablespaceCreateDbspace(reln->smgr_rnode.node.spcNode,
- reln->smgr_rnode.node.dbNode,
+ TablespaceCreateDbspace(reln->smgr_rlocator.locator.spcOid,
+ reln->smgr_rlocator.locator.dbOid,
isRedo);
- path = relpath(reln->smgr_rnode, forkNum);
+ path = relpath(reln->smgr_rlocator, forkNum);
fd = PathNameOpenFile(path, O_RDWR | O_CREAT | O_EXCL | PG_BINARY);
/*
* mdunlink() -- Unlink a relation.
*
- * Note that we're passed a RelFileNodeBackend --- by the time this is called,
+ * Note that we're passed a RelFileLocatorBackend --- by the time this is called,
* there won't be an SMgrRelation hashtable entry anymore.
*
* forkNum can be a fork number to delete a specific fork, or InvalidForkNumber
* For regular relations, we don't unlink the first segment file of the rel,
* but just truncate it to zero length, and record a request to unlink it after
* the next checkpoint. Additional segments can be unlinked immediately,
- * however. Leaving the empty file in place prevents that relfilenode
- * number from being reused. The scenario this protects us from is:
+ * however. Leaving the empty file in place prevents that relfilenumber
+ * from being reused. The scenario this protects us from is:
* 1. We delete a relation (and commit, and actually remove its file).
- * 2. We create a new relation, which by chance gets the same relfilenode as
+ * 2. We create a new relation, which by chance gets the same relfilenumber as
* the just-deleted one (OIDs must've wrapped around for that to happen).
* 3. We crash before another checkpoint occurs.
* During replay, we would delete the file and then recreate it, which is fine
* But if we didn't WAL-log insertions, but instead relied on fsyncing the
* file after populating it (as we do at wal_level=minimal), the contents of
* the file would be lost forever. By leaving the empty file until after the
- * next checkpoint, we prevent reassignment of the relfilenode number until
- * it's safe, because relfilenode assignment skips over any existing file.
+ * next checkpoint, we prevent reassignment of the relfilenumber until it's
+ * safe, because relfilenumber assignment skips over any existing file.
*
* We do not need to go through this dance for temp relations, though, because
* we never make WAL entries for temp rels, and so a temp rel poses no threat
- * to the health of a regular rel that has taken over its relfilenode number.
+ * to the health of a regular rel that has taken over its relfilenumber.
* The fact that temp rels and regular rels have different file naming
* patterns provides additional safety.
*
* All the above applies only to the relation's main fork; other forks can
* just be removed immediately, since they are not needed to prevent the
- * relfilenode number from being recycled. Also, we do not carefully
+ * relfilenumber from being recycled. Also, we do not carefully
* track whether other forks have been created or not, but just attempt to
* unlink them unconditionally; so we should never complain about ENOENT.
*
* we are usually not in a transaction anymore when this is called.
*/
void
-mdunlink(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
+mdunlink(RelFileLocatorBackend rlocator, ForkNumber forkNum, bool isRedo)
{
/* Now do the per-fork work */
if (forkNum == InvalidForkNumber)
{
for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++)
- mdunlinkfork(rnode, forkNum, isRedo);
+ mdunlinkfork(rlocator, forkNum, isRedo);
}
else
- mdunlinkfork(rnode, forkNum, isRedo);
+ mdunlinkfork(rlocator, forkNum, isRedo);
}
/*
}
static void
-mdunlinkfork(RelFileNodeBackend rnode, ForkNumber forkNum, bool isRedo)
+mdunlinkfork(RelFileLocatorBackend rlocator, ForkNumber forkNum, bool isRedo)
{
char *path;
int ret;
- path = relpath(rnode, forkNum);
+ path = relpath(rlocator, forkNum);
/*
* Delete or truncate the first segment.
*/
- if (isRedo || forkNum != MAIN_FORKNUM || RelFileNodeBackendIsTemp(rnode))
+ if (isRedo || forkNum != MAIN_FORKNUM || RelFileLocatorBackendIsTemp(rlocator))
{
- if (!RelFileNodeBackendIsTemp(rnode))
+ if (!RelFileLocatorBackendIsTemp(rlocator))
{
/* Prevent other backends' fds from holding on to the disk space */
ret = do_truncate(path);
/* Forget any pending sync requests for the first segment */
- register_forget_request(rnode, forkNum, 0 /* first seg */ );
+ register_forget_request(rlocator, forkNum, 0 /* first seg */ );
}
else
ret = 0;
ret = do_truncate(path);
/* Register request to unlink first segment later */
- register_unlink_segment(rnode, forkNum, 0 /* first seg */ );
+ register_unlink_segment(rlocator, forkNum, 0 /* first seg */ );
}
/*
{
sprintf(segpath, "%s.%u", path, segno);
- if (!RelFileNodeBackendIsTemp(rnode))
+ if (!RelFileLocatorBackendIsTemp(rlocator))
{
/*
* Prevent other backends' fds from holding on to the disk
* Forget any pending sync requests for this segment before we
* try to unlink.
*/
- register_forget_request(rnode, forkNum, segno);
+ register_forget_request(rlocator, forkNum, segno);
}
if (unlink(segpath) < 0)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("cannot extend file \"%s\" beyond %u blocks",
- relpath(reln->smgr_rnode, forknum),
+ relpath(reln->smgr_rlocator, forknum),
InvalidBlockNumber)));
v = _mdfd_getseg(reln, forknum, blocknum, skipFsync, EXTENSION_CREATE);
if (reln->md_num_open_segs[forknum] > 0)
return &reln->md_seg_fds[forknum][0];
- path = relpath(reln->smgr_rnode, forknum);
+ path = relpath(reln->smgr_rlocator, forknum);
fd = PathNameOpenFile(path, O_RDWR | PG_BINARY);
MdfdVec *v;
TRACE_POSTGRESQL_SMGR_MD_READ_START(forknum, blocknum,
- reln->smgr_rnode.node.spcNode,
- reln->smgr_rnode.node.dbNode,
- reln->smgr_rnode.node.relNode,
- reln->smgr_rnode.backend);
+ reln->smgr_rlocator.locator.spcOid,
+ reln->smgr_rlocator.locator.dbOid,
+ reln->smgr_rlocator.locator.relNumber,
+ reln->smgr_rlocator.backend);
v = _mdfd_getseg(reln, forknum, blocknum, false,
EXTENSION_FAIL | EXTENSION_CREATE_RECOVERY);
nbytes = FileRead(v->mdfd_vfd, buffer, BLCKSZ, seekpos, WAIT_EVENT_DATA_FILE_READ);
TRACE_POSTGRESQL_SMGR_MD_READ_DONE(forknum, blocknum,
- reln->smgr_rnode.node.spcNode,
- reln->smgr_rnode.node.dbNode,
- reln->smgr_rnode.node.relNode,
- reln->smgr_rnode.backend,
+ reln->smgr_rlocator.locator.spcOid,
+ reln->smgr_rlocator.locator.dbOid,
+ reln->smgr_rlocator.locator.relNumber,
+ reln->smgr_rlocator.backend,
nbytes,
BLCKSZ);
#endif
TRACE_POSTGRESQL_SMGR_MD_WRITE_START(forknum, blocknum,
- reln->smgr_rnode.node.spcNode,
- reln->smgr_rnode.node.dbNode,
- reln->smgr_rnode.node.relNode,
- reln->smgr_rnode.backend);
+ reln->smgr_rlocator.locator.spcOid,
+ reln->smgr_rlocator.locator.dbOid,
+ reln->smgr_rlocator.locator.relNumber,
+ reln->smgr_rlocator.backend);
v = _mdfd_getseg(reln, forknum, blocknum, skipFsync,
EXTENSION_FAIL | EXTENSION_CREATE_RECOVERY);
nbytes = FileWrite(v->mdfd_vfd, buffer, BLCKSZ, seekpos, WAIT_EVENT_DATA_FILE_WRITE);
TRACE_POSTGRESQL_SMGR_MD_WRITE_DONE(forknum, blocknum,
- reln->smgr_rnode.node.spcNode,
- reln->smgr_rnode.node.dbNode,
- reln->smgr_rnode.node.relNode,
- reln->smgr_rnode.backend,
+ reln->smgr_rlocator.locator.spcOid,
+ reln->smgr_rlocator.locator.dbOid,
+ reln->smgr_rlocator.locator.relNumber,
+ reln->smgr_rlocator.backend,
nbytes,
BLCKSZ);
return;
ereport(ERROR,
(errmsg("could not truncate file \"%s\" to %u blocks: it's only %u blocks now",
- relpath(reln->smgr_rnode, forknum),
+ relpath(reln->smgr_rlocator, forknum),
nblocks, curnblk)));
}
if (nblocks == curnblk)
{
FileTag tag;
- INIT_MD_FILETAG(tag, reln->smgr_rnode.node, forknum, seg->mdfd_segno);
+ INIT_MD_FILETAG(tag, reln->smgr_rlocator.locator, forknum, seg->mdfd_segno);
/* Temp relations should never be fsync'd */
Assert(!SmgrIsTemp(reln));
* register_unlink_segment() -- Schedule a file to be deleted after next checkpoint
*/
static void
-register_unlink_segment(RelFileNodeBackend rnode, ForkNumber forknum,
+register_unlink_segment(RelFileLocatorBackend rlocator, ForkNumber forknum,
BlockNumber segno)
{
FileTag tag;
- INIT_MD_FILETAG(tag, rnode.node, forknum, segno);
+ INIT_MD_FILETAG(tag, rlocator.locator, forknum, segno);
/* Should never be used with temp relations */
- Assert(!RelFileNodeBackendIsTemp(rnode));
+ Assert(!RelFileLocatorBackendIsTemp(rlocator));
RegisterSyncRequest(&tag, SYNC_UNLINK_REQUEST, true /* retryOnError */ );
}
* register_forget_request() -- forget any fsyncs for a relation fork's segment
*/
static void
-register_forget_request(RelFileNodeBackend rnode, ForkNumber forknum,
+register_forget_request(RelFileLocatorBackend rlocator, ForkNumber forknum,
BlockNumber segno)
{
FileTag tag;
- INIT_MD_FILETAG(tag, rnode.node, forknum, segno);
+ INIT_MD_FILETAG(tag, rlocator.locator, forknum, segno);
RegisterSyncRequest(&tag, SYNC_FORGET_REQUEST, true /* retryOnError */ );
}
ForgetDatabaseSyncRequests(Oid dbid)
{
FileTag tag;
- RelFileNode rnode;
+ RelFileLocator rlocator;
- rnode.dbNode = dbid;
- rnode.spcNode = 0;
- rnode.relNode = 0;
+ rlocator.dbOid = dbid;
+ rlocator.spcOid = 0;
+ rlocator.relNumber = 0;
- INIT_MD_FILETAG(tag, rnode, InvalidForkNumber, InvalidBlockNumber);
+ INIT_MD_FILETAG(tag, rlocator, InvalidForkNumber, InvalidBlockNumber);
RegisterSyncRequest(&tag, SYNC_FILTER_REQUEST, true /* retryOnError */ );
}
* DropRelationFiles -- drop files of all given relations
*/
void
-DropRelationFiles(RelFileNode *delrels, int ndelrels, bool isRedo)
+DropRelationFiles(RelFileLocator *delrels, int ndelrels, bool isRedo)
{
SMgrRelation *srels;
int i;
char *path,
*fullpath;
- path = relpath(reln->smgr_rnode, forknum);
+ path = relpath(reln->smgr_rlocator, forknum);
if (segno > 0)
{
int
mdsyncfiletag(const FileTag *ftag, char *path)
{
- SMgrRelation reln = smgropen(ftag->rnode, InvalidBackendId);
+ SMgrRelation reln = smgropen(ftag->rlocator, InvalidBackendId);
File file;
bool need_to_close;
int result,
char *p;
/* Compute the path. */
- p = relpathperm(ftag->rnode, MAIN_FORKNUM);
+ p = relpathperm(ftag->rlocator, MAIN_FORKNUM);
strlcpy(path, p, MAXPGPATH);
pfree(p);
* We'll return true for all candidates that have the same database OID as
* the ftag from the SYNC_FILTER_REQUEST request, so they're forgotten.
*/
- return ftag->rnode.dbNode == candidate->rnode.dbNode;
+ return ftag->rlocator.dbOid == candidate->rlocator.dbOid;
}
void (*smgr_create) (SMgrRelation reln, ForkNumber forknum,
bool isRedo);
bool (*smgr_exists) (SMgrRelation reln, ForkNumber forknum);
- void (*smgr_unlink) (RelFileNodeBackend rnode, ForkNumber forknum,
+ void (*smgr_unlink) (RelFileLocatorBackend rlocator, ForkNumber forknum,
bool isRedo);
void (*smgr_extend) (SMgrRelation reln, ForkNumber forknum,
BlockNumber blocknum, char *buffer, bool skipFsync);
* This does not attempt to actually open the underlying file.
*/
SMgrRelation
-smgropen(RelFileNode rnode, BackendId backend)
+smgropen(RelFileLocator rlocator, BackendId backend)
{
- RelFileNodeBackend brnode;
+ RelFileLocatorBackend brlocator;
SMgrRelation reln;
bool found;
/* First time through: initialize the hash table */
HASHCTL ctl;
- ctl.keysize = sizeof(RelFileNodeBackend);
+ ctl.keysize = sizeof(RelFileLocatorBackend);
ctl.entrysize = sizeof(SMgrRelationData);
SMgrRelationHash = hash_create("smgr relation table", 400,
&ctl, HASH_ELEM | HASH_BLOBS);
}
/* Look up or create an entry */
- brnode.node = rnode;
- brnode.backend = backend;
+ brlocator.locator = rlocator;
+ brlocator.backend = backend;
reln = (SMgrRelation) hash_search(SMgrRelationHash,
- (void *) &brnode,
+ (void *) &brlocator,
HASH_ENTER, &found);
/* Initialize it if not present before */
dlist_delete(&reln->node);
if (hash_search(SMgrRelationHash,
- (void *) &(reln->smgr_rnode),
+ (void *) &(reln->smgr_rlocator),
HASH_REMOVE, NULL) == NULL)
elog(ERROR, "SMgrRelation hashtable corrupted");
}
/*
- * smgrclosenode() -- Close SMgrRelation object for given RelFileNode,
+ * smgrcloserellocator() -- Close SMgrRelation object for given RelFileLocator,
* if one exists.
*
- * This has the same effects as smgrclose(smgropen(rnode)), but it avoids
+ * This has the same effects as smgrclose(smgropen(rlocator)), but it avoids
* uselessly creating a hashtable entry only to drop it again when no
* such entry exists already.
*/
void
-smgrclosenode(RelFileNodeBackend rnode)
+smgrcloserellocator(RelFileLocatorBackend rlocator)
{
SMgrRelation reln;
return;
reln = (SMgrRelation) hash_search(SMgrRelationHash,
- (void *) &rnode,
+ (void *) &rlocator,
HASH_FIND, NULL);
if (reln != NULL)
smgrclose(reln);
smgrdounlinkall(SMgrRelation *rels, int nrels, bool isRedo)
{
int i = 0;
- RelFileNodeBackend *rnodes;
+ RelFileLocatorBackend *rlocators;
ForkNumber forknum;
if (nrels == 0)
* Get rid of any remaining buffers for the relations. bufmgr will just
* drop them without bothering to write the contents.
*/
- DropRelFileNodesAllBuffers(rels, nrels);
+ DropRelFileLocatorsAllBuffers(rels, nrels);
/*
* create an array which contains all relations to be dropped, and close
* each relation's forks at the smgr level while at it
*/
- rnodes = palloc(sizeof(RelFileNodeBackend) * nrels);
+ rlocators = palloc(sizeof(RelFileLocatorBackend) * nrels);
for (i = 0; i < nrels; i++)
{
- RelFileNodeBackend rnode = rels[i]->smgr_rnode;
+ RelFileLocatorBackend rlocator = rels[i]->smgr_rlocator;
int which = rels[i]->smgr_which;
- rnodes[i] = rnode;
+ rlocators[i] = rlocator;
/* Close the forks at smgr level */
for (forknum = 0; forknum <= MAX_FORKNUM; forknum++)
* closed our own smgr rel.
*/
for (i = 0; i < nrels; i++)
- CacheInvalidateSmgr(rnodes[i]);
+ CacheInvalidateSmgr(rlocators[i]);
/*
* Delete the physical file(s).
int which = rels[i]->smgr_which;
for (forknum = 0; forknum <= MAX_FORKNUM; forknum++)
- smgrsw[which].smgr_unlink(rnodes[i], forknum, isRedo);
+ smgrsw[which].smgr_unlink(rlocators[i], forknum, isRedo);
}
- pfree(rnodes);
+ pfree(rlocators);
}
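For reference, a minimal caller-side sketch of this drop path, assuming
rlocator identifies a doomed relation (compare DropRelationFiles() above,
which does the same over an array of relations):

    SMgrRelation srel = smgropen(rlocator, InvalidBackendId);

    smgrdounlinkall(&srel, 1, isRedo);
    smgrclose(srel);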
* Get rid of any buffers for the about-to-be-deleted blocks. bufmgr will
* just drop them without bothering to write the contents.
*/
- DropRelFileNodeBuffers(reln, forknum, nforks, nblocks);
+ DropRelFileLocatorBuffers(reln, forknum, nforks, nblocks);
/*
* Send a shared-inval message to force other backends to close any smgr
* is a performance-critical path.) As in the unlink code, we want to be
* sure the message is sent before we start changing things on-disk.
*/
- CacheInvalidateSmgr(reln->smgr_rnode);
+ CacheInvalidateSmgr(reln->smgr_rlocator);
/* Do the truncation */
for (i = 0; i < nforks; i++)
#include "utils/builtins.h"
#include "utils/numeric.h"
#include "utils/rel.h"
-#include "utils/relfilenodemap.h"
+#include "utils/relfilenumbermap.h"
#include "utils/relmapper.h"
#include "utils/syscache.h"
* is no check here or at the call sites for that.
*/
static int64
-calculate_relation_size(RelFileNode *rfn, BackendId backend, ForkNumber forknum)
+calculate_relation_size(RelFileLocator *rfn, BackendId backend, ForkNumber forknum)
{
int64 totalsize = 0;
char *relationpath;
if (rel == NULL)
PG_RETURN_NULL();
- size = calculate_relation_size(&(rel->rd_node), rel->rd_backend,
+ size = calculate_relation_size(&(rel->rd_locator), rel->rd_backend,
forkname_to_number(text_to_cstring(forkName)));
relation_close(rel, AccessShareLock);
/* toast heap size, including FSM and VM size */
for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++)
- size += calculate_relation_size(&(toastRel->rd_node),
+ size += calculate_relation_size(&(toastRel->rd_locator),
toastRel->rd_backend, forkNum);
/* toast index size, including FSM and VM size */
toastIdxRel = relation_open(lfirst_oid(lc),
AccessShareLock);
for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++)
- size += calculate_relation_size(&(toastIdxRel->rd_node),
+ size += calculate_relation_size(&(toastIdxRel->rd_locator),
toastIdxRel->rd_backend, forkNum);
relation_close(toastIdxRel, AccessShareLock);
* heap size, including FSM and VM
*/
for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++)
- size += calculate_relation_size(&(rel->rd_node), rel->rd_backend,
+ size += calculate_relation_size(&(rel->rd_locator), rel->rd_backend,
forkNum);
/*
idxRel = relation_open(idxOid, AccessShareLock);
for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++)
- size += calculate_relation_size(&(idxRel->rd_node),
+ size += calculate_relation_size(&(idxRel->rd_locator),
idxRel->rd_backend,
forkNum);
pg_relation_filenode(PG_FUNCTION_ARGS)
{
Oid relid = PG_GETARG_OID(0);
- Oid result;
+ RelFileNumber result;
HeapTuple tuple;
Form_pg_class relform;
if (relform->relfilenode)
result = relform->relfilenode;
else /* Consult the relation mapper */
- result = RelationMapOidToFilenode(relid,
- relform->relisshared);
+ result = RelationMapOidToFilenumber(relid,
+ relform->relisshared);
}
else
{
/* no storage, return NULL */
- result = InvalidOid;
+ result = InvalidRelFileNumber;
}
ReleaseSysCache(tuple);
- if (!OidIsValid(result))
+ if (!RelFileNumberIsValid(result))
PG_RETURN_NULL();
PG_RETURN_OID(result);
}
/*
- * Get the relation via (reltablespace, relfilenode)
+ * Get the relation via (reltablespace, relfilenumber)
*
* This is expected to be used when somebody wants to match an individual file
* on the filesystem back to its table. That's not trivially possible via
- * pg_class, because that doesn't contain the relfilenodes of shared and nailed
+ * pg_class, because that doesn't contain the relfilenumbers of shared and nailed
* tables.
*
* We don't fail but return NULL if we cannot find a mapping.
pg_filenode_relation(PG_FUNCTION_ARGS)
{
Oid reltablespace = PG_GETARG_OID(0);
- Oid relfilenode = PG_GETARG_OID(1);
+ RelFileNumber relfilenumber = PG_GETARG_OID(1);
Oid heaprel;
- /* test needed so RelidByRelfilenode doesn't misbehave */
- if (!OidIsValid(relfilenode))
+ /* test needed so RelidByRelfilenumber doesn't misbehave */
+ if (!RelFileNumberIsValid(relfilenumber))
PG_RETURN_NULL();
- heaprel = RelidByRelfilenode(reltablespace, relfilenode);
+ heaprel = RelidByRelfilenumber(reltablespace, relfilenumber);
if (!OidIsValid(heaprel))
PG_RETURN_NULL();
Oid relid = PG_GETARG_OID(0);
HeapTuple tuple;
Form_pg_class relform;
- RelFileNode rnode;
+ RelFileLocator rlocator;
BackendId backend;
char *path;
{
/* This logic should match RelationInitPhysicalAddr */
if (relform->reltablespace)
- rnode.spcNode = relform->reltablespace;
+ rlocator.spcOid = relform->reltablespace;
else
- rnode.spcNode = MyDatabaseTableSpace;
- if (rnode.spcNode == GLOBALTABLESPACE_OID)
- rnode.dbNode = InvalidOid;
+ rlocator.spcOid = MyDatabaseTableSpace;
+ if (rlocator.spcOid == GLOBALTABLESPACE_OID)
+ rlocator.dbOid = InvalidOid;
else
- rnode.dbNode = MyDatabaseId;
+ rlocator.dbOid = MyDatabaseId;
if (relform->relfilenode)
- rnode.relNode = relform->relfilenode;
+ rlocator.relNumber = relform->relfilenode;
else /* Consult the relation mapper */
- rnode.relNode = RelationMapOidToFilenode(relid,
- relform->relisshared);
+ rlocator.relNumber = RelationMapOidToFilenumber(relid,
+ relform->relisshared);
}
else
{
/* no storage, return NULL */
- rnode.relNode = InvalidOid;
+ rlocator.relNumber = InvalidRelFileNumber;
/* some compilers generate warnings without these next two lines */
- rnode.dbNode = InvalidOid;
- rnode.spcNode = InvalidOid;
+ rlocator.dbOid = InvalidOid;
+ rlocator.spcOid = InvalidOid;
}
- if (!OidIsValid(rnode.relNode))
+ if (!RelFileNumberIsValid(rlocator.relNumber))
{
ReleaseSysCache(tuple);
PG_RETURN_NULL();
ReleaseSysCache(tuple);
- path = relpathbackend(rnode, backend, MAIN_FORKNUM);
+ path = relpathbackend(rlocator, backend, MAIN_FORKNUM);
PG_RETURN_TEXT_P(cstring_to_text(path));
}
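To illustrate the renamed fields end to end, a sketch with a made-up
relNumber: a fully specified RelFileLocator resolves to an on-disk path via
relpathbackend(), much as pg_relation_filepath() does above:

    RelFileLocator rlocator;

    rlocator.spcOid = DEFAULTTABLESPACE_OID;
    rlocator.dbOid = MyDatabaseId;
    rlocator.relNumber = 16384;    /* hypothetical relfilenumber */

    /* yields something like "base/<dbOid>/16384" for the main fork */
    char *path = relpathbackend(rlocator, InvalidBackendId, MAIN_FORKNUM);

    pfree(path);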
* pg_upgrade_support.c
*
* server-side functions to set backend global variables
- * to control oid and relfilenode assignment, and do other special
+ * to control oid and relfilenumber assignment, and do other special
* hacks needed for pg_upgrade.
*
* Copyright (c) 2010-2022, PostgreSQL Global Development Group
Datum
binary_upgrade_set_next_heap_relfilenode(PG_FUNCTION_ARGS)
{
- Oid nodeoid = PG_GETARG_OID(0);
+ RelFileNumber relfilenumber = PG_GETARG_OID(0);
CHECK_IS_BINARY_UPGRADE;
- binary_upgrade_next_heap_pg_class_relfilenode = nodeoid;
+ binary_upgrade_next_heap_pg_class_relfilenumber = relfilenumber;
PG_RETURN_VOID();
}
Datum
binary_upgrade_set_next_index_relfilenode(PG_FUNCTION_ARGS)
{
- Oid nodeoid = PG_GETARG_OID(0);
+ RelFileNumber relfilenumber = PG_GETARG_OID(0);
CHECK_IS_BINARY_UPGRADE;
- binary_upgrade_next_index_pg_class_relfilenode = nodeoid;
+ binary_upgrade_next_index_pg_class_relfilenumber = relfilenumber;
PG_RETURN_VOID();
}
Datum
binary_upgrade_set_next_toast_relfilenode(PG_FUNCTION_ARGS)
{
- Oid nodeoid = PG_GETARG_OID(0);
+ RelFileNumber relfilenumber = PG_GETARG_OID(0);
CHECK_IS_BINARY_UPGRADE;
- binary_upgrade_next_toast_pg_class_relfilenode = nodeoid;
+ binary_upgrade_next_toast_pg_class_relfilenumber = relfilenumber;
PG_RETURN_VOID();
}
partcache.o \
plancache.o \
relcache.o \
- relfilenodemap.o \
+ relfilenumbermap.o \
relmapper.o \
spccache.o \
syscache.o \
* We could have smgr entries for relations of other databases, so no
* short-circuit test is possible here.
*/
- RelFileNodeBackend rnode;
+ RelFileLocatorBackend rlocator;
- rnode.node = msg->sm.rnode;
- rnode.backend = (msg->sm.backend_hi << 16) | (int) msg->sm.backend_lo;
- smgrclosenode(rnode);
+ rlocator.locator = msg->sm.rlocator;
+ rlocator.backend = (msg->sm.backend_hi << 16) | (int) msg->sm.backend_lo;
+ smgrcloserellocator(rlocator);
}
else if (msg->id == SHAREDINVALRELMAP_ID)
{
* Thus, the maximum possible backend ID is 2^23-1.
*/
void
-CacheInvalidateSmgr(RelFileNodeBackend rnode)
+CacheInvalidateSmgr(RelFileLocatorBackend rlocator)
{
SharedInvalidationMessage msg;
msg.sm.id = SHAREDINVALSMGR_ID;
- msg.sm.backend_hi = rnode.backend >> 16;
- msg.sm.backend_lo = rnode.backend & 0xffff;
- msg.sm.rnode = rnode.node;
+ msg.sm.backend_hi = rlocator.backend >> 16;
+ msg.sm.backend_lo = rlocator.backend & 0xffff;
+ msg.sm.rlocator = rlocator.locator;
/* check AddCatcacheInvalidationMessage() for an explanation */
VALGRIND_MAKE_MEM_DEFINED(&msg, sizeof(msg));
/*
* The caller might need a tuple that's newer than the one the historic
* snapshot; currently the only case requiring to do so is looking up the
- * relfilenode of non mapped system relations during decoding. That
+ * relfilenumber of non mapped system relations during decoding. That
* snapshot can't change in the midst of a relcache build, so there's no
* need to register the snapshot.
*/
relation->rd_refcnt = 0;
relation->rd_isnailed = false;
relation->rd_createSubid = InvalidSubTransactionId;
- relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
- relation->rd_firstRelfilenodeSubid = InvalidSubTransactionId;
+ relation->rd_newRelfilelocatorSubid = InvalidSubTransactionId;
+ relation->rd_firstRelfilelocatorSubid = InvalidSubTransactionId;
relation->rd_droppedSubid = InvalidSubTransactionId;
switch (relation->rd_rel->relpersistence)
{
}
/*
- * Initialize the physical addressing info (RelFileNode) for a relcache entry
+ * Initialize the physical addressing info (RelFileLocator) for a relcache entry
*
* Note: at the physical level, relations in the pg_global tablespace must
* be treated as shared, even if relisshared isn't set. Hence we do not
static void
RelationInitPhysicalAddr(Relation relation)
{
- Oid oldnode = relation->rd_node.relNode;
+ RelFileNumber oldnumber = relation->rd_locator.relNumber;
/* these relations kinds never have storage */
if (!RELKIND_HAS_STORAGE(relation->rd_rel->relkind))
return;
if (relation->rd_rel->reltablespace)
- relation->rd_node.spcNode = relation->rd_rel->reltablespace;
+ relation->rd_locator.spcOid = relation->rd_rel->reltablespace;
else
- relation->rd_node.spcNode = MyDatabaseTableSpace;
- if (relation->rd_node.spcNode == GLOBALTABLESPACE_OID)
- relation->rd_node.dbNode = InvalidOid;
+ relation->rd_locator.spcOid = MyDatabaseTableSpace;
+ if (relation->rd_locator.spcOid == GLOBALTABLESPACE_OID)
+ relation->rd_locator.dbOid = InvalidOid;
else
- relation->rd_node.dbNode = MyDatabaseId;
+ relation->rd_locator.dbOid = MyDatabaseId;
if (relation->rd_rel->relfilenode)
{
heap_freetuple(phys_tuple);
}
- relation->rd_node.relNode = relation->rd_rel->relfilenode;
+ relation->rd_locator.relNumber = relation->rd_rel->relfilenode;
}
else
{
/* Consult the relation mapper */
- relation->rd_node.relNode =
- RelationMapOidToFilenode(relation->rd_id,
- relation->rd_rel->relisshared);
- if (!OidIsValid(relation->rd_node.relNode))
+ relation->rd_locator.relNumber =
+ RelationMapOidToFilenumber(relation->rd_id,
+ relation->rd_rel->relisshared);
+ if (!RelFileNumberIsValid(relation->rd_locator.relNumber))
elog(ERROR, "could not find relation mapping for relation \"%s\", OID %u",
RelationGetRelationName(relation), relation->rd_id);
}
/*
* For RelationNeedsWAL() to answer correctly on parallel workers, restore
- * rd_firstRelfilenodeSubid. No subtransactions start or end while in
+ * rd_firstRelfilelocatorSubid. No subtransactions start or end while in
* parallel mode, so the specific SubTransactionId does not matter.
*/
- if (IsParallelWorker() && oldnode != relation->rd_node.relNode)
+ if (IsParallelWorker() && oldnumber != relation->rd_locator.relNumber)
{
- if (RelFileNodeSkippingWAL(relation->rd_node))
- relation->rd_firstRelfilenodeSubid = TopSubTransactionId;
+ if (RelFileLocatorSkippingWAL(relation->rd_locator))
+ relation->rd_firstRelfilelocatorSubid = TopSubTransactionId;
else
- relation->rd_firstRelfilenodeSubid = InvalidSubTransactionId;
+ relation->rd_firstRelfilelocatorSubid = InvalidSubTransactionId;
}
}
*/
relation->rd_isnailed = true;
relation->rd_createSubid = InvalidSubTransactionId;
- relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
- relation->rd_firstRelfilenodeSubid = InvalidSubTransactionId;
+ relation->rd_newRelfilelocatorSubid = InvalidSubTransactionId;
+ relation->rd_firstRelfilelocatorSubid = InvalidSubTransactionId;
relation->rd_droppedSubid = InvalidSubTransactionId;
relation->rd_backend = InvalidBackendId;
relation->rd_islocaltemp = false;
/*
* All relations made with formrdesc are mapped. This is necessarily so
- * because there is no other way to know what filenode they currently
+ * because there is no other way to know what filenumber they currently
* have. In bootstrap mode, add them to the initial relation mapper data,
- * specifying that the initial filenode is the same as the OID.
+ * specifying that the initial filenumber is the same as the OID.
*/
- relation->rd_rel->relfilenode = InvalidOid;
+ relation->rd_rel->relfilenode = InvalidRelFileNumber;
if (IsBootstrapProcessingMode())
RelationMapUpdateMap(RelationGetRelid(relation),
RelationGetRelid(relation),
#ifdef RELCACHE_FORCE_RELEASE
if (RelationHasReferenceCountZero(relation) &&
relation->rd_createSubid == InvalidSubTransactionId &&
- relation->rd_firstRelfilenodeSubid == InvalidSubTransactionId)
+ relation->rd_firstRelfilelocatorSubid == InvalidSubTransactionId)
RelationClearRelation(relation, false);
#endif
}
{
/*
* If it's a nailed-but-not-mapped index, then we need to re-read the
- * pg_class row to see if its relfilenode changed.
+ * pg_class row to see if its relfilenumber changed.
*/
RelationReloadIndexInfo(relation);
}
Assert(newrel->rd_isnailed == relation->rd_isnailed);
/* creation sub-XIDs must be preserved */
SWAPFIELD(SubTransactionId, rd_createSubid);
- SWAPFIELD(SubTransactionId, rd_newRelfilenodeSubid);
- SWAPFIELD(SubTransactionId, rd_firstRelfilenodeSubid);
+ SWAPFIELD(SubTransactionId, rd_newRelfilelocatorSubid);
+ SWAPFIELD(SubTransactionId, rd_firstRelfilelocatorSubid);
SWAPFIELD(SubTransactionId, rd_droppedSubid);
/* un-swap rd_rel pointers, swap contents instead */
SWAPFIELD(Form_pg_class, rd_rel);
RelationFlushRelation(Relation relation)
{
if (relation->rd_createSubid != InvalidSubTransactionId ||
- relation->rd_firstRelfilenodeSubid != InvalidSubTransactionId)
+ relation->rd_firstRelfilelocatorSubid != InvalidSubTransactionId)
{
/*
* New relcache entries are always rebuilt, not flushed; else we'd
* forget the "new" status of the relation. Ditto for the
- * new-relfilenode status.
+ * new-relfilenumber status.
*
* The rel could have zero refcnt here, so temporarily increment the
* refcnt to ensure it's safe to rebuild it. We can assume that the
Assert(relation->rd_droppedSubid == InvalidSubTransactionId);
if (relation->rd_createSubid != InvalidSubTransactionId ||
- relation->rd_firstRelfilenodeSubid != InvalidSubTransactionId)
+ relation->rd_firstRelfilelocatorSubid != InvalidSubTransactionId)
{
/*
* In the event of subtransaction rollback, we must not forget
*
* Apart from debug_discard_caches, this is currently used only to recover
* from SI message buffer overflow, so we do not touch relations having
- * new-in-transaction relfilenodes; they cannot be targets of cross-backend
+ * new-in-transaction relfilenumbers; they cannot be targets of cross-backend
* SI updates (and our own updates now go through a separate linked list
* that isn't limited by the SI message buffer size).
*
* so hash_seq_search will complete safely; (b) during the second pass we
* only hold onto pointers to nondeletable entries.
*
- * The two-phase approach also makes it easy to update relfilenodes for
+ * The two-phase approach also makes it easy to update relfilenumbers for
* mapped relations before we do anything else, and to ensure that the
* second pass processes nailed-in-cache items before other nondeletable
* items. This should ensure that system catalogs are up to date before
/*
* Ignore new relations; no other backend will manipulate them before
- * we commit. Likewise, before replacing a relation's relfilenode, we
- * shall have acquired AccessExclusiveLock and drained any applicable
- * pending invalidations.
+ * we commit. Likewise, before replacing a relation's relfilelocator,
+ * we shall have acquired AccessExclusiveLock and drained any
+ * applicable pending invalidations.
*/
if (relation->rd_createSubid != InvalidSubTransactionId ||
- relation->rd_firstRelfilenodeSubid != InvalidSubTransactionId)
+ relation->rd_firstRelfilelocatorSubid != InvalidSubTransactionId)
continue;
relcacheInvalsReceived++;
else
{
/*
- * If it's a mapped relation, immediately update its rd_node in
- * case its relfilenode changed. We must do this during phase 1
+ * If it's a mapped relation, immediately update its rd_locator in
+ * case its relfilenumber changed. We must do this during phase 1
* in case the relation is consulted during rebuild of other
* relcache entries in phase 2. It's safe since consulting the
* map doesn't involve any access to relcache entries.
RelationIsPermanent(relation) &&
((relation->rd_createSubid != InvalidSubTransactionId &&
RELKIND_HAS_STORAGE(relation->rd_rel->relkind)) ||
- relation->rd_firstRelfilenodeSubid != InvalidSubTransactionId);
+ relation->rd_firstRelfilelocatorSubid != InvalidSubTransactionId);
- Assert(relcache_verdict == RelFileNodeSkippingWAL(relation->rd_node));
+ Assert(relcache_verdict == RelFileLocatorSkippingWAL(relation->rd_locator));
if (relation->rd_droppedSubid != InvalidSubTransactionId)
Assert(!relation->rd_isvalid &&
(relation->rd_createSubid != InvalidSubTransactionId ||
- relation->rd_firstRelfilenodeSubid != InvalidSubTransactionId));
+ relation->rd_firstRelfilelocatorSubid != InvalidSubTransactionId));
}
/*
* also lets RelationClearRelation() drop the relcache entry.
*/
relation->rd_createSubid = InvalidSubTransactionId;
- relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
- relation->rd_firstRelfilenodeSubid = InvalidSubTransactionId;
+ relation->rd_newRelfilelocatorSubid = InvalidSubTransactionId;
+ relation->rd_firstRelfilelocatorSubid = InvalidSubTransactionId;
relation->rd_droppedSubid = InvalidSubTransactionId;
if (clear_relcache)
{
/* allow the entry to be removed */
relation->rd_createSubid = InvalidSubTransactionId;
- relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
- relation->rd_firstRelfilenodeSubid = InvalidSubTransactionId;
+ relation->rd_newRelfilelocatorSubid = InvalidSubTransactionId;
+ relation->rd_firstRelfilelocatorSubid = InvalidSubTransactionId;
relation->rd_droppedSubid = InvalidSubTransactionId;
RelationClearRelation(relation, false);
return;
}
/*
- * Likewise, update or drop any new-relfilenode-in-subtransaction record
+ * Likewise, update or drop any new-relfilenumber-in-subtransaction record
* or drop record.
*/
- if (relation->rd_newRelfilenodeSubid == mySubid)
+ if (relation->rd_newRelfilelocatorSubid == mySubid)
{
if (isCommit)
- relation->rd_newRelfilenodeSubid = parentSubid;
+ relation->rd_newRelfilelocatorSubid = parentSubid;
else
- relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
+ relation->rd_newRelfilelocatorSubid = InvalidSubTransactionId;
}
- if (relation->rd_firstRelfilenodeSubid == mySubid)
+ if (relation->rd_firstRelfilelocatorSubid == mySubid)
{
if (isCommit)
- relation->rd_firstRelfilenodeSubid = parentSubid;
+ relation->rd_firstRelfilelocatorSubid = parentSubid;
else
- relation->rd_firstRelfilenodeSubid = InvalidSubTransactionId;
+ relation->rd_firstRelfilelocatorSubid = InvalidSubTransactionId;
}
if (relation->rd_droppedSubid == mySubid)
TupleDesc tupDesc,
Oid relid,
Oid accessmtd,
- Oid relfilenode,
+ RelFileNumber relfilenumber,
Oid reltablespace,
bool shared_relation,
bool mapped_relation,
/* it's being created in this transaction */
rel->rd_createSubid = GetCurrentSubTransactionId();
- rel->rd_newRelfilenodeSubid = InvalidSubTransactionId;
- rel->rd_firstRelfilenodeSubid = InvalidSubTransactionId;
+ rel->rd_newRelfilelocatorSubid = InvalidSubTransactionId;
+ rel->rd_firstRelfilelocatorSubid = InvalidSubTransactionId;
rel->rd_droppedSubid = InvalidSubTransactionId;
/*
/*
* Insert relation physical and logical identifiers (OIDs) into the right
- * places. For a mapped relation, we set relfilenode to zero and rely on
- * RelationInitPhysicalAddr to consult the map.
+ * places. For a mapped relation, we set relfilenumber to zero and rely
+ * on RelationInitPhysicalAddr to consult the map.
*/
rel->rd_rel->relisshared = shared_relation;
if (mapped_relation)
{
- rel->rd_rel->relfilenode = InvalidOid;
+ rel->rd_rel->relfilenode = InvalidRelFileNumber;
/* Add it to the active mapping information */
- RelationMapUpdateMap(relid, relfilenode, shared_relation, true);
+ RelationMapUpdateMap(relid, relfilenumber, shared_relation, true);
}
else
- rel->rd_rel->relfilenode = relfilenode;
+ rel->rd_rel->relfilenode = relfilenumber;
RelationInitLockInfo(rel); /* see lmgr.c */
/*
- * RelationSetNewRelfilenode
+ * RelationSetNewRelfilenumber
*
- * Assign a new relfilenode (physical file name), and possibly a new
+ * Assign a new relfilenumber (physical file name), and possibly a new
* persistence setting, to the relation.
*
* This allows a full rewrite of the relation to be done with transactional
- * safety (since the filenode assignment can be rolled back). Note however
+ * safety (since the filenumber assignment can be rolled back). Note however
* that there is no simple way to access the relation's old data for the
* remainder of the current transaction. This limits the usefulness to cases
* such as TRUNCATE or rebuilding an index from scratch.
* Caller must already hold exclusive lock on the relation.
*/
void
-RelationSetNewRelfilenode(Relation relation, char persistence)
+RelationSetNewRelfilenumber(Relation relation, char persistence)
{
- Oid newrelfilenode;
+ RelFileNumber newrelfilenumber;
Relation pg_class;
HeapTuple tuple;
Form_pg_class classform;
MultiXactId minmulti = InvalidMultiXactId;
TransactionId freezeXid = InvalidTransactionId;
- RelFileNode newrnode;
+ RelFileLocator newrlocator;
- /* Allocate a new relfilenode */
- newrelfilenode = GetNewRelFileNode(relation->rd_rel->reltablespace, NULL,
- persistence);
+ /* Allocate a new relfilenumber */
+ newrelfilenumber = GetNewRelFileNumber(relation->rd_rel->reltablespace,
+ NULL, persistence);
/*
* Get a writable copy of the pg_class tuple for the given relation.
RelationDropStorage(relation);
/*
- * Create storage for the main fork of the new relfilenode. If it's a
+ * Create storage for the main fork of the new relfilenumber. If it's a
* table-like object, call into the table AM to do so, which'll also
* create the table's init fork if needed.
*
- * NOTE: If relevant for the AM, any conflict in relfilenode value will be
- * caught here, if GetNewRelFileNode messes up for any reason.
+ * NOTE: If relevant for the AM, any conflict in relfilenumber value will
+ * be caught here, if GetNewRelFileNumber messes up for any reason.
*/
- newrnode = relation->rd_node;
- newrnode.relNode = newrelfilenode;
+ newrlocator = relation->rd_locator;
+ newrlocator.relNumber = newrelfilenumber;
if (RELKIND_HAS_TABLE_AM(relation->rd_rel->relkind))
{
- table_relation_set_new_filenode(relation, &newrnode,
- persistence,
- &freezeXid, &minmulti);
+ table_relation_set_new_filelocator(relation, &newrlocator,
+ persistence,
+ &freezeXid, &minmulti);
}
else if (RELKIND_HAS_STORAGE(relation->rd_rel->relkind))
{
/* handle these directly, at least for now */
SMgrRelation srel;
- srel = RelationCreateStorage(newrnode, persistence, true);
+ srel = RelationCreateStorage(newrlocator, persistence, true);
smgrclose(srel);
}
else
/* Do the deed */
RelationMapUpdateMap(RelationGetRelid(relation),
- newrelfilenode,
+ newrelfilenumber,
relation->rd_rel->relisshared,
false);
else
{
/* Normal case, update the pg_class entry */
- classform->relfilenode = newrelfilenode;
+ classform->relfilenode = newrelfilenumber;
/* relpages etc. never change for sequences */
if (relation->rd_rel->relkind != RELKIND_SEQUENCE)
*/
CommandCounterIncrement();
- RelationAssumeNewRelfilenode(relation);
+ RelationAssumeNewRelfilelocator(relation);
}
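A sketch of the typical caller pattern this function supports (rel is assumed
to be opened and exclusively locked by the caller, as TRUNCATE does):

    /* swap in a fresh relfilenumber; rolled back if the transaction aborts */
    RelationSetNewRelfilenumber(rel, rel->rd_rel->relpersistence);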
/*
- * RelationAssumeNewRelfilenode
+ * RelationAssumeNewRelfilelocator
*
* Code that modifies pg_class.reltablespace or pg_class.relfilenode must call
* this. The call shall precede any code that might insert WAL records whose
- * replay would modify bytes in the new RelFileNode, and the call shall follow
- * any WAL modifying bytes in the prior RelFileNode. See struct RelationData.
+ * replay would modify bytes in the new RelFileLocator, and the call shall follow
+ * any WAL modifying bytes in the prior RelFileLocator. See struct RelationData.
* Ideally, call this as near as possible to the CommandCounterIncrement()
* that makes the pg_class change visible (before it or after it); that
* minimizes the chance of future development adding a forbidden WAL insertion
- * between RelationAssumeNewRelfilenode() and CommandCounterIncrement().
+ * between RelationAssumeNewRelfilelocator() and CommandCounterIncrement().
*/
void
-RelationAssumeNewRelfilenode(Relation relation)
+RelationAssumeNewRelfilelocator(Relation relation)
{
- relation->rd_newRelfilenodeSubid = GetCurrentSubTransactionId();
- if (relation->rd_firstRelfilenodeSubid == InvalidSubTransactionId)
- relation->rd_firstRelfilenodeSubid = relation->rd_newRelfilenodeSubid;
+ relation->rd_newRelfilelocatorSubid = GetCurrentSubTransactionId();
+ if (relation->rd_firstRelfilelocatorSubid == InvalidSubTransactionId)
+ relation->rd_firstRelfilelocatorSubid = relation->rd_newRelfilelocatorSubid;
/* Flag relation as needing eoxact cleanup (to clear these fields) */
EOXactListAdd(relation);
rel->rd_fkeyvalid = false;
rel->rd_fkeylist = NIL;
rel->rd_createSubid = InvalidSubTransactionId;
- rel->rd_newRelfilenodeSubid = InvalidSubTransactionId;
- rel->rd_firstRelfilenodeSubid = InvalidSubTransactionId;
+ rel->rd_newRelfilelocatorSubid = InvalidSubTransactionId;
+ rel->rd_firstRelfilelocatorSubid = InvalidSubTransactionId;
rel->rd_droppedSubid = InvalidSubTransactionId;
rel->rd_amcache = NULL;
rel->pgstat_info = NULL;
/*-------------------------------------------------------------------------
*
- * relfilenodemap.c
- * relfilenode to oid mapping cache.
+ * relfilenumbermap.c
+ * relfilenumber to oid mapping cache.
*
* Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * src/backend/utils/cache/relfilenodemap.c
+ * src/backend/utils/cache/relfilenumbermap.c
*
*-------------------------------------------------------------------------
*/
#include "utils/hsearch.h"
#include "utils/inval.h"
#include "utils/rel.h"
-#include "utils/relfilenodemap.h"
+#include "utils/relfilenumbermap.h"
#include "utils/relmapper.h"
-/* Hash table for information about each relfilenode <-> oid pair */
-static HTAB *RelfilenodeMapHash = NULL;
+/* Hash table for information about each relfilenumber <-> oid pair */
+static HTAB *RelfilenumberMapHash = NULL;
-/* built first time through in InitializeRelfilenodeMap */
-static ScanKeyData relfilenode_skey[2];
+/* built first time through in InitializeRelfilenumberMap */
+static ScanKeyData relfilenumber_skey[2];
typedef struct
{
Oid reltablespace;
- Oid relfilenode;
-} RelfilenodeMapKey;
+ RelFileNumber relfilenumber;
+} RelfilenumberMapKey;
typedef struct
{
- RelfilenodeMapKey key; /* lookup key - must be first */
+ RelfilenumberMapKey key; /* lookup key - must be first */
Oid relid; /* pg_class.oid */
-} RelfilenodeMapEntry;
+} RelfilenumberMapEntry;
/*
- * RelfilenodeMapInvalidateCallback
+ * RelfilenumberMapInvalidateCallback
* Flush mapping entries when pg_class is updated in a relevant fashion.
*/
static void
-RelfilenodeMapInvalidateCallback(Datum arg, Oid relid)
+RelfilenumberMapInvalidateCallback(Datum arg, Oid relid)
{
HASH_SEQ_STATUS status;
- RelfilenodeMapEntry *entry;
+ RelfilenumberMapEntry *entry;
/* callback only gets registered after creating the hash */
- Assert(RelfilenodeMapHash != NULL);
+ Assert(RelfilenumberMapHash != NULL);
- hash_seq_init(&status, RelfilenodeMapHash);
- while ((entry = (RelfilenodeMapEntry *) hash_seq_search(&status)) != NULL)
+ hash_seq_init(&status, RelfilenumberMapHash);
+ while ((entry = (RelfilenumberMapEntry *) hash_seq_search(&status)) != NULL)
{
/*
* If relid is InvalidOid, signaling a complete reset, we must remove
entry->relid == InvalidOid || /* negative cache entry */
entry->relid == relid) /* individual flushed relation */
{
- if (hash_search(RelfilenodeMapHash,
+ if (hash_search(RelfilenumberMapHash,
(void *) &entry->key,
HASH_REMOVE,
NULL) == NULL)
}
/*
- * InitializeRelfilenodeMap
+ * InitializeRelfilenumberMap
* Initialize cache, either on first use or after a reset.
*/
static void
-InitializeRelfilenodeMap(void)
+InitializeRelfilenumberMap(void)
{
HASHCTL ctl;
int i;
CreateCacheMemoryContext();
/* build skey */
- MemSet(&relfilenode_skey, 0, sizeof(relfilenode_skey));
+ MemSet(&relfilenumber_skey, 0, sizeof(relfilenumber_skey));
for (i = 0; i < 2; i++)
{
fmgr_info_cxt(F_OIDEQ,
- &relfilenode_skey[i].sk_func,
+ &relfilenumber_skey[i].sk_func,
CacheMemoryContext);
- relfilenode_skey[i].sk_strategy = BTEqualStrategyNumber;
- relfilenode_skey[i].sk_subtype = InvalidOid;
- relfilenode_skey[i].sk_collation = InvalidOid;
+ relfilenumber_skey[i].sk_strategy = BTEqualStrategyNumber;
+ relfilenumber_skey[i].sk_subtype = InvalidOid;
+ relfilenumber_skey[i].sk_collation = InvalidOid;
}
- relfilenode_skey[0].sk_attno = Anum_pg_class_reltablespace;
- relfilenode_skey[1].sk_attno = Anum_pg_class_relfilenode;
+ relfilenumber_skey[0].sk_attno = Anum_pg_class_reltablespace;
+ relfilenumber_skey[1].sk_attno = Anum_pg_class_relfilenode;
/*
- * Only create the RelfilenodeMapHash now, so we don't end up partially
+ * Only create the RelfilenumberMapHash now, so we don't end up partially
* initialized when fmgr_info_cxt() above ERRORs out with an out of memory
* error.
*/
- ctl.keysize = sizeof(RelfilenodeMapKey);
- ctl.entrysize = sizeof(RelfilenodeMapEntry);
+ ctl.keysize = sizeof(RelfilenumberMapKey);
+ ctl.entrysize = sizeof(RelfilenumberMapEntry);
ctl.hcxt = CacheMemoryContext;
- RelfilenodeMapHash =
- hash_create("RelfilenodeMap cache", 64, &ctl,
+ RelfilenumberMapHash =
+ hash_create("RelfilenumberMap cache", 64, &ctl,
HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
/* Watch for invalidation events. */
- CacheRegisterRelcacheCallback(RelfilenodeMapInvalidateCallback,
+ CacheRegisterRelcacheCallback(RelfilenumberMapInvalidateCallback,
(Datum) 0);
}
/*
- * Map a relation's (tablespace, filenode) to a relation's oid and cache the
- * result.
+ * Map a relation's (tablespace, relfilenumber) to a relation's oid and cache
+ * the result.
*
* Returns InvalidOid if no relation matching the criteria could be found.
*/
Oid
-RelidByRelfilenode(Oid reltablespace, Oid relfilenode)
+RelidByRelfilenumber(Oid reltablespace, RelFileNumber relfilenumber)
{
- RelfilenodeMapKey key;
- RelfilenodeMapEntry *entry;
+ RelfilenumberMapKey key;
+ RelfilenumberMapEntry *entry;
bool found;
SysScanDesc scandesc;
Relation relation;
ScanKeyData skey[2];
Oid relid;
- if (RelfilenodeMapHash == NULL)
- InitializeRelfilenodeMap();
+ if (RelfilenumberMapHash == NULL)
+ InitializeRelfilenumberMap();
/* pg_class will show 0 when the value is actually MyDatabaseTableSpace */
if (reltablespace == MyDatabaseTableSpace)
MemSet(&key, 0, sizeof(key));
key.reltablespace = reltablespace;
- key.relfilenode = relfilenode;
+ key.relfilenumber = relfilenumber;
/*
* Check cache and return entry if one is found. Even if no target
* since querying invalid values isn't supposed to be a frequent thing,
* but it's basically free.
*/
- entry = hash_search(RelfilenodeMapHash, (void *) &key, HASH_FIND, &found);
+ entry = hash_search(RelfilenumberMapHash, (void *) &key, HASH_FIND, &found);
if (found)
return entry->relid;
/*
* Ok, shared table, check relmapper.
*/
- relid = RelationMapFilenodeToOid(relfilenode, true);
+ relid = RelationMapFilenumberToOid(relfilenumber, true);
}
else
{
relation = table_open(RelationRelationId, AccessShareLock);
/* copy scankey to local copy, it will be modified during the scan */
- memcpy(skey, relfilenode_skey, sizeof(skey));
+ memcpy(skey, relfilenumber_skey, sizeof(skey));
/* set scan arguments */
skey[0].sk_argument = ObjectIdGetDatum(reltablespace);
- skey[1].sk_argument = ObjectIdGetDatum(relfilenode);
+ skey[1].sk_argument = ObjectIdGetDatum(relfilenumber);
scandesc = systable_beginscan(relation,
ClassTblspcRelfilenodeIndexId,
if (found)
elog(ERROR,
- "unexpected duplicate for tablespace %u, relfilenode %u",
- reltablespace, relfilenode);
+ "unexpected duplicate for tablespace %u, relfilenumber %u",
+ reltablespace, relfilenumber);
found = true;
Assert(classform->reltablespace == reltablespace);
- Assert(classform->relfilenode == relfilenode);
+ Assert(classform->relfilenode == relfilenumber);
relid = classform->oid;
}
/* check for tables that are mapped but not shared */
if (!found)
- relid = RelationMapFilenodeToOid(relfilenode, false);
+ relid = RelationMapFilenumberToOid(relfilenumber, false);
}
/*
* caused cache invalidations to be executed which would have deleted a
* new entry if we had entered it above.
*/
- entry = hash_search(RelfilenodeMapHash, (void *) &key, HASH_ENTER, &found);
+ entry = hash_search(RelfilenumberMapHash, (void *) &key, HASH_ENTER, &found);
if (found)
elog(ERROR, "corrupted hashtable");
entry->relid = relid;
/*-------------------------------------------------------------------------
*
* relmapper.c
- * Catalog-to-filenode mapping
+ * Catalog-to-filenumber mapping
*
* For most tables, the physical file underlying the table is specified by
* pg_class.relfilenode. However, that obviously won't work for pg_class
* update other databases' pg_class entries when relocating a shared catalog.
* Therefore, for these special catalogs (henceforth referred to as "mapped
* catalogs") we rely on a separately maintained file that shows the mapping
- * from catalog OIDs to filenode numbers. Each database has a map file for
+ * from catalog OIDs to filenumbers. Each database has a map file for
* its local mapped catalogs, and there is a separate map file for shared
* catalogs. Mapped catalogs have zero in their pg_class.relfilenode entries.
*
typedef struct RelMapping
{
Oid mapoid; /* OID of a catalog */
- Oid mapfilenode; /* its filenode number */
+ RelFileNumber mapfilenumber; /* its rel file number */
} RelMapping;
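As a usage sketch: since mapped catalogs keep zero in pg_class.relfilenode,
code needing their physical file number consults the map instead, e.g. for
pg_class itself (which is mapped and database-local, hence shared = false):

    RelFileNumber filenumber = RelationMapOidToFilenumber(RelationRelationId,
                                                          false);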
typedef struct RelMapFile
* subtransactions, so one set of transaction-level changes is sufficient.
*
* The active_xxx variables contain updates that are valid in our transaction
- * and should be honored by RelationMapOidToFilenode. The pending_xxx
+ * and should be honored by RelationMapOidToFilenumber. The pending_xxx
* variables contain updates we have been told about that aren't active yet;
* they will become active at the next CommandCounterIncrement. This setup
* lets map updates act similarly to updates of pg_class rows, ie, they
/* non-export function prototypes */
-static void apply_map_update(RelMapFile *map, Oid relationId, Oid fileNode,
- bool add_okay);
+static void apply_map_update(RelMapFile *map, Oid relationId,
+ RelFileNumber fileNumber, bool add_okay);
static void merge_map_updates(RelMapFile *map, const RelMapFile *updates,
bool add_okay);
static void load_relmap_file(bool shared, bool lock_held);
/*
- * RelationMapOidToFilenode
+ * RelationMapOidToFilenumber
*
- * The raison d' etre ... given a relation OID, look up its filenode.
+ * The raison d' etre ... given a relation OID, look up its filenumber.
*
* Although shared and local relation OIDs should never overlap, the caller
* always knows which we need --- so pass that information to avoid useless
* searching.
*
- * Returns InvalidOid if the OID is not known (which should never happen,
- * but the caller is in a better position to report a meaningful error).
+ * Returns InvalidRelFileNumber if the OID is not known (which should never
+ * happen, but the caller is in a better position to report a meaningful
+ * error).
*/
-Oid
-RelationMapOidToFilenode(Oid relationId, bool shared)
+RelFileNumber
+RelationMapOidToFilenumber(Oid relationId, bool shared)
{
const RelMapFile *map;
int32 i;
for (i = 0; i < map->num_mappings; i++)
{
if (relationId == map->mappings[i].mapoid)
- return map->mappings[i].mapfilenode;
+ return map->mappings[i].mapfilenumber;
}
map = &shared_map;
for (i = 0; i < map->num_mappings; i++)
{
if (relationId == map->mappings[i].mapoid)
- return map->mappings[i].mapfilenode;
+ return map->mappings[i].mapfilenumber;
}
}
else
for (i = 0; i < map->num_mappings; i++)
{
if (relationId == map->mappings[i].mapoid)
- return map->mappings[i].mapfilenode;
+ return map->mappings[i].mapfilenumber;
}
map = &local_map;
for (i = 0; i < map->num_mappings; i++)
{
if (relationId == map->mappings[i].mapoid)
- return map->mappings[i].mapfilenode;
+ return map->mappings[i].mapfilenumber;
}
}
- return InvalidOid;
+ return InvalidRelFileNumber;
}
/*
- * RelationMapFilenodeToOid
+ * RelationMapFilenumberToOid
*
* Do the reverse of the normal direction of mapping done in
- * RelationMapOidToFilenode.
+ * RelationMapOidToFilenumber.
*
* This is not supposed to be used during normal running but rather for
* information purposes when looking at the filesystem or xlog.
*
* Returns InvalidOid if the OID is not known; this can easily happen if the
- * relfilenode doesn't pertain to a mapped relation.
+ * relfilenumber doesn't pertain to a mapped relation.
*/
Oid
-RelationMapFilenodeToOid(Oid filenode, bool shared)
+RelationMapFilenumberToOid(RelFileNumber filenumber, bool shared)
{
const RelMapFile *map;
int32 i;
map = &active_shared_updates;
for (i = 0; i < map->num_mappings; i++)
{
- if (filenode == map->mappings[i].mapfilenode)
+ if (filenumber == map->mappings[i].mapfilenumber)
return map->mappings[i].mapoid;
}
map = &shared_map;
for (i = 0; i < map->num_mappings; i++)
{
- if (filenode == map->mappings[i].mapfilenode)
+ if (filenumber == map->mappings[i].mapfilenumber)
return map->mappings[i].mapoid;
}
}
map = &active_local_updates;
for (i = 0; i < map->num_mappings; i++)
{
- if (filenode == map->mappings[i].mapfilenode)
+ if (filenumber == map->mappings[i].mapfilenumber)
return map->mappings[i].mapoid;
}
map = &local_map;
for (i = 0; i < map->num_mappings; i++)
{
- if (filenode == map->mappings[i].mapfilenode)
+ if (filenumber == map->mappings[i].mapfilenumber)
return map->mappings[i].mapoid;
}
}
}
/*
- * RelationMapOidToFilenodeForDatabase
+ * RelationMapOidToFilenumberForDatabase
*
- * Like RelationMapOidToFilenode, but reads the mapping from the indicated
+ * Like RelationMapOidToFilenumber, but reads the mapping from the indicated
* path instead of using the one for the current database.
*/
-Oid
-RelationMapOidToFilenodeForDatabase(char *dbpath, Oid relationId)
+RelFileNumber
+RelationMapOidToFilenumberForDatabase(char *dbpath, Oid relationId)
{
RelMapFile map;
int i;
for (i = 0; i < map.num_mappings; i++)
{
if (relationId == map.mappings[i].mapoid)
- return map.mappings[i].mapfilenode;
+ return map.mappings[i].mapfilenumber;
}
- return InvalidOid;
+ return InvalidRelFileNumber;
}
/*
/*
* RelationMapUpdateMap
*
- * Install a new relfilenode mapping for the specified relation.
+ * Install a new relfilenumber mapping for the specified relation.
*
* If immediate is true (or we're bootstrapping), the mapping is activated
* immediately. Otherwise it is made pending until CommandCounterIncrement.
*/
void
-RelationMapUpdateMap(Oid relationId, Oid fileNode, bool shared,
+RelationMapUpdateMap(Oid relationId, RelFileNumber fileNumber, bool shared,
bool immediate)
{
RelMapFile *map;
map = &pending_local_updates;
}
}
- apply_map_update(map, relationId, fileNode, true);
+ apply_map_update(map, relationId, fileNumber, true);
}
/*
* add_okay = false to draw an error if not.
*/
static void
-apply_map_update(RelMapFile *map, Oid relationId, Oid fileNode, bool add_okay)
+apply_map_update(RelMapFile *map, Oid relationId, RelFileNumber fileNumber,
+ bool add_okay)
{
int32 i;
{
if (relationId == map->mappings[i].mapoid)
{
- map->mappings[i].mapfilenode = fileNode;
+ map->mappings[i].mapfilenumber = fileNumber;
return;
}
}
if (map->num_mappings >= MAX_MAPPINGS)
elog(ERROR, "ran out of space in relation map");
map->mappings[map->num_mappings].mapoid = relationId;
- map->mappings[map->num_mappings].mapfilenode = fileNode;
+ map->mappings[map->num_mappings].mapfilenumber = fileNumber;
map->num_mappings++;
}
{
apply_map_update(map,
updates->mappings[i].mapoid,
- updates->mappings[i].mapfilenode,
+ updates->mappings[i].mapfilenumber,
add_okay);
}
}
for (i = 0; i < newmap->num_mappings; i++)
{
- RelFileNode rnode;
+ RelFileLocator rlocator;
- rnode.spcNode = tsid;
- rnode.dbNode = dbid;
- rnode.relNode = newmap->mappings[i].mapfilenode;
- RelationPreserveStorage(rnode, false);
+ rlocator.spcOid = tsid;
+ rlocator.dbOid = dbid;
+ rlocator.relNumber = newmap->mappings[i].mapfilenumber;
+ RelationPreserveStorage(rlocator, false);
}
}
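For reference, the renamed struct being filled in here is defined in storage/relfilelocator.h elsewhere in this patch; abridged:

	typedef struct RelFileLocator
	{
		Oid			spcOid;			/* tablespace */
		Oid			dbOid;			/* database */
		RelFileNumber relNumber;	/* relation */
	} RelFileLocator;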
{
PQExpBuffer upgrade_query = createPQExpBuffer();
PGresult *upgrade_res;
- Oid relfilenode;
+ RelFileNumber relfilenumber;
Oid toast_oid;
- Oid toast_relfilenode;
+ RelFileNumber toast_relfilenumber;
char relkind;
Oid toast_index_oid;
- Oid toast_index_relfilenode;
+ RelFileNumber toast_index_relfilenumber;
/*
- * Preserve the OID and relfilenode of the table, table's index, table's
+ * Preserve the OID and relfilenumber of the table, table's index, table's
* toast table and toast table's index if any.
*
* One complexity is that the current table definition might not require
relkind = *PQgetvalue(upgrade_res, 0, PQfnumber(upgrade_res, "relkind"));
- relfilenode = atooid(PQgetvalue(upgrade_res, 0,
- PQfnumber(upgrade_res, "relfilenode")));
+ relfilenumber = atooid(PQgetvalue(upgrade_res, 0,
+ PQfnumber(upgrade_res, "relfilenode")));
toast_oid = atooid(PQgetvalue(upgrade_res, 0,
PQfnumber(upgrade_res, "reltoastrelid")));
- toast_relfilenode = atooid(PQgetvalue(upgrade_res, 0,
- PQfnumber(upgrade_res, "toast_relfilenode")));
+ toast_relfilenumber = atooid(PQgetvalue(upgrade_res, 0,
+ PQfnumber(upgrade_res, "toast_relfilenode")));
toast_index_oid = atooid(PQgetvalue(upgrade_res, 0,
PQfnumber(upgrade_res, "indexrelid")));
- toast_index_relfilenode = atooid(PQgetvalue(upgrade_res, 0,
- PQfnumber(upgrade_res, "toast_index_relfilenode")));
+ toast_index_relfilenumber = atooid(PQgetvalue(upgrade_res, 0,
+ PQfnumber(upgrade_res, "toast_index_relfilenode")));
appendPQExpBufferStr(upgrade_buffer,
"\n-- For binary upgrade, must preserve pg_class oids and relfilenodes\n");
/*
* Not every relation has storage. Also, in a pre-v12 database,
- * partitioned tables have a relfilenode, which should not be
+ * partitioned tables have a relfilenumber, which should not be
* preserved when upgrading.
*/
- if (OidIsValid(relfilenode) && relkind != RELKIND_PARTITIONED_TABLE)
+ if (RelFileNumberIsValid(relfilenumber) && relkind != RELKIND_PARTITIONED_TABLE)
appendPQExpBuffer(upgrade_buffer,
"SELECT pg_catalog.binary_upgrade_set_next_heap_relfilenode('%u'::pg_catalog.oid);\n",
- relfilenode);
+ relfilenumber);
/*
* In a pre-v12 database, partitioned tables might be marked as having
toast_oid);
appendPQExpBuffer(upgrade_buffer,
"SELECT pg_catalog.binary_upgrade_set_next_toast_relfilenode('%u'::pg_catalog.oid);\n",
- toast_relfilenode);
+ toast_relfilenumber);
/* every toast table has an index */
appendPQExpBuffer(upgrade_buffer,
toast_index_oid);
appendPQExpBuffer(upgrade_buffer,
"SELECT pg_catalog.binary_upgrade_set_next_index_relfilenode('%u'::pg_catalog.oid);\n",
- toast_index_relfilenode);
+ toast_index_relfilenumber);
}
PQclear(upgrade_res);
}
else
{
- /* Preserve the OID and relfilenode of the index */
+ /* Preserve the OID and relfilenumber of the index */
appendPQExpBuffer(upgrade_buffer,
"SELECT pg_catalog.binary_upgrade_set_next_index_pg_class_oid('%u'::pg_catalog.oid);\n",
pg_class_oid);
appendPQExpBuffer(upgrade_buffer,
"SELECT pg_catalog.binary_upgrade_set_next_index_relfilenode('%u'::pg_catalog.oid);\n",
- relfilenode);
+ relfilenumber);
}
appendPQExpBufferChar(upgrade_buffer, '\n');
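With a hypothetical OID of 16384, the dump text emitted by the heap branch above reads:

	-- For binary upgrade, must preserve pg_class oids and relfilenodes
	SELECT pg_catalog.binary_upgrade_set_next_heap_relfilenode('16384'::pg_catalog.oid);

Note that the user-visible SQL keeps the historical "relfilenode" spelling; only the C-level variables and types are renamed.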
#define DATAPAGEMAP_H
#include "storage/block.h"
-#include "storage/relfilenode.h"
+#include "storage/relfilelocator.h"
struct datapagemap
{
static filehash_hash *filehash;
static bool isRelDataFile(const char *path);
-static char *datasegpath(RelFileNode rnode, ForkNumber forknum,
+static char *datasegpath(RelFileLocator rlocator, ForkNumber forknum,
BlockNumber segno);
static file_entry_t *insert_filehash_entry(const char *path);
* hash table!
*/
void
-process_target_wal_block_change(ForkNumber forknum, RelFileNode rnode,
+process_target_wal_block_change(ForkNumber forknum, RelFileLocator rlocator,
BlockNumber blkno)
{
char *path;
segno = blkno / RELSEG_SIZE;
blkno_inseg = blkno % RELSEG_SIZE;
- path = datasegpath(rnode, forknum, segno);
+ path = datasegpath(rlocator, forknum, segno);
entry = lookup_filehash_entry(path);
pfree(path);
static bool
isRelDataFile(const char *path)
{
- RelFileNode rnode;
+ RelFileLocator rlocator;
unsigned int segNo;
int nmatch;
bool matched;
*
*----
*/
- rnode.spcNode = InvalidOid;
- rnode.dbNode = InvalidOid;
- rnode.relNode = InvalidOid;
+ rlocator.spcOid = InvalidOid;
+ rlocator.dbOid = InvalidOid;
+ rlocator.relNumber = InvalidRelFileNumber;
segNo = 0;
matched = false;
- nmatch = sscanf(path, "global/%u.%u", &rnode.relNode, &segNo);
+ nmatch = sscanf(path, "global/%u.%u", &rlocator.relNumber, &segNo);
if (nmatch == 1 || nmatch == 2)
{
- rnode.spcNode = GLOBALTABLESPACE_OID;
- rnode.dbNode = 0;
+ rlocator.spcOid = GLOBALTABLESPACE_OID;
+ rlocator.dbOid = 0;
matched = true;
}
else
{
nmatch = sscanf(path, "base/%u/%u.%u",
- &rnode.dbNode, &rnode.relNode, &segNo);
+ &rlocator.dbOid, &rlocator.relNumber, &segNo);
if (nmatch == 2 || nmatch == 3)
{
- rnode.spcNode = DEFAULTTABLESPACE_OID;
+ rlocator.spcOid = DEFAULTTABLESPACE_OID;
matched = true;
}
else
{
nmatch = sscanf(path, "pg_tblspc/%u/" TABLESPACE_VERSION_DIRECTORY "/%u/%u.%u",
- &rnode.spcNode, &rnode.dbNode, &rnode.relNode,
+ &rlocator.spcOid, &rlocator.dbOid, &rlocator.relNumber,
&segNo);
if (nmatch == 3 || nmatch == 4)
matched = true;
/*
* The sscanf tests above can match files that have extra characters at
* the end. To eliminate such cases, cross-check that GetRelationPath
- * creates the exact same filename, when passed the RelFileNode
+ * creates the exact same filename, when passed the RelFileLocator
* information we extracted from the filename.
*/
if (matched)
{
- char *check_path = datasegpath(rnode, MAIN_FORKNUM, segNo);
+ char *check_path = datasegpath(rlocator, MAIN_FORKNUM, segNo);
if (strcmp(check_path, path) != 0)
matched = false;
* The returned path is palloc'd
*/
static char *
-datasegpath(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
+datasegpath(RelFileLocator rlocator, ForkNumber forknum, BlockNumber segno)
{
char *path;
char *segpath;
- path = relpathperm(rnode, forknum);
+ path = relpathperm(rlocator, forknum);
if (segno > 0)
{
segpath = psprintf("%s.%u", path, segno);
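To make the cross-check concrete with hypothetical OIDs: "base/16384/16385.1" matches the second sscanf pattern above (dbOid = 16384, relNumber = 16385, segNo = 1, spcOid = DEFAULTTABLESPACE_OID), and datasegpath() rebuilds exactly the same string, since relpathperm() yields "base/16384/16385" and the nonzero segno appends ".1". A name with trailing garbage such as "base/16384/16385.1x" also satisfies sscanf but fails the strcmp, which is precisely the case the cross-check rejects.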
#include "datapagemap.h"
#include "storage/block.h"
-#include "storage/relfilenode.h"
+#include "storage/relfilelocator.h"
/* these enum values are sorted in the order we want actions to be processed */
typedef enum
extern void process_target_file(const char *path, file_type_t type,
size_t size, const char *link_target);
extern void process_target_wal_block_change(ForkNumber forknum,
- RelFileNode rnode,
+ RelFileLocator rlocator,
BlockNumber blkno);
extern filemap_t *decide_file_actions(void);
for (block_id = 0; block_id <= XLogRecMaxBlockId(record); block_id++)
{
- RelFileNode rnode;
+ RelFileLocator rlocator;
ForkNumber forknum;
BlockNumber blkno;
if (!XLogRecGetBlockTagExtended(record, block_id,
- &rnode, &forknum, &blkno, NULL))
+ &rlocator, &forknum, &blkno, NULL))
continue;
/* We only care about the main fork; others are copied in toto */
if (forknum != MAIN_FORKNUM)
continue;
- process_target_wal_block_change(forknum, rnode, blkno);
+ process_target_wal_block_change(forknum, rlocator, blkno);
}
}
#include "datapagemap.h"
#include "libpq-fe.h"
#include "storage/block.h"
-#include "storage/relfilenode.h"
+#include "storage/relfilelocator.h"
/* Configuration options */
extern char *datadir_target;
option.o \
parallel.o \
pg_upgrade.o \
- relfilenode.o \
+ relfilenumber.o \
server.o \
tablespace.o \
util.o \
map->new_tablespace_suffix = new_cluster.tablespace_suffix;
}
- /* DB oid and relfilenodes are preserved between old and new cluster */
+ /* DB oid and relfilenumbers are preserved between old and new cluster */
map->db_oid = old_db->db_oid;
- map->relfilenode = old_rel->relfilenode;
+ map->relfilenumber = old_rel->relfilenumber;
/* used only for logging and error reporting, old/new are identical */
map->nspname = old_rel->nspname;
i_reloid,
i_indtable,
i_toastheap,
- i_relfilenode,
+ i_relfilenumber,
i_reltablespace;
char query[QUERY_ALLOC];
char *last_namespace = NULL,
i_toastheap = PQfnumber(res, "toastheap");
i_nspname = PQfnumber(res, "nspname");
i_relname = PQfnumber(res, "relname");
- i_relfilenode = PQfnumber(res, "relfilenode");
+ i_relfilenumber = PQfnumber(res, "relfilenode");
i_reltablespace = PQfnumber(res, "reltablespace");
i_spclocation = PQfnumber(res, "spclocation");
relname = PQgetvalue(res, relnum, i_relname);
curr->relname = pg_strdup(relname);
- curr->relfilenode = atooid(PQgetvalue(res, relnum, i_relfilenode));
+ curr->relfilenumber = atooid(PQgetvalue(res, relnum, i_relfilenumber));
curr->tblsp_alloc = false;
/* Is the tablespace oid non-default? */
char *nspname; /* namespace name */
char *relname; /* relation name */
Oid reloid; /* relation OID */
- Oid relfilenode; /* relation file node */
+ RelFileNumber relfilenumber; /* relation file number */
Oid indtable; /* if index, OID of its table, else 0 */
Oid toastheap; /* if toast table, OID of base table, else 0 */
char *tablespace; /* tablespace path; "" for cluster default */
const char *old_tablespace_suffix;
const char *new_tablespace_suffix;
Oid db_oid;
- Oid relfilenode;
+ RelFileNumber relfilenumber;
/* the rest are used only for logging and error reporting */
char *nspname; /* namespaces */
char *relname;
void adjust_data_dir(ClusterInfo *cluster);
void get_sock_dir(ClusterInfo *cluster, bool live_check);
-/* relfilenode.c */
+/* relfilenumber.c */
void transfer_all_new_tablespaces(DbInfoArr *old_db_arr,
DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata);
/*
- * relfilenode.c
+ * relfilenumber.c
*
- * relfilenode functions
+ * relfilenumber functions
*
* Copyright (c) 2010-2022, PostgreSQL Global Development Group
- * src/bin/pg_upgrade/relfilenode.c
+ * src/bin/pg_upgrade/relfilenumber.c
*/
#include "postgres_fe.h"
/*
* Now copy/link any related segments as well. Remember, PG breaks large
* files into 1GB segments, the first segment has no extension, subsequent
- * segments are named relfilenode.1, relfilenode.2, relfilenode.3.
+ * segments are named relfilenumber.1, relfilenumber.2, relfilenumber.3.
*/
for (segno = 0;; segno++)
{
map->old_tablespace,
map->old_tablespace_suffix,
map->db_oid,
- map->relfilenode,
+ map->relfilenumber,
type_suffix,
extent_suffix);
snprintf(new_file, sizeof(new_file), "%s%s/%u/%u%s%s",
map->new_tablespace,
map->new_tablespace_suffix,
map->db_oid,
- map->relfilenode,
+ map->relfilenumber,
type_suffix,
extent_suffix);
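As a hypothetical illustration of the "%s%s/%u/%u%s%s" format: a tablespace path of "/data/old/base", an empty tablespace suffix, db_oid = 16384, relfilenumber = 16385, type_suffix = "_vm" and extent_suffix = ".2" produce:

	/data/old/base/16384/16385_vm.2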
static int WalSegSz;
static volatile sig_atomic_t time_to_stop = false;
-static const RelFileNode emptyRelFileNode = {0, 0, 0};
+static const RelFileLocator emptyRelFileLocator = {0, 0, 0};
typedef struct XLogDumpPrivate
{
bool filter_by_rmgr_enabled;
TransactionId filter_by_xid;
bool filter_by_xid_enabled;
- RelFileNode filter_by_relation;
+ RelFileLocator filter_by_relation;
bool filter_by_extended;
bool filter_by_relation_enabled;
BlockNumber filter_by_relation_block;
*/
static bool
XLogRecordMatchesRelationBlock(XLogReaderState *record,
- RelFileNode matchRnode,
+ RelFileLocator matchRlocator,
BlockNumber matchBlock,
ForkNumber matchFork)
{
for (block_id = 0; block_id <= XLogRecMaxBlockId(record); block_id++)
{
- RelFileNode rnode;
+ RelFileLocator rlocator;
ForkNumber forknum;
BlockNumber blk;
if (!XLogRecGetBlockTagExtended(record, block_id,
- &rnode, &forknum, &blk, NULL))
+ &rlocator, &forknum, &blk, NULL))
continue;
if ((matchFork == InvalidForkNumber || matchFork == forknum) &&
- (RelFileNodeEquals(matchRnode, emptyRelFileNode) ||
- RelFileNodeEquals(matchRnode, rnode)) &&
+ (RelFileLocatorEquals(matchRlocator, emptyRelFileLocator) ||
+ RelFileLocatorEquals(matchRlocator, rlocator)) &&
(matchBlock == InvalidBlockNumber || matchBlock == blk))
return true;
}
break;
case 'R':
if (sscanf(optarg, "%u/%u/%u",
- &config.filter_by_relation.spcNode,
- &config.filter_by_relation.dbNode,
- &config.filter_by_relation.relNode) != 3 ||
- !OidIsValid(config.filter_by_relation.spcNode) ||
- !OidIsValid(config.filter_by_relation.relNode))
+ &config.filter_by_relation.spcOid,
+ &config.filter_by_relation.dbOid,
+ &config.filter_by_relation.relNumber) != 3 ||
+ !OidIsValid(config.filter_by_relation.spcOid) ||
+ !RelFileNumberIsValid(config.filter_by_relation.relNumber))
{
pg_log_error("invalid relation specification: \"%s\"", optarg);
pg_log_error_detail("Expecting \"tablespace OID/database OID/relation filenode\".");
!XLogRecordMatchesRelationBlock(xlogreader_state,
config.filter_by_relation_enabled ?
config.filter_by_relation :
- emptyRelFileNode,
+ emptyRelFileLocator,
config.filter_by_relation_block_enabled ?
config.filter_by_relation_block :
InvalidBlockNumber,
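In terms of usage, nothing changes for pg_waldump callers: the -R filter still takes three slash-separated numbers, e.g. (with hypothetical OIDs and segment name):

	pg_waldump -R 1663/16384/16385 -B 0 000000010000000000000001

Per the validation above, the tablespace OID and relation number must be nonzero, while a database OID of 0 remains acceptable for shared relations.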
* XXX this must agree with GetRelationPath()!
*/
char *
-GetDatabasePath(Oid dbNode, Oid spcNode)
+GetDatabasePath(Oid dbOid, Oid spcOid)
{
- if (spcNode == GLOBALTABLESPACE_OID)
+ if (spcOid == GLOBALTABLESPACE_OID)
{
/* Shared system relations live in {datadir}/global */
- Assert(dbNode == 0);
+ Assert(dbOid == 0);
return pstrdup("global");
}
- else if (spcNode == DEFAULTTABLESPACE_OID)
+ else if (spcOid == DEFAULTTABLESPACE_OID)
{
/* The default tablespace is {datadir}/base */
- return psprintf("base/%u", dbNode);
+ return psprintf("base/%u", dbOid);
}
else
{
/* All other tablespaces are accessed via symlinks */
return psprintf("pg_tblspc/%u/%s/%u",
- spcNode, TABLESPACE_VERSION_DIRECTORY, dbNode);
+ spcOid, TABLESPACE_VERSION_DIRECTORY, dbOid);
}
}
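Concretely, with hypothetical OIDs (and an illustrative tablespace version directory):

	GetDatabasePath(0, 1664)		/* GLOBALTABLESPACE_OID */	-> "global"
	GetDatabasePath(16384, 1663)	/* DEFAULTTABLESPACE_OID */	-> "base/16384"
	GetDatabasePath(16384, 16500)	-> "pg_tblspc/16500/PG_15_202209061/16384"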
* the trouble considering BackendId is just int anyway.
*/
char *
-GetRelationPath(Oid dbNode, Oid spcNode, Oid relNode,
+GetRelationPath(Oid dbOid, Oid spcOid, RelFileNumber relNumber,
int backendId, ForkNumber forkNumber)
{
char *path;
- if (spcNode == GLOBALTABLESPACE_OID)
+ if (spcOid == GLOBALTABLESPACE_OID)
{
/* Shared system relations live in {datadir}/global */
- Assert(dbNode == 0);
+ Assert(dbOid == 0);
Assert(backendId == InvalidBackendId);
if (forkNumber != MAIN_FORKNUM)
path = psprintf("global/%u_%s",
- relNode, forkNames[forkNumber]);
+ relNumber, forkNames[forkNumber]);
else
- path = psprintf("global/%u", relNode);
+ path = psprintf("global/%u", relNumber);
}
- else if (spcNode == DEFAULTTABLESPACE_OID)
+ else if (spcOid == DEFAULTTABLESPACE_OID)
{
/* The default tablespace is {datadir}/base */
if (backendId == InvalidBackendId)
{
if (forkNumber != MAIN_FORKNUM)
path = psprintf("base/%u/%u_%s",
- dbNode, relNode,
+ dbOid, relNumber,
forkNames[forkNumber]);
else
path = psprintf("base/%u/%u",
- dbNode, relNode);
+ dbOid, relNumber);
}
else
{
if (forkNumber != MAIN_FORKNUM)
path = psprintf("base/%u/t%d_%u_%s",
- dbNode, backendId, relNode,
+ dbOid, backendId, relNumber,
forkNames[forkNumber]);
else
path = psprintf("base/%u/t%d_%u",
- dbNode, backendId, relNode);
+ dbOid, backendId, relNumber);
}
}
else
{
if (forkNumber != MAIN_FORKNUM)
path = psprintf("pg_tblspc/%u/%s/%u/%u_%s",
- spcNode, TABLESPACE_VERSION_DIRECTORY,
- dbNode, relNode,
+ spcOid, TABLESPACE_VERSION_DIRECTORY,
+ dbOid, relNumber,
forkNames[forkNumber]);
else
path = psprintf("pg_tblspc/%u/%s/%u/%u",
- spcNode, TABLESPACE_VERSION_DIRECTORY,
- dbNode, relNode);
+ spcOid, TABLESPACE_VERSION_DIRECTORY,
+ dbOid, relNumber);
}
else
{
if (forkNumber != MAIN_FORKNUM)
path = psprintf("pg_tblspc/%u/%s/%u/t%d_%u_%s",
- spcNode, TABLESPACE_VERSION_DIRECTORY,
- dbNode, backendId, relNode,
+ spcOid, TABLESPACE_VERSION_DIRECTORY,
+ dbOid, backendId, relNumber,
forkNames[forkNumber]);
else
path = psprintf("pg_tblspc/%u/%s/%u/t%d_%u",
- spcNode, TABLESPACE_VERSION_DIRECTORY,
- dbNode, backendId, relNode);
+ spcOid, TABLESPACE_VERSION_DIRECTORY,
+ dbOid, backendId, relNumber);
}
}
return path;
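The same hypothetical OIDs, exercised across the branches above:

	GetRelationPath(16384, 1663, 16385, InvalidBackendId, MAIN_FORKNUM)	-> "base/16384/16385"
	GetRelationPath(16384, 1663, 16385, InvalidBackendId, FSM_FORKNUM)		-> "base/16384/16385_fsm"
	GetRelationPath(16384, 1663, 16385, 3, MAIN_FORKNUM)					-> "base/16384/t3_16385"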
#include "lib/stringinfo.h"
#include "storage/bufpage.h"
#include "storage/itemptr.h"
-#include "storage/relfilenode.h"
+#include "storage/relfilelocator.h"
#include "utils/relcache.h"
typedef struct ginxlogSplit
{
- RelFileNode node;
+ RelFileLocator locator;
BlockNumber rrlink; /* right link, or root's blocknumber if root
* split */
BlockNumber leftChildBlkno; /* valid on a non-leaf split */
*/
typedef struct ginxlogUpdateMeta
{
- RelFileNode node;
+ RelFileLocator locator;
GinMetaPageData metadata;
BlockNumber prevTail;
BlockNumber newRightlink;