tbm->maxentries = (int) nbuckets;
tbm->lossify_start = 0;
tbm->dsa = dsa;
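+ /* DSA-backed members are allocated later as needed; start them out invalid */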
+ tbm->dsapagetable = InvalidDsaPointer;
+ tbm->dsapagetableold = InvalidDsaPointer;
+ tbm->ptpages = InvalidDsaPointer;
+ tbm->ptchunks = InvalidDsaPointer;
return tbm;
}
tbm_free_shared_area(dsa_area *dsa, dsa_pointer dp)
{
TBMSharedIteratorState *istate = dsa_get_address(dsa, dp);
- PTEntryArray *ptbase = dsa_get_address(dsa, istate->pagetable);
+ PTEntryArray *ptbase;
PTIterationArray *ptpages;
PTIterationArray *ptchunks;
- if (pg_atomic_sub_fetch_u32(&ptbase->refcount, 1) == 0)
- dsa_free(dsa, istate->pagetable);
-
- if (istate->spages)
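+ /* The pagetable may never have been allocated, e.g. if the bitmap is empty */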
+ if (DsaPointerIsValid(istate->pagetable))
+ {
+ ptbase = dsa_get_address(dsa, istate->pagetable);
+ if (pg_atomic_sub_fetch_u32(&ptbase->refcount, 1) == 0)
+ dsa_free(dsa, istate->pagetable);
+ }
+ if (DsaPointerIsValid(istate->spages))
{
ptpages = dsa_get_address(dsa, istate->spages);
if (pg_atomic_sub_fetch_u32(&ptpages->refcount, 1) == 0)
dsa_free(dsa, istate->spages);
}
- if (istate->schunks)
+ if (DsaPointerIsValid(istate->schunks))
{
ptchunks = dsa_get_address(dsa, istate->schunks);
if (pg_atomic_sub_fetch_u32(&ptchunks->refcount, 1) == 0)
{
dsa_pointer dp;
TBMSharedIteratorState *istate;
- PTEntryArray *ptbase;
+ PTEntryArray *ptbase = NULL;
PTIterationArray *ptpages = NULL;
PTIterationArray *ptchunks = NULL;
* Allocate TBMSharedIteratorState from DSA to hold the shared members and
* lock; this will also be used by multiple workers for shared iteration.
*/
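+ /* Zero the allocation so any unset dsa_pointer fields stay InvalidDsaPointer */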
- dp = dsa_allocate(tbm->dsa, sizeof(TBMSharedIteratorState));
+ dp = dsa_allocate0(tbm->dsa, sizeof(TBMSharedIteratorState));
istate = dsa_get_address(tbm->dsa, dp);
/*
Assert(npages == tbm->npages);
Assert(nchunks == tbm->nchunks);
}
- else
+ else if (tbm->status == TBM_ONE_PAGE)
{
/*
* In one page mode allocate the space for one pagetable entry and
ptpages->index[0] = 0;
}
- pg_atomic_init_u32(&ptbase->refcount, 0);
-
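+ /* ptbase stays NULL for an empty bitmap, so guard the refcount initialization */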
+ if (ptbase != NULL)
+ pg_atomic_init_u32(&ptbase->refcount, 0);
if (npages > 1)
qsort_arg((void *) (ptpages->index), npages, sizeof(int),
tbm_shared_comparator, (void *) ptbase->ptentry);
* increase the refcount by 1 so that while freeing the shared iterator
* we don't free pagetable and iterator array until its refcount becomes 0.
*/
- pg_atomic_add_fetch_u32(&ptbase->refcount, 1);
- if (ptpages)
+ if (ptbase != NULL)
+ pg_atomic_add_fetch_u32(&ptbase->refcount, 1);
+ if (ptpages != NULL)
pg_atomic_add_fetch_u32(&ptpages->refcount, 1);
- if (ptchunks)
+ if (ptchunks != NULL)
pg_atomic_add_fetch_u32(&ptchunks->refcount, 1);
/* Initialize the iterator lock */
{
TBMIterateResult *output = &iterator->output;
TBMSharedIteratorState *istate = iterator->state;
- PagetableEntry *ptbase = iterator->ptbase->ptentry;
- int *idxpages = iterator->ptpages->index;
- int *idxchunks = iterator->ptchunks->index;
+ PagetableEntry *ptbase = NULL;
+ int *idxpages = NULL;
+ int *idxchunks = NULL;
+
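+ /* Look up the shared arrays only if they were actually allocated */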
+ if (iterator->ptbase != NULL)
+ ptbase = iterator->ptbase->ptentry;
+ if (iterator->ptpages != NULL)
+ idxpages = iterator->ptpages->index;
+ if (iterator->ptchunks != NULL)
+ idxchunks = iterator->ptchunks->index;
/* Acquire the LWLock before accessing the shared members */
LWLockAcquire(&istate->lock, LW_EXCLUSIVE);
* Create the TBMSharedIterator struct, with enough trailing space to
* serve the needs of the TBMIterateResult sub-struct.
*/
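+ /* Zero the struct so all pointer fields start out NULL */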
- iterator = (TBMSharedIterator *) palloc(sizeof(TBMSharedIterator) +
+ iterator = (TBMSharedIterator *) palloc0(sizeof(TBMSharedIterator) +
MAX_TUPLES_PER_PAGE * sizeof(OffsetNumber));
istate = (TBMSharedIteratorState *) dsa_get_address(dsa, dp);