static Bool AMSSegRegionIsFree(Seg seg, Addr base, Addr limit)
{
  AMSSeg amsseg = MustBeA(AMSSeg, seg);
  Index baseIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), base);

  if (amsseg->allocTableInUse) {
    Index limitIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), limit);
    return BTIsResRange(amsseg->allocTable, baseIndex, limitIndex);
  } else {
    return amsseg->firstFree <= baseIndex;
  }
}
/* AMSTStressBufferedSeg -- Stress test for a buffered seg
 *
 * Test splitting or merging a buffered seg.
 *
 * .bmerge: A merge is performed when the segment has previously been
 * split and the segment above meets the constraints (i.e. it is empty,
 * not already attached to a buffer, and of the same colour).
 *
 * .bsplit: Whether or not a merge happened, a split is performed if
 * the limit of the buffered region is also the limit of an arena
 * grain, yet does not correspond to the segment limit, provided that
 * the part of the segment above the buffer is entirely free.
 */
static void AMSTStressBufferedSeg(Seg seg, Buffer buffer)
{
  AMSTSeg amstseg;
  AMST amst;
  Arena arena;
  Addr limit;
  Buffer segBuf;

  AVERT(Seg, seg);
  AVERT(Buffer, buffer);
  AVER(SegBuffer(&segBuf, seg) && segBuf == buffer);
  amstseg = Seg2AMSTSeg(seg);
  AVERT(AMSTSeg, amstseg);
  limit = BufferLimit(buffer);
  arena = PoolArena(SegPool(seg));
  amst = PoolAMST(SegPool(seg));
  AVERT(AMST, amst);

  if (amstseg->next != NULL) {
    Seg segHi = AMSTSeg2Seg(amstseg->next);
    if (AMSSegIsFree(segHi) && SegGrey(segHi) == SegGrey(seg)) {
      /* .bmerge */
      Seg mergedSeg;
      Res res;
      res = SegMerge(&mergedSeg, seg, segHi);
      if (ResOK == res) {
        amst->bmerges++;
        printf("J");
      } else {
        /* deliberate fails only */
        AVER(amst->failSegs);
      }
    }
  }

  if (SegLimit(seg) != limit
      && AddrIsArenaGrain(limit, arena)
      && AMSSegRegionIsFree(seg, limit, SegLimit(seg))) {
    /* .bsplit */
    Seg segLo, segHi;
    Res res;
    res = SegSplit(&segLo, &segHi, seg, limit);
    if (ResOK == res) {
      amst->bsplits++;
      printf("C");
    } else {
      /* deliberate fails only */
      AVER(amst->failSegs);
    }
  }
}
static void ArenaFormattedObjectsWalk(Arena arena, FormattedObjectsStepMethod f,
                                      void *p, size_t s)
{
  Seg seg;
  FormattedObjectsStepClosure c;

  AVERT(Arena, arena);
  AVER(FUNCHECK(f));
  AVER(f == ArenaFormattedObjectsStep);
  /* p and s are arbitrary closures. */
  /* Know that p is a FormattedObjectsStepClosure */
  /* Know that s is 0 */
  AVER(p != NULL);
  AVER(s == 0);
  c = p;
  AVERT(FormattedObjectsStepClosure, c);

  if (SegFirst(&seg, arena)) {
    do {
      Pool pool;
      pool = SegPool(seg);
      if (PoolHasAttr(pool, AttrFMT)) {
        ShieldExpose(arena, seg);
        PoolWalk(pool, seg, f, p, s);
        ShieldCover(arena, seg);
      }
    } while (SegNext(&seg, arena, seg));
  }
}
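/* Usage sketch (not MPS source): how a client might drive this walk
 * through the public API. This assumes the documented
 * mps_arena_formatted_objects_walk entry point and stepper signature,
 * and that the arena is parked for the duration of the walk; the
 * stepper and helper names below are illustrative only.
 */
#include "mps.h"

static void count_stepper(mps_addr_t addr, mps_fmt_t fmt, mps_pool_t pool,
                          void *p, size_t s)
{
  (void)addr; (void)fmt; (void)pool; (void)s;
  ++*(unsigned long *)p;  /* count each formatted object visited */
}

static unsigned long count_formatted_objects(mps_arena_t arena)
{
  unsigned long count = 0;
  mps_arena_park(arena);  /* no collection may be in progress during the walk */
  mps_arena_formatted_objects_walk(arena, count_stepper, &count, 0);
  mps_arena_release(arena);
  return count;
}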
static Res RootsWalkFix(Pool pool, ScanState ss, Seg seg, Ref *refIO)
{
  rootsStepClosure rsc;
  Ref ref;

  UNUSED(pool);

  AVERT(ScanState, ss);
  AVER(refIO != NULL);

  rsc = ScanState2rootsStepClosure(ss);
  AVERT(rootsStepClosure, rsc);

  ref = *refIO;

  /* If the segment isn't GCable then the ref is not to the heap and */
  /* shouldn't be passed to the client. */
  AVER(PoolHasAttr(SegPool(seg), AttrGC));

  /* Call the client closure - .assume.rootaddr */
  rsc->f((mps_addr_t *)refIO, (mps_root_t)rsc->root, rsc->p, rsc->s);

  AVER(ref == *refIO);  /* can walk object graph - but not modify it */

  return ResOK;
}
void PoolBlacken(Pool pool, TraceSet traceSet, Seg seg)
{
  AVERT(Pool, pool);
  AVERT(TraceSet, traceSet);
  AVERT(Seg, seg);
  AVER(SegPool(seg) == pool);

  Method(Pool, pool, blacken)(pool, traceSet, seg);
}
Res PoolWhiten(Pool pool, Trace trace, Seg seg)
{
  AVERT(Pool, pool);
  AVERT(Trace, trace);
  AVERT(Seg, seg);
  AVER(PoolArena(pool) == trace->arena);
  AVER(SegPool(seg) == pool);

  return Method(Pool, pool, whiten)(pool, trace, seg);
}
void PoolGrey(Pool pool, Trace trace, Seg seg)
{
  AVERT(Pool, pool);
  AVERT(Trace, trace);
  AVERT(Seg, seg);
  AVER(pool->arena == trace->arena);
  AVER(SegPool(seg) == pool);

  Method(Pool, pool, grey)(pool, trace, seg);
}
Res PoolAddrObject(Addr *pReturn, Pool pool, Seg seg, Addr addr)
{
  AVER(pReturn != NULL);
  AVERT(Pool, pool);
  AVERT(Seg, seg);
  AVER(pool == SegPool(seg));
  AVER(SegBase(seg) <= addr);
  AVER(addr < SegLimit(seg));

  return Method(Pool, pool, addrObject)(pReturn, pool, seg, addr);
}
void BufferReassignSeg(Buffer buffer, Seg seg)
{
  AVERT(Buffer, buffer);
  AVERT(Seg, seg);
  AVER(!BufferIsReset(buffer));
  AVER(BufferBase(buffer) >= SegBase(seg));
  AVER(BufferLimit(buffer) <= SegLimit(seg));
  AVER(BufferPool(buffer) == SegPool(seg));

  Method(Buffer, buffer, reassignSeg)(buffer, seg);
}
Res PoolFix(Pool pool, ScanState ss, Seg seg, Addr *refIO)
{
  AVERT_CRITICAL(Pool, pool);
  AVERT_CRITICAL(ScanState, ss);
  AVERT_CRITICAL(Seg, seg);
  AVER_CRITICAL(pool == SegPool(seg));
  AVER_CRITICAL(refIO != NULL);

  /* Should only be fixing references to white segments. */
  AVER_CRITICAL(TraceSetInter(SegWhite(seg), ss->traces) != TraceSetEMPTY);

  return pool->fix(pool, ss, seg, refIO);
}
/* AMSUnallocateRange -- set a range to be unallocated
 *
 * Used as a means of overriding the behaviour of AMSBufferFill.
 * The code is similar to amsSegBufferEmpty.
 */
static void AMSUnallocateRange(AMS ams, Seg seg, Addr base, Addr limit)
{
  AMSSeg amsseg;
  Index baseIndex, limitIndex;
  Count unallocatedGrains;
  /* parameters checked by caller */

  amsseg = Seg2AMSSeg(seg);
  baseIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), base);
  limitIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), limit);

  if (amsseg->allocTableInUse) {
    /* check that it's allocated */
    AVER(BTIsSetRange(amsseg->allocTable, baseIndex, limitIndex));
    BTResRange(amsseg->allocTable, baseIndex, limitIndex);
  } else {
    /* check that it's allocated */
    AVER(limitIndex <= amsseg->firstFree);
    if (limitIndex == amsseg->firstFree) { /* is it at the end? */
      amsseg->firstFree = baseIndex;
    } else { /* start using allocTable */
      amsseg->allocTableInUse = TRUE;
      BTSetRange(amsseg->allocTable, 0, amsseg->firstFree);
      if (amsseg->firstFree < amsseg->grains)
        BTResRange(amsseg->allocTable, amsseg->firstFree, amsseg->grains);
      BTResRange(amsseg->allocTable, baseIndex, limitIndex);
    }
  }

  unallocatedGrains = limitIndex - baseIndex;
  AVER(amsseg->bufferedGrains >= unallocatedGrains);
  amsseg->freeGrains += unallocatedGrains;
  amsseg->bufferedGrains -= unallocatedGrains;
  PoolGenAccountForEmpty(ams->pgen, 0,
                         PoolGrainsSize(AMSPool(ams), unallocatedGrains),
                         FALSE);
}
/* AMSAllocateRange -- set a range to be allocated
 *
 * Used as a means of overriding the behaviour of AMSBufferFill.
 * The code is similar to AMSUnallocateRange.
 */
static void AMSAllocateRange(AMS ams, Seg seg, Addr base, Addr limit)
{
  AMSSeg amsseg;
  Index baseIndex, limitIndex;
  Count allocatedGrains;
  /* parameters checked by caller */

  amsseg = Seg2AMSSeg(seg);
  baseIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), base);
  limitIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), limit);

  if (amsseg->allocTableInUse) {
    /* check that it's not allocated */
    AVER(BTIsResRange(amsseg->allocTable, baseIndex, limitIndex));
    BTSetRange(amsseg->allocTable, baseIndex, limitIndex);
  } else {
    /* check that it's not allocated */
    AVER(baseIndex >= amsseg->firstFree);
    if (baseIndex == amsseg->firstFree) { /* is it at the end? */
      amsseg->firstFree = limitIndex;
    } else { /* start using allocTable */
      amsseg->allocTableInUse = TRUE;
      BTSetRange(amsseg->allocTable, 0, amsseg->firstFree);
      if (amsseg->firstFree < amsseg->grains)
        BTResRange(amsseg->allocTable, amsseg->firstFree, amsseg->grains);
      BTSetRange(amsseg->allocTable, baseIndex, limitIndex);
    }
  }

  allocatedGrains = limitIndex - baseIndex;
  AVER(amsseg->freeGrains >= allocatedGrains);
  amsseg->freeGrains -= allocatedGrains;
  amsseg->bufferedGrains += allocatedGrains;
  PoolGenAccountForFill(ams->pgen, AddrOffset(base, limit));
}
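/* Representation sketch (not MPS source): AMSAllocateRange and
 * AMSUnallocateRange above track allocation either as a "firstFree"
 * high-water mark (every grain below it is allocated) or, once a hole
 * appears in the middle, as an explicit bit table. This self-contained
 * toy shows the same switch; the names and fixed grain count are
 * illustrative only.
 */
#include <assert.h>
#include <stdbool.h>

#define TOY_GRAINS 64

typedef struct {
  bool table[TOY_GRAINS];  /* stand-in for the MPS allocation bit table */
  bool tableInUse;
  unsigned firstFree;      /* valid only while !tableInUse */
} ToySeg;

/* Switch to the bit table, mirroring the "start using allocTable" branch. */
static void toyUseTable(ToySeg *seg)
{
  unsigned i;
  for (i = 0; i < TOY_GRAINS; ++i)
    seg->table[i] = (i < seg->firstFree);
  seg->tableInUse = true;
}

/* Allocate grains [base, limit): bump the high-water mark when the range
 * starts exactly at firstFree, otherwise fall back to the bit table. */
static void toyAllocate(ToySeg *seg, unsigned base, unsigned limit)
{
  unsigned i;
  assert(base <= limit && limit <= TOY_GRAINS);
  if (!seg->tableInUse) {
    assert(base >= seg->firstFree);  /* must not already be allocated */
    if (base == seg->firstFree) {
      seg->firstFree = limit;
      return;
    }
    toyUseTable(seg);
  }
  for (i = base; i < limit; ++i) {
    assert(!seg->table[i]);
    seg->table[i] = true;
  }
}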
void PoolReclaim(Pool pool, Trace trace, Seg seg)
{
  AVERT_CRITICAL(Pool, pool);
  AVERT_CRITICAL(Trace, trace);
  AVERT_CRITICAL(Seg, seg);
  AVER_CRITICAL(pool->arena == trace->arena);
  AVER_CRITICAL(SegPool(seg) == pool);
  /* There shouldn't be any grey things left for this trace. */
  AVER_CRITICAL(!TraceSetIsMember(SegGrey(seg), trace));
  /* Should only be reclaiming segments which are still white. */
  AVER_CRITICAL(TraceSetIsMember(SegWhite(seg), trace));

  Method(Pool, pool, reclaim)(pool, trace, seg);
}
Res PoolFixEmergency(Pool pool, ScanState ss, Seg seg, Addr *refIO)
{
  Res res;

  AVERT_CRITICAL(Pool, pool);
  AVERT_CRITICAL(ScanState, ss);
  AVERT_CRITICAL(Seg, seg);
  AVER_CRITICAL(pool == SegPool(seg));
  AVER_CRITICAL(refIO != NULL);

  /* Should only be fixing references to white segments. */
  AVER_CRITICAL(TraceSetInter(SegWhite(seg), ss->traces) != TraceSetEMPTY);

  res = Method(Pool, pool, fixEmergency)(pool, ss, seg, refIO);
  AVER_CRITICAL(res == ResOK);
  return res;
}
static Res amstSegSplit(Seg seg, Seg segHi, Addr base, Addr mid, Addr limit)
{
  AMST amst;
  AMSTSeg amstseg, amstsegHi;
  Res res;

  AVERT(Seg, seg);
  AVER(segHi != NULL);  /* can't check fully, it's not initialized */
  amstseg = Seg2AMSTSeg(seg);
  amstsegHi = Seg2AMSTSeg(segHi);
  AVERT(AMSTSeg, amstseg);
  amst = PoolAMST(SegPool(seg));

  /* Split the superclass fields via direct next-method call */
  res = NextMethod(Seg, AMSTSeg, split)(seg, segHi, base, mid, limit);
  if (res != ResOK)
    goto failSuper;

  if (AMSTFailOperation(amst)) {
    amst->badSplits++;
    printf("B");
    goto failDeliberate;
  }

  /* Full initialization for segHi. */
  amstsegHi->next = amstseg->next;
  amstsegHi->prev = amstseg;
  amstsegHi->sig = AMSTSegSig;
  amstseg->next = amstsegHi;
  AVERT(AMSTSeg, amstseg);
  AVERT(AMSTSeg, amstsegHi);

  amst->splits++;
  printf("S");
  return ResOK;

failDeliberate:
  /* Call the anti-method (see .fail) */
  res = NextMethod(Seg, AMSTSeg, merge)(seg, segHi, base, mid, limit);
  AVER(res == ResOK);
  res = ResFAIL;
failSuper:
  AVERT(AMSTSeg, amstseg);
  return res;
}
/* amstSegMerge -- AMSTSeg merge method
 *
 * .fail: Test proper handling of the most complex failure cases
 * by deliberately detecting failure sometimes after calling the
 * next method. We handle the error by calling the anti-method.
 * This isn't strictly safe <design/poolams#.split-merge.fail>.
 * But we assume here that we won't run out of memory when calling the
 * anti-method.
 */
static Res amstSegMerge(Seg seg, Seg segHi, Addr base, Addr mid, Addr limit)
{
  AMST amst;
  AMSTSeg amstseg, amstsegHi;
  Res res;

  AVERT(Seg, seg);
  AVERT(Seg, segHi);
  amstseg = Seg2AMSTSeg(seg);
  amstsegHi = Seg2AMSTSeg(segHi);
  AVERT(AMSTSeg, amstseg);
  AVERT(AMSTSeg, amstsegHi);
  amst = PoolAMST(SegPool(seg));

  /* Merge the superclass fields via direct next-method call */
  res = NextMethod(Seg, AMSTSeg, merge)(seg, segHi, base, mid, limit);
  if (res != ResOK)
    goto failSuper;

  if (AMSTFailOperation(amst)) {
    amst->badMerges++;
    printf("D");
    goto failDeliberate;
  }

  amstseg->next = amstsegHi->next;
  amstsegHi->sig = SigInvalid;
  AVERT(AMSTSeg, amstseg);

  amst->merges++;
  printf("M");
  return ResOK;

failDeliberate:
  /* Call the anti-method (see .fail) */
  res = NextMethod(Seg, AMSTSeg, split)(seg, segHi, base, mid, limit);
  AVER(res == ResOK);
  res = ResFAIL;
failSuper:
  AVERT(AMSTSeg, amstseg);
  AVERT(AMSTSeg, amstsegHi);
  return res;
}
static void MVFFFinish(Pool pool)
{
  MVFF mvff;
  Arena arena;
  Seg seg;
  Ring ring, node, nextNode;

  AVERT(Pool, pool);
  mvff = Pool2MVFF(pool);
  AVERT(MVFF, mvff);

  /* Do this first, because the free list can use the segs in an emergency. */
  CBSFinish(CBSOfMVFF(mvff));

  ring = PoolSegRing(pool);
  RING_FOR(node, ring, nextNode) {
    seg = SegOfPoolRing(node);
    AVER(SegPool(seg) == pool);
    SegFree(seg);
  }
Res PoolScan(Bool *totalReturn, ScanState ss, Pool pool, Seg seg)
{
  AVER(totalReturn != NULL);
  AVERT(ScanState, ss);
  AVERT(Pool, pool);
  AVERT(Seg, seg);
  AVER(ss->arena == pool->arena);

  /* The segment must belong to the pool. */
  AVER(pool == SegPool(seg));

  /* We check that either ss->rank is in the segment's ranks, or that
   * ss->rank is exact. The check is more complicated if we actually
   * have multiple ranks in a seg.
   * See <code/trace.c#scan.conservative>. */
  AVER(ss->rank == RankEXACT || RankSetIsMember(SegRankSet(seg), ss->rank));

  /* Should only scan segments which contain grey objects. */
  AVER(TraceSetInter(SegGrey(seg), ss->traces) != TraceSetEMPTY);

  return Method(Pool, pool, scan)(totalReturn, ss, pool, seg);
}
void SegFree(Seg seg)
{
  Arena arena;
  Pool pool;
  Addr base;
  Size size, structSize;

  AVERT(Seg, seg);
  pool = SegPool(seg);
  AVERT(Pool, pool);
  arena = PoolArena(pool);
  AVERT(Arena, arena);
  base = SegBase(seg);
  size = SegSize(seg);
  structSize = ClassOfPoly(Seg, seg)->size;

  SegFinish(seg);
  ControlFree(arena, seg, structSize);
  ArenaFree(base, size, pool);

  EVENT2(SegFree, arena, seg);
}
static Res ArenaRootsWalk(Globals arenaGlobals, mps_roots_stepper_t f,
                          void *p, size_t s)
{
  Arena arena;
  rootsStepClosureStruct rscStruct;
  rootsStepClosure rsc = &rscStruct;
  Trace trace;
  ScanState ss;
  Rank rank;
  Res res;
  Seg seg;

  AVERT(Globals, arenaGlobals);
  AVER(FUNCHECK(f));
  /* p and s are arbitrary client-provided closure data. */
  arena = GlobalsArena(arenaGlobals);

  /* Scan all the roots with a minimal trace. Invoke the scanner with a */
  /* rootsStepClosure, which is a subclass of ScanState and contains the */
  /* client-provided closure. Supply a special fix method in order to */
  /* call the client closure. This fix method must perform no tracing */
  /* operations of its own. */

  res = TraceCreate(&trace, arena, TraceStartWhyWALK);
  /* Have to fail if no trace available. Unlikely due to .assume.parked. */
  if (res != ResOK)
    return res;

  /* ArenaRootsWalk only passes references to GCable pools to the client. */
  /* NOTE: I'm not sure why this is. RB 2012-07-24 */
  if (SegFirst(&seg, arena)) {
    do {
      if (PoolHasAttr(SegPool(seg), AttrGC)) {
        res = TraceAddWhite(trace, seg);
        AVER(res == ResOK);
      }
    } while (SegNext(&seg, arena, seg));
  }

  /* Make the roots grey so that they are scanned */
  res = RootsIterate(arenaGlobals, rootWalkGrey, trace);

  /* Make this trace look like any other trace. */
  arena->flippedTraces = TraceSetAdd(arena->flippedTraces, trace);

  rootsStepClosureInit(rsc, arenaGlobals, trace, RootsWalkFix, f, p, s);
  ss = rootsStepClosure2ScanState(rsc);

  for (rank = RankAMBIG; rank < RankLIMIT; ++rank) {
    ss->rank = rank;
    AVERT(ScanState, ss);
    res = RootsIterate(arenaGlobals, rootWalk, (void *)ss);
    if (res != ResOK)
      break;
  }

  /* Turn segments black again. */
  if (SegFirst(&seg, arena)) {
    do {
      if (PoolHasAttr(SegPool(seg), AttrGC)) {
        SegSetGrey(seg, TraceSetDel(SegGrey(seg), trace));
        SegSetWhite(seg, TraceSetDel(SegWhite(seg), trace));
      }
    } while (SegNext(&seg, arena, seg));
  }

  rootsStepClosureFinish(rsc);
  /* Make this trace look like any other finished trace. */
  trace->state = TraceFINISHED;
  TraceDestroy(trace);
  AVER(!ArenaEmergency(arena));  /* There was no allocation. */

  return res;
}
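/* Usage sketch (not MPS source): a walk like ArenaRootsWalk is normally
 * reached from the public mps_arena_roots_walk function. This assumes
 * the documented entry point and the mps_roots_stepper_t signature
 * (which matches the rsc->f call in RootsWalkFix above), and a parked
 * arena as per .assume.parked; the stepper name is illustrative only.
 */
#include "mps.h"
#include <stdio.h>

static void print_root_ref(mps_addr_t *ref, mps_root_t root, void *p, size_t s)
{
  (void)root; (void)p; (void)s;
  printf("root holds reference to %p\n", (void *)*ref);  /* must not modify *ref */
}

static void walk_roots(mps_arena_t arena)
{
  mps_arena_park(arena);  /* the walk requires that no trace is in progress */
  mps_arena_roots_walk(arena, print_root_ref, NULL, 0);
  mps_arena_release(arena);
}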