/* AMSUnallocateRange -- set a range to be unallocated
 *
 * Used as a means of overriding the behaviour of AMSBufferFill.
 * The code is similar to amsSegBufferEmpty.
 */
static void AMSUnallocateRange(AMS ams, Seg seg, Addr base, Addr limit)
{
  AMSSeg amsseg;
  Index baseIndex, limitIndex;
  Count unallocatedGrains;
  /* parameters checked by caller */

  amsseg = Seg2AMSSeg(seg);
  baseIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), base);
  limitIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), limit);

  if (amsseg->allocTableInUse) {
    /* check that it's allocated */
    AVER(BTIsSetRange(amsseg->allocTable, baseIndex, limitIndex));
    BTResRange(amsseg->allocTable, baseIndex, limitIndex);
  } else {
    /* check that it's allocated */
    AVER(limitIndex <= amsseg->firstFree);
    if (limitIndex == amsseg->firstFree) { /* is it at the end? */
      amsseg->firstFree = baseIndex;
    } else { /* start using allocTable */
      amsseg->allocTableInUse = TRUE;
      BTSetRange(amsseg->allocTable, 0, amsseg->firstFree);
      if (amsseg->firstFree < amsseg->grains)
        BTResRange(amsseg->allocTable, amsseg->firstFree, amsseg->grains);
      BTResRange(amsseg->allocTable, baseIndex, limitIndex);
    }
  }

  unallocatedGrains = limitIndex - baseIndex;
  AVER(amsseg->bufferedGrains >= unallocatedGrains);
  amsseg->freeGrains += unallocatedGrains;
  amsseg->bufferedGrains -= unallocatedGrains;
  PoolGenAccountForEmpty(ams->pgen, 0,
                         PoolGrainsSize(AMSPool(ams), unallocatedGrains),
                         FALSE);
}
static void create(void)
{
  Res res;

  if (args[0] < 1) {
    printf("can't create a BT of size 0\n");
    return;
  }

  if (bt != NULL)
    destroy();

  res = BTCreate(&bt, arena, args[0]);
  if (res == ResOK) {
    btSize = args[0];
    BTResRange(bt, 0, btSize);
  } else {
    printf("BTCreate returned %d\n", res);
  }
}
static void test(mps_arena_t arena)
{
  BT bt;
  Nailboard board;
  Align align;
  Count nails;
  Addr base, limit;
  Index i, j, k;

  align = (Align)1 << (rnd() % 10);
  nails = (Count)1 << (rnd() % 16);
  nails += rnd() % nails;
  base = AddrAlignUp(0, align);
  limit = AddrAdd(base, nails * align);

  die(BTCreate(&bt, arena, nails), "BTCreate");
  BTResRange(bt, 0, nails);
  die(NailboardCreate(&board, arena, align, base, limit), "NailboardCreate");

  for (i = 0; i <= nails / 8; ++i) {
    Bool old;
    j = rnd() % nails;
    old = BTGet(bt, j);
    BTSet(bt, j);
    cdie(NailboardSet(board, AddrAdd(base, j * align)) == old, "NailboardSet");
    for (k = 0; k < nails / 8; ++k) {
      Index b, l;
      b = rnd() % nails;
      l = b + rnd() % (nails - b) + 1;
      cdie(BTIsResRange(bt, b, l)
           == NailboardIsResRange(board, AddrAdd(base, b * align),
                                  AddrAdd(base, l * align)),
           "NailboardIsResRange");
    }
  }

  die(NailboardDescribe(board, mps_lib_get_stdout(), 0), "NailboardDescribe");
}
/* AMSAllocateRange -- set a range to be allocated
 *
 * Used as a means of overriding the behaviour of AMSBufferFill.
 * The code is similar to AMSUnallocateRange.
 */
static void AMSAllocateRange(AMS ams, Seg seg, Addr base, Addr limit)
{
  AMSSeg amsseg;
  Index baseIndex, limitIndex;
  Count allocatedGrains;
  /* parameters checked by caller */

  amsseg = Seg2AMSSeg(seg);
  baseIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), base);
  limitIndex = PoolIndexOfAddr(SegBase(seg), SegPool(seg), limit);

  if (amsseg->allocTableInUse) {
    /* check that it's not allocated */
    AVER(BTIsResRange(amsseg->allocTable, baseIndex, limitIndex));
    BTSetRange(amsseg->allocTable, baseIndex, limitIndex);
  } else {
    /* check that it's not allocated */
    AVER(baseIndex >= amsseg->firstFree);
    if (baseIndex == amsseg->firstFree) { /* is it at the end? */
      amsseg->firstFree = limitIndex;
    } else { /* start using allocTable */
      amsseg->allocTableInUse = TRUE;
      BTSetRange(amsseg->allocTable, 0, amsseg->firstFree);
      if (amsseg->firstFree < amsseg->grains)
        BTResRange(amsseg->allocTable, amsseg->firstFree, amsseg->grains);
      BTSetRange(amsseg->allocTable, baseIndex, limitIndex);
    }
  }

  allocatedGrains = limitIndex - baseIndex;
  AVER(amsseg->freeGrains >= allocatedGrains);
  amsseg->freeGrains -= allocatedGrains;
  amsseg->bufferedGrains += allocatedGrains;
  PoolGenAccountForFill(ams->pgen, AddrOffset(base, limit));
}
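/* Illustrative sketch (not MPS code): the lazy allocation-table idea
 * shared by AMSUnallocateRange and AMSAllocateRange above. While a
 * segment has only ever been allocated contiguously from its base, a
 * single watermark (firstFree) suffices; the bit table is materialized
 * only when a request punches a hole in the middle. All names below
 * (SegSketch, segMaterialize, segUnallocate) are hypothetical. */

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>

#define GRAINS 64

typedef struct {
  bool tableInUse;        /* has the bit table been materialized? */
  size_t firstFree;       /* valid while !tableInUse: [0, firstFree) allocated */
  bool allocated[GRAINS]; /* valid while tableInUse */
} SegSketch;

/* Materialize the bit table from the watermark representation. */
static void segMaterialize(SegSketch *seg)
{
  size_t i;
  assert(!seg->tableInUse);
  for (i = 0; i < GRAINS; ++i)
    seg->allocated[i] = i < seg->firstFree;
  seg->tableInUse = true;
}

/* Free grains [base, limit), mirroring AMSUnallocateRange's structure. */
static void segUnallocate(SegSketch *seg, size_t base, size_t limit)
{
  size_t i;
  if (!seg->tableInUse && limit == seg->firstFree) {
    seg->firstFree = base; /* freeing at the end: just lower the watermark */
    return;
  }
  if (!seg->tableInUse)
    segMaterialize(seg);   /* freeing in the middle: need the full table */
  for (i = base; i < limit; ++i) {
    assert(seg->allocated[i]);
    seg->allocated[i] = false;
  }
}

int main(void)
{
  SegSketch seg = {false, 16, {false}};
  segUnallocate(&seg, 12, 16); /* at the end: stays on the watermark */
  assert(!seg.tableInUse && seg.firstFree == 12);
  segUnallocate(&seg, 2, 4);   /* in the middle: forces the table */
  assert(seg.tableInUse && !seg.allocated[2] && seg.allocated[4]);
  return 0;
}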
Res ChunkInit(Chunk chunk, Arena arena, Addr base, Addr limit,
              Size reserved, BootBlock boot)
{
  Size size;
  Count pages;
  Shift pageShift;
  Size pageTableSize;
  Addr allocBase;
  void *p;
  Res res;

  /* chunk is supposed to be uninitialized, so don't check it. */
  AVERT(Arena, arena);
  AVER(base != NULL);
  AVER(AddrIsAligned(base, ArenaGrainSize(arena)));
  AVER(base < limit);
  AVER(AddrIsAligned(limit, ArenaGrainSize(arena)));
  AVERT(BootBlock, boot);

  chunk->serial = (arena->chunkSerial)++;
  chunk->arena = arena;
  RingInit(&chunk->arenaRing);

  chunk->pageSize = ArenaGrainSize(arena);
  chunk->pageShift = pageShift = SizeLog2(chunk->pageSize);
  chunk->base = base;
  chunk->limit = limit;
  chunk->reserved = reserved;
  size = ChunkSize(chunk);

  /* .overhead.pages: Chunk overhead for the page allocation table. */
  chunk->pages = pages = size >> pageShift;
  res = BootAlloc(&p, boot, (size_t)BTSize(pages), MPS_PF_ALIGN);
  if (res != ResOK)
    goto failAllocTable;
  chunk->allocTable = p;

  pageTableSize = SizeAlignUp(pages * sizeof(PageUnion), chunk->pageSize);
  chunk->pageTablePages = pageTableSize >> pageShift;

  res = Method(Arena, arena, chunkInit)(chunk, boot);
  if (res != ResOK)
    goto failClassInit;

  /* @@@@ Is BootAllocated always right? */
  /* Last thing we BootAlloc'd is pageTable.  We requested pageSize */
  /* alignment, and pageTableSize is itself pageSize aligned, so */
  /* BootAllocated should also be pageSize aligned. */
  AVER(AddrIsAligned(BootAllocated(boot), chunk->pageSize));
  chunk->allocBase = (Index)(BootAllocated(boot) >> pageShift);

  /* Init allocTable after class init, because it might be mapped there. */
  BTResRange(chunk->allocTable, 0, pages);

  /* Check that there is some usable address space remaining in the chunk. */
  allocBase = PageIndexBase(chunk, chunk->allocBase);
  AVER(allocBase < chunk->limit);

  /* Add the chunk's free address space to the arena's freeLand, so that
     we can allocate from it. */
  if (arena->hasFreeLand) {
    res = ArenaFreeLandInsert(arena, allocBase, chunk->limit);
    if (res != ResOK)
      goto failLandInsert;
  }

  TreeInit(&chunk->chunkTree);
  chunk->sig = ChunkSig;
  AVERT(Chunk, chunk);
  ArenaChunkInsert(arena, chunk);

  return ResOK;

failLandInsert:
  Method(Arena, arena, chunkFinish)(chunk);
  /* .no-clean: No clean-ups needed past this point for boot, as we will
     discard the chunk. */
failClassInit:
failAllocTable:
  return res;
}
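/* Worked example (hypothetical numbers, not MPS code): the overhead
 * arithmetic from ChunkInit for a 256 MiB chunk with 4 KiB grains.
 * sizeof(PageUnion) is assumed to be 64 bytes for illustration, and
 * btBytes approximates BTSize (which rounds up to whole words). */

#include <stdio.h>

int main(void)
{
  size_t pageSize = 4096;            /* ArenaGrainSize(arena), assumed */
  size_t pageShift = 12;             /* SizeLog2(pageSize) */
  size_t size = 256ul * 1024 * 1024; /* ChunkSize(chunk) */
  size_t pages = size >> pageShift;  /* 65536 pages */
  size_t btBytes = (pages + 7) / 8;  /* ~BTSize(pages): one bit per page */
  size_t pageUnion = 64;             /* sizeof(PageUnion), assumed */
  size_t pageTableSize =             /* SizeAlignUp(pages * pageUnion, pageSize) */
    ((pages * pageUnion + pageSize - 1) / pageSize) * pageSize;

  /* Prints: allocTable: 8192 bytes; pageTable: 1024 pages */
  printf("allocTable: %zu bytes; pageTable: %zu pages\n",
         btBytes, pageTableSize >> pageShift);
  return 0;
}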
static void resetRange(void)
{
  if (checkDefaultRange(0))
    BTResRange(bt, args[0], args[1]);
}
static void deallocate(FBMState state, Addr base, Addr limit)
{
  Res res;
  Index ib, il;
  Bool isAllocated;
  Addr outerBase = base, outerLimit = limit; /* interval containing [ib, il) */
  RangeStruct range, freeRange; /* interval returned by the manager */

  ib = indexOfAddr(state, base);
  il = indexOfAddr(state, limit);

  isAllocated = BTIsSetRange(state->allocTable, ib, il);

  NDeallocateTried++;

  if (isAllocated) {
    Size left, right, total; /* sizes of block and two fragments */

    /* Find the free blocks adjacent to the allocated block */
    if (ib > 0 && !BTGet(state->allocTable, ib - 1)) {
      outerBase =
        addrOfIndex(state, lastEdge(state->allocTable, ArraySize, ib - 1));
    } else {
      outerBase = base;
    }

    if (il < ArraySize && !BTGet(state->allocTable, il)) {
      outerLimit =
        addrOfIndex(state, nextEdge(state->allocTable, ArraySize, il));
    } else {
      outerLimit = limit;
    }

    left = AddrOffset(outerBase, base);
    right = AddrOffset(limit, outerLimit);
    total = AddrOffset(outerBase, outerLimit);

    /* TODO: check these values */
    UNUSED(left);
    UNUSED(right);
    UNUSED(total);
  }

  RangeInit(&range, base, limit);

  switch (state->type) {
  case FBMTypeCBS:
    res = CBSInsert(&freeRange, state->the.cbs, &range);
    break;
  case FBMTypeFreelist:
    res = FreelistInsert(&freeRange, state->the.fl, &range);
    break;
  default:
    fail();
    return;
  }

  if (verbose) {
    printf("deallocate: [%p,%p) -- %s\n",
           (void *)base, (void *)limit, isAllocated ? "succeed" : "fail");
    describe(state);
  }

  if (!isAllocated) {
    die_expect((mps_res_t)res, MPS_RES_FAIL,
               "succeeded in inserting non-allocated block");
  } else { /* isAllocated */
    die_expect((mps_res_t)res, MPS_RES_OK,
               "failed to insert allocated block");

    NDeallocateSucceeded++;
    BTResRange(state->allocTable, ib, il);
    Insist(RangeBase(&freeRange) == outerBase);
    Insist(RangeLimit(&freeRange) == outerLimit);
  }
}
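/* Illustrative sketch (not the MPS helpers): plausible behaviour of the
 * lastEdge and nextEdge calls used above -- scanning the bit table for
 * the extent of the run of equal bits containing a given index. Under
 * that assumption, the free run to the left of an allocated block
 * [ib, il) starts at lastEdge(bt, n, ib - 1), and the free run to its
 * right ends at nextEdge(bt, n, il); these bound the coalesced range
 * the manager is expected to return. Names with the Sketch suffix are
 * hypothetical re-implementations. */

#include <stdbool.h>
#include <stddef.h>

/* Index of the start of the run of equal bits containing index i. */
static size_t lastEdgeSketch(const bool *bt, size_t n, size_t i)
{
  bool v = bt[i];
  (void)n;
  while (i > 0 && bt[i - 1] == v)
    --i;
  return i;
}

/* Index one past the end of the run of equal bits containing index i. */
static size_t nextEdgeSketch(const bool *bt, size_t n, size_t i)
{
  bool v = bt[i];
  while (i < n && bt[i] == v)
    ++i;
  return i;
}

int main(void)
{
  /* bits: F F A A F F -- freeing [2, 4) should coalesce to [0, 6) */
  bool bt[6] = {false, false, true, true, false, false};
  return !(lastEdgeSketch(bt, 6, 1) == 0 && nextEdgeSketch(bt, 6, 4) == 6);
}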
static void deallocate(CBS cbs, Addr block, BT allocTable,
                       Addr base, Addr limit)
{
  Res res;
  Index ib, il;
  Bool isAllocated;
  Addr outerBase = base, outerLimit = limit; /* interval containing [ib, il) */
  Addr freeBase, freeLimit; /* interval returned by CBS */

  ib = indexOfAddr(block, base);
  il = indexOfAddr(block, limit);

  isAllocated = BTIsSetRange(allocTable, ib, il);

  /*
  printf("deallocate: [%p, %p) -- %s\n",
         base, limit, isAllocated ? "succeed" : "fail");
  */

  NDeallocateTried++;

  if (isAllocated) {
    Size left, right, total; /* sizes of block and two fragments */

    /* Find the free blocks adjacent to the allocated block */
    if (ib > 0 && !BTGet(allocTable, ib - 1)) {
      outerBase =
        addrOfIndex(block, lastEdge(allocTable, ArraySize, ib - 1));
    } else {
      outerBase = base;
    }

    if (il < ArraySize && !BTGet(allocTable, il)) {
      outerLimit =
        addrOfIndex(block, nextEdge(allocTable, ArraySize, il));
    } else {
      outerLimit = limit;
    }

    left = AddrOffset(outerBase, base);
    right = AddrOffset(limit, outerLimit);
    total = AddrOffset(outerBase, outerLimit);

    /* based on detailed knowledge of CBS behaviour */
    checkExpectations();
    if (total >= MinSize && left < MinSize && right < MinSize) {
      if (left >= right)
        expectCallback(&CallbackNew, left, outerBase, outerLimit);
      else
        expectCallback(&CallbackNew, right, outerBase, outerLimit);
    } else if (left >= MinSize && right >= MinSize) {
      if (left >= right) {
        expectCallback(&CallbackDelete, right, (Addr)0, (Addr)0);
        expectCallback(&CallbackGrow, left, outerBase, outerLimit);
      } else {
        expectCallback(&CallbackDelete, left, (Addr)0, (Addr)0);
        expectCallback(&CallbackGrow, right, outerBase, outerLimit);
      }
    } else if (total >= MinSize) {
      if (left >= right) {
        Insist(left >= MinSize);
        Insist(right < MinSize);
        expectCallback(&CallbackGrow, left, outerBase, outerLimit);
      } else {
        Insist(left < MinSize);
        Insist(right >= MinSize);
        expectCallback(&CallbackGrow, right, outerBase, outerLimit);
      }
    }
  }

  res = CBSInsertReturningRange(&freeBase, &freeLimit, cbs, base, limit);

  if (!isAllocated) {
    die_expect((mps_res_t)res, MPS_RES_FAIL,
               "succeeded in inserting non-allocated block");
  } else { /* isAllocated */
    die_expect((mps_res_t)res, MPS_RES_OK,
               "failed to insert allocated block");

    NDeallocateSucceeded++;
    BTResRange(allocTable, ib, il);
    checkExpectations();
    Insist(freeBase == outerBase);
    Insist(freeLimit == outerLimit);
  }
}