/* VMArenaCheck -- consistency check for a VMArena structure */
static Bool VMArenaCheck(VMArena vmArena)
{
  Arena arena;
  VMChunk primChunk;

  CHECKS(VMArena, vmArena);
  arena = VMArena2Arena(vmArena);
  CHECKD(Arena, arena);

  /* Spare pages are committed pages, so the spare total can never
     exceed the committed total. */
  CHECKL(vmArena->spareSize <= arena->committed);
  CHECKL(vmArena->extendBy > 0);
  CHECKL(vmArena->extendMin <= vmArena->extendBy);

  if (arena->primary != NULL) {
    primChunk = Chunk2VMChunk(arena->primary);
    CHECKD(VMChunk, primChunk);
    /* An exact committed total would require iterating every chunk;
       checking a lower bound from the primary chunk alone is cheap
       and catches gross inconsistencies. */
    CHECKL(VMMapped(primChunk->vm) <= arena->committed);
  }

  CHECKD_NOSIG(Ring, &vmArena->spareRing);
  /* FIXME: Can't check VMParams */
  return TRUE;
}
/* vmArenaUnmap -- unmap [base, limit) and reduce the committed total */
static void vmArenaUnmap(VMArena vmArena, VM vm, Addr base, Addr limit)
{
  Size size;
  Arena arena;

  /* No argument checking: this function is local to the module. */
  arena = VMArena2Arena(vmArena);
  size = AddrOffset(base, limit);
  /* We can't be unmapping more than was ever committed. */
  AVER(size <= arena->committed);

  VMUnmap(vm, base, limit);
  arena->committed -= size;
}
/* VM indirect functions
 *
 * These functions should be used to map and unmap within the arena.
 * They are responsible for maintaining vmArena->committed, and for
 * checking that the commit limit does not get exceeded.
 */
static Res vmArenaMap(VMArena vmArena, VM vm, Addr base, Addr limit)
{
  Res res;
  Arena arena;
  Size size;

  /* No argument checking: this function is local to the module. */
  arena = VMArena2Arena(vmArena);
  size = AddrOffset(base, limit);

  /* committed can't genuinely overflow (we can't commit more memory
     than there is address space), but check anyway out of paranoia. */
  AVER(arena->committed < arena->committed + size);

  /* Refuse the mapping if it would exceed the commit limit. */
  if (arena->commitLimit < arena->committed + size)
    return ResCOMMIT_LIMIT;

  res = VMMap(vm, base, limit);
  if (res == ResOK)
    arena->committed += size;
  return res;
}
/* VMChunkCreate -- create a chunk
 *
 * chunkReturn, return parameter for the created chunk.
 * vmArena, the parent VMArena.
 * size, approximate amount of virtual address that the chunk should reserve.
 *
 * On failure, all partially-acquired resources (the VM reservation, the
 * mapped descriptor pages, and the committed-size accounting) are
 * released before returning.
 */
static Res VMChunkCreate(Chunk *chunkReturn, VMArena vmArena, Size size)
{
  Res res;
  Addr base, limit, chunkStructLimit;
  Align pageSize;
  VM vm;
  BootBlockStruct bootStruct;
  BootBlock boot = &bootStruct;
  VMChunk vmChunk;
  void *p;

  AVER(chunkReturn != NULL);
  AVERT(VMArena, vmArena);
  AVER(size > 0);

  res = VMCreate(&vm, size, vmArena->vmParams);
  if (res != ResOK)
    goto failVMCreate;

  pageSize = VMAlign(vm);
  /* The VM will have aligned the userSize; pick up the actual size. */
  base = VMBase(vm);
  limit = VMLimit(vm);

  /* Set up a boot allocator over the reserved (but not yet mapped)
     address range. */
  res = BootBlockInit(boot, (void *)base, (void *)limit);
  if (res != ResOK)
    goto failBootInit;

  /* Allocate and map the descriptor. */
  /* See <design/arena/>.@@@@ */
  res = BootAlloc(&p, boot, sizeof(VMChunkStruct), MPS_PF_ALIGN);
  if (res != ResOK)
    goto failChunkAlloc;
  vmChunk = p;
  /* Calculate the limit of the page where the chunkStruct resides. */
  chunkStructLimit = AddrAlignUp((Addr)(vmChunk + 1), pageSize);
  res = vmArenaMap(vmArena, vm, base, chunkStructLimit);
  if (res != ResOK)
    goto failChunkMap;
  vmChunk->overheadMappedLimit = chunkStructLimit;
  vmChunk->vm = vm;

  res = ChunkInit(VMChunk2Chunk(vmChunk), VMArena2Arena(vmArena),
                  base, limit, pageSize, boot);
  if (res != ResOK)
    goto failChunkInit;

  BootBlockFinish(boot);

  vmChunk->sig = VMChunkSig;
  AVERT(VMChunk, vmChunk);
  *chunkReturn = VMChunk2Chunk(vmChunk);
  return ResOK;

failChunkInit:
  /* VMDestroy will release the pages anyway, but we must unmap via
   * vmArenaUnmap so that arena->committed (which vmArenaMap increased
   * above) is decreased again; otherwise a failed chunk creation
   * permanently inflates the committed-size accounting. */
  vmArenaUnmap(vmArena, vm, base, chunkStructLimit);
failChunkMap:
failChunkAlloc:
failBootInit:
  VMDestroy(vm);
failVMCreate:
  return res;
}
/* VMChunkCreate -- create a chunk
 *
 * chunkReturn, return parameter for the created chunk.
 * vmArena, the parent VMArena.
 * size, approximate amount of virtual address that the chunk should reserve.
 *
 * The VM descriptor is initialised in a local VMStruct and copied into
 * its final place inside the chunk once the descriptor pages have been
 * mapped.  On failure, all partially-acquired resources (the VM
 * reservation, the mapped descriptor pages, and the committed-size
 * accounting) are released before returning.
 */
static Res VMChunkCreate(Chunk *chunkReturn, VMArena vmArena, Size size)
{
  Arena arena;
  Res res;
  Addr base, limit, chunkStructLimit;
  VMStruct vmStruct;
  VM vm = &vmStruct;
  BootBlockStruct bootStruct;
  BootBlock boot = &bootStruct;
  VMChunk vmChunk;
  void *p;

  AVER(chunkReturn != NULL);
  AVERT(VMArena, vmArena);
  arena = VMArena2Arena(vmArena);
  AVER(size > 0);

  res = VMInit(vm, size, ArenaGrainSize(arena), vmArena->vmParams);
  if (res != ResOK)
    goto failVMInit;

  base = VMBase(vm);
  limit = VMLimit(vm);

  /* Set up a boot allocator over the reserved (but not yet mapped)
     address range. */
  res = BootBlockInit(boot, (void *)base, (void *)limit);
  if (res != ResOK)
    goto failBootInit;

  /* Allocate and map the descriptor. */
  /* See <design/arena/>.@@@@ */
  res = BootAlloc(&p, boot, sizeof(VMChunkStruct), MPS_PF_ALIGN);
  if (res != ResOK)
    goto failChunkAlloc;
  vmChunk = p;
  /* Calculate the limit of the grain where the chunkStruct resides. */
  chunkStructLimit = AddrAlignUp((Addr)(vmChunk + 1), ArenaGrainSize(arena));
  res = vmArenaMap(vmArena, vm, base, chunkStructLimit);
  if (res != ResOK)
    goto failChunkMap;
  vmChunk->overheadMappedLimit = chunkStructLimit;

  /* Copy VM descriptor into its place in the chunk. */
  VMCopy(VMChunkVM(vmChunk), vm);
  res = ChunkInit(VMChunk2Chunk(vmChunk), arena, base, limit,
                  VMReserved(VMChunkVM(vmChunk)), boot);
  if (res != ResOK)
    goto failChunkInit;

  BootBlockFinish(boot);

  vmChunk->sig = VMChunkSig;
  AVERT(VMChunk, vmChunk);
  *chunkReturn = VMChunk2Chunk(vmChunk);
  return ResOK;

failChunkInit:
  /* Unmap via vmArenaUnmap, not raw VMUnmap, so that arena->committed
   * (which vmArenaMap increased above) is decreased again; otherwise a
   * failed chunk creation permanently inflates the committed-size
   * accounting. */
  vmArenaUnmap(vmArena, vm, VMBase(vm), chunkStructLimit);
failChunkMap:
failChunkAlloc:
failBootInit:
  VMFinish(vm);
failVMInit:
  return res;
}