Res VMMap(VM vm, Addr base, Addr limit)
{
  LPVOID b;

  AVERT(VM, vm);
  AVER(AddrIsAligned(base, vm->pageSize));
  AVER(AddrIsAligned(limit, vm->pageSize));
  AVER(VMBase(vm) <= base);
  AVER(base < limit);
  AVER(limit <= VMLimit(vm));

  /* .improve.query-map: We could check that the pages we are about
   * to map are unmapped using VirtualQuery. */
  b = VirtualAlloc((LPVOID)base, (SIZE_T)AddrOffset(base, limit),
                   MEM_COMMIT, PAGE_EXECUTE_READWRITE);
  if (b == NULL)
    return ResMEMORY;
  AVER((Addr)b == base);        /* base should've been aligned */

  vm->mapped += AddrOffset(base, limit);
  AVER(VMMapped(vm) <= VMReserved(vm));

  EVENT3(VMMap, vm, base, limit);
  return ResOK;
}
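/* The .improve.query-map comment above suggests checking, via
 * VirtualQuery, that the pages about to be committed are reserved but
 * not yet mapped. A minimal standalone sketch of such a check follows;
 * RangeIsUnmapped is a hypothetical helper, not part of the MPS
 * sources. */

#include <windows.h>
#include <stdbool.h>

static bool RangeIsUnmapped(LPVOID base, LPVOID limit)
{
  char *p = base;
  while (p < (char *)limit) {
    MEMORY_BASIC_INFORMATION mbi;
    if (VirtualQuery(p, &mbi, sizeof mbi) != sizeof mbi)
      return false;
    if (mbi.State != MEM_RESERVE)
      return false;  /* MEM_COMMIT or MEM_FREE: not safe to commit here */
    /* VirtualQuery describes a whole run of pages with identical
     * attributes, so skip to the end of that run. */
    p = (char *)mbi.BaseAddress + mbi.RegionSize;
  }
  return true;
}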
Res VMInit(VM vm, Size size, Size grainSize, void *params)
{
  LPVOID vbase;
  Size pageSize, reserved;
  VMParams vmParams = params;

  AVER(vm != NULL);
  AVERT(ArenaGrainSize, grainSize);
  AVER(size > 0);
  AVER(params != NULL); /* FIXME: Should have full AVERT? */

  AVER(COMPATTYPE(LPVOID, Addr)); /* .assume.lpvoid-addr */
  AVER(COMPATTYPE(SIZE_T, Size));

  pageSize = PageSize();

  /* Grains must consist of whole pages. */
  AVER(grainSize % pageSize == 0);

  /* Check that the rounded-up sizes will fit in a Size. */
  size = SizeRoundUp(size, grainSize);
  if (size < grainSize || size > (Size)(SIZE_T)-1)
    return ResRESOURCE;
  reserved = size + grainSize - pageSize;
  if (reserved < grainSize || reserved > (Size)(SIZE_T)-1)
    return ResRESOURCE;

  /* Allocate the address space. */
  vbase = VirtualAlloc(NULL, reserved,
                       vmParams->topDown ?
                         MEM_RESERVE | MEM_TOP_DOWN :
                         MEM_RESERVE,
                       PAGE_NOACCESS);
  if (vbase == NULL)
    return ResRESOURCE;
  AVER(AddrIsAligned(vbase, pageSize));

  vm->pageSize = pageSize;
  vm->block = vbase;
  vm->base = AddrAlignUp(vbase, grainSize);
  vm->limit = AddrAdd(vm->base, size);
  AVER(vm->base < vm->limit);  /* .assume.not-last */
  AVER(vm->limit <= AddrAdd((Addr)vm->block, reserved));
  vm->reserved = reserved;
  vm->mapped = 0;

  vm->sig = VMSig;
  AVERT(VM, vm);

  EVENT3(VMInit, vm, VMBase(vm), VMLimit(vm));
  return ResOK;
}
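/* VMInit reserves size + grainSize - pageSize bytes so that a
 * grain-aligned block of the requested size is guaranteed to fit
 * inside the reservation: VirtualAlloc returns a page-aligned address,
 * so aligning it up to a grain boundary skips at most
 * grainSize - pageSize bytes. A standalone sketch of that trick with
 * example sizes (not part of the MPS sources): */

#include <windows.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
  SYSTEM_INFO si;
  SIZE_T page, grain, size, reserved;
  char *block, *base, *limit;
  uintptr_t b;

  GetSystemInfo(&si);
  page = si.dwPageSize;
  grain = 16 * page;          /* example grain size: whole pages */
  size = 64 * grain;          /* requested size, already grain-aligned */
  reserved = size + grain - page;

  /* Reserve address space only, as VMInit does. */
  block = VirtualAlloc(NULL, reserved, MEM_RESERVE, PAGE_NOACCESS);
  if (block == NULL)
    return 1;

  /* Round the reserved base up to a grain boundary (AddrAlignUp). */
  b = ((uintptr_t)block + grain - 1) & ~(uintptr_t)(grain - 1);
  base = (char *)b;
  limit = base + size;

  /* limit <= block + reserved holds because at most grain - page
   * bytes were skipped before base. */
  printf("block=%p base=%p limit=%p\n",
         (void *)block, (void *)base, (void *)limit);

  VirtualFree(block, 0, MEM_RELEASE);
  return 0;
}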
void VMUnmap(VM vm, Addr base, Addr limit)
{
  BOOL b;
  Size size;

  AVERT(VM, vm);
  AVER(AddrIsAligned(base, vm->pageSize));
  AVER(AddrIsAligned(limit, vm->pageSize));
  AVER(VMBase(vm) <= base);
  AVER(base < limit);
  AVER(limit <= VMLimit(vm));

  size = AddrOffset(base, limit);
  AVER(size <= VMMapped(vm));

  /* .improve.query-unmap: Could check that the pages we are about */
  /* to unmap are mapped, using VirtualQuery. */
  b = VirtualFree((LPVOID)base, (SIZE_T)size, MEM_DECOMMIT);
  AVER(b != 0);  /* .assume.free.success */
  vm->mapped -= size;

  EVENT3(VMUnmap, vm, base, limit);
}
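/* VirtualFree with MEM_DECOMMIT returns the pages' backing store to
 * the system but leaves the address range reserved, so VMMap can
 * commit it again later; only releasing the whole reservation with
 * MEM_RELEASE gives the address space back. A standalone sketch of
 * the cycle (not part of the MPS sources): */

#include <windows.h>

int main(void)
{
  SYSTEM_INFO si;
  SIZE_T page;
  char *base;

  GetSystemInfo(&si);
  page = si.dwPageSize;

  base = VirtualAlloc(NULL, 16 * page, MEM_RESERVE, PAGE_NOACCESS);
  if (base == NULL)
    return 1;

  /* Commit, touch, and decommit one page; the reservation survives. */
  if (VirtualAlloc(base, page, MEM_COMMIT, PAGE_READWRITE) == NULL)
    return 1;
  base[0] = 1;
  if (!VirtualFree(base, page, MEM_DECOMMIT))
    return 1;

  /* The same page can be committed again; it comes back zero-filled. */
  if (VirtualAlloc(base, page, MEM_COMMIT, PAGE_READWRITE) == NULL)
    return 1;

  /* MEM_RELEASE must cover the whole reservation: size 0, base only. */
  VirtualFree(base, 0, MEM_RELEASE);
  return 0;
}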
/* VMChunkCreate -- create a chunk
 *
 * chunkReturn, return parameter for the created chunk.
 * vmArena, the parent VMArena.
 * size, approximate amount of virtual address space that the chunk
 * should reserve.
 */
static Res VMChunkCreate(Chunk *chunkReturn, VMArena vmArena, Size size)
{
  Arena arena;
  Res res;
  Addr base, limit, chunkStructLimit;
  VMStruct vmStruct;
  VM vm = &vmStruct;
  BootBlockStruct bootStruct;
  BootBlock boot = &bootStruct;
  VMChunk vmChunk;
  void *p;

  AVER(chunkReturn != NULL);
  AVERT(VMArena, vmArena);
  arena = VMArena2Arena(vmArena);
  AVER(size > 0);

  res = VMInit(vm, size, ArenaGrainSize(arena), vmArena->vmParams);
  if (res != ResOK)
    goto failVMInit;

  base = VMBase(vm);
  limit = VMLimit(vm);

  res = BootBlockInit(boot, (void *)base, (void *)limit);
  if (res != ResOK)
    goto failBootInit;

  /* Allocate and map the descriptor. */
  /* See <design/arena/>.@@@@ */
  res = BootAlloc(&p, boot, sizeof(VMChunkStruct), MPS_PF_ALIGN);
  if (res != ResOK)
    goto failChunkAlloc;
  vmChunk = p;
  /* Calculate the limit of the grain where the chunkStruct resides. */
  chunkStructLimit = AddrAlignUp((Addr)(vmChunk + 1), ArenaGrainSize(arena));
  res = vmArenaMap(vmArena, vm, base, chunkStructLimit);
  if (res != ResOK)
    goto failChunkMap;
  vmChunk->overheadMappedLimit = chunkStructLimit;

  /* Copy the VM descriptor into its place in the chunk. */
  VMCopy(VMChunkVM(vmChunk), vm);
  res = ChunkInit(VMChunk2Chunk(vmChunk), arena, base, limit,
                  VMReserved(VMChunkVM(vmChunk)), boot);
  if (res != ResOK)
    goto failChunkInit;

  BootBlockFinish(boot);

  vmChunk->sig = VMChunkSig;
  AVERT(VMChunk, vmChunk);

  *chunkReturn = VMChunk2Chunk(vmChunk);
  return ResOK;

failChunkInit:
  VMUnmap(vm, VMBase(vm), chunkStructLimit);
failChunkMap:
failChunkAlloc:
failBootInit:
  VMFinish(vm);
failVMInit:
  return res;
}
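/* BootAlloc hands out memory by advancing a cursor through the still
 * unmapped address range [base, limit), which is why the chunk
 * descriptor lands at the start of the chunk and why only the grains
 * up to chunkStructLimit need mapping before the descriptor can be
 * written. A much-simplified sketch of that bump allocation follows;
 * the real BootBlock implementation differs in detail, and the names
 * below are illustrative only. */

#include <stddef.h>
#include <stdint.h>

typedef struct {
  char *next;   /* next free address in the boot region */
  char *limit;  /* end of the boot region */
} BootSketch;

static void *bootAllocSketch(BootSketch *boot, size_t size, size_t align)
{
  /* Round the cursor up to the requested alignment (a power of two);
   * wraparound at the very top of the address space is ignored here. */
  uintptr_t p = ((uintptr_t)boot->next + align - 1) & ~(uintptr_t)(align - 1);
  if (p > (uintptr_t)boot->limit || size > (uintptr_t)boot->limit - p)
    return NULL;              /* boot region exhausted */
  boot->next = (char *)(p + size);
  return (void *)p;
}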