Example 1
Res VMMap(VM vm, Addr base, Addr limit)
{
  LPVOID b;

  AVERT(VM, vm);
  AVER(AddrIsAligned(base, vm->pageSize));
  AVER(AddrIsAligned(limit, vm->pageSize));
  AVER(VMBase(vm) <= base);
  AVER(base < limit);
  AVER(limit <= VMLimit(vm));

  /* .improve.query-map: We could check that the pages we are about to
   * map are unmapped using VirtualQuery. */

  b = VirtualAlloc((LPVOID)base, (SIZE_T)AddrOffset(base, limit),
                   MEM_COMMIT, PAGE_EXECUTE_READWRITE);
  if (b == NULL)
    return ResMEMORY;
  AVER((Addr)b == base);        /* base should've been aligned */

  vm->mapped += AddrOffset(base, limit);
  AVER(VMMapped(vm) <= VMReserved(vm));

  EVENT3(VMMap, vm, base, limit);
  return ResOK;
}
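
The .improve.query-map comment above suggests verifying that the pages about to be committed are not already mapped. A minimal sketch of such a check, using the Win32 VirtualQuery API; rangeIsUnmapped is a hypothetical helper and is not part of the original source:

#include <windows.h>

/* Hypothetical helper (not in the original source): walk the range with
 * VirtualQuery and confirm every region is reserved but not yet
 * committed.  Returns FALSE as soon as a committed or free region is
 * found, or if the query itself fails. */
static BOOL rangeIsUnmapped(LPVOID base, SIZE_T size)
{
  MEMORY_BASIC_INFORMATION info;
  char *p = (char *)base;
  char *limit = p + size;

  while (p < limit) {
    if (VirtualQuery(p, &info, sizeof info) == 0)
      return FALSE;                     /* query failed */
    if (info.State != MEM_RESERVE)
      return FALSE;                     /* committed or free, not just reserved */
    p = (char *)info.BaseAddress + info.RegionSize;
  }
  return TRUE;
}

Such a helper could back an extra assertion before the VirtualAlloc call, for example AVER(rangeIsUnmapped((LPVOID)base, (SIZE_T)AddrOffset(base, limit))).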
Example 2
static Bool VMArenaCheck(VMArena vmArena)
{
  Arena arena;
  VMChunk primary;

  CHECKS(VMArena, vmArena);
  arena = VMArena2Arena(vmArena);
  CHECKD(Arena, arena);
  /* Spare pages are committed, so the spare size cannot exceed the committed size. */
  CHECKL(vmArena->spareSize <= arena->committed);

  CHECKL(vmArena->extendBy > 0);
  CHECKL(vmArena->extendMin <= vmArena->extendBy);

  if (arena->primary != NULL) {
    primary = Chunk2VMChunk(arena->primary);
    CHECKD(VMChunk, primary);
    /* We could iterate over all chunks accumulating an accurate */
    /* count of committed, but we don't have all day. */
    CHECKL(VMMapped(primary->vm) <= arena->committed);
  }
  
  CHECKD_NOSIG(Ring, &vmArena->spareRing);

  /* FIXME: Can't check VMParams */

  return TRUE;
}
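
VMArenaCheck follows the checking convention that the other examples rely on through AVERT: each type provides a Check function over its invariants, and the AVERT/CHECKD macros assert its result. A simplified sketch of that relationship, assuming (this is not the actual definition from the MPS's check.h) that the macro expands to an assertion on the type's check function:

#include <assert.h>

/* Simplified sketch, not the actual MPS macros: each type T provides
 * Bool TCheck(T t), and asserting a T boils down to asserting the
 * result of that check function. */
#define AVERT_SKETCH(type, val)  assert(type ## Check(val))

/* For example, AVERT_SKETCH(VMArena, vmArena) expands to
 * assert(VMArenaCheck(vmArena)), which runs the invariants above. */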
Example 3
void VMFinish(VM vm)
{
  BOOL b;

  AVERT(VM, vm);
  /* Descriptor must not be stored inside its own VM at this point. */
  AVER(PointerAdd(vm, sizeof *vm) <= vm->block
       || PointerAdd(vm->block, VMReserved(vm)) <= (Pointer)vm);
  /* All address space must have been unmapped. */
  AVER(VMMapped(vm) == (Size)0);

  EVENT1(VMFinish, vm);

  vm->sig = SigInvalid;

  b = VirtualFree((LPVOID)vm->block, (SIZE_T)0, MEM_RELEASE);
  AVER(b != 0);
}
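
The first AVER above checks that the VM descriptor does not live inside the block it is about to release: the descriptor's address range and the reserved range must be disjoint. As an illustration of that test (rangesDisjoint is a hypothetical helper, not part of the source), two half-open ranges are disjoint exactly when one ends at or before the other begins:

/* Hypothetical illustration of the non-overlap test in VMFinish: two
 * half-open ranges [aBase, aLimit) and [bBase, bLimit) are disjoint iff
 * one ends at or before the other begins. */
static int rangesDisjoint(const void *aBase, const void *aLimit,
                          const void *bBase, const void *bLimit)
{
  return (const char *)aLimit <= (const char *)bBase
      || (const char *)bLimit <= (const char *)aBase;
}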
Example 4
void VMUnmap(VM vm, Addr base, Addr limit)
{
  BOOL b;
  Size size;

  AVERT(VM, vm);
  AVER(AddrIsAligned(base, vm->pageSize));
  AVER(AddrIsAligned(limit, vm->pageSize));
  AVER(VMBase(vm) <= base);
  AVER(base < limit);
  AVER(limit <= VMLimit(vm));

  size = AddrOffset(base, limit);
  AVER(size <= VMMapped(vm));

  /* .improve.query-unmap: Could check that the pages we are about */
  /* to unmap are mapped, using VirtualQuery. */
  b = VirtualFree((LPVOID)base, (SIZE_T)size, MEM_DECOMMIT);
  AVER(b != 0);  /* .assume.free.success */
  vm->mapped -= size;

  EVENT3(VMUnmap, vm, base, limit);
}
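
The .improve.query-unmap comment suggests the mirror image of the check sketched after Example 1: before decommitting, confirm that every page in the range is currently committed. A hypothetical counterpart (again not part of the original source), using VirtualQuery:

#include <windows.h>

/* Hypothetical counterpart of rangeIsUnmapped: confirm every region in
 * [base, base + size) is committed before it is handed to
 * VirtualFree(MEM_DECOMMIT). */
static BOOL rangeIsMapped(LPVOID base, SIZE_T size)
{
  MEMORY_BASIC_INFORMATION info;
  char *p = (char *)base;
  char *limit = p + size;

  while (p < limit) {
    if (VirtualQuery(p, &info, sizeof info) == 0)
      return FALSE;                     /* query failed */
    if (info.State != MEM_COMMIT)
      return FALSE;                     /* reserved or free, not mapped */
    p = (char *)info.BaseAddress + info.RegionSize;
  }
  return TRUE;
}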