void VMUnmap(VM vm, Addr base, Addr limit)
{
  Size size;
  caddr_t addr;

  AVERT(VM, vm);
  AVER(sizeof(int) == sizeof(Addr));
  AVER(base < limit);
  AVER(base >= vm->base);
  AVER(limit <= vm->limit);
  AVER(AddrIsAligned(base, vm->align));
  AVER(AddrIsAligned(limit, vm->align));

  /* Map /etc/passwd onto the area, allowing no access.  This */
  /* effectively depopulates the area from memory, but keeps */
  /* it "busy" as far as the OS is concerned, so that it will not */
  /* be re-used by other calls to mmap which do not specify */
  /* MAP_FIXED.  The offset is specified to mmap so that */
  /* the OS merges this mapping with .map.reserve. */
  size = AddrOffset(base, limit);
  addr = mmap((caddr_t)base, (int)size,
              PROT_NONE, MAP_SHARED | MAP_FIXED,
              vm->none_fd, (off_t)AddrOffset(vm->base, base));
  AVER(addr == (caddr_t)base);

  vm->mapped -= size;

  EVENT_PAA(VMUnmap, vm, base, limit);
}
void VMUnmap(VM vm, Addr base, Addr limit)
{
  Size size;
  void *addr;

  AVERT(VM, vm);
  AVER(base < limit);
  AVER(base >= vm->base);
  AVER(limit <= vm->limit);
  AVER(AddrIsAligned(base, vm->align));
  AVER(AddrIsAligned(limit, vm->align));
  AVER(sizeof(off_t) == sizeof(Size));  /* .assume.off_t */

  size = AddrOffset(base, limit);

  /* see <design/vmo1/#fun.unmap.offset> */
  addr = mmap((void *)base, (size_t)size,
              PROT_NONE, MAP_FILE | MAP_SHARED | MAP_FIXED,
              vm->none_fd, (off_t)AddrOffset(vm->base, base));
  AVER(addr == (void *)base);

  vm->mapped -= size;

  EVENT_PAA(VMUnmap, vm, base, limit);
}
Res VMMap(VM vm, Addr base, Addr limit)
{
  Size size;

  AVERT(VM, vm);
  AVER(sizeof(int) == sizeof(Addr));
  AVER(base < limit);
  AVER(base >= vm->base);
  AVER(limit <= vm->limit);
  AVER(AddrOffset(base, limit) <= INT_MAX);
  AVER(AddrIsAligned(base, vm->align));
  AVER(AddrIsAligned(limit, vm->align));

  /* Map /dev/zero onto the area with a copy-on-write policy.  This */
  /* effectively populates the area with zeroed memory. */

  size = AddrOffset(base, limit);

  if (mmap((caddr_t)base, (int)size,
           PROT_READ | PROT_WRITE | PROT_EXEC,
           MAP_PRIVATE | MAP_FIXED,
           vm->zero_fd, (off_t)0)
      == (caddr_t)-1) {
    AVER(errno == ENOMEM); /* .assume.mmap.err */
    return ResMEMORY;
  }

  vm->mapped += size;

  EVENT_PAA(VMMap, vm, base, limit);
  return ResOK;
}
Res VMMap(VM vm, Addr base, Addr limit)
{
  Size size;
  void *addr;

  AVERT(VM, vm);
  AVER(base < limit);
  AVER(base >= vm->base);

  AVER(limit <= vm->limit);
  AVER(AddrIsAligned(base, vm->align));
  AVER(AddrIsAligned(limit, vm->align));

  /* Map /dev/zero onto the area with a copy-on-write policy.  This */
  /* effectively populates the area with zeroed memory. */
  size = AddrOffset(base, limit);
  /* Check it won't lose any bits. */
  AVER(size <= (Size)(size_t)-1);
  addr = mmap((void *)base, (size_t)size,
              PROT_READ | PROT_WRITE | PROT_EXEC,
              MAP_PRIVATE | MAP_FIXED,
              vm->zero_fd, (off_t)0);
  if(addr == MAP_FAILED) {
    AVER(errno == ENOMEM || errno == EAGAIN); /* .assume.mmap.err */
    return ResMEMORY;
  }
  AVER(addr == (void *)base);

  vm->mapped += size;

  EVENT_PAA(VMMap, vm, base, limit);
  return ResOK;
}
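
The /dev/zero technique in the example above is plain POSIX rather than anything MPS-specific. Below is a minimal standalone sketch of it (an illustration, not MPS code): mapping /dev/zero with MAP_PRIVATE yields demand-zeroed, copy-on-write memory. The 4 KiB size is arbitrary.

/* Illustrative sketch, not MPS code. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
  size_t size = 4096;            /* illustrative size */
  int zero_fd = open("/dev/zero", O_RDWR);
  char *p;

  if (zero_fd < 0)
    return 1;

  /* MAP_PRIVATE over /dev/zero gives copy-on-write, demand-zeroed pages:
     reads see zeroes, and the first write to a page makes a private copy. */
  p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE, zero_fd, 0);
  if (p == MAP_FAILED)
    return 1;

  p[0] = 'x';                     /* copy-on-write fault on the first page */
  printf("%c %d\n", p[0], p[1]);  /* prints "x 0" */

  munmap(p, size);
  close(zero_fd);
  return 0;
}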
void VMUnmap(VM vm, Addr base, Addr limit)
{
  Size size;
  void *addr;

  AVERT(VM, vm);
  AVER(base < limit);
  AVER(base >= vm->base);
  AVER(limit <= vm->limit);
  AVER(AddrIsAligned(base, vm->align));
  AVER(AddrIsAligned(limit, vm->align));

  /* .unmap.reserve: Map /dev/zero onto the area, allowing no access. */
  /* This effectively depopulates the area from memory, but keeps */
  /* it "busy" as far as the OS is concerned, so that it will not */
  /* be re-used by other calls to mmap which do not specify */
  /* MAP_FIXED.  See also .map.reserve. */
  /* The OS doesn't merge this mapping with any neighbours, but it */
  /* can keep track of at least 16K mappings, so it's good enough. */
  size = AddrOffset(base, limit);
  /* Check it won't lose any bits. */
  AVER(size <= (Size)(size_t)-1);
  addr = mmap((void *)base, (size_t)size,
              PROT_NONE, MAP_SHARED | MAP_FIXED | MAP_AUTORESRV,
              vm->zero_fd, (off_t)AddrOffset(vm->base, base));
  AVER(addr == (void *)base);

  vm->mapped -= size;

  EVENT_PAA(VMUnmap, vm, base, limit);
}
Example No. 6
Res VMMap(VM vm, Addr base, Addr limit)
{
  Size size;

  AVERT(VM, vm);
  AVER(sizeof(void *) == sizeof(Addr));
  AVER(base < limit);
  AVER(base >= vm->base);
  AVER(limit <= vm->limit);
  AVER(AddrIsAligned(base, vm->align));
  AVER(AddrIsAligned(limit, vm->align));

  size = AddrOffset(base, limit);

  if(mmap((void *)base, (size_t)size,
          PROT_READ | PROT_WRITE | PROT_EXEC,
          MAP_ANON | MAP_PRIVATE | MAP_FIXED,
          -1, 0)
     == MAP_FAILED) {
    AVER(errno == ENOMEM); /* .assume.mmap.err */
    return ResMEMORY;
  }

  vm->mapped += size;

  EVENT3(VMMap, vm, base, limit);
  return ResOK;
}
Example No. 7
Res VMMap(VM vm, Addr base, Addr limit)
{
  LPVOID b;
  Align align;

  AVERT(VM, vm);
  align = vm->align;
  AVER(AddrIsAligned(base, align));
  AVER(AddrIsAligned(limit, align));
  AVER(vm->base <= base);
  AVER(base < limit);
  AVER(limit <= vm->limit);

  /* .improve.query-map: We could check that the pages we are about to
   * map are unmapped using VirtualQuery. */

  b = VirtualAlloc((LPVOID)base, (SIZE_T)AddrOffset(base, limit),
                   MEM_COMMIT, PAGE_EXECUTE_READWRITE);
  if (b == NULL)
    return ResMEMORY;
  AVER((Addr)b == base);        /* base should've been aligned */

  vm->mapped += AddrOffset(base, limit);

  EVENT3(VMMap, vm, base, limit);
  return ResOK;
}
Example No. 8
Bool VMCheck(VM vm)
{
  CHECKS(VM, vm);
  CHECKL(vm->base != 0);
  CHECKL(vm->limit != 0);
  CHECKL(vm->base < vm->limit);
  CHECKL(vm->mapped <= vm->reserved);
  CHECKL(AddrIsAligned(vm->base, vm->align));
  CHECKL(AddrIsAligned(vm->limit, vm->align));
  return TRUE;
}
Example No. 9
Bool VMCheck(VM vm)
{
  CHECKS(VM, vm);
  CHECKL(vm->base != (Addr)0);
  CHECKL(vm->limit != (Addr)0);
  CHECKL(vm->base < vm->limit);
  CHECKL(AddrIsAligned(vm->base, VMANPageALIGNMENT));
  CHECKL(AddrIsAligned(vm->limit, VMANPageALIGNMENT));
  CHECKL(vm->block != NULL);
  CHECKL((Addr)vm->block <= vm->base);
  CHECKL(vm->mapped <= vm->reserved);
  return TRUE;
}
Bool VMCheck(VM vm)
{
  CHECKS(VM, vm);
  CHECKL(vm->zero_fd >= 0);
  CHECKL(vm->none_fd >= 0);
  CHECKL(vm->zero_fd != vm->none_fd);
  CHECKL(vm->base != 0);
  CHECKL(vm->limit != 0);
  CHECKL(vm->base < vm->limit);
  CHECKL(vm->mapped <= vm->reserved);
  CHECKL(SizeIsP2(vm->align));
  CHECKL(AddrIsAligned(vm->base, vm->align));
  CHECKL(AddrIsAligned(vm->limit, vm->align));
  return TRUE;
}
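
For reference, the alignment predicates exercised by checks like the one above reduce to simple power-of-two bit arithmetic. The definitions below are an illustrative sketch, not the MPS macros; size_is_p2 and addr_is_aligned are made-up names.

/* Illustrative sketch, not the MPS definitions. */
#include <assert.h>
#include <stdint.h>

/* A non-zero value is a power of two iff exactly one bit is set. */
static int size_is_p2(uintptr_t size)
{
  return size != 0 && (size & (size - 1)) == 0;
}

/* For a power-of-two alignment, an address is aligned iff all the bits
   below the alignment bit are zero. */
static int addr_is_aligned(const void *addr, uintptr_t align)
{
  assert(size_is_p2(align));
  return ((uintptr_t)addr & (align - 1)) == 0;
}

int main(void)
{
  assert(size_is_p2(4096));
  assert(!size_is_p2(12));
  assert(addr_is_aligned((void *)(uintptr_t)0x10000, 4096));
  assert(!addr_is_aligned((void *)(uintptr_t)0x10008, 16));
  return 0;
}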
Example No. 11
static void MVFFFree(Pool pool, Addr old, Size size)
{
  Res res;
  Addr base, limit;
  MVFF mvff;

  AVERT(Pool, pool);
  mvff = Pool2MVFF(pool);
  AVERT(MVFF, mvff);

  AVER(old != (Addr)0);
  AVER(AddrIsAligned(old, PoolAlignment(pool)));
  AVER(size > 0);

  size = SizeAlignUp(size, PoolAlignment(pool));
  base = old;
  limit = AddrAdd(base, size);

  res = MVFFAddToFreeList(&base, &limit, mvff);
  AVER(res == ResOK);
  if (res == ResOK)
    MVFFFreeSegs(mvff, base, limit);

  return;
}
Example No. 12
File: pool.c Project: bhanug/mps
Res PoolAlloc(Addr *pReturn, Pool pool, Size size)
{
  Res res;

  AVER(pReturn != NULL);
  AVERT(Pool, pool);
  AVER(size > 0);

  res = Method(Pool, pool, alloc)(pReturn, pool, size);
  if (res != ResOK)
    return res;
  /* Make sure that the allocated address was in the pool's memory. */
  /* .hasaddr.critical: The PoolHasAddr check is expensive, and in */
  /* allocation-bound programs this is on the critical path. */
  AVER_CRITICAL(PoolHasAddr(pool, *pReturn));
  /* All allocations should be aligned to the pool's alignment */
  AVER_CRITICAL(AddrIsAligned(*pReturn, pool->alignment));

  /* All PoolAllocs should advance the allocation clock, so we count */
  /* it all in the fillMutatorSize field. */
  ArenaGlobals(PoolArena(pool))->fillMutatorSize += size;

  EVENT3(PoolAlloc, pool, *pReturn, size);

  return ResOK;
}
Example No. 13
static FreelistBlock FreelistBlockInit(Freelist fl, Addr base, Addr limit)
{
  FreelistBlock block;

  AVERT(Freelist, fl);
  AVER(base != NULL);
  AVER(AddrIsAligned(base, fl->alignment));
  AVER(base < limit);
  AVER(AddrIsAligned(limit, fl->alignment));

  block = (FreelistBlock)base;
  block->small.next = FreelistTagSet(NULL);
  FreelistBlockSetLimit(fl, block, limit);
  AVERT(FreelistBlock, block);
  return block;
}
Example No. 14
Res VMMap(VM vm, Addr base, Addr limit)
{
  Size size;

  AVER(base != (Addr)0);
  AVER(vm->base <= base);
  AVER(base < limit);
  AVER(limit <= vm->limit);
  AVER(AddrIsAligned(base, VMANPageALIGNMENT));
  AVER(AddrIsAligned(limit, VMANPageALIGNMENT));

  size = AddrOffset(base, limit);
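  /* No real page tables to touch in this implementation: zero the area so
     that it behaves like freshly mapped, demand-zeroed memory. */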
  memset((void *)base, (int)0, size);

  vm->mapped += size;

  EVENT3(VMMap, vm, base, limit);
  return ResOK;
}
Example No. 15
void VMUnmap(VM vm, Addr base, Addr limit)
{
  Size size;

  AVER(base != (Addr)0);
  AVER(vm->base <= base);
  AVER(base < limit);
  AVER(limit <= vm->limit);
  AVER(AddrIsAligned(base, VMANPageALIGNMENT));
  AVER(AddrIsAligned(limit, VMANPageALIGNMENT));
 
  size = AddrOffset(base, limit);
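  /* Fill the area with a junk pattern so that any use of the memory after
     it has been "unmapped" is likely to be noticed. */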
  memset((void *)base, 0xCD, size);

  AVER(vm->mapped >= size);
  vm->mapped -= size;

  EVENT3(VMUnmap, vm, base, limit);
}
Example No. 16
Res VMInit(VM vm, Size size, Size grainSize, void *params)
{
  LPVOID vbase;
  Size pageSize, reserved;
  VMParams vmParams = params;

  AVER(vm != NULL);
  AVERT(ArenaGrainSize, grainSize);
  AVER(size > 0);
  AVER(params != NULL); /* FIXME: Should have full AVERT? */

  AVER(COMPATTYPE(LPVOID, Addr));  /* .assume.lpvoid-addr */
  AVER(COMPATTYPE(SIZE_T, Size));

  pageSize = PageSize();

  /* Grains must consist of whole pages. */
  AVER(grainSize % pageSize == 0);

  /* Check that the rounded-up sizes will fit in a Size. */
  size = SizeRoundUp(size, grainSize);
  if (size < grainSize || size > (Size)(SIZE_T)-1)
    return ResRESOURCE;
  reserved = size + grainSize - pageSize;
  if (reserved < grainSize || reserved > (Size)(SIZE_T)-1)
    return ResRESOURCE;

  /* Allocate the address space. */
  vbase = VirtualAlloc(NULL,
                       reserved,
                       vmParams->topDown ?
                         MEM_RESERVE | MEM_TOP_DOWN :
                         MEM_RESERVE,
                       PAGE_NOACCESS);
  if (vbase == NULL)
    return ResRESOURCE;

  AVER(AddrIsAligned(vbase, pageSize));

  vm->pageSize = pageSize;
  vm->block = vbase;
  vm->base = AddrAlignUp(vbase, grainSize);
  vm->limit = AddrAdd(vm->base, size);
  AVER(vm->base < vm->limit);  /* .assume.not-last */
  AVER(vm->limit <= AddrAdd((Addr)vm->block, reserved));
  vm->reserved = reserved;
  vm->mapped = 0;

  vm->sig = VMSig;
  AVERT(VM, vm);

  EVENT3(VMInit, vm, VMBase(vm), VMLimit(vm));
  return ResOK;
}
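
The Windows examples all build on the same VirtualAlloc protocol: reserve address space first, commit and decommit pages inside it, and finally release the reservation. The sketch below shows that protocol on its own (illustrative only, not MPS code); it assumes 4 KiB pages, whereas real code should query the page size as VMInit does.

/* Illustrative Win32 sketch, not MPS code. */
#include <windows.h>

int main(void)
{
  SIZE_T reserved = 1 << 20;   /* 1 MiB of address space, illustrative */
  SIZE_T page = 4096;          /* assumed page size; query it in real code */
  char *base, *p;

  /* Reserve: claims the address range without backing it with memory. */
  base = VirtualAlloc(NULL, reserved, MEM_RESERVE, PAGE_NOACCESS);
  if (base == NULL)
    return 1;

  /* Commit one page inside the reservation (cf. VMMap). */
  p = VirtualAlloc(base + page, page, MEM_COMMIT, PAGE_READWRITE);
  if (p == NULL)
    return 1;
  p[0] = 'x';

  /* Decommit it again (cf. VMUnmap); the address range stays reserved. */
  VirtualFree(p, page, MEM_DECOMMIT);

  /* Release the whole reservation; the size must be 0 with MEM_RELEASE. */
  VirtualFree(base, 0, MEM_RELEASE);
  return 0;
}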
Example No. 17
void VMUnmap(VM vm, Addr base, Addr limit)
{
  Align align;
  BOOL b;

  AVERT(VM, vm);
  align = vm->align;
  AVER(AddrIsAligned(base, align));
  AVER(AddrIsAligned(limit, align));
  AVER(vm->base <= base);
  AVER(base < limit);
  AVER(limit <= vm->limit);

  /* .improve.query-unmap: Could check that the pages we are about */
  /* to unmap are mapped, using VirtualQuery. */
  b = VirtualFree((LPVOID)base, (SIZE_T)AddrOffset(base, limit), MEM_DECOMMIT);
  AVER(b != 0);  /* .assume.free.success */
  vm->mapped -= AddrOffset(base, limit);

  EVENT3(VMUnmap, vm, base, limit);
}
Example No. 18
File: pool.c Project: bhanug/mps
void PoolFree(Pool pool, Addr old, Size size)
{
  AVERT(Pool, pool);
  AVER(old != NULL);
  /* The pool methods should check that old is in pool. */
  AVER(size > 0);
  AVER(AddrIsAligned(old, pool->alignment));
  AVER(PoolHasRange(pool, old, AddrAdd(old, size)));

  Method(Pool, pool, free)(pool, old, size);
 
  EVENT3(PoolFree, pool, old, size);
}
Example No. 19
void MFSExtend(Pool pool, Addr base, Size size)
{
  MFS mfs;
  Tract tract;
  Word i, unitsPerExtent;
  Size unitSize;
  Header header = NULL;

  AVERT(Pool, pool);
  mfs = PoolPoolMFS(pool);
  AVERT(MFS, mfs);
  AVER(size == mfs->extendBy);

  /* Ensure that the memory we're adding belongs to this pool.  This is
     automatic if it was allocated using ArenaAlloc, but if the memory is
     being inserted from elsewhere then it must have been set up correctly. */
  AVER(PoolHasAddr(pool, base));
  
  /* .tract.chain: chain first tracts through TractP(tract) */
  tract = TractOfBaseAddr(PoolArena(pool), base);

  AVER(TractPool(tract) == pool);

  TractSetP(tract, (void *)mfs->tractList);
  mfs->tractList = tract;

  /* Update accounting */
  mfs->total += size;
  mfs->free += size;

  /* Sew together all the new empty units in the region, working down */
  /* from the top so that they are in ascending order of address on the */
  /* free list. */

  unitSize = mfs->unitSize;
  unitsPerExtent = size/unitSize;
  AVER(unitsPerExtent > 0);

#define SUB(b, s, i)    ((Header)AddrAdd(b, (s)*(i)))

  for(i = 0; i < unitsPerExtent; ++i) {
    header = SUB(base, unitSize, unitsPerExtent-i - 1);
    AVER(AddrIsAligned(header, pool->alignment));
    AVER(AddrAdd((Addr)header, unitSize) <= AddrAdd(base, size));
    header->next = mfs->freeList;
    mfs->freeList = header;
  }

#undef SUB
}
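
The loop above threads the new units onto an intrusive free list from the top of the region downwards, so that the resulting list is in ascending address order. Below is a minimal standalone sketch of that idiom (illustrative names and sizes, not MPS code).

/* Illustrative sketch, not MPS code. */
#include <stdio.h>
#include <stdlib.h>

typedef struct HeaderStruct {
  struct HeaderStruct *next;
} HeaderStruct, *Header;

int main(void)
{
  size_t unitSize = 64, units = 8, i;   /* illustrative sizes */
  char *base = malloc(unitSize * units);
  Header freeList = NULL;

  if (base == NULL)
    return 1;

  /* Push units from the top of the region downwards, so that the lowest
     address ends up at the head of the list. */
  for (i = 0; i < units; ++i) {
    Header h = (Header)(base + unitSize * (units - i - 1));
    h->next = freeList;
    freeList = h;
  }

  printf("head == base? %d\n", (char *)freeList == base);  /* prints 1 */

  free(base);
  return 0;
}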
Example No. 20
void VMUnmap(VM vm, Addr base, Addr limit)
{
  BOOL b;
  Size size;

  AVERT(VM, vm);
  AVER(AddrIsAligned(base, vm->pageSize));
  AVER(AddrIsAligned(limit, vm->pageSize));
  AVER(VMBase(vm) <= base);
  AVER(base < limit);
  AVER(limit <= VMLimit(vm));

  size = AddrOffset(base, limit);
  AVER(size <= VMMapped(vm));

  /* .improve.query-unmap: Could check that the pages we are about */
  /* to unmap are mapped, using VirtualQuery. */
  b = VirtualFree((LPVOID)base, (SIZE_T)size, MEM_DECOMMIT);
  AVER(b != 0);  /* .assume.free.success */
  vm->mapped -= size;

  EVENT3(VMUnmap, vm, base, limit);
}
Example No. 21
void VMUnmap(VM vm, Addr base, Addr limit)
{
  Size size;
  void *addr;

  AVERT(VM, vm);
  AVER(base < limit);
  AVER(base >= vm->base);
  AVER(limit <= vm->limit);
  AVER(AddrIsAligned(base, vm->align));
  AVER(AddrIsAligned(limit, vm->align));

  size = AddrOffset(base, limit);

  /* see <design/vmo1/#fun.unmap.offset> */
  addr = mmap((void *)base, (size_t)size,
              PROT_NONE, MAP_ANON | MAP_PRIVATE | MAP_FIXED,
              -1, 0);
  AVER(addr == (void *)base);

  vm->mapped -= size;

  EVENT3(VMUnmap, vm, base, limit);
}
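
This variant decommits by replacing the mapping with a fresh PROT_NONE anonymous mapping rather than by mapping a file. The standalone sketch below shows the whole reserve/commit/decommit/release cycle using that approach (illustrative only, not MPS code); note that MAP_ANON is spelled MAP_ANONYMOUS on some systems.

/* Illustrative POSIX sketch, not MPS code. */
#include <sys/mman.h>

int main(void)
{
  size_t reserved = 1 << 20, page = 4096;  /* illustrative sizes */
  char *base, *p;

  /* Reserve: the range is claimed but inaccessible and unbacked. */
  base = mmap(NULL, reserved, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
  if (base == MAP_FAILED)
    return 1;

  /* Commit one page (cf. VMMap): MAP_FIXED replaces the PROT_NONE mapping. */
  p = mmap(base + page, page, PROT_READ | PROT_WRITE,
           MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
  if (p == MAP_FAILED)
    return 1;
  p[0] = 'x';

  /* Decommit (as the VMUnmap above does): back to PROT_NONE; the range
     stays reserved against other, non-MAP_FIXED mmap calls. */
  mmap(p, page, PROT_NONE, MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);

  /* Release the whole reservation. */
  munmap(base, reserved);
  return 0;
}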
Example No. 22
Tract TractOfBaseAddr(Arena arena, Addr addr)
{
  Tract tract = NULL;
  Bool found;

  AVERT_CRITICAL(Arena, arena);
  AVER_CRITICAL(AddrIsAligned(addr, ArenaGrainSize(arena)));

  /* Check first in the cache, see <design/arena/#tract.cache>. */
  if (arena->lastTractBase == addr) {
    tract = arena->lastTract;
  } else {
    found = TractOfAddr(&tract, arena, addr);
    AVER_CRITICAL(found);
  }

  AVER_CRITICAL(TractBase(tract) == addr);
  return tract;
}
Example No. 23
static void FreelistBlockSetLimit(Freelist fl, FreelistBlock block, Addr limit)
{
  Size size;

  AVERT(Freelist, fl);
  AVERT(FreelistBlock, block);
  AVER(AddrIsAligned(limit, fl->alignment));
  AVER(FreelistBlockBase(block) < limit);

  size = AddrOffset(block, limit);
  if (size >= sizeof(block->large)) {
    block->large.next = FreelistTagReset(block->large.next);
    block->large.limit = limit;
  } else {
    AVER(size >= sizeof(block->small));
    block->small.next = FreelistTagSet(block->small.next);
  }
  AVER(FreelistBlockLimit(fl, block) == limit);
}
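
FreelistBlockSetLimit distinguishes small and large blocks by setting or resetting a tag on the stored next pointer. The sketch below shows the usual low-bit pointer-tagging idiom that such names suggest; these are made-up illustrative definitions, not the MPS ones, and they assume stored pointers are at least 2-byte aligned.

/* Illustrative sketch, not the MPS definitions. */
#include <assert.h>
#include <stdint.h>

/* Assumes tagged pointers are at least 2-byte aligned, so the low bit is
   always zero in an untagged pointer and is free to carry a one-bit tag. */
static void *tag_set(void *p)    { return (void *)((uintptr_t)p | 1); }
static void *tag_reset(void *p)  { return (void *)((uintptr_t)p & ~(uintptr_t)1); }
static int   tag_is_set(void *p) { return ((uintptr_t)p & 1) != 0; }

int main(void)
{
  int x;
  void *p = &x;          /* int objects are at least 2-byte aligned here */
  void *t = tag_set(p);

  assert(!tag_is_set(p));
  assert(tag_is_set(t));
  assert(tag_reset(t) == p);
  return 0;
}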
Example No. 24
Bool BufferCheck(Buffer buffer)
{
  CHECKS(Buffer, buffer);
  CHECKC(Buffer, buffer);
  CHECKL(buffer->serial < buffer->pool->bufferSerial); /* .trans.mod */
  CHECKU(Arena, buffer->arena);
  CHECKU(Pool, buffer->pool);
  CHECKL(buffer->arena == buffer->pool->arena);
  CHECKD_NOSIG(Ring, &buffer->poolRing);
  CHECKL(BoolCheck(buffer->isMutator));
  CHECKL(buffer->fillSize >= 0.0);
  CHECKL(buffer->emptySize >= 0.0);
  CHECKL(buffer->emptySize <= buffer->fillSize);
  CHECKL(buffer->alignment == buffer->pool->alignment);
  CHECKL(AlignCheck(buffer->alignment));

  /* If any of the buffer's fields indicate that it is reset, make */
  /* sure it is really reset.  Otherwise, check various properties */
  /* of the non-reset fields. */
  if (buffer->mode & BufferModeTRANSITION) {
    /* nothing to check */
  } else if ((buffer->mode & BufferModeATTACHED) == 0
             || buffer->base == (Addr)0
             || buffer->ap_s.init == (Addr)0
             || buffer->ap_s.alloc == (Addr)0
             || buffer->poolLimit == (Addr)0) {
    CHECKL((buffer->mode & BufferModeATTACHED) == 0);
    CHECKL(buffer->base == (Addr)0);
    CHECKL(buffer->initAtFlip == (Addr)0);
    CHECKL(buffer->ap_s.init == (Addr)0);
    CHECKL(buffer->ap_s.alloc == (Addr)0);
    CHECKL(buffer->ap_s.limit == (Addr)0);
    /* Nothing reliable to check for lightweight frame state */
    CHECKL(buffer->poolLimit == (Addr)0);
  } else {
    /* The buffer is attached to a region of memory.   */
    /* Check consistency. */
    CHECKL(buffer->mode & BufferModeATTACHED);

    /* These fields should obey the ordering */
    /* base <= init <= alloc <= poolLimit */
    CHECKL((mps_addr_t)buffer->base <= buffer->ap_s.init);
    CHECKL(buffer->ap_s.init <= buffer->ap_s.alloc);
    CHECKL(buffer->ap_s.alloc <= (mps_addr_t)buffer->poolLimit);

    /* Check that the fields are aligned to the buffer alignment. */
    CHECKL(AddrIsAligned(buffer->base, buffer->alignment));
    CHECKL(AddrIsAligned(buffer->initAtFlip, buffer->alignment));
    CHECKL(AddrIsAligned(buffer->ap_s.init, buffer->alignment));
    CHECKL(AddrIsAligned(buffer->ap_s.alloc, buffer->alignment));
    CHECKL(AddrIsAligned(buffer->ap_s.limit, buffer->alignment));
    CHECKL(AddrIsAligned(buffer->poolLimit, buffer->alignment));

    /* If the buffer isn't trapped then "limit" should be the limit */
    /* set by the owning pool.  Otherwise, "init" is either at the */
    /* same place it was at flip (.commit.before) or has been set */
    /* to "alloc" (.commit.after).  Also, when the buffer is */
    /* flipped, initAtFlip should hold the init at flip, which is */
    /* between the base and current init.  Otherwise, initAtFlip */
    /* is kept at zero to avoid misuse (see */
    /* request.dylan.170429.sol.zero_). */
    /* .. _request.dylan.170429.sol.zero: https://info.ravenbrook.com/project/mps/import/2001-11-05/mmprevol/request/dylan/170429 */

    if (BufferIsTrapped(buffer)) {
      /* .check.use-trapped: This checking function uses BufferIsTrapped, */
      /* So BufferIsTrapped can't do checking as that would cause an */
      /* infinite loop. */
      if (buffer->mode & BufferModeFLIPPED) {
        CHECKL(buffer->ap_s.init == buffer->initAtFlip
               || buffer->ap_s.init == buffer->ap_s.alloc);
        CHECKL(buffer->base <= buffer->initAtFlip);
        CHECKL(buffer->initAtFlip <= (Addr)buffer->ap_s.init);
      }
      /* Nothing special to check in the logged mode. */
    } else {
      CHECKL(buffer->initAtFlip == (Addr)0);
    }
  }

  return TRUE;
}
Example No. 25
Res VMCreate(VM *vmReturn, Size size, void *params)
{
  LPVOID vbase;
  SYSTEM_INFO si;
  Align align;
  VM vm;
  Res res;
  BOOL b;
  VMParams vmParams = params;

  AVER(vmReturn != NULL);
  AVER(params != NULL); /* FIXME: Should have full AVERT? */

  AVER(COMPATTYPE(LPVOID, Addr));  /* .assume.lpvoid-addr */
  AVER(COMPATTYPE(SIZE_T, Size));

  GetSystemInfo(&si);
  align = (Align)si.dwPageSize;
  AVER((DWORD)align == si.dwPageSize); /* check it didn't truncate */
  AVER(SizeIsP2(align));    /* see .assume.sysalign */
  size = SizeAlignUp(size, align);
  if ((size == 0) || (size > (Size)(SIZE_T)-1))
    return ResRESOURCE;

  /* Allocate the vm descriptor.  This is likely to be wasteful. */
  vbase = VirtualAlloc(NULL, SizeAlignUp(sizeof(VMStruct), align),
                       MEM_COMMIT, PAGE_READWRITE);
  if (vbase == NULL)
    return ResMEMORY;
  vm = (VM)vbase;

  /* Allocate the address space. */
  vbase = VirtualAlloc(NULL,
                       size,
                       vmParams->topDown ?
                         MEM_RESERVE | MEM_TOP_DOWN :
                         MEM_RESERVE,
                       PAGE_NOACCESS);
  if (vbase == NULL) {
    res = ResRESOURCE;
    goto failReserve;
  }

  AVER(AddrIsAligned(vbase, align));

  vm->align = align;
  vm->base = (Addr)vbase;
  vm->limit = AddrAdd(vbase, size);
  vm->reserved = size;
  vm->mapped = 0;
  AVER(vm->base < vm->limit);  /* .assume.not-last */

  vm->sig = VMSig;
  AVERT(VM, vm);

  EVENT3(VMCreate, vm, vm->base, vm->limit);
  *vmReturn = vm;
  return ResOK;

failReserve:
  b = VirtualFree((LPVOID)vm, (SIZE_T)0, MEM_RELEASE);
  AVER(b != 0);
  return res;
}
Example No. 26
Bool RangeIsAligned(Range range, Align alignment)
{
  AVERT(Range, range);
  return AddrIsAligned(RangeBase(range), alignment)
      && AddrIsAligned(RangeLimit(range), alignment);
}
Example No. 27
Bool BTCheck(BT bt)
{
  AVER(bt != NULL);
  AVER(AddrIsAligned((Addr)bt, sizeof(Word)));
  return TRUE;
}
Example No. 28
Res ChunkInit(Chunk chunk, Arena arena, Addr base, Addr limit, Size reserved,
              BootBlock boot)
{
  Size size;
  Count pages;
  Shift pageShift;
  Size pageTableSize;
  Addr allocBase;
  void *p;
  Res res;

  /* chunk is supposed to be uninitialized, so don't check it. */
  AVERT(Arena, arena);
  AVER(base != NULL);
  AVER(AddrIsAligned(base, ArenaGrainSize(arena)));
  AVER(base < limit);
  AVER(AddrIsAligned(limit, ArenaGrainSize(arena)));
  AVERT(BootBlock, boot);

  chunk->serial = (arena->chunkSerial)++;
  chunk->arena = arena;
  RingInit(&chunk->arenaRing);

  chunk->pageSize = ArenaGrainSize(arena);
  chunk->pageShift = pageShift = SizeLog2(chunk->pageSize);
  chunk->base = base;
  chunk->limit = limit;
  chunk->reserved = reserved;
  size = ChunkSize(chunk);

  /* .overhead.pages: Chunk overhead for the page allocation table. */
  chunk->pages = pages = size >> pageShift;
  res = BootAlloc(&p, boot, (size_t)BTSize(pages), MPS_PF_ALIGN);
  if (res != ResOK)
    goto failAllocTable;
  chunk->allocTable = p;

  pageTableSize = SizeAlignUp(pages * sizeof(PageUnion), chunk->pageSize);
  chunk->pageTablePages = pageTableSize >> pageShift;

  res = Method(Arena, arena, chunkInit)(chunk, boot);
  if (res != ResOK)
    goto failClassInit;

  /* @@@@ Is BootAllocated always right? */
  /* Last thing we BootAlloc'd is pageTable.  We requested pageSize */
  /* alignment, and pageTableSize is itself pageSize aligned, so */
  /* BootAllocated should also be pageSize aligned. */
  AVER(AddrIsAligned(BootAllocated(boot), chunk->pageSize));
  chunk->allocBase = (Index)(BootAllocated(boot) >> pageShift);

  /* Init allocTable after class init, because it might be mapped there. */
  BTResRange(chunk->allocTable, 0, pages);

  /* Check that there is some usable address space remaining in the chunk. */
  allocBase = PageIndexBase(chunk, chunk->allocBase);
  AVER(allocBase < chunk->limit);

  /* Add the chunk's free address space to the arena's freeLand, so that
     we can allocate from it. */
  if (arena->hasFreeLand) {
    res = ArenaFreeLandInsert(arena, allocBase, chunk->limit);
    if (res != ResOK)
      goto failLandInsert;
  }

  TreeInit(&chunk->chunkTree);

  chunk->sig = ChunkSig;
  AVERT(Chunk, chunk);

  ArenaChunkInsert(arena, chunk);

  return ResOK;

failLandInsert:
  Method(Arena, arena, chunkFinish)(chunk);
  /* .no-clean: No clean-ups needed past this point for boot, as we will
     discard the chunk. */
failClassInit:
failAllocTable:
  return res;
}
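
chunk->allocTable is a bit table with one bit per page, sized by BTSize and cleared with BTResRange. The sketch below is a generic illustration of that kind of structure (not the MPS BT module; bt_words, bt_set, bt_get and bt_res_range are made-up names).

/* Generic illustrative sketch, not the MPS BT module. */
#include <assert.h>
#include <stdint.h>
#include <string.h>

typedef uintptr_t Word;
#define WORD_BITS (sizeof(Word) * 8)

/* Number of Words needed to hold n bits. */
static size_t bt_words(size_t n)
{
  return (n + WORD_BITS - 1) / WORD_BITS;
}

static void bt_set(Word *bt, size_t i) { bt[i / WORD_BITS] |= (Word)1 << (i % WORD_BITS); }
static int  bt_get(Word *bt, size_t i) { return (bt[i / WORD_BITS] >> (i % WORD_BITS)) & 1; }

/* Clear bits in [base, limit), as BTResRange does for the whole table. */
static void bt_res_range(Word *bt, size_t base, size_t limit)
{
  size_t i;
  for (i = base; i < limit; ++i)
    bt[i / WORD_BITS] &= ~((Word)1 << (i % WORD_BITS));
}

int main(void)
{
  Word table[4];

  assert(bt_words(4 * WORD_BITS) == 4);
  memset(table, 0xFF, sizeof table);
  bt_res_range(table, 0, 4 * WORD_BITS);
  assert(bt_get(table, 10) == 0);
  bt_set(table, 10);
  assert(bt_get(table, 10) == 1);
  return 0;
}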