void VMUnmap(VM vm, Addr base, Addr limit)
{
  Size size;
  void *addr;

  AVERT(VM, vm);
  AVER(base < limit);
  AVER(base >= vm->base);
  AVER(limit <= vm->limit);
  AVER(AddrIsAligned(base, vm->align));
  AVER(AddrIsAligned(limit, vm->align));

  /* .unmap.reserve: Map /dev/zero onto the area, allowing no access. */
  /* This effectively depopulates the area from memory, but keeps */
  /* it "busy" as far as the OS is concerned, so that it will not */
  /* be re-used by other calls to mmap which do not specify */
  /* MAP_FIXED.  See also .map.reserve. */
  /* The OS doesn't merge this mapping with any neighbours, but it */
  /* can keep track of at least 16K mappings, so it's good enough. */
  size = AddrOffset(base, limit);
  /* Check it won't lose any bits. */
  AVER(size <= (Size)(size_t)-1);
  addr = mmap((void *)base, (size_t)size,
              PROT_NONE, MAP_SHARED | MAP_FIXED | MAP_AUTORESRV,
              vm->zero_fd, (off_t)AddrOffset(vm->base, base));
  AVER(addr == (void *)base);

  vm->mapped -= size;

  EVENT_PAA(VMUnmap, vm, base, limit);
}
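
The .unmap.reserve comment above describes the key trick: instead of calling munmap, the range is remapped with PROT_NONE so that its pages are discarded while the address range stays reserved. The following stand-alone sketch shows the same reserve/map/unmap cycle using anonymous mappings in place of the vm->zero_fd descriptor the MPS code uses; sizes, flags and error handling are illustrative, not the MPS implementation.

#include <assert.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
  size_t page = (size_t)sysconf(_SC_PAGESIZE);
  size_t size = 16 * page;

  /* Reserve address space without committing any memory. */
  void *base = mmap(NULL, size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(base != MAP_FAILED);

  /* "Map": make the first page usable (analogous to VMMap). */
  void *p = mmap(base, page, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
  assert(p == base);
  memset(p, 0xAB, page);

  /* "Unmap": remap with PROT_NONE (analogous to VMUnmap).  The page is
     discarded, but the range stays busy, so unrelated mmap calls that
     do not use MAP_FIXED cannot land inside it. */
  p = mmap(base, page, PROT_NONE,
           MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
  assert(p == base);

  munmap(base, size);
  return 0;
}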
void VMUnmap(VM vm, Addr base, Addr limit)
{
  Size size;
  void *addr;

  AVERT(VM, vm);
  AVER(base < limit);
  AVER(base >= vm->base);
  AVER(limit <= vm->limit);
  AVER(AddrIsAligned(base, vm->align));
  AVER(AddrIsAligned(limit, vm->align));
  AVER(sizeof(off_t) == sizeof(Size));  /* .assume.off_t */

  size = AddrOffset(base, limit);

  /* see <design/vmo1/#fun.unmap.offset> */
  addr = mmap((void *)base, (size_t)size,
              PROT_NONE, MAP_FILE | MAP_SHARED | MAP_FIXED,
              vm->none_fd, (off_t)AddrOffset(vm->base, base));
  AVER(addr == (void *)base);

  vm->mapped -= size;

  EVENT_PAA(VMUnmap, vm, base, limit);
}
Res VMMap(VM vm, Addr base, Addr limit)
{
  Size size;

  AVERT(VM, vm);
  AVER(sizeof(int) == sizeof(Addr));
  AVER(base < limit);
  AVER(base >= vm->base);
  AVER(limit <= vm->limit);
  AVER(AddrOffset(base, limit) <= INT_MAX);
  AVER(AddrIsAligned(base, vm->align));
  AVER(AddrIsAligned(limit, vm->align));

  /* Map /dev/zero onto the area with a copy-on-write policy.  This */
  /* effectively populates the area with zeroed memory. */

  size = AddrOffset(base, limit);

  if (mmap((caddr_t)base, (int)size,
           PROT_READ | PROT_WRITE | PROT_EXEC,
           MAP_PRIVATE | MAP_FIXED,
           vm->zero_fd, (off_t)0)
      == (caddr_t)-1) {
    AVER(errno == ENOMEM); /* .assume.mmap.err */
    return ResMEMORY;
  }

  vm->mapped += size;

  EVENT_PAA(VMMap, vm, base, limit);
  return ResOK;
}
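
The comment above relies on the classic /dev/zero idiom: a MAP_PRIVATE mapping of /dev/zero behaves like zero-filled, copy-on-write anonymous memory. A minimal stand-alone demonstration (file handling and sizes are illustrative only):

#include <assert.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
  size_t page = (size_t)sysconf(_SC_PAGESIZE);
  int zero_fd = open("/dev/zero", O_RDONLY);
  assert(zero_fd >= 0);

  /* A private mapping of /dev/zero behaves like anonymous memory:
     reads see zeroes, and the first write copies the page. */
  unsigned char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE, zero_fd, 0);
  assert(p != MAP_FAILED);
  assert(p[0] == 0);   /* the area is populated with zeroed memory */
  p[0] = 42;           /* copy-on-write: private to this mapping */
  printf("%d\n", p[0]);

  munmap(p, page);
  close(zero_fd);
  return 0;
}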
Example 4
void ProtSet(Addr base, Addr limit, AccessSet mode)
{
  int flags;

  AVER(sizeof(size_t) == sizeof(Addr));
  AVER(base < limit);
  AVER(base != 0);
  AVER(AddrOffset(base, limit) <= INT_MAX);     /* should be redundant */

  /* Convert the MPS AccessSet into UNIX PROT flags. */
  switch(mode) {
  case AccessWRITE | AccessREAD:
  case AccessREAD:      /* forbids writes as well */
    flags = PROT_NONE;
    break;
  case AccessWRITE:
    flags = PROT_READ | PROT_EXEC;
    break;
  case AccessSetEMPTY:
    flags = PROT_READ | PROT_WRITE | PROT_EXEC;
    break;
  default:
    NOTREACHED;
    flags = PROT_NONE;
  }

  if(mprotect((void *)base, (size_t)AddrOffset(base, limit), flags) != 0)
    NOTREACHED;
}
Example 5
Res VMMap(VM vm, Addr base, Addr limit)
{
  LPVOID b;
  Align align;

  AVERT(VM, vm);
  align = vm->align;
  AVER(AddrIsAligned(base, align));
  AVER(AddrIsAligned(limit, align));
  AVER(vm->base <= base);
  AVER(base < limit);
  AVER(limit <= vm->limit);

  /* .improve.query-map: We could check that the pages we are about to
   * map are unmapped using VirtualQuery. */

  b = VirtualAlloc((LPVOID)base, (SIZE_T)AddrOffset(base, limit),
                   MEM_COMMIT, PAGE_EXECUTE_READWRITE);
  if (b == NULL)
    return ResMEMORY;
  AVER((Addr)b == base);        /* base should've been aligned */

  vm->mapped += AddrOffset(base, limit);

  EVENT3(VMMap, vm, base, limit);
  return ResOK;
}
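
This Windows variant relies on VirtualAlloc's separation of reserving address space from committing pages. A small, self-contained sketch of that reserve/commit/decommit cycle outside the MPS types (the 4096-byte page size and error handling are illustrative):

#include <assert.h>
#include <windows.h>

int main(void)
{
  SIZE_T size = 16 * 4096;

  /* Reserve address space only; no memory is committed yet. */
  LPVOID base = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
  assert(base != NULL);

  /* Commit one page within the reservation (analogous to VMMap). */
  LPVOID p = VirtualAlloc(base, 4096, MEM_COMMIT, PAGE_READWRITE);
  assert(p == base);
  ((char *)p)[0] = 1;

  /* Decommit it again (analogous to VMUnmap); the range stays reserved. */
  assert(VirtualFree(base, 4096, MEM_DECOMMIT));

  /* Release the whole reservation; MEM_RELEASE requires size 0. */
  assert(VirtualFree(base, 0, MEM_RELEASE));
  return 0;
}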
void VMUnmap(VM vm, Addr base, Addr limit)
{
  Size size;
  caddr_t addr;

  AVERT(VM, vm);
  AVER(sizeof(int) == sizeof(Addr));
  AVER(base < limit);
  AVER(base >= vm->base);
  AVER(limit <= vm->limit);
  AVER(AddrIsAligned(base, vm->align));
  AVER(AddrIsAligned(limit, vm->align));

  /* Map /etc/passwd onto the area, allowing no access.  This */
  /* effectively depopulates the area from memory, but keeps */
  /* it "busy" as far as the OS is concerned, so that it will not */
  /* be re-used by other calls to mmap which do not specify */
  /* MAP_FIXED.  The offset is specified to mmap so that */
  /* the OS merges this mapping with .map.reserve. */
  size = AddrOffset(base, limit);
  addr = mmap((caddr_t)base, (int)size,
              PROT_NONE, MAP_SHARED | MAP_FIXED,
              vm->none_fd, (off_t)AddrOffset(vm->base, base));
  AVER(addr == (caddr_t)base);

  vm->mapped -= size;

  EVENT_PAA(VMUnmap, vm, base, limit);
}
Example 7
/* MVFFFreeSegs -- Free segments from given range
 *
 * Given a free range, attempts to find entire segments within
 * it, and returns them to the arena, updating the total size counter.
 *
 * This is usually called immediately after MVFFAddToFreeList.
 * It is not combined with MVFFAddToFreeList because the latter
 * is also called when new segments are added under MVFFAlloc.
 */
static void MVFFFreeSegs(MVFF mvff, Addr base, Addr limit)
{
  Seg seg = NULL;       /* suppress "may be used uninitialized" */
  Arena arena;
  Bool b;
  Addr segLimit;  /* limit of the current segment when iterating */
  Addr segBase;   /* base of the current segment when iterating */
  Res res;

  AVERT(MVFF, mvff);
  AVER(base < limit);
  /* Could profitably AVER that the given range is free, */
  /* but the CBS doesn't provide that facility. */

  if (AddrOffset(base, limit) < mvff->minSegSize)
    return; /* not large enough for entire segments */

  arena = PoolArena(MVFF2Pool(mvff));
  b = SegOfAddr(&seg, arena, base);
  AVER(b);

  segBase = SegBase(seg);
  segLimit = SegLimit(seg);

  while(segLimit <= limit) { /* segment ends in range */
    if (segBase >= base) { /* segment starts in range */
      /* Must remove from free list first, in case free list */
      /* is using inline data structures. */
      res = CBSDelete(CBSOfMVFF(mvff), segBase, segLimit);
      AVER(res == ResOK);
      mvff->free -= AddrOffset(segBase, segLimit);
      mvff->total -= AddrOffset(segBase, segLimit);
      SegFree(seg);
    }

    /* Avoid calling SegNext if the next segment would fail */
    /* the loop test, mainly because there might not be a */
    /* next segment. */
    if (segLimit == limit) /* segment ends at end of range */
      break;

    b = SegNext(&seg, arena, segBase);
    AVER(b);
    segBase = SegBase(seg);
    segLimit = SegLimit(seg);
  }

  return;
}
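
The loop above walks real segments via SegOfAddr and SegNext; the arithmetic idea behind it is simply to trim a free range down to the whole segments it contains. A simplified sketch with a hypothetical fixed segment grain (real MVFF segments need not be uniform):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: trim a free range [base, limit) to the whole
   grain-aligned "segments" inside it.  Returns 0 if no whole segment
   fits. */
static int wholeSegRange(uintptr_t *segBase, uintptr_t *segLimit,
                         uintptr_t base, uintptr_t limit, uintptr_t grain)
{
  uintptr_t b = (base + grain - 1) / grain * grain;  /* round base up */
  uintptr_t l = limit / grain * grain;               /* round limit down */
  if (b >= l)
    return 0;
  *segBase = b;
  *segLimit = l;
  return 1;
}

int main(void)
{
  uintptr_t b, l;
  assert(wholeSegRange(&b, &l, 0x1100, 0x5100, 0x1000));
  assert(b == 0x2000 && l == 0x5000);  /* three whole 4KiB segments */
  assert(!wholeSegRange(&b, &l, 0x1100, 0x1f00, 0x1000));
  printf("whole segments in [0x1100, 0x5100): [%#lx, %#lx)\n",
         (unsigned long)b, (unsigned long)l);
  return 0;
}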
Example 8
void VMDestroy(VM vm)
{
  /* All vm areas should have been unmapped. */
  AVERT(VM, vm);
  AVER(vm->mapped == (Size)0);
  AVER(vm->reserved == AddrOffset(vm->base, vm->limit));

  /* Log the event before the descriptor is junked and freed below. */
  EVENT1(VMDestroy, vm);

  memset((void *)vm->base, VMJunkBYTE, AddrOffset(vm->base, vm->limit));
  free(vm->block);

  vm->sig = SigInvalid;
  free(vm);
}
Example 9
ATTRIBUTE_UNUSED
static Bool MVTCheck(MVT mvt)
{
  CHECKS(MVT, mvt);
  CHECKC(MVTPool, mvt);
  CHECKD(Pool, MVTPool(mvt));
  CHECKC(MVTPool, mvt);
  CHECKD(CBS, &mvt->cbsStruct);
  CHECKD(ABQ, &mvt->abqStruct);
  CHECKD(Freelist, &mvt->flStruct);
  CHECKD(Failover, &mvt->foStruct);
  CHECKL(mvt->reuseSize >= 2 * mvt->fillSize);
  CHECKL(mvt->fillSize >= mvt->maxSize);
  CHECKL(mvt->maxSize >= mvt->meanSize);
  CHECKL(mvt->meanSize >= mvt->minSize);
  CHECKL(mvt->minSize > 0);
  CHECKL(mvt->fragLimit <= 100);
  CHECKL(mvt->availLimit == mvt->size * mvt->fragLimit / 100);
  CHECKL(BoolCheck(mvt->abqOverflow));
  CHECKL(BoolCheck(mvt->splinter));
  if (mvt->splinter) {
    CHECKL(AddrOffset(mvt->splinterBase, mvt->splinterLimit) >=
           mvt->minSize);
    CHECKL(mvt->splinterBase < mvt->splinterLimit);
  }
  CHECKL(mvt->size == mvt->allocated + mvt->available +
         mvt->unavailable);
  /* --- could check that sum of segment sizes == mvt->size */
  /* --- check meters? */

  return TRUE;
}
Example 10
Res VMMap(VM vm, Addr base, Addr limit)
{
  Size size;

  AVERT(VM, vm);
  AVER(sizeof(void *) == sizeof(Addr));
  AVER(base < limit);
  AVER(base >= vm->base);
  AVER(limit <= vm->limit);
  AVER(AddrIsAligned(base, vm->align));
  AVER(AddrIsAligned(limit, vm->align));

  size = AddrOffset(base, limit);

  if(mmap((void *)base, (size_t)size,
          PROT_READ | PROT_WRITE | PROT_EXEC,
          MAP_ANON | MAP_PRIVATE | MAP_FIXED,
          -1, 0)
     == MAP_FAILED) {
    AVER(errno == ENOMEM); /* .assume.mmap.err */
    return ResMEMORY;
  }

  vm->mapped += size;

  EVENT3(VMMap, vm, base, limit);
  return ResOK;
}
Example 11
static void stepper(mps_addr_t object, mps_fmt_t format,
    mps_pool_t pool, void *p, size_t s)
{
    struct stepper_data *sd;
    mps_arena_t arena;
    mps_bool_t b;
    mps_pool_t query_pool;
    mps_fmt_t query_fmt;
    size_t size;

    Insist(s == sizeof *sd);
    sd = p;
    arena = sd->arena;

    Insist(mps_arena_has_addr(arena, object));

    b = mps_addr_pool(&query_pool, arena, object);
    Insist(b);
    Insist(query_pool == pool);
    Insist(pool == sd->expect_pool);

    b = mps_addr_fmt(&query_fmt, arena, object);
    Insist(b);
    Insist(query_fmt == format);
    Insist(format == sd->expect_fmt);

    size = AddrOffset(object, dylan_skip(object));
    if (dylan_ispad(object)) {
      sd->padSize += size;
    } else {
      ++ sd->count;
      sd->objSize += size;
    }      
}
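
This stepper has the shape of an MPS formatted-objects stepper, so it is presumably driven by mps_arena_formatted_objects_walk. A hedged sketch of such a driver: the stepper_data layout is inferred from the stepper body above, and parking the arena around the walk is the conservative choice.

#include <stddef.h>
#include "mps.h"

/* Assumed shape of the test's stepper_data, inferred from the stepper
   body above; the real definition lives in the MPS test sources. */
struct stepper_data {
  mps_arena_t arena;
  mps_pool_t expect_pool;
  mps_fmt_t expect_fmt;
  size_t count;
  size_t objSize;
  size_t padSize;
};

/* Drive the stepper over every formatted object in the arena.  Parking
   the arena ensures no collection is in progress during the walk. */
static void walk_formatted_objects(mps_arena_t arena, mps_pool_t pool,
                                   mps_fmt_t fmt)
{
  struct stepper_data sd;
  sd.arena = arena;
  sd.expect_pool = pool;
  sd.expect_fmt = fmt;
  sd.count = 0;
  sd.objSize = 0;
  sd.padSize = 0;
  mps_arena_park(arena);
  mps_arena_formatted_objects_walk(arena, stepper, &sd, sizeof sd);
  mps_arena_release(arena);
}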
Res VMMap(VM vm, Addr base, Addr limit)
{
  Size size;
  void *addr;

  AVERT(VM, vm);
  AVER(base < limit);
  AVER(base >= vm->base);

  AVER(limit <= vm->limit);
  AVER(AddrIsAligned(base, vm->align));
  AVER(AddrIsAligned(limit, vm->align));

  /* Map /dev/zero onto the area with a copy-on-write policy.  This */
  /* effectively populates the area with zeroed memory. */
  size = AddrOffset(base, limit);
  /* Check it won't lose any bits. */
  AVER(size <= (Size)(size_t)-1);
  addr = mmap((void *)base, (size_t)size,
              PROT_READ | PROT_WRITE | PROT_EXEC,
              MAP_PRIVATE | MAP_FIXED,
              vm->zero_fd, (off_t)0);
  if(addr == MAP_FAILED) {
    AVER(errno == ENOMEM || errno == EAGAIN); /* .assume.mmap.err */
    return ResMEMORY;
  }
  AVER(addr == (void *)base);

  vm->mapped += size;

  EVENT_PAA(VMMap, vm, base, limit);
  return ResOK;
}
void VMDestroy(VM vm)
{
  int r;
  int zero_fd, none_fd;

  AVERT(VM, vm);
  AVER(vm->mapped == (Size)0);

  /* Log the event before the VM descriptor itself is unmapped below. */
  EVENT_P(VMDestroy, vm);

  /* This appears to be pretty pointless, since the space descriptor */
  /* page is about to vanish completely.  However, munmap might fail */
  /* for some reason, and this would ensure that it was still */
  /* discovered if sigs were being checked. */
  vm->sig = SigInvalid;

  zero_fd = vm->zero_fd; none_fd = vm->none_fd;
  r = munmap((caddr_t)vm->base, (int)AddrOffset(vm->base, vm->limit));
  AVER(r == 0);
  r = munmap((caddr_t)vm,
             (int)SizeAlignUp(sizeof(VMStruct), vm->align));
  AVER(r == 0);
  /* .close.fail: We ignore failure from close() as there's very */
  /* little we can do anyway. */
  (void)close(zero_fd);
  (void)close(none_fd);
}
Example 14
static Res MVFFAlloc(Addr *aReturn, Pool pool, Size size,
                     Bool withReservoirPermit, DebugInfo info)
{
  Res res;
  MVFF mvff;
  Addr base, limit;
  Bool foundBlock;

  AVERT(Pool, pool);
  mvff = Pool2MVFF(pool);
  AVERT(MVFF, mvff);

  AVER(aReturn != NULL);
  AVER(size > 0);
  AVER(BoolCheck(withReservoirPermit));
  UNUSED(info);

  size = SizeAlignUp(size, PoolAlignment(pool));

  foundBlock = MVFFFindFirstFree(&base, &limit, mvff, size);
  if (!foundBlock) {
    Seg seg;

    res = MVFFAddSeg(&seg, mvff, size, withReservoirPermit);
    if (res != ResOK)
      return res;
    foundBlock = MVFFFindFirstFree(&base, &limit, mvff, size);

    /* We know that the found range must intersect the new segment, */
    /* but it need not lie entirely within it. */
    /* The next three AVERs test for intersection of two intervals. */
    AVER(base >= SegBase(seg) || limit <= SegLimit(seg));
    AVER(base < SegLimit(seg));
    AVER(SegBase(seg) < limit);

    /* We also know that the found range is no larger than the segment. */
    AVER(SegSize(seg) >= AddrOffset(base, limit));
  }
  AVER(foundBlock);
  AVER(AddrOffset(base, limit) == size);

  *aReturn = base;

  return ResOK;
}
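
The second and third AVERs above are the standard overlap test for half-open ranges: two ranges intersect exactly when each one starts before the other ends. In isolation the test looks like this (names are illustrative):

#include <assert.h>
#include <stdint.h>

/* Half-open ranges [base1, limit1) and [base2, limit2) overlap exactly
   when each range starts before the other one ends. */
static int rangesOverlap(uintptr_t base1, uintptr_t limit1,
                         uintptr_t base2, uintptr_t limit2)
{
  return base1 < limit2 && base2 < limit1;
}

int main(void)
{
  assert(rangesOverlap(0x1000, 0x3000, 0x2000, 0x4000));   /* overlap */
  assert(!rangesOverlap(0x1000, 0x2000, 0x2000, 0x3000));  /* adjacent */
  return 0;
}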
Example 15
/* MVFFBufferFill -- Fill the buffer
 *
 * Fill it with the largest block we can find.
 */
static Res MVFFBufferFill(Addr *baseReturn, Addr *limitReturn,
                          Pool pool, Buffer buffer, Size size,
                          Bool withReservoirPermit)
{
  Res res;
  MVFF mvff;
  Addr base, limit;
  Bool foundBlock;
  Seg seg = NULL;

  AVER(baseReturn != NULL);
  AVER(limitReturn != NULL);
  AVERT(Pool, pool);
  mvff = Pool2MVFF(pool);
  AVERT(MVFF, mvff);
  AVERT(Buffer, buffer);
  AVER(size > 0);
  AVER(SizeIsAligned(size, PoolAlignment(pool)));
  AVERT(Bool, withReservoirPermit);

  /* Find and delete the largest free block, hoping it is big enough; */
  /* if it turns out to be too small, put it back. */
  foundBlock = CBSFindLargest(&base, &limit, CBSOfMVFF(mvff),
                              CBSFindDeleteENTIRE);
  if (foundBlock && AddrOffset(base, limit) < size) {
    foundBlock = FALSE;
    res = CBSInsert(CBSOfMVFF(mvff), base, limit);
    AVER(res == ResOK);
  }
  if (!foundBlock) {
    res = MVFFAddSeg(&seg, mvff, size, withReservoirPermit);
    if (res != ResOK)
      return res;
    foundBlock = CBSFindLargest(&base, &limit, CBSOfMVFF(mvff),
                                CBSFindDeleteENTIRE);
    AVER(foundBlock); /* We will find the new segment. */
  }

  AVER(AddrOffset(base, limit) >= size);
  mvff->free -= AddrOffset(base, limit);

  *baseReturn = base;
  *limitReturn = limit;
  return ResOK;
}
Example 16
void BufferDetach(Buffer buffer, Pool pool)
{
  AVERT(Buffer, buffer);
  AVER(BufferIsReady(buffer));

  if (!BufferIsReset(buffer)) {
    Addr init, limit;
    Size spare;

    buffer->mode |= BufferModeTRANSITION;

    /* Ask the owning pool to do whatever it needs to before the */
    /* buffer is detached (e.g. copy buffer state into pool state). */
    Method(Pool, pool, bufferEmpty)(pool, buffer);

    /* run any class-specific detachment method */
    Method(Buffer, buffer, detach)(buffer);

    init = BufferGetInit(buffer);
    limit = BufferLimit(buffer);
    spare = AddrOffset(init, limit);
    buffer->emptySize += spare;
    if (buffer->isMutator) {
      ArenaGlobals(buffer->arena)->emptyMutatorSize += spare;
      ArenaGlobals(buffer->arena)->allocMutatorSize +=
        AddrOffset(buffer->base, init);
    } else {
      ArenaGlobals(buffer->arena)->emptyInternalSize += spare;
    }

    /* Reset the buffer. */
    buffer->base = (Addr)0;
    buffer->initAtFlip = (Addr)0;
    buffer->ap_s.init = (mps_addr_t)0;
    buffer->ap_s.alloc = (mps_addr_t)0;
    buffer->ap_s.limit = (mps_addr_t)0;
    buffer->poolLimit = (Addr)0;
    buffer->mode &=
      ~(BufferModeATTACHED|BufferModeFLIPPED|BufferModeTRANSITION);

    EVENT2(BufferEmpty, buffer, spare);
  }
}
Example 17
void BufferAttach(Buffer buffer, Addr base, Addr limit,
                  Addr init, Size size)
{
  Size filled;

  AVERT(Buffer, buffer);
  AVER(BufferIsReset(buffer));
  AVER(AddrAdd(base, size) <= limit);
  AVER(base <= init);
  AVER(init <= limit);

  /* Set up the buffer to point at the supplied region */
  buffer->mode |= BufferModeATTACHED;
  buffer->base = base;
  buffer->ap_s.init = init;
  buffer->ap_s.alloc = AddrAdd(init, size);
  /* only set limit if not logged */
  if ((buffer->mode & BufferModeLOGGED) == 0) {
    buffer->ap_s.limit = limit;
  } else {
    AVER(buffer->ap_s.limit == (Addr)0);
  }
  AVER(buffer->initAtFlip == (Addr)0);
  buffer->poolLimit = limit;

  filled = AddrOffset(init, limit);
  buffer->fillSize += filled;
  if (buffer->isMutator) {
    if (base != init) { /* see <design/buffer#.count.alloc.how> */
      Size prealloc = AddrOffset(base, init);
      ArenaGlobals(buffer->arena)->allocMutatorSize -= prealloc;
    }
    ArenaGlobals(buffer->arena)->fillMutatorSize += filled;
  } else {
    ArenaGlobals(buffer->arena)->fillInternalSize += filled;
  }

  /* run any class-specific attachment method */
  Method(Buffer, buffer, attach)(buffer, base, limit, init, size);

  AVERT(Buffer, buffer);
  EVENT4(BufferFill, buffer, size, base, filled);
}
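
BufferAttach and BufferDetach manage the pool side of an MPS allocation point; the client side consumes the attached region through the documented reserve/commit protocol. A sketch of that client loop against the public MPS interface (the object initialisation step is elided):

#include <stddef.h>
#include "mps.h"

/* Client-side counterpart of the attached buffer: the documented MPS
   reserve/commit loop on an allocation point.  Object initialisation
   is elided. */
static mps_addr_t alloc_obj(mps_ap_t ap, size_t size)
{
  mps_addr_t p;
  do {
    mps_res_t res = mps_reserve(&p, ap, size);
    if (res != MPS_RES_OK)
      return NULL;                       /* out of memory */
    /* ... initialise the object at p so that it is scannable ... */
  } while (!mps_commit(ap, p, size));    /* retry if the commit fails */
  return p;
}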
Example 18
void VMUnmap(VM vm, Addr base, Addr limit)
{
  Align align;
  BOOL b;

  AVERT(VM, vm);
  align = vm->align;
  AVER(AddrIsAligned(base, align));
  AVER(AddrIsAligned(limit, align));
  AVER(vm->base <= base);
  AVER(base < limit);
  AVER(limit <= vm->limit);

  /* .improve.query-unmap: Could check that the pages we are about */
  /* to unmap are mapped, using VirtualQuery. */
  b = VirtualFree((LPVOID)base, (SIZE_T)AddrOffset(base, limit), MEM_DECOMMIT);
  AVER(b != 0);  /* .assume.free.success */
  vm->mapped -= AddrOffset(base, limit);

  EVENT3(VMUnmap, vm, base, limit);
}
Example 19
void ProtSet(Addr base, Addr limit, AccessSet mode)
{
  int flags;

  AVER(sizeof(size_t) == sizeof(Addr));
  AVER(base < limit);
  AVER(base != 0);
  AVER(AddrOffset(base, limit) <= INT_MAX);     /* should be redundant */
  AVERT(AccessSet, mode);

  /* Convert between MPS AccessSet and UNIX PROT flags.
     In this function, AccessREAD means protect against read accesses
     (disallow them).  PROT_READ means allow read accesses.  Notice that
     this follows a difference in contract as well as style.  AccessREAD
     means that no reads should be permitted (all reads should go via
     the signal handler), possibly other operations (write) also go via
     the signal handler; PROT_WRITE means that all writes should be
     allowed, possibly that means other operations (read) are also
     allowed.
   */
  switch(mode) {
  case AccessWRITE | AccessREAD:
  case AccessREAD:      /* forbids writes as well, see .assume.write-only */
    flags = PROT_NONE;
    break;
  case AccessWRITE:
    flags = PROT_READ | PROT_EXEC;
    break;
  case AccessSetEMPTY:
    flags = PROT_READ | PROT_WRITE | PROT_EXEC;
    break;
  default:
    NOTREACHED;
    flags = PROT_NONE;
  }

  /* .assume.mprotect.base */
  if(mprotect((void *)base, (size_t)AddrOffset(base, limit), flags) != 0)
    NOTREACHED;
}
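
The switch above inverts the sense of the flags: an AccessSet names the accesses to forbid, while PROT_* names the accesses to allow. A stand-alone mprotect demonstration of dropping and restoring access on one page (PROT_EXEC omitted for simplicity; no fault handler is installed, so only permitted accesses are attempted):

#include <assert.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
  size_t page = (size_t)sysconf(_SC_PAGESIZE);
  char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(p != MAP_FAILED);
  memset(p, 0, page);

  /* Roughly AccessWRITE: forbid writes but keep reads. */
  assert(mprotect(p, page, PROT_READ) == 0);
  assert(p[0] == 0);                     /* reads are still allowed */

  /* Roughly AccessSetEMPTY: restore full access. */
  assert(mprotect(p, page, PROT_READ | PROT_WRITE) == 0);
  p[0] = 1;

  munmap(p, page);
  return 0;
}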
Example 20
static Res clientChunkCreate(Chunk *chunkReturn, Addr base, Addr limit,
                             ClientArena clientArena)
{
  ClientChunk clChunk;
  Chunk chunk;
  Addr alignedBase;
  BootBlockStruct bootStruct;
  BootBlock boot = &bootStruct;
  Res res;
  void *p;

  AVER(chunkReturn != NULL);
  AVER(base != (Addr)0);
  /* @@@@ Should refuse on small chunks, instead of AVERring. */
  AVER(limit != (Addr)0);
  AVER(limit > base);

  /* Initialize boot block. */
  /* Chunk has to be page-aligned, and the boot allocs must be within it. */
  alignedBase = AddrAlignUp(base, ARENA_CLIENT_PAGE_SIZE);
  AVER(alignedBase < limit);
  res = BootBlockInit(boot, (void *)alignedBase, (void *)limit);
  if (res != ResOK)
    goto failBootInit;

  /* Allocate the chunk. */
  /* See <design/arena/>.@@@@ */
  res = BootAlloc(&p, boot, sizeof(ClientChunkStruct), MPS_PF_ALIGN);
  if (res != ResOK)
    goto failChunkAlloc;
  clChunk = p;  chunk = ClientChunk2Chunk(clChunk);

  res = ChunkInit(chunk, ClientArena2Arena(clientArena),
                  alignedBase, AddrAlignDown(limit, ARENA_CLIENT_PAGE_SIZE),
                  ARENA_CLIENT_PAGE_SIZE, boot);
  if (res != ResOK)
    goto failChunkInit;

  ClientArena2Arena(clientArena)->committed +=
    AddrOffset(base, PageIndexBase(chunk, chunk->allocBase));
  BootBlockFinish(boot);

  clChunk->sig = ClientChunkSig;
  AVERT(ClientChunk, clChunk);
  *chunkReturn = chunk;
  return ResOK;

failChunkInit:
failChunkAlloc:
failBootInit:
  return res;
}
Example 21
static void vmArenaUnmap(VMArena vmArena, VM vm, Addr base, Addr limit)
{
  Arena arena;
  Size size;

  /* no checking as function is local to module */

  arena = VMArena2Arena(vmArena);
  size = AddrOffset(base, limit);
  AVER(size <= arena->committed);

  VMUnmap(vm, base, limit);
  arena->committed -= size;
  return;
}
Example 22
/* MVFFAddToFreeList -- Add given range to free list
 *
 * Updates MVFF counters for additional free space.  Returns maximally
 * coalesced range containing given range.  Does not attempt to free
 * segments (see MVFFFreeSegs).  Cannot(!) fail.
 */
static void MVFFAddToFreeList(Addr *baseIO, Addr *limitIO, MVFF mvff) {
  Res res;
  Addr base, limit;

  AVER(baseIO != NULL);
  AVER(limitIO != NULL);
  AVERT(MVFF, mvff);
  base = *baseIO;
  limit = *limitIO;
  AVER(limit > base);

  res = CBSInsertReturningRange(baseIO, limitIO, CBSOfMVFF(mvff), base, limit);
  AVER(res == ResOK);
  mvff->free += AddrOffset(base, limit);

  return;
}
Example 23
void VMUnmap(VM vm, Addr base, Addr limit)
{
  Size size;

  AVER(base != (Addr)0);
  AVER(vm->base <= base);
  AVER(base < limit);
  AVER(limit <= vm->limit);
  AVER(AddrIsAligned(base, VMANPageALIGNMENT));
  AVER(AddrIsAligned(limit, VMANPageALIGNMENT));
 
  size = AddrOffset(base, limit);
  memset((void *)base, 0xCD, size);

  AVER(vm->mapped >= size);
  vm->mapped -= size;

  EVENT3(VMUnmap, vm, base, limit);
}
Example 24
Res VMMap(VM vm, Addr base, Addr limit)
{
  Size size;

  AVER(base != (Addr)0);
  AVER(vm->base <= base);
  AVER(base < limit);
  AVER(limit <= vm->limit);
  AVER(AddrIsAligned(base, VMANPageALIGNMENT));
  AVER(AddrIsAligned(limit, VMANPageALIGNMENT));

  size = AddrOffset(base, limit);
  memset((void *)base, (int)0, size);

  vm->mapped += size;

  EVENT3(VMMap, vm, base, limit);
  return ResOK;
}
Example 25
static void FreelistBlockSetLimit(Freelist fl, FreelistBlock block, Addr limit)
{
  Size size;

  AVERT(Freelist, fl);
  AVERT(FreelistBlock, block);
  AVER(AddrIsAligned(limit, fl->alignment));
  AVER(FreelistBlockBase(block) < limit);

  size = AddrOffset(block, limit);
  if (size >= sizeof(block->large)) {
    block->large.next = FreelistTagReset(block->large.next);
    block->large.limit = limit;
  } else {
    AVER(size >= sizeof(block->small));
    block->small.next = FreelistTagSet(block->small.next);
  }
  AVER(FreelistBlockLimit(fl, block) == limit);
}
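
FreelistTagSet and FreelistTagReset presumably store a one-bit tag in the low bit of the next pointer, which alignment leaves free; that is how a small block is distinguished from a large one without an extra field. A generic sketch of low-bit pointer tagging (not the MPS implementation):

#include <assert.h>
#include <stdint.h>

/* A word-aligned pointer has a zero bottom bit, so that bit can carry
   a one-bit flag; the pointer is recovered by masking the bit off. */
typedef struct Node { struct Node *next; } Node;

static Node *tagSet(Node *p)   { return (Node *)((uintptr_t)p | 1); }
static Node *tagReset(Node *p) { return (Node *)((uintptr_t)p & ~(uintptr_t)1); }
static int tagIsSet(Node *p)   { return (int)((uintptr_t)p & 1); }

int main(void)
{
  Node a, b;
  a.next = tagSet(&b);             /* mark a, say, as a "small" block */
  assert(tagIsSet(a.next));
  assert(tagReset(a.next) == &b);  /* the real pointer is recoverable */
  return 0;
}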
Example 26
static void MVFFFree(Pool pool, Addr old, Size size)
{
  Addr base, limit;
  MVFF mvff;

  AVERT(Pool, pool);
  mvff = Pool2MVFF(pool);
  AVERT(MVFF, mvff);

  AVER(old != (Addr)0);
  AVER(size > 0);

  base = AddrAlignUp(old, PoolAlignment(pool));
  size = size - AddrOffset(old, base);
  size = SizeAlignUp(size, PoolAlignment(pool));
  limit = AddrAdd(base, size);

  MVFFAddToFreeList(&base, &limit, mvff);

  MVFFFreeSegs(mvff, base, limit);
}
Example 27
void VMDestroy(VM vm)
{
  int r;

  AVERT(VM, vm);
  AVER(vm->mapped == (Size)0);

  EVENT1(VMDestroy, vm);

  /* This appears to be pretty pointless, since the descriptor */
  /* page is about to vanish completely.  However, munmap might fail */
  /* for some reason, and this would ensure that it was still */
  /* discovered if sigs were being checked. */
  vm->sig = SigInvalid;

  r = munmap((void *)vm->base, (size_t)AddrOffset(vm->base, vm->limit));
  AVER(r == 0);
  r = munmap((void *)vm,
             (size_t)SizeAlignUp(sizeof(VMStruct), vm->align));
  AVER(r == 0);
}
Example 28
static void testCallback(CBS cbs, CBSBlock cbsBlock,
                         Size oldSize, Size newSize,
                         CallbackPrediction prediction)
{
  Insist(CBSCheck(cbs));
  Insist(CBSBlockCheck(cbsBlock));
  Insist(prediction->shouldBeCalled);
  Insist(oldSize == prediction->oldSize);

  if (newSize == 0) {
    Insist(prediction->base == 0);
    Insist(prediction->limit == 0);
  } else {
    Insist(CBSBlockSize(cbsBlock) == newSize);
    Insist(newSize == AddrOffset(prediction->base, prediction->limit));
    Insist(CBSBlockBase(cbsBlock) == prediction->base);
    Insist(CBSBlockLimit(cbsBlock) == prediction->limit);
  }

  prediction->shouldBeCalled = FALSE;
}
Example 29
/* VM indirect functions
 *
 * These functions should be used to map and unmap within the arena.
 * They are responsible for maintaining vmArena->committed, and for
 * checking that the commit limit does not get exceeded.
 */
static Res vmArenaMap(VMArena vmArena, VM vm, Addr base, Addr limit)
{
  Arena arena;
  Size size;
  Res res;

  /* no checking as function is local to module */

  arena = VMArena2Arena(vmArena);
  size = AddrOffset(base, limit);
  /* committed can't overflow (since we can't commit more memory than */
  /* address space), but we're paranoid. */
  AVER(arena->committed < arena->committed + size);
  /* check against commit limit */
  if (arena->commitLimit < arena->committed + size)
    return ResCOMMIT_LIMIT;

  res = VMMap(vm, base, limit);
  if (res != ResOK)
    return res;
  arena->committed += size;
  return ResOK;
}
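
The two checks above guard the commit accounting: the addition must not overflow, and the new total must not exceed the commit limit. The same guard in isolation (names are illustrative, not the MPS API):

#include <assert.h>
#include <stddef.h>

/* Refuse if adding size to committed would overflow, or would take the
   total past the commit limit. */
static int commitWouldExceed(size_t committed, size_t commitLimit,
                             size_t size)
{
  if (committed + size < committed)        /* overflow check */
    return 1;
  return committed + size > commitLimit;   /* commit-limit check */
}

int main(void)
{
  assert(!commitWouldExceed(1000, 2000, 500));
  assert(commitWouldExceed(1000, 2000, 1500));
  assert(commitWouldExceed((size_t)-1, (size_t)-1, 2));  /* overflow */
  return 0;
}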
Example 30
void VMUnmap(VM vm, Addr base, Addr limit)
{
  BOOL b;
  Size size;

  AVERT(VM, vm);
  AVER(AddrIsAligned(base, vm->pageSize));
  AVER(AddrIsAligned(limit, vm->pageSize));
  AVER(VMBase(vm) <= base);
  AVER(base < limit);
  AVER(limit <= VMLimit(vm));

  size = AddrOffset(base, limit);
  AVER(size <= VMMapped(vm));

  /* .improve.query-unmap: Could check that the pages we are about */
  /* to unmap are mapped, using VirtualQuery. */
  b = VirtualFree((LPVOID)base, (SIZE_T)size, MEM_DECOMMIT);
  AVER(b != 0);  /* .assume.free.success */
  vm->mapped -= size;

  EVENT3(VMUnmap, vm, base, limit);
}