Example #1
Res BufferFill(Addr *pReturn, Buffer buffer, Size size)
{
  Res res;
  Pool pool;
  Addr base, limit, next;

  AVER(pReturn != NULL);
  AVERT(Buffer, buffer);
  AVER(size > 0);
  AVER(SizeIsAligned(size, BufferPool(buffer)->alignment));
  AVER(BufferIsReady(buffer));

  pool = BufferPool(buffer);

  /* If we're here because the buffer was trapped, then we attempt */
  /* the allocation here. */
  if (!BufferIsReset(buffer) && buffer->ap_s.limit == (Addr)0) {
    /* .fill.unflip: If the buffer is flipped then we unflip the buffer. */
    if (buffer->mode & BufferModeFLIPPED) {
      BufferSetUnflipped(buffer);
    }

    /* .fill.logged: If the buffer is logged then we leave it logged. */
    next = AddrAdd(buffer->ap_s.alloc, size);
    if (next > (Addr)buffer->ap_s.alloc &&
        next <= (Addr)buffer->poolLimit) {
      buffer->ap_s.alloc = next;
      if (buffer->mode & BufferModeLOGGED) {
        EVENT3(BufferReserve, buffer, buffer->ap_s.init, size);
      }
      *pReturn = buffer->ap_s.init;
      return ResOK;
    }
  }

  /* There really isn't enough room for the allocation now. */
  AVER(AddrAdd(buffer->ap_s.alloc, size) > buffer->poolLimit ||
       AddrAdd(buffer->ap_s.alloc, size) < (Addr)buffer->ap_s.alloc);

  BufferDetach(buffer, pool);

  /* Ask the pool for some memory. */
  res = Method(Pool, pool, bufferFill)(&base, &limit, pool, buffer, size);
  if (res != ResOK)
    return res;

  /* Set up the buffer to point at the memory given by the pool */
  /* and do the allocation that was requested by the client. */
  BufferAttach(buffer, base, limit, base, size);

  if (buffer->mode & BufferModeLOGGED) {
    EVENT3(BufferReserve, buffer, buffer->ap_s.init, size);
  }

  *pReturn = base;
  return res;
}
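BufferFill is the slow path behind the MPS allocation-point protocol: clients bump-allocate inline and only call into the pool when the buffer is exhausted. Below is a minimal self-contained sketch of that two-path shape, not the real MPS API; the names (AP, apReserve, apFill) and the 4096-byte refill size are illustrative assumptions.

#include <stddef.h>
#include <stdlib.h>

typedef struct {
  char *init;   /* base of the object being allocated */
  char *alloc;  /* next free byte */
  char *limit;  /* end of the current buffer */
} AP;

/* Slow path (cf. BufferFill): get a fresh buffer from a backing
   allocator and satisfy the request from it.  The old buffer is
   simply abandoned here, which a real pool would not do. */
static int apFill(void **pReturn, AP *ap, size_t size)
{
  size_t bufSize = size > 4096 ? size : 4096;
  char *base = malloc(bufSize);
  if (base == NULL)
    return 1;                 /* cf. ResMEMORY */
  ap->init = base;
  ap->alloc = base + size;    /* perform the requested allocation too */
  ap->limit = base + bufSize;
  *pReturn = base;
  return 0;                   /* cf. ResOK */
}

/* Fast path (cf. mps_reserve): bump-allocate when there is room. */
static int apReserve(void **pReturn, AP *ap, size_t size)
{
  if ((size_t)(ap->limit - ap->alloc) >= size) {
    ap->init = ap->alloc;
    ap->alloc += size;
    *pReturn = ap->init;
    return 0;
  }
  return apFill(pReturn, ap, size);
}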
Example #2
File: pool.c Project: bhanug/mps
Res PoolAlloc(Addr *pReturn, Pool pool, Size size)
{
  Res res;

  AVER(pReturn != NULL);
  AVERT(Pool, pool);
  AVER(size > 0);

  res = Method(Pool, pool, alloc)(pReturn, pool, size);
  if (res != ResOK)
    return res;
  /* Make sure that the allocated address was in the pool's memory. */
  /* .hasaddr.critical: The PoolHasAddr check is expensive, and in */
  /* allocation-bound programs this is on the critical path. */
  AVER_CRITICAL(PoolHasAddr(pool, *pReturn));
  /* All allocations should be aligned to the pool's alignment */
  AVER_CRITICAL(AddrIsAligned(*pReturn, pool->alignment));

  /* All PoolAllocs should advance the allocation clock, so we count */
  /* it all in the fillMutatorSize field. */
  ArenaGlobals(PoolArena(pool))->fillMutatorSize += size;

  EVENT3(PoolAlloc, pool, *pReturn, size);

  return ResOK;
}
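The Method(Pool, pool, alloc) call above appears to be dynamic dispatch through a per-class table of function pointers. A sketch of that general idiom, with invented names (the real MPS class system is more elaborate than this):

#include <stddef.h>

typedef struct PoolStruct *Pool;

typedef struct {
  int (*alloc)(void **pReturn, Pool pool, size_t size);
  void (*free)(Pool pool, void *old, size_t size);
} PoolClassStruct;

struct PoolStruct {
  const PoolClassStruct *klass;  /* per-instance pointer to the class */
  /* ... pool fields ... */
};

/* cf. Method(Pool, pool, alloc)(pReturn, pool, size): */
#define METHOD(pool, name) ((pool)->klass->name)
/* call site:  res = METHOD(pool, alloc)(&p, pool, size); */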
Example #3
Res VMMap(VM vm, Addr base, Addr limit)
{
  Size size;

  AVERT(VM, vm);
  AVER(sizeof(void *) == sizeof(Addr));
  AVER(base < limit);
  AVER(base >= vm->base);
  AVER(limit <= vm->limit);
  AVER(AddrIsAligned(base, vm->align));
  AVER(AddrIsAligned(limit, vm->align));

  size = AddrOffset(base, limit);

  if(mmap((void *)base, (size_t)size,
          PROT_READ | PROT_WRITE | PROT_EXEC,
          MAP_ANON | MAP_PRIVATE | MAP_FIXED,
          -1, 0)
     == MAP_FAILED) {
    AVER(errno == ENOMEM); /* .assume.mmap.err */
    return ResMEMORY;
  }

  vm->mapped += size;

  EVENT3(VMMap, vm, base, limit);
  return ResOK;
}
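This VMMap and its VMUnmap counterpart (Example #17) implement commit and decommit by remapping over an existing PROT_NONE reservation with MAP_FIXED. A standalone POSIX demonstration of that pairing, with error handling reduced to asserts; this is an illustration, not MPS code:

#include <sys/mman.h>
#include <unistd.h>
#include <assert.h>

int main(void)
{
  size_t page = (size_t)sysconf(_SC_PAGESIZE);
  size_t reserved = 16 * page;

  /* Reserve: the addresses are claimed but no memory is committed. */
  void *base = mmap(NULL, reserved, PROT_NONE,
                    MAP_ANON | MAP_PRIVATE, -1, 0);
  assert(base != MAP_FAILED);

  /* Commit one page (cf. VMMap) by mapping over the reservation. */
  void *p = mmap(base, page, PROT_READ | PROT_WRITE,
                 MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
  assert(p == base);
  ((char *)p)[0] = 1;           /* the page is now usable */

  /* Decommit (cf. VMUnmap) by mapping it back to PROT_NONE. */
  p = mmap(base, page, PROT_NONE,
           MAP_ANON | MAP_PRIVATE | MAP_FIXED, -1, 0);
  assert(p == base);

  munmap(base, reserved);
  return 0;
}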
Example #4
Res VMMap(VM vm, Addr base, Addr limit)
{
  LPVOID b;
  Align align;

  AVERT(VM, vm);
  align = vm->align;
  AVER(AddrIsAligned(base, align));
  AVER(AddrIsAligned(limit, align));
  AVER(vm->base <= base);
  AVER(base < limit);
  AVER(limit <= vm->limit);

  /* .improve.query-map: We could check that the pages we are about to
   * map are unmapped using VirtualQuery. */

  b = VirtualAlloc((LPVOID)base, (SIZE_T)AddrOffset(base, limit),
                   MEM_COMMIT, PAGE_EXECUTE_READWRITE);
  if (b == NULL)
    return ResMEMORY;
  AVER((Addr)b == base);        /* base should've been aligned */

  vm->mapped += AddrOffset(base, limit);

  EVENT3(VMMap, vm, base, limit);
  return ResOK;
}
Example #5
static Res ResPoolInit(Pool pool, ArgList arg)
{
    AVER(pool != NULL);

    UNUSED(arg);
    /* Caller will set sig and AVERT. */
    EVENT3(PoolInit, pool, PoolArena(pool), ClassOfPool(pool));
    return ResOK;
}
Example #6
static Res BufferAbsInit(Buffer buffer, Pool pool, Bool isMutator, ArgList args)
{
  Arena arena;

  AVER(buffer != NULL);
  AVERT(Pool, pool);
  AVER(BoolCheck(isMutator));
  AVERT(ArgList, args);

  /* Superclass init */
  InstInit(CouldBeA(Inst, buffer));
  
  arena = PoolArena(pool);

  /* Initialize the buffer.  See <code/mpmst.h> for a definition of
     the structure.  sig and serial come later; see .init.sig-serial */
  buffer->arena = arena;
  buffer->pool = pool;
  RingInit(&buffer->poolRing);
  buffer->isMutator = isMutator;
  if (ArenaGlobals(arena)->bufferLogging) {
    buffer->mode = BufferModeLOGGED;
  } else {
    buffer->mode = 0;
  }
  buffer->fillSize = 0.0;
  buffer->emptySize = 0.0;
  buffer->alignment = PoolAlignment(pool);
  buffer->base = (Addr)0;
  buffer->initAtFlip = (Addr)0;
  /* In the next three assignments we really mean zero, not NULL, because
     the bit pattern is compared.  It's pretty unlikely we'll encounter
     a platform where this makes a difference. */
  buffer->ap_s.init = (mps_addr_t)0;
  buffer->ap_s.alloc = (mps_addr_t)0;
  buffer->ap_s.limit = (mps_addr_t)0;
  buffer->poolLimit = (Addr)0;
  buffer->rampCount = 0;

  /* .init.sig-serial: Now the vanilla stuff is initialized, sign the
     buffer and give it a serial number. It can then be safely checked
     in subclass methods. */
  buffer->serial = pool->bufferSerial; /* .trans.mod */
  ++pool->bufferSerial;
  SetClassOfPoly(buffer, CLASS(Buffer));
  buffer->sig = BufferSig;
  AVERT(Buffer, buffer);

  /* Attach the initialized buffer to the pool. */
  RingAppend(&pool->bufferRing, &buffer->poolRing);

  EVENT3(BufferInit, buffer, pool, BOOLOF(buffer->isMutator));

  return ResOK;
}
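The .init.sig-serial comment describes a general discipline: the structure's magic sig field is written only after the ordinary fields are valid, so that check functions (invoked via AVERT) can reject uninitialized or dead objects. A minimal sketch of the pattern under invented names:

#include <assert.h>

#define THING_SIG 0x51954649u   /* an arbitrary magic value */

typedef struct {
  unsigned sig;
  int value;
} Thing;

static int ThingCheck(const Thing *t)
{
  return t != NULL && t->sig == THING_SIG;
}

static void ThingInit(Thing *t, int value)
{
  t->value = value;
  t->sig = THING_SIG;    /* sign last, once the object is valid */
  assert(ThingCheck(t)); /* cf. AVERT(Buffer, buffer) */
}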
Example #7
Res VMInit(VM vm, Size size, Size grainSize, void *params)
{
  LPVOID vbase;
  Size pageSize, reserved;
  VMParams vmParams = params;

  AVER(vm != NULL);
  AVERT(ArenaGrainSize, grainSize);
  AVER(size > 0);
  AVER(params != NULL); /* FIXME: Should have full AVERT? */

  AVER(COMPATTYPE(LPVOID, Addr));  /* .assume.lpvoid-addr */
  AVER(COMPATTYPE(SIZE_T, Size));

  pageSize = PageSize();

  /* Grains must consist of whole pages. */
  AVER(grainSize % pageSize == 0);

  /* Check that the rounded-up sizes will fit in a Size. */
  size = SizeRoundUp(size, grainSize);
  if (size < grainSize || size > (Size)(SIZE_T)-1)
    return ResRESOURCE;
  reserved = size + grainSize - pageSize;
  if (reserved < grainSize || reserved > (Size)(SIZE_T)-1)
    return ResRESOURCE;

  /* Allocate the address space. */
  vbase = VirtualAlloc(NULL,
                       reserved,
                       vmParams->topDown ?
                         MEM_RESERVE | MEM_TOP_DOWN :
                         MEM_RESERVE,
                       PAGE_NOACCESS);
  if (vbase == NULL)
    return ResRESOURCE;

  AVER(AddrIsAligned(vbase, pageSize));

  vm->pageSize = pageSize;
  vm->block = vbase;
  vm->base = AddrAlignUp(vbase, grainSize);
  vm->limit = AddrAdd(vm->base, size);
  AVER(vm->base < vm->limit);  /* .assume.not-last */
  AVER(vm->limit <= AddrAdd((Addr)vm->block, reserved));
  vm->reserved = reserved;
  vm->mapped = 0;

  vm->sig = VMSig;
  AVERT(VM, vm);

  EVENT3(VMInit, vm, VMBase(vm), VMLimit(vm));
  return ResOK;
}
Example #8
File: pool.c Project: bhanug/mps
void PoolFree(Pool pool, Addr old, Size size)
{
  AVERT(Pool, pool);
  AVER(old != NULL);
  /* The pool methods should check that old is in pool. */
  AVER(size > 0);
  AVER(AddrIsAligned(old, pool->alignment));
  AVER(PoolHasRange(pool, old, AddrAdd(old, size)));

  Method(Pool, pool, free)(pool, old, size);
 
  EVENT3(PoolFree, pool, old, size);
}
Example #9
File: pool.c Project: bhanug/mps
Res PoolInit(Pool pool, Arena arena, PoolClass klass, ArgList args)
{
  Res res;

  AVERT(PoolClass, klass);

  res = klass->init(pool, arena, klass, args);
  if (res != ResOK)
    return res;

  EVENT3(PoolInit, pool, PoolArena(pool), ClassOfPoly(Pool, pool));

  return ResOK;
}
Example #10
Res SegAlloc(Seg *segReturn, SegClass klass, LocusPref pref,
             Size size, Pool pool, ArgList args)
{
  Res res;
  Arena arena;
  Seg seg;
  Addr base;
  void *p;

  AVER(segReturn != NULL);
  AVERT(SegClass, klass);
  AVERT(LocusPref, pref);
  AVER(size > (Size)0);
  AVERT(Pool, pool);

  arena = PoolArena(pool);
  AVERT(Arena, arena);
  AVER(SizeIsArenaGrains(size, arena));

  /* allocate the memory from the arena */
  res = ArenaAlloc(&base, pref, size, pool);
  if (res != ResOK)
    goto failArena;

  /* allocate the segment object from the control pool */
  res = ControlAlloc(&p, arena, klass->size);
  if (res != ResOK)
    goto failControl;
  seg = p;

  res = SegInit(seg, klass, pool, base, size, args);
  if (res != ResOK)
    goto failInit;

  EVENT5(SegAlloc, arena, seg, SegBase(seg), size, pool);
  *segReturn = seg;
  return ResOK;

failInit:
  ControlFree(arena, seg, klass->size);
failControl:
  ArenaFree(base, size, pool);
failArena:
  EVENT3(SegAllocFail, arena, size, pool);
  return res;
}
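The failInit/failControl/failArena labels are the standard C idiom for unwinding partially acquired resources in reverse order of acquisition. The same shape with everyday resources (the file path is arbitrary, for illustration only):

#include <stdio.h>
#include <stdlib.h>

int makeThing(char **bufReturn, FILE **fileReturn, size_t size)
{
  char *buf;
  FILE *file;

  buf = malloc(size);
  if (buf == NULL)
    goto failMalloc;

  file = fopen("/tmp/thing.dat", "wb");
  if (file == NULL)
    goto failOpen;

  *bufReturn = buf;
  *fileReturn = file;
  return 0;

failOpen:
  free(buf);      /* undo in reverse order of acquisition */
failMalloc:
  return 1;
}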
Example #11
void VMUnmap(VM vm, Addr base, Addr limit)
{
  Size size;

  AVER(base != (Addr)0);
  AVER(vm->base <= base);
  AVER(base < limit);
  AVER(limit <= vm->limit);
  AVER(AddrIsAligned(base, VMANPageALIGNMENT));
  AVER(AddrIsAligned(limit, VMANPageALIGNMENT));
 
  size = AddrOffset(base, limit);
  memset((void *)base, 0xCD, size);

  AVER(vm->mapped >= size);
  vm->mapped -= size;

  EVENT3(VMUnmap, vm, base, limit);
}
Example #12
Res VMCreate(VM *vmReturn, Size size)
{
  VM vm;

  AVER(vmReturn != NULL);

  /* Note that because we add VMANPageALIGNMENT rather than */
  /* VMANPageALIGNMENT-1 we are not in danger of overflowing */
  /* vm->limit even if malloc were perverse enough to give us */
  /* a block at the end of memory. */
  size = SizeAlignUp(size, VMANPageALIGNMENT) + VMANPageALIGNMENT;
  if ((size < VMANPageALIGNMENT) || (size > (Size)(size_t)-1))
    return ResRESOURCE;

  vm = (VM)malloc(sizeof(VMStruct));
  if (vm == NULL)
    return ResMEMORY;

  vm->block = malloc((size_t)size);
  if (vm->block == NULL) {
    free(vm);
    return ResMEMORY;
  }

  vm->base  = AddrAlignUp((Addr)vm->block, VMANPageALIGNMENT);
  vm->limit = AddrAdd(vm->base, size - VMANPageALIGNMENT);
  AVER(vm->limit < AddrAdd((Addr)vm->block, size));

  memset((void *)vm->block, VMJunkBYTE, size);
 
  /* Lie about the reserved address space, to simulate real */
  /* virtual memory. */
  vm->reserved = size - VMANPageALIGNMENT;
  vm->mapped = (Size)0;
 
  vm->sig = VMSig;

  AVERT(VM, vm);
 
  EVENT3(VMCreate, vm, vm->base, vm->limit);
  *vmReturn = vm;
  return ResOK;
}
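AddrAlignUp above presumably rounds an address up to a power-of-two boundary; over-allocating by a whole VMANPageALIGNMENT (rather than VMANPageALIGNMENT - 1, as the comment explains) keeps the aligned base plus the usable size inside the malloc'd block even in the degenerate end-of-memory case. The alignment arithmetic in isolation:

#include <stdint.h>
#include <assert.h>

static uintptr_t alignUp(uintptr_t addr, uintptr_t align)
{
  assert((align & (align - 1)) == 0);   /* align must be a power of two */
  return (addr + align - 1) & ~(align - 1);
}

/* e.g. alignUp(0x1234, 0x1000) == 0x2000;
        alignUp(0x2000, 0x1000) == 0x2000 (already aligned) */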
Example #13
Res VMMap(VM vm, Addr base, Addr limit)
{
  Size size;

  AVER(base != (Addr)0);
  AVER(vm->base <= base);
  AVER(base < limit);
  AVER(limit <= vm->limit);
  AVER(AddrIsAligned(base, VMANPageALIGNMENT));
  AVER(AddrIsAligned(limit, VMANPageALIGNMENT));

  size = AddrOffset(base, limit);
  memset((void *)base, (int)0, size);

  vm->mapped += size;

  EVENT3(VMMap, vm, base, limit);
  return ResOK;
}
Example #14
void VMUnmap(VM vm, Addr base, Addr limit)
{
  Align align;
  BOOL b;

  AVERT(VM, vm);
  align = vm->align;
  AVER(AddrIsAligned(base, align));
  AVER(AddrIsAligned(limit, align));
  AVER(vm->base <= base);
  AVER(base < limit);
  AVER(limit <= vm->limit);

  /* .improve.query-unmap: Could check that the pages we are about */
  /* to unmap are mapped, using VirtualQuery. */
  b = VirtualFree((LPVOID)base, (SIZE_T)AddrOffset(base, limit), MEM_DECOMMIT);
  AVER(b != 0);  /* .assume.free.success */
  vm->mapped -= AddrOffset(base, limit);

  EVENT3(VMUnmap, vm, base, limit);
}
Example #15
static Res segBufInit(Buffer buffer, Pool pool, Bool isMutator, ArgList args)
{
  SegBuf segbuf;
  Res res;

  /* Initialize the superclass fields first via next-method call */
  res = NextMethod(Buffer, SegBuf, init)(buffer, pool, isMutator, args);
  if (res != ResOK)
    return res;
  segbuf = CouldBeA(SegBuf, buffer);

  segbuf->seg = NULL;
  segbuf->rankSet = RankSetEMPTY;

  SetClassOfPoly(buffer, CLASS(SegBuf));
  segbuf->sig = SegBufSig;
  AVERC(SegBuf, segbuf);

  EVENT3(BufferInitSeg, buffer, pool, BOOLOF(buffer->isMutator));
  return ResOK;
}
Example #16
void VMUnmap(VM vm, Addr base, Addr limit)
{
  BOOL b;
  Size size;

  AVERT(VM, vm);
  AVER(AddrIsAligned(base, vm->pageSize));
  AVER(AddrIsAligned(limit, vm->pageSize));
  AVER(VMBase(vm) <= base);
  AVER(base < limit);
  AVER(limit <= VMLimit(vm));

  size = AddrOffset(base, limit);
  AVER(size <= VMMapped(vm));

  /* .improve.query-unmap: Could check that the pages we are about */
  /* to unmap are mapped, using VirtualQuery. */
  b = VirtualFree((LPVOID)base, (SIZE_T)size, MEM_DECOMMIT);
  AVER(b != 0);  /* .assume.free.success */
  vm->mapped -= size;

  EVENT3(VMUnmap, vm, base, limit);
}
Example #17
void VMUnmap(VM vm, Addr base, Addr limit)
{
  Size size;
  void *addr;

  AVERT(VM, vm);
  AVER(base < limit);
  AVER(base >= vm->base);
  AVER(limit <= vm->limit);
  AVER(AddrIsAligned(base, vm->align));
  AVER(AddrIsAligned(limit, vm->align));

  size = AddrOffset(base, limit);

  /* see <design/vmo1/#fun.unmap.offset> */
  addr = mmap((void *)base, (size_t)size,
              PROT_NONE, MAP_ANON | MAP_PRIVATE | MAP_FIXED,
              -1, 0);
  AVER(addr == (void *)base);

  vm->mapped -= size;

  EVENT3(VMUnmap, vm, base, limit);
}
Example #18
Res VMCreate(VM *vmReturn, Size size, void *params)
{
  LPVOID vbase;
  SYSTEM_INFO si;
  Align align;
  VM vm;
  Res res;
  BOOL b;
  VMParams vmParams = params;

  AVER(vmReturn != NULL);
  AVER(params != NULL); /* FIXME: Should have full AVERT? */

  AVER(COMPATTYPE(LPVOID, Addr));  /* .assume.lpvoid-addr */
  AVER(COMPATTYPE(SIZE_T, Size));

  GetSystemInfo(&si);
  align = (Align)si.dwPageSize;
  AVER((DWORD)align == si.dwPageSize); /* check it didn't truncate */
  AVER(SizeIsP2(align));    /* see .assume.sysalign */
  size = SizeAlignUp(size, align);
  if ((size == 0) || (size > (Size)(SIZE_T)-1))
    return ResRESOURCE;

  /* Allocate the vm descriptor.  This is likely to be wasteful. */
  vbase = VirtualAlloc(NULL, SizeAlignUp(sizeof(VMStruct), align),
                       MEM_COMMIT, PAGE_READWRITE);
  if (vbase == NULL)
    return ResMEMORY;
  vm = (VM)vbase;

  /* Allocate the address space. */
  vbase = VirtualAlloc(NULL,
                       size,
                       vmParams->topDown ?
                         MEM_RESERVE | MEM_TOP_DOWN :
                         MEM_RESERVE,
                       PAGE_NOACCESS);
  if (vbase == NULL) {
    res = ResRESOURCE;
    goto failReserve;
  }

  AVER(AddrIsAligned(vbase, align));

  vm->align = align;
  vm->base = (Addr)vbase;
  vm->limit = AddrAdd(vbase, size);
  vm->reserved = size;
  vm->mapped = 0;
  AVER(vm->base < vm->limit);  /* .assume.not-last */

  vm->sig = VMSig;
  AVERT(VM, vm);

  EVENT3(VMCreate, vm, vm->base, vm->limit);
  *vmReturn = vm;
  return ResOK;

failReserve:
  b = VirtualFree((LPVOID)vm, (SIZE_T)0, MEM_RELEASE);
  AVER(b != 0);
  return res;
}
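The Win32 examples above (#4, #14, #16, #18) share one lifecycle: VMCreate reserves address space with MEM_RESERVE, VMMap commits pages inside it with MEM_COMMIT, and VMUnmap decommits them. A self-contained demonstration of that lifecycle, with error handling reduced to asserts; an illustration, not MPS code:

#include <windows.h>
#include <assert.h>

int main(void)
{
  SYSTEM_INFO si;
  GetSystemInfo(&si);

  SIZE_T reserved = 16 * (SIZE_T)si.dwPageSize;

  /* Reserve address space only; no pages are committed yet. */
  LPVOID base = VirtualAlloc(NULL, reserved, MEM_RESERVE, PAGE_NOACCESS);
  assert(base != NULL);

  /* Commit one page within the reservation (cf. VMMap). */
  LPVOID p = VirtualAlloc(base, si.dwPageSize, MEM_COMMIT, PAGE_READWRITE);
  assert(p == base);
  ((char *)p)[0] = 1;           /* the page is now usable */

  /* Decommit (cf. VMUnmap), then release the whole reservation. */
  VirtualFree(base, si.dwPageSize, MEM_DECOMMIT);
  VirtualFree(base, 0, MEM_RELEASE);
  return 0;
}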
Example #19
Res VMCreate(VM *vmReturn, Size size, void *params)
{
  Align align;
  VM vm;
  int pagesize;
  void *addr;
  Res res;

  AVER(vmReturn != NULL);
  AVER(params != NULL);

  /* Find out the page size from the OS */
  pagesize = getpagesize();
  /* check the actual returned pagesize will fit in an object of */
  /* type Align. */
  AVER(pagesize > 0);
  AVER((unsigned long)pagesize <= (unsigned long)(Align)-1);
  align = (Align)pagesize;
  AVER(SizeIsP2(align));
  size = SizeAlignUp(size, align);
  if((size == 0) || (size > (Size)(size_t)-1))
    return ResRESOURCE;

  /* Map in a page to store the descriptor on. */
  addr = mmap(0, (size_t)SizeAlignUp(sizeof(VMStruct), align),
              PROT_READ | PROT_WRITE,
              MAP_ANON | MAP_PRIVATE,
              -1, 0);
  /* On Darwin the MAP_FAILED return value is not documented, but does
   * work.  MAP_FAILED _is_ documented by POSIX.
   */
  if(addr == MAP_FAILED) {
    int e = errno;
    AVER(e == ENOMEM); /* .assume.mmap.err */
    return ResMEMORY;
  }
  vm = (VM)addr;

  vm->align = align;

  /* See .assume.not-last. */
  addr = mmap(0, (size_t)size,
              PROT_NONE, MAP_ANON | MAP_PRIVATE,
              -1, 0);
  if(addr == MAP_FAILED) {
    int e = errno;
    AVER(e == ENOMEM); /* .assume.mmap.err */
    res = ResRESOURCE;
    goto failReserve;
  }

  vm->base = (Addr)addr;
  vm->limit = AddrAdd(vm->base, size);
  vm->reserved = size;
  vm->mapped = (Size)0;

  vm->sig = VMSig;

  AVERT(VM, vm);

  EVENT3(VMCreate, vm, vm->base, vm->limit);

  *vmReturn = vm;
  return ResOK;

failReserve:
  (void)munmap((void *)vm, (size_t)SizeAlignUp(sizeof(VMStruct), align));
  return res;
}
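A detail worth noting in this VMCreate: the VM descriptor itself lives in a page obtained from mmap, the same facility it manages, so no other allocator is needed. A self-contained sketch of that bootstrap trick, with invented names (Desc, descCreate) and abbreviated error handling:

#include <sys/mman.h>
#include <stddef.h>

typedef struct {
  void *base;
  size_t size;
} Desc;

static Desc *descCreate(size_t size)
{
  /* Map a page to hold the descriptor itself (cf. the first mmap
     in VMCreate above; mmap rounds the length up to a page). */
  Desc *d = mmap(NULL, sizeof(Desc), PROT_READ | PROT_WRITE,
                 MAP_ANON | MAP_PRIVATE, -1, 0);
  if (d == MAP_FAILED)
    return NULL;

  /* Reserve the address space the descriptor will describe. */
  d->base = mmap(NULL, size, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
  if (d->base == MAP_FAILED) {
    munmap(d, sizeof(Desc));    /* unwind, cf. failReserve */
    return NULL;
  }
  d->size = size;
  return d;
}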