Example #1
/* MVFFAddSeg -- Allocates a new segment from the arena
 *
 * Allocates a new segment from the arena (with the given
 * withReservoirPermit flag) of at least the specified size.  The
 * specified size should be pool-aligned.  Adds it to the free list.
 */
static Res MVFFAddSeg(Seg *segReturn,
                      MVFF mvff, Size size, Bool withReservoirPermit)
{
  Pool pool;
  Arena arena;
  Size segSize;
  Seg seg;
  Res res;
  Align align;
  Addr base, limit;

  AVERT(MVFF, mvff);
  AVER(size > 0);
  AVER(BoolCheck(withReservoirPermit));

  pool = MVFF2Pool(mvff);
  arena = PoolArena(pool);
  align = ArenaAlign(arena);

  AVER(SizeIsAligned(size, PoolAlignment(pool)));

  /* Use extendBy unless it's too small (see */
  /* <design/poolmvff/#design.seg-size>). */
  if (size <= mvff->extendBy)
    segSize = mvff->extendBy;
  else
    segSize = size;

  segSize = SizeAlignUp(segSize, align);

  res = SegAlloc(&seg, SegClassGet(), mvff->segPref, segSize, pool,
                 withReservoirPermit);
  if (res != ResOK) {
    /* try again for a seg just large enough for object */
    /* see <design/poolmvff/#design.seg-fail> */
    segSize = SizeAlignUp(size, align);
    res = SegAlloc(&seg, SegClassGet(), mvff->segPref, segSize, pool,
                   withReservoirPermit);
    if (res != ResOK) {
      return res;
    }
  }

  mvff->total += segSize;
  base = SegBase(seg); limit = AddrAdd(base, segSize);
  DebugPoolFreeSplat(pool, base, limit);
  MVFFAddToFreeList(&base, &limit, mvff);
  AVER(base <= SegBase(seg));
  if (mvff->minSegSize > segSize) mvff->minSegSize = segSize;

  /* Don't call MVFFFreeSegs; that would be silly. */

  *segReturn = seg;
  return ResOK;
}
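The sizing rule above, take the larger of the request and extendBy and then round up to the arena alignment, can be tried in isolation. The following is a standalone sketch, not MPS code; sizeAlignUp is a local stand-in for SizeAlignUp and assumes a power-of-two alignment.

#include <stdio.h>
#include <stddef.h>

typedef size_t Size;

/* Round size up to a power-of-two alignment (stand-in for SizeAlignUp). */
static Size sizeAlignUp(Size size, Size align)
{
  return (size + align - 1) & ~(align - 1);
}

/* The segment-size rule from MVFFAddSeg: at least extendBy, then aligned. */
static Size segSizeFor(Size request, Size extendBy, Size align)
{
  Size segSize = (request <= extendBy) ? extendBy : request;
  return sizeAlignUp(segSize, align);
}

int main(void)
{
  printf("%lu\n", (unsigned long)segSizeFor(100, 65536, 4096));   /* 65536 */
  printf("%lu\n", (unsigned long)segSizeFor(70000, 65536, 4096)); /* 73728 */
  return 0;
}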
Example #2
void VMDestroy(VM vm)
{
  int r;
  int zero_fd, none_fd;

  AVERT(VM, vm);
  AVER(vm->mapped == (Size)0);

  /* This appears to be pretty pointless, since the space descriptor */
  /* page is about to vanish completely.  However, munmap might fail */
  /* for some reason, and this would ensure that it was still */
  /* discovered if sigs were being checked. */
  vm->sig = SigInvalid;

  zero_fd = vm->zero_fd; none_fd = vm->none_fd;
  r = munmap((caddr_t)vm->base, (int)AddrOffset(vm->base, vm->limit));
  AVER(r == 0);
  r = munmap((caddr_t)vm,
             (int)SizeAlignUp(sizeof(VMStruct), vm->align));
  AVER(r == 0);
  /* .close.fail: We ignore failure from close() as there's very */
  /* little we can do anyway. */
  (void)close(zero_fd);
  (void)close(none_fd);

  EVENT_P(VMDestroy, vm);
}
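Setting vm->sig to SigInvalid before tearing the mapping down is the usual MPS defensive pattern: any later use of the destroyed structure fails its signature check. A minimal standalone sketch of the idea (the names and the assert-based check here are illustrative, not the MPS macros):

#include <assert.h>
#include <stdio.h>

#define THING_SIG   0x519B0B1EUL  /* arbitrary illustrative signature */
#define SIG_INVALID 0xDEADDEADUL

typedef struct {
  unsigned long sig;
  int payload;
} Thing;

static void thingInit(Thing *t)    { t->sig = THING_SIG; t->payload = 0; }
static void thingCheck(Thing *t)   { assert(t->sig == THING_SIG); }
static void thingDestroy(Thing *t) { thingCheck(t); t->sig = SIG_INVALID; }

int main(void)
{
  Thing t;
  thingInit(&t);
  thingCheck(&t);     /* passes */
  thingDestroy(&t);
  /* thingCheck(&t) would now fail its assertion. */
  printf("ok\n");
  return 0;
}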
Example #3
static void MVFFFree(Pool pool, Addr old, Size size)
{
  Res res;
  Addr base, limit;
  MVFF mvff;

  AVERT(Pool, pool);
  mvff = Pool2MVFF(pool);
  AVERT(MVFF, mvff);

  AVER(old != (Addr)0);
  AVER(AddrIsAligned(old, PoolAlignment(pool)));
  AVER(size > 0);

  size = SizeAlignUp(size, PoolAlignment(pool));
  base = old;
  limit = AddrAdd(base, size);

  res = MVFFAddToFreeList(&base, &limit, mvff);
  AVER(res == ResOK);
  if (res == ResOK)
    MVFFFreeSegs(mvff, base, limit);

  return;
}
Example #4
static Res MFSInit(Pool pool, ArgList args)
{
  Size extendBy = MFS_EXTEND_BY_DEFAULT;
  Bool extendSelf = TRUE;
  Size unitSize;
  MFS mfs;
  Arena arena;
  ArgStruct arg;

  AVER(pool != NULL);
  AVERT(ArgList, args);
  
  ArgRequire(&arg, args, MPS_KEY_MFS_UNIT_SIZE);
  unitSize = arg.val.size;
  if (ArgPick(&arg, args, MPS_KEY_EXTEND_BY))
    extendBy = arg.val.size;
  else {
    if (extendBy < unitSize)
      extendBy = unitSize;
  }
  if (ArgPick(&arg, args, MFSExtendSelf))
    extendSelf = arg.val.b;

  AVER(extendBy >= unitSize);
  AVERT(Bool, extendSelf);
 
  mfs = PoolPoolMFS(pool);
  arena = PoolArena(pool);

  mfs->unroundedUnitSize = unitSize;

  if (unitSize < UNIT_MIN)
    unitSize = UNIT_MIN;
  unitSize = SizeAlignUp(unitSize, MPS_PF_ALIGN);
  extendBy = SizeAlignUp(extendBy, ArenaAlign(arena));

  mfs->extendBy = extendBy;
  mfs->extendSelf = extendSelf;
  mfs->unitSize = unitSize;
  mfs->freeList = NULL;
  mfs->tractList = NULL;
  mfs->sig = MFSSig;

  AVERT(MFS, mfs);
  EVENT5(PoolInitMFS, pool, arena, extendBy, BOOL(extendSelf), unitSize);
  return ResOK;
}
Example #5
Bool MPMCheck(void)
{
    CHECKL(sizeof(Word) * CHAR_BIT == MPS_WORD_WIDTH);
    CHECKL((Word)1 << MPS_WORD_SHIFT == MPS_WORD_WIDTH);
    CHECKL(AlignCheck(MPS_PF_ALIGN));
    /* Check that trace ids will fit in the TraceId type. */
    CHECKL(TraceLIMIT <= UINT_MAX);
    /* Check that there are enough bits in */
    /* a TraceSet to store all possible trace ids. */
    CHECKL(sizeof(TraceSet) * CHAR_BIT >= TraceLIMIT);

    CHECKL((SizeAlignUp(0, 2048) == 0));
    CHECKL(!SizeIsAligned(64, (unsigned) -1));
    CHECKL(SizeIsAligned(0, 32));
    CHECKL((SizeAlignUp(1024, 16) == 1024));
    /* .prime: 31051 is prime */
    CHECKL(SizeIsAligned(SizeAlignUp(31051, 256), 256));
    CHECKL(SizeIsAligned(SizeAlignUp(31051, 512), 512));
    CHECKL(!SizeIsAligned(31051, 1024));
    CHECKL(!SizeIsP2(0));
    CHECKL(SizeIsP2(128));
    CHECKL(SizeLog2((Size)1) == 0);
    CHECKL(SizeLog2((Size)256) == 8);
    CHECKL(SizeLog2((Size)65536) == 16);
    CHECKL(SizeLog2((Size)131072) == 17);

    /* .check.writef: We check that various types will fit in a Word; */
    /* See .writef.check.  Don't need to check WriteFS or WriteFF as they */
    /* should not be cast to Word. */
    CHECKL(sizeof(WriteFA) <= sizeof(Word));
    CHECKL(sizeof(WriteFP) <= sizeof(Word));
    CHECKL(sizeof(WriteFW) <= sizeof(Word)); /* Should be trivial */
    CHECKL(sizeof(WriteFU) <= sizeof(Word));
    CHECKL(sizeof(WriteFB) <= sizeof(Word));
    CHECKL(sizeof(WriteFC) <= sizeof(Word));
    /* .check.write.double: See .write.double.check */
    {
        int e, DBL_EXP_DIG = 1;
        for (e = DBL_MAX_10_EXP; e > 0; e /= 10)
            DBL_EXP_DIG++;
        CHECKL(DBL_EXP_DIG < DBL_DIG);
        CHECKL(-(DBL_MIN_10_EXP) <= DBL_MAX_10_EXP);
    }

    return TRUE;
}
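.check.write.double above counts how many decimal digits a double's exponent can need and checks it against DBL_DIG. The same loop as a standalone program (nothing MPS-specific is assumed):

#include <stdio.h>
#include <float.h>

int main(void)
{
  int e, expDigits = 1;   /* start at 1, exactly as in MPMCheck */
  for (e = DBL_MAX_10_EXP; e > 0; e /= 10)
    expDigits++;
  /* With DBL_MAX_10_EXP == 308 this prints 4: three digits plus one. */
  printf("DBL_MAX_10_EXP = %d, exponent digits = %d, DBL_DIG = %d\n",
         DBL_MAX_10_EXP, expDigits, DBL_DIG);
  return 0;
}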
Example #6
static Res MFSInit(Pool pool, Arena arena, PoolClass klass, ArgList args)
{
  Size extendBy = MFS_EXTEND_BY_DEFAULT;
  Bool extendSelf = TRUE;
  Size unitSize;
  MFS mfs;
  ArgStruct arg;
  Res res;

  AVER(pool != NULL);
  AVERT(Arena, arena);
  AVERT(ArgList, args);
  UNUSED(klass); /* used for debug pools only */
  
  ArgRequire(&arg, args, MPS_KEY_MFS_UNIT_SIZE);
  unitSize = arg.val.size;
  if (ArgPick(&arg, args, MPS_KEY_EXTEND_BY))
    extendBy = arg.val.size;
  if (ArgPick(&arg, args, MFSExtendSelf))
    extendSelf = arg.val.b;

  AVER(unitSize > 0);
  AVER(extendBy > 0);
  AVERT(Bool, extendSelf);

  res = NextMethod(Pool, MFSPool, init)(pool, arena, klass, args);
  if (res != ResOK)
    goto failNextInit;
  mfs = CouldBeA(MFSPool, pool);

  mfs->unroundedUnitSize = unitSize;

  if (unitSize < UNIT_MIN)
    unitSize = UNIT_MIN;
  unitSize = SizeAlignUp(unitSize, MPS_PF_ALIGN);
  if (extendBy < unitSize)
    extendBy = unitSize;
  extendBy = SizeArenaGrains(extendBy, arena);

  mfs->extendBy = extendBy;
  mfs->extendSelf = extendSelf;
  mfs->unitSize = unitSize;
  mfs->freeList = NULL;
  mfs->tractList = NULL;
  mfs->total = 0;
  mfs->free = 0;

  SetClassOfPoly(pool, CLASS(MFSPool));
  mfs->sig = MFSSig;
  AVERC(MFS, mfs);

  EVENT5(PoolInitMFS, pool, arena, extendBy, BOOLOF(extendSelf), unitSize);
  return ResOK;

failNextInit:
  AVER(res != ResOK);
  return res;
}
Example #7
Res SACFill(Addr *p_o, SAC sac, Size size, Bool hasReservoirPermit)
{
  Index i;
  Count blockCount, j;
  Size blockSize;
  Addr p, fl;
  Res res = ResOK; /* stop compiler complaining */
  mps_sac_t esac;

  AVER(p_o != NULL);
  AVERT(SAC, sac);
  AVER(size != 0);
  AVERT(Bool, hasReservoirPermit);
  esac = ExternalSACOfSAC(sac);

  sacFind(&i, &blockSize, sac, size);
  /* Check it's empty (in the future, there will be other cases). */
  AVER(esac->_freelists[i]._count == 0);

  /* Fill 1/3 of the cache for this class. */
  blockCount = esac->_freelists[i]._count_max / 3;
  /* Adjust size for the overlarge class. */
  if (blockSize == SizeMAX)
    /* .align: align 'cause some classes don't accept unaligned. */
    blockSize = SizeAlignUp(size, PoolAlignment(sac->pool));
  for (j = 0, fl = esac->_freelists[i]._blocks;
       j <= blockCount; ++j) {
    res = PoolAlloc(&p, sac->pool, blockSize, hasReservoirPermit);
    if (res != ResOK)
      break;
    /* @@@@ ignoring shields for now */
    *ADDR_PTR(Addr, p) = fl; fl = p;
  }
  /* If didn't get any, just return. */
  if (j == 0) {
    AVER(res != ResOK);
    return res;
  }

  /* Take the last one off, and return it. */
  esac->_freelists[i]._count = j - 1;
  *p_o = fl;
  /* @@@@ ignoring shields for now */
  esac->_freelists[i]._blocks = *ADDR_PTR(Addr, fl);
  return ResOK;
}
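SACFill threads each freshly allocated block onto the class free list by storing the previous head in the block's first word (the *ADDR_PTR(Addr, p) = fl; fl = p; step). A minimal standalone sketch of that intrusive-list idiom, using malloc in place of PoolAlloc:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
  void *head = NULL;       /* free list head, playing the role of fl */
  size_t blockSize = 64;   /* must be at least sizeof(void *) */
  int i;

  /* Push three blocks: the first word of each holds the old head. */
  for (i = 0; i < 3; ++i) {
    void *p = malloc(blockSize);
    if (p == NULL)
      break;
    *(void **)p = head;
    head = p;
  }

  /* Pop them again, as sacClassFlush does when emptying a class. */
  while (head != NULL) {
    void *p = head;
    head = *(void **)p;
    free(p);
  }
  printf("done\n");
  return 0;
}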
Example #8
static Res MVFFAlloc(Addr *aReturn, Pool pool, Size size,
                     Bool withReservoirPermit, DebugInfo info)
{
  Res res;
  MVFF mvff;
  Addr base, limit;
  Bool foundBlock;

  AVERT(Pool, pool);
  mvff = Pool2MVFF(pool);
  AVERT(MVFF, mvff);

  AVER(aReturn != NULL);
  AVER(size > 0);
  AVER(BoolCheck(withReservoirPermit));
  UNUSED(info);

  size = SizeAlignUp(size, PoolAlignment(pool));

  foundBlock = MVFFFindFirstFree(&base, &limit, mvff, size);
  if (!foundBlock) {
    Seg seg;

    res = MVFFAddSeg(&seg, mvff, size, withReservoirPermit);
    if (res != ResOK)
      return res;
    foundBlock = MVFFFindFirstFree(&base, &limit, mvff, size);

    /* We know that the found range must intersect the new segment. */
    /* In particular, it doesn't necessarily lie entirely within it. */
    /* The next three AVERs test for intersection of two intervals. */
    AVER(base >= SegBase(seg) || limit <= SegLimit(seg));
    AVER(base < SegLimit(seg));
    AVER(SegBase(seg) < limit);

    /* We also know that the found range is no larger than the segment. */
    AVER(SegSize(seg) >= AddrOffset(base, limit));
  }
  AVER(foundBlock);
  AVER(AddrOffset(base, limit) == size);

  *aReturn = base;

  return ResOK;
}
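The three AVERs above check that the half-open range [base, limit) intersects the new segment [SegBase(seg), SegLimit(seg)). For half-open intervals, intersection reduces to two comparisons; a standalone sketch (nothing MPS-specific):

#include <assert.h>
#include <stdio.h>

/* Two half-open intervals [b1, l1) and [b2, l2) intersect exactly when
   each one starts before the other ends. */
static int intervalsIntersect(unsigned long b1, unsigned long l1,
                              unsigned long b2, unsigned long l2)
{
  return b1 < l2 && b2 < l1;
}

int main(void)
{
  assert(intervalsIntersect(0, 10, 5, 15));    /* overlapping */
  assert(!intervalsIntersect(0, 10, 10, 20));  /* touching, no overlap */
  printf("ok\n");
  return 0;
}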
Example #9
Res VMCreate(VM *vmReturn, Size size)
{
  VM vm;

  AVER(vmReturn != NULL);

  /* Note that because we add VMANPageALIGNMENT rather than */
  /* VMANPageALIGNMENT-1 we are not in danger of overflowing */
  /* vm->limit even if malloc were perverse enough to give us */
  /* a block at the end of memory. */
  size = SizeAlignUp(size, VMANPageALIGNMENT) + VMANPageALIGNMENT;
  if ((size < VMANPageALIGNMENT) || (size > (Size)(size_t)-1))
    return ResRESOURCE;

  vm = (VM)malloc(sizeof(VMStruct));
  if (vm == NULL)
    return ResMEMORY;

  vm->block = malloc((size_t)size);
  if (vm->block == NULL) {
    free(vm);
    return ResMEMORY;
  }

  vm->base  = AddrAlignUp((Addr)vm->block, VMANPageALIGNMENT);
  vm->limit = AddrAdd(vm->base, size - VMANPageALIGNMENT);
  AVER(vm->limit < AddrAdd((Addr)vm->block, size));

  memset((void *)vm->block, VMJunkBYTE, size);
 
  /* Lie about the reserved address space, to simulate real */
  /* virtual memory. */
  vm->reserved = size - VMANPageALIGNMENT;
  vm->mapped = (Size)0;
 
  vm->sig = VMSig;

  AVERT(VM, vm);
 
  EVENT3(VMCreate, vm, vm->base, vm->limit);
  *vmReturn = vm;
  return ResOK;
}
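This VMCreate gets an aligned region out of an unaligned malloc block by over-allocating one extra VMANPageALIGNMENT and rounding the base up. A standalone sketch of that arithmetic, with an assumed 4096-byte page size:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

int main(void)
{
  size_t pageSize = 4096;          /* stand-in for VMANPageALIGNMENT */
  size_t want = 3 * pageSize;      /* size the caller asked for */
  size_t total = want + pageSize;  /* over-allocate by a whole page */
  void *block = malloc(total);
  if (block == NULL)
    return 1;

  /* Round the block's address up to the next page boundary. */
  uintptr_t raw = (uintptr_t)block;
  uintptr_t base = (raw + pageSize - 1) & ~(uintptr_t)(pageSize - 1);
  uintptr_t limit = base + want;

  /* Because a whole extra page was allocated, [base, limit) still
     lies inside [raw, raw + total). */
  printf("block=%p base=%p limit=%p fits=%d\n",
         block, (void *)base, (void *)limit, limit <= raw + total);
  free(block);
  return 0;
}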
Example #10
void VMDestroy(VM vm)
{
  int r;

  AVERT(VM, vm);
  AVER(vm->mapped == (Size)0);

  EVENT1(VMDestroy, vm);

  /* This appears to be pretty pointless, since the descriptor */
  /* page is about to vanish completely.  However, munmap might fail */
  /* for some reason, and this would ensure that it was still */
  /* discovered if sigs were being checked. */
  vm->sig = SigInvalid;

  r = munmap((void *)vm->base, (size_t)AddrOffset(vm->base, vm->limit));
  AVER(r == 0);
  r = munmap((void *)vm,
             (size_t)SizeAlignUp(sizeof(VMStruct), vm->align));
  AVER(r == 0);
}
Example #11
static void MVFFFree(Pool pool, Addr old, Size size)
{
  Addr base, limit;
  MVFF mvff;

  AVERT(Pool, pool);
  mvff = Pool2MVFF(pool);
  AVERT(MVFF, mvff);

  AVER(old != (Addr)0);
  AVER(size > 0);

  base = AddrAlignUp(old, PoolAlignment(pool));
  size = size - AddrOffset(old, base);
  size = SizeAlignUp(size, PoolAlignment(pool));
  limit = AddrAdd(base, size);

  MVFFAddToFreeList(&base, &limit, mvff);

  MVFFFreeSegs(mvff, base, limit);
}
Example #12
Bool MFSCheck(MFS mfs)
{
  Arena arena;

  CHECKS(MFS, mfs);
  CHECKC(MFSPool, mfs);
  CHECKD(Pool, MFSPool(mfs));
  CHECKC(MFSPool, mfs);
  CHECKL(mfs->unitSize >= UNIT_MIN);
  CHECKL(mfs->extendBy >= UNIT_MIN);
  CHECKL(BoolCheck(mfs->extendSelf));
  arena = PoolArena(MFSPool(mfs));
  CHECKL(SizeIsArenaGrains(mfs->extendBy, arena));
  CHECKL(SizeAlignUp(mfs->unroundedUnitSize, PoolAlignment(MFSPool(mfs))) ==
         mfs->unitSize);
  if(mfs->tractList != NULL) {
    CHECKD_NOSIG(Tract, mfs->tractList);
  }
  CHECKL(mfs->free <= mfs->total);
  CHECKL((mfs->total - mfs->free) % mfs->unitSize == 0);
  return TRUE;
}
Example #13
void SACEmpty(SAC sac, Addr p, Size size)
{
  Index i;
  Size blockSize;
  mps_sac_t esac;
  
  AVERT(SAC, sac);
  AVER(p != NULL);
  AVER(PoolHasAddr(sac->pool, p));
  AVER(size > 0);
  esac = ExternalSACOfSAC(sac);

  sacFind(&i, &blockSize, sac, size);
  /* Check it's full (in the future, there will be other cases). */
  AVER(esac->_freelists[i]._count
       == esac->_freelists[i]._count_max);

  /* Adjust size for the overlarge class. */
  if (blockSize == SizeMAX)
    /* see .align */
    blockSize = SizeAlignUp(size, PoolAlignment(sac->pool));
  if (esac->_freelists[i]._count_max > 0) {
    Count blockCount;

    /* Flush 2/3 of the cache for this class. */
    /* Computed as count - count/3, so that the rounding works out right. */
    blockCount = esac->_freelists[i]._count;
    blockCount -= esac->_freelists[i]._count / 3;
    sacClassFlush(sac, i, blockSize, (blockCount > 0) ? blockCount : 1);
    /* Leave the current one in the cache. */
    esac->_freelists[i]._count += 1;
    /* @@@@ ignoring shields for now */
    *ADDR_PTR(Addr, p) = esac->_freelists[i]._blocks;
    esac->_freelists[i]._blocks = p;
  } else {
    /* Free even the current one. */
    PoolFree(sac->pool, p, blockSize);
  }
}
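The flush count above is written as count - count/3 rather than (2 * count) / 3 so that integer division rounds in favour of flushing at least two thirds. A tiny standalone comparison of the two forms:

#include <stdio.h>

int main(void)
{
  unsigned count;
  /* For count == 4: count - count/3 == 3, but (2*count)/3 == 2. */
  for (count = 1; count <= 6; ++count)
    printf("count=%u  count-count/3=%u  (2*count)/3=%u\n",
           count, count - count/3, (2*count)/3);
  return 0;
}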
Example #14
/* Fragment of the client arena init function; the signature below is
   reconstructed from the uses of arenaReturn, class, and args in the body. */
static Res ClientArenaInit(Arena *arenaReturn, ArenaClass class,
                           va_list args)
{
  Arena arena;
  ClientArena clientArena;
  Size size;
  Size clArenaSize;   /* aligned size of ClientArenaStruct */
  Addr base, limit, chunkBase;
  Res res;
  Chunk chunk;
 
  size = va_arg(args, Size);
  base = va_arg(args, Addr);
  AVER(arenaReturn != NULL);
  AVER((ArenaClass)mps_arena_class_cl() == class);
  AVER(base != (Addr)0);

  clArenaSize = SizeAlignUp(sizeof(ClientArenaStruct), MPS_PF_ALIGN);
  if (size < clArenaSize)
    return ResMEMORY;

  limit = AddrAdd(base, size);

  /* allocate the arena */
  base = AddrAlignUp(base, MPS_PF_ALIGN);
  clientArena = (ClientArena)base;
  chunkBase = AddrAlignUp(AddrAdd(base, clArenaSize), MPS_PF_ALIGN);
  if (chunkBase > limit)
    return ResMEMORY;

  arena = ClientArena2Arena(clientArena);
  /* <code/arena.c#init.caller> */
  res = ArenaInit(arena, class);
Example #15
Res VMCreate(VM *vmReturn, Size size)
{
  void *addr;
  Align align;
  int zero_fd;
  int none_fd;
  VM vm;
  long pagesize;
  Res res;

  AVER(vmReturn != NULL);

  /* Find out the page size from the OS */
  pagesize = sysconf(_SC_PAGESIZE);
  /* check the actual returned pagesize will fit in an object of */
  /* type Align. */
  AVER(pagesize > 0);
  AVER((unsigned long)pagesize <= (unsigned long)(Align)-1);
  /* Note implicit conversion from "long" to "Align". */
  align = pagesize;
  AVER(SizeIsP2(align));
  size = SizeAlignUp(size, align);
  if((size == 0) || (size > (Size)(size_t)-1))
    return ResRESOURCE;

  zero_fd = open("/dev/zero", O_RDONLY);
  if(zero_fd == -1)
    return ResFAIL;
  none_fd = open("/etc/passwd", O_RDONLY);
  if(none_fd == -1) {
    res = ResFAIL;
    goto failNoneOpen;
  }

  /* Map in a page to store the descriptor on. */
  addr = mmap((void *)0, (size_t)SizeAlignUp(sizeof(VMStruct), align),
              PROT_READ | PROT_WRITE, MAP_PRIVATE,
              zero_fd, (off_t)0);
  if(addr == MAP_FAILED) {
    AVER(errno == EAGAIN); /* .assume.mmap.err */
    res = ResMEMORY;
    goto failVMMap;
  }
  vm = (VM)addr;

  vm->zero_fd = zero_fd;
  vm->none_fd = none_fd;
  vm->align = align;

  /* .map.reserve: See .assume.not-last. */
  addr = mmap((void *)0, (size_t)size, PROT_NONE, MAP_SHARED,
              none_fd, (off_t)0);
  if(addr == MAP_FAILED) {
    AVER(errno == ENOMEM); /* .assume.mmap.err */
    res = (errno == ENOMEM) ? ResRESOURCE : ResFAIL;
    goto failReserve;
  }

  vm->base = (Addr)addr;
  vm->limit = AddrAdd(vm->base, size);
  vm->reserved = size;
  vm->mapped = (Size)0;

  vm->sig = VMSig;

  AVERT(VM, vm);

  EVENT_PAA(VMCreate, vm, vm->base, vm->limit);

  *vmReturn = vm;
  return ResOK;

failReserve:
  (void)munmap((void *)vm, (size_t)SizeAlignUp(sizeof(VMStruct), align));
failVMMap:
  (void)close(none_fd); /* see .close.fail */
failNoneOpen:
  (void)close(zero_fd);
  return res;
}
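The reservation idiom in this VMCreate, map a large range with PROT_NONE and grant access later, can be shown with portable POSIX calls. The sketch below uses MAP_ANONYMOUS and mprotect rather than this platform's /dev/zero and /etc/passwd mappings, so it illustrates the idea rather than reproducing this file's code:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
  size_t pageSize = (size_t)sysconf(_SC_PAGESIZE);
  size_t reserve = 64 * pageSize;

  /* Reserve: the addresses are claimed but no access is allowed yet. */
  void *base = mmap(NULL, reserve, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED)
    return 1;

  /* Commit: grant read/write access to the first page only. */
  if (mprotect(base, pageSize, PROT_READ | PROT_WRITE) != 0)
    return 1;
  memset(base, 0, pageSize);

  (void)munmap(base, reserve);
  printf("reserved %lu bytes, committed one page\n", (unsigned long)reserve);
  return 0;
}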
Example #16
Res VMCreate(VM *vmReturn, Size size, void *params)
{
  Align align;
  VM vm;
  int pagesize;
  void *addr;
  Res res;

  AVER(vmReturn != NULL);
  AVER(params != NULL);

  /* Find out the page size from the OS */
  pagesize = getpagesize();
  /* check the actual returned pagesize will fit in an object of */
  /* type Align. */
  AVER(pagesize > 0);
  AVER((unsigned long)pagesize <= (unsigned long)(Align)-1);
  align = (Align)pagesize;
  AVER(SizeIsP2(align));
  size = SizeAlignUp(size, align);
  if((size == 0) || (size > (Size)(size_t)-1))
    return ResRESOURCE;

  /* Map in a page to store the descriptor on. */
  addr = mmap(0, (size_t)SizeAlignUp(sizeof(VMStruct), align),
              PROT_READ | PROT_WRITE,
              MAP_ANON | MAP_PRIVATE,
              -1, 0);
  /* On Darwin the MAP_FAILED return value is not documented, but does
   * work.  MAP_FAILED _is_ documented by POSIX.
   */
  if(addr == MAP_FAILED) {
    int e = errno;
    AVER(e == ENOMEM); /* .assume.mmap.err */
    return ResMEMORY;
  }
  vm = (VM)addr;

  vm->align = align;

  /* See .assume.not-last. */
  addr = mmap(0, (size_t)size,
              PROT_NONE, MAP_ANON | MAP_PRIVATE,
              -1, 0);
  if(addr == MAP_FAILED) {
    int e = errno;
    AVER(e == ENOMEM); /* .assume.mmap.err */
    res = ResRESOURCE;
    goto failReserve;
  }

  vm->base = (Addr)addr;
  vm->limit = AddrAdd(vm->base, size);
  vm->reserved = size;
  vm->mapped = (Size)0;

  vm->sig = VMSig;

  AVERT(VM, vm);

  EVENT3(VMCreate, vm, vm->base, vm->limit);

  *vmReturn = vm;
  return ResOK;

failReserve:
  (void)munmap((void *)vm, (size_t)SizeAlignUp(sizeof(VMStruct), align));
  return res;
}
Example #17
Res VMCreate(VM *vmReturn, Size size)
{
  void *addr;
  Align align;
  int none_fd;
  VM vm;
  Res res;

  AVER(vmReturn != NULL);

  align = (Align)getpagesize();
  AVER(SizeIsP2(align));
  size = SizeAlignUp(size, align);
  if ((size == 0) || (size > (Size)(size_t)-1))
    return ResRESOURCE;

  none_fd = open("/etc/passwd", O_RDONLY);
  if (none_fd == -1) {
    return ResFAIL;
  }

  /* Map in a page to store the descriptor on. */
  addr = mmap(0, (size_t)SizeAlignUp(sizeof(VMStruct), align),
              PROT_READ | PROT_WRITE,
              MAP_ANONYMOUS | MAP_PRIVATE | MAP_VARIABLE,
              -1, 0);
  if (addr == (void *)-1) {
    int e = errno;
    AVER(e == ENOMEM); /* .assume.mmap.err */
    res = (e == ENOMEM) ? ResMEMORY : ResFAIL;
    goto failVMMap;
  }
  vm = (VM)addr;

  vm->none_fd = none_fd;
  vm->align = align;

  /* See .assume.not-last. */
  addr = mmap(0, (size_t)size,
              PROT_NONE, MAP_FILE | MAP_SHARED | MAP_VARIABLE,
              none_fd, 0);
  if (addr == (void *)-1) {
    int e = errno;
    AVER(e == ENOMEM); /* .assume.mmap.err */
    res = (e == ENOMEM) ? ResRESOURCE : ResFAIL;
    goto failReserve;
  }

  vm->base = (Addr)addr;
  vm->limit = AddrAdd(vm->base, size);
  vm->reserved = size;
  vm->mapped = (Size)0;

  vm->sig = VMSig;
  AVERT(VM, vm);
  EVENT_PAA(VMCreate, vm, vm->base, vm->limit);
  *vmReturn = vm;
  return ResOK;

failReserve:
  (void)munmap((void *)vm, (size_t)SizeAlignUp(sizeof(VMStruct), align));
failVMMap:
  (void)close(none_fd); /* see .close.fail */
  return res;
}
Example #18
Res VMCreate(VM *vmReturn, Size size)
{
  caddr_t addr;
  Align align;
  int zero_fd;
  int none_fd;
  VM vm;
  Res res;

  AVER(vmReturn != NULL);

  align = (Align)getpagesize();
  AVER(SizeIsP2(align));
  size = SizeAlignUp(size, align);
  if ((size == 0) || (size > (Size)INT_MAX)) /* see .assume.size */
    return ResRESOURCE;

  zero_fd = open("/dev/zero", O_RDONLY);
  if (zero_fd == -1)
    return ResFAIL;
  none_fd = open("/etc/passwd", O_RDONLY);
  if (none_fd == -1) {
    res = ResFAIL;
    goto failNoneOpen;
  }

  /* Map in a page to store the descriptor on. */
  addr = mmap((caddr_t)0, SizeAlignUp(sizeof(VMStruct), align),
              PROT_READ | PROT_WRITE, MAP_PRIVATE,
              zero_fd, (off_t)0);
  if (addr == (caddr_t)-1) {
    int e = errno;
    AVER(e == ENOMEM); /* .assume.mmap.err */
    res = (e == ENOMEM) ? ResMEMORY : ResFAIL;
    goto failVMMap;
  }
  vm = (VM)addr;

  vm->zero_fd = zero_fd;
  vm->none_fd = none_fd;
  vm->align = align;

  /* .map.reserve: See .assume.not-last. */
  addr = mmap((caddr_t)0, size, PROT_NONE, MAP_SHARED, none_fd,
              (off_t)0);
  if (addr == (caddr_t)-1) {
    int e = errno;
    AVER(e == ENOMEM); /* .assume.mmap.err */
    res = (e == ENOMEM) ? ResRESOURCE : ResFAIL;
    goto failReserve;
  }

  vm->base = (Addr)addr;
  vm->limit = AddrAdd(vm->base, size);
  vm->reserved = size;
  vm->mapped = (Size)0;

  vm->sig = VMSig;

  AVERT(VM, vm);

  EVENT_PAA(VMCreate, vm, vm->base, vm->limit);

  *vmReturn = vm;
  return ResOK;

failReserve:
  (void)munmap((caddr_t)vm, (size_t)SizeAlignUp(sizeof(VMStruct), align));
failVMMap:
  (void)close(none_fd); /* see .close.fail */
failNoneOpen:
  (void)close(zero_fd);
  return res;
}
Example #19
Res VMCreate(VM *vmReturn, Size size)
{
  void *addr;
  Align align;
  int zero_fd;
  VM vm;
  Res res;

  AVER(vmReturn != NULL);

  align = (Align)sysconf(_SC_PAGESIZE);
  AVER(SizeIsP2(align));
  size = SizeAlignUp(size, align);
  if((size == 0) || (size > (Size)(size_t)-1))
    return ResRESOURCE;

  zero_fd = open("/dev/zero", O_RDONLY);
  if(zero_fd == -1)
    return ResFAIL;

  /* Map in a page to store the descriptor on. */
  addr = mmap((void *)0, (size_t)SizeAlignUp(sizeof(VMStruct), align),
              PROT_READ | PROT_WRITE, MAP_PRIVATE,
              zero_fd, (off_t)0);
  if(addr == MAP_FAILED) {
    AVER(errno == ENOMEM || errno == EAGAIN); /* .assume.mmap.err */
    res = (errno == ENOMEM || errno == EAGAIN) ? ResMEMORY : ResFAIL;
    goto failVMMap;
  }
  vm = (VM)addr;

  vm->zero_fd = zero_fd;
  vm->align = align;

  /* .map.reserve: MAP_AUTORESRV is necessary to avoid reserving swap. */
  addr = mmap((void *)0, (size_t)size, PROT_NONE, MAP_SHARED | MAP_AUTORESRV,
	      zero_fd, (off_t)0);
  if(addr == MAP_FAILED) {
    AVER(errno == ENOMEM); /* .assume.mmap.err */
    res = (errno == ENOMEM) ? ResRESOURCE : ResFAIL;
    goto failReserve;
  }

  vm->base = (Addr)addr;
  vm->limit = AddrAdd(vm->base, size);
  vm->reserved = size;
  vm->mapped = (Size)0;

  vm->sig = VMSig;

  AVERT(VM, vm);

  EVENT_PAA(VMCreate, vm, vm->base, vm->limit);

  *vmReturn = vm;
  return ResOK;

failReserve:
  (void)munmap((void *)vm, (size_t)SizeAlignUp(sizeof(VMStruct), align));
failVMMap:
  (void)close(zero_fd);
  return res;
}
Example #20
Res VMCreate(VM *vmReturn, Size size, void *params)
{
  LPVOID vbase;
  SYSTEM_INFO si;
  Align align;
  VM vm;
  Res res;
  BOOL b;
  VMParams vmParams = params;

  AVER(vmReturn != NULL);
  AVER(params != NULL); /* FIXME: Should have full AVERT? */

  AVER(COMPATTYPE(LPVOID, Addr));  /* .assume.lpvoid-addr */
  AVER(COMPATTYPE(SIZE_T, Size));

  GetSystemInfo(&si);
  align = (Align)si.dwPageSize;
  AVER((DWORD)align == si.dwPageSize); /* check it didn't truncate */
  AVER(SizeIsP2(align));    /* see .assume.sysalign */
  size = SizeAlignUp(size, align);
  if ((size == 0) || (size > (Size)(SIZE_T)-1))
    return ResRESOURCE;

  /* Allocate the vm descriptor.  This is likely to be wasteful. */
  vbase = VirtualAlloc(NULL, SizeAlignUp(sizeof(VMStruct), align),
                       MEM_COMMIT, PAGE_READWRITE);
  if (vbase == NULL)
    return ResMEMORY;
  vm = (VM)vbase;

  /* Allocate the address space. */
  vbase = VirtualAlloc(NULL,
                       size,
                       vmParams->topDown ?
                         MEM_RESERVE | MEM_TOP_DOWN :
                         MEM_RESERVE,
                       PAGE_NOACCESS);
  if (vbase == NULL) {
    res = ResRESOURCE;
    goto failReserve;
  }

  AVER(AddrIsAligned(vbase, align));

  vm->align = align;
  vm->base = (Addr)vbase;
  vm->limit = AddrAdd(vbase, size);
  vm->reserved = size;
  vm->mapped = 0;
  AVER(vm->base < vm->limit);  /* .assume.not-last */

  vm->sig = VMSig;
  AVERT(VM, vm);

  EVENT3(VMCreate, vm, vm->base, vm->limit);
  *vmReturn = vm;
  return ResOK;

failReserve:
  b = VirtualFree((LPVOID)vm, (SIZE_T)0, MEM_RELEASE);
  AVER(b != 0);
  return res;
}
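The Windows version above separates reserving address space (MEM_RESERVE, PAGE_NOACCESS) from committing pages to it. A minimal Win32 sketch of that reserve-then-commit sequence, independent of the MPS types:

#include <windows.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
  SYSTEM_INFO si;
  GetSystemInfo(&si);

  /* Reserve address space only: no storage is committed yet. */
  SIZE_T reserve = 64 * (SIZE_T)si.dwPageSize;
  LPVOID base = VirtualAlloc(NULL, reserve, MEM_RESERVE, PAGE_NOACCESS);
  if (base == NULL)
    return 1;

  /* Commit one page within the reservation and make it writable. */
  LPVOID page = VirtualAlloc(base, si.dwPageSize, MEM_COMMIT, PAGE_READWRITE);
  if (page == NULL)
    return 1;
  memset(page, 0, si.dwPageSize);

  /* Release the whole reservation; the size must be 0 with MEM_RELEASE. */
  VirtualFree(base, 0, MEM_RELEASE);
  printf("reserved %lu bytes, committed one page\n", (unsigned long)reserve);
  return 0;
}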
Example #21
Res ChunkInit(Chunk chunk, Arena arena, Addr base, Addr limit, Size reserved,
              BootBlock boot)
{
  Size size;
  Count pages;
  Shift pageShift;
  Size pageTableSize;
  Addr allocBase;
  void *p;
  Res res;

  /* chunk is supposed to be uninitialized, so don't check it. */
  AVERT(Arena, arena);
  AVER(base != NULL);
  AVER(AddrIsAligned(base, ArenaGrainSize(arena)));
  AVER(base < limit);
  AVER(AddrIsAligned(limit, ArenaGrainSize(arena)));
  AVERT(BootBlock, boot);

  chunk->serial = (arena->chunkSerial)++;
  chunk->arena = arena;
  RingInit(&chunk->arenaRing);

  chunk->pageSize = ArenaGrainSize(arena);
  chunk->pageShift = pageShift = SizeLog2(chunk->pageSize);
  chunk->base = base;
  chunk->limit = limit;
  chunk->reserved = reserved;
  size = ChunkSize(chunk);

  /* .overhead.pages: Chunk overhead for the page allocation table. */
  chunk->pages = pages = size >> pageShift;
  res = BootAlloc(&p, boot, (size_t)BTSize(pages), MPS_PF_ALIGN);
  if (res != ResOK)
    goto failAllocTable;
  chunk->allocTable = p;

  pageTableSize = SizeAlignUp(pages * sizeof(PageUnion), chunk->pageSize);
  chunk->pageTablePages = pageTableSize >> pageShift;

  res = Method(Arena, arena, chunkInit)(chunk, boot);
  if (res != ResOK)
    goto failClassInit;

  /* @@@@ Is BootAllocated always right? */
  /* Last thing we BootAlloc'd is pageTable.  We requested pageSize */
  /* alignment, and pageTableSize is itself pageSize aligned, so */
  /* BootAllocated should also be pageSize aligned. */
  AVER(AddrIsAligned(BootAllocated(boot), chunk->pageSize));
  chunk->allocBase = (Index)(BootAllocated(boot) >> pageShift);

  /* Init allocTable after class init, because it might be mapped there. */
  BTResRange(chunk->allocTable, 0, pages);

  /* Check that there is some usable address space remaining in the chunk. */
  allocBase = PageIndexBase(chunk, chunk->allocBase);
  AVER(allocBase < chunk->limit);

  /* Add the chunk's free address space to the arena's freeLand, so that
     we can allocate from it. */
  if (arena->hasFreeLand) {
    res = ArenaFreeLandInsert(arena, allocBase, chunk->limit);
    if (res != ResOK)
      goto failLandInsert;
  }

  TreeInit(&chunk->chunkTree);

  chunk->sig = ChunkSig;
  AVERT(Chunk, chunk);

  ArenaChunkInsert(arena, chunk);

  return ResOK;

failLandInsert:
  Method(Arena, arena, chunkFinish)(chunk);
  /* .no-clean: No clean-ups needed past this point for boot, as we will
     discard the chunk. */
failClassInit:
failAllocTable:
  return res;
}
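The overhead arithmetic in ChunkInit can be followed with concrete numbers: pages = size >> pageShift, then a page-aligned page table of pages * sizeof(PageUnion) bytes. The values below (4 KiB grains, a 16 MiB chunk, a 64-byte PageUnion) are assumptions for illustration, not the real MPS figures:

#include <stdio.h>

int main(void)
{
  unsigned long pageSize  = 4096;        /* assumed ArenaGrainSize(arena) */
  unsigned long pageShift = 12;          /* SizeLog2(pageSize) */
  unsigned long chunkSize = 16UL << 20;  /* a 16 MiB chunk */
  unsigned long pageUnion = 64;          /* assumed sizeof(PageUnion) */

  unsigned long pages = chunkSize >> pageShift;               /* 4096 pages */
  unsigned long tableBytes = pages * pageUnion;               /* 262144 bytes */
  unsigned long tableAligned =                                 /* SizeAlignUp */
    (tableBytes + pageSize - 1) & ~(pageSize - 1);
  unsigned long pageTablePages = tableAligned >> pageShift;   /* 64 pages */

  printf("pages=%lu tableBytes=%lu pageTablePages=%lu\n",
         pages, tableBytes, pageTablePages);
  return 0;
}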