Example #1
Res BufferFill(Addr *pReturn, Buffer buffer, Size size)
{
  Res res;
  Pool pool;
  Addr base, limit, next;

  AVER(pReturn != NULL);
  AVERT(Buffer, buffer);
  AVER(size > 0);
  AVER(SizeIsAligned(size, BufferPool(buffer)->alignment));
  AVER(BufferIsReady(buffer));

  pool = BufferPool(buffer);

  /* If we're here because the buffer was trapped, then we attempt */
  /* the allocation here. */
  if (!BufferIsReset(buffer) && buffer->ap_s.limit == (Addr)0) {
    /* .fill.unflip: If the buffer is flipped then we unflip the buffer. */
    if (buffer->mode & BufferModeFLIPPED) {
      BufferSetUnflipped(buffer);
    }

    /* .fill.logged: If the buffer is logged then we leave it logged. */
    next = AddrAdd(buffer->ap_s.alloc, size);
    if (next > (Addr)buffer->ap_s.alloc &&
        next <= (Addr)buffer->poolLimit) {
      buffer->ap_s.alloc = next;
      if (buffer->mode & BufferModeLOGGED) {
        EVENT3(BufferReserve, buffer, buffer->ap_s.init, size);
      }
      *pReturn = buffer->ap_s.init;
      return ResOK;
    }
  }

  /* There really isn't enough room for the allocation now. */
  AVER(AddrAdd(buffer->ap_s.alloc, size) > buffer->poolLimit ||
       AddrAdd(buffer->ap_s.alloc, size) < (Addr)buffer->ap_s.alloc);

  BufferDetach(buffer, pool);

  /* Ask the pool for some memory. */
  res = Method(Pool, pool, bufferFill)(&base, &limit, pool, buffer, size);
  if (res != ResOK)
    return res;

  /* Set up the buffer to point at the memory given by the pool */
  /* and do the allocation that was requested by the client. */
  BufferAttach(buffer, base, limit, base, size);

  if (buffer->mode & BufferModeLOGGED) {
    EVENT3(BufferReserve, buffer, buffer->ap_s.init, size);
  }

  *pReturn = base;
  return res;
}
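A note on the fast path above: the test "next > (Addr)buffer->ap_s.alloc" is an overflow guard, not a progress check. If the addition wraps past the top of the address space, next compares below alloc and the in-line allocation is refused, which is exactly the condition re-asserted just before BufferDetach. A minimal sketch of the same pattern, using a plain uintptr_t cursor instead of the MPS Addr/AP types (names here are illustrative, not from the source):

#include <stddef.h>
#include <stdint.h>

/* Sketch: bump-allocate size bytes from [*alloc, limit), refusing on
   wraparound.  Returns the old cursor on success, NULL if it can't fit. */
static void *bump_alloc(uintptr_t *alloc, uintptr_t limit, size_t size)
{
  uintptr_t next = *alloc + size;
  if (next > *alloc && next <= limit) { /* next > *alloc rejects overflow */
    void *p = (void *)*alloc;
    *alloc = next;
    return p;
  }
  return NULL; /* caller must refill, as BufferFill does via the pool */
}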
Example #2
Res VMInit(VM vm, Size size, Size grainSize, void *params)
{
  LPVOID vbase;
  Size pageSize, reserved;
  VMParams vmParams = params;

  AVER(vm != NULL);
  AVERT(ArenaGrainSize, grainSize);
  AVER(size > 0);
  AVER(params != NULL); /* FIXME: Should have full AVERT? */

  AVER(COMPATTYPE(LPVOID, Addr));  /* .assume.lpvoid-addr */
  AVER(COMPATTYPE(SIZE_T, Size));

  pageSize = PageSize();

  /* Grains must consist of whole pages. */
  AVER(grainSize % pageSize == 0);

  /* Check that the rounded-up sizes will fit in a Size. */
  size = SizeRoundUp(size, grainSize);
  if (size < grainSize || size > (Size)(SIZE_T)-1)
    return ResRESOURCE;
  reserved = size + grainSize - pageSize;
  if (reserved < grainSize || reserved > (Size)(SIZE_T)-1)
    return ResRESOURCE;

  /* Allocate the address space. */
  vbase = VirtualAlloc(NULL,
                       reserved,
                       vmParams->topDown ?
                         MEM_RESERVE | MEM_TOP_DOWN :
                         MEM_RESERVE,
                       PAGE_NOACCESS);
  if (vbase == NULL)
    return ResRESOURCE;

  AVER(AddrIsAligned(vbase, pageSize));

  vm->pageSize = pageSize;
  vm->block = vbase;
  vm->base = AddrAlignUp(vbase, grainSize);
  vm->limit = AddrAdd(vm->base, size);
  AVER(vm->base < vm->limit);  /* .assume.not-last */
  AVER(vm->limit <= AddrAdd((Addr)vm->block, reserved));
  vm->reserved = reserved;
  vm->mapped = 0;

  vm->sig = VMSig;
  AVERT(VM, vm);

  EVENT3(VMInit, vm, VMBase(vm), VMLimit(vm));
  return ResOK;
}
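The over-reservation arithmetic above is worth a gloss: reserving size + grainSize - pageSize bytes guarantees that aligning the page-aligned vbase up to a grain boundary wastes at most grainSize - pageSize bytes, so at least size bytes always remain for [base, limit). A self-contained check of that claim with hypothetical concrete sizes (a sketch, not MPS code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
  uintptr_t pageSize = 4096, grainSize = 65536, size = (uintptr_t)1 << 20;
  uintptr_t reserved = size + grainSize - pageSize;
  uintptr_t vbase;
  /* The OS may return any page-aligned vbase; one grain's worth of page
     positions covers every residue modulo the grain size. */
  for (vbase = grainSize; vbase < 2 * grainSize; vbase += pageSize) {
    uintptr_t base = (vbase + grainSize - 1) & ~(grainSize - 1); /* align up */
    assert(base + size <= vbase + reserved); /* size bytes always fit */
  }
  return 0;
}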
Example #3
void MFSExtend(Pool pool, Addr base, Size size)
{
  MFS mfs;
  Tract tract;
  Word i, unitsPerExtent;
  Size unitSize;
  Header header = NULL;

  AVERT(Pool, pool);
  mfs = PoolPoolMFS(pool);
  AVERT(MFS, mfs);
  AVER(size == mfs->extendBy);

  /* Ensure that the memory we're adding belongs to this pool.  This is
     automatic if it was allocated using ArenaAlloc, but if the memory is
     being inserted from elsewhere then it must have been set up correctly. */
  AVER(PoolHasAddr(pool, base));
  
  /* .tract.chain: chain first tracts through TractP(tract) */
  tract = TractOfBaseAddr(PoolArena(pool), base);

  AVER(TractPool(tract) == pool);

  TractSetP(tract, (void *)mfs->tractList);
  mfs->tractList = tract;

  /* Update accounting */
  mfs->total += size;
  mfs->free += size;

  /* Sew together all the new empty units in the region, working down */
  /* from the top so that they are in ascending order of address on the */
  /* free list. */

  unitSize = mfs->unitSize;
  unitsPerExtent = size/unitSize;
  AVER(unitsPerExtent > 0);

#define SUB(b, s, i)    ((Header)AddrAdd(b, (s)*(i)))

  for (i = 0; i < unitsPerExtent; ++i) {
    header = SUB(base, unitSize, unitsPerExtent - i - 1);
    AVER(AddrIsAligned(header, pool->alignment));
    AVER(AddrAdd((Addr)header, unitSize) <= AddrAdd(base, size));
    header->next = mfs->freeList;
    mfs->freeList = header;
  }

#undef SUB
}
Example #4
static void MVFFFree(Pool pool, Addr old, Size size)
{
  Res res;
  Addr base, limit;
  MVFF mvff;

  AVERT(Pool, pool);
  mvff = Pool2MVFF(pool);
  AVERT(MVFF, mvff);

  AVER(old != (Addr)0);
  AVER(AddrIsAligned(old, PoolAlignment(pool)));
  AVER(size > 0);

  size = SizeAlignUp(size, PoolAlignment(pool));
  base = old;
  limit = AddrAdd(base, size);

  res = MVFFAddToFreeList(&base, &limit, mvff);
  AVER(res == ResOK);
  if (res == ResOK)
    MVFFFreeSegs(mvff, base, limit);

  return;
}
Example #5
Bool BufferCommit(Buffer buffer, Addr p, Size size)
{
  AVERT(Buffer, buffer);
  AVER(size > 0);
  AVER(SizeIsAligned(size, BufferPool(buffer)->alignment));
  AVER(!BufferIsReady(buffer));

  /* <design/collection#.flip>. */
  /* .commit.before: If a flip occurs before this point, when the */
  /* pool reads "initAtFlip" it will point below the object, so it */
  /* will be trashed and the commit must fail when trip is called.  */
  AVER(p == buffer->ap_s.init);
  AVER(AddrAdd(buffer->ap_s.init, size) == buffer->ap_s.alloc);

  /* .commit.update: Atomically update the init pointer to declare */
  /* that the object is initialized (though it may be invalid if a */
  /* flip occurred). */
  buffer->ap_s.init = buffer->ap_s.alloc;

  /* .improve.memory-barrier: Memory barrier here on the DEC Alpha */
  /* (and other relaxed memory order architectures). */
  /* .commit.after: If a flip occurs at this point, the pool will */
  /* see "initAtFlip" above the object, which is valid, so it will */
  /* be collected.  The commit must succeed when trip is called.  */
  /* The pointer "p" will have been fixed up. */
  /* TODO: Check the above assertion and explain why it is so. */
  /* .commit.trip: Trip the buffer if a flip has occurred. */
  if (buffer->ap_s.limit == 0)
    return BufferTrip(buffer, p, size);

  /* No flip occurred, so succeed. */

  return TRUE;
}
Example #6
static Addr FreelistBlockLimit(Freelist fl, FreelistBlock block)
{
  AVERT(Freelist, fl);
  if (FreelistBlockIsSmall(block)) {
    return AddrAdd(FreelistBlockBase(block), fl->alignment);
  } else {
    return block->large.limit;
  }
}
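For reference, since every example in this listing leans on it: Addr is an abstract byte address and AddrAdd offsets it by a Size. A plausible minimal definition, assuming byte addressing (the real definitions also assert that the arithmetic does not overflow):

#include <stddef.h>

typedef unsigned char *Addr;    /* sketch: an address measured in bytes */
typedef size_t Size;

#define AddrAdd(addr, size)     ((Addr)((char *)(addr) + (size)))
#define AddrOffset(base, limit) ((Size)((char *)(limit) - (char *)(base)))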
Example #7
/* MVFFAddSeg -- Allocates a new segment from the arena
 *
 * Allocates a new segment from the arena (with the given
 * withReservoirPermit flag) of at least the specified size.  The
 * specified size should be pool-aligned.  Adds it to the free list.
 */
static Res MVFFAddSeg(Seg *segReturn,
                      MVFF mvff, Size size, Bool withReservoirPermit)
{
  Pool pool;
  Arena arena;
  Size segSize;
  Seg seg;
  Res res;
  Align align;
  Addr base, limit;

  AVERT(MVFF, mvff);
  AVER(size > 0);
  AVERT(Bool, withReservoirPermit);

  pool = MVFF2Pool(mvff);
  arena = PoolArena(pool);
  align = ArenaAlign(arena);

  AVER(SizeIsAligned(size, PoolAlignment(pool)));

  /* Use extendBy unless it's too small (see */
  /* <design/poolmvff/#design.seg-size>). */
  if (size <= mvff->extendBy)
    segSize = mvff->extendBy;
  else
    segSize = size;

  segSize = SizeAlignUp(segSize, align);

  res = SegAlloc(&seg, SegClassGet(), mvff->segPref, segSize, pool,
                 withReservoirPermit, argsNone);
  if (res != ResOK) {
    /* try again for a seg just large enough for object */
    /* see <design/poolmvff/#design.seg-fail> */
    segSize = SizeAlignUp(size, align);
    res = SegAlloc(&seg, SegClassGet(), mvff->segPref, segSize, pool,
                   withReservoirPermit, argsNone);
    if (res != ResOK) {
      return res;
    }
  }

  mvff->total += segSize;
  base = SegBase(seg);
  limit = AddrAdd(base, segSize);
  DebugPoolFreeSplat(pool, base, limit);
  res = MVFFAddToFreeList(&base, &limit, mvff);
  AVER(res == ResOK);
  AVER(base <= SegBase(seg));
  if (mvff->minSegSize > segSize) mvff->minSegSize = segSize;

  /* Don't call MVFFFreeSegs; that would be silly. */

  *segReturn = seg;
  return ResOK;
}
Example #8
static Res VMChunkInit(Chunk chunk, BootBlock boot)
{
  VMChunk vmChunk;
  Addr overheadLimit;
  void *p;
  Res res;
  BT saMapped, saPages;

  /* chunk is supposed to be uninitialized, so don't check it. */
  vmChunk = Chunk2VMChunk(chunk);
  AVERT(BootBlock, boot);
  
  res = BootAlloc(&p, boot, BTSize(chunk->pages), MPS_PF_ALIGN);
  if (res != ResOK)
    goto failSaMapped;
  saMapped = p;
  
  res = BootAlloc(&p, boot, BTSize(chunk->pageTablePages), MPS_PF_ALIGN);
  if (res != ResOK)
    goto failSaPages;
  saPages = p;
  
  overheadLimit = AddrAdd(chunk->base, (Size)BootAllocated(boot));

  /* Put the page table as late as possible, as in VM systems we don't want */
  /* to map it. */
  res = BootAlloc(&p, boot, chunk->pageTablePages << chunk->pageShift,
                  chunk->pageSize);
  if (res != ResOK)
    goto failAllocPageTable;
  chunk->pageTable = p;

  /* Map memory for the bit tables. */
  if (vmChunk->overheadMappedLimit < overheadLimit) {
    overheadLimit = AddrAlignUp(overheadLimit, ChunkPageSize(chunk));
    res = vmArenaMap(VMChunkVMArena(vmChunk), vmChunk->vm,
                     vmChunk->overheadMappedLimit, overheadLimit);
    if (res != ResOK)
      goto failTableMap;
    vmChunk->overheadMappedLimit = overheadLimit;
  }

  SparseArrayInit(&vmChunk->pages,
                  chunk->pageTable,
                  sizeof(PageUnion),
                  chunk->pages,
                  saMapped, saPages, vmChunk->vm);

  return ResOK;

  /* .no-clean: No clean-ups needed for boot, as we will discard the chunk. */
failTableMap:
failSaPages:
failAllocPageTable:
failSaMapped:
  return res;
}
Example #9
void BufferAttach(Buffer buffer, Addr base, Addr limit,
                  Addr init, Size size)
{
  Size filled;

  AVERT(Buffer, buffer);
  AVER(BufferIsReset(buffer));
  AVER(AddrAdd(base, size) <= limit);
  AVER(base <= init);
  AVER(init <= limit);

  /* Set up the buffer to point at the supplied region */
  buffer->mode |= BufferModeATTACHED;
  buffer->base = base;
  buffer->ap_s.init = init;
  buffer->ap_s.alloc = AddrAdd(init, size);
  /* only set limit if not logged */
  if ((buffer->mode & BufferModeLOGGED) == 0) {
    buffer->ap_s.limit = limit;
  } else {
    AVER(buffer->ap_s.limit == (Addr)0);
  }
  AVER(buffer->initAtFlip == (Addr)0);
  buffer->poolLimit = limit;

  filled = AddrOffset(init, limit);
  buffer->fillSize += filled;
  if (buffer->isMutator) {
    if (base != init) { /* see <design/buffer#.count.alloc.how> */
      Size prealloc = AddrOffset(base, init);
      ArenaGlobals(buffer->arena)->allocMutatorSize -= prealloc;
    }
    ArenaGlobals(buffer->arena)->fillMutatorSize += filled;
  } else {
    ArenaGlobals(buffer->arena)->fillInternalSize += filled;
  }

  /* run any class-specific attachment method */
  Method(Buffer, buffer, attach)(buffer, base, limit, init, size);

  AVERT(Buffer, buffer);
  EVENT4(BufferFill, buffer, size, base, filled);
}
Example #10
Res VMCreate(VM *vmReturn, Size size)
{
  VM vm;

  AVER(vmReturn != NULL);

  /* Note that because we add VMANPageALIGNMENT rather than */
  /* VMANPageALIGNMENT-1 we are not in danger of overflowing */
  /* vm->limit even if malloc were perverse enough to give us */
  /* a block at the end of memory. */
  size = SizeAlignUp(size, VMANPageALIGNMENT) + VMANPageALIGNMENT;
  if ((size < VMANPageALIGNMENT) || (size > (Size)(size_t)-1))
    return ResRESOURCE;

  vm = (VM)malloc(sizeof(VMStruct));
  if (vm == NULL)
    return ResMEMORY;

  vm->block = malloc((size_t)size);
  if (vm->block == NULL) {
    free(vm);
    return ResMEMORY;
  }

  vm->base  = AddrAlignUp((Addr)vm->block, VMANPageALIGNMENT);
  vm->limit = AddrAdd(vm->base, size - VMANPageALIGNMENT);
  AVER(vm->limit < AddrAdd((Addr)vm->block, size));

  memset((void *)vm->block, VMJunkBYTE, size);
 
  /* Lie about the reserved address space, to simulate real */
  /* virtual memory. */
  vm->reserved = size - VMANPageALIGNMENT;
  vm->mapped = (Size)0;
 
  vm->sig = VMSig;

  AVERT(VM, vm);
 
  EVENT3(VMCreate, vm, vm->base, vm->limit);
  *vmReturn = vm;
  return ResOK;
}
Example #11
Bool ChunkCheck(Chunk chunk)
{
  CHECKS(Chunk, chunk);
  CHECKU(Arena, chunk->arena);
  CHECKL(chunk->serial < chunk->arena->chunkSerial);
  /* Can't use CHECKD_NOSIG because TreeEMPTY is NULL. */
  CHECKL(TreeCheck(&chunk->chunkTree));
  CHECKL(ChunkPagesToSize(chunk, 1) == ChunkPageSize(chunk));
  CHECKL(ShiftCheck(ChunkPageShift(chunk)));

  CHECKL(chunk->base != (Addr)0);
  CHECKL(chunk->base < chunk->limit);
  /* check chunk structure is at its own base: see .chunk.at.base. */
  CHECKL(chunk->base == (Addr)chunk);
  CHECKL((Addr)(chunk+1) <= chunk->limit);
  CHECKL(ChunkSizeToPages(chunk, ChunkSize(chunk)) == chunk->pages);
  /* check that the tables fit in the chunk */
  CHECKL(chunk->allocBase <= chunk->pages);
  CHECKL(chunk->allocBase >= chunk->pageTablePages);

  CHECKD_NOSIG(BT, chunk->allocTable);
  /* check that allocTable is in the chunk overhead */
  CHECKL((Addr)chunk->allocTable >= chunk->base);
  CHECKL(AddrAdd((Addr)chunk->allocTable, BTSize(chunk->pages))
         <= PageIndexBase(chunk, chunk->allocBase));

  /* check they don't overlap (knowing the order) */
  CHECKL(AddrAdd((Addr)chunk->allocTable, BTSize(chunk->pages))
         <= (Addr)chunk->pageTable);

  CHECKL(chunk->pageTable != NULL);
  CHECKL((Addr)chunk->pageTable >= chunk->base);
  CHECKL((Addr)&chunk->pageTable[chunk->pageTablePages]
         <= PageIndexBase(chunk, chunk->allocBase));
  CHECKL(NONNEGATIVE(INDEX_OF_ADDR(chunk, (Addr)chunk->pageTable)));
  /* check there's enough space in the page table */
  CHECKL(INDEX_OF_ADDR(chunk, AddrSub(chunk->limit, 1)) < chunk->pages);
  CHECKL(chunk->pageTablePages < chunk->pages);

  /* Could check the consistency of the tables, but not O(1). */
  return TRUE;
}
Example #12
void DoPlus( void )
{
    stack_entry *left;

    left = StkEntry( 1 );
    LRValue( left );
    RValue( ExprSP );
    switch( ExprSP->info.kind ) {
    case TK_POINTER:
    case TK_ADDRESS:
        /* get the pointer as the left operand */
        left = ExprSP;
        SwapStack( 1 );
    }
    AddOp( left, ExprSP );
    switch( left->info.kind ) {
    case TK_BOOL:
    case TK_ENUM:
    case TK_CHAR:
    case TK_INTEGER:
        U64Add( &left->v.uint, &ExprSP->v.uint, &left->v.uint );
        break;
    case TK_POINTER:
    case TK_ADDRESS:
        switch( ExprSP->info.kind ) {
        case TK_BOOL:
        case TK_ENUM:
        case TK_CHAR:
        case TK_INTEGER:
            break;
        default:
            Error( ERR_NONE, LIT_ENG( ERR_ILL_TYPE ) );
        }
        if( (left->info.modifier & TM_MOD_MASK) == TM_NEAR ) {
            //NYI: 64 bit offsets
            left->v.addr.mach.offset += U32FetchTrunc( ExprSP->v.uint );
        } else {
            //NYI: 64 bit offsets
            left->v.addr = AddrAdd( left->v.addr, U32FetchTrunc( ExprSP->v.uint ) );
        }
        break;
    case TK_REAL:
        LDAdd( &left->v.real, &ExprSP->v.real, &left->v.real );
        break;
    case TK_COMPLEX:
        LDAdd( &left->v.cmplx.re, &ExprSP->v.cmplx.re, &left->v.cmplx.re );
        LDAdd( &left->v.cmplx.im, &ExprSP->v.cmplx.im, &left->v.cmplx.im );
        break;
    default:
        Error( ERR_NONE, LIT_ENG( ERR_ILL_TYPE ) );
        break;
    }
    CombineEntries( left, left, ExprSP );
}
Example #13
File: pool.c Project: bhanug/mps
void PoolFree(Pool pool, Addr old, Size size)
{
  AVERT(Pool, pool);
  AVER(old != NULL);
  /* The pool methods should check that old is in pool. */
  AVER(size > 0);
  AVER(AddrIsAligned(old, pool->alignment));
  AVER(PoolHasRange(pool, old, AddrAdd(old, size)));

  Method(Pool, pool, free)(pool, old, size);
 
  EVENT3(PoolFree, pool, old, size);
}
Example #14
void DoMinus( void )
{
    stack_entry *left;

    left = StkEntry( 1 );
    LRValue( left );
    RValue( ExprSP );
    AddOp( left, ExprSP );
    switch( left->info.kind ) {
    case TK_BOOL:
    case TK_ENUM:
    case TK_CHAR:
    case TK_INTEGER:
        U64Sub( &left->v.uint, &ExprSP->v.uint, &left->v.uint );
        left->info.modifier = TM_SIGNED;
        break;
    case TK_POINTER:
    case TK_ADDRESS:
        switch( ExprSP->info.kind ) {
        case TK_BOOL:
        case TK_CHAR:
        case TK_ENUM:
        case TK_INTEGER:
            //NYI: 64 bit offsets
            left->v.addr = AddrAdd( left->v.addr, -U32FetchTrunc( ExprSP->v.uint ) );
            break;
        case TK_POINTER:
        case TK_ADDRESS:
            I32ToI64( AddrDiff( left->v.addr, ExprSP->v.addr ), &left->v.sint );
            left->info.kind = TK_INTEGER;
            left->info.modifier = TM_SIGNED;
            left->info.size = sizeof( signed_64 );
            left->th = NULL;
            break;
        default:
            Error( ERR_NONE, LIT_ENG( ERR_ILL_TYPE ) );
        }
        break;
    case TK_REAL:
        LDSub( &left->v.real, &ExprSP->v.real, &left->v.real );
        break;
    case TK_COMPLEX:
        LDSub( &left->v.cmplx.re, &ExprSP->v.cmplx.re, &left->v.cmplx.re );
        LDSub( &left->v.cmplx.im, &ExprSP->v.cmplx.im, &left->v.cmplx.im );
        break;
    default:
        Error( ERR_NONE, LIT_ENG( ERR_ILL_TYPE ) );
        break;
    }
    CombineEntries( left, left, ExprSP );
}
Example #15
static void sigHandle(int sig, siginfo_t *info, void *context)
{
  int e;
  sigset_t sigset, oldset;
  struct sigaction sa;

  AVER(sig == SIGSEGV);
  AVER(info != NULL);

  if(info->si_code == SEGV_ACCERR) {
    AccessSet mode;
    Addr base, limit;

    /* We can't determine the access mode (read, write, etc.) */
    /* under Solaris without decoding the faulting instruction. */
    /* Don't bother, yet.  We can do this if necessary. */

    mode = AccessREAD | AccessWRITE;

    /* We assume that the access is for one word at the address. */

    base = (Addr)info->si_addr;
    limit = AddrAdd(base, (Size)sizeof(Addr));

    /* Offer each protection structure the opportunity to handle the */
    /* exception.  If it succeeds, then allow the mutator to continue. */

    /* MutatorFaultContext parameter is a dummy parameter in this */
    /* implementation */
    if(ArenaAccess(base, mode, NULL))
      return;
  }

  /* The exception was not handled by any known protection structure, */
  /* so throw it to the previously installed handler. */

  /* @@ This is really weak.
   * Need to implement rest of the contract of sigaction */
 
  e = sigaction(SIGSEGV, &sigNext, &sa);
  AVER(e == 0);
  sigemptyset(&sigset);
  sigaddset(&sigset, SIGSEGV);
  e = sigprocmask(SIG_UNBLOCK, &sigset, &oldset);
  AVER(e == 0);
  kill(getpid(), SIGSEGV);
  e = sigprocmask(SIG_SETMASK, &oldset, NULL);
  AVER(e == 0);
  e = sigaction(SIGSEGV, &sa, NULL);
  AVER(e == 0);
}
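This handler and its siblings below (Examples #18 and #23) chain to a previous disposition saved in sigNext, but the installation side does not appear in this listing. A minimal sketch of how such a handler would be registered with sigaction, saving the old action for the chaining above (protSetup is an illustrative name, not taken from the source):

#include <signal.h>

static struct sigaction sigNext; /* previous SIGSEGV disposition */

static void protSetup(void)
{
  struct sigaction sa;
  sa.sa_sigaction = sigHandle; /* the handler defined above */
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_SIGINFO;    /* ask for the siginfo_t argument */
  (void)sigaction(SIGSEGV, &sa, &sigNext); /* remember the old action */
}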
Example #16
static void test(mps_arena_t arena)
{
  BT bt;
  Nailboard board;
  Align align;
  Count nails;
  Addr base, limit;
  Index i, j, k;

  align = (Align)1 << (rnd() % 10);
  nails = (Count)1 << (rnd() % 16);
  nails += rnd() % nails;
  base = AddrAlignUp(0, align);
  limit = AddrAdd(base, nails * align);

  die(BTCreate(&bt, arena, nails), "BTCreate");
  BTResRange(bt, 0, nails);
  die(NailboardCreate(&board, arena, align, base, limit), "NailboardCreate");

  for (i = 0; i <= nails / 8; ++i) {
    Bool old;
    j = rnd() % nails;
    old = BTGet(bt, j);
    BTSet(bt, j);
    cdie(NailboardSet(board, AddrAdd(base, j * align)) == old, "NailboardSet");
    for (k = 0; k < nails / 8; ++k) {
      Index b, l;
      b = rnd() % nails;
      l = b + rnd() % (nails - b) + 1;
      cdie(BTIsResRange(bt, b, l)
           == NailboardIsResRange(board, AddrAdd(base, b * align),
                                  AddrAdd(base, l * align)),
           "NailboardIsResRange");
    }
  }

  die(NailboardDescribe(board, mps_lib_get_stdout(), 0), "NailboardDescribe");
}
Example #17
static Bool VMChunkCheck(VMChunk vmchunk)
{
  Chunk chunk;

  CHECKS(VMChunk, vmchunk);
  chunk = VMChunk2Chunk(vmchunk);
  CHECKD(Chunk, chunk);
  CHECKD_NOSIG(VM, vmchunk->vm); /* <design/check/#hidden-type> */
  CHECKL(VMAlign(vmchunk->vm) == ChunkPageSize(chunk));
  CHECKL(vmchunk->overheadMappedLimit <= (Addr)chunk->pageTable);
  CHECKD(SparseArray, &vmchunk->pages);
  /* SparseArrayCheck is agnostic about where the BTs live, so VMChunkCheck
     makes sure they're where they're expected to be (in the chunk). */
  CHECKL(chunk->base < (Addr)vmchunk->pages.mapped);
  CHECKL(AddrAdd(vmchunk->pages.mapped, BTSize(chunk->pages)) <=
         vmchunk->overheadMappedLimit);
  CHECKL(chunk->base < (Addr)vmchunk->pages.pages);
  CHECKL(AddrAdd(vmchunk->pages.pages, BTSize(chunk->pageTablePages)) <=
         vmchunk->overheadMappedLimit);
  /* .improve.check-table: Could check the consistency of the tables. */
  
  return TRUE;
}
Example #18
static void sigHandle(int sig, siginfo_t *info, void *contextArg)
{
  ucontext_t *ucontext;

  AVER(sig == SIGBUS);
  AVER(info != NULL);

  ucontext = contextArg;

  /* On OS X the si_code field doesn't appear to be useful.  Protection
   * faults appear as SIGBUS signals, and the only documented code for
   * SIGBUS is BUS_ADRALN (invalid address alignment) which is what
   * si_code is set to (it has value 1), even though the address is
   * in fact aligned.
   * On other platforms a test like info->si_code == SEGV_ACCERR appears
   * here. */
  if(1) {
    AccessSet mode;
    Addr base, limit;

    /* We don't bother to determine the access mode (read, write, etc.)
     * under OS X.  It's possible that this information is available in
     * the context structures.  Needs more investigation.
     */

    mode = AccessREAD | AccessWRITE;

    /* We assume that the access is for one word at the address. */

    /* See [PPCUCH] */
    base = (Addr)ucontext->uc_mcontext->es.dar;
    limit = AddrAdd(base, (Size)sizeof(Addr));

    /* Offer each protection structure the opportunity to handle the */
    /* exception.  If it succeeds, then allow the mutator to continue. */

    /* MutatorFaultContext parameter is a dummy parameter for this */
    /* implementation */
    if(ArenaAccess(base, mode, NULL)) {
      return;
    }
  }

  /* The exception was not handled by any known protection structure, */
  /* so throw it to the previously installed handler. */

  /* @@ This is really weak.
   * Need to implement rest of the contract of sigaction */
  (*sigNext.sa_sigaction)(sig, info, contextArg);
}
Example #19
static Compare TagComp(void *key, SplayNode node)
{
  Addr addr1, addr2;

  addr1 = *(Addr *)key;
  addr2 = SplayNode2Tag(node)->addr;
  if (addr1 < addr2)
    return CompareLESS;
  else if (addr1 > addr2) {
    /* Check key is not inside the object of this tag */
    AVER_CRITICAL(AddrAdd(addr2, SplayNode2Tag(node)->size) <= addr1);
    return CompareGREATER;
  } else
    return CompareEQUAL;
}
Example #20
static Compare TagCompare(Tree node, TreeKey key)
{
  Addr addr1, addr2;

  addr1 = *(Addr *)key;
  addr2 = TagOfTree(node)->addr;
  if (addr1 < addr2)
    return CompareLESS;
  else if (addr1 > addr2) {
    /* Check key is not inside the object of this tag */
    AVER_CRITICAL(AddrAdd(addr2, TagOfTree(node)->size) <= addr1);
    return CompareGREATER;
  } else
    return CompareEQUAL;
}
Example #21
ATTRIBUTE_UNUSED
static Bool VMChunkCheck(VMChunk vmchunk)
{
  Chunk chunk;

  CHECKS(VMChunk, vmchunk);
  chunk = VMChunk2Chunk(vmchunk);
  CHECKD(Chunk, chunk);
  CHECKD(VM, VMChunkVM(vmchunk));
  CHECKL(SizeIsAligned(ChunkPageSize(chunk), VMPageSize(VMChunkVM(vmchunk))));
  CHECKL(vmchunk->overheadMappedLimit <= (Addr)chunk->pageTable);
  CHECKD(SparseArray, &vmchunk->pages);
  /* SparseArrayCheck is agnostic about where the BTs live, so VMChunkCheck
     makes sure they're where they're expected to be (in the chunk). */
  CHECKL(chunk->base < (Addr)vmchunk->pages.mapped);
  CHECKL(AddrAdd(vmchunk->pages.mapped, BTSize(chunk->pages)) <=
         vmchunk->overheadMappedLimit);
  CHECKL(chunk->base < (Addr)vmchunk->pages.pages);
  CHECKL(AddrAdd(vmchunk->pages.pages, BTSize(chunk->pageTablePages)) <=
         vmchunk->overheadMappedLimit);
  /* .improve.check-table: Could check the consistency of the tables. */
  
  return TRUE;
}
Example #22
static Res SegAbsInit(Seg seg, Pool pool, Addr base, Size size, ArgList args)
{
  Arena arena;
  Addr addr, limit;
  Tract tract;
  
  AVER(seg != NULL);
  AVERT(Pool, pool);
  arena = PoolArena(pool);
  AVER(AddrIsArenaGrain(base, arena));
  AVER(SizeIsArenaGrains(size, arena));
  AVERT(ArgList, args);

  NextMethod(Inst, Seg, init)(CouldBeA(Inst, seg));

  limit = AddrAdd(base, size);
  seg->limit = limit;
  seg->rankSet = RankSetEMPTY;
  seg->white = TraceSetEMPTY;
  seg->nailed = TraceSetEMPTY;
  seg->grey = TraceSetEMPTY;
  seg->pm = AccessSetEMPTY;
  seg->sm = AccessSetEMPTY;
  seg->defer = WB_DEFER_INIT;
  seg->depth = 0;
  seg->queued = FALSE;
  seg->firstTract = NULL;
  RingInit(SegPoolRing(seg));
  
  TRACT_FOR(tract, addr, arena, base, limit) {
    AVERT(Tract, tract);
    AVER(TractP(tract) == NULL);
    AVER(!TractHasSeg(tract));
    AVER(TractPool(tract) == pool);
    AVER(TractWhite(tract) == TraceSetEMPTY);
    TRACT_SET_SEG(tract, seg);
    if (addr == base) {
      AVER(seg->firstTract == NULL);
      seg->firstTract = tract;
    }
    AVER(seg->firstTract != NULL);
  }

  /* (The listing is truncated here.  At minimum the function must set its
     signature, check itself, and report success, so a plausible completion
     follows; it is a sketch, not the original source.) */
  seg->sig = SegSig;
  AVERT(Seg, seg);

  return ResOK;
}
Example #23
static void sigHandle(int sig, siginfo_t *info, void *context)  /* .sigh.args */
{
  int e;
  /* sigset renamed to asigset due to clash with global on Darwin. */
  sigset_t asigset, oldset;
  struct sigaction sa;

  AVER(sig == PROT_SIGNAL);

  /* .sigh.context */
  if(PROT_SIGINFO_GOOD(info)) {
    AccessSet mode;
    Addr base, limit;

    mode = AccessREAD | AccessWRITE; /* .sigh.mode */

    /* We assume that the access is for one word at the address. */
    base = (Addr)info->si_addr;   /* .sigh.context */
    limit = AddrAdd(base, (Size)sizeof(Addr));

    /* Offer each protection structure the opportunity to handle the */
    /* exception.  If it succeeds, then allow the mutator to continue. */
    if(ArenaAccess(base, mode, NULL))
      return;
  }

  /* The exception was not handled by any known protection structure, */
  /* so throw it to the previously installed handler. */

  e = sigaction(PROT_SIGNAL, &sigNext, &sa);
  AVER(e == 0);
  sigemptyset(&asigset);
  sigaddset(&asigset, PROT_SIGNAL);
  e = sigprocmask(SIG_UNBLOCK, &asigset, &oldset);
  AVER(e == 0);
  kill(getpid(), PROT_SIGNAL);
  e = sigprocmask(SIG_SETMASK, &oldset, NULL);
  AVER(e == 0);
  e = sigaction(PROT_SIGNAL, &sa, NULL);
  AVER(e == 0);
}
Example #24
0
static void MVFFFree(Pool pool, Addr old, Size size)
{
  Addr base, limit;
  MVFF mvff;

  AVERT(Pool, pool);
  mvff = Pool2MVFF(pool);
  AVERT(MVFF, mvff);

  AVER(old != (Addr)0);
  AVER(size > 0);

  base = AddrAlignUp(old, PoolAlignment(pool));
  size = size - AddrOffset(old, base);
  size = SizeAlignUp(size, PoolAlignment(pool));
  limit = AddrAdd(base, size);

  MVFFAddToFreeList(&base, &limit, mvff);

  MVFFFreeSegs(mvff, base, limit);
}
Example #25
Res BufferReserve(Addr *pReturn, Buffer buffer, Size size)
{
  Addr next;

  AVER(pReturn != NULL);
  AVERT(Buffer, buffer);
  AVER(size > 0);
  AVER(SizeIsAligned(size, BufferPool(buffer)->alignment));
  AVER(BufferIsReady(buffer)); /* <design/check/#.common> */

  /* Is there enough room in the unallocated portion of the buffer to */
  /* satisfy the request?  If so, just increase the alloc marker and */
  /* return a pointer to the area below it. */
  next = AddrAdd(buffer->ap_s.alloc, size);
  if (next > (Addr)buffer->ap_s.alloc &&
      next <= (Addr)buffer->ap_s.limit) {
    buffer->ap_s.alloc = next;
    *pReturn = buffer->ap_s.init;
    return ResOK;
  }

  /* If the buffer can't accommodate the request, call "fill". */
  return BufferFill(pReturn, buffer, size);
}
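BufferReserve pairs with BufferCommit (Example #5) in a two-phase protocol: reserve space, initialize the object, then commit; commit reports failure if a flip intervened, and the client must retry from the reserve. A sketch of the client-side loop implied by those two signatures (allocateObject and FormatObject are stand-ins, not from the source):

extern void FormatObject(Addr p, Size size); /* hypothetical initializer */

static Res allocateObject(Addr *pReturn, Buffer buffer, Size size)
{
  Addr p;
  Res res;
  do {
    res = BufferReserve(&p, buffer, size);
    if (res != ResOK)
      return res;          /* a genuine failure, e.g. out of memory */
    FormatObject(p, size); /* initialize before declaring it committed */
  } while (!BufferCommit(buffer, p, size)); /* FALSE: flipped, so retry */
  *pReturn = p;
  return ResOK;
}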
Example #26
Res VMCreate(VM *vmReturn, Size size, void *params)
{
  LPVOID vbase;
  SYSTEM_INFO si;
  Align align;
  VM vm;
  Res res;
  BOOL b;
  VMParams vmParams = params;

  AVER(vmReturn != NULL);
  AVER(params != NULL); /* FIXME: Should have full AVERT? */

  AVER(COMPATTYPE(LPVOID, Addr));  /* .assume.lpvoid-addr */
  AVER(COMPATTYPE(SIZE_T, Size));

  GetSystemInfo(&si);
  align = (Align)si.dwPageSize;
  AVER((DWORD)align == si.dwPageSize); /* check it didn't truncate */
  AVER(SizeIsP2(align));    /* see .assume.sysalign */
  size = SizeAlignUp(size, align);
  if ((size == 0) || (size > (Size)(SIZE_T)-1))
    return ResRESOURCE;

  /* Allocate the vm descriptor.  This is likely to be wasteful. */
  vbase = VirtualAlloc(NULL, SizeAlignUp(sizeof(VMStruct), align),
                       MEM_COMMIT, PAGE_READWRITE);
  if (vbase == NULL)
    return ResMEMORY;
  vm = (VM)vbase;

  /* Allocate the address space. */
  vbase = VirtualAlloc(NULL,
                       size,
                       vmParams->topDown ?
                         MEM_RESERVE | MEM_TOP_DOWN :
                         MEM_RESERVE,
                       PAGE_NOACCESS);
  if (vbase == NULL) {
    res = ResRESOURCE;
    goto failReserve;
  }

  AVER(AddrIsAligned(vbase, align));

  vm->align = align;
  vm->base = (Addr)vbase;
  vm->limit = AddrAdd(vbase, size);
  vm->reserved = size;
  vm->mapped = 0;
  AVER(vm->base < vm->limit);  /* .assume.not-last */

  vm->sig = VMSig;
  AVERT(VM, vm);

  EVENT3(VMCreate, vm, vm->base, vm->limit);
  *vmReturn = vm;
  return ResOK;

failReserve:
  b = VirtualFree((LPVOID)vm, (SIZE_T)0, MEM_RELEASE);
  AVER(b != 0);
  return res;
}
Example #27
void RangeInitSize(Range range, Addr base, Size size)
{
  RangeInit(range, base, AddrAdd(base, size));
}
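RangeInitSize simply converts the base/size convention into the half-open base/limit convention that RangeInit and most of the examples above use. A usage sketch, given some Addr base (that Range points to a RangeStruct is an assumption about the API):

RangeStruct rangeStruct; /* assumed backing type for Range */
RangeInitSize(&rangeStruct, base, (Size)4096);
/* equivalent to: RangeInit(&rangeStruct, base, AddrAdd(base, (Size)4096)); */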
Example #28
Res VMCreate(VM *vmReturn, Size size, void *params)
{
  Align align;
  VM vm;
  int pagesize;
  void *addr;
  Res res;

  AVER(vmReturn != NULL);
  AVER(params != NULL);

  /* Find out the page size from the OS */
  pagesize = getpagesize();
  /* check the actual returned pagesize will fit in an object of */
  /* type Align. */
  AVER(pagesize > 0);
  AVER((unsigned long)pagesize <= (unsigned long)(Align)-1);
  align = (Align)pagesize;
  AVER(SizeIsP2(align));
  size = SizeAlignUp(size, align);
  if((size == 0) || (size > (Size)(size_t)-1))
    return ResRESOURCE;

  /* Map in a page to store the descriptor on. */
  addr = mmap(0, (size_t)SizeAlignUp(sizeof(VMStruct), align),
              PROT_READ | PROT_WRITE,
              MAP_ANON | MAP_PRIVATE,
              -1, 0);
  /* On Darwin the MAP_FAILED return value is not documented, but does
   * work.  MAP_FAILED _is_ documented by POSIX.
   */
  if(addr == MAP_FAILED) {
    int e = errno;
    AVER(e == ENOMEM); /* .assume.mmap.err */
    return ResMEMORY;
  }
  vm = (VM)addr;

  vm->align = align;

  /* See .assume.not-last. */
  addr = mmap(0, (size_t)size,
              PROT_NONE, MAP_ANON | MAP_PRIVATE,
              -1, 0);
  if(addr == MAP_FAILED) {
    int e = errno;
    AVER(e == ENOMEM); /* .assume.mmap.err */
    res = ResRESOURCE;
    goto failReserve;
  }

  vm->base = (Addr)addr;
  vm->limit = AddrAdd(vm->base, size);
  vm->reserved = size;
  vm->mapped = (Size)0;

  vm->sig = VMSig;

  AVERT(VM, vm);

  EVENT3(VMCreate, vm, vm->base, vm->limit);

  *vmReturn = vm;
  return ResOK;

failReserve:
  (void)munmap((void *)vm, (size_t)SizeAlignUp(sizeof(VMStruct), align));
  return res;
}
Example #29
Res VMCreate(VM *vmReturn, Size size)
{
  void *addr;
  Align align;
  int zero_fd;
  VM vm;
  Res res;

  AVER(vmReturn != NULL);

  align = (Align)sysconf(_SC_PAGESIZE);
  AVER(SizeIsP2(align));
  size = SizeAlignUp(size, align);
  if((size == 0) || (size > (Size)(size_t)-1))
    return ResRESOURCE;

  zero_fd = open("/dev/zero", O_RDONLY);
  if(zero_fd == -1)
    return ResFAIL;

  /* Map in a page to store the descriptor on. */
  addr = mmap((void *)0, (size_t)SizeAlignUp(sizeof(VMStruct), align),
              PROT_READ | PROT_WRITE, MAP_PRIVATE,
              zero_fd, (off_t)0);
  if(addr == MAP_FAILED) {
    AVER(errno == ENOMEM || errno == EAGAIN); /* .assume.mmap.err */
    res = (errno == ENOMEM || errno == EAGAIN) ? ResMEMORY : ResFAIL;
    goto failVMMap;
  }
  vm = (VM)addr;

  vm->zero_fd = zero_fd;
  vm->align = align;

  /* .map.reserve: MAP_AUTORESRV is necessary to avoid reserving swap. */
  addr = mmap((void *)0, (size_t)size, PROT_NONE, MAP_SHARED | MAP_AUTORESRV,
              zero_fd, (off_t)0);
  if(addr == MAP_FAILED) {
    AVER(errno == ENOMEM); /* .assume.mmap.err */
    res = (errno == ENOMEM) ? ResRESOURCE : ResFAIL;
    goto failReserve;
  }

  vm->base = (Addr)addr;
  vm->limit = AddrAdd(vm->base, size);
  vm->reserved = size;
  vm->mapped = (Size)0;

  vm->sig = VMSig;

  AVERT(VM, vm);

  EVENT_PAA(VMCreate, vm, vm->base, vm->limit);

  *vmReturn = vm;
  return ResOK;

failReserve:
  (void)munmap((void *)vm, (size_t)SizeAlignUp(sizeof(VMStruct), align));
failVMMap:
  (void)close(zero_fd);
  return res;
}
Example #30
Addr TractLimit(Tract tract, Arena arena)
{
  AVERT_CRITICAL(Tract, tract); /* .tract.critical */
  AVERT_CRITICAL(Arena, arena);
  return AddrAdd(TractBase(tract), ArenaGrainSize(arena));
}