Example #1
Res EventInit(void)
{
  Res res;

  /* Only if this is the first call. */
  if(!eventInited) { /* See .trans.log */
    AVER(EventNext == 0);
    AVER(EventLimit == 0);
    res = (Res)mps_io_create(&eventIO);
    if(res != ResOK) return res;
    EventNext = eventBuffer;
    EventLimit = &eventBuffer[EventBufferSIZE];
    eventUserCount = (Count)1;
    eventError = ResOK;
    eventInited = TRUE;
    EventKindControl = (Word)mps_lib_telemetry_control();
    EventInternSerial = (Serial)1; /* 0 is reserved */
    (void)EventInternString(MPSVersion()); /* emit version */
  } else {
    ++eventUserCount;
  }

  return ResOK;
}
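EventInit is reference-counted: only the first call creates the I/O stream; later calls just bump eventUserCount. For illustration, here is a minimal sketch of the matching finish step. It is not the MPS EventFinish (which also flushes buffered events); mps_io_destroy is assumed to be the inverse of mps_io_create.

void EventFinishSketch(void)
{
  AVER(eventInited);
  AVER(eventUserCount > 0);

  --eventUserCount;
  if(eventUserCount == 0) {
    /* Sketch only: the real EventFinish flushes the event buffer
       before tearing down. */
    mps_io_destroy(eventIO);
    EventNext = 0;
    EventLimit = 0;
    eventInited = FALSE;
  }
}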
Example #2
static void mapThreadRing(Ring threadRing, void (*func)(Thread))
{
  Ring node, next;
  mach_port_t self;

  AVERT(Ring, threadRing);

  self = mach_thread_self();
  AVER(MACH_PORT_VALID(self));
  RING_FOR(node, threadRing, next) {
    Thread thread = RING_ELT(Thread, arenaRing, node);
    AVERT(Thread, thread);
    if(thread->port != self)
      (*func)(thread);
  }
}
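A typical use is to apply a thread-control operation to every registered thread except the current one. A hedged usage sketch follows; threadSuspend and suspendOthers are hypothetical helpers, while thread_suspend is the standard Mach call:

/* Hypothetical callback: suspend one thread via its Mach port. */
static void threadSuspend(Thread thread)
{
  kern_return_t kr;
  kr = thread_suspend(thread->port);
  AVER(kr == KERN_SUCCESS);
}

/* Hypothetical usage: suspend all threads in the ring except self. */
static void suspendOthers(Ring threadRing)
{
  mapThreadRing(threadRing, threadSuspend);
}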
Example #3
void TreeRotateLeft(Tree *treeIO)
{
  Tree tree, right;

  AVER(treeIO != NULL);
  tree = *treeIO;
  AVERT(Tree, tree);
  right = TreeRight(tree);
  AVERT(Tree, right);

  TreeSetRight(tree, TreeLeft(right));
  TreeSetLeft(right, tree);

  *treeIO = right;
}
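MPS also has the mirror operation, TreeRotateRight (it appears in Example #14 below). As a sketch of its likely shape, assuming the same Tree accessors, swap left and right throughout:

/* Mirror-image sketch of TreeRotateLeft, under the same accessor
   assumptions; not copied from the MPS source. */
void TreeRotateRightSketch(Tree *treeIO)
{
  Tree tree, left;

  AVER(treeIO != NULL);
  tree = *treeIO;
  AVERT(Tree, tree);
  left = TreeLeft(tree);
  AVERT(Tree, left);

  TreeSetLeft(tree, TreeRight(left));
  TreeSetRight(left, tree);

  *treeIO = left;
}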
Example #4
void VMDestroy(VM vm)
{
  int r;
  int zero_fd;

  AVERT(VM, vm);
  AVER(vm->mapped == (Size)0);

  /* This appears to be pretty pointless, since the descriptor */
  /* page is about to vanish completely.  However, munmap might fail */
  /* for some reason, and this would ensure that it was still */
  /* discovered if sigs were being checked. */
  vm->sig = SigInvalid;

  zero_fd = vm->zero_fd;
  r = munmap((void *)vm->base, (size_t)AddrOffset(vm->base, vm->limit));
  AVER(r == 0);
  r = munmap((void *)vm, (size_t)SizeAlignUp(sizeof(VMStruct), vm->align));
  AVER(r == 0);
  r = close(zero_fd);
  AVER(r == 0);

  EVENT_P(VMDestroy, vm);
}
Example #5
Res BufferReserve(Addr *pReturn, Buffer buffer, Size size)
{
  Addr next;

  AVER(pReturn != NULL);
  AVERT(Buffer, buffer);
  AVER(size > 0);
  AVER(SizeIsAligned(size, BufferPool(buffer)->alignment));
  AVER(BufferIsReady(buffer)); /* <design/check/#.common> */

  /* Is there enough room in the unallocated portion of the buffer to */
  /* satisfy the request?  If so, just increase the alloc marker and */
  /* return a pointer to the area below it. */
  next = AddrAdd(buffer->ap_s.alloc, size);
  if (next > (Addr)buffer->ap_s.alloc &&
      next <= (Addr)buffer->ap_s.limit) {
    buffer->ap_s.alloc = next;
    *pReturn = buffer->ap_s.init;
    return ResOK;
  }

  /* If the buffer can't accommodate the request, call "fill". */
  return BufferFill(pReturn, buffer, size);
}
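BufferReserve is the reserve half of the MPS two-phase (reserve/commit) allocation protocol: the caller reserves, initializes the object, then commits, retrying if a flip invalidated the reservation. A usage sketch, assuming BufferCommit has its usual shape (returning FALSE when the commit must be retried) and a hypothetical myInitObject initializer:

/* Usage sketch of the reserve/commit protocol. */
static Res allocObject(Addr *pReturn, Buffer buffer, Size size)
{
  Addr p;
  Res res;

  do {
    res = BufferReserve(&p, buffer, size);
    if (res != ResOK)
      return res;
    myInitObject(p, size);      /* hypothetical client initializer */
  } while (!BufferCommit(buffer, p, size));

  *pReturn = p;
  return ResOK;
}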
Example #6
static void vmArenaUnmap(VMArena vmArena, VM vm, Addr base, Addr limit)
{
  Arena arena;
  Size size;

  /* no checking as function is local to module */

  arena = VMArena2Arena(vmArena);
  size = AddrOffset(base, limit);
  AVER(size <= arena->committed);

  VMUnmap(vm, base, limit);
  arena->committed -= size;
  return;
}
Example #7
void (ShieldRaise) (Arena arena, Seg seg, AccessSet mode)
{
    /* .seg.broken: Seg's shield invariants may not be true at */
    /* this point (this function is called to enforce them) so we */
    /* can't check seg. Nor can we check arena as that checks the */
    /* segs in the cache. */

    AVER((SegSM(seg) & mode) == AccessSetEMPTY);
    SegSetSM(seg, SegSM(seg) | mode); /* inv.prot.shield preserved */

    /* ensure inv.unsynced.suspended & inv.unsynced.depth */
    cache(arena, seg);
    AVERT(Arena, arena);
    AVERT(Seg, seg);
}
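The inverse operation lowers the shield by clearing the mode bits. A minimal sketch, assuming the same cache/invariant discipline as ShieldRaise; the real ShieldLower must also bring hardware protection back into sync:

/* Sketch of the inverse operation; not the MPS ShieldLower, which
   also resynchronizes hardware protection. */
void ShieldLowerSketch(Arena arena, Seg seg, AccessSet mode)
{
    AVER((SegSM(seg) & mode) == mode); /* mode must currently be raised */
    SegSetSM(seg, SegSM(seg) & ~mode);
    cache(arena, seg);
    AVERT(Arena, arena);
    AVERT(Seg, seg);
}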
Example #8
void mps_pool_check_fenceposts(mps_pool_t mps_pool)
{
  Pool pool = (Pool)mps_pool;
  Arena arena;
  
  /* TESTT not AVERT, see <design/interface-c#.check.space> */
  AVER(TESTT(Pool, pool));
  arena = PoolArena(pool);

  ArenaEnter(arena);

  AVERT(Pool, pool);
  DebugPoolCheckFences(pool);

  ArenaLeave(arena);
}
Example #9
Res StackScan(ScanState ss, Addr *stackBot)
{
  jmp_buf jb;
  void *stackTop = &jb;

  /* .assume.stack: This implementation assumes that the stack grows
   * downwards, so that the address of the jmp_buf is the limit of the
   * part of the stack that needs to be scanned. (StackScanInner makes
   * the same assumption.)
   */
  AVER(stackTop < (void *)stackBot);

  (void)setjmp(jb);

  return StackScanInner(ss, stackBot, stackTop, sizeof jb / sizeof(Addr*));
}
Example #10
static void BufferAbsFinish(Inst inst)
{
  Buffer buffer = MustBeA(Buffer, inst);
  AVERT(Buffer, buffer);
  AVER(BufferIsReset(buffer));

  /* Detach the buffer from its owning pool and unsig it. */
  RingRemove(&buffer->poolRing);
  InstFinish(MustBeA(Inst, buffer));
  buffer->sig = SigInvalid;
 
  /* Finish off the generic buffer fields. */
  RingFinish(&buffer->poolRing);

  EVENT1(BufferFinish, buffer);
}
Example #11
Res MutatorContextScan(ScanState ss, MutatorContext context,
                       mps_area_scan_t scan_area, void *closure)
{
  CONTEXT *cx;
  Res res;

  AVERT(ScanState, ss);
  AVERT(MutatorContext, context);
  AVER(context->var == MutatorContextTHREAD);

  cx = &context->the.context;
  res = TraceScanArea(ss, (Word *)cx, (Word *)((char *)cx + sizeof *cx),
                      scan_area, closure); /* .context.regroots */

  return res;
}
Example #12
void mps_pool_check_free_space(mps_pool_t mps_pool)
{
  Pool pool = (Pool)mps_pool;
  Arena arena;
  
  /* TESTT not AVERT, see <design/interface-c#.check.space> */
  AVER(TESTT(Pool, pool));
  arena = PoolArena(pool);

  ArenaEnter(arena);

  AVERT(Pool, pool);
  DebugPoolCheckFreeSpace(pool);

  ArenaLeave(arena);
}
Example #13
/* LDReset -- reset a dependency to empty
 *
 * .reset.sync: This does not need to be synchronized with LDAge
 * because if the epoch advances after it is read the dependency
 * will simply include movement for more time than necessary.
 */
void LDReset(mps_ld_t ld, Arena arena)
{
  Bool b;
  Seg seg;

  AVER(ld != NULL);
  AVERT(Arena, arena);

  b = SegOfAddr(&seg, arena, (Addr)ld);
  if (b)
    ShieldExpose(arena, seg);   /* .ld.access */
  ld->_epoch = arena->epoch;
  ld->_rs = RefSetEMPTY;
  if (b)
    ShieldCover(arena, seg);
}
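On the client side the same structure is driven through the public location-dependency API. A usage sketch for an address-based hash table; table and rehash are hypothetical client code, while mps_ld_reset, mps_ld_add, and mps_ld_isstale are the documented MPS calls:

/* Client-side sketch: guard an address-based hash against moving GC. */
mps_ld_reset(&table->ld, arena);          /* start a fresh dependency */
mps_ld_add(&table->ld, arena, obj);       /* record dependency on obj */
/* ... later, before trusting a hash derived from obj's address ... */
if (mps_ld_isstale(&table->ld, arena, obj))
  rehash(table);                          /* obj may have moved */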
Example #14
Count TreeToVine(Tree *link)
{
  Count count = 0;
  
  AVER(link != NULL);
  AVERT(Tree, *link);

  while (*link != TreeEMPTY) {
    while (TreeHasLeft(*link))
      TreeRotateRight(link);
    link = &((*link)->right);
    ++count;
  }
  
  return count;
}
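TreeToVine is the first phase of a Day-Stout-Warren style rebalance: it unrolls the tree into a right-going vine and counts the nodes. The second phase compresses the vine with left rotations. The following sketch shows that phase using TreeRotateLeft from Example #3; it is a simplified variant that halves the vine each pass and omits DSW's initial partial pass, so the result is balanced but not perfectly so:

/* Compression sketch: perform `count` left rotations down the right
   spine, shortening the vine. */
static void vineCompress(Tree *link, Count count)
{
  Count i;

  for (i = 0; i < count; ++i) {
    TreeRotateLeft(link);          /* see Example #3 */
    link = &((*link)->right);      /* step past the rotated node */
  }
}

/* Balancing sketch (simplified DSW second phase). */
static void treeBalanceSketch(Tree *link)
{
  Count count = TreeToVine(link);

  while (count > 1) {
    count /= 2;
    vineCompress(link, count);
  }
}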
Example #15
static Res MVFFAlloc(Addr *aReturn, Pool pool, Size size,
                     Bool withReservoirPermit)
{
  Res res;
  MVFF mvff;
  Addr base, limit;
  Bool foundBlock;

  AVERT(Pool, pool);
  mvff = Pool2MVFF(pool);
  AVERT(MVFF, mvff);

  AVER(aReturn != NULL);
  AVER(size > 0);
  AVERT(Bool, withReservoirPermit);

  size = SizeAlignUp(size, PoolAlignment(pool));

  foundBlock = MVFFFindFirstFree(&base, &limit, mvff, size);
  if (!foundBlock) {
    Seg seg;

    res = MVFFAddSeg(&seg, mvff, size, withReservoirPermit);
    if (res != ResOK)
      return res;
    foundBlock = MVFFFindFirstFree(&base, &limit, mvff, size);

    /* We know that the found range must intersect the new segment. */
    /* However, it doesn't necessarily lie entirely within it. */
    /* The next three AVERs test for intersection of two intervals. */
    AVER(base >= SegBase(seg) || limit <= SegLimit(seg));
    AVER(base < SegLimit(seg));
    AVER(SegBase(seg) < limit);

    /* We also know that the found range is no larger than the segment. */
    AVER(SegSize(seg) >= AddrOffset(base, limit));
  }
  AVER(foundBlock);
  AVER(AddrOffset(base, limit) == size);

  *aReturn = base;

  return ResOK;
}
Example #16
static Res MFSInit(Pool pool, ArgList args)
{
  Size extendBy = MFS_EXTEND_BY_DEFAULT;
  Bool extendSelf = TRUE;
  Size unitSize;
  MFS mfs;
  Arena arena;
  ArgStruct arg;

  AVER(pool != NULL);
  AVERT(ArgList, args);
  
  ArgRequire(&arg, args, MPS_KEY_MFS_UNIT_SIZE);
  unitSize = arg.val.size;
  if (ArgPick(&arg, args, MPS_KEY_EXTEND_BY))
    extendBy = arg.val.size;
  if (ArgPick(&arg, args, MFSExtendSelf))
    extendSelf = arg.val.b;

  AVERT(Bool, extendSelf);
 
  mfs = PoolPoolMFS(pool);
  arena = PoolArena(pool);

  mfs->unroundedUnitSize = unitSize;

  if (unitSize < UNIT_MIN)
    unitSize = UNIT_MIN;
  unitSize = SizeAlignUp(unitSize, MPS_PF_ALIGN);
  if (extendBy < unitSize)
    extendBy = unitSize;
  extendBy = SizeArenaGrains(extendBy, arena);

  mfs->extendBy = extendBy;
  mfs->extendSelf = extendSelf;
  mfs->unitSize = unitSize;
  mfs->freeList = NULL;
  mfs->tractList = NULL;
  mfs->total = 0;
  mfs->free = 0;
  mfs->sig = MFSSig;

  AVERT(MFS, mfs);
  EVENT5(PoolInitMFS, pool, arena, extendBy, BOOLOF(extendSelf), unitSize);
  return ResOK;
}
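Clients reach MFSInit through pool creation with keyword arguments. A usage sketch; mps_class_mfs is declared in mpscmfs.h, and error handling is elided:

/* Usage sketch: create an MFS pool of fixed 64-byte units. */
mps_pool_t pool;
mps_res_t res;

MPS_ARGS_BEGIN(args) {
  MPS_ARGS_ADD(args, MPS_KEY_MFS_UNIT_SIZE, 64);
  res = mps_pool_create_k(&pool, arena, mps_class_mfs(), args);
} MPS_ARGS_END(args);
if (res != MPS_RES_OK) {
  /* handle the failure */
}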
Example #17
static void MVFFFinish(Pool pool)
{
  MVFF mvff;
  Arena arena;
  Seg seg;
  Ring ring, node, nextNode;

  AVERT(Pool, pool);
  mvff = Pool2MVFF(pool);
  AVERT(MVFF, mvff);

  ring = PoolSegRing(pool);
  RING_FOR(node, ring, nextNode) {
    seg = SegOfPoolRing(node);
    AVER(SegPool(seg) == pool);
    SegFree(seg);
  }
}
Example #18
Res BufferFramePush(AllocFrame *frameReturn, Buffer buffer)
{
  Pool pool;
  AVERT(Buffer, buffer);
  AVER(frameReturn != NULL);


  /* Process any flip */
  if (!BufferIsReset(buffer) && buffer->ap_s.limit == (Addr)0) {
    /* .fill.unflip: If the buffer is flipped then we unflip the buffer. */
    if (buffer->mode & BufferModeFLIPPED) {
      BufferSetUnflipped(buffer);
    }
  }
  pool = BufferPool(buffer);
  return Method(Pool, pool, framePush)(frameReturn, pool, buffer);
}
Example #19
static void SplayLinkLeft(SplayNode *topIO, SplayNode *leftIO) {
    AVERT(SplayNode, *topIO);
    AVERT(SplayNode, *leftIO);

    /* Don't fix client properties yet. */

    /* .link.left.first: *leftIO is always the last node in the */
    /* left tree, so its right child must be null. */
    AVER(SplayNodeRightChild(*leftIO) == NULL);

    SplayNodeSetRightChild(*leftIO, *topIO);
    *leftIO = *topIO;
    *topIO = SplayNodeRightChild(*topIO);

    /* The following line is only required for .link.left.first. */
    SplayNodeSetRightChild(*leftIO, NULL);
}
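SplayLinkRight would be the mirror image, descending left instead of right. A sketch under the symmetric assumptions (the first node of the right tree has a null left child); SplayNodeLeftChild and SplayNodeSetLeftChild are assumed to be the left-hand analogues of the accessors above:

/* Mirror-image sketch of SplayLinkLeft. */
static void SplayLinkRightSketch(SplayNode *topIO, SplayNode *rightIO) {
    AVERT(SplayNode, *topIO);
    AVERT(SplayNode, *rightIO);

    /* *rightIO is always the first node in the right tree, so its */
    /* left child must be null. */
    AVER(SplayNodeLeftChild(*rightIO) == NULL);

    SplayNodeSetLeftChild(*rightIO, *topIO);
    *rightIO = *topIO;
    *topIO = SplayNodeLeftChild(*topIO);

    SplayNodeSetLeftChild(*rightIO, NULL);
}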
Example #20
static void SplayAssembleRev(SplayTree splay, SplayState state)
{
  Tree left, right;

  AVERT(SplayTree, splay);
  AVER(state->middle != TreeEMPTY);
  
  left = TreeLeft(state->middle);
  left = SplayUpdateRightSpine(splay, state->leftLast, left);
  TreeSetLeft(state->middle, left);

  right = TreeRight(state->middle);
  right = SplayUpdateLeftSpine(splay, state->rightFirst, right);
  TreeSetRight(state->middle, right);

  splay->updateNode(splay, state->middle);
}
Example #21
Res ThreadRegister(Thread *threadReturn, Arena arena)
{
    Res res;
    Thread thread;
    HANDLE procHandle;
    BOOL b;
    void *p;

    AVER(threadReturn != NULL);
    AVERT(Arena, arena);

    res = ControlAlloc(&p, arena, sizeof(ThreadStruct),
                       /* withReservoirPermit */ FALSE);
    if(res != ResOK)
        return res;
    thread = (Thread)p; /* avoid pun */

    /* Duplicate handle gives us a new handle with updated privileges.
     * .thread.handle describes the ones needed.
     */
    procHandle = GetCurrentProcess();

    b = DuplicateHandle(procHandle, GetCurrentThread(), procHandle,
                        &thread->handle,
                        THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT,
                        FALSE, 0);
    if(!b) {
        /* Avoid leaking the control block on failure. */
        ControlFree(arena, p, sizeof(ThreadStruct));
        return ResRESOURCE;
    }

    thread->id = GetCurrentThreadId();

    RingInit(&thread->arenaRing);

    thread->sig = ThreadSig;
    thread->serial = arena->threadSerial;
    ++arena->threadSerial;
    thread->arena = arena;

    AVERT(Thread, thread);

    RingAppend(ArenaThreadRing(arena), &thread->arenaRing);

    *threadReturn = thread;
    return ResOK;
}
Example #22
void EventDump(mps_lib_FILE *stream)
{
  Event event;
  EventKind kind;

  AVER(stream != NULL);

  for (kind = 0; kind < EventKindLIMIT; ++kind) {
    for (event = (Event)EventLast[kind];
         event < (Event)(EventBuffer[kind] + EventBufferSIZE);
         event = (Event)((char *)event + event->any.size)) {
      /* Try to keep going even if there's an error, because this is used as a
         backtrace and we'll take what we can get. */
      (void)EventWrite(event, stream);
      (void)WriteF(stream, "\n", NULL);
    }
  }
}
Example #23
Res MutatorContextInitThread(MutatorContext context, HANDLE thread)
{
  BOOL success;

  AVER(context != NULL);

  context->var = MutatorContextTHREAD;
  /* This dumps the relevant registers into the context */
  /* .context.flags */
  context->the.context.ContextFlags = CONTEXT_CONTROL | CONTEXT_INTEGER;
  success = GetThreadContext(thread, &context->the.context);
  if (!success)
    return ResFAIL;
  context->sig = MutatorContextSig;

  AVERT(MutatorContext, context);
  return ResOK;
}
Example #24
void mps_arena_formatted_objects_walk(mps_arena_t mps_arena,
                                      mps_formatted_objects_stepper_t f,
                                      void *p, size_t s)
{
  Arena arena = (Arena)mps_arena;
  FormattedObjectsStepClosureStruct c;

  ArenaEnter(arena);
  AVERT(Arena, arena);
  AVER(FUNCHECK(f));
  /* p and s are arbitrary closures, hence can't be checked */
  c.sig = FormattedObjectsStepClosureSig;
  c.f = f;
  c.p = p;
  c.s = s;
  ArenaFormattedObjectsWalk(arena, ArenaFormattedObjectsStep, &c, 0);
  ArenaLeave(arena);
}
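The stepper supplied by the client has the documented mps_formatted_objects_stepper_t signature. A minimal example that counts objects, passing the counter through the p closure:

/* Example stepper: count formatted objects in the arena. */
static void countStepper(mps_addr_t addr, mps_fmt_t fmt,
                         mps_pool_t pool, void *p, size_t s)
{
  size_t *count = p;
  (void)addr; (void)fmt; (void)pool; (void)s; /* unused here */
  ++*count;
}

/* Usage: */
size_t count = 0;
mps_arena_formatted_objects_walk(arena, countStepper, &count, 0);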
Example #25
void ThreadDeregister(Thread thread, Arena arena)
{
    Bool b;

    AVERT(Thread, thread);
    AVERT(Arena, arena);

    RingRemove(&thread->arenaRing);

    thread->sig = SigInvalid;

    RingFinish(&thread->arenaRing);

    b = CloseHandle(thread->handle);
    AVER(b); /* .error.close-handle */

    ControlFree(arena, thread, sizeof(ThreadStruct));
}
Example #26
Bool EPVMSegCheck(EPVMSeg epvmSeg)
{
  Seg seg;

  CHECKS(EPVMSeg, epvmSeg);
  CHECKL(AMSSegCheck(&epvmSeg->amsSegStruct));
  seg = EPVMSeg2Seg(epvmSeg);
  CHECKU(EPVMSave, epvmSeg->save);
  CHECKL(epvmSeg->save->size >= SegSize(seg));
  /* buffers only on the current save level */
  if (SegBuffer(seg) != NULL)
    CHECKL(EPVMCurrentSave(EPVMSegEPVM(epvmSeg)) == epvmSeg->save);
  /* See design.mps.poolepvm.protection.format and */
  /* d.m.p.protection.hack. */
  AVER(SegSummary(seg) == RefSetUNIV || SegSummary(seg) == RefSetEMPTY);

  return TRUE;
}
Example #27
void EPVMSetRankSet(Seg seg, RankSet rankSet)
{
  EPVMSeg epvmSeg;
  EPVMSave save;

  AVERT(Seg, seg);
  epvmSeg = Seg2EPVMSeg(seg);
  AVERT(EPVMSeg, epvmSeg);
  AVER(RankSetCheck(rankSet));

  save = epvmSeg->save;
  if (RankSetEMPTY == rankSet) {
    save->smallStringSeg = TRUE;
  } else {
    save->smallObjectSeg = TRUE;
  }
  SEG_SUPERCLASS(EPVMSegClass)->setRankSet(seg, rankSet);
}
Example #28
void ShieldFinish(Shield shield)
{
  /* The queue should already have been destroyed by
     GlobalsPrepareToDestroy calling ShieldDestroyQueue. */
  AVER(shield->length == 0);
  AVER(shield->limit == 0);
  AVER(shield->queue == NULL);

  AVER(shield->depth == 0);
  AVER(shield->unsynced == 0);
  AVER(shield->holds == 0);
  shield->sig = SigInvalid;
}
Example #29
void MFSExtend(Pool pool, Addr base, Size size)
{
  MFS mfs;
  Tract tract;
  Word i, unitsPerExtent;
  Size unitSize;
  Header header = NULL;

  AVERT(Pool, pool);
  mfs = PoolPoolMFS(pool);
  AVERT(MFS, mfs);
  AVER(size == mfs->extendBy);

  /* Ensure that the memory we're adding belongs to this pool.  This is
     automatic if it was allocated using ArenaAlloc, but if the memory is
     being inserted from elsewhere then it must have been set up correctly. */
  AVER(PoolHasAddr(pool, base));
  
  /* .tract.chain: chain first tracts through TractP(tract) */
  tract = TractOfBaseAddr(PoolArena(pool), base);

  AVER(TractPool(tract) == pool);

  TractSetP(tract, (void *)mfs->tractList);
  mfs->tractList = tract;

  /* Update accounting */
  mfs->total += size;
  mfs->free += size;

  /* Sew together all the new empty units in the region, working down */
  /* from the top so that they are in ascending order of address on the */
  /* free list. */

  unitSize = mfs->unitSize;
  unitsPerExtent = size/unitSize;
  AVER(unitsPerExtent > 0);

#define SUB(b, s, i)    ((Header)AddrAdd(b, (s)*(i)))

  for(i = 0; i < unitsPerExtent; ++i)
  {
    header = SUB(base, unitSize, unitsPerExtent-i - 1);
    AVER(AddrIsAligned(header, pool->alignment));
    AVER(AddrAdd((Addr)header, unitSize) <= AddrAdd(base, size));
    header->next = mfs->freeList;
    mfs->freeList = header;
  }

#undef SUB
}
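Allocation from the pool then pops a unit off freeList. A minimal sketch; the real MFSAlloc extends the pool (via MFSExtend) when the list is empty, a path elided here:

/* Allocation sketch: pop one fixed-size unit from the free list. */
static Res MFSAllocSketch(Addr *pReturn, MFS mfs)
{
  Header unit = mfs->freeList;

  if (unit == NULL)
    return ResRESOURCE;          /* real code would extend the pool */
  mfs->freeList = unit->next;
  mfs->free -= mfs->unitSize;
  *pReturn = (Addr)unit;
  return ResOK;
}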
Example #30
static void sigHandle(int sig, siginfo_t *info, void *context)
{
  int e;
  sigset_t sigset, oldset;
  struct sigaction sa;

  AVER(sig == SIGSEGV);
  AVER(info != NULL);

  if(info->si_code == SEGV_ACCERR) {
    AccessSet mode;
    Addr base, limit;

    /* We can't determine the access mode (read, write, etc.) */
    /* under Solaris without decoding the faulting instruction. */
    /* Don't bother, yet.  We can do this if necessary. */

    mode = AccessREAD | AccessWRITE;

    /* We assume that the access is for one word at the address. */

    base = (Addr)info->si_addr;
    limit = AddrAdd(base, (Size)sizeof(Addr));

    /* Offer each protection structure the opportunity to handle the */
    /* exception.  If it succeeds, then allow the mutator to continue. */

    /* MutatorFaultContext parameter is a dummy parameter in this */
    /* implementation */
    if(ArenaAccess(base, mode, NULL))
      return;
  }

  /* The exception was not handled by any known protection structure, */
  /* so throw it to the previously installed handler. */

  /* @@ This is really weak.
   * Need to implement rest of the contract of sigaction */
 
  e = sigaction(SIGSEGV, &sigNext, &sa);
  AVER(e == 0);
  sigemptyset(&sigset);
  sigaddset(&sigset, SIGSEGV);
  e = sigprocmask(SIG_UNBLOCK, &sigset, &oldset);
  AVER(e == 0);
  kill(getpid(), SIGSEGV);
  e = sigprocmask(SIG_SETMASK, &oldset, NULL);
  AVER(e == 0);
  e = sigaction(SIGSEGV, &sa, NULL);
  AVER(e == 0);
}
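For the chaining at the end of sigHandle to work, the handler must have been installed with SA_SIGINFO and the previous disposition saved in sigNext. An installation sketch consistent with the code above (sigNext is assumed to be a file-scope struct sigaction, as its use in sigHandle implies):

/* Installation sketch: hook SIGSEGV, saving the old disposition in
   sigNext so that sigHandle can chain to it. */
static void sigInstall(void)
{
  struct sigaction sa;
  int e;

  sa.sa_sigaction = sigHandle;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_SIGINFO;
  e = sigaction(SIGSEGV, &sa, &sigNext);
  AVER(e == 0);
}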