Example #1
Res ThreadScan(ScanState ss, Thread thread, void *stackBot)
{
  DWORD id;
  Res res;

  id = GetCurrentThreadId();

  if(id != thread->id) { /* .thread.id */
    CONTEXT context;
    BOOL success;
    Addr *stackBase, *stackLimit, stackPtr;

    /* scan stack and register roots in other threads */

    /* This dumps the relevant registers into the context */
    /* .context.flags */
    context.ContextFlags = CONTEXT_CONTROL | CONTEXT_INTEGER;
    /* .thread.handle.get-context */
    success = GetThreadContext(thread->handle, &context);
    if(!success) {
      /* .error.get-context */
      /* We assume that the thread must have been destroyed. */
      /* We ignore the situation by returning immediately. */
      return ResOK;
    }

    stackPtr  = (Addr)context.Esp;   /* .i3.sp */
    /* .stack.align */
    stackBase  = (Addr *)AddrAlignUp(stackPtr, sizeof(Addr));
    stackLimit = (Addr *)stackBot;
    if (stackBase >= stackLimit)
      return ResOK;    /* .stack.below-bottom */

    /* scan stack inclusive of current sp and exclusive of
     * stackBot (.stack.full-descend)
     */
    res = TraceScanAreaTagged(ss, stackBase, stackLimit);
    if(res != ResOK)
      return res;

    /* This scans the root registers (.context.regroots).  It also
     * unnecessarily scans the rest of the context.  The optimisation
     * to scan only relevant parts would be machine dependent.
     */
    res = TraceScanAreaTagged(ss, (Addr *)&context,
           (Addr *)((char *)&context + sizeof(CONTEXT)));
    if(res != ResOK)
      return res;

  } else { /* scan this thread's stack */
    res = StackScan(ss, stackBot);
    if(res != ResOK)
      return res;
  }

  return ResOK;
}
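
The scan above rounds the stack pointer up to a word boundary with AddrAlignUp before walking the stack. A minimal sketch of such a helper, assuming Addr and Word are address-sized integral types and that the alignment is a power of two (the real MPS version is a macro, not this definition):

/* Round addr up to the next multiple of align, which must be a */
/* power of two.  Sketch only, not the actual MPS definition. */
#define AddrAlignUp(addr, align) \
  ((Addr)(((Word)(addr) + (Word)(align) - 1) & ~((Word)(align) - 1)))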
Example #2
static Res VMChunkInit(Chunk chunk, BootBlock boot)
{
  VMChunk vmChunk;
  Addr overheadLimit;
  void *p;
  Res res;
  BT saMapped, saPages;

  /* chunk is supposed to be uninitialized, so don't check it. */
  vmChunk = Chunk2VMChunk(chunk);
  AVERT(BootBlock, boot);
  
  res = BootAlloc(&p, boot, BTSize(chunk->pages), MPS_PF_ALIGN);
  if (res != ResOK)
    goto failSaMapped;
  saMapped = p;
  
  res = BootAlloc(&p, boot, BTSize(chunk->pageTablePages), MPS_PF_ALIGN);
  if (res != ResOK)
    goto failSaPages;
  saPages = p;
  
  overheadLimit = AddrAdd(chunk->base, (Size)BootAllocated(boot));

  /* Put the page table as late as possible, as in VM systems we don't want */
  /* to map it. */
  res = BootAlloc(&p, boot, chunk->pageTablePages << chunk->pageShift,
                  chunk->pageSize);
  if (res != ResOK)
    goto failAllocPageTable;
  chunk->pageTable = p;

  /* Map memory for the bit tables. */
  if (vmChunk->overheadMappedLimit < overheadLimit) {
    overheadLimit = AddrAlignUp(overheadLimit, ChunkPageSize(chunk));
    res = vmArenaMap(VMChunkVMArena(vmChunk), vmChunk->vm,
                     vmChunk->overheadMappedLimit, overheadLimit);
    if (res != ResOK)
      goto failTableMap;
    vmChunk->overheadMappedLimit = overheadLimit;
  }

  SparseArrayInit(&vmChunk->pages,
                  chunk->pageTable,
                  sizeof(PageUnion),
                  chunk->pages,
                  saMapped, saPages, vmChunk->vm);

  return ResOK;

  /* .no-clean: No clean-ups needed for boot, as we will discard the chunk. */
failTableMap:
failSaPages:
failAllocPageTable:
failSaMapped:
  return res;
}
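
The two BootAlloc calls above size their bit tables with BTSize. A sketch of that calculation, assuming a bit table packs one bit per page into an array of Words of MPS_WORD_WIDTH bits each (the real definition lives in the MPS bit-table module; BTSizeSketch is a hypothetical name):

/* Bytes needed for a bit table of n bits, rounded up to whole Words. */
#define BTSizeSketch(n) \
  ((((n) + MPS_WORD_WIDTH - 1) / MPS_WORD_WIDTH) * sizeof(Word))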
Example #3
Res VMInit(VM vm, Size size, Size grainSize, void *params)
{
  LPVOID vbase;
  Size pageSize, reserved;
  VMParams vmParams = params;

  AVER(vm != NULL);
  AVERT(ArenaGrainSize, grainSize);
  AVER(size > 0);
  AVER(params != NULL); /* FIXME: Should have full AVERT? */

  AVER(COMPATTYPE(LPVOID, Addr));  /* .assume.lpvoid-addr */
  AVER(COMPATTYPE(SIZE_T, Size));

  pageSize = PageSize();

  /* Grains must consist of whole pages. */
  AVER(grainSize % pageSize == 0);

  /* Check that the rounded-up sizes will fit in a Size. */
  size = SizeRoundUp(size, grainSize);
  if (size < grainSize || size > (Size)(SIZE_T)-1)
    return ResRESOURCE;
  reserved = size + grainSize - pageSize;
  if (reserved < grainSize || reserved > (Size)(SIZE_T)-1)
    return ResRESOURCE;

  /* Allocate the address space. */
  vbase = VirtualAlloc(NULL,
                       reserved,
                       vmParams->topDown ?
                         MEM_RESERVE | MEM_TOP_DOWN :
                         MEM_RESERVE,
                       PAGE_NOACCESS);
  if (vbase == NULL)
    return ResRESOURCE;

  AVER(AddrIsAligned(vbase, pageSize));

  vm->pageSize = pageSize;
  vm->block = vbase;
  vm->base = AddrAlignUp(vbase, grainSize);
  vm->limit = AddrAdd(vm->base, size);
  AVER(vm->base < vm->limit);  /* .assume.not-last */
  AVER(vm->limit <= AddrAdd((Addr)vm->block, reserved));
  vm->reserved = reserved;
  vm->mapped = 0;

  vm->sig = VMSig;
  AVERT(VM, vm);

  EVENT3(VMInit, vm, VMBase(vm), VMLimit(vm));
  return ResOK;
}
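
The overflow tests above rely on unsigned arithmetic wrapping: if rounding up overflows, the result wraps to a value smaller than grainSize, which the "size < grainSize" check catches. A sketch of the rounding helper under that assumption (the real SizeRoundUp is an MPS macro; this hypothetical function only illustrates the idea):

/* Round size up to a multiple of modulus.  On overflow the result */
/* wraps, so callers must check the result against the modulus. */
static Size SizeRoundUpSketch(Size size, Size modulus)
{
  return (size + modulus - 1) / modulus * modulus;
}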
Example #4
static Res clientChunkCreate(Chunk *chunkReturn, Addr base, Addr limit,
                             ClientArena clientArena)
{
  ClientChunk clChunk;
  Chunk chunk;
  Addr alignedBase;
  BootBlockStruct bootStruct;
  BootBlock boot = &bootStruct;
  Res res;
  void *p;

  AVER(chunkReturn != NULL);
  AVER(base != (Addr)0);
  /* @@@@ Should refuse on small chunks, instead of AVERring. */
  AVER(limit != (Addr)0);
  AVER(limit > base);

  /* Initialize boot block. */
  /* Chunk has to be page-aligned, and the boot allocs must be within it. */
  alignedBase = AddrAlignUp(base, ARENA_CLIENT_PAGE_SIZE);
  AVER(alignedBase < limit);
  res = BootBlockInit(boot, (void *)alignedBase, (void *)limit);
  if (res != ResOK)
    goto failBootInit;

  /* Allocate the chunk. */
  /* See <design/arena/>.@@@@ */
  res = BootAlloc(&p, boot, sizeof(ClientChunkStruct), MPS_PF_ALIGN);
  if (res != ResOK)
    goto failChunkAlloc;
  clChunk = p;
  chunk = ClientChunk2Chunk(clChunk);

  res = ChunkInit(chunk, ClientArena2Arena(clientArena),
                  alignedBase, AddrAlignDown(limit, ARENA_CLIENT_PAGE_SIZE),
                  ARENA_CLIENT_PAGE_SIZE, boot);
  if (res != ResOK)
    goto failChunkInit;

  ClientArena2Arena(clientArena)->committed +=
    AddrOffset(base, PageIndexBase(chunk, chunk->allocBase));
  BootBlockFinish(boot);

  clChunk->sig = ClientChunkSig;
  AVERT(ClientChunk, clChunk);
  *chunkReturn = chunk;
  return ResOK;

failChunkInit:
failChunkAlloc:
failBootInit:
  return res;
}
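
The chunk descriptor here is carved out of the client's own memory block by the boot allocator. A minimal sketch of the sequential-carving pattern that BootAlloc implements, with a hypothetical cursor standing in for the real BootBlockStruct fields:

/* Carve an aligned block of size bytes from [*cursorIO, limit), */
/* advancing the cursor past it.  bootAllocSketch is hypothetical; */
/* the real BootAlloc operates on a BootBlockStruct. */
static Res bootAllocSketch(void **pReturn, Addr *cursorIO, Addr limit,
                           Size size, Align align)
{
  Addr base = AddrAlignUp(*cursorIO, align);
  if (base > limit || AddrOffset(base, limit) < size)
    return ResMEMORY;
  *pReturn = (void *)base;
  *cursorIO = AddrAdd(base, size);
  return ResOK;
}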
Example #5
Res VMCreate(VM *vmReturn, Size size)
{
  VM vm;

  AVER(vmReturn != NULL);

  /* Note that because we add VMANPageALIGNMENT rather than */
  /* VMANPageALIGNMENT-1 we are not in danger of overflowing */
  /* vm->limit even if malloc were perverse enough to give us */
  /* a block at the end of memory. */
  size = SizeAlignUp(size, VMANPageALIGNMENT) + VMANPageALIGNMENT;
  if ((size < VMANPageALIGNMENT) || (size > (Size)(size_t)-1))
    return ResRESOURCE;

  vm = (VM)malloc(sizeof(VMStruct));
  if (vm == NULL)
    return ResMEMORY;

  vm->block = malloc((size_t)size);
  if (vm->block == NULL) {
    free(vm);
    return ResMEMORY;
  }

  vm->base  = AddrAlignUp((Addr)vm->block, VMANPageALIGNMENT);
  vm->limit = AddrAdd(vm->base, size - VMANPageALIGNMENT);
  AVER(vm->limit < AddrAdd((Addr)vm->block, size));

  memset((void *)vm->block, VMJunkBYTE, size);
 
  /* Lie about the reserved address space, to simulate real */
  /* virtual memory. */
  vm->reserved = size - VMANPageALIGNMENT;
  vm->mapped = (Size)0;
 
  vm->sig = VMSig;

  AVERT(VM, vm);
 
  EVENT3(VMCreate, vm, vm->base, vm->limit);
  *vmReturn = vm;
  return ResOK;
}
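
For symmetry, a sketch of the matching VMDestroy for this malloc-backed fake VM, assuming everything has been unmapped first and that VMJunkBYTE and SigInvalid are available as above (the real version sits alongside VMCreate and may differ in detail):

void VMDestroy(VM vm)
{
  AVERT(VM, vm);
  AVER(vm->mapped == (Size)0);  /* all pages must be unmapped first */

  /* Overwrite with junk so dangling pointers are caught, then free */
  /* both the block and the descriptor. */
  memset((void *)vm->block, VMJunkBYTE,
         (size_t)(vm->reserved + VMANPageALIGNMENT));
  vm->sig = SigInvalid;
  free(vm->block);
  free(vm);
}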
Example #6
static void MVFFFree(Pool pool, Addr old, Size size)
{
  Addr base, limit;
  MVFF mvff;

  AVERT(Pool, pool);
  mvff = Pool2MVFF(pool);
  AVERT(MVFF, mvff);

  AVER(old != (Addr)0);
  AVER(size > 0);

  base = AddrAlignUp(old, PoolAlignment(pool));
  size = size - AddrOffset(old, base);
  size = SizeAlignUp(size, PoolAlignment(pool));
  limit = AddrAdd(base, size);

  MVFFAddToFreeList(&base, &limit, mvff);

  MVFFFreeSegs(mvff, base, limit);
}
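
As a worked instance of the rounding above (values hypothetical), assume PoolAlignment(pool) == 8 and a call MVFFFree(pool, (Addr)0x1004, 0x20):

/*   base  = AddrAlignUp(0x1004, 8)             == 0x1008 */
/*   size  = 0x20 - AddrOffset(0x1004, 0x1008)  == 0x1C   */
/*   size  = SizeAlignUp(0x1C, 8)               == 0x20   */
/*   limit = AddrAdd(0x1008, 0x20)              == 0x1028 */

The freed range [0x1008, 0x1028) extends past old + size == 0x1024; this is sound only because the pool hands out aligned blocks, so the tail up to the next alignment boundary belongs to the block being freed.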
Example #7
static void test(mps_arena_t arena)
{
  BT bt;
  Nailboard board;
  Align align;
  Count nails;
  Addr base, limit;
  Index i, j, k;

  align = (Align)1 << (rnd() % 10);
  nails = (Count)1 << (rnd() % 16);
  nails += rnd() % nails;
  base = AddrAlignUp(0, align);
  limit = AddrAdd(base, nails * align);

  die(BTCreate(&bt, arena, nails), "BTCreate");
  BTResRange(bt, 0, nails);
  die(NailboardCreate(&board, arena, align, base, limit), "NailboardCreate");

  for (i = 0; i <= nails / 8; ++i) {
    Bool old;
    j = rnd() % nails;
    old = BTGet(bt, j);
    BTSet(bt, j);
    cdie(NailboardSet(board, AddrAdd(base, j * align)) == old, "NailboardSet");
    for (k = 0; k < nails / 8; ++k) {
      Index b, l;
      b = rnd() % nails;
      l = b + rnd() % (nails - b) + 1;
      cdie(BTIsResRange(bt, b, l)
           == NailboardIsResRange(board, AddrAdd(base, b * align),
                                  AddrAdd(base, l * align)),
           "NailboardIsResRange");
    }
  }

  die(NailboardDescribe(board, mps_lib_get_stdout(), 0), "NailboardDescribe");
}
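
A sketch of a harness that could drive test() above, assuming the MPS testlib helpers (testlib_init, die) and the public virtual-memory arena class:

#include "mps.h"
#include "mpsavm.h"
#include "testlib.h"
#include <stdio.h>

int main(int argc, char *argv[])
{
  mps_arena_t arena;

  testlib_init(argc, argv);
  die(mps_arena_create_k(&arena, mps_arena_class_vm(), mps_args_none),
      "mps_arena_create_k");
  test(arena);
  mps_arena_destroy(arena);

  printf("%s: Conclusion: Failed to find any defects.\n", argv[0]);
  return 0;
}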
Example #8
/* VMChunkCreate -- create a chunk
 *
 * chunkReturn, return parameter for the created chunk.
 * vmArena, the parent VMArena.
 * size, approximate amount of virtual address space that the chunk
 * should reserve.
 */
static Res VMChunkCreate(Chunk *chunkReturn, VMArena vmArena, Size size)
{
  Res res;
  Addr base, limit, chunkStructLimit;
  Align pageSize;
  VM vm;
  BootBlockStruct bootStruct;
  BootBlock boot = &bootStruct;
  VMChunk vmChunk;
  void *p;

  AVER(chunkReturn != NULL);
  AVERT(VMArena, vmArena);
  AVER(size > 0);

  res = VMCreate(&vm, size, vmArena->vmParams);
  if (res != ResOK)
    goto failVMCreate;

  pageSize = VMAlign(vm);
  /* The VM will have aligned the requested size; pick up the actual bounds. */
  base = VMBase(vm);
  limit = VMLimit(vm);

  res = BootBlockInit(boot, (void *)base, (void *)limit);
  if (res != ResOK)
    goto failBootInit;

  /* Allocate and map the descriptor. */
  /* See <design/arena/>.@@@@ */
  res = BootAlloc(&p, boot, sizeof(VMChunkStruct), MPS_PF_ALIGN);
  if (res != ResOK)
    goto failChunkAlloc;
  vmChunk = p;
  /* Calculate the limit of the page where the chunkStruct resides. */
  chunkStructLimit = AddrAlignUp((Addr)(vmChunk + 1), pageSize);
  res = vmArenaMap(vmArena, vm, base, chunkStructLimit);
  if (res != ResOK)
    goto failChunkMap;
  vmChunk->overheadMappedLimit = chunkStructLimit;

  vmChunk->vm = vm;
  res = ChunkInit(VMChunk2Chunk(vmChunk), VMArena2Arena(vmArena),
                  base, limit, pageSize, boot);
  if (res != ResOK)
    goto failChunkInit;

  BootBlockFinish(boot);

  vmChunk->sig = VMChunkSig;
  AVERT(VMChunk, vmChunk);

  *chunkReturn = VMChunk2Chunk(vmChunk);
  return ResOK;

failChunkInit:
  /* No need to unmap, as we're destroying the VM. */
failChunkMap:
failChunkAlloc:
failBootInit:
  VMDestroy(vm);
failVMCreate:
  return res;
}
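
vmArenaMap above is assumed to wrap VMMap with commit accounting. A sketch of such a wrapper, assuming the arena tracks committed bytes against a commit limit (the field names and the sketch name are illustrative, not authoritative):

static Res vmArenaMapSketch(VMArena vmArena, VM vm, Addr base, Addr limit)
{
  Arena arena = VMArena2Arena(vmArena);
  Size size = AddrOffset(base, limit);
  Res res;

  /* Refuse mappings that would take the arena past its commit limit. */
  if (arena->commitLimit < arena->committed + size)
    return ResCOMMIT_LIMIT;

  res = VMMap(vm, base, limit);
  if (res != ResOK)
    return res;

  arena->committed += size;
  return ResOK;
}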
Example #9
/* VMChunkCreate -- create a chunk
 *
 * chunkReturn, return parameter for the created chunk.
 * vmArena, the parent VMArena.
 * size, approximate amount of virtual address space that the chunk
 * should reserve.
 */
static Res VMChunkCreate(Chunk *chunkReturn, VMArena vmArena, Size size)
{
  Arena arena;
  Res res;
  Addr base, limit, chunkStructLimit;
  VMStruct vmStruct;
  VM vm = &vmStruct;
  BootBlockStruct bootStruct;
  BootBlock boot = &bootStruct;
  VMChunk vmChunk;
  void *p;

  AVER(chunkReturn != NULL);
  AVERT(VMArena, vmArena);
  arena = VMArena2Arena(vmArena);
  AVER(size > 0);

  res = VMInit(vm, size, ArenaGrainSize(arena), vmArena->vmParams);
  if (res != ResOK)
    goto failVMInit;

  base = VMBase(vm);
  limit = VMLimit(vm);

  res = BootBlockInit(boot, (void *)base, (void *)limit);
  if (res != ResOK)
    goto failBootInit;

  /* Allocate and map the descriptor. */
  /* See <design/arena/>.@@@@ */
  res = BootAlloc(&p, boot, sizeof(VMChunkStruct), MPS_PF_ALIGN);
  if (res != ResOK)
    goto failChunkAlloc;
  vmChunk = p;
  /* Calculate the limit of the grain where the chunkStruct resides. */
  chunkStructLimit = AddrAlignUp((Addr)(vmChunk + 1), ArenaGrainSize(arena));
  res = vmArenaMap(vmArena, vm, base, chunkStructLimit);
  if (res != ResOK)
    goto failChunkMap;
  vmChunk->overheadMappedLimit = chunkStructLimit;

  /* Copy VM descriptor into its place in the chunk. */
  VMCopy(VMChunkVM(vmChunk), vm);
  res = ChunkInit(VMChunk2Chunk(vmChunk), arena, base, limit,
                  VMReserved(VMChunkVM(vmChunk)), boot);
  if (res != ResOK)
    goto failChunkInit;

  BootBlockFinish(boot);

  vmChunk->sig = VMChunkSig;
  AVERT(VMChunk, vmChunk);

  *chunkReturn = VMChunk2Chunk(vmChunk);
  return ResOK;

failChunkInit:
  VMUnmap(vm, VMBase(vm), chunkStructLimit);
failChunkMap:
failChunkAlloc:
failBootInit:
  VMFinish(vm);
failVMInit:
  return res;
}
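
This second version keeps the VM descriptor in a local VMStruct until the descriptor's grain is mapped, then copies it into its permanent home inside the chunk. A sketch of that copy, assuming a plain structure assignment suffices (the real VMCopy may copy the bytes via the plinth instead):

/* Copy a VM descriptor from its temporary home to its permanent one. */
void VMCopy(VM dest, VM src)
{
  AVER(dest != NULL);
  AVERT(VM, src);
  *dest = *src;  /* sketch: a structure copy of VMStruct */
}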