Example 1
#include "Rts.h"

#include <stdio.h>

extern bdescr *allocGroup_lock (W_ n);
extern void freeGroup_lock (bdescr *p);

// Test parameters; the exact values are assumptions here — any small
// positive constants will exercise the allocator.
#define ARRSIZE  256
#define LOOPS    100
#define MAXALLOC ((8 * 1024 * 1024) / BLOCK_SIZE - 1)
#define SEED     0xf00f00

int main (int argc, char *argv[])
{
    int i, j, b;

    bdescr *a[ARRSIZE];

    srand(SEED);

    hs_init(&argc, &argv);

    // repeatedly sweep through the array, allocating new random-sized
    // objects and deallocating the old ones.
    for (i=0; i < LOOPS; i++)
    {
        for (j=0; j < ARRSIZE; j++)
        {
            if (i > 0)
            {
                IF_DEBUG(block_alloc, debugBelch("A%d: freeing %p, %d blocks @ %p\n", j, a[j], a[j]->blocks, a[j]->start));
                freeGroup_lock(a[j]);
                DEBUG_ONLY(checkFreeListSanity());
            }
            b = (rand() % MAXALLOC) + 1;  // allocating zero blocks isn't allowed
            a[j] = allocGroup_lock(b);
            IF_DEBUG(block_alloc, debugBelch("A%d: allocated %p, %d blocks @ %p\n", j, a[j], b, a[j]->start));
            DEBUG_ONLY(checkFreeListSanity());
        }
    }

    for (j=0; j < ARRSIZE; j++)
    {
        freeGroup_lock(a[j]);
    }
    
    // this time, sweep forwards allocating new blocks, and then
    // backwards deallocating them.
    for (i=0; i < LOOPS; i++)
    {
        for (j=0; j < ARRSIZE; j++)
        {
            b = (rand() % MAXALLOC) + 1;
            a[j] = allocGroup_lock(b);
            IF_DEBUG(block_alloc, debugBelch("B%d,%d: allocated %p, %d blocks @ %p\n", i, j, a[j], b, a[j]->start));
            DEBUG_ONLY(checkFreeListSanity());
        }
        for (j=ARRSIZE-1; j >= 0; j--)
        {
            IF_DEBUG(block_alloc, debugBelch("B%d,%d: freeing %p, %d blocks @ %p\n", i, j, a[j], a[j]->blocks, a[j]->start));
            freeGroup_lock(a[j]);
            DEBUG_ONLY(checkFreeListSanity());
        }
    }
    
    DEBUG_ONLY(checkFreeListSanity());

    hs_exit(); // will do a memory leak test

    exit(0);
}
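
The test drives the allocator through allocGroup_lock and freeGroup_lock rather than calling allocGroup and freeGroup (Examples 5 and 6) directly. A minimal sketch of those wrappers, assuming the RTS storage-manager lock macros ACQUIRE_SM_LOCK and RELEASE_SM_LOCK: they simply serialise access to the shared free lists.

// Sketch of the locking wrappers used by the test above; the real
// definitions live in the RTS, this is just the assumed shape.
bdescr *
allocGroup_lock (W_ n)
{
    bdescr *bd;
    ACQUIRE_SM_LOCK;
    bd = allocGroup(n);
    RELEASE_SM_LOCK;
    return bd;
}

void
freeGroup_lock (bdescr *p)
{
    ACQUIRE_SM_LOCK;
    freeGroup(p);
    RELEASE_SM_LOCK;
}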
Example 2
static void
free_mega_group (bdescr *mg)
{
    bdescr *bd, *prev;

    // Find the right place in the free list.  free_mblock_list is
    // sorted by *address*, not by size as the free_list is.
    prev = NULL;
    bd = free_mblock_list;
    while (bd && bd->start < mg->start) {
        prev = bd;
        bd = bd->link;
    }

    // coalesce backwards
    if (prev)
    {
        mg->link = prev->link;
        prev->link = mg;
        mg = coalesce_mblocks(prev);
    }
    else
    {
        mg->link = free_mblock_list;
        free_mblock_list = mg;
    }
    // coalesce forwards
    coalesce_mblocks(mg);

    IF_DEBUG(sanity, checkFreeListSanity());
}
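
free_mega_group delegates the actual merging to coalesce_mblocks, which is not shown above. A sketch under the assumption that two megablock groups coalesce exactly when the second begins at the address immediately after the first ends (MBLOCK_SIZE, BLOCKS_TO_MBLOCKS and MBLOCK_GROUP_BLOCKS as used in Example 5):

// Sketch: merge p with its successor q on the address-sorted
// free_mblock_list when they are contiguous in memory.  Returns the
// descriptor from which forward coalescing should continue.
static bdescr *
coalesce_mblocks (bdescr *p)
{
    bdescr *q = p->link;

    if (q != NULL &&
        MBLOCK_ROUND_DOWN(q->start) ==
        (StgWord8*)MBLOCK_ROUND_DOWN(p->start) +
            BLOCKS_TO_MBLOCKS(p->blocks) * MBLOCK_SIZE)
    {
        // q starts where p ends: absorb q's megablocks into p
        p->blocks = MBLOCK_GROUP_BLOCKS(BLOCKS_TO_MBLOCKS(p->blocks) +
                                        BLOCKS_TO_MBLOCKS(q->blocks));
        p->link   = q->link;
        return p;
    }
    return q;
}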
Example 3
void checkSanity (rtsBool after_gc, rtsBool major_gc)
{
    checkFullHeap(after_gc && major_gc);

    checkFreeListSanity();

    // always check the stacks in threaded mode, because checkHeap()
    // does nothing in this case.
    if (after_gc) {
        checkMutableLists();
        checkGlobalTSOList(rtsTrue);
    }
}
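
checkFreeListSanity is called after nearly every operation in these examples but is never shown. The invariants it would have to verify follow from Examples 5 and 6; the sketch below is an assumption about their shape, not the real checker: every group on free_list[ln] is marked free, sized for its bucket, back-linked consistently, and never adjacent to another free group (freeGroup would have coalesced them).

// Sketch (assumed, not the real code): walk each free-list bucket
// and assert the invariants that allocGroup/freeGroup maintain.
static void
checkFreeListSanity_sketch (void)
{
    bdescr *bd, *prev;
    StgWord ln, min = 1;

    for (ln = 0; ln < MAX_FREE_LIST; ln++, min <<= 1) {
        prev = NULL;
        for (bd = free_list[ln]; bd != NULL; prev = bd, bd = bd->link) {
            ASSERT(bd->free == (P_)-1);                // marked free
            ASSERT(bd->blocks >= min && bd->blocks <= min*2 - 1);
            ASSERT(bd->u.back == prev);                // doubly linked
            // no two adjacent free groups: freeGroup coalesces forwards
            if (bd + bd->blocks <= LAST_BDESCR(MBLOCK_ROUND_DOWN(bd))) {
                ASSERT((bd + bd->blocks)->free != (P_)-1);
            }
        }
    }
}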
Example 4
//
// Allocate a chunk of blocks that is at least min and at most max
// blocks in size. This API is used by the nursery allocator that
// wants contiguous memory preferably, but doesn't require it.  When
// memory is fragmented we might have lots of chunks that are
// less than a full megablock, so allowing the nursery allocator to
// use these reduces fragmentation considerably.  e.g. on a GHC build
// with +RTS -H, I saw fragmentation go from 17MB down to 3MB on a
// single compile.
//
// Further to this: in #7257 there is a program that creates serious
// fragmentation such that the heap is full of tiny <4 block chains.
// The nursery allocator therefore has to use single blocks to avoid
// fragmentation, but we make sure that we allocate large blocks
// preferably if there are any.
//
bdescr *
allocLargeChunk (W_ min, W_ max)
{
    bdescr *bd;
    StgWord ln, lnmax;

    if (min >= BLOCKS_PER_MBLOCK) {
        return allocGroup(max);
    }

    ln = log_2_ceil(min);
    lnmax = log_2_ceil(max); // tops out at MAX_FREE_LIST

    while (ln < lnmax && free_list[ln] == NULL) {
        ln++;
    }
    if (ln == lnmax) {
        return allocGroup(max);
    }
    bd = free_list[ln];

    if (bd->blocks <= max)              // not too big: take the whole group
    {
        dbl_link_remove(bd, &free_list[ln]);
        initGroup(bd);
    }
    else                                // group too big: split off max blocks
    {
        bd = split_free_block(bd, max, ln);
        ASSERT(bd->blocks == max);
        initGroup(bd);
    }

    n_alloc_blocks += bd->blocks;
    if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

    IF_DEBUG(sanity, memset(bd->start, 0xaa, bd->blocks * BLOCK_SIZE));
    IF_DEBUG(sanity, checkFreeListSanity());
    return bd;
}
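
A hypothetical caller, sketching the nursery-allocator usage described in the comment above: ask for a large chunk, but accept whatever allocLargeChunk returns, down to a single block. The function name and the chaining through bd->link are illustrative assumptions; stg_min is the RTS minimum macro.

// Hypothetical sketch: build 'blocks' blocks of nursery out of the
// largest chunks the (possibly fragmented) free list can supply.
static bdescr *
allocNurseryChunks_sketch (bdescr *tail, W_ blocks)
{
    bdescr *bd;
    W_ n;

    while (blocks != 0) {
        // prefer up to a megablock's worth, tolerate a single block
        n  = stg_min(blocks, BLOCKS_PER_MBLOCK);
        bd = allocLargeChunk(1, n);
        n  = bd->blocks;        // actual size is anywhere in [1, n]
        blocks -= n;
        bd->link = tail;        // chain the chunks together
        tail = bd;
    }
    return tail;
}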
Example 5
void
freeGroup(bdescr *p)
{
  StgWord ln;

  // Todo: not true in multithreaded GC
  // ASSERT_SM_LOCK();

  ASSERT(p->free != (P_)-1);

  p->free = (void *)-1;  /* indicates that this block is free */
  p->gen = NULL;
  p->gen_no = 0;
  /* fill the block group with garbage if sanity checking is on */
  IF_DEBUG(sanity,memset(p->start, 0xaa, (W_)p->blocks * BLOCK_SIZE));

  if (p->blocks == 0) barf("freeGroup: block size is zero");

  if (p->blocks >= BLOCKS_PER_MBLOCK)
  {
      StgWord mblocks;

      mblocks = BLOCKS_TO_MBLOCKS(p->blocks);
      // If this is an mgroup, make sure it has the right number of blocks
      ASSERT(p->blocks == MBLOCK_GROUP_BLOCKS(mblocks));

      n_alloc_blocks -= mblocks * BLOCKS_PER_MBLOCK;

      free_mega_group(p);
      return;
  }

  ASSERT(n_alloc_blocks >= p->blocks);
  n_alloc_blocks -= p->blocks;

  // coalesce forwards
  {
      bdescr *next;
      next = p + p->blocks;
      if (next <= LAST_BDESCR(MBLOCK_ROUND_DOWN(p)) && next->free == (P_)-1)
      {
          p->blocks += next->blocks;
          ln = log_2(next->blocks);
          dbl_link_remove(next, &free_list[ln]);
          if (p->blocks == BLOCKS_PER_MBLOCK)
          {
              free_mega_group(p);
              return;
          }
          setup_tail(p);
      }
  }

  // coalesce backwards
  if (p != FIRST_BDESCR(MBLOCK_ROUND_DOWN(p)))
  {
      bdescr *prev;
      prev = p - 1;
      if (prev->blocks == 0) prev = prev->link; // find the head

      if (prev->free == (P_)-1)
      {
          ln = log_2(prev->blocks);
          dbl_link_remove(prev, &free_list[ln]);
          prev->blocks += p->blocks;
          if (prev->blocks >= BLOCKS_PER_MBLOCK)
          {
              free_mega_group(prev);
              return;
          }
          p = prev;
      }
  }
      
  setup_tail(p);
  free_list_insert(p);

  IF_DEBUG(sanity, checkFreeListSanity());
}
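
The backwards-coalescing step above relies on an invariant established by setup_tail: the last bdescr of every free group has blocks == 0 and link pointing back at the group head, which is why "prev = prev->link" finds the head. A sketch of that helper, assuming tail_of(bd) yields the bdescr of the group's last block:

// Sketch: mark a free group's tail bdescr so that a neighbour
// scanning backwards (as freeGroup does above) can find the head.
static void
setup_tail (bdescr *bd)
{
    bdescr *tail = tail_of(bd);   // assumed: bd + bd->blocks - 1

    if (tail != bd) {
        tail->blocks = 0;         // 0 marks "tail, not a group head"
        tail->free   = 0;         // so a tail is never mistaken for free
        tail->link   = bd;        // point back at the head
    }
}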
Example 6
bdescr *
allocGroup (W_ n)
{
    bdescr *bd, *rem;
    StgWord ln;

    if (n == 0) barf("allocGroup: requested zero blocks");
    
    if (n >= BLOCKS_PER_MBLOCK)
    {
        StgWord mblocks;

        mblocks = BLOCKS_TO_MBLOCKS(n);

        // n_alloc_blocks doesn't count the extra blocks we get in a
        // megablock group.
        n_alloc_blocks += mblocks * BLOCKS_PER_MBLOCK;
        if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

        bd = alloc_mega_group(mblocks);
        // only the bdescrs of the first MB are required to be initialised
        initGroup(bd);
        goto finish;
    }
    
    n_alloc_blocks += n;
    if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

    ln = log_2_ceil(n);

    while (ln < MAX_FREE_LIST && free_list[ln] == NULL) {
        ln++;
    }

    if (ln == MAX_FREE_LIST) {
#if 0  /* useful for debugging fragmentation */
        if ((W_)mblocks_allocated * BLOCKS_PER_MBLOCK * BLOCK_SIZE_W
             - (W_)((n_alloc_blocks - n) * BLOCK_SIZE_W) > (2*1024*1024)/sizeof(W_)) {
            debugBelch("Fragmentation, wanted %d blocks, %ld MB free\n", n, ((mblocks_allocated * BLOCKS_PER_MBLOCK) - n_alloc_blocks) / BLOCKS_PER_MBLOCK);
            RtsFlags.DebugFlags.block_alloc = 1;
            checkFreeListSanity();
        }
#endif

        bd = alloc_mega_group(1);
        bd->blocks = n;
        initGroup(bd);                   // we know the group will fit
        rem = bd + n;
        rem->blocks = BLOCKS_PER_MBLOCK-n;
        initGroup(rem);                  // init the slop
        n_alloc_blocks += rem->blocks;
        freeGroup(rem);                  // add the slop on to the free list
        goto finish;
    }

    bd = free_list[ln];

    if (bd->blocks == n)                // exactly the right size!
    {
        dbl_link_remove(bd, &free_list[ln]);
        initGroup(bd);
    }
    else if (bd->blocks > n)            // block too big...
    {
        bd = split_free_block(bd, n, ln);
        ASSERT(bd->blocks == n);
        initGroup(bd);
    }
    else
    {
        barf("allocGroup: free list corrupted");
    }

finish:
    IF_DEBUG(sanity, memset(bd->start, 0xaa, bd->blocks * BLOCK_SIZE));
    IF_DEBUG(sanity, checkFreeListSanity());
    return bd;
}
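
Both allocGroup and allocLargeChunk split oversized groups with split_free_block. A sketch, assuming the n requested blocks are taken off the high-address end so the remainder keeps its descriptor and only needs moving to a smaller bucket (dbl_link_onto as the assumed counterpart of dbl_link_remove):

// Sketch: split n blocks off the end of free group bd, currently on
// free_list[ln]; re-file the shrunken remainder and return the new group.
static bdescr *
split_free_block (bdescr *bd, W_ n, StgWord ln)
{
    bdescr *fg;                         // the group we hand out

    ASSERT(bd->blocks > n);
    dbl_link_remove(bd, &free_list[ln]);
    fg = bd + bd->blocks - n;           // take n blocks off the end
    fg->blocks = n;
    bd->blocks -= n;
    setup_tail(bd);                     // re-mark the remainder's tail
    ln = log_2(bd->blocks);
    dbl_link_onto(bd, &free_list[ln]);  // remainder into its new bucket
    return fg;
}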