Example #1
/* Allocate a block of memory from the mark-and-sweep GC heap */
heap_block *heap::heap_allot(cell size, cell type)
{
	size = (size + block_size_increment - 1) & ~(block_size_increment - 1);

	free_heap_block *block = find_free_block(size);
	if(block)
	{
		block = split_free_block(block,size);
		block->set_type(type);
		block->set_marked_p(false);
		return block;
	}
	else
		return NULL;
}
Example #2
/* Allocate a block of memory from the mark-and-sweep GC heap */
heap_block *heap::heap_allot(cell size)
{
	size = (size + block_size_increment - 1) & ~(block_size_increment - 1);

	free_heap_block *block = find_free_block(size);
	if(block)
	{
		block = split_free_block(block,size);

		block->status = B_ALLOCATED;
		return block;
	}
	else
		return NULL;
}
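Both of the first two examples begin by rounding size up to the next multiple of block_size_increment, which only works when the increment is a power of two. A minimal standalone sketch of the idiom (align_up is a hypothetical name, not from the source):

#include <assert.h>
#include <stdio.h>

/* Round n up to the next multiple of align; align must be a power of two. */
static size_t align_up(size_t n, size_t align)
{
	assert((align & (align - 1)) == 0);   /* power-of-two check */
	return (n + align - 1) & ~(align - 1);
}

int main(void)
{
	printf("%zu\n", align_up(13, 16));    /* prints 16 */
	printf("%zu\n", align_up(32, 16));    /* prints 32: already aligned */
	return 0;
}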
Example #3
//
// Allocate a chunk of blocks that is at least min and at most max
// blocks in size. This API is used by the nursery allocator, which
// prefers contiguous memory but doesn't require it. When memory is
// fragmented we might have lots of chunks that are less than a full
// megablock, so allowing the nursery allocator to use these reduces
// fragmentation considerably. E.g. on a GHC build with +RTS -H, I saw
// fragmentation go from 17MB down to 3MB on a single compile.
//
// Further to this: in #7257 there is a program that creates serious
// fragmentation such that the heap is full of tiny <4 block chains.
// The nursery allocator therefore has to be able to use single blocks
// to avoid fragmentation, but we make sure to allocate large blocks
// first when any are available.
//
bdescr *
allocLargeChunk (W_ min, W_ max)
{
    bdescr *bd;
    StgWord ln, lnmax;

    if (min >= BLOCKS_PER_MBLOCK) {
        return allocGroup(max);
    }

    ln = log_2_ceil(min);
    lnmax = log_2_ceil(max); // tops out at MAX_FREE_LIST

    while (ln < lnmax && free_list[ln] == NULL) {
        ln++;
    }
    if (ln == lnmax) {
        return allocGroup(max);
    }
    bd = free_list[ln];

    if (bd->blocks <= max)              // not too big: take the whole group
    {
        dbl_link_remove(bd, &free_list[ln]);
        initGroup(bd);
    }
    else   // block too big: split it down to max blocks
    {
        bd = split_free_block(bd, max, ln);
        ASSERT(bd->blocks == max);
        initGroup(bd);
    }

    n_alloc_blocks += bd->blocks;
    if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

    IF_DEBUG(sanity, memset(bd->start, 0xaa, bd->blocks * BLOCK_SIZE));
    IF_DEBUG(sanity, checkFreeListSanity());
    return bd;
}
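Both GHC examples pick a segregated free-list bucket with log_2_ceil, and the comment on lnmax above says it tops out at MAX_FREE_LIST. A sketch consistent with that usage, returning the smallest i such that 2^i >= n and saturating at MAX_FREE_LIST (my reconstruction, not the RTS source):

/* Smallest i with (1 << i) >= n, saturating at MAX_FREE_LIST.
   Reconstruction for illustration, not the RTS definition. */
static StgWord log_2_ceil_sketch(W_ n)
{
    StgWord i, x;
    x = 1;
    for (i = 0; i < MAX_FREE_LIST; i++) {
        if (x >= n) return i;
        x = x << 1;
    }
    return MAX_FREE_LIST;
}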
Example #4
/* Place an ADJUSTED-size block of asize bytes at free block bp */
static void place(void *bp, size_t asize) {
    
#if DEBUG
    if (NEXT_BLKP(bp)) {
        if (GET_PREV_ALLOC(GET_HEADER(NEXT_BLKP(bp)))) {
            dbg_printf("0x%lx: Fail to inform next block when free\n", (size_t)bp);
            exit(2);
        }
    }
#endif
    
    dbg_printf("=== Place, bp = 0x%lx, adjusted size = %ld \n", (size_t)bp, asize);
    
    /* block free size */
    size_t csize = GET_SIZE(GET_HEADER(bp));
    char *nextbp = NULL;
    int class_idx = 0;
    size_t flag = 0;
    
    /* Split only if the remainder can hold a minimum free block (4 * WSIZE bytes) */
    if ((csize - asize) >= (4 * WSIZE)) {
        
        class_idx = get_class_idx_by_size(GET_SIZE(GET_HEADER(bp)));
        
        /* Include previous block's information */
        flag = GET_PREV_ALLOC(GET_HEADER(bp)) ? 0x3 : 0x1;
        
        PUT(GET_HEADER(bp), PACK(asize, flag));
        PUT(GET_FOOTER(bp), PACK(asize, flag));
        
        nextbp = NEXT_BLKP(bp);
        
        PUT(GET_HEADER(nextbp), PACK((csize - asize), 0));
        PUT(GET_FOOTER(nextbp), PACK((csize - asize), 0));
        
        /* Inform the next block that this block is allocated */
        flag = GET(GET_HEADER(nextbp));
        flag |= 0x2;
        PUT(GET_HEADER(nextbp), flag);
        PUT(GET_FOOTER(nextbp), flag);
        
        split_free_block(bp, nextbp);
        remove_free_block(bp, class_idx);
        
        remove_free_block(nextbp, class_idx);
        insert_first(nextbp);
        
        mm_checkheap(CHECK_HEAP);
        
    }
    else {
        /* Include previous block's information */
        flag = GET_PREV_ALLOC(GET_HEADER(bp)) ? 0x3 : 0x1;
        
        PUT(GET_HEADER(bp), PACK(csize, flag));
        PUT(GET_FOOTER(bp), PACK(csize, flag));
        
        /* Inform the next block that this block is allocated */
#if DEBUG
        /* One-off trap for a specific address seen while debugging */
        if ((size_t)bp == 0x800004980) {
            dbg_printf("bp size = %ld\n", GET_SIZE(GET_HEADER(bp)));
            dbg_printf("NEXT_BLKP(bp): 0x%lx\n", (size_t)NEXT_BLKP(bp));
        }
#endif
        nextbp = NEXT_BLKP(bp);
        if (nextbp) {
            flag = GET(GET_HEADER(nextbp));
            flag |= 0x2;
            PUT(GET_HEADER(nextbp), flag);
            /* Only put footer when next block is free */
            if (!GET_ALLOC(GET_HEADER(nextbp))) {
                PUT(GET_FOOTER(nextbp), flag);
            }
            
        }
        
        remove_free_block(bp, get_class_idx_by_size(csize));
    }
}
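Example #4 leans on CS:APP-style boundary-tag macros that are not shown. A plausible set of definitions consistent with how they are used above, where bit 0 of a header marks this block allocated and bit 1 marks the previous block allocated (an assumption; the project's actual macros may differ):

/* Hypothetical boundary-tag macros matching the usage in place() above. */
#define WSIZE               8                     /* word/header size in bytes */
#define PACK(size, flags)   ((size) | (flags))    /* size with flag bits in the low bits */
#define GET(p)              (*(size_t *)(p))
#define PUT(p, val)         (*(size_t *)(p) = (val))
#define GET_SIZE(p)         (GET(p) & ~(size_t)0x7)
#define GET_ALLOC(p)        (GET(p) & 0x1)        /* bit 0: this block allocated */
#define GET_PREV_ALLOC(p)   (GET(p) & 0x2)        /* bit 1: previous block allocated */
#define GET_HEADER(bp)      ((char *)(bp) - WSIZE)
#define GET_FOOTER(bp)      ((char *)(bp) + GET_SIZE(GET_HEADER(bp)) - 2 * WSIZE)
#define NEXT_BLKP(bp)       ((char *)(bp) + GET_SIZE(GET_HEADER(bp)))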
Example #5
bdescr *
allocGroup (W_ n)
{
    bdescr *bd, *rem;
    StgWord ln;

    if (n == 0) barf("allocGroup: requested zero blocks");
    
    if (n >= BLOCKS_PER_MBLOCK)
    {
        StgWord mblocks;

        mblocks = BLOCKS_TO_MBLOCKS(n);

        // n_alloc_blocks doesn't count the extra blocks we get in a
        // megablock group.
        n_alloc_blocks += mblocks * BLOCKS_PER_MBLOCK;
        if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

        bd = alloc_mega_group(mblocks);
        // only the bdescrs of the first MB are required to be initialised
        initGroup(bd);
        goto finish;
    }
    
    n_alloc_blocks += n;
    if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

    ln = log_2_ceil(n);

    while (ln < MAX_FREE_LIST && free_list[ln] == NULL) {
        ln++;
    }

    if (ln == MAX_FREE_LIST) {
#if 0  /* useful for debugging fragmentation */
        if ((W_)mblocks_allocated * BLOCKS_PER_MBLOCK * BLOCK_SIZE_W
             - (W_)((n_alloc_blocks - n) * BLOCK_SIZE_W) > (2*1024*1024)/sizeof(W_)) {
            debugBelch("Fragmentation, wanted %d blocks, %ld MB free\n", n, ((mblocks_allocated * BLOCKS_PER_MBLOCK) - n_alloc_blocks) / BLOCKS_PER_MBLOCK);
            RtsFlags.DebugFlags.block_alloc = 1;
            checkFreeListSanity();
        }
#endif

        bd = alloc_mega_group(1);
        bd->blocks = n;
        initGroup(bd);		         // we know the group will fit
        rem = bd + n;
        rem->blocks = BLOCKS_PER_MBLOCK-n;
        initGroup(rem); // init the slop
        n_alloc_blocks += rem->blocks;
        freeGroup(rem);      	         // add the slop on to the free list
        goto finish;
    }

    bd = free_list[ln];

    if (bd->blocks == n)	        // exactly the right size!
    {
        dbl_link_remove(bd, &free_list[ln]);
        initGroup(bd);
    }
    else if (bd->blocks >  n)            // block too big...
    {                              
        bd = split_free_block(bd, n, ln);
        ASSERT(bd->blocks == n);
        initGroup(bd);
    }
    else
    {
        barf("allocGroup: free list corrupted");
    }

finish:
    IF_DEBUG(sanity, memset(bd->start, 0xaa, bd->blocks * BLOCK_SIZE));
    IF_DEBUG(sanity, checkFreeListSanity());
    return bd;
}
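A minimal, hypothetical caller for the GHC entry points above, assuming only the RTS block allocator API (allocGroup, freeGroup, bdescr) shown in these examples:

/* Grab a contiguous run of n blocks, scribble on it, give it back.
   Hypothetical usage sketch, not RTS code. */
static void use_group_once(W_ n)
{
    bdescr *bd = allocGroup(n);          /* bd->start points at the first block */
    memset(bd->start, 0, bd->blocks * BLOCK_SIZE);
    /* ... use the bd->blocks * BLOCK_SIZE bytes starting at bd->start ... */
    freeGroup(bd);                       /* return the whole group to the free lists */
}

In the threaded RTS the locking variants (allocGroup_lock, freeGroup_lock) would be used instead, since the functions shown here assume the caller already holds the allocator lock.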