Example #1
static bdescr *
allocGroup_sync(nat n)
{
    bdescr *bd;
    ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
    bd = allocGroup(n);
    RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
    return bd;
}
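The wrapper above serialises calls to allocGroup with a spin lock, since the underlying block allocator is not safe to call from several GC threads at once. Below is a minimal standalone sketch of the same acquire/allocate/release pattern using a C11 atomic flag in place of the RTS spin-lock macros; the names (alloc_sync, alloc_group_sync) and the use of malloc as the underlying allocator are illustrative stand-ins, not the RTS API.

#include <stdatomic.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the RTS spin lock and block allocator. */
static atomic_flag alloc_sync = ATOMIC_FLAG_INIT;

static void acquire_spin_lock(atomic_flag *lock)
{
    while (atomic_flag_test_and_set_explicit(lock, memory_order_acquire))
        ;  /* spin until the current holder releases the lock */
}

static void release_spin_lock(atomic_flag *lock)
{
    atomic_flag_clear_explicit(lock, memory_order_release);
}

/* Analogue of allocGroup_sync: serialise access to an allocator that is
 * not thread-safe (malloc of n 4096-byte "blocks" stands in for allocGroup). */
static void *alloc_group_sync(size_t n)
{
    void *p;
    acquire_spin_lock(&alloc_sync);
    p = malloc(n * 4096);
    release_spin_lock(&alloc_sync);
    return p;
}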
Example #2
bdescr *
allocGroup_lock(W_ n)
{
    bdescr *bd;
    ACQUIRE_SM_LOCK;
    bd = allocGroup(n);
    RELEASE_SM_LOCK;
    return bd;
}
Example #3
//
// Allocate a chunk of blocks that is at least min and at most max
// blocks in size. This API is used by the nursery allocator that
// wants contiguous memory preferably, but doesn't require it.  When
// memory is fragmented we might have lots of chunks that are
// less than a full megablock, so allowing the nursery allocator to
// use these reduces fragmentation considerably.  e.g. on a GHC build
// with +RTS -H, I saw fragmentation go from 17MB down to 3MB on a
// single compile.
//
// Further to this: in #7257 there is a program that creates serious
// fragmentation such that the heap is full of tiny <4 block chains.
// The nursery allocator therefore has to use single blocks to avoid
// fragmentation, but we make sure that we allocate large blocks
// preferably if there are any.
//
bdescr *
allocLargeChunk (W_ min, W_ max)
{
    bdescr *bd;
    StgWord ln, lnmax;

    if (min >= BLOCKS_PER_MBLOCK) {
        return allocGroup(max);
    }

    ln = log_2_ceil(min);
    lnmax = log_2_ceil(max); // tops out at MAX_FREE_LIST

    while (ln < lnmax && free_list[ln] == NULL) {
        ln++;
    }
    if (ln == lnmax) {
        return allocGroup(max);
    }
    bd = free_list[ln];

    if (bd->blocks <= max)              // group already fits within max: take it whole
    {
        dbl_link_remove(bd, &free_list[ln]);
        initGroup(bd);
    }
    else   // group too big: split off max blocks
    {
        bd = split_free_block(bd, max, ln);
        ASSERT(bd->blocks == max);
        initGroup(bd);
    }

    n_alloc_blocks += bd->blocks;
    if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

    IF_DEBUG(sanity, memset(bd->start, 0xaa, bd->blocks * BLOCK_SIZE));
    IF_DEBUG(sanity, checkFreeListSanity());
    return bd;
}
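The loop above scans the segregated free lists from ceil(log2(min)) upward and takes the first non-empty bucket below ceil(log2(max)), falling back to an exact allocation of max blocks when nothing suitable is already free. The sketch below models just that bucket search over a simplified free-list array; splitting an oversized group down to max is omitted, and the types and helpers (chunk, buckets, alloc_exact) are invented for illustration rather than the RTS data structures.

#include <stddef.h>
#include <stdlib.h>

#define N_BUCKETS 16

/* Hypothetical free-list entry: bucket b holds chunks of roughly 2^b blocks. */
typedef struct chunk { size_t blocks; struct chunk *next; } chunk;

static chunk *buckets[N_BUCKETS];

static size_t log2_ceil(size_t n)
{
    size_t b = 0;
    while (((size_t)1 << b) < n) b++;
    return b;
}

static chunk *alloc_exact(size_t blocks)   /* stands in for allocGroup(max) */
{
    chunk *c = malloc(sizeof *c);
    if (c) { c->blocks = blocks; c->next = NULL; }
    return c;
}

/* Return a free chunk of at least min blocks if one is already on the
 * free lists, otherwise allocate exactly max blocks. */
static chunk *alloc_large_chunk(size_t min, size_t max)
{
    size_t ln    = log2_ceil(min);
    size_t lnmax = log2_ceil(max);

    if (lnmax >= N_BUCKETS)
        return alloc_exact(max);        /* request too big for the buckets */

    while (ln < lnmax && buckets[ln] == NULL)
        ln++;                           /* skip empty buckets */
    if (ln == lnmax)
        return alloc_exact(max);        /* nothing suitable on the free lists */

    chunk *c = buckets[ln];             /* pop the first chunk found */
    buckets[ln] = c->next;
    c->next = NULL;
    return c;
}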
Example #4
static void
allocBlocks_sync(nat n, bdescr **hd, bdescr **tl, 
                 nat gen_no, step *stp,
                 StgWord32 flags)
{
    bdescr *bd;
    nat i;
    ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
    bd = allocGroup(n);
    for (i = 0; i < n; i++) {
        bd[i].blocks = 1;
        bd[i].gen_no = gen_no;
        bd[i].step = stp;
        bd[i].flags = flags;
        bd[i].link = &bd[i+1];
        bd[i].u.scan = bd[i].free = bd[i].start;
    }
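    // Note: the last block's link still points one past the group (&bd[n]);
    // the tail is handed back via *tl so the caller can terminate or extend
    // the chain.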
    *hd = bd;
    *tl = &bd[n-1];
    RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
}
Example #5
AdjustorWritable allocateExec (W_ bytes, AdjustorExecutable *exec_ret)
{
    void *ret;
    W_ n;

    ACQUIRE_SM_LOCK;

    // round up to words.
    n  = (bytes + sizeof(W_) + 1) / sizeof(W_);

    if (n+1 > BLOCK_SIZE_W) {
        barf("allocateExec: can't handle large objects");
    }

    if (exec_block == NULL ||
        exec_block->free + n + 1 > exec_block->start + BLOCK_SIZE_W) {
        bdescr *bd;
        W_ pagesize = getPageSize();
        bd = allocGroup(stg_max(1, pagesize / BLOCK_SIZE));
        debugTrace(DEBUG_gc, "allocate exec block %p", bd->start);
        bd->gen_no = 0;
        bd->flags = BF_EXEC;
        bd->link = exec_block;
        if (exec_block != NULL) {
            exec_block->u.back = bd;
        }
        bd->u.back = NULL;
        setExecutable(bd->start, bd->blocks * BLOCK_SIZE, rtsTrue);
        exec_block = bd;
    }
    *(exec_block->free) = n;  // store the size of this chunk
    exec_block->gen_no += n;  // gen_no stores the number of words allocated
    ret = exec_block->free + 1;
    exec_block->free += n + 1;

    RELEASE_SM_LOCK;
    *exec_ret = ret;
    return ret;
}
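allocateExec hands out small executable chunks from exec_block by writing the chunk's word count into the slot at free, returning the address one word past it, and bumping free by n + 1 words (gen_no is reused here as a running count of allocated words). A tiny standalone model of that header-plus-payload bump allocation is sketched below; the fixed word buffer and the names block, block_free and bump_alloc_words are invented for illustration.

#include <stddef.h>
#include <stdint.h>

#define CHUNK_WORDS 1024

/* Hypothetical single block: a flat array of words and a bump index. */
static uintptr_t block[CHUNK_WORDS];
static size_t    block_free;            /* index of the next free word */

/* Reserve n payload words.  The word just before the returned payload holds
 * n, mirroring "*(exec_block->free) = n; ret = exec_block->free + 1;". */
static uintptr_t *bump_alloc_words(size_t n)
{
    if (block_free + n + 1 > CHUNK_WORDS)
        return NULL;                    /* block full: the RTS would start a new one */
    block[block_free] = n;              /* size header */
    uintptr_t *payload = &block[block_free + 1];
    block_free += n + 1;                /* skip header + payload */
    return payload;
}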
Example #6
StgPtr
allocate (Capability *cap, W_ n)
{
    bdescr *bd;
    StgPtr p;

    TICK_ALLOC_HEAP_NOCTR(WDS(n));
    CCS_ALLOC(cap->r.rCCCS,n);
    if (cap->r.rCurrentTSO != NULL) {
        // cap->r.rCurrentTSO->alloc_limit -= n*sizeof(W_)
        ASSIGN_Int64((W_*)&(cap->r.rCurrentTSO->alloc_limit),
                     (PK_Int64((W_*)&(cap->r.rCurrentTSO->alloc_limit))
                      - n*sizeof(W_)));
    }

    if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
        // The largest number of words such that
        // the computation of req_blocks will not overflow.
        W_ max_words = (HS_WORD_MAX & ~(BLOCK_SIZE-1)) / sizeof(W_);
        W_ req_blocks;

        if (n > max_words)
            req_blocks = HS_WORD_MAX; // signal overflow below
        else
            req_blocks = (W_)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;

        // Attempting to allocate an object larger than maxHeapSize
        // should definitely be disallowed.  (bug #1791)
        if ((RtsFlags.GcFlags.maxHeapSize > 0 &&
             req_blocks >= RtsFlags.GcFlags.maxHeapSize) ||
            req_blocks >= HS_INT32_MAX)   // avoid overflow when
                                          // calling allocGroup() below
        {
            heapOverflow();
            // heapOverflow() doesn't exit (see #2592), but we aren't
            // in a position to do a clean shutdown here: we
            // either have to allocate the memory or exit now.
            // Allocating the memory would be bad, because the user
            // has requested that we not exceed maxHeapSize, so we
            // just exit.
            stg_exit(EXIT_HEAPOVERFLOW);
        }

        ACQUIRE_SM_LOCK;
        bd = allocGroup(req_blocks);
        dbl_link_onto(bd, &g0->large_objects);
        g0->n_large_blocks += bd->blocks; // might be larger than req_blocks
        g0->n_new_large_words += n;
        RELEASE_SM_LOCK;
        initBdescr(bd, g0, g0);
        bd->flags = BF_LARGE;
        bd->free = bd->start + n;
        cap->total_allocated += n;
        return bd->start;
    }

    /* small allocation (<LARGE_OBJECT_THRESHOLD) */

    bd = cap->r.rCurrentAlloc;
    if (bd == NULL || bd->free + n > bd->start + BLOCK_SIZE_W) {
        
        if (bd) finishedNurseryBlock(cap,bd);

        // The CurrentAlloc block is full, we need to find another
        // one.  First, we try taking the next block from the
        // nursery:
        bd = cap->r.rCurrentNursery->link;
        
        if (bd == NULL) {
            // The nursery is empty: allocate a fresh block (we can't
            // fail here).
            ACQUIRE_SM_LOCK;
            bd = allocBlock();
            cap->r.rNursery->n_blocks++;
            RELEASE_SM_LOCK;
            initBdescr(bd, g0, g0);
            bd->flags = 0;
            // If we had to allocate a new block, then we'll GC
            // pretty quickly now, because MAYBE_GC() will
            // notice that CurrentNursery->link is NULL.
        } else {
            newNurseryBlock(bd);
            // we have a block in the nursery: take it and put
            // it at the *front* of the nursery list, and use it
            // to allocate() from.
            //
            // Previously the nursery looked like this:
            //
            //           CurrentNursery
            //                  /
            //                +-+    +-+
            // nursery -> ... |A| -> |B| -> ...
            //                +-+    +-+
            //
            // After doing this, it looks like this:
            //
            //                      CurrentNursery
            //                            /
            //            +-+           +-+
            // nursery -> |B| -> ... -> |A| -> ...
            //            +-+           +-+
            //             |
            //             CurrentAlloc
            //
            // The point is to get the block out of the way of the
            // advancing CurrentNursery pointer, while keeping it
            // on the nursery list so we don't lose track of it.
            cap->r.rCurrentNursery->link = bd->link;
            if (bd->link != NULL) {
                bd->link->u.back = cap->r.rCurrentNursery;
            }
        }
        dbl_link_onto(bd, &cap->r.rNursery->blocks);
        cap->r.rCurrentAlloc = bd;
        IF_DEBUG(sanity, checkNurserySanity(cap->r.rNursery));
    }
    p = bd->free;
    bd->free += n;

    IF_DEBUG(sanity, ASSERT(*((StgWord8*)p) == 0xaa));
    return p;
}
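Before taking the large-object path, allocate() makes sure the request can be converted to a block count without overflow: max_words is the largest word count whose byte size can still be rounded up to a block boundary inside a machine word, and anything larger becomes a sentinel value that the subsequent limit checks reject. A standalone sketch of that overflow-safe words-to-blocks conversion follows; MODEL_BLOCK_SIZE and words_to_blocks are made-up stand-ins for BLOCK_SIZE and the inline arithmetic.

#include <stddef.h>
#include <stdint.h>

#define MODEL_BLOCK_SIZE 4096u          /* stand-in for BLOCK_SIZE (a power of two) */

/* Convert a word count to a block count without overflowing, mirroring the
 * max_words / req_blocks guard in allocate().  Returns SIZE_MAX on overflow
 * so that the caller's heap-limit check rejects the request. */
static size_t words_to_blocks(size_t n_words)
{
    /* Largest word count whose byte size can still be rounded up safely. */
    size_t max_words = (SIZE_MAX & ~(size_t)(MODEL_BLOCK_SIZE - 1)) / sizeof(size_t);

    if (n_words > max_words)
        return SIZE_MAX;                /* signal overflow to the caller */

    size_t bytes = n_words * sizeof(size_t);
    return (bytes + MODEL_BLOCK_SIZE - 1) / MODEL_BLOCK_SIZE;
}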
Example #7
File: CNF.c Project: goldfirere/ghc
static StgCompactNFDataBlock *
compactAllocateBlockInternal(Capability            *cap,
                             StgWord                aligned_size,
                             StgCompactNFDataBlock *first,
                             AllocateOp             operation)
{
    StgCompactNFDataBlock *self;
    bdescr *block, *head;
    uint32_t n_blocks;
    generation *g;

    n_blocks = aligned_size / BLOCK_SIZE;

    // Attempting to allocate an object larger than maxHeapSize
    // should definitely be disallowed.  (bug #1791)
    if ((RtsFlags.GcFlags.maxHeapSize > 0 &&
         n_blocks >= RtsFlags.GcFlags.maxHeapSize) ||
        n_blocks >= HS_INT32_MAX)   // avoid overflow when
                                    // calling allocGroup() below
    {
        reportHeapOverflow();
        // reportHeapOverflow() doesn't exit (see #2592), but we aren't
        // in a position to do a clean shutdown here: we
        // either have to allocate the memory or exit now.
        // Allocating the memory would be bad, because the user
        // has requested that we not exceed maxHeapSize, so we
        // just exit.
        stg_exit(EXIT_HEAPOVERFLOW);
    }

    // It is imperative that first is the first block in the compact
    // (or NULL if the compact does not exist yet)
    // because the evacuate code does not update the generation of
    // blocks other than the first (so we would get the statistics
    // wrong and crash in Sanity)
    if (first != NULL) {
        block = Bdescr((P_)first);
        g = block->gen;
    } else {
        g = g0;
    }

    ACQUIRE_SM_LOCK;
    block = allocGroup(n_blocks);
    switch (operation) {
    case ALLOCATE_NEW:
        ASSERT(first == NULL);
        ASSERT(g == g0);
        dbl_link_onto(block, &g0->compact_objects);
        g->n_compact_blocks += block->blocks;
        g->n_new_large_words += aligned_size / sizeof(StgWord);
        break;

    case ALLOCATE_IMPORT_NEW:
        dbl_link_onto(block, &g0->compact_blocks_in_import);
        /* fallthrough */
    case ALLOCATE_IMPORT_APPEND:
        ASSERT(first == NULL);
        ASSERT(g == g0);
        g->n_compact_blocks_in_import += block->blocks;
        g->n_new_large_words += aligned_size / sizeof(StgWord);
        break;

    case ALLOCATE_APPEND:
        g->n_compact_blocks += block->blocks;
        if (g == g0)
            g->n_new_large_words += aligned_size / sizeof(StgWord);
        break;

    default:
#if defined(DEBUG)
        ASSERT(!"code should not be reached");
#else
        RTS_UNREACHABLE;
#endif
    }
    RELEASE_SM_LOCK;

    cap->total_allocated += aligned_size / sizeof(StgWord);

    self = (StgCompactNFDataBlock*) block->start;
    self->self = self;
    self->next = NULL;

    head = block;
    initBdescr(head, g, g);
    head->flags = BF_COMPACT;
    for (block = head + 1, n_blocks --; n_blocks > 0; block++, n_blocks--) {
        block->link = head;
        block->blocks = 0;
        block->flags = BF_COMPACT;
    }

    return self;
}
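Only the head descriptor of the group is fully initialised above; the closing loop marks every trailing descriptor as part of the compact by pointing its link back at the head and zeroing its block count. The sketch below is a cut-down model of that head-plus-trailers layout, together with one way a lookup could recover the head from a trailer; the desc type and helpers are invented for illustration and make no claim about how the RTS actually performs that lookup.

#include <stddef.h>

#define FLAG_COMPACT 1

/* Hypothetical cut-down descriptor: one per block in a contiguous group. */
typedef struct desc {
    struct desc *link;                  /* trailers point back at the head */
    size_t       blocks;                /* head: group size; trailers: 0 */
    int          flags;
} desc;

/* Initialise a group of n descriptors: full data on the head, back-pointers
 * on the trailers, mirroring the closing loop above. */
static void init_group(desc *head, size_t n)
{
    head->link   = NULL;
    head->blocks = n;
    head->flags  = FLAG_COMPACT;
    for (desc *d = head + 1; d < head + n; d++) {
        d->link   = head;               /* back-pointer to the head descriptor */
        d->blocks = 0;                  /* marks this descriptor as a trailer */
        d->flags  = FLAG_COMPACT;
    }
}

/* One way a lookup could recover the head from any descriptor in the group. */
static desc *desc_head(desc *d)
{
    return d->blocks == 0 ? d->link : d;
}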
Example #8
static bool allocBlocks(int level, int numBlocks, Block* result)
{
    //printf("Allocating %d blocks at level %d\n", numBlocks, level);
    if(level == 0)
    {
        //printf("  Will attempt to find top-level group of %d blocks.\n", numBlocks);
        return findTopLevelGroup(numBlocks, result);
    }
    //Need to find subblock(s)
    //Best to worst options:
    //-Use same parent block from last allocation
    //-Allocate new parent block somewhere else
    //-Depth-first search subdivided blocks looking for free blocks
    //-Return false
    //First try lastParents
    int bestOvershootParent = -1;
    int bestOvershoot = 0xFFFF;
    int overshoot = 0;
    for(int i = 0; i < PARENT_SAVES; i++)
    {
        if(validBlock(lastParents[level][i]))
        {
            if(findGroupInBlock(lastParents[level][i], numBlocks, result, &overshoot))
            {
                if(overshoot < bestOvershoot)
                {
                    bestOvershoot = overshoot;
                    bestOvershootParent = i;
                }
            }
        }
    }
    if(bestOvershootParent != -1)
    {
        Block use = lastParents[level][bestOvershootParent];
        if(!findGroupInBlock(use, numBlocks, result, NULL))
        {
            //This should never ever happen
            //puts("VERY FATAL ERROR: CHECK FOUND LAST_PARENT CASE");
            return false;
        }
        //puts("Allocation successful via lastParents");
        //printf("Resulting block group: ");
        //printBlock(*result);
        //puts("");
        allocGroup(*result, numBlocks);
        return true;
    }
    //lastParents must be updated
    //If one is not yet valid, set it to a newly allocated block
    //Immediately use subblocks of that new block to fulfill current request
    //puts("Did not find good cached parent block.");
    bool allValid = true;
    Block* toReplace;
    for(int i = 0; i < PARENT_SAVES; i++)
    {
        if(!validBlock(lastParents[level][i]))
        {
            allValid = false;
            toReplace = &lastParents[level][i];
            break;
        }
    }
    if(!allValid)
    {
        bool success = replaceParentSave(toReplace, level - 1);
        if(success)
        {
            //use *toReplace (now a fresh subdivided block) as parent
            *result = getFirstSubblock(*toReplace);
            allocGroup(*result, numBlocks);
            //puts("Allocation successful via newParent");
            return true;
        }
    }
    else
    {
        int worstGroup = 100;
        int worstBlock = -1;
        for(int i = 0; i < PARENT_SAVES; i++)
        {
            int thisGroup = getMaxGroup(lastParents[level][i]);
            if(thisGroup < worstGroup)
            {
                worstGroup = thisGroup;
                worstBlock = i;
            }
        }
        toReplace = &lastParents[level][worstBlock];
        //replace worstBlock
        bool success = replaceParentSave(toReplace, level - 1);
        if(success)
        {
            *result = getFirstSubblock(*toReplace);
            allocGroup(*result, numBlocks);
            //puts("Allocation successful via new parent.");
            return true;
        }
    }
    //If here, then depth-first search for an appropriate parent block
    //that isn't necessarily listed in lastParent.
    Block first;
    for(int i = 0; i < numSubblocks[0]; i++)
    {
        Block topLevel = {{i, -1, -1, -1}};
        if(getBlockStatus(topLevel) == SUBDIV)
        {
            if(groupSearch(level, numBlocks, topLevel, &first))
            {
                *result = first;
                allocGroup(*result, numBlocks);
                //puts("Allocation successful via worst-case dfs for parent");
                return true;
            }
        }
    }
    //puts("Block group allocation failed!");
    return false;
}
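The first phase above scores each cached parent block by how far the free group it finds inside that parent overshoots the request, and keeps the candidate with the least overshoot. Detached from the block bookkeeping, that selection reduces to the loop sketched below; free_in_parent and pick_parent are invented stand-ins for findGroupInBlock and the Block machinery, with a plain capacity check in place of the real group search.

#include <stddef.h>

#define N_CANDIDATES 4                  /* stands in for PARENT_SAVES */

/* Hypothetical cached parents: free block capacity of each candidate. */
static int free_in_parent[N_CANDIDATES];

/* Return the index of the cached parent whose capacity overshoots the
 * request the least, or -1 if none can satisfy it (the bestOvershoot
 * scan in allocBlocks, reduced to a capacity comparison). */
static int pick_parent(int numBlocks)
{
    int best = -1;
    int best_overshoot = 0xFFFF;
    for (int i = 0; i < N_CANDIDATES; i++) {
        if (free_in_parent[i] >= numBlocks) {
            int overshoot = free_in_parent[i] - numBlocks;
            if (overshoot < best_overshoot) {
                best_overshoot = overshoot;
                best = i;
            }
        }
    }
    return best;
}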
Example #9
bdescr *
allocBlock(void)
{
    return allocGroup(1);
}