void freeExec (void *addr)
{
    StgPtr  p  = (StgPtr)addr - 1;
    bdescr *bd = Bdescr((StgPtr)p);

    if ((bd->flags & BF_EXEC) == 0) {
        barf("freeExec: not executable");
    }

    if (*(StgPtr)p == 0) {
        barf("freeExec: already free?");
    }

    ACQUIRE_SM_LOCK;

    bd->gen_no -= *(StgPtr)p;
    *(StgPtr)p = 0;

    if (bd->gen_no == 0) {
        // Free the block if it is empty, but not if it is the block at
        // the head of the queue.
        if (bd != exec_block) {
            debugTrace(DEBUG_gc, "free exec block %p", bd->start);
            dbl_link_remove(bd, &exec_block);
            setExecutable(bd->start, bd->blocks * BLOCK_SIZE, rtsFalse);
            freeGroup(bd);
        } else {
            bd->free = bd->start;
        }
    }

    RELEASE_SM_LOCK;
}
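// Illustration (not part of the RTS): freeExec relies on its allocating
// counterpart having stored the object's size in the StgWord immediately
// before the address it hands out, and on bd->gen_no counting the live
// words in an exec block (hence the subtraction above).  A minimal
// standalone sketch of that header-word technique, with hypothetical
// names and plain malloc in place of the block allocator:
#if 0
#include <stdlib.h>

static size_t live_words;   // stand-in for the bd->gen_no bookkeeping

static void *alloc_with_header (size_t words)
{
    size_t *p = malloc((words + 1) * sizeof(size_t));
    p[0] = words;            // record the size before the payload
    live_words += words;
    return p + 1;            // caller sees only the payload
}

static void free_with_header (void *addr)
{
    size_t *p = (size_t *)addr - 1;   // step back to the header word
    live_words -= p[0];
    p[0] = 0;                // a zeroed header catches double frees
    free(p);
}
#endif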
// Take a free block group bd, and split off a group of size n from
// it.  Adjust the free list as necessary, and return the new group.
static bdescr *
split_free_block (bdescr *bd, W_ n, nat ln)
{
    bdescr *fg; // free group

    ASSERT(bd->blocks > n);
    dbl_link_remove(bd, &free_list[ln]);
    fg = bd + bd->blocks - n; // take n blocks off the end
    fg->blocks = n;
    bd->blocks -= n;
    setup_tail(bd);
    ln = log_2(bd->blocks);
    dbl_link_onto(bd, &free_list[ln]);
    return fg;
}
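// Illustration (hypothetical numbers): splitting from the *end* of the
// run means bd keeps its own bdescr and start address; only its tail
// descriptor and free-list bucket change.  For a 12-block free group
// sitting on free_list[3] (log_2(12) == 3) and a request for 5 blocks:
#if 0
    bdescr *fg = split_free_block(bd, 5, 3);
    // fg->blocks == 5   -- the last 5 blocks of the original run
    // bd->blocks == 7   -- re-filed on free_list[log_2(7)] == free_list[2]
#endif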
//
// Allocate a chunk of blocks that is at least min and at most max
// blocks in size.  This API is used by the nursery allocator, which
// prefers contiguous memory but doesn't require it.  When memory is
// fragmented we might have lots of chunks that are less than a full
// megablock, so allowing the nursery allocator to use these reduces
// fragmentation considerably.  e.g. on a GHC build with +RTS -H, I
// saw fragmentation go from 17MB down to 3MB on a single compile.
//
// Further to this: in #7257 there is a program that creates serious
// fragmentation such that the heap is full of tiny <4 block chains.
// The nursery allocator therefore has to use single blocks to avoid
// fragmentation, but we make sure that we allocate large blocks
// preferably if there are any.
//
bdescr *
allocLargeChunk (W_ min, W_ max)
{
    bdescr *bd;
    StgWord ln, lnmax;

    if (min >= BLOCKS_PER_MBLOCK) {
        return allocGroup(max);
    }

    ln = log_2_ceil(min);
    lnmax = log_2_ceil(max); // tops out at MAX_FREE_LIST

    while (ln < lnmax && free_list[ln] == NULL) {
        ln++;
    }
    if (ln == lnmax) {
        return allocGroup(max);
    }
    bd = free_list[ln];

    if (bd->blocks <= max)      // whole group is small enough: take it
    {
        dbl_link_remove(bd, &free_list[ln]);
        initGroup(bd);
    }
    else                        // group too big: split off max blocks
    {
        bd = split_free_block(bd, max, ln);
        ASSERT(bd->blocks == max);
        initGroup(bd);
    }

    n_alloc_blocks += bd->blocks;
    if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

    IF_DEBUG(sanity, memset(bd->start, 0xaa, bd->blocks * BLOCK_SIZE));
    IF_DEBUG(sanity, checkFreeListSanity());
    return bd;
}
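// Illustration (hypothetical call site, not taken from the RTS): a
// nursery willing to accept anything from 16 to 512 contiguous blocks
// would call
#if 0
    bdescr *chunk = allocLargeChunk(16, 512);
    // chunk->blocks lies in [16, 512]: free_list[ln] only holds groups
    // of at least 2^ln blocks and ln starts at log_2_ceil(16), so any
    // group found has >= 16 blocks; anything over 512 is split down.
#endif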
void freeGroup (bdescr *p)
{
    StgWord ln;

    // Todo: not true in multithreaded GC
    //    ASSERT_SM_LOCK();

    ASSERT(p->free != (P_)-1);

    p->free = (void *)-1;  /* indicates that this block is free */
    p->gen = NULL;
    p->gen_no = 0;
    /* fill the block group with garbage if sanity checking is on */
    IF_DEBUG(sanity, memset(p->start, 0xaa, (W_)p->blocks * BLOCK_SIZE));

    if (p->blocks == 0) barf("freeGroup: block size is zero");

    if (p->blocks >= BLOCKS_PER_MBLOCK)
    {
        StgWord mblocks;

        mblocks = BLOCKS_TO_MBLOCKS(p->blocks);
        // If this is an mgroup, make sure it has the right number of blocks
        ASSERT(p->blocks == MBLOCK_GROUP_BLOCKS(mblocks));

        n_alloc_blocks -= mblocks * BLOCKS_PER_MBLOCK;

        free_mega_group(p);
        return;
    }

    ASSERT(n_alloc_blocks >= p->blocks);
    n_alloc_blocks -= p->blocks;

    // coalesce forwards
    {
        bdescr *next;
        next = p + p->blocks;
        if (next <= LAST_BDESCR(MBLOCK_ROUND_DOWN(p))
            && next->free == (P_)-1)
        {
            p->blocks += next->blocks;
            ln = log_2(next->blocks);
            dbl_link_remove(next, &free_list[ln]);
            if (p->blocks == BLOCKS_PER_MBLOCK)
            {
                free_mega_group(p);
                return;
            }
            setup_tail(p);
        }
    }

    // coalesce backwards
    if (p != FIRST_BDESCR(MBLOCK_ROUND_DOWN(p)))
    {
        bdescr *prev;
        prev = p - 1;
        if (prev->blocks == 0) prev = prev->link; // find the head

        if (prev->free == (P_)-1)
        {
            ln = log_2(prev->blocks);
            dbl_link_remove(prev, &free_list[ln]);
            prev->blocks += p->blocks;
            if (prev->blocks >= BLOCKS_PER_MBLOCK)
            {
                free_mega_group(prev);
                return;
            }
            p = prev;
        }
    }

    setup_tail(p);
    free_list_insert(p);

    IF_DEBUG(sanity, checkFreeListSanity());
}
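// Illustration (standalone sketch, hypothetical names): the backwards
// coalesce works because setup_tail() makes the last bdescr of every
// free group point back at its head, with blocks == 0 marking it as a
// tail.  Stripped down to just that lookup:
#if 0
typedef struct desc_ {
    unsigned blocks;        // nonzero at a group head, 0 at its tail
    struct desc_ *link;     // a tail's link points back at the head
} desc;

// The step freeGroup's backwards-coalesce performs on "p - 1":
// either we already sit on a head, or one hop reaches it in O(1).
static desc *find_head (desc *d)
{
    if (d->blocks == 0) d = d->link;
    return d;
}
#endif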
bdescr *
allocGroup (W_ n)
{
    bdescr *bd, *rem;
    StgWord ln;

    if (n == 0) barf("allocGroup: requested zero blocks");

    if (n >= BLOCKS_PER_MBLOCK)
    {
        StgWord mblocks;

        mblocks = BLOCKS_TO_MBLOCKS(n);

        // n_alloc_blocks doesn't count the extra blocks we get in a
        // megablock group.
        n_alloc_blocks += mblocks * BLOCKS_PER_MBLOCK;
        if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

        bd = alloc_mega_group(mblocks);
        // only the bdescrs of the first MB are required to be initialised
        initGroup(bd);
        goto finish;
    }

    n_alloc_blocks += n;
    if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

    ln = log_2_ceil(n);

    while (ln < MAX_FREE_LIST && free_list[ln] == NULL) {
        ln++;
    }

    if (ln == MAX_FREE_LIST) {
#if 0  /* useful for debugging fragmentation */
        if ((W_)mblocks_allocated * BLOCKS_PER_MBLOCK * BLOCK_SIZE_W
              - (W_)((n_alloc_blocks - n) * BLOCK_SIZE_W) > (2*1024*1024)/sizeof(W_)) {
            debugBelch("Fragmentation, wanted %d blocks, %ld MB free\n",
                       n, ((mblocks_allocated * BLOCKS_PER_MBLOCK) - n_alloc_blocks)
                              / BLOCKS_PER_MBLOCK);
            RtsFlags.DebugFlags.block_alloc = 1;
            checkFreeListSanity();
        }
#endif

        bd = alloc_mega_group(1);
        bd->blocks = n;
        initGroup(bd);                   // we know the group will fit
        rem = bd + n;
        rem->blocks = BLOCKS_PER_MBLOCK - n;
        initGroup(rem);                  // init the slop
        n_alloc_blocks += rem->blocks;
        freeGroup(rem);                  // add the slop on to the free list
        goto finish;
    }

    bd = free_list[ln];

    if (bd->blocks == n)                 // exactly the right size!
    {
        dbl_link_remove(bd, &free_list[ln]);
        initGroup(bd);
    }
    else if (bd->blocks > n)             // block too big...
    {
        bd = split_free_block(bd, n, ln);
        ASSERT(bd->blocks == n);
        initGroup(bd);
    }
    else
    {
        barf("allocGroup: free list corrupted");
    }

finish:
    IF_DEBUG(sanity, memset(bd->start, 0xaa, bd->blocks * BLOCK_SIZE));
    IF_DEBUG(sanity, checkFreeListSanity());
    return bd;
}
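// Illustration (standalone sketch, hypothetical names): the free lists
// are binned by floor(log2(blocks)), so scanning upwards from
// log_2_ceil(n) only ever visits bins whose groups have >= n blocks:
#if 0
#include <stddef.h>

static unsigned log2_floor (size_t n)   // bin of a group of n blocks
{
    unsigned b = 0;
    while (n >>= 1) b++;
    return b;
}

static unsigned log2_ceil (size_t n)    // first bin that can satisfy n
{
    unsigned b = log2_floor(n);
    return ((size_t)1 << b) < n ? b + 1 : b;
}

// Every group in bin b has size in [2^b, 2^(b+1)), so a group found
// in any bin >= log2_ceil(n) has at least n blocks and can be split.
#endif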