/* Return up to n megablocks of free memory to the operating system.
 *
 * Walks the megablock free list, unmapping whole free megablock groups
 * (splitting the final group when only part of it is needed) until
 * either n megablocks have been released or the free list runs out.
 * Whatever remains becomes the new free_mblock_list.
 */
void
returnMemoryToOS(nat n /* megablocks */)
{
    /* bug fix: bd was declared 'static', which is unnecessary (it is
       reassigned on every call) and a latent hazard if this function
       were ever entered concurrently. */
    bdescr *bd;
    StgWord size;

    bd = free_mblock_list;
    while ((n > 0) && (bd != NULL)) {
        size = BLOCKS_TO_MBLOCKS(bd->blocks);
        if (size > n) {
            /* This group is bigger than we need: keep the front
               newSize megablocks on the free list and release only
               the tail n megablocks. */
            StgWord newSize = size - n;
            char *freeAddr = MBLOCK_ROUND_DOWN(bd->start);
            freeAddr += newSize * MBLOCK_SIZE;
            bd->blocks = MBLOCK_GROUP_BLOCKS(newSize);
            freeMBlocks(freeAddr, n);
            n = 0;
        } else {
            /* Release the whole group; advance before unmapping since
               bd's descriptor lives in the memory being freed. */
            char *freeAddr = MBLOCK_ROUND_DOWN(bd->start);
            n -= size;
            bd = bd->link;
            freeMBlocks(freeAddr, size);
        }
    }
    free_mblock_list = bd;

    osReleaseFreeMemory();

    IF_DEBUG(gc,
        if (n != 0) {
            debugBelch("Wanted to free %d more MBlocks than are freeable\n",
                       n);
        }
    );
}
/* Take a group of mblocks off the megablock free list.
 *
 * Only the start pointers on the first megablock and the blocks field
 * of the first bdescr are set up here; the caller is responsible for
 * running initGroup on the result afterwards.
 */
static bdescr *
alloc_mega_group (StgWord mblocks)
{
    StgWord want = MBLOCK_GROUP_BLOCKS(mblocks);
    bdescr *best_fit = NULL;
    bdescr *prev = NULL;
    bdescr *cur;
    bdescr *result;

    /* One pass over the free list: take an exact match immediately,
       otherwise remember the smallest group that is still big enough. */
    for (cur = free_mblock_list; cur != NULL; prev = cur, cur = cur->link) {
        if (cur->blocks == want) {
            /* exact fit: unlink it and hand it straight back */
            if (prev != NULL) {
                prev->link = cur->link;
            } else {
                free_mblock_list = cur->link;
            }
            return cur;
        }
        if (cur->blocks > want
            && (best_fit == NULL || cur->blocks < best_fit->blocks)) {
            best_fit = cur;
        }
    }

    if (best_fit != NULL) {
        /* Carve our chunk off the tail of the best-fit group; the
           shrunken front part stays linked into the free list. */
        StgWord have = BLOCKS_TO_MBLOCKS(best_fit->blocks);

        result = FIRST_BDESCR((StgWord8*)MBLOCK_ROUND_DOWN(best_fit) +
                              (have - mblocks) * MBLOCK_SIZE);
        best_fit->blocks = MBLOCK_GROUP_BLOCKS(have - mblocks);
        initMBlock(MBLOCK_ROUND_DOWN(result));
    } else {
        /* Nothing suitable on the free list: get fresh memory. */
        void *fresh = getMBlocks(mblocks);
        initMBlock(fresh); // only need to init the 1st one
        result = FIRST_BDESCR(fresh);
    }

    result->blocks = MBLOCK_GROUP_BLOCKS(mblocks);
    return result;
}
/* Try to reserve a len-byte, MBLOCK_SIZE-aligned region of address
 * space near hint.  We over-reserve by one MBLOCK_SIZE so an aligned
 * region of the requested length is guaranteed to lie inside the
 * mapping, then give the unused slop back to the OS.
 *
 * Returns the aligned start address, or NULL if the reservation failed.
 */
static void *
osTryReserveHeapMemory (W_ len, void *hint)
{
    void *base, *top;
    void *start, *end;

    /* We try to allocate len + MBLOCK_SIZE, because we need memory
       which is MBLOCK_SIZE aligned, and then we discard what we don't
       need */
    base = my_mmap(hint, len + MBLOCK_SIZE, MEM_RESERVE);
    if (base == NULL) {
        /* bug fix: the result was previously used unchecked; a failed
           reservation would have fed NULL into the arithmetic below */
        return NULL;
    }
    top = (void*)((W_)base + len + MBLOCK_SIZE);

    if (((W_)base & MBLOCK_MASK) != 0) {
        /* Misaligned: trim the partial mblock off each end. */
        start = MBLOCK_ROUND_UP(base);
        end = MBLOCK_ROUND_DOWN(top);
        ASSERT(((W_)end - (W_)start) == len);

        if (munmap(base, (W_)start-(W_)base) < 0) {
            sysErrorBelch("unable to release slop before heap");
        }
        if (munmap(end, (W_)top-(W_)end) < 0) {
            sysErrorBelch("unable to release slop after heap");
        }
    } else {
        /* Already aligned: all of the extra MBLOCK_SIZE of slop sits
           at the end of the mapping.  bug fix: release it instead of
           leaking it as permanently reserved address space. */
        start = base;
        end = (void*)((W_)base + len);
        if (munmap(end, MBLOCK_SIZE) < 0) {
            sysErrorBelch("unable to release slop after heap");
        }
    }

    return start;
}
/* Try to merge the megablock group p with its successor on the
 * (address-ordered) free list.  A merge is possible exactly when the
 * successor starts where p's address range ends.  Returns p after a
 * merge (so the caller can try to merge again from the same node),
 * otherwise the untouched successor.
 */
STATIC_INLINE bdescr *
coalesce_mblocks (bdescr *p)
{
    bdescr *next = p->link;

    if (next == NULL) {
        return next;
    }

    {
        StgWord p_mblocks = BLOCKS_TO_MBLOCKS(p->blocks);
        StgWord8 *p_end =
            (StgWord8*)MBLOCK_ROUND_DOWN(p) + p_mblocks * MBLOCK_SIZE;

        if ((StgWord8*)MBLOCK_ROUND_DOWN(next) == p_end) {
            /* adjacent in memory: absorb next into p */
            p->blocks = MBLOCK_GROUP_BLOCKS(
                p_mblocks + BLOCKS_TO_MBLOCKS(next->blocks));
            p->link = next->link;
            return p;
        }
    }

    return next;
}
/* Return the block group p to the allocator's free lists.
 *
 * A free block descriptor is marked by free == (P_)-1.  Megablock-sized
 * groups go straight back to the megablock free list; smaller groups
 * are coalesced with any free neighbours inside the same megablock and
 * then inserted into the size-segregated free_list.
 *
 * NOTE(review): appears to assume the caller holds the SM lock (or is
 * a lone GC thread) -- see the commented-out assertion below; confirm
 * against callers.
 */
void
freeGroup(bdescr *p)
{
    StgWord ln;

    // Todo: not true in multithreaded GC
    // ASSERT_SM_LOCK();

    /* catch double frees: p must not already be marked free */
    ASSERT(p->free != (P_)-1);

    p->free = (void *)-1;  /* indicates that this block is free */
    p->gen = NULL;
    p->gen_no = 0;
    /* fill the block group with garbage if sanity checking is on */
    IF_DEBUG(sanity,memset(p->start, 0xaa, (W_)p->blocks * BLOCK_SIZE));

    if (p->blocks == 0) barf("freeGroup: block size is zero");

    if (p->blocks >= BLOCKS_PER_MBLOCK)
    {
        /* Whole-megablock group: bypass the block free lists and hand
           it back to the megablock allocator. */
        StgWord mblocks;

        mblocks = BLOCKS_TO_MBLOCKS(p->blocks);
        // If this is an mgroup, make sure it has the right number of blocks
        ASSERT(p->blocks == MBLOCK_GROUP_BLOCKS(mblocks));

        n_alloc_blocks -= mblocks * BLOCKS_PER_MBLOCK;

        free_mega_group(p);
        return;
    }

    ASSERT(n_alloc_blocks >= p->blocks);
    n_alloc_blocks -= p->blocks;

    // coalesce forwards
    {
        bdescr *next;
        /* the group immediately after p in the same megablock */
        next = p + p->blocks;
        if (next <= LAST_BDESCR(MBLOCK_ROUND_DOWN(p))
            && next->free == (P_)-1)
        {
            /* absorb next into p and take it off its free list */
            p->blocks += next->blocks;
            ln = log_2(next->blocks);
            dbl_link_remove(next, &free_list[ln]);
            if (p->blocks == BLOCKS_PER_MBLOCK)
            {
                /* grew to a full megablock: return it wholesale */
                free_mega_group(p);
                return;
            }
            setup_tail(p);
        }
    }

    // coalesce backwards

    if (p != FIRST_BDESCR(MBLOCK_ROUND_DOWN(p)))
    {
        bdescr *prev;
        prev = p - 1;
        /* a tail descriptor has blocks == 0 and links to its head */
        if (prev->blocks == 0) prev = prev->link; // find the head

        if (prev->free == (P_)-1)
        {
            /* absorb p into the free group that precedes it */
            ln = log_2(prev->blocks);
            dbl_link_remove(prev, &free_list[ln]);
            prev->blocks += p->blocks;
            if (prev->blocks >= BLOCKS_PER_MBLOCK)
            {
                free_mega_group(prev);
                return;
            }
            p = prev;
        }
    }

    setup_tail(p);
    free_list_insert(p);

    IF_DEBUG(sanity, checkFreeListSanity());
}