void freeGroup_lock(bdescr *p)
{
    ACQUIRE_SM_LOCK;
    freeGroup(p);
    RELEASE_SM_LOCK;
}
void freeExec (void *addr)
{
    StgPtr p = (StgPtr)addr - 1;
    bdescr *bd = Bdescr((StgPtr)p);

    if ((bd->flags & BF_EXEC) == 0) {
        barf("freeExec: not executable");
    }

    if (*(StgPtr)p == 0) {
        barf("freeExec: already free?");
    }

    ACQUIRE_SM_LOCK;

    bd->gen_no -= *(StgPtr)p;
    *(StgPtr)p = 0;

    if (bd->gen_no == 0) {
        // Free the block if it is empty, but not if it is the block at
        // the head of the queue.
        if (bd != exec_block) {
            debugTrace(DEBUG_gc, "free exec block %p", bd->start);
            dbl_link_remove(bd, &exec_block);
            setExecutable(bd->start, bd->blocks * BLOCK_SIZE, rtsFalse);
            freeGroup(bd);
        } else {
            bd->free = bd->start;
        }
    }

    RELEASE_SM_LOCK;
}
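/* Note [freeExec header word]
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * A sketch of the layout freeExec() appears to rely on, inferred from
 * the code above rather than copied from the allocator: each chunk is
 * prefixed with a one-word header holding the chunk size in words, and
 * bd->gen_no is reused as a running count of words still live in the
 * block.
 *
 *      header word       chunk payload
 *     +--------------+---------------------+
 *     | size (words) |  ... code ...       |
 *     +--------------+---------------------+
 *      ^p              ^addr given to caller
 *
 * Hence p = addr - 1, the "already free?" check on a zero header, and
 * freeing the block once gen_no drops back to zero.
 */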
void freeGroup_sync(bdescr *bd)
{
    ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
    freeGroup(bd);
    RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
}
GroupSettings::GroupSettings(QWidget *parent) :
    QWidget(parent),
    ui(new Ui::GroupSettings)
{
    ui->setupUi(this);
    connect(ui->groupName, SIGNAL(editingFinished()), SLOT(setGroupName()));
    freeGroup();
}
void freeChain(bdescr *bd)
{
    bdescr *next_bd;

    while (bd != NULL) {
        next_bd = bd->link;
        freeGroup(bd);
        bd = next_bd;
    }
}
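/* For callers outside the GC, a locked variant in the same style as
 * freeGroup_lock() above would wrap the whole chain walk in the SM
 * lock. A minimal sketch (the RTS provides an equivalent
 * freeChain_lock):
 */
void freeChain_lock(bdescr *bd)
{
    ACQUIRE_SM_LOCK;
    freeChain(bd);
    RELEASE_SM_LOCK;
}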
void freeBlockGroup(blockGroup* block_group)
{
    int i;

    // release each group in the array, then the array and the
    // container itself
    for (i = 0; i < block_group->total; i++) {
        freeGroup(block_group->groups[i]);
    }
    free(block_group->groups);
    free(block_group);
}
// Add a new Group (if the name is not used yet) to the Groups list
bool EntityManager_AddGroup(EntityManager *entityManager, const char *name)
{
    groupData *entity = NULL;

    if (entityManager == NULL || name == NULL) {
        snprintf(errStr, sizeof(errStr),
                 "EntityManager_AddGroup entityManager (isNull %d) and group name '%s' must not be NULL",
                 entityManager == NULL, name);
        return false;
    }
    if (checkAddValidParams(entityManager, name) == false) return false;
    if (newGroup(&entity, name) == false) return false;
    if (ItemsList_AddItem(entityManager->Groups, name, entity) == false) {
        freeGroup(entity);
        return false;
    }
    return true;
}
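/* A minimal caller, assuming an already-initialised EntityManager and
 * that errStr (set on failure above) is readable at the call site;
 * both are assumptions for the example, not taken from the code above:
 */
void exampleAddGroup(EntityManager *entityManager)
{
    if (EntityManager_AddGroup(entityManager, "admins") == false) {
        fprintf(stderr, "add group failed: %s\n", errStr);
    }
}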
void compactFree(StgCompactNFData *str)
{
    StgCompactNFDataBlock *block, *next;
    bdescr *bd;

    block = compactGetFirstBlock(str);
    for ( ; block; block = next) {
        next = block->next;

        bd = Bdescr((StgPtr)block);
        ASSERT((bd->flags & BF_EVACUATED) == 0);
        freeGroup(bd);
    }
}
//
// Resize each of the nurseries to the specified size.
//
static void resizeNurseriesEach (W_ blocks)
{
    uint32_t i, node;
    bdescr *bd;
    W_ nursery_blocks;
    nursery *nursery;

    for (i = 0; i < n_nurseries; i++) {
        nursery = &nurseries[i];
        nursery_blocks = nursery->n_blocks;
        if (nursery_blocks == blocks) continue;

        node = capNoToNumaNode(i);
        if (nursery_blocks < blocks) {
            debugTrace(DEBUG_gc, "increasing size of nursery to %d blocks",
                       blocks);
            nursery->blocks = allocNursery(node, nursery->blocks,
                                           blocks - nursery_blocks);
        } else {
            bdescr *next_bd;

            debugTrace(DEBUG_gc, "decreasing size of nursery to %d blocks",
                       blocks);

            bd = nursery->blocks;
            while (nursery_blocks > blocks) {
                next_bd = bd->link;
                next_bd->u.back = NULL;
                nursery_blocks -= bd->blocks; // might be a large block
                freeGroup(bd);
                bd = next_bd;
            }
            nursery->blocks = bd;
            // might have gone just under, by freeing a large block, so make
            // up the difference.
            if (nursery_blocks < blocks) {
                nursery->blocks = allocNursery(node, nursery->blocks,
                                               blocks - nursery_blocks);
            }
        }
        nursery->n_blocks = blocks;
        ASSERT(countBlocks(nursery->blocks) == nursery->n_blocks);
    }
}
// Add a new Resource (if the name is not used yet) to the Resources list
// Add ACL to the resource
bool EntityManager_AddResource(EntityManager *entityManager, const char *name)
{
    resourceData *entity = NULL;
    AclS *acl = NULL;

    if (entityManager == NULL || name == NULL) {
        snprintf(errStr, sizeof(errStr),
                 "EntityManager_AddResource entityManager (isNull %d) and resource name '%s' must not be NULL",
                 entityManager == NULL, name);
        return false;
    }
    if (checkAddValidParams(entityManager, name) == false) return false;
    if (newResource(&entity, name) == false) return false;
    if (ItemsList_AddItem(entityManager->Resources, name, entity) == false) {
        freeGroup(entity);
        return false;
    }
    Acl_New(&acl);
    EntityManager_RegisterProperty(entityManager, name, ACL_PROPERTY_NAME, (void *)acl);
    return true;
}
static void resizeNursery (nursery *nursery, W_ blocks)
{
    bdescr *bd;
    W_ nursery_blocks;

    nursery_blocks = nursery->n_blocks;
    if (nursery_blocks == blocks) return;

    if (nursery_blocks < blocks) {
        debugTrace(DEBUG_gc, "increasing size of nursery to %d blocks",
                   blocks);
        nursery->blocks = allocNursery(nursery->blocks, blocks - nursery_blocks);
    } else {
        bdescr *next_bd;

        debugTrace(DEBUG_gc, "decreasing size of nursery to %d blocks",
                   blocks);

        bd = nursery->blocks;
        while (nursery_blocks > blocks) {
            next_bd = bd->link;
            next_bd->u.back = NULL;
            nursery_blocks -= bd->blocks; // might be a large block
            freeGroup(bd);
            bd = next_bd;
        }
        nursery->blocks = bd;
        // might have gone just under, by freeing a large block, so make
        // up the difference.
        if (nursery_blocks < blocks) {
            nursery->blocks = allocNursery(nursery->blocks, blocks - nursery_blocks);
        }
    }
    nursery->n_blocks = blocks;
    ASSERT(countBlocks(nursery->blocks) == nursery->n_blocks);
}
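/* Worked example of the shrink path above: shrinking a nursery chain
 * of [4-block group] -> [1] -> [1] from 6 blocks to 3 frees the
 * 4-block group first (nursery_blocks drops from 6 to 2, overshooting
 * the target), so the final allocNursery() call grows the chain back
 * by one block to land exactly on 3.
 */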
void MainWindow::createPlotSettingsView()
{
    groupSettings = new GroupSettings(this);
    plotSettings = new PlotSettings(this);
    {
        QScrollArea* scroller = new QScrollArea(this);
        QVBoxLayout* content = new QVBoxLayout(scroller->viewport());
        content->setMargin(0);
        content->setSpacing(0);
        content->addWidget(groupSettings);
        content->addWidget(plotSettings);
        ui->commonSettings->setWidget(scroller);
    }
    {
        connect(groupSettings, SIGNAL(groupNameWasChanged()), groups, SLOT(retitle()));
        connect(groups, SIGNAL(groupChanged(Group*)), groupSettings, SLOT(catchGroup(Group*)));
        connect(groups, SIGNAL(noMoreGroup()), groupSettings, SLOT(freeGroup()));
        connect(groups, SIGNAL(wasActivated(QMdiSubWindow*)), plotSettings, SLOT(catchPlot(QMdiSubWindow*)));
        connect(groups, SIGNAL(noMoreGroup()), plotSettings, SLOT(toDefaultState()));
        connect(groups, SIGNAL(noMorePlots()), plotSettings, SLOT(toDefaultState()));
        connect(plotSettings, SIGNAL(copySettings(PlotSettingsFiller*)), groups, SLOT(copySettingsToActiveGroup(PlotSettingsFiller*)));
    }
}
void sweep(generation *gen)
{
    bdescr *bd, *prev, *next;
    uint32_t i;
    W_ freed, resid, fragd, blocks, live;

    ASSERT(countBlocks(gen->old_blocks) == gen->n_old_blocks);

    live = 0; // estimate of live data in this gen
    freed = 0;
    fragd = 0;
    blocks = 0;
    prev = NULL;
    for (bd = gen->old_blocks; bd != NULL; bd = next) {
        next = bd->link;

        if (!(bd->flags & BF_MARKED)) {
            prev = bd;
            continue;
        }

        blocks++;
        resid = 0;
        for (i = 0; i < BLOCK_SIZE_W / BITS_IN(W_); i++) {
            if (bd->u.bitmap[i] != 0) resid++;
        }
        live += resid * BITS_IN(W_);

        if (resid == 0) {
            freed++;
            gen->n_old_blocks--;
            if (prev == NULL) {
                gen->old_blocks = next;
            } else {
                prev->link = next;
            }
            freeGroup(bd);
        } else {
            prev = bd;
            if (resid < (BLOCK_SIZE_W * 3) / (BITS_IN(W_) * 4)) {
                fragd++;
                bd->flags |= BF_FRAGMENTED;
            }
            bd->flags |= BF_SWEPT;
        }
    }

    gen->live_estimate = live;

    debugTrace(DEBUG_gc, "sweeping: %d blocks, %d were copied, %d freed (%d%%), %d are fragmented, live estimate: %ld%%",
               gen->n_old_blocks + freed,
               gen->n_old_blocks - blocks + freed,
               freed,
               blocks == 0 ? 0 : (freed * 100) / blocks,
               fragd,
               (unsigned long)((blocks - freed) == 0 ? 0
                               : ((live / BLOCK_SIZE_W) * 100) / (blocks - freed)));

    ASSERT(countBlocks(gen->old_blocks) == gen->n_old_blocks);
}
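/* The fragmentation test above works on whole bitmap words, not bits:
 * resid counts non-zero bitmap words, each covering BITS_IN(W_) heap
 * words, so resid * BITS_IN(W_) is an upper bound on live words.  On a
 * 64-bit build with 4K blocks (BLOCK_SIZE_W = 512, BITS_IN(W_) = 64)
 * the bitmap is 8 words and the threshold is (512*3)/(64*4) = 6, i.e.
 * a block is flagged BF_FRAGMENTED when its live estimate falls below
 * 3/4 of the block.
 */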
StgPtr todo_block_full (nat size, gen_workspace *ws)
{
    rtsBool urgent_to_push, can_extend;
    StgPtr p;
    bdescr *bd;

    // todo_free has been pre-incremented by Evac.c:alloc_for_copy().  We
    // are expected to leave it bumped when we've finished here.
    ws->todo_free -= size;

    bd = ws->todo_bd;

    ASSERT(bd != NULL);
    ASSERT(bd->link == NULL);
    ASSERT(bd->gen == ws->gen);

    // We intentionally set ws->todo_lim lower than the full size of
    // the block, so that we can push out some work to the global list
    // and get the parallel threads working as soon as possible.
    //
    // So when ws->todo_lim is reached, we end up here and have to
    // decide whether it's worth pushing out the work we have or not.
    // If we have enough room in the block to evacuate the current
    // object, and it's not urgent to push this work, then we just
    // extend the limit and keep going.  Where "urgent" is defined as:
    // the global pool is empty, and there's enough work in this block
    // to make it worth pushing.
    //
    urgent_to_push =
        looksEmptyWSDeque(ws->todo_q) &&
        (ws->todo_free - bd->u.scan >= WORK_UNIT_WORDS / 2);

    // We can extend the limit for the current block if there's enough
    // room for the current object, *and* we're not into the second or
    // subsequent block of a large block.  The second condition occurs
    // when we evacuate an object that is larger than a block.  In
    // that case, alloc_todo_block() sets todo_lim to be exactly the
    // size of the large object, and we don't evacuate any more
    // objects into this block.  The reason is that the rest of the GC
    // is not set up to handle objects that start in the second or
    // later blocks of a group.  We just about manage this in the
    // nursery (see scheduleHandleHeapOverflow()) so evacuate() can
    // handle this, but other parts of the GC can't.  We could
    // probably fix this, but it's a rare case anyway.
    //
    can_extend =
        ws->todo_free + size <= bd->start + bd->blocks * BLOCK_SIZE_W
        && ws->todo_free < ws->todo_bd->start + BLOCK_SIZE_W;

    if (!urgent_to_push && can_extend) {
        ws->todo_lim = stg_min(bd->start + bd->blocks * BLOCK_SIZE_W,
                               ws->todo_lim + stg_max(WORK_UNIT_WORDS, size));
        debugTrace(DEBUG_gc, "increasing limit for %p to %p",
                   bd->start, ws->todo_lim);
        p = ws->todo_free;
        ws->todo_free += size;

        return p;
    }

    gct->copied += ws->todo_free - bd->free;
    bd->free = ws->todo_free;

    ASSERT(bd->u.scan >= bd->start && bd->u.scan <= bd->free);

    // If this block is not the scan block, we want to push it out and
    // make room for a new todo block.
    if (bd != gct->scan_bd) {
        // If this block does not have enough space to allocate the
        // current object, but it also doesn't have any work to push, then
        // push it on to the scanned list.
        if (bd->u.scan == bd->free) {
            if (bd->free == bd->start) {
                // Normally the block would not be empty, because then
                // there would be enough room to copy the current
                // object.  However, if the object we're copying is
                // larger than a block, then we might have an empty
                // block here.
                freeGroup(bd);
            } else {
                push_scanned_block(bd, ws);
            }
        }
        // Otherwise, push this block out to the global list.
        else {
            DEBUG_ONLY( generation *gen );
            DEBUG_ONLY( gen = ws->gen );
            debugTrace(DEBUG_gc, "push todo block %p (%ld words), step %d, todo_q: %ld",
                       bd->start, (unsigned long)(bd->free - bd->u.scan),
                       gen->no, dequeElements(ws->todo_q));

            if (!pushWSDeque(ws->todo_q, bd)) {
                bd->link = ws->todo_overflow;
                ws->todo_overflow = bd;
                ws->n_todo_overflow++;
            }
        }
    }

    ws->todo_bd   = NULL;
    ws->todo_free = NULL;
    ws->todo_lim  = NULL;

    alloc_todo_block(ws, size);

    p = ws->todo_free;
    ws->todo_free += size;
    return p;
}
bdescr *
allocGroup (W_ n)
{
    bdescr *bd, *rem;
    StgWord ln;

    if (n == 0) barf("allocGroup: requested zero blocks");

    if (n >= BLOCKS_PER_MBLOCK)
    {
        StgWord mblocks;

        mblocks = BLOCKS_TO_MBLOCKS(n);

        // n_alloc_blocks doesn't count the extra blocks we get in a
        // megablock group.
        n_alloc_blocks += mblocks * BLOCKS_PER_MBLOCK;
        if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

        bd = alloc_mega_group(mblocks);
        // only the bdescrs of the first MB are required to be initialised
        initGroup(bd);
        goto finish;
    }

    n_alloc_blocks += n;
    if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

    ln = log_2_ceil(n);

    while (ln < MAX_FREE_LIST && free_list[ln] == NULL) {
        ln++;
    }

    if (ln == MAX_FREE_LIST) {
#if 0  /* useful for debugging fragmentation */
        if ((W_)mblocks_allocated * BLOCKS_PER_MBLOCK * BLOCK_SIZE_W
              - (W_)((n_alloc_blocks - n) * BLOCK_SIZE_W) > (2*1024*1024)/sizeof(W_)) {
            debugBelch("Fragmentation, wanted %d blocks, %ld MB free\n",
                       n, ((mblocks_allocated * BLOCKS_PER_MBLOCK) - n_alloc_blocks) / BLOCKS_PER_MBLOCK);
            RtsFlags.DebugFlags.block_alloc = 1;
            checkFreeListSanity();
        }
#endif
        bd = alloc_mega_group(1);
        bd->blocks = n;
        initGroup(bd);                   // we know the group will fit
        rem = bd + n;
        rem->blocks = BLOCKS_PER_MBLOCK - n;
        initGroup(rem);                  // init the slop
        n_alloc_blocks += rem->blocks;
        freeGroup(rem);                  // add the slop on to the free list
        goto finish;
    }

    bd = free_list[ln];

    if (bd->blocks == n)                 // exactly the right size!
    {
        dbl_link_remove(bd, &free_list[ln]);
        initGroup(bd);
    }
    else if (bd->blocks > n)             // block too big...
    {
        bd = split_free_block(bd, n, ln);
        ASSERT(bd->blocks == n);
        initGroup(bd);
    }
    else
    {
        barf("allocGroup: free list corrupted");
    }

finish:
    IF_DEBUG(sanity, memset(bd->start, 0xaa, bd->blocks * BLOCK_SIZE));
    IF_DEBUG(sanity, checkFreeListSanity());
    return bd;
}
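/* The free lists above are segregated by size class: searching from
 * log_2_ceil(n) upward guarantees that any non-empty list found holds
 * groups of at least n blocks.  A minimal sketch of the ceiling-log2
 * helper, consistent with the usage above (the real RTS has its own
 * definition):
 */
static StgWord log_2_ceil(W_ n)
{
    StgWord i;
    W_ x = 1;

    for (i = 0; i < MAX_FREE_LIST; i++) {
        if (x >= n) return i;   // smallest i with 2^i >= n
        x = x << 1;
    }
    return MAX_FREE_LIST;       // n too big for any size class
}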