// Hand a fully-scanned block back to its workspace.  Blocks with more
// than WORK_UNIT_WORDS of room left go on part_list so they can be
// refilled later; the rest are retired to scavd_list.
void push_scanned_block (bdescr *bd, gen_workspace *ws)
{
    ASSERT(bd != NULL);
    ASSERT(bd->link == NULL);
    ASSERT(bd->gen == ws->gen);
    ASSERT(bd->u.scan == bd->free);   // scan pointer caught up: fully scanned

    if (bd->start + bd->blocks * BLOCK_SIZE_W - bd->free <= WORK_UNIT_WORDS) {
        // Not enough free space left to be worth refilling: retire the
        // block onto the scavenged list.
        bd->link = ws->scavd_list;
        ws->scavd_list = bd;
        ws->n_scavd_blocks += bd->blocks;
        IF_DEBUG(sanity,
                 ASSERT(countBlocks(ws->scavd_list) == ws->n_scavd_blocks));
    } else {
        // A partially full block: keep it on part_list for later re-use.
        bd->link = ws->part_list;
        ws->part_list = bd;
        ws->n_part_blocks += bd->blocks;
        IF_DEBUG(sanity,
                 ASSERT(countBlocks(ws->part_list) == ws->n_part_blocks));
    }
}
// Hand a fully-scanned block back to its workspace.  A single block
// with more than WORK_UNIT_WORDS of room left goes on part_list so it
// can be refilled later; everything else is retired to scavd_list.
void push_scanned_block (bdescr *bd, gen_workspace *ws)
{
    ASSERT(bd != NULL);
    ASSERT(bd->link == NULL);
    ASSERT(bd->gen == ws->gen);
    ASSERT(bd->u.scan == bd->free);   // scan pointer caught up: fully scanned

    if (bd->blocks != 1
        || bd->start + BLOCK_SIZE_W - bd->free <= WORK_UNIT_WORDS) {
        // Retire to the scavenged list: either a multi-block group
        // (see Note [big objects]) or a block with little room left.
        bd->link = ws->scavd_list;
        ws->scavd_list = bd;
        ws->n_scavd_blocks += bd->blocks;
        ws->n_scavd_words += bd->free - bd->start;
        IF_DEBUG(sanity,
                 ASSERT(countBlocks(ws->scavd_list) == ws->n_scavd_blocks));
    } else {
        // A partially full single block: keep it on part_list.
        // Only single blocks - see Note [big objects].
        bd->link = ws->part_list;
        ws->part_list = bd;
        ws->n_part_blocks += bd->blocks;
        ws->n_part_words += bd->free - bd->start;
        IF_DEBUG(sanity,
                 ASSERT(countBlocks(ws->part_list) == ws->n_part_blocks));
    }
}
// Sanity-check one generation: verify the cached block counts, then
// walk the generation's heap chains and every GC thread's workspace
// chains for this generation.
static void checkGeneration (generation *gen,
                             rtsBool after_major_gc USED_IF_THREADS)
{
    nat i;

    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);

#if defined(THREADED_RTS)
    // heap sanity checking doesn't work with SMP, because we can't
    // zero the slop (see Updates.h).  However, we can sanity-check
    // the heap after a major gc, because there is no slop.
    if (!after_major_gc) return;
#endif

    checkHeapChain(gen->blocks);

    for (i = 0; i < n_capabilities; i++) {
        gen_workspace *ws = &gc_threads[i]->gens[gen->no];
        checkHeapChain(ws->todo_bd);
        checkHeapChain(ws->part_list);
        checkHeapChain(ws->scavd_list);
    }

    checkLargeObjects(gen->large_objects);
}
// Total blocks owned by a generation: its normal blocks, its old
// blocks, and the allocated blocks behind its large-object list.
static W_ genBlocks (generation *gen)
{
    W_ total;

    // The cached counts must agree with the actual chains.
    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);

    total  = gen->n_blocks;
    total += gen->n_old_blocks;
    total += countAllocdBlocks(gen->large_objects);
    return total;
}
// Total blocks owned by a generation: its normal blocks, its old
// blocks, and the allocated blocks behind the large-object and
// compact-object lists (including compacts being imported).
static W_ genBlocks (generation *gen)
{
    W_ total;

    // The cached counts must agree with the actual chains.
    ASSERT(countBlocks(gen->blocks) == gen->n_blocks);
    ASSERT(countBlocks(gen->large_objects) == gen->n_large_blocks);
    ASSERT(countCompactBlocks(gen->compact_objects) == gen->n_compact_blocks);
    ASSERT(countCompactBlocks(gen->compact_blocks_in_import)
           == gen->n_compact_blocks_in_import);

    total  = gen->n_blocks;
    total += gen->n_old_blocks;
    total += countAllocdBlocks(gen->large_objects);
    total += countAllocdCompactBlocks(gen->compact_objects);
    total += countAllocdCompactBlocks(gen->compact_blocks_in_import);
    return total;
}
void bigWigAverageOverBed(char *inBw, char *inBed, char *outTab) /* bigWigAverageOverBed - Compute average score of big wig over each bed, which may have introns. */ { struct bed *bedList; int fieldCount; bedLoadAllReturnFieldCount(inBed, &bedList, &fieldCount); checkUniqueNames(bedList); struct bbiFile *bbi = bigWigFileOpen(inBw); FILE *f = mustOpen(outTab, "w"); FILE *bedF = NULL; if (bedOut != NULL) bedF = mustOpen(bedOut, "w"); /* Count up number of blocks in file. It takes about 1/100th of of second to * look up a single block in a bigWig. On the other hand to stream through * the whole file setting a array of doubles takes about 30 seconds, so we change * strategy at 3,000 blocks. * I (Jim) usually avoid having two paths through the code like this, and am tempted * to always go the ~30 second chromosome-at-a-time way. On the other hand the block-way * was developed first, and it was useful to have both ways to test against each other. * (This found a bug where the chromosome way wasn't handling beds in chromosomes not * covered by the bigWig for instance). Since this code is not likely to change too * much, keeping both implementations in seems reasonable. */ int blockCount = countBlocks(bedList, fieldCount); verbose(2, "Got %d blocks, if >= 3000 will use chromosome-at-a-time method\n", blockCount); if (blockCount < 3000) averageFetchingEachBlock(bbi, bedList, fieldCount, f, bedF); else averageFetchingEachChrom(bbi, &bedList, fieldCount, f, bedF); carefulClose(&bedF); carefulClose(&f); }
/* Recursively total the blocks used by a directory; prints the per-
 * directory total unless summary mode suppresses it, and accumulates
 * into the parent's running count. */
static int dir_mdu(direntry_t *entry, MainParam_t *mp)
{
	Arg_t *parentArg = (Arg_t *) (mp->arg);
	Arg_t arg;
	int ret;

	/* Child context starts as a copy of the parent's. */
	arg = *parentArg;
	arg.mp.arg = (void *) &arg;
	arg.parent = parentArg;
	arg.inDir = 1;

	/* account for the space occupied by the directory itself,
	 * except for the root directory */
	arg.blocks = isRootDir(entry->Dir)
		? 0
		: countBlocks(entry->Dir, getStart(entry->Dir, &entry->dir));

	/* recursion over the directory's contents */
	ret = mp->loop(mp->File, &arg.mp, "*");

	if(!arg.summary || !parentArg->inDir) {
		printf("%-7d ", arg.blocks);
		fprintPwd(stdout, entry, 0);
		putchar('\n');
	}
	parentArg->blocks += arg.blocks;
	return ret;
}
// Blocks held by GC thread i's workspace for generation g: the chain
// of todo blocks plus the workspace's cached part/scavd block counts.
W_ gcThreadLiveBlocks (nat i, nat g)
{
    gen_workspace *ws = &gc_threads[i]->gens[g];

    return countBlocks(ws->todo_bd)
         + ws->n_part_blocks
         + ws->n_scavd_blocks;
}
/* Count the blocks occupied by a single file; prints a per-file line
 * unless we are summarising inside a directory, and accumulates into
 * the caller's running total. */
static int file_mdu(direntry_t *entry, MainParam_t *mp)
{
	Arg_t *arg = (Arg_t *) (mp->arg);
	unsigned int nblk;

	nblk = countBlocks(entry->Dir, getStart(entry->Dir, &entry->dir));
	if(arg->all || !arg->inDir) {
		printf("%-7d ", nblk);
		fprintPwd(stdout, entry, 0);
		putchar('\n');
	}
	arg->blocks += nblk;
	return GOT_ONE;
}
/*
 * Build a block-compressed sparse row (BCRS) representation of the
 * dense matrix my_m, where a "block" is a maximal run of consecutive
 * non-zeros within one row.  Returns a heap-allocated BCRS whose
 * arrays the caller owns and must free.
 *
 * Fixes vs. the previous version: removed the unused local `current`,
 * simplified the redundant block-start test
 * ((jj==0) || (jj!=0 && ...)) to (jj==0 || ...), dropped the
 * unnecessary cast of malloc's result, and switched to the
 * `sizeof *ptr` idiom so the sizes track the pointer types.
 *
 * NOTE(review): allocations are unchecked, as before — callers assume
 * allocation success.
 */
BCRS* CreateBCRS(MATRIX* my_m)
{
    int i, jj, k, index, block_number = 0;
    int nrows = my_m->nrows;
    int ncols = my_m->ncols;
    int nnz = my_m->nnz;
    double** mal = my_m->mel;

    BCRS* cc = malloc(sizeof *cc);
    int nblocks = countBlocks(my_m);
    int* colInd = malloc(nblocks * sizeof *colInd);
    int* rowPtr = malloc((nrows + 1) * sizeof *rowPtr);
    int* nnzPtr = malloc((nblocks + 1) * sizeof *nnzPtr);
    double* value = malloc(nnz * sizeof *value);

    for(i = 0; i < nrows + 1; ++i) {
        rowPtr[i] = 0;
    }

    index = 0;
    for(k = 0; k < nrows; k++){
        for(jj = 0; jj < ncols; ++jj){
            if (mal[k][jj] != 0) {
                /* A block starts at column 0 or right after a zero. */
                if (jj == 0 || mal[k][jj-1] == 0) {
                    colInd[block_number] = jj;
                    nnzPtr[block_number] = index;
                    block_number++;
                }
                value[index] = mal[k][jj];
                index++;
            }
        }
        /* Number of blocks seen up to and including row k. */
        rowPtr[k+1] = block_number;
    }
    nnzPtr[nblocks] = nnz;  /* sentinel: one past the last block */

    cc->colInd = colInd;
    cc->rowPtr = rowPtr;
    cc->value = value;
    cc->nnzPtr = nnzPtr;
    cc->nrows = nrows;
    cc->ncols = ncols;
    cc->nnz = nnz;
    cc->nblocks = nblocks;
    return cc;
}
// // Resize each of the nurseries to the specified size. // static void resizeNurseriesEach (W_ blocks) { uint32_t i, node; bdescr *bd; W_ nursery_blocks; nursery *nursery; for (i = 0; i < n_nurseries; i++) { nursery = &nurseries[i]; nursery_blocks = nursery->n_blocks; if (nursery_blocks == blocks) continue; node = capNoToNumaNode(i); if (nursery_blocks < blocks) { debugTrace(DEBUG_gc, "increasing size of nursery to %d blocks", blocks); nursery->blocks = allocNursery(node, nursery->blocks, blocks-nursery_blocks); } else { bdescr *next_bd; debugTrace(DEBUG_gc, "decreasing size of nursery to %d blocks", blocks); bd = nursery->blocks; while (nursery_blocks > blocks) { next_bd = bd->link; next_bd->u.back = NULL; nursery_blocks -= bd->blocks; // might be a large block freeGroup(bd); bd = next_bd; } nursery->blocks = bd; // might have gone just under, by freeing a large block, so make // up the difference. if (nursery_blocks < blocks) { nursery->blocks = allocNursery(node, nursery->blocks, blocks-nursery_blocks); } } nursery->n_blocks = blocks; ASSERT(countBlocks(nursery->blocks) == nursery->n_blocks); } }
// Resize a single nursery to the specified number of blocks, growing
// via allocNursery or shrinking by freeing block groups off the front
// of the chain.
static void resizeNursery (nursery *nursery, W_ blocks)
{
    bdescr *bd;
    W_ nursery_blocks;

    nursery_blocks = nursery->n_blocks;
    if (nursery_blocks == blocks) return;

    if (nursery_blocks < blocks) {
        // FIX: blocks is W_, so %d was the wrong conversion on LP64
        // platforms (undefined behaviour in varargs); use FMT_Word.
        debugTrace(DEBUG_gc,
                   "increasing size of nursery to %" FMT_Word " blocks",
                   blocks);
        nursery->blocks = allocNursery(nursery->blocks,
                                       blocks - nursery_blocks);
    } else {
        bdescr *next_bd;

        debugTrace(DEBUG_gc,
                   "decreasing size of nursery to %" FMT_Word " blocks",
                   blocks);

        bd = nursery->blocks;
        while (nursery_blocks > blocks) {
            next_bd = bd->link;
            // NOTE(review): if the loop ever freed the entire chain this
            // would dereference NULL; callers appear to request
            // blocks > 0 — confirm.
            next_bd->u.back = NULL;
            nursery_blocks -= bd->blocks; // might be a large block
            freeGroup(bd);
            bd = next_bd;
        }

        nursery->blocks = bd;
        // might have gone just under, by freeing a large block, so make
        // up the difference.
        if (nursery_blocks < blocks) {
            nursery->blocks = allocNursery(nursery->blocks,
                                           blocks - nursery_blocks);
        }
    }
    nursery->n_blocks = blocks;
    ASSERT(countBlocks(nursery->blocks) == nursery->n_blocks);
}
// Sweep pass over gen->old_blocks: blocks with no marked bitmap words
// are unlinked and freed; sparsely-occupied survivors are flagged
// BF_FRAGMENTED; all survivors get BF_SWEPT.  Also computes
// gen->live_estimate from the mark bitmaps.
void sweep(generation *gen)
{
    bdescr *bd, *prev, *next;
    uint32_t i;
    W_ freed, resid, fragd, blocks, live;

    ASSERT(countBlocks(gen->old_blocks) == gen->n_old_blocks);

    live = 0;   // estimate of live data in this gen
    freed = 0;
    fragd = 0;
    blocks = 0;
    prev = NULL;
    for (bd = gen->old_blocks; bd != NULL; bd = next)
    {
        next = bd->link;

        if (!(bd->flags & BF_MARKED)) {
            prev = bd;
            continue;
        }

        blocks++;
        // Count non-zero bitmap words; each covers BITS_IN(W_) heap words.
        resid = 0;
        for (i = 0; i < BLOCK_SIZE_W / BITS_IN(W_); i++)
        {
            if (bd->u.bitmap[i] != 0) resid++;
        }
        live += resid * BITS_IN(W_);

        if (resid == 0)
        {
            // Nothing marked in this block: unlink and free it.
            freed++;
            gen->n_old_blocks--;
            if (prev == NULL) {
                gen->old_blocks = next;
            } else {
                prev->link = next;
            }
            freeGroup(bd);
        }
        else
        {
            prev = bd;
            // Less than 3/4 of the bitmap words occupied: fragmented.
            if (resid < (BLOCK_SIZE_W * 3) / (BITS_IN(W_) * 4)) {
                fragd++;
                bd->flags |= BF_FRAGMENTED;
            }
            bd->flags |= BF_SWEPT;
        }
    }

    gen->live_estimate = live;

    // FIX: the W_-typed arguments were previously printed with %d/%ld,
    // which mismatches their width/signedness on LP64 platforms
    // (undefined behaviour in varargs); use FMT_Word throughout.
    debugTrace(DEBUG_gc,
               "sweeping: %" FMT_Word " blocks, %" FMT_Word " were copied, %"
               FMT_Word " freed (%" FMT_Word "%%), %" FMT_Word
               " are fragmented, live estimate: %" FMT_Word "%%",
               gen->n_old_blocks + freed,
               gen->n_old_blocks - blocks + freed,
               freed,
               blocks == 0 ? (W_)0 : (freed * 100) / blocks,
               fragd,
               (blocks - freed) == 0 ? (W_)0
                   : ((live / BLOCK_SIZE_W) * 100) / (blocks - freed));

    ASSERT(countBlocks(gen->old_blocks) == gen->n_old_blocks);
}
// Cross-check the RTS's notion of where every block is against the
// total handed out by the megablock allocator.  Sums blocks per
// generation (including per-capability mut_lists and per-GC-thread
// workspaces), nurseries and pinned blocks, retainer-profiler stacks,
// arenas, executable memory, and the free list; reports (and asserts
// on) any discrepancy.
void memInventory (rtsBool show)
{
  nat g, i;
  W_ gen_blocks[RtsFlags.GcFlags.generations];
  W_ nursery_blocks, retainer_blocks, arena_blocks, exec_blocks;
  W_ live_blocks = 0, free_blocks = 0;
  rtsBool leak;

  // count the blocks we currently have
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      gen_blocks[g] = 0;
      for (i = 0; i < n_capabilities; i++) {
          gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
          gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].part_list);
          gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].scavd_list);
          gen_blocks[g] += countBlocks(gc_threads[i]->gens[g].todo_bd);
      }
      gen_blocks[g] += genBlocks(&generations[g]);
  }

  nursery_blocks = 0;
  for (i = 0; i < n_capabilities; i++) {
      ASSERT(countBlocks(nurseries[i].blocks) == nurseries[i].n_blocks);
      nursery_blocks += nurseries[i].n_blocks;
      if (capabilities[i].pinned_object_block != NULL) {
          nursery_blocks += capabilities[i].pinned_object_block->blocks;
      }
      nursery_blocks += countBlocks(capabilities[i].pinned_object_blocks);
  }

  retainer_blocks = 0;
#ifdef PROFILING
  if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
      retainer_blocks = retainerStackBlocks();
  }
#endif

  // count the blocks allocated by the arena allocator
  arena_blocks = arenaBlocks();

  // count the blocks containing executable memory
  exec_blocks = countAllocdBlocks(exec_block);

  /* count the blocks on the free list */
  free_blocks = countFreeList();

  live_blocks = 0;
  for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
      live_blocks += gen_blocks[g];
  }
  // FIX: removed a stray second '+' (a typo that parsed as a harmless
  // unary plus) from this sum.
  live_blocks += nursery_blocks + retainer_blocks
              + arena_blocks + exec_blocks;

#define MB(n) (((double)(n) * BLOCK_SIZE_W) / ((1024*1024)/sizeof(W_)))

  leak = live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK;

  if (show || leak)
  {
      if (leak) {
          debugBelch("Memory leak detected:\n");
      } else {
          debugBelch("Memory inventory:\n");
      }
      for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
          debugBelch(" gen %d blocks : %5" FMT_Word " blocks (%6.1lf MB)\n",
                     g, gen_blocks[g], MB(gen_blocks[g]));
      }
      debugBelch(" nursery : %5" FMT_Word " blocks (%6.1lf MB)\n",
                 nursery_blocks, MB(nursery_blocks));
      debugBelch(" retainer : %5" FMT_Word " blocks (%6.1lf MB)\n",
                 retainer_blocks, MB(retainer_blocks));
      debugBelch(" arena blocks : %5" FMT_Word " blocks (%6.1lf MB)\n",
                 arena_blocks, MB(arena_blocks));
      debugBelch(" exec : %5" FMT_Word " blocks (%6.1lf MB)\n",
                 exec_blocks, MB(exec_blocks));
      debugBelch(" free : %5" FMT_Word " blocks (%6.1lf MB)\n",
                 free_blocks, MB(free_blocks));
      debugBelch(" total : %5" FMT_Word " blocks (%6.1lf MB)\n",
                 live_blocks + free_blocks, MB(live_blocks + free_blocks));
      if (leak) {
          debugBelch("\n in system : %5" FMT_Word " blocks (%" FMT_Word " MB)\n",
                     mblocks_allocated * BLOCKS_PER_MBLOCK, mblocks_allocated);
      }
  }

  if (leak) {
      debugBelch("\n");
      findMemoryLeak();
  }
  ASSERT(n_alloc_blocks == live_blocks);
  ASSERT(!leak);
}