void ObjectWalker::seed(GCData& data) {
  ObjectArray* current_rs = memory_->remember_set();

  for(ObjectArray::iterator oi = current_rs->begin();
      oi != current_rs->end();
      ++oi) {
    Object* tmp = *oi;

    // unremember_object throws a NULL in to remove an object
    // so we don't have to compact the set in unremember
    if(tmp) saw_object(0, tmp);
  }

  for(Roots::Iterator i(data.roots()); i.more(); i.advance()) {
    saw_object(0, i->get());
  }

  {
    std::lock_guard<std::mutex> guard(data.thread_nexus()->threads_mutex());

    for(ThreadList::iterator i = data.thread_nexus()->threads()->begin();
        i != data.thread_nexus()->threads()->end();
        ++i) {
      scan(*i, false);
    }
  }
}
void ObjectWalker::seed(GCData& data) {
  ObjectArray* current_rs = object_memory_->remember_set();

  for(ObjectArray::iterator oi = current_rs->begin();
      oi != current_rs->end();
      ++oi) {
    Object* tmp = *oi;

    // unremember_object throws a NULL in to remove an object
    // so we don't have to compact the set in unremember
    if(tmp) saw_object(tmp);
  }

  for(Roots::Iterator i(data.roots()); i.more(); i.advance()) {
    saw_object(i->get());
  }

  if(data.threads()) {
    for(std::list<ManagedThread*>::iterator i = data.threads()->begin();
        i != data.threads()->end();
        ++i) {
      scan(*i, false);
    }
  }

  for(Allocator<capi::Handle>::Iterator i(data.handles()->allocator());
      i.more();
      i.advance()) {
    saw_object(i->object());
  }
}
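// The remembered-set loops above rely on a NULL-tombstone idiom:
// unremember_object overwrites the slot with NULL instead of erasing it,
// so removal needs no compaction and every scanner just skips the holes.
// Below is a minimal, self-contained sketch of that idiom; the
// TombstoneSet/remember/unremember names are hypothetical, not the
// Rubinius API.
#include <algorithm>
#include <cstddef>
#include <vector>

struct Obj {};

struct TombstoneSet {
  std::vector<Obj*> entries_;

  void remember(Obj* obj) { entries_.push_back(obj); }

  // O(1)-per-slot removal: leave a NULL hole in place of the entry.
  void unremember(Obj* obj) {
    std::replace(entries_.begin(), entries_.end(), obj,
                 static_cast<Obj*>(NULL));
  }

  template <typename F>
  void each(F f) {
    for(std::vector<Obj*>::iterator i = entries_.begin();
        i != entries_.end(); ++i) {
      if(*i) f(*i);   // skip tombstones, exactly like the loops above
    }
  }
};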
void ObjectMemory::collect_mature(GCData& data) {
#ifdef RBX_GC_STATS
  stats::GCStats::get()->objects_seen.start();
  stats::GCStats::get()->collect_mature.start();
#endif

  // validate_handles(data.handles());
  // validate_handles(data.cached_handles());

  timer::Running<size_t, 1000000> timer(full_collection_time);

  collect_mature_now = false;

  code_manager_.clear_marks();

  immix_->reset_stats();
  immix_->collect(data);
  immix_->clean_weakrefs();

  code_manager_.sweep();

  data.global_cache()->prune_unmarked(mark());

  prune_handles(data.handles(), false);
  prune_handles(data.cached_handles(), false);

  // Have to do this after all things that check for mark bits are
  // done, as it free()s objects, invalidating mark bits.
  mark_sweep_->after_marked();

  inflated_headers_->deallocate_headers(mark());

  // We no longer need to unmark all; we use the rotating mark instead.
  // This means that the mark we just set on all reachable objects will
  // be ignored next time anyway.
  //
  // immix_->unmark_all(data);

  rotate_mark();
  full_collections++;

#ifdef RBX_GC_STATS
  stats::GCStats::get()->collect_mature.stop();
  stats::GCStats::get()->objects_seen.stop();
#endif
}
void ObjectMemory::collect_young(GCData& data, YoungCollectStats* stats) {
  collect_young_now = false;

  static int collect_times = 0;

  // validate_handles(data.handles());
  // validate_handles(data.cached_handles());

  young_->collect(data, stats);

  prune_handles(data.handles(), true);
  prune_handles(data.cached_handles(), true);
  collect_times++;

  data.global_cache()->prune_young();
}
void GarbageCollector::unmark_all(GCData& data) {
  UnmarkVisitor visit(object_memory_);

  visit_roots(data.roots(), visit);
  visit_call_frames_list(data.call_frames(), visit);

  for(capi::Handles::Iterator i(*data.handles()); i.more(); i.advance()) {
    visit.call(i->object());
  }

  for(capi::Handles::Iterator i(*data.cached_handles()); i.more(); i.advance()) {
    visit.call(i->object());
  }

  visit.drain_stack();
}
void ObjectMemory::collect_young(GCData& data, YoungCollectStats* stats) {
  collect_young_now = false;

  timer::Running<size_t, 1000000> timer(young_collection_time);

  // validate_handles(data.handles());
  // validate_handles(data.cached_handles());

  young_->reset_stats();
  young_->collect(data, stats);

  prune_handles(data.handles(), true);
  prune_handles(data.cached_handles(), true);
  young_collections++;

  data.global_cache()->prune_young();
}
void GenerateTupleTgtCands(OutputFactor2TgtCandList& tCand, E2Costs& e2costs,
                           GCData& data) {
  // check if candidates are non-empty
  bool gotCands = true;
  for(size_t j = 0; gotCands && j < tCand.size(); ++j)
    gotCands &= !tCand[j].empty();

  if(gotCands) {
    // enumerate tuples
    CHECK(data.DistinctOutFactors() == tCand.size());
    std::vector<unsigned> radix(data.DistinctOutFactors());
    for(size_t i = 0; i < tCand.size(); ++i) radix[i] = tCand[i].size();

    unsigned* tuples = 0;
    size_t numTuples = GenerateTuples(radix.size(), &radix[0], tuples);

    data.totalTuples += numTuples;

    for(size_t i = 0; i < numTuples; ++i) {
      mPhrase e(radix.size());
      float costs = 0.0;
      for(size_t j = 0; j < radix.size(); ++j) {
        CHECK(tuples[radix.size() * i + j] < tCand[j].size());
        std::pair<float, vFactor> const& mycand =
          tCand[j][tuples[radix.size() * i + j]];
        e[j] = mycand.second;
        costs += mycand.first;
      }
#ifdef DEBUG
      bool mismatch = false;
      for(size_t j = 1; !mismatch && j < e.size(); ++j)
        if(e[j].size() != e[j - 1].size()) mismatch = true;
      CHECK(mismatch == false);
#endif
      std::pair<E2Costs::iterator, bool> p =
        e2costs.insert(std::make_pair(e, costs));
      if(p.second) {
        ++data.distinctTuples;
      } else {
        // entry known, take min of costs; alternative: sum probs
        if(costs < p.first->second) p.first->second = costs;
      }
    }
    delete[] tuples;
  }
}
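// GenerateTuples above enumerates the Cartesian product of per-factor
// candidate lists as flat index tuples. A hedged sketch of what such a
// mixed-radix enumerator does (the name generate_tuples_sketch and this
// implementation are illustrative, not the actual Moses routine): for
// radix = {r0, r1, ...} it emits every index tuple row by row and
// returns the tuple count; the caller frees the array with delete[].
#include <cstddef>

size_t generate_tuples_sketch(size_t n, const unsigned* radix,
                              unsigned*& tuples) {
  size_t total = 1;
  for(size_t j = 0; j < n; ++j) total *= radix[j];

  tuples = new unsigned[total * n];
  for(size_t i = 0; i < total; ++i) {
    size_t rest = i;
    // Decode i as a mixed-radix number; digit j selects the j-th
    // factor's candidate, matching tCand[j][tuples[n*i+j]] above.
    for(size_t j = n; j-- > 0; ) {
      tuples[n * i + j] = static_cast<unsigned>(rest % radix[j]);
      rest /= radix[j];
    }
  }
  return total;
}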
void GeneratePerFactorTgtList(size_t factorType, PPtr pptr, GCData& data,
                              Len2Cands& len2cands) {
  std::vector<FactorTgtCand> cands;
  data.pdicts[factorType]->GetTargetCandidates(pptr, cands);

  for(std::vector<FactorTgtCand>::const_iterator cand = cands.begin();
      cand != cands.end(); ++cand) {
    CHECK(data.weights[factorType].size() == cand->second.size());
    float costs = std::inner_product(data.weights[factorType].begin(),
                                     data.weights[factorType].end(),
                                     cand->second.begin(), 0.0);

    size_t len = cand->first.size();
    if(len >= len2cands.size()) len2cands.resize(len + 1, 0);
    if(!len2cands[len])
      len2cands[len] = new OutputFactor2TgtCandList(data.DistinctOutFactors());
    OutputFactor2TgtCandList& outf2tcandlist = *len2cands[len];

    outf2tcandlist[data.OutFT(factorType)].push_back(
      std::make_pair(costs, cand->first));
  }
}
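// The candidate cost above is just a dot product of the model weights
// with the candidate's per-feature scores. A minimal standalone
// equivalent using the same std::inner_product call (weighted_cost is an
// illustrative name):
#include <numeric>
#include <vector>

float weighted_cost(const std::vector<float>& weights,
                    const std::vector<float>& scores) {
  // 0.0 (a double, as above) widens the accumulation before the
  // result is narrowed back to float.
  return std::inner_product(weights.begin(), weights.end(),
                            scores.begin(), 0.0);
}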
void ObjectMemory::collect_young(GCData& data, YoungCollectStats* stats) {
  collect_young_now = false;

  timer::Running<size_t, 1000000> timer(young_collection_time);

  // validate_handles(data.handles());
  // validate_handles(data.cached_handles());

  young_->reset_stats();
  young_->collect(data, stats);

  prune_handles(data.handles(), true);
  prune_handles(data.cached_handles(), true);
  young_collections++;

  data.global_cache()->prune_young();

  if(data.threads()) {
    for(std::list<ManagedThread*>::iterator i = data.threads()->begin();
        i != data.threads()->end();
        ++i) {
      // Keep the refill call outside of assert(): with NDEBUG the assert
      // body is compiled out and the slab would never be refilled.
      bool refilled = refill_slab((*i)->local_slab());
      (void)refilled;  // silence unused-variable warnings in NDEBUG builds
      assert(refilled);
    }
  }
}
void ObjectMemory::collect_young(GCData& data, YoungCollectStats* stats) {
  collect_young_now = false;

  timer::Running<1000000> timer(gc_stats.total_young_collection_time,
                                gc_stats.last_young_collection_time);

  young_->reset_stats();
  young_->collect(data, stats);

  prune_handles(data.handles(), data.cached_handles(), young_);
  gc_stats.young_collection_count++;

  data.global_cache()->prune_young();

  if(data.threads()) {
    for(std::list<ManagedThread*>::iterator i = data.threads()->begin();
        i != data.threads()->end();
        ++i) {
      gc::Slab& slab = (*i)->local_slab();

      gc_stats.slab_allocated(slab.allocations(), slab.byte_used());

      // Reset the slab to a size of 0 so that the thread has to do
      // an allocation to get a proper refill. This keeps the number
      // of threads in the system from starving the available
      // number of slabs.
      slab.refill(0, 0);
    }
  }

  young_->reset();
}
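// Why slab.refill(0, 0) above works: a thread-local slab is a
// bump-pointer allocation window, and resetting it to zero capacity
// guarantees the thread's next allocate() misses, sending it back to the
// GC for a fresh slab. A hedged sketch (SlabSketch is hypothetical, not
// Rubinius' gc::Slab):
#include <cstddef>
#include <stdint.h>

class SlabSketch {
  uintptr_t start_;
  size_t used_;
  size_t capacity_;

public:
  SlabSketch() : start_(0), used_(0), capacity_(0) {}

  void refill(uintptr_t start, size_t capacity) {
    start_ = start;
    used_ = 0;
    capacity_ = capacity;
  }

  // Bump allocation; returns 0 when the slab is exhausted -- which is
  // always the case after refill(0, 0).
  void* allocate(size_t bytes) {
    if(used_ + bytes > capacity_) return 0;
    void* addr = reinterpret_cast<void*>(start_ + used_);
    used_ += bytes;
    return addr;
  }
};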
void ObjectMemory::collect_mature(GCData& data) {
  timer::Running<1000000> timer(gc_stats.total_full_collection_time,
                                gc_stats.last_full_collection_time);

  collect_mature_now = false;

  code_manager_.clear_marks();

  immix_->reset_stats();
  immix_->collect(data);
  immix_->clean_weakrefs();

  code_manager_.sweep();

  data.global_cache()->prune_unmarked(mark());

  prune_handles(data.handles(), data.cached_handles(), NULL);

  // Have to do this after all things that check for mark bits are
  // done, as it free()s objects, invalidating mark bits.
  mark_sweep_->after_marked();

  inflated_headers_->deallocate_headers(mark());

  // We no longer need to unmark all; we use the rotating mark instead.
  // This means that the mark we just set on all reachable objects will
  // be ignored next time anyway.
  //
  // immix_->unmark_all(data);

  rotate_mark();
  gc_stats.full_collection_count++;
}
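// A sketch of the rotating-mark trick the comment above refers to
// (RotatingMark is an illustrative name): instead of clearing mark bits
// after a cycle, the collector flips which value counts as "marked", so
// last cycle's marks are stale by definition and no unmark pass is
// needed. Compare the which_mark_ flip in the ImmixGC variant below.
struct RotatingMark {
  unsigned current_;   // alternates between 1 and 2

  RotatingMark() : current_(1) {}

  unsigned mark() const { return current_; }
  bool marked_p(unsigned obj_mark) const { return obj_mark == current_; }

  // Called at the end of a full collection, like rotate_mark() above.
  void rotate() { current_ = (current_ == 1 ? 2 : 1); }
};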
/**
 * Perform garbage collection on the young objects.
 */
void BakerGC::collect(GCData& data, YoungCollectStats* stats) {
#ifdef HAVE_VALGRIND_H
  VALGRIND_MAKE_MEM_DEFINED(next->start().as_int(), next->size());
  VALGRIND_MAKE_MEM_DEFINED(current->start().as_int(), current->size());
#endif

  Object* tmp;
  ObjectArray* current_rs = object_memory_->swap_remember_set();

  total_objects = 0;
  copy_spills_ = 0;

  reset_promoted();

  // Start by copying objects in the remember set
  for(ObjectArray::iterator oi = current_rs->begin();
      oi != current_rs->end();
      ++oi) {
    tmp = *oi;
    // unremember_object throws a NULL in to remove an object
    // so we don't have to compact the set in unremember
    if(tmp) {
      // assert(tmp->mature_object_p());
      // assert(!tmp->forwarded_p());

      // Remove the Remember bit, since we're clearing the set.
      tmp->clear_remember();
      scan_object(tmp);
    }
  }

  delete current_rs;

  for(std::list<gc::WriteBarrier*>::iterator wbi = object_memory_->aux_barriers().begin();
      wbi != object_memory_->aux_barriers().end();
      ++wbi) {
    gc::WriteBarrier* wb = *wbi;
    ObjectArray* rs = wb->swap_remember_set();
    for(ObjectArray::iterator oi = rs->begin();
        oi != rs->end();
        ++oi) {
      tmp = *oi;
      if(tmp) {
        tmp->clear_remember();
        scan_object(tmp);
      }
    }

    delete rs;
  }

  for(Roots::Iterator i(data.roots()); i.more(); i.advance()) {
    i->set(saw_object(i->get()));
  }

  if(data.threads()) {
    for(std::list<ManagedThread*>::iterator i = data.threads()->begin();
        i != data.threads()->end();
        ++i) {
      scan(*i, true);
    }
  }

  for(Allocator<capi::Handle>::Iterator i(data.handles()->allocator());
      i.more();
      i.advance()) {
    if(!i->in_use_p()) continue;

    if(!i->weak_p() && i->object()->young_object_p()) {
      i->set_object(saw_object(i->object()));

    // Users manipulate values accessible from the data* within an
    // RData without running a write barrier. Thus, if we see a mature
    // rdata, we must always scan it because it could contain
    // young pointers.
    } else if(!i->object()->young_object_p() && i->is_rdata()) {
      scan_object(i->object());
    }

    assert(i->object()->type_id() > InvalidType &&
           i->object()->type_id() < LastObjectType);
  }

  std::list<capi::GlobalHandle*>* gh = data.global_handle_locations();

  if(gh) {
    for(std::list<capi::GlobalHandle*>::iterator i = gh->begin();
        i != gh->end();
        ++i) {
      capi::GlobalHandle* global_handle = *i;
      capi::Handle** loc = global_handle->handle();
      if(capi::Handle* hdl = *loc) {
        if(!REFERENCE_P(hdl)) continue;
        if(hdl->valid_p()) {
          Object* obj = hdl->object();
          if(obj && obj->reference_p() && obj->young_object_p()) {
            hdl->set_object(saw_object(obj));
          }
        } else {
          std::cerr << "Detected bad handle checking global capi handles\n";
        }
      }
    }
  }

#ifdef ENABLE_LLVM
  if(LLVMState* ls = data.llvm_state()) ls->gc_scan(this);
#endif

  // Handle all promotions to non-young space that occurred.
  handle_promotions();

  assert(fully_scanned_p());
  // We're now done seeing the entire object graph of normal, live references.
  // Now we get to handle the unusual references, like finalizers and such.

  // Objects with finalizers must be kept alive until the finalizers have
  // run.
  walk_finalizers();

  // Process possible promotions from processing objects with finalizers.
  handle_promotions();

  if(!promoted_stack_.empty()) rubinius::bug("promote stack has elements!");
  if(!fully_scanned_p()) rubinius::bug("more young refs");

  // Check any weakrefs and replace dead objects with nil
  clean_weakrefs(true);

  // Swap the 2 halves
  Heap* x = next;
  next = current;
  current = x;

  if(stats) {
    stats->lifetime = lifetime_;
    stats->percentage_used = current->percentage_used();
    stats->promoted_objects = promoted_objects_;
    stats->excess_objects = copy_spills_;
  }

  // Tune the age at which promotion occurs
  if(autotune_) {
    double used = current->percentage_used();
    if(used > cOverFullThreshold) {
      if(tune_threshold_ >= cOverFullTimes) {
        if(lifetime_ > cMinimumLifetime) lifetime_--;
      } else {
        tune_threshold_++;
      }
    } else if(used < cUnderFullThreshold) {
      if(tune_threshold_ <= cUnderFullTimes) {
        if(lifetime_ < cMaximumLifetime) lifetime_++;
      } else {
        tune_threshold_--;
      }
    } else if(tune_threshold_ > 0) {
      tune_threshold_--;
    } else if(tune_threshold_ < 0) {
      tune_threshold_++;
    } else if(tune_threshold_ == 0) {
      if(lifetime_ < original_lifetime_) {
        lifetime_++;
      } else if(lifetime_ > original_lifetime_) {
        lifetime_--;
      }
    }
  }
}
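// The autotuning branch above, restated as a pure function so the
// hysteresis is easier to see: only a sustained run of over-full (or
// under-full) young collections moves the promotion age, and calm
// periods drift it back toward the original value. This is a sketch;
// the k* constants are assumed stand-ins for the c*-thresholds above,
// not their real values.
int tune_lifetime(double used, int lifetime, int original, int& streak) {
  const double kOverFull = 95.0, kUnderFull = 50.0;  // assumed thresholds
  const int kOverTimes = 2, kUnderTimes = -3;        // assumed streak limits
  const int kMinLifetime = 1, kMaxLifetime = 6;      // assumed age bounds

  if(used > kOverFull) {
    // Heap keeps filling up: promote earlier once the streak is long enough.
    if(streak >= kOverTimes) { if(lifetime > kMinLifetime) lifetime--; }
    else streak++;
  } else if(used < kUnderFull) {
    // Heap stays roomy: promote later once the streak is long enough.
    if(streak <= kUnderTimes) { if(lifetime < kMaxLifetime) lifetime++; }
    else streak--;
  } else if(streak > 0) {
    streak--;
  } else if(streak < 0) {
    streak++;
  } else if(lifetime < original) {
    lifetime++;
  } else if(lifetime > original) {
    lifetime--;
  }

  return lifetime;
}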
void ImmixGC::collect(GCData& data) {
  Object* tmp;

  gc_.clear_lines();

  for(Roots::Iterator i(data.roots()); i.more(); i.advance()) {
    tmp = i->get();
    if(tmp->reference_p()) {
      saw_object(tmp);
    }
  }

  for(capi::Handles::Iterator i(*data.handles()); i.more(); i.advance()) {
    if(!i->weak_p()) saw_object(i->object());
  }

  for(capi::Handles::Iterator i(*data.cached_handles()); i.more(); i.advance()) {
    if(!i->weak_p()) saw_object(i->object());
  }

  for(VariableRootBuffers::Iterator i(data.variable_buffers());
      i.more();
      i.advance()) {
    Object*** buffer = i->buffer();
    for(int idx = 0; idx < i->size(); idx++) {
      Object** var = buffer[idx];
      Object* tmp = *var;

      if(tmp->reference_p() && tmp->young_object_p()) {
        saw_object(tmp);
      }
    }
  }

  // Walk all the call frames
  for(CallFrameLocationList::const_iterator i = data.call_frames().begin();
      i != data.call_frames().end();
      ++i) {
    CallFrame** loc = *i;
    walk_call_frame(*loc);
  }

  gc_.process_mark_stack(allocator_);

  // Sweep up the garbage
  gc_.sweep_blocks();

  // This resets the allocator state to sync it up with the BlockAllocator
  // properly.
  allocator_.get_new_block();

  ObjectArray* current_rs = object_memory->remember_set;

  int cleared = 0;

  for(ObjectArray::iterator oi = current_rs->begin();
      oi != current_rs->end();
      ++oi) {
    tmp = *oi;
    // unremember_object throws a NULL in to remove an object
    // so we don't have to compact the set in unremember
    if(tmp) {
      assert(tmp->zone == MatureObjectZone);
      assert(!tmp->forwarded_p());

      if(!tmp->marked_p()) {
        cleared++;
        *oi = NULL;
      }
    }
  }

  // Switch the which_mark_ for next time.
  which_mark_ = (which_mark_ == 1 ? 2 : 1);

#ifdef IMMIX_DEBUG
  std::cout << "Immix: RS size cleared: " << cleared << "\n";

  immix::Chunks& chunks = gc_.block_allocator().chunks();
  std::cout << "chunks=" << chunks.size() << "\n";

  immix::AllBlockIterator iter(chunks);

  int blocks_seen = 0;
  int total_objects = 0;
  int total_object_bytes = 0;

  while(immix::Block* block = iter.next()) {
    blocks_seen++;
    std::cout << "block " << block << ", holes=" << block->holes() << " "
              << "objects=" << block->objects() << " "
              << "object_bytes=" << block->object_bytes() << " "
              << "frag=" << block->fragmentation_ratio()
              << "\n";

    total_objects += block->objects();
    total_object_bytes += block->object_bytes();
  }

  std::cout << blocks_seen << " blocks\n";
  std::cout << gc_.bytes_allocated() << " bytes allocated\n";
  std::cout << total_object_bytes << " object bytes / "
            << total_objects << " objects\n";

  int* holes = new int[10];
  for(int i = 0; i < 10; i++) {
    holes[i] = 0;
  }

  immix::AllBlockIterator iter2(chunks);

  while(immix::Block* block = iter2.next()) {
    int h = block->holes();
    if(h > 9) h = 9;
    holes[h]++;
  }

  std::cout << "== hole stats ==\n";
  for(int i = 0; i < 10; i++) {
    if(holes[i] > 0) {
      std::cout << i << ": " << holes[i] << "\n";
    }
  }

  // Free the histogram buffer.
  delete[] holes;
#endif
}
/**
 * Performs a garbage collection of the immix space.
 */
void ImmixGC::collect(GCData& data) {
  Object* tmp;

  gc_.clear_lines();

  int via_handles_ = 0;
  int via_roots = 0;

  for(Roots::Iterator i(data.roots()); i.more(); i.advance()) {
    tmp = i->get();
    if(tmp->reference_p()) saw_object(tmp);
    via_roots++;
  }

  if(data.threads()) {
    for(std::list<ManagedThread*>::iterator i = data.threads()->begin();
        i != data.threads()->end();
        ++i) {
      scan(*i, false);
    }
  }

  for(Allocator<capi::Handle>::Iterator i(data.handles()->allocator());
      i.more();
      i.advance()) {
    if(i->in_use_p() && !i->weak_p()) {
      saw_object(i->object());
      via_handles_++;
    }
  }

  std::list<capi::GlobalHandle*>* gh = data.global_handle_locations();

  if(gh) {
    for(std::list<capi::GlobalHandle*>::iterator i = gh->begin();
        i != gh->end();
        ++i) {
      capi::Handle** loc = (*i)->handle();
      if(capi::Handle* hdl = *loc) {
        if(!REFERENCE_P(hdl)) continue;
        if(hdl->valid_p()) {
          Object* obj = hdl->object();
          if(obj && obj->reference_p()) {
            saw_object(obj);
            via_handles_++;
          }
        } else {
          std::cerr << "Detected bad handle checking global capi handles\n";
        }
      }
    }
  }

#ifdef ENABLE_LLVM
  if(LLVMState* ls = data.llvm_state()) ls->gc_scan(this);
#endif

  gc_.process_mark_stack(allocator_);

  // We've now finished marking the entire object graph.

  // Marking objects to be Finalized can cause more things to continue to
  // live, so we must check the mark_stack again.
  do {
    walk_finalizers();
  } while(gc_.process_mark_stack(allocator_));

  clean_weakrefs();

  // Remove unreachable locked objects still in the list
  if(data.threads()) {
    for(std::list<ManagedThread*>::iterator i = data.threads()->begin();
        i != data.threads()->end();
        ++i) {
      clean_locked_objects(*i, false);
    }
  }

  // Sweep up the garbage
  gc_.sweep_blocks();

  // This resets the allocator state to sync it up with the BlockAllocator
  // properly.
  allocator_.get_new_block();

  // Clear unreachable objects from the various remember sets
  int cleared = 0;
  unsigned int mark = object_memory_->mark();
  cleared = object_memory_->unremember_objects(mark);
  for(std::list<gc::WriteBarrier*>::iterator wbi = object_memory_->aux_barriers().begin();
      wbi != object_memory_->aux_barriers().end();
      ++wbi) {
    gc::WriteBarrier* wb = *wbi;
    cleared += wb->unremember_objects(mark);
  }

  // Now, calculate how much space we're still using.
  immix::Chunks& chunks = gc_.block_allocator().chunks();
  immix::AllBlockIterator iter(chunks);

  int live_bytes = 0;
  int total_bytes = 0;

  while(immix::Block* block = iter.next()) {
    total_bytes += immix::cBlockSize;
    live_bytes += block->bytes_from_lines();
  }

  double percentage_live = (double)live_bytes / (double)total_bytes;

  if(object_memory_->state()->shared.config.gc_immix_debug) {
    std::cerr << "[GC IMMIX: " << clear_marked_objects() << " marked"
              << ", " << via_roots << " roots "
              << via_handles_ << " handles "
              << (int)(percentage_live * 100) << "% live"
              << ", " << live_bytes << "/" << total_bytes << "]\n";
  }

  if(percentage_live >= 0.90) {
    if(object_memory_->state()->shared.config.gc_immix_debug) {
      std::cerr << "[GC IMMIX: expanding. "
                << (int)(percentage_live * 100) << "%]\n";
    }
    gc_.block_allocator().add_chunk();
  }

#ifdef IMMIX_DEBUG
  std::cout << "Immix: RS size cleared: " << cleared << "\n";
  std::cout << "chunks=" << chunks.size() << "\n";

  // Reuse the chunks reference from above; a fresh iterator is needed
  // because the earlier one is already exhausted.
  immix::AllBlockIterator debug_iter(chunks);

  int blocks_seen = 0;
  int total_objects = 0;
  int total_object_bytes = 0;

  while(immix::Block* block = debug_iter.next()) {
    blocks_seen++;
    std::cout << "block " << block << ", holes=" << block->holes() << " "
              << "objects=" << block->objects() << " "
              << "object_bytes=" << block->object_bytes() << " "
              << "frag=" << block->fragmentation_ratio()
              << "\n";

    total_objects += block->objects();
    total_object_bytes += block->object_bytes();
  }

  std::cout << blocks_seen << " blocks\n";
  std::cout << gc_.bytes_allocated() << " bytes allocated\n";
  std::cout << total_object_bytes << " object bytes / "
            << total_objects << " objects\n";

  int* holes = new int[10];
  for(int i = 0; i < 10; i++) {
    holes[i] = 0;
  }

  immix::AllBlockIterator iter2(chunks);

  while(immix::Block* block = iter2.next()) {
    int h = block->holes();
    if(h > 9) h = 9;
    holes[h]++;
  }

  std::cout << "== hole stats ==\n";
  for(int i = 0; i < 10; i++) {
    if(holes[i] > 0) {
      std::cout << i << ": " << holes[i] << "\n";
    }
  }

  delete[] holes;
  holes = NULL;
#endif
}
void ImmixGC::collect(GCData& data) {
  Object* tmp;

  gc_.clear_lines();

  int via_handles_ = 0;
  int via_roots = 0;
  int via_stack = 0;
  int callframes = 0;

  for(Roots::Iterator i(data.roots()); i.more(); i.advance()) {
    tmp = i->get();
    if(tmp->reference_p()) saw_object(tmp);
    via_roots++;
  }

  if(data.threads()) {
    for(std::list<ManagedThread*>::iterator i = data.threads()->begin();
        i != data.threads()->end();
        ++i) {
      for(Roots::Iterator ri((*i)->roots()); ri.more(); ri.advance()) {
        ri->set(saw_object(ri->get()));
      }
    }
  }

  for(capi::Handles::Iterator i(*data.handles()); i.more(); i.advance()) {
    if(i->in_use_p() && !i->weak_p()) {
      saw_object(i->object());
      via_handles_++;
    }
  }

  for(capi::Handles::Iterator i(*data.cached_handles()); i.more(); i.advance()) {
    if(i->in_use_p() && !i->weak_p()) {
      saw_object(i->object());
      via_handles_++;
    }
  }

  std::list<capi::Handle**>* gh = data.global_handle_locations();

  if(gh) {
    for(std::list<capi::Handle**>::iterator i = gh->begin();
        i != gh->end();
        ++i) {
      capi::Handle** loc = *i;
      if(capi::Handle* hdl = *loc) {
        if(!CAPI_REFERENCE_P(hdl)) continue;
        if(hdl->valid_p()) {
          Object* obj = hdl->object();
          if(obj && obj->reference_p()) {
            saw_object(obj);
            via_handles_++;
          }
        } else {
          std::cerr << "Detected bad handle checking global capi handles\n";
        }
      }
    }
  }

  for(VariableRootBuffers::Iterator i(data.variable_buffers());
      i.more();
      i.advance()) {
    Object*** buffer = i->buffer();
    for(int idx = 0; idx < i->size(); idx++) {
      Object** var = buffer[idx];
      Object* tmp = *var;

      via_stack++;
      if(tmp->reference_p() && tmp->young_object_p()) {
        saw_object(tmp);
      }
    }
  }

  // Walk all the call frames
  for(CallFrameLocationList::const_iterator i = data.call_frames().begin();
      i != data.call_frames().end();
      ++i) {
    callframes++;
    CallFrame** loc = *i;
    walk_call_frame(*loc);
  }

  gc_.process_mark_stack(allocator_);

  // We've now finished marking the entire object graph.

  check_finalize();

  // Finalize can cause more things to continue to live, so we must
  // check the mark_stack again.
  gc_.process_mark_stack(allocator_);

  // Sweep up the garbage
  gc_.sweep_blocks();

  // This resets the allocator state to sync it up with the BlockAllocator
  // properly.
  allocator_.get_new_block();

  ObjectArray* current_rs = object_memory_->remember_set();

  int cleared = 0;

  for(ObjectArray::iterator oi = current_rs->begin();
      oi != current_rs->end();
      ++oi) {
    tmp = *oi;
    // unremember_object throws a NULL in to remove an object
    // so we don't have to compact the set in unremember
    if(tmp) {
      assert(tmp->zone() == MatureObjectZone);
      assert(!tmp->forwarded_p());

      if(!tmp->marked_p(object_memory_->mark())) {
        cleared++;
        *oi = NULL;
      }
    }
  }

  for(std::list<gc::WriteBarrier*>::iterator wbi = object_memory_->aux_barriers().begin();
      wbi != object_memory_->aux_barriers().end();
      ++wbi) {
    gc::WriteBarrier* wb = *wbi;
    ObjectArray* rs = wb->remember_set();
    for(ObjectArray::iterator oi = rs->begin();
        oi != rs->end();
        ++oi) {
      tmp = *oi;
      if(tmp) {
        assert(tmp->zone() == MatureObjectZone);
        assert(!tmp->forwarded_p());

        if(!tmp->marked_p(object_memory_->mark())) {
          cleared++;
          *oi = NULL;
        }
      }
    }
  }

  // Now, calculate how much space we're still using.
  immix::Chunks& chunks = gc_.block_allocator().chunks();
  immix::AllBlockIterator iter(chunks);

  int live_bytes = 0;
  int total_bytes = 0;

  while(immix::Block* block = iter.next()) {
    total_bytes += immix::cBlockSize;
    live_bytes += block->bytes_from_lines();
  }

  double percentage_live = (double)live_bytes / (double)total_bytes;

  if(object_memory_->state->shared.config.gc_immix_debug) {
    std::cerr << "[GC IMMIX: " << clear_marked_objects() << " marked"
              << ", " << via_roots << " roots "
              << via_handles_ << " handles "
              << (int)(percentage_live * 100) << "% live"
              << ", " << live_bytes << "/" << total_bytes << "]\n";
  }

  if(percentage_live >= 0.90) {
    if(object_memory_->state->shared.config.gc_immix_debug) {
      std::cerr << "[GC IMMIX: expanding. "
                << (int)(percentage_live * 100) << "%]\n";
    }
    gc_.block_allocator().add_chunk();
  }

#ifdef IMMIX_DEBUG
  std::cout << "Immix: RS size cleared: " << cleared << "\n";
  std::cout << "chunks=" << chunks.size() << "\n";

  // Reuse the chunks reference from above; a fresh iterator is needed
  // because the earlier one is already exhausted.
  immix::AllBlockIterator debug_iter(chunks);

  int blocks_seen = 0;
  int total_objects = 0;
  int total_object_bytes = 0;

  while(immix::Block* block = debug_iter.next()) {
    blocks_seen++;
    std::cout << "block " << block << ", holes=" << block->holes() << " "
              << "objects=" << block->objects() << " "
              << "object_bytes=" << block->object_bytes() << " "
              << "frag=" << block->fragmentation_ratio()
              << "\n";

    total_objects += block->objects();
    total_object_bytes += block->object_bytes();
  }

  std::cout << blocks_seen << " blocks\n";
  std::cout << gc_.bytes_allocated() << " bytes allocated\n";
  std::cout << total_object_bytes << " object bytes / "
            << total_objects << " objects\n";

  int* holes = new int[10];
  for(int i = 0; i < 10; i++) {
    holes[i] = 0;
  }

  immix::AllBlockIterator iter2(chunks);

  while(immix::Block* block = iter2.next()) {
    int h = block->holes();
    if(h > 9) h = 9;
    holes[h]++;
  }

  std::cout << "== hole stats ==\n";
  for(int i = 0; i < 10; i++) {
    if(holes[i] > 0) {
      std::cout << i << ": " << holes[i] << "\n";
    }
  }

  // Free the histogram buffer.
  delete[] holes;
#endif
}
void ObjectWalker::seed(GCData& data) {
  Object* tmp;
  ObjectArray* current_rs = object_memory_->remember_set();

  for(ObjectArray::iterator oi = current_rs->begin();
      oi != current_rs->end();
      ++oi) {
    tmp = *oi;
    // unremember_object throws a NULL in to remove an object
    // so we don't have to compact the set in unremember
    if(tmp) saw_object(tmp);
  }

  for(std::list<gc::WriteBarrier*>::iterator wbi = object_memory_->aux_barriers().begin();
      wbi != object_memory_->aux_barriers().end();
      ++wbi) {
    gc::WriteBarrier* wb = *wbi;
    ObjectArray* rs = wb->remember_set();
    for(ObjectArray::iterator oi = rs->begin();
        oi != rs->end();
        ++oi) {
      tmp = *oi;
      if(tmp) saw_object(tmp);
    }
  }

  for(Roots::Iterator i(data.roots()); i.more(); i.advance()) {
    saw_object(i->get());
  }

  if(data.threads()) {
    for(std::list<ManagedThread*>::iterator i = data.threads()->begin();
        i != data.threads()->end();
        ++i) {
      for(Roots::Iterator ri((*i)->roots()); ri.more(); ri.advance()) {
        saw_object(ri->get());
      }
    }
  }

  for(capi::Handles::Iterator i(*data.handles()); i.more(); i.advance()) {
    saw_object(i->object());
  }

  for(capi::Handles::Iterator i(*data.cached_handles()); i.more(); i.advance()) {
    saw_object(i->object());
  }

  for(VariableRootBuffers::Iterator i(data.variable_buffers());
      i.more();
      i.advance()) {
    Object*** buffer = i->buffer();
    for(int idx = 0; idx < i->size(); idx++) {
      Object** var = buffer[idx];
      Object* tmp = *var;
      saw_object(tmp);
    }
  }

  RootBuffers* rb = data.root_buffers();
  if(rb) {
    for(RootBuffers::Iterator i(*rb); i.more(); i.advance()) {
      Object** buffer = i->buffer();
      for(int idx = 0; idx < i->size(); idx++) {
        saw_object(buffer[idx]);
      }
    }
  }

  // Walk all the call frames
  for(CallFrameLocationList::iterator i = data.call_frames().begin();
      i != data.call_frames().end();
      ++i) {
    CallFrame** loc = *i;
    walk_call_frame(*loc);
  }
}
/* Perform garbage collection on the young objects. */
void BakerGC::collect(GCData& data) {
#ifdef RBX_GC_STATS
  stats::GCStats::get()->bytes_copied.start();
  stats::GCStats::get()->objects_copied.start();
  stats::GCStats::get()->objects_promoted.start();
  stats::GCStats::get()->collect_young.start();
#endif

  Object* tmp;
  ObjectArray* current_rs = object_memory->remember_set;

  object_memory->remember_set = new ObjectArray(0);
  total_objects = 0;

  // Tracks all objects that we promoted during this run, so
  // we can scan them at the end.
  promoted_ = new ObjectArray(0);
  promoted_current = promoted_insert = promoted_->begin();

  for(ObjectArray::iterator oi = current_rs->begin();
      oi != current_rs->end();
      ++oi) {
    tmp = *oi;
    // unremember_object throws a NULL in to remove an object
    // so we don't have to compact the set in unremember
    if(tmp) {
      assert(tmp->zone == MatureObjectZone);
      assert(!tmp->forwarded_p());

      // Remove the Remember bit, since we're clearing the set.
      tmp->clear_remember();
      scan_object(tmp);
    }
  }

  delete current_rs;

  for(Roots::Iterator i(data.roots()); i.more(); i.advance()) {
    tmp = i->get();
    if(tmp->reference_p() && tmp->young_object_p()) {
      i->set(saw_object(tmp));
    }
  }

  for(VariableRootBuffers::Iterator i(data.variable_buffers());
      i.more();
      i.advance()) {
    Object*** buffer = i->buffer();
    for(int idx = 0; idx < i->size(); idx++) {
      Object** var = buffer[idx];
      Object* tmp = *var;

      if(tmp->reference_p() && tmp->young_object_p()) {
        *var = saw_object(tmp);
      }
    }
  }

  // Walk all the call frames
  for(CallFrameLocationList::iterator i = data.call_frames().begin();
      i != data.call_frames().end();
      ++i) {
    CallFrame** loc = *i;
    walk_call_frame(*loc);
  }

  /* Ok, now handle all promoted objects. This is set up a little weirdly,
   * so I should explain.
   *
   * We want to scan each promoted object. But this scanning will likely
   * cause more objects to be promoted. Adding to an ObjectArray that
   * you're iterating over blows up the iterators, so instead we rotate
   * the current promoted set out as we iterate over it, and stick an
   * empty ObjectArray in.
   *
   * This way, when there are no more objects that are promoted, the last
   * ObjectArray will be empty.
   */

  promoted_current = promoted_insert = promoted_->begin();

  while(promoted_->size() > 0 || !fully_scanned_p()) {
    if(promoted_->size() > 0) {
      for(; promoted_current != promoted_->end(); ++promoted_current) {
        tmp = *promoted_current;
        assert(tmp->zone == MatureObjectZone);
        scan_object(tmp);
        if(watched_p(tmp)) {
          std::cout << "detected " << tmp << " during scan of promoted objects.\n";
        }
      }

      promoted_->resize(promoted_insert - promoted_->begin());
      promoted_current = promoted_insert = promoted_->begin();
    }

    /* As we're handling promoted objects, also handle unscanned objects.
     * Scanning these unscanned objects (via the scan pointer) will
     * cause more promotions. */
    copy_unscanned();
  }

  assert(promoted_->size() == 0);

  delete promoted_;
  promoted_ = NULL;

  assert(fully_scanned_p());

  /* Anything that is going to be found is found now, so we go back and
   * look at everything in current and call delete_object() on anything
   * that's not been forwarded. */
  find_lost_souls();

  /* Check any weakrefs and replace dead objects with nil */
  clean_weakrefs(true);

  /* Swap the 2 halves */
  Heap* x = next;
  next = current;
  current = x;
  next->reset();

#ifdef RBX_GC_STATS
  stats::GCStats::get()->collect_young.stop();
  stats::GCStats::get()->objects_copied.stop();
  stats::GCStats::get()->objects_promoted.stop();
  stats::GCStats::get()->bytes_copied.stop();
#endif
}
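// Standalone sketch of the rotation the long comment above describes:
// scanning a promoted object can promote more objects, and appending to
// a container you are iterating invalidates its iterators, so each pass
// swaps in a fresh worklist and drains the old one until no pass adds
// anything. drain_worklist and its ScanFn callback are illustrative
// names, not the BakerGC API.
#include <vector>

template <typename T, typename ScanFn>
void drain_worklist(std::vector<T>& worklist, ScanFn scan) {
  while(!worklist.empty()) {
    std::vector<T> current;
    current.swap(worklist);          // rotate: new promotions land in a
                                     // fresh, empty worklist
    for(typename std::vector<T>::iterator i = current.begin();
        i != current.end(); ++i) {
      scan(*i, worklist);            // may push new entries safely
    }
  }
  // Post-condition: the last rotation produced an empty worklist, which
  // mirrors "the last ObjectArray will be empty" above.
}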