/**
 * Performs a garbage collection of the immix space.
 */
void ImmixGC::collect(GCData& data) {
  Object* tmp;

  gc_.clear_lines();

  int via_handles_ = 0;
  int via_roots = 0;

  // Mark everything reachable from the root set.
  for(Roots::Iterator i(data.roots()); i.more(); i.advance()) {
    tmp = i->get();
    if(tmp->reference_p()) saw_object(tmp);
    via_roots++;
  }

  // Scan the roots held by each managed thread.
  if(data.threads()) {
    for(std::list<ManagedThread*>::iterator i = data.threads()->begin();
        i != data.threads()->end();
        ++i) {
      scan(*i, false);
    }
  }

  // Mark objects held by strong C-API handles.
  for(Allocator<capi::Handle>::Iterator i(data.handles()->allocator());
      i.more(); i.advance()) {
    if(i->in_use_p() && !i->weak_p()) {
      saw_object(i->object());
      via_handles_++;
    }
  }

  std::list<capi::GlobalHandle*>* gh = data.global_handle_locations();

  if(gh) {
    for(std::list<capi::GlobalHandle*>::iterator i = gh->begin();
        i != gh->end();
        ++i) {
      capi::Handle** loc = (*i)->handle();
      if(capi::Handle* hdl = *loc) {
        if(!REFERENCE_P(hdl)) continue;
        if(hdl->valid_p()) {
          Object* obj = hdl->object();
          if(obj && obj->reference_p()) {
            saw_object(obj);
            via_handles_++;
          }
        } else {
          std::cerr << "Detected bad handle checking global capi handles\n";
        }
      }
    }
  }

#ifdef ENABLE_LLVM
  if(LLVMState* ls = data.llvm_state()) ls->gc_scan(this);
#endif

  gc_.process_mark_stack(allocator_);

  // We've now finished marking the entire object graph.

  // Marking objects to be finalized can keep more objects alive, so we
  // must drain the mark stack again after each finalizer pass.
  do {
    walk_finalizers();
  } while(gc_.process_mark_stack(allocator_));

  clean_weakrefs();

  // Remove unreachable locked objects still in the list
  if(data.threads()) {
    for(std::list<ManagedThread*>::iterator i = data.threads()->begin();
        i != data.threads()->end();
        ++i) {
      clean_locked_objects(*i, false);
    }
  }

  // Sweep up the garbage
  gc_.sweep_blocks();

  // This resets the allocator state to sync it up with the BlockAllocator
  // properly.
  allocator_.get_new_block();

  // Clear unreachable objects from the various remember sets
  unsigned int mark = object_memory_->mark();
  int cleared = object_memory_->unremember_objects(mark);

  for(std::list<gc::WriteBarrier*>::iterator wbi = object_memory_->aux_barriers().begin();
      wbi != object_memory_->aux_barriers().end();
      ++wbi) {
    gc::WriteBarrier* wb = *wbi;
    cleared += wb->unremember_objects(mark);
  }

  // Now, calculate how much space we're still using.
  immix::Chunks& chunks = gc_.block_allocator().chunks();
  immix::AllBlockIterator iter(chunks);

  int live_bytes = 0;
  int total_bytes = 0;

  while(immix::Block* block = iter.next()) {
    total_bytes += immix::cBlockSize;
    live_bytes += block->bytes_from_lines();
  }

  double percentage_live = (double)live_bytes / (double)total_bytes;

  if(object_memory_->state()->shared.config.gc_immix_debug) {
    std::cerr << "[GC IMMIX: " << clear_marked_objects() << " marked"
              << ", " << via_roots << " roots "
              << via_handles_ << " handles "
              << (int)(percentage_live * 100) << "% live"
              << ", " << live_bytes << "/" << total_bytes << "]\n";
  }

  // If we're still mostly full after sweeping, grow the heap by a chunk.
  if(percentage_live >= 0.90) {
    if(object_memory_->state()->shared.config.gc_immix_debug) {
      std::cerr << "[GC IMMIX: expanding. "
                << (int)(percentage_live * 100) << "%]\n";
    }
    gc_.block_allocator().add_chunk();
  }

#ifdef IMMIX_DEBUG
  std::cout << "Immix: RS size cleared: " << cleared << "\n";
  std::cout << "chunks=" << chunks.size() << "\n";

  // A fresh iterator is needed here; `iter` above is already exhausted.
  immix::AllBlockIterator debug_iter(chunks);

  int blocks_seen = 0;
  int total_objects = 0;
  int total_object_bytes = 0;

  while(immix::Block* block = debug_iter.next()) {
    blocks_seen++;
    std::cout << "block " << block << ", holes=" << block->holes() << " "
              << "objects=" << block->objects() << " "
              << "object_bytes=" << block->object_bytes() << " "
              << "frag=" << block->fragmentation_ratio() << "\n";

    total_objects += block->objects();
    total_object_bytes += block->object_bytes();
  }

  std::cout << blocks_seen << " blocks\n";
  std::cout << gc_.bytes_allocated() << " bytes allocated\n";
  std::cout << total_object_bytes << " object bytes / "
            << total_objects << " objects\n";

  int* holes = new int[10];
  for(int i = 0; i < 10; i++) {
    holes[i] = 0;
  }

  immix::AllBlockIterator iter2(chunks);

  while(immix::Block* block = iter2.next()) {
    int h = block->holes();
    if(h > 9) h = 9;
    holes[h]++;
  }

  std::cout << "== hole stats ==\n";
  for(int i = 0; i < 10; i++) {
    if(holes[i] > 0) {
      std::cout << i << ": " << holes[i] << "\n";
    }
  }

  delete[] holes;
  holes = NULL;
#endif
}
/**
 * Perform garbage collection on the young objects.
 */
void BakerGC::collect(GCData& data, YoungCollectStats* stats) {

#ifdef HAVE_VALGRIND_H
  VALGRIND_MAKE_MEM_DEFINED(next->start().as_int(), next->size());
  VALGRIND_MAKE_MEM_DEFINED(current->start().as_int(), current->size());
#endif

  Object* tmp;
  ObjectArray* current_rs = object_memory_->swap_remember_set();

  total_objects = 0;
  copy_spills_ = 0;
  reset_promoted();

  // Start by copying objects in the remember set
  for(ObjectArray::iterator oi = current_rs->begin();
      oi != current_rs->end();
      ++oi) {
    tmp = *oi;
    // unremember_object throws a NULL in to remove an object
    // so we don't have to compact the set in unremember
    if(tmp) {
      // assert(tmp->mature_object_p());
      // assert(!tmp->forwarded_p());

      // Remove the Remember bit, since we're clearing the set.
      tmp->clear_remember();
      scan_object(tmp);
    }
  }

  delete current_rs;

  for(std::list<gc::WriteBarrier*>::iterator wbi = object_memory_->aux_barriers().begin();
      wbi != object_memory_->aux_barriers().end();
      ++wbi) {
    gc::WriteBarrier* wb = *wbi;
    ObjectArray* rs = wb->swap_remember_set();
    for(ObjectArray::iterator oi = rs->begin();
        oi != rs->end();
        ++oi) {
      tmp = *oi;

      if(tmp) {
        tmp->clear_remember();
        scan_object(tmp);
      }
    }

    delete rs;
  }

  for(Roots::Iterator i(data.roots()); i.more(); i.advance()) {
    i->set(saw_object(i->get()));
  }

  if(data.threads()) {
    for(std::list<ManagedThread*>::iterator i = data.threads()->begin();
        i != data.threads()->end();
        ++i) {
      scan(*i, true);
    }
  }

  for(Allocator<capi::Handle>::Iterator i(data.handles()->allocator());
      i.more(); i.advance()) {
    if(!i->in_use_p()) continue;

    if(!i->weak_p() && i->object()->young_object_p()) {
      i->set_object(saw_object(i->object()));

    // Users manipulate values accessible from the data* within an
    // RData without running a write barrier. Thus if we see a mature
    // rdata, we must always scan it because it could contain
    // young pointers.
    } else if(!i->object()->young_object_p() && i->is_rdata()) {
      scan_object(i->object());
    }

    assert(i->object()->type_id() > InvalidType &&
           i->object()->type_id() < LastObjectType);
  }

  std::list<capi::GlobalHandle*>* gh = data.global_handle_locations();

  if(gh) {
    for(std::list<capi::GlobalHandle*>::iterator i = gh->begin();
        i != gh->end();
        ++i) {
      capi::GlobalHandle* global_handle = *i;
      capi::Handle** loc = global_handle->handle();
      if(capi::Handle* hdl = *loc) {
        if(!REFERENCE_P(hdl)) continue;
        if(hdl->valid_p()) {
          Object* obj = hdl->object();
          if(obj && obj->reference_p() && obj->young_object_p()) {
            hdl->set_object(saw_object(obj));
          }
        } else {
          std::cerr << "Detected bad handle checking global capi handles\n";
        }
      }
    }
  }

#ifdef ENABLE_LLVM
  if(LLVMState* ls = data.llvm_state()) ls->gc_scan(this);
#endif

  // Handle all promotions to non-young space that occurred.
  handle_promotions();

  assert(fully_scanned_p());
  // We're now done seeing the entire object graph of normal, live references.
  // Now we get to handle the unusual references, like finalizers and such.

  // Objects with finalizers must be kept alive until the finalizers have
  // run.
  walk_finalizers();

  // Process possible promotions from processing objects with finalizers.
  handle_promotions();

  if(!promoted_stack_.empty()) rubinius::bug("promote stack has elements!");
  if(!fully_scanned_p()) rubinius::bug("more young refs");

  // Check any weakrefs and replace dead objects with nil
  clean_weakrefs(true);

  // Swap the 2 halves
  Heap* x = next;
  next = current;
  current = x;

  if(stats) {
    stats->lifetime = lifetime_;
    stats->percentage_used = current->percentage_used();
    stats->promoted_objects = promoted_objects_;
    stats->excess_objects = copy_spills_;
  }

  // Tune the age at which promotion occurs
  if(autotune_) {
    double used = current->percentage_used();
    if(used > cOverFullThreshold) {
      if(tune_threshold_ >= cOverFullTimes) {
        if(lifetime_ > cMinimumLifetime) lifetime_--;
      } else {
        tune_threshold_++;
      }
    } else if(used < cUnderFullThreshold) {
      if(tune_threshold_ <= cUnderFullTimes) {
        if(lifetime_ < cMaximumLifetime) lifetime_++;
      } else {
        tune_threshold_--;
      }
    } else if(tune_threshold_ > 0) {
      tune_threshold_--;
    } else if(tune_threshold_ < 0) {
      tune_threshold_++;
    } else if(tune_threshold_ == 0) {
      if(lifetime_ < original_lifetime_) {
        lifetime_++;
      } else if(lifetime_ > original_lifetime_) {
        lifetime_--;
      }
    }
  }
}
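// The autotune block above nudges lifetime_ (the number of collections an
// object must survive before promotion) based on how full the surviving
// semispace is. Below is a standalone restatement of that policy; the
// TuneState struct, the autotune function, and the constant values are
// assumptions made for illustration, not the VM's real definitions:
namespace sketch {
  struct TuneState {
    int lifetime;           // promotion age being tuned
    int original_lifetime;  // configured starting value
    int tune_threshold;     // hysteresis counter
  };

  const double cOverFullThreshold  = 95.0;  // assumed value
  const double cUnderFullThreshold = 20.0;  // assumed value
  const int    cOverFullTimes      = -10;   // assumed value
  const int    cUnderFullTimes     = 10;    // assumed value
  const int    cMinimumLifetime    = 1;     // assumed value
  const int    cMaximumLifetime    = 6;     // assumed value

  inline void autotune(TuneState& s, double used) {
    if(used > cOverFullThreshold) {
      // Over-full: promote earlier so the young space drains faster.
      if(s.tune_threshold >= cOverFullTimes) {
        if(s.lifetime > cMinimumLifetime) s.lifetime--;
      } else {
        s.tune_threshold++;
      }
    } else if(used < cUnderFullThreshold) {
      // Under-full: promote later, giving objects more time to die young.
      if(s.tune_threshold <= cUnderFullTimes) {
        if(s.lifetime < cMaximumLifetime) s.lifetime++;
      } else {
        s.tune_threshold--;
      }
    } else if(s.tune_threshold > 0) {
      s.tune_threshold--;
    } else if(s.tune_threshold < 0) {
      s.tune_threshold++;
    } else if(s.lifetime < s.original_lifetime) {
      // At equilibrium, drift back toward the configured lifetime.
      s.lifetime++;
    } else if(s.lifetime > s.original_lifetime) {
      s.lifetime--;
    }
  }
}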
void ImmixGC::collect(GCData& data) {
  Object* tmp;

  gc_.clear_lines();

  int via_handles_ = 0;
  int via_roots = 0;
  int via_stack = 0;
  int callframes = 0;

  for(Roots::Iterator i(data.roots()); i.more(); i.advance()) {
    tmp = i->get();
    if(tmp->reference_p()) saw_object(tmp);
    via_roots++;
  }

  if(data.threads()) {
    for(std::list<ManagedThread*>::iterator i = data.threads()->begin();
        i != data.threads()->end();
        ++i) {
      for(Roots::Iterator ri((*i)->roots()); ri.more(); ri.advance()) {
        ri->set(saw_object(ri->get()));
      }
    }
  }

  for(capi::Handles::Iterator i(*data.handles()); i.more(); i.advance()) {
    if(i->in_use_p() && !i->weak_p()) {
      saw_object(i->object());
      via_handles_++;
    }
  }

  for(capi::Handles::Iterator i(*data.cached_handles()); i.more(); i.advance()) {
    if(i->in_use_p() && !i->weak_p()) {
      saw_object(i->object());
      via_handles_++;
    }
  }

  std::list<capi::Handle**>* gh = data.global_handle_locations();

  if(gh) {
    for(std::list<capi::Handle**>::iterator i = gh->begin();
        i != gh->end();
        ++i) {
      capi::Handle** loc = *i;
      if(capi::Handle* hdl = *loc) {
        if(!CAPI_REFERENCE_P(hdl)) continue;
        if(hdl->valid_p()) {
          Object* obj = hdl->object();
          if(obj && obj->reference_p()) {
            saw_object(obj);
            via_handles_++;
          }
        } else {
          std::cerr << "Detected bad handle checking global capi handles\n";
        }
      }
    }
  }

  // Mark young objects pointed to by variable root buffers on the stack.
  for(VariableRootBuffers::Iterator i(data.variable_buffers());
      i.more(); i.advance()) {
    Object*** buffer = i->buffer();
    for(int idx = 0; idx < i->size(); idx++) {
      Object** var = buffer[idx];
      Object* tmp = *var;

      via_stack++;
      if(tmp->reference_p() && tmp->young_object_p()) {
        saw_object(tmp);
      }
    }
  }

  // Walk all the call frames
  for(CallFrameLocationList::const_iterator i = data.call_frames().begin();
      i != data.call_frames().end();
      ++i) {
    callframes++;
    CallFrame** loc = *i;
    walk_call_frame(*loc);
  }

  gc_.process_mark_stack(allocator_);

  // We've now finished marking the entire object graph.

  check_finalize();

  // Finalize can cause more things to continue to live, so we must
  // check the mark_stack again.
  gc_.process_mark_stack(allocator_);

  // Sweep up the garbage
  gc_.sweep_blocks();

  // This resets the allocator state to sync it up with the BlockAllocator
  // properly.
  allocator_.get_new_block();

  ObjectArray* current_rs = object_memory_->remember_set();

  int cleared = 0;

  for(ObjectArray::iterator oi = current_rs->begin();
      oi != current_rs->end();
      ++oi) {
    tmp = *oi;
    // unremember_object throws a NULL in to remove an object
    // so we don't have to compact the set in unremember
    if(tmp) {
      assert(tmp->zone() == MatureObjectZone);
      assert(!tmp->forwarded_p());

      if(!tmp->marked_p(object_memory_->mark())) {
        cleared++;
        *oi = NULL;
      }
    }
  }

  for(std::list<gc::WriteBarrier*>::iterator wbi = object_memory_->aux_barriers().begin();
      wbi != object_memory_->aux_barriers().end();
      ++wbi) {
    gc::WriteBarrier* wb = *wbi;
    ObjectArray* rs = wb->remember_set();
    for(ObjectArray::iterator oi = rs->begin();
        oi != rs->end();
        ++oi) {
      tmp = *oi;

      if(tmp) {
        assert(tmp->zone() == MatureObjectZone);
        assert(!tmp->forwarded_p());

        if(!tmp->marked_p(object_memory_->mark())) {
          cleared++;
          *oi = NULL;
        }
      }
    }
  }

  // Now, calculate how much space we're still using.
  immix::Chunks& chunks = gc_.block_allocator().chunks();
  immix::AllBlockIterator iter(chunks);

  int live_bytes = 0;
  int total_bytes = 0;

  while(immix::Block* block = iter.next()) {
    total_bytes += immix::cBlockSize;
    live_bytes += block->bytes_from_lines();
  }

  double percentage_live = (double)live_bytes / (double)total_bytes;

  if(object_memory_->state->shared.config.gc_immix_debug) {
    std::cerr << "[GC IMMIX: " << clear_marked_objects() << " marked"
              << ", " << via_roots << " roots "
              << via_handles_ << " handles "
              << (int)(percentage_live * 100) << "% live"
              << ", " << live_bytes << "/" << total_bytes << "]\n";
  }

  if(percentage_live >= 0.90) {
    if(object_memory_->state->shared.config.gc_immix_debug) {
      std::cerr << "[GC IMMIX: expanding. "
                << (int)(percentage_live * 100) << "%]\n";
    }
    gc_.block_allocator().add_chunk();
  }

#ifdef IMMIX_DEBUG
  std::cout << "Immix: RS size cleared: " << cleared << "\n";
  std::cout << "chunks=" << chunks.size() << "\n";

  // A fresh iterator is needed here; `iter` above is already exhausted.
  immix::AllBlockIterator debug_iter(chunks);

  int blocks_seen = 0;
  int total_objects = 0;
  int total_object_bytes = 0;

  while(immix::Block* block = debug_iter.next()) {
    blocks_seen++;
    std::cout << "block " << block << ", holes=" << block->holes() << " "
              << "objects=" << block->objects() << " "
              << "object_bytes=" << block->object_bytes() << " "
              << "frag=" << block->fragmentation_ratio() << "\n";

    total_objects += block->objects();
    total_object_bytes += block->object_bytes();
  }

  std::cout << blocks_seen << " blocks\n";
  std::cout << gc_.bytes_allocated() << " bytes allocated\n";
  std::cout << total_object_bytes << " object bytes / "
            << total_objects << " objects\n";

  int* holes = new int[10];
  for(int i = 0; i < 10; i++) {
    holes[i] = 0;
  }

  immix::AllBlockIterator iter2(chunks);

  while(immix::Block* block = iter2.next()) {
    int h = block->holes();
    if(h > 9) h = 9;
    holes[h]++;
  }

  std::cout << "== hole stats ==\n";
  for(int i = 0; i < 10; i++) {
    if(holes[i] > 0) {
      std::cout << i << ": " << holes[i] << "\n";
    }
  }

  delete[] holes;
#endif
}
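// In this revision the remember sets are pruned in place: entries whose mark
// does not match the current cycle are nulled rather than compacted, so later
// scans must skip NULL slots. A minimal sketch of that filtering pattern; Obj
// and its which_mark field are hypothetical stand-ins for Object and the real
// marked_p() test:
#include <cstddef>
#include <vector>

namespace sketch {
  struct Obj { unsigned int which_mark; };
  typedef std::vector<Obj*> RememberSet;

  // Null out entries not marked in the current cycle; returns how many slots
  // were cleared, mirroring the `cleared` counter kept by collect().
  inline int clear_unmarked(RememberSet& rs, unsigned int current_mark) {
    int cleared = 0;
    for(RememberSet::iterator oi = rs.begin(); oi != rs.end(); ++oi) {
      Obj* tmp = *oi;
      if(tmp && tmp->which_mark != current_mark) {
        *oi = NULL;
        cleared++;
      }
    }
    return cleared;
  }
}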