// Walks every thread_info record, passing each live channel and buffer
// reference through cb so the callback can replace it (e.g. with a
// forwarded pointer during GC). Non-reference (immediate) slots are skipped.
void cpu_event_each_channel(STATE, OBJECT (*cb)(STATE, void*, OBJECT), void *cb_data) {
  for(struct thread_info *ti = (struct thread_info*)state->thread_infos;
      ti; ti = ti->next) {
    if(ti->channel && REFERENCE_P(ti->channel)) {
      ti->channel = cb(state, cb_data, ti->channel);
    }
    if(ti->buffer && REFERENCE_P(ti->buffer)) {
      ti->buffer = cb(state, cb_data, ti->buffer);
    }
  }
}
// Inline fast path of the generational write barrier: only a store of a
// young-generation heap object into a non-young holder needs to reach the
// slow path (inline_write_barrier_passed).
inline void Object::write_barrier(STATE, void* ptr) {
  Object* referee = reinterpret_cast<Object*>(ptr);
  if(!REFERENCE_P(referee)) return;           // immediates need no barrier
  if(state->young_object_p(this)) return;     // young holder: nothing to record
  if(!state->young_object_p(referee)) return; // non-young referee: nothing to record
  inline_write_barrier_passed(state, ptr);
}
/**
 * Remember-set write barrier: when a store creates a reference from a
 * mature-zone object (target) to a young-zone object (val), and target is
 * not already flagged via its Remember bit, target is added to the
 * remember set.
 */
void write_barrier(Object* target, Object* val) {
  const bool new_mature_to_young =
         !target->Remember
      && REFERENCE_P(val)
      && target->zone == MatureObjectZone
      && val->zone == YoungObjectZone;

  if(new_mature_to_young) remember_object(target);
}
/**
 * Checks if the store is creating a reference from a non-young object
 * (target) to a young generation object (val). If it is, target (i.e. the
 * referer) is added to the remember set, unless it is already remembered.
 *
 * @param target The object holding the reference (i.e. the referer).
 * @param val The object being referenced (i.e. the referee).
 */
void write_barrier(Object* target, Object* val) {
  if(!target->remembered_p()
      && REFERENCE_P(val)
      && target->zone() != YoungObjectZone
      && val->zone() == YoungObjectZone) {
    remember_object(target);
  }
}
/**
 * Scans all root sets for an immix collection: VM roots, per-thread state,
 * strong C-API handles, and registered global C-API handle locations.
 * saw_object() returns a forwarding pointer when it relocates an object;
 * each referring slot is updated with that forwarded address.
 */
void ImmixGC::collect_scan(GCData* data) {
  // VM-level roots.
  for(Roots::Iterator i(data->roots()); i.more(); i.advance()) {
    if(Object* fwd = saw_object(i->get())) {
      i->set(fwd);
    }
  }

  // Per-thread state; the thread list is guarded by the nexus spinlock,
  // held only for the duration of this scoped block.
  {
    utilities::thread::SpinLock::LockGuard guard(data->thread_nexus()->threads_lock());

    for(ThreadList::iterator i = data->thread_nexus()->threads()->begin();
        i != data->thread_nexus()->threads()->end();
        ++i) {
      scan(*i, false);
    }
  }

  // Strong (in-use, non-weak) C-API handles keep their objects alive.
  for(Allocator<capi::Handle>::Iterator i(data->handles()->allocator());
      i.more(); i.advance()) {
    if(i->in_use_p() && !i->weak_p()) {
      if(Object* fwd = saw_object(i->object())) {
        i->set_object(fwd);
      }
    }
  }

  // Global handle locations (addresses registered by extensions).
  std::list<capi::GlobalHandle*>* gh = data->global_handle_locations();
  if(gh) {
    for(std::list<capi::GlobalHandle*>::iterator i = gh->begin();
        i != gh->end();
        ++i) {
      capi::Handle** loc = (*i)->handle();
      if(capi::Handle* hdl = *loc) {
        // Skip slots that fail REFERENCE_P — presumably immediate-encoded
        // VALUEs rather than real Handle pointers; TODO confirm.
        if(!REFERENCE_P(hdl)) continue;
        if(hdl->valid_p()) {
          Object* obj = hdl->object();
          if(obj && obj->reference_p()) {
            if(Object* fwd = saw_object(obj)) {
              hdl->set_object(fwd);
            }
          }
        } else {
          std::cerr << "Detected bad handle checking global capi handles\n";
        }
      }
    }
  }

  /* TODO: JIT
  if(LLVMState* ls = data->llvm_state()) ls->gc_scan(this);
  */
}
/**
 * Registers a global C-API handle location (an address an extension asked
 * the GC to track, e.g. via rb_gc_register_address). If the location
 * already holds a non-immediate handle, it is validated first; an invalid
 * handle prints a diagnostic banner and aborts the VM via rubinius::bug().
 *
 * @param loc  Address of the extension's handle slot to track.
 * @param file Source file of the registration site (for diagnostics).
 * @param line Source line of the registration site (for diagnostics).
 */
void ObjectMemory::add_global_capi_handle_location(STATE, capi::Handle** loc,
                                                   const char* file, int line)
{
  SYNC(state);
  if(*loc && REFERENCE_P(*loc)) {
    if(!capi_handles_->validate(*loc)) {
      std::cerr << std::endl << "==================================== ERROR ====================================" << std::endl;
      std::cerr << "| An extension is trying to add an invalid handle at the following location: |" << std::endl;
      std::ostringstream out;
      out << file << ":" << line;
      std::cerr << "| " << std::left << std::setw(75) << out.str() << " |" << std::endl;
      std::cerr << "| |" << std::endl;
      std::cerr << "| An invalid handle means that it points to an invalid VALUE. This can happen |" << std::endl;
      std::cerr << "| when you haven't initialized the VALUE pointer yet, in which case we |" << std::endl;
      std::cerr << "| suggest either initializing it properly or otherwise first initialize it to |" << std::endl;
      std::cerr << "| NULL if you can only set it to a proper VALUE pointer afterwards. Consider |" << std::endl;
      std::cerr << "| the following example that could cause this problem: |" << std::endl;
      std::cerr << "| |" << std::endl;
      std::cerr << "| VALUE ptr; |" << std::endl;
      std::cerr << "| rb_gc_register_address(&ptr); |" << std::endl;
      std::cerr << "| ptr = rb_str_new(\"test\"); |" << std::endl;
      std::cerr << "| |" << std::endl;
      std::cerr << "| Either change this register after initializing |" << std::endl;
      std::cerr << "| |" << std::endl;
      std::cerr << "| VALUE ptr; |" << std::endl;
      std::cerr << "| ptr = rb_str_new(\"test\"); |" << std::endl;
      std::cerr << "| rb_gc_register_address(&ptr); |" << std::endl;
      std::cerr << "| |" << std::endl;
      std::cerr << "| Or initialize it with NULL: |" << std::endl;
      std::cerr << "| |" << std::endl;
      std::cerr << "| VALUE ptr = NULL; |" << std::endl;
      std::cerr << "| rb_gc_register_address(&ptr); |" << std::endl;
      std::cerr << "| ptr = rb_str_new(\"test\"); |" << std::endl;
      std::cerr << "| |" << std::endl;
      std::cerr << "| Please note that this is NOT a problem in Rubinius, but in the extension |" << std::endl;
      std::cerr << "| that contains the given file above. A very common source of this problem is |" << std::endl;
      std::cerr << "| using older versions of therubyracer before 0.11.x. Please upgrade to at |" << std::endl;
      std::cerr << "| least version 0.11.x if you're using therubyracer and encounter this |" << std::endl;
      std::cerr << "| problem. For some more background information on why this is a problem |" << std::endl;
      std::cerr << "| with therubyracer, you can read the following blog post: |" << std::endl;
      std::cerr << "| |" << std::endl;
      std::cerr << "| http://blog.thefrontside.net/2012/12/04/therubyracer-rides-again/ |" << std::endl;
      std::cerr << "| |" << std::endl;
      std::cerr << "================================== ERROR ======================================" << std::endl;
      rubinius::bug("Halting due to invalid handle");
    }
  }

  // Valid (or still-NULL) location: track it so the GC can update the slot.
  capi::GlobalHandle* global_handle = new capi::GlobalHandle(loc, file, line);
  global_capi_handle_locations_.push_back(global_handle);
}
/**
 * Scans all root sets for an immix collection: VM roots, managed threads,
 * strong C-API handles, and registered global C-API handle locations.
 * This variant only marks (saw_object's return value is not used to
 * rewrite the referring slots).
 */
void ImmixGC::collect_scan(GCData* data) {
  // VM-level roots: mark every heap reference.
  for(Roots::Iterator i(data->roots()); i.more(); i.advance()) {
    Object* tmp = i->get();
    if(tmp->reference_p()) saw_object(tmp);
  }

  // Per-thread state.
  if(data->threads()) {
    for(std::list<ManagedThread*>::iterator i = data->threads()->begin();
        i != data->threads()->end();
        ++i) {
      scan(*i, false);
    }
  }

  // Strong (in-use, non-weak) C-API handles keep their objects alive.
  for(Allocator<capi::Handle>::Iterator i(data->handles()->allocator());
      i.more(); i.advance()) {
    if(i->in_use_p() && !i->weak_p()) {
      saw_object(i->object());
    }
  }

  // Global handle locations (addresses registered by extensions).
  std::list<capi::GlobalHandle*>* gh = data->global_handle_locations();
  if(gh) {
    for(std::list<capi::GlobalHandle*>::iterator i = gh->begin();
        i != gh->end();
        ++i) {
      capi::Handle** loc = (*i)->handle();
      if(capi::Handle* hdl = *loc) {
        // Skip slots that fail REFERENCE_P — presumably immediate-encoded
        // VALUEs rather than real Handle pointers; TODO confirm.
        if(!REFERENCE_P(hdl)) continue;
        if(hdl->valid_p()) {
          Object* obj = hdl->object();
          if(obj && obj->reference_p()) {
            saw_object(obj);
          }
        } else {
          std::cerr << "Detected bad handle checking global capi handles\n";
        }
      }
    }
  }

#ifdef ENABLE_LLVM
  // Let the JIT mark its own GC roots.
  if(LLVMState* ls = data->llvm_state()) ls->gc_scan(this);
#endif
}
/**
 * Registers a global C-API handle location with the shared VM state. If
 * the location already holds a non-immediate handle, it is validated
 * first; an invalid handle prints a diagnostic banner and aborts the VM
 * via rubinius::bug().
 *
 * @param loc  Address of the extension's handle slot to track.
 * @param file Source file of the registration site (for diagnostics).
 * @param line Source line of the registration site (for diagnostics).
 */
void SharedState::add_global_handle_location(capi::Handle** loc,
                                             const char* file, int line)
{
  SYNC_TL;
  if(*loc && REFERENCE_P(*loc)) {
    if(!global_handles_->validate(*loc)) {
      std::cerr << std::endl << "==================================== ERROR ====================================" << std::endl;
      std::cerr << "| An extension is trying to add an invalid handle at the following location: |" << std::endl;
      std::ostringstream out;
      out << file << ":" << line;
      std::cerr << "| " << std::left << std::setw(75) << out.str() << " |" << std::endl;
      std::cerr << "| |" << std::endl;
      std::cerr << "| An invalid handle means that it points to an invalid VALUE. This can happen |" << std::endl;
      std::cerr << "| when you haven't initialized the VALUE pointer yet, in which case we |" << std::endl;
      std::cerr << "| suggest either initializing it properly or otherwise first initialize it to |" << std::endl;
      std::cerr << "| NULL if you can only set it to a proper VALUE pointer afterwards. Consider |" << std::endl;
      std::cerr << "| the following example that could cause this problem: |" << std::endl;
      std::cerr << "| |" << std::endl;
      std::cerr << "| VALUE ptr; |" << std::endl;
      std::cerr << "| rb_gc_register_address(&ptr); |" << std::endl;
      std::cerr << "| ptr = rb_str_new(\"test\"); |" << std::endl;
      std::cerr << "| |" << std::endl;
      std::cerr << "| Either change this register after initializing |" << std::endl;
      std::cerr << "| |" << std::endl;
      std::cerr << "| VALUE ptr; |" << std::endl;
      std::cerr << "| ptr = rb_str_new(\"test\"); |" << std::endl;
      std::cerr << "| rb_gc_register_address(&ptr); |" << std::endl;
      std::cerr << "| |" << std::endl;
      std::cerr << "| Or initialize it with NULL: |" << std::endl;
      std::cerr << "| |" << std::endl;
      std::cerr << "| VALUE ptr = NULL; |" << std::endl;
      std::cerr << "| rb_gc_register_address(&ptr); |" << std::endl;
      std::cerr << "| ptr = rb_str_new(\"test\"); |" << std::endl;
      std::cerr << "| |" << std::endl;
      std::cerr << "================================== ERROR ======================================" << std::endl;
      rubinius::bug("Halting due to invalid handle");
    }
  }

  // Valid (or still-NULL) location: track it so the GC can update the slot.
  capi::GlobalHandle* global_handle = new capi::GlobalHandle(loc, file, line);
  global_handle_locations_.push_back(global_handle);
}
/**
 * Perform garbage collection on the young objects.
 *
 * Copying collection over the young generation: scans the remember sets,
 * VM roots, threads, C-API handles and global handle locations; copies
 * reachable young objects (saw_object returns the new location), handles
 * promotions to the mature generation, keeps finalizable objects alive,
 * nils dead weakrefs, then swaps the two semispaces and retunes the
 * promotion lifetime.
 */
void BakerGC::collect(GCData& data, YoungCollectStats* stats) {

#ifdef HAVE_VALGRIND_H
  // Tell valgrind both semispaces are addressable before we touch them.
  VALGRIND_MAKE_MEM_DEFINED(next->start().as_int(), next->size());
  VALGRIND_MAKE_MEM_DEFINED(current->start().as_int(), current->size());
#endif

  Object* tmp;
  ObjectArray *current_rs = object_memory_->swap_remember_set();

  total_objects = 0;
  copy_spills_ = 0;
  reset_promoted();

  // Start by copying objects in the remember set
  for(ObjectArray::iterator oi = current_rs->begin();
      oi != current_rs->end();
      ++oi) {
    tmp = *oi;
    // unremember_object throws a NULL in to remove an object
    // so we don't have to compact the set in unremember
    if(tmp) {
      // assert(tmp->mature_object_p());
      // assert(!tmp->forwarded_p());

      // Remove the Remember bit, since we're clearing the set.
      tmp->clear_remember();
      scan_object(tmp);
    }
  }

  delete current_rs;

  // Auxiliary write barriers carry their own remember sets; drain each one
  // the same way.
  for(std::list<gc::WriteBarrier*>::iterator wbi = object_memory_->aux_barriers().begin();
      wbi != object_memory_->aux_barriers().end();
      ++wbi) {
    gc::WriteBarrier* wb = *wbi;
    ObjectArray* rs = wb->swap_remember_set();
    for(ObjectArray::iterator oi = rs->begin();
        oi != rs->end();
        ++oi) {
      tmp = *oi;
      if(tmp) {
        tmp->clear_remember();
        scan_object(tmp);
      }
    }
    delete rs;
  }

  // VM roots: update each slot with the copied object's new address.
  for(Roots::Iterator i(data.roots()); i.more(); i.advance()) {
    i->set(saw_object(i->get()));
  }

  // Per-thread state.
  if(data.threads()) {
    for(std::list<ManagedThread*>::iterator i = data.threads()->begin();
        i != data.threads()->end();
        ++i) {
      scan(*i, true);
    }
  }

  // C-API handles.
  for(Allocator<capi::Handle>::Iterator i(data.handles()->allocator());
      i.more(); i.advance()) {
    if(!i->in_use_p()) continue;

    if(!i->weak_p() && i->object()->young_object_p()) {
      i->set_object(saw_object(i->object()));

    // Users manipulate values accessible from the data* within an
    // RData without running a write barrier. Thusly if we see a mature
    // rdata, we must always scan it because it could contain
    // young pointers.
    } else if(!i->object()->young_object_p() && i->is_rdata()) {
      scan_object(i->object());
    }

    assert(i->object()->type_id() > InvalidType && i->object()->type_id() < LastObjectType);
  }

  // Global handle locations registered by extensions.
  std::list<capi::GlobalHandle*>* gh = data.global_handle_locations();
  if(gh) {
    for(std::list<capi::GlobalHandle*>::iterator i = gh->begin();
        i != gh->end();
        ++i) {
      capi::GlobalHandle* global_handle = *i;
      capi::Handle** loc = global_handle->handle();
      if(capi::Handle* hdl = *loc) {
        // Skip slots that fail REFERENCE_P — presumably immediate-encoded
        // VALUEs rather than real Handle pointers; TODO confirm.
        if(!REFERENCE_P(hdl)) continue;
        if(hdl->valid_p()) {
          Object* obj = hdl->object();
          if(obj && obj->reference_p() && obj->young_object_p()) {
            hdl->set_object(saw_object(obj));
          }
        } else {
          std::cerr << "Detected bad handle checking global capi handles\n";
        }
      }
    }
  }

#ifdef ENABLE_LLVM
  // Let the JIT mark its own GC roots.
  if(LLVMState* ls = data.llvm_state()) ls->gc_scan(this);
#endif

  // Handle all promotions to non-young space that occurred.
  handle_promotions();

  assert(fully_scanned_p());
  // We're now done seeing the entire object graph of normal, live references.
  // Now we get to handle the unusual references, like finalizers and such.

  // Objects with finalizers must be kept alive until the finalizers have
  // run.
  walk_finalizers();

  // Process possible promotions from processing objects with finalizers.
  handle_promotions();

  if(!promoted_stack_.empty()) rubinius::bug("promote stack has elements!");
  if(!fully_scanned_p()) rubinius::bug("more young refs");

  // Check any weakrefs and replace dead objects with nil
  clean_weakrefs(true);

  // Swap the 2 halves
  Heap *x = next;
  next = current;
  current = x;

  if(stats) {
    stats->lifetime = lifetime_;
    stats->percentage_used = current->percentage_used();
    stats->promoted_objects = promoted_objects_;
    stats->excess_objects = copy_spills_;
  }

  // Tune the age at which promotion occurs
  if(autotune_) {
    double used = current->percentage_used();
    if(used > cOverFullThreshold) {
      // Space is too full after collection: promote earlier (shorter
      // lifetime) once the condition has persisted long enough.
      if(tune_threshold_ >= cOverFullTimes) {
        if(lifetime_ > cMinimumLifetime) lifetime_--;
      } else {
        tune_threshold_++;
      }
    } else if(used < cUnderFullThreshold) {
      // Space is mostly empty: promote later (longer lifetime).
      if(tune_threshold_ <= cUnderFullTimes) {
        if(lifetime_ < cMaximumLifetime) lifetime_++;
      } else {
        tune_threshold_--;
      }
    } else if(tune_threshold_ > 0) {
      tune_threshold_--;
    } else if(tune_threshold_ < 0) {
      tune_threshold_++;
    } else if(tune_threshold_ == 0) {
      // Steady state: drift the lifetime back toward its original value.
      if(lifetime_ < original_lifetime_) {
        lifetime_++;
      } else if(lifetime_ > original_lifetime_) {
        lifetime_--;
      }
    }
  }
}
/** * Performs a garbage collection of the immix space. */ void ImmixGC::collect(GCData& data) { Object* tmp; gc_.clear_lines(); int via_handles_ = 0; int via_roots = 0; for(Roots::Iterator i(data.roots()); i.more(); i.advance()) { tmp = i->get(); if(tmp->reference_p()) saw_object(tmp); via_roots++; } if(data.threads()) { for(std::list<ManagedThread*>::iterator i = data.threads()->begin(); i != data.threads()->end(); ++i) { scan(*i, false); } } for(Allocator<capi::Handle>::Iterator i(data.handles()->allocator()); i.more(); i.advance()) { if(i->in_use_p() && !i->weak_p()) { saw_object(i->object()); via_handles_++; } } std::list<capi::GlobalHandle*>* gh = data.global_handle_locations(); if(gh) { for(std::list<capi::GlobalHandle*>::iterator i = gh->begin(); i != gh->end(); ++i) { capi::Handle** loc = (*i)->handle(); if(capi::Handle* hdl = *loc) { if(!REFERENCE_P(hdl)) continue; if(hdl->valid_p()) { Object* obj = hdl->object(); if(obj && obj->reference_p()) { saw_object(obj); via_handles_++; } } else { std::cerr << "Detected bad handle checking global capi handles\n"; } } } } #ifdef ENABLE_LLVM if(LLVMState* ls = data.llvm_state()) ls->gc_scan(this); #endif gc_.process_mark_stack(allocator_); // We've now finished marking the entire object graph. // Marking objects to be Finalized can cause more things to continue to // live, so we must check the mark_stack again. do { walk_finalizers(); } while(gc_.process_mark_stack(allocator_)); clean_weakrefs(); // Remove unreachable locked objects still in the list if(data.threads()) { for(std::list<ManagedThread*>::iterator i = data.threads()->begin(); i != data.threads()->end(); ++i) { clean_locked_objects(*i, false); } } // Sweep up the garbage gc_.sweep_blocks(); // This resets the allocator state to sync it up with the BlockAllocator // properly. 
allocator_.get_new_block(); // Clear unreachable objects from the various remember sets int cleared = 0; unsigned int mark = object_memory_->mark(); cleared = object_memory_->unremember_objects(mark); for(std::list<gc::WriteBarrier*>::iterator wbi = object_memory_->aux_barriers().begin(); wbi != object_memory_->aux_barriers().end(); ++wbi) { gc::WriteBarrier* wb = *wbi; cleared += wb->unremember_objects(mark); } // Now, calculate how much space we're still using. immix::Chunks& chunks = gc_.block_allocator().chunks(); immix::AllBlockIterator iter(chunks); int live_bytes = 0; int total_bytes = 0; while(immix::Block* block = iter.next()) { total_bytes += immix::cBlockSize; live_bytes += block->bytes_from_lines(); } double percentage_live = (double)live_bytes / (double)total_bytes; if(object_memory_->state()->shared.config.gc_immix_debug) { std::cerr << "[GC IMMIX: " << clear_marked_objects() << " marked" << ", " << via_roots << " roots " << via_handles_ << " handles " << (int)(percentage_live * 100) << "% live" << ", " << live_bytes << "/" << total_bytes << "]\n"; } if(percentage_live >= 0.90) { if(object_memory_->state()->shared.config.gc_immix_debug) { std::cerr << "[GC IMMIX: expanding. 
" << (int)(percentage_live * 100) << "%]\n"; } gc_.block_allocator().add_chunk(); } #ifdef IMMIX_DEBUG std::cout << "Immix: RS size cleared: " << cleared << "\n"; immix::Chunks& chunks = gc_.block_allocator().chunks(); std::cout << "chunks=" << chunks.size() << "\n"; immix::AllBlockIterator iter(chunks); int blocks_seen = 0; int total_objects = 0; int total_object_bytes = 0; while(immix::Block* block = iter.next()) { blocks_seen++; std::cout << "block " << block << ", holes=" << block->holes() << " " << "objects=" << block->objects() << " " << "object_bytes=" << block->object_bytes() << " " << "frag=" << block->fragmentation_ratio() << "\n"; total_objects += block->objects(); total_object_bytes += block->object_bytes(); } std::cout << blocks_seen << " blocks\n"; std::cout << gc_.bytes_allocated() << " bytes allocated\n"; std::cout << total_object_bytes << " object bytes / " << total_objects << " objects\n"; int* holes = new int[10]; for(int i = 0; i < 10; i++) { holes[i] = 0; } immix::AllBlockIterator iter2(chunks); while(immix::Block* block = iter2.next()) { int h = block->holes(); if(h > 9) h = 9; holes[h]++; } std::cout << "== hole stats ==\n"; for(int i = 0; i < 10; i++) { if(holes[i] > 0) { std::cout << i << ": " << holes[i] << "\n"; } } delete[] holes; holes = NULL; #endif }
// Predicate wrapper around the REFERENCE_P() macro applied to this object.
bool reference_p() const {
  const bool is_reference = REFERENCE_P(this);
  return is_reference;
}
/**
 * Perform garbage collection on the young objects.
 *
 * Copying collection over the young generation: unprotects both
 * semispaces, drains the remember set and mark sets, scans VM roots,
 * threads, C-API handles and global handle locations; copies reachable
 * young objects (saw_object returns the new location); loops over
 * finalizer/fiber scanning until promotions settle; then updates the
 * mature-gen bookkeeping, swaps the semispaces and retunes the promotion
 * lifetime.
 */
void BakerGC::collect(GCData* data, YoungCollectStats* stats) {

#ifdef HAVE_VALGRIND_H
  // Tell valgrind both semispaces are addressable before we touch them.
  (void)VALGRIND_MAKE_MEM_DEFINED(next->start().as_int(), next->size());
  (void)VALGRIND_MAKE_MEM_DEFINED(current->start().as_int(), current->size());
#endif

  // Make both semispaces writable for the duration of the collection.
  mprotect(next->start(), next->size(), PROT_READ | PROT_WRITE);
  mprotect(current->start(), current->size(), PROT_READ | PROT_WRITE);

  check_growth_start();

  ObjectArray *current_rs = object_memory_->swap_remember_set();

  total_objects = 0;
  copy_spills_ = 0;
  reset_promoted();

  // Start by copying objects in the remember set
  for(ObjectArray::iterator oi = current_rs->begin();
      oi != current_rs->end();
      ++oi) {
    Object* tmp = *oi;
    // unremember_object throws a NULL in to remove an object
    // so we don't have to compact the set in unremember
    if(tmp) {
      // Remove the Remember bit, since we're clearing the set.
      tmp->clear_remember();
      scan_object(tmp);
    }
  }

  delete current_rs;

  scan_mark_set();
  scan_mature_mark_stack();

  // VM roots: update each slot with the copied object's new address.
  for(Roots::Iterator i(data->roots()); i.more(); i.advance()) {
    i->set(saw_object(i->get()));
  }

  // Per-thread state.
  if(data->threads()) {
    for(std::list<ManagedThread*>::iterator i = data->threads()->begin();
        i != data->threads()->end();
        ++i) {
      scan(*i, true);
    }
  }

  // C-API handles.
  for(Allocator<capi::Handle>::Iterator i(data->handles()->allocator());
      i.more(); i.advance()) {
    if(!i->in_use_p()) continue;

    if(!i->weak_p() && i->object()->young_object_p()) {
      i->set_object(saw_object(i->object()));

    // Users manipulate values accessible from the data* within an
    // RData without running a write barrier. Thusly if we see a mature
    // rdata, we must always scan it because it could contain
    // young pointers.
    } else if(!i->object()->young_object_p() && i->is_rdata()) {
      scan_object(i->object());
    }
  }

  // Global handle locations registered by extensions.
  std::list<capi::GlobalHandle*>* gh = data->global_handle_locations();
  if(gh) {
    for(std::list<capi::GlobalHandle*>::iterator i = gh->begin();
        i != gh->end();
        ++i) {
      capi::GlobalHandle* global_handle = *i;
      capi::Handle** loc = global_handle->handle();
      if(capi::Handle* hdl = *loc) {
        // Skip slots that fail REFERENCE_P — presumably immediate-encoded
        // VALUEs rather than real Handle pointers; TODO confirm.
        if(!REFERENCE_P(hdl)) continue;
        if(hdl->valid_p()) {
          Object* obj = hdl->object();
          if(obj && obj->reference_p() && obj->young_object_p()) {
            hdl->set_object(saw_object(obj));
          }
        } else {
          std::cerr << "Detected bad handle checking global capi handles\n";
        }
      }
    }
  }

#ifdef ENABLE_LLVM
  // Let the JIT mark its own GC roots.
  if(LLVMState* ls = data->llvm_state()) ls->gc_scan(this);
#endif

  // Handle all promotions to non-young space that occurred.
  handle_promotions();

  assert(fully_scanned_p());
  // We're now done seeing the entire object graph of normal, live references.
  // Now we get to handle the unusual references, like finalizers and such.

  // Check any weakrefs and replace dead objects with nil
  // We need to do this before checking finalizers so people can't access
  // objects kept alive for finalization through weakrefs.
  clean_weakrefs(true);

  do {
    // Objects with finalizers must be kept alive until the finalizers have
    // run.
    walk_finalizers();

    // Scan any fibers that aren't running but still active
    scan_fibers(data, false);

    handle_promotions();
  } while(!promoted_stack_.empty() && !fully_scanned_p());

  // Remove unreachable locked objects still in the list
  if(data->threads()) {
    for(std::list<ManagedThread*>::iterator i = data->threads()->begin();
        i != data->threads()->end();
        ++i) {
      clean_locked_objects(*i, true);
    }
  }

  // Update the pending mark set to remove unreachable objects.
  update_mark_set();

  // Update the existing mark stack of the mature gen because young
  // objects might have moved.
  update_mature_mark_stack();

  // Update the weak ref set to remove unreachable weak refs.
  update_weak_refs_set();

  // Swap the 2 halves
  Heap* x = next;
  next = current;
  current = x;

  if(stats) {
    stats->lifetime = lifetime_;
    stats->percentage_used = current->percentage_used();
    stats->promoted_objects = promoted_objects_;
    stats->excess_objects = copy_spills_;
  }

  // Tune the age at which promotion occurs
  if(autotune_lifetime_) {
    double used = current->percentage_used();
    if(used > cOverFullThreshold) {
      // Space is too full after collection: promote earlier (shorter
      // lifetime) once the condition has persisted long enough.
      if(tune_threshold_ >= cOverFullTimes) {
        if(lifetime_ > cMinimumLifetime) lifetime_--;
      } else {
        tune_threshold_++;
      }
    } else if(used < cUnderFullThreshold) {
      // Space is mostly empty: promote later (longer lifetime).
      if(tune_threshold_ <= cUnderFullTimes) {
        if(lifetime_ < cMaximumLifetime) lifetime_++;
      } else {
        tune_threshold_--;
      }
    } else if(tune_threshold_ > 0) {
      tune_threshold_--;
    } else if(tune_threshold_ < 0) {
      tune_threshold_++;
    } else if(tune_threshold_ == 0) {
      // Steady state: drift the lifetime back toward its original value.
      if(lifetime_ < original_lifetime_) {
        lifetime_++;
      } else if(lifetime_ > original_lifetime_) {
        lifetime_--;
      }
    }
  }
}
/**
 * Type predicate: true only when obj is a heap reference whose type_id()
 * equals the template class T's static `type` tag.
 */
static inline bool instance_of(const Object* obj) {
  return REFERENCE_P(obj) && obj->type_id() == T::type;
}