// Creates a shallow copy of this tuple. Tries the fast young-space
// allocation first; falls back to the general creation path (which
// may yield a mature object and therefore needs write barriers).
Tuple* Tuple::tuple_dup(STATE) {
  native_int fields = num_fields();

  // Fast path: a "dirty" young tuple skips field initialization,
  // which is safe because every slot is overwritten below.
  Tuple* tup = state->vm()->new_young_tuple_dirty(fields);

  if(likely(tup)) {
    for(native_int i = 0; i < fields; i++) {
      Object *obj = field[i];

      // fields equals size so bounds checking is unnecessary
      tup->field[i] = obj;

      // Because tup is promised to be a young object,
      // we can elide the write barrier usage.
    }

    return tup;
  }

  // Otherwise, use slower creation path that might create
  // a mature object.
  tup = create(state, fields);

  for(native_int i = 0; i < fields; i++) {
    Object *obj = field[i];

    // fields equals size so bounds checking is unnecessary
    tup->field[i] = obj;

    if(obj->reference_p()) tup->write_barrier(state, obj);
  }

  return tup;
}
// Marks every object reachable from a StackVariables scope: self,
// block, module, all locals, the last match ($~), the parent scope
// and the heap-allocated twin of this scope (if one exists).
void GarbageCollector::saw_variable_scope(CallFrame* call_frame, StackVariables* scope) {
  scope->self_ = mark_object(scope->self());
  scope->block_ = mark_object(scope->block());
  scope->module_ = (Module*)mark_object(scope->module());

  // The local count lives on the compiled method, not on the scope.
  int locals = call_frame->cm->backend_method()->number_of_locals;

  for(int i = 0; i < locals; i++) {
    Object* local = scope->get_local(i);
    if(local->reference_p()) {
      scope->set_local(i, mark_object(local));
    }
  }

  if(scope->last_match_ && scope->last_match_->reference_p()) {
    scope->last_match_ = mark_object(scope->last_match_);
  }

  VariableScope* parent = scope->parent();
  if(parent) {
    scope->parent_ = (VariableScope*)mark_object(parent);
  }

  VariableScope* heap = scope->on_heap();
  if(heap) {
    scope->on_heap_ = (VariableScope*)mark_object(heap);
  }
}
// Traces one VM instruction when the PROBE_EXECUTE_INSTRUCTION probe
// is enabled: prints "<method> +<ip>: <opcode> <stack-depth> <top ptr>"
// and, in builds with assert() active, validates that the object on
// top of the stack is in a sane GC location.
void TaskProbe::execute_instruction(Task* task, MethodContext* ctx, opcode op) {
  if(enabled_p(PROBE_EXECUTE_INSTRUCTION)) {
    std::cout << std::left << std::setw(27)
      << ctx->cm()->name()->c_str(task->state)
      << "+" << std::right << std::setw(4) << ctx->ip
      << ": " << std::left << std::setw(30)
      << InstructionSequence::get_instruction_name(op) << " ";

    // js.stack points at the current top-of-stack slot and stk at the
    // base, so the difference is the current depth (-1 when empty).
    int stack_pos = ctx->js.stack - ctx->stk;
    std::cout << std::right << std::setw(4) << stack_pos;

    if(stack_pos >= 0) {
      std::cout << " " << std::right << std::setw(10)
        << (void*) *ctx->js.stack;
    }

    std::cout << std::endl;

    if(stack_pos >= 0) {
      Object* top = *ctx->js.stack;
      if(top->reference_p()) {
        // Sanity-check the object against object memory; these are
        // no-ops unless assert() is enabled.
        ObjectPosition pos = task->state->om->validate_object(top);
        assert(pos != cInWrongYoungHalf);
        assert(pos != cUnknown);
      }
    }
  }
}
// HACK todo test this! void MarkSweepGC::clean_weakrefs() { if(!weak_refs) return; for(ObjectArray::iterator i = weak_refs->begin(); i != weak_refs->end(); i++) { // ATM, only a Tuple can be marked weak. Tuple* tup = as<Tuple>(*i); for(size_t ti = 0; ti < tup->num_fields(); ti++) { Object* obj = tup->at(object_memory->state, ti); if(!obj->reference_p()) continue; if(obj->young_object_p()) { if(!obj->marked_p()) { tup->field[ti] = Qnil; } } else { Entry *entry = find_entry(obj); if(!entry->marked_p()) { tup->field[ti] = Qnil; } } } } delete weak_refs; weak_refs = NULL; }
// Full mark phase for the mark-sweep generation: mark everything
// reachable from the roots, walk each captured call frame, then
// drain the mark stack by scanning marked objects for further
// references.
void MarkSweepGC::collect(Roots &roots, CallFrameLocationList& call_frames) {
  // Mark every object reachable from the root set.
  for(Root* root = static_cast<Root*>(roots.head());
      root;
      root = static_cast<Root*>(root->next())) {
    Object* obj = root->get();
    if(obj->reference_p()) saw_object(obj);
  }

  // Walk all the call frames
  for(CallFrameLocationList::const_iterator i = call_frames.begin();
      i != call_frames.end();
      ++i) {
    CallFrame** loc = *i;
    walk_call_frame(*loc);
  }

  // Drain the mark stack; scanning may push more unmarked objects.
  while(!mark_stack_.empty()) {
    Object* obj = mark_stack_.back();
    mark_stack_.pop_back();
    scan_object(obj);
  }

  after_marked();
}
// Clears out any WeakRef whose referent did not survive the current
// collection. During a young GC (check_forwards == true) survival is
// indicated by a forwarding pointer: survivors are updated to their
// new address, casualties become cNil. During a full GC, the mark bit
// decides instead.
void GarbageCollector::clean_weakrefs(bool check_forwards) {
  if(!weak_refs_) return;

  for(ObjectArray::iterator i = weak_refs_->begin();
      i != weak_refs_->end();
      ++i) {
    if(!*i) continue; // Object was removed during young gc.
    WeakRef* ref = try_as<WeakRef>(*i);
    if(!ref) continue; // Other type for some reason?

    Object* obj = ref->object();
    if(!obj->reference_p()) continue;

    if(check_forwards) {
      if(obj->young_object_p()) {
        if(!obj->forwarded_p()) {
          // Referent died in the young generation.
          ref->set_object(object_memory_, cNil);
        } else {
          // Referent was copied; chase the forwarding pointer.
          ref->set_object(object_memory_, obj->forward());
        }
      }
    } else if(!obj->marked_p(object_memory_->mark())) {
      ref->set_object(object_memory_, cNil);
    }
  }

  // The list is rebuilt from scratch on the next collection.
  delete weak_refs_;
  weak_refs_ = NULL;
}
// Clears out any WeakRef whose referent did not survive the current
// collection. With check_forwards (young GC) survivors are found via
// forwarding pointers; otherwise (full GC) the mark bit decides.
void GarbageCollector::clean_weakrefs(bool check_forwards) {
  if(!weak_refs_) return;

  for(ObjectArray::iterator i = weak_refs_->begin();
      i != weak_refs_->end();
      i++) {
    // Entries are expected to be WeakRefs; skip anything else.
    WeakRef* ref = try_as<WeakRef>(*i);
    if(!ref) continue;

    Object* obj = ref->object();
    if(!obj->reference_p()) continue;

    if(check_forwards) {
      if(obj->young_object_p()) {
        if(!obj->forwarded_p()) {
          // Referent died in the young generation.
          ref->set_object(object_memory_, Qnil);
        } else {
          // Referent moved; follow the forwarding pointer.
          ref->set_object(object_memory_, obj->forward());
        }
      }
    } else if(!obj->marked_p(object_memory_->mark())) {
      ref->set_object(object_memory_, Qnil);
    }
  }

  // The list is rebuilt from scratch on the next collection.
  delete weak_refs_;
  weak_refs_ = NULL;
}
// Applies +visit+ to every object reference held by a chain of call
// frames, following the previous links from +top_call_frame+: the
// custom static scope, the compiled method, the operand stack, the
// top variable scope (when the frame has multiple scopes) and the
// frame's own variable scope.
void GarbageCollector::visit_call_frame(CallFrame* top_call_frame, ObjectVisitor& visit) {
  CallFrame* call_frame = top_call_frame;
  while(call_frame) {
    if(call_frame->custom_static_scope_p() &&
        call_frame->static_scope_ &&
        call_frame->static_scope_->reference_p()) {
      call_frame->static_scope_ =
        (StaticScope*)visit.call(call_frame->static_scope_);
    }

    if(call_frame->cm && call_frame->cm->reference_p()) {
      call_frame->cm = (CompiledMethod*)visit.call(call_frame->cm);
    }

    if(call_frame->cm && call_frame->stk) {
      // The operand stack size is fixed per compiled method.
      native_int stack_size = call_frame->cm->stack_size()->to_native();
      for(native_int i = 0; i < stack_size; i++) {
        Object* obj = call_frame->stk[i];
        if(obj && obj->reference_p()) {
          call_frame->stk[i] = visit.call(obj);
        }
      }
    }

    if(call_frame->multiple_scopes_p() && call_frame->top_scope_) {
      call_frame->top_scope_ = (VariableScope*)visit.call(call_frame->top_scope_);
    }

    visit_variable_scope(call_frame, call_frame->scope, visit);

    call_frame = static_cast<CallFrame*>(call_frame->previous);
  }
}
// Applies +visit+ to every object reference held by a StackVariables
// scope: self, block, module, each local, the parent scope and the
// heap-allocated copy of this scope (if one exists).
void GarbageCollector::visit_variable_scope(CallFrame* call_frame, StackVariables* scope, ObjectVisitor& visit) {
  scope->self_ = visit.call(scope->self());
  scope->block_ = visit.call(scope->block());
  scope->module_ = (Module*)visit.call(scope->module());

  // The local count lives on the compiled method, not on the scope.
  int locals = call_frame->cm->backend_method()->number_of_locals;

  for(int i = 0; i < locals; i++) {
    Object* local = scope->get_local(i);
    if(local->reference_p()) {
      scope->set_local(i, visit.call(local));
    }
  }

  VariableScope* parent = scope->parent();
  if(parent && parent->reference_p()) {
    scope->parent_ = ((VariableScope*)visit.call(parent));
  }

  VariableScope* on_heap = scope->on_heap();
  if(on_heap) {
    scope->on_heap_ = ((VariableScope*)visit.call(on_heap));
  }
}
// Remembered-set write barrier: records the store of +ptr+ into this
// object only when a non-young holder ends up pointing at a young
// target. All other stores need no bookkeeping.
inline void Object::write_barrier(STATE, void* ptr) {
  Object* target = reinterpret_cast<Object*>(ptr);

  // Only a mature-to-young reference store has to pass the barrier.
  if(target->reference_p() &&
     !state->vm()->young_object_p(this) &&
     state->vm()->young_object_p(target)) {
    inline_write_barrier_passed(state, ptr);
  }
}
// Remembered-set write barrier (VM* flavor): same policy as the STATE
// variant — record only mature-to-young reference stores.
inline void Object::write_barrier(VM* vm, void* ptr) {
  Object* target = reinterpret_cast<Object*>(ptr);

  if(target->reference_p() &&
     !this->young_object_p() &&
     target->young_object_p()) {
    inline_write_barrier_passed(vm, ptr);
  }
}
// Marks everything reachable from the global root set for an Immix
// collection: VM roots, per-thread state, strongly-held C-API handles
// and global C-API handle locations, plus (when built with LLVM) the
// JIT's own references.
void ImmixGC::collect_scan(GCData* data) {
  for(Roots::Iterator i(data->roots()); i.more(); i.advance()) {
    Object* tmp = i->get();
    if(tmp->reference_p()) saw_object(tmp);
  }

  if(data->threads()) {
    for(std::list<ManagedThread*>::iterator i = data->threads()->begin();
        i != data->threads()->end();
        ++i) {
      scan(*i, false);
    }
  }

  // Strong C-API handles pin their objects; weak handles are cleaned
  // up elsewhere.
  for(Allocator<capi::Handle>::Iterator i(data->handles()->allocator());
      i.more();
      i.advance()) {
    if(i->in_use_p() && !i->weak_p()) {
      saw_object(i->object());
    }
  }

  std::list<capi::GlobalHandle*>* gh = data->global_handle_locations();

  if(gh) {
    for(std::list<capi::GlobalHandle*>::iterator i = gh->begin();
        i != gh->end();
        ++i) {
      capi::Handle** loc = (*i)->handle();
      if(capi::Handle* hdl = *loc) {
        if(!REFERENCE_P(hdl)) continue;
        if(hdl->valid_p()) {
          Object* obj = hdl->object();
          if(obj && obj->reference_p()) {
            saw_object(obj);
          }
        } else {
          // Diagnose, but do not crash on, a corrupted handle.
          std::cerr << "Detected bad handle checking global capi handles\n";
        }
      }
    }
  }

#ifdef ENABLE_LLVM
  if(LLVMState* ls = data->llvm_state()) ls->gc_scan(this);
#endif
}
// C-API: returns the rb_encoding associated with +obj+, or NULL when
// the object cannot carry an encoding (immediates other than symbols)
// or has none attached.
rb_encoding* rb_enc_get(VALUE obj) {
  NativeMethodEnvironment* env = NativeMethodEnvironment::get();

  Object* val = env->get_object(obj);

  // Only heap references and symbols can carry an encoding.
  if(val->reference_p() || val->symbol_p()) {
    Encoding* enc = Encoding::get_object_encoding(env->state(), val);
    if(!enc->nil_p()) return enc->get_encoding();
  }

  return 0;
}
// Raises a frozen-object error when the value on top of the stack is
// a frozen reference. Returns false on error so the interpreter can
// unwind, true when execution may continue.
inline bool check_frozen(STATE, CallFrame* call_frame) {
  Object* value = stack_top();

  bool frozen_reference = value->reference_p() && value->frozen_p();
  if(!frozen_reference) return true;

  Exception::frozen_error(state, value);
  return false;
}
// Applies +visit+ to every reference object in a root list.
void GarbageCollector::visit_roots(Roots& roots, ObjectVisitor& visit) {
  for(Root* root = static_cast<Root*>(roots.head());
      root;
      root = static_cast<Root*>(root->next())) {
    Object* obj = root->get();
    if(obj->reference_p()) visit.call(obj);
  }
}
// C-API: returns the encoding index for +obj+, or -1 when the object
// cannot carry an encoding (immediates other than symbols).
// NOTE(review): when the object has no encoding attached this returns
// 0 rather than -1 — confirm that matches the intended MRI semantics.
int rb_enc_get_index(VALUE obj) {
  NativeMethodEnvironment* env = NativeMethodEnvironment::get();

  Object* val = env->get_object(obj);

  if(!val->reference_p() && !val->symbol_p()) return -1;

  Encoding* enc = Encoding::get_object_encoding(env->state(), val);
  if(enc->nil_p()) return 0;

  return Encoding::find_index(env->state(), enc->name()->c_str(env->state()));
}
// Marks everything reachable from the global root set for an Immix
// collection. Here saw_object() returns the object's new address when
// it was moved (NULL otherwise), so each root location is updated in
// place after marking.
void ImmixGC::collect_scan(GCData* data) {
  for(Roots::Iterator i(data->roots()); i.more(); i.advance()) {
    if(Object* fwd = saw_object(i->get())) {
      i->set(fwd);
    }
  }

  {
    // Hold the thread-list lock only long enough to scan each thread.
    utilities::thread::SpinLock::LockGuard guard(data->thread_nexus()->threads_lock());

    for(ThreadList::iterator i = data->thread_nexus()->threads()->begin();
        i != data->thread_nexus()->threads()->end();
        ++i) {
      scan(*i, false);
    }
  }

  // Strong C-API handles pin their objects; weak handles are cleaned
  // up elsewhere.
  for(Allocator<capi::Handle>::Iterator i(data->handles()->allocator());
      i.more();
      i.advance()) {
    if(i->in_use_p() && !i->weak_p()) {
      if(Object* fwd = saw_object(i->object())) {
        i->set_object(fwd);
      }
    }
  }

  std::list<capi::GlobalHandle*>* gh = data->global_handle_locations();

  if(gh) {
    for(std::list<capi::GlobalHandle*>::iterator i = gh->begin();
        i != gh->end();
        ++i) {
      capi::Handle** loc = (*i)->handle();
      if(capi::Handle* hdl = *loc) {
        if(!REFERENCE_P(hdl)) continue;
        if(hdl->valid_p()) {
          Object* obj = hdl->object();
          if(obj && obj->reference_p()) {
            if(Object* fwd = saw_object(obj)) {
              hdl->set_object(fwd);
            }
          }
        } else {
          // Diagnose, but do not crash on, a corrupted handle.
          std::cerr << "Detected bad handle checking global capi handles\n";
        }
      }
    }
  }

  /* TODO: JIT
  if(LLVMState* ls = data->llvm_state()) ls->gc_scan(this);
  */
}
// Reconstructs the immediate Object encoded in id_, or returns 0 when
// the id does not describe an immediate. If the decoded pointer turns
// out to be a heap reference, cNil is returned instead so a stale
// reference never escapes.
virtual Object* immediate() {
  native_int id = id_->to_native();

  // Immediate ids carry their tag bits inline.
  if(id & TAG_REF_MASK) {
    Object* obj = reinterpret_cast<Object*>(id);

    // Be sure not to let a bad reference leak out of here.
    if(obj->reference_p()) return cNil;

    return obj;
  }

  return 0;
}
// Visits every reference object reachable from the root list.
void HeapDebug::walk(Roots &roots) {
  for(Root* root = static_cast<Root*>(roots.head());
      root;
      root = static_cast<Root*>(root->next())) {
    Object* obj = root->get();
    if(obj->reference_p()) {
      saw_object(obj);
    }
  }
}
// C-API: marks +ptr+ as live during a GC cycle; called by extensions
// from their mark functions. If the collector moved the object, the
// handle table entry is updated to the new location.
void rb_gc_mark(VALUE ptr) {
  NativeMethodEnvironment* env = NativeMethodEnvironment::get();

  Object* object = env->get_object(ptr);

  if(object->reference_p()) {
    // current_mark is the visitor installed by the active collector.
    Object* res = VM::current_state()->current_mark.call(object);
    if(res) {
      env->handles()[ptr]->set(res);
    }
  }
}
// Scans every slot of every registered root buffer, replacing each
// slot with the (possibly moved) object returned by saw_object().
// When young_only is set, mature objects are skipped.
void GarbageCollector::scan(RootBuffers& buffers, bool young_only) {
  for(RootBuffers::Iterator it(buffers); it.more(); it.advance()) {
    Object** slots = it->buffer();
    int count = it->size();

    for(int j = 0; j < count; j++) {
      Object* obj = slots[j];

      if(!obj->reference_p()) continue;
      if(young_only && !obj->young_object_p()) continue;

      slots[j] = saw_object(obj);
    }
  }
}
// Scans every variable root buffer: each entry holds the address of a
// stack variable, whose contents are replaced with the (possibly
// moved) object. When young_only is set, mature objects are skipped.
void GarbageCollector::scan(VariableRootBuffers& buffers, bool young_only) {
  for(VariableRootBuffers::Iterator it(buffers); it.more(); it.advance()) {
    Object*** slots = it->buffer();
    int count = it->size();

    for(int j = 0; j < count; j++) {
      Object** location = slots[j];
      Object* obj = *location;

      if(!obj->reference_p()) continue;
      if(young_only && !obj->young_object_p()) continue;

      *location = saw_object(obj);
    }
  }
}
// Primitive: returns the heap footprint of +obj+ in bytes, including
// its instance-variable storage. Immediates occupy no heap space and
// report 0.
Fixnum* System::vm_memory_size(STATE, Object* obj) {
  if(obj->reference_p()) {
    size_t bytes = obj->size_in_bytes(state);
    Object* iv = obj->ivars();
    if(LookupTable* lt = try_as<LookupTable>(iv)) {
      // Count the table itself, its value storage and its buckets.
      bytes += iv->size_in_bytes(state);
      bytes += lt->values()->size_in_bytes(state);
      bytes += (lt->entries()->to_native() * sizeof(LookupTableBucket));
    } else if(iv->reference_p()) {
      bytes += iv->size_in_bytes(state);
    }

    return Fixnum::from(bytes);
  }

  return Fixnum::from(0);
}
// Copies +length+ fields of +other+ starting at +start+ into this
// tuple starting at +dest+. All bounds are validated up front so the
// copy loop itself needs no per-element checks. Returns self.
//
// Raises ObjectBoundsExceededError when start/dest fall outside their
// tuples, when length is negative, or when the copied range does not
// fit in the source or the destination.
Tuple* Tuple::copy_from(STATE, Tuple* other, Fixnum* start, Fixnum *length, Fixnum* dest) {
  size_t osize = other->num_fields();
  size_t size = this->num_fields();

  int olend = start->to_native();
  int lend = dest->to_native();
  int olength = length->to_native();

  // left end should be within range
  if(olend < 0 || (size_t)olend > osize) {
    Exception::object_bounds_exceeded_error(state, other, olend);
  }

  if(lend < 0 || (size_t)lend > size) {
    Exception::object_bounds_exceeded_error(state, this, lend);
  }

  // length can not be negative and must fit in src/dest.
  // A zero length is accepted (it copies nothing), so the message says
  // "not be negative" — the previous "must be positive" misstated the
  // contract.
  if(olength < 0) {
    Exception::object_bounds_exceeded_error(state,
        "length must not be negative");
  }

  if((size_t)(olend + olength) > osize) {
    Exception::object_bounds_exceeded_error(state,
        "length should not exceed size of source");
  }

  if((size_t)olength > (size - lend)) {
    Exception::object_bounds_exceeded_error(state,
        "length should not exceed space in destination");
  }

  for(size_t src = olend, dst = lend;
      src < (size_t)(olend + olength);
      ++src, ++dst) {
    // Since we have carefully checked the bounds we don't need to do
    // it in at/put
    Object *obj = other->field[src];
    this->field[dst] = obj;
    // but this is necessary to keep the GC happy
    if(obj->reference_p()) write_barrier(state, obj);
  }

  return this;
}
// Simple collection cycle for the mark-sweep generation: mark from
// the roots, drop dead weak references, then sweep the garbage.
void MarkSweepGC::collect(Roots &roots) {
  for(Root* root = static_cast<Root*>(roots.head());
      root;
      root = static_cast<Root*>(root->next())) {
    Object* obj = root->get();
    if(obj->reference_p()) saw_object(obj);
  }

  // Cleanup all weakrefs seen
  clean_weakrefs();

  // Sweep up the garbage
  sweep_objects();
}
// Scans variable root buffers whose backing memory may have been
// relocated: every pointer read — the buffer list node, the buffer
// array, and each variable address — is first adjusted by +offset+
// before being dereferenced. Live slots are updated with the
// (possibly moved) object returned by saw_object().
void GarbageCollector::scan(VariableRootBuffers& buffers, bool young_only, AddressDisplacement* offset) {
  VariableRootBuffer* vrb = displace(buffers.front(), offset);

  while(vrb) {
    Object*** buffer = displace(vrb->buffer(), offset);
    for(int idx = 0; idx < vrb->size(); idx++) {
      // Each buffer entry is the (displaced) address of a variable.
      Object** var = displace(buffer[idx], offset);
      Object* tmp = *var;

      if(tmp && tmp->reference_p() &&
          (!young_only || tmp->young_object_p())) {
        *var = saw_object(tmp);
      }
    }

    vrb = displace((VariableRootBuffer*)vrb->next(), offset);
  }
}
/**
 * Scans the specified Object +obj+ for references to other Objects, and
 * marks those Objects as reachable. Understands how to read the inside of
 * an Object and find all references located within. For each reference
 * found, it marks the object pointed to as live (which may trigger
 * movement of the object in a copying garbage collector), but does not
 * recursively scan into the referenced object (since such recursion could
 * be arbitrarily deep, depending on the object graph, and this could cause
 * the stack to blow up).
 *
 * /param obj The Object to be scanned for references to other Objects.
 */
void GarbageCollector::scan_object(Object* obj) {
  Object* slot;

#ifdef ENABLE_OBJECT_WATCH
  if(watched_p(obj)) {
    std::cout << "detected " << obj << " during scan_object.\n";
  }
#endif

  // Check and update an inflated header
  if(obj->inflated_header_p()) {
    obj->inflated_header()->reset_object(obj);
  }

  // saw_object() returns the new location when the object was moved,
  // NULL otherwise; only non-NULL results are written back.
  slot = saw_object(obj->klass());
  if(slot) obj->klass(object_memory_, force_as<Class>(slot));

  if(obj->ivars()->reference_p()) {
    slot = saw_object(obj->ivars());
    if(slot) obj->ivars(object_memory_, slot);
  }

  // Handle Tuple directly, because it's so common
  if(Tuple* tup = try_as<Tuple>(obj)) {
    int size = tup->num_fields();

    for(int i = 0; i < size; i++) {
      slot = tup->field[i];
      if(slot->reference_p()) {
        slot = saw_object(slot);
        if(slot) {
          tup->field[i] = slot;
          // The store of a (possibly young) moved object into the
          // tuple must pass the write barrier.
          object_memory_->write_barrier(tup, slot);
        }
      }
    }
  } else {
    // Everything else defers to its type's generated mark routine.
    TypeInfo* ti = object_memory_->type_info[obj->type_id()];
    ObjectMark mark(this);
    ti->mark(obj, mark);
  }
}
/**
 * Scans the specified Object +obj+ for references to other Objects, and
 * marks those Objects as reachable. Understands how to read the inside of
 * an Object and find all references located within. For each reference
 * found, it marks the object pointed to as live (which may trigger
 * movement of the object in a copying garbage collector), but does not
 * recursively scan into the referenced object (since such recursion could
 * be arbitrarily deep, depending on the object graph, and this could cause
 * the stack to blow up).
 *
 * /param obj The Object to be scanned for references to other Objects.
 */
void GarbageCollector::scan_object(Object* obj) {
#ifdef ENABLE_OBJECT_WATCH
  if(watched_p(obj)) {
    std::cout << "detected " << obj << " during scan_object.\n";
  }
#endif

  // We set scanned here before we finish scanning the object.
  // This is done so we don't have a race condition while we're
  // scanning the object and another thread updates a field during
  // the phase where the object is partially scanned.
  scanned_object(obj);

  // saw_object() returns the new location when the object was moved,
  // NULL otherwise; only non-NULL results are written back.
  if(Object* klass = saw_object(obj->klass())) {
    obj->klass(object_memory_, force_as<Class>(klass));
  }

  if(obj->ivars()->reference_p()) {
    if(Object* ivars = saw_object(obj->ivars())) {
      obj->ivars(object_memory_, ivars);
    }
  }

  // Handle Tuple directly, because it's so common
  if(Tuple* tup = try_as<Tuple>(obj)) {
    native_int size = tup->num_fields();

    for(native_int i = 0; i < size; i++) {
      Object* slot = tup->field[i];
      if(slot->reference_p()) {
        if(Object* moved = saw_object(slot)) {
          tup->field[i] = moved;
          // The store of a moved (possibly young) object into the
          // tuple must pass the write barrier.
          object_memory_->write_barrier(tup, moved);
        }
      }
    }
  } else {
    // Everything else defers to its type's generated mark routine.
    TypeInfo* ti = object_memory_->type_info[obj->type_id()];
    ObjectMark mark(this);
    ti->mark(obj, mark);
  }
}
/* For each type, there is an automatically generated version
 * of this function (called via virtual dispatch) that marks
 * all slots. */
// Fallback marker: treats the object as an array of slots (as Tuple
// does) and marks each reference slot, writing back any new location
// the collector reports.
void TypeInfo::auto_mark(Object* obj, ObjectMark& mark) {
  // HACK: should not inspect an object that stores bytes
  // for references. Evan said auto_mark is slated for
  // destruction also.
  if(obj->stores_bytes_p()) return;

  // HACK copied from Tuple;
  Tuple* tup = static_cast<Tuple*>(obj);

  // Hoisted out of the loop: the field count is loop-invariant, and
  // native_int avoids the signed/unsigned comparison the old
  // `size_t i < num_fields()` condition produced.
  native_int fields = tup->num_fields();

  for(native_int i = 0; i < fields; i++) {
    Object* tmp = tup->field[i];

    if(tmp->reference_p()) {
      tmp = mark.call(tmp);
      if(tmp) {
        tup->field[i] = tmp;
        // Tell the mark about the updated slot so barrier/remember
        // bookkeeping happens.
        mark.just_set(obj, tmp);
      }
    }
  }
}
// Visits every registered finalizer after a young collection. The
// finalized object itself is not a root: a young object that was not
// forwarded is now dead ("live" = false) and its finalizer should
// run. Forwarding pointers are chased for both the object and its
// Ruby finalizer; mature objects are scanned instead so any young
// objects they reference stay alive.
void BakerGC::walk_finalizers() {
  FinalizerHandler* fh = object_memory_->finalizer_handler();
  if(!fh) return;

  for(FinalizerHandler::iterator i = fh->begin();
      !i.end();
      /* advance is handled in the loop */)
  {
    FinalizeObject& fi = i.current();
    bool live = true;

    if(fi.object->young_object_p()) {
      // Surviving the young generation == having been forwarded.
      live = fi.object->forwarded_p();
      if(Object* fwd = saw_object(fi.object)) {
        fi.object = fwd;
      }
    } else {
      // If this object is mature, scan it. This
      // means that any young objects it refers to are properly
      // GC'ed and kept alive if necessary
      scan_object(fi.object);
    }

    Object* fin = fi.ruby_finalizer;
    if(fin && fin->reference_p()) {
      if(fin->young_object_p()) {
        if(Object* fwd = saw_object(fin)) {
          fi.ruby_finalizer = fwd;
        }
      } else {
        // If this object is mature, scan it. This
        // means that any young objects it refers to are properly
        // GC'ed and kept alive if necessary
        scan_object(fin);
      }
    }

    // next() both advances the iterator and reports whether the
    // entry's object survived.
    i.next(live);
  }
}