/**
 * GC-marks the Ruby objects referenced by every jit::RuntimeData record
 * in this holder, rewriting each slot when the collector moved the
 * object. When `obj` is non-NULL the write barrier (just_set) is run
 * against it for every updated reference.
 *
 * Fix: guard each slot with a NULL check before handing it to
 * mark.call(), matching the other RuntimeDataHolder::mark_all variants
 * in this file, which already guard method()/name()/module().
 */
void RuntimeDataHolder::mark_all(Object* obj, ObjectMark& mark) {
  for(std::list<jit::RuntimeData*>::iterator i = runtime_data_.begin();
      i != runtime_data_.end();
      ++i) {
    jit::RuntimeData* rd = *i;

    if(rd->method()) {
      Object* tmp = mark.call(rd->method());
      if(tmp) {
        rd->method_ = (CompiledMethod*)tmp;
        if(obj) mark.just_set(obj, tmp);
      }
    }

    if(rd->name()) {
      Object* tmp = mark.call(rd->name());
      if(tmp) {
        rd->name_ = (Symbol*)tmp;
        if(obj) mark.just_set(obj, tmp);
      }
    }

    if(rd->module()) {
      Object* tmp = mark.call(rd->module());
      if(tmp) {
        rd->module_ = (Module*)tmp;
        if(obj) mark.just_set(obj, tmp);
      }
    }
  }
}
// GC-mark hook for SendSite: marks the standard slots, then — when an
// inline cache is installed — relocates the cached module, method and
// receiver class if the collector moved any of them.
void SendSite::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);

  SendSite* site = as<SendSite>(obj);
  SendSite::Internal* internal = site->inner_cache_;
  if(!internal) return;

  Object* moved;

  if(internal->module) {
    moved = mark.call(internal->module);
    if(moved) {
      internal->module = (Module*)moved;
      mark.just_set(obj, moved);
    }
  }

  if(internal->method) {
    moved = mark.call(internal->method);
    if(moved) {
      internal->method = (Executable*)moved;
      mark.just_set(obj, moved);
    }
  }

  if(internal->recv_class) {
    moved = mark.call(internal->recv_class);
    if(moved) {
      internal->recv_class = (Module*)moved;
      mark.just_set(obj, moved);
    }
  }
}
// GC-marks the objects referenced from each jit::RuntimeData record,
// rewriting any slot whose object the collector moved. The write
// barrier is run against `obj` when one is supplied.
void RuntimeDataHolder::mark_all(Object* obj, ObjectMark& mark) {
  typedef std::vector<jit::RuntimeData*>::iterator rd_iter;

  for(rd_iter it = runtime_data_.begin(); it != runtime_data_.end(); ++it) {
    jit::RuntimeData* data = *it;
    Object* moved;

    if(data->method() && (moved = mark.call(data->method()))) {
      data->method_ = force_as<CompiledCode>(moved);
      if(obj) mark.just_set(obj, moved);
    }

    if(data->name() && (moved = mark.call(data->name()))) {
      data->name_ = force_as<Symbol>(moved);
      if(obj) mark.just_set(obj, moved);
    }

    if(data->module() && (moved = mark.call(data->module()))) {
      data->module_ = force_as<Module>(moved);
      if(obj) mark.just_set(obj, moved);
    }
  }
}
void CompiledMethod::Info::mark(Object* obj, ObjectMark& mark) { auto_mark(obj, mark); mark_inliners(obj, mark); CompiledMethod* cm = as<CompiledMethod>(obj); if(!cm->backend_method_) return; VMMethod* vmm = cm->backend_method_; vmm->set_mark(); Object* tmp; #ifdef ENABLE_LLVM if(cm->jit_data()) { cm->jit_data()->set_mark(); cm->jit_data()->mark_all(cm, mark); } for(int i = 0; i < VMMethod::cMaxSpecializations; i++) { if(vmm->specializations[i].jit_data) { vmm->specializations[i].jit_data->set_mark(); vmm->specializations[i].jit_data->mark_all(cm, mark); } } #endif for(size_t i = 0; i < vmm->inline_cache_count(); i++) { InlineCache* cache = &vmm->caches[i]; MethodCacheEntry* mce = cache->cache_; if(mce) { tmp = mark.call(mce); if(tmp) { cache->cache_ = (MethodCacheEntry*)tmp; mark.just_set(obj, tmp); } } if(cache->call_unit_) { tmp = mark.call(cache->call_unit_); if(tmp) { cache->call_unit_ = (CallUnit*)tmp; mark.just_set(obj, tmp); } } for(int i = 0; i < cTrackedICHits; i++) { Module* mod = cache->seen_classes_[i].klass(); if(mod) { tmp = mark.call(mod); if(tmp) { cache->seen_classes_[i].set_klass(force_as<Class>(tmp)); mark.just_set(obj, tmp); } } } } }
// GC-mark hook for Encoding. A managed encoding's OnigEncodingType
// struct (and its name string) live inside ByteArray bodies, so both
// pointers must be re-located if the collector moved their ByteArrays.
void Encoding::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);

  Encoding* encoding = force_as<Encoding>(obj);
  if(!encoding->get_managed()) return;

  OnigEncodingType* onig = encoding->get_encoding();
  if(!onig) return;

  // The OnigEncodingType struct itself is backed by a ByteArray body.
  ByteArray* body = ByteArray::from_body(onig);
  ByteArray* moved = force_as<ByteArray>(mark.call(body));
  if(moved) {
    encoding->set_encoding(reinterpret_cast<OnigEncodingType*>(moved->raw_bytes()));
    mark.just_set(obj, moved);
    onig = encoding->get_encoding();
  }

  // The encoding's name string is backed by its own ByteArray.
  if(onig->name) {
    ByteArray* name_body = ByteArray::from_body(const_cast<char*>(onig->name));
    ByteArray* name_moved = force_as<ByteArray>(mark.call(name_body));
    if(name_moved) {
      onig->name = reinterpret_cast<const char*>(name_moved->raw_bytes());
      mark.just_set(obj, name_moved);
    }
  }
}
void CompiledCode::Info::mark(Object* obj, ObjectMark& mark) { auto_mark(obj, mark); mark_inliners(obj, mark); CompiledCode* code = as<CompiledCode>(obj); if(!code->machine_code_) return; MachineCode* mcode = code->machine_code_; mcode->set_mark(); Object* tmp; #ifdef ENABLE_LLVM if(code->jit_data()) { code->jit_data()->set_mark(); code->jit_data()->mark_all(code, mark); } for(int i = 0; i < MachineCode::cMaxSpecializations; i++) { if(mcode->specializations[i].jit_data) { mcode->specializations[i].jit_data->set_mark(); mcode->specializations[i].jit_data->mark_all(code, mark); } } #endif for(size_t i = 0; i < mcode->inline_cache_count(); i++) { InlineCache* cache = &mcode->caches[i]; for(int j = 0; j < cTrackedICHits; ++j) { MethodCacheEntry* mce = cache->cache_[j].entry(); if(mce) { tmp = mark.call(mce); if(tmp) { cache->cache_[j].assign(static_cast<MethodCacheEntry*>(tmp)); mark.just_set(obj, tmp); } } } if(cache->call_unit_) { tmp = mark.call(cache->call_unit_); if(tmp) { cache->call_unit_ = static_cast<CallUnit*>(tmp); mark.just_set(obj, tmp); } } } }
// GC-mark hook for VariableScope. Non-isolated scopes keep their locals
// in a raw Object* array (stack_locals()), which auto_mark does not
// cover, so those slots are walked here explicitly.
void VariableScope::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);

  VariableScope* vs = as<VariableScope>(obj);

  vs->fixup();

  if(!vs->isolated()) {
    Object** ary = vs->stack_locals();

    // If this scope belongs to a fiber, the locals live inside the
    // fiber's saved data region; translate the pointer through the
    // fiber's address displacement before touching the slots.
    // (Presumably the region may have been copied between the bounds
    // given by data_lower_bound()/data_upper_bound() — TODO confirm
    // against FiberData.)
    if(Fiber* fib = try_as<Fiber>(vs->fiber())) {
      FiberData* data = fib->data();
      AddressDisplacement dis(data->data_offset(),
                              data->data_lower_bound(),
                              data->data_upper_bound());
      ary = dis.displace(ary);
    }

    size_t locals = vs->number_of_locals();

    // Update each local in place; no just_set here — the slots are not
    // ordinary heap fields of `obj`.
    for(size_t i = 0; i < locals; i++) {
      Object* tmp = mark.call(ary[i]);
      if(tmp) {
        ary[i] = tmp;
      }
    }
  }
}
// GC-mark hook for CompiledCode (call-site / constant-cache layout):
// marks the standard slots and inliners, then re-writes the cache
// objects embedded directly in the opcode stream, where each recorded
// offset points at an instruction whose operand (offset + 1) is a
// cache object stored as an intptr_t.
void CompiledCode::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);
  mark_inliners(obj, mark);

  CompiledCode* code = as<CompiledCode>(obj);
  if(!code->machine_code_) return;

  MachineCode* mcode = code->machine_code_;
  mcode->set_mark();

#ifdef ENABLE_LLVM
  if(code->jit_data()) {
    code->jit_data()->set_mark();
    code->jit_data()->mark_all(code, mark);
  }

  for(int i = 0; i < MachineCode::cMaxSpecializations; i++) {
    if(mcode->specializations[i].jit_data) {
      mcode->specializations[i].jit_data->set_mark();
      mcode->specializations[i].jit_data->mark_all(code, mark);
    }
  }
#endif

  // NOTE(review): unlike the `if(tmp)` idiom used elsewhere in this
  // file, these loops assume mark.call returns the (possibly identical)
  // object rather than NULL for "not moved". If mark.call can return
  // NULL here, NULL would be written into the opcode stream — confirm
  // against ObjectMark::call's contract.
  for(size_t i = 0; i < mcode->call_site_count(); i++) {
    size_t index = mcode->call_site_offsets()[i];
    Object* old_cache = reinterpret_cast<Object*>(mcode->opcodes[index + 1]);
    Object* new_cache = mark.call(old_cache);
    if(new_cache != old_cache) {
      mcode->opcodes[index + 1] = reinterpret_cast<intptr_t>(new_cache);
      mark.just_set(code, new_cache);
    }
  }

  for(size_t i = 0; i < mcode->constant_cache_count(); i++) {
    size_t index = mcode->constant_cache_offsets()[i];
    Object* old_cache = reinterpret_cast<Object*>(mcode->opcodes[index + 1]);
    Object* new_cache = mark.call(old_cache);
    if(new_cache != old_cache) {
      mcode->opcodes[index + 1] = reinterpret_cast<intptr_t>(new_cache);
      mark.just_set(code, new_cache);
    }
  }
}
// Marks every field of a Tuple, updating (with the write barrier) only
// the slots whose objects the collector actually moved.
void Tuple::Info::mark(Object* obj, ObjectMark& mark) {
  Tuple* tuple = as<Tuple>(obj);
  native_int fields = tuple->num_fields();

  for(native_int idx = 0; idx < fields; idx++) {
    Object* moved = mark.call(tuple->field[idx]);
    if(moved && moved != tuple->field[idx]) {
      mark.set(obj, &tuple->field[idx], moved);
    }
  }
}
// Marks every field of a Tuple, updating slots through mark.set when
// the collector reports a new location.
//
// Fix: num_fields() is a signed native_int (see the other Tuple mark
// variant in this file); iterate with the same type to avoid a
// signed/unsigned comparison. `tmp` is also scoped into the loop.
void Tuple::Info::mark(Object* obj, ObjectMark& mark) {
  Tuple* tup = as<Tuple>(obj);

  for(native_int i = 0; i < tup->num_fields(); i++) {
    Object* tmp = mark.call(tup->field[i]);
    if(tmp) mark.set(obj, &tup->field[i], tmp);
  }
}
// GC-marks the objects held by every jit::RuntimeData record — the
// method, name and module slots plus the chain of GC literals embedded
// in the jitted code — rewriting any reference the collector moved.
// The write barrier runs against `obj` when one is supplied.
void RuntimeDataHolder::mark_all(Object* obj, ObjectMark& mark) {
  typedef std::list<jit::RuntimeData*>::iterator rd_iter;

  for(rd_iter it = runtime_data_.begin(); it != runtime_data_.end(); ++it) {
    jit::RuntimeData* data = *it;
    Object* moved;

    if(data->method() && (moved = mark.call(data->method()))) {
      data->method_ = (CompiledMethod*)moved;
      if(obj) mark.just_set(obj, moved);
    }

    if(data->name() && (moved = mark.call(data->name()))) {
      data->name_ = (Symbol*)moved;
      if(obj) mark.just_set(obj, moved);
    }

    if(data->module() && (moved = mark.call(data->module()))) {
      data->module_ = (Module*)moved;
      if(obj) mark.just_set(obj, moved);
    }

    // Walk the linked chain of GC literals referenced by this record.
    for(GCLiteral* lit = data->literals(); lit; lit = lit->next()) {
      moved = mark.call(lit->object());
      if(moved) {
        lit->set_object(moved);
        if(obj) mark.just_set(obj, moved);
      }
    }
  }
}
// Marks every packed slot of a PackedObject. The slot count is derived
// from the object's size, since packed layouts carry no field metadata
// of their own.
void PackedObject::Info::mark(Object* obj, ObjectMark& mark) {
  PackedObject* packed = reinterpret_cast<PackedObject*>(obj);
  Object** slots = packed->body_as_array();
  size_t count = to_fields(object_size(obj));

  for(size_t idx = 0; idx < count; idx++) {
    Object* moved = mark.call(slots[idx]);
    if(moved) {
      mark.set(obj, &slots[idx], moved);
    }
  }
}
// Marks every object referenced through this context's native-method
// handles — both the context-local storage and the process-wide global
// handle storage — updating each handle the collector moved.
void NativeMethodContext::mark_handles(ObjectMark& mark) {
  HandleStorage* storages[2];
  storages[0] = handles_;
  storages[1] = &NativeMethodContext::global_handles();

  for(int s = 0; s < 2; ++s) {
    HandleStorage& storage = *storages[s];
    for(HandleStorage::iterator it = storage.begin(); it != storage.end(); ++it) {
      if(Object* moved = mark.call(*it)) {
        *it = moved;
        mark.just_set(this, moved);
      }
    }
  }
}
// GC-mark hook for VariableScope: marks the standard slots, then walks
// each local through the accessor pair so moved objects are stored back
// via set_local.
void VariableScope::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);

  VariableScope* scope = as<VariableScope>(obj);
  scope->fixup();

  size_t count = scope->number_of_locals();
  for(size_t idx = 0; idx < count; idx++) {
    Object* moved = mark.call(scope->get_local(idx));
    if(moved) {
      scope->set_local(mark.state(), idx, moved);
    }
  }
}
// GC-mark hook for VariableScope: marks the standard slots, then — for
// non-isolated scopes — updates the raw stack_locals() array in place.
// (Isolated scopes presumably keep locals in heap slots already covered
// by auto_mark — confirm against VariableScope.)
void VariableScope::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);

  VariableScope* scope = as<VariableScope>(obj);
  scope->fixup();

  if(scope->isolated()) return;

  Object** locals = scope->stack_locals();
  size_t count = scope->number_of_locals();

  for(size_t idx = 0; idx < count; idx++) {
    if(Object* moved = mark.call(locals[idx])) {
      locals[idx] = moved;
    }
  }
}
// Marks the CompiledCode objects that inlined this executable so the
// back-references stay valid across a collection.
void Executable::Info::mark_inliners(Object* obj, ObjectMark& mark) {
  Executable* exe = static_cast<Executable*>(obj);
  Inliners* inliners = exe->inliners_;

  if(!inliners || inliners == (Inliners*)cNil) return;

  inliners->set_mark();

  // std::cout << "Marking inliners: " << inliners->inliners().size() << "\n";
  typedef std::vector<CompiledCode*>::iterator inliner_iter;
  for(inliner_iter it = inliners->inliners().begin();
      it != inliners->inliners().end();
      ++it) {
    Object* moved = mark.call(*it);
    if(moved) {
      *it = static_cast<CompiledCode*>(moved);
      mark.just_set(obj, moved);
    }
  }
}
/* Fallback marker. Each type normally gets an automatically generated
 * version of this function (reached via virtual dispatch) that marks
 * all of its slots; this generic version treats the object body as a
 * Tuple-shaped run of fields.
 *
 * HACK: objects that store raw bytes must not be scanned for
 * references, so they are skipped outright. auto_mark is reportedly
 * slated for removal.
 */
void TypeInfo::auto_mark(Object* obj, ObjectMark& mark) {
  if(obj->stores_bytes_p()) return;

  // HACK copied from Tuple: reinterpret the body as tuple fields.
  Tuple* fields = static_cast<Tuple*>(obj);

  for(size_t idx = 0; idx < fields->num_fields(); idx++) {
    Object* slot = fields->field[idx];
    if(!slot->reference_p()) continue;

    Object* moved = mark.call(slot);
    if(moved) {
      fields->field[idx] = moved;
      mark.just_set(obj, moved);
    }
  }
}
// Marks the CompiledMethod objects that inlined this executable so the
// back-references stay valid across a collection.
void Executable::Info::mark_inliners(Object* obj, ObjectMark& mark) {
  Executable* exe = (Executable*)obj;

  if(!exe->inliners_ || exe->inliners_ == (Inliners*)cNil) return;

  Inliners* inliners = exe->inliners_;
  inliners->set_mark();

  // std::cout << "Marking inliners: " << inliners->inliners().size() << "\n";
  typedef std::list<CompiledMethod*>::iterator inliner_iter;
  for(inliner_iter it = inliners->inliners().begin();
      it != inliners->inliners().end();
      ++it) {
    Object* moved = mark.call(*it);
    if(moved) {
      assert(kind_of<CompiledMethod>(moved));
      *it = (CompiledMethod*)moved;
      mark.just_set(obj, moved);
    }
  }
}
// GC-mark hook for CompiledMethod: marks the standard slots, inliner
// back-references and — when a VMMethod has been built — the GC
// references held by each inline cache (module, method, class, call
// unit, seen classes) plus any indirect literals embedded in jitted
// code.
void CompiledMethod::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);
  mark_inliners(obj, mark);

  CompiledMethod* cm = as<CompiledMethod>(obj);
  if(!cm->backend_method_) return;

  VMMethod* vmm = cm->backend_method_;
  vmm->set_mark();

  Object* tmp;

#ifdef ENABLE_LLVM
  if(cm->jit_data()) {
    cm->jit_data()->set_mark();
    cm->jit_data()->mark_all(cm, mark);
  }
#endif

  for(size_t i = 0; i < vmm->inline_cache_count(); i++) {
    InlineCache* cache = &vmm->caches[i];

    // Cached target module.
    if(cache->module) {
      tmp = mark.call(cache->module);
      if(tmp) {
        cache->module = (Module*)tmp;
        mark.just_set(obj, tmp);
      }
    }

    // Cached executable.
    if(cache->method) {
      tmp = mark.call(cache->method);
      if(tmp) {
        cache->method = (Executable*)tmp;
        mark.just_set(obj, tmp);
      }
    }

    // Cached receiver class.
    if(cache->klass_) {
      tmp = mark.call(cache->klass_);
      if(tmp) {
        cache->klass_ = (Class*)tmp;
        mark.just_set(obj, tmp);
      }
    }

    // Custom call unit, if installed.
    if(cache->call_unit_) {
      tmp = mark.call(cache->call_unit_);
      if(tmp) {
        cache->call_unit_ = (CallUnit*)tmp;
        mark.just_set(obj, tmp);
      }
    }

    // Receiver classes recorded by the cache.
    // NOTE(review): this inner `int i` shadows the outer `size_t i`
    // cache index; the body only uses the inner one, so behavior is
    // correct, but a rename (e.g. `j`) would be clearer.
    for(int i = 0; i < cTrackedICHits; i++) {
      Module* mod = cache->seen_classes_[i].klass();
      if(mod) {
        tmp = mark.call(mod);
        if(tmp) {
          cache->seen_classes_[i].set_klass(force_as<Class>(tmp));
          mark.just_set(obj, tmp);
        }
      }
    }
  }

  // Object references stored indirectly by jitted code: each entry is a
  // pointer to an Object* slot that must be updated in place.
  for(IndirectLiterals::iterator i = vmm->indirect_literals().begin();
      i != vmm->indirect_literals().end();
      ++i) {
    Object** ptr = (*i);
    if((tmp = mark.call(*ptr)) != NULL) {
      *ptr = tmp;
      mark.just_set(obj, tmp);
    }
  }
}
void Regexp::Info::mark(Object* obj, ObjectMark& mark) { auto_mark(obj, mark); Regexp* reg_o = force_as<Regexp>(obj); regex_t* reg = reg_o->onig_data; if(!reg) return; ByteArray* reg_ba = ByteArray::from_body(reg); if(ByteArray* reg_tmp = force_as<ByteArray>(mark.call(reg_ba))) { reg_o->onig_data = reinterpret_cast<regex_t*>(reg_tmp->raw_bytes()); mark.just_set(obj, reg_tmp); reg_ba = reg_tmp; reg = reg_o->onig_data; } if(reg->p) { ByteArray* ba = ByteArray::from_body(reg->p); ByteArray* tmp = force_as<ByteArray>(mark.call(ba)); if(tmp) { reg->p = reinterpret_cast<unsigned char*>(tmp->raw_bytes()); mark.just_set(obj, tmp); } } if(reg->exact) { int exact_size = reg->exact_end - reg->exact; ByteArray* ba = ByteArray::from_body(reg->exact); ByteArray* tmp = force_as<ByteArray>(mark.call(ba)); if(tmp) { reg->exact = reinterpret_cast<unsigned char*>(tmp->raw_bytes()); reg->exact_end = reg->exact + exact_size; mark.just_set(obj, tmp); } } if(reg->int_map) { ByteArray* ba = ByteArray::from_body(reg->int_map); ByteArray* tmp = force_as<ByteArray>(mark.call(ba)); if(tmp) { reg->int_map = reinterpret_cast<int*>(tmp->raw_bytes()); mark.just_set(obj, tmp); } } if(reg->int_map_backward) { ByteArray* ba = ByteArray::from_body(reg->int_map_backward); ByteArray* tmp = force_as<ByteArray>(mark.call(ba)); if(tmp) { reg->int_map_backward = reinterpret_cast<int*>(tmp->raw_bytes()); mark.just_set(obj, tmp); } } if(reg->repeat_range) { ByteArray* ba = ByteArray::from_body(reg->repeat_range); ByteArray* tmp = force_as<ByteArray>(mark.call(ba)); if(tmp) { reg->repeat_range = reinterpret_cast<OnigRepeatRange*>(tmp->raw_bytes()); mark.just_set(obj, tmp); } } }