void Encoding::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);

  Encoding* enc_o = force_as<Encoding>(obj);
  if(!enc_o->get_managed()) return;

  OnigEncodingType* enc = enc_o->get_encoding();
  if(!enc) return;

  ByteArray* enc_ba = ByteArray::from_body(enc);
  if(ByteArray* tmp = force_as<ByteArray>(mark.call(enc_ba))) {
    enc_o->set_encoding(reinterpret_cast<OnigEncodingType*>(tmp->raw_bytes()));
    mark.just_set(obj, tmp);

    enc = enc_o->get_encoding();
  }

  if(enc->name) {
    ByteArray* ba = ByteArray::from_body(const_cast<char*>(enc->name));
    if(ByteArray* tmp = force_as<ByteArray>(mark.call(ba))) {
      enc->name = reinterpret_cast<const char*>(tmp->raw_bytes());
      mark.just_set(obj, tmp);
    }
  }
}
void Data::Info::mark(Object* t, memory::ObjectMark& mark) {
  auto_mark(t, mark);

  Data* data = force_as<Data>(t);

  if(mark.mature_gc_in_progress()) {
    // Don't scan objects concurrently since this might
    // not be thread safe. The C library in use here
    // might be in the process of freeing up malloc'ed
    // resources so we would see objects in an invalid
    // state and scan wrong pointers etc.
    return;
  }

  if(data->freed_p()) {
    logger::error("finalizer: Data mark called for already freed object");
    return;
  }

  Data::MarkFunctor marker = data->mark();

  if(marker) {
    memory::ObjectMark* cur = capi::current_mark();
    capi::set_current_mark(&mark);

    (*marker)(data->data());

    capi::set_current_mark(cur);
  }
}
void CompiledCode::Info::mark(Object* obj, memory::ObjectMark& mark) {
  auto_mark(obj, mark);
  mark_inliners(obj, mark);

  CompiledCode* code = as<CompiledCode>(obj);
  if(!code->machine_code()) return;

  MachineCode* mcode = code->machine_code();
  mcode->set_mark();

  for(int i = 0; i < MachineCode::cMaxSpecializations; i++) {
    // TODO: JIT
  }

  for(size_t i = 0; i < mcode->references_count(); i++) {
    if(size_t ip = mcode->references()[i]) {
      Object* ref = reinterpret_cast<Object*>(mcode->opcodes[ip]);
      if(Object* updated_ref = mark.call(ref)) {
        mcode->opcodes[ip] = reinterpret_cast<intptr_t>(updated_ref);
        mark.just_set(code, updated_ref);
      }
    }
  }
}
void SendSite::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);

  SendSite* ss = as<SendSite>(obj);

  if(ss->inner_cache_) {
    Object* tmp;
    SendSite::Internal* cache = ss->inner_cache_;

    if(cache->module) {
      tmp = mark.call(cache->module);
      if(tmp) {
        cache->module = (Module*)tmp;
        mark.just_set(obj, tmp);
      }
    }

    if(cache->method) {
      tmp = mark.call(cache->method);
      if(tmp) {
        cache->method = (Executable*)tmp;
        mark.just_set(obj, tmp);
      }
    }

    if(cache->recv_class) {
      tmp = mark.call(cache->recv_class);
      if(tmp) {
        cache->recv_class = (Module*)tmp;
        mark.just_set(obj, tmp);
      }
    }
  }
}
void VariableScope::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);

  VariableScope* vs = as<VariableScope>(obj);
  vs->fixup();

  if(!vs->isolated()) {
    Object** ary = vs->stack_locals();

    if(Fiber* fib = try_as<Fiber>(vs->fiber())) {
      FiberData* data = fib->data();

      AddressDisplacement dis(data->data_offset(),
                              data->data_lower_bound(),
                              data->data_upper_bound());

      ary = dis.displace(ary);
    }

    size_t locals = vs->number_of_locals();

    for(size_t i = 0; i < locals; i++) {
      Object* tmp = mark.call(ary[i]);
      if(tmp) { ary[i] = tmp; }
    }
  }
}
void Fiber::Info::mark(Object* obj, memory::ObjectMark& mark) {
  auto_mark(obj, mark);

  Fiber* fib = force_as<Fiber>(obj);
  FiberData* data = fib->data();

  if(!data || data->dead_p()) return;
  data->set_mark();
}
void CompiledMethod::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);
  mark_inliners(obj, mark);

  CompiledMethod* cm = as<CompiledMethod>(obj);
  if(!cm->backend_method_) return;

  VMMethod* vmm = cm->backend_method_;
  vmm->set_mark();

  Object* tmp;

#ifdef ENABLE_LLVM
  if(cm->jit_data()) {
    cm->jit_data()->set_mark();
    cm->jit_data()->mark_all(cm, mark);
  }

  for(int i = 0; i < VMMethod::cMaxSpecializations; i++) {
    if(vmm->specializations[i].jit_data) {
      vmm->specializations[i].jit_data->set_mark();
      vmm->specializations[i].jit_data->mark_all(cm, mark);
    }
  }
#endif

  for(size_t i = 0; i < vmm->inline_cache_count(); i++) {
    InlineCache* cache = &vmm->caches[i];

    MethodCacheEntry* mce = cache->cache_;
    if(mce) {
      tmp = mark.call(mce);
      if(tmp) {
        cache->cache_ = (MethodCacheEntry*)tmp;
        mark.just_set(obj, tmp);
      }
    }

    if(cache->call_unit_) {
      tmp = mark.call(cache->call_unit_);
      if(tmp) {
        cache->call_unit_ = (CallUnit*)tmp;
        mark.just_set(obj, tmp);
      }
    }

    for(int j = 0; j < cTrackedICHits; j++) {
      Module* mod = cache->seen_classes_[j].klass();
      if(mod) {
        tmp = mark.call(mod);
        if(tmp) {
          cache->seen_classes_[j].set_klass(force_as<Class>(tmp));
          mark.just_set(obj, tmp);
        }
      }
    }
  }
}
void Fiber::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);

  Fiber* fib = (Fiber*)obj;

  if(CallFrame* cf = fib->call_frame()) {
    mark.gc->walk_call_frame(cf);
  }
}
void Data::Info::mark(Object* t, ObjectMark& mark) {
  auto_mark(t, mark);

  Data* data = as<Data>(t);

  if(data->mark()) {
    ObjectMark* cur = VM::current_state()->current_mark;
    VM::current_state()->current_mark = &mark;

    (*data->mark())(data->data());

    VM::current_state()->current_mark = cur;
  }
}
void CompiledCode::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);
  mark_inliners(obj, mark);

  CompiledCode* code = as<CompiledCode>(obj);
  if(!code->machine_code_) return;

  MachineCode* mcode = code->machine_code_;
  mcode->set_mark();

  Object* tmp;

#ifdef ENABLE_LLVM
  if(code->jit_data()) {
    code->jit_data()->set_mark();
    code->jit_data()->mark_all(code, mark);
  }

  for(int i = 0; i < MachineCode::cMaxSpecializations; i++) {
    if(mcode->specializations[i].jit_data) {
      mcode->specializations[i].jit_data->set_mark();
      mcode->specializations[i].jit_data->mark_all(code, mark);
    }
  }
#endif

  for(size_t i = 0; i < mcode->inline_cache_count(); i++) {
    InlineCache* cache = &mcode->caches[i];

    for(int j = 0; j < cTrackedICHits; ++j) {
      MethodCacheEntry* mce = cache->cache_[j].entry();
      if(mce) {
        tmp = mark.call(mce);
        if(tmp) {
          cache->cache_[j].assign(static_cast<MethodCacheEntry*>(tmp));
          mark.just_set(obj, tmp);
        }
      }
    }

    if(cache->call_unit_) {
      tmp = mark.call(cache->call_unit_);
      if(tmp) {
        cache->call_unit_ = static_cast<CallUnit*>(tmp);
        mark.just_set(obj, tmp);
      }
    }
  }
}
void PolyInlineCache::Info::mark(Object* obj, memory::ObjectMark& mark) {
  auto_mark(obj, mark);

  PolyInlineCache* cache = static_cast<PolyInlineCache*>(obj);

  for(int i = 0; i < cTrackedICHits; ++i) {
    InlineCacheEntry* ice = cache->entries_[i];
    if(ice) {
      if(InlineCacheEntry* updated = static_cast<InlineCacheEntry*>(mark.call(ice))) {
        cache->entries_[i] = updated;
        mark.just_set(cache, updated);
      }
    }
  }
}
void VariableScope::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);

  VariableScope* vs = as<VariableScope>(obj);
  vs->fixup();

  Object* tmp;
  size_t locals = vs->number_of_locals();

  for(size_t i = 0; i < locals; i++) {
    tmp = mark.call(vs->get_local(i));
    if(tmp) vs->set_local(mark.state(), i, tmp);
  }
}
void Data::Info::mark(Object* t, ObjectMark& mark) {
  auto_mark(t, mark);

  Data* data = force_as<Data>(t);
  RDataShadow* rdata = data->rdata();

  if(rdata->dmark) {
    ObjectMark* cur = capi::current_mark();
    capi::set_current_mark(&mark);

    (*rdata->dmark)(rdata->data);

    capi::set_current_mark(cur);
  }
}
void Data::Info::mark(Object* t, ObjectMark& mark) {
  auto_mark(t, mark);

  STATE = mark.gc->state();
  Data* data = as<Data>(t);

  if(data->mark(state)) {
    ObjectMark* cur = capi::current_mark();
    capi::set_current_mark(&mark);

    (*data->mark(state))(data->data(state));

    capi::set_current_mark(cur);
  }
}
void VariableScope::Info::mark(Object* obj, memory::ObjectMark& mark) {
  auto_mark(obj, mark);

  VariableScope* vs = as<VariableScope>(obj);

  if(!vs->isolated_p()) {
    Object** ary = vs->locals();
    size_t locals = vs->number_of_locals();

    for(size_t i = 0; i < locals; i++) {
      if(Object* tmp = mark.call(ary[i])) {
        ary[i] = tmp;
      }
    }
  }
}
void CompiledCode::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);
  mark_inliners(obj, mark);

  CompiledCode* code = as<CompiledCode>(obj);
  if(!code->machine_code_) return;

  MachineCode* mcode = code->machine_code_;
  mcode->set_mark();

#ifdef ENABLE_LLVM
  if(code->jit_data()) {
    code->jit_data()->set_mark();
    code->jit_data()->mark_all(code, mark);
  }

  for(int i = 0; i < MachineCode::cMaxSpecializations; i++) {
    if(mcode->specializations[i].jit_data) {
      mcode->specializations[i].jit_data->set_mark();
      mcode->specializations[i].jit_data->mark_all(code, mark);
    }
  }
#endif

  for(size_t i = 0; i < mcode->call_site_count(); i++) {
    size_t index = mcode->call_site_offsets()[i];
    Object* old_cache = reinterpret_cast<Object*>(mcode->opcodes[index + 1]);
    Object* new_cache = mark.call(old_cache);
    if(new_cache != old_cache) {
      mcode->opcodes[index + 1] = reinterpret_cast<intptr_t>(new_cache);
      mark.just_set(code, new_cache);
    }
  }

  for(size_t i = 0; i < mcode->constant_cache_count(); i++) {
    size_t index = mcode->constant_cache_offsets()[i];
    Object* old_cache = reinterpret_cast<Object*>(mcode->opcodes[index + 1]);
    Object* new_cache = mark.call(old_cache);
    if(new_cache != old_cache) {
      mcode->opcodes[index + 1] = reinterpret_cast<intptr_t>(new_cache);
      mark.just_set(code, new_cache);
    }
  }
}
void Module::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);

  Array* subclasses = as<Module>(obj)->hierarchy_subclasses_;
  if(subclasses->nil_p()) return;

  native_int offset = subclasses->offset();
  native_int size = subclasses->size();
  Tuple* tup = subclasses->tuple();

  for(native_int i = offset; i < size + offset; ++i) {
    if(WeakRef* ref = try_as<WeakRef>(tup->field[i])) {
      if(!ref->alive_p()) {
        tup->field[i] = cNil;
      }
    }
  }

  subclasses->set_size(size - tup->delete_inplace(offset, size, cNil));
}
void Data::Info::mark(Object* t, ObjectMark& mark) {
  auto_mark(t, mark);

  Data* data = force_as<Data>(t);

  if(data->freed_p()) {
    // TODO: Fix the issue of finalizer ordering.
    // std::cerr << "Data::Info::mark called for already freed object" << std::endl;
    return;
  }

  RDataShadow* rdata = data->rdata();

  if(rdata->dmark) {
    ObjectMark* cur = capi::current_mark();
    capi::set_current_mark(&mark);

    (*rdata->dmark)(rdata->data);

    capi::set_current_mark(cur);
  }
}
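// For context: the dmark callback invoked above is normally supplied by a C
// extension when it wraps a native struct (e.g. via Data_Wrap_Struct). A
// minimal sketch of such a callback is shown below. The struct and field
// names are hypothetical; rb_gc_mark and Data_Wrap_Struct are the standard
// Ruby C-API calls, and rb_gc_mark is expected to route back into the
// collector through the ObjectMark installed by capi::set_current_mark above.
// (Assumes #include "ruby.h".)

struct wrapper {
  VALUE cached_string;   // a Ruby object the native struct keeps alive
};

static void wrapper_mark(void* ptr) {
  struct wrapper* w = (struct wrapper*)ptr;
  rb_gc_mark(w->cached_string);   // forwarded to the current ObjectMark
}

// Typical registration (hypothetical klass/free function):
//   VALUE obj = Data_Wrap_Struct(klass, wrapper_mark, wrapper_free, w);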
void CompiledMethod::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);
  mark_inliners(obj, mark);

  CompiledMethod* cm = as<CompiledMethod>(obj);
  if(!cm->backend_method_) return;

  VMMethod* vmm = cm->backend_method_;
  vmm->set_mark();

  Object* tmp;

#ifdef ENABLE_LLVM
  if(cm->jit_data()) {
    cm->jit_data()->set_mark();
    cm->jit_data()->mark_all(cm, mark);
  }
#endif

  for(size_t i = 0; i < vmm->inline_cache_count(); i++) {
    InlineCache* cache = &vmm->caches[i];

    if(cache->module) {
      tmp = mark.call(cache->module);
      if(tmp) {
        cache->module = (Module*)tmp;
        mark.just_set(obj, tmp);
      }
    }

    if(cache->method) {
      tmp = mark.call(cache->method);
      if(tmp) {
        cache->method = (Executable*)tmp;
        mark.just_set(obj, tmp);
      }
    }

    if(cache->klass_) {
      tmp = mark.call(cache->klass_);
      if(tmp) {
        cache->klass_ = (Class*)tmp;
        mark.just_set(obj, tmp);
      }
    }

    if(cache->call_unit_) {
      tmp = mark.call(cache->call_unit_);
      if(tmp) {
        cache->call_unit_ = (CallUnit*)tmp;
        mark.just_set(obj, tmp);
      }
    }

    for(int j = 0; j < cTrackedICHits; j++) {
      Module* mod = cache->seen_classes_[j].klass();
      if(mod) {
        tmp = mark.call(mod);
        if(tmp) {
          cache->seen_classes_[j].set_klass(force_as<Class>(tmp));
          mark.just_set(obj, tmp);
        }
      }
    }
  }

  for(IndirectLiterals::iterator i = vmm->indirect_literals().begin();
      i != vmm->indirect_literals().end();
      ++i) {
    Object** ptr = (*i);
    if((tmp = mark.call(*ptr)) != NULL) {
      *ptr = tmp;
      mark.just_set(obj, tmp);
    }
  }
}
void NativeMethodContext::Info::mark(Object* self, ObjectMark& mark) {
  auto_mark(self, mark);
  as<NativeMethodContext>(self)->mark_handles(mark);
}
/* By default, just call auto_mark(). This exists so that
 * other types can overload this to perform work before or
 * after auto_marking is done. */
void TypeInfo::mark(Object* obj, memory::ObjectMark& mark) {
  auto_mark(obj, mark);
}
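// A minimal sketch of the overload pattern the comment above describes.
// "NativeBuffer" and its accessors are hypothetical; the ObjectMark usage
// (auto_mark, mark.call, mark.just_set) follows the functions in this listing.
// The subclass marks its extra reference after the ordinary fields are
// handled by auto_mark().

void NativeBuffer::Info::mark(Object* obj, memory::ObjectMark& mark) {
  auto_mark(obj, mark);   // mark the ordinary object fields first

  NativeBuffer* buf = force_as<NativeBuffer>(obj);

  // Re-visit the ByteArray backing this object, updating the reference
  // if the collector moved it.
  if(ByteArray* ba = buf->backing_store()) {
    if(ByteArray* moved = force_as<ByteArray>(mark.call(ba))) {
      buf->set_backing_store(moved);
      mark.just_set(obj, moved);
    }
  }
}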
void RespondToCache::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);
}

void MonoInlineCache::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);
}

void Executable::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);
  mark_inliners(obj, mark);
}

void CallCustomCache::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);
}
void CallSite::Info::mark(Object* obj, memory::ObjectMark& mark) {
  auto_mark(obj, mark);

  CallSite* call_site = as<CallSite>(obj);

  // TODO: pass State into GC!
  VM* vm = VM::current();

  if(Cache* cache = call_site->cache()) {
    // Disable the cache if this call site is too unstable to cache.
    if(cache->evictions() > max_evictions) {
      call_site->set_cache(nullptr);
      call_site->executor(CallSite::dispatch);
      call_site->delete_cache(cache);

      vm->metrics()->inline_cache_disabled++;
    } else {
      // Compact and possibly reset the cache.
      Cache* new_cache = cache->compact();

      if(new_cache != cache) {
        call_site->set_cache(new_cache);
        call_site->delete_cache(cache);

        if(new_cache) {
          vm->metrics()->inline_cache_count++;
        } else {
          call_site->executor(CallSite::dispatch_once);
        }
      }
    }
  }

  // Do not merge the conditionals: the cache may have been set to nullptr above.
  if(Cache* cache = call_site->cache()) {
    // Move the most frequently used cache entries to the front.
    cache->reorder();

    // Mark the cache entries.
    for(int32_t i = 0; i < cache->size(); i++) {
      Cache::Entry* entry = cache->entries(i);

      if(Object* ref = mark.call(entry->receiver_class())) {
        entry->receiver_class(as<Class>(ref));
        mark.just_set(call_site, ref);
      }

      if(Object* ref = mark.call(entry->prediction())) {
        entry->prediction(as<Prediction>(ref));
        mark.just_set(call_site, ref);
      }

      if(Object* ref = mark.call(entry->module())) {
        entry->module(as<Module>(ref));
        mark.just_set(call_site, ref);
      }

      if(Object* ref = mark.call(entry->executable())) {
        entry->executable(as<Executable>(ref));
        mark.just_set(call_site, ref);
      }

      if(vm->shared.profiler()->collecting_p()) {
        if(CompiledCode* code = try_as<CompiledCode>(entry->executable())) {
          if(code->machine_code()->sample_count > vm->shared.profiler()->sample_min()) {
            vm->shared.profiler()->add_entry(call_site->serial(), call_site->ip(),
                code->machine_code()->serial(), entry->hits(),
                entry->receiver_class()->name(), entry->module()->name());
          }
        }
      }
    }
  }

  // Clear the dead list.
  call_site->clear_dead_list();
}
void Regexp::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);

  Regexp* reg_o = force_as<Regexp>(obj);
  regex_t* reg = reg_o->onig_data;
  if(!reg) return;

  ByteArray* reg_ba = ByteArray::from_body(reg);

  if(ByteArray* reg_tmp = force_as<ByteArray>(mark.call(reg_ba))) {
    reg_o->onig_data = reinterpret_cast<regex_t*>(reg_tmp->raw_bytes());
    mark.just_set(obj, reg_tmp);

    reg_ba = reg_tmp;
    reg = reg_o->onig_data;
  }

  if(reg->p) {
    ByteArray* ba = ByteArray::from_body(reg->p);
    ByteArray* tmp = force_as<ByteArray>(mark.call(ba));
    if(tmp) {
      reg->p = reinterpret_cast<unsigned char*>(tmp->raw_bytes());
      mark.just_set(obj, tmp);
    }
  }

  if(reg->exact) {
    int exact_size = reg->exact_end - reg->exact;
    ByteArray* ba = ByteArray::from_body(reg->exact);
    ByteArray* tmp = force_as<ByteArray>(mark.call(ba));
    if(tmp) {
      reg->exact = reinterpret_cast<unsigned char*>(tmp->raw_bytes());
      reg->exact_end = reg->exact + exact_size;
      mark.just_set(obj, tmp);
    }
  }

  if(reg->int_map) {
    ByteArray* ba = ByteArray::from_body(reg->int_map);
    ByteArray* tmp = force_as<ByteArray>(mark.call(ba));
    if(tmp) {
      reg->int_map = reinterpret_cast<int*>(tmp->raw_bytes());
      mark.just_set(obj, tmp);
    }
  }

  if(reg->int_map_backward) {
    ByteArray* ba = ByteArray::from_body(reg->int_map_backward);
    ByteArray* tmp = force_as<ByteArray>(mark.call(ba));
    if(tmp) {
      reg->int_map_backward = reinterpret_cast<int*>(tmp->raw_bytes());
      mark.just_set(obj, tmp);
    }
  }

  if(reg->repeat_range) {
    ByteArray* ba = ByteArray::from_body(reg->repeat_range);
    ByteArray* tmp = force_as<ByteArray>(mark.call(ba));
    if(tmp) {
      reg->repeat_range = reinterpret_cast<OnigRepeatRange*>(tmp->raw_bytes());
      mark.just_set(obj, tmp);
    }
  }
}
void CallSite::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);
}