void CompiledCode::Info::mark(Object* obj, memory::ObjectMark& mark) {
  auto_mark(obj, mark);

  mark_inliners(obj, mark);

  CompiledCode* code = as<CompiledCode>(obj);
  if(!code->machine_code()) return;

  MachineCode* mcode = code->machine_code();
  mcode->set_mark();

  for(int i = 0; i < MachineCode::cMaxSpecializations; i++) {
    // TODO: JIT
  }

  // references() holds the opcode indices whose operands are embedded
  // Object pointers; rewrite each operand if the collector moved the object.
  for(size_t i = 0; i < mcode->references_count(); i++) {
    if(size_t ip = mcode->references()[i]) {
      Object* ref = reinterpret_cast<Object*>(mcode->opcodes[ip]);
      if(Object* updated_ref = mark.call(ref)) {
        mcode->opcodes[ip] = reinterpret_cast<intptr_t>(updated_ref);
        mark.just_set(code, updated_ref);
      }
    }
  }
}
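// Mark every field of a Tuple, recording the new location when the
// collector moves a referenced object.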
void Tuple::Info::mark(Object* obj, memory::ObjectMark& mark) {
  Tuple* tup = as<Tuple>(obj);

  for(native_int i = 0; i < tup->num_fields(); i++) {
    if(Object* tmp = mark.call(tup->field[i])) {
      mark.set(obj, &tup->field[i], tmp);
    }
  }
}
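// RTuple fields store C-API VALUE handles rather than direct Object
// pointers, so each handle is unwrapped with MemoryHandle::object
// before marking and written back as a VALUE.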
void RTuple::Info::mark(Object* obj, memory::ObjectMark& mark) {
  Tuple* tup = as<Tuple>(obj);

  for(native_int i = 0; i < tup->num_fields(); i++) {
    if(Object* tmp = mark.call(
          MemoryHandle::object(reinterpret_cast<VALUE>(tup->field[i])))) {
      mark.set_value(obj, &tup->field[i], tmp);
    }
  }
}
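// Mark each InlineCacheEntry in a polymorphic inline cache, updating
// the slot if the entry moved.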
void PolyInlineCache::Info::mark(Object* obj, memory::ObjectMark& mark) {
  auto_mark(obj, mark);

  PolyInlineCache* cache = static_cast<PolyInlineCache*>(obj);

  for(int i = 0; i < cTrackedICHits; ++i) {
    InlineCacheEntry* ice = cache->entries_[i];
    if(ice) {
      if(InlineCacheEntry* updated =
          static_cast<InlineCacheEntry*>(mark.call(ice))) {
        cache->entries_[i] = updated;
        mark.just_set(cache, updated);
      }
    }
  }
}
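// Run the mark functor registered for a Data (C extension) object,
// installing this ObjectMark as the current C-API mark context for the
// duration of the call.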
void Data::Info::mark(Object* t, memory::ObjectMark& mark) {
  auto_mark(t, mark);

  Data* data = force_as<Data>(t);

  if(mark.mature_gc_in_progress()) {
    // Don't scan objects concurrently since this might not be thread safe.
    // The C library in use here might be in the process of freeing up
    // malloc'ed resources, so we would see objects in an invalid state
    // and scan wrong pointers etc.
    return;
  }

  if(data->freed_p()) {
    logger::error("finalizer: Data mark called for already freed object");
    return;
  }

  Data::MarkFunctor marker = data->mark();

  if(marker) {
    memory::ObjectMark* cur = capi::current_mark();
    capi::set_current_mark(&mark);

    (*marker)(data->data());

    capi::set_current_mark(cur);
  }
}
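// Mark the CompiledCode objects that have inlined this Executable,
// updating the inliners list if any of them moved.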
void Executable::Info::mark_inliners(Object* obj, memory::ObjectMark& mark) {
  Executable* exc = static_cast<Executable*>(obj);
  if(!exc->inliners() || exc->inliners() == (Inliners*)cNil) return;

  Inliners* inl = exc->inliners();
  inl->set_mark();

  // std::cout << "Marking inliners: " << inl->inliners().size() << "\n";
  for(std::vector<CompiledCode*>::iterator i = inl->inliners().begin();
      i != inl->inliners().end();
      ++i) {
    CompiledCode* code = *i;

    if(Object* tmp = mark.call(code)) {
      *i = static_cast<CompiledCode*>(tmp);
      mark.just_set(obj, tmp);
    }
  }
}
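// Mark the raw locals array of a non-isolated VariableScope in place.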
void VariableScope::Info::mark(Object* obj, memory::ObjectMark& mark) {
  auto_mark(obj, mark);

  VariableScope* vs = as<VariableScope>(obj);

  if(!vs->isolated_p()) {
    Object** ary = vs->locals();
    size_t locals = vs->number_of_locals();

    for(size_t i = 0; i < locals; i++) {
      if(Object* tmp = mark.call(ary[i])) {
        ary[i] = tmp;
      }
    }
  }
}
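// GC-time maintenance of a call site's inline cache: drop caches with too
// many evictions, compact or reset the remainder, re-order entries by usage,
// mark the cached classes, modules, predictions and executables, and record
// profiler samples for hot entries.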
void CallSite::Info::mark(Object* obj, memory::ObjectMark& mark) {
  auto_mark(obj, mark);

  CallSite* call_site = as<CallSite>(obj);

  // TODO: pass State into GC!
  VM* vm = VM::current();

  if(Cache* cache = call_site->cache()) {
    // Disable caching if this call site is too unstable to be worth it
    if(cache->evictions() > max_evictions) {
      call_site->set_cache(nullptr);
      call_site->executor(CallSite::dispatch);
      call_site->delete_cache(cache);

      vm->metrics()->inline_cache_disabled++;
    } else {
      // Compact and possibly reset the cache
      Cache* new_cache = cache->compact();

      if(new_cache != cache) {
        call_site->set_cache(new_cache);
        call_site->delete_cache(cache);

        if(new_cache) {
          vm->metrics()->inline_cache_count++;
        } else {
          call_site->executor(CallSite::dispatch_once);
        }
      }
    }
  }

  // Do not merge the conditionals: the cache may have been set to nullptr above.
  if(Cache* cache = call_site->cache()) {
    // Move more frequently used cache entries to the front
    cache->reorder();

    // Mark the cached references.
    for(int32_t i = 0; i < cache->size(); i++) {
      Cache::Entry* entry = cache->entries(i);

      if(Object* ref = mark.call(entry->receiver_class())) {
        entry->receiver_class(as<Class>(ref));
        mark.just_set(call_site, ref);
      }

      if(Object* ref = mark.call(entry->prediction())) {
        entry->prediction(as<Prediction>(ref));
        mark.just_set(call_site, ref);
      }

      if(Object* ref = mark.call(entry->module())) {
        entry->module(as<Module>(ref));
        mark.just_set(call_site, ref);
      }

      if(Object* ref = mark.call(entry->executable())) {
        entry->executable(as<Executable>(ref));
        mark.just_set(call_site, ref);
      }

      if(vm->shared.profiler()->collecting_p()) {
        if(CompiledCode* code = try_as<CompiledCode>(entry->executable())) {
          if(code->machine_code()->sample_count > vm->shared.profiler()->sample_min()) {
            vm->shared.profiler()->add_entry(call_site->serial(), call_site->ip(),
                code->machine_code()->serial(), entry->hits(),
                entry->receiver_class()->name(), entry->module()->name());
          }
        }
      }
    }
  }

  // Clear the dead list
  call_site->clear_dead_list();
}