// GC marking for CompiledCode: marks the object's own fields, the methods
// this code was inlined into, and any Object references embedded directly
// in the machine code's opcode stream (rewriting them if the GC moved the
// referenced objects).
void CompiledCode::Info::mark(Object* obj, memory::ObjectMark& mark) {
  auto_mark(obj, mark);
  mark_inliners(obj, mark);

  CompiledCode* code = as<CompiledCode>(obj);
  // Nothing further to scan until the bytecode has been internalized.
  if(!code->machine_code()) return;

  MachineCode* mcode = code->machine_code();
  mcode->set_mark();

  for(int i = 0; i < MachineCode::cMaxSpecializations; i++) {
    // TODO: JIT
  }

  // references() holds instruction-stream offsets whose operand slot is a
  // raw Object pointer; zero offsets are skipped. For each live entry,
  // ask the collector for the (possibly moved) object and patch the
  // operand back into the opcode stream.
  for(size_t i = 0; i < mcode->references_count(); i++) {
    if(size_t ip = mcode->references()[i]) {
      Object* ref = reinterpret_cast<Object*>(mcode->opcodes[ip]);
      if(Object* updated_ref = mark.call(ref)) {
        mcode->opcodes[ip] = reinterpret_cast<intptr_t>(updated_ref);
        // Inform the write barrier that code now points at updated_ref.
        mark.just_set(code, updated_ref);
      }
    }
  }
}
// Fallback executor invoked when this method's primitive fails: dispatch
// to the machine-code specialization recorded for the receiver's class
// data, or to the unspecialized executor; if neither is set, fall back to
// the plain interpreter entry point.
Object* CompiledCode::primitive_failed(STATE, Executable* exec, Module* mod, Arguments& args) {
  CompiledCode* code = as<CompiledCode>(exec);
  uint64_t receiver_data = args.recv()->direct_class(state)->data_raw();

  MachineCode* mcode = code->machine_code();
  executor chosen = mcode->unspecialized;

  // Prefer a populated specialization whose class data matches the receiver.
  for(int idx = 0; idx < MachineCode::cMaxSpecializations; idx++) {
    executor candidate = mcode->specializations[idx].execute;
    if(candidate != 0 && mcode->specializations[idx].class_data.raw == receiver_data) {
      chosen = candidate;
      break;
    }
  }

  if(!chosen) return MachineCode::execute(state, exec, mod, args);
  return chosen(state, exec, mod, args);
}
// Verify that MachineCode::specialize() rewrites a push_ivar instruction
// into a direct push_my_offset when the receiver's type info maps the
// ivar name to a known slot location.
void test_specialize_transforms_ivars_to_slots() {
  CompiledCode* code = CompiledCode::create(state);
  // Literal 0 is the ivar name the push_ivar instruction refers to.
  Tuple* tup = Tuple::from(state, 1, state->symbol("@blah"));
  code->literals(state, tup);

  // Bytecode: push_ivar(literal 0); push_nil
  InstructionSequence* iseq = InstructionSequence::create(state, 3);
  iseq->opcodes()->put(state, 0, Fixnum::from(InstructionSequence::insn_push_ivar));
  iseq->opcodes()->put(state, 1, Fixnum::from(0));
  iseq->opcodes()->put(state, 2, Fixnum::from(InstructionSequence::insn_push_nil));
  code->iseq(state, iseq);

  MachineCode* mcode = new MachineCode(state, code);

  // Type info declares @blah as slot 5 located at byte offset 33.
  Object::Info ti(ObjectType);
  ti.slots[state->symbol("@blah")->index()] = 5;
  ti.slot_locations.resize(6);
  ti.slot_locations[5] = 33;
  mcode->specialize(state, code, &ti);

  TS_ASSERT_EQUALS(mcode->total, 3U);
  // push_ivar must have become push_my_offset with the slot's offset as
  // its operand; the trailing push_nil is untouched.
  TS_ASSERT_EQUALS(mcode->opcodes[0], static_cast<unsigned int>(InstructionSequence::insn_push_my_offset));
  TS_ASSERT_EQUALS(mcode->opcodes[1], 33U);
  TS_ASSERT_EQUALS(mcode->opcodes[2], static_cast<unsigned int>(InstructionSequence::insn_push_nil));
}
// First-call executor: internalizes the bytecode (under the shared-state
// lock) and then re-dispatches through whatever executor internalization
// installed. Returns 0 after raising a bytecode-verification exception.
Object* CompiledCode::default_executor(STATE, CallFrame* call_frame, Executable* exec, Module* mod, Arguments& args) {
  LockableScopedLock lg(state, &state->shared(), __FILE__, __LINE__);

  CompiledCode* code = as<CompiledCode>(exec);

  // execute still pointing here means nobody internalized this code yet.
  if(code->execute == default_executor) {
    const char* reason = 0;
    int ip = -1;

    // Pin GC roots: internalize() can allocate and trigger a collection.
    OnStack<5> os(state, code, exec, mod, args.recv_location(), args.block_location());
    VariableRootBuffer vrb(state->vm()->current_root_buffers(), &args.arguments_location(), args.total());
    GCTokenImpl gct;

    if(!code->internalize(state, gct, call_frame, &reason, &ip)) {
      // Bytecode failed verification; raise with the failing ip/reason.
      Exception::bytecode_error(state, call_frame, code, ip, reason);
      return 0;
    }
  }

  // Drop the lock before executing so the method body runs unlocked.
  lg.unlock();

  return code->execute(state, call_frame, exec, mod, args);
}
// Drive JIT compilation of a single method from a background compile
// request: set up the method's JIT info (including a receiver-class
// specialization if the request carries one) and hand off to the
// MethodBuilder / compile_builder pipeline.
void Compiler::compile_method(LLVMState* ls, BackgroundCompileRequest* req) {
  CompiledCode* cm = req->method();

  // Optional diagnostics: log which method is being compiled, timestamped.
  if(ls->config().jit_inline_debug) {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    ls->log() << "JIT: compiling "
      << ls->enclosure_name(cm) << "#"
      << ls->symbol_debug_str(cm->name())
      << " (" << tv.tv_sec << "." << tv.tv_usec << ")\n";
  }

  JITMethodInfo info(ctx_, cm, cm->backend_method());
  info.is_block = false;

  // A receiver class on the request means we compile a class-specialized
  // version of the method.
  if(Class* cls = req->receiver_class()) {
    info.set_self_class(cls);
  }

  // info is the root of the inline tree for the duration of this build.
  ctx_.set_root(&info);

  jit::MethodBuilder work(ls, info);
  work.setup();

  compile_builder(ctx_, ls, info, work);

  ctx_.set_root(NULL);
}
// Fallback executor used after this method's primitive fails: look up a
// machine-code specialization keyed by the receiver's class id and run
// it; otherwise use the unspecialized executor, or the interpreter when
// neither is installed.
Object* CompiledCode::primitive_failed(STATE, CallFrame* call_frame, Executable* exec, Module* mod, Arguments& args) {
  CompiledCode* code = as<CompiledCode>(exec);
  uint32_t receiver_id = args.recv()->lookup_begin(state)->class_id();

  MachineCode* mcode = code->machine_code();
  executor chosen = mcode->unspecialized;

  // Prefer a populated specialization whose class id matches the receiver.
  for(int idx = 0; idx < MachineCode::cMaxSpecializations; idx++) {
    executor candidate = mcode->specializations[idx].execute;
    if(candidate != 0 && mcode->specializations[idx].class_id == receiver_id) {
      chosen = candidate;
      break;
    }
  }

  if(!chosen) return MachineCode::execute(state, call_frame, exec, mod, args);
  return chosen(state, call_frame, exec, mod, args);
}
// Executor used once specializations exist: pick the machine-code
// specialization matching the receiver's class id, defaulting to the
// unspecialized executor and, failing that, the fallback.
Object* CompiledCode::specialized_executor(STATE, CallFrame* call_frame, Executable* exec, Module* mod, Arguments& args) {
  CompiledCode* code = as<CompiledCode>(exec);

  Class* cls = args.recv()->class_object(state);
  int id = cls->class_id();

  MachineCode* v = code->machine_code();
  executor target = v->unspecialized;

  // Scan the fixed-size specialization table for a live entry matching
  // the receiver's class id.
  for(int i = 0; i < MachineCode::cMaxSpecializations; i++) {
    int c_id = v->specializations[i].class_id;
    executor x = v->specializations[i].execute;

    if(c_id == id && x != 0) {
      target = x;
      break;
    }
  }

  // This is a bug. We should not have this setup if there are no
  // specializations. FIX THIS BUG!
  if(!target) target = v->fallback;

  return target(state, call_frame, exec, mod, args);
}
// Drive JIT compilation of a method from a background compile request,
// using the compiler context's LLVMState for configuration and logging.
// Sets up JITMethodInfo (optionally receiver-class specialized) and runs
// the MethodBuilder pipeline.
void Compiler::compile_method(BackgroundCompileRequest* req) {
  CompiledCode* code = req->method();

  // Optional diagnostics: log which method is being compiled, timestamped.
  if(ctx_->llvm_state()->config().jit_inline_debug) {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    ctx_->llvm_state()->log() << "JIT: compiling "
      << ctx_->llvm_state()->enclosure_name(code) << "#"
      << ctx_->llvm_state()->symbol_debug_str(code->name())
      << " (" << tv.tv_sec << "." << tv.tv_usec << ")\n";
  }

  JITMethodInfo info(ctx_, code, code->machine_code());
  info.is_block = false;

  // A receiver class on the request means a class-specialized compile.
  if(Class* cls = req->receiver_class()) {
    info.set_self_class(cls);
  }

  // info roots the inline tree for the duration of this build.
  ctx_->set_root(&info);

  jit::MethodBuilder work(ctx_, info);
  work.setup();

  compile_builder(info, work);

  ctx_->set_root(NULL);
}
// Return the method's machine-code id encoded as a tagged integer
// (shifted left one bit, low bit set), or 0 when the method has no
// MachineCode yet (not internalized).
r_mint Env::method_id(rmethod meth) {
  CompiledCode* code = i(meth);
  MachineCode* mcode = code->machine_code();
  if(!mcode) return 0;
  return (mcode->method_id() << 1) | 1;
}
// Return the compiled code's machine-code id encoded as a tagged integer
// (shifted left one bit, low bit set), or 0 when no MachineCode exists
// yet (not internalized).
r_mint Env::method_id(rcompiled_code code) {
  CompiledCode* ccode = i(code);
  MachineCode* mcode = ccode->machine_code();
  if(!mcode) return 0;
  return (mcode->method_id() << 1) | 1;
}
// Return the Tuple of constant caches held by this code's MachineCode,
// internalizing the bytecode first if that has not happened yet. Yields
// a primitive-failure marker when internalization fails.
Tuple* CompiledCode::constant_caches(STATE) {
  CompiledCode* self = this;
  // Pin self as a GC root: internalize() may allocate and move objects.
  OnStack<1> os(state, self);

  if(!self->machine_code()) {
    bool internalized = self->internalize(state);
    if(!internalized) return force_as<Tuple>(Primitives::failure());
  }

  return self->machine_code()->constant_caches(state);
}
// Duplicate this CompiledCode. The copy shares field contents via
// copy_object but starts with the default executor and no MachineCode,
// so it is internalized independently on first execution.
CompiledCode* CompiledCode::dup(STATE) {
  CompiledCode* copy = state->memory()->new_object<CompiledCode>(state, G(compiled_code));

  copy->copy_object(state, this);
  copy->set_executor(CompiledCode::default_executor);
  copy->machine_code(NULL);

  return copy;
}
// Duplicate this CompiledCode. The copy shares field contents via
// copy_object but gets the default executor and no JIT data or
// MachineCode, so it is internalized independently on first execution.
CompiledCode* CompiledCode::dup(STATE) {
  CompiledCode* copy = CompiledCode::create(state);

  copy->copy_object(state, this);
  copy->set_executor(CompiledCode::default_executor);

  // Reset the per-instance compilation state copied from the original.
  copy->machine_code_ = NULL;
  copy->jit_data_ = NULL;

  return copy;
}
// Duplicate this CompiledCode into a dirty-allocated object. The copy
// shares field contents via copy_object but gets the default executor
// and no JIT data or MachineCode, so it is internalized independently.
CompiledCode* CompiledCode::dup(STATE) {
  CompiledCode* copy = state->new_object_dirty<CompiledCode>(G(compiled_code));

  copy->copy_object(state, this);
  copy->set_executor(CompiledCode::default_executor);

  // Reset the per-instance compilation state copied from the original.
  copy->machine_code_ = NULL;
  copy->jit_data_ = NULL;

  return copy;
}
// Return the Tuple of constant caches from this code's MachineCode,
// internalizing the bytecode first (with a GC token and the calling
// frame) if needed. Returns a primitive-failure marker on failure.
Tuple* CompiledCode::constant_caches(STATE, CallFrame* calling_environment) {
  GCTokenImpl gct;
  CompiledCode* self = this;
  // Pin self as a GC root: internalize() may allocate and move objects.
  OnStack<1> os(state, self);

  if(self->machine_code_ == NULL) {
    if(!self->internalize(state, gct, calling_environment)) return force_as<Tuple>(Primitives::failure());
  }

  MachineCode* mcode = self->machine_code_;
  return mcode->constant_caches(state);
}
// Bootstrap executor: internalize the bytecode on first call, then
// re-dispatch through whatever executor internalization installed.
// Returns 0 when internalization fails.
Object* CompiledCode::default_executor(STATE, Executable* exec, Module* mod, Arguments& args) {
  CompiledCode* code = as<CompiledCode>(exec);

  // execute still pointing here means the code was never internalized.
  if(code->execute == default_executor && !code->internalize(state)) {
    return 0;
  }

  return code->execute(state, exec, mod, args);
}
// GC marking for CompiledCode: marks the object's own fields, inliner
// methods, any attached JIT data (when LLVM is enabled), and the entries
// of every inline cache, updating cached pointers the collector moved.
void CompiledCode::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);
  mark_inliners(obj, mark);

  CompiledCode* code = as<CompiledCode>(obj);
  // Nothing further to scan until the bytecode has been internalized.
  if(!code->machine_code_) return;

  MachineCode* mcode = code->machine_code_;
  mcode->set_mark();

  Object* tmp;

#ifdef ENABLE_LLVM
  // Mark the runtime data referenced by the jitted code, both the
  // unspecialized version and each class specialization.
  if(code->jit_data()) {
    code->jit_data()->set_mark();
    code->jit_data()->mark_all(code, mark);
  }

  for(int i = 0; i < MachineCode::cMaxSpecializations; i++) {
    if(mcode->specializations[i].jit_data) {
      mcode->specializations[i].jit_data->set_mark();
      mcode->specializations[i].jit_data->mark_all(code, mark);
    }
  }
#endif

  // Walk every inline cache: visit each tracked method-cache entry and
  // the optional call unit, rewriting the stored pointer when mark.call
  // reports the object was moved.
  for(size_t i = 0; i < mcode->inline_cache_count(); i++) {
    InlineCache* cache = &mcode->caches[i];

    for(int j = 0; j < cTrackedICHits; ++j) {
      MethodCacheEntry* mce = cache->cache_[j].entry();
      if(mce) {
        tmp = mark.call(mce);
        if(tmp) {
          cache->cache_[j].assign(static_cast<MethodCacheEntry*>(tmp));
          // Record the store for the write barrier.
          mark.just_set(obj, tmp);
        }
      }
    }

    if(cache->call_unit_) {
      tmp = mark.call(cache->call_unit_);
      if(tmp) {
        cache->call_unit_ = static_cast<CallUnit*>(tmp);
        mark.just_set(obj, tmp);
      }
    }
  }
}
// Allocate a fresh, empty CompiledCode: zero locals, the bootstrap
// default executor, no MachineCode/inliners, and no primitive assigned
// (prim_index_ of -1).
CompiledCode* CompiledCode::create(STATE) {
  CompiledCode* code = state->new_object<CompiledCode>(G(compiled_code));
  code->local_count(state, Fixnum::from(0));
  code->set_executor(CompiledCode::default_executor);
  code->machine_code_ = NULL;
  code->inliners_ = 0;
  code->prim_index_ = -1;

#ifdef ENABLE_LLVM
  // JIT data only exists when LLVM support is compiled in.
  code->jit_data_ = NULL;
#endif

  return code;
}
// Smoke test: building MachineCode from a minimal one-instruction
// CompiledCode yields one opcode with the expected (zero) value.
void test_create() {
  CompiledCode* code = CompiledCode::create(state);

  Tuple* tup = Tuple::from(state, 1, state->symbol("blah"));
  code->literals(state, tup);

  // A single opcode 0 is enough to exercise MachineCode construction.
  InstructionSequence* iseq = InstructionSequence::create(state, 1);
  iseq->opcodes()->put(state, 0, Fixnum::from(0));
  code->iseq(state, iseq);

  MachineCode* mcode = new MachineCode(state, code);
  TS_ASSERT_EQUALS(mcode->total, 1U);
  TS_ASSERT_EQUALS(mcode->opcodes[0], 0U);
}
// JIT runtime helper: create a BlockEnvironment for the block literal
// stored at the given literal index of the current frame's compiled
// code. CPP_TRY/CPP_CATCH convert C++ exceptions at the JIT boundary.
Object* rbx_create_block(STATE, CallFrame* call_frame, int index) {
  CPP_TRY

  Object* _lit = call_frame->compiled_code->literals()->at(state, index);
  CompiledCode* code = as<CompiledCode>(_lit);

  // TODO: We don't need to be doing this everytime.
  code->scope(state, call_frame->constant_scope());

  MachineCode* mcode = call_frame->compiled_code->machine_code();
  GCTokenImpl gct;
  return BlockEnvironment::under_call_frame(state, gct, code, mcode, call_frame);

  CPP_CATCH
}
// Return this bucket's Executable, lazily loading the compiled code from
// the CodeDB by its method id on first access and caching it back into
// the bucket. Returns nil when neither a method nor a method id exists.
Executable* MethodTableBucket::get_method(STATE) {
  // Already materialized: hand back the cached method.
  if(!method()->nil_p()) return as<Executable>(method());

  // No stored id either — nothing to load.
  if(method_id()->nil_p()) return nil<Executable>();

  CompiledCode* code = CodeDB::load(state, as<String>(method_id()));

  // Attach the bucket's constant scope if it has one; otherwise clear it.
  if(ConstantScope* cs = try_as<ConstantScope>(scope())) {
    code->scope(state, cs);
  } else {
    code->scope(state, nil<ConstantScope>());
  }

  code->serial(state, serial_);
  // Cache the loaded code so subsequent lookups skip the CodeDB.
  method(state, code);

  return as<Executable>(code);
}
// GC marking for CompiledCode: marks the object's own fields, inliners,
// optional JIT data, and the call-site / constant caches whose pointers
// are stored inline in the opcode stream (as the operand following each
// cache instruction), patching any the collector moved.
void CompiledCode::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);
  mark_inliners(obj, mark);

  CompiledCode* code = as<CompiledCode>(obj);
  // Nothing further to scan until the bytecode has been internalized.
  if(!code->machine_code_) return;

  MachineCode* mcode = code->machine_code_;
  mcode->set_mark();

#ifdef ENABLE_LLVM
  // Mark runtime data referenced by jitted code, both unspecialized and
  // per-specialization.
  if(code->jit_data()) {
    code->jit_data()->set_mark();
    code->jit_data()->mark_all(code, mark);
  }

  for(int i = 0; i < MachineCode::cMaxSpecializations; i++) {
    if(mcode->specializations[i].jit_data) {
      mcode->specializations[i].jit_data->set_mark();
      mcode->specializations[i].jit_data->mark_all(code, mark);
    }
  }
#endif

  // Call-site caches live at opcodes[offset + 1] for each recorded
  // call-site offset; rewrite the operand when the cache object moved.
  for(size_t i = 0; i < mcode->call_site_count(); i++) {
    size_t index = mcode->call_site_offsets()[i];
    Object* old_cache = reinterpret_cast<Object*>(mcode->opcodes[index + 1]);
    Object* new_cache = mark.call(old_cache);
    if(new_cache != old_cache) {
      mcode->opcodes[index + 1] = reinterpret_cast<intptr_t>(new_cache);
      // Record the store for the write barrier.
      mark.just_set(code, new_cache);
    }
  }

  // Constant caches use the same operand layout as call sites.
  for(size_t i = 0; i < mcode->constant_cache_count(); i++) {
    size_t index = mcode->constant_cache_offsets()[i];
    Object* old_cache = reinterpret_cast<Object*>(mcode->opcodes[index + 1]);
    Object* new_cache = mark.call(old_cache);
    if(new_cache != old_cache) {
      mcode->opcodes[index + 1] = reinterpret_cast<intptr_t>(new_cache);
      mark.just_set(code, new_cache);
    }
  }
}
// Verify MachineCode::validate_ip(): instruction starts (offsets 0 and
// 2) are valid targets, while an operand position (offset 1, the
// argument of push_ivar) is not.
void test_validate_ip() {
  CompiledCode* code = CompiledCode::create(state);
  Tuple* tup = Tuple::from(state, 1, state->symbol("@blah"));
  code->literals(state, tup);

  // Bytecode: push_ivar(literal 0); push_nil — so offset 1 is an operand.
  InstructionSequence* iseq = InstructionSequence::create(state, 3);
  iseq->opcodes()->put(state, 0, Fixnum::from(InstructionSequence::insn_push_ivar));
  iseq->opcodes()->put(state, 1, Fixnum::from(0));
  iseq->opcodes()->put(state, 2, Fixnum::from(InstructionSequence::insn_push_nil));
  code->iseq(state, iseq);

  MachineCode* mcode = new MachineCode(state, code);
  TS_ASSERT_EQUALS(mcode->validate_ip(state, 0), true);
  TS_ASSERT_EQUALS(mcode->validate_ip(state, 1), false);
  TS_ASSERT_EQUALS(mcode->validate_ip(state, 2), true);
}
// JIT runtime helper (VMMethod-era variant): create a BlockEnvironment
// for the block literal at the given literal index of the current
// frame's compiled method. CPP_TRY/CPP_CATCH convert C++ exceptions at
// the JIT boundary.
Object* rbx_create_block(STATE, CallFrame* call_frame, int index) {
  CPP_TRY

  Object* _lit = call_frame->cm->literals()->at(state, index);
  CompiledCode* cm = as<CompiledCode>(_lit);

  // TODO: We don't need to be doing this everytime.
  // Only attach the enclosing constant scope once, on first creation.
  if(cm->scope()->nil_p()) {
    cm->scope(state, call_frame->constant_scope());
  }

  VMMethod* vmm = call_frame->cm->backend_method();
  GCTokenImpl gct;
  return BlockEnvironment::under_call_frame(state, gct, cm, vmm, call_frame);

  CPP_CATCH
}
// Convert this code's bytecode into an executable MachineCode, exactly
// once, safely under concurrent callers (double-checked locking with a
// hard lock and explicit memory barriers). On verification failure the
// optional reason/ip out-params are filled and 0 is returned.
MachineCode* CompiledCode::internalize(STATE, GCToken gct, const char** reason, int* ip) {
  // Fast path: already internalized. The barrier pairs with the
  // atomic::write below so a fully-constructed MachineCode is observed.
  MachineCode* mcode = machine_code_;

  atomic::memory_barrier();

  if(mcode) return mcode;

  CompiledCode* self = this;
  // Pin self as a GC root across locking/allocation.
  OnStack<1> os(state, self);

  self->hard_lock(state, gct);

  // Re-check under the lock: another thread may have won the race.
  mcode = self->machine_code_;
  if(!mcode) {
    {
      // Verify the bytecode before building machine code from it.
      BytecodeVerification bv(self);
      if(!bv.verify(state)) {
        if(reason) *reason = bv.failure_reason();
        if(ip) *ip = bv.failure_ip();
        std::cerr << "Error validating bytecode: " << bv.failure_reason() << "\n";
        return 0;
      }
    }

    mcode = new MachineCode(state, self);

    if(self->resolve_primitive(state)) {
      // Primitive methods execute through the current executor first and
      // use the interpreter as the fallback path.
      mcode->fallback = execute;
    } else {
      mcode->setup_argument_handler();
    }

    // We need to have an explicit memory barrier here, because we need to
    // be sure that mcode is completely initialized before it's set.
    // Otherwise another thread might see a partially initialized
    // MachineCode.
    atomic::write(&self->machine_code_, mcode);

    set_executor(mcode->fallback);
  }

  self->hard_unlock(state, gct);
  return mcode;
}
// Test fixture helper: build a minimal runnable CompiledCode — a single
// `ret` instruction, stack size 10, zero arguments — and formalize it so
// it can be invoked directly.
CompiledCode* create_compiled_code() {
  CompiledCode* code = CompiledCode::create(state);

  code->iseq(state, InstructionSequence::create(state, 1));
  code->iseq()->opcodes()->put(state, 0, Fixnum::from(InstructionSequence::insn_ret));
  code->stack_size(state, Fixnum::from(10));

  // No required or optional arguments.
  code->total_args(state, Fixnum::from(0));
  code->required_args(state, code->total_args());

  code->formalize(state);

  return code;
}
// Install a breakpoint at the given instruction pointer: record it in
// the breakpoints table and switch the machine code to the debugger
// interpreter. Returns the ip on success or a primitive failure when the
// code cannot be internalized or the ip is not a valid instruction start.
Object* CompiledCode::set_breakpoint(STATE, GCToken gct, Fixnum* ip, Object* bp) {
  CompiledCode* self = this;
  // Pin GC roots: internalize() and LookupTable::create() may allocate.
  OnStack<3> os(state, self, ip, bp);

  int i = ip->to_native();
  if(self->machine_code_ == NULL) {
    if(!self->internalize(state, gct)) return Primitives::failure();
  }

  // Reject offsets that do not land on an instruction boundary.
  if(!self->machine_code_->validate_ip(state, i)) return Primitives::failure();

  // Lazily create the per-code breakpoint table.
  if(self->breakpoints_->nil_p()) {
    self->breakpoints(state, LookupTable::create(state));
  }
  self->breakpoints_->store(state, ip, bp);

  // Route execution through the debugger interpreter from now on.
  self->machine_code_->debugging = 1;
  self->machine_code_->run = MachineCode::debugger_interpreter;

  return ip;
}
// Profiler sampling hook: bump sample counters for the currently
// executing method and maintain the fixed-size profile tuple of hottest
// methods, replacing the entry with the smallest call count when the
// current method is hotter.
void VM::update_profile(STATE) {
  timer::StopWatch<timer::nanoseconds> timer(metrics().machine.profile_ns);

  metrics().machine.profiles++;
  profile_sample_count_++;

  CompiledCode* code = state->vm()->call_frame()->compiled_code;
  code->machine_code()->sample_count++;

  // Lazily create the profile tuple on first sample.
  Tuple* profile = profile_.get();
  if(profile->nil_p()) {
    profile = Tuple::create(state, max_profile_entries_);
    profile_.set(profile);
  }

  // Sort entries (by profile_compare, presumably ascending call count —
  // confirm against its definition) so slot 0 holds the eviction victim.
  ::qsort(reinterpret_cast<void*>(profile->field), profile->num_fields(), sizeof(intptr_t), profile_compare);

  // Already tracked: nothing to update.
  for(native_int i = 0; i < profile->num_fields(); i++) {
    if(code == profile->at(i)) return;
  }

  // Replace the first slot if it is empty or colder than this method.
  CompiledCode* pcode = try_as<CompiledCode>(profile->at(0));
  if(!pcode || (pcode && code->machine_code()->call_count > pcode->machine_code()->call_count)) {
    profile->put(state, 0, code);
    min_profile_call_count_ = code->machine_code()->call_count;
  }
}
// Walk up the call stack from the frame that triggered JIT compilation,
// looking for the outermost caller that is still a good root for
// inlining-oriented compilation. Returns the deepest frame that passes
// every heuristic; various stop conditions return either the current
// `callee` (best frame found so far) or `call_frame` itself.
CallFrame* LLVMState::find_candidate(STATE, CompiledCode* start, CallFrame* call_frame) {
  // Generic inlining disabled: just compile the triggering frame.
  if(!config_.jit_inline_generic) {
    return call_frame;
  }

  // Maximum number of caller frames to consider.
  int depth = config().jit_limit_search;

  if(!start) {
    throw CompileError("find_candidate: null start");
  }

  if(!call_frame) {
    throw CompileError("find_candidate: null call frame");
  }

  // if(!start) {
  //   start = call_frame->compiled_code;
  //   call_frame = call_frame->previous;
  //   depth--;
  // }

  if(debug_search) {
    std::cout << "> call_count: " << call_frame->compiled_code->machine_code()->call_count
      << " size: " << call_frame->compiled_code->machine_code()->total
      << " sends: " << call_frame->compiled_code->machine_code()->call_site_count()
      << std::endl;
    call_frame->print_backtrace(state, 1);
  }

  // The triggering method itself must be small enough to inline.
  if(start->machine_code()->total > (size_t)config_.jit_limit_inline_method) {
    if(debug_search) {
      std::cout << "JIT: STOP. reason: trigger method isn't small: "
        << start->machine_code()->total << " > "
        << config_.jit_limit_inline_method << std::endl;
    }
    return call_frame;
  }

  MachineCode* mcode = start->machine_code();

  // required_args != total_args indicates optional/splat arguments, which
  // this search treats as non-inlinable.
  if(mcode->required_args != mcode->total_args) {
    if(debug_search) {
      std::cout << "JIT: STOP. reason: trigger method req_args != total_args" << std::endl;
    }
    return call_frame;
  }

  if(mcode->no_inline_p()) {
    if(debug_search) {
      std::cout << "JIT: STOP. reason: trigger method no_inline_p() = true" << std::endl;
    }
    return call_frame;
  }

  CallFrame* callee = call_frame;
  call_frame = call_frame->previous;
  if(!call_frame) return callee;

  // Now start looking at callers.

  while(depth-- > 0) {
    CompiledCode* cur = call_frame->compiled_code;

    // A frame with no compiled code (native/synthetic) ends the search.
    if(!cur) {
      if(debug_search) {
        std::cout << "JIT: STOP. reason: synthetic CallFrame hit" << std::endl;
      }
      return callee;
    }

    MachineCode* mcode = cur->machine_code();

    if(debug_search) {
      std::cout << "> call_count: " << mcode->call_count
        << " size: " << mcode->total
        << " sends: " << mcode->call_site_count()
        << std::endl;
      call_frame->print_backtrace(state, 1);
    }

    /*
    if(call_frame->block_p()
        || mcode->required_args != mcode->total_args // has a splat
        || mcode->call_count < 200 // not called much
        || mcode->jitted() // already jitted
        || mcode->parent() // is a block
      ) return callee;
    */

    // Caller has optional/splat args: stop at the current best frame.
    if(mcode->required_args != mcode->total_args) {
      if(debug_search) {
        std::cout << "JIT: STOP. reason: req_args != total_args" << std::endl;
      }
      return callee;
    }

    // Caller not hot enough to justify inlining into.
    if(mcode->call_count < config_.jit_threshold_inline) {
      if(debug_search) {
        std::cout << "JIT: STOP. reason: call_count too small: "
          << mcode->call_count << " < "
          << config_.jit_threshold_inline << std::endl;
      }
      return callee;
    }

    if(mcode->jitted_p()) {
      if(debug_search) {
        std::cout << "JIT: STOP. reason: already jitted" << std::endl;
      }
      return callee;
    }

    if(mcode->no_inline_p()) {
      if(debug_search) {
        std::cout << "JIT: STOP. reason: no_inline_p() = true" << std::endl;
      }
      return callee;
    }

    if(call_frame->jitted_p() || call_frame->inline_method_p()) {
      return callee;
    }

    // Too many call sites: accept this frame itself as the root.
    if(mcode->call_site_count() > eMaxInlineSendCount) {
      if(debug_search) {
        std::cout << "JIT: STOP. reason: high send count" << std::endl;
      }
      return call_frame;
    }

    // if(mcode->required_args != mcode->total_args // has a splat
    //   || mcode->call_count < 200 // not called much
    //   || mcode->jitted() // already jitted
    //   || !mcode->no_inline_p() // method marked as not inlineable
    // ) return callee;

    CallFrame* prev = call_frame->previous;
    if(!prev) {
      if(debug_search) {
        std::cout << "JIT: STOP. reason: toplevel method" << std::endl;
      }
      return call_frame;
    }

    // if(cur->machine_code()->total > SMALL_METHOD_SIZE) {
    //   if(debug_search) {
    //     std::cout << "JIT: STOP. reason: big method: "
    //       << cur->machine_code()->total << " > "
    //       << SMALL_METHOD_SIZE
    //       << "\n";
    //   }
    //   return call_frame;
    // }

    // if(!next || cur->machine_code()->total > SMALL_METHOD_SIZE) return call_frame;

    // This caller passed every check; continue upward from it.
    callee = call_frame;
    call_frame = prev;
  }

  return callee;
}
// Main loop of the background JIT compiler thread: wait for compile
// requests, compile each one, install the generated function as either a
// class specialization or the unspecialized executor, and signal any
// thread synchronously waiting on the result. Exits when thread_exit_ is
// set.
void LLVMState::run(STATE) {
  GCTokenImpl gct;

  JITCompileRequest* compile_request = nil<JITCompileRequest>();
  OnStack<1> os(state, compile_request);

  metrics().init(metrics::eJITMetrics);

  state->gc_dependent(gct, 0);

  bool show_machine_code_ = jit_dump_code() & cMachineCode;

  while(!thread_exit_) {
    current_compiler_ = 0;

    {
      // Block GC-independently while waiting for work to arrive.
      GCIndependent guard(state, 0);

      {
        utilities::thread::Mutex::LockGuard lg(compile_lock_);

        while(compile_list_.get()->empty_p()) {
          compile_cond_.wait(compile_lock_);

          if(thread_exit_) break;
        }
      }
    }

    if(thread_exit_) break;

    {
      utilities::thread::Mutex::LockGuard guard(request_lock_);

      compile_request = try_as<JITCompileRequest>(compile_list_.get()->shift(state));
      if(!compile_request || compile_request->nil_p()) continue;
    }

    utilities::thread::Condition* cond = compile_request->waiter();

    // Don't proceed until requester has reached the wait_cond
    if(cond) wait_mutex.lock();

    Context ctx(this);
    jit::Compiler jit(&ctx);

    current_compiler_ = &jit;

    uint32_t class_id = 0;
    uint32_t serial_id = 0;
    void* func = 0;

    try {
      if(compile_request->receiver_class() && !compile_request->receiver_class()->nil_p()) {
        // Apparently already compiled, probably some race
        if(compile_request->method()->find_specialized(compile_request->receiver_class())) {
          if(config().jit_show_compiling) {
            CompiledCode* code = compile_request->method();
            llvm::outs() << "[[[ JIT already compiled "
              << enclosure_name(code) << "#" << symbol_debug_str(code->name())
              << (compile_request->is_block() ? " (block)" : " (method)")
              << " ]]]\n";
          }

          // If someone was waiting on this, wake them up.
          if(cond) {
            wait_mutex.unlock();
            cond->signal();
          }

          current_compiler_ = 0;

          continue;
        }

        class_id = compile_request->receiver_class()->class_id();
        serial_id = compile_request->receiver_class()->serial_id();
      }

      {
        // Time the compile itself for the JIT metrics.
        timer::StopWatch<timer::microseconds> timer(
            metrics().m.jit_metrics.time_last_us,
            metrics().m.jit_metrics.time_total_us);

        jit.compile(compile_request);

        bool indy = !config().jit_sync;
        func = jit.generate_function(indy);
      }

      // We were unable to compile this function, likely
      // because it's got something we don't support.
      if(!func) {
        if(config().jit_show_compiling) {
          CompiledCode* code = compile_request->method();
          llvm::outs() << "[[[ JIT error background compiling "
            << enclosure_name(code) << "#" << symbol_debug_str(code->name())
            << (compile_request->is_block() ? " (block)" : " (method)")
            << " ]]]\n";
        }

        // If someone was waiting on this, wake them up.
        if(cond) {
          wait_mutex.unlock();
          cond->signal();
        }

        current_compiler_ = 0;

        continue;
      }
    } catch(LLVMState::CompileError& e) {
      utilities::logger::warn("JIT: compile error: %s", e.error());
      metrics().m.jit_metrics.methods_failed++;

      // If someone was waiting on this, wake them up.
      if(cond) {
        wait_mutex.unlock();
        cond->signal();
      }

      current_compiler_ = 0;

      continue;
    }

    if(show_machine_code_) {
      jit.show_machine_code();
    }

    // If the method has had jit'ing request disabled since we started
    // JIT'ing it, discard our work.
    if(!compile_request->machine_code()->jit_disabled()) {

      jit::RuntimeDataHolder* rd = ctx.runtime_data_holder();

      // Publish fully-built runtime data before installing the executor.
      atomic::memory_barrier();
      start_method_update();

      if(!compile_request->is_block()) {
        if(class_id) {
          // Install as a class-specialized executor.
          compile_request->method()->add_specialized(state,
              class_id, serial_id, reinterpret_cast<executor>(func), rd);
        } else {
          compile_request->method()->set_unspecialized(reinterpret_cast<executor>(func), rd);
        }
      } else {
        compile_request->method()->set_unspecialized(reinterpret_cast<executor>(func), rd);
      }

      compile_request->machine_code()->clear_compiling();

      end_method_update();

      rd->run_write_barrier(shared().om, compile_request->method());

      if(config().jit_show_compiling) {
        CompiledCode* code = compile_request->method();
        llvm::outs() << "[[[ JIT finished background compiling "
          << enclosure_name(code) << "#" << symbol_debug_str(code->name())
          << (compile_request->is_block() ? " (block)" : " (method)")
          << " ]]]\n";
      }
    }

    // If someone was waiting on this, wake them up.
    if(cond) {
      wait_mutex.unlock();
      cond->signal();
    }

    current_compiler_ = 0;

    metrics().m.jit_metrics.methods_compiled++;
  }
}