Object* CompiledCode::specialized_executor(STATE, CallFrame* call_frame,
                                           Executable* exec, Module* mod,
                                           Arguments& args)
{
  CompiledCode* code = as<CompiledCode>(exec);

  Class* cls = args.recv()->class_object(state);
  uint32_t id = cls->class_id();

  MachineCode* v = code->machine_code();

  executor target = v->unspecialized;

  for(int i = 0; i < MachineCode::cMaxSpecializations; i++) {
    uint32_t c_id = v->specializations[i].class_id;
    executor x = v->specializations[i].execute;

    if(c_id == id && x != 0) {
      target = x;
      break;
    }
  }

  // This is a bug. We should not have this setup if there are no
  // specializations. FIX THIS BUG!
  if(!target) target = v->fallback;

  return target(state, call_frame, exec, mod, args);
}
Object* CompiledCode::primitive_failed(STATE, CallFrame* call_frame,
                                       Executable* exec, Module* mod,
                                       Arguments& args)
{
  CompiledCode* code = as<CompiledCode>(exec);

  Class* cls = args.recv()->lookup_begin(state);
  uint32_t id = cls->class_id();

  MachineCode* v = code->machine_code();

  executor target = v->unspecialized;

  for(int i = 0; i < MachineCode::cMaxSpecializations; i++) {
    uint32_t c_id = v->specializations[i].class_id;
    executor x = v->specializations[i].execute;

    if(c_id == id && x != 0) {
      target = x;
      break;
    }
  }

  if(target) {
    return target(state, call_frame, exec, mod, args);
  } else {
    return MachineCode::execute(state, call_frame, exec, mod, args);
  }
}
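Both dispatchers above walk the same per-method table on MachineCode: a handful of (class_id, executor) slots, an unspecialized entry point, and a fallback. The following is a minimal sketch of that assumed layout, with stand-in types; the struct name Specialization and the slot count are guesses, while the member names follow the accesses in the code above.

#include <stdint.h>

// Stand-ins for the real Rubinius types, just so the sketch is self-contained.
class Object; class State; class CallFrame; class Executable; class Module; class Arguments;

// Signature matching the executors invoked above (STATE expands to State* state).
typedef Object* (*executor)(State* state, CallFrame* call_frame, Executable* exec,
                            Module* mod, Arguments& args);

class MachineCode {
public:
  static const int cMaxSpecializations = 3;  // slot count is an assumption

  struct Specialization {
    uint32_t class_id;  // class the body was compiled against; 0 marks an empty slot
    executor execute;   // JIT entry point specialized for that class
  };

  Specialization specializations[cMaxSpecializations];
  executor unspecialized;  // body compiled with no receiver-class assumption
  executor fallback;       // last resort when nothing else is installed
};

Resolving a receiver to a compiled body is then just a few integer compares against class_id, which is what keeps per-class specialization cheap at call time.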
Class* find_class_by_id(int64_t id) {
  for(int i = 0; i < cTrackedICHits; i++) {
    Class* cls = seen_classes_[i].klass();
    if(cls && cls->class_id() == id) return cls;
  }

  return 0;
}
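find_class_by_id() scans the small table of receiver classes recorded at a call site's inline cache. A sketch of one entry follows; the field names and the hit counter are assumptions, and only klass() plus the fixed number of slots (cTrackedICHits) are implied by the code above.

class Class;  // stand-in for the real Rubinius Class

// One slot of the table: the class observed at this call site plus how often it was seen.
class InlineCacheHit {
  Class* seen_class_;
  int hits_;

public:
  InlineCacheHit() : seen_class_(0), hits_(0) {}

  void assign(Class* cls) { seen_class_ = cls; hits_ = 1; }
  void hit() { ++hits_; }

  Class* klass() const { return seen_class_; }
  int hits() const { return hits_; }
};

// Inside the cache object, find_class_by_id() then walks a member array like:
//   InlineCacheHit seen_classes_[cTrackedICHits];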
virtual void perform() {
  const char* thread_name = "rbx.jit";
  ManagedThread::set_current(ls_, thread_name);

  ls_->set_run_state(ManagedThread::eIndependent);

  RUBINIUS_THREAD_START(thread_name, ls_->thread_id(), 1);

#ifndef RBX_WINDOWS
  sigset_t set;
  sigfillset(&set);
  pthread_sigmask(SIG_SETMASK, &set, NULL);
#endif

  for(;;) { // forever
    BackgroundCompileRequest* req = 0;

    // Lock, wait, get a request, unlock
    {
      utilities::thread::Mutex::LockGuard guard(mutex_);

      if(pause_) {
        state = cPaused;

        paused_ = true;
        pause_condition_.broadcast();

        if(stop_) goto halt;

        while(pause_) {
          condition_.wait(mutex_);
          if(stop_) goto halt;
        }

        state = cUnknown;
        paused_ = false;
      }

      // If we've been asked to stop, do so now.
      if(stop_) goto halt;

      while(pending_requests_.empty()) {
        state = cIdle;

        // unlock and wait...
        condition_.wait(mutex_);

        if(stop_) goto halt;
      }

      // now locked again, shift a request
      req = pending_requests_.front();

      state = cRunning;
    }

    // This isn't ideal, but it's the safest. Keep the GC from
    // running while we're building the IR.
    ls_->gc_dependent();

    Context ctx(ls_);
    jit::Compiler jit(&ctx);

    // mutex now unlocked, allowing others to push more requests
    current_req_ = req;
    current_compiler_ = &jit;

    int spec_id = 0;
    Class* cls = req->receiver_class();
    if(cls && !cls->nil_p()) {
      spec_id = cls->class_id();
    }

    void* func = 0;
    {
      timer::Running<1000000> timer(ls_->shared().stats.jit_time_spent);

      jit.compile(req);
      func = jit.generate_function();
    }

    // We were unable to compile this function, likely
    // because it's got something we don't support.
    if(!func) {
      if(ls_->config().jit_show_compiling) {
        CompiledCode* code = req->method();
        llvm::outs() << "[[[ JIT error background compiling "
                     << ls_->enclosure_name(code)
                     << "#" << ls_->symbol_debug_str(code->name())
                     << (req->is_block() ? " (block)" : " (method)")
                     << " ]]]\n";
      }

      // If someone was waiting on this, wake them up.
      if(utilities::thread::Condition* cond = req->waiter()) {
        cond->signal();
      }

      current_req_ = 0;
      current_compiler_ = 0;
      pending_requests_.pop_front();
      delete req;

      // We don't depend on the GC here, so let it run independent
      // of us.
      ls_->gc_independent();

      continue;
    }

    if(show_machine_code_) {
      jit.show_machine_code();
    }

    // If the method has had JIT'ing disabled since we started
    // compiling it, discard our work.
    if(!req->machine_code()->jit_disabled()) {
      jit::RuntimeDataHolder* rd = ctx.runtime_data_holder();

      atomic::memory_barrier();
      ls_->start_method_update();

      if(!req->is_block()) {
        if(spec_id) {
          req->method()->add_specialized(spec_id, reinterpret_cast<executor>(func), rd);
        } else {
          req->method()->set_unspecialized(reinterpret_cast<executor>(func), rd);
        }
      } else {
        req->method()->set_unspecialized(reinterpret_cast<executor>(func), rd);
      }

      req->machine_code()->clear_compiling();

      // assert(req->method()->jit_data());

      ls_->end_method_update();

      rd->run_write_barrier(ls_->write_barrier(), req->method());

      ls_->shared().stats.jitted_methods++;

      if(ls_->config().jit_show_compiling) {
        CompiledCode* code = req->method();
        llvm::outs() << "[[[ JIT finished background compiling "
                     << ls_->enclosure_name(code)
                     << "#" << ls_->symbol_debug_str(code->name())
                     << (req->is_block() ? " (block)" : " (method)")
                     << " ]]]\n";
      }
    }

    // If someone was waiting on this, wake them up.
    if(utilities::thread::Condition* cond = req->waiter()) {
      cond->signal();
    }

    current_req_ = 0;
    current_compiler_ = 0;
    pending_requests_.pop_front();
    delete req;

    // We don't depend on the GC here, so let it run independent
    // of us.
    ls_->gc_independent();
  }

halt:
  RUBINIUS_THREAD_STOP(thread_name, ls_->thread_id(), 1);
}
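perform() folds three concerns behind one mutex: waiting for compile requests, honoring a pause request (acknowledged through pause_condition_), and shutting down when stop_ is set. Stripped of the JIT-specific work, the handshake reduces to the standalone sketch below; it uses std::thread primitives rather than the utilities::thread wrappers, and every name in it is invented for illustration.

#include <condition_variable>
#include <deque>
#include <mutex>

struct Request { int id; };

class Worker {
  std::mutex mutex_;
  std::condition_variable condition_;        // wakes the worker
  std::condition_variable pause_condition_;  // wakes whoever asked for the pause
  std::deque<Request*> pending_requests_;
  bool pause_ = false;
  bool paused_ = false;
  bool stop_ = false;

public:
  void perform() {
    for(;;) {
      Request* req = 0;
      {
        std::unique_lock<std::mutex> guard(mutex_);

        // Acknowledge a pause request and sleep until resumed.
        while(pause_ && !stop_) {
          paused_ = true;
          pause_condition_.notify_all();
          condition_.wait(guard);
        }
        paused_ = false;

        if(stop_) return;

        // Sleep until there is work, a pause request, or a stop request.
        while(pending_requests_.empty() && !stop_ && !pause_) {
          condition_.wait(guard);
        }
        if(stop_) return;
        if(pause_) continue;   // loop back and acknowledge the pause

        req = pending_requests_.front();
        pending_requests_.pop_front();
      }

      compile(req);   // long-running work happens outside the lock
      delete req;
    }
  }

  void add(Request* req) {
    std::lock_guard<std::mutex> guard(mutex_);
    pending_requests_.push_back(req);
    condition_.notify_one();
  }

  void pause() {
    std::unique_lock<std::mutex> guard(mutex_);
    pause_ = true;
    condition_.notify_all();
    while(!paused_) pause_condition_.wait(guard);
  }

  void resume() {
    std::lock_guard<std::mutex> guard(mutex_);
    pause_ = false;
    condition_.notify_all();
  }

  void stop() {
    std::lock_guard<std::mutex> guard(mutex_);
    stop_ = true;
    condition_.notify_all();
  }

private:
  void compile(Request*) { /* stand-in for jit.compile() + generate_function() */ }
};

The detail worth preserving from the original is that the expensive compile runs outside the lock, so producers can keep queueing requests while one method is being built.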