void LLVMState::compile_soon(STATE, GCToken gct, CompiledCode* code,
                             CallFrame* call_frame, Object* placement,
                             bool is_block)
{
  bool wait = config().jit_sync;

  if(code->machine_code()->call_count <= 1) {
    return;
  }

  if(code->machine_code()->compiling_p()) {
    return;
  }

  int hits = code->machine_code()->call_count;
  code->machine_code()->set_compiling();

  BackgroundCompileRequest* req =
    new BackgroundCompileRequest(state, code, placement, hits, is_block);

  queued_methods_++;

  if(wait) {
    wait_mutex.lock();

    req->set_waiter(&wait_cond);

    background_thread_->add(req);

    state->set_call_frame(call_frame);
    gc_independent();

    wait_cond.wait(wait_mutex);

    wait_mutex.unlock();
    gc_dependent();
    state->set_call_frame(0);

    if(state->shared().config.jit_show_compiling) {
      llvm::outs() << "[[[ JIT compiled "
                   << enclosure_name(code) << "#" << symbol_debug_str(code->name())
                   << (req->is_block() ? " (block) " : " (method) ")
                   << queued_methods() << "/" << jitted_methods()
                   << " ]]]\n";
    }
  } else {
    background_thread_->add(req);

    if(state->shared().config.jit_show_compiling) {
      llvm::outs() << "[[[ JIT queued "
                   << enclosure_name(code) << "#" << symbol_debug_str(code->name())
                   << (req->is_block() ? " (block) " : " (method) ")
                   << queued_methods() << "/" << jitted_methods()
                   << " ]]]\n";
    }
  }
}
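The synchronous path above (taken when jit_sync is set) blocks the requesting thread until the background compiler signals the request's waiter. The following is a minimal sketch of that hand-off using only standard C++ primitives; CompileRequest, CompileQueue, compile_soon_sync, and worker_once are hypothetical names chosen for illustration, not Rubinius APIs, and a std::promise/std::future pair stands in for the Condition that the real worker signals.

// Illustrative only: a standard-library analogue of the jit_sync hand-off
// in compile_soon. CompileRequest, CompileQueue, compile_soon_sync and
// worker_once are hypothetical stand-ins, not Rubinius APIs.
#include <condition_variable>
#include <deque>
#include <future>
#include <mutex>

struct CompileRequest {
  std::promise<void> done;   // fulfilled by the worker when compilation ends
};

class CompileQueue {
public:
  void add(CompileRequest* req) {
    {
      std::lock_guard<std::mutex> lk(mutex_);
      pending_.push_back(req);
    }
    ready_.notify_one();
  }

  CompileRequest* take() {
    std::unique_lock<std::mutex> lk(mutex_);
    ready_.wait(lk, [this] { return !pending_.empty(); });
    CompileRequest* req = pending_.front();
    pending_.pop_front();
    return req;
  }

private:
  std::mutex mutex_;
  std::condition_variable ready_;
  std::deque<CompileRequest*> pending_;
};

// Requester side: enqueue, then block until the worker reports completion.
// The real code additionally marks the thread GC-independent while blocked.
void compile_soon_sync(CompileQueue& queue, CompileRequest* req) {
  std::future<void> finished = req->done.get_future();
  queue.add(req);
  finished.wait();
}

// Worker side: pop a request, compile it, then wake the requester.
void worker_once(CompileQueue& queue) {
  CompileRequest* req = queue.take();
  // ... run the compiler here ...
  req->done.set_value();
  delete req;
}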
virtual void perform() {
  for(;;) { // forever
    BackgroundCompileRequest* req = 0;

    // Lock, wait, get a request, unlock
    {
      thread::Mutex::LockGuard guard(mutex_);

      if(pause_) {
        state = cPaused;
        paused_ = true;
        pause_condition_.signal();

        while(pause_) {
          condition_.wait(mutex_);
        }

        state = cUnknown;
        paused_ = false;
      }

      // If we've been asked to stop, do so now.
      if(stop_) return;

      while(pending_requests_.size() == 0) {
        state = cIdle;

        // unlock and wait...
        condition_.wait(mutex_);

        if(stop_) return;
      }

      // now locked again, shift a request
      req = pending_requests_.front();
      pending_requests_.pop_front();

      state = cRunning;
    }

    // mutex now unlocked, allowing others to push more requests

    LLVMCompiler* jit = new LLVMCompiler();

    {
      timer::Running timer(ls_->time_spent);
      jit->compile(ls_, req->vmmethod(), req->is_block());
      jit->generate_function(ls_);
    }

    if(show_machine_code_) {
      jit->show_machine_code();
    }

    // Ok, compiled, generated machine code, now update MachineMethod

    // Ok, now we are manipulating managed memory, so make
    // sure the GC doesn't run.
    ls_->shared().gc_dependent();

    req->vmmethod()->set_jitted(jit->llvm_function(),
                                jit->code_bytes(),
                                jit->function_pointer());

    if(req->is_block()) {
      BlockEnvironment* be = req->block_env();
      if(!be) {
        llvm::outs() << "Fatal error in JIT. Expected a BlockEnvironment.\n";
      } else {
        be->set_native_function(jit->function_pointer());
      }
    } else {
      MachineMethod* mm = req->machine_method();
      if(!mm) {
        llvm::outs() << "Fatal error in JIT. Expected a MachineMethod.\n";
      } else {
        mm->update(req->vmmethod(), jit);
        mm->activate();
      }
    }

    int which = ls_->add_jitted_method();
    if(ls_->config().jit_show_compiling) {
      llvm::outs() << "[[[ JIT finished background compiling "
                   << which
                   << (req->is_block() ? " (block)" : " (method)")
                   << " ]]]\n";
    }

    delete req;

    // We don't depend on the GC here, so let it run independently
    // of us.
    ls_->shared().gc_independent();
  }
}
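In the loop above, the pause_/paused_ flags and two condition variables let another thread park the compiler at a safe point and wait for an acknowledgement. Below is a sketch of just that handshake, re-expressed with std::mutex and std::condition_variable; PausableWorker is a hypothetical class, and the real code uses Rubinius' thread wrappers with explicit signal()/broadcast() calls rather than wait predicates.

// Illustrative only: the pause/resume handshake, assuming a single worker.
#include <condition_variable>
#include <mutex>

class PausableWorker {
public:
  // Controller side: ask the worker to pause and wait until it acknowledges.
  void pause() {
    std::unique_lock<std::mutex> lk(mutex_);
    pause_ = true;
    condition_.notify_one();   // wake the worker if it is idle-waiting for work
    pause_condition_.wait(lk, [this] { return paused_; });
  }

  // Controller side: let the worker continue.
  void unpause() {
    std::lock_guard<std::mutex> lk(mutex_);
    pause_ = false;
    condition_.notify_one();
  }

  // Worker side, called at the top of its loop: if a pause was requested,
  // acknowledge it and block until unpaused.
  void check_pause() {
    std::unique_lock<std::mutex> lk(mutex_);
    if(pause_) {
      paused_ = true;
      pause_condition_.notify_all();
      condition_.wait(lk, [this] { return !pause_; });
      paused_ = false;
    }
  }

private:
  std::mutex mutex_;
  std::condition_variable condition_;        // wakes the worker
  std::condition_variable pause_condition_;  // wakes the pausing thread
  bool pause_ = false;
  bool paused_ = false;
};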
virtual void perform() {
  const char* thread_name = "rbx.jit";
  ManagedThread::set_current(ls_, thread_name);

  ls_->set_run_state(ManagedThread::eIndependent);

  RUBINIUS_THREAD_START(thread_name, ls_->thread_id(), 1);

#ifndef RBX_WINDOWS
  sigset_t set;
  sigfillset(&set);
  pthread_sigmask(SIG_SETMASK, &set, NULL);
#endif

  for(;;) { // forever
    BackgroundCompileRequest* req = 0;

    // Lock, wait, get a request, unlock
    {
      utilities::thread::Mutex::LockGuard guard(mutex_);

      if(pause_) {
        state = cPaused;
        paused_ = true;
        pause_condition_.broadcast();

        if(stop_) goto halt;

        while(pause_) {
          condition_.wait(mutex_);
          if(stop_) goto halt;
        }

        state = cUnknown;
        paused_ = false;
      }

      // If we've been asked to stop, do so now.
      if(stop_) goto halt;

      while(pending_requests_.empty()) {
        state = cIdle;

        // unlock and wait...
        condition_.wait(mutex_);

        if(stop_) goto halt;
      }

      // now locked again, shift a request
      req = pending_requests_.front();

      state = cRunning;
    }

    // This isn't ideal, but it's the safest. Keep the GC from
    // running while we're building the IR.
    ls_->gc_dependent();

    Context ctx(ls_);
    jit::Compiler jit(&ctx);

    // mutex now unlocked, allowing others to push more requests

    current_req_ = req;
    current_compiler_ = &jit;

    int spec_id = 0;
    Class* cls = req->receiver_class();
    if(cls && !cls->nil_p()) {
      spec_id = cls->class_id();
    }

    void* func = 0;
    {
      timer::Running<1000000> timer(ls_->shared().stats.jit_time_spent);
      jit.compile(req);
      func = jit.generate_function();
    }

    // We were unable to compile this function, likely
    // because it's got something we don't support.
    if(!func) {
      if(ls_->config().jit_show_compiling) {
        CompiledCode* code = req->method();
        llvm::outs() << "[[[ JIT error background compiling "
                     << ls_->enclosure_name(code)
                     << "#" << ls_->symbol_debug_str(code->name())
                     << (req->is_block() ? " (block)" : " (method)")
                     << " ]]]\n";
      }

      // If someone was waiting on this, wake them up.
      if(utilities::thread::Condition* cond = req->waiter()) {
        cond->signal();
      }

      current_req_ = 0;
      current_compiler_ = 0;
      pending_requests_.pop_front();
      delete req;

      // We don't depend on the GC here, so let it run independently
      // of us.
      ls_->gc_independent();

      continue;
    }

    if(show_machine_code_) {
      jit.show_machine_code();
    }

    // If the method has had JIT'ing disabled since we started
    // compiling it, discard our work.
    if(!req->machine_code()->jit_disabled()) {

      jit::RuntimeDataHolder* rd = ctx.runtime_data_holder();

      atomic::memory_barrier();
      ls_->start_method_update();

      if(!req->is_block()) {
        if(spec_id) {
          req->method()->add_specialized(spec_id,
              reinterpret_cast<executor>(func), rd);
        } else {
          req->method()->set_unspecialized(reinterpret_cast<executor>(func), rd);
        }
      } else {
        req->method()->set_unspecialized(reinterpret_cast<executor>(func), rd);
      }

      req->machine_code()->clear_compiling();

      // assert(req->method()->jit_data());

      ls_->end_method_update();

      rd->run_write_barrier(ls_->write_barrier(), req->method());

      ls_->shared().stats.jitted_methods++;

      if(ls_->config().jit_show_compiling) {
        CompiledCode* code = req->method();
        llvm::outs() << "[[[ JIT finished background compiling "
                     << ls_->enclosure_name(code)
                     << "#" << ls_->symbol_debug_str(code->name())
                     << (req->is_block() ? " (block)" : " (method)")
                     << " ]]]\n";
      }
    }

    // If someone was waiting on this, wake them up.
    if(utilities::thread::Condition* cond = req->waiter()) {
      cond->signal();
    }

    current_req_ = 0;
    current_compiler_ = 0;
    pending_requests_.pop_front();
    delete req;

    // We don't depend on the GC here, so let it run independently
    // of us.
    ls_->gc_independent();
  }

halt:
  RUBINIUS_THREAD_STOP(thread_name, ls_->thread_id(), 1);
}
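This version installs the compiled function either as a specialization keyed by the receiver's class id (add_specialized) or as the generic entry point (set_unspecialized). The sketch below shows what such a per-class specialization table can look like; Executor, Method, install, and pick are hypothetical names chosen for illustration and do not mirror the actual Rubinius data structures.

// Illustrative only: a per-class specialization table.
#include <cstdint>
#include <unordered_map>

typedef intptr_t (*Executor)(void* state, void* call_frame);

struct Method {
  Executor unspecialized = nullptr;
  std::unordered_map<uint32_t, Executor> specialized;  // keyed by class id

  // Install JIT output: under a specific receiver class if the request was
  // compiled against one, otherwise as the generic entry point.
  void install(uint32_t spec_id, Executor func) {
    if(spec_id) {
      specialized[spec_id] = func;
    } else {
      unspecialized = func;
    }
  }

  // Dispatch: prefer the specialization for the receiver's class.
  Executor pick(uint32_t receiver_class_id) const {
    auto it = specialized.find(receiver_class_id);
    if(it != specialized.end()) return it->second;
    return unspecialized;
  }
};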
virtual void perform() {
  sigset_t set;
  sigfillset(&set);
  pthread_sigmask(SIG_SETMASK, &set, NULL);

  for(;;) { // forever
    BackgroundCompileRequest* req = 0;

    // Lock, wait, get a request, unlock
    {
      thread::Mutex::LockGuard guard(mutex_);

      if(pause_) {
        state = cPaused;
        paused_ = true;
        pause_condition_.signal();

        while(pause_) {
          condition_.wait(mutex_);
        }

        state = cUnknown;
        paused_ = false;
      }

      // If we've been asked to stop, do so now.
      if(stop_) return;

      while(pending_requests_.size() == 0) {
        state = cIdle;

        // unlock and wait...
        condition_.wait(mutex_);

        if(stop_) return;
      }

      // now locked again, shift a request
      req = pending_requests_.front();
      pending_requests_.pop_front();

      state = cRunning;
    }

    // This isn't ideal, but it's the safest. Keep the GC from
    // running while we're building the IR.
    ls_->shared().gc_dependent();

    // mutex now unlocked, allowing others to push more requests

    jit::Compiler jit;

    void* func = 0;
    {
      timer::Running<size_t, 1000000> timer(ls_->shared().stats.jit_time_spent);

      if(req->is_block()) {
        jit.compile_block(ls_, req->method(), req->vmmethod());
      } else {
        jit.compile_method(ls_, req->method(), req->vmmethod());
      }

      func = jit.generate_function(ls_);
    }

    // We were unable to compile this function, likely
    // because it's got something we don't support.
    if(!func) {
      if(ls_->config().jit_show_compiling) {
        llvm::outs() << "[[[ JIT error in background compiling ]]]\n";
      }

      // If someone was waiting on this, wake them up.
      if(thread::Condition* cond = req->waiter()) {
        cond->signal();
      }

      delete req;

      // We don't depend on the GC here, so let it run independently
      // of us.
      ls_->shared().gc_independent();

      continue;
    }

    if(show_machine_code_) {
      jit.show_machine_code();
    }

    req->vmmethod()->set_jitted(jit.llvm_function(), jit.code_bytes(), func);

    if(!req->is_block()) {
      req->method()->execute = reinterpret_cast<executor>(func);
    }

    assert(req->method()->jit_data());

    req->method()->jit_data()->run_write_barrier(ls_->write_barrier(), req->method());

    ls_->shared().stats.jitted_methods++;

    if(ls_->config().jit_show_compiling) {
      llvm::outs() << "[[[ JIT finished background compiling "
                   << (req->is_block() ? " (block)" : " (method)")
                   << " ]]]\n";
    }

    // If someone was waiting on this, wake them up.
    if(thread::Condition* cond = req->waiter()) {
      cond->signal();
    }

    delete req;

    // We don't depend on the GC here, so let it run independently
    // of us.
    ls_->shared().gc_independent();
  }
}
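Both perform() variants that call run_write_barrier do so right after attaching freshly allocated JIT data to the method. The short sketch below illustrates, under generic assumptions about a generational collector, why that step exists: an already-scanned object that gains a reference to a new object must be recorded so it gets re-scanned. WriteBarrier and its members are hypothetical names, not the Rubinius GC interface.

// Illustrative only: a minimal remembered-set write barrier.
#include <unordered_set>

struct Object;

class WriteBarrier {
public:
  // Called whenever 'holder', which the collector may already have scanned,
  // gains a reference to a freshly allocated object.
  void record(Object* holder) {
    remembered_.insert(holder);   // re-scan 'holder' at the next young-gen GC
  }

private:
  std::unordered_set<Object*> remembered_;
};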