void wake_all_waiters(THREAD) {
  utilities::thread::Mutex::LockGuard guard(mutex_);

  if(!atomic::compare_and_swap(&should_stop_, 1, 0)) {
    // Ok, someone else has already restarted so we don't
    // have anything to do here anymore
    return;
  }

  if(cDebugThreading) {
    std::cerr << "[" << VM::current() << " WORLD waking all threads]\n";
  }

  if(state->run_state_ != ManagedThread::eAlone) {
    rubinius::bug("A non-alone thread is trying to wake all");
  }

  *check_global_interrupts_ = false;

  // Count ourselves back in as a pending (running) thread.
  atomic::fetch_and_add(&pending_threads_, 1);

  waiting_to_run_.broadcast();

  state->run_state_ = ManagedThread::eRunning;
}
void restart_threads_externally() {
  utilities::thread::Mutex::LockGuard guard(mutex_);

  if(!atomic::compare_and_swap(&should_stop_, 1, 0)) {
    // Ok, someone else has already restarted so we don't
    // have anything to do here anymore
    return;
  }

  if(cDebugThreading) {
    std::cerr << "[" << VM::current() << " WORLD waking all threads (externally)]\n";
  }

  waiting_to_run_.broadcast();
}
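// Illustrative sketch (not part of this source): both wakeup paths above guard
// the restart with a compare-and-swap on should_stop_, so only the first caller
// to flip the flag from 1 to 0 actually broadcasts; later callers see the swap
// fail and return early. A minimal standalone equivalent of that protocol,
// assuming C++11 <atomic>, <mutex>, and <condition_variable> in place of
// Rubinius' atomic and utilities::thread wrappers:
//
//   #include <atomic>
//   #include <condition_variable>
//   #include <mutex>
//
//   std::atomic<int> should_stop{1};
//   std::mutex mutex;
//   std::condition_variable waiting_to_run;
//
//   void restart_world() {
//     std::lock_guard<std::mutex> guard(mutex);
//     int expected = 1;
//     // Only one thread wins the CAS; everyone else finds the world
//     // already restarted and has nothing left to do.
//     if(!should_stop.compare_exchange_strong(expected, 0)) return;
//     waiting_to_run.notify_all();
//   }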
virtual void perform() {
  const char* thread_name = "rbx.jit";
  ManagedThread::set_current(ls_, thread_name);

  ls_->set_run_state(ManagedThread::eIndependent);

  RUBINIUS_THREAD_START(thread_name, ls_->thread_id(), 1);

#ifndef RBX_WINDOWS
  sigset_t set;
  sigfillset(&set);
  pthread_sigmask(SIG_SETMASK, &set, NULL);
#endif

  for(;;) { // forever

    BackgroundCompileRequest* req = 0;

    // Lock, wait, get a request, unlock
    {
      utilities::thread::Mutex::LockGuard guard(mutex_);

      if(pause_) {
        state = cPaused;

        paused_ = true;
        pause_condition_.broadcast();

        if(stop_) goto halt;

        while(pause_) {
          condition_.wait(mutex_);
          if(stop_) goto halt;
        }

        state = cUnknown;
        paused_ = false;
      }

      // If we've been asked to stop, do so now.
      if(stop_) goto halt;

      while(pending_requests_.empty()) {
        state = cIdle;

        // unlock and wait...
        condition_.wait(mutex_);

        if(stop_) goto halt;
      }

      // now locked again, shift a request
      req = pending_requests_.front();

      state = cRunning;
    }

    // This isn't ideal, but it's the safest. Keep the GC from
    // running while we're building the IR.
    ls_->gc_dependent();

    Context ctx(ls_);
    jit::Compiler jit(&ctx);

    // The mutex is now unlocked, allowing others to push more requests.
    current_req_ = req;
    current_compiler_ = &jit;

    int spec_id = 0;
    Class* cls = req->receiver_class();
    if(cls && !cls->nil_p()) {
      spec_id = cls->class_id();
    }

    void* func = 0;
    {
      timer::Running<1000000> timer(ls_->shared().stats.jit_time_spent);
      jit.compile(req);
      func = jit.generate_function();
    }

    // We were unable to compile this function, likely
    // because it uses something we don't support.
    if(!func) {
      if(ls_->config().jit_show_compiling) {
        CompiledCode* code = req->method();
        llvm::outs() << "[[[ JIT error background compiling "
                     << ls_->enclosure_name(code) << "#" << ls_->symbol_debug_str(code->name())
                     << (req->is_block() ? " (block)" : " (method)")
                     << " ]]]\n";
      }

      // If someone was waiting on this, wake them up.
      if(utilities::thread::Condition* cond = req->waiter()) {
        cond->signal();
      }

      current_req_ = 0;
      current_compiler_ = 0;
      pending_requests_.pop_front();
      delete req;

      // We don't depend on the GC here, so let it run independent
      // of us.
      ls_->gc_independent();

      continue;
    }

    if(show_machine_code_) {
      jit.show_machine_code();
    }

    // If JIT'ing has been disabled for this method since we started
    // compiling it, discard our work.
    if(!req->machine_code()->jit_disabled()) {

      jit::RuntimeDataHolder* rd = ctx.runtime_data_holder();

      atomic::memory_barrier();
      ls_->start_method_update();

      if(!req->is_block()) {
        if(spec_id) {
          req->method()->add_specialized(spec_id, reinterpret_cast<executor>(func), rd);
        } else {
          req->method()->set_unspecialized(reinterpret_cast<executor>(func), rd);
        }
      } else {
        req->method()->set_unspecialized(reinterpret_cast<executor>(func), rd);
      }

      req->machine_code()->clear_compiling();

      // assert(req->method()->jit_data());

      ls_->end_method_update();

      rd->run_write_barrier(ls_->write_barrier(), req->method());

      ls_->shared().stats.jitted_methods++;

      if(ls_->config().jit_show_compiling) {
        CompiledCode* code = req->method();
        llvm::outs() << "[[[ JIT finished background compiling "
                     << ls_->enclosure_name(code) << "#" << ls_->symbol_debug_str(code->name())
                     << (req->is_block() ? " (block)" : " (method)")
                     << " ]]]\n";
      }
    }

    // If someone was waiting on this, wake them up.
    if(utilities::thread::Condition* cond = req->waiter()) {
      cond->signal();
    }

    current_req_ = 0;
    current_compiler_ = 0;
    pending_requests_.pop_front();
    delete req;

    // We don't depend on the GC here, so let it run independent
    // of us.
    ls_->gc_independent();
  }

halt:
  RUBINIUS_THREAD_STOP(thread_name, ls_->thread_id(), 1);
}
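// Illustrative sketch (not part of this source): the worker loop in perform()
// above follows a standard condition-variable pattern: take the lock, honor a
// pause request, honor a stop request, sleep while the queue is empty, then
// take one request and drop the lock before doing the real work. A minimal
// standalone version of that shape, assuming C++11 primitives and a
// hypothetical Request type in place of BackgroundCompileRequest:
//
//   #include <condition_variable>
//   #include <deque>
//   #include <mutex>
//
//   struct Request { /* ... */ };
//
//   std::mutex mutex;
//   std::condition_variable condition;
//   std::deque<Request*> pending;
//   bool stop = false;
//
//   void worker_loop() {
//     for(;;) {
//       Request* req = 0;
//       {
//         std::unique_lock<std::mutex> guard(mutex);
//         // Sleep until there is work or we are told to stop.
//         while(pending.empty() && !stop) condition.wait(guard);
//         if(stop) return;
//         req = pending.front();
//         pending.pop_front();
//       }
//       // Lock released: compile (or otherwise process) req here,
//       // so producers can keep queueing requests meanwhile.
//       delete req;
//     }
//   }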
void unlock() {
  stop_ = false;
  wait_condition_.broadcast();
  lock_.unlock();
}
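// Illustrative sketch (not part of this source): unlock() above clears a stop
// flag, broadcasts, and releases the lock, which implies waiters block roughly
// like the loop below. The lock() counterpart is not shown in this excerpt, so
// this is only an assumed shape, written with C++11 primitives:
//
//   #include <condition_variable>
//   #include <mutex>
//
//   std::mutex lock;
//   std::condition_variable wait_condition;
//   bool stop = false;
//
//   void wait_until_unlocked() {
//     std::unique_lock<std::mutex> guard(lock);
//     // Re-check the flag on every wakeup to tolerate spurious wakeups
//     // and multiple waiters.
//     while(stop) wait_condition.wait(guard);
//   }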