void* Thread::run(void* ptr) {
  VM* vm = reinterpret_cast<VM*>(ptr);

  State state_obj(vm), *state = &state_obj;

  vm->set_stack_bounds(vm->thread()->stack_size()->to_native());
  vm->set_current_thread();
  vm->set_start_time();

  RUBINIUS_THREAD_START(
      const_cast<RBX_DTRACE_CHAR_P>(vm->name().c_str()), vm->thread_id(), 0);

  vm->thread()->pid(state, Fixnum::from(gettid()));

  if(state->shared().config.log_thread_lifetime.value) {
    logger::write("thread: run: %s, %d, %#x",
        vm->name().c_str(), vm->thread()->pid()->to_native(),
        (unsigned int)thread_debug_self());
  }

  NativeMethod::init_thread(state);

  state->vm()->managed_phase(state);

  Object* value = vm->thread()->function()(state);
  vm->set_call_frame(NULL);

  vm->thread()->join_lock_.lock();
  vm->thread()->stopped();

  memory::LockedObjects& locked_objects = state->vm()->locked_objects();
  for(memory::LockedObjects::iterator i = locked_objects.begin();
      i != locked_objects.end();
      ++i) {
    (*i)->unlock_for_terminate(state);
  }
  locked_objects.clear();

  vm->thread()->join_cond_.broadcast();
  vm->thread()->join_lock_.unlock();

  NativeMethod::cleanup_thread(state);

  if(state->shared().config.log_thread_lifetime.value) {
    logger::write("thread: exit: %s %fs", vm->name().c_str(), vm->run_time());
  }

  vm->unmanaged_phase(state);

  if(vm->main_thread_p() ||
      (!value && vm->thread_state()->raise_reason() == cExit)) {
    state->shared().signals()->system_exit(vm->thread_state()->raise_value());
  }

  vm->set_zombie(state);

  RUBINIUS_THREAD_STOP(
      const_cast<RBX_DTRACE_CHAR_P>(vm->name().c_str()), vm->thread_id(), 0);

  return 0;
}
void Console::process_requests(STATE) {
  GCTokenImpl gct;
  RBX_DTRACE_CONST char* thread_name =
    const_cast<RBX_DTRACE_CONST char*>("rbx.console.request");
  request_vm_->set_name(thread_name);

  RUBINIUS_THREAD_START(const_cast<RBX_DTRACE_CONST char*>(thread_name),
                        state->vm()->thread_id(), 1);

  state->vm()->thread->hard_unlock(state, gct, 0);
  state->gc_independent(gct, 0);

  while(!request_exit_) {
    Object* status = fsevent_.get()->wait_for_event(state);

    if(request_exit_) break;
    if(status->nil_p()) continue;

    char* request = read_request(state);

    if(request) {
      utilities::thread::Mutex::LockGuard lg(list_lock_);
      request_list_->push_back(request);
      response_cond_.signal();
    }
  }

  state->gc_dependent(gct, 0);

  RUBINIUS_THREAD_STOP(const_cast<RBX_DTRACE_CONST char*>(thread_name),
                       state->vm()->thread_id(), 1);
}
void ImmixMarker::perform(STATE) {
  GCTokenImpl gct;
  const char* thread_name = "rbx.immix";
  self_->set_name(thread_name);

  RUBINIUS_THREAD_START(thread_name, state->vm()->thread_id(), 1);

  state->vm()->thread->hard_unlock(state, gct, 0);

  while(!exit_) {
    if(data_) {
      {
        timer::Running<1000000> timer(
            state->memory()->gc_stats.total_full_concurrent_collection_time,
            state->memory()->gc_stats.last_full_concurrent_collection_time);

        // Allow for a young stop the world GC to occur
        // every bunch of marks. 100 is a fairly arbitrary
        // number, based mostly on the fact it didn't cause
        // big increases in young gc times because of long
        // stop the world wait times.
        while(immix_->process_mark_stack(100)) {
          state->gc_independent(gct, 0);
          state->gc_dependent(gct, 0);
        }
      }

      atomic::integer initial_stop =
        state->memory()->gc_stats.last_full_stop_collection_time;

      {
        timer::Running<1000000> timer(
            state->memory()->gc_stats.total_full_stop_collection_time,
            state->memory()->gc_stats.last_full_stop_collection_time);

        // Finish and pause
        while(!state->stop_the_world()) {
          state->checkpoint(gct, 0);
        }
        state->memory()->collect_mature_finish(state, data_);
        state->memory()->clear_mature_mark_in_progress();
      }

      state->memory()->gc_stats.last_full_stop_collection_time.add(
          initial_stop.value);
      state->memory()->print_mature_stats(state, data_);
      state->restart_world();
    }

    {
      utilities::thread::Mutex::LockGuard lg(run_lock_);
      if(data_) {
        delete data_;
        data_ = NULL;
      }
      if(exit_) break;

      state->gc_independent(gct, 0);
      paused_ = true;
      pause_cond_.signal();
      run_cond_.wait(run_lock_);
    }
    state->gc_dependent(gct, 0);
  }

  state->memory()->clear_mature_mark_in_progress();

  RUBINIUS_THREAD_STOP(thread_name, state->vm()->thread_id(), 1);
}
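The paused_ / pause_cond_ / run_cond_ handshake above recurs in several of these workers: the marker parks itself under run_lock_, signals the controller that it is parked, and waits to be told to run again. A distilled, standalone sketch of that handshake, using portable C++11 primitives instead of Rubinius' utilities::thread wrappers (all names here are illustrative, not the Rubinius API):

#include <condition_variable>
#include <mutex>

class PausableWorker {
  std::mutex run_lock_;
  std::condition_variable pause_cond_; // worker -> controller: "I'm parked"
  std::condition_variable run_cond_;   // controller -> worker: "resume"
  bool paused_ = false;
  bool exit_ = false;

public:
  void worker_loop() {
    for(;;) {
      // ... one round of work happens here, outside run_lock_,
      // just as ImmixMarker marks outside its LockGuard ...

      std::unique_lock<std::mutex> lg(run_lock_);
      if(exit_) break;

      paused_ = true;
      pause_cond_.notify_one();  // tell the controller we are parked

      run_cond_.wait(lg, [this] { return !paused_ || exit_; });
      if(exit_) break;
    }
  }

  // Controller side: block until the worker is quiescent.
  void wait_until_paused() {
    std::unique_lock<std::mutex> lg(run_lock_);
    pause_cond_.wait(lg, [this] { return paused_; });
  }

  void resume() {
    std::lock_guard<std::mutex> lg(run_lock_);
    paused_ = false;
    run_cond_.notify_one();
  }

  void stop() {
    std::lock_guard<std::mutex> lg(run_lock_);
    exit_ = true;
    run_cond_.notify_one();
  }
};

The controller's wait_until_paused() plays the role of the coordinator that must know the marker is parked before it mutates shared GC state.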
void Console::process_responses(STATE) {
  GCTokenImpl gct;
  RBX_DTRACE_CONST char* thread_name =
    const_cast<RBX_DTRACE_CONST char*>("rbx.console.response");
  response_vm_->set_name(thread_name);

  RUBINIUS_THREAD_START(const_cast<RBX_DTRACE_CONST char*>(thread_name),
                        state->vm()->thread_id(), 1);

  state->vm()->thread->hard_unlock(state, gct, 0);
  state->gc_dependent(gct, 0);

  char* request = NULL;

  while(!response_exit_) {
    {
      utilities::thread::Mutex::LockGuard lg(list_lock_);

      if(request_list_->size() > 0) {
        request = request_list_->back();
        request_list_->pop_back();
      }
    }

    if(response_exit_) break;

    if(request) {
      Array* args = Array::create(state, 1);
      args->aset(state, 0, String::create(state, request));

      Object* result = console_.get()->send(state, 0,
          state->symbol("evaluate"), args, cNil);

      if(String* response = try_as<String>(result)) {
        GCIndependent guard(state, 0);
        write_response(state,
            reinterpret_cast<const char*>(response->byte_address()),
            response->byte_size());
      }

      request = NULL;
    } else {
      utilities::thread::Mutex::LockGuard lg(response_lock_);
      GCIndependent guard(state, 0);

      atomic::memory_barrier();
      if(response_exit_) break;

      response_cond_.wait(response_lock_);
    }
  }

  RUBINIUS_THREAD_STOP(const_cast<RBX_DTRACE_CONST char*>(thread_name),
                       state->vm()->thread_id(), 1);
}
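Taken together, process_requests and process_responses form a producer/consumer pair: the request thread pushes onto request_list_ under list_lock_ and signals response_cond_, while the response thread drains the list and blocks on the condition when it is empty. Note that the consumer takes from the back, so requests are served newest-first. A minimal self-contained sketch of the same handoff, with standard C++ primitives and illustrative names:

#include <condition_variable>
#include <deque>
#include <mutex>
#include <string>

class RequestQueue {
  std::mutex list_lock_;
  std::condition_variable response_cond_;
  std::deque<std::string> request_list_;
  bool exit_ = false;

public:
  // Producer side: called from the request thread.
  void push(std::string request) {
    std::lock_guard<std::mutex> lg(list_lock_);
    request_list_.push_back(std::move(request));
    response_cond_.notify_one();
  }

  // Consumer side: called from the response thread. Returns false on exit.
  bool pop(std::string& out) {
    std::unique_lock<std::mutex> lg(list_lock_);
    response_cond_.wait(lg, [this] { return exit_ || !request_list_.empty(); });
    if(exit_) return false;

    out = std::move(request_list_.back()); // LIFO, mirroring back()/pop_back() above
    request_list_.pop_back();
    return true;
  }

  void shutdown() {
    std::lock_guard<std::mutex> lg(list_lock_);
    exit_ = true;
    response_cond_.notify_all();
  }
};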
void FinalizerHandler::perform(STATE) {
  GCTokenImpl gct;
  const char* thread_name = "rbx.finalizer";
  self_->set_name(thread_name);

  RUBINIUS_THREAD_START(thread_name, state->vm()->thread_id(), 1);

  state->vm()->thread->hard_unlock(state, gct, 0);

  while(!exit_) {
    state->vm()->set_call_frame(0);

    if(!process_list_) first_process_item();

    if(!process_list_) {
      {
        utilities::thread::Mutex::LockGuard lg(worker_lock_);

        if(finishing_) supervisor_signal();

        // exit_ might have been set in the meantime, after
        // we grabbed the worker_lock
        if(exit_) break;

        state->gc_independent(gct, 0);
        paused_ = true;
        pause_cond_.signal();

        worker_wait();

        if(exit_) break;
      }

      state->gc_dependent();

      {
        utilities::thread::Mutex::LockGuard lg(worker_lock_);
        paused_ = false;
        if(exit_) break;
      }

      continue;
    }

    finalize(state);
    next_process_item();
  }

  RUBINIUS_THREAD_STOP(thread_name, state->vm()->thread_id(), 1);
}
void SignalHandler::perform(STATE) {
#ifndef RBX_WINDOWS
  sigset_t set;
  sigfillset(&set);
  pthread_sigmask(SIG_BLOCK, &set, NULL);
#endif

  GCTokenImpl gct;
  const char* thread_name = "rbx.signal";
  utilities::thread::Thread::set_os_name(thread_name);

  RUBINIUS_THREAD_START(thread_name, state->vm()->thread_id(), 1);

  state->vm()->thread->hard_unlock(state, gct, 0);

  while(!exit_) {
    {
      utilities::thread::Mutex::LockGuard lg(worker_lock_);
      if(exit_) break;

      state->gc_independent(gct, 0);

      paused_ = true;
      pause_cond_.signal();

      worker_cond_.wait(worker_lock_);

      // If we should exit now, don't try to become
      // dependent first but break and exit the thread
      if(exit_) break;
    }

    state->gc_dependent();

    {
      utilities::thread::Mutex::LockGuard lg(worker_lock_);
      if(exit_) break;
      paused_ = false;
    }

    target_->set_check_local_interrupts();
    target_->wakeup(state, gct, 0);
  }

  RUBINIUS_THREAD_STOP(thread_name, state->vm()->thread_id(), 1);
}
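The sigfillset / pthread_sigmask prologue is the standard POSIX idiom for funneling asynchronous signals to one place: a thread that blocks all signals can never be interrupted by them, so delivery is confined to whichever thread leaves them unblocked or collects them synchronously. A minimal sketch of the idiom; the sigwait loop shown is a common standalone variant, not how Rubinius itself dispatches (here, the runtime wakes this thread via worker_cond_ from its low-level handler instead):

#include <pthread.h>
#include <signal.h>

// Call in every worker thread so signals are never delivered there.
void block_all_signals_in_this_thread() {
  sigset_t set;
  sigfillset(&set);                       // every signal
  pthread_sigmask(SIG_BLOCK, &set, NULL); // mask them for this thread only
}

// Standalone variant: one dedicated thread receives signals synchronously
// with sigwait() while all other threads keep them blocked.
void* signal_thread(void*) {
  sigset_t set;
  sigfillset(&set);

  int signum = 0;
  while(sigwait(&set, &signum) == 0) {
    // dispatch signum to the runtime here
  }
  return NULL;
}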
void Metrics::process_metrics(STATE) {
  GCTokenImpl gct;
  RBX_DTRACE_CONST char* thread_name =
    const_cast<RBX_DTRACE_CONST char*>("rbx.metrics");
  vm_->set_name(thread_name);

  RUBINIUS_THREAD_START(const_cast<RBX_DTRACE_CONST char*>(thread_name),
                        state->vm()->thread_id(), 1);

  state->vm()->thread->hard_unlock(state, gct, 0);
  state->gc_dependent(gct, 0);

  timer_->set(interval_);

  while(!thread_exit_) {
    {
      GCIndependent guard(state, 0);

      if(timer_->wait_for_tick() < 0) {
        logger::error("metrics: error waiting for timer, exiting thread");
        break;
      }
    }

    if(thread_exit_) break;

    {
      utilities::thread::Mutex::LockGuard guard(metrics_lock_);

      metrics_collection_.init();

      ThreadList* threads = state->shared().threads();

      for(ThreadList::iterator i = threads->begin();
          i != threads->end();
          ++i) {
        if(VM* vm = (*i)->as_vm()) {
          if(MetricsData* data = vm->metrics()) {
            metrics_collection_.add(data);
          }
        }
      }

#ifdef ENABLE_LLVM
      if(state->shared().llvm_state) {
        if(VM* vm = state->shared().llvm_state->vm()) {
          metrics_collection_.add(vm->metrics());
        }
      }
#endif

      metrics_collection_.add(&metrics_history_);

      update_ruby_values(state);
    }

    {
      GCIndependent guard(state, 0);

      if(emitter_) emitter_->send_metrics();
    }
  }

  timer_->clear();

  RUBINIUS_THREAD_STOP(const_cast<RBX_DTRACE_CONST char*>(thread_name),
                       state->vm()->thread_id(), 1);
}
virtual void perform() {
  const char* thread_name = "rbx.jit";
  ManagedThread::set_current(ls_, thread_name);

  ls_->set_run_state(ManagedThread::eIndependent);

  RUBINIUS_THREAD_START(thread_name, ls_->thread_id(), 1);

#ifndef RBX_WINDOWS
  sigset_t set;
  sigfillset(&set);
  pthread_sigmask(SIG_SETMASK, &set, NULL);
#endif

  for(;;) { // forever
    BackgroundCompileRequest* req = 0;

    // Lock, wait, get a request, unlock
    {
      utilities::thread::Mutex::LockGuard guard(mutex_);

      if(pause_) {
        state = cPaused;

        paused_ = true;
        pause_condition_.broadcast();

        if(stop_) goto halt;

        while(pause_) {
          condition_.wait(mutex_);
          if(stop_) goto halt;
        }

        state = cUnknown;
        paused_ = false;
      }

      // If we've been asked to stop, do so now.
      if(stop_) goto halt;

      while(pending_requests_.empty()) {
        state = cIdle;

        // unlock and wait...
        condition_.wait(mutex_);

        if(stop_) goto halt;
      }

      // now locked again, shift a request
      req = pending_requests_.front();

      state = cRunning;
    }

    // This isn't ideal, but it's the safest. Keep the GC from
    // running while we're building the IR.
    ls_->gc_dependent();

    Context ctx(ls_);
    jit::Compiler jit(&ctx);

    // mutex now unlocked, allowing others to push more requests
    current_req_ = req;
    current_compiler_ = &jit;

    int spec_id = 0;
    Class* cls = req->receiver_class();
    if(cls && !cls->nil_p()) {
      spec_id = cls->class_id();
    }

    void* func = 0;
    {
      timer::Running<1000000> timer(ls_->shared().stats.jit_time_spent);

      jit.compile(req);
      func = jit.generate_function();
    }

    // We were unable to compile this function, likely
    // because it's got something we don't support.
    if(!func) {
      if(ls_->config().jit_show_compiling) {
        CompiledCode* code = req->method();
        llvm::outs() << "[[[ JIT error background compiling "
                     << ls_->enclosure_name(code) << "#"
                     << ls_->symbol_debug_str(code->name())
                     << (req->is_block() ? " (block)" : " (method)")
                     << " ]]]\n";
      }

      // If someone was waiting on this, wake them up.
      if(utilities::thread::Condition* cond = req->waiter()) {
        cond->signal();
      }

      current_req_ = 0;
      current_compiler_ = 0;
      pending_requests_.pop_front();
      delete req;

      // We don't depend on the GC here, so let it run independent
      // of us.
      ls_->gc_independent();

      continue;
    }

    if(show_machine_code_) {
      jit.show_machine_code();
    }

    // If the method has had jit'ing request disabled since we started
    // JIT'ing it, discard our work.
    if(!req->machine_code()->jit_disabled()) {
      jit::RuntimeDataHolder* rd = ctx.runtime_data_holder();

      atomic::memory_barrier();
      ls_->start_method_update();

      if(!req->is_block()) {
        if(spec_id) {
          req->method()->add_specialized(spec_id,
              reinterpret_cast<executor>(func), rd);
        } else {
          req->method()->set_unspecialized(reinterpret_cast<executor>(func), rd);
        }
      } else {
        req->method()->set_unspecialized(reinterpret_cast<executor>(func), rd);
      }

      req->machine_code()->clear_compiling();

      // assert(req->method()->jit_data());

      ls_->end_method_update();

      rd->run_write_barrier(ls_->write_barrier(), req->method());

      ls_->shared().stats.jitted_methods++;

      if(ls_->config().jit_show_compiling) {
        CompiledCode* code = req->method();
        llvm::outs() << "[[[ JIT finished background compiling "
                     << ls_->enclosure_name(code) << "#"
                     << ls_->symbol_debug_str(code->name())
                     << (req->is_block() ? " (block)" : " (method)")
                     << " ]]]\n";
      }
    }

    // If someone was waiting on this, wake them up.
    if(utilities::thread::Condition* cond = req->waiter()) {
      cond->signal();
    }

    current_req_ = 0;
    current_compiler_ = 0;
    pending_requests_.pop_front();
    delete req;

    // We don't depend on the GC here, so let it run independent
    // of us.
    ls_->gc_independent();
  }

halt:
  RUBINIUS_THREAD_STOP(thread_name, ls_->thread_id(), 1);
}
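One detail worth noting in the background compiler: req->waiter() is signaled on both the failure path and the success path, so a foreground thread blocked on a synchronous compile can never be stranded. A minimal sketch of that waiter pattern with standard C++ primitives; CompileRequest, finish_request, and wait_for_compile are illustrative names, not the Rubinius API:

#include <condition_variable>
#include <mutex>

struct CompileRequest {
  // Set by the foreground thread before the request is published
  // to the background compiler's queue.
  std::mutex* wait_mutex = nullptr;
  std::condition_variable* waiter = nullptr;
  bool done = false;
};

// Background thread: always signal, on success *and* on failure,
// mirroring the two cond->signal() calls above.
void finish_request(CompileRequest* req) {
  if(req->waiter) {
    std::lock_guard<std::mutex> lg(*req->wait_mutex);
    req->done = true;
    req->waiter->notify_one();
  }
}

// Foreground thread: block until the background compiler finishes.
void wait_for_compile(CompileRequest* req,
                      std::mutex& m, std::condition_variable& cv) {
  req->wait_mutex = &m;
  req->waiter = &cv;
  // ... enqueue req with the background compiler here ...
  std::unique_lock<std::mutex> lg(m);
  cv.wait(lg, [req] { return req->done; });
}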
void* Thread::in_new_thread(void* ptr) {
  VM* vm = reinterpret_cast<VM*>(ptr);

  State state_obj(vm), *state = &state_obj;

  int calculate_stack = 0;
  NativeMethod::init_thread(state);

  // Build the thread name once; it is reused by the dtrace probes below,
  // so it must stay in scope for the whole function.
  std::ostringstream tn;
  tn << "rbx.ruby." << vm->thread_id();
  VM::set_current(vm, tn.str());

  RUBINIUS_THREAD_START(tn.str().c_str(), vm->thread_id(), 0);

  state->set_call_frame(0);

  if(cDebugThreading) {
    std::cerr << "[THREAD " << vm->thread_id()
              << " (" << (unsigned int)thread_debug_self()
              << ") started thread]\n";
  }

  vm->set_root_stack(reinterpret_cast<uintptr_t>(&calculate_stack),
                     THREAD_STACK_SIZE);

  GCTokenImpl gct;

  // Lock the thread object and unlock it at __run__ in the ruby land.
  vm->thread->hard_lock(state, gct);

  vm->thread->alive(state, cTrue);
  vm->thread->init_lock_.unlock();

  // Become GC-dependent after unlocking init_lock_ to avoid deadlocks.
  // gc_dependent may lock when it detects GC is happening. Also the parent
  // thread is locked until init_lock_ is unlocked by this child thread.
  vm->shared.gc_dependent(state);

  vm->shared.tool_broker()->thread_start(state);
  Object* ret = vm->thread->runner_(state);
  vm->shared.tool_broker()->thread_stop(state);

  if(!ret) {
    if(vm->thread_state()->raise_reason() == cExit) {
      vm->shared.env()->halt_and_exit(state);
    }
  }

  // Clear the call_frame, so that if we wait for GC going independent,
  // the GC doesn't see pointers into now-unallocated CallFrames
  vm->set_call_frame(0);

  LockedObjects& los = vm->locked_objects();
  for(LockedObjects::iterator i = los.begin();
      i != los.end();
      ++i) {
    (*i)->unlock_for_terminate(state, gct);
  }

  vm->thread->init_lock_.lock();

  NativeMethod::cleanup_thread(state);

  vm->thread->alive(state, cFalse);
  vm->thread->cleanup();
  vm->thread->init_lock_.unlock();

  vm->shared.gc_independent(state);
  vm->shared.clear_critical(state);

  // Cache the id: the VM must not be touched once it has been discarded.
  uint32_t thread_id = vm->thread_id();

  VM::discard(state, vm);

  if(cDebugThreading) {
    std::cerr << "[LOCK thread " << thread_id << " exited]\n";
  }

  RUBINIUS_THREAD_STOP(tn.str().c_str(), thread_id, 0);

  return 0;
}
void* Fiber::run(void* ptr) {
  VM* vm = reinterpret_cast<VM*>(ptr);
  State state_obj(vm), *state = &state_obj;

  vm->set_stack_bounds(vm->fiber()->stack_size()->to_native());
  vm->set_current_thread();
  vm->set_start_time();

  RUBINIUS_THREAD_START(
      const_cast<RBX_DTRACE_CHAR_P>(vm->name().c_str()),
      vm->fiber()->fiber_id()->to_native(), 0);

  vm->fiber()->pid(state, Fixnum::from(gettid()));

  if(state->shared().config.machine_fiber_log_lifetime.value) {
    logger::write("fiber: run: %s, %d, %#x",
        vm->name().c_str(), vm->fiber()->pid()->to_native(),
        (unsigned int)pthread_self());
  }

  NativeMethod::init_thread(state);

  vm->fiber()->suspend_and_continue(state);

  Object* value = vm->fiber()->block()->send(state, G(sym_call),
      as<Array>(vm->thread()->fiber_value()), vm->fiber()->block());
  vm->set_call_frame(NULL);

  if(value) {
    vm->thread()->fiber_value(state, value);
  } else {
    vm->thread()->fiber_value(state, cNil);
  }

  if(vm->thread_state()->raise_reason() != cFiberCancel) {
    if(vm->fiber()->status() == eTransfer) {
      // restart the root Fiber
      vm->thread()->fiber()->invoke_context(vm);
      vm->thread()->fiber()->restart(state);
    } else {
      vm->fiber()->invoke_context()->fiber()->restart(state);
    }
  }

  {
    std::lock_guard<std::mutex> guard(vm->fiber_wait_mutex());

    vm->fiber()->status(eDead);
    vm->set_suspended();
  }

  vm->unmanaged_phase();

  state->shared().report_profile(state);

  NativeMethod::cleanup_thread(state);

  if(state->shared().config.machine_fiber_log_lifetime.value) {
    logger::write("fiber: exit: %s %fs", vm->name().c_str(), vm->run_time());
  }

  vm->set_zombie(state);

  RUBINIUS_THREAD_STOP(
      const_cast<RBX_DTRACE_CHAR_P>(vm->name().c_str()),
      vm->fiber()->fiber_id()->to_native(), 0);

  return 0;
}
void* Thread::run(void* ptr) {
  GCTokenImpl gct;

  VM* vm = reinterpret_cast<VM*>(ptr);

  SharedState& shared = vm->shared;
  State state_obj(vm), *state = &state_obj;

  vm->set_current_thread();

  RUBINIUS_THREAD_START(
      const_cast<RBX_DTRACE_CHAR_P>(vm->name().c_str()), vm->thread_id(), 0);

  if(cDebugThreading) {
    utilities::logger::debug("Thread: start thread: id: %d, pthread: %d",
        vm->thread_id(), (unsigned int)thread_debug_self());
  }

  int stack_address = 0;
  vm->set_root_stack(reinterpret_cast<uintptr_t>(&stack_address),
                     THREAD_STACK_SIZE);

  NativeMethod::init_thread(state);

  vm->thread->pid(state, Fixnum::from(gettid()));

  // Lock the thread object and unlock it at __run__ in the ruby land.
  vm->thread->alive(state, cTrue);
  vm->thread->init_lock_.unlock();

  // Become GC-dependent after unlocking init_lock_ to avoid deadlocks.
  // gc_dependent may lock when it detects GC is happening. Also the parent
  // thread is locked until init_lock_ is unlocked by this child thread.
  state->gc_dependent(gct, 0);
  vm->thread->hard_lock(state, gct, 0);

  vm->shared.tool_broker()->thread_start(state);
  Object* ret = vm->thread->function_(state);
  vm->shared.tool_broker()->thread_stop(state);

  // Clear the call_frame, so that if we wait for GC going independent,
  // the GC doesn't see pointers into now-unallocated CallFrames
  vm->set_call_frame(0);

  vm->thread->join_lock_.lock();
  vm->thread->stopped();

  LockedObjects& los = state->vm()->locked_objects();
  for(LockedObjects::iterator i = los.begin();
      i != los.end();
      ++i) {
    (*i)->unlock_for_terminate(state, gct, 0);
  }

  vm->thread->join_cond_.broadcast();
  vm->thread->join_lock_.unlock();

  NativeMethod::cleanup_thread(state);

  if(cDebugThreading) {
    utilities::logger::debug("Thread: exit thread: id: %d", vm->thread_id());
  }

  shared.gc_independent();

  if(vm->main_thread_p() ||
      (!ret && vm->thread_state()->raise_reason() == cExit)) {
    state->shared().signals()->system_exit(vm->thread_state()->raise_value());
  } else {
    vm->set_zombie(state);
  }

  RUBINIUS_THREAD_STOP(
      const_cast<RBX_DTRACE_CHAR_P>(vm->name().c_str()), vm->thread_id(), 0);

  return 0;
}
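Both versions of Thread::run end with the same join handshake: take join_lock_, mark the thread stopped, broadcast join_cond_, release the lock. Broadcasting (rather than signaling) matters because several threads may be blocked in Thread#join at once. A minimal sketch of the two sides of that handshake, with standard C++ primitives and illustrative names:

#include <condition_variable>
#include <mutex>

class Joinable {
  std::mutex join_lock_;
  std::condition_variable join_cond_;
  bool stopped_ = false;

public:
  // Called by the exiting thread, mirroring stopped() + broadcast() above.
  void mark_stopped() {
    std::lock_guard<std::mutex> lg(join_lock_);
    stopped_ = true;
    join_cond_.notify_all(); // broadcast: there may be several joiners
  }

  // Called by any thread implementing Thread#join: wake-ups are only
  // trusted once stopped_ is observed under the lock.
  void join() {
    std::unique_lock<std::mutex> lg(join_lock_);
    join_cond_.wait(lg, [this] { return stopped_; });
  }
};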