// Block the calling thread until the target thread terminates.
//
// Takes init_lock_ only long enough to snapshot the target VM/os-thread;
// the actual pthread_join happens GC-independent so a collection can run
// while we block. Always returns cTrue, even on join errors (which are
// only reported to stderr).
Object* Thread::join(STATE, GCToken gct, CallFrame* calling_environment) {
  state->set_call_frame(calling_environment);

  init_lock_.lock();

  VM* target_vm = vm_;

  // No VM means the thread is already gone; nothing to join.
  if(!target_vm) {
    init_lock_.unlock();
    return cTrue;
  }

  pthread_t native_id = target_vm->os_thread();

  if(cDebugThreading) {
    std::cerr << "[THREAD joining " << thread_debug_id(native_id) << "]\n";
  }

  init_lock_.unlock();

  // Drop GC dependency while blocked in pthread_join, then re-acquire.
  state->gc_independent(gct);
  void* exit_value;
  int status = pthread_join(native_id, &exit_value);
  state->gc_dependent();

  if(status == EDEADLK) {
    std::cerr << "Join deadlock: " << thread_debug_id(native_id) << "/"
              << thread_debug_self() << "\n";
  } else if(status == EINVAL) {
    std::cerr << "Invalid thread id: " << thread_debug_id(native_id) << "\n";
  } else if(status == ESRCH) {
    // This means that the thread finished execution and detached
    // itself. We treat this as having joined it.
  }

  return cTrue;
}
void* Thread::in_new_thread(void* ptr) { VM* vm = reinterpret_cast<VM*>(ptr); State state_obj(vm), *state = &state_obj; int calculate_stack = 0; NativeMethod::init_thread(state); { std::ostringstream tn; tn << "rbx.ruby." << vm->thread_id(); VM::set_current(vm, tn.str()); } RUBINIUS_THREAD_START(tn.str().c_str(), vm->thread_id(), 0); state->set_call_frame(0); if(cDebugThreading) { std::cerr << "[THREAD " << vm->thread_id() << " (" << (unsigned int)thread_debug_self() << ") started thread]\n"; } vm->set_root_stack(reinterpret_cast<uintptr_t>(&calculate_stack), THREAD_STACK_SIZE); GCTokenImpl gct; // Lock the thread object and unlock it at __run__ in the ruby land. vm->thread->hard_lock(state, gct); vm->thread->alive(state, cTrue); vm->thread->init_lock_.unlock(); // Become GC-dependent after unlocking init_lock_ to avoid deadlocks. // gc_dependent may lock when it detects GC is happening. Also the parent // thread is locked until init_lock_ is unlocked by this child thread. vm->shared.gc_dependent(state); vm->shared.tool_broker()->thread_start(state); Object* ret = vm->thread->runner_(state); vm->shared.tool_broker()->thread_stop(state); if(!ret) { if(vm->thread_state()->raise_reason() == cExit) { vm->shared.env()->halt_and_exit(state); } } // Clear the call_frame, so that if we wait for GC going independent, // the GC doesn't see pointers into now-unallocated CallFrames vm->set_call_frame(0); LockedObjects& los = vm->locked_objects(); for(LockedObjects::iterator i = los.begin(); i != los.end(); ++i) { (*i)->unlock_for_terminate(state, gct); } vm->thread->init_lock_.lock(); NativeMethod::cleanup_thread(state); vm->thread->alive(state, cFalse); vm->thread->cleanup(); vm->thread->init_lock_.unlock(); vm->shared.gc_independent(state); vm->shared.clear_critical(state); VM::discard(state, vm); if(cDebugThreading) { std::cerr << "[LOCK thread " << vm->thread_id() << " exited]\n"; } RUBINIUS_THREAD_STOP(tn.str().c_str(), vm->thread_id(), 0); return 0; }
void* Thread::in_new_thread(void* ptr) { VM* vm = reinterpret_cast<VM*>(ptr); State state_obj(vm), *state = &state_obj; int calculate_stack = 0; NativeMethod::init_thread(state); VM::set_current(vm); state->set_call_frame(0); vm->shared.gc_dependent(state); if(cDebugThreading) { std::cerr << "[THREAD " << vm->thread_id() << " (" << (unsigned int)thread_debug_self() << ") started thread]\n"; } vm->set_root_stack(reinterpret_cast<uintptr_t>(&calculate_stack), 4194304); vm->thread->init_lock_.unlock(); vm->shared.tool_broker()->thread_start(state); Object* ret = vm->thread->runner_(state); vm->shared.tool_broker()->thread_stop(state); if(!ret) { if(vm->thread_state()->raise_reason() == cExit) { vm->shared.env()->halt_and_exit(state); } } vm->thread->init_lock_.lock(); GCTokenImpl gct; std::list<ObjectHeader*>& los = vm->locked_objects(); for(std::list<ObjectHeader*>::iterator i = los.begin(); i != los.end(); i++) { (*i)->unlock_for_terminate(state, gct); } NativeMethod::cleanup_thread(state); vm->thread->alive(state, cFalse); vm->thread->cleanup(); vm->thread->init_lock_.unlock(); vm->shared.remove_managed_thread(vm); // Clear the call_frame, so that if we wait for GC going independent, // the GC doesn't see pointers into now-unallocated CallFrames vm->set_call_frame(0); vm->shared.gc_independent(state); vm->shared.clear_critical(state); VM::discard(state, vm); if(cDebugThreading) { std::cerr << "[LOCK thread " << vm->thread_id() << " exited]\n"; } return 0; }
// Entry point for the OS thread backing a Ruby Thread.
//
// Sets up per-thread VM state, runs the thread body via function_, then
// unwinds: releases locked objects, wakes joiners, and either triggers
// process exit (main thread, or an unhandled cExit) or zombifies the VM.
// The ordering of lock/GC-phase operations below is deliberate — do not
// reorder without understanding the deadlock notes inline.
// Always returns 0 (the pthread exit value is unused).
void* Thread::run(void* ptr) {
  GCTokenImpl gct;

  VM* vm = reinterpret_cast<VM*>(ptr);

  SharedState& shared = vm->shared;
  State state_obj(vm), *state = &state_obj;

  vm->set_current_thread();

  RUBINIUS_THREAD_START(
      const_cast<RBX_DTRACE_CHAR_P>(vm->name().c_str()), vm->thread_id(), 0);

  if(cDebugThreading) {
    utilities::logger::debug("Thread: start thread: id: %d, pthread: %d",
        vm->thread_id(), (unsigned int)thread_debug_self());
  }

  // Use a local's address as the approximate base of this thread's stack.
  int stack_address = 0;
  vm->set_root_stack(reinterpret_cast<uintptr_t>(&stack_address), THREAD_STACK_SIZE);

  NativeMethod::init_thread(state);

  vm->thread->pid(state, Fixnum::from(gettid()));

  // Lock the thread object and unlock it at __run__ in the ruby land.
  vm->thread->alive(state, cTrue);
  vm->thread->init_lock_.unlock();

  // Become GC-dependent after unlocking init_lock_ to avoid deadlocks.
  // gc_dependent may lock when it detects GC is happening. Also the parent
  // thread is locked until init_lock_ is unlocked by this child thread.
  state->gc_dependent(gct, 0);
  vm->thread->hard_lock(state, gct, 0);

  vm->shared.tool_broker()->thread_start(state);
  // Run the actual thread body.
  Object* ret = vm->thread->function_(state);
  vm->shared.tool_broker()->thread_stop(state);

  // Clear the call_frame, so that if we wait for GC going independent,
  // the GC doesn't see pointers into now-unallocated CallFrames
  vm->set_call_frame(0);

  // Mark the thread stopped and release its locked objects under
  // join_lock_, then wake any thread blocked in join().
  vm->thread->join_lock_.lock();
  vm->thread->stopped();

  LockedObjects& los = state->vm()->locked_objects();

  for(LockedObjects::iterator i = los.begin();
      i != los.end();
      ++i) {
    (*i)->unlock_for_terminate(state, gct, 0);
  }

  vm->thread->join_cond_.broadcast();
  vm->thread->join_lock_.unlock();

  NativeMethod::cleanup_thread(state);

  if(cDebugThreading) {
    utilities::logger::debug("Thread: exit thread: id: %d", vm->thread_id());
  }

  shared.gc_independent();

  // Main thread exiting, or a worker that died with cExit, tears the
  // whole process down; otherwise the VM lingers as a zombie until
  // collected.
  if(vm->main_thread_p()
      || (!ret && vm->thread_state()->raise_reason() == cExit)) {
    state->shared().signals()->system_exit(vm->thread_state()->raise_value());
  } else {
    vm->set_zombie(state);
  }

  RUBINIUS_THREAD_STOP(
      const_cast<RBX_DTRACE_CHAR_P>(vm->name().c_str()), vm->thread_id(), 0);

  return 0;
}
// Entry point for the OS thread backing a Ruby Thread.
//
// Sets stack bounds and timing info, runs the thread body via function(),
// then unwinds: reports profiling, releases locked objects, wakes joiners,
// leaves the managed (GC-visible) phase, and zombifies the VM. Main-thread
// exit (or an unhandled cExit) additionally requests process exit.
// The managed_phase/unmanaged_phase bracket and the join_lock_ critical
// section are order-sensitive — do not reorder.
// Always returns 0 (the pthread exit value is unused).
void* Thread::run(void* ptr) {
  VM* vm = reinterpret_cast<VM*>(ptr);

  State state_obj(vm), *state = &state_obj;

  vm->set_stack_bounds(vm->thread()->stack_size()->to_native());
  vm->set_current_thread();
  vm->set_start_time();

  RUBINIUS_THREAD_START(
      const_cast<RBX_DTRACE_CHAR_P>(vm->name().c_str()), vm->thread_id(), 0);

  vm->thread()->pid(state, Fixnum::from(gettid()));

  // Optional lifecycle logging, gated by configuration.
  if(state->shared().config.machine_thread_log_lifetime.value) {
    logger::write("thread: run: %s, %d, %#x",
        vm->name().c_str(), vm->thread()->pid()->to_native(),
        (unsigned int)thread_debug_self());
  }

  NativeMethod::init_thread(state);

  // Enter the managed phase: this thread is now visible to the GC.
  state->vm()->managed_phase(state);

  // Run the actual thread body.
  Object* value = vm->thread()->function()(state);
  // Clear the call frame so the GC never sees pointers into
  // now-unwound CallFrames.
  vm->set_call_frame(NULL);

  // Mark the thread stopped, release its locked objects under join_lock_,
  // then wake any thread blocked in join().
  vm->thread()->join_lock_.lock();
  vm->thread()->stopped();

  state->shared().report_profile(state);

  memory::LockedObjects& locked_objects = state->vm()->locked_objects();

  for(memory::LockedObjects::iterator i = locked_objects.begin();
      i != locked_objects.end();
      ++i) {
    (*i)->unlock_for_terminate(state);
  }
  locked_objects.clear();

  vm->thread()->join_cond_.broadcast();
  vm->thread()->join_lock_.unlock();

  NativeMethod::cleanup_thread(state);

  if(state->shared().config.machine_thread_log_lifetime.value) {
    logger::write("thread: exit: %s %fs", vm->name().c_str(), vm->run_time());
  }

  // Leave the managed phase before tearing down.
  vm->unmanaged_phase(state);

  // Main thread exiting, or a worker that died with cExit, requests
  // process-wide exit; the VM is zombified in all cases (NOTE(review):
  // unlike older revisions, set_zombie runs even on the exit path —
  // presumably intentional; confirm against VM::set_zombie semantics).
  if(vm->main_thread_p()
      || (!value && vm->thread_state()->raise_reason() == cExit)) {
    state->shared().signals()->system_exit(vm->thread_state()->raise_value());
  }

  vm->set_zombie(state);

  RUBINIUS_THREAD_STOP(
      const_cast<RBX_DTRACE_CHAR_P>(vm->name().c_str()), vm->thread_id(), 0);

  return 0;
}