void* Thread::run(void* ptr) {
  VM* vm = reinterpret_cast<VM*>(ptr);

  State state_obj(vm), *state = &state_obj;

  // Bind this native thread to its VM: stack bounds, the thread-local
  // current-thread pointer, and the start timestamp.
  vm->set_stack_bounds(vm->thread()->stack_size()->to_native());
  vm->set_current_thread();
  vm->set_start_time();

  RUBINIUS_THREAD_START(
      const_cast<RBX_DTRACE_CHAR_P>(vm->name().c_str()), vm->thread_id(), 0);

  // Record the kernel thread id on the Thread object.
  vm->thread()->pid(state, Fixnum::from(gettid()));

  if(state->shared().config.log_thread_lifetime.value) {
    logger::write("thread: run: %s, %d, %#x",
        vm->name().c_str(), vm->thread()->pid()->to_native(),
        (unsigned int)thread_debug_self());
  }

  NativeMethod::init_thread(state);

  // Enter the managed phase so the GC can see this thread, then run
  // the Ruby-level thread body.
  state->vm()->managed_phase(state);

  Object* value = vm->thread()->function()(state);
  vm->set_call_frame(NULL);

  // Mark the thread stopped under join_lock_, release every object it
  // still holds locked, then wake any thread blocked in Thread#join.
  vm->thread()->join_lock_.lock();
  vm->thread()->stopped();

  memory::LockedObjects& locked_objects = state->vm()->locked_objects();
  for(memory::LockedObjects::iterator i = locked_objects.begin();
      i != locked_objects.end();
      ++i) {
    (*i)->unlock_for_terminate(state);
  }
  locked_objects.clear();

  vm->thread()->join_cond_.broadcast();
  vm->thread()->join_lock_.unlock();

  NativeMethod::cleanup_thread(state);

  if(state->shared().config.log_thread_lifetime.value) {
    logger::write("thread: exit: %s %fs", vm->name().c_str(), vm->run_time());
  }

  // Leave the managed phase; the GC no longer waits on this thread.
  vm->unmanaged_phase(state);

  // The main thread exiting, or any thread dying with an unhandled
  // exit, takes the whole process down.
  if(vm->main_thread_p()
      || (!value && vm->thread_state()->raise_reason() == cExit)) {
    state->shared().signals()->system_exit(vm->thread_state()->raise_value());
  }

  vm->set_zombie(state);

  RUBINIUS_THREAD_STOP(
      const_cast<RBX_DTRACE_CHAR_P>(vm->name().c_str()), vm->thread_id(), 0);

  return 0;
}
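// A minimal, self-contained sketch (not Rubinius code) of the join handshake
// the function above performs with join_lock_/join_cond_: the dying thread
// marks itself stopped and broadcasts under the lock, so a joiner waiting on
// the same condition can never miss the wakeup. All names below are
// illustrative assumptions, not the VM's API.
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

struct JoinState {
  std::mutex join_lock;               // plays the role of join_lock_
  std::condition_variable join_cond;  // plays the role of join_cond_
  bool stopped = false;               // what thread()->stopped() records
};

void worker_exit(JoinState& js) {
  std::lock_guard<std::mutex> guard(js.join_lock);
  js.stopped = true;                  // analogue of vm->thread()->stopped()
  js.join_cond.notify_all();          // analogue of join_cond_.broadcast()
}

void join(JoinState& js) {
  std::unique_lock<std::mutex> guard(js.join_lock);
  // The predicate guards against spurious wakeups and against the worker
  // finishing before the joiner starts waiting.
  js.join_cond.wait(guard, [&] { return js.stopped; });
}

int main() {
  JoinState js;
  std::thread t([&] { worker_exit(js); });
  join(js);                           // returns once the worker broadcast
  t.join();
  std::cout << "worker stopped\n";
}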
void* Thread::run(void* ptr) {
  GCTokenImpl gct;

  VM* vm = reinterpret_cast<VM*>(ptr);

  SharedState& shared = vm->shared;
  State state_obj(vm), *state = &state_obj;

  vm->set_current_thread();

  RUBINIUS_THREAD_START(
      const_cast<RBX_DTRACE_CHAR_P>(vm->name().c_str()), vm->thread_id(), 0);

  if(cDebugThreading) {
    utilities::logger::debug("Thread: start thread: id: %d, pthread: %d",
        vm->thread_id(), (unsigned int)thread_debug_self());
  }

  int stack_address = 0;
  vm->set_root_stack(reinterpret_cast<uintptr_t>(&stack_address),
      THREAD_STACK_SIZE);

  NativeMethod::init_thread(state);

  vm->thread->pid(state, Fixnum::from(gettid()));

  // Lock the thread object and unlock it at __run__ in the ruby land.
  vm->thread->alive(state, cTrue);
  vm->thread->init_lock_.unlock();

  // Become GC-dependent after unlocking init_lock_ to avoid deadlocks.
  // gc_dependent may lock when it detects GC is happening. Also the parent
  // thread is locked until init_lock_ is unlocked by this child thread.
  state->gc_dependent(gct, 0);
  vm->thread->hard_lock(state, gct, 0);

  vm->shared.tool_broker()->thread_start(state);
  Object* ret = vm->thread->function_(state);
  vm->shared.tool_broker()->thread_stop(state);

  // Clear the call_frame, so that if we wait for GC going independent,
  // the GC doesn't see pointers into now-unallocated CallFrames.
  vm->set_call_frame(0);

  vm->thread->join_lock_.lock();
  vm->thread->stopped();

  LockedObjects& los = state->vm()->locked_objects();
  for(LockedObjects::iterator i = los.begin();
      i != los.end();
      ++i) {
    (*i)->unlock_for_terminate(state, gct, 0);
  }

  vm->thread->join_cond_.broadcast();
  vm->thread->join_lock_.unlock();

  NativeMethod::cleanup_thread(state);

  if(cDebugThreading) {
    utilities::logger::debug("Thread: exit thread: id: %d", vm->thread_id());
  }

  shared.gc_independent();

  if(vm->main_thread_p()
      || (!ret && vm->thread_state()->raise_reason() == cExit)) {
    state->shared().signals()->system_exit(vm->thread_state()->raise_value());
  } else {
    vm->set_zombie(state);
  }

  RUBINIUS_THREAD_STOP(
      const_cast<RBX_DTRACE_CHAR_P>(vm->name().c_str()), vm->thread_id(), 0);

  return 0;
}
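// A minimal sketch (illustrative names, not the VM's API) of the init_lock_
// handshake used above: the spawning thread blocks until the child has
// finished its per-thread setup (the analogue of set_root_stack,
// init_thread, pid, alive) before proceeding. Rubinius releases from the
// child a lock taken by the parent; since unlocking a std::mutex from
// another thread is undefined behaviour, this sketch uses a C++20 binary
// semaphore, which permits a cross-thread release.
#include <iostream>
#include <semaphore>
#include <thread>

int main() {
  std::binary_semaphore init_done(0);  // plays the role of init_lock_

  std::thread child([&] {
    // ... per-thread setup would happen here ...
    init_done.release();               // analogue of init_lock_.unlock()
    // ... the thread body runs on; the parent is already unblocked ...
  });

  init_done.acquire();                 // parent blocks until setup is done
  std::cout << "child initialized\n";
  child.join();
}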