void CallFrame::dump() {
  VM* vm = VM::current();
  State state_obj(vm), *state = &state_obj;

  std::cout << "<CallFrame:" << (void*)this << " ";

  if(native_method_p()) {
    std::cout << "capi>\n";
    return;
  }

  if(is_inline_frame()) {
    std::cout << "inline ";
  }

  if(is_block_p(state)) {
    std::cout << "block ";
  } else if(dispatch_data) {
    std::cout << "name=" << name()->debug_str(state) << " ";
  } else {
    std::cout << "name=" << compiled_code->name()->debug_str(state) << " ";
  }

  std::cout << "ip=" << ip_ << " ";
  std::cout << "line=" << line(state);
  std::cout << ">\n";
}
void* Thread::run(void* ptr) {
  VM* vm = reinterpret_cast<VM*>(ptr);

  State state_obj(vm), *state = &state_obj;

  vm->set_stack_bounds(vm->thread()->stack_size()->to_native());
  vm->set_current_thread();
  vm->set_start_time();

  RUBINIUS_THREAD_START(
      const_cast<RBX_DTRACE_CHAR_P>(vm->name().c_str()), vm->thread_id(), 0);

  vm->thread()->pid(state, Fixnum::from(gettid()));

  if(state->shared().config.log_thread_lifetime.value) {
    logger::write("thread: run: %s, %d, %#x",
        vm->name().c_str(), vm->thread()->pid()->to_native(),
        (unsigned int)thread_debug_self());
  }

  NativeMethod::init_thread(state);

  state->vm()->managed_phase(state);

  Object* value = vm->thread()->function()(state);
  vm->set_call_frame(NULL);

  vm->thread()->join_lock_.lock();
  vm->thread()->stopped();

  // Release any objects this thread still holds locked before it terminates.
  memory::LockedObjects& locked_objects = state->vm()->locked_objects();

  for(memory::LockedObjects::iterator i = locked_objects.begin();
      i != locked_objects.end();
      ++i) {
    (*i)->unlock_for_terminate(state);
  }
  locked_objects.clear();

  // Wake any threads blocked in Thread#join on this thread.
  vm->thread()->join_cond_.broadcast();
  vm->thread()->join_lock_.unlock();

  NativeMethod::cleanup_thread(state);

  if(state->shared().config.log_thread_lifetime.value) {
    logger::write("thread: exit: %s %fs", vm->name().c_str(), vm->run_time());
  }

  vm->unmanaged_phase(state);

  if(vm->main_thread_p() || (!value && vm->thread_state()->raise_reason() == cExit)) {
    state->shared().signals()->system_exit(vm->thread_state()->raise_value());
  }

  vm->set_zombie(state);

  RUBINIUS_THREAD_STOP(
      const_cast<RBX_DTRACE_CHAR_P>(vm->name().c_str()), vm->thread_id(), 0);

  return 0;
}
void Fiber::start_on_stack() {
#ifdef RBX_FIBER_ENABLED
  VM* vm = VM::current();
  State state_obj(vm), *state = &state_obj;

  Fiber* fib = Fiber::current(state);

  // Reset the current fiber again to reset the stack limits so
  // we can properly detect stack overflows
  vm->set_current_fiber(fib);

  Array* result = nil<Array>();
  Object* obj = fib->starter()->send(state, G(sym_call), fib->value(), cNil, false);

  // GC has run! Don't use stack vars!

  fib = Fiber::current(state);
  fib->status(Fiber::eDead);
  fib->dead(cTrue);
  fib->set_call_frame(state, 0);

  Fiber* dest = fib->prev();

  // If this fiber has already been cleaned up, just ignore this
  if(!dest->data()) return;

  assert(!dest->nil_p());

  // Box this up so it's in a standard format at the point
  // of returning, so we can deal with it in the same way
  // as *args from #yield, #resume, and #transfer
  if(obj) {
    result = Array::create(state, 1);
    result->set(state, 0, obj);
  } else {
    if(state->vm()->thread_state()->raise_reason() == cException) {
      dest->exception(state, state->vm()->thread_state()->current_exception());
    }
  }

  vm->metrics().system.fibers_destroyed++;

  dest->run(state);
  dest->value(state, result);

  dest->data()->switch_and_orphan(state, fib->data());

  // TODO: CallFrame: return from this function
  rubinius::bug("returning from Fiber::start_on_stack");
#else
  rubinius::bug("Fibers not supported on this platform");
#endif
}
Object* ImmixGC::saw_object(Object* obj) {
#ifdef ENABLE_OBJECT_WATCH
  if(watched_p(obj)) {
    std::cout << "detected " << obj << " during immix scanning.\n";
  }
#endif

  if(!obj->reference_p()) return obj;

  memory::Address fwd = gc_.mark_address(memory::Address(obj), allocator_);
  Object* copy = fwd.as<Object>();

  // Check and update an inflated header
  if(copy && copy != obj && obj->inflated_header_p()) {
    InflatedHeader* ih = obj->deflate_header();
    ih->reset_object(copy);

    State state_obj(state());
    if(!copy->set_inflated_header(&state_obj, ih, obj->current_header())) {
      rubinius::bug("Massive IMMIX inflated header screwup.");
    }
  }

  return copy;
}
void ImmixGC::check_finalize() {
  // If finalizers are running right now, just fixup any finalizer references
  if(object_memory_->running_finalizers()) {
    for(std::list<FinalizeObject>::iterator i = object_memory_->finalize().begin();
        i != object_memory_->finalize().end();
        ++i) {
      if(i->object) {
        i->object = saw_object(i->object);
      }

      if(i->ruby_finalizer) {
        i->ruby_finalizer = saw_object(i->ruby_finalizer);
      }
    }
    return;
  }

  for(std::list<FinalizeObject>::iterator i = object_memory_->finalize().begin();
      i != object_memory_->finalize().end();
      ) {
    FinalizeObject& fi = *i;

    if(i->ruby_finalizer) {
      i->ruby_finalizer = saw_object(i->ruby_finalizer);
    }

    bool remove = false;

    switch(i->status) {
    case FinalizeObject::eLive:
      if(!i->object->marked_p(object_memory_->mark())) {
        // Run C finalizers now rather than queue them.
        if(i->finalizer) {
          State state_obj(state());
          (*i->finalizer)(&state_obj, i->object);
          i->status = FinalizeObject::eFinalized;
          remove = true;
        } else {
          i->queued();
          object_memory_->add_to_finalize(&fi);

          // We have to still keep it alive though until we finish with it.
          i->object = saw_object(i->object);
        }
      } else {
        // Update the reference
        i->object = saw_object(i->object);
      }
      break;
    case FinalizeObject::eQueued:
      // Nothing, we haven't gotten to it yet.
      // Keep waiting and keep i->object updated.
      i->object = saw_object(i->object);
      i->queue_count++;
      break;
    case FinalizeObject::eFinalized:
      if(!i->object->marked_p(object_memory_->mark())) {
        // finalized and done with.
        remove = true;
      } else {
        // RESURRECTION!
        i->queued();
        i->object = saw_object(i->object);
      }
      break;
    }

    if(remove) {
      i = object_memory_->finalize().erase(i);
    } else {
      ++i;
    }
  }
}
void* Thread::in_new_thread(void* ptr) {
  VM* vm = reinterpret_cast<VM*>(ptr);

  State state_obj(vm), *state = &state_obj;

  int calculate_stack = 0;
  NativeMethod::init_thread(state);

  // Name the thread after its id; keep the stream at function scope so the
  // start/stop probes below can reuse the name.
  std::ostringstream tn;
  tn << "rbx.ruby." << vm->thread_id();
  VM::set_current(vm, tn.str());

  RUBINIUS_THREAD_START(tn.str().c_str(), vm->thread_id(), 0);

  state->set_call_frame(0);

  if(cDebugThreading) {
    std::cerr << "[THREAD " << vm->thread_id()
              << " (" << (unsigned int)thread_debug_self() << ") started thread]\n";
  }

  vm->set_root_stack(reinterpret_cast<uintptr_t>(&calculate_stack), THREAD_STACK_SIZE);

  GCTokenImpl gct;

  // Lock the thread object and unlock it at __run__ in the ruby land.
  vm->thread->hard_lock(state, gct);
  vm->thread->alive(state, cTrue);
  vm->thread->init_lock_.unlock();

  // Become GC-dependent after unlocking init_lock_ to avoid deadlocks.
  // gc_dependent may lock when it detects GC is happening. Also the parent
  // thread is locked until init_lock_ is unlocked by this child thread.
  vm->shared.gc_dependent(state);

  vm->shared.tool_broker()->thread_start(state);
  Object* ret = vm->thread->runner_(state);
  vm->shared.tool_broker()->thread_stop(state);

  if(!ret) {
    if(vm->thread_state()->raise_reason() == cExit) {
      vm->shared.env()->halt_and_exit(state);
    }
  }

  // Clear the call_frame, so that if we wait for GC going independent,
  // the GC doesn't see pointers into now-unallocated CallFrames
  vm->set_call_frame(0);

  LockedObjects& los = vm->locked_objects();
  for(LockedObjects::iterator i = los.begin();
      i != los.end();
      ++i) {
    (*i)->unlock_for_terminate(state, gct);
  }

  vm->thread->init_lock_.lock();

  NativeMethod::cleanup_thread(state);

  vm->thread->alive(state, cFalse);
  vm->thread->cleanup();
  vm->thread->init_lock_.unlock();

  vm->shared.gc_independent(state);
  vm->shared.clear_critical(state);

  VM::discard(state, vm);

  if(cDebugThreading) {
    std::cerr << "[LOCK thread " << vm->thread_id() << " exited]\n";
  }

  RUBINIUS_THREAD_STOP(tn.str().c_str(), vm->thread_id(), 0);

  return 0;
}
void* Fiber::run(void* ptr) {
  VM* vm = reinterpret_cast<VM*>(ptr);
  State state_obj(vm), *state = &state_obj;

  vm->set_stack_bounds(vm->fiber()->stack_size()->to_native());
  vm->set_current_thread();
  vm->set_start_time();

  RUBINIUS_THREAD_START(
      const_cast<RBX_DTRACE_CHAR_P>(vm->name().c_str()),
      vm->fiber()->fiber_id()->to_native(), 0);

  vm->fiber()->pid(state, Fixnum::from(gettid()));

  if(state->shared().config.machine_thread_log_lifetime.value) {
    logger::write("fiber: run: %s, %d, %#x",
        vm->name().c_str(), vm->fiber()->pid()->to_native(),
        (intptr_t)pthread_self());
  }

  NativeMethod::init_thread(state);

  vm->fiber()->suspend_and_continue(state);

  Object* value = vm->fiber()->block()->send(state, G(sym_call),
      as<Array>(vm->thread()->fiber_value()), vm->fiber()->block());
  vm->set_call_frame(NULL);

  if(value) {
    vm->thread()->fiber_value(state, value);
  } else {
    vm->thread()->fiber_value(state, cNil);
  }

  if(vm->thread_state()->raise_reason() != cFiberCancel) {
    if(vm->fiber()->status() == eTransfer) {
      // restart the root Fiber
      vm->thread()->fiber()->invoke_context(vm);
      vm->thread()->fiber()->restart(state);
    } else {
      vm->fiber()->invoke_context()->fiber()->restart(state);
    }
  }

  {
    std::lock_guard<std::mutex> guard(vm->fiber_wait_mutex());

    vm->fiber()->status(eDead);
    vm->set_suspended();
  }

  vm->unmanaged_phase();

  state->shared().report_profile(state);

  NativeMethod::cleanup_thread(state);

  if(state->shared().config.machine_fiber_log_lifetime.value) {
    logger::write("fiber: exit: %s %fs", vm->name().c_str(), vm->run_time());
  }

  vm->set_zombie(state);

  RUBINIUS_THREAD_STOP(
      const_cast<RBX_DTRACE_CHAR_P>(vm->name().c_str()),
      vm->fiber()->fiber_id()->to_native(), 0);

  return 0;
}
void* Thread::in_new_thread(void* ptr) {
  VM* vm = reinterpret_cast<VM*>(ptr);

  State state_obj(vm), *state = &state_obj;

  int calculate_stack = 0;
  NativeMethod::init_thread(state);
  VM::set_current(vm);

  state->set_call_frame(0);
  vm->shared.gc_dependent(state);

  if(cDebugThreading) {
    std::cerr << "[THREAD " << vm->thread_id()
              << " (" << (unsigned int)thread_debug_self() << ") started thread]\n";
  }

  vm->set_root_stack(reinterpret_cast<uintptr_t>(&calculate_stack), 4194304);

  vm->thread->init_lock_.unlock();

  vm->shared.tool_broker()->thread_start(state);
  Object* ret = vm->thread->runner_(state);
  vm->shared.tool_broker()->thread_stop(state);

  if(!ret) {
    if(vm->thread_state()->raise_reason() == cExit) {
      vm->shared.env()->halt_and_exit(state);
    }
  }

  vm->thread->init_lock_.lock();

  GCTokenImpl gct;
  std::list<ObjectHeader*>& los = vm->locked_objects();
  for(std::list<ObjectHeader*>::iterator i = los.begin();
      i != los.end();
      i++) {
    (*i)->unlock_for_terminate(state, gct);
  }

  NativeMethod::cleanup_thread(state);

  vm->thread->alive(state, cFalse);
  vm->thread->cleanup();
  vm->thread->init_lock_.unlock();

  vm->shared.remove_managed_thread(vm);

  // Clear the call_frame, so that if we wait for GC going independent,
  // the GC doesn't see pointers into now-unallocated CallFrames
  vm->set_call_frame(0);

  vm->shared.gc_independent(state);
  vm->shared.clear_critical(state);

  VM::discard(state, vm);

  if(cDebugThreading) {
    std::cerr << "[LOCK thread " << vm->thread_id() << " exited]\n";
  }

  return 0;
}
void* Thread::run(void* ptr) {
  GCTokenImpl gct;

  VM* vm = reinterpret_cast<VM*>(ptr);

  SharedState& shared = vm->shared;

  State state_obj(vm), *state = &state_obj;

  vm->set_current_thread();

  RUBINIUS_THREAD_START(
      const_cast<RBX_DTRACE_CHAR_P>(vm->name().c_str()), vm->thread_id(), 0);

  if(cDebugThreading) {
    utilities::logger::debug("Thread: start thread: id: %d, pthread: %d",
        vm->thread_id(), (unsigned int)thread_debug_self());
  }

  int stack_address = 0;
  vm->set_root_stack(reinterpret_cast<uintptr_t>(&stack_address), THREAD_STACK_SIZE);

  NativeMethod::init_thread(state);

  vm->thread->pid(state, Fixnum::from(gettid()));

  // Lock the thread object and unlock it at __run__ in the ruby land.
  vm->thread->alive(state, cTrue);
  vm->thread->init_lock_.unlock();

  // Become GC-dependent after unlocking init_lock_ to avoid deadlocks.
  // gc_dependent may lock when it detects GC is happening. Also the parent
  // thread is locked until init_lock_ is unlocked by this child thread.
  state->gc_dependent(gct, 0);
  vm->thread->hard_lock(state, gct, 0);

  vm->shared.tool_broker()->thread_start(state);
  Object* ret = vm->thread->function_(state);
  vm->shared.tool_broker()->thread_stop(state);

  // Clear the call_frame, so that if we wait for GC going independent,
  // the GC doesn't see pointers into now-unallocated CallFrames
  vm->set_call_frame(0);

  vm->thread->join_lock_.lock();
  vm->thread->stopped();

  LockedObjects& los = state->vm()->locked_objects();
  for(LockedObjects::iterator i = los.begin();
      i != los.end();
      ++i) {
    (*i)->unlock_for_terminate(state, gct, 0);
  }

  vm->thread->join_cond_.broadcast();
  vm->thread->join_lock_.unlock();

  NativeMethod::cleanup_thread(state);

  if(cDebugThreading) {
    utilities::logger::debug("Thread: exit thread: id: %d", vm->thread_id());
  }

  shared.gc_independent();

  if(vm->main_thread_p() || (!ret && vm->thread_state()->raise_reason() == cExit)) {
    state->shared().signals()->system_exit(vm->thread_state()->raise_value());
  } else {
    vm->set_zombie(state);
  }

  RUBINIUS_THREAD_STOP(
      const_cast<RBX_DTRACE_CHAR_P>(vm->name().c_str()), vm->thread_id(), 0);

  return 0;
}