Example 1
  void* Thread::in_new_thread(void* ptr) {
    VM* vm = reinterpret_cast<VM*>(ptr);

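    // Local used only so its address marks the current stack position
    // for the stack-bounds setup below.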
    int calculate_stack = 0;
    NativeMethod::init_thread(vm);
    VM::set_current(vm);

    vm->set_call_frame(0);
    vm->shared.gc_dependent(vm);

    if(cDebugThreading) {
      std::cerr << "[THREAD " << vm->thread_id()
                << " (" << (unsigned int)thread_debug_self() << ") started thread]\n";
    }

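    // Record the current stack position and a 4MB size as this thread's stack bounds.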
    vm->set_stack_bounds(reinterpret_cast<uintptr_t>(&calculate_stack), 4194304);

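    // Release init_lock_; the parent thread waits on it until this child
    // has finished its setup.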
    vm->thread->init_lock_.unlock();

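    // Notify registered tools, run the Ruby-level thread body, then notify again on stop.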
    vm->shared.tool_broker()->thread_start(vm);
    Object* ret = vm->thread->runner_(vm);
    vm->shared.tool_broker()->thread_stop(vm);

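    // If the body returned nothing and the thread raised an exit, halt the environment.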
    if(!ret) {
      if(vm->thread_state()->raise_reason() == cExit) {
        vm->shared.env()->halt();
      }
    }

    vm->thread->init_lock_.lock();

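    // Unlock any objects this thread still holds locks on before it terminates.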
    std::list<ObjectHeader*>& los = vm->locked_objects();
    for(std::list<ObjectHeader*>::iterator i = los.begin();
        i != los.end();
        ++i) {
      (*i)->unlock_for_terminate(vm);
    }

    NativeMethod::cleanup_thread(vm);

    vm->thread->cleanup();
    vm->thread->init_lock_.unlock();

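    // Drop this VM from the shared list of managed threads.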
    vm->shared.remove_managed_thread(vm);

    // Clear the call_frame so that, if we block while going GC-independent,
    // the GC doesn't see pointers into CallFrames that no longer exist.
    vm->set_call_frame(0);
    vm->shared.gc_independent(vm);
    vm->shared.clear_critical(vm);

    VM::discard(vm, vm);

    if(cDebugThreading) {
      std::cerr << "[LOCK thread " << vm->thread_id() << " exited]\n";
    }

    return 0;
  }
Example 2
  void* Thread::in_new_thread(void* ptr) {
    VM* vm = reinterpret_cast<VM*>(ptr);

    State state_obj(vm), *state = &state_obj;

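    // Local used only so its address marks the current stack position
    // for the root-stack setup below.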
    int calculate_stack = 0;
    NativeMethod::init_thread(state);

    // Keep tn at function scope so it is still valid for the
    // RUBINIUS_THREAD_START/RUBINIUS_THREAD_STOP probes below.
    std::ostringstream tn;
    tn << "rbx.ruby." << vm->thread_id();
    VM::set_current(vm, tn.str());

    RUBINIUS_THREAD_START(tn.str().c_str(), vm->thread_id(), 0);

    state->set_call_frame(0);

    if(cDebugThreading) {
      std::cerr << "[THREAD " << vm->thread_id()
                << " (" << (unsigned int)thread_debug_self() << ") started thread]\n";
    }

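    // Record the current stack position and THREAD_STACK_SIZE as this thread's root stack bounds.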
    vm->set_root_stack(reinterpret_cast<uintptr_t>(&calculate_stack), THREAD_STACK_SIZE);

    GCTokenImpl gct;

    // Lock the thread object here; it is unlocked again in __run__ on the Ruby side.
    vm->thread->hard_lock(state, gct);
    vm->thread->alive(state, cTrue);
    vm->thread->init_lock_.unlock();

    // Become GC-dependent only after unlocking init_lock_ to avoid deadlocks:
    // gc_dependent may block when it detects a GC is happening, and the parent
    // thread stays blocked until init_lock_ is unlocked by this child thread.
    vm->shared.gc_dependent(state);

    vm->shared.tool_broker()->thread_start(state);
    Object* ret = vm->thread->runner_(state);
    vm->shared.tool_broker()->thread_stop(state);

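    // If the body returned nothing and the thread raised an exit, halt the environment and exit.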
    if(!ret) {
      if(vm->thread_state()->raise_reason() == cExit) {
        vm->shared.env()->halt_and_exit(state);
      }
    }

    // Clear the call_frame so that, if we block while going GC-independent,
    // the GC doesn't see pointers into CallFrames that no longer exist.
    vm->set_call_frame(0);

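    // Unlock any objects this thread still holds locks on before it terminates.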
    LockedObjects& los = vm->locked_objects();
    for(LockedObjects::iterator i = los.begin();
        i != los.end();
        ++i) {
      (*i)->unlock_for_terminate(state, gct);
    }

    vm->thread->init_lock_.lock();
    NativeMethod::cleanup_thread(state);

    vm->thread->alive(state, cFalse);
    vm->thread->cleanup();
    vm->thread->init_lock_.unlock();

    vm->shared.gc_independent(state);
    vm->shared.clear_critical(state);

    VM::discard(state, vm);

    if(cDebugThreading) {
      std::cerr << "[LOCK thread " << vm->thread_id() << " exited]\n";
    }

    RUBINIUS_THREAD_STOP(tn.str().c_str(), vm->thread_id(), 0);
    return 0;
  }