Code example #1
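    // become_dependent(): move this thread back into GC-dependent mode.
    // If a stop-the-world is in progress (should_stop_), the thread marks
    // itself suspended, signals the optional condition, and sleeps on
    // waiting_to_run_ until restarted; it then marks itself running and
    // counts itself back into pending_threads_.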
    void become_dependent(THREAD, utilities::thread::Condition* cond = NULL) {
      switch(state->run_state()) {
      case ManagedThread::eAlone:
        // Running alone, ignore.
        return;
      case ManagedThread::eRunning:
        // Ignore this, a running thread is already dependent.
        return;
      case ManagedThread::eSuspended:
        // Again, bad, don't allow this.
        rubinius::bug("Trying to make a suspended thread dependent");
        break;
      case ManagedThread::eIndependent:
        // If the GC is running, wait here...
        if(should_stop_) {
          utilities::thread::Mutex::LockGuard guard(mutex_);
          state->run_state_ = ManagedThread::eSuspended;
          if(cond) cond->broadcast();
          // We need to grab the mutex because we might want
          // to wait here.
          while(should_stop_) {
            waiting_to_run_.wait(mutex_);
          }
        }

        // Ok, we're running again.
        state->run_state_ = ManagedThread::eRunning;
        atomic::fetch_and_add(&pending_threads_, 1);
      }
    }
Code example #2
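    // wait_to_run(): a running (GC-dependent) thread parks itself while the
    // world is stopped: mark itself suspended, drop out of pending_threads_,
    // sleep on waiting_to_run_ until should_stop_ clears, then count itself
    // back in and resume running.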
    void wait_to_run(THREAD) {
      if(cDebugThreading) {
        std::cerr << "[" << VM::current() << " WORLD stopping, waiting to be restarted]\n";
      }

      if(state->run_state_ != ManagedThread::eRunning) {
        rubinius::bug("Suspending a non running thread!");
      }

      state->run_state_ = ManagedThread::eSuspended;
      atomic::fetch_and_sub(&pending_threads_, 1);

      utilities::thread::Mutex::LockGuard guard(mutex_);
      // Ok, taking the lock just above already implies a memory barrier,
      // so we don't have to add an explicit barrier here.
      while(should_stop_) {
        waiting_to_run_.wait(mutex_);
      }

      atomic::fetch_and_add(&pending_threads_, 1);
      state->run_state_ = ManagedThread::eRunning;

      if(cDebugThreading) {
        std::cerr << "[" << VM::current() << " WORLD restarted]\n";
      }
    }
Code example #3
File: state.cpp  Project: dziulius/rubinius
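    // pause(): ask the background thread to pause. Returns immediately if it
    // is already idle or paused; otherwise sets pause_ and waits on
    // pause_condition_ until the pause is acknowledged (or the thread is no
    // longer running or independent).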
    void pause() {
      utilities::thread::Mutex::LockGuard guard(mutex_);

      // Already idle or paused; nothing to do.
      if(state == cIdle || state == cPaused) return;

      pause_ = true;

      while(!paused_ && (ls_->run_state() == ManagedThread::eRunning ||
                         ls_->run_state() == ManagedThread::eIndependent)) {
        pause_condition_.wait(mutex_);
      }
    }
Code example #4
File: world_state.hpp  Project: Halfnhav/rubinius
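    // become_dependent(): as in example #1, but wrapped in a retry loop.
    // After incrementing pending_threads_ it re-checks should_stop_, so a
    // stop request that raced with the increment makes this thread back out
    // and wait instead of running through the stop-the-world.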
    void become_dependent(THREAD, utilities::thread::Condition* cond = NULL) {
      switch(state->run_state()) {
      case ManagedThread::eAlone:
        // Running alone, ignore.
        return;
      case ManagedThread::eRunning:
        // Ignore this, a running thread is already dependent.
        return;
      case ManagedThread::eSuspended:
        // Again, bad, don't allow this.
        rubinius::bug("Trying to make a suspended thread dependent");
        break;
      case ManagedThread::eIndependent:
        for(;;) {
          // If the GC is running, wait here...
          if(should_stop_) {
            utilities::thread::Mutex::LockGuard guard(mutex_);
            state->run_state_ = ManagedThread::eSuspended;
            if(cond) cond->broadcast();
            // We need to grab the mutex because we might want
            // to wait here.
            while(should_stop_) {
              waiting_to_run_.wait(mutex_);
            }
          }

          // Ok, we're running again.
          state->run_state_ = ManagedThread::eRunning;
          atomic::fetch_and_add(&pending_threads_, 1);
          // After incrementing the thread count, we have to re-check
          // whether we need to stop. There is a race here: should_stop_
          // may have been false when we checked it above, before the
          // increment, while another thread preparing to GC set it and,
          // not yet seeing our increment, concluded it was safe to GC.
          //
          if(!atomic::read(&should_stop_)) return;
          // If we do have to stop, subtract one from the thread count
          // and retry again. This will make the thread go into the wait.
          atomic::fetch_and_sub(&pending_threads_, 1);
        }
      }
    }
Code example #5
File: world_state.hpp  Project: Halfnhav/rubinius
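    // stop_threads_externally(): stop the world from outside the managed
    // threads. Claim should_stop_ with a compare-and-swap (waiting out any
    // stop already in progress), request a global interrupt check, then spin
    // until pending_threads_ drops to zero, i.e. until every dependent
    // thread has parked itself.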
    void stop_threads_externally() {
      while(!atomic::compare_and_swap(&should_stop_, 0, 1)) {
        if(cDebugThreading) {
          std::cerr << "[WORLD waiting to stopping all threads (as external event)]\n";
        }
        // Wait on the run condition variable until whoever currently has
        // the world stopped is done and sets should_stop_ back to false.
        utilities::thread::Mutex::LockGuard guard(mutex_);
        while(should_stop_) {
          waiting_to_run_.wait(mutex_);
        }
        // We will now redo the loop to check if we can stop properly this time
      }

      if(cDebugThreading) {
        std::cerr << "[WORLD stopping all threads (as external event)]\n";
      }

      *check_global_interrupts_ = true;
      // We need a memory barrier so we're sure we're seeing an up to
      // date value of pending_threads_ on each iteration.
      while(atomic::read(&pending_threads_) > 0) {
        if(cDebugThreading) {
          std::cerr << "[" << VM::current() << " WORLD waiting on condvar: "
                    << pending_threads_ << "]\n";
        }
        // We yield here so other threads get scheduled and can run.
        // Benchmarking showed this causes the least CPU burn compared to
        // doing nothing at all here or sleeping for 1 nanosecond with {0, 1}.
        atomic::pause();
      }

      if(cDebugThreading) {
        std::cerr << "[" << VM::current() << " WORLD o/~ I think we're alone now.. o/~]\n";
      }
    }
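
Examples #1, #2, and #4 show the mutator side of this stop-the-world handshake and example #5 the stopping side: a stop flag, a count of threads the stopper must wait for, and a condition variable the parked threads sleep on. Below is a minimal, self-contained sketch of the same pattern using standard C++11 primitives instead of Rubinius' utilities::thread wrappers; every name in it (StopTheWorld, checkpoint, stop_all, ...) is illustrative and not part of the Rubinius API.

    #include <atomic>
    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>
    #include <vector>

    // Minimal stop-the-world handshake, loosely following the shape of the
    // Rubinius code above: a stop flag, a count of threads the stopper has
    // to wait for, and a condition variable the parked threads sleep on.
    class StopTheWorld {
      std::mutex mutex_;
      std::condition_variable waiting_to_run_;
      std::atomic<bool> should_stop_{false};
      std::atomic<int> pending_threads_{0};

    public:
      // A thread re-enters "dependent" mode (cf. become_dependent above).
      void become_dependent() {
        for(;;) {
          if(should_stop_.load()) {
            std::unique_lock<std::mutex> lock(mutex_);
            waiting_to_run_.wait(lock, [this] { return !should_stop_.load(); });
          }
          pending_threads_.fetch_add(1);
          // Re-check after the increment: a stopper may have raised the flag
          // between our check and our increment (the race example #4 closes).
          if(!should_stop_.load()) return;
          pending_threads_.fetch_sub(1);
        }
      }

      // A dependent thread parks itself at a safe point (cf. wait_to_run).
      void checkpoint() {
        if(!should_stop_.load()) return;
        pending_threads_.fetch_sub(1);
        {
          std::unique_lock<std::mutex> lock(mutex_);
          waiting_to_run_.wait(lock, [this] { return !should_stop_.load(); });
        }
        pending_threads_.fetch_add(1);
      }

      // The stopping thread claims the stop flag and waits for everyone to
      // park (cf. stop_threads_externally).
      void stop_all() {
        bool expected = false;
        while(!should_stop_.compare_exchange_strong(expected, true)) {
          // Someone else already has the world stopped; wait them out.
          std::unique_lock<std::mutex> lock(mutex_);
          waiting_to_run_.wait(lock, [this] { return !should_stop_.load(); });
          expected = false;
        }
        while(pending_threads_.load() > 0) std::this_thread::yield();
      }

      // The stopping thread lets everyone run again.
      void restart_all() {
        {
          std::lock_guard<std::mutex> lock(mutex_);
          should_stop_.store(false);
        }
        waiting_to_run_.notify_all();
      }
    };

    int main() {
      StopTheWorld world;
      std::atomic<bool> done{false};

      std::vector<std::thread> mutators;
      for(int i = 0; i < 4; i++) {
        mutators.emplace_back([&] {
          world.become_dependent();
          while(!done.load()) {
            world.checkpoint();         // safe point: parks if a stop is pending
            std::this_thread::yield();  // stands in for real mutator work
          }
        });
      }

      for(int i = 0; i < 100; i++) {
        world.stop_all();     // every mutator is now parked in checkpoint()
        world.restart_all();  // wake them all up again
      }

      done.store(true);
      for(std::thread& t : mutators) t.join();
      std::puts("stop-the-world handshake completed");
    }

The sketch keeps the same design choice as the originals: the stopper never blocks on a condition variable for the count, it simply spins (yielding) until pending_threads_ reaches zero, while the mutators do all of their sleeping on waiting_to_run_.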
Code example #6
File: state.cpp  Project: dziulius/rubinius
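    // perform(): main loop of the background JIT compiler thread. It blocks
    // until a compile request arrives (honoring pause and stop requests),
    // compiles the request while GC-dependent, installs the generated
    // function unless JIT'ing was disabled in the meantime, signals any
    // waiter, and goes back to being GC-independent.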
    virtual void perform() {
      const char* thread_name = "rbx.jit";
      ManagedThread::set_current(ls_, thread_name);

      ls_->set_run_state(ManagedThread::eIndependent);

      RUBINIUS_THREAD_START(thread_name, ls_->thread_id(), 1);

#ifndef RBX_WINDOWS
      sigset_t set;
      sigfillset(&set);
      pthread_sigmask(SIG_SETMASK, &set, NULL);
#endif

      for(;;) { // forever

        BackgroundCompileRequest* req = 0;

        // Lock, wait, get a request, unlock
        {
          utilities::thread::Mutex::LockGuard guard(mutex_);

          if(pause_) {
            state = cPaused;

            paused_ = true;
            pause_condition_.broadcast();

            if(stop_) goto halt;

            while(pause_) {
              condition_.wait(mutex_);
              if(stop_) goto halt;
            }

            state = cUnknown;
            paused_ = false;
          }

          // If we've been asked to stop, do so now.
          if(stop_) goto halt;


          while(pending_requests_.empty()) {
            state = cIdle;

            // unlock and wait...
            condition_.wait(mutex_);

            if(stop_) goto halt;
          }

          // now locked again, shift a request
          req = pending_requests_.front();

          state = cRunning;
        }

        // This isn't ideal, but it's the safest. Keep the GC from
        // running while we're building the IR.
        ls_->gc_dependent();

        Context ctx(ls_);
        jit::Compiler jit(&ctx);

        // The mutex is now unlocked, allowing others to push more requests.

        current_req_ = req;
        current_compiler_ = &jit;

        int spec_id = 0;
        Class* cls = req->receiver_class();
        if(cls && !cls->nil_p()) {
          spec_id = cls->class_id();
        }

        void* func = 0;
        {
          timer::Running<1000000> timer(ls_->shared().stats.jit_time_spent);

          jit.compile(req);

          func = jit.generate_function();
        }

        // We were unable to compile this function, likely
        // because it's got something we don't support.
        if(!func) {
          if(ls_->config().jit_show_compiling) {
            CompiledCode* code = req->method();
            llvm::outs() << "[[[ JIT error background compiling "
                      << ls_->enclosure_name(code) << "#" << ls_->symbol_debug_str(code->name())
                      << (req->is_block() ? " (block)" : " (method)")
                      << " ]]]\n";
          }
          // If someone was waiting on this, wake them up.
          if(utilities::thread::Condition* cond = req->waiter()) {
            cond->signal();
          }

          current_req_ = 0;
          current_compiler_ = 0;
          pending_requests_.pop_front();
          delete req;

          // We don't depend on the GC here, so let it run independent
          // of us.
          ls_->gc_independent();

          continue;
        }

        if(show_machine_code_) {
          jit.show_machine_code();
        }

        // If JIT'ing has been disabled for this method since we started
        // compiling it, discard our work.
        if(!req->machine_code()->jit_disabled()) {

          jit::RuntimeDataHolder* rd = ctx.runtime_data_holder();

          atomic::memory_barrier();
          ls_->start_method_update();

          if(!req->is_block()) {
            if(spec_id) {
              req->method()->add_specialized(spec_id, reinterpret_cast<executor>(func), rd);
            } else {
              req->method()->set_unspecialized(reinterpret_cast<executor>(func), rd);
            }
          } else {
            req->method()->set_unspecialized(reinterpret_cast<executor>(func), rd);
          }

          req->machine_code()->clear_compiling();

          // assert(req->method()->jit_data());

          ls_->end_method_update();

          rd->run_write_barrier(ls_->write_barrier(), req->method());

          ls_->shared().stats.jitted_methods++;

          if(ls_->config().jit_show_compiling) {
            CompiledCode* code = req->method();
            llvm::outs() << "[[[ JIT finished background compiling "
                      << ls_->enclosure_name(code) << "#" << ls_->symbol_debug_str(code->name())
                      << (req->is_block() ? " (block)" : " (method)")
                      << " ]]]\n";
          }
        }

        // If someone was waiting on this, wake them up.
        if(utilities::thread::Condition* cond = req->waiter()) {
          cond->signal();
        }

        current_req_ = 0;
        current_compiler_ = 0;
        pending_requests_.pop_front();
        delete req;

        // We don't depend on the GC here, so let it run independent
        // of us.
        ls_->gc_independent();
      }

halt:
      RUBINIUS_THREAD_STOP(thread_name, ls_->thread_id(), 1);
    }