Example #1
    void restart() {
      mutex_.init();
      condition_.init();
      pause_condition_.init();

      state = cUnknown;
      stop_ = false;
      pause_ = false;
      paused_ = false;

      run();
    }
Example #2
 BackgroundCompilerThread(LLVMState* ls)
   : Thread(0, false)
   , ls_(ls)
   , current_compiler_(0)
   , current_req_(0)
   , state(cUnknown)
   , stop_(false)
   , pause_(false)
   , paused_(false)
 {
   show_machine_code_ = ls->jit_dump_code() & cMachineCode;
   condition_.init();
   pause_condition_.init();
 }
Example #3
    void unpark() {
      utilities::thread::Mutex::LockGuard lg(mutex_);
      if(!sleeping_) return;

      wake_ = true;
      cond_.signal();
    }
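
The park() side this unpark() pairs with is not among these examples; below is a minimal self-contained sketch of the pattern on std::thread primitives (an illustration only, not the Rubinius source; the Parker name is hypothetical):

    #include <condition_variable>
    #include <mutex>

    class Parker {
      std::mutex mutex_;
      std::condition_variable cond_;
      bool sleeping_ = false;
      bool wake_ = false;

    public:
      void park() {
        std::unique_lock<std::mutex> lg(mutex_);
        sleeping_ = true;
        // wake_ guards against spurious wakeups; since the mutex is held
        // from here until the wait atomically releases it, an unpark()
        // cannot slip in between setting sleeping_ and going to sleep.
        while(!wake_) cond_.wait(lg);
        wake_ = false;
        sleeping_ = false;
      }

      void unpark() {
        std::lock_guard<std::mutex> lg(mutex_);
        if(!sleeping_) return;  // nobody is parked, nothing to wake
        wake_ = true;
        cond_.notify_one();
      }
    };
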
Example #4
    void wait_to_run(THREAD) {
      if(cDebugThreading) {
        std::cerr << "[" << VM::current() << " WORLD stopping, waiting to be restarted]\n";
      }

      if(state->run_state_ != ManagedThread::eRunning) {
        rubinius::bug("Suspending a non running thread!");
      }

      state->run_state_ = ManagedThread::eSuspended;
      atomic::fetch_and_sub(&pending_threads_, 1);

      utilities::thread::Mutex::LockGuard guard(mutex_);
      // Since we've just acquired the lock, that implies a memory
      // barrier, so we don't need an explicit barrier here.
      while(should_stop_) {
        waiting_to_run_.wait(mutex_);
      }

      atomic::fetch_and_add(&pending_threads_, 1);
      state->run_state_ = ManagedThread::eRunning;

      if(cDebugThreading) {
        std::cerr << "[" << VM::current() << " WORLD restarted]\n";
      }
    }
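
The other half of this handshake is the safe-point check a running thread performs; it isn't among these examples, but under the same names it plausibly reduces to a sketch like this (checkpoint() here is an assumption, not confirmed API):

    // Hypothetical safe point for a managed thread: if a stop-the-world
    // request is pending, park in wait_to_run() until the world is
    // restarted via waiting_to_run_.broadcast().
    void checkpoint(THREAD) {
      if(should_stop_) {
        wait_to_run(state);
      }
    }
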
Example #5
    void wake_all_waiters(THREAD) {
      utilities::thread::Mutex::LockGuard guard(mutex_);

      if(!atomic::compare_and_swap(&should_stop_, 1, 0)) {
        // Someone else has already restarted the world, so there's
        // nothing left for us to do here.
        return;
      }

      if(cDebugThreading) {
        std::cerr << "[" << VM::current() << " WORLD waking all threads]\n";
      }

      if(state->run_state_ != ManagedThread::eAlone) {
        rubinius::bug("A non-alone thread is trying to wake all");
      }

      *check_global_interrupts_ = false;
      // Re-register ourselves as a pending thread.
      atomic::fetch_and_add(&pending_threads_, 1);

      waiting_to_run_.broadcast();

      state->run_state_ = ManagedThread::eRunning;
    }
Example #6
    void become_dependent(THREAD, utilities::thread::Condition* cond = NULL) {
      switch(state->run_state()) {
      case ManagedThread::eAlone:
        // Running alone, ignore.
        return;
      case ManagedThread::eRunning:
        // Ignore this, a running thread is already dependent.
        return;
      case ManagedThread::eSuspended:
        // A suspended thread must never become dependent; treat it as a bug.
        rubinius::bug("Trying to make a suspended thread dependent");
        break;
      case ManagedThread::eIndependent:
        // If the GC is running, wait here...
        if(should_stop_) {
          utilities::thread::Mutex::LockGuard guard(mutex_);
          state->run_state_ = ManagedThread::eSuspended;
          if(cond) cond->broadcast();
          // We hold the mutex because we may have to wait here.
          while(should_stop_) {
            waiting_to_run_.wait(mutex_);
          }
        }

        // Ok, we're running again.
        state->run_state_ = ManagedThread::eRunning;
        atomic::fetch_and_add(&pending_threads_, 1);
      }
    }
Example #7
 WorldState()
   : pending_threads_(0)
   , should_stop_(0)
   , time_waiting_(0)
 {
   mutex_.init();
   waiting_to_run_.init();
 }
Example #8
 WorldState(bool* check_global_interrupts)
   : pending_threads_(0)
   , should_stop_(0)
   , check_global_interrupts_(check_global_interrupts)
   , time_waiting_(0)
 {
   mutex_.init();
   waiting_to_run_.init();
 }
Example #9
    void unpause() {
      utilities::thread::Mutex::LockGuard guard(mutex_);

      // Not paused (e.g. just idle, waiting for more work); nothing to do.
      if(state != cPaused) return;

      pause_ = false;

      condition_.signal();
    }
Example #10
 /**
  * Called after a fork(), when we know we're alone again, to get
  * everything back in the proper order.
  */
 void reinit() {
   // When we're reinitializing the world state we're stopped,
   // so set pending_threads_ to 0 and should_stop_ to 1 so that
   // we start off in the proper state and can continue after a fork.
   pending_threads_ = 0;
   should_stop_ = 1;
   mutex_.init();
   waiting_to_run_.init();
 }
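
A sketch of the call site this comment implies, with the fork handling that sits outside these examples (world_ is an assumed name):

    #include <unistd.h>

    // Hypothetical post-fork flow: in the child only the forking thread
    // survives, and pthread mutexes/condvars held by other threads at
    // fork time are in an unreliable state, so the world state is rebuilt.
    pid_t pid = ::fork();
    if(pid == 0) {
      world_->reinit();  // child: stopped, pending_threads_ == 0, should_stop_ == 1
    }
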
Example #11
    void pause() {
      utilities::thread::Mutex::LockGuard guard(mutex_);

      // It's idle, i.e. effectively paused already; nothing to do.
      if(state == cIdle || state == cPaused) return;

      pause_ = true;

      while(!paused_ && (ls_->run_state() == ManagedThread::eRunning ||
                         ls_->run_state() == ManagedThread::eIndependent)) {
        pause_condition_.wait(mutex_);
      }
    }
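
Examples 9 and 11 are two sides of a handshake with the pause block inside perform() (Example #17). Condensed onto std::thread primitives, the whole protocol looks roughly like this (a sketch under that substitution, not the original API):

    #include <condition_variable>
    #include <mutex>

    std::mutex mutex_;
    std::condition_variable condition_;        // worker waits here while paused
    std::condition_variable pause_condition_;  // requester waits here for the ack
    bool pause_ = false, paused_ = false;

    // Worker side, as in the pause block of perform(): acknowledge the
    // request by setting paused_, then park until unpause() clears pause_.
    void worker_checkpoint() {
      std::unique_lock<std::mutex> lk(mutex_);
      if(!pause_) return;
      paused_ = true;
      pause_condition_.notify_all();
      while(pause_) condition_.wait(lk);
      paused_ = false;
    }

    // Requester side, as in pause(): request, then wait for the ack.
    void pause() {
      std::unique_lock<std::mutex> lk(mutex_);
      pause_ = true;
      while(!paused_) pause_condition_.wait(lk);
    }

    // Requester side, as in unpause(): clear the request, wake the worker.
    void unpause() {
      std::lock_guard<std::mutex> lk(mutex_);
      pause_ = false;
      condition_.notify_all();
    }
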
Example #12
    void restart_threads_externally() {
      utilities::thread::Mutex::LockGuard guard(mutex_);
      if(!atomic::compare_and_swap(&should_stop_, 1, 0)) {
        // Someone else has already restarted the world, so there's
        // nothing left for us to do here.
        return;
      }

      if(cDebugThreading) {
        std::cerr << "[" << VM::current() << " WORLD waking all threads (externally)]\n";
      }

      waiting_to_run_.broadcast();
    }
Example #13
    void stop() {
      {
        utilities::thread::Mutex::LockGuard guard(mutex_);
        if(state == cStopped) return;

        stop_ = true;

        if(state == cIdle) {
          condition_.signal();
        } else if(state == cPaused) {
          // TODO: factor out the logic shared with unpause()
          pause_ = false;
          condition_.signal();
        }
      }

      join();

      {
        utilities::thread::Mutex::LockGuard guard(mutex_);
        state = cStopped;
      }
    }
Example #14
    void become_dependent(THREAD, utilities::thread::Condition* cond = NULL) {
      switch(state->run_state()) {
      case ManagedThread::eAlone:
        // Running alone, ignore.
        return;
      case ManagedThread::eRunning:
        // Ignore this, a running thread is already dependent.
        return;
      case ManagedThread::eSuspended:
        // A suspended thread must never become dependent; treat it as a bug.
        rubinius::bug("Trying to make a suspended thread dependent");
        break;
      case ManagedThread::eIndependent:
        for(;;) {
          // If the GC is running, wait here...
          if(should_stop_) {
            utilities::thread::Mutex::LockGuard guard(mutex_);
            state->run_state_ = ManagedThread::eSuspended;
            if(cond) cond->broadcast();
            // We hold the mutex because we may have to wait here.
            while(should_stop_) {
              waiting_to_run_.wait(mutex_);
            }
          }

          // Ok, we're running again.
          state->run_state_ = ManagedThread::eRunning;
          atomic::fetch_and_add(&pending_threads_, 1);
          // After incrementing the thread count we have to re-check
          // whether we need to stop. There is a race here: should_stop_
          // may still have been false when we first checked, before the
          // increment, while another thread preparing to GC set it
          // without having seen our increment yet, and so believes
          // it's safe to GC.
          //
          if(!atomic::read(&should_stop_)) return;
          // If we do have to stop, subtract ourselves from the thread
          // count and retry; the next iteration will enter the wait.
          atomic::fetch_and_sub(&pending_threads_, 1);
        }
      }
    }
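
The re-check described in that long comment is the heart of the example; isolated with std::atomic it reads as follows (a condensed sketch; the names are assumptions):

    #include <atomic>

    std::atomic<int> pending_threads{0};
    std::atomic<bool> should_stop{false};

    // Register, then re-check. A stopper may set should_stop between our
    // first check and our increment without having seen the increment;
    // re-checking after the increment closes that window: either we see
    // the stop and back out, or the stopper sees us in pending_threads
    // and waits for us to park.
    bool try_become_dependent() {
      pending_threads.fetch_add(1);
      if(!should_stop.load()) return true;  // counted, and safe to run
      pending_threads.fetch_sub(1);         // back out; caller retries via the wait path
      return false;
    }
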
Example #15
    void stop_threads_externally() {
      while(!atomic::compare_and_swap(&should_stop_, 0, 1)) {
        if(cDebugThreading) {
          std::cerr << "[WORLD waiting to stopping all threads (as external event)]\n";
        }
        // Wait on the run condition variable until whoever currently
        // has the world stopped finishes and resets should_stop_ to 0.
        utilities::thread::Mutex::LockGuard guard(mutex_);
        while(should_stop_) {
          waiting_to_run_.wait(mutex_);
        }
        // Now redo the outer loop and see if we can claim the stop this time.
      }

      if(cDebugThreading) {
        std::cerr << "[WORLD stopping all threads (as external event)]\n";
      }

      *check_global_interrupts_ = true;
      // We need a memory barrier here so we're sure to see an
      // up-to-date value of pending_threads_ on each iteration.
      while(atomic::read(&pending_threads_) > 0) {
        if(cDebugThreading) {
          std::cerr << "[" << VM::current() << " WORLD waiting on condvar: "
                    << pending_threads_ << "]\n";
        }
        // Yield here so other threads get scheduled and can run.
        // Benchmarks showed this burns the least CPU compared with
        // spinning without yielding or sleeping for 1 nanosecond
        // with {0, 1}.
        atomic::pause();
      }

      if(cDebugThreading) {
        std::cerr << "[" << VM::current() << " WORLD o/~ I think we're alone now.. o/~]\n";
      }
    }
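
The stopping side of the same handshake, condensed the same way (this sketch substitutes a yield loop for the condition-variable wait in the original):

    #include <atomic>
    #include <thread>

    std::atomic<int> pending_threads{0};   // as in the previous sketch
    std::atomic<bool> should_stop{false};

    // Claim the stop flag, then wait for every registered thread to park.
    void stop_all() {
      bool expected = false;
      while(!should_stop.compare_exchange_strong(expected, true)) {
        expected = false;                  // someone else holds the stop; retry
        std::this_thread::yield();
      }
      while(pending_threads.load() > 0) {
        std::this_thread::yield();         // analogous to atomic::pause()
      }
      // We are alone now; safe to work, then reset should_stop to restart.
    }
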
Example #16
 void unlock() {
   stop_ = false;
   wait_condition_.broadcast();
   lock_.unlock();
 }
Example #17
    virtual void perform() {
      const char* thread_name = "rbx.jit";
      ManagedThread::set_current(ls_, thread_name);

      ls_->set_run_state(ManagedThread::eIndependent);

      RUBINIUS_THREAD_START(thread_name, ls_->thread_id(), 1);

#ifndef RBX_WINDOWS
      sigset_t set;
      sigfillset(&set);
      pthread_sigmask(SIG_SETMASK, &set, NULL);
#endif

      for(;;) { // forever

        BackgroundCompileRequest* req = 0;

        // Lock, wait, get a request, unlock
        {
          utilities::thread::Mutex::LockGuard guard(mutex_);

          if(pause_) {
            state = cPaused;

            paused_ = true;
            pause_condition_.broadcast();

            if(stop_) goto halt;

            while(pause_) {
              condition_.wait(mutex_);
              if(stop_) goto halt;
            }

            state = cUnknown;
            paused_ = false;
          }

          // If we've been asked to stop, do so now.
          if(stop_) goto halt;


          while(pending_requests_.empty()) {
            state = cIdle;

            // unlock and wait...
            condition_.wait(mutex_);

            if(stop_) goto halt;
          }

          // now locked again, shift a request
          req = pending_requests_.front();

          state = cRunning;
        }

        // This isn't ideal, but it's the safest option: keep the GC
        // from running while we're building the IR.
        ls_->gc_dependent();

        Context ctx(ls_);
        jit::Compiler jit(&ctx);

        // The mutex is now unlocked, allowing others to push more
        // requests.

        current_req_ = req;
        current_compiler_ = &jit;

        int spec_id = 0;
        Class* cls = req->receiver_class();
        if(cls && !cls->nil_p()) {
          spec_id = cls->class_id();
        }

        void* func = 0;
        {
          timer::Running<1000000> timer(ls_->shared().stats.jit_time_spent);

          jit.compile(req);

          func = jit.generate_function();
        }

        // We were unable to compile this function, likely because
        // it contains something we don't support.
        if(!func) {
          if(ls_->config().jit_show_compiling) {
            CompiledCode* code = req->method();
            llvm::outs() << "[[[ JIT error background compiling "
                      << ls_->enclosure_name(code) << "#" << ls_->symbol_debug_str(code->name())
                      << (req->is_block() ? " (block)" : " (method)")
                      << " ]]]\n";
          }
          // If someone was waiting on this, wake them up.
          if(utilities::thread::Condition* cond = req->waiter()) {
            cond->signal();
          }

          current_req_ = 0;
          current_compiler_ = 0;
          pending_requests_.pop_front();
          delete req;

          // We don't depend on the GC here, so let it run independently
          // of us.
          ls_->gc_independent();

          continue;
        }

        if(show_machine_code_) {
          jit.show_machine_code();
        }

        // If the method has had JIT'ing disabled since we started
        // compiling it, discard our work.
        if(!req->machine_code()->jit_disabled()) {

          jit::RuntimeDataHolder* rd = ctx.runtime_data_holder();

          atomic::memory_barrier();
          ls_->start_method_update();

          if(!req->is_block()) {
            if(spec_id) {
              req->method()->add_specialized(spec_id, reinterpret_cast<executor>(func), rd);
            } else {
              req->method()->set_unspecialized(reinterpret_cast<executor>(func), rd);
            }
          } else {
            req->method()->set_unspecialized(reinterpret_cast<executor>(func), rd);
          }

          req->machine_code()->clear_compiling();

          // assert(req->method()->jit_data());

          ls_->end_method_update();

          rd->run_write_barrier(ls_->write_barrier(), req->method());

          ls_->shared().stats.jitted_methods++;

          if(ls_->config().jit_show_compiling) {
            CompiledCode* code = req->method();
            llvm::outs() << "[[[ JIT finished background compiling "
                      << ls_->enclosure_name(code) << "#" << ls_->symbol_debug_str(code->name())
                      << (req->is_block() ? " (block)" : " (method)")
                      << " ]]]\n";
          }
        }

        // If someone was waiting on this, wake them up.
        if(utilities::thread::Condition* cond = req->waiter()) {
          cond->signal();
        }

        current_req_ = 0;
        current_compiler_ = 0;
        pending_requests_.pop_front();
        delete req;

        // We don't depend on the GC here, so let it run independently
        // of us.
        ls_->gc_independent();
      }

halt:
      RUBINIUS_THREAD_STOP(thread_name, ls_->thread_id(), 1);
    }
Example #18
 void add(BackgroundCompileRequest* req) {
   utilities::thread::Mutex::LockGuard guard(mutex_);
   pending_requests_.push_back(req);
   condition_.signal();
 }
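
add() is the producer half of the queue that perform() (Example #17) drains. The pair condenses to a standard mutex/condvar work queue (a sketch on std::thread primitives; Request is a stand-in type):

    #include <condition_variable>
    #include <deque>
    #include <mutex>

    struct Request { /* compile-request payload */ };

    std::mutex mutex_;
    std::condition_variable condition_;
    std::deque<Request*> pending_requests_;

    // Producer, as in add(): push under the lock, then signal the compiler
    // thread in case it is parked in its idle wait loop.
    void add(Request* req) {
      std::lock_guard<std::mutex> guard(mutex_);
      pending_requests_.push_back(req);
      condition_.notify_one();
    }

    // Consumer, as in the "lock, wait, get a request" block of perform().
    // Note the original pops the request only after compiling it.
    Request* next_request() {
      std::unique_lock<std::mutex> guard(mutex_);
      while(pending_requests_.empty()) condition_.wait(guard);
      Request* req = pending_requests_.front();
      pending_requests_.pop_front();
      return req;
    }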