void test_specialize_transforms_ivars_to_slots() {
        CompiledCode* code = CompiledCode::create(state);
        Tuple* tup = Tuple::from(state, 1, state->symbol("@blah"));
        code->literals(state, tup);

        InstructionSequence* iseq = InstructionSequence::create(state, 3);
        iseq->opcodes()->put(state, 0, Fixnum::from(InstructionSequence::insn_push_ivar));
        iseq->opcodes()->put(state, 1, Fixnum::from(0));
        iseq->opcodes()->put(state, 2, Fixnum::from(InstructionSequence::insn_push_nil));

        code->iseq(state, iseq);

        MachineCode* mcode = new MachineCode(state, code);

        Object::Info ti(ObjectType);
        ti.slots[state->symbol("@blah")->index()] = 5;
        ti.slot_locations.resize(6);
        ti.slot_locations[5] = 33;
        mcode->specialize(state, code, &ti);

        TS_ASSERT_EQUALS(mcode->total, 3U);
        TS_ASSERT_EQUALS(mcode->opcodes[0], static_cast<unsigned int>(InstructionSequence::insn_push_my_offset));
        TS_ASSERT_EQUALS(mcode->opcodes[1], 33U);
        TS_ASSERT_EQUALS(mcode->opcodes[2], static_cast<unsigned int>(InstructionSequence::insn_push_nil));
    }
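
The test above exercises MachineCode::specialize: once the receiver's type info proves that @blah lives at a fixed slot, the generic insn_push_ivar can be rewritten in place to insn_push_my_offset with the slot's location (33) as its operand. Below is a minimal, self-contained sketch of that kind of rewrite; the opcode values and the slot_offset_for callback are hypothetical stand-ins, not the Rubinius implementation.

  #include <cstddef>

  using opcode = unsigned int;

  // Hypothetical opcode numbers for illustration only.
  enum { insn_push_ivar = 1, insn_push_my_offset = 2, insn_push_nil = 3 };

  // Walk the opcode stream; wherever push_ivar's literal operand resolves
  // to a known slot offset, rewrite the instruction in place. Both forms
  // are one opcode plus one operand, so the stream's layout is unchanged.
  void specialize_ivars(opcode* stream, size_t total,
                        bool (*slot_offset_for)(opcode literal, opcode* offset)) {
    for(size_t ip = 0; ip < total;) {
      opcode op = stream[ip];
      size_t width = (op == insn_push_nil) ? 1 : 2; // push_nil has no operand
      opcode offset;
      if(op == insn_push_ivar && slot_offset_for(stream[ip + 1], &offset)) {
        stream[ip] = insn_push_my_offset;
        stream[ip + 1] = offset;
      }
      ip += width;
    }
  }
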
Example #2
  void CompiledCode::add_specialized(uint32_t class_id, uint32_t serial_id, executor exec,
                                       jit::RuntimeDataHolder* rd)
  {
    if(!machine_code_) rubinius::bug("specializing with no backend");

    MachineCode* v = machine_code_;

    for(int i = 0; i < MachineCode::cMaxSpecializations; i++) {
      uint32_t id = v->specializations[i].class_data.f.class_id;
      if(id == 0 || id == class_id) {
        v->specializations[i].class_data.f.class_id = class_id;
        v->specializations[i].class_data.f.serial_id = serial_id;
        v->specializations[i].execute = exec;
        v->specializations[i].jit_data = rd;

        v->set_execute_status(MachineCode::eJIT);
        if(primitive()->nil_p()) {
          execute = specialized_executor;
        }
        return;
      }
    }

    // No room for the specialization, this is bad.
    std::cerr << "No room for specialization!\n";
  }
Example #3
  BlockEnvironment* BlockEnvironment::under_call_frame(STATE, GCToken gct,
      CompiledCode* ccode, MachineCode* caller,
      CallFrame* call_frame)
  {

    MachineCode* mcode = ccode->machine_code();
    if(!mcode) {
      OnStack<1> os(state, ccode);
      state->set_call_frame(call_frame);
      mcode = ccode->internalize(state, gct);
      if(!mcode) {
        Exception::internal_error(state, call_frame, "invalid bytecode method");
        return 0;
      }
    }

    mcode->set_parent(caller);

    BlockEnvironment* be = state->new_object_dirty<BlockEnvironment>(G(blokenv));
    be->scope(state, call_frame->promote_scope(state));
    be->top_scope(state, call_frame->top_scope(state));
    be->compiled_code(state, ccode);
    be->module(state, call_frame->module());
    be->metadata_container(state, nil<Tuple>());
    return be;
  }
Example #4
  void CompiledCode::add_specialized(int spec_id, executor exec,
                                       jit::RuntimeDataHolder* rd)
  {
    if(!machine_code_) rubinius::bug("specializing with no backend");

    MachineCode* v = machine_code_;

    // Must happen only on the first specialization
    if(!v->unspecialized) {
      if(execute == specialized_executor) {
        rubinius::bug("cant setup unspecialized from specialized");
      }

      v->unspecialized = execute;
    }

    for(int i = 0; i < MachineCode::cMaxSpecializations; i++) {
      int id = v->specializations[i].class_id;
      if(id == 0 || id == spec_id) {
        v->specializations[i].class_id = spec_id;
        v->specializations[i].execute = exec;
        v->specializations[i].jit_data = rd;

        v->set_execute_status(MachineCode::eJIT);
        execute = specialized_executor;
        return;
      }
    }

    // No room for the specialization, this is bad.
    std::cerr << "No room for specialization!\n";
  }
Example #5
  void CompiledCode::Info::mark(Object* obj, memory::ObjectMark& mark) {
    auto_mark(obj, mark);

    mark_inliners(obj, mark);

    CompiledCode* code = as<CompiledCode>(obj);
    if(!code->machine_code()) return;

    MachineCode* mcode = code->machine_code();
    mcode->set_mark();

    for(int i = 0; i < MachineCode::cMaxSpecializations; i++) {
      // TODO: JIT
    }

    for(size_t i = 0; i < mcode->references_count(); i++) {
      if(size_t ip = mcode->references()[i]) {
        Object* ref = reinterpret_cast<Object*>(mcode->opcodes[ip]);
        if(Object* updated_ref = mark.call(ref)) {
          mcode->opcodes[ip] = reinterpret_cast<intptr_t>(updated_ref);
          mark.just_set(code, updated_ref);
        }
      }
    }
  }
Example #6
  Tuple* CompiledCode::constant_caches(STATE) {
    CompiledCode* self = this;
    OnStack<1> os(state, self);

    if(self->machine_code() == NULL) {
      if(!self->internalize(state)) return force_as<Tuple>(Primitives::failure());
    }
    MachineCode* mcode = self->machine_code();
    return mcode->constant_caches(state);
  }
Example #7
  Tuple* CompiledCode::constant_caches(STATE, CallFrame* calling_environment) {
    GCTokenImpl gct;
    CompiledCode* self = this;
    OnStack<1> os(state, self);

    if(self->machine_code_ == NULL) {
      if(!self->internalize(state, gct, calling_environment)) return force_as<Tuple>(Primitives::failure());
    }
    MachineCode* mcode = self->machine_code_;
    return mcode->constant_caches(state);
  }
Example #8
  void CompiledCode::Info::mark(Object* obj, ObjectMark& mark) {
    auto_mark(obj, mark);

    mark_inliners(obj, mark);

    CompiledCode* code = as<CompiledCode>(obj);
    if(!code->machine_code_) return;

    MachineCode* mcode = code->machine_code_;
    mcode->set_mark();

    Object* tmp;

#ifdef ENABLE_LLVM
    if(code->jit_data()) {
      code->jit_data()->set_mark();
      code->jit_data()->mark_all(code, mark);
    }


    for(int i = 0; i < MachineCode::cMaxSpecializations; i++) {
      if(mcode->specializations[i].jit_data) {
        mcode->specializations[i].jit_data->set_mark();
        mcode->specializations[i].jit_data->mark_all(code, mark);
      }
    }
#endif

    for(size_t i = 0; i < mcode->inline_cache_count(); i++) {
      InlineCache* cache = &mcode->caches[i];

      for(int j = 0; j < cTrackedICHits; ++j) {
        MethodCacheEntry* mce = cache->cache_[j].entry();
        if(mce) {
          tmp = mark.call(mce);
          if(tmp) {
            cache->cache_[j].assign(static_cast<MethodCacheEntry*>(tmp));
            mark.just_set(obj, tmp);
          }
        }
      }

      if(cache->call_unit_) {
        tmp = mark.call(cache->call_unit_);
        if(tmp) {
          cache->call_unit_ = static_cast<CallUnit*>(tmp);
          mark.just_set(obj, tmp);
        }
      }
    }
  }
Example #9
  MachineCode* CompiledCode::internalize(STATE, GCToken gct,
                                        const char** reason, int* ip)
  {
    MachineCode* mcode = machine_code_;

    atomic::memory_barrier();

    if(mcode) return mcode;

    CompiledCode* self = this;
    OnStack<1> os(state, self);

    self->hard_lock(state, gct);

    mcode = self->machine_code_;
    if(!mcode) {
      {
        BytecodeVerification bv(self);
        if(!bv.verify(state)) {
          if(reason) *reason = bv.failure_reason();
          if(ip) *ip = bv.failure_ip();
          std::cerr << "Error validating bytecode: " << bv.failure_reason() << "\n";
          return 0;
        }
      }

      mcode = new MachineCode(state, self);

      if(self->resolve_primitive(state)) {
        mcode->fallback = execute;
      } else {
        mcode->setup_argument_handler();
      }

      // We need to have an explicit memory barrier here, because we need to
      // be sure that mcode is completely initialized before it's set.
      // Otherwise another thread might see a partially initialized
      // MachineCode.
      atomic::write(&self->machine_code_, mcode);

      set_executor(mcode->fallback);
    }

    self->hard_unlock(state, gct);
    return mcode;
  }
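
Example #9 is the double-checked locking pattern: an unlocked fast-path read, a hard lock, a second read under the lock, and an explicit barrier so the fully built MachineCode is visible before the pointer is published. Here is a condensed sketch of the same pattern in portable C++11, using generic names rather than the Rubinius API:

  #include <atomic>
  #include <mutex>

  struct Machine { /* expensive-to-build state */ };

  std::atomic<Machine*> g_machine{nullptr};
  std::mutex g_lock;

  Machine* internalize() {
    // Fast path: the acquire load pairs with the release store below, so a
    // non-null pointer is guaranteed to point at a fully constructed object.
    if(Machine* m = g_machine.load(std::memory_order_acquire)) return m;

    std::lock_guard<std::mutex> guard(g_lock);
    // Re-check under the lock: another thread may have won the race.
    if(Machine* m = g_machine.load(std::memory_order_relaxed)) return m;

    Machine* built = new Machine();
    // The release store plays the role of the explicit memory barrier in
    // the example above: construction happens-before publication.
    g_machine.store(built, std::memory_order_release);
    return built;
  }
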
Example #10
    void test_validate_ip() {
        CompiledCode* code = CompiledCode::create(state);
        Tuple* tup = Tuple::from(state, 1, state->symbol("@blah"));
        code->literals(state, tup);

        InstructionSequence* iseq = InstructionSequence::create(state, 3);
        iseq->opcodes()->put(state, 0, Fixnum::from(InstructionSequence::insn_push_ivar));
        iseq->opcodes()->put(state, 1, Fixnum::from(0));
        iseq->opcodes()->put(state, 2, Fixnum::from(InstructionSequence::insn_push_nil));

        code->iseq(state, iseq);

        MachineCode* mcode = new MachineCode(state, code);
        TS_ASSERT_EQUALS(mcode->validate_ip(state, 0), true);
        TS_ASSERT_EQUALS(mcode->validate_ip(state, 1), false);
        TS_ASSERT_EQUALS(mcode->validate_ip(state, 2), true);
    }
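
In this test, ip 1 is rejected because it points at the operand of insn_push_ivar rather than at an instruction boundary. A hedged sketch of the boundary check follows, assuming a width callback that reports each instruction's total size including operands (a hypothetical helper, not the Rubinius signature):

  #include <cstddef>

  using opcode = unsigned int;

  bool validate_ip(const opcode* stream, size_t total, size_t target,
                   size_t (*width)(opcode)) {
    // Step from one instruction start to the next; only those starts are
    // valid instruction pointers.
    for(size_t ip = 0; ip < total; ip += width(stream[ip])) {
      if(ip == target) return true;
    }
    return false; // target fell inside an operand, like ip 1 above
  }
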
Example #11
  void CompiledCode::add_specialized(STATE, uint32_t class_id, uint32_t serial_id, executor exec,
                                       jit::RuntimeDataHolder* rd)
  {
    if(!machine_code_) {
      utilities::logger::error("specializing with no backend");
      return;
    }

    MachineCode* v = machine_code_;

    int i;

    for(i = 0; i < MachineCode::cMaxSpecializations; i++) {
      uint32_t id = v->specializations[i].class_data.f.class_id;

      if(id == 0 || id == class_id) break;
    }

    /* We have fixed space for specializations. If we exceed this, overwrite
     * the first one. This should be converted to some sort of LRU cache.
     */
    if(i == MachineCode::cMaxSpecializations) {
      std::ostringstream msg;

      msg << "Specialization space exceeded for " <<
        machine_code_->name()->cpp_str(state);
      utilities::logger::warn(msg.str().c_str());

      i = 0;
    }

    v->specializations[i].class_data.f.class_id = class_id;
    v->specializations[i].class_data.f.serial_id = serial_id;
    v->specializations[i].execute = exec;
    v->specializations[i].jit_data = rd;

    v->set_execute_status(MachineCode::eJIT);
    if(primitive()->nil_p()) {
      execute = specialized_executor;
    }
  }
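
When the table is full, Example #11 falls back to overwriting slot 0, and its comment asks for "some sort of LRU cache". A hypothetical round-robin variant is sketched below; the struct, names, and table size are illustrative, not Rubinius code:

  #include <cstdint>

  struct SpecializationTable {
    static const int cMaxSpecializations = 3;
    uint32_t class_ids[cMaxSpecializations] = {};
    int next_victim = 0;

    // Return the slot to (re)use for class_id: an empty or matching slot
    // if one exists, otherwise evict slots in rotation instead of always
    // clobbering slot 0, which thrashes when hot classes alternate.
    int slot_for(uint32_t class_id) {
      for(int i = 0; i < cMaxSpecializations; i++) {
        if(class_ids[i] == 0 || class_ids[i] == class_id) return i;
      }
      int victim = next_victim;
      next_victim = (next_victim + 1) % cMaxSpecializations;
      return victim;
    }
  };
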
Example #12
  Location* Location::create(STATE, CallFrame* call_frame,
                             bool include_variables)
  {
    if(NativeMethodFrame* nmf = call_frame->native_method_frame()) {
      return create(state, nmf);
    }

    Location* loc = state->new_object_dirty<Location>(G(location));
    loc->method_module(state, call_frame->module());
    loc->receiver(state, call_frame->self());
    loc->method(state, call_frame->compiled_code);
    loc->ip(state, Fixnum::from(call_frame->ip()));
    loc->flags(state, Fixnum::from(0));

    if(call_frame->is_block_p(state)) {
      loc->name(state, call_frame->top_scope(state)->method()->name());
      loc->set_is_block(state);
    } else {
      loc->name(state, call_frame->name());
    }

    MachineCode* mcode = call_frame->compiled_code->machine_code();
    if(mcode && mcode->jitted_p()) {
      loc->set_is_jit(state);
    }

    if(include_variables) {
      // Use promote_scope because it can figure out whether the generated
      // VariableScope should be isolated by default (currently true for
      // JIT'd frames).
      loc->variables(state, call_frame->promote_scope(state));
    } else {
      loc->variables(state, nil<VariableScope>());
    }

    loc->constant_scope(state, call_frame->constant_scope());

    return loc;
  }
Example #13
  MachineCode* CompiledCode::internalize(STATE) {
    timer::StopWatch<timer::microseconds> timer(
        state->vm()->metrics().machine.bytecode_internalizer_us);

    atomic::memory_barrier();

    MachineCode* mcode = machine_code();

    if(mcode) return mcode;

    {
      BytecodeVerifier bytecode_verifier(this);
      bytecode_verifier.verify(state);
    }

    mcode = new MachineCode(state, this);

    if(resolve_primitive(state)) {
      mcode->fallback = execute;
    } else {
      mcode->setup_argument_handler();
    }

    /* There is a race here because another Thread may have run this
     * CompiledCode instance and internalized it. We attempt to store our
     * version assuming that we are the only ones to do so and throw away our
     * work if someone else has beat us to it.
     */
    MachineCode** mcode_ptr = &_machine_code_;
    if(atomic::compare_and_swap(reinterpret_cast<void**>(mcode_ptr), 0, mcode)) {
      set_executor(mcode->fallback);
      return mcode;
    } else {
      return machine_code();
    }
  }
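
Example #13 replaces the hard lock of Example #9 with lock-free publication: each racing thread builds its own MachineCode, and a single compare-and-swap decides whose copy wins; losers throw away their work. A condensed sketch with std::atomic, using generic names rather than the Rubinius API:

  #include <atomic>

  struct Machine { };

  std::atomic<Machine*> g_machine{nullptr};

  Machine* internalize_lock_free() {
    if(Machine* m = g_machine.load(std::memory_order_acquire)) return m;

    Machine* fresh = new Machine(); // every racing thread builds its own
    Machine* expected = nullptr;
    if(g_machine.compare_exchange_strong(expected, fresh,
                                         std::memory_order_release,
                                         std::memory_order_acquire)) {
      return fresh;    // we won the race: our copy is now published
    }
    delete fresh;      // we lost: discard our work, as the comment describes
    return expected;   // the failed CAS loaded the winner's pointer here
  }
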
Example #14
  void CompiledCode::Info::mark(Object* obj, ObjectMark& mark) {
    auto_mark(obj, mark);

    mark_inliners(obj, mark);

    CompiledCode* code = as<CompiledCode>(obj);
    if(!code->machine_code_) return;

    MachineCode* mcode = code->machine_code_;
    mcode->set_mark();

#ifdef ENABLE_LLVM
    if(code->jit_data()) {
      code->jit_data()->set_mark();
      code->jit_data()->mark_all(code, mark);
    }


    for(int i = 0; i < MachineCode::cMaxSpecializations; i++) {
      if(mcode->specializations[i].jit_data) {
        mcode->specializations[i].jit_data->set_mark();
        mcode->specializations[i].jit_data->mark_all(code, mark);
      }
    }
#endif

    for(size_t i = 0; i < mcode->call_site_count(); i++) {
      size_t index = mcode->call_site_offsets()[i];
      Object* old_cache = reinterpret_cast<Object*>(mcode->opcodes[index + 1]);
      Object* new_cache = mark.call(old_cache);
      if(new_cache != old_cache) {
        mcode->opcodes[index + 1] = reinterpret_cast<intptr_t>(new_cache);
        mark.just_set(code, new_cache);
      }
    }

    for(size_t i = 0; i < mcode->constant_cache_count(); i++) {
      size_t index = mcode->constant_cache_offsets()[i];
      Object* old_cache = reinterpret_cast<Object*>(mcode->opcodes[index + 1]);
      Object* new_cache = mark.call(old_cache);
      if(new_cache != old_cache) {
        mcode->opcodes[index + 1] = reinterpret_cast<intptr_t>(new_cache);
        mark.just_set(code, new_cache);
      }
    }
  }
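
Both loops in Example #14 repeat one pattern: a cache pointer is embedded directly in the opcode stream at offset + 1, so the marker reads it out, lets the collector relocate it, and writes the possibly moved pointer back. A condensed sketch of that single step, with hypothetical names (relocate stands in for mark.call()):

  #include <cstddef>
  #include <cstdint>

  struct Object;

  void update_embedded(intptr_t* opcodes, size_t index,
                       Object* (*relocate)(Object*)) {
    Object* old_ref = reinterpret_cast<Object*>(opcodes[index]);
    Object* new_ref = relocate(old_ref); // new address if moved, else old
    if(new_ref != old_ref) {
      opcodes[index] = reinterpret_cast<intptr_t>(new_ref);
    }
  }
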
Example #15
  CallFrame* LLVMState::find_candidate(STATE, CompiledCode* start, CallFrame* call_frame) {
    if(!config_.jit_inline_generic) {
      return call_frame;
    }

    int depth = config().jit_limit_search;

    if(!start) {
      throw CompileError("find_candidate: null start");
    }

    if(!call_frame) {
      throw CompileError("find_candidate: null call frame");
    }

    // if(!start) {
      // start = call_frame->compiled_code;
      // call_frame = call_frame->previous;
      // depth--;
    // }

    if(debug_search) {
      std::cout << "> call_count: " << call_frame->compiled_code->machine_code()->call_count
            << " size: " << call_frame->compiled_code->machine_code()->total
            << " sends: " << call_frame->compiled_code->machine_code()->call_site_count()
            << std::endl;

      call_frame->print_backtrace(state, 1);
    }

    if(start->machine_code()->total > (size_t)config_.jit_limit_inline_method) {
      if(debug_search) {
        std::cout << "JIT: STOP. reason: trigger method isn't small: "
              << start->machine_code()->total << " > "
              << config_.jit_limit_inline_method
              << std::endl;
      }

      return call_frame;
    }

    MachineCode* mcode = start->machine_code();

    if(mcode->required_args != mcode->total_args) {
      if(debug_search) {
        std::cout << "JIT: STOP. reason: trigger method req_args != total_args" << std::endl;
      }

      return call_frame;
    }

    if(mcode->no_inline_p()) {
      if(debug_search) {
        std::cout << "JIT: STOP. reason: trigger method no_inline_p() = true" << std::endl;
      }

      return call_frame;
    }

    CallFrame* callee = call_frame;
    call_frame = call_frame->previous;

    if(!call_frame) return callee;

    // Now start looking at callers.

    while(depth-- > 0) {
      CompiledCode* cur = call_frame->compiled_code;

      if(!cur) {
        if(debug_search) {
          std::cout << "JIT: STOP. reason: synthetic CallFrame hit" << std::endl;
        }
        return callee;
      }

      MachineCode* mcode = cur->machine_code();

      if(debug_search) {
        std::cout << "> call_count: " << mcode->call_count
              << " size: " << mcode->total
              << " sends: " << mcode->call_site_count()
              << std::endl;

        call_frame->print_backtrace(state, 1);
      }


      /*
      if(call_frame->block_p()
          || mcode->required_args != mcode->total_args // has a splat
          || mcode->call_count < 200 // not called much
          || mcode->jitted() // already jitted
          || mcode->parent() // is a block
        ) return callee;
      */

      if(mcode->required_args != mcode->total_args) {
        if(debug_search) {
          std::cout << "JIT: STOP. reason: req_args != total_args" << std::endl;
        }
        return callee;
      }

      if(mcode->call_count < config_.jit_threshold_inline) {
        if(debug_search) {
          std::cout << "JIT: STOP. reason: call_count too small: "
                << mcode->call_count << " < "
                << config_.jit_threshold_inline << std::endl;
        }

        return callee;
      }

      if(mcode->jitted_p()) {
        if(debug_search) {
          std::cout << "JIT: STOP. reason: already jitted" << std::endl;
        }

        return callee;
      }

      if(mcode->no_inline_p()) {
        if(debug_search) {
          std::cout << "JIT: STOP. reason: no_inline_p() = true" << std::endl;
        }

        return callee;
      }

      if(call_frame->jitted_p() || call_frame->inline_method_p()) {
        return callee;
      }

      if(mcode->call_site_count() > eMaxInlineSendCount) {
        if(debug_search) {
          std::cout << "JIT: STOP. reason: high send count" << std::endl;
        }

        return call_frame;
      }

      // if(mcode->required_args != mcode->total_args // has a splat
          // || mcode->call_count < 200 // not called much
          // || mcode->jitted() // already jitted
          // || !mcode->no_inline_p() // method marked as not inlineable
        // ) return callee;

      CallFrame* prev = call_frame->previous;

      if(!prev) {
        if(debug_search) {
          std::cout << "JIT: STOP. reason: toplevel method" << std::endl;
        }
        return call_frame;
      }

      // if(cur->machine_code()->total > SMALL_METHOD_SIZE) {
        // if(debug_search) {
          // std::cout << "JIT: STOP. reason: big method: "
                // << cur->machine_code()->total << " > "
                // << SMALL_METHOD_SIZE
                // << "\n";
        // }

        // return call_frame;
      // }

      // if(!next || cur->machine_code()->total > SMALL_METHOD_SIZE) return call_frame;

      callee = call_frame;
      call_frame = prev;
    }

    return callee;
  }
Example #16
Object* MachineCode::uncommon_interpreter(STATE,
                                          MachineCode* const mcode,
                                          CallFrame* const call_frame,
                                          int32_t entry_ip,
                                          native_int sp,
                                          CallFrame* const method_call_frame,
                                          jit::RuntimeDataHolder* rd,
                                          UnwindInfoSet& thread_unwinds)
{

  MachineCode* mc = method_call_frame->compiled_code->machine_code();

  if(++mc->uncommon_count > state->shared().config.jit_deoptimize_threshold) {
    if(state->shared().config.jit_uncommon_print) {
      std::cerr << "[[[ Deoptimizing uncommon method ]]]\n";
      call_frame->print_backtrace(state);

      std::cerr << "Method Call Frame:\n";
      method_call_frame->print_backtrace(state);
    }

    mc->uncommon_count = 0;
    mc->deoptimize(state, method_call_frame->compiled_code, rd);
  }

#include "vm/gen/instruction_locations.hpp"

  opcode* stream = mcode->opcodes;
  InterpreterState is;
  GCTokenImpl gct;

  Object** stack_ptr = call_frame->stk + sp;

  UnwindInfoSet unwinds(thread_unwinds);
continue_to_run:
  try {

#undef DISPATCH
#define DISPATCH goto *insn_locations[stream[call_frame->inc_ip()]];

#undef next_int
#undef cache_ip
#undef flush_ip

#define next_int ((opcode)(stream[call_frame->inc_ip()]))
#define cache_ip(which)
#define flush_ip()

#include "vm/gen/instruction_implementations.hpp"

  } catch(TypeError& e) {
    flush_ip();
    Exception* exc =
      Exception::make_type_error(state, e.type, e.object, e.reason);
    exc->locations(state, Location::from_call_stack(state, call_frame));

    state->raise_exception(exc);
    call_frame->scope->flush_to_heap(state);
    return NULL;
  } catch(const RubyException& exc) {
    exc.exception->locations(state,
          Location::from_call_stack(state, call_frame));
    state->raise_exception(exc.exception);
    return NULL;
  }

  // No reason to be here!
  rubinius::bug("Control flow error in interpreter");

exception:
  VMThreadState* th = state->vm()->thread_state();
  switch(th->raise_reason()) {
  case cException:
    if(unwinds.has_unwinds()) {
      UnwindInfo info = unwinds.pop();
      stack_position(info.stack_depth);
      call_frame->set_ip(info.target_ip);
      cache_ip(info.target_ip);
      goto continue_to_run;
    } else {
      call_frame->scope->flush_to_heap(state);
      return NULL;
    }

  case cBreak:
    // If we're trying to break to here, we're done!
    if(th->destination_scope() == call_frame->scope->on_heap()) {
      stack_push(th->raise_value());
      th->clear_break();
      goto continue_to_run;
      // Don't return here, because we want to loop back to the top
      // and keep running this method.
    }

    // Otherwise, fall through and run the unwinds
  case cReturn:
  case cCatchThrow:
  case cThreadKill:
    // Otherwise, we're doing a long return/break unwind through
    // here. We need to run ensure blocks.
    while(unwinds.has_unwinds()) {
      UnwindInfo info = unwinds.pop();
      if(info.for_ensure()) {
        stack_position(info.stack_depth);
        call_frame->set_ip(info.target_ip);
        cache_ip(info.target_ip);

        // Don't reset ep here, we're still handling the return/break.
        goto continue_to_run;
      }
    }

    // Ok, no ensures to run.
    if(th->raise_reason() == cReturn) {
      call_frame->scope->flush_to_heap(state);

      // If we're trying to return to here, we're done!
      if(th->destination_scope() == call_frame->scope->on_heap()) {
        Object* val = th->raise_value();
        th->clear_return();
        return val;
      } else {
        // Give control of this exception to the caller.
        return NULL;
      }

    } else { // It's a cBreak that's not for us!
      call_frame->scope->flush_to_heap(state);
      // Give control of this exception to the caller.
      return NULL;
    }

  case cExit:
    call_frame->scope->flush_to_heap(state);
    return NULL;
  default:
    break;
  } // switch

  rubinius::bug("Control flow error in interpreter");
  return NULL;
}
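
The DISPATCH macro in Example #16 is threaded dispatch via computed goto: each handler jumps straight to the next instruction's label through a table of label addresses, avoiding a central switch. A minimal standalone sketch of the technique (labels-as-values is a GCC/Clang extension; the opcodes here are invented for illustration):

  #include <cstdio>

  using opcode = int;
  enum { OP_PUSH, OP_ADD, OP_PRINT, OP_HALT };

  int run(const opcode* stream) {
    static void* locations[] = { &&op_push, &&op_add, &&op_print, &&op_halt };
    int stack[16];
    int sp = -1;
    int ip = 0;

  #define DISPATCH goto *locations[stream[ip++]]
    DISPATCH;

  op_push:  stack[++sp] = stream[ip++]; DISPATCH;
  op_add:   --sp; stack[sp] += stack[sp + 1]; DISPATCH;
  op_print: std::printf("%d\n", stack[sp]); DISPATCH;
  op_halt:  return sp >= 0 ? stack[sp] : 0;
  #undef DISPATCH
  }

  int main() {
    const opcode program[] = { OP_PUSH, 2, OP_PUSH, 3, OP_ADD, OP_PRINT, OP_HALT };
    return run(program) == 5 ? 0 : 1; // prints 5, exits 0
  }
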
Example #17
  r_mint Env::machine_code_id(rmachine_code code) {
    MachineCode* mcode = i(code);

    return (mcode->method_id() << 1) | 1;
  }
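
The (mcode->method_id() << 1) | 1 expression tags the id the way Rubinius tags Fixnums: the low bit set to 1 marks an immediate integer rather than a heap pointer. A tiny sketch of that encoding (illustrative helpers, not the VM's actual macros):

  using r_mint = long;

  static inline r_mint tag_fixnum(long value)    { return (value << 1) | 1; }
  static inline long   untag_fixnum(r_mint tag)  { return tag >> 1; }
  static inline bool   fixnum_p(r_mint tag)      { return (tag & 1) == 1; }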