void SimpleCompPolicy::method_invocation_event( methodHandle m, TRAPS) {
  assert(UseCompiler || CompileTheWorld, "UseCompiler should be set by now.");

  int hot_count = m->invocation_count();
  reset_counter_for_invocation_event(m);
  const char* comment = "count";

  if (!delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler) {
    nmethod* nm = m->code();
    if (nm == NULL) {
      // Not compiled yet: request a normal counter-triggered compilation ("count").
      CompileBroker::compile_method(m, InvocationEntryBci,
                                    m, hot_count, comment, CHECK);
    } else {
#ifdef TIERED

      if (nm->is_compiled_by_c1()) {
        const char* comment = "tier1 overflow";
        CompileBroker::compile_method(m, InvocationEntryBci,
                                      m, hot_count, comment, CHECK);
      }
#endif // TIERED
    }
  }
}
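
The branch structure above is easy to lose in the #ifdefs, so here is a minimal standalone sketch of the same decision. Everything in it (PolicyInput, Decision, decide) is a hypothetical stand-in for illustration, not HotSpot API: compile on the counter event when no nmethod exists yet, and under TIERED additionally recompile when the existing code came from C1.

#include <cstdio>

// Hypothetical stand-ins that model the branch structure above (not HotSpot API).
enum class Decision { None, CompileCount, RecompileTier1Overflow };

struct PolicyInput {
  bool compiler_enabled;   // stands in for UseCompiler && !delayCompilationDuringStartup()
  bool can_be_compiled;    // stands in for canBeCompiled(m)
  bool has_nmethod;        // stands in for m->code() != NULL
  bool nmethod_is_c1;      // stands in for nm->is_compiled_by_c1() (TIERED builds only)
};

// Mirrors the decision in SimpleCompPolicy::method_invocation_event: compile on
// the counter event when there is no nmethod yet; under TIERED, recompile when
// the installed code was produced by C1 ("tier1 overflow").
Decision decide(const PolicyInput& in, bool tiered) {
  if (!in.compiler_enabled || !in.can_be_compiled) return Decision::None;
  if (!in.has_nmethod)            return Decision::CompileCount;
  if (tiered && in.nmethod_is_c1) return Decision::RecompileTier1Overflow;
  return Decision::None;
}

int main() {
  PolicyInput interpreted_hot{true, true, false, false};
  PolicyInput c1_hot{true, true, true, true};
  std::printf("%d %d\n",
              static_cast<int>(decide(interpreted_hot, true)),  // 1: request "count" compile
              static_cast<int>(decide(c1_hot, true)));          // 2: request "tier1 overflow" compile
}
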
Example #2
// Check if the method can be compiled, change level if necessary
void SimpleThresholdPolicy::compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) {
  assert(level <= TieredStopAtLevel, "Invalid compilation level");
  if (level == CompLevel_none) {
    return;
  }
  if (level == CompLevel_aot) {
    if (mh->has_aot_code()) {
      if (PrintTieredEvents) {
        print_event(COMPILE, mh, mh, bci, level);
      }
      MutexLocker ml(Compile_lock);
      NoSafepointVerifier nsv;
      if (mh->has_aot_code() && mh->code() != mh->aot_code()) {
        mh->aot_code()->make_entrant();
        if (mh->has_compiled_code()) {
          mh->code()->make_not_entrant();
        }
        Method::set_code(mh, mh->aot_code());
      }
    }
    return;
  }

  // Check if the method can be compiled. If it cannot be compiled with C1, continue profiling
  // in the interpreter and then compile with C2 (the transition function will request that,
  // see common() ). If the method cannot be compiled with C2 but still can with C1, compile it with
  // pure C1.
  if (!can_be_compiled(mh, level)) {
    if (level == CompLevel_full_optimization && can_be_compiled(mh, CompLevel_simple)) {
      compile(mh, bci, CompLevel_simple, thread);
    }
    return;
  }
  if (bci != InvocationEntryBci && mh->is_not_osr_compilable(level)) {
    return;
  }
  if (!CompileBroker::compilation_is_in_queue(mh)) {
    if (PrintTieredEvents) {
      print_event(COMPILE, mh, mh, bci, level);
    }
    submit_compile(mh, bci, level, thread);
  }
}
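
The fallback described in the comment (a method that cannot be compiled with C2 but still can with C1 is compiled with pure C1) can be sketched in isolation. The code below is a hypothetical model, not HotSpot API; can_compile_at stands in for can_be_compiled(mh, level).

#include <functional>
#include <iostream>

// Hypothetical level constants mirroring CompLevel_simple (C1) and
// CompLevel_full_optimization (C2); not the real HotSpot enum.
enum CompLevel { Level_none = 0, Level_simple = 1, Level_full_optimization = 4 };

// Sketch of the fallback in SimpleThresholdPolicy::compile: if the requested
// (C2) level is not possible but C1 is, retry with C1; otherwise keep interpreting.
CompLevel pick_level(CompLevel requested,
                     const std::function<bool(CompLevel)>& can_compile_at) {
  if (can_compile_at(requested)) return requested;
  if (requested == Level_full_optimization && can_compile_at(Level_simple)) {
    return Level_simple;   // corresponds to compile(mh, bci, CompLevel_simple, thread)
  }
  return Level_none;
}

int main() {
  // A method excluded from C2 compilation but allowed for C1.
  auto c2_excluded = [](CompLevel l) { return l != Level_full_optimization; };
  std::cout << pick_level(Level_full_optimization, c2_excluded) << "\n"; // prints 1 (C1)
}
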
void SimpleCompPolicy::method_invocation_event(const methodHandle& m, JavaThread* thread) {
  const int comp_level = CompLevel_highest_tier;
  const int hot_count = m->invocation_count();
  reset_counter_for_invocation_event(m);
  const char* comment = "count";

  if (is_compilation_enabled() && can_be_compiled(m, comp_level)) {
    nmethod* nm = m->code();
    if (nm == NULL) {
      CompileBroker::compile_method(m, InvocationEntryBci, comp_level, m, hot_count, comment, thread);
    }
  }
}
Example #4
// Compute settings for a CompiledStaticCall. Since we might have to set
// the stub when calling to the interpreter, we need to return arguments.
void CompiledStaticCall::compute_entry(methodHandle m, StaticCallInfo& info) {
  nmethod* m_code = m->code();
  info._callee = m;
  if (m_code != NULL) {
    info._to_interpreter = false;
    info._entry  = m_code->verified_entry_point();
  } else {
    // Callee is interpreted code.  In any case entering the interpreter
    // puts a converter-frame on the stack to save arguments.
    info._to_interpreter = true;
    info._entry      = m()->get_c2i_entry();
  }
}
// Compute settings for a CompiledStaticCall. Since we might have to set
// the stub when calling to the interpreter, we need to return arguments.
void CompiledStaticCall::compute_entry(methodHandle m, StaticCallInfo& info) {
  nmethod* m_code = m->code();
  info._callee = m;
  if (m_code != NULL && m_code->is_in_use()) {
    info._to_interpreter = false;
    info._entry  = m_code->verified_entry_point();
  } else {
    // Callee is interpreted code.  In any case entering the interpreter
    // puts a converter-frame on the stack to save arguments.
    assert(!m->is_method_handle_intrinsic(), "Compiled code should never call interpreter MH intrinsics");
    info._to_interpreter = true;
    info._entry      = m()->get_c2i_entry();
  }
}
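
Both versions follow the same pattern: prefer the nmethod's verified entry point, otherwise route the call through the c2i adapter so a converter frame is built on entry to the interpreter. A minimal standalone sketch of that choice, using hypothetical types (CalleeState, EntryInfo) in place of methodHandle and StaticCallInfo:

#include <cassert>

// Hypothetical stand-ins for the real HotSpot types; for illustration only.
struct CalleeState {
  const void* verified_entry;     // non-null when usable compiled code is installed
  const void* c2i_adapter_entry;  // interpreter entry via the c2i adapter (always present)
};

struct EntryInfo {
  bool        to_interpreter;
  const void* entry;
};

// Mirrors CompiledStaticCall::compute_entry: compiled code wins; otherwise the
// call goes through the c2i adapter, which builds the converter frame.
EntryInfo compute_entry_sketch(const CalleeState& callee) {
  EntryInfo info;
  if (callee.verified_entry != nullptr) {
    info.to_interpreter = false;
    info.entry = callee.verified_entry;
  } else {
    info.to_interpreter = true;
    info.entry = callee.c2i_adapter_entry;
  }
  return info;
}

int main() {
  int compiled_code = 0, c2i_stub = 0;
  EntryInfo a = compute_entry_sketch({&compiled_code, &c2i_stub});
  EntryInfo b = compute_entry_sketch({nullptr, &c2i_stub});
  assert(!a.to_interpreter && a.entry == &compiled_code);
  assert(b.to_interpreter && b.entry == &c2i_stub);
  return 0;
}
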
// is_optimized: Compiler has generated an optimized call (i.e., no inline cache)
// static_bound: The call can be static bound (i.e., no need to use an inline cache)
void CompiledIC::compute_monomorphic_entry(methodHandle method,
                                           KlassHandle receiver_klass,
                                           bool is_optimized,
                                           bool static_bound,
                                           CompiledICInfo& info,
                                           TRAPS) {
  nmethod* method_code = method->code();
  address entry = NULL;
  if (method_code != NULL && method_code->is_in_use()) {
    // Call to compiled code
    if (static_bound || is_optimized) {
      entry      = method_code->verified_entry_point();
    } else {
      entry      = method_code->entry_point();
    }
  }
  if (entry != NULL) {
    // Call to compiled code
    info.set_compiled_entry(entry, (static_bound || is_optimized) ? NULL : receiver_klass(), is_optimized);
  } else {
    // Note: the following problem exists with Compiler1:
    //   - at compile time we may or may not know if the destination is final
    //   - if we know that the destination is final, we will emit an optimized
    //     virtual call (no inline cache), and need a Method* to make a call
    //     to the interpreter
    //   - if we do not know if the destination is final, we emit a standard
    //     virtual call, and use CompiledICHolder to call interpreted code
    //     (no static call stub has been generated)
    //     However in that case we will now notice it is static_bound
    //     and convert the call into what looks to be an optimized
    //     virtual call. This causes problems in verifying the IC because
    //     it looks vanilla but is optimized. Code in is_call_to_interpreted
    //     is aware of this and weakens its asserts.

    // static_bound should imply is_optimized -- otherwise we have a
    // performance bug (a statically-bindable method is called via a
    // dynamically-dispatched call). Note: the reverse implication isn't
    // necessarily true -- the call may have been optimized based on compiler
    // analysis (static_bound is only based on "final" etc.)
#ifdef COMPILER2
#ifdef TIERED
#if defined(ASSERT)
    // We can't check the assert because we don't have the CompiledIC with which
    // to find the address of the call instruction.
    //
    // CodeBlob* cb = find_blob_unsafe(instruction_address());
    // assert(cb->is_compiled_by_c1() || !static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // ASSERT
#else
    assert(!static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // TIERED
#endif // COMPILER2
    if (is_optimized) {
      // Use stub entry
      info.set_interpreter_entry(method()->get_c2i_entry(), method());
    } else {
      // Use icholder entry
      CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass());
      info.set_icholder_entry(method()->get_c2i_unverified_entry(), holder);
    }
  }
  assert(info.is_optimized() == is_optimized, "must agree");
}
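
To summarize the possible outcomes, here is a hypothetical standalone model (none of these names are HotSpot API) of how compute_monomorphic_entry routes a call: a usable nmethod gives a compiled entry, with the receiver-klass check kept only for vanilla inline caches; without compiled code, optimized calls use the c2i stub and vanilla calls go through a CompiledICHolder.

#include <iostream>

// Hypothetical simplified model of the outcomes in CompiledIC::compute_monomorphic_entry.
enum class ICEntryKind {
  CompiledVerified,      // set_compiled_entry(verified_entry_point(), NULL, ...)
  CompiledUnverified,    // set_compiled_entry(entry_point(), receiver_klass, ...)
  InterpreterC2I,        // set_interpreter_entry(get_c2i_entry(), method)
  InterpreterICHolder    // set_icholder_entry(get_c2i_unverified_entry(), holder)
};

ICEntryKind resolve(bool has_usable_code, bool static_bound, bool is_optimized) {
  if (has_usable_code) {
    // Optimized or statically bound calls skip the receiver check; vanilla
    // inline caches keep it and carry the receiver klass along.
    return (static_bound || is_optimized) ? ICEntryKind::CompiledVerified
                                          : ICEntryKind::CompiledUnverified;
  }
  // No compiled code: optimized calls go straight to the c2i stub, vanilla
  // calls through a CompiledICHolder and the unverified c2i entry.
  return is_optimized ? ICEntryKind::InterpreterC2I : ICEntryKind::InterpreterICHolder;
}

int main() {
  std::cout << static_cast<int>(resolve(true, false, false))  << "\n"; // 1: compiled, check kept
  std::cout << static_cast<int>(resolve(false, true, true))   << "\n"; // 2: interpreter via c2i stub
  std::cout << static_cast<int>(resolve(false, false, false)) << "\n"; // 3: interpreter via icholder
}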