Example #1
// If a method is old enough and is still in the interpreter, we want to
// start profiling without waiting for the compiled method to arrive.
// We also take the load on the compilers into account.
bool AdvancedThresholdPolicy::should_create_mdo(methodOop method, CompLevel cur_level) {
  if (cur_level == CompLevel_none &&
      CompileBroker::queue_size(CompLevel_full_optimization) <=
      Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
    int i = method->invocation_count();
    int b = method->backedge_count();
    double k = Tier0ProfilingStartPercentage / 100.0;
    return call_predicate_helper<CompLevel_none>(i, b, k) || loop_predicate_helper<CompLevel_none>(i, b, k);
  }
  return false;
}
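
To see the shape of this decision outside of HotSpot, here is a minimal, compilable sketch. The constants and the simplified "old enough" predicate are illustrative assumptions; the real call_predicate_helper and loop_predicate_helper apply per-tier thresholds that are elided here.

// Standalone sketch of the "start profiling in the interpreter" decision.
// kTier3DelayOn and kTier0ProfilingStartPercentage mirror the HotSpot flags
// in spirit only; the values and the simplified predicate below are
// illustrative assumptions, not the real call_predicate_helper logic.
#include <cstdio>

const int    kTier3DelayOn                  = 5;     // queued jobs per C2 thread
const double kTier0ProfilingStartPercentage = 200.0; // percent of base threshold
const int    kTier0BaseThreshold            = 200;   // hypothetical counter threshold

bool should_create_mdo_sketch(int invocations, int backedges,
                              int c2_queue_size, int c2_compiler_count) {
  // Only consider starting early profiling while the C2 queue is short.
  if (c2_queue_size > kTier3DelayOn * c2_compiler_count) return false;
  double scale = kTier0ProfilingStartPercentage / 100.0;
  // Simplified stand-in for the call/loop predicate helpers: the method is
  // "old enough" once its combined counters pass the scaled base threshold.
  return invocations + backedges > kTier0BaseThreshold * scale;
}

int main() {
  // Busy method, idle C2: start profiling without waiting for compiled code.
  std::printf("%d\n", should_create_mdo_sketch(500, 100, 2, 2));  // prints 1
  // Same method, but C2 is backed up: hold off.
  std::printf("%d\n", should_create_mdo_sketch(500, 100, 50, 2)); // prints 0
}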
Example #2
double AdvancedThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) {
  double queue_size = CompileBroker::queue_size(level);
  int comp_count = compiler_count(level);
  double k = queue_size / (feedback_k * comp_count) + 1;

  // Increase C1 compile threshold when the code cache is filled more
  // than specified by IncreaseFirstTierCompileThresholdAt percentage.
  // The main intention is to keep enough free space for C2 compiled code
  // to achieve peak performance if the code cache is under stress.
  if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization))  {
    double current_reverse_free_ratio = CodeCache::reverse_free_ratio(CodeCache::get_code_blob_type(level));
    if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
      k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
    }
  }
  return k;
}
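
The two feedback terms have very different shapes: the queue term grows linearly, while the code-cache term grows exponentially once the reverse free ratio passes the trigger. A small standalone computation makes this visible; the trigger value of 2.0 stands in for _increase_threshold_at_ratio (in HotSpot it is derived from the IncreaseFirstTierCompileThresholdAt flag) and is an assumption here.

// Standalone look at how the two feedback terms scale. The numbers are
// illustrative; trigger_ratio plays the role of _increase_threshold_at_ratio.
#include <cmath>
#include <cstdio>

double threshold_scale_sketch(double queue_size, int comp_count, int feedback_k,
                              double reverse_free_ratio, double trigger_ratio) {
  // Linear back-pressure from the compile queue.
  double k = queue_size / (feedback_k * comp_count) + 1;
  // Exponential back-pressure once the code cache fills past the trigger.
  if (reverse_free_ratio > trigger_ratio) {
    k *= std::exp(reverse_free_ratio - trigger_ratio);
  }
  return k;
}

int main() {
  // Roomy code cache: the scale is dominated by the queue term.
  std::printf("%.2f\n", threshold_scale_sketch(10, 2, 5, 1.5, 2.0)); // 2.00
  // Cache under stress (reverse free ratio 4 => e^2 multiplier).
  std::printf("%.2f\n", threshold_scale_sketch(10, 2, 5, 4.0, 2.0)); // ~14.78
}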
Example #3
// Common transition function. Given a predicate, determines whether a method should transition to another level.
CompLevel AdvancedThresholdPolicy::common(Predicate p, methodOop method, CompLevel cur_level) {
  if (is_trivial(method)) return CompLevel_simple;

  CompLevel next_level = cur_level;
  int i = method->invocation_count();
  int b = method->backedge_count();

  switch(cur_level) {
  case CompLevel_none:
    // If we were at full profile level, would we switch to full opt?
    if (common(p, method, CompLevel_full_profile) == CompLevel_full_optimization) {
      next_level = CompLevel_full_optimization;
    } else if ((this->*p)(i, b, cur_level)) {
      // C1-generated fully profiled code is about 30% slower than the limited-profile
      // code that has only invocation and backedge counters. The observation is that
      // if the C2 queue is large enough, we can spend too much time in the fully
      // profiled code while waiting for C2 to pick the method from the queue. To
      // alleviate this problem we introduce feedback on the C2 queue size: if the
      // C2 queue is sufficiently long, we compile a limited-profile version first
      // and recompile with full profiling when the load on C2 goes down.
      if (CompileBroker::queue_size(CompLevel_full_optimization) >
          Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
        next_level = CompLevel_limited_profile;
      } else {
        next_level = CompLevel_full_profile;
      }
    }
    break;
  case CompLevel_limited_profile:
    if (is_method_profiled(method)) {
      // Special case: we got here because this method was fully profiled in the interpreter.
      next_level = CompLevel_full_optimization;
    } else {
      methodDataOop mdo = method->method_data();
      if (mdo != NULL) {
        if (mdo->would_profile()) {
          if (CompileBroker::queue_size(CompLevel_full_optimization) <=
              Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
              (this->*p)(i, b, cur_level)) {
            next_level = CompLevel_full_profile;
          }
        } else {
          next_level = CompLevel_full_optimization;
        }
      }
    }
    break;
  case CompLevel_full_profile:
    {
      methodDataOop mdo = method->method_data();
      if (mdo != NULL) {
        if (mdo->would_profile()) {
          int mdo_i = mdo->invocation_count_delta();
          int mdo_b = mdo->backedge_count_delta();
          if ((this->*p)(mdo_i, mdo_b, cur_level)) {
            next_level = CompLevel_full_optimization;
          }
        } else {
          next_level = CompLevel_full_optimization;
        }
      }
    }
    break;
  default:
    break;
  }
  return next_level;
}
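
One way to read this switch is as a small state machine over compilation levels. The sketch below replays only the CompLevel_none branch with a trivial counter predicate and a configurable C2 queue length; the level numbering matches HotSpot's, but the predicate and the delay constant are made up for illustration.

// Toy replay of the CompLevel_none branch: a hot method goes to full profile
// when C2 is keeping up, and to limited profile when the C2 queue is long.
// The predicate and the Tier3DelayOn-style constant are assumptions.
#include <cstdio>

enum CompLevel { CompLevel_none = 0, CompLevel_limited_profile = 2,
                 CompLevel_full_profile = 3, CompLevel_full_optimization = 4 };

const int kDelayOn = 5; // queued jobs per C2 compiler thread

CompLevel next_from_interpreter(int i, int b, int c2_queue, int c2_threads) {
  if (i + b <= 2000) return CompLevel_none;  // made-up "hot enough" predicate
  if (c2_queue > kDelayOn * c2_threads) {
    return CompLevel_limited_profile;        // defer full profiling under load
  }
  return CompLevel_full_profile;
}

int main() {
  std::printf("%d\n", next_from_interpreter(3000, 0, 2, 2));  // 3: full profile
  std::printf("%d\n", next_from_interpreter(3000, 0, 40, 2)); // 2: limited profile
}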
Example #4
double AdvancedThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) {
  double queue_size = CompileBroker::queue_size(level);
  int comp_count = compiler_count(level);
  double k = queue_size / (feedback_k * comp_count) + 1;
  return k;
}
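
For a feel of the numbers: with feedback_k = 5, two compiler threads, and 30 queued jobs, k = 30 / (5 * 2) + 1 = 4, so every compile threshold at that level is effectively quadrupled until the queue drains; an empty queue leaves k at its floor of 1. (A feedback_k of 5 matches the Tier3LoadFeedback default in HotSpot's tiered flags, though that default is an assumption worth checking against your JDK.)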
Example #5
// Common transition function. Given a predicate, determines whether a method should transition to another level.
CompLevel AdvancedThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) {
  CompLevel next_level = cur_level;
  int i = method->invocation_count();
  int b = method->backedge_count();

  if (is_trivial(method)) {
    next_level = CompLevel_simple;
  } else {
    switch(cur_level) {
    case CompLevel_none:
      // If we were at full profile level, would we switch to full opt?
      if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
        next_level = CompLevel_full_optimization;
      } else if ((this->*p)(i, b, cur_level, method)) {
#if INCLUDE_JVMCI
        if (UseJVMCICompiler) {
          // Since JVMCI takes a while to warm up, its queue inevitably backs up during
          // early VM execution.
          next_level = CompLevel_full_profile;
          break;
        }
#endif
        // C1-generated fully profiled code is about 30% slower than the limited-profile
        // code that has only invocation and backedge counters. The observation is that
        // if the C2 queue is large enough, we can spend too much time in the fully
        // profiled code while waiting for C2 to pick the method from the queue. To
        // alleviate this problem we introduce feedback on the C2 queue size: if the
        // C2 queue is sufficiently long, we compile a limited-profile version first
        // and recompile with full profiling when the load on C2 goes down.
        if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
            Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
          next_level = CompLevel_limited_profile;
        } else {
          next_level = CompLevel_full_profile;
        }
      }
      break;
    case CompLevel_limited_profile:
      if (is_method_profiled(method)) {
        // Special case: we got here because this method was fully profiled in the interpreter.
        next_level = CompLevel_full_optimization;
      } else {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                                     Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                                     (this->*p)(i, b, cur_level, method))) {
              next_level = CompLevel_full_profile;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        }
      }
      break;
    case CompLevel_full_profile:
      {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            int mdo_i = mdo->invocation_count_delta();
            int mdo_b = mdo->backedge_count_delta();
            if ((this->*p)(mdo_i, mdo_b, cur_level, method)) {
              next_level = CompLevel_full_optimization;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        }
      }
      break;
    default:
      break;
    }
  }
  return MIN2(next_level, (CompLevel)TieredStopAtLevel);
}
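
The final MIN2 clamp is what makes -XX:TieredStopAtLevel effective: whatever level the state machine proposes, the result never exceeds the configured ceiling. A minimal sketch of just that clamp, where MIN2 is a hand-rolled stand-in for HotSpot's macro and the flag value is an illustrative assumption:

// Sketch of the final clamp: the policy may propose any level, but the
// result is capped at TieredStopAtLevel.
#include <cstdio>

enum CompLevel { CompLevel_none = 0, CompLevel_simple = 1,
                 CompLevel_limited_profile = 2, CompLevel_full_profile = 3,
                 CompLevel_full_optimization = 4 };

template <typename T> T MIN2(T a, T b) { return a < b ? a : b; }

int main() {
  int TieredStopAtLevel = CompLevel_simple;      // e.g. -XX:TieredStopAtLevel=1
  CompLevel proposed = CompLevel_full_optimization;
  CompLevel actual = MIN2(proposed, (CompLevel)TieredStopAtLevel);
  std::printf("proposed=%d actual=%d\n", proposed, actual); // proposed=4 actual=1
}

This clamp is also why capping at level 1 yields a C1-only configuration: every transition the policy computes, including ones toward CompLevel_full_optimization, collapses to CompLevel_simple.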