void NonTieredCompPolicy::trace_frequency_counter_overflow(const methodHandle& m, int branch_bci, int bci) {
  if (TraceInvocationCounterOverflow) {
    MethodCounters* mcs = m->method_counters();
    assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
    InvocationCounter* ic = mcs->invocation_counter();
    InvocationCounter* bc = mcs->backedge_counter();
    ResourceMark rm;
    if (bci == InvocationEntryBci) {
      tty->print("comp-policy cntr ovfl @ %d in entry of ", bci);
    } else {
      tty->print("comp-policy cntr ovfl @ %d in loop of ", bci);
    }
    m->print_value();
    tty->cr();
    ic->print();
    bc->print();
    if (ProfileInterpreter) {
      if (bci != InvocationEntryBci) {
        MethodData* mdo = m->method_data();
        if (mdo != NULL) {
          int count = mdo->bci_to_data(branch_bci)->as_JumpData()->taken();
          tty->print_cr("back branch count = %d", count);
        }
      }
    }
  }
}
void NonTieredCompPolicy::disable_compilation(Method* method) {
  MethodCounters* mcs = method->method_counters();
  if (mcs != NULL) {
    mcs->invocation_counter()->set_state(InvocationCounter::wait_for_nothing);
    mcs->backedge_counter()->set_state(InvocationCounter::wait_for_nothing);
  }
}
// This method can be called by any component of the runtime to notify the policy
// that it is advisable to delay compiling this method.
void NonTieredCompPolicy::delay_compilation(Method* method) {
  MethodCounters* mcs = method->method_counters();
  if (mcs != NULL) {
    mcs->invocation_counter()->decay();
    mcs->backedge_counter()->decay();
  }
}
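The decay() calls above roughly halve each counter: the method still looks executed, but the next overflow is pushed further out. Below is a minimal standalone sketch of that idea, assuming a simple halving scheme; HotnessCounter is an illustrative stand-in, not HotSpot's InvocationCounter (which also packs state and carry bits into the counter word).

#include <cstdio>

// Illustrative stand-in for a hotness counter; not HotSpot's InvocationCounter.
struct HotnessCounter {
  unsigned count = 0;

  void increment() { ++count; }
  // Decay keeps the method looking "executed" while delaying the next overflow.
  void decay()     { count /= 2; }
  bool reached(unsigned threshold) const { return count >= threshold; }
};

int main() {
  HotnessCounter ic;
  for (int i = 0; i < 10000; i++) ic.increment();
  std::printf("before decay: %u\n", ic.count);  // 10000
  ic.decay();
  std::printf("after decay:  %u\n", ic.count);  // 5000: delayed, but not "never executed"
  return 0;
}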
Example #4
 int  interpreter_throwout_count() const        {
   MethodCounters* mcs = method_counters();
   if (mcs == NULL) {
     return 0;
   } else {
     return mcs->interpreter_throwout_count();
   }
 }
Example #5
 // Tracking number of breakpoints, for fullspeed debugging.
 // Only mutated by VM thread.
 u2   number_of_breakpoints()             const {
   MethodCounters* mcs = method_counters();
   if (mcs == NULL) {
     return 0;
   } else {
     return mcs->number_of_breakpoints();
   }
 }
Example #6
 int interpreter_invocation_count() {
   if (TieredCompilation) {
     return invocation_count();
   } else {
     MethodCounters* mcs = method_counters();
     return (mcs == NULL) ? 0 : mcs->interpreter_invocation_count();
   }
 }
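Examples #4 through #6 all share the same shape: MethodCounters are allocated lazily, so read-only accessors null-check the pointer and fall back to a neutral default instead of forcing an allocation. A self-contained sketch of that pattern follows; Counters and MyMethod are illustrative stand-ins, not HotSpot types.

#include <memory>

struct Counters {
  int throwout_count   = 0;
  int invocation_count = 0;
};

class MyMethod {
  std::unique_ptr<Counters> _counters;  // created on the first profiling event
 public:
  Counters* counters() const { return _counters.get(); }

  Counters* get_or_create_counters() {
    if (!_counters) _counters = std::make_unique<Counters>();
    return _counters.get();
  }

  // Read paths tolerate the "not yet allocated" state by returning a
  // neutral default instead of dereferencing a null pointer.
  int throwout_count() const {
    Counters* c = counters();
    return c == nullptr ? 0 : c->throwout_count;
  }
};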
Example #7
// Set carry flags on the counters if necessary
void SimpleThresholdPolicy::handle_counter_overflow(Method* method) {
  MethodCounters *mcs = method->method_counters();
  if (mcs != NULL) {
    set_carry_if_necessary(mcs->invocation_counter());
    set_carry_if_necessary(mcs->backedge_counter());
  }
  MethodData* mdo = method->method_data();
  if (mdo != NULL) {
    set_carry_if_necessary(mdo->invocation_counter());
    set_carry_if_necessary(mdo->backedge_counter());
  }
}
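set_carry_if_necessary() itself is not part of this excerpt; the sketch below is only a guess at its intent, assuming the goal is to latch a sticky carry bit once a counter has climbed past half of its range so that the "this method was hot" fact survives later resets and decays. SimpleCounter and its limit are illustrative, not the real InvocationCounter interface.

// Hypothetical helper in the spirit of set_carry_if_necessary(); the fields
// and the limit are assumptions, not HotSpot's actual implementation.
struct SimpleCounter {
  unsigned count = 0;
  bool     carry = false;
  static const unsigned limit = 1u << 20;  // illustrative counter range

  void set_carry() { carry = true; }
};

static void set_carry_if_necessary(SimpleCounter* c) {
  if (!c->carry && c->count > SimpleCounter::limit / 2) {
    c->set_carry();  // sticky: records that the counter was close to overflow
  }
}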
void NonTieredCompPolicy::reset_counter_for_invocation_event(const methodHandle& m) {
  // Make sure the invocation and backedge counters don't overflow again right
  // away, as would be the case for native methods.

  // BUT also make sure the method doesn't look like it was never executed.
  // Set carry bit and reduce counter's value to min(count, CompileThreshold/2).
  MethodCounters* mcs = m->method_counters();
  assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
  mcs->invocation_counter()->set_carry();
  mcs->backedge_counter()->set_carry();

  assert(!m->was_never_executed(), "don't reset to 0 -- could be mistaken for never-executed");
}
void NonTieredCompPolicy::reset_counter_for_back_branch_event(const methodHandle& m) {
  // Delay next back-branch event but pump up invocation counter to trigger
  // whole method compilation.
  MethodCounters* mcs = m->method_counters();
  assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
  InvocationCounter* i = mcs->invocation_counter();
  InvocationCounter* b = mcs->backedge_counter();

  // Don't set the invocation counter's value too low, otherwise the method
  // will look immature (ic < ~5300), which prevents inlining based on type
  // profiling.
  i->set(i->state(), CompileThreshold);
  // Don't reset the counter too low - it is used to check whether an OSR method is ready.
  b->set(b->state(), CompileThreshold / 2);
}
Example #10
// Get a measure of how much mileage the method has on it.
int MethodData::mileage_of(Method* method) {
  int mileage = 0;
  if (TieredCompilation) {
    mileage = MAX2(method->invocation_count(), method->backedge_count());
  } else {
    int iic = method->interpreter_invocation_count();
    if (mileage < iic)  mileage = iic;
    MethodCounters* mcs = method->method_counters();
    if (mcs != NULL) {
      InvocationCounter* ic = mcs->invocation_counter();
      InvocationCounter* bc = mcs->backedge_counter();
      int icval = ic->count();
      if (ic->carry()) icval += CompileThreshold;
      if (mileage < icval)  mileage = icval;
      int bcval = bc->count();
      if (bc->carry()) bcval += CompileThreshold;
      if (mileage < bcval)  mileage = bcval;
    }
  }
  return mileage;
}
void NonTieredCompPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
  ScopeDesc* sd = trap_scope;
  MethodCounters* mcs;
  InvocationCounter* c;
  for (; !sd->is_top(); sd = sd->sender()) {
    mcs = sd->method()->method_counters();
    if (mcs != NULL) {
      // Reset the invocation counters of inlined methods, since they can also trigger compilations.
      mcs->invocation_counter()->reset();
    }
  }
  mcs = sd->method()->method_counters();
  if (mcs != NULL) {
    c = mcs->invocation_counter();
    if (is_osr) {
      // It was an OSR method, so bump the count higher.
      c->set(c->state(), CompileThreshold);
    } else {
      c->reset();
    }
    mcs->backedge_counter()->reset();
  }
}
Example #12
 // Count of times method was exited via exception while interpreting
 void interpreter_throwout_increment(TRAPS) {
   MethodCounters* mcs = get_method_counters(CHECK);
   if (mcs != NULL) {
     mcs->interpreter_throwout_increment();
   }
 }
Example #13
 void decr_number_of_breakpoints(TRAPS)         {
   MethodCounters* mcs = get_method_counters(CHECK);
   if (mcs != NULL) {
     mcs->decr_number_of_breakpoints();
   }
 }
Example #14
 float rate() const                             {
   MethodCounters* mcs = method_counters();
   return mcs == NULL ? 0 : mcs->rate();
 }
int CppInterpreter::native_entry(Method* method, intptr_t UNUSED, TRAPS) {
  // Make sure method is native and not abstract
  assert(method->is_native() && !method->is_abstract(), "should be");

  JavaThread *thread = (JavaThread *) THREAD;
  ZeroStack *stack = thread->zero_stack();

  // Allocate and initialize our frame
  InterpreterFrame *frame = InterpreterFrame::build(method, CHECK_0);
  thread->push_zero_frame(frame);
  interpreterState istate = frame->interpreter_state();
  intptr_t *locals = istate->locals();

  // Update the invocation counter
  if ((UseCompiler || CountCompiledCalls) && !method->is_synchronized()) {
    MethodCounters* mcs = method->method_counters();
    if (mcs == NULL) {
      CALL_VM_NOCHECK(mcs = InterpreterRuntime::build_method_counters(thread, method));
      if (HAS_PENDING_EXCEPTION)
        goto unwind_and_return;
    }
    InvocationCounter *counter = mcs->invocation_counter();
    counter->increment();
    if (counter->reached_InvocationLimit(mcs->backedge_counter())) {
      CALL_VM_NOCHECK(
        InterpreterRuntime::frequency_counter_overflow(thread, NULL));
      if (HAS_PENDING_EXCEPTION)
        goto unwind_and_return;
    }
  }

  // Lock if necessary
  BasicObjectLock *monitor;
  monitor = NULL;
  if (method->is_synchronized()) {
    monitor = (BasicObjectLock*) istate->stack_base();
    oop lockee = monitor->obj();
    markOop disp = lockee->mark()->set_unlocked();

    monitor->lock()->set_displaced_header(disp);
    if (Atomic::cmpxchg_ptr(monitor, lockee->mark_addr(), disp) != disp) {
      if (thread->is_lock_owned((address) disp->clear_lock_bits())) {
        monitor->lock()->set_displaced_header(NULL);
      }
      else {
        CALL_VM_NOCHECK(InterpreterRuntime::monitorenter(thread, monitor));
        if (HAS_PENDING_EXCEPTION)
          goto unwind_and_return;
      }
    }
  }

  // Get the signature handler
  InterpreterRuntime::SignatureHandler *handler; {
    address handlerAddr = method->signature_handler();
    if (handlerAddr == NULL) {
      CALL_VM_NOCHECK(InterpreterRuntime::prepare_native_call(thread, method));
      if (HAS_PENDING_EXCEPTION)
        goto unlock_unwind_and_return;

      handlerAddr = method->signature_handler();
      assert(handlerAddr != NULL, "eh?");
    }
    if (handlerAddr == (address) InterpreterRuntime::slow_signature_handler) {
      CALL_VM_NOCHECK(handlerAddr =
        InterpreterRuntime::slow_signature_handler(thread, method, NULL, NULL));
      if (HAS_PENDING_EXCEPTION)
        goto unlock_unwind_and_return;
    }
    handler =
      InterpreterRuntime::SignatureHandler::from_handlerAddr(handlerAddr);
  }

  // Get the native function entry point
  address function;
  function = method->native_function();
  assert(function != NULL, "should be set if signature handler is");

  // Build the argument list
  stack->overflow_check(handler->argument_count() * 2, THREAD);
  if (HAS_PENDING_EXCEPTION)
    goto unlock_unwind_and_return;

  void **arguments;
  void *mirror; {
    arguments =
      (void **) stack->alloc(handler->argument_count() * sizeof(void **));
    void **dst = arguments;

    void *env = thread->jni_environment();
    *(dst++) = &env;

    if (method->is_static()) {
      istate->set_oop_temp(
        method->constants()->pool_holder()->java_mirror());
      mirror = istate->oop_temp_addr();
      *(dst++) = &mirror;
    }

    intptr_t *src = locals;
    for (int i = dst - arguments; i < handler->argument_count(); i++) {
      ffi_type *type = handler->argument_type(i);
      if (type == &ffi_type_pointer) {
        if (*src) {
          stack->push((intptr_t) src);
          *(dst++) = stack->sp();
        }
        else {
          *(dst++) = src;
        }
        src--;
      }
      else if (type->size == 4) {
        *(dst++) = src--;
      }
      else if (type->size == 8) {
        src--;
        *(dst++) = src--;
      }
      else {
        ShouldNotReachHere();
      }
    }
  }

  // Set up the Java frame anchor
  thread->set_last_Java_frame();

  // Change the thread state to _thread_in_native
  ThreadStateTransition::transition_from_java(thread, _thread_in_native);

  // Make the call
  intptr_t result[4 - LogBytesPerWord];
  ffi_call(handler->cif(), (void (*)()) function, result, arguments);

  // Change the thread state back to _thread_in_Java.
  // ThreadStateTransition::transition_from_native() cannot be used
  // here because it does not check for asynchronous exceptions.
  // We have to manage the transition ourselves.
  thread->set_thread_state(_thread_in_native_trans);

  // Make sure the new state is visible to the GC thread
  if (os::is_MP()) {
    if (UseMembar) {
      OrderAccess::fence();
    }
    else {
      InterfaceSupport::serialize_memory(thread);
    }
  }

  // Handle safepoint operations, pending suspend requests,
  // and pending asynchronous exceptions.
  if (SafepointSynchronize::do_call_back() ||
      thread->has_special_condition_for_native_trans()) {
    JavaThread::check_special_condition_for_native_trans(thread);
    CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops());
  }

  // Finally we can change the thread state to _thread_in_Java.
  thread->set_thread_state(_thread_in_Java);
  fixup_after_potential_safepoint();

  // Clear the frame anchor
  thread->reset_last_Java_frame();

  // If the result was an oop, unbox it and store it in oop_temp, where the
  // garbage collector can see it, before we release the handle it might be
  // protected by.
  if (handler->result_type() == &ffi_type_pointer) {
    if (result[0])
      istate->set_oop_temp(*(oop *) result[0]);
    else
      istate->set_oop_temp(NULL);
  }

  // Reset handle block
  thread->active_handles()->clear();

 unlock_unwind_and_return:

  // Unlock if necessary
  if (monitor) {
    BasicLock *lock = monitor->lock();
    markOop header = lock->displaced_header();
    oop rcvr = monitor->obj();
    monitor->set_obj(NULL);

    if (header != NULL) {
      if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
        monitor->set_obj(rcvr); {
          HandleMark hm(thread);
          CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(thread, monitor));
        }
      }
    }
  }

 unwind_and_return:

  // Unwind the current activation
  thread->pop_zero_frame();

  // Pop our parameters
  stack->set_sp(stack->sp() + method->size_of_parameters());

  // Push our result
  if (!HAS_PENDING_EXCEPTION) {
    BasicType type = result_type_of(method);
    stack->set_sp(stack->sp() - type2size[type]);

    switch (type) {
    case T_VOID:
      break;

    case T_BOOLEAN:
#ifndef VM_LITTLE_ENDIAN
      result[0] <<= (BitsPerWord - BitsPerByte);
#endif
      SET_LOCALS_INT(*(jboolean *) result != 0, 0);
      break;

    case T_CHAR:
#ifndef VM_LITTLE_ENDIAN
      result[0] <<= (BitsPerWord - BitsPerShort);
#endif
      SET_LOCALS_INT(*(jchar *) result, 0);
      break;

    case T_BYTE:
#ifndef VM_LITTLE_ENDIAN
      result[0] <<= (BitsPerWord - BitsPerByte);
#endif
      SET_LOCALS_INT(*(jbyte *) result, 0);
      break;

    case T_SHORT:
#ifndef VM_LITTLE_ENDIAN
      result[0] <<= (BitsPerWord - BitsPerShort);
#endif
      SET_LOCALS_INT(*(jshort *) result, 0);
      break;

    case T_INT:
#ifndef VM_LITTLE_ENDIAN
      result[0] <<= (BitsPerWord - BitsPerInt);
#endif
      SET_LOCALS_INT(*(jint *) result, 0);
      break;

    case T_LONG:
      SET_LOCALS_LONG(*(jlong *) result, 0);
      break;

    case T_FLOAT:
      SET_LOCALS_FLOAT(*(jfloat *) result, 0);
      break;

    case T_DOUBLE:
      SET_LOCALS_DOUBLE(*(jdouble *) result, 0);
      break;

    case T_OBJECT:
    case T_ARRAY:
      SET_LOCALS_OBJECT(istate->oop_temp(), 0);
      break;

    default:
      ShouldNotReachHere();
    }
  }

  // No deoptimized frames on the stack
  return 0;
}
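The argument-marshalling block above fills an array of pointers matching the ffi_cif that the signature handler prepared, and ffi_call then pushes the actual values according to the platform ABI. Here is a minimal standalone libffi example of that prepare/call pattern; the target function add is made up for illustration, and the sketch assumes the libffi headers are available.

#include <ffi.h>
#include <cstdio>

// Made-up native function standing in for a JNI entry point.
static int add(int a, int b) { return a + b; }

int main() {
  ffi_cif cif;
  ffi_type* arg_types[2] = { &ffi_type_sint, &ffi_type_sint };

  // Describe the call: two ints in, one int out.
  if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, 2, &ffi_type_sint, arg_types) != FFI_OK)
    return 1;

  int a = 2, b = 40;
  void* arg_values[2] = { &a, &b };  // pointers to the argument values
  ffi_arg result;                    // wide enough for any integral return value

  ffi_call(&cif, FFI_FN(add), &result, arg_values);
  std::printf("add(2, 40) = %d\n", (int) result);
  return 0;
}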
Example #16
 void set_rate(float rate, TRAPS) {
   MethodCounters* mcs = get_method_counters(CHECK);
   if (mcs != NULL) {
     mcs->set_rate(rate);
   }
 }
 static void do_method(Method* m) {
   MethodCounters* mcs = m->method_counters();
   if (mcs != NULL) {
     mcs->invocation_counter()->decay();
   }
 }
Example #18
 void set_rate(float rate) {
   MethodCounters* mcs = method_counters();
   if (mcs != NULL) {
     mcs->set_rate(rate);
   }
 }
Example #19
 void set_prev_event_count(int count, TRAPS)    {
   MethodCounters* mcs = get_method_counters(CHECK);
   if (mcs != NULL) {
     mcs->set_interpreter_invocation_count(count);
   }
 }
Example #20
 // Initialization only
 void clear_number_of_breakpoints()             {
   MethodCounters* mcs = method_counters();
   if (mcs != NULL) {
     mcs->clear_number_of_breakpoints();
   }
 }
Example #21
 void set_prev_time(jlong time, TRAPS)          {
   MethodCounters* mcs = get_method_counters(CHECK);
   if (mcs != NULL) {
     mcs->set_prev_time(time);
   }
 }
Example #22
 jlong prev_time() const                        {
   MethodCounters* mcs = method_counters();
   return mcs == NULL ? 0 : mcs->prev_time();
 }
Example #23
 int increment_interpreter_invocation_count(TRAPS) {
   if (TieredCompilation) ShouldNotReachHere();
   MethodCounters* mcs = get_method_counters(CHECK_0);
   return (mcs == NULL) ? 0 : mcs->increment_interpreter_invocation_count();
 }
Example #24
 void set_prev_event_count(int count) {
   MethodCounters* mcs = method_counters();
   if (mcs != NULL) {
     mcs->set_interpreter_invocation_count(count);
   }
 }
Example #25
 void print_method_on(outputStream* st) {
   ProfilerNode::print_method_on(st);
   MethodCounters* mcs = method()->method_counters();
   if (Verbose && mcs != NULL) mcs->invocation_counter()->print_short();
 }
Example #26
 void set_prev_time(jlong time) {
   MethodCounters* mcs = method_counters();
   if (mcs != NULL) {
     mcs->set_prev_time(time);
   }
 }