bool MethodComparator::methods_EMCP(methodOop old_method, methodOop new_method) {
  if (old_method->code_size() != new_method->code_size())
    return false;
  if (check_stack_and_locals_size(old_method, new_method) != 0) {
    // RC_TRACE macro has an embedded ResourceMark
    RC_TRACE(0x00800000, ("Methods %s non-comparable with diagnosis %d",
      old_method->name()->as_C_string(),
      check_stack_and_locals_size(old_method, new_method)));
    return false;
  }

  _old_cp = old_method->constants();
  _new_cp = new_method->constants();
  BytecodeStream s_old(old_method);
  BytecodeStream s_new(new_method);
  _s_old = &s_old;
  _s_new = &s_new;
  _switchable_test = false;
  Bytecodes::Code c_old, c_new;

  while ((c_old = s_old.next()) >= 0) {
    if ((c_new = s_new.next()) < 0 || c_old != c_new)
      return false;

    if (! args_same(c_old, c_new))
      return false;
  }
  return true;
}
Example #2
void BytecodeInterpreter::layout_interpreterState(interpreterState istate,
                                                  frame*    caller,
                                                  frame*    current,
                                                  methodOop method,
                                                  intptr_t* locals,
                                                  intptr_t* stack,
                                                  intptr_t* stack_base,
                                                  intptr_t* monitor_base,
                                                  intptr_t* frame_bottom,
                                                  bool      is_top_frame) {
  istate->set_locals(locals);
  istate->set_method(method);
  istate->set_self_link(istate);
  istate->set_prev_link(NULL);
  // thread will be set by a hacky repurposing of frame::patch_pc()
  // bcp will be set by vframeArrayElement::unpack_on_stack()
  istate->set_constants(method->constants()->cache());
  istate->set_msg(BytecodeInterpreter::method_resume);
  istate->set_bcp_advance(0);
  istate->set_oop_temp(NULL);
  istate->set_mdx(NULL);
  if (caller->is_interpreted_frame()) {
    interpreterState prev = caller->get_interpreterState();
    prev->set_callee(method);
    if (*prev->bcp() == Bytecodes::_invokeinterface)
      prev->set_bcp_advance(5);
    else
      prev->set_bcp_advance(3);
  }
  istate->set_callee(NULL);
  istate->set_monitor_base((BasicObjectLock *) monitor_base);
  istate->set_stack_base(stack_base);
  istate->set_stack(stack);
  istate->set_stack_limit(stack_base - method->max_stack() - 1);
}
Example #3
// update_rate() is called from select_task() while holding a compile queue lock.
void AdvancedThresholdPolicy::update_rate(jlong t, methodOop m) {
  if (is_old(m)) {
    // We don't remove old methods from the queue,
    // so we can just zero the rate.
    m->set_rate(0);
    return;
  }

  // We don't update the rate if we've just come out of a safepoint.
  // delta_s is the time since last safepoint in milliseconds.
  jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
  jlong delta_t = t - (m->prev_time() != 0 ? m->prev_time() : start_time()); // milliseconds since the last measurement
  // How many events were there since the last time?
  int event_count = m->invocation_count() + m->backedge_count();
  int delta_e = event_count - m->prev_event_count();

  // We should be running for at least 1ms.
  if (delta_s >= TieredRateUpdateMinTime) {
    // And we must've taken the previous point at least 1ms before.
    if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
      m->set_prev_time(t);
      m->set_prev_event_count(event_count);
      m->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond
    } else if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
      // If nothing happened for 25ms, zero the rate. Don't modify prev values.
      m->set_rate(0);
    }
  }
}
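The rate above is just events per millisecond, gated by two time windows. A minimal standalone sketch of that arithmetic (plain C++; the window constants and counter values are assumed stand-ins for TieredRateUpdateMinTime/TieredRateUpdateMaxTime, not the real flag defaults):

#include <cstdio>
#include <stdint.h>

// Illustrative stand-ins for the HotSpot flags; the values are assumptions.
static const int64_t kMinTimeMs = 1;   // ~TieredRateUpdateMinTime
static const int64_t kMaxTimeMs = 25;  // ~TieredRateUpdateMaxTime

// Returns the new rate, or old_rate if neither guard condition fires.
static float updated_rate(int64_t delta_s, int64_t delta_t, int delta_e, float old_rate) {
  if (delta_s < kMinTimeMs) return old_rate;   // too close to the last safepoint
  if (delta_t >= kMinTimeMs && delta_e > 0)
    return (float)delta_e / (float)delta_t;    // events per millisecond
  if (delta_t > kMaxTimeMs && delta_e == 0)
    return 0.0f;                               // method went quiet
  return old_rate;
}

int main() {
  // 400 new invocation/backedge events over 200 ms -> 2.00 events/ms.
  std::printf("%.2f\n", updated_rate(50, 200, 400, 0.0f));
  return 0;
}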
Example #4
// for hashing into the table
static int hash(methodOop method) {
  // The point here is to try to make something fairly unique
  // out of the fields we can read without grabbing any locks
  // since the method may be locked when we need the hash.
  return (
    method->code_size() ^
    method->max_stack() ^
    method->max_locals() ^
    method->size_of_parameters());
}
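For what it's worth, the XOR mixing above can be reproduced with plain integers; the field values in this standalone sketch are made up:

#include <cstdio>

// Stand-alone sketch of the lock-free hash above; the four inputs mimic
// code_size, max_stack, max_locals and size_of_parameters (values assumed).
static int method_hash(int code_size, int max_stack, int max_locals, int params) {
  return code_size ^ max_stack ^ max_locals ^ params;
}

int main() {
  std::printf("%d\n", method_hash(42, 6, 4, 2));  // prints 42 for these inputs
  return 0;
}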
bool MethodComparator::methods_switchable(methodOop old_method, methodOop new_method,
                                          BciMap &bci_map) {
  if (old_method->code_size() > new_method->code_size())
    // Something has definitely been deleted in the new method, compared to the old one.
    return false;

  if (check_stack_and_locals_size(old_method, new_method) != 0)
    return false;

  _old_cp = old_method->constants();
  _new_cp = new_method->constants();
  BytecodeStream s_old(old_method);
  BytecodeStream s_new(new_method);
  _s_old = &s_old;
  _s_new = &s_new;
  _bci_map = &bci_map;
  _switchable_test = true;
  GrowableArray<int> fwd_jmps(16);
  _fwd_jmps = &fwd_jmps;
  Bytecodes::Code c_old, c_new;

  while ((c_old = s_old.next()) >= 0) {
    if ((c_new = s_new.next()) < 0)
      return false;
    if (! (c_old == c_new && args_same(c_old, c_new))) {
      int old_bci = s_old.bci();
      int new_st_bci = s_new.bci();
      bool found_match = false;
      do {
        c_new = s_new.next();
        if (c_new == c_old && args_same(c_old, c_new)) {
          found_match = true;
          break;
        }
      } while (c_new >= 0);
      if (! found_match)
        return false;
      int new_end_bci = s_new.bci();
      bci_map.store_fragment_location(old_bci, new_st_bci, new_end_bci);
    }
  }

  // Now we can test all forward jumps
  for (int i = 0; i < fwd_jmps.length() / 2; i++) {
    if (! bci_map.old_and_new_locations_same(fwd_jmps.at(i*2), fwd_jmps.at(i*2+1))) {
      RC_TRACE(0x00800000,
        ("Fwd jump miss: old dest = %d, calc new dest = %d, act new dest = %d",
        fwd_jmps.at(i*2), bci_map.new_bci_for_old(fwd_jmps.at(i*2)),
        fwd_jmps.at(i*2+1)));
      return false;
    }
  }

  return true;
}
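The fragment map records where each mismatching stretch of old bytecodes now begins and ends in the new method, and the final loop checks that every recorded forward jump still lands where the map says it should. The real BciMap belongs to HotSpot's RedefineClasses support; the toy map below is only a standalone illustration of the idea, with a made-up lookup rule:

#include <cstdio>
#include <vector>
#include <utility>

// Toy stand-in for BciMap: each entry says "old bci B now starts at new bci N".
// Illustration only; the real BciMap also records where each fragment ends.
struct ToyBciMap {
  std::vector< std::pair<int,int> > entries;  // (old_bci, new_bci)
  void store(int old_bci, int new_bci) { entries.push_back(std::make_pair(old_bci, new_bci)); }
  int new_bci_for_old(int old_bci) const {
    int shift = 0;
    for (size_t i = 0; i < entries.size(); i++)
      if (entries[i].first <= old_bci) shift = entries[i].second - entries[i].first;
    return old_bci + shift;
  }
};

int main() {
  ToyBciMap map;
  map.store(10, 14);  // a 4-byte fragment was inserted before old bci 10
  // A forward jump that targeted old bci 20 should now target new bci 24.
  int old_dest = 20, actual_new_dest = 24;
  std::printf("%s\n", map.new_bci_for_old(old_dest) == actual_new_dest ? "match" : "miss");
  return 0;
}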
// Simple methods compile as well with C1 as with C2.
// Determine whether a given method is such a case.
bool SimpleThresholdPolicy::is_trivial(methodOop method) {
  if (method->is_accessor()) return true;
  if (method->code() != NULL) {
    methodDataOop mdo = method->method_data();
    if (mdo != NULL && mdo->num_loops() == 0 &&
        (method->code_size() < 5 || ((mdo->num_blocks() < 4) && (method->code_size() < 15)))) {
      return !mdo->would_profile();
    }
  }
  return false;
}
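Spelled out with explicit parentheses and plain integers, the size check in that predicate looks like the standalone sketch below (this covers only the MDO-based branch; the field values are assumptions):

#include <cstdio>

// Stand-alone restatement of the size-based part of the triviality test;
// inputs mimic code_size, num_loops, num_blocks and would_profile.
static bool looks_trivial(int code_size, int num_loops, int num_blocks, bool would_profile) {
  if (num_loops != 0) return false;
  bool small_enough = (code_size < 5) || ((num_blocks < 4) && (code_size < 15));
  return small_enough && !would_profile;
}

int main() {
  std::printf("%d\n", looks_trivial(12, 0, 3, false));  // prints 1
  std::printf("%d\n", looks_trivial(12, 0, 5, false));  // prints 0
  return 0;
}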
Example #7
// If a method is old enough and is still in the interpreter we would want to
// start profiling without waiting for the compiled method to arrive.
// We also take the load on the compilers into account.
bool AdvancedThresholdPolicy::should_create_mdo(methodOop method, CompLevel cur_level) {
  if (cur_level == CompLevel_none &&
      CompileBroker::queue_size(CompLevel_full_optimization) <=
      Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
    int i = method->invocation_count();
    int b = method->backedge_count();
    double k = Tier0ProfilingStartPercentage / 100.0;
    return call_predicate_helper<CompLevel_none>(i, b, k) || loop_predicate_helper<CompLevel_none>(i, b, k);
  }
  return false;
}
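The gate in front of the predicates is simply "the C2 queue is not too long relative to the number of C2 compiler threads". A standalone sketch of that check, with Tier3DelayOn and the counts replaced by assumed example values:

#include <cstdio>

// Illustrative queue-length gate; tier3_delay_on stands in for Tier3DelayOn
// and the numbers below are examples, not the real flag defaults.
static bool c2_queue_not_too_long(int c2_queue_size, int c2_compiler_count, int tier3_delay_on) {
  return c2_queue_size <= tier3_delay_on * c2_compiler_count;
}

int main() {
  // With 2 C2 compiler threads and a delay factor of 5, up to 10 queued
  // C2 tasks still allow profiling to start early.
  std::printf("%d\n", c2_queue_not_too_long(8, 2, 5));   // prints 1
  std::printf("%d\n", c2_queue_not_too_long(12, 2, 5));  // prints 0
  return 0;
}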
Example #8
// Check if this method has been stale for a given number of milliseconds.
// See select_task().
bool AdvancedThresholdPolicy::is_stale(jlong t, jlong timeout, methodOop m) {
  jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
  jlong delta_t = t - m->prev_time();
  if (delta_t > timeout && delta_s > timeout) {
    int event_count = m->invocation_count() + m->backedge_count();
    int delta_e = event_count - m->prev_event_count();
    // Return true if there were no events.
    return delta_e == 0;
  }
  return false;
}
Example #9
// Apply heuristics and return true if x should be compiled before y
bool AdvancedThresholdPolicy::compare_methods(methodOop x, methodOop y) {
  if (x->highest_comp_level() > y->highest_comp_level()) {
    // recompilation after deopt
    return true;
  } else if (x->highest_comp_level() == y->highest_comp_level()) {
    if (weight(x) > weight(y)) {
      return true;
    }
  }
  return false;
}
Example #10
// Get a measure of how much mileage the method has on it.
int methodDataOopDesc::mileage_of(methodOop method) {
  int mileage = 0;
#ifdef COMPILER2
  int iic = method->interpreter_invocation_count();
  if (mileage < iic)  mileage = iic; 
#endif
  int icval = method->invocation_counter()->count();
  if (mileage < icval)  mileage = icval; 
  int bcval = method->backedge_counter()->count();
  if (mileage < bcval)  mileage = bcval;
  return mileage;
}
int AbstractInterpreter::layout_activation(methodOop method,
                                           int       tempcount,
                                           int       popframe_extra_args,
                                           int       moncount,
                                           int       caller_actual_parameters,
                                           int       callee_param_count,
                                           int       callee_locals,
                                           frame*    caller,
                                           frame*    interpreter_frame,
                                           bool      is_top_frame,
                                           bool      is_bottom_frame) {
  assert(popframe_extra_args == 0, "what to do?");
  assert(!is_top_frame || (!callee_locals && !callee_param_count),
         "top frame should have no caller");

  // This code must exactly match what InterpreterFrame::build
  // does (the full InterpreterFrame::build, that is, not the
  // one that creates empty frames for the deoptimizer).
  //
  // If interpreter_frame is not NULL then it will be filled in.
  // Its size is determined by a previous call to this method,
  // so it should be correct.
  //
  // Note that tempcount is the current size of the expression
  // stack.  For topmost frames we will allocate a full-sized
  // expression stack and not the trimmed version that non-top
  // frames have.

  int header_words        = InterpreterFrame::header_words;
  int monitor_words       = moncount * frame::interpreter_frame_monitor_size();
  int stack_words         = is_top_frame ? method->max_stack() : tempcount;
  int callee_extra_locals = callee_locals - callee_param_count;

  if (interpreter_frame) {
    intptr_t *locals        = interpreter_frame->fp() + method->max_locals();
    interpreterState istate = interpreter_frame->get_interpreterState();
    intptr_t *monitor_base  = (intptr_t*) istate;
    intptr_t *stack_base    = monitor_base - monitor_words;
    intptr_t *stack         = stack_base - tempcount - 1;

    BytecodeInterpreter::layout_interpreterState(istate,
                                                 caller,
                                                 NULL,
                                                 method,
                                                 locals,
                                                 stack,
                                                 stack_base,
                                                 monitor_base,
                                                 NULL,
                                                 is_top_frame);
  }
  return header_words + monitor_words + stack_words + callee_extra_locals;
}
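The return value is a plain sum in words. A standalone restatement of that sum with assumed sizes (header_words and the per-monitor size are platform specific in the real code):

#include <cstdio>

// Stand-alone version of the frame-size arithmetic above; all constants in
// main() are illustrative placeholders.
static int frame_size_in_words(int header_words, int moncount, int monitor_size,
                               int max_stack, int tempcount, bool is_top_frame,
                               int callee_locals, int callee_param_count) {
  int monitor_words       = moncount * monitor_size;
  int stack_words         = is_top_frame ? max_stack : tempcount;
  int callee_extra_locals = callee_locals - callee_param_count;
  return header_words + monitor_words + stack_words + callee_extra_locals;
}

int main() {
  // 20 header words, one 2-word monitor, max_stack of 8 on a top frame,
  // no extra callee locals: 20 + 2 + 8 + 0 = 30 words.
  std::printf("%d\n", frame_size_in_words(20, 1, 2, 8, 3, true, 0, 0));
  return 0;
}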
Example #12
BasicType Bytecode_static::result_type(methodOop method) const {
  int index = java_hwrd_at(1);
  constantPoolOop constants = method->constants(); 
  symbolOop field_type = constants->signature_ref_at(index);
  BasicType basic_type = FieldType::basic_type(field_type);
  return basic_type;
}
Example #13
// Get a measure of how much mileage the method has on it.
int methodDataOopDesc::mileage_of(methodOop method) {
  int mileage = 0;
  int iic = method->interpreter_invocation_count();
  if (mileage < iic)  mileage = iic;

  InvocationCounter* ic = method->invocation_counter();
  InvocationCounter* bc = method->backedge_counter();

  int icval = ic->count();
  if (ic->carry()) icval += CompileThreshold;
  if (mileage < icval)  mileage = icval;
  int bcval = bc->count();
  if (bc->carry()) bcval += CompileThreshold;
  if (mileage < bcval)  mileage = bcval;
  return mileage;
}
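The difference from the earlier mileage_of variant is the carry handling: once a counter has wrapped, CompileThreshold is added back so the mileage keeps growing. A standalone sketch with made-up counts and threshold:

#include <cstdio>
#include <algorithm>

// Illustration of the carry adjustment; compile_threshold and the counts in
// main() are example values only.
static int mileage(int interp_count, int inv_count, bool inv_carry,
                   int be_count, bool be_carry, int compile_threshold) {
  int m = std::max(0, interp_count);
  int icval = inv_count + (inv_carry ? compile_threshold : 0);
  int bcval = be_count  + (be_carry  ? compile_threshold : 0);
  return std::max(m, std::max(icval, bcval));
}

int main() {
  // The invocation counter wrapped with carry set: 300 + 10000 dominates.
  std::printf("%d\n", mileage(5000, 300, true, 900, false, 10000));  // prints 10300
  return 0;
}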
Example #14
void BlockScope::initialize(methodOop method, klassOop methodHolder, Scope* p, InlinedScope* s, RScope* rs, SendInfo* info) {
  InlinedScope::initialize(method, methodHolder, s, rs, info);
  _parent = p; 
  _self_is_initialized = false;
  if (s == NULL) {
    // top scope: create a context (currently always initialized for blocks)
    // (context is set up by the prologue node)
    _context = new SAPReg(this, PrologueBCI, EpilogueBCI);
  } else {
    // set up for context passed in by caller
    // (_context may be changed later if this scope allocates its own context)
    switch (method->block_info()) {
      case methodOopDesc::expects_nil:        // no context needed
        _context = NULL;
        break;
      case methodOopDesc::expects_self:
        _context = self()->preg();
        fatal("self not known yet -- fix this");
        break;
      case methodOopDesc::expects_parameter:  // fix this -- should find which
        Unimplemented();
        break;
      case methodOopDesc::expects_context:
        if (p->isInlinedScope()) {
          _context = ((InlinedScope*)p)->context();
        } else {
          // shouldn't inline a block unless its parent was inlined, too
          fatal("shouldn't inline");
        }
        break;
      default:
        fatal("unexpected incoming info");
    }
  }
}
Example #15
void collect_profiled_methods(methodOop m) {
  methodHandle mh(Thread::current(), m);
  if ((m->method_data() != NULL) &&
      (PrintMethodData || CompilerOracle::should_print(mh))) {
    collected_profiled_methods->push(m);
  }
}
Example #16
// Is method profiled enough?
bool AdvancedThresholdPolicy::is_method_profiled(methodOop method) {
  methodDataOop mdo = method->method_data();
  if (mdo != NULL) {
    int i = mdo->invocation_count_delta();
    int b = mdo->backedge_count_delta();
    return call_predicate_helper<CompLevel_full_profile>(i, b, 1);
  }
  return false;
}
Example #17
  void trace(methodOop method, address bcp, uintptr_t tos, uintptr_t tos2) {
#ifndef PRODUCT
    MutexLocker ml(BytecodeTrace_lock);
    if (_current_method != method) {
      // Note 1: This code will not work as expected with true MT/MP.
      //         Need an explicit lock or a different solution.
      ResourceMark rm;
      tty->cr();
      tty->print("[%d] ", (int) Thread::current()->osthread()->thread_id());
      method->print_name(tty);
      tty->cr();
      _current_method = method;
    }
    if (Verbose) {
      const char* format;
      switch (Bytecodes::length_at(bcp)) {
        case  1: format = "%x  %02x         "    ; break;
        case  2: format = "%x  %02x %02x      "  ; break;
        case  3: format = "%x  %02x %02x %02x   "; break;
        default: format = "%x  %02x %02x %02x .."; break;
      }
      tty->print(format, bcp, *bcp, *(bcp+1), *(bcp+2));
    }
    Bytecodes::Code code;
    if (_previous_bytecode == Bytecodes::_wide) {
      code = Bytecodes::cast(*(bcp+1));
    } else {
      code = Bytecodes::cast(*bcp);
    }
    int bci = bcp - method->code_base();
    const char* format = _previous_bytecode == Bytecodes::_wide ? Bytecodes::wide_format(code) : Bytecodes::format(code);
    tty->print("[%d] ", (int) Thread::current()->osthread()->thread_id());
    if (Verbose) {
      tty->print("%8d  %4d  0x%016lx 0x%016lx %s", 
	   BytecodeCounter::counter_value(), bci, tos, tos2, Bytecodes::name(code));
    } else {
      tty->print("%8d  %4d  %s", 
	   BytecodeCounter::counter_value(), bci, Bytecodes::name(code));
    }
    print_attributes(bcp, bci, format);
    tty->cr();
    _previous_bytecode = code;
#endif
  }
Example #18
// Determine if a method should be compiled with a normal entry point at a different level.
CompLevel AdvancedThresholdPolicy::call_event(methodOop method,  CompLevel cur_level) {
  CompLevel osr_level = (CompLevel) method->highest_osr_comp_level();
  CompLevel next_level = common(&AdvancedThresholdPolicy::call_predicate, method, cur_level);

  // If OSR method level is greater than the regular method level, the levels should be
  // equalized by raising the regular method level in order to avoid OSRs during each
  // invocation of the method.
  if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
    methodDataOop mdo = method->method_data();
    guarantee(mdo != NULL, "MDO should not be NULL");
    if (mdo->invocation_count() >= 1) {
      next_level = CompLevel_full_optimization;
    }
  } else {
    next_level = MAX2(osr_level, next_level);
  }

  return next_level;
}
Example #19
// If deoptimization happens, this function returns the point where the interpreter reexecutes
// the bytecode.
// Note: Bytecodes::_athrow is a special case in that it does not return
//       Interpreter::deopt_entry(vtos, 0) like others
address AbstractInterpreter::deopt_reexecute_entry(methodOop method, address bcp) {
  assert(method->contains(bcp), "just checkin'");
  Bytecodes::Code code   = Bytecodes::java_code_at(bcp);
#ifdef COMPILER1
  if (code == Bytecodes::_athrow) {
    return Interpreter::rethrow_exception_entry();
  }
#endif /* COMPILER1 */
  return Interpreter::deopt_entry(vtos, 0);
}
Example #20
BasicType CppInterpreter::result_type_of(methodOop method) {
  BasicType t;
  switch (method->result_index()) {
    case 0 : t = T_BOOLEAN; break;
    case 1 : t = T_CHAR;    break;
    case 2 : t = T_BYTE;    break;
    case 3 : t = T_SHORT;   break;
    case 4 : t = T_INT;     break;
    case 5 : t = T_LONG;    break;
    case 6 : t = T_VOID;    break;
    case 7 : t = T_FLOAT;   break;
    case 8 : t = T_DOUBLE;  break;
    case 9 : t = T_OBJECT;  break;
    default: ShouldNotReachHere();
  }
  assert(AbstractInterpreter::BasicType_as_index(t) == method->result_index(),
         "out of step with AbstractInterpreter::BasicType_as_index");
  return t;
}
Example #21
// Determine if we should do an OSR compilation of a given method.
CompLevel AdvancedThresholdPolicy::loop_event(methodOop method, CompLevel cur_level) {
  if (cur_level == CompLevel_none) {
    // If there is a live OSR method that means that we deopted to the interpreter
    // for the transition.
    CompLevel osr_level = (CompLevel)method->highest_osr_comp_level();
    if (osr_level > CompLevel_none) {
      return osr_level;
    }
  }
  return common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level);
}
static BasicType constant_pool_type(methodOop method, int index) {
  constantTag tag = method->constants()->tag_at(index);
       if (tag.is_int              ()) return T_INT;
  else if (tag.is_float            ()) return T_FLOAT;
  else if (tag.is_long             ()) return T_LONG;
  else if (tag.is_double           ()) return T_DOUBLE;
  else if (tag.is_string           ()) return T_OBJECT;
  else if (tag.is_unresolved_string()) return T_OBJECT;
  else if (tag.is_klass            ()) return T_OBJECT;
  ShouldNotReachHere();
  return T_ILLEGAL;
}
Example #23
int AbstractInterpreter::size_top_interpreter_activation(methodOop method)
{
#ifdef PPC
  StackFrame frame;

  int call_stub_frame = round_to(
    StubRoutines::call_stub_base_size() +
    method->max_locals() * wordSize, StackAlignmentInBytes);

  int interpreter_frame = round_to(
    frame.unaligned_size() +
    slop_factor + 
    method->max_stack() * wordSize +
    (method->is_synchronized() ?
     frame::interpreter_frame_monitor_size() * wordSize : 0) +
    sizeof(BytecodeInterpreter), StackAlignmentInBytes);

  return (call_stub_frame + interpreter_frame) / wordSize;
#else
  Unimplemented();
#endif // PPC
}
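Both pieces are rounded up to the stack alignment and then converted from bytes to words. A standalone sketch of that rounding with assumed sizes (64-bit words, 16-byte alignment; the base frame sizes are invented placeholders):

#include <cstdio>

// round_to rounds x up to a multiple of align (align must be a power of two).
static int round_to(int x, int align) { return (x + align - 1) & ~(align - 1); }

int main() {
  const int wordSize = 8, align = 16;
  int call_stub_frame   = round_to(/*call stub base*/ 112 + /*max_locals*/ 4 * wordSize, align);
  int interpreter_frame = round_to(/*frame + state */ 368 + /*max_stack */ 6 * wordSize, align);
  std::printf("%d words\n", (call_stub_frame + interpreter_frame) / wordSize);  // 70 words
  return 0;
}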
Example #24
bool NonTieredCompPolicy::is_mature(methodOop method) {
    methodDataOop mdo = method->method_data();
    assert(mdo != NULL, "Should be");
    uint current = mdo->mileage_of(method);
    uint initial = mdo->creation_mileage();
    if (current < initial)
        return true;  // some sort of overflow
    uint target;
    if (ProfileMaturityPercentage <= 0)
        target = (uint) -ProfileMaturityPercentage;  // absolute value
    else
        target = (uint)( (ProfileMaturityPercentage * CompileThreshold) / 100 );
    return (current >= initial + target);
}
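The maturity target is a fraction of CompileThreshold, with a negative percentage interpreted as an absolute event count. A standalone restatement with example numbers:

#include <cstdio>

// Stand-alone maturity test; the percentage and threshold in main() are
// illustrative, not the real flag defaults.
static bool profile_is_mature(unsigned current, unsigned initial,
                              int maturity_pct, int compile_threshold) {
  if (current < initial) return true;  // some sort of overflow
  unsigned target = (maturity_pct <= 0)
      ? (unsigned) -maturity_pct       // absolute value
      : (unsigned)((maturity_pct * compile_threshold) / 100);
  return current >= initial + target;
}

int main() {
  // 20% of a 10000 CompileThreshold -> 2000 events past the creation mileage.
  std::printf("%d\n", profile_is_mature(3500, 1000, 20, 10000));  // prints 1
  std::printf("%d\n", profile_is_mature(2500, 1000, 20, 10000));  // prints 0
  return 0;
}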
Example #25
// Given that a new (potential) event has come in,
// maintain the current JVMTI location on a per-thread per-env basis
// and use it to filter out duplicate events:
// - instruction rewrites
// - breakpoint followed by single step
// - single step at a breakpoint
void JvmtiEnvThreadState::compare_and_set_current_location(methodOop new_method,
                                                           address new_location, jvmtiEvent event) {

  int new_bci = new_location - new_method->code_base();

  // The method is identified and stored as a jmethodID which is safe in this
  // case because the class cannot be unloaded while a method is executing.
  jmethodID new_method_id = new_method->jmethod_id();

  // the last breakpoint or single step was at this same location
  if (_current_bci == new_bci && _current_method_id == new_method_id) {
    switch (event) {
    case JVMTI_EVENT_BREAKPOINT:
      // Repeat breakpoint is complicated. If we previously posted a breakpoint
      // event at this location and if we also single stepped at this location
      // then we skip the duplicate breakpoint.
      _breakpoint_posted = _breakpoint_posted && _single_stepping_posted;
      break;
    case JVMTI_EVENT_SINGLE_STEP:
      // Repeat single step is easy: just don't post it again.
      // If a step is pending for popframe then it may not be
      // a repeat step. The new_bci and method_id are the same as the current
      // bci and method_id after the pop and step for recursive calls.
      // This case has been handled by clearing the location.
      _single_stepping_posted = true;
      break;
    default:
      assert(false, "invalid event value passed");
      break;
    }
    return;
  }

  set_current_location(new_method_id, new_bci);
  _breakpoint_posted = false;
  _single_stepping_posted = false;
}
int MethodComparator::check_stack_and_locals_size(methodOop old_method, methodOop new_method) {
  if (old_method->max_stack() != new_method->max_stack()) {
    return 1;
  } else if (old_method->max_locals() != new_method->max_locals()) {
    return 2;
  } else if (old_method->size_of_parameters() != new_method->size_of_parameters()) {
    return 3;
  } else return 0;
}
Example #27
void FlatProfiler::interpreted_update(methodOop method, klassOop klass, TickPosition where) {
  int index = entry(method->selector_or_method()->identity_hash());
  if (!table[index]) {
    table[index] = new interpretedNode(method, klass, where);
  } else {
    for(pnode* node = table[index]; node; node = node->next()) {
      if (node->match(method, klass)) {
        node->update(where);
        return;
      }
      if (!node->next())
        node->set_next(new interpretedNode(method, klass, where));
    }
  }
}
Example #28
int CppInterpreter::empty_entry(methodOop method, intptr_t UNUSED, TRAPS) {
  JavaThread *thread = (JavaThread *) THREAD;
  ZeroStack *stack = thread->zero_stack();

  // Drop into the slow path if we need a safepoint check
  if (SafepointSynchronize::do_call_back()) {
    return normal_entry(method, 0, THREAD);
  }

  // Pop our parameters
  stack->set_sp(stack->sp() + method->size_of_parameters());

  // No deoptimized frames on the stack
  return 0;
}
Example #29
// RedefineClasses() API support:
// If this constantPoolCacheEntry refers to old_method then update it
// to refer to new_method.
bool ConstantPoolCacheEntry::adjust_method_entry(methodOop old_method,
       methodOop new_method, bool * trace_name_printed) {

  if (is_vfinal()) {
    // virtual and final so _f2 contains method ptr instead of vtable index
    if (f2_as_vfinal_method() == old_method) {
      // match old_method so need an update
      // NOTE: can't use set_f2_as_vfinal_method as it asserts on different values
      _f2 = (intptr_t)new_method;
      if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
        if (!(*trace_name_printed)) {
          // RC_TRACE_MESG macro has an embedded ResourceMark
          RC_TRACE_MESG(("adjust: name=%s",
            Klass::cast(old_method->method_holder())->external_name()));
          *trace_name_printed = true;
        }
        // RC_TRACE macro has an embedded ResourceMark
        RC_TRACE(0x00400000, ("cpc vf-entry update: %s(%s)",
          new_method->name()->as_C_string(),
          new_method->signature()->as_C_string()));
      }

      return true;
    }

    // f1() is not used with virtual entries so bail out
    return false;
  }

  if ((oop)_f1 == NULL) {
    // NULL f1() means this is a virtual entry so bail out.
    // We are assuming that the vtable index does not need to change.
    return false;
  }

  if ((oop)_f1 == old_method) {
    _f1 = new_method;
    if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
      if (!(*trace_name_printed)) {
        // RC_TRACE_MESG macro has an embedded ResourceMark
        RC_TRACE_MESG(("adjust: name=%s",
          Klass::cast(old_method->method_holder())->external_name()));
        *trace_name_printed = true;
      }
      // RC_TRACE macro has an embedded ResourceMark
      RC_TRACE(0x00400000, ("cpc entry update: %s(%s)",
        new_method->name()->as_C_string(),
        new_method->signature()->as_C_string()));
    }

    return true;
  }

  return false;
}
Example #30
void mixinOopDesc::add_method(methodOop method) {
  objArrayOop old_array = methods();
  symbolOop selector = method->selector();
  // Find out if a method with the same selector exists.
  for (int index = 1; index <= old_array->length(); index++) {
    assert(old_array->obj_at(index)->is_method(), "must be method");
    methodOop m = methodOop(old_array->obj_at(index));
    if (m->selector() == selector) {
      objArrayOop new_array = old_array->copy();
      new_array->obj_at_put(index, method);
      set_methods(new_array);
      return;
    }
  }
  // Extend the array
  set_methods(old_array->copy_add(method));
}
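The update is copy-on-write: work on a copy of the methods array, either replacing the slot whose selector matches or appending at the end, and only then install the copy. A standalone sketch of the same pattern using std::vector and a string selector in place of objArrayOop and symbolOop:

#include <cstdio>
#include <string>
#include <vector>

// Toy method with only a selector; everything here is an illustration.
struct ToyMethod { std::string selector; };

static std::vector<ToyMethod> add_method(const std::vector<ToyMethod>& old_methods,
                                         const ToyMethod& method) {
  std::vector<ToyMethod> copy = old_methods;       // never mutate the old array
  for (size_t i = 0; i < copy.size(); i++) {
    if (copy[i].selector == method.selector) {     // same selector: replace in place
      copy[i] = method;
      return copy;
    }
  }
  copy.push_back(method);                          // otherwise extend the copy
  return copy;
}

int main() {
  ToyMethod print_on = { "printOn:" };
  ToyMethod hash_m   = { "hash" };
  std::vector<ToyMethod> methods;
  methods.push_back(print_on);
  methods = add_method(methods, print_on);   // replaced, size stays 1
  methods = add_method(methods, hash_m);     // appended, size becomes 2
  std::printf("%d\n", (int) methods.size()); // prints 2
  return 0;
}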