//----------------------------profile_taken_branch-----------------------------
void Parse::profile_taken_branch(int target_bci, bool force_update) {
  // This is a potential osr_site if we have a backedge.
  int cur_bci = bci();
  bool osr_site =
    (target_bci <= cur_bci) && count_invocations() && UseOnStackReplacement;

  // If we are going to OSR, restart at the target bytecode.
  set_bci(target_bci);

  // To do: factor out the limit calculations below. These duplicate
  // similar limit calculations in the interpreter.
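  // With typical server-compiler defaults (CompileThreshold=10000,
  // OnStackReplacePercentage=140, InterpreterProfilePercentage=33) the limits
  // below work out to 10000*(140-33)/100 = 10700 with profiling and
  // 10000*140/100 = 14000 without it.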

  if (method_data_update() || force_update) {
    ciMethodData* md = method()->method_data();
    assert(md != NULL, "expected valid ciMethodData");
    ciProfileData* data = md->bci_to_data(cur_bci);
    assert(data->is_JumpData(), "need JumpData for taken branch");
    increment_md_counter_at(md, data, JumpData::taken_offset());
  }

  // In the new tiered system this is all we need to do. In the old
  // (c2-based) tiered system we must do the code below.
#ifndef TIERED
  if (method_data_update()) {
    ciMethodData* md = method()->method_data();
    if (osr_site) {
      ciProfileData* data = md->bci_to_data(cur_bci);
      int limit = (CompileThreshold
                   * (OnStackReplacePercentage - InterpreterProfilePercentage)) / 100;
      test_for_osr_md_counter_at(md, data, JumpData::taken_offset(), limit);
    }
  } else {
    // With method data update off, use the invocation counter to trigger an
    // OSR compilation, as done in the interpreter.
    if (osr_site) {
      int limit = (CompileThreshold * OnStackReplacePercentage) / 100;
      increment_and_test_invocation_counter(limit);
    }
  }
#endif // TIERED

  // Restore the original bytecode.
  set_bci(cur_bci);
}
void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
  // Does this field have a constant value?  If so, just push the value.
  if (field->is_constant()) {
    if (field->is_static()) {
      // final static field
      if (push_constant(field->constant_value()))
        return;
    }
    else {
      // final non-static field of a trusted class (classes in
      // java.lang.invoke and sun.invoke packages and subpackages).
      if (obj->is_Con()) {
        const TypeOopPtr* oop_ptr = obj->bottom_type()->isa_oopptr();
        ciObject* constant_oop = oop_ptr->const_oop();
        ciConstant constant = field->constant_value_of(constant_oop);

        if (push_constant(constant, true))
          return;
      }
    }
  }

  ciType* field_klass = field->type();
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node *adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();

  // Build the resultant type of the load
  const Type *type;

  bool must_assert_null = false;

  if( bt == T_OBJECT ) {
    if (!field->type()->is_loaded()) {
      type = TypeInstPtr::BOTTOM;
      must_assert_null = true;
    } else if (field->is_constant() && field->is_static()) {
      // This can happen if the constant oop is non-perm.
      ciObject* con = field->constant_value().as_object();
      // Do not "join" in the previous type; it doesn't add value,
      // and may yield a vacuous result if the field is of interface type.
      type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
      assert(type != NULL, "field singleton type must be consistent");
    } else {
      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
    }
  } else {
    type = Type::get_const_basic_type(bt);
  }
  // Build the load.
  Node* ld = make_load(NULL, adr, type, bt, adr_type, is_vol);

  // Adjust Java stack
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here.  It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever.  Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations.  (Or, we might load a class
    // which should not be loaded.)  If we ever see a non-null
    // value, we will then trap and recompile.  (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
#endif
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field->type()));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    do_null_assert(peek(), T_OBJECT);
    set_bci(iter().cur_bci()); // put it back
  }

  // If reference is volatile, prevent following memory ops from
  // floating up past the volatile read.  Also prevents commoning
  // another volatile read.
  if (field->is_volatile()) {
    // Memory barrier includes bogus read of value to force load BEFORE membar
    insert_mem_bar(Op_MemBarAcquire, ld);
  }
}
void Canonicalizer::do_If(If* x) {
  // move const to right
  if (x->x()->type()->is_constant()) x->swap_operands();
  // simplify
  const Value l = x->x(); ValueType* lt = l->type();
  const Value r = x->y(); ValueType* rt = r->type();
  if (lt->is_constant() && rt->is_constant()) {
    // pattern: If (lc cond rc) => simplify to: Goto
    Goto* g = NULL;
    switch (lt->tag()) {
      case intTag:
        g = new Goto(x->sux_for(is_true(lt->as_IntConstant ()->value(), x->cond(), rt->as_IntConstant ()->value())), x->is_safepoint());
        break;
      case longTag:
        g = new Goto(x->sux_for(is_true(lt->as_LongConstant()->value(), x->cond(), rt->as_LongConstant()->value())), x->is_safepoint());
        break;
      // other cases not implemented (must be extremely careful with floats & doubles!)
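      // (For example, a NaN operand makes every ordered comparison false while
      //  "!=" is true, and -0.0 compares equal to +0.0, so folding float or
      //  double constants would need full IEEE-754 semantics rather than the
      //  integer-style is_true() comparisons used above.)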
    }
    if (g != NULL) {
      // If this If is a safepoint then the debug information should come from the state_before of the If.
      g->set_state_before(x->state_before());
      set_canonical(g);
    }
  } else if (rt->as_IntConstant() != NULL) {
    // pattern: If (l cond rc) => investigate further
    const jint rc = rt->as_IntConstant()->value();
    if (l->as_CompareOp() != NULL) {
      // pattern: If ((a cmp b) cond rc) => simplify to: If (x cond y) or: Goto
      CompareOp* cmp = l->as_CompareOp();
      bool unordered_is_less = cmp->op() == Bytecodes::_fcmpl || cmp->op() == Bytecodes::_dcmpl;
      BlockBegin* lss_sux = x->sux_for(is_true(-1, x->cond(), rc)); // successor for a < b
      BlockBegin* eql_sux = x->sux_for(is_true( 0, x->cond(), rc)); // successor for a = b
      BlockBegin* gtr_sux = x->sux_for(is_true(+1, x->cond(), rc)); // successor for a > b
      BlockBegin* nan_sux = unordered_is_less ? lss_sux : gtr_sux ; // successor for unordered
      // Note: At this point all successors (lss_sux, eql_sux, gtr_sux, nan_sux) are
      //       equal to x->tsux() or x->fsux(). Furthermore, nan_sux equals either
      //       lss_sux or gtr_sux.
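      //       For example, for "if ((a fcmpl b) < 0)" we get lss_sux == tsux and
      //       eql_sux == gtr_sux == fsux, so the code below emits If(a, geq, b)
      //       with the true and false successors swapped, which tests "a < b"
      //       directly on the original operands.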
      if (lss_sux == eql_sux && eql_sux == gtr_sux) {
        // all successors identical => simplify to: Goto
        set_canonical(new Goto(lss_sux, x->is_safepoint()));
      } else {
        // two successors differ and two successors are the same => simplify to: If (x cmp y)
        // determine new condition & successors
        If::Condition cond;
        BlockBegin* tsux = NULL;
        BlockBegin* fsux = NULL;
             if (lss_sux == eql_sux) { cond = If::leq; tsux = lss_sux; fsux = gtr_sux; }
        else if (lss_sux == gtr_sux) { cond = If::neq; tsux = lss_sux; fsux = eql_sux; }
        else if (eql_sux == gtr_sux) { cond = If::geq; tsux = eql_sux; fsux = lss_sux; }
        else                         { ShouldNotReachHere();                           }
        set_canonical(new If(cmp->x(), cond, nan_sux == tsux, cmp->y(), tsux, fsux, cmp->state_before(), x->is_safepoint()));
        set_bci(cmp->bci());
      }
    } else if (l->as_InstanceOf() != NULL) {
      // NOTE: Code permanently disabled for now since it leaves the old InstanceOf
      //       instruction in the graph (it is pinned). Need to fix this at some point.
      return;
      // pattern: If ((obj instanceof klass) cond rc) => simplify to: IfInstanceOf or: Goto
      InstanceOf* inst = l->as_InstanceOf();
      BlockBegin* is_inst_sux = x->sux_for(is_true(1, x->cond(), rc)); // successor for instanceof == 1
      BlockBegin* no_inst_sux = x->sux_for(is_true(0, x->cond(), rc)); // successor for instanceof == 0
      if (is_inst_sux == no_inst_sux && inst->is_loaded()) {
        // both successors identical and klass is loaded => simplify to: Goto
        set_canonical(new Goto(is_inst_sux, x->is_safepoint()));       
      } else {
        // successors differ => simplify to: IfInstanceOf
        set_canonical(new IfInstanceOf(inst->klass(), inst->obj(), true, inst->bci(), is_inst_sux, no_inst_sux));
      }
    }
  }
}
oop_t ActivationObj::loop(oop_t this_activation) {

  The::set_active_context( this_activation, this);
  
  DECLARE_STACK;
  smi         bci = get_pc_quickly(io);
  
  ActivationMapObj* m_addr = map_addr();
  
  oop_t           codes_oop    = m_addr->codes();
  ByteVectorObj*  codes_addr   = ByteVectorObj::from(codes_oop);
  char*           codes        = codes_addr->bytes();
  fint            codes_length = codes_addr->indexableSize();



  oop_t         literals       = m_addr->literals();
  ObjVectorObj* literals_addr  = ObjVectorObj::from(literals);
  fint          literals_io    = literals_addr->indexableOrigin();
  
 
  fint index = 0, temp_index;
  # define UC_index ((temp_index = index << INDEXWIDTH), (index = 0), temp_index | bc_index)
  bool undirected_resend = false;
  # define UC_undirected_resend (undirected_resend ? (undirected_resend = false, true) : false)
  
  fint lexical_level = 0;
  
  # define use_lit (literals_addr->read_oop(literals_io + UC_index))
  
  oop_t delegatee = 0, temp_del;
  # define UC_del  ((temp_del = delegatee), (delegatee = 0), temp_del)
  
  fint arg_count = 0, temp_arg_count;
  # define UC_arg_count ((temp_arg_count = arg_count), (arg_count = 0), temp_arg_count)
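  // The UC_ ("use and clear") macros read a value accumulated by earlier prefix
  // bytecodes and reset it to its default, so a prefix only affects the next
  // instruction that consumes it. UC_index also shifts, letting several
  // INDEX_CODE prefixes build a wide index: prefixes i1, i2 followed by an
  // operand with index i3 yield (((i1 << INDEXWIDTH) | i2) << INDEXWIDTH) | i3.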
  
  fint temp_bci;
  // for process pre-emption, stop on backward branches
  // todo optimize should probably just stop every 10 or 100 backward branches, or even just every N bytecodes
  # define set_bci(bci_oop) (temp_bci = value_of_smiOop(assert_smi(bci_oop)), stop = temp_bci < bci, bci = temp_bci)
  
  oop_t self = get_self_quickly(io);
  oop_t rcvr = get_rcvr_quickly(io);
  for ( bool stop = false; !stop; ) {
    if (bci >= codes_length) {
      oop_t r = pop();
      oop_t s = get_sender_quickly(io);
      if (s != NULL) // it'll be NULL if we're returning from the start method
        ActivationObj::from(s)->remote_push(r);
      // todo optimize time slow; quits this routine just for a return -- dmu 1/06
      return s;
    }
    unsigned char bc = codes[bci++];
    ByteCodeKind kind  = getOp(bc);
    fint         bc_index = getIndex(bc);
    // printf("interpreting a bytecode in activationMap %i, bc is %i, kind is %i, bc_index is %i\n", map_oop(), bc, kind, bc_index);
    switch (kind) {
     default:   fatal("unknown kind of bytecode"); break;
     
     case                   INDEX_CODE:          index = UC_index;     break;
     case           LEXICAL_LEVEL_CODE:  lexical_level = UC_index;     break;
     case          ARGUMENT_COUNT_CODE:      arg_count = UC_index;     break;
  
     case           READ_LOCAL_CODE:   push(local_obj_addr(lexical_level)-> read_arg_or_local(UC_index)      );  lexical_level = 0;               break;
     case          WRITE_LOCAL_CODE:        local_obj_addr(lexical_level)->write_arg_or_local(UC_index, pop());  lexical_level = 0;  push(self);  break;
     
     case          BRANCH_CODE:                                                          set_bci(use_lit);                   break;
     case          BRANCH_TRUE_CODE:     if ( pop() == The::oop_of(The:: true_object))   set_bci(use_lit);  else index = 0;  break;
     case          BRANCH_FALSE_CODE:    if ( pop() == The::oop_of(The::false_object))   set_bci(use_lit);  else index = 0;  break;
     case          BRANCH_INDEXED_CODE:
                                        {
                                         ObjVectorObj* branch_vector_addr = ObjVectorObj::from(assert_objVector(use_lit));
                                         oop_t branch_index_oop = pop();
                                         if ( is_smi(branch_index_oop) ) {
                                            smi branch_index = value_of_smiOop(branch_index_oop);
                                            if (  0 <= branch_index  &&  branch_index < branch_vector_addr->indexableSize()  )   {
                                              oop_t dest_oop = branch_vector_addr->indexable_at(branch_index);
                                              set_bci(dest_oop);
                                            }
                                         }
                                        }
                                        break;
       
     
     case      DELEGATEE_CODE:               delegatee = use_lit;                                     break;


     case LITERAL_CODE:
      {
       oop_t lit = use_lit;
       if (::is_block(lit)) {
         put_sp_quickly(io, sp); // make sure that the sp is stored correctly, because an allocation could trigger a GC
         oop_t cloned_block = BlockObj::clone_block(lit, this_activation);
         ActivationObj* possibly_moved_act_addr = ActivationObj::from(this_activation); // mightHaveScavengedTheActivation
         if (possibly_moved_act_addr != this) {
           possibly_moved_act_addr->remote_push(cloned_block);
           possibly_moved_act_addr->put_pc_quickly( io, bci );
           return this_activation;
         } else {
           push(cloned_block);
         }
       } else {
         push(lit);
       }
      }
      break;
     
     case IMPLICIT_SEND_CODE:
      // fall through
     case SEND_CODE:
     {
      oop_t selector = use_lit;
      if (selector == The::oop_of(The::restart_selector)) {
        put_sp_quickly( io,  sp  = first_stack_offset               );
        put_pc_quickly( io,  bci = get_pc_after_endInit_quickly(io) );
        break;
      }
      put_sp_quickly( io, sp );
      // todo optimize dmu 3/6. This is here for the _Breakpoint primitive to help debugging by storing the PC.
      // But it slows every primitive, sigh.
      put_pc_quickly( io, bci);

      oop_t a = send(kind == IMPLICIT_SEND_CODE, selector, UC_undirected_resend, UC_del, UC_arg_count, this_activation); 
      if (a != this_activation || ActivationObj::from(a) != this) { // mightHaveScavengedTheActivation
        // put_pc_quickly( io, bci); // commented out after I added the put_pc_quickly above, dmu 3/6
        return a;
      }
      sp = get_sp_quickly(io);
     }
     break;
      
     case NO_OPERAND_CODE:
      switch(bc_index) {
       default: fatal("???"); break;
        case               POP_CODE:      pop();                                  break;
        case              SELF_CODE:      push(self);                             break;
        case          END_INIT_CODE:      put_pc_after_endInit_quickly(io, bci);  break;

        case   NONLOCAL_RETURN_CODE:      return nonlocal_return(pop(), rcvr);    break;
        case UNDIRECTED_RESEND_CODE:      undirected_resend = true;               break;
       }
       break;

    }
  }
  put_sp_quickly( io, sp  );
  put_pc_quickly( io, bci );
  return this_activation;
}
//---------------------------catch_call_exceptions-----------------------------
// Put Catch and CatchProj nodes behind a just-created call.
// Send their caught exceptions to the proper handler.
// This may be used after a call to the rethrow VM stub,
// when it is needed to process unloaded exception classes.
void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) {
  // Exceptions are delivered through this channel:
  Node* i_o = this->i_o();

  // Add a CatchNode.
  GrowableArray<int>* bcis = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, -1);
  GrowableArray<const Type*>* extypes = new (C->node_arena()) GrowableArray<const Type*>(C->node_arena(), 8, 0, NULL);
  GrowableArray<int>* saw_unloaded = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, 0);

  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* h        = handlers.handler();
    int                 h_bci    = h->handler_bci();
    ciInstanceKlass*    h_klass  = h->is_catch_all() ? env()->Throwable_klass() : h->catch_klass();
    const TypePtr* h_extype = TypeOopPtr::make_from_klass_unique(h_klass)->cast_away_null();
    // Ignore exceptions with no implementors.  These cannot be thrown
    // (without class loading anyways, which will deopt this code).
    if( h_extype->empty() ) continue;

    // Do not introduce unloaded exception types into the graph:
    if (!h_klass->is_loaded()) {
      if (saw_unloaded->contains(h_bci)) {
        /* We've already seen an unloaded exception with h_bci, 
           so don't duplicate. Duplication will cause the CatchNode to be
           unnecessarily large. See 4713716. */
        continue;
      } else {
        saw_unloaded->append(h_bci);
      }
    }
    // Note:  It's OK if the BCIs repeat themselves.
    bcis->append(h_bci);
    extypes->append(h_extype);
  }

  int len = bcis->length();
  CatchNode *cn = new (C, 2) CatchNode(control(), i_o, len+1);
  Node *catch_ = _gvn.transform(cn);

  // now branch with the exception state to each of the (potential)
  // handlers
  for(int i=0; i < len; i++) {
    // Setup JVM state to enter the handler.
    PreserveJVMState pjvms(this);
    // Locals are just copied from before the call.
    // Get control from the CatchNode.
    int handler_bci = bcis->at(i);
    Node* ctrl = _gvn.transform( new (C, 1) CatchProjNode(catch_, i+1,handler_bci));
    // This handler cannot happen?
    if (ctrl == top())  continue;
    set_control(ctrl);

    // Create exception oop
    const TypeInstPtr* extype = extypes->at(i)->is_instptr();
    
    Node *thread = _gvn.transform( new (C, 1) ThreadLocalNode() );
    Node* ex_adr = basic_plus_adr(top(), thread, in_bytes(JavaThread::pending_exception_offset()));
    int pending_ex_alias_idx = C->get_alias_index(ex_adr->bottom_type()->is_ptr());
    Node *ex_oop = make_load( NULL, ex_adr, extype, T_OBJECT, pending_ex_alias_idx );
    Node *ex_st  = store_to_memory( ctrl, ex_adr, null(), T_OBJECT, pending_ex_alias_idx );
    record_for_igvn(ex_st);

    // Handle unloaded exception classes.
    if (saw_unloaded->contains(handler_bci)) {
      // An unloaded exception type is coming here.  Do an uncommon trap.
      // We do not expect the same handler bci to take both cold unloaded
      // and hot loaded exceptions.  But, watch for it.
      if (PrintOpto && extype->is_loaded()) {
        C2OUT->print_cr("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ", handler_bci);
        method()->print_name(C2OUT); C2OUT->cr();
      }
      // Emit an uncommon trap instead of processing the block.
      set_bci(handler_bci);
      push_ex_oop(ex_oop);
      uncommon_trap(Deoptimization::Reason_unloaded, extype->klass(), "not loaded exception", false);
      set_bci(iter().cur_bci()); // put it back
      continue;
    }

    // go to the exception handler
    if (handler_bci < 0) {     // merge with corresponding rethrow node
      throw_to_exit(make_exception_state(ex_oop));
    } else {                      // Else jump to corresponding handler
      push_ex_oop(ex_oop);        // Clear stack and push just the oop.
      merge_exception(handler_bci);
    }
  }

  // The first CatchProj is for the normal return.
  // (Note:  If this is a call to rethrow_Java, this node goes dead.)
  set_control(_gvn.transform( new (C, 1) CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci)));
}
//------------------------------do_call----------------------------------------
// Handle your basic call.  Inline if we can & want to, else just setup call.
void Parse::do_call() {
  // It's likely we are going to add debug info soon.
  // Also, if we inline a guy who eventually needs debug info for this JVMS,
  // our contribution to it is cleaned up right here.
  kill_dead_locals();

  // Set frequently used booleans
  bool is_virtual = bc() == Bytecodes::_invokevirtual;
  bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
  bool has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial;

  // Find target being called
  bool             will_link;
  ciMethod*        dest_method   = iter().get_method(will_link);
  ciInstanceKlass* holder_klass  = dest_method->holder();
  ciKlass* holder = iter().get_declared_method_holder();
  ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);

  int   nargs    = dest_method->arg_size();
  // See if the receiver (if any) is NULL, so that we always throw BEFORE
  // attempting to resolve the call or initialize the holder class.  Doing so
  // out of order opens a window where we can endlessly deopt because the call
  // holder is not initialized, but the call never actually happens (forcing
  // class initialization) because we only see NULL receivers.
  CPData_Invoke *caller_cpdi = cpdata()->as_Invoke(bc());
  debug_only( assert(caller_cpdi->is_Invoke(), "Not invoke!") );
  if( is_virtual_or_interface &&
      _gvn.type(stack(sp() - nargs))->higher_equal(TypePtr::NULL_PTR) ) {
    builtin_throw( Deoptimization::Reason_null_check, "null receiver", caller_cpdi, caller_cpdi->saw_null(), /*must_throw=*/true );
    return;
  }

  // uncommon-trap when callee is unloaded, uninitialized or will not link
  // bailout when too many arguments for register representation
  if (!will_link || can_not_compile_call_site(dest_method, klass)) {
    return;
  }
  assert(FAM || holder_klass->is_loaded(), "");
  assert(dest_method->is_static() == !has_receiver, "must match bc");
  // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
  // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
  assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
  // Note:  In the absence of miranda methods, an abstract class K can perform
  // an invokevirtual directly on an interface method I.m if K implements I.

  // ---------------------
  // Does Class Hierarchy Analysis reveal only a single target of a v-call?
  // Then we may inline or make a static call, but become dependent on there being only 1 target.
  // Does the call-site type profile reveal only one receiver?
  // Then we may introduce a run-time check and inline on the path where it succeeds.
  // The other path may uncommon_trap, check for another receiver, or do a v-call.
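  // For example, if CHA shows a single loaded implementor of the callee we can
  // bind the call statically and record a CHA dependence that forces
  // deoptimization if a conflicting class is loaded later; if the profile shows
  // one dominant receiver klass, we can emit a klass check, inline along the
  // matching path, and fall back to an uncommon trap or a v-call otherwise.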

  // Choose call strategy.
  bool call_is_virtual = is_virtual_or_interface;
  int vtable_index = methodOopDesc::invalid_vtable_index;
  ciMethod* call_method = dest_method;

  // Try to get the most accurate receiver type
  if (is_virtual_or_interface) {
    Node*             receiver_node = stack(sp() - nargs);
    const TypeInstPtr* inst_type = _gvn.type(receiver_node)->isa_instptr();
    if( inst_type ) {
      ciInstanceKlass* ikl = inst_type->klass()->as_instance_klass();
      // If the receiver is not yet linked then: (1) we never can make this
      // call because no objects can be created until linkage, and (2) CHA
      // reports incorrect answers... so do not bother with making the call
      // until after the klass gets linked.
      ciInstanceKlass *ikl2 = ikl->is_subtype_of(klass) ? ikl : klass;
      if (!ikl->is_linked()) {
        uncommon_trap(Deoptimization::Reason_uninitialized, klass, "call site where receiver is not linked", false);
        return;
      }
    }
    const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
    ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, dest_method, receiver_type);

    // Has the call been sufficiently improved that it is no longer a virtual call?
    if (optimized_virtual_method != NULL) {
      call_method     = optimized_virtual_method;
      call_is_virtual = false;
    } else if (false) {
      // We can make a vtable call at this site
      vtable_index = call_method->resolve_vtable_index(method()->holder(), klass);
    }
  }

  // Note:  It's OK to try to inline a virtual call.
  // The call generator will not attempt to inline a polymorphic call
  // unless it knows how to optimize the receiver dispatch.
  bool try_inline = (C->do_inlining() || InlineAccessors) &&
                    (!C->method()->should_disable_inlining()) &&
                    (call_method->number_of_breakpoints() == 0);

  // Get profile data for the *callee*.  First see if we have a precise
  // CodeProfile for this exact inline because C1 already inlined it.
  CodeProfile *callee_cp;
  int callee_cp_inloff;

  if( caller_cpdi->inlined_method_oid() == call_method->objectId() ) {
    callee_cp = c1_cp();        // Use same CodeProfile as current
    callee_cp_inloff = caller_cpdi->cpd_offset(); // But use inlined portion
  } else {
    // If callee has a cp, clone it and use
    callee_cp = call_method->codeprofile(true);
    callee_cp_inloff = 0;

    if (callee_cp || FAM) {
      // The cloned cp needs to be freed later
      Compile* C = Compile::current();
      C->record_cloned_cp(callee_cp);
    } else { // Had profile info at top level, but not for this call site?
      // callee_cp will hold the just created cp, or whatever cp allocated by
      // other thread which wins the race in set_codeprofile
      callee_cp = call_method->set_codeprofile(CodeProfile::make(call_method));
    }
  }

  CPData_Invoke *c2_caller_cpdi = UseC1 ? c2cpdata()->as_Invoke(bc()) : NULL;

  // ---------------------
  inc_sp(- nargs);              // Temporarily pop args for JVM state of call
  JVMState* jvms = sync_jvms();

  // ---------------------
  // Decide call tactic.
  // This call checks with CHA, the interpreter profile, intrinsics table, etc.
  // It decides whether inlining is desirable or not.
  CallGenerator* cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor(), callee_cp, callee_cp_inloff, c2_caller_cpdi, caller_cpdi);

  // ---------------------
  // Round double arguments before call
  round_double_arguments(dest_method);

#ifndef PRODUCT
  // Record first part of parsing work for this call
  parse_histogram()->record_change();
#endif // not PRODUCT

  assert(jvms == this->jvms(), "still operating on the right JVMS");
  assert(jvms_in_sync(),       "jvms must carry full info into CG");

  // save across call, for a subsequent cast_not_null.
  Node* receiver = has_receiver ? argument(0) : NULL;

  JVMState* new_jvms = cg->generate(jvms, caller_cpdi, is_private_copy());
  if( new_jvms == NULL ) {      // Did it work?
    // When inlining attempt fails (e.g., too many arguments),
    // it may contaminate the current compile state, making it
    // impossible to pull back and try again.  Once we call
    // cg->generate(), we are committed.  If it fails, the whole
    // compilation task is compromised.
    if (failing())  return;
    if (PrintOpto || PrintInlining || PrintC2Inlining) {
      // Only one fall-back, so if an intrinsic fails, ignore any bytecodes.
      if (cg->is_intrinsic() && call_method->code_size() > 0) {
C2OUT->print("Bailed out of intrinsic, will not inline: ");
        call_method->print_name(C2OUT); C2OUT->cr();
      }
    }
    // This can happen if a library intrinsic is available, but refuses
    // the call site, perhaps because it did not match a pattern the
    // intrinsic was expecting to optimize.  The fallback position is
    // to call out-of-line.
    try_inline = false;  // Inline tactic bailed out.
    cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline, prof_factor(), c1_cp(), c1_cp_inloff(), c2_caller_cpdi, caller_cpdi);
    new_jvms = cg->generate(jvms, caller_cpdi, is_private_copy());
    assert(new_jvms != NULL, "call failed to generate:  calls should work");
    if (c2_caller_cpdi) c2_caller_cpdi->_inlining_failure_id = IF_GENERALFAILURE;
  }

  if (cg->is_inline()) {
    C->env()->notice_inlined_method(call_method);
  }

  // Reset parser state from [new_]jvms, which now carries results of the call.
  // Return value (if any) is already pushed on the stack by the cg.
  add_exception_states_from(new_jvms);
  if (new_jvms->map()->control() == top()) {
    stop_and_kill_map();
  } else {
    assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
    set_jvms(new_jvms);
  }

  if (!stopped()) {
    // This was some sort of virtual call, which did a null check for us.
    // Now we can assert receiver-not-null, on the normal return path.
    if (receiver != NULL && cg->is_virtual()) {
      Node* cast = cast_not_null(receiver, true);
      // %%% assert(receiver == cast, "should already have cast the receiver");
    }

    // Round double result after a call from strict to non-strict code
    round_double_result(dest_method);

    // If the return type of the method is not loaded, assert that the
    // value we got is a null.  Otherwise, we need to recompile.
    if (!dest_method->return_type()->is_loaded()) {
      // If there is going to be a trap, put it at the next bytecode:
      set_bci(iter().next_bci());
      do_null_assert(peek(), T_OBJECT);
      set_bci(iter().cur_bci()); // put it back
    } else {
      assert0( call_method->return_type()->is_loaded() );
      BasicType result_type = dest_method->return_type()->basic_type();
      if (result_type == T_OBJECT || result_type == T_ARRAY) {
        const Type *t = peek()->bottom_type();
        assert0( t == TypePtr::NULL_PTR || t->is_oopptr()->klass()->is_loaded() );
      }
    }
  }

  // Restart record of parsing work after possible inlining of call
#ifndef PRODUCT
  parse_histogram()->set_initial_state(bc());
#endif
}