Code Example #1
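GraphKit::gen_stub builds the C2 Ideal graph for a runtime/native stub. It
materializes the incoming Java parameters from a StartNode, records
last_Java_sp and last_Java_pc in the thread's frame anchor, computes a C
calling signature (adding the thread, and optionally the return PC, as extra
arguments), emits the CallRuntimeNode, re-normalizes sub-word integer return
values, fetches any oop result and pending exception back out of thread-local
storage, and exits through a return, tail call, or tail jump depending on
is_fancy_jump.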
//--------------------gen_stub-------------------------------
void GraphKit::gen_stub(address C_function,
                        const char *name,
                        int is_fancy_jump,
                        bool pass_tls,
                        bool return_pc) {
  ResourceMark rm;

  const TypeTuple *jdomain = C->tf()->domain();
  const TypeTuple *jrange  = C->tf()->range();

  // The procedure start
  StartNode* start = new (C) StartNode(root(), jdomain);
  _gvn.set_type_bottom(start);

  // Make a map, with JVM state
  uint parm_cnt = jdomain->cnt();
  uint max_map = MAX2(2*parm_cnt+1, jrange->cnt());
  // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
  assert(SynchronizationEntryBCI == InvocationEntryBci, "");
  JVMState* jvms = new (C) JVMState(0);
  jvms->set_bci(InvocationEntryBci);
  jvms->set_monoff(max_map);
  jvms->set_scloff(max_map);
  jvms->set_endoff(max_map);
  {
    SafePointNode *map = new (C) SafePointNode( max_map, jvms );
    jvms->set_map(map);
    set_jvms(jvms);
    assert(map == this->map(), "kit.map is set");
  }

  // Make up the parameters
  uint i;
  for( i = 0; i < parm_cnt; i++ )
    map()->init_req(i, _gvn.transform(new (C) ParmNode(start, i)));
  for( ; i<map()->req(); i++ )
    map()->init_req(i, top());      // For nicer debugging

  // GraphKit requires memory to be a MergeMemNode:
  set_all_memory(map()->memory());

  // Get base of thread-local storage area
  Node* thread = _gvn.transform( new (C) ThreadLocalNode() );
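  // (ThreadLocalNode yields the JavaThread* for the current thread; the TLS
  //  addresses below are all computed as offsets from it.)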

  const int NoAlias = Compile::AliasIdxBot;

  Node* adr_last_Java_pc = basic_plus_adr(top(),
                                          thread,
                                          in_bytes(JavaThread::frame_anchor_offset()) +
                                          in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
#if defined(SPARC)
  Node* adr_flags = basic_plus_adr(top(),
                                   thread,
                                   in_bytes(JavaThread::frame_anchor_offset()) +
                                   in_bytes(JavaFrameAnchor::flags_offset()));
#endif /* defined(SPARC) */


  // Drop in the last_Java_sp.  last_Java_fp is not touched.
  // Always do this after the other "last_Java_frame" fields are set, since
  // as soon as last_Java_sp != NULL, has_last_Java_frame is true and users
  // will look at the other fields.
  //
  Node *adr_sp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_sp_offset()));
  Node *last_sp = basic_plus_adr(top(), frameptr(), (intptr_t) STACK_BIAS);
  store_to_memory(NULL, adr_sp, last_sp, T_ADDRESS, NoAlias);

  // Set _thread_in_native
  // The order of stores into TLS is critical!  Setting _thread_in_native MUST
  // be last, because a GC is allowed at any time after setting it and the GC
  // will require last_Java_pc and last_Java_sp.
  Node* adr_state = basic_plus_adr(top(), thread, in_bytes(JavaThread::thread_state_offset()));

  //-----------------------------
  // Compute signature for C call.  Varies from the Java signature!
  const Type **fields = TypeTuple::fields(2*parm_cnt+2);
  uint cnt = TypeFunc::Parms;
  // The C routine gets the base of thread-local storage passed in as an
  // extra argument.  Not all calls need it, but it's cheap to add here.
  for( ; cnt<parm_cnt; cnt++ )
    fields[cnt] = jdomain->field_at(cnt);
  fields[cnt++] = TypeRawPtr::BOTTOM; // Thread-local storage
  // Also pass in the caller's PC, if asked for.
  if( return_pc )
    fields[cnt++] = TypeRawPtr::BOTTOM; // Return PC

  const TypeTuple* domain = TypeTuple::make(cnt,fields);
  // The C routine we are about to call cannot return an oop; it can block on
  // exit and a GC will trash the oop while it sits in C-land.  Instead, we
  // return the oop through TLS for runtime calls.
  // Also, C routines returning integer subword values leave the high
  // order bits dirty; these must be cleaned up by explicit sign extension.
  const Type* retval = (jrange->cnt() == TypeFunc::Parms) ? Type::TOP : jrange->field_at(TypeFunc::Parms);
  // Make a private copy of jrange->fields().
  const Type **rfields = TypeTuple::fields(jrange->cnt() - TypeFunc::Parms);
  // Fixup oop returns
  const TypeOopPtr* retval_ptr = retval->isa_oop_ptr();
  if( retval_ptr ) {
    assert( pass_tls, "Oop must be returned thru TLS" );
    // Fancy-jumps return address; others return void
    rfields[TypeFunc::Parms] = is_fancy_jump ? TypeRawPtr::BOTTOM : Type::TOP;

  } else if( retval->isa_int() ) { // Returning any integer subtype?
    // "Fatten" byte, char & short return types to 'int' to show that
    // the native C code can return values with junk high order bits.
    // We'll sign-extend it below later.
    rfields[TypeFunc::Parms] = TypeInt::INT; // It's "dirty" and needs sign-ext

  } else if( jrange->cnt() >= TypeFunc::Parms+1 ) { // Else copy other types
    rfields[TypeFunc::Parms] = jrange->field_at(TypeFunc::Parms);
    if( jrange->cnt() == TypeFunc::Parms+2 )
      rfields[TypeFunc::Parms+1] = jrange->field_at(TypeFunc::Parms+1);
  }
  const TypeTuple* range = TypeTuple::make(jrange->cnt(),rfields);

  // Final C signature
  const TypeFunc *c_sig = TypeFunc::make(domain,range);

  //-----------------------------
  // Make the call node
  CallRuntimeNode *call = new (C)
    CallRuntimeNode(c_sig, C_function, name, TypePtr::BOTTOM);
  //-----------------------------

  // Fix-up the debug info for the call
  call->set_jvms( new (C) JVMState(0) );
  call->jvms()->set_bci(0);
  call->jvms()->set_offsets(cnt);

  // Set fixed predefined input arguments
  cnt = 0;
  for( i=0; i<TypeFunc::Parms; i++ )
    call->init_req( cnt++, map()->in(i) );
  // A little too aggressive on the parm copy; return address is not an input
  call->set_req(TypeFunc::ReturnAdr, top());
  for( ; i<parm_cnt; i++ )    // Regular input arguments
    call->init_req( cnt++, map()->in(i) );

  call->init_req( cnt++, thread );
  if( return_pc )             // Return PC, if asked for
    call->init_req( cnt++, returnadr() );
  _gvn.transform_no_reclaim(call);


  //-----------------------------
  // Now set up the return results
  set_control( _gvn.transform( new (C) ProjNode(call,TypeFunc::Control)) );
  set_i_o(     _gvn.transform( new (C) ProjNode(call,TypeFunc::I_O    )) );
  set_all_memory_call(call);
  if (range->cnt() > TypeFunc::Parms) {
    Node* retnode = _gvn.transform( new (C) ProjNode(call,TypeFunc::Parms) );
    // C-land is allowed to return sub-word values.  Convert to integer type.
    assert( retval != Type::TOP, "" );
    if (retval == TypeInt::BOOL) {
      retnode = _gvn.transform( new (C) AndINode(retnode, intcon(0xFF)) );
    } else if (retval == TypeInt::CHAR) {
      retnode = _gvn.transform( new (C) AndINode(retnode, intcon(0xFFFF)) );
    } else if (retval == TypeInt::BYTE) {
      retnode = _gvn.transform( new (C) LShiftINode(retnode, intcon(24)) );
      retnode = _gvn.transform( new (C) RShiftINode(retnode, intcon(24)) );
    } else if (retval == TypeInt::SHORT) {
      retnode = _gvn.transform( new (C) LShiftINode(retnode, intcon(16)) );
      retnode = _gvn.transform( new (C) RShiftINode(retnode, intcon(16)) );
    }
    map()->set_req( TypeFunc::Parms, retnode );
  }

  //-----------------------------

  // Clear last_Java_sp
  store_to_memory(NULL, adr_sp, null(), T_ADDRESS, NoAlias);
  // Clear last_Java_pc and (on SPARC) the frame-anchor _flags
  store_to_memory(NULL, adr_last_Java_pc, null(), T_ADDRESS, NoAlias);
#if defined(SPARC)
  store_to_memory(NULL, adr_flags, intcon(0), T_INT, NoAlias);
#endif /* defined(SPARC) */
#ifdef IA64
  Node* adr_last_Java_fp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_fp_offset()));
  if( os::is_MP() ) insert_mem_bar(Op_MemBarRelease);
  store_to_memory(NULL, adr_last_Java_fp,    null(),    T_ADDRESS, NoAlias);
#endif

  // For is-fancy-jump, the C-return value is also the branch target
  Node* target = map()->in(TypeFunc::Parms);
  // Runtime call returning oop in TLS?  Fetch it out
  if( pass_tls ) {
    Node* adr = basic_plus_adr(top(), thread, in_bytes(JavaThread::vm_result_offset()));
    Node* vm_result = make_load(NULL, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, false);
    map()->set_req(TypeFunc::Parms, vm_result); // vm_result passed as result
    // Clear the vm_result slot in thread-local storage (TLS)
    store_to_memory(NULL, adr, null(), T_ADDRESS, NoAlias);
  }

  //-----------------------------
  // check exception
  Node* adr = basic_plus_adr(top(), thread, in_bytes(Thread::pending_exception_offset()));
  Node* pending = make_load(NULL, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, false);

  Node* exit_memory = reset_memory();
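  // exit_memory feeds every exit built below: the exception-path TailCall
  // and the normal Return/TailCall/TailJump.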

  Node* cmp = _gvn.transform( new (C) CmpPNode(pending, null()) );
  Node* bo  = _gvn.transform( new (C) BoolNode(cmp, BoolTest::ne) );
  IfNode   *iff = create_and_map_if(control(), bo, PROB_MIN, COUNT_UNKNOWN);

  Node* if_null     = _gvn.transform( new (C) IfFalseNode(iff) );
  Node* if_not_null = _gvn.transform( new (C) IfTrueNode(iff)  );

  assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
  Node *exc_target = makecon(TypeRawPtr::make( StubRoutines::forward_exception_entry() ));
  Node *to_exc = new (C) TailCallNode(if_not_null,
                                      i_o(),
                                      exit_memory,
                                      frameptr(),
                                      returnadr(),
                                      exc_target, null());
  root()->add_req(_gvn.transform(to_exc));  // bind to root to keep live
  C->init_start(start);

  //-----------------------------
  // If this is a normal subroutine return, issue the return and be done.
  Node *ret;
  switch( is_fancy_jump ) {
  case 0:                       // Make a return instruction
    // Return to caller, free any space for return address
    ret = new (C) ReturnNode(TypeFunc::Parms, if_null,
                             i_o(),
                             exit_memory,
                             frameptr(),
                             returnadr());
    if (C->tf()->range()->cnt() > TypeFunc::Parms)
      ret->add_req( map()->in(TypeFunc::Parms) );
    break;
  case 1:    // This is a fancy tail-call jump.  Jump to computed address.
    // Jump to new callee; leave old return address alone.
    ret = new (C) TailCallNode(if_null,
                               i_o(),
                               exit_memory,
                               frameptr(),
                               returnadr(),
                               target, map()->in(TypeFunc::Parms));
    break;
  case 2:                       // Pop return address & jump
    // Throw away old return address; jump to new computed address
    //assert(C_function == CAST_FROM_FN_PTR(address, OptoRuntime::rethrow_C), "fancy_jump==2 only for rethrow");
    ret = new (C) TailJumpNode(if_null,
                               i_o(),
                               exit_memory,
                               frameptr(),
                               target, map()->in(TypeFunc::Parms));
    break;
  default:
    ShouldNotReachHere();
  }
  root()->add_req(_gvn.transform(ret));
}
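The sub-word cleanup at the end of gen_stub (the AndI masks and the
LShiftI/RShiftI pairs) is the standard mask-or-shift idiom for re-normalizing
a return value whose high-order bits are junk. Below is a standalone C++
sketch of the same arithmetic, for illustration only: sign_extend is a
hypothetical helper, not HotSpot code, and it assumes an arithmetic right
shift of signed values, which is the behavior the Ideal RShiftI node provides.

#include <cstdint>
#include <cstdio>

// Sign-extend the low 'bits' bits of 'raw', mirroring the LShiftI/RShiftI
// pair gen_stub emits for BYTE and SHORT returns.
static int32_t sign_extend(int32_t raw, int bits) {
  int sh = 32 - bits;
  return (int32_t)((uint32_t)raw << sh) >> sh;  // shift junk out, sign back in
}

int main() {
  int32_t dirty = (int32_t)0xDEADBE80;  // junk high bits; real result in the low bits
  printf("byte : %d\n", sign_extend(dirty, 8));   // -128   (like BYTE:  <<24 then >>24)
  printf("short: %d\n", sign_extend(dirty, 16));  // -16768 (like SHORT: <<16 then >>16)
  printf("bool : %d\n", dirty & 0xFF);            // 128    (like BOOL:  AndI 0xFF)
  printf("char : %d\n", dirty & 0xFFFF);          // 48768  (like CHAR:  AndI 0xFFFF)
  return 0;
}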
Code Example #2
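Parse::do_call parses a single invoke bytecode. It null-checks the receiver
before resolving the call, uses the receiver's type (and optimize_inlining)
to try to devirtualize, asks Compile::call_generator for a CallGenerator that
may inline the callee, and, if that generator bails out, falls back to a
plain out-of-line call before wiring the resulting JVMState back into the
parser.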
//------------------------------do_call----------------------------------------
// Handle your basic call.  Inline if we can & want to, else just setup call.
void Parse::do_call() {
  // It's likely we are going to add debug info soon.
  // Also, if we inline a method that eventually needs debug info for this
  // JVMS, our contribution to it is cleaned up right here.
  kill_dead_locals();

  // Set frequently used booleans
  bool is_virtual = bc() == Bytecodes::_invokevirtual;
  bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
  bool has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial;

  // Find target being called
  bool             will_link;
  ciMethod*        dest_method   = iter().get_method(will_link);
  ciInstanceKlass* holder_klass  = dest_method->holder();
  ciKlass* holder = iter().get_declared_method_holder();
  ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);

  int   nargs    = dest_method->arg_size();
  // Check whether the receiver (if any) is NULL, so that we always throw
  // BEFORE attempting to resolve the call or initialize the holder class.
  // Doing these out of order opens a window where we can endlessly deopt
  // because the call holder is not initialized, but the call never actually
  // happens (which would force class initialization) because we only see
  // NULL receivers.
  CPData_Invoke *caller_cpdi = cpdata()->as_Invoke(bc());
  debug_only( assert(caller_cpdi->is_Invoke(), "Not invoke!") );
  if( is_virtual_or_interface &&
      _gvn.type(stack(sp() - nargs))->higher_equal(TypePtr::NULL_PTR) ) {
    builtin_throw( Deoptimization::Reason_null_check, "null receiver", caller_cpdi, caller_cpdi->saw_null(), /*must_throw=*/true );
    return;
  }

  // uncommon-trap when callee is unloaded, uninitialized or will not link
  // bailout when too many arguments for register representation
  if (!will_link || can_not_compile_call_site(dest_method, klass)) {
    return;
  }
  assert(FAM || holder_klass->is_loaded(), "");
  assert(dest_method->is_static() == !has_receiver, "must match bc");
  // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
  // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
  assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
  // Note:  In the absence of miranda methods, an abstract class K can perform
  // an invokevirtual directly on an interface method I.m if K implements I.

  // ---------------------
  // Does Class Hierarchy Analysis reveal only a single target of a v-call?
  // Then we may inline or make a static call, but become dependent on there being only 1 target.
  // Does the call-site type profile reveal only one receiver?
  // Then we may introduce a run-time check and inline on the path where it succeeds.
  // The other path may uncommon_trap, check for another receiver, or do a v-call.

  // Choose call strategy.
  bool call_is_virtual = is_virtual_or_interface;
  int vtable_index = methodOopDesc::invalid_vtable_index;
  ciMethod* call_method = dest_method;

  // Try to get the most accurate receiver type
  if (is_virtual_or_interface) {
    Node*              receiver_node = stack(sp() - nargs);
    const TypeInstPtr* inst_type     = _gvn.type(receiver_node)->isa_instptr();
    if( inst_type ) {
      ciInstanceKlass* ikl = inst_type->klass()->as_instance_klass();
      // If the receiver is not yet linked then: (1) we can never make this
      // call because no objects can be created until linkage, and (2) CHA
      // reports incorrect answers... so do not bother with making the call
      // until after the klass gets linked.
      ciInstanceKlass *ikl2 = ikl->is_subtype_of(klass) ? ikl : klass;
      if( !ikl->is_linked() ) {
        uncommon_trap(Deoptimization::Reason_uninitialized, klass,
                      "call site where receiver is not linked", false);
        return;
      }
    }
    const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
    ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, dest_method, receiver_type);

    // Has the call been sufficiently improved such that it is no longer virtual?
    if (optimized_virtual_method != NULL) {
      call_method     = optimized_virtual_method;
      call_is_virtual = false;
    } else if (false) {
      // (Disabled) We could make a vtable call at this site.
      vtable_index = call_method->resolve_vtable_index(method()->holder(), klass);
    }
  }

  // Note:  It's OK to try to inline a virtual call.
  // The call generator will not attempt to inline a polymorphic call
  // unless it knows how to optimize the receiver dispatch.
  bool try_inline = (C->do_inlining() || InlineAccessors) &&
                    (!C->method()->should_disable_inlining()) &&
                    (call_method->number_of_breakpoints() == 0);

  // Get profile data for the *callee*.  First see if we have precise
  // CodeProfile for this exact inline because C1 inlined it already.
  CodeProfile *callee_cp;
  int callee_cp_inloff;

  if( caller_cpdi->inlined_method_oid() == call_method->objectId() ) {
    callee_cp = c1_cp();        // Use same CodeProfile as current
    callee_cp_inloff = caller_cpdi->cpd_offset(); // But use inlined portion
  } else {
    // If callee has a cp, clone it and use
    callee_cp = call_method->codeprofile(true);
    callee_cp_inloff = 0;

    if (callee_cp || FAM) {
      // The cloned cp needs to be freed later
      Compile* C = Compile::current();
      C->record_cloned_cp(callee_cp);
    } else { // Had profile info at top level, but not for this call site?
      // callee_cp will hold the just created cp, or whatever cp allocated by
      // other thread which wins the race in set_codeprofile
      callee_cp = call_method->set_codeprofile(CodeProfile::make(call_method));
    }
  }

  CPData_Invoke *c2_caller_cpdi = UseC1 ? c2cpdata()->as_Invoke(bc()) : NULL;

  // ---------------------
  inc_sp(-nargs);               // Temporarily pop args for JVM state of call
  JVMState* jvms = sync_jvms();

  // ---------------------
  // Decide call tactic.
  // This call checks with CHA, the interpreter profile, intrinsics table, etc.
  // It decides whether inlining is desirable or not.
  CallGenerator* cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms,
                                        try_inline, prof_factor(), callee_cp, callee_cp_inloff,
                                        c2_caller_cpdi, caller_cpdi);

  // ---------------------
  // Round double arguments before call
  round_double_arguments(dest_method);

#ifndef PRODUCT
  // Record first part of parsing work for this call
  parse_histogram()->record_change();
#endif // not PRODUCT

  assert(jvms == this->jvms(), "still operating on the right JVMS");
  assert(jvms_in_sync(),       "jvms must carry full info into CG");

  // Save across the call, for a subsequent cast_not_null.
  Node* receiver = has_receiver ? argument(0) : NULL;

  JVMState* new_jvms = cg->generate(jvms, caller_cpdi, is_private_copy());
  if( new_jvms == NULL ) {      // Did it work?
    // When inlining attempt fails (e.g., too many arguments),
    // it may contaminate the current compile state, making it
    // impossible to pull back and try again.  Once we call
    // cg->generate(), we are committed.  If it fails, the whole
    // compilation task is compromised.
    if (failing())  return;
    if (PrintOpto || PrintInlining || PrintC2Inlining) {
      // Only one fall-back, so if an intrinsic fails, ignore any bytecodes.
      if (cg->is_intrinsic() && call_method->code_size() > 0) {
        C2OUT->print("Bailed out of intrinsic, will not inline: ");
        call_method->print_name(C2OUT);
        C2OUT->cr();
      }
    }
    // This can happen if a library intrinsic is available, but refuses
    // the call site, perhaps because it did not match a pattern the
    // intrinsic was expecting to optimize.  The fallback position is
    // to call out-of-line.
    try_inline = false;  // Inline tactic bailed out.
    cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms, try_inline,
                           prof_factor(), c1_cp(), c1_cp_inloff(), c2_caller_cpdi, caller_cpdi);
    new_jvms = cg->generate(jvms, caller_cpdi, is_private_copy());
    assert(new_jvms != NULL, "call failed to generate: calls should work");
    if (c2_caller_cpdi) c2_caller_cpdi->_inlining_failure_id = IF_GENERALFAILURE;
  }

  if (cg->is_inline()) {
    C->env()->notice_inlined_method(call_method);
  }

  // Reset parser state from [new_]jvms, which now carries results of the call.
  // Return value (if any) is already pushed on the stack by the cg.
  add_exception_states_from(new_jvms);
  if (new_jvms->map()->control() == top()) {
    stop_and_kill_map();
  } else {
    assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
    set_jvms(new_jvms);
  }

  if (!stopped()) {
    // This was some sort of virtual call, which did a null check for us.
    // Now we can assert receiver-not-null, on the normal return path.
    if (receiver != NULL && cg->is_virtual()) {
      Node* cast = cast_not_null(receiver, true);
      // %%% assert(receiver == cast, "should already have cast the receiver");
    }

    // Round double result after a call from strict to non-strict code
    round_double_result(dest_method);

    // If the return type of the method is not loaded, assert that the
    // value we got is a null.  Otherwise, we need to recompile.
    if (!dest_method->return_type()->is_loaded()) {
      // If there is going to be a trap, put it at the next bytecode:
      set_bci(iter().next_bci());
      do_null_assert(peek(), T_OBJECT);
      set_bci(iter().cur_bci()); // put it back
    } else {
      assert0( call_method->return_type()->is_loaded() );
      BasicType result_type = dest_method->return_type()->basic_type();
      if (result_type == T_OBJECT || result_type == T_ARRAY) {
        const Type *t = peek()->bottom_type();
        assert0( t == TypePtr::NULL_PTR || t->is_oopptr()->klass()->is_loaded() );
      }
    }
  }

  // Restart record of parsing work after possible inlining of call
#ifndef PRODUCT
  parse_histogram()->set_initial_state(bc());
#endif
}
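The bailout path in the middle of do_call (cg->generate() returning NULL,
after which a fresh, non-inlining CallGenerator is requested and must
succeed) is a two-attempt pattern that can be shown in miniature. In the
sketch below, Generator and Result are hypothetical stand-ins for
CallGenerator and JVMState, not HotSpot's actual classes:

#include <cassert>
#include <memory>

struct Result {};                           // stand-in for JVMState
struct Generator {                          // stand-in for CallGenerator
  virtual ~Generator() {}
  virtual Result* generate() = 0;           // may return NULL to refuse a site
};

struct InlineGen : Generator {              // e.g. an intrinsic that rejects the pattern
  Result* generate() { return NULL; }
};
struct OutOfLineGen : Generator {           // a plain call; always works
  Result* generate() { static Result r; return &r; }
};

// Mirrors do_call's tactic: try the preferred (possibly inlining) generator
// first; if it bails out, fall back to an out-of-line call.
Result* emit_call(bool try_inline) {
  std::unique_ptr<Generator> cg;
  cg.reset(try_inline ? (Generator*)new InlineGen() : (Generator*)new OutOfLineGen());
  Result* r = cg->generate();
  if (r == NULL) {                          // inline tactic bailed out
    cg.reset(new OutOfLineGen());
    r = cg->generate();
    assert(r != NULL && "calls should work");
  }
  return r;
}

int main() { return emit_call(true) ? 0 : 1; }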