Example #1
void Parse::do_field_access(bool is_get, bool is_field) {
  bool will_link;
  ciField* field = iter().get_field(will_link);
  assert(will_link, "getfield: typeflow responsibility");

  ciInstanceKlass* field_holder = field->holder();

  if (is_field == field->is_static()) {
    // Interpreter will throw java_lang_IncompatibleClassChangeError
    // Check this before allowing <clinit> methods to access static fields
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  if (!is_field && !field_holder->is_initialized()) {
    if (!static_field_ok_in_clinit(field, method())) {
      uncommon_trap(Deoptimization::Reason_uninitialized,
                    Deoptimization::Action_reinterpret,
                    NULL, "!static_field_ok_in_clinit");
      return;
    }
  }

  assert(field->will_link(method()->holder(), bc()), "getfield: typeflow responsibility");

  // Note:  We do not check for an unloaded field type here any more.

  // Generate code for the object pointer.
  Node* obj;
  if (is_field) {
    int obj_depth = is_get ? 0 : field->type()->size();
    obj = do_null_check(peek(obj_depth), T_OBJECT);
    // Compile-time detect of null-exception?
    if (stopped())  return;

#ifdef ASSERT
    const TypeInstPtr *tjp = TypeInstPtr::make(TypePtr::NotNull, iter().get_declared_field_holder());
    assert(_gvn.type(obj)->higher_equal(tjp), "cast_up is no longer needed");
#endif

    if (is_get) {
      --_sp;  // pop receiver before getting
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
      --_sp;  // pop receiver after putting
    }
  } else {
    const TypeInstPtr* tip = TypeInstPtr::make(field_holder->java_mirror());
    obj = _gvn.makecon(tip);
    if (is_get) {
      do_get_xxx(obj, field, is_field);
    } else {
      do_put_xxx(obj, field, is_field);
    }
  }
}
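The obj_depth logic above follows directly from the JVM expression-stack layout: for getfield the receiver is on top of the stack, while for putfield the value being stored (one or two slots, per field->type()->size()) sits above the receiver. A minimal standalone sketch of that arithmetic, using hypothetical names rather than HotSpot API:

#include <cstdio>

// Minimal illustration (not HotSpot code): where the receiver sits on the
// JVM expression stack for getfield vs. putfield, which is what
// peek(obj_depth) above relies on.
//   getfield:  ..., receiver          -> receiver is at depth 0
//   putfield:  ..., receiver, value   -> receiver is at depth size(value)
static int receiver_depth(bool is_get, int value_slots /* 1, or 2 for long/double */) {
  return is_get ? 0 : value_slots;
}

int main() {
  std::printf("getfield:      depth %d\n", receiver_depth(true, 1));   // 0
  std::printf("putfield int:  depth %d\n", receiver_depth(false, 1));  // 1
  std::printf("putfield long: depth %d\n", receiver_depth(false, 2));  // 2
  return 0;
}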
Example #2
//=============================================================================
void Parse::do_anewarray() {
  bool will_link;
  ciKlass* klass = iter().get_klass(will_link);

  // Uncommon Trap when class that array contains is not loaded
  // we need the loaded class for the rest of graph; do not
  // initialize the container class (see Java spec)!!!
  assert(will_link, "anewarray: typeflow responsibility");

  ciObjArrayKlass* array_klass = ciObjArrayKlass::make(klass);
  // Check that array_klass object is loaded
  if (!array_klass->is_loaded()) {
    // Generate uncommon_trap for unloaded array_class
    uncommon_trap(Deoptimization::Reason_unloaded,
                  Deoptimization::Action_reinterpret,
                  array_klass);
    return;
  }

  kill_dead_locals();

  const TypeKlassPtr* array_klass_type = TypeKlassPtr::make(array_klass);
  Node* count_val = pop();
  Node* obj = new_array(makecon(array_klass_type), count_val, 1);
  push(obj);
}
Example #3
//------------------------------do_new-----------------------------------------
void Parse::do_new() {
  kill_dead_locals();

  bool will_link;
  ciInstanceKlass* klass = iter().get_klass(will_link)->as_instance_klass();
  assert(will_link, "_new: typeflow responsibility");

  // Should initialize, or throw an InstantiationError?
  if (!klass->is_initialized() && !klass->is_being_initialized() ||
      klass->is_abstract() || klass->is_interface() ||
      klass->name() == ciSymbol::java_lang_Class() ||
      iter().is_unresolved_klass()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  klass);
    return;
  }
  if (klass->is_being_initialized()) {
    emit_guard_for_new(klass);
  }

  Node* kls = makecon(TypeKlassPtr::make(klass));
  Node* obj = new_instance(kls);

  // Push resultant oop onto stack
  push(obj);

  // Keep track of whether opportunities exist for StringBuilder
  // optimizations.
  if (OptimizeStringConcat &&
      (klass == C->env()->StringBuilder_klass() ||
       klass == C->env()->StringBuffer_klass())) {
    C->set_has_stringbuilder(true);
  }
}
Example #4
void Parse::emit_guard_for_new(ciInstanceKlass* klass) {
  // Emit guarded new
  //   if (klass->_init_thread != current_thread ||
  //       klass->_init_state != being_initialized)
  //      uncommon_trap
  Node* cur_thread = _gvn.transform( new (C, 1) ThreadLocalNode() );
  Node* merge = new (C, 3) RegionNode(3);
  _gvn.set_type(merge, Type::CONTROL);
  Node* kls = makecon(TypeKlassPtr::make(klass));

  Node* init_thread_offset = _gvn.MakeConX(instanceKlass::init_thread_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes());
  Node* adr_node = basic_plus_adr(kls, kls, init_thread_offset);
  Node* init_thread = make_load(NULL, adr_node, TypeRawPtr::BOTTOM, T_ADDRESS);
  Node *tst   = Bool( CmpP( init_thread, cur_thread), BoolTest::eq);
  IfNode* iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
  set_control(IfTrue(iff));
  merge->set_req(1, IfFalse(iff));

  Node* init_state_offset = _gvn.MakeConX(instanceKlass::init_state_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes());
  adr_node = basic_plus_adr(kls, kls, init_state_offset);
  Node* init_state = make_load(NULL, adr_node, TypeInt::INT, T_INT);
  Node* being_init = _gvn.intcon(instanceKlass::being_initialized);
  tst   = Bool( CmpI( init_state, being_init), BoolTest::eq);
  iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
  set_control(IfTrue(iff));
  merge->set_req(2, IfFalse(iff));

  PreserveJVMState pjvms(this);
  record_for_igvn(merge);
  set_control(merge);

  uncommon_trap(Deoptimization::Reason_uninitialized,
                Deoptimization::Action_reinterpret,
                klass);
}
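The guard built above encodes the pseudocode from the leading comment: stay on the compiled fast path only while the current thread is the one running <clinit> and the class is still marked being_initialized; otherwise deoptimize. A straight-line sketch of that condition (placeholder names, not HotSpot code):

// Illustrative sketch of the guard emitted above (placeholder names, not HotSpot API).
struct KlassStateSketch {
  const void* init_thread;   // thread currently running <clinit>, if any
  int         init_state;    // e.g. being_initialized, fully_initialized, ...
};

// True if the compiled fast-path allocation may proceed; otherwise the
// generated code takes uncommon_trap(Reason_uninitialized) and the
// allocation re-executes in the interpreter.
static bool fast_new_allowed(const KlassStateSketch& k,
                             const void* current_thread,
                             int being_initialized) {
  return k.init_thread == current_thread && k.init_state == being_initialized;
}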
Example #5
//------------------------------do_new-----------------------------------------
void Parse::do_new() {
  kill_dead_locals();

  // The allocator will coalesce int->oop copies away.  See comment in
  // coalesce.cpp about how this works.  It depends critically on the exact
  // code shape produced here, so if you are changing this code shape
  // make sure the GC info for the heap-top is correct in and around the
  // slow-path call.

  bool will_link;
  ciInstanceKlass* klass = iter().get_klass(will_link)->as_instance_klass();
  assert(will_link, "_new: typeflow responsibility");

  // Should initialize, or throw an InstantiationError?
  if (!klass->is_initialized() ||
      klass->is_abstract() || klass->is_interface() ||
      klass->name() == ciSymbol::java_lang_Class()) {
    uncommon_trap(Deoptimization::Reason_uninitialized,
                  Deoptimization::Action_reinterpret,
                  klass);
    return;
  }

  Node* obj = new_instance(klass);

  // Push resultant oop onto stack
  push(obj);
}
Example #6
 // Shorthand, to avoid saying "Deoptimization::" so many times.
 void uncommon_trap(Deoptimization::DeoptReason reason,
                    Deoptimization::DeoptAction action,
                    ciKlass* klass = NULL, const char* reason_string = NULL,
                    bool must_throw = false, bool keep_exact_action = false) {
   uncommon_trap(Deoptimization::make_trap_request(reason, action),
                 klass, reason_string, must_throw, keep_exact_action);
 }
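The shorthand only folds the (reason, action) pair into a single trap-request integer via Deoptimization::make_trap_request and forwards it to the other overload. A minimal sketch of that kind of packing, with an assumed bitfield layout for illustration (the real encoding is whatever make_trap_request and its accessors define):

// Packing sketch only; the field widths and enumerator names are assumptions,
// not the actual HotSpot layout.
enum SketchReason { Reason_none_s, Reason_unhandled_s, Reason_uninitialized_s, Reason_age_s };
enum SketchAction { Action_none_s, Action_maybe_recompile_s, Action_reinterpret_s };

static int make_trap_request_sketch(SketchReason r, SketchAction a) {
  return (int(r) << 8) | int(a);          // reason in the high bits, action in the low byte
}
static SketchReason trap_request_reason_sketch(int req) { return SketchReason(req >> 8); }
static SketchAction trap_request_action_sketch(int req) { return SketchAction(req & 0xff); }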
Example #7
//----------------------test_counter_against_threshold ------------------------
void Parse::test_counter_against_threshold(Node* cnt, int limit) {
  // Test the counter against the limit and uncommon trap if greater.

  // This code is largely copied from the range check code in
  // array_addressing()

  // Test invocation count vs threshold
  Node *threshold = makecon(TypeInt::make(limit));
  Node *chk   = _gvn.transform( new (C) CmpUNode( cnt, threshold) );
  BoolTest::mask btest = BoolTest::lt;
  Node *tst   = _gvn.transform( new (C) BoolNode( chk, btest) );
  // Branch to failure if threshold exceeded
  { BuildCutout unless(this, tst, PROB_ALWAYS);
    uncommon_trap(Deoptimization::Reason_age,
                  Deoptimization::Action_maybe_recompile);
  }
}
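BuildCutout arranges the branch so that the expected arm (counter still below the limit, PROB_ALWAYS) falls through and the other arm ends in the uncommon trap; CmpU plus BoolTest::lt makes it an unsigned comparison. The equivalent straight-line logic, as a sketch with placeholder names:

// Sketch of the control flow built above (placeholder names, not HotSpot API).
static void counter_guard_sketch(unsigned cnt, unsigned limit) {
  if (!(cnt < limit)) {   // CmpU + BoolTest::lt, i.e. an unsigned compare
    // uncommon_trap(Reason_age, Action_maybe_recompile): deoptimize so the
    // method can be re-profiled/recompiled once the counter passes the limit.
    return;
  }
  // expected path (PROB_ALWAYS): keep running the compiled code
}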
Example #8
// uncommon-trap call-sites where callee is unloaded, uninitialized or will not link
bool Parse::can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass* klass) {
  // Additional inputs to consider...
  // bc      = bc()
  // caller  = method()
  // iter().get_method_holder_index()
  assert( dest_method->is_loaded(), "ciTypeFlow should not let us get here" );
  // Interface classes can be loaded & linked and never get around to
  // being initialized.  Uncommon-trap for not-initialized static or
  // v-calls.  Let interface calls happen.
  ciInstanceKlass* holder_klass  = dest_method->holder();
  if (!holder_klass->is_initialized() &&
      !holder_klass->is_interface()) {

    if( method()->is_static() && method()->name() == ciSymbol::class_initializer_name() )
      return false;             // OK to inline inside of <clinit>
    if( method()->name() == ciSymbol::object_initializer_name() )
      // because any thread calling the constructor must first have
      // synchronized on the class by executing a '_new' bytecode.
      return false;

    // Here we have decided that we cannot make the call because the method
    // holder is not initialized.  We can still check for a null-receiver at
    // runtime and throw the NPE - which avoids a deopt if the user is
    // expecting the NPE.
    if (!dest_method->is_static()) {
      int nargs = 1 + dest_method->signature()->size();
      assert(sp() >= nargs, "stack accepts only positive values");
      Node* receiver = stack(sp() - nargs);
      receiver = do_null_check(receiver, T_OBJECT, "nullchk receiver");
    }
    uncommon_trap(Deoptimization::Reason_uninitialized, holder_klass,
                  "call site where called method holder is not initialized", false);
    return true;
  }

  assert(dest_method->will_link(method()->holder(), klass, bc()), "dest_method: typeflow responsibility");
  return false;
}
Example #9
void Parse::do_multianewarray() {
  int ndimensions = iter().get_dimensions();

  // the m-dimensional array
  bool will_link;
  ciArrayKlass* array_klass = iter().get_klass(will_link)->as_array_klass();
  assert(will_link, "multianewarray: typeflow responsibility");

  // Note:  Array classes are always initialized; no is_initialized check.

  enum { MAX_DIMENSION = 5 };
  if (ndimensions > MAX_DIMENSION || ndimensions <= 0) {
    uncommon_trap(Deoptimization::Reason_unhandled,
                  Deoptimization::Action_none);
    return;
  }

  kill_dead_locals();

  // get the lengths from the stack (first dimension is on top)
  Node* length[MAX_DIMENSION+1];
  length[ndimensions] = NULL;  // terminating null for make_runtime_call
  int j;
  for (j = ndimensions-1; j >= 0 ; j--) length[j] = pop();

  // The original expression was of this form: new T[length0][length1]...
  // It is often the case that the lengths are small (except the last).
  // If that happens, use the fast 1-d creator a constant number of times.
  const jint expand_limit = MIN2((juint)MultiArrayExpandLimit, (juint)100);
  jint expand_count = 1;        // count of allocations in the expansion
  jint expand_fanout = 1;       // running total fanout
  for (j = 0; j < ndimensions-1; j++) {
    jint dim_con = find_int_con(length[j], -1);
    expand_fanout *= dim_con;
    expand_count  += expand_fanout; // count the level-J sub-arrays
    if (dim_con <= 0
        || dim_con > expand_limit
        || expand_count > expand_limit) {
      expand_count = 0;
      break;
    }
  }

  // Can use multianewarray instead of [a]newarray if only one dimension,
  // or if all non-final dimensions are small constants.
  if (ndimensions == 1 || (1 <= expand_count && expand_count <= expand_limit)) {
    Node* obj = NULL;
    // Set the original stack and the reexecute bit for the interpreter
    // to reexecute the multianewarray bytecode if deoptimization happens.
    // Do it unconditionally even for one dimension multianewarray.
    // Note: the reexecute bit will be set in GraphKit::add_safepoint_edges()
    // when AllocateArray node for newarray is created.
    { PreserveReexecuteState preexecs(this);
      _sp += ndimensions;
      // Pass 0 as nargs since uncommon trap code does not need to restore stack.
      obj = expand_multianewarray(array_klass, &length[0], ndimensions, 0);
    } //original reexecute and sp are set back here
    push(obj);
    return;
  }

  address fun = NULL;
  switch (ndimensions) {
  //case 1: Actually, there is no case 1.  It's handled by new_array.
  case 2: fun = OptoRuntime::multianewarray2_Java(); break;
  case 3: fun = OptoRuntime::multianewarray3_Java(); break;
  case 4: fun = OptoRuntime::multianewarray4_Java(); break;
  case 5: fun = OptoRuntime::multianewarray5_Java(); break;
  default: ShouldNotReachHere();
  };

  Node* c = make_runtime_call(RC_NO_LEAF | RC_NO_IO,
                              OptoRuntime::multianewarray_Type(ndimensions),
                              fun, NULL, TypeRawPtr::BOTTOM,
                              makecon(TypeKlassPtr::make(array_klass)),
                              length[0], length[1], length[2],
                              length[3], length[4]);
  Node* res = _gvn.transform(new (C, 1) ProjNode(c, TypeFunc::Parms));

  const Type* type = TypeOopPtr::make_from_klass_raw(array_klass);

  // Improve the type:  We know it's not null, exact, and of a given length.
  type = type->is_ptr()->cast_to_ptr_type(TypePtr::NotNull);
  type = type->is_aryptr()->cast_to_exactness(true);

  const TypeInt* ltype = _gvn.find_int_type(length[0]);
  if (ltype != NULL)
    type = type->is_aryptr()->cast_to_size(ltype);

  // We cannot sharpen the nested sub-arrays, since the top level is mutable.

  Node* cast = _gvn.transform( new (C, 2) CheckCastPPNode(control(), res, type) );
  push(cast);

  // Possible improvements:
  // - Make a fast path for small multi-arrays.  (W/ implicit init. loops.)
  // - Issue CastII against length[*] values, to TypeInt::POS.
}
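The expansion heuristic counts how many 1-d allocations the expanded form would need: one for the top-level array plus the running fanout of every constant outer dimension. For example, new T[2][3][n] with a non-constant n gives expand_count = 1 + 2 + 2*3 = 9: one [2] array, two [3] arrays, and six innermost arrays of length n. A standalone sketch of the same arithmetic:

#include <cstdio>

// Mirrors the expand_count/expand_fanout loop above for constant outer
// dimensions; returns 0 when expansion is not worthwhile (same convention
// as the parser code). Illustrative only.
static int expand_count_for(const int* dims, int ndimensions, int expand_limit) {
  int count = 1, fanout = 1;             // count the top-level array itself
  for (int j = 0; j < ndimensions - 1; j++) {
    int dim = dims[j];                   // analogue of find_int_con(length[j], -1)
    fanout *= dim;
    count  += fanout;                    // level-j sub-arrays
    if (dim <= 0 || dim > expand_limit || count > expand_limit)
      return 0;
  }
  return count;
}

int main() {
  int dims[] = {2, 3, -1};               // new T[2][3][n], n not a constant
  std::printf("expand_count = %d\n", expand_count_for(dims, 3, 100));  // prints 9
  return 0;
}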
Example #10
//------------------------------array_store_check------------------------------
// pull array from stack and check that the store is valid
void Parse::array_store_check() {

  // Shorthand access to array store elements without popping them.
  Node *obj = peek(0);
  Node *idx = peek(1);
  Node *ary = peek(2);

  if (_gvn.type(obj) == TypePtr::NULL_PTR) {
    // There's never a type check on null values.
    // This cutout lets us avoid the uncommon_trap(Reason_array_check)
    // below, which turns into a performance liability if the
    // gen_checkcast folds up completely.
    return;
  }

  // Extract the array klass type
  int klass_offset = oopDesc::klass_offset_in_bytes();
  Node* p = basic_plus_adr( ary, ary, klass_offset );
  // p's type is array-of-OOPS plus klass_offset
  Node* array_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS) );
  // Get the array klass
  const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr();

  // array_klass's type is generally INexact array-of-oop.  Heroically
  // cast the array klass to EXACT array and uncommon-trap if the cast
  // fails.
  bool always_see_exact_class = false;
  if (MonomorphicArrayCheck
      && !too_many_traps(Deoptimization::Reason_array_check)) {
    always_see_exact_class = true;
    // (If no MDO at all, hope for the best, until a trap actually occurs.)
  }

  // Is the array klass exactly its defined type?
  if (always_see_exact_class && !tak->klass_is_exact()) {
    // Make a constant out of the inexact array klass
    const TypeKlassPtr *extak = tak->cast_to_exactness(true)->is_klassptr();
    Node* con = makecon(extak);
    Node* cmp = _gvn.transform(new (C) CmpPNode( array_klass, con ));
    Node* bol = _gvn.transform(new (C) BoolNode( cmp, BoolTest::eq ));
    Node* ctrl= control();
    { BuildCutout unless(this, bol, PROB_MAX);
      uncommon_trap(Deoptimization::Reason_array_check,
                    Deoptimization::Action_maybe_recompile,
                    tak->klass());
    }
    if (stopped()) {          // MUST uncommon-trap?
      set_control(ctrl);      // Then Don't Do It, just fall into the normal checking
    } else {                  // Cast array klass to exactness:
      // Use the exact constant value we know it is.
      replace_in_map(array_klass,con);
      CompileLog* log = C->log();
      if (log != NULL) {
        log->elem("cast_up reason='monomorphic_array' from='%d' to='(exact)'",
                  log->identify(tak->klass()));
      }
      array_klass = con;      // Use cast value moving forward
    }
  }

  // Come here for polymorphic array klasses

  // Extract the array element class
  int element_klass_offset = in_bytes(ObjArrayKlass::element_klass_offset());
  Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset);
  Node *a_e_klass = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p2, tak) );

  // Check (the hard way) and throw if not a subklass.
  // Result is ignored, we just need the CFG effects.
  gen_checkcast( obj, a_e_klass );
}
Example #11
//------------------------------array_store_check------------------------------
// pull array from stack and check that the store is valid
void Parse::array_store_check() {

  // Shorthand access to array store elements without popping them.
  Node *obj = peek(0);
  Node *idx = peek(1);
  Node *ary = peek(2);

  if (_gvn.type(obj) == TypePtr::NULL_PTR) {
    // There's never a type check on null values.
    // This cutout lets us avoid the uncommon_trap(Reason_array_check)
    // below, which turns into a performance liability if the
    // gen_checkcast folds up completely.
    return;
  }

  // Extract the array klass type
  int klass_offset = oopDesc::klass_offset_in_bytes();
  Node* p = basic_plus_adr( ary, ary, klass_offset );
  // p's type is array-of-OOPS plus klass_offset
  Node* array_klass = _gvn.transform(LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS));
  // Get the array klass
  const TypeKlassPtr *tak = _gvn.type(array_klass)->is_klassptr();

  // The type of array_klass is usually INexact array-of-oop.  Heroically
  // cast array_klass to EXACT array and uncommon-trap if the cast fails.
  // Make constant out of the inexact array klass, but use it only if the cast
  // succeeds.
  bool always_see_exact_class = false;
  if (MonomorphicArrayCheck
      && !too_many_traps(Deoptimization::Reason_array_check)
      && !tak->klass_is_exact()
      && tak != TypeKlassPtr::OBJECT) {
      // Regarding the fourth condition in the if-statement from above:
      //
      // If the compiler has determined that the type of array 'ary' (represented
      // by 'array_klass') is java/lang/Object, the compiler must not assume that
      // the array 'ary' is monomorphic.
      //
      // If 'ary' were of type java/lang/Object, this arraystore would have to fail,
      // because it is not possible to perform an arraystore into an object that is not
      // a "proper" array.
      //
      // Therefore, let's obtain at runtime the type of 'ary' and check if we can still
      // successfully perform the store.
      //
      // The implementation reasons for the condition are the following:
      //
      // java/lang/Object is the superclass of all arrays, but it is represented by the VM
      // as an InstanceKlass. The checks generated by gen_checkcast() (see below) expect
      // 'array_klass' to be ObjArrayKlass, which can result in invalid memory accesses.
      //
      // See issue JDK-8057622 for details.

    always_see_exact_class = true;
    // (If no MDO at all, hope for the best, until a trap actually occurs.)

    // Make a constant out of the inexact array klass
    const TypeKlassPtr *extak = tak->cast_to_exactness(true)->is_klassptr();
    Node* con = makecon(extak);
    Node* cmp = _gvn.transform(new CmpPNode( array_klass, con ));
    Node* bol = _gvn.transform(new BoolNode( cmp, BoolTest::eq ));
    Node* ctrl= control();
    { BuildCutout unless(this, bol, PROB_MAX);
      uncommon_trap(Deoptimization::Reason_array_check,
                    Deoptimization::Action_maybe_recompile,
                    tak->klass());
    }
    if (stopped()) {          // MUST uncommon-trap?
      set_control(ctrl);      // Then Don't Do It, just fall into the normal checking
    } else {                  // Cast array klass to exactness:
      // Use the exact constant value we know it is.
      replace_in_map(array_klass,con);
      CompileLog* log = C->log();
      if (log != NULL) {
        log->elem("cast_up reason='monomorphic_array' from='%d' to='(exact)'",
                  log->identify(tak->klass()));
      }
      array_klass = con;      // Use cast value moving forward
    }
  }

  // Come here for polymorphic array klasses

  // Extract the array element class
  int element_klass_offset = in_bytes(ObjArrayKlass::element_klass_offset());
  Node *p2 = basic_plus_adr(array_klass, array_klass, element_klass_offset);
  // We are allowed to use the constant type only if cast succeeded. If always_see_exact_class is true,
  // we must set a control edge from the IfTrue node created by the uncommon_trap above to the
  // LoadKlassNode.
  Node* a_e_klass = _gvn.transform(LoadKlassNode::make(_gvn, always_see_exact_class ? control() : NULL,
                                                       immutable_memory(), p2, tak));

  // Check (the hard way) and throw if not a subklass.
  // Result is ignored, we just need the CFG effects.
  gen_checkcast(obj, a_e_klass);
}
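The heroic monomorphic-array cast above reduces to: load the array's klass at runtime, compare it against the single exact klass the compiler expects, trap (Reason_array_check, maybe recompile) if they differ, and otherwise treat the klass as a compile-time constant for the element-klass load and the subtype check. A rough sketch of that decision with placeholder types and stand-in helpers (not HotSpot API):

// Illustrative sketch only (placeholder types and stand-in helpers, not HotSpot API).
struct KlassSketch { const KlassSketch* element_klass; };

static void deopt_array_check_sketch() { /* uncommon_trap(Reason_array_check, maybe_recompile) */ }
static bool is_subtype_sketch(const KlassSketch* k, const KlassSketch* super) {
  return k == super;                      // trivially simplified stand-in for gen_checkcast
}

static bool array_store_ok_sketch(const KlassSketch* array_klass,     // loaded from the array at runtime
                                  const KlassSketch* expected_exact,  // constant klass the compiler bet on, or nullptr
                                  const KlassSketch* value_klass) {   // klass of the value being stored
  if (expected_exact != nullptr) {        // always_see_exact_class case
    if (array_klass != expected_exact) {
      deopt_array_check_sketch();         // heroic cast failed: deoptimize, maybe recompile without it
      return false;
    }
    array_klass = expected_exact;         // from here on the klass is a compile-time constant
  }
  return is_subtype_sketch(value_klass, array_klass->element_klass);
}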
Example #12
//----------------------------catch_inline_exceptions--------------------------
// Handle all exceptions thrown by an inlined method or individual bytecode.
// Common case 1: we have no handler, so all exceptions merge right into
// the rethrow case.
// Case 2: we have some handlers, with loaded exception klasses that have
// no subklasses.  We do a Deutsch-Shiffman style type-check on the incoming
// exception oop and branch to the handler directly.
// Case 3: We have some handlers with subklasses, or whose klasses are not
// loaded at compile-time.  We have to call the runtime to resolve the exception.
// So we insert a RethrowCall and all the logic that goes with it.
void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
  // Caller is responsible for saving away the map for normal control flow!
  assert(stopped(), "call set_map(NULL) first");
  assert(method()->has_exception_handlers(), "don't come here w/o work to do");

  Node* ex_node = saved_ex_oop(ex_map);
  if (ex_node == top()) {
    // No action needed.
    return;
  }
  const TypeInstPtr* ex_type = _gvn.type(ex_node)->is_instptr();

  // determine potential exception handlers
  ciExceptionHandlerStream handlers(method(), bci(),
                                    ex_type->klass()->as_instance_klass(),
                                    ex_type->klass_is_exact());

  // Start executing from the given throw state.  (Keep its stack, for now.)
  // Get the exception oop as known at compile time.
  ex_node = use_exception_state(ex_map);

  // Get the exception oop klass from its header
  const TypeOopPtr *toop = ex_node->bottom_type()->is_oopptr();
  const TypeKlassPtr *tkid = TypeKlassPtr::make_kid(toop->klass(), toop->klass_is_exact());
  Node* ex_kid_node = _gvn.transform(new (C, 2) GetKIDNode(control(), ex_node, tkid));
  // Have handlers and the exception klass is not exact?  It might be the
  // merging of many exact exception klasses (happens a lot with nested inlined
  // throw/catch blocks).  
  if (has_ex_handler() && !ex_type->klass_is_exact()) {
    // Compute the exception klass a little more cleverly.
    // Obvious solution is to simply do a GetKlass from the 'ex_node'.
    // However, if the ex_node is a PhiNode, I'm going to do a GetKlass for
    // each arm of the Phi.  If I know something clever about the exceptions
    // I'm loading the class from, I can replace the GetKlass with the
    // klass constant for the exception oop.
    if (ex_node->is_Phi()) {
      ex_kid_node = new (C, ex_node->req()) PhiNode(ex_node->in(0), TypeKlassPtr::KID);
      for (uint i = 1; i < ex_node->req(); i++) {
        const TypeOopPtr* toopi = ex_node->in(i)->bottom_type()->is_oopptr();
        const TypeKlassPtr* tkidi = TypeKlassPtr::make_kid(toopi->klass(), toopi->klass_is_exact());
        Node* kid = _gvn.transform(new (C, 2) GetKIDNode(ex_node->in(0)->in(i), ex_node->in(i), tkidi));
        ex_kid_node->init_req(i, kid);
      }
      _gvn.set_type(ex_kid_node, TypeKlassPtr::KID);
    }
  }

  // Scan the exception table for applicable handlers.
  // If none, we can call rethrow() and be done!
  // If precise (loaded with no subklasses), insert a D.S. style
  // pointer compare to the correct handler and loop back.
  // If imprecise, switch to the Rethrow VM-call style handling.

  int remaining = handlers.count_remaining();

  // iterate through all entries sequentially
  ciInstanceKlass* handler_catch_klass = NULL;
  for (;!handlers.is_done(); handlers.next()) {
    // Do nothing if turned off
    if( !DeutschShiffmanExceptions ) break;
    ciExceptionHandler* handler = handlers.handler();

    if (handler->is_rethrow()) {
      // If we fell off the end of the table without finding an imprecise
      // exception klass (and without finding a generic handler) then we
      // know this exception is not handled in this method.  We just rethrow
      // the exception into the caller.
      throw_to_exit(make_exception_state(ex_node));
      return;
    }

    // exception handler bci range covers throw_bci => investigate further
    int handler_bci = handler->handler_bci();

    if (remaining == 1) {
      push_ex_oop(ex_node);        // Push exception oop for handler
      merge_exception(handler_bci); // jump to handler
      return;                   // No more handling to be done here!
    }

    handler_catch_klass = handler->catch_klass();
    if (!handler_catch_klass->is_loaded()) // klass is not loaded?
      break;                    // Must call Rethrow!
    // Sharpen handler klass.  Some klasses cannot have any oops
    // (e.g. interface with no implementations).
    const TypePtr* tpx = TypeOopPtr::make_from_klass_unique(handler_catch_klass);
    const TypeOopPtr *tp = tpx->isa_oopptr(); // Oop of this klass is possible?
    Node *handler_klass = tp ? _gvn.makecon( TypeKlassPtr::make_kid(tp->klass(),true) ) : NULL;

    Node *failure = gen_subtype_check( ex_kid_node, handler_klass, _gvn.type(ex_node) );
    { PreserveJVMState pjvms(this);
      Node* ex_oop = _gvn.transform(new (C, 2) CheckCastPPNode(control(), ex_node, tpx));
      push_ex_oop(ex_oop);      // Push exception oop for handler
      merge_exception(handler_bci);
    }

    // Come here if exception does not match handler.
    // Carry on with more handler checks.
    set_control(failure);
    --remaining;
  }

  assert(!stopped(), "you should return if you finish the chain");

  if (remaining == 1) {
    // Further checks do not matter.
  }

  if (can_rerun_bytecode()) {
    // Do not push_ex_oop here!
    // Re-executing the bytecode will reproduce the throwing condition.
    bool must_throw = true;
    uncommon_trap(Deoptimization::Reason_unloaded, handler_catch_klass,
                  "matching handler klass not loaded", must_throw);
    return;
  }

  // Oops, need to call into the VM to resolve the klasses at runtime.
  // Note:  This call must not deoptimize, since it is not a real call at this bci!
  kill_dead_locals();

  make_runtime_call(RC_NO_LEAF | RC_MUST_THROW,
                    false /* !must_callruntimenode */,
                    OptoRuntime::forward_exception2_Type(),
                    StubRoutines::forward_exception_entry2(),
                    "forward_exception2",
                    TypeRawPtr::BOTTOM, // sets the exception oop back into thr->_pending_ex
                    ex_node);

  // Rethrow is a pure call, no side effects, only a result.
  // The result cannot be allocated, so we use I_O

  // Catch exceptions from the rethrow
  catch_call_exceptions(handlers);
}
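Stripped of IR plumbing, the handler dispatch built above has this shape: walk the handler table in order, test each loaded catch klass with an inline subtype check and jump to its handler on a match, and bail out to the rethrow machinery (uncommon trap or runtime call) as soon as an unloaded or imprecise klass is met or the table is exhausted. A sketch with placeholder types and stand-in helpers (not HotSpot API):

#include <vector>

// Placeholder types and stand-in helpers, not HotSpot API.
struct HandlerSketch { const void* catch_klass; int handler_bci; bool klass_loaded; };

static bool is_subtype_sketch(const void* ex_klass, const void* catch_klass) {
  return ex_klass == catch_klass;         // trivially simplified gen_subtype_check stand-in
}
static void goto_handler_sketch(int /*bci*/) { /* push_ex_oop + merge_exception(bci) */ }
static void rethrow_out_of_line_sketch()     { /* uncommon trap or forward_exception runtime call */ }
static void rethrow_to_caller_sketch()       { /* throw_to_exit(...) */ }

static void dispatch_sketch(const void* ex_klass, const std::vector<HandlerSketch>& handlers) {
  for (const HandlerSketch& h : handlers) {
    if (!h.klass_loaded) {                // cannot test an unloaded klass inline
      rethrow_out_of_line_sketch();
      return;
    }
    if (is_subtype_sketch(ex_klass, h.catch_klass)) {
      goto_handler_sketch(h.handler_bci); // Deutsch-Shiffman hit: branch straight to the handler
      return;
    }
  }
  rethrow_to_caller_sketch();             // fell off the table: not handled in this method
}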
Example #13
//---------------------------catch_call_exceptions-----------------------------
// Put a Catch and CatchProj nodes behind a just-created call.
// Send their caught exceptions to the proper handler.
// This may be used after a call to the rethrow VM stub,
// when it is needed to process unloaded exception classes.
void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) {
  // Exceptions are delivered through this channel:
  Node* i_o = this->i_o();

  // Add a CatchNode.
  GrowableArray<int>* bcis = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, -1);
  GrowableArray<const Type*>* extypes = new (C->node_arena()) GrowableArray<const Type*>(C->node_arena(), 8, 0, NULL);
  GrowableArray<int>* saw_unloaded = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, 0);

  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* h        = handlers.handler();
    int                 h_bci    = h->handler_bci();
    ciInstanceKlass*    h_klass  = h->is_catch_all() ? env()->Throwable_klass() : h->catch_klass();
    const TypePtr* h_extype = TypeOopPtr::make_from_klass_unique(h_klass)->cast_away_null();
    // Ignore exceptions with no implementors.  These cannot be thrown
    // (without class loading anyways, which will deopt this code).
    if( h_extype->empty() ) continue;

    // Do not introduce unloaded exception types into the graph:
    if (!h_klass->is_loaded()) {
      if (saw_unloaded->contains(h_bci)) {
        /* We've already seen an unloaded exception with h_bci, 
           so don't duplicate. Duplication will cause the CatchNode to be
           unnecessarily large. See 4713716. */
        continue;
      } else {
        saw_unloaded->append(h_bci);
      }
    }
    // Note:  It's OK if the BCIs repeat themselves.
    bcis->append(h_bci);
    extypes->append(h_extype);
  }

  int len = bcis->length();
  CatchNode *cn = new (C, 2) CatchNode(control(), i_o, len+1);
  Node *catch_ = _gvn.transform(cn);

  // now branch with the exception state to each of the (potential)
  // handlers
  for(int i=0; i < len; i++) {
    // Setup JVM state to enter the handler.
    PreserveJVMState pjvms(this);
    // Locals are just copied from before the call.
    // Get control from the CatchNode.
    int handler_bci = bcis->at(i);
    Node* ctrl = _gvn.transform( new (C, 1) CatchProjNode(catch_, i+1,handler_bci));
    // This handler cannot happen?
    if (ctrl == top())  continue;
    set_control(ctrl);

    // Create exception oop
    const TypeInstPtr* extype = extypes->at(i)->is_instptr();
    
    Node *thread = _gvn.transform( new (C, 1) ThreadLocalNode() );
    Node* ex_adr = basic_plus_adr(top(), thread, in_bytes(JavaThread::pending_exception_offset()));
    int pending_ex_alias_idx = C->get_alias_index(ex_adr->bottom_type()->is_ptr());
    Node *ex_oop = make_load( NULL, ex_adr, extype, T_OBJECT, pending_ex_alias_idx );
    Node *ex_st  = store_to_memory( ctrl, ex_adr, null(), T_OBJECT, pending_ex_alias_idx );
    record_for_igvn(ex_st);

    // Handle unloaded exception classes.
    if (saw_unloaded->contains(handler_bci)) {
      // An unloaded exception type is coming here.  Do an uncommon trap.
      // We do not expect the same handler bci to take both cold unloaded
      // and hot loaded exceptions.  But, watch for it.
      if (PrintOpto && extype->is_loaded()) {
        C2OUT->print_cr("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ", handler_bci);
        method()->print_name(C2OUT); C2OUT->cr();
      }
      // Emit an uncommon trap instead of processing the block.
      set_bci(handler_bci);
      push_ex_oop(ex_oop);
      uncommon_trap(Deoptimization::Reason_unloaded, extype->klass(), "not loaded exception", false);
      set_bci(iter().cur_bci()); // put it back
      continue;
    }

    // go to the exception handler
    if (handler_bci < 0) {     // merge with corresponding rethrow node
      throw_to_exit(make_exception_state(ex_oop));
    } else {                      // Else jump to corresponding handle
      push_ex_oop(ex_oop);        // Clear stack and push just the oop.
      merge_exception(handler_bci);
    }
  }

  // The first CatchProj is for the normal return.
  // (Note:  If this is a call to rethrow_Java, this node goes dead.)
  set_control(_gvn.transform( new (C, 1) CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci)));
}
Example #14
//------------------------------do_call----------------------------------------
// Handle your basic call.  Inline if we can & want to, else just setup call.
void Parse::do_call() {
  // It's likely we are going to add debug info soon.
  // Also, if we inline a guy who eventually needs debug info for this JVMS,
  // our contribution to it is cleaned up right here.
  kill_dead_locals();

  // Set frequently used booleans
  bool is_virtual = bc() == Bytecodes::_invokevirtual;
  bool is_virtual_or_interface = is_virtual || bc() == Bytecodes::_invokeinterface;
  bool has_receiver = is_virtual_or_interface || bc() == Bytecodes::_invokespecial;

  // Find target being called
  bool             will_link;
  ciMethod*        dest_method   = iter().get_method(will_link);
  ciInstanceKlass* holder_klass  = dest_method->holder();
  ciKlass* holder = iter().get_declared_method_holder();
  ciInstanceKlass* klass = ciEnv::get_instance_klass_for_declared_method_holder(holder);

  int   nargs    = dest_method->arg_size();
  // See if the receiver (if any) is NULL, hence we always throw BEFORE
  // attempting to resolve the call or initialize the holder class.  Doing so
  // out of order opens a window where we can endlessly deopt because the call
  // holder is not initialized, but the call never actually happens (forcing
  // class initialization) because we only see NULL receivers.
  CPData_Invoke *caller_cpdi = cpdata()->as_Invoke(bc());
  debug_only( assert(caller_cpdi->is_Invoke(), "Not invoke!") );
  if( is_virtual_or_interface &&
      _gvn.type(stack(sp() - nargs))->higher_equal(TypePtr::NULL_PTR) ) {
    builtin_throw( Deoptimization::Reason_null_check, "null receiver", caller_cpdi, caller_cpdi->saw_null(), /*must_throw=*/true );
    return;
  }

  // uncommon-trap when callee is unloaded, uninitialized or will not link
  // bailout when too many arguments for register representation
  if (!will_link || can_not_compile_call_site(dest_method, klass)) {
    return;
  }
  assert(FAM || holder_klass->is_loaded(), "");
  assert(dest_method->is_static() == !has_receiver, "must match bc");
  // Note: this takes into account invokeinterface of methods declared in java/lang/Object,
  // which should be invokevirtuals but according to the VM spec may be invokeinterfaces
  assert(holder_klass->is_interface() || holder_klass->super() == NULL || (bc() != Bytecodes::_invokeinterface), "must match bc");
  // Note:  In the absence of miranda methods, an abstract class K can perform
  // an invokevirtual directly on an interface method I.m if K implements I.

  // ---------------------
  // Does Class Hierarchy Analysis reveal only a single target of a v-call?
  // Then we may inline or make a static call, but become dependent on there being only 1 target.
  // Does the call-site type profile reveal only one receiver?
  // Then we may introduce a run-time check and inline on the path where it succeeds.
  // The other path may uncommon_trap, check for another receiver, or do a v-call.

  // Choose call strategy.
  bool call_is_virtual = is_virtual_or_interface;
  int vtable_index = methodOopDesc::invalid_vtable_index;
  ciMethod* call_method = dest_method;

  // Try to get the most accurate receiver type
  if (is_virtual_or_interface) {
    Node*             receiver_node = stack(sp() - nargs);
    const TypeInstPtr* inst_type = _gvn.type(receiver_node)->isa_instptr();
    if (inst_type) {
      ciInstanceKlass* ikl = inst_type->klass()->as_instance_klass();
      // If the receiver is not yet linked then: (1) we never can make this
      // call because no objects can be created until linkage, and (2) CHA
      // reports incorrect answers... so do not bother with making the call
      // until after the klass gets linked.
      ciInstanceKlass *ikl2 = ikl->is_subtype_of(klass) ? ikl : klass;
      if (!ikl->is_linked()) {
        uncommon_trap(Deoptimization::Reason_uninitialized, klass,
                      "call site where receiver is not linked", false);
        return;
      }
    }
    const TypeOopPtr* receiver_type = _gvn.type(receiver_node)->isa_oopptr();
    ciMethod* optimized_virtual_method = optimize_inlining(method(), bci(), klass, dest_method, receiver_type);

    // Has the call been sufficiently improved such that it is no longer virtual?
    if (optimized_virtual_method != NULL) {
      call_method     = optimized_virtual_method;
      call_is_virtual = false;
    } else if (false) {
      // We can make a vtable call at this site
      vtable_index = call_method->resolve_vtable_index(method()->holder(), klass);
    }
  }

  // Note:  It's OK to try to inline a virtual call.
  // The call generator will not attempt to inline a polymorphic call
  // unless it knows how to optimize the receiver dispatch.
  bool try_inline = (C->do_inlining() || InlineAccessors) &&
                    (!C->method()->should_disable_inlining()) &&
                    (call_method->number_of_breakpoints() == 0);

  // Get profile data for the *callee*.  First see if we have precise
  // CodeProfile for this exact inline because C1 inlined it already.
  CodeProfile *callee_cp;
  int callee_cp_inloff;

  if( caller_cpdi->inlined_method_oid() == call_method->objectId() ) {
    callee_cp = c1_cp();        // Use same CodeProfile as current
    callee_cp_inloff = caller_cpdi->cpd_offset(); // But use inlined portion
  } else {
    // If callee has a cp, clone it and use
    callee_cp = call_method->codeprofile(true);
    callee_cp_inloff = 0;

    if (callee_cp || FAM) {
      // The cloned cp needs to be freed later
      Compile* C = Compile::current();
      C->record_cloned_cp(callee_cp);
    } else { // Had profile info at top level, but not for this call site?
      // callee_cp will hold the just created cp, or whatever cp allocated by
      // other thread which wins the race in set_codeprofile
      callee_cp = call_method->set_codeprofile(CodeProfile::make(call_method));
    }
  }

  CPData_Invoke *c2_caller_cpdi = UseC1 ? c2cpdata()->as_Invoke(bc()) : NULL;

  // ---------------------
  inc_sp(- nargs);              // Temporarily pop args for JVM state of call
  JVMState* jvms = sync_jvms();

  // ---------------------
  // Decide call tactic.
  // This call checks with CHA, the interpreter profile, intrinsics table, etc.
  // It decides whether inlining is desirable or not.
  CallGenerator* cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms,
                                        try_inline, prof_factor(), callee_cp, callee_cp_inloff,
                                        c2_caller_cpdi, caller_cpdi);

  // ---------------------
  // Round double arguments before call
  round_double_arguments(dest_method);

#ifndef PRODUCT
  // Record first part of parsing work for this call
  parse_histogram()->record_change();
#endif // not PRODUCT

  assert(jvms == this->jvms(), "still operating on the right JVMS");
  assert(jvms_in_sync(),       "jvms must carry full info into CG");

  // save across call, for a subsequent cast_not_null.
  Node* receiver = has_receiver ? argument(0) : NULL;

  JVMState* new_jvms = cg->generate(jvms, caller_cpdi, is_private_copy());
  if( new_jvms == NULL ) {      // Did it work?
    // When inlining attempt fails (e.g., too many arguments),
    // it may contaminate the current compile state, making it
    // impossible to pull back and try again.  Once we call
    // cg->generate(), we are committed.  If it fails, the whole
    // compilation task is compromised.
    if (failing())  return;
    if (PrintOpto || PrintInlining || PrintC2Inlining) {
      // Only one fall-back, so if an intrinsic fails, ignore any bytecodes.
      if (cg->is_intrinsic() && call_method->code_size() > 0) {
C2OUT->print("Bailed out of intrinsic, will not inline: ");
        call_method->print_name(C2OUT); C2OUT->cr();
      }
    }
    // This can happen if a library intrinsic is available, but refuses
    // the call site, perhaps because it did not match a pattern the
    // intrinsic was expecting to optimize.  The fallback position is
    // to call out-of-line.
    try_inline = false;  // Inline tactic bailed out.
    cg = C->call_generator(call_method, vtable_index, call_is_virtual, jvms,
                           try_inline, prof_factor(), c1_cp(), c1_cp_inloff(),
                           c2_caller_cpdi, caller_cpdi);
    new_jvms = cg->generate(jvms, caller_cpdi, is_private_copy());
    assert(new_jvms != NULL, "call failed to generate:  calls should work");
    if (c2_caller_cpdi) c2_caller_cpdi->_inlining_failure_id = IF_GENERALFAILURE;
  }

  if (cg->is_inline()) {
    C->env()->notice_inlined_method(call_method);
  }

  // Reset parser state from [new_]jvms, which now carries results of the call.
  // Return value (if any) is already pushed on the stack by the cg.
  add_exception_states_from(new_jvms);
  if (new_jvms->map()->control() == top()) {
    stop_and_kill_map();
  } else {
    assert(new_jvms->same_calls_as(jvms), "method/bci left unchanged");
    set_jvms(new_jvms);
  }

  if (!stopped()) {
    // This was some sort of virtual call, which did a null check for us.
    // Now we can assert receiver-not-null, on the normal return path.
    if (receiver != NULL && cg->is_virtual()) {
      Node* cast = cast_not_null(receiver, true);
      // %%% assert(receiver == cast, "should already have cast the receiver");
    }

    // Round double result after a call from strict to non-strict code
    round_double_result(dest_method);

    // If the return type of the method is not loaded, assert that the
    // value we got is a null.  Otherwise, we need to recompile.
    if (!dest_method->return_type()->is_loaded()) {
      // If there is going to be a trap, put it at the next bytecode:
      set_bci(iter().next_bci());
      do_null_assert(peek(), T_OBJECT);
      set_bci(iter().cur_bci()); // put it back
    } else {
      assert0( call_method->return_type()->is_loaded() );
      BasicType result_type = dest_method->return_type()->basic_type();
      if (result_type == T_OBJECT || result_type == T_ARRAY) {
        const Type *t = peek()->bottom_type();
        assert0( t == TypePtr::NULL_PTR || t->is_oopptr()->klass()->is_loaded() );
      }
    }
  }

  // Restart record of parsing work after possible inlining of call
#ifndef PRODUCT
  parse_histogram()->set_initial_state(bc());
#endif
}