Example #1
void Parse::emit_guard_for_new(ciInstanceKlass* klass) {
  // Emit guarded new
  //   if (klass->_init_thread != current_thread ||
  //       klass->_init_state != being_initialized)
  //      uncommon_trap
  Node* cur_thread = _gvn.transform( new (C, 1) ThreadLocalNode() );
  Node* merge = new (C, 3) RegionNode(3);
  _gvn.set_type(merge, Type::CONTROL);
  Node* kls = makecon(TypeKlassPtr::make(klass));

  // Load klass->_init_thread for comparison against the current thread.
  Node* init_thread_offset = _gvn.MakeConX(instanceKlass::init_thread_offset_in_bytes() +
                                           klassOopDesc::klass_part_offset_in_bytes());
  Node* adr_node = basic_plus_adr(kls, kls, init_thread_offset);
  Node* init_thread = make_load(NULL, adr_node, TypeRawPtr::BOTTOM, T_ADDRESS);
  Node *tst   = Bool( CmpP( init_thread, cur_thread), BoolTest::eq);
  IfNode* iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
  set_control(IfTrue(iff));
  merge->set_req(1, IfFalse(iff));

  // Load klass->_init_state for comparison against being_initialized.
  Node* init_state_offset = _gvn.MakeConX(instanceKlass::init_state_offset_in_bytes() +
                                          klassOopDesc::klass_part_offset_in_bytes());
  adr_node = basic_plus_adr(kls, kls, init_state_offset);
  Node* init_state = make_load(NULL, adr_node, TypeInt::INT, T_INT);
  Node* being_init = _gvn.intcon(instanceKlass::being_initialized);
  tst   = Bool( CmpI( init_state, being_init), BoolTest::eq);
  iff = create_and_map_if(control(), tst, PROB_ALWAYS, COUNT_UNKNOWN);
  set_control(IfTrue(iff));
  merge->set_req(2, IfFalse(iff));

  // Either test failed: deoptimize so the interpreter can handle initialization.
  PreserveJVMState pjvms(this);
  record_for_igvn(merge);
  set_control(merge);

  uncommon_trap(Deoptimization::Reason_uninitialized,
                Deoptimization::Action_reinterpret,
                klass);
}
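For context, here is a minimal sketch of how a parser might invoke this guard while parsing a `new` bytecode for a class that is still being initialized. The call site shown (do_new_sketch) is hypothetical and simplified, not the exact HotSpot code path.

// Hypothetical, simplified call site; assumes `klass` was resolved from the
// constant pool of the method being parsed.
void Parse::do_new_sketch(ciInstanceKlass* klass) {
  if (!klass->is_initialized()) {
    // The class may be mid-initialization by the current thread (e.g. an
    // allocation reached from <clinit>).  Guard the fast-path allocation.
    emit_guard_for_new(klass);
    if (stopped())  return;   // the guard replaced this path with a trap
  }
  // ... proceed with the normal inline allocation ...
}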
Example #2
//-------------------------------set_md_flag_at--------------------------------
void Parse::set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant) {
  Node* adr_node = method_data_addressing(md, data, DataLayout::flags_offset());

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* flags = make_load(NULL, adr_node, TypeInt::BYTE, T_BYTE, adr_type);
  Node* incr = _gvn.transform(new (C) OrINode(flags, _gvn.intcon(flag_constant)));
  store_to_memory(NULL, adr_node, incr, T_BYTE, adr_type);
}
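A hedged usage sketch: a profiling routine can OR a flag bit into the DataLayout of the current bytecode's profile record, for example to record that a null was seen at a checkcast. The helper name (profile_null_checkcast_sketch) is illustrative; the md/data lookup follows the pattern of the other examples.

// Sketch: set the null_seen flag in the BitData for the current bci.
void Parse::profile_null_checkcast_sketch() {
  ciMethodData* md = method()->method_data();
  if (md == NULL)  return;
  ciProfileData* data = md->bci_to_data(bci());
  assert(data != NULL && data->is_BitData(), "checkcast profiles use BitData");
  set_md_flag_at(md, data, BitData::null_seen_byte_constant());
}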
Example #3
//--------------------------test_for_osr_md_counter_at-------------------------
void Parse::test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, int limit) {
  Node* adr_node = method_data_addressing(md, data, counter_offset);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt  = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type);

  test_counter_against_threshold(cnt, limit);
}
Example #4
//--------------------------increment_md_counter_at----------------------------
void Parse::increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize counter_offset, Node* idx, uint stride) {
  Node* adr_node = method_data_addressing(md, data, counter_offset, idx, stride);

  const TypePtr* adr_type = _gvn.type(adr_node)->is_ptr();
  Node* cnt  = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type);
  Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(DataLayout::counter_increment)));
  store_to_memory(NULL, adr_node, incr, T_INT, adr_type );
}
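A hedged usage sketch along the same lines: branch profiling bumps the taken-count slot of the JumpData for the current bytecode. The wrapper name (profile_taken_branch_sketch) is illustrative.

// Sketch: increment the `taken` counter of the JumpData at the current bci.
void Parse::profile_taken_branch_sketch() {
  ciMethodData* md = method()->method_data();
  if (md == NULL)  return;
  ciProfileData* data = md->bci_to_data(bci());
  assert(data != NULL && data->is_JumpData(), "branches profile with JumpData");
  increment_md_counter_at(md, data, JumpData::taken_offset());
}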
Example #5
 // Create a LoadNode, reading from the parser's memory state.
 // (Note:  require_atomic_access is useful only with T_LONG.)
 //
 // We choose the unordered semantics by default because we have
 // adapted the `do_put_xxx' and `do_get_xxx' procedures for the case
 // of volatile fields.
 Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                 MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                 bool require_atomic_access = false, bool unaligned = false,
                 bool mismatched = false) {
     // This version computes alias_index from bottom_type
     return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
                      mo, control_dependency, require_atomic_access,
                      unaligned, mismatched);
 }
Example #6
 Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
                 MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
                 bool require_atomic_access = false, bool unaligned = false,
                 bool mismatched = false) {
     // This version computes alias_index from an address type
     assert(adr_type != NULL, "use other make_load factory");
     return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
                      mo, control_dependency, require_atomic_access,
                      unaligned, mismatched);
 }
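Callers choose the memory ordering explicitly with these overloads. A hedged sketch of the field-load idiom, loosely following the do_get_xxx pattern (the surrounding variables adr, type, bt, and adr_type are assumed to be computed as in that routine):

// Sketch: acquire ordering for volatile fields, unordered otherwise.
// T_LONG additionally requests single-copy-atomic access.
MemNode::MemOrd mo = field->is_volatile() ? MemNode::acquire : MemNode::unordered;
bool needs_atomic  = field->is_volatile() && bt == T_LONG;
Node* ld = make_load(NULL, adr, type, bt, adr_type, mo,
                     LoadNode::DependsOnlyOnTest, needs_atomic);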
Example #7
//----------------------increment_and_test_invocation_counter-------------------
void Parse::increment_and_test_invocation_counter(int limit) {
  if (!count_invocations()) return;

  // Get the Method* node.
  const TypePtr* adr_type = TypeMetadataPtr::make(method());
  Node *method_node = makecon(adr_type);

  // Load the interpreter_invocation_counter from the Method*.
  int offset = Method::interpreter_invocation_counter_offset_in_bytes();
  Node* adr_node = basic_plus_adr(method_node, method_node, offset);
  Node* cnt = make_load(NULL, adr_node, TypeInt::INT, T_INT, adr_type);

  test_counter_against_threshold(cnt, limit);

  // Add one to the counter and store
  Node* incr = _gvn.transform(new (C) AddINode(cnt, _gvn.intcon(1)));
  store_to_memory( NULL, adr_node, incr, T_INT, adr_type );
}
Example #8
//----------------------increment_and_test_invocation_counter-------------------
void Parse::increment_and_test_invocation_counter(int limit) {
  if (!count_invocations()) return;

  // Get the Method* node.
  ciMethod* m = method();
  MethodCounters* counters_adr = m->ensure_method_counters();
  if (counters_adr == NULL) {
    C->record_failure("method counters allocation failed");
    return;
  }

  Node* ctrl = control();
  const TypePtr* adr_type = TypeRawPtr::make((address) counters_adr);
  Node *counters_node = makecon(adr_type);
  Node* adr_iic_node = basic_plus_adr(counters_node, counters_node,
    MethodCounters::interpreter_invocation_counter_offset_in_bytes());
  Node* cnt = make_load(ctrl, adr_iic_node, TypeInt::INT, T_INT, adr_type, MemNode::unordered);

  test_counter_against_threshold(cnt, limit);

  // Add one to the counter and store
  Node* incr = _gvn.transform(new AddINode(cnt, _gvn.intcon(1)));
  store_to_memory(ctrl, adr_iic_node, incr, T_INT, adr_type, MemNode::unordered);
}
Example #9
void Parse::do_get_xxx(Node* obj, ciField* field, bool is_field) {
  // Does this field have a constant value?  If so, just push the value.
  if (field->is_constant()) {
    if (field->is_static()) {
      // final static field
      if (push_constant(field->constant_value()))
        return;
    } else {
      // final non-static field of a trusted class (classes in
      // java.lang.invoke and sun.invoke packages and subpackages).
      if (obj->is_Con()) {
        const TypeOopPtr* oop_ptr = obj->bottom_type()->isa_oopptr();
        ciObject* constant_oop = oop_ptr->const_oop();
        ciConstant constant = field->constant_value_of(constant_oop);

        if (push_constant(constant, true))
          return;
      }
    }
  }

  ciType* field_klass = field->type();
  bool is_vol = field->is_volatile();

  // Compute address and memory type.
  int offset = field->offset_in_bytes();
  const TypePtr* adr_type = C->alias_type(field)->adr_type();
  Node *adr = basic_plus_adr(obj, obj, offset);
  BasicType bt = field->layout_type();

  // Build the resultant type of the load
  const Type *type;

  bool must_assert_null = false;

  if( bt == T_OBJECT ) {
    if (!field->type()->is_loaded()) {
      type = TypeInstPtr::BOTTOM;
      must_assert_null = true;
    } else if (field->is_constant() && field->is_static()) {
      // This can happen if the constant oop is non-perm.
      ciObject* con = field->constant_value().as_object();
      // Do not "join" in the previous type; it doesn't add value,
      // and may yield a vacuous result if the field is of interface type.
      type = TypeOopPtr::make_from_constant(con)->isa_oopptr();
      assert(type != NULL, "field singleton type must be consistent");
    } else {
      type = TypeOopPtr::make_from_klass(field_klass->as_klass());
    }
  } else {
    type = Type::get_const_basic_type(bt);
  }
  // Build the load.
  Node* ld = make_load(NULL, adr, type, bt, adr_type, is_vol);

  // Adjust Java stack
  if (type2size[bt] == 1)
    push(ld);
  else
    push_pair(ld);

  if (must_assert_null) {
    // Do not take a trap here.  It's possible that the program
    // will never load the field's class, and will happily see
    // null values in this field forever.  Don't stumble into a
    // trap for such a program, or we might get a long series
    // of useless recompilations.  (Or, we might load a class
    // which should not be loaded.)  If we ever see a non-null
    // value, we will then trap and recompile.  (The trap will
    // not need to mention the class index, since the class will
    // already have been loaded if we ever see a non-null value.)
    // uncommon_trap(iter().get_field_signature_index());
#ifndef PRODUCT
    if (PrintOpto && (Verbose || WizardMode)) {
      method()->print_name(); tty->print_cr(" asserting nullness of field at bci: %d", bci());
    }
#endif
    if (C->log() != NULL) {
      C->log()->elem("assert_null reason='field' klass='%d'",
                     C->log()->identify(field->type()));
    }
    // If there is going to be a trap, put it at the next bytecode:
    set_bci(iter().next_bci());
    do_null_assert(peek(), T_OBJECT);
    set_bci(iter().cur_bci()); // put it back
  }

  // If reference is volatile, prevent following memory ops from
  // floating up past the volatile read.  Also prevents commoning
  // another volatile read.
  if (field->is_volatile()) {
    // Memory barrier includes bogus read of value to force load BEFORE membar
    insert_mem_bar(Op_MemBarAcquire, ld);
  }
}
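For symmetry, a hedged sketch of the barriers the matching do_put_xxx would emit around a volatile store in this era of the code: a release barrier before the store and a volatile barrier after it, so the store can neither sink below earlier operations nor let later ones float above it. Exact node choices vary by JDK version.

// Sketch of the volatile-store barrier pairing (assumed shape):
if (is_vol) {
  insert_mem_bar(Op_MemBarRelease);   // earlier memory ops stay before the store
}
Node* st = store_to_memory(control(), adr, val, bt, adr_type);
if (is_vol) {
  insert_mem_bar(Op_MemBarVolatile);  // later memory ops stay after the store
}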
Example #10
//--------------------gen_stub-------------------------------
void GraphKit::gen_stub(address C_function,
                        const char *name,
                        int is_fancy_jump,
                        bool pass_tls,
                        bool return_pc) {
  ResourceMark rm;

  const TypeTuple *jdomain = C->tf()->domain();
  const TypeTuple *jrange  = C->tf()->range();

  // The procedure start
  StartNode* start = new (C) StartNode(root(), jdomain);
  _gvn.set_type_bottom(start);

  // Make a map, with JVM state
  uint parm_cnt = jdomain->cnt();
  uint max_map = MAX2(2*parm_cnt+1, jrange->cnt());
  // %%% SynchronizationEntryBCI is redundant; use InvocationEntryBci in interfaces
  assert(SynchronizationEntryBCI == InvocationEntryBci, "");
  JVMState* jvms = new (C) JVMState(0);
  jvms->set_bci(InvocationEntryBci);
  jvms->set_monoff(max_map);
  jvms->set_scloff(max_map);
  jvms->set_endoff(max_map);
  {
    SafePointNode *map = new (C) SafePointNode( max_map, jvms );
    jvms->set_map(map);
    set_jvms(jvms);
    assert(map == this->map(), "kit.map is set");
  }

  // Make up the parameters
  uint i;
  for( i = 0; i < parm_cnt; i++ )
    map()->init_req(i, _gvn.transform(new (C) ParmNode(start, i)));
  for( ; i<map()->req(); i++ )
    map()->init_req(i, top());      // For nicer debugging

  // GraphKit requires memory to be a MergeMemNode:
  set_all_memory(map()->memory());

  // Get base of thread-local storage area
  Node* thread = _gvn.transform( new (C) ThreadLocalNode() );

  const int NoAlias = Compile::AliasIdxBot;

  Node* adr_last_Java_pc = basic_plus_adr(top(),
                                            thread,
                                            in_bytes(JavaThread::frame_anchor_offset()) +
                                            in_bytes(JavaFrameAnchor::last_Java_pc_offset()));
#if defined(SPARC)
  Node* adr_flags = basic_plus_adr(top(),
                                   thread,
                                   in_bytes(JavaThread::frame_anchor_offset()) +
                                   in_bytes(JavaFrameAnchor::flags_offset()));
#endif /* defined(SPARC) */


  // Drop in the last_Java_sp.  last_Java_fp is not touched.
  // Always do this after the other "last_Java_frame" fields are set, since
  // as soon as last_Java_sp != NULL, has_last_Java_frame becomes true and
  // users will look at the other fields.
  //
  Node *adr_sp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_sp_offset()));
  Node *last_sp = basic_plus_adr(top(), frameptr(), (intptr_t) STACK_BIAS);
  store_to_memory(NULL, adr_sp, last_sp, T_ADDRESS, NoAlias);

  // Set _thread_in_native
  // The order of stores into TLS is critical!  Setting _thread_in_native MUST
  // be last, because a GC is allowed at any time after setting it and the GC
  // will require last_Java_pc and last_Java_sp.
  Node* adr_state = basic_plus_adr(top(), thread, in_bytes(JavaThread::thread_state_offset()));

  //-----------------------------
  // Compute signature for C call.  Varies from the Java signature!
  const Type **fields = TypeTuple::fields(2*parm_cnt+2);
  uint cnt = TypeFunc::Parms;
  // The C routine gets the base of thread-local storage passed in as an
  // extra argument.  Not all calls need it, but it's cheap to add here.
  for( ; cnt<parm_cnt; cnt++ )
    fields[cnt] = jdomain->field_at(cnt);
  fields[cnt++] = TypeRawPtr::BOTTOM; // Thread-local storage
  // Also pass in the caller's PC, if asked for.
  if( return_pc )
    fields[cnt++] = TypeRawPtr::BOTTOM; // Return PC

  const TypeTuple* domain = TypeTuple::make(cnt,fields);
  // The C routine we are about to call cannot return an oop; it can block on
  // exit and a GC will trash the oop while it sits in C-land.  Instead, we
  // return the oop through TLS for runtime calls.
  // Also, C routines returning integer subword values leave the high
  // order bits dirty; these must be cleaned up by explicit sign extension.
  const Type* retval = (jrange->cnt() == TypeFunc::Parms) ? Type::TOP : jrange->field_at(TypeFunc::Parms);
  // Make a private copy of jrange->fields();
  const Type **rfields = TypeTuple::fields(jrange->cnt() - TypeFunc::Parms);
  // Fixup oop returns
  int retval_ptr = retval->isa_oop_ptr();
  if( retval_ptr ) {
    assert( pass_tls, "Oop must be returned thru TLS" );
    // Fancy-jumps return address; others return void
    rfields[TypeFunc::Parms] = is_fancy_jump ? TypeRawPtr::BOTTOM : Type::TOP;

  } else if( retval->isa_int() ) { // Returning any integer subtype?
    // "Fatten" byte, char & short return types to 'int' to show that
    // the native C code can return values with junk high order bits.
    // We'll sign-extend it below later.
    rfields[TypeFunc::Parms] = TypeInt::INT; // It's "dirty" and needs sign-ext

  } else if( jrange->cnt() >= TypeFunc::Parms+1 ) { // Else copy other types
    rfields[TypeFunc::Parms] = jrange->field_at(TypeFunc::Parms);
    if( jrange->cnt() == TypeFunc::Parms+2 )
      rfields[TypeFunc::Parms+1] = jrange->field_at(TypeFunc::Parms+1);
  }
  const TypeTuple* range = TypeTuple::make(jrange->cnt(),rfields);

  // Final C signature
  const TypeFunc *c_sig = TypeFunc::make(domain,range);

  //-----------------------------
  // Make the call node
  CallRuntimeNode *call = new (C)
    CallRuntimeNode(c_sig, C_function, name, TypePtr::BOTTOM);
  //-----------------------------

  // Fix-up the debug info for the call
  call->set_jvms( new (C) JVMState(0) );
  call->jvms()->set_bci(0);
  call->jvms()->set_offsets(cnt);

  // Set fixed predefined input arguments
  cnt = 0;
  for( i=0; i<TypeFunc::Parms; i++ )
    call->init_req( cnt++, map()->in(i) );
  // A little too aggressive on the parm copy; return address is not an input
  call->set_req(TypeFunc::ReturnAdr, top());
  for( ; i<parm_cnt; i++ )    // Regular input arguments
    call->init_req( cnt++, map()->in(i) );

  call->init_req( cnt++, thread );
  if( return_pc )             // Return PC, if asked for
    call->init_req( cnt++, returnadr() );
  _gvn.transform_no_reclaim(call);


  //-----------------------------
  // Now set up the return results
  set_control( _gvn.transform( new (C) ProjNode(call,TypeFunc::Control)) );
  set_i_o(     _gvn.transform( new (C) ProjNode(call,TypeFunc::I_O    )) );
  set_all_memory_call(call);
  if (range->cnt() > TypeFunc::Parms) {
    Node* retnode = _gvn.transform( new (C) ProjNode(call,TypeFunc::Parms) );
    // C-land is allowed to return sub-word values.  Convert to integer type.
    assert( retval != Type::TOP, "" );
    if (retval == TypeInt::BOOL) {
      retnode = _gvn.transform( new (C) AndINode(retnode, intcon(0xFF)) );
    } else if (retval == TypeInt::CHAR) {
      retnode = _gvn.transform( new (C) AndINode(retnode, intcon(0xFFFF)) );
    } else if (retval == TypeInt::BYTE) {
      retnode = _gvn.transform( new (C) LShiftINode(retnode, intcon(24)) );
      retnode = _gvn.transform( new (C) RShiftINode(retnode, intcon(24)) );
    } else if (retval == TypeInt::SHORT) {
      retnode = _gvn.transform( new (C) LShiftINode(retnode, intcon(16)) );
      retnode = _gvn.transform( new (C) RShiftINode(retnode, intcon(16)) );
    }
    map()->set_req( TypeFunc::Parms, retnode );
  }

  //-----------------------------

  // Clear last_Java_sp
  store_to_memory(NULL, adr_sp, null(), T_ADDRESS, NoAlias);
  // Clear last_Java_pc and (optionally) _flags
  store_to_memory(NULL, adr_last_Java_pc, null(), T_ADDRESS, NoAlias);
#if defined(SPARC)
  store_to_memory(NULL, adr_flags, intcon(0), T_INT, NoAlias);
#endif /* defined(SPARC) */
#ifdef IA64
  Node* adr_last_Java_fp = basic_plus_adr(top(), thread, in_bytes(JavaThread::last_Java_fp_offset()));
  if( os::is_MP() ) insert_mem_bar(Op_MemBarRelease);
  store_to_memory(NULL, adr_last_Java_fp,    null(),    T_ADDRESS, NoAlias);
#endif

  // For is-fancy-jump, the C-return value is also the branch target
  Node* target = map()->in(TypeFunc::Parms);
  // Runtime call returning oop in TLS?  Fetch it out
  if( pass_tls ) {
    Node* adr = basic_plus_adr(top(), thread, in_bytes(JavaThread::vm_result_offset()));
    Node* vm_result = make_load(NULL, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, false);
    map()->set_req(TypeFunc::Parms, vm_result); // vm_result passed as result
    // clear thread-local-storage(tls)
    store_to_memory(NULL, adr, null(), T_ADDRESS, NoAlias);
  }

  //-----------------------------
  // check exception
  Node* adr = basic_plus_adr(top(), thread, in_bytes(Thread::pending_exception_offset()));
  Node* pending = make_load(NULL, adr, TypeOopPtr::BOTTOM, T_OBJECT, NoAlias, false);

  Node* exit_memory = reset_memory();

  Node* cmp = _gvn.transform( new (C) CmpPNode(pending, null()) );
  Node* bo  = _gvn.transform( new (C) BoolNode(cmp, BoolTest::ne) );
  IfNode   *iff = create_and_map_if(control(), bo, PROB_MIN, COUNT_UNKNOWN);

  Node* if_null     = _gvn.transform( new (C) IfFalseNode(iff) );
  Node* if_not_null = _gvn.transform( new (C) IfTrueNode(iff)  );

  assert (StubRoutines::forward_exception_entry() != NULL, "must be generated before");
  Node *exc_target = makecon(TypeRawPtr::make( StubRoutines::forward_exception_entry() ));
  Node *to_exc = new (C) TailCallNode(if_not_null,
                                      i_o(),
                                      exit_memory,
                                      frameptr(),
                                      returnadr(),
                                      exc_target, null());
  root()->add_req(_gvn.transform(to_exc));  // bind to root to keep live
  C->init_start(start);

  //-----------------------------
  // If this is a normal subroutine return, issue the return and be done.
  Node *ret;
  switch( is_fancy_jump ) {
  case 0:                       // Make a return instruction
    // Return to caller, free any space for return address
    ret = new (C) ReturnNode(TypeFunc::Parms, if_null,
                             i_o(),
                             exit_memory,
                             frameptr(),
                             returnadr());
    if (C->tf()->range()->cnt() > TypeFunc::Parms)
      ret->add_req( map()->in(TypeFunc::Parms) );
    break;
  case 1:    // This is a fancy tail-call jump.  Jump to computed address.
    // Jump to new callee; leave old return address alone.
    ret = new (C) TailCallNode(if_null,
                               i_o(),
                               exit_memory,
                               frameptr(),
                               returnadr(),
                               target, map()->in(TypeFunc::Parms));
    break;
  case 2:                       // Pop return address & jump
    // Throw away old return address; jump to new computed address
    //assert(C_function == CAST_FROM_FN_PTR(address, OptoRuntime::rethrow_C), "fancy_jump==2 only for rethrow");
    ret = new (C) TailJumpNode(if_null,
                               i_o(),
                               exit_memory,
                               frameptr(),
                               target, map()->in(TypeFunc::Parms));
    break;
  default:
    ShouldNotReachHere();
  }
  root()->add_req(_gvn.transform(ret));
}
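For reference, a hedged sketch of the thin wrapper that drives gen_stub: a stub Compile is constructed (whose stub constructor runs GraphKit::gen_stub) and the generated entry point is returned. The signature is approximated from the era of this code.

// Sketch, approximate: how OptoRuntime wraps gen_stub for a runtime stub.
address OptoRuntime::generate_stub(ciEnv* env,
                                   TypeFunc_generator gen, address C_function,
                                   const char* name, int is_fancy_jump,
                                   bool pass_tls, bool save_argument_registers,
                                   bool return_pc) {
  ResourceMark rm;
  Compile C(env, gen, C_function, name, is_fancy_jump, pass_tls,
            save_argument_registers, return_pc);
  return C.stub_entry_point();
}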
Example #11
 Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type, bool require_atomic_access = false) {
   // This version computes alias_index from an address type
   assert(adr_type != NULL, "use other make_load factory");
   return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
                    require_atomic_access);
 }
Example #12
 // Create a LoadNode, reading from the parser's memory state.
 // (Note:  require_atomic_access is useful only with T_LONG.)
 Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                 bool require_atomic_access = false) {
   // This version computes alias_index from bottom_type
   return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
                    require_atomic_access);
 }
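With these older overloads the only ordering-related knob is require_atomic_access, which matters for 64-bit values on 32-bit platforms. A hedged usage sketch for a volatile long field (adr and adr_type assumed computed as in do_get_xxx):

// Sketch: a volatile T_LONG load must be single-copy atomic even on
// 32-bit platforms, so request atomic access explicitly.
Node* ld = make_load(NULL, adr, TypeLong::LONG, T_LONG, adr_type,
                     /*require_atomic_access=*/ true);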
Example #13
//---------------------------catch_call_exceptions-----------------------------
// Put Catch and CatchProj nodes behind a just-created call.
// Send their caught exceptions to the proper handler.
// This may be used after a call to the rethrow VM stub,
// when it is needed to process unloaded exception classes.
void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) {
  // Exceptions are delivered through this channel:
  Node* i_o = this->i_o();

  // Add a CatchNode.
  GrowableArray<int>* bcis = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, -1);
  GrowableArray<const Type*>* extypes = new (C->node_arena()) GrowableArray<const Type*>(C->node_arena(), 8, 0, NULL);
  GrowableArray<int>* saw_unloaded = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, 0);

  for (; !handlers.is_done(); handlers.next()) {
    ciExceptionHandler* h        = handlers.handler();
    int                 h_bci    = h->handler_bci();
    ciInstanceKlass*    h_klass  = h->is_catch_all() ? env()->Throwable_klass() : h->catch_klass();
    const TypePtr* h_extype = TypeOopPtr::make_from_klass_unique(h_klass)->cast_away_null();
    // Ignore exceptions with no implementors.  These cannot be thrown
    // (without class loading anyway, which will deopt this code).
    if (h_extype->empty())  continue;

    // Do not introduce unloaded exception types into the graph:
    if (!h_klass->is_loaded()) {
      if (saw_unloaded->contains(h_bci)) {
        // We've already seen an unloaded exception with h_bci, so don't
        // duplicate.  Duplication would make the CatchNode unnecessarily
        // large.  See 4713716.
        continue;
      } else {
        saw_unloaded->append(h_bci);
      }
    }
    // Note:  It's OK if the BCIs repeat themselves.
    bcis->append(h_bci);
    extypes->append(h_extype);
  }

  int len = bcis->length();
  CatchNode *cn = new (C, 2) CatchNode(control(), i_o, len+1);
  Node *catch_ = _gvn.transform(cn);

  // Now branch with the exception state to each of the (potential)
  // handlers.
  for (int i = 0; i < len; i++) {
    // Setup JVM state to enter the handler.
    PreserveJVMState pjvms(this);
    // Locals are just copied from before the call.
    // Get control from the CatchNode.
    int handler_bci = bcis->at(i);
    Node* ctrl = _gvn.transform( new (C, 1) CatchProjNode(catch_, i+1, handler_bci));
    // This handler cannot happen?
    if (ctrl == top())  continue;
    set_control(ctrl);

    // Create exception oop
    const TypeInstPtr* extype = extypes->at(i)->is_instptr();
    
    Node *thread = _gvn.transform( new (C, 1) ThreadLocalNode() );
    Node* ex_adr = basic_plus_adr(top(), thread, in_bytes(JavaThread::pending_exception_offset()));
    int pending_ex_alias_idx = C->get_alias_index(ex_adr->bottom_type()->is_ptr());
    Node* ex_oop = make_load(NULL, ex_adr, extype, T_OBJECT, pending_ex_alias_idx);
    Node* ex_st  = store_to_memory(ctrl, ex_adr, null(), T_OBJECT, pending_ex_alias_idx);
    record_for_igvn(ex_st);

    // Handle unloaded exception classes.
    if (saw_unloaded->contains(handler_bci)) {
      // An unloaded exception type is coming here.  Do an uncommon trap.
      // We do not expect the same handler bci to take both cold unloaded
      // and hot loaded exceptions.  But, watch for it.
      if (PrintOpto && extype->is_loaded()) {
        C2OUT->print_cr("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ", handler_bci);
        method()->print_name(C2OUT);
        C2OUT->cr();
      }
      // Emit an uncommon trap instead of processing the block.
      set_bci(handler_bci);
      push_ex_oop(ex_oop);
      uncommon_trap(Deoptimization::Reason_unloaded, extype->klass(),
                    "not loaded exception", false);
      set_bci(iter().cur_bci()); // put it back
      continue;
    }

    // go to the exception handler
    if (handler_bci < 0) {     // merge with corresponding rethrow node
      throw_to_exit(make_exception_state(ex_oop));
    } else {                    // Else jump to corresponding handler
      push_ex_oop(ex_oop);        // Clear stack and push just the oop.
      merge_exception(handler_bci);
    }
  }

  // The first CatchProj is for the normal return.
  // (Note:  If this is a call to rethrow_Java, this node goes dead.)
  set_control(_gvn.transform( new (C, 1) CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci)));
}