Example #1
void NativeMovConstReg::set_data(int64_t src)  {
  verify();
  uint41_t new_X;
  uint41_t new_L;
  IPF_Bundle *bundle = (IPF_Bundle *)addr_at(0);
  X2::set_imm((uint64_t)src, bundle->get_slot2(), new_X, new_L);
  bundle->set_slot1( new_L );
  bundle->set_slot2( new_X );

  ICache::invalidate_range((address)bundle, sizeof(*bundle));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* nm = CodeCache::find_blob(instruction_address());
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = (oop)src;
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here") ;
        }
      }
    }
  }
}
Example #2
void NativeMovConstReg32::set_data(intptr_t x) {
  set_long_at(sethi_offset, set_data32_sethi(  long_at(sethi_offset), x));
  set_long_at(add_offset,   set_data32_simm13( long_at(add_offset),   x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    Metadata** metadata_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = cast_to_oop(x);
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation *r = iter.metadata_reloc();
        if (metadata_addr == NULL) {
          metadata_addr = r->metadata_addr();
          *metadata_addr = (Metadata*)x;
        } else {
          assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
        }
      }
    }
  }
}
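
Both set_data() variants above follow the same pattern: re-encode the immediate in the instruction stream, then walk the relocation entries covering that instruction and mirror the new value into the single embedded oop (or metadata) cell, asserting that there is at most one such cell. The standalone sketch below models that pattern with simplified, hypothetical types; it is not the real RelocIterator API.

#include <cassert>
#include <cstdint>
#include <cstdio>
#include <vector>

enum RelocType { oop_type, metadata_type, other_type };

struct Reloc {                  // hypothetical flattened relocation record
  RelocType  type;
  uintptr_t  insn_addr;         // address of the instruction it annotates
  intptr_t*  cell;              // embedded constant cell to keep in sync
};

// Model of NativeMovConstReg::set_data(): after re-encoding the immediate,
// update the (single) oop cell associated with this instruction.
static void set_data_model(uintptr_t insn_addr, intptr_t x, std::vector<Reloc>& relocs) {
  // (1) ...re-encode the immediate into the instruction bytes here...
  // (2) Mirror the value into the matching relocation cell, if any.
  intptr_t* oop_cell = nullptr;
  for (Reloc& r : relocs) {
    if (r.insn_addr != insn_addr || r.type != oop_type) continue;
    if (oop_cell == nullptr) {
      oop_cell = r.cell;
      *oop_cell = x;
    } else {
      assert(oop_cell == r.cell && "must be only one set-oop here");
    }
  }
}

int main() {
  intptr_t cell = 0;
  std::vector<Reloc> relocs = { { oop_type, 0x1000, &cell } };
  set_data_model(0x1000, 42, relocs);
  std::printf("cell = %ld\n", (long)cell);
  return 0;
}
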
Example #3
void NativeMovRegMem::print() {
  if (is_immediate()) {
    // offset is a signed 13-bit immediate, so casting it to int will not lose significant bits
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + %d]", p2i(instruction_address()), (int)offset());
  } else {
    tty->print_cr(INTPTR_FORMAT ": mov reg, [reg + reg]", p2i(instruction_address()));
  }
}
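
The cast above is safe because a SPARC-style simm13 field covers only [-4096, 4095]. A minimal standalone illustration of extracting and sign-extending such a 13-bit immediate (the helper name is made up for this sketch):

#include <cstdint>
#include <cstdio>

// Hypothetical helper: take bits [12:0] of an instruction word and sign-extend
// from bit 12, giving a value in [-4096, 4095] that always fits in an int.
static int sign_extend_simm13(uint32_t inst) {
  int32_t imm = inst & 0x1FFF;        // low 13 bits
  if (imm & 0x1000) imm |= ~0x1FFF;   // propagate the sign bit
  return imm;
}

int main() {
  std::printf("%d\n", sign_extend_simm13(0x00001FFF)); // -1
  std::printf("%d\n", sign_extend_simm13(0x00000FFF)); // 4095
  std::printf("%d\n", sign_extend_simm13(0x00001000)); // -4096
  return 0;
}
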
Example #4
bool CompiledIC::is_call_to_compiled() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");

  // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
  // method is guaranteed to still exist, since we only remove methods after all inline caches
  // have been cleaned up
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_nmethod());
  // Check that the cached_oop is a klass for non-optimized monomorphic calls
  // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
  // for calling directly to vep without using the inline cache (i.e., cached_oop == NULL)
#ifdef ASSERT
#ifdef TIERED
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_method = caller->is_compiled_by_c1();
#else
#ifdef COMPILER1
  bool is_c1_method = true;
#else
  bool is_c1_method = false;
#endif // COMPILER1
#endif // TIERED
  assert( is_c1_method ||
         !is_monomorphic ||
         is_optimized() ||
         (cached_oop() != NULL && cached_oop()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}
Example #5
void NativeMovRegMem::copy_instruction_to(address new_instruction_address) {
  Untested("copy_instruction_to");
  int instruction_size = next_instruction_address() - instruction_address();
  for (int i = 0; i < instruction_size; i += BytesPerInstWord) {
    *(int*)(new_instruction_address + i) = *(int*)(address(this) + i);
  }
}
void NativeLoadAddress::verify() {
  // make sure code pattern is actually a lea reg, [reg+offset] instruction
  u_char test_byte = *(u_char*)instruction_address();
  if ( ! (test_byte == instruction_code) ) {
    fatal ("not a lea reg, [reg+offs] instruction");
  }
}
Example #7
void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
  address stub = find_stub();
  guarantee(stub != NULL, "stub not found");

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
                  p2i(instruction_address()),
                  callee->name_and_sig_as_C_string());
  }

  // Creation also verifies the object.
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());

  assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(),
         "a) MT-unsafe modification of inline cache");
  assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry,
         "b) MT-unsafe modification of inline cache");

  // Update stub.
  method_holder->set_data((intptr_t)callee());
  jump->set_jump_destination(entry);

  // Update jump to call.
  set_destination_mt_safe(stub);
}
Example #8
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
  address stub = find_stub(/*is_aot*/ false);
  guarantee(stub != NULL, "stub not found");

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
                  p2i(instruction_address()),
                  callee->name_and_sig_as_C_string());
  }

  // Creation also verifies the object.
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + NativeCall::get_IC_pos_in_java_to_interp_stub());
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());

  // A generated lambda form might be deleted from the LambdaForm
  // cache in MethodTypeForm.  If a JIT-compiled LambdaForm method
  // becomes not entrant and the cache access returns null, the new
  // resolve will lead to a new generated LambdaForm.

  assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee() || callee->is_compiled_lambda_form(),
         "a) MT-unsafe modification of inline cache");
  assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry,
         "b) MT-unsafe modification of inline cache");

  // Update stub.
  method_holder->set_data((intptr_t)callee());
  jump->set_jump_destination(entry);

  // Update jump to call.
  set_destination_mt_safe(stub);
}
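
Both set_to_interpreted() variants parse the same stub shape, a constant load of the Method* (the "method holder") followed by a jump, and their asserts encode a single invariant: while other threads may still execute the old stub, each field may only move from its "unset" value to the final value, or be rewritten with an identical one. A simplified model of that invariant, using hypothetical stand-in types rather than the HotSpot classes (and ignoring the LambdaForm special case noted in the comment above):

#include <cassert>
#include <cstdint>
#include <cstdio>

struct InterpStubModel {              // hypothetical flattened java-to-interp stub
  intptr_t  method_holder;            // 0 means "not yet set"
  uintptr_t jump_destination;         // (uintptr_t)-1 means "not yet set"

  void set_to_interpreted(intptr_t callee, uintptr_t entry) {
    // Benign re-resolution may store the same values again, but the stub must
    // never be silently retargeted to a different callee or entry point.
    assert(method_holder == 0 || method_holder == callee);
    assert(jump_destination == (uintptr_t)-1 || jump_destination == entry);
    method_holder    = callee;
    jump_destination = entry;
  }
};

int main() {
  InterpStubModel stub = { 0, (uintptr_t)-1 };
  stub.set_to_interpreted(0x1234, 0x5678);
  std::printf("holder=0x%lx dest=0x%lx\n",
              (unsigned long)stub.method_holder, (unsigned long)stub.jump_destination);
  return 0;
}
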
Example #9
void NativeCall::verify() {
  NativeInstruction::verify();
  // make sure code pattern is actually a call instruction
  int x = long_at(0);
  if (!is_op(x, Assembler::call_op)) {
    fatal("not a call: 0x%x @ " INTPTR_FORMAT, x, p2i(instruction_address()));
  }
}
void NativeCall::verify() {
  // Make sure code pattern is actually a call imm32 instruction.
  int inst = char_at(0) & 0xFF;
  if (inst != instruction_code) {
    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", instruction_address(),
                                                        inst);
    fatal("not a call imm32");
  }
}
// We cannot rely on locks here, since the free-running threads must run at
// full speed.
//
// Used in the runtime linkage of calls; see class CompiledIC.
// (Cf. 4506997 and 4479829, where threads witnessed garbage displacements.)
void NativeCall::set_destination_mt_safe(address dest) {
  debug_only(verify());
  // Make sure patching code is locked.  No two threads can patch at the same
  // time but one may be executing this code.
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching"); 
  // Both C1 and C2 should now be generating code which aligns the patched address
  // to be within a single cache line except that C1 does not do the alignment on
  // uniprocessor systems.
  bool is_aligned = ((uintptr_t)displacement_address() / cache_line_size ==
                     ((uintptr_t)displacement_address() + 3) / cache_line_size);
  assert(!os::is_MP() || is_aligned, "destination should be aligned");
  if (is_aligned) {
    // Simple case:  The destination lies within a single cache line.
    set_destination(dest);
  } else if ((uintptr_t)instruction_address() / cache_line_size ==
             ((uintptr_t)instruction_address() + 1) / cache_line_size) {
    // Tricky case:  The instruction prefix lies within a single cache line.
    int disp = dest - return_address();
    int call_opcode = instruction_address()[0];

    // First patch dummy jump in place:
    {
      unsigned char patch_jump[2];
      patch_jump[0] = 0xEB;       // jmp rel8
      patch_jump[1] = 0xFE;       // jmp to self
      assert(sizeof(patch_jump)==sizeof(short), "sanity check");
      *(short*)instruction_address() = *(short*)patch_jump;
    }

    OrderAccess::fence();
    // (Note: We assume any reader which has already started to read
    // the unpatched call will completely read the whole unpatched call
    // without seeing the next writes we are about to make.)

    // Next, patch the last three bytes:
    unsigned char patch_disp[5];
    patch_disp[0] = call_opcode;
    *(int*)&patch_disp[1] = disp;
    assert(sizeof(patch_disp)==instruction_size, "sanity check");
    for (int i = sizeof(short); i < instruction_size; i++)
      instruction_address()[i] = patch_disp[i];

    OrderAccess::fence();
    // (Note: We assume that any reader which reads the opcode we are
    // about to repatch will also read the writes we just made.)

    // Finally, overwrite the jump:
    *(short*)instruction_address() = *(short*)&patch_disp[0];
    
    debug_only(verify());
    guarantee(destination() == dest, "patch succeeded");
  } else {
    // Impossible:  One or the other must be atomically writable.
    ShouldNotReachHere();
  }
  ICache::invalidate_range(instruction_address(), instruction_size);
}
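
The interesting part of set_destination_mt_safe() above is the cross-cache-line case: because the 4-byte displacement cannot be stored atomically, concurrent executors are first parked on a 2-byte "jmp-to-self", the tail bytes are rewritten, and only then are the first two bytes restored. The standalone sketch below replays that sequence on a plain byte buffer; it is illustrative only (a real JIT must use a genuinely atomic, aligned 2-byte store and flush the instruction cache afterwards).

#include <atomic>
#include <cstdio>
#include <cstring>

static const int kInstrSize = 5;                  // call rel32: opcode + disp32
static unsigned char code[kInstrSize] = { 0xE8, 0, 0, 0, 0 };

static void patch_displacement(int new_disp) {
  // 1. Park concurrent executors on a self-branch (conceptually one 2-byte store).
  unsigned char jmp_self[2] = { 0xEB, 0xFE };     // jmp -2, i.e. jump to itself
  std::memcpy(code, jmp_self, sizeof(jmp_self));
  std::atomic_thread_fence(std::memory_order_seq_cst);

  // 2. Rewrite the tail bytes of the displacement, which may straddle a cache line.
  unsigned char full[kInstrSize];
  full[0] = 0xE8;                                 // call opcode
  std::memcpy(&full[1], &new_disp, sizeof(new_disp));
  for (int i = 2; i < kInstrSize; i++) {
    code[i] = full[i];
  }
  std::atomic_thread_fence(std::memory_order_seq_cst);

  // 3. Restore the opcode and first displacement byte (again one 2-byte store),
  //    atomically turning the parked jump back into the (new) call.
  std::memcpy(code, full, 2);
}

int main() {
  patch_displacement(0x12345678);
  int disp;
  std::memcpy(&disp, &code[1], sizeof(disp));
  std::printf("patched disp = 0x%x\n", disp);
  return 0;
}
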
void CompiledStaticCall::print() {
  tty->print("static call at " INTPTR_FORMAT " -> ", p2i(instruction_address()));
  if (is_clean()) {
    tty->print("clean");
  } else if (is_call_to_compiled()) {
    tty->print("compiled");
  } else if (is_call_to_interpreted()) {
    tty->print("interpreted");
  }
  tty->cr();
}
address CompiledStaticCall::find_stub() {
  // Find reloc. information containing this call-site
  RelocIterator iter((nmethod*)NULL, instruction_address());
  while (iter.next()) {
    if (iter.addr() == instruction_address()) {
      switch(iter.type()) {
        case relocInfo::static_call_type:
          return iter.static_call_reloc()->static_stub();
        // We check here for opt_virtual_call_type, since we reuse the code
        // from the CompiledIC implementation
        case relocInfo::opt_virtual_call_type:
          return iter.opt_virtual_call_reloc()->static_stub();
        case relocInfo::poll_type:
        case relocInfo::poll_return_type: // A safepoint can't overlap a call.
        default:
          ShouldNotReachHere();
      }
    }
  }
  return NULL;
}
bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
  assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");

  address entry;
  if (call_info->call_kind() == CallInfo::itable_call) {
    assert(bytecode == Bytecodes::_invokeinterface, "");
    int itable_index = call_info->itable_index();
    entry = VtableStubs::find_itable_stub(itable_index);
    if (entry == NULL) {
      return false;
    }
#ifdef ASSERT
    int index = call_info->resolved_method()->itable_index();
    assert(index == itable_index, "CallInfo pre-computes this");
    InstanceKlass* k = call_info->resolved_method()->method_holder();
    assert(k->verify_itable_index(itable_index), "sanity check");
#endif //ASSERT
    CompiledICHolder* holder = new CompiledICHolder(call_info->resolved_method()->method_holder(),
                                                    call_info->resolved_klass()(), false);
    holder->claim();
    InlineCacheBuffer::create_transition_stub(this, holder, entry);
  } else {
    assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
    // Can be different than selected_method->vtable_index(), due to package-private etc.
    int vtable_index = call_info->vtable_index();
    assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
    entry = VtableStubs::find_vtable_stub(vtable_index);
    if (entry == NULL) {
      return false;
    }
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr ("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
                   p2i(instruction_address()), call_info->selected_method()->print_value_string(), p2i(entry));
  }

  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_megamorphic(), "sanity check");
  return true;
}
void NativeMovRegMem::verify() {
  // make sure code pattern is actually a mov [reg+offset], reg instruction
  u_char test_byte = *(u_char*)instruction_address();
  if ( ! ( (test_byte == instruction_code_reg2memb) 
      || (test_byte == instruction_code_mem2regb) 
      || (test_byte == instruction_code_mem2regl) 
      || (test_byte == instruction_code_reg2meml) 
      || (test_byte == instruction_code_mem2reg_movzxb )
      || (test_byte == instruction_code_mem2reg_movzxw )
      || (test_byte == instruction_code_mem2reg_movsxb )
      || (test_byte == instruction_code_mem2reg_movsxw )
      || (test_byte == instruction_code_float_s) 
      || (test_byte == instruction_code_float_d)
      || (test_byte == instruction_code_long_volatile) ) ) {
    fatal ("not a mov [reg+offs], reg instruction");
  }
}
void CompiledIC::set_to_clean() {  
  assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked() , "MT-unsafe call");
  if (TraceInlineCacheClearing || TraceICs) {
    tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", instruction_address());
    print();
  }

  address entry;
#ifdef COMPILER1
  entry = is_optimized() ? 
    Runtime1::entry_for(Runtime1::resolve_invoke_opt_virtual_id) :
    Runtime1::entry_for(Runtime1::resolve_invokevirtual_id);
#else
  entry =
    is_optimized()
    ? OptoRuntime::resolve_opt_virtual_call_Java()
    : OptoRuntime::resolve_virtual_call_Java();
#endif

  // A zombie transition will always be safe, since the oop has already been set to NULL, so
  // we only need to patch the destination
  bool safe_transition = is_optimized() || SafepointSynchronize::is_at_safepoint();

  if (safe_transition) {
    if (!is_optimized()) set_cached_oop(NULL);  
    // Kill any leftover stub we might have too
    if (is_in_transition_state()) {
      ICStub* old_stub = ICStub_from_destination_address(stub_address());
      old_stub->clear();
    }
    set_ic_destination(entry); 
  } else {
    // Unsafe transition - create stub. 
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_clean(), "sanity check");
}
void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
  methodHandle method = call_info->selected_method();
  bool is_invoke_interface = (bytecode == Bytecodes::_invokeinterface && !call_info->has_vtable_index());
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(method->is_oop(), "cannot be NULL and must be oop");
  assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
  assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");
  
  address entry;
  if (is_invoke_interface) {
    int index = klassItable::compute_itable_index(call_info->resolved_method()());            
    entry = VtableStubs::create_stub(false, index, method());    
    assert(entry != NULL, "entry not computed");
    klassOop k = call_info->resolved_method()->method_holder();
    assert(Klass::cast(k)->is_interface(), "sanity check");
    InlineCacheBuffer::create_transition_stub(this, k, entry);
  } else {
    // Can be different than method->vtable_index(), due to package-private etc.
    int vtable_index = call_info->vtable_index(); 
    entry = VtableStubs::create_stub(true, vtable_index, method());
    InlineCacheBuffer::create_transition_stub(this, method(), entry);
  }
      
  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr ("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
		   instruction_address(), method->print_value_string(), entry);
  } 

  Events::log("compiledIC " INTPTR_FORMAT " --> megamorphic " INTPTR_FORMAT, this, method());  
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_megamorphic(), "sanity check");
}
void CompiledIC::set_to_clean(bool in_use) {
  assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked() , "MT-unsafe call");
  if (TraceInlineCacheClearing || TraceICs) {
    tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
    print();
  }

  address entry;
  if (is_optimized()) {
    entry = SharedRuntime::get_resolve_opt_virtual_call_stub();
  } else {
    entry = SharedRuntime::get_resolve_virtual_call_stub();
  }

  // A zombie transition will always be safe, since the metadata has already been set to NULL, so
  // we only need to patch the destination
  bool safe_transition = !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();

  if (safe_transition) {
    // Kill any leftover stub we might have too
    clear_ic_stub();
    if (is_optimized()) {
      set_ic_destination(entry);
    } else {
      set_ic_destination_and_value(entry, (void*)NULL);
    }
  } else {
    // Unsafe transition - create stub.
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_clean(), "sanity check");
}
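
The decision that drives set_to_clean() above (and set_to_monomorphic() further below) is whether the site may be patched in place or must go through an InlineCacheBuffer transition stub, because a racing thread could otherwise observe a half-updated destination/value pair. A tiny conceptual sketch of that policy, using a simplified stand-in for the real CompiledIC state:

#include <cstdio>

struct ICSiteModel {                  // hypothetical, simplified call-site state
  bool optimized;                     // optimized ICs carry no cached value to update
  bool at_safepoint;                  // all Java threads are stopped
  bool in_use;                        // the caller nmethod can still be entered
};

static const char* choose_transition(const ICSiteModel& ic) {
  // Mirrors: safe_transition = !in_use || is_optimized() || is_at_safepoint()
  bool safe = !ic.in_use || ic.optimized || ic.at_safepoint;
  return safe ? "patch destination (and value) in place"
              : "create InlineCacheBuffer transition stub";
}

int main() {
  ICSiteModel racing  = { false, false, true };
  ICSiteModel stopped = { false, true,  true };
  std::printf("racing thread possible: %s\n", choose_transition(racing));
  std::printf("at a safepoint:         %s\n", choose_transition(stopped));
  return 0;
}
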
void CompiledStaticCall::set(const StaticCallInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if a cache entry becomes invalid we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  assert(is_clean(), "do not update a call entry - use clean");

  if (info._to_interpreter) {
    // Call to interpreted code
    set_to_interpreted(info.callee(), info.entry());
  } else {
    if (TraceICs) {
      ResourceMark rm;
      tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
                    p2i(instruction_address()),
                    p2i(info.entry()));
    }
    // Call to compiled code
    assert (CodeCache::contains(info.entry()), "wrong entry point");
    set_destination_mt_safe(info.entry());
  }
}
bool CompiledIC::is_call_to_compiled() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");

  // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
  // method is guaranteed to still exist, since we only remove methods after all inline caches
  // have been cleaned up
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_nmethod());
  // Check that the cached_value is a klass for non-optimized monomorphic calls
  // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
  // for calling directly to vep without using the inline cache (i.e., cached_value == NULL).
  // For JVMCI this occurs because CHA is only used to improve inlining, so call sites which could be
  // optimized virtual calls (because there are no currently loaded subclasses of a type) are left as
  // virtual call sites.
#ifdef ASSERT
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_or_jvmci_method = caller->is_compiled_by_c1() || caller->is_compiled_by_jvmci();
  assert( is_c1_or_jvmci_method ||
         !is_monomorphic ||
         is_optimized() ||
         !caller->is_alive() ||
         (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}
Example #21
void NativeJump::print() {
  tty->print_cr(INTPTR_FORMAT ": jmpl reg, " INTPTR_FORMAT, p2i(instruction_address()), p2i(jump_destination()));
}
void NativeCall::print() {
  tty->print_cr("0x%x: call 0x%x", instruction_address(), destination());
}
Example #23
void PltNativeCallWrapper::set_to_interpreted(const methodHandle& method, CompiledICInfo& info) {
  assert(!info.to_aot(), "only for nmethod");
  CompiledPltStaticCall* csc = CompiledPltStaticCall::at(instruction_address());
  csc->set_to_interpreted(method, info.entry());
}
Example #24
void NativeMovConstRegPatching::print() {
  tty->print_cr(INTPTR_FORMAT ": mov reg, 0x%x", p2i(instruction_address()), data());
}
Example #25
void NativeMovConstReg32::print() {
  tty->print_cr(INTPTR_FORMAT ": mov reg, " INTPTR_FORMAT, p2i(instruction_address()), data());
}
Example #26
void NativeFarCall::print() {
  tty->print_cr(INTPTR_FORMAT ": call " INTPTR_FORMAT, p2i(instruction_address()), p2i(destination()));
}
void CompiledIC::print_compiled_ic() {
  tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT,
             p2i(instruction_address()), is_call_to_interpreted() ? "interpreted " : "", p2i(ic_destination()), p2i(is_optimized() ? NULL : cached_value()));
}
void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if a cache entry becomes invalid we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  //
  // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
  // callsites. In addition ic_miss code will update a site to monomorphic if it determines
  // that a monomorphic call to the interpreter can now be monomorphic to compiled code.
  //
  // In both of these cases the only thing being modified is the jump/call target and these
  // transitions are mt_safe

  Thread *thread = Thread::current();
  if (info.to_interpreter()) {
    // Call to interpreter
    if (info.is_optimized() && is_optimized()) {
       assert(is_clean(), "unsafe IC path");
       MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
      // the call analysis (callee structure) specifies that the call is optimized
      // (either because of CHA or the static target is final)
      // At code generation time, this call has been emitted as static call
      // Call via stub
      assert(info.cached_metadata() != NULL && info.cached_metadata()->is_method(), "sanity check");
      CompiledStaticCall* csc = compiledStaticCall_at(instruction_address());
      methodHandle method (thread, (Method*)info.cached_metadata());
      csc->set_to_interpreted(method, info.entry());
      if (TraceICs) {
         ResourceMark rm(thread);
         tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s",
           p2i(instruction_address()),
           method->print_value_string());
      }
    } else {
      // Call via method-klass-holder
      InlineCacheBuffer::create_transition_stub(this, info.claim_cached_icholder(), info.entry());
      if (TraceICs) {
         ResourceMark rm(thread);
         tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder ", p2i(instruction_address()));
      }
    }
  } else {
    // Call to compiled code
    bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
    assert (cb->is_nmethod(), "must be compiled!");
#endif /* ASSERT */

    // This is MT safe if we come from a clean-cache and go through a
    // non-verified entry point
    bool safe = SafepointSynchronize::is_at_safepoint() ||
                (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));

    if (!safe) {
      InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry());
    } else {
      if (is_optimized()) {
        set_ic_destination(info.entry());
      } else {
        set_ic_destination_and_value(info.entry(), info.cached_metadata());
      }
    }

    if (TraceICs) {
      ResourceMark rm(thread);
      assert(info.cached_metadata() == NULL || info.cached_metadata()->is_klass(), "must be");
      tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass) %s: %s",
        p2i(instruction_address()),
        ((Klass*)info.cached_metadata())->print_value_string(),
        (safe) ? "" : "via stub");
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}
Example #29
void CompiledIC::print_compiled_ic() {
  tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT,
             instruction_address(), is_call_to_interpreted() ? "interpreted " : "", ic_destination());
}
bool CompiledStaticCall::is_call_to_interpreted() const {
  // It is a call to interpreted, if it calls to a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call
  nmethod* nm = CodeCache::find_nmethod(instruction_address());
  return nm->stub_contains(destination());
}