Example #1
bool CompiledIC::is_call_to_interpreted() const {  
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Call to interpreter if destination is either calling to a stub (if it
  // is optimized), or calling to an I2C blob
  bool is_call_to_interpreted = false;
#ifdef COMPILER1
  if (!is_optimized()) {
    is_call_to_interpreted =
      Runtime1::blob_for(Runtime1::interpreter_entries_id)->contains(ic_destination());
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub)
    CodeBlob* cb = CodeCache::find_blob(_ic_call->instruction_address());
    is_call_to_interpreted = cb->contains(ic_destination());
  }
#else
  if (!is_optimized()) {
    CodeBlob* cb = CodeCache::find_blob(ic_destination());  
    is_call_to_interpreted = (cb != NULL && cb->is_c2i_adapter());
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub)
    CodeBlob* cb = CodeCache::find_blob(_ic_call->instruction_address());
    is_call_to_interpreted = cb->contains(ic_destination());
  }
#endif // COMPILER1
  assert(!is_call_to_interpreted || is_optimized() || (cached_oop() != NULL && cached_oop()->is_compiledICHolder()), "sanity check");
  return is_call_to_interpreted;
}
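
A standalone sketch (not HotSpot code; the Blob type and addresses below are made up for illustration) of the range check used in the optimized branch above: the call is treated as interpreted when its destination falls inside the caller's own code blob, i.e. it targets the static call stub emitted there.

// Standalone sketch: for an optimized call, "calls the interpreter" means the
// destination points at the static call stub inside the caller's own blob,
// so a simple range check is enough.
#include <cassert>
#include <cstdint>

struct Blob {                       // stands in for CodeBlob
  std::uintptr_t begin;
  std::uintptr_t end;               // one past the last byte
  bool contains(std::uintptr_t addr) const { return begin <= addr && addr < end; }
};

int main() {
  Blob caller{0x1000, 0x2000};
  std::uintptr_t static_stub  = 0x1f80;  // stub emitted inside the caller blob
  std::uintptr_t compiled_vep = 0x3000;  // entry point of some other nmethod
  assert(caller.contains(static_stub));    // -> call to interpreted
  assert(!caller.contains(compiled_vep));  // -> call to compiled code
}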
Example #2
bool CompiledIC::is_call_to_interpreted() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Call to interpreter if destination is either calling to a stub (if it
  // is optimized), or calling to an I2C blob
  bool is_call_to_interpreted = false;
  if (!is_optimized()) {
    // Must use unsafe because the destination can be a zombie (and we're cleaning),
    // and the print_compiled_ic code wants to know whether the site (in the
    // non-zombie caller) calls into the interpreter.
    CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
    is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
    assert(!is_call_to_interpreted ||  (cached_oop() != NULL && cached_oop()->is_compiledICHolder()), "sanity check");
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub)
    CodeBlob* cb = CodeCache::find_blob(_ic_call->instruction_address());
    address dest = ic_destination();
#ifdef ASSERT
    {
      CodeBlob* db = CodeCache::find_blob_unsafe(dest);
      assert(!db->is_adapter_blob(), "must use stub!");
    }
#endif /* ASSERT */
    is_call_to_interpreted = cb->contains(dest);
  }
  return is_call_to_interpreted;
}
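
For the non-optimized branch, find_blob/find_blob_unsafe conceptually resolve an address to the code blob whose range covers it, and the blob's kind then decides the answer. A self-contained sketch of that lookup pattern, assuming a toy code cache keyed by blob start address (none of these names are HotSpot's):

// Standalone sketch: resolving an address to the covering code blob with a
// sorted map, then checking the blob's kind (adapter -> interpreted call).
#include <cassert>
#include <cstdint>
#include <map>

enum class BlobKind { nmethod, adapter, stub };

struct Blob {
  std::uintptr_t start;
  std::uintptr_t end;
  BlobKind       kind;
};

struct CodeCacheSketch {
  std::map<std::uintptr_t, Blob> blobs_by_start;

  const Blob* find_blob(std::uintptr_t addr) const {
    auto it = blobs_by_start.upper_bound(addr);
    if (it == blobs_by_start.begin()) return nullptr;
    --it;                                    // last blob starting at or before addr
    return (addr < it->second.end) ? &it->second : nullptr;
  }
};

int main() {
  CodeCacheSketch cache;
  cache.blobs_by_start[0x1000] = {0x1000, 0x2000, BlobKind::nmethod};
  cache.blobs_by_start[0x2000] = {0x2000, 0x2400, BlobKind::adapter};

  const Blob* cb = cache.find_blob(0x2100);
  assert(cb != nullptr && cb->kind == BlobKind::adapter);  // adapter -> interpreted
  assert(cache.find_blob(0x5000) == nullptr);              // not in the cache
}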
Example #3
bool CompiledIC::is_call_to_compiled() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");

  // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
  // method is guaranteed to still exist, since we only remove methods after all inline caches
  // have been cleaned up.
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_nmethod());
  // Check that the cached_oop is a klass for non-optimized monomorphic calls
  // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
  // for calling directly to vep without using the inline cache (i.e., cached_oop == NULL)
#ifdef ASSERT
#ifdef TIERED
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_method = caller->is_compiled_by_c1();
#else
#ifdef COMPILER1
  bool is_c1_method = true;
#else
  bool is_c1_method = false;
#endif // COMPILER1
#endif // TIERED
  assert( is_c1_method ||
         !is_monomorphic ||
         is_optimized() ||
         (cached_oop() != NULL && cached_oop()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}
Example #4
bool CompiledIC::is_clean() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  bool is_clean = false;
  address dest = ic_destination();
  is_clean = dest == SharedRuntime::get_resolve_opt_virtual_call_stub() ||
             dest == SharedRuntime::get_resolve_virtual_call_stub();
  assert(!is_clean || is_optimized() || cached_oop() == NULL, "sanity check");
  return is_clean;
}
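
The clean test above boils down to comparing the call destination against the shared resolution stubs. A minimal standalone sketch of that comparison, with a hypothetical ResolveStubs struct standing in for the SharedRuntime accessors:

// Standalone sketch: a call site is "clean" exactly when it still targets one
// of the resolution stubs, i.e. no receiver has been bound to it yet.
#include <cassert>

using address = const void*;

struct ResolveStubs {
  address resolve_virtual;      // stands in for SharedRuntime::get_resolve_virtual_call_stub()
  address resolve_opt_virtual;  // stands in for SharedRuntime::get_resolve_opt_virtual_call_stub()
};

bool is_clean_call_site(address destination, const ResolveStubs& stubs) {
  return destination == stubs.resolve_virtual ||
         destination == stubs.resolve_opt_virtual;
}

int main() {
  char stub_a = 0, stub_b = 0, nmethod_entry = 0;
  ResolveStubs stubs{&stub_a, &stub_b};
  assert(is_clean_call_site(&stub_a, stubs));          // still pointing at a resolve stub
  assert(!is_clean_call_site(&nmethod_entry, stubs));  // already bound to compiled code
}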
Example #5
void ICStub::set_stub(CompiledIC *ic, oop cached_value, address dest_addr) {
  // We cannot store a pointer to the 'ic' object, since it is resource allocated. Instead we
  // store the location of the inline cache. Then we have enough information to recreate the
  // CompiledIC object when we need to remove the stub.
  _ic_site = ic->instruction_address();

  // Assemble new stub
  InlineCacheBuffer::assemble_ic_buffer_code(code_begin(), cached_value, dest_addr);
  assert(destination() == dest_addr,   "can recover destination");
  assert(cached_oop() == cached_value, "can recover cached oop");
}
Example #6
void ICStub::finalize() {
  if (!is_empty()) {
    ResourceMark rm;
    CompiledIC *ic = CompiledIC_at(ic_site());
    assert(CodeCache::find_nmethod(ic->instruction_address()) != NULL, "inline cache in non-nmethod?");

    assert(this == ICStub_from_destination_address(ic->stub_address()), "wrong owner of ic buffer");
    ic->set_cached_oop(cached_oop());
    ic->set_ic_destination(destination());
  }
}
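
Examples #5 and #6 together show the buffering pattern: set_stub records only the address of the IC site (the CompiledIC wrapper is resource allocated and cannot be kept), and finalize later re-creates the wrapper and copies the buffered cached oop and destination back into the call site. A self-contained sketch of that pattern, with all types invented for illustration:

// Standalone sketch: remember the call-site location plus the buffered
// (value, destination) pair, then apply the transition on finalize.
#include <cassert>
#include <cstdint>

struct CallSite {                  // stands in for the patchable IC call site
  const void*    cached_value = nullptr;
  std::uintptr_t destination  = 0;
};

struct ICStubSketch {
  CallSite*      ic_site = nullptr;   // location of the IC, not a wrapper object
  const void*    value   = nullptr;
  std::uintptr_t dest    = 0;

  void set_stub(CallSite* site, const void* cached, std::uintptr_t destination) {
    ic_site = site;
    value   = cached;
    dest    = destination;
  }

  void finalize() {                   // apply the buffered transition to the site
    if (ic_site != nullptr) {
      ic_site->cached_value = value;
      ic_site->destination  = dest;
    }
  }
};

int main() {
  CallSite site;
  int klass_placeholder = 0;
  ICStubSketch stub;
  stub.set_stub(&site, &klass_placeholder, 0x4000);
  stub.finalize();
  assert(site.cached_value == &klass_placeholder);
  assert(site.destination == 0x4000);
}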
Example #7
bool CompiledIC::is_clean() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  bool is_clean = false;
  address dest = ic_destination();
#ifdef COMPILER1
  is_clean = dest == Runtime1::entry_for(Runtime1::resolve_invokevirtual_id) ||
             dest == Runtime1::entry_for(Runtime1::resolve_invoke_opt_virtual_id);
#else
  is_clean = dest == OptoRuntime::resolve_virtual_call_Java() ||
             dest == OptoRuntime::resolve_opt_virtual_call_Java();
#endif
  assert(!is_clean || is_optimized() || cached_oop() == NULL, "sanity check");
  return is_clean;
}