Example #1
vframe* vframe::new_vframe(const frame* f, const RegisterMap* reg_map, JavaThread* thread) {
  // Interpreter frame
  if (f->is_interpreted_frame()) {
    return new interpretedVFrame(f, reg_map, thread);
  }  

#ifndef CORE
  // Compiled frame
  CodeBlob* cb = CodeCache::find_blob(f->pc());
  if (cb != NULL) {  
    if (cb->is_nmethod()) {      
      nmethod* nm = (nmethod*)cb;            
      // Compiled method (native stub or Java code)
      ScopeDesc* scope  = nm->scope_desc_at(f->pc(), reg_map->is_pc_at_call(f->id()));
      return new compiledVFrame(f, reg_map, thread, scope);  
    }

    if (f->is_glue_frame()) {
      // This is a conversion frame. Skip this frame and try again.      
      RegisterMap temp_map = *reg_map;
      frame s = f->sender(&temp_map);
      return new_vframe(&s, &temp_map, thread);
    }
  }
  // Deoptimized frame
  if (f->is_deoptimized_frame()) {
    return new deoptimizedVFrame(f, reg_map, thread);  
  }
#endif

  // External frame
  return new externalVFrame(f, reg_map, thread);
}
Example #2
// --- generate
address MethodStubBlob::generate( heapRef moop, address c2i_adapter ) {
  // NativeMethodStubs must be jumped-to directly and are packed back-to-back.
  // Hence they start CodeEntryAligned, and each later one has to be
  // CodeEntryAligned so we expect the instruction_size to be a multiple.
  assert0( round_to(NativeMethodStub::instruction_size,CodeEntryAlignment) == NativeMethodStub::instruction_size );
  NativeMethodStub *nms;
  do {
    // The _free_list is a racing CAS-managed linked list.  Must read the
    // _free_list exactly ONCE before the CAS attempt below, or otherwise know
    // we have something that used to be on the free_list and is not null.  In
    // general, if we re-read the free_list we have to null-check the result.
    nms = _free_list;
    if( !nms ) {
      // CodeCache makes CodeBlobs.  Make a CodeBlob typed as a methodCodeStub.
      CodeBlob *cb = CodeCache::malloc_CodeBlob( CodeBlob::methodstub, 256*NativeMethodStub::instruction_size );
      address adr = (address)round_to((intptr_t)cb->code_begins(),CodeEntryAlignment);
      cb->_code_start_offset = adr-(address)cb->_code_begins;
      while( adr+NativeMethodStub::instruction_size < cb->end() ) {
        free_stub((NativeMethodStub*)adr);
        adr += NativeMethodStub::instruction_size;
      }
      // The last not-null thing jammed on the freelist.
      nms = (NativeMethodStub*)(adr-NativeMethodStub::instruction_size);
    }
  } while( Atomic::cmpxchg_ptr(*(NativeMethodStub**)nms,&_free_list,nms) != nms );
  nms->fill( moop, c2i_adapter );
  return (address)nms;
}
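The loop above pops a stub off a lock-free, CAS-managed free list, refilling the list from a freshly allocated CodeBlob when it runs dry. As a point of comparison, here is a minimal, stand-alone sketch of the same pop/push pattern using std::atomic rather than HotSpot's Atomic class (names and types are illustrative, not the original ones):

#include <atomic>

struct Stub {
  Stub* next;   // link field, only meaningful while the stub sits on the free list
};

static std::atomic<Stub*> free_list{nullptr};

// Push: publish the old head as our successor, then CAS ourselves in.
static void push_stub(Stub* s) {
  Stub* head = free_list.load(std::memory_order_relaxed);
  do {
    s->next = head;
  } while (!free_list.compare_exchange_weak(head, s,
                                            std::memory_order_release,
                                            std::memory_order_relaxed));
}

// Pop: read the head exactly once per attempt, then CAS it out.
static Stub* pop_stub() {
  Stub* head = free_list.load(std::memory_order_acquire);
  while (head != nullptr &&
         !free_list.compare_exchange_weak(head, head->next,
                                          std::memory_order_acquire,
                                          std::memory_order_relaxed)) {
    // compare_exchange_weak reloads head on failure; just retry
  }
  return head;
}

A naive lock-free stack like this still has the usual ABA caveat; the MethodStubBlob code above at least guarantees that a stub's memory is never unmapped, so the speculative read of the next link cannot fault.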
Example #3
StubCodeGenerator::~StubCodeGenerator() {
  if (PrintStubCode || _print_code) {
    CodeBuffer* cbuf = _masm->code();
    CodeBlob*   blob = CodeCache::find_blob_unsafe(cbuf->insts()->start());
    if (blob != NULL) {
      blob->set_strings(cbuf->strings());
    }
    bool saw_first = false;
    StubCodeDesc* toprint[1000];
    int toprint_len = 0;
    for (StubCodeDesc* cdesc = _last_stub; cdesc != NULL; cdesc = cdesc->_next) {
      toprint[toprint_len++] = cdesc;
      if (cdesc == _first_stub) { saw_first = true; break; }
    }
    assert(saw_first, "must get both first & last");
    // Print in reverse order:
    qsort(toprint, toprint_len, sizeof(toprint[0]), compare_cdesc);
    for (int i = 0; i < toprint_len; i++) {
      StubCodeDesc* cdesc = toprint[i];
      cdesc->print();
      tty->cr();
      Disassembler::decode(cdesc->begin(), cdesc->end());
      tty->cr();
    }
  }
}
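The qsort call relies on a compare_cdesc comparator that is defined elsewhere in stubCodeGenerator.cpp and not shown here; a minimal sketch of such a comparator, ordering descriptors by their start address (the real one may sort by a different key, e.g. registration index), could look like this:

// Illustrative only: order StubCodeDesc entries by the address of their code.
static int compare_cdesc(const void* void_a, const void* void_b) {
  const StubCodeDesc* a = *(StubCodeDesc* const*) void_a;
  const StubCodeDesc* b = *(StubCodeDesc* const*) void_b;
  if (a->begin() < b->begin()) return -1;
  if (a->begin() > b->begin()) return  1;
  return 0;
}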
Example #4
void NativeMovConstReg32::set_data(intptr_t x) {
  set_long_at(sethi_offset, set_data32_sethi(  long_at(sethi_offset), x));
  set_long_at(add_offset,   set_data32_simm13( long_at(add_offset),   x));

  // also store the value into an oop_Relocation cell, if any
  CodeBlob* cb = CodeCache::find_blob(instruction_address());
  nmethod*  nm = cb ? cb->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    RelocIterator iter(nm, instruction_address(), next_instruction_address());
    oop* oop_addr = NULL;
    Metadata** metadata_addr = NULL;
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type) {
        oop_Relocation *r = iter.oop_reloc();
        if (oop_addr == NULL) {
          oop_addr = r->oop_addr();
          *oop_addr = cast_to_oop(x);
        } else {
          assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
        }
      }
      if (iter.type() == relocInfo::metadata_type) {
        metadata_Relocation *r = iter.metadata_reloc();
        if (metadata_addr == NULL) {
          metadata_addr = r->metadata_addr();
          *metadata_addr = (Metadata*)x;
        } else {
          assert(metadata_addr == r->metadata_addr(), "must be only one set-metadata here");
        }
      }
    }
  }
}
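set_data32_sethi and set_data32_simm13 patch the two halves of a 32-bit constant that SPARC materializes with a sethi/add pair: sethi carries the upper 22 bits and the add's 13-bit immediate carries the low 10 bits. A small stand-alone sketch of that split (helper names here are illustrative, not the HotSpot ones):

#include <cassert>
#include <cstdint>

// %hi(x): upper 22 bits, as consumed by sethi.  %lo(x): low 10 bits, as
// consumed by the signed 13-bit immediate of the following add/or.
static inline uint32_t hi22(uint32_t x) { return x >> 10; }
static inline uint32_t lo10(uint32_t x) { return x & 0x3ff; }

int main() {
  uint32_t x = 0xdeadbeefu;
  // Recombining both halves must reproduce the original constant.
  assert(((hi22(x) << 10) | lo10(x)) == x);
  return 0;
}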
Example #5
// This method is safe to call without holding the CodeCache_lock, as long as a dead codeblob is not
// looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain
// valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
CodeBlob* CodeCache::find_blob(void* start) {
  CodeBlob* result = find_blob_unsafe(start);
  if (result == NULL) return NULL;
  // We could potentially look up non_entrant methods
  guarantee(!result->is_zombie() || result->is_locked_by_vm() || is_error_reported(), "unsafe access to zombie method");
  return result;
}
Example #6
bool CompiledIC::is_call_to_compiled() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");

  // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
  // method is guaranteed to still exist, since we only remove methods after all inline caches
  // have been cleaned up
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_nmethod());
  // Check that the cached_oop is a klass for non-optimized monomorphic calls
  // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
  // for calling directly to vep without using the inline cache (i.e., cached_oop == NULL)
#ifdef ASSERT
#ifdef TIERED
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_method = caller->is_compiled_by_c1();
#else
#ifdef COMPILER1
  bool is_c1_method = true;
#else
  bool is_c1_method = false;
#endif // COMPILER1
#endif // TIERED
  assert( is_c1_method ||
         !is_monomorphic ||
         is_optimized() ||
         (cached_oop() != NULL && cached_oop()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}
Example #7
vframe* vframe::new_vframe(const frame* f, const RegisterMap* reg_map, JavaThread* thread) {
  // Interpreter frame
  if (f->is_interpreted_frame()) {
    return new interpretedVFrame(f, reg_map, thread);
  }

  // Compiled frame
  CodeBlob* cb = f->cb();
  if (cb != NULL) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      return new compiledVFrame(f, reg_map, thread, nm);
    }

    if (f->is_runtime_frame()) {
      // Skip this frame and try again.
      RegisterMap temp_map = *reg_map;
      frame s = f->sender(&temp_map);
      return new_vframe(&s, &temp_map, thread);
    }
  }

  // External frame
  return new externalVFrame(f, reg_map, thread);
}
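new_vframe only builds the innermost virtual frame for a given physical frame; a caller typically walks outward by following sender() until it returns NULL. A sketch of that loop, assuming the usual HotSpot vframe interface (sender(), is_java_frame(), javaVFrame::cast(), method(), bci()):

// Sketch only: print every Java frame of a thread that has a last Java frame.
void print_java_frames(JavaThread* thread) {
  ResourceMark rm;
  RegisterMap reg_map(thread);
  frame f = thread->last_frame();
  for (vframe* vf = vframe::new_vframe(&f, &reg_map, thread);
       vf != NULL;
       vf = vf->sender()) {
    if (vf->is_java_frame()) {
      javaVFrame* jvf = javaVFrame::cast(vf);
      tty->print_cr("  %s @ bci %d",
                    jvf->method()->name_and_sig_as_C_string(), jvf->bci());
    }
  }
}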
Example #8
nmethod* CodeCache::first_nmethod() {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = first();
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}
Example #9
bool Relocation::is_copy() const {
#ifndef CORE
  CodeBlob* cb = code();
  address code_end = (address)cb + cb->size();
  return !(addr() >= cb->instructions_begin() && addr() <= code_end);
#else
  return false;
#endif // !CORE
}
Example #10
File: debug.cpp Project: LeLiKa/openjdk
extern "C" void nm(intptr_t p) {
  // Actually we look through all CodeBlobs (the nm name has been kept for backward compatibility)
  Command c("nm");
  CodeBlob* cb = CodeCache::find_blob((address)p);
  if (cb == NULL) {
    tty->print_cr("NULL");
  } else {
    cb->print();
  }
}
Example #11
File: debug.cpp Project: LeLiKa/openjdk
extern "C" void printnm(intptr_t p) {
  char buffer[256];
  sprintf(buffer, "printnm: " INTPTR_FORMAT, p);
  Command c(buffer);
  CodeBlob* cb = CodeCache::find_blob((address) p);
  if (cb->is_nmethod()) {
    nmethod* nm = (nmethod*)cb;
    nm->print_nmethod(true);
  }
}
Example #12
void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder) {
  assert(entry_point != NULL, "must set legal entry point");
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert (!is_optimized() || cache == NULL, "an optimized virtual call does not have a cached metadata");
  assert (cache == NULL || cache != (Metadata*)badOopVal, "invalid metadata");

  assert(!is_icholder || is_icholder_entry(entry_point), "must be");

  // Don't use ic_destination for this test since that forwards
  // through ICBuffer instead of returning the actual current state of
  // the CompiledIC.
  if (is_icholder_entry(_ic_call->destination())) {
    // When patching for the ICStub case the cached value isn't
    // overwritten until the ICStub copied into the CompiledIC during
    // the next safepoint.  Make sure that the CompiledICHolder* is
    // marked for release at this point since it won't be identifiable
    // once the entry point is overwritten.
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)_value->data());
  }

  if (TraceCompiledIC) {
    tty->print("  ");
    print_compiled_ic();
    tty->print(" changing destination to " INTPTR_FORMAT, p2i(entry_point));
    if (!is_optimized()) {
      tty->print(" changing cached %s to " INTPTR_FORMAT, is_icholder ? "icholder" : "metadata", p2i((address)cache));
    }
    if (is_icstub) {
      tty->print(" (icstub)");
    }
    tty->cr();
  }

  {
    MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
    assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
     _ic_call->set_destination_mt_safe(entry_point);
  }

  if (is_optimized() || is_icstub) {
    // Optimized call sites don't have a cache value and ICStub call
    // sites only change the entry point.  Changing the value in that
    // case could lead to MT safety issues.
    assert(cache == NULL, "must be null");
    return;
  }

  if (cache == NULL)  cache = (void*)Universe::non_oop_word();

  _value->set_data((intptr_t)cache);
}
Example #13
File: debug.cpp Project: wei-tang/JVM
extern "C" void disnm(intptr_t p) {
  Command c("disnm");
  CodeBlob* cb = CodeCache::find_blob((address) p);
  nmethod* nm = cb->as_nmethod_or_null();
  if (nm) {
    nm->print();
    Disassembler::decode(nm);
  } else {
    cb->print();
    Disassembler::decode(cb);
  }
}
Example #14
inline static bool checkByteBuffer(address pc, address* stub) {
  // BugId 4454115: A read from a MappedByteBuffer can fault
  // here if the underlying file has been truncated.
  // Do not crash the VM in such a case.
  CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
  nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL;
  if (nm != NULL && nm->has_unsafe_access()) {
    *stub = StubRoutines::handler_for_unsafe_access();
    return true;
  }
  return false;
}
Example #15
  CodeBlob iDefHeader(InstructionIndex length, std::vector<NameHash> argNames) {
    CodeBlob blob = CodeBlob{FN_OP_DEF};
    blob.append(length);
    
    NumArgs numArgs = (NumArgs)argNames.size();
    blob.append(numArgs);

    for(auto argName : argNames) {
      blob.append(argName);
    }

    return blob;
  }
Example #16
bool CompiledIC::is_icholder_entry(address entry) {
  CodeBlob* cb = CodeCache::find_blob_unsafe(entry);
  if (cb != NULL && cb->is_adapter_blob()) {
    return true;
  }
  // itable stubs also use CompiledICHolder
  if (cb != NULL && cb->is_vtable_blob()) {
    VtableStub* s = VtableStubs::entry_point(entry);
    return (s != NULL) && s->is_itable_stub();
  }

  return false;
}
Example #17
bool vframeStream::fill_from_frame() {  
  // Interpreted frame
  if (_frame.is_interpreted_frame()) {
    fill_from_interpreter_frame();
    return true;
  }

  // Compiled frame  

#ifndef CORE
  CodeBlob* code = CodeCache::find_blob(_frame.pc());
  if (code != NULL && code->is_nmethod()) {
    nmethod* nm = (nmethod*)code;
    if (nm->is_native_method()) {
      // Do not rely on scopeDesc since the pc might be imprecise due to the _last_native_pc trick.
      fill_from_compiled_native_frame(nm);
    } else {    
      bool at_call = _reg_map.is_pc_at_call(_frame.id());
      PcDesc* pc_desc = nm->pc_desc_at(_frame.pc(), at_call);
#ifdef ASSERT
      if (pc_desc == NULL) {
        tty->print_cr("Error in fill_from_frame: pc_desc for " INTPTR_FORMAT " not found", _frame.pc());
        nm->print();
        nm->method()->print_codes();
        nm->print_code();
        nm->print_pcs();
      }
#endif
      assert(pc_desc != NULL, "scopeDesc must exist");
      fill_from_compiled_frame(nm, pc_desc->scope_decode_offset());
    }
    return true;
  }
#endif

  // End of stack?
  if (_frame.is_first_frame() || (_stop_at_java_call_stub && _frame.is_entry_frame())) {
    _mode = at_end_mode;
    return true;
  }    

  // Deoptimized frame
#ifndef CORE
  if (_frame.is_deoptimized_frame()) {
    fill_from_deoptimized_frame(_thread->vframe_array_for(&_frame), vframeArray::first_index());
    return true;
  }
#endif

  return false;
}
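fill_from_frame is the per-frame workhorse behind vframeStream; callers usually never touch it directly and just iterate the stream. A sketch of the usual pattern, assuming the standard vframeStream interface (at_end(), next(), method(), bci()):

// Sketch only: dump the Java-level stack of a thread via vframeStream.
void dump_java_stack(JavaThread* thread) {
  ResourceMark rm;
  for (vframeStream vfst(thread); !vfst.at_end(); vfst.next()) {
    tty->print_cr("  %s (bci %d)",
                  vfst.method()->name_and_sig_as_C_string(), vfst.bci());
  }
}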
Example #18
void CompiledStaticCall::set_to_clean() {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset call site
  MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(this);
  assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
  set_destination_mt_safe(SharedRuntime::get_resolve_static_call_stub());

  // Do not reset stub here:  It is too expensive to call find_stub.
  // Instead, rely on caller (nmethod::clear_inline_caches) to clear
  // both the call and its stub.
}
Example #19
void CompiledIC::set_ic_destination(address entry_point) {
  assert(entry_point != NULL, "must set legal entry point");
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  if (TraceCompiledIC) {
    tty->print("  ");
    print_compiled_ic();
    tty->print_cr(" changing destination to " INTPTR_FORMAT, entry_point);
  }
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
  assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
  _ic_call->set_destination_mt_safe(entry_point);
}
Example #20
inline static bool checkICMiss(sigcontext* uc, address* pc, address* stub) {
#ifdef COMPILER2
  if (nativeInstruction_at(*pc)->is_ic_miss_trap()) {
#ifdef ASSERT
#ifdef TIERED
    CodeBlob* cb = CodeCache::find_blob_unsafe(*pc);
    assert(cb->is_compiled_by_c2(), "Wrong compiler");
#endif // TIERED
#endif // ASSERT
    // Inline cache missed and user trap "Tne G0+ST_RESERVED_FOR_USER_0+2" taken.
    *stub = SharedRuntime::get_ic_miss_stub();
    // At the stub it needs to look like a call from the caller of this
    // method (not a call from the segv site).
    *pc = (address)SIG_REGS(uc).u_regs[CON_O7];
    return true;
  }
#endif  // COMPILER2
  return false;
}
Example #21
void InterfaceSupport::verify_stack() {
  JavaThread* thread = JavaThread::current();
  ResourceMark rm(thread);
  // disabled because it throws warnings that oop maps should only be accessed
  // in VM thread or during debugging

  if (!thread->has_pending_exception()) {
    // verification does not work if there are pending exceptions
    StackFrameStream sfs(thread);
    CodeBlob* cb = sfs.current()->cb();
      // In case of exceptions we might not have a runtime_stub on
      // top of stack, hence, all callee-saved registers are not going
      // to be setup correctly, hence, we cannot do stack verify
    if (cb != NULL && !(cb->is_runtime_stub() || cb->is_uncommon_trap_stub())) return;

    for (; !sfs.is_done(); sfs.next()) {
      sfs.current()->verify(sfs.register_map());
    }
  }
}
Example #22
bool CompiledIC::is_call_to_interpreted() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Call to interpreter if destination is either calling to a stub (if it
  // is optimized), or calling to an I2C blob
  bool is_call_to_interpreted = false;
  if (!is_optimized()) {
    // must use unsafe because the destination can be a zombie (and we're cleaning)
    // and the print_compiled_ic code wants to know if the site (in the non-zombie)
    // is to the interpreter.
    CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
    is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
    assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != NULL), "sanity check");
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub)
    CodeBlob* cb = CodeCache::find_blob(_ic_call->instruction_address());
    address dest = ic_destination();
#ifdef ASSERT
    {
      CodeBlob* db = CodeCache::find_blob_unsafe(dest);
      assert(!db->is_adapter_blob(), "must use stub!");
    }
#endif /* ASSERT */
    is_call_to_interpreted = cb->contains(dest);
  }
  return is_call_to_interpreted;
}
Example #23
void pd_ps(frame f) {
  intptr_t* sp = f.sp();
  intptr_t* prev_sp = sp - 1;
  intptr_t *pc = NULL;
  intptr_t *next_pc = NULL;
  int count = 0;
  tty->print("register window backtrace from %#x:\n", sp);
  while (sp != NULL && ((intptr_t)sp & 7) == 0 && sp > prev_sp && sp < prev_sp+1000) {
    pc      = next_pc;
    next_pc = (intptr_t*) sp[I7->sp_offset_in_saved_window()];
    tty->print("[%d] sp=%#x pc=", count, sp);
    findpc((intptr_t)pc);
    if (WizardMode && Verbose) {
      // print register window contents also
      tty->print_cr("    L0..L7: {%#x %#x %#x %#x %#x %#x %#x %#x}",
                    sp[0+0],sp[0+1],sp[0+2],sp[0+3],
                    sp[0+4],sp[0+5],sp[0+6],sp[0+7]);
      tty->print_cr("    I0..I7: {%#x %#x %#x %#x %#x %#x %#x %#x}",
                    sp[8+0],sp[8+1],sp[8+2],sp[8+3],
                    sp[8+4],sp[8+5],sp[8+6],sp[8+7]);
      // (and print stack frame contents too??)

      CodeBlob *b = CodeCache::find_blob((address) pc);
      if (b != NULL) {
        if (b->is_nmethod()) {
          methodOop m = ((nmethod*)b)->method();
          int nlocals = m->max_locals();
          int nparams  = m->size_of_parameters();
          tty->print_cr("compiled java method (locals = %d, params = %d)", nlocals, nparams);
        }
      }
    }
    prev_sp = sp;
    sp = (intptr_t *)sp[FP->sp_offset_in_saved_window()];
    sp = (intptr_t *)((intptr_t)sp + STACK_BIAS);
    count += 1;
  }
  if (sp != NULL)
    tty->print("[%d] sp=%#x [bogus sp!]", count, sp);
}
Example #24
bool CompiledIC::is_call_to_interpreted() const {  
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Call to interpreter if destination is either calling to a stub (if it
  // is optimized), or calling to an I2C
  bool is_call_to_interpreted = false;
#ifdef COMPILER1
  if (!is_optimized()) {
    is_call_to_interpreted =
      Runtime1::blob_for(Runtime1::interpreter_entries_id)->contains(ic_destination());
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub)
    CodeBlob* cb = CodeCache::find_blob(_ic_call->instruction_address());
    is_call_to_interpreted = cb->contains(ic_destination());
  }
#else
  if (!is_optimized()) {
    CodeBlob* cb = CodeCache::find_blob(ic_destination());  
    is_call_to_interpreted = (cb != NULL && cb->is_c2i_adapter());
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub)
    CodeBlob* cb = CodeCache::find_blob(_ic_call->instruction_address());
    is_call_to_interpreted = cb->contains(ic_destination());
  }
#endif // COMPILER1
  assert(!is_call_to_interpreted || is_optimized() || (cached_oop() != NULL && cached_oop()->is_compiledICHolder()), "sanity check");
  return is_call_to_interpreted;
}
Example #25
void InterfaceSupport::stress_derived_pointers() {
#ifdef COMPILER2
  JavaThread *thread = JavaThread::current();
  if (!is_init_completed()) return;
  ResourceMark rm(thread);
  bool found = false;
  for (StackFrameStream sfs(thread); !sfs.is_done() && !found; sfs.next()) {
    CodeBlob* cb = sfs.current()->cb();
    if (cb != NULL && cb->oop_maps() ) {
      // Find oopmap for current method
      const ImmutableOopMap* map = cb->oop_map_for_return_address(sfs.current()->pc());
      assert(map != NULL, "no oopmap found for pc");
      found = map->has_derived_pointer();
    }
  }
  if (found) {
    // $$$ Not sure what to do here.
    /*
    Scavenge::invoke(0);
    */
  }
#endif
}
Example #26
bool CompiledIC::is_call_to_compiled() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");

  // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
  // method is guaranteed to still exist, since we only remove methods after all inline caches
  // have been cleaned up
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_nmethod());
  // Check that the cached_value is a klass for non-optimized monomorphic calls
  // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
  // for calling directly to vep without using the inline cache (i.e., cached_value == NULL).
  // For JVMCI this occurs because CHA is only used to improve inlining so call sites which could be optimized
  // virtuals because there are no currently loaded subclasses of a type are left as virtual call sites.
#ifdef ASSERT
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_or_jvmci_method = caller->is_compiled_by_c1() || caller->is_compiled_by_jvmci();
  assert( is_c1_or_jvmci_method ||
         !is_monomorphic ||
         is_optimized() ||
         !caller->is_alive() ||
         (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}
Example #27
CodeBlob* CodeCache::find_blob_unsafe(void* start) {  
  CodeBlob* result = (CodeBlob*)_heap->find_start(start);
  if (result == NULL) return NULL;
  assert(result->blob_contains((address)start), "found wrong CodeBlob");     
  return result;  
}
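find_blob_unsafe delegates to the code heap's find_start, which uses the _segmap mentioned in the find_blob comment of Example #5 to map an arbitrary interior address back to the start of its block. A simplified sketch of that mechanism (field names, segment size, and the free marker are illustrative, not the actual CodeHeap layout):

#include <cstddef>

// Simplified model: the code space is split into fixed-size segments and
// segmap[i] stores how many segments back the enclosing block begins
// (0 = this segment starts a block, FREE = segment is unused).
struct SimpleCodeHeap {
  static const int           log2_segment_size = 6;   // 64-byte segments, for example
  static const unsigned char FREE              = 0xff;

  unsigned char* segmap;   // one byte per segment
  char*          low;      // start of the managed code space

  void* find_start(void* p) const {
    size_t seg = ((char*)p - low) >> log2_segment_size;
    if (segmap[seg] == FREE) return NULL;              // interior of free space
    while (segmap[seg] > 0) {                          // hop back toward the block header
      seg -= segmap[seg];
    }
    return low + (seg << log2_segment_size);
  }
};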
Example #28
File: debug.cpp Project: LeLiKa/openjdk
static void find(intptr_t x, bool print_pc) {
  address addr = (address)x;

  CodeBlob* b = CodeCache::find_blob_unsafe(addr);
  if (b != NULL) {
    if (b->is_buffer_blob()) {
      // the interpreter is generated into a buffer blob
      InterpreterCodelet* i = Interpreter::codelet_containing(addr);
      if (i != NULL) {
        i->print();
        return;
      }
      if (Interpreter::contains(addr)) {
        tty->print_cr(INTPTR_FORMAT " is pointing into interpreter code (not bytecode specific)", addr);
        return;
      }
      //
      if (AdapterHandlerLibrary::contains(b)) {
        AdapterHandlerLibrary::print_handler(b);
      }
      // the stubroutines are generated into a buffer blob
      StubCodeDesc* d = StubCodeDesc::desc_for(addr);
      if (d != NULL) {
        d->print();
        if (print_pc) tty->cr();
        return;
      }
      if (StubRoutines::contains(addr)) {
        tty->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", addr);
        return;
      }
      // the InlineCacheBuffer is using stubs generated into a buffer blob
      if (InlineCacheBuffer::contains(addr)) {
        tty->print_cr(INTPTR_FORMAT " is pointing into InlineCacheBuffer", addr);
        return;
      }
      VtableStub* v = VtableStubs::stub_containing(addr);
      if (v != NULL) {
        v->print();
        return;
      }
    }
    if (print_pc && b->is_nmethod()) {
      ResourceMark rm;
      tty->print("%#p: Compiled ", addr);
      ((nmethod*)b)->method()->print_value_on(tty);
      tty->print("  = (CodeBlob*)" INTPTR_FORMAT, b);
      tty->cr();
      return;
    }
    if ( b->is_nmethod()) {
      if (b->is_zombie()) {
        tty->print_cr(INTPTR_FORMAT " is zombie nmethod", b);
      } else if (b->is_not_entrant()) {
        tty->print_cr(INTPTR_FORMAT " is non-entrant nmethod", b);
      }
    }
    b->print();
    return;
  }

  if (Universe::heap()->is_in(addr)) {
    HeapWord* p = Universe::heap()->block_start(addr);
    bool print = false;
    // If we couldn't find it, it may just mean that the heap wasn't parseable
    // See if we were just given an oop directly
    if (p != NULL && Universe::heap()->block_is_obj(p)) {
      print = true;
    } else if (p == NULL && ((oopDesc*)addr)->is_oop()) {
      p = (HeapWord*) addr;
      print = true;
    }
    if (print) {
      oop(p)->print();
      if (p != (HeapWord*)x && oop(p)->is_constMethod() &&
          constMethodOop(p)->contains(addr)) {
        Thread *thread = Thread::current();
        HandleMark hm(thread);
        methodHandle mh (thread, constMethodOop(p)->method());
        if (!mh->is_native()) {
          tty->print_cr("bci_from(%p) = %d; print_codes():",
                        addr, mh->bci_from(address(x)));
          mh->print_codes();
        }
      }
      return;
    }
  } else if (Universe::heap()->is_in_reserved(addr)) {
    tty->print_cr(INTPTR_FORMAT " is an unallocated location in the heap", addr);
    return;
  }

  if (JNIHandles::is_global_handle((jobject) addr)) {
    tty->print_cr(INTPTR_FORMAT " is a global jni handle", addr);
    return;
  }
  if (JNIHandles::is_weak_global_handle((jobject) addr)) {
    tty->print_cr(INTPTR_FORMAT " is a weak global jni handle", addr);
    return;
  }
  if (JNIHandleBlock::any_contains((jobject) addr)) {
    tty->print_cr(INTPTR_FORMAT " is a local jni handle", addr);
    return;
  }

  for(JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
    // Check for privilege stack
    if (thread->privileged_stack_top() != NULL && thread->privileged_stack_top()->contains(addr)) {
      tty->print_cr(INTPTR_FORMAT " is pointing into the privilege stack for thread: " INTPTR_FORMAT, addr, thread);
      return;
    }
    // If the addr is a java thread print information about that.
    if (addr == (address)thread) {
       thread->print();
       return;
    }
  }

  // Try an OS specific find
  if (os::find(addr)) {
    return;
  }

  if (print_pc) {
    tty->print_cr(INTPTR_FORMAT ": probably in C++ code; check debugger", addr);
    Disassembler::decode(same_page(addr-40,addr),same_page(addr+40,addr));
    return;
  }

  tty->print_cr(INTPTR_FORMAT " is pointing to unknown location", addr);
}
Example #29
nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}
Example #30
File: debug.cpp Project: LeLiKa/openjdk
extern "C" void disnm(intptr_t p) {
  Command c("disnm");
  CodeBlob* cb = CodeCache::find_blob((address) p);
  cb->print();
  Disassembler::decode(cb);
}