Example #1
bool CompiledIC::is_call_to_compiled() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");

  // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
  // method is guaranteed to still exist, since we only remove methods after all inline caches
  // have been cleaned up.
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_nmethod());
  // Check that the cached_oop is a klass for non-optimized monomorphic calls.
  // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
  // for calling directly to the verified entry point (vep) without using the inline cache (i.e., cached_oop == NULL).
#ifdef ASSERT
#ifdef TIERED
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_method = caller->is_compiled_by_c1();
#else
#ifdef COMPILER1
  bool is_c1_method = true;
#else
  bool is_c1_method = false;
#endif // COMPILER1
#endif // TIERED
  assert( is_c1_method ||
         !is_monomorphic ||
         is_optimized() ||
         (cached_oop() != NULL && cached_oop()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}
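A minimal usage sketch (a hypothetical caller, not taken from the sources above): a query like this can decide during inline-cache cleaning whether a call site still targets an nmethod that is about to be removed. The names clean_ic_if_dead, ic, and dead_nm are illustrative assumptions.

static void clean_ic_if_dead(CompiledIC* ic, nmethod* dead_nm) {
  if (ic->is_call_to_compiled()) {
    // true means ic_destination() currently points into some nmethod
    CodeBlob* cb = CodeCache::find_blob_unsafe(ic->ic_destination());
    if ((nmethod*)cb == dead_nm) {
      ic->set_to_clean();  // reset the site to the resolve stub
    }
  }
}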
Example #2
vframe* vframe::new_vframe(const frame* f, const RegisterMap* reg_map, JavaThread* thread) {
  // Interpreter frame
  if (f->is_interpreted_frame()) {
    return new interpretedVFrame(f, reg_map, thread);
  }  

#ifndef CORE
  // Compiled frame
  CodeBlob* cb = CodeCache::find_blob(f->pc());
  if (cb != NULL) {  
    if (cb->is_nmethod()) {      
      nmethod* nm = (nmethod*)cb;            
      // Compiled method (native stub or Java code)
      ScopeDesc* scope  = nm->scope_desc_at(f->pc(), reg_map->is_pc_at_call(f->id()));
      return new compiledVFrame(f, reg_map, thread, scope);  
    }

    if (f->is_glue_frame()) {
      // This is a conversion frame. Skip this frame and try again.      
      RegisterMap temp_map = *reg_map;
      frame s = f->sender(&temp_map);
      return new_vframe(&s, &temp_map, thread);
    }
  }
  // Deoptimized frame
  if (f->is_deoptimized_frame()) {
    return new deoptimizedVFrame(f, reg_map, thread);  
  }
#endif

  // External frame
  return new externalVFrame(f, reg_map, thread);
}
Example #3
vframe* vframe::new_vframe(const frame* f, const RegisterMap* reg_map, JavaThread* thread) {
  // Interpreter frame
  if (f->is_interpreted_frame()) {
    return new interpretedVFrame(f, reg_map, thread);
  }

  // Compiled frame
  CodeBlob* cb = f->cb();
  if (cb != NULL) {
    if (cb->is_nmethod()) {
      nmethod* nm = (nmethod*)cb;
      return new compiledVFrame(f, reg_map, thread, nm);
    }

    if (f->is_runtime_frame()) {
      // Skip this frame and try again.
      RegisterMap temp_map = *reg_map;
      frame s = f->sender(&temp_map);
      return new_vframe(&s, &temp_map, thread);
    }
  }

  // External frame
  return new externalVFrame(f, reg_map, thread);
}
Example #4
nmethod* CodeCache::first_nmethod() {
  assert_locked_or_safepoint(CodeCache_lock);
  CodeBlob* cb = first();
  while (cb != NULL && !cb->is_nmethod()) {
    cb = next(cb);
  }
  return (nmethod*)cb;
}
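A sketch of the iteration idiom this helper exists for, assuming the companion CodeCache::next_nmethod(CodeBlob*) found in HotSpot sources of the same vintage (it skips non-nmethod blobs the same way first_nmethod does); the caller is expected to hold CodeCache_lock or be at a safepoint:

nmethod* nm = CodeCache::first_nmethod();
while (nm != NULL) {
  nm->print_value_on(tty);            // hypothetical per-nmethod action
  tty->cr();
  nm = CodeCache::next_nmethod(nm);   // advance, skipping non-nmethod blobs
}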
Example #5
File: debug.cpp  Project: LeLiKa/openjdk
extern "C" void printnm(intptr_t p) {
  char buffer[256];
  sprintf(buffer, "printnm: " INTPTR_FORMAT, p);
  Command c(buffer);
  CodeBlob* cb = CodeCache::find_blob((address) p);
  if (cb != NULL && cb->is_nmethod()) {  // guard: p may not be in the code cache
    nmethod* nm = (nmethod*)cb;
    nm->print_nmethod(true);
  }
}
Example #6
void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder) {
  assert(entry_point != NULL, "must set legal entry point");
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert (!is_optimized() || cache == NULL, "an optimized virtual call does not have a cached metadata");
  assert (cache == NULL || cache != (Metadata*)badOopVal, "invalid metadata");

  assert(!is_icholder || is_icholder_entry(entry_point), "must be");

  // Don't use ic_destination for this test since that forwards
  // through ICBuffer instead of returning the actual current state of
  // the CompiledIC.
  if (is_icholder_entry(_ic_call->destination())) {
    // When patching for the ICStub case the cached value isn't
    // overwritten until the ICStub copied into the CompiledIC during
    // the next safepoint.  Make sure that the CompiledICHolder* is
    // marked for release at this point since it won't be identifiable
    // once the entry point is overwritten.
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)_value->data());
  }

  if (TraceCompiledIC) {
    tty->print("  ");
    print_compiled_ic();
    tty->print(" changing destination to " INTPTR_FORMAT, p2i(entry_point));
    if (!is_optimized()) {
      tty->print(" changing cached %s to " INTPTR_FORMAT, is_icholder ? "icholder" : "metadata", p2i((address)cache));
    }
    if (is_icstub) {
      tty->print(" (icstub)");
    }
    tty->cr();
  }

  {
    MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
    assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
    _ic_call->set_destination_mt_safe(entry_point);
  }

  if (is_optimized() || is_icstub) {
    // Optimized call sites don't have a cache value and ICStub call
    // sites only change the entry point.  Changing the value in that
    // case could lead to MT safety issues.
    assert(cache == NULL, "must be null");
    return;
  }

  if (cache == NULL)  cache = (void*)Universe::non_oop_word();

  _value->set_data((intptr_t)cache);
}
Example #7
inline static bool checkByteBuffer(address pc, address* stub) {
  // BugId 4454115: A read from a MappedByteBuffer can fault
  // here if the underlying file has been truncated.
  // Do not crash the VM in such a case.
  CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
  nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL;
  if (nm != NULL && nm->has_unsafe_access()) {
    *stub = StubRoutines::handler_for_unsafe_access();
    return true;
  }
  return false;
}
Example #8
bool vframeStream::fill_from_frame() {  
  // Interpreted frame
  if (_frame.is_interpreted_frame()) {
    fill_from_interpreter_frame();
    return true;
  }

  // Compiled frame  

#ifndef CORE
  CodeBlob* code = CodeCache::find_blob(_frame.pc());
  if (code != NULL && code->is_nmethod()) {
    nmethod* nm = (nmethod*)code;
    if (nm->is_native_method()) {
      // Do not rely on scopeDesc since the pc might be imprecise due to the _last_native_pc trick.
      fill_from_compiled_native_frame(nm);
    } else {    
      bool at_call = _reg_map.is_pc_at_call(_frame.id());
      PcDesc* pc_desc = nm->pc_desc_at(_frame.pc(), at_call);
#ifdef ASSERT
      if (pc_desc == NULL) {
        tty->print_cr("Error in fill_from_frame: pc_desc for " INTPTR_FORMAT " not found", _frame.pc());
        nm->print();
        nm->method()->print_codes();
        nm->print_code();
        nm->print_pcs();
      }
#endif
      assert(pc_desc != NULL, "scopeDesc must exist");
      fill_from_compiled_frame(nm, pc_desc->scope_decode_offset());
    }
    return true;
  }
#endif

  // End of stack?
  if (_frame.is_first_frame() || (_stop_at_java_call_stub && _frame.is_entry_frame())) {
    _mode = at_end_mode;
    return true;
  }    

  // Deoptimized frame
#ifndef CORE
  if (_frame.is_deoptimized_frame()) {
    fill_from_deoptimized_frame(_thread->vframe_array_for(&_frame), vframeArray::first_index());
    return true;
  }
#endif

  return false;
}
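fill_from_frame() is the workhorse behind the usual vframeStream walk; a minimal sketch of that loop follows, assuming the standard at_end()/next()/method()/bci() accessors of this era's vframeStream (next() drives fill_from_frame() for each frame):

// thread is a JavaThread* with a last Java frame set up
for (vframeStream vfst(thread); !vfst.at_end(); vfst.next()) {
  // each step classifies the next frame via fill_from_frame()
  tty->print_cr("%s @ bci %d", vfst.method()->name()->as_C_string(), vfst.bci());
}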
Example #9
void CompiledStaticCall::set_to_clean() {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset call site
  MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(this);
  assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
  set_destination_mt_safe(SharedRuntime::get_resolve_static_call_stub());

  // Do not reset stub here:  It is too expensive to call find_stub.
  // Instead, rely on caller (nmethod::clear_inline_caches) to clear
  // both the call and its stub.
}
Example #10
void CompiledIC::set_ic_destination(address entry_point) {
  assert(entry_point != NULL, "must set legal entry point");
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  if (TraceCompiledIC) {
    tty->print("  ");
    print_compiled_ic();
    tty->print_cr(" changing destination to " INTPTR_FORMAT, entry_point);
  }
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
  assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
  _ic_call->set_destination_mt_safe(entry_point);
}
Example #11
void pd_ps(frame f) {
  intptr_t* sp = f.sp();
  intptr_t* prev_sp = sp - 1;
  intptr_t *pc = NULL;
  intptr_t *next_pc = NULL;
  int count = 0;
  tty->print("register window backtrace from %#x:\n", sp);
  while (sp != NULL && ((intptr_t)sp & 7) == 0 && sp > prev_sp && sp < prev_sp+1000) {
    pc      = next_pc;
    next_pc = (intptr_t*) sp[I7->sp_offset_in_saved_window()];
    tty->print("[%d] sp=%#x pc=", count, sp);
    findpc((intptr_t)pc);
    if (WizardMode && Verbose) {
      // print register window contents also
      tty->print_cr("    L0..L7: {%#x %#x %#x %#x %#x %#x %#x %#x}",
                    sp[0+0],sp[0+1],sp[0+2],sp[0+3],
                    sp[0+4],sp[0+5],sp[0+6],sp[0+7]);
      tty->print_cr("    I0..I7: {%#x %#x %#x %#x %#x %#x %#x %#x}",
                    sp[8+0],sp[8+1],sp[8+2],sp[8+3],
                    sp[8+4],sp[8+5],sp[8+6],sp[8+7]);
      // (and print stack frame contents too??)

      CodeBlob *b = CodeCache::find_blob((address) pc);
      if (b != NULL) {
        if (b->is_nmethod()) {
          methodOop m = ((nmethod*)b)->method();
          int nlocals = m->max_locals();
          int nparams  = m->size_of_parameters();
          tty->print_cr("compiled java method (locals = %d, params = %d)", nlocals, nparams);
        }
      }
    }
    prev_sp = sp;
    sp = (intptr_t *)sp[FP->sp_offset_in_saved_window()];
    sp = (intptr_t *)((intptr_t)sp + STACK_BIAS);
    count += 1;
  }
  if (sp != NULL)
    tty->print("[%d] sp=%#x [bogus sp!]", count, sp);
}
Example #12
bool CompiledIC::is_call_to_compiled() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");

  // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
  // method is guaranteed to still exist, since we only remove methods after all inline caches
  // have been cleaned up.
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_nmethod());
  // Check that the cached_value is a klass for non-optimized monomorphic calls
  // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
  // for calling directly to the verified entry point (vep) without using the inline cache (i.e., cached_value == NULL).
  // For JVMCI this occurs because CHA is only used to improve inlining, so call sites which could be
  // optimized virtual calls (because there are no currently loaded subclasses of a type) are left as
  // virtual call sites.
#ifdef ASSERT
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_or_jvmci_method = caller->is_compiled_by_c1() || caller->is_compiled_by_jvmci();
  assert( is_c1_or_jvmci_method ||
         !is_monomorphic ||
         is_optimized() ||
         !caller->is_alive() ||
         (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}
Example #13
File: debug.cpp  Project: LeLiKa/openjdk
static void find(intptr_t x, bool print_pc) {
  address addr = (address)x;

  CodeBlob* b = CodeCache::find_blob_unsafe(addr);
  if (b != NULL) {
    if (b->is_buffer_blob()) {
      // the interpreter is generated into a buffer blob
      InterpreterCodelet* i = Interpreter::codelet_containing(addr);
      if (i != NULL) {
        i->print();
        return;
      }
      if (Interpreter::contains(addr)) {
        tty->print_cr(INTPTR_FORMAT " is pointing into interpreter code (not bytecode specific)", addr);
        return;
      }
      // the adapters are also generated into a buffer blob
      if (AdapterHandlerLibrary::contains(b)) {
        AdapterHandlerLibrary::print_handler(b);
      }
      // the stubroutines are generated into a buffer blob
      StubCodeDesc* d = StubCodeDesc::desc_for(addr);
      if (d != NULL) {
        d->print();
        if (print_pc) tty->cr();
        return;
      }
      if (StubRoutines::contains(addr)) {
        tty->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", addr);
        return;
      }
      // the InlineCacheBuffer is using stubs generated into a buffer blob
      if (InlineCacheBuffer::contains(addr)) {
        tty->print_cr(INTPTR_FORMAT " is pointing into InlineCacheBuffer", addr);
        return;
      }
      VtableStub* v = VtableStubs::stub_containing(addr);
      if (v != NULL) {
        v->print();
        return;
      }
    }
    if (print_pc && b->is_nmethod()) {
      ResourceMark rm;
      tty->print("%#p: Compiled ", addr);
      ((nmethod*)b)->method()->print_value_on(tty);
      tty->print("  = (CodeBlob*)" INTPTR_FORMAT, b);
      tty->cr();
      return;
    }
    if (b->is_nmethod()) {
      if (b->is_zombie()) {
        tty->print_cr(INTPTR_FORMAT " is zombie nmethod", b);
      } else if (b->is_not_entrant()) {
        tty->print_cr(INTPTR_FORMAT " is non-entrant nmethod", b);
      }
    }
    b->print();
    return;
  }

  if (Universe::heap()->is_in(addr)) {
    HeapWord* p = Universe::heap()->block_start(addr);
    bool print = false;
    // If we couldn't find it, it may just mean that the heap wasn't parseable.
    // See if we were just given an oop directly.
    if (p != NULL && Universe::heap()->block_is_obj(p)) {
      print = true;
    } else if (p == NULL && ((oopDesc*)addr)->is_oop()) {
      p = (HeapWord*) addr;
      print = true;
    }
    if (print) {
      oop(p)->print();
      if (p != (HeapWord*)x && oop(p)->is_constMethod() &&
          constMethodOop(p)->contains(addr)) {
        Thread *thread = Thread::current();
        HandleMark hm(thread);
        methodHandle mh (thread, constMethodOop(p)->method());
        if (!mh->is_native()) {
          tty->print_cr("bci_from(%p) = %d; print_codes():",
                        addr, mh->bci_from(address(x)));
          mh->print_codes();
        }
      }
      return;
    }
  } else if (Universe::heap()->is_in_reserved(addr)) {
    tty->print_cr(INTPTR_FORMAT " is an unallocated location in the heap", addr);
    return;
  }

  if (JNIHandles::is_global_handle((jobject) addr)) {
    tty->print_cr(INTPTR_FORMAT " is a global jni handle", addr);
    return;
  }
  if (JNIHandles::is_weak_global_handle((jobject) addr)) {
    tty->print_cr(INTPTR_FORMAT " is a weak global jni handle", addr);
    return;
  }
  if (JNIHandleBlock::any_contains((jobject) addr)) {
    tty->print_cr(INTPTR_FORMAT " is a local jni handle", addr);
    return;
  }

  for(JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
    // Check for privilege stack
    if (thread->privileged_stack_top() != NULL && thread->privileged_stack_top()->contains(addr)) {
      tty->print_cr(INTPTR_FORMAT " is pointing into the privilege stack for thread: " INTPTR_FORMAT, addr, thread);
      return;
    }
    // If the addr is a java thread print information about that.
    if (addr == (address)thread) {
       thread->print();
       return;
    }
  }

  // Try an OS specific find
  if (os::find(addr)) {
    return;
  }

  if (print_pc) {
    tty->print_cr(INTPTR_FORMAT ": probably in C++ code; check debugger", addr);
    Disassembler::decode(same_page(addr-40,addr),same_page(addr+40,addr));
    return;
  }

  tty->print_cr(INTPTR_FORMAT " is pointing to unknown location", addr);
}
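For reference, the upstream debug.cpp pairs this static helper with extern "C" entry points so it can be invoked by name from a native debugger (e.g. gdb's call find(0x...)); a sketch of those wrappers, reconstructed from memory of the upstream file and so to be treated as an assumption:

extern "C" void find(intptr_t x)   { find(x, false); }  // non-pc variant
extern "C" void findpc(intptr_t x) { find(x, true);  }  // pc variant, also disassembles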
Example #14
void pd_ps(frame f) {
  intptr_t* sp = f.sp();
  intptr_t* prev_sp = sp - 1;
  intptr_t *pc = NULL;
  intptr_t *next_pc = NULL;
  int count = 0;
  tty->print("register window backtrace from %#x:\n", sp);
  while (sp != NULL && ((intptr_t)sp & 7) == 0 && sp > prev_sp && sp < prev_sp+1000) {
    pc      = next_pc;
    next_pc = (intptr_t*) sp[I7->sp_offset_in_saved_window()];
    tty->print("[%d] sp=%#x pc=", count, sp);
    findpc((intptr_t)pc);
    if (WizardMode && Verbose) {
      // print register window contents also
      tty->print_cr("    L0..L7: {%#x %#x %#x %#x %#x %#x %#x %#x}",
		    sp[0+0],sp[0+1],sp[0+2],sp[0+3],
		    sp[0+4],sp[0+5],sp[0+6],sp[0+7]);
      tty->print_cr("    I0..I7: {%#x %#x %#x %#x %#x %#x %#x %#x}",
		    sp[8+0],sp[8+1],sp[8+2],sp[8+3],
		    sp[8+4],sp[8+5],sp[8+6],sp[8+7]);
      // (and print stack frame contents too??)

#ifndef CORE
      CodeBlob *b = CodeCache::find_blob((address) pc);
      if (b != NULL) {
	if (b->is_nmethod()) {
#ifdef COMPILER1
	  methodOop m = ((nmethod*)b)->method();
	  int nlocals = m->max_locals();
	  int nparams  = m->size_of_parameters();
	  tty->print_cr("compiled java method (locals = %d, params = %d)", nlocals, nparams);
	  
	  jint *fp = (jint *)sp[FP->sp_offset_in_saved_window()];

	  // print params
	  tty->print_cr("params:");
	  for (int p=nparams-1; p>=0; p--) {
	    tty->print_cr("  %8x:[fp+%3d]: %#x",fp+23+p,23+p,*(fp+23+p));
	  }

	  // print locals
	  tty->print_cr("locals:",nlocals);
	  for (int l=0; l<nlocals; l++) {	    
	    tty->print_cr("  %8x:[fp-%3d]: %#x",fp-(l+1),l+1,*(fp-(l+1)));
	  }
	  // print oops???
	  // print monitors???
	  // print spills???
#endif	  
	} else if (b->is_java_method()) {
	  tty->print_cr("interpreted java method");
	} else if (b->is_native_method()) {
	  tty->print_cr("native method");
	} else if (b->is_osr_method()) {
	  tty->print_cr("osr method");
	}	
      }
#endif // NOT CORE
    }
    prev_sp = sp;
    sp = (intptr_t *)sp[FP->sp_offset_in_saved_window()];
    sp = (intptr_t *)((intptr_t)sp + STACK_BIAS);
    count += 1;
  }
  if (sp != NULL)
    tty->print("[%d] sp=%#x [bogus sp!]", count, sp);
}
Example #15
nmethod* CodeCache::find_nmethod(void* start) {
  CodeBlob *cb = find_blob(start);
  assert(cb == NULL || cb->is_nmethod(), "did not find an nmethod");
  return (nmethod*)cb;
}
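A minimal call-site sketch (print_owner and return_pc are hypothetical names): mapping a code address back to its compiled method, under the assumption that the address is already known to lie inside an nmethod, since the assert above fires otherwise:

static void print_owner(address return_pc) {
  nmethod* nm = CodeCache::find_nmethod((void*)return_pc);
  if (nm != NULL) {
    nm->method()->print_short_name(tty);  // the owning Java method
    tty->cr();
  }
}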
Example #16
extern "C" JNIEXPORT int
JVM_handle_linux_signal(int sig,
                        siginfo_t* info,
                        void* ucVoid,
                        int abort_if_unrecognized) {
  ucontext_t* uc = (ucontext_t*) ucVoid;

  Thread* t = ThreadLocalStorage::get_thread_slow();

  // Must do this before SignalHandlerMark, if crash protection installed we will longjmp away
  // (no destructors can be run)
  os::WatcherThreadCrashProtection::check_crash_protection(sig, t);

  SignalHandlerMark shm(t);

  // Note: it's not uncommon that JNI code uses signal/sigset to install
  // then restore certain signal handler (e.g. to temporarily block SIGPIPE,
  // or have a SIGILL handler when detecting CPU type). When that happens,
  // JVM_handle_linux_signal() might be invoked with junk info/ucVoid. To
  // avoid unnecessary crashes when libjsig is not preloaded, try to handle
  // signals that do not require siginfo/ucontext first.

  if (sig == SIGPIPE || sig == SIGXFSZ) {
    // allow chained handler to go first
    if (os::Linux::chained_handler(sig, info, ucVoid)) {
      return true;
    } else {
      if (PrintMiscellaneous && (WizardMode || Verbose)) {
        char buf[64];
        warning("Ignoring %s - see bugs 4229104 or 646499219",
                os::exception_name(sig, buf, sizeof(buf)));
      }
      return true;
    }
  }

  JavaThread* thread = NULL;
  VMThread* vmthread = NULL;
  if (os::Linux::signal_handlers_are_installed) {
    if (t != NULL) {
      if (t->is_Java_thread()) {
        thread = (JavaThread*)t;
      } else if (t->is_VM_thread()) {
        vmthread = (VMThread*)t;
      }
    }
  }
/*
  NOTE: does not seem to work on linux.
  if (info == NULL || info->si_code <= 0 || info->si_code == SI_NOINFO) {
    // can't decode this kind of signal
    info = NULL;
  } else {
    assert(sig == info->si_signo, "bad siginfo");
  }
*/
  // decide if this trap can be handled by a stub
  address stub = NULL;

  address pc          = NULL;

  //%note os_trap_1
  if (info != NULL && uc != NULL && thread != NULL) {
    pc = (address) os::Linux::ucontext_get_pc(uc);

#ifdef BUILTIN_SIM
    if (pc == (address) Fetch32PFI) {
       uc->uc_mcontext.gregs[REG_PC] = intptr_t(Fetch32Resume) ;
       return 1 ;
    }
    if (pc == (address) FetchNPFI) {
       uc->uc_mcontext.gregs[REG_PC] = intptr_t (FetchNResume) ;
       return 1 ;
    }
#else
    if (StubRoutines::is_safefetch_fault(pc)) {
      uc->uc_mcontext.pc = intptr_t(StubRoutines::continuation_for_safefetch_fault(pc));
      return 1;
    }
#endif

#ifndef AMD64
    // Halt if SI_KERNEL before more crashes get misdiagnosed as Java bugs
    // This can happen in any running code (currently more frequently in
    // interpreter code but has been seen in compiled code)
    if (sig == SIGSEGV && info->si_addr == 0 && info->si_code == SI_KERNEL) {
      fatal("An irrecoverable SI_KERNEL SIGSEGV has occurred due "
            "to unstable signal handling in this distribution.");
    }
#endif // AMD64

    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV) {
      address addr = (address) info->si_addr;

      // check if fault address is within thread stack
      if (addr < thread->stack_base() &&
          addr >= thread->stack_base() - thread->stack_size()) {
        // stack overflow
        if (thread->in_stack_yellow_zone(addr)) {
          thread->disable_stack_yellow_zone();
          if (thread->thread_state() == _thread_in_Java) {
            // Throw a stack overflow exception.  Guard pages will be reenabled
            // while unwinding the stack.
            stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
          } else {
            // Thread was in the vm or native code.  Return and try to finish.
            return 1;
          }
        } else if (thread->in_stack_red_zone(addr)) {
          // Fatal red zone violation.  Disable the guard pages and fall through
          // to handle_unexpected_exception way down below.
          thread->disable_stack_red_zone();
          tty->print_raw_cr("An irrecoverable stack overflow has occurred.");

          // This is a likely cause, but hard to verify. Let's just print
          // it as a hint.
          tty->print_raw_cr("Please check if any of your loaded .so files has "
                            "enabled executable stack (see man page execstack(8))");
        } else {
          // Accessing stack address below sp may cause SEGV if current
          // thread has MAP_GROWSDOWN stack. This should only happen when
          // current thread was created by user code with MAP_GROWSDOWN flag
          // and then attached to VM. See notes in os_linux.cpp.
          if (thread->osthread()->expanding_stack() == 0) {
             thread->osthread()->set_expanding_stack();
             if (os::Linux::manually_expand_stack(thread, addr)) {
               thread->osthread()->clear_expanding_stack();
               return 1;
             }
             thread->osthread()->clear_expanding_stack();
          } else {
             fatal("recursive segv. expanding stack.");
          }
        }
      }
    }

    if (thread->thread_state() == _thread_in_Java) {
      // Java thread running in Java code => find exception handler if any
      // a fault inside compiled code, the interpreter, or a stub

      // Handle signal from NativeJump::patch_verified_entry().
      if ((sig == SIGILL || sig == SIGTRAP)
          && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant()) {
        if (TraceTraps) {
          tty->print_cr("trap: zombie_not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL");
        }
        stub = SharedRuntime::get_handle_wrong_method_stub();
      } else if (sig == SIGSEGV && os::is_poll_address((address)info->si_addr)) {
        stub = SharedRuntime::get_poll_stub(pc);
      } else if (sig == SIGBUS /* && info->si_code == BUS_OBJERR */) {
        // BugId 4454115: A read from a MappedByteBuffer can fault
        // here if the underlying file has been truncated.
        // Do not crash the VM in such a case.
        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
        nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL;
        if (nm != NULL && nm->has_unsafe_access()) {
          stub = handle_unsafe_access(thread, pc);
        }
      }
      else if (sig == SIGFPE &&
               (info->si_code == FPE_INTDIV || info->si_code == FPE_FLTDIV)) {
        stub = SharedRuntime::continuation_for_implicit_exception(
                 thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
      } else if (sig == SIGSEGV &&
               !MacroAssembler::needs_explicit_null_check((intptr_t)info->si_addr)) {
          // Determination of interpreter/vtable stub/compiled code null exception
          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
      }
    } else if (thread->thread_state() == _thread_in_vm &&
               sig == SIGBUS && /* info->si_code == BUS_OBJERR && */
               thread->doing_unsafe_access()) {
        stub = handle_unsafe_access(thread, pc);
    }

    // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC kicks in
    // and the heap gets shrunk before the field access.
    if ((sig == SIGSEGV) || (sig == SIGBUS)) {
      address addr = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr != (address)-1) {
        stub = addr;
      }
    }

    // Check to see if we caught the safepoint code in the
    // process of write protecting the memory serialization page.
    // It write enables the page immediately after protecting it
    // so we can just return to retry the write.
    if ((sig == SIGSEGV) &&
        os::is_memory_serialize_page(thread, (address) info->si_addr)) {
      // Block current thread until the memory serialize page permission restored.
      os::block_on_serialize_page_trap();
      return true;
    }
  }

  if (stub != NULL) {
    // save all thread context in case we need to restore it
    if (thread != NULL) thread->set_saved_exception_pc(pc);

#ifdef BUILTIN_SIM
    uc->uc_mcontext.gregs[REG_PC] = (greg_t)stub;
#else
    uc->uc_mcontext.pc = (__u64)stub;
#endif
    return true;
  }

  // signal-chaining
  if (os::Linux::chained_handler(sig, info, ucVoid)) {
     return true;
  }

  if (!abort_if_unrecognized) {
    // caller wants another chance, so give it to him
    return false;
  }

  if (pc == NULL && uc != NULL) {
    pc = os::Linux::ucontext_get_pc(uc);
  }

  // unmask current signal
  sigset_t newset;
  sigemptyset(&newset);
  sigaddset(&newset, sig);
  sigprocmask(SIG_UNBLOCK, &newset, NULL);

  VMError err(t, sig, pc, info, ucVoid);
  err.report_and_die();

  ShouldNotReachHere();
  return true; // mute compiler warning: report_and_die() does not return
}
Example #17
void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if cache entry gets invalid - we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  //
  // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
  // callsites. In addition, ic_miss code will update a site to monomorphic if it determines
  // that a monomorphic call to the interpreter can now be monomorphic to compiled code.
  //
  // In both of these cases the only thing being modified is the jump/call target and these
  // transitions are mt_safe.

  Thread *thread = Thread::current();
  if (info.to_interpreter()) {
    // Call to interpreter
    if (info.is_optimized() && is_optimized()) {
      assert(is_clean(), "unsafe IC path");
      MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
      // the call analysis (callee structure) specifies that the call is optimized
      // (either because of CHA or the static target is final)
      // At code generation time, this call has been emitted as static call
      // Call via stub
      assert(info.cached_metadata() != NULL && info.cached_metadata()->is_method(), "sanity check");
      CompiledStaticCall* csc = compiledStaticCall_at(instruction_address());
      methodHandle method (thread, (Method*)info.cached_metadata());
      csc->set_to_interpreted(method, info.entry());
      if (TraceICs) {
         ResourceMark rm(thread);
         tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s",
           p2i(instruction_address()),
           method->print_value_string());
      }
    } else {
      // Call via method-klass-holder
      InlineCacheBuffer::create_transition_stub(this, info.claim_cached_icholder(), info.entry());
      if (TraceICs) {
         ResourceMark rm(thread);
         tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder ", p2i(instruction_address()));
      }
    }
  } else {
    // Call to compiled code
    bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
    assert (cb->is_nmethod(), "must be compiled!");
#endif /* ASSERT */

    // This is MT safe if we come from a clean-cache and go through a
    // non-verified entry point
    bool safe = SafepointSynchronize::is_at_safepoint() ||
                (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));

    if (!safe) {
      InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry());
    } else {
      if (is_optimized()) {
        set_ic_destination(info.entry());
      } else {
        set_ic_destination_and_value(info.entry(), info.cached_metadata());
      }
    }

    if (TraceICs) {
      ResourceMark rm(thread);
      assert(info.cached_metadata() == NULL || info.cached_metadata()->is_klass(), "must be");
      tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass) %s: %s",
        p2i(instruction_address()),
        (info.cached_metadata() != NULL) ? ((Klass*)info.cached_metadata())->print_value_string() : "NULL",
        (safe) ? "" : "via stub");
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}