// VM operation: switch a thread into interpreter-only mode (used for
// JVMTI single stepping and similar) and deoptimize any compiled
// frames already on its stack so execution falls back to the
// interpreter.
void VM_EnterInterpOnlyMode::doit() {
  // Set up the current stack depth for later tracking
  _state->invalidate_cur_stack_depth();
  _state->enter_interp_only_mode();
  JavaThread *thread = _state->get_thread();
  if (thread->has_last_Java_frame()) {
    // If running in fullspeed mode, single stepping is implemented
    // as follows: first, the interpreter does not dispatch to
    // compiled code for threads that have single stepping enabled;
    // second, we deoptimize all methods on the thread's stack when
    // interpreted-only mode is enabled the first time for a given
    // thread (nothing to do if no Java frames yet).
    int num_marked = 0;
    ResourceMark resMark;
    RegisterMap rm(thread, false);
    // Walk all vframes on the thread's stack, marking each
    // deoptimizable compiled frame's nmethod.
    for (vframe* vf = thread->last_java_vframe(&rm); vf; vf = vf->sender()) {
      if (can_be_deoptimized(vf)) {
        ((compiledVFrame*) vf)->code()->mark_for_deoptimization();
        ++num_marked;
      }
    }
    if (num_marked > 0) {
      // Run a nested VM operation to actually deoptimize everything
      // marked above.
      VM_Deoptimize op;
      VMThread::execute(&op);
    }
  }
}
void InterfaceSupport::verify_last_frame() { JavaThread* thread = JavaThread::current(); ResourceMark rm(thread); RegisterMap reg_map(thread); frame fr = thread->last_frame(); fr.verify(®_map); }
// Capture a point-in-time snapshot of a JavaThread for thread dumps /
// management reporting: thread status, contention and wait statistics,
// and (when blocked/parked) the blocker object and its owner.
ThreadSnapshot::ThreadSnapshot(JavaThread* thread) {
  _thread = thread;
  _threadObj = thread->threadObj();
  _stack_trace = NULL;
  _concurrent_locks = NULL;
  _next = NULL;
  // Copy out the per-thread contention / wait / sleep counters.
  ThreadStatistics* stat = thread->get_thread_stat();
  _contended_enter_ticks = stat->contended_enter_ticks();
  _contended_enter_count = stat->contended_enter_count();
  _monitor_wait_ticks = stat->monitor_wait_ticks();
  _monitor_wait_count = stat->monitor_wait_count();
  _sleep_ticks = stat->sleep_ticks();
  _sleep_count = stat->sleep_count();
  _blocker_object = NULL;
  _blocker_object_owner = NULL;
  _thread_status = java_lang_Thread::get_thread_status(_threadObj);
  _is_ext_suspended = thread->is_being_ext_suspended();
  _is_in_native = (thread->thread_state() == _thread_in_native);
  // For monitor-related states, try to record which object the thread
  // is blocked/waiting on and who owns it.
  if (_thread_status == java_lang_Thread::BLOCKED_ON_MONITOR_ENTER ||
      _thread_status == java_lang_Thread::IN_OBJECT_WAIT ||
      _thread_status == java_lang_Thread::IN_OBJECT_WAIT_TIMED) {
    Handle obj = ThreadService::get_current_contended_monitor(thread);
    if (obj() == NULL) {
      // monitor no longer exists; thread is not blocked
      _thread_status = java_lang_Thread::RUNNABLE;
    } else {
      _blocker_object = obj();
      JavaThread* owner = ObjectSynchronizer::get_lock_owner(obj, false);
      if ((owner == NULL && _thread_status == java_lang_Thread::BLOCKED_ON_MONITOR_ENTER)
          || (owner != NULL && owner->is_attaching_via_jni())) {
        // ownership information of the monitor is not available
        // (may no longer be owned or releasing to some other thread)
        // make this thread in RUNNABLE state.
        // And when the owner thread is in attaching state, the java thread
        // is not completely initialized. For example thread name and id
        // and may not be set, so hide the attaching thread.
        _thread_status = java_lang_Thread::RUNNABLE;
        _blocker_object = NULL;
      } else if (owner != NULL) {
        _blocker_object_owner = owner->threadObj();
      }
    }
  }
  // Support for JSR-166 locks
  if (JDK_Version::current().supports_thread_park_blocker() &&
      (_thread_status == java_lang_Thread::PARKED ||
       _thread_status == java_lang_Thread::PARKED_TIMED)) {
    _blocker_object = thread->current_park_blocker();
    if (_blocker_object != NULL &&
        _blocker_object->is_a(SystemDictionary::abstract_ownable_synchronizer_klass())) {
      _blocker_object_owner =
        java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(_blocker_object);
    }
  }
}
void ThreadLocalAllocBuffer::resize_all_tlabs() { if (ResizeTLAB) { for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) { thread->tlab().resize(); } } }
// Create an exception then throws it. extern "C" void j3ThrowExceptionFromJIT() { JavaObject *exc = 0; llvm_gcroot(exc, 0); JavaThread *th = JavaThread::get(); JavaMethod* meth = th->getCallingMethodLevel(0); exc = th->getJVM()->CreateUnsatisfiedLinkError(meth); j3ThrowException(exc); }
void InterfaceSupport::walk_stack() { JavaThread* thread = JavaThread::current(); walk_stack_counter++; if (!thread->has_last_Java_frame()) return; ResourceMark rm(thread); RegisterMap reg_map(thread); walk_stack_from(thread->last_java_vframe(®_map)); }
// Returns true when the compiled frame that called into the runtime
// has already been deoptimized.
static bool caller_is_deopted() {
  JavaThread* thread = JavaThread::current();
  RegisterMap reg_map(thread, false);
  frame runtime_frame = thread->last_frame();
  // Fixed mojibake: "®_map" was an encoding corruption of "&reg_map".
  frame caller_frame = runtime_frame.sender(&reg_map);
  assert(caller_frame.is_compiled_frame(), "must be compiled");
  return caller_frame.is_deoptimized_frame();
}
// For Forte Analyzer AsyncGetCallTrace profiling support - thread is // currently interrupted by SIGPROF bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava) { assert(Thread::current() == this, "caller must be current thread"); assert(this->is_Java_thread(), "must be JavaThread"); JavaThread* jt = (JavaThread *)this; // If we have a last_Java_frame, then we should use it even if // isInJava == true. It should be more reliable than CONTEXT info. if (jt->has_last_Java_frame()) { *fr_addr = jt->pd_last_frame(); return true; } // At this point, we don't have a last_Java_frame, so // we try to glean some information out of the CONTEXT // if we were running Java code when SIGPROF came in. if (isInJava) { CONTEXT* uc = (CONTEXT*)ucontext; #ifdef AMD64 intptr_t* ret_fp = (intptr_t*) uc->Rbp; intptr_t* ret_sp = (intptr_t*) uc->Rsp; ExtendedPC addr = ExtendedPC((address)uc->Rip); #else intptr_t* ret_fp = (intptr_t*) uc->Ebp; intptr_t* ret_sp = (intptr_t*) uc->Esp; ExtendedPC addr = ExtendedPC((address)uc->Eip); #endif // AMD64 if (addr.pc() == NULL || ret_sp == NULL ) { // CONTEXT wasn't useful return false; } frame ret_frame(ret_sp, ret_fp, addr.pc()); if (!ret_frame.safe_for_sender(jt)) { #ifdef COMPILER2 // C2 uses ebp as a general register see if NULL fp helps frame ret_frame2(ret_sp, NULL, addr.pc()); if (!ret_frame2.safe_for_sender(jt)) { // nothing else to try if the frame isn't good return false; } ret_frame = ret_frame2; #else // nothing else to try if the frame isn't good return false; #endif /* COMPILER2 */ } *fr_addr = ret_frame; return true; } // nothing else to try return false; }
// Allocate a JNI local handle for obj in the thread that owns env.
// A NULL oop yields a NULL jobject with no handle allocated.
jobject JNIHandles::make_local(JNIEnv* env, oop obj) {
  if (obj == NULL) {
    // ignore null handles
    return NULL;
  }
  JavaThread* owner = JavaThread::thread_from_jni_environment(env);
  assert(Universe::heap()->is_in_reserved(obj), "sanity check");
  return owner->active_handles()->allocate_handle(obj);
}
// Does not call Java code. Can not yield a GC. extern "C" void j3EndJNI(uint32** oldLRN) { JavaThread* th = JavaThread::get(); // We're going back to Java th->endJNI(); // Update the number of references. th->currentAddedReferences = *oldLRN; }
// Recover the topmost Java frame for asynchronous profiling (SIGPROF).
// Returns true and stores the frame into *fr_addr when a usable frame
// can be found either from the thread anchor or the signal ucontext.
bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) {
  assert(this->is_Java_thread(), "must be JavaThread");
  JavaThread* jt = (JavaThread *)this;
  // If we have a last_Java_frame, then we should use it even if
  // isInJava == true. It should be more reliable than ucontext info.
  if (jt->has_last_Java_frame() && jt->frame_anchor()->walkable()) {
    *fr_addr = jt->pd_last_frame();
    return true;
  }
  // At this point, we don't have a last_Java_frame, so
  // we try to glean some information out of the ucontext
  // if we were running Java code when SIGPROF came in.
  if (isInJava) {
    ucontext_t* uc = (ucontext_t*) ucontext;
    intptr_t* ret_fp;
    intptr_t* ret_sp;
    ExtendedPC addr = os::Bsd::fetch_frame_from_ucontext(this, uc, &ret_sp, &ret_fp);
    if (addr.pc() == NULL || ret_sp == NULL ) {
      // ucontext wasn't useful
      return false;
    }
#if INCLUDE_CDS
    if (UseSharedSpaces && MetaspaceShared::is_in_shared_region(addr.pc(), MetaspaceShared::md)) {
      // In the middle of a trampoline call. Bail out for safety.
      // This happens rarely so shouldn't affect profiling.
      return false;
    }
#endif
    // Build a candidate frame from the captured registers and validate
    // it before handing it to the profiler.
    frame ret_frame(ret_sp, ret_fp, addr.pc());
    if (!ret_frame.safe_for_sender(jt)) {
#if defined(COMPILER2) || INCLUDE_JVMCI
      // C2 and JVMCI use ebp as a general register see if NULL fp helps
      frame ret_frame2(ret_sp, NULL, addr.pc());
      if (!ret_frame2.safe_for_sender(jt)) {
        // nothing else to try if the frame isn't good
        return false;
      }
      ret_frame = ret_frame2;
#else
      // nothing else to try if the frame isn't good
      return false;
#endif /* COMPILER2 || INCLUDE_JVMCI */
    }
    *fr_addr = ret_frame;
    return true;
  }
  // nothing else to try
  return false;
}
// Entry point for all Java calls from the VM. Validates the calling
// context before dispatching to Java code.
// NOTE(review): this definition is truncated in the visible source —
// only the entry asserts are present; the call dispatch body is not
// shown here.
void JavaCalls::call_helper(JavaValue* result, methodHandle* m, JavaCallArguments* args, TRAPS) {
  methodHandle method = *m;
  JavaThread* thread = (JavaThread*)THREAD;
  // Java calls may only be made by a fully-initialized JavaThread,
  // never during a safepoint VM operation, and never while a
  // no-handle-mark is active.
  assert(thread->is_Java_thread(), "must be called by a java thread");
  assert(method.not_null(), "must have a method to call");
  assert(!SafepointSynchronize::is_at_safepoint(), "call to Java code during VM operation");
  assert(!thread->handle_area()->no_handle_mark_active(), "cannot call out to Java here");
  CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
void DirtyCardQueueSet::abandon_logs() { assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint."); clear(); // Since abandon is done only at safepoints, we can safely manipulate // these queues. for (JavaThread* t = Threads::first(); t; t = t->next()) { t->dirty_card_queue().reset(); } shared_dirty_card_queue()->reset(); }
void VM_ThreadStop::doit() { assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint"); JavaThread* target = java_lang_Thread::thread(target_thread()); // Note that this now allows multiple ThreadDeath exceptions to be // thrown at a thread. if (target != NULL) { // the thread has run and is not already in the process of exiting target->send_thread_stop(throwable()); } }
// Flush registers to stack. In case of error we will need to stack walk. address bootstrap_flush_windows(void) { Thread* thread = ThreadLocalStorage::get_thread_slow(); // Very early in process there is no thread. if (thread != NULL) { guarantee(thread->is_Java_thread(), "Not a Java thread."); JavaThread* jt = (JavaThread*)thread; guarantee(!jt->has_last_Java_frame(), "Must be able to flush registers!"); } return (address)_flush_reg_windows(); };
// Stress deoptimization static void deopt_caller() { if ( !caller_is_deopted()) { JavaThread* thread = JavaThread::current(); RegisterMap reg_map(thread, false); frame runtime_frame = thread->last_frame(); frame caller_frame = runtime_frame.sender(®_map); Deoptimization::deoptimize_frame(thread, caller_frame.id()); assert(caller_is_deopted(), "Must be deoptimized"); } }
// VM operation implementing Thread.stop(): deliver the throwable as an
// asynchronous exception to the target thread.
void VM_ThreadStop::doit() {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  JavaThread* target = java_lang_Thread::thread(target_thread());
  // Note that this now allows multiple ThreadDeath exceptions to be
  // thrown at a thread.
  if (target != NULL) {
    // the thread has run and is not already in the process of exiting
    // NOTE(review): debug trace below writes directly to stdout; the
    // message text (Chinese: "attempting to terminate thread [...]") is
    // runtime output and is left untouched — consider removing it or
    // routing it through the VM's tty/logging facility.
    printf("%s[%d] [tid: %lu]: 试图终止线程[%s]...\n", __FILE__, __LINE__, pthread_self(), target->name());
    target->send_thread_stop(throwable());
  }
}
extern "C" void psf() { // print stack frames { Command c("psf"); JavaThread* p = JavaThread::active(); tty->print(" for thread: "); p->print(); tty->cr(); if (p->has_last_Java_frame()) { p->trace_frames(); } } }
// Remove num_slots stack slots starting at first_slot by sliding the
// slots below first_slot up over them, then shrinking the Zero stack.
void CppInterpreter::remove_vmslots(int first_slot, int num_slots, TRAPS) {
  JavaThread *thread = (JavaThread *) THREAD;
  ZeroStack *stack = thread->zero_stack();
  // NOTE: the SET_VMSLOTS_SLOT / VMSLOTS_SLOT macros presumably index
  // relative to this local — TODO confirm against the macro definitions.
  intptr_t *vmslots = stack->sp();
  // Move everything down
  for (int i = first_slot - 1; i >= 0; i--)
    SET_VMSLOTS_SLOT(VMSLOTS_SLOT(i), i + num_slots);
  // Deallocate the space
  stack->set_sp(stack->sp() + num_slots);
}
// Record a SATB pre-barrier value. Java threads use their private SATB
// queue; all other threads share one queue guarded by a lock.
void G1SATBCardTableModRefBS::enqueue(oop pre_val) {
  assert(pre_val->is_oop_or_null(true), "Error");
  if (!JavaThread::satb_mark_queue_set().is_active()) return;
  Thread* current = Thread::current();
  if (!current->is_Java_thread()) {
    // Non-Java threads funnel through the shared queue under a lock.
    MutexLocker x(Shared_SATB_Q_lock);
    JavaThread::satb_mark_queue_set().shared_satb_queue()->enqueue(pre_val);
  } else {
    ((JavaThread*)current)->satb_mark_queue().enqueue(pre_val);
  }
}
void DirtyCardQueueSet::iterate_closure_all_threads(bool consume, size_t worker_i) { assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint."); for(JavaThread* t = Threads::first(); t; t = t->next()) { bool b = t->dirty_card_queue().apply_closure(_closure, consume); guarantee(b, "Should not be interrupted."); } bool b = shared_dirty_card_queue()->apply_closure(_closure, consume, worker_i); guarantee(b, "Should not be interrupted."); }
// walk all 'known' threads at NMT sync point, and collect their recorders void SyncThreadRecorderClosure::do_thread(Thread* thread) { assert(SafepointSynchronize::is_at_safepoint(), "Safepoint required"); if (thread->is_Java_thread()) { JavaThread* javaThread = (JavaThread*)thread; MemRecorder* recorder = javaThread->get_recorder(); if (recorder != NULL) { MemTracker::enqueue_pending_recorder(recorder); javaThread->set_recorder(NULL); } } _thread_count ++; }
// Native entry: return the first non-null class loader found on the
// current thread's stack (wrapped in native exception handling).
extern "C" JavaObject* nativeFirstNonNullClassLoader() {
  JavaObject* loader = 0;
  llvm_gcroot(loader, 0);
  BEGIN_NATIVE_EXCEPTION(0)
  loader = JavaThread::get()->getNonNullClassLoader();
  END_NATIVE_EXCEPTION
  return loader;
}
// Standard interpreter entry: build and push a Zero interpreter frame
// for method, then run its bytecodes. Always returns 0.
int CppInterpreter::normal_entry(methodOop method, intptr_t UNUSED, TRAPS) {
  JavaThread *thread = (JavaThread *) THREAD;
  // Allocate and initialize our frame.
  // CHECK_0 propagates a pending exception by returning 0.
  InterpreterFrame *frame = InterpreterFrame::build(method, CHECK_0);
  thread->push_zero_frame(frame);
  // Execute those bytecodes!
  main_loop(0, THREAD);
  // No deoptimized frames on the stack
  return 0;
}
// JNI entry for gnu.classpath.VMStackWalker.firstNonNullClassLoader():
// return the first non-null class loader on the current thread's stack.
extern "C" JavaObject* Java_gnu_classpath_VMStackWalker_firstNonNullClassLoader__() {
  JavaObject* loader = 0;
  llvm_gcroot(loader, 0);
  BEGIN_NATIVE_EXCEPTION(0)
  loader = JavaThread::get()->getNonNullClassLoader();
  END_NATIVE_EXCEPTION
  return loader;
}
// The new slots will be inserted before slot insert_before. // Slots < insert_before will have the same slot number after the insert. // Slots >= insert_before will become old_slot + num_slots. void CppInterpreter::insert_vmslots(int insert_before, int num_slots, TRAPS) { JavaThread *thread = (JavaThread *) THREAD; ZeroStack *stack = thread->zero_stack(); // Allocate the space stack->overflow_check(num_slots, CHECK); stack->alloc(num_slots * wordSize); intptr_t *vmslots = stack->sp(); // Shuffle everything up for (int i = 0; i < insert_before; i++) SET_VMSLOTS_SLOT(VMSLOTS_SLOT(i + num_slots), i); }
void IdealGraphPrinter::clean_up() { JavaThread *p; for (p = Threads::first(); p; p = p->next()) { if (p->is_Compiler_thread()) { CompilerThread *c = (CompilerThread *)p; IdealGraphPrinter *printer = c->ideal_graph_printer(); if (printer) { delete printer; } c->set_ideal_graph_printer(NULL); } } }
// Entry point for all Java calls from the VM. Validates the calling
// context before dispatching to Java code.
// NOTE(review): this definition is truncated in the visible source —
// only the entry asserts are present; the call dispatch body is not
// shown here.
void JavaCalls::call_helper(JavaValue* result, const methodHandle& method, JavaCallArguments* args, TRAPS) {
  // During dumping, Java execution environment is not fully initialized. Also, Java execution
  // may cause undesirable side-effects in the class metadata.
  assert(!DumpSharedSpaces, "must not execute Java bytecodes when dumping");
  JavaThread* thread = (JavaThread*)THREAD;
  // Java calls may only be made by a fully-initialized JavaThread,
  // never during a safepoint VM operation, and never while a
  // no-handle-mark is active.
  assert(thread->is_Java_thread(), "must be called by a java thread");
  assert(method.not_null(), "must have a method to call");
  assert(!SafepointSynchronize::is_at_safepoint(), "call to Java code during VM operation");
  assert(!thread->handle_area()->no_handle_mark_active(), "cannot call out to Java here");
  CHECK_UNHANDLED_OOPS_ONLY(thread->clear_unhandled_oops();)
// Mark the start of a profiled region: remember the region id and, if
// the current thread is a Java thread with a profiler, set the
// region's flag.
ThreadProfilerMark::ThreadProfilerMark(ThreadProfilerMark::Region r) {
  _r = r;
  _pp = NULL;
  assert(((r > ThreadProfilerMark::noRegion) && (r < ThreadProfilerMark::maxRegion)), "ThreadProfilerMark::Region out of bounds");
  Thread* current = Thread::current();
  if (current == NULL || !current->is_Java_thread()) {
    return;
  }
  ThreadProfiler* profiler = ((JavaThread*)current)->get_thread_profiler();
  _pp = profiler;
  if (profiler != NULL) {
    profiler->region_flag[r] = true;
  }
}
// Handle a fault raised by an Unsafe memory access: post an async
// error on the thread and return the address where execution resumes.
static address handle_unsafe_access() {
  JavaThread* thread = JavaThread::current();
  // fault_pc is the instruction which we must emulate; doing a no-op
  // is fine: return garbage from the load. Therefore, compute the
  // address of the next instruction and resume there.
  address fault_pc = thread->saved_exception_pc();
  address resume_pc = Assembler::locate_next_instruction(fault_pc);
  // request an async exception
  thread->set_pending_unsafe_access_error();
  // return address of next instruction to execute
  return resume_pc;
}