/**
 * Finds the native (JNI) implementation of the given method via the
 * class loader. Suspension is enabled around the lookup — presumably
 * because class-loader lookup may block (TODO confirm).
 *
 * @param[in] method method whose native code is looked up
 * @return pointer to the native function, or whatever
 *         classloader_find_native() reports on failure
 */
GenericFunctionPointer interp_find_native(Method_Handle method)
{
    hythread_suspend_enable();
    GenericFunctionPointer func = classloader_find_native((Method_Handle) method);
    hythread_suspend_disable();
    return func;
}
/**
 * Initializes Java monitor.
 *
 * Monitor is a recursive lock with one conditional variable associated
 * with it. Implementation may use the knowledge of internal object layout
 * in order to allocate lock and conditional variable in the most
 * efficient manner.
 *
 * @param[in] monitor object where monitor needs to be initialized.
 * @return result of hythread_thin_monitor_create()
 */
IDATA VMCALL jthread_monitor_init(jobject monitor)
{
    assert(monitor);

    // The thin-monitor word lives inside the object header; GC must not
    // move the object while its address is in use.
    hythread_suspend_disable();
    hythread_thin_monitor_t *lock_word = vm_object_get_lockword_addr(monitor);
    IDATA result = hythread_thin_monitor_create(lock_word);
    hythread_suspend_enable();

    return result;
} // jthread_monitor_init
void interp_throw_exception(const char* exc_name, const char* exc_message) { M2N_ALLOC_MACRO; assert(!hythread_is_suspend_enabled()); hythread_suspend_enable(); assert(hythread_is_suspend_enabled()); jthrowable exc_object = exn_create(exc_name, exc_message); exn_raise_object(exc_object); hythread_suspend_disable(); M2N_FREE_MACRO; }
/**
 * Returns true if specified thread holds the lock associated with the
 * given monitor.
 *
 * @param[in] thread  thread which may hold the lock
 * @param[in] monitor object whose monitor is possibly locked
 * @return true if thread holds the lock, false otherwise
 */
jboolean VMCALL jthread_holds_lock(jthread thread, jobject monitor)
{
    jthread lock_owner;
    IDATA status = jthread_get_lock_owner(monitor, &lock_owner);
    assert(status == TM_ERROR_NONE);
    // Fix: the assert above is compiled out in release builds; report
    // "not held" explicitly instead of silently ignoring the failure.
    if (status != TM_ERROR_NONE) {
        return (jboolean) 0;
    }

    // Object comparison touches raw managed pointers - keep GC disabled.
    hythread_suspend_disable();
    jboolean result = vm_objects_are_equal(thread, lock_owner);
    hythread_suspend_enable();

    return result;
} // jthread_holds_lock
/**
 * Completely releases the ownership over monitor.
 *
 * @param[in] monitor monitor
 * @return TM_ERROR_NONE on success, or the hythread error code from
 *         hythread_thin_monitor_release()
 */
IDATA VMCALL jthread_monitor_release(jobject monitor)
{
    assert(monitor);

    hythread_suspend_disable();
    hythread_thin_monitor_t *lockword = vm_object_get_lockword_addr(monitor);
    IDATA status = hythread_thin_monitor_release(lockword);
    assert(status == TM_ERROR_NONE);
    hythread_suspend_enable();

    // Fix: propagate the real status instead of unconditionally returning
    // TM_ERROR_NONE - the assert disappears in release builds, and the
    // sibling jthread_monitor_exit() already returns its status.
    return status;
} // jthread_monitor_release
// Test-thread entry point used by the suspend tests.
// Protocol: (1) enter monitor_p; (2) under lock_p, bump the global
// started_thread_count and notify the main thread; (3) block in a wait
// on monitor_p; (4) once notified AND resumed, bump the completion
// counter stored in args[2] and exit.
// args layout: [0] = start-handshake monitor, [1] = wait monitor,
// [2] = completion counter, read in place as an IDATA.
int start_proc(void *args)
{
    hythread_thin_monitor_t *lock_p = (hythread_thin_monitor_t*)((void**)args)[0];
    hythread_thin_monitor_t *monitor_p = (hythread_thin_monitor_t*)((void**)args)[1];
    // The third args slot doubles as the counter itself (not a pointer).
    IDATA *ret = (IDATA*)&(((void**)args)[2]);
    IDATA status;

    // wait to start
    hythread_suspend_disable();
    status = hythread_thin_monitor_enter(monitor_p);
    if (status != TM_ERROR_NONE) {
        // Re-enable suspension before the assert macro returns early.
        hythread_suspend_enable();
        tf_assert_same(status, TM_ERROR_NONE);
    }

    // notify main thread about thread start
    status = hythread_thin_monitor_enter(lock_p);
    if (status != TM_ERROR_NONE) {
        hythread_suspend_enable();
        tf_assert_same(status, TM_ERROR_NONE);
    }
    started_thread_count++;
    status = hythread_thin_monitor_notify(lock_p);
    if (status != TM_ERROR_NONE) {
        hythread_suspend_enable();
        tf_assert_same(status, TM_ERROR_NONE);
    }
    status = hythread_thin_monitor_exit(lock_p);
    if (status != TM_ERROR_NONE) {
        hythread_suspend_enable();
        tf_assert_same(status, TM_ERROR_NONE);
    }

    // fall to infinite wait
    status = hythread_thin_monitor_wait(monitor_p);
    if (status != TM_ERROR_NONE) {
        hythread_suspend_enable();
        tf_assert_same(status, TM_ERROR_NONE);
    }

    // Signal completion only after the wait has returned.
    (*ret)++;

    status = hythread_thin_monitor_exit(monitor_p);
    if (status != TM_ERROR_NONE) {
        hythread_suspend_enable();
        tf_assert_same(status, TM_ERROR_NONE);
    }
    hythread_suspend_enable();
    return 0;
}
/**
 * Attempts to gain ownership over the monitor without blocking.
 *
 * @param[in] monitor object where monitor is located
 * @return TM_ERROR_NONE if the monitor was acquired, otherwise the
 *         status from hythread_thin_monitor_try_enter()
 */
IDATA VMCALL jthread_monitor_try_enter(jobject monitor)
{
    assert(monitor);

    hythread_suspend_disable();
    IDATA result = hythread_thin_monitor_try_enter(
            vm_object_get_lockword_addr(monitor));
    hythread_suspend_enable();

    // Track ownership for JVMTI only on a successful acquisition.
    if (result == TM_ERROR_NONE && ti_is_enabled()) {
        jthread_add_owned_monitor(monitor);
    }
    return result;
} // jthread_monitor_try_enter
/**
 * Forces a collection whose flavor is selected by the global
 * gc_algorithm knob, then gives finalization a chance to run.
 */
void select_force_gc()
{
    vm_gc_lock_enum();
    if (gc_algorithm < 10) {
        force_gc();
    } else {
        // Tens digit of gc_algorithm selects the collector family.
        int family = gc_algorithm / 10;
        if (family == 2) {
            full_gc(0);
        } else if (family == 3) {
            heap.old_objects.prev_pos = heap.old_objects.pos;
            copy_gc(0);
        }
    }
    vm_gc_unlock_enum();

    hythread_suspend_disable();
    vm_hint_finalize();
    hythread_suspend_enable();
}
/**
 * Resolves the class referenced by a NEW opcode from the interpreter.
 * On failure, raises a linking error unless an exception is already
 * pending.
 *
 * @param[in] clazz   class whose constant pool holds the reference
 * @param[in] classId constant-pool index of the class reference
 * @return resolved class, or NULL with an exception raised
 */
Class* interp_resolve_class_new(Class *clazz, int classId)
{
    assert(!hythread_is_suspend_enabled());

    Compilation_Handle ch;
    ch.env = VM_Global_State::loader_env;
    ch.jit = 0;

    // Resolution may load classes; it must run with suspension enabled.
    hythread_suspend_enable();
    Class *result = resolve_class_new((Compile_Handle*)&ch, clazz, classId);
    hythread_suspend_disable();

    if (!result && !exn_raised()) {
        class_throw_linking_error_for_interpreter(clazz, classId, OPCODE_NEW);
    }
    return result;
}
/**
 * Resolves the method referenced by an INVOKESPECIAL opcode from the
 * interpreter. On failure, raises a linking error unless an exception
 * is already pending.
 *
 * @param[in] clazz    class whose constant pool holds the reference
 * @param[in] methodId constant-pool index of the method reference
 * @return resolved method, or NULL with an exception raised
 */
Method *interp_resolve_special_method(Class *clazz, int methodId)
{
    assert(!hythread_is_suspend_enabled());

    Compilation_Handle ch;
    ch.env = VM_Global_State::loader_env;
    ch.jit = 0;

    // Resolution may load classes; it must run with suspension enabled.
    hythread_suspend_enable();
    Method *result = resolve_special_method((Compile_Handle*)&ch, clazz, methodId);
    hythread_suspend_disable();

    if (!result && !exn_raised()) {
        class_throw_linking_error_for_interpreter(clazz, methodId,
                OPCODE_INVOKESPECIAL);
    }
    return result;
}
/**
 * Stores a native thread-manager pointer into the "vm_thread" (long)
 * field of a java.lang.Thread object.
 *
 * @param[in] thread java.lang.Thread object
 * @param[in] val    pointer value to store
 */
void jthread_set_tm_data(jobject thread, void *val)
{
    // Field offset is resolved lazily once and cached; (unsigned)-1
    // means "not resolved yet".
    static unsigned offset = (unsigned)-1;

    hythread_suspend_disable();
    ManagedObject *java_thread = ((ObjectHandle) thread)->object;
    if (offset == -1) {
        Field *field = class_lookup_field_recursive(
                java_thread->vt()->clss, "vm_thread", "J");
        offset = field->get_offset();
    }
    // Write while GC is disabled - the object must not move.
    *(jlong*)((U_8*)java_thread + offset) = (jlong) (POINTER_SIZE_INT) val;
    hythread_suspend_enable();
} // jthread_set_tm_data
/**
 * Resolves the field referenced by a GETSTATIC/PUTSTATIC opcode from
 * the interpreter. On failure, raises a linking error unless an
 * exception is already pending.
 *
 * @param[in] clazz    class whose constant pool holds the reference
 * @param[in] fieldId  constant-pool index of the field reference
 * @param[in] putfield true for PUTSTATIC, false for GETSTATIC
 * @return resolved field, or NULL with an exception raised
 */
Field* interp_resolve_static_field(Class *clazz, int fieldId, bool putfield)
{
    assert(!hythread_is_suspend_enabled());

    Compilation_Handle ch;
    ch.env = VM_Global_State::loader_env;
    ch.jit = 0;

    // Resolution may load classes; it must run with suspension enabled.
    hythread_suspend_enable();
    Field *result = resolve_static_field((Compile_Handle*)&ch,
            clazz, fieldId, putfield);
    hythread_suspend_disable();

    if (!result && !exn_raised()) {
        class_throw_linking_error_for_interpreter(clazz, fieldId,
                putfield ? OPCODE_PUTSTATIC : OPCODE_GETSTATIC);
    }
    return result;
}
/**
 * Returns the list of all Java threads.
 *
 * The caller owns the returned array and is responsible for freeing it.
 *
 * @param[out] threads   resulting threads list
 * @param[out] count_ptr number of threads in the resulting list
 * @return TM_ERROR_NONE on success, TM_ERROR_OUT_OF_MEMORY if the
 *         result array cannot be allocated
 */
IDATA VMCALL jthread_get_all_threads(jthread ** threads, jint * count_ptr)
{
    assert(threads);
    assert(count_ptr);

    hythread_group_t java_thread_group = get_java_thread_group();
    assert(java_thread_group);
    hythread_iterator_t iterator = hythread_iterator_create(java_thread_group);
    IDATA count = hythread_iterator_size(iterator);

    // First pass: count native threads that carry an attached Java thread.
    IDATA java_thread_count = 0;
    for (IDATA i = 0; i < count; i++) {
        hythread_t native_thread = hythread_iterator_next(&iterator);
        vm_thread_t vm_thread = jthread_get_vm_thread(native_thread);
        if (vm_thread && vm_thread->java_thread) {
            java_thread_count++;
        }
    }

    jthread *java_threads =
        (jthread*)malloc(sizeof(jthread) * java_thread_count);
    // Fix: malloc(0) may legally return NULL; report OOM only when a
    // non-empty array was actually requested.
    if (!java_threads && java_thread_count > 0) {
        hythread_iterator_release(&iterator);
        return TM_ERROR_OUT_OF_MEMORY;
    }

    // Second pass: wrap each live Java thread into a fresh local handle.
    hythread_iterator_reset(&iterator);
    java_thread_count = 0;
    for (IDATA i = 0; i < count; i++) {
        hythread_t native_thread = hythread_iterator_next(&iterator);
        vm_thread_t vm_thread = jthread_get_vm_thread(native_thread);
        if (vm_thread && vm_thread->java_thread) {
            // Handle allocation manipulates raw managed pointers -
            // GC must be disabled around it.
            hythread_suspend_disable();
            ObjectHandle thr = oh_allocate_local_handle();
            assert(thr);
            thr->object = vm_thread->java_thread->object;
            assert(thr->object);
            hythread_suspend_enable();
            java_threads[java_thread_count++] = thr;
        }
    }

    *threads = java_threads;
    *count_ptr = (jint)java_thread_count;
    IDATA status = hythread_iterator_release(&iterator);
    return status;
} // jthread_get_all_threads
/**
 * Releases the ownership over monitor.
 *
 * @param[in] monitor monitor
 * @return TM_ERROR_NONE on success; TM_ERROR_ILLEGAL_STATE (with an
 *         IllegalMonitorStateException raised) if the caller does not
 *         own the monitor
 * @sa JNI::MonitorExit()
 */
IDATA VMCALL jthread_monitor_exit(jobject monitor)
{
    assert(monitor);

    hythread_suspend_disable();
    IDATA status = hythread_thin_monitor_exit(
            vm_object_get_lockword_addr(monitor));
    hythread_suspend_enable();

    if (status == TM_ERROR_NONE) {
        // Keep the JVMTI owned-monitor list in sync.
        if (ti_is_enabled()) {
            jthread_remove_owned_monitor(monitor);
        }
    } else if (status == TM_ERROR_ILLEGAL_STATE) {
        jthread_throw_exception("java/lang/IllegalMonitorStateException",
                "Illegal monitor state");
    }
    return status;
} // jthread_monitor_exit
/**
 * Reads the native thread-manager pointer stored in the "vm_thread"
 * (long) field of a java.lang.Thread object.
 *
 * @param[in] thread java.lang.Thread object
 * @return the value previously stored with jthread_set_tm_data()
 */
void* jthread_get_tm_data(jobject thread)
{
    // Field offset is resolved lazily once and cached; -1 means
    // "not resolved yet".
    static int offset = -1;

    hythread_suspend_disable();
    ManagedObject *thread_obj = ((ObjectHandle) thread)->object;
    if (offset == -1) {
        Class *clazz = thread_obj->vt()->clss;
        Field *field = class_lookup_field_recursive(clazz, "vm_thread", "J");
        offset = field->get_offset();
    }
    U_8* java_ref = (U_8*)thread_obj;
    // Fix: dereference the field while GC is still disabled; the managed
    // object may be moved once suspension is re-enabled, which would
    // leave the raw field pointer dangling.
    void* data = *(void**)(java_ref + offset);
    hythread_suspend_enable();

    return data;
} // jthread_get_tm_data
/**
 * Throws (or raises) the given exception object in the current thread,
 * choosing the mechanism appropriate for the execution engine.
 *
 * @param[in] object exception object to throw
 * @return 0 always
 */
IDATA jthread_throw_exception_object(jobject object)
{
    if (interpreter_enabled()) {
        // FIXME - Function set_current_thread_exception does the same
        // actions as exn_raise_object, and it should be replaced.
        hythread_suspend_disable();
        set_current_thread_exception(object->object);
        hythread_suspend_enable();
        return 0;
    }

    if (is_unwindable()) {
        exn_throw_object(object);
    } else {
        ASSERT_RAISE_AREA;
        exn_raise_object(object);
    }
    return 0;
}
/**
 * Runs java.lang.Thread.detach() method.
 *
 * Looks up Thread.detach(Throwable) once (cached in a static), then
 * invokes it passing the currently pending exception - or NULL when the
 * VM is shutting down - as the argument.
 *
 * @param[in] java_thread the thread to detach
 * @return TM_ERROR_NONE on success; TM_ERROR_INTERNAL if the method
 *         cannot be found or completes with an exception
 */
static jint run_java_detach(jthread java_thread)
{
    assert(hythread_is_suspend_enabled());

    JNIEnv *jni_env = jthread_get_JNI_env(java_thread);
    Global_Env *vm_env = jni_get_vm_env(jni_env);
    Class *thread_class = vm_env->java_lang_Thread_Class;

    // Method* is resolved once and reused for every subsequent detach.
    static Method *detach = NULL;
    if (detach == NULL) {
        const char *method_name = "detach";
        const char *descriptor = "(Ljava/lang/Throwable;)V";
        detach = class_lookup_method(thread_class, method_name, descriptor);
        if (detach == NULL) {
            TRACE("Failed to find thread's detach method " << descriptor
                << " , exception = " << exn_get());
            return TM_ERROR_INTERNAL;
        }
    }

    // Initialize arguments.
    jvalue args[2];
    args[0].l = java_thread;
    if (vm_env->IsVmShutdowning()) {
        // During shutdown no Throwable is passed to detach().
        args[1].l = NULL;
    } else {
        args[1].l = exn_get();
    }
    // Clear the pending exception before re-entering Java code.
    exn_clear();

    hythread_suspend_disable();
    vm_execute_java_method_array((jmethodID) detach, 0, args);
    hythread_suspend_enable();

    if (exn_raised()) {
        TRACE
            ("java.lang.Thread.detach(Throwable) method completed with an exception: "
             << exn_get_name());
        return TM_ERROR_INTERNAL;
    }
    return TM_ERROR_NONE;
}
/**
 * Returns the number of times the given thread has entered the monitor.
 *
 * Returns 0 when the monitor is unowned, or - if `owner` is non-NULL -
 * when it is owned by a different thread.
 *
 * @param[in] monitor monitor whose recursion count is queried
 * @param[in] owner   thread expected to own the monitor, or NULL for
 *                    "whichever thread currently owns it"
 */
IDATA VMCALL jthread_get_lock_recursion(jobject monitor, jthread owner)
{
    assert(monitor);

    hythread_t expected = owner ? jthread_get_native_thread(owner) : NULL;

    hythread_suspend_disable();
    hythread_thin_monitor_t *lockword = vm_object_get_lockword_addr(monitor);
    hythread_t holder = hythread_thin_monitor_get_owner(lockword);

    IDATA recursion = 0;
    if (holder
        && (!expected
            || hythread_get_id(holder) == hythread_get_id(expected)))
    {
        recursion = hythread_thin_monitor_get_recursion(lockword);
    }
    hythread_suspend_enable();

    return recursion;
} // jthread_get_lock_recursion
/**
 * JVMTI GetLocalObject support for the interpreter: reads the object
 * local variable at `slot` in the frame `depth` frames up the stack of
 * `thread`, and returns it via *value_ptr wrapped in a new local handle.
 *
 * @return JVMTI_ERROR_NONE, or the JVMTI error code describing why the
 *         local variable cannot be read
 */
jvmtiError interpreter_ti_getObject(
        jvmtiEnv* env,
        VM_thread *thread,
        jint depth,
        jint slot,
        jobject* value_ptr)
{
    StackFrame *frame;

    // check error condition: JVMTI_ERROR_NULL_POINTER
    if( value_ptr == NULL ) return JVMTI_ERROR_NULL_POINTER;

    // check error condition: JVMTI_ERROR_NO_MORE_FRAMES
    // check error condition: JVMTI_ERROR_OPAQUE_FRAME
    // check error condition: JVMTI_ERROR_INVALID_SLOT
    jvmtiError err = interpreter_ti_getLocalCommon(env, thread, depth, slot, &frame);
    if (err != JVMTI_ERROR_NONE) return err;

    // TODO: check error condition: JVMTI_ERROR_TYPE_MISMATCH
    // partial check error condition: JVMTI_ERROR_TYPE_MISMATCH
    // The slot must be flagged as holding a reference.
    if (frame->locals.ref(slot) == 0) {
        return JVMTI_ERROR_TYPE_MISMATCH;
    }

    assert(hythread_is_suspend_enabled());
    // GC must be disabled while a raw managed pointer is in hand.
    hythread_suspend_disable();

    ManagedObject *obj = UNCOMPRESS_INTERP(frame->locals(slot).ref);

    if (NULL == obj) {
        *value_ptr = NULL;
    } else {
        // Wrap the raw pointer into a local handle so it stays valid
        // after suspension is re-enabled.
        ObjectHandle handle = oh_allocate_local_handle();
        handle->object = obj;
        *value_ptr = (jobject) handle;
    }

    hythread_suspend_enable();
    return JVMTI_ERROR_NONE;
}
/**
 * Returns the owner of the lock associated with the given monitor.
 *
 * *lock_owner is set to NULL when the monitor is not owned by any thread.
 *
 * @param[in]  monitor    monitor whose owner needs to be determined
 * @param[out] lock_owner thread which owns the monitor, or NULL
 * @return TM_ERROR_NONE, or TM_ERROR_ILLEGAL_STATE when the owning
 *         native thread has no attached VM thread
 */
IDATA VMCALL jthread_get_lock_owner(jobject monitor, jthread * lock_owner)
{
    assert(monitor);
    assert(lock_owner);

    *lock_owner = NULL;
    IDATA result = TM_ERROR_NONE;

    hythread_suspend_disable();
    hythread_thin_monitor_t *lockword = vm_object_get_lockword_addr(monitor);
    hythread_t holder = hythread_thin_monitor_get_owner(lockword);
    if (holder) {
        vm_thread_t vm_thread = jthread_get_vm_thread(holder);
        if (vm_thread) {
            *lock_owner = vm_thread->java_thread;
        } else {
            result = TM_ERROR_ILLEGAL_STATE;
        }
    }
    hythread_suspend_enable();

    return result;
} // jthread_get_lock_owner
// Callback function for NCAI breakpoint processing bool ncai_process_breakpoint_event(TIEnv *env, const VMBreakPoint* bp, const POINTER_SIZE_INT data) { TRACE2("ncai.break", "BREAKPOINT occured, location = " << bp->addr); VM_thread* vm_thread = p_TLS_vmthread; if (!vm_thread) return false; jvmti_thread_t jvmti_thread = &vm_thread->jvmti_thread; // This check works for current thread only if (jvmti_thread->flag_ncai_handler) // Recursion return true; jvmti_thread->flag_ncai_handler = true; NCAIEnv* ncai_env = env->ncai_env; void* addr = (void*)bp->addr; bool suspend_enabled = hythread_is_suspend_enabled(); if (!suspend_enabled) hythread_suspend_enable(); hythread_t hythread = hythread_self(); ncaiThread thread = reinterpret_cast<ncaiThread>(hythread); ncaiBreakpoint func = (ncaiBreakpoint)ncai_env->get_event_callback(NCAI_EVENT_BREAKPOINT); if (NULL != func) { if (ncai_env->global_events[NCAI_EVENT_BREAKPOINT - NCAI_MIN_EVENT_TYPE_VAL]) { TRACE2("ncai.break", "Calling global breakpoint callback, address = " << addr); func((ncaiEnv*)ncai_env, thread, addr); TRACE2("ncai.break", "Finished global breakpoint callback, address = " << addr); } else { ncaiEventThread* next_et; ncaiEventThread* first_et = ncai_env->event_threads[NCAI_EVENT_BREAKPOINT - NCAI_MIN_EVENT_TYPE_VAL]; for (ncaiEventThread* et = first_et; NULL != et; et = next_et) { next_et = et->next; if (et->thread == thread) { TRACE2("ncai.break", "Calling local breakpoint callback, address = " << addr); func((ncaiEnv*)ncai_env, thread, addr); TRACE2("ncai.break", "Finished local breakpoint callback, address = " << addr); } et = next_et; } } } if (!suspend_enabled) hythread_suspend_disable(); jvmti_thread->flag_ncai_handler = false; return true; }
int test_hythread_thread_suspend(void){ void **args; hythread_t thread = NULL; hythread_thin_monitor_t lock; hythread_thin_monitor_t monitor; IDATA status; int i; // create monitors status = hythread_thin_monitor_create(&lock); tf_assert_same(status, TM_ERROR_NONE); status = hythread_thin_monitor_create(&monitor); tf_assert_same(status, TM_ERROR_NONE); // alloc and set thread start procedure args args = (void**)calloc(3, sizeof(void*)); args[0] = &lock; args[1] = &monitor; args[2] = 0; // create thread hythread_suspend_disable(); status = hythread_thin_monitor_enter(&lock); tf_assert_same(status, TM_ERROR_NONE); hythread_suspend_enable(); status = hythread_create(&thread, 0, 0, 0, (hythread_entrypoint_t)start_proc, args); tf_assert_same(status, TM_ERROR_NONE); // waiting start of tested thread hythread_suspend_disable(); status = hythread_thin_monitor_wait(&lock); tf_assert_same(status, TM_ERROR_NONE); status = hythread_thin_monitor_exit(&lock); tf_assert_same(status, TM_ERROR_NONE); hythread_suspend_enable(); // suspend tested thread status = hythread_suspend_other(thread); tf_assert_same(status, TM_ERROR_NONE); // notify tested thread hythread_suspend_disable(); status = hythread_thin_monitor_enter(&monitor); tf_assert_same(status, TM_ERROR_NONE); status = hythread_thin_monitor_notify_all(&monitor); tf_assert_same(status, TM_ERROR_NONE); status = hythread_thin_monitor_exit(&monitor); tf_assert_same(status, TM_ERROR_NONE); hythread_suspend_enable(); // check tested argument for(i = 0; i < 1000; i++) { tf_assert_same(args[2], 0); hythread_sleep(1); } // resume thread hythread_resume(thread); test_thread_join(thread, 1); tf_assert_same((IDATA)args[2], 1); return 0; }
int test_hythread_thread_suspend_all(void) { void **args; hythread_t thread_list[THREAD_COUNT]; hythread_thin_monitor_t lock; hythread_thin_monitor_t monitor; IDATA status; int i; // create monitors status = hythread_thin_monitor_create(&monitor); tf_assert_same(status, TM_ERROR_NONE); status = hythread_thin_monitor_create(&lock); tf_assert_same(status, TM_ERROR_NONE); // alloc and set thread start procedure args args = (void**)calloc(3, sizeof(void*)); args[0] = &lock; args[1] = &monitor; args[2] = 0; // create threads hythread_suspend_disable(); status = hythread_thin_monitor_enter(&lock); tf_assert_same(status, TM_ERROR_NONE); hythread_suspend_enable(); started_thread_count = 0; for(i = 0; i < THREAD_COUNT; i++) { thread_list[i] = NULL; status = hythread_create(&thread_list[i], 0, 0, 0, (hythread_entrypoint_t)start_proc, args); tf_assert_same(status, TM_ERROR_NONE); log_info("%d thread is started", i + 1); } // waiting start of tested thread hythread_suspend_disable(); while (started_thread_count < 10) { status = hythread_thin_monitor_wait(&lock); tf_assert_same(status, TM_ERROR_NONE); } status = hythread_thin_monitor_exit(&lock); tf_assert_same(status, TM_ERROR_NONE); hythread_suspend_enable(); // suspend tested thread status = hythread_suspend_all(NULL, ((HyThread_public*)hythread_self())->group); tf_assert_same(status, TM_ERROR_NONE); log_info("all threads are suspended"); // notify tested threads hythread_suspend_disable(); status = hythread_thin_monitor_enter(&monitor); tf_assert_same(status, TM_ERROR_NONE); status = hythread_thin_monitor_notify_all(&monitor); tf_assert_same(status, TM_ERROR_NONE); status = hythread_thin_monitor_exit(&monitor); tf_assert_same(status, TM_ERROR_NONE); hythread_suspend_enable(); log_info("notify all suspended threads"); // check tested argument for(i = 0; i < 1000; i++) { tf_assert_same(args[2], 0); hythread_sleep(1); } // resume thread status = hythread_resume_all(((HyThread_public*)hythread_self())->group); 
tf_assert_same(status, TM_ERROR_NONE); log_info("resume all suspended threads"); for(i = 0; i < THREAD_COUNT; i++) { test_thread_join(thread_list[i], i); log_info("%d thread is terminated", i + 1); } tf_assert_same((IDATA)args[2], THREAD_COUNT); return 0; }
/**
 * Returns the array class whose element type is the given class.
 * Suspension is enabled around the lookup, which may trigger class
 * loading.
 */
Class* interp_class_get_array_of_class(Class *objClass)
{
    hythread_suspend_enable();
    Class *array_class = class_get_array_of_class(objClass);
    hythread_suspend_disable();
    return array_class;
}
/**
 * Wait on the <code>object</code>'s monitor with the specified timeout.
 *
 * This function instructs the current thread to be scheduled off
 * the processor and wait on the monitor until the following occurs:
 * <UL>
 * <LI>another thread invokes <code>thread_notify(object)</code>
 * and VM chooses this thread to wake up;
 * <LI>another thread invokes <code>thread_notifyAll(object);</code>
 * <LI>another thread invokes <code>thread_interrupt(thread);</code>
 * <LI>real time elapsed from the waiting begin is
 * greater or equal the timeout specified.
 * </UL>
 *
 * @param[in] monitor object where monitor is located
 * @param[in] millis time to wait (in milliseconds)
 * @param[in] nanos time to wait (in nanoseconds)
 * @return TM_ERROR_ILLEGAL_STATE if the caller does not own the monitor;
 *         otherwise the status of the underlying interruptable wait
 * @sa java.lang.Object.wait()
 */
IDATA VMCALL jthread_monitor_timed_wait(jobject monitor, jlong millis, jint nanos)
{
    assert(monitor);
    hythread_suspend_disable();
    hythread_t native_thread = hythread_self();
    hythread_thin_monitor_t *lockword = vm_object_get_lockword_addr(monitor);
    // A thin lock cannot park waiters: it must be inflated to a fat lock
    // first, and only the current owner is allowed to do that.
    if (!hythread_is_fat_lock(*lockword)) {
        if (!hythread_owns_thin_lock(native_thread, *lockword)) {
            CTRACE(("ILLEGAL_STATE wait %x\n", lockword));
            hythread_suspend_enable();
            return TM_ERROR_ILLEGAL_STATE;
        }
        hythread_inflate_lock(lockword);
    }

    // wait_begin is written and read only on ti_is_enabled() paths;
    // assumes TI enablement does not change during the wait - TODO confirm.
    apr_time_t wait_begin;
    if (ti_is_enabled()) {
        // JVMTI callbacks must run with suspension enabled; save and
        // restore the disable depth around them.
        int disable_count = hythread_reset_suspend_disable();
        jthread_set_wait_monitor(monitor);
        jthread_set_owned_monitor(monitor);
        if(jvmti_should_report_event(JVMTI_EVENT_MONITOR_WAIT)) {
            jvmti_send_wait_monitor_event(monitor, (jlong) millis);
        }
        if(jvmti_should_report_event(JVMTI_EVENT_MONITOR_CONTENDED_ENTER)) {
            jvmti_send_contended_enter_or_entered_monitor_event(monitor, 1);
        }
        hythread_set_suspend_disable(disable_count);

        // should be moved to event handler
        wait_begin = apr_time_now();
        jthread_remove_owned_monitor(monitor);
    }

    // Publish the WAITING state before blocking.
    hythread_thread_lock(native_thread);
    IDATA state = hythread_get_state(native_thread);
    state &= ~TM_THREAD_STATE_RUNNABLE;
    state |= TM_THREAD_STATE_WAITING | TM_THREAD_STATE_IN_MONITOR_WAIT;
    if ((millis > 0) || (nanos > 0)) {
        state |= TM_THREAD_STATE_WAITING_WITH_TIMEOUT;
    } else {
        state |= TM_THREAD_STATE_WAITING_INDEFINITELY;
    }
    IDATA status = hythread_set_state(native_thread, state);
    assert(status == TM_ERROR_NONE);
    hythread_thread_unlock(native_thread);

    // Block until notified, interrupted, or timed out.
    status = hythread_thin_monitor_wait_interruptable(lockword, millis, nanos);

    // Restore the RUNNABLE state after the wait completes.
    hythread_thread_lock(native_thread);
    state = hythread_get_state(native_thread);
    if ((millis > 0) || (nanos > 0)) {
        state &= ~TM_THREAD_STATE_WAITING_WITH_TIMEOUT;
    } else {
        state &= ~TM_THREAD_STATE_WAITING_INDEFINITELY;
    }
    state &= ~(TM_THREAD_STATE_WAITING | TM_THREAD_STATE_IN_MONITOR_WAIT);
    state |= TM_THREAD_STATE_RUNNABLE;
    hythread_set_state(native_thread, state);
    hythread_thread_unlock(native_thread);
    hythread_suspend_enable();

    if (ti_is_enabled()) {
        // The monitor is re-owned on wakeup; update the JVMTI bookkeeping
        // and report the WAITED / CONTENDED_ENTERED events.
        jthread_add_owned_monitor(monitor);
        int disable_count = hythread_reset_suspend_disable();
        if(jvmti_should_report_event(JVMTI_EVENT_MONITOR_CONTENDED_ENTERED)) {
            jvmti_send_contended_enter_or_entered_monitor_event(monitor, 0);
        }
        if(jvmti_should_report_event(JVMTI_EVENT_MONITOR_WAITED)) {
            jvmti_send_waited_monitor_event(monitor,
                ((status == APR_TIMEUP) ? (jboolean) 1 : (jboolean) 0));
        }
        hythread_set_suspend_disable(disable_count);
        // should be moved to event handler
        jvmti_thread_t jvmti_thread = jthread_get_jvmti_thread(hythread_self());
        jvmti_thread->waited_time += apr_time_now() - wait_begin;
    }
    return status;
} // jthread_monitor_timed_wait
// Reports a module load/unload event to every NCAI environment that has
// registered the corresponding callback, either globally or for the
// current thread.
static void report_loaded_unloaded_module(ncaiModule module, bool loaded)
{
    DebugUtilsTI *ti = VM_Global_State::loader_env->TI;

    hythread_t hythread = hythread_self();
    ncaiThread thread = reinterpret_cast<ncaiThread>(hythread);

    // Callbacks run with suspension enabled; the previous state is
    // restored before returning.
    bool suspend_enabled = hythread_is_suspend_enabled();
    if (!suspend_enabled)
        hythread_suspend_enable();

    TIEnv *ti_env = ti->getEnvironments();
    TIEnv *next_ti_env;

    const char* trace_text = loaded ? "ModuleLoad" : "ModuleUnload";

    while (NULL != ti_env) {
        next_ti_env = ti_env->next;

        NCAIEnv* env = ti_env->ncai_env;
        if (NULL == env) {
            ti_env = next_ti_env;
            continue;
        }

        ncaiModuleLoad func_l =
            (ncaiModuleLoad)env->get_event_callback(NCAI_EVENT_MODULE_LOAD);
        // Fix: func_u holds an unload callback; declare it with the
        // matching ncaiModuleUnload type instead of ncaiModuleLoad.
        ncaiModuleUnload func_u =
            (ncaiModuleUnload)env->get_event_callback(NCAI_EVENT_MODULE_UNLOAD);

        ncaiModule env_module = NULL;

        ncaiModLU func = loaded ? (ncaiModLU)func_l : (ncaiModLU)func_u;
        ncaiEventKind event =
            loaded ? NCAI_EVENT_MODULE_LOAD : NCAI_EVENT_MODULE_UNLOAD;

        if (NULL != func) {
            if (env->global_events[event - NCAI_MIN_EVENT_TYPE_VAL]) {
                TRACE2("ncai.modules", "Calling global " << trace_text
                    << " callback for module " << module->info->name);
                find_init_module_record(env, module, &env_module);
                func((ncaiEnv*)env, thread, env_module);
                TRACE2("ncai.modules", "Finished global " << trace_text
                    << " callback for module " << module->info->name);
                ti_env = next_ti_env;
                continue;
            }

            ncaiEventThread* next_et;
            ncaiEventThread* first_et =
                env->event_threads[event - NCAI_MIN_EVENT_TYPE_VAL];

            // next_et is captured before invoking the callback in case
            // the callback unregisters the current node.
            for (ncaiEventThread* et = first_et; NULL != et; et = next_et) {
                next_et = et->next;

                if (et->thread == thread) {
                    TRACE2("ncai.modules", "Calling local " << trace_text
                        << " callback for module " << module->info->name);
                    find_init_module_record(env, module, &env_module);
                    func((ncaiEnv*)env, thread, env_module);
                    TRACE2("ncai.modules", "Finished local " << trace_text
                        << " callback for module " << module->info->name);
                }
                // Fix: removed redundant 'et = next_et;' here - the loop
                // increment expression already advances via next_et.
            }
        }

        ti_env = next_ti_env;
    }

    if (!suspend_enabled)
        hythread_suspend_disable();
}
/**
 * Gains the ownership over monitor.
 *
 * Current thread blocks if the specified monitor is owned by other thread.
 *
 * Fast path is a thin-lock try-enter; on contention the thread publishes
 * a BLOCKED state, reports JVMTI contended-enter events, and spins with
 * safe points until it either wins the thin lock (then inflates it) or
 * finds the lock already inflated and blocks on the fat lock.
 *
 * @param[in] monitor object where monitor is located
 * @return TM_ERROR_NONE on success; an error status if the fat-lock
 *         enter fails
 * @sa JNI::MonitorEnter()
 */
IDATA VMCALL jthread_monitor_enter(jobject monitor)
{
    IDATA state;
    hythread_t native_thread;
    // enter_begin is written and read only on ti_is_enabled() paths.
    apr_time_t enter_begin;

    assert(monitor);
    hythread_suspend_disable();
    hythread_thin_monitor_t *lockword = vm_object_get_lockword_addr(monitor);
    IDATA status = hythread_thin_monitor_try_enter(lockword);
    if (status != TM_ERROR_EBUSY) {
        // Uncontended fast path.
        goto entered;
    }

#ifdef LOCK_RESERVATION
    // busy unreserve lock before blocking and inflating
    while (TM_ERROR_NONE != hythread_unreserve_lock(lockword)) {
        hythread_yield();
        hythread_safe_point();
        hythread_exception_safe_point();
        // Safe points may run GC; the object (and thus the lockword
        // address) may have moved - reload it.
        lockword = vm_object_get_lockword_addr(monitor);
    }
    status = hythread_thin_monitor_try_enter(lockword);
    if (status != TM_ERROR_EBUSY) {
        goto entered;
    }
#endif //LOCK_RESERVATION

    // Contended path: publish BLOCKED state before spinning.
    native_thread = hythread_self();
    hythread_thread_lock(native_thread);
    state = hythread_get_state(native_thread);
    state &= ~TM_THREAD_STATE_RUNNABLE;
    state |= TM_THREAD_STATE_BLOCKED_ON_MONITOR_ENTER;
    status = hythread_set_state(native_thread, state);
    assert(status == TM_ERROR_NONE);
    hythread_thread_unlock(native_thread);

    // should be moved to event handler
    if (ti_is_enabled()) {
        enter_begin = apr_time_now();
        // JVMTI callbacks must run with suspension enabled; save and
        // restore the disable depth around them.
        int disable_count = hythread_reset_suspend_disable();
        jthread_set_owned_monitor(monitor);
        if(jvmti_should_report_event(JVMTI_EVENT_MONITOR_CONTENDED_ENTER)) {
            jvmti_send_contended_enter_or_entered_monitor_event(monitor, 1);
        }
        hythread_set_suspend_disable(disable_count);
    }

    // busy wait and inflate
    // reload pointer after safepoints
    lockword = vm_object_get_lockword_addr(monitor);
    while ((status = hythread_thin_monitor_try_enter(lockword)) == TM_ERROR_EBUSY) {
        hythread_safe_point();
        hythread_exception_safe_point();
        lockword = vm_object_get_lockword_addr(monitor);

        if (hythread_is_fat_lock(*lockword)) {
            // Another thread inflated the lock: block on the fat lock
            // instead of continuing to spin.
            status = hythread_thin_monitor_enter(lockword);
            if (status != TM_ERROR_NONE) {
                hythread_suspend_enable();
                assert(0);
                return status;
            }
            goto contended_entered;
        }
        hythread_yield();
    }
    assert(status == TM_ERROR_NONE);
    if (!hythread_is_fat_lock(*lockword)) {
        // Won a contended thin lock - inflate it so waiters can park.
        hythread_inflate_lock(lockword);
    }

// do all ti staff here
contended_entered:
    if (ti_is_enabled()) {
        int disable_count = hythread_reset_suspend_disable();
        if(jvmti_should_report_event(JVMTI_EVENT_MONITOR_CONTENDED_ENTERED)) {
            jvmti_send_contended_enter_or_entered_monitor_event(monitor, 0);
        }
        hythread_set_suspend_disable(disable_count);
        // should be moved to event handler
        jvmti_thread_t jvmti_thread = jthread_get_jvmti_thread(hythread_self());
        jvmti_thread->blocked_time += apr_time_now() - enter_begin;
    }

    // Back to RUNNABLE after the contended acquisition.
    hythread_thread_lock(native_thread);
    state = hythread_get_state(native_thread);
    state &= ~TM_THREAD_STATE_BLOCKED_ON_MONITOR_ENTER;
    state |= TM_THREAD_STATE_RUNNABLE;
    status = hythread_set_state(native_thread, state);
    assert(status == TM_ERROR_NONE);
    hythread_thread_unlock(native_thread);

entered:
    if (ti_is_enabled()) {
        jthread_add_owned_monitor(monitor);
    }
    hythread_suspend_enable();
    return TM_ERROR_NONE;
} // jthread_monitor_enter