/**
 * Drops the given monitor from the current thread's JVMTI owned-monitor list.
 *
 * Scans the list from the newest entry backwards, releases the matching
 * global reference, compacts the array, and shrinks the count. A no-match
 * scan leaves the list untouched.
 *
 * @param[in] monitor the monitor object to remove
 */
static void jthread_remove_owned_monitor(jobject monitor)
{
    vm_thread_t vm_thread = jthread_self_vm_thread();
    assert(vm_thread);
    jvmti_thread_t jvmti_thread = &vm_thread->jvmti_thread;
    // NOTE(review): address of an embedded member is never NULL, so this
    // guard can never fire; kept only to preserve the original behavior.
    if (!jvmti_thread) {
        // nothing to do
        return;
    }
    CTRACE(("TM: remove owned monitor: %x", monitor));

    for (int idx = jvmti_thread->owned_monitors_nmb - 1; idx >= 0; idx--) {
        if (!vm_objects_are_equal(jvmti_thread->owned_monitors[idx], monitor)) {
            continue;
        }

        // DeleteGlobalRef must run with suspension enabled.
        int disable_status = hythread_reset_suspend_disable();
        vm_thread->jni_env->DeleteGlobalRef(jvmti_thread->owned_monitors[idx]);
        hythread_set_suspend_disable(disable_status);

        // Close the gap by shifting the tail of the list down one slot.
        int slot = idx;
        while (slot < jvmti_thread->owned_monitors_nmb - 1) {
            jvmti_thread->owned_monitors[slot] = jvmti_thread->owned_monitors[slot + 1];
            slot++;
        }
        jvmti_thread->owned_monitors[slot] = NULL;
        jvmti_thread->owned_monitors_nmb--;
        return;
    }
} // jthread_remove_owned_monitor
// for the case pure stop the world static void gc_partial_con_PSTW( GC *gc) { int64 time_collection_start = time_now(); INFO2("gc.space.stat","Stop-the-world collection = "<<gc->num_collections<<""); INFO2("gc.con.info", "from last check point =" << (unsigned int)(time_collection_start -get_last_check_point()) ); // stop the world enumeration gc->num_collections++; int disable_count = hythread_reset_suspend_disable(); gc_set_rootset_type(ROOTSET_IS_REF); gc_prepare_rootset(gc); if(gc->cause != GC_CAUSE_RUNTIME_FORCE_GC ) { unsigned int new_obj_size = gc_get_mutator_new_obj_size(gc); Con_Collection_Statistics * con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc); con_collection_stat->heap_utilization_rate = (float)(con_collection_stat->surviving_size_at_gc_end + new_obj_size)/gc->committed_heap_size; } //reclaim heap gc_reset_mutator_context(gc); if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc); gc_ms_reclaim_heap((GC_MS*)gc); //update live size gc_PSTW_update_stat_after_marking(gc); // reset the collection and resume mutators gc_reset_after_con_collection(gc); set_con_nil(gc); // concurrent scheduling will continue after mutators are resumed vm_resume_threads_after(); assert(hythread_is_suspend_enabled()); hythread_set_suspend_disable(disable_count); }
/**
 * Acquires a thin monitor, inflating it to a fat monitor under contention.
 *
 * Fast path: a single try-enter. On contention, spins with yields; if the
 * lock inflates (here or elsewhere) the fat monitor is entered instead with
 * suspension temporarily re-enabled around the blocking call.
 *
 * @param[in] lockword_ptr monitor addr
 * @return TM_ERROR_NONE on success, TM_ERROR_OUT_OF_MEMORY if inflation
 *         fails, or the fat-monitor enter status
 */
IDATA hythread_thin_monitor_enter(hythread_thin_monitor_t *lockword_ptr)
{
    assert(lockword_ptr);

    // Uncontended fast path.
    if (hythread_thin_monitor_try_enter(lockword_ptr) == TM_ERROR_NONE) {
        return TM_ERROR_NONE;
    }

    // Contended: spin until the thin lock is free or becomes fat.
    while (hythread_thin_monitor_try_enter(lockword_ptr) == TM_ERROR_EBUSY) {
        if (IS_FAT_LOCK(*lockword_ptr)) {
            // Another thread inflated the lock; block on the fat monitor.
            hythread_monitor_t fat_monitor =
                locktable_get_fat_monitor(FAT_LOCK_ID(*lockword_ptr));
            CTRACE((" lock %d\n", FAT_LOCK_ID(*lockword_ptr)));

            // hythread_monitor_enter may block, so suspension must be enabled.
            int saved_disable_count = hythread_reset_suspend_disable();
            IDATA status = hythread_monitor_enter(fat_monitor);
            hythread_set_suspend_disable(saved_disable_count);
            return status;
        }
        hythread_yield();
    }

    if (IS_FAT_LOCK(*lockword_ptr)) {
        // Lock was already inflated while we held the thin lock.
        return TM_ERROR_NONE;
    }

    // We won the thin lock under contention: inflate it ourselves.
    CTRACE(("inflate_contended thin_lcok%d\n", ++inflate_contended));
    if (hythread_inflate_lock(lockword_ptr) == NULL) {
        return TM_ERROR_OUT_OF_MEMORY;
    }
    return TM_ERROR_NONE;
}
/**
 * Records the monitor the current thread is about to wait on, for JVMTI
 * bookkeeping, pinning it with a JNI global reference.
 *
 * @param[in] monitor the monitor object being waited on
 */
static void jthread_set_wait_monitor(jobject monitor)
{
    vm_thread_t vm_thread = jthread_self_vm_thread();
    assert(vm_thread);
    jvmti_thread_t jvmti_thread = &vm_thread->jvmti_thread;
    // NOTE(review): address of an embedded member is never NULL; guard kept
    // to preserve original behavior.
    if (!jvmti_thread) {
        return; // nothing to do
    }
    CTRACE(("TM: set wait monitor: %x", monitor));

    // NewGlobalRef must run with suspension enabled.
    int saved = hythread_reset_suspend_disable();
    jvmti_thread->wait_monitor = vm_thread->jni_env->NewGlobalRef(monitor);
    hythread_set_suspend_disable(saved);
} // jthread_set_wait_monitor
/**
 * Appends a monitor to the current thread's JVMTI owned-monitor list.
 *
 * Clears any stale contended/wait monitor references first, grows the list
 * (doubling, pool-allocated) when full, then stores a fresh global
 * reference to the monitor.
 *
 * @param[in] monitor the monitor object now owned by this thread
 */
static void jthread_add_owned_monitor(jobject monitor)
{
    vm_thread_t vm_thread = jthread_self_vm_thread();
    assert(vm_thread);
    jvmti_thread_t jvmti_thread = &vm_thread->jvmti_thread;
    // NOTE(review): address of an embedded member is never NULL; guard kept
    // to preserve original behavior.
    if (!jvmti_thread) {
        return; // nothing to do
    }
    CTRACE(("TM: add owned monitor: %x", monitor));

    // All JNI ref manipulation below runs with suspension enabled.
    int disable_status = hythread_reset_suspend_disable();

    // Owning the monitor means we are no longer contending or waiting on it;
    // drop those references if set.
    if (jvmti_thread->contended_monitor) {
        vm_thread->jni_env->DeleteGlobalRef(jvmti_thread->contended_monitor);
        jvmti_thread->contended_monitor = NULL;
    }
    if (jvmti_thread->wait_monitor) {
        vm_thread->jni_env->DeleteGlobalRef(jvmti_thread->wait_monitor);
        jvmti_thread->wait_monitor = NULL;
    }

    // Grow the array when it is full. The old array is not freed: it lives
    // in the thread's APR pool and is reclaimed with the pool.
    if (jvmti_thread->owned_monitors_nmb >= jvmti_thread->owned_monitors_size) {
        int new_size = jvmti_thread->owned_monitors_size * 2;
        CTRACE(("Increasing owned_monitors_size to: %d", new_size));
        jobject* new_monitors =
            (jobject*)apr_palloc(vm_thread->pool, new_size * sizeof(jobject));
        assert(new_monitors);
        memcpy(new_monitors, jvmti_thread->owned_monitors,
               jvmti_thread->owned_monitors_size * sizeof(jobject));
        jvmti_thread->owned_monitors = new_monitors;
        jvmti_thread->owned_monitors_size = new_size;
    }

    // Append a pinned reference to the new monitor.
    jvmti_thread->owned_monitors[jvmti_thread->owned_monitors_nmb] =
        vm_thread->jni_env->NewGlobalRef(monitor);
    jvmti_thread->owned_monitors_nmb++;

    hythread_set_suspend_disable(disable_status);
} // jthread_add_owned_monitor
// Handles heap exhaustion while a mostly-concurrent marking cycle is in
// flight: waits for marker startup, stops the world, finishes marking, and
// reclaims the heap. Phase-transition order is load-bearing throughout.
static unsigned int gc_con_heap_full_mostly_con( GC *gc )
{
    while( gc->gc_concurrent_status == GC_CON_START_MARKERS ) {  // we should enumerate rootset after old rootset is traced
        vm_thread_yield();
    }

    int64 final_start = time_now();
    // Stop the world; the saved depth is restored after mutators resume.
    int disable_count = hythread_reset_suspend_disable();
    gc_set_rootset_type(ROOTSET_IS_OBJ);
    gc_prepare_rootset(gc);

    gc_set_barrier_function(WB_REM_NIL); //in stw phase, so we can remove write barrier at any time
    terminate_mostly_con_mark(); // terminate current mostly concurrent marking

    //in the stop the world phase (only conclctors is running at the moment), so the spin lock will not lose more performance
    while(gc->gc_concurrent_status == GC_CON_TRACING) {
        vm_thread_yield(); //let the unfinished marker run
    }

    //final marking phase
    gc_clear_conclctor_role(gc);
    wspace_mostly_con_final_mark(gc);

    /*just debugging*/
    int64 final_time = time_now() - final_start;
    INFO2("gc.scheduler", "[MOSTLY_CON] final marking time=" << final_time << " us");
    gc_ms_get_current_heap_usage((GC_MS *)gc);

    // start STW reclaiming heap
    gc_con_update_stat_heap_exhausted(gc); // calculate util rate
    gc_reset_mutator_context(gc);
    if(!IGNORE_FINREF ) gc_set_obj_with_fin(gc);
    gc_ms_reclaim_heap((GC_MS*)gc);

    // reset after partial stop the world collection
    gc_reset_after_con_collection(gc);
    set_con_nil(gc);

    vm_resume_threads_after();
    hythread_set_suspend_disable(disable_count);
    return GC_PARTIAL_PMSS;
}
/**
 * Waits on a condition variable, honoring thread interruption.
 *
 * An interruptible wait returns TM_ERROR_INTERRUPT (clearing the flag) if
 * the thread was interrupted before or during the wait. The OS-level wait
 * runs with suspension enabled; the prior disable depth is restored after.
 *
 * @param[in] cond          condition variable to wait on
 * @param[in] mutex         mutex associated with the condition
 * @param[in] ms            timeout in milliseconds
 * @param[in] nano          additional timeout in nanoseconds
 * @param[in] interruptable non-zero to allow interruption
 * @return TM_ERROR_INTERRUPT if interrupted, otherwise the OS wait result
 */
IDATA condvar_wait_impl(hycond_t *cond, osmutex_t *mutex, I_64 ms,
                        IDATA nano, IDATA interruptable)
{
    hythread_t self = tm_self_tls;

    // A pending interrupt takes precedence over waiting at all.
    if (interruptable && self->interrupted) {
        IDATA status = hythread_clear_interrupted_other(self);
        assert(status == TM_ERROR_INTERRUPT);
        return TM_ERROR_INTERRUPT;
    }

    // Publish the condition so an interrupter can find and signal it;
    // uninterruptible waits stay invisible.
    self->current_condition = interruptable ? cond : NULL;

    int disable_count = hythread_reset_suspend_disable();
    int r = os_cond_timedwait(cond, mutex, ms, nano);
    hythread_set_suspend_disable(disable_count);

    self->current_condition = NULL;

    // The wait itself may have been ended by an interrupt.
    if (interruptable && self->interrupted) {
        IDATA status = hythread_clear_interrupted_other(self);
        assert(status == TM_ERROR_INTERRUPT);
        return TM_ERROR_INTERRUPT;
    }
    return r;
}
// Core wait implementation for a (fat) monitor: releases ownership, waits on
// the monitor's condition until notified / timed out / interrupted, then
// restores ownership and recursion depth before returning.
//
// Returns TM_ERROR_ILLEGAL_STATE if the caller does not own the monitor,
// TM_ERROR_TIMEOUT on timeout, TM_ERROR_INTERRUPT if interrupted, or
// TM_ERROR_NONE on a consumed notification.
IDATA monitor_wait_impl(hythread_monitor_t mon_ptr, I_64 ms, IDATA nano,
                        IDATA interruptable)
{
    IDATA status;
    int saved_recursion;
    hythread_t self = tm_self_tls;

    // Only the owner may wait.
    if (mon_ptr->owner != self) {
        return TM_ERROR_ILLEGAL_STATE;
    }

    // Fully release the monitor (saving recursion depth to restore later)
    // and register this thread as a waiter.
    saved_recursion = mon_ptr->recursion_count;
    assert(saved_recursion>=0);
    mon_ptr->owner = NULL;
    mon_ptr->recursion_count =0;
    mon_ptr->wait_count++;

    // Publish the waiting state for introspection (e.g. stack dumps).
    port_mutex_lock(&self->mutex);
    self->state |= TM_THREAD_STATE_IN_MONITOR_WAIT;
    self->waited_monitor = mon_ptr;
    port_mutex_unlock(&self->mutex);

    do {
        apr_time_t start;
        assert(mon_ptr->notify_count >= 0);
        assert(mon_ptr->notify_count < mon_ptr->wait_count);
        start = apr_time_now();

        status = condvar_wait_impl(&mon_ptr->condition, &mon_ptr->mutex,
                                   ms, nano, interruptable);
        // Leave the loop on error/interrupt, or when a notification is
        // available to consume. A wakeup without either is spurious.
        if (status != TM_ERROR_NONE || mon_ptr->notify_count) {
            break;
        }

        // we should not change ms and nano if both are 0 (meaning "no timeout")
        if (ms || nano) {
            // Deduct the elapsed time from the remaining timeout and
            // re-normalize nano into [0, 1000000).
            apr_interval_time_t elapsed;
            elapsed = apr_time_now() - start; // microseconds
            nano -= (IDATA)((elapsed % 1000) * 1000);
            if (nano < 0) {
                ms -= elapsed/1000 + 1;
                nano += 1000000;
            } else {
                ms -= elapsed/1000;
            }
            if (ms < 0) {
                assert(status == TM_ERROR_NONE);
                status = TM_ERROR_TIMEOUT;
                break;
            }
            assert(0 <= nano && nano < 1000000);
        }
    } while (1);

    // consume the notify_count unless we got an error (or were interrupted);
    // also consume when every remaining waiter was notified (notifyAll case)
    // so the count stays consistent.
    if (mon_ptr->notify_count > 0
            && (status == TM_ERROR_NONE
                || mon_ptr->notify_count == mon_ptr->wait_count))
    {
        mon_ptr->notify_count--;
    }

    // Clear the published waiting state.
    port_mutex_lock(&self->mutex);
    self->state &= ~TM_THREAD_STATE_IN_MONITOR_WAIT;
    self->waited_monitor = NULL;
    port_mutex_unlock(&self->mutex);

    mon_ptr->wait_count--;
    assert(mon_ptr->notify_count <= mon_ptr->wait_count);

    // A pending suspension request must be served before re-taking the
    // monitor mutex: drop it, pass a safe point, then re-acquire with
    // suspension enabled.
    if (self->request) {
        int save_count;
        port_mutex_unlock(&mon_ptr->mutex);
        hythread_safe_point();
        hythread_exception_safe_point();
        save_count = hythread_reset_suspend_disable();
        port_mutex_lock(&mon_ptr->mutex);
        hythread_set_suspend_disable(save_count);
    }

    // Restore ownership exactly as it was before the wait.
    mon_ptr->recursion_count = saved_recursion;
    mon_ptr->owner = self;
    assert(mon_ptr->owner);
    return status;
}
/**
 * Gains the ownership over monitor.
 *
 * Current thread blocks if the specified monitor is owned by other thread.
 *
 * Flow: fast-path try-enter; on contention (optionally after busy-unreserving
 * a reserved lock) the thread is marked BLOCKED_ON_MONITOR_ENTER, JVMTI
 * contended-enter events fire, and the thread spins/inflates until it owns
 * the lock. The lockword pointer is reloaded after every safe point because
 * GC may move the object.
 *
 * @param[in] monitor object where monitor is located
 * @sa JNI::MonitorEnter()
 */
IDATA VMCALL jthread_monitor_enter(jobject monitor)
{
    IDATA state;
    hythread_t native_thread;
    apr_time_t enter_begin;
    assert(monitor);
    hythread_suspend_disable();
    hythread_thin_monitor_t *lockword = vm_object_get_lockword_addr(monitor);
    IDATA status = hythread_thin_monitor_try_enter(lockword);
    // Uncontended fast path.
    if (status != TM_ERROR_EBUSY) {
        goto entered;
    }

#ifdef LOCK_RESERVATION
    // busy unreserve lock before blocking and inflating
    while (TM_ERROR_NONE != hythread_unreserve_lock(lockword)) {
        hythread_yield();
        hythread_safe_point();
        hythread_exception_safe_point();
        lockword = vm_object_get_lockword_addr(monitor);
    }
    status = hythread_thin_monitor_try_enter(lockword);
    if (status != TM_ERROR_EBUSY) {
        goto entered;
    }
#endif //LOCK_RESERVATION

    // Contended: publish the BLOCKED state before spinning.
    native_thread = hythread_self();
    hythread_thread_lock(native_thread);
    state = hythread_get_state(native_thread);
    state &= ~TM_THREAD_STATE_RUNNABLE;
    state |= TM_THREAD_STATE_BLOCKED_ON_MONITOR_ENTER;
    status = hythread_set_state(native_thread, state);
    assert(status == TM_ERROR_NONE);
    hythread_thread_unlock(native_thread);

    // should be moved to event handler
    // NOTE(review): enter_begin is only set here; its later use is under the
    // same ti_is_enabled() guard — assumes TI enablement does not change
    // mid-call. TODO confirm.
    if (ti_is_enabled()) {
        enter_begin = apr_time_now();
        int disable_count = hythread_reset_suspend_disable();
        jthread_set_owned_monitor(monitor);
        if(jvmti_should_report_event(JVMTI_EVENT_MONITOR_CONTENDED_ENTER)) {
            jvmti_send_contended_enter_or_entered_monitor_event(monitor, 1);
        }
        hythread_set_suspend_disable(disable_count);
    }

    // busy wait and inflate
    // reload pointer after safepoints
    lockword = vm_object_get_lockword_addr(monitor);
    while ((status = hythread_thin_monitor_try_enter(lockword)) == TM_ERROR_EBUSY) {
        hythread_safe_point();
        hythread_exception_safe_point();
        lockword = vm_object_get_lockword_addr(monitor);

        if (hythread_is_fat_lock(*lockword)) {
            // Lock inflated elsewhere: block on the fat monitor instead.
            status = hythread_thin_monitor_enter(lockword);
            if (status != TM_ERROR_NONE) {
                hythread_suspend_enable();
                assert(0);
                return status;
            }
            goto contended_entered;
        }
        hythread_yield();
    }
    assert(status == TM_ERROR_NONE);
    // We won a contended thin lock: inflate it so waiters can block properly.
    if (!hythread_is_fat_lock(*lockword)) {
        hythread_inflate_lock(lockword);
    }

// do all ti staff here
contended_entered:
    if (ti_is_enabled()) {
        int disable_count = hythread_reset_suspend_disable();
        if(jvmti_should_report_event(JVMTI_EVENT_MONITOR_CONTENDED_ENTERED)) {
            jvmti_send_contended_enter_or_entered_monitor_event(monitor, 0);
        }
        hythread_set_suspend_disable(disable_count);
        // should be moved to event handler
        jvmti_thread_t jvmti_thread = jthread_get_jvmti_thread(hythread_self());
        jvmti_thread->blocked_time += apr_time_now() - enter_begin;
    }
    // Restore RUNNABLE state now that the monitor is held.
    hythread_thread_lock(native_thread);
    state = hythread_get_state(native_thread);
    state &= ~TM_THREAD_STATE_BLOCKED_ON_MONITOR_ENTER;
    state |= TM_THREAD_STATE_RUNNABLE;
    status = hythread_set_state(native_thread, state);
    assert(status == TM_ERROR_NONE);
    hythread_thread_unlock(native_thread);

entered:
    if (ti_is_enabled()) {
        jthread_add_owned_monitor(monitor);
    }
    hythread_suspend_enable();
    return TM_ERROR_NONE;
} // jthread_monitor_enter
/**
 * Wait on the <code>object</code>'s monitor with the specified timeout.
 *
 * This function instructs the current thread to be scheduled off
 * the processor and wait on the monitor until the following occurs:
 * <UL>
 * <LI>another thread invokes <code>thread_notify(object)</code>
 * and VM chooses this thread to wake up;
 * <LI>another thread invokes <code>thread_notifyAll(object);</code>
 * <LI>another thread invokes <code>thread_interrupt(thread);</code>
 * <LI>real time elapsed from the waiting begin is
 * greater or equal the timeout specified.
 * </UL>
 *
 * @param[in] monitor object where monitor is located
 * @param[in] millis time to wait (in milliseconds)
 * @param[in] nanos time to wait (in nanoseconds)
 * @sa java.lang.Object.wait()
 */
IDATA VMCALL jthread_monitor_timed_wait(jobject monitor, jlong millis,
                                        jint nanos)
{
    assert(monitor);
    hythread_suspend_disable();
    hythread_t native_thread = hythread_self();
    hythread_thin_monitor_t *lockword = vm_object_get_lockword_addr(monitor);

    // Waiting requires a fat monitor; inflate a held thin lock, or fail if
    // the caller does not own the lock at all.
    if (!hythread_is_fat_lock(*lockword)) {
        if (!hythread_owns_thin_lock(native_thread, *lockword)) {
            CTRACE(("ILLEGAL_STATE wait %x\n", lockword));
            hythread_suspend_enable();
            return TM_ERROR_ILLEGAL_STATE;
        }
        hythread_inflate_lock(lockword);
    }

    // NOTE(review): wait_begin is only set here; its later use is under the
    // same ti_is_enabled() guard — assumes TI enablement does not change
    // mid-call. TODO confirm.
    apr_time_t wait_begin;
    if (ti_is_enabled()) {
        int disable_count = hythread_reset_suspend_disable();
        jthread_set_wait_monitor(monitor);
        jthread_set_owned_monitor(monitor);
        if(jvmti_should_report_event(JVMTI_EVENT_MONITOR_WAIT)) {
            jvmti_send_wait_monitor_event(monitor, (jlong) millis);
        }
        if(jvmti_should_report_event(JVMTI_EVENT_MONITOR_CONTENDED_ENTER)) {
            jvmti_send_contended_enter_or_entered_monitor_event(monitor, 1);
        }
        hythread_set_suspend_disable(disable_count);

        // should be moved to event handler
        wait_begin = apr_time_now();
        // The monitor is released while waiting, so it leaves the
        // owned-monitor list for the duration of the wait.
        jthread_remove_owned_monitor(monitor);
    }

    // Publish WAITING state (with or without timeout) before blocking.
    hythread_thread_lock(native_thread);
    IDATA state = hythread_get_state(native_thread);
    state &= ~TM_THREAD_STATE_RUNNABLE;
    state |= TM_THREAD_STATE_WAITING | TM_THREAD_STATE_IN_MONITOR_WAIT;
    if ((millis > 0) || (nanos > 0)) {
        state |= TM_THREAD_STATE_WAITING_WITH_TIMEOUT;
    } else {
        state |= TM_THREAD_STATE_WAITING_INDEFINITELY;
    }
    IDATA status = hythread_set_state(native_thread, state);
    assert(status == TM_ERROR_NONE);
    hythread_thread_unlock(native_thread);

    status = hythread_thin_monitor_wait_interruptable(lockword, millis, nanos);

    // Restore RUNNABLE state after the wait completes.
    hythread_thread_lock(native_thread);
    state = hythread_get_state(native_thread);
    if ((millis > 0) || (nanos > 0)) {
        state &= ~TM_THREAD_STATE_WAITING_WITH_TIMEOUT;
    } else {
        state &= ~TM_THREAD_STATE_WAITING_INDEFINITELY;
    }
    state &= ~(TM_THREAD_STATE_WAITING | TM_THREAD_STATE_IN_MONITOR_WAIT);
    state |= TM_THREAD_STATE_RUNNABLE;
    hythread_set_state(native_thread, state);
    hythread_thread_unlock(native_thread);

    hythread_suspend_enable();
    if (ti_is_enabled()) {
        // The monitor is owned again after waking; report events and
        // accumulate time spent waiting.
        jthread_add_owned_monitor(monitor);
        int disable_count = hythread_reset_suspend_disable();
        if(jvmti_should_report_event(JVMTI_EVENT_MONITOR_CONTENDED_ENTERED)) {
            jvmti_send_contended_enter_or_entered_monitor_event(monitor, 0);
        }
        if(jvmti_should_report_event(JVMTI_EVENT_MONITOR_WAITED)) {
            jvmti_send_waited_monitor_event(monitor,
                ((status == APR_TIMEUP) ? (jboolean) 1 : (jboolean) 0));
        }
        hythread_set_suspend_disable(disable_count);
        // should be moved to event handler
        jvmti_thread_t jvmti_thread = jthread_get_jvmti_thread(hythread_self());
        jvmti_thread->waited_time += apr_time_now() - wait_begin;
    }
    return status;
} // jthread_monitor_timed_wait