/**
 * Acquires the global lock over the threading subsystem.
 *
 * While held, the lock blocks new thread creation and thread exit
 * operations.
 *
 * @return TM_ERROR_NONE on success, or the port_mutex_lock error code
 *         for a thread that is not attached to hythread
 */
IDATA VMCALL hythread_global_lock() {
    IDATA rc;
    hythread_t self = hythread_self();

    // An unattached thread can never be suspended, so it may take
    // the lock without any safepoint bookkeeping.
    if (self == NULL) {
        return port_mutex_lock(&TM_LIBRARY->TM_LOCK);
    }

    // A potentially blocking operation requires disable_count == 0
    // (the thread must be safe for suspension); anything else risks
    // a suspension deadlock.
    assert(hythread_is_suspend_enabled());

    rc = port_mutex_lock(&TM_LIBRARY->TM_LOCK);
    assert(rc == TM_ERROR_NONE);

    // Never keep the global thread lock while this thread is being
    // asked to suspend: release it, pass through a safe point (where
    // the suspension can actually happen), then re-acquire and recheck.
    for (;;) {
        if (!self->suspend_count) {
            break;
        }
        rc = port_mutex_unlock(&TM_LIBRARY->TM_LOCK);
        assert(rc == TM_ERROR_NONE);
        hythread_safe_point();
        rc = port_mutex_lock(&TM_LIBRARY->TM_LOCK);
        assert(rc == TM_ERROR_NONE);
    }
    return TM_ERROR_NONE;
}
/**
 * Releases the ownership over monitor.
 *
 * @param[in] mon_ptr monitor
 * @return TM_ERROR_INVALID_MONITOR if the id resolves to no monitor,
 *         otherwise the result of hythread_monitor_exit()
 */
IDATA VMCALL jthread_raw_monitor_exit(jrawMonitorID mon_ptr) {
    IDATA rc;
    hythread_monitor_t mon =
        (hythread_monitor_t)array_get(jvmti_monitor_table, (UDATA)mon_ptr);

    if (NULL == mon) {
        return TM_ERROR_INVALID_MONITOR;
    }

    rc = hythread_monitor_exit(mon);

    // Give pending suspension / exception requests a chance to run
    // now that the monitor has been released.
    hythread_safe_point();
    hythread_exception_safe_point();

    return rc;
} // jthread_raw_monitor_exit
/* this method is called before STW gc start, there is a big lock outside */
void gc_wait_con_finish( GC* gc )
{
  int64 stw_begin = time_now();
  unsigned int partial_type;  // which partial-collection flavor ran; reported in stats below

  if( state_transformation( gc, GC_CON_NIL, GC_CON_DISABLE ) ) {
    /* Concurrent gc was idle and we won the race against concurrent
       scheduling: run a plain partial stop-the-world collection. */
    Con_Collection_Statistics *con_collection_stat = gc_ms_get_con_collection_stat((GC_MS*)gc);
    con_collection_stat->gc_start_time = time_now();
    con_collection_stat->pause_start_time = con_collection_stat->gc_start_time;
    partial_type = GC_PARTIAL_PSTW;
    gc_partial_con_PSTW( gc );
  } else {
    /* Concurrent gc is active: first wait for its root-set enumeration
       to complete, yielding through safe points. */
    while( gc->gc_concurrent_status == GC_CON_STW_ENUM ) {
      hythread_safe_point();
      vm_thread_yield();
    }
    if( gc_is_kind(ALGO_CON_MOSTLY) ) {
      partial_type = gc_con_heap_full_mostly_con(gc);
    } else if( gc_is_kind(ALGO_CON_OTF_OBJ) || gc_is_kind(ALGO_CON_OTF_REF) ) {
      partial_type = gc_con_heap_full_otf(gc);
      /* wait for the concurrent collector to finish its reset phase */
      while( gc->gc_concurrent_status == GC_CON_RESET ) {
        hythread_safe_point();
        vm_thread_yield();
      }
    } else {
      RAISE_ERROR;
    }
  }

  int64 pause_time = time_now() - stw_begin;
  gc_con_stat_information_out(gc);
  if(GC_CAUSE_RUNTIME_FORCE_GC == gc->cause) {
    INFO2("gc.con.time","[GC][Con]pause( Forcing GC ): "<<(unsigned int)(pause_time)<<" us ");
  } else {
    partial_stop_the_world_info( partial_type, (unsigned int)pause_time );
  }
}
/**
 * Waits on a monitor the calling thread currently owns.
 *
 * Releases the monitor (saving its recursion count), blocks on the
 * monitor's condition variable until notified, interrupted, or the
 * timeout elapses, then re-acquires ownership with the saved
 * recursion count.
 *
 * @param[in] mon_ptr       monitor to wait on; caller must be its owner
 * @param[in] ms            timeout milliseconds; 0 together with nano==0
 *                          means wait without timeout
 * @param[in] nano          additional timeout nanoseconds
 * @param[in] interruptable non-zero if the wait may be interrupted
 * @return TM_ERROR_ILLEGAL_STATE if the caller does not own the monitor,
 *         otherwise the result of the condvar wait (TM_ERROR_NONE,
 *         TM_ERROR_TIMEOUT, interrupt status, ...)
 */
IDATA monitor_wait_impl(hythread_monitor_t mon_ptr, I_64 ms, IDATA nano, IDATA interruptable) {
    IDATA status;
    int saved_recursion;
    //int saved_disable_count;
    hythread_t self = tm_self_tls;

    // Only the current owner may wait on the monitor.
    if (mon_ptr->owner != self) {
        return TM_ERROR_ILLEGAL_STATE;
    }

    // Release ownership for the duration of the wait; the recursion
    // depth is restored when ownership is re-taken at the end.
    saved_recursion = mon_ptr->recursion_count;
    assert(saved_recursion>=0);
    mon_ptr->owner = NULL;
    mon_ptr->recursion_count =0;
    mon_ptr->wait_count++;

    // Publish the waiting state under the thread's own mutex so that
    // observers see a consistent (state, waited_monitor) pair.
    port_mutex_lock(&self->mutex);
    self->state |= TM_THREAD_STATE_IN_MONITOR_WAIT;
    self->waited_monitor = mon_ptr;
    port_mutex_unlock(&self->mutex);

    do {
        apr_time_t start;
        assert(mon_ptr->notify_count >= 0);
        assert(mon_ptr->notify_count < mon_ptr->wait_count);
        start = apr_time_now();

        status = condvar_wait_impl(&mon_ptr->condition, &mon_ptr->mutex, ms, nano, interruptable);
        // Leave the loop on error/interrupt, or when a notify is
        // pending (spurious wakeups loop back around).
        if (status != TM_ERROR_NONE || mon_ptr->notify_count) {
            break;
        }

        // we should not change ms and nano if both are 0 (meaning "no timeout")
        if (ms || nano) {
            apr_interval_time_t elapsed;
            elapsed = apr_time_now() - start;     // microseconds
            // Subtract the elapsed time from the remaining budget,
            // borrowing from ms when nano underflows.
            nano -= (IDATA)((elapsed % 1000) * 1000);
            if (nano < 0) {
                ms -= elapsed/1000 + 1;
                nano += 1000000;
            } else {
                ms -= elapsed/1000;
            }
            if (ms < 0) {
                assert(status == TM_ERROR_NONE);
                status = TM_ERROR_TIMEOUT;
                break;
            }
            assert(0 <= nano && nano < 1000000);
        }
    } while (1);

    // consume the notify_count unless we got an error (or were interrupted)
    if (mon_ptr->notify_count > 0
            && (status == TM_ERROR_NONE
                || mon_ptr->notify_count == mon_ptr->wait_count)) {
        mon_ptr->notify_count--;
    }

    // Clear the waiting state before touching the monitor counters.
    port_mutex_lock(&self->mutex);
    self->state &= ~TM_THREAD_STATE_IN_MONITOR_WAIT;
    self->waited_monitor = NULL;
    port_mutex_unlock(&self->mutex);

    mon_ptr->wait_count--;
    assert(mon_ptr->notify_count <= mon_ptr->wait_count);

    // A pending suspension/exception request must be serviced with the
    // monitor mutex dropped, then the mutex is re-taken with the
    // suspend-disable count restored around the blocking lock.
    if (self->request) {
        int save_count;
        port_mutex_unlock(&mon_ptr->mutex);
        hythread_safe_point();
        hythread_exception_safe_point();
        save_count = hythread_reset_suspend_disable();
        port_mutex_lock(&mon_ptr->mutex);
        hythread_set_suspend_disable(save_count);
    }

    // Re-take ownership with the original recursion depth.
    mon_ptr->recursion_count = saved_recursion;
    mon_ptr->owner = self;
    assert(mon_ptr->owner);
    return status;
}
/**
 * Puts the current thread to sleep for the given duration.
 *
 * Implemented as a timed wait on the thread's own per-thread monitor,
 * so the sleep can be interrupted via the usual notify/interrupt path.
 *
 * @param[in] millis        sleep milliseconds; 0 together with nanos==0
 *                          degenerates to a yield
 * @param[in] nanos         additional sleep nanoseconds
 * @param[in] interruptable non-zero if the sleep may be interrupted
 * @return TM_ERROR_NONE on normal completion or timeout,
 *         TM_ERROR_INTERRUPT if interrupted while interruptable,
 *         TM_ERROR_UNATTACHED_THREAD if the thread is not attached
 */
IDATA thread_sleep_impl(I_64 millis, IDATA nanos, IDATA interruptable) {
    IDATA status;
    IDATA result;
    hythread_t self;
    hythread_monitor_t mon;

    // Zero duration: just yield the processor.
    if (nanos == 0 && millis == 0) {
        hythread_yield();
        return TM_ERROR_NONE;
    }

    if (!(self = hythread_self())) {
        // Report error in case current thread is not attached
        return TM_ERROR_UNATTACHED_THREAD;
    }

    // Grab thread monitor
    mon = self->monitor;
    status = hythread_monitor_enter(mon);
    assert(status == TM_ERROR_NONE);
    assert(mon->recursion_count == 0);
    // Drop ownership for the duration of the wait; no one else should
    // contend for a thread's private monitor.
    mon->owner = NULL;
    mon->wait_count++;

    // Set thread state
    status = port_mutex_lock(&self->mutex);
    assert(status == TM_ERROR_NONE);
    self->waited_monitor = mon;
    self->state |= TM_THREAD_STATE_SLEEPING;
    status = port_mutex_unlock(&self->mutex);
    assert(status == TM_ERROR_NONE);

    do {
        apr_time_t start;
        assert(mon->notify_count >= 0);
        assert(mon->notify_count < mon->wait_count);
        start = apr_time_now();

        result = condvar_wait_impl(&mon->condition, &mon->mutex, millis, nanos, interruptable);
        if (result != TM_ERROR_NONE) {
            break;
        }
        // we should not change millis and nanos if both are 0 (meaning "no timeout")
        if (millis || nanos) {
            // Subtract elapsed time from the remaining budget,
            // borrowing from millis when nanos underflows.
            apr_interval_time_t elapsed = apr_time_now() - start;
            nanos -= (IDATA)((elapsed % 1000) * 1000);
            if (nanos < 0) {
                millis -= elapsed/1000 + 1;
                nanos += 1000000;
            } else {
                millis -= elapsed/1000;
            }
            if (millis < 0) {
                // NOTE(review): this assigns `status`, but the return
                // value below is computed from `result` only, so the
                // TM_ERROR_TIMEOUT value is never observed by callers
                // (timeout deliberately maps to TM_ERROR_NONE for
                // sleep). Looks copied from monitor_wait_impl — confirm.
                assert(status == TM_ERROR_NONE);
                status = TM_ERROR_TIMEOUT;
                break;
            }
            assert(0 <= nanos && nanos < 1000000);
        }
    } while(1);

    // Restore thread state
    status = port_mutex_lock(&self->mutex);
    assert(status == TM_ERROR_NONE);
    self->state &= ~TM_THREAD_STATE_SLEEPING;
    self->waited_monitor = NULL;
    status = port_mutex_unlock(&self->mutex);
    assert(status == TM_ERROR_NONE);

    // Release thread monitor
    mon->wait_count--;
    mon->owner = self;
    assert(mon->notify_count <= mon->wait_count);
    status = hythread_monitor_exit(mon);
    assert(status == TM_ERROR_NONE);

    // Service any pending suspension/exception request before returning.
    if (self->request) {
        hythread_safe_point();
        hythread_exception_safe_point();
    }

    // Only an interrupt of an interruptable sleep is reported; timeout
    // (the normal outcome) is TM_ERROR_NONE.
    return (result == TM_ERROR_INTERRUPT &&
            interruptable) ? TM_ERROR_INTERRUPT : TM_ERROR_NONE;
}
/**
 * Gains the ownership over monitor.
 *
 * Current thread blocks if the specified monitor is owned by other thread.
 *
 * Fast path: a thin-monitor try-enter. On contention the thread is marked
 * BLOCKED_ON_MONITOR_ENTER, JVMTI contended-enter events are reported, and
 * the thread spins through safe points until it either acquires the thin
 * lock or the lock is inflated to a fat monitor.
 *
 * @param[in] monitor object where monitor is located
 * @sa JNI::MonitorEnter()
 */
IDATA VMCALL jthread_monitor_enter(jobject monitor)
{
    IDATA state;
    hythread_t native_thread;
    // FIX: zero-initialize. Previously enter_begin was assigned only when
    // ti_is_enabled() was true at contention start but read whenever
    // ti_is_enabled() is true at contended_entered; if TI gets enabled
    // mid-contention the old code read an uninitialized value (UB) and
    // corrupted blocked_time.
    apr_time_t enter_begin = 0;

    assert(monitor);
    hythread_suspend_disable();
    hythread_thin_monitor_t *lockword = vm_object_get_lockword_addr(monitor);
    IDATA status = hythread_thin_monitor_try_enter(lockword);
    if (status != TM_ERROR_EBUSY) {
        goto entered;
    }

#ifdef LOCK_RESERVATION
    // busy unreserve lock before blocking and inflating
    while (TM_ERROR_NONE != hythread_unreserve_lock(lockword)) {
        hythread_yield();
        hythread_safe_point();
        hythread_exception_safe_point();
        // reload pointer after safepoints — the object may have moved
        lockword = vm_object_get_lockword_addr(monitor);
    }
    status = hythread_thin_monitor_try_enter(lockword);
    if (status != TM_ERROR_EBUSY) {
        goto entered;
    }
#endif //LOCK_RESERVATION

    // Mark this thread blocked on monitor enter for the contention period.
    native_thread = hythread_self();
    hythread_thread_lock(native_thread);
    state = hythread_get_state(native_thread);
    state &= ~TM_THREAD_STATE_RUNNABLE;
    state |= TM_THREAD_STATE_BLOCKED_ON_MONITOR_ENTER;
    status = hythread_set_state(native_thread, state);
    assert(status == TM_ERROR_NONE);
    hythread_thread_unlock(native_thread);

    // should be moved to event handler
    if (ti_is_enabled()) {
        enter_begin = apr_time_now();   // stamp contention start for blocked_time
        int disable_count = hythread_reset_suspend_disable();
        jthread_set_owned_monitor(monitor);
        if (jvmti_should_report_event(JVMTI_EVENT_MONITOR_CONTENDED_ENTER)) {
            jvmti_send_contended_enter_or_entered_monitor_event(monitor, 1);
        }
        hythread_set_suspend_disable(disable_count);
    }

    // busy wait and inflate
    // reload pointer after safepoints
    lockword = vm_object_get_lockword_addr(monitor);
    while ((status = hythread_thin_monitor_try_enter(lockword)) == TM_ERROR_EBUSY) {
        hythread_safe_point();
        hythread_exception_safe_point();
        lockword = vm_object_get_lockword_addr(monitor);

        // Once inflated, block on the fat monitor instead of spinning.
        if (hythread_is_fat_lock(*lockword)) {
            status = hythread_thin_monitor_enter(lockword);
            if (status != TM_ERROR_NONE) {
                hythread_suspend_enable();
                assert(0);
                return status;
            }
            goto contended_entered;
        }
        hythread_yield();
    }
    assert(status == TM_ERROR_NONE);
    // Acquired via spinning: inflate so future contenders block properly.
    if (!hythread_is_fat_lock(*lockword)) {
        hythread_inflate_lock(lockword);
    }

// do all ti staff here
contended_entered:
    if (ti_is_enabled()) {
        int disable_count = hythread_reset_suspend_disable();
        if (jvmti_should_report_event(JVMTI_EVENT_MONITOR_CONTENDED_ENTERED)) {
            jvmti_send_contended_enter_or_entered_monitor_event(monitor, 0);
        }
        hythread_set_suspend_disable(disable_count);
        // should be moved to event handler
        jvmti_thread_t jvmti_thread = jthread_get_jvmti_thread(hythread_self());
        // FIX: account blocked time only when the contention start was
        // stamped; enter_begin == 0 means TI was enabled mid-contention.
        if (enter_begin != 0) {
            jvmti_thread->blocked_time += apr_time_now() - enter_begin;
        }
    }

    // Restore RUNNABLE state now that the monitor is held.
    hythread_thread_lock(native_thread);
    state = hythread_get_state(native_thread);
    state &= ~TM_THREAD_STATE_BLOCKED_ON_MONITOR_ENTER;
    state |= TM_THREAD_STATE_RUNNABLE;
    status = hythread_set_state(native_thread, state);
    assert(status == TM_ERROR_NONE);
    hythread_thread_unlock(native_thread);

entered:
    if (ti_is_enabled()) {
        jthread_add_owned_monitor(monitor);
    }
    hythread_suspend_enable();
    return TM_ERROR_NONE;
} // jthread_monitor_enter