/**
 * Inflates a compressed (thin) lockword into a fat monitor.
 *
 * Caller must own the thin lock (asserted below) and must have suspend
 * disabled. On success the lockword is rewritten to reference the new fat
 * monitor's id in the global lock table, and the fat monitor is returned
 * already entered (once per recursion level of the thin lock).
 *
 * @param[in] lockword_ptr  address of the thin-monitor lockword to inflate
 * @return the fat monitor, or NULL if monitor allocation or enter failed
 */
hythread_monitor_t VMCALL hythread_inflate_lock(hythread_thin_monitor_t *lockword_ptr) {
    hythread_monitor_t fat_monitor;
    IDATA status;
    IDATA fat_monitor_id;
    U_32 lockword;
    int i;

    // We don't need a write lock on lock_table during this whole function
    // because the only invariant we need is "a fat lock is not in the fat
    // lock table before we put it". That invariant holds because we hold
    // monitor->mutex throughout, so this cannot be called twice for the
    // single monitor concurrently.
    lockword = *lockword_ptr;
    if (IS_FAT_LOCK (lockword)) {
        // Already inflated (e.g. by a racing path) — just resolve the id.
        return locktable_get_fat_monitor(FAT_LOCK_ID(lockword));
    }
#ifdef LOCK_RESERVATION
    // Unreserve the lock first; inflation requires an unreserved lockword.
    if (IS_RESERVED(lockword)) {
        unreserve_self_lock(lockword_ptr);
        lockword = *lockword_ptr;  // re-read: unreserve rewrites the lockword
    }
    assert(!IS_RESERVED(lockword));
#endif
    assert(hythread_owns_thin_lock(tm_self_tls, lockword));
    assert(!hythread_is_suspend_enabled());
    CTRACE(("inflation begin for %x thread: %d", lockword, tm_self_tls->thread_id));

    // Allocate the fat monitor.
    status = hythread_monitor_init(&fat_monitor, 0);
    //assert(status == TM_ERROR_NONE);
    if (status != TM_ERROR_NONE) {
        return NULL;
    }
    status = hythread_monitor_enter(fat_monitor);
    if (status != TM_ERROR_NONE) {
        // NOTE(review): the freshly allocated fat_monitor is not destroyed on
        // this failure path — looks like a potential leak; confirm whether
        // hythread_monitor_destroy should be called here.
        return NULL;
    }

    // Transfer the thin lock's recursion count onto the fat monitor by
    // entering it once per recursion level.
    for (i = RECURSION(lockword); i > 0; i--) {
        CTRACE(("inflate recursion monitor"));
        status = hythread_monitor_enter(fat_monitor);
        assert(status == TM_ERROR_NONE);
    }

    // Publish the fat monitor in the lock table and point the lockword at it.
    fat_monitor_id = locktable_put_fat_monitor(fat_monitor);
    set_fat_lock_id(lockword_ptr, fat_monitor_id);
    CTRACE(("hythread_inflate_lock %d thread: %d\n", FAT_LOCK_ID(*lockword_ptr),
        tm_self_tls->thread_id));
    //assert(FAT_LOCK_ID(*lockword_ptr) != 2);
    CTRACE(("FAT ID : 0x%x", *lockword_ptr));
#ifdef LOCK_RESERVATION
    assert(!IS_RESERVED(*lockword_ptr));
#endif
    return fat_monitor;
}
/**
 * Locks thin monitor.
 *
 * Fast path: a single uncontended try-enter. On contention, spins with
 * yields; if the lock gets inflated by another thread while spinning, blocks
 * on the fat monitor instead. If this thread wins the thin lock while it is
 * still contended, it inflates the lock itself.
 *
 * @param[in] lockword_ptr monitor addr
 * @return TM_ERROR_NONE on success, TM_ERROR_OUT_OF_MEMORY if inflation
 *         fails, or the status of the fat-monitor enter
 */
IDATA hythread_thin_monitor_enter(hythread_thin_monitor_t *lockword_ptr) {
    hythread_monitor_t fat_monitor;
    IDATA status;
    int saved_disable_count;

    assert(lockword_ptr);

    // Fast path: uncontended acquire.
    if (hythread_thin_monitor_try_enter(lockword_ptr) == TM_ERROR_NONE) {
        return TM_ERROR_NONE;
    }

    // Contended: spin until we either acquire the thin lock or observe that
    // the lock has been inflated to a fat monitor.
    while (hythread_thin_monitor_try_enter(lockword_ptr) == TM_ERROR_EBUSY) {
        if (IS_FAT_LOCK(*lockword_ptr)) {
            // Lock was inflated by another thread: resolve the fat monitor
            // from the lock table and block on it.
            fat_monitor = locktable_get_fat_monitor(FAT_LOCK_ID(*lockword_ptr));
            CTRACE((" lock %d\n", FAT_LOCK_ID(*lockword_ptr)));
            // Re-enable suspension around the blocking enter, then restore
            // the previous suspend-disable depth.
            saved_disable_count = hythread_reset_suspend_disable();
            status = hythread_monitor_enter(fat_monitor);
            hythread_set_suspend_disable(saved_disable_count);
            return status;
        }
        //hythread_safe_point();
        hythread_yield();
    }

    // We now own the lock (try_enter succeeded in the loop condition).
    if (IS_FAT_LOCK(*lockword_ptr)) {
        // Lock already inflated — entering the fat path already counted us.
        return TM_ERROR_NONE;
    }

    // We won a contended thin lock: inflate it so that other waiters can
    // block instead of spinning.
    CTRACE(("inflate_contended thin_lcok%d\n", ++inflate_contended));
    fat_monitor = hythread_inflate_lock(lockword_ptr);
    if (fat_monitor == NULL) {
        return TM_ERROR_OUT_OF_MEMORY;
    }
    return TM_ERROR_NONE;
}
/**
 * Thread entry point used to interrupt waiters on a monitor.
 *
 * Enters the given monitor, wakes every thread waiting on its condition
 * variable, then terminates the current thread via hythread_exit().
 *
 * @param[in] args the target monitor, passed as a void pointer
 * @return 0 (never actually reached if hythread_exit does not return)
 */
static IDATA HYTHREAD_PROC hythread_interrupter(void *args) {
    hythread_monitor_t monitor = (hythread_monitor_t)args;
    IDATA rc;

    rc = hythread_monitor_enter(monitor);
    assert(rc == TM_ERROR_NONE);

    rc = hycond_notify_all(&monitor->condition);
    assert(rc == TM_ERROR_NONE);

    hythread_exit(monitor);
    return 0;
}
/**
 * Gains the ownership over monitor.
 *
 * Current thread blocks if the specified monitor is owned by other thread.
 * The monitor id is resolved through the JVMTI monitor table; safe points
 * are executed after the enter so pending suspend/exception requests are
 * honored.
 *
 * @param[in] mon_ptr monitor
 * @return TM_ERROR_INVALID_MONITOR if the id is unknown, otherwise the
 *         status of the underlying monitor enter
 */
IDATA VMCALL jthread_raw_monitor_enter(jrawMonitorID mon_ptr) {
    hythread_monitor_t mon;
    IDATA status;

    // Resolve the raw monitor id to the actual monitor object.
    mon = (hythread_monitor_t)array_get(jvmti_monitor_table, (UDATA)mon_ptr);
    if (NULL == mon) {
        return TM_ERROR_INVALID_MONITOR;
    }

    status = hythread_monitor_enter(mon);

    // Process any pending suspension / exception requests.
    hythread_safe_point();
    hythread_exception_safe_point();
    return status;
} // jthread_raw_monitor_enter
/**
 * Puts the current thread to sleep for the given duration.
 *
 * Sleeps on the thread's own monitor: the monitor is entered, ownership is
 * temporarily relinquished (owner cleared, wait_count bumped) and the thread
 * waits on the monitor's condition variable until the timeout elapses or,
 * when interruptable, an interrupt arrives.
 *
 * @param[in] millis        milliseconds to sleep
 * @param[in] nanos         additional nanoseconds to sleep
 * @param[in] interruptable non-zero if the sleep may be interrupted
 * @return TM_ERROR_NONE on normal completion or timeout,
 *         TM_ERROR_INTERRUPT if interrupted while interruptable,
 *         TM_ERROR_UNATTACHED_THREAD if the caller is not attached
 */
IDATA thread_sleep_impl(I_64 millis, IDATA nanos, IDATA interruptable) {
    IDATA status;
    IDATA result;
    hythread_t self;
    hythread_monitor_t mon;

    // Zero duration means "just yield".
    if (nanos == 0 && millis == 0) {
        hythread_yield();
        return TM_ERROR_NONE;
    }
    if (!(self = hythread_self())) {
        // Report error in case current thread is not attached
        return TM_ERROR_UNATTACHED_THREAD;
    }

    // Grab thread monitor
    mon = self->monitor;
    status = hythread_monitor_enter(mon);
    assert(status == TM_ERROR_NONE);
    assert(mon->recursion_count == 0);
    // Relinquish ownership bookkeeping so the condvar wait below behaves as
    // an ordinary monitor wait; restored symmetrically after the loop.
    mon->owner = NULL;
    mon->wait_count++;

    // Set thread state
    status = port_mutex_lock(&self->mutex);
    assert(status == TM_ERROR_NONE);
    self->waited_monitor = mon;
    self->state |= TM_THREAD_STATE_SLEEPING;
    status = port_mutex_unlock(&self->mutex);
    assert(status == TM_ERROR_NONE);

    // Wait loop: re-wait after spurious wakeups, decrementing the remaining
    // timeout each pass.
    do {
        apr_time_t start;
        assert(mon->notify_count >= 0);
        assert(mon->notify_count < mon->wait_count);
        start = apr_time_now();

        result = condvar_wait_impl(&mon->condition, &mon->mutex, millis, nanos, interruptable);
        if (result != TM_ERROR_NONE) {
            break;
        }
        // we should not change millis and nanos if both are 0 (meaning "no timeout")
        if (millis || nanos) {
            // elapsed is in microseconds (apr_time_t granularity — per APR docs).
            apr_interval_time_t elapsed = apr_time_now() - start;
            // Subtract the sub-millisecond remainder (usec % 1000, converted
            // to nanoseconds) from nanos, borrowing from millis on underflow.
            nanos -= (IDATA)((elapsed % 1000) * 1000);
            if (nanos < 0) {
                millis -= elapsed/1000 + 1;
                nanos += 1000000;
            } else {
                millis -= elapsed/1000;
            }
            if (millis < 0) {
                // Full duration elapsed — report timeout (normal completion).
                assert(status == TM_ERROR_NONE);
                status = TM_ERROR_TIMEOUT;
                break;
            }
            assert(0 <= nanos && nanos < 1000000);
        }
    } while(1);

    // Restore thread state
    status = port_mutex_lock(&self->mutex);
    assert(status == TM_ERROR_NONE);
    self->state &= ~TM_THREAD_STATE_SLEEPING;
    self->waited_monitor = NULL;
    status = port_mutex_unlock(&self->mutex);
    assert(status == TM_ERROR_NONE);

    // Release thread monitor (re-take ownership bookkeeping first).
    mon->wait_count--;
    mon->owner = self;
    assert(mon->notify_count <= mon->wait_count);
    status = hythread_monitor_exit(mon);
    assert(status == TM_ERROR_NONE);

    // Honor any suspend/exception request that arrived during the sleep.
    if (self->request) {
        hythread_safe_point();
        hythread_exception_safe_point();
    }

    // An interrupt is only surfaced to the caller when interruptable.
    return (result == TM_ERROR_INTERRUPT && 
 interruptable) ? TM_ERROR_INTERRUPT : TM_ERROR_NONE;
}