Example #1
/**
 * Attempts to gain ownership of the monitor without blocking.
 *
 * @param[in] mon_ptr monitor
 * @return 0 if the monitor was acquired successfully.
 */
IDATA VMCALL jthread_raw_monitor_try_enter(jrawMonitorID mon_ptr)
{
    hythread_monitor_t monitor =
         (hythread_monitor_t)array_get(jvmti_monitor_table, (UDATA)mon_ptr);
    if (!monitor) {
        return TM_ERROR_INVALID_MONITOR;
    }
    return hythread_monitor_try_enter(monitor);
} // jthread_raw_monitor_try_enter
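A minimal usage sketch (not from the original source): it assumes TM_ERROR_NONE signals success and that a matching jthread_raw_monitor_exit release call exists, as in the JVMTI raw-monitor API.

// Hypothetical caller: take the raw monitor if it is free, do some work,
// and release it; otherwise fall through without blocking.
static void try_enter_example(jrawMonitorID mon)
{
    if (jthread_raw_monitor_try_enter(mon) == TM_ERROR_NONE) {
        /* ... critical section ... */
        jthread_raw_monitor_exit(mon);  // assumed counterpart call
    } else {
        /* monitor busy or invalid; do something else */
    }
}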
/**
 * Interrupts a thread.
 *
 * If the thread is currently blocked (i.e., waiting in monitor_wait or
 * sleeping), resumes the thread and causes it to return from the blocking
 * function with HYTHREAD_INTERRUPTED.
 *
 * @param[in] thread the thread to be interrupted
 * @return none
 */
void VMCALL hythread_interrupt(hythread_t thread) {
    IDATA status;
    hythread_monitor_t mon;

    apr_atomic_set32(&thread->interrupted, TRUE);

    mon = thread->waited_monitor;
    if (mon) {
        // If the thread is waiting on a monitor, notify it so it can
        // observe the interrupt.
        if (hythread_monitor_try_enter(mon) == TM_ERROR_NONE) {
            status = hycond_notify_all(&mon->condition);
            assert(status == TM_ERROR_NONE);
            status = hythread_monitor_exit(mon);
            assert(status == TM_ERROR_NONE);
        } else {
            // The monitor is held by another thread: spawn a helper thread
            // that will deliver the notification once the monitor can be
            // entered.
            status = hythread_create(NULL, 0, 0, 0,
                hythread_interrupter, (void *)mon);
            assert(status == TM_ERROR_NONE);
        }
    }
} // hythread_interrupt
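The fallback branch spawns hythread_interrupter rather than blocking inside hythread_interrupt itself. Below is a plausible sketch of that helper, inferred from the call site; it is an assumption, not the actual DRLVM routine, which may differ.

// Hypothetical interrupter body: block until the monitor can be entered,
// wake all waiters, and release it. The signature follows the entry-point
// convention implied by the hythread_create() call above.
static int VMCALL hythread_interrupter_sketch(void *args)
{
    hythread_monitor_t mon = (hythread_monitor_t)args;
    IDATA status;

    status = hythread_monitor_enter(mon);        // blocking enter
    assert(status == TM_ERROR_NONE);
    status = hycond_notify_all(&mon->condition); // wake the interrupted waiter
    assert(status == TM_ERROR_NONE);
    status = hythread_monitor_exit(mon);
    assert(status == TM_ERROR_NONE);
    return 0;
}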
Example #3
/**
 * Attempts to lock a thin monitor.
 * If the monitor is already locked, this call returns immediately with
 * TM_ERROR_EBUSY.
 *
 * @param[in] lockword_ptr monitor address
 */
IDATA hythread_thin_monitor_try_enter(hythread_thin_monitor_t *lockword_ptr) {
    U_32 lockword;
    // Workaround for a strange Intel compiler bug.
#if defined (__INTEL_COMPILER) && defined (LINUX)
    volatile
#endif
    IDATA this_id = tm_self_tls->thread_id;
    IDATA lock_id;
    IDATA status;
    hythread_monitor_t fat_monitor;
    int UNUSED i;
    assert(!hythread_is_suspend_enabled());
    assert((UDATA)lockword_ptr > 4);    
    assert(tm_self_tls);
    
    // By DRLVM design rules the lockword (see the description in the thin
    // locks paper) is modified without compare-and-exchange only by the
    // owner thread. If tools like Intel Thread Checker report a race on
    // this line, it is likely a false positive.

    lockword = *lockword_ptr;
    lock_id = THREAD_ID(lockword);
    //CTRACE(("try lock %x %d", this_id, RECURSION(lockword)));
    
    // Check if the lock is already reserved or owned by this thread
    if (lock_id == this_id) {    
        if (RECURSION(lockword) == MAX_RECURSION) {
            // Inflate the lock on recursion overflow.
            fat_monitor = hythread_inflate_lock(lockword_ptr);

            if (fat_monitor == NULL) {
                return TM_ERROR_OUT_OF_MEMORY; 
            }
            return hythread_monitor_try_enter(fat_monitor);
        } else {
            CTRACE(("try lock %x count:%d", this_id, res_lock_count++)); 
            // increase recursion
            RECURSION_INC(lockword_ptr, lockword);
            return TM_ERROR_NONE;
        }        
    } 

    // Fast path didn't work: someone else is holding the monitor (or it isn't reserved yet).

    // Spin for a while; this decreases the number of fat locks.
#ifdef SPIN_COUNT
    for (i = SPIN_COUNT; i >=0; i--, lockword = *lockword_ptr, lock_id = THREAD_ID(lockword)) { 
#endif

        // Check if monitor is free and thin
        if (lock_id == 0) {
            // Monitor is free
            assert(RECURSION(lockword) < 1);
            assert(this_id > 0 && this_id < 0x8000); 
            // Acquire the monitor: CAS this thread's id into the 16-bit
            // thread-id half of the lockword, expecting it to be 0 (free).
            if (0 != port_atomic_cas16 (((volatile apr_uint16_t*) lockword_ptr)+1,
                                        (apr_uint16_t) this_id, 0)) {

#ifdef SPIN_COUNT
                continue; 
#else
                return TM_ERROR_EBUSY;
#endif
            }

#ifdef LOCK_RESERVATION
            //lockword = *lockword_ptr; // this reloading of lockword may be odd, need to investigate;
            if (IS_RESERVED(lockword)) {
                CTRACE(("initially reserve lock %x count: %d ", *lockword_ptr, init_reserve_cout++));
                RECURSION_INC(lockword_ptr, *lockword_ptr);
            }
#endif
            CTRACE(("CAS lock %x count: %d ", *lockword_ptr, cas_cout++));
            return TM_ERROR_NONE;
        } else 

            // Fat monitor
            if (IS_FAT_LOCK(lockword)) {
                CTRACE(("FAT MONITOR %d \n", ++fat_lock2_count/*, vm_get_object_class_name(lockword_ptr-1)*/));  
                fat_monitor = locktable_get_fat_monitor(FAT_LOCK_ID(lockword)); //  find fat_monitor in lock table
            
                status = hythread_monitor_try_enter(fat_monitor);
#ifdef SPIN_COUNT
                if (status == TM_ERROR_EBUSY) {
                    continue; 
                }
#endif
                return status;
            }

#ifdef LOCK_RESERVATION
            // The lock is reserved by another thread: revoke the
            // reservation, then retry the thin enter.
            else if (IS_RESERVED(lockword)) {
                status = hythread_unreserve_lock(lockword_ptr);
                if (status != TM_ERROR_NONE) {
#ifdef SPIN_COUNT
                    if (status == TM_ERROR_EBUSY) {
                        continue;
                    }
#endif //SPIN_COUNT
                    return status;
                }
                return hythread_thin_monitor_try_enter(lockword_ptr);
            }
#endif 
#ifdef SPIN_COUNT
        hythread_yield();
    }
#endif
    return TM_ERROR_EBUSY;
}
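For reference, the lockword manipulation above relies on macros (THREAD_ID, RECURSION, IS_FAT_LOCK, FAT_LOCK_ID, IS_RESERVED) whose bit assignments are not shown here. The sketch below is a hypothetical layout consistent with what the code implies (the thread id fits in 15 bits, per the this_id < 0x8000 assert, and occupies its own 16-bit half so it can be claimed with a 16-bit CAS); the real DRLVM definitions may differ.

// Hypothetical 32-bit lockword layout, for illustration only.
//
//   bit  31     : fat-lock flag
//   bits 30..16 : owner thread id (0 when the monitor is free)
//   bits 15..12 : recursion count
//   bit  11     : reservation flag
//
#define SKETCH_IS_FAT_LOCK(w)  (((w) & 0x80000000u) != 0)
#define SKETCH_THREAD_ID(w)    (((w) >> 16) & 0x7fffu)
#define SKETCH_RECURSION(w)    (((w) >> 12) & 0xfu)
#define SKETCH_IS_RESERVED(w)  (((w) >> 11) & 0x1u)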