Example #1
/**
 * Locks the thin monitor, inflating it to a fat monitor if the lock
 * stays contended.
 *
 * @param[in] lockword_ptr monitor address
 */
IDATA hythread_thin_monitor_enter(hythread_thin_monitor_t *lockword_ptr) {
    hythread_monitor_t fat_monitor;
    IDATA status; 
    int saved_disable_count;

    assert(lockword_ptr);    

    if (hythread_thin_monitor_try_enter(lockword_ptr) == TM_ERROR_NONE) {
        return TM_ERROR_NONE;
    }

    while (hythread_thin_monitor_try_enter(lockword_ptr) == TM_ERROR_EBUSY) {
        if (IS_FAT_LOCK(*lockword_ptr)) {
            fat_monitor = locktable_get_fat_monitor(FAT_LOCK_ID(*lockword_ptr)); //  find fat_monitor in lock table
            CTRACE((" lock %d\n", FAT_LOCK_ID(*lockword_ptr)));
            saved_disable_count = hythread_reset_suspend_disable();
            status = hythread_monitor_enter(fat_monitor);
            hythread_set_suspend_disable(saved_disable_count);
            return status; // lock fat_monitor
        } 
        //hythread_safe_point();
        hythread_yield();
    }
    if (IS_FAT_LOCK(*lockword_ptr)) {
        // lock already inflated
        return TM_ERROR_NONE;
    }
    CTRACE(("inflate_contended  thin_lcok%d\n", ++inflate_contended));   
    fat_monitor = hythread_inflate_lock(lockword_ptr);

    if (fat_monitor == NULL) {
        return TM_ERROR_OUT_OF_MEMORY; 
    }
    return TM_ERROR_NONE;
}
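For orientation, a minimal caller sketch follows. guarded_update is a hypothetical helper, and hythread_thin_monitor_exit is assumed to be the matching release call in this API; the thread is assumed to be attached and running with suspend disabled, as the try-enter path asserts.

// Hypothetical caller sketch (not part of the library): guard a critical
// section with a thin monitor. Assumes an attached thread with suspend
// disabled, matching the assertions in hythread_thin_monitor_try_enter().
static void guarded_update(hythread_thin_monitor_t *lockword_ptr) {
    IDATA status = hythread_thin_monitor_enter(lockword_ptr);
    if (status != TM_ERROR_NONE) {
        return; // e.g. TM_ERROR_OUT_OF_MEMORY when inflation failed
    }
    /* ... critical section ... */
    status = hythread_thin_monitor_exit(lockword_ptr); // assumed release call
    assert(status == TM_ERROR_NONE);
}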
int test_jthread_holds_lock(void) {

    tested_thread_sturct_t *tts;
    tested_thread_sturct_t *critical_tts = NULL;
    int blocked_count;
    int i;

    hysem_create(&mon_enter, 0, 1);

    // Initialize tts structures and run all tested threads
    tested_threads_run(run_for_test_jthread_holds_lock);
    

    for (i = 0; i < MAX_TESTED_THREAD_NUMBER; i++){
        blocked_count = 0;
        critical_tts = NULL;

        hysem_wait(mon_enter);

        reset_tested_thread_iterator(&tts);
        while(next_tested_thread(&tts)){
            while(tts->phase == TT_PHASE_NONE) {
                // thread is not started yet
                hythread_yield();
            }
            if (tts->phase == TT_PHASE_IN_CRITICAL_SECTON){
                tf_assert(jthread_holds_lock(tts->java_thread, tts->monitor) > 0);
                tf_assert_null(critical_tts);
                critical_tts = tts;
            } else if (tts->phase != TT_PHASE_DEAD){
                check_tested_thread_phase(tts, TT_PHASE_WAITING_ON_MONITOR);
                tf_assert(jthread_holds_lock(tts->java_thread, tts->monitor) == 0);
                if (tts->phase == TT_PHASE_WAITING_ON_MONITOR){
                    blocked_count++;
                }
            }
        }
        tf_assert(critical_tts); // thread in critical section found
        tf_assert_same(blocked_count, MAX_TESTED_THREAD_NUMBER - i - 1);
        tested_thread_send_stop_request(critical_tts);
        tested_thread_wait_ended(critical_tts);
        check_tested_thread_phase(critical_tts, TT_PHASE_DEAD);
    }

    // Terminate all threads and clear tts structures
    tested_threads_destroy();

    return TEST_PASSED;
}
int test_jthread_get_owned_monitors(void) {

    tested_thread_sturct_t *tts;
    tested_thread_sturct_t *critical_tts;
    int i;
    jint owned_monitors_count;
    jobject *owned_monitors = NULL;

    hysem_create(&mon_enter, 0, 1);

    // Initialize tts structures and run all tested threads
    tested_threads_run(run_for_test_jthread_get_owned_monitors);
    
    for (i = 0; i < MAX_TESTED_THREAD_NUMBER; i++){
        critical_tts = NULL;

        hysem_wait(mon_enter);

        reset_tested_thread_iterator(&tts);
        while(next_tested_thread(&tts)) {
            while(tts->phase == TT_PHASE_NONE) {
                // thread is not started yet
                hythread_yield();
            }
            if (tts->phase == TT_PHASE_IN_CRITICAL_SECTON){
                tf_assert_same(jthread_get_owned_monitors (tts->java_thread,
                    &owned_monitors_count, &owned_monitors), TM_ERROR_NONE);
                tf_assert(critical_tts == NULL);
                critical_tts = tts;
                tf_assert_same(owned_monitors_count, 1);
                tf_assert_same(owned_monitors[0]->object, tts->monitor->object);
            } else if (tts->phase == TT_PHASE_WAITING_ON_MONITOR){
                tf_assert_same(jthread_get_owned_monitors (tts->java_thread,
                    &owned_monitors_count, &owned_monitors), TM_ERROR_NONE);
                tf_assert_same(owned_monitors_count, 0);
            }
        }
        tf_assert(critical_tts);
        tested_thread_send_stop_request(critical_tts);
        tested_thread_wait_ended(critical_tts);
    }
    // Terminate all threads and clear tts structures
    tested_threads_destroy();

    return TEST_PASSED;
}
int test_jthread_get_contended_monitor(void) {

    tested_thread_sturct_t *tts;
    tested_thread_sturct_t *critical_tts = NULL;
    jobject contended_monitor;
    int i;

    hysem_create(&mon_enter, 0, 1);

    // Initialize tts structures and run all tested threads
    tested_threads_run(run_for_test_jthread_get_contended_monitor);
    
    for (i = 0; i < MAX_TESTED_THREAD_NUMBER; i++){
        critical_tts = NULL;
        
        hysem_wait(mon_enter);

        reset_tested_thread_iterator(&tts);
        while(next_tested_thread(&tts)){
            while(tts->phase == TT_PHASE_NONE) {
                // thread is not started yet
                hythread_yield();
            }
            if (tts->phase == TT_PHASE_IN_CRITICAL_SECTON){
                tf_assert_same(jthread_get_contended_monitor(tts->java_thread,
                    &contended_monitor), TM_ERROR_NONE);
                tf_assert_null(contended_monitor);
                tf_assert_null(critical_tts);
                critical_tts = tts;
            } else if (tts->phase != TT_PHASE_DEAD) {
                check_tested_thread_phase(tts, TT_PHASE_WAITING_ON_MONITOR);
                // This can't be guaranteed
                //tf_assert(vm_objects_are_equal(contended_monitor, tts->monitor));
            }
        }
        tf_assert(critical_tts); // thread in critical section found
        tested_thread_send_stop_request(critical_tts);
        tested_thread_wait_ended(critical_tts);
        check_tested_thread_phase(critical_tts, TT_PHASE_DEAD);
    }
    // Terminate all threads and clear tts structures
    tested_threads_destroy();

    return TEST_PASSED;
}
Example #5
IDATA VMCALL hythread_set_to_group(hythread_t thread, hythread_group_t group) {

#ifdef ORDER
    int map_id;
#endif

    IDATA status;
    hythread_t cur, prev;

    assert(thread);
    assert(group);

Retry_lock:    
    // Acquire global TM lock to prevent concurrent access to thread list
    status = hythread_global_lock();
    assert(status == TM_ERROR_NONE);

#ifdef ORDER
#ifdef ORDER_DEBUG
    printf("[TEST]: hythread mapping to object (%d, %d)\n", thread->p_tid, thread->p_count);
#endif
    // Skip the record/replay bookkeeping while the VM is initializing or
    // for threads that have no (p_tid, p_count) mapping yet
    if (!hythread_vm_is_initializing && !(thread->p_tid == 0 && thread->p_count == 0)) {
        if (hythread_get_IsRecord()) {
#ifdef ORDER_DEBUG
            printf("[RECORD]: RECORD IN hythread_set_to_group!!!\n");
#endif
            threadRunOrderFile = fopen("THREAD_CREATE_ORDER.log", "a+");
#ifdef ORDER_DEBUG
            assert(threadRunOrderFile);
#endif
            fprintf(threadRunOrderFile, "%d %d\n", thread->p_tid, thread->p_count);
            fflush(threadRunOrderFile);
            fclose(threadRunOrderFile);
            threadRunOrderFile = NULL;
        }
        else{
//#ifdef ORDER_DEBUG
            printf("[REPLAY]: REPLAY IN hythread_set_to_group!!!\n");
//#endif
            if(threadRunOrderFile == NULL){
                threadRunOrderFile = fopen("THREAD_CREATE_ORDER.log", "r");
            }
#ifdef ORDER_DEBUG
            assert(threadRunOrderFile);
#endif
            if(p_tid == -1 && p_count == -1){
#ifdef ORDER_DEBUG
                if(feof(threadRunOrderFile)){
                    assert(0);
                }
#endif
                fscanf(threadRunOrderFile, "%d %d\n", &p_tid, &p_count);
            }

            if(p_tid == thread->p_tid && p_count == thread->p_count){
                p_tid = -1;
                p_count = -1;
            }
            else{
                IDATA status_temp = hythread_global_unlock();
                assert(status_temp == TM_ERROR_NONE);
//#ifdef ORDER_DEBUG
	       printf("[THREAD_CREATE]: This is not the correct order of thread create, pthread_self %d\n", pthread_self());
//#endif
                usleep(1000);
                hythread_yield();

                goto Retry_lock;
            }
        }
    }
#endif

    assert(thread->os_handle);
    
    if (!thread->thread_id) {
        char free_slot_found = 0;

        unsigned int i;
        for(i = 0; i < MAX_ID; i++) {
            // increase next_id to allow thread_id change 
            next_id++;
            if (next_id == MAX_ID) {
                next_id = 1;
            }
            if (fast_thread_array[next_id] == NULL) {
                thread->thread_id = next_id;
                free_slot_found = 1;


#ifdef ORDER

                {
                    char name[40];
                    FILE* thread_map = NULL;
                    int current_pthread_id = (int)thread->os_handle;

                    sprintf(name, "THREAD_MAP_WORKING_CLASSLIB.log");

                    thread_map = fopen(name, "a+");
#ifdef ORDER_DEBUG
                    assert(thread_map);
#endif

                    fwrite(&next_id, sizeof(int), 1, thread_map);
                    fwrite(&current_pthread_id, sizeof(int), 1, thread_map);

                    fflush(thread_map);
                    fclose(thread_map);
                }

                for (map_id = 0 ; map_id < ORDER_THREAD_NUM ; map_id ++ )
                {
                   if (pthreadid_tid_mapping[map_id][0] == (int)thread->os_handle)
                   {
                       Thread_Map tmap;
                       tmap.thread_global_id = next_id;
                       tmap.pthread_id = (int)thread->os_handle;
                       tmap.thread_assigned_id = pthreadid_tid_mapping[map_id][1];
//                       if (threadMapFile == NULL)
                       threadMapFile = fopen("RECORD_THREAD_MAP.log", "a+");
                       fwrite((char *)&tmap, 1, sizeof(Thread_Map), threadMapFile);
                       fflush(threadMapFile);
                       fclose(threadMapFile);
                       threadMapFile = NULL;

#ifdef ORDER_DEBUG
                       printf("pthread id exists : %d\n", (int)pthreadid_tid_mapping[map_id][0]);
                       printf("tid mapping : %d -> %d\n",pthreadid_tid_mapping[map_id][0], pthreadid_tid_mapping[map_id][1]);
#endif

                       break;
                   }
                   else if (pthreadid_tid_mapping[map_id][0] == 0)
                   {
                       Thread_Map tmap;
                       tmap.thread_global_id = next_id;
                       tmap.pthread_id = (int)thread->os_handle;
                       tmap.thread_assigned_id = next_id;
//                       if (threadMapFile == NULL)
                       threadMapFile = fopen("RECORD_THREAD_MAP.log", "a+");
                       fwrite((char *)&tmap, 1, sizeof(Thread_Map), threadMapFile);
                       fflush(threadMapFile);
                       fclose(threadMapFile);
                       threadMapFile = NULL;

                       pthreadid_tid_mapping[map_id][0] = (int)thread->os_handle;
                       pthreadid_tid_mapping[map_id][1] = next_id;

#ifdef ORDER_DEBUG
                       printf("new pthread id : %d\n", (int)pthreadid_tid_mapping[map_id][0]);
                       printf("tid mapping : %d -> %d\n", pthreadid_tid_mapping[map_id][0], pthreadid_tid_mapping[map_id][1]);
#endif

                       break;
                   }
                   if (map_id == (ORDER_THREAD_NUM - 1))
                   {
                       printf("[yzm]Error : Thread Map overflow!\n");
                       assert(0);
                       exit(0);
                   }
                }
#endif

                break;
            }
        }

        if (!free_slot_found) {
            status = hythread_global_unlock();
            assert(status == TM_ERROR_NONE);
            return TM_ERROR_OUT_OF_MEMORY;
        }
    }

    assert(thread->thread_id);
    fast_thread_array[thread->thread_id] = thread;

    thread->group = group;
    group->threads_count++;

    // Splice the thread into the group's circular doubly-linked list,
    // right behind the head sentinel (group->thread_list)
    cur  = group->thread_list->next;
    prev = cur->prev;
    thread->next = cur;
    thread->prev = prev;
    prev->next = cur->prev = thread;

    port_mutex_lock(&thread->mutex);
    thread->state |= TM_THREAD_STATE_ALIVE | TM_THREAD_STATE_RUNNABLE;
    port_mutex_unlock(&thread->mutex);

    status = hythread_global_unlock();
    assert(status == TM_ERROR_NONE);

    return TM_ERROR_NONE;
}
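The splice above is the standard sentinel-based circular list insertion. Below is a self-contained sketch of the same idiom; node_t is hypothetical, standing in for hythread_t, and 's' plays the role of group->thread_list.

// Self-contained sketch of the sentinel-based circular list insertion
// used above (illustrative only).
typedef struct node {
    struct node *next;
    struct node *prev;
} node_t;

// Insert 'n' right after the sentinel 's', i.e. at the head of the list.
static void list_insert_after_sentinel(node_t *s, node_t *n) {
    node_t *cur  = s->next;   // old first element (s itself when the list is empty)
    node_t *prev = cur->prev; // always the sentinel s
    n->next = cur;
    n->prev = prev;
    prev->next = cur->prev = n;
}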
Example #6
IDATA thread_sleep_impl(I_64 millis, IDATA nanos, IDATA interruptable) {
    IDATA status;
    IDATA result;
    hythread_t self;
    hythread_monitor_t mon;

    if (nanos == 0 && millis == 0) {
        hythread_yield();
        return TM_ERROR_NONE;
    }
    if (!(self = hythread_self())) {
        // Report error in case current thread is not attached
        return TM_ERROR_UNATTACHED_THREAD;
    }

    // Grab thread monitor
    mon = self->monitor;
    status = hythread_monitor_enter(mon);
    assert(status == TM_ERROR_NONE);
    assert(mon->recursion_count == 0);
    mon->owner = NULL;
    mon->wait_count++;

    // Set thread state
    status = port_mutex_lock(&self->mutex);
    assert(status == TM_ERROR_NONE);
    self->waited_monitor = mon;
    self->state |= TM_THREAD_STATE_SLEEPING;
    status = port_mutex_unlock(&self->mutex);
    assert(status == TM_ERROR_NONE);

    do {
        apr_time_t start;
        assert(mon->notify_count >= 0);
        assert(mon->notify_count < mon->wait_count);
        start = apr_time_now();

        result = condvar_wait_impl(&mon->condition, &mon->mutex, millis, nanos, interruptable);
        if (result != TM_ERROR_NONE) {
            break;
        }
        // we should not change millis and nanos if both are 0 (meaning "no timeout")
        if (millis || nanos) {
            apr_interval_time_t elapsed = apr_time_now() - start;
            nanos -= (IDATA)((elapsed % 1000) * 1000);
            if (nanos < 0) {
                millis -= elapsed/1000 + 1;
                nanos += 1000000;
            } else {
                millis -= elapsed/1000;
            }
            if (millis < 0) {
                assert(status == TM_ERROR_NONE);
                status = TM_ERROR_TIMEOUT;
                break;
            }
            assert(0 <= nanos && nanos < 1000000);
        }
    } while(1);

    // Restore thread state
    status = port_mutex_lock(&self->mutex);
    assert(status == TM_ERROR_NONE);
    self->state &= ~TM_THREAD_STATE_SLEEPING;
    self->waited_monitor = NULL;
    status = port_mutex_unlock(&self->mutex);
    assert(status == TM_ERROR_NONE);

    // Release thread monitor
    mon->wait_count--;
    mon->owner = self;
    assert(mon->notify_count <= mon->wait_count);
    status = hythread_monitor_exit(mon);
    assert(status == TM_ERROR_NONE);

    if (self->request) {
        hythread_safe_point();
        hythread_exception_safe_point();
    }

    return (result == TM_ERROR_INTERRUPT && interruptable)
        ? TM_ERROR_INTERRUPT : TM_ERROR_NONE;
}
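For context, a sketch of how thin public wrappers might forward to thread_sleep_impl. The wrapper names follow the hythread naming convention, but these exact signatures and forwards are assumptions, not confirmed library entry points.

// Hedged sketch: thin wrappers over thread_sleep_impl(). Treat the names
// and the bare 0/1 interruptable flags as assumptions.
IDATA VMCALL hythread_sleep(I_64 millis) {
    return thread_sleep_impl(millis, 0, /* interruptable */ 0);
}

IDATA VMCALL hythread_sleep_interruptable(I_64 millis, IDATA nanos) {
    return thread_sleep_impl(millis, nanos, /* interruptable */ 1);
}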
Example #7
/**
 * Attempts to lock the thin monitor.
 * If the monitor is already locked, this call returns immediately with
 * TM_ERROR_EBUSY.
 *
 * @param[in] lockword_ptr monitor address
 */
IDATA hythread_thin_monitor_try_enter(hythread_thin_monitor_t *lockword_ptr) {
    U_32 lockword;
    // workaround for a strange Intel compiler bug
#if defined (__INTEL_COMPILER) && defined (LINUX)
    volatile
#endif
    IDATA this_id = tm_self_tls->thread_id;
    IDATA lock_id;
    IDATA status;
    hythread_monitor_t fat_monitor;
    int UNUSED i;
    assert(!hythread_is_suspend_enabled());
    assert((UDATA)lockword_ptr > 4);    
    assert(tm_self_tls);
    
    // By DRLVM design rules, the lockword (see the description in the thin
    // locks paper) is modified without compare-and-exchange only by the owner
    // thread. If tools like Intel Thread Checker report a race on this line,
    // it may therefore be a false positive.
    
    lockword = *lockword_ptr;       
    lock_id = THREAD_ID(lockword);
    //CTRACE(("try lock %x %d", this_id, RECURSION(lockword)));
    
    // Check if the lock is already reserved or owned by this thread
    if (lock_id == this_id) {    
        if (RECURSION(lockword) == MAX_RECURSION) {
            //inflate lock in case of recursion overflow
            fat_monitor = hythread_inflate_lock(lockword_ptr);

            if (fat_monitor == NULL) {
                return TM_ERROR_OUT_OF_MEMORY; 
            }
            return hythread_monitor_try_enter(fat_monitor);
            //break FAT_LOCK;
        } else {
            CTRACE(("try lock %x count:%d", this_id, res_lock_count++)); 
            // increase recursion
            RECURSION_INC(lockword_ptr, lockword);
            return TM_ERROR_NONE;
        }        
    } 

    // Fast path didn't work: someone else is holding the monitor (or it isn't reserved yet).

    // DO SPIN FOR A WHILE, this will decrease the number of fat locks.
#ifdef SPIN_COUNT
    for (i = SPIN_COUNT; i >=0; i--, lockword = *lockword_ptr, lock_id = THREAD_ID(lockword)) { 
#endif

        // Check if monitor is free and thin
        if (lock_id == 0) {
            // Monitor is free
            assert( RECURSION(lockword) < 1);
            assert(this_id > 0 && this_id < 0x8000); 
            // Acquire monitor
            if (0 != port_atomic_cas16 (((volatile apr_uint16_t*) lockword_ptr)+1, 
                                        (apr_uint16_t) this_id, 0)) {

#ifdef SPIN_COUNT
                continue; 
#else
                return TM_ERROR_EBUSY;
#endif
            }

#ifdef LOCK_RESERVATION
            //lockword = *lockword_ptr; // this reloading of lockword may be odd, need to investigate;
            if (IS_RESERVED(lockword)) {
                CTRACE(("initially reserve lock %x count: %d ", *lockword_ptr, init_reserve_cout++));
                RECURSION_INC(lockword_ptr, *lockword_ptr);
            }
#endif
            CTRACE(("CAS lock %x count: %d ", *lockword_ptr, cas_cout++));
            return TM_ERROR_NONE;
        } else 

            // Fat monitor
            if (IS_FAT_LOCK(lockword)) {
                CTRACE(("FAT MONITOR %d \n", ++fat_lock2_count/*, vm_get_object_class_name(lockword_ptr-1)*/));  
                fat_monitor = locktable_get_fat_monitor(FAT_LOCK_ID(lockword)); //  find fat_monitor in lock table
            
                status = hythread_monitor_try_enter(fat_monitor);
#ifdef SPIN_COUNT
                if (status == TM_ERROR_EBUSY) {
                    continue; 
                }
#endif
                return status;
            }

#ifdef LOCK_RESERVATION
        // unreserved busy lock
            else if (IS_RESERVED(lockword)) {
                status = hythread_unreserve_lock(lockword_ptr);
                if (status != TM_ERROR_NONE) {
#ifdef SPIN_COUNT
                    if (status == TM_ERROR_EBUSY) {
                        continue;
                    }
#endif //SPIN_COUNT
                    return status;
                }
                return hythread_thin_monitor_try_enter(lockword_ptr);
            }
#endif 
#ifdef SPIN_COUNT
        hythread_yield();
    }
#endif
    return TM_ERROR_EBUSY;
}
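A hypothetical non-blocking caller, showing the TM_ERROR_EBUSY contract; try_work is illustrative only, and hythread_thin_monitor_exit is assumed to be the matching release call.

// Hypothetical polling caller (illustrative only): try the lock without
// blocking and back off while it is busy.
static void try_work(hythread_thin_monitor_t *lockword_ptr) {
    IDATA status = hythread_thin_monitor_try_enter(lockword_ptr);
    if (status == TM_ERROR_EBUSY) {
        /* ... monitor busy: do other work and retry later ... */
        return;
    }
    assert(status == TM_ERROR_NONE);
    /* ... critical section ... */
    hythread_thin_monitor_exit(lockword_ptr); // assumed release call
}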
Example #8
/**
 * Gains ownership of the monitor.
 *
 * The current thread blocks if the specified monitor is owned by another thread.
 *
 * @param[in] monitor object where the monitor is located
 * @sa JNI::MonitorEnter()
 */
IDATA VMCALL jthread_monitor_enter(jobject monitor)
{
    IDATA state;
    hythread_t native_thread;
    apr_time_t enter_begin = 0; // guard against TI being enabled only after the first check

    assert(monitor);
    hythread_suspend_disable();
    hythread_thin_monitor_t *lockword = vm_object_get_lockword_addr(monitor);
    IDATA status = hythread_thin_monitor_try_enter(lockword);
    if (status != TM_ERROR_EBUSY) {
        goto entered;
    }

#ifdef LOCK_RESERVATION
    // busy unreserve lock before blocking and inflating
    while (TM_ERROR_NONE != hythread_unreserve_lock(lockword)) {
        hythread_yield();
        hythread_safe_point();
        hythread_exception_safe_point();
        lockword = vm_object_get_lockword_addr(monitor);
    }
    status = hythread_thin_monitor_try_enter(lockword);
    if (status != TM_ERROR_EBUSY) {
        goto entered;
    }
#endif //LOCK_RESERVATION

    native_thread = hythread_self();
    hythread_thread_lock(native_thread);
    state = hythread_get_state(native_thread);
    state &= ~TM_THREAD_STATE_RUNNABLE;
    state |= TM_THREAD_STATE_BLOCKED_ON_MONITOR_ENTER;
    status = hythread_set_state(native_thread, state);
    assert(status == TM_ERROR_NONE);
    hythread_thread_unlock(native_thread);

    // should be moved to event handler
    if (ti_is_enabled()) {
        enter_begin = apr_time_now();
        int disable_count = hythread_reset_suspend_disable();
        jthread_set_owned_monitor(monitor);
        if(jvmti_should_report_event(JVMTI_EVENT_MONITOR_CONTENDED_ENTER)) {
            jvmti_send_contended_enter_or_entered_monitor_event(monitor, 1);
        }
        hythread_set_suspend_disable(disable_count);
    }

    // busy wait and inflate
    // reload pointer after safepoints
    lockword = vm_object_get_lockword_addr(monitor);
    while ((status =
            hythread_thin_monitor_try_enter(lockword)) == TM_ERROR_EBUSY)
    {
        hythread_safe_point();
        hythread_exception_safe_point();
        lockword = vm_object_get_lockword_addr(monitor);

        if (hythread_is_fat_lock(*lockword)) {
            status = hythread_thin_monitor_enter(lockword);
            if (status != TM_ERROR_NONE) {
                hythread_suspend_enable();
                assert(0);
                return status;
            }
            goto contended_entered;
        }
        hythread_yield();
    }
    assert(status == TM_ERROR_NONE);
    if (!hythread_is_fat_lock(*lockword)) {
        hythread_inflate_lock(lockword);
    }

// do all the TI stuff here
contended_entered:
    if (ti_is_enabled()) {
        int disable_count = hythread_reset_suspend_disable();
        if(jvmti_should_report_event(JVMTI_EVENT_MONITOR_CONTENDED_ENTERED)) {
            jvmti_send_contended_enter_or_entered_monitor_event(monitor, 0);
        }
        hythread_set_suspend_disable(disable_count);
        // should be moved to event handler
        jvmti_thread_t jvmti_thread =
            jthread_get_jvmti_thread(hythread_self());
        jvmti_thread->blocked_time += apr_time_now() - enter_begin;
    }

    hythread_thread_lock(native_thread);
    state = hythread_get_state(native_thread);
    state &= ~TM_THREAD_STATE_BLOCKED_ON_MONITOR_ENTER;
    state |= TM_THREAD_STATE_RUNNABLE;
    status = hythread_set_state(native_thread, state);
    assert(status == TM_ERROR_NONE);
    hythread_thread_unlock(native_thread);

entered:
    if (ti_is_enabled()) {
        jthread_add_owned_monitor(monitor);
    }
    hythread_suspend_enable();
    return TM_ERROR_NONE;
} // jthread_monitor_enter
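A hypothetical caller sketch, mirroring the JNI MonitorEnter/MonitorExit pairing the doc comment points to; with_monitor is illustrative, and jthread_monitor_exit is assumed to be the matching release call.

// Hypothetical caller (illustrative only): enter and exit must be balanced,
// mirroring JNI MonitorEnter/MonitorExit semantics.
static IDATA with_monitor(jobject monitor) {
    IDATA status = jthread_monitor_enter(monitor);
    if (status != TM_ERROR_NONE) {
        return status;
    }
    /* ... state guarded by the monitor ... */
    return jthread_monitor_exit(monitor); // assumed matching release call
}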