Example #1
/**
 * Detaches a thread from the threading library.
 * Assumes that the thread being detached is already attached.
 *
 * @param[in] thread A hythread_t representing the thread to be detached.
 *                   If this is NULL, the current thread is detached.
 */
void VMCALL hythread_detach_ex(hythread_t thread)
{
    IDATA status;

    // Acquire global TM lock to prevent concurrent access to thread list
    status = hythread_global_lock();
    assert(status == TM_ERROR_NONE);

    if (thread == NULL) {
        thread = hythread_self();
    }
    assert(thread);

    // Detach if thread is attached to group.
    hythread_remove_from_group(thread);

    if (thread == hythread_self()) // Detach current thread only
        port_thread_detach();

    // FIXME - uncomment after TM state transition complete
    // release thread data
    //hythread_struct_release(thread);
    
    status = hythread_global_unlock();
    assert(status == TM_ERROR_NONE);
}
Example #2
/**
 * Registers the current OS thread with the threading subsystem.
 *
 * @param[in] new_thread a new thread to register
 * @param[in] lib        a thread library or NULL;
 *                       if NULL, the thread is attached to the default library
 * @param[in] group      a thread group or NULL;
 *                       if NULL, the thread goes to the default group
 */
IDATA hythread_attach_ex(hythread_t new_thread,
                         hythread_library_t lib,
                         hythread_group_t group)
{
    int res;
    IDATA status;
    hythread_t self = hythread_self();

    assert(new_thread);

#ifdef ORDER
    U_32 p_tid = new_thread->p_tid;
    U_32 p_count = new_thread->p_count;
#endif

    hythread_struct_init(new_thread);

#ifdef ORDER
    new_thread->p_tid = p_tid;
    new_thread->p_count = p_count;
#endif

    assert(lib == NULL);

    new_thread->library = TM_LIBRARY;
    if (self) {
        // to avoid creating multiple OS handles
        new_thread->os_handle = self->os_handle;
    } else {
        new_thread->os_handle = port_thread_current();
    }
    assert(new_thread->os_handle);

    res = port_thread_attach();
    // It's OK to have an error here when Port shared library
    // is not available yet; only signals/crash handling will
    // not be available for the thread
    //assert(res == 0);

    CTRACE(("TM: native attached: native: %p ", new_thread));

    status = hythread_set_to_group(new_thread,
        (group == NULL ? TM_DEFAULT_GROUP : group));
    hythread_set_self(new_thread);
    assert(new_thread == hythread_self());

    if (self) {
        // remove old attached thread
        hythread_remove_from_group(self);
        self->thread_id = new_thread->thread_id;
    }
    return status;
}
Example #3
/**
 * Allocates and zero-initializes a VM_thread structure.
 */
vm_thread_t jthread_allocate_thread()
{
    vm_thread_t vm_thread =
            (vm_thread_t)STD_CALLOC(1, sizeof(struct VM_thread));
    assert(vm_thread);
    ((hythread_t)vm_thread)->java_status = TM_STATUS_ALLOCATED;

#ifdef ORDER
    ((hythread_t)vm_thread)->p_tid = hythread_self()->thread_id;
    hythread_self()->thread_create_count++;
    ((hythread_t)vm_thread)->p_count = hythread_self()->thread_create_count;
#endif

    return vm_thread;
} // jthread_allocate_thread
Example #4
/**
 * Releases the lock over the threading subsystem.
 * 
 */
IDATA VMCALL hythread_global_unlock() {
    IDATA status;
    assert(!hythread_self() || hythread_is_suspend_enabled());
    status = port_mutex_unlock(&TM_LIBRARY->TM_LOCK);
    assert(status == TM_ERROR_NONE);
    return TM_ERROR_NONE;
}
Example #5
/**
 * Acquires the lock over the threading subsystem.
 * 
 * The lock blocks new thread creation and thread exit operations. 
 */
IDATA VMCALL hythread_global_lock() {
    IDATA status;
    hythread_t self = hythread_self();

    // we need not care about suspension if the thread
    // is not even attached to hythread
    if (self == NULL) {
        return port_mutex_lock(&TM_LIBRARY->TM_LOCK);
    }

    // disable_count must be 0 on a potentially
    // blocking operation to prevent suspension deadlocks,
    // i.e. the thread must be safe for suspension
    assert(hythread_is_suspend_enabled());

    status = port_mutex_lock(&TM_LIBRARY->TM_LOCK);
    assert(status == TM_ERROR_NONE);

    // make sure we do not get a global thread lock
    // while being requested to suspend
    while (self->suspend_count) {
        // give up global thread lock before safepoint,
        // because this thread can be suspended at a safepoint
        status = port_mutex_unlock(&TM_LIBRARY->TM_LOCK);
        assert(status == TM_ERROR_NONE);
        hythread_safe_point();
        status = port_mutex_lock(&TM_LIBRARY->TM_LOCK);
        assert(status == TM_ERROR_NONE);
    }
    return TM_ERROR_NONE;
}
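
A minimal sketch of the intended lock/unlock pairing, following the pattern of the callers shown in this listing (the function name and the guarded work are illustrative; the asserts on the returned status mirror the surrounding code):

static void walk_thread_list_example(void)
{
    IDATA status;

    // Block concurrent thread creation and thread exit while we
    // inspect library-global data.
    status = hythread_global_lock();
    assert(status == TM_ERROR_NONE);

    // ... operate on the thread list or other global TM state here ...

    // Always pair the lock with an unlock; hythread_global_lock() may
    // have passed through a safe point if this thread was asked to suspend.
    status = hythread_global_unlock();
    assert(status == TM_ERROR_NONE);
}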
Example #6
/**
 * Releases global lock of the library associated with the current thread.
 *
 * @param[in] self current thread
 */
void VMCALL hythread_lib_unlock(hythread_t self) {
    IDATA status;

    assert(self == hythread_self());
    status = port_mutex_unlock(&self->library->TM_LOCK);
    assert(status == TM_ERROR_NONE);
}
Example #7
/**
 * Create a new OS thread.
 * 
 * The created thread is attached to the threading library.<br>
 * <br>
 * Unlike POSIX, this doesn't require an attributes structure.
 * Instead, any interesting attributes (e.g. stacksize) are
 * passed in with the arguments.
 *
 * @param[out] handle a pointer to a hythread_t which will point to the thread (if successfully created)
 * @param[in] stacksize the size of the new thread's stack (bytes)<br>
 *                      0 indicates the default size
 * @param[in] priority priorities range from HYTHREAD_PRIORITY_MIN to HYTHREAD_PRIORITY_MAX (inclusive)
 * @param[in] suspend set to non-zero to create the thread in a suspended state.
 * @param[in] func pointer to the function which the thread will run
 * @param[in] data a value to pass to the entrypoint function
 *
 * @return  0 on success or negative value on failure
 *
 * @see hythread_exit, hythread_resume
 */
IDATA VMCALL hythread_create(hythread_t *handle, UDATA stacksize, UDATA priority, UDATA suspend, hythread_entrypoint_t func, void *data) {
    hythread_t thread = (hythread_t)calloc(1, hythread_get_struct_size());
    assert(thread);
    thread->need_to_free = 1;

#ifdef ORDER
    ((hythread_t)thread)->p_tid = hythread_self()->thread_id;
    hythread_self()->thread_create_count++;
    ((hythread_t)thread)->p_count = hythread_self()->thread_create_count;
#endif

    if (handle) {
        *handle = thread;
    }
    return hythread_create_ex(thread, NULL, stacksize, priority, NULL, func, data);
}
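
A short usage sketch for hythread_create, mirroring how the test code in Example #18 drives it; the entrypoint and helper names are illustrative, and the entrypoint signature is an assumption (the test casts its start routine to hythread_entrypoint_t in the same way):

// Illustrative thread body; cast to hythread_entrypoint_t at the call site.
static int example_entrypoint(void *args)
{
    // ... user thread body ...
    return 0;
}

static IDATA spawn_example_thread(void *args)
{
    hythread_t thread = NULL;

    // 0 stacksize and 0 priority select the library defaults;
    // suspend == 0 starts the thread immediately.
    return hythread_create(&thread, 0, 0, 0,
        (hythread_entrypoint_t)example_entrypoint, args);
}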
Example #8
/**
 * Detaches the thread from its group, if it is attached to one.
 */
IDATA VMCALL hythread_remove_from_group(hythread_t thread)
{
    IDATA status;

    if (!thread->group) {
        return TM_ERROR_NONE;
    }

    status = hythread_global_lock();
    assert(status == TM_ERROR_NONE);

    // The thread can be detached by another thread in case
    // of forceful termination by hythread_cancel(), but thread
    // local storage can be zeroed only for the current thread.
    if (thread == hythread_self() ) {
        hythread_set_self(NULL);
    }
    fast_thread_array[thread->thread_id] = NULL;

    thread->prev->next = thread->next;
    thread->next->prev = thread->prev;
    thread->group->threads_count--;
    thread->group = NULL;

    status = hythread_global_unlock();
    assert(status == TM_ERROR_NONE);

    return TM_ERROR_NONE;
}
Example #9
/**
 * Completely releases ownership of the monitor.
 */
IDATA VMCALL hythread_thin_monitor_release(hythread_thin_monitor_t *lockword_ptr)
{
    IDATA status;
    U_32 lockword = *lockword_ptr;
    hythread_t self = hythread_self();

    if (self != hythread_thin_monitor_get_owner(lockword_ptr)) {
        // nothing to do, thread is not an owner of monitor
        return TM_ERROR_NONE;
    }
    if (IS_FAT_LOCK(lockword)) {
        // this is fat monitor
        hythread_monitor_t monitor =
            locktable_get_fat_monitor(FAT_LOCK_ID(lockword));
        monitor->recursion_count = 0;
        status = port_mutex_unlock(&monitor->mutex);
        assert(status == TM_ERROR_NONE);
    } else {
        // this is thin monitor
        while (RECURSION(lockword)) {
            RECURSION_DEC(lockword_ptr, lockword);
            lockword = *lockword_ptr;
        }
        *lockword_ptr = lockword & 0xffff;
    }
    return TM_ERROR_NONE;
}
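
A minimal sketch combining the thin monitor calls the way the test code later in this listing does (thin-lock operations are performed with suspension disabled); the function name is illustrative:

static IDATA thin_monitor_roundtrip_example(void)
{
    hythread_thin_monitor_t lock;
    IDATA status;

    status = hythread_thin_monitor_create(&lock);
    if (status != TM_ERROR_NONE) {
        return status;
    }

    hythread_suspend_disable();
    status = hythread_thin_monitor_enter(&lock);
    assert(status == TM_ERROR_NONE);

    // Drop any recursion levels and give up ownership in one call.
    status = hythread_thin_monitor_release(&lock);
    assert(status == TM_ERROR_NONE);
    hythread_suspend_enable();

    return TM_ERROR_NONE;
}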
Example #10
extern HY_CFUNC void VMCALL 
hythread_exit (hythread_monitor_t monitor) {
   
    if (monitor != NULL && monitor->owner == hythread_self()) {
        monitor->recursion_count = 0;
        hythread_monitor_exit(monitor);
    }
    hythread_detach_ex(NULL);
    port_thread_exit(0);
    // unreachable statement
    abort();
}
Example #11
hy_inline void test_hythread_suspend_disable()
{
    register hythread_t thread = hythread_self();

    // Check that current thread is in default thread group.
    // Justification: GC suspends and enumerates threads from
    // default group only.
    assert(((HyThread_public *)thread)->group == get_java_thread_group());

    ((HyThread_public *)thread)->disable_count++;
    //port_rw_barrier();

    if (thread->request && thread->disable_count == 1) {
        // enter a safe point if a suspend request was set
        // and suspend was disabled just a moment ago
        // (this is the point of entry to the unsafe region)
        hythread_safe_point_other(thread);
    }
    return;
} // test_hythread_suspend_disable
Example #12
/**
 * Attach an OS thread to the threading library.
 *
 * Create a new hythread_t to represent the existing OS thread.
 * Attaching is required when a thread that was created outside of
 * the Hy threading library wants to use any of the Hy threading
 * library functionality.
 *
 * If the OS thread is already attached, handle is set to point
 * to the existing hythread_t.
 *
 * @param[out] handle pointer to a hythread_t to be set (will be ignored if null)
 * @return  0 on success or negative value on failure
 *
 * @note (*handle) should be NULL or point to a hythread_t structure
 * @see hythread_detach
 */
IDATA VMCALL hythread_attach(hythread_t *handle) {
    hythread_t thread;
    IDATA status = TM_ERROR_NONE;
    hythread_t self = hythread_self();

    if (self) {
        // thread is already attached, nothing to do
        thread = self;
    } else {
        // create thread
        thread = (hythread_t)calloc(1, hythread_get_struct_size());
        assert(thread);
        // attach thread
        status = hythread_attach_ex(thread, NULL, NULL);
    }
    if (handle) {
        *handle = thread;
    }
    return status;
}
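
A minimal attach/detach round trip for an OS thread created outside the library, using only calls that appear in this listing (the function name is illustrative):

static IDATA use_threading_library_from_foreign_thread(void)
{
    hythread_t self = NULL;
    IDATA status;

    // Attach the current OS thread; if it is already attached,
    // *handle is simply set to the existing hythread_t.
    status = hythread_attach(&self);
    if (status != TM_ERROR_NONE) {
        return status;
    }

    // ... use hythread functionality here ...

    // Detach the current thread again (NULL means "current thread").
    hythread_detach_ex(NULL);
    return TM_ERROR_NONE;
}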
Example #13
ncaiError JNICALL
ncaiTerminateThread(ncaiEnv *env, ncaiThread thread)
{
    TRACE2("ncai.thread", "TerminateThread called");
    SuspendEnabledChecker sec;

    if (env == NULL)
        return NCAI_ERROR_INVALID_ENVIRONMENT;

    if (thread == NULL)
        return NCAI_ERROR_INVALID_THREAD;

    hythread_t hythread = reinterpret_cast<hythread_t>(thread);

    hythread_t self = hythread_self();

    if (hythread == self)
        return NCAI_ERROR_INVALID_THREAD;

    assert(thread);

    // grab hythread global lock
    hythread_global_lock();

    if (!ncai_thread_is_alive(hythread))
    {
        hythread_global_unlock();
        return NCAI_ERROR_THREAD_NOT_ALIVE;
    }

    IDATA UNUSED status = jthread_vm_detach(jthread_get_vm_thread(hythread));
    assert(status == TM_ERROR_NONE);

    hythread_cancel(hythread);

    // release hythread global lock
    hythread_global_unlock();

    return NCAI_ERROR_NONE;
}
Example #14
/**
 * Detaches the given Java thread from the VM.
 */
jint vm_detach(jobject java_thread)
{
    assert(hythread_is_suspend_enabled());

    hythread_t native_thread = jthread_get_native_thread(java_thread);
    assert(native_thread);
    vm_thread_t p_vm_thread = jthread_get_vm_thread(native_thread);
    assert(p_vm_thread);

    // Send Thread End event
    if(jvmti_should_report_event(JVMTI_EVENT_THREAD_END)) {
        jvmti_send_thread_start_end_event(p_vm_thread, 0);
    }

    // change java_status for native thread
    native_thread->java_status = TM_STATUS_ALLOCATED;

    if (native_thread == hythread_self()) {
        // Notify GC about thread detaching.
        // FIXME - GC notify detach thread works for current thread only
        gc_thread_kill(&p_vm_thread->_gc_private_information);
    }

    if (ti_is_enabled())
    {
        apr_status_t UNREF status;
        status = port_vmem_free(
            p_vm_thread->jvmti_thread.jvmti_jit_breakpoints_handling_buffer,
            TM_JVMTI_MAX_BUFFER_SIZE);
        assert(status == APR_SUCCESS);
    }

    // Destroy current VM_thread pool and zero VM_thread structure
    jthread_deallocate_vm_thread_pool(p_vm_thread);

    return JNI_OK;
}
Example #15
Class* TypeDesc::load_type_desc()
{

#ifdef ORDER
    int class_status = 0;
    if(vm_order_record){
        class_status = clss ? 1 : 0;
        U_32 tid = hythread_self()->thread_id;
        if (order_system_call[tid] == NULL)
        {
            char name[40];
            sprintf(name, "SYSTEM_CALL.%d.log", tid);

            order_system_call[tid] = fopen64(name, "a+");
        }
#ifdef ORDER_DEBUG

        assert(order_system_call[tid] != NULL);

        fprintf(order_system_call[tid], "[%d] ", 21);
#endif
        fprintf(order_system_call[tid], "%d\n", class_status);

        if(class_status == 1){
#ifdef ORDER_DEBUG
//            printf("[TYPE_DESC]: class : %s already loaded, thread id : %d\n", clss->get_name()->bytes, hythread_self()->thread_id);
#endif
            return clss;
        }

    }
    else{
        
        U_32 tid = hythread_self()->thread_id;
        if (order_system_call[tid] == NULL)
        {
            char name[40];
            sprintf(name, "SYSTEM_CALL.%d.log", tid);

            order_system_call[tid] = fopen64(name, "r");
        }
#ifdef ORDER_DEBUG
        assert(order_system_call[tid] != NULL);

        int bit_num;
        fscanf(order_system_call[tid], "[%d]", &bit_num);
        assert(bit_num == 21);
#endif
        fscanf(order_system_call[tid], "%d\n", &class_status);

        if(class_status == 1){
            while(!clss){
                usleep(1000);
#ifdef ORDER_DEBUG
                printf("[TYPE_DESC]: usleep in TypeDesc::load_type_desc!!\n");
#endif
            }
            return clss;
        }

    }
	
#else

    if (clss) return clss; // class already loaded

#endif

    
    Global_Env* env = VM_Global_State::loader_env;
    Class* element_clss;

    switch (get_kind()) {
    case K_S1: return env->Byte_Class;
    case K_S2: return env->Short_Class;
    case K_S4: return env->Int_Class;
    case K_S8: return env->Long_Class;
    case K_F4: return env->Float_Class;
    case K_F8: return env->Double_Class;
    case K_Boolean: return env->Boolean_Class;
    case K_Char: return env->Char_Class;
    case K_Void: return env->Void_Class;
    case K_Object:
        assert (loader);
        assert (name);
        // FIXME: better to use LoadVerifyAndPrepareClass here - but this results in Recursive resolution collision in StartLoadingClass
        //c = loader->LoadVerifyAndPrepareClass(env, name);
        clss = loader->LoadClass(env, name);
        return clss;
    case K_Vector:
        assert (component_type);
        element_clss = component_type->load_type_desc();
        if (!element_clss) return NULL;
        clss = resolve_class_array_of_class(env, element_clss);
        return clss;
    default:
        // All other types are not Java types, so fail
        LDIE(73, "Unexpected kind");
        return NULL;
    }
}
Example #16
static void report_loaded_unloaded_module(ncaiModule module, bool loaded)
{
    DebugUtilsTI *ti = VM_Global_State::loader_env->TI;

    hythread_t hythread = hythread_self();
    ncaiThread thread = reinterpret_cast<ncaiThread>(hythread);

    bool suspend_enabled = hythread_is_suspend_enabled();

    if (!suspend_enabled)
        hythread_suspend_enable();

    TIEnv *ti_env = ti->getEnvironments();
    TIEnv *next_ti_env;

    const char* trace_text = loaded ? "ModuleLoad" : "ModuleUnload";

    while (NULL != ti_env)
    {
        next_ti_env = ti_env->next;

        NCAIEnv* env = ti_env->ncai_env;

        if (NULL == env)
        {
            ti_env = next_ti_env;
            continue;
        }

        ncaiModuleLoad func_l =
            (ncaiModuleLoad)env->get_event_callback(NCAI_EVENT_MODULE_LOAD);
        ncaiModuleUnload func_u =
            (ncaiModuleUnload)env->get_event_callback(NCAI_EVENT_MODULE_UNLOAD);

        ncaiModule env_module = NULL;
        ncaiModLU func = loaded ? (ncaiModLU)func_l : (ncaiModLU)func_u;
        ncaiEventKind event =
            loaded ? NCAI_EVENT_MODULE_LOAD : NCAI_EVENT_MODULE_UNLOAD;

        if (NULL != func)
        {
            if (env->global_events[event - NCAI_MIN_EVENT_TYPE_VAL])
            {
                TRACE2("ncai.modules",
                    "Calling global " << trace_text << " callback for module "
                    << module->info->name);

                find_init_module_record(env, module, &env_module);
                func((ncaiEnv*)env, thread, env_module);

                TRACE2("ncai.modules",
                    "Finished global " << trace_text << " callback for module "
                    << module->info->name);

                ti_env = next_ti_env;
                continue;
            }

            ncaiEventThread* next_et;
            ncaiEventThread* first_et =
                env->event_threads[event - NCAI_MIN_EVENT_TYPE_VAL];

            for (ncaiEventThread* et = first_et; NULL != et; et = next_et)
            {
                next_et = et->next;

                if (et->thread == thread)
                {
                    TRACE2("ncai.modules",
                        "Calling local " << trace_text << " callback for module "
                        << module->info->name);

                    find_init_module_record(env, module, &env_module);
                    func((ncaiEnv*)env, thread, env_module);

                    TRACE2("ncai.modules",
                        "Finished local " << trace_text << " callback for module "
                        << module->info->name);
                }
                et = next_et;
            }
        }
        ti_env = next_ti_env;
    }

    if (!suspend_enabled)
        hythread_suspend_disable();
}
Example #17
/**
 * Wrapper around the user thread start procedure.
 * Used to perform housekeeping right after the thread is started
 * and right before the thread finishes.
 */
static int HYTHREAD_PROC hythread_wrapper_start_proc(void *arg) {
    IDATA UNUSED status;
    hythread_t thread;
    hythread_start_proc_data start_proc_data;
    hythread_entrypoint_t start_proc;
    
    // store procedure arguments to local
    start_proc_data = *(hythread_start_proc_data_t) arg;
    free(arg);

    // get hythread global lock
    status = hythread_global_lock();
    assert(status == TM_ERROR_NONE);

    // get native thread
    thread = start_proc_data.thread;
    start_proc = start_proc_data.proc;

    CTRACE(("TM: native thread started: native: %p tm: %p",
        port_thread_current(), thread));

    // check hythread library state
    if (hythread_lib_state() != TM_LIBRARY_STATUS_INITIALIZED) {
        // set TERMINATED state
        port_mutex_lock(&thread->mutex);
        thread->state = TM_THREAD_STATE_TERMINATED;
        port_mutex_unlock(&thread->mutex);

        // set hythread_self()
        hythread_set_self(thread);
        assert(thread == hythread_self());

        // release thread structure data
        hythread_detach(thread);

        // zero hythread_self() because we don't do it in hythread_detach_ex()
        hythread_set_self(NULL);

        CTRACE(("TM: native thread terminated due to shutdown: native: %p tm: %p",
            port_thread_current(), thread));

        // release hythread global lock
        status = hythread_global_unlock();
        assert(status == TM_ERROR_NONE);

        return 0;
    }

    // register to group and set ALIVE & RUNNABLE states
    status = hythread_set_to_group(thread, start_proc_data.group);
    assert(status == TM_ERROR_NONE);

    // set hythread_self()
    hythread_set_self(thread);
    assert(thread == hythread_self());

    // set priority
    status = hythread_set_priority(thread, thread->priority);
    // FIXME - cannot set priority
    //assert(status == TM_ERROR_NONE);

    // release hythread global lock
    status = hythread_global_unlock();
    assert(status == TM_ERROR_NONE);

    // Do actual call of the thread body supplied by the user.
    start_proc(start_proc_data.proc_args);

    assert(hythread_is_suspend_enabled());

    // get hythread global lock
    status = hythread_global_lock();
    assert(status == TM_ERROR_NONE);

    // set TERMINATED state
    port_mutex_lock(&thread->mutex);
    thread->state = TM_THREAD_STATE_TERMINATED;
    port_mutex_unlock(&thread->mutex);

    // detach and free thread
    hythread_detach(thread);

    // release hythread global lock
    status = hythread_global_unlock();
    assert(status == TM_ERROR_NONE);

    return 0;
}
Example #18
int test_hythread_thread_suspend_all(void)
{
    void **args; 
    hythread_t thread_list[THREAD_COUNT];
    hythread_thin_monitor_t lock;
    hythread_thin_monitor_t monitor;
    IDATA status;
    int i;

    // create monitors
    status = hythread_thin_monitor_create(&monitor);
    tf_assert_same(status, TM_ERROR_NONE);
    status = hythread_thin_monitor_create(&lock);
    tf_assert_same(status, TM_ERROR_NONE);

    // alloc and set thread start procedure args
    args = (void**)calloc(3, sizeof(void*));
    args[0] = &lock;
    args[1] = &monitor;
    args[2] = 0;

    // create threads
    hythread_suspend_disable();
    status = hythread_thin_monitor_enter(&lock);
    tf_assert_same(status, TM_ERROR_NONE);
    hythread_suspend_enable();

    started_thread_count = 0;
    for(i = 0; i < THREAD_COUNT; i++) {
        thread_list[i] = NULL;
        status = hythread_create(&thread_list[i], 0, 0, 0,
            (hythread_entrypoint_t)start_proc, args);
        tf_assert_same(status, TM_ERROR_NONE);
        log_info("%d thread is started", i + 1);
    } 

    // wait for the tested threads to start
    hythread_suspend_disable();
    while (started_thread_count < 10) {
        status = hythread_thin_monitor_wait(&lock);
        tf_assert_same(status, TM_ERROR_NONE);
    }

    status = hythread_thin_monitor_exit(&lock);
    tf_assert_same(status, TM_ERROR_NONE);
    hythread_suspend_enable();

    // suspend tested thread
    status = hythread_suspend_all(NULL, ((HyThread_public*)hythread_self())->group);
    tf_assert_same(status, TM_ERROR_NONE);
    log_info("all threads are suspended");

    // notify tested threads
    hythread_suspend_disable();
    status = hythread_thin_monitor_enter(&monitor);
    tf_assert_same(status, TM_ERROR_NONE);
    status = hythread_thin_monitor_notify_all(&monitor);
    tf_assert_same(status, TM_ERROR_NONE);
    status = hythread_thin_monitor_exit(&monitor);
    tf_assert_same(status, TM_ERROR_NONE);
    hythread_suspend_enable();
    log_info("notify all suspended threads");

    // check tested argument
    for(i = 0; i < 1000; i++) {
        tf_assert_same(args[2], 0);
        hythread_sleep(1);
    }

    // resume thread
    status = hythread_resume_all(((HyThread_public*)hythread_self())->group);
    tf_assert_same(status, TM_ERROR_NONE);
    log_info("resume all suspended threads");

    for(i = 0; i < THREAD_COUNT; i++) {
        test_thread_join(thread_list[i], i);
        log_info("%d thread is terminated", i + 1);
    }

    tf_assert_same((IDATA)args[2], THREAD_COUNT);

    return 0;
}
Example #19
IDATA thread_sleep_impl(I_64 millis, IDATA nanos, IDATA interruptable) {
    IDATA status;
    IDATA result;
    hythread_t self;
    hythread_monitor_t mon;

    if (nanos == 0 && millis == 0) {
        hythread_yield();
        return TM_ERROR_NONE;
    }
    if (!(self = hythread_self())) {
        // Report error in case current thread is not attached
        return TM_ERROR_UNATTACHED_THREAD;
    }

    // Grab thread monitor
    mon = self->monitor;
    status = hythread_monitor_enter(mon);
    assert(status == TM_ERROR_NONE);
    assert(mon->recursion_count == 0);
    mon->owner = NULL;
    mon->wait_count++;

    // Set thread state
    status = port_mutex_lock(&self->mutex);
    assert(status == TM_ERROR_NONE);
    self->waited_monitor = mon;
    self->state |= TM_THREAD_STATE_SLEEPING;
    status = port_mutex_unlock(&self->mutex);
    assert(status == TM_ERROR_NONE);

    do {
        apr_time_t start;
        assert(mon->notify_count >= 0);
        assert(mon->notify_count < mon->wait_count);
        start = apr_time_now();

        result = condvar_wait_impl(&mon->condition, &mon->mutex, millis, nanos, interruptable);
        if (result != TM_ERROR_NONE) {
            break;
        }
        // we should not change millis and nanos if both are 0 (meaning "no timeout")
        if (millis || nanos) {
            apr_interval_time_t elapsed = apr_time_now() - start;
            nanos -= (IDATA)((elapsed % 1000) * 1000);
            if (nanos < 0) {
                millis -= elapsed/1000 + 1;
                nanos += 1000000;
            } else {
                millis -= elapsed/1000;
            }
            if (millis < 0) {
                assert(status == TM_ERROR_NONE);
                status = TM_ERROR_TIMEOUT;
                break;
            }
            assert(0 <= nanos && nanos < 1000000);
        }
    } while(1);

    // Restore thread state
    status = port_mutex_lock(&self->mutex);
    assert(status == TM_ERROR_NONE);
    self->state &= ~TM_THREAD_STATE_SLEEPING;
    self->waited_monitor = NULL;
    status = port_mutex_unlock(&self->mutex);
    assert(status == TM_ERROR_NONE);

    // Release thread monitor
    mon->wait_count--;
    mon->owner = self;
    assert(mon->notify_count <= mon->wait_count);
    status = hythread_monitor_exit(mon);
    assert(status == TM_ERROR_NONE);

    if (self->request) {
        hythread_safe_point();
        hythread_exception_safe_point();
    }

    return (result == TM_ERROR_INTERRUPT && interruptable)
        ? TM_ERROR_INTERRUPT : TM_ERROR_NONE;
}
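
The public call used by the test code in this listing is hythread_sleep; a small hedged sketch (the 50 ms value and the helper name are illustrative):

static void sleep_example(void)
{
    // Only an attached thread may sleep; thread_sleep_impl() reports
    // TM_ERROR_UNATTACHED_THREAD otherwise.
    if (hythread_self()) {
        hythread_sleep(50);
    }
}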
Example #20
hythread_t hythread_self_slow() {
    return hythread_self();
}
Example #21
/**
 * Wait on the <code>object</code>'s monitor with the specified timeout.
 *
 * This function instructs the current thread to be scheduled off 
 * the processor and wait on the monitor until the following occurs: 
 * <UL>
 * <LI>another thread invokes <code>thread_notify(object)</code>
 * and VM chooses this thread to wake up;
 * <LI>another thread invokes <code>thread_notifyAll(object);</code>
 * <LI>another thread invokes <code>thread_interrupt(thread);</code>
 * <LI>the real time elapsed since the wait began is
 * greater than or equal to the specified timeout.
 * </UL>
 *
 * @param[in] monitor object where monitor is located
 * @param[in] millis time to wait (in milliseconds)
 * @param[in] nanos time to wait (in nanoseconds)
 * @sa java.lang.Object.wait()
 */
IDATA VMCALL
jthread_monitor_timed_wait(jobject monitor, jlong millis, jint nanos)
{
    assert(monitor);

    hythread_suspend_disable();
    hythread_t native_thread = hythread_self();
    hythread_thin_monitor_t *lockword = vm_object_get_lockword_addr(monitor);
    if (!hythread_is_fat_lock(*lockword)) {
        if (!hythread_owns_thin_lock(native_thread, *lockword)) {
            CTRACE(("ILLEGAL_STATE wait %x\n", lockword));
            hythread_suspend_enable();
            return TM_ERROR_ILLEGAL_STATE;
        }
        hythread_inflate_lock(lockword);
    }

    apr_time_t wait_begin;
    if (ti_is_enabled()) {
        int disable_count = hythread_reset_suspend_disable();
        jthread_set_wait_monitor(monitor);
        jthread_set_owned_monitor(monitor);
        if(jvmti_should_report_event(JVMTI_EVENT_MONITOR_WAIT)) {
            jvmti_send_wait_monitor_event(monitor, (jlong) millis);
        }
        if(jvmti_should_report_event(JVMTI_EVENT_MONITOR_CONTENDED_ENTER)) {
            jvmti_send_contended_enter_or_entered_monitor_event(monitor, 1);
        }
        hythread_set_suspend_disable(disable_count);

        // should be moved to event handler
        wait_begin = apr_time_now();
        jthread_remove_owned_monitor(monitor);
    }

    hythread_thread_lock(native_thread);
    IDATA state = hythread_get_state(native_thread);
    state &= ~TM_THREAD_STATE_RUNNABLE;
    state |= TM_THREAD_STATE_WAITING | TM_THREAD_STATE_IN_MONITOR_WAIT;
    if ((millis > 0) || (nanos > 0)) {
        state |= TM_THREAD_STATE_WAITING_WITH_TIMEOUT;
    }
    else {
        state |= TM_THREAD_STATE_WAITING_INDEFINITELY;
    }
    IDATA status = hythread_set_state(native_thread, state);
    assert(status == TM_ERROR_NONE);
    hythread_thread_unlock(native_thread);

    status =
        hythread_thin_monitor_wait_interruptable(lockword, millis, nanos);

    hythread_thread_lock(native_thread);
    state = hythread_get_state(native_thread);
    if ((millis > 0) || (nanos > 0)) {
        state &= ~TM_THREAD_STATE_WAITING_WITH_TIMEOUT;
    }
    else {
        state &= ~TM_THREAD_STATE_WAITING_INDEFINITELY;
    }
    state &= ~(TM_THREAD_STATE_WAITING | TM_THREAD_STATE_IN_MONITOR_WAIT);
    state |= TM_THREAD_STATE_RUNNABLE;
    hythread_set_state(native_thread, state);
    hythread_thread_unlock(native_thread);

    hythread_suspend_enable();
    if (ti_is_enabled()) {
        jthread_add_owned_monitor(monitor);
        int disable_count = hythread_reset_suspend_disable();
        if(jvmti_should_report_event(JVMTI_EVENT_MONITOR_CONTENDED_ENTERED)) {
            jvmti_send_contended_enter_or_entered_monitor_event(monitor, 0);
        }
        if(jvmti_should_report_event(JVMTI_EVENT_MONITOR_WAITED)) {
            jvmti_send_waited_monitor_event(monitor,
                ((status == APR_TIMEUP) ? (jboolean) 1 : (jboolean) 0));
        }
        hythread_set_suspend_disable(disable_count);
        // should be moved to event handler
        jvmti_thread_t jvmti_thread =
            jthread_get_jvmti_thread(hythread_self());
        jvmti_thread->waited_time += apr_time_now() - wait_begin;
    }
    return status;
} // jthread_monitor_timed_wait
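
A hedged sketch of the expected calling pattern: the current thread must already own the monitor (otherwise the code above returns TM_ERROR_ILLEGAL_STATE), so the wait is bracketed by jthread_monitor_enter and a matching release. The release call jthread_monitor_exit is assumed here; it does not appear in this listing.

static IDATA wait_on_java_monitor_example(jobject monitor)
{
    IDATA status;

    // Ownership is required before waiting (see the ILLEGAL_STATE check above).
    status = jthread_monitor_enter(monitor);
    if (status != TM_ERROR_NONE) {
        return status;
    }

    // Wait up to 100 ms; a timeout is reported through the returned status.
    status = jthread_monitor_timed_wait(monitor, 100, 0);

    // Assumed counterpart of jthread_monitor_enter.
    jthread_monitor_exit(monitor);
    return status;
}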
Example #22
/**
 * Gains ownership of the monitor.
 *
 * The current thread blocks if the specified monitor is owned by another thread.
 *
 * @param[in] monitor object where monitor is located
 * @sa JNI::MonitorEnter()
 */
IDATA VMCALL jthread_monitor_enter(jobject monitor)
{
    IDATA state;
    hythread_t native_thread;
    apr_time_t enter_begin;

    assert(monitor);
    hythread_suspend_disable();
    hythread_thin_monitor_t *lockword = vm_object_get_lockword_addr(monitor);
    IDATA status = hythread_thin_monitor_try_enter(lockword);
    if (status != TM_ERROR_EBUSY) {
        goto entered;
    }

#ifdef LOCK_RESERVATION
    // busy unreserve lock before blocking and inflating
    while (TM_ERROR_NONE != hythread_unreserve_lock(lockword)) {
        hythread_yield();
        hythread_safe_point();
        hythread_exception_safe_point();
        lockword = vm_object_get_lockword_addr(monitor);
    }
    status = hythread_thin_monitor_try_enter(lockword);
    if (status != TM_ERROR_EBUSY) {
        goto entered;
    }
#endif //LOCK_RESERVATION

    native_thread = hythread_self();
    hythread_thread_lock(native_thread);
    state = hythread_get_state(native_thread);
    state &= ~TM_THREAD_STATE_RUNNABLE;
    state |= TM_THREAD_STATE_BLOCKED_ON_MONITOR_ENTER;
    status = hythread_set_state(native_thread, state);
    assert(status == TM_ERROR_NONE);
    hythread_thread_unlock(native_thread);

    // should be moved to event handler
    if (ti_is_enabled()) {
        enter_begin = apr_time_now();
        int disable_count = hythread_reset_suspend_disable();
        jthread_set_owned_monitor(monitor);
        if(jvmti_should_report_event(JVMTI_EVENT_MONITOR_CONTENDED_ENTER)) {
            jvmti_send_contended_enter_or_entered_monitor_event(monitor, 1);
        }
        hythread_set_suspend_disable(disable_count);
    }

    // busy wait and inflate
    // reload pointer after safepoints
    lockword = vm_object_get_lockword_addr(monitor);
    while ((status =
            hythread_thin_monitor_try_enter(lockword)) == TM_ERROR_EBUSY)
    {
        hythread_safe_point();
        hythread_exception_safe_point();
        lockword = vm_object_get_lockword_addr(monitor);

        if (hythread_is_fat_lock(*lockword)) {
            status = hythread_thin_monitor_enter(lockword);
            if (status != TM_ERROR_NONE) {
                hythread_suspend_enable();
                assert(0);
                return status;
            }
            goto contended_entered;
        }
        hythread_yield();
    }
    assert(status == TM_ERROR_NONE);
    if (!hythread_is_fat_lock(*lockword)) {
        hythread_inflate_lock(lockword);
    }

// do all TI (tool interface) work here
contended_entered:
    if (ti_is_enabled()) {
        int disable_count = hythread_reset_suspend_disable();
        if(jvmti_should_report_event(JVMTI_EVENT_MONITOR_CONTENDED_ENTERED)) {
            jvmti_send_contended_enter_or_entered_monitor_event(monitor, 0);
        }
        hythread_set_suspend_disable(disable_count);
        // should be moved to event handler
        jvmti_thread_t jvmti_thread =
            jthread_get_jvmti_thread(hythread_self());
        jvmti_thread->blocked_time += apr_time_now() - enter_begin;
    }

    hythread_thread_lock(native_thread);
    state = hythread_get_state(native_thread);
    state &= ~TM_THREAD_STATE_BLOCKED_ON_MONITOR_ENTER;
    state |= TM_THREAD_STATE_RUNNABLE;
    status = hythread_set_state(native_thread, state);
    assert(status == TM_ERROR_NONE);
    hythread_thread_unlock(native_thread);

entered:
    if (ti_is_enabled()) {
        jthread_add_owned_monitor(monitor);
    }
    hythread_suspend_enable();
    return TM_ERROR_NONE;
} // jthread_monitor_enter
Example #23
// Callback function for NCAI breakpoint processing
bool ncai_process_breakpoint_event(TIEnv *env, const VMBreakPoint* bp,
                                    const POINTER_SIZE_INT data)
{
    TRACE2("ncai.break", "BREAKPOINT occured, location = " << bp->addr);

    VM_thread* vm_thread = p_TLS_vmthread;
    if (!vm_thread)
        return false;

    jvmti_thread_t jvmti_thread = &vm_thread->jvmti_thread;

    // This check works for current thread only
    if (jvmti_thread->flag_ncai_handler) // Recursion
        return true;

    jvmti_thread->flag_ncai_handler = true;

    NCAIEnv* ncai_env = env->ncai_env;
    void* addr = (void*)bp->addr;

    bool suspend_enabled = hythread_is_suspend_enabled();

    if (!suspend_enabled)
        hythread_suspend_enable();

    hythread_t hythread = hythread_self();
    ncaiThread thread = reinterpret_cast<ncaiThread>(hythread);

    ncaiBreakpoint func =
        (ncaiBreakpoint)ncai_env->get_event_callback(NCAI_EVENT_BREAKPOINT);

    if (NULL != func)
    {
        if (ncai_env->global_events[NCAI_EVENT_BREAKPOINT - NCAI_MIN_EVENT_TYPE_VAL])
        {
            TRACE2("ncai.break", "Calling global breakpoint callback, address = " << addr);

            func((ncaiEnv*)ncai_env, thread, addr);

            TRACE2("ncai.break", "Finished global breakpoint callback, address = " << addr);
        }
        else
        {

            ncaiEventThread* next_et;
            ncaiEventThread* first_et =
                ncai_env->event_threads[NCAI_EVENT_BREAKPOINT - NCAI_MIN_EVENT_TYPE_VAL];

            for (ncaiEventThread* et = first_et; NULL != et; et = next_et)
            {
                next_et = et->next;

                if (et->thread == thread)
                {
                    TRACE2("ncai.break", "Calling local breakpoint callback, address = " << addr);

                    func((ncaiEnv*)ncai_env, thread, addr);

                    TRACE2("ncai.break", "Finished local breakpoint callback, address = " << addr);
                }

                et = next_et;
            }
        }
    }

    if (!suspend_enabled)
        hythread_suspend_disable();

    jvmti_thread->flag_ncai_handler = false;
    return true;
}
Example #24
/**
 * Shut down the threading library associated with the current thread.
 * 
 * @return none
 * 
 * @see hythread_init
 */
void VMCALL hythread_shutdown() {
    hythread_lib_destroy(hythread_self()->library);
}
Example #25
/**
 * Unreserves a thin lock.
 *
 * Thin monitor functions are used by the Java monitor implementation;
 * unreserving clears the lock's reservation in the lockword so that
 * other threads can acquire it with the normal thin-lock protocol.
 */
IDATA VMCALL hythread_unreserve_lock(hythread_thin_monitor_t *lockword_ptr) {
    U_32 lockword = *lockword_ptr;
    U_32 lockword_new;
    uint16 lock_id;
    hythread_t owner;
    IDATA status;
    I_32 append;

    // trylock used to prevent cyclic suspend deadlock
    // the java_monitor_enter calls safe_point between attempts.
    /*status = port_mutex_trylock(&TM_LOCK);
      if (status !=TM_ERROR_NONE) {
      return status;
      }*/
    
    if (IS_FAT_LOCK(lockword)) {
        return TM_ERROR_NONE;
    }
    lock_id = THREAD_ID(lockword);
    owner = hythread_get_thread(lock_id);
    CTRACE(("Unreserved other %d \n", ++unreserve_count/*, vm_get_object_class_name(lockword_ptr-1)*/));
    if (!IS_RESERVED(lockword) || IS_FAT_LOCK(lockword)) {
        // port_mutex_unlock(&TM_LOCK);
        return TM_ERROR_NONE;
    }
    // suspend owner 
    if (owner) {
        assert(owner);
        assert(hythread_get_id(owner) == lock_id);
        assert(owner != hythread_self());
        if(owner->state
                & (TM_THREAD_STATE_TERMINATED
                    | TM_THREAD_STATE_WAITING
                    | TM_THREAD_STATE_WAITING_INDEFINITELY
                    | TM_THREAD_STATE_WAITING_WITH_TIMEOUT
                    | TM_THREAD_STATE_SLEEPING
                    | TM_THREAD_STATE_PARKED
                    | TM_THREAD_STATE_SUSPENDED
                    | TM_THREAD_STATE_IN_MONITOR_WAIT))
        {
            append = 0;
        } else {
            append = RESERVED_BITMASK;
        }

        status=hythread_suspend_other(owner);
        if (status !=TM_ERROR_NONE) {
            return status;
        }
    } else {
        append = 0;
    }

    if (!tm_properties || !tm_properties->use_soft_unreservation) {
        append = RESERVED_BITMASK;
    }

    // prepare new unreserved lockword and try to CAS it with old one.
    while (IS_RESERVED(lockword)) {
        assert(!IS_FAT_LOCK(lockword));
        CTRACE(("unreserving lock"));
        if (RECURSION(lockword) != 0) {
            lockword_new = (lockword | RESERVED_BITMASK);
            assert(RECURSION(lockword) > 0);
            assert(RECURSION(lockword_new) > 0);
            RECURSION_DEC(&lockword_new, lockword_new);
        } else {
            lockword_new = (lockword | append);
            lockword_new =  lockword_new & 0x0000ffff; 
        }
        if (lockword == apr_atomic_cas32 (((volatile apr_uint32_t*) lockword_ptr), 
                                          (apr_uint32_t) lockword_new, lockword)) {
            CTRACE(("unreserved lock"));
            break;
        }
        lockword = *lockword_ptr;
    }

    // resume owner
    if (owner) {
        hythread_yield_other(owner);
        hythread_resume(owner);
    }

    /* status = port_mutex_unlock(&TM_LOCK);*/

    // Gregory - This lock, right after it was unreserved, may be
    // inflated by another thread and therefore instead of recursion
    // count and reserved flag it will have the fat monitor ID. The
    // assertion !IS_RESERVED(lockword) fails in this case. So it is
    // necessary to check first that monitor is not fat.
    // To avoid race condition between checking two different
    // conditions inside of assert, the lockword contents has to be
    // loaded before checking.
//    lockword = *lockword_ptr;
//    assert(IS_FAT_LOCK(lockword) || !IS_RESERVED(lockword));
    return TM_ERROR_NONE;
}
Example #26
/**
 * Creates a new thread in a given group.
 *
 * @param[in] new_thread a newly allocated thread
 * @param[in] group      a thread group or NULL;
 *                       if NULL, the thread goes to the default group
 * @param[in] stacksize  a new thread stack size or 0;
 *                       if 0, the default stack size is used
 * @param[in] priority   a new thread priority or 0;
 *                       if 0, the thread gets HYTHREAD_PRIORITY_NORMAL priority
 * @param[in] wrapper    a wrapper procedure or NULL;
 *                       if NULL, the default start wrapper is used
 * @param[in] func       a function to run in the new thread
 * @param[in] data       an argument to be passed to the start function
 */
IDATA VMCALL hythread_create_ex(hythread_t new_thread,
                                hythread_group_t group,
                                UDATA stacksize,
                                UDATA priority,
                                hythread_wrapper_t wrapper,
                                hythread_entrypoint_t func,
                                void *data)
{
    int result;
    hythread_t self;

    assert(new_thread);
	
#ifdef ORDER
    U_32 p_tid = new_thread->p_tid;
    U_32 p_count = new_thread->p_count;
#endif

    hythread_struct_init(new_thread);

#ifdef ORDER
    new_thread->p_tid = p_tid;
    new_thread->p_count = p_count;
#endif

    self = hythread_self();
    new_thread->library = self ? self->library : TM_LIBRARY;
    new_thread->priority = priority ? priority : HYTHREAD_PRIORITY_NORMAL;
    
    if (!wrapper) {
        hythread_start_proc_data_t start_proc_data;

        // No need to zero the allocated memory because all fields are initialized below.
        start_proc_data =
            (hythread_start_proc_data_t) malloc(sizeof(hythread_start_proc_data));
        if (start_proc_data == NULL) {
            return TM_ERROR_OUT_OF_MEMORY;
        }

        // Set up thread body procedure 
        start_proc_data->thread = new_thread;
        start_proc_data->group = group == NULL ? TM_DEFAULT_GROUP : group;
        start_proc_data->proc = func;
        start_proc_data->proc_args = data;

        data = (void*)start_proc_data;

        // Set wrapper procedure
        wrapper = hythread_wrapper_start_proc;
    }

    // Need to make sure thread will not register itself with a thread group
    // until port_thread_create returned and initialized thread->os_handle properly.
    hythread_global_lock();
    result = port_thread_create(&new_thread->os_handle,
            stacksize ? stacksize : TM_DEFAULT_STACKSIZE,
            priority, wrapper, data);
    assert(/* error */ result || new_thread->os_handle /* or thread created ok */);

    hythread_global_unlock();

    return result;
}
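
For comparison with hythread_create in Example #7, a sketch of calling hythread_create_ex directly with the defaults selected by NULL/0 arguments (the caller name is illustrative):

static IDATA create_in_default_group_example(hythread_t new_thread,
                                             hythread_entrypoint_t func,
                                             void *data)
{
    // NULL group -> TM_DEFAULT_GROUP, 0 stacksize -> TM_DEFAULT_STACKSIZE,
    // 0 priority -> HYTHREAD_PRIORITY_NORMAL, NULL wrapper -> default
    // start wrapper (hythread_wrapper_start_proc).
    return hythread_create_ex(new_thread, NULL, 0, 0, NULL, func, data);
}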
Example #27
BOOLEAN class_cp_is_entry_resolved(Class_Handle clazz, U_16 cp_index) {
    ConstantPool& cp = clazz->get_constant_pool();

#ifdef ORDER
    bool res = cp.is_entry_resolved(cp_index, (Class *)clazz);
#else
    bool res = cp.is_entry_resolved(cp_index);
#endif

    if (!res) {
        unsigned char tag = cp.get_tag(cp_index);
        // during the loading of a class, not all items in its constant pool are updated
        if (tag == CONSTANT_Fieldref || tag == CONSTANT_Methodref  
            || tag == CONSTANT_InterfaceMethodref || tag == CONSTANT_Class)
        {
            uint16 typeIndex = tag == CONSTANT_Class ? cp_index : cp.get_ref_class_index(cp_index);

#ifdef ORDER
            res = cp.is_entry_resolved(typeIndex,  (Class *)clazz);
#else
            res = cp.is_entry_resolved(typeIndex);
#endif
            if (!res) {
                // the type is not marked as loaded in local constant pool
                // ask classloader directly
                uint16 nameIdx = cp.get_class_name_index(typeIndex);
                String* typeName = cp.get_utf8_string(nameIdx);
                assert(typeName!=NULL);


#ifdef ORDER
                Class* type = NULL;
                if(vm_order_record){ // record mode
                    type = clazz->get_class_loader()->LookupClass(typeName);
                    U_32 tid = hythread_self()->thread_id;

                    if (order_system_call[tid] == NULL)
                    {
                        char name[40];
                        sprintf(name, "SYSTEM_CALL.%d.log", tid);

                        order_system_call[tid] = fopen64(name, "a+");
#ifdef ORDER_DEBUG
                        assert(order_system_call[tid]);
#endif
                    }
#ifdef ORDER_DEBUG
                    fprintf(order_system_call[tid], "[%d] ", 38);
#endif
                    fprintf(order_system_call[tid], "%d\n", type == NULL ? 0 : 1);
                }
                else{ // replay mode
                    U_32 tid = hythread_self()->thread_id;

                    if (order_system_call[tid] == NULL)
                    {
                        char name[40];
                        sprintf(name, "SYSTEM_CALL.%d.log", tid);

                        order_system_call[tid] = fopen64(name, "r");
#ifdef ORDER_DEBUG
                        assert(order_system_call[tid]);
#endif
                    }
#ifdef ORDER_DEBUG
                    int bit_num;
                    fscanf(order_system_call[tid], "[%d] ", &bit_num);
                    assert(bit_num == 38);

#endif
                    int type_state = 0;
                    fscanf(order_system_call[tid], "%d\n", &type_state);

                    if(type_state == 0){
                        type = NULL;
                    }
                    else{
                        while((type = clazz->get_class_loader()->LookupClass(typeName)) == NULL){
#ifdef ORDER_DEBUG
                            printf("(type = clazz->get_class_loader()->LookupClass(typeName)) == NULL!!! \n");
#endif
                            usleep(100);
                        }
#ifdef ORDER_DEBUG
                        assert(type);
#endif
                    }

                }
#else //NOT define ORDER
                Class* type = clazz->get_class_loader()->LookupClass(typeName);
#endif //#ifdef ORDER

                if (type) {
                    /*TODO: uncommenting this code leads to a crash in the StressLoader test
                    clazz->lock();
                    cp.resolve_entry(typeIndex, type);
                    clazz->unlock();*/
                    res = true;
                }

                //if array of primitives -> return true;
                if (*typeName->bytes=='[' && !strchr(typeName->bytes, 'L')) {
                    return true;
                }
            }

        } 
    }
    return res;
}