/**
 * Initializes raw monitor.
 *
 * Raw monitors are a simple combination of mutex and conditional variable which is
 * not associated with any Java object. This function creates the raw monitor at the
 * address specified as mon_ptr.
 * User needs to allocate space equal to sizeof(jrawMonitorID) before doing this call.
 *
 * @param[in] mon_ptr address where monitor needs to be created and initialized.
 * @return TM_ERROR_NONE on success, TM_ERROR_OUT_OF_MEMORY if the monitor
 *         could not be registered, or the underlying lock/init error code.
 */
IDATA VMCALL jthread_raw_monitor_create(jrawMonitorID * mon_ptr)
{
    hythread_monitor_t monitor;
    IDATA status;

    assert(mon_ptr);
    status = hythread_monitor_init(&monitor, 0);
    if (status != TM_ERROR_NONE) {
        return status;
    }

    // possibly should be moved to jvmti(environment?) init section
    if (!jvmti_monitor_table) {
        status = jthread_init_jvmti_monitor_table();
        if (status != TM_ERROR_NONE) {
            // don't leak the freshly created monitor on failure
            hythread_monitor_destroy(monitor);
            return status;
        }
    }
    status = port_mutex_lock(&jvmti_monitor_table_lock);
    if (status != TM_ERROR_NONE) {
        hythread_monitor_destroy(monitor);
        return status;
    }
    *mon_ptr = (jrawMonitorID)array_add(jvmti_monitor_table, monitor);
    if (!(*mon_ptr)) {
        port_mutex_unlock(&jvmti_monitor_table_lock);
        // registration failed: release the monitor we created above
        hythread_monitor_destroy(monitor);
        return TM_ERROR_OUT_OF_MEMORY;
    }
    status = port_mutex_unlock(&jvmti_monitor_table_lock);
    return status;
} // jthread_raw_monitor_create
/**
 * Blocks the calling thread until at most threads_to_keep non-daemon
 * threads remain alive in the thread's library.
 *
 * @param[in] thread          the calling thread (must be attached)
 * @param[in] threads_to_keep number of non-daemon threads allowed to remain
 * @return TM_ERROR_NONE on success, or the failing lock/wait status.
 */
IDATA VMCALL hythread_wait_for_nondaemon_threads(hythread_t thread, IDATA threads_to_keep)
{
    hythread_library_t lib;
    IDATA status;

    assert(thread);
    lib = thread->library;

    status = port_mutex_lock(&lib->TM_LOCK);
    if (status != TM_ERROR_NONE) {
        return status;
    }

    for (;;) {
        if (lib->nondaemon_thread_count - threads_to_keep <= 0) {
            break;
        }
        // check interruption and other problems
        status = hycond_wait(&lib->nondaemon_thread_cond, &lib->TM_LOCK);
        CTRACE(("TM wait for nondaemons notified, count: %d", lib->nondaemon_thread_count));
        if (status != TM_ERROR_NONE) {
            port_mutex_unlock(&lib->TM_LOCK);
            return status;
        }
    }

    return port_mutex_unlock(&lib->TM_LOCK);
} // hythread_wait_for_nondaemon_threads
/**
 * Decrements the library's non-daemon thread counter and, once the count
 * drops to threads_to_keep or below, wakes every thread blocked in
 * hythread_wait_for_nondaemon_threads().
 *
 * @param[in] thread          the calling thread
 * @param[in] threads_to_keep threshold at which waiters are released
 * @return TM_ERROR_NONE on success, TM_ERROR_ILLEGAL_STATE if the count
 *         is already zero, or the failing lock/notify status.
 */
IDATA VMCALL hythread_decrease_nondaemon_threads_count(hythread_t thread, IDATA threads_to_keep)
{
    hythread_library_t lib = thread->library;
    IDATA status = port_mutex_lock(&lib->TM_LOCK);
    if (status != TM_ERROR_NONE) {
        return status;
    }

    // nothing left to decrement: caller error
    if (lib->nondaemon_thread_count <= 0) {
        status = port_mutex_unlock(&lib->TM_LOCK);
        return (status != TM_ERROR_NONE) ? status : TM_ERROR_ILLEGAL_STATE;
    }

    CTRACE(("TM: nondaemons decreased, thread: %p count: %d\n", thread, lib->nondaemon_thread_count));
    lib->nondaemon_thread_count--;

    if (lib->nondaemon_thread_count - threads_to_keep <= 0) {
        // threshold reached: release everyone waiting for shutdown
        status = hycond_notify_all(&lib->nondaemon_thread_cond);
        CTRACE(("TM: nondaemons all dead, thread: %p count: %d\n", thread, lib->nondaemon_thread_count));
        if (status != TM_ERROR_NONE) {
            port_mutex_unlock(&lib->TM_LOCK);
            return status;
        }
    }

    return port_mutex_unlock(&lib->TM_LOCK);
} // hythread_decrease_nondaemon_threads_count
/**
 * Periodic pass of the asynchronous profile checker.
 *
 * Drains newly registered profiles into the working ("green") set, prunes
 * profiles whose methods were unloaded, and reports every profile whose
 * entry/backedge counters crossed the thresholds to the execution manager.
 *
 * Locking: greenProfiles is scanned WITHOUT profilesLock; per the comments in
 * classloaderUnloadingCallback/createProfile, writers publish only through
 * newProfiles under the lock so this checker thread can iterate safely —
 * NOTE(review): assumed single checker thread; confirm against caller.
 */
void EBProfileCollector::onTimeout() {
    assert(mode == EB_PCMODE_ASYNC);
    // Move profiles registered since the last pass into the working set.
    if(!newProfiles.empty()) {
        port_mutex_lock(&profilesLock);
        greenProfiles.insert(greenProfiles.end(), newProfiles.begin(), newProfiles.end());
        newProfiles.clear();
        port_mutex_unlock(&profilesLock);
    }
    if (!unloadedMethodProfiles.empty()) {
        cleanUnloadedProfiles(true);
    }
    // Collect "hot" profiles; NULL out their slots so they can be compacted away.
    for (EBProfiles::iterator it = greenProfiles.begin(), end = greenProfiles.end(); it!=end; ++it) {
        EBMethodProfile* profile = *it;
        if (profile->entryCounter >= eThreshold || profile->backedgeCounter >= bThreshold) {
            tmpProfiles.push_back(profile);
            *it = NULL;
        }
    }
    if (!tmpProfiles.empty()) {
        // Compact under the lock. std::remove shifts surviving (non-NULL)
        // entries to the front; exactly tmpProfiles.size() slots were NULLed
        // above, so trimming by that count drops precisely the stale tail.
        port_mutex_lock(&profilesLock);
        std::remove(greenProfiles.begin(), greenProfiles.end(), (EBMethodProfile*)NULL);
        greenProfiles.resize(greenProfiles.size() - tmpProfiles.size());
        port_mutex_unlock(&profilesLock);
        // Hand ready profiles to the EM outside the lock.
        for (EBProfiles::iterator it = tmpProfiles.begin(), end = tmpProfiles.end(); it!=end; ++it) {
            EBMethodProfile* profile = *it;
            if (loggingEnabled) {
                logReadyProfile(catName, name, profile);
            }
            em->methodProfileIsReady(profile);
        }
        tmpProfiles.clear();
    }
}
/**
 * Releases global lock of the library associated with the current thread.
 *
 * @param[in] self current thread
 */
void VMCALL hythread_lib_unlock(hythread_t self)
{
    IDATA unlock_status;

    // only the current thread may release its own library lock
    assert(self == hythread_self());

    unlock_status = port_mutex_unlock(&self->library->TM_LOCK);
    assert(unlock_status == TM_ERROR_NONE);
}
/**
 * Releases the lock over threading subsystem.
 *
 * @return always TM_ERROR_NONE (failure is caught by assert in debug builds)
 */
IDATA VMCALL hythread_global_unlock()
{
    IDATA unlock_status;

    // unattached threads are exempt from the suspend-enabled invariant
    assert(!hythread_self() || hythread_is_suspend_enabled());

    unlock_status = port_mutex_unlock(&TM_LIBRARY->TM_LOCK);
    assert(unlock_status == TM_ERROR_NONE);
    return TM_ERROR_NONE;
}
/**
 * Completely releases the ownership over monitor.
 *
 * Unlike a single unlock, this drops ALL recursion levels in one call.
 * A thread that does not own the monitor gets TM_ERROR_NONE and no effect.
 *
 * @param[in] lockword_ptr address of the thin-monitor lockword
 * @return TM_ERROR_NONE (ownership check failures are silently ignored)
 */
IDATA VMCALL hythread_thin_monitor_release(hythread_thin_monitor_t *lockword_ptr) {
    IDATA status;
    U_32 lockword = *lockword_ptr;
    hythread_t self = hythread_self();

    if (self != hythread_thin_monitor_get_owner(lockword_ptr)) {
        // nothing to do, thread is not an owner of monitor
        return TM_ERROR_NONE;
    }
    if (IS_FAT_LOCK(lockword)) {
        // this is fat monitor: reset recursion and release the OS mutex once
        hythread_monitor_t monitor = locktable_get_fat_monitor(FAT_LOCK_ID(lockword));
        monitor->recursion_count = 0;
        status = port_mutex_unlock(&monitor->mutex);
        assert(status == TM_ERROR_NONE);
    } else {
        // this is thin monitor: unwind every recursion level, then keep only
        // the low 16 bits — presumably clearing the owner-id field marks the
        // monitor free; TODO confirm against the lockword layout
        while (RECURSION(lockword)) {
            RECURSION_DEC(lockword_ptr, lockword);
            lockword = *lockword_ptr;
        }
        *lockword_ptr = lockword & 0xffff;
    }
    return TM_ERROR_NONE;
}
/**
 * Overwrites the state word of the given thread.
 *
 * @param[in] thread the thread to update
 * @param[in] state  new state value
 * @return always TM_ERROR_NONE
 */
IDATA VMCALL hythread_set_state(hythread_t thread, IDATA state)
{
    assert(thread);
    // update under the thread's own mutex so readers see a consistent value
    port_mutex_lock(&thread->mutex);
    thread->state = state;
    port_mutex_unlock(&thread->mutex);
    return TM_ERROR_NONE;
} // hythread_set_state
/**
 * Acquires the lock over threading subsystem.
 *
 * The lock blocks new thread creation and thread exit operations.
 *
 * @return TM_ERROR_NONE on success, or the mutex-lock error for an
 *         unattached caller.
 */
IDATA VMCALL hythread_global_lock() {
    IDATA status;
    hythread_t self = hythread_self();

    // we need not care about suspension if the thread
    // is not even attached to hythread
    if (self == NULL) {
        return port_mutex_lock(&TM_LIBRARY->TM_LOCK);
    }

    // disable_count must be 0 on potentially
    // blocking operation to prevent suspension deadlocks,
    // meaning that the thread is safe for suspension
    assert(hythread_is_suspend_enabled());
    status = port_mutex_lock(&TM_LIBRARY->TM_LOCK);
    assert(status == TM_ERROR_NONE);

    // make sure we do not get a global thread lock
    // while being requested to suspend
    while (self->suspend_count) {
        // give up global thread lock before safepoint,
        // because this thread can be suspended at a safepoint
        status = port_mutex_unlock(&TM_LIBRARY->TM_LOCK);
        assert(status == TM_ERROR_NONE);
        hythread_safe_point();
        // reacquire and re-check: a new suspend request may have arrived
        status = port_mutex_lock(&TM_LIBRARY->TM_LOCK);
        assert(status == TM_ERROR_NONE);
    }
    return TM_ERROR_NONE;
}
/**
 * Returns a consistent snapshot of the given thread's state word.
 *
 * @param[in] thread the thread to query
 * @return the thread's current state
 */
IDATA VMCALL hythread_get_state(hythread_t thread)
{
    IDATA snapshot;

    assert(thread);
    // read under the thread's mutex to pair with hythread_set_state()
    port_mutex_lock(&thread->mutex);
    snapshot = thread->state;
    port_mutex_unlock(&thread->mutex);
    return snapshot;
} // hythread_get_state
/**
 * Looks up the profile registered for the given method.
 *
 * @param[in] mh method handle to look up
 * @return the method's profile, or NULL if none was registered
 */
MethodProfile* EBProfileCollector::getMethodProfile(Method_Handle mh) const
{
    MethodProfile* found = NULL;

    port_mutex_lock(&profilesLock);
    EBProfilesMap::const_iterator pos = profilesByMethod.find(mh);
    if (pos != profilesByMethod.end()) {
        found = pos->second;
    }
    port_mutex_unlock(&profilesLock);

    return found;
}
/**
 * Sets the count for latch to the specific value.
 *
 * @param[in] latch the latch
 * @param[in] count new count value
 * @return TM_ERROR_NONE on success, or the failing lock/unlock status
 */
IDATA VMCALL hylatch_set(hylatch_t latch, IDATA count)
{
    IDATA status = port_mutex_lock(&latch->mutex);
    if (status != TM_ERROR_NONE) {
        return status;
    }
    latch->count = count;
    return port_mutex_unlock(&latch->mutex);
}
/**
 * Increments the library's non-daemon thread counter.
 *
 * @param[in] thread the calling thread
 * @return TM_ERROR_NONE on success, or the failing lock/unlock status
 */
IDATA VMCALL hythread_increase_nondaemon_threads_count(hythread_t thread)
{
    hythread_library_t lib = thread->library;
    IDATA status = port_mutex_lock(&lib->TM_LOCK);
    if (status != TM_ERROR_NONE) {
        return status;
    }
    lib->nondaemon_thread_count++;
    return port_mutex_unlock(&lib->TM_LOCK);
} // hythread_increase_nondaemon_threads_count
/**
 * Returns the count for this latch.
 *
 * The count value for the latch determines how many times it needs to be counted down
 * before the threads awaiting on the latch can be unblocked.
 * @param[out] count count value
 * @param[in] latch the latch
 * @return TM_ERROR_NONE on success, or the failing lock/unlock status
 */
IDATA VMCALL hylatch_get_count(IDATA *count, hylatch_t latch)
{
    IDATA status = port_mutex_lock(&latch->mutex);
    if (status != TM_ERROR_NONE) {
        return status;
    }
    *count = latch->count;
    return port_mutex_unlock(&latch->mutex);
}
//wait method implementation //// static IDATA latch_wait_impl(hylatch_t latch, I_64 ms, IDATA nano, IDATA interruptable) { IDATA status; status = port_mutex_lock(&latch->mutex); if (status != TM_ERROR_NONE) { return status; } while (latch->count) { status = condvar_wait_impl(&latch->condition, &latch->mutex, ms, nano, interruptable); //check interruption and other problems if (status != TM_ERROR_NONE) { port_mutex_unlock(&latch->mutex); return status; } if (ms || nano) break; } status = port_mutex_unlock(&latch->mutex); return status; }
/** * waits on a condition variable, directly using OS interfaces. * * This function does not implement interruptability and thread state * functionality, thus the caller of this function have to handle it. */ int os_cond_timedwait(hycond_t *cond, osmutex_t *mutex, I_64 ms, IDATA nano) { int r = 0; struct waiting_node node; DWORD res; DWORD timeout; if (!ms && !nano) { timeout = INFINITE; } else { timeout = (DWORD)ms + (nano ? 1:0); } // NULL attributes, manual reset, initially unsignalled, NULL name node.event = CreateEvent(NULL, TRUE, FALSE, NULL); port_mutex_lock(&cond->queue_mutex); _enqueue(cond, &node); port_mutex_unlock(&cond->queue_mutex); // release mutex and wait for signal port_mutex_unlock(mutex); res = WaitForSingleObject(node.event, timeout); if (res != WAIT_OBJECT_0) { if (res == WAIT_TIMEOUT) r = TM_ERROR_TIMEOUT; else r = (int)GetLastError(); } // re-acquire mutex associated with condition variable port_mutex_lock(mutex); port_mutex_lock(&cond->queue_mutex); _remove_from_queue(cond, &node); CloseHandle(node.event); port_mutex_unlock(&cond->queue_mutex); return r; }
/**
 * Records (and, in synchronous mode, erases) all profiles that belong to the
 * class loader being unloaded.
 *
 * @param[in] h handle of the class loader going away
 */
void EBProfileCollector::classloaderUnloadingCallback(Class_Loader_Handle h)
{
    port_mutex_lock(&profilesLock);

    // In async mode the checker thread may iterate the profile containers
    // without holding the lock, so we must not erase entries here; we only
    // queue them in unloadedMethodProfiles for the checker to clean up.
    const bool erase = (mode != EB_PCMODE_ASYNC);

    addProfilesForClassloader(h, greenProfiles, unloadedMethodProfiles, erase);
    addProfilesForClassloader(h, newProfiles, unloadedMethodProfiles, erase);
    if (erase) {
        cleanUnloadedProfiles(false);
    }

    port_mutex_unlock(&profilesLock);
}
/**
 * Decreases the count for latch.
 *
 * If the count reaches zero, all threads awaiting on the latch are unblocked.
 * @param[in] latch the latch
 * @return TM_ERROR_NONE on success, TM_ERROR_ILLEGAL_STATE if the latch is
 *         already released, or the failing lock/notify status
 * @sa java.util.concurrent.CountDownLatch.countDown()
 */
IDATA VMCALL hylatch_count_down(hylatch_t latch)
{
    IDATA status = port_mutex_lock(&latch->mutex);
    if (status != TM_ERROR_NONE) {
        return status;
    }

    // counting down a latch that already reached zero is a caller error
    if (latch->count <= 0) {
        status = port_mutex_unlock(&latch->mutex);
        return (status != TM_ERROR_NONE) ? status : TM_ERROR_ILLEGAL_STATE;
    }

    latch->count--;
    if (latch->count == 0) {
        // final count-down: release every waiter
        status = hycond_notify_all(&latch->condition);
        if (status != TM_ERROR_NONE) {
            port_mutex_unlock(&latch->mutex);
            return status;
        }
    }

    return port_mutex_unlock(&latch->mutex);
}
/**
 * Signals all threads blocking on the given condition variable.
 *
 * @param[in] cond the condition variable on which to produce the broadcast.
 * @return 0 on success, or the last Win32 error from SetEvent
 * @sa apr_thread_cond_broadcast()
 */
IDATA VMCALL hycond_notify_all (hycond_t *cond)
{
    int err = 0;
    struct waiting_node *node;

    port_mutex_lock(&cond->queue_mutex);
    // wake every queued waiter; remember the last failure, keep going
    while ((node = _dequeue(cond)) != NULL) {
        if (SetEvent(node->event) == 0) {
            err = GetLastError();
        }
    }
    port_mutex_unlock(&cond->queue_mutex);

    return err;
}
/**
 * Creates and registers a fresh entry/backedge profile for the given method.
 *
 * @param[in] mh method to profile; must not already have a profile
 * @return the newly created profile (owned by the collector's maps)
 */
EBMethodProfile* EBProfileCollector::createProfile(Method_Handle mh)
{
    EBMethodProfile* fresh = new EBMethodProfile(this, mh);

    port_mutex_lock(&profilesLock);
    assert(profilesByMethod.find(mh) == profilesByMethod.end());
    profilesByMethod[mh] = fresh;
    if (mode == EB_PCMODE_ASYNC) {
        // The checker thread iterates the working set without the lock, so
        // new profiles are published through newProfiles and merged later.
        newProfiles.push_back(fresh);
    }
    port_mutex_unlock(&profilesLock);

    return fresh;
}
/*
 * Exit locktable write section.
 *
 * Hands the table over to waiting readers first (reader preference),
 * otherwise to one waiting writer, otherwise marks the table idle.
 */
static void locktable_writer_exit() {
    IDATA status = port_mutex_lock(&lock_table->mutex);
    assert(status == TM_ERROR_NONE);

    // NOTE(review): this tests readers_reading (>0) but then promotes
    // readers_waiting into readers_reading — while a writer holds the table,
    // readers_reading would be expected to be 0. Verify against the reader
    // enter path whether this condition should be readers_waiting > 0.
    if (lock_table->readers_reading > 0) {
        lock_table->readers_reading = lock_table->readers_waiting;
        lock_table->readers_waiting = 0;
        lock_table->state = HYTHREAD_LOCKTABLE_READING;
        hycond_notify_all(&lock_table->read);
    } else if (lock_table->writers_waiting > 0) {
        // wake exactly one queued writer
        hycond_notify(&lock_table->write);
    } else {
        lock_table->state = HYTHREAD_LOCKTABLE_IDLE;
    }

    status = port_mutex_unlock(&lock_table->mutex);
    assert(status == TM_ERROR_NONE);
}
/*
 * Enter locktable write section.
 *
 * Blocks while the table is busy; an idle table is claimed immediately.
 */
static void locktable_writer_enter() {
    IDATA status = port_mutex_lock(&lock_table->mutex);
    assert(status == TM_ERROR_NONE);

    if (lock_table->state != HYTHREAD_LOCKTABLE_IDLE) {
        lock_table->writers_waiting++;
        // NOTE(review): single (non-looped) wait — relies on the waker to set
        // state to WRITING before signalling and on no spurious wakeups from
        // hycond_wait_timed_raw; confirm that implementation guarantees this.
        hycond_wait_timed_raw(&lock_table->write, &lock_table->mutex, 0, 0);

        // We are asserting here that we exited wait with the correct state
        assert(lock_table->state == HYTHREAD_LOCKTABLE_WRITING);

        lock_table->writers_waiting--;
    } else {
        lock_table->state = HYTHREAD_LOCKTABLE_WRITING;
    }

    status = port_mutex_unlock(&lock_table->mutex);
    assert(status == TM_ERROR_NONE);
}
/**
 * Exit a monitor.
 *
 * Exit a monitor, and if the owning count is zero, release it.
 *
 * @param[in] mon_ptr a monitor to be exited
 * @return 0 on success, <br>HYTHREAD_ILLEGAL_MONITOR_STATE if the current thread does not own the monitor
 *
 * @see hythread_monitor_exit_using_threadId, hythread_monitor_enter, hythread_monitor_enter_using_threadId
 */
IDATA VMCALL hythread_monitor_exit(hythread_monitor_t mon_ptr)
{
    IDATA status = TM_ERROR_NONE;

    assert(mon_ptr->recursion_count >= 0);

    // only the owner may exit
    if (mon_ptr->owner != tm_self_tls) {
        CTRACE(("exit TM_ERROR_ILLEGAL_STATE owner: %d self: %d, rec: %d\n",
            mon_ptr->owner ? mon_ptr->owner->thread_id : 0,
            tm_self_tls->thread_id, mon_ptr->recursion_count));
        return TM_ERROR_ILLEGAL_STATE;
    }

    if (mon_ptr->recursion_count == 0) {
        // outermost exit: drop ownership and release the OS mutex
        mon_ptr->owner = NULL;
        status = port_mutex_unlock(&mon_ptr->mutex);
    } else {
        // nested exit: just unwind one recursion level
        mon_ptr->recursion_count--;
    }

    assert(status == TM_ERROR_NONE);
    return status;
}
void sig_process_crash_flags_change(unsigned added, unsigned removed) { apr_status_t aprarr = port_mutex_lock(&g_mutex); if (aprarr != APR_SUCCESS) return; if ((added & PORT_CRASH_CALL_DEBUGGER) != 0 && asserts_disabled) { restore_assert_dialogs(); asserts_disabled = false; signal(SIGABRT, (sigh_t)final_sigabrt_handler); } if ((removed & PORT_CRASH_CALL_DEBUGGER) != 0 && !asserts_disabled) { disable_assert_dialogs(); asserts_disabled = true; signal(SIGABRT, (sigh_t)sigabrt_handler); } port_mutex_unlock(&g_mutex); }
/**
 * Destroys raw monitor.
 *
 * @param[in] mon_ptr address where monitor needs to be destroyed.
 * @return TM_ERROR_NONE on success, TM_ERROR_INVALID_MONITOR if the id is
 *         unknown, or the failing exit/lock status.
 */
IDATA VMCALL jthread_raw_monitor_destroy(jrawMonitorID mon_ptr)
{
    // NOTE(review): the table is read here without jvmti_monitor_table_lock;
    // only the array_delete below is protected — confirm array_get is safe
    // against concurrent table mutation.
    hythread_monitor_t monitor =
        (hythread_monitor_t)array_get(jvmti_monitor_table, (UDATA)mon_ptr);
    if (!monitor) {
        return TM_ERROR_INVALID_MONITOR;
    }
    // destroy fails while the monitor is held; keep exiting our own
    // recursive acquisitions until destroy succeeds
    while (hythread_monitor_destroy((hythread_monitor_t)monitor) != TM_ERROR_NONE) {
        IDATA status = hythread_monitor_exit((hythread_monitor_t) monitor);
        if (status != TM_ERROR_NONE) {
            return status;
        }
    }
    IDATA status = port_mutex_lock(&jvmti_monitor_table_lock);
    if (status != TM_ERROR_NONE) {
        return status;
    }
    // release the id slot so it can be reused
    array_delete(jvmti_monitor_table, (UDATA) mon_ptr);
    status = port_mutex_unlock(&jvmti_monitor_table_lock);
    return status;
} // jthread_raw_monitor_destroy
/**
 * Puts the current thread to sleep for at least the given time.
 *
 * Implemented as a wait on the thread's own monitor: the monitor is entered,
 * logically disowned for the duration of the condvar wait, and re-owned on
 * wakeup. The remaining timeout is recomputed after every (possibly spurious)
 * wakeup until it expires.
 *
 * @param[in] millis        milliseconds to sleep (0/0 together mean yield)
 * @param[in] nanos         additional nanoseconds
 * @param[in] interruptable non-zero if interruption should abort the sleep
 * @return TM_ERROR_NONE, TM_ERROR_INTERRUPT (only when interruptable), or
 *         TM_ERROR_UNATTACHED_THREAD for an unattached caller
 */
IDATA thread_sleep_impl(I_64 millis, IDATA nanos, IDATA interruptable) {
    IDATA status;
    IDATA result;
    hythread_t self;
    hythread_monitor_t mon;

    // zero timeout: just give up the processor
    if (nanos == 0 && millis == 0) {
        hythread_yield();
        return TM_ERROR_NONE;
    }
    if (!(self = hythread_self())) {
        // Report error in case current thread is not attached
        return TM_ERROR_UNATTACHED_THREAD;
    }

    // Grab thread monitor
    mon = self->monitor;
    status = hythread_monitor_enter(mon);
    assert(status == TM_ERROR_NONE);
    assert(mon->recursion_count == 0);
    // disown the monitor so the condvar wait below acts like a monitor wait
    mon->owner = NULL;
    mon->wait_count++;

    // Set thread state
    status = port_mutex_lock(&self->mutex);
    assert(status == TM_ERROR_NONE);
    self->waited_monitor = mon;
    self->state |= TM_THREAD_STATE_SLEEPING;
    status = port_mutex_unlock(&self->mutex);
    assert(status == TM_ERROR_NONE);

    do {
        apr_time_t start;
        assert(mon->notify_count >= 0);
        assert(mon->notify_count < mon->wait_count);
        start = apr_time_now();

        result = condvar_wait_impl(&mon->condition, &mon->mutex, millis, nanos, interruptable);
        if (result != TM_ERROR_NONE) {
            break;
        }
        // we should not change millis and nanos if both are 0 (meaning "no timeout")
        if (millis || nanos) {
            // subtract elapsed time (microseconds) from the remaining timeout,
            // borrowing a millisecond when the nano part underflows
            apr_interval_time_t elapsed = apr_time_now() - start;
            nanos -= (IDATA)((elapsed % 1000) * 1000);
            if (nanos < 0) {
                millis -= elapsed/1000 + 1;
                nanos += 1000000;
            } else {
                millis -= elapsed/1000;
            }
            if (millis < 0) {
                assert(status == TM_ERROR_NONE);
                status = TM_ERROR_TIMEOUT;
                break;
            }
            assert(0 <= nanos && nanos < 1000000);
        }
    } while(1);

    // Restore thread state
    status = port_mutex_lock(&self->mutex);
    assert(status == TM_ERROR_NONE);
    self->state &= ~TM_THREAD_STATE_SLEEPING;
    self->waited_monitor = NULL;
    status = port_mutex_unlock(&self->mutex);
    assert(status == TM_ERROR_NONE);

    // Release thread monitor: re-own it so hythread_monitor_exit succeeds
    mon->wait_count--;
    mon->owner = self;
    assert(mon->notify_count <= mon->wait_count);
    status = hythread_monitor_exit(mon);
    assert(status == TM_ERROR_NONE);

    // honour pending suspend/exception requests before returning
    if (self->request) {
        hythread_safe_point();
        hythread_exception_safe_point();
    }

    // interruption is only reported when the caller asked for it
    return (result == TM_ERROR_INTERRUPT && interruptable)
        ? TM_ERROR_INTERRUPT : TM_ERROR_NONE;
}
/**
 * Core implementation of waiting on a monitor.
 *
 * The caller must own mon_ptr. Ownership and the recursion count are saved,
 * the monitor is logically released for the duration of the condvar wait,
 * and both are restored before returning. The remaining timeout is
 * recomputed after every (possibly spurious) wakeup.
 *
 * @param[in] mon_ptr       monitor to wait on (must be owned by the caller)
 * @param[in] ms            timeout milliseconds (0/0 together: no timeout)
 * @param[in] nano          additional nanoseconds
 * @param[in] interruptable non-zero if interruption should abort the wait
 * @return TM_ERROR_NONE on notify, TM_ERROR_TIMEOUT, TM_ERROR_INTERRUPT,
 *         or TM_ERROR_ILLEGAL_STATE if the caller does not own the monitor
 */
IDATA monitor_wait_impl(hythread_monitor_t mon_ptr, I_64 ms, IDATA nano, IDATA interruptable) {
    IDATA status;
    int saved_recursion;
    //int saved_disable_count;
    hythread_t self = tm_self_tls;
    if (mon_ptr->owner != self) {
        return TM_ERROR_ILLEGAL_STATE;
    }

    // remember the nesting depth; it is restored wholesale on exit
    saved_recursion = mon_ptr->recursion_count;
    assert(saved_recursion>=0);

    // logically release the monitor while waiting
    mon_ptr->owner = NULL;
    mon_ptr->recursion_count =0;
    mon_ptr->wait_count++;

    // publish "in monitor wait" state for observers
    port_mutex_lock(&self->mutex);
    self->state |= TM_THREAD_STATE_IN_MONITOR_WAIT;
    self->waited_monitor = mon_ptr;
    port_mutex_unlock(&self->mutex);

    do {
        apr_time_t start;
        assert(mon_ptr->notify_count >= 0);
        assert(mon_ptr->notify_count < mon_ptr->wait_count);
        start = apr_time_now();

        status = condvar_wait_impl(&mon_ptr->condition, &mon_ptr->mutex, ms, nano, interruptable);
        if (status != TM_ERROR_NONE || mon_ptr->notify_count) {
            break;
        }
        // we should not change ms and nano if both are 0 (meaning "no timeout")
        if (ms || nano) {
            apr_interval_time_t elapsed;
            elapsed = apr_time_now() - start;     // microseconds
            nano -= (IDATA)((elapsed % 1000) * 1000);
            if (nano < 0) {
                // borrow one millisecond from ms
                ms -= elapsed/1000 + 1;
                nano += 1000000;
            } else {
                ms -= elapsed/1000;
            }
            if (ms < 0) {
                assert(status == TM_ERROR_NONE);
                status = TM_ERROR_TIMEOUT;
                break;
            }
            assert(0 <= nano && nano < 1000000);
        }
    } while (1);

    // consume the notify_count unless we got an error (or were interrupted)
    if (mon_ptr->notify_count > 0
            && (status == TM_ERROR_NONE
                || mon_ptr->notify_count == mon_ptr->wait_count)) {
        mon_ptr->notify_count--;
    }

    // clear the published wait state
    port_mutex_lock(&self->mutex);
    self->state &= ~TM_THREAD_STATE_IN_MONITOR_WAIT;
    self->waited_monitor = NULL;
    port_mutex_unlock(&self->mutex);

    mon_ptr->wait_count--;
    assert(mon_ptr->notify_count <= mon_ptr->wait_count);

    if (self->request) {
        int save_count;
        // drop the monitor mutex around the safe point so a suspender is
        // not blocked on it, then re-acquire with suspension re-disabled
        port_mutex_unlock(&mon_ptr->mutex);
        hythread_safe_point();
        hythread_exception_safe_point();
        save_count = hythread_reset_suspend_disable();
        port_mutex_lock(&mon_ptr->mutex);
        hythread_set_suspend_disable(save_count);
    }

    // re-take ownership at the original nesting depth
    mon_ptr->recursion_count = saved_recursion;
    mon_ptr->owner = self;
    assert(mon_ptr->owner);
    return status;
}
/**
 * Wrapper around user thread start proc.
 * Used to perform some duty jobs right after thread is started
 * and before thread is finished.
 *
 * @param[in] arg heap-allocated hythread_start_proc_data (freed here)
 * @return always 0 (native thread exit code)
 */
static int HYTHREAD_PROC hythread_wrapper_start_proc(void *arg) {
    IDATA UNUSED status;
    hythread_t thread;
    hythread_start_proc_data start_proc_data;
    hythread_entrypoint_t start_proc;

    // store procedure arguments to local (arg was malloc'ed by the creator)
    start_proc_data = *(hythread_start_proc_data_t) arg;
    free(arg);

    // get hythread global lock
    status = hythread_global_lock();
    assert(status == TM_ERROR_NONE);

    // get native thread
    thread = start_proc_data.thread;
    start_proc = start_proc_data.proc;

    CTRACE(("TM: native thread started: native: %p tm: %p", port_thread_current(), thread));

    // check hythread library state: if the library is already shutting down,
    // terminate immediately without running the user procedure
    if (hythread_lib_state() != TM_LIBRARY_STATUS_INITIALIZED) {
        // set TERMINATED state
        port_mutex_lock(&thread->mutex);
        thread->state = TM_THREAD_STATE_TERMINATED;
        port_mutex_unlock(&thread->mutex);

        // set hythread_self()
        hythread_set_self(thread);
        assert(thread == hythread_self());

        // release thread structure data
        hythread_detach(thread);

        // zero hythread_self() because we don't do it in hythread_detach_ex()
        hythread_set_self(NULL);

        CTRACE(("TM: native thread terminated due to shutdown: native: %p tm: %p", port_thread_current(), thread));

        // release hythread global lock
        status = hythread_global_unlock();
        assert(status == TM_ERROR_NONE);
        return 0;
    }

    // register to group and set ALIVE & RUNNABLE states
    status = hythread_set_to_group(thread, start_proc_data.group);
    assert(status == TM_ERROR_NONE);

    // set hythread_self()
    hythread_set_self(thread);
    assert(thread == hythread_self());

    // set priority
    status = hythread_set_priority(thread, thread->priority);
    // FIXME - cannot set priority
    //assert(status == TM_ERROR_NONE);

    // release hythread global lock
    status = hythread_global_unlock();
    assert(status == TM_ERROR_NONE);

    // Do actual call of the thread body supplied by the user.
    start_proc(start_proc_data.proc_args);

    // user code must return with suspension enabled
    assert(hythread_is_suspend_enabled());

    // get hythread global lock
    status = hythread_global_lock();
    assert(status == TM_ERROR_NONE);

    // set TERMINATED state
    port_mutex_lock(&thread->mutex);
    thread->state = TM_THREAD_STATE_TERMINATED;
    port_mutex_unlock(&thread->mutex);

    // detach and free thread
    hythread_detach(thread);

    // release hythread global lock
    status = hythread_global_unlock();
    assert(status == TM_ERROR_NONE);

    return 0;
}
/**
 * Initializes a new thread structure.
 *
 * For a brand-new structure (no OS handle yet) the synchronization
 * primitives (resume semaphore, mutex, monitor) are created; for a reused
 * structure they are preserved across the memset and only the OS handle is
 * released. java_status survives in both cases.
 *
 * @param[in] new_thread the structure to (re)initialize
 * @return always TM_ERROR_NONE (primitive creation is assert-checked)
 */
IDATA VMCALL hythread_struct_init(hythread_t new_thread)
{
    char jstatus;
    IDATA status;

    assert(new_thread);
    // preserve the Java-level status across the reset below
    jstatus = new_thread->java_status;
    if (!new_thread->os_handle) {
        // new thread, create thread primitives
        memset(new_thread, 0, sizeof(HyThread));
        status = hysem_create(&new_thread->resume_event, 0, 1);
        assert(status == TM_ERROR_NONE);
        status = port_mutex_create(&new_thread->mutex, APR_THREAD_MUTEX_NESTED);
        assert(status == TM_ERROR_NONE);
        status = hythread_monitor_init(&new_thread->monitor, 0);
        assert(status == TM_ERROR_NONE);
    } else {
        // old thread, reset structure but keep its primitives alive
        int result;
        hysem_t resume;
        osmutex_t mutex;
        hythread_monitor_t monitor;

        // release thread OS handle
        result = port_thread_free_handle(new_thread->os_handle);
        assert(0 == result);

        resume = new_thread->resume_event;
        mutex = new_thread->mutex;
        monitor = new_thread->monitor;

        // zero new thread
        memset(new_thread, 0, sizeof(HyThread));

        new_thread->resume_event = resume;
        new_thread->mutex = mutex;
        new_thread->monitor = monitor;
    }
    assert(new_thread->os_handle == 0);

    new_thread->java_status = jstatus;
    new_thread->priority = HYTHREAD_PRIORITY_NORMAL;

#ifdef ORDER
    // ORDER record/replay bookkeeping starts from a clean slate
    new_thread->alloc_count = 0;
    new_thread->thread_create_count = 0;
    new_thread->p_tid = 0;
    new_thread->p_count = 0;
//    new_thread->isInVMRegistry = 0;
#endif

    port_mutex_lock(&new_thread->mutex);
    new_thread->state = TM_THREAD_STATE_NEW;
    port_mutex_unlock(&new_thread->mutex);

    // make sure the resume semaphore starts unsignalled
    status = hysem_set(new_thread->resume_event, 0);
    assert(status == TM_ERROR_NONE);

    return TM_ERROR_NONE;
}
IDATA VMCALL hythread_set_to_group(hythread_t thread, hythread_group_t group) { #ifdef ORDER int map_id; #endif IDATA status; hythread_t cur, prev; assert(thread); assert(group); Retry_lock: // Acquire global TM lock to prevent concurrent access to thread list status = hythread_global_lock(); assert(status == TM_ERROR_NONE); #ifdef ORDER #ifdef ORDER_DEBUG printf("[TEST]: hythread mapping to object (%d, %d)\n", thread->p_tid, thread->p_count); #endif if(hythread_vm_is_initializing || (thread->p_tid == 0 && thread->p_count == 0)){ } else{ if(hythread_get_IsRecord()){ #ifdef ORDER_DEBUG printf("[RECORD]: RECORD IN hythread_set_to_group!!!\n"); #endif threadRunOrderFile = fopen("THREAD_CREATE_ORDER.log", "a+"); #ifdef ORDER_DEBUG assert(threadRunOrderFile); #endif fprintf(threadRunOrderFile, "%d %d\n", thread->p_tid, thread->p_count); fflush(threadRunOrderFile); fclose(threadRunOrderFile); threadRunOrderFile = NULL; } else{ //#ifdef ORDER_DEBUG printf("[REPLAY]: REPLAY IN hythread_set_to_group!!!\n"); //#endif if(threadRunOrderFile == NULL){ threadRunOrderFile = fopen("THREAD_CREATE_ORDER.log", "r"); } #ifdef ORDER_DEBUG assert(threadRunOrderFile); #endif if(p_tid == -1 && p_count == -1){ #ifdef ORDER_DEBUG if(feof(threadRunOrderFile)){ assert(0); } #endif fscanf(threadRunOrderFile, "%d %d\n", &p_tid, &p_count); } if(p_tid == thread->p_tid && p_count == thread->p_count){ p_tid = -1; p_count = -1; } else{ IDATA status_temp = hythread_global_unlock(); assert(status_temp == TM_ERROR_NONE); //#ifdef ORDER_DEBUG printf("[THREAD_CREATE]: This is not the correct order of thread create, pthread_self %d\n", pthread_self()); //#endif usleep(1000); hythread_yield(); goto Retry_lock; } } } #endif assert(thread->os_handle); if (!thread->thread_id) { char free_slot_found = 0; unsigned int i; for(i = 0; i < MAX_ID; i++) { // increase next_id to allow thread_id change next_id++; if (next_id == MAX_ID) { next_id = 1; } if (fast_thread_array[next_id] == NULL) { thread->thread_id = 
next_id; free_slot_found = 1; #ifdef ORDER { char name[40]; FILE* thread_map = NULL; int current_pthread_id = (int)thread->os_handle; sprintf(name, "THREAD_MAP_WORKING_CLASSLIB.log"); thread_map = fopen(name, "a+"); #ifdef ORDER_DEBUG assert(thread_map); #endif fwrite(&next_id, sizeof(int), 1, thread_map); fwrite(¤t_pthread_id, sizeof(int), 1, thread_map); fflush(thread_map); fclose(thread_map); } // printf("create thread id : %d\n", (int)new_thread->os_handle); for (map_id = 0 ; map_id < ORDER_THREAD_NUM ; map_id ++ ) { if (pthreadid_tid_mapping[map_id][0] == (int)thread->os_handle) { Thread_Map tmap; tmap.thread_global_id = next_id; tmap.pthread_id = (int)thread->os_handle; tmap.thread_assigned_id = pthreadid_tid_mapping[map_id][1]; // if (threadMapFile == NULL) threadMapFile = fopen("RECORD_THREAD_MAP.log", "a+"); fwrite((char *)&tmap, 1, sizeof(Thread_Map), threadMapFile); fflush(threadMapFile); fclose(threadMapFile); threadMapFile = NULL; #ifdef ORDER_DEBUG printf("pthread id exists : %d\n", (int)pthreadid_tid_mapping[map_id][0]); printf("tid mapping : %d -> %d\n",pthreadid_tid_mapping[map_id][0], pthreadid_tid_mapping[map_id][1]); #endif break; } else if (pthreadid_tid_mapping[map_id][0] == 0) { Thread_Map tmap; tmap.thread_global_id = next_id; tmap.pthread_id = (int)thread->os_handle; tmap.thread_assigned_id = next_id; // if (threadMapFile == NULL) threadMapFile = fopen("RECORD_THREAD_MAP.log", "a+"); fwrite((char *)&tmap, 1, sizeof(Thread_Map), threadMapFile); fflush(threadMapFile); fclose(threadMapFile); threadMapFile = NULL; pthreadid_tid_mapping[map_id][0] = (int)(int)thread->os_handle; pthreadid_tid_mapping[map_id][1] = next_id; #ifdef ORDER_DEBUG printf("new pthread id : %d\n", (int)pthreadid_tid_mapping[map_id][0]); printf("tid mapping : %d -> %d\n", pthreadid_tid_mapping[map_id][0], pthreadid_tid_mapping[map_id][1]); #endif break; } if(i == (ORDER_THREAD_NUM - 1)) { printf("[yzm]Error : Thread Map overflow!\n"); assert(0); exit(0); } } #endif break; 
} } if (!free_slot_found) { status = hythread_global_unlock(); assert(status == TM_ERROR_NONE); return TM_ERROR_OUT_OF_MEMORY; } } assert(thread->thread_id); fast_thread_array[thread->thread_id] = thread; thread->group = group; group->threads_count++; cur = group->thread_list->next; prev = cur->prev; thread->next = cur; thread->prev = prev; prev->next = cur->prev = thread; port_mutex_lock(&thread->mutex); thread->state |= TM_THREAD_STATE_ALIVE | TM_THREAD_STATE_RUNNABLE; port_mutex_unlock(&thread->mutex); status = hythread_global_unlock(); assert(status == TM_ERROR_NONE); return TM_ERROR_NONE; }