virtual void run() {
    // This is the code that each of the spawned threads runs.
    // All threads move through the started - middle - done sequence together,
    // waiting for all other threads to reach each point before advancing.
    // gTestMutexA guards the three shared counters; gThreadsCountChanged is
    // broadcast each time one of them changes.
    umtx_lock(&gTestMutexA);

    // Rendezvous 1: count this thread as started, wait for all to start.
    gThreadsStarted += 1;
    umtx_condBroadcast(&gThreadsCountChanged);
    while (gThreadsStarted < TESTMUTEX_THREAD_COUNT) {
        if (gThreadsInMiddle != 0) {
            // No thread may reach the middle before every thread has started.
            IntlTest::gTest->errln(
                "%s:%d gThreadsInMiddle = %d. Expected 0.", __FILE__, __LINE__, gThreadsInMiddle);
            return;
        }
        umtx_condWait(&gThreadsCountChanged, &gTestMutexA);
    }

    // Rendezvous 2: count this thread as in the middle, wait for all.
    gThreadsInMiddle += 1;
    umtx_condBroadcast(&gThreadsCountChanged);
    while (gThreadsInMiddle < TESTMUTEX_THREAD_COUNT) {
        if (gThreadsDone != 0) {
            // No thread may finish before every thread has reached the middle.
            IntlTest::gTest->errln(
                "%s:%d gThreadsDone = %d. Expected 0.", __FILE__, __LINE__, gThreadsDone);
            return;
        }
        umtx_condWait(&gThreadsCountChanged, &gTestMutexA);
    }

    // Rendezvous 3: count this thread as done, wait for all to finish.
    gThreadsDone += 1;
    umtx_condBroadcast(&gThreadsCountChanged);
    while (gThreadsDone < TESTMUTEX_THREAD_COUNT) {
        umtx_condWait(&gThreadsCountChanged, &gTestMutexA);
    }
    umtx_unlock(&gTestMutexA);
}
void MultithreadTest::TestConditionVariables() { gStartedThreads = 0; gFinishedThreads = 0; int i; umtx_lock(&gCTMutex); CondThread *threads[NUMTHREADS]; for (i=0; i<NUMTHREADS; ++i) { threads[i] = new CondThread; threads[i]->start(); } while (gStartedThreads < NUMTHREADS) { umtx_condWait(&gCTConditionVar, &gCTMutex); } while (gFinishedThreads < NUMTHREADS) { umtx_condWait(&gCTConditionVar, &gCTMutex); } umtx_unlock(&gCTMutex); for (i=0; i<NUMTHREADS; ++i) { if (!threads[i]->fFinished) { errln("File %s, Line %d: Error, threads[%d]->fFinished == false", __FILE__, __LINE__, i); } } for (i=0; i<NUMTHREADS; ++i) { threads[i]->join(); delete threads[i]; } }
// Poll the cache for key. Returns TRUE with value/status filled in when an
// entry for the key exists (after waiting out any other thread that is still
// constructing it). Returns FALSE after inserting an inProgress placeholder,
// leaving the caller responsible for creating the final value.
UBool UnifiedCache::_poll(
        const CacheKeyBase &key,
        const SharedObject *&value,
        UErrorCode &status) const {
    U_ASSERT(value == NULL);
    U_ASSERT(status == U_ZERO_ERROR);
    Mutex lock(gCacheMutex());

    // An inProgress placeholder in the table means another thread is building
    // the value for this key right now. Sleep on the condition variable and
    // re-probe until that construction completes.
    const UHashElement *e = uhash_find(fHashtable, &key);
    while (e != NULL && _inProgress(e)) {
        umtx_condWait(gInProgressValueAddedCond(), gCacheMutex());
        e = uhash_find(fHashtable, &key);
    }

    if (e != NULL) {
        // A real entry is present; hand its contents back to the caller.
        _fetch(e, value, status);
        return TRUE;
    }

    // Nothing cached for this key. Leave an inProgress placeholder; our
    // caller will create the final value and update the hash table.
    _putNew(key, fNoValue, U_ZERO_ERROR, status);
    return FALSE;
}
void MultithreadTest::TestMutex() { gThreadsStarted = 0; gThreadsInMiddle = 0; gThreadsDone = 0; int32_t i = 0; TestMutexThread threads[TESTMUTEX_THREAD_COUNT]; umtx_lock(&gTestMutexA); for (i=0; i<TESTMUTEX_THREAD_COUNT; i++) { if (threads[i].start() != 0) { errln("%s:%d Error starting thread %d", __FILE__, __LINE__, i); return; } } // Because we are holding gTestMutexA, all of the threads should be blocked // at the start of their run() function. if (gThreadsStarted != 0) { errln("%s:%d gThreadsStarted=%d. Expected 0.", __FILE__, __LINE__, gThreadsStarted); return; } while (gThreadsInMiddle < TESTMUTEX_THREAD_COUNT) { if (gThreadsDone != 0) { errln("%s:%d gThreadsDone=%d. Expected 0.", __FILE__, __LINE__, gThreadsStarted); return; } umtx_condWait(&gThreadsCountChanged, &gTestMutexA); } while (gThreadsDone < TESTMUTEX_THREAD_COUNT) { umtx_condWait(&gThreadsCountChanged, &gTestMutexA); } umtx_unlock(&gTestMutexA); for (i=0; i<TESTMUTEX_THREAD_COUNT; i++) { threads[i].join(); } }
// Worker thread function. void CondThread::run() { umtx_lock(&gCTMutex); gStartedThreads += gConditionTestOne; umtx_condBroadcast(&gCTConditionVar); while (gStartedThreads < NUMTHREADS) { if (gFinishedThreads != 0) { IntlTest::gTest->errln("File %s, Line %d: Error, gStartedThreads = %d, gFinishedThreads = %d", __FILE__, __LINE__, gStartedThreads, gFinishedThreads); } umtx_condWait(&gCTConditionVar, &gCTMutex); } gFinishedThreads += gConditionTestOne; fFinished = true; umtx_condBroadcast(&gCTConditionVar); while (gFinishedThreads < NUMTHREADS) { umtx_condWait(&gCTConditionVar, &gCTMutex); } umtx_unlock(&gCTMutex); }
// Cache value-creation hook for UCTMultiThreadItem keys.
// When the key's locale carries more than a bare language, re-enter the
// cache with the language-only key (exercising recursive cache use).
// Otherwise create the item directly, deliberately stalling the first
// creation until two more objects exist, to verify the cache does not
// deadlock when a creation is slow.
template<> U_EXPORT
const UCTMultiThreadItem *LocaleCacheKey<UCTMultiThreadItem>::createObject(
        const void *context, UErrorCode &status) const {
    const UnifiedCache *cacheContext = (const UnifiedCache *) context;

    if (uprv_strcmp(fLoc.getLanguage(), fLoc.getName()) != 0) {
        // Locale is more specific than just a language: fetch (and, if
        // necessary, create) the language-only item through the cache.
        const UCTMultiThreadItem *result = NULL;
        if (cacheContext == NULL) {
            // No explicit cache supplied; use the per-locale static API.
            UnifiedCache::getByLocale(fLoc.getLanguage(), result, status);
            return result;
        }
        cacheContext->get(
                LocaleCacheKey<UCTMultiThreadItem>(fLoc.getLanguage()), result, status);
        return result;
    }

    umtx_lock(&gCTMutex);
    bool firstObject = (gObjectsCreated == 0);
    if (firstObject) {
        // Force the first object creation that comes through to wait
        // until others have completed. Verifies that cache doesn't
        // deadlock when a creation is slow.
        // Note that gObjectsCreated needs to be incremented from 0 to 1
        // early, to keep subsequent threads from entering this path.
        gObjectsCreated = 1;
        while (gObjectsCreated < 3) {
            umtx_condWait(&gCTConditionVar, &gCTMutex);
        }
    }
    umtx_unlock(&gCTMutex);

    const UCTMultiThreadItem *result = new UCTMultiThreadItem(fLoc.getLanguage());
    if (result == NULL) {
        status = U_MEMORY_ALLOCATION_ERROR;
    } else {
        result->addRef();
    }

    // Log that we created an object. The first object was already counted,
    // don't do it again.
    umtx_lock(&gCTMutex);
    if (!firstObject) {
        gObjectsCreated += 1;
    }
    umtx_condBroadcast(&gCTConditionVar);
    umtx_unlock(&gCTMutex);

    return result;
}
// Attempts to fetch value and status for key from cache. // On entry, gCacheMutex must not be held value must be NULL and status must // be U_ZERO_ERROR. // On exit, either returns FALSE (In this // case caller should try to create the object) or returns TRUE with value // pointing to the fetched value and status set to fetched status. When // FALSE is returned status may be set to failure if an in progress hash // entry could not be made but value will remain unchanged. When TRUE is // returned, caler must call removeRef() on value. UBool UnifiedCache::_poll( const CacheKeyBase &key, const SharedObject *&value, UErrorCode &status) const { U_ASSERT(value == NULL); U_ASSERT(status == U_ZERO_ERROR); Mutex lock(&gCacheMutex); const UHashElement *element = uhash_find(fHashtable, &key); while (element != NULL && _inProgress(element)) { umtx_condWait(&gInProgressValueAddedCond, &gCacheMutex); element = uhash_find(fHashtable, &key); } if (element != NULL) { _fetch(element, value, status); return TRUE; } _putNew(key, gNoValue, U_ZERO_ERROR, status); return FALSE; }