示例#1
0
    virtual void run() {
        // This is the code that each of the spawned threads runs.
        // All threads move through the started - middle - done sequence together,
        // waiting for all other threads to reach each point before advancing.
        umtx_lock(&gTestMutexA);
        gThreadsStarted += 1;
        umtx_condBroadcast(&gThreadsCountChanged);
        while (gThreadsStarted < TESTMUTEX_THREAD_COUNT) {
            if (gThreadsInMiddle != 0) {
                IntlTest::gTest->errln(
                    "%s:%d gThreadsInMiddle = %d. Expected 0.", __FILE__, __LINE__, gThreadsInMiddle);
                // Fix: release the mutex before bailing out. Returning while
                // still holding gTestMutexA would deadlock every other thread
                // blocked in umtx_condWait()/umtx_lock() on this mutex.
                umtx_unlock(&gTestMutexA);
                return;
            }
            umtx_condWait(&gThreadsCountChanged, &gTestMutexA);
        }

        gThreadsInMiddle += 1;
        umtx_condBroadcast(&gThreadsCountChanged);
        while (gThreadsInMiddle < TESTMUTEX_THREAD_COUNT) {
            if (gThreadsDone != 0) {
                IntlTest::gTest->errln(
                    "%s:%d gThreadsDone = %d. Expected 0.", __FILE__, __LINE__, gThreadsDone);
                // Fix: same as above — never return with the mutex held.
                umtx_unlock(&gTestMutexA);
                return;
            }
            umtx_condWait(&gThreadsCountChanged, &gTestMutexA);
        }

        gThreadsDone += 1;
        umtx_condBroadcast(&gThreadsCountChanged);
        while (gThreadsDone < TESTMUTEX_THREAD_COUNT) {
            umtx_condWait(&gThreadsCountChanged, &gTestMutexA);
        }
        umtx_unlock(&gTestMutexA);
    }
示例#2
0
void UnifiedCache::flush() const {
    Mutex lock(&gCacheMutex);

    // Use a loop in case cache items that are flushed held hard references to
    // other cache items making those additional cache items eligible for
    // flushing.
    while (_flush(FALSE));
    umtx_condBroadcast(&gInProgressValueAddedCond);
}
示例#3
0
// Worker thread function.
// Each thread rendezvouses with its peers twice under gCTMutex, using
// gCTConditionVar: once after all NUMTHREADS threads have started, and once
// after all have finished. The whole body runs with gCTMutex held; waits
// release it inside umtx_condWait.
void CondThread::run() {
    umtx_lock(&gCTMutex);
    // gConditionTestOne is the per-thread increment applied to the counters.
    gStartedThreads += gConditionTestOne;
    umtx_condBroadcast(&gCTConditionVar);

    // First rendezvous: wait until every thread has checked in. No thread can
    // have finished yet, because all of them block here first — a nonzero
    // gFinishedThreads at this point is reported as a test error.
    while (gStartedThreads < NUMTHREADS) {
        if (gFinishedThreads != 0) {
            IntlTest::gTest->errln("File %s, Line %d: Error, gStartedThreads = %d, gFinishedThreads = %d",
                             __FILE__, __LINE__, gStartedThreads, gFinishedThreads);
        }
        umtx_condWait(&gCTConditionVar, &gCTMutex);
    }

    gFinishedThreads += gConditionTestOne;
    fFinished = true;
    umtx_condBroadcast(&gCTConditionVar);

    // Second rendezvous: wait until every thread has bumped the finished
    // count before releasing the mutex and exiting.
    while (gFinishedThreads < NUMTHREADS) {
        umtx_condWait(&gCTConditionVar, &gCTMutex);
    }
    umtx_unlock(&gCTMutex);
}
示例#4
0
// Cache value factory for the multithreaded UnifiedCache test.
// Creates the UCTMultiThreadItem for this key, deliberately stalling the
// first creation until two more objects exist, to verify that a slow
// creation does not deadlock the cache.
template<> U_EXPORT
const UCTMultiThreadItem *LocaleCacheKey<UCTMultiThreadItem>::createObject(
        const void *context, UErrorCode &status) const {
    const UnifiedCache *cacheContext = (const UnifiedCache *) context;

    // If the key's locale is more than a bare language, delegate to a
    // language-only lookup (through the supplied cache, or the global one
    // when no context was provided) instead of creating a new item here.
    if (uprv_strcmp(fLoc.getLanguage(), fLoc.getName()) != 0) {
        const UCTMultiThreadItem *result = NULL;
        if (cacheContext == NULL) {
            UnifiedCache::getByLocale(fLoc.getLanguage(), result, status);
            return result;
        }
        cacheContext->get(LocaleCacheKey<UCTMultiThreadItem>(fLoc.getLanguage()), result, status);
        return result;
    }

    umtx_lock(&gCTMutex);
    bool firstObject = (gObjectsCreated == 0);
    if (firstObject) {
        // Force the first object creation that comes through to wait
        // until others have completed. Verifies that cache doesn't
        // deadlock when a creation is slow.

        // Note that gObjectsCreated needs to be incremented from 0 to 1
        // early, to keep subsequent threads from entering this path.
        gObjectsCreated = 1;
        while (gObjectsCreated < 3) {
            umtx_condWait(&gCTConditionVar, &gCTMutex);
        }
    }
    umtx_unlock(&gCTMutex);

    const UCTMultiThreadItem *result =
        new UCTMultiThreadItem(fLoc.getLanguage());
    if (result == NULL) {
        status = U_MEMORY_ALLOCATION_ERROR;
    } else {
        result->addRef();
    }
    
    // Log that we created an object. The first object was already counted,
    //    don't do it again.
    umtx_lock(&gCTMutex);
    if (!firstObject) {
        gObjectsCreated += 1;
    }
    umtx_condBroadcast(&gCTConditionVar);
    umtx_unlock(&gCTMutex);

    return result;
}
示例#5
0
// Store a value and error in given hash entry.
// On entry, gCacheMutex must be held. Hash entry element must be in progress.
// value must be non NULL.
// On Exit, soft reference added to value. value and status stored in hash
// entry. Soft reference removed from previous stored value. Waiting
// threads notified.
void UnifiedCache::_put(
        const UHashElement *element,
        const SharedObject *value,
        const UErrorCode status) {
    U_ASSERT(_inProgress(element));
    const CacheKeyBase *theKey = (const CacheKeyBase *) element->key.pointer;
    const SharedObject *oldValue = (const SharedObject *) element->value.pointer;
    theKey->creationStatus = status;
    value->addSoftRef();
    UHashElement *ptr = const_cast<UHashElement *>(element);
    ptr->value.pointer = (void *) value;
    oldValue->removeSoftRef();

    // Tell waiting threads that we replace in-progress status with
    // an error.
    umtx_condBroadcast(&gInProgressValueAddedCond);
}
示例#6
0
void UnifiedCache::_put(
        const UHashElement *element,
        const SharedObject *value,
        const UErrorCode status) const {
    U_ASSERT(_inProgress(element));
    const CacheKeyBase *theKey = (const CacheKeyBase *) element->key.pointer;
    const SharedObject *oldValue = (const SharedObject *) element->value.pointer;
    theKey->fCreationStatus = status;
    if (value->softRefCount == 0) {
        _registerMaster(theKey, value);
    }
    value->softRefCount++;
    UHashElement *ptr = const_cast<UHashElement *>(element);
    ptr->value.pointer = (void *) value;
    U_ASSERT(oldValue == fNoValue);
    removeSoftRef(oldValue);

    // Tell waiting threads that we replace in-progress status with
    // an error.
    umtx_condBroadcast(gInProgressValueAddedCond());
}