/**
 * Install user-supplied atomic increment/decrement functions.
 * Only permitted while ICU is in its initial, not-yet-used state.
 *
 * @param context  opaque pointer passed through to the supplied functions.
 * @param ip       replacement atomic-increment function; must not be NULL.
 * @param dp       replacement atomic-decrement function; must not be NULL.
 * @param status   in/out error code; U_ILLEGAL_ARGUMENT_ERROR for NULL
 *                 functions, U_INVALID_STATE_ERROR if ICU is already in use.
 */
U_CAPI void U_EXPORT2
u_setAtomicIncDecFunctions(const void *context, UMtxAtomicFn *ip, UMtxAtomicFn *dp,
                           UErrorCode *status) {
    if (U_FAILURE(*status)) {
        return;
    }
    /* Both replacement functions are required; NULL is not a valid reset. */
    if (!ip || !dp) {
        *status = U_ILLEGAL_ARGUMENT_ERROR;
        return;
    }
    /* Swapping the functions after ICU has allocated anything would be
     * unsafe, so refuse once ICU has left its initial state. */
    if (cmemory_inUse()) {
        *status = U_INVALID_STATE_ERROR;
        return;
    }

    gIncDecContext = context;
    pIncFn = ip;
    pDecFn = dp;

#if !U_RELEASE
    {
        /* Debug-build sanity check: the supplied functions must really
         * increment and decrement, and return the updated value. */
        int32_t testInt = 0;
        U_ASSERT(umtx_atomic_inc(&testInt) == 1);
        U_ASSERT(testInt == 1);
        U_ASSERT(umtx_atomic_dec(&testInt) == 0);
        U_ASSERT(testInt == 0);
    }
#endif
}
//----------------------------------------------------------------------------- // // Reference Counting. A single RBBIDataWrapper object is shared among // however many RulesBasedBreakIterator instances are // referencing the same data. // //----------------------------------------------------------------------------- void RBBIDataWrapper::removeReference() { if (umtx_atomic_dec(&fRefCount) == 0) { delete this; } }
// Releases one hard reference on value (NULL tolerated). Returns the
// remaining hard-reference count, or 0 when value is NULL. When the last
// hard reference is dropped, the in-use value counter is decremented.
int32_t UnifiedCache::removeHardRef(const SharedObject *value) const {
    if (!value) {
        return 0;
    }
    int32_t newCount = umtx_atomic_dec(&value->hardRefCount);
    U_ASSERT(newCount >= 0);
    if (newCount == 0) {
        // Last hard reference gone: one fewer cached value is in active use.
        --fNumValuesInUse;
    }
    return newCount;
}
static void TestIncDecFunctions() { UErrorCode status = U_ZERO_ERROR; int32_t t = 1; /* random value to make sure that Inc/dec works */ char *dataDir; /* Save ICU's data dir and tracing functions so that they can be resored after cleanup and reinit. */ dataDir = safeGetICUDataDirectory(); /* Verify that ICU can be cleaned up and reinitialized successfully. * Failure here usually means that some ICU service didn't clean up successfully, * probably because some earlier test accidently left something open. */ ctest_resetICU(); /* Can not set mutex functions if ICU is already initialized */ u_setAtomicIncDecFunctions(&gIncDecContext, myIncFunc, myDecFunc, &status); TEST_STATUS(status, U_INVALID_STATE_ERROR); /* Clean up ICU */ u_cleanup(); /* Can not set functions with NULL values */ status = U_ZERO_ERROR; u_setAtomicIncDecFunctions(&gIncDecContext, NULL, myDecFunc, &status); TEST_STATUS(status, U_ILLEGAL_ARGUMENT_ERROR); status = U_ZERO_ERROR; u_setAtomicIncDecFunctions(&gIncDecContext, myIncFunc, NULL, &status); TEST_STATUS(status, U_ILLEGAL_ARGUMENT_ERROR); /* u_setIncDecFunctions() should work with null or non-null context pointer */ status = U_ZERO_ERROR; gExpectedContext = NULL; u_setAtomicIncDecFunctions(NULL, myIncFunc, myDecFunc, &status); TEST_STATUS(status, U_ZERO_ERROR); gExpectedContext = &gIncDecContext; u_setAtomicIncDecFunctions(&gIncDecContext, myIncFunc, myDecFunc, &status); TEST_STATUS(status, U_ZERO_ERROR); /* After reinitializing ICU, we should not be able to set the inc/dec funcs again. 
*/ status = U_ZERO_ERROR; u_setDataDirectory(dataDir); u_init(&status); TEST_STATUS(status, U_ZERO_ERROR); gExpectedContext = &gIncDecContext; u_setAtomicIncDecFunctions(&gIncDecContext, myIncFunc, myDecFunc, &status); TEST_STATUS(status, U_INVALID_STATE_ERROR); /* Doing ICU operations should cause our functions to be called */ gIncCount = 0; gDecCount = 0; umtx_atomic_inc(&t); TEST_ASSERT(t == 2); umtx_atomic_dec(&t); TEST_ASSERT(t == 1); TEST_ASSERT(gIncCount > 0); TEST_ASSERT(gDecCount > 0); /* Cleanup should cancel use of our inc/dec functions. */ /* Additional ICU operations should not use them */ ctest_resetICU(); gIncCount = 0; gDecCount = 0; status = U_ZERO_ERROR; u_setDataDirectory(dataDir); u_init(&status); TEST_ASSERT(gIncCount == 0); TEST_ASSERT(gDecCount == 0); status = U_ZERO_ERROR; umtx_atomic_inc(&t); umtx_atomic_dec(&t); TEST_STATUS(status, U_ZERO_ERROR); TEST_ASSERT(gIncCount == 0); TEST_ASSERT(gDecCount == 0); free(dataDir); }
/* Test-harness free() wrapper: maintains the live-allocation counter.
 * free(NULL) is a no-op, so NULL does not change the count. */
static void U_CALLCONV ctest_libFree(const void *context, void *mem) {
    if (mem == NULL) {
        return;
    }
    umtx_atomic_dec(&ALLOCATION_COUNT);
    free(mem);
}
// Drop one reference to this shared SpoofData; the last release deletes it.
void SpoofData::removeReference() {
    if (umtx_atomic_dec(&fRefCount) != 0) {
        return;
    }
    delete this;
}
// Atomically release one reference; deletes the object when none remain.
void SharedObject::removeRef() const {
    int32_t refsLeft = umtx_atomic_dec(&refCount);
    if (refsLeft == 0) {
        delete this;
    }
}
/*
 *  Worker-thread entry point for the multithread stress test.
 *  Repeatedly runs one iteration of the test, pausing whenever the main
 *  thread raises gRunInfo.stopFlag, until gRunInfo.exitFlag is set.
 *
 *  param is the thread's own ThreadInfo record (cast from void*).
 *  gRunInfo.runningThreads is kept accurate via atomic inc/dec so the
 *  main thread can observe how many workers are currently active.
 */
void threadMain (void *param)
{
    ThreadInfo *thInfo = (ThreadInfo *)param;

    if (gRunInfo.verbose)
        printf("Thread #%d: starting\n", thInfo->fThreadNum);

    /* Announce ourselves as an active worker. */
    umtx_atomic_inc(&gRunInfo.runningThreads);

    //
    //
    while (true) {
        if (gRunInfo.verbose )
            printf("Thread #%d: starting loop\n", thInfo->fThreadNum);

        //
        //  If the main thread is asking us to wait, do so by locking gStopMutex
        //     which will block us, since the main thread will be holding it already.
        //
        umtx_lock(&gInfoMutex);
        UBool stop = gRunInfo.stopFlag;   // Need mutex for processors with flakey memory models.
        umtx_unlock(&gInfoMutex);

        if (stop) {
            if (gRunInfo.verbose) {
                fprintf(stderr, "Thread #%d: suspending\n", thInfo->fThreadNum);
            }
            /* While suspended we don't count as a running thread. */
            umtx_atomic_dec(&gRunInfo.runningThreads);
            /* Block by acquiring gStopMutex (held by the main thread for the
             * duration of the pause), then loop until the flag clears.
             * NOTE(review): stopFlag is re-read here without gInfoMutex,
             * unlike the read above — confirm this relaxed re-check is
             * intentional. */
            while (gRunInfo.stopFlag) {
                umtx_lock(&gStopMutex);
                umtx_unlock(&gStopMutex);
            }
            umtx_atomic_inc(&gRunInfo.runningThreads);
            if (gRunInfo.verbose) {
                fprintf(stderr, "Thread #%d: restarting\n", thInfo->fThreadNum);
            }
        }

        //
        //  The real work of the test happens here.
        //
        gRunInfo.fTest->runOnce();

        /* Report progress (heartbeat + cycle count) and sample the exit
         * flag, all under gInfoMutex. */
        umtx_lock(&gInfoMutex);
        thInfo->fHeartBeat = true;
        thInfo->fCycles++;
        UBool exitNow = gRunInfo.exitFlag;
        umtx_unlock(&gInfoMutex);

        //
        //  If main thread says it's time to exit, break out of the loop.
        //
        if (exitNow) {
            break;
        }
    }

    /* This worker is done; stop counting it as running. */
    umtx_atomic_dec(&gRunInfo.runningThreads);

    // Returning will kill the thread.
    return;
}