/*
 * Exercise PR_AtomicIncrement, PR_AtomicDecrement and PR_AtomicSet across
 * the sign boundary, printing a PASSED/FAILED line per step on stdout.
 * Returns 0 when every check passed, 1 otherwise.
 */
PRIntn main(PRIntn argc, char **argv)
{
    PRFileDesc *out = PR_GetSpecialFD(PR_StandardOutput);
    PRInt32 target, observed;
    PRInt32 failed = 0;

    /* increment from -2: successive results should be negative, zero, positive */
    target = -2;
    observed = PR_AtomicIncrement(&target);
    failed |= (observed < 0) ? 0 : 1;
    PR_fprintf(out, "PR_AtomicIncrement(%d) == %d: %s\n",
               target, observed, (observed < 0) ? "PASSED" : "FAILED");

    observed = PR_AtomicIncrement(&target);
    failed |= (observed == 0) ? 0 : 1;
    PR_fprintf(out, "PR_AtomicIncrement(%d) == %d: %s\n",
               target, observed, (observed == 0) ? "PASSED" : "FAILED");

    observed = PR_AtomicIncrement(&target);
    failed |= (observed > 0) ? 0 : 1;
    PR_fprintf(out, "PR_AtomicIncrement(%d) == %d: %s\n",
               target, observed, (observed > 0) ? "PASSED" : "FAILED");

    /* decrement from 2: successive results should be positive, zero, negative */
    target = 2;
    observed = PR_AtomicDecrement(&target);
    failed |= (observed > 0) ? 0 : 1;
    PR_fprintf(out, "PR_AtomicDecrement(%d) == %d: %s\n",
               target, observed, (observed > 0) ? "PASSED" : "FAILED");

    observed = PR_AtomicDecrement(&target);
    failed |= (observed == 0) ? 0 : 1;
    PR_fprintf(out, "PR_AtomicDecrement(%d) == %d: %s\n",
               target, observed, (observed == 0) ? "PASSED" : "FAILED");

    observed = PR_AtomicDecrement(&target);
    failed |= (observed < 0) ? 0 : 1;
    PR_fprintf(out, "PR_AtomicDecrement(%d) == %d: %s\n",
               target, observed, (observed < 0) ? "PASSED" : "FAILED");

    /* set: must install the new value and return the previous one */
    target = -2;
    observed = PR_AtomicSet(&target, 2);
    failed |= ((observed == -2) && (target == 2)) ? 0 : 1;
    PR_fprintf(out, "PR_AtomicSet(%d) == %d: %s\n",
               target, observed,
               ((observed == -2) && (target == 2)) ? "PASSED" : "FAILED");

    PR_fprintf(out, "Atomic operations test %s\n",
               (failed == 0) ? "PASSED" : "FAILED");
    return failed;
}  /* main */
// // Choose a poll array index based on load. The algorithm was designed to // scale (it's O(1) with respect to the number of poll arrays) and reduce lock // contention (it is unlikely that two separate threads will attempt to access // a given poll array at the same) while effectively balancing load without // ringing or primary clustering. // // UMA: This should be GetKAPollThreadIndex --> determines which KaPollThread // to use. PRUint32 PollManager::GetPollArrayIndex() { PRUint32 paIndex; if (numThreads_ == 1) { paIndex = 0; } else { unsigned paRoundRobin = (PRUint32) PR_AtomicDecrement(&paRoundRobin_); unsigned halfNumThreads = (numThreads_ + 1) / 2; unsigned quotient = paRoundRobin / halfNumThreads; unsigned remainder = paRoundRobin - quotient * halfNumThreads; unsigned x = remainder * 2 + quotient; paIndex = x % numThreads_; PRUint32 paAlternateIndex = paIndex + 1; if (paAlternateIndex == numThreads_) paAlternateIndex = 0; if (threads_[paAlternateIndex]->GetLoad() < threads_[paIndex]->GetLoad()) paIndex = paAlternateIndex; } return paIndex; }
/* periodically generate a csn and dump it to the error log */
static void
_csngen_gen_tester_main (void *data)
{
    CSNGen *gen = (CSNGen*)data;
    CSN *csn = NULL;
    char buff [CSN_STRSIZE];
    int rc;

    PR_ASSERT (gen);

    /* loop until the harness asks all worker threads to exit */
    while (!s_must_exit)
    {
        rc = csngen_new_csn (gen, &csn, PR_FALSE);
        if (rc != CSN_SUCCESS)
        {
            slapi_log_err(SLAPI_LOG_ERR, "_csngen_gen_tester_main",
                          "failed to generate csn; csn error - %d\n", rc);
        }
        else
        {
            slapi_log_err(SLAPI_LOG_INFO, "_csngen_gen_tester_main",
                          "generate csn %s\n",
                          csn_as_string(csn, PR_FALSE, buff));
        }
        csn_free(&csn);

        /* sleep for 10 seconds between iterations */
        DS_Sleep (PR_SecondsToInterval(10));
    }

    /* tell the coordinator this worker has finished */
    PR_AtomicDecrement (&s_thread_count);
}
/*
 * Release the SSL mutex.  In multi-process mode the waiter count is
 * decremented atomically; if other waiters remain, exactly one of them is
 * woken by writing a byte into the shared pipe.
 *
 * Returns SECSuccess, or SECFailure with the error set via PORT_SetError.
 */
SECStatus
sslMutex_Unlock(sslMutex *pMutex)
{
    PRInt32 newValue;
    if (PR_FALSE == pMutex->isMultiProcess) {
        /* single-process mutexes have their own, simpler unlock path */
        return single_process_sslMutex_Unlock(pMutex);
    }
    if (pMutex->u.pipeStr.mPipes[2] != SSL_MUTEX_MAGIC) {
        /* mutex was never initialized (or has been corrupted) */
        PORT_SetError(PR_INVALID_ARGUMENT_ERROR);
        return SECFailure;
    }
    /* Do Memory Barrier here. */
    newValue = PR_AtomicDecrement(&pMutex->u.pipeStr.nWaiters);
    if (newValue > 0) {
        /* other waiters remain: hand the lock off through the pipe */
        int cc;
        char c = 1;
        do {
            /* retry writes interrupted by signals or transient EAGAIN */
            cc = write(pMutex->u.pipeStr.mPipes[1], &c, 1);
        } while (cc < 0 && (errno == EINTR || errno == EAGAIN));
        if (cc != 1) {
            if (cc < 0)
                nss_MD_unix_map_default_error(errno);
            else
                PORT_SetError(PR_UNKNOWN_ERROR);
            return SECFailure;
        }
    }
    return SECSuccess;
}
nsPipeOutputStream::Release()
{
    // Drop one writer reference; when the last writer goes away, shut down
    // the write side before forwarding the release to the owning pipe.
    nsrefcnt writers = PR_AtomicDecrement((PRInt32 *)&mWriterRefCnt);
    if (writers == 0) {
        Close();
    }
    return mPipe->Release();
}
nsPipeInputStream::Release(void)
{
    // Drop one reader reference; when the last reader goes away, shut down
    // the read side before forwarding the release to the owning pipe.
    nsrefcnt readers = PR_AtomicDecrement((PRInt32 *)&mReaderRefCnt);
    if (readers == 0) {
        Close();
    }
    return mPipe->Release();
}
nsExceptionManager::~nsExceptionManager()
{
    /* destructor code */
#ifdef NS_DEBUG
    /* keep the debug-only live-instance counter in sync */
    PR_AtomicDecrement(&totalInstances);
#endif // NS_DEBUG
}
nsExceptionService::~nsExceptionService()
{
    /* release per-thread state before tearing the service down */
    Shutdown();
    /* destructor code */
#ifdef NS_DEBUG
    /* keep the debug-only live-instance counter in sync */
    PR_AtomicDecrement(&totalInstances);
#endif
}
nsPlatformCharset::~nsPlatformCharset()
{
    // The last instance going away releases the shared charset info.
    PR_AtomicDecrement(&gCnt);
    if (gCnt == 0 && gInfo != nsnull) {
        delete gInfo;
        gInfo = nsnull;
    }
}
void nsNSSHttpRequestSession::Release() { PRInt32 newRefCount = PR_AtomicDecrement(&mRefCount); if (!newRefCount) { delete this; } }
// Script-callable: decrement the shell's suspend count, resuming it when
// the count reaches zero elsewhere.
static JSBool
Resume(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
{
    nsJSSh *shell;

    if (!GetJSShGlobal(cx, obj, &shell)) {
        return JS_FALSE;
    }

    PR_AtomicDecrement(&shell->mSuspendCount);
    return JS_TRUE;
}
// Track outstanding factory locks with an atomic counter.
NS_IMETHODIMP PlugletEngine::LockFactory(PRBool aLock)
{
    if (aLock) {
        PR_AtomicIncrement(&lockCount);
        return NS_OK;
    }
    PR_AtomicDecrement(&lockCount);
    return NS_OK;
}
NS_IMETHODIMP_(nsrefcnt) nsTimerImpl::Release(void)
{
    nsrefcnt count;

    NS_PRECONDITION(0 != mRefCnt, "dup release");
    count = PR_AtomicDecrement((PRInt32 *)&mRefCnt);
    NS_LOG_RELEASE(this, count, "nsTimerImpl");
    if (count == 0) {
        mRefCnt = 1; /* stabilize */

        /* enable this to find non-threadsafe destructors: */
        /* NS_ASSERT_OWNINGTHREAD(nsTimerImpl); */
        NS_DELETEXPCOM(this);
        return 0;
    }

    // If only one reference remains, and mArmed is set, then the ref must be
    // from the TimerThread::mTimers array, so we Cancel this timer to remove
    // the mTimers element, and return 0 if Cancel in fact disarmed the timer.
    //
    // We use an inlined version of nsTimerImpl::Cancel here to check for the
    // NS_ERROR_NOT_AVAILABLE code returned by gThread->RemoveTimer when this
    // timer is not found in the mTimers array -- i.e., when the timer was not
    // in fact armed once we acquired TimerThread::mLock, in spite of mArmed
    // being true here.  That can happen if the armed timer is being fired by
    // TimerThread::Run as we race and test mArmed just before it is cleared
    // by the timer thread.  If the RemoveTimer call below doesn't find this
    // timer in the mTimers array, then the last ref to this timer is held
    // manually and temporarily by the TimerThread, so we should fall through
    // to the final return and return 1, not 0.
    //
    // The original version of this thread-based timer code kept weak refs
    // from TimerThread::mTimers, removing this timer's weak ref in the
    // destructor, but that leads to double-destructions in the race described
    // above, and adding mArmed doesn't help, because destructors can't be
    // deferred, once begun.  But by combining reference-counting and a
    // specialized Release method with "is this timer still in the mTimers
    // array once we acquire the TimerThread's lock" testing, we defer
    // destruction until we're sure that only one thread has its hot little
    // hands on this timer.
    //
    // Note that both approaches preclude a timer creator, and everyone else
    // except the TimerThread who might have a strong ref, from dropping all
    // their strong refs without implicitly canceling the timer.  Timers need
    // non-mTimers-element strong refs to stay alive.

    if (count == 1 && mArmed) {
        mCanceled = PR_TRUE;
        NS_ASSERTION(gThread, "An armed timer exists after the thread timer stopped.");
        if (NS_SUCCEEDED(gThread->RemoveTimer(this)))
            return 0;
    }

    return count;
}
nsSystemPrincipal::Release()
{
    NS_PRECONDITION(0 != mJSPrincipals.refcount, "dup release");
    nsrefcnt count = PR_AtomicDecrement((PRInt32 *)&mJSPrincipals.refcount);
    NS_LOG_RELEASE(this, count, "nsSystemPrincipal");
    if (count == 0) {
        // Stabilize the refcount before destruction so a transient
        // AddRef/Release pair run from the destructor cannot re-enter it
        // (same pattern as the other threadsafe Release() methods).
        mJSPrincipals.refcount = 1;
        NS_DELETEXPCOM(this);
    }

    return count;
}
NS_DECL_THREADSAFE_ISUPPORTS

// Executed on a pool thread.  Each runnable fires at most once; the thread
// that takes the shared counter to zero announces itself.
NS_IMETHOD Run() override
{
    EXPECT_FALSE(mWasRun);
    mWasRun = true;
    PR_Sleep(1);
    if (PR_AtomicDecrement(&gNum) == 0) {
        printf(" last thread was %d\n", mNum);
    }
    return NS_OK;
}
STDMETHODIMP_(ULONG)
CMapiFactory::Release()
{
    // Test the value RETURNED by the atomic decrement, not a re-read of
    // m_cRef: between the decrement and a re-read another thread may also
    // release, making the re-read observe 0 in two threads (double delete)
    // or never observe 0 at all (leak).
    PRInt32 temp = PR_AtomicDecrement(&m_cRef);
    if (temp == 0)
    {
        delete this;
        return 0;
    }
    return temp;
}
/*
 * Destroy a condition variable.
 *
 * NOTE(review): destruction is gated on notify_pending dropping below zero;
 * presumably the matching notify path holds a transient reference via this
 * counter and performs the deferred destroy itself when the count stays
 * >= 0 here -- confirm against the notify implementation.
 */
PR_IMPLEMENT(void) PR_DestroyCondVar(PRCondVar *cvar)
{
    if (0 > PR_AtomicDecrement(&cvar->notify_pending))
    {
        PRIntn rv = pthread_cond_destroy(&cvar->cv); PR_ASSERT(0 == rv);
#if defined(DEBUG)
        /* poison the freed memory and bump the debug counter */
        memset(cvar, 0xaf, sizeof(PRCondVar));
        pt_debug.cvars_destroyed += 1;
#endif
        PR_DELETE(cvar);
    }
}  /* PR_DestroyCondVar */
// Threadsafe release; self-deletes on the final reference.
nsrefcnt
PyG_Base::Release(void)
{
    nsrefcnt remaining = (nsrefcnt) PR_AtomicDecrement((PRInt32 *)&mRefCnt);
#ifdef NS_BUILD_REFCNT_LOGGING
    // Only the base gateway object logs, to avoid double-counting.
    if (m_pBaseObject == NULL)
        NS_LOG_RELEASE(this, remaining, refcntLogRepr);
#endif
    if (remaining == 0)
        delete this;
    return remaining;
}
NSS_IMPLEMENT PRStatus
nssSlot_Destroy (
  NSSSlot *slot
)
{
    /* a NULL slot is tolerated and treated as success */
    if (!slot) {
        return PR_SUCCESS;
    }
    /* only the final reference tears the slot down */
    if (PR_AtomicDecrement(&slot->base.refCount) != 0) {
        return PR_SUCCESS;
    }
    PZ_DestroyLock(slot->base.lock);
    return nssArena_Destroy(slot->base.arena);
}
// Threadsafe release with refcount stabilization before destruction.
nsrefcnt
nsNodeInfoManager::Release()
{
    NS_PRECONDITION(0 != mRefCnt, "dup release");

    nsrefcnt remaining = PR_AtomicDecrement((PRInt32 *)&mRefCnt);
    NS_LOG_RELEASE(this, remaining, "nsNodeInfoManager");
    if (remaining == 0) {
        mRefCnt = 1; /* stabilize */
        delete this;
    }

    return remaining;
}
NS_IMETHODIMP_(nsrefcnt) nsPrintProgress::Release(void)
{
    NS_PRECONDITION(0 != mRefCnt, "dup release");

    nsrefcnt remaining = PR_AtomicDecrement((PRInt32 *)&mRefCnt);
    //NS_LOG_RELEASE(this, remaining, "nsPrintProgress");
    if (remaining != 0) {
        return remaining;
    }

    mRefCnt = 1; /* stabilize */
    /* enable this to find non-threadsafe destructors: */
    /* NS_ASSERT_OWNINGTHREAD(nsPrintProgress); */
    NS_DELETEXPCOM(this);
    return 0;
}
nsHttpTransaction::Release()
{
    NS_PRECONDITION(0 != mRefCnt, "dup release");

    nsrefcnt remaining = PR_AtomicDecrement((PRInt32 *) &mRefCnt);
    NS_LOG_RELEASE(this, remaining, "nsHttpTransaction");
    if (remaining != 0) {
        return remaining;
    }

    mRefCnt = 1; /* stabilize */
    // it is essential that the transaction be destroyed on the consumer
    // thread (we could be holding the last reference to our consumer).
    DeleteSelfOnConsumerThread();
    return 0;
}
XPCWrappedNativeProto::~XPCWrappedNativeProto()
{
    NS_ASSERTION(!mJSProtoObject, "JSProtoObject still alive");

    MOZ_COUNT_DTOR(XPCWrappedNativeProto);

#ifdef DEBUG
    /* keep the debug-only live-proto counter in sync */
    PR_AtomicDecrement(&gDEBUG_LiveProtoCount);
#endif

    // Note that our weak ref to mScope is not to be trusted at this point.

    /* drop the cached entry keyed by our class info */
    XPCNativeSet::ClearCacheEntryForClassInfo(mClassInfo);

    delete mScriptableInfo;
}
nsPlatformCharset::~nsPlatformCharset()
{
    // When the last instance goes away, release the shared tables and lock.
    PR_AtomicDecrement(&gCnt);
    if (gCnt != 0) {
        return;
    }
    if (gNLInfo) {
        delete gNLInfo;
        gNLInfo = nsnull;
        PR_DestroyLock(gLock);
        gLock = nsnull;
    }
    if (gInfo_deprecated) {
        delete gInfo_deprecated;
        gInfo_deprecated = nsnull;
    }
}
// // Request a reservation in the keep-alive system // PRBool PollManager::RequestReservation(void) { PR_ASSERT(numKeepAlives_ >= 0); PRInt32 numKeepAlives = PR_AtomicIncrement(&numKeepAlives_); if (numKeepAlives > maxKeepAlives_) { PR_AtomicDecrement(&numKeepAlives_); if (PR_AtomicIncrement(&numKeepAliveRefusals_) == 1) ereport(LOG_VERBOSE, "PollManager::RequestReservation() keep-alive subsystem full"); return PR_FALSE; } return PR_TRUE; }
/* simulate local clock being set back */
static void
_csngen_local_tester_main (void *data)
{
    CSNGen *gen = (CSNGen*)data;

    PR_ASSERT (gen);

    /* loop until the harness asks all worker threads to exit */
    while (!s_must_exit)
    {
        /* sleep for 60 seconds between iterations */
        DS_Sleep (PR_SecondsToInterval(60));

        /* pull the sampled clock backwards by a random amount (< 100s) */
        g_sampled_time -= slapi_rand () % 100;

        csngen_dump_state (gen);
    }

    /* tell the coordinator this worker has finished */
    PR_AtomicDecrement (&s_thread_count);
}
/*
 * Release a reference to an object.  The pointer to the object
 * should not be referenced after this call is made, since the
 * object may be destroyed if this is the last reference to it.
 */
void
object_release(Object *o)
{
    PRInt32 remaining;

    PR_ASSERT(NULL != o);

    remaining = PR_AtomicDecrement(&o->refcnt);
    PR_ASSERT(remaining >= 0);
    if (remaining != 0) {
        return;
    }

    /* Last reference gone -- destroy the object. */
    if (o->destructor) {
        o->destructor(&o->data);
    }

    /* Poison the structure so a dangling pointer is easier to spot. */
    o->data = NULL;
    o->destructor = NULL;
    o->refcnt = -9999;
    slapi_ch_free((void **)&o);
}
/* simulate clock skew with remote servers that causes generator
   to advance its remote offset */
static void
_csngen_remote_tester_main (void *data)
{
    CSNGen *gen = (CSNGen*)data;
    CSN *csn;
    time_t csn_time;
    int rc;

    PR_ASSERT (gen);

    /* loop until the harness asks all worker threads to exit */
    while (!s_must_exit)
    {
        rc = csngen_new_csn (gen, &csn, PR_FALSE);
        if (rc != CSN_SUCCESS)
        {
            slapi_log_err(SLAPI_LOG_ERR, "_csngen_remote_tester_main",
                          "Failed to generate csn; csn error - %d\n", rc);
        }
        else
        {
            /* push the csn's time forward to mimic a fast remote clock,
               then feed it back so the generator adjusts its offset */
            csn_time = csn_get_time(csn);
            csn_set_time (csn, csn_time + slapi_rand () % 100);
            rc = csngen_adjust_time (gen, csn);
            if (rc != CSN_SUCCESS)
            {
                slapi_log_err(SLAPI_LOG_ERR, "_csngen_remote_tester_main",
                              "Failed to adjust generator's time; csn error - %d\n", rc);
            }

            csngen_dump_state (gen);
        }
        csn_free(&csn);

        /* sleep for 60 seconds between iterations */
        DS_Sleep (PR_SecondsToInterval(60));
    }

    /* tell the coordinator this worker has finished */
    PR_AtomicDecrement (&s_thread_count);
}
PyG_Base::~PyG_Base()
{
    /* one fewer live gateway */
    PR_AtomicDecrement(&cGateways);
#ifdef DEBUG_LIFETIMES
    PYXPCOM_LOG_DEBUG("PyG_Base: deleted %p", this);
#endif
    if ( m_pPyObject ) {
        /* dropping the Python reference requires holding the GIL */
        CEnterLeavePython celp;
        Py_DECREF(m_pPyObject);
    }
    if (m_pBaseObject)
        m_pBaseObject->Release();
    if (m_pWeakRef) {
        // Need to ensure some other thread isn't doing a QueryReferent on
        // our weak reference at the same time
        CEnterLeaveXPCOMFramework _celf;
        PyXPCOM_GatewayWeakReference *p = (PyXPCOM_GatewayWeakReference *)(nsISupports *)m_pWeakRef;
        /* null the back-pointer so the weak reference reports "dead" */
        p->m_pBase = nsnull;
        m_pWeakRef = nsnull;
    }
    PyXPCOM_DLLRelease();
}
nsrefcnt
nsLDAPConnection::Release(void)
{
    nsrefcnt count;

    NS_PRECONDITION(0 != mRefCnt, "dup release");
    count = PR_AtomicDecrement((PRInt32 *)&mRefCnt);
    NS_LOG_RELEASE(this, count, "nsLDAPConnection");
    if (0 == count) {
        // As commented by danm: In the object's destructor, if by some
        // convoluted, indirect means it happens to run into some code
        // that temporarily references it (addref/release), then if the
        // refcount had been left at 0 the unexpected release would
        // attempt to reenter the object's destructor.
        //
        mRefCnt = 1; /* stabilize */

        // If we have a mRunnable object, we need to make sure to lock its
        // mLock before we try to DELETE. This is to avoid a race condition.
        // We also make sure to keep a strong reference to the runnable
        // object, to make sure it doesn't get GCed from underneath us,
        // while we are still holding a lock for instance.
        //
        if (mRunnable && mRunnable->mLock) {
            nsLDAPConnectionLoop *runnable  = mRunnable;

            NS_ADDREF(runnable);
            PR_Lock(runnable->mLock);
            NS_DELETEXPCOM(this);
            PR_Unlock(runnable->mLock);
            NS_RELEASE(runnable);
        } else {
            NS_DELETEXPCOM(this);
        }

        return 0;
    }
    return count;
}