// Self-test for the atomic primitives: increment/decrement must return the
// updated value, and compare-exchange must install 1 when the current value
// is 0 so the following increment yields 2.
void testAtomicOps()
{
    volatile esint32 key = 0;

    EAssert(AtomicIncrement(&key) == 1, "key != 1");
    EAssert(AtomicDecrement(&key) == 0, "key != 0");

    // CAS: swap in 1 while the expected current value is 0.
    AtomicCompareExchange(1, 0, &key);
    EAssert(AtomicIncrement(&key) == 2, "key != 2");
}
// Allocate a large object of allocationSize bytes.  First consult the
// large-object cache (loc); on a miss, reserve a back-reference entry and
// carve a fresh block from the backend.  Returns NULL on failure, with all
// partially-made reservations rolled back.
LargeMemoryBlock *ExtMemoryPool::mallocLargeObject(size_t allocationSize)
{
#if __TBB_MALLOC_LOCACHE_STAT
    AtomicIncrement(mallocCalls);
    AtomicAdd(memAllocKB, allocationSize/1024);
#endif
    LargeMemoryBlock* lmb = loc.get(allocationSize);
    if (!lmb) {
        // Cache miss: reserve the back-reference first, so a backend failure
        // is cheap to undo.
        BackRefIdx backRefIdx = BackRefIdx::newBackRef(/*largeObj=*/true);
        if (backRefIdx.isInvalid())
            return NULL;

        // unalignedSize is set in getLargeBlock
        lmb = backend.getLargeBlock(allocationSize);
        if (!lmb) {
            // Undo both the back-reference and the cache accounting that
            // loc.get() performed for this request.
            removeBackRef(backRefIdx);
            loc.rollbackCacheState(allocationSize);
            return NULL;
        }
        lmb->backRefIdx = backRefIdx;
        STAT_increment(getThreadId(), ThreadCommonCounters, allocNewLargeObj);
    } else {
#if __TBB_MALLOC_LOCACHE_STAT
        AtomicIncrement(cacheHits);
        AtomicAdd(memHitKB, allocationSize/1024);
#endif
    }
    return lmb;
}
// Bump this object's reference count.  If the instance has already been
// flagged as deleted, log the use-after-free indication before incrementing.
// With LOG_LIFECYCLE_EVENTS defined, the new count is logged as well.
void AddonClass::Acquire() const
{
  if (isDeleted)
    CLog::Log(LOGERROR,"NEWADDON REFCNT Acquiring dead class %s 0x%lx",
              GetClassname(), (long)(((void*)this)));
#ifdef LOG_LIFECYCLE_EVENTS
  // Log the post-increment value returned by AtomicIncrement.
  CLog::Log(LOGDEBUG,"NEWADDON REFCNT incrementing to %ld on %s 0x%lx",
            AtomicIncrement((long*)&refs),GetClassname(), (long)(((void*)this)));
#else
  AtomicIncrement((long*)&refs);
#endif
}
//----------------------------------------------------------------------------- // LoadTextureXD // Loads a texture //----------------------------------------------------------------------------- sint CRenderer::LoadTexture2D( const char* szFilename ) { sint nIndex = AtomicIncrement( &m_nNumTextures ) - 1; m_ppTextures[ nIndex ] = m_pDevice->LoadTexture( szFilename ); return nIndex; }
//----------------------------------------------------------------------------- // CreateMesh // Creates a mesh from the file //----------------------------------------------------------------------------- sint CRenderer::CreateMesh( uint nVertexStride, uint nVertexCount, uint nIndexSize, uint nIndexCount, void* pVertices, void* pIndices, GFX_BUFFER_USAGE nUsage ) { ASSERT( m_nNumMeshes < MAX_MESHES ); CMesh* pMesh = new CMesh; ////////////////////////////////////////// // Create the vertex buffer pMesh->m_pVertexBuffer = m_pDevice->CreateVertexBuffer( nVertexStride * nVertexCount, pVertices, nUsage ); ////////////////////////////////////////// // Create the index buffer uint nSize = nIndexCount * nIndexSize; pMesh->m_pIndexBuffer = m_pDevice->CreateIndexBuffer( nSize, pIndices ); pMesh->m_nIndexCount = nIndexCount; pMesh->m_nIndexSize = nIndexSize; pMesh->m_nVertexStride = nVertexStride; pMesh->m_pDevice = m_pDevice; // Allow the mesh access to the device // sint nIndex = AtomicIncrement( &m_nNumMeshes ) - 1; m_ppMeshes[ nIndex ] = pMesh; return nIndex; }
// Sweeps a counter from -2000 up to 2000 with AtomicIncrement and back down
// with AtomicDecrement, verifying after every step that the returned result
// equals the expected running value AND matches the variable itself (i.e.
// the primitives return the post-operation value).
// ChkIf(cond, hr) jumps to Cleanup with that HRESULT when cond holds.
HRESULT CTestAtomicHelpers::Run()
{
    HRESULT hr = S_OK;
    int value = -2000;
    int nextexpected = -2000;
    int result = 0;

    while (value < 2000)
    {
        nextexpected++;
        result = AtomicIncrement(&value);
        ChkIf(result != nextexpected, E_UNEXPECTED);
        ChkIf(result != value, E_UNEXPECTED);
    }

    value = 2000;
    nextexpected = 2000;
    while (value > -2000)
    {
        nextexpected--;
        result = AtomicDecrement(&value);
        ChkIf(result != nextexpected, E_UNEXPECTED);
        ChkIf(result != value, E_UNEXPECTED);
    }

Cleanup:
    return hr;
}
// Acquire the lock for the calling thread.  m_tid records the owning thread;
// if another thread currently owns it, spin until ownership can be claimed.
// m_lock counts acquisitions; m_interlock guards both fields.
Instance Lock()
{
    ELock_lock(&m_interlock);
    if (!m_tid) {
        // Unowned: claim ownership for this thread.
        m_tid = pthread_self();
    } else {
        if (m_tid != pthread_self()) {
            // Owned by another thread: release the interlock first...
            ELock_unlock(&m_interlock);
            // ...then spin, re-taking the interlock each round, until the
            // owner clears m_tid.
            while (1) {
                ELock_lock(&m_interlock);
                if (!m_tid) {
                    // After this break the interlock is still held.
                    m_tid = pthread_self();
                    break;
                }
                ELock_unlock(&m_interlock);
            }
        }
    }
    AtomicIncrement(&m_lock);
    ELock_unlock(&m_interlock);
    return Instance(this);
}
//----------------------------------------------------------------------------- // DrawDebugRay // Draws a vector from the start point the length of the vector //----------------------------------------------------------------------------- void CRenderer::DrawDebugRay( const RVector3& start, const RVector3& dir ) { sint nIndex = AtomicIncrement( &m_nNumRays ) - 1; m_pCurrDebugRays[nIndex].start = start; m_pCurrDebugRays[nIndex].end = start + dir; }
// Take an additional reference on this buffer and mark it live again.
// Returns 'this' so calls can be chained.
CDVDMediaCodecInfo* CDVDMediaCodecInfo::Retain()
{
  AtomicIncrement(&m_refs);   // bump the reference count first
  m_isReleased = false;       // holding a reference implies not released
  return this;
}
// Scan all occupied bins (largest first, via the non-empty-bin bitmask),
// evicting cached blocks older than each bin's threshold.  If the cache has
// looked too large for a sustained period (tooLargeLOC > 2), restart the
// scan with doThreshDecr set so every bin's threshold is lowered based on
// its hit statistics.  Returns true when anything was released to 'backend'.
bool LargeObjectCacheImpl<Props>::regularCleanup(Backend *backend, uintptr_t currTime, bool doThreshDecr)
{
    bool released = false;
    BinsSummary binsSummary;

    for (int i = bitMask.getMaxTrue(numBins-1); i >= 0; i = bitMask.getMaxTrue(i-1)) {
        bin[i].updateBinsSummary(&binsSummary);
        if (!doThreshDecr && tooLargeLOC>2 && binsSummary.isLOCTooLarge()) {
            // if LOC is too large for quite a long time, decrease the
            // threshold based on bin hit statistics.
            // For this, redo cleanup from the beginning.
            // Note: on this iteration total usedSz can be not too large
            // in comparison to total cachedSz, as we calculated it only
            // partially. We are OK with this.
            i = bitMask.getMaxTrue(numBins-1);
            doThreshDecr = true;
            binsSummary.reset();
            continue;
        }
        if (doThreshDecr)
            bin[i].decreaseThreshold();
        if (bin[i].cleanToThreshold(backend, &bitMask, currTime, i))
            released = true;
    }

    // We want to find if LOC was too large for some time continuously,
    // so OK with races between incrementing and zeroing, but incrementing
    // must be atomic.
    if (binsSummary.isLOCTooLarge())
        AtomicIncrement(tooLargeLOC);
    else
        tooLargeLOC = 0;
    return released;
}
/*
 * Windows variant: if this (suspended) thread has a pending fault, report
 * the fault signal through *signal, clear it, decrement the process-wide
 * faulted-thread count, and undo the extra Windows suspension applied by
 * the debug exception handler.  Returns 1 if a fault was pending, else 0.
 */
int NaClAppThreadUnblockIfFaulted(struct NaClAppThread *natp, int *signal) {
  DWORD previous_suspend_count;

  if (natp->fault_signal == 0) {
    return 0;
  }
  *signal = natp->fault_signal;
  natp->fault_signal = 0;
  /* AtomicIncrement with -1 decrements the faulted-thread counter. */
  AtomicIncrement(&natp->nap->faulted_thread_count, -1);

  /*
   * Decrement Windows' suspension count for the thread. This undoes
   * the effect of debug_exception_handler.c's SuspendThread() call.
   */
  previous_suspend_count = ResumeThread(GetHostThreadHandle(natp));
  if (previous_suspend_count == (DWORD) -1) {
    NaClLog(LOG_FATAL, "NaClAppThreadUnblockIfFaulted: "
            "ResumeThread() call failed\n");
  }
  /*
   * This thread should already have been suspended using
   * NaClUntrustedThreadSuspend(), so the thread will not actually
   * resume until NaClUntrustedThreadResume() is called.
   */
  DCHECK(previous_suspend_count >= 2);
  return 1;
}
// Build a transaction id — a 4-byte magic cookie followed by 12 random bytes
// (per RFC 5389) — seed rand() with platform entropy, optionally copy the id
// out through pTransId, then append it to the message being built.
HRESULT CStunMessageBuilder::AddRandomTransactionId(StunTransactionId* pTransId)
{
    StunTransactionId transid;
    uint32_t stun_cookie_nbo = htonl(STUN_COOKIE);
    uint32_t entropy=0;

    // on x86, the rdtsc instruction is about as good as it gets for a random sequence number
    // on linux, there's /dev/urandom

#ifdef _WIN32
    // on windows, there's lots of simple stuff we can get at to give us a random number
    // the rdtsc instruction is about as good as it gets
    uint64_t clock = __rdtsc();
    entropy ^= (uint32_t)(clock);
#else
    // on linux, /dev/urandom should be sufficient
    {
        int randomfile = ::open("/dev/urandom", O_RDONLY);
        if (randomfile >= 0)
        {
            int readret = read(randomfile, &entropy, sizeof(entropy));
            UNREFERENCED_VARIABLE(readret);
            ASSERT(readret > 0);
            close(randomfile);
        }
    }

    // Fallback entropy mix when /dev/urandom was unavailable or yielded 0.
    if (entropy == 0)
    {
        entropy ^= getpid();
        entropy ^= reinterpret_cast<uintptr_t>(this);
        entropy ^= time(NULL);
        entropy ^= AtomicIncrement(&g_sequence_number);
    }
#endif

    srand(entropy);

    // the first four bytes of the transaction id is always the magic cookie
    // followed by 12 bytes of the real transaction id
    memcpy(transid.id, &stun_cookie_nbo, sizeof(stun_cookie_nbo));

    // BUGFIX: the loop previously stopped at (STUN_TRANSACTION_ID_LENGTH-4),
    // which left the final 4 bytes of the id as uninitialized stack memory.
    // Every byte after the 4-byte cookie must be randomized.
    for (int x = 4; x < STUN_TRANSACTION_ID_LENGTH; x++)
    {
        transid.id[x] = (uint8_t)(rand() % 256);
    }

    if (pTransId)
    {
        *pTransId = transid;
    }

    return AddTransactionId(transid);
}
// Take an extra reference on this buffer; the new count is logged when
// verbose video-component logging is enabled.  Returns 'this' for chaining.
CMMALVideoBuffer* CMMALVideoBuffer::Acquire()
{
  long newCount = AtomicIncrement(&m_refs);
  if (g_advancedSettings.CanLogComponent(LOGVIDEO))
    CLog::Log(LOGDEBUG, "%s::%s %p (%p) ref:%ld", CLASSNAME, __func__, this, mmal_buffer, newCount);
  (void)newCount;  // silence unused-variable warning when not logging
  return this;
}
// Take an additional reference on this buffer; in verbose MMAL debug builds
// the new count is logged.  Returns 'this' for chaining.
CYUVVideoBuffer *CYUVVideoBuffer::Acquire()
{
  const long refsNow = AtomicIncrement(&m_refs);
#ifdef MMAL_DEBUG_VERBOSE
  CLog::Log(LOGDEBUG, "%s::%s omvb:%p mmal:%p ref:%ld", CLASSNAME, __func__, this, mmal_buffer, refsNow);
#endif
  (void)refsNow;  // silence unused-variable warning in non-verbose builds
  return this;
}
//----------------------------------------------------------------------------- // AddCommand // Adds a renderable object to the command buffer //----------------------------------------------------------------------------- void CRenderer::AddCommand( uint64 nCmd, RTransform& transform ) { ASSERT( m_nNumCommands < MAX_RENDER_COMMANDS ); uint nIndex = AtomicIncrement( &m_nNumCommands ) - 1; m_pCurrCommands[nIndex] = nCmd; m_pCurrTransforms[nIndex] = transform; }
//-----------------------------------------------------------------------------
// AddDirLight
//  Adds a directional light to the current light buffer.
//  The per-slot bit in nLightType distinguishes light kinds: AddPointLight
//  sets the slot's bit with |= (1 << nIndex); a directional light clears it.
//-----------------------------------------------------------------------------
void CRenderer::AddDirLight( const RVector3& vDir, const RVector3& vColor )
{
    // NOTE: racy capacity check — the read happens before the increment.
    ASSERT( m_pCurrLights->nNumActiveLights < MAX_LIGHTS );

    // Atomically claim the next light slot.
    sint nIndex = AtomicIncrement( &m_pCurrLights->nNumActiveLights ) - 1;

    // Store the negated, normalized direction in homogeneous form.
    m_pCurrLights->vLight[ nIndex ] = Homogonize( Normalize( -vDir ) );
    m_pCurrLights->vColor[ nIndex ] = RVector4( vColor, 1.0f );

    // BUGFIX: the old code did  nLightType &= ( 0 << nIndex )  which is
    // identical to  &= 0  and wiped the type bits of EVERY light.  Clear
    // only this slot's bit to mark it as directional.
    m_pCurrLights->nLightType &= ~( 1 << nIndex );
}
//----------------------------------------------------------------------------- // AddLight // Adds a light to the scene //----------------------------------------------------------------------------- void CRenderer::AddPointLight( const RVector3& vPos, const RVector3& vColor, float fRange ) { ASSERT( m_pCurrLights->nNumActiveLights < MAX_LIGHTS ); sint nIndex = AtomicIncrement( &m_pCurrLights->nNumActiveLights ) - 1; m_pCurrLights->vLight[ nIndex ] = RVector4( vPos, fRange ); m_pCurrLights->vColor[ nIndex ] = RVector4( vColor, 1.0f ); m_pCurrLights->nLightType |= (1 << nIndex); }
// Insert (or refresh) the header-scan result for 'key': the file timestamp
// plus its list of included files.  Takes the cache's write lock; concurrent
// attempts to insert the same include set are tolerated by re-checking under
// the lock.  Include strings are copied into the cache's linear allocator.
void ScanCacheInsert( ScanCache* self, const HashDigest& key, uint64_t timestamp, const char** included_files, int count)
{
  AtomicIncrement(&g_Stats.m_ScanCacheInserts);

  ReadWriteLockWrite(&self->m_Lock);

  ScanCache::Record* record = LookupDynamic(self, key);

  // See if we have this record already (races to insert same include set are possible)
  if (nullptr == record || record->m_FileTimestamp != timestamp)
  {
    // Make sure we have room to insert.
    ScanCachePrepareInsert(self);

    uint32_t table_size = self->m_TableSize;
    // Pick the hash word matching the configured digest type.
#if ENABLED(USE_SHA1_HASH)
    uint32_t hash = key.m_Words.m_C;
#elif ENABLED(USE_FAST_HASH)
    uint32_t hash = key.m_Words32[0];
#endif
    // Table size is a power of two; mask selects the bucket.
    uint32_t index = hash & (table_size - 1);

    // Allocate a new record if needed
    const bool is_fresh = record == nullptr;
    if (is_fresh)
    {
      record = LinearAllocate<ScanCache::Record>(self->m_Allocator);
      record->m_Key = key;
    }

    // (Re)populate the payload for this key.
    record->m_FileTimestamp = timestamp;
    record->m_IncludeCount = count;
    record->m_Includes = LinearAllocateArray<FileAndHash>(self->m_Allocator, count);

    for (int i = 0; i < count; ++i)
    {
      record->m_Includes[i].m_Filename = StrDup(self->m_Allocator, included_files[i]);
      record->m_Includes[i].m_Hash = Djb2HashPath(included_files[i]);
    }

    // Only freshly allocated records get linked into the bucket chain.
    if (is_fresh)
    {
      record->m_Next = self->m_Table[index];
      self->m_Table[index] = record;
      self->m_RecordCount++;
    }
  }

  ReadWriteUnlockWrite(&self->m_Lock);
}
//----------------------------------------------------------------------------- // DrawDebugBox // Renders a wireframe debug AAB //----------------------------------------------------------------------------- void CRenderer::DrawDebugBox( const RAABB& box, const RVector3& vColor ) { if( !gnShowBoundingVolumes ) return; sint nIndex = AtomicIncrement( &m_nNumBoxes ) - 1; m_pCurrDebugBoxes[nIndex].box = box; m_pCurrDebugBoxes[nIndex].color = vColor; }
// NOTE(review): the matching "#ifdef TRACE_FRAMES" for the #else below lies
// above this chunk; the first constructor is presumably the TRACE_FRAMES
// variant (it carries the frame index used in trace logs) — confirm.
// Both variants start with a reference count of 1 and no attached frame.
CDVDVideoCodecIMXBuffer::CDVDVideoCodecIMXBuffer(int idx)
  : m_refs(1)
  , m_idx(idx)
#else
CDVDVideoCodecIMXBuffer::CDVDVideoCodecIMXBuffer()
  : m_refs(1)
#endif
  , m_frameBuffer(NULL)
  , m_rendered(false)
  , m_pts(DVD_NOPTS_VALUE)
  , m_previousBuffer(NULL)
{
}

// Increment this buffer's reference count; with TRACE_FRAMES defined the new
// count is also logged.
void CDVDVideoCodecIMXBuffer::Lock()
{
#ifdef TRACE_FRAMES
  long count = AtomicIncrement(&m_refs);
  // NOTE(review): 'count' is long but logged with %d — consider %ld.
  CLog::Log(LOGDEBUG, "R+ %02d - ref : %d (VPU)\n", m_idx, count);
#else
  AtomicIncrement(&m_refs);
#endif
}
/*
 * Generic variant: if this suspended thread has a pending fault, report the
 * fault signal through *signal, clear it, and decrement the process-wide
 * faulted-thread count.  Returns 1 if a fault was pending, 0 otherwise.
 */
int NaClAppThreadUnblockIfFaulted(struct NaClAppThread *natp, int *signal) {
  /* This function may only be called on a thread that is suspended. */
  DCHECK(natp->suspend_state == (NACL_APP_THREAD_UNTRUSTED | NACL_APP_THREAD_SUSPENDING | NACL_APP_THREAD_SUSPENDED) ||
         natp->suspend_state == (NACL_APP_THREAD_TRUSTED | NACL_APP_THREAD_SUSPENDING));

  if (natp->fault_signal == 0) {
    return 0;
  }
  *signal = natp->fault_signal;
  natp->fault_signal = 0;
  /* AtomicIncrement with -1 decrements the faulted-thread counter. */
  AtomicIncrement(&natp->nap->faulted_thread_count, -1);
  return 1;
}
// Advance the cache's age clock atomically and run regularCleanup() every
// cacheCleanupFreq ticks.  Returns the new age value.
uintptr_t LargeObjectCache::cleanupCacheIfNeed(ExtMemoryPool *extMemPool)
{
    /* loCacheStat.age overflow is OK, as we only want difference between
     * its current value and some recent.
     *
     * Both malloc and free should increment loCacheStat.age, as in
     * a different case multiple cached blocks would have same age,
     * and accuracy of predictors suffers.
     */
    uintptr_t currAge = (uintptr_t)AtomicIncrement((intptr_t&)loCacheStat.age);
    if ( 0 == currAge % cacheCleanupFreq )
        regularCleanup(extMemPool, currAge);
    return currAge;
}
// Begin recording a profiling event: grab the next EventData slot from the
// current thread's storage, tag it with 'description', and start its timer.
// Returns nullptr when profiling storage is not active.
EventData* Event::Start(const EventDescription& description)
{
    EventStorage* storage = Core::storage;
    if (storage == nullptr)
        return nullptr;

    EventData& slot = storage->NextEvent();
    slot.description = &description;
    slot.Start();

    // Sampling-type events additionally bump the storage's sampling counter.
    if (description.isSampling)
        AtomicIncrement(&storage->isSampling);

    return &slot;
}
// Park this worker until the pool wakes it, maintaining the pool's
// active-worker count around the wait.
void WorkerThread::Idle()
{
    // decrease the active worker count
    AtomicDecrement(&Pool_->ActiveWorkers_);

    // were we the last active worker?
    // NOTE(review): the comparison is against 1, not 0 — presumably one slot
    // is reserved for a non-worker (pool/main) thread; confirm against the
    // pool's accounting.  Also, a separate AtomicRead after the decrement is
    // racy; using AtomicDecrement's return value would close that window —
    // verify intent.
    if(AtomicRead(&Pool_->ActiveWorkers_) == 1)
        // notify the pool that everyone is now idle
        ConditionWakeAll(Pool_->AllWorkersIdle_);

    // wait until we get woken up by the pool, immediately unlock our locked local mutex
    MutexLock(TaskMutex_);
    ConditionWait(Pool_->WakeupNotifiction_, TaskMutex_);
    MutexUnlock(TaskMutex_);

    // increase the number of active workers
    AtomicIncrement(&Pool_->ActiveWorkers_);
}
/*
 * Mach (OS X) variant: if this suspended thread has a pending fault, report
 * the fault signal through *signal, clear it, decrement the process-wide
 * faulted-thread count, and undo the kernel-level suspension applied by the
 * Mach exception handler.  Returns 1 if a fault was pending, 0 otherwise.
 */
int NaClAppThreadUnblockIfFaulted(struct NaClAppThread *natp, int *signal) {
  kern_return_t result;

  if (natp->fault_signal == 0) {
    return 0;
  }
  *signal = natp->fault_signal;
  natp->fault_signal = 0;
  /* AtomicIncrement with -1 decrements the faulted-thread counter. */
  AtomicIncrement(&natp->nap->faulted_thread_count, -1);

  /*
   * Decrement the kernel's suspension count for the thread. This
   * undoes the effect of mach_exception_handler.c's thread_suspend()
   * call.
   */
  result = thread_resume(GetHostThreadPort(natp));
  if (result != KERN_SUCCESS) {
    NaClLog(LOG_FATAL, "NaClAppThreadUnblockIfFaulted: "
            "thread_resume() call failed: error %d\n", (int) result);
  }
  return 1;
}
/*
 * Handle a fault raised by untrusted code: record the fault signal so the
 * debug stub can see it, then block in sigwait() handling suspend signals
 * until the debug stub clears fault_signal via
 * NaClAppThreadUnblockIfFaulted().
 */
static void HandleUntrustedFault(int signal, struct NaClSignalContext *regs, struct NaClAppThread *natp) {
  /* Sanity check. */
  if ((natp->suspend_state & NACL_APP_THREAD_UNTRUSTED) == 0) {
    NaClSignalErrorMessage("HandleUntrustedFault: Unexpected suspend_state\n");
    NaClAbort();
  }

  /* Notify the debug stub by marking this thread as faulted. */
  natp->fault_signal = signal;
  AtomicIncrement(&natp->nap->faulted_thread_count, 1);

  /*
   * We now expect the debug stub to suspend this thread via the
   * thread suspension API.  This will allow the debug stub to get the
   * register state at the point the fault happened.  The debug stub
   * will be able to modify the register state before unblocking the
   * thread using NaClAppThreadUnblockIfFaulted().
   */
  do {
    int new_signal;
    sigset_t sigset;

    /* Wait only for the dedicated suspend signal. */
    sigemptyset(&sigset);
    sigaddset(&sigset, NACL_THREAD_SUSPEND_SIGNAL);
    if (sigwait(&sigset, &new_signal) != 0) {
      NaClSignalErrorMessage("HandleUntrustedFault: sigwait() failed\n");
      NaClAbort();
    }
    if (new_signal != NACL_THREAD_SUSPEND_SIGNAL) {
      NaClSignalErrorMessage("HandleUntrustedFault: "
                             "sigwait() returned unexpected result\n");
      NaClAbort();
    }
    HandleSuspendSignal(regs);
    /* Loop until the debug stub has consumed and cleared the fault. */
  } while (natp->fault_signal != 0);
}
//! @brief Increment the reference count void acquire( ) const { AtomicIncrement( &m_refCount ); }
// Atomically record one more observed throw.
inline static void IncThrows()
{
    AtomicIncrement(Throws_);
}
// Atomically record one more proceed event.
inline static void IncProceed()
{
    AtomicIncrement(Proceed_);
}
// Jitter a sleep interval: returns t*4/5 plus up to 2*t/5 of pseudo-random
// offset, i.e. roughly [0.8*t, 1.2*t), so concurrent sleepers don't wake in
// lock-step.  A shared atomic counter feeds IntHash for the randomness.
// NOTE(review): assumes IntHash yields non-negative values for signed T,
// otherwise rndNum % t could be negative — confirm IntHash's contract.
static inline T RandomizeSleepTime(T t) throw () {
    static TAtomic counter = 0;
    const T rndNum = IntHash((T)AtomicIncrement(counter));

    return (t * (T)4 + (rndNum % t) * (T)2) / (T)5;
}