TInt WriteEntryPoint(TAny* aArg)
	{
	*(TBool*)aArg = ETrue;
	__e32_atomic_add_ord32(&ThreadsRunning, 1);

	TheLock.WriteLock();
	const TInt index = __e32_atomic_add_ord32(&LogIndex, 1);
	LogReaders[index] = EFalse;
	TheLock.Unlock();

	__e32_atomic_add_ord32(&ThreadsRunning, TUint32(-1));
	return KErrNone;
	}
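// A hedged usage sketch (not from the original test source): how a harness
// might launch WriteEntryPoint and wait for it to finish. KWriterName,
// RunOneWriter and the error handling are assumptions for illustration.
_LIT(KWriterName, "LogWriter");

void RunOneWriter()
	{
	TBool started = EFalse;
	RThread t;
	TInt r = t.Create(KWriterName, WriteEntryPoint, KDefaultStackSize, NULL, &started);
	__ASSERT_ALWAYS(r==KErrNone, User::Invariant());
	TRequestStatus s;
	t.Logon(s);
	t.Resume();
	User::WaitForRequest(s); // by now ThreadsRunning is back to its prior value
	t.Close();
	}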
/**
@return True if segment still exists, false if segment was deleted.
*/
TBool RPageArray::TSegment::Unlock(TSegment*& aSegment, TUint aCount)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	TSegment* s = aSegment;
	__NK_ASSERT_DEBUG(s);

	TUint oldCounts = (TUint)__e32_atomic_add_ord32(&s->iCounts, (TUint32)-(TInt)aCount);
	__NK_ASSERT_DEBUG(oldCounts&KPageArraySegmentLockCountMask); // lock count must have been non-zero before decrementing

#ifdef _DEBUG
	if((oldCounts&KPageArraySegmentLockCountMask)==aCount)
		{
		// check alloc count is consistent...
		TUint allocCount = s->iCounts>>KPageArraySegmentAllocCountShift;
		__NK_ASSERT_DEBUG(allocCount<=KPageArraySegmentSize);
		TUint realAllocCount = 0;
		TPhysAddr* p = s->iPages;
		TPhysAddr* pEnd = p+KPageArraySegmentSize;
		do
			{
			if(IsPresent(*p++))
				++realAllocCount;
			}
		while(p<pEnd);
		if(realAllocCount!=allocCount)
			{
			Kern::Printf("TSegment::Unlock alloc count mismatch %u!=%u",realAllocCount,allocCount);
			__NK_ASSERT_DEBUG(0);
			}
		}
#endif

	// The snippet was truncated here; the tail below is reconstructed from the
	// doc comment above, assuming a Delete() helper that frees the segment and
	// returns false once no lock or alloc references remain.
	if(oldCounts!=aCount)
		return ETrue;	// references remain, segment still exists
	return Delete(aSegment);
	}
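// The atomic API above has no subtract variant; Unlock() decrements by adding
// the two's-complement of the count. A minimal standalone sketch of the idiom
// (RefCount and DecrementBy are hypothetical names):
static volatile TUint32 RefCount = 0;

static TUint DecrementBy(TUint aCount)
	{
	// adding (TUint32)-(TInt)aCount wraps modulo 2^32, i.e. subtracts aCount;
	// the return value is the counter's value *before* the operation
	return (TUint)__e32_atomic_add_ord32(&RefCount, (TUint32)-(TInt)aCount);
	}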
/**
Constructs the object and initializes the reference count to one.

Once constructed, a reference counting object cannot be deleted until its
reference count is reduced to zero.

@see CFsObject::Close
*/
EXPORT_C CFsObject::CFsObject()
	{
#if defined(_DEBUG) || defined(_DEBUG_RELEASE)
	__e32_atomic_add_ord32(&ObjectCount, 1);
#endif
//	iContainer=NULL;
//	iName=NULL;
	iAccessCount=1;
	}
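// A hedged lifetime sketch matching the comment above: the constructor's
// initial reference belongs to the creator and is released with Close(),
// which destroys the object once the count reaches zero. CMyFsObject and
// Example are hypothetical names for illustration.
class CMyFsObject : public CFsObject
	{
	};

void Example()
	{
	CMyFsObject* o = new CMyFsObject; // access count starts at 1
	// ... use the object, possibly handing out further references ...
	o->Close(); // releases the creator's reference; at count 0 the object is destroyed
	}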
TBool BTrace::DoOutBig(TUint32 a0, TUint32 a1, const TAny* aData, TInt aDataSize, TUint32 aContext, TUint32 aPc)
	{
	SBTraceData& traceData = BTraceData;

	// see if trace is small enough to fit in single record...
	if(TUint(aDataSize)<=TUint(KMaxBTraceDataArray+4))
		{
		a0 += aDataSize;
		TUint32 a2 = 0;
		TUint32 a3 = 0;
		if(aDataSize)
			{
			a2 = *((TUint32*&)aData)++; // first 4 bytes into a2
			if(aDataSize>=4 && aDataSize<=8)
				a3 = *(TUint32*)aData; // only 4 more bytes, so pass by value, not pointer
			else
				a3 = (TUint32)aData;
			}
		__ACQUIRE_BTRACE_LOCK();
		TBool r = traceData.iHandler(a0,0,aContext,a1,a2,a3,0,aPc);
		__RELEASE_BTRACE_LOCK();
		return r;
		}

	// adjust for header2, extra, and size word...
	a0 |= BTrace::EHeader2Present<<(BTrace::EFlagsIndex*8)|BTrace::EExtraPresent<<(BTrace::EFlagsIndex*8);
	a0 += 12;

	TUint32 traceId = __e32_atomic_add_ord32(&BigTraceId, 1);
	TUint32 header2 = BTrace::EMultipartFirst;
	TInt offset = 0;
	do
		{
		TUint32 size = aDataSize-offset;
		if(size>KMaxBTraceDataArray)
			size = KMaxBTraceDataArray;
		else
			header2 = BTrace::EMultipartLast;
		if(size<=4)
			*(TUint32*)&aData = *(TUint32*)aData; // 4 bytes or less are passed by value, not pointer

		__ACQUIRE_BTRACE_LOCK();
		TBool result = traceData.iHandler(a0+size,header2,aContext,aDataSize,a1,(TUint32)aData,traceId,aPc);
		__RELEASE_BTRACE_LOCK();
		if (!result)
			return result;

		offset += size;
		*(TUint8**)&aData += size;
		header2 = BTrace::EMultipartMiddle;
		a1 = offset;
		}
	while(offset<aDataSize);

	return TRUE;
	}
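// Illustrative sketch (not part of the original source): the multipart loop
// above splits a big trace into First/Middle/.../Last fragments of at most
// KMaxBTraceDataArray bytes. ShowBigTraceFragments is a hypothetical helper
// that walks the same arithmetic and prints the fragment layout for a given
// payload size.
void ShowBigTraceFragments(TInt aDataSize)
	{
	TUint32 header2 = BTrace::EMultipartFirst;
	TInt offset = 0;
	do
		{
		TUint32 size = aDataSize-offset;
		if(size>KMaxBTraceDataArray)
			size = KMaxBTraceDataArray;
		else
			header2 = BTrace::EMultipartLast; // final fragment
		Kern::Printf("fragment: offset=%d size=%u header2=%u", offset, size, header2);
		offset += size;
		header2 = BTrace::EMultipartMiddle;
		}
	while(offset<aDataSize);
	}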
/**
Destructor.

Deallocates memory associated with this object's name, if a name has been set.

@panic FSERV 104 if the reference count is not zero when the destructor is called.
*/
EXPORT_C CFsObject::~CFsObject()
	{
	__PRINT1(_L("CFsObject::~CFsObject() 0x%x"),this);
	__ASSERT_ALWAYS(Dec()==0,Fault(EObjDestructorAccessCount));
	__ASSERT_ALWAYS(!iContainer,Fault(EObjDestructorContainer));
	if(iName)
		User::Free(iName);
#if defined(_DEBUG) || defined(_DEBUG_RELEASE)
	__e32_atomic_add_ord32(&ObjectCount, (TUint32) -1);
#endif
	}
void FSTest2Signaller0(TAny* a)
	{
	SFSTest2Info& info = *(SFSTest2Info*)a;
	while (!info.iStart)
		{
		}
//	NThreadBase* t = info.iSem.iOwningThread;
	while (!info.iStop)
		{
		++info.iBlockCount;
		__e32_atomic_add_ord32(&info.iSignals, 1);
		NKern::FSSignal(&info.iSem);
		}
	TEST_PRINT1("Ran %d times", info.iBlockCount);
	}
void FSTest2Signaller(TAny* a)
	{
	SFSTest2Info& info = *(SFSTest2Info*)a;
	while (!info.iStart)
		{
		}
	NThreadBase* t = info.iSem.iOwningThread;
	TInt count0=0;
	TInt countneg=0;
	TInt blocked=0;
	TInt prev_block_count = info.iBlockCount;
	TInt tries = 1;
	TUint32 seed[2];
	seed[0] = NKern::CurrentCpu()+1;
	seed[1] = 0;
	while (!info.iStop)
		{
		TInt c = info.iSem.iCount;
		if (c>=1)
			continue;
		if (--tries==0)
			{
			TInt bc;
			do	{
				bc = info.iBlockCount;
				} while (bc<=prev_block_count);
			prev_block_count = bc;
			tries = random(seed) & 127;
			tries += 71;
			}
		TUint32 x = random(seed) & 63;
		while (x)
			--x;
		c = info.iSem.iCount;
		NKern::FSSignal(&info.iSem);
		__e32_atomic_add_ord32(&info.iSignals, 1);
		if (c==0)
			++count0;
		if (c<0)
			++countneg;
#ifdef __SMP__
		if (NKTest::ThreadIsBlocked(t))
			++blocked;
#else
		if (t->iNState == NThread::EWaitFastSemaphore)
			++blocked;
#endif
		}
	TEST_PRINT1("Count =0 %d times", count0);
	TEST_PRINT1("Count <0 %d times", countneg);
	TEST_PRINT1("Blocked %d times", blocked);
	}
// Test thread, just looks for flags being set
void TiedEventThread(TAny*)
	{
	TInt cpu, i, j;
	TUint32 set=0;
	for_each_cpu(cpu)
		{
		NKern::ThreadSetCpuAffinity(NKern::CurrentThread(), cpu);
		for (i=0; i<LoopCount; ++i)
			{
			for (j=0; j<FlagCount; ++j)
				if (__e32_atomic_load_acq32(&Flags[j]))
					++set;
			}
		}
	__e32_atomic_add_ord32(&FlagsSet, set);
	NKern::FSSignal(DoneSem);
	NKern::WaitForAnyRequest();
	}
void FMTest1PInterfererThread(TAny* a)
	{
	SFMTest1Info& info = *(SFMTest1Info*)a;
	NThread* pC = NKern::CurrentThread();
	TEST_PRINT1("Thread %T start", pC);
	TUint32 seed[2] = {(TUint32)pC, 0};
	NThread* t0 = info.iThreads[0];
	TInt n = 0;
	while (!__e32_atomic_load_acq32(&info.iStop))
		{
		while (!__e32_atomic_load_acq32(&info.iStop) && t0->iPriority != 11)
			__chill();
		TUint32 x = random(seed) & 2047;
		while(x)
			{
			__e32_atomic_add_ord32(&x, TUint32(-1));
			}
		if (__e32_atomic_load_acq32(&info.iStop))
			break;
		NKern::ThreadSetPriority(t0, 9);
		++n;
		}
	TEST_PRINT2("Thread %T ran %d times", pC, n);
	}
Q_CORE_EXPORT int QBasicAtomicInt_fetchAndAddOrdered(volatile int *_q_value, int valueToAdd)
{
    return static_cast<int>(__e32_atomic_add_ord32(_q_value, valueToAdd));
}
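// A hedged usage sketch: on the Symbian port of Qt, an ordered fetch-and-add
// on an atomic int resolves to the adapter above. NextId is a hypothetical
// caller for illustration.
#include <QAtomicInt>

static QAtomicInt counter(0);

int NextId()
{
    // like __e32_atomic_add_ord32, this returns the value *before* the add
    return counter.fetchAndAddOrdered(1);
}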
TInt DPowerManager::PowerDown()
	{ // called by ExecHandler
	__KTRACE_OPT(KPOWER,Kern::Printf(">PowerManager::PowerDown(0x%x) Enter", iPowerController->iTargetState));
	__ASSERT_CRITICAL;

	Lock();

	if (iPowerController->iTargetState == EPwActive)
		{
		Unlock();
		return KErrNotReady;
		}

	__PM_ASSERT(iHandlers);

	NFastSemaphore shutdownSem(0);
	NTimer ntimer;
	TDfc dfc(ShutDownTimeoutFn, &shutdownSem);
#ifndef _DEBUG_POWER
	iPendingShutdownCount = 0;
#endif

	DPowerHandler* ph = iHandlers; // power down in reverse order of handler registration
	do	{
#ifdef _DEBUG_POWER
		__PM_ASSERT(!(ph->iStatus & DPowerHandler::EDone));
#endif
		ph->iSem = &shutdownSem;
		ph->PowerDown(iPowerController->iTargetState);
#ifndef _DEBUG_POWER
		iPendingShutdownCount++;
#else
		if(iPslShutdownTimeoutMs>0)
			{
			// fire the shutdown timeout timer
			ntimer.OneShot(iPslShutdownTimeoutMs, dfc);
			}
		NKern::FSWait(&shutdownSem); // power down drivers one after another to simplify debug
		__e32_atomic_and_ord32(&(ph->iStatus), ~DPowerHandler::EDone);
		// timeout condition
		if(iPslShutdownTimeoutMs>0 && ph->iSem)
			{
			__e32_atomic_store_ord_ptr(&ph->iSem, 0);
			}
		ntimer.Cancel();
#endif
		ph = ph->iPrev;
		} while(ph != iHandlers);

#ifndef _DEBUG_POWER
	if(iPslShutdownTimeoutMs>0)
		{
		// fire the shutdown timeout timer
		ntimer.OneShot(iPslShutdownTimeoutMs, dfc);
		}
	ph = iHandlers;
	do	{
		NKern::FSWait(&shutdownSem);
		if(__e32_atomic_load_acq32(&iPendingShutdownCount)==ESHUTDOWN_TIMEOUT)
			{
			iPendingShutdownCount = 0;
			NKern::Lock();
			// iPendingShutdownCount could be altered while ShutDownTimeoutFn is
			// running; reset it to make sure shutdownSem is completely clean.
			shutdownSem.Reset();
			NKern::Unlock();
			break;
			}
		__e32_atomic_add_ord32(&iPendingShutdownCount, (TUint)(~0x0)); // iPendingShutdownCount--
		ph = ph->iPrev;
		} while(ph != iHandlers);
	ntimer.Cancel();
#endif

	TTickQ::Wait();

	iPowerController->PowerDown(K::SecondQ->WakeupTime());
	__PM_ASSERT(iPowerController->iTargetState != EPwOff);
	iPowerController->iTargetState = EPwActive;

	K::SecondQ->WakeUp();
	TTickQ::Signal();

	NFastSemaphore powerupSem(0);
	ph = iHandlers->iNext; // power up in same order as handler registration
	do	{
#ifdef _DEBUG_POWER
		__PM_ASSERT(!(ph->iStatus & DPowerHandler::EDone));
#endif
		ph->iSem = &powerupSem;
		ph->PowerUp();
#ifdef _DEBUG_POWER
		NKern::FSWait(&powerupSem); // power up drivers one after another to simplify debug
		__PM_ASSERT(!ph->iSem);
		__PM_ASSERT(ph->iStatus & DPowerHandler::EDone);
		ph->iStatus &= ~DPowerHandler::EDone;
#endif
		ph = ph->iNext;
		} while(ph != iHandlers->iNext);

#ifndef _DEBUG_POWER
	ph = iHandlers->iNext;
	do	{
		NKern::FSWait(&powerupSem);
		ph = ph->iNext;
		} while(ph != iHandlers->iNext);
#endif

	// complete wakeup notification request if any
	NotifyWakeupEvent(KErrNone);

	Unlock();
	__KTRACE_OPT(KPOWER,Kern::Printf("<PowerManager::PowerDown() Leave"));
	return KErrNone;
	}
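// A hedged sketch of the driver side of the handshake above (DMyPowerHandler
// and KMyPowerHandlerName are hypothetical): each registered DPowerHandler
// receives PowerDown()/PowerUp() calls and signals completion, which releases
// the corresponding NKern::FSWait() in DPowerManager::PowerDown().
_LIT(KMyPowerHandlerName, "MyDriver");

class DMyPowerHandler : public DPowerHandler
	{
public:
	DMyPowerHandler() : DPowerHandler(KMyPowerHandlerName) {}
	void PowerDown(TPowerState /*aState*/)
		{
		// quiesce the hardware here...
		PowerDownDone(); // signals iSem so the manager's FSWait can proceed
		}
	void PowerUp()
		{
		// restore the hardware here...
		PowerUpDone();
		}
	};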
FORCE_INLINE void RPageArray::TSegment::Lock(TUint aCount)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__e32_atomic_add_ord32(&iCounts, (TUint32)aCount);
	__NK_ASSERT_DEBUG(iCounts&KPageArraySegmentLockCountMask);
	}
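// A hedged pairing sketch (ExampleUse is hypothetical): Lock() and Unlock()
// are both called with the MmuLock held and their counts must balance; the
// final Unlock() may delete the segment, which is why it takes the segment
// pointer by reference.
void ExampleUse(RPageArray::TSegment*& aSegment)
	{
	MmuLock::Lock();
	aSegment->Lock(1); // pin the segment
	MmuLock::Unlock();

	// ... operate on the segment's pages without holding MmuLock ...

	MmuLock::Lock();
	RPageArray::TSegment::Unlock(aSegment, 1); // may free the segment
	MmuLock::Unlock();
	}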
/**
Atomically (i.e. in a manner which is safe against concurrent access by
other threads) decrements a TInt value by 1.

As an example of its use, the function is used in the implementation of
critical sections.

@param aValue A reference to an integer whose value is to be decremented.
              On return, contains the decremented value.

@return The value of aValue before it was decremented.

@see User::LockedInc
@see RCriticalSection
*/
EXPORT_C TInt User::LockedDec(TInt& aValue)
	{
	return (TInt)__e32_atomic_add_ord32(&aValue, 0xFFFFFFFF);
	}
/**
Atomically (i.e. in a manner which is safe against concurrent access by
other threads) increments a TInt value by 1.

As an example of its use, the function is used in the implementation of
critical sections.

@param aValue A reference to an integer whose value is to be incremented.
              On return, contains the incremented value.

@return The value of aValue before it was incremented.

@see User::LockedDec
@see RCriticalSection
*/
EXPORT_C TInt User::LockedInc(TInt& aValue)
	{
	return (TInt)__e32_atomic_add_ord32(&aValue, 1);
	}
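// A hedged sketch of the pattern LockedInc() and LockedDec() support
// (UseCount, Acquire and Release are hypothetical names): because the
// *previous* value is returned, exactly one thread observes each boundary
// crossing, with no extra locking required.
TInt UseCount = 0;

void Acquire()
	{
	if (User::LockedInc(UseCount) == 0)
		{
		// this call took the count from 0 to 1: perform one-time setup
		}
	}

void Release()
	{
	if (User::LockedDec(UseCount) == 1)
		{
		// this call took the count from 1 to 0: perform teardown
		}
	}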