/*
 * Account for 'bytes' of native (malloc) memory that a managed object now
 * references, and apply back-pressure on the GC/finalizers when the native
 * footprint crosses the configured watermarks.
 *
 * bytes: number of native bytes just allocated on behalf of a managed object.
 *
 * NOTE(review): relies on gHs (global heap source state) and on the caller
 * pairing this with dvmHeapSourceRegisterFree on release — confirm against
 * the callers; only the registration side is visible here.
 */
void dvmHeapSourceRegisterNativeAllocation(int bytes) {
    /* If we have just done a GC, ensure that the finalizers are done and update
     * the native watermarks.
     */
    if (gHs->nativeNeedToRunFinalization) {
        dvmRunFinalization();
        dvmHeapSourceUpdateMaxNativeFootprint();
        gHs->nativeNeedToRunFinalization = false;
    }

    /* Atomically bump the shared native-bytes counter. */
    android_atomic_add(bytes, &gHs->nativeBytesAllocated);

    if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintGCWatermark) {
        /* The second watermark is higher than the gc watermark. If you hit
         * this it means you are allocating native objects faster than the GC
         * can keep up with. If this occurs, we do a GC for alloc.
         */
        if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintLimit) {
            Thread* self = dvmThreadSelf();

            /* First, try to reclaim native memory just by running finalizers. */
            dvmRunFinalization();
            if (dvmCheckException(self)) {
                return;
            }

            /* If a concurrent GC is in flight, wait for it rather than
             * starting another collection. */
            dvmLockHeap();
            bool waited = dvmWaitForConcurrentGcToComplete();
            dvmUnlockHeap();
            if (waited) {
                // Just finished a GC, attempt to run finalizers.
                dvmRunFinalization();
                if (dvmCheckException(self)) {
                    return;
                }
            }

            // If we still are over the watermark, attempt a GC for alloc and run finalizers.
            if ((size_t)gHs->nativeBytesAllocated > gHs->nativeFootprintLimit) {
                dvmLockHeap();
                dvmWaitForConcurrentGcToComplete();
                dvmCollectGarbageInternal(GC_FOR_MALLOC);
                dvmUnlockHeap();
                dvmRunFinalization();
                gHs->nativeNeedToRunFinalization = false;
                if (dvmCheckException(self)) {
                    return;
                }
            }
            /* We have just run finalizers, update the native watermark since
             * it is very likely that finalizers released native managed
             * allocations.
             */
            dvmHeapSourceUpdateMaxNativeFootprint();
        } else {
            /* Over the GC watermark but under the hard limit: just nudge the
             * concurrent GC thread instead of blocking this one. */
            dvmSignalCond(&gHs->gcThreadCond);
        }
    }
}
void ProcessState::spawnPooledThread(bool isMain) { if (mThreadPoolStarted) { int32_t s = android_atomic_add(1, &mThreadPoolSeq); char buf[16]; snprintf(buf, sizeof(buf), "Binder_%X", s); ALOGV("Spawning new pooled thread, name=%s\n", buf); sp<Thread> t = new PoolThread(isMain); t->run(buf); } }
/* * Exercise several of the atomic ops. */ static void doAtomicTest(int num) { int addVal = (num & 0x01) + 1; int i; for (i = 0; i < ITERATION_COUNT; i++) { if (USE_ATOMIC) { android_atomic_inc(&incTest); android_atomic_dec(&decTest); android_atomic_add(addVal, &addTest); int val; do { val = casTest; } while (android_atomic_release_cas(val, val + 3, &casTest) != 0); do { val = casTest; } while (android_atomic_acquire_cas(val, val - 1, &casTest) != 0); int64_t wval; do { wval = dvmQuasiAtomicRead64(&wideCasTest); } while (dvmQuasiAtomicCas64(wval, wval + 0x0000002000000001LL, &wideCasTest) != 0); do { wval = dvmQuasiAtomicRead64(&wideCasTest); } while (dvmQuasiAtomicCas64(wval, wval - 0x0000002000000001LL, &wideCasTest) != 0); } else { incr(); decr(); add(addVal); int val; do { val = casTest; } while (compareAndSwap(val, val + 3, &casTest) != 0); do { val = casTest; } while (compareAndSwap(val, val - 1, &casTest) != 0); int64_t wval; do { wval = wideCasTest; } while (compareAndSwapWide(wval, wval + 0x0000002000000001LL, &wideCasTest) != 0); do { wval = wideCasTest; } while (compareAndSwapWide(wval, wval - 0x0000002000000001LL, &wideCasTest) != 0); } } }
void *test_func(void *arg) { int i=0; for(i=0; i<20000; ++i){ #if 1 android_atomic_add(1, &count); #else count += 1; #endif } return NULL; }
/*
 * Take a strong reference on this object.
 *
 * A weak reference is taken first (every strong ref implies a weak ref),
 * then the strong count is atomically incremented. mStrong starts at
 * INITIAL_STRONG_VALUE, so seeing that value back from the increment means
 * this is the very first strong reference: the bias is subtracted out and
 * onFirstRef() is invoked exactly once.
 *
 * id: debugging cookie identifying the reference holder (used by
 *     addStrongRef ref-tracking).
 */
void RefBase::incStrong(const void* id) const
{
    weakref_impl* const refs = mRefs;
    refs->incWeak(id);

    refs->addStrongRef(id);
    const int32_t c = android_atomic_inc(&refs->mStrong);
    /* android_atomic_inc returns the pre-increment value; it must have been
     * positive, otherwise the object was already destroyed. */
    ALOG_ASSERT(c > 0, "incStrong() called on %p after last strong ref", refs);
#if PRINT_REFS
    ALOGD("incStrong of %p from %p: cnt=%d\n", this, id, c);
#endif
    if (c != INITIAL_STRONG_VALUE) {
        return;
    }

    /* First strong ref: remove the INITIAL_STRONG_VALUE bias and notify. */
    android_atomic_add(-INITIAL_STRONG_VALUE, &refs->mStrong);
    refs->mBase->onFirstRef();
}
/*
 * Take a strong reference even if the strong count has already dropped to
 * zero (i.e. revive the object). Unlike incStrong(), a prior count of 0 is
 * tolerated here — only underflow (negative) is asserted against.
 *
 * id: debugging cookie identifying the reference holder.
 */
void RefBase::forceIncStrong(const void* id) const
{
    weakref_impl* const refs = mRefs;
    refs->incWeak(id);

    refs->addStrongRef(id);
    const int32_t c = android_atomic_inc(&refs->mStrong);
    ALOG_ASSERT(c >= 0, "forceIncStrong called on %p after ref count "
               "underflow", refs);
#if PRINT_REFS
    ALOGD("forceIncStrong of %p from %p: cnt=%d\n", this, id, c);
#endif

    switch (c) {
    case INITIAL_STRONG_VALUE:
        /* First-ever strong ref: strip the bias, then fall through so
         * onFirstRef() fires, same as the revival (c == 0) case. */
        android_atomic_add(-INITIAL_STRONG_VALUE, &refs->mStrong);
        // fall through...
    case 0:
        refs->mBase->onFirstRef();
    }
}
/*
 * Try to promote a weak reference to a strong one.
 *
 * Returns true (and leaves the object with one extra strong AND weak ref)
 * on success; returns false (weak ref released again) if promotion is not
 * permitted. The fast path CAS-increments mStrong only while it is a real
 * positive count; the slow path handles the two special states:
 *   - mStrong == INITIAL_STRONG_VALUE: no strong ref has ever been taken;
 *   - mStrong <= 0: the last strong ref is gone and the object may need
 *     reviving, which is only legal under OBJECT_LIFETIME_WEAK.
 *
 * id: debugging cookie identifying the reference holder.
 */
bool RefBase::weakref_type::attemptIncStrong(const void* id)
{
    incWeak(id);

    weakref_impl* const impl = static_cast<weakref_impl*>(this);

    int32_t curCount = impl->mStrong;
    ALOG_ASSERT(curCount >= 0, "attemptIncStrong called on %p after underflow",
               this);

    /* Fast path: CAS-increment while the count is an ordinary positive
     * value (not the never-referenced bias, not zero/negative). */
    while (curCount > 0 && curCount != INITIAL_STRONG_VALUE) {
        if (android_atomic_cmpxchg(curCount, curCount+1, &impl->mStrong) == 0) {
            break;
        }
        /* CAS lost a race; reload and re-evaluate the loop condition. */
        curCount = impl->mStrong;
    }

    if (curCount <= 0 || curCount == INITIAL_STRONG_VALUE) {
        bool allow;
        if (curCount == INITIAL_STRONG_VALUE) {
            // Attempting to acquire first strong reference... this is allowed
            // if the object does NOT have a longer lifetime (meaning the
            // implementation doesn't need to see this), or if the implementation
            // allows it to happen.
            allow = (impl->mFlags&OBJECT_LIFETIME_WEAK) != OBJECT_LIFETIME_WEAK
                  || impl->mBase->onIncStrongAttempted(FIRST_INC_STRONG, id);
        } else {
            // Attempting to revive the object... this is allowed
            // if the object DOES have a longer lifetime (so we can safely
            // call the object with only a weak ref) and the implementation
            // allows it to happen.
            allow = (impl->mFlags&OBJECT_LIFETIME_WEAK) == OBJECT_LIFETIME_WEAK
                  && impl->mBase->onIncStrongAttempted(FIRST_INC_STRONG, id);
        }
        if (!allow) {
            /* Promotion refused: drop the weak ref taken on entry. */
            decWeak(id);
            return false;
        }
        curCount = android_atomic_inc(&impl->mStrong);

        // If the strong reference count has already been incremented by
        // someone else, the implementor of onIncStrongAttempted() is holding
        // an unneeded reference.  So call onLastStrongRef() here to remove it.
        // (No, this is not pretty.)  Note that we MUST NOT do this if we
        // are in fact acquiring the first reference.
        if (curCount > 0 && curCount < INITIAL_STRONG_VALUE) {
            impl->mBase->onLastStrongRef(id);
        }
    }

    impl->addStrongRef(id);

#if PRINT_REFS
    ALOGD("attemptIncStrong of %p from %p: cnt=%d\n", this, id, curCount);
#endif

    /* If we won the very first strong reference, strip the bias and fire
     * onFirstRef(), mirroring incStrong(). */
    if (curCount == INITIAL_STRONG_VALUE) {
        android_atomic_add(-INITIAL_STRONG_VALUE, &impl->mStrong);
        impl->mBase->onFirstRef();
    }

    return true;
}
/*
 * Darwin-style OSAtomicAdd32 shim on top of the Android atomic primitive.
 *
 * android_atomic_add() hands back the value *before* the addition, whereas
 * OSAtomicAdd32 is specified to return the value *after* it — hence the
 * extra addition on the way out.
 */
int32_t OSAtomicAdd32(int32_t value, volatile int32_t* target)
{
    const int32_t previous = android_atomic_add(value, target);
    return previous + value;
}
/*
 * Build a unique name for the next binder pool thread ("Binder_1",
 * "Binder_2", ... with an uppercase-hex sequence number). The shared
 * sequence counter is bumped atomically so concurrent spawns never
 * produce the same name.
 */
String8 ProcessState::makeBinderThreadName()
{
    const int32_t sequence = android_atomic_add(1, &mThreadPoolSeq);
    String8 threadName;
    threadName.appendFormat("Binder_%X", sequence);
    return threadName;
}