/* Simple exerciser for the legacy Bionic __atomic_dec: counts x down
   from 5 to -20, printing the pre-decrement value each iteration. */
#include <stdio.h>
#include <sys/atomics.h>

int main(int argc, const char *argv[]) {
    int x = 5;
    while (x > -20) {
        printf("old_x=%d\n", __atomic_dec(&x));
        printf("x=%d\n", x);
    }
    printf("OK\n");
    return 0;
}
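The legacy helper returns the value the variable held *before* the decrement, which is why the loop above prints old_x=5 first. On a current toolchain the same test can be written against std::atomic; the following is a minimal sketch of the equivalent, not part of the original source:

#include <atomic>
#include <cstdio>

int main() {
    std::atomic<int> x(5);
    while (x.load() > -20) {
        // fetch_sub returns the value held before the decrement,
        // matching the old __atomic_dec contract.
        int old_x = x.fetch_sub(1);
        std::printf("old_x=%d\n", old_x);
        std::printf("x=%d\n", x.load());
    }
    std::printf("OK\n");
    return 0;
}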
__hidden void filefd_closed(int fd) {
    ALOGV("%s(fd:%d) {", __func__, fd);
    if (fd >= 0 && fd < __FD_SETSIZE) {
        if (filefd_mapped_file[fd] != UNUSED_FD_TYPE) {
            /* Forget the mapping for this descriptor and drop it from
               the count of live mapped files. */
            filefd_mapped_file[fd] = UNUSED_FD_TYPE;
            filefd_FD_CLOEXEC_file[fd] = 0;
            __atomic_dec(&filefd_mapped_files);
        }
    }
    ALOGV("%s: }", __func__);
}
void CPacketPool::Release(CPacket *pBuffer) {
    if (__atomic_dec(&pBuffer->mRefCount) == 1) { // from 1 to 0
        CPacket *pBuffer2 = pBuffer->mpNext;
        OnReleaseBuffer(pBuffer);
        AM_ENSURE_OK_(mpBufferQ->PostMsg((void*)&pBuffer, sizeof(CPacket*)));
        if (pBuffer2) {
            OnReleaseBuffer(pBuffer2);
            AM_ENSURE_OK_(mpBufferQ->PostMsg((void*)&pBuffer2, sizeof(CPacket*)));
        }
    }
}
void MediaBuffer::release() {
    if (mObserver == NULL) {
        CHECK_EQ(mRefCount, 0);
        delete this;
        return;
    }

    int prevCount = __atomic_dec(&mRefCount);
    if (prevCount == 1) {
        if (mObserver == NULL) {
            delete this;
            return;
        }

        mObserver->signalBufferReturned(this);
    }
    CHECK(prevCount > 0);
}
void CPacketPool::Release() {
    if (__atomic_dec(&mRefCount) == 1) { // 1 -> 0: last reference dropped
        inherited::Delete();
    }
}
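All three Release methods above share one idiom: fetch-and-decrement, and the caller that observes a previous value of 1 owns teardown. Here is a minimal self-deleting refcount sketch in C++11; the class and method names are illustrative, not from the sources above:

#include <atomic>

class RefCounted {
public:
    void AddRef() {
        // Relaxed suffices for the increment: taking a new reference
        // requires no ordering with other operations.
        mRefCount.fetch_add(1, std::memory_order_relaxed);
    }

    void Release() {
        // fetch_sub returns the previous value, like __atomic_dec.
        // acq_rel ensures writes made while holding a reference are
        // visible to the thread that performs the final delete.
        if (mRefCount.fetch_sub(1, std::memory_order_acq_rel) == 1)
            delete this;  // dropped the last reference: 1 -> 0
    }

protected:
    virtual ~RefCounted() {}  // protected: only Release may destroy

private:
    std::atomic<int> mRefCount{1};  // creator holds the first reference
};

int main() {
    RefCounted *obj = new RefCounted();  // refcount == 1
    obj->AddRef();                       // refcount == 2
    obj->Release();                      // 2 -> 1, object survives
    obj->Release();                      // 1 -> 0, object deletes itself
    return 0;
}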
/*
 * Start tests, show results.
 */
bool dvmTestAtomicSpeed() {
    pthread_t threads[THREAD_COUNT];
    void *(*startRoutine)(void *) = atomicTest;
    int64_t startWhen, endWhen;

#if defined(__ARM_ARCH__)
    dvmFprintf(stdout, "__ARM_ARCH__ is %d\n", __ARM_ARCH__);
#endif
#if defined(ANDROID_SMP)
    dvmFprintf(stdout, "ANDROID_SMP is %d\n", ANDROID_SMP);
#endif

    dvmFprintf(stdout, "Creating threads\n");
    int i;
    for (i = 0; i < THREAD_COUNT; i++) {
        void *arg = (void *) i;
        if (pthread_create(&threads[i], NULL, startRoutine, arg) != 0) {
            dvmFprintf(stderr, "thread create failed\n");
        }
    }

    /* wait for all the threads to reach the starting line */
    while (1) {
        pthread_mutex_lock(&waitLock);
        if (threadsStarted == THREAD_COUNT) {
            dvmFprintf(stdout, "Starting test\n");
            startWhen = getRelativeTimeNsec();
            pthread_cond_broadcast(&waitCond);
            pthread_mutex_unlock(&waitLock);
            break;
        }
        pthread_mutex_unlock(&waitLock);
        usleep(100000);
    }

    for (i = 0; i < THREAD_COUNT; i++) {
        void *retval;
        if (pthread_join(threads[i], &retval) != 0) {
            dvmFprintf(stderr, "thread join (%d) failed\n", i);
        }
    }
    endWhen = getRelativeTimeNsec();
    dvmFprintf(stdout, "All threads stopped, time is %.6fms\n",
        (endWhen - startWhen) / 1000000.0);

    /*
     * Show results; expecting:
     *
     * incTest = 5000000
     * decTest = -5000000
     * addTest = 7500000
     * casTest = 10000000
     * wideCasTest = 0x6600000077000000
     */
    dvmFprintf(stdout, "incTest = %d\n", incTest);
    dvmFprintf(stdout, "decTest = %d\n", decTest);
    dvmFprintf(stdout, "addTest = %d\n", addTest);
    dvmFprintf(stdout, "casTest = %d\n", casTest);
    dvmFprintf(stdout, "wideCasTest = 0x%llx\n", wideCasTest);

    /* do again, serially (SMP check) */
    startWhen = getRelativeTimeNsec();
    for (i = 0; i < THREAD_COUNT; i++) {
        doAtomicTest(i);
    }
    endWhen = getRelativeTimeNsec();
    dvmFprintf(stdout, "Same iterations done serially: time is %.6fms\n",
        (endWhen - startWhen) / 1000000.0);

    /*
     * Hard to do a meaningful thrash test on these, so just do a simple
     * function test.
     */
    andTest = 0xffd7fa96;
    orTest = 0x122221ff;
    android_atomic_and(0xfffdaf96, &andTest);
    android_atomic_or(0xdeaaeb00, &orTest);

    if (android_atomic_release_cas(failingCasTest + 1, failingCasTest - 1,
            &failingCasTest) == 0)
        dvmFprintf(stdout, "failing test did not fail!\n");

    dvmFprintf(stdout, "andTest = %#x\n", andTest);
    dvmFprintf(stdout, "orTest = %#x\n", orTest);
    dvmFprintf(stdout, "failingCasTest = %d\n", failingCasTest);

#ifdef TEST_BIONIC
    /*
     * Quick function test on the bionic ops.
     */
    int prev;
    int tester = 7;
    prev = __atomic_inc(&tester);
    __atomic_inc(&tester);
    __atomic_inc(&tester);
    dvmFprintf(stdout, "bionic 3 inc: %d -> %d\n", prev, tester);
    prev = __atomic_dec(&tester);
    __atomic_dec(&tester);
    __atomic_dec(&tester);
    dvmFprintf(stdout, "bionic 3 dec: %d -> %d\n", prev, tester);
    prev = __atomic_swap(27, &tester);
    dvmFprintf(stdout, "bionic swap: %d -> %d\n", prev, tester);
    int swapok = __atomic_cmpxchg(27, 72, &tester);
    dvmFprintf(stdout, "bionic cmpxchg: %d (%d)\n", tester, swapok);
#endif

    testAtomicSpeed();

    return 0;
}
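The benchmark's starting-line rendezvous (the main thread polls threadsStarted under waitLock, then broadcasts waitCond so all workers begin at once) is a reusable pattern. Below is a minimal C++11 sketch of the same rendezvous with the polling loop replaced by a predicate wait; all names (gWaitLock, worker, kThreadCount) are illustrative, not from the test:

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

static const int kThreadCount = 4;
static std::mutex gWaitLock;
static std::condition_variable gWaitCond;
static int gThreadsStarted = 0;

static void worker(int id) {
    std::unique_lock<std::mutex> lk(gWaitLock);
    if (++gThreadsStarted == kThreadCount)
        gWaitCond.notify_all();  // last arrival releases everyone
    else
        gWaitCond.wait(lk, [] { return gThreadsStarted == kThreadCount; });
    lk.unlock();
    // ... timed workload would start here ...
    std::printf("thread %d released\n", id);
}

int main() {
    std::vector<std::thread> threads;
    for (int i = 0; i < kThreadCount; ++i)
        threads.emplace_back(worker, i);
    for (std::thread &t : threads)
        t.join();
    return 0;
}

The predicate form of wait() handles spurious wakeups and removes the original's usleep polling; holding the lock across the count and the notify keeps the two consistent.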
/* Internal version of pthread_create.  See comment in pt-internal.h.  */
int
__pthread_create_internal (struct __pthread **thread,
                           const pthread_attr_t *attr,
                           void *(*start_routine)(void *), void *arg)
{
  int err;
  struct __pthread *pthread;
  const struct __pthread_attr *setup;
  sigset_t sigset;

  /* Allocate a new thread structure.  */
  err = __pthread_alloc (&pthread);
  if (err)
    goto failed;

  /* Use the default attributes if ATTR is NULL.  */
  setup = attr ? attr : &__pthread_default_attr;

  /* Initialize the thread state.  */
  pthread->state = (setup->detachstate == PTHREAD_CREATE_DETACHED
                    ? PTHREAD_DETACHED : PTHREAD_JOINABLE);

  /* If the user supplied a stack, it is not our responsibility to set
     up a stack guard.  */
  if (setup->stackaddr)
    pthread->guardsize = 0;
  else
    pthread->guardsize = (setup->guardsize <= setup->stacksize
                          ? setup->guardsize : setup->stacksize);

  /* Find a stack.  There are several scenarios: if a detached thread
     kills itself, it has no way to deallocate its stack, thus it
     leaves PTHREAD->stack set to true.  We try to reuse it here,
     however, if the user supplied a stack, we cannot use the old one.
     Right now, we simply deallocate it.  */
  if (pthread->stack)
    {
      if (setup->stackaddr != __pthread_default_attr.stackaddr)
        {
          __pthread_stack_dealloc (pthread->stackaddr, pthread->stacksize);
          pthread->stackaddr = setup->stackaddr;
          pthread->stacksize = setup->stacksize;
        }
    }
  else
    {
      err = __pthread_stack_alloc (&pthread->stackaddr, setup->stacksize);
      if (err)
        goto failed_stack_alloc;

      pthread->stacksize = setup->stacksize;
      pthread->stack = 1;
    }

  /* Allocate the kernel thread and other required resources.  */
  err = __pthread_thread_alloc (pthread);
  if (err)
    goto failed_thread_alloc;

#ifdef ENABLE_TLS
  pthread->tcb = _dl_allocate_tls (NULL);
  if (!pthread->tcb)
    goto failed_thread_tls_alloc;
  pthread->tcb->tcb = pthread->tcb;
#endif /* ENABLE_TLS */

  /* And initialize the rest of the machine context.  This may include
     additional machine- and system-specific initializations that prove
     convenient.  */
  err = __pthread_setup (pthread, entry_point, start_routine, arg);
  if (err)
    goto failed_setup;

  /* Initialize the system-specific signal state for the new thread.  */
  err = __pthread_sigstate_init (pthread);
  if (err)
    goto failed_sigstate;

  /* Set the new thread's signal mask and set the pending signals to
     empty.  POSIX says: "The signal mask shall be inherited from the
     creating thread.  The set of signals pending for the new thread
     shall be empty."  If the current thread is not a pthread then we
     just inherit the process' sigmask.  */
  if (__pthread_num_threads == 1)
    err = sigprocmask (0, 0, &sigset);
  else
    err = __pthread_sigstate (_pthread_self (), 0, 0, &sigset, 0);
  assert_perror (err);

  err = __pthread_sigstate (pthread, SIG_SETMASK, &sigset, 0, 1);
  assert_perror (err);

  /* Increase the total number of threads.  We do this before actually
     starting the new thread, since the new thread might immediately
     call `pthread_exit' which decreases the number of threads and
     calls `exit' if the number of threads reaches zero.  Increasing
     the number of threads from within the new thread isn't an option
     since this thread might return and call `pthread_exit' before the
     new thread runs.  */
  __atomic_inc (&__pthread_total);

  /* Store a pointer to this thread in the thread ID lookup table.  We
     could use __thread_setid, however, we only lock for reading as no
     other thread should be using this entry (we also assume that the
     store is atomic).  */
  pthread_rwlock_rdlock (&__pthread_threads_lock);
  __pthread_threads[pthread->thread - 1] = pthread;
  pthread_rwlock_unlock (&__pthread_threads_lock);

  /* At this point it is possible to guess our pthread ID.  We have to
     make sure that all functions taking a pthread_t argument can
     handle the fact that this thread isn't really running yet.  */

  /* Schedule the new thread.  */
  err = __pthread_thread_start (pthread);
  if (err)
    goto failed_starting;

  /* At this point the new thread is up and running.  */
  *thread = pthread;

  return 0;

failed_starting:
  __pthread_setid (pthread->thread, NULL);
  __atomic_dec (&__pthread_total);
failed_sigstate:
  __pthread_sigstate_destroy (pthread);
failed_setup:
#ifdef ENABLE_TLS
  _dl_deallocate_tls (pthread->tcb, 1);
failed_thread_tls_alloc:
#endif /* ENABLE_TLS */
  __pthread_thread_dealloc (pthread);
  __pthread_thread_halt (pthread);
failed_thread_alloc:
  __pthread_stack_dealloc (pthread->stackaddr, pthread->stacksize);
  pthread->stack = 0;
failed_stack_alloc:
  __pthread_dealloc (pthread);
failed:
  return err;
}
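The error paths above use the classic goto-ladder unwind: each label releases one resource and falls through to the labels below it, so everything acquired so far is released in reverse order of acquisition. A compilable sketch of the pattern, using hypothetical acquire/release pairs rather than the pthread internals:

#include <cstdio>

/* Hypothetical resources, for illustration only: each acquire_* returns
   0 on success, nonzero on failure; acquire_c is rigged to fail. */
static int acquire_a() { std::puts("acquire a"); return 0; }
static int acquire_b() { std::puts("acquire b"); return 0; }
static int acquire_c() { std::puts("acquire c (fails)"); return 1; }
static void release_a() { std::puts("release a"); }
static void release_b() { std::puts("release b"); }

static int setup_all() {
    int err;

    if ((err = acquire_a()) != 0)
        goto failed;
    if ((err = acquire_b()) != 0)
        goto failed_b;
    if ((err = acquire_c()) != 0)
        goto failed_c;
    return 0;

failed_c:          /* c failed: undo b, then fall through to undo a */
    release_b();
failed_b:
    release_a();
failed:
    return err;
}

int main() {
    std::printf("setup_all -> %d\n", setup_all());
    return 0;
}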