// Retire the buffer at the head of the queue: pop it from the queued set,
// advance head to the next buffer, and lock that buffer so the server can
// safely use it. Returns the new head index, BAD_VALUE if head is corrupt,
// or NOT_ENOUGH_DATA if no buffer is queued.
ssize_t SharedBufferServer::RetireUpdate::operator()() {
    int32_t head = stack.head;
    // Sanity-check head before using it as an index.
    if (uint32_t(head) >= SharedBufferStack::NUM_BUFFER_MAX)
        return BAD_VALUE;

    // Preventively lock the current buffer before updating queued.
    android_atomic_write(stack.headBuf, &stack.inUse);

    // Decrement the number of queued buffers (CAS loop: retry if the client
    // queued/consumed concurrently; bail out if nothing is queued).
    int32_t queued;
    do {
        queued = stack.queued;
        if (queued == 0) {
            return NOT_ENOUGH_DATA;
        }
    } while (android_atomic_cmpxchg(queued, queued-1, &stack.queued));

    // lock the buffer before advancing head, which automatically unlocks
    // the buffer we preventively locked upon entering this function
    head = (head + 1) % numBuffers;
    const int8_t headBuf = stack.index[head];
    stack.headBuf = headBuf;
    android_atomic_write(headBuf, &stack.inUse);

    // head is only modified here, so we don't need to use cmpxchg
    android_atomic_write(head, &stack.head);

    // now that head has moved, we can increment the number of
    // available buffers
    android_atomic_inc(&stack.available);
    return head;
}
// Return the device/inode identifying /dev/pmem_hwb, caching the result of
// the first successful (or failed) stat() for all later calls.
// Returns 1 on success (outputs filled in), 0 on failure (errno set).
static int get_pmem_file_info(dev_t* device, ino_t* serial_number) {
    // One-shot guard: exactly one thread wins the cmpxchg below and publishes
    // either the cached values or the cached error code.
    static int publish_claimed = 0;
    static int stat_failed = 0;
    static int stat_errno;
    static int cache_valid = 0;
    static dev_t cached_device;
    static ino_t cached_serial_number;

    // Fast paths: a previous call already resolved the answer.
    if (stat_failed) {
        errno = stat_errno;
        return 0;
    }
    if (cache_valid) {
        *device = cached_device;
        *serial_number = cached_serial_number;
        return 1;
    }

    struct stat info;
    if (stat("/dev/pmem_hwb", &info) < 0) {
        // Only the winner of the cmpxchg records the error for later callers.
        if (0 == android_atomic_cmpxchg(0, 1, &publish_claimed)) {
            stat_errno = errno;
            android_atomic_write(1, &stat_failed);
        }
        return 0;
    }

    // Publish the cached values once; losers still return their own fresh
    // stat() results, which refer to the same file.
    if (0 == android_atomic_cmpxchg(0, 1, &publish_claimed)) {
        cached_device = info.st_dev;
        cached_serial_number = info.st_ino;
        android_atomic_write(1, &cache_valid);
    }
    *device = info.st_dev;
    *serial_number = info.st_ino;
    return 1;
}
/*
 * Return the pid of the process, computing and caching it in the global
 * gralloc_drm_pid on first use.
 */
static int gralloc_drm_get_pid(void)
{
    if (unlikely(gralloc_drm_pid == 0)) {
        /* First call: record our pid for all subsequent callers. */
        android_atomic_write((int32_t) getpid(), &gralloc_drm_pid);
    }

    return gralloc_drm_pid;
}
// Unlock the buffer the server previously locked. Only the currently locked
// buffer may be unlocked; anything else is a caller error.
ssize_t SharedBufferServer::UnlockUpdate::operator()() {
    if (stack.inUse == lockedBuffer) {
        // Mark the stack as having no buffer locked.
        android_atomic_write(-1, &stack.inUse);
        return NO_ERROR;
    }

    LOGE("unlocking %d, but currently locked buffer is %d "
         "(identity=%d, token=%d)",
         lockedBuffer, stack.inUse, stack.identity, stack.token);
    return BAD_VALUE;
}
void BpMemoryHeap::assertReallyMapped() const { if (mHeapId == -1) { // remote call without mLock held, worse case scenario, we end up // calling transact() from multiple threads, but that's not a problem, // only mmap below must be in the critical section. Parcel data, reply; data.writeInterfaceToken(IMemoryHeap::getInterfaceDescriptor()); status_t err = remote()->transact(HEAP_ID, data, &reply); int parcel_fd = reply.readFileDescriptor(); ssize_t size = reply.readInt32(); uint32_t flags = reply.readInt32(); #ifndef BINDER_COMPAT uint32_t offset = reply.readInt32(); #else uint32_t offset = 0; #endif ALOGE_IF(err, "binder=%p transaction failed fd=%d, size=%ld, err=%d (%s)", asBinder().get(), parcel_fd, size, err, strerror(-err)); int fd = dup( parcel_fd ); ALOGE_IF(fd==-1, "cannot dup fd=%d, size=%ld, err=%d (%s)", parcel_fd, size, err, strerror(errno)); int access = PROT_READ; if (!(flags & READ_ONLY)) { access |= PROT_WRITE; } Mutex::Autolock _l(mLock); if (mHeapId == -1) { mRealHeap = true; mBase = mmap(0, size, access, MAP_SHARED, fd, offset); if (mBase == MAP_FAILED) { ALOGE("cannot map BpMemoryHeap (binder=%p), size=%ld, fd=%d (%s)", asBinder().get(), size, fd, strerror(errno)); close(fd); } else { mSize = size; mFlags = flags; #ifndef BINDER_COMPAT mOffset = offset; #endif android_atomic_write(fd, &mHeapId); } } } }
// Ensure this proxy has a live mapping of the remote heap. The actual
// transaction/mmap is delegated to the heap returned by find_heap(binder)
// (presumably a per-binder cache — confirm against find_heap); on success we
// copy its mapping and dup its fd, under double-checked locking on mHeapId.
void BpMemoryHeap::assertMapped() const
{
    if (mHeapId == -1) {
        sp<IBinder> binder(const_cast<BpMemoryHeap*>(this)->asBinder());
        sp<BpMemoryHeap> heap(static_cast<BpMemoryHeap*>(find_heap(binder).get()));
        heap->assertReallyMapped();
        if (heap->mBase != MAP_FAILED) {
            Mutex::Autolock _l(mLock);
            // Re-check under the lock: another thread may have mapped us.
            if (mHeapId == -1) {
                mBase = heap->mBase;
                mSize = heap->mSize;
                // dup the fd so our lifetime is independent of the cached heap.
                android_atomic_write( dup( heap->mHeapId ), &mHeapId );
            }
        } else {
            // something went wrong
            free_heap(binder);
        }
    }
}
// Set the global log verbosity level; written atomically since other
// threads read gLogLevel without holding a lock.
static void setLogLevel(int level) {
    android_atomic_write(level, &gLogLevel);
}
// Clear the busy flag for cameraId, allowing a new client to connect
// (see the rationale comment on setCameraBusy).
void CameraService::setCameraFree(int cameraId) {
    android_atomic_write(0, &mBusy[cameraId]);
}
// The reason we need this busy bit is a new CameraService::connect() request // may come in while the previous Client's destructor has not been run or is // still running. If the last strong reference of the previous Client is gone // but the destructor has not been finished, we should not allow the new Client // to be created because we need to wait for the previous Client to tear down // the hardware first. void CameraService::setCameraBusy(int cameraId) { android_atomic_write(1, &mBusy[cameraId]); }
// Publish a new status value into the shared stack; written atomically so
// the client side never observes a torn value.
ssize_t SharedBufferServer::StatusUpdate::operator()() {
    android_atomic_write(status, &stack.status);
    return NO_ERROR;
}
// Clear the busy flag for cameraId, allowing a new client to connect
// (see the rationale comment on setCameraBusy).
void CameraService::setCameraFree(int cameraId) {
    android_atomic_write(0, &mBusy[cameraId]);
    ALOGV("setCameraFree cameraId=%d", cameraId);
}