static int get_pmem_file_info(dev_t* device, ino_t* serial_number)
{
    static int write_values_initiated = 0;
    static int error_occurred_when_retrieving_pmem_file_info = 0;
    static int error_code_produced_when_retrieving_pmem_file_info;
    static int cached_values_present = 0;
    static dev_t pmem_files_device;
    static ino_t pmem_files_serial_number;

    struct stat pmem_file_info;

    if (error_occurred_when_retrieving_pmem_file_info)
    {
        errno = error_code_produced_when_retrieving_pmem_file_info;

        return 0;
    }
    else if (cached_values_present)
    {
        *device = pmem_files_device;
        *serial_number = pmem_files_serial_number;

        return 1;
    }

    if (stat("/dev/pmem_hwb", &pmem_file_info) < 0)
    {
        if (0 == android_atomic_cmpxchg(0, 1, &write_values_initiated))
        {
            error_code_produced_when_retrieving_pmem_file_info = errno;
            android_atomic_write(1, &error_occurred_when_retrieving_pmem_file_info);
        }

        return 0;
    }

    if (0 == android_atomic_cmpxchg(0, 1, &write_values_initiated))
    {
        pmem_files_device = pmem_file_info.st_dev;
        pmem_files_serial_number = pmem_file_info.st_ino;
        android_atomic_write(1, &cached_values_present);
    }

    *device = pmem_file_info.st_dev;
    *serial_number = pmem_file_info.st_ino;

    return 1;
}
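The caching above hinges on android_atomic_cmpxchg() returning 0 only for the thread whose swap actually took effect. A minimal, self-contained sketch of that contract using C++11 std::atomic; emulate_cmpxchg, write_guard and try_claim_write are illustrative names, not part of the code above.

#include <atomic>
#include <cstdint>

// Hypothetical stand-in for android_atomic_cmpxchg() from <cutils/atomic.h>:
// it returns 0 only if *addr still held oldvalue and was replaced by newvalue.
// That "0 means the swap happened" convention is what the one-time guard
// in get_pmem_file_info() relies on.
static int emulate_cmpxchg(int32_t oldvalue, int32_t newvalue,
                           std::atomic<int32_t>* addr)
{
    return addr->compare_exchange_strong(oldvalue, newvalue) ? 0 : 1;
}

// One-time publication guard, as in get_pmem_file_info(): only the thread
// that wins the 0 -> 1 swap fills in the cached values or the error code.
static std::atomic<int32_t> write_guard{0};

static bool try_claim_write()
{
    return emulate_cmpxchg(0, 1, &write_guard) == 0;
}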
int gralloc_unlock(gralloc_module_t const* module,
                   buffer_handle_t handle)
{
    if (private_handle_t::validate(handle) < 0)
        return -EINVAL;

    private_handle_t* hnd = (private_handle_t*)handle;
    int32_t current_value, new_value;

    do {
        current_value = hnd->lockState;
        new_value = current_value;

        if (current_value & private_handle_t::LOCK_STATE_WRITE) {
            // locked for write
            if (hnd->writeOwner == gettid()) {
                hnd->writeOwner = 0;
                new_value &= ~private_handle_t::LOCK_STATE_WRITE;
            }
        }

        if ((new_value & private_handle_t::LOCK_STATE_READ_MASK) == 0) {
            LOGE("handle %p not locked", handle);
            return -EINVAL;
        }

        new_value--;

    } while (android_atomic_cmpxchg(current_value, new_value,
                                    (volatile int32_t*)&hnd->lockState));

    return 0;
}
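gralloc_unlock() follows the standard compare-and-swap retry shape: snapshot lockState, compute the released value, and redo the whole computation if another thread changed the word first. Below is a hedged, portable sketch of the same shape with C++11 atomics; the EX_LOCK_READ_MASK constant and the reader-count layout are assumptions mirroring the LOCK_STATE_* constants, not the real gralloc definitions.

#include <atomic>
#include <cstdint>

// Illustrative lock word: bit 31 flags a writer, the low 31 bits count readers.
static const int32_t EX_LOCK_READ_MASK = 0x7fffffff;

static std::atomic<int32_t> ex_lock_state{0};

// Release one reader reference: snapshot, compute, CAS, retry on contention.
// Returns 0 on success, -1 if the word shows no readers (the "not locked"
// error path in gralloc_unlock()).
static int ex_unlock_read()
{
    int32_t current, next;
    do {
        current = ex_lock_state.load();
        if ((current & EX_LOCK_READ_MASK) == 0)
            return -1;              // nothing to release
        next = current - 1;         // drop one reader
    } while (!ex_lock_state.compare_exchange_weak(current, next));
    return 0;
}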
void BBinder::attachObject(
    const void* objectID, void* object, void* cleanupCookie,
    object_cleanup_func func)
{
    Extras* e = mExtras;

    if (!e) {
        e = new Extras;
#ifdef __LP64__
        if (android_atomic_release_cas64(0, reinterpret_cast<int64_t>(e),
                reinterpret_cast<volatile int64_t*>(&mExtras)) != 0) {
#else
        if (android_atomic_cmpxchg(0, reinterpret_cast<int32_t>(e),
                reinterpret_cast<volatile int32_t*>(&mExtras)) != 0) {
#endif
            delete e;
            e = mExtras;
        }
        if (e == 0) return; // out of memory
    }

    AutoMutex _l(e->mLock);
    e->mObjects.attach(objectID, object, cleanupCookie, func);
}

void* BBinder::findObject(const void* objectID) const
{
    Extras* e = mExtras;
    if (!e) return NULL;

    AutoMutex _l(e->mLock);
    return e->mObjects.find(objectID);
}
int gralloc_unlock_pmem(gralloc_module_t const* module,
        struct hwmem_gralloc_buf_handle_t* hnd)
{
    int32_t current_value, new_value;

    do {
        current_value = hnd->lockState;
        new_value = current_value;

        if (current_value & LOCK_STATE_WRITE) {
            // locked for write
            if (hnd->writeOwner == gettid()) {
                hnd->writeOwner = 0;
                new_value &= ~LOCK_STATE_WRITE;
            }
        }

        if ((new_value & LOCK_STATE_READ_MASK) == 0) {
            ALOGE("handle %p not locked", hnd);
            return -EINVAL;
        }

        new_value--;

    } while (android_atomic_cmpxchg(current_value, new_value,
            (volatile int32_t*)&hnd->lockState));

    return 0;
}
ssize_t SharedBufferServer::RetireUpdate::operator()() {
    int32_t head = stack.head;
    if (uint32_t(head) >= SharedBufferStack::NUM_BUFFER_MAX)
        return BAD_VALUE;

    // Decrement the number of queued buffers 
    int32_t queued;
    do {
        queued = stack.queued;
        if (queued == 0) {
            return NOT_ENOUGH_DATA;
        }
    } while (android_atomic_cmpxchg(queued, queued-1, &stack.queued));
    
    // lock the buffer before advancing head, which automatically unlocks
    // the buffer we preventively locked upon entering this function

    head = (head + 1) % numBuffers;
    const int8_t headBuf = stack.index[head];
    stack.headBuf = headBuf;

    // head is only modified here, so we don't need to use cmpxchg
    android_atomic_write(head, &stack.head);

    // now that head has moved, we can increment the number of available buffers
    android_atomic_inc(&stack.available);
    return head;
}
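RetireUpdate first claims one queued buffer with a "decrement only if nonzero" loop, bailing out instead of letting the count underflow. A standalone sketch of that primitive follows; queued_count and take_one_queued are illustrative names.

#include <atomic>
#include <cstdint>

static std::atomic<int32_t> queued_count{0};

// Atomically take one item from queued_count.  Returns true if an item was
// claimed, false if the count was already zero (the NOT_ENOUGH_DATA case in
// RetireUpdate above).
static bool take_one_queued()
{
    int32_t queued = queued_count.load();
    do {
        if (queued == 0)
            return false;           // nothing to retire
    } while (!queued_count.compare_exchange_weak(queued, queued - 1));
    return true;
}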
UINT64 GKI_now_us()
{
    struct timespec ts_now;

#ifdef HAVE_ANDROID_OS
    static int s_fd = -1;
    int result;

    if (s_fd == -1) {
        int fd = open("/dev/alarm", O_RDONLY);
        if (android_atomic_cmpxchg(-1, fd, &s_fd)) {
            close(fd);
        }
    }

    result = ioctl(s_fd,
            ANDROID_ALARM_GET_TIME(ANDROID_ALARM_ELAPSED_REALTIME), &ts_now);
    if (result != 0) {
#endif
    clock_gettime(CLOCK_BOOTTIME, &ts_now);
#ifdef HAVE_ANDROID_OS
    }
#endif

    return ((UINT64)ts_now.tv_sec * USEC_PER_SEC) + ((UINT64)ts_now.tv_nsec / NSEC_PER_USEC);
}
Example #7
/*
 * native public static long elapsedRealtime();
 */
int64_t elapsedRealtime()
{
#ifdef HAVE_ANDROID_OS
    static int s_fd = -1;

    if (s_fd == -1) {
        int fd = open("/dev/alarm", O_RDONLY);
        if (android_atomic_cmpxchg(-1, fd, &s_fd)) {
            close(fd);
        }
    }

    struct timespec ts;
    int result = ioctl(s_fd,
            ANDROID_ALARM_GET_TIME(ANDROID_ALARM_ELAPSED_REALTIME), &ts);

    if (result == 0) {
        int64_t when = seconds_to_nanoseconds(ts.tv_sec) + ts.tv_nsec;
        return (int64_t) nanoseconds_to_milliseconds(when);
    } else {
        // XXX: there was an error, probably because the driver didn't
        // exist ... this should return
        // a real error, like an exception!
        int64_t when = systemTime(SYSTEM_TIME_MONOTONIC);
        return (int64_t) nanoseconds_to_milliseconds(when);
    }
#else
    int64_t when = systemTime(SYSTEM_TIME_MONOTONIC);
    return (int64_t) nanoseconds_to_milliseconds(when);
#endif
}
Example #8
int gralloc_unlock(gralloc_module_t const* module, 
        buffer_handle_t handle)
{
    if (private_handle_t::validate(handle) < 0)
        return -EINVAL;

    private_handle_t* hnd = (private_handle_t*)handle;
    int32_t current_value, new_value;


#ifndef BOARD_NO_CACHED_BUFFERS
    if (hnd->flags & private_handle_t::PRIV_FLAGS_NEEDS_FLUSH) {
        struct pmem_region region;
        int err;

        region.offset = hnd->offset;
        region.len = hnd->size;
        err = ioctl(hnd->fd, PMEM_CACHE_FLUSH, &region);
        if (err < 0) {
            struct pmem_addr pmem_addr;

            pmem_addr.vaddr = hnd->base;
            pmem_addr.offset = hnd->offset;
            pmem_addr.length = hnd->size;
            err = ioctl(hnd->fd, PMEM_CLEAN_CACHES, &pmem_addr);
        }

        LOGE_IF(err < 0, "cannot flush handle %p (offs=%x len=%x)\n",
                hnd, hnd->offset, hnd->size);
        hnd->flags &= ~private_handle_t::PRIV_FLAGS_NEEDS_FLUSH;
    }
#endif

    do {
        current_value = hnd->lockState;
        new_value = current_value;

        if (current_value & private_handle_t::LOCK_STATE_WRITE) {
            // locked for write
            if (hnd->writeOwner == gettid()) {
                hnd->writeOwner = 0;
                new_value &= ~private_handle_t::LOCK_STATE_WRITE;
            }
        }

        if ((new_value & private_handle_t::LOCK_STATE_READ_MASK) == 0) {
            LOGE("handle %p not locked", handle);
            return -EINVAL;
        }

        new_value--;

    } while (android_atomic_cmpxchg(current_value, new_value, 
            (volatile int32_t*)&hnd->lockState));

    return 0;
}
Example #9
int gralloc_unlock(gralloc_module_t const* module, 
        buffer_handle_t handle)
{
    if (private_handle_t::validate(handle) < 0)
        return -EINVAL;

    private_handle_t* hnd = (private_handle_t*)handle;
    int32_t current_value, new_value;

    if (hnd->flags & private_handle_t::PRIV_FLAGS_NEEDS_FLUSH) {
        int err = 0;
        if (hnd->flags & (private_handle_t::PRIV_FLAGS_USES_PMEM |
                          private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP)) {
            struct pmem_addr pmem_addr;
            pmem_addr.vaddr = hnd->base;
            pmem_addr.offset = hnd->offset;
            pmem_addr.length = hnd->size;
            err = ioctl(hnd->fd, PMEM_CLEAN_CACHES, &pmem_addr);
        } else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ASHMEM) {
            err = ioctl(hnd->fd, ASHMEM_CACHE_FLUSH_RANGE, NULL);
        }

        ALOGE_IF(err < 0, "cannot flush handle %p (offs=%x len=%x, flags = 0x%x) err=%s\n",
                hnd, hnd->offset, hnd->size, hnd->flags, strerror(errno));
        hnd->flags &= ~private_handle_t::PRIV_FLAGS_NEEDS_FLUSH;
    }

    do {
        current_value = hnd->lockState;
        new_value = current_value;

        if (current_value & private_handle_t::LOCK_STATE_WRITE) {
            // locked for write
            if (hnd->writeOwner == gettid()) {
                hnd->writeOwner = 0;
                new_value &= ~private_handle_t::LOCK_STATE_WRITE;
            }
        }

        if ((new_value & private_handle_t::LOCK_STATE_READ_MASK) == 0) {
            ALOGE("handle %p not locked", handle);
            return -EINVAL;
        }

        new_value--;

    } while (android_atomic_cmpxchg(current_value, new_value, 
            (volatile int32_t*)&hnd->lockState));

    return 0;
}
Example #10
/*
 * native public static long elapsedRealtimeNano();
 */
int64_t elapsedRealtimeNano()
{
#ifdef HAVE_ANDROID_OS
    struct timespec ts;
    int result;
    int64_t timestamp;
#if DEBUG_TIMESTAMP
    static volatile int64_t prevTimestamp;
    static volatile int prevMethod;
#endif

    static int s_fd = -1;

    if (s_fd == -1) {
        int fd = open("/dev/alarm", O_RDONLY);
        if (android_atomic_cmpxchg(-1, fd, &s_fd)) {
            close(fd);
        }
    }

    result = ioctl(s_fd,
            ANDROID_ALARM_GET_TIME(ANDROID_ALARM_ELAPSED_REALTIME), &ts);

    if (result == 0) {
        timestamp = seconds_to_nanoseconds(ts.tv_sec) + ts.tv_nsec;
        checkTimeStamps(timestamp, &prevTimestamp, &prevMethod, METHOD_IOCTL);
        return timestamp;
    }

    // /dev/alarm doesn't exist, fallback to CLOCK_BOOTTIME
    result = clock_gettime(CLOCK_BOOTTIME, &ts);
    if (result == 0) {
        timestamp = seconds_to_nanoseconds(ts.tv_sec) + ts.tv_nsec;
        checkTimeStamps(timestamp, &prevTimestamp, &prevMethod,
                        METHOD_CLOCK_GETTIME);
        return timestamp;
    }

    // XXX: there was an error, probably because the driver didn't
    // exist ... this should return
    // a real error, like an exception!
    timestamp = systemTime(SYSTEM_TIME_MONOTONIC);
    checkTimeStamps(timestamp, &prevTimestamp, &prevMethod,
                    METHOD_SYSTEMTIME);
    return timestamp;
#else
    return systemTime(SYSTEM_TIME_MONOTONIC);
#endif
}
Example #11
void BBinder::attachObject(
    const void* objectID, void* object, void* cleanupCookie,
    object_cleanup_func func)
{
    Extras* e = mExtras;

    if (!e) {
        e = new Extras;
        if (android_atomic_cmpxchg(0, reinterpret_cast<int32_t>(e),
                reinterpret_cast<volatile int32_t*>(&mExtras)) != 0) {
            delete e;
            e = mExtras;
        }
        if (e == 0) return; // out of memory
    }

    AutoMutex _l(e->mLock);
    e->mObjects.attach(objectID, object, cleanupCookie, func);
}
Example #12
int gralloc_unlock(gralloc_module_t const* module, 
        buffer_handle_t handle)
{
    if (private_handle_t::validate(handle) < 0)
        return -EINVAL;

    private_handle_t* hnd = (private_handle_t*)handle;
    int32_t current_value, new_value;

    do {
        current_value = hnd->lockState;
        new_value = current_value;

        if (current_value & private_handle_t::LOCK_STATE_WRITE) {
            // locked for write
            if (hnd->writeOwner == gettid()) {
                hnd->writeOwner = 0;
                new_value &= ~private_handle_t::LOCK_STATE_WRITE;
            }
        }

        if ((new_value & private_handle_t::LOCK_STATE_READ_MASK) == 0) {
            LOGE("handle %p not locked", handle);
            return -EINVAL;
        }

        new_value--;

    } while (android_atomic_cmpxchg(current_value, new_value, 
            (volatile int32_t*)&hnd->lockState));

    // If this was locked for a software write, send an ioctl to flush the cache
    if (hnd->swWrite == 1) {
        struct pmem_addr pmem_addr;
        pmem_addr.vaddr = hnd->base;
        pmem_addr.offset = hnd->offset;
        pmem_addr.length = hnd->size;
        ioctl(hnd->fd, PMEM_CLEAN_CACHES, &pmem_addr);
    }

    return 0;
}
Example #13
bool RefBase::weakref_type::attemptIncWeak(const void* id)
{
    weakref_impl* const impl = static_cast<weakref_impl*>(this);

    int32_t curCount = impl->mWeak;
    ALOG_ASSERT(curCount >= 0, "attemptIncWeak called on %p after underflow",
               this);
    while (curCount > 0) {
        if (android_atomic_cmpxchg(curCount, curCount+1, &impl->mWeak) == 0) {
            break;
        }
        curCount = impl->mWeak;
    }

    if (curCount > 0) {
        impl->addWeakRef(id);
    }

    return curCount > 0;
}
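attemptIncWeak() is the classic "increment only while positive" loop: the weak count may only be bumped if it has not already dropped to zero. A compact sketch of that rule follows, with illustrative names rather than the RefBase internals.

#include <atomic>
#include <cstdint>

static std::atomic<int32_t> weak_count{0};

// Increment weak_count only if it is still positive.  Returns true if the
// increment happened, false if the count had already reached zero, which is
// exactly the condition attemptIncWeak() reports to its caller.
static bool try_inc_if_positive()
{
    int32_t cur = weak_count.load();
    while (cur > 0) {
        if (weak_count.compare_exchange_weak(cur, cur + 1))
            return true;
        // cur was reloaded by compare_exchange_weak; re-check and retry.
    }
    return false;
}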
Example #14
int gralloc_lock(gralloc_module_t const* module,
		buffer_handle_t handle, int usage,
		int l, int t, int w, int h,
		void** vaddr)
{
	DEBUG_ENTER();
	if (private_handle_t::validate(handle) < 0)
		return -EINVAL;

	int err = 0;
	private_handle_t* hnd = (private_handle_t*)handle;
	int32_t current_value, new_value;
	int retry;

	do {
		current_value = hnd->lockState;
		new_value = current_value;

		if (current_value & private_handle_t::LOCK_STATE_WRITE) {
			// already locked for write
			LOGE("handle %p already locked for write", handle);
			return -EBUSY;
		} else if (current_value & private_handle_t::LOCK_STATE_READ_MASK) {
			// already locked for read
			if (usage & (GRALLOC_USAGE_SW_WRITE_MASK | GRALLOC_USAGE_HW_RENDER)) {
				LOGE("handle %p already locked for read", handle);
				return -EBUSY;
			} else {
				// this is not an error
				//LOGD("%p already locked for read... count = %d",
				//        handle, (current_value & ~(1<<31)));
			}
		}

		// not currently locked
		if (usage & (GRALLOC_USAGE_SW_WRITE_MASK | GRALLOC_USAGE_HW_RENDER)) {
			// locking for write
			new_value |= private_handle_t::LOCK_STATE_WRITE;
		}
		new_value++;

		retry = android_atomic_cmpxchg(current_value, new_value,
					(volatile int32_t*)&hnd->lockState);
	} while (retry);

	if (new_value & private_handle_t::LOCK_STATE_WRITE) {
		// locking for write, store the tid
		hnd->writeOwner = gettid();
	}

	// if requesting sw write for non-framebuffer handles, flag for
	// flushing at unlock
	if ((usage & GRALLOC_USAGE_SW_WRITE_MASK) &&
			!(hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)) {
		hnd->flags |= private_handle_t::PRIV_FLAGS_NEEDS_FLUSH;
	}

	if (usage & (GRALLOC_USAGE_SW_READ_MASK | GRALLOC_USAGE_SW_WRITE_MASK)) {
		if (!(current_value & private_handle_t::LOCK_STATE_MAPPED)) {
			// we need to map for real
			pthread_mutex_t* const lock = &sMapLock;
			pthread_mutex_lock(lock);
			if (!(hnd->lockState & private_handle_t::LOCK_STATE_MAPPED)) {
				err = gralloc_map(module, handle, vaddr);
				if (err == 0) {
					android_atomic_or(private_handle_t::LOCK_STATE_MAPPED,
							(volatile int32_t*)&(hnd->lockState));
				}
			}
			pthread_mutex_unlock(lock);
		}
		*vaddr = (void*)hnd->base;
	}

	DEBUG_LEAVE();
	return err;
}
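gralloc_lock() runs the same CAS loop in the opposite direction: it refuses with -EBUSY when the snapshot shows an incompatible owner, otherwise it sets the write bit when needed, bumps the reference count, and retries only if the word changed underneath it. A compact, self-contained sketch of that acquire side; acq_lock_state and the ACQ_LOCK_* constants are assumptions standing in for the LOCK_STATE_* layout, not the gralloc definitions.

#include <atomic>
#include <cerrno>
#include <cstdint>

static const int32_t ACQ_LOCK_WRITE     = INT32_MIN;   // bit 31: writer present
static const int32_t ACQ_LOCK_READ_MASK = 0x7fffffff;  // low bits: reader count

static std::atomic<int32_t> acq_lock_state{0};

// Acquire the lock: fail with -EBUSY if a writer already holds it, or if we
// want to write while readers exist; otherwise set the write bit when asked
// and add one reference, retrying the CAS on contention.
static int acquire_lock(bool for_write)
{
    int32_t current, next;
    do {
        current = acq_lock_state.load();
        if (current & ACQ_LOCK_WRITE)
            return -EBUSY;          // already locked for write
        if (for_write && (current & ACQ_LOCK_READ_MASK))
            return -EBUSY;          // readers present, cannot take write lock
        next = current + 1;         // one more reference
        if (for_write)
            next |= ACQ_LOCK_WRITE; // mark the writer
    } while (!acq_lock_state.compare_exchange_weak(current, next));
    return 0;
}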
Example #15
bool RefBase::weakref_type::attemptIncStrong(const void* id)
{
    incWeak(id);
    
    weakref_impl* const impl = static_cast<weakref_impl*>(this);
    int32_t curCount = impl->mStrong;

    ALOG_ASSERT(curCount >= 0,
            "attemptIncStrong called on %p after underflow", this);

    while (curCount > 0 && curCount != INITIAL_STRONG_VALUE) {
        // we're in the easy/common case of promoting a weak-reference
        // from an existing strong reference.
        if (android_atomic_cmpxchg(curCount, curCount+1, &impl->mStrong) == 0) {
            break;
        }
        // the strong count has changed on us, we need to re-assert our
        // situation.
        curCount = impl->mStrong;
    }
    
    if (curCount <= 0 || curCount == INITIAL_STRONG_VALUE) {
        // we're now in the harder case of either:
        // - there never was a strong reference on us
        // - or, all strong references have been released
        if ((impl->mFlags&OBJECT_LIFETIME_WEAK) == OBJECT_LIFETIME_STRONG) {
            // this object has a "normal" life-time, i.e.: it gets destroyed
            // when the last strong reference goes away
            if (curCount <= 0) {
                // the last strong-reference got released, the object cannot
                // be revived.
                decWeak(id);
                return false;
            }

            // here, curCount == INITIAL_STRONG_VALUE, which means
            // there never was a strong-reference, so we can try to
            // promote this object; we need to do that atomically.
            while (curCount > 0) {
                if (android_atomic_cmpxchg(curCount, curCount + 1,
                        &impl->mStrong) == 0) {
                    break;
                }
                // the strong count has changed on us, we need to re-assert our
                // situation (e.g.: another thread has inc/decStrong'ed us)
                curCount = impl->mStrong;
            }

            if (curCount <= 0) {
                // promote() failed, some other thread destroyed us in the
                // meantime (i.e.: strong count reached zero).
                decWeak(id);
                return false;
            }
        } else {
            // this object has an "extended" life-time, i.e.: it can be
            // revived from a weak-reference only.
            // Ask the object's implementation if it agrees to be revived
            if (!impl->mBase->onIncStrongAttempted(FIRST_INC_STRONG, id)) {
                // it didn't so give-up.
                decWeak(id);
                return false;
            }
            // grab a strong-reference, which is always safe due to the
            // extended life-time.
            curCount = android_atomic_inc(&impl->mStrong);
        }

        // If the strong reference count has already been incremented by
        // someone else, the implementor of onIncStrongAttempted() is holding
        // an unneeded reference.  So call onLastStrongRef() here to remove it.
        // (No, this is not pretty.)  Note that we MUST NOT do this if we
        // are in fact acquiring the first reference.
        if (curCount > 0 && curCount < INITIAL_STRONG_VALUE) {
            impl->mBase->onLastStrongRef(id);
        }
    }
    
    impl->addStrongRef(id);

#if PRINT_REFS
    ALOGD("attemptIncStrong of %p from %p: cnt=%d\n", this, id, curCount);
#endif

    // now we need to fix-up the count if it was INITIAL_STRONG_VALUE
    // this must be done safely, i.e.: handle the case where several threads
    // were here in attemptIncStrong().
    curCount = impl->mStrong;
    while (curCount >= INITIAL_STRONG_VALUE) {
        ALOG_ASSERT(curCount > INITIAL_STRONG_VALUE,
                "attemptIncStrong in %p underflowed to INITIAL_STRONG_VALUE",
                this);
        if (android_atomic_cmpxchg(curCount, curCount-INITIAL_STRONG_VALUE,
                &impl->mStrong) == 0) {
            break;
        }
        // the strong-count changed on us, we need to re-assert the situation,
        // for e.g.: it's possible the fix-up happened in another thread.
        curCount = impl->mStrong;
    }

    return true;
}
Example #16
0
bool RefBase::weakref_type::attemptIncStrong(const void* id)
{
    incWeak(id);
    
    weakref_impl* const impl = static_cast<weakref_impl*>(this);
    
    int32_t curCount = impl->mStrong;
    ALOG_ASSERT(curCount >= 0, "attemptIncStrong called on %p after underflow",
               this);
    while (curCount > 0 && curCount != INITIAL_STRONG_VALUE) {
        if (android_atomic_cmpxchg(curCount, curCount+1, &impl->mStrong) == 0) {
            break;
        }
        curCount = impl->mStrong;
    }
    
    if (curCount <= 0 || curCount == INITIAL_STRONG_VALUE) {
        bool allow;
        if (curCount == INITIAL_STRONG_VALUE) {
            // Attempting to acquire first strong reference...  this is allowed
            // if the object does NOT have a longer lifetime (meaning the
            // implementation doesn't need to see this), or if the implementation
            // allows it to happen.
            allow = (impl->mFlags&OBJECT_LIFETIME_WEAK) != OBJECT_LIFETIME_WEAK
                  || impl->mBase->onIncStrongAttempted(FIRST_INC_STRONG, id);
        } else {
            // Attempting to revive the object...  this is allowed
            // if the object DOES have a longer lifetime (so we can safely
            // call the object with only a weak ref) and the implementation
            // allows it to happen.
            allow = (impl->mFlags&OBJECT_LIFETIME_WEAK) == OBJECT_LIFETIME_WEAK
                  && impl->mBase->onIncStrongAttempted(FIRST_INC_STRONG, id);
        }
        if (!allow) {
            decWeak(id);
            return false;
        }
        curCount = android_atomic_inc(&impl->mStrong);

        // If the strong reference count has already been incremented by
        // someone else, the implementor of onIncStrongAttempted() is holding
        // an unneeded reference.  So call onLastStrongRef() here to remove it.
        // (No, this is not pretty.)  Note that we MUST NOT do this if we
        // are in fact acquiring the first reference.
        if (curCount > 0 && curCount < INITIAL_STRONG_VALUE) {
            impl->mBase->onLastStrongRef(id);
        }
    }
    
    impl->addStrongRef(id);

#if PRINT_REFS
    ALOGD("attemptIncStrong of %p from %p: cnt=%d\n", this, id, curCount);
#endif

    if (curCount == INITIAL_STRONG_VALUE) {
        android_atomic_add(-INITIAL_STRONG_VALUE, &impl->mStrong);
        impl->mBase->onFirstRef();
    }
    
    return true;
}
Example #17
int gralloc_lock_pmem(gralloc_module_t const* module,
        struct hwmem_gralloc_buf_handle_t* hnd, int usage,
        int l, int t, int w, int h,
        void** vaddr)
{
    int err = 0;
    int32_t current_value, new_value;
    int retry;

    do {
        current_value = hnd->lockState;
        new_value = current_value;

        if (current_value & LOCK_STATE_WRITE) {
            // already locked for write
            ALOGE("handle %p already locked for write", hnd);
            return -EBUSY;
        } else if (current_value & LOCK_STATE_READ_MASK) {
            // already locked for read
            if (usage & (GRALLOC_USAGE_SW_WRITE_MASK | GRALLOC_USAGE_HW_RENDER)) {
                ALOGE("handle %p already locked for read", hnd);
                return -EBUSY;
            } else {
                // this is not an error
                //ALOGD("%p already locked for read... count = %d",
                //        hnd, (current_value & ~(1<<31)));
            }
        }

        // not currently locked
        if (usage & (GRALLOC_USAGE_SW_WRITE_MASK | GRALLOC_USAGE_HW_RENDER)) {
            // locking for write
            new_value |= LOCK_STATE_WRITE;
        }
        new_value++;

        retry = android_atomic_cmpxchg(current_value, new_value,
                (volatile int32_t*)&hnd->lockState);
    } while (retry);

    if (new_value & LOCK_STATE_WRITE) {
        // locking for write, store the tid
        hnd->writeOwner = gettid();
    }

    if (usage & (GRALLOC_USAGE_SW_READ_MASK | GRALLOC_USAGE_SW_WRITE_MASK)) {
        if (!(current_value & LOCK_STATE_MAPPED)) {
            // we need to map for real
            pthread_mutex_t* const lock = &sMapLock;
            pthread_mutex_lock(lock);
            if (!(hnd->lockState & LOCK_STATE_MAPPED)) {
                err = gralloc_map(module, hnd, vaddr);
                if (err == 0) {
                    android_atomic_or(LOCK_STATE_MAPPED,
                            (volatile int32_t*)&(hnd->lockState));
                }
            }
            pthread_mutex_unlock(lock);
        }
        *vaddr = (void*)hnd->base_addr;
    }

    return err;
}
Example #18
int OSAtomicCompareAndSwapInt(int oldValue, int newValue, volatile int* target) {
    return !android_atomic_cmpxchg(oldValue, newValue, target);
}
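The shim above only flips the return convention: OSAtomicCompareAndSwapInt reports success as nonzero, while android_atomic_cmpxchg reports success as 0. A self-contained sketch of the two conventions side by side, with std::atomic standing in for either library; both function names here are illustrative.

#include <atomic>

// android_atomic_cmpxchg convention (as assumed throughout this listing):
// 0 means "swap happened", nonzero means "value had already changed".
static int cmpxchg_android_style(int oldValue, int newValue,
                                 std::atomic<int>* target)
{
    return target->compare_exchange_strong(oldValue, newValue) ? 0 : 1;
}

// OSAtomicCompareAndSwapInt convention: nonzero means "swap happened".
static int cas_osatomic_style(int oldValue, int newValue,
                              std::atomic<int>* target)
{
    return !cmpxchg_android_style(oldValue, newValue, target);
}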
int gralloc_lock(gralloc_module_t const* module,
        buffer_handle_t handle, int usage,
        int l, int t, int w, int h,
        void** vaddr)
{
    gralloc_module_t const* gralloc_viv = gralloc_get(module, handle);
    if(gralloc_viv){
        return gralloc_viv->lock(gralloc_viv, handle, usage, l, t, w, h, vaddr);
    }

    if (private_handle_t::validate(handle) < 0)
        return -EINVAL;

    int err = 0;
    private_handle_t* hnd = (private_handle_t*)handle;
    int32_t current_value, new_value;
    int retry;

    do {
        current_value = hnd->lockState;
        new_value = current_value;

        if (current_value & private_handle_t::LOCK_STATE_WRITE) {
            // already locked for write 
            LOGE("handle %p already locked for write", handle);
            return -EBUSY;
        } else if (current_value & private_handle_t::LOCK_STATE_READ_MASK) {
            // already locked for read
            if (usage & (GRALLOC_USAGE_SW_WRITE_MASK | GRALLOC_USAGE_HW_RENDER)) {
                LOGE("handle %p already locked for read", handle);
                return -EBUSY;
            } else {
                // this is not an error
                //LOGD("%p already locked for read... count = %d", 
                //        handle, (current_value & ~(1<<31)));
            }
        }

        // not currently locked
        if (usage & (GRALLOC_USAGE_SW_WRITE_MASK | GRALLOC_USAGE_HW_RENDER)) {
            // locking for write
            new_value |= private_handle_t::LOCK_STATE_WRITE;
        }
        new_value++;

        retry = android_atomic_cmpxchg(current_value, new_value, 
                (volatile int32_t*)&hnd->lockState);
    } while (retry);

    if (new_value & private_handle_t::LOCK_STATE_WRITE) {
        // locking for write, store the tid
        hnd->writeOwner = getpid();
    }

    if (usage & (GRALLOC_USAGE_SW_READ_MASK | GRALLOC_USAGE_SW_WRITE_MASK)) {
        if (!(current_value & private_handle_t::LOCK_STATE_MAPPED)) {
            // we need to map for real
            pthread_mutex_t* const lock = &sMapLock;
            pthread_mutex_lock(lock);
            if (!(hnd->lockState & private_handle_t::LOCK_STATE_MAPPED)) {
                err = gralloc_map(module, handle, vaddr);
                if (err == 0) {
                    android_atomic_or(private_handle_t::LOCK_STATE_MAPPED,
                            (volatile int32_t*)&(hnd->lockState));
                }
            }
            pthread_mutex_unlock(lock);
        }
        *vaddr = (void*)hnd->base;
    }

    return err;
}