/*
 * gralloc_unlock - the CPU is done with a software buffer.
 *
 * Nothing to unmap in this implementation; the only work is cache
 * maintenance: flush the CPU data cache for every ION fd backing the
 * buffer so a subsequent DMA/device read observes the CPU writes.
 *
 * Returns 0 on success, -EINVAL for an invalid handle.
 */
int gralloc_unlock(gralloc_module_t const* module, buffer_handle_t handle)
{
    /* Validate BEFORE dereferencing: the previous code read hnd->fd off
     * the unvalidated handle first, so a corrupt handle was dereferenced
     * before the -EINVAL path could reject it. */
    if (private_handle_t::validate(handle) < 0)
        return -EINVAL;

    private_handle_t* hnd = (private_handle_t*)handle;

    /* Primary plane is always present; fd1/fd2 only for multi-plane
     * formats (negative means "no fd"). */
    ion_sync_fd(getIonFd(module), hnd->fd);
    if (hnd->fd1 >= 0)
        ion_sync_fd(getIonFd(module), hnd->fd1);
    if (hnd->fd2 >= 0)
        ion_sync_fd(getIonFd(module), hnd->fd2);

    return 0;
}
/*
 * DMAWriteCachedNeedsSync: for every heap, allocate a CACHED |
 * CACHED_NEEDS_SYNC buffer, dirty the CPU cache over the mapping, DMA a
 * known pattern into the buffer, ion_sync_fd() to invalidate the stale
 * cache lines, and verify the CPU reads back the DMA-written pattern.
 */
TEST_F(Device, DMAWriteCachedNeedsSync)
{
    /* Over-allocate so we can carve out a deliberately misaligned source:
     * page-aligned base + 1 KiB offset. */
    auto alloc_ptr = std::make_unique<char[]>(8192 + 1024);
    void *buf = (void *)(ALIGN((unsigned long)alloc_ptr.get(), 4096) + 1024);

    for (int i = 0; i < 4096; i++)
        ((char *)buf)[i] = i;

    for (unsigned int heapMask : m_allHeaps) {
        SCOPED_TRACE(::testing::Message() << "heap " << heapMask);
        int map_fd = -1;
        unsigned int flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;

        ASSERT_EQ(0, ion_alloc_fd(m_ionFd, 4096, 0, heapMask, flags, &map_fd));
        ASSERT_GE(map_fd, 0);

        void *ptr;
        ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
        /* mmap() signals failure with MAP_FAILED ((void *)-1), never NULL;
         * the old "ptr != NULL" assertion could not detect a failed map. */
        ASSERT_TRUE(ptr != MAP_FAILED);

        /* Fill the cache with stale data so a missing invalidate shows up. */
        dirtyCache(ptr, 4096);

        writeDMA(map_fd, buf, 4096);

        /* Sync so the CPU sees the DMA-written bytes, not stale cache. */
        ion_sync_fd(m_ionFd, map_fd);

        for (int i = 0; i < 4096; i++)
            ASSERT_EQ((char)i, ((char *)ptr)[i]) << i;

        ASSERT_EQ(0, munmap(ptr, 4096));
        ASSERT_EQ(0, close(map_fd));
    }
}
static int gralloc_unlock(gralloc_module_t const* module, buffer_handle_t handle) { MALI_IGNORE(module); if (private_handle_t::validate(handle) < 0) { AERR("Unlocking invalid buffer 0x%p, returning error", handle); return -EINVAL; } private_handle_t *hnd = (private_handle_t *)handle; int32_t current_value; int32_t new_value; int retry; if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_UMP && hnd->writeOwner) { #if GRALLOC_ARM_UMP_MODULE ump_cpu_msync_now((ump_handle)hnd->ump_mem_handle, UMP_MSYNC_CLEAN_AND_INVALIDATE, (void *)hnd->base, hnd->size); #else AERR("Buffer 0x%p is UMP type but it is not supported", hnd); #endif } else if ( hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION && hnd->writeOwner) { #if GRALLOC_ARM_DMA_BUF_MODULE private_module_t *m = (private_module_t*)module; ion_sync_fd(m->ion_client, hnd->share_fd); #endif } return 0; }
unsigned int IonGetAddr(void *handle) { unsigned int phy_adr=0; struct ion_handle *handle_ion; private_handle_t* hnd = NULL; SUNXI_hwcdev_context_t *Globctx = &gSunxiHwcDevice; Globctx->ion_fd = ion_open(); if( Globctx->ion_fd != -1 ) { hnd = (private_handle_t*)handle; ion_import(Globctx->ion_fd,hnd->share_fd, &handle_ion); phy_adr= (unsigned int)ion_getphyadr(Globctx->ion_fd,(void *)(handle_ion)); ion_sync_fd(Globctx->ion_fd,hnd->share_fd); ion_close(Globctx->ion_fd); Globctx->ion_fd = -1; } return phy_adr; }
static int gralloc_unlock(gralloc_module_t const* module, buffer_handle_t handle) { if (private_handle_t::validate(handle) < 0) { AERR( "Unlocking invalid buffer 0x%x, returning error", (int)handle ); return -EINVAL; } private_handle_t* hnd = (private_handle_t*)handle; int32_t current_value; int32_t new_value; int retry; if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_UMP && hnd->writeOwner) { #if GRALLOC_ARM_UMP_MODULE ump_cpu_msync_now((ump_handle)hnd->ump_mem_handle, UMP_MSYNC_CLEAN_AND_INVALIDATE, (void*)hnd->base, hnd->size); #else AERR( "Buffer 0x%x is UMP type but it is not supported", (unsigned int)hnd ); #endif } else if ( hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION && hnd->writeOwner) { #if GRALLOC_ARM_DMA_BUF_MODULE hw_module_t * pmodule = NULL; private_module_t *m=NULL; if (hw_get_module(GRALLOC_HARDWARE_MODULE_ID, (const hw_module_t **)&pmodule) == 0) { m = reinterpret_cast<private_module_t *>(pmodule); ion_sync_fd(m->ion_client, hnd->share_fd); } else { AERR("Couldnot get gralloc module for handle 0x%x\n", (unsigned int)handle); } #endif } return 0; }