static int gralloc_unlock(gralloc_module_t const* module, buffer_handle_t handle) { MALI_IGNORE(module); if (private_handle_t::validate(handle) < 0) { AERR("Unlocking invalid buffer 0x%p, returning error", handle); return -EINVAL; } private_handle_t *hnd = (private_handle_t *)handle; int32_t current_value; int32_t new_value; int retry; if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_UMP && hnd->writeOwner) { #if GRALLOC_ARM_UMP_MODULE ump_cpu_msync_now((ump_handle)hnd->ump_mem_handle, UMP_MSYNC_CLEAN_AND_INVALIDATE, (void *)hnd->base, hnd->size); #else AERR("Buffer 0x%p is UMP type but it is not supported", hnd); #endif } else if ( hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION && hnd->writeOwner) { #if GRALLOC_ARM_DMA_BUF_MODULE private_module_t *m = (private_module_t*)module; ion_sync_fd(m->ion_client, hnd->share_fd); #endif } return 0; }
/*
 * Allocate a buffer which can be used directly by hardware, 4kb aligned.
 *
 * @param size        requested size in bytes (driver may round it up).
 * @param constraints allocation constraints passed to the arch layer.
 * @param cache       whether the CPU mapping should be cacheable.
 * @return a new UMP handle, or UMP_INVALID_MEMORY_HANDLE on failure.
 */
static ump_handle ump_ref_drv_allocate_internal(unsigned long size, ump_alloc_constraints constraints, ump_cache_enabled cache)
{
	ump_secure_id secure_id;
	unsigned long allocated_size = size;

	UMP_DEBUG_PRINT(4, ("Allocating UMP memory of size %lu", size));

	secure_id = ump_arch_allocate(&allocated_size, constraints);
	if (secure_id != UMP_INVALID_SECURE_ID)
	{
		unsigned long cookie;
		void * mapping;

		mapping = ump_arch_map(secure_id, allocated_size, cache, &cookie);
		if (NULL != mapping)
		{
			/*
			 * PS: By now we have actually increased the ref count in the device driver by 2,
			 * one for the allocation iteself, and one for the mapping.
			 */
			ump_mem * mem;
			mem = _ump_osu_calloc(1, sizeof(*mem));
			if (NULL != mem)
			{
				mem->secure_id = secure_id;
				mem->mapped_mem = mapping;
				mem->size = allocated_size;
				mem->cookie = cookie;
				mem->is_cached = 1; /* Default to ON, is disabled later if not */

				_ump_osu_lock_auto_init(&mem->ref_lock, 0, 0, 0);
				UMP_DEBUG_ASSERT(NULL != mem->ref_lock, ("Failed to initialize lock\n"));
				mem->ref_count = 1;

				/*
				 * ump_arch_allocate() gave us a kernel space reference, and the same did ump_arch_map()
				 * We release the one from ump_arch_allocate(), and rely solely on the one from the ump_arch_map()
				 * That is, ump_arch_unmap() should now do the final release towards the UMP kernel space driver.
				 */
				ump_arch_reference_release(secure_id);

				/* This is called only to set the cache settings in this handle */
				ump_cpu_msync_now((ump_handle)mem, UMP_MSYNC_READOUT_CACHE_ENABLED, NULL, 0);

				UMP_DEBUG_PRINT(4, ("UMP handle created for ID %u of size %lu, mapped into address 0x%08lx", mem->secure_id, mem->size, (unsigned long)mem->mapped_mem));

				return (ump_handle)mem;
			}

			/* _ump_osu_calloc() failed: drop the mapping (this also drops the
			 * kernel reference taken by ump_arch_map()). */
			ump_arch_unmap(mapping, allocated_size, cookie);
		}

		/*
		 * BUGFIX: the allocation reference was previously released twice on the
		 * _ump_osu_calloc() failure path (once inside the mapping branch after
		 * ump_arch_unmap() and once here), underflowing the kernel-side
		 * refcount. Release the allocation reference exactly once, on both the
		 * map-failure and calloc-failure paths.
		 */
		ump_arch_reference_release(secure_id); /* Release reference added when we allocated the UMP memory */
	}

	UMP_DEBUG_PRINT(4, ("Allocation of UMP memory failed"));
	return UMP_INVALID_MEMORY_HANDLE;
}
static int gralloc_unlock(gralloc_module_t const* module, buffer_handle_t handle) { if (private_handle_t::validate(handle) < 0) { ALOGE("Unlocking invalid buffer, returning error"); return -EINVAL; } private_handle_t* hnd = (private_handle_t*)handle; #ifdef SAMSUNG_EXYNOS_CACHE_UMP if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_UMP) { #ifdef USE_PARTIAL_FLUSH private_handle_rect *psRect; psRect = find_rect((int)hnd->ump_id); ump_cpu_msync_now((ump_handle)hnd->ump_mem_handle, UMP_MSYNC_CLEAN, (void *)(hnd->base + (psRect->stride * psRect->t)), psRect->stride * psRect->h ); return 0; #endif ump_cpu_msync_now((ump_handle)hnd->ump_mem_handle, UMP_MSYNC_CLEAN_AND_INVALIDATE, NULL, 0); } #endif if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION) ion_msync(hnd->ion_client, hnd->fd, IMSYNC_DEV_TO_RW | IMSYNC_SYNC_FOR_DEV, hnd->size, hnd->offset); if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_IOCTL) { int ret; exynos_mem_flush_range mem; mem.start = hnd->paddr; mem.length = hnd->size; ret = ioctl(gMemfd, EXYNOS_MEM_PADDR_CACHE_FLUSH, &mem); if (ret < 0) { ALOGE("Error in exynos-mem : EXYNOS_MEM_PADDR_CACHE_FLUSH (%d)\n", ret); return false; } } return 0; }
/*
 * Flush CPU caches for the given buffer according to its backing allocator.
 * UMP buffers get a clean+invalidate over the full CPU mapping; DMA-BUF (ION)
 * buffers are not supported by this backend and only log an error.
 */
void gralloc_backend_sync(private_handle_t* hnd)
{
    const int backing = hnd->flags & (private_handle_t::PRIV_FLAGS_USES_UMP |
                                      private_handle_t::PRIV_FLAGS_USES_ION);

    if (backing == private_handle_t::PRIV_FLAGS_USES_UMP)
    {
        ump_cpu_msync_now((ump_handle)hnd->ump_mem_handle,
                          UMP_MSYNC_CLEAN_AND_INVALIDATE,
                          (void*)hnd->base, hnd->size);
    }
    else if (backing == private_handle_t::PRIV_FLAGS_USES_ION)
    {
        AERR( "Buffer %p is DMA_BUF type but it is not supported", hnd );
    }
}
/*
 * Create a new UMP handle that wraps an existing allocation identified by its
 * secure ID (e.g. an ID received from another process).
 *
 * On success the handle owns exactly one kernel-space reference — the one
 * taken by ump_arch_map(); ump_arch_unmap() on release performs the final
 * drop. Returns UMP_INVALID_MEMORY_HANDLE if the ID is unknown, the mapping
 * fails, or the bookkeeping struct cannot be allocated.
 */
UMP_API_EXPORT ump_handle ump_handle_create_from_secure_id(ump_secure_id secure_id)
{
	unsigned long size;

	UMP_DEBUG_ASSERT(UMP_INVALID_SECURE_ID != secure_id, ("Secure ID is invalid"));

	/* Query the allocation size; 0 means the ID does not name a live allocation. */
	size = ump_arch_size_get(secure_id);
	if (0 != size)
	{
		unsigned long cookie;
		/*
		 * The UMP memory which the secure_id referes to could now be deleted and re-created
		 * since we don't have any references to it yet. The mapping below will however fail if
		 * we have supplied incorrect size, so we are safe.
		 */
		void *mapping = ump_arch_map(secure_id, size, UMP_CACHE_DISABLE, &cookie);
		if (NULL != mapping)
		{
			ump_mem *mem = _ump_osu_calloc(1, sizeof(*mem));
			if (NULL != mem)
			{
				mem->secure_id = secure_id;
				mem->mapped_mem = mapping;
				mem->size = size;
				mem->cookie = cookie;
				mem->is_cached = UMP_CACHE_ENABLE; /* Is set to actually check in the ump_cpu_msync_now() function */

				_ump_osu_lock_auto_init(&mem->ref_lock, _UMP_OSU_LOCKFLAG_DEFAULT, 0, 0);
				UMP_DEBUG_ASSERT(NULL != mem->ref_lock, ("Failed to initialize lock\n"));
				mem->ref_count = 1;

				/* This is called only to set the cache settings in this handle */
				ump_cpu_msync_now((ump_handle)mem, UMP_MSYNC_READOUT_CACHE_ENABLED, NULL, 0);

				UMP_DEBUG_PRINT(4, ("UMP handle created for ID %u of size %lu, mapped into address 0x%08lx", mem->secure_id, mem->size, (unsigned long)mem->mapped_mem));

				return (ump_handle)mem;
			}

			/* Bookkeeping allocation failed: drop the mapping (and with it the
			 * kernel reference taken by ump_arch_map()). */
			ump_arch_unmap(mapping, size, cookie);
		}
	}

	UMP_DEBUG_PRINT(2, ("UMP handle creation failed for ID %u", secure_id));
	return UMP_INVALID_MEMORY_HANDLE;
}
static int gralloc_unlock(gralloc_module_t const *module, buffer_handle_t handle) { MALI_IGNORE(module); if (private_handle_t::validate(handle) < 0) { AERR("Unlocking invalid buffer 0x%p, returning error", handle); return -EINVAL; } private_handle_t *hnd = (private_handle_t *)handle; int32_t current_value; int32_t new_value; int retry; if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_UMP && hnd->writeOwner) { #if GRALLOC_ARM_UMP_MODULE ump_cpu_msync_now((ump_handle)hnd->ump_mem_handle, UMP_MSYNC_CLEAN_AND_INVALIDATE, (void *)hnd->base, hnd->size); #else AERR("Buffer 0x%p is UMP type but it is not supported", hnd); #endif } else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION && hnd->writeOwner) { #if GRALLOC_ARM_DMA_BUF_MODULE hw_module_t *pmodule = NULL; private_module_t *m = NULL; if (hw_get_module(GRALLOC_HARDWARE_MODULE_ID, (const hw_module_t **)&pmodule) == 0) { m = reinterpret_cast<private_module_t *>(pmodule); //ion_sync_fd(m->ion_client, hnd->share_fd); } else { AERR("Couldnot get gralloc module for handle 0x%p\n", handle); } #endif } return 0; }
/*
 * Legacy gralloc unlock hook (UMP-only): clean+invalidate the CPU mapping of
 * a UMP buffer when the CPU lock is dropped, so devices see the CPU's writes.
 *
 * @return 0 on success, -EINVAL if the handle fails validation.
 *
 * Fix: removed the unused locals current_value/new_value/retry (dead code
 * triggering unused-variable warnings) and explicitly voided the unused
 * `module` parameter.
 */
static int gralloc_unlock(gralloc_module_t const* module, buffer_handle_t handle)
{
    (void)module; /* unused */

    if (private_handle_t::validate(handle) < 0)
    {
        LOGE("Unlocking invalid buffer, returning error");
        return -EINVAL;
    }

    private_handle_t* hnd = (private_handle_t*)handle;

    /* Cache maintenance is only needed if the CPU actually wrote to the buffer. */
    if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_UMP && hnd->writeOwner)
    {
        ump_cpu_msync_now((ump_handle)hnd->ump_mem_handle, UMP_MSYNC_CLEAN_AND_INVALIDATE, (void*)hnd->base, hnd->size);
    }

    return 0;
}
int main(int argc, char *argv[]) { int result = EXIT_FAILURE; if (UMP_OK == ump_open()) { const int SZ_4KB = 4096; const int alloc_size = 16 * SZ_4KB; const int resize_1 = 1 * SZ_4KB; const int resize_2 = -9 * SZ_4KB; #define NUM_ALLOCS 128 ump_handle h[NUM_ALLOCS]; int i; for (i = 0; i < NUM_ALLOCS; i++) { h[i] = UMP_INVALID_MEMORY_HANDLE; } for (i = 0; i < NUM_ALLOCS; i++) { u8 * p; h[i] = ump_allocate(alloc_size, UMP_PROT_CPU_RD | UMP_PROT_CPU_WR | UMP_HINT_CPU_RD | UMP_HINT_CPU_WR | UMP_PROT_W_RD | UMP_PROT_W_WR); if (UMP_INVALID_MEMORY_HANDLE == h[i]) { printf("Failed to allocate\n"); break; } p = ump_map(h[i], 0, alloc_size); if (NULL != p) { STDLIB_MEMSET(p, 0xFF, alloc_size); ump_cpu_msync_now(h[i], UMP_MSYNC_CLEAN, p, alloc_size); ump_unmap(h[i], p, alloc_size); } else { printf("Failed to map\n"); break; } } if (i == NUM_ALLOCS) { int resize_failure = 0; printf("%d allocations succeeded\n", NUM_ALLOCS); for (i = 0; i < NUM_ALLOCS; i++) { ump_secure_id check_id; check_id = ump_secure_id_get(h[i]); if (UMP_INVALID_SECURE_ID == check_id) { printf("Handle %d has an invalid secure id!\n", i); } else { int j; for (j = i + 1; j < NUM_ALLOCS; j++) { ump_secure_id id; id = ump_secure_id_get(h[j]); if (id == check_id) { printf("Duplicate IDs found for handles %d and %d, both have %u\n", i, j, id); } } } } printf("doing resize check\n"); for (i = 0; i < NUM_ALLOCS; i++) { u64 new_size; ump_resize_result res; res = ump_resize(h[i], resize_1, &new_size); if (UMP_RESIZE_OK != res) { printf("resize failed with error code 0x%08X\n", res); resize_failure++; } else { if (new_size != (alloc_size + resize_1)) { printf("Hmm, the new size isn't what I expected: %llu != %d\n", (unsigned long long)new_size, alloc_size + resize_1); resize_failure++; } else { int j; u8 * map; map = (u8*)ump_map(h[i], 0, new_size); if (NULL == map) { resize_failure++; /* record this as a resize failure */ break; } for (j = 0; j < alloc_size; j++) { if (map[j] != 0xFF) { printf("Expected 0xFF, have 
0x%02X\n", map[j]); resize_failure++; } } for (j = alloc_size; j < new_size; j++) { if (map[j] != 0) { printf("Expected 0x00, have 0x%02X\n", map[j]); resize_failure++; } } ump_unmap(h[i], map, new_size); } res = ump_resize(h[i], resize_2, &new_size); if (UMP_RESIZE_OK != res) { printf("resize failed with error code 0x%08x\n", res); resize_failure++; } else { if (new_size != (alloc_size + resize_1 + resize_2)) { printf("Hmm, the new size isn't what I expected: %llu != %d\n", (unsigned long long)new_size, alloc_size + resize_1 + resize_2); resize_failure++; } else { int j; u8 * map; map = (u8*)ump_map(h[i], 0, new_size); if (NULL == map) { resize_failure++; break; } ump_cpu_msync_now(h[i], UMP_MSYNC_CLEAN_AND_INVALIDATE, map, new_size); for (j = 0; j < new_size; j++) { if (map[j] != 0xFF) { printf("expected 0xFF, have 0x%02X\n", map[j]); resize_failure++; } } ump_unmap(h[i], map, new_size); } } } } if (resize_failure) { printf("%d resize failures found\n", resize_failure); } else { printf("resize check OK\n"); result = EXIT_SUCCESS; } } for (i = 0; i < NUM_ALLOCS; i++) { ump_release(h[i]); } ump_close(); } return result; }