/*
 * Allocate an ION buffer and hand it back to the caller as a shareable
 * dma-buf fd instead of a raw ION handle.
 *
 * fd         - ION device fd.
 * len/align  - allocation size and alignment.
 * heap_mask  - candidate heaps; flags - ION allocation flags.
 * handle_fd  - out: dma-buf fd on success.
 *
 * Returns 0 on success, negative error from ion_alloc/ion_share otherwise.
 */
int ion_alloc_fd(int fd, size_t len, size_t align, unsigned int heap_mask, unsigned int flags, int *handle_fd)
{
	ion_user_handle_t handle;

	int rc = ion_alloc(fd, len, align, heap_mask, flags, &handle);
	if (rc < 0)
		return rc;

	rc = ion_share(fd, handle, handle_fd);

	/* The dma-buf fd (when sharing succeeded) keeps the buffer alive;
	 * the local handle is dropped unconditionally either way. */
	ion_free(fd, handle);
	return rc;
}
/**
 * Register an ION-backed buffer with the OMAPRPC driver.
 *
 * rpc       - OMAPRPC connection (provides the device fd and fd_list).
 * memdevice - ION device fd used to export the buffer.
 * ptr       - user pointer associated with the buffer (only validated non-NULL).
 * reserved  - in/out: pointer to the ION handle; updated with the handle the
 *             kernel reports back after registration.
 *
 * Returns true_e when the buffer was shared and registered, false_e otherwise.
 *
 * Fix: the calloc'd ion_fd_data was leaked on both the ion_share failure
 * path and the ioctl failure path; it is now freed there. On success its
 * ownership passes to the node appended to rpc->fd_list.
 */
bool_e omaprpc_register(omaprpc_t *rpc, int memdevice, void *ptr, void **reserved)
{
	bool_e registered = false_e;
	if (rpc && memdevice && ptr && reserved)
	{
#if defined(OMAPRPC_USE_ION)
		struct ion_fd_data *data = calloc(1, sizeof(struct ion_fd_data));
		if (data)
		{
			struct ion_handle *ih = *((struct ion_handle **)reserved);
			int ret = ion_share(memdevice, ih, &data->fd);
			if (ret < 0)
			{
				OMAPRPC_PRINT(OMAPRPC_ZONE_ERROR, "Failed to share ION memory! (err=%d)\n", ret);
				free(data); /* FIX: was leaked on this path */
			}
			else
			{
				ret = ioctl(rpc->device, OMAPRPC_IOC_IONREGISTER, data);
				if (ret < 0)
				{
					OMAPRPC_PRINT(OMAPRPC_ZONE_ERROR, "Failed to register ION buffer with OMAPRPC:%u! (err=%d)\n", rpc->device, ret);
					close(data->fd);
					free(data); /* FIX: was leaked on this path */
				}
				else
				{
					/* Remember the shared fd so it can be closed on teardown. */
					node_t *node = node_create((value_t)data);
					if (node)
					{
						list_append(&rpc->fd_list, node);
					}
					else
					{
						OMAPRPC_PRINT(OMAPRPC_ZONE_ERROR, "Failed to remember shared fd. LEAK!\n");
					}
					OMAPRPC_PRINT(OMAPRPC_ZONE_INFO, "Registered %p with OMAPRPC:%u!\n", data->handle, rpc->device);
					if (data->handle != ih)
					{
						OMAPRPC_PRINT(OMAPRPC_ZONE_ERROR, "ERROR: Handle from registration has changed! Was %p now %p\n", ih, data->handle);
					}
					/* Hand the (possibly updated) kernel handle back to the caller. */
					*((struct ion_handle **)reserved) = data->handle;
					registered = true_e;
				}
			}
		}
		else
		{
			OMAPRPC_PRINT(OMAPRPC_ZONE_ERROR, "ERROR: Failed to create association structure!\n");
		}
#endif
	}
	return registered;
}
int omap_ion_share_fd_to_buffers(int fd, struct ion_buffer **buffers, int *num_handles) { struct ion_handle **handles; struct ion_client *client; int i = 0, ret = 0; handles = kzalloc(*num_handles * sizeof(struct ion_handle *), GFP_KERNEL); if (!handles) return -ENOMEM; #ifdef CONFIG_PVR_SGX if (*num_handles == 2) { PVRSRVExportFDToIONHandles(fd, &client, handles); } else if (*num_handles == 1) { handles[0] = PVRSRVExportFDToIONHandle(fd, &client); } else { ret = -EINVAL; goto exit; } #else if (export_fd_to_ion_handles) { export_fd_to_ion_handles(fd, &client, handles, num_handles); } else { pr_err("%s: export_fd_to_ion_handles" "not initiazied", __func__); ret = -EINVAL; goto exit; } #endif for (i = 0; i < *num_handles; i++) { if (handles[i]) buffers[i] = ion_share(client, handles[i]); } exit: kfree(handles); return ret; }
/* Regression test: calling ion_share() with a handle that was valid but has
 * since been freed (the FormerlyValidHandle fixture) must be rejected with
 * -EINVAL rather than exporting a dma-buf fd for dead memory. */
TEST_F(FormerlyValidHandle, share)
{
	int share_fd;

	ASSERT_EQ(-EINVAL, ion_share(m_ionFd, m_handle, &share_fd));
}
int ion_m4u_misc_using() { int i; int ion_fd; int ion_test_fd; ion_user_handle_t handle; int share_fd; volatile char* pBuf; pid_t pid; unsigned int bufsize = 1*1024*1024; ion_fd = ion_open(); if (ion_fd < 0) { printf("Cannot open ion device.\n"); return 0; } if (ion_alloc_mm(ion_fd, bufsize, 4, 0, &handle)) { printf("IOCTL[ION_IOC_ALLOC] failed!\n"); return 0; } if (ion_share(ion_fd, handle, &share_fd)) { printf("IOCTL[ION_IOC_SHARE] failed!\n"); return 0; } pBuf = (char*)ion_mmap(ion_fd, NULL, bufsize, PROT_READ|PROT_WRITE, MAP_SHARED, share_fd, 0); printf("ion_map: pBuf = 0x%lx\n", (unsigned long)pBuf); if (!pBuf) { printf("Cannot map ion buffer.\n"); return 0; } MTKM4UDrv CM4u; unsigned int BufMVA; int ret; ret = CM4u.m4u_alloc_mva(0, (unsigned long)pBuf, bufsize, M4U_PROT_READ|M4U_PROT_WRITE, M4U_FLAGS_SEQ_ACCESS, &BufMVA); if(ret) { printf("allocate mva fail. ret=0x%x\n", ret); return ret; } printf("mva=0x%x\n", BufMVA); ret = CM4u.m4u_cache_sync(0, M4U_CACHE_FLUSH_BY_RANGE, (unsigned long)pBuf,bufsize, BufMVA); if(ret) { printf("cache flush fail. ret=%d,va=0x%lx,size=0x%x\n", ret,(unsigned long)pBuf,bufsize); return ret; } ret = CM4u.m4u_dealloc_mva(0, (unsigned long)pBuf,bufsize, BufMVA); if(ret) { printf("m4u_dealloc_mva fail. ret=%d, mva=0x%x\n", ret, BufMVA); } return 0; }
void ion_share_test() { ion_user_handle_t handle; int sd[2]; int num_fd = 1; struct iovec count_vec = { .iov_base = &num_fd, .iov_len = sizeof num_fd, }; char buf[CMSG_SPACE(sizeof(int))]; socketpair(AF_UNIX, SOCK_STREAM, 0, sd); if (fork()) { struct msghdr msg = { .msg_control = buf, .msg_controllen = sizeof buf, .msg_iov = &count_vec, .msg_iovlen = 1, }; struct cmsghdr *cmsg; int fd, share_fd, ret; char *ptr; /* parent */ if(_ion_alloc_test(&fd, &handle)) return; ret = ion_share(fd, handle, &share_fd); if (ret) printf("share failed %s\n", strerror(errno)); ptr = mmap(NULL, len, prot, map_flags, share_fd, 0); if (ptr == MAP_FAILED) { return; } strcpy(ptr, "master"); cmsg = CMSG_FIRSTHDR(&msg); cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_RIGHTS; cmsg->cmsg_len = CMSG_LEN(sizeof(int)); *(int *)CMSG_DATA(cmsg) = share_fd; /* send the fd */ printf("master? [%10s] should be [master]\n", ptr); printf("master sending msg 1\n"); sendmsg(sd[0], &msg, 0); if (recvmsg(sd[0], &msg, 0) < 0) perror("master recv msg 2"); printf("master? [%10s] should be [child]\n", ptr); /* send ping */ sendmsg(sd[0], &msg, 0); printf("master->master? [%10s]\n", ptr); if (recvmsg(sd[0], &msg, 0) < 0) perror("master recv 1"); } else { struct msghdr msg; struct cmsghdr *cmsg; char* ptr; int fd, recv_fd; char* child_buf[100]; /* child */ struct iovec count_vec = { .iov_base = child_buf, .iov_len = sizeof child_buf, }; struct msghdr child_msg = { .msg_control = buf, .msg_controllen = sizeof buf, .msg_iov = &count_vec, .msg_iovlen = 1, }; if (recvmsg(sd[1], &child_msg, 0) < 0) perror("child recv msg 1"); cmsg = CMSG_FIRSTHDR(&child_msg); if (cmsg == NULL) { printf("no cmsg rcvd in child"); return; } recv_fd = *(int*)CMSG_DATA(cmsg); if (recv_fd < 0) { printf("could not get recv_fd from socket"); return; } printf("child %d\n", recv_fd); fd = ion_open(); ptr = mmap(NULL, len, prot, map_flags, recv_fd, 0); if (ptr == MAP_FAILED) { return; } printf("child? 
[%10s] should be [master]\n", ptr); strcpy(ptr, "child"); printf("child sending msg 2\n"); sendmsg(sd[1], &child_msg, 0); } } int main(int argc, char* argv[]) { int c; enum tests { ALLOC_TEST = 0, MAP_TEST, SHARE_TEST, }; while (1) { static struct option opts[] = { {"alloc", no_argument, 0, 'a'}, {"alloc_flags", required_argument, 0, 'f'}, {"map", no_argument, 0, 'm'}, {"share", no_argument, 0, 's'}, {"len", required_argument, 0, 'l'}, {"align", required_argument, 0, 'g'}, {"map_flags", required_argument, 0, 'z'}, {"prot", required_argument, 0, 'p'}, {"width", required_argument, 0, 'w'}, {"height", required_argument, 0, 'h'}, }; int i = 0; c = getopt_long(argc, argv, "af:h:l:mr:stw:", opts, &i); if (c == -1) break; switch (c) { case 'l': len = atol(optarg); break; case 'g': align = atol(optarg); break; case 'z': map_flags = 0; map_flags |= strstr(optarg, "PROT_EXEC") ? PROT_EXEC : 0; map_flags |= strstr(optarg, "PROT_READ") ? PROT_READ: 0; map_flags |= strstr(optarg, "PROT_WRITE") ? PROT_WRITE: 0; map_flags |= strstr(optarg, "PROT_NONE") ? PROT_NONE: 0; break; case 'p': prot = 0; prot |= strstr(optarg, "MAP_PRIVATE") ? MAP_PRIVATE : 0; prot |= strstr(optarg, "MAP_SHARED") ? MAP_PRIVATE : 0; break; case 'f': alloc_flags = atol(optarg); break; case 'a': test = ALLOC_TEST; break; case 'm': test = MAP_TEST; break; case 's': test = SHARE_TEST; break; case 'w': width = atol(optarg); break; case 'h': height = atol(optarg); break; } } printf("test %d, len %u, width %u, height %u align %u, " "map_flags %d, prot %d, alloc_flags %d\n", test, len, width, height, align, map_flags, prot, alloc_flags); switch (test) { case ALLOC_TEST: ion_alloc_test(); break; case MAP_TEST: ion_map_test(); break; case SHARE_TEST: ion_share_test(); break; default: printf("must specify a test (alloc, map, share)\n"); } return 0; }
/*
 * Allocate the backing store for a gralloc buffer and publish it to the
 * caller as a private_handle_t in *pHandle.
 *
 * Two mutually independent backends are compiled in via the ARM gralloc
 * config macros: a dma-buf/ION path and a UMP path. If both are enabled,
 * the ION block returns on every path, so the UMP block is unreachable.
 *
 * Returns 0 on success (with *pHandle set), -1 on any failure; all
 * partially acquired resources (ION handle, dma-buf fd, CPU mapping) are
 * released on the failure paths.
 *
 * NOTE(review): the CPU pointer is narrowed with (int)cpu_ptr when stored
 * in private_handle_t — this truncates on LP64; confirm this module only
 * targets 32-bit builds.
 */
static int gralloc_alloc_buffer(alloc_device_t *dev, size_t size, int usage, buffer_handle_t *pHandle)
{
#if GRALLOC_ARM_DMA_BUF_MODULE
	{
		private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
		ion_user_handle_t ion_hnd;
		unsigned char *cpu_ptr;
		int shared_fd;
		int ret;
		unsigned int ion_flags = 0;

		/* Request a cached buffer only when frequent CPU reads are declared. */
		if( (usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN )
			ion_flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;

		/* GRALLOC_USAGE_PRIVATE_1 selects the carveout heap, otherwise
		 * the generic system heap. */
		if (usage & GRALLOC_USAGE_PRIVATE_1)
		{
			ret = ion_alloc(m->ion_client, size, 0, ION_HEAP_CARVEOUT_MASK, ion_flags, &ion_hnd);
		}
		else
		{
			ret = ion_alloc(m->ion_client, size, 0, ION_HEAP_SYSTEM_MASK, ion_flags, &ion_hnd);
		}

		if (ret != 0)
		{
			AERR("Failed to ion_alloc from ion_client:%d", m->ion_client);
			return -1;
		}

		/* Export the allocation as a dma-buf fd for sharing/mapping. */
		ret = ion_share(m->ion_client, ion_hnd, &shared_fd);

		if (ret != 0)
		{
			AERR("ion_share( %d ) failed", m->ion_client);

			if (0 != ion_free(m->ion_client, ion_hnd))
			{
				AERR("ion_free( %d ) failed", m->ion_client);
			}

			return -1;
		}

		cpu_ptr = (unsigned char *)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, shared_fd, 0);

		if (MAP_FAILED == cpu_ptr)
		{
			AERR("ion_map( %d ) failed", m->ion_client);

			if (0 != ion_free(m->ion_client, ion_hnd))
			{
				AERR("ion_free( %d ) failed", m->ion_client);
			}

			close(shared_fd);
			return -1;
		}

		private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_ION, usage, size, (int)cpu_ptr, private_handle_t::LOCK_STATE_MAPPED);

		if (NULL != hnd)
		{
			hnd->share_fd = shared_fd;
			hnd->ion_hnd = ion_hnd;
			*pHandle = hnd;
			return 0;
		}
		else
		{
			AERR("Gralloc out of mem for ion_client:%d", m->ion_client);
		}

		/* Handle creation failed: unwind fd, mapping and ION handle. */
		close(shared_fd);
		ret = munmap(cpu_ptr, size);

		if (0 != ret)
		{
			AERR("munmap failed for base:%p size: %d", cpu_ptr, size);
		}

		ret = ion_free(m->ion_client, ion_hnd);

		if (0 != ret)
		{
			AERR("ion_free( %d ) failed", m->ion_client);
		}

		return -1;
	}
#endif

#if GRALLOC_ARM_UMP_MODULE
	{
		ump_handle ump_mem_handle;
		void *cpu_ptr;
		ump_secure_id ump_id;
		ump_alloc_constraints constraints;

		size = round_up_to_page_size(size);

		/* Mirror the ION path's cacheability decision for UMP. */
		if ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN)
		{
			constraints = UMP_REF_DRV_CONSTRAINT_USE_CACHE;
		}
		else
		{
			constraints = UMP_REF_DRV_CONSTRAINT_NONE;
		}

#ifdef GRALLOC_SIMULATE_FAILURES
		/* if the failure condition matches, fail this iteration */
		if (__ump_alloc_should_fail())
		{
			ump_mem_handle = UMP_INVALID_MEMORY_HANDLE;
		}
		else
#endif
		{
			ump_mem_handle = ump_ref_drv_allocate(size, constraints);

			if (UMP_INVALID_MEMORY_HANDLE != ump_mem_handle)
			{
				cpu_ptr = ump_mapped_pointer_get(ump_mem_handle);

				if (NULL != cpu_ptr)
				{
					ump_id = ump_secure_id_get(ump_mem_handle);

					if (UMP_INVALID_SECURE_ID != ump_id)
					{
						private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_UMP, usage, size, (int)cpu_ptr, private_handle_t::LOCK_STATE_MAPPED, ump_id, ump_mem_handle);

						if (NULL != hnd)
						{
							*pHandle = hnd;
							return 0;
						}
						else
						{
							AERR("gralloc_alloc_buffer() failed to allocate handle. ump_handle = %p, ump_id = %d", ump_mem_handle, ump_id);
						}
					}
					else
					{
						AERR("gralloc_alloc_buffer() failed to retrieve valid secure id. ump_handle = %p", ump_mem_handle);
					}

					/* Unwind in reverse order of acquisition. */
					ump_mapped_pointer_release(ump_mem_handle);
				}
				else
				{
					AERR("gralloc_alloc_buffer() failed to map UMP memory. ump_handle = %p", ump_mem_handle);
				}

				ump_reference_release(ump_mem_handle);
			}
			else
			{
				AERR("gralloc_alloc_buffer() failed to allocate UMP memory. size:%d constraints: %d", size, constraints);
			}
		}

		return -1;
	}
#endif
}
/*
 * Invokes a client command to the Secure World.
 *
 * Marshals the four command parameters before dispatch: temporary memrefs
 * are mapped into shared memory descriptors, and (with CONFIG_TF_ION) ION
 * handle parameters are resolved to physical address/length value pairs.
 * On any path, mapped shmem descriptors are unmapped and the imported ION
 * handle is released before returning.
 *
 * Returns 0 on success (the SW error code is in answer->...error_code),
 * or a negative error for marshalling/transport failures.
 */
int tf_invoke_client_command(
	struct tf_connection *connection,
	union tf_command *command,
	union tf_answer *answer)
{
	int error = 0;
	struct tf_shmem_desc *shmem_desc[4] = {NULL};
	int i;
#ifdef CONFIG_TF_ION
	struct ion_handle *new_handle = NULL;
#endif /* CONFIG_TF_ION */

	dprintk(KERN_INFO "tf_invoke_client_command(%p)\n", connection);

	/* NOTE(review): written through the release_shared_memory union
	 * member but sized from the invoke_client_command layout — both
	 * alias the common header, but confirm message_size lives in the
	 * shared header for this to be intentional. */
	command->release_shared_memory.message_size =
		(sizeof(struct tf_command_invoke_client_command) -
			sizeof(struct tf_command_header)) / 4;

#ifdef CONFIG_TF_ZEBRA
	/* Fast path: crypto updates can bypass the full SW round-trip. */
	error = tf_crypto_try_shortcuted_update(connection,
		(struct tf_command_invoke_client_command *) command,
		(struct tf_answer_invoke_client_command *) answer);
	if (error == 0)
		return error;
#endif

	/* Map the tmprefs */
	for (i = 0; i < 4; i++) {
		int param_type = TF_GET_PARAM_TYPE(
			command->invoke_client_command.param_types, i);

		/* Memref flag set without the "registered" flag means a
		 * temporary memref that must be mapped for this call only. */
		if ((param_type & (TF_PARAM_TYPE_MEMREF_FLAG |
				TF_PARAM_TYPE_REGISTERED_MEMREF_FLAG))
				== TF_PARAM_TYPE_MEMREF_FLAG) {
			/* A temporary memref: map it */
			error = tf_map_temp_shmem(connection,
				&command->invoke_client_command.params[i].temp_memref,
				param_type, &shmem_desc[i]);
			if (error != 0) {
				dprintk(KERN_ERR
					"tf_invoke_client_command: "
					"unable to map temporary memory "
					"block\n (%08X)", error);
				goto error;
			}
		}
#ifdef CONFIG_TF_ION
		else if (param_type == TF_PARAM_TYPE_MEMREF_ION_HANDLE) {
			struct tf_command_invoke_client_command *invoke;
			ion_phys_addr_t ion_addr;
			size_t ion_len;
			struct ion_buffer *buffer;

			/* Lazily create the per-connection ION client
			 * (carveout heap only). */
			if (connection->ion_client == NULL) {
				connection->ion_client = ion_client_create(
					zebra_ion_device,
					(1 << ION_HEAP_TYPE_CARVEOUT),
					"tf");
			}
			if (connection->ion_client == NULL) {
				dprintk(KERN_ERR "%s(%p): "
					"unable to create ion client\n",
					__func__, connection);
				error = -EFAULT;
				goto error;
			}

			invoke = &command->invoke_client_command;
			dprintk(KERN_INFO "ion_handle %x",
				invoke->params[i].value.a);
			/* Kernel-side ion_share(): yields the underlying
			 * ion_buffer so it can be re-imported locally. */
			buffer = ion_share(connection->ion_client,
				(struct ion_handle *)invoke->params[i].value.a);
			if (buffer == NULL) {
				dprintk(KERN_ERR "%s(%p): "
					"unable to share ion handle\n",
					__func__, connection);
				error = -EFAULT;
				goto error;
			}

			dprintk(KERN_INFO "ion_buffer %p", buffer);
			new_handle = ion_import(connection->ion_client, buffer);
			if (new_handle == NULL) {
				dprintk(KERN_ERR "%s(%p): "
					"unable to import ion buffer\n",
					__func__, connection);
				error = -EFAULT;
				goto error;
			}

			dprintk(KERN_INFO "new_handle %x", new_handle);
			error = ion_phys(connection->ion_client,
				new_handle, &ion_addr, &ion_len);
			if (error) {
				dprintk(KERN_ERR
					"%s: unable to convert ion handle "
					"0x%08X (error code 0x%08X)\n",
					__func__, new_handle, error);
				error = -EINVAL;
				goto error;
			}

			dprintk(KERN_INFO
				"%s: handle=0x%08x phys_add=0x%08x length=0x%08x\n",
				__func__, invoke->params[i].value.a, ion_addr, ion_len);

			/* Rewrite the parameter in place: the SW receives the
			 * physical address/length as a plain value pair. */
			invoke->params[i].value.a = (u32) ion_addr;
			invoke->params[i].value.b = (u32) ion_len;

			invoke->param_types &= ~((0xF) << (4*i));
			invoke->param_types |= TF_PARAM_TYPE_VALUE_INPUT << (4*i);
		}
#endif /* CONFIG_TF_ION */
	}

	command->invoke_client_command.device_context =
		connection->device_context;

	error = tf_send_receive(&connection->dev->sm, command, answer, connection, true);

error:
#ifdef CONFIG_TF_ION
	/* The imported handle was only needed to resolve the physical
	 * address; drop it whether or not the command was sent. */
	if (new_handle != NULL)
		ion_free(connection->ion_client, new_handle);
#endif /* CONFIG_TF_ION */
	/* Unmap the temp mem refs */
	for (i = 0; i < 4; i++) {
		if (shmem_desc[i] != NULL) {
			dprintk(KERN_INFO "tf_invoke_client_command: "
				"UnMatemp_memref %d\n ", i);
			tf_unmap_shmem(connection, shmem_desc[i], 0);
		}
	}

	if (error != 0)
		dprintk(KERN_ERR "tf_invoke_client_command returns %d\n",
			error);
	else
		dprintk(KERN_ERR "tf_invoke_client_command returns "
			"error_code 0x%08X\n",
			answer->invoke_client_command.error_code);

	return error;
}
int IonDmaMemManager::createIonBuffer(struct bufferinfo_s* ionbuf) { int ret =0,i = 0; int numBufs; int frame_size; camera_ionbuf_t* tmpalloc = NULL; struct bufferinfo_s* tmp_buf = NULL; #ifdef ROCKCHIP_ION_VERSION ion_user_handle_t handle = 0; #else struct ion_handle* handle = NULL; #endif int map_fd; long temp_handle = 0; unsigned long vir_addr = 0; if (!ionbuf) { LOGE("ion_alloc malloc buffer failed"); return -1; } numBufs = ionbuf->mNumBffers; frame_size = ionbuf->mPerBuffersize; ionbuf->mBufferSizes = numBufs*PAGE_ALIGN(frame_size); switch(ionbuf->mBufType) { case PREVIEWBUFFER: tmpalloc = mPreviewData ; if((tmp_buf = (struct bufferinfo_s*)malloc(numBufs*sizeof(struct bufferinfo_s))) != NULL){ mPreviewBufferInfo = tmp_buf; }else{ LOGE("ion_alloc malloc buffer failed"); return -1; } break; case RAWBUFFER: tmpalloc = mRawData; if((tmp_buf = (struct bufferinfo_s*)malloc(numBufs*sizeof(struct bufferinfo_s))) != NULL){ mRawBufferInfo = tmp_buf; }else{ LOGE("ion_alloc malloc buffer failed"); return -1; } break; case JPEGBUFFER: tmpalloc = mJpegData; if((tmp_buf = (struct bufferinfo_s*)malloc(numBufs*sizeof(struct bufferinfo_s))) != NULL ){ mJpegBufferInfo = tmp_buf; }else{ LOGE("ion_alloc malloc buffer failed"); return -1; } break; case VIDEOENCBUFFER: tmpalloc = mVideoEncData ; if((tmp_buf = (struct bufferinfo_s*)malloc(numBufs*sizeof(struct bufferinfo_s))) != NULL){ mVideoEncBufferInfo = tmp_buf; }else{ LOGE("ion_alloc malloc buffer failed"); return -1; } break; default: return -1; } for(i = 0;i < numBufs;i++){ memset(tmpalloc,0,sizeof(struct camera_ionbuf_s)); if((!mIommuEnabled) || (!ionbuf->mIsForceIommuBuf)) ret = ion_alloc(client_fd, ionbuf->mPerBuffersize, PAGE_SIZE, ION_HEAP(ION_CMA_HEAP_ID), 0, &handle); else ret = ion_alloc(client_fd, ionbuf->mPerBuffersize, PAGE_SIZE, ION_HEAP(ION_VMALLOC_HEAP_ID), 0, &handle); if (ret) { LOGE("ion alloc failed\n"); break; } LOG1("handle %d\n", handle); ret = ion_share(client_fd,handle,&map_fd); if (ret) { LOGE("ion map 
failed\n"); ion_free(client_fd,handle); break; } vir_addr = (unsigned long )mmap(NULL, ionbuf->mPerBuffersize, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0); if (vir_addr == 0) { LOGE("ion mmap failed\n"); ret = -1; ion_free(client_fd,handle); break; } if((!mIommuEnabled) || (!ionbuf->mIsForceIommuBuf)) ion_get_phys(client_fd,handle,&(tmpalloc->phy_addr)); else tmpalloc->phy_addr = map_fd; tmpalloc->size = ionbuf->mPerBuffersize; tmpalloc->vir_addr = vir_addr; temp_handle = handle; tmpalloc->ion_hdl = (void*)temp_handle; tmpalloc->map_fd = map_fd; ionbuf->mPhyBaseAddr = (unsigned long)tmpalloc->phy_addr; ionbuf->mVirBaseAddr = (unsigned long)tmpalloc->vir_addr; ionbuf->mPerBuffersize = PAGE_ALIGN(frame_size); ionbuf->mShareFd = (unsigned int)tmpalloc->map_fd; *tmp_buf = *ionbuf; tmp_buf++; tmpalloc++; } if(ret < 0){ LOGE(" failed !"); while(--i >= 0){ --tmpalloc; --tmp_buf; munmap((void *)tmpalloc->vir_addr, tmpalloc->size); ion_free(client_fd, tmpalloc->ion_hdl); } free(tmpalloc); free(tmp_buf); } return ret; }
//----------------------------------------------------------------------------- MINT32 IMemDrvImp::allocVirtBuf( IMEM_BUF_INFO* pInfo) { #if defined (__ISP_USE_PMEM__) IMEM_DBG("__ISP_USE_PMEM__"); // //pInfo->type = BUF_TYPE_PMEM; pInfo->virtAddr= (MUINT32) ::pmem_alloc_sync(pInfo->size, &pInfo->memID); // IMEM_DBG("memID[0x%x]",pInfo->memID); #elif defined (__ISP_USE_STD_M4U__) IMEM_DBG("__ISP_USE_STD_M4U__"); // //pInfo->type = BUF_TYPE_STD_M4U; pInfo->memID = (MINT32)(IMEM_MIN_ION_FD-1); pInfo->virtAddr = (MUINT32)::memalign(L1_CACHE_BYTES, pInfo->size); //32Bytes align //will call allocM4UMemory function(we show information there) //IMEM_INF("[Std M4U] mID[0x%x]/size[0x%x]/VA[0x%x]",pInfo->memID,pInfo->size,pInfo->virtAddr); #elif defined (__ISP_USE_ION__) IMEM_DBG("__ISP_USE_ION__"); // struct ion_handle *pIonHandle; MINT32 IonBufFd; struct ion_fd_data fd_data; //a. Allocate a buffer if(ion_alloc_mm( mIonDrv, pInfo->size, 32, 0, &pIonHandle)) { IMEM_ERR("ion_alloc_mm fail"); IMEM_ERR("cBuf ID[0x%x]/size[0x%x]",pInfo->memID,pInfo->size); return -1; } //b. Map a new fd for client. if(ion_share( mIonDrv, pIonHandle, &IonBufFd)) { IMEM_ERR("ion_share fail"); IMEM_ERR("cBuf ID[0x%x]/size[0x%x]",pInfo->memID,pInfo->size); return -1; } pInfo->memID = (MINT32)IonBufFd; // Tianshu suggest to keep this fd //c. Map FD to a virtual space. if(pInfo->useNoncache) { pInfo->virtAddr = (MUINT32)ion_mmap(mIonDrv,NULL, pInfo->size, PROT_READ|PROT_WRITE|PROT_NOCACHE, MAP_SHARED, IonBufFd, 0); } else { pInfo->virtAddr = (MUINT32)ion_mmap(mIonDrv,NULL, pInfo->size, PROT_READ|PROT_WRITE, MAP_SHARED, IonBufFd, 0); } if (!pInfo->virtAddr) { IMEM_ERR("Cannot map ion buffer."); IMEM_ERR("cBuf ID[0x%x]/size[0x%x]",pInfo->memID,pInfo->size); return -1; } // IMEM_DBG("ionFd[0x%x]",pInfo->memID); #endif IMEM_DBG("mID[0x%x]/size[0x%x]/VA[0x%x]",pInfo->memID,pInfo->size,pInfo->virtAddr); //kk test return 0; }
/**
 * Allocate an ION buffer for the DOMX memory plugin.
 *
 * pMemPluginHandle - MEMPLUGIN_OBJECT carrying optional ION parameters.
 * nClient          - ION client to allocate from.
 * pIonBufferParams - requested geometry, buffer type and mapping mode.
 * pIonBufferProp   - out: handle, stride, and (if bMap) mapped address/fd.
 *
 * DEFAULT allocations that fail may be retried from the TILER 1D heap.
 * Returns MEMPLUGIN_ERROR_NONE on success or a specific error code.
 *
 * Fixes:
 *  - 'temp' was uninitialized; the "else if(!temp)" branch read it when
 *    the buffer type was unrecognized (undefined behavior).
 *  - 'stride' was never assigned, yet was stored into nStride after
 *    allocation, clobbering the stride ion_alloc_tiler() had just
 *    reported. nStride is now left as set by ion_alloc_tiler() for TILER1D
 *    buffers and explicitly zeroed for DEFAULT (non-tiled) buffers.
 */
MEMPLUGIN_ERRORTYPE MemPlugin_ION_Alloc(void *pMemPluginHandle, OMX_U32 nClient, MEMPLUGIN_BUFFER_PARAMS *pIonBufferParams, MEMPLUGIN_BUFFER_PROPERTIES *pIonBufferProp)
{
    OMX_S16 ret;
    struct ion_handle *temp = NULL; /* FIX: was uninitialized */
    MEMPLUGIN_ERRORTYPE eError = MEMPLUGIN_ERROR_NONE;
    MEMPLUGIN_ION_PARAMS sIonParams;
    MEMPLUGIN_OBJECT *pMemPluginHdl = (MEMPLUGIN_OBJECT *)pMemPluginHandle;

    if(pIonBufferParams->nWidth <= 0)
    {
        eError = MEMPLUGIN_ERROR_BADPARAMETER;
        DOMX_ERROR("%s: width should be positive %d", __FUNCTION__,pIonBufferParams->nWidth);
        goto EXIT;
    }

    /* Use caller-supplied ION parameters when present, defaults otherwise. */
    if(pMemPluginHdl->pPluginExtendedInfo == NULL)
    {
        MEMPLUGIN_ION_PARAMS_INIT(&sIonParams);
    }
    else
    {
        MEMPLUGIN_ION_PARAMS_COPY(((MEMPLUGIN_ION_PARAMS *)pMemPluginHdl->pPluginExtendedInfo),sIonParams);
    }

    if(pIonBufferParams->eBuffer_type == DEFAULT)
    {
        ret = (OMX_S16)ion_alloc(nClient, pIonBufferParams->nWidth, sIonParams.nAlign, sIonParams.alloc_flags, &temp);
        if(ret || (int)temp == -ENOMEM)
        {
            if(sIonParams.alloc_flags != OMAP_ION_HEAP_SECURE_INPUT)
            {
                //for non default types of allocation - no retry with tiler 1d - throw error
                //STARGO: ducati secure heap is too small, need to allocate from heap
#if 0
                DOMX_ERROR("FAILED to allocate secure buffer of size=%d. ret=0x%x",pIonBufferParams->nWidth, ret);
                eError = MEMPLUGIN_ERROR_NORESOURCES;
                goto EXIT;
#endif
                DOMX_ERROR("FAILED to allocate secure buffer of size=%d. ret=0x%x - trying tiler 1d space",pIonBufferParams->nWidth, ret);
                pIonBufferParams->eBuffer_type = TILER1D;
                pIonBufferParams->eTiler_format = MEMPLUGIN_TILER_FORMAT_PAGE;
                sIonParams.alloc_flags = OMAP_ION_HEAP_TILER_MASK;
                sIonParams.nAlign = -1;
            }
            else
            {
                // for default non tiler (OMAP_ION_HEAP_SECURE_INPUT) retry allocating from tiler 1D
                DOMX_DEBUG("FAILED to allocate from non tiler space - trying tiler 1d space");
                pIonBufferParams->eBuffer_type = TILER1D;
                pIonBufferParams->eTiler_format = MEMPLUGIN_TILER_FORMAT_PAGE;
                sIonParams.alloc_flags = OMAP_ION_HEAP_TILER_MASK;
                sIonParams.nAlign = -1;
            }
        }
    }

    if(pIonBufferParams->eBuffer_type == TILER1D)
    {
        /* ion_alloc_tiler reports the row stride directly into nStride. */
        ret = (OMX_S16)ion_alloc_tiler(nClient, pIonBufferParams->nWidth, pIonBufferParams->nHeight, pIonBufferParams->eTiler_format, sIonParams.alloc_flags, &temp, &(pIonBufferProp->nStride));
        if (ret || ((int)temp == -ENOMEM))
        {
            DOMX_ERROR("FAILED to allocate buffer of size=%d. ret=0x%x",pIonBufferParams->nWidth, ret);
            eError = MEMPLUGIN_ERROR_NORESOURCES;
            goto EXIT;
        }
    }
    else if(pIonBufferParams->eBuffer_type == TILER2D)
    {
        DOMX_ERROR("Tiler 2D not implemented");
        eError = MEMPLUGIN_ERROR_NOTIMPLEMENTED;
        goto EXIT;
    }
    else if(!temp)
    {
        DOMX_ERROR("Undefined option for buffer type");
        eError = MEMPLUGIN_ERROR_UNDEFINED;
        goto EXIT;
    }

    pIonBufferProp->sBuffer_accessor.pBufferHandle = (OMX_PTR)temp;
    /* FIX: previously this unconditionally assigned an uninitialized local
     * to nStride; non-tiled buffers have no stride, so report 0. */
    if(pIonBufferParams->eBuffer_type == DEFAULT)
    {
        pIonBufferProp->nStride = 0;
    }

    if(pIonBufferParams->bMap == OMX_TRUE)
    {
        /* Map the buffer into this process and export a dma-buf fd. */
        ret = (OMX_S16) ion_map(nClient, pIonBufferProp->sBuffer_accessor.pBufferHandle, pIonBufferParams->nWidth*pIonBufferParams->nHeight, sIonParams.prot, sIonParams.map_flags, sIonParams.nOffset, (unsigned char **) &(pIonBufferProp->sBuffer_accessor.pBufferMappedAddress), &(pIonBufferProp->sBuffer_accessor.bufferFd));
        if(ret < 0)
        {
            DOMX_ERROR("userspace mapping of ION buffers returned error");
            eError = MEMPLUGIN_ERROR_NORESOURCES;
            goto EXIT;
        }
    }
    else
    {
        /* Share-only: export a dma-buf fd without mapping locally. */
        ret = (OMX_S16) ion_share(nClient, pIonBufferProp->sBuffer_accessor.pBufferHandle, &(pIonBufferProp->sBuffer_accessor.bufferFd));
        if(ret < 0)
        {
            DOMX_ERROR("ION share returned error");
            eError = MEMPLUGIN_ERROR_NORESOURCES;
            goto EXIT;
        }
    }

EXIT:
    if (eError != MEMPLUGIN_ERROR_NONE)
    {
        DOMX_EXIT("%s exited with error 0x%x",__FUNCTION__,eError);
        return eError;
    }
    else
    {
        DOMX_EXIT("%s executed successfully",__FUNCTION__);
        return MEMPLUGIN_ERROR_NONE;
    }
}
//static int gralloc_alloc_buffer(alloc_device_t* dev, size_t size, int usage, buffer_handle_t* pHandle, bool reserve) static int gralloc_alloc_buffer(alloc_device_t* dev, size_t size, int usage, buffer_handle_t* pHandle, int reserve) { #if GRALLOC_ARM_DMA_BUF_MODULE { private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module); ion_user_handle_t ion_hnd; unsigned char *cpu_ptr; int shared_fd; int ret; unsigned int heap_mask; int Ion_type; bool Ishwc = false; int Ion_flag = 0; if(usage == (GRALLOC_USAGE_HW_COMPOSER|GRALLOC_USAGE_HW_RENDER)) Ishwc = true; //ret = ion_alloc(m->ion_client, size, 0, ION_HEAP_SYSTEM_MASK, 0, &ion_hnd); #ifdef USE_X86 if(usage & (GRALLOC_USAGE_SW_READ_MASK | GRALLOC_USAGE_SW_WRITE_MASK)) Ion_flag = (ION_FLAG_CACHED|ION_FLAG_CACHED_NEEDS_SYNC); if(is_out_log()) ALOGD("usage=%x,protect=%x,ion_flag=%x,mmu=%d",usage,GRALLOC_USAGE_PROTECTED,Ion_flag,g_MMU_stat); if (usage & GRALLOC_USAGE_PROTECTED) //secrue memery { unsigned long phys; ret = ion_secure_alloc(m->ion_client, size,&phys); //ALOGD("secure_alloc ret=%d,phys=%x",ret,(int)phys); if(ret != 0) { AERR("Failed to ion_alloc from ion_client:%d, size: %d", m->ion_client, size); return -1; } private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_ION, usage, size, 0, 0); if (NULL != hnd) { hnd->share_fd = 0; hnd->ion_hnd = 0; hnd->type = 0; hnd->phy_addr = (int)phys; *pHandle = hnd; if(is_out_log()) ALOGD("secure_alloc_ok phy=%x",usage,hnd->phy_addr); return 0; } else { AERR("Gralloc out of mem for ion_client:%d", m->ion_client); } close(shared_fd); return -1; } #endif //ret = ion_alloc(m->ion_client, size, 0, ION_HEAP_SYSTEM_MASK, 0, &ion_hnd); #ifdef USE_X86 if(g_MMU_stat && ((usage&GRALLOC_USAGE_HW_CAMERA_WRITE)==0) && !(usage & GRALLOC_USAGE_PRIVATE_2) && !Ishwc) #else if(g_MMU_stat) #endif { heap_mask = ION_HEAP(ION_VMALLOC_HEAP_ID); #ifdef USE_X86 if (usage & GRALLOC_USAGE_PRIVATE_2) { heap_mask |= ION_HEAP(ION_SECURE_HEAP_ID); } #endif ret = 
ion_alloc(m->ion_client, size, 0, heap_mask, Ion_flag, &ion_hnd); Ion_type = 1; } else { heap_mask = ION_HEAP(ION_CMA_HEAP_ID); #ifdef USE_X86 if (usage & GRALLOC_USAGE_PRIVATE_2) { heap_mask |= ION_HEAP(ION_SECURE_HEAP_ID); } #endif if (usage == (GRALLOC_USAGE_HW_CAMERA_WRITE|GRALLOC_USAGE_SW_READ_OFTEN)) { ret = ion_alloc(m->ion_client, size, 0,heap_mask, (ION_FLAG_CACHED|ION_FLAG_CACHED_NEEDS_SYNC), &ion_hnd); } else { ret = ion_alloc(m->ion_client, size, 0,heap_mask, Ion_flag, &ion_hnd); } #ifdef USE_X86 if(g_MMU_stat && Ishwc) { Ion_type = 1; } else #endif Ion_type = 0; } if (ret != 0) { if( (heap_mask & ION_HEAP(ION_CMA_HEAP_ID)) #ifdef USE_X86 && !Ishwc #endif ) { #ifdef BOARD_WITH_IOMMU heap_mask = ION_HEAP(ION_VMALLOC_HEAP_ID); #else heap_mask = ION_HEAP(ION_CARVEOUT_HEAP_ID); #endif ret = ion_alloc(m->ion_client, size, 0, heap_mask, 0, &ion_hnd ); { if( ret != 0) { AERR("Force to VMALLOC fail ion_client:%d", m->ion_client); return -1; } else { ALOGD("Force to VMALLOC sucess !"); Ion_type = 1; } } } else { AERR("Failed to ion_alloc from ion_client:%d, size: %d", m->ion_client, size); return -1; } } ret = ion_share(m->ion_client, ion_hnd, &shared_fd); if (ret != 0) { AERR("ion_share( %d ) failed", m->ion_client); if (0 != ion_free(m->ion_client, ion_hnd)) { AERR("ion_free( %d ) failed", m->ion_client); } return -1; } cpu_ptr = (unsigned char *)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, shared_fd, 0); #ifdef USE_X86 //memset(cpu_ptr, 0, size); #endif if (MAP_FAILED == cpu_ptr) { AERR("ion_map( %d ) failed", m->ion_client); if (0 != ion_free(m->ion_client, ion_hnd)) { AERR("ion_free( %d ) failed", m->ion_client); } close(shared_fd); return -1; } private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_ION, usage, size, (int)cpu_ptr, private_handle_t::LOCK_STATE_MAPPED); if (NULL != hnd) { unsigned long cma_phys = 0; hnd->share_fd = shared_fd; hnd->ion_hnd = ion_hnd; hnd->type = Ion_type; if(!Ion_type) { int pret; pret = 
ion_get_phys(m->ion_client, ion_hnd, &cma_phys); //ALOGD("ion_get_phy ret=%d,cma_phys=%x",pret,cma_phys); } hnd->phy_addr = (int)cma_phys; *pHandle = hnd; if(is_out_log()) ALOGD("alloc_info fd[%d],type=%d,phy=%x",hnd->share_fd,hnd->type,hnd->phy_addr); return 0; } else { AERR("Gralloc out of mem for ion_client:%d", m->ion_client); } close(shared_fd); ret = munmap(cpu_ptr, size); if (0 != ret) { AERR("munmap failed for base:%p size: %d", cpu_ptr, size); } ret = ion_free(m->ion_client, ion_hnd); if (0 != ret) { AERR("ion_free( %d ) failed", m->ion_client); } return -1; } #endif #if GRALLOC_ARM_UMP_MODULE { ump_handle ump_mem_handle; void *cpu_ptr; ump_secure_id ump_id; int constraints; size = round_up_to_page_size(size); if ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN) { constraints = UMP_REF_DRV_CONSTRAINT_USE_CACHE; } else { constraints = UMP_REF_DRV_CONSTRAINT_NONE; } if ( reserve & 0x01) { constraints |= UMP_REF_DRV_CONSTRAINT_PRE_RESERVE; } if( reserve & 0x02) { constraints |= UMP_REF_DRV_UK_CONSTRAINT_MEM_SWITCH; } #ifdef GRALLOC_SIMULATE_FAILURES /* if the failure condition matches, fail this iteration */ if (__ump_alloc_should_fail()) { ump_mem_handle = UMP_INVALID_MEMORY_HANDLE; } else #endif { ump_mem_handle = ump_ref_drv_allocate(size, (ump_alloc_constraints)constraints); if (UMP_INVALID_MEMORY_HANDLE != ump_mem_handle) { cpu_ptr = ump_mapped_pointer_get(ump_mem_handle); if (NULL != cpu_ptr) { ump_id = ump_secure_id_get(ump_mem_handle); if (UMP_INVALID_SECURE_ID != ump_id) { private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_UMP, usage, size, (int)cpu_ptr, private_handle_t::LOCK_STATE_MAPPED, ump_id, ump_mem_handle); if (NULL != hnd) { #ifdef USE_LCDC_COMPOSER if( reserve & 0x02) { hnd->phy_addr = 0; } else { hnd->phy_addr = ump_phy_addr_get(ump_mem_handle); } #endif *pHandle = hnd; return 0; } else { AERR("gralloc_alloc_buffer() failed to allocate handle. 
ump_handle = %p, ump_id = %d", ump_mem_handle, ump_id); } } else { AERR("gralloc_alloc_buffer() failed to retrieve valid secure id. ump_handle = %p", ump_mem_handle); } ump_mapped_pointer_release(ump_mem_handle); } else { AERR("gralloc_alloc_buffer() failed to map UMP memory. ump_handle = %p", ump_mem_handle); } ump_reference_release(ump_mem_handle); } else { AERR("gralloc_alloc_buffer() failed to allocate UMP memory. size:%d constraints: %d", size, constraints); } } return -1; } #endif