/*
 * Tear down the secure (content-protected) session state: unsecure the ION
 * heaps that were secured at open time and clear the bookkeeping flag.
 * A no-op unless a secure session is active and the heaps are marked secured.
 * Returns 0 on success or nothing-to-do, otherwise the error from
 * res_trk_enable_iommu_clocks().
 */
int res_trk_close_secure_session()
{
	int rc;

	/* Nothing to unsecure unless a secure session secured the heaps. */
	if (res_trk_check_for_sec_session() != 1 ||
	    !resource_context.sec_clk_heap)
		return 0;

	pr_err("Unsecuring....\n");
	mutex_lock(&resource_context.secure_lock);
	rc = res_trk_enable_iommu_clocks();
	if (rc) {
		pr_err("IOMMU clock enabled failed while close\n");
		goto error_close;
	}
	msm_ion_unsecure_heap(ION_HEAP(resource_context.cmd_mem_type));
	msm_ion_unsecure_heap(ION_HEAP(resource_context.memtype));
	if (resource_context.vidc_platform_data->secure_wb_heap)
		msm_ion_unsecure_heap(ION_HEAP(ION_CP_WB_HEAP_ID));
	res_trk_disable_iommu_clocks();
	resource_context.sec_clk_heap = 0;
	mutex_unlock(&resource_context.secure_lock);
	return 0;
error_close:
	mutex_unlock(&resource_context.secure_lock);
	return rc;
}
/*
 * Pick a memory type for the requested resource class
 * (resource_context.res_mem_type).  With ION enabled this is the matching
 * ION heap mask; otherwise it falls back to MEMTYPE_SMI_KERNEL.
 * Returns -1 for an unknown resource class.
 */
int res_trk_get_mem_type(void)
{
	int ion_heap;

	switch (resource_context.res_mem_type) {
	case DDL_FW_MEM:
		ion_heap = ION_HEAP(ION_MM_FIRMWARE_HEAP_ID);
		break;
	case DDL_MM_MEM:
		ion_heap = ION_HEAP(ION_CP_MM_HEAP_ID);
		break;
	case DDL_CMD_MEM:
		ion_heap = ION_HEAP(ION_CP_MFC_HEAP_ID);
		break;
	default:
		return -1;
	}
	/* All classes share the same ION vs. SMI fallback rule. */
	return res_trk_get_enable_ion() ? ion_heap : MEMTYPE_SMI_KERNEL;
}
/*
 * Map the resource-tracker request class (resource_context.res_mem_type)
 * to a memory/heap type value.
 *
 * DDL_FW_MEM returns the firmware heap wrapped with ION_HEAP() immediately.
 * DDL_MM_MEM / DDL_CMD_MEM select a raw heap id first; when ION is enabled
 * in the platform data, that id is converted to an ION heap mask and OR'ed
 * with ION_SECURE (secure sessions) or ION_IOMMU_HEAP_ID (non-secure).
 * Unknown classes return -1.
 */
int res_trk_get_mem_type(void)
{
	int mem_type = -1;

	switch (resource_context.res_mem_type) {
	case DDL_FW_MEM:
		/* Firmware memory returns here; the ION/secure
		 * post-processing below is skipped for this class. */
		mem_type = ION_HEAP(resource_context.fw_mem_type);
		return mem_type;
	case DDL_MM_MEM:
		mem_type = resource_context.memtype;
		break;
	case DDL_CMD_MEM:
		/* Command memory uses a dedicated heap only when the
		 * session is secure. */
		if (res_trk_check_for_sec_session())
			mem_type = resource_context.cmd_mem_type;
		else
			mem_type = resource_context.memtype;
		break;
	default:
		return mem_type;
	}
	if (resource_context.vidc_platform_data->enable_ion) {
		if (res_trk_check_for_sec_session()) {
			mem_type = ION_HEAP(mem_type);
			/* NOTE(review): the DDL_FW_MEM comparison below is
			 * dead code — that case returned early above, so the
			 * first branch is always taken here. */
			if (resource_context.res_mem_type != DDL_FW_MEM)
				mem_type |= ION_SECURE;
			else if (res_trk_is_cp_enabled())
				mem_type |= ION_SECURE;
		} else
			mem_type = (ION_HEAP(mem_type) |
				ION_HEAP(ION_IOMMU_HEAP_ID));
	}
	return mem_type;
}
/*
 * Secure the ION heaps used by a protected video session: the main memtype
 * heap, the command heap, and (when configured) the write-back heap.
 * IOMMU clocks must be running while (un)securing and are switched off
 * again before returning.  Idempotent: does nothing if the heaps were
 * already secured (resource_context.sec_clk_heap set).
 *
 * Returns 0 on success or when already secured, -EINVAL when no secure
 * session is active, otherwise the error from the failing step, with all
 * previously secured heaps unsecured again via the unwind chain below.
 */
int res_trk_open_secure_session()
{
	int rc, memtype;

	if (!res_trk_check_for_sec_session()) {
		pr_err("Secure sessions are not active\n");
		return -EINVAL;
	}
	mutex_lock(&resource_context.secure_lock);
	if (!resource_context.sec_clk_heap) {
		pr_err("Securing...\n");
		rc = res_trk_enable_iommu_clocks();
		if (rc) {
			pr_err("IOMMU clock enabled failed while open");
			goto error_open;
		}
		memtype = ION_HEAP(resource_context.memtype);
		rc = msm_ion_secure_heap(memtype);
		if (rc) {
			pr_err("ION heap secure failed heap id %d rc %d\n",
				resource_context.memtype, rc);
			goto disable_iommu_clks;
		}
		memtype = ION_HEAP(resource_context.cmd_mem_type);
		rc = msm_ion_secure_heap(memtype);
		if (rc) {
			pr_err("ION heap secure failed heap id %d rc %d\n",
				resource_context.cmd_mem_type, rc);
			goto unsecure_memtype_heap;
		}
		if (resource_context.vidc_platform_data->secure_wb_heap) {
			memtype = ION_HEAP(ION_CP_WB_HEAP_ID);
			rc = msm_ion_secure_heap(memtype);
			if (rc) {
				pr_err("WB_HEAP_ID secure failed rc %d\n", rc);
				goto unsecure_cmd_heap;
			}
		}
		resource_context.sec_clk_heap = 1;
		res_trk_disable_iommu_clocks();
	}
	mutex_unlock(&resource_context.secure_lock);
	return 0;
	/* Unwind labels: fall through in reverse order of securing. */
unsecure_cmd_heap:
	msm_ion_unsecure_heap(ION_HEAP(resource_context.cmd_mem_type));
unsecure_memtype_heap:
	msm_ion_unsecure_heap(ION_HEAP(resource_context.memtype));
disable_iommu_clks:
	res_trk_disable_iommu_clocks();
error_open:
	resource_context.sec_clk_heap = 0;
	mutex_unlock(&resource_context.secure_lock);
	return rc;
}
int alloc_ion_mem(unsigned int size) { if (!overlay_supported) return -EINVAL; int result; struct ion_fd_data fd_data; struct ion_allocation_data ionAllocData; mem_info.ion_fd = open("/dev/ion", O_RDWR|O_DSYNC); if (mem_info.ion_fd < 0) { perror("ERROR: Can't open ion "); return -errno; } ionAllocData.flags = 0; ionAllocData.len = size; ionAllocData.align = sysconf(_SC_PAGESIZE); #ifdef NEW_ION_HEAP ionAllocData.heap_id_mask = #else ionAllocData.heap_mask = #endif ION_HEAP(ION_IOMMU_HEAP_ID) | ION_HEAP(ION_SYSTEM_CONTIG_HEAP_ID); result = ioctl(mem_info.ion_fd, ION_IOC_ALLOC, &ionAllocData); if(result){ perror("ION_IOC_ALLOC Failed "); close(mem_info.ion_fd); return result; } fd_data.handle = ionAllocData.handle; mem_info.handle_data.handle = ionAllocData.handle; result = ioctl(mem_info.ion_fd, ION_IOC_MAP, &fd_data); if (result) { perror("ION_IOC_MAP Failed "); free_ion_mem(); return result; } mem_info.mem_buf = (unsigned char *)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd_data.fd, 0); mem_info.mem_fd = fd_data.fd; if (!mem_info.mem_buf) { perror("ERROR: mem_buf MAP_FAILED "); free_ion_mem(); return -ENOMEM; } return 0; }
static int alloc_ion_mem(struct fb_qcom_overlay_data *data, unsigned int size) { int result; struct ion_fd_data fd_data; struct ion_allocation_data ionAllocData; data->ion_fd = open("/dev/ion", O_RDWR|O_DSYNC); if (data->ion_fd < 0) { ERROR("ERROR: Can't open ion "); return -errno; } ionAllocData.flags = 0; ionAllocData.len = size; ionAllocData.align = sysconf(_SC_PAGESIZE); ionAllocData.heap_mask = ION_HEAP(ION_IOMMU_HEAP_ID) | ION_HEAP(21); // ION_SYSTEM_CONTIG_HEAP_ID result = ioctl(data->ion_fd, ION_IOC_ALLOC, &ionAllocData); if(result) { ERROR("ION_IOC_ALLOC Failed "); close(data->ion_fd); return result; } fd_data.handle = ionAllocData.handle; data->handle_data.handle = ionAllocData.handle; result = ioctl(data->ion_fd, ION_IOC_MAP, &fd_data); if (result) { ERROR("ION_IOC_MAP Failed "); free_ion_mem(data); return result; } data->mem_buf = (uint8_t*)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd_data.fd, 0); data->mem_fd = fd_data.fd; if (!data->mem_buf) { ERROR("ERROR: mem_buf MAP_FAILED "); free_ion_mem(data); return -ENOMEM; } return 0; }
int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version, void *data) { struct rb_node *n; int ret_val = 0; /* * traverse the list of heaps available in this system * and find the heap that is specified. */ mutex_lock(&dev->lock); for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) { struct ion_heap *heap = rb_entry(n, struct ion_heap, node); if (heap->type != ION_HEAP_TYPE_CP) continue; if (ION_HEAP(heap->id) != heap_id) continue; if (heap->ops->secure_heap) ret_val = heap->ops->unsecure_heap(heap, version, data); else ret_val = -EINVAL; break; } mutex_unlock(&dev->lock); return ret_val; }
/*
 * Allocate shared memory from the ION CP_MM heap for a smem client, resolve
 * its physical address and map it into the kernel.  On success fills *mem
 * (handle, paddr, device_addr, size, kvaddr) and returns 0; on failure the
 * partially acquired resources are released and a negative errno returned.
 *
 * BUG FIX: the failure pr_err used %d for a size_t — use %zu (and %u for
 * the u32 align) so the log is correct on 64-bit builds.
 */
static int alloc_ion_mem(struct smem_client *client, size_t size,
		u32 align, u32 flags, struct msm_smem *mem)
{
	struct ion_handle *hndl;
	size_t len;
	int rc = 0;

	/* Force the allocation onto the multimedia CP heap. */
	flags = flags | ION_HEAP(ION_CP_MM_HEAP_ID);
	hndl = ion_alloc(client->clnt, size, align, flags);
	if (IS_ERR_OR_NULL(hndl)) {
		pr_err("Failed to allocate shared memory = %p, %zu, %u, 0x%x\n",
			client, size, align, flags);
		rc = -ENOMEM;
		goto fail_shared_mem_alloc;
	}
	mem->mem_type = client->mem_type;
	mem->smem_priv = hndl;
	if (ion_phys(client->clnt, hndl, &mem->paddr, &len)) {
		pr_err("Failed to get physical address\n");
		rc = -EIO;
		goto fail_map;
	}
	/* No IOMMU mapping here: the device address is the physical one. */
	mem->device_addr = mem->paddr;
	mem->size = size;
	mem->kvaddr = ion_map_kernel(client->clnt, hndl, 0);
	if (!mem->kvaddr) {
		pr_err("Failed to map shared mem in kernel\n");
		rc = -EIO;
		goto fail_map;
	}
	return rc;
fail_map:
	ion_free(client->clnt, hndl);
fail_shared_mem_alloc:
	return rc;
}
/*
 * Secure the content-protected (CP) heap identified by heap_id.
 * Walks the device's heap rbtree, finds the matching ION_HEAP_TYPE_CP heap
 * and invokes its secure_heap op.  Returns the op's result, -EINVAL when
 * the heap has no secure_heap op, or 0 if no matching heap was found.
 *
 * On the LGE L9II build dev->lock is an rwsem (taken for write); elsewhere
 * it is a mutex — the two ifdef pairs below must stay matched.
 */
int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
			void *data)
{
	struct rb_node *n;
	int ret_val = 0;

	/*
	 * traverse the list of heaps available in this system
	 * and find the heap that is specified.
	 */
#if defined(CONFIG_MACH_LGE_L9II_OPEN_EU)
	down_write(&dev->lock);
#else
	mutex_lock(&dev->lock);
#endif
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		if (heap->type != ION_HEAP_TYPE_CP)
			continue;
		if (ION_HEAP(heap->id) != heap_id)
			continue;
		if (heap->ops->secure_heap)
			ret_val = heap->ops->secure_heap(heap, version, data);
		else
			ret_val = -EINVAL;
		break;
	}
#if defined(CONFIG_MACH_LGE_L9II_OPEN_EU)
	up_write(&dev->lock);
#else
	mutex_unlock(&dev->lock);
#endif
	return ret_val;
}
/*
 * Allocate 4K-aligned shared memory from the ION CP_MM heap, map it into
 * the kernel and into the given IOMMU domain/partition.  On success fills
 * *mem (handle, kvaddr, device_addr = iova, size) and returns 0; on failure
 * everything acquired so far is released and a negative errno returned.
 *
 * BUG FIX: the failure pr_err used %d for a size_t and 0x%x for an
 * unsigned long — use %zu / %u / 0x%lx so the log is correct on 64-bit.
 */
static int alloc_ion_mem(struct smem_client *client, size_t size,
		u32 align, u32 flags, int domain, int partition,
		struct msm_smem *mem)
{
	struct ion_handle *hndl;
	unsigned long iova = 0;
	unsigned long buffer_size = 0;
	unsigned long ionflags = 0;
	unsigned long heap_mask = 0;
	int rc = 0;

	if (flags == SMEM_CACHED)
		ionflags = ION_SET_CACHED(ionflags);
	else
		ionflags = ION_SET_UNCACHED(ionflags);

	heap_mask = ION_HEAP(ION_CP_MM_HEAP_ID);
	/* Enforce at least page alignment and page-granular size. */
	if (align < 4096)
		align = 4096;
	size = (size + 4095) & (~4095);
	pr_debug("\n in %s domain: %d, Partition: %d\n",
		__func__, domain, partition);
	hndl = ion_alloc(client->clnt, size, align, heap_mask, ionflags);
	if (IS_ERR_OR_NULL(hndl)) {
		pr_err("Failed to allocate shared memory = %p, %zu, %u, 0x%lx\n",
			client, size, align, ionflags);
		rc = -ENOMEM;
		goto fail_shared_mem_alloc;
	}
	mem->mem_type = client->mem_type;
	mem->smem_priv = hndl;
	mem->domain = domain;
	mem->partition_num = partition;
	mem->kvaddr = ion_map_kernel(client->clnt, hndl);
	if (!mem->kvaddr) {
		pr_err("Failed to map shared mem in kernel\n");
		rc = -EIO;
		goto fail_map;
	}
	rc = get_device_address(client->clnt, hndl, mem->domain,
		mem->partition_num, align, &iova, &buffer_size);
	if (rc) {
		pr_err("Failed to get device address: %d\n", rc);
		goto fail_device_address;
	}
	mem->device_addr = iova;
	pr_debug("device_address = 0x%lx, kvaddr = 0x%p\n",
		mem->device_addr, mem->kvaddr);
	mem->size = size;
	return rc;
fail_device_address:
	ion_unmap_kernel(client->clnt, hndl);
fail_map:
	ion_free(client->clnt, hndl);
fail_shared_mem_alloc:
	return rc;
}
/*
 * Close the secure session: unsecure both ION heaps (main and command) with
 * the IOMMU clocks temporarily enabled, then clear the session flag.
 * Returns 0 on success, or the error from res_trk_enable_iommu_clocks().
 */
int res_trk_close_secure_session()
{
	int rc;

	mutex_lock(&resource_context.secure_lock);
	rc = res_trk_enable_iommu_clocks();
	if (rc) {
		pr_err("IOMMU clock enabled failed while close");
		mutex_unlock(&resource_context.secure_lock);
		return rc;
	}
	msm_ion_unsecure_heap(ION_HEAP(resource_context.memtype));
	msm_ion_unsecure_heap(ION_HEAP(resource_context.cmd_mem_type));
	res_trk_disable_iommu_clocks();
	resource_context.secure_session = 0;
	mutex_unlock(&resource_context.secure_lock);
	return 0;
}
/*
 * Return the memory type to allocate from: the configured ION heap mask
 * when ION is enabled, otherwise the platform's PMEM memtype.
 */
u32 res_trk_get_mem_type(void)
{
	if (res_trk_get_enable_ion())
		return ION_HEAP(resource_context.memtype);
	return resource_context.vidc_platform_data->memtype_pmem;
}
/*
 * Secure the ION heaps (main memtype and command heap) for a protected
 * session, with the IOMMU clocks enabled for the duration.
 * Returns 0 on success or when no secure session is active, otherwise a
 * negative error with any partially secured heaps unsecured again.
 *
 * BUG FIX: the return values of msm_ion_secure_heap() were ignored — a
 * failed secure was reported as success and left the heaps in an
 * inconsistent state.  Each call is now checked and unwound on failure.
 */
int res_trk_open_secure_session()
{
	int rc;

	if (res_trk_check_for_sec_session() == 1) {
		mutex_lock(&resource_context.secure_lock);
		pr_err("Securing...\n");
		rc = res_trk_enable_iommu_clocks();
		if (rc) {
			pr_err("IOMMU clock enabled failed while open");
			goto error_open;
		}
		rc = msm_ion_secure_heap(ION_HEAP(resource_context.memtype));
		if (rc) {
			pr_err("ION heap secure failed heap id %d rc %d\n",
				resource_context.memtype, rc);
			goto disable_iommu_clks;
		}
		rc = msm_ion_secure_heap(
				ION_HEAP(resource_context.cmd_mem_type));
		if (rc) {
			pr_err("ION heap secure failed heap id %d rc %d\n",
				resource_context.cmd_mem_type, rc);
			goto unsecure_memtype_heap;
		}
		res_trk_disable_iommu_clocks();
		mutex_unlock(&resource_context.secure_lock);
	}
	return 0;
	/* Unwind in reverse order of securing. */
unsecure_memtype_heap:
	msm_ion_unsecure_heap(ION_HEAP(resource_context.memtype));
disable_iommu_clks:
	res_trk_disable_iommu_clocks();
error_open:
	mutex_unlock(&resource_context.secure_lock);
	return rc;
}
/*
 * Open a secure session: mark the session active, then secure the main and
 * command ION heaps with the IOMMU clocks enabled.  Returns 0 on success,
 * -EBUSY if a secure session is already open, otherwise a negative error.
 *
 * BUG FIXES:
 *  - secure_session was left set to 1 when a later step failed, so every
 *    subsequent open returned -EBUSY forever; it is now cleared on the
 *    failure paths that own the session (but NOT on -EBUSY, where another
 *    caller owns it).
 *  - the msm_ion_secure_heap() return values were ignored; failures are
 *    now checked and the first heap unsecured before returning.
 */
int res_trk_open_secure_session()
{
	int rc;

	mutex_lock(&resource_context.secure_lock);
	if (resource_context.secure_session) {
		pr_err("Secure session already open");
		rc = -EBUSY;
		goto error_open;
	}
	resource_context.secure_session = 1;
	rc = res_trk_enable_iommu_clocks();
	if (rc) {
		pr_err("IOMMU clock enabled failed while open");
		goto error_clear;
	}
	rc = msm_ion_secure_heap(ION_HEAP(resource_context.memtype));
	if (rc) {
		pr_err("ION heap secure failed heap id %d rc %d\n",
			resource_context.memtype, rc);
		goto disable_iommu_clks;
	}
	rc = msm_ion_secure_heap(ION_HEAP(resource_context.cmd_mem_type));
	if (rc) {
		pr_err("ION heap secure failed heap id %d rc %d\n",
			resource_context.cmd_mem_type, rc);
		goto unsecure_memtype_heap;
	}
	res_trk_disable_iommu_clocks();
	mutex_unlock(&resource_context.secure_lock);
	return 0;
	/* Unwind in reverse order of acquisition. */
unsecure_memtype_heap:
	msm_ion_unsecure_heap(ION_HEAP(resource_context.memtype));
disable_iommu_clks:
	res_trk_disable_iommu_clocks();
error_clear:
	resource_context.secure_session = 0;
error_open:
	mutex_unlock(&resource_context.secure_lock);
	return rc;
}
uint8_t *do_mmap_ion(int ion_fd, struct ion_allocation_data *alloc, struct ion_fd_data *ion_info_fd, int *mapFd) { void *ret; /* returned virtual address */ int rc = 0; struct ion_handle_data handle_data; /* to make it page size aligned */ alloc->len = (alloc->len + 4095) & (~4095); #ifdef TARGET_7x27A alloc->flags = ION_HEAP(CAMERA_ION_HEAP_ID); #endif rc = ioctl(ion_fd, ION_IOC_ALLOC, alloc); if (rc < 0) { CDBG_ERROR("ION allocation failed\n"); goto ION_ALLOC_FAILED; } ion_info_fd->handle = alloc->handle; rc = ioctl(ion_fd, ION_IOC_SHARE, ion_info_fd); if (rc < 0) { CDBG_ERROR("ION map failed %s\n", strerror(errno)); goto ION_MAP_FAILED; } *mapFd = ion_info_fd->fd; ret = mmap(NULL, alloc->len, PROT_READ | PROT_WRITE, MAP_SHARED, *mapFd, 0); if (ret == MAP_FAILED) { CDBG_ERROR("ION_MMAP_FAILED: %s (%d)\n", strerror(errno), errno); goto ION_MAP_FAILED; } return ret; ION_MAP_FAILED: handle_data.handle = ion_info_fd->handle; ioctl(ion_fd, ION_IOC_FREE, &handle_data); ION_ALLOC_FAILED: return NULL; }
/**
 * Allocate memory for channel output of specific TSIF.
 *
 * @tsif: The TSIF id to which memory should be allocated.
 *
 * Allocates an uncached ION buffer sized for the TSIF's descriptor ring
 * (buffer_count * TSPP_DESCRIPTOR_SIZE, 4K-aligned), maps it into the
 * kernel and records its physical base.  Any partial allocation is undone
 * via mpq_dmx_channel_mem_free() before returning an error.
 *
 * Return error status
 */
static int mpq_dmx_channel_mem_alloc(int tsif)
{
	int result;
	size_t len;

	MPQ_DVB_DBG_PRINT("%s(%d)\n", __func__, tsif);

	mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle =
		ion_alloc(mpq_dmx_tspp_info.ion_client,
		 (mpq_dmx_tspp_info.tsif[tsif].buffer_count *
		  TSPP_DESCRIPTOR_SIZE),
		  SZ_4K,
		  ION_HEAP(tspp_out_ion_heap),
		  0); /* non-cached */

	if (IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle)) {
		MPQ_DVB_ERR_PRINT("%s: ion_alloc() failed\n", __func__);
		mpq_dmx_channel_mem_free(tsif);
		return -ENOMEM;
	}

	/* save virtual base address of heap */
	mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_virt_base =
		ion_map_kernel(mpq_dmx_tspp_info.ion_client,
			mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle);
	if (IS_ERR_OR_NULL(mpq_dmx_tspp_info.tsif[tsif].
				ch_mem_heap_virt_base)) {
		MPQ_DVB_ERR_PRINT("%s: ion_map_kernel() failed\n", __func__);
		mpq_dmx_channel_mem_free(tsif);
		return -ENOMEM;
	}

	/* save physical base address of heap */
	result = ion_phys(mpq_dmx_tspp_info.ion_client,
		mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_handle,
		&(mpq_dmx_tspp_info.tsif[tsif].ch_mem_heap_phys_base), &len);
	if (result < 0) {
		MPQ_DVB_ERR_PRINT("%s: ion_phys() failed\n", __func__);
		mpq_dmx_channel_mem_free(tsif);
		return -ENOMEM;
	}

	return 0;
}
int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version, void *data) { struct rb_node *n; int ret_val = 0; mutex_lock(&dev->lock); for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) { struct ion_heap *heap = rb_entry(n, struct ion_heap, node); if (heap->type != ION_HEAP_TYPE_CP) continue; if (ION_HEAP(heap->id) != heap_id) continue; if (heap->ops->secure_heap) ret_val = heap->ops->unsecure_heap(heap, version, data); else ret_val = -EINVAL; break; } mutex_unlock(&dev->lock); return ret_val; }
static int32_t qcom_km_ION_memalloc(struct qcom_km_ion_info_t *handle, uint32_t size) { int32_t ret = 0; int32_t iret = 0; int32_t fd = 0; unsigned char *v_addr; struct ion_allocation_data ion_alloc_data; int32_t ion_fd; int32_t rc; struct ion_fd_data ifd_data; struct ion_handle_data handle_data; /* open ION device for memory management * O_DSYNC -> uncached memory */ if(handle == NULL){ ALOGE("Error:: null handle received"); return -1; } ion_fd = open("/dev/ion", O_RDONLY | O_DSYNC); if (ion_fd < 0) { ALOGE("Error::Cannot open ION device"); return -1; } handle->ion_sbuffer = NULL; handle->ifd_data_fd = 0; /* Size of allocation */ ion_alloc_data.len = (size + 4095) & (~4095); /* 4K aligned */ ion_alloc_data.align = 4096; /* memory is allocated from EBI heap */ ion_alloc_data.heap_mask= ION_HEAP(ION_QSECOM_HEAP_ID); /* Set the memory to be uncached */ ion_alloc_data.flags = 0; /* IOCTL call to ION for memory request */ rc = ioctl(ion_fd, ION_IOC_ALLOC, &ion_alloc_data); if (rc) { ret = -1; goto alloc_fail; } if (ion_alloc_data.handle != NULL) { ifd_data.handle = ion_alloc_data.handle; } else { ret = -1; goto alloc_fail; } /* Call MAP ioctl to retrieve the ifd_data.fd file descriptor */ rc = ioctl(ion_fd, ION_IOC_MAP, &ifd_data); if (rc) { ret = -1; goto ioctl_fail; } /* Make the ion mmap call */ v_addr = (unsigned char *)mmap(NULL, ion_alloc_data.len, PROT_READ | PROT_WRITE, MAP_SHARED, ifd_data.fd, 0); if (v_addr == MAP_FAILED) { ALOGE("Error::ION MMAP failed"); ret = -1; goto map_fail; } handle->ion_fd = ion_fd; handle->ifd_data_fd = ifd_data.fd; handle->ion_sbuffer = v_addr; handle->ion_alloc_handle.handle = ion_alloc_data.handle; handle->sbuf_len = size; return ret; map_fail: if (handle->ion_sbuffer != NULL) { iret = munmap(handle->ion_sbuffer, ion_alloc_data.len); if (iret) ALOGE("Error::Failed to unmap memory for load image. 
ret = %d", ret); } ioctl_fail: handle_data.handle = ion_alloc_data.handle; if (handle->ifd_data_fd) close(handle->ifd_data_fd); iret = ioctl(ion_fd, ION_IOC_FREE, &handle_data); if (iret) { ALOGE("Error::ION FREE ioctl returned error = %d",iret); } alloc_fail: if (ion_fd > 0) close(ion_fd); return ret; }
static int ion_memalloc(struct ion_buf_handle *buf, uint32_t size, uint32_t heap) { struct ion_allocation_data alloc_data; struct ion_fd_data fd_data; unsigned char *va; struct ion_handle_data handle_data; int ion_fd; int rc; ion_fd = open("/dev/ion", O_RDONLY); if (ion_fd < 0) { fprintf(stderr, "Cannot open ION device (%s)\n", strerror(errno)); return -1; } alloc_data.len = (size + 4095) & ~4095; alloc_data.align = 4096; alloc_data.flags = 0; alloc_data.heap_id_mask = ION_HEAP(heap); /* Set the buffers to be uncached */ alloc_data.flags = 0; rc = ioctl(ion_fd, ION_IOC_ALLOC, &alloc_data); if (rc) { fprintf(stderr, "ION buffer allocation failed (%s)\n", strerror(errno)); goto alloc_fail; } if (alloc_data.handle) { fd_data.handle = alloc_data.handle; } else { fprintf(stderr, "ION alloc data returned NULL\n"); rc = -1; goto alloc_fail; } rc = ioctl(ion_fd, ION_IOC_MAP, &fd_data); if (rc) { fprintf(stderr, "ION map call failed(%s)\n", strerror(errno)); goto ioctl_fail; } va = mmap(NULL, alloc_data.len, PROT_READ | PROT_WRITE, MAP_SHARED, fd_data.fd, 0); if (va == MAP_FAILED) { fprintf(stderr, "ION memory map failed (%s)\n", strerror(errno)); rc = -1; goto map_fail; } buf->ion_fd = ion_fd; buf->ifd_data_fd = fd_data.fd; buf->buffer = va; buf->ion_alloc_handle.handle = alloc_data.handle; buf->buffer_len = alloc_data.len; memset(buf->buffer, 0, buf->buffer_len); return 0; map_fail: ioctl_fail: handle_data.handle = alloc_data.handle; if (buf->ifd_data_fd) close(buf->ifd_data_fd); rc = ioctl(ion_fd, ION_IOC_FREE, &handle_data); if (rc) fprintf(stderr, "ION free failed (%s)\n", strerror(errno)); alloc_fail: if (ion_fd >= 0) close(ion_fd); buf->ion_fd = -1; return rc; }
int IonDmaMemManager::createIonBuffer(struct bufferinfo_s* ionbuf) { int ret =0,i = 0; int numBufs; int frame_size; camera_ionbuf_t* tmpalloc = NULL; struct bufferinfo_s* tmp_buf = NULL; #ifdef ROCKCHIP_ION_VERSION ion_user_handle_t handle = 0; #else struct ion_handle* handle = NULL; #endif int map_fd; long temp_handle = 0; unsigned long vir_addr = 0; if (!ionbuf) { LOGE("ion_alloc malloc buffer failed"); return -1; } numBufs = ionbuf->mNumBffers; frame_size = ionbuf->mPerBuffersize; ionbuf->mBufferSizes = numBufs*PAGE_ALIGN(frame_size); switch(ionbuf->mBufType) { case PREVIEWBUFFER: tmpalloc = mPreviewData ; if((tmp_buf = (struct bufferinfo_s*)malloc(numBufs*sizeof(struct bufferinfo_s))) != NULL){ mPreviewBufferInfo = tmp_buf; }else{ LOGE("ion_alloc malloc buffer failed"); return -1; } break; case RAWBUFFER: tmpalloc = mRawData; if((tmp_buf = (struct bufferinfo_s*)malloc(numBufs*sizeof(struct bufferinfo_s))) != NULL){ mRawBufferInfo = tmp_buf; }else{ LOGE("ion_alloc malloc buffer failed"); return -1; } break; case JPEGBUFFER: tmpalloc = mJpegData; if((tmp_buf = (struct bufferinfo_s*)malloc(numBufs*sizeof(struct bufferinfo_s))) != NULL ){ mJpegBufferInfo = tmp_buf; }else{ LOGE("ion_alloc malloc buffer failed"); return -1; } break; case VIDEOENCBUFFER: tmpalloc = mVideoEncData ; if((tmp_buf = (struct bufferinfo_s*)malloc(numBufs*sizeof(struct bufferinfo_s))) != NULL){ mVideoEncBufferInfo = tmp_buf; }else{ LOGE("ion_alloc malloc buffer failed"); return -1; } break; default: return -1; } for(i = 0;i < numBufs;i++){ memset(tmpalloc,0,sizeof(struct camera_ionbuf_s)); if((!mIommuEnabled) || (!ionbuf->mIsForceIommuBuf)) ret = ion_alloc(client_fd, ionbuf->mPerBuffersize, PAGE_SIZE, ION_HEAP(ION_CMA_HEAP_ID), 0, &handle); else ret = ion_alloc(client_fd, ionbuf->mPerBuffersize, PAGE_SIZE, ION_HEAP(ION_VMALLOC_HEAP_ID), 0, &handle); if (ret) { LOGE("ion alloc failed\n"); break; } LOG1("handle %d\n", handle); ret = ion_share(client_fd,handle,&map_fd); if (ret) { LOGE("ion map 
failed\n"); ion_free(client_fd,handle); break; } vir_addr = (unsigned long )mmap(NULL, ionbuf->mPerBuffersize, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0); if (vir_addr == 0) { LOGE("ion mmap failed\n"); ret = -1; ion_free(client_fd,handle); break; } if((!mIommuEnabled) || (!ionbuf->mIsForceIommuBuf)) ion_get_phys(client_fd,handle,&(tmpalloc->phy_addr)); else tmpalloc->phy_addr = map_fd; tmpalloc->size = ionbuf->mPerBuffersize; tmpalloc->vir_addr = vir_addr; temp_handle = handle; tmpalloc->ion_hdl = (void*)temp_handle; tmpalloc->map_fd = map_fd; ionbuf->mPhyBaseAddr = (unsigned long)tmpalloc->phy_addr; ionbuf->mVirBaseAddr = (unsigned long)tmpalloc->vir_addr; ionbuf->mPerBuffersize = PAGE_ALIGN(frame_size); ionbuf->mShareFd = (unsigned int)tmpalloc->map_fd; *tmp_buf = *ionbuf; tmp_buf++; tmpalloc++; } if(ret < 0){ LOGE(" failed !"); while(--i >= 0){ --tmpalloc; --tmp_buf; munmap((void *)tmpalloc->vir_addr, tmpalloc->size); ion_free(client_fd, tmpalloc->ion_hdl); } free(tmpalloc); free(tmp_buf); } return ret; }
static int audpcm_in_open(struct inode *inode, struct file *file) { struct audio_in *audio = &the_audio_in; int rc; int len = 0; unsigned long ionflag = 0; ion_phys_addr_t addr = 0; struct ion_handle *handle = NULL; struct ion_client *client = NULL; int encid; mutex_lock(&audio->lock); if (audio->opened) { rc = -EBUSY; goto done; } /* Settings will be re-config at AUDIO_SET_CONFIG, * but at least we need to have initial config */ audio->mode = MSM_AUD_ENC_MODE_TUNNEL; audio->samp_rate = RPC_AUD_DEF_SAMPLE_RATE_11025; audio->samp_rate_index = AUDREC_CMD_SAMP_RATE_INDX_11025; audio->channel_mode = AUDREC_CMD_STEREO_MODE_MONO; audio->buffer_size = MONO_DATA_SIZE; audio->enc_type = AUDREC_CMD_TYPE_0_INDEX_WAV | audio->mode; rc = audmgr_open(&audio->audmgr); if (rc) goto done; encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name, &audio->queue_ids); if (encid < 0) { MM_ERR("No free encoder available\n"); rc = -ENODEV; goto done; } audio->enc_id = encid; rc = msm_adsp_get(audio->module_name, &audio->audrec, &audrec_adsp_ops, audio); if (rc) { audpreproc_aenc_free(audio->enc_id); goto done; } rc = msm_adsp_get("AUDPREPROCTASK", &audio->audpre, &audpre_adsp_ops, audio); if (rc) { msm_adsp_put(audio->audrec); audpreproc_aenc_free(audio->enc_id); goto done; } audio->dsp_cnt = 0; audio->stopped = 0; audpcm_in_flush(audio); client = msm_ion_client_create(UINT_MAX, "Audio_PCM_in_client"); if (IS_ERR_OR_NULL(client)) { MM_ERR("Unable to create ION client\n"); rc = -ENOMEM; goto client_create_error; } audio->client = client; MM_DBG("allocating mem sz = %d\n", DMASZ); handle = ion_alloc(client, DMASZ, SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID), 0); if (IS_ERR_OR_NULL(handle)) { MM_ERR("Unable to create allocate O/P buffers\n"); rc = -ENOMEM; goto output_buff_alloc_error; } audio->output_buff_handle = handle; rc = ion_phys(client , handle, &addr, &len); if (rc) { MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n", (unsigned int) addr, (unsigned int) len); rc = -ENOMEM; goto 
output_buff_get_phys_error; } else { MM_INFO("O/P buffers:valid phy: %x sz: %x\n", (unsigned int) addr, (unsigned int) len); } audio->phys = (int32_t)addr; rc = ion_handle_get_flags(client, handle, &ionflag); if (rc) { MM_ERR("could not get flags for the handle\n"); rc = -ENOMEM; goto output_buff_get_flags_error; } audio->data = ion_map_kernel(client, handle); if (IS_ERR(audio->data)) { MM_ERR("could not map read buffers,freeing instance 0x%08x\n", (int)audio); rc = -ENOMEM; goto output_buff_map_error; } MM_DBG("read buf: phy addr 0x%08x kernel addr 0x%08x\n", audio->phys, (int)audio->data); file->private_data = audio; audio->opened = 1; rc = 0; done: mutex_unlock(&audio->lock); return rc; output_buff_map_error: output_buff_get_phys_error: output_buff_get_flags_error: ion_free(client, audio->output_buff_handle); output_buff_alloc_error: ion_client_destroy(client); client_create_error: msm_adsp_put(audio->audrec); msm_adsp_put(audio->audpre); audpreproc_aenc_free(audio->enc_id); mutex_unlock(&audio->lock); return rc; }
#include <fcntl.h> #include <sys/ioctl.h> #include <linux/types.h> #include <sys/types.h> #include <ctype.h> #include <linux/input.h> #include <linux/msm_ion.h> #include <sys/mman.h> #include "ion_test_plan.h" #include "ion_test_utils.h" #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) static struct ion_test_data mm_heap_test = { .align = 0x1000, .size = 0x1000, .heap_mask = ION_HEAP(ION_CP_MM_HEAP_ID), }; static struct ion_test_data adv_mm_heap_test = { .align = 0x1000, .size = 0xC0000000, .heap_mask = ION_HEAP(ION_CP_MM_HEAP_ID), }; static struct ion_test_data adv_system_heap_test = { .align = 0x1000, .size = 0x1000, .heap_mask = ION_HEAP(ION_SYSTEM_HEAP_ID), }; static struct ion_test_data *mm_heap_data_settings[] = { [NOMINAL_TEST] = &mm_heap_test, [ADV_TEST] = &adv_mm_heap_test,
/* Return the ION heap mask for the tracker's configured memory type. */
u32 res_trk_get_mem_type(void)
{
	return ION_HEAP(resource_context.memtype);
}
/*
 * Allocate an uncached, 4K-aligned ION buffer of pmapion->bufsize bytes
 * from the heap in `ionheapid`, share it and mmap it into this process.
 * On success fills pmapion->pVirtualAddr, ->ion_info_fd and ->ion_fd.
 * On any failure pmapion->pVirtualAddr is set to NULL and all partially
 * acquired resources are released; no status code is returned.
 *
 * NOTE(review): the mmap() result is round-tripped through uint32_t
 * (`ret` and the MAP_FAILED comparison) — this truncates pointers on
 * 64-bit builds; confirm this code only targets 32-bit userspace.
 */
void apr_pmem_alloc_ion_uncached(struct mmap_info_ion *pmapion,
	uint32_t ionheapid)
{
	struct ion_handle_data handle_data;
	struct ion_allocation_data alloc;
	/* NOTE(review): ion_client, ion_handle, test_fd and data.cmd are
	 * declared/assigned but never consumed here. */
	struct ion_client *ion_client;
	struct ion_handle *ion_handle;
	int32_t rc;
	int32_t ion_fd;
	uint32_t ret;
	int32_t p_pmem_fd;
	struct ion_fd_data ion_info_fd;
	struct ion_fd_data test_fd;
	struct ion_custom_data data;

	ion_fd = open("/dev/ion", O_RDONLY);
	if (ion_fd < 0) {
		CDBG_ERROR("\n apr_pmem_alloc_ion : Open ion device failed");
		pmapion->pVirtualAddr = NULL;
		return;
	}
	alloc.len = pmapion->bufsize;
	alloc.align = 4096;
	alloc.heap_mask = ionheapid;
	/* The secure multimedia heap requires the ION_SECURE flag. */
	if (ionheapid == ION_HEAP(ION_CP_MM_HEAP_ID)) {
		alloc.flags = ION_SECURE;
	} else {
		alloc.flags = 0;
	}
	rc = ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
	if (rc < 0) {
		handle_data.handle = (pmapion->ion_info_fd).handle;
		CDBG_ERROR("\n apr_pmem_alloc_ion : ION alloc length %d %d",
			rc, alloc.len);
		close(ion_fd);
		pmapion->pVirtualAddr = NULL;
		return;
	} else {
		pmapion->ion_info_fd.handle = alloc.handle;
		rc = ioctl(ion_fd, ION_IOC_SHARE, &(pmapion->ion_info_fd));
		if (rc < 0) {
			CDBG_ERROR("\n apr_pmem_alloc_ion : ION map call failed %d",
				rc);
			handle_data.handle = pmapion->ion_info_fd.handle;
			ioctl(ion_fd, ION_IOC_FREE, &handle_data);
			close(ion_fd);
			pmapion->pVirtualAddr = NULL;
			return;
		} else {
			p_pmem_fd = pmapion->ion_info_fd.fd;
			ret = (uint32_t)mmap(NULL, alloc.len,
				PROT_READ | PROT_WRITE, MAP_SHARED,
				p_pmem_fd, 0);
			if (ret == (uint32_t)MAP_FAILED) {
				CDBG_ERROR("\n apr_pmem_alloc_ion : mmap call failed %d",
					rc);
				handle_data.handle =
					(pmapion->ion_info_fd).handle;
				ioctl(ion_fd, ION_IOC_FREE, &handle_data);
				close(ion_fd);
				pmapion->pVirtualAddr = NULL;
				return;
			} else {
				CDBG_ERROR("\n Ion allocation success virtaddr : %u fd %u",
					(uint32_t)ret, (uint32_t)p_pmem_fd);
				data.cmd = p_pmem_fd;
				pmapion->pVirtualAddr = (void *)ret;
				pmapion->ion_fd = ion_fd;
			}
		}
	}
	return;
}
/*
 * Allocate 4K-aligned (1M-aligned when SMEM_SECURE) shared memory from the
 * appropriate ION heap for the buffer type: IOMMU heap when an IOMMU is
 * present, ADSP heap otherwise, and the CP_MM heap for secure buffers.
 * Optionally kernel-maps the buffer, then resolves its device (IOVA)
 * address.  Fills *mem and returns 0, or a negative errno on failure with
 * all partial state released.
 *
 * BUG FIX: dprintk used %d for size_t in three format strings — replaced
 * with %zu (and %u for the u32 align) for correctness on 64-bit builds.
 */
static int alloc_ion_mem(struct smem_client *client, size_t size,
	u32 align, u32 flags, enum hal_buffer buffer_type,
	struct msm_smem *mem, int map_kernel)
{
	struct ion_handle *hndl;
	unsigned long iova = 0;
	unsigned long buffer_size = 0;
	unsigned long heap_mask = 0;
	int rc = 0;

	align = ALIGN(align, SZ_4K);
	size = ALIGN(size, SZ_4K);

	if (flags & SMEM_SECURE) {
		size = ALIGN(size, SZ_1M);
		align = ALIGN(align, SZ_1M);
	}

	if (is_iommu_present(client->res)) {
		heap_mask = ION_HEAP(ION_IOMMU_HEAP_ID);
	} else {
		dprintk(VIDC_DBG,
			"allocate shared memory from adsp heap size %zu align %u\n",
			size, align);
		heap_mask = ION_HEAP(ION_ADSP_HEAP_ID);
	}

	/* Secure buffers always come from the content-protected heap. */
	if (flags & SMEM_SECURE)
		heap_mask = ION_HEAP(ION_CP_MM_HEAP_ID);

	hndl = ion_alloc(client->clnt, size, align, heap_mask, flags);
	if (IS_ERR_OR_NULL(hndl)) {
		dprintk(VIDC_ERR,
			"Failed to allocate shared memory = %p, %zu, %u, 0x%x\n",
			client, size, align, flags);
		rc = -ENOMEM;
		goto fail_shared_mem_alloc;
	}
	mem->mem_type = client->mem_type;
	mem->smem_priv = hndl;
	mem->flags = flags;
	mem->buffer_type = buffer_type;
	if (map_kernel) {
		mem->kvaddr = ion_map_kernel(client->clnt, hndl);
		if (!mem->kvaddr) {
			dprintk(VIDC_ERR,
				"Failed to map shared mem in kernel\n");
			rc = -EIO;
			goto fail_map;
		}
	} else
		mem->kvaddr = NULL;

	rc = get_device_address(client, hndl, align, &iova, &buffer_size,
				flags, buffer_type);
	if (rc) {
		dprintk(VIDC_ERR, "Failed to get device address: %d\n", rc);
		goto fail_device_address;
	}
	mem->device_addr = iova;
	dprintk(VIDC_DBG,
		"device_address = 0x%lx, kvaddr = 0x%p, size = %zu\n",
		mem->device_addr, mem->kvaddr, size);
	mem->size = size;
	return rc;
fail_device_address:
	ion_unmap_kernel(client->clnt, hndl);
fail_map:
	ion_free(client->clnt, hndl);
fail_shared_mem_alloc:
	return rc;
}
//static int gralloc_alloc_buffer(alloc_device_t* dev, size_t size, int usage, buffer_handle_t* pHandle, bool reserve)
/*
 * Allocate one gralloc buffer of 'size' bytes and return it via *pHandle.
 *
 * Two mutually exclusive back-ends, selected at build time:
 *  - GRALLOC_ARM_DMA_BUF_MODULE: allocate from ION (VMALLOC/CMA/secure heaps
 *    depending on usage flags and the global g_MMU_stat), export a shared fd,
 *    mmap it, and wrap everything in a private_handle_t.
 *  - GRALLOC_ARM_UMP_MODULE: allocate through the UMP reference driver.
 *
 * 'reserve' is a bit mask (0x01 = pre-reserve, 0x02 = memory-switch) — it is
 * only honoured by the UMP path.  Returns 0 on success, -1 on failure.
 */
static int gralloc_alloc_buffer(alloc_device_t* dev, size_t size, int usage, buffer_handle_t* pHandle, int reserve)
{
#if GRALLOC_ARM_DMA_BUF_MODULE
	{
		private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
		ion_user_handle_t ion_hnd;
		unsigned char *cpu_ptr;
		int shared_fd;
		int ret;
		unsigned int heap_mask;
		int Ion_type;	/* 1 = MMU/vmalloc-backed, 0 = physically contiguous (CMA) */
		bool Ishwc = false;
		int Ion_flag = 0;
		if(usage == (GRALLOC_USAGE_HW_COMPOSER|GRALLOC_USAGE_HW_RENDER))
			Ishwc = true;
		//ret = ion_alloc(m->ion_client, size, 0, ION_HEAP_SYSTEM_MASK, 0, &ion_hnd);
#ifdef USE_X86
		/* CPU-accessed buffers want a cached mapping with explicit sync. */
		if(usage & (GRALLOC_USAGE_SW_READ_MASK | GRALLOC_USAGE_SW_WRITE_MASK))
			Ion_flag = (ION_FLAG_CACHED|ION_FLAG_CACHED_NEEDS_SYNC);
		if(is_out_log())
			ALOGD("usage=%x,protect=%x,ion_flag=%x,mmu=%d",usage,GRALLOC_USAGE_PROTECTED,Ion_flag,g_MMU_stat);
		if (usage & GRALLOC_USAGE_PROTECTED) // secure memory
		{
			unsigned long phys;
			ret = ion_secure_alloc(m->ion_client, size,&phys);
			//ALOGD("secure_alloc ret=%d,phys=%x",ret,(int)phys);
			if(ret != 0)
			{
				AERR("Failed to ion_alloc from ion_client:%d, size: %d", m->ion_client, size);
				return -1;
			}
			/* Secure buffers are not mapped: base=0, no fd, only phys. */
			private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_ION, usage, size, 0, 0);
			if (NULL != hnd)
			{
				hnd->share_fd = 0;
				hnd->ion_hnd = 0;
				hnd->type = 0;
				hnd->phy_addr = (int)phys;
				*pHandle = hnd;
				if(is_out_log())
					/* NOTE(review): format has one %x but two args (usage,
					 * hnd->phy_addr) — prints 'usage', not the address. */
					ALOGD("secure_alloc_ok phy=%x",usage,hnd->phy_addr);
				return 0;
			}
			else
			{
				AERR("Gralloc out of mem for ion_client:%d", m->ion_client);
			}
			/* NOTE(review): shared_fd is uninitialized on this path —
			 * this close() hits a garbage fd; verify and remove. */
			close(shared_fd);
			return -1;
		}
#endif
		//ret = ion_alloc(m->ion_client, size, 0, ION_HEAP_SYSTEM_MASK, 0, &ion_hnd);
		/* Heap selection: VMALLOC (IOMMU) heap when the MMU is active,
		 * otherwise contiguous CMA. */
#ifdef USE_X86
		if(g_MMU_stat && ((usage&GRALLOC_USAGE_HW_CAMERA_WRITE)==0) && !(usage & GRALLOC_USAGE_PRIVATE_2) && !Ishwc)
#else
		if(g_MMU_stat)
#endif
		{
			heap_mask = ION_HEAP(ION_VMALLOC_HEAP_ID);
#ifdef USE_X86
			if (usage & GRALLOC_USAGE_PRIVATE_2)
			{
				heap_mask |= ION_HEAP(ION_SECURE_HEAP_ID);
			}
#endif
			ret = ion_alloc(m->ion_client, size, 0, heap_mask, Ion_flag, &ion_hnd);
			Ion_type = 1;
		}
		else
		{
			heap_mask = ION_HEAP(ION_CMA_HEAP_ID);
#ifdef USE_X86
			if (usage & GRALLOC_USAGE_PRIVATE_2)
			{
				heap_mask |= ION_HEAP(ION_SECURE_HEAP_ID);
			}
#endif
			/* Camera-write + CPU-read buffers always get a cached mapping. */
			if (usage == (GRALLOC_USAGE_HW_CAMERA_WRITE|GRALLOC_USAGE_SW_READ_OFTEN))
			{
				ret = ion_alloc(m->ion_client, size, 0,heap_mask, (ION_FLAG_CACHED|ION_FLAG_CACHED_NEEDS_SYNC), &ion_hnd);
			}
			else
			{
				ret = ion_alloc(m->ion_client, size, 0,heap_mask, Ion_flag, &ion_hnd);
			}
#ifdef USE_X86
			if(g_MMU_stat && Ishwc)
			{
				Ion_type = 1;
			}
			else
#endif
				Ion_type = 0;
		}
		if (ret != 0)
		{
			/* CMA failed: fall back to a non-contiguous heap. */
			if( (heap_mask & ION_HEAP(ION_CMA_HEAP_ID))
#ifdef USE_X86
				&& !Ishwc
#endif
			)
			{
#ifdef BOARD_WITH_IOMMU
				heap_mask = ION_HEAP(ION_VMALLOC_HEAP_ID);
#else
				heap_mask = ION_HEAP(ION_CARVEOUT_HEAP_ID);
#endif
				ret = ion_alloc(m->ion_client, size, 0, heap_mask, 0, &ion_hnd );
				{
					if( ret != 0)
					{
						AERR("Force to VMALLOC fail ion_client:%d", m->ion_client);
						return -1;
					}
					else
					{
						ALOGD("Force to VMALLOC sucess !");
						Ion_type = 1;
					}
				}
			}
			else
			{
				AERR("Failed to ion_alloc from ion_client:%d, size: %d", m->ion_client, size);
				return -1;
			}
		}
		/* Export a shareable fd and map the buffer into this process. */
		ret = ion_share(m->ion_client, ion_hnd, &shared_fd);
		if (ret != 0)
		{
			AERR("ion_share( %d ) failed", m->ion_client);
			if (0 != ion_free(m->ion_client, ion_hnd))
			{
				AERR("ion_free( %d ) failed", m->ion_client);
			}
			return -1;
		}
		cpu_ptr = (unsigned char *)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, shared_fd, 0);
#ifdef USE_X86
		//memset(cpu_ptr, 0, size);
#endif
		if (MAP_FAILED == cpu_ptr)
		{
			AERR("ion_map( %d ) failed", m->ion_client);
			if (0 != ion_free(m->ion_client, ion_hnd))
			{
				AERR("ion_free( %d ) failed", m->ion_client);
			}
			close(shared_fd);
			return -1;
		}
		private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_ION, usage, size, (int)cpu_ptr, private_handle_t::LOCK_STATE_MAPPED);
		if (NULL != hnd)
		{
			unsigned long cma_phys = 0;
			hnd->share_fd = shared_fd;
			hnd->ion_hnd = ion_hnd;
			hnd->type = Ion_type;
			/* Contiguous buffers additionally record their physical address. */
			if(!Ion_type)
			{
				int pret;
				pret = ion_get_phys(m->ion_client, ion_hnd, &cma_phys);
				//ALOGD("ion_get_phy ret=%d,cma_phys=%x",pret,cma_phys);
			}
			hnd->phy_addr = (int)cma_phys;
			*pHandle = hnd;
			if(is_out_log())
				ALOGD("alloc_info fd[%d],type=%d,phy=%x",hnd->share_fd,hnd->type,hnd->phy_addr);
			return 0;
		}
		else
		{
			AERR("Gralloc out of mem for ion_client:%d", m->ion_client);
		}
		/* Handle allocation failed: unwind fd, mapping and ION handle. */
		close(shared_fd);
		ret = munmap(cpu_ptr, size);
		if (0 != ret)
		{
			AERR("munmap failed for base:%p size: %d", cpu_ptr, size);
		}
		ret = ion_free(m->ion_client, ion_hnd);
		if (0 != ret)
		{
			AERR("ion_free( %d ) failed", m->ion_client);
		}
		return -1;
	}
#endif

#if GRALLOC_ARM_UMP_MODULE
	{
		ump_handle ump_mem_handle;
		void *cpu_ptr;
		ump_secure_id ump_id;
		int constraints;
		size = round_up_to_page_size(size);
		/* Cacheability + reservation constraints from usage/reserve bits. */
		if ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN)
		{
			constraints = UMP_REF_DRV_CONSTRAINT_USE_CACHE;
		}
		else
		{
			constraints = UMP_REF_DRV_CONSTRAINT_NONE;
		}
		if ( reserve & 0x01)
		{
			constraints |= UMP_REF_DRV_CONSTRAINT_PRE_RESERVE;
		}
		if( reserve & 0x02)
		{
			constraints |= UMP_REF_DRV_UK_CONSTRAINT_MEM_SWITCH;
		}
#ifdef GRALLOC_SIMULATE_FAILURES
		/* if the failure condition matches, fail this iteration */
		if (__ump_alloc_should_fail())
		{
			ump_mem_handle = UMP_INVALID_MEMORY_HANDLE;
		}
		else
#endif
		{
			ump_mem_handle = ump_ref_drv_allocate(size, (ump_alloc_constraints)constraints);
			if (UMP_INVALID_MEMORY_HANDLE != ump_mem_handle)
			{
				cpu_ptr = ump_mapped_pointer_get(ump_mem_handle);
				if (NULL != cpu_ptr)
				{
					ump_id = ump_secure_id_get(ump_mem_handle);
					if (UMP_INVALID_SECURE_ID != ump_id)
					{
						private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_UMP, usage, size, (int)cpu_ptr, private_handle_t::LOCK_STATE_MAPPED, ump_id, ump_mem_handle);
						if (NULL != hnd)
						{
#ifdef USE_LCDC_COMPOSER
							/* Memory-switch buffers carry no physical address. */
							if( reserve & 0x02)
							{
								hnd->phy_addr = 0;
							}
							else
							{
								hnd->phy_addr = ump_phy_addr_get(ump_mem_handle);
							}
#endif
							*pHandle = hnd;
							return 0;
						}
						else
						{
							AERR("gralloc_alloc_buffer() failed to allocate handle. ump_handle = %p, ump_id = %d", ump_mem_handle, ump_id);
						}
					}
					else
					{
						AERR("gralloc_alloc_buffer() failed to retrieve valid secure id. ump_handle = %p", ump_mem_handle);
					}
					ump_mapped_pointer_release(ump_mem_handle);
				}
				else
				{
					AERR("gralloc_alloc_buffer() failed to map UMP memory. ump_handle = %p", ump_mem_handle);
				}
				ump_reference_release(ump_mem_handle);
			}
			else
			{
				AERR("gralloc_alloc_buffer() failed to allocate UMP memory. size:%d constraints: %d", size, constraints);
			}
		}
		return -1;
	}
#endif
static int audamrnb_in_open(struct inode *inode, struct file *file) { struct audio_in *audio = &the_audio_amrnb_in; int rc; int encid; int len = 0; unsigned long ionflag = 0; ion_phys_addr_t addr = 0; struct ion_handle *handle = NULL; struct ion_client *client = NULL; mutex_lock(&audio->lock); if (audio->opened) { rc = -EBUSY; goto done; } client = msm_ion_client_create(UINT_MAX, "Audio_AMR_In_Client"); if (IS_ERR_OR_NULL(client)) { MM_ERR("Unable to create ION client\n"); rc = -ENOMEM; goto client_create_error; } audio->client = client; handle = ion_alloc(client, DMASZ, SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID),0); if (IS_ERR_OR_NULL(handle)) { MM_ERR("Unable to create allocate O/P buffers\n"); rc = -ENOMEM; goto buff_alloc_error; } audio->buff_handle = handle; rc = ion_phys(client, handle, &addr, &len); if (rc) { MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n", (unsigned int) addr, (unsigned int) len); goto buff_get_phys_error; } else { MM_INFO("O/P buffers:valid phy: %x sz: %x\n", (unsigned int) addr, (unsigned int) len); } audio->phys = (int32_t)addr; rc = ion_handle_get_flags(client, handle, &ionflag); if (rc) { MM_ERR("could not get flags for the handle\n"); goto buff_get_flags_error; } audio->map_v_read = ion_map_kernel(client, handle); if (IS_ERR(audio->map_v_read)) { MM_ERR("could not map write buffers\n"); rc = -ENOMEM; goto buff_map_error; } audio->data = audio->map_v_read; MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n", audio->phys, (int)audio->data); MM_DBG("Memory addr = 0x%8x phy addr = 0x%8x\n",\ (int) audio->data, (int) audio->phys); if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { rc = -EACCES; MM_ERR("Non tunnel encoding is not supported\n"); goto buff_map_error; } else if (!(file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { audio->mode = MSM_AUD_ENC_MODE_TUNNEL; MM_DBG("Opened for tunnel mode encoding\n"); } else { rc = -EACCES; goto buff_map_error; } /* Settings will be re-config at AUDIO_SET_CONFIG, * but at 
least we need to have initial config */ audio->buffer_size = (FRAME_SIZE - 8); audio->enc_type = ENC_TYPE_AMRNB | audio->mode; audio->dtx_mode = -1; audio->frame_format = 0; audio->used_mode = 7; /* Bit Rate 12.2 kbps MR122 */ encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name, &audio->queue_ids); if (encid < 0) { MM_ERR("No free encoder available\n"); rc = -ENODEV; goto aenc_alloc_error; } audio->enc_id = encid; rc = msm_adsp_get(audio->module_name, &audio->audrec, &audrec_amrnb_adsp_ops, audio); if (rc) { audpreproc_aenc_free(audio->enc_id); goto aenc_alloc_error; } audio->stopped = 0; audio->source = 0; audamrnb_in_flush(audio); audio->device_events = AUDDEV_EVT_DEV_RDY | AUDDEV_EVT_DEV_RLS | AUDDEV_EVT_VOICE_STATE_CHG; audio->voice_state = msm_get_voice_state(); rc = auddev_register_evt_listner(audio->device_events, AUDDEV_CLNT_ENC, audio->enc_id, amrnb_in_listener, (void *) audio); if (rc) { MM_ERR("failed to register device event listener\n"); goto evt_error; } audio->build_id = socinfo_get_build_id(); MM_DBG("Modem build id = %s\n", audio->build_id); file->private_data = audio; audio->opened = 1; mutex_unlock(&audio->lock); return rc; evt_error: msm_adsp_put(audio->audrec); audpreproc_aenc_free(audio->enc_id); ion_unmap_kernel(client, audio->buff_handle); aenc_alloc_error: buff_map_error: buff_get_phys_error: buff_get_flags_error: ion_free(client, audio->buff_handle); buff_alloc_error: ion_client_destroy(client); client_create_error: done: mutex_unlock(&audio->lock); return rc; }
static int audpcm_in_open(struct inode *inode, struct file *file) { struct audio_in *audio = &the_audio_in; int rc; int len = 0; unsigned long ionflag = 0; ion_phys_addr_t addr = 0; struct ion_handle *handle = NULL; struct ion_client *client = NULL; int encid; struct timespec ts; struct rtc_time tm; mutex_lock(&audio->lock); if (audio->opened) { rc = -EBUSY; goto done; } audio->mode = MSM_AUD_ENC_MODE_TUNNEL; audio->samp_rate = RPC_AUD_DEF_SAMPLE_RATE_11025; audio->samp_rate_index = AUDREC_CMD_SAMP_RATE_INDX_11025; audio->channel_mode = AUDREC_CMD_STEREO_MODE_MONO; audio->buffer_size = MONO_DATA_SIZE; audio->enc_type = AUDREC_CMD_TYPE_0_INDEX_WAV | audio->mode; rc = audmgr_open(&audio->audmgr); if (rc) goto done; encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name, &audio->queue_ids); if (encid < 0) { MM_AUD_ERR("No free encoder available\n"); rc = -ENODEV; goto done; } audio->enc_id = encid; rc = msm_adsp_get(audio->module_name, &audio->audrec, &audrec_adsp_ops, audio); if (rc) { audpreproc_aenc_free(audio->enc_id); goto done; } audio->dsp_cnt = 0; audio->stopped = 0; audpcm_in_flush(audio); client = msm_ion_client_create(UINT_MAX, "Audio_PCM_in_client"); if (IS_ERR_OR_NULL(client)) { MM_ERR("Unable to create ION client\n"); rc = -ENOMEM; goto client_create_error; } audio->client = client; MM_DBG("allocating mem sz = %d\n", DMASZ); handle = ion_alloc(client, DMASZ, SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID)); if (IS_ERR_OR_NULL(handle)) { MM_ERR("Unable to create allocate O/P buffers\n"); rc = -ENOMEM; goto output_buff_alloc_error; } audio->output_buff_handle = handle; rc = ion_phys(client , handle, &addr, &len); if (rc) { MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n", (unsigned int) addr, (unsigned int) len); rc = -ENOMEM; goto output_buff_get_phys_error; } else { MM_INFO("O/P buffers:valid phy: %x sz: %x\n", (unsigned int) addr, (unsigned int) len); } audio->phys = (int32_t)addr; rc = ion_handle_get_flags(client, handle, &ionflag); if (rc) { MM_ERR("could 
not get flags for the handle\n"); rc = -ENOMEM; goto output_buff_get_flags_error; } audio->data = ion_map_kernel(client, handle, ionflag); if (IS_ERR(audio->data)) { MM_ERR("could not map read buffers,freeing instance 0x%08x\n", (int)audio); rc = -ENOMEM; goto output_buff_map_error; } MM_DBG("read buf: phy addr 0x%08x kernel addr 0x%08x\n", audio->phys, (int)audio->data); file->private_data = audio; audio->opened = 1; rc = 0; done: mutex_unlock(&audio->lock); getnstimeofday(&ts); rtc_time_to_tm(ts.tv_sec, &tm); pr_aud_info1("[ATS][start_recording][successful] at %lld \ (%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n", ktime_to_ns(ktime_get()), tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); return rc; output_buff_map_error: output_buff_get_phys_error: output_buff_get_flags_error: ion_free(client, audio->output_buff_handle); output_buff_alloc_error: ion_client_destroy(client); client_create_error: msm_adsp_put(audio->audrec); audpreproc_aenc_free(audio->enc_id); mutex_unlock(&audio->lock); return rc; }