uint32_t msm_jpeg_platform_v2p(struct msm_jpeg_device *pgmn_dev, int fd,
	uint32_t len, struct file **file_p, struct ion_handle **ionhandle,
	int domain_num)
{
	unsigned long paddr;
	unsigned long size;
	int rc;

	*ionhandle = ion_import_dma_buf(pgmn_dev->jpeg_client, fd);
	if (IS_ERR_OR_NULL(*ionhandle))
		return 0;

	rc = ion_map_iommu(pgmn_dev->jpeg_client, *ionhandle, domain_num, 0,
		SZ_4K, 0, &paddr, (unsigned long *)&size, 0, 0);
	JPEG_DBG("%s:%d] addr 0x%x size %ld", __func__, __LINE__,
		(uint32_t)paddr, size);
	if (rc < 0) {
		JPEG_PR_ERR("%s: ion_map_iommu fd %d error %d\n", __func__,
			fd, rc);
		goto error1;
	}

	/* validate user input */
	if (len > size) {
		JPEG_PR_ERR("%s: invalid offset + len\n", __func__);
		goto error1;
	}

	return paddr;
error1:
	ion_free(pgmn_dev->jpeg_client, *ionhandle);
	return 0;
}
uint32_t msm_gemini_platform_v2p(int fd, uint32_t len, struct file **file_p,
	struct ion_handle **ionhandle)
{
	unsigned long paddr;
	unsigned long size;
	int rc;

	*ionhandle = ion_import_dma_buf(gemini_client, fd);
	if (IS_ERR_OR_NULL(*ionhandle))
		return 0;

	rc = ion_map_iommu(gemini_client, *ionhandle, CAMERA_DOMAIN, GEN_POOL,
		SZ_4K, 0, &paddr, (unsigned long *)&size, 0, 0);
	if (rc < 0) {
		GMN_PR_ERR("%s: get_pmem_file fd %d error %d\n", __func__,
			fd, rc);
		goto error1;
	}

	/* validate user input */
	if (len > size) {
		GMN_PR_ERR("%s: invalid offset + len\n", __func__);
		goto error1;
	}

	return paddr;
error1:
	ion_free(gemini_client, *ionhandle);
	return 0;
}
static int get_device_address(struct smem_client *smem_client,
		struct ion_handle *hndl, unsigned long align, dma_addr_t *iova,
		unsigned long *buffer_size, u32 flags,
		enum hal_buffer buffer_type)
{
	int rc = 0;
	int domain, partition;
	struct ion_client *clnt = NULL;

	if (!iova || !buffer_size || !hndl || !smem_client) {
		dprintk(VIDC_ERR, "Invalid params: %pK, %pK, %pK, %pK\n",
				smem_client, hndl, iova, buffer_size);
		return -EINVAL;
	}

	clnt = smem_client->clnt;
	if (!clnt) {
		dprintk(VIDC_ERR, "Invalid client\n");
		return -EINVAL;
	}

	rc = msm_smem_get_domain_partition(smem_client, flags, buffer_type,
			&domain, &partition);
	if (rc) {
		dprintk(VIDC_ERR, "Failed to get domain and partition: %d\n",
				rc);
		goto mem_domain_get_failed;
	}

	if (flags & SMEM_SECURE) {
		rc = msm_ion_secure_buffer(clnt, hndl,
				get_tz_usage(smem_client, buffer_type), 0);
		if (rc) {
			dprintk(VIDC_ERR, "Failed to secure memory\n");
			goto mem_domain_get_failed;
		}
	}

	if (is_iommu_present(smem_client->res)) {
		dprintk(VIDC_DBG,
				"Calling ion_map_iommu - domain: %d, partition: %d\n",
				domain, partition);
		rc = ion_map_iommu(clnt, hndl, domain, partition, align,
				0, iova, buffer_size, 0, 0);
	} else {
		dprintk(VIDC_DBG, "Using physical memory address\n");
		rc = ion_phys(clnt, hndl, iova, (size_t *)buffer_size);
	}
	if (rc) {
		dprintk(VIDC_ERR, "ion memory map failed - %d\n", rc);
		goto mem_map_failed;
	}

	return 0;

mem_map_failed:
	if (flags & SMEM_SECURE)
		msm_ion_unsecure_buffer(clnt, hndl);
mem_domain_get_failed:
	return rc;
}
static unsigned long msm_vpe_queue_buffer_info(struct vpe_device *vpe_dev,
	struct msm_vpe_buff_queue_info_t *buff_queue,
	struct msm_vpe_buffer_info_t *buffer_info)
{
	struct list_head *buff_head;
	struct msm_vpe_buffer_map_list_t *buff, *save;
	int rc = 0;

	if (buffer_info->native_buff)
		buff_head = &buff_queue->native_buff_head;
	else
		buff_head = &buff_queue->vb2_buff_head;

	list_for_each_entry_safe(buff, save, buff_head, entry) {
		if (buff->map_info.buff_info.index == buffer_info->index) {
			pr_err("error buffer index already queued\n");
			return -EINVAL;
		}
	}

	buff = kzalloc(sizeof(struct msm_vpe_buffer_map_list_t), GFP_KERNEL);
	if (!buff) {
		pr_err("error allocating memory\n");
		return -EINVAL;
	}

	buff->map_info.buff_info = *buffer_info;
	buff->map_info.ion_handle = ion_import_dma_buf(vpe_dev->client,
		buffer_info->fd);
	if (IS_ERR_OR_NULL(buff->map_info.ion_handle)) {
		pr_err("ION import failed\n");
		goto queue_buff_error1;
	}

	rc = ion_map_iommu(vpe_dev->client, buff->map_info.ion_handle,
		vpe_dev->domain_num, 0, SZ_4K, 0,
		(unsigned long *)&buff->map_info.phy_addr,
		&buff->map_info.len, 0, 0);
	if (rc < 0) {
		pr_err("ION mmap failed\n");
		goto queue_buff_error2;
	}

	INIT_LIST_HEAD(&buff->entry);
	list_add_tail(&buff->entry, buff_head);

	return buff->map_info.phy_addr;

queue_buff_error2:
	ion_unmap_iommu(vpe_dev->client, buff->map_info.ion_handle,
		vpe_dev->domain_num, 0);
queue_buff_error1:
	ion_free(vpe_dev->client, buff->map_info.ion_handle);
	buff->map_info.ion_handle = NULL;
	kzfree(buff);
	return 0;
}
static int mdp_mmap(struct v4l2_subdev *sd, void *arg)
{
	int rc = 0, align = 0;
	struct mem_region_map *mmap = arg;
	struct mem_region *mregion;
	int domain = -1;
	struct mdp_instance *inst = NULL;

	if (!mmap || !mmap->mregion || !mmap->cookie) {
		WFD_MSG_ERR("Invalid argument\n");
		return -EINVAL;
	}

	inst = mmap->cookie;
	mregion = mmap->mregion;
	align = inst->secure ? SZ_1M : SZ_4K;
	if (mregion->size % align != 0) {
		WFD_MSG_ERR("Memregion not aligned to %d\n", align);
		return -EINVAL;
	}

	msm_fb_writeback_iommu_ref(inst->mdp, true);
	if (inst->secure) {
		rc = msm_ion_secure_buffer(mmap->ion_client,
			mregion->ion_handle, VIDEO_PIXEL, 0);
		if (rc) {
			WFD_MSG_ERR("Failed to secure input buffer\n");
			goto secure_fail;
		}
	}

	domain = msm_fb_get_iommu_domain(inst->mdp,
		inst->secure ? MDP_IOMMU_DOMAIN_CP : MDP_IOMMU_DOMAIN_NS);

	rc = ion_map_iommu(mmap->ion_client, mregion->ion_handle,
			domain, 0, align, 0,
			(unsigned long *)&mregion->paddr,
			(unsigned long *)&mregion->size, 0, 0);
	if (rc) {
		WFD_MSG_ERR("Failed to map into %ssecure domain: %d\n",
			!inst->secure ? "non" : "", rc);
		goto iommu_fail;
	}
	msm_fb_writeback_iommu_ref(inst->mdp, false);
	return 0;

iommu_fail:
	if (inst->secure)
		msm_ion_unsecure_buffer(mmap->ion_client, mregion->ion_handle);
secure_fail:
	msm_fb_writeback_iommu_ref(inst->mdp, false);
	return rc;
}
static int iep_bufid_to_iova(iep_service_info *pservice, u8 *tbl, int size,
			     struct iep_reg *reg)
{
	int i;
	int usr_fd = 0;
	int offset = 0;

	if (tbl == NULL || size <= 0) {
		dev_err(pservice->iommu_dev, "input arguments invalidate\n");
		return -1;
	}

	for (i = 0; i < size; i++) {
		usr_fd = reg->reg[tbl[i]] & 0x3FF;
		offset = reg->reg[tbl[i]] >> 10;

		if (usr_fd != 0) {
			struct ion_handle *hdl;
			int ret;
			struct iep_mem_region *mem_region;

			hdl = ion_import_dma_buf(pservice->ion_client, usr_fd);
			if (IS_ERR(hdl)) {
				dev_err(pservice->iommu_dev,
					"import dma-buf from fd %d failed, reg[%d]\n",
					usr_fd, tbl[i]);
				return PTR_ERR(hdl);
			}

			mem_region = kzalloc(sizeof(struct iep_mem_region),
					     GFP_KERNEL);
			if (mem_region == NULL) {
				dev_err(pservice->iommu_dev,
					"allocate memory for iommu memory region failed\n");
				ion_free(pservice->ion_client, hdl);
				return -1;
			}

			mem_region->hdl = hdl;

			ret = ion_map_iommu(pservice->iommu_dev,
					    pservice->ion_client,
					    mem_region->hdl,
					    &mem_region->iova,
					    &mem_region->len);
			if (ret < 0) {
				dev_err(pservice->iommu_dev,
					"ion map iommu failed\n");
				kfree(mem_region);
				ion_free(pservice->ion_client, hdl);
				return ret;
			}

			reg->reg[tbl[i]] = mem_region->iova + offset;
			INIT_LIST_HEAD(&mem_region->reg_lnk);
			list_add_tail(&mem_region->reg_lnk,
				      &reg->mem_region_list);
		}
	}

	return 0;
}
/**
 * videobuf_pmem_contig_user_get() - setup user space memory pointer
 * @mem: per-buffer private videobuf-contig-pmem data
 * @vb: video buffer to map
 *
 * This function validates and sets up a pointer to user space memory.
 * Only physically contiguous pfn-mapped memory is accepted.
 *
 * Returns 0 if successful.
 */
int videobuf2_pmem_contig_user_get(struct videobuf2_contig_pmem *mem,
	struct videobuf2_msm_offset *offset,
	enum videobuf2_buffer_type buffer_type,
	uint32_t addr_offset, int path,
	struct ion_client *client)
{
	unsigned long len;
	int rc = 0;
#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
	unsigned long kvstart;
#endif
	unsigned long paddr = 0;

	if (mem->phyaddr != 0)
		return 0;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	mem->ion_handle = ion_import_fd(client, (int)mem->vaddr);
	if (IS_ERR_OR_NULL(mem->ion_handle)) {
		pr_err("%s ION import failed\n", __func__);
		return PTR_ERR(mem->ion_handle);
	}
	rc = ion_map_iommu(client, mem->ion_handle, CAMERA_DOMAIN, GEN_POOL,
		SZ_4K, 0, (unsigned long *)&mem->phyaddr, &len, UNCACHED, 0);
	if (rc < 0)
		ion_free(client, mem->ion_handle);
	rc = ion_handle_get_flags(client, mem->ion_handle, &mem->ion_flags);
	mem->kernel_vaddr = ion_map_kernel(client, mem->ion_handle,
		mem->ion_flags);
#elif CONFIG_ANDROID_PMEM
	rc = get_pmem_file((int)mem->vaddr, (unsigned long *)&mem->phyaddr,
		&kvstart, &len, &mem->file);
	if (rc < 0) {
		pr_err("%s: get_pmem_file fd %d error %d\n",
			__func__, (int)mem->vaddr, rc);
		return rc;
	}
#else
	paddr = 0;
	kvstart = 0;
#endif
	if (offset)
		mem->offset = *offset;
	else
		memset(&mem->offset, 0, sizeof(struct videobuf2_msm_offset));
	mem->path = path;
	mem->buffer_type = buffer_type;
	paddr = mem->phyaddr;
	mem->mapped_phyaddr = paddr + addr_offset;
	mem->addr_offset = addr_offset;
	return rc;
}
int mdp_mmap(struct v4l2_subdev *sd, void *arg)
{
	int rc = 0, domain = -1;
	struct mem_region_map *mmap = arg;
	struct mem_region *mregion;
	bool use_iommu = true;
	struct mdp_instance *inst = NULL;

	if (!mmap || !mmap->mregion || !mmap->cookie) {
		WFD_MSG_ERR("Invalid argument\n");
		return -EINVAL;
	}

	inst = mmap->cookie;
	mregion = mmap->mregion;
	if (mregion->size % SZ_4K != 0) {
		WFD_MSG_ERR("Memregion not aligned to %d\n", SZ_4K);
		return -EINVAL;
	}

	if (inst->uses_iommu_split_domain) {
		if (inst->secure)
			use_iommu = false;
		else
			domain = DISPLAY_WRITE_DOMAIN;
	} else {
		domain = DISPLAY_READ_DOMAIN;
	}

	if (use_iommu) {
		rc = ion_map_iommu(mmap->ion_client, mregion->ion_handle,
				domain, GEN_POOL, SZ_4K, 0,
				(unsigned long *)&mregion->paddr,
				(unsigned long *)&mregion->size, 0, 0);
	} else {
		rc = ion_phys(mmap->ion_client, mregion->ion_handle,
				(unsigned long *)&mregion->paddr,
				(size_t *)&mregion->size);
	}

	return rc;
}
uint32_t msm_mercury_platform_v2p(int fd, uint32_t len, struct file **file_p,
	struct ion_handle **ionhandle)
{
	unsigned long paddr;
	unsigned long size;
	int rc;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	*ionhandle = ion_import_dma_buf(mercury_client, fd);
	if (IS_ERR_OR_NULL(*ionhandle))
		return 0;

	rc = ion_map_iommu(mercury_client, *ionhandle, CAMERA_DOMAIN,
		GEN_POOL, SZ_4K, 0, &paddr, (unsigned long *)&size, 0, 0);
#elif CONFIG_ANDROID_PMEM
	unsigned long kvstart;
	rc = get_pmem_file(fd, &paddr, &kvstart, &size, file_p);
#else
	rc = 0;
	paddr = 0;
	size = 0;
#endif
	if (rc < 0) {
		MCR_PR_ERR("%s: get_pmem_file fd %d error %d\n",
			__func__, fd, rc);
		goto error1;
	}

	/* validate user input */
	if (len > size) {
		MCR_PR_ERR("%s: invalid offset + len\n", __func__);
		goto error1;
	}

	return paddr;
error1:
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	ion_free(mercury_client, *ionhandle);
#endif
	return 0;
}
static unsigned long msm_mem_allocate(struct videobuf2_contig_pmem *mem)
{
	unsigned long phyaddr;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	int rc, len;

	mem->client = msm_ion_client_create(-1, "camera");
	if (IS_ERR((void *)mem->client)) {
		pr_err("%s Could not create client\n", __func__);
		goto client_failed;
	}
	mem->ion_handle = ion_alloc(mem->client, mem->size, SZ_4K,
		(0x1 << ION_CP_MM_HEAP_ID | 0x1 << ION_IOMMU_HEAP_ID));
	if (IS_ERR((void *)mem->ion_handle)) {
		pr_err("%s Could not allocate\n", __func__);
		goto alloc_failed;
	}
	rc = ion_map_iommu(mem->client, mem->ion_handle, -1, 0, SZ_4K, 0,
		(unsigned long *)&phyaddr, (unsigned long *)&len, UNCACHED, 0);
	if (rc < 0) {
		pr_err("%s Could not get physical address\n", __func__);
		goto phys_failed;
	}
#else
	phyaddr = allocate_contiguous_ebi_nomap(mem->size, SZ_4K);
#endif
	return phyaddr;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
phys_failed:
	ion_free(mem->client, mem->ion_handle);
alloc_failed:
	ion_client_destroy(mem->client);
client_failed:
	return 0;
#endif
}
static int get_device_address(struct ion_client *clnt,
	struct ion_handle *hndl, int domain_num, int partition_num,
	unsigned long align, unsigned long *iova, unsigned long *buffer_size,
	unsigned long flags)
{
	int rc;

	if (!iova || !buffer_size || !hndl || !clnt) {
		dprintk(VIDC_ERR, "Invalid params: %p, %p, %p, %p\n",
			clnt, hndl, iova, buffer_size);
		return -EINVAL;
	}
	if (align < 4096)
		align = 4096;
	dprintk(VIDC_DBG, "domain: %d, partition: %d\n",
		domain_num, partition_num);
	rc = ion_map_iommu(clnt, hndl, domain_num, partition_num, align,
		0, iova, buffer_size, UNCACHED, 0);
	if (rc)
		dprintk(VIDC_ERR,
			"ion_map_iommu failed(%d).domain: %d,partition: %d\n",
			rc, domain_num, partition_num);
	return rc;
}
static int get_device_address(struct ion_client *clnt,
	struct ion_handle *hndl, int domain_num, int partition_num,
	unsigned long align, unsigned long *iova, unsigned long *buffer_size)
{
	int rc;

	if (!iova || !buffer_size || !hndl || !clnt) {
		pr_err("Invalid params: %p, %p, %p, %p\n",
			clnt, hndl, iova, buffer_size);
		return -EINVAL;
	}
	if (align < 4096)
		align = 4096;
	pr_debug("\n In %s domain: %d, Partition: %d\n",
		__func__, domain_num, partition_num);
	rc = ion_map_iommu(clnt, hndl, domain_num, partition_num, align,
		0, iova, buffer_size, 0, 0);
	if (rc)
		pr_err("ion_map_iommu failed(%d).domain: %d,partition: %d\n",
			rc, domain_num, partition_num);
	return rc;
}
int mdss_mdp_get_img(struct msmfb_data *img, struct mdss_mdp_img_data *data)
{
	struct file *file;
	int ret = -EINVAL;
	int fb_num;
	unsigned long *start, *len;
	struct ion_client *iclient = mdss_get_ionclient();

	start = (unsigned long *) &data->addr;
	len = (unsigned long *) &data->len;
	data->flags |= img->flags;
	data->p_need = 0;

	if (img->flags & MDP_BLIT_SRC_GEM) {
		data->srcp_file = NULL;
		ret = kgsl_gem_obj_addr(img->memory_id, (int) img->priv,
					start, len);
	} else if (img->flags & MDP_MEMORY_ID_TYPE_FB) {
		file = fget_light(img->memory_id, &data->p_need);
		if (file == NULL) {
			pr_err("invalid framebuffer file (%d)\n",
					img->memory_id);
			return -EINVAL;
		}
		data->srcp_file = file;

		if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
			fb_num = MINOR(file->f_dentry->d_inode->i_rdev);
			ret = mdss_fb_get_phys_info(start, len, fb_num);
			if (ret)
				pr_err("mdss_fb_get_phys_info() failed\n");
		} else {
			pr_err("invalid FB_MAJOR\n");
			ret = -1;
		}
	} else if (iclient) {
		data->srcp_ihdl = ion_import_dma_buf(iclient, img->memory_id);
		if (IS_ERR_OR_NULL(data->srcp_ihdl)) {
			pr_err("error on ion_import_fd\n");
			ret = PTR_ERR(data->srcp_ihdl);
			data->srcp_ihdl = NULL;
			return ret;
		}

		if (is_mdss_iommu_attached()) {
			int domain;
			if (data->flags & MDP_SECURE_OVERLAY_SESSION) {
				domain = MDSS_IOMMU_DOMAIN_SECURE;
				mdss_mdp_secure_vote(1);
				ret = msm_ion_secure_buffer(iclient,
					data->srcp_ihdl, 0x2, 0);
				if (IS_ERR_VALUE(ret)) {
					ion_free(iclient, data->srcp_ihdl);
					pr_err("failed to secure handle (%d)\n",
						ret);
					return ret;
				}
			} else {
				domain = MDSS_IOMMU_DOMAIN_UNSECURE;
			}
			ret = ion_map_iommu(iclient, data->srcp_ihdl,
					    mdss_get_iommu_domain(domain),
					    0, SZ_4K, 0, start, len, 0, 0);
		} else {
			ret = ion_phys(iclient, data->srcp_ihdl, start,
				       (size_t *) len);
		}

		if (IS_ERR_VALUE(ret)) {
			ion_free(iclient, data->srcp_ihdl);
			pr_err("failed to map ion handle (%d)\n", ret);
			return ret;
		}
	}

	if (!*start) {
		pr_err("start address is zero!\n");
		return -ENOMEM;
	}

	if (!ret && (img->offset < data->len)) {
		data->addr += img->offset;
		data->len -= img->offset;

		pr_debug("mem=%d ihdl=%p buf=0x%x len=0x%x\n", img->memory_id,
			 data->srcp_ihdl, data->addr, data->len);
	} else {
		return -EINVAL;
	}

	return ret;
}
static int msm_pmem_table_add(struct hlist_head *ptype,
	struct msm_pmem_info *info, struct ion_client *client)
{
	unsigned long paddr;
#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
	unsigned long kvstart;
	struct file *file;
#endif
	int rc = -ENOMEM;
	unsigned long len;
	struct msm_pmem_region *region;

	region = kmalloc(sizeof(struct msm_pmem_region), GFP_KERNEL);
	if (!region)
		goto out;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	region->handle = ion_import_fd(client, info->fd);
	if (IS_ERR_OR_NULL(region->handle))
		goto out1;
	if (ion_map_iommu(client, region->handle, CAMERA_DOMAIN, GEN_POOL,
			SZ_4K, 0, &paddr, &len, UNCACHED, 0) < 0)
		goto out2;
#elif CONFIG_ANDROID_PMEM
	rc = get_pmem_file(info->fd, &paddr, &kvstart, &len, &file);
	if (rc < 0) {
		pr_err("%s: get_pmem_file fd %d error %d\n",
			__func__, info->fd, rc);
		goto out1;
	}
	region->file = file;
#else
	paddr = 0;
	file = NULL;
	kvstart = 0;
#endif
	if (!info->len)
		info->len = len;
	rc = check_pmem_info(info, len);
	if (rc < 0)
		goto out3;
	paddr += info->offset;
	len = info->len;

	if (check_overlap(ptype, paddr, len) < 0) {
		rc = -EINVAL;
		goto out3;
	}

	CDBG("%s: type %d, active flag %d, paddr 0x%lx, vaddr 0x%lx\n",
		__func__, info->type, info->active, paddr,
		(unsigned long)info->vaddr);

	INIT_HLIST_NODE(&region->list);
	region->paddr = paddr;
	region->len = len;
	memcpy(&region->info, info, sizeof(region->info));
	D("%s Adding region to list with type %d\n", __func__,
		region->info.type);
	D("%s pmem_stats address is 0x%p\n", __func__, ptype);
	hlist_add_head(&(region->list), ptype);

	return 0;
out3:
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	ion_unmap_iommu(client, region->handle, CAMERA_DOMAIN, GEN_POOL);
#endif
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
out2:
	ion_free(client, region->handle);
#elif CONFIG_ANDROID_PMEM
	put_pmem_file(region->file);
#endif
out1:
	kfree(region);
out:
	return rc;
}
static void *res_trk_pmem_map(struct ddl_buf_addr *addr, size_t sz,
	u32 alignment)
{
	u32 offset = 0, flags = 0;
	u32 index = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	int ret = 0;
	unsigned long iova = 0;
	unsigned long buffer_size = 0;
	unsigned long *kernel_vaddr = NULL;

	ddl_context = ddl_get_context();
	if (res_trk_get_enable_ion() && addr->alloc_handle) {
		kernel_vaddr = (unsigned long *) ion_map_kernel(
					ddl_context->video_ion_client,
					addr->alloc_handle, UNCACHED);
		if (IS_ERR_OR_NULL(kernel_vaddr)) {
			DDL_MSG_ERROR("%s():DDL ION client map failed\n",
					__func__);
			goto ion_bail_out;
		}
		addr->virtual_base_addr = (u8 *) kernel_vaddr;
		ret = ion_map_iommu(ddl_context->video_ion_client,
				addr->alloc_handle,
				VIDEO_DOMAIN,
				VIDEO_FIRMWARE_POOL,
				SZ_4K, 0, &iova, &buffer_size,
				UNCACHED, 0);
		if (ret || !iova) {
			DDL_MSG_ERROR(
			"%s():DDL ION client iommu map failed, ret = %d iova = 0x%lx\n",
				__func__, ret, iova);
			goto ion_unmap_bail_out;
		}
		addr->mapped_buffer = NULL;
		addr->physical_base_addr = (u8 *)iova;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = buffer_size;
	} else {
		if (!res_trk_check_for_sec_session()) {
			if (!addr->alloced_phys_addr) {
				pr_err(" %s() alloced addres NULL", __func__);
				goto bail_out;
			}
			flags = MSM_SUBSYSTEM_MAP_IOVA |
				MSM_SUBSYSTEM_MAP_KADDR;
			if (alignment == DDL_KILO_BYTE(128))
				index = 1;
			else if (alignment > SZ_4K)
				flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;

			addr->mapped_buffer = msm_subsystem_map_buffer(
				(unsigned long)addr->alloced_phys_addr,
				sz, flags, &restrk_mmu_subsystem[index],
				sizeof(restrk_mmu_subsystem[index])/
					sizeof(unsigned int));
			if (IS_ERR(addr->mapped_buffer)) {
				pr_err(" %s() buffer map failed", __func__);
				goto bail_out;
			}
			mapped_buffer = addr->mapped_buffer;
			if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
				pr_err("%s() map buffers failed\n", __func__);
				goto bail_out;
			}
			addr->physical_base_addr =
				(u8 *)mapped_buffer->iova[0];
			addr->virtual_base_addr = mapped_buffer->vaddr;
		} else {
			addr->physical_base_addr =
				(u8 *) addr->alloced_phys_addr;
			addr->virtual_base_addr =
				(u8 *)addr->alloced_phys_addr;
		}
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = sz;
	}
	return addr->virtual_base_addr;

bail_out:
	if (IS_ERR(addr->mapped_buffer))
		msm_subsystem_unmap_buffer(addr->mapped_buffer);
	return NULL;
ion_unmap_bail_out:
	if (!IS_ERR_OR_NULL(addr->alloc_handle)) {
		ion_unmap_kernel(resource_context.res_ion_client,
			addr->alloc_handle);
	}
ion_bail_out:
	return NULL;
}
void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size, offset = 0;
	u32 index = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	unsigned long iova = 0;
	unsigned long buffer_size = 0;
	unsigned long *kernel_vaddr = NULL;
	unsigned long ionflag = 0;
	unsigned long flags = 0;
	int ret = 0;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	int rc = 0;

	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	res_trk_set_mem_type(addr->mem_type);
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			DDL_MSG_ERROR("%s() :DDL ION Client Invalid handle\n",
					__func__);
			goto bail_out;
		}
		alloc_size = (alloc_size+4095) & ~4095;
		addr->alloc_handle = ion_alloc(
				ddl_context->video_ion_client,
				alloc_size, SZ_4K,
				res_trk_get_mem_type());
		if (IS_ERR_OR_NULL(addr->alloc_handle)) {
			DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
					__func__);
			goto bail_out;
		}
		if (res_trk_check_for_sec_session() ||
			addr->mem_type == DDL_FW_MEM)
			ionflag = UNCACHED;
		else
			ionflag = CACHED;
		kernel_vaddr = (unsigned long *) ion_map_kernel(
				ddl_context->video_ion_client,
				addr->alloc_handle, ionflag);
		if (IS_ERR_OR_NULL(kernel_vaddr)) {
			DDL_MSG_ERROR("%s() :DDL ION map failed\n", __func__);
			goto free_ion_alloc;
		}
		addr->virtual_base_addr = (u8 *) kernel_vaddr;
		if (res_trk_check_for_sec_session()) {
			rc = ion_phys(ddl_context->video_ion_client,
				addr->alloc_handle, &phyaddr, &len);
			if (rc || !phyaddr) {
				DDL_MSG_ERROR(
				"%s():DDL ION client physical failed\n",
					__func__);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = phyaddr;
		} else {
			ret = ion_map_iommu(ddl_context->video_ion_client,
					addr->alloc_handle,
					VIDEO_DOMAIN,
					VIDEO_MAIN_POOL,
					SZ_4K, 0, &iova, &buffer_size,
					UNCACHED, 0);
			if (ret || !iova) {
				DDL_MSG_ERROR(
				"%s():DDL ION ion map iommu failed, ret = %d iova = 0x%lx\n",
					__func__, ret, iova);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = (phys_addr_t) iova;
		}
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s():DDL ION client physical failed\n",
					__func__);
			goto unmap_ion_alloc;
		}
		addr->mapped_buffer = NULL;
		addr->physical_base_addr = (u8 *) addr->alloced_phys_addr;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = alloc_size;
	} else {
		addr->alloced_phys_addr = (phys_addr_t)
			allocate_contiguous_memory_nomap(alloc_size,
				res_trk_get_mem_type(), SZ_4K);
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n",
				__func__, alloc_size);
			goto bail_out;
		}
		flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
		if (alignment == DDL_KILO_BYTE(128))
			index = 1;
		else if (alignment > SZ_4K)
			flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;

		addr->mapped_buffer = msm_subsystem_map_buffer(
			(unsigned long)addr->alloced_phys_addr,
			alloc_size, flags, &vidc_mmu_subsystem[index],
			sizeof(vidc_mmu_subsystem[index])/sizeof(unsigned int));
		if (IS_ERR(addr->mapped_buffer)) {
			pr_err(" %s() buffer map failed", __func__);
			goto free_acm_alloc;
		}
		mapped_buffer = addr->mapped_buffer;
		if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
			pr_err("%s() map buffers failed\n", __func__);
			goto free_map_buffers;
		}
		addr->physical_base_addr = (u8 *)mapped_buffer->iova[0];
		addr->virtual_base_addr = mapped_buffer->vaddr;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = sz;
	}
	return addr->virtual_base_addr;

free_map_buffers:
	msm_subsystem_unmap_buffer(addr->mapped_buffer);
	addr->mapped_buffer = NULL;
free_acm_alloc:
	free_contiguous_memory_by_paddr(
		(unsigned long)addr->alloced_phys_addr);
	addr->alloced_phys_addr = (phys_addr_t)NULL;
	return NULL;
unmap_ion_alloc:
	ion_unmap_kernel(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->virtual_base_addr = NULL;
	addr->alloced_phys_addr = (phys_addr_t)NULL;
free_ion_alloc:
	ion_free(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->alloc_handle = NULL;
bail_out:
	return NULL;
}
static int msm_pmem_table_add(struct hlist_head *ptype,
	struct msm_pmem_info *info, struct ion_client *client, int domain_num)
{
	unsigned long paddr;
#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
	unsigned long kvstart;
	struct file *file;
#endif
	int rc = -ENOMEM;
	unsigned long len;
	struct msm_pmem_region *region;

	region = kmalloc(sizeof(struct msm_pmem_region), GFP_KERNEL);
	if (!region)
		goto out;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	region->handle = ion_import_dma_buf(client, info->fd);
	if (IS_ERR_OR_NULL(region->handle))
		goto out1;
	if (ion_map_iommu(client, region->handle, domain_num, 0,
			SZ_4K, 0, &paddr, &len, 0, 0) < 0)
		goto out2;
	/* pr_err("%s: IOMMU mapped address is 0x%x\n", __func__,
		(unsigned int)paddr); */
#else
	paddr = 0;
	file = NULL;
	kvstart = 0;
#endif
	if (!info->len)
		info->len = len;

	paddr += info->offset;
	len = info->len;

	if (check_overlap(ptype, paddr, len) < 0) {
		rc = -EINVAL;
		goto out3;
	}

	CDBG("%s: type %d, active flag %d, paddr 0x%lx, vaddr 0x%lx\n",
		__func__, info->type, info->active, paddr,
		(unsigned long)info->vaddr);

	INIT_HLIST_NODE(&region->list);
	region->paddr = paddr;
	region->len = len;
	memcpy(&region->info, info, sizeof(region->info));
	D("%s Adding region to list with type %d\n", __func__,
		region->info.type);
	D("%s pmem_stats address is 0x%p\n", __func__, ptype);
	hlist_add_head(&(region->list), ptype);

	return 0;
out3:
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	ion_unmap_iommu(client, region->handle, domain_num, 0);
#endif
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
out2:
	ion_free(client, region->handle);
#endif
out1:
	kfree(region);
out:
	return rc;
}
void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size, offset = 0;
	struct ddl_context *ddl_context;
	unsigned long iova = 0;
	unsigned long buffer_size = 0;
	unsigned long *kernel_vaddr = NULL;
	int ret = 0;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	int rc = 0;

	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	res_trk_set_mem_type(addr->mem_type);
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			DDL_MSG_ERROR("%s() :DDL ION Client Invalid handle\n",
					__func__);
			goto bail_out;
		}
		alloc_size = (alloc_size+4095) & ~4095;
		addr->alloc_handle = ion_alloc(
				ddl_context->video_ion_client,
				alloc_size, SZ_4K,
				res_trk_get_mem_type(),
				res_trk_get_ion_flags());
		if (IS_ERR_OR_NULL(addr->alloc_handle)) {
			DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
					__func__);
			goto bail_out;
		}
		kernel_vaddr = (unsigned long *) ion_map_kernel(
				ddl_context->video_ion_client,
				addr->alloc_handle);
		if (IS_ERR_OR_NULL(kernel_vaddr)) {
			DDL_MSG_ERROR("%s() :DDL ION map failed\n", __func__);
			goto free_ion_alloc;
		}
		addr->virtual_base_addr = (u8 *) kernel_vaddr;
		if (res_trk_check_for_sec_session()) {
			rc = ion_phys(ddl_context->video_ion_client,
				addr->alloc_handle, &phyaddr, &len);
			if (rc || !phyaddr) {
				DDL_MSG_ERROR(
				"%s():DDL ION client physical failed\n",
					__func__);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = phyaddr;
		} else {
			ret = ion_map_iommu(ddl_context->video_ion_client,
					addr->alloc_handle,
					VIDEO_DOMAIN,
					VIDEO_MAIN_POOL,
					SZ_4K, 0, &iova, &buffer_size,
					0, 0);
			if (ret || !iova) {
				DDL_MSG_ERROR(
				"%s():DDL ION ion map iommu failed, ret = %d iova = 0x%lx\n",
					__func__, ret, iova);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = (phys_addr_t) iova;
		}
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s():DDL ION client physical failed\n",
					__func__);
			goto unmap_ion_alloc;
		}
		addr->mapped_buffer = NULL;
		addr->physical_base_addr = (u8 *) addr->alloced_phys_addr;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = alloc_size;
	} else {
		pr_err("ION must be enabled.");
		goto bail_out;
	}
	return addr->virtual_base_addr;

unmap_ion_alloc:
	ion_unmap_kernel(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->virtual_base_addr = NULL;
	addr->alloced_phys_addr = (phys_addr_t)NULL;
free_ion_alloc:
	ion_free(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->alloc_handle = NULL;
bail_out:
	return NULL;
}