/*
 * ddl_fw_release() - release video firmware resources acquired by
 * ddl_fw_init().
 *
 * Content-protection + secure-session path: close the secure session,
 * then (with footswitch and IOMMU clocks enabled) drop the PIL cookie
 * obtained from pil_get().  Otherwise release the firmware buffer via
 * the resource tracker.
 *
 * FIX: the original read dram_base->pil_cookie before validating
 * dram_base at all; a NULL argument would oops.  Guard first.
 */
void ddl_fw_release(struct ddl_buf_addr *dram_base)
{
	void *cookie;

	if (!dram_base) {
		pr_err("%s() invalid args\n", __func__);
		return;
	}
	cookie = dram_base->pil_cookie;
	if (res_trk_is_cp_enabled() &&
		res_trk_check_for_sec_session()) {
		res_trk_close_secure_session();
		if (IS_ERR_OR_NULL(cookie)) {
			pr_err("Invalid params");
			return;
		}
		/* Hardware must be powered and clocked to drop the image. */
		if (res_trk_enable_footswitch()) {
			pr_err("Failed to enable footswitch");
			return;
		}
		if (res_trk_enable_iommu_clocks()) {
			res_trk_disable_footswitch();
			pr_err("Failed to enable iommu clocks\n");
			return;
		}
		pil_put(cookie);
		if (res_trk_disable_iommu_clocks())
			pr_err("Failed to disable iommu clocks\n");
		if (res_trk_disable_footswitch())
			pr_err("Failed to disable footswitch\n");
	} else {
		if (res_trk_check_for_sec_session())
			res_trk_close_secure_session();
		res_trk_release_fw_addr();
	}
}
int res_trk_get_mem_type(void) { int mem_type = -1; switch (resource_context.res_mem_type) { case DDL_FW_MEM: mem_type = ION_HEAP(resource_context.fw_mem_type); return mem_type; case DDL_MM_MEM: mem_type = resource_context.memtype; break; case DDL_CMD_MEM: if (res_trk_check_for_sec_session()) mem_type = resource_context.cmd_mem_type; else mem_type = resource_context.memtype; break; default: return mem_type; } if (resource_context.vidc_platform_data->enable_ion) { if (res_trk_check_for_sec_session()) { mem_type = ION_HEAP(mem_type); if (resource_context.res_mem_type != DDL_FW_MEM) mem_type |= ION_SECURE; else if (res_trk_is_cp_enabled()) mem_type |= ION_SECURE; } else mem_type = (ION_HEAP(mem_type) | ION_HEAP(ION_IOMMU_HEAP_ID)); } return mem_type; }
/*
 * ddl_pmem_free() - release a DDL buffer allocated by ddl_pmem_alloc().
 *
 * ION path: unmap the kernel mapping, unmap the IOMMU mapping (secure
 * sessions never created one), then free the handle.  Legacy path:
 * unmap the subsystem buffer and free the contiguous physical region.
 * The descriptor is zeroed at the end so a repeated free is a no-op.
 */
void ddl_pmem_free(struct ddl_buf_addr *addr)
{
	struct ddl_context *ddl_context;
	ddl_context = ddl_get_context();
	if (!addr) {
		pr_err("%s() invalid args\n", __func__);
		return;
	}
	if (ddl_context->video_ion_client) {
		if (!IS_ERR_OR_NULL(addr->alloc_handle)) {
			ion_unmap_kernel(ddl_context->video_ion_client,
					addr->alloc_handle);
			/* Secure allocations were never IOMMU-mapped. */
			if (!res_trk_check_for_sec_session()) {
				ion_unmap_iommu(ddl_context->video_ion_client,
					addr->alloc_handle,
					VIDEO_DOMAIN,
					VIDEO_MAIN_POOL);
			}
			ion_free(ddl_context->video_ion_client,
				addr->alloc_handle);
		}
	} else {
		if (addr->mapped_buffer)
			msm_subsystem_unmap_buffer(addr->mapped_buffer);
		if (addr->alloced_phys_addr)
			free_contiguous_memory_by_paddr(
			(unsigned long)addr->alloced_phys_addr);
	}
	/* Clear stale pointers so the descriptor cannot be double-freed. */
	memset(addr, 0, sizeof(struct ddl_buf_addr));
}
/*
 * res_trk_close_secure_session() - unsecure the CP heaps when the last
 * secure session closes.
 *
 * Only acts when a secure session is active AND the heaps were actually
 * secured (sec_clk_heap set by res_trk_open_secure_session()).  IOMMU
 * clocks must be running for the unsecure calls.  Returns 0 on success
 * or the clock-enable error code.
 *
 * NOTE(review): the msm_ion_unsecure_heap() return values are ignored;
 * presumably unsecure cannot usefully fail here — confirm against the
 * msm_ion API contract.
 */
int res_trk_close_secure_session()
{
	int rc;
	if (res_trk_check_for_sec_session() == 1
			&& resource_context.sec_clk_heap) {
		pr_err("Unsecuring....\n");
		mutex_lock(&resource_context.secure_lock);
		rc = res_trk_enable_iommu_clocks();
		if (rc) {
			pr_err("IOMMU clock enabled failed while close\n");
			goto error_close;
		}
		msm_ion_unsecure_heap(ION_HEAP(resource_context.cmd_mem_type));
		msm_ion_unsecure_heap(ION_HEAP(resource_context.memtype));
		if (resource_context.vidc_platform_data->secure_wb_heap)
			msm_ion_unsecure_heap(ION_HEAP(ION_CP_WB_HEAP_ID));
		res_trk_disable_iommu_clocks();
		resource_context.sec_clk_heap = 0;
		mutex_unlock(&resource_context.secure_lock);
	}
	return 0;
error_close:
	mutex_unlock(&resource_context.secure_lock);
	return rc;
}
/*
 * res_trk_pmem_alloc() - allocate backing memory for a resource-tracker
 * buffer (typically the firmware region).
 *
 * Three paths:
 *  - ION, non-(CP+secure): allocate an ION handle (size padded by the
 *    alignment and rounded up to 4K).  buffer_size is filled in later
 *    by res_trk_pmem_map().
 *  - ION with CP + secure session: no allocation; firmware lives at
 *    the fixed PIL base address.
 *  - no ION: legacy contiguous allocation.
 *
 * Returns 0 on success, -EINVAL/-ENOMEM on failure.
 *
 * NOTE(review): alloc_size = sz + alignment has no overflow check;
 * callers pass fixed kernel-internal sizes, but confirm no external
 * path can reach here with attacker-controlled sz.
 */
static int res_trk_pmem_alloc
	(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size;
	struct ddl_context *ddl_context;
	int rc = 0;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		rc = -EINVAL;
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	res_trk_set_mem_type(addr->mem_type);
	/* Over-allocate so the caller can align within the buffer. */
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		if (!res_trk_is_cp_enabled() ||
			!res_trk_check_for_sec_session()) {
			if (!ddl_context->video_ion_client)
				ddl_context->video_ion_client =
					res_trk_get_ion_client();
			if (!ddl_context->video_ion_client) {
				DDL_MSG_ERROR(
				"%s() :DDL ION Client Invalid handle\n",
					__func__);
				rc = -ENOMEM;
				goto bail_out;
			}
			/* Round up to a whole number of 4K pages. */
			alloc_size = (alloc_size+4095) & ~4095;
			addr->alloc_handle = ion_alloc(
					ddl_context->video_ion_client,
					alloc_size, SZ_4K,
					res_trk_get_mem_type());
			if (IS_ERR_OR_NULL(addr->alloc_handle)) {
				DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
					__func__);
				rc = -ENOMEM;
				goto bail_out;
			}
		} else {
			/* CP + secure: PIL owns the firmware region. */
			addr->alloc_handle = NULL;
			addr->alloced_phys_addr = PIL_FW_BASE_ADDR;
			addr->buffer_size = sz;
		}
	} else {
		addr->alloced_phys_addr = (phys_addr_t)
		allocate_contiguous_memory_nomap(alloc_size,
			res_trk_get_mem_type(), SZ_4K);
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n",
					__func__, alloc_size);
			rc = -ENOMEM;
			goto bail_out;
		}
		addr->buffer_size = sz;
		return rc;
	}
bail_out:
	return rc;
}
/*
 * ddl_fw_init() - load the video firmware into the DDL firmware buffer.
 *
 * CP + secure session: firmware is loaded by the Peripheral Image
 * Loader; power the footswitch and IOMMU clocks around pil_get() and
 * stash the cookie for ddl_fw_release().  Otherwise copy the built-in
 * firmware blob into the (aligned) buffer.  Returns true on success.
 */
u32 ddl_fw_init(struct ddl_buf_addr *dram_base)
{
	u8 *dest_addr;
	dest_addr = DDL_GET_ALIGNED_VITUAL(*dram_base);
	DDL_MSG_LOW("FW Addr / FW Size : %x/%d", (u32)vidc_video_codec_fw,
		vidc_video_codec_fw_size);
	if (res_trk_check_for_sec_session() && res_trk_is_cp_enabled()) {
		if (res_trk_enable_footswitch()) {
			pr_err("Failed to enable footswitch");
			return false;
		}
		if (res_trk_enable_iommu_clocks()) {
			/* Unwind footswitch on clock failure. */
			res_trk_disable_footswitch();
			pr_err("Failed to enable iommu clocks\n");
			return false;
		}
		dram_base->pil_cookie = pil_get("vidc");
		if (res_trk_disable_iommu_clocks())
			pr_err("Failed to disable iommu clocks\n");
		if (IS_ERR_OR_NULL(dram_base->pil_cookie)) {
			res_trk_disable_footswitch();
			pr_err("pil_get failed\n");
			return false;
		}
	} else {
		/* Reject a missing blob or one larger than the buffer. */
		if (vidc_video_codec_fw_size > dram_base->buffer_size ||
			!vidc_video_codec_fw)
			return false;
		memcpy(dest_addr, vidc_video_codec_fw,
			vidc_video_codec_fw_size);
	}
	return true;
}
/*
 * res_trk_init() - one-time initialization of the global resource
 * tracker from platform data.
 *
 * Refuses to run twice (device/irq already recorded) or with a NULL
 * device.  Sets up locks, heap types (ION heaps when enable_ion),
 * optional bus-scaling data, and pre-allocates the firmware buffer.
 *
 * FIX: res_trk_pmem_alloc() returns 0 on success and a negative errno
 * on failure; the original tested "!res_trk_pmem_alloc(...)", which
 * logged an error and wiped firmware_addr on SUCCESS and silently
 * ignored real failures.  The sibling res_trk_get_firmware_addr() uses
 * the positive test, which is restored here.
 */
void res_trk_init(struct device *device, u32 irq)
{
	if (resource_context.device || resource_context.irq_num ||
			!device) {
		VCDRES_MSG_ERROR("%s() Resource Tracker Init error\n",
				__func__);
	} else {
		memset(&resource_context, 0, sizeof(resource_context));
		mutex_init(&resource_context.lock);
		mutex_init(&resource_context.secure_lock);
		resource_context.device = device;
		resource_context.irq_num = irq;
		resource_context.vidc_platform_data =
			(struct msm_vidc_platform_data *)
			device->platform_data;
		if (resource_context.vidc_platform_data) {
			resource_context.memtype =
				resource_context.vidc_platform_data->memtype;
			resource_context.fw_mem_type =
				resource_context.vidc_platform_data->memtype;
			resource_context.cmd_mem_type =
				resource_context.vidc_platform_data->memtype;
			if (resource_context.vidc_platform_data->enable_ion) {
				resource_context.res_ion_client =
					res_trk_create_ion_client();
				if (!(resource_context.res_ion_client)) {
					VCDRES_MSG_ERROR(
						"%s()ION createfail\n",
						__func__);
					return;
				}
				/* Dedicated ION heaps for fw and commands. */
				resource_context.fw_mem_type =
					ION_MM_FIRMWARE_HEAP_ID;
				resource_context.cmd_mem_type =
					ION_CP_MFC_HEAP_ID;
			}
			resource_context.disable_dmx =
				resource_context.vidc_platform_data->
				disable_dmx;
			resource_context.disable_fullhd =
				resource_context.vidc_platform_data->
				disable_fullhd;
#ifdef CONFIG_MSM_BUS_SCALING
			resource_context.vidc_bus_client_pdata =
				resource_context.vidc_platform_data->
				vidc_bus_client_pdata;
#endif
		} else {
			resource_context.memtype = -1;
			resource_context.disable_dmx = 0;
		}
		resource_context.core_type = VCD_CORE_1080P;
		resource_context.firmware_addr.mem_type = DDL_FW_MEM;
		/* Non-zero return means the allocation failed. */
		if (res_trk_pmem_alloc(&resource_context.firmware_addr,
				VIDC_FW_SIZE, DDL_KILO_BYTE(128))) {
			pr_err("%s() Firmware buffer allocation failed",
					__func__);
			if (!res_trk_check_for_sec_session())
				memset(&resource_context.firmware_addr, 0,
					sizeof(resource_context.
						firmware_addr));
		}
	}
}
/*
 * res_trk_open_secure_session() - secure the CP heaps for a secure
 * video session.
 *
 * Requires an active secure session.  Under secure_lock, and with the
 * IOMMU clocks enabled, secures the main memtype heap, the command
 * heap, and (if configured) the write-back heap.  sec_clk_heap records
 * success so res_trk_close_secure_session() knows to unsecure.  On any
 * failure the already-secured heaps are unwound in reverse order via
 * the goto chain.  Returns 0 on success or a negative errno.
 */
int res_trk_open_secure_session()
{
	int rc, memtype;
	if (!res_trk_check_for_sec_session()) {
		pr_err("Secure sessions are not active\n");
		return -EINVAL;
	}
	mutex_lock(&resource_context.secure_lock);
	if (!resource_context.sec_clk_heap) {
		pr_err("Securing...\n");
		rc = res_trk_enable_iommu_clocks();
		if (rc) {
			pr_err("IOMMU clock enabled failed while open");
			goto error_open;
		}
		memtype = ION_HEAP(resource_context.memtype);
		rc = msm_ion_secure_heap(memtype);
		if (rc) {
			pr_err("ION heap secure failed heap id %d rc %d\n",
				resource_context.memtype, rc);
			goto disable_iommu_clks;
		}
		memtype = ION_HEAP(resource_context.cmd_mem_type);
		rc = msm_ion_secure_heap(memtype);
		if (rc) {
			pr_err("ION heap secure failed heap id %d rc %d\n",
				resource_context.cmd_mem_type, rc);
			goto unsecure_memtype_heap;
		}
		if (resource_context.vidc_platform_data->secure_wb_heap) {
			memtype = ION_HEAP(ION_CP_WB_HEAP_ID);
			rc = msm_ion_secure_heap(memtype);
			if (rc) {
				pr_err("WB_HEAP_ID secure failed rc %d\n",
					rc);
				goto unsecure_cmd_heap;
			}
		}
		resource_context.sec_clk_heap = 1;
		res_trk_disable_iommu_clocks();
	}
	mutex_unlock(&resource_context.secure_lock);
	return 0;
unsecure_cmd_heap:
	msm_ion_unsecure_heap(ION_HEAP(resource_context.cmd_mem_type));
unsecure_memtype_heap:
	msm_ion_unsecure_heap(ION_HEAP(resource_context.memtype));
disable_iommu_clks:
	res_trk_disable_iommu_clocks();
error_open:
	resource_context.sec_clk_heap = 0;
	mutex_unlock(&resource_context.secure_lock);
	return rc;
}
/*
 * res_trk_get_ion_flags() - ION allocation flags for the currently
 * selected resource memory class.
 *
 * Firmware memory is never tagged ION_SECURE here; for every other
 * class, a secure session (with ION enabled) gets ION_SECURE.
 *
 * FIX: the original re-tested res_mem_type != DDL_FW_MEM inside the
 * secure branch even though the early return above already guarantees
 * it, making its "else if (res_trk_is_cp_enabled())" arm unreachable
 * dead code.  The simplified condition is behaviorally identical.
 */
unsigned int res_trk_get_ion_flags(void)
{
	unsigned int flags = 0;

	if (resource_context.res_mem_type == DDL_FW_MEM)
		return flags;

	if (resource_context.vidc_platform_data->enable_ion &&
			res_trk_check_for_sec_session())
		flags |= ION_SECURE;
	return flags;
}
/*
 * vcd_open() - open a VCD client session.
 *
 * Validates the event callback, refuses to mix normal clients with an
 * in-progress secure session (ION builds only), then dispatches to the
 * open handler of the current device state under dev_mutex.  Returns a
 * VCD status code.
 */
u32 vcd_open(s32 driver_handle, u32 decoding, void (*callback) (u32 event,
	u32 status, void *info, size_t sz, void *handle,
	void *const client_data), void *client_data)
{
	struct vcd_drv_ctxt *drv_ctxt;
	u32 rc = VCD_S_SUCCESS;

	VCD_MSG_MED("vcd_open:");

	if (!callback) {
		VCD_MSG_ERROR("Bad parameters");
		return VCD_ERR_ILLEGAL_PARM;
	}
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	if (res_trk_check_for_sec_session() && vcd_get_num_of_clients()) {
		VCD_MSG_ERROR("Secure session in progress");
		return VCD_ERR_BAD_STATE;
	}
#endif
	drv_ctxt = vcd_get_drv_context();
	mutex_lock(&drv_ctxt->dev_mutex);
	if (!drv_ctxt->dev_state.state_table->ev_hdlr.open) {
		/* Current device state does not support open. */
		VCD_MSG_ERROR("Unsupported API in device state %d",
			drv_ctxt->dev_state.state);
		rc = VCD_ERR_BAD_STATE;
	} else {
		rc = drv_ctxt->dev_state.state_table->ev_hdlr.open(
			drv_ctxt, driver_handle, decoding,
			callback, client_data);
	}
	mutex_unlock(&drv_ctxt->dev_mutex);
	return rc;
}
/*
 * res_trk_open_secure_session() - secure the ION heaps for a secure
 * session (older variant without the sec_clk_heap bookkeeping).
 *
 * Only acts when a secure session is active.  Returns 0 on success or
 * the IOMMU clock-enable error.
 *
 * NOTE(review): unlike the newer variant, the msm_ion_secure_heap()
 * return values are ignored here, so a partially-secured state would
 * go unnoticed — verify against the version this chunk belongs to.
 */
int res_trk_open_secure_session()
{
	int rc;
	if (res_trk_check_for_sec_session() == 1) {
		mutex_lock(&resource_context.secure_lock);
		pr_err("Securing...\n");
		rc = res_trk_enable_iommu_clocks();
		if (rc) {
			pr_err("IOMMU clock enabled failed while open");
			goto error_open;
		}
		msm_ion_secure_heap(ION_HEAP(resource_context.memtype));
		msm_ion_secure_heap(ION_HEAP(resource_context.cmd_mem_type));
		res_trk_disable_iommu_clocks();
		mutex_unlock(&resource_context.secure_lock);
	}
	return 0;
error_open:
	mutex_unlock(&resource_context.secure_lock);
	return rc;
}
/*
 * res_trk_get_firmware_addr() - allocate and map the firmware buffer
 * and copy its descriptor to the caller.
 *
 * Size is PIL_FW_SIZE for CP+secure sessions, VIDC_FW_SIZE otherwise.
 * On failure, the tracker's firmware_addr is unwound/cleared.
 *
 * NOTE(review): the return type is u32 but the function returns
 * -EINVAL/-ENOMEM; callers appear to test for non-zero only — confirm.
 */
u32 res_trk_get_firmware_addr(struct ddl_buf_addr *firm_addr)
{
	int rc = 0;
	size_t size = 0;
	/* Reject a NULL output or an already-mapped firmware buffer. */
	if (!firm_addr || resource_context.firmware_addr.mapped_buffer) {
		pr_err("%s() invalid params", __func__);
		return -EINVAL;
	}
	if (res_trk_is_cp_enabled() && res_trk_check_for_sec_session())
		size = PIL_FW_SIZE;
	else
		size = VIDC_FW_SIZE;
	if (res_trk_pmem_alloc(&resource_context.firmware_addr,
				size, DDL_KILO_BYTE(128))) {
		pr_err("%s() Firmware buffer allocation failed",
				__func__);
		memset(&resource_context.firmware_addr, 0,
				sizeof(resource_context.firmware_addr));
		rc = -ENOMEM;
		goto fail_alloc;
	}
	/* res_trk_pmem_map() returns the kernel vaddr, NULL on failure. */
	if (!res_trk_pmem_map(&resource_context.firmware_addr,
		resource_context.firmware_addr.buffer_size,
		DDL_KILO_BYTE(128))) {
		pr_err("%s() Firmware buffer mapping failed",
				__func__);
		rc = -ENOMEM;
		goto fail_map;
	}
	memcpy(firm_addr, &resource_context.firmware_addr,
		sizeof(struct ddl_buf_addr));
	return 0;
fail_map:
	res_trk_pmem_free(&resource_context.firmware_addr);
fail_alloc:
	return rc;
}
static void res_trk_pmem_unmap(struct ddl_buf_addr *addr) { if (!addr) { pr_err("%s() invalid args\n", __func__); return; } if (!IS_ERR_OR_NULL(addr->alloc_handle)) { if (addr->physical_base_addr) { ion_unmap_kernel(resource_context.res_ion_client, addr->alloc_handle); if (!res_trk_check_for_sec_session()) { ion_unmap_iommu(resource_context.res_ion_client, addr->alloc_handle, VIDEO_DOMAIN, VIDEO_FIRMWARE_POOL); } addr->virtual_base_addr = NULL; addr->physical_base_addr = NULL; } } else if (addr->mapped_buffer) msm_subsystem_unmap_buffer(addr->mapped_buffer); addr->mapped_buffer = NULL; }
/*
 * ddl_pmem_free() - release an ION-backed DDL buffer (ION-only variant;
 * no legacy contiguous path in this version).
 *
 * Unmaps the kernel mapping, the IOMMU mapping (skipped for secure
 * sessions, which never created one), frees the handle, then zeroes
 * the descriptor so a repeated free is harmless.
 */
void ddl_pmem_free(struct ddl_buf_addr *addr)
{
	struct ddl_context *ddl_context;
	ddl_context = ddl_get_context();
	if (!addr) {
		pr_err("%s() invalid args\n", __func__);
		return;
	}
	if (ddl_context->video_ion_client) {
		if (!IS_ERR_OR_NULL(addr->alloc_handle)) {
			ion_unmap_kernel(ddl_context->video_ion_client,
					addr->alloc_handle);
			if (!res_trk_check_for_sec_session()) {
				ion_unmap_iommu(ddl_context->video_ion_client,
					addr->alloc_handle,
					VIDEO_DOMAIN,
					VIDEO_MAIN_POOL);
			}
			ion_free(ddl_context->video_ion_client,
				addr->alloc_handle);
		}
	}
	memset(addr, 0, sizeof(struct ddl_buf_addr));
}
/*
 * ddl_open() - open a DDL client and allocate its shared command
 * buffers.
 *
 * Acquires a client slot, allocates one shared-memory region (two when
 * the core uses dual frame command channels), zeroes them, and moves
 * the client to DDL_CLIENT_OPEN.  Secure sessions place the buffers in
 * command memory; others use firmware memory.  On any failure every
 * allocated region is freed (ddl_pmem_free() zeroes the descriptor, so
 * freeing an unallocated/already-freed region is a no-op) and the
 * client slot is returned.  Returns a VCD status code.
 */
u32 ddl_open(u32 **ddl_handle, u32 decoding)
{
	struct ddl_context *ddl_context;
	struct ddl_client_context *ddl;
	void *ptr;
	u32 status;
	DDL_MSG_HIGH("ddl_open");
	if (!ddl_handle) {
		DDL_MSG_ERROR("ddl_open:Bad_handle");
		return VCD_ERR_BAD_HANDLE;
	}
	ddl_context = ddl_get_context();
	if (!DDL_IS_INITIALIZED(ddl_context)) {
		DDL_MSG_ERROR("ddl_open:Not_inited");
		return VCD_ERR_ILLEGAL_OP;
	}
	status = ddl_client_transact(DDL_GET_CLIENT, &ddl);
	if (status) {
		DDL_MSG_ERROR("ddl_open:Client_trasac_failed");
		return status;
	}
	/* Secure sessions must not place commands in firmware memory. */
	if (res_trk_check_for_sec_session())
		ddl->shared_mem[0].mem_type = DDL_CMD_MEM;
	else
		ddl->shared_mem[0].mem_type = DDL_FW_MEM;
	ptr = ddl_pmem_alloc(&ddl->shared_mem[0],
			DDL_FW_AUX_HOST_CMD_SPACE_SIZE, 0);
	if (!ptr)
		status = VCD_ERR_ALLOC_FAIL;
	if (!status && ddl_context->frame_channel_depth
		== VCD_DUAL_FRAME_COMMAND_CHANNEL) {
		if (res_trk_check_for_sec_session())
			ddl->shared_mem[1].mem_type = DDL_CMD_MEM;
		else
			ddl->shared_mem[1].mem_type = DDL_FW_MEM;
		ptr = ddl_pmem_alloc(&ddl->shared_mem[1],
				DDL_FW_AUX_HOST_CMD_SPACE_SIZE, 0);
		if (!ptr) {
			ddl_pmem_free(&ddl->shared_mem[0]);
			status = VCD_ERR_ALLOC_FAIL;
		}
	}
	if (!status) {
		memset(ddl->shared_mem[0].align_virtual_addr, 0,
			DDL_FW_AUX_HOST_CMD_SPACE_SIZE);
		if (ddl_context->frame_channel_depth ==
			VCD_DUAL_FRAME_COMMAND_CHANNEL) {
			memset(ddl->shared_mem[1].align_virtual_addr, 0,
				DDL_FW_AUX_HOST_CMD_SPACE_SIZE);
		}
		DDL_MSG_LOW("ddl_state_transition: %s ~~> DDL_CLIENT_OPEN",
		ddl_get_state_string(ddl->client_state));
		ddl->client_state = DDL_CLIENT_OPEN;
		ddl->codec_data.hdr.decoding = decoding;
		ddl->decoding = decoding;
		/* Metadata headers are skipped for plain secure sessions. */
		if (!res_trk_check_for_sec_session() ||
			res_trk_get_enable_sec_metadata())
			ddl_set_default_meta_data_hdr(ddl);
		ddl_set_initial_default_values(ddl);
		*ddl_handle = (u32 *) ddl;
	} else {
		ddl_pmem_free(&ddl->shared_mem[0]);
		if (ddl_context->frame_channel_depth
			== VCD_DUAL_FRAME_COMMAND_CHANNEL)
			ddl_pmem_free(&ddl->shared_mem[1]);
		ddl_client_transact(DDL_FREE_CLIENT, &ddl);
	}
	return status;
}
/*
 * res_trk_pmem_map() - map a buffer allocated by res_trk_pmem_alloc()
 * into the kernel and (where applicable) the video IOMMU.
 *
 * ION path: kernel map + IOMMU map into the firmware pool; the aligned
 * physical/virtual pair and buffer_size are derived from the IOMMU iova.
 * Legacy path: non-secure buffers go through msm_subsystem_map_buffer()
 * (128K alignment selects subsystem index 1; >4K alignment requests 8K
 * iova alignment); secure buffers use the physical address directly as
 * both "virtual" and physical base.  Returns the kernel virtual base
 * address, or NULL on failure.
 *
 * NOTE(review): the ion_unmap_bail_out path unmaps with
 * resource_context.res_ion_client while the mapping was created with
 * ddl_context->video_ion_client — confirm these are the same client or
 * that this mismatch is intended.
 */
static void *res_trk_pmem_map
	(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 offset = 0, flags = 0;
	u32 index = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	int ret = 0;
	unsigned long iova = 0;
	unsigned long buffer_size = 0;
	unsigned long *kernel_vaddr = NULL;
	ddl_context = ddl_get_context();
	if (res_trk_get_enable_ion() && addr->alloc_handle) {
		kernel_vaddr = (unsigned long *) ion_map_kernel(
					ddl_context->video_ion_client,
					addr->alloc_handle, UNCACHED);
		if (IS_ERR_OR_NULL(kernel_vaddr)) {
			DDL_MSG_ERROR("%s():DDL ION client map failed\n",
					__func__);
			goto ion_bail_out;
		}
		addr->virtual_base_addr = (u8 *) kernel_vaddr;
		ret = ion_map_iommu(ddl_context->video_ion_client,
				addr->alloc_handle,
				VIDEO_DOMAIN,
				VIDEO_FIRMWARE_POOL,
				SZ_4K,
				0,
				&iova,
				&buffer_size,
				UNCACHED, 0);
		if (ret || !iova) {
			DDL_MSG_ERROR(
			"%s():DDL ION client iommu map failed, ret = %d iova = 0x%lx\n",
			__func__, ret, iova);
			goto ion_unmap_bail_out;
		}
		addr->mapped_buffer = NULL;
		addr->physical_base_addr = (u8 *)iova;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		/* Apply the same alignment offset to the virtual base. */
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = buffer_size;
	} else {
		if (!res_trk_check_for_sec_session()) {
			if (!addr->alloced_phys_addr) {
				pr_err(" %s() alloced addres NULL",
					__func__);
				goto bail_out;
			}
			flags = MSM_SUBSYSTEM_MAP_IOVA |
				MSM_SUBSYSTEM_MAP_KADDR;
			if (alignment == DDL_KILO_BYTE(128))
				index = 1;
			else if (alignment > SZ_4K)
				flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;
			addr->mapped_buffer =
			msm_subsystem_map_buffer(
			(unsigned long)addr->alloced_phys_addr,
			sz, flags, &restrk_mmu_subsystem[index],
			sizeof(restrk_mmu_subsystem[index])/
				sizeof(unsigned int));
			if (IS_ERR(addr->mapped_buffer)) {
				pr_err(" %s() buffer map failed", __func__);
				goto bail_out;
			}
			mapped_buffer = addr->mapped_buffer;
			if (!mapped_buffer->vaddr ||
				!mapped_buffer->iova[0]) {
				pr_err("%s() map buffers failed\n",
					__func__);
				goto bail_out;
			}
			addr->physical_base_addr =
				(u8 *)mapped_buffer->iova[0];
			addr->virtual_base_addr = mapped_buffer->vaddr;
		} else {
			/* Secure: no kernel mapping exists; reuse paddr. */
			addr->physical_base_addr =
				(u8 *) addr->alloced_phys_addr;
			addr->virtual_base_addr =
				(u8 *)addr->alloced_phys_addr;
		}
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = sz;
	}
	return addr->virtual_base_addr;
bail_out:
	if (IS_ERR(addr->mapped_buffer))
		msm_subsystem_unmap_buffer(addr->mapped_buffer);
	return NULL;
ion_unmap_bail_out:
	if (!IS_ERR_OR_NULL(addr->alloc_handle)) {
		ion_unmap_kernel(resource_context.
			res_ion_client, addr->alloc_handle);
	}
ion_bail_out:
	return NULL;
}
/*
 * ddl_pmem_alloc() - allocate and map a DDL buffer (ION-only variant).
 *
 * Allocates an ION handle (size padded by alignment, rounded to 4K)
 * with flags from res_trk_get_ion_flags(), maps it into the kernel,
 * then resolves a device-usable physical address: ion_phys() for
 * secure sessions (no IOMMU mapping), ion_map_iommu() otherwise.
 * Aligned physical/virtual pointers and buffer_size are filled into
 * *addr.  Returns the kernel virtual base, or NULL on failure with all
 * partial state unwound via the goto chain.
 */
void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size, offset = 0 ;
	struct ddl_context *ddl_context;
	unsigned long iova = 0;
	unsigned long buffer_size = 0;
	unsigned long *kernel_vaddr = NULL;
	int ret = 0;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	int rc = 0;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	res_trk_set_mem_type(addr->mem_type);
	/* Over-allocate so the caller can align within the buffer. */
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			DDL_MSG_ERROR(
				"%s() :DDL ION Client Invalid handle\n",
				__func__);
			goto bail_out;
		}
		/* Round up to a whole number of 4K pages. */
		alloc_size = (alloc_size+4095) & ~4095;
		addr->alloc_handle = ion_alloc(
				ddl_context->video_ion_client,
				alloc_size,
				SZ_4K,
				res_trk_get_mem_type(),
				res_trk_get_ion_flags());
		if (IS_ERR_OR_NULL(addr->alloc_handle)) {
			DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
					__func__);
			goto bail_out;
		}
		kernel_vaddr = (unsigned long *) ion_map_kernel(
				ddl_context->video_ion_client,
				addr->alloc_handle);
		if (IS_ERR_OR_NULL(kernel_vaddr)) {
			DDL_MSG_ERROR("%s() :DDL ION map failed\n",
					__func__);
			goto free_ion_alloc;
		}
		addr->virtual_base_addr = (u8 *) kernel_vaddr;
		if (res_trk_check_for_sec_session()) {
			/* Secure: physical address only, no IOMMU map. */
			rc = ion_phys(ddl_context->video_ion_client,
					addr->alloc_handle,
					&phyaddr,
					&len);
			if (rc || !phyaddr) {
				DDL_MSG_ERROR(
				"%s():DDL ION client physical failed\n",
				__func__);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = phyaddr;
		} else {
			ret = ion_map_iommu(ddl_context->video_ion_client,
					addr->alloc_handle,
					VIDEO_DOMAIN,
					VIDEO_MAIN_POOL,
					SZ_4K,
					0,
					&iova,
					&buffer_size,
					0, 0);
			if (ret || !iova) {
				DDL_MSG_ERROR(
				"%s():DDL ION ion map iommu failed, ret = %d iova = 0x%lx\n",
				__func__, ret, iova);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = (phys_addr_t) iova;
		}
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s():DDL ION client physical failed\n",
					__func__);
			goto unmap_ion_alloc;
		}
		addr->mapped_buffer = NULL;
		addr->physical_base_addr = (u8 *) addr->alloced_phys_addr;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		/* Apply the same alignment offset to the virtual base. */
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = alloc_size;
	} else {
		pr_err("ION must be enabled.");
		goto bail_out;
	}
	return addr->virtual_base_addr;
unmap_ion_alloc:
	ion_unmap_kernel(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->virtual_base_addr = NULL;
	addr->alloced_phys_addr = (phys_addr_t)NULL;
free_ion_alloc:
	ion_free(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->alloc_handle = NULL;
bail_out:
	return NULL;
}
/*
 * ddl_pmem_alloc() - allocate and map a DDL buffer (dual-path variant
 * with a legacy contiguous-memory fallback).
 *
 * ION path: allocate a handle (size padded by alignment, rounded to
 * 4K), map into the kernel (uncached for secure sessions and firmware
 * memory, cached otherwise), then resolve the device address via
 * ion_phys() (secure) or ion_map_iommu() (normal).  Legacy path:
 * contiguous allocation + msm_subsystem_map_buffer() (128K alignment
 * selects subsystem index 1; >4K requests 8K iova alignment).  Aligned
 * physical/virtual pointers are filled into *addr.  Returns the kernel
 * virtual base, or NULL on failure with partial state unwound via the
 * goto chains.
 */
void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size, offset = 0 ;
	u32 index = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	unsigned long iova = 0;
	unsigned long buffer_size = 0;
	unsigned long *kernel_vaddr = NULL;
	unsigned long ionflag = 0;
	unsigned long flags = 0;
	int ret = 0;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	int rc = 0;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	res_trk_set_mem_type(addr->mem_type);
	/* Over-allocate so the caller can align within the buffer. */
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			DDL_MSG_ERROR(
				"%s() :DDL ION Client Invalid handle\n",
				__func__);
			goto bail_out;
		}
		/* Round up to a whole number of 4K pages. */
		alloc_size = (alloc_size+4095) & ~4095;
		addr->alloc_handle = ion_alloc(
				ddl_context->video_ion_client,
				alloc_size,
				SZ_4K,
				res_trk_get_mem_type());
		if (IS_ERR_OR_NULL(addr->alloc_handle)) {
			DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
					__func__);
			goto bail_out;
		}
		/* Secure and firmware buffers must be uncached. */
		if (res_trk_check_for_sec_session() ||
			addr->mem_type == DDL_FW_MEM)
			ionflag = UNCACHED;
		else
			ionflag = CACHED;
		kernel_vaddr = (unsigned long *) ion_map_kernel(
				ddl_context->video_ion_client,
				addr->alloc_handle, ionflag);
		if (IS_ERR_OR_NULL(kernel_vaddr)) {
			DDL_MSG_ERROR("%s() :DDL ION map failed\n",
					__func__);
			goto free_ion_alloc;
		}
		addr->virtual_base_addr = (u8 *) kernel_vaddr;
		if (res_trk_check_for_sec_session()) {
			/* Secure: physical address only, no IOMMU map. */
			rc = ion_phys(ddl_context->video_ion_client,
					addr->alloc_handle,
					&phyaddr,
					&len);
			if (rc || !phyaddr) {
				DDL_MSG_ERROR(
				"%s():DDL ION client physical failed\n",
				__func__);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = phyaddr;
		} else {
			ret = ion_map_iommu(ddl_context->video_ion_client,
					addr->alloc_handle,
					VIDEO_DOMAIN,
					VIDEO_MAIN_POOL,
					SZ_4K,
					0,
					&iova,
					&buffer_size,
					UNCACHED, 0);
			if (ret || !iova) {
				DDL_MSG_ERROR(
				"%s():DDL ION ion map iommu failed, ret = %d iova = 0x%lx\n",
				__func__, ret, iova);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = (phys_addr_t) iova;
		}
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s():DDL ION client physical failed\n",
					__func__);
			goto unmap_ion_alloc;
		}
		addr->mapped_buffer = NULL;
		addr->physical_base_addr = (u8 *) addr->alloced_phys_addr;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		/* Apply the same alignment offset to the virtual base. */
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = alloc_size;
	} else {
		addr->alloced_phys_addr = (phys_addr_t)
			allocate_contiguous_memory_nomap(alloc_size,
				res_trk_get_mem_type(), SZ_4K);
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n",
					__func__, alloc_size);
			goto bail_out;
		}
		flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
		if (alignment == DDL_KILO_BYTE(128))
			index = 1;
		else if (alignment > SZ_4K)
			flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;
		addr->mapped_buffer =
			msm_subsystem_map_buffer(
			(unsigned long)addr->alloced_phys_addr,
			alloc_size, flags, &vidc_mmu_subsystem[index],
			sizeof(vidc_mmu_subsystem[index])/
				sizeof(unsigned int));
		if (IS_ERR(addr->mapped_buffer)) {
			pr_err(" %s() buffer map failed", __func__);
			goto free_acm_alloc;
		}
		mapped_buffer = addr->mapped_buffer;
		if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
			pr_err("%s() map buffers failed\n", __func__);
			goto free_map_buffers;
		}
		addr->physical_base_addr = (u8 *)mapped_buffer->iova[0];
		addr->virtual_base_addr = mapped_buffer->vaddr;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = sz;
	}
	return addr->virtual_base_addr;
free_map_buffers:
	msm_subsystem_unmap_buffer(addr->mapped_buffer);
	addr->mapped_buffer = NULL;
free_acm_alloc:
	free_contiguous_memory_by_paddr(
		(unsigned long)addr->alloced_phys_addr);
	addr->alloced_phys_addr = (phys_addr_t)NULL;
	return NULL;
unmap_ion_alloc:
	ion_unmap_kernel(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->virtual_base_addr = NULL;
	addr->alloced_phys_addr = (phys_addr_t)NULL;
free_ion_alloc:
	ion_free(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->alloc_handle = NULL;
bail_out:
	return NULL;
}