static void put_device_address(struct smem_client *smem_client,
		struct ion_handle *hndl, int domain_num, int partition_num,
		u32 flags)
{
	struct ion_client *clnt = NULL;

	if (!hndl || !smem_client) {
		dprintk(VIDC_WARN, "Invalid params: %pK, %pK\n",
				smem_client, hndl);
		return;
	}

	clnt = smem_client->clnt;
	if (!clnt) {
		dprintk(VIDC_WARN, "Invalid client\n");
		return;
	}

	/* Undo the IOMMU mapping set up by get_device_address() */
	if (is_iommu_present(smem_client->res)) {
		dprintk(VIDC_DBG,
			"Calling ion_unmap_iommu - domain: %d, partition: %d\n",
			domain_num, partition_num);
		ion_unmap_iommu(clnt, hndl, domain_num, partition_num);
	}

	/* Secure buffers have to be unsecured before they can be freed */
	if (flags & SMEM_SECURE) {
		if (msm_ion_unsecure_buffer(clnt, hndl))
			dprintk(VIDC_ERR, "Failed to unsecure memory\n");
	}
}
static int get_device_address(struct smem_client *smem_client,
		struct ion_handle *hndl, unsigned long align,
		dma_addr_t *iova, unsigned long *buffer_size,
		u32 flags, enum hal_buffer buffer_type)
{
	int rc = 0;
	int domain, partition;
	struct ion_client *clnt = NULL;

	if (!iova || !buffer_size || !hndl || !smem_client) {
		dprintk(VIDC_ERR, "Invalid params: %pK, %pK, %pK, %pK\n",
				smem_client, hndl, iova, buffer_size);
		return -EINVAL;
	}

	clnt = smem_client->clnt;
	if (!clnt) {
		dprintk(VIDC_ERR, "Invalid client\n");
		return -EINVAL;
	}

	/* Pick the IOMMU domain/partition for this buffer type */
	rc = msm_smem_get_domain_partition(smem_client, flags, buffer_type,
			&domain, &partition);
	if (rc) {
		dprintk(VIDC_ERR, "Failed to get domain and partition: %d\n",
				rc);
		goto mem_domain_get_failed;
	}

	/* Secure buffers are protected via TZ before being mapped */
	if (flags & SMEM_SECURE) {
		rc = msm_ion_secure_buffer(clnt, hndl,
				get_tz_usage(smem_client, buffer_type), 0);
		if (rc) {
			dprintk(VIDC_ERR, "Failed to secure memory\n");
			goto mem_domain_get_failed;
		}
	}

	if (is_iommu_present(smem_client->res)) {
		dprintk(VIDC_DBG,
			"Calling ion_map_iommu - domain: %d, partition: %d\n",
			domain, partition);
		rc = ion_map_iommu(clnt, hndl, domain, partition, align,
				0, iova, buffer_size, 0, 0);
	} else {
		/* No IOMMU: the device uses the physical address directly */
		dprintk(VIDC_DBG, "Using physical memory address\n");
		rc = ion_phys(clnt, hndl, iova, (size_t *)buffer_size);
	}

	if (rc) {
		dprintk(VIDC_ERR, "ion memory map failed - %d\n", rc);
		goto mem_map_failed;
	}

	return 0;

mem_map_failed:
	if (flags & SMEM_SECURE)
		msm_ion_unsecure_buffer(clnt, hndl);
mem_domain_get_failed:
	return rc;
}
static int alloc_ion_mem(struct smem_client *client, size_t size, u32 align,
		u32 flags, enum hal_buffer buffer_type, struct msm_smem *mem,
		int map_kernel)
{
	struct ion_handle *hndl;
	dma_addr_t iova = 0;
	unsigned long buffer_size = 0;
	unsigned long heap_mask = 0;
	int rc = 0;

	align = ALIGN(align, SZ_4K);
	size = ALIGN(size, SZ_4K);

	/* Secure buffers are sized and aligned in 1 MB units */
	if (flags & SMEM_SECURE) {
		size = ALIGN(size, SZ_1M);
		align = ALIGN(align, SZ_1M);
	}

	/*
	 * Pick the heap: IOMMU heap when an IOMMU is present, ADSP heap
	 * otherwise, and the content-protection heap for secure buffers.
	 */
	if (is_iommu_present(client->res)) {
		heap_mask = ION_HEAP(ION_IOMMU_HEAP_ID);
	} else {
		dprintk(VIDC_DBG,
			"allocate shared memory from adsp heap size %zu align %u\n",
			size, align);
		heap_mask = ION_HEAP(ION_ADSP_HEAP_ID);
	}

	if (flags & SMEM_SECURE)
		heap_mask = ION_HEAP(ION_CP_MM_HEAP_ID);

	hndl = ion_alloc(client->clnt, size, align, heap_mask, flags);
	if (IS_ERR_OR_NULL(hndl)) {
		dprintk(VIDC_ERR,
			"Failed to allocate shared memory = %pK, %zu, %u, 0x%x\n",
			client, size, align, flags);
		rc = -ENOMEM;
		goto fail_shared_mem_alloc;
	}

	mem->mem_type = client->mem_type;
	mem->smem_priv = hndl;
	mem->flags = flags;
	mem->buffer_type = buffer_type;

	if (map_kernel) {
		mem->kvaddr = ion_map_kernel(client->clnt, hndl);
		if (IS_ERR_OR_NULL(mem->kvaddr)) {
			dprintk(VIDC_ERR,
				"Failed to map shared mem in kernel\n");
			rc = -EIO;
			goto fail_map;
		}
	} else {
		mem->kvaddr = NULL;
	}

	rc = get_device_address(client, hndl, align, &iova, &buffer_size,
			flags, buffer_type);
	if (rc) {
		dprintk(VIDC_ERR, "Failed to get device address: %d\n", rc);
		goto fail_device_address;
	}

	mem->device_addr = iova;
	dprintk(VIDC_DBG,
		"device_address = 0x%lx, kvaddr = 0x%pK, size = %zu\n",
		mem->device_addr, mem->kvaddr, size);
	mem->size = size;
	return rc;
fail_device_address:
	if (map_kernel)
		ion_unmap_kernel(client->clnt, hndl);
fail_map:
	ion_free(client->clnt, hndl);
fail_shared_mem_alloc:
	return rc;
}
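/*
 * Illustrative sketch, not taken from this file: how a complementary free
 * path for alloc_ion_mem() might look. It assumes struct msm_smem still
 * carries the fields populated above (smem_priv, kvaddr, flags, buffer_type)
 * and reuses msm_smem_get_domain_partition() and put_device_address() from
 * this file; the function name and error handling are hypothetical.
 */
static void free_ion_mem_sketch(struct smem_client *client,
		struct msm_smem *mem)
{
	struct ion_handle *hndl = mem->smem_priv;
	int domain, partition, rc;

	/* Look up the same domain/partition the buffer was mapped into */
	rc = msm_smem_get_domain_partition(client, mem->flags,
			mem->buffer_type, &domain, &partition);
	if (!rc)
		/* Tear down the IOMMU mapping and unsecure, if required */
		put_device_address(client, hndl, domain, partition,
				mem->flags);
	else
		dprintk(VIDC_ERR,
			"Failed to get domain and partition: %d\n", rc);

	/* Drop the kernel mapping created when map_kernel was set */
	if (mem->kvaddr)
		ion_unmap_kernel(client->clnt, hndl);

	ion_free(client->clnt, hndl);
}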
static int venus_register_domain(u32 fw_max_sz)
{
	struct msm_iova_partition venus_fw_partition = {
		.start = 0,
		.size = fw_max_sz,
	};
	struct msm_iova_layout venus_fw_layout = {
		.partitions = &venus_fw_partition,
		.npartitions = 1,
		.client_name = "pil_venus",
		.domain_flags = 0,
	};

	return msm_register_domain(&venus_fw_layout);
}

static int pil_venus_mem_setup(struct platform_device *pdev, size_t size)
{
	int domain;

	venus_data->iommu_fw_ctx = msm_iommu_get_ctx("venus_fw");
	if (!venus_data->iommu_fw_ctx) {
		dprintk(VIDC_ERR, "No iommu fw context found\n");
		return -ENODEV;
	}

	if (!venus_data->venus_domain_num) {
		size = round_up(size, SZ_4K);
		domain = venus_register_domain(size);
		if (domain < 0) {
			dprintk(VIDC_ERR,
				"Venus fw iommu domain register failed\n");
			return -ENODEV;
		}
		venus_data->iommu_fw_domain = msm_get_iommu_domain(domain);
		if (!venus_data->iommu_fw_domain) {
			dprintk(VIDC_ERR, "No iommu fw domain found\n");
			return -ENODEV;
		}
		venus_data->venus_domain_num = domain;
		venus_data->fw_sz = size;
	}
	return 0;
}

static int pil_venus_auth_and_reset(struct platform_device *pdev)
{
	int rc;
	phys_addr_t fw_bias = venus_data->resources->firmware_base;
	void __iomem *reg_base = venus_data->reg_base;
	u32 ver;
	bool iommu_present = is_iommu_present(venus_data->resources);

	if (!fw_bias) {
		dprintk(VIDC_ERR, "FW bias is not valid\n");
		return -EINVAL;
	}

	/* Get Venus version number */
	if (!venus_data->hw_ver_checked) {
		ver = readl_relaxed(reg_base + VIDC_WRAPPER_HW_VERSION);
		venus_data->hw_ver_minor = (ver & 0x0FFF0000) >> 16;
		venus_data->hw_ver_major = (ver & 0xF0000000) >> 28;
		venus_data->hw_ver_checked = 1;
	}