int res_trk_close_secure_session(void)
{
	int rc;
	if (res_trk_check_for_sec_session() == 1 &&
		resource_context.sec_clk_heap) {
		pr_err("Unsecuring....\n");
		mutex_lock(&resource_context.secure_lock);
		rc = res_trk_enable_iommu_clocks();
		if (rc) {
			pr_err("IOMMU clock enabled failed while close\n");
			goto error_close;
		}
		msm_ion_unsecure_heap(ION_HEAP(resource_context.cmd_mem_type));
		msm_ion_unsecure_heap(ION_HEAP(resource_context.memtype));

		if (resource_context.vidc_platform_data->secure_wb_heap)
			msm_ion_unsecure_heap(ION_HEAP(ION_CP_WB_HEAP_ID));

		res_trk_disable_iommu_clocks();
		resource_context.sec_clk_heap = 0;
		mutex_unlock(&resource_context.secure_lock);
	}
	return 0;
error_close:
	mutex_unlock(&resource_context.secure_lock);
	return rc;
}
Example 2
int res_trk_open_secure_session(void)
{
	int rc, memtype;
	if (!res_trk_check_for_sec_session()) {
		pr_err("Secure sessions are not active\n");
		return -EINVAL;
	}
	mutex_lock(&resource_context.secure_lock);
	if (!resource_context.sec_clk_heap) {
		pr_err("Securing...\n");
		rc = res_trk_enable_iommu_clocks();
		if (rc) {
			pr_err("IOMMU clock enabled failed while open");
			goto error_open;
		}

		memtype = ION_HEAP(resource_context.memtype);
		rc = msm_ion_secure_heap(memtype);
		if (rc) {
			pr_err("ION heap secure failed heap id %d rc %d\n",
				   resource_context.memtype, rc);
			goto disable_iommu_clks;
		}
		memtype = ION_HEAP(resource_context.cmd_mem_type);
		rc = msm_ion_secure_heap(memtype);
		if (rc) {
			pr_err("ION heap secure failed heap id %d rc %d\n",
				   resource_context.cmd_mem_type, rc);
			goto unsecure_memtype_heap;
		}
		if (resource_context.vidc_platform_data->secure_wb_heap) {
			memtype = ION_HEAP(ION_CP_WB_HEAP_ID);
			rc = msm_ion_secure_heap(memtype);
			if (rc) {
				pr_err("WB_HEAP_ID secure failed rc %d\n", rc);
				goto unsecure_cmd_heap;
			}
		}
		resource_context.sec_clk_heap = 1;
		res_trk_disable_iommu_clocks();
	}
	mutex_unlock(&resource_context.secure_lock);
	return 0;
unsecure_cmd_heap:
	msm_ion_unsecure_heap(ION_HEAP(resource_context.cmd_mem_type));
unsecure_memtype_heap:
	msm_ion_unsecure_heap(ION_HEAP(resource_context.memtype));
disable_iommu_clks:
	res_trk_disable_iommu_clocks();
error_open:
	resource_context.sec_clk_heap = 0;
	mutex_unlock(&resource_context.secure_lock);
	return rc;
}
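A minimal caller sketch for the pair above, assuming only the two res_trk_*_secure_session() entry points shown; the wrapper name vcd_start_secure_playback() and its calling context are illustrative, not taken from the driver.

/*
 * Hypothetical wrapper: secure the heaps, run the secure session,
 * then unsecure them again on the way out.
 */
static int vcd_start_secure_playback(void)
{
	int rc;

	rc = res_trk_open_secure_session();
	if (rc) {
		pr_err("failed to open secure session, rc %d\n", rc);
		return rc;
	}

	/* ... run the secure decode/playback work here ... */

	return res_trk_close_secure_session();
}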
Example 3
int res_trk_close_secure_session(void)
{
	int rc;
	mutex_lock(&resource_context.secure_lock);
	rc = res_trk_enable_iommu_clocks();
	if (rc) {
		pr_err("IOMMU clock enabled failed while close");
		goto error_close;
	}
	msm_ion_unsecure_heap(ION_HEAP(resource_context.memtype));
	msm_ion_unsecure_heap(ION_HEAP(resource_context.cmd_mem_type));
	res_trk_disable_iommu_clocks();
	resource_context.secure_session = 0;
	mutex_unlock(&resource_context.secure_lock);
	return 0;
error_close:
	mutex_unlock(&resource_context.secure_lock);
	return rc;
}
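/*
 * ion_test_ioctl() - exercise the MSM ION kernel API on behalf of a
 * user-space test client, using the per-file msm_ion_test context kept
 * in file->private_data.
 */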
static long ion_test_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	int ret;
	ion_phys_addr_t phys_addr;
	void *addr;
	size_t len;
	unsigned long flags, size;
	struct msm_ion_test *ion_test = file->private_data;
	struct ion_test_data *test_data = &ion_test->test_data;

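	/*
	 * Test commands fall into four groups: ION client lifetime
	 * (KCLIENT_CREATE/DESTROY), kernel-side buffers (KALLOC, KFREE,
	 * KPHYS, KMAP, KUMAP), imported user buffers and verification
	 * (UIMPORT, UBUF_FLAGS, UBUF_SIZE, WRITE_VERIFY, VERIFY), and
	 * heap protection (SEC, UNSEC).
	 */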
	switch (cmd) {
	case IOC_ION_KCLIENT_CREATE:
	{
		ret = create_ion_client(ion_test);
		break;
	}
	case IOC_ION_KCLIENT_DESTROY:
	{
		free_ion_client(ion_test);
		ret = 0;
		break;
	}
	case IOC_ION_KALLOC:
	{
		if (copy_from_user(test_data, (void __user *)arg,
						sizeof(struct ion_test_data)))
			return -EFAULT;
		ret = alloc_ion_buf(ion_test, test_data);
		if (ret)
			pr_info("allocating ion buffer failed\n");
		break;
	}
	case IOC_ION_KFREE:
	{
		free_ion_buf(ion_test);
		ret = 0;
		break;
	}
	case IOC_ION_KPHYS:
	{
		ret = ion_phys(ion_test->ion_client, ion_test->ion_handle,
							&phys_addr, &len);
		if (!ret)
			pr_info("size is 0x%x\n phys addr 0x%x", len,
						(unsigned int)phys_addr);
		break;
	}
	case IOC_ION_KMAP:
	{
		addr = ion_map_kernel(ion_test->ion_client,
					ion_test->ion_handle);
		if (IS_ERR_OR_NULL(addr)) {
			ret = -EIO;
			pr_info("mapping kernel buffer failed\n");
		} else {
			ret = 0;
			test_data->vaddr = (unsigned long)addr;
		}
		break;
	}
	case IOC_ION_KUMAP:
	{
		ion_unmap_kernel(ion_test->ion_client, ion_test->ion_handle);
		ret = 0;
		break;
	}
	case IOC_ION_UIMPORT:
	{
		if (copy_from_user(test_data, (void __user *)arg,
						sizeof(struct ion_test_data)))
			return -EFAULT;
		ion_test->ion_handle = ion_import_dma_buf(ion_test->ion_client,
							test_data->shared_fd);
		if (IS_ERR_OR_NULL(ion_test->ion_handle)) {
			ret = -EIO;
			pr_info("import of user buf failed\n");
		} else
			ret = 0;
		break;
	}
	case IOC_ION_UBUF_FLAGS:
	{
		ret = ion_handle_get_flags(ion_test->ion_client,
						ion_test->ion_handle, &flags);
		if (ret)
			pr_info("user flags cannot be retrieved\n");
		else
			if (copy_to_user((void __user *)arg, &flags,
						sizeof(unsigned long)))
				ret = -EFAULT;
		break;
	}
	case IOC_ION_UBUF_SIZE:
	{
		ret = ion_handle_get_size(ion_test->ion_client,
						ion_test->ion_handle, &size);
		if (ret)
			pr_info("buffer size cannot be retrieved\n");
		else
			if (copy_to_user((void __user *)arg, &size,
							sizeof(unsigned long)))
				ret = -EFAULT;
		break;
	}
	case IOC_ION_WRITE_VERIFY:
	{
		write_pattern(test_data->vaddr, test_data->size);
		if (verify_pattern(test_data->vaddr, test_data->size)) {
			pr_info("verify of mapped buf failed\n");
			ret = -EIO;
		} else
			ret = 0;
		break;
	}
	case IOC_ION_VERIFY:
	{
		if (verify_pattern(test_data->vaddr, test_data->size)) {
			pr_info("fail in verifying imported buffer\n");
			ret = -EIO;
		} else
			ret = 0;
		break;
	}
	case IOC_ION_SEC:
	{
		ret = msm_ion_secure_heap(ION_CP_MM_HEAP_ID);
		if (ret)
			pr_info("unable to secure heap\n");
		else
			pr_info("able to secure heap\n");
		break;
	}
	case IOC_ION_UNSEC:
	{
		ret = msm_ion_unsecure_heap(ION_CP_MM_HEAP_ID);
		if (ret)
			pr_info("unable to unsecure heap\n");
		else
			pr_info("able to unsecure heap\n");
		break;
	}
	default:
	{
		pr_info("command not supproted\n");
		ret = -EINVAL;
	}
	}
	return ret;
}
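A minimal user-space sketch of driving the handler above, assuming the test driver registers a character device; the node path /dev/msm_ion_test and the header name msm_ion_test.h are assumptions, and only the ion_test_data field (size) visible in the handler is relied on.

/*
 * Hypothetical user-space exerciser for ion_test_ioctl(). IOC_ION_* and
 * struct ion_test_data are expected to come from the driver's UAPI test
 * header; the header name and device node path are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "msm_ion_test.h"	/* assumed header exporting IOC_ION_* */

int main(void)
{
	struct ion_test_data data = { 0 };
	int fd = open("/dev/msm_ion_test", O_RDWR);	/* assumed node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	data.size = 0x1000;	/* other fields depend on the full struct
				 * definition, which is not shown above */
	if (ioctl(fd, IOC_ION_KCLIENT_CREATE) ||
	    ioctl(fd, IOC_ION_KALLOC, &data) ||
	    ioctl(fd, IOC_ION_KPHYS))
		fprintf(stderr, "ion test ioctl failed\n");
	ioctl(fd, IOC_ION_KFREE);
	ioctl(fd, IOC_ION_KCLIENT_DESTROY);
	close(fd);
	return 0;
}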