TEST_F(FormerlyValidHandle, free)
{
    ASSERT_EQ(-EINVAL, ion_free(m_ionFd, m_handle));
}
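The fixture behind this test is not shown on this page; the sketch below reconstructs the scenario it exercises using helpers that appear in the later examples (ion_open, _ion_alloc_test, ion_free, ion_close). The wiring is an assumption, not the actual fixture.

/* Free a handle twice; the second ion_free() should fail with -EINVAL. */
static int double_free_demo(void)
{
	struct ion_handle *handle = NULL;
	int fd = ion_open();
	int ret;

	if (fd < 0)
		return -1;
	ret = _ion_alloc_test(fd, &handle);	/* helper from the alloc/fail/alloc test below */
	if (!ret) {
		ion_free(fd, handle);		/* first free: handle goes stale */
		ret = ion_free(fd, handle);	/* expect -EINVAL on the stale handle */
	}
	ion_close(fd);
	return ret;
}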
void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size, offset = 0;
	struct ddl_context *ddl_context;
	unsigned long iova = 0;
	unsigned long buffer_size = 0;
	unsigned long *kernel_vaddr = NULL;
	int ret = 0;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	int rc = 0;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	res_trk_set_mem_type(addr->mem_type);
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			DDL_MSG_ERROR("%s() :DDL ION Client Invalid handle\n",
						 __func__);
			goto bail_out;
		}
		alloc_size = (alloc_size + 4095) & ~4095;
		addr->alloc_handle = ion_alloc(
				ddl_context->video_ion_client, alloc_size,
				SZ_4K, res_trk_get_mem_type(),
				res_trk_get_ion_flags());
		if (IS_ERR_OR_NULL(addr->alloc_handle)) {
			DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
						 __func__);
			goto bail_out;
		}
		kernel_vaddr = (unsigned long *) ion_map_kernel(
					ddl_context->video_ion_client,
					addr->alloc_handle);
		if (IS_ERR_OR_NULL(kernel_vaddr)) {
				DDL_MSG_ERROR("%s() :DDL ION map failed\n",
							 __func__);
				goto free_ion_alloc;
		}
		addr->virtual_base_addr = (u8 *) kernel_vaddr;
		if (res_trk_check_for_sec_session()) {
			rc = ion_phys(ddl_context->video_ion_client,
				addr->alloc_handle, &phyaddr,
					&len);
			if (rc || !phyaddr) {
				DDL_MSG_ERROR(
				"%s():DDL ION client physical failed\n",
				__func__);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = phyaddr;
		} else {
			ret = ion_map_iommu(ddl_context->video_ion_client,
					addr->alloc_handle,
					VIDEO_DOMAIN,
					VIDEO_MAIN_POOL,
					SZ_4K,
					0,
					&iova,
					&buffer_size,
					0, 0);
			if (ret || !iova) {
				DDL_MSG_ERROR(
				"%s():DDL ION ion map iommu failed, ret = %d iova = 0x%lx\n",
					__func__, ret, iova);
				goto unmap_ion_alloc;
			}
			addr->alloced_phys_addr = (phys_addr_t) iova;
		}
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s():DDL ION client physical failed\n",
						__func__);
			goto unmap_ion_alloc;
		}
		addr->mapped_buffer = NULL;
		addr->physical_base_addr = (u8 *) addr->alloced_phys_addr;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = alloc_size;
	} else {
		pr_err("ION must be enabled\n");
		goto bail_out;
	}
	return addr->virtual_base_addr;
unmap_ion_alloc:
	ion_unmap_kernel(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->virtual_base_addr = NULL;
	addr->alloced_phys_addr = (phys_addr_t)NULL;
free_ion_alloc:
	ion_free(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->alloc_handle = NULL;
bail_out:
	return NULL;
}
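The labels above unwind in reverse order of acquisition (iommu/phys mapping, kernel mapping, then the handle itself); a release routine for the success path follows the same order. The sketch below is an assumption written against the same helpers, not the driver's actual ddl_pmem_free(); the ion_unmap_iommu() call mirrors the ion_map_iommu() in the non-secure branch.

static void ddl_ion_release(struct ddl_context *ddl_context,
			    struct ddl_buf_addr *addr)
{
	if (!addr->alloc_handle)
		return;
	if (!res_trk_check_for_sec_session())
		ion_unmap_iommu(ddl_context->video_ion_client,	/* undo ion_map_iommu */
				addr->alloc_handle,
				VIDEO_DOMAIN, VIDEO_MAIN_POOL);
	ion_unmap_kernel(ddl_context->video_ion_client,		/* undo ion_map_kernel */
		addr->alloc_handle);
	ion_free(ddl_context->video_ion_client, addr->alloc_handle);
	addr->alloc_handle = NULL;
	addr->virtual_base_addr = NULL;
	addr->alloced_phys_addr = (phys_addr_t)NULL;
}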
static long tf_device_ioctl(struct file *file, unsigned int ioctl_num,
	unsigned long ioctl_param)
{
	int result = S_SUCCESS;
	struct tf_connection *connection;
	union tf_command command;
	struct tf_command_header header;
	union tf_answer answer;
	u32 command_size;
	u32 answer_size;
	void *user_answer;

	dprintk(KERN_INFO "tf_device_ioctl(%p, %u, %p)\n",
		file, ioctl_num, (void *) ioctl_param);

	switch (ioctl_num) {
	case IOCTL_TF_GET_VERSION:
		/* ioctl is asking for the driver interface version */
		result = TF_DRIVER_INTERFACE_VERSION;
		goto exit;

#ifdef CONFIG_TF_ION
	case IOCTL_TF_ION_REGISTER: {
		int ion_register;
		/* ioctl is asking to register an ion handle */
		if (copy_from_user(&ion_register,
				(int *) ioctl_param,
				sizeof(int))) {
			dprintk(KERN_ERR "tf_device_ioctl(%p): "
				"copy_from_user failed\n",
				file);
			result = -EFAULT;
			goto exit;
		}

		connection = tf_conn_from_file(file);
		BUG_ON(connection == NULL);

		/* Initialize ION connection */
		if (connection->ion_client == NULL) {
			connection->ion_client = ion_client_create(
						omap_ion_device,
						(1 << ION_HEAP_TYPE_CARVEOUT),
						"smc");
		}

		if (connection->ion_client == NULL) {
			dprintk(KERN_ERR "tf_device_ioctl(%p): "
				"unable to create ion client\n",
				file);
			result = -EFAULT;
			goto exit;
		}

		/*
		 * TODO: We should use a reference count on this handle in
		 * order not to unregister it while it is still in use.
		 */
		return (long)ion_import_fd(connection->ion_client, ion_register);
	}

	case IOCTL_TF_ION_UNREGISTER: {
		int ion_register;
		/* ioctl is asking to unregister an ion handle */

		if (copy_from_user(&ion_register,
				(int *) ioctl_param,
				sizeof(int))) {
			dprintk(KERN_ERR "tf_device_ioctl(%p): "
				"copy_from_user failed\n",
				file);
			result = -EFAULT;
			goto exit;
		}

		connection = tf_conn_from_file(file);
		BUG_ON(connection == NULL);

		if (connection->ion_client == NULL) {
			dprintk(KERN_ERR "tf_device_ioctl(%p): "
				"ion client does not exist\n",
				file);
			result = -EFAULT;
			goto exit;
		}

		ion_free(connection->ion_client,
			(struct ion_handle *) ion_register);

		return S_SUCCESS;
	}
#endif

	case IOCTL_TF_EXCHANGE:
		/*
		 * ioctl is asking to perform a message exchange with the Secure
		 * Module
		 */

		/*
		 * Make a local copy of the data from the user application
		 * This routine checks the data is readable
		 *
		 * Get the header first.
		 */
		if (copy_from_user(&header,
				(struct tf_command_header *)ioctl_param,
				sizeof(struct tf_command_header))) {
			dprintk(KERN_ERR "tf_device_ioctl(%p): "
				"Cannot access ioctl parameter %p\n",
				file, (void *) ioctl_param);
			result = -EFAULT;
			goto exit;
		}

		/* size in words of u32 */
		command_size = header.message_size +
			sizeof(struct tf_command_header)/sizeof(u32);
		if (command_size > sizeof(command)/sizeof(u32)) {
			dprintk(KERN_ERR "tf_device_ioctl(%p): "
				"Buffer overflow: too many bytes to copy %d\n",
				file, command_size);
			result = -EFAULT;
			goto exit;
		}

		if (copy_from_user(&command,
				(union tf_command *)ioctl_param,
				command_size * sizeof(u32))) {
			dprintk(KERN_ERR "tf_device_ioctl(%p): "
				"Cannot access ioctl parameter %p\n",
				file, (void *) ioctl_param);
			result = -EFAULT;
			goto exit;
		}

		connection = tf_conn_from_file(file);
		BUG_ON(connection == NULL);

		/*
		 * The answer memory space address is in the operation_id field
		 */
		user_answer = (void *) command.header.operation_id;

		atomic_inc(&(connection->pending_op_count));

		dprintk(KERN_WARNING "tf_device_ioctl(%p): "
			"Sending message type 0x%08x\n",
			file, command.header.message_type);

		switch (command.header.message_type) {
		case TF_MESSAGE_TYPE_OPEN_CLIENT_SESSION:
			result = tf_open_client_session(connection,
				&command, &answer);
			break;

		case TF_MESSAGE_TYPE_CLOSE_CLIENT_SESSION:
			result = tf_close_client_session(connection,
				&command, &answer);
			break;

		case TF_MESSAGE_TYPE_REGISTER_SHARED_MEMORY:
			result = tf_register_shared_memory(connection,
				&command, &answer);
			break;

		case TF_MESSAGE_TYPE_RELEASE_SHARED_MEMORY:
			result = tf_release_shared_memory(connection,
				&command, &answer);
			break;

		case TF_MESSAGE_TYPE_INVOKE_CLIENT_COMMAND:
			result = tf_invoke_client_command(connection,
				&command, &answer);
			break;

		case TF_MESSAGE_TYPE_CANCEL_CLIENT_COMMAND:
			result = tf_cancel_client_command(connection,
				&command, &answer);
			break;

		default:
			dprintk(KERN_ERR "tf_device_ioctl(%p): "
				"Incorrect message type (0x%08x)!\n",
				connection, command.header.message_type);
			result = -EOPNOTSUPP;
			break;
		}

		atomic_dec(&(connection->pending_op_count));

		if (result != 0) {
			dprintk(KERN_WARNING "tf_device_ioctl(%p): "
				"Operation returned error code 0x%08x!\n",
				file, result);
			goto exit;
		}

		/*
		 * Copy the answer back to the user space application.
		 * The driver does not check this field, only copy back to user
		 * space the data handed over by Secure World
		 */
		answer_size = answer.header.message_size +
			sizeof(struct tf_answer_header)/sizeof(u32);
		if (copy_to_user(user_answer,
				&answer, answer_size * sizeof(u32))) {
			dprintk(KERN_WARNING "tf_device_ioctl(%p): "
				"Failed to copy back the full command "
				"answer to %p\n", file, user_answer);
			result = -EFAULT;
			goto exit;
		}

		/* successful completion */
		dprintk(KERN_INFO "tf_device_ioctl(%p): Success\n", file);
		break;

	case IOCTL_TF_GET_DESCRIPTION: {
		/* ioctl asking for the version information buffer */
		struct tf_version_information_buffer *pInfoBuffer;

		dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION:(%p, %u, %p)\n",
			file, ioctl_num, (void *) ioctl_param);

		pInfoBuffer =
			((struct tf_version_information_buffer *) ioctl_param);

		dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION1: "
			"driver_description=\"%64s\"\n", S_VERSION_STRING);

		if (copy_to_user(pInfoBuffer->driver_description,
				S_VERSION_STRING,
				strlen(S_VERSION_STRING) + 1)) {
			dprintk(KERN_ERR "tf_device_ioctl(%p): "
				"Failed to copy back the driver description "
				"to %p\n",
				file, pInfoBuffer->driver_description);
			result = -EFAULT;
			goto exit;
		}

		dprintk(KERN_INFO "IOCTL_TF_GET_DESCRIPTION2: "
			"secure_world_description=\"%64s\"\n",
			tf_get_description(&g_tf_dev.sm));

		if (copy_to_user(pInfoBuffer->secure_world_description,
				tf_get_description(&g_tf_dev.sm),
				TF_DESCRIPTION_BUFFER_LENGTH)) {
			dprintk(KERN_WARNING "tf_device_ioctl(%p): "
				"Failed to copy back the secure world "
				"description to %p\n",
				file, pInfoBuffer->secure_world_description);
			result = -EFAULT;
			goto exit;
		}
		break;
	}

	default:
		dprintk(KERN_ERR "tf_device_ioctl(%p): "
			"Unknown IOCTL code 0x%08x!\n",
			file, ioctl_num);
		result = -EOPNOTSUPP;
		goto exit;
	}

exit:
	return result;
}
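A userspace client drives this handler through plain ioctl() calls. The sketch below queries the interface version and registers an ion fd; the device node name is a guess and only the request codes come from the cases above, so treat the snippet as illustrative.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

int tf_register_ion_buffer(int ion_fd)
{
	long version, handle;
	int dev = open("/dev/tf_driver", O_RDWR);	/* hypothetical node name */

	if (dev < 0)
		return -1;
	/* IOCTL_TF_GET_VERSION returns the version as the ioctl result */
	version = ioctl(dev, IOCTL_TF_GET_VERSION, 0);
	if (version != TF_DRIVER_INTERFACE_VERSION) {
		close(dev);
		return -1;
	}
	/* the handler copies the fd from user memory, so pass a pointer */
	handle = ioctl(dev, IOCTL_TF_ION_REGISTER, &ion_fd);
	close(dev);
	return (int)handle;
}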
/**
 * Keep allocating buffers of the specified size & type until the
 * allocation fails. Then free 10 buffers and allocate 10 buffers again.
 */
int ion_alloc_fail_alloc_test(void)
{
	int fd, ret = 0, i;
	struct ion_handle **handle;
	const int  COUNT_ALLOC_MAX = 200;
	const int  COUNT_REALLOC_MAX = 10;
	int count_alloc = COUNT_ALLOC_MAX, count_realloc = COUNT_ALLOC_MAX;

	fd = ion_open();
	if (fd < 0) {
		printf("%s(): FAILED to open ion device\n", __func__);
		return -1;
	}

	handle = (struct ion_handle **)malloc(COUNT_ALLOC_MAX * sizeof(struct ion_handle *));
	if (handle == NULL) {
		printf("%s(): FAILED to allocate memory for ion_handles\n", __func__);
		ion_close(fd);
		return -ENOMEM;
	}

	/* Allocate ion_handles as much as possible */
	for (i = 0; i < COUNT_ALLOC_MAX; i++) {
		ret = _ion_alloc_test(fd, &(handle[i]));
		printf("%s(): Alloc handle[%d]=%p\n", __func__, i, handle[i]);
		if (ret || ((int)handle[i] == -ENOMEM)) {
			printf("%s(): Alloc handle[%d]=%p FAILED, err:%s\n\n",
					__func__, i, handle[i], strerror(ret));
			count_alloc = i;
			break;
		}
	}

	/* Free COUNT_REALLOC_MAX ion_handles */
	for (i = count_alloc-1; i > (count_alloc-1 - COUNT_REALLOC_MAX); i--) {
		printf("%s(): Free  handle[%d]=%p\n", __func__, i, handle[i]);
		ret = ion_free(fd, handle[i]);
		if (ret) {
			printf("%s(): Free  handle[%d]=%p FAILED, err:%s\n\n",
					__func__, i, handle[i], strerror(ret));
		}
	}

	/* Again allocate COUNT_REALLOC_MAX ion_handles to test
	   that we are still able to allocate */
	for (i = (count_alloc - COUNT_REALLOC_MAX); i < count_alloc; i++) {
		ret = _ion_alloc_test(fd, &(handle[i]));
		printf("%s(): Alloc handle[%d]=%p\n", __func__, i, handle[i]);
		if (ret || ((int)handle[i] == -ENOMEM)) {
			printf("%s(): Alloc handle[%d]=%p FAILED, err:%s\n\n",
					__func__, i, handle[i], strerror(ret));
			count_realloc = i;
			goto err_alloc;
		}
	}
	count_realloc = i;

err_alloc:
	/* Free all ion_handles */
	for (i = 0; i < count_alloc; i++) {
		printf("%s(): Free  handle[%d]=%p\n", __func__, i, handle[i]);
		ret = ion_free(fd, handle[i]);
		if (ret) {
			printf("%s(): Free  handle[%d]=%p FAILED, err:%s\n",
					__func__, i, handle[i], strerror(ret));
		}
	}

	ion_close(fd);
	free(handle);
	handle = NULL;

	printf("\ncount_alloc=%d, count_realloc=%d\n", count_alloc, count_realloc);

	if (ret || (count_alloc != count_realloc)) {
		printf("\nion alloc->fail->alloc test: FAILED\n\n");
		if (count_alloc != COUNT_ALLOC_MAX)
			ret = -ENOMEM;
	} else
		printf("\nion alloc->fail->alloc test: PASSED\n\n");

	return ret;
}
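The test above leans on an _ion_alloc_test() helper that this page does not reproduce. A plausible minimal version is sketched here; the legacy six-argument ion_alloc() signature, length, alignment, and heap mask are all assumptions.

int _ion_alloc_test(int fd, struct ion_handle **handle)
{
	/* one 4 KB chunk from the system heap, no special flags */
	return ion_alloc(fd, 4096, 4096, ION_HEAP_SYSTEM_MASK, 0, handle);
}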
Example #5
void ddl_pmem_alloc(struct ddl_buf_addr *buff_addr, size_t sz, u32 align)
{
	u32 guard_bytes, align_mask;
	u32 physical_addr;
	u32 align_offset;
	u32 alloc_size, flags = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	unsigned long *kernel_vaddr = NULL;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	int ret = -EINVAL;

	if (!buff_addr) {
		ERR("\n%s() Invalid Parameters\n", __func__);
		return;
	}
	if (align == DDL_LINEAR_BUFFER_ALIGN_BYTES) {
		guard_bytes = 31;
		align_mask = 0xFFFFFFE0U;
	} else {
		guard_bytes = DDL_TILE_BUF_ALIGN_GUARD_BYTES;
		align_mask = DDL_TILE_BUF_ALIGN_MASK;
	}
	ddl_context = ddl_get_context();
	alloc_size = sz + guard_bytes;
	if (res_trk_get_enable_ion()) {
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			ERR("\n%s(): DDL ION Client Invalid handle\n",
				__func__);
			goto bailout;
		}
		buff_addr->mem_type = res_trk_get_mem_type();
		buff_addr->alloc_handle = ion_alloc(
					ddl_context->video_ion_client,
					alloc_size,
					SZ_4K,
					buff_addr->mem_type, 0);
		if (IS_ERR_OR_NULL(buff_addr->alloc_handle)) {
			ERR("\n%s(): DDL ION alloc failed\n", __func__);
			goto bailout;
		}
		ret = ion_phys(ddl_context->video_ion_client,
					buff_addr->alloc_handle,
					&phyaddr,
					&len);
		if (ret || !phyaddr) {
			ERR("\n%s(): DDL ION client physical failed\n",
					__func__);
			goto free_ion_buffer;
		}
		buff_addr->physical_base_addr = (u32 *)phyaddr;
		kernel_vaddr = (unsigned long *) ion_map_kernel(
					ddl_context->video_ion_client,
					buff_addr->alloc_handle);
		if (IS_ERR_OR_NULL(kernel_vaddr)) {
			ERR("\n%s(): DDL ION map failed\n", __func__);
			goto unmap_ion_buffer;
		}
		buff_addr->virtual_base_addr = (u32 *)kernel_vaddr;
		DBG("ddl_ion_alloc: handle(0x%x), mem_type(0x%x), "\
			"phys(0x%x), virt(0x%x), size(%u), align(%u), "\
			"alloced_len(%u)", (u32)buff_addr->alloc_handle,
			(u32)buff_addr->mem_type,
			(u32)buff_addr->physical_base_addr,
			(u32)buff_addr->virtual_base_addr,
			alloc_size, align, len);
	} else {
		physical_addr = (u32)
			allocate_contiguous_memory_nomap(alloc_size,
						ddl_context->memtype, SZ_4K);
		if (!physical_addr) {
			ERR("\n%s(): DDL pmem allocate failed\n",
			       __func__);
			goto bailout;
		}
		buff_addr->physical_base_addr = (u32 *) physical_addr;
		flags = MSM_SUBSYSTEM_MAP_KADDR;
		buff_addr->mapped_buffer =
		msm_subsystem_map_buffer((unsigned long)physical_addr,
		alloc_size, flags, NULL, 0);
		if (IS_ERR(buff_addr->mapped_buffer)) {
			ERR("\n%s() buffer map failed\n", __func__);
			goto free_pmem_buffer;
		}
		mapped_buffer = buff_addr->mapped_buffer;
		if (!mapped_buffer->vaddr) {
			ERR("\n%s() mapped virtual address is NULL\n",
				__func__);
			goto unmap_pmem_buffer;
		}
		buff_addr->virtual_base_addr = mapped_buffer->vaddr;
		DBG("ddl_pmem_alloc: mem_type(0x%x), phys(0x%x),"\
			" virt(0x%x), sz(%u), align(%u)",
			(u32)buff_addr->mem_type,
			(u32)buff_addr->physical_base_addr,
			(u32)buff_addr->virtual_base_addr,
			alloc_size, SZ_4K);
	}

	memset(buff_addr->virtual_base_addr, 0, sz + guard_bytes);
	buff_addr->buffer_size = sz;
	buff_addr->align_physical_addr = (u32 *)
		(((u32)buff_addr->physical_base_addr + guard_bytes) &
		align_mask);
	align_offset = (u32) (buff_addr->align_physical_addr) -
		(u32)buff_addr->physical_base_addr;
	buff_addr->align_virtual_addr =
	    (u32 *) ((u32) (buff_addr->virtual_base_addr)
		     + align_offset);
	DBG("%s(): phys(0x%x) align_phys(0x%x), virt(0x%x),"\
		" align_virt(0x%x)", __func__,
		(u32)buff_addr->physical_base_addr,
		(u32)buff_addr->align_physical_addr,
		(u32)buff_addr->virtual_base_addr,
		(u32)buff_addr->align_virtual_addr);
	return;

unmap_pmem_buffer:
	if (buff_addr->mapped_buffer)
		msm_subsystem_unmap_buffer(buff_addr->mapped_buffer);
free_pmem_buffer:
	if (buff_addr->physical_base_addr)
		free_contiguous_memory_by_paddr((unsigned long)
			buff_addr->physical_base_addr);
	memset(buff_addr, 0, sizeof(struct ddl_buf_addr));
	return;

unmap_ion_buffer:
	if (ddl_context->video_ion_client) {
		if (buff_addr->alloc_handle)
			ion_unmap_kernel(ddl_context->video_ion_client,
				buff_addr->alloc_handle);
	}
free_ion_buffer:
	if (ddl_context->video_ion_client) {
		if (buff_addr->alloc_handle)
			ion_free(ddl_context->video_ion_client,
				buff_addr->alloc_handle);
	}
bailout:
	memset(buff_addr, 0, sizeof(struct ddl_buf_addr));
}
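The guard/mask arithmetic above is easiest to check with concrete numbers: for linear buffers, guard_bytes is 31 and align_mask is 0xFFFFFFE0, so any physical base rounds up to the next 32-byte boundary inside the padded allocation, and the same offset is then applied to the virtual base. A standalone sanity check:

#include <assert.h>
#include <stdint.h>

static void align_demo(void)
{
	uint32_t phys = 0x10000004u;		/* unaligned physical base */
	uint32_t guard_bytes = 31;
	uint32_t align_mask = 0xFFFFFFE0u;
	uint32_t aligned = (phys + guard_bytes) & align_mask;

	assert(aligned == 0x10000020u);		/* next 32-byte boundary */
	assert(aligned - phys == 0x1c);		/* offset reused for the vaddr */
}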
Example #6
static int audamrnb_in_open(struct inode *inode, struct file *file)
{
	struct audio_in *audio = &the_audio_amrnb_in;
	int rc;
	int encid;
	size_t len = 0;
	unsigned long ionflag = 0;
	ion_phys_addr_t addr = 0;
	struct ion_handle *handle = NULL;
	struct ion_client *client = NULL;

	mutex_lock(&audio->lock);
	if (audio->opened) {
		rc = -EBUSY;
		goto done;
	}

	client = msm_ion_client_create(UINT_MAX, "Audio_AMR_In_Client");
	if (IS_ERR_OR_NULL(client)) {
		MM_ERR("Unable to create ION client\n");
		rc = -ENOMEM;
		goto client_create_error;
	}
	audio->client = client;

	handle = ion_alloc(client, DMASZ, SZ_4K,
		ION_HEAP(ION_AUDIO_HEAP_ID), 0);
	if (IS_ERR_OR_NULL(handle)) {
		MM_ERR("Unable to allocate O/P buffers\n");
		rc = -ENOMEM;
		goto buff_alloc_error;
	}
	audio->buff_handle = handle;

	rc = ion_phys(client, handle, &addr, &len);
	if (rc) {
		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
		goto buff_get_phys_error;
	} else {
		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
	}
	audio->phys = (int32_t)addr;

	rc = ion_handle_get_flags(client, handle, &ionflag);
	if (rc) {
		MM_ERR("could not get flags for the handle\n");
		goto buff_get_flags_error;
	}

	audio->map_v_read = ion_map_kernel(client, handle);
	if (IS_ERR(audio->map_v_read)) {
		MM_ERR("could not map read buffers\n");
		rc = -ENOMEM;
		goto buff_map_error;
	}
	audio->data = audio->map_v_read;
	MM_DBG("read buf: phy addr 0x%08x kernel addr 0x%08x\n",
		audio->phys, (int)audio->data);

	MM_DBG("Memory addr = 0x%8x  phy addr = 0x%8x\n",\
		(int) audio->data, (int) audio->phys);
	if ((file->f_mode & FMODE_WRITE) &&
			(file->f_mode & FMODE_READ)) {
		rc = -EACCES;
		MM_ERR("Non tunnel encoding is not supported\n");
		goto buff_map_error;
	} else if (!(file->f_mode & FMODE_WRITE) &&
					(file->f_mode & FMODE_READ)) {
		audio->mode = MSM_AUD_ENC_MODE_TUNNEL;
		MM_DBG("Opened for tunnel mode encoding\n");
	} else {
		rc = -EACCES;
		goto buff_map_error;
	}


	/* Settings will be re-config at AUDIO_SET_CONFIG,
	 * but at least we need to have initial config
	 */
	audio->buffer_size = (FRAME_SIZE - 8);
	audio->enc_type = ENC_TYPE_AMRNB | audio->mode;
	audio->dtx_mode = -1;
	audio->frame_format = 0;
	audio->used_mode = 7; /* Bit Rate 12.2 kbps MR122 */

	encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name,
			&audio->queue_ids);
	if (encid < 0) {
		MM_ERR("No free encoder available\n");
		rc = -ENODEV;
		goto aenc_alloc_error;
	}
	audio->enc_id = encid;

	rc = msm_adsp_get(audio->module_name, &audio->audrec,
			   &audrec_amrnb_adsp_ops, audio);

	if (rc) {
		audpreproc_aenc_free(audio->enc_id);
		goto aenc_alloc_error;
	}

	audio->stopped = 0;
	audio->source = 0;

	audamrnb_in_flush(audio);

	audio->device_events = AUDDEV_EVT_DEV_RDY | AUDDEV_EVT_DEV_RLS |
				AUDDEV_EVT_VOICE_STATE_CHG;

	audio->voice_state = msm_get_voice_state();
	rc = auddev_register_evt_listner(audio->device_events,
					AUDDEV_CLNT_ENC, audio->enc_id,
					amrnb_in_listener, (void *) audio);
	if (rc) {
		MM_ERR("failed to register device event listener\n");
		goto evt_error;
	}
	audio->build_id = socinfo_get_build_id();
	MM_DBG("Modem build id = %s\n", audio->build_id);

	file->private_data = audio;
	audio->opened = 1;
	mutex_unlock(&audio->lock);
	return rc;
evt_error:
	msm_adsp_put(audio->audrec);
	audpreproc_aenc_free(audio->enc_id);
	ion_unmap_kernel(client, audio->buff_handle);
aenc_alloc_error:
buff_map_error:
buff_get_phys_error:
buff_get_flags_error:
	ion_free(client, audio->buff_handle);
buff_alloc_error:
	ion_client_destroy(client);
client_create_error:
done:
	mutex_unlock(&audio->lock);
	return rc;
}
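The open() above acquires the ION client, the buffer handle, and a kernel mapping in that order, and its error labels release them in reverse. A matching teardown for the success path would look roughly like the sketch below; the function name and the surrounding encoder shutdown are assumptions, not the driver's actual release hook.

static void audamrnb_in_teardown_ion(struct audio_in *audio)
{
	ion_unmap_kernel(audio->client, audio->buff_handle); /* undo ion_map_kernel */
	ion_free(audio->client, audio->buff_handle);	/* undo ion_alloc */
	ion_client_destroy(audio->client);		/* undo msm_ion_client_create */
	audio->client = NULL;
	audio->data = NULL;
}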
Example #7
static int audpcm_in_open(struct inode *inode, struct file *file)
{
	struct audio_in *audio = &the_audio_in;
	int rc;
	size_t len = 0;
	unsigned long ionflag = 0;
	ion_phys_addr_t addr = 0;
	struct ion_handle *handle = NULL;
	struct ion_client *client = NULL;

	int encid;
	struct timespec ts;
	struct rtc_time tm;

	mutex_lock(&audio->lock);
	if (audio->opened) {
		rc = -EBUSY;
		goto done;
	}

	audio->mode = MSM_AUD_ENC_MODE_TUNNEL;
	audio->samp_rate = RPC_AUD_DEF_SAMPLE_RATE_11025;
	audio->samp_rate_index = AUDREC_CMD_SAMP_RATE_INDX_11025;
	audio->channel_mode = AUDREC_CMD_STEREO_MODE_MONO;
	audio->buffer_size = MONO_DATA_SIZE;
	audio->enc_type = AUDREC_CMD_TYPE_0_INDEX_WAV | audio->mode;

	rc = audmgr_open(&audio->audmgr);
	if (rc)
		goto done;
	encid = audpreproc_aenc_alloc(audio->enc_type, &audio->module_name,
			&audio->queue_ids);
	if (encid < 0) {
		MM_AUD_ERR("No free encoder available\n");
		rc = -ENODEV;
		goto done;
	}
	audio->enc_id = encid;

	rc = msm_adsp_get(audio->module_name, &audio->audrec,
			   &audrec_adsp_ops, audio);
	if (rc) {
		audpreproc_aenc_free(audio->enc_id);
		goto done;
	}

	audio->dsp_cnt = 0;
	audio->stopped = 0;

	audpcm_in_flush(audio);

	client = msm_ion_client_create(UINT_MAX, "Audio_PCM_in_client");
	if (IS_ERR_OR_NULL(client)) {
		MM_ERR("Unable to create ION client\n");
		rc = -ENOMEM;
		goto client_create_error;
	}
	audio->client = client;

	MM_DBG("allocating mem sz = %d\n", DMASZ);
	handle = ion_alloc(client, DMASZ, SZ_4K,
		ION_HEAP(ION_AUDIO_HEAP_ID));
	if (IS_ERR_OR_NULL(handle)) {
		MM_ERR("Unable to allocate O/P buffers\n");
		rc = -ENOMEM;
		goto output_buff_alloc_error;
	}

	audio->output_buff_handle = handle;

	rc = ion_phys(client, handle, &addr, &len);
	if (rc) {
		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
		rc = -ENOMEM;
		goto output_buff_get_phys_error;
	} else {
		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
	}
	audio->phys = (int32_t)addr;

	rc = ion_handle_get_flags(client, handle, &ionflag);
	if (rc) {
		MM_ERR("could not get flags for the handle\n");
		rc = -ENOMEM;
		goto output_buff_get_flags_error;
	}

	audio->data = ion_map_kernel(client, handle, ionflag);
	if (IS_ERR(audio->data)) {
		MM_ERR("could not map read buffers,freeing instance 0x%08x\n",
				(int)audio);
		rc = -ENOMEM;
		goto output_buff_map_error;
	}
	MM_DBG("read buf: phy addr 0x%08x kernel addr 0x%08x\n",
		audio->phys, (int)audio->data);

	file->private_data = audio;
	audio->opened = 1;
	rc = 0;
done:
	mutex_unlock(&audio->lock);
	getnstimeofday(&ts);
	rtc_time_to_tm(ts.tv_sec, &tm);
	pr_aud_info1("[ATS][start_recording][successful] at %lld "
		"(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n",
		ktime_to_ns(ktime_get()),
		tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
		tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
	return rc;
output_buff_map_error:
output_buff_get_phys_error:
output_buff_get_flags_error:
	ion_free(client, audio->output_buff_handle);
output_buff_alloc_error:
	ion_client_destroy(client);
client_create_error:
	msm_adsp_put(audio->audrec);
	audpreproc_aenc_free(audio->enc_id);
	mutex_unlock(&audio->lock);
	return rc;
}
Example #8
static int smcmod_send_cipher_cmd(struct smcmod_cipher_req *reqp)
{
	int ret = 0;
	struct smcmod_cipher_scm_req scm_req;
	struct ion_client *ion_clientp = NULL;
	struct ion_handle *ion_key_handlep = NULL;
	struct ion_handle *ion_plain_handlep = NULL;
	struct ion_handle *ion_cipher_handlep = NULL;
	struct ion_handle *ion_iv_handlep = NULL;
	size_t size = 0;

	if (IS_ERR_OR_NULL(reqp))
		return -EINVAL;

	/* sanity check the fds */
	if ((reqp->ion_plain_text_fd < 0) ||
		(reqp->ion_cipher_text_fd < 0) ||
		(reqp->ion_init_vector_fd < 0))
		return -EINVAL;

	/* create an ion client */
	ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");

	/* check for errors */
	if (IS_ERR_OR_NULL(ion_clientp))
		return -EINVAL;

	/* fill in the scm request structure */
	scm_req.algorithm = reqp->algorithm;
	scm_req.operation = reqp->operation;
	scm_req.mode = reqp->mode;
	scm_req.key_phys_addr = 0;
	scm_req.key_size = reqp->key_size;
	scm_req.plain_text_size = reqp->plain_text_size;
	scm_req.cipher_text_size = reqp->cipher_text_size;
	scm_req.init_vector_size = reqp->init_vector_size;

	if (!reqp->key_is_null) {
		/* import the key buffer and get the physical address */
		ret = smcmod_ion_fd_to_phys(reqp->ion_key_fd, ion_clientp,
			&ion_key_handlep, &scm_req.key_phys_addr, &size);
		if (ret < 0)
			goto buf_cleanup;

		/* ensure that the key size is not
		 * greater than the size of the buffer.
		 */
		if (reqp->key_size > size) {
			ret = -EINVAL;
			goto buf_cleanup;
		}
	}

	/* import the plain text buffer and get the physical address */
	ret = smcmod_ion_fd_to_phys(reqp->ion_plain_text_fd, ion_clientp,
		&ion_plain_handlep, &scm_req.plain_text_phys_addr, &size);

	if (ret < 0)
		goto buf_cleanup;

	/* ensure that the plain text size is not
	 * greater than the size of the buffer.
	 */
	if (reqp->plain_text_size > size) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* import the cipher text buffer and get the physical address */
	ret = smcmod_ion_fd_to_phys(reqp->ion_cipher_text_fd, ion_clientp,
		&ion_cipher_handlep, &scm_req.cipher_text_phys_addr, &size);
	if (ret < 0)
		goto buf_cleanup;

	/* ensure that the cipher text size is not
	 * greater than the size of the buffer.
	 */
	if (reqp->cipher_text_size > size) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* import the init vector buffer and get the physical address */
	ret = smcmod_ion_fd_to_phys(reqp->ion_init_vector_fd, ion_clientp,
		&ion_iv_handlep, &scm_req.init_vector_phys_addr, &size);
	if (ret < 0)
		goto buf_cleanup;

	/* ensure that the init vector size is not
	 * greater than the size of the buffer.
	 */
	if (reqp->init_vector_size > size) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* Only the scm_req structure will be flushed by scm_call,
	 * so we must flush the cache for the input ion buffers here.
	 */
	msm_ion_do_cache_op(ion_clientp, ion_key_handlep, NULL,
		scm_req.key_size, ION_IOC_CLEAN_CACHES);
	msm_ion_do_cache_op(ion_clientp, ion_iv_handlep, NULL,
		scm_req.init_vector_size, ION_IOC_CLEAN_CACHES);

	/* For decrypt, cipher text is input, otherwise it's plain text. */
	if (reqp->operation)
		msm_ion_do_cache_op(ion_clientp, ion_cipher_handlep, NULL,
			scm_req.cipher_text_size, ION_IOC_CLEAN_CACHES);
	else
		msm_ion_do_cache_op(ion_clientp, ion_plain_handlep, NULL,
			scm_req.plain_text_size, ION_IOC_CLEAN_CACHES);

	/* call scm function to switch to secure world */
	reqp->return_val = scm_call(SMCMOD_SVC_CRYPTO,
		SMCMOD_CRYPTO_CMD_CIPHER, &scm_req,
		sizeof(scm_req), NULL, 0);

	/* Invalidate the output buffer, since it's not done by scm_call */

	/* for decrypt, plain text is the output, otherwise it's cipher text */
	if (reqp->operation)
		msm_ion_do_cache_op(ion_clientp, ion_plain_handlep, NULL,
			scm_req.plain_text_size, ION_IOC_INV_CACHES);
	else
		msm_ion_do_cache_op(ion_clientp, ion_cipher_handlep, NULL,
			scm_req.cipher_text_size, ION_IOC_INV_CACHES);

buf_cleanup:
	/* if the client and handles are valid, free them */
	if (!IS_ERR_OR_NULL(ion_clientp)) {
		if (!IS_ERR_OR_NULL(ion_key_handlep))
			ion_free(ion_clientp, ion_key_handlep);

		if (!IS_ERR_OR_NULL(ion_plain_handlep))
			ion_free(ion_clientp, ion_plain_handlep);

		if (!IS_ERR_OR_NULL(ion_cipher_handlep))
			ion_free(ion_clientp, ion_cipher_handlep);

		if (!IS_ERR_OR_NULL(ion_iv_handlep))
			ion_free(ion_clientp, ion_iv_handlep);

		ion_client_destroy(ion_clientp);
	}

	return ret;
}
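This command, like the digest and decrypt commands later on this page, funnels every ion fd through smcmod_ion_fd_to_phys(), which is not reproduced here. A plausible sketch: import the fd, resolve the physical address and length, and hand the handle back so the caller can ion_free() it during cleanup. This is an assumption about the helper, not the driver source.

static int smcmod_ion_fd_to_phys(s32 fd, struct ion_client *ion_clientp,
	struct ion_handle **ion_handlep, u32 *phys_addrp, size_t *sizep)
{
	int ret;

	if (fd < 0 || !ion_clientp || !ion_handlep || !phys_addrp || !sizep)
		return -EINVAL;

	/* import the buffer fd; the caller owns the handle on success */
	*ion_handlep = ion_import_dma_buf(ion_clientp, fd);
	if (IS_ERR_OR_NULL(*ion_handlep))
		return -EINVAL;

	/* resolve the physical address and total buffer length;
	 * on these 32-bit targets ion_phys_addr_t is an unsigned long */
	ret = ion_phys(ion_clientp, *ion_handlep,
		(ion_phys_addr_t *)phys_addrp, sizep);
	return ret;
}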
Example #9
static int smcmod_send_msg_digest_cmd(struct smcmod_msg_digest_req *reqp)
{
	int ret = 0;
	struct smcmod_msg_digest_scm_req scm_req;
	struct ion_client *ion_clientp = NULL;
	struct ion_handle *ion_key_handlep = NULL;
	struct ion_handle *ion_input_handlep = NULL;
	struct ion_handle *ion_output_handlep = NULL;
	size_t size = 0;

	if (IS_ERR_OR_NULL(reqp))
		return -EINVAL;

	/* sanity check the fds */
	if ((reqp->ion_input_fd < 0) || (reqp->ion_output_fd < 0))
		return -EINVAL;

	/* create an ion client */
	ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");

	/* check for errors */
	if (IS_ERR_OR_NULL(ion_clientp))
		return -EINVAL;

	/* fill in the scm request structure */
	scm_req.algorithm = reqp->algorithm;
	scm_req.key_phys_addr = 0;
	scm_req.key_size = reqp->key_size;
	scm_req.input_size = reqp->input_size;
	scm_req.output_size = reqp->output_size;
	scm_req.verify = 0;

	if (!reqp->key_is_null) {
		/* import the key buffer and get the physical address */
		ret = smcmod_ion_fd_to_phys(reqp->ion_key_fd, ion_clientp,
			&ion_key_handlep, &scm_req.key_phys_addr, &size);
		if (ret < 0)
			goto buf_cleanup;

		/* ensure that the key size is not
		 * greater than the size of the buffer.
		 */
		if (reqp->key_size > size) {
			ret = -EINVAL;
			goto buf_cleanup;
		}
	}

	/* import the input buffer and get the physical address */
	ret = smcmod_ion_fd_to_phys(reqp->ion_input_fd, ion_clientp,
		&ion_input_handlep, &scm_req.input_phys_addr, &size);
	if (ret < 0)
		goto buf_cleanup;

	/* ensure that the input size is not
	 * greater than the size of the buffer.
	 */
	if (reqp->input_size > size) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* import the output buffer and get the physical address */
	ret = smcmod_ion_fd_to_phys(reqp->ion_output_fd, ion_clientp,
		&ion_output_handlep, &scm_req.output_phys_addr, &size);
	if (ret < 0)
		goto buf_cleanup;

	/* ensure that the output size is not
	 * greater than the size of the buffer.
	 */
	if (reqp->output_size > size) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* Only the scm_req structure will be flushed by scm_call,
	 * so we must flush the cache for the input ion buffers here.
	 */
	msm_ion_do_cache_op(ion_clientp, ion_key_handlep, NULL,
		scm_req.key_size, ION_IOC_CLEAN_CACHES);
	msm_ion_do_cache_op(ion_clientp, ion_input_handlep, NULL,
		scm_req.input_size, ION_IOC_CLEAN_CACHES);

	/* call scm function to switch to secure world */
	if (reqp->fixed_block)
		reqp->return_val = scm_call(SMCMOD_SVC_CRYPTO,
			SMCMOD_CRYPTO_CMD_MSG_DIGEST_FIXED,
			&scm_req,
			sizeof(scm_req),
			NULL, 0);
	else
		reqp->return_val = scm_call(SMCMOD_SVC_CRYPTO,
			SMCMOD_CRYPTO_CMD_MSG_DIGEST,
			&scm_req,
			sizeof(scm_req),
			NULL, 0);

	/* Invalidate the output buffer, since it's not done by scm_call */
	msm_ion_do_cache_op(ion_clientp, ion_output_handlep, NULL,
		scm_req.output_size, ION_IOC_INV_CACHES);

buf_cleanup:
	/* if the client and handles are valid, free them */
	if (!IS_ERR_OR_NULL(ion_clientp)) {
		if (!IS_ERR_OR_NULL(ion_key_handlep))
			ion_free(ion_clientp, ion_key_handlep);

		if (!IS_ERR_OR_NULL(ion_input_handlep))
			ion_free(ion_clientp, ion_input_handlep);

		if (!IS_ERR_OR_NULL(ion_output_handlep))
			ion_free(ion_clientp, ion_output_handlep);

		ion_client_destroy(ion_clientp);
	}

	return ret;
}
Example #10
static int register_memory(void)
{
	int			result;
	unsigned long		paddr;
	void                    *kvptr;
	unsigned long		kvaddr;
	unsigned long		mem_len;

	mutex_lock(&acdb_data.acdb_mutex);
	acdb_data.ion_client =
		msm_ion_client_create(UINT_MAX, "audio_acdb_client");
	if (IS_ERR_OR_NULL(acdb_data.ion_client)) {
		pr_err("%s: Could not register ION client!!!\n", __func__);
		result = PTR_ERR(acdb_data.ion_client);
		goto err;
	}

	acdb_data.ion_handle = ion_import_fd(acdb_data.ion_client,
		atomic_read(&acdb_data.map_handle));
	if (IS_ERR_OR_NULL(acdb_data.ion_handle)) {
		pr_err("%s: Could not import map handle!!!\n", __func__);
		result = PTR_ERR(acdb_data.ion_handle);
		goto err_ion_client;
	}

	result = ion_phys(acdb_data.ion_client, acdb_data.ion_handle,
				&paddr, (size_t *)&mem_len);
	if (result != 0) {
		pr_err("%s: Could not get phys addr!!!\n", __func__);
		goto err_ion_handle;
	}

	kvptr = ion_map_kernel(acdb_data.ion_client,
		acdb_data.ion_handle, 0);
	if (IS_ERR_OR_NULL(kvptr)) {
		pr_err("%s: Could not get kernel virt addr!!!\n", __func__);
		result = PTR_ERR(kvptr);
		goto err_ion_handle;
	}
	kvaddr = (unsigned long)kvptr;
	atomic64_set(&acdb_data.paddr, paddr);
	atomic64_set(&acdb_data.kvaddr, kvaddr);
	atomic64_set(&acdb_data.mem_len, mem_len);
	mutex_unlock(&acdb_data.acdb_mutex);

	pr_debug("%s done! paddr = 0x%lx, "
		"kvaddr = 0x%lx, len = 0x%lx\n",
		 __func__,
		(long)atomic64_read(&acdb_data.paddr),
		(long)atomic64_read(&acdb_data.kvaddr),
		(long)atomic64_read(&acdb_data.mem_len));

	return result;
err_ion_handle:
	ion_free(acdb_data.ion_client, acdb_data.ion_handle);
err_ion_client:
	ion_client_destroy(acdb_data.ion_client);
err:
	atomic64_set(&acdb_data.mem_len, 0);
	mutex_unlock(&acdb_data.acdb_mutex);
	return result;
}
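A matching deregistration path would undo the three acquisitions above in reverse order and clear the cached length so readers treat the region as gone. The sketch below assumes the same acdb_data fields; the driver's real deregister routine may differ.

static void deregister_memory_sketch(void)
{
	mutex_lock(&acdb_data.acdb_mutex);
	if (atomic64_read(&acdb_data.mem_len)) {
		ion_unmap_kernel(acdb_data.ion_client, acdb_data.ion_handle);
		ion_free(acdb_data.ion_client, acdb_data.ion_handle);
		ion_client_destroy(acdb_data.ion_client);
		atomic64_set(&acdb_data.kvaddr, 0);
		atomic64_set(&acdb_data.paddr, 0);
		atomic64_set(&acdb_data.mem_len, 0);
	}
	mutex_unlock(&acdb_data.acdb_mutex);
}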
Example #11
static int smcmod_send_buf_cmd(struct smcmod_buf_req *reqp)
{
	int ret = 0;
	struct ion_client *ion_clientp = NULL;
	struct ion_handle *ion_cmd_handlep = NULL;
	struct ion_handle *ion_resp_handlep = NULL;
	void *cmd_vaddrp = NULL;
	void *resp_vaddrp = NULL;
	unsigned long cmd_buf_size = 0;
	unsigned long resp_buf_size = 0;

	/* sanity check the argument */
	if (IS_ERR_OR_NULL(reqp))
		return -EINVAL;

	/* sanity check the fds */
	if (reqp->ion_cmd_fd < 0)
		return -EINVAL;

	/* create an ion client */
	ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");

	/* check for errors */
	if (IS_ERR_OR_NULL(ion_clientp))
		return -EINVAL;

	/* import the command buffer fd */
	ion_cmd_handlep = ion_import_dma_buf(ion_clientp, reqp->ion_cmd_fd);

	/* sanity check the handle */
	if (IS_ERR_OR_NULL(ion_cmd_handlep)) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* retrieve the size of the buffer */
	if (ion_handle_get_size(ion_clientp, ion_cmd_handlep,
		&cmd_buf_size) < 0) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* ensure that the command buffer size is not
	 * greater than the size of the buffer.
	 */
	if (reqp->cmd_len > cmd_buf_size) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* map the area to get a virtual address */
	cmd_vaddrp = ion_map_kernel(ion_clientp, ion_cmd_handlep);

	/* sanity check the address */
	if (IS_ERR_OR_NULL(cmd_vaddrp)) {
		ret = -EINVAL;
		goto buf_cleanup;
	}

	/* check if there is a response buffer */
	if (reqp->ion_resp_fd >= 0) {
		/* import the handle */
		ion_resp_handlep =
			ion_import_dma_buf(ion_clientp, reqp->ion_resp_fd);

		/* sanity check the handle */
		if (IS_ERR_OR_NULL(ion_resp_handlep)) {
			ret = -EINVAL;
			goto buf_cleanup;
		}

		/* retrieve the size of the buffer */
		if (ion_handle_get_size(ion_clientp, ion_resp_handlep,
			&resp_buf_size) < 0) {
			ret = -EINVAL;
			goto buf_cleanup;
		}

		/* ensure that the command buffer size is not
		 * greater than the size of the buffer.
		 */
		if (reqp->resp_len > resp_buf_size) {
			ret = -EINVAL;
			goto buf_cleanup;
		}

		/* map the area to get a virtual address */
		resp_vaddrp = ion_map_kernel(ion_clientp, ion_resp_handlep);

		/* sanity check the address */
		if (IS_ERR_OR_NULL(resp_vaddrp)) {
			ret = -EINVAL;
			goto buf_cleanup;
		}
	}

	/* No need to flush the cache lines for the command buffer here,
	 * because the buffer will be flushed by scm_call.
	 */

	/* call scm function to switch to secure world */
	reqp->return_val = scm_call(reqp->service_id, reqp->command_id,
		cmd_vaddrp, reqp->cmd_len, resp_vaddrp, reqp->resp_len);

	/* The cache lines for the response buffer have already been
	 * invalidated by scm_call before returning.
	 */

buf_cleanup:
	/* if the client and handle(s) are valid, free them */
	if (!IS_ERR_OR_NULL(ion_clientp)) {
		if (!IS_ERR_OR_NULL(ion_cmd_handlep)) {
			if (!IS_ERR_OR_NULL(cmd_vaddrp))
				ion_unmap_kernel(ion_clientp, ion_cmd_handlep);
			ion_free(ion_clientp, ion_cmd_handlep);
		}

		if (!IS_ERR_OR_NULL(ion_resp_handlep)) {
			if (!IS_ERR_OR_NULL(resp_vaddrp))
				ion_unmap_kernel(ion_clientp, ion_resp_handlep);
			ion_free(ion_clientp, ion_resp_handlep);
		}

		ion_client_destroy(ion_clientp);
	}

	return ret;
}
Example #12
/*
 * IOCTL operation; Import fd to  UMP memory
 */
int ump_ion_import_wrapper(u32 __user * argument, struct ump_session_data  * session_data)
{
	_ump_uk_ion_import_s user_interaction;
	ump_dd_handle *ump_handle;
	ump_dd_physical_block * blocks;
	unsigned long num_blocks;
	struct ion_handle *ion_hnd;
	struct scatterlist *sg;
	struct scatterlist *sg_ion;
	unsigned long i = 0;

	ump_session_memory_list_element * session_memory_element = NULL;
	if (ion_client_ump == NULL)
		ion_client_ump = ion_client_create(ion_exynos, -1, "ump");

	/* Sanity check input parameters */
	if (NULL == argument || NULL == session_data)
	{
		MSG_ERR(("NULL parameter in ump_ion_import_wrapper()\n"));
		return -ENOTTY;
	}

	/* Copy the user space memory to kernel space (so we safely can read it) */
	if (0 != copy_from_user(&user_interaction, argument, sizeof(user_interaction)))
	{
		MSG_ERR(("copy_from_user() failed in ump_ion_import_wrapper()\n"));
		return -EFAULT;
	}

	user_interaction.ctx = (void *) session_data;

	/* translate fd to secure ID */
	ion_hnd = ion_import_fd(ion_client_ump, user_interaction.ion_fd);
	if (IS_ERR_OR_NULL(ion_hnd)) {
		MSG_ERR(("ion_import_fd() failed in ump_ion_import_wrapper()\n"));
		return -EFAULT;
	}
	sg_ion = ion_map_dma(ion_client_ump, ion_hnd);

	blocks = (ump_dd_physical_block*)_mali_osk_malloc(sizeof(ump_dd_physical_block)*1024);

	if (NULL == blocks) {
		MSG_ERR(("Failed to allocate blocks in ump_ion_import_wrapper()\n"));
		return -ENOMEM;
	}

	sg = sg_ion;
	do {
		blocks[i].addr = sg_phys(sg);
		blocks[i].size = sg_dma_len(sg);
		i++;
		if (i >= 1024) {
			_mali_osk_free(blocks);
			MSG_ERR(("too many ion blocks in ump_ion_import_wrapper()\n"));
			return -EFAULT;
		}
		sg = sg_next(sg);
	} while (sg);

	num_blocks = i;

	/* Initialize the session_memory_element, and add it to the session object */
	session_memory_element = _mali_osk_calloc( 1, sizeof(ump_session_memory_list_element));

	if (NULL == session_memory_element)
	{
		_mali_osk_free(blocks);
		DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ion_import_wrapper()\n"));
		return -EFAULT;
	}

	ump_handle = ump_dd_handle_create_from_phys_blocks(blocks, num_blocks);
	if (UMP_DD_HANDLE_INVALID == ump_handle)
	{
		_mali_osk_free(session_memory_element);
		_mali_osk_free(blocks);
		DBG_MSG(1, ("Failed to create ump handle from physical blocks in ump_ion_import_wrapper()\n"));
		return -EFAULT;
	}

	session_memory_element->mem = (ump_dd_mem*)ump_handle;
	_mali_osk_lock_wait(session_data->lock, _MALI_OSK_LOCKMODE_RW);
	_mali_osk_list_add(&(session_memory_element->list), &(session_data->list_head_session_memory_list));
	_mali_osk_lock_signal(session_data->lock, _MALI_OSK_LOCKMODE_RW);
	ion_unmap_dma(ion_client_ump,ion_hnd);
	ion_free(ion_client_ump, ion_hnd);

	_mali_osk_free(blocks);

	user_interaction.secure_id = ump_dd_secure_id_get(ump_handle);
	user_interaction.size = ump_dd_size_get(ump_handle);
	user_interaction.ctx = NULL;

	if (0 != copy_to_user(argument, &user_interaction, sizeof(user_interaction)))
	{
		/* If the copy fails then we should release the memory. We can use the IOCTL release to accomplish this */

		MSG_ERR(("copy_to_user() failed in ump_ion_import_wrapper()\n"));

		return -EFAULT;
	}
	return 0; /* success */
}
static int alloc_ion_mem(struct smem_client *client, size_t size, u32 align,
	u32 flags, enum hal_buffer buffer_type, struct msm_smem *mem,
	int map_kernel)
{
	struct ion_handle *hndl;
	unsigned long iova = 0;
	unsigned long buffer_size = 0;
	unsigned long heap_mask = 0;
	int rc = 0;

	align = ALIGN(align, SZ_4K);
	size = ALIGN(size, SZ_4K);

	if (flags & SMEM_SECURE) {
		size = ALIGN(size, SZ_1M);
		align = ALIGN(align, SZ_1M);
	}

	if (is_iommu_present(client->res)) {
		heap_mask = ION_HEAP(ION_IOMMU_HEAP_ID);
	} else {
		dprintk(VIDC_DBG,
			"allocate shared memory from adsp heap size %zu align %u\n",
			size, align);
		heap_mask = ION_HEAP(ION_ADSP_HEAP_ID);
	}

	if (flags & SMEM_SECURE)
		heap_mask = ION_HEAP(ION_CP_MM_HEAP_ID);

	hndl = ion_alloc(client->clnt, size, align, heap_mask, flags);
	if (IS_ERR_OR_NULL(hndl)) {
		dprintk(VIDC_ERR,
		"Failed to allocate shared memory = %p, %zu, %u, 0x%x\n",
		client, size, align, flags);
		rc = -ENOMEM;
		goto fail_shared_mem_alloc;
	}
	mem->mem_type = client->mem_type;
	mem->smem_priv = hndl;
	mem->flags = flags;
	mem->buffer_type = buffer_type;
	if (map_kernel) {
		mem->kvaddr = ion_map_kernel(client->clnt, hndl);
		if (IS_ERR_OR_NULL(mem->kvaddr)) {
			dprintk(VIDC_ERR,
				"Failed to map shared mem in kernel\n");
			rc = -EIO;
			goto fail_map;
		}
	} else
		mem->kvaddr = NULL;

	rc = get_device_address(client, hndl, align, &iova, &buffer_size,
				flags, buffer_type);
	if (rc) {
		dprintk(VIDC_ERR, "Failed to get device address: %d\n",
			rc);
		goto fail_device_address;
	}
	mem->device_addr = iova;
	dprintk(VIDC_DBG,
		"device_address = 0x%lx, kvaddr = 0x%p, size = %zu\n",
		mem->device_addr, mem->kvaddr, size);
	mem->size = size;
	return rc;
fail_device_address:
	ion_unmap_kernel(client->clnt, hndl);
fail_map:
	ion_free(client->clnt, hndl);
fail_shared_mem_alloc:
	return rc;
}
Example #14
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC_NEW:
	{
		struct ion_allocation_data_new data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					     data.flags | data.heap_mask);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;

	}
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					     data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_MAP:
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;
		int ret;
		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;

		ret = ion_share_set_flags(client, data.handle, filp->f_flags);
		if (ret)
			return ret;

		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		int ret = 0;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle)) {
			ret = PTR_ERR(data.handle);
			data.handle = NULL;
		}
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	case ION_IOC_CLEAN_CACHES_OLD:
	case ION_IOC_CLEAN_CACHES:
		return client->dev->custom_ioctl(client,
						ION_IOC_CLEAN_CACHES, arg);
	case ION_IOC_INV_CACHES_OLD:
	case ION_IOC_INV_CACHES:
		return client->dev->custom_ioctl(client,
						ION_IOC_INV_CACHES, arg);
	case ION_IOC_CLEAN_INV_CACHES_OLD:
	case ION_IOC_CLEAN_INV_CACHES:
		return client->dev->custom_ioctl(client,
						ION_IOC_CLEAN_INV_CACHES, arg);
	case ION_IOC_GET_FLAGS_OLD:
	case ION_IOC_GET_FLAGS:
		return client->dev->custom_ioctl(client,
						ION_IOC_GET_FLAGS, arg);
	case ION_IOC_CLIENT_RENAME:
	{
		struct ion_client_name_data data;
		int len;

		if (copy_from_user(&data, (void __user *)arg,
					sizeof(struct ion_client_name_data)))
			return -EFAULT;
		if (data.len < 0)
			return -EFAULT;
		len = (ION_CLIENT_NAME_LENGTH > data.len) ?
			data.len : ION_CLIENT_NAME_LENGTH;
		if (copy_from_user(client->name, (void __user *)data.name, len))
			return -EFAULT;
		client->name[len] = '\0';
		break;
	}
	default:
		return -ENOTTY;
	}
	return 0;
}
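From userspace, the ION_IOC_ALLOC / ION_IOC_FREE pair above is a simple round trip: fill in the allocation struct, let the driver write the handle back, then pass that handle to the free ioctl. The sketch below follows the legacy ABI this handler implements; the heap mask value and device node are illustrative assumptions.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ion.h>

int ion_alloc_free_roundtrip(void)
{
	struct ion_allocation_data alloc_data = {
		.len = 4096,
		.align = 4096,
		.flags = 1,		/* heap mask lives in flags in this ABI */
	};
	struct ion_handle_data free_data;
	int fd = open("/dev/ion", O_RDWR);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, ION_IOC_ALLOC, &alloc_data);	/* driver fills .handle */
	if (!ret) {
		free_data.handle = alloc_data.handle;
		ret = ioctl(fd, ION_IOC_FREE, &free_data);
	}
	close(fd);
	return ret;
}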
static int exynos_secure_mem_enable(struct kbase_device *kbdev, int ion_fd, u64 flags, struct kbase_va_region *reg)
{
	/* enable secure world mode : TZASC */
	int ret = 0;

	if (!kbdev || !kbdev->secure_mode_support) {
		GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: wrong operation! DDK cannot support Secure Rendering\n", __func__);
		ret = -EINVAL;
		goto secure_out;
	}

	if (!reg) {
		GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: wrong input argument, reg %p\n",
			__func__, reg);
		goto secure_out;
	}
#if defined(CONFIG_ION) && defined(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION)
#if MALI_SEC_ASP_SECURE_RENDERING
	{
		struct ion_client *client;
		struct ion_handle *ion_handle;
		size_t len = 0;
		ion_phys_addr_t phys = 0;

		flush_all_cpu_caches();

		if ((flags & kbdev->sec_sr_info.secure_flags_crc_asp) == kbdev->sec_sr_info.secure_flags_crc_asp) {
			reg->flags |= KBASE_REG_SECURE_CRC | KBASE_REG_SECURE;
		} else {

			client = ion_client_create(ion_exynos, "G3D");
			if (IS_ERR(client)) {
				GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: Failed to get ion_client of G3D\n",
						__func__);
				goto secure_out;
			}

			ion_handle = ion_import_dma_buf(client, ion_fd);

			if (IS_ERR(ion_handle)) {
				GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: Failed to get ion_handle of G3D\n",
						__func__);
				ion_client_destroy(client);
				goto secure_out;
			}

			if (ion_phys(client, ion_handle, &phys, &len)) {
				GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: Failed to get phys. addr of G3D\n",
						__func__);
				ion_free(client, ion_handle);
				ion_client_destroy(client);
				goto secure_out;
			}

			ion_free(client, ion_handle);
			ion_client_destroy(client);

			ret = exynos_smc(SMC_DRM_SECBUF_CFW_PROT, phys, len, PROT_G3D);
			if (ret != DRMDRV_OK) {
				GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: failed to set secure buffer region of G3D buffer, phy 0x%08x, error 0x%x\n",
					__func__, (unsigned int)phys, ret);
				BUG();
			}

			reg->flags |= KBASE_REG_SECURE;
		}

		reg->phys_by_ion = phys;
		reg->len_by_ion = len;
	}
#else
	reg->flags |= KBASE_REG_SECURE;

	reg->phys_by_ion = 0;
	reg->len_by_ion = 0;
#endif
#else
	GPU_LOG(DVFS_ERROR, LSI_GPU_SECURE, 0u, 0u, "%s: wrong operation! DDK cannot support Secure Rendering\n", __func__);
	ret = -EINVAL;
#endif // defined(CONFIG_ION) && defined(CONFIG_EXYNOS_CONTENT_PATH_PROTECTION)

	return ret;
secure_out:
	ret = -EINVAL;
	return ret;
}
Example #16
static int smcmod_send_dec_cmd(struct smcmod_decrypt_req *reqp)
{
	struct ion_client *ion_clientp;
	struct ion_handle *ion_handlep = NULL;
	int ion_fd;
	int ret;
	u32 pa;
	size_t size;
	struct {
		u32 args[4];
	} req;
	struct {
		u32 args[3];
	} rsp;

	ion_clientp = msm_ion_client_create(UINT_MAX, "smcmod");
	if (IS_ERR_OR_NULL(ion_clientp))
		return PTR_ERR(ion_clientp);

	switch (reqp->operation) {
	case SMCMOD_DECRYPT_REQ_OP_METADATA: {
		ion_fd = reqp->request.metadata.ion_fd;
		ret = smcmod_ion_fd_to_phys(ion_fd, ion_clientp,
					    &ion_handlep, &pa, &size);
		if (ret)
			goto error;

		req.args[0] = reqp->request.metadata.len;
		req.args[1] = pa;
		break;
	}
	case SMCMOD_DECRYPT_REQ_OP_IMG_FRAG: {
		ion_fd = reqp->request.img_frag.ion_fd;
		ret = smcmod_ion_fd_to_phys(ion_fd, ion_clientp,
					    &ion_handlep, &pa, &size);
		if (ret)
			goto error;

		req.args[0] = reqp->request.img_frag.ctx_id;
		req.args[1] = reqp->request.img_frag.last_frag;
		req.args[2] = reqp->request.img_frag.frag_len;
		req.args[3] = pa + reqp->request.img_frag.offset;
		break;
	}
	default:
		ret = -EINVAL;
		goto error;
	}

	/*
	 * scm_call does cache maintenance over request and response buffers.
	 * The userspace must flush/invalidate ion input/output buffers itself.
	 */

	ret = scm_call(reqp->service_id, reqp->command_id,
		       &req, sizeof(req), &rsp, sizeof(rsp));
	if (ret)
		goto error;

	switch (reqp->operation) {
	case SMCMOD_DECRYPT_REQ_OP_METADATA:
		reqp->response.metadata.status = rsp.args[0];
		reqp->response.metadata.ctx_id = rsp.args[1];
		reqp->response.metadata.end_offset = rsp.args[2] - pa;
		break;
	case SMCMOD_DECRYPT_REQ_OP_IMG_FRAG: {
		reqp->response.img_frag.status = rsp.args[0];
		break;
	}
	default:
		break;
	}

error:
	if (!IS_ERR_OR_NULL(ion_clientp)) {
		if (!IS_ERR_OR_NULL(ion_handlep))
			ion_free(ion_clientp, ion_handlep);
		ion_client_destroy(ion_clientp);
	}
	return ret;
}
Example #17
void *ddl_pmem_alloc(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size, offset = 0;
	u32 index = 0;
	struct ddl_context *ddl_context;
	struct msm_mapped_buffer *mapped_buffer = NULL;
	unsigned long iova = 0;
	unsigned long buffer_size = 0;
	unsigned long *kernel_vaddr = NULL;
	unsigned long ionflag = 0;
	unsigned long flags = 0;
	int ret = 0;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	res_trk_set_mem_type(addr->mem_type);
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			DDL_MSG_ERROR("%s() :DDL ION Client Invalid handle\n",
						 __func__);
			goto bail_out;
		}
		alloc_size = (alloc_size + 4095) & ~4095;
		addr->alloc_handle = ion_alloc(
				ddl_context->video_ion_client, alloc_size,
				SZ_4K, res_trk_get_mem_type());
		if (IS_ERR_OR_NULL(addr->alloc_handle)) {
			DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
						 __func__);
			goto bail_out;
		}
		if (res_trk_check_for_sec_session() ||
			addr->mem_type == DDL_FW_MEM)
			ionflag = UNCACHED;
		else
			ionflag = CACHED;
		kernel_vaddr = (unsigned long *) ion_map_kernel(
					ddl_context->video_ion_client,
					addr->alloc_handle, ionflag);
		if (IS_ERR_OR_NULL(kernel_vaddr)) {
				DDL_MSG_ERROR("%s() :DDL ION map failed\n",
							 __func__);
				goto free_ion_alloc;
		}
		addr->virtual_base_addr = (u8 *) kernel_vaddr;
		ret = ion_map_iommu(ddl_context->video_ion_client,
				addr->alloc_handle,
				VIDEO_DOMAIN,
				VIDEO_MAIN_POOL,
				SZ_4K,
				0,
				&iova,
				&buffer_size,
				UNCACHED, 0);
		if (ret) {
			DDL_MSG_ERROR("%s():DDL ION ion map iommu failed\n",
						 __func__);
			goto unmap_ion_alloc;
		}
		addr->alloced_phys_addr = (phys_addr_t) iova;
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s():DDL ION client physical failed\n",
						 __func__);
			goto unmap_ion_alloc;
		}
		addr->mapped_buffer = NULL;
		addr->physical_base_addr = (u8 *) iova;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = alloc_size;
	} else {
		addr->alloced_phys_addr = (phys_addr_t)
		allocate_contiguous_memory_nomap(alloc_size,
			res_trk_get_mem_type(), SZ_4K);
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n",
					 __func__, alloc_size);
			goto bail_out;
		}
		flags = MSM_SUBSYSTEM_MAP_IOVA | MSM_SUBSYSTEM_MAP_KADDR;
		if (alignment == DDL_KILO_BYTE(128))
			index = 1;
		else if (alignment > SZ_4K)
			flags |= MSM_SUBSYSTEM_ALIGN_IOVA_8K;

		addr->mapped_buffer =
		msm_subsystem_map_buffer((unsigned long)addr->alloced_phys_addr,
			alloc_size, flags, &vidc_mmu_subsystem[index],
			sizeof(vidc_mmu_subsystem[index])/sizeof(unsigned int));
		if (IS_ERR(addr->mapped_buffer)) {
			pr_err(" %s() buffer map failed", __func__);
			goto free_acm_alloc;
		}
		mapped_buffer = addr->mapped_buffer;
		if (!mapped_buffer->vaddr || !mapped_buffer->iova[0]) {
			pr_err("%s() map buffers failed\n", __func__);
			goto free_map_buffers;
		}
		addr->physical_base_addr = (u8 *)mapped_buffer->iova[0];
		addr->virtual_base_addr = mapped_buffer->vaddr;
		addr->align_physical_addr = (u8 *) DDL_ALIGN((u32)
			addr->physical_base_addr, alignment);
		offset = (u32)(addr->align_physical_addr -
				addr->physical_base_addr);
		addr->align_virtual_addr = addr->virtual_base_addr + offset;
		addr->buffer_size = sz;
	}
	return addr->virtual_base_addr;
free_map_buffers:
	msm_subsystem_unmap_buffer(addr->mapped_buffer);
	addr->mapped_buffer = NULL;
free_acm_alloc:
	free_contiguous_memory_by_paddr(
		(unsigned long)addr->alloced_phys_addr);
	addr->alloced_phys_addr = (phys_addr_t)NULL;
	return NULL;
unmap_ion_alloc:
	ion_unmap_kernel(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->virtual_base_addr = NULL;
	addr->alloced_phys_addr = (phys_addr_t)NULL;
free_ion_alloc:
	ion_free(ddl_context->video_ion_client,
		addr->alloc_handle);
	addr->alloc_handle = NULL;
bail_out:
	return NULL;
}
Example #18
static void free_ion_mem(struct smem_client *client, struct msm_smem *mem)
{
	ion_unmap_kernel(client->clnt, mem->smem_priv);
	ion_free(client->clnt, mem->smem_priv);
}
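A minimal caller pairs the alloc_ion_mem() shown earlier on this page with this free_ion_mem(): allocate a kernel-mapped buffer, use it, release it. The buffer type enum, flags, and sizes below are illustrative assumptions.

static int smem_roundtrip(struct smem_client *client)
{
	struct msm_smem mem;
	int rc;

	rc = alloc_ion_mem(client, SZ_4K, SZ_4K, 0 /* flags */,
			HAL_BUFFER_INTERNAL_SCRATCH, &mem, 1 /* map_kernel */);
	if (rc)
		return rc;
	memset(mem.kvaddr, 0, mem.size);	/* kernel mapping is valid */
	free_ion_mem(client, &mem);
	return 0;
}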
static long secmem_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct secmem_info *info = filp->private_data;

	static int nbufs = 0;

	switch (cmd) {
	case SECMEM_IOC_GET_CHUNK_NUM:
	{
		nbufs = sizeof(secmem_regions) / sizeof(uint32_t);

		if (nbufs == 0)
			return -ENOMEM;

		if (copy_to_user((void __user *)arg, &nbufs, sizeof(int)))
			return -EFAULT;
		break;
	}
	case SECMEM_IOC_CHUNKINFO:
	{
		struct secchunk_info minfo;

		if (copy_from_user(&minfo, (void __user *)arg, sizeof(minfo)))
			return -EFAULT;

		memset(&minfo.name, 0, MAX_NAME_LEN);

		if (minfo.index < 0)
			return -EINVAL;

		if (minfo.index >= nbufs) {
			minfo.index = -1; /* No more memory region */
		} else {

			if (ion_exynos_contig_heap_info(secmem_regions[minfo.index],
					&minfo.base, &minfo.size))
				return -EINVAL;

			memcpy(minfo.name, secmem_regions_name[minfo.index], MAX_NAME_LEN);
		}

		if (copy_to_user((void __user *)arg, &minfo, sizeof(minfo)))
			return -EFAULT;
		break;
	}
#if defined(CONFIG_ION)
	case SECMEM_IOC_GET_FD_PHYS_ADDR:
	{
		struct ion_client *client;
		struct secfd_info fd_info;
		struct ion_fd_data data;
		size_t len;

		if (copy_from_user(&fd_info, (int __user *)arg,
					sizeof(fd_info)))
			return -EFAULT;

		client = ion_client_create(ion_exynos, "DRM");
		if (IS_ERR(client)) {
			pr_err("%s: Failed to get ion_client of DRM\n",
				__func__);
			return -ENOMEM;
		}

		data.fd = fd_info.fd;
		data.handle = ion_import_dma_buf(client, data.fd);
		pr_debug("%s: fd from user space = %d\n",
				__func__, fd_info.fd);
		if (IS_ERR(data.handle)) {
			pr_err("%s: Failed to get ion_handle of DRM\n",
				__func__);
			ion_client_destroy(client);
			return -ENOMEM;
		}

		if (ion_phys(client, data.handle, &fd_info.phys, &len)) {
			pr_err("%s: Failed to get phys. addr of DRM\n",
				__func__);
			ion_client_destroy(client);
			ion_free(client, data.handle);
			return -ENOMEM;
		}

		pr_debug("%s: physical addr from kernel space = 0x%08x\n",
				__func__, (unsigned int)fd_info.phys);

		ion_free(client, data.handle);
		ion_client_destroy(client);

		if (copy_to_user((void __user *)arg, &fd_info, sizeof(fd_info)))
			return -EFAULT;
		break;
	}
#endif
	case SECMEM_IOC_GET_DRM_ONOFF:
		smp_rmb();
		if (copy_to_user((void __user *)arg, &drm_onoff, sizeof(int)))
			return -EFAULT;
		break;
	case SECMEM_IOC_SET_DRM_ONOFF:
	{
		int ret, val = 0;

		if (copy_from_user(&val, (int __user *)arg, sizeof(int)))
			return -EFAULT;

		mutex_lock(&drm_lock);
		if ((info->drm_enabled && !val) ||
		    (!info->drm_enabled && val)) {
			/*
			 * 1. if drm is enabled, disable it
			 * 2. if drm is not already enabled,
			 *    try to enable it
			 */
			ret = drm_enable_locked(info, val);
			if (ret < 0)
				pr_err("fail to lock/unlock drm status. lock = %d\n", val);
		}
		mutex_unlock(&drm_lock);
		break;
	}
	case SECMEM_IOC_GET_CRYPTO_LOCK:
	{
		break;
	}
	case SECMEM_IOC_RELEASE_CRYPTO_LOCK:
	{
		break;
	}
	case SECMEM_IOC_SET_TZPC:
	{
#if !defined(CONFIG_SOC_EXYNOS5422) && !defined(CONFIG_SOC_EXYNOS5430)
		struct protect_info prot;

		if (copy_from_user(&prot, (void __user *)arg, sizeof(struct protect_info)))
			return -EFAULT;

		mutex_lock(&smc_lock);
		exynos_smc((uint32_t)(0x81000000), 0, prot.dev, prot.enable);
		mutex_unlock(&smc_lock);
#endif
		break;
	}
	default:
		return -ENOTTY;
	}

	return 0;
}
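From user space, the chunk-enumeration path above is driven in two steps: SECMEM_IOC_GET_CHUNK_NUM must come first, because it initializes the driver-side static `nbufs` that SECMEM_IOC_CHUNKINFO bounds-checks. A minimal sketch, assuming a device node such as /dev/s5p-smem and a driver header providing SECMEM_IOC_* and `struct secchunk_info` (device path and header are assumptions):

/* Hypothetical userspace walk of the secure memory regions. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int list_secmem_chunks(void)
{
	struct secchunk_info minfo;
	int fd, i, nbufs;

	fd = open("/dev/s5p-smem", O_RDWR);	/* device path assumed */
	if (fd < 0)
		return -1;

	if (ioctl(fd, SECMEM_IOC_GET_CHUNK_NUM, &nbufs) < 0)
		goto out;

	for (i = 0; i < nbufs; i++) {
		minfo.index = i;
		if (ioctl(fd, SECMEM_IOC_CHUNKINFO, &minfo) < 0)
			break;
		if (minfo.index < 0)	/* driver signals: no more regions */
			break;
		printf("%s: base=%#lx size=%#lx\n", minfo.name,
		       (unsigned long)minfo.base, (unsigned long)minfo.size);
	}
out:
	close(fd);
	return 0;
}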
Example No. 20
PVRSRV_ERROR IonImportBufferAndAcquirePhysAddr(IMG_HANDLE hIonDev,
											   IMG_UINT32 ui32NumFDs,
											   IMG_INT32  *pai32BufferFDs,
											   IMG_UINT32 *pui32PageCount,
											   IMG_SYS_PHYADDR **ppsSysPhysAddr,
											   IMG_PVOID  *ppvKernAddr0,
											   IMG_HANDLE *phPriv,
											   IMG_HANDLE *phUnique)
{
	struct scatterlist *psTemp, *psScatterList[MAX_IMPORT_ION_FDS] = {};
	PVRSRV_ERROR eError = PVRSRV_ERROR_OUT_OF_MEMORY;
	struct ion_client *psIonClient = hIonDev;
	IMG_UINT32 i, k, ui32PageCount = 0;
	ION_IMPORT_DATA *psImportData;

	if(ui32NumFDs > MAX_IMPORT_ION_FDS)
	{
		printk(KERN_ERR "%s: More ion export fds passed in than supported "
						"(%d provided, %d max)", __func__, ui32NumFDs,
						MAX_IMPORT_ION_FDS);
		return PVRSRV_ERROR_INVALID_PARAMS;
	}

	psImportData = kzalloc(sizeof(ION_IMPORT_DATA), GFP_KERNEL);
	if (psImportData == NULL)
	{
		goto exitFailKMallocImportData;
	}

	/* Set up import data for free call */
	psImportData->psIonClient = psIonClient;
	psImportData->ui32NumIonHandles = ui32NumFDs;

	for(i = 0; i < ui32NumFDs; i++)
	{
		int fd = (int)pai32BufferFDs[i];
		struct sg_table *psSgTable;

		psImportData->apsIonHandle[i] = ion_import_dma_buf(psIonClient, fd);
		if (IS_ERR_OR_NULL(psImportData->apsIonHandle[i]))
		{
			/* ion_import_dma_buf() returns an ERR_PTR on failure;
			 * clear the slot so the cleanup loop stops here */
			psImportData->apsIonHandle[i] = IMG_NULL;
			eError = PVRSRV_ERROR_BAD_MAPPING;
			goto exitFailImport;
		}

		psSgTable = ion_sg_table(psIonClient, psImportData->apsIonHandle[i]);
		if (psSgTable == NULL)
		{
			eError = PVRSRV_ERROR_INVALID_PARAMS;
			goto exitFailImport;
		}

		psScatterList[i] = psSgTable->sgl;
		if (psScatterList[i] == NULL)
		{
			eError = PVRSRV_ERROR_INVALID_PARAMS;
			goto exitFailImport;
		}

		/* Although all heaps will provide an sg_table, the tables cannot
		 * always be trusted because sg_lists are just pointers to "struct
		 * page" values, and some memory e.g. carveout may not have valid
		 * "struct page" values. In particular, on ARM, carveout is
		 * generally reserved with memblock_remove(), which leaves the
		 * "struct page" entries uninitialized when SPARSEMEM is enabled.
		 * The effect of this is that page_to_pfn(pfn_to_page(pfn)) != pfn.
		 *
		 * There's more discussion on this mailing list thread:
		 * http://lists.linaro.org/pipermail/linaro-mm-sig/2012-August/002440.html
		 *
		 * If the heap this buffer comes from implements ->phys(), it's
		 * probably a contiguous allocator. If the phys() function is
		 * implemented, we'll use it to check sg_table->sgl[0]. If we find
		 * they don't agree, we'll assume phys() is more reliable and use
		 * that.
		 *
		 * Some heaps out there will implement phys() even though they are
		 * not for physically contiguous allocations (so the sg_table must
		 * be used). Therefore use the sg_table if the phys() and first
		 * sg_table entry match. This should be reliable because for most
		 * contiguous allocators, the sg_table should be a single span
		 * from 'start' to 'start+size'.
		 *
		 * Also, ion prints out an error message if the heap doesn't implement
		 * ->phys(), which we want to avoid, so only use ->phys() if the
		 * sg_table contains a single span and therefore could plausibly
		 * be a contiguous allocator.
		 */
		if(!sg_next(psScatterList[i]))
		{
			ion_phys_addr_t sPhyAddr;
			size_t sLength;

			if(!ion_phys(psIonClient, psImportData->apsIonHandle[i],
						 &sPhyAddr, &sLength))
			{
				BUG_ON(sLength & ~PAGE_MASK);

				if(sg_phys(psScatterList[i]) != sPhyAddr)
				{
					psScatterList[i] = IMG_NULL;
					ui32PageCount += sLength / PAGE_SIZE;
				}
			}
		}

		for(psTemp = psScatterList[i]; psTemp; psTemp = sg_next(psTemp))
		{
			IMG_UINT32 j;
			for (j = 0; j < psTemp->length; j += PAGE_SIZE)
			{
				ui32PageCount++;
			}
		}
	}

	BUG_ON(ui32PageCount == 0);

	psImportData->psSysPhysAddr = kmalloc(sizeof(IMG_SYS_PHYADDR) * ui32PageCount, GFP_KERNEL);
	if (psImportData->psSysPhysAddr == NULL)
	{
		goto exitFailImport;
	}

	for(i = 0, k = 0; i < ui32NumFDs; i++)
	{
		if(psScatterList[i])
		{
			for(psTemp = psScatterList[i]; psTemp; psTemp = sg_next(psTemp))
			{
				IMG_UINT32 j;
				for (j = 0; j < psTemp->length; j += PAGE_SIZE)
				{
					psImportData->psSysPhysAddr[k].uiAddr = sg_phys(psTemp) + j;
					k++;
				}
			}
		}
		else
		{
			ion_phys_addr_t sPhyAddr;
			size_t sLength, j;

			ion_phys(psIonClient, psImportData->apsIonHandle[i],
					 &sPhyAddr, &sLength);

			for(j = 0; j < sLength; j += PAGE_SIZE)
			{
				psImportData->psSysPhysAddr[k].uiAddr = sPhyAddr + j;
				k++;
			}
		}
	}

	*pui32PageCount = ui32PageCount;
	*ppsSysPhysAddr = psImportData->psSysPhysAddr;

#if defined(PDUMP)
	if(ui32NumFDs == 1)
	{
		IMG_PVOID pvKernAddr0;

		pvKernAddr0 = ion_map_kernel(psIonClient, psImportData->apsIonHandle[0]);
		if (IS_ERR(pvKernAddr0))
		{
			pvKernAddr0 = IMG_NULL;
		}

		psImportData->pvKernAddr0 = pvKernAddr0;
		*ppvKernAddr0 = pvKernAddr0;
	}
	else
#endif /* defined(PDUMP) */
	{
		*ppvKernAddr0 = NULL;
	}

	*phPriv = psImportData;
	*phUnique = (IMG_HANDLE)psImportData->psSysPhysAddr[0].uiAddr;

	return PVRSRV_OK;

exitFailImport:
	for(i = 0; psImportData->apsIonHandle[i] != NULL; i++)
	{
		ion_free(psIonClient, psImportData->apsIonHandle[i]);
	}
	kfree(psImportData);
exitFailKMallocImportData:
	return eError;
}
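The import data set up above implies a symmetric release path taking back the `phPriv` handle. A minimal sketch of what that routine would look like, assuming ION_IMPORT_DATA carries only the fields used above (psIonClient, ui32NumIonHandles, apsIonHandle, psSysPhysAddr, pvKernAddr0):

/* Hypothetical release path for IonImportBufferAndAcquirePhysAddr():
 * drop the kernel mapping (if PDUMP created one), then free each ion
 * handle and the physical address array. */
IMG_VOID IonUnimportBufferAndReleasePhysAddr(IMG_HANDLE hPriv)
{
	ION_IMPORT_DATA *psImportData = hPriv;
	IMG_UINT32 i;

#if defined(PDUMP)
	if (psImportData->pvKernAddr0)
	{
		ion_unmap_kernel(psImportData->psIonClient,
				 psImportData->apsIonHandle[0]);
	}
#endif

	for (i = 0; i < psImportData->ui32NumIonHandles; i++)
	{
		ion_free(psImportData->psIonClient,
				 psImportData->apsIonHandle[i]);
	}

	kfree(psImportData->psSysPhysAddr);
	kfree(psImportData);
}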
int ion_map_test(int count)
{
	int fd, ret = 0, i, count_alloc, count_map;
	struct ion_handle **handle;
	unsigned char **ptr;
	int *map_fd;

	fd = ion_open();
	if (fd < 0) {
		printf("%s(): FAILED to open ion device\n",	__func__);
		return -1;
	}

	handle = (struct ion_handle **)malloc(count * sizeof(struct ion_handle *));
	if(handle == NULL) {
		printf("%s(): FAILED to allocate memory for ion_handles\n", __func__);
		return -ENOMEM;
	}

	count_alloc = count;
	count_map = count;

	/* Allocate ion_handles */
	for(i = 0; i < count; i++) {
		ret = _ion_alloc_test(fd, &(handle[i]));
		printf("%s(): Alloc handle[%d]=%p\n", __func__, i, handle[i]);
		if(ret || ((int)handle[i]  == -ENOMEM)) {
			printf("%s(): Alloc handle[%d]=%p FAILED, err:%s\n",
					__func__, i, handle[i], strerror(ret));
			count_alloc = i;
			goto err_alloc;
		}
	}

	/* Map ion_handles and validate */
	if (tiler_test)
		len = height * stride;

	ptr = (unsigned char **)malloc(count * sizeof(unsigned char *));
	map_fd = (int *)malloc(count * sizeof(int));

	for(i = 0; i < count; i++) {
		/* Map ion_handle on userside */
		ret = ion_map(fd, handle[i], len, prot, map_flags, 0, &(ptr[i]), &(map_fd[i]));
		printf("%s(): Map handle[%d]=%p, map_fd=%d, ptr=%p\n",
				__func__, i, handle[i], map_fd[i], ptr[i]);
		if(ret) {
			printf("%s Map handle[%d]=%p FAILED, err:%s\n",
					__func__, i, handle[i], strerror(ret));
			count_map = i;
			goto err_map;
		}

		/* Validate mapping by writing the data and reading it back */
		if (tiler_test)
			_ion_tiler_map_test(ptr[i]);
		else
			_ion_map_test(ptr[i]);
	}

	/* clean up properly */
	err_map:
	for(i = 0; i < count_map; i++) {
		/* Unmap ion_handles */
		ret = munmap(ptr[i], len);
		printf("%s(): Unmap handle[%d]=%p, map_fd=%d, ptr=%p\n",
				__func__, i, handle[i], map_fd[i], ptr[i]);
		if(ret) {
			/* report the failure but keep going, so the remaining
			 * fds and handles are still released */
			printf("%s(): Unmap handle[%d]=%p FAILED, err:%s\n",
					__func__, i, handle[i], strerror(ret));
		}
		/* Close fds */
		close(map_fd[i]);
	}
	free(map_fd);
	free(ptr);

	err_alloc:
	/* Free ion_handles */
	for (i = 0; i < count_alloc; i++) {
		printf("%s(): Free handle[%d]=%p\n", __func__, i, handle[i]);
		ret = ion_free(fd, handle[i]);
		if (ret) {
			printf("%s(): Free handle[%d]=%p FAILED, err:%s\n",
					__func__, i, handle[i], strerror(ret));
		}
	}

	ion_close(fd);
	free(handle);
	handle = NULL;

	if(ret || (count_alloc != count) || (count_map != count)) {
		printf("\nion map test: FAILED\n\n");
		if((count_alloc != count) || (count_map != count))
			ret = -ENOMEM;
	} else
		printf("\nion map test: PASSED\n");

	return ret;
}
Example No. 22
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					     data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_MAP:
	case ION_IOC_SHARE:
	{
		struct ion_fd_data data;
		int ret;
		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;

		ret = ion_share_set_flags(client, data.handle, filp->f_flags);
		if (ret)
			return ret;

		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;
		int ret = 0;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle)) {
			ret = PTR_ERR(data.handle);
			data.handle = NULL;
		}
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	case ION_IOC_CLEAN_CACHES:
	case ION_IOC_INV_CACHES:
	case ION_IOC_CLEAN_INV_CACHES:
	{
		struct ion_flush_data data;
		unsigned long start, end;
		struct ion_handle *handle = NULL;
		int ret;

		if (copy_from_user(&data, (void __user *)arg,
				sizeof(struct ion_flush_data)))
			return -EFAULT;

		start = (unsigned long) data.vaddr;
		end = (unsigned long) data.vaddr + data.length;

		if (check_vaddr_bounds(start, end)) {
			pr_err("%s: virtual address %p is out of bounds\n",
				__func__, data.vaddr);
			return -EINVAL;
		}

		if (!data.handle) {
			handle = ion_import_dma_buf(client, data.fd);
			if (IS_ERR(handle)) {
				pr_info("%s: Could not import handle: %ld\n",
					__func__, PTR_ERR(handle));
				return -EINVAL;
			}
		}

		ret = ion_do_cache_op(client,
					data.handle ? data.handle : handle,
					data.vaddr, data.offset, data.length,
					cmd);

		if (!data.handle)
			ion_free(client, handle);

		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_GET_FLAGS:
	{
		struct ion_flag_data data;
		int ret;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_flag_data)))
			return -EFAULT;

		ret = ion_handle_get_flags(client, data.handle, &data.flags);
		if (ret < 0)
			return ret;
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_flag_data)))
			return -EFAULT;
		break;
	}
	default:
		return -ENOTTY;
	}
	return 0;
}
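The legacy ioctl interface above is typically exercised from user space in an alloc → share → mmap sequence. A minimal sketch, assuming the legacy /dev/ion node and the ion_allocation_data/ion_fd_data/ion_handle_data layouts this handler copies; note that in this kernel version the heap mask is folded into `flags` (the handler passes data.flags straight to ion_alloc()):

/* Sketch of the legacy ION userspace flow against the handler above:
 * ION_IOC_ALLOC returns a handle, ION_IOC_SHARE turns it into a dma-buf
 * fd that can be mmap'd, ION_IOC_FREE drops the handle. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ion.h>

int ion_alloc_and_map(size_t len, void **out)
{
	struct ion_allocation_data alloc = {
		.len = len,
		.align = 4096,
		.flags = ION_HEAP_SYSTEM_MASK,	/* heap mask lives in flags here */
	};
	struct ion_fd_data share;
	struct ion_handle_data free_data;
	int fd, ret;

	fd = open("/dev/ion", O_RDWR);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, ION_IOC_ALLOC, &alloc);
	if (ret < 0)
		goto out;

	share.handle = alloc.handle;
	ret = ioctl(fd, ION_IOC_SHARE, &share);	/* or ION_IOC_MAP */
	if (ret == 0) {
		*out = mmap(NULL, len, PROT_READ | PROT_WRITE,
			    MAP_SHARED, share.fd, 0);	/* check MAP_FAILED in real code */
		close(share.fd);
	}

	free_data.handle = alloc.handle;
	ioctl(fd, ION_IOC_FREE, &free_data);	/* the mapping keeps the buffer alive */
out:
	close(fd);
	return ret;
}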
static int msm_pmem_table_add(struct hlist_head *ptype,
	struct msm_pmem_info *info, struct ion_client *client)
{
	unsigned long paddr;
#ifndef CONFIG_MSM_MULTIMEDIA_USE_ION
	unsigned long kvstart;
	struct file *file;
#endif
	int rc = -ENOMEM;

	unsigned long len;
	struct msm_pmem_region *region;

	region = kmalloc(sizeof(struct msm_pmem_region), GFP_KERNEL);
	if (!region)
		goto out;
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	region->handle = ion_import_dma_buf(client, info->fd);
	if (IS_ERR_OR_NULL(region->handle))
		goto out1;
	if (ion_map_iommu(client, region->handle, CAMERA_DOMAIN, GEN_POOL,
				  SZ_4K, 0, &paddr, &len, UNCACHED, 0) < 0)
		goto out2;
#elif defined(CONFIG_ANDROID_PMEM)
	rc = get_pmem_file(info->fd, &paddr, &kvstart, &len, &file);
	if (rc < 0) {
		pr_err("%s: get_pmem_file fd %d error %d\n",
				__func__, info->fd, rc);
		goto out1;
	}
	region->file = file;
#else
	paddr = 0;
	file = NULL;
	kvstart = 0;
#endif
	if (!info->len)
		info->len = len;
	rc = check_pmem_info(info, len);
	if (rc < 0)
		goto out3;
	paddr += info->offset;
	len = info->len;

	if (check_overlap(ptype, paddr, len) < 0) {
		rc = -EINVAL;
		goto out3;
	}

	CDBG("%s: type %d, active flag %d, paddr 0x%lx, vaddr 0x%lx\n",
		__func__, info->type, info->active, paddr,
		(unsigned long)info->vaddr);

	INIT_HLIST_NODE(&region->list);
	region->paddr = paddr;
	region->len = len;
	memcpy(&region->info, info, sizeof(region->info));
	D("%s Adding region to list with type %d\n", __func__,
						region->info.type);
	D("%s pmem_stats address is 0x%p\n", __func__, ptype);
	hlist_add_head(&(region->list), ptype);

	return 0;
out3:
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	ion_unmap_iommu(client, region->handle, CAMERA_DOMAIN, GEN_POOL);
out2:
	ion_free(client, region->handle);
#elif defined(CONFIG_ANDROID_PMEM)
	put_pmem_file(region->file);
#endif
out1:
	kfree(region);
out:
	return rc;
}
Example No. 24
static int alloc_device_free(alloc_device_t *dev, buffer_handle_t handle)
{
	if (private_handle_t::validate(handle) < 0)
	{
		return -EINVAL;
	}

	private_handle_t const *hnd = reinterpret_cast<private_handle_t const *>(handle);

	if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)
	{
		// free this buffer
		private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
		const size_t bufferSize = m->finfo.line_length * m->info.yres;
		int index = (hnd->base - m->framebuffer->base) / bufferSize;
		m->bufferMask &= ~(1 << index);
		close(hnd->fd);

#if GRALLOC_ARM_UMP_MODULE

		if ((int)UMP_INVALID_MEMORY_HANDLE != hnd->ump_mem_handle)
		{
			ump_reference_release((ump_handle)hnd->ump_mem_handle);
		}

#endif
	}
	else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_UMP)
	{
#if GRALLOC_ARM_UMP_MODULE

		/* Buffer might be unregistered so we need to check for invalid ump handle*/
		if ((int)UMP_INVALID_MEMORY_HANDLE != hnd->ump_mem_handle)
		{
			ump_mapped_pointer_release((ump_handle)hnd->ump_mem_handle);
			ump_reference_release((ump_handle)hnd->ump_mem_handle);
		}

#else
		AERR("Can't free ump memory for handle:0x%x. Not supported.", (unsigned int)hnd);
#endif
	}
	else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION)
	{
#if GRALLOC_ARM_DMA_BUF_MODULE
		private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);

		/* Buffer might be unregistered so we need to check for invalid ump handle*/
		if (0 != hnd->base)
		{
			if (0 != munmap((void *)hnd->base, hnd->size))
			{
				AERR("Failed to munmap handle 0x%x", (unsigned int)hnd);
			}
		}

		close(hnd->share_fd);

		if (0 != ion_free(m->ion_client, hnd->ion_hnd))
		{
			AERR("Failed to ion_free( ion_client: %d ion_hnd: %x )", m->ion_client, (unsigned int)hnd->ion_hnd);
		}

		memset((void *)hnd, 0, sizeof(*hnd));
#else
		AERR("Can't free dma_buf memory for handle:0x%x. Not supported.", (unsigned int)hnd);
#endif

	}

	delete hnd;

	return 0;
}
Example No. 25
static void *res_trk_pmem_alloc
	(struct ddl_buf_addr *addr, size_t sz, u32 alignment)
{
	u32 alloc_size;
	struct ddl_context *ddl_context;
	int rc = -EINVAL;
	ion_phys_addr_t phyaddr = 0;
	size_t len = 0;
	DBG_PMEM("\n%s() IN: Requested alloc size(%u)", __func__, (u32)sz);
	if (!addr) {
		DDL_MSG_ERROR("\n%s() Invalid Parameters", __func__);
		goto bail_out;
	}
	ddl_context = ddl_get_context();
	res_trk_set_mem_type(addr->mem_type);
	alloc_size = (sz + alignment);
	if (res_trk_get_enable_ion()) {
		if (!ddl_context->video_ion_client)
			ddl_context->video_ion_client =
				res_trk_get_ion_client();
		if (!ddl_context->video_ion_client) {
			DDL_MSG_ERROR("%s() :DDL ION Client Invalid handle\n",
						 __func__);
			goto bail_out;
		}
		addr->alloc_handle = ion_alloc(
		ddl_context->video_ion_client, alloc_size, SZ_4K,
			res_trk_get_mem_type());
		if (IS_ERR_OR_NULL(addr->alloc_handle)) {
			DDL_MSG_ERROR("%s() :DDL ION alloc failed\n",
						 __func__);
			goto bail_out;
		}
		rc = ion_phys(ddl_context->video_ion_client,
				addr->alloc_handle, &phyaddr,
				 &len);
		if (rc || !phyaddr) {
			DDL_MSG_ERROR("%s():DDL ION client physical failed\n",
						 __func__);
			goto free_acm_ion_alloc;
		}
		addr->alloced_phys_addr = phyaddr;
	} else {
		addr->alloced_phys_addr = (phys_addr_t)
		allocate_contiguous_memory_nomap(alloc_size,
			res_trk_get_mem_type(), SZ_4K);
		if (!addr->alloced_phys_addr) {
			DDL_MSG_ERROR("%s() : acm alloc failed (%d)\n",
					 __func__, alloc_size);
			goto bail_out;
		}
	}

	addr->buffer_size = sz;
	return (void *)addr->alloced_phys_addr;

free_acm_ion_alloc:
	if (ddl_context->video_ion_client) {
		if (addr->alloc_handle) {
			ion_free(ddl_context->video_ion_client,
				addr->alloc_handle);
			addr->alloc_handle = NULL;
		}
	}
bail_out:
	return NULL;
}
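A hypothetical free counterpart to the allocator above, releasing whichever resource was taken: the ion handle on the ION path, or the contiguous allocation otherwise (the function name and exact cleanup order are assumptions; all identifiers come from the code above):

static void res_trk_pmem_free(struct ddl_buf_addr *addr)
{
	struct ddl_context *ddl_context = ddl_get_context();

	if (!addr)
		return;

	if (ddl_context->video_ion_client && addr->alloc_handle) {
		ion_free(ddl_context->video_ion_client, addr->alloc_handle);
		addr->alloc_handle = NULL;
	} else if (addr->alloced_phys_addr) {
		free_contiguous_memory_by_paddr(
			(unsigned long)addr->alloced_phys_addr);
	}
	addr->alloced_phys_addr = (phys_addr_t)NULL;
	addr->buffer_size = 0;
}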
Example No. 26
static int gralloc_alloc_buffer(alloc_device_t *dev, size_t size, int usage, buffer_handle_t *pHandle)
{
#if GRALLOC_ARM_DMA_BUF_MODULE
	{
		private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
		ion_user_handle_t ion_hnd;
		unsigned char *cpu_ptr;
		int shared_fd;
		int ret;
		unsigned int ion_flags = 0;

		if( (usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN )
			ion_flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;
		if (usage & GRALLOC_USAGE_PRIVATE_1) {
			ret = ion_alloc(m->ion_client, size, 0, ION_HEAP_CARVEOUT_MASK, ion_flags, &ion_hnd);
		} else {
			ret = ion_alloc(m->ion_client, size, 0, ION_HEAP_SYSTEM_MASK, ion_flags, &ion_hnd);
		}

		if (ret != 0)
		{
			AERR("Failed to ion_alloc from ion_client:%d", m->ion_client);
			return -1;
		}

		ret = ion_share(m->ion_client, ion_hnd, &shared_fd);

		if (ret != 0)
		{
			AERR("ion_share( %d ) failed", m->ion_client);

			if (0 != ion_free(m->ion_client, ion_hnd))
			{
				AERR("ion_free( %d ) failed", m->ion_client);
			}

			return -1;
		}

		cpu_ptr = (unsigned char *)mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, shared_fd, 0);

		if (MAP_FAILED == cpu_ptr)
		{
			AERR("ion_map( %d ) failed", m->ion_client);

			if (0 != ion_free(m->ion_client, ion_hnd))
			{
				AERR("ion_free( %d ) failed", m->ion_client);
			}

			close(shared_fd);
			return -1;
		}

		private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_ION, usage, size, (int)cpu_ptr, private_handle_t::LOCK_STATE_MAPPED);

		if (NULL != hnd)
		{
			hnd->share_fd = shared_fd;
			hnd->ion_hnd = ion_hnd;
			*pHandle = hnd;
			return 0;
		}
		else
		{
			AERR("Gralloc out of mem for ion_client:%d", m->ion_client);
		}

		close(shared_fd);
		ret = munmap(cpu_ptr, size);

		if (0 != ret)
		{
			AERR("munmap failed for base:%p size: %d", cpu_ptr, size);
		}

		ret = ion_free(m->ion_client, ion_hnd);

		if (0 != ret)
		{
			AERR("ion_free( %d ) failed", m->ion_client);
		}

		return -1;
	}
#endif

#if GRALLOC_ARM_UMP_MODULE
	{
		ump_handle ump_mem_handle;
		void *cpu_ptr;
		ump_secure_id ump_id;
		ump_alloc_constraints constraints;

		size = round_up_to_page_size(size);

		if ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN)
		{
			constraints =  UMP_REF_DRV_CONSTRAINT_USE_CACHE;
		}
		else
		{
			constraints = UMP_REF_DRV_CONSTRAINT_NONE;
		}

#ifdef GRALLOC_SIMULATE_FAILURES
		/* if the failure condition matches, fail this iteration */
		if (__ump_alloc_should_fail())
		{
			ump_mem_handle = UMP_INVALID_MEMORY_HANDLE;
		}
		else
#endif
		{
			ump_mem_handle = ump_ref_drv_allocate(size, constraints);

			if (UMP_INVALID_MEMORY_HANDLE != ump_mem_handle)
			{
				cpu_ptr = ump_mapped_pointer_get(ump_mem_handle);

				if (NULL != cpu_ptr)
				{
					ump_id = ump_secure_id_get(ump_mem_handle);

					if (UMP_INVALID_SECURE_ID != ump_id)
					{
						private_handle_t *hnd = new private_handle_t(private_handle_t::PRIV_FLAGS_USES_UMP, usage, size, (int)cpu_ptr,
						private_handle_t::LOCK_STATE_MAPPED, ump_id, ump_mem_handle);

						if (NULL != hnd)
						{
							*pHandle = hnd;
							return 0;
						}
						else
						{
							AERR("gralloc_alloc_buffer() failed to allocate handle. ump_handle = %p, ump_id = %d", ump_mem_handle, ump_id);
						}
					}
					else
					{
						AERR("gralloc_alloc_buffer() failed to retrieve valid secure id. ump_handle = %p", ump_mem_handle);
					}

					ump_mapped_pointer_release(ump_mem_handle);
				}
				else
				{
					AERR("gralloc_alloc_buffer() failed to map UMP memory. ump_handle = %p", ump_mem_handle);
				}

				ump_reference_release(ump_mem_handle);
			}
			else
			{
				AERR("gralloc_alloc_buffer() failed to allocate UMP memory. size:%d constraints: %d", size, constraints);
			}
		}
		return -1;
	}
#endif

}
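For reference, the allocator above is normally reached through the standard gralloc HAL entry points rather than called directly. A minimal round-trip sketch under those standard Android APIs (dimensions and usage flags are arbitrary; error handling is minimal):

/* Sketch: open the gralloc module, allocate one buffer (which lands in
 * gralloc_alloc_buffer() above for non-framebuffer usage), free it. */
#include <hardware/gralloc.h>

int gralloc_roundtrip(void)
{
	const hw_module_t *module;
	alloc_device_t *dev;
	buffer_handle_t handle;
	int stride, err;

	if (hw_get_module(GRALLOC_HARDWARE_MODULE_ID, &module))
		return -1;
	if (gralloc_open(module, &dev))
		return -1;

	/* CPU-read-often usage makes the ION branch above request cached
	 * memory (ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC). */
	err = dev->alloc(dev, 640, 480, HAL_PIXEL_FORMAT_RGBA_8888,
			 GRALLOC_USAGE_SW_READ_OFTEN, &handle, &stride);
	if (err == 0)
		dev->free(dev, handle);	/* ends up in alloc_device_free() */

	gralloc_close(dev);
	return err;
}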
/*****************************************************************************
 @Function                AllocPages
******************************************************************************/
static IMG_RESULT AllocPages(
	SYSMEM_Heap *		heap,
	IMG_UINT32			ui32Size,
	SYSMEMU_sPages *	psPages,
	SYS_eMemAttrib		eMemAttrib
)
{
    IMG_UINT32           Res;
    struct ion_handle *  ion_handle;
    unsigned             allocFlags;
    struct ion_client *  ion_client;
    IMG_UINT64 *         pCpuPhysAddrs;
    size_t               numPages;
    size_t               physAddrArrSize;

    ion_client = (struct ion_client *)heap->priv;

    if (   (eMemAttrib & SYS_MEMATTRIB_WRITECOMBINE)
        || (eMemAttrib & SYS_MEMATTRIB_UNCACHED))
    {
        allocFlags = 0;
    } else {
        allocFlags = ION_FLAG_CACHED;
    }

    if (eMemAttrib == SYS_MEMATTRIB_UNCACHED)
        REPORT(REPORT_MODULE_SYSMEM, REPORT_WARNING,
               "Purely uncached memory is not supported by ION");

    // PAGE_SIZE alignment; heap depends on platform
    ion_handle = ion_alloc(ion_client, ui32Size, PAGE_SIZE,
                           ION_HEAP_SYSTEM_MASK,
                           allocFlags);
    if (IS_ERR_OR_NULL(ion_handle)) {
        REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR,
               "Error allocating %u bytes from ion", ui32Size);
        Res = IMG_ERROR_OUT_OF_MEMORY;
        goto errAlloc;
    }

    /* Find out physical addresses in the mappable region */
    numPages = (ui32Size + HOST_MMU_PAGE_SIZE - 1)/HOST_MMU_PAGE_SIZE;

    physAddrArrSize = sizeof *pCpuPhysAddrs * numPages;
    pCpuPhysAddrs = IMG_BIGORSMALL_ALLOC(physAddrArrSize);
    if (!pCpuPhysAddrs) {
        Res = IMG_ERROR_OUT_OF_MEMORY;
        goto errPhysArrAlloc;
    }

    {
        struct scatterlist *psScattLs, *psScattLsAux;
        struct sg_table *psSgTable;
        size_t pg_i = 0;

        psSgTable = ion_sg_table(ion_client, ion_handle);
        if (psSgTable == NULL)
        {
            REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error obtaining sg table");
            Res = IMG_ERROR_FATAL;
            goto errGetPhys;
        }
        psScattLs = psSgTable->sgl;

        if (psScattLs == NULL)
        {
            REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR, "Error obtaining scatter list");
            Res = IMG_ERROR_FATAL;
            goto errGetPhys;
        }

        // Get physical addresses from scatter list
        for (psScattLsAux = psScattLs; psScattLsAux; psScattLsAux = sg_next(psScattLsAux))
        {
            int offset;
            dma_addr_t chunkBase = sg_phys(psScattLsAux);

            for (offset = 0; offset < psScattLsAux->length; offset += PAGE_SIZE, ++pg_i)
            {
                if (pg_i >= numPages)
                    break;

                //pCpuPhysAddrs[pg_i] = dma_map_page(NULL, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
                pCpuPhysAddrs[pg_i] = chunkBase + offset;
            }
            if (pg_i >= numPages)
                break;
        }
    }

    // Set pointer to physical address in structure
    psPages->ppaPhysAddr = pCpuPhysAddrs;

    DEBUG_REPORT(REPORT_MODULE_SYSMEM, "%s region of size %u phys 0x%llx",
                 __FUNCTION__, ui32Size, psPages->ppaPhysAddr[0]);

    Res = SYSBRGU_CreateMappableRegion(psPages->ppaPhysAddr[0], ui32Size, eMemAttrib,
    						psPages, &psPages->hRegHandle);
    if (Res != IMG_SUCCESS) {
        REPORT(REPORT_MODULE_SYSMEM, REPORT_ERR,
               "Error %u in SYSBRGU_CreateMappableRegion", Res);
        goto errCreateMapRegion;
    }

    psPages->pvImplData = ion_handle;

    return IMG_SUCCESS;

errCreateMapRegion:
errGetPhys:
    IMG_BIGORSMALL_FREE(numPages*sizeof(*pCpuPhysAddrs), pCpuPhysAddrs);
errPhysArrAlloc:
    /* no kernel mapping was created, so only the ion handle needs freeing */
    ion_free(ion_client, ion_handle);
errAlloc:
    return Res;
}
Example No. 28
static long msm_ion_custom_ioctl(struct ion_client *client,
				unsigned int cmd,
				unsigned long arg)
{
	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
	case ION_IOC_INV_CACHES:
	case ION_IOC_CLEAN_INV_CACHES:
	{
		struct ion_flush_data data;
		unsigned long start, end;
		struct ion_handle *handle = NULL;
		int ret;

		if (copy_from_user(&data, (void __user *)arg,
					sizeof(struct ion_flush_data)))
			return -EFAULT;

		start = (unsigned long) data.vaddr;
		end = (unsigned long) data.vaddr + data.length;

		if (start && check_vaddr_bounds(start, end)) {
			pr_err("%s: virtual address %p is out of bounds\n",
				__func__, data.vaddr);
			return -EINVAL;
		}

		if (!data.handle) {
			handle = ion_import_dma_buf(client, data.fd);
			if (IS_ERR(handle)) {
				pr_info("%s: Could not import handle: %ld\n",
					__func__, PTR_ERR(handle));
				return -EINVAL;
			}
		}

		ret = ion_do_cache_op(client,
				data.handle ? data.handle : handle,
				data.vaddr, data.offset, data.length,
				cmd);

		if (!data.handle)
			ion_free(client, handle);

		if (ret < 0)
			return ret;
		break;
	}
	case ION_IOC_GET_FLAGS:
	{
		struct ion_flag_data data;
		int ret;
		if (copy_from_user(&data, (void __user *)arg,
					sizeof(struct ion_flag_data)))
			return -EFAULT;

		ret = ion_handle_get_flags(client, data.handle, &data.flags);
		if (ret < 0)
			return ret;
		if (copy_to_user((void __user *)arg, &data,
					sizeof(struct ion_flag_data)))
			return -EFAULT;
		break;
	}
	default:
		return -ENOTTY;
	}
	return 0;
}
Example No. 29
/*--------------------MemoryManager Class STARTS here-----------------------------*/
void* MemoryManager::allocateBuffer(int width, int height, const char* format, int &bytes, int numBufs)
{
    LOG_FUNCTION_NAME;

    if(mIonFd == 0)
        {
        mIonFd = ion_open();
        if(mIonFd < 0)
            {
            CAMHAL_LOGEA("ion_open failed!!!");
            mIonFd = 0;
            return NULL;
            }
        }

    ///We allocate numBufs+1 because the last entry will be marked NULL to indicate end of array, which is used when freeing
    ///the buffers
    const uint numArrayEntriesC = (uint)(numBufs+1);

    ///Allocate a buffer array
    uint32_t *bufsArr = new uint32_t [numArrayEntriesC];
    if(!bufsArr)
        {
        CAMHAL_LOGEB("Allocation failed when creating buffers array of %d uint32_t elements", numArrayEntriesC);
        LOG_FUNCTION_NAME_EXIT;
        return NULL;
        }

    ///Initialize the array with zeros - this will help us while freeing the array in case of error
    ///If a value of an array element is NULL, it means we didn't allocate it
    memset(bufsArr, 0, sizeof(*bufsArr) * numArrayEntriesC);

    //2D Allocations are not supported currently
    if(bytes != 0)
        {
        struct ion_handle *handle;
        int mmap_fd;

        ///1D buffers
        for (int i = 0; i < numBufs; i++)
            {
            int ret = ion_alloc(mIonFd, bytes, 0, 1 << ION_HEAP_TYPE_CARVEOUT, &handle);
            if(ret < 0)
                {
                CAMHAL_LOGEB("ion_alloc resulted in error %d", ret);
                goto error;
                }

            CAMHAL_LOGDB("Before mapping, handle = %x, nSize = %d", handle, bytes);
            if ((ret = ion_map(mIonFd, handle, bytes, PROT_READ | PROT_WRITE, MAP_SHARED, 0,
                          (unsigned char**)&bufsArr[i], &mmap_fd)) < 0)
                {
                CAMHAL_LOGEB("Userspace mapping of ION buffers returned error %d", ret);
                ion_free(mIonFd, handle);
                goto error;
                }

            mIonHandleMap.add(bufsArr[i], (unsigned int)handle);
            mIonFdMap.add(bufsArr[i], (unsigned int) mmap_fd);
            mIonBufLength.add(bufsArr[i], (unsigned int) bytes);
            }

        }
    else // bytes == 0 indicates a 2-D tiler buffer request
        {
        }

    LOG_FUNCTION_NAME_EXIT;

    return (void*)bufsArr;

error:
    LOGE("Freeing buffers already allocated after error occurred");
    freeBuffer(bufsArr);

    if ( NULL != mErrorNotifier.get() )
        {
        mErrorNotifier->errorNotify(-ENOMEM);
        }

    LOG_FUNCTION_NAME_EXIT;
    return NULL;
}
Example No. 30
static long sprd_heap_ioctl(struct ion_client *client, unsigned int cmd,
				unsigned long arg)
{
	int ret = 0;

	switch (cmd) {
	case ION_SPRD_CUSTOM_PHYS:
	{
		struct ion_phys_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg,
				sizeof(data))) {
			pr_err("sprd_heap_ioctl alloc copy_from_user error!\n");
			return -EFAULT;
		}

		handle = ion_import_dma_buf(client, data.fd_buffer);

		if (IS_ERR(handle)) {
			pr_err("sprd_heap_ioctl alloc handle=0x%lx error!\n", (unsigned long)handle);
			return PTR_ERR(handle);
		}

		ret = ion_phys(client, handle, &data.phys, &data.size);
		ion_free(client, handle);
		
		if (ret) {
			pr_err("sprd_heap_ioctl alloc ret=0x%x error!\n",ret);
			return ret;
		}
		
		if (copy_to_user((void __user *)arg,
				&data, sizeof(data))) {
			pr_err("sprd_heap_ioctl alloc copy_to_user error!\n");
			return -EFAULT;
		}

		pr_debug("sprd_heap_ioctl alloc paddress=0x%lx size=0x%zx\n",data.phys,data.size);
		break;
	}
	case ION_SPRD_CUSTOM_MSYNC:
	{
#if 0
		struct ion_msync_data data;
		void *kaddr;
		void *paddr;
		size_t size;
		if (copy_from_user(&data, (void __user *)arg,
				sizeof(data))) {
			return -EFAULT;
		}
		kaddr = data.vaddr;
		paddr = data.paddr;	
		size = data.size;
		dmac_flush_range(kaddr, kaddr + size);
		outer_clean_range((phys_addr_t)paddr, (phys_addr_t)(paddr + size));

/*maybe open in future if support discrete page map so keep this code unremoved here*/
#else
		struct ion_msync_data data;
		void *v_addr;

		if (copy_from_user(&data, (void __user *)arg,
				sizeof(data))) {
			pr_err("sprd_heap_ioctl free copy_from_user error!\n");
			return -EFAULT;
		}

		if ((int)data.vaddr & (PAGE_SIZE - 1)) {
			pr_err("sprd_heap_ioctl free data.vaddr=%p error!\n",data.vaddr);
			return -EFAULT;
		}

		pr_debug("sprd_heap_ioctl free vaddress=%p paddress=%p size=0x%zx\n",data.vaddr,data.paddr,data.size);
		dmac_flush_range(data.vaddr, data.vaddr + data.size);

		v_addr = data.vaddr;
		while (v_addr < data.vaddr + data.size) {
			uint32_t phy_addr = user_va2pa(current->mm, (uint32_t)v_addr);
			if (phy_addr) {
				outer_flush_range(phy_addr, phy_addr + PAGE_SIZE);
			}
			v_addr += PAGE_SIZE;
		}
#endif
		break;
	}
#if defined(CONFIG_SPRD_IOMMU)
	case ION_SPRD_CUSTOM_GSP_MAP:
	{
		struct ion_mmu_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg,
				sizeof(data))) {
			pr_err("sprd_heap_ioctl gsp map copy_from_user error!\n");
			return -EFAULT;
		}

		handle = ion_import_dma_buf(client, data.fd_buffer);

		if (IS_ERR(handle)) {
			pr_err("sprd_heap_ioctl gsp map handle=0x%lx error!\n", (unsigned long)handle);
			return PTR_ERR(handle);
		}

		mutex_lock(&(ion_handle_buffer(handle))->lock);
		if (0 == (ion_handle_buffer(handle))->iomap_cnt[IOMMU_GSP]) {
			(ion_handle_buffer(handle))->iova[IOMMU_GSP] = sprd_iova_alloc(IOMMU_GSP, (ion_handle_buffer(handle))->size);
			ret = sprd_iova_map(IOMMU_GSP, (ion_handle_buffer(handle))->iova[IOMMU_GSP], ion_handle_buffer(handle));
		}
		(ion_handle_buffer(handle))->iomap_cnt[IOMMU_GSP]++;
		data.iova_addr = (ion_handle_buffer(handle))->iova[IOMMU_GSP];
		data.iova_size = (ion_handle_buffer(handle))->size;
		mutex_unlock(&(ion_handle_buffer(handle))->lock);
		ion_free(client, handle);
		if (ret) {
			pr_err("sprd_heap_ioctl gsp map sprd_iova_map error %d!\n",ret);
			sprd_iova_free(IOMMU_GSP, data.iova_addr, data.iova_size);
			return ret;
		}

		if (copy_to_user((void __user *)arg,
				&data, sizeof(data))) {
			pr_err("sprd_heap_ioctl gsp map copy_to_user error!\n");
			return -EFAULT;
		}
		break;
	}
	case ION_SPRD_CUSTOM_GSP_UNMAP:
	{
		struct ion_mmu_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg,
				sizeof(data))) {
			pr_err("sprd_heap_ioctl gsp unmap copy_from_user error!\n");
			return -EFAULT;
		}

		handle = ion_import_dma_buf(client, data.fd_buffer);

		if (IS_ERR(handle)) {
			pr_err("sprd_heap_ioctl gsp unmap handle=0x%lx ion_import_dma_buf error!\n", (unsigned long)handle);
			return PTR_ERR(handle);
		}

		mutex_lock(&(ion_handle_buffer(handle))->lock);
		if ((ion_handle_buffer(handle))->iomap_cnt[IOMMU_GSP] > 0) {
			(ion_handle_buffer(handle))->iomap_cnt[IOMMU_GSP]--;
			if (0 == (ion_handle_buffer(handle))->iomap_cnt[IOMMU_GSP]) {
				ret = sprd_iova_unmap(IOMMU_GSP, (ion_handle_buffer(handle))->iova[IOMMU_GSP], ion_handle_buffer(handle));
				sprd_iova_free(IOMMU_GSP, (ion_handle_buffer(handle))->iova[IOMMU_GSP], (ion_handle_buffer(handle))->size);
				(ion_handle_buffer(handle))->iova[IOMMU_GSP] = 0;
			}
		}
		mutex_unlock(&(ion_handle_buffer(handle))->lock);
		data.iova_addr = 0;
		data.iova_size = 0;
		ion_free(client, handle);
		if (ret) {
			pr_err("sprd_heap_ioctl gsp unmap sprd_iova_unmap error %d!\n",ret);
			return ret;
		}

		if (copy_to_user((void __user *)arg,
				&data, sizeof(data))) {
			pr_err("sprd_heap_ioctl gsp unmap copy_to_user error!\n");
			return -EFAULT;
		}
		break;
	}
	case ION_SPRD_CUSTOM_MM_MAP:
	{
		struct ion_mmu_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg,
				sizeof(data))) {
			pr_err("sprd_heap_ioctl mm map copy_from_user error!\n");
			return -EFAULT;
		}

		handle = ion_import_dma_buf(client, data.fd_buffer);

		if (IS_ERR(handle)) {
			pr_err("sprd_heap_ioctl mm map handle=0x%lx error!\n", (unsigned long)handle);
			return PTR_ERR(handle);
		}

		mutex_lock(&(ion_handle_buffer(handle))->lock);
		if (0 == (ion_handle_buffer(handle))->iomap_cnt[IOMMU_MM]) {
			(ion_handle_buffer(handle))->iova[IOMMU_MM] = sprd_iova_alloc(IOMMU_MM, (ion_handle_buffer(handle))->size);
			ret = sprd_iova_map(IOMMU_MM, (ion_handle_buffer(handle))->iova[IOMMU_MM], ion_handle_buffer(handle));
		}
		(ion_handle_buffer(handle))->iomap_cnt[IOMMU_MM]++;
		data.iova_size = (ion_handle_buffer(handle))->size;
		data.iova_addr = (ion_handle_buffer(handle))->iova[IOMMU_MM];
		
		mutex_unlock(&(ion_handle_buffer(handle))->lock);
		ion_free(client, handle);
		if (ret) {
			pr_err("sprd_heap_ioctl mm map ret=0x%x error!\n",ret);
			sprd_iova_free(IOMMU_MM,data.iova_addr,data.iova_size);
			return ret;
		}

		if (copy_to_user((void __user *)arg,
				&data, sizeof(data))) {
			pr_err("sprd_heap_ioctl mm map copy_to_user error!\n");
			return -EFAULT;
		}

		pr_debug("sprd_heap_ioctl mm map vaddress=0x%lx size=0x%zx\n",data.iova_addr,data.iova_size);
		break;
	}
	case ION_SPRD_CUSTOM_MM_UNMAP:
	{
		struct ion_mmu_data data;
		struct ion_handle *handle;

		if (copy_from_user(&data, (void __user *)arg,
				sizeof(data))) {
			pr_err("sprd_heap_ioctl mm unmap copy_from_user error!\n");
			return -EFAULT;
		}

		pr_debug("sprd_heap_ioctl mm unmap vaddress=0x%lx size=0x%zx\n",data.iova_addr,data.iova_size);
		handle = ion_import_dma_buf(client, data.fd_buffer);

		if (IS_ERR(handle)) {
			pr_err("sprd_heap_ioctl mm unmap handle=0x%lx error!\n", (unsigned long)handle);
			return PTR_ERR(handle);
		}

		mutex_lock(&(ion_handle_buffer(handle))->lock);
		if ((ion_handle_buffer(handle))->iomap_cnt[IOMMU_MM] > 0) {
			(ion_handle_buffer(handle))->iomap_cnt[IOMMU_MM]--;
			if (0 == (ion_handle_buffer(handle))->iomap_cnt[IOMMU_MM]) {
				ret = sprd_iova_unmap(IOMMU_MM, (ion_handle_buffer(handle))->iova[IOMMU_MM], (ion_handle_buffer(handle)));
				sprd_iova_free(IOMMU_MM, (ion_handle_buffer(handle))->iova[IOMMU_MM], (ion_handle_buffer(handle))->size);
				(ion_handle_buffer(handle))->iova[IOMMU_MM] = 0;
			}
		}
		mutex_unlock(&(ion_handle_buffer(handle))->lock);
		data.iova_addr = 0;
		data.iova_size = 0;
		ion_free(client, handle);
		if (ret) {
			pr_err("sprd_heap_ioctl mm unmap ret=0x%x error!\n",ret);
			return ret;
		}
		
		if (copy_to_user((void __user *)arg,
				&data, sizeof(data))) {
			pr_err("sprd_heap_ioctl mm unmap copy_to_user error!\n");
			return -EFAULT;
		}
		break;
	}
#endif
	case ION_SPRD_CUSTOM_FENCE_CREATE:
	{
		int ret = -1;
		struct ion_fence_data data;
		
		if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
			pr_err("FENCE_CREATE user data is err\n");
			return -EFAULT;
		}
		
		ret = sprd_fence_build(&data);
		if (ret != 0) {
			pr_err("sprd_fence_build failed\n");
			return -EFAULT;
		}
		
		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			sprd_fence_destroy(&data);
			pr_err("copy_to_user fence failed\n");
			return -EFAULT;
		}
		
		break;
	}
	case ION_SPRD_CUSTOM_FENCE_SIGNAL:
	{
		struct ion_fence_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
			pr_err("FENCE_CREATE user data is err\n");
			return -EFAULT;
		}

		sprd_fence_signal(&data);
		
		break;
	}
	case ION_SPRD_CUSTOM_FENCE_DUP:
	{
		break;
		
	}
	default:
		pr_err("sprd_ion Do not support cmd: %d\n", cmd);
		return -ENOTTY;
	}

	return ret;
}
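Custom commands like the ones above reach sprd_heap_ioctl() through the generic ION_IOC_CUSTOM path (see the ion_ioctl() example earlier). A minimal userspace sketch of querying a buffer's physical address this way, assuming the same `struct ion_phys_data` layout the handler copies (fd_buffer, phys, size) and that the driver header exposes it:

/* Hypothetical userspace query of a dma-buf's physical address via the
 * ION_SPRD_CUSTOM_PHYS handler above. */
#include <stddef.h>
#include <sys/ioctl.h>

int sprd_get_phys(int ion_fd, int dmabuf_fd,
		  unsigned long *phys, size_t *size)
{
	struct ion_phys_data phys_data = { .fd_buffer = dmabuf_fd };
	struct ion_custom_data custom = {
		.cmd = ION_SPRD_CUSTOM_PHYS,
		.arg = (unsigned long)&phys_data,
	};
	int ret;

	/* routed by ion_ioctl() to dev->custom_ioctl == sprd_heap_ioctl() */
	ret = ioctl(ion_fd, ION_IOC_CUSTOM, &custom);
	if (ret == 0) {
		*phys = phys_data.phys;
		*size = phys_data.size;
	}
	return ret;
}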