Example #1
void tf_l4sec_clkdm_allow_idle(bool wakeunlock)
{
	spin_lock(&tf_get_device()->sm.lock);
	if (atomic_dec_return(&smc_l4_sec_clkdm_use_count) == 0)
		clkdm_allow_idle(smc_l4_sec_clkdm);
#ifdef CONFIG_HAS_WAKELOCK
	if (wakeunlock)
		if (atomic_dec_return(&tf_wake_lock_count) == 0)
			wake_unlock(&g_tf_wake_lock);
#endif
	spin_unlock(&tf_get_device()->sm.lock);
}
Example #2
void tf_l4sec_clkdm_wakeup(bool wakelock)
{
	spin_lock(&tf_get_device()->sm.lock);
#ifdef CONFIG_HAS_WAKELOCK
	if (wakelock) {
		atomic_inc(&tf_wake_lock_count);
		wake_lock(&g_tf_wake_lock);
	}
#endif
	atomic_inc(&smc_l4_sec_clkdm_use_count);
	clkdm_wakeup(smc_l4_sec_clkdm);
	spin_unlock(&tf_get_device()->sm.lock);
}
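Taken together, Examples #1 and #2 form a balanced pair: every wakeup increments the clock-domain use count (and optionally the wake-lock count) that allow_idle later decrements. A minimal sketch of the intended calling pattern (the caller below is hypothetical, not part of the driver):

static void tf_example_secure_call(void)
{
	/* Keep the L4SEC clock domain (and a wake lock) held for the call */
	tf_l4sec_clkdm_wakeup(true);

	/* ... issue the secure-world request here ... */

	/* Drop the references; the domain may idle once the counts reach 0 */
	tf_l4sec_clkdm_allow_idle(true);
}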
Example #3
int __init tf_ctrl_device_register(void)
{
	int error;
	struct tf_device *dev = tf_get_device();

	cdev_init(&dev->cdev_ctrl, &g_tf_ctrl_device_file_ops);
	dev->cdev_ctrl.owner = THIS_MODULE;

	/* The ctrl device presumably occupies the device number right
	 * after the main device, hence the "+ 1" */
	error = register_chrdev_region(dev->dev_number + 1, 1,
		TF_DEVICE_CTRL_BASE_NAME);
	if (error)
		return error;

	error = cdev_add(&dev->cdev_ctrl,
		dev->dev_number + 1, 1);
	if (error) {
		cdev_del(&(dev->cdev_ctrl));
		unregister_chrdev_region(dev->dev_number + 1, 1);
		return error;
	}

#ifdef CONFIG_ANDROID
	tf_ctrl_class = class_create(THIS_MODULE, TF_DEVICE_CTRL_BASE_NAME);
	device_create(tf_ctrl_class, NULL,
		dev->dev_number + 1,
		NULL, TF_DEVICE_CTRL_BASE_NAME);
#endif

	mutex_init(&dev->dev_mutex);

	return error;
}
Example #4
TEEC_Result TEEC_InitializeContext(const char *name,
				   TEEC_Context *context)
{
	int error;
	struct tf_connection *connection = NULL;

	error = tf_open(tf_get_device(), NULL, &connection);
	if (error != 0) {
		dprintk(KERN_ERR "TEEC_InitializeContext(%s): "
			"tf_open failed (error %d)!\n",
			(name == NULL ? "(null)" : name), error);
		goto error;
	}
	BUG_ON(connection == NULL);
	connection->owner = TF_CONNECTION_OWNER_KERNEL;

	error = tf_create_device_context(connection);
	if (error != 0) {
		dprintk(KERN_ERR "TEEC_InitializeContext(%s): "
			"tf_create_device_context failed (error %d)!\n",
			(name == NULL ? "(null)" : name), error);
		goto error;
	}

	context->imp._connection = connection;
	/*spin_lock_init(&context->imp._operations_lock);*/
	return S_SUCCESS;

error:
	if (connection != NULL)
		tf_close(connection);
	return TEEC_encode_error(error);
}
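For context, a kernel client normally pairs this call with TEEC_FinalizeContext. A minimal usage sketch, assuming the standard GlobalPlatform client API flow (error handling elided):

	TEEC_Context context;
	TEEC_Result result;

	result = TEEC_InitializeContext(NULL, &context);
	if (result != TEEC_SUCCESS)
		return result;
	/* ... TEEC_OpenSession() / TEEC_InvokeCommand() ... */
	TEEC_FinalizeContext(&context);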
Example #5
static int tf_ctrl_device_open(struct inode *inode, struct file *file)
{
	int error;
	struct tf_connection *connection = NULL;

	dpr_info("%s(%u:%u, %p)\n",
		__func__, imajor(inode), iminor(inode), file);

	/* Dummy lseek for non-seekable driver */
	error = nonseekable_open(inode, file);
	if (error != 0) {
		dpr_err("%s(%p): "
			"nonseekable_open failed (error %d)!\n",
			__func__, file, error);
		goto error;
	}

#ifndef CONFIG_ANDROID
	/*
	 * Check the file flags. We only authorize O_RDWR access.
	 */
	if ((file->f_flags & O_ACCMODE) != O_RDWR) {
		dpr_err("%s(%p): "
			"Invalid access mode %u\n",
			__func__, file, file->f_flags);
		error = -EACCES;
		goto error;
	}
#endif

	error = tf_ctrl_check_omap_type();
	if (error <= 0) {
		/* A zero result means the OMAP type is unsupported; do not
		 * report success from open() */
		if (error == 0)
			error = -ENODEV;
		goto error;
	}

	error = tf_open(tf_get_device(), file, &connection);
	if (error != 0) {
		dpr_err("%s(%p): tf_open failed (error %d)!\n",
			__func__, file, error);
		goto error;
	}

	file->private_data = connection;

	/*
	 * Successful completion.
	 */

	dpr_info("%s(%p): Success\n", __func__, file);
	return 0;

	/*
	 * Error handling.
	 */
error:
	if (connection != NULL)
		tf_close(connection);
	dpr_info("%s(%p): Failure (error %d)\n",
		__func__, file, error);
	return error;
}
Example #6
int tf_pm_hibernate(struct tf_comm *comm)
{
	struct tf_device *dev = tf_get_device();

	dpr_info("%s()\n", __func__);

	/*
	 * As we enter in CORE OFF, the keys are going to be cleared.
	 * Reset the global key context.
	 * When the system leaves CORE OFF, this will force the driver to go
	 * through the secure world which will reconfigure the accelerators.
	 */
	dev->aes1_key_context = 0;
	dev->des_key_context = 0;
#ifndef CONFIG_SMC_KERNEL_CRYPTO
	dev->sham1_is_public = false;
#endif
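	/* The same aes1_key_context reset appears in Example #12, where a
	 * public key overwrites a potential secure key. */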
	return 0;
}
Example #7
void __init tf_allocate_workspace(void)
{
	struct tf_device *dev = tf_get_device();

	tf_clock_timer_init();

	if (tf_ctrl_check_omap_type() <= 0)
		return;

	dev->workspace_size = smc_mem;
	if (dev->workspace_size < 3*SZ_1M)
		dev->workspace_size = 3*SZ_1M;

	if (smc_address == 0)
#if 0
		dev->workspace_addr = (u32) __pa(__alloc_bootmem(
			dev->workspace_size, SZ_1M, __pa(MAX_DMA_ADDRESS)));
#else
		dev->workspace_addr = (u32) 0xBFD00000;
#endif
	else
		dev->workspace_addr = smc_address;
}
Example #8
int tf_pm_resume(struct tf_comm *comm)
{
	dprintk(KERN_INFO "tf_pm_resume()\n");
	#if 0
	{
		void *workspace_va;
		struct tf_device *dev = tf_get_device();
		workspace_va = ioremap(dev->workspace_addr,
			dev->workspace_size);
		printk(KERN_INFO
		"Read first word of workspace [0x%x]\n",
		*(uint32_t *)workspace_va);
	}
	#endif

#ifdef CONFIG_SMC_KERNEL_CRYPTO
	spin_lock(&tf_delayed_resume_lock);
	tf_need_delayed_resume = DELAYED_RESUME_PENDING;
	spin_unlock(&tf_delayed_resume_lock);
#endif
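	/* The PENDING flag set above is consumed by
	 * tf_delayed_secure_resume() (Example #10), which performs the
	 * actual secure-world resume. */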
	return 0;
}
Example #9
static long tf_ctrl_device_ioctl(struct file *file, unsigned int ioctl_num,
	unsigned long ioctl_param)
{
	int result = S_SUCCESS;
	struct tf_pa_ctrl pa_ctrl;
	struct tf_device *dev = tf_get_device();

	dpr_info("%s(%p, %u, %p)\n",
		__func__, file, ioctl_num, (void *) ioctl_param);

	mutex_lock(&dev->dev_mutex);

	if (ioctl_num != IOCTL_TF_PA_CTRL) {
		dpr_err("%s(%p): ioctl number is invalid (%p)\n",
			__func__, file, (void *)ioctl_num);

		result = -EFAULT;
		goto exit;
	}

	if ((ioctl_param & 0x3) != 0) {
		dpr_err("%s(%p): ioctl command message pointer is not word "
			"aligned (%p)\n",
			__func__, file, (void *)ioctl_param);

		result = -EFAULT;
		goto exit;
	}

	if (copy_from_user(&pa_ctrl, (struct tf_pa_ctrl *)ioctl_param,
			sizeof(struct tf_pa_ctrl))) {
		dpr_err("%s(%p): cannot access ioctl parameter (%p)\n",
			__func__, file, (void *)ioctl_param);

		result = -EFAULT;
		goto exit;
	}

	switch (pa_ctrl.nPACommand) {
	case TF_PA_CTRL_START: {
		struct tf_shmem_desc *shmem_desc = NULL;
		u32 shared_mem_descriptors[TF_MAX_COARSE_PAGES];
		u32 descriptor_count;
		u32 offset;
		struct tf_connection *connection;

		dpr_info("%s(%p): Start the SMC PA (%d bytes) with conf "
			"(%d bytes)\n",
			__func__, file, pa_ctrl.pa_size, pa_ctrl.conf_size);

		connection = tf_conn_from_file(file);

		if (dev->workspace_addr == 0) {
			result = -ENOMEM;
			goto start_exit;
		}

		result = tf_validate_shmem_and_flags(
				(u32)pa_ctrl.conf_buffer,
				pa_ctrl.conf_size,
				TF_SHMEM_TYPE_READ);
		if (result != 0)
			goto start_exit;

		offset = 0;
		result = tf_map_shmem(
				connection,
				(u32)pa_ctrl.conf_buffer,
				TF_SHMEM_TYPE_READ,
				true, /* in user space */
				shared_mem_descriptors,
				&offset,
				pa_ctrl.conf_size,
				&shmem_desc,
				&descriptor_count);
		if (result != 0)
			goto start_exit;

		if (descriptor_count > 1) {
			dpr_err("%s(%p): configuration file is too long (%d)\n",
				__func__, file, descriptor_count);
			result = -ENOMEM;
			goto start_exit;
		}

		result = tf_start(&dev->sm,
			dev->workspace_addr,
			dev->workspace_size,
			pa_ctrl.pa_buffer,
			pa_ctrl.pa_size,
			shared_mem_descriptors[0],
			offset,
			pa_ctrl.conf_size);
		if (result)
			dpr_err("SMC: start failed\n");
		else
			dpr_info("SMC: started\n");

start_exit:
		tf_unmap_shmem(connection, shmem_desc, true); /* full cleanup */
		break;
	}

	case TF_PA_CTRL_STOP:
		dpr_info("%s(%p): Stop the SMC PA\n", __func__, file);

		result = tf_power_management(&dev->sm,
			TF_POWER_OPERATION_SHUTDOWN);
		if (result)
			dpr_err("SMC: stop failed [0x%x]\n", result);
		else
			dpr_info("SMC: stopped\n");
		break;

	default:
		result = -EOPNOTSUPP;
		break;
	}

exit:
	mutex_unlock(&dev->dev_mutex);
	return result;
}
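A user-space sketch of driving this ioctl, with the struct fields inferred from the handler above (the device path and the pa_image/conf buffers are hypothetical):

	struct tf_pa_ctrl pa_ctrl;
	int fd = open("/dev/tf_ctrl", O_RDWR);

	memset(&pa_ctrl, 0, sizeof(pa_ctrl));
	pa_ctrl.nPACommand = TF_PA_CTRL_START;
	pa_ctrl.pa_buffer = pa_image;	/* SMC PA binary */
	pa_ctrl.pa_size = pa_image_size;
	pa_ctrl.conf_buffer = conf;	/* configuration blob */
	pa_ctrl.conf_size = conf_size;
	if (ioctl(fd, IOCTL_TF_PA_CTRL, &pa_ctrl) != 0)
		perror("IOCTL_TF_PA_CTRL");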
Example #10
int tf_delayed_secure_resume(void)
{
	int ret;
	union tf_command message;
	union tf_answer answer;
	struct tf_device *dev = tf_get_device();

	spin_lock(&tf_delayed_resume_lock);
	if (likely(tf_need_delayed_resume == DELAYED_RESUME_NONE)) {
		spin_unlock(&tf_delayed_resume_lock);
		return 0;
	}

	if (unlikely(tf_need_delayed_resume == DELAYED_RESUME_ONGOING)) {
		spin_unlock(&tf_delayed_resume_lock);

		/*
		 * Wait for the other caller to actually finish the delayed
		 * resume operation
		 */
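		/* cpu_relax() implies a compiler barrier, so the flag is
		 * re-read on every pass of this busy-wait. */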
		while (tf_need_delayed_resume != DELAYED_RESUME_NONE)
			cpu_relax();

		return 0;
	}

	tf_need_delayed_resume = DELAYED_RESUME_ONGOING;
	spin_unlock(&tf_delayed_resume_lock);

	/*
	 * When the system leaves CORE OFF, HWA are configured as secure.  We
	 * need them as public for the Linux Crypto API.
	 */
	memset(&message, 0, sizeof(message));

	message.header.message_type = TF_MESSAGE_TYPE_MANAGEMENT;
	message.header.message_size =
		(sizeof(struct tf_command_management) -
			sizeof(struct tf_command_header))/sizeof(u32);
	message.management.command =
		TF_MANAGEMENT_RESUME_FROM_CORE_OFF;

	ret = tf_send_receive(&dev->sm, &message, &answer, NULL, false);
	if (ret) {
		printk(KERN_ERR "%s(%p): "
			"tf_send_receive failed (error %d)!\n",
			__func__, &dev->sm, ret);

		unregister_smc_public_crypto_digest();
		unregister_smc_public_crypto_aes();

		/* Reset the flag so that other callers spinning in the
		 * wait loop above are not stuck on ONGOING forever */
		spin_lock(&tf_delayed_resume_lock);
		tf_need_delayed_resume = DELAYED_RESUME_NONE;
		spin_unlock(&tf_delayed_resume_lock);
		return ret;
	}

	if (answer.header.error_code) {
		unregister_smc_public_crypto_digest();
		unregister_smc_public_crypto_aes();
	}

	spin_lock(&tf_delayed_resume_lock);
	tf_need_delayed_resume = DELAYED_RESUME_NONE;
	spin_unlock(&tf_delayed_resume_lock);

	return answer.header.error_code;
}
Example #11
/*
 * Static function that performs AES encryption/decryption, using the DMA
 * for the data transfers.
 *
 * Inputs:  src       : pointer to the input data to process
 *          nb_blocks : number of AES blocks to process
 *          ctrl      : AES control word for the operation (used here by
 *                      the fault-injection hook)
 *          is_kernel : true if src and dest are kernel-space buffers,
 *                      false if they are user-space buffers
 * Output:  dest      : pointer to the output data (may be equal to src)
 */
static bool tf_aes_update_dma(u8 *src, u8 *dest, u32 nb_blocks,
			      u32 ctrl, bool is_kernel)
{
	/*
	 * Note: the DMA only sees physical addresses!
	 */

	int dma_ch0;
	int dma_ch1;
	struct omap_dma_channel_params ch0_parameters;
	struct omap_dma_channel_params ch1_parameters;
	u32 length = nb_blocks * AES_BLOCK_SIZE;
	u32 length_loop = 0;
	u32 nb_blocks_loop = 0;
	struct tf_device *dev = tf_get_device();

	dprintk(KERN_INFO
		"%s: In=0x%08x, Out=0x%08x, Len=%u\n",
		__func__,
		(unsigned int)src,
		(unsigned int)dest,
		(unsigned int)length);

	/* Busy-wait for the DMA mutex instead of sleeping in mutex_lock() */
	while (!mutex_trylock(&dev->sm.dma_mutex))
		cpu_relax();

	if (tf_dma_request(&dma_ch0) != PUBLIC_CRYPTO_OPERATION_SUCCESS) {
		mutex_unlock(&dev->sm.dma_mutex);
		return false;
	}
	if (tf_dma_request(&dma_ch1) != PUBLIC_CRYPTO_OPERATION_SUCCESS) {
		omap_free_dma(dma_ch0);
		mutex_unlock(&dev->sm.dma_mutex);
		return false;
	}

	while (length > 0) {

		/*
		 * At this point, we are sure that the DMA channels are
		 * available and not used by another public crypto operation.
		 */

		/*DMA used for Input and Output */
		OUTREG32(&paes_reg->AES_SYSCONFIG,
			INREG32(&paes_reg->AES_SYSCONFIG)
			| AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT
			| AES_SYSCONFIG_DMA_REQ_IN_EN_BIT);

		/*check length */
		if (length <= dev->dma_buffer_length)
			length_loop = length;
		else
			length_loop = dev->dma_buffer_length;

		/*The length is always a multiple of the block size */
		nb_blocks_loop = length_loop / AES_BLOCK_SIZE;

		/*
		 * Copy the data from the user input buffer into a
		 * preallocated buffer, which has the correct properties
		 * for efficient DMA transfers.
		 */
		if (!is_kernel) {
			if (copy_from_user(
				 dev->dma_buffer, src, length_loop)) {
				omap_free_dma(dma_ch0);
				omap_free_dma(dma_ch1);
				mutex_unlock(&dev->sm.dma_mutex);
				return false;
			}
		} else {
			memcpy(dev->dma_buffer, src, length_loop);
		}

		/*DMA1: Mem -> AES */
		tf_dma_set_channel_common_params(&ch0_parameters,
			nb_blocks_loop,
			DMA_CEN_Elts_per_Frame_AES,
			AES1_REGS_HW_ADDR + 0x60,
			(u32)dev->dma_buffer_phys,
			OMAP44XX_DMA_AES1_P_DATA_IN_REQ);

		ch0_parameters.src_amode = OMAP_DMA_AMODE_POST_INC;
		ch0_parameters.dst_amode = OMAP_DMA_AMODE_CONSTANT;
		ch0_parameters.src_or_dst_synch = OMAP_DMA_DST_SYNC;

		dprintk(KERN_INFO "%s: omap_set_dma_params(ch0)\n", __func__);
		omap_set_dma_params(dma_ch0, &ch0_parameters);

		omap_set_dma_src_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_8);
		omap_set_dma_dest_burst_mode(dma_ch0, OMAP_DMA_DATA_BURST_8);
		omap_set_dma_src_data_pack(dma_ch0, 1);

		/*DMA2: AES -> Mem */
		tf_dma_set_channel_common_params(&ch1_parameters,
			nb_blocks_loop,
			DMA_CEN_Elts_per_Frame_AES,
			(u32)dev->dma_buffer_phys,
			AES1_REGS_HW_ADDR + 0x60,
			OMAP44XX_DMA_AES1_P_DATA_OUT_REQ);

		ch1_parameters.src_amode = OMAP_DMA_AMODE_CONSTANT;
		ch1_parameters.dst_amode = OMAP_DMA_AMODE_POST_INC;
		ch1_parameters.src_or_dst_synch = OMAP_DMA_SRC_SYNC;

		dprintk(KERN_INFO "%s: omap_set_dma_params(ch1)\n", __func__);
		omap_set_dma_params(dma_ch1, &ch1_parameters);

		omap_set_dma_src_burst_mode(dma_ch1, OMAP_DMA_DATA_BURST_8);
		omap_set_dma_dest_burst_mode(dma_ch1, OMAP_DMA_DATA_BURST_8);
		omap_set_dma_dest_data_pack(dma_ch1, 1);

		wmb();

		dprintk(KERN_INFO
			"%s: Start DMA channel %d\n",
			__func__, (unsigned int)dma_ch1);
		tf_dma_start(dma_ch1, OMAP_DMA_BLOCK_IRQ);
		dprintk(KERN_INFO
			"%s: Start DMA channel %d\n",
			__func__, (unsigned int)dma_ch0);
		tf_dma_start(dma_ch0, OMAP_DMA_BLOCK_IRQ);

		dprintk(KERN_INFO
			"%s: Waiting for IRQ\n", __func__);
		tf_dma_wait(2);

		/*Unset DMA synchronisation requests */
		OUTREG32(&paes_reg->AES_SYSCONFIG,
				INREG32(&paes_reg->AES_SYSCONFIG)
			& (~AES_SYSCONFIG_DMA_REQ_OUT_EN_BIT)
			& (~AES_SYSCONFIG_DMA_REQ_IN_EN_BIT));

		omap_clear_dma(dma_ch0);
		omap_clear_dma(dma_ch1);

		/*
		 *The dma transfer is complete
		 */

		pr_info("%s completing\n", __func__);
#ifdef CONFIG_TF_DRIVER_FAULT_INJECTION
		tf_aes_fault_injection(ctrl, dev->dma_buffer);
#endif

		/*The DMA output is in the preallocated aligned buffer
		 *and needs to be copied to the output buffer.*/
		if (!is_kernel) {
			if (copy_to_user(
				dest, dev->dma_buffer, length_loop)) {
				omap_free_dma(dma_ch0);
				omap_free_dma(dma_ch1);
				mutex_unlock(&dev->sm.dma_mutex);
				return false;
			}
		} else {
			memcpy(dest, dev->dma_buffer, length_loop);
		}

		src += length_loop;
		dest += length_loop;
		length -= length_loop;
	}

	/* For safety reasons, clear the whole working buffer, not just the
	 * last chunk */
	memset(dev->dma_buffer, 0, dev->dma_buffer_length);

	/*release the DMA */
	omap_free_dma(dma_ch0);
	omap_free_dma(dma_ch1);

	mutex_unlock(&dev->sm.dma_mutex);

	dprintk(KERN_INFO "%s: Success\n", __func__);

	return true;
}
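To make the chunking arithmetic concrete: with the 16-byte AES block size and, for instance, a 256 KiB dev->dma_buffer_length (the real size is configured elsewhere in the driver), a 1 MiB request of 65536 blocks runs the while loop four times, each pass pushing 16384 blocks through the two DMA channels.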
Example #12
/*----------------------------------------------------------------------------
 *Restore the HWA registers from the operation state structure
 *---------------------------------------------------------------------------*/
static void tf_aes_restore_registers(
	struct tf_crypto_aes_operation_state *aes_state, int encrypt)
{
	struct tf_device *dev = tf_get_device();
	u32 CTRL = aes_state->CTRL;

	dprintk(KERN_INFO "tf_aes_restore_registers: "
		"paes_reg(%p) <- aes_state(%p): CTRL=0x%08x\n",
		paes_reg, aes_state, aes_state->CTRL);

	if (aes_state->key_is_public) {
		OUTREG32(&paes_reg->AES_KEY1_0, aes_state->KEY1_0);
		OUTREG32(&paes_reg->AES_KEY1_1, aes_state->KEY1_1);
		OUTREG32(&paes_reg->AES_KEY1_2, aes_state->KEY1_2);
		OUTREG32(&paes_reg->AES_KEY1_3, aes_state->KEY1_3);
		OUTREG32(&paes_reg->AES_KEY1_4, aes_state->KEY1_4);
		OUTREG32(&paes_reg->AES_KEY1_5, aes_state->KEY1_5);
		OUTREG32(&paes_reg->AES_KEY1_6, aes_state->KEY1_6);
		OUTREG32(&paes_reg->AES_KEY1_7, aes_state->KEY1_7);

		/*
		 * Make sure a potential secure key that has been overwritten by
		 * the previous code is reinstalled before performing other
		 * public crypto operations.
		 */
		dev->aes1_key_context = 0;

		if (encrypt)
			CTRL |= AES_CTRL_DIRECTION_ENCRYPT;
		else
			CTRL = CTRL & ~AES_CTRL_DIRECTION_ENCRYPT;
	} else {
		CTRL |= INREG32(&paes_reg->AES_CTRL);
	}

	/*
	 * Restore the IV first if we are in CBC or CTR mode
	 * (not required for ECB)
	 */
	if (!AES_CTRL_IS_MODE_ECB(CTRL)) {
		OUTREG32(&paes_reg->AES_IV_IN_0, aes_state->AES_IV_0);
		OUTREG32(&paes_reg->AES_IV_IN_1, aes_state->AES_IV_1);
		OUTREG32(&paes_reg->AES_IV_IN_2, aes_state->AES_IV_2);
		OUTREG32(&paes_reg->AES_IV_IN_3, aes_state->AES_IV_3);
	}

	/* Then set the CTRL register:
	 * overwrite CTRL only when needed, because doing it unconditionally
	 * breaks the HWA processing (observed experimentally)
	 */

	CTRL = (CTRL & (3 << 3)) /* key size */
		|  (CTRL & ((1 << 2) | (1 << 5) | (1 << 6)))
		|  (0x3 << 7) /* Always set CTR_WIDTH to 128-bit */;

	if ((CTRL & 0x1FC) != (INREG32(&paes_reg->AES_CTRL) & 0x1FC))
		OUTREG32(&paes_reg->AES_CTRL, CTRL & 0x1FC);

	/* Reset the SYSCONFIG register */
	OUTREG32(&paes_reg->AES_SYSCONFIG, 0);
}