Example 1
/* Yields to the Secure World */
int tf_schedule_secure_world(struct tf_comm *comm)
{
	int status = 0;
	int ret;
	unsigned long iflags;
	u32 appli_id;

	tf_set_current_time(comm);

	local_irq_save(iflags);

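	/*
	 * g_RPC_advancement is also updated from the SMC interrupt path,
	 * so it is sampled here with IRQs disabled.
	 */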
	switch (g_RPC_advancement) {
	case RPC_ADVANCEMENT_NONE:
		/* Return from IRQ */
		appli_id = SMICODEPUB_IRQ_END;
		break;
	case RPC_ADVANCEMENT_PENDING:
		/* nothing to do in this case */
		goto exit;
	default:
	case RPC_ADVANCEMENT_FINISHED:
		appli_id = SMICODEPUB_RPC_END;
		g_RPC_advancement = RPC_ADVANCEMENT_NONE;
		break;
	}

	tf_clock_timer_start();

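	/*
	 * g_service_end is cleared when the secure world returns normally;
	 * if it is still set after the dispatch below, the PA has stopped
	 * and its exit code is read back from the L1 shared buffer.
	 */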
	g_service_end = 1;
	/* yield to the Secure World */
	ret = omap4_secure_dispatcher(appli_id, /* app_id */
	   0, 0,        /* flags, nargs */
	   0, 0, 0, 0); /* arg1, arg2, arg3, arg4 */
	if (g_service_end != 0) {
		dpr_err("Service End ret=%X\n", ret);

		if (ret == 0) {
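			/*
			 * Invalidate the cached copy of the L1 buffer so the
			 * exit code written by the secure world is visible.
			 */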
			dmac_flush_range((void *)comm->l1_buffer,
				(void *)(((u32)(comm->l1_buffer)) +
					PAGE_SIZE));
			outer_inv_range(__pa(comm->l1_buffer),
				__pa(comm->l1_buffer) +
				PAGE_SIZE);

			ret = comm->l1_buffer->exit_code;

			dpr_err("SMC PA failure ret=%X\n", ret);
			if (ret == 0)
				ret = -EFAULT;
		}
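		/* The PA has stopped: mark it unavailable and unload all PAs */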
		clear_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags);
		omap4_secure_dispatcher(API_HAL_LM_PAUNLOADALL_INDEX,
			FLAG_START_HAL_CRITICAL, 0, 0, 0, 0, 0);
		status = ret;
	}

	tf_clock_timer_stop();

exit:
	local_irq_restore(iflags);

	return status;
}
/* Start the SMC PA */
int tf_start(struct tf_comm *comm,
	u32 workspace_addr, u32 workspace_size,
	u8 *pa_buffer, u32 pa_size,
	u32 conf_descriptor, u32 conf_offset, u32 conf_size)
{
	struct tf_l1_shared_buffer *l1_shared_buffer = NULL;
	struct tf_ns_pa_info pa_info;
	int ret;
	u32 descr;
	u32 sdp_backing_store_addr;
	u32 sdp_bkext_store_addr;
#ifdef CONFIG_SMP
	long ret_affinity;
	cpumask_t saved_cpu_mask;
	cpumask_t local_cpu_mask = CPU_MASK_NONE;

	/* OMAP4 Secure ROM Code can only be called from CPU0. */
	cpu_set(0, local_cpu_mask);
	sched_getaffinity(0, &saved_cpu_mask);
	ret_affinity = sched_setaffinity(0, &local_cpu_mask);
	if (ret_affinity != 0)
		dpr_err("sched_setaffinity #1 -> 0x%lX", ret_affinity);
#endif

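	/*
	 * Carve the SDP backing store (1 MB) and the extended backing
	 * store (128 KB) out of the top of the workspace.
	 */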
	workspace_size -= SZ_1M;
	sdp_backing_store_addr = workspace_addr + workspace_size;
	workspace_size -= 0x20000;
	sdp_bkext_store_addr = workspace_addr + workspace_size;

	tf_clock_timer_start();

	if (test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
		dpr_err("%s(%p): The SMC PA is already started\n",
			__func__, comm);

		ret = -EFAULT;
		goto error1;
	}

	if (sizeof(struct tf_l1_shared_buffer) != PAGE_SIZE) {
		dpr_err("%s(%p): The L1 structure size is incorrect!\n",
			__func__, comm);
		ret = -EFAULT;
		goto error1;
	}

	ret = tf_se_init(comm, sdp_backing_store_addr,
		sdp_bkext_store_addr);
	if (ret != 0) {
		dpr_err("%s(%p): SE initialization failed\n", __func__, comm);
		goto error1;
	}

	l1_shared_buffer =
		(struct tf_l1_shared_buffer *)
			internal_get_zeroed_page(GFP_KERNEL);

	if (l1_shared_buffer == NULL) {
		dpr_err("%s(%p): Ouf of memory!\n", __func__, comm);

		ret = -ENOMEM;
		goto error1;
	}
	/* Lock the page to keep the L1 shared buffer pinned in memory */
	__set_page_locked(virt_to_page(l1_shared_buffer));

	dpr_info("%s(%p): L1SharedBuffer={0x%08x, 0x%08x}\n",
		__func__, comm,
		(u32) l1_shared_buffer, (u32) __pa(l1_shared_buffer));

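	/*
	 * Hand the ROM code the PA certificate address and the physical
	 * address of the L1 shared buffer via the pa_info structure.
	 */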
	descr = tf_get_l2_descriptor_common((u32) l1_shared_buffer,
			current->mm);
	pa_info.certificate = (void *) workspace_addr;
	pa_info.parameters = (void *) __pa(l1_shared_buffer);
	pa_info.results = (void *) __pa(l1_shared_buffer);

	l1_shared_buffer->l1_shared_buffer_descr = descr & 0xFFF;

	l1_shared_buffer->backing_store_addr = sdp_backing_store_addr;
	l1_shared_buffer->backext_storage_addr = sdp_bkext_store_addr;
	l1_shared_buffer->workspace_addr = workspace_addr;
	l1_shared_buffer->workspace_size = workspace_size;

	dpr_info("%s(%p): System Configuration (%d bytes)\n",
		__func__, comm, conf_size);
	dpr_info("%s(%p): Starting PA (%d bytes)...\n",
		__func__, comm, pa_size);

	/*
	 * Make sure all data is visible to the secure world
	 */
	dmac_flush_range((void *)l1_shared_buffer,
		(void *)(((u32)l1_shared_buffer) + PAGE_SIZE));
	outer_clean_range(__pa(l1_shared_buffer),
		__pa(l1_shared_buffer) + PAGE_SIZE);

	if (pa_size > workspace_size) {
		dpr_err("%s(%p): PA size is incorrect (%x)\n",
			__func__, comm, pa_size);
		ret = -EFAULT;
		goto error1;
	}

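	/*
	 * Copy the PA binary from user space into the workspace through
	 * a temporary uncached mapping.
	 */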
	{
		void *tmp;
		tmp = ioremap_nocache(workspace_addr, pa_size);
		if (copy_from_user(tmp, pa_buffer, pa_size)) {
			iounmap(tmp);
			dpr_err("%s(%p): Cannot access PA buffer (%p)\n",
				__func__, comm, (void *) pa_buffer);
			ret = -EFAULT;
			goto error1;
		}
		iounmap(tmp);
	}

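	/* Clean pa_info to memory as well: it is read by physical address */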
	dmac_flush_range((void *)&pa_info,
		(void *)(((u32)&pa_info) + sizeof(struct tf_ns_pa_info)));
	outer_clean_range(__pa(&pa_info),
		__pa(&pa_info) + sizeof(struct tf_ns_pa_info));
	wmb();

	spin_lock(&(comm->lock));
	comm->l1_buffer = l1_shared_buffer;
	comm->l1_buffer->conf_descriptor = conf_descriptor;
	comm->l1_buffer->conf_offset     = conf_offset;
	comm->l1_buffer->conf_size       = conf_size;
	spin_unlock(&(comm->lock));
	l1_shared_buffer = NULL;

	/*
	 * Set the OS current time in the L1 shared buffer first. The secure
	 * world uses it as its boot reference time.
	 */
	tf_set_current_time(comm);

	/* Workaround for issue #6081 */
	disable_nonboot_cpus();

	/*
	 * Start the SMC PA
	 */
	ret = omap4_secure_dispatcher(API_HAL_LM_PALOAD_INDEX,
		FLAG_IRQ_ENABLE | FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 1,
		__pa(&pa_info), 0, 0, 0);
	if (ret != API_HAL_RET_VALUE_OK) {
		pr_err("SMC: Error while loading the PA [0x%x]\n", ret);
		goto error2;
	}

	/* Loop until the first S Yield RPC is received */
loop:
	mutex_lock(&(comm->rpc_mutex));

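	/* Serve any RPC the PA issued while starting up */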
	if (g_RPC_advancement == RPC_ADVANCEMENT_PENDING) {
		dpr_info("%s: Executing CMD=0x%x\n",
			__func__, comm->l1_buffer->rpc_command);

		switch (comm->l1_buffer->rpc_command) {
		case RPC_CMD_YIELD:
			dpr_info("%s: RPC_CMD_YIELD\n", __func__);
			set_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED,
				&(comm->flags));
			comm->l1_buffer->rpc_status = RPC_SUCCESS;
			break;

		case RPC_CMD_INIT:
			dpr_info("%s: RPC_CMD_INIT\n", __func__);
			comm->l1_buffer->rpc_status = tf_rpc_init(comm);
			break;

		case RPC_CMD_TRACE:
			comm->l1_buffer->rpc_status = tf_rpc_trace(comm);
			break;

		default:
			comm->l1_buffer->rpc_status = RPC_ERROR_BAD_PARAMETERS;
			break;
		}
		g_RPC_advancement = RPC_ADVANCEMENT_FINISHED;
	}

	mutex_unlock(&(comm->rpc_mutex));

	ret = tf_schedule_secure_world(comm);
	if (ret != 0) {
		pr_err("SMC: Error while loading the PA [0x%x]\n", ret);
		goto error2;
	}

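	/* Not yielded yet: keep serving RPCs and scheduling the PA */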
	if (!test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
		goto loop;

	set_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags);
	wake_up(&(comm->wait_queue));
	ret = 0;

	/* Workaround for issue #6081 */
	enable_nonboot_cpus();

	goto exit;

error2:
	/* Workaround for issue #6081 */
	enable_nonboot_cpus();

	spin_lock(&(comm->lock));
	l1_shared_buffer = comm->l1_buffer;
	comm->l1_buffer = NULL;
	spin_unlock(&(comm->lock));

error1:
	if (l1_shared_buffer != NULL) {
		__clear_page_locked(virt_to_page(l1_shared_buffer));
		internal_free_page((unsigned long) l1_shared_buffer);
	}

exit:
	tf_clock_timer_stop();
#ifdef CONFIG_SMP
	ret_affinity = sched_setaffinity(0, &saved_cpu_mask);
	if (ret_affinity != 0)
		dpr_err("sched_setaffinity #2 -> 0x%lX", ret_affinity);
#endif

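	/* Map any positive secure-world status to a Linux error code */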
	if (ret > 0)
		ret = -EFAULT;

	return ret;
}