int tf_rpc_execute(struct tf_comm *comm)
{
	u32 rpc_command;
	u32 rpc_error = RPC_NO;

#ifdef DEBUG
	BUG_ON((read_mpidr() & 0x00000003) != 0);
#endif

	/* Lock the RPC */
	mutex_lock(&(comm->rpc_mutex));

	rpc_command = g_RPC_parameters[1];

	if (g_RPC_advancement == RPC_ADVANCEMENT_PENDING) {
		dprintk(KERN_INFO "tf_rpc_execute: "
			"Executing CMD=0x%x\n",
			g_RPC_parameters[1]);

		switch (rpc_command) {
		case RPC_CMD_YIELD:
			dprintk(KERN_INFO "tf_rpc_execute: "
				"RPC_CMD_YIELD\n");
			rpc_error = RPC_YIELD;
			g_RPC_parameters[0] = RPC_SUCCESS;
			break;

		case RPC_CMD_TRACE:
			rpc_error = RPC_NON_YIELD;
			g_RPC_parameters[0] = tf_rpc_trace(comm);
			break;

		default:
			if (tf_crypto_execute_rpc(rpc_command,
					comm->pBuffer->rpc_cus_buffer) != 0)
				g_RPC_parameters[0] =
					RPC_ERROR_BAD_PARAMETERS;
			else
				g_RPC_parameters[0] = RPC_SUCCESS;
			rpc_error = RPC_NON_YIELD;
			break;
		}
		g_RPC_advancement = RPC_ADVANCEMENT_FINISHED;
	}

	mutex_unlock(&(comm->rpc_mutex));

	dprintk(KERN_INFO "tf_rpc_execute: Return 0x%x\n", rpc_error);

	return rpc_error;
}
int tf_rpc_execute(struct tf_comm *comm)
{
	u32 rpc_command;
	u32 rpc_error = RPC_NO;

#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
	BUG_ON((hard_smp_processor_id() & 0x00000003) != 0);
#endif

	/* Lock the RPC */
	mutex_lock(&(comm->rpc_mutex));

	rpc_command = comm->l1_buffer->rpc_command;

	if (g_RPC_advancement == RPC_ADVANCEMENT_PENDING) {
		dpr_info("%s: Executing CMD=0x%x\n",
			__func__, rpc_command);

		switch (rpc_command) {
		case RPC_CMD_YIELD:
			dpr_info("%s: RPC_CMD_YIELD\n", __func__);
			rpc_error = RPC_YIELD;
			comm->l1_buffer->rpc_status = RPC_SUCCESS;
			break;

		case RPC_CMD_TRACE:
			rpc_error = RPC_NON_YIELD;
			comm->l1_buffer->rpc_status = tf_rpc_trace(comm);
			break;

		default:
			if (tf_crypto_execute_rpc(rpc_command,
					comm->l1_buffer->rpc_cus_buffer) != 0)
				comm->l1_buffer->rpc_status =
					RPC_ERROR_BAD_PARAMETERS;
			else
				comm->l1_buffer->rpc_status = RPC_SUCCESS;
			rpc_error = RPC_NON_YIELD;
			break;
		}
		g_RPC_advancement = RPC_ADVANCEMENT_FINISHED;
	}

	mutex_unlock(&(comm->rpc_mutex));

	dpr_info("%s: Return 0x%x\n", __func__, rpc_error);

	return rpc_error;
}
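/*
 * Illustration only (not part of the driver): tf_rpc_execute() services one
 * RPC that the secure world has posted once g_RPC_advancement reaches
 * RPC_ADVANCEMENT_PENDING. A minimal sketch of a loop driving that handshake
 * might look like the hypothetical tf_service_loop() below. It assumes the
 * single-argument form of tf_schedule_secure_world() used by the newer
 * tf_start variant in this file; the older variant takes a second bool
 * argument. The real driver interleaves this with wait queues and does not
 * ship such a helper.
 */
static int tf_service_loop(struct tf_comm *comm)
{
	int ret;

	for (;;) {
		/* Handle any RPC the secure world left pending */
		if (tf_rpc_execute(comm) == RPC_YIELD)
			break;	/* secure world yielded the CPU */

		/* Re-enter the secure world so it can make progress */
		ret = tf_schedule_secure_world(comm);
		if (ret != 0)
			return ret;
	}
	return 0;
}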
/* Start the SMC PA */
int tf_start(struct tf_comm *comm,
	u32 workspace_addr, u32 workspace_size,
	u8 *pa_buffer, u32 pa_size,
	u32 conf_descriptor, u32 conf_offset, u32 conf_size)
{
	struct tf_l1_shared_buffer *l1_shared_buffer = NULL;
	struct tf_ns_pa_info pa_info;
	int ret;
	u32 descr;
	u32 sdp_backing_store_addr;
	u32 sdp_bkext_store_addr;
#ifdef CONFIG_SMP
	long ret_affinity;
	cpumask_t saved_cpu_mask;
	cpumask_t local_cpu_mask = CPU_MASK_NONE;

	/* OMAP4 Secure ROM Code can only be called from CPU0. */
	cpu_set(0, local_cpu_mask);
	sched_getaffinity(0, &saved_cpu_mask);
	ret_affinity = sched_setaffinity(0, &local_cpu_mask);
	if (ret_affinity != 0)
		dpr_err("sched_setaffinity #1 -> 0x%lX", ret_affinity);
#endif

	/* Carve the SDP stores out of the top of the workspace */
	workspace_size -= SZ_1M;
	sdp_backing_store_addr = workspace_addr + workspace_size;
	workspace_size -= 0x20000;
	sdp_bkext_store_addr = workspace_addr + workspace_size;

	if (test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
		dpr_err("%s(%p): The SMC PA is already started\n",
			__func__, comm);
		ret = -EFAULT;
		goto error1;
	}

	if (sizeof(struct tf_l1_shared_buffer) != PAGE_SIZE) {
		dpr_err("%s(%p): The L1 structure size is incorrect!\n",
			__func__, comm);
		ret = -EFAULT;
		goto error1;
	}

	ret = tf_se_init(comm, sdp_backing_store_addr,
		sdp_bkext_store_addr);
	if (ret != 0) {
		dpr_err("%s(%p): SE initialization failed\n",
			__func__, comm);
		goto error1;
	}

	l1_shared_buffer = (struct tf_l1_shared_buffer *)
		internal_get_zeroed_page(GFP_KERNEL);
	if (l1_shared_buffer == NULL) {
		dpr_err("%s(%p): Out of memory!\n", __func__, comm);
		ret = -ENOMEM;
		goto error1;
	}
	/* Ensure the page is mapped */
	__set_page_locked(virt_to_page(l1_shared_buffer));

	dpr_info("%s(%p): L1SharedBuffer={0x%08x, 0x%08x}\n",
		__func__, comm,
		(u32) l1_shared_buffer, (u32) __pa(l1_shared_buffer));

	descr = tf_get_l2_descriptor_common((u32) l1_shared_buffer,
		current->mm);
	pa_info.certificate = (void *) workspace_addr;
	pa_info.parameters = (void *) __pa(l1_shared_buffer);
	pa_info.results = (void *) __pa(l1_shared_buffer);

	l1_shared_buffer->l1_shared_buffer_descr = descr & 0xFFF;
	l1_shared_buffer->backing_store_addr = sdp_backing_store_addr;
	l1_shared_buffer->backext_storage_addr = sdp_bkext_store_addr;
	l1_shared_buffer->workspace_addr = workspace_addr;
	l1_shared_buffer->workspace_size = workspace_size;

	dpr_info("%s(%p): System Configuration (%d bytes)\n",
		__func__, comm, conf_size);
	dpr_info("%s(%p): Starting PA (%d bytes)...\n",
		__func__, comm, pa_size);

	/*
	 * Make sure all data is visible to the secure world
	 */
	dmac_flush_range((void *)l1_shared_buffer,
		(void *)(((u32)l1_shared_buffer) + PAGE_SIZE));
	outer_clean_range(__pa(l1_shared_buffer),
		__pa(l1_shared_buffer) + PAGE_SIZE);

	if (pa_size > workspace_size) {
		dpr_err("%s(%p): PA size is incorrect (%x)\n",
			__func__, comm, pa_size);
		ret = -EFAULT;
		goto error1;
	}

	/* Copy the PA image from userland into the workspace */
	{
		void *tmp;
		tmp = ioremap_nocache(workspace_addr, pa_size);
		if (copy_from_user(tmp, pa_buffer, pa_size)) {
			iounmap(tmp);
			dpr_err("%s(%p): Cannot access PA buffer (%p)\n",
				__func__, comm, (void *) pa_buffer);
			ret = -EFAULT;
			goto error1;
		}
		iounmap(tmp);
	}

	dmac_flush_range((void *)&pa_info,
		(void *)(((u32)&pa_info) + sizeof(struct tf_ns_pa_info)));
	outer_clean_range(__pa(&pa_info),
		__pa(&pa_info) + sizeof(struct tf_ns_pa_info));
	wmb();

	spin_lock(&(comm->lock));
	comm->l1_buffer = l1_shared_buffer;
	comm->l1_buffer->conf_descriptor = conf_descriptor;
	comm->l1_buffer->conf_offset = conf_offset;
	comm->l1_buffer->conf_size = conf_size;
	spin_unlock(&(comm->lock));
	l1_shared_buffer = NULL;

	/*
	 * Set the OS current time in the L1 shared buffer first. The secure
	 * world uses it as its boot reference time.
	 */
	tf_set_current_time(comm);

	/* Workaround for issue #6081 */
	disable_nonboot_cpus();

	/*
	 * Start the SMC PA
	 */
	ret = omap4_secure_dispatcher(API_HAL_LM_PALOAD_INDEX,
		FLAG_IRQ_ENABLE | FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 1,
		__pa(&pa_info), 0, 0, 0);
	if (ret != API_HAL_RET_VALUE_OK) {
		pr_err("SMC: Error while loading the PA [0x%x]\n", ret);
		goto error2;
	}

	/* Loop until the first S Yield RPC is received */
loop:
	mutex_lock(&(comm->rpc_mutex));

	if (g_RPC_advancement == RPC_ADVANCEMENT_PENDING) {
		dpr_info("%s: Executing CMD=0x%x\n",
			__func__, comm->l1_buffer->rpc_command);

		switch (comm->l1_buffer->rpc_command) {
		case RPC_CMD_YIELD:
			dpr_info("%s: RPC_CMD_YIELD\n", __func__);
			set_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED,
				&(comm->flags));
			comm->l1_buffer->rpc_status = RPC_SUCCESS;
			break;

		case RPC_CMD_INIT:
			dpr_info("%s: RPC_CMD_INIT\n", __func__);
			comm->l1_buffer->rpc_status = tf_rpc_init(comm);
			break;

		case RPC_CMD_TRACE:
			comm->l1_buffer->rpc_status = tf_rpc_trace(comm);
			break;

		default:
			comm->l1_buffer->rpc_status =
				RPC_ERROR_BAD_PARAMETERS;
			break;
		}
		g_RPC_advancement = RPC_ADVANCEMENT_FINISHED;
	}

	mutex_unlock(&(comm->rpc_mutex));

	ret = tf_schedule_secure_world(comm);
	if (ret != 0) {
		pr_err("SMC: Error while loading the PA [0x%x]\n", ret);
		goto error2;
	}

	if (!test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
		goto loop;

	set_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags);
	wake_up(&(comm->wait_queue));
	ret = 0;

	/* Workaround for issue #6081 */
	enable_nonboot_cpus();

	goto exit;

error2:
	/* Workaround for issue #6081 */
	enable_nonboot_cpus();

	spin_lock(&(comm->lock));
	l1_shared_buffer = comm->l1_buffer;
	comm->l1_buffer = NULL;
	spin_unlock(&(comm->lock));

error1:
	if (l1_shared_buffer != NULL) {
		__clear_page_locked(virt_to_page(l1_shared_buffer));
		internal_free_page((unsigned long) l1_shared_buffer);
	}

exit:
#ifdef CONFIG_SMP
	ret_affinity = sched_setaffinity(0, &saved_cpu_mask);
	if (ret_affinity != 0)
		dpr_err("sched_setaffinity #2 -> 0x%lX", ret_affinity);
#endif

	if (ret > 0)
		ret = -EFAULT;

	return ret;
}
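/*
 * Illustration only (not part of the driver): the affinity dance at the top
 * of tf_start is a reusable pattern, because the OMAP4 Secure ROM code may
 * only be invoked from CPU0. A hypothetical helper isolating it, written
 * against the same legacy cpumask API this driver uses, could look like
 * this; the original mask is restored on a best-effort basis afterwards.
 */
static long tf_call_on_cpu0(struct tf_comm *comm,
	int (*fn)(struct tf_comm *), int *fn_ret)
{
	long err;
	cpumask_t saved_cpu_mask;
	cpumask_t cpu0_mask = CPU_MASK_NONE;

	/* Pin the current task to CPU0, remembering its old mask */
	cpu_set(0, cpu0_mask);
	sched_getaffinity(0, &saved_cpu_mask);
	err = sched_setaffinity(0, &cpu0_mask);
	if (err != 0)
		return err;

	*fn_ret = fn(comm);	/* runs with the task bound to CPU0 */

	/* Best effort: restore the caller's original affinity */
	return sched_setaffinity(0, &saved_cpu_mask);
}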
/* Start the SMC PA */
int tf_start(struct tf_comm *comm,
	u32 workspace_addr, u32 workspace_size,
	u8 *pa_buffer, u32 pa_size,
	u8 *properties_buffer, u32 properties_length)
{
	struct tf_init_buffer *init_shared_buffer = NULL;
	struct tf_l1_shared_buffer *l1_shared_buffer = NULL;
	u32 l1_shared_buffer_descr;
	struct tf_ns_pa_info pa_info;
	int ret;
	u32 descr;
	u32 sdp_backing_store_addr;
	u32 sdp_bkext_store_addr;
#ifdef CONFIG_SMP
	long ret_affinity;
	cpumask_t saved_cpu_mask;
	cpumask_t local_cpu_mask = CPU_MASK_NONE;

	/* OMAP4 Secure ROM Code can only be called from CPU0. */
	cpu_set(0, local_cpu_mask);
	sched_getaffinity(0, &saved_cpu_mask);
	ret_affinity = sched_setaffinity(0, &local_cpu_mask);
	if (ret_affinity != 0)
		dprintk(KERN_ERR "sched_setaffinity #1 -> 0x%lX",
			ret_affinity);
#endif

	tf_l4sec_clkdm_wakeup(true);

	/* Carve the SDP stores out of the top of the workspace */
	workspace_size -= SZ_1M;
	sdp_backing_store_addr = workspace_addr + workspace_size;
	workspace_size -= 0x20000;
	sdp_bkext_store_addr = workspace_addr + workspace_size;

	/*
	 * Implementation notes:
	 *
	 * 1/ The PA buffer (pa_buffer) is now owned by this function.
	 *    In case of error, it is responsible for releasing the buffer.
	 *
	 * 2/ The PA Info and PA Buffer will be freed through a RPC call
	 *    at the beginning of the PA entry in the SE.
	 */

	if (test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
		dprintk(KERN_ERR "tf_start(%p): "
			"The SMC PA is already started\n", comm);
		ret = -EFAULT;
		goto error1;
	}

	if (sizeof(struct tf_l1_shared_buffer) != PAGE_SIZE) {
		dprintk(KERN_ERR "tf_start(%p): "
			"The L1 structure size is incorrect!\n", comm);
		ret = -EFAULT;
		goto error1;
	}

	ret = tf_se_init(comm, sdp_backing_store_addr,
		sdp_bkext_store_addr);
	if (ret != 0) {
		dprintk(KERN_ERR "tf_start(%p): "
			"SE initialization failed\n", comm);
		goto error1;
	}

	init_shared_buffer = (struct tf_init_buffer *)
		internal_get_zeroed_page(GFP_KERNEL);
	if (init_shared_buffer == NULL) {
		dprintk(KERN_ERR "tf_start(%p): "
			"Out of memory!\n", comm);
		ret = -ENOMEM;
		goto error1;
	}
	/* Ensure the page is mapped */
	__set_page_locked(virt_to_page(init_shared_buffer));

	l1_shared_buffer = (struct tf_l1_shared_buffer *)
		internal_get_zeroed_page(GFP_KERNEL);
	if (l1_shared_buffer == NULL) {
		dprintk(KERN_ERR "tf_start(%p): "
			"Out of memory!\n", comm);
		ret = -ENOMEM;
		goto error1;
	}
	/* Ensure the page is mapped */
	__set_page_locked(virt_to_page(l1_shared_buffer));

	dprintk(KERN_INFO "tf_start(%p): "
		"L0SharedBuffer={0x%08x, 0x%08x}\n", comm,
		(u32) init_shared_buffer, (u32) __pa(init_shared_buffer));
	dprintk(KERN_INFO "tf_start(%p): "
		"L1SharedBuffer={0x%08x, 0x%08x}\n", comm,
		(u32) l1_shared_buffer, (u32) __pa(l1_shared_buffer));

	descr = tf_get_l2_descriptor_common((u32) l1_shared_buffer,
		current->mm);
	l1_shared_buffer_descr = (
		((u32) __pa(l1_shared_buffer) & 0xFFFFF000) |
		(descr & 0xFFF));

	pa_info.certificate = (void *) __pa(pa_buffer);
	pa_info.parameters = (void *) __pa(init_shared_buffer);
	pa_info.results = (void *) __pa(init_shared_buffer);

	init_shared_buffer->l1_shared_buffer_descr = l1_shared_buffer_descr;
	init_shared_buffer->backing_store_addr = sdp_backing_store_addr;
	init_shared_buffer->backext_storage_addr = sdp_bkext_store_addr;
	init_shared_buffer->workspace_addr = workspace_addr;
	init_shared_buffer->workspace_size = workspace_size;
	init_shared_buffer->properties_length = properties_length;

	if (properties_length == 0) {
		init_shared_buffer->properties_buffer[0] = 0;
	} else {
		/* Test for overflow (pointer wrap-around) */
		if ((init_shared_buffer->properties_buffer +
			properties_length >
			init_shared_buffer->properties_buffer) &&
		    (properties_length <=
			init_shared_buffer->properties_length)) {
			memcpy(init_shared_buffer->properties_buffer,
				properties_buffer,
				properties_length);
		} else {
			dprintk(KERN_INFO "tf_start(%p): "
				"Configuration buffer size from userland is "
				"incorrect (%d, %d)\n",
				comm, (u32) properties_length,
				init_shared_buffer->properties_length);
			ret = -EFAULT;
			goto error1;
		}
	}

	dprintk(KERN_INFO "tf_start(%p): "
		"System Configuration (%d bytes)\n", comm,
		init_shared_buffer->properties_length);
	dprintk(KERN_INFO "tf_start(%p): "
		"Starting PA (%d bytes)...\n", comm, pa_size);

	/*
	 * Make sure all data is visible to the secure world
	 */
	dmac_flush_range((void *)init_shared_buffer,
		(void *)(((u32)init_shared_buffer) + PAGE_SIZE));
	outer_clean_range(__pa(init_shared_buffer),
		__pa(init_shared_buffer) + PAGE_SIZE);
	dmac_flush_range((void *)pa_buffer,
		(void *)(pa_buffer + pa_size));
	outer_clean_range(__pa(pa_buffer), __pa(pa_buffer) + pa_size);
	dmac_flush_range((void *)&pa_info,
		(void *)(((u32)&pa_info) + sizeof(struct tf_ns_pa_info)));
	outer_clean_range(__pa(&pa_info),
		__pa(&pa_info) + sizeof(struct tf_ns_pa_info));
	wmb();

	spin_lock(&(comm->lock));
	comm->init_shared_buffer = init_shared_buffer;
	comm->pBuffer = l1_shared_buffer;
	spin_unlock(&(comm->lock));
	init_shared_buffer = NULL;
	l1_shared_buffer = NULL;

	/*
	 * Set the OS current time in the L1 shared buffer first. The secure
	 * world uses it as its boot reference time.
	 */
	tf_set_current_time(comm);

	/* Workaround for issue #6081 */
	if ((omap_rev() & 0xFFF000FF) == OMAP443X_CLASS)
		disable_nonboot_cpus();

	/*
	 * Start the SMC PA
	 */
	ret = omap4_secure_dispatcher(API_HAL_LM_PALOAD_INDEX,
		FLAG_IRQ_ENABLE | FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 1,
		__pa(&pa_info), 0, 0, 0);
	if (ret != API_HAL_RET_VALUE_OK) {
		printk(KERN_ERR "SMC: Error while loading the PA [0x%x]\n",
			ret);
		goto error2;
	}

	/* Loop until the first S Yield RPC is received */
loop:
	mutex_lock(&(comm->rpc_mutex));

	if (g_RPC_advancement == RPC_ADVANCEMENT_PENDING) {
		dprintk(KERN_INFO "tf_rpc_execute: "
			"Executing CMD=0x%x\n",
			g_RPC_parameters[1]);

		switch (g_RPC_parameters[1]) {
		case RPC_CMD_YIELD:
			dprintk(KERN_INFO "tf_rpc_execute: "
				"RPC_CMD_YIELD\n");
			set_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED,
				&(comm->flags));
			g_RPC_parameters[0] = RPC_SUCCESS;
			break;

		case RPC_CMD_INIT:
			dprintk(KERN_INFO "tf_rpc_execute: "
				"RPC_CMD_INIT\n");
			g_RPC_parameters[0] = tf_rpc_init(comm);
			break;

		case RPC_CMD_TRACE:
			g_RPC_parameters[0] = tf_rpc_trace(comm);
			break;

		default:
			g_RPC_parameters[0] = RPC_ERROR_BAD_PARAMETERS;
			break;
		}
		g_RPC_advancement = RPC_ADVANCEMENT_FINISHED;
	}

	mutex_unlock(&(comm->rpc_mutex));

	ret = tf_schedule_secure_world(comm, false);
	if (ret != 0) {
		printk(KERN_ERR "SMC: Error while loading the PA [0x%x]\n",
			ret);
		goto error2;
	}

	if (!test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
		goto loop;

	set_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags);
	wake_up(&(comm->wait_queue));
	ret = 0;

#if 0
	{
		void *workspace_va;
		workspace_va = ioremap(workspace_addr, workspace_size);
		printk(KERN_INFO
			"Read first word of workspace [0x%x]\n",
			*(uint32_t *)workspace_va);
	}
#endif

	/* Workaround for issue #6081 */
	if ((omap_rev() & 0xFFF000FF) == OMAP443X_CLASS)
		enable_nonboot_cpus();

	goto exit;

error2:
	/* Workaround for issue #6081 */
	if ((omap_rev() & 0xFFF000FF) == OMAP443X_CLASS)
		enable_nonboot_cpus();

	spin_lock(&(comm->lock));
	l1_shared_buffer = comm->pBuffer;
	init_shared_buffer = comm->init_shared_buffer;
	comm->pBuffer = NULL;
	comm->init_shared_buffer = NULL;
	spin_unlock(&(comm->lock));

error1:
	if (init_shared_buffer != NULL) {
		__clear_page_locked(virt_to_page(init_shared_buffer));
		internal_free_page((unsigned long) init_shared_buffer);
	}
	if (l1_shared_buffer != NULL) {
		__clear_page_locked(virt_to_page(l1_shared_buffer));
		internal_free_page((unsigned long) l1_shared_buffer);
	}

exit:
#ifdef CONFIG_SMP
	ret_affinity = sched_setaffinity(0, &saved_cpu_mask);
	if (ret_affinity != 0)
		dprintk(KERN_ERR "sched_setaffinity #2 -> 0x%lX",
			ret_affinity);
#endif

	tf_l4sec_clkdm_allow_idle(true);

	if (ret > 0)
		ret = -EFAULT;

	return ret;
}
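/*
 * Illustration only (not part of the driver): both tf_start variants repeat
 * the same cache-maintenance sequence before handing a buffer to the secure
 * world: flush the inner (L1) cache lines by virtual address, clean the
 * outer (L2/PL310) cache by physical address, then order the writes with a
 * barrier before the SMC is issued. A hypothetical helper capturing that
 * pattern, under the 32-bit ARM assumptions of this file:
 */
static void tf_clean_buffer_for_secure(void *vaddr, size_t size)
{
	/* Flush inner cache lines covering the buffer */
	dmac_flush_range(vaddr, (void *)((u32)vaddr + size));
	/* Clean the outer cache for the same physical range */
	outer_clean_range(__pa(vaddr), __pa(vaddr) + size);
	/* Ensure completion before the secure world is entered */
	wmb();
}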