static int tf_ctrl_device_open(struct inode *inode, struct file *file)
{
	int error;
	struct tf_connection *connection = NULL;

	dpr_info("%s(%u:%u, %p)\n",
		__func__, imajor(inode), iminor(inode), file);

	/* Dummy lseek for non-seekable driver */
	error = nonseekable_open(inode, file);
	if (error != 0) {
		dpr_err("%s(%p): nonseekable_open failed (error %d)!\n",
			__func__, file, error);
		goto error;
	}

#ifndef CONFIG_ANDROID
	/*
	 * Check file flags. We only authorize the O_RDWR access.
	 */
	if (file->f_flags != O_RDWR) {
		dpr_err("%s(%p): Invalid access mode %u\n",
			__func__, file, file->f_flags);
		error = -EACCES;
		goto error;
	}
#endif

	error = tf_ctrl_check_omap_type();
	if (error <= 0)
		return error;

	error = tf_open(tf_get_device(), file, &connection);
	if (error != 0) {
		dpr_err("%s(%p): tf_open failed (error %d)!\n",
			__func__, file, error);
		goto error;
	}

	file->private_data = connection;

	/*
	 * Successful completion.
	 */
	dpr_info("%s(%p): Success\n", __func__, file);
	return 0;

	/*
	 * Error handling.
	 */
error:
	tf_close(connection);
	dpr_info("%s(%p): Failure (error %d)\n", __func__, file, error);
	return error;
}

static int tf_ctrl_device_release(struct inode *inode, struct file *file)
{
	struct tf_connection *connection;

	dpr_info("%s(%u:%u, %p)\n",
		__func__, imajor(inode), iminor(inode), file);

	connection = tf_conn_from_file(file);
	tf_close(connection);

	dpr_info("%s(%p): Success\n", __func__, file);
	return 0;
}

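/*
 * Hedged sketch (not part of this section): handlers like the open,
 * release, and ioctl routines here are typically wired into a
 * file_operations table along the following lines. The table name and
 * the exact set of hooks are assumptions based on the handler
 * signatures in this file.
 */
static const struct file_operations tf_ctrl_device_file_ops = {
	.owner = THIS_MODULE,
	.open = tf_ctrl_device_open,
	.release = tf_ctrl_device_release,
	.unlocked_ioctl = tf_ctrl_device_ioctl,
	.llseek = no_llseek,
};
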
int tf_rpc_execute(struct tf_comm *comm)
{
	u32 rpc_command;
	u32 rpc_error = RPC_NO;

#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
	BUG_ON((hard_smp_processor_id() & 0x00000003) != 0);
#endif

	/* Lock the RPC */
	mutex_lock(&(comm->rpc_mutex));

	rpc_command = comm->l1_buffer->rpc_command;

	if (g_RPC_advancement == RPC_ADVANCEMENT_PENDING) {
		dpr_info("%s: Executing CMD=0x%x\n", __func__, rpc_command);

		switch (rpc_command) {
		case RPC_CMD_YIELD:
			dpr_info("%s: RPC_CMD_YIELD\n", __func__);
			rpc_error = RPC_YIELD;
			comm->l1_buffer->rpc_status = RPC_SUCCESS;
			break;

		case RPC_CMD_TRACE:
			rpc_error = RPC_NON_YIELD;
			comm->l1_buffer->rpc_status = tf_rpc_trace(comm);
			break;

		default:
			if (tf_crypto_execute_rpc(rpc_command,
					comm->l1_buffer->rpc_cus_buffer) != 0)
				comm->l1_buffer->rpc_status =
					RPC_ERROR_BAD_PARAMETERS;
			else
				comm->l1_buffer->rpc_status = RPC_SUCCESS;
			rpc_error = RPC_NON_YIELD;
			break;
		}

		g_RPC_advancement = RPC_ADVANCEMENT_FINISHED;
	}

	mutex_unlock(&(comm->rpc_mutex));

	dpr_info("%s: Return 0x%x\n", __func__, rpc_error);
	return rpc_error;
}

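/*
 * Hedged sketch of the RPC advancement handshake consumed above: the
 * secure world posts a command and g_RPC_advancement moves to PENDING;
 * the normal world executes it under rpc_mutex and marks it FINISHED.
 * The enumerator names match their use in this file, but the concrete
 * values and the NONE state are assumptions.
 */
enum {
	RPC_ADVANCEMENT_NONE,		/* no RPC in flight */
	RPC_ADVANCEMENT_PENDING,	/* secure world posted a command */
	RPC_ADVANCEMENT_FINISHED,	/* normal world completed it */
};
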
int tf_pm_resume(struct tf_comm *comm)
{
	dpr_info("%s()\n", __func__);
	tf_aes_pm_resume();
	return 0;
}

/* Check protocol version returned by the PA */
static u32 tf_rpc_init(struct tf_comm *comm)
{
	u32 protocol_version;
	u32 rpc_error = RPC_SUCCESS;

	dpr_info("%s(%p)\n", __func__, comm);

	spin_lock(&(comm->lock));

#if 0
	dmac_flush_range((void *)comm->l1_buffer,
		(void *)(((u32)(comm->l1_buffer)) + PAGE_SIZE));
	outer_inv_range(__pa(comm->l1_buffer),
		__pa(comm->l1_buffer) + PAGE_SIZE);
#endif

	protocol_version = comm->l1_buffer->protocol_version;

	if ((GET_PROTOCOL_MAJOR_VERSION(protocol_version))
			!= TF_S_PROTOCOL_MAJOR_VERSION) {
		dpr_err("SMC: Unsupported SMC Protocol PA Major Version "
			"(0x%02x, expected 0x%02x)!\n",
			GET_PROTOCOL_MAJOR_VERSION(protocol_version),
			TF_S_PROTOCOL_MAJOR_VERSION);
		rpc_error = RPC_ERROR_CONNECTION_PROTOCOL;
	} else {
		rpc_error = RPC_SUCCESS;
	}

	spin_unlock(&(comm->lock));

	return rpc_error;
}

static int tf_ctrl_check_omap_type(void)
{
	/* No need to do anything on a GP device */
	switch (omap_type()) {
	case OMAP2_DEVICE_TYPE_GP:
		dpr_info("SMC: Running on a GP device\n");
		return 0;

	case OMAP2_DEVICE_TYPE_EMU:
	case OMAP2_DEVICE_TYPE_SEC:
	/*case OMAP2_DEVICE_TYPE_TEST:*/
		dpr_info("SMC: Running on an EMU or HS device\n");
		return 1;

	default:
		pr_err("SMC: unknown omap type %x\n", omap_type());
		return -EFAULT;
	}
}

void tf_terminate(struct tf_comm *comm)
{
	dpr_info("%s(%p)\n", __func__, comm);

	spin_lock(&(comm->lock));
	tf_crypto_terminate();
	spin_unlock(&(comm->lock));
}

static u32 tf_rpc_trace(struct tf_comm *comm)
{
	dpr_info("%s(%p)\n", __func__, comm);

#ifdef CONFIG_SECURE_TRACE
	spin_lock(&(comm->lock));
	pr_info("SMC PA: %s", comm->l1_buffer->rpc_trace_buffer);
	spin_unlock(&(comm->lock));
#endif
	return RPC_SUCCESS;
}

/*
 * Perform a Secure World shutdown operation.
 * The routine does not return if the operation succeeds.
 * The routine returns an appropriate error code if
 * the operation fails.
 */
int tf_pm_shutdown(struct tf_comm *comm)
{
	int error;
	union tf_command command;
	union tf_answer answer;

	dpr_info("%s()\n", __func__);

	memset(&command, 0, sizeof(command));

	command.header.message_type = TF_MESSAGE_TYPE_MANAGEMENT;
	command.header.message_size =
		(sizeof(struct tf_command_management) -
			sizeof(struct tf_command_header)) / sizeof(u32);
	command.management.command = TF_MANAGEMENT_SHUTDOWN;

	error = tf_send_receive(comm, &command, &answer, NULL, false);
	if (error != 0) {
		dpr_err("%s(): tf_send_receive failed (error %d)!\n",
			__func__, error);
		return error;
	}

#ifdef CONFIG_TF_DRIVER_DEBUG_SUPPORT
	if (answer.header.error_code != 0)
		dpr_err("tf_driver: shutdown failed.\n");
	else
		dpr_info("tf_driver: shutdown succeeded.\n");
#endif

	return answer.header.error_code;
}

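/*
 * Minimal illustrative sketch (not part of the driver): message_size,
 * as computed above, is expressed in 32-bit words and excludes the
 * command header. This hypothetical helper merely asserts, at compile
 * time, the implicit assumption that the management payload is a whole
 * number of words.
 */
static inline void tf_check_management_msg_size(void)
{
	BUILD_BUG_ON((sizeof(struct tf_command_management) -
		      sizeof(struct tf_command_header)) % sizeof(u32) != 0);
}
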
static int __init tf_early_init(void)
{
	g_secure_task_id = 0;

	dpr_info("SMC early init\n");

	smc_l4_sec_clkdm = clkdm_lookup("l4_secure_clkdm");
	if (smc_l4_sec_clkdm == NULL)
		return -EFAULT;

#ifdef CONFIG_HAS_WAKELOCK
	wake_lock_init(&g_tf_wake_lock, WAKE_LOCK_SUSPEND,
		TF_DEVICE_BASE_NAME);
#endif
	return 0;
}

int tf_pm_hibernate(struct tf_comm *comm)
{
	struct tf_device *dev = tf_get_device();

	dpr_info("%s()\n", __func__);

	/*
	 * As we enter CORE OFF, the keys are going to be cleared.
	 * Reset the global key contexts.
	 * When the system leaves CORE OFF, this will force the driver to go
	 * through the secure world, which will reconfigure the accelerators.
	 */
	dev->aes1_key_context = 0;
	dev->des_key_context = 0;
#ifndef CONFIG_SMC_KERNEL_CRYPTO
	dev->sham1_is_public = false;
#endif
	return 0;
}

static long tf_ctrl_device_ioctl(struct file *file, unsigned int ioctl_num,
	unsigned long ioctl_param)
{
	int result = S_SUCCESS;
	struct tf_pa_ctrl pa_ctrl;
	struct tf_device *dev = tf_get_device();

	dpr_info("%s(%p, %u, %p)\n",
		__func__, file, ioctl_num, (void *) ioctl_param);

	mutex_lock(&dev->dev_mutex);

	if (ioctl_num != IOCTL_TF_PA_CTRL) {
		dpr_err("%s(%p): ioctl number is invalid (%p)\n",
			__func__, file, (void *) ioctl_num);
		result = -EFAULT;
		goto exit;
	}

	if ((ioctl_param & 0x3) != 0) {
		dpr_err("%s(%p): ioctl command message pointer is not word "
			"aligned (%p)\n",
			__func__, file, (void *) ioctl_param);
		result = -EFAULT;
		goto exit;
	}

	if (copy_from_user(&pa_ctrl, (struct tf_pa_ctrl *) ioctl_param,
			sizeof(struct tf_pa_ctrl))) {
		dpr_err("%s(%p): cannot access ioctl parameter (%p)\n",
			__func__, file, (void *) ioctl_param);
		result = -EFAULT;
		goto exit;
	}

	switch (pa_ctrl.nPACommand) {
	case TF_PA_CTRL_START: {
		struct tf_shmem_desc *shmem_desc = NULL;
		u32 shared_mem_descriptors[TF_MAX_COARSE_PAGES];
		u32 descriptor_count;
		u32 offset;
		struct tf_connection *connection;

		dpr_info("%s(%p): Start the SMC PA (%d bytes) "
			"with conf (%d bytes)\n",
			__func__, file, pa_ctrl.pa_size, pa_ctrl.conf_size);

		connection = tf_conn_from_file(file);

		if (dev->workspace_addr == 0) {
			result = -ENOMEM;
			goto start_exit;
		}

		result = tf_validate_shmem_and_flags(
				(u32) pa_ctrl.conf_buffer,
				pa_ctrl.conf_size,
				TF_SHMEM_TYPE_READ);
		if (result != 0)
			goto start_exit;

		offset = 0;
		result = tf_map_shmem(
				connection,
				(u32) pa_ctrl.conf_buffer,
				TF_SHMEM_TYPE_READ,
				true, /* in user space */
				shared_mem_descriptors,
				&offset,
				pa_ctrl.conf_size,
				&shmem_desc,
				&descriptor_count);
		if (result != 0)
			goto start_exit;

		if (descriptor_count > 1) {
			dpr_err("%s(%p): configuration file is "
				"too long (%d)\n",
				__func__, file, descriptor_count);
			result = -ENOMEM;
			goto start_exit;
		}

		result = tf_start(&dev->sm,
			dev->workspace_addr,
			dev->workspace_size,
			pa_ctrl.pa_buffer,
			pa_ctrl.pa_size,
			shared_mem_descriptors[0],
			offset,
			pa_ctrl.conf_size);
		if (result)
			dpr_err("SMC: start failed\n");
		else
			dpr_info("SMC: started\n");

start_exit:
		tf_unmap_shmem(connection, shmem_desc, true); /* full cleanup */
		break;
	}

	case TF_PA_CTRL_STOP:
		dpr_info("%s(%p): Stop the SMC PA\n", __func__, file);

		result = tf_power_management(&dev->sm,
			TF_POWER_OPERATION_SHUTDOWN);
		if (result)
			dpr_err("SMC: stop failed [0x%x]\n", result);
		else
			dpr_info("SMC: stopped\n");
		break;

	default:
		result = -EOPNOTSUPP;
		break;
	}

exit:
	mutex_unlock(&dev->dev_mutex);
	return result;
}

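/*
 * Hedged user-space sketch of driving IOCTL_TF_PA_CTRL above. The
 * tf_pa_ctrl fields (nPACommand, pa_buffer, pa_size, conf_buffer,
 * conf_size) are inferred from their use in the handler; the device
 * node name "/dev/tf_ctrl" and the header providing the ioctl
 * definitions are assumptions.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "tf_protocol.h" /* assumed header: tf_pa_ctrl, IOCTL_TF_PA_CTRL */

static int tf_load_pa(void *pa, unsigned int pa_size,
		      void *conf, unsigned int conf_size)
{
	struct tf_pa_ctrl ctrl = {
		.nPACommand  = TF_PA_CTRL_START,
		.pa_buffer   = pa,
		.pa_size     = pa_size,
		.conf_buffer = conf,
		.conf_size   = conf_size,
	};
	int fd;
	int ret;

	fd = open("/dev/tf_ctrl", O_RDWR); /* assumed node name */
	if (fd < 0)
		return -1;

	/* The handler rejects parameter pointers that are not word aligned. */
	ret = ioctl(fd, IOCTL_TF_PA_CTRL, &ctrl);
	close(fd);
	return ret;
}
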
/* Start the SMC PA */
int tf_start(struct tf_comm *comm,
	u32 workspace_addr, u32 workspace_size,
	u8 *pa_buffer, u32 pa_size,
	u32 conf_descriptor, u32 conf_offset, u32 conf_size)
{
	struct tf_l1_shared_buffer *l1_shared_buffer = NULL;
	struct tf_ns_pa_info pa_info;
	int ret;
	u32 descr;
	u32 sdp_backing_store_addr;
	u32 sdp_bkext_store_addr;
#ifdef CONFIG_SMP
	long ret_affinity;
	cpumask_t saved_cpu_mask;
	cpumask_t local_cpu_mask = CPU_MASK_NONE;

	/* OMAP4 Secure ROM Code can only be called from CPU0. */
	cpu_set(0, local_cpu_mask);
	sched_getaffinity(0, &saved_cpu_mask);
	ret_affinity = sched_setaffinity(0, &local_cpu_mask);
	if (ret_affinity != 0)
		dpr_err("sched_setaffinity #1 -> 0x%lX", ret_affinity);
#endif

	workspace_size -= SZ_1M;
	sdp_backing_store_addr = workspace_addr + workspace_size;
	workspace_size -= 0x20000;
	sdp_bkext_store_addr = workspace_addr + workspace_size;

	if (test_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags)) {
		dpr_err("%s(%p): The SMC PA is already started\n",
			__func__, comm);
		ret = -EFAULT;
		goto error1;
	}

	if (sizeof(struct tf_l1_shared_buffer) != PAGE_SIZE) {
		dpr_err("%s(%p): The L1 structure size is incorrect!\n",
			__func__, comm);
		ret = -EFAULT;
		goto error1;
	}

	ret = tf_se_init(comm, sdp_backing_store_addr, sdp_bkext_store_addr);
	if (ret != 0) {
		dpr_err("%s(%p): SE initialization failed\n", __func__, comm);
		goto error1;
	}

	l1_shared_buffer = (struct tf_l1_shared_buffer *)
		internal_get_zeroed_page(GFP_KERNEL);
	if (l1_shared_buffer == NULL) {
		dpr_err("%s(%p): Out of memory!\n", __func__, comm);
		ret = -ENOMEM;
		goto error1;
	}

	/* Ensure the page is mapped */
	__set_page_locked(virt_to_page(l1_shared_buffer));

	dpr_info("%s(%p): L1SharedBuffer={0x%08x, 0x%08x}\n",
		__func__, comm,
		(u32) l1_shared_buffer, (u32) __pa(l1_shared_buffer));

	descr = tf_get_l2_descriptor_common((u32) l1_shared_buffer,
		current->mm);
	pa_info.certificate = (void *) workspace_addr;
	pa_info.parameters = (void *) __pa(l1_shared_buffer);
	pa_info.results = (void *) __pa(l1_shared_buffer);

	l1_shared_buffer->l1_shared_buffer_descr = descr & 0xFFF;
	l1_shared_buffer->backing_store_addr = sdp_backing_store_addr;
	l1_shared_buffer->backext_storage_addr = sdp_bkext_store_addr;
	l1_shared_buffer->workspace_addr = workspace_addr;
	l1_shared_buffer->workspace_size = workspace_size;

	dpr_info("%s(%p): System Configuration (%d bytes)\n",
		__func__, comm, conf_size);
	dpr_info("%s(%p): Starting PA (%d bytes)...\n",
		__func__, comm, pa_size);

	/*
	 * Make sure all data is visible to the secure world.
	 */
	dmac_flush_range((void *)l1_shared_buffer,
		(void *)(((u32)l1_shared_buffer) + PAGE_SIZE));
	outer_clean_range(__pa(l1_shared_buffer),
		__pa(l1_shared_buffer) + PAGE_SIZE);

	if (pa_size > workspace_size) {
		dpr_err("%s(%p): PA size is incorrect (%x)\n",
			__func__, comm, pa_size);
		ret = -EFAULT;
		goto error1;
	}

	{
		void *tmp;

		tmp = ioremap_nocache(workspace_addr, pa_size);
		if (copy_from_user(tmp, pa_buffer, pa_size)) {
			iounmap(tmp);
			dpr_err("%s(%p): Cannot access PA buffer (%p)\n",
				__func__, comm, (void *) pa_buffer);
			ret = -EFAULT;
			goto error1;
		}
		iounmap(tmp);
	}

	dmac_flush_range((void *)&pa_info,
		(void *)(((u32)&pa_info) + sizeof(struct tf_ns_pa_info)));
	outer_clean_range(__pa(&pa_info),
		__pa(&pa_info) + sizeof(struct tf_ns_pa_info));
	wmb();

	spin_lock(&(comm->lock));
	comm->l1_buffer = l1_shared_buffer;
	comm->l1_buffer->conf_descriptor = conf_descriptor;
	comm->l1_buffer->conf_offset = conf_offset;
	comm->l1_buffer->conf_size = conf_size;
	spin_unlock(&(comm->lock));
	l1_shared_buffer = NULL;

	/*
	 * Set the OS current time in the L1 shared buffer first. The secure
	 * world uses it as its boot reference time.
	 */
	tf_set_current_time(comm);

	/* Workaround for issue #6081 */
	disable_nonboot_cpus();

	/*
	 * Start the SMC PA.
	 */
	ret = omap4_secure_dispatcher(API_HAL_LM_PALOAD_INDEX,
		FLAG_IRQ_ENABLE | FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL,
		1, __pa(&pa_info), 0, 0, 0);
	if (ret != API_HAL_RET_VALUE_OK) {
		pr_err("SMC: Error while loading the PA [0x%x]\n", ret);
		goto error2;
	}

	/* Loop until the first S Yield RPC is received */
loop:
	mutex_lock(&(comm->rpc_mutex));

	if (g_RPC_advancement == RPC_ADVANCEMENT_PENDING) {
		dpr_info("%s: Executing CMD=0x%x\n",
			__func__, comm->l1_buffer->rpc_command);

		switch (comm->l1_buffer->rpc_command) {
		case RPC_CMD_YIELD:
			dpr_info("%s: RPC_CMD_YIELD\n", __func__);
			set_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED,
				&(comm->flags));
			comm->l1_buffer->rpc_status = RPC_SUCCESS;
			break;

		case RPC_CMD_INIT:
			dpr_info("%s: RPC_CMD_INIT\n", __func__);
			comm->l1_buffer->rpc_status = tf_rpc_init(comm);
			break;

		case RPC_CMD_TRACE:
			comm->l1_buffer->rpc_status = tf_rpc_trace(comm);
			break;

		default:
			comm->l1_buffer->rpc_status =
				RPC_ERROR_BAD_PARAMETERS;
			break;
		}
		g_RPC_advancement = RPC_ADVANCEMENT_FINISHED;
	}

	mutex_unlock(&(comm->rpc_mutex));

	ret = tf_schedule_secure_world(comm);
	if (ret != 0) {
		pr_err("SMC: Error while loading the PA [0x%x]\n", ret);
		goto error2;
	}

	if (!test_bit(TF_COMM_FLAG_L1_SHARED_ALLOCATED, &(comm->flags)))
		goto loop;

	set_bit(TF_COMM_FLAG_PA_AVAILABLE, &comm->flags);
	wake_up(&(comm->wait_queue));
	ret = 0;

	/* Workaround for issue #6081 */
	enable_nonboot_cpus();

	goto exit;

error2:
	/* Workaround for issue #6081 */
	enable_nonboot_cpus();

	spin_lock(&(comm->lock));
	l1_shared_buffer = comm->l1_buffer;
	comm->l1_buffer = NULL;
	spin_unlock(&(comm->lock));

error1:
	if (l1_shared_buffer != NULL) {
		__clear_page_locked(virt_to_page(l1_shared_buffer));
		internal_free_page((unsigned long) l1_shared_buffer);
	}

exit:
#ifdef CONFIG_SMP
	ret_affinity = sched_setaffinity(0, &saved_cpu_mask);
	if (ret_affinity != 0)
		dpr_err("sched_setaffinity #2 -> 0x%lX", ret_affinity);
#endif

	if (ret > 0)
		ret = -EFAULT;

	return ret;
}

/* Initializes the SE (SDP, SRAM resize, RPC handler) */
static int tf_se_init(struct tf_comm *comm,
	u32 sdp_backing_store_addr, u32 sdp_bkext_store_addr)
{
	int error;
	unsigned int crc;

	if (comm->se_initialized) {
		dpr_info("%s: SE already initialized... nothing to do\n",
			__func__);
		return 0;
	}

	/* Secure CRC read */
	dpr_info("%s: Secure CRC Read...\n", __func__);

	crc = omap4_secure_dispatcher(API_HAL_KM_GETSECUREROMCODECRC_INDEX,
		0, 0, 0, 0, 0, 0);
	pr_info("SMC: SecureCRC=0x%08X\n", crc);

	/*
	 * Flush caches before resize, just to be sure there is no
	 * pending public data writes back to SRAM that could trigger a
	 * security violation once their address space is marked as
	 * secure.
	 */
#define OMAP4_SRAM_PA	0x40300000
#define OMAP4_SRAM_SIZE	0xe000
	flush_cache_all();
	outer_flush_range(OMAP4_SRAM_PA,
		OMAP4_SRAM_PA + OMAP4_SRAM_SIZE);
	wmb();

	/* SRAM resize */
	dpr_info("%s: SRAM resize (52KB)...\n", __func__);
	error = omap4_secure_dispatcher(API_HAL_SEC_L3_RAM_RESIZE_INDEX,
		FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 1,
		SEC_RAM_SIZE_52KB, 0, 0, 0);

	if (error == API_HAL_RET_VALUE_OK) {
		dpr_info("%s: SRAM resize OK\n", __func__);
	} else {
		dpr_err("%s: SRAM resize failed [0x%x]\n", __func__, error);
		goto error;
	}

	/* SDP init */
	dpr_info("%s: SDP runtime init..."
		"(sdp_backing_store_addr=%x, sdp_bkext_store_addr=%x)\n",
		__func__, sdp_backing_store_addr, sdp_bkext_store_addr);
	error = omap4_secure_dispatcher(API_HAL_SDP_RUNTIMEINIT_INDEX,
		FLAG_FIQ_ENABLE | FLAG_START_HAL_CRITICAL, 2,
		sdp_backing_store_addr, sdp_bkext_store_addr, 0, 0);

	if (error == API_HAL_RET_VALUE_OK) {
		dpr_info("%s: SDP runtime init OK\n", __func__);
	} else {
		dpr_err("%s: SDP runtime init failed [0x%x]\n",
			__func__, error);
		goto error;
	}

	/* RPC init */
	dpr_info("%s: RPC init...\n", __func__);
	error = omap4_secure_dispatcher(API_HAL_TASK_MGR_RPCINIT_INDEX,
		FLAG_START_HAL_CRITICAL, 1,
		(u32) (u32 (*const) (u32, u32, u32, u32)) &rpc_handler,
		0, 0, 0);

	if (error == API_HAL_RET_VALUE_OK) {
		dpr_info("%s: RPC init OK\n", __func__);
	} else {
		dpr_err("%s: RPC init failed [0x%x]\n", __func__, error);
		goto error;
	}

	comm->se_initialized = true;

	return 0;

error:
	return -EFAULT;
}

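/*
 * Hedged sketch: the function-pointer cast in the RPC init call above
 * implies that rpc_handler has the shape below. The parameter names are
 * assumptions; the handler is invoked by the secure ROM code and its
 * return value is handed back to the secure world.
 */
static u32 rpc_handler(u32 p1, u32 p2, u32 p3, u32 p4);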