/* Trap into the TrustZone, and call funcs there. */
static int __secure_tz_reset_entry2(unsigned int *scm_data, u32 size_scm_data,
					bool is_64)
{
	int ret;

	/* sync memory before sending the commands to tz */
	__iowmb();

	if (!is_64) {
		spin_lock(&tz_lock);
		ret = scm_call_atomic2(SCM_SVC_IO, TZ_RESET_ID,
					scm_data[0], scm_data[1]);
		spin_unlock(&tz_lock);
	} else {
		if (is_scm_armv8()) {
			struct scm_desc desc = {0};

			desc.arginfo = 0;
			ret = scm_call2(SCM_SIP_FNID(SCM_SVC_DCVS,
					TZ_RESET_ID_64), &desc);
		} else {
			ret = scm_call(SCM_SVC_DCVS, TZ_RESET_ID_64, scm_data,
					size_scm_data, NULL, 0);
		}
	}
	return ret;
}
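/*
 * Hedged usage sketch (not from the original source): how a caller might
 * reset the TZ DCVS state through __secure_tz_reset_entry2 above. The
 * two-word payload layout and the tz_reset_example() wrapper name are
 * assumptions for illustration only.
 */
static int tz_reset_example(bool is_64)
{
	/* two 32-bit words handed to the secure side on the legacy path */
	unsigned int scm_data[2] = { 0, 0 };

	return __secure_tz_reset_entry2(scm_data, sizeof(scm_data), is_64);
}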
void msm_disable_wdog_debug(void)
{
	int ret;

	ret = scm_call_atomic2(SCM_SVC_BOOT, SCM_WDOG_DEBUG_BOOT_PART, 1, 0);
	if (ret)
		pr_err("failed to disable wdog debug: %d\n", ret);
}
void msm_enable_wdog_debug(void)
{
	int ret;

	ret = scm_call_atomic2(SCM_SVC_BOOT, SCM_WDOG_DEBUG_BOOT_PART,
				0, BOOT_PART_EN_VAL);
	if (ret)
		pr_err("failed to enable wdog debug: %d\n", ret);
}
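/*
 * Hedged usage sketch (not from the original source): the two helpers above
 * can be used as a pair, disabling the watchdog debug/boot-partition feature
 * before a sensitive operation and re-enabling it afterwards. The
 * do_sensitive_reset_sequence() callee is a hypothetical placeholder.
 */
static void wdog_debug_paired_usage_example(void)
{
	msm_disable_wdog_debug();	/* SCM_WDOG_DEBUG_BOOT_PART, arg 1 */
	do_sensitive_reset_sequence();	/* hypothetical work */
	msm_enable_wdog_debug();	/* SCM_WDOG_DEBUG_BOOT_PART, arg 0 */
}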
/* Trap into the TrustZone, and call funcs there. */
static int __secure_tz_entry(u32 cmd, u32 val, u32 id)
{
	int ret;

	spin_lock(&tz_lock);
	__iowmb();
	ret = scm_call_atomic2(SCM_SVC_IO, cmd, val, id);
	spin_unlock(&tz_lock);
	return ret;
}
/* Trap into the TrustZone, and call funcs there. */
static int __secure_tz_entry2(u32 cmd, u32 val1, u32 val2)
{
	int ret;

	spin_lock(&tz_lock);
	/* sync memory before sending the commands to tz */
	__iowmb();
	ret = scm_call_atomic2(SCM_SVC_IO, cmd, val1, val2);
	spin_unlock(&tz_lock);
	return ret;
}
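/*
 * Hedged usage sketch (not from the original source): a hypothetical caller
 * handing two values to the secure side through __secure_tz_entry2 above.
 * TZ_UPDATE_ID is assumed to be one of the command IDs defined elsewhere in
 * this driver; the busy/total time pairing shown here is illustrative only.
 */
static int tz_send_sample_example(u32 total_time, u32 busy_time)
{
	return __secure_tz_entry2(TZ_UPDATE_ID, total_time, busy_time);
}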
static int simulate_secure_wdog_bite(void)
{
	int ret;
	struct scm_desc desc = {
		.args[0] = 0,
		.arginfo = SCM_ARGS(1),
	};

	pr_emerg("simulating secure watch dog bite using scm_call\n");
	if (!is_scm_armv8())
		ret = scm_call_atomic2(SCM_SVC_BOOT,
					SCM_SVC_SEC_WDOG_TRIG, 0, 0);
	else
		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT,
					SCM_SVC_SEC_WDOG_TRIG), &desc);

	/* if we reach this point, scm_call has failed */
	pr_emerg("simulation of secure watch dog bite failed\n");
	return ret;
}
#else
int simulate_secure_wdog_bite(void)
{
	return 0;
}
#endif

#if defined(CONFIG_ARCH_MSM8226) || defined(CONFIG_ARCH_MSM8974)
/*
 * Misc data structures needed for simulating bus timeout in
 * camera
 */
#define HANG_ADDRESS 0xfda10000

struct clk_pair {
	const char *dev;
	const char *clk;
};

static struct clk_pair bus_timeout_camera_clocks_on[] = {
	/*
	 * gcc_mmss_noc_cfg_ahb_clk should be on, but right
	 * now this clock is on by default and not accessible.
	 * Update this table if gcc_mmss_noc_cfg_ahb_clk is
	 * ever not enabled by default!
	 */
	{
		.dev = "fda0c000.qcom,cci",
		.clk = "camss_top_ahb_clk",
	},
	{
		.dev = "fda10000.qcom,vfe",
void sec_do_bypass_sdi_execution_in_low(void)
{
	int ret;
	struct scm_desc desc = {
		.args[0] = 1,
		.args[1] = 0,
		.arginfo = SCM_ARGS(2),
	};

	/* Needed to bypass debug image on some chips */
	if (!is_scm_armv8())
		ret = scm_call_atomic2(SCM_SVC_BOOT,
					SCM_WDOG_DEBUG_BOOT_PART, 1, 0);
	else
		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT,
					SCM_WDOG_DEBUG_BOOT_PART), &desc);

	if (ret)
		pr_err("Failed to disable wdog debug: %d\n", ret);
}

int __init sec_debug_init(void)
{
	int ret;

	ret = sec_dt_addr_init();
	if (ret < 0)
		return ret;

	register_reboot_notifier(&nb_reboot_block);
	atomic_notifier_chain_register(&panic_notifier_list, &nb_panic_block);

	if (!enable) {
		sec_do_bypass_sdi_execution_in_low();
		return -EPERM;
	}

#ifdef CONFIG_SEC_DEBUG_SCHED_LOG
	__init_sec_debug_log();
#endif

	sec_debug_set_upload_magic(SECDEBUG_MODE);
	sec_debug_set_upload_cause(UPLOAD_CAUSE_INIT);

	return 0;
}
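/*
 * Hedged sketch (not from the original source): the reboot and panic hooks
 * registered by sec_debug_init() follow the standard notifier_block pattern.
 * The handler name sec_debug_reboot_handler_example and its body are
 * assumptions for illustration; the real nb_reboot_block handler is defined
 * elsewhere in this driver.
 */
#include <linux/notifier.h>
#include <linux/reboot.h>

static int sec_debug_reboot_handler_example(struct notifier_block *nb,
					unsigned long action, void *data)
{
	/* e.g. clear the upload magic so a clean reboot is not treated as a crash */
	return NOTIFY_DONE;
}

static struct notifier_block nb_reboot_block_example = {
	.notifier_call = sec_debug_reboot_handler_example,
};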
/* Trap into the TrustZone, and call funcs there. */
static int __secure_tz_reset_entry2(unsigned int *scm_data, u32 size_scm_data,
					bool is_64)
{
	int ret;

	/* sync memory before sending the commands to tz */
	__iowmb();

	if (!is_64) {
		spin_lock(&tz_lock);
		ret = scm_call_atomic2(SCM_SVC_IO, TZ_RESET_ID,
					scm_data[0], scm_data[1]);
		spin_unlock(&tz_lock);
	} else {
		ret = scm_call(SCM_SVC_DCVS, TZ_RESET_ID_64, scm_data,
				size_scm_data, NULL, 0);
	}
	return ret;
}
static void __secure_writel(u32 v, u32 addr)
{
	__iowmb();
	scm_call_atomic2(SCM_SVC_IO, SCM_IO_WRITE, addr, v);
}
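/*
 * Hedged sketch (not from the original source): a read-side counterpart to
 * __secure_writel above. SCM_IO_READ is assumed to be the secure I/O read
 * command defined next to SCM_IO_WRITE; if the target tree lacks it, this
 * helper does not apply.
 */
static u32 __secure_readl(u32 addr)
{
	u32 v;

	v = scm_call_atomic1(SCM_SVC_IO, SCM_IO_READ, addr);
	__iormb();
	return v;
}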
static long smcmod_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int ret = 0;

	/* sanity check */
	if (!argp)
		return -EINVAL;

	/*
	 * The SMC instruction should only be initiated by one process
	 * at a time, hence the critical section here. Note that this
	 * does not prevent user space from modifying the
	 * allocated buffer contents. Extra steps are needed to
	 * prevent that from happening.
	 */
	mutex_lock(&ioctl_lock);

	ret = smcmod_ioctl_check(cmd);
	if (ret)
		goto cleanup;

	switch (cmd) {
	case SMCMOD_IOCTL_SEND_REG_CMD:
	{
		struct smcmod_reg_req req;

		/* copy struct from user */
		if (copy_from_user((void *)&req, argp, sizeof(req))) {
			ret = -EFAULT;
			goto cleanup;
		}

		/*
		 * call the correct scm function to switch to
		 * secure world
		 */
		if (req.num_args == 1) {
			req.return_val =
				scm_call_atomic1(req.service_id,
					req.command_id, req.args[0]);
		} else if (req.num_args == 2) {
			req.return_val =
				scm_call_atomic2(req.service_id,
					req.command_id, req.args[0],
					req.args[1]);
		} else {
			ret = -EINVAL;
			goto cleanup;
		}

		/* copy result back to user */
		if (copy_to_user(argp, (void *)&req, sizeof(req))) {
			ret = -EFAULT;
			goto cleanup;
		}
	}
	break;

	/*
	 * This is an example of how to pass buffers to/from the secure
	 * side using the ion driver.
	 */
	case SMCMOD_IOCTL_SEND_BUF_CMD:
	{
		struct smcmod_buf_req req;

		/* copy struct from user */
		if (copy_from_user((void *)&req, argp, sizeof(req))) {
			ret = -EFAULT;
			goto cleanup;
		}

		/* send the command */
		ret = smcmod_send_buf_cmd(&req);
		if (ret < 0)
			goto cleanup;

		/* copy result back to user */
		if (copy_to_user(argp, (void *)&req, sizeof(req))) {
			ret = -EFAULT;
			goto cleanup;
		}
	}
	break;

	case SMCMOD_IOCTL_SEND_CIPHER_CMD:
	{
		struct smcmod_cipher_req req;

		/* copy struct from user */
		if (copy_from_user((void *)&req, argp, sizeof(req))) {
			ret = -EFAULT;
			goto cleanup;
		}

		ret = smcmod_send_cipher_cmd(&req);
		if (ret < 0)
			goto cleanup;

		/* copy result back to user */
		if (copy_to_user(argp, (void *)&req, sizeof(req))) {
			ret = -EFAULT;
			goto cleanup;
		}
	}
	break;

	case SMCMOD_IOCTL_SEND_MSG_DIGEST_CMD:
	{
		struct smcmod_msg_digest_req req;

		/* copy struct from user */
		if (copy_from_user((void *)&req, argp, sizeof(req))) {
			ret = -EFAULT;
			goto cleanup;
		}

		ret = smcmod_send_msg_digest_cmd(&req);
		if (ret < 0)
			goto cleanup;

		/* copy result back to user */
		if (copy_to_user(argp, (void *)&req, sizeof(req))) {
			ret = -EFAULT;
			goto cleanup;
		}
	}
	break;

	case SMCMOD_IOCTL_GET_VERSION:
	{
		uint32_t req;

		/* call scm function to switch to secure world */
		req = scm_get_version();

		/* copy result back to user */
		if (copy_to_user(argp, (void *)&req, sizeof(req))) {
			ret = -EFAULT;
			goto cleanup;
		}
	}
	break;

	case SMCMOD_IOCTL_SEND_DECRYPT_CMD:
	{
		struct smcmod_decrypt_req req;

		if (copy_from_user((void *)&req, argp, sizeof(req))) {
			ret = -EFAULT;
			goto cleanup;
		}

		ret = smcmod_send_dec_cmd(&req);
		if (ret < 0)
			goto cleanup;

		if (copy_to_user(argp, (void *)&req, sizeof(req))) {
			ret = -EFAULT;
			goto cleanup;
		}
	}
	break;

	default:
		ret = -EINVAL;
	}

cleanup:
	mutex_unlock(&ioctl_lock);
	return ret;
}
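/*
 * Hedged user-space sketch (not from the original source): querying the
 * secure-world version through the smcmod_ioctl handler above. The device
 * node path "/dev/smcmod" and the "smcmod.h" uapi header name are
 * assumptions; the SMCMOD_IOCTL_GET_VERSION command and its uint32_t
 * payload follow the handler shown above.
 */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "smcmod.h"		/* assumed uapi header providing the ioctl codes */

int main(void)
{
	uint32_t version = 0;
	int fd = open("/dev/smcmod", O_RDONLY);	/* assumed node name */

	if (fd < 0)
		return 1;
	if (ioctl(fd, SMCMOD_IOCTL_GET_VERSION, &version) == 0)
		printf("scm version: 0x%08x\n", version);
	close(fd);
	return 0;
}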