/*
 * __secure_tz_update_entry3() - send a 3-argument DCVS "update" request to
 * TrustZone and return TZ's reply through @val.
 *
 * Legacy (!is_64) targets issue an atomic SCM call under tz_lock and the
 * raw return value doubles as the TZ answer.  64-bit targets use either
 * the ARMv8 scm_call2() convention or the older buffer-based scm_call().
 */
static int __secure_tz_update_entry3(unsigned int *scm_data, u32 size_scm_data,
				     int *val, u32 size_val, bool is_64)
{
	int rc;

	/* sync memory before sending the commands to tz */
	__iowmb();

	if (!is_64) {
		spin_lock(&tz_lock);
		rc = scm_call_atomic3(SCM_SVC_IO, TZ_UPDATE_ID,
				      scm_data[0], scm_data[1], scm_data[2]);
		spin_unlock(&tz_lock);
		/* atomic call returns the TZ answer directly */
		*val = rc;
	} else if (is_scm_armv8()) {
		struct scm_desc desc = {0};

		desc.args[0] = scm_data[0];
		desc.args[1] = scm_data[1];
		desc.args[2] = scm_data[2];
		desc.arginfo = SCM_ARGS(3);
		rc = scm_call2(SCM_SIP_FNID(SCM_SVC_DCVS, TZ_V2_UPDATE_ID_64),
			       &desc);
		*val = desc.ret[0];
	} else {
		rc = scm_call(SCM_SVC_DCVS, TZ_UPDATE_ID_64, scm_data,
			      size_scm_data, val, size_val);
	}

	return rc;
}
/*
 * get_tamper_fuse_cmd_new() - ask TZ whether the SW fuse identified by
 * @flag is blown.
 *
 * Returns the fuse state reported by TZ, or 0xff if the SCM call itself
 * failed.  Also caches the result in the file-scope variable 'ic'
 * (declared elsewhere in this file).
 */
static uint8_t get_tamper_fuse_cmd_new(uint32_t flag)
{
	int ret;
	uint32_t fuse_id;
	uint8_t resp_buf;
	size_t resp_len;
	struct scm_desc desc = {0};

	resp_len = sizeof(resp_buf);
	desc.args[0] = fuse_id = flag;
	desc.arginfo = SCM_ARGS(1);

	if (!is_scm_armv8()) {
		ret = scm_call(SCM_SVC_FUSE, SCM_IS_SW_FUSE_BLOWN_ID,
			       &fuse_id, sizeof(fuse_id),
			       &resp_buf, resp_len);
	} else {
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_FUSE,
				SCM_IS_SW_FUSE_BLOWN_ID), &desc);
		resp_buf = desc.ret[0];
	}

	if (ret) {
		/*
		 * Was a bare printk() with no log level and no trailing
		 * newline; use pr_err() so the message is flushed at the
		 * right priority.
		 */
		pr_err("scm_call/1 returned %d\n", ret);
		resp_buf = 0xff;	/* sentinel: query failed */
	}

	ic = resp_buf;
	return resp_buf;
}
/*
 * pil_mss_restart_reg() - pulse the MSS restart register.
 *
 * Non-secure targets write the register directly; targets with a secure
 * restart register route the request through TrustZone (legacy scm_call()
 * or ARMv8 scm_call2(), depending on the SCM convention).
 */
static int pil_mss_restart_reg(struct q6v5_data *drv, u32 mss_restart)
{
	int ret = 0;
	int scm_ret = 0;
	struct scm_desc desc = {0};

	desc.args[0] = mss_restart;
	desc.args[1] = 0;
	desc.arginfo = SCM_ARGS(2);

	if (drv->restart_reg && !drv->restart_reg_sec) {
		/* Direct, non-secure register write. */
		writel_relaxed(mss_restart, drv->restart_reg);
		mb();		/* make the write visible before delaying */
		udelay(2);
	} else if (drv->restart_reg_sec) {
		if (!is_scm_armv8()) {
			ret = scm_call(SCM_SVC_PIL, MSS_RESTART_ID,
				       &mss_restart, sizeof(mss_restart),
				       &scm_ret, sizeof(scm_ret));
		} else {
			ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
					MSS_RESTART_ID), &desc);
			scm_ret = desc.ret[0];
		}
		if (ret || scm_ret)
			pr_err("Secure MSS restart failed\n");
	}

	return ret;
}
/*
 * __secure_tz_reset_entry2() - trap into TrustZone and issue a DCVS
 * "reset" request.
 *
 * Legacy (!is_64) targets use an atomic two-argument SCM call under
 * tz_lock; 64-bit targets use scm_call2() (ARMv8) or the buffer-based
 * scm_call() fallback.
 */
static int __secure_tz_reset_entry2(unsigned int *scm_data, u32 size_scm_data,
				    bool is_64)
{
	int rc;

	/* sync memory before sending the commands to tz */
	__iowmb();

	if (!is_64) {
		spin_lock(&tz_lock);
		rc = scm_call_atomic2(SCM_SVC_IO, TZ_RESET_ID,
				      scm_data[0], scm_data[1]);
		spin_unlock(&tz_lock);
	} else if (is_scm_armv8()) {
		struct scm_desc desc = {0};

		desc.arginfo = 0;	/* the v2 reset call carries no args */
		rc = scm_call2(SCM_SIP_FNID(SCM_SVC_DCVS, TZ_RESET_ID_64),
			       &desc);
	} else {
		rc = scm_call(SCM_SVC_DCVS, TZ_RESET_ID_64, scm_data,
			      size_scm_data, NULL, 0);
	}

	return rc;
}
int __spdm_scm_call(struct spdm_args *args, int num_args) { int status = 0; SPDM_IPC_LOG("%s:svc_id:%d,cmd_id:%d,cmd:%llu,num_args:%d\n", __func__, SPDM_SCM_SVC_ID, SPDM_SCM_CMD_ID, args->arg[0], num_args); if (!is_scm_armv8()) { status = scm_call(SPDM_SCM_SVC_ID, SPDM_SCM_CMD_ID, args->arg, sizeof(args->arg), args->ret, sizeof(args->ret)); } else { struct scm_desc desc = {0}; desc.arginfo = SCM_ARGS(num_args); memcpy(desc.args, args->arg, COPY_SIZE(sizeof(desc.args), sizeof(args->arg))); status = scm_call2(SCM_SIP_FNID(SPDM_SCM_SVC_ID, SPDM_SCM_CMD_ID), &desc); memcpy(args->ret, desc.ret, COPY_SIZE(sizeof(args->ret), sizeof(desc.ret))); } SPDM_IPC_LOG("%s:svc_id:%d,cmd_id:%d,cmd:%llu,Ret[0]:%llu,Ret[1]:%llu\n" , __func__, SPDM_SCM_SVC_ID, SPDM_SCM_CMD_ID, args->arg[0], args->ret[0], args->ret[1]); return status; }
/*
 * set_tamper_fuse_cmd_new() - ask TZ to blow the SW fuse identified by
 * @flag.  Returns the SCM call status.
 */
static int set_tamper_fuse_cmd_new(uint32_t flag)
{
	struct scm_desc desc = {0};
	uint32_t fuse_id;

	desc.args[0] = fuse_id = flag;
	desc.arginfo = SCM_ARGS(1);

	if (is_scm_armv8())
		return scm_call2(SCM_SIP_FNID(SCM_SVC_FUSE,
				 SCM_BLOW_SW_FUSE_ID), &desc);

	return scm_call(SCM_SVC_FUSE, SCM_BLOW_SW_FUSE_ID,
			&fuse_id, sizeof(fuse_id), NULL, 0);
}
/*
 * tz_init() - hand the GPU power-level table to the TrustZone DCVS
 * governor and discover which TZ interface version is available.
 *
 * @priv:           per-device governor data; is_64 is set when the
 *                  64-bit interface is in use.
 * @tz_pwrlevels:   power-level table to pass to TZ.
 * @size_pwrlevels: size of that table in bytes.
 * @version:        out: interface version reported by TZ (0 = legacy).
 * @size_version:   size of *version in bytes (legacy call only).
 *
 * Returns 0 on success, -ENOMEM/-EINVAL or the SCM error otherwise.
 */
static int tz_init(struct devfreq_msm_adreno_tz_data *priv,
		   unsigned int *tz_pwrlevels, u32 size_pwrlevels,
		   unsigned int *version, u32 size_version)
{
	int ret;
	/* Make sure all CMD IDs are avaialble */
	if (scm_is_call_available(SCM_SVC_DCVS, TZ_INIT_ID)) {
		ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID, tz_pwrlevels,
				size_pwrlevels, NULL, 0);
		*version = 0;
	} else if (scm_is_call_available(SCM_SVC_DCVS, TZ_INIT_ID_64) &&
		   scm_is_call_available(SCM_SVC_DCVS, TZ_UPDATE_ID_64) &&
		   scm_is_call_available(SCM_SVC_DCVS, TZ_RESET_ID_64)) {
		struct scm_desc desc = {0};
		unsigned int *tz_buf;

		if (!is_scm_armv8()) {
			ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID_64,
				       tz_pwrlevels, size_pwrlevels,
				       version, size_version);
			if (!ret)
				priv->is_64 = true;
			return ret;
		}

		tz_buf = kzalloc(PAGE_ALIGN(size_pwrlevels), GFP_KERNEL);
		if (!tz_buf)
			return -ENOMEM;
		memcpy(tz_buf, tz_pwrlevels, size_pwrlevels);
		/* Ensure memcpy completes execution */
		mb();
		/*
		 * Flush exactly the allocated byte range.  The previous code
		 * added PAGE_ALIGN(size_pwrlevels) to an 'unsigned int *',
		 * which scales the offset by sizeof(unsigned int) and flushed
		 * 4x the buffer, touching cache lines past the allocation.
		 */
		dmac_flush_range(tz_buf,
				 (void *)tz_buf + PAGE_ALIGN(size_pwrlevels));

		desc.args[0] = virt_to_phys(tz_buf);
		desc.args[1] = size_pwrlevels;
		desc.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_DCVS, TZ_V2_INIT_ID_64),
				&desc);
		*version = desc.ret[0];
		if (!ret)
			priv->is_64 = true;
		kzfree(tz_buf);
	} else
		ret = -EINVAL;

	return ret;
}
void sec_watchdog_disable(void) { int ret; struct scm_desc desc; desc.args[0] = 1; desc.arginfo = SCM_ARGS(1); ret = scm_call2(SCM_SIP_FNID(SCM_SVC_BOOT, SCM_SVC_SEC_WDOG_DIS), &desc); if (ret || desc.ret[0]) { pr_err("%s failed\n", __func__); return; } pr_info("%s\n", __func__); }
/*
 * init_debug_lar_unlock() - unlock the core debug LAR via TrustZone at
 * boot.  The ARMv8 call carries no arguments (desc stays zeroed); the
 * legacy call passes a zero 32-bit argument buffer.
 */
static int __init init_debug_lar_unlock(void)
{
	int ret;
	uint32_t argument = 0;
	struct scm_desc desc = {0};

	if (is_scm_armv8())
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_TZ,
				SCM_CMD_DEBUG_LAR_UNLOCK), &desc);
	else
		ret = scm_call(SCM_SVC_TZ, SCM_CMD_DEBUG_LAR_UNLOCK,
			       &argument, sizeof(argument), NULL, 0);

	if (ret)
		pr_err("Core Debug Lock unlock failed, ret: %d\n", ret);
	else
		pr_info("Core Debug Lock unlocked\n");

	return ret;
}
/*
 * pil_mss_mem_setup() - tell TrustZone which physical region the modem
 * image occupies (PAS memory setup).
 *
 * Returns 0 on success; a negative errno if the SCM transport failed,
 * or the non-zero TZ status otherwise.
 */
static int pil_mss_mem_setup(struct pil_desc *pil, phys_addr_t addr,
			     size_t size)
{
	struct modem_data *md = dev_get_drvdata(pil->dev);
	struct pas_init_image_req {
		u32 proc;
		u32 start_addr;
		u32 len;
	} request;
	u32 scm_ret = 0;
	int ret;
	struct scm_desc desc = {0};

	/* Some targets do not require this step at all. */
	if (!md->subsys_desc.pil_mss_memsetup)
		return 0;

	request.proc = md->pas_id;
	request.start_addr = addr;
	request.len = size;

	if (!is_scm_armv8()) {
		ret = scm_call(SCM_SVC_PIL, PAS_MEM_SETUP_CMD, &request,
			       sizeof(request), &scm_ret, sizeof(scm_ret));
	} else {
		desc.args[0] = md->pas_id;
		desc.args[1] = addr;
		desc.args[2] = size;
		desc.arginfo = SCM_ARGS(3);
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL, PAS_MEM_SETUP_CMD),
				&desc);
		scm_ret = desc.ret[0];
	}

	return ret ? ret : scm_ret;
}
/**
 * Changes ownership of SPI transfers from TEE to REE side or vice versa.
 *
 * SPI transfers can be owned only by one of TEE or REE side at any given time.
 * This can be changed dynamically if needed but of course that needs support
 * from underlying layers. This function will transfer the ownership from REE
 * to TEE or vice versa.
 *
 * If REE side uses the SPI master when TEE owns the pipe or vice versa the
 * system will most likely crash dump.
 *
 * If available this should be set at boot time to eg. TEE side and not
 * dynamically as that will increase the security of the system. This however
 * implies that there are no other SPI slaves connected that should be handled
 * from REE side.
 *
 * @see SET_PIPE_OWNERSHIP
 */
static int set_pipe_ownership(struct fpc1145_data *fpc1145, bool to_tz)
{
#ifdef SET_PIPE_OWNERSHIP
	int rc;
	const u32 TZ_BLSP_MODIFY_OWNERSHIP_ID = 3;
	const u32 TZBSP_APSS_ID = 1;
	const u32 TZBSP_TZ_ID = 3;
	struct scm_desc desc = {
		.arginfo = SCM_ARGS(2),
		.args[0] = fpc1145->qup_id,
		.args[1] = to_tz ? TZBSP_TZ_ID : TZBSP_APSS_ID,
	};

	rc = scm_call2(SCM_SIP_FNID(SCM_SVC_TZ, TZ_BLSP_MODIFY_OWNERSHIP_ID),
		       &desc);
	if (rc || desc.ret[0]) {
		dev_err(fpc1145->dev, "%s: scm_call2: responce %llu, rc %d\n",
			__func__, desc.ret[0], rc);
		return -EINVAL;
	}
	dev_dbg(fpc1145->dev, "%s: scm_call2: ok\n", __func__);
#endif
	return 0;
}

/*
 * set_clks() - enable (set rate + prepare/enable core and iface clocks)
 * or disable the sensor's SPI clocks.  On enable failure any clock
 * already enabled is rolled back.  Returns 0 on success or the clock
 * framework's error code.
 */
static int set_clks(struct fpc1145_data *fpc1145, bool enable)
{
	int rc;

	if (enable) {
		rc = clk_set_rate(fpc1145->core_clk,
				  fpc1145->spi->max_speed_hz);
		if (rc) {
			dev_err(fpc1145->dev,
				"%s: Error setting clk_rate: %u, %d\n",
				__func__, fpc1145->spi->max_speed_hz, rc);
			return rc;
		}
		rc = clk_prepare_enable(fpc1145->core_clk);
		if (rc) {
			dev_err(fpc1145->dev,
				"%s: Error enabling core clk: %d\n",
				__func__, rc);
			return rc;
		}
		rc = clk_prepare_enable(fpc1145->iface_clk);
		if (rc) {
			dev_err(fpc1145->dev,
				"%s: Error enabling iface clk: %d\n",
				__func__, rc);
			clk_disable_unprepare(fpc1145->core_clk);
			return rc;
		}
		/*
		 * Rejoined format string: the original source had a literal
		 * newline inside the string constant, which is invalid C.
		 */
		dev_dbg(fpc1145->dev, "%s ok. clk rate %u hz\n", __func__,
			fpc1145->spi->max_speed_hz);
	} else {
		clk_disable_unprepare(fpc1145->iface_clk);
		clk_disable_unprepare(fpc1145->core_clk);
		rc = 0;
	}
	return rc;
}
/**
 * Changes ownership of SPI transfers from TEE to REE side or vice versa.
 *
 * SPI transfers can be owned only by one of TEE or REE side at any given time.
 * This can be changed dynamically if needed but of course that needs support
 * from underlying layers. This function will transfer the ownership from REE
 * to TEE or vice versa.
 *
 * If REE side uses the SPI master when TEE owns the pipe or vice versa the
 * system will most likely crash dump.
 *
 * If available this should be set at boot time to eg. TEE side and not
 * dynamically as that will increase the security of the system. This however
 * implies that there are no other SPI slaves connected that should be handled
 * from REE side.
 *
 * @see SET_PIPE_OWNERSHIP
 */
static int set_pipe_ownership(struct fpc1020_data *fpc1020, bool to_tz)
{
	const u32 TZ_BLSP_MODIFY_OWNERSHIP_ID = 3;
	const u32 TZBSP_APSS_ID = 1;
	const u32 TZBSP_TZ_ID = 3;
	int rc;
	struct scm_desc desc = {
		.arginfo = SCM_ARGS(2),
		.args[0] = fpc1020->qup_id,
		.args[1] = to_tz ? TZBSP_TZ_ID : TZBSP_APSS_ID,
	};

	rc = scm_call2(SCM_SIP_FNID(SCM_SVC_TZ, TZ_BLSP_MODIFY_OWNERSHIP_ID),
		       &desc);
	if (rc || desc.ret[0]) {
		dev_err(fpc1020->dev, "%s: scm_call2: responce %llu, rc %d\n",
			__func__, desc.ret[0], rc);
		return -EINVAL;
	}
	dev_dbg(fpc1020->dev, "%s: scm_call2: ok\n", __func__);
	return 0;
}

/*
 * set_clks() - idempotently enable or disable the sensor's SPI clocks,
 * tracking state in fpc1020->clocks_enabled under fpc1020->lock.  On
 * enable failure the core clock is rolled back.  Returns 0 on success
 * or the clock framework's error code.
 */
static int set_clks(struct fpc1020_data *fpc1020, bool enable)
{
	int rc = 0;

	mutex_lock(&fpc1020->lock);
	if (enable == fpc1020->clocks_enabled)
		goto out;	/* already in the requested state */

	if (enable) {
		rc = clk_prepare_enable(fpc1020->core_clk);
		if (rc) {
			dev_err(fpc1020->dev,
				"%s: Error enabling core clk: %d\n",
				__func__, rc);
			goto out;
		}
		rc = clk_prepare_enable(fpc1020->iface_clk);
		if (rc) {
			dev_err(fpc1020->dev,
				"%s: Error enabling iface clk: %d\n",
				__func__, rc);
			clk_disable_unprepare(fpc1020->core_clk);
			goto out;
		}
		/*
		 * Rejoined format string: the original source had a literal
		 * newline inside the string constant, which is invalid C.
		 */
		dev_dbg(fpc1020->dev, "%s ok. clk rate %u hz\n", __func__,
			fpc1020->spi->max_speed_hz);
		fpc1020->clocks_enabled = true;
	} else {
		clk_disable_unprepare(fpc1020->iface_clk);
		clk_disable_unprepare(fpc1020->core_clk);
		fpc1020->clocks_enabled = false;
	}
out:
	mutex_unlock(&fpc1020->lock);
	return rc;
}