/*
 * tz_init() - hand the GPU power-level table to the TrustZone DCVS service.
 * @priv:           governor private data; ->is_64 is set when the 64-bit
 *                  TZ interface is used
 * @tz_pwrlevels:   power-level table to pass to TZ
 * @size_pwrlevels: size of @tz_pwrlevels in bytes
 * @version:        out: TZ interface version (0 for the legacy interface)
 * @size_version:   size of @version in bytes (legacy 64-bit call only)
 *
 * Probes which DCVS command IDs the secure side exposes and initializes
 * whichever interface is available.  Returns 0 on success, -EINVAL when no
 * interface is available, -ENOMEM on allocation failure, or the scm call's
 * error code.
 */
static int tz_init(struct devfreq_msm_adreno_tz_data *priv,
		unsigned int *tz_pwrlevels, u32 size_pwrlevels,
		unsigned int *version, u32 size_version)
{
	int ret;
	/* Make sure all CMD IDs are available */
	if (scm_is_call_available(SCM_SVC_DCVS, TZ_INIT_ID)) {
		/* Legacy 32-bit interface: table passed by pointer, no version. */
		ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID, tz_pwrlevels,
				size_pwrlevels, NULL, 0);
		*version = 0;
	} else if (scm_is_call_available(SCM_SVC_DCVS, TZ_INIT_ID_64) &&
			scm_is_call_available(SCM_SVC_DCVS, TZ_UPDATE_ID_64) &&
			scm_is_call_available(SCM_SVC_DCVS, TZ_RESET_ID_64)) {
		struct scm_desc desc = {0};
		unsigned int *tz_buf;

		if (!is_scm_armv8()) {
			/* 64-bit IDs over the legacy calling convention. */
			ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID_64,
					tz_pwrlevels, size_pwrlevels,
					version, size_version);
			if (!ret)
				priv->is_64 = true;
			return ret;
		}
		/*
		 * ARMv8 SMC convention: TZ reads the table from a physical
		 * address, so stage it in a page-aligned bounce buffer and
		 * flush it out of the cache before the call.
		 */
		tz_buf = kzalloc(PAGE_ALIGN(size_pwrlevels), GFP_KERNEL);
		if (!tz_buf)
			return -ENOMEM;
		memcpy(tz_buf, tz_pwrlevels, size_pwrlevels);
		/* Ensure memcpy completes execution */
		mb();
		dmac_flush_range(tz_buf, tz_buf + PAGE_ALIGN(size_pwrlevels));

		desc.args[0] = virt_to_phys(tz_buf);
		desc.args[1] = size_pwrlevels;
		desc.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_DCVS, TZ_V2_INIT_ID_64),
				&desc);
		/* NOTE(review): version is taken from desc.ret[0] even when
		 * the call failed — confirm callers only read it on success. */
		*version = desc.ret[0];
		if (!ret)
			priv->is_64 = true;
		/* kzfree: buffer held data shared with the secure world. */
		kzfree(tz_buf);
	} else
		ret = -EINVAL;

	return ret;
}
/*
 * R5RS dynamic-wind: call BEFORE, push the (before, after) pair onto the
 * dynamic extent, evaluate THUNK, pop the extent, call AFTER, and return
 * whatever THUNK produced.
 */
SCM_EXPORT ScmObj
scm_dynamic_wind(ScmObj before, ScmObj thunk, ScmObj after)
{
    ScmObj result;

    scm_call(before, SCM_NULL);
    wind_onto_dynamic_extent(before, after);
    result = scm_call(thunk, SCM_NULL);
    unwind_dynamic_extent();
    scm_call(after, SCM_NULL);

    return result;
}
/* Sanity-check scm_call: (+ 1 2) must equal 3, and (list) must be '(). */
static void
test_scm_call(void)
{
    SCM sum, empty;

    sum = scm_call(scm_c_public_ref("guile", "+"),
                   scm_from_int(1), scm_from_int(2), SCM_UNDEFINED);
    assert(scm_is_true(scm_equal_p(sum, scm_from_int(3))));

    empty = scm_call(scm_c_public_ref("guile", "list"), SCM_UNDEFINED);
    assert(scm_is_eq(empty, SCM_EOL));
}
static ssize_t qfprom_show_version(struct sys_device *dev, struct sysdev_attribute *attr, char *buf) { uint64_t version; uint32_t qfprom_api_status; int32_t ret; struct qfprom_read_ip { uint32_t row_reg_addr; uint32_t addr_type; uint32_t row_data_addr; uint32_t qfprom_ret_ptr; } rdip; rdip.row_reg_addr = QFPROM_RAW_SPARE_REG27_ROW0_LSB; rdip.addr_type = 0; rdip.row_data_addr = virt_to_phys(&version); rdip.qfprom_ret_ptr = virt_to_phys(&qfprom_api_status); ret = scm_call(SCM_SVC_FUSE, QFPROM_ROW_READ_CMD, &rdip, sizeof(rdip), NULL, 0); if (ret && qfprom_api_status) { pr_err("%s: Error in QFPROM read (%d, 0x%x)\n", __func__, ret, qfprom_api_status); return ret; } return sprintf(buf, "0x%llX\n", version); }
/*
 * mem_prot_region() - ask TZ to protect or unlock a physical memory region.
 * @start: region start (page-aligned before sending)
 * @size:  region size (page-aligned before sending)
 * @lock:  true to protect, false to unlock
 *
 * Best-effort: failures are only logged.
 */
static void mem_prot_region(u64 start, u64 size, bool lock)
{
	int ret;
	int resp = 0;
	struct req_cmd {
		u32 address;
		u32 size;
		u32 permission;
		u32 lock;
		u32 arg;
	} request;

	request.address = PAGE_ALIGN(start);
	request.size = PAGE_ALIGN(size);
	request.permission = 0x1;
	request.lock = lock;
	request.arg = 0;

	/*
	 * Bug fix: the original passed &ret as the response buffer and then
	 * assigned scm_call()'s return value to the same variable, so the TZ
	 * response was always clobbered before it could be checked.  Use a
	 * separate 'resp' and treat either a transport error or a non-zero
	 * TZ response as failure.
	 */
	ret = scm_call(SCM_SVC_MP, TZ_PROTECT_MEMORY,
		       &request, sizeof(request), &resp, sizeof(resp));
	if (ret != 0 || resp != 0)
		pr_err("Failed to %s region %llx - %llx\n",
		       lock ? "protect" : "unlock", start, start + size);
	else
		pr_debug("SUCCESS to %s region %llx - %llx\n",
			 lock ? "protect" : "unlock", start, start + size);
}
static int __mdss_mdp_set_secure(struct mdss_data_type *mdata, int enable) { u32 cmd, resp = 0; int rc; mutex_lock(&mdata->sec_lock); pr_debug("MDP Secure Mode=%d\n", enable); cmd = enable ? 1 : 0; if (mdata->secure_mode == cmd) { mutex_unlock(&mdata->sec_lock); return 0; } mdata->secure_mode = cmd; if (mdata->secure_mode) mdss_mdp_rotator_wait4idle(); rc = scm_call(SCM_SVC_MP, SCM_CP_MDSS_SECURE, &cmd, sizeof(cmd), &resp, sizeof(resp)); if (resp) rc = resp; mutex_unlock(&mdata->sec_lock); return rc; }
/*
 * pil_mss_restart_reg() - drive the MSS restart control, either through a
 * directly mapped register or through the secure (TZ) interface.
 * @drv:         Q6v5 driver state; selects which path is available via
 *               ->restart_reg / ->restart_reg_sec
 * @mss_restart: value to program into the restart control
 *
 * Returns 0 on success or the scm call's error code; a non-zero secure
 * response is only logged.  When neither register pointer is set this is a
 * silent no-op returning 0.
 */
static int pil_mss_restart_reg(struct q6v5_data *drv, u32 mss_restart)
{
	int ret = 0;
	int scm_ret = 0;
	struct scm_desc desc = {0};

	/* Prepared up front; only used on the ARMv8 secure path below. */
	desc.args[0] = mss_restart;
	desc.args[1] = 0;
	desc.arginfo = SCM_ARGS(2);

	if (drv->restart_reg && !drv->restart_reg_sec) {
		/* Non-secure register: plain MMIO write ... */
		writel_relaxed(mss_restart, drv->restart_reg);
		/* ... barrier to ensure the write is posted before waiting. */
		mb();
		udelay(2);
	} else if (drv->restart_reg_sec) {
		/* Secure register: must go through TZ. */
		if (!is_scm_armv8()) {
			ret = scm_call(SCM_SVC_PIL, MSS_RESTART_ID,
					&mss_restart, sizeof(mss_restart),
					&scm_ret, sizeof(scm_ret));
		} else {
			ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
						MSS_RESTART_ID), &desc);
			scm_ret = desc.ret[0];
		}
		if (ret || scm_ret)
			pr_err("Secure MSS restart failed\n");
	}

	return ret;
}
/* if return value == 0, success
 * if return value < 0, scm call fail
 * if return value > 0, status error to read qfprom
 * This API can use in range 0x700XXX
 */
int qfuse_read_single_row(u32 fuse_addr, u32 addr_type, u32 *r_buf)
{
	struct qfprom_read_cmd_buffer request;
	u32 *p_status = NULL;
	u32 scm_ret = 0;
	int ret = 0;

	/* Status word must be in kernel memory so TZ can write it back via
	 * its physical address. */
	p_status = kmalloc(sizeof(u32), GFP_KERNEL);
	if(!p_status) {
		printk("%s : status memory alloc fail\n", __func__);
		ret = -ENOMEM;
		goto error_stat;
	}

	/* TZ consumes physical addresses for both the result buffer and the
	 * status word.
	 * NOTE(review): assumes r_buf points to lowmem (virt_to_phys-able)
	 * and that the SCM path handles cache coherency — confirm. */
	request.qfprom_addr = fuse_addr;
	request.qfprom_addr_type = addr_type;
	request.read_buf = virt_to_phys((void *)r_buf);
	request.qfprom_status = virt_to_phys((void *)p_status);

	/* NOTE(review): purpose of this delay is not evident from the code —
	 * presumably settling time before the fuse read; confirm. */
	msleep(10);

	ret = scm_call(QFPROM_SVC_ID, QFPROM_READ_CMD, &request,
			sizeof(request), &scm_ret, sizeof(scm_ret));
	if(ret < 0) {
		printk("%s: scm call fail\n", __func__);
		goto error_scm;
	}

	/* Round-trip through phys_to_virt lands back on *p_status: the
	 * TZ-reported QFPROM status becomes the return value (>0 = error). */
	ret = *((u32 *)phys_to_virt(request.qfprom_status));
	printk("%s: qfprom_status = 0x%x\n", __func__, ret);

error_scm:
	kfree(p_status);
error_stat:
	return ret;
}
/*
 * pas_init_image() - pass a peripheral image's metadata to the secure PIL
 * authentication service.
 * @id:       peripheral identifier
 * @metadata: image metadata blob to validate
 * @size:     size of @metadata in bytes
 *
 * The metadata is staged in a strongly-ordered DMA buffer so the secure
 * side reads coherent data through the physical address.  Returns 0 on
 * success, -ENOMEM on allocation failure, a negative scm error, or the
 * secure-world status code.
 */
int pas_init_image(enum pas_id id, const u8 *metadata, size_t size)
{
	int ret;
	struct pas_init_image_req {
		u32 proc;
		u32 image_addr;
	} request;
	u32 scm_ret = 0;
	void *mdata_buf;
	dma_addr_t mdata_phys;
	DEFINE_DMA_ATTRS(attrs);

	/* Strongly ordered: no cache maintenance needed before the TZ call. */
	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
	mdata_buf = dma_alloc_attrs(NULL, size, &mdata_phys, GFP_KERNEL,
					&attrs);
	if (!mdata_buf) {
		pr_err("Allocation for metadata failed.\n");
		return -ENOMEM;
	}

	memcpy(mdata_buf, metadata, size);

	request.proc = id;
	request.image_addr = mdata_phys;

	ret = scm_call(SCM_SVC_PIL, PAS_INIT_IMAGE_CMD, &request,
			sizeof(request), &scm_ret, sizeof(scm_ret));

	dma_free_attrs(NULL, size, mdata_buf, mdata_phys, &attrs);

	if (ret)
		return ret;
	return scm_ret;
}
/*
 * tz_start() - devfreq governor start hook: build the GPU power-level table
 * and hand it to the TrustZone DCVS service.
 *
 * tz_pwrlevels[0] holds the number of levels; the frequencies follow from
 * index 1.  Returns -EINVAL when governor data is missing or the frequency
 * table does not fit, otherwise the result of registering the notifier.  A
 * tz_init failure is only logged.
 */
static int tz_start(struct devfreq *devfreq)
{
	struct devfreq_msm_adreno_tz_data *priv;
	unsigned int tz_pwrlevels[MSM_ADRENO_MAX_PWRLEVELS + 1];
	int i, out, ret;

	if (devfreq->data == NULL) {
		pr_err(TAG "data is required for this governor\n");
		return -EINVAL;
	}

	priv = devfreq->data;
	priv->nb.notifier_call = tz_notify;

	/* Slot 0 is reserved for the level count. */
	out = 1;
	/* NOTE(review): '<' rejects max_state == MSM_ADRENO_MAX_PWRLEVELS
	 * even though the array (MAX + 1 entries) would hold it — confirm
	 * whether the off-by-one is intentional. */
	if (devfreq->profile->max_state < MSM_ADRENO_MAX_PWRLEVELS) {
		for (i = 0; i < devfreq->profile->max_state; i++)
			tz_pwrlevels[out++] = devfreq->profile->freq_table[i];
		tz_pwrlevels[0] = i;
	} else {
		pr_err(TAG "tz_pwrlevels[] is too short\n");
		return -EINVAL;
	}

	ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID, tz_pwrlevels,
			sizeof(tz_pwrlevels), NULL, 0);
	if (ret != 0)
		pr_err(TAG "tz_init failed\n");

	return kgsl_devfreq_add_notifier(devfreq->dev.parent, &priv->nb);
}
/*
 * do_fuseipq() - U-Boot command: blow fuses from a sec.dat blob via TZ.
 * Usage: fuseipq <address of sec.dat>
 *
 * TZ writes its provisioning status back through fuseip.status; both the
 * scm error and that status are reported.  Returns 1 on bad usage,
 * otherwise 0 (status is conveyed through the console messages).
 */
int do_fuseipq(cmd_tbl_t *cmdtp, int flag, int argc, char *const argv[])
{
	int ret;
	uint32_t fuse_status = 0;

	if (argc != 2) {
		printf("No Argument provided\n");
		printf("Command format: fuseipq <address>\n");
		return 1;
	}

	fuseip.address = simple_strtoul(argv[1], NULL, 16);
	/* TZ reports the provisioning result through this pointer. */
	fuseip.status = (uint32_t)&fuse_status;

	ret = scm_call(SCM_SVC_FUSE, TZ_BLOW_FUSE_SECDAT,
		       &fuseip, sizeof(fuseip), NULL, 0);
	if (ret || fuse_status)
		printf("%s: Error in QFPROM write (%d, %d)\n",
		       __func__, ret, fuse_status);

	if (fuse_status == FUSEPROV_SECDAT_LOCK_BLOWN)
		printf("Fuse already blown\n");
	else if (fuse_status == FUSEPROV_INVALID_HASH)
		printf("Invalid sec.dat\n");
	else if (fuse_status != FUSEPROV_SUCCESS)
		printf("Failed to Blow fuses\n");	/* bug fix: missing '\n' */
	else
		printf("Fuse Blown Successfully\n");

	return 0;
}
static int tz_init(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale) { int i = 0, j = 1, ret = 0; struct tz_priv *priv; struct kgsl_pwrctrl *pwr = &device->pwrctrl; unsigned int tz_pwrlevels[KGSL_MAX_PWRLEVELS + 1]; priv = pwrscale->priv = kzalloc(sizeof(struct tz_priv), GFP_KERNEL); if (pwrscale->priv == NULL) return -ENOMEM; priv->idle_dcvs = 0; priv->governor = TZ_GOVERNOR_ONDEMAND; spin_lock_init(&tz_lock); kgsl_pwrscale_policy_add_files(device, pwrscale, &tz_attr_group); for (i = 0; i < pwr->num_pwrlevels - 1; i++) { if (i == 0) tz_pwrlevels[j] = pwr->pwrlevels[i].gpu_freq; else if (pwr->pwrlevels[i].gpu_freq != pwr->pwrlevels[i - 1].gpu_freq) { j++; tz_pwrlevels[j] = pwr->pwrlevels[i].gpu_freq; } } tz_pwrlevels[0] = j; ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID, tz_pwrlevels, sizeof(tz_pwrlevels), NULL, 0); if (ret) priv->idle_dcvs = 1; return 0; }
/*
 * set_tamper_fuse_cmd() - blow the HLOS image-tamper software fuse via TZ.
 *
 * Returns the scm_call() status; no response payload is expected.
 */
static int set_tamper_fuse_cmd(void)	/* fix: '(void)', not K&R-style '()' */
{
	uint32_t fuse_id = HLOS_IMG_TAMPER_FUSE;

	return scm_call(SCM_SVC_FUSE, SCM_BLOW_SW_FUSE_ID,
			&fuse_id, sizeof(fuse_id), NULL, 0);
}
/* Trap into the TrustZone, and call funcs there. */
/*
 * __secure_tz_reset_entry2() - issue the DCVS "reset" command to TZ.
 * @scm_data:      two-word payload for the legacy atomic call
 * @size_scm_data: payload size, used only on the legacy 64-bit path
 * @is_64:         select the 64-bit interface
 *
 * Returns the scm call's status.
 */
static int __secure_tz_reset_entry2(unsigned int *scm_data, u32 size_scm_data,
					bool is_64)
{
	int ret;
	/* sync memory before sending the commands to tz*/
	__iowmb();

	if (!is_64) {
		/* Atomic call must be serialized against other TZ traps. */
		spin_lock(&tz_lock);
		ret = scm_call_atomic2(SCM_SVC_IO, TZ_RESET_ID, scm_data[0],
					scm_data[1]);
		spin_unlock(&tz_lock);
	} else {
		if (is_scm_armv8()) {
			/* ARMv8 reset takes no arguments; only the FNID matters. */
			struct scm_desc desc = {0};
			desc.arginfo = 0;
			ret = scm_call2(SCM_SIP_FNID(SCM_SVC_DCVS,
					TZ_RESET_ID_64), &desc);
		} else {
			ret = scm_call(SCM_SVC_DCVS, TZ_RESET_ID_64, scm_data,
					size_scm_data, NULL, 0);
		}
	}
	return ret;
}
/*
 * Hand a peripheral image's metadata to the secure PIL service for
 * validation.  TZ reads the blob through a physical address, so it is
 * first duplicated into physically contiguous kernel memory.  Returns 0
 * on success, -ENOMEM on allocation failure, a negative scm error, or
 * the secure-world status code.
 */
int pas_init_image(enum pas_id id, const u8 *metadata, size_t size)
{
	struct pas_init_image_req {
		u32 proc;
		u32 image_addr;
	} request;
	u32 scm_ret = 0;
	void *buf;
	int ret;

	/* Make memory physically contiguous */
	buf = kmemdup(metadata, size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	request.proc = id;
	request.image_addr = virt_to_phys(buf);

	ret = scm_call(SCM_SVC_PIL, PAS_INIT_IMAGE_CMD,
		       &request, sizeof(request), &scm_ret, sizeof(scm_ret));
	kfree(buf);

	return ret ? ret : scm_ret;
}
/*
 * get_tamper_fuse_cmd_new() - query whether the software fuse @flag is
 * blown, using the legacy or ARMv8 SCM convention as appropriate.
 *
 * The result is cached in the module-global 'ic' and returned; 0xff is
 * returned (and cached) when the query itself fails.
 */
static uint8_t get_tamper_fuse_cmd_new(uint32_t flag)
{
	int ret;
	uint32_t fuse_id;
	uint8_t resp_buf;
	size_t resp_len;
	struct scm_desc desc = {0};

	resp_len = sizeof(resp_buf);
	desc.args[0] = fuse_id = flag;
	desc.arginfo = SCM_ARGS(1);

	if (!is_scm_armv8()) {
		ret = scm_call(SCM_SVC_FUSE, SCM_IS_SW_FUSE_BLOWN_ID,
			       &fuse_id, sizeof(fuse_id),
			       &resp_buf, resp_len);
	} else {
		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_FUSE,
					     SCM_IS_SW_FUSE_BLOWN_ID), &desc);
		resp_buf = desc.ret[0];
	}

	if (ret) {
		/* bug fix: printk lacked a log level and trailing newline */
		printk(KERN_ERR "scm_call/1 returned %d\n", ret);
		resp_buf = 0xff;	/* sentinel: query failed */
	}

	ic = resp_buf;	/* cache the latest answer in the module global */
	return resp_buf;
}
/*
 * Report whether secure PIL supports peripheral @id: 0 = unsupported,
 * 1 (or any non-zero TZ answer) = supported, negative = scm error.
 */
int pas_supported(enum pas_id id)
{
	u32 periph = id, supported = 0;
	int rc;

	if (!secure_pil)
		return 0;
	if (machine_is_tenderloin())
		return 0;
	/*
	 * 8660 SCM doesn't support querying secure PIL support so just return
	 * true if not overridden on the command line.
	 */
	if (cpu_is_msm8x60())
		return 1;
	if (scm_is_call_available(SCM_SVC_PIL, PAS_IS_SUPPORTED_CMD) <= 0)
		return 0;

	rc = scm_call(SCM_SVC_PIL, PAS_IS_SUPPORTED_CMD,
		      &periph, sizeof(periph), &supported, sizeof(supported));
	return rc ? rc : supported;
}
/*===========================================================================
  R5RS : 6.4 Control features
===========================================================================*/
/*
 * scm_p_force() - R5RS `force': evaluate a promise, memoizing its value.
 *
 * A promise is represented as a pair whose CDR is the delayed thunk and
 * whose CAR receives the memoized value once forced.
 */
SCM_EXPORT ScmObj
scm_p_force(ScmObj promise)
{
    ScmObj proc, result;
    DECLARE_FUNCTION("force", procedure_fixed_1);

    ENSURE_CONS(promise);
    proc = CDR(promise);
    ENSURE_PROCEDURE(proc);

    /* Already forced: return the memoized value without re-evaluating. */
    if (PROMISE_FORCEDP(promise))
        return CAR(promise);

    /* R5RS:
     * Rationale: A promise may refer to its own value, as in the last
     * example above. Forcing such a promise may cause the promise to be
     * forced a second time before the value of the first force has been
     * computed. This complicates the definition of `make-promise'.
     */
    result = scm_call(proc, SCM_NULL);
    /* Re-check: the thunk itself may have forced this promise; the first
     * memoized value wins, per the rationale above. */
    if (PROMISE_FORCEDP(promise))
        return CAR(promise);
    SET_CAR(promise, result);

    return result;
}
int __spdm_scm_call(struct spdm_args *args, int num_args) { int status = 0; SPDM_IPC_LOG("%s:svc_id:%d,cmd_id:%d,cmd:%llu,num_args:%d\n", __func__, SPDM_SCM_SVC_ID, SPDM_SCM_CMD_ID, args->arg[0], num_args); if (!is_scm_armv8()) { status = scm_call(SPDM_SCM_SVC_ID, SPDM_SCM_CMD_ID, args->arg, sizeof(args->arg), args->ret, sizeof(args->ret)); } else { struct scm_desc desc = {0}; desc.arginfo = SCM_ARGS(num_args); memcpy(desc.args, args->arg, COPY_SIZE(sizeof(desc.args), sizeof(args->arg))); status = scm_call2(SCM_SIP_FNID(SPDM_SCM_SVC_ID, SPDM_SCM_CMD_ID), &desc); memcpy(args->ret, desc.ret, COPY_SIZE(sizeof(args->ret), sizeof(desc.ret))); } SPDM_IPC_LOG("%s:svc_id:%d,cmd_id:%d,cmd:%llu,Ret[0]:%llu,Ret[1]:%llu\n" , __func__, SPDM_SCM_SVC_ID, SPDM_SCM_CMD_ID, args->arg[0], args->ret[0], args->ret[1]); return status; }
/* Create random key(8byte size) by tzbsp
 * if return value =0, error to create random key by tzbsp
 * if return value >0, success to create random key by bzbsp
 */
u32 qfprom_create_random(void)
{
	int ret;
	u32 rand = 0;
	struct prng_data {
		u32 r;	/* physical address TZ writes the random bytes to */
		u32 s;	/* number of random bytes requested */
	} pdata;
	u8 *p_buf = NULL;

	/* 4-byte scratch buffer for TZ to fill via its physical address.
	 * NOTE(review): assumes the SCM path keeps this cache line coherent
	 * with the TZ write — confirm against the SCM driver. */
	p_buf = kmalloc(sizeof(u8)*4, GFP_KERNEL);
	if(!p_buf) {
		printk("%s: memory alloc fail\n", __func__);
		goto err;	/* returns 0 = error, per the contract above */
	}

	pdata.r = virt_to_phys((void *)p_buf);
	pdata.s = 4;

	ret = scm_call(QFPROM_SVC_ID, QFPROM_PRNG_CMD, &pdata,
			sizeof(pdata), NULL, 0);
	if(ret < 0){
		printk("%s: scm call error for creating random\n", __func__);
		goto err_scm;	/* rand stays 0 = error */
	}

	/* Assemble the 4 bytes big-endian into the return value. */
	rand = (p_buf[0]<<24)|(p_buf[1]<<16)|(p_buf[2]<<8)|(p_buf[3]);

err_scm:
	kfree(p_buf);
err:
	return rand;
}
/*
 * __secure_tz_update_entry3() - issue the DCVS "update" command to TZ.
 * @scm_data:      three-word payload
 * @size_scm_data: payload size, used only on the legacy 64-bit path
 * @val:           out: TZ's answer (new level / decision value)
 * @size_val:      size of @val, used only on the legacy 64-bit path
 * @is_64:         select the 64-bit interface
 *
 * Returns the scm call's status; *val carries the TZ result on every path.
 */
static int __secure_tz_update_entry3(unsigned int *scm_data, u32 size_scm_data,
		int *val, u32 size_val, bool is_64)
{
	int ret;
	/* sync memory before sending the commands to tz*/
	__iowmb();

	if (!is_64) {
		/* Atomic call is serialized; its return value IS the answer. */
		spin_lock(&tz_lock);
		ret = scm_call_atomic3(SCM_SVC_IO, TZ_UPDATE_ID,
					scm_data[0], scm_data[1], scm_data[2]);
		spin_unlock(&tz_lock);
		*val = ret;
	} else {
		if (is_scm_armv8()) {
			/* ARMv8 convention: args in the descriptor, answer in ret[0]. */
			struct scm_desc desc = {0};
			desc.args[0] = scm_data[0];
			desc.args[1] = scm_data[1];
			desc.args[2] = scm_data[2];
			desc.arginfo = SCM_ARGS(3);
			ret = scm_call2(SCM_SIP_FNID(SCM_SVC_DCVS,
					TZ_V2_UPDATE_ID_64), &desc);
			*val = desc.ret[0];
		} else {
			ret = scm_call(SCM_SVC_DCVS, TZ_UPDATE_ID_64, scm_data,
				size_scm_data, val, size_val);
		}
	}
	return ret;
}
/*
 * Blow the software fuse identified by @flag via TZ.  No response payload
 * is expected; the scm_call() status is returned as-is.
 */
static int set_tamper_fuse_cmd_new(uint32_t flag)
{
	uint32_t fuse = flag;
	int rc;

	rc = scm_call(SCM_SVC_FUSE, SCM_BLOW_SW_FUSE_ID,
		      &fuse, sizeof(fuse), NULL, 0);
	return rc;
}
/*
 * tz_start() - devfreq governor start hook: restore governor private data,
 * hand the GPU power-level table to TZ, and precompute the bus-vote
 * cut-over percentages.
 *
 * tz_pwrlevels[0] holds the level count; frequencies follow from index 1.
 * Returns -EINVAL when the frequency table does not fit, otherwise the
 * result of registering the notifier.  A tz_init failure is only logged.
 */
static int tz_start(struct devfreq *devfreq)
{
	struct devfreq_msm_adreno_tz_data *priv;
	unsigned int tz_pwrlevels[MSM_ADRENO_MAX_PWRLEVELS + 1];
	unsigned int t1, t2 = 2 * HIST;
	int i, out, ret;
	struct msm_adreno_extended_profile *ext_profile = container_of(
					(devfreq->profile),
					struct msm_adreno_extended_profile,
					profile);

	/*
	 * Assuming that we have only one instance of the adreno device
	 * connected to this governor,
	 * can safely restore the pointer to the governor private data
	 * from the container of the device profile
	 */
	devfreq->data = ext_profile->private_data;

	priv = devfreq->data;
	priv->nb.notifier_call = tz_notify;

	/* Slot 0 is reserved for the level count. */
	out = 1;
	/* NOTE(review): '<' rejects max_state == MSM_ADRENO_MAX_PWRLEVELS
	 * even though the array (MAX + 1 entries) would hold it — confirm
	 * whether the off-by-one is intentional. */
	if (devfreq->profile->max_state < MSM_ADRENO_MAX_PWRLEVELS) {
		for (i = 0; i < devfreq->profile->max_state; i++)
			tz_pwrlevels[out++] = devfreq->profile->freq_table[i];
		tz_pwrlevels[0] = i;
	} else {
		pr_err(TAG "tz_pwrlevels[] is too short\n");
		return -EINVAL;
	}

	ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID, tz_pwrlevels,
			sizeof(tz_pwrlevels), NULL, 0);
	if (ret != 0)
		pr_err(TAG "tz_init failed\n");

	/* Set up the cut-over percentages for the bus calculation. */
	if (priv->bus.num) {
		/* p_up[i] / p_down[i] bracket each IB level as a percentage
		 * of the top level, with HIST of hysteresis; t2 carries the
		 * previous iteration's threshold down one slot. */
		for (i = 0; i < priv->bus.num; i++) {
			t1 = (u32)(100 * priv->bus.ib[i]) /
				(u32)priv->bus.ib[priv->bus.num - 1];
			priv->bus.p_up[i] = t1 - HIST;
			priv->bus.p_down[i] = t2 - 2 * HIST;
			t2 = t1;
		}
		/* Set the upper-most and lower-most bounds correctly. */
		priv->bus.p_down[0] = 0;
		priv->bus.p_down[1] = (priv->bus.p_down[1] > (2 * HIST)) ?
					priv->bus.p_down[1] : (2 * HIST);
		if (priv->bus.num - 1 >= 0)
			priv->bus.p_up[priv->bus.num - 1] = 100;
		_update_cutoff(priv, priv->bus.max);
	}

	return kgsl_devfreq_add_notifier(devfreq->dev.parent, &priv->nb);
}
/*
 * Ask the TZ DCVS service to create the core group identified by @id.
 * Returns the scm_call() status directly; no response data is expected.
 */
int msm_dcvs_scm_create_group(uint32_t id)
{
	return scm_call(SCM_SVC_DCVS, DCVS_CMD_CREATE_GROUP,
			&id, sizeof(uint32_t), NULL, 0);
}
/*
 * get_tamper_fuse_cmd() - query whether the HLOS image-tamper software
 * fuse is blown.
 *
 * Returns TZ's answer, or 0 when the query itself fails.
 */
static uint8_t get_tamper_fuse_cmd(void)
{
	uint32_t fuse_id = HLOS_IMG_TAMPER_FUSE;
	uint8_t resp_buf = 0;	/* bug fix: was read uninitialized on failure */
	int ret;

	/*
	 * Bug fix: the original ignored scm_call()'s return value and
	 * returned resp_buf unconditionally, so a failed call returned
	 * uninitialized stack garbage (undefined behavior).
	 */
	ret = scm_call(SCM_SVC_FUSE, SCM_IS_SW_FUSE_BLOWN_ID,
		       &fuse_id, sizeof(fuse_id),
		       &resp_buf, sizeof(resp_buf));
	if (ret)
		return 0;	/* query failed: report "not blown" */

	return resp_buf;
}
/*
 * get_tamper_fuse_cmd() - query whether the HLOS image-tamper software
 * fuse is blown, caching the answer in the module-global 'ic'.
 *
 * Once 'ic' holds STATE_IC_BAD the query is short-circuited and BAD is
 * returned forever (fail-latched).
 */
static uint8_t get_tamper_fuse_cmd(void)
{
	uint32_t fuse_id = HLOS_IMG_TAMPER_FUSE;

	/* Latched-bad state never recovers; skip the TZ round trip. */
	if (ic == STATE_IC_BAD)
		return STATE_IC_BAD;

	/* NOTE(review): the scm_call() status is ignored — on failure 'ic'
	 * keeps its previous value and that stale value is returned.
	 * Confirm this best-effort behavior is intended. */
	scm_call(SCM_SVC_FUSE, SCM_IS_SW_FUSE_BLOWN_ID,
		&fuse_id, sizeof(fuse_id), &ic, sizeof(ic));

	return ic;
}
static int set_tamper_fuse_cmd(void) { uint32_t fuse_id = HLOS_IMG_TAMPER_FUSE; if (ic == STATE_IC_BAD) return 0; ic = STATE_IC_BAD; return scm_call(SCM_SVC_FUSE, SCM_BLOW_SW_FUSE_ID, &fuse_id, sizeof(fuse_id), 0, 0); }
static int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size, unsigned int permission_type) { struct cp_lock_msg cmd; cmd.start = phy_base; cmd.end = phy_base + size; cmd.permission_type = permission_type; cmd.lock = SCM_CP_UNPROTECT; return scm_call(SCM_SVC_CP, SCM_CP_LOCK_CMD_ID, &cmd, sizeof(cmd), NULL, 0); }
/*
 * Program a boot entry address into the secure world.  @flags qualifies
 * the request (its semantics are defined by the SCM_BOOT_ADDR command).
 * Returns the scm_call() status.
 */
int scm_set_boot_addr(void *addr, int flags)
{
	struct {
		unsigned int flags;
		void *addr;
	} cmd;

	cmd.flags = flags;
	cmd.addr = addr;

	return scm_call(SCM_SVC_BOOT, SCM_BOOT_ADDR,
			&cmd, sizeof(cmd), NULL, 0);
}
/*
 * Ask the secure PIL service to shut down peripheral @id.  Returns the
 * scm transport error if the call failed, otherwise TZ's status code.
 */
int pas_shutdown(enum pas_id id)
{
	u32 proc = id, scm_ret = 0;
	int rc;

	rc = scm_call(SCM_SVC_PIL, PAS_SHUTDOWN_CMD,
		      &proc, sizeof(proc), &scm_ret, sizeof(scm_ret));
	return rc ? rc : scm_ret;
}