static int _tee_shm_dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) { struct tee_shm *shm = dmabuf->priv; size_t size = vma->vm_end - vma->vm_start; struct tee *tee; int ret; pgprot_t prot; unsigned long pfn; tee = shm->ctx->tee; pfn = shm->paddr >> PAGE_SHIFT; INMSG(); if (shm->flags & TEE_SHM_CACHED) prot = vma->vm_page_prot; else prot = pgprot_noncached(vma->vm_page_prot); ret = remap_pfn_range(vma, vma->vm_start, pfn, size, prot); if (!ret) vma->vm_private_data = (void *)shm; dev_dbg(_DEV(shm->ctx->tee), "%s: map the shm (p@=%p,s=%dKiB) => %x\n", __func__, (void *)shm->paddr, (int)size / 1024, (unsigned int)vma->vm_start); OUTMSG(ret); return ret; }
static void _tee_shm_detach_dma_buf(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) { struct tee_shm_attach *tee_shm_attach = attach->priv; struct sg_table *sgt; struct tee_shm *shm; struct tee *tee; shm = dmabuf->priv; tee = shm->tee; INMSG(); if (!tee_shm_attach) { OUTMSG(0); return; } sgt = &tee_shm_attach->sgt; if (tee_shm_attach->dir != DMA_NONE) dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, tee_shm_attach->dir); sg_free_table(sgt); devm_kfree(_DEV(tee), tee_shm_attach); attach->priv = NULL; OUTMSG(0); }
static int _tee_shm_attach_dma_buf(struct dma_buf *dmabuf, struct device *dev, struct dma_buf_attachment *attach) { struct tee_shm_attach *tee_shm_attach; struct tee_shm *shm; struct tee *tee; shm = dmabuf->priv; tee = shm->tee; INMSG(); tee_shm_attach = devm_kzalloc(_DEV(tee), sizeof(*tee_shm_attach), GFP_KERNEL); if (!tee_shm_attach) { OUTMSG(-ENOMEM); return -ENOMEM; } tee_shm_attach->dir = DMA_NONE; attach->priv = tee_shm_attach; OUTMSG(0); return 0; }
/*
 * Invokes a TEE command (secure service, sub-PA or whatever).
 *
 * session      - open session the command is sent on (must not be NULL)
 * cmd_id       - command identifier interpreted by the trusted application
 * operation    - may be NULL per the GP spec; a zeroed dummy is substituted
 * error_origin - if non-NULL, receives the origin of the returned code
 */
TEEC_Result TEEC_InvokeCommand(TEEC_Session *session, uint32_t cmd_id,
			       TEEC_Operation *operation,
			       uint32_t *error_origin)
{
	struct tee_cmd tc;
	TEEC_Operation dummy_op;
	TEEC_Result result = TEEC_SUCCESS;
	uint32_t origin = TEEC_ORIGIN_API;

	INMSG("session: [%p], cmd_id: [%d]", session, cmd_id);

	if (session == NULL) {
		result = TEEC_ERROR_BAD_PARAMETERS;
		goto error;
	}

	if (operation == NULL) {
		/*
		 * The code here exist because Global Platform API states that
		 * it is allowed to give operation as a NULL pointer. In kernel
		 * and secure world we in most cases don't want this to be NULL,
		 * hence we use this dummy operation when a client doesn't
		 * provide any operation.
		 */
		memset(&dummy_op, 0, sizeof(TEEC_Operation));
		operation = &dummy_op;
	}

	teec_mutex_lock(&mutex);
	operation->session = session;
	teec_mutex_unlock(&mutex);

	memset(&tc, 0, sizeof(struct tee_cmd));
	tc.cmd = cmd_id;
	tc.op = operation;

	if (ioctl(session->fd, TEE_INVOKE_COMMAND_IOC, &tc) != 0) {
		EMSG("Ioctl(TEE_INVOKE_COMMAND_IOC) failed! (%s)\n",
		     strerror(errno));
		/*
		 * BUG FIX: tc was zero-initialized above, so reading tc.err
		 * after a failed ioctl reported TEEC_SUCCESS to the caller.
		 * Report a communication error instead.
		 */
		teec_mutex_lock(&mutex);
		operation->session = NULL;
		teec_mutex_unlock(&mutex);
		origin = TEEC_ORIGIN_COMMS;
		result = TEEC_ERROR_COMMUNICATION;
		goto error;
	}

	/* operation cannot be NULL here thanks to the dummy_op fallback. */
	teec_mutex_lock(&mutex);
	operation->session = NULL;
	teec_mutex_unlock(&mutex);

	origin = tc.origin;
	result = tc.err;

error:
	if (error_origin != NULL)
		*error_origin = origin;

	OUTRMSG(result);
}
/*
 * This function initializes a new TEE Context, connecting this Client
 * application to the TEE identified by the name name.
 *
 * name == NULL will give the default TEE.
 *
 * Returns TEEC_SUCCESS, TEEC_ERROR_BAD_PARAMETERS (NULL context or
 * truncated device name) or TEEC_ERROR_ITEM_NOT_FOUND (device missing).
 */
TEEC_Result TEEC_InitializeContext(const char *name, TEEC_Context *context)
{
	int name_size = 0;
	const char *_name = name;

	/*
	 * BUG FIX: name may legitimately be NULL, and passing NULL to a
	 * "%s" conversion is undefined behavior.
	 */
	INMSG("%s", name ? name : "(default)");

	if (context == NULL)
		return TEEC_ERROR_BAD_PARAMETERS;

	/*
	 * Specification says that when no name is provided it should fall back
	 * on a predefined TEE.
	 */
	if (name == NULL)
		_name = TEE_TZ_DEVICE_NAME;

	name_size = snprintf(context->devname, TEEC_MAX_DEVNAME_SIZE,
			     "/dev/%s", _name);
	if (name_size >= TEEC_MAX_DEVNAME_SIZE)
		return TEEC_ERROR_BAD_PARAMETERS; /* Device name truncated */

	context->fd = open(context->devname, O_RDWR);
	if (context->fd == -1)
		return TEEC_ERROR_ITEM_NOT_FOUND;

	/*
	 * NOTE(review): this re-initializes the library-global mutex on
	 * every context creation — confirm multi-context callers cannot
	 * race here.
	 */
	pthread_mutex_init(&mutex, NULL);

	OUTMSG("");
	return TEEC_SUCCESS;
}
static struct sg_table *_tee_shm_dma_buf_map_dma_buf( struct dma_buf_attachment *attach, enum dma_data_direction dir) { struct tee_shm_attach *tee_shm_attach = attach->priv; struct tee_shm *tee_shm = attach->dmabuf->priv; struct sg_table *sgt = NULL; struct scatterlist *rd, *wr; unsigned int i; int nents, ret; struct tee *tee; tee = tee_shm->tee; INMSG(); /* just return current sgt if already requested. */ if (tee_shm_attach->dir == dir && tee_shm_attach->is_mapped) { OUTMSGX(&tee_shm_attach->sgt); return &tee_shm_attach->sgt; } sgt = &tee_shm_attach->sgt; ret = sg_alloc_table(sgt, tee_shm->sgt.orig_nents, GFP_KERNEL); if (ret) { dev_err(_DEV(tee), "failed to alloc sgt.\n"); return ERR_PTR(-ENOMEM); } rd = tee_shm->sgt.sgl; wr = sgt->sgl; for (i = 0; i < sgt->orig_nents; ++i) { sg_set_page(wr, sg_page(rd), rd->length, rd->offset); rd = sg_next(rd); wr = sg_next(wr); } if (dir != DMA_NONE) { nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir); if (!nents) { dev_err(_DEV(tee), "failed to map sgl with iommu.\n"); sg_free_table(sgt); sgt = ERR_PTR(-EIO); goto err_unlock; } } tee_shm_attach->is_mapped = true; tee_shm_attach->dir = dir; attach->priv = tee_shm_attach; err_unlock: OUTMSGX(sgt); return sgt; }
/*
 * Allocates a shared-memory object via the TEE backend and builds a
 * single-page sg table over its contiguous physical backing.
 * Returns the shm on success, or NULL / ERR_PTR on failure (callers use
 * IS_ERR_OR_NULL). Ownership of the shm passes to the caller.
 */
struct tee_shm *tee_shm_alloc(struct tee *tee, size_t size, uint32_t flags)
{
	struct tee_shm *shm;
	unsigned long pfn;
	unsigned int nr_pages;
	struct page *page;
	int ret;

	INMSG();

	shm = tee->ops->alloc(tee, size, flags);
	if (IS_ERR_OR_NULL(shm)) {
		dev_err(_DEV(tee),
			"%s: allocation failed (s=%d,flags=0x%08x) err=%ld\n",
			__func__, (int)size, flags, PTR_ERR(shm));
		goto exit;
	}

	shm->tee = tee;

	dev_dbg(_DEV(tee), "%s: shm=%p, paddr=%p,s=%d/%d app=\"%s\" pid=%d\n",
		__func__, shm, (void *)shm->paddr, (int)shm->size_req,
		(int)shm->size_alloc, current->comm, current->pid);

	pfn = shm->paddr >> PAGE_SHIFT;
	page = pfn_to_page(pfn);
	if (IS_ERR_OR_NULL(page)) {
		dev_err(_DEV(tee), "%s: pfn_to_page(%lx) failed\n",
			__func__, pfn);
		tee->ops->free(shm);
		/*
		 * BUG FIX: return a well-defined ERR_PTR instead of casting
		 * the bad page pointer to struct tee_shm *, and go through
		 * the common exit so the trace stays balanced.
		 */
		shm = ERR_PTR(-EINVAL);
		goto exit;
	}

	/* Only one page of contiguous physical memory */
	nr_pages = 1;
	ret = sg_alloc_table_from_pages(&shm->sgt, &page, nr_pages, 0,
					nr_pages * PAGE_SIZE, GFP_KERNEL);
	if (ret) {	/* returns 0 or a negative errno, not an ERR_PTR */
		dev_err(_DEV(tee),
			"%s: sg_alloc_table_from_pages() failed\n", __func__);
		tee->ops->free(shm);
		shm = ERR_PTR(ret);
	}
exit:
	OUTMSGX(shm);
	return shm;
}
/*
 * Self-test for the TEE core trace macros: emits one message at every
 * trace level plus raw variants, so the effect of the current
 * _trace_level filter can be checked by inspecting the log output.
 */
void core_trace_test(void)
{
	INMSG("level: [%d]", _trace_level);
	IMSG("current trace level = %d", _trace_level);
	IMSG("Without args");

	/* One message per severity level, from always-on down to flow. */
	AMSG("[%d] and [%s]", TRACE_ALWAYS, "TRACE_ALWAYS");
	EMSG("[%d] and [%s]", TRACE_ERROR, "TRACE_ERROR");
	IMSG("[%d] and [%s]", TRACE_INFO, "TRACE_INFO");
	DMSG("[%d] and [%s]", TRACE_DEBUG, "TRACE_DEBUG");
	FMSG("[%d] and [%s]", TRACE_FLOW, "TRACE_FLOW");

	/*
	 * _RAW variants — NOTE(review): presumably emitted without the
	 * usual prefix/newline decoration; confirm against the trace
	 * macro definitions.
	 */
	AMSG_RAW("Raw trace in TEE CORE with level [%s]", "TRACE_ALWAYS");
	AMSG_RAW(" __ end of raw trace\n");
	DMSG_RAW("Raw trace in TEE CORE with level [%s]", "TRACE_DEBUG");
	DMSG_RAW(" __ end of raw trace\n");

	OUTMSG("");
}
/*
 * release() handler for the shm dma-buf: invoked when the last reference
 * to the dma-buf is dropped; frees the underlying I/O shm.
 */
static void _tee_shm_dma_buf_release(struct dma_buf *dmabuf)
{
	struct tee_shm *shm = dmabuf->priv;
	struct tee_context *ctx;
	struct tee *tee;

	tee = shm->ctx->tee;

	INMSG();

	ctx = shm->ctx;
	dev_dbg(_DEV(tee), "%s: shm=%p, paddr=%p,s=%d/%d app=\"%s\" pid=%d\n",
		__func__, shm, (void *)shm->paddr, (int)shm->size_req,
		(int)shm->size_alloc, current->comm, current->pid);

	tee_shm_free_io(shm);

	OUTMSG(0);
}
/*
 * Allocates a temporary shm on behalf of an RPC from secure world and
 * registers it on the driver's RPC shm list. Returns the shm, or
 * NULL / ERR_PTR on allocation failure (callers use IS_ERR_OR_NULL).
 */
struct tee_shm *tee_shm_alloc_from_rpc(struct tee *tee, size_t size)
{
	struct tee_shm *shm;

	INMSG();

	shm = tee_shm_alloc(tee, size, TEE_SHM_TEMP | TEE_SHM_FROM_RPC);
	if (!IS_ERR_OR_NULL(shm)) {
		/* Track the buffer under the driver lock. */
		mutex_lock(&tee->lock);
		tee_inc_stats(&tee->stats[TEE_STATS_SHM_IDX]);
		list_add_tail(&shm->entry, &tee->list_rpc_shm);
		mutex_unlock(&tee->lock);

		/* RPC buffers are not bound to any client context. */
		shm->ctx = NULL;
	} else {
		dev_err(_DEV(tee), "%s: buffer allocation failed (%ld)\n",
			__func__, PTR_ERR(shm));
	}

	OUTMSGX(shm);
	return shm;
}