// TODO: when do release secure command buffer int32_t cmdq_sec_fill_iwc_resource_msg_unlocked(int32_t iwcCommand, void *_pTask, int32_t thread, void *_pIwc) { iwcCmdqMessage_t *pIwc; cmdqSecSharedMemoryHandle pSharedMem; pSharedMem = cmdq_core_get_secure_shared_memory(); if (NULL == pSharedMem) { CMDQ_ERR("FILL:RES, NULL shared memory\n"); return -EFAULT; } if(pSharedMem && NULL == pSharedMem->pVABase) { CMDQ_ERR("FILL:RES, %p shared memory has not init\n", pSharedMem); return -EFAULT; } pIwc = (iwcCmdqMessage_t *)_pIwc; memset(pIwc, 0x0, sizeof(iwcCmdqMessage_t)); pIwc->cmd = iwcCommand; pIwc->pathResource.shareMemoyPA = 0LL | (pSharedMem->MVABase); pIwc->pathResource.size = pSharedMem->size; CMDQ_MSG("FILL:RES, shared memory:%pa(0x%llx), size:%d\n", &(pSharedMem->MVABase), pIwc->pathResource.shareMemoyPA, pSharedMem->size); /* medatada: debug config */ pIwc->debug.logLevel = (cmdq_core_should_print_msg()) ? (1) : (0); pIwc->debug.enableProfile = cmdq_core_profile_enabled(); return 0; }
/*
 * Allocates world-shared memory (WSM) that is visible to both the normal
 * world and the MobiCore secure world.
 *
 * Returns 0 on success; -1 when *ppWsm is already set or mc_malloc_wsm fails.
 * On success *ppWsm receives the WSM buffer (owned by the MobiCore device).
 */
int32_t cmdq_sec_allocate_wsm_impl(uint32_t deviceId, uint8_t **ppWsm, uint32_t wsmSize)
{
	int32_t status = 0;
	enum mc_result mcRet = MC_DRV_OK;

	do {
		/* refuse to overwrite an existing buffer pointer */
		if ((*ppWsm) != NULL) {
			status = -1;
			CMDQ_ERR("[SEC]_WSM_ALLOC: err[pWsm is not NULL]");
			break;
		}

		/* Because world-shared memory (WSM) will be managed by the
		 * MobiCore device, not the Linux kernel, call mc_malloc_wsm
		 * instead of vmalloc/kmalloc. This prevents errors such as
		 * "can not resolve tci physical address". */
		mcRet = mc_malloc_wsm(deviceId, 0, wsmSize, ppWsm, 0);
		if (MC_DRV_OK != mcRet) {
			CMDQ_ERR("[SEC]_WSM_ALLOC: err[0x%x]\n", mcRet);
			status = -1;
			break;
		}

		CMDQ_MSG("[SEC]_WSM_ALLOC: status[%d], *ppWsm: 0x%p\n", status, (*ppWsm));
	} while (0);

	return status;
}
/*
 * Opens a secure-world session identified by uuid, using a pre-allocated
 * WSM buffer for inter-world communication.
 * Returns 0 on success, -1 on invalid parameters or mc_open_session failure.
 */
int32_t cmdq_sec_open_session_impl(uint32_t deviceId, const struct mc_uuid_t *uuid,
				   uint8_t *pWsm, uint32_t wsmSize,
				   struct mc_session_handle *pSessionHandle)
{
	enum mc_result mcRet;

	if (NULL == pWsm || NULL == pSessionHandle) {
		CMDQ_ERR("[SEC]_SESSION_OPEN: invalid param, pWsm[0x%p], pSessionHandle[0x%p]\n",
			 pWsm, pSessionHandle);
		return -1;
	}

	/* start from a clean handle bound to the requested device */
	memset(pSessionHandle, 0, sizeof(*pSessionHandle));
	pSessionHandle->device_id = deviceId;

	mcRet = mc_open_session(pSessionHandle, uuid, pWsm, wsmSize);
	if (MC_DRV_OK != mcRet) {
		CMDQ_ERR("[SEC]_SESSION_OPEN: err[0x%x]\n", mcRet);
		return -1;
	}

	CMDQ_MSG("[SEC]_SESSION_OPEN: status[%d], mcRet[0x%x]\n", 0, mcRet);
	return 0;
}
int32_t cmdq_sec_allocate_path_resource_unlocked(void) { #ifdef CMDQ_SECURE_PATH_SUPPORT int32_t status = 0; if(1 == atomic_read(&gCmdqSecPathResource)) { /* has allocated successfully */ return status; } status = cmdq_sec_submit_to_secure_world_async_unlocked( CMD_CMDQ_TL_PATH_RES_ALLOCATE, NULL, CMDQ_INVALID_THREAD, NULL, NULL); if (0 > status) { CMDQ_ERR("%s[%d]\n", __func__, status); } else { atomic_set(&gCmdqSecPathResource, 1); } return status; #else CMDQ_ERR("secure path not support\n"); return -EFAULT; #endif }
static long cmdq_ioctl_compat(struct file *pFile, unsigned int code, unsigned long param) { switch (code) { case CMDQ_IOCTL_QUERY_USAGE: case CMDQ_IOCTL_EXEC_COMMAND: case CMDQ_IOCTL_ASYNC_JOB_EXEC: case CMDQ_IOCTL_ASYNC_JOB_WAIT_AND_CLOSE: case CMDQ_IOCTL_ALLOC_WRITE_ADDRESS: case CMDQ_IOCTL_FREE_WRITE_ADDRESS: case CMDQ_IOCTL_READ_ADDRESS_VALUE: case CMDQ_IOCTL_QUERY_CAP_BITS: case CMDQ_IOCTL_QUERY_DTS: case CMDQ_IOCTL_NOTIFY_ENGINE: /* All ioctl structures should be the same size in 32-bit and 64-bit linux. */ return cmdq_ioctl(pFile, code, param); case CMDQ_IOCTL_LOCK_MUTEX: case CMDQ_IOCTL_UNLOCK_MUTEX: CMDQ_ERR("[COMPAT]deprecated ioctl 0x%08x\n", code); return -ENOIOCTLCMD; default: CMDQ_ERR("[COMPAT]unrecognized ioctl 0x%08x\n", code); return -ENOIOCTLCMD; } CMDQ_ERR("[COMPAT]unrecognized ioctl 0x%08x\n", code); return -ENOIOCTLCMD; }
/*
 * Module init: registers group callbacks, the platform driver, and a PM
 * notifier. Returns 0 on success or -ENODEV on any registration failure.
 */
static int __init cmdq_init(void)
{
	int status;

	CMDQ_MSG("CMDQ driver init begin\n");

	/* Initialize group callback */
	cmdqCoreInitGroupCB();

	/* Register MDP callback */
	cmdqCoreRegisterCB(CMDQ_GROUP_MDP,
			   cmdqMdpClockOn, cmdqMdpDumpInfo, cmdqMdpResetEng, cmdqMdpClockOff);

	/* Register VENC callback */
	cmdqCoreRegisterCB(CMDQ_GROUP_VENC, NULL, cmdqVEncDumpInfo, NULL, NULL);

	status = platform_driver_register(&gCmdqDriver);
	if (0 != status) {
		CMDQ_ERR("Failed to register the CMDQ driver(%d)\n", status);
		return -ENODEV;
	}

	/* register pm notifier */
	status = register_pm_notifier(&cmdq_pm_notifier_block);
	if (0 != status) {
		CMDQ_ERR("Failed to register_pm_notifier(%d)\n", status);
		/* BUGFIX: roll back the platform driver registration so a
		 * failed init does not leave a stale driver registered */
		platform_driver_unregister(&gCmdqDriver);
		return -ENODEV;
	}

	CMDQ_MSG("CMDQ driver init end\n");

	return 0;
}
/*
 * Services a userspace request to read back values from GCE PA write
 * buffers: copies the DMA address list in from user memory, reads each
 * address via cmdqCoreReadWriteAddress(), and copies the values back out.
 * All failures are logged and abort the request silently (void return).
 */
static void cmdq_driver_process_read_address_request(struct cmdqReadAddressStruct *req_user)
{
	/* create kernel-space buffer for working */
	uint32_t *addrs = NULL;
	uint32_t *values = NULL;
	dma_addr_t pa = 0;
	int i = 0;

	CMDQ_LOG("[READ_PA] cmdq_driver_process_read_address_request()\n");

	do {
		/* validate the request struct and both user pointers up front */
		if (NULL == req_user ||
		    0 == req_user->count ||
		    NULL == CMDQ_U32_PTR(req_user->values) ||
		    NULL == CMDQ_U32_PTR(req_user->dmaAddresses)) {
			CMDQ_ERR("[READ_PA] invalid req_user\n");
			break;
		}

		/* kcalloc checks count*size overflow internally */
		addrs = kcalloc(req_user->count, sizeof(uint32_t), GFP_KERNEL);
		if (NULL == addrs) {
			CMDQ_ERR("[READ_PA] fail to alloc addr buf\n");
			break;
		}

		values = kcalloc(req_user->count, sizeof(uint32_t), GFP_KERNEL);
		if (NULL == values) {
			CMDQ_ERR("[READ_PA] fail to alloc value buf\n");
			break;
		}

		/* copy from user */
		if (copy_from_user
		    (addrs, CMDQ_U32_PTR(req_user->dmaAddresses),
		     req_user->count * sizeof(uint32_t))) {
			CMDQ_ERR("[READ_PA] fail to copy user dmaAddresses\n");
			break;
		}

		/* actually read these PA write buffers */
		for (i = 0; i < req_user->count; ++i) {
			/* truncate to a 32-bit PA before the lookup */
			pa = (0xFFFFFFFF & addrs[i]);
			CMDQ_LOG("[READ_PA] req read dma address 0x%pa\n", &pa);
			values[i] = cmdqCoreReadWriteAddress(pa);
		}

		/* copy value to user */
		if (copy_to_user
		    (CMDQ_U32_PTR(req_user->values), values,
		     req_user->count * sizeof(uint32_t))) {
			CMDQ_ERR("[READ_PA] fail to copy to user value buf\n");
			break;
		}
	} while (0);

	/* kfree(NULL) is a no-op, so both frees are safe on any early exit */
	kfree(addrs);
	kfree(values);
}
/*
 * Copies the user-supplied secure address metadata array into a kernel
 * buffer and replaces secData.addrMetadatas with the kernel address. On
 * success the caller owns the buffer and must release it with
 * cmdq_driver_destroy_secure_medadata(). Returns 0, -EFAULT, or -ENOMEM.
 */
static long cmdq_driver_create_secure_medadata(cmdqCommandStruct *pCommand)
{
	void *pAddrMetadatas = NULL;
	uint32_t length;

	/* verify parameter */
	if ((false == pCommand->secData.isSecure)
	    && (0 != pCommand->secData.addrMetadataCount)) {
		/* normal path with non-zero secure metadata */
		CMDQ_ERR
		    ("[secData]mismatch secData.isSecure(%d) and secData.addrMetadataCount(%d)\n",
		     pCommand->secData.isSecure, pCommand->secData.addrMetadataCount);
		return -EFAULT;
	}

	/* BUGFIX: addrMetadataCount comes from userspace; guard the size
	 * multiplication below against 32-bit overflow */
	if (pCommand->secData.addrMetadataCount >
	    (0xFFFFFFFFU / sizeof(cmdqSecAddrMetadataStruct))) {
		CMDQ_ERR("[secData]addrMetadataCount too large: %d\n",
			 pCommand->secData.addrMetadataCount);
		return -EFAULT;
	}

	length = (pCommand->secData.addrMetadataCount) * sizeof(cmdqSecAddrMetadataStruct);

	/* revise max count field */
	pCommand->secData.addrMetadataMaxCount = pCommand->secData.addrMetadataCount;

	/* bypass 0 metadata case */
	if (0 == pCommand->secData.addrMetadataCount) {
		pCommand->secData.addrMetadatas = (cmdqU32Ptr_t) (unsigned long)NULL;
		return 0;
	}

	/* create kernel-space buffer for working */
	pAddrMetadatas = kzalloc(length, GFP_KERNEL);
	if (NULL == pAddrMetadatas) {
		CMDQ_ERR("[secData]kzalloc for addrMetadatas failed, count:%d, alloacted_size:%d\n",
			 pCommand->secData.addrMetadataCount, length);
		return -ENOMEM;
	}

	/* copy from user */
	if (copy_from_user(pAddrMetadatas, CMDQ_U32_PTR(pCommand->secData.addrMetadatas),
			   length)) {
		CMDQ_ERR("[secData]fail to copy user addrMetadatas\n");

		/* replace buffer first to ensure that addrMetadatas is a valid
		 * kernel-space buffer address when it is freed */
		pCommand->secData.addrMetadatas = (cmdqU32Ptr_t) (unsigned long)pAddrMetadatas;
		/* free secure path metadata */
		cmdq_driver_destroy_secure_medadata(pCommand);
		return -EFAULT;
	}

	/* replace buffer */
	pCommand->secData.addrMetadatas = (cmdqU32Ptr_t) (unsigned long)pAddrMetadatas;

	return 0;
}
/*
 * Inserts a profile-marker instruction that backs up the APXGPT2 counter
 * (a timestamp) into a per-task backup slot. The slot block is lazily
 * allocated on first use; timestamps are copied to the record struct after
 * the task finishes. Returns 0 on success or a negative error code.
 */
int32_t cmdqRecProfileMarker(cmdqRecHandle handle, const char *tag)
{
#ifdef CMDQ_PROFILE_MARKER_SUPPORT
	int32_t status;
	int32_t index;
	cmdqBackupSlotHandle hSlot;
	dma_addr_t allocatedStartPA;

	do {
		allocatedStartPA = 0;
		status = 0;

		/* Allocate a temp slot block for GCE to store timestamp info;
		 * the timestamps are copied to the record struct after the
		 * task finishes executing. */
		if ((0 == handle->profileMarker.count) && (0 == handle->profileMarker.hSlot)) {
			status = cmdqCoreAllocWriteAddress(CMDQ_MAX_PROFILE_MARKER_IN_TASK,
							   &allocatedStartPA);
			if (0 > status) {
				CMDQ_ERR("[REC][PROF_MARKER]allocate failed, status:%d\n", status);
				break;
			}

			/* store the slot start PA as a 64-bit handle */
			handle->profileMarker.hSlot = 0LL | (allocatedStartPA);

			CMDQ_VERBOSE("[REC][PROF_MARKER]update handle(%p) slot start PA:%pa(0x%llx)\n",
				     handle, &allocatedStartPA, handle->profileMarker.hSlot);
		}

		/* insert the marker instruction at the next free slot index */
		index = handle->profileMarker.count;
		hSlot = (cmdqBackupSlotHandle) (handle->profileMarker.hSlot);

		if (index >= CMDQ_MAX_PROFILE_MARKER_IN_TASK) {
			CMDQ_ERR("[REC][PROF_MARKER]insert profile maker failed since already reach max count\n");
			status = -EFAULT;
			break;
		}

		CMDQ_VERBOSE("[REC][PROF_MARKER]inserting profile instr, handle:%p, slot:%pa(0x%llx), index:%d, tag:%s\n",
			     handle, &hSlot, handle->profileMarker.hSlot, index, tag);

		cmdqRecBackupRegisterToSlot(handle, hSlot, index, CMDQ_APXGPT2_COUNT);

		handle->profileMarker.tag[index] = tag;
		handle->profileMarker.count += 1;
	} while (0);

	return status;
#else
	CMDQ_ERR("func:%s failed since CMDQ dosen't enable profile marker\n", __func__);
	return -EFAULT;
#endif
}
/*
 * Maps a sectrace log buffer for the secure world: first mc_map() in the
 * normal world, then an IWC request asking secure CMDQ to map the same
 * buffer. On SWd failure the NWd mapping is undone. Takes the secure-path
 * lock internally. Returns 0 or -EFAULT.
 */
static int cmdq_sec_sectrace_map(void *va, size_t size)
{
	int status;
	enum mc_result mcRet;

	CMDQ_LOG("[sectrace]-->map: start, va:%p, size:%d\n", va, (int)size);

	status = 0;
	cmdq_sec_lock_secure_path();

	do {
		/* HACK: submit a dummy message to ensure secure path init done */
		status = cmdq_sec_submit_to_secure_world_async_unlocked(
				CMD_CMDQ_TL_TEST_HELLO_TL, NULL,
				CMDQ_INVALID_THREAD, NULL, NULL);
		/* NOTE(review): the dummy-submit status is overwritten below
		 * without being checked — confirm failures here are benign */

		/* map log buffer in NWd */
		mcRet = mc_map(&(gCmdqSecContextHandle->sessionHandle), va, (uint32_t) size,
			       &gCmdqSectraceMappedInfo);
		if (MC_DRV_OK != mcRet) {
			CMDQ_ERR("[sectrace]map: failed in NWd, mc_map err: 0x%x\n", mcRet);
			status = -EFAULT;
			break;
		}

		CMDQ_LOG("[sectrace]map: mc_map sectrace buffer done, gCmdqSectraceMappedInfo(va:0x%08x, size:%d)\n",
			 gCmdqSectraceMappedInfo.secure_virt_addr,
			 gCmdqSectraceMappedInfo.secure_virt_len);

		/* ask secure CMDQ to map sectrace log buffer */
		status = cmdq_sec_submit_to_secure_world_async_unlocked(
				CMD_CMDQ_TL_SECTRACE_MAP, NULL, CMDQ_INVALID_THREAD,
				cmdq_sec_fill_iwc_command_sectrace_unlocked, NULL);
		if (0 > status) {
			CMDQ_ERR("[sectrace]map: failed in SWd: %d\n", status);
			/* roll back the normal-world mapping on SWd failure */
			mc_unmap(&(gCmdqSecContextHandle->sessionHandle), va,
				 &gCmdqSectraceMappedInfo);
			status = -EFAULT;
			break;
		}
	} while (0);

	cmdq_sec_unlock_secure_path();

	CMDQ_LOG("[sectrace]<--map: status: %d\n", status);

	return status;
}
/*
 * Notifies the secure-world session and blocks until it responds or the
 * timeout expires. *pIwcState tracks handshake progress (TRANSACTED ->
 * ON_TRANSACTED) so a failure point can be diagnosed afterwards.
 * Returns 0 on success, -ETIMEDOUT on timeout, -1 on other MC errors.
 */
int32_t cmdq_sec_execute_session_unlocked(struct mc_session_handle *pSessionHandle,
					  CMDQ_IWC_STATE_ENUM *pIwcState, int32_t timeout_ms)
{
	enum mc_result mcRet;
	int32_t status = 0;
	/* a non-positive timeout means wait forever */
	const int32_t secureWoldTimeout_ms =
	    (0 < timeout_ms) ? (timeout_ms) : (MC_INFINITE_TIMEOUT);

	CMDQ_PROF_START("CMDQ_SEC_EXE");

	do {
		/* notify to secure world */
		mcRet = mc_notify(pSessionHandle);
		if (MC_DRV_OK != mcRet) {
			CMDQ_ERR("[SEC]EXEC: mc_notify err[0x%x]\n", mcRet);
			status = -1;
			break;
		} else {
			CMDQ_MSG("[SEC]EXEC: mc_notify ret[0x%x]\n", mcRet);
		}

		(*pIwcState) = IWC_SES_TRANSACTED;

		/* wait respond */
		mcRet = mc_wait_notification(pSessionHandle, secureWoldTimeout_ms);
		if (MC_DRV_ERR_TIMEOUT == mcRet) {
			CMDQ_ERR
			    ("[SEC]EXEC: mc_wait_notification timeout, err[0x%x], secureWoldTimeout_ms[%d]\n",
			     mcRet, secureWoldTimeout_ms);
			status = -ETIMEDOUT;
			break;
		}

		if (MC_DRV_OK != mcRet) {
			CMDQ_ERR("[SEC]EXEC: mc_wait_notification err[0x%x]\n", mcRet);
			status = -1;
			break;
		} else {
			/* NOTE(review): this is the success path, but the log
			 * text says "err" — likely copy-pasted from the error
			 * message above; confirm before changing the string */
			CMDQ_MSG("[SEC]EXEC: mc_wait_notification err[%d]\n", mcRet);
		}

		(*pIwcState) = IWC_SES_ON_TRANSACTED;
	} while (0);

	CMDQ_PROF_END("CMDQ_SEC_EXE");

	return status;
}
/********************************************************************************
 * operator API
 *******************************************************************************/

/*
 * Opens the MobiCore device, retrying up to 30 times on generic failures
 * (no delay between attempts). An "already opened" result is tolerated and
 * reported as -EEXIST. Returns 0, -EEXIST, or -1 when all retries fail.
 */
int32_t cmdq_sec_open_mobicore_impl(uint32_t deviceId)
{
	int32_t status;
	enum mc_result mcRet = MC_DRV_ERR_UNKNOWN;
	int retryCnt = 0;

	do {
		status = 0;
		mcRet = mc_open_device(deviceId);

		/* Currently a process context is limited to opening the
		 * mobicore device once, and mc_open_device does not support a
		 * reference count, so skip this false-alarm error.... */
		if (MC_DRV_ERR_INVALID_OPERATION == mcRet) {
			CMDQ_MSG("[SEC]_MOBICORE_OPEN: already opened, continue to execution\n");
			status = -EEXIST;
		} else if (MC_DRV_OK != mcRet) {
			CMDQ_ERR("[SEC]_MOBICORE_OPEN: err[0x%x]\n", mcRet);
			status = -1;
			retryCnt++;
			/* retry immediately until the cap below is reached */
			continue;
		}

		CMDQ_MSG("[SEC]_MOBICORE_OPEN: status[%d], ret[0x%x]\n", status, mcRet);
		break;
	} while (retryCnt < 30);

	return status;
}
/*
 * Checks whether all MDP-related engines are idle so the module may
 * suspend. Logs every busy engine. Returns 0 when everything is idle,
 * -EBUSY if any engine still has users.
 */
const int32_t cmdq_core_can_module_entry_suspend(EngineStruct *engineList)
{
	const CMDQ_ENG_ENUM mdpEngines[] = {
		CMDQ_ENG_ISP_IMGI,
		CMDQ_ENG_MDP_RDMA0,
		CMDQ_ENG_MDP_RSZ0,
		CMDQ_ENG_MDP_RSZ1,
		CMDQ_ENG_MDP_TDSHP0,
		CMDQ_ENG_MDP_WROT0,
		CMDQ_ENG_MDP_WDMA
	};
	int32_t result = 0;
	int idx;

	for (idx = 0; idx < (sizeof(mdpEngines) / sizeof(mdpEngines[0])); ++idx) {
		const CMDQ_ENG_ENUM engine = mdpEngines[idx];

		if (0 != engineList[engine].userCount) {
			CMDQ_ERR("suspend but engine %d has userCount %d, owner=%d\n",
				 engine, engineList[engine].userCount,
				 engineList[engine].currOwner);
			result = -EBUSY;
		}
	}

	return result;
}
/*
 * Builds the register-dump address buffer for a command: user-requested
 * addresses first, then kernel debug-dump addresses appended. On success
 * regRequest.regAddresses points at a kernel kcalloc buffer that replaces
 * the user pointer (caller is responsible for freeing it later).
 * Returns 0, -ENOMEM, or -EFAULT.
 */
static int cmdq_driver_create_reg_address_buffer(struct cmdqCommandStruct *pCommand)
{
	int status = 0;
	uint32_t totalRegCount = 0;
	uint32_t *regAddrBuf = NULL;
	uint32_t *kernelRegAddr = NULL;
	uint32_t kernelRegCount = 0;
	const uint32_t userRegCount = pCommand->regRequest.count;

	if (0 != pCommand->debugRegDump) {
		/* get kernel dump request count */
		status = cmdqCoreDebugRegDumpBegin(pCommand->debugRegDump,
						   &kernelRegCount, &kernelRegAddr);
		if (0 != status) {
			/* a failed begin downgrades to "no kernel dump" */
			CMDQ_ERR
			    ("cmdqCoreDebugRegDumpBegin returns %d, ignore kernel reg dump request\n",
			     status);
			kernelRegCount = 0;
			kernelRegAddr = NULL;
		}
	}

	/* how many register to dump? */
	totalRegCount = kernelRegCount + userRegCount;

	if (0 == totalRegCount) {
		/* no need to dump register */
		pCommand->regRequest.count = 0;
		pCommand->regValue.count = 0;
		pCommand->regRequest.regAddresses = (cmdqU32Ptr_t) (unsigned long)NULL;
		pCommand->regValue.regValues = (cmdqU32Ptr_t) (unsigned long)NULL;
	} else {
		regAddrBuf = kcalloc(totalRegCount, sizeof(uint32_t), GFP_KERNEL);
		if (NULL == regAddrBuf)
			return -ENOMEM;

		/* collect user space dump request */
		if (userRegCount) {
			if (copy_from_user
			    (regAddrBuf, CMDQ_U32_PTR(pCommand->regRequest.regAddresses),
			     userRegCount * sizeof(uint32_t))) {
				kfree(regAddrBuf);
				return -EFAULT;
			}
		}

		/* collect kernel space dump request, concatenate after the
		 * user space request */
		if (kernelRegCount) {
			memcpy(regAddrBuf + userRegCount, kernelRegAddr,
			       kernelRegCount * sizeof(uint32_t));
		}

		/* NOTE(review): kernelRegAddr obtained from
		 * cmdqCoreDebugRegDumpBegin is not freed here — presumably the
		 * matching DebugRegDumpEnd owns it; confirm ownership. */

		/* replace address buffer and value address buffer with kzalloc memory */
		pCommand->regRequest.regAddresses = (cmdqU32Ptr_t) (unsigned long)(regAddrBuf);
		pCommand->regRequest.count = totalRegCount;
	}

	return 0;
}
/*
 * GCE interrupt handler. The IRQ line is shared with CQ-DMA, so this may be
 * invoked when GCE has no pending flag; in that case IRQ_NONE is returned
 * so the other handler can run. Thread IRQ status bits are active-low
 * (0 = asserted); processed bits are set back to 1 so the loop can exit
 * early once the status word reads all-ones.
 */
static irqreturn_t cmdq_irq_handler(int IRQ, void *pDevice)
{
	int index;
	uint32_t irqStatus;
	bool handled = false;

	/* we share IRQ bit with CQ-DMA, */
	/* so it is possible that this handler */
	/* is called but GCE does not have IRQ flag. */
	do {
		if (cmdq_dev_get_irq_id() == IRQ) {
			/* only the low 16 bits carry per-thread status */
			irqStatus = CMDQ_REG_GET32(CMDQ_CURR_IRQ_STATUS) & 0x0FFFF;
			for (index = 0;
			     (irqStatus != 0xFFFF) && index < CMDQ_MAX_THREAD_COUNT; index++) {
				/* STATUS bit set to 0 means IRQ asserted */
				if (irqStatus & (1 << index))
					continue;

				/* so we mark irqStatus to 1 to denote finished
				 * processing, and we can early-exit if no more
				 * threads are asserted */
				irqStatus |= (1 << index);

				cmdqCoreHandleIRQ(index);
				handled = true;
			}
		} else if (cmdq_dev_get_irq_secure_id() == IRQ) {
			/* secure IRQs should never reach the normal world */
			CMDQ_ERR("receive secure IRQ %d in NWD\n", IRQ);
		}
	} while (0);

	if (handled) {
		cmdq_core_add_consume_task();
		return IRQ_HANDLED;
	}

	/* allow CQ-DMA to process this IRQ bit */
	return IRQ_NONE;
}
static int cmdq_release(struct inode *pInode, struct file *pFile) { cmdqFileNodeStruct *pNode; unsigned long flags; CMDQ_VERBOSE("CMDQ driver release fd=%p begin\n", pFile); pNode = (cmdqFileNodeStruct *) pFile->private_data; if (NULL == pNode) { CMDQ_ERR("CMDQ file node NULL\n"); return -EFAULT; } spin_lock_irqsave(&pNode->nodeLock, flags); /* note that we did not release CMDQ tasks */ /* issued by this file node, */ /* since their HW operation may be pending. */ spin_unlock_irqrestore(&pNode->nodeLock, flags); /* scan through tasks that created by this file node and release them */ cmdq_core_release_task_by_file_node((void *)pNode); if (NULL != pFile->private_data) { kfree(pFile->private_data); pFile->private_data = NULL; } CMDQ_VERBOSE("CMDQ driver release end\n"); return 0; }
/*
 * Allocates and initializes a command-record handle for the given scenario.
 * On success stores the new handle in *pHandle and returns 0; otherwise
 * returns -EINVAL (bad scenario) or -ENOMEM.
 */
int32_t cmdqRecCreate(CMDQ_SCENARIO_ENUM scenario, cmdqRecHandle *pHandle)
{
	cmdqRecHandle handle = NULL;

	if (scenario < 0 || scenario >= CMDQ_MAX_SCENARIO_COUNT) {
		CMDQ_ERR("Unknown scenario type %d\n", scenario);
		return -EINVAL;
	}

	/* BUGFIX: previous size was sizeof(uint8_t *) * sizeof(cmdqRecStruct),
	 * which over-allocated a single record by a factor of the pointer
	 * size; exactly one cmdqRecStruct is needed. */
	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (NULL == handle) {
		return -ENOMEM;
	}

	handle->scenario = scenario;
	handle->pBuffer = NULL;
	handle->bufferSize = 0;
	handle->blockSize = 0;
	handle->engineFlag = cmdq_rec_flag_from_scenario(scenario);
	handle->priority = CMDQ_THR_PRIO_NORMAL;
	handle->prefetchCount = 0;
	handle->finalized = false;
	handle->pRunningTask = NULL;

	/* pre-allocate the initial command buffer */
	if (0 != cmdq_rec_realloc_cmd_buffer(handle, CMDQ_INITIAL_CMD_BLOCK_SIZE)) {
		kfree(handle);
		return -ENOMEM;
	}

	*pHandle = handle;

	return 0;
}
/*
 * Grows the record's command buffer to at least `size` bytes, preserving
 * the commands recorded so far. No-op when the current buffer is already
 * large enough. Returns 0 or -ENOMEM.
 */
int cmdq_rec_realloc_cmd_buffer(cmdqRecHandle handle, uint32_t size)
{
	void *pNewBuf = NULL;

	if (size <= handle->bufferSize) {
		return 0;
	}

	/* kzalloc returns zeroed memory, so the old explicit
	 * memset(pNewBuf, 0, size) was redundant and has been removed */
	pNewBuf = kzalloc(size, GFP_KERNEL);
	if (NULL == pNewBuf) {
		CMDQ_ERR("REC: kzalloc %d bytes cmd_buffer failed\n", size);
		return -ENOMEM;
	}

	/* carry over the already-recorded command block */
	if (handle->pBuffer && handle->blockSize > 0) {
		memcpy(pNewBuf, handle->pBuffer, handle->blockSize);
	}

	CMDQ_VERBOSE("REC: realloc size from %d to %d bytes\n", handle->bufferSize, size);

	kfree(handle->pBuffer);
	handle->pBuffer = pNewBuf;
	handle->bufferSize = size;

	return 0;
}
/*
 * Appends the terminating EOC + JUMP instruction pair to the command
 * buffer. Loop commands jump back to the buffer start; one-shot commands
 * jump forward by one instruction (8 bytes) as a scheduling mark.
 * Idempotent: a second call on a finalized handle returns 0 immediately.
 */
int32_t cmdq_rec_finalize_command(cmdqRecHandle handle, bool loop)
{
	int32_t status;

	if (handle->finalized)
		return 0;

	if ((handle->prefetchCount > 0)
	    && cmdq_core_should_enable_prefetch(handle->scenario)) {
		CMDQ_ERR("not insert prefetch disble marker when prefetch enabled, prefetchCount:%d\n",
			 handle->prefetchCount);
		cmdqRecDumpCommand(handle);
		return -EFAULT;
	}

	/* insert EOF instruction; argB = 0x1 generates an IRQ per iteration */
	status = cmdq_append_command(handle, CMDQ_CODE_EOC, 0, 0x1, 0, 0);
	if (0 != status)
		return status;

	/* insert JUMP to loop back to the beginning, or act as a scheduling
	 * mark (8); offset is relative, not absolute */
	status = cmdq_append_command(handle, CMDQ_CODE_JUMP, 0,
				     loop ? -handle->blockSize : 8, 0, 0);
	if (0 != status)
		return status;

	handle->finalized = true;
	return 0;
}
/*
 * Grows the secure address-metadata buffer to at least `size` bytes,
 * preserving already-recorded entries and updating addrMetadataMaxCount.
 * No-op when the current buffer is big enough. Returns 0 or -ENOMEM.
 */
int32_t cmdq_rec_realloc_addr_metadata_buffer(cmdqRecHandle handle, const uint32_t size)
{
	void *pBuffer;
	void *pPrevious = (void *)CMDQ_U32_PTR(handle->secData.addrMetadatas);
	const uint32_t currentSize = sizeof(cmdqSecAddrMetadataStruct) *
	    (handle->secData.addrMetadataMaxCount);

	if (size <= currentSize)
		return 0;

	pBuffer = kzalloc(size, GFP_KERNEL);
	if (NULL == pBuffer) {
		CMDQ_ERR("REC: secAddrMetadata, kzalloc %d bytes addr_metadata buffer failed\n",
			 size);
		return -ENOMEM;
	}

	/* carry over the metadata entries recorded so far */
	if (pPrevious && currentSize > 0)
		memcpy(pBuffer, pPrevious, currentSize);

	CMDQ_VERBOSE("REC: secAddrMetadata, realloc size from %d to %d bytes\n",
		     currentSize, size);

	kfree(pPrevious);
	handle->secData.addrMetadatas = (cmdqU32Ptr_t) (unsigned long)(pBuffer);
	handle->secData.addrMetadataMaxCount = size / sizeof(cmdqSecAddrMetadataStruct);

	return 0;
}
/*
 * Dumps MMSYS clock-gating registers (and, on legacy non-FPGA kernels,
 * subsystem power states) to the error log for debugging.
 */
void cmdq_core_dump_clock_gating(void)
{
	uint32_t cgValue[3] = { 0 };

	cgValue[0] = CMDQ_REG_GET32(MMSYS_CONFIG_BASE + 0x100);
	cgValue[1] = CMDQ_REG_GET32(MMSYS_CONFIG_BASE + 0x110);
	/* cgValue[2] = CMDQ_REG_GET32(MMSYS_CONFIG_BASE + 0x890); */

	CMDQ_ERR("MMSYS_CG_CON0(deprecated): 0x%08x, MMSYS_CG_CON1: 0x%08x\n",
		 cgValue[0], cgValue[1]);
	/* CMDQ_ERR("MMSYS_DUMMY_REG: 0x%08x\n", cgValue[2]); */
#ifdef CONFIG_MTK_LEGACY
#ifndef CONFIG_MTK_FPGA
	CMDQ_ERR("ISPSys clock state %d\n", subsys_is_on(SYS_IMG));
	CMDQ_ERR("DisSys clock state %d\n", subsys_is_on(SYS_DIS));
	/* CMDQ_ERR("VDESys clock state %d\n", subsys_is_on(SYS_VDE)); */
#endif
#endif				/* defined(CONFIG_MTK_LEGACY) */
}
/*
 * Interprets the secure-world reply for an IWC command. For CANCEL_TASK the
 * cancellation details (AEE flag, reset flag, IRQ flag, faulting instruction,
 * PC) are copied into the caller-supplied result struct. For other task
 * commands a negative response marks the task as errored. Returns the raw
 * secure-world response code.
 */
int32_t cmdq_sec_handle_session_reply_unlocked(const iwcCmdqMessage_t *pIwc,
					       const int32_t iwcCommand, TaskStruct *pTask,
					       void *data)
{
	int32_t status;
	int32_t iwcRsp;
	cmdqSecCancelTaskResultStruct *pCancelResult = NULL;

	/* get secure task execution result */
	iwcRsp = (pIwc)->rsp;
	status = iwcRsp;

	if (CMD_CMDQ_TL_CANCEL_TASK == iwcCommand) {
		pCancelResult = (cmdqSecCancelTaskResultStruct *) data;
		if (pCancelResult) {
			pCancelResult->throwAEE = pIwc->cancelTask.throwAEE;
			pCancelResult->hasReset = pIwc->cancelTask.hasReset;
			pCancelResult->irqFlag = pIwc->cancelTask.irqFlag;
			pCancelResult->errInstr[0] = pIwc->cancelTask.errInstr[0];	/* argB */
			pCancelResult->errInstr[1] = pIwc->cancelTask.errInstr[1];	/* argA */
			pCancelResult->pc = pIwc->cancelTask.pc;
		}

		CMDQ_ERR("CANCEL_TASK: pTask %p, INST:(0x%08x, 0x%08x), throwAEE:%d, hasReset:%d, pc:0x%08x\n",
			 pTask, pIwc->cancelTask.errInstr[1], pIwc->cancelTask.errInstr[0],
			 pIwc->cancelTask.throwAEE, pIwc->cancelTask.hasReset,
			 pIwc->cancelTask.pc);
	} else if (CMD_CMDQ_TL_PATH_RES_ALLOCATE == iwcCommand
		   || CMD_CMDQ_TL_PATH_RES_RELEASE == iwcCommand) {
		/* do nothing */
	} else {
		/* Note: we entry SWd only to config GCE and wait for the
		 * execution result in NWd; update taskState only if the
		 * config failed. */
		if (pTask && 0 > iwcRsp) {
			pTask->taskState = TASK_STATE_ERROR;
		}
	}

	/* log print */
	/* NOTE(review): errors are logged when status is POSITIVE here, while
	 * the taskState update above treats NEGATIVE iwcRsp as failure —
	 * confirm the secure world's response-code sign convention. */
	if (0 < status) {
		CMDQ_ERR("SEC_SEND: status[%d], cmdId[%d], iwcRsp[%d]\n",
			 status, iwcCommand, iwcRsp);
	} else {
		CMDQ_MSG("SEC_SEND: status[%d], cmdId[%d], iwcRsp[%d]\n",
			 status, iwcCommand, iwcRsp);
	}

	return status;
}
/**
 * Free an allocated backup slot. DO NOT free it before the corresponding
 * task finishes. Be careful on AsyncFlush use cases.
 *
 */
int32_t cmdqBackupFreeSlot(cmdqBackupSlotHandle hBackupSlot)
{
#ifdef CMDQ_GPR_SUPPORT
	return cmdqCoreFreeWriteAddress(hBackupSlot);
#else
	CMDQ_ERR("func:%s failed since CMDQ dosen't support GPR\n", __func__);
	return -EFAULT;
#endif				/* CMDQ_GPR_SUPPORT */
}
/*
 * Submits a secure task to the secure world for asynchronous execution on
 * the given thread. Returns the submit status, or -EFAULT when secure path
 * support is compiled out.
 */
int32_t cmdq_sec_exec_task_async_unlocked(TaskStruct *pTask, int32_t thread)
{
#ifdef CMDQ_SECURE_PATH_SUPPORT
	const int32_t status = cmdq_sec_submit_to_secure_world_async_unlocked(
			CMD_CMDQ_TL_SUBMIT_TASK, pTask, thread, NULL, NULL);

	if (0 > status)
		CMDQ_ERR("%s[%d]\n", __func__, status);

	return status;
#else
	CMDQ_ERR("secure path not support\n");
	return -EFAULT;
#endif
}
/*
 * Marks the given engines as requiring port security when this record runs
 * on the secure path. Returns 0, or -EFAULT without secure path support.
 */
int32_t cmdqRecSecureEnablePortSecurity(cmdqRecHandle handle, const uint64_t engineFlag)
{
#ifdef CMDQ_SECURE_PATH_SUPPORT
	handle->secData.enginesNeedPortSecurity =
	    handle->secData.enginesNeedPortSecurity | engineFlag;
	return 0;
#else
	CMDQ_ERR("%s failed since not support secure path\n", __func__);
	return -EFAULT;
#endif
}
/* Closes a MobiCore session. Returns 0 on success, -1 on failure. */
int32_t cmdq_sec_close_session_impl(struct mc_session_handle *pSessionHandle)
{
	const enum mc_result mcRet = mc_close_session(pSessionHandle);

	if (MC_DRV_OK == mcRet)
		return 0;

	CMDQ_ERR("_SESSION_CLOSE: err[0x%x]", mcRet);
	return -1;
}
/*
 * Enables command prefetch for this record when its scenario supports it,
 * by inserting a prefetch marker via cmdqRecMark().
 *
 * NOTE(review): returns true (1) on success but -EFAULT on failure, mixing
 * boolean and errno conventions in an int32_t — confirm how callers test
 * the result before normalizing this.
 */
int32_t cmdqRecEnablePrefetch(cmdqRecHandle handle)
{
	if (cmdq_core_should_enable_prefetch(handle->scenario)) {
		/* enable prefetch */
		CMDQ_VERBOSE("REC: enable prefetch\n");
		cmdqRecMark(handle);
		return true;
	}
	CMDQ_ERR("not allow enable prefetch, scenario: %d\n", handle->scenario);
	return -EFAULT;
}
/*
 * Emits a READ instruction copying the HW register at hwRegAddr into the
 * given GCE data register. Returns the append status, or -EFAULT when GPR
 * support is compiled out.
 */
int32_t cmdqRecReadToDataRegister(cmdqRecHandle handle, uint32_t hwRegAddr,
				  CMDQ_DATA_REGISTER_ENUM dstDataReg)
{
#ifdef CMDQ_GPR_SUPPORT
	/* read from hwRegAddr (argA) into dstDataReg (argB) */
	return cmdq_append_command(handle, CMDQ_CODE_READ, hwRegAddr, dstDataReg, 0, 1);
#else
	CMDQ_ERR("func:%s failed since CMDQ dosen't support GPR\n", __func__);
	return -EFAULT;
#endif
}
/**
 * Read the 32-bit value stored in a register backup slot at slotIndex.
 * Returns 0 on success, -EINVAL on a NULL value pointer or slot handle,
 * -EFAULT when GPR support is compiled out.
 */
int32_t cmdqBackupReadSlot(cmdqBackupSlotHandle hBackupSlot, uint32_t slotIndex, uint32_t *value)
{
#ifdef CMDQ_GPR_SUPPORT
	if (NULL == value)
		return -EINVAL;

	if (0 == hBackupSlot) {
		CMDQ_ERR("%s, hBackupSlot is NULL\n", __func__);
		return -EINVAL;
	}

	/* each slot entry is one 32-bit word */
	*value = cmdqCoreReadWriteAddress(hBackupSlot + slotIndex * sizeof(uint32_t));
	return 0;
#else
	CMDQ_ERR("func:%s failed since CMDQ dosen't support GPR\n", __func__);
	return -EFAULT;
#endif				/* CMDQ_GPR_SUPPORT */
}
/*
 * Asks the secure world to cancel an errored secure task; cancellation
 * details (PC, IRQ flag, faulting instruction, etc.) are reported back
 * through pResult. Returns the submit status, -EFAULT on invalid
 * parameters or when secure path support is compiled out.
 */
int32_t cmdq_sec_cancel_error_task_unlocked(TaskStruct *pTask, int32_t thread,
					    cmdqSecCancelTaskResultStruct *pResult)
{
#ifdef CMDQ_SECURE_PATH_SUPPORT
	const bool paramsValid = (NULL != pTask) &&
	    (NULL != pResult) && (false != cmdq_core_is_a_secure_thread(thread));

	if (!paramsValid) {
		CMDQ_ERR("%s invalid param, pTask:%p, thread:%d, pResult:%p\n",
			 __func__, pTask, thread, pResult);
		return -EFAULT;
	}

	return cmdq_sec_submit_to_secure_world_async_unlocked(CMD_CMDQ_TL_CANCEL_TASK,
							      pTask, thread, NULL,
							      (void *)pResult);
#else
	CMDQ_ERR("secure path not support\n");
	return -EFAULT;
#endif
}