/*
 *  ======== Hwi_Instance_init ========
 *  Construct a dispatched Hwi object bound to interrupt number intNum.
 *
 *  Returns 0 on success, 1 on argument/allocation failure, or
 *  (2 + Hwi_postInit status) when post-initialization reports an error
 *  through the caller's Error_Block.
 */
Int Hwi_Instance_init(Hwi_Object *hwi, Int intNum, Hwi_FuncPtr fxn,
        const Hwi_Params *params, Error_Block *eb)
{
    Int postStatus;

    /* Reject interrupt numbers outside the dispatch table. */
    if (intNum >= Hwi_NUM_INTERRUPTS) {
        Error_raise(eb, Hwi_E_badIntNum, intNum, 0);
        return (1);
    }

    /* Exactly one dispatched Hwi may own a given interrupt number. */
    if (Hwi_module->dispatchTable[intNum] !=
            ti_sysbios_family_arm_gic_Hwi_Module_State_nonPluggedHwi()) {
        Error_raise(eb, Hwi_E_alreadyDefined, intNum, 0);
        return (1);
    }

    /* Claim the dispatch slot, then apply fxn/params to the new object. */
    Hwi_module->dispatchTable[intNum] = hwi;
    Hwi_reconfig(hwi, fxn, params);

#ifndef ti_sysbios_hal_Hwi_DISABLE_ALL_HOOKS
    /* Allocate one environment pointer per registered hook set. */
    if (Hwi_hooks.length > 0) {
        hwi->hookEnv = Memory_calloc(Hwi_Object_heap(),
                Hwi_hooks.length * sizeof(Ptr), 0, eb);

        if (hwi->hookEnv == NULL) {
            return (1);
        }
    }
#endif

    hwi->irp = 0;

    postStatus = Hwi_postInit(hwi, eb);

    if (Error_check(eb)) {
        return (2 + postStatus);
    }

    return (0);
}
/*
 *  ======== Ipc_attach ========
 *  Attach to the remote processor remoteProcId: synchronize the two
 *  processors over shared region 0, then attach GateMP, SharedRegion,
 *  Notify, NameServer, and the MessageQ transport as configured.
 *
 *  The lower-numbered processor ("master", MultiProc_self() < remoteProcId)
 *  allocates the shared buffers and publishes their SRPtrs in the slave's
 *  reserved slot; the higher-numbered processor reads them back.
 *
 *  Returns Ipc_S_ALREADYSETUP if already attached (just bumps the refcount),
 *  a negative error (Ipc_E_FAIL / Ipc_E_MEMORY / sub-module status) on
 *  failure, or the final non-negative procSyncFinish status on success.
 *
 *  NOTE(review): the early error returns after procSyncStart do not undo
 *  the partial attach (GateMP/SharedRegion) — presumably recovery is the
 *  caller's responsibility; confirm against Ipc_detach.
 */
Int Ipc_attach(UInt16 remoteProcId)
{
    Int i;
    Ptr sharedAddr;
    SizeT memReq;
    volatile ti_sdo_ipc_Ipc_Reserved *slave;
    ti_sdo_ipc_Ipc_ProcEntry *ipc;
    Error_Block eb;
    SharedRegion_Entry entry;
    SizeT reservedSize = ti_sdo_ipc_Ipc_reservedSizePerProc();
    Bool cacheEnabled = SharedRegion_isCacheEnabled(0);
    UInt16 clusterId = ti_sdo_utils_MultiProc_getClusterId(remoteProcId);
    Int status;
    UInt hwiKey;

    /* Assert remoteProcId is in our cluster and isn't our own */
    Assert_isTrue(clusterId < ti_sdo_utils_MultiProc_numProcsInCluster,
            ti_sdo_utils_MultiProc_A_invalidMultiProcId);
    Assert_isTrue(remoteProcId != MultiProc_self(),
            ti_sdo_ipc_Ipc_A_invArgument);

    /* Check whether Ipc_start has been called.  If not, fail. */
    if (Ipc_module->ipcSharedAddr == NULL) {
        return (Ipc_E_FAIL);
    }

    /* for checking and incrementing attached below */
    hwiKey = Hwi_disable();

    /* Make sure its not already attached; if so just bump the refcount. */
    if (Ipc_module->procEntry[clusterId].attached) {
        Ipc_module->procEntry[clusterId].attached++;
        /* restore interrupts and return */
        Hwi_restore(hwiKey);
        return (Ipc_S_ALREADYSETUP);
    }

    /* restore interrupts */
    Hwi_restore(hwiKey);

    /* get region 0 information */
    SharedRegion_getEntry(0, &entry);

    /* Make sure we've attached to owner of SR0 if we're not owner */
    if ((MultiProc_self() != entry.ownerProcId) &&
        (remoteProcId != entry.ownerProcId) &&
        !(Ipc_module->procEntry[ti_sdo_utils_MultiProc_getClusterId(
            entry.ownerProcId)].attached)) {
        return (Ipc_E_FAIL);
    }

    /* Init error block */
    Error_init(&eb);

    /* determine the slave's slot in the Ipc reserved shared memory */
    slave = Ipc_getSlaveAddr(remoteProcId, Ipc_module->ipcSharedAddr);

    /* pick up the remote side's writes before reading the slot */
    if (cacheEnabled) {
        Cache_inv((Ptr)slave, reservedSize, Cache_Type_ALL, TRUE);
    }

    /* Synchronize the processors. */
    status = Ipc_procSyncStart(remoteProcId, Ipc_module->ipcSharedAddr);
    if (status < 0) {
        return (status);
    }

    /* must be called before SharedRegion_attach */
    status = ti_sdo_ipc_GateMP_attach(remoteProcId,
            Ipc_module->gateMPSharedAddr);
    if (status < 0) {
        return (status);
    }

    /* retrieves the SharedRegion Heap handles */
    status = ti_sdo_ipc_SharedRegion_attach(remoteProcId);
    if (status < 0) {
        return (status);
    }

    /* get the attach parameters associated with remoteProcId */
    ipc = &(Ipc_module->procEntry[clusterId]);

    /* attach Notify if not yet attached and specified to set internal setup */
    if (!(Notify_intLineRegistered(remoteProcId, 0)) &&
        (ipc->entry.setupNotify)) {

        /* call Notify_attach */
        memReq = Notify_sharedMemReq(remoteProcId, Ipc_module->ipcSharedAddr);

        if (memReq != 0) {
            if (MultiProc_self() < remoteProcId) {
                /*
                 *  calloc required here due to race condition.  Its possible
                 *  that the slave, who creates the instance, tries a sendEvent
                 *  before the master has created its instance because the
                 *  state of memory was enabled from a previous run.
                 */
                sharedAddr = Memory_calloc(SharedRegion_getHeap(0),
                        memReq, SharedRegion_getCacheLineSize(0), &eb);

                /* make sure alloc did not fail */
                if (sharedAddr == NULL) {
                    return (Ipc_E_MEMORY);
                }

                /* if cache enabled, wbInv the calloc above */
                if (cacheEnabled) {
                    Cache_wbInv(sharedAddr, memReq, Cache_Type_ALL, TRUE);
                }

                /* set the notify SRPtr so the slave can find the buffer */
                slave->notifySRPtr = SharedRegion_getSRPtr(sharedAddr, 0);
            }
            else {
                /* get the notify SRPtr published by the master */
                sharedAddr = SharedRegion_getPtr(slave->notifySRPtr);
            }
        }
        else {
            sharedAddr = NULL;
            slave->notifySRPtr = 0;
        }

        /* call attach to remote processor */
        status = Notify_attach(remoteProcId, sharedAddr);

        if (status < 0) {
            if (MultiProc_self() < remoteProcId && sharedAddr != NULL) {
                /* free the memory back to SharedRegion 0 heap */
                Memory_free(SharedRegion_getHeap(0), sharedAddr, memReq);
            }

            return (Ipc_E_FAIL);
        }
    }

    /* Must come after GateMP_start because depends on default GateMP */
    if (!(ti_sdo_utils_NameServer_isRegistered(remoteProcId)) &&
        (ipc->entry.setupNotify)) {

        memReq = ti_sdo_utils_NameServer_SetupProxy_sharedMemReq(
                Ipc_module->ipcSharedAddr);

        if (memReq != 0) {
            if (MultiProc_self() < remoteProcId) {
                sharedAddr = Memory_alloc(SharedRegion_getHeap(0),
                        memReq, SharedRegion_getCacheLineSize(0), &eb);

                /* make sure alloc did not fail */
                if (sharedAddr == NULL) {
                    return (Ipc_E_MEMORY);
                }

                /* set the NSRN SRPtr */
                slave->nsrnSRPtr = SharedRegion_getSRPtr(sharedAddr, 0);
            }
            else {
                /* get the NSRN SRPtr published by the master */
                sharedAddr = SharedRegion_getPtr(slave->nsrnSRPtr);
            }
        }
        else {
            sharedAddr = NULL;
            slave->nsrnSRPtr = 0;
        }

        /* call attach to remote processor */
        status = ti_sdo_utils_NameServer_SetupProxy_attach(remoteProcId,
                sharedAddr);

        if (status < 0) {
            if (MultiProc_self() < remoteProcId && sharedAddr != NULL) {
                /* free the memory back to SharedRegion 0 heap */
                Memory_free(SharedRegion_getHeap(0), sharedAddr, memReq);
            }

            return (Ipc_E_FAIL);
        }
    }

    /* Must come after GateMP_start because depends on default GateMP */
    if
        (!(ti_sdo_ipc_MessageQ_SetupTransportProxy_isRegistered(remoteProcId))
        && (ipc->entry.setupMessageQ)) {

        memReq = ti_sdo_ipc_MessageQ_SetupTransportProxy_sharedMemReq(
                Ipc_module->ipcSharedAddr);

        if (memReq != 0) {
            if (MultiProc_self() < remoteProcId) {
                sharedAddr = Memory_alloc(SharedRegion_getHeap(0),
                        memReq, SharedRegion_getCacheLineSize(0), &eb);

                /* make sure alloc did not fail */
                if (sharedAddr == NULL) {
                    return (Ipc_E_MEMORY);
                }

                /* set the transport SRPtr */
                slave->transportSRPtr = SharedRegion_getSRPtr(sharedAddr, 0);
            }
            else {
                /* get the transport SRPtr published by the master */
                sharedAddr = SharedRegion_getPtr(slave->transportSRPtr);
            }
        }
        else {
            sharedAddr = NULL;
            slave->transportSRPtr = 0;
        }

        /* call attach to remote processor */
        status = ti_sdo_ipc_MessageQ_SetupTransportProxy_attach(remoteProcId,
                sharedAddr);

        if (status < 0) {
            if (MultiProc_self() < remoteProcId && sharedAddr != NULL) {
                /* free the memory back to SharedRegion 0 heap */
                Memory_free(SharedRegion_getHeap(0), sharedAddr, memReq);
            }

            return (Ipc_E_FAIL);
        }
    }

    /* writeback invalidate slave's shared memory if cache enabled */
    if (cacheEnabled) {
        if (MultiProc_self() < remoteProcId) {
            Cache_wbInv((Ptr)slave, reservedSize, Cache_Type_ALL, TRUE);
        }
    }

    /* Call user attach fxns */
    for (i = 0; i < ti_sdo_ipc_Ipc_numUserFxns; i++) {
        if (ti_sdo_ipc_Ipc_userFxns[i].userFxn.attach) {
            status = ti_sdo_ipc_Ipc_userFxns[i].userFxn.attach(
                    ti_sdo_ipc_Ipc_userFxns[i].arg, remoteProcId);

            if (status < 0) {
                return (status);
            }
        }
    }

    /* Finish the processor synchronization */
    status = ti_sdo_ipc_Ipc_procSyncFinish(remoteProcId,
            Ipc_module->ipcSharedAddr);

    if (status < 0) {
        return (status);
    }

    /* for atomically incrementing attached */
    hwiKey = Hwi_disable();

    /* now attached to remote processor */
    Ipc_module->procEntry[clusterId].attached++;

    /* restore interrupts */
    Hwi_restore(hwiKey);

    return (status);
}
/*!
 *  @brief      Allocate memory for remote processor application
 *
 *              This function is called by remote processor application to
 *              allocate a buffer: it builds a MemAllocBlock description from
 *              the marshalled AllocArgs, allocates through MemMgr, maps the
 *              buffer for the SYSM3 processor, and records the mapping.
 *
 *  @param      dataSize    Size of the marshalled data packet
 *  @param      data        Marshalled data packet (an AllocArgs in disguise)
 *
 *  @retval     Mapped remote address on success, 0 on failure.
 *
 *  @sa         SysLinkMemUtils_free
 */
Int32
SysLinkMemUtils_alloc (UInt32 dataSize, UInt32 * data)
{
    AllocArgs                  * args       = (AllocArgs *)data;
    Int                          i;
    MemAllocBlock              * memBlock   = NULL;
    Ptr                          allocedPtr = NULL;
    UInt32                       retAddr    = 0;
    UInt32                       size       = 0;
    Int32                        status     = PROCMGR_SUCCESS;
    SyslinkMemUtils_MpuAddrToMap mpuAddrList [1];

    GT_2trace (curTrace, GT_ENTER, "SysLinkMemUtils_alloc", dataSize, data);

    /* One MemAllocBlock descriptor per requested buffer. */
    memBlock = Memory_calloc (NULL,
                              sizeof (MemAllocBlock) * args->numBuffers, 0);
    if (!memBlock) {
        status = PROCMGR_E_MEMORY;
#if !defined(SYSLINK_BUILD_OPTIMIZE)
        GT_setFailureReason (curTrace, GT_4CLASS, (Char *)__func__, status,
                             "Error allocating memBlock");
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
    }
    else {
        /* Copy the caller's per-buffer geometry into the MemMgr request. */
        for (i = 0; i < args->numBuffers; i++) {
            memBlock [i].pixelFormat     = args->params [i].pixelFormat;
            memBlock [i].dim.area.width  = args->params [i].width;
            memBlock [i].dim.area.height = args->params [i].height;
            memBlock [i].dim.len         = args->params [i].length;
        }
    }

    if (status == PROCMGR_SUCCESS) {
        /* Allocation */
        allocedPtr = MemMgr_Alloc (memBlock, args->numBuffers);
        if (!allocedPtr) {
            status = PROCMGR_E_MEMORY;
#if !defined(SYSLINK_BUILD_OPTIMIZE)
            GT_setFailureReason (curTrace, GT_4CLASS, (Char *)__func__, status,
                                 "Error MemMgr buffer");
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
        }
    }

    if (status == PROCMGR_SUCCESS) {
        /* Report the stride/ptr MemMgr chose back to the caller. */
        for (i = 0; i < args->numBuffers; i++) {
            args->params [i].stride = memBlock [i].stride;
            args->params [i].ptr    = memBlock [i].ptr;
        }

        size = _SysLinkMemUtils_bufferSize (memBlock, args->numBuffers);

        mpuAddrList [0].mpuAddr = (UInt32)allocedPtr;
        mpuAddrList [0].size    = size;

        status = SysLinkMemUtils_map (mpuAddrList, 1, &retAddr,
                                      ProcMgr_MapType_Tiler, PROC_SYSM3);
#if !defined(SYSLINK_BUILD_OPTIMIZE)
        if (status != PROCMGR_SUCCESS) {
            GT_setFailureReason (curTrace, GT_4CLASS, (Char *)__func__, status,
                                 "Error in SysLinkMemUtils_map");
        }
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
    }

    if (status == PROCMGR_SUCCESS) {
        status = _SysLinkMemUtils_insertMapElement ((Ptr)retAddr, allocedPtr,
                                                    size);
#if !defined(SYSLINK_BUILD_OPTIMIZE)
        if (status != PROCMGR_SUCCESS) {
            GT_setFailureReason (curTrace, GT_4CLASS, (Char *)__func__, status,
                                 "Error in SysLinkMemUtils_InsertMapElement");
        }
#endif /* if !defined(SYSLINK_BUILD_OPTIMIZE) */
    }

    if (status != PROCMGR_SUCCESS) {
        /* Roll back whatever succeeded before the failure. */
        if (retAddr) {
            SysLinkMemUtils_unmap (retAddr, PROC_SYSM3);
        }
        if (allocedPtr) {
            MemMgr_Free (allocedPtr);
        }
        /* FIX: the address was unmapped above; never hand the stale value
         * back to the caller as if it were a successful allocation. */
        retAddr = 0;
    }

    if (memBlock) {
        /* FIX: free with the size that was actually calloc'd (was 1). */
        Memory_free (NULL, memBlock,
                     sizeof (MemAllocBlock) * args->numBuffers);
    }

    GT_1trace (curTrace, GT_LEAVE, "SysLinkMemUtils_alloc", retAddr);

    return retAddr;
}
/*
 *  ======== HeapMultiBuf_Instance_init ========
 *  Initializes a dynamically created HeapMultiBuf.  Dynamic initialization
 *  requires different steps than static initialization because the buffers
 *  are provided by the user.  With static allocation, buffers with matching
 *  properties can be merged before allocation.  For dynamic creation, the
 *  buffers have already been allocated, so to merge them we must create a
 *  heapBuf from one buffer then add the other buffer to it.
 *
 *  Returns 0 on success, or 1/2/3 identifying which allocation step failed
 *  (the finalize routine uses this to know how much to clean up).
 */
Int HeapMultiBuf_Instance_init(HeapMultiBuf_Object *obj,
        const HeapMultiBuf_Params *params, Error_Block *eb)
{
    HeapBuf_Handle heapBuf, heapBuf1, heapBuf2;
    Int i, j, k;
    HeapMultiBuf_AddrPair addrPair;
    Char *endAddr;

    /* Start with one HeapBuf per buffer, then merge */
    obj->numBufs = params->numBufs;
    obj->numHeapBufs = params->numBufs;
    obj->blockBorrow = params->blockBorrow;

    /*
     *  The bufsByAddr array stores the pairing between the ending address of
     *  a buffer and the HeapBuf that manages it, so there is one entry per
     *  provided buffer.
     */
    obj->bufsByAddr = Memory_alloc(NULL,
            params->numBufs * sizeof(HeapMultiBuf_AddrPair), 0, eb);

    if (Error_check(eb)) {
        return (1); /* Failed at 1 */
    }

    /*
     *  To simplify initialization, bufsBySize is allocated to the largest
     *  potential size, meaning one entry per buffer.  If any of the buffers
     *  are merged, there will be wasted spots in the bufsBySize array, but
     *  this greatly simplifies the initialization.
     *
     *  This array is calloc'd so that if one of the HeapBuf creates fails,
     *  we can know how many HeapBufs to delete in finalize by checking for
     *  NULL.
     */
    obj->bufsBySize = Memory_calloc(NULL,
            obj->numBufs * sizeof(HeapBuf_Object*), 0, eb);

    if (Error_check(eb)) {
        return (2); /* Failed at 2 */
    }

    /* Create all of the HeapBufs */
    for (i = 0; i < obj->numBufs; i++) {
        heapBuf = HeapBuf_create(&(params->bufParams[i]), eb);

        if (Error_check(eb)) {
            return (3); /* Failed at 3 */
        }

        /* Add the new heapBuf to the bufsBySize array */
        obj->bufsBySize[i] = heapBuf;

        /* Add the new heapBuf to bufsByAddr */
        addrPair.lastAddr = HeapBuf_getEndAddr(heapBuf);
        addrPair.heapBuf = heapBuf;

        /* Copy by value */
        obj->bufsByAddr[i] = addrPair;
    }

    /*
     *  Sort the bufConfigs by size, then by align.  This simplifies the
     *  search for matching bufConfigs: equal (blockSize, align) entries
     *  end up adjacent, so the inner loop below can stop at the first
     *  mismatch.
     */
    qsort(obj->bufsBySize, obj->numBufs, sizeof(HeapBuf_Handle),
            HeapMultiBuf_sizeAlignCompare);

    /* Find any HeapBufs which need to be merged. */
    for (i = 0; i < obj->numHeapBufs; i++) {
        heapBuf1 = obj->bufsBySize[i];

        for (j = i + 1; j < obj->numHeapBufs; j++) {
            heapBuf2 = obj->bufsBySize[j];

            /* If the blockSize and align are equal, merge them. */
            if ((HeapBuf_getBlockSize(heapBuf1) ==
                 HeapBuf_getBlockSize(heapBuf2)) &&
                (HeapBuf_getAlign(heapBuf1) == HeapBuf_getAlign(heapBuf2))) {

                /* Merge heapBuf2 into heapBuf1 */

                /*
                 *  Update the bufsByAddr array first: the buffer formerly
                 *  managed by heapBuf2 must now resolve to heapBuf1.
                 */
                endAddr = HeapBuf_getEndAddr(heapBuf2);
                for (k = 0; k < obj->numBufs; k++) {
                    if (obj->bufsByAddr[k].lastAddr == endAddr) {
                        obj->bufsByAddr[k].heapBuf = heapBuf1;
                        break;
                    }
                }

                /* Give heapBuf2's buffer to heapBuf1. */
                HeapBuf_mergeHeapBufs(heapBuf1, heapBuf2);

                /*
                 *  Move heapBuf2 to end of array.  heapBuf2 is no longer
                 *  used, but is stored at the end of the array so that it
                 *  can be deleted when the HeapMultiBuf is deleted.
                 */
                HeapMultiBuf_moveToEnd(obj->bufsBySize, obj->numHeapBufs, j);

                /* Shorten the perceived array length. */
                obj->numHeapBufs--;

                /*
                 *  Since the array has been shifted left, incrementing j
                 *  would skip over the next HeapBuf in the array.
                 */
                j--;
            }
            else {
                /*
                 *  If this one didn't match, then none do (the array is
                 *  sorted), so break.
                 */
                break;
            }
        }
    }

    /*
     *  Once all of the heapBufs have been created, sort the bufsByAddr
     *  array.  The bufConfigs param was sorted, so bufsBySize does not
     *  need to be sorted here.
     */
    qsort(obj->bufsByAddr, obj->numBufs, sizeof(struct HeapMultiBuf_AddrPair),
            HeapMultiBuf_addrPairCompare);

    return (0); /* Success */
}
/*!
 *  @brief      Creates an instance of semaphore object.
 *
 *  @param      semType   Type of semaphore.  This parameter is a mask of
 *                        semaphore type and interruptability type.
 *  @param      semValue  Initial count.  Must be 0 or 1 for binary
 *                        semaphores.
 *
 *  @retval     Handle to the new semaphore, or NULL on failure (invalid
 *              arguments, allocation failure, or sem_init failure).
 *
 *  @sa         OsalSemaphore_delete
 */
OsalSemaphore_Handle OsalSemaphore_create(UInt32 semType, UInt32 semValue)
{
    Int status = OSALSEMAPHORE_SUCCESS;
    OsalSemaphore_Object * semObj = NULL;
    int osStatus = 0;

    GT_1trace (curTrace, GT_ENTER, "OsalSemaphore_create", semType);

    /* Check for semaphore type (binary/counting) */
    GT_assert (curTrace,
               ((OSALSEMAPHORE_TYPE_VALUE(semType))
                < OsalSemaphore_Type_EndValue));

#if !defined(SYSLINK_BUILD_OPTIMIZE)
    if (OSALSEMAPHORE_TYPE_VALUE(semType) >= OsalSemaphore_Type_EndValue) {
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "OsalSemaphore_create",
                             (unsigned int)OSALSEMAPHORE_E_INVALIDARG,
                             "Invalid semaphore type (OsalSemaphore_Type) provided");
    }
    else {
#endif /* #if !defined(SYSLINK_BUILD_OPTIMIZE) */
        /* NOTE(review): in optimized builds the NULL check below is
         * compiled out, matching the original behavior. */
        semObj = Memory_calloc (NULL, sizeof (OsalSemaphore_Object), 0);
#if !defined(SYSLINK_BUILD_OPTIMIZE)
        if (semObj == NULL) {
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "OsalSemaphore_create",
                                 (unsigned int)OSALSEMAPHORE_E_MEMORY,
                                 "Failed to allocate memory for semaphore object.");
        }
        else {
#endif /* #if !defined(SYSLINK_BUILD_OPTIMIZE) */
            semObj->semType = semType;
            semObj->value   = semValue;
#if !defined(SYSLINK_BUILD_OPTIMIZE)
            if ((OSALSEMAPHORE_TYPE_VALUE (semObj->semType)
                 == OsalSemaphore_Type_Binary)
                && (semValue > 1)) {
                /*! @retVal NULL Invalid semaphore value. */
                status = OSALSEMAPHORE_E_INVALIDARG;
                GT_setFailureReason (curTrace,
                                     GT_4CLASS,
                                     "OsalSemaphore_create",
                                     status,
                                     "Invalid semaphore value");
            }

            /* FIX: do not initialize the OS semaphore when the arguments
             * were already rejected above. */
            if (status >= 0) {
#endif /* #if !defined(SYSLINK_BUILD_OPTIMIZE) */
                osStatus = sem_init(&(semObj->lock), 0, semValue);
#if !defined(SYSLINK_BUILD_OPTIMIZE)
                if (osStatus < 0) {
                    /*! @retVal NULL Failed to initialize semaphore. */
                    status = OSALSEMAPHORE_E_RESOURCE;
                    GT_setFailureReason (curTrace,
                                         GT_4CLASS,
                                         "OsalSemaphore_create",
                                         status,
                                         "Failed to initialize semaphore");
                }
            }

            /* FIX: on any failure free the object and return NULL, as the
             * documented contract promises (previously the half-built
             * object leaked out to the caller). */
            if (status < 0) {
                Memory_free (NULL, semObj, sizeof (OsalSemaphore_Object));
                semObj = NULL;
            }
        }
    }
#endif /* #if !defined(SYSLINK_BUILD_OPTIMIZE) */

    GT_1trace (curTrace, GT_LEAVE, "OsalSemaphore_create", semObj);

    return (OsalSemaphore_Handle) semObj;
}
/*
 *  ======== Task_Instance_init ========
 *  Construct a Task object: resolve stack heap/size defaults, align the
 *  (caller-supplied or freshly allocated) stack, record the entry point
 *  and arguments, allocate hook environments, then run post-init.
 *
 *  Returns 0 on success, 1 on stack allocation failure, 2 on hook
 *  environment allocation failure, or (3 + Task_postInit status) when
 *  post-initialization fails.
 */
Int Task_Instance_init(Task_Object *tsk, Task_FuncPtr fxn,
        const Task_Params *params, Error_Block *eb)
{
    Int align;
    Int status;
    SizeT stackSize;

    Assert_isTrue((BIOS_taskEnabled == TRUE), Task_A_taskDisabled);

    Assert_isTrue(((BIOS_getThreadType() != BIOS_ThreadType_Hwi) &&
                   (BIOS_getThreadType() != BIOS_ThreadType_Swi)),
                   Task_A_badThreadType);

    /* priority must be -1 (inactive) or in (0, Task_numPriorities) */
    Assert_isTrue((((params->priority == -1) || (params->priority > 0)) &&
                   (params->priority < (Int)Task_numPriorities)),
                   Task_A_badPriority);

    tsk->priority = params->priority;

    /* deal with undefined Task_Params defaults */
    if (params->stackHeap == NULL) {
        tsk->stackHeap = Task_defaultStackHeap;
    }
    else {
        tsk->stackHeap = params->stackHeap;
    }

    if (params->stackSize == 0) {
        stackSize = Task_defaultStackSize;
    }
    else {
        stackSize = params->stackSize;
    }

    align = Task_SupportProxy_getStackAlignment();

    if (params->stack != NULL) {
        /* caller-supplied stack: align it in place */
        if (align != 0) {
            UArg stackTemp;
            /* align low address to stackAlignment
             * (assumes align is a power of two, so "& -align" masks the
             * low bits — TODO confirm against Task_SupportProxy) */
            stackTemp = (UArg)params->stack;
            stackTemp += align - 1;
            stackTemp &= -align;
            tsk->stack = (Ptr)xdc_uargToPtr(stackTemp);

            /* subtract what we removed from the low address from stackSize */
            tsk->stackSize = stackSize - (stackTemp - (UArg)params->stack);

            /* lower the high address as necessary */
            tsk->stackSize &= -align;
        }
        else {
            tsk->stack = params->stack;
            tsk->stackSize = stackSize;
        }
        /* tell Task_delete that stack was provided (sentinel heap value) */
        tsk->stackHeap = (xdc_runtime_IHeap_Handle)(-1);
    }
    else {
        if (BIOS_runtimeCreatesEnabled) {
            if (align != 0) {
                /*
                 * round stackSize up to the nearest multiple of the
                 * alignment.
                 */
                tsk->stackSize = (stackSize + align - 1) & -align;
            }
            else {
                tsk->stackSize = stackSize;
            }
            tsk->stack = Memory_alloc(tsk->stackHeap, tsk->stackSize,
                    align, eb);

            if (tsk->stack == NULL) {
                return (1);
            }
        }
    }

    tsk->fxn = fxn;
    tsk->arg0 = params->arg0;
    tsk->arg1 = params->arg1;
    tsk->env = params->env;

    tsk->vitalTaskFlag = params->vitalTaskFlag;
    if (tsk->vitalTaskFlag == TRUE) {
        Task_module->vitalTasks += 1;
    }

#ifndef ti_sysbios_knl_Task_DISABLE_ALL_HOOKS
    /* one environment pointer per registered hook set */
    if (Task_hooks.length > 0) {
        tsk->hookEnv = Memory_calloc(Task_Object_heap(),
                Task_hooks.length * sizeof (Ptr), 0, eb);

        if (tsk->hookEnv == NULL) {
            return (2);
        }
    }
#endif

    status = Task_postInit(tsk, eb);

    if (Error_check(eb)) {
        return (3 + status);
    }

    return (0);   /* no failure states */
}
/*!
 *  @brief      Function to create an instance of this PwrMgr.
 *
 *  @param      procId  Processor ID addressed by this PwrMgr instance.
 *  @param      params  Configuration parameters.
 *
 *  @retval     Valid handle on success, NULL on any failure (module not
 *              initialized, bad arguments, already-existing instance, or
 *              out of memory).
 *
 *  @sa         DM8168DUCATIPWR_delete
 */
DM8168DUCATIPWR_Handle DM8168DUCATIPWR_create (UInt16 procId,
        const DM8168DUCATIPWR_Params * params)
{
    Int status = PWRMGR_SUCCESS;
    PwrMgr_Object * handle = NULL;
    IArg key;

    GT_2trace (curTrace, GT_ENTER, "DM8168DUCATIPWR_create", procId, params);

    GT_assert (curTrace, IS_VALID_PROCID (procId));
    GT_assert (curTrace, (params != NULL));
    GT_assert (curTrace, (DM8168DUCATIPWR_state.refCount != 0));

#if !defined(SYSLINK_BUILD_OPTIMIZE) && defined (SYSLINK_BUILD_HLOS)
    if (DM8168DUCATIPWR_state.refCount == 0) {
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "DM8168DUCATIPWR_create",
                             DM8168DUCATIPWR_E_INVALIDSTATE,
                             "Module was not initialized!");
    }
    else if (!IS_VALID_PROCID (procId)) {
        /* Not setting status here since this function does not return
           status. */
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "DM8168DUCATIPWR_create",
                             PWRMGR_E_INVALIDARG,
                             "Invalid procId specified");
    }
    else if (params == NULL) {
        GT_setFailureReason (curTrace,
                             GT_4CLASS,
                             "DM8168DUCATIPWR_create",
                             PWRMGR_E_INVALIDARG,
                             "params passed is null!");
    }
    else {
#endif /* #if !defined(SYSLINK_BUILD_OPTIMIZE) && defined (SYSLINK_BUILD_HLOS) */
        /* Enter critical section protection. */
        key = IGateProvider_enter (DM8168DUCATIPWR_state.gateHandle);
#if !defined(SYSLINK_BUILD_OPTIMIZE) && defined (SYSLINK_BUILD_HLOS)
        /* Check if the PwrMgr already exists for specified procId. */
        if (DM8168DUCATIPWR_state.pwrHandles [procId] != NULL) {
            status = PWRMGR_E_ALREADYEXIST;
            GT_setFailureReason (curTrace,
                                 GT_4CLASS,
                                 "DM8168DUCATIPWR_create",
                                 status,
                                 "PwrMgr already exists for specified procId!");
        }
        else {
#endif /* #if !defined(SYSLINK_BUILD_OPTIMIZE) && defined (SYSLINK_BUILD_HLOS) */
            /* Allocate memory for the handle */
            handle = (PwrMgr_Object *) Memory_calloc (NULL,
                                                      sizeof (PwrMgr_Object),
                                                      0,
                                                      NULL);
            if (handle == NULL) {
                /* FIX: record the failure in status for consistency with
                 * the other failure paths (return value is unchanged:
                 * handle is NULL either way). */
                status = PWRMGR_E_MEMORY;
                GT_setFailureReason (curTrace,
                                     GT_4CLASS,
                                     "DM8168DUCATIPWR_create",
                                     PWRMGR_E_MEMORY,
                                     "Memory allocation failed for handle!");
            }
            else {
                /* Populate the handle fields */
                handle->pwrFxnTable.attach = &DM8168DUCATIPWR_attach;
                handle->pwrFxnTable.detach = &DM8168DUCATIPWR_detach;
                handle->pwrFxnTable.on     = &DM8168DUCATIPWR_on;
                handle->pwrFxnTable.off    = &DM8168DUCATIPWR_off;
                /* TBD: Other functions */

                /* Allocate memory for the DM8168DUCATIPWR handle */
                handle->object = Memory_calloc (NULL,
                                                sizeof (DM8168DUCATIPWR_Object),
                                                0,
                                                NULL);
                /* FIX: check the object allocation, not the (already
                 * non-NULL) outer handle — the original tested
                 * "handle == NULL" here, so this failure was silently
                 * ignored and a half-built handle escaped. */
                if (handle->object == NULL) {
                    status = PWRMGR_E_MEMORY;
                    GT_setFailureReason (curTrace,
                                         GT_4CLASS,
                                         "DM8168DUCATIPWR_create",
                                         status,
                                         "Memory allocation failed for handle!");
                }
                else {
#if defined (SYSLINK_BUILDOS_LINUX)
                    ((DM8168DUCATIPWR_Object *)handle->object)->clockHandle
                                       = (ClockOps_Handle) LinuxClock_create();
#endif /* #if defined (SYSLINK_BUILDOS_LINUX) */
#if defined (SYSLINK_BUILD_RTOS)
                    ((DM8168DUCATIPWR_Object *)handle->object)->clockHandle
                                       = (ClockOps_Handle) DM8168CLOCK_create();
#endif /* #if defined (SYSLINK_BUILD_RTOS) */
                    handle->procId = procId;
                    DM8168DUCATIPWR_state.pwrHandles [procId] =
                                                (DM8168DUCATIPWR_Handle) handle;
                }
            }
#if !defined(SYSLINK_BUILD_OPTIMIZE) && defined (SYSLINK_BUILD_HLOS)
        }
#endif /* #if !defined(SYSLINK_BUILD_OPTIMIZE) && defined (SYSLINK_BUILD_HLOS) */
        /* Leave critical section protection. */
        IGateProvider_leave (DM8168DUCATIPWR_state.gateHandle, key);
#if !defined(SYSLINK_BUILD_OPTIMIZE) && defined (SYSLINK_BUILD_HLOS)
    }
#endif /* #if !defined(SYSLINK_BUILD_OPTIMIZE) && defined (SYSLINK_BUILD_HLOS) */

    if (status < 0) {
        /* Unwind partial construction on any failure. */
        if (handle != NULL) {
            if (handle->object != NULL) {
                Memory_free (NULL,
                             handle->object,
                             sizeof (DM8168DUCATIPWR_Object));
            }
            Memory_free (NULL, handle, sizeof (PwrMgr_Object));
        }
        /*! @retval NULL Function failed */
        handle = NULL;
    }

    GT_1trace (curTrace, GT_LEAVE, "DM8168DUCATIPWR_create", handle);

    /*! @retval Valid-Handle Operation successful */
    return (DM8168DUCATIPWR_Handle) handle;
}