/**
 *  ============================================================================
 *  @n@b Osal_cppiMalloc
 *
 *  @b  brief
 *  @n  This API allocates a memory block of a given
 *      size specified by input parameter 'num_bytes'.
 *
 *      This API should allocate memory from shared memory if the test
 *      applications are to be run on multiple cores.
 *
 *  @param[in]  num_bytes
 *      Number of bytes to be allocated.
 *
 *  @return
 *      Allocated block address
 *  =============================================================================
 */
Void* Osal_cppiMalloc (UInt32 num_bytes)
{
    Error_Block errorBlock;
    Void*       dataPtr;

    /* Increment the allocation counter. */
    fftcCppiMallocCounter++;

    /* Allocate a buffer from the default HeapMemMp */
    if (SharedRegion_getHeap(0) != NULL)
    {
        dataPtr = Memory_alloc ((xdc_runtime_IHeap_Handle) SharedRegion_getHeap(0),
                                num_bytes, 0, &errorBlock);
    }
    else
    {
#ifdef FFTC_TEST_DEBUG
        Fftc_osalLog ("CppiAlloc Failed for size: %d \n", num_bytes);
#endif
        return NULL;
    }

#ifdef FFTC_TEST_DEBUG
    Fftc_osalLog ("CppiAlloc DataP: %p size: %d \n", dataPtr, num_bytes);
#endif

    return dataPtr;
}
Int SystemCfg_deleteResources(SystemCfg_Object *obj)
{
    IHeap_Handle heapH;
    Int status = 0;

    Log_print1(Diags_ENTRY | Diags_INFO, "--> "FXNN": (obj=0x%x)", (IArg)obj);

    /* delete opencl buffer heap */
    // HeapBufMP_delete(&obj->openclHeapH);

    /* unregister rcm message heap with MessageQ */
    MessageQ_unregisterHeap(SystemCfg_RcmMsgHeapId_CompDev);

    /* delete rcm message heap instance */
    HeapBuf_delete(&obj->rcmHeapH);

    /* free the rcm message heap storage */
    heapH = (IHeap_Handle)SharedRegion_getHeap(0);
    Memory_free(heapH, obj->rcmHeapBufBase, obj->rcmHeapBufSize);
    obj->rcmHeapBufBase = NULL;
    obj->rcmHeapBufSize = 0;

    Log_print1(Diags_EXIT, "<-- "FXNN": %d", (IArg)status);
    return(status);
}
/**
 *  @b Description
 *  @n
 *      The function is used to allocate a memory block of the specified size
 *      from shared memory.
 *
 *  @param[in]  num_bytes
 *      Number of bytes to be allocated.
 *
 *  @retval
 *      Allocated block address
 */
Ptr Osal_cppiMalloc (UInt32 num_bytes)
{
    Error_Block errorBlock;

    /* Allocate a buffer from the default HeapMemMp */
    return Memory_alloc ((xdc_runtime_IHeap_Handle) SharedRegion_getHeap(0),
                         num_bytes, 0, &errorBlock);
}
Int32 Vsys_allocBuf(UInt32 srRegId, UInt32 bufSize, UInt32 bufAlign,
                    Vsys_AllocBufInfo *bufInfo)
{
    IHeap_Handle heapHndl;

    heapHndl = SharedRegion_getHeap(srRegId);
    OSA_assert(heapHndl != NULL);

    bufInfo->virtAddr = NULL;
    bufInfo->physAddr = NULL;
    bufInfo->srPtr    = 0;

    bufInfo->virtAddr = Memory_alloc(heapHndl, bufSize, bufAlign, NULL);
    if (bufInfo->virtAddr == NULL)
        return -1;

    bufInfo->physAddr = Memory_translate(bufInfo->virtAddr,
                                         Memory_XltFlags_Virt2Phys);
    if (bufInfo->physAddr == NULL)
        return -1;

    bufInfo->srPtr = SharedRegion_getSRPtr(bufInfo->virtAddr, srRegId);

    return 0;
}
UInt32 Utils_memGetSR0HeapFreeSpace(void)
{
    UInt32 size;
    Memory_Stats stats;

    Memory_getStats(SharedRegion_getHeap(0), &stats);

    size = stats.totalFreeSize;

    return ((UInt32)(size));
}
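A minimal usage sketch (illustrative only, not from the original sources): use the free-space query above as a cheap guard before pulling a large block out of the SR0 heap. The helper name Example_allocIfSpaceAvailable is hypothetical, and the total free size does not guarantee a contiguous block of that size, so the check is necessary but not sufficient.

#include <xdc/std.h>
#include <xdc/runtime/Memory.h>
#include <xdc/runtime/IHeap.h>
#include <ti/ipc/SharedRegion.h>

/* Hypothetical helper: only attempt the allocation when the SR0 heap still
 * reports at least 'size' bytes free (fragmentation may still defeat it). */
Ptr Example_allocIfSpaceAvailable(UInt32 size)
{
    if (Utils_memGetSR0HeapFreeSpace() < size) {
        return NULL;    /* SR0 heap too low on space */
    }

    /* Alignment 0 lets the region heap apply its default alignment. */
    return Memory_alloc((IHeap_Handle)SharedRegion_getHeap(0), size, 0, NULL);
}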
Int32 System_ipcListMPFreeListElemMem(UInt32 regionId, UInt32 shAddr, UInt32 size)
{
    IHeap_Handle heapHndl;

    heapHndl = SharedRegion_getHeap(regionId);
    UTILS_assert(heapHndl != NULL);

    Memory_free(heapHndl, (Ptr)shAddr, size);

    return OSA_SOK;
}
Void Audio_freeSharedRegionBuf (Void *buf, Int32 bufSize)
{
    IHeap_Handle heap = NULL;

    heap = SharedRegion_getHeap(SR_FRAME_BUFFERS_ID);
    if (heap)
    {
        Memory_free(heap, buf, bufSize);
        gFreedSRBufs++;
    }
}
/* * ======== tsk0 ======== */ Void tsk0(UArg arg0, UArg arg1) { Int status; MessageQ_Msg msg; System_printf("tsk0 starting\n"); /* Register this heap with MessageQ */ MessageQ_registerHeap((IHeap_Handle)SharedRegion_getHeap(0), HEAP_ID); /* Open the 'next' remote message queue. Spin until it is ready. */ do { status = MessageQ_open(nextQueueName, &nextQueueId); } while (status < 0); if (selfId == 0) { msg = MessageQ_alloc(HEAP_ID, MSGSIZE); if (msg == NULL) { System_abort("MessageQ_alloc failed\n"); } /* Kick off the loop */ status = MessageQ_put(nextQueueId, msg); if (status < 0) { System_abort("MessageQ_put failed\n"); } } for (numReceived = 0; numReceived < NUMLOOPS; numReceived++) { /* Get a message */ status = MessageQ_get(messageQ, &msg, MessageQ_FOREVER); if (status < 0) { System_abort("MessageQ_get failed\n"); } if (selfId == 0) { rawtimestamps[numReceived] = Timestamp_get32(); if (numReceived == NUMLOOPS - 1) { printStatistics(); break; } } status = MessageQ_put(nextQueueId, msg); if (status < 0) { System_abort("MessageQ_put failed\n"); } } System_exit(0); }
Void *Audio_allocateSharedRegionBuf (Int32 bufSize)
{
    IHeap_Handle heap = NULL;
    Void *tmp = NULL;

    heap = SharedRegion_getHeap(SR_FRAME_BUFFERS_ID);
    tmp = Memory_alloc (heap, bufSize, 128, NULL);
    if (tmp)
    {
        gAllocedSRBufs++;
    }

    return tmp;
}
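For illustration, a hedged sketch of how the allocator above and the matching Audio_freeSharedRegionBuf() shown earlier might be paired; the caller name and frame size are assumptions.

/* Hypothetical caller: allocate one frame buffer from SR_FRAME_BUFFERS_ID,
 * use it, then hand it back with the same size. */
Void Example_audioFrame(Void)
{
    Int32 frameSize = 4096;    /* assumed frame size */
    Void *frame = Audio_allocateSharedRegionBuf(frameSize);

    if (frame != NULL) {
        /* ... fill the frame with samples, queue it for playback ... */
        Audio_freeSharedRegionBuf(frame, frameSize);
    }
}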
Int32 Vsys_freeBuf(UInt32 srRegId, UInt8 *virtAddr, UInt32 bufSize)
{
    IHeap_Handle heapHndl;

    heapHndl = SharedRegion_getHeap(srRegId);
    OSA_assert(heapHndl != NULL);
    OSA_assert(virtAddr != NULL);

    Memory_free(heapHndl, virtAddr, bufSize);

    return 0;
}
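A short usage sketch pairing Vsys_allocBuf() and Vsys_freeBuf() above; the region ID, buffer size, and caller are illustrative assumptions, not taken from the original code.

/* Hypothetical: allocate a buffer from an assumed shared region, use its
 * physical address / SRPtr, then return it to the same region heap. */
#define EXAMPLE_SR_ID     (1u)             /* assumed shared region ID */
#define EXAMPLE_BUF_SIZE  (1024u * 1024u)  /* assumed buffer size      */

Int32 Example_useVsysBuf(Void)
{
    Vsys_AllocBufInfo bufInfo;
    Int32 status;

    status = Vsys_allocBuf(EXAMPLE_SR_ID, EXAMPLE_BUF_SIZE, 128, &bufInfo);
    if (status != 0) {
        return status;    /* allocation or translation failed */
    }

    /* ... program bufInfo.physAddr into hardware, or pass bufInfo.srPtr
     *     to a remote core ... */

    return Vsys_freeBuf(EXAMPLE_SR_ID, (UInt8 *)bufInfo.virtAddr,
                        EXAMPLE_BUF_SIZE);
}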
Int SystemCfg_createResources(SystemCfg_Object *obj) { Error_Block eb; Int status = 0; HeapBuf_Params heapBufP; IHeap_Handle heapH; Log_print1(Diags_ENTRY | Diags_INFO, "--> "FXNN": (obj=0x%x)",(IArg)obj); Error_init(&eb); /* allocate heap backing store from SR_0 heap */ heapH = (IHeap_Handle)SharedRegion_getHeap(0); obj->rcmHeapBufSize = 5 * 128; obj->rcmHeapBufBase = Memory_alloc(heapH, obj->rcmHeapBufSize, 128, &eb); if (Error_check(&eb)) { Log_error1(FXNN": out of memory: size=%u", obj->rcmHeapBufSize); status = -1; goto leave; } /* create heap for messages */ HeapBuf_Params_init(&heapBufP); heapBufP.blockSize = 128; // header = 52 B, payload = 76 B heapBufP.numBlocks = 5; // 5 messages, total heap storage = 640 B heapBufP.align = 128; // align on cache line boundary heapBufP.buf = obj->rcmHeapBufBase; // heap storage base address heapBufP.bufSize = obj->rcmHeapBufSize; // heap storage size obj->rcmHeapH = HeapBuf_create(&heapBufP, &eb); if (Error_check(&eb)) { Log_error0(FXNN": HeapBuf_create() failed"); status = -1; goto leave; } /* register this heap with MessageQ */ Log_print2(Diags_INFO, FXNN": MessageQ_registerHeap: (rcmHeapH: 0x%x, heapId: %d)", (IArg)(obj->rcmHeapH), (IArg)SystemCfg_RcmMsgHeapId_CompDev); MessageQ_registerHeap((Ptr)(obj->rcmHeapH), SystemCfg_RcmMsgHeapId_CompDev); leave: Log_print1(Diags_EXIT, "<-- "FXNN": %d", (IArg)status); return(status); }
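Once the HeapBuf above is registered, RCM message allocations can be made against SystemCfg_RcmMsgHeapId_CompDev. A minimal, hypothetical sketch follows; the caller name is an assumption, and 128 matches the block size configured above.

#include <ti/ipc/MessageQ.h>

/* Hypothetical: allocate one message from the registered RCM heap and free
 * it again; a real caller would fill it in and MessageQ_put() it. */
Void Example_allocRcmMsg(Void)
{
    MessageQ_Msg msg;

    msg = MessageQ_alloc(SystemCfg_RcmMsgHeapId_CompDev, 128);
    if (msg != NULL) {
        MessageQ_free(msg);
    }
}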
/**
 *  ============================================================================
 *  @n@b Osal_cppiFree
 *
 *  @b  brief
 *  @n  This API frees and restores a given memory location
 *      pointer 'dataPtr' of size 'num_bytes' to its
 *      original heap location. Frees up memory allocated using
 *      @a Osal_cppiMalloc ()
 *
 *  @param[in]  dataPtr
 *      Pointer to the memory block to be cleaned up.
 *
 *  @param[in]  num_bytes
 *      Size of the memory block to be cleaned up.
 *
 *  @return
 *      Not Applicable
 *  =============================================================================
 */
Void Osal_cppiFree (Void* dataPtr, UInt32 num_bytes)
{
    /* Increment the free counter. */
    fftcCppiFreeCounter++;

    /* Free up the memory */
    if (dataPtr)
    {
#ifdef FFTC_TEST_DEBUG
        Fftc_osalLog ("CppiFree: DataP: %p size: %d\n", dataPtr, num_bytes);
#endif
        Memory_free ((xdc_runtime_IHeap_Handle) SharedRegion_getHeap(0),
                     dataPtr, num_bytes);
    }
}
/* * ======== SemaphoreMP_Instance_finalize ======== */ Void SemaphoreMP_Instance_finalize(SemaphoreMP_Object *obj, Int status) { if (obj->objType & (ti_sdo_ipc_Ipc_ObjType_CREATEDYNAMIC | ti_sdo_ipc_Ipc_ObjType_CREATEDYNAMIC_REGION)) { /* SemaphoreMP is being deleted */ /* Remove entry from NameServer */ if (obj->nsKey != NULL) { NameServer_removeEntry((NameServer_Handle) SemaphoreMP_module->nameServer, obj->nsKey); } /* Set status to 'not created' */ obj->attrs->status = 0; if (obj->cacheEnabled) { Cache_wbInv(obj->attrs, sizeof(SemaphoreMP_Attrs), Cache_Type_ALL, TRUE); } /* Delete the pendQ. If NULL, then ListMP_create failed. */ if (obj->pendQ != NULL) { ListMP_delete((ListMP_Handle *)&(obj->pendQ)); } /* * Free the shared memory back to the region SemaphoreMP. If NULL, then * the Memory_alloc failed. */ if (obj->objType == ti_sdo_ipc_Ipc_ObjType_CREATEDYNAMIC_REGION && obj->attrs != NULL) { Memory_free(SharedRegion_getHeap(obj->regionId), obj->attrs, obj->allocSize); } } else { /* SemaphoreMP is being closed */ /* Close the pendQ. If NULL, then ListMP_openByAddr failed. */ if (obj->pendQ != NULL) { ListMP_close((ListMP_Handle *)&(obj->pendQ)); } /* Close the gate. If NULL, then GateMP_openByAddr failed. */ if (obj->gate != NULL) { GateMP_close((GateMP_Handle *)&(obj->gate)); } } }
/** * Handler for shared region get heap API. * * \param ctp Thread's associated context information. * \param msg The actual devctl() message. * \param ocb OCB associated with client's session. * * \return POSIX errno value. * * \retval EOK Success. * \retval ENOTSUP Unsupported devctl(). */ int syslink_sharedregion_getheap(resmgr_context_t *ctp, io_devctl_t *msg, syslink_ocb_t *ocb) { SharedRegionDrv_CmdArgs * cargs = (SharedRegionDrv_CmdArgs *) (_DEVCTL_DATA (msg->i)); SharedRegionDrv_CmdArgs * out = (SharedRegionDrv_CmdArgs *) (_DEVCTL_DATA (msg->o)); IHeap_Handle heapHandle = NULL; heapHandle = (IHeap_Handle) SharedRegion_getHeap ( cargs->args.getHeap.id); GT_assert (curTrace, (heapHandle != NULL)); out->args.getHeap.heapHandle = heapHandle; if (out->args.getHeap.heapHandle != NULL) cargs->apiStatus = SharedRegion_S_SUCCESS; else cargs->apiStatus = SharedRegion_E_FAIL; return (_RESMGR_PTR (ctp, &msg->o, sizeof (msg->o) + sizeof(SharedRegionDrv_CmdArgs))); }
Int32 Utils_memInit() { SharedRegion_Entry srEntry; Int srStatus = SharedRegion_S_SUCCESS; UInt32 srId[UTILS_MEM_NUM_SHARED_REGION_HEAP], i; srId[UTILS_MEM_VID_FRAME_BUF_HEAP] = SYSTEM_IPC_SR_VIDEO_FRAME; srId[UTILS_MEM_VID_BITS_BUF_HEAP] = SYSTEM_IPC_SR_CACHED; for (i=0; i<UTILS_MEM_NUM_SHARED_REGION_HEAP; i++) { SharedRegion_entryInit(&srEntry); SharedRegion_getEntry(srId[i], &srEntry); Vps_printf (" %d: MEM: Shared Region %d: Base = 0x%08x, Length = 0x%08x (%d MB) \n", Utils_getCurTimeInMsec(), srId[i],srEntry.base,srEntry.len, srEntry.len/(1024*1024)); if ((FALSE == srEntry.isValid) && (0 != srEntry.len)) { srEntry.isValid = TRUE; do { srStatus = SharedRegion_setEntry(srId[i], &srEntry); if (srStatus != SharedRegion_S_SUCCESS) { Vps_printf(" %d: MEM: ERROR: SharedRegion_setEntry (%d, 0x%08x) FAILED !!! " " (status=%d) \n", Utils_getCurTimeInMsec(), srId[i], &srEntry, srStatus); Task_sleep(10); } } while (srStatus != SharedRegion_S_SUCCESS); } if (srEntry.len) { gUtils_heapMemHandle[i] = SharedRegion_getHeap(srId[i]); UTILS_assert(gUtils_heapMemHandle[i] != NULL); gUtils_memClearBuf[i] = FALSE; } } return 0; }
UInt32 System_ipcListMPAllocListElemMem(UInt32 regionId, UInt32 size)
{
    IHeap_Handle heapHndl;
    Error_Block eb;
    UInt32 shAddr;

    heapHndl = SharedRegion_getHeap(regionId);
    UTILS_assert(heapHndl != NULL);

    Error_init(&eb);

    shAddr = (UInt32) Memory_alloc ((IHeap_Handle)heapHndl, size, 0, &eb);
    UTILS_assert(shAddr != (UInt32)NULL);

    printf(" %u: SYSTEM: ListElem Shared Addr = 0x%08x\n",
           OSA_getCurTimeInMsec(), shAddr);

    return shAddr;
}
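For context, a hedged sketch pairing the element allocation above with System_ipcListMPFreeListElemMem() shown earlier; the element size and caller name are assumptions.

/* Hypothetical: carve space for one shared list element out of a region
 * heap, use it, then release it with the matching size. */
Void Example_listElemMem(UInt32 regionId)
{
    UInt32 elemSize = 64;    /* assumed element size */
    UInt32 shAddr;

    shAddr = System_ipcListMPAllocListElemMem(regionId, elemSize);

    /* ... link the element into a ListMP, use it, then unlink it ... */

    System_ipcListMPFreeListElemMem(regionId, shAddr, elemSize);
}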
/**
 *  @b Description
 *  @n
 *      The function is used to free a memory block of the specified size allocated
 *      using Osal_cppiMalloc() API.
 *
 *  @param[in]  ptr
 *      Pointer to the memory block to be cleaned up.
 *
 *  @param[in]  size
 *      Size of the memory block to be cleaned up.
 *
 *  @retval
 *      Not Applicable
 */
Void Osal_cppiFree (Ptr ptr, UInt32 size)
{
    Memory_free ((xdc_runtime_IHeap_Handle) SharedRegion_getHeap(0), ptr, size);
}
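A small, hedged example of exercising the CPPI OSAL pair above; the caller and block size are illustrative, not from the original driver code.

#include <string.h>    /* memset */

/* Hypothetical caller: take a scratch block from the SR0 heap through the
 * CPPI OSAL, clear it, and return it with the size used at allocation. */
Void Example_cppiScratch(Void)
{
    UInt32 blockSize = 256;    /* assumed block size */
    Ptr    block     = Osal_cppiMalloc(blockSize);

    if (block != NULL) {
        memset(block, 0, blockSize);
        Osal_cppiFree(block, blockSize);
    }
}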
/* * ======== SemaphoreMP_pend ======== */ Bool SemaphoreMP_pend(SemaphoreMP_Object *obj) { UInt tskKey; SemaphoreMP_PendElem *elem; IArg gateMPKey; /* Check for correct calling context */ Assert_isTrue((BIOS_getThreadType() == BIOS_ThreadType_Task), SemaphoreMP_A_badContext); elem = ThreadLocal_getspecific(SemaphoreMP_pendElemKey); if (elem == NULL) { /* * Choose region zero (instead of the region that contains the * SemaphoreMP) since region zero is always accessible by all cores */ elem = Memory_alloc(SharedRegion_getHeap(0), sizeof(SemaphoreMP_PendElem), 0, NULL); ThreadLocal_setspecific(SemaphoreMP_pendElemKey, elem); } /* Enter the gate */ gateMPKey = GateMP_enter((GateMP_Handle)obj->gate); if (obj->cacheEnabled) { Cache_inv(obj->attrs, sizeof(SemaphoreMP_Attrs), Cache_Type_ALL, TRUE); } /* check semaphore count */ if (obj->attrs->count == 0) { /* lock task scheduler */ tskKey = Task_disable(); /* get task handle and block tsk */ elem->task = (Bits32)Task_self(); elem->procId = MultiProc_self(); Task_block((Task_Handle)elem->task); if (obj->cacheEnabled) { Cache_wbInv(elem, sizeof(SemaphoreMP_PendElem), Cache_Type_ALL, TRUE); } /* add it to pendQ */ ListMP_putTail((ListMP_Handle)obj->pendQ, (ListMP_Elem *)elem); /* Leave the gate */ GateMP_leave((GateMP_Handle)obj->gate, gateMPKey); Task_restore(tskKey);/* the calling task will switch out here */ return (TRUE); } else { obj->attrs->count--; if (obj->cacheEnabled) { Cache_wbInv(obj->attrs, sizeof(SemaphoreMP_Attrs), Cache_Type_ALL, TRUE); } /* Leave the gate */ GateMP_leave((GateMP_Handle)obj->gate, gateMPKey); return (TRUE); } }
/* * ======== Ipc_attach ======== */ Int Ipc_attach(UInt16 remoteProcId) { Int i; Ptr sharedAddr; SizeT memReq; volatile ti_sdo_ipc_Ipc_Reserved *slave; ti_sdo_ipc_Ipc_ProcEntry *ipc; Error_Block eb; SharedRegion_Entry entry; SizeT reservedSize = ti_sdo_ipc_Ipc_reservedSizePerProc(); Bool cacheEnabled = SharedRegion_isCacheEnabled(0); UInt16 clusterId = ti_sdo_utils_MultiProc_getClusterId(remoteProcId); Int status; UInt hwiKey; /* Assert remoteProcId is in our cluster and isn't our own */ Assert_isTrue(clusterId < ti_sdo_utils_MultiProc_numProcsInCluster, ti_sdo_utils_MultiProc_A_invalidMultiProcId); Assert_isTrue(remoteProcId != MultiProc_self(), ti_sdo_ipc_Ipc_A_invArgument); /* Check whether Ipc_start has been called. If not, fail. */ if (Ipc_module->ipcSharedAddr == NULL) { return (Ipc_E_FAIL); } /* for checking and incrementing attached below */ hwiKey = Hwi_disable(); /* Make sure its not already attached */ if (Ipc_module->procEntry[clusterId].attached) { Ipc_module->procEntry[clusterId].attached++; /* restore interrupts and return */ Hwi_restore(hwiKey); return (Ipc_S_ALREADYSETUP); } /* restore interrupts */ Hwi_restore(hwiKey); /* get region 0 information */ SharedRegion_getEntry(0, &entry); /* Make sure we've attached to owner of SR0 if we're not owner */ if ((MultiProc_self() != entry.ownerProcId) && (remoteProcId != entry.ownerProcId) && !(Ipc_module->procEntry[ti_sdo_utils_MultiProc_getClusterId( entry.ownerProcId)].attached)) { return (Ipc_E_FAIL); } /* Init error block */ Error_init(&eb); /* determine the slave's slot */ slave = Ipc_getSlaveAddr(remoteProcId, Ipc_module->ipcSharedAddr); if (cacheEnabled) { Cache_inv((Ptr)slave, reservedSize, Cache_Type_ALL, TRUE); } /* Synchronize the processors. */ status = Ipc_procSyncStart(remoteProcId, Ipc_module->ipcSharedAddr); if (status < 0) { return (status); } /* must be called before SharedRegion_attach */ status = ti_sdo_ipc_GateMP_attach(remoteProcId, Ipc_module->gateMPSharedAddr); if (status < 0) { return (status); } /* retrieves the SharedRegion Heap handles */ status = ti_sdo_ipc_SharedRegion_attach(remoteProcId); if (status < 0) { return (status); } /* get the attach parameters associated with remoteProcId */ ipc = &(Ipc_module->procEntry[clusterId]); /* attach Notify if not yet attached and specified to set internal setup */ if (!(Notify_intLineRegistered(remoteProcId, 0)) && (ipc->entry.setupNotify)) { /* call Notify_attach */ memReq = Notify_sharedMemReq(remoteProcId, Ipc_module->ipcSharedAddr); if (memReq != 0) { if (MultiProc_self() < remoteProcId) { /* * calloc required here due to race condition. Its possible * that the slave, who creates the instance, tries a sendEvent * before the master has created its instance because the * state of memory was enabled from a previous run. 
*/ sharedAddr = Memory_calloc(SharedRegion_getHeap(0), memReq, SharedRegion_getCacheLineSize(0), &eb); /* make sure alloc did not fail */ if (sharedAddr == NULL) { return (Ipc_E_MEMORY); } /* if cache enabled, wbInv the calloc above */ if (cacheEnabled) { Cache_wbInv(sharedAddr, memReq, Cache_Type_ALL, TRUE); } /* set the notify SRPtr */ slave->notifySRPtr = SharedRegion_getSRPtr(sharedAddr, 0); } else { /* get the notify SRPtr */ sharedAddr = SharedRegion_getPtr(slave->notifySRPtr); } } else { sharedAddr = NULL; slave->notifySRPtr = 0; } /* call attach to remote processor */ status = Notify_attach(remoteProcId, sharedAddr); if (status < 0) { if (MultiProc_self() < remoteProcId && sharedAddr != NULL) { /* free the memory back to SharedRegion 0 heap */ Memory_free(SharedRegion_getHeap(0), sharedAddr, memReq); } return (Ipc_E_FAIL); } } /* Must come after GateMP_start because depends on default GateMP */ if (!(ti_sdo_utils_NameServer_isRegistered(remoteProcId)) && (ipc->entry.setupNotify)) { memReq = ti_sdo_utils_NameServer_SetupProxy_sharedMemReq( Ipc_module->ipcSharedAddr); if (memReq != 0) { if (MultiProc_self() < remoteProcId) { sharedAddr = Memory_alloc(SharedRegion_getHeap(0), memReq, SharedRegion_getCacheLineSize(0), &eb); /* make sure alloc did not fail */ if (sharedAddr == NULL) { return (Ipc_E_MEMORY); } /* set the NSRN SRPtr */ slave->nsrnSRPtr = SharedRegion_getSRPtr(sharedAddr, 0); } else { /* get the NSRN SRPtr */ sharedAddr = SharedRegion_getPtr(slave->nsrnSRPtr); } } else { sharedAddr = NULL; slave->nsrnSRPtr = 0; } /* call attach to remote processor */ status = ti_sdo_utils_NameServer_SetupProxy_attach(remoteProcId, sharedAddr); if (status < 0) { if (MultiProc_self() < remoteProcId && sharedAddr != NULL) { /* free the memory back to SharedRegion 0 heap */ Memory_free(SharedRegion_getHeap(0), sharedAddr, memReq); } return (Ipc_E_FAIL); } } /* Must come after GateMP_start because depends on default GateMP */ if (!(ti_sdo_ipc_MessageQ_SetupTransportProxy_isRegistered(remoteProcId)) && (ipc->entry.setupMessageQ)) { memReq = ti_sdo_ipc_MessageQ_SetupTransportProxy_sharedMemReq( Ipc_module->ipcSharedAddr); if (memReq != 0) { if (MultiProc_self() < remoteProcId) { sharedAddr = Memory_alloc(SharedRegion_getHeap(0), memReq, SharedRegion_getCacheLineSize(0), &eb); /* make sure alloc did not fail */ if (sharedAddr == NULL) { return (Ipc_E_MEMORY); } /* set the transport SRPtr */ slave->transportSRPtr = SharedRegion_getSRPtr(sharedAddr, 0); } else { /* get the transport SRPtr */ sharedAddr = SharedRegion_getPtr(slave->transportSRPtr); } } else { sharedAddr = NULL; slave->transportSRPtr = 0; } /* call attach to remote processor */ status = ti_sdo_ipc_MessageQ_SetupTransportProxy_attach(remoteProcId, sharedAddr); if (status < 0) { if (MultiProc_self() < remoteProcId && sharedAddr != NULL) { /* free the memory back to SharedRegion 0 heap */ Memory_free(SharedRegion_getHeap(0), sharedAddr, memReq); } return (Ipc_E_FAIL); } } /* writeback invalidate slave's shared memory if cache enabled */ if (cacheEnabled) { if (MultiProc_self() < remoteProcId) { Cache_wbInv((Ptr)slave, reservedSize, Cache_Type_ALL, TRUE); } } /* Call user attach fxns */ for (i = 0; i < ti_sdo_ipc_Ipc_numUserFxns; i++) { if (ti_sdo_ipc_Ipc_userFxns[i].userFxn.attach) { status = ti_sdo_ipc_Ipc_userFxns[i].userFxn.attach( ti_sdo_ipc_Ipc_userFxns[i].arg, remoteProcId); if (status < 0) { return (status); } } } /* Finish the processor synchronization */ status = ti_sdo_ipc_Ipc_procSyncFinish(remoteProcId, 
Ipc_module->ipcSharedAddr); if (status < 0) { return (status); } /* for atomically incrementing attached */ hwiKey = Hwi_disable(); /* now attached to remote processor */ Ipc_module->procEntry[clusterId].attached++; /* restore interrupts */ Hwi_restore(hwiKey); return (status); }
/* * ======== Ipc_writeConfig ======== */ Int Ipc_writeConfig(UInt16 remoteProcId, UInt32 tag, Ptr cfg, SizeT size) { Int status = Ipc_S_SUCCESS; UInt16 clusterId = ti_sdo_utils_MultiProc_getClusterId(remoteProcId); SharedRegion_SRPtr curSRPtr, *prevSRPtr; ti_sdo_ipc_Ipc_ConfigEntry *entry; Error_Block eb; Bool cacheEnabled = SharedRegion_isCacheEnabled(0); /* Assert that the remoteProc in our cluster */ Assert_isTrue(clusterId < ti_sdo_utils_MultiProc_numProcsInCluster, ti_sdo_utils_MultiProc_A_invalidMultiProcId); Error_init(&eb); if (cfg == NULL) { status = Ipc_E_FAIL; /* get head of local config list and set prevSRPtr to it */ prevSRPtr = (Ipc_module->procEntry[clusterId].localConfigList); /* * When cfg is NULL, the last memory allocated from a previous * Ipc_writeConfig call with the same remoteProcId, tag, and size * is freed. */ curSRPtr = *prevSRPtr; /* loop through list of config entries until matching entry is found */ while (curSRPtr != ti_sdo_ipc_SharedRegion_INVALIDSRPTR) { /* convert Ptr associated with curSRPtr */ entry = (ti_sdo_ipc_Ipc_ConfigEntry *) (SharedRegion_getPtr(curSRPtr)); /* make sure entry matches remoteProcId, tag, and size */ if ((entry->remoteProcId == remoteProcId) && (entry->tag == tag) && (entry->size == size)) { /* Update the 'prev' next ptr */ *prevSRPtr = (SharedRegion_SRPtr)entry->next; /* writeback the 'prev' ptr */ if (cacheEnabled) { Cache_wb(prevSRPtr, sizeof(ti_sdo_ipc_Ipc_ConfigEntry), Cache_Type_ALL, FALSE); } /* free entry's memory back to shared heap */ Memory_free(SharedRegion_getHeap(0), entry, size + sizeof(ti_sdo_ipc_Ipc_ConfigEntry)); /* set the status to success */ status = Ipc_S_SUCCESS; break; } /* set the 'prev' to the 'cur' SRPtr */ prevSRPtr = (SharedRegion_SRPtr *)(&entry->next); /* point to next config entry */ curSRPtr = (SharedRegion_SRPtr)entry->next; } /* return that status */ return (status); } /* Allocate memory from the shared heap (System Heap) */ entry = Memory_alloc(SharedRegion_getHeap(0), size + sizeof(ti_sdo_ipc_Ipc_ConfigEntry), SharedRegion_getCacheLineSize(0), &eb); if (entry == NULL) { return (Ipc_E_FAIL); } /* set the entry */ entry->remoteProcId = remoteProcId; entry->localProcId = MultiProc_self(); entry->tag = tag; entry->size = size; memcpy((Ptr)((UInt32)entry + sizeof(ti_sdo_ipc_Ipc_ConfigEntry)), cfg, size); /* point the entry's next to the first entry in the list */ entry->next = *Ipc_module->procEntry[clusterId].localConfigList; /* first write-back the entry if cache is enabled */ if (cacheEnabled) { Cache_wb(entry, size + sizeof(ti_sdo_ipc_Ipc_ConfigEntry), Cache_Type_ALL, FALSE); } /* set the entry as the new first in the list */ *Ipc_module->procEntry[clusterId].localConfigList = SharedRegion_getSRPtr(entry, 0); /* write-back the config list */ if (cacheEnabled) { Cache_wb(Ipc_module->procEntry[clusterId].localConfigList, SharedRegion_getCacheLineSize(0), Cache_Type_ALL, FALSE); } return (status); }
/* * ======== Ipc_detach ======== */ Int Ipc_detach(UInt16 remoteProcId) { Int i; UInt16 baseId = MultiProc_getBaseIdOfCluster(); UInt16 clusterId = ti_sdo_utils_MultiProc_getClusterId(remoteProcId); Ptr notifySharedAddr; Ptr nsrnSharedAddr; Ptr msgqSharedAddr; volatile ti_sdo_ipc_Ipc_Reserved *slave, *master; SharedRegion_Entry entry; ti_sdo_ipc_Ipc_ProcEntry *ipc; SizeT reservedSize = ti_sdo_ipc_Ipc_reservedSizePerProc(); Bool cacheEnabled = SharedRegion_isCacheEnabled(0); Int status = Ipc_S_SUCCESS; UInt hwiKey; /* Assert remoteProcId is in our cluster and isn't our own */ Assert_isTrue(clusterId < ti_sdo_utils_MultiProc_numProcsInCluster, ti_sdo_utils_MultiProc_A_invalidMultiProcId); Assert_isTrue(remoteProcId != MultiProc_self(), ti_sdo_ipc_Ipc_A_invArgument); /* for checking and incrementing attached below */ hwiKey = Hwi_disable(); if (Ipc_module->procEntry[clusterId].attached > 1) { /* only detach if attach count reaches 1 */ Ipc_module->procEntry[clusterId].attached--; Hwi_restore(hwiKey); return (Ipc_S_BUSY); } else if (Ipc_module->procEntry[clusterId].attached == 0) { /* already detached, restore interrupts and return success */ Hwi_restore(hwiKey); return (Ipc_S_SUCCESS); } /* restore interrupts */ Hwi_restore(hwiKey); /* get region 0 information */ SharedRegion_getEntry(0, &entry); /* * Make sure we detach from all other procs in cluster before * detaching from owner of SR 0. */ if (remoteProcId == entry.ownerProcId) { for (i = 0; i < ti_sdo_utils_MultiProc_numProcsInCluster; i++, baseId++) { if ((baseId != MultiProc_self()) && (baseId != entry.ownerProcId) && (Ipc_module->procEntry[i].attached)) { return (Ipc_E_FAIL); } } } /* get the paramters associated with remoteProcId */ ipc = &(Ipc_module->procEntry[clusterId]); /* determine the slave's slot */ slave = Ipc_getSlaveAddr(remoteProcId, Ipc_module->ipcSharedAddr); /* determine the master's slot */ master = ti_sdo_ipc_Ipc_getMasterAddr(remoteProcId, Ipc_module->ipcSharedAddr); if (cacheEnabled) { Cache_inv((Ptr)slave, reservedSize, Cache_Type_ALL, TRUE); Cache_inv((Ptr)master, reservedSize, Cache_Type_ALL, TRUE); } if (MultiProc_self() < remoteProcId) { /* check to make sure master is not trying to attach */ if (master->startedKey == ti_sdo_ipc_Ipc_PROCSYNCSTART) { return (Ipc_E_NOTREADY); } } else { /* check to make sure slave is not trying to attach */ if (slave->startedKey == ti_sdo_ipc_Ipc_PROCSYNCSTART) { return (Ipc_E_NOTREADY); } } /* The slave processor waits for master to finish its detach sequence */ if (MultiProc_self() < remoteProcId) { if (master->startedKey != ti_sdo_ipc_Ipc_PROCSYNCDETACH) { return (Ipc_E_NOTREADY); } } /* Call user detach fxns */ for (i = 0; i < ti_sdo_ipc_Ipc_numUserFxns; i++) { if (ti_sdo_ipc_Ipc_userFxns[i].userFxn.detach) { status = ti_sdo_ipc_Ipc_userFxns[i].userFxn.detach( ti_sdo_ipc_Ipc_userFxns[i].arg, remoteProcId); if (status < 0) { return (status); } } } if ((ipc->entry.setupMessageQ) && (ti_sdo_ipc_MessageQ_SetupTransportProxy_isRegistered(remoteProcId))) { /* call MessageQ_detach for remote processor */ status = ti_sdo_ipc_MessageQ_SetupTransportProxy_detach(remoteProcId); if (status < 0) { return (Ipc_E_FAIL); } if (slave->transportSRPtr) { /* free the memory if slave processor */ if (MultiProc_self() < remoteProcId) { /* get the pointer to MessageQ transport instance */ msgqSharedAddr = SharedRegion_getPtr(slave->transportSRPtr); /* free the memory back to SharedRegion 0 heap */ Memory_free(SharedRegion_getHeap(0), msgqSharedAddr, 
ti_sdo_ipc_MessageQ_SetupTransportProxy_sharedMemReq( msgqSharedAddr)); /* set pointer for MessageQ transport instance back to NULL */ slave->transportSRPtr = NULL; } } } if ((ipc->entry.setupNotify) && (ti_sdo_utils_NameServer_isRegistered(remoteProcId))) { /* call NameServer_SetupProxy_detach for remote processor */ status = ti_sdo_utils_NameServer_SetupProxy_detach(remoteProcId); if (status < 0) { return (Ipc_E_FAIL); } if (slave->nsrnSRPtr) { /* free the memory if slave processor */ if (MultiProc_self() < remoteProcId) { /* get the pointer to NSRN instance */ nsrnSharedAddr = SharedRegion_getPtr(slave->nsrnSRPtr); /* free the memory back to SharedRegion 0 heap */ Memory_free(SharedRegion_getHeap(0), nsrnSharedAddr, ti_sdo_utils_NameServer_SetupProxy_sharedMemReq( nsrnSharedAddr)); /* set pointer for NSRN instance back to NULL */ slave->nsrnSRPtr = NULL; } } } if ((ipc->entry.setupNotify) && (Notify_intLineRegistered(remoteProcId, 0))) { /* call Notify_detach for remote processor */ status = ti_sdo_ipc_Notify_detach(remoteProcId); if (status < 0) { return (Ipc_E_FAIL); } if (slave->notifySRPtr) { /* free the memory if slave processor */ if (MultiProc_self() < remoteProcId) { /* get the pointer to Notify instance */ notifySharedAddr = SharedRegion_getPtr(slave->notifySRPtr); /* free the memory back to SharedRegion 0 heap */ Memory_free(SharedRegion_getHeap(0), notifySharedAddr, Notify_sharedMemReq(remoteProcId, notifySharedAddr)); /* set pointer for Notify instance back to NULL */ slave->notifySRPtr = NULL; } } } /* close any HeapMemMP which may have been opened */ status = ti_sdo_ipc_SharedRegion_detach(remoteProcId); if (status < 0) { return (status); } /* close any GateMP which may have been opened */ status = ti_sdo_ipc_GateMP_detach(remoteProcId); if (status < 0) { return (status); } if (MultiProc_self() < remoteProcId) { slave->configListHead = ti_sdo_ipc_SharedRegion_INVALIDSRPTR; slave->startedKey = ti_sdo_ipc_Ipc_PROCSYNCDETACH; if (cacheEnabled) { Cache_wbInv((Ptr)slave, reservedSize, Cache_Type_ALL, TRUE); } } else { master->configListHead = ti_sdo_ipc_SharedRegion_INVALIDSRPTR; master->startedKey = ti_sdo_ipc_Ipc_PROCSYNCDETACH; if (cacheEnabled) { Cache_wbInv((Ptr)master, reservedSize, Cache_Type_ALL, TRUE); } } /* attached must be decremented atomically */ hwiKey = Hwi_disable(); /* now detached from remote processor */ Ipc_module->procEntry[clusterId].attached--; /* restore interrupts */ Hwi_restore(hwiKey); return (status); }
/* * ======== SemaphoreMP_Instance_init ======== */ Int SemaphoreMP_Instance_init(SemaphoreMP_Object *obj, Int count, const SemaphoreMP_Params *params, Error_Block *eb) { Ptr localAddr; Int status; IHeap_Handle regionHeap; ListMP_Params listMPParams; SharedRegion_SRPtr sharedShmBase; if (params->openFlag) { /* Open by sharedAddr */ obj->objType = ti_sdo_ipc_Ipc_ObjType_OPENDYNAMIC; obj->attrs = (SemaphoreMP_Attrs *)params->sharedAddr; obj->regionId = SharedRegion_getId(obj->attrs); obj->cacheEnabled = SharedRegion_isCacheEnabled(obj->regionId); obj->mode = (SemaphoreMP_Mode)obj->attrs->mode; regionHeap = SharedRegion_getHeap(obj->regionId); Assert_isTrue(regionHeap != NULL, ti_sdo_ipc_SharedRegion_A_noHeap); /* get the local address of the SRPtr */ localAddr = SharedRegion_getPtr(obj->attrs->gateMPAddr); status = GateMP_openByAddr(localAddr, (GateMP_Handle *)&(obj->gate)); if (status < 0) { return (1); } /* Open the ListMP */ localAddr = (Ptr)_Ipc_roundup( (UInt32)obj->attrs + sizeof(SemaphoreMP_Attrs), SharedRegion_getCacheLineSize(obj->regionId)); status = ListMP_openByAddr(localAddr, (ListMP_Handle *)&(obj->pendQ)); if (status < 0) { /* obj->freeList set to NULL */ return (4); } return (0); } /* init the gate */ if (params->gate != NULL) { obj->gate = params->gate; } else { obj->gate = (ti_sdo_ipc_GateMP_Handle)GateMP_getDefaultRemote(); } obj->mode = params->mode; if (params->sharedAddr == NULL) { /* Creating using a shared region ID */ obj->objType = ti_sdo_ipc_Ipc_ObjType_CREATEDYNAMIC_REGION; obj->regionId = params->regionId; obj->cacheEnabled = SharedRegion_isCacheEnabled(obj->regionId); /* Need to allocate from the heap */ obj->allocSize = SemaphoreMP_sharedMemReq(params); regionHeap = SharedRegion_getHeap(obj->regionId); Assert_isTrue(regionHeap != NULL, ti_sdo_ipc_SharedRegion_A_noHeap); /* The region heap will take care of the alignment */ obj->attrs = Memory_alloc(regionHeap, obj->allocSize, 0, eb); if (obj->attrs == NULL) { return (2); } } else { /* Creating using sharedAddr */ obj->regionId = SharedRegion_getId(params->sharedAddr); /* Assert that the buffer is in a valid shared region */ Assert_isTrue(obj->regionId != SharedRegion_INVALIDREGIONID, ti_sdo_ipc_Ipc_A_addrNotInSharedRegion); /* Assert that sharedAddr is cache aligned */ Assert_isTrue(((UInt32)params->sharedAddr % SharedRegion_getCacheLineSize(obj->regionId) == 0), ti_sdo_ipc_Ipc_A_addrNotCacheAligned); /* set object's cacheEnabled, objType, and attrs */ obj->cacheEnabled = SharedRegion_isCacheEnabled(obj->regionId); obj->objType = ti_sdo_ipc_Ipc_ObjType_CREATEDYNAMIC; obj->attrs = (SemaphoreMP_Attrs *)params->sharedAddr; } /* Store the GateMP sharedAddr in the SemaphoreMP Attrs */ obj->attrs->gateMPAddr = ti_sdo_ipc_GateMP_getSharedAddr(obj->gate); obj->attrs->mode = (Bits16)obj->mode; obj->attrs->count = count; /* Create the freeList */ ListMP_Params_init(&listMPParams); listMPParams.sharedAddr = (Ptr)_Ipc_roundup((UInt32)obj->attrs + sizeof(SemaphoreMP_Attrs), SharedRegion_getCacheLineSize(obj->regionId)); listMPParams.gate = (GateMP_Handle)obj->gate; obj->pendQ = (ti_sdo_ipc_ListMP_Handle)ListMP_create(&listMPParams); if (obj->pendQ == NULL) { return (3); } /* Last thing, set the status */ obj->attrs->status = SemaphoreMP_CREATED; if (obj->cacheEnabled) { Cache_wbInv(obj->attrs, sizeof(SemaphoreMP_Attrs), Cache_Type_ALL, TRUE); } /* Add entry to NameServer */ if (params->name != NULL) { /* We will store a shared pointer in the NameServer */ sharedShmBase = SharedRegion_getSRPtr(obj->attrs, obj->regionId); 
obj->nsKey = NameServer_addUInt32((NameServer_Handle) SemaphoreMP_module->nameServer, params->name, (UInt32)sharedShmBase); if (obj->nsKey == NULL) { /* NameServer_addUInt32 failed */ return (4); } } return (0); }
/****************************************************************************** * TASK FUNCTION *****************************************************************************/ void task_fxn(UArg arg0, UArg arg1){ Int status; Int coreCount; Int nextCore; MessageQ_Msg msg; MessageQ_QueueId msgQueueIds[MAX_NUM_CORES]; /* Register this heap with the Message Q */ MessageQ_registerHeap((IHeap_Handle)SharedRegion_getHeap(0), HEAP_ID); /* * In order to send messages to other cores, we must know that core's Queue * ID. So, we'll create an array on each core that associates the Queue ID * with the core number, and then we'll open each queue. Again, we spin * here until the queue is open, sleeping for one tick after every attempt. */ for (coreCount = 0; coreCount < MAX_NUM_CORES; coreCount++){ System_sprintf(remoteQueueName, "%s", MultiProc_getName(coreCount)); do { status = MessageQ_open(remoteQueueName, &msgQueueIds[coreCount]); if (status < 0){ Task_sleep(1); } }while (status < 0); } /* * At this point, our application is ready to begin sending messages using * Message Queue. The core with the number TOKEN_START_CORE has the * responsibility of sending the first message. So, we'll handle that in * this block. */ if (selfId == TOKEN_START_CORE){ /* * Allocate the initial message. If the message is not properly * allocated, we must abort */ /* * TODO: IPC #1 - Allocate Memory for Token Message * Add core below that ALLOCATES the memory for the token message. * We've already declared the variable msg to hold the pointer to * this message. The code to check if the pointer is NULL is * already included. */ msg = MessageQ_alloc(HEAP_ID, sizeof(myMsg)); if (msg == NULL){ System_abort("MessageQ_alloc failed\n"); } /* * Now randomly select the next processor to send the. This function * simply selects a random core number and ensures it's not the same as * the current core number. */ nextCore = findNextCore(selfId); /* * Set the Initial Token Count in the message, and specify that the * message type is MSG_TOKEN */ ((myMsg*)msg)->tokenCount = 1; ((myMsg*)msg)->messageType = MSG_TOKEN; /* * We can also set a reply queue so that the core can acknowledge this * message without having to know which core it came from. */ MessageQ_setReplyQueue(messageQ, msg); /* * Now we actually send the message to the next core that we've chosen. */ /* TODO: IPC #2 - Pass the token to the destination core * Add the code to send the message to the destination core. This is * done by putting the message in the destination core's queue. Don't * forget that the ID of the destination core's queue is stored at * element "nextCore" in the array msgQueueIds, and is NOT the same * as the core number. */ status = MessageQ_put(msgQueueIds[nextCore], msg); } while (TRUE){ msgType messageType; MessageQ_Msg ack; MessageQ_QueueId ackQueueId; Int currentTokenCount; /* TODO: IPC #3 - Get a Message from the local queue. * Take the message from the local queue and store it in the variable * message. The function call return value should be stored in the * variable status. Hint: The parameters passed to this function * specify a time out size. We want to configure this call to * never time out, and block eternally until a message is received. */ status = MessageQ_get(messageQ, &msg, MessageQ_FOREVER); if (status < 0){ System_abort("This should not occur since the timeout is forever\n"); } /* * Read the Message Type from the received message, along with the current * token count. 
*/ messageType = ((myMsg*)msg)->messageType; currentTokenCount = ((myMsg*)msg)->tokenCount; /* * Now, check what type of message it is and take action. Here are the * actions to be taken. * * MSG_TOKEN * - Acknowledge that token is received to sending core. * - If token count is less than MAX_MESSAGES * - Increment the token count. * - Forward the token on to the next random core * - If token count is equal to MAX Messages * - Free the Token message. * - Send a Done Message to all other cores. * - Break out of the infinite loop. * * MSG_ACK * - Free the Ack message * MSG_DONE * - Free the Done Message * - Break Out of infinite loop */ switch (messageType){ case MSG_TOKEN: System_printf("Token Received - Count = %d\n", currentTokenCount); /* * TODO: IPC #4 - Get the Reply Queue for the token * Store the ID of the reply queue in the variable ackQueueId. * This function allows us to not have to figure out which core * sent this message. This is the analogous function to the * MessageQ_setReplyQueue() function that was set before the * message was sent. This data is stored in the MessageQ_MsgHeader * element that's included with the message */ ackQueueId = MessageQ_getReplyQueue(msg); /* * TODO: IPC #5 - Allocate the acknowledge message * Allocate the acknowledge message and store the pointer to it * in the variable ack. */ ack = MessageQ_alloc(HEAP_ID, sizeof(myMsg)); // Set the Message Type of the new Message to MSG_ACK if (ack==NULL){ System_abort("MessageQ Alloc Failed\n"); } // Set the Message Type of the new Message to MSG_ACK ((myMsg*)ack)->messageType = MSG_ACK; /* * TODO: IPC #6 - Send the Acknowledge message * Don't forget that we've already stored the reply queue ID in * ackQueueId above. */ status = MessageQ_put(ackQueueId, ack); /* * Now handle the actions required by the status of the message. First * we must check to see if we're at the Token Passing limit. So we'll * compare the current Token count with MAX_MESSAGES. */ /* * If the current token count is the max, then we must free the current * message and then allocate new DONE messages to be sent to the other * cores. */ if (currentTokenCount == NUM_MESSAGES){ /* * TODO: IPC #7 - Free the memory used by the token message * Don't forget that the pointer to this memory is in the * variable msg. */ MessageQ_free(msg); /* * Now allocate and send ALL cores a DONE message. We don't need to * worry about special handling of the current core. It will just * send itself a DONE message and handle it just as the other cores * do */ /* * TODO: IPC #8 - Note that this core will send itself a message. * There's nothing to be added here. just note that this * routine is blindly sending done messages to all of the cores * and not taking into account it's own core number. So, this * core will send one of these messages to itself. */ for (coreCount =0; coreCount < MAX_NUM_CORES; coreCount++){ msg = MessageQ_alloc(HEAP_ID, sizeof(myMsg)); if (msg == NULL){ System_abort("MessageQ Alloc Failed\n"); } // Set the Message Type to MSG_DONE ((myMsg*)msg)->messageType = MSG_DONE; // Now send it to the selected core status = MessageQ_put(msgQueueIds[coreCount], msg); } break; } /* * If we're not at the last message, then we must increment the * tokenCount and pass the message on to a random core. Don't * forget to set the reply queue so we can get an acknowledge. */ nextCore = findNextCore(selfId); ((myMsg*)msg)->tokenCount = currentTokenCount + 1; /* * TODO: IPC #9- Set the reply queue for the token message. 
* We need to be sure to set the reply queue each time. * Otherwise, the wrong core will receive the acknowledge. */ MessageQ_setReplyQueue(messageQ, msg); // Put the message on the proper queue status = MessageQ_put(msgQueueIds[nextCore], msg); break; case MSG_ACK: System_printf("Ack Received\n"); /* * All we need to do in this case is free the Ack message */ MessageQ_free(msg); break; case MSG_DONE: System_printf("Done Received\n"); /* * If we receive the Done message we just need to free the message, and * then exit SYS/BIOS because the application is complete. */ MessageQ_free(msg); BIOS_exit(0); break; default: System_printf("Invalid Message Type Received\n"); return; } } }
static Int dvsdk_grapx_display_rpc_remote_mode_init ()
{
    RcmClient_Params rcmClientParams;
    IArg key;
    Bool doInit = FALSE;
    Int status = 0;

    key = Gate_enterSystem();
    if (FALSE == g_RemoteStubContext.bRemoteInitDone) {
        doInit = TRUE;
    }
    Gate_leaveSystem(key);

    if (TRUE == doInit) {
        GateThread_Params gtParams;

        GateThread_Params_init (&gtParams);
        g_RemoteStubContext.hGate =
            GateThread_Handle_upCast(GateThread_create(&gtParams, NULL));

        key = GateH_enter (g_RemoteStubContext.hGate);

        MessageQ_registerHeap(SharedRegion_getHeap(0), Global_GrpxDssMsgHeapId);

        RcmClient_Params_init (&rcmClientParams);
        rcmClientParams.heapId = Global_GrpxDssMsgHeapId;

        do {
            status = RcmClient_create(DVSDK_DSS_GRPX_SERVER_NAME,
                                      &rcmClientParams,
                                      &g_RemoteStubContext.hRcmClient);
            if (status < 0) {
                Thread_yield(NULL);
            }
        } while (status < 0);

        if (0 == status) {
            status = RcmClient_getSymbolIndex (g_RemoteStubContext.hRcmClient,
                         DVSDK_DSS_GRPX_INIT_FXN_NAME,
                         &g_RemoteStubContext.nInitFxnIdx);
        }
        if (0 == status) {
            status = RcmClient_getSymbolIndex (g_RemoteStubContext.hRcmClient,
                         DVSDK_DSS_GRPX_START_FXN_NAME,
                         &g_RemoteStubContext.nStartFxnIdx);
        }
        if (0 == status) {
            status = RcmClient_getSymbolIndex (g_RemoteStubContext.hRcmClient,
                         DVSDK_DSS_GRPX_STOP_FXN_NAME,
                         &g_RemoteStubContext.nStopFxnIdx);
        }
        if (0 == status) {
            status = RcmClient_getSymbolIndex (g_RemoteStubContext.hRcmClient,
                         DVSDK_DSS_GRPX_DEINIT_FXN_NAME,
                         &g_RemoteStubContext.nDeinitFxnIdx);
        }
        if (0 == status) {
            status = RcmClient_getSymbolIndex (g_RemoteStubContext.hRcmClient,
                         DVSDK_DSS_GRPX_DISPLAYTOGGLE_FXN_NAME,
                         &g_RemoteStubContext.nDisplayToggelFxnIdx);
        }

        g_RemoteStubContext.bRemoteInitDone = TRUE;

        GateH_leave(g_RemoteStubContext.hGate, key);
    }

    return status;
}
/* * ======== ipcSetup ======== */ Int ipcSetup (Int testCase) { Int status = 0; Char * procName; UInt16 procId; ProcMgr_AttachParams attachParams; ProcMgr_State state; #if !defined(SYSLINK_USE_DAEMON) UInt32 entryPoint = 0; ProcMgr_StartParams startParams; Char uProcId; HeapBufMP_Params heapbufmpParams; #if defined(SYSLINK_USE_LOADER) Char * imageName; UInt32 fileId; #endif #endif Ipc_Config config; Int i; UInt32 srCount; SharedRegion_Entry srEntry; Osal_printf ("ipcSetup: Setup IPC componnets \n"); switch(testCase) { case 0: Osal_printf ("ipcSetup: Local RCM test\n"); remoteServerName = RCMSERVER_NAME; procName = MPU_PROC_NAME; break; case 1: Osal_printf ("ipcSetup: RCM test with RCM client and server on " "Sys M3\n\n"); remoteServerName = SYSM3_SERVER_NAME; procName = SYSM3_PROC_NAME; break; case 2: Osal_printf ("ipcSetup: RCM test with RCM client and server on " "App M3\n\n"); remoteServerName = APPM3_SERVER_NAME; procName = APPM3_PROC_NAME; break; case 3: Osal_printf ("ipcSetup: RCM test with RCM client and server on " "Tesla\n\n"); remoteServerName = DSP_SERVER_NAME; procName = DSP_PROC_NAME; break; default: Osal_printf ("ipcSetup: Please pass valid arg " "(0-local, 1-Sys M3, 2-App M3, 3-Tesla) \n"); goto exit; break; } Ipc_getConfig (&config); status = Ipc_setup (&config); if (status < 0) { Osal_printf ("ipcSetup: Error in Ipc_setup [0x%x]\n", status); goto exit; } Osal_printf("Ipc_setup status [0x%x]\n", status); procId = ((testCase == 3) ? MultiProc_getId (DSP_PROC_NAME) : \ MultiProc_getId (SYSM3_PROC_NAME)); remoteIdClient = MultiProc_getId (procName); /* Open a handle to the ProcMgr instance. */ status = ProcMgr_open (&procMgrHandleClient, procId); if (status < 0) { Osal_printf ("ipcSetup: Error in ProcMgr_open [0x%x]\n", status); goto exit; } if (status >= 0) { Osal_printf ("ipcSetup: ProcMgr_open Status [0x%x]\n", status); ProcMgr_getAttachParams (NULL, &attachParams); /* Default params will be used if NULL is passed. */ status = ProcMgr_attach (procMgrHandleClient, &attachParams); if (status < 0) { Osal_printf ("ipcSetup: ProcMgr_attach failed [0x%x]\n", status); } else { Osal_printf ("ipcSetup: ProcMgr_attach status: [0x%x]\n", status); state = ProcMgr_getState (procMgrHandleClient); Osal_printf ("ipcSetup: After attach: ProcMgr_getState\n" " state [0x%x]\n", state); } } if ((status >= 0) && (testCase == 2)) { status = ProcMgr_open (&procMgrHandleClient1, remoteIdClient); if (status < 0) { Osal_printf ("ipcSetup: Error in ProcMgr_open [0x%x]\n", status); goto exit; } if (status >= 0) { Osal_printf ("ipcSetup: ProcMgr_open Status [0x%x]\n", status); ProcMgr_getAttachParams (NULL, &attachParams); /* Default params will be used if NULL is passed. 
*/ status = ProcMgr_attach (procMgrHandleClient1, &attachParams); if (status < 0) { Osal_printf ("ipcSetup: ProcMgr_attach failed [0x%x]\n", status); } else { Osal_printf ("ipcSetup: ProcMgr_attach status: [0x%x]\n", status); state = ProcMgr_getState (procMgrHandleClient1); Osal_printf ("ipcSetup: After attach: ProcMgr_getState\n" " state [0x%x]\n", state); } } } #if !defined(SYSLINK_USE_DAEMON) /* Daemon sets this up */ #if defined(SYSLINK_USE_LOADER) if (testCase == 1) imageName = RCM_MPUCLIENT_SYSM3ONLY_IMAGE; else if (testCase == 2) imageName = RCM_MPUCLIENT_SYSM3_IMAGE; else if (testCase == 3) imageName = RCM_MPUCLIENT_DSP_IMAGE; if (testCase != 0) { status = ProcMgr_load (procMgrHandleClient, imageName, 2, &imageName, &entryPoint, &fileId, procId); if (status < 0) { Osal_printf ("ipcSetup: Error in ProcMgr_load %s image [0x%x]\n", procName, status); goto exit; } Osal_printf ("ipcSetup: ProcMgr_load %s image Status [0x%x]\n", procName, status); } #endif /* defined(SYSLINK_USE_LOADER) */ if (testCase != 0) { startParams.proc_id = procId; status = ProcMgr_start (procMgrHandleClient, entryPoint, &startParams); if (status < 0) { Osal_printf ("ipcSetup: Error in ProcMgr_start %s [0x%x]\n", procName, status); goto exit; } Osal_printf ("ipcSetup: ProcMgr_start %s Status [0x%x]\n", procName, status); } if (testCase == 2) { #if defined(SYSLINK_USE_LOADER) imageName = RCM_MPUCLIENT_APPM3_IMAGE; uProcId = MultiProc_getId (APPM3_PROC_NAME); status = ProcMgr_load (procMgrHandleClient1, imageName, 2, &imageName, &entryPoint, &fileId, uProcId); if (status < 0) { Osal_printf ("ipcSetup: Error in ProcMgr_load AppM3 image: " "[0x%x]\n", status); goto exit; } Osal_printf ("ipcSetup: AppM3: ProcMgr_load Status [0x%x]\n", status); #endif /* defined(SYSLINK_USE_LOADER) */ startParams.proc_id = MultiProc_getId (APPM3_PROC_NAME); status = ProcMgr_start (procMgrHandleClient1, entryPoint, &startParams); if (status < 0) { Osal_printf ("ipcSetup: Error in ProcMgr_start AppM3 [0x%x]\n", status); goto exit; } Osal_printf ("ipcSetup: ProcMgr_start AppM3 Status [0x%x]\n", status); } #endif /* defined(SYSLINK_USE_DAEMON) */ srCount = SharedRegion_getNumRegions(); Osal_printf ("SharedRegion_getNumRegions = %d\n", srCount); for (i = 0; i < srCount; i++) { status = SharedRegion_getEntry (i, &srEntry); Osal_printf ("SharedRegion_entry #%d: base = 0x%x len = 0x%x " "ownerProcId = %d isValid = %d cacheEnable = %d " "cacheLineSize = 0x%x createHeap = %d name = %s\n", i, srEntry.base, srEntry.len, srEntry.ownerProcId, (Int)srEntry.isValid, (Int)srEntry.cacheEnable, srEntry.cacheLineSize, (Int)srEntry.createHeap, srEntry.name); } #if !defined(SYSLINK_USE_DAEMON) /* Daemon sets this up */ /* Create Heap and register it with MessageQ */ if (status >= 0) { HeapBufMP_Params_init (&heapbufmpParams); heapbufmpParams.sharedAddr = NULL; heapbufmpParams.align = 128; heapbufmpParams.numBlocks = 4; heapbufmpParams.blockSize = MSGSIZE; heapSize = HeapBufMP_sharedMemReq (&heapbufmpParams); Osal_printf ("ipcSetup: heapSize = 0x%x\n", heapSize); srHeap = SharedRegion_getHeap (RCM_HEAP_SR); if (srHeap == NULL) { status = MEMORYOS_E_FAIL; Osal_printf ("ipcSetup: SharedRegion_getHeap failed for srHeap:" " [0x%x]\n", srHeap); } else { Osal_printf ("ipcSetup: Before Memory_alloc = 0x%x\n", srHeap); heapBufPtr = Memory_alloc (srHeap, heapSize, 0); if (heapBufPtr == NULL) { status = MEMORYOS_E_MEMORY; Osal_printf ("ipcSetup: Memory_alloc failed for ptr: [0x%x]\n", heapBufPtr); } else { heapbufmpParams.name = RCM_MSGQ_HEAPNAME; 
heapbufmpParams.sharedAddr = heapBufPtr; Osal_printf ("ipcSetup: Before HeapBufMP_Create: [0x%x]\n", heapBufPtr); heapHandle = HeapBufMP_create (&heapbufmpParams); if (heapHandle == NULL) { status = HeapBufMP_E_FAIL; Osal_printf ("ipcSetup: HeapBufMP_create failed for Handle:" "[0x%x]\n", heapHandle); } else { /* Register this heap with MessageQ */ status = MessageQ_registerHeap (heapHandle, RCM_MSGQ_HEAPID); if (status < 0) { Osal_printf ("ipcSetup: MessageQ_registerHeap " "failed!\n"); } } } } } #endif /* defined(SYSLINK_USE_DAEMON) */ exit: Osal_printf ("ipcSetup: Leaving ipcSetup()\n"); return status; }
/* * ======== ipcSetup ======== */ static Int ipcSetup (Char * sysM3ImageName, Char * appM3ImageName) { Ipc_Config config; ProcMgr_StopParams stopParams; ProcMgr_StartParams startParams; UInt32 entryPoint = 0; UInt16 procId; Int status = 0; ProcMgr_AttachParams attachParams; ProcMgr_State state; HeapBufMP_Params heapbufmpParams; Int i; UInt32 srCount; SharedRegion_Entry srEntry; if(appM3ImageName != NULL) appM3Client = TRUE; else appM3Client = FALSE; Ipc_getConfig (&config); status = Ipc_setup (&config); if (status < 0) { Osal_printf ("Error in Ipc_setup [0x%x]\n", status); goto exit; } /* Get MultiProc IDs by name. */ remoteIdSysM3 = MultiProc_getId (SYSM3_PROC_NAME); Osal_printf ("MultiProc_getId remoteId: [0x%x]\n", remoteIdSysM3); remoteIdAppM3 = MultiProc_getId (APPM3_PROC_NAME); Osal_printf ("MultiProc_getId remoteId: [0x%x]\n", remoteIdAppM3); procId = remoteIdSysM3; Osal_printf ("MultiProc_getId procId: [0x%x]\n", procId); /* Temporary fix to account for a timing issue during recovery. */ usleep(FAULT_RECOVERY_DELAY); printf("RCM procId= %d\n", procId); /* Open a handle to the ProcMgr instance. */ status = ProcMgr_open (&procMgrHandleSysM3, procId); if (status < 0) { Osal_printf ("Error in ProcMgr_open [0x%x]\n", status); goto exit_ipc_destroy; } else { Osal_printf ("ProcMgr_open Status [0x%x]\n", status); ProcMgr_getAttachParams (NULL, &attachParams); /* Default params will be used if NULL is passed. */ status = ProcMgr_attach (procMgrHandleSysM3, &attachParams); if (status < 0) { Osal_printf ("ProcMgr_attach failed [0x%x]\n", status); } else { Osal_printf ("ProcMgr_attach status: [0x%x]\n", status); state = ProcMgr_getState (procMgrHandleSysM3); Osal_printf ("After attach: ProcMgr_getState\n" " state [0x%x]\n", status); } } if (status >= 0 && appM3Client) { procId = remoteIdAppM3; Osal_printf ("MultiProc_getId procId: [0x%x]\n", procId); /* Open a handle to the ProcMgr instance. */ status = ProcMgr_open (&procMgrHandleAppM3, procId); if (status < 0) { Osal_printf ("Error in ProcMgr_open [0x%x]\n", status); goto exit_ipc_destroy; } else { Osal_printf ("ProcMgr_open Status [0x%x]\n", status); ProcMgr_getAttachParams (NULL, &attachParams); /* Default params will be used if NULL is passed. 
*/ status = ProcMgr_attach (procMgrHandleAppM3, &attachParams); if (status < 0) { Osal_printf ("ProcMgr_attach failed [0x%x]\n", status); } else { Osal_printf ("ProcMgr_attach status: [0x%x]\n", status); state = ProcMgr_getState (procMgrHandleAppM3); Osal_printf ("After attach: ProcMgr_getState\n" " state [0x%x]\n", status); } } } #if defined(SYSLINK_USE_LOADER) Osal_printf ("SysM3 Load: loading the SysM3 image %s\n", sysM3ImageName); status = ProcMgr_load (procMgrHandleSysM3, sysM3ImageName, 2, &sysM3ImageName, &entryPoint, &fileIdSysM3, remoteIdSysM3); if(status < 0) { Osal_printf ("Error in ProcMgr_load, status [0x%x]\n", status); goto exit_procmgr_close_sysm3; } #endif startParams.proc_id = remoteIdSysM3; Osal_printf ("Starting ProcMgr for procID = %d\n", startParams.proc_id); status = ProcMgr_start(procMgrHandleSysM3, entryPoint, &startParams); if(status < 0) { Osal_printf ("Error in ProcMgr_start, status [0x%x]\n", status); goto exit_procmgr_close_sysm3; } if(appM3Client) { #if defined(SYSLINK_USE_LOADER) Osal_printf ("AppM3 Load: loading the AppM3 image %s\n", appM3ImageName); status = ProcMgr_load (procMgrHandleAppM3, appM3ImageName, 2, &appM3ImageName, &entryPoint, &fileIdAppM3, remoteIdAppM3); if(status < 0) { Osal_printf ("Error in ProcMgr_load, status [0x%x]\n", status); goto exit_procmgr_stop_sysm3; } #endif startParams.proc_id = remoteIdAppM3; Osal_printf ("Starting ProcMgr for procID = %d\n", startParams.proc_id); status = ProcMgr_start(procMgrHandleAppM3, entryPoint, &startParams); if(status < 0) { Osal_printf ("Error in ProcMgr_start, status [0x%x]\n", status); goto exit_procmgr_stop_sysm3; } } Osal_printf ("SysM3: Creating Ducati DMM pool of size 0x%x\n", DUCATI_DMM_POOL_0_SIZE); status = ProcMgr_createDMMPool (DUCATI_DMM_POOL_0_ID, DUCATI_DMM_POOL_0_START, DUCATI_DMM_POOL_0_SIZE, remoteIdSysM3); if(status < 0) { Osal_printf ("Error in ProcMgr_createDMMPool, status [0x%x]\n", status); goto exit_procmgr_stop_sysm3; } srCount = SharedRegion_getNumRegions(); Osal_printf ("SharedRegion_getNumRegions = %d\n", srCount); for (i = 0; i < srCount; i++) { status = SharedRegion_getEntry (i, &srEntry); Osal_printf ("SharedRegion_entry #%d: base = 0x%x len = 0x%x " "ownerProcId = %d isValid = %d cacheEnable = %d " "cacheLineSize = 0x%x createHeap = %d name = %s\n", i, srEntry.base, srEntry.len, srEntry.ownerProcId, (Int)srEntry.isValid, (Int)srEntry.cacheEnable, srEntry.cacheLineSize, (Int)srEntry.createHeap, srEntry.name); } /* Create the heap to be used by RCM and register it with MessageQ */ /* TODO: Do this dynamically by reading from the IPC config from the * baseimage using Ipc_readConfig() */ if (status >= 0) { HeapBufMP_Params_init (&heapbufmpParams); heapbufmpParams.sharedAddr = NULL; heapbufmpParams.align = RCM_MSGQ_TILER_HEAP_ALIGN; heapbufmpParams.numBlocks = RCM_MSGQ_TILER_HEAP_BLOCKS; heapbufmpParams.blockSize = RCM_MSGQ_TILER_MSGSIZE; heapSize = HeapBufMP_sharedMemReq (&heapbufmpParams); Osal_printf ("heapSize = 0x%x\n", heapSize); srHeap = SharedRegion_getHeap (RCM_MSGQ_HEAP_SR); if (srHeap == NULL) { status = MEMORYOS_E_FAIL; Osal_printf ("SharedRegion_getHeap failed for srHeap:" " [0x%x]\n", srHeap); goto exit_procmgr_stop_sysm3; } else { Osal_printf ("Before Memory_alloc = 0x%x\n", srHeap); heapBufPtr = Memory_alloc (srHeap, heapSize, 0); if (heapBufPtr == NULL) { status = MEMORYOS_E_MEMORY; Osal_printf ("Memory_alloc failed for ptr: [0x%x]\n", heapBufPtr); goto exit_procmgr_stop_sysm3; } else { heapbufmpParams.name = RCM_MSGQ_TILER_HEAPNAME; 
heapbufmpParams.sharedAddr = heapBufPtr; Osal_printf ("Before HeapBufMP_Create: [0x%x]\n", heapBufPtr); heapHandle = HeapBufMP_create (&heapbufmpParams); if (heapHandle == NULL) { status = HeapBufMP_E_FAIL; Osal_printf ("HeapBufMP_create failed for Handle:" "[0x%x]\n", heapHandle); goto exit_procmgr_stop_sysm3; } else { /* Register this heap with MessageQ */ status = MessageQ_registerHeap (heapHandle, RCM_MSGQ_TILER_HEAPID); if (status < 0) { Osal_printf ("MessageQ_registerHeap failed!\n"); goto exit_procmgr_stop_sysm3; } } } } } if (status >= 0) { HeapBufMP_Params_init (&heapbufmpParams); heapbufmpParams.sharedAddr = NULL; heapbufmpParams.align = RCM_MSGQ_DOMX_HEAP_ALIGN; heapbufmpParams.numBlocks = RCM_MSGQ_DOMX_HEAP_BLOCKS; heapbufmpParams.blockSize = RCM_MSGQ_DOMX_MSGSIZE; heapSize1 = HeapBufMP_sharedMemReq (&heapbufmpParams); Osal_printf ("heapSize1 = 0x%x\n", heapSize1); heapBufPtr1 = Memory_alloc (srHeap, heapSize1, 0); if (heapBufPtr1 == NULL) { status = MEMORYOS_E_MEMORY; Osal_printf ("Memory_alloc failed for ptr: [0x%x]\n", heapBufPtr1); goto exit_procmgr_stop_sysm3; } else { heapbufmpParams.name = RCM_MSGQ_DOMX_HEAPNAME; heapbufmpParams.sharedAddr = heapBufPtr1; Osal_printf ("Before HeapBufMP_Create: [0x%x]\n", heapBufPtr1); heapHandle1 = HeapBufMP_create (&heapbufmpParams); if (heapHandle1 == NULL) { status = HeapBufMP_E_FAIL; Osal_printf ("HeapBufMP_create failed for Handle:" "[0x%x]\n", heapHandle1); goto exit_procmgr_stop_sysm3; } else { /* Register this heap with MessageQ */ status = MessageQ_registerHeap (heapHandle1, RCM_MSGQ_DOMX_HEAPID); if (status < 0) { Osal_printf ("MessageQ_registerHeap failed!\n"); goto exit_procmgr_stop_sysm3; } } } } Osal_printf ("=== SysLink-IPC setup completed successfully!===\n"); return 0; exit_procmgr_stop_sysm3: stopParams.proc_id = remoteIdSysM3; status = ProcMgr_stop (procMgrHandleSysM3, &stopParams); if (status < 0) { Osal_printf ("Error in ProcMgr_stop(%d): status = 0x%x\n", stopParams.proc_id, status); } exit_procmgr_close_sysm3: status = ProcMgr_close (&procMgrHandleSysM3); if (status < 0) { Osal_printf ("Error in ProcMgr_close: status = 0x%x\n", status); } exit_ipc_destroy: status = Ipc_destroy (); if (status < 0) { Osal_printf ("Error in Ipc_destroy: status = 0x%x\n", status); } exit: return (-1); }
/* * ======== ti_sdo_ipc_ListMP_Instance_init ======== */ Int ti_sdo_ipc_ListMP_Instance_init(ti_sdo_ipc_ListMP_Object *obj, const ti_sdo_ipc_ListMP_Params *params, Error_Block *eb) { SharedRegion_SRPtr sharedShmBase; Ptr localAddr; Int status; ListMP_Params sparams; IHeap_Handle regionHeap; if (params->openFlag == TRUE) { /* Open by sharedAddr */ obj->objType = ti_sdo_ipc_Ipc_ObjType_OPENDYNAMIC; obj->attrs = (ti_sdo_ipc_ListMP_Attrs *)params->sharedAddr; obj->regionId = SharedRegion_getId(&(obj->attrs->head)); obj->cacheEnabled = SharedRegion_isCacheEnabled(obj->regionId); obj->cacheLineSize = SharedRegion_getCacheLineSize(obj->regionId); /* get the local address of the SRPtr */ localAddr = SharedRegion_getPtr(obj->attrs->gateMPAddr); status = GateMP_openByAddr(localAddr, (GateMP_Handle *)&(obj->gate)); if (status != GateMP_S_SUCCESS) { Error_raise(eb, ti_sdo_ipc_Ipc_E_internal, 0, 0); return (1); } return (0); } /* init the gate */ if (params->gate != NULL) { obj->gate = params->gate; } else { obj->gate = (ti_sdo_ipc_GateMP_Handle)GateMP_getDefaultRemote(); } if (params->sharedAddr == NULL) { /* Creating using a shared region ID */ obj->objType = ti_sdo_ipc_Ipc_ObjType_CREATEDYNAMIC_REGION; obj->regionId = params->regionId; obj->cacheEnabled = SharedRegion_isCacheEnabled(obj->regionId); obj->cacheLineSize = SharedRegion_getCacheLineSize(obj->regionId); /* Need to allocate from the heap */ ListMP_Params_init(&sparams); sparams.regionId = params->regionId; obj->allocSize = ListMP_sharedMemReq(&sparams); regionHeap = SharedRegion_getHeap(obj->regionId); Assert_isTrue(regionHeap != NULL, ti_sdo_ipc_SharedRegion_A_noHeap); /* The region heap will take care of the alignment */ obj->attrs = Memory_alloc(regionHeap, obj->allocSize, 0, eb); if (obj->attrs == NULL) { return (2); } } else { /* Creating using sharedAddr */ obj->regionId = SharedRegion_getId(params->sharedAddr); /* Assert that the buffer is in a valid shared region */ Assert_isTrue(obj->regionId != SharedRegion_INVALIDREGIONID, ti_sdo_ipc_Ipc_A_addrNotInSharedRegion); /* set object's cacheEnabled, objType, and attrs */ obj->cacheEnabled = SharedRegion_isCacheEnabled(obj->regionId); obj->cacheLineSize = SharedRegion_getCacheLineSize(obj->regionId); obj->objType = ti_sdo_ipc_Ipc_ObjType_CREATEDYNAMIC; obj->attrs = (ti_sdo_ipc_ListMP_Attrs *)params->sharedAddr; /* Assert that sharedAddr is cache aligned */ Assert_isTrue((obj->cacheLineSize == 0) || ((UInt32)params->sharedAddr % obj->cacheLineSize == 0), ti_sdo_ipc_Ipc_A_addrNotCacheAligned); } /* init the head (to be empty) */ ListMP_elemClear(&(obj->attrs->head)); /* store the GateMP sharedAddr in the Attrs */ obj->attrs->gateMPAddr = ti_sdo_ipc_GateMP_getSharedAddr(obj->gate); /* last thing, set the status */ obj->attrs->status = ti_sdo_ipc_ListMP_CREATED; if (obj->cacheEnabled) { Cache_wbInv(obj->attrs, sizeof(ti_sdo_ipc_ListMP_Attrs), Cache_Type_ALL, TRUE); } /* add to NameServer if name not NULL */ if (params->name != NULL) { sharedShmBase = SharedRegion_getSRPtr(obj->attrs, obj->regionId); obj->nsKey = NameServer_addUInt32( (NameServer_Handle)ListMP_module->nameServer, params->name, (UInt32)sharedShmBase); if (obj->nsKey == NULL) { Error_raise(eb, ti_sdo_ipc_Ipc_E_nameFailed, params->name, 0); return (3); } } return (0); }