Example #1
Int32 Utils_memFree(Ptr addr, UInt32 size)
{
    UInt32 heapId = UTILS_MEM_VID_FRAME_BUF_HEAP;
    SharedRegion_Entry srEntry;

    SharedRegion_getEntry(SYSTEM_IPC_SR_VIDEO_FRAME, &srEntry);

    /* if the address falls inside the video frame shared region, free it from
       that heap; otherwise it was allocated from the tiler */
    if((UInt32)addr >= (UInt32)srEntry.base && (UInt32)addr < ((UInt32)srEntry.base + srEntry.len) )
    {
        #ifdef UTILS_MEM_DEBUG
        Vps_printf(" UTILS: MEM: FRAME FREE, addr = 0x%08x, size = %d bytes, heap = %d\n", addr, size, heapId);
        #endif

        /* free previously allocated memory */
        Memory_free(gUtils_heapMemHandle[heapId], addr, size);
    }
    else
    {
        Int32 status;

        status = SystemTiler_freeRaw(addr,size);
        UTILS_assert(status == 0);
    }

    return 0;
}
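
The range check above is what distinguishes a shared-region frame buffer from a tiler buffer. Below is a minimal sketch of that check as a standalone helper; the name Utils_isFrameBufAddr is illustrative and not part of the original utils module.

Bool Utils_isFrameBufAddr(Ptr addr)
{
    SharedRegion_Entry srEntry;

    /* query the video frame shared region used by Utils_memFree() above */
    SharedRegion_getEntry(SYSTEM_IPC_SR_VIDEO_FRAME, &srEntry);

    /* TRUE if addr lies inside the region, i.e. it belongs to the SR heap */
    return ((UInt32)addr >= (UInt32)srEntry.base &&
            (UInt32)addr <  ((UInt32)srEntry.base + srEntry.len));
}
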
Example #2
Int32 Utils_memInit()
{
    SharedRegion_Entry srEntry;
    Int                srStatus = SharedRegion_S_SUCCESS;
    UInt32 srId[UTILS_MEM_NUM_SHARED_REGION_HEAP], i;

    srId[UTILS_MEM_VID_FRAME_BUF_HEAP] = SYSTEM_IPC_SR_VIDEO_FRAME;
    srId[UTILS_MEM_VID_BITS_BUF_HEAP]  = SYSTEM_IPC_SR_CACHED;


    for (i=0; i<UTILS_MEM_NUM_SHARED_REGION_HEAP; i++)
    {
        SharedRegion_entryInit(&srEntry);
        SharedRegion_getEntry(srId[i], &srEntry);
        Vps_printf (" %d: MEM: Shared Region %d: Base = 0x%08x, Length = 0x%08x (%d MB) \n",
                    Utils_getCurTimeInMsec(), srId[i],srEntry.base,srEntry.len, srEntry.len/(1024*1024));
        if ((FALSE == srEntry.isValid) && (0 != srEntry.len))
        {
            srEntry.isValid     = TRUE;
            do {
                srStatus = SharedRegion_setEntry(srId[i], &srEntry);

                if (srStatus != SharedRegion_S_SUCCESS) {
                    Vps_printf(" %d: MEM: ERROR: SharedRegion_setEntry (%d, 0x%08x) FAILED !!! "
                               " (status=%d) \n", Utils_getCurTimeInMsec(), srId[i], &srEntry, srStatus);
                    Task_sleep(10);
                }
            } while (srStatus != SharedRegion_S_SUCCESS);
        }
        if (srEntry.len)
        {
            gUtils_heapMemHandle[i] = SharedRegion_getHeap(srId[i]);
            UTILS_assert(gUtils_heapMemHandle[i] != NULL);
            gUtils_memClearBuf[i] = FALSE;
        }
    }
    return 0;
}
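
For reference, an allocation counterpart built on the heap handles cached above could look like the sketch below. Utils_memAlloc_sketch is a hypothetical name; it assumes only the gUtils_heapMemHandle[] table from this example and the xdc.runtime Memory/Error APIs.

Ptr Utils_memAlloc_sketch(UInt32 heapId, UInt32 size, UInt32 align)
{
    Error_Block eb;
    Ptr addr;

    UTILS_assert(heapId < UTILS_MEM_NUM_SHARED_REGION_HEAP);

    Error_init(&eb);

    /* allocate from the SharedRegion heap handle cached by Utils_memInit() */
    addr = Memory_alloc(gUtils_heapMemHandle[heapId], size, align, &eb);

    if (Error_check(&eb)) {
        addr = NULL;
    }

    return addr;
}
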
Example #3
/*
 *  ======== Ipc_attach ========
 */
Int Ipc_attach(UInt16 remoteProcId)
{
    Int i;
    Ptr sharedAddr;
    SizeT memReq;
    volatile ti_sdo_ipc_Ipc_Reserved *slave;
    ti_sdo_ipc_Ipc_ProcEntry *ipc;
    Error_Block eb;
    SharedRegion_Entry entry;
    SizeT reservedSize = ti_sdo_ipc_Ipc_reservedSizePerProc();
    Bool cacheEnabled = SharedRegion_isCacheEnabled(0);
    UInt16 clusterId = ti_sdo_utils_MultiProc_getClusterId(remoteProcId);
    Int status;
    UInt hwiKey;

    /* Assert remoteProcId is in our cluster and isn't our own */
    Assert_isTrue(clusterId < ti_sdo_utils_MultiProc_numProcsInCluster,
                  ti_sdo_utils_MultiProc_A_invalidMultiProcId);
    Assert_isTrue(remoteProcId != MultiProc_self(),
                  ti_sdo_ipc_Ipc_A_invArgument);

    /* Check whether Ipc_start has been called.  If not, fail. */
    if (Ipc_module->ipcSharedAddr == NULL) {
        return (Ipc_E_FAIL);
    }

    /* for checking and incrementing attached below */
    hwiKey = Hwi_disable();

    /* Make sure it's not already attached */
    if (Ipc_module->procEntry[clusterId].attached) {
        Ipc_module->procEntry[clusterId].attached++;
        /* restore interrupts and return */
        Hwi_restore(hwiKey);
        return (Ipc_S_ALREADYSETUP);
    }

    /* restore interrupts */
    Hwi_restore(hwiKey);

    /* get region 0 information */
    SharedRegion_getEntry(0, &entry);

    /* Make sure we've attached to owner of SR0 if we're not owner */
    if ((MultiProc_self() != entry.ownerProcId) &&
        (remoteProcId != entry.ownerProcId) &&
        !(Ipc_module->procEntry[ti_sdo_utils_MultiProc_getClusterId(
            entry.ownerProcId)].attached)) {
        return (Ipc_E_FAIL);
    }

    /* Init error block */
    Error_init(&eb);

    /* determine the slave's slot */
    slave = Ipc_getSlaveAddr(remoteProcId, Ipc_module->ipcSharedAddr);

    if (cacheEnabled) {
        Cache_inv((Ptr)slave, reservedSize, Cache_Type_ALL, TRUE);
    }

    /* Synchronize the processors. */
    status = Ipc_procSyncStart(remoteProcId, Ipc_module->ipcSharedAddr);
    if (status < 0) {
        return (status);
    }

    /* must be called before SharedRegion_attach */
    status = ti_sdo_ipc_GateMP_attach(remoteProcId,
            Ipc_module->gateMPSharedAddr);
    if (status < 0) {
        return (status);
    }

    /* retrieves the SharedRegion Heap handles */
    status = ti_sdo_ipc_SharedRegion_attach(remoteProcId);
    if (status < 0) {
        return (status);
    }

    /* get the attach parameters associated with remoteProcId */
    ipc = &(Ipc_module->procEntry[clusterId]);

    /* attach Notify if not yet attached and the entry specifies Notify setup */
    if (!(Notify_intLineRegistered(remoteProcId, 0)) &&
        (ipc->entry.setupNotify)) {
        /* call Notify_attach */
        memReq = Notify_sharedMemReq(remoteProcId, Ipc_module->ipcSharedAddr);
        if (memReq != 0) {
            if (MultiProc_self() < remoteProcId) {
                /*
                 *  calloc is required here due to a race condition.  It's
                 *  possible that the slave, who creates the instance, tries a
                 *  sendEvent before the master has created its instance,
                 *  because the state of memory was enabled from a previous
                 *  run.
                 */
                sharedAddr = Memory_calloc(SharedRegion_getHeap(0),
                                       memReq,
                                       SharedRegion_getCacheLineSize(0),
                                       &eb);

                /* make sure alloc did not fail */
                if (sharedAddr == NULL) {
                    return (Ipc_E_MEMORY);
                }

                /* if cache enabled, wbInv the calloc above */
                if (cacheEnabled) {
                    Cache_wbInv(sharedAddr, memReq, Cache_Type_ALL, TRUE);
                }

                /* set the notify SRPtr */
                slave->notifySRPtr = SharedRegion_getSRPtr(sharedAddr, 0);
            }
            else {
                /* get the notify SRPtr */
                sharedAddr = SharedRegion_getPtr(slave->notifySRPtr);
            }
        }
        else {
            sharedAddr = NULL;
            slave->notifySRPtr = 0;
        }

        /* call attach to remote processor */
        status = Notify_attach(remoteProcId, sharedAddr);

        if (status < 0) {
            if (MultiProc_self() < remoteProcId && sharedAddr != NULL) {
                /* free the memory back to SharedRegion 0 heap */
                Memory_free(SharedRegion_getHeap(0), sharedAddr, memReq);
            }

            return (Ipc_E_FAIL);
        }
    }

    /* Must come after GateMP_start because it depends on the default GateMP */
    if (!(ti_sdo_utils_NameServer_isRegistered(remoteProcId)) &&
        (ipc->entry.setupNotify)) {
        memReq = ti_sdo_utils_NameServer_SetupProxy_sharedMemReq(
            Ipc_module->ipcSharedAddr);
        if (memReq != 0) {
            if (MultiProc_self() < remoteProcId) {
                sharedAddr = Memory_alloc(SharedRegion_getHeap(0),
                                     memReq,
                                     SharedRegion_getCacheLineSize(0),
                                     &eb);

                /* make sure alloc did not fail */
                if (sharedAddr == NULL) {
                    return (Ipc_E_MEMORY);
                }

                /* set the NSRN SRPtr */
                slave->nsrnSRPtr = SharedRegion_getSRPtr(sharedAddr, 0);
            }
            else {
                /* get the NSRN SRPtr */
                sharedAddr = SharedRegion_getPtr(slave->nsrnSRPtr);
            }
        }
        else {
            sharedAddr = NULL;
            slave->nsrnSRPtr = 0;
        }

        /* call attach to remote processor */
        status = ti_sdo_utils_NameServer_SetupProxy_attach(remoteProcId,
                                                           sharedAddr);

        if (status < 0) {
            if (MultiProc_self() < remoteProcId && sharedAddr != NULL) {
                /* free the memory back to SharedRegion 0 heap */
                Memory_free(SharedRegion_getHeap(0), sharedAddr, memReq);
            }

            return (Ipc_E_FAIL);
        }
    }

    /* Must come after GateMP_start because it depends on the default GateMP */
    if (!(ti_sdo_ipc_MessageQ_SetupTransportProxy_isRegistered(remoteProcId)) &&
        (ipc->entry.setupMessageQ)) {
        memReq = ti_sdo_ipc_MessageQ_SetupTransportProxy_sharedMemReq(
            Ipc_module->ipcSharedAddr);

        if (memReq != 0) {
            if (MultiProc_self() < remoteProcId) {
                sharedAddr = Memory_alloc(SharedRegion_getHeap(0),
                    memReq, SharedRegion_getCacheLineSize(0), &eb);

                /* make sure alloc did not fail */
                if (sharedAddr == NULL) {
                    return (Ipc_E_MEMORY);
                }

                /* set the transport SRPtr */
                slave->transportSRPtr = SharedRegion_getSRPtr(sharedAddr, 0);
            }
            else {
                /* get the transport SRPtr */
                sharedAddr = SharedRegion_getPtr(slave->transportSRPtr);
            }
        }
        else {
            sharedAddr = NULL;
            slave->transportSRPtr = 0;
        }

        /* call attach to remote processor */
        status = ti_sdo_ipc_MessageQ_SetupTransportProxy_attach(remoteProcId,
            sharedAddr);

        if (status < 0) {
            if (MultiProc_self() < remoteProcId && sharedAddr != NULL) {
                /* free the memory back to SharedRegion 0 heap */
                Memory_free(SharedRegion_getHeap(0), sharedAddr, memReq);
            }

            return (Ipc_E_FAIL);
        }
    }

    /* writeback invalidate slave's shared memory if cache enabled */
    if (cacheEnabled) {
        if (MultiProc_self() < remoteProcId) {
            Cache_wbInv((Ptr)slave, reservedSize, Cache_Type_ALL, TRUE);
        }
    }

    /* Call user attach fxns */
    for (i = 0; i < ti_sdo_ipc_Ipc_numUserFxns; i++) {
        if (ti_sdo_ipc_Ipc_userFxns[i].userFxn.attach) {
            status = ti_sdo_ipc_Ipc_userFxns[i].userFxn.attach(
                ti_sdo_ipc_Ipc_userFxns[i].arg, remoteProcId);

            if (status < 0) {
                return (status);
            }
        }
    }

    /* Finish the processor synchronization */
    status = ti_sdo_ipc_Ipc_procSyncFinish(remoteProcId,
        Ipc_module->ipcSharedAddr);

    if (status < 0) {
        return (status);
    }

    /* for atomically incrementing attached */
    hwiKey = Hwi_disable();

    /* now attached to remote processor */
    Ipc_module->procEntry[clusterId].attached++;

    /* restore interrupts */
    Hwi_restore(hwiKey);

    return (status);
}
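
A typical caller retries Ipc_attach() until the remote processor has run Ipc_start(), which is the same convention Ipc_start() itself uses in the next example when procSync is ALL. A short usage sketch; "HOST" is a placeholder MultiProc name.

Void myAttachToHost(Void)
{
    UInt16 remoteProcId = MultiProc_getId("HOST");
    Int status;

    do {
        status = Ipc_attach(remoteProcId);
        if (status == Ipc_E_NOTREADY) {
            Task_sleep(10);   /* remote processor has not run Ipc_start() yet */
        }
    } while (status == Ipc_E_NOTREADY);
}
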
Example #4
/*
 *  ======== Ipc_start ========
 */
Int Ipc_start()
{
    Int i;
    UInt16 baseId = MultiProc_getBaseIdOfCluster();
    SharedRegion_Entry entry;
    Ptr ipcSharedAddr;
    Ptr gateMPSharedAddr;
    GateMP_Params gateMPParams;
    Int status;

    /* Check whether Ipc_start has been called.  If so, succeed. */
    if (Ipc_module->ipcSharedAddr != NULL) {
        return (Ipc_S_ALREADYSETUP);
    }

    if (ti_sdo_ipc_Ipc_generateSlaveDataForHost) {
        /* get Ipc_sr0MemorySetup out of the cache */
        Cache_inv(&Ipc_sr0MemorySetup,
              sizeof(Ipc_sr0MemorySetup),
              Cache_Type_ALL,
              TRUE);

        /* check Ipc_sr0MemorySetup variable */
        if (Ipc_sr0MemorySetup == 0x0) {
            return (Ipc_E_NOTREADY);
        }
    }

    /* get region 0 information */
    SharedRegion_getEntry(0, &entry);

    /* if entry is not valid then return */
    if (entry.isValid == FALSE) {
        return (Ipc_E_NOTREADY);
    }

    /*
     *  Need to reserve memory in region 0 for processor synchronization.
     *  This must be done before SharedRegion_start().
     */
    ipcSharedAddr = ti_sdo_ipc_SharedRegion_reserveMemory(
            0, Ipc_getRegion0ReservedSize());

    /* must reserve memory for GateMP before SharedRegion_start() */
    gateMPSharedAddr = ti_sdo_ipc_SharedRegion_reserveMemory(0,
            ti_sdo_ipc_GateMP_getRegion0ReservedSize());

    /* Init params for default gate (must match those in GateMP_start()) */
    GateMP_Params_init(&gateMPParams);
    gateMPParams.localProtect  = GateMP_LocalProtect_TASKLET;

    if (ti_sdo_utils_MultiProc_numProcessors > 1) {
        gateMPParams.remoteProtect = GateMP_RemoteProtect_SYSTEM;
    }
    else {
        gateMPParams.remoteProtect = GateMP_RemoteProtect_NONE;
    }

    /* reserve memory for default gate before SharedRegion_start() */
    ti_sdo_ipc_SharedRegion_reserveMemory(0, GateMP_sharedMemReq(&gateMPParams));

    /* clear the reserved memory */
    ti_sdo_ipc_SharedRegion_clearReservedMemory();

    /* Set shared addresses */
    Ipc_module->ipcSharedAddr = ipcSharedAddr;
    Ipc_module->gateMPSharedAddr = gateMPSharedAddr;

    /* create default GateMP, must be called before SharedRegion start */
    status = ti_sdo_ipc_GateMP_start(Ipc_module->gateMPSharedAddr);
    if (status < 0) {
        return (status);
    }

    /* create HeapMemMP in each SharedRegion */
    status = ti_sdo_ipc_SharedRegion_start();
    if (status < 0) {
        return (status);
    }

    /* Call attach for all procs if procSync is ALL */
    if (ti_sdo_ipc_Ipc_procSync == ti_sdo_ipc_Ipc_ProcSync_ALL) {
        /* Must attach to owner first to get default GateMP and HeapMemMP */
        if (MultiProc_self() != entry.ownerProcId) {
            do {
                status = Ipc_attach(entry.ownerProcId);
            } while (status == Ipc_E_NOTREADY);

            if (status < 0) {
                /* Ipc_attach failed. Get out of Ipc_start */
                return (status);
            }
        }

        /* Loop to attach to all other processors in cluster */
        for (i = 0; i < ti_sdo_utils_MultiProc_numProcsInCluster; i++, baseId++) {
            if ((baseId == MultiProc_self()) || (baseId == entry.ownerProcId)) {
                continue;
            }

            /* Skip the processor if there are no interrupt lines to it */
            if (Notify_numIntLines(baseId) == 0) {
                continue;
            }

            /* call Ipc_attach for every remote processor */
            do {
                status = Ipc_attach(baseId);
            } while (status == Ipc_E_NOTREADY);

            if (status < 0) {
                /* Ipc_attach failed. Get out of Ipc_start */
                return (status);
            }
        }
    }

    return (status);
}
Example #5
/*
 *  ======== Ipc_detach ========
 */
Int Ipc_detach(UInt16 remoteProcId)
{
    Int i;
    UInt16 baseId = MultiProc_getBaseIdOfCluster();
    UInt16 clusterId = ti_sdo_utils_MultiProc_getClusterId(remoteProcId);
    Ptr notifySharedAddr;
    Ptr nsrnSharedAddr;
    Ptr msgqSharedAddr;
    volatile ti_sdo_ipc_Ipc_Reserved *slave, *master;
    SharedRegion_Entry entry;
    ti_sdo_ipc_Ipc_ProcEntry *ipc;
    SizeT reservedSize = ti_sdo_ipc_Ipc_reservedSizePerProc();
    Bool cacheEnabled = SharedRegion_isCacheEnabled(0);
    Int status = Ipc_S_SUCCESS;
    UInt hwiKey;

    /* Assert remoteProcId is in our cluster and isn't our own */
    Assert_isTrue(clusterId < ti_sdo_utils_MultiProc_numProcsInCluster,
                  ti_sdo_utils_MultiProc_A_invalidMultiProcId);
    Assert_isTrue(remoteProcId != MultiProc_self(),
                  ti_sdo_ipc_Ipc_A_invArgument);

    /* for checking and incrementing attached below */
    hwiKey = Hwi_disable();

    if (Ipc_module->procEntry[clusterId].attached > 1) {
        /* not the last attach; just decrement the reference count and return */
        Ipc_module->procEntry[clusterId].attached--;
        Hwi_restore(hwiKey);
        return (Ipc_S_BUSY);
    }
    else if (Ipc_module->procEntry[clusterId].attached == 0) {
        /* already detached, restore interrupts and return success */
        Hwi_restore(hwiKey);
        return (Ipc_S_SUCCESS);
    }

    /* restore interrupts */
    Hwi_restore(hwiKey);

    /* get region 0 information */
    SharedRegion_getEntry(0, &entry);

    /*
     *  Make sure we detach from all other procs in cluster before
     *  detaching from owner of SR 0.
     */
    if (remoteProcId == entry.ownerProcId) {
        for (i = 0; i < ti_sdo_utils_MultiProc_numProcsInCluster; i++, baseId++) {
            if ((baseId != MultiProc_self()) && (baseId != entry.ownerProcId) &&
                (Ipc_module->procEntry[i].attached)) {
                return (Ipc_E_FAIL);
            }
        }
    }

    /* get the parameters associated with remoteProcId */
    ipc = &(Ipc_module->procEntry[clusterId]);

    /* determine the slave's slot */
    slave = Ipc_getSlaveAddr(remoteProcId, Ipc_module->ipcSharedAddr);

    /* determine the master's slot */
    master = ti_sdo_ipc_Ipc_getMasterAddr(remoteProcId,
        Ipc_module->ipcSharedAddr);

    if (cacheEnabled) {
        Cache_inv((Ptr)slave, reservedSize, Cache_Type_ALL, TRUE);
        Cache_inv((Ptr)master, reservedSize, Cache_Type_ALL, TRUE);
    }

    if (MultiProc_self() < remoteProcId) {
        /* check to make sure master is not trying to attach */
        if (master->startedKey == ti_sdo_ipc_Ipc_PROCSYNCSTART) {
            return (Ipc_E_NOTREADY);
        }
    }
    else {
        /* check to make sure slave is not trying to attach */
        if (slave->startedKey == ti_sdo_ipc_Ipc_PROCSYNCSTART) {
            return (Ipc_E_NOTREADY);
        }
    }

    /* The slave processor waits for master to finish its detach sequence */
    if (MultiProc_self() < remoteProcId) {
        if (master->startedKey != ti_sdo_ipc_Ipc_PROCSYNCDETACH) {
            return (Ipc_E_NOTREADY);
        }
    }

    /* Call user detach fxns */
    for (i = 0; i < ti_sdo_ipc_Ipc_numUserFxns; i++) {
        if (ti_sdo_ipc_Ipc_userFxns[i].userFxn.detach) {
            status = ti_sdo_ipc_Ipc_userFxns[i].userFxn.detach(
                ti_sdo_ipc_Ipc_userFxns[i].arg, remoteProcId);

            if (status < 0) {
                return (status);
            }
        }
    }

    if ((ipc->entry.setupMessageQ) &&
       (ti_sdo_ipc_MessageQ_SetupTransportProxy_isRegistered(remoteProcId))) {
        /* call MessageQ_detach for remote processor */
        status = ti_sdo_ipc_MessageQ_SetupTransportProxy_detach(remoteProcId);
        if (status < 0) {
            return (Ipc_E_FAIL);
        }

        if (slave->transportSRPtr) {
            /* free the memory if slave processor */
            if (MultiProc_self() < remoteProcId) {
                /* get the pointer to MessageQ transport instance */
                msgqSharedAddr = SharedRegion_getPtr(slave->transportSRPtr);

                /* free the memory back to SharedRegion 0 heap */
                Memory_free(SharedRegion_getHeap(0),
                    msgqSharedAddr,
                    ti_sdo_ipc_MessageQ_SetupTransportProxy_sharedMemReq(
                        msgqSharedAddr));

                /* set pointer for MessageQ transport instance back to NULL */
                slave->transportSRPtr = NULL;
            }
        }
    }

    if ((ipc->entry.setupNotify) &&
        (ti_sdo_utils_NameServer_isRegistered(remoteProcId))) {
        /* call NameServer_SetupProxy_detach for remote processor */
        status = ti_sdo_utils_NameServer_SetupProxy_detach(remoteProcId);
        if (status < 0) {
            return (Ipc_E_FAIL);
        }

        if (slave->nsrnSRPtr) {
            /* free the memory if slave processor */
            if (MultiProc_self() < remoteProcId) {
                /* get the pointer to NSRN instance */
                nsrnSharedAddr = SharedRegion_getPtr(slave->nsrnSRPtr);

                /* free the memory back to SharedRegion 0 heap */
                Memory_free(SharedRegion_getHeap(0),
                            nsrnSharedAddr,
                            ti_sdo_utils_NameServer_SetupProxy_sharedMemReq(
                                nsrnSharedAddr));

                /* set pointer for NSRN instance back to NULL */
                slave->nsrnSRPtr = NULL;
            }
        }
    }

    if ((ipc->entry.setupNotify) &&
        (Notify_intLineRegistered(remoteProcId, 0))) {
        /* call Notify_detach for remote processor */
        status = ti_sdo_ipc_Notify_detach(remoteProcId);
        if (status < 0) {
            return (Ipc_E_FAIL);
        }

        if (slave->notifySRPtr) {
            /* free the memory if slave processor */
            if (MultiProc_self() < remoteProcId) {
                /* get the pointer to Notify instance */
                notifySharedAddr = SharedRegion_getPtr(slave->notifySRPtr);

                /* free the memory back to SharedRegion 0 heap */
                Memory_free(SharedRegion_getHeap(0),
                            notifySharedAddr,
                            Notify_sharedMemReq(remoteProcId, notifySharedAddr));

                /* set pointer for Notify instance back to NULL */
                slave->notifySRPtr = NULL;
            }
        }
    }

    /* close any HeapMemMP which may have been opened */
    status = ti_sdo_ipc_SharedRegion_detach(remoteProcId);
    if (status < 0) {
        return (status);
    }
    
    /* close any GateMP which may have been opened */
    status = ti_sdo_ipc_GateMP_detach(remoteProcId);
    if (status < 0) {
        return (status);
    }

    if (MultiProc_self() < remoteProcId) {
        slave->configListHead = ti_sdo_ipc_SharedRegion_INVALIDSRPTR;
        slave->startedKey = ti_sdo_ipc_Ipc_PROCSYNCDETACH;
        if (cacheEnabled) {
            Cache_wbInv((Ptr)slave, reservedSize, Cache_Type_ALL, TRUE);
        }
    }
    else {
        master->configListHead = ti_sdo_ipc_SharedRegion_INVALIDSRPTR;
        master->startedKey = ti_sdo_ipc_Ipc_PROCSYNCDETACH;
        if (cacheEnabled) {
            Cache_wbInv((Ptr)master, reservedSize, Cache_Type_ALL, TRUE);
        }
    }

    /* attached must be decremented atomically */
    hwiKey = Hwi_disable();

    /* now detached from remote processor */
    Ipc_module->procEntry[clusterId].attached--;

    /* restore interrupts */
    Hwi_restore(hwiKey);

    return (status);
}
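
Attachment is reference counted, so a caller normally balances every successful Ipc_attach() with one Ipc_detach() and treats Ipc_S_BUSY as "other users are still attached". A sketch of that convention; the function name is illustrative.

Void myDetachFromProc(UInt16 remoteProcId)
{
    Int status;

    do {
        status = Ipc_detach(remoteProcId);
        if (status == Ipc_E_NOTREADY) {
            Task_sleep(10);   /* peer is still in its attach/detach sequence */
        }
    } while (status == Ipc_E_NOTREADY);

    /* Ipc_S_BUSY: other users remain attached; Ipc_S_SUCCESS: fully detached */
}
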
/*
 *  ======== ipcSetup ========
 */
Int ipcSetup (Int testCase)
{
    Int                             status          = 0;
    Char *                          procName;
    UInt16                          procId;
    ProcMgr_AttachParams            attachParams;
    ProcMgr_State                   state;
#if !defined(SYSLINK_USE_DAEMON)
    UInt32                          entryPoint      = 0;
    ProcMgr_StartParams             startParams;
    UInt16                          uProcId;
    HeapBufMP_Params                heapbufmpParams;
#if defined(SYSLINK_USE_LOADER)
    Char                          * imageName;
    UInt32                          fileId;
#endif
#endif
    Ipc_Config                      config;
    Int                             i;
    UInt32                          srCount;
    SharedRegion_Entry              srEntry;

    Osal_printf ("ipcSetup: Setup IPC components\n");

    switch(testCase) {
    case 0:
        Osal_printf ("ipcSetup: Local RCM test\n");
        remoteServerName = RCMSERVER_NAME;
        procName = MPU_PROC_NAME;
        break;
    case 1:
        Osal_printf ("ipcSetup: RCM test with RCM client and server on "
                     "Sys M3\n\n");
        remoteServerName = SYSM3_SERVER_NAME;
        procName = SYSM3_PROC_NAME;
        break;
    case 2:
        Osal_printf ("ipcSetup: RCM test with RCM client and server on "
                     "App M3\n\n");
        remoteServerName = APPM3_SERVER_NAME;
        procName = APPM3_PROC_NAME;
        break;
    case 3:
        Osal_printf ("ipcSetup: RCM test with RCM client and server on "
                     "Tesla\n\n");
        remoteServerName = DSP_SERVER_NAME;
        procName = DSP_PROC_NAME;
        break;
    default:
        Osal_printf ("ipcSetup: Please pass valid arg "
                     "(0-local, 1-Sys M3, 2-App M3, 3-Tesla) \n");
        goto exit;
        break;
    }

    Ipc_getConfig (&config);
    status = Ipc_setup (&config);
    if (status < 0) {
        Osal_printf ("ipcSetup: Error in Ipc_setup [0x%x]\n", status);
        goto exit;
    }
    Osal_printf("Ipc_setup status [0x%x]\n", status);

    procId = ((testCase == 3) ? MultiProc_getId (DSP_PROC_NAME) : \
                                MultiProc_getId (SYSM3_PROC_NAME));
    remoteIdClient = MultiProc_getId (procName);

    /* Open a handle to the ProcMgr instance. */
    status = ProcMgr_open (&procMgrHandleClient, procId);
    if (status < 0) {
        Osal_printf ("ipcSetup: Error in ProcMgr_open [0x%x]\n", status);
        goto exit;
    }
    if (status >= 0) {
        Osal_printf ("ipcSetup: ProcMgr_open Status [0x%x]\n", status);
        ProcMgr_getAttachParams (NULL, &attachParams);
        /* Default params will be used if NULL is passed. */
        status = ProcMgr_attach (procMgrHandleClient, &attachParams);
        if (status < 0) {
            Osal_printf ("ipcSetup: ProcMgr_attach failed [0x%x]\n", status);
        }
        else {
            Osal_printf ("ipcSetup: ProcMgr_attach status: [0x%x]\n", status);
            state = ProcMgr_getState (procMgrHandleClient);
            Osal_printf ("ipcSetup: After attach: ProcMgr_getState\n"
                         "    state [0x%x]\n", state);
        }
    }

    if ((status >= 0) && (testCase == 2)) {
        status = ProcMgr_open (&procMgrHandleClient1, remoteIdClient);
        if (status < 0) {
            Osal_printf ("ipcSetup: Error in ProcMgr_open [0x%x]\n", status);
            goto exit;
        }
        if (status >= 0) {
            Osal_printf ("ipcSetup: ProcMgr_open Status [0x%x]\n", status);
            ProcMgr_getAttachParams (NULL, &attachParams);
            /* Default params will be used if NULL is passed. */
            status = ProcMgr_attach (procMgrHandleClient1, &attachParams);
            if (status < 0) {
                Osal_printf ("ipcSetup: ProcMgr_attach failed [0x%x]\n",
                                status);
            }
            else {
                Osal_printf ("ipcSetup: ProcMgr_attach status: [0x%x]\n",
                                status);
                state = ProcMgr_getState (procMgrHandleClient1);
                Osal_printf ("ipcSetup: After attach: ProcMgr_getState\n"
                             "    state [0x%x]\n", state);
            }
        }
    }

#if !defined(SYSLINK_USE_DAEMON) /* Daemon sets this up */
#if defined(SYSLINK_USE_LOADER)
    if (testCase == 1)
        imageName = RCM_MPUCLIENT_SYSM3ONLY_IMAGE;
    else if (testCase == 2)
        imageName = RCM_MPUCLIENT_SYSM3_IMAGE;
    else if (testCase == 3)
        imageName = RCM_MPUCLIENT_DSP_IMAGE;

    if (testCase != 0) {
        status = ProcMgr_load (procMgrHandleClient, imageName, 2, &imageName,
                                &entryPoint, &fileId, procId);
        if (status < 0) {
            Osal_printf ("ipcSetup: Error in ProcMgr_load %s image [0x%x]\n",
                            procName, status);
            goto exit;
        }
        Osal_printf ("ipcSetup: ProcMgr_load %s image Status [0x%x]\n",
                        procName, status);
    }
#endif /* defined(SYSLINK_USE_LOADER) */
    if (testCase != 0) {
        startParams.proc_id = procId;
        status = ProcMgr_start (procMgrHandleClient, entryPoint, &startParams);
        if (status < 0) {
            Osal_printf ("ipcSetup: Error in ProcMgr_start %s [0x%x]\n",
                            procName, status);
            goto exit;
        }
        Osal_printf ("ipcSetup: ProcMgr_start %s Status [0x%x]\n", procName,
                        status);
    }

    if (testCase == 2) {
#if defined(SYSLINK_USE_LOADER)
        imageName = RCM_MPUCLIENT_APPM3_IMAGE;
        uProcId = MultiProc_getId (APPM3_PROC_NAME);
        status = ProcMgr_load (procMgrHandleClient1, imageName, 2, &imageName,
                                &entryPoint, &fileId, uProcId);
        if (status < 0) {
            Osal_printf ("ipcSetup: Error in ProcMgr_load AppM3 image: "
                "[0x%x]\n", status);
            goto exit;
        }
        Osal_printf ("ipcSetup: AppM3: ProcMgr_load Status [0x%x]\n", status);
#endif /* defined(SYSLINK_USE_LOADER) */
        startParams.proc_id = MultiProc_getId (APPM3_PROC_NAME);
        status = ProcMgr_start (procMgrHandleClient1, entryPoint, &startParams);
        if (status < 0) {
            Osal_printf ("ipcSetup: Error in ProcMgr_start AppM3 [0x%x]\n",
                        status);
            goto exit;
        }
        Osal_printf ("ipcSetup: ProcMgr_start AppM3 Status [0x%x]\n", status);
    }
#endif /* defined(SYSLINK_USE_DAEMON) */

    srCount = SharedRegion_getNumRegions();
    Osal_printf ("SharedRegion_getNumRegions = %d\n", srCount);
    for (i = 0; i < srCount; i++) {
        status = SharedRegion_getEntry (i, &srEntry);
        Osal_printf ("SharedRegion_entry #%d: base = 0x%x len = 0x%x "
                        "ownerProcId = %d isValid = %d cacheEnable = %d "
                        "cacheLineSize = 0x%x createHeap = %d name = %s\n",
                        i, srEntry.base, srEntry.len, srEntry.ownerProcId,
                        (Int)srEntry.isValid, (Int)srEntry.cacheEnable,
                        srEntry.cacheLineSize, (Int)srEntry.createHeap,
                        srEntry.name);
    }

#if !defined(SYSLINK_USE_DAEMON) /* Daemon sets this up */
    /* Create Heap and register it with MessageQ */
    if (status >= 0) {
        HeapBufMP_Params_init (&heapbufmpParams);
        heapbufmpParams.sharedAddr = NULL;
        heapbufmpParams.align      = 128;
        heapbufmpParams.numBlocks  = 4;
        heapbufmpParams.blockSize  = MSGSIZE;
        heapSize = HeapBufMP_sharedMemReq (&heapbufmpParams);
        Osal_printf ("ipcSetup: heapSize = 0x%x\n", heapSize);

        srHeap = SharedRegion_getHeap (RCM_HEAP_SR);
        if (srHeap == NULL) {
            status = MEMORYOS_E_FAIL;
            Osal_printf ("ipcSetup: SharedRegion_getHeap failed for srHeap:"
                         " [0x%x]\n", srHeap);
        }
        else {
            Osal_printf ("ipcSetup: Before Memory_alloc = 0x%x\n", srHeap);
            heapBufPtr = Memory_alloc (srHeap, heapSize, 0);
            if (heapBufPtr == NULL) {
                status = MEMORYOS_E_MEMORY;
                Osal_printf ("ipcSetup: Memory_alloc failed for ptr: [0x%x]\n",
                             heapBufPtr);
            }
            else {
                heapbufmpParams.name           = RCM_MSGQ_HEAPNAME;
                heapbufmpParams.sharedAddr     = heapBufPtr;
                Osal_printf ("ipcSetup: Before HeapBufMP_Create: [0x%x]\n",
                                heapBufPtr);
                heapHandle = HeapBufMP_create (&heapbufmpParams);
                if (heapHandle == NULL) {
                    status = HeapBufMP_E_FAIL;
                    Osal_printf ("ipcSetup: HeapBufMP_create failed for Handle:"
                                 "[0x%x]\n", heapHandle);
                }
                else {
                    /* Register this heap with MessageQ */
                    status = MessageQ_registerHeap (heapHandle,
                                                    RCM_MSGQ_HEAPID);
                    if (status < 0) {
                        Osal_printf ("ipcSetup: MessageQ_registerHeap "
                                     "failed!\n");
                    }
                }
            }
        }
    }
#endif /* defined(SYSLINK_USE_DAEMON) */

exit:
    Osal_printf ("ipcSetup: Leaving ipcSetup()\n");
    return status;
}
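
Once the HeapBufMP instance is registered under RCM_MSGQ_HEAPID, MessageQ messages for the RCM queue can be allocated from it. A follow-up sketch; the queue name "RCM_SERVER_QUEUE" is assumed purely for illustration.

Int rcmSendSketch (Void)
{
    MessageQ_QueueId queueId;
    MessageQ_Msg     msg;
    Int              status;

    /* open the remote RCM message queue by name (name is an assumption) */
    status = MessageQ_open ("RCM_SERVER_QUEUE", &queueId);
    if (status >= 0) {
        /* allocate the message from the heap registered in ipcSetup() */
        msg = MessageQ_alloc (RCM_MSGQ_HEAPID, MSGSIZE);
        if (msg != NULL) {
            status = MessageQ_put (queueId, msg);
        }
        else {
            status = MessageQ_E_MEMORY;
        }
    }

    return status;
}
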
Example #7
/** 
********************************************************************************
 *  @func     memcfg_module_init_shared_region
 *  @brief  This function performs initialization of shared regions.
 *
 *          This function gets a virtual address by calling 
 *          DomxCore_mapPhyAddr2UsrVirtual(), checks whether the address is
 *          already available in the table, and if not, creates an entry and
 *          sets it by calling SharedRegion_setEntry().
 *
 *  @param[in ]  None  : None
 * 
 *  @returns MEMCFG_E_FAIL 
 *           MEMCFG_S_SUCCESS
********************************************************************************
*/
static int32_t memcfg_module_init_shared_region (void)
{
  int32_t i, j;
  int32_t nSR_retval;
  SharedRegion_Entry srEntry;
  uint16_t srOwnerProcId;
  uint16_t srIndex;
  int32_t createHeap;
  int32_t srStatus = SharedRegion_S_SUCCESS;
  void *srBaseVirtual = NULL;
  int32_t memcfg_retval = MEMCFG_S_SUCCESS;
  int32_t aSRIndices[MEMCFG_MAXNUMSHAREDREGIONS];
  int16_t sridx_local = 0;
  int16_t k;

  /*--------------------------------------------------------------------------*/
  /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
  /* Following are the initialization steps for each of the shared regions: */
  /* 1. Convert the base address into user space virtual */
  /* 2. Derived to Base params with Actual values */
  /* 3. Get entry for a specific shared region index */
  /* 4. If the shared region is not valid, then set the shared region */
  /* 5. If the shared region is already there, then check if the values are */
  /* correct.  */
  /* */
  /*--------------------------------------------------------------------------*/

  /* This needs to be fixed. SR1 is in a cacheable region for M3, but the heap
     is created by the master A8, which updates the heap attributes in SR1 and
     thus writes to it. If MEMCFGModule_init on M3 runs before A8, the memory
     corresponding to the heap attributes gets cached by M3, so
     SharedRegion_setEntry keeps failing even though A8 successfully creates
     the heap. IPC probably needs to communicate heap attributes via SR0? The
     fix below ensures SR1 is created by A8 before M3 checks for it, because M3
     waits for SR2 creation before SR1, and SR2 is in a non-cacheable region.
     In short, on A8 the SRs that do not require cache operations are created
     first, followed by those that do; on M3 the order is reversed. */
  if (DomxTypes_coreCortexA8 == MultiProc_self ())
  {                             /* Master/SR Owner */
    /* On master, create the order such that all the SRs that do not need
       cache operations go first... */
    for (k = 0; k < MEMCFG_sharedRegionConfigTable.nNumSharedRegions; k++)
    {
      if (!MEMCFG_sharedRegionConfigTable.aSRInfo[k].bCacheEnable)
      {                         /* if the SR does NOT need cache operation, put 
                                   that in */
        aSRIndices[sridx_local++] =
          MEMCFG_sharedRegionConfigTable.aSRInfo[k].uIndex;
      }                         /* if */
    }                           /* for */
    /* ... and then all SRs that need cache operation go next */
    for (k = 0; k < MEMCFG_sharedRegionConfigTable.nNumSharedRegions; k++)
    {
      if (MEMCFG_sharedRegionConfigTable.aSRInfo[k].bCacheEnable)
      {                         /* if the SR needs cache operation, put that in 
                                 */
        aSRIndices[sridx_local++] =
          MEMCFG_sharedRegionConfigTable.aSRInfo[k].uIndex;
      }                         /* if */
    }                           /* for */
  }                             /* if (DomxTypes_coreCortexA8...) */
  else
  {                             /* Slaves */
    /* On slaves, create the order such that all the SRs that need cache
       operations go first... */
    for (k = 0; k < MEMCFG_sharedRegionConfigTable.nNumSharedRegions; k++)
    {
      if (MEMCFG_sharedRegionConfigTable.aSRInfo[k].bCacheEnable)
      {                         /* if the SR needs cache operations, put that in */
        aSRIndices[sridx_local++] =
          MEMCFG_sharedRegionConfigTable.aSRInfo[k].uIndex;
      }                         /* if */
    }                           /* for */
    /* ... and then all the SRs that do not need cache operations go next */
    for (k = 0; k < MEMCFG_sharedRegionConfigTable.nNumSharedRegions; k++)
    {
      if (!MEMCFG_sharedRegionConfigTable.aSRInfo[k].bCacheEnable)
      {                         /* if the SR does not need cache operations, put that in */
        aSRIndices[sridx_local++] =
          MEMCFG_sharedRegionConfigTable.aSRInfo[k].uIndex;
      }                         /* if */
    }                           /* for */
  }                             /* if (DomxTypes_coreCortexA8) else ... */

  for (j = 0; j < MEMCFG_sharedRegionConfigTable.nNumSharedRegions; j++)
  {
    i = aSRIndices[j];
    srBaseVirtual = NULL;
    /*------------------------------------------------------------------------*/
    /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    /* Convert the physical address to virtual address.  */
    /* */
    /*------------------------------------------------------------------------*/
    srBaseVirtual =
      DomxCore_mapPhyAddr2UsrVirtual (MEMCFG_sharedRegionConfigTable.aSRInfo[i].
                                      uBase,
                                      MEMCFG_sharedRegionConfigTable.aSRInfo[i].
                                      uSize);
    /*------------------------------------------------------------------------*/
    /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
    /* Get the entry for the specific shared region index.  */
    /* */
    /*------------------------------------------------------------------------*/
    nSR_retval =
      SharedRegion_getEntry (MEMCFG_sharedRegionConfigTable.aSRInfo[i].uIndex,
                             &srEntry);
    if (nSR_retval == SharedRegion_S_SUCCESS)
    {
      if (FALSE == srEntry.isValid)
      {
        /*--------------------------------------------------------------------*/
        /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
        /* If the shared region entry is not valid, then set a new entry.  */
        /* */
        /*--------------------------------------------------------------------*/
        SharedRegion_entryInit (&srEntry);
        MEMCFG_module->bSrAddEntryDone[i] = TRUE;
        if (MEMCFG_sharedRegionTypeMaster ==
            MEMCFG_sharedRegionConfigTable.aSRInfo[i].srType)
        {
          srOwnerProcId = MultiProc_self ();
          createHeap = TRUE;
        }
        else
        {
          srOwnerProcId = SharedRegion_DEFAULTOWNERID;
          createHeap = TRUE;
        }
        srEntry.base = srBaseVirtual;
        srIndex = MEMCFG_sharedRegionConfigTable.aSRInfo[i].uIndex;
        srEntry.len = MEMCFG_sharedRegionConfigTable.aSRInfo[i].uSize;
        srEntry.cacheEnable =
          MEMCFG_sharedRegionConfigTable.aSRInfo[i].bCacheEnable;
        srEntry.createHeap = createHeap;
        srEntry.isValid = TRUE;
        srEntry.ownerProcId = srOwnerProcId;

        Log_print2 (Diags_USER1, "Calling SharedRegion_setEntry (%d, 0x%x)",
                    srIndex, (xdc_IArg) & srEntry);
        Log_print3 (Diags_USER1, "base : 0x%x, len: 0x%x, ownerProcId: %d\n",
                    (xdc_IArg) srEntry.base, srEntry.len, srEntry.ownerProcId);

        do
        {
          /*------------------------------------------------------------------*/
          /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
          /* Set the entry; retry until SharedRegion_setEntry succeeds.  */
          /* */
          /*------------------------------------------------------------------*/
          srStatus = SharedRegion_setEntry (srIndex, &srEntry);

          if (srStatus != SharedRegion_S_SUCCESS)
          {
            /*----------------------------------------------------------------*/
            /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
            /* If SharedRegion_setEntry fails, log the failure and retry */
            /* */
            /*----------------------------------------------------------------*/
            Log_print3 (Diags_USER1,
                        "SharedRegion_setEntry (%d, 0x%x) Failed, Status %d",
                        srIndex, (xdc_IArg) & srEntry, srStatus);
          }
        }
        while (srStatus != SharedRegion_S_SUCCESS);
        }                         /* if (FALSE == srEntry.isValid) */
      else
      {
        /*--------------------------------------------------------------------*/
        /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */
        /* There is already an entry. Check it has valid information.  */
        /* */
        /*--------------------------------------------------------------------*/
        Assert_isTrue (((srEntry.base == srBaseVirtual) &&
                        (srEntry.len ==
                         MEMCFG_sharedRegionConfigTable.aSRInfo[i].uSize)),
                       Assert_E_assertFailed);
        MEMCFG_module->bSrAddEntryDone[i] = FALSE;
      }
    }                           /* End of IF block: if (nSR_retval ==
                                   SharedRegion_S_SUCCESS) */
    else
    {
      /* if (nSR_retval != SharedRegion_S_SUCCESS) */
      memcfg_retval = MEMCFG_E_FAIL;
      goto EXIT;
    }
  }                             /* End of FOR block: for (i = 0; i <
                                   MEMCFG_sharedRegionConfigTable.nNumSharedRegions; 
                                   i++) */

EXIT:
  return memcfg_retval;
}                               /* memcfg_module_init_shared_region */
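
A complementary teardown would clear only the entries this module itself added, which is what the bSrAddEntryDone[] bookkeeping above records. The helper below is a hypothetical sketch, not part of the MEMCFG module shown here; like the init function, it assumes each entry's uIndex matches its position in the config table.

static int32_t memcfg_module_deinit_shared_region (void)
{
  int32_t i;

  for (i = 0; i < MEMCFG_sharedRegionConfigTable.nNumSharedRegions; i++)
  {
    if (MEMCFG_module->bSrAddEntryDone[i])
    {
      /* remove only the entries this module added via SharedRegion_setEntry */
      SharedRegion_clearEntry (MEMCFG_sharedRegionConfigTable.aSRInfo[i].uIndex);
      MEMCFG_module->bSrAddEntryDone[i] = FALSE;
    }
  }

  return MEMCFG_S_SUCCESS;
}
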
/*
 *  ======== ipcSetup ========
 */
static Int ipcSetup (Char * sysM3ImageName, Char * appM3ImageName)
{
    Ipc_Config                      config;
    ProcMgr_StopParams              stopParams;
    ProcMgr_StartParams             startParams;
    UInt32                          entryPoint = 0;
    UInt16                          procId;
    Int                             status = 0;
    ProcMgr_AttachParams            attachParams;
    ProcMgr_State                   state;
    HeapBufMP_Params                heapbufmpParams;
    Int                             i;
    UInt32                          srCount;
    SharedRegion_Entry              srEntry;

    if(appM3ImageName != NULL)
        appM3Client = TRUE;
    else
        appM3Client = FALSE;

    Ipc_getConfig (&config);
    status = Ipc_setup (&config);
    if (status < 0) {
        Osal_printf ("Error in Ipc_setup [0x%x]\n", status);
        goto exit;
    }

    /* Get MultiProc IDs by name. */
    remoteIdSysM3 = MultiProc_getId (SYSM3_PROC_NAME);
    Osal_printf ("MultiProc_getId remoteId: [0x%x]\n", remoteIdSysM3);
    remoteIdAppM3 = MultiProc_getId (APPM3_PROC_NAME);
    Osal_printf ("MultiProc_getId remoteId: [0x%x]\n", remoteIdAppM3);
    procId = remoteIdSysM3;
    Osal_printf ("MultiProc_getId procId: [0x%x]\n", procId);

    /* Temporary fix to account for a timing issue during recovery. */
    usleep(FAULT_RECOVERY_DELAY);

    printf("RCM procId= %d\n", procId);
    /* Open a handle to the ProcMgr instance. */
    status = ProcMgr_open (&procMgrHandleSysM3, procId);
    if (status < 0) {
        Osal_printf ("Error in ProcMgr_open [0x%x]\n", status);
        goto exit_ipc_destroy;
    }
    else {
        Osal_printf ("ProcMgr_open Status [0x%x]\n", status);
        ProcMgr_getAttachParams (NULL, &attachParams);
        /* Default params will be used if NULL is passed. */
        status = ProcMgr_attach (procMgrHandleSysM3, &attachParams);
        if (status < 0) {
            Osal_printf ("ProcMgr_attach failed [0x%x]\n", status);
        }
        else {
            Osal_printf ("ProcMgr_attach status: [0x%x]\n", status);
            state = ProcMgr_getState (procMgrHandleSysM3);
            Osal_printf ("After attach: ProcMgr_getState\n"
                         "    state [0x%x]\n", state);
        }
    }

    if (status >= 0 && appM3Client) {
        procId = remoteIdAppM3;
        Osal_printf ("MultiProc_getId procId: [0x%x]\n", procId);

        /* Open a handle to the ProcMgr instance. */
        status = ProcMgr_open (&procMgrHandleAppM3, procId);
        if (status < 0) {
            Osal_printf ("Error in ProcMgr_open [0x%x]\n", status);
            goto exit_ipc_destroy;
        }
        else {
            Osal_printf ("ProcMgr_open Status [0x%x]\n", status);
            ProcMgr_getAttachParams (NULL, &attachParams);
            /* Default params will be used if NULL is passed. */
            status = ProcMgr_attach (procMgrHandleAppM3, &attachParams);
            if (status < 0) {
                Osal_printf ("ProcMgr_attach failed [0x%x]\n", status);
            }
            else {
                Osal_printf ("ProcMgr_attach status: [0x%x]\n", status);
                state = ProcMgr_getState (procMgrHandleAppM3);
                Osal_printf ("After attach: ProcMgr_getState\n"
                             "    state [0x%x]\n", state);
            }
        }
    }

#if defined(SYSLINK_USE_LOADER)
    Osal_printf ("SysM3 Load: loading the SysM3 image %s\n",
                sysM3ImageName);

    status = ProcMgr_load (procMgrHandleSysM3, sysM3ImageName, 2,
                            &sysM3ImageName, &entryPoint, &fileIdSysM3,
                            remoteIdSysM3);
    if(status < 0) {
        Osal_printf ("Error in ProcMgr_load, status [0x%x]\n", status);
        goto exit_procmgr_close_sysm3;
    }
#endif
    startParams.proc_id = remoteIdSysM3;
    Osal_printf ("Starting ProcMgr for procID = %d\n", startParams.proc_id);
    status  = ProcMgr_start(procMgrHandleSysM3, entryPoint, &startParams);
    if(status < 0) {
        Osal_printf ("Error in ProcMgr_start, status [0x%x]\n", status);
        goto exit_procmgr_close_sysm3;
    }

    if(appM3Client) {
#if defined(SYSLINK_USE_LOADER)
        Osal_printf ("AppM3 Load: loading the AppM3 image %s\n",
                    appM3ImageName);
        status = ProcMgr_load (procMgrHandleAppM3, appM3ImageName, 2,
                              &appM3ImageName, &entryPoint, &fileIdAppM3,
                              remoteIdAppM3);
        if(status < 0) {
            Osal_printf ("Error in ProcMgr_load, status [0x%x]\n", status);
            goto exit_procmgr_stop_sysm3;
        }
#endif
        startParams.proc_id = remoteIdAppM3;
        Osal_printf ("Starting ProcMgr for procID = %d\n", startParams.proc_id);
        status  = ProcMgr_start(procMgrHandleAppM3, entryPoint,
                                &startParams);
        if(status < 0) {
            Osal_printf ("Error in ProcMgr_start, status [0x%x]\n", status);
            goto exit_procmgr_stop_sysm3;
        }
    }

    Osal_printf ("SysM3: Creating Ducati DMM pool of size 0x%x\n",
                DUCATI_DMM_POOL_0_SIZE);
    status = ProcMgr_createDMMPool (DUCATI_DMM_POOL_0_ID,
                                    DUCATI_DMM_POOL_0_START,
                                    DUCATI_DMM_POOL_0_SIZE,
                                    remoteIdSysM3);
    if(status < 0) {
        Osal_printf ("Error in ProcMgr_createDMMPool, status [0x%x]\n", status);
        goto exit_procmgr_stop_sysm3;
    }

    srCount = SharedRegion_getNumRegions();
    Osal_printf ("SharedRegion_getNumRegions = %d\n", srCount);
    for (i = 0; i < srCount; i++) {
        status = SharedRegion_getEntry (i, &srEntry);
        Osal_printf ("SharedRegion_entry #%d: base = 0x%x len = 0x%x "
                        "ownerProcId = %d isValid = %d cacheEnable = %d "
                        "cacheLineSize = 0x%x createHeap = %d name = %s\n",
                        i, srEntry.base, srEntry.len, srEntry.ownerProcId,
                        (Int)srEntry.isValid, (Int)srEntry.cacheEnable,
                        srEntry.cacheLineSize, (Int)srEntry.createHeap,
                        srEntry.name);
    }

    /* Create the heap to be used by RCM and register it with MessageQ */
    /* TODO: Do this dynamically by reading from the IPC config from the
     *       baseimage using Ipc_readConfig() */
    if (status >= 0) {
        HeapBufMP_Params_init (&heapbufmpParams);
        heapbufmpParams.sharedAddr = NULL;
        heapbufmpParams.align      = RCM_MSGQ_TILER_HEAP_ALIGN;
        heapbufmpParams.numBlocks  = RCM_MSGQ_TILER_HEAP_BLOCKS;
        heapbufmpParams.blockSize  = RCM_MSGQ_TILER_MSGSIZE;
        heapSize = HeapBufMP_sharedMemReq (&heapbufmpParams);
        Osal_printf ("heapSize = 0x%x\n", heapSize);

        srHeap = SharedRegion_getHeap (RCM_MSGQ_HEAP_SR);
        if (srHeap == NULL) {
            status = MEMORYOS_E_FAIL;
            Osal_printf ("SharedRegion_getHeap failed for srHeap:"
                         " [0x%x]\n", srHeap);
            goto exit_procmgr_stop_sysm3;
        }
        else {
            Osal_printf ("Before Memory_alloc = 0x%x\n", srHeap);
            heapBufPtr = Memory_alloc (srHeap, heapSize, 0);
            if (heapBufPtr == NULL) {
                status = MEMORYOS_E_MEMORY;
                Osal_printf ("Memory_alloc failed for ptr: [0x%x]\n",
                             heapBufPtr);
                goto exit_procmgr_stop_sysm3;
            }
            else {
                heapbufmpParams.name           = RCM_MSGQ_TILER_HEAPNAME;
                heapbufmpParams.sharedAddr     = heapBufPtr;
                Osal_printf ("Before HeapBufMP_Create: [0x%x]\n", heapBufPtr);
                heapHandle = HeapBufMP_create (&heapbufmpParams);
                if (heapHandle == NULL) {
                    status = HeapBufMP_E_FAIL;
                    Osal_printf ("HeapBufMP_create failed for Handle:"
                                 "[0x%x]\n", heapHandle);
                    goto exit_procmgr_stop_sysm3;
                }
                else {
                    /* Register this heap with MessageQ */
                    status = MessageQ_registerHeap (heapHandle,
                                                    RCM_MSGQ_TILER_HEAPID);
                    if (status < 0) {
                        Osal_printf ("MessageQ_registerHeap failed!\n");
                        goto exit_procmgr_stop_sysm3;
                    }
                }
            }
        }
    }

    if (status >= 0) {
        HeapBufMP_Params_init (&heapbufmpParams);
        heapbufmpParams.sharedAddr = NULL;
        heapbufmpParams.align      = RCM_MSGQ_DOMX_HEAP_ALIGN;
        heapbufmpParams.numBlocks  = RCM_MSGQ_DOMX_HEAP_BLOCKS;
        heapbufmpParams.blockSize  = RCM_MSGQ_DOMX_MSGSIZE;
        heapSize1 = HeapBufMP_sharedMemReq (&heapbufmpParams);
        Osal_printf ("heapSize1 = 0x%x\n", heapSize1);

        heapBufPtr1 = Memory_alloc (srHeap, heapSize1, 0);
        if (heapBufPtr1 == NULL) {
            status = MEMORYOS_E_MEMORY;
            Osal_printf ("Memory_alloc failed for ptr: [0x%x]\n",
                         heapBufPtr1);
            goto exit_procmgr_stop_sysm3;
        }
        else {
            heapbufmpParams.name           = RCM_MSGQ_DOMX_HEAPNAME;
            heapbufmpParams.sharedAddr     = heapBufPtr1;
            Osal_printf ("Before HeapBufMP_Create: [0x%x]\n", heapBufPtr1);
            heapHandle1 = HeapBufMP_create (&heapbufmpParams);
            if (heapHandle1 == NULL) {
                status = HeapBufMP_E_FAIL;
                Osal_printf ("HeapBufMP_create failed for Handle:"
                             "[0x%x]\n", heapHandle1);
                goto exit_procmgr_stop_sysm3;
            }
            else {
                /* Register this heap with MessageQ */
                status = MessageQ_registerHeap (heapHandle1,
                                                RCM_MSGQ_DOMX_HEAPID);
                if (status < 0) {
                    Osal_printf ("MessageQ_registerHeap failed!\n");
                    goto exit_procmgr_stop_sysm3;
                }
            }
        }
    }

    Osal_printf ("=== SysLink-IPC setup completed successfully!===\n");
    return 0;

exit_procmgr_stop_sysm3:
    stopParams.proc_id = remoteIdSysM3;
    status = ProcMgr_stop (procMgrHandleSysM3, &stopParams);
    if (status < 0) {
        Osal_printf ("Error in ProcMgr_stop(%d): status = 0x%x\n",
            stopParams.proc_id, status);
    }

exit_procmgr_close_sysm3:
    status = ProcMgr_close (&procMgrHandleSysM3);
    if (status < 0) {
        Osal_printf ("Error in ProcMgr_close: status = 0x%x\n", status);
    }
exit_ipc_destroy:
    status = Ipc_destroy ();
    if (status < 0) {
        Osal_printf ("Error in Ipc_destroy: status = 0x%x\n", status);
    }

exit:
    return (-1);
}
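
For completeness, a sketch of the matching teardown that reverses the setup order above. Handle and global names are taken from the example; the use of ProcMgr_detach() and the omission of error handling are assumptions of this sketch.

static Void ipcCleanup (Void)
{
    ProcMgr_StopParams stopParams;

    /* unregister and delete the RCM heaps created in ipcSetup() */
    MessageQ_unregisterHeap (RCM_MSGQ_DOMX_HEAPID);
    HeapBufMP_delete (&heapHandle1);
    Memory_free (srHeap, heapBufPtr1, heapSize1);

    MessageQ_unregisterHeap (RCM_MSGQ_TILER_HEAPID);
    HeapBufMP_delete (&heapHandle);
    Memory_free (srHeap, heapBufPtr, heapSize);

    /* stop and close the slave processors in reverse start order */
    if (appM3Client) {
        stopParams.proc_id = remoteIdAppM3;
        ProcMgr_stop (procMgrHandleAppM3, &stopParams);
        ProcMgr_detach (procMgrHandleAppM3);
        ProcMgr_close (&procMgrHandleAppM3);
    }

    stopParams.proc_id = remoteIdSysM3;
    ProcMgr_stop (procMgrHandleSysM3, &stopParams);
    ProcMgr_detach (procMgrHandleSysM3);
    ProcMgr_close (&procMgrHandleSysM3);

    Ipc_destroy ();
}
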