Example #1
0
/* Function to write back invalidate the Cache module */
/*
 * Cache_wbInv: write back and then invalidate byteCnt bytes starting at
 * blockPtr, for the cache level(s) selected by 'type'; 'wait' selects
 * blocking vs. non-blocking behavior (both are forwarded unchanged to
 * Cache_wb()/Cache_inv()).
 *
 * The active implementation is the #else branch: a Cache_wb() followed by
 * a Cache_inv().  The #if 0 branch is intentionally dead code kept for
 * reference — see the explanatory comment inside it.
 */
Void Cache_wbInv(Ptr blockPtr, UInt32 byteCnt, Bits16 type, Bool wait) {
    GT_4trace (curTrace, GT_ENTER, "Cache_wbInv", blockPtr, byteCnt, type, wait);
#if 0

/*
 * It appears that this #if 0'ed code doesn't actually perform the
 * invalidate part of the wbInv, and it appears that Cache_wb() and Cache_inv()
 * work properly, so for now we implement wbInv as a combination of the two
 * individual functions.
*/

#ifdef USE_CACHE_VOID_ARG
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34)
                /* newer kernels: L1 maintenance via dmac_map_area, then
                 * outer (L2) flush over the physical address range */
                dmac_map_area(blockPtr, (size_t)byteCnt, DMA_BIDIRECTIONAL);
                outer_flush_range(__pa((UInt32)blockPtr),
                                  __pa((UInt32)(blockPtr+byteCnt)) );
#else
                dmac_flush_range(blockPtr, (blockPtr+byteCnt) );
#endif
#else
                dmac_flush_range( (UInt32)blockPtr, (UInt32)(blockPtr + byteCnt) );
#endif

#else

    /* write back first so dirty lines reach memory, then invalidate so the
     * next read refetches from memory */
    Cache_wb(blockPtr, byteCnt, type, wait);
    Cache_inv(blockPtr, byteCnt, type, wait);

#endif

    GT_0trace (curTrace, GT_LEAVE, "Cache_wbInv");
}
Example #2
0
/*
 * Write back this core's portion of the shrunken output image so other
 * cores (the main core in particular) see the data in memory.
 *
 * NOTE(review): out_buf_start/out_buf_size are passed uninitialized and by
 * value; this only works if CalcOutputBufferPos takes them as C++
 * references (its signature is not visible here) — confirm, otherwise the
 * Cache_wb below operates on garbage.
 */
void CacheWriteBackShrinkData(const uint8_T *ImgSmall, const uint32_t yStart, const uint32_t yEnd, const uint32_t xWidthSmall)
{
	//Cache write back output cache, the memory this core (and only this core) has written to (also cache write back on main core)
	uint8_T* out_buf_start;   // filled in by CalcOutputBufferPos
	uint32_T out_buf_size;    // filled in by CalcOutputBufferPos
	CalcOutputBufferPos(ImgSmall, yStart, yEnd, xWidthSmall, out_buf_start, out_buf_size);
	Cache_wb((xdc_Ptr*) out_buf_start, out_buf_size, Cache_Type_ALL, CACHE_BLOCKING);	//Blocking because the main core will process the image edges after we're finished here
}
Example #3
0
/*
 *  ======== Memory_cacheWb ========
 *  Write back 'sizeInBytes' bytes of cached memory starting at 'addr'.
 *
 *  Operates on all cache levels (Cache_Type_ALL) and blocks until the
 *  write-back completes (wait == TRUE).  Entry and exit are traced.
 */
Void Memory_cacheWb(Ptr addr, Int sizeInBytes)
{
    /* trace entry with the range being written back */
    Log_print2(Diags_ENTRY,
            "[+E] Memory_cacheWb> Enter(addr=0x%x, sizeInBytes=%d)",
            (IArg)addr, (IArg)sizeInBytes);

    /* blocking write-back of the whole range, every cache level */
    Cache_wb(addr, sizeInBytes, Cache_Type_ALL, TRUE);

    /* trace exit */
    Log_print0(Diags_EXIT, "[+X] Memory_cacheWb> return");
}
Example #4
0
/**
 * Public, called from shrinkImageDSP (on the main core) before entering shrinkImage_on_core()
 *
 * Writes back the input-image region that the worker cores will read, so
 * their subsequent cache invalidates (CacheInvalShrinkData()) observe the
 * data this core produced.
 *
 * NOTE(review): in_buf_start/in_buf_size are passed uninitialized and by
 * value; this only works if CalcInputBufferPos takes them as C++
 * references (its signature is not visible here) — confirm.
 */
void CacheWriteBackToBeShrinkedData(const uint8_T *Img, const uint32_t yStart, const uint32_t yEnd, const uint32_t yEvenOdd, const uint32_t xWidth)
{
	//ASSERT(0 == MultiProc_self());

	//Cache write back the data on core 0 that other cores will need later (and pull by CacheInvalShrinkData())
	uint8_T* in_buf_start;   // filled in by CalcInputBufferPos
	uint32_T in_buf_size;    // filled in by CalcInputBufferPos
	CalcInputBufferPos(Img, yStart, yEnd, yEvenOdd, xWidth, in_buf_start, in_buf_size);
	Cache_wb((xdc_Ptr*) in_buf_start, in_buf_size, Cache_Type_ALL, CACHE_BLOCKING);		//Blocking because we will process the data immediately
}
Example #5
0
/*
 *  ======== Power_suspend ========
 *  Suspend the subsystem at the requested 'level'.
 *
 *  For Power_Suspend_HIBERNATE: saves Unicache, AMMU, SYSC, INTC, EDMA
 *  and CPU-control-register context into ssContext, writes back all
 *  caches, then idles; on resume-after-reset the saved context is
 *  restored.  For other (retention) levels only the write buffers are
 *  drained and cache enable state is restored afterward.
 *
 *  Runs with interrupts and Task/Swi scheduling disabled for the whole
 *  sequence.  Returns the 'reset' flag from Power_standby(): nonzero
 *  means execution resumed after a reset rather than a simple wakeup.
 */
UInt Power_suspend(Power_Suspend level)
{
    Bool l1CacheEnabled;
    Bool l2CacheEnabled;
    UInt32 *wordPtr;
    UInt32 taskKey;
    UInt32 swiKey;
    UInt32 hwiKey;
    UInt32 reset;
    UInt32 tmp1;
    UInt32 tmp2;
    UInt32 i;

    /* disable interrupts */
    hwiKey = Hwi_disable();

    /* disable scheduling */
    taskKey = Task_disable();
    swiKey = Swi_disable();

    /* check Unicache state; set 'enabled' flags (bit 1 = enable bit) */
    l1CacheEnabled = Cache_cache.L1_CONFIG & 0x2;
    l2CacheEnabled = Cache_cache.L2_CONFIG & 0x2;

#if _VERBOSE_
    System_printf("Power_suspend\n");
    System_printf("    suspend level = 0x%x\n", level);
    System_printf("    subsystem context = 0x%x\n", &ssContext);
    System_printf("    CPU context = 0x%x\n", 
        &ti_sysbios_family_c64p_tesla_Power_cpuRegs);
    System_printf("    CPU sys regs = 0x%x\n", &ssContext.cpuSysRegs);
    System_printf("    INTC context = 0x%x\n", &ssContext.configINTC);
    System_printf("    SYSC context = 0x%x\n", &ssContext.configSYSC);
    System_printf("    AMMU context = 0x%x\n", &ssContext.configAMMU);
    System_printf("    EDMA context = 0x%x\n", &ssContext.configEDMA);
    System_printf("    TSC flag = 0x%x\n", &ssContext.tscRunning);
    System_printf("    L1 context = 0x%x\n", &ssContext.configL1);
    System_printf("    L1 enabled = 0x%x\n", l1CacheEnabled);
    System_printf("    L2 context = 0x%x\n", &ssContext.configL2);
    System_printf("    L2 enabled = 0x%x\n", l2CacheEnabled);
#endif

    /* = = = = = = = = */
    
    /* if HIBERNATE: save Tesla subsystem context ... */
    if (level == Power_Suspend_HIBERNATE) {

        /* save Unicache config context */
        ssContext.configL1.CONFIG = (UInt32) Cache_cache.L1_CONFIG;
        ssContext.configL1.OCP = (UInt32) Cache_cache.L1_OCP;
        ssContext.configL2.CONFIG = (UInt32) Cache_cache.L2_CONFIG;
        ssContext.configL2.OCP = (UInt32) Cache_cache.L2_OCP;

        /* = = = = = = = = */

        /* save AMMU context (large, medium, and small page entries) */
        for (i = 0; i < AMMU_numLargePages; i++) {
            ssContext.configAMMU.largeAddr[i] = 
                (UInt32) AMMU_mmu.LARGE_ADDR[i];
            ssContext.configAMMU.largePolicy[i] = 
                (UInt32) AMMU_mmu.LARGE_POLICY[i];
        }
        for (i = 0; i < AMMU_numMediumPages; i++) {
            ssContext.configAMMU.medAddr[i] = 
                (UInt32) AMMU_mmu.MEDIUM_ADDR[i];
            ssContext.configAMMU.medPolicy[i] = 
                (UInt32) AMMU_mmu.MEDIUM_POLICY[i];
        }
        for (i = 0; i < AMMU_numSmallPages; i++) {
            ssContext.configAMMU.smallAddr[i] = 
                (UInt32) AMMU_mmu.SMALL_ADDR[i];
            ssContext.configAMMU.smallPolicy[i] = 
                (UInt32) AMMU_mmu.SMALL_POLICY[i];
        }

        /* = = = = = = = = */

        /* save SYSC context */
        ssContext.configSYSC.SYSCONFIG = 
            REG((UInt32)Power_syscRegs + SYSCONFIG_REG_OFFSET);
        ssContext.configSYSC.VBUSM2OCP = 
            REG((UInt32)Power_syscRegs + VBUSM2OCP_REG_OFFSET);
        ssContext.configSYSC.EDMA = 
            REG((UInt32)Power_syscRegs + EDMA_REG_OFFSET);
        ssContext.configSYSC.CORE = 
            REG((UInt32)Power_syscRegs + CORE_REG_OFFSET);
        ssContext.configSYSC.IVA_ICTRL = 
            REG((UInt32)Power_syscRegs + IVA_ICTRL_REG_OFFSET);
        ssContext.configSYSC.IDLEDLY = 
            REG((UInt32)Power_syscRegs + IDLEDLY_REG_OFFSET);

        /* = = = = = = = = */

        /* save INTC context */
        ssContext.configINTC.EVTMASK0 = REG(EVTMASK0_REG);
        ssContext.configINTC.EVTMASK1 = REG(EVTMASK1_REG);
        ssContext.configINTC.EVTMASK2 = REG(EVTMASK2_REG);
        ssContext.configINTC.EVTMASK3 = REG(EVTMASK3_REG);
        ssContext.configINTC.EXPMASK0 = REG(EXPMASK0_REG);
        ssContext.configINTC.EXPMASK1 = REG(EXPMASK1_REG);
        ssContext.configINTC.EXPMASK2 = REG(EXPMASK2_REG);
        ssContext.configINTC.EXPMASK3 = REG(EXPMASK3_REG);
        ssContext.configINTC.INTMUX1 = REG(INTMUX1_REG);
        ssContext.configINTC.INTMUX2 = REG(INTMUX2_REG);
        ssContext.configINTC.INTMUX3 = REG(INTMUX3_REG);
        ssContext.configINTC.AEGMUX0 = REG(AEGMUX0_REG);
        ssContext.configINTC.AEGMUX1 = REG(AEGMUX1_REG);
        ssContext.configINTC.INTDMASK = REG(INTDMASK_REG);

        /* = = = = = = = = */

        /* save EDMA context */
 
        ssContext.configEDMA.CLKGDIS = 
            REG((UInt32)Power_tpccRegs + CLKGDIS_REG_OFFSET);

        /* save DMA chan to PARAM mapping registers */
        wordPtr = (UInt32 *)((UInt32)Power_tpccRegs + DCHMAP0_REG_OFFSET);
        for (i = 0; i < 64; i++) {      
            ssContext.configEDMA.DCHMAP[i] = *wordPtr++;
        }

        /* save QDMA chan to PARAM mapping registers */
        wordPtr = (UInt32 *)((UInt32)Power_tpccRegs + QCHMAP0_REG_OFFSET);
        for (i = 0; i < 8; i++) {       
            ssContext.configEDMA.QCHMAP[i] = *wordPtr++;
        }

        /* save DMA queue mapping registers */
        wordPtr = (UInt32 *)((UInt32)Power_tpccRegs + DMAQNUM0_REG_OFFSET);
        for (i = 0; i < 8; i++) {       
            ssContext.configEDMA.DMAQNUM[i] = *wordPtr++;
        }

        ssContext.configEDMA.QDMAQNUM = 
            REG((UInt32)Power_tpccRegs + QDMAQNUM_REG_OFFSET);
        ssContext.configEDMA.QUETCMAP = 
            REG((UInt32)Power_tpccRegs + QUETCMAP_REG_OFFSET);
        ssContext.configEDMA.QUEPRI = 
            REG((UInt32)Power_tpccRegs + QUEPRI_REG_OFFSET);

        /* save DMA and QDMA region access enable bits */
        wordPtr = (UInt32 *)((UInt32)Power_tpccRegs + DRAEM0_REG_OFFSET);
        for (i = 0; i < (8 * 3); i++) { 
            ssContext.configEDMA.regionAccessBits[i] = *wordPtr++;
        }

        ssContext.configEDMA.QWMTHRA = 
            REG((UInt32)Power_tpccRegs + QWMTHRA_REG_OFFSET);
        ssContext.configEDMA.AETCTL = 
            REG((UInt32)Power_tpccRegs + AETCTL_REG_OFFSET);
        ssContext.configEDMA.IER = 
            REG((UInt32)Power_tpccRegs + IER_REG_OFFSET);
        ssContext.configEDMA.IERH = 
            REG((UInt32)Power_tpccRegs + IERH_REG_OFFSET);
        ssContext.configEDMA.QEER = 
            REG((UInt32)Power_tpccRegs + QEER_REG_OFFSET);

        /* bulk save of all PaRAMs (8 regs * 128 PaRAMs */
        wordPtr = (UInt32 *)((UInt32)Power_tpccRegs + OPT0_REG_OFFSET);
        for (i = 0; i < (8 * 128); i++) {       
            ssContext.configEDMA.PaRAMs[i] = *wordPtr++;
        }

        /* = = = = = = = = */

        /* save CPU control registers */
        ssContext.cpuSysRegs.AMR = AMR;
        ssContext.cpuSysRegs.CSR = CSR;
        ssContext.cpuSysRegs.IER = IER;
        ssContext.cpuSysRegs.ISTP = ISTP;
        ssContext.cpuSysRegs.IRP = IRP;
        ssContext.cpuSysRegs.SSR = SSR;
        ssContext.cpuSysRegs.GPLYB = GPLYB;
        ssContext.cpuSysRegs.GFPGFR = GFPGFR;
        ssContext.cpuSysRegs.TSR = TSR;
        ssContext.cpuSysRegs.ITSR = ITSR;
        ssContext.cpuSysRegs.IERR = IERR;

    }

    /* sample and set the 'TSC is running' state flag */
    /* two back-to-back reads: equal values mean the counter is stopped */
    tmp1 = TSCL;
    tmp2 = TSCL;
    if (tmp1 == tmp2) {
        ssContext.tscRunning = 0;
    }
    else {
        ssContext.tscRunning = 1;
    }

    /* if Unicache enabled, prepare for standby ... */
    if (l1CacheEnabled || l2CacheEnabled) {

        /* if HIBERNATE: write back all for L1 and L2 */
        if (level == Power_Suspend_HIBERNATE) { 
            Cache_wbAll();
        }
        /* else, retention, just clean the write buffers */
        else {
            Cache_wb(0, 0, Cache_Type_ALL, TRUE);/* start=end=0 -> clean bufs */
        }

        /* now bypass the caches... */
        if (l1CacheEnabled) {
            Cache_disable(Cache_Type_L1);
        }
        if (l2CacheEnabled) {
            Cache_disable(Cache_Type_L2);
        }
    }

    /* set reset-function-sampled 'doing a resume' flag */
    ti_sysbios_family_c64p_tesla_Power_doResume = 1;

    /* set the ready-to-standby flag (an FYI for the MPU) */
    ti_sysbios_family_c64p_tesla_Power_readyIdle = 1;

    /* setup PDC to put GEM into standby when execute IDLE */
    REG(PDCCMD_REG) = PDCCMD_STANDBY;
    REG(PDCCMD_REG);    /* read back, presumably to flush the posted write
                         * (same pattern as the Unicache restores below) */

    /* make function call to save child-preserved CPU regs and do standby ... */
    reset = ti_sysbios_family_c64p_tesla_Power_standby(
        &ti_sysbios_family_c64p_tesla_Power_cpuRegs);

    /* = = = = = = = = */

    /* NOTE: return here both when woke from IDLE, or resumed after reset */

    /* = = = = = = = = */

    /* note: this symbol is not used, but is defined for debug purposes only */
    asm(" .global ti_sysbios_family_c64p_tesla_Power_suspend_RESUME");
    asm("ti_sysbios_family_c64p_tesla_Power_suspend_RESUME:");

    /* if HIBERNATE and *did* reset: restore all context ... */
    if ((reset != 0) && (level == Power_Suspend_HIBERNATE)) {

        /* restore CPU control registers */
        AMR = ssContext.cpuSysRegs.AMR;
        CSR = ssContext.cpuSysRegs.CSR;
        IER = ssContext.cpuSysRegs.IER;
        ISTP = ssContext.cpuSysRegs.ISTP;
        IRP = ssContext.cpuSysRegs.IRP;
        SSR = ssContext.cpuSysRegs.SSR;
        GPLYB = ssContext.cpuSysRegs.GPLYB;
        GFPGFR = ssContext.cpuSysRegs.GFPGFR;
        TSR = ssContext.cpuSysRegs.TSR;
        ITSR = ssContext.cpuSysRegs.ITSR;
        IERR = ssContext.cpuSysRegs.IERR;

        /* = = = = = = = = */

        /* restore AMMU configuration */
        for (i = 0; i < AMMU_numLargePages; i++) {
            AMMU_mmu.LARGE_ADDR[i] = 
                (Char *) ssContext.configAMMU.largeAddr[i];
            AMMU_mmu.LARGE_POLICY[i] = 
                ssContext.configAMMU.largePolicy[i];
        }
        for (i = 0; i < AMMU_numMediumPages; i++) {
            AMMU_mmu.MEDIUM_ADDR[i] =
                (Char *) ssContext.configAMMU.medAddr[i];
            AMMU_mmu.MEDIUM_POLICY[i] =
                ssContext.configAMMU.medPolicy[i];
        }
        for (i = 0; i < AMMU_numSmallPages; i++) {
            AMMU_mmu.SMALL_ADDR[i] =
                (Char *) ssContext.configAMMU.smallAddr[i];
            AMMU_mmu.SMALL_POLICY[i] =
                ssContext.configAMMU.smallPolicy[i];
        }

        /* = = = = = = = = */

        /* restore Unicache config (OCP before CONFIG, L1 before L2) */
        Cache_cache.L1_OCP = ssContext.configL1.OCP;
        tmp1 = Cache_cache.L1_OCP;      /* read to ensure posted write done */
        Cache_cache.L1_CONFIG = ssContext.configL1.CONFIG;
        tmp1 = Cache_cache.L1_CONFIG;   /* read to ensure posted write done */
        Cache_cache.L2_OCP = ssContext.configL2.OCP;
        tmp1 = Cache_cache.L2_OCP;      /* read to ensure posted write done */
        Cache_cache.L2_CONFIG = ssContext.configL2.CONFIG;
        tmp1 = Cache_cache.L2_CONFIG;   /* read to ensure posted write done */

        /* = = = = = = = = */

        /* restore SYSC context */
        REG((UInt32)Power_syscRegs + SYSCONFIG_REG_OFFSET) =
            ssContext.configSYSC.SYSCONFIG;
        REG((UInt32)Power_syscRegs + VBUSM2OCP_REG_OFFSET) =
            ssContext.configSYSC.VBUSM2OCP;
        REG((UInt32)Power_syscRegs + EDMA_REG_OFFSET) =
            ssContext.configSYSC.EDMA;
        REG((UInt32)Power_syscRegs + CORE_REG_OFFSET) =
            ssContext.configSYSC.CORE;
        REG((UInt32)Power_syscRegs + IVA_ICTRL_REG_OFFSET) =
            ssContext.configSYSC.IVA_ICTRL;
        REG((UInt32)Power_syscRegs + IDLEDLY_REG_OFFSET) =
            ssContext.configSYSC.IDLEDLY;

        /* = = = = = = = = */

        /* restore INTC context */
        REG(EVTMASK0_REG) = ssContext.configINTC.EVTMASK0;
        REG(EVTMASK1_REG) = ssContext.configINTC.EVTMASK1;
        REG(EVTMASK2_REG) = ssContext.configINTC.EVTMASK2;
        REG(EVTMASK3_REG) = ssContext.configINTC.EVTMASK3;
        REG(EXPMASK0_REG) = ssContext.configINTC.EXPMASK0;
        REG(EXPMASK1_REG) = ssContext.configINTC.EXPMASK1;
        REG(EXPMASK2_REG) = ssContext.configINTC.EXPMASK2;
        REG(EXPMASK3_REG) = ssContext.configINTC.EXPMASK3;
        REG(INTMUX1_REG) = ssContext.configINTC.INTMUX1;
        REG(INTMUX2_REG) = ssContext.configINTC.INTMUX2;
        REG(INTMUX3_REG) = ssContext.configINTC.INTMUX3;
        REG(AEGMUX0_REG) = ssContext.configINTC.AEGMUX0;
        REG(AEGMUX1_REG) = ssContext.configINTC.AEGMUX1;
        REG(INTDMASK_REG) = ssContext.configINTC.INTDMASK;

        /* = = = = = = = = */

        /* restore EDMA context */

        REG((UInt32)Power_tpccRegs + CLKGDIS_REG_OFFSET) =
            ssContext.configEDMA.CLKGDIS;

        /* restore DMA chan to PARAM mapping registers */
        wordPtr = (UInt32 *)((UInt32)Power_tpccRegs + DCHMAP0_REG_OFFSET);
        for (i = 0; i < 64; i++) {      
            *wordPtr++ = ssContext.configEDMA.DCHMAP[i];
        }

        /* restore QDMA chan to PARAM mapping registers */
        wordPtr = (UInt32 *)((UInt32)Power_tpccRegs + QCHMAP0_REG_OFFSET);
        for (i = 0; i < 8; i++) {       
            *wordPtr++ = ssContext.configEDMA.QCHMAP[i];
        }

        /* restore DMA queue mapping registers */
        wordPtr = (UInt32 *)((UInt32)Power_tpccRegs + DMAQNUM0_REG_OFFSET);
        for (i = 0; i < 8; i++) {       
            *wordPtr++ = ssContext.configEDMA.DMAQNUM[i];
        }

        REG((UInt32)Power_tpccRegs + QDMAQNUM_REG_OFFSET) =
            ssContext.configEDMA.QDMAQNUM;
        REG((UInt32)Power_tpccRegs + QUETCMAP_REG_OFFSET) =
            ssContext.configEDMA.QUETCMAP;
        REG((UInt32)Power_tpccRegs + QUEPRI_REG_OFFSET) =
            ssContext.configEDMA.QUEPRI;

        /* restore DMA and QDMA region access enable bits */
        wordPtr = (UInt32 *)((UInt32)Power_tpccRegs + DRAEM0_REG_OFFSET);
        for (i = 0; i < (8 * 3); i++) { 
            *wordPtr++ = ssContext.configEDMA.regionAccessBits[i];
        }

        REG((UInt32)Power_tpccRegs + QWMTHRA_REG_OFFSET) =
            ssContext.configEDMA.QWMTHRA;
        REG((UInt32)Power_tpccRegs + AETCTL_REG_OFFSET) =
            ssContext.configEDMA.AETCTL;

        /* restore interrupt enable registers (using IESR and IESRH) */
        REG((UInt32)Power_tpccRegs + IESR_REG_OFFSET) =
            ssContext.configEDMA.IER;
        REG((UInt32)Power_tpccRegs + IESRH_REG_OFFSET) =
            ssContext.configEDMA.IERH;

        /* restore QDMA event enable register (using QEESR) */
        REG((UInt32)Power_tpccRegs + QEESR_REG_OFFSET) = 
            ssContext.configEDMA.QEER;

        /* restore all PaRAMs (8 regs * 128 PaRAMs */
        wordPtr = (UInt32 *)((UInt32)Power_tpccRegs + OPT0_REG_OFFSET);
        for (i = 0; i < (8 * 128); i++) {       
            *wordPtr++ = ssContext.configEDMA.PaRAMs[i];
        }

#if _VERBOSE_
        System_printf("hibernate: restored context\n");
#endif

    }

    /* Else: Restore caches to their pre-standby enable state.
     * Note: When come out of retention reset caches will always be enabled,
     *       even if they weren't enabled before standby. So, need to disable
     *       them now, if they weren't enabled when suspend was invoked. 
     */
    else {
        /* restore the enabled state of the caches ... */
        if (l1CacheEnabled) {
            Cache_enable(Cache_Type_L1);
        }
        else {
            Cache_disable(Cache_Type_L1);
        }
        if (l2CacheEnabled) {
            Cache_enable(Cache_Type_L2);
        }
        else {
            Cache_disable(Cache_Type_L2);
        }
    }

#if _VERBOSE_
    System_printf("reset flag = %d\n", reset);
#endif

    /* if TSC was enabled on entry: start it again */
    if (ssContext.tscRunning == 1) {
        TSCL = 1;       /* write any value to TSC to kick start it */
    }

    /* clear the ready-to-standby flag */
    ti_sysbios_family_c64p_tesla_Power_readyIdle = 0;

    /* clear the reset-sampled 'do resume' flag */
    ti_sysbios_family_c64p_tesla_Power_doResume = 0;

    /* re-enable scheduling */
    Task_restore(taskKey);
    Swi_restore(swiKey);

    /* re-enable interrupts */
    Hwi_restore(hwiKey);

    return (reset);
}
Example #6
0
/*
 *  ======== Ipc_writeConfig ========
 *  Write a config entry, tagged (remoteProcId, tag, size), onto the head
 *  of the local shared-memory config list for the remote processor's
 *  cluster slot; performs cache write-backs when region 0 is cached.
 *
 *  If cfg is NULL, instead frees the previously written entry that
 *  matches (remoteProcId, tag, size), unlinking it from the list.
 *
 *  Returns Ipc_S_SUCCESS, or Ipc_E_FAIL when allocation fails or (in the
 *  cfg == NULL case) no matching entry is found.
 */
Int Ipc_writeConfig(UInt16 remoteProcId, UInt32 tag, Ptr cfg, SizeT size)
{
    Int status = Ipc_S_SUCCESS;
    UInt16 clusterId = ti_sdo_utils_MultiProc_getClusterId(remoteProcId);
    SharedRegion_SRPtr curSRPtr, *prevSRPtr;
    ti_sdo_ipc_Ipc_ConfigEntry *entry;
    Error_Block eb;
    Bool cacheEnabled = SharedRegion_isCacheEnabled(0);

    /* Assert that the remoteProc in our cluster */
    Assert_isTrue(clusterId < ti_sdo_utils_MultiProc_numProcsInCluster,
                  ti_sdo_utils_MultiProc_A_invalidMultiProcId);

    Error_init(&eb);

    if (cfg == NULL) {
        /* free path: assume failure until a matching entry is found */
        status = Ipc_E_FAIL;

        /* get head of local config list and set prevSRPtr to it */
        prevSRPtr = (Ipc_module->procEntry[clusterId].localConfigList);

        /* 
         *  When cfg is NULL, the last memory allocated from a previous
         *  Ipc_writeConfig call with the same remoteProcId, tag, and size
         *  is freed.
         */
        curSRPtr = *prevSRPtr;

        /* loop through list of config entries until matching entry is found */
        while (curSRPtr != ti_sdo_ipc_SharedRegion_INVALIDSRPTR) {
            /* convert Ptr associated with curSRPtr */
            entry = (ti_sdo_ipc_Ipc_ConfigEntry *)
                    (SharedRegion_getPtr(curSRPtr));

            /* make sure entry matches remoteProcId, tag, and size */
            if ((entry->remoteProcId == remoteProcId) &&
                (entry->tag == tag) &&
                (entry->size == size)) {
                /* Update the 'prev' next ptr */
                *prevSRPtr = (SharedRegion_SRPtr)entry->next;

                /* writeback the 'prev' ptr */
                /* NOTE(review): size passed is sizeof(ConfigEntry) though
                 * prevSRPtr addresses a single SRPtr — presumably to cover
                 * a full cache line; confirm against the original intent */
                if (cacheEnabled) {
                    Cache_wb(prevSRPtr,
                        sizeof(ti_sdo_ipc_Ipc_ConfigEntry),
                        Cache_Type_ALL,
                        FALSE);
                }

                /* free entry's memory back to shared heap */
                Memory_free(SharedRegion_getHeap(0),
                    entry,
                    size + sizeof(ti_sdo_ipc_Ipc_ConfigEntry));

                /* set the status to success */
                status = Ipc_S_SUCCESS;
                break;
            }

            /* set the 'prev' to the 'cur' SRPtr */
            prevSRPtr = (SharedRegion_SRPtr *)(&entry->next);

            /* point to next config entry */
            curSRPtr = (SharedRegion_SRPtr)entry->next;
        }

        /* return that status */
        return (status);
    }

    /* Allocate memory from the shared heap (System Heap) */
    /* header + payload, aligned to the region's cache line size */
    entry = Memory_alloc(SharedRegion_getHeap(0),
                         size + sizeof(ti_sdo_ipc_Ipc_ConfigEntry),
                         SharedRegion_getCacheLineSize(0),
                         &eb);

    if (entry == NULL) {
        return (Ipc_E_FAIL);
    }

    /* set the entry */
    entry->remoteProcId = remoteProcId;
    entry->localProcId = MultiProc_self();
    entry->tag = tag;
    entry->size = size;
    /* payload is copied immediately after the entry header */
    memcpy((Ptr)((UInt32)entry + sizeof(ti_sdo_ipc_Ipc_ConfigEntry)), cfg,
                  size);
        
    /* point the entry's next to the first entry in the list */
    entry->next = *Ipc_module->procEntry[clusterId].localConfigList;

    /* first write-back the entry if cache is enabled */
    if (cacheEnabled) {
        Cache_wb(entry, size + sizeof(ti_sdo_ipc_Ipc_ConfigEntry),
                 Cache_Type_ALL,
                 FALSE);
    }

    /* set the entry as the new first in the list */
    *Ipc_module->procEntry[clusterId].localConfigList =
        SharedRegion_getSRPtr(entry, 0);

    /* write-back the config list */
    if (cacheEnabled) {
        Cache_wb(Ipc_module->procEntry[clusterId].localConfigList, 
                 SharedRegion_getCacheLineSize(0), 
                 Cache_Type_ALL, 
                 FALSE);
    }

    return (status);
}