/*
 *  ======== Hwi_eventMap ========
 *  Function maps an event to a Hwi vector so that the event will be the
 *  source of the interrupt of the vector.
 */
Void Hwi_eventMap(Int vectId, Int eventId)
{
#ifdef ti_sysbios_BIOS_useSK__D
    /* Secure-kernel build: delegate the mux programming to the SK service,
       with interrupts masked around the call. */
    UInt mask;    /* Interrupt mask value */

    mask = _disable_interrupts();

    SK_eventMap(vectId, eventId);

    _restore_interrupts(mask);
#else
    Int muxnum;   /* Interrupt mux register number */
    Int bitpos;   /* The bit position of the interrupt select */
    UInt mask;    /* Interrupt mask value */
    volatile Bits32 *muxReg = (volatile Bits32 *)INTRMUXREG1;

    /* Only vectors 4..15 are event-mappable; silently ignore others. */
    if (vectId < 4 || vectId > 15) {
        return;
    }

    /* Each 32-bit INTMUX register holds four select fields, one per byte
       lane: muxnum picks the register, bitpos the lane (0/8/16/24). */
    muxnum = (vectId - 4) >> 2;
    bitpos = (vectId % 4) << 3;

    mask = _disable_interrupts();

    /* Read-modify-write: clear the 7-bit event-select field for this
       vector, then install the new event id. */
    muxReg[muxnum] = (muxReg[muxnum] & ~(0x7f << bitpos)) |
            (eventId << bitpos);

    /* clear any residual interrupt */
    ICR = 1 << vectId;

    _restore_interrupts(mask);
#endif
}
/*
 *  ======== Hwi_restoreIER ========
 *  Atomically install a new IER value and hand back the value it
 *  replaced, so a caller can later undo the change.
 */
Bits16 Hwi_restoreIER(Bits16 mask)
{
    /* Mask interrupts so the read-then-write of IER is atomic. */
    UInt intState = _disable_interrupts();
    Bits16 previousIER = IER;

    IER = mask;

    _restore_interrupts(intState);

    return previousIER;
}
/******************************************************************************
 * flushCache
 *
 * Write back and invalidate all cache levels with interrupts disabled:
 * invalidates the XMC prefetch buffer, then performs a global L2
 * write-back/invalidate (which also flushes L1P and L1D per the comment
 * below), and fences before re-enabling interrupts.
 ******************************************************************************/
static void flushCache (void)
{
    uint32_t  key;

    /* Disable Interrupts */
    key = _disable_interrupts();

    CSL_XMC_invalidatePrefetchBuffer();

    /*-------------------------------------------------------------------------
     * Also flushes L1P and L1D.
     *------------------------------------------------------------------------*/
    CACHE_wbInvAllL2(CACHE_NOWAIT);

    /* NOTE(review): the fence plus NOP padding presumably lets the
     * write-back/invalidate fully land before interrupts come back on —
     * confirm against the device cache errata before altering. */
    _mfence();
    asm(" NOP 9");
    asm(" NOP 7");

    /* Reenable Interrupts. */
    _restore_interrupts(key);
}
/*
 *  ======== Hwi_restore ========
 *  Restore the global interrupt state encoded in 'key' (a value previously
 *  returned by the corresponding disable call) via the compiler intrinsic.
 */
Void ti_sysbios_family_c64p_Hwi_restore__E(UInt key)
{
    _restore_interrupts(key);
}