/*
 * Raise a hardware event to notify the master core that an IPC message is
 * pending. No payload is written here; presumably the message data was
 * stored to shared memory by the caller — TODO confirm against callers.
 */
void IPC_msgNotifyMaster(void)
{
    // make sure all data transactions complete before next instruction is executed
    __DSB();
    // now trigger the remote processor
    __sev();
}
/*
 * Signal the remote processor over the IPC channel using the Send-Event
 * instruction. The DSB guarantees any shared-memory writes made by the
 * caller are visible before the remote core is woken.
 */
void IPC_sendInterrupt(void)
{
    /* make sure all data transactions complete before next instruction is executed */
    __DSB();
    /* now trigger the remote processor */
    __sev();
}
/*
 * Idle the CPU until the next interrupt (tick-less idle hook).
 *
 * @param ticks  Number of OS ticks the caller expects to remain idle.
 *               Unused in this port: we simply Wait-For-Interrupt, so the
 *               core wakes on any enabled interrupt regardless of ticks.
 *
 * Must be called from a critical section (asserted below).
 */
void os_tick_idle(os_time_t ticks)
{
    (void)ticks; /* FIX: silence -Wunused-parameter; this port ignores the count */

    OS_ASSERT_CRITICAL();

    /* Complete all outstanding memory accesses before sleeping. */
    __DSB();
    __WFI();
}
/*
 * Write num_words 32-bit words from src to internal flash starting at
 * `address` via the nRF NVMC peripheral. Blocks until each word has been
 * programmed. Assumes `address` is word-aligned and the target flash page
 * was previously erased — TODO confirm against callers.
 */
void nrf_nvmc_write_words(uint32_t address, const uint32_t * src, uint32_t num_words)
{
    // Enable write.
    NRF_NVMC->CONFIG = NVMC_CONFIG_WEN_Wen;
    /* Barriers so the write-enable takes effect before the first flash store. */
    __ISB();
    __DSB();

    for (uint32_t i = 0; i < num_words; i++)
    {
        ((uint32_t*)address)[i] = src[i];
        wait_for_flash_ready();   /* poll the NVMC until the word is programmed */
    }

    /* Back to read-only mode; barriers order the mode change after the writes. */
    NRF_NVMC->CONFIG = NVMC_CONFIG_WEN_Ren;
    __ISB();
    __DSB();
}
/**
  * @brief  NAND memory Block erase
  * @param  hnand: pointer to a NAND_HandleTypeDef structure that contains
  *                the configuration information for NAND module.
  * @param  pAddress : pointer to NAND address structure
  * @retval HAL status (HAL_BUSY if another operation is in progress)
  */
HAL_StatusTypeDef HAL_NAND_Erase_Block(NAND_HandleTypeDef *hnand, NAND_AddressTypeDef *pAddress)
{
  uint32_t DeviceAddress = 0;

  /* Process Locked */
  __HAL_LOCK(hnand);

  /* Check the NAND controller state */
  if(hnand->State == HAL_NAND_STATE_BUSY)
  {
    /* FIX: release the lock acquired above before bailing out. The original
       returned while still holding the handle lock, so every subsequent call
       on this handle would fail with HAL_BUSY forever. */
    __HAL_UNLOCK(hnand);
    return HAL_BUSY;
  }

  /* Identify the device address */
  DeviceAddress = NAND_DEVICE;

  /* Update the NAND controller state */
  hnand->State = HAL_NAND_STATE_BUSY;

  /* Send Erase block command sequence: ERASE0, then row-address cycles,
     then ERASE1 confirms. DSBs keep the FMC accesses in program order. */
  *(__IO uint8_t *)((uint32_t)(DeviceAddress | CMD_AREA)) = NAND_CMD_ERASE0;

  *(__IO uint8_t *)((uint32_t)(DeviceAddress | ADDR_AREA)) = ADDR_1ST_CYCLE(ARRAY_ADDRESS(pAddress, hnand));
  *(__IO uint8_t *)((uint32_t)(DeviceAddress | ADDR_AREA)) = ADDR_2ND_CYCLE(ARRAY_ADDRESS(pAddress, hnand));
  *(__IO uint8_t *)((uint32_t)(DeviceAddress | ADDR_AREA)) = ADDR_3RD_CYCLE(ARRAY_ADDRESS(pAddress, hnand));
  __DSB();

  /* for 512 and 1 GB devices, 4th cycle is required */
  if(hnand->Info.BlockNbr >= 1024)
  {
    *(__IO uint8_t *)((uint32_t)(DeviceAddress | ADDR_AREA)) = ADDR_4TH_CYCLE(ARRAY_ADDRESS(pAddress, hnand));
    __DSB();
  }

  *(__IO uint8_t *)((uint32_t)(DeviceAddress | CMD_AREA)) = NAND_CMD_ERASE1;
  __DSB();

  /* Update the NAND controller state */
  hnand->State = HAL_NAND_STATE_READY;

  /* Process unlocked */
  __HAL_UNLOCK(hnand);

  return HAL_OK;
}
/*
 * Disable the NVIC interrupt line belonging to the given HAL timer.
 * Looks up the IRQ number from the timer handle table, then disables it.
 */
void HAL_timer_disable_interrupt(const uint8_t timer_num) {
  const IRQn_Type IRQ_Id = IRQn_Type(getTimerIrq(TimerHandle[timer_num].timer));
  HAL_NVIC_DisableIRQ(IRQ_Id);

  // We NEED memory barriers to ensure Interrupts are actually disabled!
  // ( https://dzone.com/articles/nvic-disabling-interrupts-on-arm-cortex-m-and-the )
  __DSB();
  __ISB();
}
/*
 * CSI instance 0 interrupt entry: forwards to the driver ISR registered
 * for this instance with its handle.
 */
void CSI0_DriverIRQHandler(void)
{
    s_csiIsr(CSI, s_csiHandle[0]);
    /* Add for ARM errata 838869, affects Cortex-M4, Cortex-M4F
       Store immediate overlapping exception return operation might vector
       to incorrect interrupt */
#if defined __CORTEX_M && (__CORTEX_M == 4U)
    __DSB();
#endif
}
/*
 * CTIMER instance 4 interrupt entry: dispatches to the shared generic
 * CTIMER handler with the instance index.
 */
void CTIMER4_DriverIRQHandler(void)
{
    CTIMER_GenericIRQHandler(4);
    /* Add for ARM errata 838869, affects Cortex-M4, Cortex-M4F
       Store immediate overlapping exception return operation might vector
       to incorrect interrupt */
#if defined __CORTEX_M && (__CORTEX_M == 4U)
    __DSB();
#endif
}
inline void usbdbg_set_irq_enabled(bool enabled) { if (enabled) { HAL_NVIC_EnableIRQ(OTG_FS_IRQn); } else { HAL_NVIC_DisableIRQ(OTG_FS_IRQn); } __DSB(); __ISB(); }
/**
 * @brief enable the MPU
 *
 * Sets the VLD bit in the NXP SYSMPU control/error status register,
 * turning the memory protection unit on.
 */
void arm_core_mpu_enable(void)
{
    /* Enable MPU */
    SYSMPU->CESR |= SYSMPU_CESR_VLD_MASK;

    /* Make sure that all the registers are set before proceeding */
    __DSB();
    __ISB();
}
/**
 * @brief disable the MPU
 *
 * The leading DSB drains outstanding memory accesses so nothing is in
 * flight while the protection configuration changes.
 */
void arm_core_mpu_disable(void)
{
    __DSB();

    /* Disable MPU */
    SYSMPU->CESR &= ~SYSMPU_CESR_VLD_MASK;

    /* Clear MPU error status (SPERR bits are write-1-to-clear — TODO confirm
       against the SYSMPU reference manual, as the code ORs the mask in). */
    SYSMPU->CESR |= SYSMPU_CESR_SPERR_MASK;
}
/*
 * Enter the chip's EM1 low-power mode. With SLEEPDEEP cleared in SCB->SCR,
 * WFI enters ordinary sleep (EM1) rather than deep sleep; the core resumes
 * on any enabled interrupt.
 */
static void prvLowPowerMode1( void )
{
    /* Clear SLEEPDEEP for EM1 */
    SCB->SCR &= ~( 1 << SCB_SCR_SLEEPDEEP_Pos );

    /* Power down. */
    __DSB();
    __WFI();
}
/*
 * Turn off the level-2 cache controller. All entries are cleaned and
 * invalidated first so no dirty data is lost when the cache goes offline.
 */
void L2CACHE_Disable(void)
{
    /* First CleanInvalidate all enties in the cache. */
    L2CACHE_CleanInvalidate();

    /* Disable the level 2 cache controller. */
    L2CACHEC->REG1_CONTROL &= ~L2CACHEC_REG1_CONTROL_CE_MASK;

    /* DSB - data sync barrier.*/
    __DSB();
}
/*
 * Firmware entry point: initialises driver/test state, clocks, SysTick and
 * peripherals, then loops writing ramping test patterns to the SPI data
 * registers while a test is active.
 */
int main(void)
{
    int i = 0;

    /* Reset driver state: ADC halted, all channel gains/messages cleared. */
    driver_stat.statusAdc = HALT_ADC;
    for (i = 0; i < 4; i++) {
        driver_stat.gain[i] = 0;
        driver_stat.rec_message[i] = 0;
    }
    driver_stat.is_overload = 0;
    driver_stat.enable_monitoring = 0;
    driver_stat.mut = 0;
    test_stat.statusTest = HALT_TEST;

    NVIC_PriorityGroupConfig(NVIC_PriorityGroup_3);

    /* Setup SysTick Timer. NOTE(review): SystemCoreClock / 10 gives a tick
       every 100 ms (10 Hz), not the 1 ms the original comment claimed. */
    if (SysTick_Config(SystemCoreClock / 10)) {
        /* Capture error */
        while (1) {}
    }

    initial_spi();
    initial_gpio();
    initial_adc();
    initial_i2c();
    initial_mco();
    pga2505_write();
    driver_stat.enable_monitoring = 1;

    while (1) {
        if (test_stat.statusTest != HALT_TEST) {
            /* Push two ramping pattern words to each SPI; DSB between the
               pairs orders the stores (spi1/spi3 presumably point at
               memory-mapped SPI data registers — TODO confirm). */
            *spi3 = 100 + i;
            __DSB();
            *spi3 = 200 + i;
            *spi1 = 300 + i;
            __DSB();
            *spi1 = 400 + i;

            i++;
            if (i > 100)
                i = 0;

            /* A stop request lets the current burst finish, then halts. */
            if (test_stat.statusTest == DURING_STOP_TEST)
                test_stat.statusTest = HALT_TEST;

            Delay(2);
        }
    }
}
/*----------------------------------------------------------------------------
  System Initialization (Cortex-A)

  Brings up the memory system in the required order: invalidate TLB, branch
  predictor, instruction and data caches; build the MMU translation table;
  then enable MMU, caches, branch prediction, optional L2 cache, optional
  FPU, and finally the interrupt controller. The sequence is order-critical;
  each invalidate is made visible with DSB/ISB before the next step.
 *----------------------------------------------------------------------------*/
void SystemInit (void)
{
/* do not use global variables because this function is called before
   reaching pre-main. RW section may be overwritten afterwards. */

  // Invalidate entire Unified TLB
  __set_TLBIALL(0);

  // Invalidate entire branch predictor array
  __set_BPIALL(0);
  __DSB();
  __ISB();

  //  Invalidate instruction cache and flush branch target cache
  __set_ICIALLU(0);
  __DSB();
  __ISB();

  //  Invalidate data cache
  L1C_InvalidateDCacheAll();

  // Create Translation Table
  MMU_CreateTranslationTable();

  // Enable MMU
  MMU_Enable();

  // Enable Caches
  L1C_EnableCaches();
  L1C_EnableBTAC();

#if (__L2C_PRESENT == 1)
  // Enable GIC
  L2C_Enable();
#endif

#if ((__FPU_PRESENT == 1) && (__FPU_USED == 1))
  // Enable FPU
  __FPU_Enable();
#endif

  // IRQ Initialize
  IRQ_Initialize();
}
/**
 * Enter request block.
 *
 * This is a helper function used in all request functions to atomically
 * find an empty slot in request queue and allow atomic slot update.
 *
 * Interrupts stay disabled on return; the matching req_exit() re-enables
 * them after the slot has been filled.
 *
 * @return Pointer to an empty slot in the request queue.
 */
static nrf_drv_radio802154_req_data_t * req_enter(void)
{
    __disable_irq();
    /* Barriers make the PRIMASK change take effect before the queue is read. */
    __DSB();
    __ISB();

    assert(!req_queue_is_full());

    return &m_req_queue[m_req_w_ptr];
}
/*
 * Request a cooperative context switch by pending the PendSV exception;
 * the actual switch happens in the PendSV handler.
 */
void vPortYield( void )
{
    /* Set a PendSV to request a context switch. */
    *(portNVIC_INT_CTRL) = portNVIC_PENDSVSET;

    /* Barriers are normally not required but do ensure the code is completely
    within the specified behaviour for the architecture. */
    __DSB();
    __ISB();
}
/**
\brief Test case: TC_CoreFunc_FPSCR
\details
- Check if __get_FPSCR and __set_FPSCR intrinsics can be used
  (reads FPSCR, writes the complement, verifies the value changed when an
  FPU is in use, then restores the original value).
*/
void TC_CoreFunc_FPSCR(void) {
  uint32_t fpscr = __get_FPSCR();
  __ISB();
  __DSB();

  /* Write the bitwise complement; on a core with an FPU at least some
     writable bits must flip. */
  __set_FPSCR(~fpscr);
  __ISB();
  __DSB();

  uint32_t result = __get_FPSCR();

  /* Restore the original register value. */
  __set_FPSCR(fpscr);

#if (defined (__FPU_USED ) && (__FPU_USED == 1U))
  ASSERT_TRUE(result != fpscr);
#else
  (void)result;
#endif
}
/*
 * FLEXCOMM instance 9 interrupt entry: dispatches to whichever driver
 * handler (USART/SPI/I2C/...) registered itself for this instance.
 */
void FLEXCOMM9_DriverIRQHandler(void)
{
    assert(s_flexcommIrqHandler[9]);
    s_flexcommIrqHandler[9]((void *)s_flexcommBaseAddrs[9], s_flexcommHandle[9]);
    /* Add for ARM errata 838869, affects Cortex-M4, Cortex-M4F
       Store immediate overlapping exception return operation might vector
       to incorrect interrupt */
#if defined __CORTEX_M && (__CORTEX_M == 4U)
    __DSB();
#endif
}
/*
 * Trigger a context switch from inside a critical section: pend PendSV,
 * open a brief interrupt window so it can be taken, then re-enter the
 * critical section with the caller's original BASEPRI restored.
 */
void nOS_SwitchContext (void)
{
    nOS_StatusReg sr = __get_BASEPRI();

    /* Request context switch: set PENDSVSET (bit 28) in the ICSR at
       0xE000ED04. */
    *(volatile uint32_t *)0xE000ED04UL = 0x10000000UL;

    /* Leave critical section so the pended PendSV can fire. */
    __set_BASEPRI(0);
    __DSB();
    __ISB();
    __no_operation();   /* window in which the context switch is taken */

    /* Enter critical section */
    __set_BASEPRI(sr);
    __DSB();
    __ISB();
}
/**
 * Exit request block.
 *
 * This is a helper function used in all request functions to end atomic slot update
 * and trigger SWI to process the request from the slot.
 *
 * Counterpart of req_enter(): advances the write pointer past the slot
 * filled by the caller, kicks the EGU software interrupt, and re-enables
 * the interrupts req_enter() disabled.
 */
static void req_exit(void)
{
    req_queue_ptr_increment(&m_req_w_ptr);

    nrf_egu_task_trigger(SWI_EGU, REQ_TASK);

    __enable_irq();
    /* Barriers make the PRIMASK change take effect before returning. */
    __DSB();
    __ISB();
}
void nrf_nvmc_write_byte(uint32_t address, uint8_t value) { uint32_t byte_shift = address & (uint32_t)0x03; uint32_t address32 = address & ~byte_shift; // Address to the word this byte is in. uint32_t value32 = (*(uint32_t*)address32 & ~((uint32_t)0xFF << (byte_shift << (uint32_t)3))); value32 = value32 + ((uint32_t)value << (byte_shift << 3)); // Enable write. NRF_NVMC->CONFIG = (NVMC_CONFIG_WEN_Wen << NVMC_CONFIG_WEN_Pos); __ISB(); __DSB(); *(uint32_t*)address32 = value32; wait_for_flash_ready(); NRF_NVMC->CONFIG = (NVMC_CONFIG_WEN_Ren << NVMC_CONFIG_WEN_Pos); __ISB(); __DSB(); }
/**
 * Execute the application binary
 *
 * \param addr Application start address.
 * \return If success, no return;
 *         1 - address alignment error;
 *         2 - address not executable.
 */
static uint8_t _app_exec(void *addr)
{
	uint32_t i;

	/* Check parameters: vector table base must be 128-byte aligned and
	   within executable SRAM. */
	if ((uint32_t)addr & 0x7F) {
		return 1;
	}
	if ((uint32_t)addr > CM_SRAM_END) {
		return 2;
	}

	__disable_irq();

	/* Disable SysTick */
	SysTick->CTRL = 0;

	/* Disable IRQs & clear pending IRQs so the application starts from a
	   clean interrupt state. */
	for (i = 0; i < 8; i++) {
		NVIC->ICER[i] = 0xFFFFFFFF;
		NVIC->ICPR[i] = 0xFFFFFFFF;
	}

	/* Switch clock to slow RC */
	osc_enable(OSC_SLCK_32K_RC);
	osc_wait_ready(OSC_SLCK_32K_RC);
	pmc_switch_mck_to_sclk(SYSCLK_PRES_1);

	/* Switch clock to fast RC */
	osc_enable(OSC_MAINCK_12M_RC);
	osc_wait_ready(OSC_MAINCK_12M_RC);
	pmc_switch_mck_to_mainck(SYSCLK_PRES_1);

	/* Modify vector table location; barriers order the VTOR update against
	   surrounding instruction flow. */
	__DSB();
	__ISB();
	SCB->VTOR = ((uint32_t)addr & SCB_VTOR_TBLOFF_Msk);
	__DSB();
	__ISB();

	__enable_irq();

	/* Jump to application */
	jump_to_app(addr);

	/* Never be here */
	return 0;
}
/**
 * @brief enable the MPU
 *
 * ARMv7-M variant: enables the MPU with PRIVDEFENA set, so privileged code
 * falls back to the default memory map where no region matches.
 */
void arm_core_mpu_enable(void)
{
	/* Enable MPU and use the default memory map as a
	 * background region for privileged software access.
	 */
	MPU->CTRL = MPU_CTRL_ENABLE_Msk | MPU_CTRL_PRIVDEFENA_Msk;

	/* Make sure that all the registers are set before proceeding */
	__DSB();
	__ISB();
}
/*
 * MPU Initialisation, Setup basic regions and non execute for stack/heap
 *
 * Programs MPU regions 1-5 by writing RegionBaseAddr (which also selects
 * the region number via REGION_Valid) and RegionAttrSize, then enables the
 * MPU. Region layout assumed — TODO confirm against the linker script:
 * ROM at 0x0, RAM at 0x20000000, stack/heap, and the UART register page.
 */
void SCS_init(void)
{
    /* Configure region 1 to cover ROM (Executable, Read-only) */
    /* Start address, Region field valid, Region number */
    SCS.MPU.RegionBaseAddr = 0x00000000 | REGION_Valid | 1;
    /* Access control bits, Size, Enable  0x06030000 */
    SCS.MPU.RegionAttrSize = RO | CACHEABLE | BUFFERABLE | REGION_2M | REGION_Enabled;

    /* Configure a region to cover RAM (Executable, Read-Write) */
    SCS.MPU.RegionBaseAddr = 0x20000000 | REGION_Valid | 2;  //0x03030000
    SCS.MPU.RegionAttrSize = FULL_ACCESS | CACHEABLE | BUFFERABLE | REGION_1M | REGION_Enabled;

#ifdef TWO_REGION
    /* Two Region Stack and Heap MPU settings */
    /* Configure a region to cover Stack (Not Executable, Read-Write) */
    /* NOTE(review): `-` binds tighter than `|`, so this evaluates as
       (STACK_BASE - STACK_SIZE) | REGION_Valid | 3 — apparently intended,
       but worth parenthesizing if this line is ever touched. */
    SCS.MPU.RegionBaseAddr = STACK_BASE - STACK_SIZE | REGION_Valid | 3;  //0x13030000
    SCS.MPU.RegionAttrSize = NOT_EXEC | FULL_ACCESS | CACHEABLE | BUFFERABLE | REGION_32K | REGION_Enabled;

    /* Configure a region to cover Heap (Not Executable, Read-Write) */
    SCS.MPU.RegionBaseAddr = HEAP_BASE | REGION_Valid | 4;
    SCS.MPU.RegionAttrSize = NOT_EXEC | FULL_ACCESS | CACHEABLE | BUFFERABLE | REGION_1M | REGION_Enabled;
#endif /* TWO_REGION */

#ifdef ONE_REGION
    /* One Region Stack/Heap Settings */
    /* Configure a region to cover Stack and Heap (Not Executable, Read-Write) */
    SCS.MPU.RegionBaseAddr = STACK_HEAP_BASE | REGION_Valid | 3;
    SCS.MPU.RegionAttrSize = NOT_EXEC | FULL_ACCESS | CACHEABLE | BUFFERABLE | REGION_1M | REGION_Enabled;
#endif /* ONE_REGION */

    /* Configure a region to cover UART Registers (Not Executable, Read-Write) */
    SCS.MPU.RegionBaseAddr = 0x40018000 | REGION_Valid | 5;
    SCS.MPU.RegionAttrSize = NOT_EXEC | FULL_ACCESS | REGION_4K | REGION_Enabled;

    /* Enable the MPU */
    SCS.MPU.Ctrl |= 1;

    /* If we are using Cortex-M3 rev1 or later, enable hardware stack alignment */
#if defined __TARGET_CPU_CORTEX_M3 && !defined __TARGET_CPU_CORTEX_M3_REV0
    SCS.ConfigCtrl |= 0x200;
#endif

    /* Force Memory Writes before continuing */
    __DSB();
    /* Flush and refill pipeline with updated permissions */
    __ISB();
}
/*******************************************************************************
* Function Name: Cy_SystemInitFpuEnable
****************************************************************************//**
*
* Enables the FPU if it is used. The function is called from the startup file.
*
* Grants full access to coprocessors CP10/CP11 in CPACR inside a short
* interrupt-save/restore window so the change cannot race an ISR that
* touches FP state.
*
*******************************************************************************/
void Cy_SystemInitFpuEnable(void)
{
    #if defined (__FPU_USED) && (__FPU_USED == 1U)
        uint32_t  interruptState;
        interruptState = Cy_SaveIRQ();
        SCB->CPACR |= SCB_CPACR_CP10_CP11_ENABLE;
        /* Ensure the CPACR write completes before any FP instruction executes. */
        __DSB();
        __ISB();
        Cy_RestoreIRQ(interruptState);
    #endif /* (__FPU_USED) && (__FPU_USED == 1U) */
}
/// Block Kernel (disable: thread switching, time tick, post ISR processing). static void KernelBlock (void) { OS_Tick_Disable(); osRtxInfo.kernel.blocked = 1U; __DSB(); if (GetPendSV() != 0U) { ClrPendSV(); osRtxInfo.kernel.pendSV = 1U; } }
/**
  * @brief  Enable the MPU.
  * @param  MPU_Control Specifies the control mode of the MPU during hard fault,
  *          NMI, FAULTMASK and privileged access to the default memory
  *          This parameter can be one of the following values:
  *            @arg MPU_HFNMI_PRIVDEF_NONE
  *            @arg MPU_HARDFAULT_NMI
  *            @arg MPU_PRIVILEGED_DEFAULT
  *            @arg MPU_HFNMI_PRIVDEF
  * @retval None
  */
void HAL_MPU_Enable(uint32_t MPU_Control)
{
  /* Enable the MPU */
  MPU->CTRL = MPU_Control | MPU_CTRL_ENABLE_Msk;

  /* Enable fault exceptions so MPU violations raise MemManage instead of
     escalating to HardFault. */
  SCB->SHCSR |= SCB_SHCSR_MEMFAULTENA_Msk;

  /* Ensure MPU setting take effects */
  __DSB();
  __ISB();
}
/**
  * @brief  CPU L1-Cache enable.
  * @param  None
  * @retval None
  */
static void CPU_CACHE_Enable(void)
{
  /* Enable branch prediction (bit 18 of SCB->CCR — presumably the BP bit
     on this core; TODO confirm against the core's CCR bit assignments). */
  SCB->CCR |= (1 <<18);
  __DSB();

  /* Enable I-Cache */
  SCB_EnableICache();

  /* Enable D-Cache */
  SCB_EnableDCache();
}
/// Unblock Kernel static void KernelUnblock (void) { osRtxInfo.kernel.blocked = 0U; __DSB(); if (osRtxInfo.kernel.pendSV != 0U) { osRtxInfo.kernel.pendSV = 0U; SetPendSV(); } OS_Tick_Enable(); }