/*
 * Run one power-management iteration: put the CPU to sleep until an event
 * occurs, optionally accumulating the time spent asleep for the CPU-usage
 * monitor. Intended to be called from the application's main loop.
 */
void nrf_pwr_mgmt_run(void)
{
#if NRF_PWR_MGMT_CONFIG_FPU_SUPPORT_ENABLED
    /*
     * Clear FPU exceptions.
     * Without this step, the FPU interrupt is marked as pending,
     * preventing system from sleeping.
     */
    uint32_t fpscr = __get_FPSCR();
    __set_FPSCR(fpscr & ~0x9Fu);   /* clear the cumulative exception flag bits */
    __DMB();                       /* order the FPSCR write before clearing the pending IRQ */
    NVIC_ClearPendingIRQ(FPU_IRQn);

    // Assert if a critical FPU exception is signaled.
    ASSERT((fpscr & 0x03) == 0);
#endif // NRF_PWR_MGMT_CONFIG_FPU_SUPPORT_ENABLED

    SLEEP_LOCK();

#if NRF_PWR_MGMT_CONFIG_CPU_USAGE_MONITOR_ENABLED
    /* Timestamps for the sleep-time accounting below. */
    uint32_t sleep_start;
    uint32_t sleep_end;
    uint32_t sleep_duration;

    sleep_start = app_timer_cnt_get();
#endif // NRF_PWR_MGMT_CONFIG_CPU_USAGE_MONITOR_ENABLED

    DEBUG_PIN_SET();

    // Wait for an event.
#ifdef SOFTDEVICE_PRESENT
    ret_code_t ret_code = sd_app_evt_wait();
    if (ret_code == NRF_ERROR_SOFTDEVICE_NOT_ENABLED)
    {
        /* SoftDevice not enabled: fall back to the plain WFE/SEV/WFE
         * sequence used in the #else branch below. */
        __WFE();
        __SEV();
        __WFE();
    }
    else
    {
        APP_ERROR_CHECK(ret_code);
    }
#else
    __WFE();
    __SEV();
    __WFE();
#endif // SOFTDEVICE_PRESENT

    DEBUG_PIN_CLEAR();

#if NRF_PWR_MGMT_CONFIG_CPU_USAGE_MONITOR_ENABLED
    /* Accumulate how long we slept; the diff helper handles counter wrap,
     * its return value is intentionally ignored. */
    sleep_end = app_timer_cnt_get();
    UNUSED_VARIABLE(app_timer_cnt_diff_compute(sleep_end,
                                               sleep_start,
                                               &sleep_duration));
    m_ticks_sleeping += sleep_duration;
#endif // NRF_PWR_MGMT_CONFIG_CPU_USAGE_MONITOR_ENABLED

    SLEEP_RELEASE();
}
// Called in the Assert handler: the OS isn't running so use crude loops // This function does not return void HardfaultBlink(void) { while(1) { STM_EVAL_LEDOff(LED3); STM_EVAL_LEDOff(LED4); STM_EVAL_LEDOff(LED5); STM_EVAL_LEDOff(LED6); for(uint32_t i = 0; i < CRUDE_1MS * 500; i++) { __DMB(); } STM_EVAL_LEDOn(LED3); STM_EVAL_LEDOn(LED4); STM_EVAL_LEDOn(LED5); STM_EVAL_LEDOn(LED6); for(uint32_t i = 0; i < CRUDE_1MS * 500; i++) { __DMB(); } } }
/*
 * Initialize the altimeter interface: enable the GPIOC/SYSCFG clocks, read
 * the sensor's 11-word calibration EEPROM over the shared I2C bus, arm the
 * EXTI line on the end-of-conversion (EOC) pin, and kick off the first
 * asynchronous conversion.
 */
void alt_init() {
    RCC->AHB1ENR |= RCC_AHB1ENR_GPIOCEN;
    RCC->APB2ENR |= RCC_APB2ENR_SYSCFGEN;
    __DMB();    /* complete the clock-enable writes before touching peripherals */

    // read EEPROM
    sched_sleep(10);    /* startup delay before first bus access -- TODO confirm required length */
    i2c_shared_wait();
    i2c_shared_lock();
    i2c_polling_start(addr, false);
    i2c_polling_write(REG_AC1_MSB);     /* start at the first calibration register */
    i2c_polling_start(addr, true);      /* repeated start, switch to read */
    for (int i=0; i<11; i++) {
        uint8_t msb = i2c_polling_read(true);
        /* presumably the bool is the ACK flag: the final byte gets false -- confirm */
        uint8_t lsb = i2c_polling_read(i != 10);
        eeprom.vals[i] = (msb << 8) | lsb;  /* registers are big-endian 16-bit words */
    }
    i2c_polling_stop();
    i2c_shared_unlock();

    // enable EXTI
    SYSCFG->EXTICR[1] |= SYSCFG_EXTICR2_EXTI5_PC;   /* route EXTI line 5 to port C */
    EXTI->RTSR |= (1 << PIN_EOC);                   /* rising edge on EOC */
    EXTI->IMR |= (1 << PIN_EOC);
    util_enable_irq(EXTI9_5_IRQn, IRQ_PRI_HIGH);

    // jumpstart the async process
    if (!(GPIOC->IDR & (1 << PIN_EOC))) {
        /* EOC currently low: start a temperature conversion; the EXTI edge
         * will fire when it completes. */
        i2c_shared_wait();
        i2c_shared_lock();
        i2c_async_send(addr, cmd_conv_temp, sizeof(cmd_conv_temp), i2c_shared_done_unlock);
    } else {
        /* EOC already high: EXTI is edge-triggered, so run the handler by hand. */
        irq_exti95();
    }
}
OS_RESULT rt_mut_delete (OS_ID mutex) {
  /* Delete a mutex object */
  /* Restores the owner's original priority (undoing priority inheritance),
   * releases every task still blocked on the mutex with return value 0,
   * reschedules if a released task outranks the caller, then marks the
   * control block unused. */
  P_MUCB p_MCB = mutex;
  P_TCB  p_TCB;

  __DMB();
  /* Restore owner task's priority. */
  if (p_MCB->level != 0) {
    p_MCB->owner->prio = p_MCB->prio;
    if (p_MCB->owner != os_tsk.run) {
      /* owner is not the caller: re-sort it in whatever list it is queued */
      rt_resort_prio (p_MCB->owner);
    }
  }

  while (p_MCB->p_lnk != NULL) {
    /* A task is waiting for mutex. */
    p_TCB = rt_get_first ((P_XCB)p_MCB);
    rt_ret_val(p_TCB, 0/*osOK*/);
    rt_rmv_dly(p_TCB);
    p_TCB->state = READY;
    rt_put_prio (&os_rdy, p_TCB);
  }

  if (os_rdy.p_lnk && (os_rdy.p_lnk->prio > os_tsk.run->prio)) {
    /* preempt running task */
    rt_put_prio (&os_rdy, os_tsk.run);
    os_tsk.run->state = READY;
    rt_dispatch (NULL);
  }

  p_MCB->cb_type = 0;   /* mark the control block as free */

  return (OS_R_OK);
}
/*
 * Drive the slave-select line on PA4.
 * true  writes the BSRRH (reset) half of BSRR, false writes the BSRRL (set)
 * half -- BSRRL/BSRRH are the 16-bit set/reset halves of BSRR in the older
 * ST peripheral headers.
 */
static void ss(bool ss)
{
    if (ss) {
        GPIOA->BSRRH = (1 << 4);   /* reset PA4 */
    } else {
        GPIOA->BSRRL = (1 << 4);   /* set PA4 */
    }
    __DMB();   /* complete the pin write before continuing */
}
OS_RESULT rt_sem_delete (OS_ID semaphore) {
  /* Delete semaphore */
  /* Releases every task still blocked on the semaphore with return value 0
   * and makes it READY, reschedules if any released task outranks the
   * caller, then marks the control block unused. */
  P_SCB p_SCB = semaphore;
  P_TCB p_TCB;

  __DMB();
  while (p_SCB->p_lnk != NULL) {
    /* A task is waiting for token */
    p_TCB = rt_get_first ((P_XCB)p_SCB);
    rt_ret_val(p_TCB, 0);
    rt_rmv_dly(p_TCB);
    p_TCB->state = READY;
    rt_put_prio (&os_rdy, p_TCB);
  }

  if (os_rdy.p_lnk && (os_rdy.p_lnk->prio > os_tsk.run->prio)) {
    /* preempt running task */
    rt_put_prio (&os_rdy, os_tsk.run);
    os_tsk.run->state = READY;
    rt_dispatch (NULL);
  }

  p_SCB->cb_type = 0;   /* mark the control block as free */

  return (OS_R_OK);
}
/*
 * Initialize the magnetometer: enable GPIOB/SYSCFG clocks, verify the
 * device's ID register reads 'H' (halting the kernel otherwise), write the
 * configuration registers, arm EXTI on the data-ready (DRDY) pin, and start
 * the first asynchronous read.
 */
void mag_init() {
    RCC->AHB1ENR |= RCC_AHB1ENR_GPIOBEN;
    RCC->APB2ENR |= RCC_APB2ENR_SYSCFGEN;
    __DMB();    /* complete the clock-enable writes before touching peripherals */

    // configure mag
    i2c_shared_wait();
    i2c_shared_lock();
    i2c_polling_start(addr, false);
    i2c_polling_write(REG_IDA);     // read the IDA register
    i2c_polling_start(addr, true);
    if (i2c_polling_read(false) != 'H')
        kernel_halt("Failed to identify magnetometer");
    i2c_polling_start(addr, false);
    i2c_polling_write(REG_CONFIGA);
    i2c_polling_write(0);       // config a
    i2c_polling_write(0x40);    // config b
    i2c_polling_stop();
    i2c_shared_unlock();

    // enable EXTI
    SYSCFG->EXTICR[3] |= SYSCFG_EXTICR4_EXTI12_PB;  /* route EXTI line 12 to port B */
    EXTI->RTSR |= (1 << PIN_DRDY);                  /* rising edge on DRDY */
    EXTI->IMR |= (1 << PIN_DRDY);
    util_enable_irq(EXTI15_10_IRQn, IRQ_PRI_HIGH);

    // jumpstart the async process
    if (!(GPIOB->IDR & (1 << PIN_DRDY))) {
        i2c_shared_wait();
        i2c_shared_lock();
        i2c_async_send(addr, read_cmd, sizeof(read_cmd), i2c_shared_done_unlock); // if DRDY is low, send a command
    } else {
        irq_exti1510(); // if DRDY is high, run its IRQ since EXTI looks for edges not levels
    }
}
/*
 * Copy a mouse report into the UDI buffer and hand it to the USB stack.
 * The copy and flag update run with interrupts masked; the previous PRIMASK
 * state is restored afterwards. Compiled out unless MOUSEKEY_ENABLE is set.
 */
void send_mouse(report_mouse_t *report)
{
#ifdef MOUSEKEY_ENABLE
    uint32_t saved_primask = __get_PRIMASK();
    __disable_irq();
    __DMB();

    memcpy(udi_hid_mou_report, report, UDI_HID_MOU_REPORT_SIZE);
    udi_hid_mou_b_report_valid = 1;
    udi_hid_mou_send_report();

    __DMB();
    __set_PRIMASK(saved_primask);
#endif //MOUSEKEY_ENABLE
}
/*
 * Acquire the mutex, spinning until it is free.
 * Uses the ARM exclusive-access pair: LDREXB observes the byte and STREXB
 * only succeeds if nothing else wrote it in between.
 */
void mutex_lock(mutex_t* mutex)
{
    do {
        /* Spin while the lock is held. */
        while (__LDREXB(mutex) == MUTEX_LOCKED) {
        }
        /* Saw it free: try to claim it; retry from the load on failure. */
    } while (__STREXB(MUTEX_LOCKED, mutex) != 0);
    __DMB();   /* barrier: subsequent accesses are ordered after acquisition */
}
/**
 * @brief disable the MPU
 *
 * Issues a data memory barrier so in-flight accesses complete under the old
 * MPU configuration, then clears the MPU control register.
 * NOTE(review): ARM guidance also suggests DSB/ISB after reconfiguring the
 * MPU -- confirm whether the caller provides that.
 */
void arm_core_mpu_disable(void)
{
	/* Force any outstanding transfers to complete before disabling MPU */
	__DMB();

	/* Disable MPU */
	MPU->CTRL = 0;
}
/*
 * Install an interrupt handler address into the active vector table.
 * The table base comes from SCB->VTOR; device interrupts sit
 * NVIC_USER_IRQ_OFFSET entries past the system exception vectors.
 */
void NVIC_SetVector(IRQn_Type IRQn, uint32_t vector)
{
    uint32_t *vector_table = (uint32_t *)SCB->VTOR;
    vector_table[IRQn + NVIC_USER_IRQ_OFFSET] = vector;
    __DMB();   /* complete the table write before any dependent access */
}
/*
 * Send a consumer-page HID usage. The report fields and valid flag are
 * updated with interrupts masked; the previous PRIMASK state is restored
 * afterwards. Compiled out unless EXTRAKEY_ENABLE is set.
 */
void send_consumer(uint16_t data)
{
#ifdef EXTRAKEY_ENABLE
    uint32_t saved_primask = __get_PRIMASK();
    __disable_irq();
    __DMB();

    udi_hid_exk_report.desc.report_id   = REPORT_ID_CONSUMER;
    udi_hid_exk_report.desc.report_data = data;
    udi_hid_exk_b_report_valid          = 1;
    udi_hid_exk_send_report();

    __DMB();
    __set_PRIMASK(saved_primask);
#endif //EXTRAKEY_ENABLE
}
/*
 * Try to acquire the mutex without blocking.
 * Returns true on success, false if the lock was observed held.
 */
bool mutex_trylock(mutex_t* mutex)
{
    while (__LDREXB(mutex) != MUTEX_LOCKED) {
        /* Lock looked free: attempt the exclusive store. */
        if (__STREXB(MUTEX_LOCKED, mutex) == 0) {
            __DMB();   /* barrier: subsequent accesses are ordered after acquisition */
            return true;
        }
        /* STREXB lost the race -- reload and re-check. */
    }
    return false;
}
/*
 * Send a system-control HID usage. Nonzero codes are rebased relative to
 * SYSTEM_POWER_DOWN (starting at 1) to match the report's encoding. The
 * report update runs with interrupts masked; the previous PRIMASK state is
 * restored afterwards. Compiled out unless EXTRAKEY_ENABLE is set.
 */
void send_system(uint16_t data)
{
#ifdef EXTRAKEY_ENABLE
    uint16_t usage = data;
    if (usage != 0) {
        usage = usage - SYSTEM_POWER_DOWN + 1;
    }

    uint32_t saved_primask = __get_PRIMASK();
    __disable_irq();
    __DMB();

    udi_hid_exk_report.desc.report_id   = REPORT_ID_SYSTEM;
    udi_hid_exk_report.desc.report_data = usage;
    udi_hid_exk_b_report_valid          = 1;
    udi_hid_exk_send_report();

    __DMB();
    __set_PRIMASK(saved_primask);
#endif //EXTRAKEY_ENABLE
}
/**
  * @brief  Disables the MPU
  *
  * Waits for outstanding memory transfers, disables the MemManage fault
  * exception, then clears the MPU control register.
  * @retval None
  */
void HAL_MPU_Disable(void)
{
  /* Make sure outstanding transfers are done */
  __DMB();

  /* Disable fault exceptions */
  SCB->SHCSR &= ~SCB_SHCSR_MEMFAULTENA_Msk;

  /* Disable the MPU and clear the control register*/
  MPU->CTRL = 0U;
}
/*
 * Return the current time in microseconds, combining the RTC counter with
 * the software overflow offset (sTimeOffset). The counter is sampled twice
 * around the offset read so an overflow between the reads can be detected
 * and handled.
 */
inline uint64_t nrf5AlarmGetCurrentTime()
{
    uint32_t rtcValue1;
    uint32_t rtcValue2;
    uint32_t offset;

    rtcValue1 = nrf_rtc_counter_get(RTC_INSTANCE);
    __DMB();                 /* order counter read / offset read / counter read */
    offset = sTimeOffset;
    __DMB();
    rtcValue2 = nrf_rtc_counter_get(RTC_INSTANCE);

    if ((rtcValue2 < rtcValue1) || (rtcValue1 == 0))
    {
        // Overflow detected. Additional condition (rtcValue1 == 0) covers situation when overflow occurred in
        // interrupt state, before this function was entered. But in general, this function shall not be called
        // from interrupt other than alarm interrupt.

        // Wait at least 20 cycles, to ensure that if interrupt is going to be called, it will be called now.
        for (uint32_t i = 0; i < 4; i++)
        {
            __NOP();
            __NOP();
            __NOP();
        }

        // If the event flag is still on, it means that the interrupt was not called, as we are in interrupt state.
        if (nrf_rtc_event_pending(RTC_INSTANCE, NRF_RTC_EVENT_OVERFLOW))
        {
            HandleOverflow();
        }

        /* Re-read the offset now that the overflow has been accounted for. */
        offset = sTimeOffset;
    }

    return US_PER_MS * (uint64_t)offset + TicksToTime(rtcValue2);
}
// Called in the Assert handler: the OS may have failed so use crude loops // This function does not return void AssertBlink(void) { STM_EVAL_LEDOff(LED5); while(1) { STM_EVAL_LEDOff(LED5); for(uint32_t i = 0; i < CRUDE_1MS * 500; i++) { __DMB(); } STM_EVAL_LEDOn(LED5); for(uint32_t i = 0; i < CRUDE_1MS * 500; i++) { __DMB(); } } }
/*
 * Send a keyboard HID report, choosing the 6KRO or NKRO endpoint based on
 * the runtime keymap_config.nkro flag (when NKRO support is compiled in).
 * Waits for any in-flight transfer to finish (running other tasks in the
 * meantime), then copies the report with interrupts masked.
 */
void send_keyboard(report_keyboard_t *report)
{
    uint32_t irqflags;

#ifdef NKRO_ENABLE
    if (!keymap_config.nkro) {
#endif //NKRO_ENABLE
        while (udi_hid_kbd_b_report_trans_ongoing) {
            main_subtasks();
        } //Run other tasks while waiting for USB to be free

        irqflags = __get_PRIMASK();
        __disable_irq();
        __DMB();

        memcpy(udi_hid_kbd_report, report->raw, UDI_HID_KBD_REPORT_SIZE);
        udi_hid_kbd_b_report_valid = 1;
        udi_hid_kbd_send_report();

        __DMB();
        __set_PRIMASK(irqflags);
#ifdef NKRO_ENABLE
    } else {
        while (udi_hid_nkro_b_report_trans_ongoing) {
            main_subtasks();
        } //Run other tasks while waiting for USB to be free

        irqflags = __get_PRIMASK();
        __disable_irq();
        __DMB();

        memcpy(udi_hid_nkro_report, report->raw, UDI_HID_NKRO_REPORT_SIZE);
        udi_hid_nkro_b_report_valid = 1;
        udi_hid_nkro_send_report();

        __DMB();
        __set_PRIMASK(irqflags);
    }
#endif //NKRO_ENABLE
}
OS_RESULT rt_mut_release (OS_ID mutex) {
  /* Release a mutex object */
  /* Decrements the recursion level; on the final release the owner's
   * original priority is restored and, if tasks are waiting, ownership is
   * handed directly to the highest-priority waiter. */
  P_MUCB p_MCB = mutex;
  P_TCB  p_TCB;

  if (p_MCB->level == 0 || p_MCB->owner != os_tsk.run) {
    /* Unbalanced mutex release or task is not the owner */
    return (OS_R_NOK);
  }
  __DMB();
  if (--p_MCB->level != 0) {
    /* Recursive acquisition still outstanding. */
    return (OS_R_OK);
  }
  /* Restore owner task's priority. */
  os_tsk.run->prio = p_MCB->prio;
  if (p_MCB->p_lnk != NULL) {
    /* A task is waiting for mutex. */
    p_TCB = rt_get_first ((P_XCB)p_MCB);
#ifdef __CMSIS_RTOS
    rt_ret_val(p_TCB, 0/*osOK*/);
#else
    rt_ret_val(p_TCB, OS_R_MUT);
#endif
    rt_rmv_dly (p_TCB);
    /* A waiting task becomes the owner of this mutex. */
    p_MCB->level = 1;
    p_MCB->owner = p_TCB;
    p_MCB->prio  = p_TCB->prio;
    /* Priority inversion, check which task continues. */
    if (os_tsk.run->prio >= rt_rdy_prio()) {
      /* Current task still outranks the ready list: switch to the waiter. */
      rt_dispatch (p_TCB);
    }
    else {
      /* Ready task has higher priority than running task. */
      rt_put_prio (&os_rdy, os_tsk.run);
      rt_put_prio (&os_rdy, p_TCB);
      os_tsk.run->state = READY;
      p_TCB->state      = READY;
      rt_dispatch (NULL);
    }
  }
  else {
    /* Check if own priority raised by priority inversion. */
    if (rt_rdy_prio() > os_tsk.run->prio) {
      rt_put_prio (&os_rdy, os_tsk.run);
      os_tsk.run->state = READY;
      rt_dispatch (NULL);
    }
  }
  return (OS_R_OK);
}
/** \brief  Test case: TC_MutexInterrupts
  \details
  - Call all mutex management functions from the ISR
  Each step pends the software interrupt and then asserts on the status the
  ISR stored; all mutex calls made from interrupt context must fail with
  osErrorISR.
*/
void TC_MutexInterrupts (void) {

  TST_IRQHandler = Mutex_IRQHandler;

  NVIC_EnableIRQ((IRQn_Type)SWI_HANDLER);

  Isr.Ex_Num = 0; /* Test: osMutexCreate */
  NVIC_SetPendingIRQ((IRQn_Type)SWI_HANDLER);
  /* NOTE(review): __DMB after pending the IRQ assumes the ISR has already
   * run before the assert below -- confirm a DSB/ISB is not needed here. */
  __DMB();
  ASSERT_TRUE (ISR_MutexId == NULL);

  Isr.Ex_Num = 1; /* Test: osMutexWait */

  /* Create valid mutex, to be used for ISR function calls */
  ISR_MutexId = osMutexCreate (osMutex (MutexIsr));
  ASSERT_TRUE (ISR_MutexId != NULL);

  if (ISR_MutexId != NULL) {
    NVIC_SetPendingIRQ((IRQn_Type)SWI_HANDLER);
    __DMB();
    ASSERT_TRUE (ISR_OsStat == osErrorISR);

    Isr.Ex_Num = 2; /* Test: osMutexRelease */
    NVIC_SetPendingIRQ((IRQn_Type)SWI_HANDLER);
    __DMB();
    ASSERT_TRUE (ISR_OsStat == osErrorISR);

    Isr.Ex_Num = 3; /* Test: osMutexDelete */
    NVIC_SetPendingIRQ((IRQn_Type)SWI_HANDLER);
    __DMB();
    ASSERT_TRUE (ISR_OsStat == osErrorISR);

    /* Delete mutex */
    ASSERT_TRUE (osMutexDelete (ISR_MutexId) == osOK);
  }

  NVIC_DisableIRQ((IRQn_Type)SWI_HANDLER);
}
/*
 * Bring up the debug USART (USART1) and its GPIO pins.
 *
 * NOTE(review): RCC->AHB1ENR and RCC->APB2ENR are assigned (=), not OR-ed
 * (|=), so this clobbers any clock enables done earlier -- confirm this
 * runs first in the init sequence.
 */
void debug_init() {
    RCC->AHB1ENR = RCC_AHB1ENR_CCMDATARAMEN | RCC_AHB1ENR_GPIOAEN | RCC_AHB1ENR_GPIOBEN;
    RCC->APB2ENR = RCC_APB2ENR_SYSCFGEN | RCC_APB2ENR_USART1EN;
    __DMB();    /* complete clock enables before configuring peripherals */

    /* Port A: alternate-function and mode setup. The 0x7 nibbles in AFR[1]
     * presumably select AF7 (USART1) on the TX/RX pins -- TODO confirm
     * against the board pinout. */
    GPIOA->AFR[1] = 0x00000770;
    GPIOA->AFR[0] = 0x00000000;
    GPIOA->ODR = 0x0000;
    GPIOA->MODER = 0xA8280000;

    /* Port B: outputs/initial levels -- magic values, see board schematic. */
    GPIOB->AFR[1] = 0x00000000;
    GPIOB->AFR[0] = 0x00000000;
    GPIOB->ODR = 0x0032;
    GPIOB->MODER = 0x00000524;

    USART1->BRR = brr;   /* baud-rate divisor computed elsewhere */
    USART1->CR1 = USART_CR1_UE | USART_CR1_TE | USART_CR1_RE;   /* enable USART, TX, RX */
}
void rt_sem_psh (P_SCB p_CB) {
  /* Check if task has to be waken up */
  /* ISR-side semaphore post: wakes the first waiter (making it READY) or,
   * with no waiters, banks the token. Unlike rt_sem_send() it does not call
   * the dispatcher -- presumably deferred until ISR exit; confirm. */
  P_TCB p_TCB;

  __DMB();
  if (p_CB->p_lnk != NULL) {
    /* A task is waiting for token */
    p_TCB = rt_get_first ((P_XCB)p_CB);
    rt_rmv_dly (p_TCB);
    p_TCB->state = READY;
#ifdef __CMSIS_RTOS
    rt_ret_val(p_TCB, 1);
#else
    rt_ret_val(p_TCB, OS_R_SEM);
#endif
    rt_put_prio (&os_rdy, p_TCB);
  }
  else {
    /* Store token */
    p_CB->tokens++;
  }
}
/*
 * Initialize the MPU-series inertial sensor over SPI1 with DMA2.
 * Sets up RX/TX DMA streams for the read command/response, enables the SPI
 * peripheral in master mode, configures the NSS/SCK/MISO/MOSI pins, arms
 * EXTI on the sensor's interrupt pin, and resets the sensor to the chosen
 * accel/gyro full-scale ranges.
 */
void mpu_init() {
    // configure clocks
    RCC->AHB1ENR |= RCC_AHB1ENR_GPIOAEN | RCC_AHB1ENR_GPIOCEN | RCC_AHB1ENR_DMA2EN;
    RCC->APB2ENR |= RCC_APB2ENR_SYSCFGEN | RCC_APB2ENR_SPI1EN;
    __DMB();    /* complete clock enables before configuring peripherals */

    // configure receive stream on DMA
    rx_stream->PAR = (uint32_t)&spi->DR;
    rx_stream->M0AR = (uint32_t)read_buf;
    rx_stream->NDTR = sizeof(read_buf);
    rx_stream->CR = (3 << DMA_SxCR_CHSEL_Pos) | DMA_SxCR_MINC | DMA_SxCR_TCIE;  /* IRQ on transfer complete */
    util_enable_irq(DMA2_Stream0_IRQn, IRQ_PRI_KERNEL);

    // configure transmit stream on DMA
    tx_stream->PAR = (uint32_t)&spi->DR;
    tx_stream->M0AR = (uint32_t)read_cmd;
    tx_stream->NDTR = sizeof(read_cmd);
    tx_stream->CR = (3 << DMA_SxCR_CHSEL_Pos) | DMA_SxCR_MINC | DMA_SxCR_DIR_MEM2PER;

    // enable SPI
    spi->CR2 = SPI_CR2_TXDMAEN | SPI_CR2_RXDMAEN;
    /* software NSS, master, CPOL=CPHA=1 (SPI mode 3), clock / 128 */
    spi->CR1 = SPI_CR1_SSM | SPI_CR1_SSI | SPI_CR1_BR_DIV128 | SPI_CR1_MSTR | SPI_CR1_CPOL | SPI_CR1_CPHA | SPI_CR1_SPE;

    // configure GPIOs
    GPIOA->BSRRL = (1 << PIN_NSS);  /* NSS idle high before switching to output */
    GPIOA->AFR[0] |= AFRL(PIN_SCK, AF_SPI1) | AFRL(PIN_MISO, AF_SPI1) | AFRL(PIN_MOSI, AF_SPI1);
    GPIOA->MODER |= MODER_OUT(PIN_NSS) | MODER_AF(PIN_SCK) | MODER_AF(PIN_MISO) | MODER_AF(PIN_MOSI);

    // enable EXTI
    SYSCFG->EXTICR[1] |= SYSCFG_EXTICR2_EXTI4_PC;   /* route EXTI line 4 to port C */
    EXTI->RTSR |= (1 << PIN_INT); // enable rising edge
    EXTI->IMR |= (1 << PIN_INT); // enable interrupt from EXTI

    // set up MPU
    mpu_reset(AccelFS::FS4G, GyroFS::FS500DS, 1, 0);
    /* assumes EXTI0..4 IRQ numbers are consecutive so EXTI0_IRQn + PIN_INT
     * lands on the right vector -- TODO confirm for this part */
    util_enable_irq(EXTI0_IRQn + PIN_INT, IRQ_PRI_HIGH); // enable interrupt in NVIC
}
OS_RESULT rt_sem_send (OS_ID semaphore) {
  /* Return a token to semaphore */
  /* Task-side post: if a task is waiting the token goes straight to it and
   * the dispatcher runs; otherwise the token count is incremented. */
  P_SCB p_SCB = semaphore;
  P_TCB p_TCB;

  __DMB();
  if (p_SCB->p_lnk != NULL) {
    /* A task is waiting for token */
    p_TCB = rt_get_first ((P_XCB)p_SCB);
#ifdef __CMSIS_RTOS
    rt_ret_val(p_TCB, 1);
#else
    rt_ret_val(p_TCB, OS_R_SEM);
#endif
    rt_rmv_dly (p_TCB);
    rt_dispatch (p_TCB);   /* may switch to the woken task immediately */
  }
  else {
    /* Store token. */
    p_SCB->tokens++;
  }
  return (OS_R_OK);
}
OS_RESULT rt_mut_wait (OS_ID mutex, U16 timeout) {
  /* Wait for a mutex, continue when mutex is free. */
  P_MUCB p_MCB = mutex;

  if (p_MCB->level == 0 || p_MCB->owner == os_tsk.run) {
    /* Mutex is free, or this is a recursive acquisition by the owner. */
    if (p_MCB->level == 0) {
      /* First acquisition: record owner and its base priority. */
      p_MCB->owner = os_tsk.run;
      p_MCB->prio  = os_tsk.run->prio;
    }
    p_MCB->level++;
    __DMB();
    return (OS_R_OK);
  }

  /* Mutex owned by another task, wait until released. */
  if (timeout == 0) {
    return (OS_R_TMO);
  }

  /* Raise the owner task priority if lower than current priority.   */
  /* This priority inversion is called priority inheritance.         */
  if (p_MCB->prio < os_tsk.run->prio) {
    p_MCB->owner->prio = os_tsk.run->prio;
    rt_resort_prio (p_MCB->owner);
  }

  /* Join the wait list: the first waiter links directly into the control
   * block, later waiters are inserted in priority order. */
  if (p_MCB->p_lnk == NULL) {
    p_MCB->p_lnk = os_tsk.run;
    os_tsk.run->p_lnk  = NULL;
    os_tsk.run->p_rlnk = (P_TCB)p_MCB;
  }
  else {
    rt_put_prio ((P_XCB)p_MCB, os_tsk.run);
  }

  rt_block(timeout, WAIT_MUT);
  return (OS_R_TMO);
}
OS_RESULT rt_sem_wait (OS_ID semaphore, U16 timeout) {
  /* Obtain a token; possibly wait for it */
  P_SCB p_SCB = semaphore;

  if (p_SCB->tokens != 0) {
    /* Token available: take it and return immediately. */
    p_SCB->tokens--;
    __DMB();
    return (OS_R_OK);
  }

  /* No token available: wait for one */
  if (timeout == 0) {
    return (OS_R_TMO);
  }

  /* Join the wait list: the first waiter links directly into the control
   * block, later waiters are inserted in priority order. */
  if (p_SCB->p_lnk == NULL) {
    p_SCB->p_lnk = os_tsk.run;
    os_tsk.run->p_lnk  = NULL;
    os_tsk.run->p_rlnk = (P_TCB)p_SCB;
  }
  else {
    rt_put_prio ((P_XCB)p_SCB, os_tsk.run);
  }

  rt_block(timeout, WAIT_SEM);
  return (OS_R_TMO);
}
/*
 * I2C event interrupt handler: a state machine driven by the SR1 event
 * flags (SB, ADDR, BTF, RxNE, TxE), implementing the STM32 errata-aware
 * read/write sequences (EV5..EV8 in the reference manual). Shared job state
 * (addr, reg, bytes, reading/writing, read_p/write_p, busy) lives at file
 * scope; `index` counts data bytes, with -1 meaning "subaddress still to
 * send" and bytes+1 meaning "job complete".
 */
void i2c_ev_handler(void)
{
    static uint8_t subaddress_sent, final_stop;  // flag to indicate if subaddess sent, flag to indicate final bus condition
    static int8_t index;                         // index is signed -1 == send the subaddress
    uint8_t SReg_1 = I2Cx->SR1;                  // read the status register here

    if (SReg_1 & 0x0001) {                       // we just sent a start - EV5 in ref manual
        I2Cx->CR1 &= ~0x0800;                    // reset the POS bit so ACK/NACK applied to the current byte
        I2C_AcknowledgeConfig(I2Cx, ENABLE);     // make sure ACK is on
        index = 0;                               // reset the index
        if (reading && (subaddress_sent || 0xFF == reg)) {  // we have sent the subaddr
            subaddress_sent = 1;                 // make sure this is set in case of no subaddress, so following code runs correctly
            if (bytes == 2)
                I2Cx->CR1 |= 0x0800;             // set the POS bit so NACK applied to the final byte in the two byte read
            I2C_Send7bitAddress(I2Cx, addr, I2C_Direction_Receiver);     // send the address and set hardware mode
        } else {                                 // direction is Tx, or we havent sent the sub and rep start
            I2C_Send7bitAddress(I2Cx, addr, I2C_Direction_Transmitter);  // send the address and set hardware mode
            if (reg != 0xFF)                     // 0xFF as subaddress means it will be ignored, in Tx or Rx mode
                index = -1;                      // send a subaddress
        }
    } else if (SReg_1 & 0x0002) {                // we just sent the address - EV6 in ref manual
        // Read SR1,2 to clear ADDR
        __DMB();                                 // memory fence to control hardware
        if (bytes == 1 && reading && subaddress_sent) {      // we are receiving 1 byte - EV6_3
            I2C_AcknowledgeConfig(I2Cx, DISABLE);            // turn off ACK
            __DMB();
            (void)I2Cx->SR2;                     // clear ADDR after ACK is turned off
            I2C_GenerateSTOP(I2Cx, ENABLE);      // program the stop
            final_stop = 1;
            I2C_ITConfig(I2Cx, I2C_IT_BUF, ENABLE);          // allow us to have an EV7
        } else {                                 // EV6 and EV6_1
            (void)I2Cx->SR2;                     // clear the ADDR here
            __DMB();
            if (bytes == 2 && reading && subaddress_sent) {  // rx 2 bytes - EV6_1
                I2C_AcknowledgeConfig(I2Cx, DISABLE);        // turn off ACK
                I2C_ITConfig(I2Cx, I2C_IT_BUF, DISABLE);     // disable TXE to allow the buffer to fill
            } else if (bytes == 3 && reading && subaddress_sent)         // rx 3 bytes
                I2C_ITConfig(I2Cx, I2C_IT_BUF, DISABLE);     // make sure RXNE disabled so we get a BTF in two bytes time
            else                                 // receiving greater than three bytes, sending subaddress, or transmitting
                I2C_ITConfig(I2Cx, I2C_IT_BUF, ENABLE);
        }
    } else if (SReg_1 & 0x004) {                 // Byte transfer finished - EV7_2, EV7_3 or EV8_2
        final_stop = 1;
        if (reading && subaddress_sent) {        // EV7_2, EV7_3
            if (bytes > 2) {                     // EV7_2
                I2C_AcknowledgeConfig(I2Cx, DISABLE);    // turn off ACK
                read_p[index++] = (uint8_t)I2Cx->DR;     // read data N-2
                I2C_GenerateSTOP(I2Cx, ENABLE);          // program the Stop
                final_stop = 1;                          // required to fix hardware
                read_p[index++] = (uint8_t)I2Cx->DR;     // read data N - 1
                I2C_ITConfig(I2Cx, I2C_IT_BUF, ENABLE);  // enable TXE to allow the final EV7
            } else {                             // EV7_3
                if (final_stop)
                    I2C_GenerateSTOP(I2Cx, ENABLE);      // program the Stop
                else
                    I2C_GenerateSTART(I2Cx, ENABLE);     // program a rep start
                read_p[index++] = (uint8_t)I2Cx->DR;     // read data N - 1
                read_p[index++] = (uint8_t)I2Cx->DR;     // read data N
                index++;                                 // to show job completed
            }
        } else {                                 // EV8_2, which may be due to a subaddress sent or a write completion
            if (subaddress_sent || (writing)) {
                if (final_stop)
                    I2C_GenerateSTOP(I2Cx, ENABLE);      // program the Stop
                else
                    I2C_GenerateSTART(I2Cx, ENABLE);     // program a rep start
                index++;                                 // to show that the job is complete
            } else {                             // We need to send a subaddress
                I2C_GenerateSTART(I2Cx, ENABLE);         // program the repeated Start
                subaddress_sent = 1;                     // this is set back to zero upon completion of the current task
            }
        }
        // TODO - busy waiting in ISR
        // we must wait for the start to clear, otherwise we get constant BTF
        while (I2Cx->CR1 & 0x0100) {
            ;
        }
    } else if (SReg_1 & 0x0040) {                // Byte received - EV7
        read_p[index++] = (uint8_t)I2Cx->DR;
        if (bytes == (index + 3))
            I2C_ITConfig(I2Cx, I2C_IT_BUF, DISABLE);     // disable TXE to allow the buffer to flush so we can get an EV7_2
        if (bytes == index)                      // We have completed a final EV7
            index++;                             // to show job is complete
    } else if (SReg_1 & 0x0080) {                // Byte transmitted EV8 / EV8_1
        if (index != -1) {                       // we dont have a subaddress to send
            I2Cx->DR = write_p[index++];
            if (bytes == index)                  // we have sent all the data
                I2C_ITConfig(I2Cx, I2C_IT_BUF, DISABLE); // disable TXE to allow the buffer to flush
        } else {
            index++;
            I2Cx->DR = reg;                      // send the subaddress
            if (reading || !bytes)               // if receiving or sending 0 bytes, flush now
                I2C_ITConfig(I2Cx, I2C_IT_BUF, DISABLE); // disable TXE to allow the buffer to flush
        }
    }

    if (index == bytes + 1) {                    // we have completed the current job
        subaddress_sent = 0;                     // reset this here
        if (final_stop)                          // If there is a final stop and no more jobs, bus is inactive, disable interrupts to prevent BTF
            I2C_ITConfig(I2Cx, I2C_IT_EVT | I2C_IT_ERR, DISABLE);  // Disable EVT and ERR interrupts while bus inactive
        busy = 0;
    }
}
/*
 * Pull one complete received Ethernet frame out of the RX descriptor ring.
 * First scans for an EOF-marked descriptor owned by software; if a complete
 * frame exists, walks its fragments, hands each filled net_buf to the frame
 * and replaces it in the ring with a fresh buffer, then returns the
 * descriptors to the GMAC. Returns NULL if no complete frame is available
 * or allocation failed (descriptors are still recycled in that case).
 */
static struct net_pkt *frame_get(struct gmac_queue *queue)
{
	struct gmac_desc_list *rx_desc_list = &queue->rx_desc_list;
	struct gmac_desc *rx_desc;
	struct ring_buf *rx_frag_list = &queue->rx_frag_list;
	struct net_pkt *rx_frame;
	bool frame_is_complete;
	struct net_buf *frag;
	struct net_buf *new_frag;
	struct net_buf *last_frag = NULL;
	u8_t *frag_data;
	u32_t frag_len;
	u32_t frame_len = 0;
	u16_t tail;

	/* Check if there exists a complete frame in RX descriptor list */
	tail = rx_desc_list->tail;
	rx_desc = &rx_desc_list->buf[tail];
	frame_is_complete = false;
	while ((rx_desc->w0 & GMAC_RXW0_OWNERSHIP) && !frame_is_complete) {
		frame_is_complete = (bool)(rx_desc->w1 & GMAC_RXW1_EOF);
		MODULO_INC(tail, rx_desc_list->len);
		rx_desc = &rx_desc_list->buf[tail];
	}

	/* Frame which is not complete can be dropped by GMAC. Do not process
	 * it, even partially.
	 */
	if (!frame_is_complete) {
		return NULL;
	}

	/* May return NULL on allocation failure; the loop below still runs to
	 * recycle the descriptors. */
	rx_frame = net_pkt_get_reserve_rx(0, K_NO_WAIT);

	/* Process a frame */
	tail = rx_desc_list->tail;
	rx_desc = &rx_desc_list->buf[tail];
	frame_is_complete = false;

	/* TODO: Don't assume first RX fragment will have SOF (Start of frame)
	 * bit set. If SOF bit is missing recover gracefully by dropping
	 * invalid frame.
	 */
	__ASSERT(rx_desc->w1 & GMAC_RXW1_SOF,
		 "First RX fragment is missing SOF bit");

	/* TODO: We know already tail and head indexes of fragments containing
	 * complete frame. Loop over those indexes, don't search for them
	 * again.
	 */
	while ((rx_desc->w0 & GMAC_RXW0_OWNERSHIP) && !frame_is_complete) {
		frag = (struct net_buf *)rx_frag_list->buf[tail];
		frag_data = (u8_t *)(rx_desc->w0 & GMAC_RXW0_ADDR);
		__ASSERT(frag->data == frag_data,
			 "RX descriptor and buffer list desynchronized");
		frame_is_complete = (bool)(rx_desc->w1 & GMAC_RXW1_EOF);
		if (frame_is_complete) {
			/* NOTE(review): GMAC_TXW1_LEN is a TX mask applied to
			 * an RX descriptor; looks like it should be
			 * GMAC_RXW1_LEN -- confirm the mask values match. */
			frag_len = (rx_desc->w1 & GMAC_TXW1_LEN) - frame_len;
		} else {
			frag_len = CONFIG_NET_BUF_DATA_SIZE;
		}

		frame_len += frag_len;

		/* Link frame fragments only if RX net buffer is valid */
		if (rx_frame != NULL) {
			/* Assure cache coherency after DMA write operation */
			DCACHE_INVALIDATE(frag_data, frag_len);

			/* Get a new data net buffer from the buffer pool */
			new_frag = net_pkt_get_frag(rx_frame, K_NO_WAIT);
			if (new_frag == NULL) {
				queue->err_rx_frames_dropped++;
				net_pkt_unref(rx_frame);
				rx_frame = NULL;
			} else {
				net_buf_add(frag, frag_len);
				if (!last_frag) {
					net_pkt_frag_insert(rx_frame, frag);
				} else {
					net_buf_frag_insert(last_frag, frag);
				}
				last_frag = frag;
				frag = new_frag;
				rx_frag_list->buf[tail] = (u32_t)frag;
			}
		}

		/* Update buffer descriptor status word */
		rx_desc->w1 = 0;
		/* Guarantee that status word is written before the address
		 * word to avoid race condition.
		 */
		__DMB(); /* data memory barrier */
		/* Update buffer descriptor address word */
		rx_desc->w0 = ((u32_t)frag->data & GMAC_RXW0_ADDR)
			| (tail == rx_desc_list->len-1 ? GMAC_RXW0_WRAP : 0);

		MODULO_INC(tail, rx_desc_list->len);
		rx_desc = &rx_desc_list->buf[tail];
	}

	rx_desc_list->tail = tail;
	SYS_LOG_DBG("Frame complete: rx=%p, tail=%d", rx_frame, tail);
	__ASSERT_NO_MSG(frame_is_complete);

	return rx_frame;
}
/*
 * Transmit one network packet on the GMAC: walks the packet's fragment
 * chain, writing one TX descriptor per fragment (address word first, then
 * status word, separated by a DMB so the DMA engine never sees a valid
 * status with a stale address). Returns 0 on success or -EIO if the TX
 * error handler flushed the queue while we were queuing.
 */
static int eth_tx(struct net_if *iface, struct net_pkt *pkt)
{
	struct device *const dev = net_if_get_device(iface);
	const struct eth_sam_dev_cfg *const cfg = DEV_CFG(dev);
	struct eth_sam_dev_data *const dev_data = DEV_DATA(dev);
	Gmac *gmac = cfg->regs;
	struct gmac_queue *queue = &dev_data->queue_list[0];
	struct gmac_desc_list *tx_desc_list = &queue->tx_desc_list;
	struct gmac_desc *tx_desc;
	struct net_buf *frag;
	u8_t *frag_data;
	u16_t frag_len;
	/* Snapshot of the flush counter; a change means an error flush ran. */
	u32_t err_tx_flushed_count_at_entry = queue->err_tx_flushed_count;
	unsigned int key;

	__ASSERT(pkt, "buf pointer is NULL");
	__ASSERT(pkt->frags, "Frame data missing");

	SYS_LOG_DBG("ETH tx");

	/* First fragment is special - it contains link layer (Ethernet
	 * in our case) header. Modify the data pointer to account for more data
	 * in the beginning of the buffer.
	 */
	net_buf_push(pkt->frags, net_pkt_ll_reserve(pkt));

	frag = pkt->frags;
	while (frag) {
		frag_data = frag->data;
		frag_len = frag->len;

		/* Assure cache coherency before DMA read operation */
		DCACHE_CLEAN(frag_data, frag_len);

		/* Blocks until a TX descriptor is free. */
		k_sem_take(&queue->tx_desc_sem, K_FOREVER);

		/* The following section becomes critical and requires IRQ lock
		 * / unlock protection only due to the possibility of executing
		 * tx_error_handler() function.
		 */
		key = irq_lock();

		/* Check if tx_error_handler() function was executed */
		if (queue->err_tx_flushed_count !=
		    err_tx_flushed_count_at_entry) {
			irq_unlock(key);
			return -EIO;
		}

		tx_desc = &tx_desc_list->buf[tx_desc_list->head];

		/* Update buffer descriptor address word */
		tx_desc->w0 = (u32_t)frag_data;
		/* Guarantee that address word is written before the status
		 * word to avoid race condition.
		 */
		__DMB(); /* data memory barrier */
		/* Update buffer descriptor status word (clear used bit) */
		tx_desc->w1 = (frag_len & GMAC_TXW1_LEN)
			| (!frag->frags ? GMAC_TXW1_LASTBUFFER : 0)
			| (tx_desc_list->head == tx_desc_list->len - 1
			   ? GMAC_TXW1_WRAP : 0);

		/* Update descriptor position */
		MODULO_INC(tx_desc_list->head, tx_desc_list->len);

		__ASSERT(tx_desc_list->head != tx_desc_list->tail,
			 "tx_desc_list overflow");

		irq_unlock(key);

		/* Continue with the rest of fragments (only data) */
		frag = frag->frags;
	}

	key = irq_lock();

	/* Check if tx_error_handler() function was executed */
	if (queue->err_tx_flushed_count != err_tx_flushed_count_at_entry) {
		irq_unlock(key);
		return -EIO;
	}

	/* Ensure the descriptor following the last one is marked as used */
	tx_desc = &tx_desc_list->buf[tx_desc_list->head];
	tx_desc->w1 |= GMAC_TXW1_USED;

	/* Account for a sent frame */
	ring_buf_put(&queue->tx_frames, POINTER_TO_UINT(pkt));

	irq_unlock(key);

	/* Start transmission */
	gmac->GMAC_NCR |= GMAC_NCR_TSTART;

	return 0;
}
/**
 * @brief MPU disable function
 *
 * Issues a data memory barrier so in-flight accesses complete under the old
 * MPU configuration, then clears the MPU control register.
 * NOTE(review): ARM guidance also suggests DSB/ISB after reconfiguring the
 * MPU -- confirm whether the caller provides that.
 */
void MPU_Disable(void)
{
    __DMB();        // Make sure outstanding transfers are done
    MPU->CTRL = 0;  // Disable the MPU
}