/**
 * \brief GMAC Interrupt handler.
 *
 * Services one GMAC interrupt: reads and decodes the interrupt, RX and TX
 * status registers, clears the handled status bits, reaps completed TX
 * descriptors from the circular list, and dispatches the driver's RX / TX /
 * wakeup callbacks.
 *
 * NOTE(review): this runs in interrupt context — every callback invoked here
 * executes with the handler's constraints (keep them short, ISR-safe).
 *
 * \param p_gmac_dev Pointer to GMAC device instance.
 */
void gmac_handler(gmac_device_t* p_gmac_dev)
{
	Gmac *p_hw = p_gmac_dev->p_hw;
	gmac_tx_descriptor_t *p_tx_td;
	gmac_dev_tx_cb_t *p_tx_cb;
	/* volatile: values come from memory-mapped hardware status registers */
	volatile uint32_t ul_isr;
	volatile uint32_t ul_rsr;
	volatile uint32_t ul_tsr;
	uint32_t ul_rx_status_flag;
	uint32_t ul_tx_status_flag;

	/* Snapshot interrupt, RX and TX status up front; the ISR/RSR/TSR reads
	 * must happen before the clear operations below. */
	ul_isr = gmac_get_interrupt_status(p_hw);
	ul_rsr = gmac_get_rx_status(p_hw);
	ul_tsr = gmac_get_tx_status(p_hw);
	/* Discard interrupt causes that are masked off, plus the bits in
	 * 0xF8030300 — presumably reserved/unused ISR bits for this part;
	 * TODO(review): confirm against the device datasheet. */
	ul_isr &= ~(gmac_get_interrupt_mask(p_hw) | 0xF8030300);

	/* RX packet: frame-received interrupt or RSR "frame received" flag */
	if ((ul_isr & GMAC_ISR_RCOMP) || (ul_rsr & GMAC_RSR_REC)) {
		ul_rx_status_flag = GMAC_RSR_REC;
		/* Check OVR: accumulate receive-overrun status */
		if (ul_rsr & GMAC_RSR_RXOVR) {
			ul_rx_status_flag |= GMAC_RSR_RXOVR;
		}
		/* Check BNA: accumulate buffer-not-available status */
		if (ul_rsr & GMAC_RSR_BNA) {
			ul_rx_status_flag |= GMAC_RSR_BNA;
		}
		/* Clear status: write back exactly the bits we observed/handled */
		gmac_clear_rx_status(p_hw, ul_rx_status_flag);

		/* Invoke callbacks: upper layer drains the RX descriptor ring */
		if (p_gmac_dev->func_rx_cb) {
			p_gmac_dev->func_rx_cb(ul_rx_status_flag);
		}
	}

	/* TX packet: transmit-complete interrupt or TSR "transmit complete" */
	if ((ul_isr & GMAC_ISR_TCOMP) || (ul_tsr & GMAC_TSR_TXCOMP)) {
		ul_tx_status_flag = GMAC_TSR_TXCOMP;
		/* A frame transmitted */

		/* Check RLE (retry limit exceeded): the TX path is broken, so the
		 * pending ring is abandoned and the transmitter restarted. */
		if (ul_tsr & GMAC_TSR_RLE) {
			/* Status RLE & Number of discarded buffers: low bits carry the
			 * count of frames still queued (and now discarded). */
			ul_tx_status_flag = GMAC_TSR_RLE | CIRC_CNT(p_gmac_dev->us_tx_head,
					p_gmac_dev->us_tx_tail, p_gmac_dev->us_tx_list_size);
			/* Capture the callback slot at the current tail BEFORE the ring
			 * is reset — used by the RLE notification further down. */
			p_tx_cb = &p_gmac_dev->func_tx_cb_list[p_gmac_dev->us_tx_tail];
			gmac_reset_tx_mem(p_gmac_dev);
			gmac_enable_transmit(p_hw, 1);
		}
		/* Check COL: accumulate collision status */
		if (ul_tsr & GMAC_TSR_COL) {
			ul_tx_status_flag |= GMAC_TSR_COL;
		}
		/* Check UND: accumulate transmit-underrun status */
		if (ul_tsr & GMAC_TSR_UND) {
			ul_tx_status_flag |= GMAC_TSR_UND;
		}
		/* Clear status: acknowledge exactly the handled TX status bits */
		gmac_clear_tx_status(p_hw, ul_tx_status_flag);

		/* Reap completed descriptors. NOTE(review): after an RLE reset the
		 * ring is presumably empty again, so this loop is skipped and the
		 * p_tx_cb captured above survives for the RLE notify — confirm
		 * gmac_reset_tx_mem() resets head == tail. */
		if (!CIRC_EMPTY(p_gmac_dev->us_tx_head, p_gmac_dev->us_tx_tail)) {
			/* Check the buffers */
			do {
				p_tx_td = &p_gmac_dev->p_tx_dscr[p_gmac_dev->us_tx_tail];
				p_tx_cb = &p_gmac_dev->func_tx_cb_list[p_gmac_dev->us_tx_tail];
				/* Any error? Exit if buffer has not been sent yet:
				 * USED bit still clear means hardware owns the descriptor. */
				if ((p_tx_td->status.val & GMAC_TXD_USED) == 0) {
					break;
				}

				/* Notify upper layer that a packet has been sent */
				if (*p_tx_cb) {
					(*p_tx_cb) (ul_tx_status_flag);
				}

				/* Advance tail with wrap-around over the circular list */
				circ_inc(&p_gmac_dev->us_tx_tail, p_gmac_dev->us_tx_list_size);
			} while (CIRC_CNT(p_gmac_dev->us_tx_head, p_gmac_dev->us_tx_tail,
					p_gmac_dev->us_tx_list_size));
		}

		if (ul_tsr & GMAC_TSR_RLE) {
			/* Notify upper layer RLE: uses the callback slot captured before
			 * the ring reset above (same TSR condition guards both). */
			if (*p_tx_cb) {
				(*p_tx_cb) (ul_tx_status_flag);
			}
		}

		/* If a wakeup has been scheduled, notify upper layer that it can
		 * send other packets, and the sending will be successful. */
		if ((CIRC_SPACE(p_gmac_dev->us_tx_head, p_gmac_dev->us_tx_tail,
				p_gmac_dev->us_tx_list_size) >= p_gmac_dev->uc_wakeup_threshold)
				&& p_gmac_dev->func_wakeup_cb) {
			p_gmac_dev->func_wakeup_cb();
		}
	}
}
/**
 * \brief GMAC Interrupt handler (multi-queue variant).
 *
 * Services one GMAC interrupt for the given priority queue: reads the
 * queue-specific interrupt status (ISR for queue 0, ISRPQx otherwise),
 * decodes the shared RX/TX status registers, clears the handled bits, reaps
 * completed TX descriptors from the queue's circular list, and dispatches
 * the queue's RX / TX / wakeup callbacks. Under FreeRTOS it additionally
 * signals the TCP/IP task from the ISR.
 *
 * NOTE(review): RSR/TSR are read unconditionally, not per queue — presumably
 * these status registers are shared across priority queues on this part;
 * confirm against the datasheet.
 *
 * \param p_gmac_dev Pointer to GMAC device instance.
 * \param queue_idx  Index of the priority queue being serviced.
 */
void gmac_handler(gmac_device_t* p_gmac_dev, gmac_quelist_t queue_idx)
{
	Gmac *p_hw = p_gmac_dev->p_hw;
	gmac_tx_descriptor_t *p_tx_td;
	gmac_dev_tx_cb_t *p_tx_cb;
	/* volatile: values come from memory-mapped hardware status registers */
	volatile uint32_t ul_isr;
	volatile uint32_t ul_rsr;
	volatile uint32_t ul_tsr;
	uint32_t ul_rx_status_flag;
	uint32_t ul_tx_status_flag;
#ifdef FREERTOS_USED
	portBASE_TYPE xHigherPriorityTaskWoken = pdFALSE;
#endif

	/* All ring state and callbacks live in the per-queue context */
	gmac_queue_t* p_gmac_queue = &p_gmac_dev->gmac_queue_list[queue_idx];

	/* Queue 0 uses the base ISR; priority queues have their own status regs */
	if(queue_idx == GMAC_QUE_0) {
		ul_isr = gmac_get_interrupt_status(p_hw);
	} else {
		ul_isr = gmac_get_priority_interrupt_status(p_hw, queue_idx);
	}
	ul_rsr = gmac_get_rx_status(p_hw);
	ul_tsr = gmac_get_tx_status(p_hw);
	/* Discard interrupt causes that are masked off, plus the bits in
	 * 0xF8030300 — presumably reserved/unused ISR bits for this part;
	 * TODO(review): confirm against the device datasheet. */
	ul_isr &= ~(gmac_get_interrupt_mask(p_hw) | 0xF8030300);

	/* RX packet: frame-received interrupt or RSR "frame received" flag */
	if ((ul_isr & GMAC_ISR_RCOMP) || (ul_rsr & GMAC_RSR_REC)) {
		ul_rx_status_flag = GMAC_RSR_REC;
		/* Check OVR: accumulate receive-overrun status */
		if (ul_rsr & GMAC_RSR_RXOVR) {
			ul_rx_status_flag |= GMAC_RSR_RXOVR;
		}
		/* Check BNA: accumulate buffer-not-available status */
		if (ul_rsr & GMAC_RSR_BNA) {
			ul_rx_status_flag |= GMAC_RSR_BNA;
		}
		/* Clear status: write back exactly the bits we observed/handled */
		gmac_clear_rx_status(p_hw, ul_rx_status_flag);

		/* Invoke callbacks: upper layer drains this queue's RX ring */
		if (p_gmac_queue->func_rx_cb) {
			p_gmac_queue->func_rx_cb(ul_rx_status_flag);
		}
	}

	/* TX packet: transmit-complete interrupt or TSR "transmit complete" */
	if ((ul_isr & GMAC_ISR_TCOMP) || (ul_tsr & GMAC_TSR_TXCOMP)) {
		ul_tx_status_flag = GMAC_TSR_TXCOMP;

		/* Check RLE (retry limit exceeded): abandon the pending ring for
		 * this queue and restart the transmitter. */
		if (ul_tsr & GMAC_TSR_RLE) {
			/* Status RLE */
			ul_tx_status_flag = GMAC_TSR_RLE;
			/* NOTE(review): p_tx_cb is captured here but — unlike the
			 * single-queue handler — never invoked for the RLE case, and it
			 * may be overwritten by the reap loop below. Dead store, or a
			 * missing RLE notification to the upper layer? Verify intent. */
			p_tx_cb = &p_gmac_queue->func_tx_cb_list[p_gmac_queue->us_tx_tail];
			gmac_reset_tx_mem(p_gmac_dev, queue_idx);
			gmac_enable_transmit(p_hw, 1);
		}
		/* Check COL: accumulate collision status */
		if (ul_tsr & GMAC_TSR_COL) {
			ul_tx_status_flag |= GMAC_TSR_COL;
		}
		/* Clear status: acknowledge exactly the handled TX status bits */
		gmac_clear_tx_status(p_hw, ul_tx_status_flag);

		/* Reap completed descriptors from this queue's circular list */
		if (!CIRC_EMPTY(p_gmac_queue->us_tx_head, p_gmac_queue->us_tx_tail)) {
			/* Check the buffers */
			do {
				p_tx_td = &p_gmac_queue->p_tx_dscr[p_gmac_queue->us_tx_tail];
				p_tx_cb = &p_gmac_queue->func_tx_cb_list[p_gmac_queue->us_tx_tail];
				/* Any error? Exit if buffer has not been sent yet:
				 * USED bit still clear means hardware owns the descriptor. */
				if ((p_tx_td->status.val & GMAC_TXD_USED) == 0) {
					break;
				}

				/* Notify upper layer that a packet has been sent */
				if (*p_tx_cb) {
					(*p_tx_cb) (ul_tx_status_flag);
				}

				/* Advance tail with wrap-around over the circular list */
				circ_inc(&p_gmac_queue->us_tx_tail, p_gmac_queue->us_tx_list_size);
			} while (CIRC_CNT(p_gmac_queue->us_tx_head, p_gmac_queue->us_tx_tail,
					p_gmac_queue->us_tx_list_size));
		}

		/* If a wakeup has been scheduled, notify upper layer that it can
		 * send other packets, and the sending will be successful. */
		if ((CIRC_SPACE(p_gmac_queue->us_tx_head, p_gmac_queue->us_tx_tail,
				p_gmac_queue->us_tx_list_size) >= p_gmac_queue->uc_wakeup_threshold)
				&& p_gmac_queue->func_wakeup_cb) {
			p_gmac_queue->func_wakeup_cb();
		}
	}

#ifdef FREERTOS_USED
	/* Notify TCP/IP task to start data processing. */
	/* LwIP works on top of GMAC driver, hence this semaphore locks */
	/* the complete IP stack. */
	xSemaphoreGiveFromISR(netif_notification_semaphore,
			&xHigherPriorityTaskWoken);
	/* Request a context switch on ISR exit if a higher-priority task woke */
	portEND_SWITCHING_ISR(xHigherPriorityTaskWoken);
#endif
}