/** * \brief Process successfully sent packets * \param gmacd Pointer to GMAC Driver instance. */ static void _gmacd_tx_complete_handler(struct _gmacd* gmacd, uint8_t queue) { Gmac* gmac = gmacd->gmac; struct _gmacd_queue* q = &gmacd->queues[queue]; struct _gmac_desc *desc; gmacd_callback_t callback; uint32_t tsr; //printf("<TX>\r\n"); /* Clear status */ tsr = gmac_get_tx_status(gmac); gmac_clear_tx_status(gmac, tsr); while (!RING_EMPTY(q->tx_head, q->tx_tail)) { desc = &q->tx_desc[q->tx_tail]; /* Exit if frame has not been sent yet: * On TX completion, the GMAC set the USED bit only into the * very first buffer descriptor of the sent frame. * Otherwise it updates this descriptor with status error bits. * This is the descriptor writeback. */ if ((desc->status & GMAC_TX_STATUS_USED) == 0) break; /* Process all buffers of the current transmitted frame */ while ((desc->status & GMAC_TX_STATUS_LASTBUF) == 0) { RING_INC(q->tx_tail, q->tx_size); desc = &q->tx_desc[q->tx_tail]; } /* Notify upper layer that a frame has been sent */ if (q->tx_callbacks) { callback = q->tx_callbacks[q->tx_tail]; if (callback) callback(queue, tsr); } /* Go to next frame */ RING_INC(q->tx_tail, q->tx_size); } /* If a wakeup callback has been set, notify upper layer that it can send more packets now */ if (q->tx_wakeup_callback) { if (RING_SPACE(q->tx_head, q->tx_tail, q->tx_size) >= q->tx_wakeup_threshold) { q->tx_wakeup_callback(queue); } } }
/**
 * \brief GMAC Interrupt handler.
 *
 * Reads and acknowledges the GMAC interrupt, RX and TX status registers,
 * then dispatches: RX completion/overrun/buffer-not-available events to the
 * registered RX callback, and TX completion/error events to the per-buffer
 * TX callbacks. On a retry-limit-exceeded (RLE) error the TX descriptor
 * memory is reset and transmission re-enabled before notification.
 *
 * \param p_gmac_dev Pointer to GMAC device instance.
 */
void gmac_handler(gmac_device_t* p_gmac_dev)
{
	Gmac *p_hw = p_gmac_dev->p_hw;
	gmac_tx_descriptor_t *p_tx_td;
	gmac_dev_tx_cb_t *p_tx_cb;
	volatile uint32_t ul_isr;
	volatile uint32_t ul_rsr;
	volatile uint32_t ul_tsr;
	uint32_t ul_rx_status_flag;
	uint32_t ul_tx_status_flag;

	ul_isr = gmac_get_interrupt_status(p_hw);
	ul_rsr = gmac_get_rx_status(p_hw);
	ul_tsr = gmac_get_tx_status(p_hw);
	/* Keep only the unmasked interrupt sources.
	 * NOTE(review): 0xF8030300 looks like a mask of reserved ISR bits —
	 * confirm against the device datasheet. */
	ul_isr &= ~(gmac_get_interrupt_mask(p_hw) | 0xF8030300);

	/* RX packet */
	if ((ul_isr & GMAC_ISR_RCOMP) || (ul_rsr & GMAC_RSR_REC)) {
		ul_rx_status_flag = GMAC_RSR_REC;

		/* Check overrun (OVR) */
		if (ul_rsr & GMAC_RSR_RXOVR) {
			ul_rx_status_flag |= GMAC_RSR_RXOVR;
		}
		/* Check buffer-not-available (BNA) */
		if (ul_rsr & GMAC_RSR_BNA) {
			ul_rx_status_flag |= GMAC_RSR_BNA;
		}
		/* Clear status */
		gmac_clear_rx_status(p_hw, ul_rx_status_flag);

		/* Invoke callbacks */
		if (p_gmac_dev->func_rx_cb) {
			p_gmac_dev->func_rx_cb(ul_rx_status_flag);
		}
	}

	/* TX packet */
	if ((ul_isr & GMAC_ISR_TCOMP) || (ul_tsr & GMAC_TSR_TXCOMP)) {
		ul_tx_status_flag = GMAC_TSR_TXCOMP; /* A frame transmitted */

		/* Check retry-limit-exceeded (RLE) */
		if (ul_tsr & GMAC_TSR_RLE) {
			/* Status RLE & Number of discarded buffers.
			 * The buffer count is OR-ed into the status word that is
			 * handed to the TX callbacks. */
			ul_tx_status_flag = GMAC_TSR_RLE | CIRC_CNT(p_gmac_dev->us_tx_head,
					p_gmac_dev->us_tx_tail, p_gmac_dev->us_tx_list_size);
			/* Capture the callback of the frame at the tail BEFORE the
			 * descriptor list is reset, so the upper layer can still be
			 * told about the failed frame further below. */
			p_tx_cb = &p_gmac_dev->func_tx_cb_list[p_gmac_dev->us_tx_tail];
			gmac_reset_tx_mem(p_gmac_dev);
			gmac_enable_transmit(p_hw, 1);
		}
		/* Check collision (COL) */
		if (ul_tsr & GMAC_TSR_COL) {
			ul_tx_status_flag |= GMAC_TSR_COL;
		}
		/* Check underrun (UND) */
		if (ul_tsr & GMAC_TSR_UND) {
			ul_tx_status_flag |= GMAC_TSR_UND;
		}
		/* Clear status */
		gmac_clear_tx_status(p_hw, ul_tx_status_flag);

		if (!CIRC_EMPTY(p_gmac_dev->us_tx_head, p_gmac_dev->us_tx_tail)) {
			/* Reclaim completed buffers from the tail of the ring. */
			do {
				p_tx_td = &p_gmac_dev->p_tx_dscr[p_gmac_dev->us_tx_tail];
				p_tx_cb = &p_gmac_dev->func_tx_cb_list[p_gmac_dev->us_tx_tail];
				/* Any error? Exit if buffer has not been sent yet. */
				if ((p_tx_td->status.val & GMAC_TXD_USED) == 0) {
					break;
				}
				/* Notify upper layer that a packet has been sent */
				if (*p_tx_cb) {
					(*p_tx_cb) (ul_tx_status_flag);
				}
				circ_inc(&p_gmac_dev->us_tx_tail, p_gmac_dev->us_tx_list_size);
			} while (CIRC_CNT(p_gmac_dev->us_tx_head, p_gmac_dev->us_tx_tail,
					p_gmac_dev->us_tx_list_size));
		}

		if (ul_tsr & GMAC_TSR_RLE) {
			/* Notify upper layer of the RLE error (p_tx_cb was captured
			 * above, before the TX memory reset). */
			if (*p_tx_cb) {
				(*p_tx_cb) (ul_tx_status_flag);
			}
		}

		/* If a wakeup has been scheduled, notify upper layer that it can
		 * send other packets, and the sending will be successful. */
		if ((CIRC_SPACE(p_gmac_dev->us_tx_head, p_gmac_dev->us_tx_tail,
				p_gmac_dev->us_tx_list_size) >= p_gmac_dev->uc_wakeup_threshold)
				&& p_gmac_dev->func_wakeup_cb) {
			p_gmac_dev->func_wakeup_cb();
		}
	}
}
/**
 * \brief GMAC Interrupt handler (multi-queue variant).
 *
 * Reads and acknowledges the interrupt status of the given priority queue
 * (queue 0 uses the main ISR, other queues the priority ISR), then
 * dispatches RX events to the queue's RX callback and reclaims completed TX
 * buffers, invoking their per-buffer callbacks. On a retry-limit-exceeded
 * (RLE) error the queue's TX descriptor memory is reset and transmission is
 * re-enabled. Under FreeRTOS, the TCP/IP task is finally notified.
 *
 * \param p_gmac_dev Pointer to GMAC device instance.
 * \param queue_idx  Index of the GMAC queue that raised the interrupt.
 */
void gmac_handler(gmac_device_t* p_gmac_dev, gmac_quelist_t queue_idx)
{
	Gmac *p_hw = p_gmac_dev->p_hw;
	gmac_tx_descriptor_t *p_tx_td;
	gmac_dev_tx_cb_t *p_tx_cb;
	volatile uint32_t ul_isr;
	volatile uint32_t ul_rsr;
	volatile uint32_t ul_tsr;
	uint32_t ul_rx_status_flag;
	uint32_t ul_tx_status_flag;
#ifdef FREERTOS_USED
	portBASE_TYPE xHigherPriorityTaskWoken = pdFALSE;
#endif

	gmac_queue_t* p_gmac_queue = &p_gmac_dev->gmac_queue_list[queue_idx];

	/* Queue 0 reports through the main ISR; priority queues have their
	 * own interrupt status registers. */
	if(queue_idx == GMAC_QUE_0) {
		ul_isr = gmac_get_interrupt_status(p_hw);
	} else {
		ul_isr = gmac_get_priority_interrupt_status(p_hw, queue_idx);
	}
	ul_rsr = gmac_get_rx_status(p_hw);
	ul_tsr = gmac_get_tx_status(p_hw);
	/* Keep only the unmasked interrupt sources.
	 * NOTE(review): 0xF8030300 looks like a mask of reserved ISR bits —
	 * confirm against the device datasheet. */
	ul_isr &= ~(gmac_get_interrupt_mask(p_hw) | 0xF8030300);

	/* RX packet */
	if ((ul_isr & GMAC_ISR_RCOMP) || (ul_rsr & GMAC_RSR_REC)) {
		ul_rx_status_flag = GMAC_RSR_REC;

		/* Check overrun (OVR) */
		if (ul_rsr & GMAC_RSR_RXOVR) {
			ul_rx_status_flag |= GMAC_RSR_RXOVR;
		}
		/* Check buffer-not-available (BNA) */
		if (ul_rsr & GMAC_RSR_BNA) {
			ul_rx_status_flag |= GMAC_RSR_BNA;
		}
		/* Clear status */
		gmac_clear_rx_status(p_hw, ul_rx_status_flag);

		/* Invoke callbacks */
		if (p_gmac_queue->func_rx_cb) {
			p_gmac_queue->func_rx_cb(ul_rx_status_flag);
		}
	}

	/* TX packet */
	if ((ul_isr & GMAC_ISR_TCOMP) || (ul_tsr & GMAC_TSR_TXCOMP)) {
		ul_tx_status_flag = GMAC_TSR_TXCOMP;

		/* Check retry-limit-exceeded (RLE) */
		if (ul_tsr & GMAC_TSR_RLE) {
			/* Status RLE */
			ul_tx_status_flag = GMAC_TSR_RLE;
			/* NOTE(review): unlike the single-queue handler, p_tx_cb is
			 * assigned here but never used afterwards — there is no RLE
			 * notification block below; verify this is intentional. */
			p_tx_cb = &p_gmac_queue->func_tx_cb_list[p_gmac_queue->us_tx_tail];
			gmac_reset_tx_mem(p_gmac_dev, queue_idx);
			gmac_enable_transmit(p_hw, 1);
		}
		/* Check collision (COL) */
		if (ul_tsr & GMAC_TSR_COL) {
			ul_tx_status_flag |= GMAC_TSR_COL;
		}
		/* Clear status */
		gmac_clear_tx_status(p_hw, ul_tx_status_flag);

		if (!CIRC_EMPTY(p_gmac_queue->us_tx_head, p_gmac_queue->us_tx_tail)) {
			/* Reclaim completed buffers from the tail of the ring. */
			do {
				p_tx_td = &p_gmac_queue->p_tx_dscr[p_gmac_queue->us_tx_tail];
				p_tx_cb = &p_gmac_queue->func_tx_cb_list[p_gmac_queue->us_tx_tail];
				/* Any error? Exit if buffer has not been sent yet. */
				if ((p_tx_td->status.val & GMAC_TXD_USED) == 0) {
					break;
				}
				/* Notify upper layer that a packet has been sent */
				if (*p_tx_cb) {
					(*p_tx_cb) (ul_tx_status_flag);
				}
				circ_inc(&p_gmac_queue->us_tx_tail, p_gmac_queue->us_tx_list_size);
			} while (CIRC_CNT(p_gmac_queue->us_tx_head, p_gmac_queue->us_tx_tail,
					p_gmac_queue->us_tx_list_size));
		}

		/* If a wakeup has been scheduled, notify upper layer that it can
		 * send other packets, and the sending will be successful. */
		if ((CIRC_SPACE(p_gmac_queue->us_tx_head, p_gmac_queue->us_tx_tail,
				p_gmac_queue->us_tx_list_size) >= p_gmac_queue->uc_wakeup_threshold)
				&& p_gmac_queue->func_wakeup_cb) {
			p_gmac_queue->func_wakeup_cb();
		}
	}

#ifdef FREERTOS_USED
	/* Notify TCP/IP task to start data processing. */
	/* LwIP works on top of GMAC driver, hence this semaphore locks */
	/* the complete IP stack. */
	xSemaphoreGiveFromISR(netif_notification_semaphore,
			&xHigherPriorityTaskWoken);
	portEND_SWITCHING_ISR(xHigherPriorityTaskWoken);
#endif
}
/**
 * \brief Reset TX queue when errors are detected.
 *
 * Recovery sequence: disable the transmitter, halt transmission and wait for
 * the current frame (if any) to drain, flush every frame still in the TX
 * ring (notifying its callback), reset the TX descriptor list, clear the TX
 * status and re-enable the transmitter. The statement order implements a
 * documented hardware workaround and must not be changed.
 *
 * \param gmacd Pointer to GMAC Driver instance.
 * \param queue Index of the priority queue to recover.
 */
static void _gmacd_tx_error_handler(struct _gmacd* gmacd, uint8_t queue)
{
	Gmac *gmac = gmacd->gmac;
	struct _gmacd_queue* q = &gmacd->queues[queue];
	struct _gmac_desc* desc;
	gmacd_callback_t callback;
	uint32_t tsr;

	/* NOTE(review): debug print in an error path that is reached from the
	 * IRQ handler — confirm this is acceptable in interrupt context. */
	printf("<TXERR>\r\n");

	/* Clear TXEN bit into the Network Configuration Register:
	 * this is a workaround to recover from TX lockups that
	 * occur on sama5d4 gmac (r1p24f2) when using scatter-gather.
	 * This issue has never been seen on sama5d4 gmac (r1p31). */
	gmac_transmit_enable(gmac, false);

	/* The following step should be optional since this function is called
	 * directly by the IRQ handler. Indeed, according to Cadence
	 * documentation, the transmission is halted on errors such as
	 * too many retries or transmit under run.
	 * However it would become mandatory if the call of this function
	 * were scheduled as a task by the IRQ handler (this is how Linux
	 * driver works). Then this function might compete with GMACD_Send().
	 *
	 * Setting bit 10, tx_halt, of the Network Control Register is not enough:
	 * We should wait for bit 3, tx_go, of the Transmit Status Register to
	 * be cleared at transmit completion if a frame is being transmitted. */
	gmac_halt_transmission(gmac);
	while (gmac_get_tx_status(gmac) & GMAC_TSR_TXGO);

	/* Treat frames in TX queue including the ones that caused the error. */
	while (!RING_EMPTY(q->tx_head, q->tx_tail)) {
		int tx_completed = 0;
		desc = &q->tx_desc[q->tx_tail];

		/* Check USED bit on the very first buffer descriptor to validate
		 * TX completion. */
		if (desc->status & GMAC_TX_STATUS_USED)
			tx_completed = 1;

		/* Go to the last buffer descriptor of the frame */
		while ((desc->status & GMAC_TX_STATUS_LASTBUF) == 0) {
			RING_INC(q->tx_tail, q->tx_size);
			desc = &q->tx_desc[q->tx_tail];
		}

		/* Notify upper layer of the frame's completion status:
		 * TXCOMP for frames that did complete, 0 otherwise. */
		// TODO: which error to notify?
		if (q->tx_callbacks) {
			callback = q->tx_callbacks[q->tx_tail];
			if (callback)
				callback(queue, tx_completed ? GMAC_TSR_TXCOMP : 0);
		}

		/* Go to next frame */
		RING_INC(q->tx_tail, q->tx_size);
	}

	/* Reset TX queue */
	_gmacd_reset_tx(gmacd, queue);

	/* Clear status */
	tsr = gmac_get_tx_status(gmac);
	gmac_clear_tx_status(gmac, tsr);

	/* Now we are ready to start transmission again */
	gmac_transmit_enable(gmac, true);
	if (q->tx_wakeup_callback)
		q->tx_wakeup_callback(queue);
}