/**
 * \brief Initialize the GMAC driver.
 *
 * \param p_gmac     Pointer to the GMAC instance.
 * \param p_gmac_dev Pointer to the GMAC device instance.
 * \param p_opt      GMAC configure options.
 */
void gmac_dev_init(Gmac* p_gmac, gmac_device_t* p_gmac_dev,
		gmac_options_t* p_opt)
{
	gmac_dev_mem_t mem_cfg;
	uint32_t ul_ncfgr;

	/* Quiesce the controller first: stop TX/RX and mask every IRQ source. */
	gmac_network_control(p_gmac, 0);
	gmac_disable_interrupt(p_gmac, ~0u);

	gmac_clear_statistics(p_gmac);

	/* Wipe every latched bit of the receive status register. */
	gmac_clear_rx_status(p_gmac,
			GMAC_RSR_RXOVR | GMAC_RSR_REC | GMAC_RSR_BNA);

	/* Wipe every latched bit of the transmit status register. */
	gmac_clear_tx_status(p_gmac,
			GMAC_TSR_UBR | GMAC_TSR_COL | GMAC_TSR_RLE |
			GMAC_TSR_TFC | GMAC_TSR_TXCOMP | GMAC_TSR_UND);

	/* Reading the interrupt status register acknowledges pending IRQs. */
	gmac_get_interrupt_status(p_gmac);

	/* Do not copy the FCS into RX buffers and enable pause frames;
	 * keep whatever other configuration bits are already set. */
	ul_ncfgr = gmac_get_configure(p_gmac) | GMAC_NCFGR_RFCS | GMAC_NCFGR_PEN;
	gmac_set_configure(p_gmac, ul_ncfgr);
	gmac_enable_copy_all(p_gmac, p_opt->uc_copy_all_frame);
	gmac_disable_broadcast(p_gmac, p_opt->uc_no_boardcast);

	/* Hand the statically allocated descriptor rings and frame buffers
	 * over to the driver's memory manager. */
	mem_cfg.p_rx_buffer = gs_uc_rx_buffer;
	mem_cfg.p_rx_dscr = gs_rx_desc;
	mem_cfg.us_rx_size = GMAC_RX_BUFFERS;
	mem_cfg.p_tx_buffer = gs_uc_tx_buffer;
	mem_cfg.p_tx_dscr = gs_tx_desc;
	mem_cfg.us_tx_size = GMAC_TX_BUFFERS;
	gmac_init_mem(p_gmac, p_gmac_dev, &mem_cfg, gs_tx_callback);

	/* Program the station MAC address into specific-address slot 0. */
	gmac_set_address(p_gmac, 0, p_opt->uc_mac_addr);
}
/**
 * \brief Initialize the GMAC driver.
 *
 * \param p_gmac     Pointer to the GMAC instance.
 * \param p_gmac_dev Pointer to the GMAC device instance.
 * \param p_opt      GMAC configure options.
 */
void gmac_dev_init(Gmac* p_gmac, gmac_device_t* p_gmac_dev,
		gmac_options_t* p_opt)
{
	uint32_t ul_ncfgr;

	/* Bring the controller to a known state: TX/RX off, all IRQs masked. */
	gmac_network_control(p_gmac, 0);
	gmac_disable_interrupt(p_gmac, ~0u);

	gmac_clear_statistics(p_gmac);

	/* Wipe every latched bit of the receive status register. */
	gmac_clear_rx_status(p_gmac, GMAC_RSR_RXOVR | GMAC_RSR_REC |
			GMAC_RSR_BNA | GMAC_RSR_HNO);

	/* Wipe every latched bit of the transmit status register. */
	gmac_clear_tx_status(p_gmac, GMAC_TSR_UBR | GMAC_TSR_COL |
			GMAC_TSR_RLE | GMAC_TSR_TXGO | GMAC_TSR_TFC |
			GMAC_TSR_TXCOMP | GMAC_TSR_HRESP);

	/* Full duplex, DBW(0) bus width, max frame size, do not copy FCS
	 * into RX buffers, pause frames enabled; keep any bits already set. */
	ul_ncfgr = gmac_get_config(p_gmac) | GMAC_NCFGR_FD |
			GMAC_NCFGR_DBW(0) | GMAC_NCFGR_MAXFS |
			GMAC_NCFGR_RFCS | GMAC_NCFGR_PEN;
	gmac_set_config(p_gmac, ul_ncfgr);
	gmac_enable_copy_all(p_gmac, p_opt->uc_copy_all_frame);
	gmac_disable_broadcast(p_gmac, p_opt->uc_no_boardcast);

	/* Set up the driver's RX/TX queue bookkeeping. */
	gmac_init_queue(p_gmac, p_gmac_dev);

	/* Program the station MAC address into specific-address slot 0. */
	gmac_set_address(p_gmac, 0, p_opt->uc_mac_addr);

#ifdef FREERTOS_USED
	/* Asynchronous operation needs a notification semaphore: create it,
	 * register it for kernel-aware debuggers, then take it once so it
	 * starts out empty. */
	vSemaphoreCreateBinary(netif_notification_semaphore);
	vQueueAddToRegistry(netif_notification_semaphore, "GMAC Sem");
	xSemaphoreTake(netif_notification_semaphore, 0);
#endif
}
/**
 * \brief GMAC Interrupt handler.
 *
 * Drains the (clear-on-read) interrupt status register of one queue and
 * dispatches RX, TX-complete, TX-error and HRESP events to the driver.
 *
 * \param gmacd Pointer to GMAC Driver instance.
 * \param queue Index into gmacd->queues of the queue to service.
 */
static void _gmacd_handler(struct _gmacd * gmacd, uint8_t queue)
{
	Gmac *gmac = gmacd->gmac;
	struct _gmacd_queue* q = &gmacd->queues[queue];
	uint32_t isr;
	uint32_t rsr;

	/* Interrupt Status Register is cleared on read — loop until no
	 * further events are pending. */
	while ((isr = gmac_get_it_status(gmac, queue)) != 0) {
		/* RX packet */
		if (isr & GMAC_INT_RX_BITS) {
			/* Read-and-write-back clears the latched RX status bits. */
			rsr = gmac_get_rx_status(gmac);
			gmac_clear_rx_status(gmac, rsr);

			/* Invoke callback with the raw RX status we observed. */
			if (q->rx_callback)
				q->rx_callback(queue, rsr);
		}

		/* TX error: delegate to the error handler, then leave the loop
		 * entirely (the handler presumably resets TX state — TODO confirm,
		 * its body is not visible here). */
		if (isr & GMAC_INT_TX_ERR_BITS) {
			_gmacd_tx_error_handler(gmacd, queue);
			break;
		}

		/* TX packet completed */
		if (isr & GMAC_IER_TCOMP) {
			_gmacd_tx_complete_handler(gmacd, queue);
		}

		/* HRESP not OK — AHB bus error; only logged, not recovered here. */
		if (isr & GMAC_IER_HRESP) {
			trace_error("HRESP not OK\n\r");
		}
	}
}
/**
 * \brief Put the GMAC peripheral into a clean, quiescent baseline state.
 *
 * Enables the peripheral clock, stops TX/RX, masks and acknowledges all
 * interrupts on every queue, and clears statistics and status registers.
 *
 * \param gmac Pointer to the GMAC instance.
 * \return Result of configuring the MDC clock divider.
 */
bool gmac_configure(Gmac* gmac)
{
	uint32_t ul_rx_mask;
	uint32_t ul_tx_mask;

	pmc_enable_peripheral(get_gmac_id_from_addr(gmac));

	/* Disable TX & RX and reset the network configuration
	 * to a 32-bit data bus width. */
	gmac_set_network_control_register(gmac, 0);
	gmac_set_network_config_register(gmac, GMAC_NCFGR_DBW_DBW32);

	/* Mask every interrupt source on every available queue. */
	gmac_disable_it(gmac, 0, ~0u);
#ifdef CONFIG_HAVE_GMAC_QUEUES
	gmac_disable_it(gmac, 1, ~0u);
	gmac_disable_it(gmac, 2, ~0u);
#endif

	gmac_clear_statistics(gmac);

	/* Wipe every latched bit of the receive status register. */
	ul_rx_mask = GMAC_RSR_RXOVR | GMAC_RSR_REC | GMAC_RSR_BNA |
			GMAC_RSR_HNO;
	gmac_clear_rx_status(gmac, ul_rx_mask);

	/* Wipe every latched bit of the transmit status register. */
	ul_tx_mask = GMAC_TSR_UBR | GMAC_TSR_COL | GMAC_TSR_RLE |
			GMAC_TSR_TXGO | GMAC_TSR_TFC | GMAC_TSR_TXCOMP |
			GMAC_TSR_UND | GMAC_TSR_HRESP;
	gmac_clear_tx_status(gmac, ul_tx_mask);

	/* Reading the interrupt status registers acknowledges pending IRQs. */
	gmac_get_it_status(gmac, 0);
#ifdef CONFIG_HAVE_GMAC_QUEUES
	gmac_get_it_status(gmac, 1);
	gmac_get_it_status(gmac, 2);
#endif

	return _gmac_configure_mdc_clock(gmac);
}
/**
 * \brief GMAC Interrupt handler.
 *
 * Services RX-complete and TX-complete/error events: clears the relevant
 * status bits, reaps finished TX descriptors from the circular list, and
 * invokes the registered RX / TX / wakeup callbacks.
 *
 * \param p_gmac_dev Pointer to GMAC device instance.
 */
void gmac_handler(gmac_device_t* p_gmac_dev)
{
	Gmac *p_hw = p_gmac_dev->p_hw;
	gmac_tx_descriptor_t *p_tx_td;
	gmac_dev_tx_cb_t *p_tx_cb;
	volatile uint32_t ul_isr;
	volatile uint32_t ul_rsr;
	volatile uint32_t ul_tsr;
	uint32_t ul_rx_status_flag;
	uint32_t ul_tx_status_flag;

	ul_isr = gmac_get_interrupt_status(p_hw);
	ul_rsr = gmac_get_rx_status(p_hw);
	ul_tsr = gmac_get_tx_status(p_hw);
	/* Keep only unmasked sources and drop reserved bits (0xF8030300).
	 * NOTE(review): assumes IMR reads 1 for disabled sources — confirm
	 * against the device datasheet. */
	ul_isr &= ~(gmac_get_interrupt_mask(p_hw) | 0xF8030300);

	/* RX packet */
	if ((ul_isr & GMAC_ISR_RCOMP) || (ul_rsr & GMAC_RSR_REC)) {
		ul_rx_status_flag = GMAC_RSR_REC;

		/* Check overrun */
		if (ul_rsr & GMAC_RSR_RXOVR) {
			ul_rx_status_flag |= GMAC_RSR_RXOVR;
		}
		/* Check buffer-not-available */
		if (ul_rsr & GMAC_RSR_BNA) {
			ul_rx_status_flag |= GMAC_RSR_BNA;
		}
		/* Writing the bits back clears the latched RX status. */
		gmac_clear_rx_status(p_hw, ul_rx_status_flag);

		/* Invoke RX callback with the accumulated status flags. */
		if (p_gmac_dev->func_rx_cb) {
			p_gmac_dev->func_rx_cb(ul_rx_status_flag);
		}
	}

	/* TX packet */
	if ((ul_isr & GMAC_ISR_TCOMP) || (ul_tsr & GMAC_TSR_TXCOMP)) {
		ul_tx_status_flag = GMAC_TSR_TXCOMP;
		/* A frame transmitted */

		/* Check retry-limit-exceeded: capture the callback slot for the
		 * current tail BEFORE the ring is reset, then restart TX. */
		if (ul_tsr & GMAC_TSR_RLE) {
			/* Status RLE & number of discarded buffers */
			ul_tx_status_flag = GMAC_TSR_RLE | CIRC_CNT(p_gmac_dev->us_tx_head,
					p_gmac_dev->us_tx_tail, p_gmac_dev->us_tx_list_size);
			p_tx_cb = &p_gmac_dev->func_tx_cb_list[p_gmac_dev->us_tx_tail];
			gmac_reset_tx_mem(p_gmac_dev);
			gmac_enable_transmit(p_hw, 1);
		}
		/* Check collision */
		if (ul_tsr & GMAC_TSR_COL) {
			ul_tx_status_flag |= GMAC_TSR_COL;
		}
		/* Check underrun */
		if (ul_tsr & GMAC_TSR_UND) {
			ul_tx_status_flag |= GMAC_TSR_UND;
		}

		/* Writing the bits back clears the latched TX status. */
		gmac_clear_tx_status(p_hw, ul_tx_status_flag);

		if (!CIRC_EMPTY(p_gmac_dev->us_tx_head, p_gmac_dev->us_tx_tail)) {
			/* Reap finished descriptors from tail toward head. */
			do {
				p_tx_td = &p_gmac_dev->p_tx_dscr[p_gmac_dev->us_tx_tail];
				p_tx_cb = &p_gmac_dev->func_tx_cb_list[p_gmac_dev->us_tx_tail];
				/* Any error? Exit if buffer has not been sent yet
				 * (USED bit still owned by hardware). */
				if ((p_tx_td->status.val & GMAC_TXD_USED) == 0) {
					break;
				}

				/* Notify upper layer that a packet has been sent */
				if (*p_tx_cb) {
					(*p_tx_cb) (ul_tx_status_flag);
				}

				circ_inc(&p_gmac_dev->us_tx_tail, p_gmac_dev->us_tx_list_size);
			} while (CIRC_CNT(p_gmac_dev->us_tx_head, p_gmac_dev->us_tx_tail,
							p_gmac_dev->us_tx_list_size));
		}

		/* On RLE, also notify the upper layer via the callback slot that
		 * was captured before the ring reset above. */
		if (ul_tsr & GMAC_TSR_RLE) {
			/* Notify upper layer RLE */
			if (*p_tx_cb) {
				(*p_tx_cb) (ul_tx_status_flag);
			}
		}

		/* If a wakeup has been scheduled, notify upper layer that it can
		   send other packets, and the sending will be successful. */
		if ((CIRC_SPACE(p_gmac_dev->us_tx_head, p_gmac_dev->us_tx_tail,
				p_gmac_dev->us_tx_list_size) >= p_gmac_dev->uc_wakeup_threshold)
				&& p_gmac_dev->func_wakeup_cb) {
			p_gmac_dev->func_wakeup_cb();
		}
	}
}
/**
 * \brief GMAC Interrupt handler (per-queue variant).
 *
 * Services RX-complete and TX-complete/error events on one priority queue:
 * clears the relevant status bits, reaps finished TX descriptors from that
 * queue's circular list, and invokes the registered callbacks. Under
 * FreeRTOS it finally signals the network task's notification semaphore.
 *
 * \param p_gmac_dev Pointer to GMAC device instance.
 * \param queue_idx  Queue to service (GMAC_QUE_0 uses the base interrupt
 *                   status register; others use the priority-queue one).
 */
void gmac_handler(gmac_device_t* p_gmac_dev, gmac_quelist_t queue_idx)
{
	Gmac *p_hw = p_gmac_dev->p_hw;
	gmac_tx_descriptor_t *p_tx_td;
	gmac_dev_tx_cb_t *p_tx_cb;
	volatile uint32_t ul_isr;
	volatile uint32_t ul_rsr;
	volatile uint32_t ul_tsr;
	uint32_t ul_rx_status_flag;
	uint32_t ul_tx_status_flag;
#ifdef FREERTOS_USED
	portBASE_TYPE xHigherPriorityTaskWoken = pdFALSE;
#endif

	gmac_queue_t* p_gmac_queue = &p_gmac_dev->gmac_queue_list[queue_idx];

	/* Queue 0 has its own interrupt status register; the priority
	 * queues are read through the per-queue register. */
	if(queue_idx == GMAC_QUE_0) {
		ul_isr = gmac_get_interrupt_status(p_hw);
	} else {
		ul_isr = gmac_get_priority_interrupt_status(p_hw, queue_idx);
	}
	ul_rsr = gmac_get_rx_status(p_hw);
	ul_tsr = gmac_get_tx_status(p_hw);
	/* Keep only unmasked sources and drop reserved bits (0xF8030300).
	 * NOTE(review): assumes IMR reads 1 for disabled sources — confirm
	 * against the device datasheet. */
	ul_isr &= ~(gmac_get_interrupt_mask(p_hw) | 0xF8030300);

	/* RX packet */
	if ((ul_isr & GMAC_ISR_RCOMP) || (ul_rsr & GMAC_RSR_REC)) {
		ul_rx_status_flag = GMAC_RSR_REC;

		/* Check overrun */
		if (ul_rsr & GMAC_RSR_RXOVR) {
			ul_rx_status_flag |= GMAC_RSR_RXOVR;
		}
		/* Check buffer-not-available */
		if (ul_rsr & GMAC_RSR_BNA) {
			ul_rx_status_flag |= GMAC_RSR_BNA;
		}
		/* Writing the bits back clears the latched RX status. */
		gmac_clear_rx_status(p_hw, ul_rx_status_flag);

		/* Invoke RX callback with the accumulated status flags. */
		if (p_gmac_queue->func_rx_cb) {
			p_gmac_queue->func_rx_cb(ul_rx_status_flag);
		}
	}

	/* TX packet */
	if ((ul_isr & GMAC_ISR_TCOMP) || (ul_tsr & GMAC_TSR_TXCOMP)) {
		ul_tx_status_flag = GMAC_TSR_TXCOMP;

		/* Check retry-limit-exceeded: capture the callback slot for the
		 * current tail BEFORE the ring is reset, then restart TX. */
		if (ul_tsr & GMAC_TSR_RLE) {
			/* Status RLE */
			ul_tx_status_flag = GMAC_TSR_RLE;
			p_tx_cb = &p_gmac_queue->func_tx_cb_list[p_gmac_queue->us_tx_tail];
			gmac_reset_tx_mem(p_gmac_dev, queue_idx);
			gmac_enable_transmit(p_hw, 1);
		}
		/* Check collision */
		if (ul_tsr & GMAC_TSR_COL) {
			ul_tx_status_flag |= GMAC_TSR_COL;
		}

		/* Writing the bits back clears the latched TX status. */
		gmac_clear_tx_status(p_hw, ul_tx_status_flag);

		if (!CIRC_EMPTY(p_gmac_queue->us_tx_head, p_gmac_queue->us_tx_tail)) {
			/* Reap finished descriptors from tail toward head. */
			do {
				p_tx_td = &p_gmac_queue->p_tx_dscr[p_gmac_queue->us_tx_tail];
				p_tx_cb = &p_gmac_queue->func_tx_cb_list[p_gmac_queue->us_tx_tail];
				/* Any error? Exit if buffer has not been sent yet
				 * (USED bit still owned by hardware). */
				if ((p_tx_td->status.val & GMAC_TXD_USED) == 0) {
					break;
				}

				/* Notify upper layer that a packet has been sent */
				if (*p_tx_cb) {
					(*p_tx_cb) (ul_tx_status_flag);
				}

				circ_inc(&p_gmac_queue->us_tx_tail, p_gmac_queue->us_tx_list_size);
			} while (CIRC_CNT(p_gmac_queue->us_tx_head, p_gmac_queue->us_tx_tail,
							p_gmac_queue->us_tx_list_size));
		}

		/* If a wakeup has been scheduled, notify upper layer that it can
		   send other packets, and the sending will be successful. */
		if ((CIRC_SPACE(p_gmac_queue->us_tx_head, p_gmac_queue->us_tx_tail,
				p_gmac_queue->us_tx_list_size) >= p_gmac_queue->uc_wakeup_threshold)
				&& p_gmac_queue->func_wakeup_cb) {
			p_gmac_queue->func_wakeup_cb();
		}
	}

#ifdef FREERTOS_USED
	/* Notify TCP/IP task to start data processing. */
	/* LwIP works on top of GMAC driver, hence this semaphore locks */
	/* the complete IP stack. */
	xSemaphoreGiveFromISR(netif_notification_semaphore,
			&xHigherPriorityTaskWoken);
	portEND_SWITCHING_ISR(xHigherPriorityTaskWoken);
#endif
}