/**
 * \brief Send ul_size bytes from p_buffer. This copies the buffer to one of
 * the GMAC Tx buffers, and then indicates to the GMAC that the buffer is
 * ready. The buffer is always treated as the last buffer of the frame, so
 * the frame can be transmitted immediately.
 *
 * \param p_gmac_dev Pointer to the GMAC device instance.
 * \param p_buffer Pointer to the data buffer.
 * \param ul_size Length of the frame.
 * \param func_tx_cb Transmit callback function.
 *
 * \return GMAC_OK if the frame was queued successfully, otherwise an error code.
 */
uint32_t gmac_dev_write(gmac_device_t* p_gmac_dev, void *p_buffer,
		uint32_t ul_size, gmac_dev_tx_cb_t func_tx_cb)
{
	volatile gmac_tx_descriptor_t *p_tx_td;
	volatile gmac_dev_tx_cb_t *p_func_tx_cb;

	Gmac *p_hw = p_gmac_dev->p_hw;

	/* Check parameter */
	if (ul_size > GMAC_TX_UNITSIZE) {
		return GMAC_PARAM;
	}

	/* Pointer to the current transmit descriptor */
	p_tx_td = &p_gmac_dev->p_tx_dscr[p_gmac_dev->us_tx_head];

	/* If there is no free TxTd, the buffer can't be sent: report busy */
	if (CIRC_SPACE(p_gmac_dev->us_tx_head, p_gmac_dev->us_tx_tail,
			p_gmac_dev->us_tx_list_size) == 0) {
		if (p_tx_td[p_gmac_dev->us_tx_head].status.val & GMAC_TXD_USED)
			return GMAC_TX_BUSY;
	}

	/* Pointer to the current Tx callback */
	p_func_tx_cb = &p_gmac_dev->func_tx_cb_list[p_gmac_dev->us_tx_head];

	/* Set up/copy data to the transmission buffer */
	if (p_buffer && ul_size) {
		/* Driver manages the ring buffer */
		memcpy((void *)p_tx_td->addr, p_buffer, ul_size);
	}

	/* Tx callback */
	*p_func_tx_cb = func_tx_cb;

	/* Update transmit descriptor status */

	/* The buffer size defined is the length of the Ethernet frame,
	   so it is always the last buffer of the frame. */
	if (p_gmac_dev->us_tx_head == p_gmac_dev->us_tx_list_size - 1) {
		p_tx_td->status.val =
				(ul_size & GMAC_TXD_LEN_MASK) | GMAC_TXD_LAST | GMAC_TXD_WRAP;
	} else {
		p_tx_td->status.val =
				(ul_size & GMAC_TXD_LEN_MASK) | GMAC_TXD_LAST;
	}

	circ_inc(&p_gmac_dev->us_tx_head, p_gmac_dev->us_tx_list_size);

	/* Now start the transmission if it is not already running */
	gmac_start_transmission(p_hw);

	return GMAC_OK;
}
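/*
 * Usage sketch for gmac_dev_write() (illustrative only, not part of the
 * driver). The frame source, the length and the callback below are
 * assumptions made for the example; only the driver calls themselves are
 * taken from this file.
 *
 * \code
 * static void tx_done_cb(uint32_t ul_status)
 * {
 *     // Invoked from gmac_handler() once the descriptor has been used.
 *     (void) ul_status;
 * }
 *
 * static void send_frame(gmac_device_t *p_dev,
 *         const uint8_t *p_data, uint32_t ul_len)
 * {
 *     // Retry while the descriptor ring is full; the copy into the
 *     // driver-owned Tx buffer is performed inside gmac_dev_write().
 *     while (gmac_dev_write(p_dev, (void *) p_data, ul_len,
 *             tx_done_cb) == GMAC_TX_BUSY) {
 *         // Wait for gmac_handler() to release descriptors.
 *     }
 * }
 * \endcode
 */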
/**
 * \brief Queue ul_size bytes for transmission without copying. The caller
 * must already have written the frame data into the buffer attached to the
 * current transmit descriptor of the selected queue; this function only
 * updates the descriptor and starts transmission.
 *
 * \param p_gmac_dev Pointer to the GMAC device instance.
 * \param queue_idx Index of the priority queue to use.
 * \param ul_size Length of the frame.
 * \param func_tx_cb Transmit callback function.
 *
 * \return GMAC_OK if the frame was queued successfully, otherwise an error code.
 */
uint32_t gmac_dev_write_nocopy(gmac_device_t* p_gmac_dev,
		gmac_quelist_t queue_idx, uint32_t ul_size,
		gmac_dev_tx_cb_t func_tx_cb)
{
	volatile gmac_tx_descriptor_t *p_tx_td;
	volatile gmac_dev_tx_cb_t *p_func_tx_cb;

	Gmac *p_hw = p_gmac_dev->p_hw;
	gmac_queue_t* p_gmac_queue = &p_gmac_dev->gmac_queue_list[queue_idx];

	/* Check parameter */
	if (ul_size > GMAC_TX_UNITSIZE) {
		return GMAC_PARAM;
	}

	/* Pointer to the current transmit descriptor */
	p_tx_td = &p_gmac_queue->p_tx_dscr[p_gmac_queue->us_tx_head];

	/* Pointer to the current Tx callback */
	p_func_tx_cb = &p_gmac_queue->func_tx_cb_list[p_gmac_queue->us_tx_head];

	/* Tx callback */
	*p_func_tx_cb = func_tx_cb;

	/* Update transmit descriptor status */

	/* The buffer size defined is the length of the Ethernet frame,
	   so it is always the last buffer of the frame. */
	if (p_gmac_queue->us_tx_head == p_gmac_queue->us_tx_list_size - 1) {
		p_tx_td->status.val =
				(ul_size & GMAC_TXD_LEN_MASK) | GMAC_TXD_LAST | GMAC_TXD_WRAP;
	} else {
		p_tx_td->status.val =
				(ul_size & GMAC_TXD_LEN_MASK) | GMAC_TXD_LAST;
	}

	circ_inc(&p_gmac_queue->us_tx_head, p_gmac_queue->us_tx_list_size);

	/* Now start the transmission if it is not already running */
	gmac_start_transmission(p_hw);

	return GMAC_OK;
}
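/*
 * Usage sketch for gmac_dev_write_nocopy() (illustrative only). Because the
 * function copies nothing, the frame must already sit in the buffer that the
 * current Tx descriptor of the chosen queue points to. The helper
 * get_current_tx_buffer() below is hypothetical; how the application obtains
 * that buffer address depends on how the Tx descriptor list was set up.
 *
 * \code
 * extern uint8_t *get_current_tx_buffer(gmac_device_t *p_dev,
 *         gmac_quelist_t queue);   // hypothetical helper
 *
 * static void send_frame_nocopy(gmac_device_t *p_dev, uint32_t ul_len)
 * {
 *     uint8_t *p_buf = get_current_tx_buffer(p_dev, GMAC_QUE_0);
 *
 *     // Build the frame directly in the descriptor's buffer, then queue it.
 *     build_frame(p_buf, ul_len);             // hypothetical frame builder
 *     gmac_dev_write_nocopy(p_dev, GMAC_QUE_0, ul_len, NULL);
 * }
 * \endcode
 */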
/**
 * \brief GMAC Interrupt handler.
 *
 * \param p_gmac_dev Pointer to GMAC device instance.
 */
void gmac_handler(gmac_device_t* p_gmac_dev)
{
	Gmac *p_hw = p_gmac_dev->p_hw;

	gmac_tx_descriptor_t *p_tx_td;
	gmac_dev_tx_cb_t *p_tx_cb;
	volatile uint32_t ul_isr;
	volatile uint32_t ul_rsr;
	volatile uint32_t ul_tsr;
	uint32_t ul_rx_status_flag;
	uint32_t ul_tx_status_flag;

	ul_isr = gmac_get_interrupt_status(p_hw);
	ul_rsr = gmac_get_rx_status(p_hw);
	ul_tsr = gmac_get_tx_status(p_hw);

	ul_isr &= ~(gmac_get_interrupt_mask(p_hw) | 0xF8030300);

	/* RX packet */
	if ((ul_isr & GMAC_ISR_RCOMP) || (ul_rsr & GMAC_RSR_REC)) {
		ul_rx_status_flag = GMAC_RSR_REC;

		/* Check OVR */
		if (ul_rsr & GMAC_RSR_RXOVR) {
			ul_rx_status_flag |= GMAC_RSR_RXOVR;
		}
		/* Check BNA */
		if (ul_rsr & GMAC_RSR_BNA) {
			ul_rx_status_flag |= GMAC_RSR_BNA;
		}
		/* Clear status */
		gmac_clear_rx_status(p_hw, ul_rx_status_flag);

		/* Invoke callbacks */
		if (p_gmac_dev->func_rx_cb) {
			p_gmac_dev->func_rx_cb(ul_rx_status_flag);
		}
	}

	/* TX packet */
	if ((ul_isr & GMAC_ISR_TCOMP) || (ul_tsr & GMAC_TSR_TXCOMP)) {
		ul_tx_status_flag = GMAC_TSR_TXCOMP;

		/* A frame has been transmitted */

		/* Check RLE */
		if (ul_tsr & GMAC_TSR_RLE) {
			/* Status RLE & number of discarded buffers */
			ul_tx_status_flag = GMAC_TSR_RLE | CIRC_CNT(p_gmac_dev->us_tx_head,
					p_gmac_dev->us_tx_tail, p_gmac_dev->us_tx_list_size);
			p_tx_cb = &p_gmac_dev->func_tx_cb_list[p_gmac_dev->us_tx_tail];
			gmac_reset_tx_mem(p_gmac_dev);
			gmac_enable_transmit(p_hw, 1);
		}
		/* Check COL */
		if (ul_tsr & GMAC_TSR_COL) {
			ul_tx_status_flag |= GMAC_TSR_COL;
		}
		/* Check UND */
		if (ul_tsr & GMAC_TSR_UND) {
			ul_tx_status_flag |= GMAC_TSR_UND;
		}
		/* Clear status */
		gmac_clear_tx_status(p_hw, ul_tx_status_flag);

		if (!CIRC_EMPTY(p_gmac_dev->us_tx_head, p_gmac_dev->us_tx_tail)) {
			/* Check the buffers */
			do {
				p_tx_td = &p_gmac_dev->p_tx_dscr[p_gmac_dev->us_tx_tail];
				p_tx_cb = &p_gmac_dev->func_tx_cb_list[p_gmac_dev->us_tx_tail];
				/* Any error? Exit if the buffer has not been sent yet */
				if ((p_tx_td->status.val & GMAC_TXD_USED) == 0) {
					break;
				}

				/* Notify the upper layer that a packet has been sent */
				if (*p_tx_cb) {
					(*p_tx_cb) (ul_tx_status_flag);
				}

				circ_inc(&p_gmac_dev->us_tx_tail, p_gmac_dev->us_tx_list_size);
			} while (CIRC_CNT(p_gmac_dev->us_tx_head, p_gmac_dev->us_tx_tail,
					p_gmac_dev->us_tx_list_size));
		}

		if (ul_tsr & GMAC_TSR_RLE) {
			/* Notify the upper layer of the RLE error */
			if (*p_tx_cb) {
				(*p_tx_cb) (ul_tx_status_flag);
			}
		}

		/* If a wakeup has been scheduled, notify the upper layer that it can
		   send other packets, and the sending will be successful. */
		if ((CIRC_SPACE(p_gmac_dev->us_tx_head, p_gmac_dev->us_tx_tail,
				p_gmac_dev->us_tx_list_size) >= p_gmac_dev->uc_wakeup_threshold)
				&& p_gmac_dev->func_wakeup_cb) {
			p_gmac_dev->func_wakeup_cb();
		}
	}
}
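/*
 * Interrupt entry sketch (illustrative only). On SAM devices the CMSIS
 * vector for the GMAC is typically named GMAC_Handler(); gs_gmac_dev is an
 * assumed application-level device instance, not part of this driver.
 *
 * \code
 * static gmac_device_t gs_gmac_dev;
 *
 * void GMAC_Handler(void)
 * {
 *     // Delegate all Rx/Tx status handling to the driver.
 *     gmac_handler(&gs_gmac_dev);
 * }
 * \endcode
 */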
/**
 * \brief Frames can be read from the GMAC in multiple sections.
 * Read ul_frame_size bytes from the GMAC receive buffers into p_frame.
 * p_rcv_size is the size of the entire frame. Generally gmac_dev_read
 * is called repeatedly until the sum of all ul_frame_size values equals
 * the value of p_rcv_size.
 *
 * \param p_gmac_dev Pointer to the GMAC device instance.
 * \param p_frame Address of the frame buffer.
 * \param ul_frame_size Length of the frame buffer.
 * \param p_rcv_size Received frame size.
 *
 * \return GMAC_OK if a frame has been received successfully, otherwise an
 * error code.
 */
uint32_t gmac_dev_read(gmac_device_t* p_gmac_dev, uint8_t* p_frame,
		uint32_t ul_frame_size, uint32_t* p_rcv_size)
{
	uint16_t us_buffer_length;
	uint32_t tmp_ul_frame_size = 0;
	uint8_t *p_tmp_frame = 0;
	uint16_t us_tmp_idx = p_gmac_dev->us_rx_idx;
	gmac_rx_descriptor_t *p_rx_td =
			&p_gmac_dev->p_rx_dscr[p_gmac_dev->us_rx_idx];
	int8_t c_is_frame = 0;

	if (p_frame == NULL)
		return GMAC_PARAM;

	/* Set the default return value */
	*p_rcv_size = 0;

	/* Process received RX descriptors */
	while ((p_rx_td->addr.val & GMAC_RXD_OWNERSHIP) == GMAC_RXD_OWNERSHIP) {
		/* A start of frame has been received, discard previous fragments */
		if ((p_rx_td->status.val & GMAC_RXD_SOF) == GMAC_RXD_SOF) {
			/* Skip previous fragments */
			while (us_tmp_idx != p_gmac_dev->us_rx_idx) {
				p_rx_td = &p_gmac_dev->p_rx_dscr[p_gmac_dev->us_rx_idx];
				p_rx_td->addr.val &= ~(GMAC_RXD_OWNERSHIP);
				circ_inc(&p_gmac_dev->us_rx_idx, p_gmac_dev->us_rx_list_size);
			}
			/* Reset the temporary frame pointer */
			p_tmp_frame = p_frame;
			tmp_ul_frame_size = 0;
			/* Start to gather buffers into a frame */
			c_is_frame = 1;
		}

		/* Increment the index */
		circ_inc(&us_tmp_idx, p_gmac_dev->us_rx_list_size);

		/* Copy data into the frame buffer */
		if (c_is_frame) {
			if (us_tmp_idx == p_gmac_dev->us_rx_idx) {
				do {
					p_rx_td = &p_gmac_dev->p_rx_dscr[p_gmac_dev->us_rx_idx];
					p_rx_td->addr.val &= ~(GMAC_RXD_OWNERSHIP);
					circ_inc(&p_gmac_dev->us_rx_idx,
							p_gmac_dev->us_rx_list_size);
				} while (us_tmp_idx != p_gmac_dev->us_rx_idx);

				return GMAC_RX_NULL;
			}

			/* Copy the buffer into the application frame */
			us_buffer_length = GMAC_RX_UNITSIZE;
			if ((tmp_ul_frame_size + us_buffer_length) > ul_frame_size) {
				us_buffer_length = ul_frame_size - tmp_ul_frame_size;
			}

			memcpy(p_tmp_frame,
					(void *)(p_rx_td->addr.val & GMAC_RXD_ADDR_MASK),
					us_buffer_length);
			p_tmp_frame += us_buffer_length;
			tmp_ul_frame_size += us_buffer_length;

			/* An end of frame has been received, return the data */
			if ((p_rx_td->status.val & GMAC_RXD_EOF) == GMAC_RXD_EOF) {
				/* Frame size from the GMAC */
				*p_rcv_size = (p_rx_td->status.val & GMAC_RXD_LEN_MASK);

				/* All data have been copied into the application frame
				   buffer => release the descriptors */
				while (p_gmac_dev->us_rx_idx != us_tmp_idx) {
					p_rx_td = &p_gmac_dev->p_rx_dscr[p_gmac_dev->us_rx_idx];
					p_rx_td->addr.val &= ~(GMAC_RXD_OWNERSHIP);
					circ_inc(&p_gmac_dev->us_rx_idx,
							p_gmac_dev->us_rx_list_size);
				}

				/* The application frame buffer is too small, so not all of
				   the data could be copied */
				if (tmp_ul_frame_size < *p_rcv_size) {
					return GMAC_SIZE_TOO_SMALL;
				}

				return GMAC_OK;
			}
		}
		/* SOF has not been detected, skip the fragment */
		else {
			p_rx_td->addr.val &= ~(GMAC_RXD_OWNERSHIP);
			p_gmac_dev->us_rx_idx = us_tmp_idx;
		}

		/* Process the next buffer */
		p_rx_td = &p_gmac_dev->p_rx_dscr[us_tmp_idx];
	}

	return GMAC_RX_NULL;
}
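/*
 * Usage sketch for gmac_dev_read() (illustrative only). gs_gmac_dev, the
 * 1536-byte buffer and process_frame() are assumptions for the example.
 * gmac_dev_read() returns GMAC_RX_NULL while no complete frame is available,
 * so it can simply be polled, for instance from the Rx callback or a main
 * loop.
 *
 * \code
 * static gmac_device_t gs_gmac_dev;
 * static uint8_t gs_rx_buf[1536];
 *
 * static void poll_rx(void)
 * {
 *     uint32_t ul_frm_size;
 *
 *     while (gmac_dev_read(&gs_gmac_dev, gs_rx_buf,
 *             sizeof(gs_rx_buf), &ul_frm_size) == GMAC_OK) {
 *         // A complete frame of ul_frm_size bytes is now in gs_rx_buf.
 *         process_frame(gs_rx_buf, ul_frm_size);   // hypothetical consumer
 *     }
 * }
 * \endcode
 */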
/**
 * \brief GMAC Interrupt handler.
 *
 * \param p_gmac_dev Pointer to GMAC device instance.
 * \param queue_idx Index of the priority queue being serviced.
 */
void gmac_handler(gmac_device_t* p_gmac_dev, gmac_quelist_t queue_idx)
{
	Gmac *p_hw = p_gmac_dev->p_hw;

	gmac_tx_descriptor_t *p_tx_td;
	gmac_dev_tx_cb_t *p_tx_cb;
	volatile uint32_t ul_isr;
	volatile uint32_t ul_rsr;
	volatile uint32_t ul_tsr;
	uint32_t ul_rx_status_flag;
	uint32_t ul_tx_status_flag;
#ifdef FREERTOS_USED
	portBASE_TYPE xHigherPriorityTaskWoken = pdFALSE;
#endif

	gmac_queue_t* p_gmac_queue = &p_gmac_dev->gmac_queue_list[queue_idx];

	if (queue_idx == GMAC_QUE_0) {
		ul_isr = gmac_get_interrupt_status(p_hw);
	} else {
		ul_isr = gmac_get_priority_interrupt_status(p_hw, queue_idx);
	}
	ul_rsr = gmac_get_rx_status(p_hw);
	ul_tsr = gmac_get_tx_status(p_hw);

	ul_isr &= ~(gmac_get_interrupt_mask(p_hw) | 0xF8030300);

	/* RX packet */
	if ((ul_isr & GMAC_ISR_RCOMP) || (ul_rsr & GMAC_RSR_REC)) {
		ul_rx_status_flag = GMAC_RSR_REC;

		/* Check OVR */
		if (ul_rsr & GMAC_RSR_RXOVR) {
			ul_rx_status_flag |= GMAC_RSR_RXOVR;
		}
		/* Check BNA */
		if (ul_rsr & GMAC_RSR_BNA) {
			ul_rx_status_flag |= GMAC_RSR_BNA;
		}
		/* Clear status */
		gmac_clear_rx_status(p_hw, ul_rx_status_flag);

		/* Invoke callbacks */
		if (p_gmac_queue->func_rx_cb) {
			p_gmac_queue->func_rx_cb(ul_rx_status_flag);
		}
	}

	/* TX packet */
	if ((ul_isr & GMAC_ISR_TCOMP) || (ul_tsr & GMAC_TSR_TXCOMP)) {
		ul_tx_status_flag = GMAC_TSR_TXCOMP;

		/* Check RLE */
		if (ul_tsr & GMAC_TSR_RLE) {
			/* Status RLE */
			ul_tx_status_flag = GMAC_TSR_RLE;
			p_tx_cb = &p_gmac_queue->func_tx_cb_list[p_gmac_queue->us_tx_tail];
			gmac_reset_tx_mem(p_gmac_dev, queue_idx);
			gmac_enable_transmit(p_hw, 1);
		}
		/* Check COL */
		if (ul_tsr & GMAC_TSR_COL) {
			ul_tx_status_flag |= GMAC_TSR_COL;
		}
		/* Clear status */
		gmac_clear_tx_status(p_hw, ul_tx_status_flag);

		if (!CIRC_EMPTY(p_gmac_queue->us_tx_head, p_gmac_queue->us_tx_tail)) {
			/* Check the buffers */
			do {
				p_tx_td = &p_gmac_queue->p_tx_dscr[p_gmac_queue->us_tx_tail];
				p_tx_cb = &p_gmac_queue->func_tx_cb_list[p_gmac_queue->us_tx_tail];
				/* Any error? Exit if the buffer has not been sent yet */
				if ((p_tx_td->status.val & GMAC_TXD_USED) == 0) {
					break;
				}

				/* Notify the upper layer that a packet has been sent */
				if (*p_tx_cb) {
					(*p_tx_cb) (ul_tx_status_flag);
				}

				circ_inc(&p_gmac_queue->us_tx_tail, p_gmac_queue->us_tx_list_size);
			} while (CIRC_CNT(p_gmac_queue->us_tx_head, p_gmac_queue->us_tx_tail,
					p_gmac_queue->us_tx_list_size));
		}

		/* If a wakeup has been scheduled, notify the upper layer that it can
		   send other packets, and the sending will be successful. */
		if ((CIRC_SPACE(p_gmac_queue->us_tx_head, p_gmac_queue->us_tx_tail,
				p_gmac_queue->us_tx_list_size) >= p_gmac_queue->uc_wakeup_threshold)
				&& p_gmac_queue->func_wakeup_cb) {
			p_gmac_queue->func_wakeup_cb();
		}
	}

#ifdef FREERTOS_USED
	/* Notify the TCP/IP task to start data processing. */
	/* lwIP works on top of the GMAC driver, hence this semaphore locks */
	/* the complete IP stack. */
	xSemaphoreGiveFromISR(netif_notification_semaphore,
			&xHigherPriorityTaskWoken);
	portEND_SWITCHING_ISR(xHigherPriorityTaskWoken);
#endif
}
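/*
 * Interrupt entry sketch for the multi-queue handler (illustrative only).
 * gs_gmac_dev is an assumed application-level device instance; the
 * GMAC_QUE_1 enumerator and the GMACQ1_Handler vector name are assumptions,
 * since the exact queue enumeration and vector names depend on the device
 * header in use.
 *
 * \code
 * static gmac_device_t gs_gmac_dev;
 *
 * void GMAC_Handler(void)
 * {
 *     // Base queue interrupt.
 *     gmac_handler(&gs_gmac_dev, GMAC_QUE_0);
 * }
 *
 * void GMACQ1_Handler(void)    // assumed vector name for priority queue 1
 * {
 *     gmac_handler(&gs_gmac_dev, GMAC_QUE_1);
 * }
 * \endcode
 */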