/*
 * ixgbe_free_dma - Free all the DMA resources of all rx/tx rings.
 */
void
ixgbe_free_dma(ixgbe_t *ixgbe)
{
	int ring;

	/*
	 * Release the receive descriptor ring and the receive
	 * control-block lists of every rx ring.
	 */
	for (ring = 0; ring < ixgbe->num_rx_rings; ring++) {
		ixgbe_rx_ring_t *rxr = &ixgbe->rx_rings[ring];
		ixgbe_rx_data_t *rxd = rxr->rx_data;

		ixgbe_free_rbd_ring(rxd);
		ixgbe_free_rcb_lists(rxd);
	}

	/*
	 * Release the transmit descriptor ring and the transmit
	 * control-block lists of every tx ring.
	 */
	for (ring = 0; ring < ixgbe->num_tx_rings; ring++) {
		ixgbe_tx_ring_t *txr = &ixgbe->tx_rings[ring];

		ixgbe_free_tbd_ring(txr);
		ixgbe_free_tcb_lists(txr);
	}
}
/*
 * ixgbe_alloc_rcb_lists - Memory allocation for the receive control blocks
 * of one ring.
 *
 * Walks the pre-allocated rcb_area of the ring: the first ring_size control
 * blocks are attached to the work list, the remaining free_list_size blocks
 * to the free list.  Each control block is given its own receive DMA buffer,
 * offset by IPHDR_ALIGN_ROOM, and an mblk created with desballoc() so the
 * buffer can be loaned upstream and recycled via ixgbe_rx_recycle().
 *
 * Returns IXGBE_SUCCESS, or IXGBE_FAILURE after releasing everything
 * allocated so far if a DMA buffer allocation fails.
 */
static int
ixgbe_alloc_rcb_lists(ixgbe_rx_data_t *rx_data)
{
	uint32_t i;	/* uint32_t, not int: compared against rcb_count */
	int ret;
	rx_control_block_t *rcb;
	ixgbe_t *ixgbe = rx_data->rx_ring->ixgbe;
	dma_buffer_t *rx_buf;
	uint32_t rcb_count;

	/*
	 * Allocate memory for the rx control blocks for work list and
	 * free list.
	 */
	rcb_count = rx_data->ring_size + rx_data->free_list_size;
	rcb = rx_data->rcb_area;

	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);

		if (i < rx_data->ring_size) {
			/* Attach the rx control block to the work list */
			rx_data->work_list[i] = rcb;
		} else {
			/* Attach the rx control block to the free list */
			rx_data->free_list[i - rx_data->ring_size] = rcb;
		}

		rx_buf = &rcb->rx_buf;
		ret = ixgbe_alloc_dma_buffer(ixgbe,
		    rx_buf, ixgbe->rx_buf_size);

		if (ret != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe, "Allocate rx dma buffer failed");
			goto alloc_rcb_lists_fail;
		}

		/*
		 * Reserve IPHDR_ALIGN_ROOM bytes at the head of the
		 * buffer so the IP header of a received frame lands on
		 * an aligned boundary.
		 */
		rx_buf->size -= IPHDR_ALIGN_ROOM;
		rx_buf->address += IPHDR_ALIGN_ROOM;
		rx_buf->dma_address += IPHDR_ALIGN_ROOM;

		rcb->ref_cnt = 1;
		rcb->rx_data = (ixgbe_rx_data_t *)rx_data;
		rcb->free_rtn.free_func = ixgbe_rx_recycle;
		rcb->free_rtn.free_arg = (char *)rcb;

		/*
		 * NOTE(review): desballoc() can return NULL and rcb->mp
		 * is deliberately not checked here — presumably the
		 * receive path handles a NULL mp (e.g. falls back to a
		 * copying path); verify against the rx handler.
		 */
		rcb->mp = desballoc((unsigned char *)
		    rx_buf->address,
		    rx_buf->size,
		    0, &rcb->free_rtn);
	}

	return (IXGBE_SUCCESS);

alloc_rcb_lists_fail:
	ixgbe_free_rcb_lists(rx_data);

	return (IXGBE_FAILURE);
}