/*
 * ixgbe_alloc_rcb_lists - Memory allocation for the receive control blocks
 * of one ring.
 */
static int
ixgbe_alloc_rcb_lists(ixgbe_rx_data_t *rx_data)
{
	int i;
	int ret;
	rx_control_block_t *rcb;
	ixgbe_t *ixgbe = rx_data->rx_ring->ixgbe;
	dma_buffer_t *rx_buf;
	uint32_t rcb_count;

	/*
	 * Attach the pre-allocated rx control blocks to the work list
	 * and free list, and allocate a DMA buffer for each.
	 */
	rcb_count = rx_data->ring_size + rx_data->free_list_size;
	rcb = rx_data->rcb_area;

	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);

		if (i < rx_data->ring_size) {
			/* Attach the rx control block to the work list */
			rx_data->work_list[i] = rcb;
		} else {
			/* Attach the rx control block to the free list */
			rx_data->free_list[i - rx_data->ring_size] = rcb;
		}

		rx_buf = &rcb->rx_buf;
		ret = ixgbe_alloc_dma_buffer(ixgbe,
		    rx_buf, ixgbe->rx_buf_size);

		if (ret != IXGBE_SUCCESS) {
			ixgbe_error(ixgbe, "Allocate rx dma buffer failed");
			goto alloc_rcb_lists_fail;
		}

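		/*
		 * Reserve IPHDR_ALIGN_ROOM bytes at the front of the
		 * buffer so that the IP header is suitably aligned
		 * once the Ethernet header has been received into it.
		 */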
		rx_buf->size -= IPHDR_ALIGN_ROOM;
		rx_buf->address += IPHDR_ALIGN_ROOM;
		rx_buf->dma_address += IPHDR_ALIGN_ROOM;

		rcb->ref_cnt = 1;
		rcb->rx_data = (ixgbe_rx_data_t *)rx_data;
		rcb->free_rtn.free_func = ixgbe_rx_recycle;
		rcb->free_rtn.free_arg = (char *)rcb;

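		/*
		 * Wrap the DMA buffer in an mblk without copying.
		 * desballoc(9F) can return NULL; a NULL rcb->mp must
		 * be handled at receive time (e.g. by retrying the
		 * desballoc).
		 */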
		rcb->mp = desballoc((unsigned char *)
		    rx_buf->address,
		    rx_buf->size,
		    0, &rcb->free_rtn);
	}

	return (IXGBE_SUCCESS);

alloc_rcb_lists_fail:
	ixgbe_free_rcb_lists(rx_data);

	return (IXGBE_FAILURE);
}
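
The free_rtn registered above ties each buffer to the STREAMS message built by desballoc(9F): when the upper layer frees the mblk, free_func runs with free_arg and the buffer can be handed back to the driver instead of the system. Below is a minimal sketch of such a recycle callback, assuming the field names used in this file and the driver's NEXT_INDEX ring-arithmetic helper; the driver's actual ixgbe_rx_recycle also reference-counts the rcb so ring teardown can wait for loaned-up buffers.

/*
 * Hedged sketch of a desballoc free_func: re-arm the mblk and put
 * the rx control block back on the free list.
 */
static void
ixgbe_rx_recycle_sketch(caddr_t arg)
{
	rx_control_block_t *rcb = (rx_control_block_t *)(uintptr_t)arg;
	ixgbe_rx_data_t *rx_data = rcb->rx_data;

	/* Wrap the same DMA buffer in a fresh mblk for the next loan. */
	rcb->mp = desballoc((unsigned char *)rcb->rx_buf.address,
	    rcb->rx_buf.size, 0, &rcb->free_rtn);

	mutex_enter(&rx_data->recycle_lock);
	rx_data->free_list[rx_data->rcb_tail] = rcb;
	rx_data->rcb_tail = NEXT_INDEX(rx_data->rcb_tail, 1,
	    rx_data->free_list_size);
	rx_data->rcb_free++;
	mutex_exit(&rx_data->recycle_lock);
}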
Example #2

/*
 * ixgbe_alloc_tcb_lists - Memory allocation for the transmit control blocks
 * of one ring.
 */
static int
ixgbe_alloc_tcb_lists(ixgbe_tx_ring_t *tx_ring)
{
	int i;
	int ret;
	tx_control_block_t *tcb;
	dma_buffer_t *tx_buf;
	ixgbe_t *ixgbe = tx_ring->ixgbe;
	dev_info_t *devinfo = ixgbe->dip;

	/*
	 * Allocate memory for the work list.
	 */
	tx_ring->work_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->ring_size, KM_NOSLEEP);

	if (tx_ring->work_list == NULL) {
		ixgbe_error(ixgbe,
		    "Cound not allocate memory for tx work list");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory for the free list.
	 */
	tx_ring->free_list = kmem_zalloc(sizeof (tx_control_block_t *) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->free_list == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		ixgbe_error(ixgbe,
		    "Cound not allocate memory for tx free list");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory for the tx control blocks of the free list.
	 */
	tx_ring->tcb_area =
	    kmem_zalloc(sizeof (tx_control_block_t) *
	    tx_ring->free_list_size, KM_NOSLEEP);

	if (tx_ring->tcb_area == NULL) {
		kmem_free(tx_ring->work_list,
		    sizeof (tx_control_block_t *) * tx_ring->ring_size);
		tx_ring->work_list = NULL;

		kmem_free(tx_ring->free_list,
		    sizeof (tx_control_block_t *) * tx_ring->free_list_size);
		tx_ring->free_list = NULL;

		ixgbe_error(ixgbe,
		    "Cound not allocate memory for tx control blocks");
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate DMA handles and buffers for the tx control blocks
	 * of the free list.
	 */
	tcb = tx_ring->tcb_area;
	for (i = 0; i < tx_ring->free_list_size; i++, tcb++) {
		ASSERT(tcb != NULL);

		tx_ring->free_list[i] = tcb;

		/*
		 * Pre-allocate dma handles for transmit. These dma handles
		 * will be dynamically bound to the data buffers passed down
		 * from the upper layers at the time of transmitting.
		 */
		ret = ddi_dma_alloc_handle(devinfo,
		    &ixgbe_tx_dma_attr,
		    DDI_DMA_DONTWAIT, NULL,
		    &tcb->tx_dma_handle);
		if (ret != DDI_SUCCESS) {
			tcb->tx_dma_handle = NULL;
			ixgbe_error(ixgbe,
			    "Could not allocate tx dma handle: %x", ret);
			goto alloc_tcb_lists_fail;
		}

		/*
		 * Pre-allocate transmit buffers for packets whose size
		 * is less than bcopy_thresh.
		 */
		tx_buf = &tcb->tx_buf;

		ret = ixgbe_alloc_dma_buffer(ixgbe,
		    tx_buf, ixgbe->tx_buf_size);

		if (ret != IXGBE_SUCCESS) {
			ASSERT(tcb->tx_dma_handle != NULL);
			ddi_dma_free_handle(&tcb->tx_dma_handle);
			tcb->tx_dma_handle = NULL;
			ixgbe_error(ixgbe, "Allocate tx dma buffer failed");
			goto alloc_tcb_lists_fail;
		}

		tcb->last_index = MAX_TX_RING_SIZE;
	}

	return (IXGBE_SUCCESS);

alloc_tcb_lists_fail:
	ixgbe_free_tcb_lists(tx_ring);

	return (IXGBE_FAILURE);
}
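
The handles pre-allocated above come into play in the transmit path: fragments smaller than the bcopy threshold are copied into the tcb's pre-allocated tx_buf, while larger fragments are bound through tx_dma_handle so the hardware reads the upper layer's buffer directly. Here is a hedged sketch of that decision for a single mblk fragment; the function name is hypothetical, the bcopy_thresh mentioned in the comment is assumed to live in ixgbe->tx_copy_thresh, and the real copy/bind routines track considerably more descriptor state.

/*
 * Hedged sketch (not the driver's actual transmit code) of how one
 * mblk fragment could be attached to a tcb: copy small fragments,
 * DMA-bind large ones with the pre-allocated handle.
 */
static int
ixgbe_tx_attach_sketch(ixgbe_t *ixgbe, tx_control_block_t *tcb, mblk_t *mp)
{
	size_t mblen = MBLKL(mp);
	ddi_dma_cookie_t cookie;
	uint_t ncookies;
	int ret;

	if (mblen < ixgbe->tx_copy_thresh) {
		/* Small fragment: copy into the pre-allocated tx_buf. */
		bcopy(mp->b_rptr, tcb->tx_buf.address + tcb->tx_buf.len,
		    mblen);
		tcb->tx_buf.len += mblen;
		return (IXGBE_SUCCESS);
	}

	/* Large fragment: bind it directly; no data copy takes place. */
	ret = ddi_dma_addr_bind_handle(tcb->tx_dma_handle, NULL,
	    (caddr_t)mp->b_rptr, mblen, DDI_DMA_WRITE | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &ncookies);
	if (ret != DDI_DMA_MAPPED)
		return (IXGBE_FAILURE);

	/* Each cookie then supplies one descriptor address/length pair. */
	return (IXGBE_SUCCESS);
}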
Example #3

/*
 * ixgbe_alloc_dma_buffer - Allocate DMA resources for a DMA buffer.
 */
static int
ixgbe_alloc_dma_buffer(ixgbe_t *ixgbe, dma_buffer_t *buf, size_t size)
{
	int ret;
	dev_info_t *devinfo = ixgbe->dip;
	ddi_dma_cookie_t cookie;
	size_t len;
	uint_t cookie_num;

	ret = ddi_dma_alloc_handle(devinfo,
	    &ixgbe_buf_dma_attr, DDI_DMA_DONTWAIT,
	    NULL, &buf->dma_handle);

	if (ret != DDI_SUCCESS) {
		buf->dma_handle = NULL;
		ixgbe_error(ixgbe,
		    "Could not allocate dma buffer handle: %x", ret);
		return (IXGBE_FAILURE);
	}

	ret = ddi_dma_mem_alloc(buf->dma_handle,
	    size, &ixgbe_buf_acc_attr, DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &buf->address,
	    &len, &buf->acc_handle);

	if (ret != DDI_SUCCESS) {
		buf->acc_handle = NULL;
		buf->address = NULL;
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		ixgbe_error(ixgbe,
		    "Could not allocate dma buffer memory: %x", ret);
		return (IXGBE_FAILURE);
	}

	ret = ddi_dma_addr_bind_handle(buf->dma_handle, NULL,
	    buf->address,
	    len, DDI_DMA_RDWR | DDI_DMA_STREAMING,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		buf->dma_address = NULL;
		if (buf->acc_handle != NULL) {
			ddi_dma_mem_free(&buf->acc_handle);
			buf->acc_handle = NULL;
			buf->address = NULL;
		}
		if (buf->dma_handle != NULL) {
			ddi_dma_free_handle(&buf->dma_handle);
			buf->dma_handle = NULL;
		}
		ixgbe_error(ixgbe,
		    "Could not bind dma buffer handle: %x", ret);
		return (IXGBE_FAILURE);
	}

	ASSERT(cookie_num == 1);

	buf->dma_address = cookie.dmac_laddress;
	buf->size = len;
	buf->len = 0;

	return (IXGBE_SUCCESS);
}
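
Freeing such a buffer mirrors the three allocation steps in reverse order: unbind the handle, free the DMA memory, then free the handle itself. A minimal sketch of the counterpart follows; the name is hypothetical, though the driver's own ixgbe_free_dma_buffer has the same shape. Callers that offset address/size after allocation (as the rcb code above does with IPHDR_ALIGN_ROOM) are assumed to restore them first.

/*
 * Hedged sketch of the teardown counterpart: undo the bind, the
 * memory allocation, and the handle, in that order.
 */
static void
ixgbe_free_dma_buffer_sketch(dma_buffer_t *buf)
{
	if (buf->dma_handle != NULL && buf->size > 0) {
		(void) ddi_dma_unbind_handle(buf->dma_handle);
	}
	if (buf->acc_handle != NULL) {
		ddi_dma_mem_free(&buf->acc_handle);
		buf->acc_handle = NULL;
		buf->address = NULL;
	}
	if (buf->dma_handle != NULL) {
		ddi_dma_free_handle(&buf->dma_handle);
		buf->dma_handle = NULL;
	}
	buf->dma_address = 0;
	buf->size = 0;
	buf->len = 0;
}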
Example #4

/*
 * ixgbe_alloc_rbd_ring - Memory allocation for the rx descriptors of one ring.
 */
static int
ixgbe_alloc_rbd_ring(ixgbe_rx_data_t *rx_data)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	ixgbe_t *ixgbe = rx_data->rx_ring->ixgbe;

	devinfo = ixgbe->dip;
	size = sizeof (union ixgbe_adv_rx_desc) * rx_data->ring_size;

	/*
	 * Allocate a new DMA handle for the receive descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &ixgbe_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &rx_data->rbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		ixgbe_error(ixgbe,
		    "Could not allocate rbd dma handle: %x", ret);
		rx_data->rbd_area.dma_handle = NULL;
		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the receive
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(rx_data->rbd_area.dma_handle,
	    size, &ixgbe_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&rx_data->rbd_area.address,
	    &len, &rx_data->rbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		ixgbe_error(ixgbe,
		    "Could not allocate rbd dma memory: %x", ret);
		rx_data->rbd_area.acc_handle = NULL;
		rx_data->rbd_area.address = NULL;
		if (rx_data->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
			rx_data->rbd_area.dma_handle = NULL;
		}
		return (IXGBE_FAILURE);
	}

	/*
	 * Initialize the entire receive buffer descriptor area to zero
	 */
	bzero(rx_data->rbd_area.address, len);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call.
	 */
	ret = ddi_dma_addr_bind_handle(rx_data->rbd_area.dma_handle,
	    NULL, (caddr_t)rx_data->rbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		ixgbe_error(ixgbe,
		    "Could not bind rbd dma resource: %x", ret);
		rx_data->rbd_area.dma_address = NULL;
		if (rx_data->rbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&rx_data->rbd_area.acc_handle);
			rx_data->rbd_area.acc_handle = NULL;
			rx_data->rbd_area.address = NULL;
		}
		if (rx_data->rbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&rx_data->rbd_area.dma_handle);
			rx_data->rbd_area.dma_handle = NULL;
		}
		return (IXGBE_FAILURE);
	}

	ASSERT(cookie_num == 1);

	rx_data->rbd_area.dma_address = cookie.dmac_laddress;
	rx_data->rbd_area.size = len;

	rx_data->rbd_ring = (union ixgbe_adv_rx_desc *)(uintptr_t)
	    rx_data->rbd_area.address;

	return (IXGBE_SUCCESS);
}
Example #5

/*
 * ixgbe_alloc_tbd_ring - Memory allocation for the tx descriptors of one ring.
 */
static int
ixgbe_alloc_tbd_ring(ixgbe_tx_ring_t *tx_ring)
{
	int ret;
	size_t size;
	size_t len;
	uint_t cookie_num;
	dev_info_t *devinfo;
	ddi_dma_cookie_t cookie;
	ixgbe_t *ixgbe = tx_ring->ixgbe;

	devinfo = ixgbe->dip;
	size = sizeof (union ixgbe_adv_tx_desc) * tx_ring->ring_size;

	/*
	 * If tx head write-back is enabled, an extra tbd is allocated
	 * to save the head write-back value
	 */
	if (ixgbe->tx_head_wb_enable) {
		size += sizeof (union ixgbe_adv_tx_desc);
	}

	/*
	 * Allocate a DMA handle for the transmit descriptor
	 * memory area.
	 */
	ret = ddi_dma_alloc_handle(devinfo, &ixgbe_desc_dma_attr,
	    DDI_DMA_DONTWAIT, NULL,
	    &tx_ring->tbd_area.dma_handle);

	if (ret != DDI_SUCCESS) {
		ixgbe_error(ixgbe,
		    "Could not allocate tbd dma handle: %x", ret);
		tx_ring->tbd_area.dma_handle = NULL;

		return (IXGBE_FAILURE);
	}

	/*
	 * Allocate memory to DMA data to and from the transmit
	 * descriptors.
	 */
	ret = ddi_dma_mem_alloc(tx_ring->tbd_area.dma_handle,
	    size, &ixgbe_desc_acc_attr, DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL,
	    (caddr_t *)&tx_ring->tbd_area.address,
	    &len, &tx_ring->tbd_area.acc_handle);

	if (ret != DDI_SUCCESS) {
		ixgbe_error(ixgbe,
		    "Could not allocate tbd dma memory: %x", ret);
		tx_ring->tbd_area.acc_handle = NULL;
		tx_ring->tbd_area.address = NULL;
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IXGBE_FAILURE);
	}

	/*
	 * Initialize the entire transmit buffer descriptor area to zero
	 */
	bzero(tx_ring->tbd_area.address, len);

	/*
	 * Allocate DMA resources for the memory that was allocated by
	 * the ddi_dma_mem_alloc call. The DMA resources are then bound
	 * to the memory address.
	 */
	ret = ddi_dma_addr_bind_handle(tx_ring->tbd_area.dma_handle,
	    NULL, (caddr_t)tx_ring->tbd_area.address,
	    len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
	    DDI_DMA_DONTWAIT, NULL, &cookie, &cookie_num);

	if (ret != DDI_DMA_MAPPED) {
		ixgbe_error(ixgbe,
		    "Could not bind tbd dma resource: %x", ret);
		tx_ring->tbd_area.dma_address = NULL;
		if (tx_ring->tbd_area.acc_handle != NULL) {
			ddi_dma_mem_free(&tx_ring->tbd_area.acc_handle);
			tx_ring->tbd_area.acc_handle = NULL;
			tx_ring->tbd_area.address = NULL;
		}
		if (tx_ring->tbd_area.dma_handle != NULL) {
			ddi_dma_free_handle(&tx_ring->tbd_area.dma_handle);
			tx_ring->tbd_area.dma_handle = NULL;
		}
		return (IXGBE_FAILURE);
	}

	ASSERT(cookie_num == 1);

	tx_ring->tbd_area.dma_address = cookie.dmac_laddress;
	tx_ring->tbd_area.size = len;

	tx_ring->tbd_ring = (union ixgbe_adv_tx_desc *)(uintptr_t)
	    tx_ring->tbd_area.address;

	return (IXGBE_SUCCESS);
}
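
The extra descriptor allocated above is never used as a real descriptor: with head write-back enabled, the hardware DMA-writes its current head index into that slot, so the driver can reclaim completed descriptors without reading a device register. A hedged sketch of locating and reading that value, with a hypothetical function name:

/*
 * Hedged sketch: with head write-back enabled, read the head index
 * the hardware stores just past the last real descriptor.
 */
static uint32_t
ixgbe_tx_head_wb_sketch(ixgbe_tx_ring_t *tx_ring)
{
	uint32_t *head_wb;

	head_wb = (uint32_t *)((uintptr_t)tx_ring->tbd_area.address +
	    sizeof (union ixgbe_adv_tx_desc) * tx_ring->ring_size);

	/* Sync before reading memory that the hardware DMA-writes. */
	(void) ddi_dma_sync(tx_ring->tbd_area.dma_handle, 0,
	    tx_ring->tbd_area.size, DDI_DMA_SYNC_FORKERNEL);

	return (*head_wb);
}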
Example #6

int
ixgbe_alloc_rx_ring_data(ixgbe_rx_ring_t *rx_ring)
{
	ixgbe_rx_data_t	*rx_data;
	ixgbe_t *ixgbe = rx_ring->ixgbe;
	uint32_t rcb_count;

	/*
	 * Allocate memory for software receive rings
	 */
	rx_data = kmem_zalloc(sizeof (ixgbe_rx_data_t), KM_NOSLEEP);

	if (rx_data == NULL) {
		ixgbe_error(ixgbe, "Allocate software receive rings failed");
		return (IXGBE_FAILURE);
	}

	rx_data->rx_ring = rx_ring;
	mutex_init(&rx_data->recycle_lock, NULL,
	    MUTEX_DRIVER, DDI_INTR_PRI(ixgbe->intr_pri));

	rx_data->ring_size = ixgbe->rx_ring_size;
	rx_data->free_list_size = ixgbe->rx_ring_size;

	rx_data->rcb_head = 0;
	rx_data->rcb_tail = 0;
	rx_data->rcb_free = rx_data->free_list_size;

	/*
	 * Allocate memory for the work list.
	 */
	rx_data->work_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_data->ring_size, KM_NOSLEEP);

	if (rx_data->work_list == NULL) {
		ixgbe_error(ixgbe,
		    "Could not allocate memory for rx work list");
		goto alloc_rx_data_failure;
	}

	/*
	 * Allocate memory for the free list.
	 */
	rx_data->free_list = kmem_zalloc(sizeof (rx_control_block_t *) *
	    rx_data->free_list_size, KM_NOSLEEP);

	if (rx_data->free_list == NULL) {
		ixgbe_error(ixgbe,
		    "Cound not allocate memory for rx free list");
		goto alloc_rx_data_failure;
	}

	/*
	 * Allocate memory for the rx control blocks for the work list
	 * and free list.
	 */
	rcb_count = rx_data->ring_size + rx_data->free_list_size;
	rx_data->rcb_area =
	    kmem_zalloc(sizeof (rx_control_block_t) * rcb_count,
	    KM_NOSLEEP);

	if (rx_data->rcb_area == NULL) {
		ixgbe_error(ixgbe,
		    "Cound not allocate memory for rx control blocks");
		goto alloc_rx_data_failure;
	}

	rx_ring->rx_data = rx_data;
	return (IXGBE_SUCCESS);

alloc_rx_data_failure:
	ixgbe_free_rx_ring_data(rx_data);
	return (IXGBE_FAILURE);
}
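
Since the failure label above can be reached after any subset of the three allocations has succeeded, ixgbe_free_rx_ring_data() must tolerate NULL members and release them in reverse order of allocation. A minimal sketch of that shape, under a hypothetical name; the real routine also defers freeing while recycled buffers are still outstanding.

/*
 * Hedged sketch of the teardown counterpart, safe to call on a
 * partially constructed rx_data.
 */
static void
ixgbe_free_rx_ring_data_sketch(ixgbe_rx_data_t *rx_data)
{
	if (rx_data == NULL)
		return;

	if (rx_data->rcb_area != NULL) {
		kmem_free(rx_data->rcb_area, sizeof (rx_control_block_t) *
		    (rx_data->ring_size + rx_data->free_list_size));
		rx_data->rcb_area = NULL;
	}
	if (rx_data->free_list != NULL) {
		kmem_free(rx_data->free_list,
		    sizeof (rx_control_block_t *) * rx_data->free_list_size);
		rx_data->free_list = NULL;
	}
	if (rx_data->work_list != NULL) {
		kmem_free(rx_data->work_list,
		    sizeof (rx_control_block_t *) * rx_data->ring_size);
		rx_data->work_list = NULL;
	}

	mutex_destroy(&rx_data->recycle_lock);
	kmem_free(rx_data, sizeof (ixgbe_rx_data_t));
}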