Example #1
/*
 * Name: qla_init_cntxt_regions
 * Function: Initializes Tx/Rx Contexts.
 */
static void
qla_init_cntxt_regions(qla_host_t *ha)
{
	qla_hw_t		*hw;
	q80_tx_cntxt_req_t	*tx_cntxt_req;
	q80_rcv_cntxt_req_t	*rx_cntxt_req;
	bus_addr_t		phys_addr;
	uint32_t		i;
	device_t		dev;
	uint32_t		size;

	dev = ha->pci_dev;

	hw = &ha->hw;

	hw->tx_ring_base = hw->dma_buf.tx_ring.dma_b;
	
	for (i = 0; i < ha->hw.num_sds_rings; i++)
		hw->sds[i].sds_ring_base =
			(q80_stat_desc_t *)hw->dma_buf.sds_ring[i].dma_b;


	/*
	 * Carve the context DMA buffer into consecutive QL_BUFFER_ALIGN'ed
	 * sub-regions: Tx context request/response, Rx context
	 * request/response, and the transmit consumer index.
	 */
	phys_addr = hw->dma_buf.context.dma_addr;

	memset((void *)hw->dma_buf.context.dma_b, 0,
		ha->hw.dma_buf.context.size);

	hw->tx_cntxt_req	=
		(q80_tx_cntxt_req_t *)hw->dma_buf.context.dma_b;
	hw->tx_cntxt_req_paddr	= phys_addr;

	size = QL_ALIGN((sizeof (q80_tx_cntxt_req_t)), QL_BUFFER_ALIGN);

	hw->tx_cntxt_rsp	=
		(q80_tx_cntxt_rsp_t *)((uint8_t *)hw->tx_cntxt_req + size);
	hw->tx_cntxt_rsp_paddr	= hw->tx_cntxt_req_paddr + size;

	size = QL_ALIGN((sizeof (q80_tx_cntxt_rsp_t)), QL_BUFFER_ALIGN);

	hw->rx_cntxt_req =
		(q80_rcv_cntxt_req_t *)((uint8_t *)hw->tx_cntxt_rsp + size);
	hw->rx_cntxt_req_paddr = hw->tx_cntxt_rsp_paddr + size;

	size = QL_ALIGN((sizeof (q80_rcv_cntxt_req_t)), QL_BUFFER_ALIGN);

	hw->rx_cntxt_rsp =
		(q80_rcv_cntxt_rsp_t *)((uint8_t *)hw->rx_cntxt_req + size);
	hw->rx_cntxt_rsp_paddr = hw->rx_cntxt_req_paddr + size;

	size = QL_ALIGN((sizeof (q80_rcv_cntxt_rsp_t)), QL_BUFFER_ALIGN);

	/* The transmit consumer index is DMA'd by firmware into the tail. */
	hw->tx_cons = (uint32_t *)((uint8_t *)hw->rx_cntxt_rsp + size);
	hw->tx_cons_paddr = hw->rx_cntxt_rsp_paddr + size;

	/*
	 * Initialize the Transmit Context Request once, so that it does not
	 * have to be rebuilt every time a context is created.
	 */
	tx_cntxt_req = hw->tx_cntxt_req;

	tx_cntxt_req->rsp_dma_addr = qla_host_to_le64(hw->tx_cntxt_rsp_paddr);

	tx_cntxt_req->cmd_cons_dma_addr = qla_host_to_le64(hw->tx_cons_paddr);

	tx_cntxt_req->caps[0] = qla_host_to_le32((CNTXT_CAP0_BASEFW |
					CNTXT_CAP0_LEGACY_MN | CNTXT_CAP0_LSO));
	
	tx_cntxt_req->intr_mode = qla_host_to_le32(CNTXT_INTR_MODE_SHARED);

	tx_cntxt_req->phys_addr =
		qla_host_to_le64(hw->dma_buf.tx_ring.dma_addr);

	tx_cntxt_req->num_entries = qla_host_to_le32(NUM_TX_DESCRIPTORS);

	/*
	 * Initialize the Receive Context Request
	 */

	rx_cntxt_req = hw->rx_cntxt_req;

	rx_cntxt_req->rx_req.rsp_dma_addr =
		qla_host_to_le64(hw->rx_cntxt_rsp_paddr);

	rx_cntxt_req->rx_req.caps[0] = qla_host_to_le32(CNTXT_CAP0_BASEFW |
						CNTXT_CAP0_LEGACY_MN |
						CNTXT_CAP0_JUMBO |
						CNTXT_CAP0_LRO |
						CNTXT_CAP0_HW_LRO);

	rx_cntxt_req->rx_req.intr_mode =
		qla_host_to_le32(CNTXT_INTR_MODE_SHARED);

	rx_cntxt_req->rx_req.rds_intr_mode =
		qla_host_to_le32(CNTXT_INTR_MODE_UNIQUE);

	rx_cntxt_req->rx_req.rds_ring_offset = 0;
	rx_cntxt_req->rx_req.sds_ring_offset = qla_host_to_le32(
		(hw->num_rds_rings * sizeof(q80_rq_rds_ring_t)));
	rx_cntxt_req->rx_req.num_rds_rings =
		qla_host_to_le16(hw->num_rds_rings);
	rx_cntxt_req->rx_req.num_sds_rings =
		qla_host_to_le16(hw->num_sds_rings);

	for (i = 0; i < hw->num_rds_rings; i++) {
		rx_cntxt_req->rds_req[i].phys_addr =
			qla_host_to_le64(hw->dma_buf.rds_ring[i].dma_addr);

		if (i == RDS_RING_INDEX_NORMAL) {
			rx_cntxt_req->rds_req[i].buf_size =
				qla_host_to_le64(MCLBYTES);
			rx_cntxt_req->rds_req[i].size =
				qla_host_to_le32(NUM_RX_DESCRIPTORS);
		} else {
			rx_cntxt_req->rds_req[i].buf_size =
				qla_host_to_le64(MJUM9BYTES);
			rx_cntxt_req->rds_req[i].size =
				qla_host_to_le32(NUM_RX_JUMBO_DESCRIPTORS);
		}
	}

	for (i = 0; i < hw->num_sds_rings; i++) {
		rx_cntxt_req->sds_req[i].phys_addr =
			qla_host_to_le64(hw->dma_buf.sds_ring[i].dma_addr);
		rx_cntxt_req->sds_req[i].size =
			qla_host_to_le32(NUM_STATUS_DESCRIPTORS);
		rx_cntxt_req->sds_req[i].msi_index = qla_host_to_le16(i);
	}

	QL_DPRINT2((dev, "%s: tx_cntxt_req = %p paddr %p\n",
		__func__, hw->tx_cntxt_req, (void *)hw->tx_cntxt_req_paddr));
	QL_DPRINT2((dev, "%s: tx_cntxt_rsp = %p paddr %p\n",
		__func__, hw->tx_cntxt_rsp, (void *)hw->tx_cntxt_rsp_paddr));
	QL_DPRINT2((dev, "%s: rx_cntxt_req = %p paddr %p\n",
		__func__, hw->rx_cntxt_req, (void *)hw->rx_cntxt_req_paddr));
	QL_DPRINT2((dev, "%s: rx_cntxt_rsp = %p paddr %p\n",
		__func__, hw->rx_cntxt_rsp, (void *)hw->rx_cntxt_rsp_paddr));
	QL_DPRINT2((dev, "%s: tx_cons      = %p paddr %p\n",
		__func__, hw->tx_cons, (void *)hw->tx_cons_paddr));
}
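
The offsets laid out above must stay within the context buffer sized in
qla_alloc_dma() (Example #3). A minimal sketch of that sizing invariant,
assuming the QL_ALIGN and QL_BUFFER_ALIGN macros behave as used above;
qla_cntxt_region_size() is a hypothetical helper, not part of the driver:

/*
 * Compute the total size of the context region, mirroring the layout in
 * qla_init_cntxt_regions(): one aligned slot per request/response block,
 * followed by the transmit consumer index, rounded up to a page.
 */
static uint32_t
qla_cntxt_region_size(void)
{
	uint32_t size;

	size  = QL_ALIGN(sizeof (q80_tx_cntxt_req_t), QL_BUFFER_ALIGN);
	size += QL_ALIGN(sizeof (q80_tx_cntxt_rsp_t), QL_BUFFER_ALIGN);
	size += QL_ALIGN(sizeof (q80_rcv_cntxt_req_t), QL_BUFFER_ALIGN);
	size += QL_ALIGN(sizeof (q80_rcv_cntxt_rsp_t), QL_BUFFER_ALIGN);
	size += sizeof (uint32_t);	/* tx consumer index */

	return (QL_ALIGN(size, PAGE_SIZE));
}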
Example #2
/*
 * Name: qla_rcv_isr
 * Function: Main Receive Interrupt Service Routine
 */
static uint32_t
qla_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
{
	device_t dev;
	qla_hw_t *hw;
	uint32_t comp_idx, desc_count;
	q80_stat_desc_t *sdesc;
	struct lro_ctrl *lro;
	uint32_t ret = 0;

	dev = ha->pci_dev;
	hw = &ha->hw;

	hw->sds[sds_idx].rcv_active = 1;
	if (ha->flags.stop_rcv) {
		hw->sds[sds_idx].rcv_active = 0;
		return 0;
	}

	QL_DPRINT2((dev, "%s: [%d]enter\n", __func__, sds_idx));

	/*
	 * Process receive completions: walk the status ring from sdsr_next,
	 * handling descriptors until ownership reverts to firmware or the
	 * budget (count) is exhausted.
	 */
	comp_idx = hw->sds[sds_idx].sdsr_next;
	lro = &hw->sds[sds_idx].lro;

	while (count--) {

		sdesc = (q80_stat_desc_t *)
				&hw->sds[sds_idx].sds_ring_base[comp_idx];

		if (Q8_STAT_DESC_OWNER((sdesc->data[0])) !=
			Q8_STAT_DESC_OWNER_HOST) {
			QL_DPRINT2((dev, "%s:  data %p sdsr_next 0x%08x\n",
				__func__, (void *)sdesc->data[0], comp_idx));
			break;
		}

		desc_count = Q8_STAT_DESC_COUNT((sdesc->data[0]));

		switch (Q8_STAT_DESC_OPCODE((sdesc->data[0]))) {

		case Q8_STAT_DESC_OPCODE_RCV_PKT:
		case Q8_STAT_DESC_OPCODE_SYN_OFFLOAD:
			qla_rx_intr(ha, (sdesc->data[0]), sds_idx, lro);
			
			break;

		default:
			device_printf(dev, "%s: default 0x%llx!\n", __func__,
					(long long unsigned int)sdesc->data[0]);
			break;
		}

		/*
		 * Return ownership of the consumed status descriptors to
		 * firmware and advance the completion index.
		 */
		while (desc_count--) {
			sdesc->data[0] =
				Q8_STAT_DESC_SET_OWNER(Q8_STAT_DESC_OWNER_FW);
			comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
			sdesc = (q80_stat_desc_t *)
				&hw->sds[sds_idx].sds_ring_base[comp_idx];
		}
	}

	tcp_lro_flush_all(lro);

	if (hw->sds[sds_idx].sdsr_next != comp_idx) {
		QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
	}
	hw->sds[sds_idx].sdsr_next = comp_idx;

	/*
	 * If the next descriptor is already host-owned, more completions are
	 * pending; return -1 so the caller can reschedule (ring 0 only).
	 */
	sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx];
	if ((sds_idx == 0) && (Q8_STAT_DESC_OWNER((sdesc->data[0])) ==
					Q8_STAT_DESC_OWNER_HOST)) {
		ret = -1;
	}

	hw->sds[sds_idx].rcv_active = 0;
	return (ret);
}
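
The -1 return (stored in a uint32_t, so any non-zero value) signals that
host-owned descriptors remain after the budget was spent. A minimal sketch of
a caller honoring that contract; qla_sds_drain_example() and the budget
choice are illustrative, not driver API:

/*
 * Re-run the receive handler until it reports the status ring drained.
 * NUM_STATUS_DESCRIPTORS is used here as the per-pass budget.
 */
static void
qla_sds_drain_example(qla_host_t *ha, uint32_t sds_idx)
{
	while (qla_rcv_isr(ha, sds_idx, NUM_STATUS_DESCRIPTORS) != 0)
		;
}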
Example #3
/*
 * Name: qla_alloc_dma
 * Function: Allocates DMA'able memory for Tx/Rx Rings, Tx/Rx Contexts.
 */
int
qla_alloc_dma(qla_host_t *ha)
{
	device_t		dev;
	uint32_t		i, j, size;

	dev = ha->pci_dev;

	QL_DPRINT2((dev, "%s: enter\n", __func__));

	ha->hw.num_rds_rings = (uint16_t)sysctl_num_rds_rings;
	ha->hw.num_sds_rings = (uint16_t)sysctl_num_sds_rings;

	/*
	 * Allocate Transmit Ring
	 */

	ha->hw.dma_buf.tx_ring.alignment = 8;
	ha->hw.dma_buf.tx_ring.size =
		(sizeof(q80_tx_cmd_t)) * NUM_TX_DESCRIPTORS;
	
	if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.tx_ring)) {
		device_printf(dev, "%s: tx ring alloc failed\n", __func__);
		goto qla_alloc_dma_exit;
	}
	ha->hw.dma_buf.flags.tx_ring = 1;

	QL_DPRINT2((dev, "%s: tx_ring phys %p virt %p\n",
		__func__, (void *)(ha->hw.dma_buf.tx_ring.dma_addr),
		ha->hw.dma_buf.tx_ring.dma_b));
	/*
	 * Allocate Receive Descriptor Rings
	 */

	for (i = 0; i < ha->hw.num_rds_rings; i++) {
		ha->hw.dma_buf.rds_ring[i].alignment = 8;

		if (i == RDS_RING_INDEX_NORMAL) {
			ha->hw.dma_buf.rds_ring[i].size =
				(sizeof(q80_recv_desc_t)) * NUM_RX_DESCRIPTORS;
		} else if (i == RDS_RING_INDEX_JUMBO) {
			ha->hw.dma_buf.rds_ring[i].size = 
				(sizeof(q80_recv_desc_t)) *
					NUM_RX_JUMBO_DESCRIPTORS;
		} else
			break;
	
		if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.rds_ring[i])) {
			device_printf(dev, "%s: rds ring alloc failed\n",
				__func__);

			for (j = 0; j < i; j++)
				qla_free_dmabuf(ha,
					&ha->hw.dma_buf.rds_ring[j]);

			goto qla_alloc_dma_exit;
		}
		QL_DPRINT4((dev, "%s: rx_ring[%d] phys %p virt %p\n",
			__func__, i,
			(void *)(ha->hw.dma_buf.rds_ring[i].dma_addr),
			ha->hw.dma_buf.rds_ring[i].dma_b));
	}
	ha->hw.dma_buf.flags.rds_ring = 1;

	/*
	 * Allocate Status Descriptor Rings
	 */

	for (i = 0; i < ha->hw.num_sds_rings; i++) {
		ha->hw.dma_buf.sds_ring[i].alignment = 8;
		ha->hw.dma_buf.sds_ring[i].size =
			(sizeof(q80_stat_desc_t)) * NUM_STATUS_DESCRIPTORS;

		if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.sds_ring[i])) {
			device_printf(dev, "%s: sds ring alloc failed\n",
				__func__);

			for (j = 0; j < i; j++)
				qla_free_dmabuf(ha,
					&ha->hw.dma_buf.sds_ring[j]);

			goto qla_alloc_dma_exit;
		}
		QL_DPRINT4((dev, "%s: sds_ring[%d] phys %p virt %p\n",
			__func__, i,
			(void *)(ha->hw.dma_buf.sds_ring[i].dma_addr),
			ha->hw.dma_buf.sds_ring[i].dma_b));
	}
	ha->hw.dma_buf.flags.sds_ring = 1;

	/*
	 * Allocate the Context Area. The sub-region sizes below must mirror
	 * the layout set up in qla_init_cntxt_regions().
	 */
	size = QL_ALIGN((sizeof (q80_tx_cntxt_req_t)), QL_BUFFER_ALIGN);

	size += QL_ALIGN((sizeof (q80_tx_cntxt_rsp_t)), QL_BUFFER_ALIGN);

	size += QL_ALIGN((sizeof (q80_rcv_cntxt_req_t)), QL_BUFFER_ALIGN);

	size += QL_ALIGN((sizeof (q80_rcv_cntxt_rsp_t)), QL_BUFFER_ALIGN);

	size += sizeof (uint32_t); /* for tx consumer index */

	size = QL_ALIGN(size, PAGE_SIZE);
	
	ha->hw.dma_buf.context.alignment = 8;
	ha->hw.dma_buf.context.size = size;
	
	if (qla_alloc_dmabuf(ha, &ha->hw.dma_buf.context)) {
		device_printf(dev, "%s: context alloc failed\n", __func__);
		goto qla_alloc_dma_exit;
	}
	ha->hw.dma_buf.flags.context = 1;
	QL_DPRINT2((dev, "%s: context phys %p virt %p\n",
		__func__, (void *)(ha->hw.dma_buf.context.dma_addr),
		ha->hw.dma_buf.context.dma_b));

	qla_init_cntxt_regions(ha);

	return 0;

qla_alloc_dma_exit:
	qla_free_dma(ha);
	return -1;
}
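
Together with Example #1, attach-time DMA setup reduces to a single call:
qla_alloc_dma() invokes qla_init_cntxt_regions() itself and unwinds partial
allocations through qla_free_dma(). A hedged sketch of a call site; the
wrapper name and errno-style return are illustrative:

/*
 * Typical attach-path usage: a failure here means all partially
 * allocated rings were already released via qla_free_dma().
 */
static int
qla_attach_dma_example(qla_host_t *ha)
{
	if (qla_alloc_dma(ha) != 0)
		return (ENOMEM);

	return (0);
}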
Example #4
/*
 * Name: qla_rcv_isr
 * Function: Main Receive Interrupt Service Routine
 */
static uint32_t
qla_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
{
	device_t dev;
	qla_hw_t *hw;
	uint32_t comp_idx, c_idx = 0, desc_count = 0, opcode;
	volatile q80_stat_desc_t *sdesc, *sdesc0 = NULL;
	uint32_t ret = 0;
	qla_sgl_comp_t sgc;
	uint16_t nhandles;
	uint32_t sds_replenish_threshold = 0;

	dev = ha->pci_dev;
	hw = &ha->hw;

	hw->sds[sds_idx].rcv_active = 1;
	if (ha->flags.stop_rcv) {
		hw->sds[sds_idx].rcv_active = 0;
		return 0;
	}

	QL_DPRINT2(ha, (dev, "%s: [%d]enter\n", __func__, sds_idx));

	/*
	 * Process receive completions: walk the status ring from sdsr_next
	 * until an empty (zero-opcode) descriptor is found, the budget
	 * (count) is exhausted, or a stop is requested.
	 */
	comp_idx = hw->sds[sds_idx].sdsr_next;

	while (count-- && !ha->flags.stop_rcv) {

		sdesc = (q80_stat_desc_t *)
				&hw->sds[sds_idx].sds_ring_base[comp_idx];

		opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));

		if (!opcode)
			break;

		hw->sds[sds_idx].intr_count++;
		switch (opcode) {

		case Q8_STAT_DESC_OPCODE_RCV_PKT:

			desc_count = 1;

			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.rcv.pkt_length =
				Q8_STAT_DESC_TOTAL_LENGTH((sdesc->data[0]));
			sgc.rcv.num_handles = 1;
			sgc.rcv.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));
			sgc.rcv.chksum_status =
				Q8_STAT_DESC_STATUS((sdesc->data[1]));

			sgc.rcv.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.rcv.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}
			qla_rx_intr(ha, &sgc.rcv, sds_idx);
			break;

		case Q8_STAT_DESC_OPCODE_SGL_RCV:

			desc_count =
				Q8_STAT_DESC_COUNT_SGL_RCV((sdesc->data[1]));

			/*
			 * Multi-descriptor completion: the trailing
			 * descriptor must be a continuation entry, else the
			 * completion has not been fully posted yet.
			 */
			if (desc_count > 1) {
				c_idx = (comp_idx + desc_count - 1) &
						(NUM_STATUS_DESCRIPTORS - 1);
				sdesc0 = (q80_stat_desc_t *)
					&hw->sds[sds_idx].sds_ring_base[c_idx];

				if (Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
						Q8_STAT_DESC_OPCODE_CONT) {
					desc_count = 0;
					break;
				}
			}

			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.rcv.pkt_length =
				Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV(
					(sdesc->data[0]));
			sgc.rcv.chksum_status =
				Q8_STAT_DESC_STATUS((sdesc->data[1]));

			sgc.rcv.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.rcv.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}

			QL_ASSERT(ha, (desc_count <= 2),
				("%s: [sds_idx, data0, data1]="
				"[%d, %p, %p]\n", __func__, sds_idx,
				(void *)sdesc->data[0],
				(void *)sdesc->data[1]));

			sgc.rcv.num_handles = 1;
			sgc.rcv.handle[0] = 
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));
			
			if (qla_rcv_cont_sds(ha, sds_idx, comp_idx, desc_count,
				&sgc.rcv.handle[1], &nhandles)) {
				device_printf(dev,
					"%s: [sds_idx, dcount, data0, data1]="
					 "[%d, %d, 0x%llx, 0x%llx]\n",
					__func__, sds_idx, desc_count,
					(long long unsigned int)sdesc->data[0],
					(long long unsigned int)sdesc->data[1]);
				desc_count = 0;
				break;	
			}

			sgc.rcv.num_handles += nhandles;

			qla_rx_intr(ha, &sgc.rcv, sds_idx);
			
			break;

		case Q8_STAT_DESC_OPCODE_SGL_LRO:

			desc_count =
				Q8_STAT_DESC_COUNT_SGL_LRO((sdesc->data[1]));

			if (desc_count > 1) {
				c_idx = (comp_idx + desc_count - 1) &
						(NUM_STATUS_DESCRIPTORS - 1);
				sdesc0 = (q80_stat_desc_t *)
					&hw->sds[sds_idx].sds_ring_base[c_idx];

				if (Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
						Q8_STAT_DESC_OPCODE_CONT) {
					desc_count = 0;
					break;
				}
			}
			bzero(&sgc, sizeof(qla_sgl_comp_t));

			sgc.lro.payload_length =
				Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV(
					(sdesc->data[0]));
				
			sgc.lro.rss_hash =
				Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));
			
			sgc.lro.num_handles = 1;
			sgc.lro.handle[0] =
				Q8_STAT_DESC_HANDLE((sdesc->data[0]));

			if (Q8_SGL_LRO_STAT_TS((sdesc->data[1])))
				sgc.lro.flags |= Q8_LRO_COMP_TS;

			if (Q8_SGL_LRO_STAT_PUSH_BIT((sdesc->data[1])))
				sgc.lro.flags |= Q8_LRO_COMP_PUSH_BIT;

			sgc.lro.l2_offset =
				Q8_SGL_LRO_STAT_L2_OFFSET((sdesc->data[1]));
			sgc.lro.l4_offset =
				Q8_SGL_LRO_STAT_L4_OFFSET((sdesc->data[1]));

			if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
				sgc.lro.vlan_tag =
					Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
			}

			QL_ASSERT(ha, (desc_count <= 7),
				("%s: [sds_idx, data0, data1]="
				 "[%d, 0x%llx, 0x%llx]\n",
				__func__, sds_idx,
				(long long unsigned int)sdesc->data[0],
				(long long unsigned int)sdesc->data[1]));
				
			if (qla_rcv_cont_sds(ha, sds_idx, comp_idx,
				desc_count, &sgc.lro.handle[1], &nhandles)) {
				device_printf(dev,
					"%s: [sds_idx, data0, data1]="
					"[%d, 0x%llx, 0x%llx]\n",
					__func__, sds_idx,
					(long long unsigned int)sdesc->data[0],
					(long long unsigned int)sdesc->data[1]);

				desc_count = 0;
				break;	
			}

			sgc.lro.num_handles += nhandles;

			if (qla_lro_intr(ha, &sgc.lro, sds_idx)) {
				device_printf(dev,
					"%s: [sds_idx, data0, data1]="
					"[%d, 0x%llx, 0x%llx]\n",
					__func__, sds_idx,
					(long long unsigned int)sdesc->data[0],
					(long long unsigned int)sdesc->data[1]);
				device_printf(dev,
					"%s: [comp_idx, c_idx, dcount, nhndls]="
					"[%d, %d, %d, %d]\n",
					__func__, comp_idx, c_idx, desc_count,
					sgc.lro.num_handles);
				if (desc_count > 1) {
					device_printf(dev,
					    "%s: [sds_idx, data0, data1]="
					    "[%d, 0x%llx, 0x%llx]\n",
					    __func__, sds_idx,
					    (long long unsigned int)
					    sdesc0->data[0],
					    (long long unsigned int)
					    sdesc0->data[1]);
				}
			}
			
			break;

		default:
			device_printf(dev, "%s: default 0x%llx!\n", __func__,
					(long long unsigned int)sdesc->data[0]);
			break;
		}

		if (desc_count == 0)
			break;

		sds_replenish_threshold += desc_count;

		/*
		 * Clear the consumed status descriptors (a zero opcode marks
		 * them empty) and advance the completion index.
		 */
		while (desc_count--) {
			sdesc->data[0] = 0ULL;
			sdesc->data[1] = 0ULL;
			comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS-1);
			sdesc = (q80_stat_desc_t *)
				&hw->sds[sds_idx].sds_ring_base[comp_idx];
		}

		/*
		 * Update the hardware consumer index only every
		 * sds_cidx_thres descriptors so doorbell writes are batched.
		 */
		if (sds_replenish_threshold > ha->hw.sds_cidx_thres) {
			sds_replenish_threshold = 0;
			if (hw->sds[sds_idx].sdsr_next != comp_idx) {
				QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx,
					comp_idx);
			}
			hw->sds[sds_idx].sdsr_next = comp_idx;
		}
	}

	if (ha->flags.stop_rcv)
		goto qla_rcv_isr_exit;

	if (hw->sds[sds_idx].sdsr_next != comp_idx) {
		QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
	}
	hw->sds[sds_idx].sdsr_next = comp_idx;

	/*
	 * If the next descriptor has already been posted, more completions
	 * are pending; return -1 so the caller can reschedule.
	 */
	sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx];
	opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));

	if (opcode)
		ret = -1;

qla_rcv_isr_exit:
	hw->sds[sds_idx].rcv_active = 0;

	return (ret);
}
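
The stop_rcv/rcv_active pair lets teardown code quiesce the receive path
before freeing rings: the handler bails out early once stop_rcv is set, and
rcv_active marks an invocation in flight. A minimal teardown sketch under
that reading; qla_mdelay() stands in for the driver's delay helper and the
function name is illustrative:

/*
 * Request a receive stop, then wait for any in-flight qla_rcv_isr()
 * invocation on each status ring to drain before rings are freed.
 */
static void
qla_wait_rcv_idle_example(qla_host_t *ha)
{
	uint32_t i;

	ha->flags.stop_rcv = 1;

	for (i = 0; i < ha->hw.num_sds_rings; i++)
		while (ha->hw.sds[i].rcv_active)
			qla_mdelay(__func__, 1);
}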