Example #1
/*
 * function to charge a given RQ with buffers from a pool's free list
 *
 * rq - pointer to the RQ to charge
 * nbufs - number of buffers to be charged
 * repost - B_TRUE to repost buffers from the shadow ring rather than
 *          allocate fresh ones
 *
 * return the number of RQEs charged
 */
static inline int
oce_rq_charge(struct oce_rq *rq, uint32_t nbufs, boolean_t repost)
{
	struct oce_nic_rqe *rqe;
	oce_rq_bdesc_t *rqbd;
	oce_rq_bdesc_t **shadow_rq;
	int cnt;
	int cur_index;
	oce_ring_buffer_t *ring;

	shadow_rq = rq->shadow_ring;
	ring = rq->ring;
	cur_index = ring->cidx;

	for (cnt = 0; cnt < nbufs; cnt++) {
		if (!repost) {
			rqbd = oce_rqb_alloc(rq);
			if (rqbd == NULL) {
				/* no free buffers available; stop charging */
				break;
			}
		} else {
			/* just repost the buffers from the shadow ring */
			rqbd = shadow_rq[cur_index];
			cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
		}
		/* fill the rqes */
		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring,
		    struct oce_nic_rqe);
		rqe->u0.s.frag_pa_lo = rqbd->frag_addr.dw.addr_lo;
		rqe->u0.s.frag_pa_hi = rqbd->frag_addr.dw.addr_hi;
		shadow_rq[rq->ring->pidx] = rqbd;
		DW_SWAP(u32ptr(rqe), sizeof (struct oce_nic_rqe));
		RING_PUT(rq->ring, 1);
	}

	return (cnt);
} /* oce_rq_charge */
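The shadow ring uses the same modular index arithmetic as the hardware ring. A minimal sketch of what the GET_Q_NEXT step amounts to, assuming the macro simply advances an index with wraparound (its exact definition is not shown here):

static inline int
q_next_index(int index, int step, int num_items)
{
	/* advance 'index' by 'step' slots, wrapping at 'num_items' */
	return ((index + step) % num_items);
}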
Example #2
/*
 * function to stop RX by draining and dropping all pending completions
 *
 * rq - pointer to the RQ to clean
 *
 * return none
 */
void
oce_clean_rq(struct oce_rq *rq)
{
	uint16_t num_cqe = 0;
	struct oce_cq  *cq;
	struct oce_dev *dev;
	struct oce_nic_rx_cqe *cqe;
	int32_t ti = 0;

	dev = rq->parent;
	cq = rq->cq;
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
	/* dequeue till you reach an invalid cqe */
	for (ti = 0; ti < DEFAULT_DRAIN_TIME; ti++) {

		while (RQ_CQE_VALID(cqe)) {
			DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
			oce_rx_drop_pkt(rq, cqe);
			atomic_add_32(&rq->buf_avail,
			    -(cqe->u0.s.num_fragments & 0x7));
			oce_arm_cq(dev, cq->cq_id, 1, B_TRUE);
			RQ_CQE_INVALIDATE(cqe);
			RING_GET(cq->ring, 1);
			cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
			    struct oce_nic_rx_cqe);
			num_cqe++;
		}
		OCE_MSDELAY(1);
	}
} /* oce_clean_rq */
Example #3
boolean_t
oce_m_getcap(void *arg, mac_capab_t cap, void *data)
{
	struct oce_dev *dev = arg;
	boolean_t ret = B_TRUE;
	switch (cap) {

	case MAC_CAPAB_HCKSUM: {
		uint32_t *csum_flags = u32ptr(data);
		*csum_flags = HCKSUM_ENABLE |
		    HCKSUM_INET_FULL_V4 |
		    HCKSUM_IPHDRCKSUM;
		break;
	}
	case MAC_CAPAB_LSO: {
		mac_capab_lso_t *mcap_lso = (mac_capab_lso_t *)data;
		if (dev->lso_capable) {
			mcap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
			mcap_lso->lso_basic_tcp_ipv4.lso_max = OCE_LSO_MAX_SIZE;
		} else {
			ret = B_FALSE;
		}
		break;
	}
	default:
		ret = B_FALSE;
		break;
	}
	return (ret);
} /* oce_m_getcap */
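For context, oce_m_getcap() is reached through the illumos MAC framework's capability query. A minimal registration sketch, assuming the standard mac(9E) callback structure (only the getcap slot is shown; the mandatory start/stop/tx entry points are elided):

#include <sys/mac_provider.h>

static mac_callbacks_t oce_mac_cb_sketch = {
	.mc_callbacks	= MC_GETCAPAB,	/* advertise mc_getcapab */
	.mc_getcapab	= oce_m_getcap,
	/* remaining mandatory entry points omitted for brevity */
};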
Example #4
/* Add frame links at the end of the snapshot. */
static MSize snapshot_framelinks(jit_State *J, IRRef2 *map)
{
  cTValue *frame = J->L->base - 1;
  cTValue *lim = J->L->base - J->baseslot;
  MSize f = 0;
  map[f++] = u32ptr(J->pc);
  while (frame > lim) {
    if (frame_islua(frame)) {
      map[f++] = u32ptr(frame_pc(frame));
      frame = frame_prevl(frame);
    } else if (frame_ispcall(frame)) {
      map[f++] = (uint32_t)frame_ftsz(frame);
      frame = frame_prevd(frame);
    } else if (frame_iscont(frame)) {
      map[f++] = (uint32_t)frame_ftsz(frame);
      map[f++] = u32ptr(frame_contpc(frame));
      frame = frame_prevd(frame);
    } else {
      lua_assert(0);
    }
  }
  return f;
}
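Read straight from the three branches above, the resulting map layout is (a summary of the code, not new behavior):

/*
 * map[0]           current trace PC (u32ptr(J->pc))
 * Lua frame    ->  1 slot : caller PC
 * pcall frame  ->  1 slot : frame type/size word (frame_ftsz)
 * cont frame   ->  2 slots: frame type/size word + continuation PC
 *
 * The returned count f is thus 1 plus one or two slots per frame.
 */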
Example #5
static inline int
oce_process_tx_compl(struct oce_wq *wq, boolean_t rearm)
{
	struct oce_nic_tx_cqe *cqe;
	uint16_t num_cqe = 0;
	struct oce_cq *cq;
	oce_wqe_desc_t *wqed;
	int wqe_freed = 0;
	struct oce_dev *dev;

	cq  = wq->cq;
	dev = wq->parent;
	(void) ddi_dma_sync(cq->ring->dbuf->dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORKERNEL);

	mutex_enter(&wq->txc_lock);
	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
	while (WQ_CQE_VALID(cqe)) {

		DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_tx_cqe));

		/* update stats */
		if (cqe->u0.s.status != 0) {
			atomic_inc_32(&dev->tx_errors);
		}

		/* complete the WQEs */
		wqed = OCE_LIST_REM_HEAD(&wq->wqe_desc_list);

		wqe_freed = wqed->wqe_cnt;
		oce_free_wqed(wq, wqed);
		RING_GET(wq->ring, wqe_freed);
		atomic_add_32(&wq->wq_free, wqe_freed);
		/* clear the valid bit and progress cqe */
		WQ_CQE_INVALIDATE(cqe);
		RING_GET(cq->ring, 1);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
		    struct oce_nic_tx_cqe);
		num_cqe++;
	} /* for all valid CQE */
	mutex_exit(&wq->txc_lock);
	if (num_cqe)
		oce_arm_cq(wq->parent, cq->cq_id, num_cqe, rearm);
	return (num_cqe);
} /* oce_process_tx_compl */
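A caller would typically drain the TX CQ from the completion interrupt path with re-arming enabled. A hypothetical wrapper (oce_drain_wq_cq is an assumed name, not taken from the source):

static uint16_t
oce_drain_wq_cq(void *arg)
{
	struct oce_wq *wq = (struct oce_wq *)arg;

	/* B_TRUE: re-arm the CQ so further TX completions interrupt */
	return ((uint16_t)oce_process_tx_compl(wq, B_TRUE));
}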
Example #6
/*
 * function to process a receive queue
 *
 * arg - pointer to the RQ to process
 *
 * return number of CQEs processed
 */
uint16_t
oce_drain_rq_cq(void *arg)
{
	struct oce_nic_rx_cqe *cqe;
	struct oce_rq *rq;
	mblk_t *mp = NULL;
	mblk_t *mblk_head;
	mblk_t **mblk_tail;
	uint16_t num_cqe = 0;
	struct oce_cq  *cq;
	struct oce_dev *dev;
	int32_t frag_cnt;
	uint32_t nbufs = 0;

	rq = (struct oce_rq *)arg;
	dev = rq->parent;
	cq = rq->cq;
	mblk_head = NULL;
	mblk_tail = &mblk_head;

	cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);

	(void) DBUF_SYNC(cq->ring->dbuf, DDI_DMA_SYNC_FORKERNEL);
	/* dequeue till you reach an invalid cqe */
	while (RQ_CQE_VALID(cqe)) {
		DW_SWAP(u32ptr(cqe), sizeof (struct oce_nic_rx_cqe));
		frag_cnt = cqe->u0.s.num_fragments & 0x7;
		/* copy if the packet is small or buffers cannot be reserved */
		if ((cqe->u0.s.pkt_size < dev->rx_bcopy_limit) ||
		    (oce_atomic_reserve(&rq->rqb_free, frag_cnt) < 0)) {
			mp = oce_rx_bcopy(dev, rq, cqe);
		} else {
			mp = oce_rx(dev, rq, cqe);
			if (mp == NULL) {
				atomic_add_32(&rq->rqb_free, frag_cnt);
				mp = oce_rx_bcopy(dev, rq, cqe);
			}
		}
		if (mp != NULL) {
			if (cqe->u0.s.vlan_tag_present) {
				oce_rx_insert_tag(mp, cqe->u0.s.vlan_tag);
			}
			oce_set_rx_oflags(mp, cqe);

			*mblk_tail = mp;
			mblk_tail = &mp->b_next;
		} else {
			(void) oce_rq_charge(rq, frag_cnt, B_TRUE);
		}
		RING_GET(rq->ring, frag_cnt);
		rq->buf_avail -= frag_cnt;
		nbufs += frag_cnt;

		oce_rq_post_buffer(rq, frag_cnt);
		RQ_CQE_INVALIDATE(cqe);
		RING_GET(cq->ring, 1);
		cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring,
		    struct oce_nic_rx_cqe);
		num_cqe++;
		/* limit the CQEs processed per invocation */
		if (num_cqe > dev->rx_pkt_per_intr) {
			break;
		}
	} /* for all valid CQEs */

	if (mblk_head) {
		mac_rx(dev->mac_handle, NULL, mblk_head);
	}
	oce_arm_cq(dev, cq->cq_id, num_cqe, B_TRUE);
	return (num_cqe);
} /* oce_drain_rq_cq */
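The copy-vs-loan decision above relies on oce_atomic_reserve() claiming frag_cnt buffers atomically, or failing without side effects. A minimal sketch of such a primitive, assuming it is built on the illumos atomic_add_32_nv() routine (the driver's actual implementation may differ):

#include <sys/atomic.h>

static int32_t
oce_atomic_reserve_sketch(volatile uint32_t *counter, uint32_t count)
{
	int32_t newval;

	/* subtract the reservation and observe the new value */
	newval = (int32_t)atomic_add_32_nv(counter, -(int32_t)count);
	if (newval < 0) {
		/* too few buffers were available; roll back */
		atomic_add_32(counter, (int32_t)count);
	}
	return (newval);
}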
Example #7
/*
 * function to transmit a single packet over the wire
 *
 * wq - pointer to the WQ
 * mp - pointer to the packet chain
 *
 * return NULL on success; the original mblk chain if the packet
 * could not be queued, so the caller can retry later
 */
mblk_t *
oce_send_packet(struct oce_wq *wq, mblk_t *mp)
{
	struct oce_nic_hdr_wqe *wqeh;
	struct oce_dev *dev;
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	int32_t num_wqes;
	uint16_t etype;
	uint32_t ip_offset;
	uint32_t csum_flags = 0;
	boolean_t use_copy = B_FALSE;
	boolean_t tagged   = B_FALSE;
	uint16_t  vlan_tag;
	uint32_t  reg_value = 0;
	oce_wqe_desc_t *wqed = NULL;
	mblk_t *nmp = NULL;
	mblk_t *tmp = NULL;
	uint32_t pkt_len = 0;
	int num_mblks = 0;
	int ret = 0;
	uint32_t mss = 0;
	uint32_t flags = 0;
	int len = 0;

	/* retrieve the adap priv struct ptr */
	dev = wq->parent;

	/* check if we have enough free slots */
	if (wq->wq_free < dev->tx_reclaim_threshold) {
		(void) oce_process_tx_compl(wq, B_FALSE);
	}
	if (wq->wq_free < OCE_MAX_TX_HDL) {
		return (mp);
	}

	/* check if we should copy */
	for (tmp = mp; tmp != NULL; tmp = tmp->b_cont) {
		pkt_len += MBLKL(tmp);
		num_mblks++;
	}

	if (pkt_len == 0 || num_mblks == 0) {
		freemsg(mp);
		return (NULL);
	}

	/* retrieve LSO information */
	mac_lso_get(mp, &mss, &flags);

	/* get the offload flags */
	mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &csum_flags);

	/* restrict the number of mapped segments to what we support */
	if (num_mblks  > OCE_MAX_TX_HDL) {
		nmp = msgpullup(mp, -1);
		if (nmp == NULL) {
			atomic_inc_32(&wq->pkt_drops);
			freemsg(mp);
			return (NULL);
		}
		/* Reset it to new collapsed mp */
		freemsg(mp);
		mp = nmp;
	}

	/* Get the packet descriptor for Tx */
	wqed = kmem_cache_alloc(wq->wqed_cache, KM_NOSLEEP);
	if (wqed == NULL) {
		atomic_inc_32(&wq->pkt_drops);
		freemsg(mp);
		return (NULL);
	}
	eh = (struct ether_header *)(void *)mp->b_rptr;
	if (ntohs(eh->ether_type) == VLAN_TPID) {
		evh = (struct ether_vlan_header *)(void *)mp->b_rptr;
		tagged = B_TRUE;
		etype = ntohs(evh->ether_type);
		ip_offset = sizeof (struct ether_vlan_header);
		pkt_len -= VTAG_SIZE;
		vlan_tag = ntohs(evh->ether_tci);
		oce_remove_vtag(mp);
	} else {
		etype = ntohs(eh->ether_type);
		ip_offset = sizeof (struct ether_header);
	}

	/* Save the WQ pointer */
	wqed->wq = wq;
	wqed->frag_idx = 1; /* index zero is always header */
	wqed->frag_cnt = 0;
	wqed->nhdl = 0;
	wqed->mp = NULL;
	OCE_LIST_LINK_INIT(&wqed->link);

	/* If the entire packet is smaller than the copy limit, just copy it */
	if (pkt_len < dev->tx_bcopy_limit) {
		use_copy = B_TRUE;
		ret = oce_bcopy_wqe(wq, wqed, mp, pkt_len);
	} else {
		/* copy or dma map the individual fragments */
		for (nmp = mp; nmp != NULL; nmp = nmp->b_cont) {
			len = MBLKL(nmp);
			if (len == 0) {
				continue;
			}
			if (len < dev->tx_bcopy_limit) {
				ret = oce_bcopy_wqe(wq, wqed, nmp, len);
			} else {
				ret = oce_map_wqe(wq, wqed, nmp, len);
			}
			if (ret != 0)
				break;
		}
	}

	/*
	 * Any failure other than insufficient Q entries
	 * drop the packet
	 */
	if (ret != 0) {
		oce_free_wqed(wq, wqed);
		atomic_inc_32(&wq->pkt_drops);
		freemsg(mp);
		return (NULL);
	}

	wqeh = (struct oce_nic_hdr_wqe *)&wqed->frag[0];
	bzero(wqeh, sizeof (struct oce_nic_hdr_wqe));

	/* fill rest of wqe header fields based on packet */
	if (flags & HW_LSO) {
		wqeh->u0.s.lso = B_TRUE;
		wqeh->u0.s.lso_mss = mss;
	}
	if (csum_flags & HCK_FULLCKSUM) {
		uint8_t *proto;
		if (etype == ETHERTYPE_IP) {
			proto = (uint8_t *)(void *)
			    (mp->b_rptr + ip_offset);
			if (proto[9] == 6)
				/* IPPROTO_TCP */
				wqeh->u0.s.tcpcs = B_TRUE;
			else if (proto[9] == 17)
				/* IPPROTO_UDP */
				wqeh->u0.s.udpcs = B_TRUE;
		}
	}

	if (csum_flags & HCK_IPV4_HDRCKSUM)
		wqeh->u0.s.ipcs = B_TRUE;
	if (tagged) {
		wqeh->u0.s.vlan = B_TRUE;
		wqeh->u0.s.vlan_tag = vlan_tag;
	}

	wqeh->u0.s.complete = B_TRUE;
	wqeh->u0.s.event = B_TRUE;
	wqeh->u0.s.crc = B_TRUE;
	wqeh->u0.s.total_length = pkt_len;

	num_wqes = wqed->frag_cnt + 1;

	/* h/w expects even no. of WQEs */
	if (num_wqes & 0x1) {
		bzero(&wqed->frag[num_wqes], sizeof (struct oce_nic_frag_wqe));
		num_wqes++;
	}
	wqed->wqe_cnt = (uint16_t)num_wqes;
	wqeh->u0.s.num_wqe = num_wqes;
	DW_SWAP(u32ptr(&wqed->frag[0]), (wqed->wqe_cnt * NIC_WQE_SIZE));

	mutex_enter(&wq->tx_lock);
	if (num_wqes > wq->wq_free) {
		atomic_inc_32(&wq->tx_deferd);
		mutex_exit(&wq->tx_lock);
		goto wqe_fail;
	}
	atomic_add_32(&wq->wq_free, -num_wqes);

	/* fill the wq for adapter */
	oce_fill_ring_descs(wq, wqed);

	/* Set the mp pointer in the wqe descriptor */
	if (use_copy == B_FALSE) {
		wqed->mp = mp;
	}
	/* Add the packet desc to list to be retrieved during cmpl */
	OCE_LIST_INSERT_TAIL(&wq->wqe_desc_list,  wqed);
	(void) ddi_dma_sync(wq->ring->dbuf->dma_handle, 0, 0,
	    DDI_DMA_SYNC_FORDEV);

	/* ring the tx doorbell */
	reg_value = (num_wqes << 16) | wq->wq_id;
	OCE_DB_WRITE32(dev, PD_TXULP_DB, reg_value);
	mutex_exit(&wq->tx_lock);
	if (oce_fm_check_acc_handle(dev, dev->db_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(dev->dip, DDI_SERVICE_DEGRADED);
	}

	/* free mp if copied or packet chain collapsed */
	if (use_copy == B_TRUE) {
		freemsg(mp);
	}
	return (NULL);

wqe_fail:

	if (tagged) {
		oce_insert_vtag(mp, vlan_tag);
	}
	oce_free_wqed(wq, wqed);
	return (mp);
} /* oce_send_packet */
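The return convention (NULL on success, the untouched chain on backpressure) lets the MAC tx entry point stop mid-chain and hand the remainder back for a later retry. A hypothetical caller in that style (oce_m_send and the dev->wq[0] field are assumptions, not taken from the source):

static mblk_t *
oce_m_send(void *arg, mblk_t *mp)
{
	struct oce_dev *dev = arg;
	struct oce_wq *wq = dev->wq[0];	/* assumed: default TX queue */
	mblk_t *next;

	while (mp != NULL) {
		next = mp->b_next;
		mp->b_next = NULL;
		if (oce_send_packet(wq, mp) != NULL) {
			/* ring full: reattach the rest and return it */
			mp->b_next = next;
			break;
		}
		mp = next;
	}
	return (mp);
}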