Example #1
0
/*
 * Name: qla_tx_tso
 * Function: Checks if the packet to be transmitted is a candidate for
 *	Large TCP Segment Offload. If yes, the appropriate fields in the Tx
 *	Ring Structure are plugged in.
 * Returns: 0 if the LSO fields were filled in, -1 if the frame is not
 *	an LSO candidate (fits in one MTU, not TCP/IPv4, or the header is
 *	not contiguous in the first mbuf).
 */
static int
qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd)
{
	struct ether_vlan_header *eh;
	struct ip *ip = NULL;
	struct tcphdr *th = NULL;
	uint32_t ehdrlen,  hdrlen, ip_hlen, tcp_hlen;
	uint16_t etype, opcode, offload = 1;
	device_t dev;

	dev = ha->pci_dev;

	/* Frames that fit in a single MTU do not need segmentation offload. */
	if (mp->m_pkthdr.len <= ha->max_frame_size)
		return (-1);

	eh = mtod(mp, struct ether_vlan_header *);

	/* Account for an optional 802.1Q tag when locating the IP header. */
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		etype = ntohs(eh->evl_proto);
	} else {
		ehdrlen = ETHER_HDR_LEN;
		etype = ntohs(eh->evl_encap_proto);
	}

	switch (etype) {
		case ETHERTYPE_IP:
			/*
			 * NOTE(review): assumes the IP header lies in the
			 * first mbuf; only the full header length is checked
			 * against m_len below — confirm callers guarantee
			 * a pulled-up header.
			 */
			ip = (struct ip *)(mp->m_data + ehdrlen);
			ip_hlen = ip->ip_hl << 2;
			opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;

			if (ip->ip_p != IPPROTO_TCP) {
				offload = 0;
			} else
				th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
		break;

		default:
			QL_DPRINT8((dev, "%s: type!=ip\n", __func__));
			offload = 0;
		break;
	}

	if (!offload)
		return (-1);

	/* th is non-NULL here: offload != 0 implies the TCP branch ran. */
	tcp_hlen = th->th_off << 2;

	hdrlen = ehdrlen + ip_hlen + tcp_hlen;

	/* The complete header must be contiguous in the first mbuf. */
	if (mp->m_len < hdrlen) {
		device_printf(dev, "%s: (mp->m_len < hdrlen)\n", __func__);
		return (-1);
	}

	tx_cmd->flags_opcode = opcode ;
	tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
	tx_cmd->ip_hdr_off = ehdrlen;
	tx_cmd->mss = mp->m_pkthdr.tso_segsz;
	tx_cmd->total_hdr_len = hdrlen;

	/* Check for Multicast least significant bit of MSB == 1 */
	if (eh->evl_dhost[0] & 0x01) {
		/*
		 * OR the multicast flag in; a plain assignment would wipe
		 * out the LSO opcode stored in flags_opcode above.
		 */
		tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
	}

	return (0);
}
Example #2
0
/*
 * Name: qla_hw_send
 * Function: Transmits a packet. It first checks if the packet is a
 *	candidate for Large TCP Segment Offload and then for UDP/TCP checksum
 *	offload. If either of these criteria are not met, it is transmitted
 *	as a regular ethernet frame.
 *
 * NOTE(review): the TSO transmit path (the trailing "else" branch) is
 * truncated in this view; only comments are added here.
 */
int
qla_hw_send(qla_host_t *ha, bus_dma_segment_t *segs, int nsegs,
	uint32_t *tx_idx,  struct mbuf *mp)
{
	struct ether_vlan_header *eh;
	qla_hw_t *hw = &ha->hw;
	q80_tx_cmd_t *tx_cmd, tso_cmd;
	bus_dma_segment_t *c_seg;
	uint32_t num_tx_cmds, hdr_len = 0;
	uint32_t total_length = 0, bytes, tx_cmd_count = 0;
	device_t dev;
	int i;

	dev = ha->pci_dev;

	/*
	 * Always make sure there is atleast one empty slot in the tx_ring
	 * tx_ring is considered full when there only one entry available
	 */
	/*
	 * NOTE(review): the ">> 2" presumably assumes
	 * Q8_TX_CMD_MAX_SEGMENTS == 4 (segments per descriptor) — confirm
	 * against the register definitions header.
	 */
        num_tx_cmds = (nsegs + (Q8_TX_CMD_MAX_SEGMENTS - 1)) >> 2;

	/* Reject frames larger than the largest TSO frame supported. */
	total_length = mp->m_pkthdr.len;
	if (total_length > QLA_MAX_TSO_FRAME_SIZE) {
		device_printf(dev, "%s: total length exceeds maxlen(%d)\n",
			__func__, total_length);
		return (-1);
	}

	bzero((void *)&tso_cmd, sizeof(q80_tx_cmd_t));

	/*
	 * Probe TSO eligibility into a scratch descriptor; on success,
	 * count the extra descriptors needed to carry the packet header.
	 */
	if (qla_tx_tso(ha, mp, &tso_cmd) == 0) {
		/* find the additional tx_cmd descriptors required */

		hdr_len = tso_cmd.total_hdr_len;

		/* The first extra descriptor carries less than a full entry. */
		bytes = sizeof(q80_tx_cmd_t) - Q8_TX_CMD_TSO_ALIGN;
		bytes = QL_MIN(bytes, hdr_len);

		num_tx_cmds++;
		hdr_len -= bytes;

		/* Each subsequent descriptor carries a full entry's worth. */
		while (hdr_len) {
			bytes = QL_MIN((sizeof(q80_tx_cmd_t)), hdr_len);
			hdr_len -= bytes;
			num_tx_cmds++;
		}
		/* Restore hdr_len; a non-zero value selects the TSO path below. */
		hdr_len = tso_cmd.total_hdr_len;
	}

	/* If the ring looks too full, reclaim completed entries once. */
	if (hw->txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
		qla_hw_tx_done_locked(ha);
		if (hw->txr_free <= (num_tx_cmds + QLA_TX_MIN_FREE)) {
        		QL_DPRINT8((dev, "%s: (hw->txr_free <= "
				"(num_tx_cmds + QLA_TX_MIN_FREE))\n",
				__func__));
			return (-1);
		}
	}

	/* Hand the starting ring index back to the caller. */
	*tx_idx = hw->txr_next;

        tx_cmd = &hw->tx_ring_base[hw->txr_next];

	/* hdr_len == 0 means the TSO probe declined: plain transmit path. */
	if (hdr_len == 0) {
		if ((nsegs > Q8_TX_MAX_SEGMENTS) ||
			(mp->m_pkthdr.len > ha->max_frame_size)){
        		device_printf(dev,
				"%s: (nsegs[%d, %d, 0x%x] > Q8_TX_MAX_SEGMENTS)\n",
				__func__, nsegs, mp->m_pkthdr.len,
				mp->m_pkthdr.csum_flags);
			qla_dump_buf8(ha, "qla_hw_send: wrong pkt",
				mtod(mp, char *), mp->m_len);
			return (EINVAL);
		}
		bzero((void *)tx_cmd, sizeof(q80_tx_cmd_t));
		/* Fall back to a plain ethernet frame if checksum offload declines. */
		if (qla_tx_chksum(ha, mp, tx_cmd) != 0) 
        		tx_cmd->flags_opcode = Q8_TX_CMD_OP_XMT_ETHER;
	} else {
Example #3
0
/*
 * Name: qla_tx_tso
 * Function: Checks if the packet to be transmitted is a candidate for
 *	Large TCP Segment Offload. If yes, the appropriate fields in the Tx
 *	Ring Structure are plugged in.
 * Returns: 0 on success with the header contiguous in the first mbuf,
 *	1 on success when the header spans mbufs (the caller must use the
 *	copy assembled in *hdr), and -1 when the frame is not an LSO
 *	candidate.
 */
static int
qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr)
{
	struct ether_vlan_header *eh;
	struct ip *ip = NULL;
	struct tcphdr *th = NULL;
	uint32_t ehdrlen,  hdrlen = 0, ip_hlen, tcp_hlen, tcp_opt_off;
	uint16_t etype, opcode, offload = 1;
	uint8_t *tcp_opt;
	device_t dev;

	dev = ha->pci_dev;

	eh = mtod(mp, struct ether_vlan_header *);

	/* Account for an optional 802.1Q tag when locating the IP header. */
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		etype = ntohs(eh->evl_proto);
	} else {
		ehdrlen = ETHER_HDR_LEN;
		etype = ntohs(eh->evl_encap_proto);
	}

	switch (etype) {
		case ETHERTYPE_IP:

			tcp_opt_off = ehdrlen + sizeof(struct ip) +
					sizeof(struct tcphdr);

			/*
			 * If the fixed ether+IP+TCP headers are not
			 * contiguous in the first mbuf, copy them into the
			 * caller-supplied scratch buffer and parse there.
			 */
			if (mp->m_len < tcp_opt_off) {
				m_copydata(mp, 0, tcp_opt_off, hdr);
				ip = (struct ip *)hdr;
			} else {
				ip = (struct ip *)(mp->m_data + ehdrlen);
			}

			ip_hlen = ip->ip_hl << 2;
			opcode = Q8_TX_CMD_OP_XMT_TCP_LSO;

			/* Only TCP over IPv4 without IP options is offloaded. */
			if ((ip->ip_p != IPPROTO_TCP) ||
				(ip_hlen != sizeof (struct ip))) {
				offload = 0;
			} else {
				th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
			}
		break;

		default:
			QL_DPRINT8((dev, "%s: type!=ip\n", __func__));
			offload = 0;
		break;
	}

	if (!offload)
		return (-1);

	/* th is non-NULL here: offload != 0 implies the TCP branch ran. */
	tcp_hlen = th->th_off << 2;

	hdrlen = ehdrlen + ip_hlen + tcp_hlen;

	/*
	 * Make sure the complete header, including TCP options, is
	 * assembled in the scratch buffer when the first mbuf does not
	 * hold it all.
	 */
	if (mp->m_len < hdrlen) {
		if (mp->m_len < tcp_opt_off) {
			/* Fixed headers were copied above; append options. */
			if (tcp_hlen > sizeof(struct tcphdr)) {
				m_copydata(mp, tcp_opt_off,
					(tcp_hlen - sizeof(struct tcphdr)),
					&hdr[tcp_opt_off]);
			}
		} else {
			m_copydata(mp, 0, hdrlen, hdr);
		}
	}

	if ((mp->m_pkthdr.csum_flags & CSUM_TSO) == 0) {

		/* If TCP options are present only time stamp option is supported */
		if ((tcp_hlen - sizeof(struct tcphdr)) != 10) 
			return (-1);
		else {

			if (mp->m_len < hdrlen) {
				tcp_opt = &hdr[tcp_opt_off];
			} else {
				tcp_opt = (uint8_t *)(mp->m_data + tcp_opt_off);
			}

			/*
			 * Expect NOP, NOP, then the timestamp option:
			 * kind (8) at tcp_opt[2] and length (10) at
			 * tcp_opt[3].  The original compared tcp_opt[2]
			 * against both 0x08 and 10 — a condition that is
			 * always true, rejecting every such frame.
			 */
			if ((*tcp_opt != 0x01) || (*(tcp_opt + 1) != 0x01) ||
				(*(tcp_opt + 2) != 0x08) ||
				(*(tcp_opt + 3) != 10)) {
				return (-1);
			}
		}

		/* No TSO requested by the stack: segment at the link MTU. */
		tx_cmd->mss = ha->max_frame_size - ETHER_CRC_LEN - hdrlen;
	} else {
		tx_cmd->mss = mp->m_pkthdr.tso_segsz;
	}

	tx_cmd->flags_opcode = opcode ;
	tx_cmd->tcp_hdr_off = ip_hlen + ehdrlen;
	tx_cmd->ip_hdr_off = ehdrlen;
	/*
	 * Do not reassign tx_cmd->mss here; it was chosen above based on
	 * whether CSUM_TSO was requested (the original unconditionally
	 * overwrote it with tso_segsz, clobbering the non-TSO value).
	 */
	tx_cmd->total_hdr_len = hdrlen;

	/* Check for Multicast least significant bit of MSB == 1 */
	if (eh->evl_dhost[0] & 0x01) {
		/* OR the flag in so the LSO opcode set above is preserved. */
		tx_cmd->flags_opcode |= Q8_TX_CMD_FLAGS_MULTICAST;
	}

	/* Header spans mbufs: tell the caller to use the copy in *hdr. */
	if (mp->m_len < hdrlen) {
		return (1);
	}

	return (0);
}