/* Example no. 1 (label: 0) */
/*
 * sendmsg() handler for the KNI vhost socket (non-kiocb msghdr API;
 * the kiocb-based signature variant is selected by HAVE_KIOCB_MSG_PARAM
 * above, per the #endif below).
 *
 * Strips the optional leading virtio-net header and forwards the payload
 * to kni_vhost_net_tx().
 *
 * Returns 0 when the queue is not yet wired up (message silently
 * consumed), -EINVAL on a too-short message, otherwise the result of
 * kni_vhost_net_tx().
 */
kni_sock_sndmsg(struct socket *sock,
	   struct msghdr *m, size_t total_len)
#endif /* HAVE_KIOCB_MSG_PARAM */
{
	/* Recover the vhost queue that embeds this socket's sock struct. */
	struct kni_vhost_queue *q =
		container_of(sock->sk, struct kni_vhost_queue, sk);
	int vnet_hdr_len = 0;
	unsigned long len = total_len;

	/* Queue not fully initialized yet: drop silently, report success. */
	if (unlikely(q == NULL || q->kni == NULL))
		return 0;

	KNI_DBG_TX("kni_sndmsg len %ld, flags 0x%08x, nb_iov %d\n",
#ifdef HAVE_IOV_ITER_MSGHDR
		   len, q->flags, (int)m->msg_iter.iov->iov_len);
#else
		   len, q->flags, (int)m->msg_iovlen);
#endif

#ifdef RTE_KNI_VHOST_VNET_HDR_EN
	/* Peel the virtio-net header off the front when the queue uses one. */
	if (likely(q->flags & IFF_VNET_HDR)) {
		vnet_hdr_len = q->vnet_hdr_sz;
		if (unlikely(len < vnet_hdr_len))
			return -EINVAL;
		len -= vnet_hdr_len;
	}
#endif

	/*
	 * NOTE(review): vnet_hdr_sz is added to the minimum length here even
	 * though it was already subtracted from len above when IFF_VNET_HDR
	 * is set — looks like a double-count; confirm against the
	 * RTE_KNI_VHOST_VNET_HDR_EN-disabled build.
	 */
	if (unlikely(len < ETH_HLEN + q->vnet_hdr_sz))
		return -EINVAL;

	return kni_vhost_net_tx(q->kni, m, vnet_hdr_len, len);
}
/* Example no. 2 (label: 0) */
/*
 * sendmsg() handler for the KNI vhost socket (kiocb-based msghdr API,
 * pre-iov_iter kernels).
 *
 * Strips the optional leading virtio-net header (when built with
 * RTE_KNI_VHOST_VNET_HDR_EN and the queue has IFF_VNET_HDR set) and
 * hands the iovec payload to kni_vhost_net_tx().
 *
 * Returns 0 when the queue is not yet wired up (message silently
 * consumed), -EINVAL on a too-short message, otherwise the result of
 * kni_vhost_net_tx().
 */
static int
kni_sock_sndmsg(struct kiocb *iocb, struct socket *sock,
	   struct msghdr *m, size_t total_len)
{
	struct kni_vhost_queue *vq =
		container_of(sock->sk, struct kni_vhost_queue, sk);
	unsigned long payload_len = total_len;
	int hdr_len = 0;

	/* Queue not fully initialized yet: drop silently, report success. */
	if (unlikely(vq == NULL || vq->kni == NULL))
		return 0;

	KNI_DBG_TX("kni_sndmsg len %ld, flags 0x%08x, nb_iov %d\n",
		   payload_len, vq->flags, (int)m->msg_iovlen);

#ifdef RTE_KNI_VHOST_VNET_HDR_EN
	/* Peel the virtio-net header off the front when the queue uses one. */
	if (likely(vq->flags & IFF_VNET_HDR)) {
		hdr_len = vq->vnet_hdr_sz;
		if (unlikely(payload_len < hdr_len))
			return -EINVAL;
		payload_len -= hdr_len;
	}
#endif

	/* Too small to hold an Ethernet header plus the vnet header. */
	if (unlikely(payload_len < ETH_HLEN + vq->vnet_hdr_sz))
		return -EINVAL;

	return kni_vhost_net_tx(vq->kni, m->msg_iov, hdr_len, payload_len);
}
/* Example no. 3 (label: 0) */
/*
 * Copy one message payload from user iovecs into a freshly allocated
 * KNI mbuf and enqueue it on the kernel->userspace tx_q FIFO.
 *
 * @kni:    KNI device that owns tx_q/alloc_q and the mbuf address bases.
 * @m:      msghdr whose iovecs carry the payload.
 * @offset: byte offset into the iovecs to start copying from (the
 *          virtio-net header length in the caller).
 * @len:    number of payload bytes to copy.
 *
 * Always returns 0; on any failure the packet is dropped and only the
 * tx_dropped counter records it.
 */
static inline int
kni_vhost_net_tx(struct kni_dev *kni, struct msghdr *m,
		 unsigned offset, unsigned len)
{
	struct rte_kni_mbuf *pkt_kva = NULL;
	struct rte_kni_mbuf *pkt_va = NULL;
	int ret;

	KNI_DBG_TX("tx offset=%d, len=%d, iovlen=%d\n",
#ifdef HAVE_IOV_ITER_MSGHDR
		   offset, len, (int)m->msg_iter.iov->iov_len);
#else
		   offset, len, (int)m->msg_iov->iov_len);
#endif

	/**
	 * Check if it has at least one free entry in tx_q and
	 * one entry in alloc_q.
	 */
	if (kni_fifo_free_count(kni->tx_q) == 0 ||
	    kni_fifo_count(kni->alloc_q) == 0) {
		/**
		 * If no free entry in tx_q or no entry in alloc_q,
		 * drops skb and goes out.
		 */
		goto drop;
	}

	/* dequeue a mbuf from alloc_q */
	ret = kni_fifo_get(kni->alloc_q, (void **)&pkt_va, 1);
	if (likely(ret == 1)) {
		void *data_kva;

		/*
		 * alloc_q carries userspace virtual addresses; rebase them
		 * into this kernel mapping (va -> kva) before touching the
		 * mbuf header or its data buffer.
		 */
		pkt_kva = (void *)pkt_va - kni->mbuf_va + kni->mbuf_kva;
		data_kva = pkt_kva->buf_addr + pkt_kva->data_off
		           - kni->mbuf_va + kni->mbuf_kva;

		/*
		 * NOTE(review): len is not checked against the mbuf data
		 * buffer size before this copy — confirm the caller bounds
		 * it, otherwise this can overrun the buffer.
		 */
#ifdef HAVE_IOV_ITER_MSGHDR
		/*
		 * NOTE(review): offset is unused on the iov_iter path —
		 * presumably msg_iter was already advanced past the header
		 * by the caller; verify.
		 */
		copy_from_iter(data_kva, len, &m->msg_iter);
#else
		memcpy_fromiovecend(data_kva, m->msg_iov, offset, len);
#endif

		/* Zero-pad runt frames up to the Ethernet minimum. */
		if (unlikely(len < ETH_ZLEN)) {
			memset(data_kva + len, 0, ETH_ZLEN - len);
			len = ETH_ZLEN;
		}
		pkt_kva->pkt_len = len;
		pkt_kva->data_len = len;

		/* enqueue mbuf into tx_q */
		ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va, 1);
		if (unlikely(ret != 1)) {
			/* Failing should not happen */
			KNI_ERR("Fail to enqueue mbuf into tx_q\n");
			goto drop;
		}
	} else {
		/* Failing should not happen */
		KNI_ERR("Fail to dequeue mbuf from alloc_q\n");
		goto drop;
	}

	/* update statistics */
	kni->stats.tx_bytes += len;
	kni->stats.tx_packets++;

	return 0;

drop:
	/* update statistics */
	kni->stats.tx_dropped++;

	return 0;
}