Example #1: kni_allocate_mbufs()
static void
kni_allocate_mbufs(struct rte_kni *kni)
{
	int i, ret;
	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];

	/* Check if pktmbuf pool has been configured */
	if (kni->pktmbuf_pool == NULL) {
		RTE_LOG(ERR, KNI, "No valid mempool for allocating mbufs\n");
		return;
	}

	for (i = 0; i < MAX_MBUF_BURST_NUM; i++) {
		pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool);
		if (unlikely(pkts[i] == NULL)) {
			/* Out of memory */
			RTE_LOG(ERR, KNI, "Out of memory\n");
			break;
		}
	}

	/* No pkt mbuf allocated */
	if (i <= 0)
		return;

	ret = kni_fifo_put(kni->alloc_q, (void **)pkts, i);

	/* Free any mbufs that were not put into alloc_q */
	if (ret >= 0 && ret < i && ret < MAX_MBUF_BURST_NUM) {
		int j;

		for (j = ret; j < i; j++)
			rte_pktmbuf_free(pkts[j]);
	}
}
Example #2: rte_kni_tx_burst()
unsigned
rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
{
	unsigned ret = kni_fifo_put(kni->rx_q, (void **)mbufs, num);

	/* Get mbufs from free_q and then free them */
	kni_free_mbufs(kni);

	return ret;
}
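For context, a minimal caller sketch (not part of the listed examples) of how rte_kni_tx_burst() is typically driven from a polling loop: a burst read from a NIC port is handed to the kernel interface, and any mbufs the FIFO could not accept stay owned by the caller and must be freed. The names PKT_BURST_SZ, port_id and kni are assumptions here.

/* Hypothetical polling-loop sketch; PKT_BURST_SZ, port_id and kni are assumed */
struct rte_mbuf *pkts[PKT_BURST_SZ];
unsigned nb_rx, nb_tx, i;

/* Read a burst of packets from the NIC */
nb_rx = rte_eth_rx_burst(port_id, 0, pkts, PKT_BURST_SZ);

/* Hand the burst to the kernel interface; the return value is the
 * number of mbufs actually enqueued onto kni->rx_q */
nb_tx = rte_kni_tx_burst(kni, pkts, nb_rx);

/* mbufs that did not fit into the FIFO remain owned by the caller */
for (i = nb_tx; i < nb_rx; i++)
	rte_pktmbuf_free(pkts[i]);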
Example #3: kni_allocate_mbufs() with physical-address translation
static void
kni_allocate_mbufs(struct rte_kni *kni)
{
	int i, ret;
	struct rte_mbuf *pkts[MAX_MBUF_BURST_NUM];
	void *phys[MAX_MBUF_BURST_NUM];

	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pool) !=
			 offsetof(struct rte_kni_mbuf, pool));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_addr) !=
			 offsetof(struct rte_kni_mbuf, buf_addr));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, next) !=
			 offsetof(struct rte_kni_mbuf, next));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) !=
			 offsetof(struct rte_kni_mbuf, data_off));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_len) !=
			 offsetof(struct rte_kni_mbuf, data_len));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, pkt_len) !=
			 offsetof(struct rte_kni_mbuf, pkt_len));
	RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
			 offsetof(struct rte_kni_mbuf, ol_flags));

	/* Check if pktmbuf pool has been configured */
	if (kni->pktmbuf_pool == NULL) {
		RTE_LOG(ERR, KNI, "No valid mempool for allocating mbufs\n");
		return;
	}

	for (i = 0; i < MAX_MBUF_BURST_NUM; i++) {
		pkts[i] = rte_pktmbuf_alloc(kni->pktmbuf_pool);
		if (unlikely(pkts[i] == NULL)) {
			/* Out of memory */
			RTE_LOG(ERR, KNI, "Out of memory\n");
			break;
		}
		phys[i] = va2pa(pkts[i]);
	}

	/* No pkt mbuf allocated */
	if (i <= 0)
		return;

	ret = kni_fifo_put(kni->alloc_q, phys, i);

	/* Free any mbufs that were not put into alloc_q */
	if (ret >= 0 && ret < i && ret < MAX_MBUF_BURST_NUM) {
		int j;

		for (j = ret; j < i; j++)
			rte_pktmbuf_free(pkts[j]);
	}
}
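The va2pa() helper called above is not part of this snippet; a sketch of it, modeled on older librte_kni releases where the mbuf still carries a buf_physaddr field, converts the mbuf's virtual address into the physical address that the kernel module expects to find in alloc_q:

/* Sketch of va2pa(), modeled on older librte_kni (mbuf->buf_physaddr era) */
static void *
va2pa(struct rte_mbuf *m)
{
	/* the offset between buf_addr and buf_physaddr also applies to the mbuf header */
	return (void *)((unsigned long)m -
			((unsigned long)m->buf_addr -
			 (unsigned long)m->buf_physaddr));
}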
Example #4: rte_kni_tx_burst() with kernel request handling
unsigned
rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
{
	unsigned ret = kni_fifo_put(kni->rx_q, (void **)mbufs, num);

	/* Get mbufs from free_q and then free them */
	kni_free_mbufs(kni);

	/* Handle the requests from kernel space */
	kni_request_handler(kni);

	return ret;
}
Example #5: rte_kni_tx_burst() (RTE_LIBRW_PIOT variant)
unsigned
rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
{
	unsigned ret = kni_fifo_put(kni->rx_q, (void **)mbufs, num);
#ifdef RTE_LIBRW_PIOT
        /* try allocating buffer only if the fifo is not full */
        if (ret)
#endif
	/* Get mbufs from free_q and then free them */
	kni_free_mbufs(kni);

	return ret;
}
Example #6: rte_kni_handle_request() (RTE_LIBRW_PIOT variant)
int
rte_kni_handle_request(struct rte_kni *kni)
{
	unsigned ret;
	struct rte_kni_request *req;

	if (kni == NULL)
		return -1;
#ifdef RTE_LIBRW_PIOT
        /* If the kni interface is unidirectional, the free_q would never be
           drained; hence drain it here */
        if (!kni_fifo_empty(kni->free_q)) 
          kni_free_mbufs(kni);
#endif
	/* Get request mbuf */
	ret = kni_fifo_get(kni->req_q, (void **)&req, 1);
	if (ret != 1)
		return 0; /* It is OK if no request mbuf could be fetched */

	if (req != kni->sync_addr) {
		rte_panic("Wrong req pointer %p\n", req);
	}

	/* Analyze the request and call the relevant actions for it */
	switch (req->req_id) {
	case RTE_KNI_REQ_CHANGE_MTU: /* Change MTU */
		if (kni->ops.change_mtu)
			req->result = kni->ops.change_mtu(kni->ops.port_id,
							req->new_mtu);
		break;
	case RTE_KNI_REQ_CFG_NETWORK_IF: /* Set network interface up/down */
		if (kni->ops.config_network_if)
			req->result = kni->ops.config_network_if(\
					kni->ops.port_id, req->if_up);
		break;
	default:
		RTE_LOG(ERR, KNI, "Unknown request id %u\n", req->req_id);
		req->result = -EINVAL;
		break;
	}

	/* Construct response mbuf and put it back to resp_q */
	ret = kni_fifo_put(kni->resp_q, (void **)&req, 1);
	if (ret != 1) {
		RTE_LOG(ERR, KNI, "Failed to put the mbuf back to resp_q\n");
		return -1; /* It is an error if the mbuf cannot be put back */
	}

	return 0;
}
Example #7: rte_kni_handle_request()
int
rte_kni_handle_request(struct rte_kni *kni)
{
	unsigned ret;
	struct rte_kni_request *req;

	if (kni == NULL)
		return -1;

	/* Get request mbuf */
	ret = kni_fifo_get(kni->req_q, (void **)&req, 1);
	if (ret != 1)
		return 0; /* It is OK if no request mbuf could be fetched */

	if (req != kni->sync_addr) {
		RTE_LOG(ERR, KNI, "Wrong req pointer %p\n", req);
		return -1;
	}

	/* Analyze the request and call the relevant actions for it */
	switch (req->req_id) {
	case RTE_KNI_REQ_CHANGE_MTU: /* Change MTU */
		if (kni->ops.change_mtu)
			req->result = kni->ops.change_mtu(kni->ops.port_id,
							req->new_mtu);
		break;
	case RTE_KNI_REQ_CFG_NETWORK_IF: /* Set network interface up/down */
		if (kni->ops.config_network_if)
			req->result = kni->ops.config_network_if(\
					kni->ops.port_id, req->if_up);
		break;
	default:
		RTE_LOG(ERR, KNI, "Unknown request id %u\n", req->req_id);
		req->result = -EINVAL;
		break;
	}

	/* Construct response mbuf and put it back to resp_q */
	ret = kni_fifo_put(kni->resp_q, (void **)&req, 1);
	if (ret != 1) {
		RTE_LOG(ERR, KNI, "Failed to put the mbuf back to resp_q\n");
		return -1; /* It is an error if the mbuf cannot be put back */
	}

	return 0;
}
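Applications normally poll rte_kni_handle_request() from a control path so that kernel-originated requests (MTU change, interface up/down) get serviced. A hedged sketch of that loop; the callback names, quit_signal, port_id and the poll interval are assumptions, only the rte_kni_ops fields come from the API:

/* Hypothetical control-path sketch; callback names and poll interval are assumed */
struct rte_kni_ops ops = {
	.port_id = port_id,
	.change_mtu = kni_change_mtu,                 /* application callback */
	.config_network_if = kni_config_network_if,   /* application callback */
};
/* ... &ops is passed to rte_kni_alloc() when the interface is created ... */

while (!quit_signal) {
	rte_kni_handle_request(kni);   /* services at most one request per call */
	usleep(500);                   /* yield between polls */
}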
Example #8: rte_kni_tx_burst() with physical-address translation
unsigned
rte_kni_tx_burst(struct rte_kni *kni, struct rte_mbuf **mbufs, unsigned num)
{
	void *phy_mbufs[num];
	unsigned int ret;
	unsigned int i;

	for (i = 0; i < num; i++)
		phy_mbufs[i] = va2pa(mbufs[i]);

	ret = kni_fifo_put(kni->rx_q, phy_mbufs, num);

	/* Get mbufs from free_q and then free them */
	kni_free_mbufs(kni);

	return ret;
}
Example #9: kni_net_process_request() (kernel side)
/*
 * It can be called to process the request.
 */
static int
kni_net_process_request(struct kni_dev *kni, struct rte_kni_request *req)
{
	int ret = -1;
	void *resp_va;
	unsigned num;
	int ret_val;

	if (!kni || !req) {
		KNI_ERR("No kni instance or request\n");
		return -EINVAL;
	}

	mutex_lock(&kni->sync_lock);

	/* Construct data */
	memcpy(kni->sync_kva, req, sizeof(struct rte_kni_request));
	num = kni_fifo_put(kni->req_q, &kni->sync_va, 1);
	if (num < 1) {
		KNI_ERR("Cannot send to req_q\n");
		ret = -EBUSY;
		goto fail;
	}

	ret_val = wait_event_interruptible_timeout(kni->wq,
			kni_fifo_count(kni->resp_q), 3 * HZ);
	if (signal_pending(current) || ret_val <= 0) {
		ret = -ETIME;
		goto fail;
	}
	num = kni_fifo_get(kni->resp_q, (void **)&resp_va, 1);
	if (num != 1 || resp_va != kni->sync_va) {
		/* This should never happen */
		KNI_ERR("No data in resp_q\n");
		ret = -ENODATA;
		goto fail;
	}

	memcpy(req, kni->sync_kva, sizeof(struct rte_kni_request));
	ret = 0;

fail:
	mutex_unlock(&kni->sync_lock);
	return ret;
}
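One kernel-side caller of this function, sketched after kni_net_change_mtu() in the same module: it fills in a request, blocks in kni_net_process_request() until user space answers through resp_q, and only then applies the new MTU.

/* Sketch modeled on kni_net_change_mtu() in kni_net.c */
static int
kni_net_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct rte_kni_request req;
	struct kni_dev *kni = netdev_priv(dev);

	memset(&req, 0, sizeof(req));
	req.req_id = RTE_KNI_REQ_CHANGE_MTU;
	req.new_mtu = new_mtu;
	ret = kni_net_process_request(kni, &req);
	if (ret == 0 && req.result == 0)
		dev->mtu = new_mtu;

	return (ret == 0) ? req.result : ret;
}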
Example #10: rte_kni_fifo_put() wrapper
int rte_kni_fifo_put(void *fifo, void **mbufs, int num){
  return kni_fifo_put((struct rte_kni_fifo *)fifo, mbufs, num);
}
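This wrapper only forwards to the internal FIFO primitive. For reference, a sketch of the single-producer enqueue it wraps, modeled on kni_fifo.h from older DPDK releases (power-of-two ring length, no memory barriers); the return value is the number of pointers actually stored, which is why the callers above check it against the requested count:

/* Sketch of the underlying enqueue, modeled on older kni_fifo.h */
static inline unsigned
kni_fifo_put(struct rte_kni_fifo *fifo, void **data, unsigned num)
{
	unsigned i = 0;
	unsigned fifo_write = fifo->write;
	unsigned fifo_read = fifo->read;
	unsigned new_write = fifo_write;

	for (i = 0; i < num; i++) {
		new_write = (new_write + 1) & (fifo->len - 1);
		if (new_write == fifo_read)       /* ring full, stop early */
			break;
		fifo->buffer[fifo_write] = data[i];
		fifo_write = new_write;
	}
	fifo->write = fifo_write;

	return i;   /* number of entries actually enqueued */
}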
Example #11: kni_vhost_backend_init()
static int
kni_vhost_backend_init(struct kni_dev *kni)
{
	struct kni_vhost_queue *q;
	struct net *net = current->nsproxy->net_ns;
	int err, i, sockfd;
	struct rte_kni_fifo *fifo;
	struct sk_buff *elem;

	if (kni->vhost_queue != NULL)
		return -1;

	if (!(q = (struct kni_vhost_queue *)sk_alloc(
		      net, AF_UNSPEC, GFP_KERNEL, &kni_raw_proto)))
		return -ENOMEM;

	err = sock_create_lite(AF_UNSPEC, SOCK_RAW, IPPROTO_RAW, &q->sock);
	if (err)
		goto free_sk;

	sockfd = kni_sock_map_fd(q->sock);
	if (sockfd < 0) {
		err = sockfd;
		goto free_sock;
	}

	/* cache init */
	q->cache = (struct sk_buff*)
		kzalloc(RTE_KNI_VHOST_MAX_CACHE_SIZE * sizeof(struct sk_buff),
			GFP_KERNEL);
	if (!q->cache)
		goto free_fd;

	fifo = (struct rte_kni_fifo*)
		kzalloc(RTE_KNI_VHOST_MAX_CACHE_SIZE * sizeof(void *)
			+ sizeof(struct rte_kni_fifo), GFP_KERNEL);
	if (!fifo)
		goto free_cache;

	kni_fifo_init(fifo, RTE_KNI_VHOST_MAX_CACHE_SIZE);

	for (i = 0; i < RTE_KNI_VHOST_MAX_CACHE_SIZE; i++) {
		elem = &q->cache[i];
		kni_fifo_put(fifo, (void**)&elem, 1);
	}
	q->fifo = fifo;

	/* store sockfd in vhost_queue */
	q->sockfd = sockfd;

	/* init socket */
	q->sock->type = SOCK_RAW;
	q->sock->state = SS_CONNECTED;
	q->sock->ops = &kni_socket_ops;
	sock_init_data(q->sock, &q->sk);

	/* init sock data */
	q->sk.sk_write_space = kni_sk_write_space;
	q->sk.sk_destruct = kni_sk_destruct;
	q->flags = IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
#ifdef RTE_KNI_VHOST_VNET_HDR_EN
	q->flags |= IFF_VNET_HDR;
#endif

	/* bind kni_dev with vhost_queue */
	q->kni = kni;
	kni->vhost_queue = q;

	wmb();

	kni->vq_status = BE_START;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
	KNI_DBG("backend init sockfd=%d, sock->wq=0x%16llx,"
		  "sk->sk_wq=0x%16llx",
		  q->sockfd, (uint64_t)q->sock->wq,
		  (uint64_t)q->sk.sk_wq);
#else
	KNI_DBG("backend init sockfd=%d, sock->wait at 0x%16llx,"
		  "sk->sk_sleep=0x%16llx",
		  q->sockfd, (uint64_t)&q->sock->wait,
		  (uint64_t)q->sk.sk_sleep);
#endif

	return 0;

free_cache:
	kfree(q->cache);
	q->cache = NULL;

free_fd:
	put_unused_fd(sockfd);

free_sock:
	q->kni = NULL;
	kni->vhost_queue = NULL;
	kni->vq_status |= BE_FINISH;
	sock_release(q->sock);
	q->sock->ops = NULL;
	q->sock = NULL;

free_sk:
	sk_free((struct sock*)q);

	return err;
}
Example #12: kni_vhost_net_rx()
static inline int
kni_vhost_net_rx(struct kni_dev *kni, struct msghdr *m,
		 unsigned offset, unsigned len)
{
	uint32_t pkt_len;
	struct rte_kni_mbuf *kva;
	struct rte_kni_mbuf *va;
	void * data_kva;
	struct sk_buff *skb;
	struct kni_vhost_queue *q = kni->vhost_queue;

	if (unlikely(q == NULL))
		return 0;

	/* ensure at least one entry in free_q */
	if (unlikely(kni_fifo_free_count(kni->free_q) == 0))
		return 0;

	skb = skb_dequeue(&q->sk.sk_receive_queue);
	if (unlikely(skb == NULL))
		return 0;

	kva = (struct rte_kni_mbuf*)skb->data;

	/* free skb to cache */
	skb->data = NULL;
	if (unlikely(1 != kni_fifo_put(q->fifo, (void **)&skb, 1)))
		/* Failing should not happen */
		KNI_ERR("Fail to enqueue entries into rx cache fifo\n");

	pkt_len = kva->data_len;
	if (unlikely(pkt_len > len))
		goto drop;

	KNI_DBG_RX("rx offset=%d, len=%d, pkt_len=%d, iovlen=%d\n",
#ifdef HAVE_IOV_ITER_MSGHDR
		   offset, len, pkt_len, (int)m->msg_iter.iov->iov_len);
#else
		   offset, len, pkt_len, (int)m->msg_iov->iov_len);
#endif

	data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va + kni->mbuf_kva;
#ifdef HAVE_IOV_ITER_MSGHDR
	if (unlikely(copy_to_iter(data_kva, pkt_len, &m->msg_iter)))
#else
	if (unlikely(memcpy_toiovecend(m->msg_iov, data_kva, offset, pkt_len)))
#endif
		goto drop;

	/* Update statistics */
	kni->stats.rx_bytes += pkt_len;
	kni->stats.rx_packets++;

	/* enqueue mbufs into free_q */
	va = (void*)kva - kni->mbuf_kva + kni->mbuf_va;
	if (unlikely(1 != kni_fifo_put(kni->free_q, (void **)&va, 1)))
		/* Failing should not happen */
		KNI_ERR("Fail to enqueue entries into free_q\n");

	KNI_DBG_RX("receive done %d\n", pkt_len);

	return pkt_len;

drop:
	/* Update drop statistics */
	kni->stats.rx_dropped++;

	return 0;
}
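The pointer arithmetic repeated in the kernel-side examples ((void *)va - kni->mbuf_va + kni->mbuf_kva and its inverse) translates between the userspace virtual addresses stored in the FIFOs and the kernel mapping of the same mbuf region. A pair of hypothetical helpers, written here only to name that pattern; they are not in the original module, and they rely on GNU C void-pointer arithmetic just as the module itself does:

/* Hypothetical helpers naming the VA <-> KVA translation used below */
static inline void *
mbuf_va_to_kva(struct kni_dev *kni, void *va)
{
	return va - kni->mbuf_va + kni->mbuf_kva;
}

static inline void *
mbuf_kva_to_va(struct kni_dev *kni, void *kva)
{
	return kva - kni->mbuf_kva + kni->mbuf_va;
}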
Example #13: kni_net_tx() (multi-segment variant)
static int
kni_net_tx(struct sk_buff *skb, struct net_device *dev)
{
  int len = 0;
  unsigned ret;
  struct kni_dev *kni = netdev_priv(dev);
  struct rte_kni_mbuf *pkt_kva;
  struct rte_kni_mbuf *pkt_va[RW_FPATH_KNI_MAX_SEGS];
  int num_req_mbuf = 1;

  int err;
  
  kni->tx_attempted++;

  err = skb_linearize(skb);
  if (unlikely(err)){
    goto drop;
  }
#ifdef RTE_LIBRW_NOHUGE
  if (kni->nohuge){
    kni->nl_tx_queued++;
    skb_queue_tail(&kni->skb_tx_queue,
                     skb);
    return NETDEV_TX_OK;    
  }

#endif
  dev->trans_start = jiffies; /* save the timestamp */
  
  /* Check if the length of skb is less than mbuf size */
  if (skb->len > kni->mbuf_size){
    num_req_mbuf = (skb->len/kni->mbuf_size) + 1;
    if (num_req_mbuf > RW_FPATH_KNI_MAX_SEGS){
      goto drop;
    }
  }
  

  if (kni->no_tx ||
      kni->no_data){
    goto drop;
  }
  if (kni_fifo_free_count(kni->tx_q) < num_req_mbuf){
    kni->tx_no_txq++;
    /**
     * If no free entry in tx_q or no entry in alloc_q,
     * drops skb and goes out.
     */
    goto drop;
  }
  if (kni_fifo_count(kni->alloc_q) < num_req_mbuf) {
    kni->tx_no_allocq++;
    /**
     * If no free entry in tx_q or no entry in alloc_q,
     * drops skb and goes out.
     */
    goto drop;
  }
  
  /* dequeue a mbuf from alloc_q */
  ret = kni_fifo_get(kni->alloc_q, (void **)&pkt_va[0], num_req_mbuf);
  
  if (likely(ret == num_req_mbuf)) {
    int seg_no = 0;
    int copylen, remlen;
    unsigned char *to, *from;
    int next;
    struct rte_kni_mbuf **prev;
    
    len = skb->len;

    prev = (struct rte_kni_mbuf **)&pkt_va[seg_no]->next;
    pkt_kva = (void *)pkt_va[seg_no] - kni->mbuf_va + kni->mbuf_kva;
    pkt_kva->pkt_len = len;
    RW_KNI_VF_SET_MDATA_PAYLOAD(&pkt_kva->meta_data,
                                skb->protocol);
    from = (unsigned char*)skb->data;
    
    to = (unsigned char*)(pkt_kva->buf_addr + pkt_kva->data_off - kni->mbuf_va
                          + kni->mbuf_kva);
    remlen = kni->mbuf_size;
    next = 0;
    
    while (len > 0) {
      copylen = len;
      if (copylen > remlen){
        next= 1;
        copylen = remlen;
      }
      
      memcpy(to, from, copylen);
      to += copylen;
      from += copylen;
      len -= copylen;
      remlen -= copylen;

      if (unlikely(len < ETH_ZLEN)) {
#if 0
        //AKKI
        memset(data_kva + len, 0, ETH_ZLEN - len);
        len = ETH_ZLEN;
#endif
      }
      pkt_kva->data_len += copylen;
      if (next){
        seg_no++;
        *prev = pkt_va[seg_no];
        prev = (struct rte_kni_mbuf **)&pkt_va[seg_no]->next;
        pkt_kva = (void *)pkt_va[seg_no] - kni->mbuf_va + kni->mbuf_kva;
        to = (unsigned char*)(pkt_kva->buf_addr + pkt_kva->data_off - kni->mbuf_va+ kni->mbuf_kva);
        remlen = kni->mbuf_size;
        next = 0;
        //AKKI increment the nb_segs.. bug bug
      }
    }
    
    /* enqueue mbuf into tx_q */
    ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va[0], 1);
    if (unlikely(ret != 1)) {
      /* Failing should not happen */
      KNI_ERR("Fail to enqueue mbuf into tx_q\n");
      goto drop;
    }
  } else {
    /* Failing should not happen */
    KNI_ERR("Fail to dequeue mbuf from alloc_q\n");
    goto drop;
  }
  
  /* Free skb and update statistics */
  dev_kfree_skb(skb);
  kni->stats.tx_bytes += len;
  kni->stats.tx_packets++;
  
  return NETDEV_TX_OK;
  
drop:
  /* Free skb and update statistics */
  dev_kfree_skb(skb);
  kni->stats.tx_dropped++;
  
  return NETDEV_TX_OK;
}
Example #14: kni_net_tx()
  /* Free skb and update statistics */
  dev_kfree_skb(skb);
  kni->stats.tx_dropped++;
  
  return NETDEV_TX_OK;
}

#else /*RTE_LIBRW_PIOT*/
static int
kni_net_tx(struct sk_buff *skb, struct net_device *dev)
{
	int len = 0;
	unsigned ret;
	struct kni_dev *kni = netdev_priv(dev);
	struct rte_kni_mbuf *pkt_kva = NULL;
	struct rte_kni_mbuf *pkt_va = NULL;

	dev->trans_start = jiffies; /* save the timestamp */

	/* Check if the length of skb is less than mbuf size */
	if (skb->len > kni->mbuf_size){
          goto drop;
        }

	/**
	 * Check if it has at least one free entry in tx_q and
	 * one entry in alloc_q.
	 */
	if (kni_fifo_free_count(kni->tx_q) == 0 ||
			kni_fifo_count(kni->alloc_q) == 0) {
		/**
		 * If no free entry in tx_q or no entry in alloc_q,
		 * drops skb and goes out.
		 */
		goto drop;
	}

	/* dequeue a mbuf from alloc_q */
	ret = kni_fifo_get(kni->alloc_q, (void **)&pkt_va, 1);
	if (likely(ret == 1)) {
		void *data_kva;

		pkt_kva = (void *)pkt_va - kni->mbuf_va + kni->mbuf_kva;
		data_kva = pkt_kva->buf_addr + pkt_kva->data_off - kni->mbuf_va
				+ kni->mbuf_kva;

		len = skb->len;
		memcpy(data_kva, skb->data, len);
		if (unlikely(len < ETH_ZLEN)) {
			memset(data_kva + len, 0, ETH_ZLEN - len);
			len = ETH_ZLEN;
		}
		pkt_kva->pkt_len = len;
		pkt_kva->data_len = len;
		/* enqueue mbuf into tx_q */
		ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va, 1);
		if (unlikely(ret != 1)) {
			/* Failing should not happen */
			KNI_ERR("Fail to enqueue mbuf into tx_q\n");
			goto drop;
		}
	} else {
		/* Failing should not happen */
		KNI_ERR("Fail to dequeue mbuf from alloc_q\n");
		goto drop;
	}

	/* Free skb and update statistics */
	dev_kfree_skb(skb);
	kni->stats.tx_bytes += len;
	kni->stats.tx_packets++;

	return NETDEV_TX_OK;

drop:
	/* Free skb and update statistics */
	dev_kfree_skb(skb);
	kni->stats.tx_dropped++;

	return NETDEV_TX_OK;
}
Example #15: kni_net_rx_lo_fifo_skb() (loopback with skb copies)
/*
 * RX: loopback with enqueue/dequeue fifos and sk buffer copies.
 */
static void
kni_net_rx_lo_fifo_skb(struct kni_dev *kni)
{
	unsigned ret;
	uint32_t len;
	unsigned i, num_rq, num_fq, num;
	struct rte_kni_mbuf *kva;
	struct rte_kni_mbuf *va[MBUF_BURST_SZ];
	void * data_kva;

	struct sk_buff *skb;
	struct net_device *dev = kni->net_dev;

	/* Get the number of entries in rx_q */
	num_rq = kni_fifo_count(kni->rx_q);

	/* Get the number of free entries in free_q */
	num_fq = kni_fifo_free_count(kni->free_q);

	/* Calculate the number of entries to dequeue from rx_q */
	num = min(num_rq, num_fq);
	num = min(num, (unsigned)MBUF_BURST_SZ);

	/* Return if no entry to dequeue from rx_q */
	if (num == 0)
		return;

	/* Burst dequeue mbufs from rx_q */
	ret = kni_fifo_get(kni->rx_q, (void **)va, num);
	if (ret == 0)
		return;

	/* Copy mbufs to sk buffer and then call tx interface */
	for (i = 0; i < num; i++) {
		kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
		len = kva->data_len;
		data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va +
				kni->mbuf_kva;

		skb = dev_alloc_skb(len + 2);
		if (skb == NULL)
			KNI_ERR("Out of mem, dropping pkts\n");
		else {
                  /* Align IP on 16B boundary */
                  skb_reserve(skb, 2);
                  memcpy(skb_put(skb, len), data_kva, len);
                  skb->dev = dev;
                  skb->ip_summed = CHECKSUM_UNNECESSARY;
                  dev_kfree_skb(skb);
		}

		/* Simulate real usage, allocate/copy skb twice */
		skb = dev_alloc_skb(len + 2);
		if (skb == NULL) {
			KNI_ERR("Out of mem, dropping pkts\n");
			kni->stats.rx_dropped++;
		}
		else {
                  /* Align IP on 16B boundary */
			skb_reserve(skb, 2);
			memcpy(skb_put(skb, len), data_kva, len);
			skb->dev = dev;
			skb->ip_summed = CHECKSUM_UNNECESSARY;

			kni->stats.rx_bytes += len;
			kni->stats.rx_packets++;

			/* call tx interface */
			kni_net_tx(skb, dev);
		}
	}

	/* enqueue all the mbufs from rx_q into free_q */
	ret = kni_fifo_put(kni->free_q, (void **)&va, num);
	if (ret != num)
		/* Failing should not happen */
		KNI_ERR("Fail to enqueue mbufs into free_q\n");
}
Example #16: kni_net_rx_lo_fifo() (loopback via fifos)
/*
 * RX: loopback with enqueue/dequeue fifos.
 */
static void
kni_net_rx_lo_fifo(struct kni_dev *kni)
{
	unsigned ret;
	uint32_t len;
	unsigned i, num, num_rq, num_tq, num_aq, num_fq;
	struct rte_kni_mbuf *kva;
	struct rte_kni_mbuf *va[MBUF_BURST_SZ];
	void * data_kva;

	struct rte_kni_mbuf *alloc_kva;
	struct rte_kni_mbuf *alloc_va[MBUF_BURST_SZ];
	void *alloc_data_kva;

	/* Get the number of entries in rx_q */
	num_rq = kni_fifo_count(kni->rx_q);

	/* Get the number of free entries in tx_q */
	num_tq = kni_fifo_free_count(kni->tx_q);

	/* Get the number of entries in alloc_q */
	num_aq = kni_fifo_count(kni->alloc_q);

	/* Get the number of free entries in free_q */
	num_fq = kni_fifo_free_count(kni->free_q);

	/* Calculate the number of entries to be dequeued from rx_q */
	num = min(num_rq, num_tq);
	num = min(num, num_aq);
	num = min(num, num_fq);
	num = min(num, (unsigned)MBUF_BURST_SZ);

	/* Return if no entry to dequeue from rx_q */
	if (num == 0)
		return;

	/* Burst dequeue from rx_q */
	ret = kni_fifo_get(kni->rx_q, (void **)va, num);
	if (ret == 0)
		return; /* Failing should not happen */

	/* Dequeue entries from alloc_q */
	ret = kni_fifo_get(kni->alloc_q, (void **)alloc_va, num);
	if (ret) {
		num = ret;
		/* Copy mbufs */
		for (i = 0; i < num; i++) {
			kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
			len = kva->pkt_len;
			data_kva = kva->buf_addr + kva->data_off -
					kni->mbuf_va + kni->mbuf_kva;

			alloc_kva = (void *)alloc_va[i] - kni->mbuf_va +
							kni->mbuf_kva;
			alloc_data_kva = alloc_kva->buf_addr +
					alloc_kva->data_off - kni->mbuf_va +
							kni->mbuf_kva;
			memcpy(alloc_data_kva, data_kva, len);
			alloc_kva->pkt_len = len;
			alloc_kva->data_len = len;

			kni->stats.tx_bytes += len;
			kni->stats.rx_bytes += len;
		}

		/* Burst enqueue mbufs into tx_q */
		ret = kni_fifo_put(kni->tx_q, (void **)alloc_va, num);
		if (ret != num)
			/* Failing should not happen */
			KNI_ERR("Fail to enqueue mbufs into tx_q\n");
	}

	/* Burst enqueue mbufs into free_q */
	ret = kni_fifo_put(kni->free_q, (void **)va, num);
	if (ret != num)
		/* Failing should not happen */
		KNI_ERR("Fail to enqueue mbufs into free_q\n");

	/**
	 * Update statistic, and enqueue/dequeue failure is impossible,
	 * as all queues are checked at first.
	 */
	kni->stats.tx_packets += num;
	kni->stats.rx_packets += num;
}
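The occupancy checks used above (kni_fifo_count(), kni_fifo_free_count()) come from the same FIFO header; a sketch of both counters, again assuming the older power-of-two ring layout:

/* Sketch of the occupancy helpers, modeled on older kni_fifo.h */
static inline unsigned
kni_fifo_count(struct rte_kni_fifo *fifo)
{
	return (fifo->len + fifo->write - fifo->read) & (fifo->len - 1);
}

static inline unsigned
kni_fifo_free_count(struct rte_kni_fifo *fifo)
{
	return (fifo->len + fifo->read - fifo->write - 1) & (fifo->len - 1);
}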
Example #17: kni_net_rx_normal()
/*
 * RX: normal working mode
 */
static void
kni_net_rx_normal(struct kni_dev *kni)
{
	unsigned ret;
	uint32_t len;
	unsigned i, num, num_rq, num_fq;
	struct rte_kni_mbuf *kva;
	struct rte_kni_mbuf *va[MBUF_BURST_SZ];
	void * data_kva;

	struct sk_buff *skb;
	struct net_device *dev = kni->net_dev;

	/* Get the number of entries in rx_q */
	num_rq = kni_fifo_count(kni->rx_q);

	/* Get the number of free entries in free_q */
	num_fq = kni_fifo_free_count(kni->free_q);

	/* Calculate the number of entries to dequeue in rx_q */
	num = min(num_rq, num_fq);
	num = min(num, (unsigned)MBUF_BURST_SZ);

	/* Return if no entry in rx_q and no free entry in free_q */
	if (num == 0)
		return;

	/* Burst dequeue from rx_q */
	ret = kni_fifo_get(kni->rx_q, (void **)va, num);
	if (ret == 0)
		return; /* Failing should not happen */

	/* Transfer received packets to netif */
	for (i = 0; i < num; i++) {
		kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
                len = kva->data_len;
		data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va
                    + kni->mbuf_kva;

		skb = dev_alloc_skb(len + 2);
		if (!skb) {
			KNI_ERR("Out of mem, dropping pkts\n");
			/* Update statistics */
			kni->stats.rx_dropped++;
		}
		else {
			/* Align IP on 16B boundary */
			skb_reserve(skb, 2);
			memcpy(skb_put(skb, len), data_kva, len);
			skb->dev = dev;

                        skb->protocol = eth_type_trans(skb, dev);
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			/* Call netif interface */
			netif_rx(skb);

			/* Update statistics */
			kni->stats.rx_bytes += len;
			kni->stats.rx_packets++;
		}
	}

	/* Burst enqueue mbufs into free_q */
	ret = kni_fifo_put(kni->free_q, (void **)va, num);
	if (ret != num)
		/* Failing should not happen */
		KNI_ERR("Fail to enqueue entries into free_q\n");
}
Example #18: kni_net_rx_normal() (multi-segment variant)
/*
 * RX: normal working mode
 */
static void
kni_net_rx_normal(struct kni_dev *kni)
{
  unsigned ret;
  uint32_t pkt_len;
  uint32_t data_len;
  unsigned i, num, num_rq, num_fq;
  struct rte_kni_mbuf *kva;
  struct rte_kni_mbuf *va[MBUF_BURST_SZ];
  void * data_kva;
  int copied_len = 0;
  int num_segs = 0;
  
  struct sk_buff *skb;
  struct net_device *dev = kni->net_dev;
  
  if (kni->no_data){
    return;
  }
  
  /* Get the number of entries in rx_q */
  num_rq = kni_fifo_count(kni->rx_q);
  
  /* Get the number of free entries in free_q */
  num_fq = kni_fifo_free_count(kni->free_q);
  
  /* Calculate the number of entries to dequeue in rx_q */
  num = min(num_rq, num_fq);
  num = min(num, (unsigned)MBUF_BURST_SZ);
  
  /* Return if no entry in rx_q and no free entry in free_q */
  if (num == 0)
    return;
  
  /* Burst dequeue from rx_q */
  ret = kni_fifo_get(kni->rx_q, (void **)va, num);
  if (ret == 0)
    return; /* Failing should not happen */
  
  /* Transfer received packets to netif */
  for (i = 0; i < num; i++) {
    kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;
    pkt_len = kva->pkt_len;
    skb = dev_alloc_skb(pkt_len + 2);
    if (!skb) {
      KNI_ERR("Out of mem, dropping pkts\n");
      /* Update statistics */
      kni->stats.rx_dropped++;
      continue;
    }
    /* Align IP on 16B boundary */
    skb_reserve(skb, 2);
    copied_len = 0;
    num_segs = 0;
    kva = (void *)va[i];
    do {
      kva = (void *)kva  - kni->mbuf_va + kni->mbuf_kva;
      data_kva = kva->buf_addr + kva->data_off - kni->mbuf_va
          + kni->mbuf_kva;
      data_len = kva->data_len;
      memcpy(skb_put(skb, data_len), data_kva, data_len);
      copied_len += data_len;
      num_segs++;
    }while((kva = (void *)kva->next) != NULL);
    /*Go back to the head*/
    kva = (void *)va[i] - kni->mbuf_va + kni->mbuf_kva;

    kni_net_process_rx_packet(skb, dev, &kva->meta_data);
    kni->stats.rx_bytes += pkt_len;
  }
  /* Burst enqueue mbufs into free_q */
  ret = kni_fifo_put(kni->free_q, (void **)va, num);
  if (ret != num)
    /* Failing should not happen */
    KNI_ERR("Fail to enqueue entries into free_q\n");
}
Example #19: kni_vhost_net_tx()
static inline int
kni_vhost_net_tx(struct kni_dev *kni, struct msghdr *m,
		 unsigned offset, unsigned len)
{
	struct rte_kni_mbuf *pkt_kva = NULL;
	struct rte_kni_mbuf *pkt_va = NULL;
	int ret;

	KNI_DBG_TX("tx offset=%d, len=%d, iovlen=%d\n",
#ifdef HAVE_IOV_ITER_MSGHDR
		   offset, len, (int)m->msg_iter.iov->iov_len);
#else
		   offset, len, (int)m->msg_iov->iov_len);
#endif

	/**
	 * Check if it has at least one free entry in tx_q and
	 * one entry in alloc_q.
	 */
	if (kni_fifo_free_count(kni->tx_q) == 0 ||
	    kni_fifo_count(kni->alloc_q) == 0) {
		/**
		 * If no free entry in tx_q or no entry in alloc_q,
		 * drops skb and goes out.
		 */
		goto drop;
	}

	/* dequeue a mbuf from alloc_q */
	ret = kni_fifo_get(kni->alloc_q, (void **)&pkt_va, 1);
	if (likely(ret == 1)) {
		void *data_kva;

		pkt_kva = (void *)pkt_va - kni->mbuf_va + kni->mbuf_kva;
		data_kva = pkt_kva->buf_addr + pkt_kva->data_off
		           - kni->mbuf_va + kni->mbuf_kva;

#ifdef HAVE_IOV_ITER_MSGHDR
		copy_from_iter(data_kva, len, &m->msg_iter);
#else
		memcpy_fromiovecend(data_kva, m->msg_iov, offset, len);
#endif

		if (unlikely(len < ETH_ZLEN)) {
			memset(data_kva + len, 0, ETH_ZLEN - len);
			len = ETH_ZLEN;
		}
		pkt_kva->pkt_len = len;
		pkt_kva->data_len = len;

		/* enqueue mbuf into tx_q */
		ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va, 1);
		if (unlikely(ret != 1)) {
			/* Failing should not happen */
			KNI_ERR("Fail to enqueue mbuf into tx_q\n");
			goto drop;
		}
	} else {
		/* Failing should not happen */
		KNI_ERR("Fail to dequeue mbuf from alloc_q\n");
		goto drop;
	}

	/* update statistics */
	kni->stats.tx_bytes += len;
	kni->stats.tx_packets++;

	return 0;

drop:
	/* update statistics */
	kni->stats.tx_dropped++;

	return 0;
}