Example 1
0
/*
 * Prefix-mode recvmsg for the usNIC datagram endpoint.
 *
 * msg->msg_iov[0] is expected to begin with a USDF_HDR_BUF_ENTRY-byte
 * header prefix; the DMA address is advanced so the trailing
 * sizeof(struct usd_udp_hdr) bytes of that prefix receive the packet
 * header, with the payload landing immediately after it.  Remaining
 * iovec entries are posted as continuation (non-SOP) descriptors.
 *
 * If FI_MORE is set in flags, the doorbell write is skipped so the
 * caller can batch further posts.  Always returns 0.
 *
 * NOTE(review): urq_recv_credits is decremented but never checked here —
 * presumably the caller guarantees enough free descriptors; verify.
 */
ssize_t
usdf_dgram_prefix_recvmsg(struct fid_ep *fep, const struct fi_msg *msg, uint64_t flags)
{
	struct usdf_ep *ep;
	struct usd_qp_impl *qp;
	struct usd_rq *rq;
	struct vnic_rq *vrq;
	struct rq_enet_desc *desc;
	uint8_t *hdr_ptr;
	const struct iovec *iovp;
	uint32_t index;
	unsigned i;

	ep = ep_ftou(fep);
	qp = to_qpi(ep->e.dg.ep_qp);
	rq = &qp->uq_rq;
	vrq = &rq->urq_vnic_rq;
	desc = rq->urq_next_desc;
	index = rq->urq_post_index;

	iovp = msg->msg_iov;
	rq->urq_context[index] = msg->context;
	/* point into the prefix so the UDP header fills its tail */
	hdr_ptr = iovp[0].iov_base +
		(USDF_HDR_BUF_ENTRY - sizeof(struct usd_udp_hdr));
	/* first descriptor is start-of-packet; its length excludes the
	 * portion of the prefix the hardware never touches */
	rq_enet_desc_enc(desc, (dma_addr_t) hdr_ptr,
			 RQ_ENET_TYPE_ONLY_SOP,
			 iovp[0].iov_len -
			  (USDF_HDR_BUF_ENTRY - sizeof(struct usd_udp_hdr)));
	ep->e.dg.ep_hdr_ptr[index] = (struct usd_udp_hdr *) hdr_ptr;

	/* advance to the next ring slot — index << 4 suggests 16-byte
	 * descriptors; confirm against struct rq_enet_desc */
	index = (index+1) & rq->urq_post_index_mask;
	desc = (struct rq_enet_desc *) ((uintptr_t)rq->urq_desc_ring
					    + (index<<4));

	/* post any further iovec entries as non-SOP continuations; every
	 * slot records the same hdr_ptr (header lives only in iov[0]) */
	for (i = 1; i < msg->iov_count; ++i) {
		rq->urq_context[index] = msg->context;
		rq_enet_desc_enc(desc, (dma_addr_t) iovp[i].iov_base,
			     RQ_ENET_TYPE_NOT_SOP, iovp[i].iov_len);
		ep->e.dg.ep_hdr_ptr[index] = (struct usd_udp_hdr *) hdr_ptr;

		index = (index+1) & rq->urq_post_index_mask;
		desc = (struct rq_enet_desc *) ((uintptr_t)rq->urq_desc_ring
					    + (index<<4));
	}

	/* FI_MORE: defer the doorbell so the caller can batch posts */
	if ((flags & FI_MORE) == 0) {
		wmb();	/* descriptor stores visible before the doorbell */
		iowrite32(index, &vrq->ctrl->posted_index);
	}

	rq->urq_next_desc = desc;
	rq->urq_post_index = index;
	rq->urq_recv_credits -= msg->iov_count;

	return 0;
}
Example 2
0
File: usd_post.c — Project: ORNL/ompi
/*
 * Post a chain of receive descriptors to a usd QP's receive queue.
 *
 * Each usd_recv_desc in recv_list contributes urd_iov_cnt descriptors:
 * the first entry is encoded as start-of-packet (ONLY_SOP), the rest as
 * continuations (NOT_SOP).  After the whole chain is encoded, a single
 * doorbell write posts everything to the NIC.  Always returns 0.
 *
 * NOTE(review): available recv credits are decremented but not checked
 * here — presumably the caller reserves them; verify.
 */
int
usd_post_recv(
    struct usd_qp *uqp,
    struct usd_recv_desc *recv_list)
{
    struct usd_qp_impl *qp;
    struct usd_rq *rq;
    struct vnic_rq *vrq;
    struct rq_enet_desc *desc;
    struct iovec *iovp;
    uint32_t index;
    uint32_t count;
    unsigned i;

    qp = to_qpi(uqp);
    rq = &qp->uq_rq;
    vrq = &rq->urq_vnic_rq;
    desc = rq->urq_next_desc;
    index = rq->urq_post_index;

    count = 0;

    while (recv_list != NULL) {
        /*
         * BUG FIX: iovp was previously loaded once before the loop, so
         * every element of the chain re-posted the first element's
         * buffers.  Each usd_recv_desc carries its own iovec array, so
         * it must be reloaded per node.
         */
        iovp = recv_list->urd_iov;

        /* first fragment of this receive is the start-of-packet */
        rq->urq_context[index] = recv_list->urd_context;
        rq_enet_desc_enc(desc, (dma_addr_t) iovp[0].iov_base,
                         RQ_ENET_TYPE_ONLY_SOP, iovp[0].iov_len);
        count++;

        index = (index+1) & rq->urq_post_index_mask;
        desc = (struct rq_enet_desc *) ((uintptr_t)rq->urq_desc_ring
                                            + (index<<4));

        /* remaining fragments are continuation (non-SOP) descriptors */
        for (i = 1; i < recv_list->urd_iov_cnt; ++i) {
            rq->urq_context[index] = recv_list->urd_context;
            rq_enet_desc_enc(desc, (dma_addr_t) iovp[i].iov_base,
                             RQ_ENET_TYPE_NOT_SOP, iovp[i].iov_len);
            count++;

            index = (index+1) & rq->urq_post_index_mask;
            desc = (struct rq_enet_desc *) ((uintptr_t)rq->urq_desc_ring
                                            + (index<<4));
        }
        recv_list = recv_list->urd_next;
    }

    /* descriptor stores must be globally visible before the doorbell */
    wmb();
    iowrite32(index, &vrq->ctrl->posted_index);

    rq->urq_next_desc = desc;
    rq->urq_post_index = index;
    rq->urq_recv_credits -= count;

    return 0;
}
Example 3
0
/*
 * Semi-native rx buffer post; the eventual goal is to stop relying on
 * the vnic_*() helpers entirely.
 *
 * Encodes a single start-of-packet descriptor for buf/len, rings the
 * doorbell, and advances the ring bookkeeping.  Always returns 0.
 */
static inline int
_usdf_msg_post_recv(struct usdf_rx *rx, void *buf, size_t len)
{
	struct usd_qp_impl *qp = to_qpi(rx->rx_qp);
	struct usd_rq *rq = &qp->uq_rq;
	struct vnic_rq *vrq = &rq->urq_vnic_rq;
	struct rq_enet_desc *desc = rq->urq_next_desc;
	uint32_t next_index;

	/* remember the buffer for completion handling, then bump the slot */
	rq->urq_context[rq->urq_post_index] = buf;
	next_index = (rq->urq_post_index + 1) & rq->urq_post_index_mask;
	rq->urq_post_index = next_index;

	/* encode the descriptor, then ring the doorbell after a barrier */
	rq_enet_desc_enc(desc, (dma_addr_t) buf,
			RQ_ENET_TYPE_ONLY_SOP, len);
	wmb();
	iowrite32(next_index, &vrq->ctrl->posted_index);

	rq->urq_next_desc = (struct rq_enet_desc *)
				((uintptr_t)rq->urq_desc_ring
					+ (next_index << 4));
	rq->urq_recv_credits -= 1;

	return 0;
}
Example 4
0
/*
 * Buffer-allocation variant for this RQ: when the ring is about to
 * post, plant a reserved (RESV2) null descriptor and post it with no
 * buffer attached; otherwise fall through to the normal allocation
 * path in enic_rq_alloc_buf().
 */
static int enic_rq_alloc_buf_a1(struct vnic_rq *rq)
{
	struct rq_enet_desc *desc = vnic_rq_next_desc(rq);

	if (!vnic_rq_posting_soon(rq))
		return enic_rq_alloc_buf(rq);

	rq_enet_desc_enc(desc, 0, RQ_ENET_TYPE_RESV2, 0);
	vnic_rq_post(rq, 0, 0, 0, 0);

	return 0;
}
Example 5
0
/*
 * Fill every descriptor of an enic RQ ring with a freshly allocated
 * mbuf and post (most of) them to the NIC.
 *
 * Each descriptor points just past the mbuf headroom and advertises
 * buf_len minus the headroom.  All but the last two cache lines' worth
 * of descriptors are posted, per the hardware's posting rule.
 *
 * Returns 0 on success, -ENOMEM if an mbuf allocation fails.
 * NOTE(review): on failure, mbufs already stored in rq->mbuf_ring are
 * left for the caller's teardown path to free — verify.
 */
static int
enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
{
	struct rte_mbuf *mb;
	struct rq_enet_desc *rqd = rq->ring.descs;
	unsigned i;
	dma_addr_t dma_addr;

	dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
		  rq->ring.desc_count);

	for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
		mb = rte_rxmbuf_alloc(rq->mp);
		if (mb == NULL) {
			dev_err(enic, "RX mbuf alloc failed queue_id=%u\n",
				(unsigned)rq->index);
			return -ENOMEM;
		}

		/* DMA target starts after the mbuf headroom */
		dma_addr = (dma_addr_t)(mb->buf_physaddr
			   + RTE_PKTMBUF_HEADROOM);

		rq_enet_desc_enc(rqd, dma_addr, RQ_ENET_TYPE_ONLY_SOP,
				 mb->buf_len - RTE_PKTMBUF_HEADROOM);
		rq->mbuf_ring[i] = mb;
	}

	/*
	 * Make sure all prior descriptor writes are complete before the PIO
	 * write.  BUG FIX: this needs a *write* barrier; the rte_rmb() used
	 * previously only orders loads and would not keep the posted_index
	 * store from overtaking the descriptor stores.
	 */
	rte_wmb();

	/* Post all but the last 2 cache lines' worth of descriptors */
	rq->posted_index = rq->ring.desc_count - (2 * RTE_CACHE_LINE_SIZE
			/ sizeof(struct rq_enet_desc));
	rq->rx_nb_hold = 0;

	dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
		enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
	iowrite32(rq->posted_index, &rq->ctrl->posted_index);
	/* NOTE(review): a barrier after the PIO write looks unnecessary but
	 * is retained from the original — confirm before removing. */
	rte_rmb();

	return 0;
}
Example 6
0
/*
 * Allocate and post receive buffer(s) for one packet slot on an enic RQ.
 *
 * Normal path: one mbuf posted as a single ONLY_SOP descriptor sized to
 * the full buffer length.  Header-split path (split_hdr_size != 0): a
 * header mbuf is posted first (ONLY_SOP, split_hdr_size bytes) chained
 * to a payload mbuf posted as NOT_SOP, consuming two descriptors.
 *
 * Returns 0 on success, -1 on allocation/space failure; error paths
 * release any mbuf already taken from the pool.
 */
static int enic_rq_alloc_buf(struct vnic_rq *rq)
{
	struct enic *enic = vnic_dev_priv(rq->vdev);
	dma_addr_t dma_addr;
	struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
	uint8_t type = RQ_ENET_TYPE_ONLY_SOP;
	u16 split_hdr_size = vnic_get_hdr_split_size(enic->vdev);
	struct rte_mbuf *mbuf = enic_rxmbuf_alloc(rq->mp);
	struct rte_mbuf *hdr_mbuf = NULL;

	if (!mbuf) {
		dev_err(enic, "mbuf alloc in enic_rq_alloc_buf failed\n");
		return -1;
	}

	if (unlikely(split_hdr_size)) {
		/* header split needs two descriptors; bail if not available */
		if (vnic_rq_desc_avail(rq) < 2) {
			rte_mempool_put(mbuf->pool, mbuf);
			return -1;
		}
		hdr_mbuf = enic_rxmbuf_alloc(rq->mp);
		if (!hdr_mbuf) {
			rte_mempool_put(mbuf->pool, mbuf);
			dev_err(enic,
				"hdr_mbuf alloc in enic_rq_alloc_buf failed\n");
			return -1;
		}

		hdr_mbuf->data_off = RTE_PKTMBUF_HEADROOM;

		/* chain: header mbuf first, payload mbuf second */
		hdr_mbuf->nb_segs = 2;
		hdr_mbuf->port = rq->index;
		hdr_mbuf->next = mbuf;

		dma_addr = (dma_addr_t)
		    (hdr_mbuf->buf_physaddr + hdr_mbuf->data_off);

		/* SOP descriptor carries only the split-off header bytes */
		rq_enet_desc_enc(desc, dma_addr, type, split_hdr_size);

		vnic_rq_post(rq, (void *)hdr_mbuf, 0 /*os_buf_index*/, dma_addr,
			(unsigned int)split_hdr_size, 0 /*wrid*/);

		/* payload goes into the following descriptor as non-SOP */
		desc = vnic_rq_next_desc(rq);
		type = RQ_ENET_TYPE_NOT_SOP;
	} else {
		mbuf->nb_segs = 1;
		mbuf->port = rq->index;
	}

	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
	mbuf->next = NULL;

	dma_addr = (dma_addr_t)
	    (mbuf->buf_physaddr + mbuf->data_off);

	/* NOTE(review): length advertised is buf_len even though data_off
	 * skips RTE_PKTMBUF_HEADROOM — confirm the rx completion path
	 * accounts for the headroom. */
	rq_enet_desc_enc(desc, dma_addr, type, mbuf->buf_len);

	vnic_rq_post(rq, (void *)mbuf, 0 /*os_buf_index*/, dma_addr,
		(unsigned int)mbuf->buf_len, 0 /*wrid*/);

	return 0;
}