Example #1
/*
 * Return a buffer with an mbuf allocated.
 *
 * Note: the mbuf length is just that - the mbuf length.
 * It's up to the caller to reserve the required header/descriptor
 * bits before the actual payload.
 *
 * XXX TODO: need to pass in a dmatag to use, rather than a global
 * XXX TX/RX tag.  Check ath10k_pci_alloc_pipes() - each pipe has
 * XXX a different dmatag with different properties.
 *
 * Note: this doesn't load anything; that's done by the caller
 * before it passes it into the hardware.
 *
 * Note: it sets the maxsize to the requested buffer size,
 * not to the actual mbuf storage size.  Again, the caller
 * should (!) request more space up front if it wants room
 * to grow.
 *
 * XXX TODO: the linux mbuf/skb emulation code assumes that
 * skb's have a single external buffer storage part.
 * But there are going to be places where we allocate a larger
 * buffer!  So, we will have to review things - maybe add an arg
 * that says "enforce getting a single contig mbuf", and then
 * slowly undo or re-implement the skb routines that do copying, etc.,
 * to take into account chained mbufs (ie, using M_* / m_* routines.)
 */
struct athp_buf *
athp_getbuf(struct ath10k *ar, struct athp_buf_ring *br, int bufsize)
{
	struct athp_buf *bf;
	struct mbuf *m;

	/* Allocate mbuf; fail if we can't allocate one */
	//m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, bufsize);
	m = m_getm2(NULL, bufsize, M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL) {
		device_printf(ar->sc_dev, "%s: failed to allocate mbuf\n", __func__);
		return (NULL);
	}

	/* Allocate buffer */
	ATHP_BUF_LOCK(ar);
	bf = _athp_getbuf(ar, br);
	ATHP_BUF_UNLOCK(ar);
	if (bf == NULL) {
		m_freem(m);
		device_printf(ar->sc_dev, "%s: out of buffers? btype=%d\n",
		    __func__, br->btype);
		return (NULL);
	}

	/*
	 * If it's a TX ring alloc, and it doesn't have a TX descriptor
	 * allocated, then explode.
	 */
	if (br->btype == BUF_TYPE_TX && bf->txbuf_dd.dd_desc == NULL) {
		device_printf(ar->sc_dev,
		    "%s: requested TX buffer, no txbuf!\n", __func__);
		m_freem(m);
		athp_freebuf(ar, br, bf);
		return (NULL);
	}

	/* Zero out the TX buffer side; re-init the pointers */
	if (bf->btype == BUF_TYPE_TX) {
		bf->tx.htt.txbuf = bf->txbuf_dd.dd_desc;
		bf->tx.htt.txbuf_paddr = bf->txbuf_dd.dd_desc_paddr;
		bzero(bf->tx.htt.txbuf, sizeof(struct ath10k_htt_txbuf));
	}

	/* Setup initial mbuf tracking state */
	bf->m = m;
	bf->m_size = bufsize;

	/* and initial mbuf size */
	bf->m->m_len = 0;
	bf->m->m_pkthdr.len = 0;

	return (bf);
}
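
The header comment spells out the contract: the caller reserves header space and owns all length accounting, since athp_getbuf() hands back the mbuf with m_len and m_pkthdr.len at zero. A minimal caller sketch of that contract follows; the helper and its hdr_len/payload_len parameters are illustrative, not part of the driver.

/*
 * Hypothetical caller (sketch only): allocate a buffer sized for
 * header plus payload, then reserve the header region up front.
 */
static struct athp_buf *
example_alloc_with_headroom(struct ath10k *ar, struct athp_buf_ring *br,
    int hdr_len, int payload_len)
{
	struct athp_buf *bf;

	/* Request enough space for the header and the payload. */
	bf = athp_getbuf(ar, br, hdr_len + payload_len);
	if (bf == NULL)
		return (NULL);

	/*
	 * Reserve headroom by advancing the data pointer; m_len and
	 * m_pkthdr.len are still zero, so the caller sets them as it
	 * fills in the payload.
	 */
	bf->m->m_data += hdr_len;

	return (bf);
}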
Example #2
/*
 * Allocate an icl_pdu with an empty BHS, to be filled in by the caller.
 */
struct icl_pdu *
icl_pdu_new_bhs(struct icl_conn *ic, int flags)
{
	struct icl_pdu *ip;

	ip = icl_pdu_new(ic, flags);
	if (ip == NULL)
		return (NULL);

	ip->ip_bhs_mbuf = m_getm2(NULL, sizeof(struct iscsi_bhs),
	    flags, MT_DATA, M_PKTHDR);
	if (ip->ip_bhs_mbuf == NULL) {
		ICL_WARN("failed to allocate %zd bytes",
		    sizeof(struct iscsi_bhs));
		icl_pdu_free(ip);
		return (NULL);
	}
	ip->ip_bhs = mtod(ip->ip_bhs_mbuf, struct iscsi_bhs *);
	memset(ip->ip_bhs, 0, sizeof(struct iscsi_bhs));
	ip->ip_bhs_mbuf->m_len = sizeof(struct iscsi_bhs);

	return (ip);
}
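
A hedged usage sketch: the BHS comes back zeroed and already counted in the mbuf length, so a caller only sets the fields it needs before handing the PDU on. The helper below and the opcode value are illustrative, not part of the iSCSI stack.

/*
 * Hypothetical caller (sketch only): build a PDU and fill in one
 * BHS field.  Real callers typically cast ip_bhs to the specific
 * request structure they are constructing.
 */
static struct icl_pdu *
example_new_request(struct icl_conn *ic)
{
	struct icl_pdu *ip;

	ip = icl_pdu_new_bhs(ic, M_NOWAIT);
	if (ip == NULL)
		return (NULL);

	/* The BHS is pre-zeroed; 0x00 (NOP-Out) is just an example. */
	ip->ip_bhs->bhs_opcode = 0x00;

	return (ip);
}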
Example #3
static int
sdp_post_recv(struct sdp_sock *ssk)
{
	struct sdp_buf *rx_req;
	int i, rc;
	u64 addr;
	struct ib_device *dev;
	struct ib_recv_wr rx_wr = { NULL };
	struct ib_sge ibsge[SDP_MAX_RECV_SGES];
	struct ib_sge *sge = ibsge;
	struct ib_recv_wr *bad_wr;
	struct mbuf *mb, *m;
	struct sdp_bsdh *h;
	int id = ring_head(ssk->rx_ring);

	/* Now, allocate and repost recv */
	sdp_prf(ssk->socket, mb, "Posting mb");
	mb = m_getm2(NULL, ssk->recv_bytes, M_NOWAIT, MT_DATA, M_PKTHDR);
	if (mb == NULL) {
		/* Queue a retry so we don't stall out when short of memory. */
		if (!rx_ring_posted(ssk))
			queue_work(rx_comp_wq, &ssk->rx_comp_work);
		return -1;
	}
	/*
	 * Open up each mbuf in the chain to its full storage size so
	 * the entire allocation can be posted as receive space.
	 */
	for (m = mb; m != NULL; m = m->m_next) {
		m->m_len = (m->m_flags & M_EXT) ? m->m_ext.ext_size :
		    ((m->m_flags & M_PKTHDR) ? MHLEN : MLEN);
		mb->m_pkthdr.len += m->m_len;
	}
	h = mtod(mb, struct sdp_bsdh *);
	rx_req = ssk->rx_ring.buffer + (id & (SDP_RX_SIZE - 1));
	rx_req->mb = mb;
	dev = ssk->ib_device;
	for (i = 0; mb != NULL; i++, mb = mb->m_next, sge++) {
		/* Map for device-to-host DMA on the receive path. */
		addr = ib_dma_map_single(dev, mb->m_data, mb->m_len,
		    DMA_FROM_DEVICE);
		/* TODO: proper error handling */
		BUG_ON(ib_dma_mapping_error(dev, addr));
		BUG_ON(i >= SDP_MAX_RECV_SGES);
		rx_req->mapping[i] = addr;
		sge->addr = addr;
		sge->length = mb->m_len;
		sge->lkey = ssk->sdp_dev->mr->lkey;
	}

	rx_wr.next = NULL;
	rx_wr.wr_id = id | SDP_OP_RECV;
	rx_wr.sg_list = ibsge;
	rx_wr.num_sge = i;
	rc = ib_post_recv(ssk->qp, &rx_wr, &bad_wr);
	if (unlikely(rc)) {
		sdp_warn(ssk->socket, "ib_post_recv failed. status %d\n", rc);

		sdp_cleanup_sdp_buf(ssk, rx_req, DMA_FROM_DEVICE);
		m_freem(mb);

		sdp_notify(ssk, ECONNRESET);

		return -1;
	}

	atomic_inc(&ssk->rx_ring.head);
	SDPSTATS_COUNTER_INC(post_recv);

	return 0;
}
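
Common to all three examples is the m_getm2() idiom: the allocator may return a chain of mbufs with every m_len left at zero, so code that wants the full storage walks the chain and sets each length to that mbuf's capacity, as sdp_post_recv() does above. The same pattern as a standalone sketch (the helper name is illustrative):

/*
 * Sketch: allocate at least "size" bytes of mbuf storage and open
 * every mbuf in the chain to its full capacity.
 */
static struct mbuf *
example_get_full_chain(int size)
{
	struct mbuf *mb, *m;

	mb = m_getm2(NULL, size, M_NOWAIT, MT_DATA, M_PKTHDR);
	if (mb == NULL)
		return (NULL);

	for (m = mb; m != NULL; m = m->m_next) {
		/*
		 * Cluster-backed mbufs hold ext_size bytes; plain
		 * mbufs hold MHLEN (with pkthdr) or MLEN bytes.
		 */
		m->m_len = (m->m_flags & M_EXT) ? m->m_ext.ext_size :
		    ((m->m_flags & M_PKTHDR) ? MHLEN : MLEN);
		mb->m_pkthdr.len += m->m_len;
	}
	return (mb);
}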