Example #1
struct mbuf *
#ifndef VBOX
m_megapullup(struct mbuf *m, int len)
#else
m_megapullup(PNATState pData, struct mbuf *m, int len)
#endif
{
    struct mbuf *mcl;

    if (len > m->m_pkthdr.len)
        goto bad;

    /* Do not reallocate the packet if it is sequential,
     * writable and has some extra space for expansion.
     * XXX: The constant 100 bytes is completely empirical. */
#define RESERVE 100
    if (m->m_next == NULL && M_WRITABLE(m) && M_TRAILINGSPACE(m) >= RESERVE)
        return (m);

    if (len <= MCLBYTES - RESERVE) {
#ifndef VBOX
        mcl = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
#else
        mcl = m_getcl(pData, M_DONTWAIT, MT_DATA, M_PKTHDR);
#endif
    } else if (len < MJUM16BYTES) {
        int size;
        if (len <= MJUMPAGESIZE - RESERVE) {
            size = MJUMPAGESIZE;
        } else if (len <= MJUM9BYTES - RESERVE) {
            size = MJUM9BYTES;
        } else {
            size = MJUM16BYTES;
        }
#ifndef VBOX
        mcl = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, size);
#else
        mcl = m_getjcl(pData, M_DONTWAIT, MT_DATA, M_PKTHDR, size);
#endif
    } else {
        goto bad;
    }
    if (mcl == NULL)
        goto bad;

    m_move_pkthdr(mcl, m);
    m_copydata(m, 0, len, mtod(mcl, caddr_t));
    mcl->m_len = mcl->m_pkthdr.len = len;
#ifndef VBOX
    m_freem(m);
#else
    m_freem(pData, m);
#endif

    return (mcl);
bad:
#ifndef VBOX
    m_freem(m);
#else
    m_freem(pData, m);
#endif
    return (NULL);
}
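
Since m_megapullup() frees the input chain on success and failure alike, a caller must overwrite its own pointer with the return value and stop on NULL. A minimal calling sketch (not part of the quoted source; the struct ip cast is illustrative):

    struct ip *ip;

    m = m_megapullup(pData, m, m->m_pkthdr.len);
    if (m == NULL)
        return;                     /* the old chain has already been freed */
    ip = mtod(m, struct ip *);      /* payload is now contiguous */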
Example #2
/*
 * Re-align the payload in the mbuf.  This is mainly used (right now)
 * to handle IP header alignment requirements on certain architectures.
 */
struct mbuf *
ieee80211_realign(struct ieee80211vap *vap, struct mbuf *m, size_t align)
{
	int pktlen, space;
	struct mbuf *n;

	pktlen = m->m_pkthdr.len;
	space = pktlen + align;
	if (space < MINCLSIZE)
		n = m_gethdr(M_NOWAIT, MT_DATA);
	else {
		n = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
		    space <= MCLBYTES ?     MCLBYTES :
#if MJUMPAGESIZE != MCLBYTES
		    space <= MJUMPAGESIZE ? MJUMPAGESIZE :
#endif
		    space <= MJUM9BYTES ?   MJUM9BYTES : MJUM16BYTES);
	}
	if (__predict_true(n != NULL)) {
		m_move_pkthdr(n, m);
		n->m_data = (caddr_t)(ALIGN(n->m_data + align) - align);
		m_copydata(m, 0, pktlen, mtod(n, caddr_t));
		n->m_len = pktlen;
	} else {
		IEEE80211_DISCARD(vap, IEEE80211_MSG_ANY,
		    mtod(m, const struct ieee80211_frame *), NULL,
		    "%s", "no mbuf to realign");
		vap->iv_stats.is_rx_badalign++;
	}
	m_freem(m);
	return n;
}
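
The n->m_data adjustment is the usual align-at-an-offset trick: it shifts the buffer start so that the byte at offset align lands on an aligned address. A worked instance, assuming ALIGN() rounds up to a 4-byte boundary and align == 2:

	/*
	 * Illustrative values only:
	 *   n->m_data = 0x1001, align = 2
	 *   ALIGN(0x1001 + 2) = 0x1004, and 0x1004 - 2 = 0x1002
	 * After m_copydata(), the data at offset 2 within the frame
	 * sits at 0x1004, a 4-byte-aligned address.
	 */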
Example #3
/*
 * Allocate a given length worth of mbufs and/or clusters (whatever fits
 * best) and return a pointer to the top of the allocated chain.  If an
 * existing mbuf chain is provided, then we will append the new chain
 * to the existing one and return a pointer to the provided mbuf.
 */
struct mbuf *
m_getm2(struct mbuf *m, int len, int how, short type, int flags)
{
	struct mbuf *mb, *nm = NULL, *mtail = NULL;

	KASSERT(len >= 0, ("%s: len is < 0", __func__));

	/* Validate flags. */
	flags &= (M_PKTHDR | M_EOR);

	/* Packet header mbuf must be first in chain. */
	if ((flags & M_PKTHDR) && m != NULL)
		flags &= ~M_PKTHDR;

	/* Loop and append maximum sized mbufs to the chain tail. */
	while (len > 0) {
		if (len > MCLBYTES)
			mb = m_getjcl(how, type, (flags & M_PKTHDR),
			    MJUMPAGESIZE);
		else if (len >= MINCLSIZE)
			mb = m_getcl(how, type, (flags & M_PKTHDR));
		else if (flags & M_PKTHDR)
			mb = m_gethdr(how, type);
		else
			mb = m_get(how, type);

		/* Fail the whole operation if one mbuf can't be allocated. */
		if (mb == NULL) {
			if (nm != NULL)
				m_freem(nm);
			return (NULL);
		}

		/* Book keeping. */
		len -= M_SIZE(mb);
		if (mtail != NULL)
			mtail->m_next = mb;
		else
			nm = mb;
		mtail = mb;
		flags &= ~M_PKTHDR;	/* Only valid on the first mbuf. */
	}
	if (flags & M_EOR)
		mtail->m_flags |= M_EOR;  /* Only valid on the last mbuf. */

	/* If mbuf was supplied, append new chain to the end of it. */
	if (m != NULL) {
		for (mtail = m; mtail->m_next != NULL; mtail = mtail->m_next)
			;
		mtail->m_next = nm;
		mtail->m_flags &= ~M_EOR;
	} else
		m = nm;

	return (m);
}
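
A hedged usage sketch (not from the quoted source): growing an existing chain by 1500 bytes. On failure m_getm2() returns NULL and leaves the supplied chain untouched, so the caller still owns it:

	struct mbuf *m;

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	if (m_getm2(m, 1500, M_NOWAIT, MT_DATA, 0) == NULL) {
		m_freem(m);	/* nothing was appended; free our own chain */
		return (ENOBUFS);
	}
	/* m now heads a chain with at least 1500 bytes of added space. */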
Example #4
static int mlx4_en_alloc_buf(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_desc *rx_desc,
			     struct mbuf **mb_list,
			     int i)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
	struct mbuf *mb;
	dma_addr_t dma;

	if (i == 0)
		mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, frag_info->frag_size);
	else
		mb = m_getjcl(M_NOWAIT, MT_DATA, 0, frag_info->frag_size);
	if (mb == NULL) {
		priv->port_stats.rx_alloc_failed++;
		return -ENOMEM;
	}
	dma = pci_map_single(mdev->pdev, mb->m_data, frag_info->frag_size,
			     PCI_DMA_FROMDEVICE);
	rx_desc->data[i].addr = cpu_to_be64(dma);
	mb_list[i] = mb;
	return 0;
}
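
Note the flags argument: only fragment 0 is allocated with M_PKTHDR, since a packet header is valid only on the first mbuf of a chain; the remaining fragments are plain data mbufs.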
Example #5
static void
usie_if_rx_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct usie_softc *sc = usbd_xfer_softc(xfer);
	struct ifnet *ifp = sc->sc_ifp;
	struct mbuf *m0;
	struct mbuf *m = NULL;
	struct usie_desc *rxd;
	uint32_t actlen;
	uint16_t err;
	uint16_t pkt;
	uint16_t ipl;
	uint16_t len;
	uint16_t diff;
	uint8_t pad;
	uint8_t ipv;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		DPRINTFN(15, "rx done, actlen=%u\n", actlen);

		if (actlen < sizeof(struct usie_hip)) {
			DPRINTF("data too short %u\n", actlen);
			goto tr_setup;
		}
		m = sc->sc_rxm;
		sc->sc_rxm = NULL;

		/* fall through */
	case USB_ST_SETUP:
tr_setup:

		if (sc->sc_rxm == NULL) {
			sc->sc_rxm = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR,
			    MJUMPAGESIZE /* could be bigger than MCLBYTES */ );
		}
		if (sc->sc_rxm == NULL) {
			DPRINTF("could not allocate Rx mbuf\n");
			ifp->if_ierrors++;
			usbd_xfer_set_stall(xfer);
			usbd_xfer_set_frames(xfer, 0);
		} else {
			/*
			 * Directly loading a mbuf cluster into DMA to
			 * save some data copying. This works because
			 * there is only one cluster.
			 */
			usbd_xfer_set_frame_data(xfer, 0,
			    mtod(sc->sc_rxm, caddr_t), MIN(MJUMPAGESIZE, USIE_RXSZ_MAX));
			usbd_xfer_set_frames(xfer, 1);
		}
		usbd_transfer_submit(xfer);
		break;

	default:			/* Error */
		DPRINTF("USB transfer error, %s\n", usbd_errstr(error));

		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			ifp->if_ierrors++;
			goto tr_setup;
		}
		if (sc->sc_rxm != NULL) {
			m_freem(sc->sc_rxm);
			sc->sc_rxm = NULL;
		}
		break;
	}

	if (m == NULL)
		return;

	mtx_unlock(&sc->sc_mtx);

	m->m_pkthdr.len = m->m_len = actlen;

	err = pkt = 0;

	/* HW can aggregate multiple frames in a single USB xfer */
	for (;;) {
		rxd = mtod(m, struct usie_desc *);

		len = be16toh(rxd->hip.len) & USIE_HIP_IP_LEN_MASK;
		pad = (rxd->hip.id & USIE_HIP_PAD) ? 1 : 0;
		ipl = (len - pad - ETHER_HDR_LEN);
		if (ipl >= len) {
			DPRINTF("Corrupt frame\n");
			m_freem(m);
			break;
		}
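		/*
		 * Total bytes this sub-frame occupies in the aggregate:
		 * descriptor header, IP payload and padding.
		 */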
		diff = sizeof(struct usie_desc) + ipl + pad;

		if (((rxd->hip.id & USIE_HIP_MASK) != USIE_HIP_IP) ||
		    (be16toh(rxd->desc_type) & USIE_TYPE_MASK) != USIE_IP_RX) {
			DPRINTF("received wrong type of packet\n");
			m->m_data += diff;
			m->m_pkthdr.len = (m->m_len -= diff);
			err++;
			if (m->m_pkthdr.len > 0)
				continue;
			m_freem(m);
			break;
		}
		switch (be16toh(rxd->ethhdr.ether_type)) {
		case ETHERTYPE_IP:
			ipv = NETISR_IP;
			break;
#ifdef INET6
		case ETHERTYPE_IPV6:
			ipv = NETISR_IPV6;
			break;
#endif
		default:
			DPRINTF("unsupported ether type\n");
			err++;
			break;
		}

		/* the last packet */
		if (m->m_pkthdr.len <= diff) {
			m->m_data += (sizeof(struct usie_desc) + pad);
			m->m_pkthdr.len = m->m_len = ipl;
			m->m_pkthdr.rcvif = ifp;
			BPF_MTAP(sc->sc_ifp, m);
			netisr_dispatch(ipv, m);
			break;
		}
		/* copy aggregated frames to another mbuf */
		m0 = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (__predict_false(m0 == NULL)) {
			DPRINTF("could not allocate mbuf\n");
			err++;
			m_freem(m);
			break;
		}
		m_copydata(m, sizeof(struct usie_desc) + pad, ipl, mtod(m0, caddr_t));
		m0->m_pkthdr.rcvif = ifp;
		m0->m_pkthdr.len = m0->m_len = ipl;

		BPF_MTAP(sc->sc_ifp, m0);
		netisr_dispatch(ipv, m0);

		m->m_data += diff;
		m->m_pkthdr.len = (m->m_len -= diff);
	}

	mtx_lock(&sc->sc_mtx);

	ifp->if_ierrors += err;
	ifp->if_ipackets += pkt;
}
Example #6
/*
 * Send a command to the firmware.  We try to implement the Linux
 * driver interface for the routine; it is mostly taken from if_iwn
 * (iwn_cmd()).
 *
 * For now, we always copy the first part and map the second one (if it exists).
 */
int
iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tfd *desc;
	struct iwm_tx_data *txdata = NULL;
	struct iwm_device_cmd *cmd;
	struct mbuf *m;
	bus_dma_segment_t seg;
	bus_addr_t paddr;
	uint32_t addr_lo;
	int error = 0, i, paylen, off;
	int code;
	int async, wantresp;
	int group_id;
	int nsegs;
	size_t hdrlen, datasz;
	uint8_t *data;

	code = hcmd->id;
	async = hcmd->flags & IWM_CMD_ASYNC;
	wantresp = hcmd->flags & IWM_CMD_WANT_SKB;
	data = NULL;

	for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
		paylen += hcmd->len[i];
	}

	/* if the command wants an answer, busy sc_cmd_resp */
	if (wantresp) {
		KASSERT(!async, ("invalid async parameter"));
		while (sc->sc_wantresp != -1)
			msleep(&sc->sc_wantresp, &sc->sc_mtx, 0, "iwmcmdsl", 0);
		sc->sc_wantresp = ring->qid << 16 | ring->cur;
		IWM_DPRINTF(sc, IWM_DEBUG_CMD,
		    "wantresp is %x\n", sc->sc_wantresp);
	}

	/*
	 * Is the hardware still available?  (after e.g. above wait).
	 */
	if (sc->sc_flags & IWM_FLAG_STOPPED) {
		error = ENXIO;
		goto out;
	}

	desc = &ring->desc[ring->cur];
	txdata = &ring->data[ring->cur];

	group_id = iwm_cmd_groupid(code);
	if (group_id != 0) {
		hdrlen = sizeof(cmd->hdr_wide);
		datasz = sizeof(cmd->data_wide);
	} else {
		hdrlen = sizeof(cmd->hdr);
		datasz = sizeof(cmd->data);
	}

	if (paylen > datasz) {
		size_t totlen;
		IWM_DPRINTF(sc, IWM_DEBUG_CMD,
		    "large command paylen=%u len0=%u\n",
			paylen, hcmd->len[0]);
		/* Command is too large */
		totlen = hdrlen + paylen;
		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
			device_printf(sc->sc_dev,
			    "firmware command too long (%zd bytes)\n",
			    totlen);
			error = EINVAL;
			goto out;
		}
		m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
		if (m == NULL) {
			error = ENOBUFS;
			goto out;
		}

		m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
		error = bus_dmamap_load_mbuf_sg(ring->data_dmat,
		    txdata->map, m, &seg, &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "%s: can't map mbuf, error %d\n", __func__, error);
			m_freem(m);
			goto out;
		}
		txdata->m = m; /* mbuf will be freed in iwm_cmd_done() */
		cmd = mtod(m, struct iwm_device_cmd *);
		paddr = seg.ds_addr;
	} else {
Example #7
static int
rtwn_pci_alloc_rx_list(struct rtwn_softc *sc)
{
	struct rtwn_pci_softc *pc = RTWN_PCI_SOFTC(sc);
	struct rtwn_rx_ring *rx_ring = &pc->rx_ring;
	struct rtwn_rx_data *rx_data;
	bus_size_t size;
	int i, error;

	/* Allocate Rx descriptors. */
	size = sizeof(struct r92ce_rx_stat) * RTWN_PCI_RX_LIST_COUNT;
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    size, 1, size, 0, NULL, NULL, &rx_ring->desc_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create rx desc DMA tag\n");
		goto fail;
	}

	error = bus_dmamem_alloc(rx_ring->desc_dmat, (void **)&rx_ring->desc,
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
	    &rx_ring->desc_map);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not allocate rx desc\n");
		goto fail;
	}
	error = bus_dmamap_load(rx_ring->desc_dmat, rx_ring->desc_map,
	    rx_ring->desc, size, rtwn_pci_dma_map_addr, &rx_ring->paddr, 0);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not load rx desc DMA map\n");
		goto fail;
	}
	bus_dmamap_sync(rx_ring->desc_dmat, rx_ring->desc_map,
	    BUS_DMASYNC_PREWRITE);

	/* Create RX buffer DMA tag. */
	error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0,
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL, &rx_ring->data_dmat);
	if (error != 0) {
		device_printf(sc->sc_dev, "could not create rx buf DMA tag\n");
		goto fail;
	}

	/* Allocate Rx buffers. */
	for (i = 0; i < RTWN_PCI_RX_LIST_COUNT; i++) {
		rx_data = &rx_ring->rx_data[i];
		error = bus_dmamap_create(rx_ring->data_dmat, 0, &rx_data->map);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not create rx buf DMA map\n");
			goto fail;
		}

		rx_data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
		    MJUMPAGESIZE);
		if (rx_data->m == NULL) {
			device_printf(sc->sc_dev,
			    "could not allocate rx mbuf\n");
			error = ENOMEM;
			goto fail;
		}

		error = bus_dmamap_load(rx_ring->data_dmat, rx_data->map,
		    mtod(rx_data->m, void *), MJUMPAGESIZE,
		    rtwn_pci_dma_map_addr, &rx_data->paddr, BUS_DMA_NOWAIT);
		if (error != 0) {
			device_printf(sc->sc_dev,
			    "could not load rx buf DMA map");
			goto fail;
		}

		rtwn_pci_setup_rx_desc(pc, &rx_ring->desc[i], rx_data->paddr,
		    MJUMPAGESIZE, i);
	}
	rx_ring->cur = 0;

	return (0);

fail:
	rtwn_pci_free_rx_list(sc);
	return (error);
}
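
The rtwn_pci_dma_map_addr() callback is not shown in this excerpt; a conventional single-segment busdma callback of this shape (a sketch, possibly differing from the driver's real helper) simply records the segment address:

static void
rtwn_pci_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr = arg;

	if (error != 0)
		return;
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
	*addr = segs[0].ds_addr;
}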
Example #8
static void
sfxge_rx_qfill(struct sfxge_rxq *rxq, unsigned int target, boolean_t retrying)
{
	struct sfxge_softc *sc;
	unsigned int index;
	struct sfxge_evq *evq;
	unsigned int batch;
	unsigned int rxfill;
	unsigned int mblksize;
	int ntodo;
	efsys_dma_addr_t addr[SFXGE_REFILL_BATCH];

	sc = rxq->sc;
	index = rxq->index;
	evq = sc->evq[index];

	prefetch_read_many(sc->enp);
	prefetch_read_many(rxq->common);

	SFXGE_EVQ_LOCK_ASSERT_OWNED(evq);

	if (__predict_false(rxq->init_state != SFXGE_RXQ_STARTED))
		return;

	rxfill = rxq->added - rxq->completed;
	KASSERT(rxfill <= EFX_RXQ_LIMIT(rxq->entries),
	    ("rxfill > EFX_RXQ_LIMIT(rxq->entries)"));
	ntodo = min(EFX_RXQ_LIMIT(rxq->entries) - rxfill, target);
	KASSERT(ntodo <= EFX_RXQ_LIMIT(rxq->entries),
	    ("ntodo > EFX_RQX_LIMIT(rxq->entries)"));

	if (ntodo == 0)
		return;

	batch = 0;
	mblksize = sc->rx_buffer_size - sc->rx_buffer_align;
	while (ntodo-- > 0) {
		unsigned int id;
		struct sfxge_rx_sw_desc *rx_desc;
		bus_dma_segment_t seg;
		struct mbuf *m;

		id = (rxq->added + batch) & rxq->ptr_mask;
		rx_desc = &rxq->queue[id];
		KASSERT(rx_desc->mbuf == NULL, ("rx_desc->mbuf != NULL"));

		rx_desc->flags = EFX_DISCARD;
		m = rx_desc->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
		    sc->rx_cluster_size);
		if (m == NULL)
			break;

		/* m_len specifies length of area to be mapped for DMA */
		m->m_len  = mblksize;
		m->m_data = (caddr_t)P2ROUNDUP((uintptr_t)m->m_data, CACHE_LINE_SIZE);
		m->m_data += sc->rx_buffer_align;

		sfxge_map_mbuf_fast(rxq->mem.esm_tag, rxq->mem.esm_map, m, &seg);
		addr[batch++] = seg.ds_addr;

		if (batch == SFXGE_REFILL_BATCH) {
			efx_rx_qpost(rxq->common, addr, mblksize, batch,
			    rxq->completed, rxq->added);
			rxq->added += batch;
			batch = 0;
		}
	}

	if (ntodo != 0)
		sfxge_rx_schedule_refill(rxq, retrying);

	if (batch != 0) {
		efx_rx_qpost(rxq->common, addr, mblksize, batch,
		    rxq->completed, rxq->added);
		rxq->added += batch;
	}

	/* Make the descriptors visible to the hardware */
	bus_dmamap_sync(rxq->mem.esm_tag, rxq->mem.esm_map,
			BUS_DMASYNC_PREWRITE);

	efx_rx_qpush(rxq->common, rxq->added, &rxq->pushed);

	/* The queue could still be empty if no descriptors were actually
	 * pushed, in which case there will be no event to cause the next
	 * refill, so we must schedule a refill ourselves.
	 */
	if (rxq->pushed == rxq->completed) {
		sfxge_rx_schedule_refill(rxq, retrying);
	}
}
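
P2ROUNDUP() rounds a value up to the next multiple of a power of two. If the macro is unfamiliar, the usual illumos-style definition (an assumption; the sfxge headers may define it differently) is:

/* Round x up to the next multiple of align; align must be a power of 2. */
#define	P2ROUNDUP(x, align)	(-(-(x) & -(align)))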
Example #9
/*
 * Tcp output routine: figure out what should be sent and send it.
 */
int
tcp_output(PNATState pData, register struct tcpcb *tp)
{
    register struct socket *so = tp->t_socket;
    register long len, win;
    int off, flags, error;
    register struct mbuf *m = NULL;
    register struct tcpiphdr *ti;
    u_char opt[MAX_TCPOPTLEN];
    unsigned optlen, hdrlen;
    int idle, sendalot;
    int size = 0;

    LogFlowFunc(("ENTER: tcp_output: tp = %R[tcpcb793]\n", tp));

    /*
     * Determine length of data that should be transmitted,
     * and flags that will be used.
     * If there is some data or critical controls (SYN, RST)
     * to send, then transmit; otherwise, investigate further.
     */
    idle = (tp->snd_max == tp->snd_una);
    if (idle && tp->t_idle >= tp->t_rxtcur)
        /*
         * We have been idle for "a while" and no acks are
         * expected to clock out any data we send --
         * slow start to get ack "clock" running again.
         */
        tp->snd_cwnd = tp->t_maxseg;

again:
    sendalot = 0;
    off = tp->snd_nxt - tp->snd_una;
    win = min(tp->snd_wnd, tp->snd_cwnd);

    flags = tcp_outflags[tp->t_state];

    Log2((" --- tcp_output flags = 0x%x\n", flags));

    /*
     * If in persist timeout with window of 0, send 1 byte.
     * Otherwise, if window is small but nonzero
     * and timer expired, we will send what we can
     * and go to transmit state.
     */
    if (tp->t_force)
    {
        if (win == 0)
        {
            /*
             * If we still have some data to send, then
             * clear the FIN bit.  Usually this would
             * happen below when it realizes that we
             * aren't sending all the data.  However,
             * if we have exactly 1 byte of unsent data,
             * then it won't clear the FIN bit below,
             * and if we are in persist state, we wind
             * up sending the packet without recording
             * that we sent the FIN bit.
             *
             * We can't just blindly clear the FIN bit,
             * because if we don't have any more data
             * to send then the probe will be the FIN
             * itself.
             */
            if (off < SBUF_LEN(&so->so_snd))
                flags &= ~TH_FIN;
            win = 1;
        }
        else
        {
            tp->t_timer[TCPT_PERSIST] = 0;
            tp->t_rxtshift = 0;
        }
    }

    len = min(SBUF_LEN(&so->so_snd), win) - off;
    if (len < 0)
    {
        /*
         * If FIN has been sent but not acked,
         * but we haven't been called to retransmit,
         * len will be -1.  Otherwise, window shrank
         * after we sent into it.  If window shrank to 0,
         * cancel pending retransmit and pull snd_nxt
         * back to (closed) window.  We will enter persist
         * state below.  If the window didn't close completely,
         * just wait for an ACK.
         */
        len = 0;
        if (win == 0)
        {
            tp->t_timer[TCPT_REXMT] = 0;
            tp->snd_nxt = tp->snd_una;
        }
    }
    if (len > tp->t_maxseg)
    {
        len = tp->t_maxseg;
        sendalot = 1;
    }
    if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + SBUF_LEN(&so->so_snd)))
        flags &= ~TH_FIN;

    win = sbspace(&so->so_rcv);

    /*
     * Sender silly window avoidance.  If the connection is idle and we
     * can send all the data, a maximum segment, or at least a maximum
     * default-size segment, send it; if we are forced, send as well;
     * otherwise don't bother.  If the peer's buffer is tiny, send once
     * the window is at least half open.  If we are retransmitting
     * (possibly after the persist timer forced us to send into a small
     * window), we must resend.
     */
    if (len)
    {
        if (len == tp->t_maxseg)
            goto send;
        if ((1 || idle || tp->t_flags & TF_NODELAY) &&
                len + off >= SBUF_LEN(&so->so_snd))
            goto send;
        if (tp->t_force)
            goto send;
        if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0)
            goto send;
        if (SEQ_LT(tp->snd_nxt, tp->snd_max))
            goto send;
    }

    /*
     * Compare available window to amount of window
     * known to peer (as advertised window less
     * next expected input).  If the difference is at least two
     * max size segments, or at least 50% of the maximum possible
     * window, then want to send a window update to peer.
     */
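    /*
     * Worked example with illustrative values: for t_maxseg = 1460 and
     * a 32768-byte receive buffer, an update is sent once the
     * advertisable increase (adv, computed below) reaches 2920 bytes
     * (two full segments) or 16384 bytes (half the buffer), whichever
     * comes first.
     */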
    if (win > 0)
    {
        /*
         * "adv" is the amount we can increase the window,
         * taking into account that we are limited by
         * TCP_MAXWIN << tp->rcv_scale.
         */
        long adv = min(win, (long)TCP_MAXWIN << tp->rcv_scale);
        if (SEQ_GT(tp->rcv_adv, tp->rcv_nxt))
            adv -= tp->rcv_adv - tp->rcv_nxt;

        if (adv >= (long) (2 * tp->t_maxseg))
            goto send;
        if (2 * adv >= (long) SBUF_SIZE(&so->so_rcv))
            goto send;
    }

    /*
     * Send if we owe peer an ACK.
     */
    if (tp->t_flags & TF_ACKNOW)
        goto send;
    if (flags & (TH_SYN|TH_RST))
        goto send;
    if (SEQ_GT(tp->snd_up, tp->snd_una))
        goto send;
    /*
     * If our state indicates that FIN should be sent
     * and we have not yet done so, or we're retransmitting the FIN,
     * then we need to send.
     */
    if (   flags & TH_FIN
        && ((tp->t_flags & TF_SENTFIN) == 0 || tp->snd_nxt == tp->snd_una))
        goto send;

    /*
     * TCP window updates are not reliable, rather a polling protocol
     * using ``persist'' packets is used to ensure receipt of window
     * updates.  The three ``states'' for the output side are:
     *      idle                    not doing retransmits or persists
     *      persisting              to move a small or zero window
     *      (re)transmitting        and thereby not persisting
     *
     * tp->t_timer[TCPT_PERSIST]
     *      is set when we are in persist state.
     * tp->t_force
     *      is set when we are called to send a persist packet.
     * tp->t_timer[TCPT_REXMT]
     *      is set when we are retransmitting
     * The output side is idle when both timers are zero.
     *
     * If send window is too small, there is data to transmit, and no
     * retransmit or persist is pending, then go to persist state.
     * If nothing happens soon, send when timer expires:
     * if window is nonzero, transmit what we can,
     * otherwise force out a byte.
     */
    if (   SBUF_LEN(&so->so_snd)
        && tp->t_timer[TCPT_REXMT] == 0
        && tp->t_timer[TCPT_PERSIST] == 0)
    {
        tp->t_rxtshift = 0;
        tcp_setpersist(tp);
    }

    /*
     * No reason to send a segment, just return.
     */
    tcpstat.tcps_didnuttin++;

    LogFlowFuncLeave();
    return (0);

send:
    LogFlowFunc(("send\n"));
    /*
     * Before ESTABLISHED, force sending of initial options
     * unless TCP set not to do any options.
     * NOTE: we assume that the IP/TCP header plus TCP options
     * always fit in a single mbuf, leaving room for a maximum
     * link header, i.e.
     *      max_linkhdr + sizeof (struct tcpiphdr) + optlen <= MHLEN
     */
    optlen = 0;
    hdrlen = sizeof (struct tcpiphdr);
    if (flags & TH_SYN)
    {
        tp->snd_nxt = tp->iss;
        if ((tp->t_flags & TF_NOOPT) == 0)
        {
            u_int16_t mss;

            opt[0] = TCPOPT_MAXSEG;
            opt[1] = 4;
            mss = RT_H2N_U16((u_int16_t) tcp_mss(pData, tp, 0));
            memcpy((caddr_t)(opt + 2), (caddr_t)&mss, sizeof(mss));
            optlen = 4;

#if 0
            if (   (tp->t_flags & TF_REQ_SCALE)
                && (   (flags & TH_ACK) == 0
                    || (tp->t_flags & TF_RCVD_SCALE)))
            {
                *((u_int32_t *) (opt + optlen)) = RT_H2N_U32(  TCPOPT_NOP << 24
                                                             | TCPOPT_WINDOW << 16
                                                             | TCPOLEN_WINDOW << 8
                                                             | tp->request_r_scale);
                optlen += 4;
            }
#endif
        }
    }

    /*
     * Send a timestamp and echo-reply if this is a SYN and our side
     * wants to use timestamps (TF_REQ_TSTMP is set) or both our side
     * and our peer have sent timestamps in our SYN's.
     */
#if 0
    if (   (tp->t_flags & (TF_REQ_TSTMP|TF_NOOPT)) == TF_REQ_TSTMP
        && (flags & TH_RST) == 0
        && (   (flags & (TH_SYN|TH_ACK)) == TH_SYN
            || (tp->t_flags & TF_RCVD_TSTMP)))
    {
        u_int32_t *lp = (u_int32_t *)(opt + optlen);

        /* Form timestamp option as shown in appendix A of RFC 1323. */
        *lp++ = RT_H2N_U32_C(TCPOPT_TSTAMP_HDR);
        *lp++ = RT_H2N_U32(tcp_now);
        *lp   = RT_H2N_U32(tp->ts_recent);
        optlen += TCPOLEN_TSTAMP_APPA;
    }
#endif
    hdrlen += optlen;

    /*
     * Adjust data length if insertion of options will
     * bump the packet length beyond the t_maxseg length.
     */
    if (len > tp->t_maxseg - optlen)
    {
        len = tp->t_maxseg - optlen;
        sendalot = 1;
    }

    /*
     * Grab a header mbuf, attaching a copy of data to
     * be transmitted, and initialize the header from
     * the template for sends on this connection.
     */
    if (len)
    {
        if (tp->t_force && len == 1)
            tcpstat.tcps_sndprobe++;
        else if (SEQ_LT(tp->snd_nxt, tp->snd_max))
        {
            tcpstat.tcps_sndrexmitpack++;
            tcpstat.tcps_sndrexmitbyte += len;
        }
        else
        {
            tcpstat.tcps_sndpack++;
            tcpstat.tcps_sndbyte += len;
        }

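        /*
         * Pick the smallest cluster that can hold the segment plus the
         * TCP/IP header and Ethernet header; anything that fits in a
         * standard cluster uses MCLBYTES, larger payloads fall through
         * to the 9K and 16K jumbo sizes.
         */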
        size = MCLBYTES;
        if ((len + hdrlen + ETH_HLEN) < MSIZE)
            size = MCLBYTES;
        else if ((len + hdrlen + ETH_HLEN) < MCLBYTES)
            size = MCLBYTES;
        else if ((len + hdrlen + ETH_HLEN) < MJUM9BYTES)
            size = MJUM9BYTES;
        else if ((len + hdrlen + ETH_HLEN) < MJUM16BYTES)
            size = MJUM16BYTES;
        else
            AssertMsgFailed(("Unsupported size"));
        m = m_getjcl(pData, M_NOWAIT, MT_HEADER, M_PKTHDR, size);
        if (m == NULL)
        {
/*          error = ENOBUFS; */
            error = 1;
            goto out;
        }
        m->m_data += if_maxlinkhdr;
        m->m_pkthdr.header = mtod(m, void *);
        m->m_len = hdrlen;

        /*
         * This will always succeed, since we make sure our mbufs
         * are big enough to hold one MSS packet + header + ... etc.
         */
#if 0
        if (len <= MHLEN - hdrlen - max_linkhdr)
        {
#endif
            sbcopy(&so->so_snd, off, (int) len, mtod(m, caddr_t) + hdrlen);
            m->m_len += len;
#if 0
        }
        else
        {
            m->m_next = m_copy(so->so_snd.sb_mb, off, (int) len);
            if (m->m_next == 0)
                len = 0;
        }
#endif
        /*
         * If we're sending everything we've got, set PUSH.
         * (This will keep happy those implementations which only
         * give data to the user when a buffer fills or
         * a PUSH comes in.)
         */
        if (off + len == SBUF_LEN(&so->so_snd))
            flags |= TH_PUSH;
    }
    else
    {
Example #10
static int
mlx4_en_alloc_buf(struct mlx4_en_rx_ring *ring,
     __be64 *pdma, struct mlx4_en_rx_mbuf *mb_list)
{
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	struct mbuf *mb;
	int nsegs;
	int err;

	/* try to allocate a new spare mbuf */
	if (unlikely(ring->spare.mbuf == NULL)) {
		mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ring->rx_mb_size);
		if (unlikely(mb == NULL))
			return (-ENOMEM);
		/* setup correct length */
		mb->m_pkthdr.len = mb->m_len = ring->rx_mb_size;

		/* make sure IP header gets aligned */
		m_adj(mb, MLX4_NET_IP_ALIGN);

		/* load spare mbuf into BUSDMA */
		err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, ring->spare.dma_map,
		    mb, segs, &nsegs, BUS_DMA_NOWAIT);
		if (unlikely(err != 0)) {
			m_freem(mb);
			return (err);
		}

		/* store spare info */
		ring->spare.mbuf = mb;
		ring->spare.paddr_be = cpu_to_be64(segs[0].ds_addr);

		bus_dmamap_sync(ring->dma_tag, ring->spare.dma_map,
		    BUS_DMASYNC_PREREAD);
	}

	/* synchronize and unload the current mbuf, if any */
	if (likely(mb_list->mbuf != NULL)) {
		bus_dmamap_sync(ring->dma_tag, mb_list->dma_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(ring->dma_tag, mb_list->dma_map);
	}

	mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ring->rx_mb_size);
	if (unlikely(mb == NULL))
		goto use_spare;

	/* setup correct length */
	mb->m_pkthdr.len = mb->m_len = ring->rx_mb_size;

	/* make sure IP header gets aligned */
	m_adj(mb, MLX4_NET_IP_ALIGN);

	err = -bus_dmamap_load_mbuf_sg(ring->dma_tag, mb_list->dma_map,
	    mb, segs, &nsegs, BUS_DMA_NOWAIT);
	if (unlikely(err != 0)) {
		m_freem(mb);
		goto use_spare;
	}

	*pdma = cpu_to_be64(segs[0].ds_addr);
	mb_list->mbuf = mb;

	bus_dmamap_sync(ring->dma_tag, mb_list->dma_map, BUS_DMASYNC_PREREAD);
	return (0);

use_spare:
	/* swap DMA maps */
	map = mb_list->dma_map;
	mb_list->dma_map = ring->spare.dma_map;
	ring->spare.dma_map = map;

	/* swap MBUFs */
	mb_list->mbuf = ring->spare.mbuf;
	ring->spare.mbuf = NULL;

	/* store physical address */
	*pdma = ring->spare.paddr_be;
	return (0);
}
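
The spare mbuf is what guarantees forward progress here: if either the fresh allocation or its DMA load fails, the pre-loaded spare is swapped into the ring slot instead, and a later call replenishes the spare.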