Example #1
/*
 * Start output on the pflog interface.
 */
void
pflogstart(struct ifnet *ifp)
{
	struct mbuf *m;
#ifndef __FreeBSD__
	int s;
#endif

	for (;;) {
#ifdef __FreeBSD__
		IF_LOCK(&ifp->if_snd);
		_IF_DROP(&ifp->if_snd);
		_IF_DEQUEUE(&ifp->if_snd, m);
		IF_UNLOCK(&ifp->if_snd);
#else
		s = splnet();
		IF_DROP(&ifp->if_snd);
		IF_DEQUEUE(&ifp->if_snd, m);
		splx(s);
#endif

		if (m == NULL)
			return;
		else
			m_freem(m);
	}
}
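All of these examples use the classic BSD ifqueue API. For orientation, here is a minimal sketch of what the drop, lock and dequeue helpers expand to, modeled on the historical FreeBSD <net/if_var.h> (details vary between releases, so treat this as an approximation rather than the authoritative definitions):

/*
 * Approximate definitions of the ifqueue helpers, after the historical
 * FreeBSD <net/if_var.h>.  Callers of the underscore variants must
 * already hold the queue mutex.
 */
#define	IF_LOCK(ifq)	mtx_lock(&(ifq)->ifq_mtx)
#define	IF_UNLOCK(ifq)	mtx_unlock(&(ifq)->ifq_mtx)

#define	_IF_DROP(ifq)	((ifq)->ifq_drops++)	/* account one dropped packet */

#define	_IF_DEQUEUE(ifq, m) do {				\
	(m) = (ifq)->ifq_head;					\
	if ((m) != NULL) {					\
		/* unlink the head packet from the queue */	\
		if (((ifq)->ifq_head = (m)->m_nextpkt) == NULL)	\
			(ifq)->ifq_tail = NULL;			\
		(m)->m_nextpkt = NULL;				\
		(ifq)->ifq_len--;				\
	}							\
} while (0)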
Example #2
int capi_start_tx(capi_softc_t *sc, int chan)
{
    struct mbuf *m_b3;
    int sent = 0;

    _IF_DEQUEUE(&sc->sc_bchan[chan].tx_queue, m_b3);
    while (m_b3) {
	struct mbuf *m = m_b3->m_next;

	sc->sc_bchan[chan].txcount += m_b3->m_len;
	capi_data_b3_req(sc, chan, m_b3);
	sent++;

	m_b3 = m;
    }

    if (sc->sc_bchan[chan].capi_drvr_linktab) {
	/* Notify the i4b driver of activity, and tell it if the queue has drained. */

	if (sent)
	    (*sc->sc_bchan[chan].capi_drvr_linktab->bch_activity)(
		sc->sc_bchan[chan].capi_drvr_linktab->unit, ACT_TX);

	if (IF_QEMPTY(&sc->sc_bchan[chan].tx_queue))
	    (*sc->sc_bchan[chan].capi_drvr_linktab->bch_tx_queue_empty)(
		sc->sc_bchan[chan].capi_drvr_linktab->unit);
    }

    return sent;
}
Example #3
static void
bt3c_forward(node_p node, hook_p hook, void *arg1, int arg2)
{
	bt3c_softc_p	 sc = (bt3c_softc_p) NG_NODE_PRIVATE(node);
	struct mbuf	*m = NULL;
	int		 error;

	if (sc == NULL)
		return;

	if (sc->hook != NULL && NG_HOOK_IS_VALID(sc->hook)) {
		for (;;) {
			IF_DEQUEUE(&sc->inq, m);
			if (m == NULL)
				break;

			NG_SEND_DATA_ONLY(error, sc->hook, m);
			if (error != 0)
				NG_BT3C_STAT_IERROR(sc->stat);
		}
	} else {
		IF_LOCK(&sc->inq);
		for (;;) {
			_IF_DEQUEUE(&sc->inq, m);
			if (m == NULL)
				break;

			NG_BT3C_STAT_IERROR(sc->stat);
			NG_FREE_M(m);
		}
		IF_UNLOCK(&sc->inq);
	}
} /* bt3c_forward */
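Example #3 shows both calling conventions side by side: IF_DEQUEUE takes and drops the queue lock on every call, while the IF_LOCK/_IF_DEQUEUE/IF_UNLOCK sequence holds the lock across the whole drain loop. A sketch of how the locked form is conventionally built from the unlocked one (same historical <net/if_var.h> assumption as above):

/* Locked dequeue: one lock/unlock round trip per packet. */
#define	IF_DEQUEUE(ifq, m) do {		\
	IF_LOCK(ifq);			\
	_IF_DEQUEUE(ifq, m);		\
	IF_UNLOCK(ifq);			\
} while (0)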
Example #4
/**
 * Periodic timer tick for slow management operations
 *
 * @param arg    Device to check
 */
static void cvm_do_timer(void *arg)
{
	static int port;
	static int updated;
	if (port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (cvm_oct_device[port]) {
			int queues_per_port;
			int qos;
			cvm_oct_private_t *priv = (cvm_oct_private_t *)cvm_oct_device[port]->if_softc;

			cvm_oct_common_poll(priv->ifp);
			if (priv->need_link_update) {
				updated++;
				taskqueue_enqueue(cvm_oct_link_taskq, &priv->link_task);
			}

			queues_per_port = cvmx_pko_get_num_queues(port);
			/* Drain any pending packets in the free list */
			for (qos = 0; qos < queues_per_port; qos++) {
				if (_IF_QLEN(&priv->tx_free_queue[qos]) > 0) {
					IF_LOCK(&priv->tx_free_queue[qos]);
					while (_IF_QLEN(&priv->tx_free_queue[qos]) > cvmx_fau_fetch_and_add32(priv->fau+qos*4, 0)) {
						struct mbuf *m;

						_IF_DEQUEUE(&priv->tx_free_queue[qos], m);
						m_freem(m);
					}
					IF_UNLOCK(&priv->tx_free_queue[qos]);

					/*
					 * XXX locking!
					 */
					priv->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
				}
			}
		}
		port++;
		/* Poll the next port in a 50th of a second.
		   This spreads the polling of ports out a little bit */
		callout_reset(&cvm_oct_poll_timer, hz / 50, cvm_do_timer, NULL);
	} else {
		port = 0;
		/* If any updates were made in this run, continue iterating at
		 * 1/50th of a second, so that if a link has merely gone down
		 * temporarily (e.g. because of interface reinitialization) it
		 * will not be forced to stay down for an entire second.
		 */
		if (updated > 0) {
			updated = 0;
			callout_reset(&cvm_oct_poll_timer, hz / 50, cvm_do_timer, NULL);
		} else {
			/* All ports have been polled. Start the next iteration through
			   the ports in one second */
			callout_reset(&cvm_oct_poll_timer, hz, cvm_do_timer, NULL);
		}
	}
}
Example #5
/*---------------------------------------------------------------------------*
 *	read from trace device
 *---------------------------------------------------------------------------*/
static int
i4btrcread(dev_t dev, struct uio * uio, int ioflag)
{
	struct mbuf *m;
	int x;
	int error = 0;
	int unit = minor(dev);
	
	if(!(device_state[unit] & ST_ISOPEN))
		return(EIO);

	x = SPLI4B();
	
	IF_LOCK(&trace_queue[unit]);

	while(IF_QEMPTY(&trace_queue[unit]) && (device_state[unit] & ST_ISOPEN))
	{
		device_state[unit] |= ST_WAITDATA;
		
		if((error = msleep((caddr_t) &trace_queue[unit],
					&trace_queue[unit].ifq_mtx,
					TTIPRI | PCATCH,
					"bitrc", 0 )) != 0)
		{
			device_state[unit] &= ~ST_WAITDATA;
			IF_UNLOCK(&trace_queue[unit]);
			splx(x);
			return(error);
		}
	}

	_IF_DEQUEUE(&trace_queue[unit], m);
	IF_UNLOCK(&trace_queue[unit]);

	if(m && m->m_len)
		error = uiomove(m->m_data, m->m_len, uio);
	else
		error = EIO;
		
	if(m)
		i4b_Bfreembuf(m);

	splx(x);
	
	return(error);
}
Example #6
/*
 * Start output on the pflog interface.
 */
static void
pflogstart(struct ifnet *ifp)
{
	struct mbuf *m;

	for (;;) {
		IF_LOCK(&ifp->if_snd);
		_IF_DROP(&ifp->if_snd);
		_IF_DEQUEUE(&ifp->if_snd, m);
		IF_UNLOCK(&ifp->if_snd);

		if (m == NULL)
			return;
		else
			m_freem(m);
	}
}
Example #7
/**
 * Output processing task, handles outgoing frames
 */
static void vboxNetFltFreeBSDoutput(void *arg, int pending)
{
    PVBOXNETFLTINS pThis = (PVBOXNETFLTINS)arg;
    struct mbuf *m, *m0;
    struct ifnet *ifp = pThis->u.s.ifp;
    unsigned int cSegs = 0;
    bool fDropIt = false, fActive;
    PINTNETSG pSG;

    VBOXCURVNET_SET(ifp->if_vnet);
    vboxNetFltRetain(pThis, true /* fBusy */);
    for (;;)
    {
        mtx_lock_spin(&pThis->u.s.outq.ifq_mtx);
        _IF_DEQUEUE(&pThis->u.s.outq, m);
        mtx_unlock_spin(&pThis->u.s.outq.ifq_mtx);
        if (m == NULL)
            break;

        for (m0 = m; m0 != NULL; m0 = m0->m_next)
            if (m0->m_len > 0)
                cSegs++;

#ifdef PADD_RUNT_FRAMES_FROM_HOST
        if (m_length(m, NULL) < 60)
            cSegs++;
#endif
        /* Create a copy and deliver to the virtual switch */
        pSG = RTMemTmpAlloc(RT_OFFSETOF(INTNETSG, aSegs[cSegs]));
        vboxNetFltFreeBSDMBufToSG(pThis, m, pSG, cSegs, 0);
        fDropIt = pThis->pSwitchPort->pfnRecv(pThis->pSwitchPort, NULL /* pvIf */, pSG, INTNETTRUNKDIR_HOST);
        RTMemTmpFree(pSG);

        if (fDropIt)
            m_freem(m);
        else
            ether_output_frame(ifp, m);
    }
    vboxNetFltRelease(pThis, true /* fBusy */);
    VBOXCURVNET_RESTORE();
}
Example #8
void
uether_rxflush(struct usb_ether *ue)
{
	struct ifnet *ifp = ue->ue_ifp;
	struct mbuf *m;

	UE_LOCK_ASSERT(ue, MA_OWNED);

	for (;;) {
		_IF_DEQUEUE(&ue->ue_rxq, m);
		if (m == NULL)
			break;

		/*
		 * The USB xfer has been resubmitted so its safe to unlock now.
		 */
		UE_UNLOCK(ue);
		ifp->if_input(ifp, m);
		UE_LOCK(ue);
	}
}
Example #9
/*---------------------------------------------------------------------------*
 *	get_trace_data_from_l1()
 *	------------------------
 *	is called from layer 1, adds a timestamp to the trace data and
 *	puts it into a queue, from which it can be read via the i4btrc
 *	device. The unit number in the trace header selects the minor
 *	device's queue the data is put into.
 *---------------------------------------------------------------------------*/
int
get_trace_data_from_l1(i4b_trace_hdr_t *hdr, int len, char *buf)
{
	struct mbuf *m;
	int x;
	int unit;
	int trunc = 0;
	int totlen = len + sizeof(i4b_trace_hdr_t);

	/*
	 * For telephony (or rather, non-HDLC HSCX mode) we get
	 * packets of (MCLBYTES + sizeof(i4b_trace_hdr_t)) length
	 * to put into the queue to userland. We detect this
	 * situation, truncate the length to MCLBYTES max size,
	 * and inform the userland program of this fact by
	 * putting the number of truncated bytes into hdr->trunc.
	 */
	 
	if(totlen > MCLBYTES)
	{
		trunc = 1;
		hdr->trunc = totlen - MCLBYTES;
		totlen = MCLBYTES;
	}
	else
	{
		hdr->trunc = 0;
	}

	/* set length of trace record */
	
	hdr->length = totlen;
	
	/* check valid unit no */
	
	if((unit = hdr->unit) >= NI4BTRC)
	{
		printf("i4b_trace: get_trace_data_from_l1 - unit >= NI4BTRC!\n");
		return(0);
	}

	/* get mbuf */
	
	if(!(m = i4b_Bgetmbuf(totlen)))
	{
		printf("i4b_trace: get_trace_data_from_l1 - i4b_getmbuf() failed!\n");
		return(0);
	}

	/* check if we are in analyzemode */
	
	if(analyzemode && (unit == rxunit || unit == txunit))
	{
		if(unit == rxunit)
			hdr->dir = FROM_NT;
		else
			hdr->dir = FROM_TE;
		unit = outunit;			
	}

	IF_LOCK(&trace_queue[unit]);

	if(_IF_QFULL(&trace_queue[unit]))
	{
		struct mbuf *m1;

		x = SPLI4B();
		_IF_DEQUEUE(&trace_queue[unit], m1);
		splx(x);		

		i4b_Bfreembuf(m1);
	}
	
	/* copy trace header */
	memcpy(m->m_data, hdr, sizeof(i4b_trace_hdr_t));

	/* copy trace data */
	if(trunc)
		memcpy(&m->m_data[sizeof(i4b_trace_hdr_t)], buf, totlen-sizeof(i4b_trace_hdr_t));
	else
		memcpy(&m->m_data[sizeof(i4b_trace_hdr_t)], buf, len);

	x = SPLI4B();
	
	_IF_ENQUEUE(&trace_queue[unit], m);
	IF_UNLOCK(&trace_queue[unit]);
	
	if(device_state[unit] & ST_WAITDATA)
	{
		device_state[unit] &= ~ST_WAITDATA;
		wakeup((caddr_t) &trace_queue[unit]);
	}

	splx(x);
	
	return(1);
}
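Examples #5 and #9 are the two halves of a producer/consumer pair: the reader msleep()s on the queue address with ifq_mtx as the interlock, and the writer wakeup()s that address after enqueueing. A distilled sketch of the handshake follows; the queue q, the flags word and the function names here are hypothetical, not the i4b identifiers:

static struct ifqueue q;
static int flags;
#define	F_WAITING	0x01	/* a reader is asleep on &q */

/* Consumer: block until a packet is available, then dequeue it. */
static struct mbuf *
consume(void)
{
	struct mbuf *m;

	IF_LOCK(&q);
	while (q.ifq_len == 0) {
		flags |= F_WAITING;
		/* msleep() releases ifq_mtx while asleep and retakes it. */
		if (msleep(&q, &q.ifq_mtx, PCATCH, "qwait", 0) != 0) {
			flags &= ~F_WAITING;
			IF_UNLOCK(&q);
			return (NULL);		/* interrupted by a signal */
		}
	}
	_IF_DEQUEUE(&q, m);
	IF_UNLOCK(&q);
	return (m);
}

/* Producer: enqueue a packet and wake any sleeping consumer. */
static void
produce(struct mbuf *m)
{
	IF_LOCK(&q);
	_IF_ENQUEUE(&q, m);
	if (flags & F_WAITING) {
		flags &= ~F_WAITING;
		wakeup(&q);
	}
	IF_UNLOCK(&q);
}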
Example #10
/*
 * Stop the interface
 */
void
patm_stop(struct patm_softc *sc)
{
	u_int i;
	struct mbuf *m;
	struct patm_txmap *map;
	struct patm_scd *scd;

	sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	sc->utopia.flags |= UTP_FL_POLL_CARRIER;

	patm_reset(sc);

	mtx_lock(&sc->tst_lock);
	i = sc->tst_state;
	sc->tst_state = 0;
	callout_stop(&sc->tst_callout);
	mtx_unlock(&sc->tst_lock);

	if (i != 0) {
		/* this means we are just entering or leaving the timeout.
		 * wait a little bit. Doing this correctly would be more
		 * involved */
		DELAY(1000);
	}

	/*
	 * Give any waiters on closing a VCC a chance. They will stop
	 * to wait if they see that IFF_DRV_RUNNING disappeared.
	 */
	cv_broadcast(&sc->vcc_cv);

	/* free large buffers */
	patm_debug(sc, ATTACH, "freeing large buffers...");
	for (i = 0; i < sc->lbuf_max; i++)
		if (sc->lbufs[i].m != NULL)
			patm_lbuf_free(sc, &sc->lbufs[i]);

	/* free small buffers that are on the card */
	patm_debug(sc, ATTACH, "freeing small buffers...");
	mbp_card_free(sc->sbuf_pool);

	/* free aal0 buffers that are on the card */
	patm_debug(sc, ATTACH, "freeing aal0 buffers...");
	mbp_card_free(sc->vbuf_pool);

	/* free partial receive chains and reset VCC state */
	for (i = 0; i < sc->mmap->max_conn; i++) {
		if (sc->vccs[i] != NULL) {
			if (sc->vccs[i]->chain != NULL) {
				m_freem(sc->vccs[i]->chain);
				sc->vccs[i]->chain = NULL;
				sc->vccs[i]->last = NULL;
			}

			if (sc->vccs[i]->vflags & (PATM_VCC_RX_CLOSING |
			    PATM_VCC_TX_CLOSING)) {
				uma_zfree(sc->vcc_zone, sc->vccs[i]);
				sc->vccs[i] = NULL;
			} else {
				/* keep */
				sc->vccs[i]->vflags &= ~PATM_VCC_OPEN;
				sc->vccs[i]->cps = 0;
				sc->vccs[i]->scd = NULL;
			}
		}
	}

	/* stop all active SCDs */
	while ((scd = LIST_FIRST(&sc->scd_list)) != NULL) {
		/* free queue packets */
		for (;;) {
			_IF_DEQUEUE(&scd->q, m);
			if (m == NULL)
				break;
			m_freem(m);
		}

		/* free transmitting packets */
		for (i = 0; i < IDT_TSQE_TAG_SPACE; i++) {
			if ((m = scd->on_card[i]) != NULL) {
				scd->on_card[i] = 0;
				map = m->m_pkthdr.header;

				bus_dmamap_unload(sc->tx_tag, map->map);
				SLIST_INSERT_HEAD(&sc->tx_maps_free, map, link);
				m_freem(m);
			}
		}
		patm_scd_free(sc, scd);
	}
	sc->scd0 = NULL;

	sc->flags &= ~PATM_CLR;

	/* reset raw cell queue */
	sc->rawh = NULL;

	ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
	    sc->utopia.carrier == UTP_CARR_OK);
}
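The unlocked drain loop at the top of the SCD cleanup (and in pflogstart() in Examples #1 and #6) is common enough that the historical if_var.h wrapped it in a macro, roughly:

/* Free every packet on the queue; the underscore form assumes the lock is held. */
#define	_IF_DRAIN(ifq) do {			\
	struct mbuf *_m;			\
	for (;;) {				\
		_IF_DEQUEUE(ifq, _m);		\
		if (_m == NULL)			\
			break;			\
		m_freem(_m);			\
	}					\
} while (0)

#define	IF_DRAIN(ifq) do {			\
	IF_LOCK(ifq);				\
	_IF_DRAIN(ifq);				\
	IF_UNLOCK(ifq);				\
} while (0)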
Example #11
static void iavc_start_tx(iavc_softc_t *sc)
{
    struct mbuf *m;
    u_int8_t *dmabuf;
    u_int32_t txlen = 0;
    
    /* If device has put us on hold, punt. */

    if (sc->sc_blocked) {
	return;
    }

    /* If using DMA and transmitter busy, punt. */
    
    if (sc->sc_dma && (sc->sc_csr & EN_TX_TC_INT)) {
	return;
    }

    /* Else, see if we have messages to send. */

    _IF_DEQUEUE(&sc->sc_txq, m);
    if (!m) {
	return;
    }

    /* Have message, will send. */

    if (CAPIMSG_LEN(m->m_data)) {
	/* A proper CAPI message, possibly with B3 data */

	if (sc->sc_dma) {
	    /* Copy message to DMA buffer. */

	    if (m->m_next) {
		dmabuf = amcc_put_byte(&sc->sc_sendbuf[0], SEND_DATA_B3_REQ);
	    } else {
		dmabuf = amcc_put_byte(&sc->sc_sendbuf[0], SEND_MESSAGE);
	    }

	    dmabuf = amcc_put_word(dmabuf, m->m_len);
	    bcopy(m->m_data, dmabuf, m->m_len);
	    dmabuf += m->m_len;
	    txlen = 5 + m->m_len;

	    if (m->m_next) {
		dmabuf = amcc_put_word(dmabuf, m->m_next->m_len);
		bcopy(m->m_next->m_data, dmabuf, m->m_next->m_len);
		txlen += 4 + m->m_next->m_len;
	    }

	} else {
	    /* Use PIO. */

	    if (m->m_next) {
		iavc_put_byte(sc, SEND_DATA_B3_REQ);
		NDBGL4(L4_IAVCDBG, "iavc%d: tx SDB3R msg, len = %d", sc->sc_unit, m->m_len);
	    } else {
		iavc_put_byte(sc, SEND_MESSAGE);
		NDBGL4(L4_IAVCDBG, "iavc%d: tx SM msg, len = %d", sc->sc_unit, m->m_len);
	    }
#if 0
    {
	u_int8_t *p = mtod(m, u_int8_t*);
	int len;
	for (len = 0; len < m->m_len; len++) {
	    printf(" %02x", *p++);
	    if (len && (len % 16) == 0) printf("\n");
	}
	if (len % 16) printf("\n");
    }
#endif

	    iavc_put_slice(sc, m->m_data, m->m_len);

	    if (m->m_next) {
		iavc_put_slice(sc, m->m_next->m_data, m->m_next->m_len);
	    }
	}

    } else {
	/* A board control message to be sent as is */

	if (sc->sc_dma) {
Example #12
/**
 * Packet transmit
 *
 * @param m    Packet to send
 * @param dev    Device info structure
 * @return Always returns zero
 */
int cvm_oct_xmit(struct mbuf *m, struct ifnet *ifp)
{
	cvmx_pko_command_word0_t    pko_command;
	cvmx_buf_ptr_t              hw_buffer;
	int                         dropped;
	int                         qos;
	cvm_oct_private_t          *priv = (cvm_oct_private_t *)ifp->if_softc;
	int32_t in_use;
	int32_t buffers_to_free;
	cvmx_wqe_t *work;

	/* Prefetch the private data structure.
	   It is larger than one cache line */
	CVMX_PREFETCH(priv, 0);

	/* Start off assuming no drop */
	dropped = 0;

	/* The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to completely
	   remove "qos" in the event neither interface supports multiple queues
	   per port */
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		qos = GET_MBUF_QOS(m);
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else
		qos = 0;

	/* The CN3XXX series of parts has an errata (GMX-401) which causes the
	   GMX block to hang if a collision occurs towards the end of a
	   <68 byte packet. As a workaround for this, we pad packets to be
	   68 bytes whenever we are in half duplex mode. We don't handle
	   the case of having a small packet but no room to add the padding.
	   The kernel should always give us at least a cache line */
	if (__predict_false(m->m_pkthdr.len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		cvmx_gmxx_prtx_cfg_t gmx_prt_cfg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		if (interface < 2) {
			/* We only need to pad the packet in half-duplex mode */
			gmx_prt_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
			if (gmx_prt_cfg.s.duplex == 0) {
				static uint8_t pad[64];

				if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad))
					printf("%s: unable to pad small packet.", __func__);
			}
		}
	}

#ifdef OCTEON_VENDOR_RADISYS
	/*
	 * The RSYS4GBE will hang if asked to transmit a packet less than 60 bytes.
	 */
	if (__predict_false(m->m_pkthdr.len < 60) &&
	    cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE) {
		static uint8_t pad[60];

		if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad))
			printf("%s: unable to pad small packet.", __func__);
	}
#endif

	/*
	 * If the packet is not fragmented.
	 */
	if (m->m_pkthdr.len == m->m_len) {
		/* Build the PKO buffer pointer */
		hw_buffer.u64 = 0;
		hw_buffer.s.addr = cvmx_ptr_to_phys(m->m_data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = m->m_len;

		/* Build the PKO command */
		pko_command.u64 = 0;
		pko_command.s.segs = 1;
		pko_command.s.dontfree = 1; /* Do not put this buffer into the FPA.  */

		work = NULL;
	} else {
		struct mbuf *n;
		unsigned segs;
		uint64_t *gp;

		/*
		 * The packet is fragmented, we need to send a list of segments
		 * in memory we borrow from the WQE pool.
		 */
		work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
		if (work == NULL) {
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			return 1;
		}

		segs = 0;
		gp = (uint64_t *)work;
		for (n = m; n != NULL; n = n->m_next) {
			if (segs == CVMX_FPA_WQE_POOL_SIZE / sizeof (uint64_t))
				panic("%s: too many segments in packet; call m_collapse().", __func__);

			/* Build the PKO buffer pointer */
			hw_buffer.u64 = 0;
			hw_buffer.s.i = 1; /* Do not put this buffer into the FPA.  */
			hw_buffer.s.addr = cvmx_ptr_to_phys(n->m_data);
			hw_buffer.s.pool = 0;
			hw_buffer.s.size = n->m_len;

			*gp++ = hw_buffer.u64;
			segs++;
		}

		/* Build the PKO buffer gather list pointer */
		hw_buffer.u64 = 0;
		hw_buffer.s.addr = cvmx_ptr_to_phys(work);
		hw_buffer.s.pool = CVMX_FPA_WQE_POOL;
		hw_buffer.s.size = segs;

		/* Build the PKO command */
		pko_command.u64 = 0;
		pko_command.s.segs = segs;
		pko_command.s.gather = 1;
		pko_command.s.dontfree = 0; /* Put the WQE above back into the FPA.  */
	}

	/* Finish building the PKO command */
	pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
	pko_command.s.reg0 = priv->fau+qos*4;
	pko_command.s.total_bytes = m->m_pkthdr.len;
	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
	pko_command.s.subone0 = 1;

	/* Check if we can use the hardware checksumming */
	if ((m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) != 0) {
		/* Use hardware checksum calc */
		pko_command.s.ipoffp1 = ETHER_HDR_LEN + 1;
	}

	/*
	 * XXX
	 * Could use a different free queue (and different FAU address) per
	 * core instead of per QoS, to reduce contention here.
	 */
	IF_LOCK(&priv->tx_free_queue[qos]);
	/* Get the number of mbufs in use by the hardware */
	in_use = cvmx_fau_fetch_and_add32(priv->fau+qos*4, 1);
	buffers_to_free = cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos, CVMX_PKO_LOCK_CMD_QUEUE);

	/* Drop this packet if we have too many already queued to the HW */
	if (_IF_QFULL(&priv->tx_free_queue[qos])) {
		dropped = 1;
	}
	/* Send the packet to the output queue */
	else
	if (__predict_false(cvmx_pko_send_packet_finish(priv->port, priv->queue + qos, pko_command, hw_buffer, CVMX_PKO_LOCK_CMD_QUEUE))) {
		DEBUGPRINT("%s: Failed to send the packet\n", if_name(ifp));
		dropped = 1;
	}

	if (__predict_false(dropped)) {
		m_freem(m);
		cvmx_fau_atomic_add32(priv->fau+qos*4, -1);
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	} else {
		/* Put this packet on the queue to be freed later */
		_IF_ENQUEUE(&priv->tx_free_queue[qos], m);

		/* Pass it to any BPF listeners.  */
		ETHER_BPF_MTAP(ifp, m);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
	}

	/* Free mbufs not in use by the hardware */
	if (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) {
		while (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) {
			_IF_DEQUEUE(&priv->tx_free_queue[qos], m);
			m_freem(m);
		}
	}
	IF_UNLOCK(&priv->tx_free_queue[qos]);

	return dropped;
}
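Example #12 exercises the enqueue side of the same API: _IF_QFULL gates admission against ifq_maxlen, _IF_ENQUEUE links the packet at the tail, and _IF_QLEN drives the deferred-free loop. A sketch in the same historical style (again an approximation, not the authoritative header):

#define	_IF_QFULL(ifq)	((ifq)->ifq_len >= (ifq)->ifq_maxlen)
#define	_IF_QLEN(ifq)	((ifq)->ifq_len)

#define	_IF_ENQUEUE(ifq, m) do {			\
	(m)->m_nextpkt = NULL;				\
	if ((ifq)->ifq_tail == NULL)			\
		(ifq)->ifq_head = (m);			\
	else						\
		(ifq)->ifq_tail->m_nextpkt = (m);	\
	(ifq)->ifq_tail = (m);				\
	(ifq)->ifq_len++;				\
} while (0)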