Example no. 1
static void
g_modem_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct g_modem_softc *sc = usbd_xfer_softc(xfer);
	int actlen;
	int aframes;

	usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);

	DPRINTF("st=%d aframes=%d actlen=%d bytes\n",
	    USB_GET_STATE(xfer), aframes, actlen);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:

		sc->sc_throughput += actlen;

		if (sc->sc_mode == G_MODEM_MODE_LOOP) {
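			/* echo the received data on the bulk write pipe */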
			sc->sc_tx_busy = 1;
			sc->sc_data_len = actlen;
			usbd_transfer_start(sc->sc_xfer[G_MODEM_BULK_WR]);
			break;
		}

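		/* FALLTHROUGH */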
	case USB_ST_SETUP:
tr_setup:
		if ((sc->sc_mode == G_MODEM_MODE_SILENT) ||
		    (sc->sc_tx_busy != 0))
			break;

		usbd_xfer_set_frame_data(xfer, 0, sc->sc_data_buf, G_MODEM_BUFSIZE);
		usbd_xfer_set_frames(xfer, 1);
		usbd_transfer_submit(xfer);
		break;

	default:			/* Error */
		DPRINTF("error=%s\n", usbd_errstr(error));

		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}
Example no. 2
/*------------------------------------------------------------------------*
 *	usb_handle_request
 *
 * Internal state sequence:
 *
 * USB_HR_NOT_COMPLETE -> USB_HR_COMPLETE_OK or USB_HR_COMPLETE_ERR
 *
 * Returns:
 * 0: Ready to start hardware
 * Else: Stall current transfer, if any
 *------------------------------------------------------------------------*/
static usb_error_t
usb_handle_request(struct usb_xfer *xfer)
{
	struct usb_device_request req;
	struct usb_device *udev;
	const void *src_zcopy;		/* zero-copy source pointer */
	const void *src_mcopy;		/* non zero-copy source pointer */
	uint16_t off;			/* data offset */
	uint16_t rem;			/* data remainder */
	uint16_t max_len;		/* max fragment length */
	uint16_t wValue;
	uint16_t wIndex;
	uint8_t state;
	uint8_t is_complete = 1;
	usb_error_t err;
	union {
		uWord	wStatus;
		uint8_t	buf[2];
	}     temp;

	/*
	 * Filter the USB transfer state into
	 * something which we understand:
	 */

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_SETUP:
		state = USB_HR_NOT_COMPLETE;

		if (!xfer->flags_int.control_act) {
			/* nothing to do */
			goto tr_stalled;
		}
		break;
	case USB_ST_TRANSFERRED:
		if (!xfer->flags_int.control_act) {
			state = USB_HR_COMPLETE_OK;
		} else {
			state = USB_HR_NOT_COMPLETE;
		}
		break;
	default:
		state = USB_HR_COMPLETE_ERR;
		break;
	}

	/* reset frame stuff */

	usbd_xfer_set_frame_len(xfer, 0, 0);

	usbd_xfer_set_frame_offset(xfer, 0, 0);
	usbd_xfer_set_frame_offset(xfer, sizeof(req), 1);

	/* get the current request, if any */

	usbd_copy_out(xfer->frbuffers, 0, &req, sizeof(req));

	if (xfer->flags_int.control_rem == 0xFFFF) {
		/* first time - not initialised */
		rem = UGETW(req.wLength);
		off = 0;
	} else {
		/* not first time - initialised */
		rem = xfer->flags_int.control_rem;
		off = UGETW(req.wLength) - rem;
	}

	/* set some defaults */

	max_len = 0;
	src_zcopy = NULL;
	src_mcopy = NULL;
	udev = xfer->xroot->udev;

	/* get some request fields decoded */

	wValue = UGETW(req.wValue);
	wIndex = UGETW(req.wIndex);

	DPRINTF("req 0x%02x 0x%02x 0x%04x 0x%04x "
	    "off=0x%x rem=0x%x, state=%d\n", req.bmRequestType,
	    req.bRequest, wValue, wIndex, off, rem, state);

	/* demultiplex the control request */

	switch (req.bmRequestType) {
	case UT_READ_DEVICE:
		if (state != USB_HR_NOT_COMPLETE) {
			break;
		}
		switch (req.bRequest) {
		case UR_GET_DESCRIPTOR:
			goto tr_handle_get_descriptor;
		case UR_GET_CONFIG:
			goto tr_handle_get_config;
		case UR_GET_STATUS:
			goto tr_handle_get_status;
		default:
			goto tr_stalled;
		}
		break;

	case UT_WRITE_DEVICE:
		switch (req.bRequest) {
		case UR_SET_ADDRESS:
			goto tr_handle_set_address;
		case UR_SET_CONFIG:
			goto tr_handle_set_config;
		case UR_CLEAR_FEATURE:
			switch (wValue) {
			case UF_DEVICE_REMOTE_WAKEUP:
				goto tr_handle_clear_wakeup;
			default:
				goto tr_stalled;
			}
			break;
		case UR_SET_FEATURE:
			switch (wValue) {
			case UF_DEVICE_REMOTE_WAKEUP:
				goto tr_handle_set_wakeup;
			default:
				goto tr_stalled;
			}
			break;
		default:
			goto tr_stalled;
		}
		break;

	case UT_WRITE_ENDPOINT:
		switch (req.bRequest) {
		case UR_CLEAR_FEATURE:
			switch (wValue) {
			case UF_ENDPOINT_HALT:
				goto tr_handle_clear_halt;
			default:
				goto tr_stalled;
			}
			break;
		case UR_SET_FEATURE:
			switch (wValue) {
			case UF_ENDPOINT_HALT:
				goto tr_handle_set_halt;
			default:
				goto tr_stalled;
			}
			break;
		default:
			goto tr_stalled;
		}
		break;

	case UT_READ_ENDPOINT:
		switch (req.bRequest) {
		case UR_GET_STATUS:
			goto tr_handle_get_ep_status;
		default:
			goto tr_stalled;
		}
		break;
	default:
		/* we use "USB_ADD_BYTES" to de-const the src_zcopy */
		err = usb_handle_iface_request(xfer,
		    USB_ADD_BYTES(&src_zcopy, 0),
		    &max_len, req, off, state);
		if (err == 0) {
			is_complete = 0;
			goto tr_valid;
		} else if (err == USB_ERR_SHORT_XFER) {
			goto tr_valid;
		}
		/*
		 * Reset zero-copy pointer and max length
		 * variable in case they were unintentionally
		 * set:
		 */
		src_zcopy = NULL;
		max_len = 0;

		/*
		 * Check if we have a vendor specific
		 * descriptor:
		 */
		goto tr_handle_get_descriptor;
	}
	goto tr_valid;

tr_handle_get_descriptor:
	err = (usb_temp_get_desc_p) (udev, &req, &src_zcopy, &max_len);
	if (err)
		goto tr_stalled;
	if (src_zcopy == NULL)
		goto tr_stalled;
	goto tr_valid;

tr_handle_get_config:
	temp.buf[0] = udev->curr_config_no;
	src_mcopy = temp.buf;
	max_len = 1;
	goto tr_valid;

tr_handle_get_status:

	wValue = 0;

	USB_BUS_LOCK(udev->bus);
	if (udev->flags.remote_wakeup) {
		wValue |= UDS_REMOTE_WAKEUP;
	}
	if (udev->flags.self_powered) {
		wValue |= UDS_SELF_POWERED;
	}
	USB_BUS_UNLOCK(udev->bus);

	USETW(temp.wStatus, wValue);
	src_mcopy = temp.wStatus;
	max_len = sizeof(temp.wStatus);
	goto tr_valid;

tr_handle_set_address:
	if (state == USB_HR_NOT_COMPLETE) {
		if (wValue >= 0x80) {
			/* invalid value */
			goto tr_stalled;
		} else if (udev->curr_config_no != 0) {
			/* we are configured ! */
			goto tr_stalled;
		}
	} else if (state != USB_HR_COMPLETE_ERR) {
		udev->address = (wValue & 0x7F);
		goto tr_bad_context;
	}
	goto tr_valid;

tr_handle_set_config:
	if (state == USB_HR_NOT_COMPLETE) {
		if (usb_handle_set_config(xfer, req.wValue[0])) {
			goto tr_stalled;
		}
	}
	goto tr_valid;

tr_handle_clear_halt:
	if (state == USB_HR_NOT_COMPLETE) {
		if (usb_handle_set_stall(xfer, req.wIndex[0], 0)) {
			goto tr_stalled;
		}
	}
	goto tr_valid;

tr_handle_clear_wakeup:
	if (state == USB_HR_NOT_COMPLETE) {
		if (usb_handle_remote_wakeup(xfer, 0)) {
			goto tr_stalled;
		}
	}
	goto tr_valid;

tr_handle_set_halt:
	if (state == USB_HR_NOT_COMPLETE) {
		if (usb_handle_set_stall(xfer, req.wIndex[0], 1)) {
			goto tr_stalled;
		}
	}
	goto tr_valid;

tr_handle_set_wakeup:
	if (state == USB_HR_NOT_COMPLETE) {
		if (usb_handle_remote_wakeup(xfer, 1)) {
			goto tr_stalled;
		}
	}
	goto tr_valid;

tr_handle_get_ep_status:
	if (state == USB_HR_NOT_COMPLETE) {
		temp.wStatus[0] =
		    usb_handle_get_stall(udev, req.wIndex[0]);
		temp.wStatus[1] = 0;
		src_mcopy = temp.wStatus;
		max_len = sizeof(temp.wStatus);
	}
	goto tr_valid;

tr_valid:
	if (state != USB_HR_NOT_COMPLETE) {
		goto tr_stalled;
	}
	/* subtract offset from length */

	max_len -= off;

	/* Compute the real maximum data length */

	if (max_len > xfer->max_data_length) {
		max_len = usbd_xfer_max_len(xfer);
	}
	if (max_len > rem) {
		max_len = rem;
	}
	/*
	 * If the remainder is greater than the maximum data length,
	 * we need to truncate the value for the sake of the
	 * comparison below:
	 */
	if (rem > xfer->max_data_length) {
		rem = usbd_xfer_max_len(xfer);
	}
	if ((rem != max_len) && (is_complete != 0)) {
		/*
	         * If we don't transfer the data we can transfer, then
	         * the transfer is short !
	         */
		xfer->flags.force_short_xfer = 1;
		xfer->nframes = 2;
	} else {
		/*
		 * Default case
		 */
		xfer->flags.force_short_xfer = 0;
		xfer->nframes = max_len ? 2 : 1;
	}
	if (max_len > 0) {
		if (src_mcopy) {
			src_mcopy = USB_ADD_BYTES(src_mcopy, off);
			usbd_copy_in(xfer->frbuffers + 1, 0,
			    src_mcopy, max_len);
			usbd_xfer_set_frame_len(xfer, 1, max_len);
		} else {
			usbd_xfer_set_frame_data(xfer, 1,
			    USB_ADD_BYTES(src_zcopy, off), max_len);
		}
	} else {
		/* the end is reached, send status */
		xfer->flags.manual_status = 0;
		usbd_xfer_set_frame_len(xfer, 1, 0);
	}
	DPRINTF("success\n");
	return (0);			/* success */

tr_stalled:
	DPRINTF("%s\n", (state != USB_HR_NOT_COMPLETE) ?
	    "complete" : "stalled");
	return (USB_ERR_STALLED);

tr_bad_context:
	DPRINTF("bad context\n");
	return (USB_ERR_BAD_CONTEXT);
}
Example no. 3
static void
g_modem_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct g_modem_softc *sc = usbd_xfer_softc(xfer);
	int actlen;
	int aframes;
	int mod;
	int x;
	int max;

	usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);

	DPRINTF("st=%d aframes=%d actlen=%d bytes\n",
	    USB_GET_STATE(xfer), aframes, actlen);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:

		sc->sc_tx_busy = 0;
		sc->sc_throughput += actlen;

		if (sc->sc_mode == G_MODEM_MODE_LOOP) {
			/* start loop */
			usbd_transfer_start(sc->sc_xfer[G_MODEM_BULK_RD]);
			break;
		} else if ((sc->sc_mode == G_MODEM_MODE_PATTERN) && (sc->sc_tx_interval != 0)) {
			/* wait for next timeout */
			break;
		}
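		/* FALLTHROUGH */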
	case USB_ST_SETUP:
tr_setup:
		if (sc->sc_mode == G_MODEM_MODE_PATTERN) {

			mod = sc->sc_pattern_len;
			max = sc->sc_tx_interval ? mod : G_MODEM_BUFSIZE;

			if (mod == 0) {
				for (x = 0; x != max; x++)
					sc->sc_data_buf[x] = x % 255;
			} else {
				for (x = 0; x != max; x++)
					sc->sc_data_buf[x] = sc->sc_pattern[x % mod];
			}

			usbd_xfer_set_frame_data(xfer, 0, sc->sc_data_buf, max);
			usbd_xfer_set_interval(xfer, 0);
			usbd_xfer_set_frames(xfer, 1);
			usbd_transfer_submit(xfer);

		} else if (sc->sc_mode == G_MODEM_MODE_LOOP) {

			if (sc->sc_tx_busy == 0)
				break;

			x = sc->sc_tx_interval;

			if (x < 0)
				x = 0;
			else if (x > 256)
				x = 256;

			usbd_xfer_set_frame_data(xfer, 0, sc->sc_data_buf, sc->sc_data_len);
			usbd_xfer_set_interval(xfer, x);
			usbd_xfer_set_frames(xfer, 1);
			usbd_transfer_submit(xfer);
		} else {
			sc->sc_tx_busy = 0;
		}
		break;

	default:			/* Error */
		DPRINTF("error=%s\n", usbd_errstr(error));

		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}
Example no. 4
static void
usie_if_rx_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct usie_softc *sc = usbd_xfer_softc(xfer);
	struct ifnet *ifp = sc->sc_ifp;
	struct mbuf *m0;
	struct mbuf *m = NULL;
	struct usie_desc *rxd;
	uint32_t actlen;
	uint16_t err;
	uint16_t pkt;
	uint16_t ipl;
	uint16_t len;
	uint16_t diff;
	uint8_t pad;
	uint8_t ipv;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		DPRINTFN(15, "rx done, actlen=%u\n", actlen);

		if (actlen < sizeof(struct usie_hip)) {
			DPRINTF("data too short %u\n", actlen);
			goto tr_setup;
		}
		m = sc->sc_rxm;
		sc->sc_rxm = NULL;

		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:

		if (sc->sc_rxm == NULL) {
			sc->sc_rxm = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR,
			    MJUMPAGESIZE /* could be bigger than MCLBYTES */ );
		}
		if (sc->sc_rxm == NULL) {
			DPRINTF("could not allocate Rx mbuf\n");
			ifp->if_ierrors++;
			usbd_xfer_set_stall(xfer);
			usbd_xfer_set_frames(xfer, 0);
		} else {
			/*
			 * Directly loading an mbuf cluster into DMA to
			 * save some data copying. This works because
			 * there is only one cluster.
			 */
			usbd_xfer_set_frame_data(xfer, 0,
			    mtod(sc->sc_rxm, caddr_t), MIN(MJUMPAGESIZE, USIE_RXSZ_MAX));
			usbd_xfer_set_frames(xfer, 1);
		}
		usbd_transfer_submit(xfer);
		break;

	default:			/* Error */
		DPRINTF("USB transfer error, %s\n", usbd_errstr(error));

		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			ifp->if_ierrors++;
			goto tr_setup;
		}
		if (sc->sc_rxm != NULL) {
			m_freem(sc->sc_rxm);
			sc->sc_rxm = NULL;
		}
		break;
	}

	if (m == NULL)
		return;

	mtx_unlock(&sc->sc_mtx);

	m->m_pkthdr.len = m->m_len = actlen;

	err = pkt = 0;

	/* HW can aggregate multiple frames in a single USB xfer */
	for (;;) {
		rxd = mtod(m, struct usie_desc *);

		len = be16toh(rxd->hip.len) & USIE_HIP_IP_LEN_MASK;
		pad = (rxd->hip.id & USIE_HIP_PAD) ? 1 : 0;
		ipl = (len - pad - ETHER_HDR_LEN);
		if (ipl >= len) {
			DPRINTF("Corrupt frame\n");
			m_freem(m);
			break;
		}
		diff = sizeof(struct usie_desc) + ipl + pad;

		if (((rxd->hip.id & USIE_HIP_MASK) != USIE_HIP_IP) ||
		    (be16toh(rxd->desc_type) & USIE_TYPE_MASK) != USIE_IP_RX) {
			DPRINTF("received wrong type of packet\n");
			m->m_data += diff;
			m->m_pkthdr.len = (m->m_len -= diff);
			err++;
			if (m->m_pkthdr.len > 0)
				continue;
			m_freem(m);
			break;
		}
		switch (be16toh(rxd->ethhdr.ether_type)) {
		case ETHERTYPE_IP:
			ipv = NETISR_IP;
			break;
#ifdef INET6
		case ETHERTYPE_IPV6:
			ipv = NETISR_IPV6;
			break;
#endif
		default:
			DPRINTF("unsupported ether type\n");
			err++;
			break;
		}

		/* the last packet */
		if (m->m_pkthdr.len <= diff) {
			m->m_data += (sizeof(struct usie_desc) + pad);
			m->m_pkthdr.len = m->m_len = ipl;
			m->m_pkthdr.rcvif = ifp;
			BPF_MTAP(sc->sc_ifp, m);
			netisr_dispatch(ipv, m);
			break;
		}
		/* copy aggregated frames to another mbuf */
		m0 = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (__predict_false(m0 == NULL)) {
			DPRINTF("could not allocate mbuf\n");
			err++;
			m_freem(m);
			break;
		}
		m_copydata(m, sizeof(struct usie_desc) + pad, ipl, mtod(m0, caddr_t));
		m0->m_pkthdr.rcvif = ifp;
		m0->m_pkthdr.len = m0->m_len = ipl;

		BPF_MTAP(sc->sc_ifp, m0);
		netisr_dispatch(ipv, m0);

		m->m_data += diff;
		m->m_pkthdr.len = (m->m_len -= diff);
	}

	mtx_lock(&sc->sc_mtx);

	ifp->if_ierrors += err;
	ifp->if_ipackets += pkt;
}
Example no. 5
static void
cdce_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct cdce_softc *sc = usbd_xfer_softc(xfer);
	struct mbuf *m;
	uint8_t x;
	int actlen, aframes, len;

	usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:

		DPRINTF("received %u bytes in %u frames\n", actlen, aframes);

		for (x = 0; x != aframes; x++) {

			m = sc->sc_rx_buf[x];
			sc->sc_rx_buf[x] = NULL;
			len = usbd_xfer_frame_len(xfer, x);

			/* Strip off CRC added by Zaurus, if any */
			if ((sc->sc_flags & CDCE_FLAG_ZAURUS) && len >= 14)
				len -= 4;

			if (len < sizeof(struct ether_header)) {
				m_freem(m);
				continue;
			}
			/* queue up mbuf */
			uether_rxmbuf(&sc->sc_ue, m, len);
		}

		/* FALLTHROUGH */
	case USB_ST_SETUP:
		/* 
		 * TODO: Implement support for multi frame transfers,
		 * when the USB hardware supports it.
		 */
		for (x = 0; x != 1; x++) {
			if (sc->sc_rx_buf[x] == NULL) {
				m = uether_newbuf();
				if (m == NULL)
					goto tr_stall;
				sc->sc_rx_buf[x] = m;
			} else {
				m = sc->sc_rx_buf[x];
			}

			usbd_xfer_set_frame_data(xfer, x, m->m_data, m->m_len);
		}
		/* set number of frames and start hardware */
		usbd_xfer_set_frames(xfer, x);
		usbd_transfer_submit(xfer);
		/* flush any received frames */
		uether_rxflush(&sc->sc_ue);
		break;

	default:			/* Error */
		DPRINTF("error = %s\n",
		    usbd_errstr(error));

		if (error != USB_ERR_CANCELLED) {
tr_stall:
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			usbd_xfer_set_frames(xfer, 0);
			usbd_transfer_submit(xfer);
			break;
		}

		/* need to free the RX-mbufs when we are cancelled */
		cdce_free_queue(sc->sc_rx_buf, CDCE_FRAMES_MAX);
		break;
	}
}
Example no. 6
static void
cdce_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct cdce_softc *sc = usbd_xfer_softc(xfer);
	struct ifnet *ifp = uether_getifp(&sc->sc_ue);
	struct mbuf *m;
	struct mbuf *mt;
	uint32_t crc;
	uint8_t x;
	int actlen, aframes;

	usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);

	DPRINTFN(1, "\n");

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		DPRINTFN(11, "transfer complete: %u bytes in %u frames\n",
		    actlen, aframes);

		ifp->if_opackets++;

		/* free all previous TX buffers */
		cdce_free_queue(sc->sc_tx_buf, CDCE_FRAMES_MAX);

		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		for (x = 0; x != CDCE_FRAMES_MAX; x++) {

			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);

			if (m == NULL)
				break;

			if (sc->sc_flags & CDCE_FLAG_ZAURUS) {
				/*
				 * Zaurus wants a 32-bit CRC appended
				 * to every frame
				 */

				crc = cdce_m_crc32(m, 0, m->m_pkthdr.len);
				crc = htole32(crc);

				if (!m_append(m, 4, (void *)&crc)) {
					m_freem(m);
					ifp->if_oerrors++;
					continue;
				}
			}
			if (m->m_len != m->m_pkthdr.len) {
				mt = m_defrag(m, M_DONTWAIT);
				if (mt == NULL) {
					m_freem(m);
					ifp->if_oerrors++;
					continue;
				}
				m = mt;
			}
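			/* limit the frame length to one mbuf cluster */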
			if (m->m_pkthdr.len > MCLBYTES) {
				m->m_pkthdr.len = MCLBYTES;
			}
			sc->sc_tx_buf[x] = m;
			usbd_xfer_set_frame_data(xfer, x, m->m_data, m->m_len);

			/*
			 * If there's a BPF listener, bounce a copy of
			 * this frame to him:
			 */
			BPF_MTAP(ifp, m);
		}
		if (x != 0) {
			usbd_xfer_set_frames(xfer, x);

			usbd_transfer_submit(xfer);
		}
		break;

	default:			/* Error */
		DPRINTFN(11, "transfer error, %s\n",
		    usbd_errstr(error));

		/* free all previous TX buffers */
		cdce_free_queue(sc->sc_tx_buf, CDCE_FRAMES_MAX);

		/* count output errors */
		ifp->if_oerrors++;

		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}
Example no. 7
static void
ipheth_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct ipheth_softc *sc = usbd_xfer_softc(xfer);
	struct mbuf *m;
	uint8_t x;
	int actlen;
	int aframes;
	int len;

	usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:

		DPRINTF("received %u bytes in %u frames\n", actlen, aframes);

		for (x = 0; x != aframes; x++) {

			m = sc->sc_rx_buf[x];
			sc->sc_rx_buf[x] = NULL;
			len = usbd_xfer_frame_len(xfer, x);

			if (len < (sizeof(struct ether_header) +
			    IPHETH_RX_ADJ)) {
				m_freem(m);
				continue;
			}

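			/* strip the IPHETH_RX_ADJ leading bytes from the received frame */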
			m_adj(m, IPHETH_RX_ADJ);

			/* queue up mbuf */
			uether_rxmbuf(&sc->sc_ue, m, len - IPHETH_RX_ADJ);
		}

		/* FALLTHROUGH */
	case USB_ST_SETUP:

		for (x = 0; x != IPHETH_RX_FRAMES_MAX; x++) {
			if (sc->sc_rx_buf[x] == NULL) {
				m = uether_newbuf();
				if (m == NULL)
					goto tr_stall;

				/* cancel alignment for ethernet */
				m_adj(m, ETHER_ALIGN);

				sc->sc_rx_buf[x] = m;
			} else {
				m = sc->sc_rx_buf[x];
			}

			usbd_xfer_set_frame_data(xfer, x, m->m_data, m->m_len);
		}
		/* set number of frames and start hardware */
		usbd_xfer_set_frames(xfer, x);
		usbd_transfer_submit(xfer);
		/* flush any received frames */
		uether_rxflush(&sc->sc_ue);
		break;

	default:			/* Error */
		DPRINTF("error = %s\n", usbd_errstr(error));

		if (error != USB_ERR_CANCELLED) {
	tr_stall:
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			usbd_xfer_set_frames(xfer, 0);
			usbd_transfer_submit(xfer);
			break;
		}
		/* need to free the RX-mbufs when we are cancelled */
		ipheth_free_queue(sc->sc_rx_buf, IPHETH_RX_FRAMES_MAX);
		break;
	}
}
Example no. 8
void
rtwn_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct rtwn_usb_softc *uc = usbd_xfer_softc(xfer);
	struct rtwn_softc *sc = &uc->uc_sc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct mbuf *m = NULL, *next;
	struct rtwn_data *data;
	int8_t nf, rssi;

	RTWN_ASSERT_LOCKED(sc);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
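		/* dequeue the completed buffer, parse it and recycle it */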
		data = STAILQ_FIRST(&uc->uc_rx_active);
		if (data == NULL)
			goto tr_setup;
		STAILQ_REMOVE_HEAD(&uc->uc_rx_active, next);
		m = rtwn_report_intr(uc, xfer, data);
		STAILQ_INSERT_TAIL(&uc->uc_rx_inactive, data, next);
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		data = STAILQ_FIRST(&uc->uc_rx_inactive);
		if (data == NULL) {
			KASSERT(m == NULL, ("mbuf isn't NULL"));
			goto finish;
		}
		STAILQ_REMOVE_HEAD(&uc->uc_rx_inactive, next);
		STAILQ_INSERT_TAIL(&uc->uc_rx_active, data, next);
		usbd_xfer_set_frame_data(xfer, 0, data->buf,
		    usbd_xfer_max_len(xfer));
		usbd_transfer_submit(xfer);

		/*
		 * To avoid a LOR, unlock our private mutex before calling
		 * ieee80211_input(); this is safe because we are at the end
		 * of the USB callback.
		 */
		while (m != NULL) {
			next = m->m_next;
			m->m_next = NULL;

			ni = rtwn_rx_frame(sc, m, &rssi);

			RTWN_UNLOCK(sc);

			nf = RTWN_NOISE_FLOOR;
			if (ni != NULL) {
				if (ni->ni_flags & IEEE80211_NODE_HT)
					m->m_flags |= M_AMPDU;
				(void)ieee80211_input(ni, m, rssi - nf, nf);
				ieee80211_free_node(ni);
			} else {
				(void)ieee80211_input_all(ic, m,
				    rssi - nf, nf);
			}
			RTWN_LOCK(sc);
			m = next;
		}
		break;
	default:
		/* return the buffer to the inactive queue due to an error */
		data = STAILQ_FIRST(&uc->uc_rx_active);
		if (data != NULL) {
			STAILQ_REMOVE_HEAD(&uc->uc_rx_active, next);
			STAILQ_INSERT_TAIL(&uc->uc_rx_inactive, data, next);
		}
		if (error != USB_ERR_CANCELLED) {
			usbd_xfer_set_stall(xfer);
			counter_u64_add(ic->ic_ierrors, 1);
			goto tr_setup;
		}
		break;
	}
finish:
	/* Finished receive; age anything left on the FF queue by a little bump */
	/*
	 * XXX TODO: just make this a callout timer schedule so we can
	 * flush the FF staging queue if we're approaching idle.
	 */
#ifdef	IEEE80211_SUPPORT_SUPERG
	if (!(sc->sc_flags & RTWN_FW_LOADED) ||
	    sc->sc_ratectl != RTWN_RATECTL_NET80211)
		rtwn_cmd_sleepable(sc, NULL, 0, rtwn_ff_flush_all);
#endif

	/* Kick-start more transmit in case we stalled */
	rtwn_start(sc);
}