static void
cdce_ncm_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct cdce_softc *sc = usbd_xfer_softc(xfer);
	struct ifnet *ifp = uether_getifp(&sc->sc_ue);
	uint16_t x;
	uint8_t temp;
	int actlen;
	int aframes;

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:

		usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);

		DPRINTFN(10, "transfer complete: "
		    "%u bytes in %u frames\n", actlen, aframes);

		/* FALLTHROUGH */
	case USB_ST_SETUP:
		for (x = 0; x != CDCE_NCM_TX_FRAMES_MAX; x++) {
			temp = cdce_ncm_fill_tx_frames(xfer, x);
			if (temp == 0)
				break;
			if (temp == 1) {
				x++;
				break;
			}
		}

		if (x != 0) {
#ifdef USB_DEBUG
			usbd_xfer_set_interval(xfer, cdce_tx_interval);
#endif
			usbd_xfer_set_frames(xfer, x);
			usbd_transfer_submit(xfer);
		}
		break;

	default:			/* Error */
		DPRINTFN(10, "Transfer error: %s\n", usbd_errstr(error));

		/* update error counter */
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);

		if (error != USB_ERR_CANCELLED) {
			if (usbd_get_mode(sc->sc_ue.ue_udev) == USB_MODE_HOST) {
				/* try to clear stall first */
				usbd_xfer_set_stall(xfer);
				usbd_xfer_set_frames(xfer, 0);
				usbd_transfer_submit(xfer);
			}
		}
		break;
	}
}
static void
uhid_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct uhid_softc *sc = usbd_xfer_softc(xfer);
	struct usb_device_request req;
	struct usb_page_cache *pc;
	uint32_t size = sc->sc_osize;
	uint32_t actlen;
	uint8_t id;

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
	case USB_ST_SETUP:
		/* try to extract the ID byte */
		if (sc->sc_oid) {
			pc = usbd_xfer_get_frame(xfer, 0);
			if (usb_fifo_get_data(sc->sc_fifo.fp[USB_FIFO_TX],
			    pc, 0, 1, &actlen, 0)) {
				if (actlen != 1) {
					goto tr_error;
				}
				usbd_copy_out(pc, 0, &id, 1);
			} else {
				return;
			}
			if (size) {
				size--;
			}
		} else {
			id = 0;
		}

		pc = usbd_xfer_get_frame(xfer, 1);
		if (usb_fifo_get_data(sc->sc_fifo.fp[USB_FIFO_TX],
		    pc, 0, UHID_BSIZE, &actlen, 1)) {
			if (actlen != size) {
				goto tr_error;
			}
			uhid_fill_set_report(&req, sc->sc_iface_no,
			    UHID_OUTPUT_REPORT, id, size);

			pc = usbd_xfer_get_frame(xfer, 0);
			usbd_copy_in(pc, 0, &req, sizeof(req));

			usbd_xfer_set_frame_len(xfer, 0, sizeof(req));
			usbd_xfer_set_frame_len(xfer, 1, size);
			usbd_xfer_set_frames(xfer, size ? 2 : 1);
			usbd_transfer_submit(xfer);
		}
		return;

	default:
tr_error:
		/* bomb out */
		usb_fifo_get_data_error(sc->sc_fifo.fp[USB_FIFO_TX]);
		return;
	}
}
static void
uhso_mux_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct uhso_softc *sc = usbd_xfer_softc(xfer);
	struct usb_page_cache *pc;
	struct usb_device_request req;
	struct uhso_tty *ht;
	int actlen, len;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	UHSO_DPRINTF(3, "status %d\n", USB_GET_STATE(xfer));

	ht = usbd_xfer_get_priv(xfer);
	UHSO_DPRINTF(3, "ht=%p open=%d\n", ht, ht->ht_open);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		/* Got data, send to ucom */
		pc = usbd_xfer_get_frame(xfer, 1);
		len = usbd_xfer_frame_len(xfer, 1);

		UHSO_DPRINTF(3, "got %d bytes on mux port %d\n", len,
		    ht->ht_muxport);
		if (len <= 0) {
			usbd_transfer_start(sc->sc_xfer[UHSO_MUX_ENDPT_INTR]);
			break;
		}

		/* Deliver data if the TTY is open, discard otherwise */
		if (ht->ht_open)
			ucom_put_data(&sc->sc_ucom[ht->ht_muxport], pc, 0, len);
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		memset(&req, 0, sizeof(struct usb_device_request));
		req.bmRequestType = UT_READ_CLASS_INTERFACE;
		req.bRequest = UCDC_GET_ENCAPSULATED_RESPONSE;
		USETW(req.wValue, 0);
		USETW(req.wIndex, ht->ht_muxport);
		USETW(req.wLength, 1024);

		pc = usbd_xfer_get_frame(xfer, 0);
		usbd_copy_in(pc, 0, &req, sizeof(req));

		usbd_xfer_set_frame_len(xfer, 0, sizeof(req));
		usbd_xfer_set_frame_len(xfer, 1, 1024);
		usbd_xfer_set_frames(xfer, 2);
		usbd_transfer_submit(xfer);
		break;
	default:
		UHSO_DPRINTF(0, "error: %s\n", usbd_errstr(error));
		if (error == USB_ERR_CANCELLED)
			break;
		usbd_xfer_set_stall(xfer);
		goto tr_setup;
	}
}
static void
uhso_mux_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct uhso_softc *sc = usbd_xfer_softc(xfer);
	struct uhso_tty *ht;
	struct usb_page_cache *pc;
	struct usb_device_request req;
	int actlen;
	struct usb_page_search res;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	ht = usbd_xfer_get_priv(xfer);
	UHSO_DPRINTF(3, "status=%d, using mux port %d\n",
	    USB_GET_STATE(xfer), ht->ht_muxport);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		UHSO_DPRINTF(3, "wrote %zd data bytes to muxport %d\n",
		    actlen - sizeof(struct usb_device_request),
		    ht->ht_muxport);
		/* FALLTHROUGH */
	case USB_ST_SETUP:
		pc = usbd_xfer_get_frame(xfer, 1);
		if (ucom_get_data(&sc->sc_ucom[ht->ht_muxport], pc,
		    0, 32, &actlen)) {
			usbd_get_page(pc, 0, &res);
			memset(&req, 0, sizeof(struct usb_device_request));
			req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
			req.bRequest = UCDC_SEND_ENCAPSULATED_COMMAND;
			USETW(req.wValue, 0);
			USETW(req.wIndex, ht->ht_muxport);
			USETW(req.wLength, actlen);

			pc = usbd_xfer_get_frame(xfer, 0);
			usbd_copy_in(pc, 0, &req, sizeof(req));

			usbd_xfer_set_frame_len(xfer, 0, sizeof(req));
			usbd_xfer_set_frame_len(xfer, 1, actlen);
			usbd_xfer_set_frames(xfer, 2);

			UHSO_DPRINTF(3, "Prepared %d bytes for transmit "
			    "on muxport %d\n", actlen, ht->ht_muxport);

			usbd_transfer_submit(xfer);
		}
		break;
	default:
		UHSO_DPRINTF(0, "error: %s\n", usbd_errstr(error));
		if (error == USB_ERR_CANCELLED)
			break;
		break;
	}
}
static void
g_modem_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct g_modem_softc *sc = usbd_xfer_softc(xfer);
	int actlen;
	int aframes;

	usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);

	DPRINTF("st=%d aframes=%d actlen=%d bytes\n",
	    USB_GET_STATE(xfer), aframes, actlen);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:

		sc->sc_throughput += actlen;

		if (sc->sc_mode == G_MODEM_MODE_LOOP) {
			sc->sc_tx_busy = 1;
			sc->sc_data_len = actlen;
			usbd_transfer_start(sc->sc_xfer[G_MODEM_BULK_WR]);
			break;
		}
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		if ((sc->sc_mode == G_MODEM_MODE_SILENT) ||
		    (sc->sc_tx_busy != 0))
			break;

		usbd_xfer_set_frame_data(xfer, 0, sc->sc_data_buf,
		    G_MODEM_BUFSIZE);
		usbd_xfer_set_frames(xfer, 1);
		usbd_transfer_submit(xfer);
		break;

	default:			/* Error */
		DPRINTF("error=%s\n", usbd_errstr(error));

		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}
static void
uhid_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct uhid_softc *sc = usbd_xfer_softc(xfer);
	struct usb_device_request req;
	struct usb_page_cache *pc;

	pc = usbd_xfer_get_frame(xfer, 0);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		usb_fifo_put_data(sc->sc_fifo.fp[USB_FIFO_RX], pc,
		    sizeof(req), sc->sc_isize, 1);
		return;

	case USB_ST_SETUP:
		if (usb_fifo_put_bytes_max(sc->sc_fifo.fp[USB_FIFO_RX]) > 0) {
			uhid_fill_get_report(&req, sc->sc_iface_no,
			    UHID_INPUT_REPORT, sc->sc_iid, sc->sc_isize);

			usbd_copy_in(pc, 0, &req, sizeof(req));

			usbd_xfer_set_frame_len(xfer, 0, sizeof(req));
			usbd_xfer_set_frame_len(xfer, 1, sc->sc_isize);
			usbd_xfer_set_frames(xfer, sc->sc_isize ? 2 : 1);
			usbd_transfer_submit(xfer);
		}
		return;

	default:			/* Error */
		/* bomb out */
		usb_fifo_put_data_error(sc->sc_fifo.fp[USB_FIFO_RX]);
		return;
	}
}
static void
g_modem_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct g_modem_softc *sc = usbd_xfer_softc(xfer);
	int actlen;
	int aframes;
	int mod;
	int x;
	int max;

	usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);

	DPRINTF("st=%d aframes=%d actlen=%d bytes\n",
	    USB_GET_STATE(xfer), aframes, actlen);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:

		sc->sc_tx_busy = 0;
		sc->sc_throughput += actlen;

		if (sc->sc_mode == G_MODEM_MODE_LOOP) {
			/* start loop */
			usbd_transfer_start(sc->sc_xfer[G_MODEM_BULK_RD]);
			break;
		} else if ((sc->sc_mode == G_MODEM_MODE_PATTERN) &&
		    (sc->sc_tx_interval != 0)) {
			/* wait for next timeout */
			break;
		}
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		if (sc->sc_mode == G_MODEM_MODE_PATTERN) {
			mod = sc->sc_pattern_len;
			max = sc->sc_tx_interval ? mod : G_MODEM_BUFSIZE;

			if (mod == 0) {
				for (x = 0; x != max; x++)
					sc->sc_data_buf[x] = x % 255;
			} else {
				for (x = 0; x != max; x++)
					sc->sc_data_buf[x] =
					    sc->sc_pattern[x % mod];
			}

			usbd_xfer_set_frame_data(xfer, 0, sc->sc_data_buf, max);
			usbd_xfer_set_interval(xfer, 0);
			usbd_xfer_set_frames(xfer, 1);
			usbd_transfer_submit(xfer);

		} else if (sc->sc_mode == G_MODEM_MODE_LOOP) {
			if (sc->sc_tx_busy == 0)
				break;

			x = sc->sc_tx_interval;

			if (x < 0)
				x = 0;
			else if (x > 256)
				x = 256;

			usbd_xfer_set_frame_data(xfer, 0, sc->sc_data_buf,
			    sc->sc_data_len);
			usbd_xfer_set_interval(xfer, x);
			usbd_xfer_set_frames(xfer, 1);
			usbd_transfer_submit(xfer);
		} else {
			sc->sc_tx_busy = 0;
		}
		break;

	default:			/* Error */
		DPRINTF("error=%s\n", usbd_errstr(error));

		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}
static void
usie_if_rx_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct usie_softc *sc = usbd_xfer_softc(xfer);
	struct ifnet *ifp = sc->sc_ifp;
	struct mbuf *m0;
	struct mbuf *m = NULL;
	struct usie_desc *rxd;
	uint32_t actlen;
	uint16_t err;
	uint16_t pkt;
	uint16_t ipl;
	uint16_t len;
	uint16_t diff;
	uint8_t pad;
	uint8_t ipv;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		DPRINTFN(15, "rx done, actlen=%u\n", actlen);

		if (actlen < sizeof(struct usie_hip)) {
			DPRINTF("data too short %u\n", actlen);
			goto tr_setup;
		}
		m = sc->sc_rxm;
		sc->sc_rxm = NULL;

		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		if (sc->sc_rxm == NULL) {
			sc->sc_rxm = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR,
			    MJUMPAGESIZE /* could be bigger than MCLBYTES */ );
		}
		if (sc->sc_rxm == NULL) {
			DPRINTF("could not allocate Rx mbuf\n");
			ifp->if_ierrors++;
			usbd_xfer_set_stall(xfer);
			usbd_xfer_set_frames(xfer, 0);
		} else {
			/*
			 * Directly loading a mbuf cluster into DMA to
			 * save some data copying. This works because
			 * there is only one cluster.
			 */
			usbd_xfer_set_frame_data(xfer, 0,
			    mtod(sc->sc_rxm, caddr_t),
			    MIN(MJUMPAGESIZE, USIE_RXSZ_MAX));
			usbd_xfer_set_frames(xfer, 1);
		}
		usbd_transfer_submit(xfer);
		break;

	default:			/* Error */
		DPRINTF("USB transfer error, %s\n", usbd_errstr(error));

		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			ifp->if_ierrors++;
			goto tr_setup;
		}
		if (sc->sc_rxm != NULL) {
			m_freem(sc->sc_rxm);
			sc->sc_rxm = NULL;
		}
		break;
	}

	if (m == NULL)
		return;

	mtx_unlock(&sc->sc_mtx);

	m->m_pkthdr.len = m->m_len = actlen;

	err = pkt = 0;

	/* HW can aggregate multiple frames in a single USB xfer */
	for (;;) {
		rxd = mtod(m, struct usie_desc *);

		len = be16toh(rxd->hip.len) & USIE_HIP_IP_LEN_MASK;
		pad = (rxd->hip.id & USIE_HIP_PAD) ? 1 : 0;
		ipl = (len - pad - ETHER_HDR_LEN);
		if (ipl >= len) {
			DPRINTF("Corrupt frame\n");
			m_freem(m);
			break;
		}
		diff = sizeof(struct usie_desc) + ipl + pad;

		if (((rxd->hip.id & USIE_HIP_MASK) != USIE_HIP_IP) ||
		    (be16toh(rxd->desc_type) & USIE_TYPE_MASK) != USIE_IP_RX) {
			DPRINTF("received wrong type of packet\n");
			m->m_data += diff;
			m->m_pkthdr.len = (m->m_len -= diff);
			err++;
			if (m->m_pkthdr.len > 0)
				continue;
			m_freem(m);
			break;
		}
		switch (be16toh(rxd->ethhdr.ether_type)) {
		case ETHERTYPE_IP:
			ipv = NETISR_IP;
			break;
#ifdef INET6
		case ETHERTYPE_IPV6:
			ipv = NETISR_IPV6;
			break;
#endif
		default:
			DPRINTF("unsupported ether type\n");
			err++;
			break;
		}

		/* the last packet */
		if (m->m_pkthdr.len <= diff) {
			m->m_data += (sizeof(struct usie_desc) + pad);
			m->m_pkthdr.len = m->m_len = ipl;
			m->m_pkthdr.rcvif = ifp;
			BPF_MTAP(sc->sc_ifp, m);
			netisr_dispatch(ipv, m);
			break;
		}
		/* copy aggregated frames to another mbuf */
		m0 = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (__predict_false(m0 == NULL)) {
			DPRINTF("could not allocate mbuf\n");
			err++;
			m_freem(m);
			break;
		}
		m_copydata(m, sizeof(struct usie_desc) + pad, ipl,
		    mtod(m0, caddr_t));
		m0->m_pkthdr.rcvif = ifp;
		m0->m_pkthdr.len = m0->m_len = ipl;

		BPF_MTAP(sc->sc_ifp, m0);
		netisr_dispatch(ipv, m0);

		m->m_data += diff;
		m->m_pkthdr.len = (m->m_len -= diff);
	}

	mtx_lock(&sc->sc_mtx);

	ifp->if_ierrors += err;
	ifp->if_ipackets += pkt;
}
static void
cdce_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct cdce_softc *sc = usbd_xfer_softc(xfer);
	struct mbuf *m;
	uint8_t x;
	int actlen, aframes, len;

	usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:

		DPRINTF("received %u bytes in %u frames\n", actlen, aframes);

		for (x = 0; x != aframes; x++) {
			m = sc->sc_rx_buf[x];
			sc->sc_rx_buf[x] = NULL;
			len = usbd_xfer_frame_len(xfer, x);

			/* Strip off CRC added by Zaurus, if any */
			if ((sc->sc_flags & CDCE_FLAG_ZAURUS) && len >= 14)
				len -= 4;

			if (len < sizeof(struct ether_header)) {
				m_freem(m);
				continue;
			}
			/* queue up mbuf */
			uether_rxmbuf(&sc->sc_ue, m, len);
		}
		/* FALLTHROUGH */
	case USB_ST_SETUP:
		/*
		 * TODO: Implement support for multi frame transfers,
		 * when the USB hardware supports it.
		 */
		for (x = 0; x != 1; x++) {
			if (sc->sc_rx_buf[x] == NULL) {
				m = uether_newbuf();
				if (m == NULL)
					goto tr_stall;
				sc->sc_rx_buf[x] = m;
			} else {
				m = sc->sc_rx_buf[x];
			}

			usbd_xfer_set_frame_data(xfer, x, m->m_data, m->m_len);
		}
		/* set number of frames and start hardware */
		usbd_xfer_set_frames(xfer, x);
		usbd_transfer_submit(xfer);
		/* flush any received frames */
		uether_rxflush(&sc->sc_ue);
		break;

	default:			/* Error */
		DPRINTF("error = %s\n", usbd_errstr(error));

		if (error != USB_ERR_CANCELLED) {
tr_stall:
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			usbd_xfer_set_frames(xfer, 0);
			usbd_transfer_submit(xfer);
			break;
		}

		/* need to free the RX-mbufs when we are cancelled */
		cdce_free_queue(sc->sc_rx_buf, CDCE_FRAMES_MAX);
		break;
	}
}
static void
cdce_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct cdce_softc *sc = usbd_xfer_softc(xfer);
	struct ifnet *ifp = uether_getifp(&sc->sc_ue);
	struct mbuf *m;
	struct mbuf *mt;
	uint32_t crc;
	uint8_t x;
	int actlen, aframes;

	usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);

	DPRINTFN(1, "\n");

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		DPRINTFN(11, "transfer complete: %u bytes in %u frames\n",
		    actlen, aframes);

		ifp->if_opackets++;

		/* free all previous TX buffers */
		cdce_free_queue(sc->sc_tx_buf, CDCE_FRAMES_MAX);

		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		for (x = 0; x != CDCE_FRAMES_MAX; x++) {
			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);

			if (m == NULL)
				break;

			if (sc->sc_flags & CDCE_FLAG_ZAURUS) {
				/*
				 * Zaurus wants a 32-bit CRC appended
				 * to every frame
				 */
				crc = cdce_m_crc32(m, 0, m->m_pkthdr.len);
				crc = htole32(crc);

				if (!m_append(m, 4, (void *)&crc)) {
					m_freem(m);
					ifp->if_oerrors++;
					continue;
				}
			}
			if (m->m_len != m->m_pkthdr.len) {
				mt = m_defrag(m, M_DONTWAIT);
				if (mt == NULL) {
					m_freem(m);
					ifp->if_oerrors++;
					continue;
				}
				m = mt;
			}
			if (m->m_pkthdr.len > MCLBYTES) {
				m->m_pkthdr.len = MCLBYTES;
			}
			sc->sc_tx_buf[x] = m;
			usbd_xfer_set_frame_data(xfer, x, m->m_data, m->m_len);

			/*
			 * If there's a BPF listener, bounce a copy of
			 * this frame to him:
			 */
			BPF_MTAP(ifp, m);
		}
		if (x != 0) {
			usbd_xfer_set_frames(xfer, x);
			usbd_transfer_submit(xfer);
		}
		break;

	default:			/* Error */
		DPRINTFN(11, "transfer error, %s\n", usbd_errstr(error));

		/* free all previous TX buffers */
		cdce_free_queue(sc->sc_tx_buf, CDCE_FRAMES_MAX);

		/* count output errors */
		ifp->if_oerrors++;

		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}
static void
cdce_ncm_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct cdce_softc *sc = usbd_xfer_softc(xfer);
	struct usb_page_cache *pc = usbd_xfer_get_frame(xfer, 0);
	struct ifnet *ifp = uether_getifp(&sc->sc_ue);
	struct mbuf *m;
	int sumdata;
	int sumlen;
	int actlen;
	int aframes;
	int temp;
	int nframes;
	int x;
	int offset;

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:

		usbd_xfer_status(xfer, &actlen, &sumlen, &aframes, NULL);

		DPRINTFN(1, "received %u bytes in %u frames\n",
		    actlen, aframes);

		if (actlen < (sizeof(sc->sc_ncm.hdr) +
		    sizeof(sc->sc_ncm.dpt))) {
			DPRINTFN(1, "frame too short\n");
			goto tr_setup;
		}
		usbd_copy_out(pc, 0, &(sc->sc_ncm.hdr),
		    sizeof(sc->sc_ncm.hdr));

		if ((sc->sc_ncm.hdr.dwSignature[0] != 'N') ||
		    (sc->sc_ncm.hdr.dwSignature[1] != 'C') ||
		    (sc->sc_ncm.hdr.dwSignature[2] != 'M') ||
		    (sc->sc_ncm.hdr.dwSignature[3] != 'H')) {
			DPRINTFN(1, "invalid HDR signature: "
			    "0x%02x:0x%02x:0x%02x:0x%02x\n",
			    sc->sc_ncm.hdr.dwSignature[0],
			    sc->sc_ncm.hdr.dwSignature[1],
			    sc->sc_ncm.hdr.dwSignature[2],
			    sc->sc_ncm.hdr.dwSignature[3]);
			goto tr_stall;
		}
		temp = UGETW(sc->sc_ncm.hdr.wBlockLength);
		if (temp > sumlen) {
			DPRINTFN(1, "unsupported block length %u/%u\n",
			    temp, sumlen);
			goto tr_stall;
		}
		temp = UGETW(sc->sc_ncm.hdr.wDptIndex);
		if ((temp + sizeof(sc->sc_ncm.dpt)) > actlen) {
			DPRINTFN(1, "invalid DPT index: 0x%04x\n", temp);
			goto tr_stall;
		}
		usbd_copy_out(pc, temp, &(sc->sc_ncm.dpt),
		    sizeof(sc->sc_ncm.dpt));

		if ((sc->sc_ncm.dpt.dwSignature[0] != 'N') ||
		    (sc->sc_ncm.dpt.dwSignature[1] != 'C') ||
		    (sc->sc_ncm.dpt.dwSignature[2] != 'M') ||
		    (sc->sc_ncm.dpt.dwSignature[3] != '0')) {
			DPRINTFN(1, "invalid DPT signature: "
			    "0x%02x:0x%02x:0x%02x:0x%02x\n",
			    sc->sc_ncm.dpt.dwSignature[0],
			    sc->sc_ncm.dpt.dwSignature[1],
			    sc->sc_ncm.dpt.dwSignature[2],
			    sc->sc_ncm.dpt.dwSignature[3]);
			goto tr_stall;
		}
		nframes = UGETW(sc->sc_ncm.dpt.wLength) / 4;

		/* Subtract size of header and last zero padded entry */
		if (nframes >= (2 + 1))
			nframes -= (2 + 1);
		else
			nframes = 0;

		DPRINTFN(1, "nframes = %u\n", nframes);

		temp += sizeof(sc->sc_ncm.dpt);

		if ((temp + (4 * nframes)) > actlen)
			goto tr_stall;

		if (nframes > CDCE_NCM_SUBFRAMES_MAX) {
			DPRINTFN(1, "Truncating number of frames from "
			    "%u to %u\n", nframes, CDCE_NCM_SUBFRAMES_MAX);
			nframes = CDCE_NCM_SUBFRAMES_MAX;
		}
		usbd_copy_out(pc, temp, &(sc->sc_ncm.dp), (4 * nframes));

		sumdata = 0;

		for (x = 0; x != nframes; x++) {
			offset = UGETW(sc->sc_ncm.dp[x].wFrameIndex);
			temp = UGETW(sc->sc_ncm.dp[x].wFrameLength);

			if ((offset == 0) ||
			    (temp < sizeof(struct ether_header)) ||
			    (temp > (MCLBYTES - ETHER_ALIGN))) {
				DPRINTFN(1, "NULL frame detected at %d\n", x);
				m = NULL;
				/* silently ignore this frame */
				continue;
			} else if ((offset + temp) > actlen) {
				DPRINTFN(1, "invalid frame "
				    "detected at %d\n", x);
				m = NULL;
				/* silently ignore this frame */
				continue;
			} else if (temp > (MHLEN - ETHER_ALIGN)) {
				m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
			} else {
				m = m_gethdr(M_DONTWAIT, MT_DATA);
			}

			DPRINTFN(16, "frame %u, offset = %u, length = %u\n",
			    x, offset, temp);

			/* check if we have a buffer */
			if (m) {
				m_adj(m, ETHER_ALIGN);

				usbd_copy_out(pc, offset, m->m_data, temp);

				/* enqueue */
				uether_rxmbuf(&sc->sc_ue, m, temp);

				sumdata += temp;
			} else {
				ifp->if_ierrors++;
			}
		}

		DPRINTFN(1, "Efficiency: %u/%u bytes\n", sumdata, actlen);

		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		usbd_xfer_set_frame_len(xfer, 0, sc->sc_ncm.rx_max);
		usbd_xfer_set_frames(xfer, 1);
		usbd_transfer_submit(xfer);
		uether_rxflush(&sc->sc_ue);	/* must be last */
		break;

	default:			/* Error */
		DPRINTFN(1, "error = %s\n", usbd_errstr(error));

		if (error != USB_ERR_CANCELLED) {
tr_stall:
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			usbd_xfer_set_frames(xfer, 0);
			usbd_transfer_submit(xfer);
		}
		break;
	}
}
static void
usbd_non_isoc_callback(struct usb_xfer *xfer, usb_error_t error)
{
	irp *ip;
	struct ndis_softc *sc = usbd_xfer_softc(xfer);
	struct ndisusb_ep *ne = usbd_xfer_get_priv(xfer);
	struct ndisusb_xfer *nx;
	struct usbd_urb_bulk_or_intr_transfer *ubi;
	struct usb_page_cache *pc;
	uint8_t irql;
	uint32_t len;
	union usbd_urb *urb;
	usb_endpoint_descriptor_t *ep;
	int actlen, sumlen;

	usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		nx = usbd_aq_getfirst(sc, ne);
		pc = usbd_xfer_get_frame(xfer, 0);
		if (nx == NULL)
			return;

		/* copy in data with regard to the URB */
		if (ne->ne_dirin != 0)
			usbd_copy_out(pc, 0, nx->nx_urbbuf, actlen);
		nx->nx_urbbuf += actlen;
		nx->nx_urbactlen += actlen;
		nx->nx_urblen -= actlen;

		/* check for short transfer */
		if (actlen < sumlen)
			nx->nx_urblen = 0;
		else {
			/* check remainder */
			if (nx->nx_urblen > 0) {
				KeAcquireSpinLock(&ne->ne_lock, &irql);
				InsertHeadList((&ne->ne_active),
				    (&nx->nx_next));
				KeReleaseSpinLock(&ne->ne_lock, irql);

				ip = nx->nx_priv;
				urb = usbd_geturb(ip);
				ubi = &urb->uu_bulkintr;
				ep = ubi->ubi_epdesc;
				goto extra;
			}
		}
		usbd_xfer_complete(sc, ne, nx,
		    ((actlen < sumlen) && (nx->nx_shortxfer == 0)) ?
		    USB_ERR_SHORT_XFER : USB_ERR_NORMAL_COMPLETION);

		/* fall through */
	case USB_ST_SETUP:
next:
		/* get next transfer */
		KeAcquireSpinLock(&ne->ne_lock, &irql);
		if (IsListEmpty(&ne->ne_pending)) {
			KeReleaseSpinLock(&ne->ne_lock, irql);
			return;
		}
		nx = CONTAINING_RECORD(ne->ne_pending.nle_flink,
		    struct ndisusb_xfer, nx_next);
		RemoveEntryList(&nx->nx_next);
		/* add an entry to the active queue's tail. */
		InsertTailList((&ne->ne_active), (&nx->nx_next));
		KeReleaseSpinLock(&ne->ne_lock, irql);

		ip = nx->nx_priv;
		urb = usbd_geturb(ip);
		ubi = &urb->uu_bulkintr;
		ep = ubi->ubi_epdesc;

		nx->nx_urbbuf = ubi->ubi_trans_buf;
		nx->nx_urbactlen = 0;
		nx->nx_urblen = ubi->ubi_trans_buflen;
		nx->nx_shortxfer = (ubi->ubi_trans_flags &
		    USBD_SHORT_TRANSFER_OK) ? 1 : 0;
extra:
		len = MIN(usbd_xfer_max_len(xfer), nx->nx_urblen);
		pc = usbd_xfer_get_frame(xfer, 0);
		if (UE_GET_DIR(ep->bEndpointAddress) == UE_DIR_OUT)
			usbd_copy_in(pc, 0, nx->nx_urbbuf, len);
		usbd_xfer_set_frame_len(xfer, 0, len);
		usbd_xfer_set_frames(xfer, 1);
		usbd_transfer_submit(xfer);
		break;
	default:
		nx = usbd_aq_getfirst(sc, ne);
		if (nx == NULL)
			return;

		if (error != USB_ERR_CANCELLED) {
			usbd_xfer_set_stall(xfer);
			device_printf(sc->ndis_dev, "usb xfer warning (%s)\n",
			    usbd_errstr(error));
		}
		usbd_xfer_complete(sc, ne, nx, error);
		if (error != USB_ERR_CANCELLED)
			goto next;
		break;
	}
}
static void
ipheth_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct ipheth_softc *sc = usbd_xfer_softc(xfer);
	struct mbuf *m;
	uint8_t x;
	int actlen;
	int aframes;
	int len;

	usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:

		DPRINTF("received %u bytes in %u frames\n", actlen, aframes);

		for (x = 0; x != aframes; x++) {
			m = sc->sc_rx_buf[x];
			sc->sc_rx_buf[x] = NULL;
			len = usbd_xfer_frame_len(xfer, x);

			if (len < (sizeof(struct ether_header) +
			    IPHETH_RX_ADJ)) {
				m_freem(m);
				continue;
			}

			m_adj(m, IPHETH_RX_ADJ);

			/* queue up mbuf */
			uether_rxmbuf(&sc->sc_ue, m, len - IPHETH_RX_ADJ);
		}

		/* FALLTHROUGH */
	case USB_ST_SETUP:

		for (x = 0; x != IPHETH_RX_FRAMES_MAX; x++) {
			if (sc->sc_rx_buf[x] == NULL) {
				m = uether_newbuf();
				if (m == NULL)
					goto tr_stall;

				/* cancel alignment for ethernet */
				m_adj(m, ETHER_ALIGN);

				sc->sc_rx_buf[x] = m;
			} else {
				m = sc->sc_rx_buf[x];
			}

			usbd_xfer_set_frame_data(xfer, x, m->m_data, m->m_len);
		}
		/* set number of frames and start hardware */
		usbd_xfer_set_frames(xfer, x);
		usbd_transfer_submit(xfer);
		/* flush any received frames */
		uether_rxflush(&sc->sc_ue);
		break;

	default:			/* Error */
		DPRINTF("error = %s\n", usbd_errstr(error));

		if (error != USB_ERR_CANCELLED) {
tr_stall:
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			usbd_xfer_set_frames(xfer, 0);
			usbd_transfer_submit(xfer);
			break;
		}
		/* need to free the RX-mbufs when we are cancelled */
		ipheth_free_queue(sc->sc_rx_buf, IPHETH_RX_FRAMES_MAX);
		break;
	}
}
static void
ipheth_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct ipheth_softc *sc = usbd_xfer_softc(xfer);
	struct ifnet *ifp = uether_getifp(&sc->sc_ue);
	struct usb_page_cache *pc;
	struct mbuf *m;
	uint8_t x;
	int actlen;
	int aframes;

	usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);

	DPRINTFN(1, "\n");

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		DPRINTFN(11, "transfer complete: %u bytes in %u frames\n",
		    actlen, aframes);

		ifp->if_opackets++;

		/* free all previous TX buffers */
		ipheth_free_queue(sc->sc_tx_buf, IPHETH_TX_FRAMES_MAX);

		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		for (x = 0; x != IPHETH_TX_FRAMES_MAX; x++) {
			IFQ_DRV_DEQUEUE(&ifp->if_snd, m);

			if (m == NULL)
				break;

			usbd_xfer_set_frame_offset(xfer,
			    x * IPHETH_BUF_SIZE, x);

			pc = usbd_xfer_get_frame(xfer, x);

			sc->sc_tx_buf[x] = m;

			if (m->m_pkthdr.len > IPHETH_BUF_SIZE)
				m->m_pkthdr.len = IPHETH_BUF_SIZE;

			usbd_m_copy_in(pc, 0, m, 0, m->m_pkthdr.len);

			usbd_xfer_set_frame_len(xfer, x, IPHETH_BUF_SIZE);

			if (IPHETH_BUF_SIZE != m->m_pkthdr.len) {
				usbd_frame_zero(pc, m->m_pkthdr.len,
				    IPHETH_BUF_SIZE - m->m_pkthdr.len);
			}

			/*
			 * If there's a BPF listener, bounce a copy of
			 * this frame to him:
			 */
			BPF_MTAP(ifp, m);
		}
		if (x != 0) {
			usbd_xfer_set_frames(xfer, x);
			usbd_transfer_submit(xfer);
		}
		break;

	default:			/* Error */
		DPRINTFN(11, "transfer error, %s\n", usbd_errstr(error));

		/* free all previous TX buffers */
		ipheth_free_queue(sc->sc_tx_buf, IPHETH_TX_FRAMES_MAX);

		/* count output errors */
		ifp->if_oerrors++;

		if (error != USB_ERR_CANCELLED) {
			/* try to clear stall first */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}
static void
ubt_ctrl_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct ubt_softc *sc = usbd_xfer_softc(xfer);
	struct usb_device_request req;
	struct mbuf *m;
	struct usb_page_cache *pc;
	int actlen;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		UBT_INFO(sc, "sent %d bytes to control pipe\n", actlen);
		UBT_STAT_BYTES_SENT(sc, actlen);
		UBT_STAT_PCKTS_SENT(sc);
		/* FALLTHROUGH */

	case USB_ST_SETUP:
send_next:
		/* Get next command mbuf, if any */
		UBT_NG_LOCK(sc);
		NG_BT_MBUFQ_DEQUEUE(&sc->sc_cmdq, m);
		UBT_NG_UNLOCK(sc);

		if (m == NULL) {
			UBT_INFO(sc, "HCI command queue is empty\n");
			break;	/* transfer complete */
		}

		/* Initialize a USB control request and then schedule it */
		bzero(&req, sizeof(req));
		req.bmRequestType = UBT_HCI_REQUEST;
		USETW(req.wLength, m->m_pkthdr.len);

		UBT_INFO(sc, "Sending control request, "
		    "bmRequestType=0x%02x, wLength=%d\n",
		    req.bmRequestType, UGETW(req.wLength));

		pc = usbd_xfer_get_frame(xfer, 0);
		usbd_copy_in(pc, 0, &req, sizeof(req));
		pc = usbd_xfer_get_frame(xfer, 1);
		usbd_m_copy_in(pc, 0, m, 0, m->m_pkthdr.len);

		usbd_xfer_set_frame_len(xfer, 0, sizeof(req));
		usbd_xfer_set_frame_len(xfer, 1, m->m_pkthdr.len);
		usbd_xfer_set_frames(xfer, 2);

		NG_FREE_M(m);

		usbd_transfer_submit(xfer);
		break;

	default:			/* Error */
		if (error != USB_ERR_CANCELLED) {
			UBT_WARN(sc, "control transfer failed: %s\n",
			    usbd_errstr(error));

			UBT_STAT_OERROR(sc);
			goto send_next;
		}

		/* transfer cancelled */
		break;
	}
} /* ubt_ctrl_write_callback */
static void
usbd_ctrl_callback(struct usb_xfer *xfer, usb_error_t error)
{
	irp *ip;
	struct ndis_softc *sc = usbd_xfer_softc(xfer);
	struct ndisusb_ep *ne = usbd_xfer_get_priv(xfer);
	struct ndisusb_xfer *nx;
	uint8_t irql;
	union usbd_urb *urb;
	struct usbd_urb_vendor_or_class_request *vcreq;
	struct usb_page_cache *pc;
	uint8_t type = 0;
	struct usb_device_request req;
	int len;

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		nx = usbd_aq_getfirst(sc, ne);
		if (nx == NULL)
			return;

		ip = nx->nx_priv;
		urb = usbd_geturb(ip);
		vcreq = &urb->uu_vcreq;

		if (vcreq->uvc_trans_flags & USBD_TRANSFER_DIRECTION_IN) {
			pc = usbd_xfer_get_frame(xfer, 1);
			len = usbd_xfer_frame_len(xfer, 1);

			usbd_copy_out(pc, 0, vcreq->uvc_trans_buf, len);
			nx->nx_urbactlen += len;
		}

		usbd_xfer_complete(sc, ne, nx, USB_ERR_NORMAL_COMPLETION);
		/* fall through */
	case USB_ST_SETUP:
next:
		/* get next transfer */
		KeAcquireSpinLock(&ne->ne_lock, &irql);
		if (IsListEmpty(&ne->ne_pending)) {
			KeReleaseSpinLock(&ne->ne_lock, irql);
			return;
		}
		nx = CONTAINING_RECORD(ne->ne_pending.nle_flink,
		    struct ndisusb_xfer, nx_next);
		RemoveEntryList(&nx->nx_next);
		/* add an entry to the active queue's tail. */
		InsertTailList((&ne->ne_active), (&nx->nx_next));
		KeReleaseSpinLock(&ne->ne_lock, irql);

		ip = nx->nx_priv;
		urb = usbd_geturb(ip);
		vcreq = &urb->uu_vcreq;

		switch (urb->uu_hdr.uuh_func) {
		case URB_FUNCTION_CLASS_DEVICE:
			type = UT_CLASS | UT_DEVICE;
			break;
		case URB_FUNCTION_CLASS_INTERFACE:
			type = UT_CLASS | UT_INTERFACE;
			break;
		case URB_FUNCTION_CLASS_OTHER:
			type = UT_CLASS | UT_OTHER;
			break;
		case URB_FUNCTION_CLASS_ENDPOINT:
			type = UT_CLASS | UT_ENDPOINT;
			break;
		case URB_FUNCTION_VENDOR_DEVICE:
			type = UT_VENDOR | UT_DEVICE;
			break;
		case URB_FUNCTION_VENDOR_INTERFACE:
			type = UT_VENDOR | UT_INTERFACE;
			break;
		case URB_FUNCTION_VENDOR_OTHER:
			type = UT_VENDOR | UT_OTHER;
			break;
		case URB_FUNCTION_VENDOR_ENDPOINT:
			type = UT_VENDOR | UT_ENDPOINT;
			break;
		default:
			/* never reached. */
			break;
		}

		type |= (vcreq->uvc_trans_flags & USBD_TRANSFER_DIRECTION_IN) ?
		    UT_READ : UT_WRITE;
		type |= vcreq->uvc_reserved1;

		req.bmRequestType = type;
		req.bRequest = vcreq->uvc_req;
		USETW(req.wIndex, vcreq->uvc_idx);
		USETW(req.wValue, vcreq->uvc_value);
		USETW(req.wLength, vcreq->uvc_trans_buflen);

		nx->nx_urbbuf = vcreq->uvc_trans_buf;
		nx->nx_urblen = vcreq->uvc_trans_buflen;
		nx->nx_urbactlen = 0;

		pc = usbd_xfer_get_frame(xfer, 0);
		usbd_copy_in(pc, 0, &req, sizeof(req));
		usbd_xfer_set_frame_len(xfer, 0, sizeof(req));
		usbd_xfer_set_frames(xfer, 1);

		if (vcreq->uvc_trans_flags & USBD_TRANSFER_DIRECTION_IN) {
			if (vcreq->uvc_trans_buflen >= USBD_CTRL_READ_BUFFER_SP)
				device_printf(sc->ndis_dev,
				    "warning: not enough buffer space (%d).\n",
				    vcreq->uvc_trans_buflen);
			usbd_xfer_set_frame_len(xfer, 1,
			    MIN(usbd_xfer_max_len(xfer),
			    vcreq->uvc_trans_buflen));
			usbd_xfer_set_frames(xfer, 2);
		} else {
			if (nx->nx_urblen > USBD_CTRL_WRITE_BUFFER_SP)
				device_printf(sc->ndis_dev,
				    "warning: not enough write buffer space"
				    " (%d).\n", nx->nx_urblen);
			/*
			 * XXX Local testing has not yet hit a case that
			 * requires an extra buffer, but this may need to be
			 * updated in the future.
			 */
			if (nx->nx_urblen > 0) {
				pc = usbd_xfer_get_frame(xfer, 1);
				usbd_copy_in(pc, 0, nx->nx_urbbuf,
				    nx->nx_urblen);
				usbd_xfer_set_frame_len(xfer, 1,
				    nx->nx_urblen);
				usbd_xfer_set_frames(xfer, 2);
			}
		}
		usbd_transfer_submit(xfer);
		break;
	default:
		nx = usbd_aq_getfirst(sc, ne);
		if (nx == NULL)
			return;

		if (error != USB_ERR_CANCELLED) {
			usbd_xfer_set_stall(xfer);
			device_printf(sc->ndis_dev, "usb xfer warning (%s)\n",
			    usbd_errstr(error));
		}
		usbd_xfer_complete(sc, ne, nx, error);
		if (error != USB_ERR_CANCELLED)
			goto next;
		break;
	}
}
static void
cdce_intr_write_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct cdce_softc *sc = usbd_xfer_softc(xfer);
	struct usb_cdc_notification req;
	struct usb_page_cache *pc;
	uint32_t speed;
	int actlen;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:

		DPRINTF("Transferred %d bytes\n", actlen);

		switch (sc->sc_notify_state) {
		case CDCE_NOTIFY_NETWORK_CONNECTION:
			sc->sc_notify_state = CDCE_NOTIFY_SPEED_CHANGE;
			break;
		case CDCE_NOTIFY_SPEED_CHANGE:
			sc->sc_notify_state = CDCE_NOTIFY_DONE;
			break;
		default:
			break;
		}

		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		/*
		 * Inform the host about the connection. Required by the USB
		 * CDC specification and needed to communicate with the
		 * Mac OS X USB host stack, although some of the values seem
		 * to be ignored by Mac OS X.
		 */
		if (sc->sc_notify_state == CDCE_NOTIFY_NETWORK_CONNECTION) {
			req.bmRequestType = UCDC_NOTIFICATION;
			req.bNotification = UCDC_N_NETWORK_CONNECTION;
			req.wIndex[0] = sc->sc_ifaces_index[1];
			req.wIndex[1] = 0;
			USETW(req.wValue, 1);	/* Connected */
			USETW(req.wLength, 0);

			pc = usbd_xfer_get_frame(xfer, 0);
			usbd_copy_in(pc, 0, &req, sizeof(req));
			usbd_xfer_set_frame_len(xfer, 0, sizeof(req));
			usbd_xfer_set_frames(xfer, 1);
			usbd_transfer_submit(xfer);

		} else if (sc->sc_notify_state == CDCE_NOTIFY_SPEED_CHANGE) {
			req.bmRequestType = UCDC_NOTIFICATION;
			req.bNotification = UCDC_N_CONNECTION_SPEED_CHANGE;
			req.wIndex[0] = sc->sc_ifaces_index[1];
			req.wIndex[1] = 0;
			USETW(req.wValue, 0);
			USETW(req.wLength, 8);

			/* Peak theoretical bulk transfer rate in bits/s */
			if (usbd_get_speed(sc->sc_ue.ue_udev) != USB_SPEED_FULL)
				speed = (13 * 512 * 8 * 1000 * 8);
			else
				speed = (19 * 64 * 1 * 1000 * 8);

			USETDW(req.data + 0, speed);	/* Upstream bit rate */
			USETDW(req.data + 4, speed);	/* Downstream bit rate */

			pc = usbd_xfer_get_frame(xfer, 0);
			usbd_copy_in(pc, 0, &req, sizeof(req));
			usbd_xfer_set_frame_len(xfer, 0, sizeof(req));
			usbd_xfer_set_frames(xfer, 1);
			usbd_transfer_submit(xfer);
		}
		break;

	default:			/* Error */
		if (error != USB_ERR_CANCELLED) {
			if (usbd_get_mode(sc->sc_ue.ue_udev) == USB_MODE_HOST) {
				/* start clear stall */
				usbd_xfer_set_stall(xfer);
			}
			goto tr_setup;
		}
		break;
	}
}
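/*
 * Illustrative sketch only, not part of any driver above: every callback in
 * this file follows the same usb(9) state-machine pattern, reduced here to
 * its minimal form.  The "skel_softc" type and the "skel_bulk_callback" name
 * are hypothetical placeholders; only the usb(9) calls themselves are taken
 * from the functions above.  The block is wrapped in "#if 0" so it is never
 * compiled.
 */
#if 0	/* example skeleton */
static void
skel_bulk_callback(struct usb_xfer *xfer, usb_error_t error)
{
	struct skel_softc *sc = usbd_xfer_softc(xfer);	/* hypothetical softc */
	int actlen;

	usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_TRANSFERRED:
		/* consume the "actlen" bytes that just completed */
		/* FALLTHROUGH */
	case USB_ST_SETUP:
tr_setup:
		/* load the next frame and (re)start the transfer */
		usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
		usbd_xfer_set_frames(xfer, 1);
		usbd_transfer_submit(xfer);
		break;

	default:			/* Error */
		if (error != USB_ERR_CANCELLED) {
			/* try to clear the stall, then retry the setup path */
			usbd_xfer_set_stall(xfer);
			goto tr_setup;
		}
		break;
	}
}
#endif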