/*------------------------------------------------------------------------*
 *	usb_pc_common_mem_cb - BUS-DMA callback function
 *------------------------------------------------------------------------*/
static void
usb_pc_common_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error, uint8_t isload)
{
	struct usb_dma_parent_tag *uptag;
	struct usb_page_cache *pc;
	struct usb_page *pg;
	usb_size_t rem;
	uint8_t owned;

	pc = arg;
	uptag = pc->tag_parent;

	/*
	 * XXX There is sometimes recursive locking here.
	 * XXX We should try to find a better solution.
	 * XXX Until further the "owned" variable does
	 * XXX the trick.
	 */

	if (error) {
		goto done;
	}
	pg = pc->page_start;
	pg->physaddr = segs->ds_addr & ~(USB_PAGE_SIZE - 1);
	rem = segs->ds_addr & (USB_PAGE_SIZE - 1);
	pc->page_offset_buf = rem;
	pc->page_offset_end += rem;
	nseg--;
#ifdef USB_DEBUG
	if (rem != (USB_P2U(pc->buffer) & (USB_PAGE_SIZE - 1))) {
		/*
		 * This check verifies that the physical address is correct:
		 */
		DPRINTFN(0, "Page offset was not preserved\n");
		error = 1;
		goto done;
	}
#endif
	while (nseg > 0) {
		nseg--;
		segs++;
		pg++;
		pg->physaddr = segs->ds_addr & ~(USB_PAGE_SIZE - 1);
	}

done:
	owned = mtx_owned(uptag->mtx);
	if (!owned)
		mtx_lock(uptag->mtx);

	uptag->dma_error = (error ? 1 : 0);
	if (isload) {
		(uptag->func) (uptag);
	} else {
		cv_broadcast(uptag->cv);
	}
	if (!owned)
		mtx_unlock(uptag->mtx);
}
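/*
 * Illustrative sketch: usb_pc_common_mem_cb() is not handed to
 * bus_dmamap_load() directly, because BUS-DMA callbacks take only four
 * arguments.  It is normally reached through thin wrappers that supply
 * the "isload" flag.  The wrapper names below are assumptions for
 * illustration; the surrounding driver may use different ones.
 */
static void
usb_pc_alloc_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error)
{
	/* allocation path: wake up the waiter via the condition variable */
	usb_pc_common_mem_cb(arg, segs, nseg, error, 0);
}

static void
usb_pc_load_mem_cb(void *arg, bus_dma_segment_t *segs,
    int nseg, int error)
{
	/* deferred load path: invoke the parent tag's callback function */
	usb_pc_common_mem_cb(arg, segs, nseg, error, 1);
}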
static void
ugen_default_fs_callback(struct usb_xfer *xfer, usb_error_t error)
{
	;				/* workaround for a bug in "indent" */

	DPRINTF("st=%u alen=%u aframes=%u\n",
	    USB_GET_STATE(xfer), xfer->actlen, xfer->aframes);

	switch (USB_GET_STATE(xfer)) {
	case USB_ST_SETUP:
		usbd_transfer_submit(xfer);
		break;
	default:
		ugen_fs_set_complete(xfer->priv_sc, USB_P2U(xfer->priv_fifo));
		break;
	}
}
/*------------------------------------------------------------------------*
 *	usb_pc_buffer_is_aligned - verify alignment
 *
 * This function is used to check if a page cache buffer is properly
 * aligned to reduce the use of bounce buffers in PIO mode.
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_buffer_is_aligned(struct usb_page_cache *pc, usb_frlength_t offset,
    usb_frlength_t len, usb_frlength_t mask)
{
	struct usb_page_search buf_res;

	while (len != 0) {
		usbd_get_page(pc, offset, &buf_res);

		if (buf_res.length > len)
			buf_res.length = len;
		if (USB_P2U(buf_res.buffer) & mask)
			return (0);
		if (buf_res.length & mask)
			return (0);

		offset += buf_res.length;
		len -= buf_res.length;
	}
	return (1);
}
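/*
 * Illustrative sketch of how a PIO-capable host controller driver might
 * use the check above: a mask of 3 requires 32-bit alignment of both
 * the buffer addresses and the segment lengths.  The function name and
 * return convention here are hypothetical.
 */
static uint8_t
example_use_32bit_pio(struct usb_page_cache *pc, usb_frlength_t offset,
    usb_frlength_t len)
{
	if (usb_pc_buffer_is_aligned(pc, offset, len, 3)) {
		/* safe to move data with 4-byte word accesses */
		return (1);
	}
	/* otherwise fall back to a bounce buffer */
	return (0);
}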
static int
ugen_ioctl(struct usb_fifo *f, u_long cmd, void *addr, int fflags)
{
	struct usb_config usb_config[1];
	struct usb_device_request req;
	union {
		struct usb_fs_complete *pcomp;
		struct usb_fs_start *pstart;
		struct usb_fs_stop *pstop;
		struct usb_fs_open *popen;
		struct usb_fs_open_stream *popen_stream;
		struct usb_fs_close *pclose;
		struct usb_fs_clear_stall_sync *pstall;
		void   *addr;
	}     u;
	struct usb_endpoint *ep;
	struct usb_endpoint_descriptor *ed;
	struct usb_xfer *xfer;
	int error = 0;
	uint8_t iface_index;
	uint8_t isread;
	uint8_t ep_index;
	uint8_t pre_scale;

	u.addr = addr;

	DPRINTFN(6, "cmd=0x%08lx\n", cmd);

	switch (cmd) {
	case USB_FS_COMPLETE:
		/* fetch the index of a completed transfer, if any */
		mtx_lock(f->priv_mtx);
		error = ugen_fs_get_complete(f, &ep_index);
		mtx_unlock(f->priv_mtx);

		if (error) {
			error = EBUSY;
			break;
		}
		u.pcomp->ep_index = ep_index;
		error = ugen_fs_copy_out(f, u.pcomp->ep_index);
		break;

	case USB_FS_START:
		/* copy in the transfer description and start the transfer */
		error = ugen_fs_copy_in(f, u.pstart->ep_index);
		if (error)
			break;
		mtx_lock(f->priv_mtx);
		xfer = f->fs_xfer[u.pstart->ep_index];
		usbd_transfer_start(xfer);
		mtx_unlock(f->priv_mtx);
		break;

	case USB_FS_STOP:
		if (u.pstop->ep_index >= f->fs_ep_max) {
			error = EINVAL;
			break;
		}
		mtx_lock(f->priv_mtx);
		xfer = f->fs_xfer[u.pstop->ep_index];
		if (usbd_transfer_pending(xfer)) {
			usbd_transfer_stop(xfer);

			/*
			 * Check if the USB transfer was stopped
			 * before it was even started. Else a cancel
			 * callback will be pending.
			 */
			if (!xfer->flags_int.transferring) {
				ugen_fs_set_complete(xfer->priv_sc,
				    USB_P2U(xfer->priv_fifo));
			}
		}
		mtx_unlock(f->priv_mtx);
		break;

	case USB_FS_OPEN:
	case USB_FS_OPEN_STREAM:
		if (u.popen->ep_index >= f->fs_ep_max) {
			error = EINVAL;
			break;
		}
		if (f->fs_xfer[u.popen->ep_index] != NULL) {
			error = EBUSY;
			break;
		}
		if (u.popen->max_bufsize > USB_FS_MAX_BUFSIZE) {
			u.popen->max_bufsize = USB_FS_MAX_BUFSIZE;
		}
		if (u.popen->max_frames & USB_FS_MAX_FRAMES_PRE_SCALE) {
			pre_scale = 1;
			u.popen->max_frames &= ~USB_FS_MAX_FRAMES_PRE_SCALE;
		} else {
			pre_scale = 0;
		}
		if (u.popen->max_frames > USB_FS_MAX_FRAMES) {
			u.popen->max_frames = USB_FS_MAX_FRAMES;
			break;
		}
		if (u.popen->max_frames == 0) {
			error = EINVAL;
			break;
		}
		ep = usbd_get_ep_by_addr(f->udev, u.popen->ep_no);
		if (ep == NULL) {
			error = EINVAL;
			break;
		}
		ed = ep->edesc;
		if (ed == NULL) {
			error = ENXIO;
			break;
		}
		iface_index = ep->iface_index;

		memset(usb_config, 0, sizeof(usb_config));

		usb_config[0].type = ed->bmAttributes & UE_XFERTYPE;
		usb_config[0].endpoint = ed->bEndpointAddress & UE_ADDR;
		usb_config[0].direction = ed->bEndpointAddress &
		    (UE_DIR_OUT | UE_DIR_IN);
		usb_config[0].interval = USB_DEFAULT_INTERVAL;
		usb_config[0].flags.proxy_buffer = 1;
		if (pre_scale != 0)
			usb_config[0].flags.pre_scale_frames = 1;
		usb_config[0].callback = &ugen_ctrl_fs_callback;
		usb_config[0].timeout = 0;	/* no timeout */
		usb_config[0].frames = u.popen->max_frames;
		usb_config[0].bufsize = u.popen->max_bufsize;
		usb_config[0].usb_mode = USB_MODE_DUAL;	/* both modes */
		if (cmd == USB_FS_OPEN_STREAM)
			usb_config[0].stream_id = u.popen_stream->stream_id;

		if (usb_config[0].type == UE_CONTROL) {
			if (f->udev->flags.usb_mode != USB_MODE_HOST) {
				error = EINVAL;
				break;
			}
		} else {
			isread = ((usb_config[0].endpoint &
			    (UE_DIR_IN | UE_DIR_OUT)) == UE_DIR_IN);

			if (f->udev->flags.usb_mode != USB_MODE_HOST) {
				isread = !isread;
			}
			/* check permissions */
			if (isread) {
				if (!(fflags & FREAD)) {
					error = EPERM;
					break;
				}
			} else {
				if (!(fflags & FWRITE)) {
					error = EPERM;
					break;
				}
			}
		}
		error = usbd_transfer_setup(f->udev, &iface_index,
		    f->fs_xfer + u.popen->ep_index, usb_config, 1,
		    f, f->priv_mtx);
		if (error == 0) {
			/* update maximums */
			u.popen->max_packet_length =
			    f->fs_xfer[u.popen->ep_index]->max_frame_size;
			u.popen->max_bufsize =
			    f->fs_xfer[u.popen->ep_index]->max_data_length;
			/* update number of frames */
			u.popen->max_frames =
			    f->fs_xfer[u.popen->ep_index]->nframes;
			/* store index of endpoint */
			f->fs_xfer[u.popen->ep_index]->priv_fifo =
			    ((uint8_t *)0) + u.popen->ep_index;
		} else {
			error = ENOMEM;
		}
		break;

	case USB_FS_CLOSE:
		if (u.pclose->ep_index >= f->fs_ep_max) {
			error = EINVAL;
			break;
		}
		if (f->fs_xfer[u.pclose->ep_index] == NULL) {
			error = EINVAL;
			break;
		}
		usbd_transfer_unsetup(f->fs_xfer + u.pclose->ep_index, 1);
		break;

	case USB_FS_CLEAR_STALL_SYNC:
		if (u.pstall->ep_index >= f->fs_ep_max) {
			error = EINVAL;
			break;
		}
		if (f->fs_xfer[u.pstall->ep_index] == NULL) {
			error = EINVAL;
			break;
		}
		if (f->udev->flags.usb_mode != USB_MODE_HOST) {
			error = EINVAL;
			break;
		}
		mtx_lock(f->priv_mtx);
		error = usbd_transfer_pending(f->fs_xfer[u.pstall->ep_index]);
		mtx_unlock(f->priv_mtx);

		if (error) {
			return (EBUSY);
		}
		ep = f->fs_xfer[u.pstall->ep_index]->endpoint;

		/* setup a clear-stall packet */

		req.bmRequestType = UT_WRITE_ENDPOINT;
		req.bRequest = UR_CLEAR_FEATURE;
		USETW(req.wValue, UF_ENDPOINT_HALT);
		req.wIndex[0] = ep->edesc->bEndpointAddress;
		req.wIndex[1] = 0;
		USETW(req.wLength, 0);

		error = usbd_do_request(f->udev, NULL, &req, NULL);
		if (error == 0) {
			usbd_clear_data_toggle(f->udev, ep);
		} else {
			error = ENXIO;
		}
		break;

	default:
		error = ENOIOCTL;
		break;
	}
	DPRINTFN(6, "error=%d\n", error);
	return (error);
}
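/*
 * Illustrative userland sketch of the fast-transfer ioctl sequence
 * handled above: open an endpoint, start a transfer and reap the
 * completion.  It assumes a ugen file descriptor whose fast-transfer
 * frame buffers have already been set up elsewhere, and uses only the
 * ioctl commands and structure fields referenced in ugen_ioctl().
 * Since USB_FS_COMPLETE returns EBUSY while nothing has completed,
 * real code would poll(2) the descriptor first.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <dev/usb/usb_ioctl.h>

static int
example_fs_round_trip(int fd, uint8_t ep_index, uint8_t ep_no)
{
	struct usb_fs_open popen;
	struct usb_fs_start pstart;
	struct usb_fs_complete pcomp;

	memset(&popen, 0, sizeof(popen));
	popen.ep_index = ep_index;
	popen.ep_no = ep_no;		/* e.g. 0x81 for IN endpoint 1 */
	popen.max_bufsize = 65536;	/* kernel clamps oversized requests */
	popen.max_frames = 1;

	if (ioctl(fd, USB_FS_OPEN, &popen) < 0)
		return (-1);

	memset(&pstart, 0, sizeof(pstart));
	pstart.ep_index = ep_index;
	if (ioctl(fd, USB_FS_START, &pstart) < 0)
		return (-1);

	memset(&pcomp, 0, sizeof(pcomp));
	if (ioctl(fd, USB_FS_COMPLETE, &pcomp) < 0)
		return (-1);	/* EBUSY if nothing has completed yet */

	return (pcomp.ep_index);
}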