/*------------------------------------------------------------------------*
 *	usb_proc_mwait
 *
 * This function will return when the USB process messages pointed to
 * by "_pm0" and "_pm1" are no longer on a queue. This function must
 * be called having "up->up_lock" locked.
 *
 * A message is considered queued while its "pm_qentry.tqe_prev" is
 * non-NULL; this code NULLs that field itself on removal below, and
 * presumably the process thread does the same when it dequeues a
 * message (NOTE(review): confirm against the process loop).
 *------------------------------------------------------------------------*/
void
usb_proc_mwait(struct usb_process *up, void *_pm0, void *_pm1)
{
	struct usb_proc_msg *pm0 = _pm0;
	struct usb_proc_msg *pm1 = _pm1;

	/* check if gone - nothing to wait for on a dead process */
	if (up->up_gone)
		return;

	KKASSERT(lockowned(up->up_lock));

	if (up->up_curtd == curthread) {
		/*
		 * We are being called from the USB process itself, so
		 * waiting would deadlock. Just remove the messages
		 * from the queue.
		 */
		if (pm0->pm_qentry.tqe_prev) {
			TAILQ_REMOVE(&up->up_qhead, pm0, pm_qentry);
			pm0->pm_qentry.tqe_prev = NULL;
		}
		if (pm1->pm_qentry.tqe_prev) {
			TAILQ_REMOVE(&up->up_qhead, pm1, pm_qentry);
			pm1->pm_qentry.tqe_prev = NULL;
		}
	} else
		/*
		 * Sleep on the drain condvar until both messages have
		 * been consumed; "up_dsleep" marks that a drainer is
		 * waiting so the process knows to broadcast.
		 */
		while (pm0->pm_qentry.tqe_prev ||
		    pm1->pm_qentry.tqe_prev) {
			/* check if config thread is gone */
			if (up->up_gone)
				break;
			up->up_dsleep = 1;
			cv_wait(&up->up_drain, up->up_lock);
		}
}
/*------------------------------------------------------------------------*
 *	usb_pc_common_mem_cb - BUS-DMA callback function
 *
 * Shared completion path for both DMA memory allocation and load
 * operations. Records the physical address of every DMA segment into
 * the page cache's "page_start" array, then notifies the waiter:
 * via the parent tag's callback when "isload" is set, otherwise by
 * broadcasting the parent tag's condvar.
 *------------------------------------------------------------------------*/
static void
usb_pc_common_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error, uint8_t isload)
{
	struct usb_dma_parent_tag *uptag;
	struct usb_page_cache *pc;
	struct usb_page *pg;
	usb_size_t rem;
	uint8_t owned;

	pc = arg;
	uptag = pc->tag_parent;

	/*
	 * XXX There is sometimes recursive locking here.
	 * XXX We should try to find a better solution.
	 * XXX Until further the "owned" variable does
	 * XXX the trick.
	 */

	if (error) {
		goto done;
	}
	/*
	 * Record the first segment: split its bus address into a
	 * page-aligned base and an in-page offset.
	 */
	pg = pc->page_start;
	pg->physaddr = segs->ds_addr & ~(USB_PAGE_SIZE - 1);
	rem = segs->ds_addr & (USB_PAGE_SIZE - 1);
	pc->page_offset_buf = rem;
	pc->page_offset_end += rem;
	nseg--;
#ifdef USB_DEBUG
	if (rem != (USB_P2U(pc->buffer) & (USB_PAGE_SIZE - 1))) {
		/*
		 * This check verifies that the physical address is correct:
		 * the in-page offset of the bus address must match the
		 * in-page offset of the virtual buffer.
		 */
		DPRINTFN(0, "Page offset was not preserved\n");
		error = 1;
		goto done;
	}
#endif
	/* Record the page-aligned base of each remaining segment. */
	while (nseg > 0) {
		nseg--;
		segs++;
		pg++;
		pg->physaddr = segs->ds_addr & ~(USB_PAGE_SIZE - 1);
	}

done:
	/* Only take the lock if the caller does not already hold it. */
	owned = lockowned(uptag->lock);
	if (!owned)
		lockmgr(uptag->lock, LK_EXCLUSIVE);

	uptag->dma_error = (error ? 1 : 0);
	if (isload) {
		/* asynchronous load - resume via the parent tag callback */
		(uptag->func) (uptag);
	} else {
		/* synchronous path - wake up the sleeping initiator */
		cv_broadcast(uptag->cv);
	}
	if (!owned)
		lockmgr(uptag->lock, LK_RELEASE);
}
/*------------------------------------------------------------------------*
 *	usb_proc_is_gone
 *
 * Return values:
 *    0: USB process is running
 * Else: USB process is tearing down
 *------------------------------------------------------------------------*/
uint8_t
usb_proc_is_gone(struct usb_process *up)
{
	if (up->up_gone != 0)
		return (1);

	/*
	 * Calls made before the USB process structure has been
	 * initialised (up_lock still NULL) are allowed; only assert
	 * lock ownership once the lock exists.
	 */
	if (up->up_lock != NULL)
		KKASSERT(lockowned(up->up_lock));

	return (0);
}
/*------------------------------------------------------------------------*
 *	usb_proc_drain
 *
 * This function will tear down an USB process, waiting for the
 * currently executing command to return.
 *
 * NOTE: If the structure pointed to by "up" is all zero,
 * this function does nothing.
 *
 * Must be called WITHOUT "up->up_lock" held; it takes and releases
 * the lock itself.
 *------------------------------------------------------------------------*/
void
usb_proc_drain(struct usb_process *up)
{
	/* check if not initialised */
	if (up->up_lock == NULL)
		return;
#if 0 /* XXX */
	/* handle special case with Giant */
	if (up->up_mtx != &Giant)
		mtx_assert(up->up_mtx, MA_NOTOWNED);
#else
	KKASSERT(!lockowned(up->up_lock));

	lockmgr(up->up_lock, LK_EXCLUSIVE);
#endif

	/* Set the gone flag - makes the process thread exit its loop */
	up->up_gone = 1;

	/*
	 * Wait for the process thread to terminate; "up_ptr" is
	 * presumably cleared by the exiting thread (NOTE(review):
	 * confirm against the process entry function).
	 */
	while (up->up_ptr) {
		/* Check if we need to wakeup the USB process */
		if (up->up_msleep || up->up_csleep) {
			up->up_msleep = 0;
			up->up_csleep = 0;
			cv_signal(&up->up_cv);
		}
		/*
		 * Check if we are still cold booted - the scheduler
		 * cannot run the thread yet, so suspend it instead of
		 * sleeping forever.
		 */
		if (cold) {
			USB_THREAD_SUSPEND(up->up_ptr);
			kprintf("WARNING: A USB process has "
			    "been left suspended\n");
			break;
		}
		cv_wait(&up->up_cv, up->up_lock);
	}
	/* Check if someone is waiting - should not happen */

	if (up->up_dsleep) {
		up->up_dsleep = 0;
		cv_broadcast(&up->up_drain);
		DPRINTF("WARNING: Someone is waiting "
		    "for USB process drain!\n");
	}
	lockmgr(up->up_lock, LK_RELEASE);
}
/*------------------------------------------------------------------------*
 *	usb_bdma_done_event
 *
 * This function is called when the BUS-DMA has loaded virtual memory
 * into DMA, if any.
 *------------------------------------------------------------------------*/
void
usb_bdma_done_event(struct usb_dma_parent_tag *udpt)
{
	struct usb_xfer_root *xroot = USB_DMATAG_TO_XROOT(udpt);

	KKASSERT(lockowned(xroot->xfer_lock));

	/* propagate the DMA load result to the transfer root */
	xroot->dma_error = udpt->dma_error;

	/* re-enter the DMA work loop at the current queue entry */
	usb_command_wrapper(&xroot->dma_q, xroot->dma_q.curr);
}
/*------------------------------------------------------------------------*
 *	usb_proc_rewakeup
 *
 * This function is called to re-wakeup the given USB
 * process. This usually happens after that the USB system has been in
 * polling mode, like during a panic. This function must be called
 * having "up->up_lock" locked.
 *------------------------------------------------------------------------*/
void
usb_proc_rewakeup(struct usb_process *up)
{
	/* ignore processes that are uninitialised or tearing down */
	if (up->up_lock == NULL || up->up_gone)
		return;

	KKASSERT(lockowned(up->up_lock));

	/*
	 * If the process thread is not currently sleeping on its
	 * message queue it may have missed a wakeup - signal again.
	 */
	if (up->up_msleep == 0)
		cv_signal(&up->up_cv);
}
/*
 * Set a mixer control, taking the mixer lock only if the caller does
 * not already hold it. Returns the combined left/right value in the
 * usual OSS encoding (left | right << 8).
 */
static int
ua_mixer_set(struct snd_mixer *m, unsigned type, unsigned left,
    unsigned right)
{
	struct lock *lk = mixer_get_lock(m);
	uint8_t need_unlock = 0;

	if (!lockowned(lk)) {
		/* we took the lock, so we must drop it again */
		lockmgr(lk, LK_EXCLUSIVE);
		need_unlock = 1;
	}

	uaudio_mixer_set(mix_getdevinfo(m), type, left, right);

	if (need_unlock)
		lockmgr(lk, LK_RELEASE);

	return (left | (right << 8));
}
/*
 * Select the recording source, taking the mixer lock only if the
 * caller does not already hold it. Returns the mask reported by
 * uaudio_mixer_setrecsrc().
 */
static uint32_t
ua_mixer_setrecsrc(struct snd_mixer *m, uint32_t src)
{
	struct lock *lk = mixer_get_lock(m);
	uint8_t need_unlock = 0;
	int result;

	if (!lockowned(lk)) {
		/* we took the lock, so we must drop it again */
		lockmgr(lk, LK_EXCLUSIVE);
		need_unlock = 1;
	}

	result = uaudio_mixer_setrecsrc(mix_getdevinfo(m), src);

	if (need_unlock)
		lockmgr(lk, LK_RELEASE);

	return (result);
}
/*
 * Merge "action" into the pending task flags and make sure the task
 * is queued for execution. Must be called with sc_ng_lock held.
 */
static void
ubt_task_schedule(ubt_softc_p sc, int action)
{
	KKASSERT(lockowned(&sc->sc_ng_lock) != 0);

	/*
	 * Handle the corner case where "start all" and "stop all" can
	 * both be requested before the task runs: a stop cancels any
	 * previously requested start, while a start after a stop keeps
	 * both bits so the task stops first and then restarts.
	 *
	 *	sc_task_flags	action		new sc_task_flags
	 *	------------------------------------------------------
	 *	0		start		start
	 *	0		stop		stop
	 *	start		start		start
	 *	start		stop		stop
	 *	stop		start		stop|start
	 *	stop		stop		stop
	 *	stop|start	start		stop|start
	 *	stop|start	stop		stop
	 */
	if (action != 0) {
		if (action & UBT_FLAG_T_STOP_ALL)
			sc->sc_task_flags &= ~UBT_FLAG_T_START_ALL;
		sc->sc_task_flags |= action;
	}

	/* already queued - the pending task will pick up the new flags */
	if ((sc->sc_task_flags & UBT_FLAG_T_PENDING) != 0)
		return;

	/* taskqueue_enqueue() returns zero on success */
	if (taskqueue_enqueue(taskqueue_swi, &sc->sc_task) == 0)
		sc->sc_task_flags |= UBT_FLAG_T_PENDING;

	/* XXX: enqueue failure should never happen */
} /* ubt_task_schedule */
/*
 * ue_attach_post_task - deferred second half of USB ethernet attach,
 * executed as a USB process message on the config thread.
 *
 * Calls the driver's post-attach hook, allocates a unit number,
 * initialises the ifnet, probes PHYs (either via the driver's
 * attach_post_sub hook or the default setup below), attaches the
 * ethernet interface and creates the sysctl tree.
 *
 * Entered with the usb_ether lock held (it is a USB process
 * callback); the lock is dropped for the duration of the attach work
 * and re-taken before returning.
 */
static void
ue_attach_post_task(struct usb_proc_msg *_task)
{
	struct usb_ether_cfg_task *task =
	    (struct usb_ether_cfg_task *)_task;
	struct usb_ether *ue = task->ue;
	struct ifnet *ifp = uether_getifp(ue);
	int error;
	char num[14];			/* sufficient for 32 bits */

	/* first call driver's post attach routine */
	ue->ue_methods->ue_attach_post(ue);

	/* drop the lock - the work below must run unlocked */
	UE_UNLOCK(ue);

	KKASSERT(!lockowned(ue->ue_lock));
	ue->ue_unit = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(ue), 0);
	usb_callout_init_mtx(&ue->ue_watchdog, ue->ue_lock, 0);
	sysctl_ctx_init(&ue->ue_sysctl_ctx);
	KKASSERT(!lockowned(ue->ue_lock));

	error = 0;
	ifp->if_softc = ue;
	if_initname(ifp, "ue", ue->ue_unit);
	if (ue->ue_methods->ue_attach_post_sub != NULL) {
		/* driver supplies its own ifnet/PHY setup */
		error = ue->ue_methods->ue_attach_post_sub(ue);
		KKASSERT(!lockowned(ue->ue_lock));
	} else {
		/* default ifnet setup */
		ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
		if (ue->ue_methods->ue_ioctl != NULL)
			ifp->if_ioctl = ue->ue_methods->ue_ioctl;
		else
			ifp->if_ioctl = uether_ioctl;
		ifp->if_start = ue_start;
		ifp->if_init = ue_init;
		ifq_set_maxlen(&ifp->if_snd, ifqmaxlen);
		ifq_set_ready(&ifp->if_snd);

		/* probe PHYs only when the driver has MII callbacks */
		if (ue->ue_methods->ue_mii_upd != NULL &&
		    ue->ue_methods->ue_mii_sts != NULL) {
			error = mii_phy_probe(ue->ue_dev, &ue->ue_miibus,
			    ue_ifmedia_upd, ue->ue_methods->ue_mii_sts);
		}
	}

	if (error) {
		device_printf(ue->ue_dev, "attaching PHYs failed\n");
		goto fail;
	}

	if_printf(ifp, "<USB Ethernet> on %s\n",
	    device_get_nameunit(ue->ue_dev));
	ether_ifattach(ifp, ue->ue_eaddr, NULL);

	/* Tell upper layer we support VLAN oversized frames. */
	if (ifp->if_capabilities & IFCAP_VLAN_MTU)
		ifp->if_hdrlen = sizeof(struct ether_vlan_header);

	/* create the per-unit sysctl node, e.g. net.ue.<unit> */
	ksnprintf(num, sizeof(num), "%u", ue->ue_unit);
	ue->ue_sysctl_oid = SYSCTL_ADD_NODE(&ue->ue_sysctl_ctx,
	    &SYSCTL_NODE_CHILDREN(_net, ue),
	    OID_AUTO, num, CTLFLAG_RD, NULL, "");
	SYSCTL_ADD_PROC(&ue->ue_sysctl_ctx,
	    SYSCTL_CHILDREN(ue->ue_sysctl_oid), OID_AUTO,
	    "%parent", CTLTYPE_STRING | CTLFLAG_RD, ue, 0,
	    ue_sysctl_parent, "A", "parent device");

	KKASSERT(!lockowned(ue->ue_lock));
	UE_LOCK(ue);
	return;

fail:
	/* release the unit number allocated above */
	devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(ue), ue->ue_unit);
	UE_LOCK(ue);
	return;
}
/*------------------------------------------------------------------------*
 *	usb_proc_msignal
 *
 * This function will queue one of the passed USB process messages on
 * the USB process queue. The first message that is not already queued
 * will get queued. If both messages are already queued the one queued
 * last will be removed from the queue and queued in the end. The USB
 * process lock must be locked when calling this function. This
 * function exploits the fact that a process can only do one callback
 * at a time. The message that was queued is returned.
 *------------------------------------------------------------------------*/
void *
usb_proc_msignal(struct usb_process *up, void *_pm0, void *_pm1)
{
	struct usb_proc_msg *pm0 = _pm0;
	struct usb_proc_msg *pm1 = _pm1;
	struct usb_proc_msg *pm2;
	usb_size_t d;
	uint8_t t;

	/* check if gone, return dummy value */
	if (up->up_gone)
		return (_pm0);

	KKASSERT(lockowned(up->up_lock));

	/*
	 * Build a 2-bit mask of which messages are currently queued;
	 * a message is on the queue while its tqe_prev is non-NULL.
	 */
	t = 0;

	if (pm0->pm_qentry.tqe_prev) {
		t |= 1;
	}
	if (pm1->pm_qentry.tqe_prev) {
		t |= 2;
	}
	if (t == 0) {
		/*
		 * No entries are queued. Queue "pm0" and use the existing
		 * message number.
		 */
		pm2 = pm0;
	} else if (t == 1) {
		/* "pm0" is queued; queue "pm1" with a fresh number. */
		/* Check if we need to increment the message number. */
		if (pm0->pm_num == up->up_msg_num) {
			up->up_msg_num++;
		}
		pm2 = pm1;
	} else if (t == 2) {
		/* "pm1" is queued; queue "pm0" with a fresh number. */
		/* Check if we need to increment the message number. */
		if (pm1->pm_num == up->up_msg_num) {
			up->up_msg_num++;
		}
		pm2 = pm0;
	} else if (t == 3) {
		/*
		 * Both entries are queued. Re-queue the entry closest to
		 * the end.
		 */
		d = (pm1->pm_num - pm0->pm_num);

		/*
		 * Check sign after subtraction - handles message number
		 * wrap-around correctly.
		 */

		if (d & 0x80000000) {
			pm2 = pm0;
		} else {
			pm2 = pm1;
		}

		TAILQ_REMOVE(&up->up_qhead, pm2, pm_qentry);
	} else {
		pm2 = NULL;		/* panic - should not happen */
	}

	DPRINTF(" t=%u, num=%u\n", t, up->up_msg_num);

	/* Put message last on queue */

	pm2->pm_num = up->up_msg_num;
	TAILQ_INSERT_TAIL(&up->up_qhead, pm2, pm_qentry);

	/* Check if we need to wakeup the USB process. */

	if (up->up_msleep) {
		up->up_msleep = 0;	/* save "cv_signal()" calls */
		cv_signal(&up->up_cv);
	}
	return (pm2);
}
/*------------------------------------------------------------------------*
 *	usb_bdma_work_loop
 *
 * This function handles loading of virtual buffers into DMA and is
 * only called when "dma_refcount" is zero.
 *
 * It is re-entered once per frame buffer (via usb_bdma_done_event
 * after each asynchronous load completes) until all frames are
 * loaded, then performs the pre-transfer cache sync and starts the
 * hardware.
 *------------------------------------------------------------------------*/
void
usb_bdma_work_loop(struct usb_xfer_queue *pq)
{
	struct usb_xfer_root *info;
	struct usb_xfer *xfer;
	usb_frcount_t nframes;

	xfer = pq->curr;
	info = xfer->xroot;

	KKASSERT(lockowned(info->xfer_lock));

	if (xfer->error) {
		/* some error happened - complete the transfer now */
		USB_BUS_LOCK(info->bus);
		usbd_transfer_done(xfer, 0);
		USB_BUS_UNLOCK(info->bus);
		return;
	}
	if (!xfer->flags_int.bdma_setup) {
		/* first pass for this transfer - set up the page arrays */
		struct usb_page *pg;
		usb_frlength_t frlength_0;
		uint8_t isread;

		xfer->flags_int.bdma_setup = 1;

		/* reset BUS-DMA load state */

		info->dma_error = 0;

		if (xfer->flags_int.isochronous_xfr) {
			/* only one frame buffer */
			nframes = 1;
			frlength_0 = xfer->sumlen;
		} else {
			/* can be multiple frame buffers */
			nframes = xfer->nframes;
			frlength_0 = xfer->frlengths[0];
		}

		/*
		 * Set DMA direction first. This is needed to
		 * select the correct cache invalidate and cache
		 * flush operations.
		 */
		isread = USB_GET_DATA_ISREAD(xfer);
		pg = xfer->dma_page_ptr;

		if (xfer->flags_int.control_xfr &&
		    xfer->flags_int.control_hdr) {
			/* special case - control header frame */
			if (xfer->flags_int.usb_mode == USB_MODE_DEVICE) {
				/* The device controller writes to memory */
				xfer->frbuffers[0].isread = 1;
			} else {
				/* The host controller reads from memory */
				xfer->frbuffers[0].isread = 0;
			}
		} else {
			/* default case */
			xfer->frbuffers[0].isread = isread;
		}

		/*
		 * Setup the "page_start" pointer which points to an array of
		 * USB pages where information about the physical address of a
		 * page will be stored. Also initialise the "isread" field of
		 * the USB page caches.
		 */
		xfer->frbuffers[0].page_start = pg;

		info->dma_nframes = nframes;
		info->dma_currframe = 0;
		info->dma_frlength_0 = frlength_0;

		/*
		 * Advance by pages-per-frame plus two; the extra two
		 * entries presumably cover an unaligned buffer spanning
		 * one more page boundary (NOTE(review): confirm against
		 * the dma_page_ptr sizing code).
		 */
		pg += (frlength_0 / USB_PAGE_SIZE);
		pg += 2;

		while (--nframes > 0) {
			xfer->frbuffers[nframes].isread = isread;
			xfer->frbuffers[nframes].page_start = pg;

			pg += (xfer->frlengths[nframes] / USB_PAGE_SIZE);
			pg += 2;
		}
	}
	if (info->dma_error) {
		/* a previous asynchronous load failed */
		USB_BUS_LOCK(info->bus);
		usbd_transfer_done(xfer, USB_ERR_DMA_LOAD_FAILED);
		USB_BUS_UNLOCK(info->bus);
		return;
	}
	if (info->dma_currframe != info->dma_nframes) {
		/* load the next frame; we re-enter here per frame */

		if (info->dma_currframe == 0) {
			/* special case */
			usb_pc_load_mem(xfer->frbuffers,
			    info->dma_frlength_0, 0);
		} else {
			/* default case */
			nframes = info->dma_currframe;
			usb_pc_load_mem(xfer->frbuffers + nframes,
			    xfer->frlengths[nframes], 0);
		}

		/* advance frame index */
		info->dma_currframe++;

		return;
	}
	/* go ahead */
	usb_bdma_pre_sync(xfer);

	/* start loading next USB transfer, if any */
	usb_command_wrapper(pq, NULL);

	/* finally start the hardware */
	usbd_pipe_enter(xfer);
}
/*------------------------------------------------------------------------*
 *	usb_pc_load_mem - load virtual memory into DMA
 *
 * "sync" non-zero selects the synchronous path, which sleeps on the
 * parent tag's condvar until the BUS-DMA callback completes; "sync"
 * zero starts an asynchronous load whose completion is delivered via
 * the load callback.
 *
 * Must be called with the parent tag's lock held.
 *
 * Return values:
 *    0: Success
 * Else: Error (synchronous path only; the asynchronous path always
 *       returns 0 and reports errors through the callback)
 *------------------------------------------------------------------------*/
uint8_t
usb_pc_load_mem(struct usb_page_cache *pc, usb_size_t size, uint8_t sync)
{
	/* setup page cache */
	pc->page_offset_buf = 0;
	pc->page_offset_end = size;
	pc->ismultiseg = 1;

	KKASSERT(lockowned(pc->tag_parent->lock));

	if (size > 0) {
		if (sync) {
			struct usb_dma_parent_tag *uptag;
			int err;

			uptag = pc->tag_parent;

			/*
			 * We have to unload the previous loaded DMA
			 * pages before trying to load a new one!
			 */
			bus_dmamap_unload(pc->tag, pc->map);

			/*
			 * Try to load memory into DMA.
			 */
			err = bus_dmamap_load(
			    pc->tag, pc->map, pc->buffer, size,
			    &usb_pc_alloc_mem_cb, pc, BUS_DMA_WAITOK);
			if (err == EINPROGRESS) {
				/* callback will fire later - wait for it */
				cv_wait(uptag->cv, uptag->lock);
				err = 0;
			}
			/* dma_error is set by the BUS-DMA callback */
			if (err || uptag->dma_error) {
				return (1);
			}
		} else {
			/*
			 * We have to unload the previous loaded DMA
			 * pages before trying to load a new one!
			 */
			bus_dmamap_unload(pc->tag, pc->map);

			/*
			 * Try to load memory into DMA. The callback
			 * will be called in all cases:
			 */
			if (bus_dmamap_load(
			    pc->tag, pc->map, pc->buffer, size,
			    &usb_pc_load_mem_cb, pc, BUS_DMA_WAITOK)) {
				/* errors are reported via the callback */
			}
		}
	} else {
		if (!sync) {
			/*
			 * Call callback so that refcount is decremented
			 * properly:
			 */
			pc->tag_parent->dma_error = 0;
			(pc->tag_parent->func) (pc->tag_parent);
		}
	}
	return (0);
}