void
qla_rcv(void *context, int pending)
{
	qla_ivec_t *ivec = context;
	qla_host_t *ha;
	device_t dev;
	qla_hw_t *hw;
	uint32_t sds_idx;
	uint32_t ret;
	struct ifnet *ifp;

	ha = ivec->ha;
	dev = ha->pci_dev;
	hw = &ha->hw;
	sds_idx = ivec->irq_rid - 1;
	ifp = ha->ifp;

	do {
		if (sds_idx == 0) {
			if (qla_le32_to_host(*(hw->tx_cons)) != hw->txr_comp) {
				taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
			} else if ((ifp->if_snd.ifq_head != NULL) &&
			    QL_RUNNING(ifp)) {
				taskqueue_enqueue(ha->tx_tq, &ha->tx_task);
			}
		}
		ret = qla_rcv_isr(ha, sds_idx, rcv_pkt_thres_d);
	} while (ret);

	if (sds_idx == 0)
		taskqueue_enqueue(ha->tx_tq, &ha->tx_task);

	QL_ENABLE_INTERRUPTS(ha, sds_idx);
}
void
qla_isr(void *arg)
{
	qla_ivec_t *ivec = arg;
	qla_host_t *ha;
	uint32_t sds_idx;
	uint32_t ret;

	ha = ivec->ha;
	sds_idx = ivec->irq_rid - 1;

	if (sds_idx >= ha->hw.num_sds_rings) {
		device_printf(ha->pci_dev, "%s: bogus sds_idx 0x%x\n",
		    __func__, sds_idx);
		return;
	}

	if (sds_idx == 0)
		taskqueue_enqueue(ha->tx_tq, &ha->tx_task);

	ret = qla_rcv_isr(ha, sds_idx, rcv_pkt_thres);

	if (sds_idx == 0)
		taskqueue_enqueue(ha->tx_tq, &ha->tx_task);

	if (ret) {
		taskqueue_enqueue(ha->irq_vec[sds_idx].rcv_tq,
		    &ha->irq_vec[sds_idx].rcv_task);
	} else {
		QL_ENABLE_INTERRUPTS(ha, sds_idx);
	}
}
void
ql_isr(void *arg)
{
	qla_ivec_t *ivec = arg;
	qla_host_t *ha;
	int idx;
	qla_hw_t *hw;
	struct ifnet *ifp;
	uint32_t ret = 0;

	ha = ivec->ha;
	hw = &ha->hw;
	ifp = ha->ifp;

	if ((idx = ivec->sds_idx) >= ha->hw.num_sds_rings)
		return;

	if (idx == 0)
		taskqueue_enqueue(ha->tx_tq, &ha->tx_task);

	ret = qla_rcv_isr(ha, idx, -1);

	if (idx == 0)
		taskqueue_enqueue(ha->tx_tq, &ha->tx_task);

	if (!ha->flags.stop_rcv)
		QL_ENABLE_INTERRUPTS(ha, idx);
}
static void
xenbus_devices_changed(struct xenbus_watch *watch, const char **vec,
    unsigned int len)
{
	struct xenbus_softc *sc = (struct xenbus_softc *)watch;
	device_t dev = sc->xs_dev;
	char *node, *bus, *type, *id, *p;

	node = strdup(vec[XS_WATCH_PATH], M_DEVBUF);
	p = strchr(node, '/');
	if (!p)
		goto out;
	bus = node;
	*p = 0;
	type = p + 1;
	p = strchr(type, '/');
	if (!p)
		goto out;
	*p = 0;
	id = p + 1;
	p = strchr(id, '/');
	if (p)
		*p = 0;
	xenbus_add_device(dev, bus, type, id);
	taskqueue_enqueue(taskqueue_thread, &sc->xs_probechildren);
out:
	free(node, M_DEVBUF);
}
void
nfs_nfsiodnew(void)
{

	mtx_assert(&nfs_iod_mtx, MA_OWNED);
	taskqueue_enqueue(taskqueue_thread, &nfs_nfsiodnew_task);
}
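/*
 * A minimal sketch of the deferred half of the call above.  The handler
 * name nfs_nfsiodnew_tq and the setup function are assumptions for
 * illustration, not the verified nfsclient implementation.  The point of
 * the pattern is that nfs_nfsiodnew() stays callable with nfs_iod_mtx
 * held, while the sleeping work of creating a new nfsiod kernel process
 * happens later in the taskqueue_thread context.
 */
static void
nfs_nfsiodnew_tq(void *arg, int pending)
{

	/* May sleep here, e.g. in kproc_create(), unlike the caller. */
}

static void
nfs_nfsiod_setup(void)
{

	TASK_INIT(&nfs_nfsiodnew_task, 0, nfs_nfsiodnew_tq, NULL);
}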
static __inline void
vmbus_event_flags_proc(struct vmbus_softc *sc, volatile u_long *event_flags,
    int flag_cnt)
{
	int f;

	for (f = 0; f < flag_cnt; ++f) {
		uint32_t chid_base;
		u_long flags;
		int chid_ofs;

		if (event_flags[f] == 0)
			continue;

		flags = atomic_swap_long(&event_flags[f], 0);
		chid_base = f << VMBUS_EVTFLAG_SHIFT;

		while ((chid_ofs = ffsl(flags)) != 0) {
			struct vmbus_channel *chan;

			--chid_ofs; /* NOTE: ffsl is 1-based */
			flags &= ~(1UL << chid_ofs);

			chan = sc->vmbus_chmap[chid_base + chid_ofs];

			/* if channel is closed or closing */
			if (chan == NULL || chan->ch_tq == NULL)
				continue;

			if (chan->ch_flags & VMBUS_CHAN_FLAG_BATCHREAD)
				vmbus_rxbr_intr_mask(&chan->ch_rxbr);
			taskqueue_enqueue(chan->ch_tq, &chan->ch_task);
		}
	}
}
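/*
 * A self-contained userspace sketch of the swap-and-scan idiom used
 * above, assuming only C11 atomics and FreeBSD's ffsl(3) from
 * <strings.h>: atomically claim a whole word of pending bits, then peel
 * the set bits off one at a time, remembering that ffsl() is 1-based.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <strings.h>

static _Atomic unsigned long pending_bits = 0x15UL;	/* bits 0, 2, 4 */

static void
drain_pending(void)
{
	unsigned long flags;
	int ofs;

	/* Claim all currently pending bits in one atomic operation. */
	flags = atomic_exchange(&pending_bits, 0UL);
	while ((ofs = ffsl((long)flags)) != 0) {
		--ofs;			/* convert 1-based to 0-based */
		flags &= ~(1UL << ofs);
		printf("servicing channel %d\n", ofs);
	}
}

int
main(void)
{
	drain_pending();
	return (0);
}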
/*
** Multiqueue Transmit driver
**
*/
int
ixl_mq_start(struct ifnet *ifp, struct mbuf *m)
{
	struct ixl_vsi *vsi = ifp->if_softc;
	struct ixl_queue *que;
	struct tx_ring *txr;
	int err, i;

	/* Which queue to use */
	if ((m->m_flags & M_FLOWID) != 0)
		i = m->m_pkthdr.flowid % vsi->num_queues;
	else
		i = curcpu % vsi->num_queues;

	/* Check for a hung queue and pick alternative */
	if (((1 << i) & vsi->active_queues) == 0)
		i = ffsl(vsi->active_queues) - 1; /* NOTE: ffsl is 1-based */

	que = &vsi->queues[i];
	txr = &que->txr;

	err = drbr_enqueue(ifp, txr->br, m);
	if (err)
		return (err);
	if (IXL_TX_TRYLOCK(txr)) {
		ixl_mq_start_locked(ifp, txr);
		IXL_TX_UNLOCK(txr);
	} else
		taskqueue_enqueue(que->tq, &que->tx_task);

	return (0);
}
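/*
 * A minimal sketch of the deferred half of the trylock pattern above,
 * assuming the handler name ixl_deferred_mq_start and a vsi back-pointer
 * in struct ixl_queue: when the TX lock is contended in ixl_mq_start(),
 * the enqueued task retries the drain later with the lock held.
 */
static void
ixl_deferred_mq_start(void *arg, int pending)
{
	struct ixl_queue *que = arg;
	struct tx_ring *txr = &que->txr;
	struct ixl_vsi *vsi = que->vsi;
	struct ifnet *ifp = vsi->ifp;

	IXL_TX_LOCK(txr);
	if (!drbr_empty(ifp, txr->br))
		ixl_mq_start_locked(ifp, txr);
	IXL_TX_UNLOCK(txr);
}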
static void
cfi_disk_strategy(struct bio *bp)
{
	struct cfi_disk_softc *sc = bp->bio_disk->d_drv1;

	if (sc == NULL)
		goto invalid;
	if (bp->bio_bcount == 0) {
		bp->bio_resid = bp->bio_bcount;
		biodone(bp);
		return;
	}
	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
		mtx_lock(&sc->qlock);
		/* no value in sorting requests? */
		bioq_insert_tail(&sc->bioq, bp);
		mtx_unlock(&sc->qlock);
		taskqueue_enqueue(sc->tq, &sc->iotask);
		return;
	}
	/* fall thru... */
invalid:
	bp->bio_flags |= BIO_ERROR;
	bp->bio_error = EINVAL;
	biodone(bp);
}
int
nfs_inactive(struct vop_inactive_args *ap)
{
	struct nfsnode *np;
	struct sillyrename *sp;
	struct thread *td = curthread;	/* XXX */

	np = VTONFS(ap->a_vp);
	mtx_lock(&np->n_mtx);
	if (ap->a_vp->v_type != VDIR) {
		sp = np->n_sillyrename;
		np->n_sillyrename = NULL;
	} else
		sp = NULL;
	if (sp) {
		mtx_unlock(&np->n_mtx);
		(void)nfs_vinvalbuf(ap->a_vp, 0, td, 1);
		/*
		 * Remove the silly file that was rename'd earlier
		 */
		(sp->s_removeit)(sp);
		crfree(sp->s_cred);
		TASK_INIT(&sp->s_task, 0, nfs_freesillyrename, sp);
		taskqueue_enqueue(taskqueue_thread, &sp->s_task);
		mtx_lock(&np->n_mtx);
	}
	/* Clear all flags other than NMODIFIED. */
	np->n_flag &= NMODIFIED;
	mtx_unlock(&np->n_mtx);
	return (0);
}
static void
ntb_qp_link_work(void *arg)
{
	struct ntb_transport_qp *qp = arg;
	struct ntb_softc *ntb = qp->ntb;
	struct ntb_transport_ctx *nt = qp->transport;
	uint32_t val, dummy;

	ntb_spad_read(ntb, IF_NTB_QP_LINKS, &val);

	ntb_peer_spad_write(ntb, IF_NTB_QP_LINKS, val | (1ull << qp->qp_num));

	/* query remote spad for qp ready bits */
	ntb_peer_spad_read(ntb, IF_NTB_QP_LINKS, &dummy);

	/* See if the remote side is up */
	if ((val & (1ull << qp->qp_num)) != 0) {
		ntb_printf(2, "qp link up\n");
		qp->link_is_up = true;

		if (qp->event_handler != NULL)
			qp->event_handler(qp->cb_data, NTB_LINK_UP);

		taskqueue_enqueue(taskqueue_swi, &qp->rxc_db_work);
	} else if (nt->link_is_up)
		callout_reset(&qp->link_work,
		    NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp);
}
void
ql_isr(void *arg)
{
	qla_ivec_t *ivec = arg;
	qla_host_t *ha;
	int idx;
	qla_hw_t *hw;
	struct ifnet *ifp;
	qla_tx_fp_t *fp;

	ha = ivec->ha;
	hw = &ha->hw;
	ifp = ha->ifp;

	if ((idx = ivec->sds_idx) >= ha->hw.num_sds_rings)
		return;

	fp = &ha->tx_fp[idx];

	if ((fp->fp_taskqueue != NULL) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING))
		taskqueue_enqueue(fp->fp_taskqueue, &fp->fp_task);
}
int
main(int argc, char **argv)
{
	struct taskqueue *t;
	struct task task;
	int retval;

	t = taskqueue_create("test", M_WAITOK, taskqueue_thread_enqueue, &t);
	if (!t) {
		kprintf("unable to create taskqueue\n");
		return 1;
	}

	retval = taskqueue_start_threads(&t,
	    4,		/* num threads */
	    PWAIT,	/* priority */
	    "%s",	/* thread name */
	    "test");
	if (retval != 0) {
		kprintf("failed to create taskqueue threads\n");
		return 1;
	}

	TASK_INIT(&task, /*priority*/0, task_worker, NULL);

	retval = taskqueue_enqueue(t, &task);
	if (retval != 0) {
		kprintf("failed to enqueue task\n");
		return 1;
	}

	taskqueue_drain(t, &task);
	taskqueue_free(t);

	return 0;
}
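/*
 * The test above assumes a task_worker callback that is not shown; a
 * minimal sketch of one, matching the task_fn_t signature: the first
 * argument is the context pointer given to TASK_INIT, the second is the
 * number of enqueues coalesced since the task last ran.
 */
static void
task_worker(void *context, int pending)
{

	kprintf("task ran, pending=%d\n", pending);
}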
/*
 * Lookup and potentially load the specified firmware image.
 * If the firmware is not found in the registry, try to load a kernel
 * module named as the image name.
 * If the firmware is located, a reference is returned. The caller must
 * release this reference for the image to be eligible for removal/unload.
 */
const struct firmware *
firmware_get(const char *imagename)
{
	struct task fwload_task;
	struct thread *td;
	struct priv_fw *fp;

	mtx_lock(&firmware_mtx);
	fp = lookup(imagename, NULL);
	if (fp != NULL)
		goto found;
	/*
	 * Image not present, try to load the module holding it.
	 */
	td = curthread;
	if (priv_check(td, PRIV_FIRMWARE_LOAD) != 0 ||
	    securelevel_gt(td->td_ucred, 0) != 0) {
		mtx_unlock(&firmware_mtx);
		printf("%s: insufficient privileges to "
		    "load firmware image %s\n", __func__, imagename);
		return NULL;
	}
	/*
	 * Defer load to a thread with known context. linker_reference_module
	 * may do filesystem i/o which requires root & current dirs, etc.
	 * Also we must not hold any mtx's over this call which is problematic.
	 */
	if (!cold) {
		TASK_INIT(&fwload_task, 0, loadimage, __DECONST(void *,
		    imagename));
		taskqueue_enqueue(firmware_tq, &fwload_task);
		msleep(__DECONST(void *, imagename), &firmware_mtx, 0,
		    "fwload", 0);
	}
	/*
	 * After attempting to load the module, see if the image is
	 * registered now.
	 */
	fp = lookup(imagename, NULL);
	if (fp == NULL) {
		mtx_unlock(&firmware_mtx);
		return NULL;
	}
found:				/* common exit point on success */
	if (fp->refcnt == 0 && fp->parent != NULL)
		fp->parent->refcnt++;
	fp->refcnt++;
	mtx_unlock(&firmware_mtx);
	return &fp->fw;
}
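/*
 * A minimal consumer sketch for the firmware(9) interface above,
 * assuming a hypothetical image name "mydev_fw" and caller: every
 * successful firmware_get() must be balanced by a firmware_put(), and
 * FIRMWARE_UNLOAD tells the subsystem the image may be unloaded once
 * the last reference is dropped.
 */
static int
mydev_load_fw(void)
{
	const struct firmware *fw;

	fw = firmware_get("mydev_fw");
	if (fw == NULL)
		return (ENOENT);
	/* ... copy fw->data (fw->datasize bytes) to the device ... */
	firmware_put(fw, FIRMWARE_UNLOAD);
	return (0);
}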
static void
card_detect_delay(void *arg)
{
	struct fsl_sdhc_softc *sc = arg;

	taskqueue_enqueue(taskqueue_swi_giant, &sc->card_detect_task);
}
static void
usie_if_sync_to(void *arg)
{
	struct usie_softc *sc = arg;

	taskqueue_enqueue(taskqueue_thread, &sc->sc_if_sync_task);
}
void
ata_sata_phy_check_events(device_t dev, int port)
{
	struct ata_channel *ch = device_get_softc(dev);
	u_int32_t error, status;

	if (ata_sata_scr_read(ch, port, ATA_SERROR, &error))
		return;

	/* Check that SError value is sane. */
	if (error == 0xffffffff)
		return;

	/* Clear set error bits/interrupt. */
	if (error)
		ata_sata_scr_write(ch, port, ATA_SERROR, error);

	/* if we have a connection event deal with it */
	if ((error & ATA_SE_PHY_CHANGED) && (ch->pm_level == 0)) {
		if (bootverbose) {
			if (ata_sata_scr_read(ch, port, ATA_SSTATUS,
			    &status)) {
				device_printf(dev, "PHYRDY change\n");
			} else if (((status & ATA_SS_DET_MASK) ==
			    ATA_SS_DET_PHY_ONLINE) &&
			    ((status & ATA_SS_SPD_MASK) !=
			    ATA_SS_SPD_NO_SPEED) &&
			    ((status & ATA_SS_IPM_MASK) ==
			    ATA_SS_IPM_ACTIVE)) {
				device_printf(dev, "CONNECT requested\n");
			} else
				device_printf(dev, "DISCONNECT requested\n");
		}
		taskqueue_enqueue(taskqueue_thread, &ch->conntask);
	}
}
static void
kr_miibus_statchg(device_t dev)
{
	struct kr_softc *sc;

	sc = device_get_softc(dev);
	taskqueue_enqueue(taskqueue_swi, &sc->kr_link_task);
}
static void
nmdm_inwakeup(struct tty *tp)
{
	struct nmdmpart *np = tty_softc(tp);

	/* We can receive again, so wake up the other side. */
	taskqueue_enqueue(taskqueue_swi, &np->np_other->np_task);
}
static void
nmdm_outwakeup(struct tty *tp)
{
	struct nmdmpart *np = tty_softc(tp);

	/* We can transmit again, so wake up our side. */
	taskqueue_enqueue(taskqueue_swi, &np->np_task);
}
/**
 * \brief XenStore watch callback for the root node of the XenStore
 *        subtree representing a XenBus.
 *
 * This callback performs, or delegates to the xbs_probe_children task,
 * all processing necessary to handle dynamic device arrival and departure
 * events from a XenBus.
 *
 * \param watch  The XenStore watch object associated with this callback.
 * \param vec    The XenStore watch event data.
 * \param len    The number of fields in the event data stream.
 */
static void
xenbusb_devices_changed(struct xs_watch *watch, const char **vec,
    unsigned int len)
{
	struct xenbusb_softc *xbs;
	device_t dev;
	char *node;
	char *bus;
	char *type;
	char *id;
	char *p;
	u_int component;

	xbs = (struct xenbusb_softc *)watch->callback_data;
	dev = xbs->xbs_dev;

	if (len <= XS_WATCH_PATH) {
		device_printf(dev, "xenbusb_devices_changed: "
		    "Short Event Data.\n");
		return;
	}

	node = strdup(vec[XS_WATCH_PATH], M_XENBUS);
	p = strchr(node, '/');
	if (p == NULL)
		goto out;
	bus = node;
	*p = 0;
	type = p + 1;
	p = strchr(type, '/');
	if (p == NULL)
		goto out;
	*p++ = 0;

	/*
	 * Extract the device ID.  A device ID has one or more path
	 * components separated by the '/' character.
	 *
	 * e.g. "<frontend vm id>/<frontend dev id>" for backend devices.
	 */
	id = p;
	for (component = 0; component < xbs->xbs_id_components; component++) {
		p = strchr(p, '/');
		if (p == NULL)
			break;
		p++;
	}
	if (p != NULL)
		*p = 0;

	if (*id != 0 && component >= xbs->xbs_id_components - 1) {
		xenbusb_add_device(xbs->xbs_dev, type, id);
		taskqueue_enqueue(taskqueue_thread, &xbs->xbs_probe_children);
	}
out:
	free(node, M_XENBUS);
}
/**
 * Periodic timer tick for slow management operations
 *
 * @param arg    Device to check
 */
static void
cvm_do_timer(void *arg)
{
	static int port;
	static int updated;

	if (port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (cvm_oct_device[port]) {
			int queues_per_port;
			int qos;
			cvm_oct_private_t *priv =
			    (cvm_oct_private_t *)cvm_oct_device[port]->if_softc;

			cvm_oct_common_poll(priv->ifp);
			if (priv->need_link_update) {
				updated++;
				taskqueue_enqueue(cvm_oct_link_taskq,
				    &priv->link_task);
			}

			queues_per_port = cvmx_pko_get_num_queues(port);
			/* Drain any pending packets in the free list */
			for (qos = 0; qos < queues_per_port; qos++) {
				if (_IF_QLEN(&priv->tx_free_queue[qos]) > 0) {
					IF_LOCK(&priv->tx_free_queue[qos]);
					while (_IF_QLEN(
					    &priv->tx_free_queue[qos]) >
					    cvmx_fau_fetch_and_add32(
					    priv->fau + qos * 4, 0)) {
						struct mbuf *m;

						_IF_DEQUEUE(
						    &priv->tx_free_queue[qos],
						    m);
						m_freem(m);
					}
					IF_UNLOCK(&priv->tx_free_queue[qos]);
					/*
					 * XXX locking!
					 */
					priv->ifp->if_drv_flags &=
					    ~IFF_DRV_OACTIVE;
				}
			}
		}
		port++;
		/*
		 * Poll the next port in a 50th of a second.  This spreads
		 * the polling of ports out a little bit.
		 */
		callout_reset(&cvm_oct_poll_timer, hz / 50, cvm_do_timer,
		    NULL);
	} else {
		port = 0;
		/*
		 * If any updates were made in this run, continue iterating
		 * at 1/50th of a second, so that if a link has merely gone
		 * down temporarily (e.g. because of interface
		 * reinitialization) it will not be forced to stay down for
		 * an entire second.
		 */
		if (updated > 0) {
			updated = 0;
			callout_reset(&cvm_oct_poll_timer, hz / 50,
			    cvm_do_timer, NULL);
		} else {
			/*
			 * All ports have been polled.  Start the next
			 * iteration through the ports in one second.
			 */
			callout_reset(&cvm_oct_poll_timer, hz, cvm_do_timer,
			    NULL);
		}
	}
}
static inline void
ntb_rx_copy_callback(struct ntb_transport_qp *qp, void *data)
{
	struct ntb_queue_entry *entry;

	entry = data;
	entry->flags |= IF_NTB_DESC_DONE_FLAG;
	taskqueue_enqueue(taskqueue_swi, &qp->rx_completion_task);
}
static int
nicvf_if_transmit(struct ifnet *ifp, struct mbuf *mbuf)
{
	struct nicvf *nic = if_getsoftc(ifp);
	struct queue_set *qs = nic->qs;
	struct snd_queue *sq;
	struct mbuf *mtmp;
	int qidx;
	int err = 0;

	if (__predict_false(qs == NULL)) {
		panic("%s: missing queue set for %s", __func__,
		    device_get_nameunit(nic->dev));
	}

	/* Select queue */
	if (M_HASHTYPE_GET(mbuf) != M_HASHTYPE_NONE)
		qidx = mbuf->m_pkthdr.flowid % qs->sq_cnt;
	else
		qidx = curcpu % qs->sq_cnt;

	sq = &qs->sq[qidx];

	if (mbuf->m_next != NULL &&
	    (mbuf->m_pkthdr.csum_flags &
	    (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP)) != 0) {
		if (M_WRITABLE(mbuf) == 0) {
			mtmp = m_dup(mbuf, M_NOWAIT);
			m_freem(mbuf);
			if (mtmp == NULL)
				return (ENOBUFS);
			mbuf = mtmp;
		}
	}

	err = drbr_enqueue(ifp, sq->br, mbuf);
	if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING) || !nic->link_up || (err != 0)) {
		/*
		 * Try to enqueue packet to the ring buffer.
		 * If the driver is not active, link down or enqueue operation
		 * failed, return with the appropriate error code.
		 */
		return (err);
	}

	if (NICVF_TX_TRYLOCK(sq) != 0) {
		err = nicvf_xmit_locked(sq);
		NICVF_TX_UNLOCK(sq);
		return (err);
	} else
		taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);

	return (0);
}
int
dmar_qi_intr(void *arg)
{
	struct dmar_unit *unit;

	unit = arg;
	KASSERT(unit->qi_enabled,
	    ("dmar%d: QI is not enabled", unit->unit));
	taskqueue_enqueue(unit->qi_taskqueue, &unit->qi_task);
	return (FILTER_HANDLED);
}
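/*
 * A minimal sketch of how a filter like dmar_qi_intr is typically wired
 * up; the mydev_* names and softc fields are hypothetical.  The filter
 * runs in primary interrupt context, where sleeping and regular mutexes
 * are forbidden, so it only hands work to a taskqueue and returns
 * FILTER_HANDLED; the heavy lifting happens later in the task handler.
 */
struct mydev_softc {
	struct resource *irq_res;
	void *intr_cookie;
	struct taskqueue *tq;
	struct task intr_task;
};

static void mydev_intr_task(void *arg, int pending);

static int
mydev_filter(void *arg)
{
	struct mydev_softc *sc = arg;

	taskqueue_enqueue(sc->tq, &sc->intr_task);
	return (FILTER_HANDLED);
}

static int
mydev_setup_intr(device_t dev, struct mydev_softc *sc)
{

	sc->tq = taskqueue_create_fast("mydev_tq", M_WAITOK,
	    taskqueue_thread_enqueue, &sc->tq);
	taskqueue_start_threads(&sc->tq, 1, PWAIT, "%s tq",
	    device_get_nameunit(dev));
	TASK_INIT(&sc->intr_task, 0, mydev_intr_task, sc);

	/* Filter-only handler: no ithread, all work deferred to the task. */
	return (bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
	    mydev_filter, NULL, sc, &sc->intr_cookie));
}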
/**
 * @brief Synchronize time with host after reboot, restore, etc.
 *
 * The ICTIMESYNCFLAG_SYNC flag bit indicates reboot and restore events
 * of the VM.  After reboot the flag ICTIMESYNCFLAG_SYNC is included in
 * the first time message after the timesync channel is opened.  Since
 * the hv_utils module is loaded after hv_vmbus, the first message is
 * usually missed.  In addition, the system time is automatically set
 * from the emulated hardware clock, which may not be UTC or in the same
 * time zone.  To override these effects, we use the first 50 time
 * samples for the initial system time setting.
 */
static inline void
hv_adj_guesttime(hv_timesync_sc *sc, uint64_t hosttime, uint8_t flags)
{

	sc->time_msg.data = hosttime;

	if (((flags & HV_ICTIMESYNCFLAG_SYNC) != 0) ||
	    ((flags & HV_ICTIMESYNCFLAG_SAMPLE) != 0)) {
		taskqueue_enqueue(taskqueue_thread, &sc->task);
	}
}
static void
nvd_strategy(struct bio *bp)
{
	struct nvd_disk *ndisk;

	ndisk = (struct nvd_disk *)bp->bio_disk->d_drv1;

	mtx_lock(&ndisk->bioqlock);
	bioq_insert_tail(&ndisk->bioq, bp);
	mtx_unlock(&ndisk->bioqlock);
	taskqueue_enqueue(ndisk->tq, &ndisk->bioqtask);
}
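/*
 * A minimal sketch of the consuming side of the producer/consumer pair
 * above, assuming a hypothetical handler name nvd_bioq_task: the task
 * drains the bio queue, taking the lock only to dequeue so that each
 * request can be submitted without holding it.
 */
static void
nvd_bioq_task(void *arg, int pending)
{
	struct nvd_disk *ndisk = arg;
	struct bio *bp;

	for (;;) {
		mtx_lock(&ndisk->bioqlock);
		bp = bioq_takefirst(&ndisk->bioq);
		mtx_unlock(&ndisk->bioqlock);
		if (bp == NULL)
			break;
		/* ... submit bp to the controller; biodone() on error ... */
	}
}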
static void
soaio_init(void)
{

	soaio_lifetime = AIOD_LIFETIME_DEFAULT;
	STAILQ_INIT(&soaio_jobs);
	mtx_init(&soaio_jobs_lock, "soaio jobs", NULL, MTX_DEF);
	soaio_kproc_unr = new_unrhdr(1, INT_MAX, NULL);
	TASK_INIT(&soaio_kproc_task, 0, soaio_kproc_create, NULL);
	if (soaio_target_procs > 0)
		taskqueue_enqueue(taskqueue_thread, &soaio_kproc_task);
}
void
mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	int done;

	done = mlx4_en_poll_rx_cq(cq, MLX4_EN_MAX_RX_POLL);
	if (done == MLX4_EN_MAX_RX_POLL)
		taskqueue_enqueue(cq->tq, &cq->cq_task);
	else
		mlx4_en_arm_cq(priv, cq);
}
int
ncl_inactive(struct vop_inactive_args *ap)
{
	struct nfsnode *np;
	struct sillyrename *sp;
	struct vnode *vp = ap->a_vp;
	boolean_t retv;

	np = VTONFS(vp);

	if (NFS_ISV4(vp) && vp->v_type == VREG) {
		/*
		 * Since mmap()'d files do I/O after VOP_CLOSE(), the NFSv4
		 * Close operations are delayed until now.  Any dirty
		 * buffers/pages must be flushed before the close, so that
		 * the stateid is available for the writes.
		 */
		if (vp->v_object != NULL) {
			VM_OBJECT_WLOCK(vp->v_object);
			retv = vm_object_page_clean(vp->v_object, 0, 0,
			    OBJPC_SYNC);
			VM_OBJECT_WUNLOCK(vp->v_object);
		} else
			retv = TRUE;
		if (retv == TRUE) {
			(void)ncl_flush(vp, MNT_WAIT, NULL, ap->a_td, 1, 0);
			(void)nfsrpc_close(vp, 1, ap->a_td);
		}
	}

	mtx_lock(&np->n_mtx);
	if (vp->v_type != VDIR) {
		sp = np->n_sillyrename;
		np->n_sillyrename = NULL;
	} else
		sp = NULL;
	if (sp) {
		mtx_unlock(&np->n_mtx);
		(void)ncl_vinvalbuf(vp, 0, ap->a_td, 1);
		/*
		 * Remove the silly file that was rename'd earlier
		 */
		ncl_removeit(sp, vp);
		crfree(sp->s_cred);
		TASK_INIT(&sp->s_task, 0, nfs_freesillyrename, sp);
		taskqueue_enqueue(taskqueue_thread, &sp->s_task);
		mtx_lock(&np->n_mtx);
	}
	/* Clear all flags other than NMODIFIED. */
	np->n_flag &= NMODIFIED;
	mtx_unlock(&np->n_mtx);
	return (0);
}
void
power_pm_suspend(int state)
{

	if (power_pm_fn == NULL)
		return;

	if (state != POWER_SLEEP_STATE_STANDBY &&
	    state != POWER_SLEEP_STATE_SUSPEND &&
	    state != POWER_SLEEP_STATE_HIBERNATE)
		return;

	power_pm_task.ta_context = (void *)(intptr_t)state;
	taskqueue_enqueue(taskqueue_thread, &power_pm_task);
}
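/*
 * A minimal sketch of the matching deferred handler, assumed to look
 * roughly like the one below (the name power_pm_deferred_fn is an
 * assumption): the requested sleep state rides along in ta_context and
 * comes back as the handler's context argument, now in a thread context
 * where the registered power-management callback may sleep.
 */
static void
power_pm_deferred_fn(void *arg, int pending)
{
	int state = (intptr_t)arg;

	power_pm_fn(POWER_CMD_SUSPEND, power_pm_arg, state);
}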