/*
 * Detach the SN PC Card: mark the interface down, tear down the
 * interrupt handler, then detach from the network stack and
 * deactivate the device.
 *
 * BUG FIX: the second serializer call was lwkt_serialize_enter()
 * instead of lwkt_serialize_exit(), which re-entered (and never
 * released) the interface serializer on the detach path.
 */
static int
sn_pccard_detach(device_t dev)
{
	struct sn_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	ifp->if_flags &= ~IFF_RUNNING;
	bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
	lwkt_serialize_exit(ifp->if_serializer);

	/* Must be called without the serializer held. */
	ether_ifdetach(&sc->arpcom.ac_if);
	sn_deactivate(dev);
	return 0;
}
/*
 * Re-evaluate the _CST object when we are notified that it changed.
 *
 * Re-probes this CPU's Cx state list, recounts the globally usable
 * Cx states, and re-derives the effective lowest Cx state from the
 * user's request.  All global C-state bookkeeping is protected by
 * acpi_cst_slize.
 */
static void
acpi_cst_notify(device_t dev)
{
	struct acpi_cst_softc *sc = device_get_softc(dev);

	/* This handler must not run in a netisr thread. */
	KASSERT(curthread->td_type != TD_TYPE_NETISR,
	    ("notify in netisr%d", mycpuid));

	lwkt_serialize_enter(&acpi_cst_slize);

	/* Update the list of Cx states. */
	acpi_cst_cx_reprobe_cst(sc);
	acpi_cst_support_list(sc);

	/* Update the new lowest useable Cx state for all CPUs. */
	acpi_cst_global_cx_count();

	/*
	 * Fix up the lowest Cx being used: honor the user's request if it
	 * is now reachable, then clamp to the highest supported state.
	 */
	if (acpi_cst_cx_lowest_req < acpi_cst_cx_count)
		acpi_cst_cx_lowest = acpi_cst_cx_lowest_req;
	if (acpi_cst_cx_lowest > acpi_cst_cx_count - 1)
		acpi_cst_cx_lowest = acpi_cst_cx_count - 1;

	lwkt_serialize_exit(&acpi_cst_slize);
}
/*
 * Re-evaluate the _CST object when we are notified that it changed.
 *
 * Rebuilds this CPU's Cx state list, recomputes the global Cx count,
 * and re-derives the effective lowest Cx state from the user request,
 * all under acpi_cst_slize.
 */
static void
acpi_cst_notify(device_t dev)
{
	struct acpi_cst_softc *sc = device_get_softc(dev);
	int lowest;

	cpuhelper_assert(mycpuid, false);

	lwkt_serialize_enter(&acpi_cst_slize);

	/* Re-probe the Cx state list for this CPU. */
	acpi_cst_cx_reprobe_cst(sc);
	acpi_cst_support_list(sc);

	/* Recompute the lowest usable Cx state across all CPUs. */
	acpi_cst_global_cx_count();

	/*
	 * Honor the requested lowest Cx state if it is now reachable,
	 * then clamp to the highest state actually supported.
	 */
	lowest = acpi_cst_cx_lowest;
	if (acpi_cst_cx_lowest_req < acpi_cst_cx_count)
		lowest = acpi_cst_cx_lowest_req;
	if (lowest > acpi_cst_cx_count - 1)
		lowest = acpi_cst_cx_count - 1;
	acpi_cst_cx_lowest = lowest;

	lwkt_serialize_exit(&acpi_cst_slize);
}
/*
 * Detach the ISA LANCE: stop the chip and remove the interrupt handler
 * while serialized, detach from the network stack, then release every
 * bus resource, DMA map and DMA tag that was allocated at attach time.
 */
static int
le_isa_detach(device_t dev)
{
	struct le_isa_softc *isc = device_get_softc(dev);
	struct lance_softc *sc = &isc->sc_am7990.lsc;

	if (device_is_attached(dev)) {
		lwkt_serialize_enter(sc->ifp->if_serializer);
		lance_stop(sc);
		bus_teardown_intr(dev, isc->sc_ires, isc->sc_ih);
		lwkt_serialize_exit(sc->ifp->if_serializer);
		am7990_detach(&isc->sc_am7990);
	}

	/* Each resource may legitimately be NULL if attach failed early. */
	if (isc->sc_ires != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, isc->sc_irid,
		    isc->sc_ires);
	if (isc->sc_dres != NULL)
		bus_release_resource(dev, SYS_RES_DRQ, isc->sc_drid,
		    isc->sc_dres);
	if (isc->sc_rres != NULL)
		bus_release_resource(dev, SYS_RES_IOPORT, isc->sc_rrid,
		    isc->sc_rres);

	/* Unload and free DMA memory before destroying the tags. */
	if (isc->sc_dmam != NULL) {
		bus_dmamap_unload(isc->sc_dmat, isc->sc_dmam);
		bus_dmamem_free(isc->sc_dmat, sc->sc_mem, isc->sc_dmam);
	}
	if (isc->sc_dmat != NULL)
		bus_dma_tag_destroy(isc->sc_dmat);
	if (isc->sc_pdmat != NULL)
		bus_dma_tag_destroy(isc->sc_pdmat);

	return (0);
}
/*
 * INTR transfer completion callback.
 *
 * On normal completion, restart the interrupt pipe.  On error, log it
 * (unless the transfer was cancelled or never started) and clear a
 * stalled endpoint if needed.  The serializer is always released on
 * exactly one exit path.
 */
static void
lgue_intreof(usbd_xfer_handle xfer, usbd_private_handle priv,
    usbd_status status)
{
	struct lgue_softc *sc = priv;
	struct ifnet *ifp;

	if (sc->lgue_dying)
		return;

	ifp = &sc->lgue_arpcom.ac_if;
	lwkt_serialize_enter(ifp->if_serializer);

	if (status == USBD_NORMAL_COMPLETION) {
		lgue_intrstart(ifp);
	} else if (status != USBD_NOT_STARTED && status != USBD_CANCELLED) {
		if_printf(ifp, "usb error on intr: %s\n", usbd_errstr(status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall(sc->lgue_ep[LGUE_ENDPT_INTR]);
	}

	lwkt_serialize_exit(ifp->if_serializer);
}
/*
 * Detach the Typhoon adapter: stop the chip and remove the interrupt
 * handler while serialized, detach from the network stack, free the
 * software receive buffer descriptors, and release bus resources.
 */
static int
txp_detach(device_t dev)
{
	struct txp_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int i;

	lwkt_serialize_enter(ifp->if_serializer);
	txp_stop(sc);
	txp_shutdown(dev);
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
	lwkt_serialize_exit(ifp->if_serializer);

	ifmedia_removeall(&sc->sc_ifmedia);
	ether_ifdetach(ifp);

	/* Free the software descriptors backing the receive ring. */
	for (i = 0; i < RXBUF_ENTRIES; ++i)
		kfree(sc->sc_rxbufs[i].rb_sd, M_DEVBUF);

	txp_release_resources(dev);
	return (0);
}
/*
 * Detach the virtio block device: flag the detach, stop the device if
 * it is still attached, drain outstanding I/O, and release the disk
 * and scatter/gather list.
 */
static int
vtblk_detach(device_t dev)
{
	struct vtblk_softc *sc = device_get_softc(dev);

	lwkt_serialize_enter(&sc->vtblk_slz);
	/* Block new I/O from vtblk_strategy() before stopping. */
	sc->vtblk_flags |= VTBLK_FLAG_DETACH;
	if (device_is_attached(dev))
		vtblk_stop(sc);
	lwkt_serialize_exit(&sc->vtblk_slz);

	vtblk_drain(sc);

	if (sc->cdev != NULL) {
		disk_destroy(&sc->vtblk_disk);
		sc->cdev = NULL;
	}
	if (sc->vtblk_sglist != NULL) {
		sglist_free(sc->vtblk_sglist);
		sc->vtblk_sglist = NULL;
	}

	return (0);
}
/*
 * Shutdown hook: quiesce the hardware under the interface serializer.
 */
static void
sbsh_shutdown(device_t dev)
{
	struct sbsh_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	sbsh_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}
/*
 * Shutdown hook: stop the adapter under the interface serializer.
 */
static void
vx_pci_shutdown(device_t dev)
{
	struct vx_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	vxstop(sc);
	lwkt_serialize_exit(ifp->if_serializer);
}
/*
 * Suspend hook: quiesce the hardware under the interface serializer.
 */
static int
sbsh_suspend(device_t dev)
{
	struct sbsh_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	sbsh_stop(sc);
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}
/*
 * Shutdown hook: fully stop the adapter under the interface serializer.
 */
static int
rtw_pci_shutdown(device_t dev)
{
	struct rtw_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->sc_ic.ic_if;

	lwkt_serialize_enter(ifp->if_serializer);
	rtw_stop(sc, 1);	/* 1: disable hardware as well */
	lwkt_serialize_exit(ifp->if_serializer);

	return (0);
}
/*
 * Notify the host that new buffers were queued on the virtqueue.
 *
 * The caller holds 'interlock'; it is dropped around the actual host
 * kick, which may be expensive, and re-acquired afterwards.
 */
void
virtqueue_notify(struct virtqueue *vq, lwkt_serialize_t interlock)
{
	/* Ensure updated avail->idx is visible to host. */
	cpu_mfence();

	if (vq_ring_must_notify_host(vq) != 0) {
		lwkt_serialize_exit(interlock);
		vq_ring_notify_host(vq);
		lwkt_serialize_enter(interlock);
	}

	vq->vq_queued_cnt = 0;
}
/*
 * Suspend hook: mark the device suspended under the softc serializer.
 */
static int
vtblk_suspend(device_t dev)
{
	struct vtblk_softc *sc = device_get_softc(dev);

	lwkt_serialize_enter(&sc->vtblk_slz);
	/* XXX BMV: virtio_stop(), etc needed here? */
	sc->vtblk_flags |= VTBLK_FLAG_SUSPEND;
	lwkt_serialize_exit(&sc->vtblk_slz);

	return (0);
}
/*
 * Detach the PC Card: stop the chip and mark the interface down while
 * serialized, then detach from the network stack and free resources.
 */
static int
ex_pccard_detach(device_t dev)
{
	struct ex_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	ex_stop(sc);
	ifp->if_flags &= ~IFF_RUNNING;
	lwkt_serialize_exit(ifp->if_serializer);

	/* Must be called without the serializer held. */
	ether_ifdetach(ifp);
	ex_release_resources(dev);

	return (0);
}
/*
 * feunload - unload the driver and clear the table.
 *
 * XXX TODO:
 * This is usually called when the card is ejected, but can also be
 * caused by a modunload of a controller driver.  The idea is to reset
 * the driver's view of the device and ensure that any driver entry
 * points such as read and write do not hang.
 */
static int
fe_pccard_detach(device_t dev)
{
	struct fe_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	fe_stop(sc);
	bus_teardown_intr(dev, sc->irq_res, sc->irq_handle);
	lwkt_serialize_exit(ifp->if_serializer);

	/* Must be called without the serializer held. */
	ether_ifdetach(ifp);
	fe_release_resource(dev);

	return 0;
}
/*
 * Resume hook: clear the suspend flag under the softc serializer.
 */
static int
vtblk_resume(device_t dev)
{
	struct vtblk_softc *sc = device_get_softc(dev);

	lwkt_serialize_enter(&sc->vtblk_slz);
	/* XXX BMV: virtio_reinit(), etc needed here? */
	sc->vtblk_flags &= ~VTBLK_FLAG_SUSPEND;
#if 0 /* XXX Resume IO? */
	vtblk_startio(sc);
#endif
	lwkt_serialize_exit(&sc->vtblk_slz);

	return (0);
}
/*
 * Detach the adapter: stop the chip and remove the interrupt handler
 * while serialized, detach from the network stack, then release IRQ,
 * register resources and the contiguous receive buffer.
 */
static int
sln_detach(device_t dev)
{
	struct sln_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	sln_stop(sc);
	bus_teardown_intr(dev, sc->sln_irq, sc->sln_intrhand);
	lwkt_serialize_exit(ifp->if_serializer);

	ether_ifdetach(ifp);
	bus_generic_detach(dev);

	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sln_irq);
	bus_release_resource(dev, SL_RES, SL_RID, sc->sln_res);
	contigfree(sc->sln_bufdata.sln_rx_buf, SL_RX_BUFLEN, M_DEVBUF);

	return 0;
}
/*
 * Sysctl handler for the global lowest-allowed Cx state.
 *
 * Accepts a string of the form "Cn", records it as the requested
 * lowest state, clamps the effective value to the number of supported
 * states, then applies the new lowest state to every CPU's softc.
 */
static int
acpi_cst_global_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct acpi_cst_softc *sc;
	char state[8];
	int val, error, i;

	/* Report the currently requested state, e.g. "C2". */
	ksnprintf(state, sizeof(state), "C%d", acpi_cst_cx_lowest_req + 1);
	error = sysctl_handle_string(oidp, state, sizeof(state), req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	/* Parse "Cn"; anything not of that shape is rejected. */
	if (strlen(state) < 2 || toupper(state[0]) != 'C')
		return (EINVAL);
	val = (int) strtol(state + 1, NULL, 10) - 1;
	if (val < 0)
		return (EINVAL);

	lwkt_serialize_enter(&acpi_cst_slize);

	/* Remember the raw request, then clamp the effective value. */
	acpi_cst_cx_lowest_req = val;
	acpi_cst_cx_lowest = val;
	if (acpi_cst_cx_lowest > acpi_cst_cx_count - 1)
		acpi_cst_cx_lowest = acpi_cst_cx_count - 1;

	/* Update the new lowest useable Cx state for all CPUs. */
	for (i = 0; i < acpi_cst_ndevices; i++) {
		sc = device_get_softc(acpi_cst_devices[i]);
		error = acpi_cst_set_lowest(sc, val);
		if (error) {
			/* A failure is only expected on the first CPU. */
			KKASSERT(i == 0);
			break;
		}
	}

	lwkt_serialize_exit(&acpi_cst_slize);

	return error;
}
/*
 * Detach the PC Card.  The 'gone' flag guards against a double detach:
 * once set, later calls just report and return.
 */
static int
ep_pccard_detach(device_t dev)
{
	struct ep_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	if (sc->gone) {
		device_printf(dev, "already unloaded\n");
		lwkt_serialize_exit(ifp->if_serializer);
		return (0);
	}
	ifp->if_flags &= ~IFF_RUNNING;
	sc->gone = 1;
	bus_teardown_intr(dev, sc->irq, sc->ep_intrhand);
	lwkt_serialize_exit(ifp->if_serializer);

	/* Must be called without the serializer held. */
	ether_ifdetach(&sc->arpcom.ac_if);
	ep_free(dev);

	return (0);
}
static int vtblk_strategy(struct dev_strategy_args *ap) { struct vtblk_softc *sc; cdev_t dev = ap->a_head.a_dev; sc = dev->si_drv1; struct bio *bio = ap->a_bio; struct buf *bp = bio->bio_buf; if (sc == NULL) { vtblk_finish_bio(bio, EINVAL); return EINVAL; } /* * Fail any write if RO. Unfortunately, there does not seem to * be a better way to report our readonly'ness to GEOM above. * * XXX: Is that true in DFly? */ if (sc->vtblk_flags & VTBLK_FLAG_READONLY && (bp->b_cmd == BUF_CMD_READ || bp->b_cmd == BUF_CMD_FLUSH)) { vtblk_finish_bio(bio, EROFS); return (EINVAL); } lwkt_serialize_enter(&sc->vtblk_slz); if ((sc->vtblk_flags & VTBLK_FLAG_DETACH) == 0) { devstat_start_transaction(&sc->stats); bioqdisksort(&sc->vtblk_bioq, bio); vtblk_startio(sc); } else { vtblk_finish_bio(bio, ENXIO); } lwkt_serialize_exit(&sc->vtblk_slz); return 0; }
/*
 * Detach the adapter: stop the chip and remove the interrupt handler
 * while serialized, detach from the network stack, then release the
 * IRQ and memory-mapped register resources.
 */
static int
sbsh_detach(device_t dev)
{
	struct sbsh_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (device_is_attached(dev)) {
		lwkt_serialize_enter(ifp->if_serializer);
		sbsh_stop(sc);
		bus_teardown_intr(dev, sc->irq_res, sc->intr_hand);
		lwkt_serialize_exit(ifp->if_serializer);
		ether_ifdetach(ifp);
	}

	if (sc->irq_res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res);
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_MAPS + 4,
		    sc->mem_res);
	}

	return (0);
}
static int acpi_cst_lowest_sysctl(SYSCTL_HANDLER_ARGS) { struct acpi_cst_softc *sc; char state[8]; int val, error; sc = (struct acpi_cst_softc *)arg1; ksnprintf(state, sizeof(state), "C%d", sc->cst_cx_lowest_req + 1); error = sysctl_handle_string(oidp, state, sizeof(state), req); if (error != 0 || req->newptr == NULL) return (error); if (strlen(state) < 2 || toupper(state[0]) != 'C') return (EINVAL); val = (int) strtol(state + 1, NULL, 10) - 1; if (val < 0) return (EINVAL); lwkt_serialize_enter(&acpi_cst_slize); error = acpi_cst_set_lowest(sc, val); lwkt_serialize_exit(&acpi_cst_slize); return error; }
/*
 * Detach the PCI adapter.  The 'gone' flag guards against a double
 * detach: once set, later calls just report and return.
 */
static int
ed_pci_detach(device_t dev)
{
	struct ed_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	lwkt_serialize_enter(ifp->if_serializer);
	if (sc->gone) {
		device_printf(dev, "already unloaded\n");
		lwkt_serialize_exit(ifp->if_serializer);
		return (0);
	}
	ed_stop(sc);
	ifp->if_flags &= ~IFF_RUNNING;
	sc->gone = 1;
	bus_teardown_intr(dev, sc->irq_res, sc->irq_handle);
	lwkt_serialize_exit(ifp->if_serializer);

	/* Must be called without the serializer held. */
	ether_ifdetach(ifp);
	ed_release_resources(dev);

	return (0);
}
/*
 * Controller interrupt handler.
 *
 * Runs under el_serializer.  Drains all pending receive packets from
 * the board's on-chip buffer, re-arming receive + interrupt mode
 * (EL_AC_IRQE|EL_AC_RX) on every exit path.  The exact sequence of
 * port reads/writes mirrors the 3c501 programming model and must not
 * be reordered.
 */
static void
elintr(void *arg)
{
	int unit = (int)arg;
	struct el_softc *sc;
	int base;
	int stat, rxstat, len, done;

	lwkt_serialize_enter(&el_serializer);

	/* Get things pointing properly */
	sc = &el_softc[unit];
	base = sc->el_base;

	dprintf(("elintr: "));

	/* Check board status; if receive is busy, just re-arm and leave. */
	stat = inb(base+EL_AS);
	if(stat & EL_AS_RXBUSY) {
		inb(base+EL_RXC);
		outb(base+EL_AC,(EL_AC_IRQE|EL_AC_RX));
		lwkt_serialize_exit(&el_serializer);
		return;
	}

	done = 0;
	while(!done) {
		/* Stale receive status: re-arm and leave. */
		rxstat = inb(base+EL_RXS);
		if(rxstat & EL_RXS_STALE) {
			inb(base+EL_RXC);
			outb(base+EL_AC,(EL_AC_IRQE|EL_AC_RX));
			lwkt_serialize_exit(&el_serializer);
			return;
		}

		/* If there's an overflow, reinit the board. */
		if(!(rxstat & EL_RXS_NOFLOW)) {
			dprintf(("overflow.\n"));
			el_hardreset(sc);
			/* Put board back into receive mode */
			if(sc->arpcom.ac_if.if_flags & IFF_PROMISC)
				outb(base+EL_RXC,(EL_RXC_PROMISC|EL_RXC_AGF|EL_RXC_DSHORT|EL_RXC_DDRIB|EL_RXC_DOFLOW));
			else
				outb(base+EL_RXC,(EL_RXC_ABROAD|EL_RXC_AGF|EL_RXC_DSHORT|EL_RXC_DDRIB|EL_RXC_DOFLOW));
			inb(base+EL_AS);
			outb(base+EL_RBC,0);
			inb(base+EL_RXC);
			outb(base+EL_AC,(EL_AC_IRQE|EL_AC_RX));
			lwkt_serialize_exit(&el_serializer);
			return;
		}

		/* Incoming packet: read its length (low byte, then high). */
		len = inb(base+EL_RBL);
		len |= inb(base+EL_RBH) << 8;
		dprintf(("receive len=%d rxstat=%x ",len,rxstat));
		outb(base+EL_AC,EL_AC_HOST);

		/* If packet too short or too long, restore rx mode and return */
		if((len <= sizeof(struct ether_header)) || (len > ETHER_MAX_LEN)) {
			if(sc->arpcom.ac_if.if_flags & IFF_PROMISC)
				outb(base+EL_RXC,(EL_RXC_PROMISC|EL_RXC_AGF|EL_RXC_DSHORT|EL_RXC_DDRIB|EL_RXC_DOFLOW));
			else
				outb(base+EL_RXC,(EL_RXC_ABROAD|EL_RXC_AGF|EL_RXC_DSHORT|EL_RXC_DDRIB|EL_RXC_DOFLOW));
			inb(base+EL_AS);
			outb(base+EL_RBC,0);
			inb(base+EL_RXC);
			outb(base+EL_AC,(EL_AC_IRQE|EL_AC_RX));
			lwkt_serialize_exit(&el_serializer);
			return;
		}

		sc->arpcom.ac_if.if_ipackets++;

		/* Copy the data into our buffer */
		outb(base+EL_GPBL,0);
		outb(base+EL_GPBH,0);
		insb(base+EL_BUF,sc->el_pktbuf,len);
		outb(base+EL_RBC,0);
		outb(base+EL_AC,EL_AC_RX);
		dprintf(("%6D-->",sc->el_pktbuf+6,":"));
		dprintf(("%6D\n",sc->el_pktbuf,":"));

		/* Pass data up to upper levels */
		elread(sc,(caddr_t)(sc->el_pktbuf),len);

		/* Is there another packet? */
		stat = inb(base+EL_AS);

		/* If so, do it all again (i.e. don't set done to 1) */
		if(!(stat & EL_AS_RXBUSY))
			dprintf(("<rescan> "));
		else
			done = 1;
	}

	/* Re-arm receive + interrupt mode before returning. */
	inb(base+EL_RXC);
	outb(base+EL_AC,(EL_AC_IRQE|EL_AC_RX));
	lwkt_serialize_exit(&el_serializer);
}
/*
 * Wait for a message to complete on a serializer-protected reply port.
 *
 * The caller holds the port's serializer.  Sleeps (via zsleep, which
 * atomically releases and re-acquires the serializer) until MSGF_DONE
 * is set.  If 'flags' contains PCATCH the wait may be interrupted by a
 * signal, in which case an abort is sent for the message and we keep
 * waiting for it to complete anyway.  Returns the message's ms_error.
 */
static int
lwkt_serialize_waitmsg(lwkt_msg_t msg, int flags)
{
	lwkt_port_t port;
	int sentabort;
	int error;

	KASSERT((msg->ms_flags & MSGF_DROPABLE) == 0,
		("can't wait dropable message"));

	if ((msg->ms_flags & MSGF_DONE) == 0) {
		port = msg->ms_reply_port;
		ASSERT_SERIALIZED(port->mpu_serialize);
		sentabort = 0;
		while ((msg->ms_flags & MSGF_DONE) == 0) {
			void *won;

			/*
			 * If message was sent synchronously from the beginning
			 * the wakeup will be on the message structure, else it
			 * will be on the port structure.
			 */
			if (msg->ms_flags & MSGF_SYNC) {
				won = msg;
			} else {
				won = port;
				port->mp_flags |= MSGPORTF_WAITING;
			}

			/*
			 * Only messages which support abort can be interrupted.
			 * We must still wait for message completion regardless.
			 */
			if ((flags & PCATCH) && sentabort == 0) {
				error = zsleep(won, port->mpu_serialize, PCATCH, "waitmsg", 0);
				if (error) {
					/*
					 * Interrupted: remember the error and
					 * send an abort.  lwkt_abortmsg() is
					 * called without the serializer held.
					 */
					sentabort = error;
					lwkt_serialize_exit(port->mpu_serialize);
					lwkt_abortmsg(msg);
					lwkt_serialize_enter(port->mpu_serialize);
				}
			} else {
				error = zsleep(won, port->mpu_serialize, 0, "waitmsg", 0);
			}
			/* see note at the top on the MSGPORTF_WAITING flag */
		}

		/*
		 * Turn EINTR into ERESTART if the signal indicates.
		 */
		if (sentabort && msg->ms_error == EINTR)
			msg->ms_error = sentabort;
		if (msg->ms_flags & MSGF_QUEUED)
			_lwkt_pullmsg(port, msg);
	} else {
		/* Already done; just dequeue it if it is still queued. */
		if (msg->ms_flags & MSGF_QUEUED) {
			port = msg->ms_reply_port;
			ASSERT_SERIALIZED(port->mpu_serialize);
			_lwkt_pullmsg(port, msg);
		}
	}
	return(msg->ms_error);
}