/*
 * Drain every pending request from one buffer queue into another,
 * preserving the source queue's service order.
 */
void
bufq_move(struct bufq_state *dst, struct bufq_state *src)
{
	struct buf *bp;

	for (;;) {
		bp = BUFQ_GET(src);
		if (bp == NULL)
			break;
		BUFQ_PUT(dst, bp);
	}
}
/* * Handle I/O requests, either directly, or * by passing them to the server process. */ static void mdstrategy(struct buf *bp) { struct md_softc *sc; void * addr; size_t off, xfer; sc = device_lookup_private(&md_cd, MD_UNIT(bp->b_dev)); if (sc->sc_type == MD_UNCONFIGURED) { bp->b_error = ENXIO; goto done; } switch (sc->sc_type) { #if MEMORY_DISK_SERVER case MD_UMEM_SERVER: /* Just add this job to the server's queue. */ BUFQ_PUT(sc->sc_buflist, bp); wakeup((void *)sc); /* see md_server_loop() */ /* no biodone in this case */ return; #endif /* MEMORY_DISK_SERVER */ case MD_KMEM_FIXED: case MD_KMEM_ALLOCATED: /* These are in kernel space. Access directly. */ bp->b_resid = bp->b_bcount; off = (bp->b_blkno << DEV_BSHIFT); if (off >= sc->sc_size) { if (bp->b_flags & B_READ) break; /* EOF */ goto set_eio; } xfer = bp->b_resid; if (xfer > (sc->sc_size - off)) xfer = (sc->sc_size - off); addr = (char *)sc->sc_addr + off; if (bp->b_flags & B_READ) memcpy(bp->b_data, addr, xfer); else memcpy(addr, bp->b_data, xfer); bp->b_resid -= xfer; break; default: bp->b_resid = bp->b_bcount; set_eio: bp->b_error = EIO; break; } done: biodone(bp); }
void dk_strategy(struct dk_intf *di, struct dk_softc *dksc, struct buf *bp) { int s; int wlabel; daddr_t blkno; DPRINTF_FOLLOW(("dk_strategy(%s, %p, %p)\n", di->di_dkname, dksc, bp)); if (!(dksc->sc_flags & DKF_INITED)) { DPRINTF_FOLLOW(("dk_strategy: not inited\n")); bp->b_error = ENXIO; biodone(bp); return; } /* XXX look for some more errors, c.f. ld.c */ bp->b_resid = bp->b_bcount; /* If there is nothing to do, then we are done */ if (bp->b_bcount == 0) { biodone(bp); return; } wlabel = dksc->sc_flags & (DKF_WLABEL|DKF_LABELLING); if (DISKPART(bp->b_dev) != RAW_PART && bounds_check_with_label(&dksc->sc_dkdev, bp, wlabel) <= 0) { biodone(bp); return; } blkno = bp->b_blkno; if (DISKPART(bp->b_dev) != RAW_PART) { struct partition *pp; pp = &dksc->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)]; blkno += pp->p_offset; } bp->b_rawblkno = blkno; /* * Start the unit by calling the start routine * provided by the individual driver. */ s = splbio(); BUFQ_PUT(dksc->sc_bufq, bp); dk_start(di, dksc); splx(s); return; }
/* * Only thing to check here is for legal record lengths (writes only). */ static void mtstrategy(struct buf *bp) { struct mt_softc *sc; int s; sc = device_lookup_private(&mt_cd,UNIT(bp->b_dev)); dlog(LOG_DEBUG, "%s strategy", device_xname(sc->sc_dev)); if ((bp->b_flags & (B_CMD | B_READ)) == 0) { #define WRITE_BITS_IGNORED 8 #if 0 if (bp->b_bcount & ((1 << WRITE_BITS_IGNORED) - 1)) { tprintf(sc->sc_ttyp, "%s: write record must be multiple of %d\n", device_xname(sc->sc_dev), 1 << WRITE_BITS_IGNORED); goto error; } #endif s = 16 * 1024; if (sc->sc_stat2 & SR2_LONGREC) { switch (sc->sc_density) { case T_1600BPI: s = 32 * 1024; break; case T_6250BPI: case T_BADBPI: s = 60 * 1024; break; } } if (bp->b_bcount > s) { tprintf(sc->sc_ttyp, "%s: write record (%d) too big: limit (%d)\n", device_xname(sc->sc_dev), bp->b_bcount, s); #if 0 /* XXX see above */ error: #endif bp->b_error = EIO; biodone(bp); return; } } s = splbio(); BUFQ_PUT(sc->sc_tab, bp); if (sc->sc_active == 0) { sc->sc_active = 1; mtustart(sc); } splx(s); }
/*
 * Feed queued buffers to the driver's diskstart routine until the
 * queue drains or the driver refuses one (non-zero return), in which
 * case the rejected buffer is put back on the queue.
 */
void
dk_start(struct dk_intf *di, struct dk_softc *dksc)
{
	struct buf *job;

	DPRINTF_FOLLOW(("dk_start(%s, %p)\n", di->di_dkname, dksc));

	/* Process the work queue */
	for (;;) {
		job = BUFQ_GET(dksc->sc_bufq);
		if (job == NULL)
			break;
		if (di->di_diskstart(dksc, job) != 0) {
			/* Driver could not take it: requeue and stop. */
			BUFQ_PUT(dksc->sc_bufq, job);
			break;
		}
	}
}
/*
 * rdstrategy(): queue a transfer for an RD disk on the HDC controller.
 *
 * Validates the unit, bounds-checks the request against the disk
 * label, translates the partition-relative block number to an
 * absolute one plus a cylinder, then queues the buffer and starts
 * the controller's DMA engine if it is idle.
 */
void
rdstrategy(struct buf *bp)
{
	struct rdsoftc *rd;
	struct hdcsoftc *sc;
	struct disklabel *lp;
	int s;

	/* Unknown/unattached unit: fail the request with ENXIO. */
	if ((rd = device_lookup_private(&rd_cd, DISKUNIT(bp->b_dev))) == NULL) {
		bp->b_error = ENXIO;
		goto done;
	}
	sc = rd->sc_hdc;
	lp = rd->sc_disk.dk_label;

	/*
	 * Check the transfer against the label; a result <= 0 means
	 * there is nothing (left) to do and the buffer is completed.
	 */
	if ((bounds_check_with_label(&rd->sc_disk, bp, 1)) <= 0)
		goto done;
	if (bp->b_bcount == 0)
		goto done;

	/* Translate to an absolute block number and target cylinder. */
	bp->b_rawblkno =
	    bp->b_blkno + lp->d_partitions[DISKPART(bp->b_dev)].p_offset;
	bp->b_cylinder = bp->b_rawblkno / lp->d_secpercyl;

	s = splbio();
	BUFQ_PUT(sc->sc_q, bp);
	/*
	 * NOTE(review): `inq' is a file-scope flag not visible in this
	 * chunk; it appears to mark the shared DMA engine busy, with
	 * splbio() providing the serialization for the test-and-set —
	 * confirm against the rest of the driver.
	 */
	if (inq == 0) {
		inq = 1;
		vsbus_dma_start(&sc->sc_vd);
	}
	splx(s);
	return;

done:
	biodone(bp);
}
/*
 * fdstrategy(): queue a floppy transfer.
 *
 * Validates the block number and record alignment, clips requests
 * that run past the end of the media (exactly-at-end reads back as
 * EOF), computes the raw block and cylinder, then queues the buffer
 * on the drive and starts it if idle.
 */
void
fdstrategy(struct buf *bp)
{
	/*
	 * NOTE(review): device_lookup_private() can return NULL for an
	 * unattached unit and fd is dereferenced immediately below —
	 * presumably strategy is only reachable for open units; confirm.
	 */
	struct fd_softc *fd = device_lookup_private(&fd_cd, FDUNIT(bp->b_dev));
	struct fdc_softc *fdc = device_private(device_parent(fd->sc_dev));
	int sz;

	/* Valid unit, controller, and request? */
	/* B_FORMAT transfers are exempt from the FDC_BSIZE multiple rule. */
	if (bp->b_blkno < 0 ||
	    ((bp->b_bcount % FDC_BSIZE) != 0 &&
	     (bp->b_flags & B_FORMAT) == 0)) {
		bp->b_error = EINVAL;
		goto done;
	}

	/* If it's a null transfer, return immediately. */
	if (bp->b_bcount == 0)
		goto done;

	/* sz is the transfer length in FDC_BSIZE sectors, rounded up. */
	sz = howmany(bp->b_bcount, FDC_BSIZE);

	if (bp->b_blkno + sz > fd->sc_type->size) {
		sz = fd->sc_type->size - bp->b_blkno;
		if (sz == 0) {
			/* If exactly at end of disk, return EOF. */
			goto done;
		}
		if (sz < 0) {
			/* If past end of disk, return EINVAL. */
			bp->b_error = EINVAL;
			goto done;
		}
		/* Otherwise, truncate request. */
		bp->b_bcount = sz << DEV_BSHIFT;
	}

	bp->b_rawblkno = bp->b_blkno;
	bp->b_cylinder =
	    bp->b_blkno / (FDC_BSIZE / DEV_BSIZE) / fd->sc_type->seccyl;

#ifdef FD_DEBUG
	printf("fdstrategy: b_blkno %llu b_bcount %d blkno %llu cylin %d "
	    "sz %d\n", (unsigned long long)bp->b_blkno, bp->b_bcount,
	    (unsigned long long)fd->sc_blkno, bp->b_cylinder, sz);
#endif

	/* Queue transfer on drive, activate drive and controller if idle. */
	mutex_enter(&fdc->sc_mtx);
	BUFQ_PUT(fd->sc_q, bp);
	/* Cancel any pending motor-off so the spindle stays up. */
	callout_stop(&fd->sc_motoroff_ch);	/* a good idea */
	if (fd->sc_active == 0)
		fdstart(fd);
#ifdef DIAGNOSTIC
	else {
		/* Drive busy but controller idle should not happen. */
		if (fdc->sc_state == DEVIDLE) {
			printf("fdstrategy: controller inactive\n");
			fdcstart(fdc);
		}
	}
#endif
	mutex_exit(&fdc->sc_mtx);
	return;

done:
	/* Toss transfer; we're done early. */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}
/* * Actually translate the requested transfer into one the physical * driver can understand The transfer is described by a buf and will * include only one physical transfer. */ static void ssstrategy(struct buf *bp) { struct ss_softc *ss = device_lookup_private(&ss_cd, SSUNIT(bp->b_dev)); struct scsipi_periph *periph = ss->sc_periph; int s; SC_DEBUG(ss->sc_periph, SCSIPI_DB1, ("ssstrategy %d bytes @ blk %" PRId64 "\n", bp->b_bcount, bp->b_blkno)); /* * If the device has been made invalid, error out */ if (!device_is_active(&ss->sc_dev)) { if (periph->periph_flags & PERIPH_OPEN) bp->b_error = EIO; else bp->b_error = ENODEV; goto done; } /* If negative offset, error */ if (bp->b_blkno < 0) { bp->b_error = EINVAL; goto done; } if (bp->b_bcount > ss->sio.scan_window_size) bp->b_bcount = ss->sio.scan_window_size; /* * If it's a null transfer, return immediatly */ if (bp->b_bcount == 0) goto done; s = splbio(); /* * Place it in the queue of activities for this scanner * at the end (a bit silly because we only have on user.. * (but it could fork())) */ BUFQ_PUT(ss->buf_queue, bp); /* * Tell the device to get going on the transfer if it's * not doing anything, otherwise just wait for completion * (All a bit silly if we're only allowing 1 open but..) */ ssstart(ss->sc_periph); splx(s); return; done: /* * Correctly set the buf to indicate a completed xfer */ bp->b_resid = bp->b_bcount; biodone(bp); }