/*
 * Validate a transfer request for the disk described by dksc and
 * translate the partition-relative bp->b_blkno into an absolute block
 * number in the device's logical block size (stored in bp->b_rawblkno).
 *
 * Returns -1 when the buffer passed all checks and should be queued;
 * otherwise the transfer terminates here (b_resid = b_bcount) and
 * bp->b_error is returned (0 for a trivially-complete zero-length
 * transfer, EINVAL or the bounds-check error otherwise).
 */
static int
dk_translate(struct dk_softc *dksc, struct buf *bp)
{
	int part;
	int wlabel;
	daddr_t blkno;
	struct disklabel *lp;
	struct disk *dk;
	uint64_t numsecs;
	unsigned secsize;

	lp = dksc->sc_dkdev.dk_label;
	dk = &dksc->sc_dkdev;

	part = DISKPART(bp->b_dev);
	numsecs = dk->dk_geom.dg_secperunit;
	secsize = dk->dk_geom.dg_secsize;

	/*
	 * The transfer must be a whole number of blocks and the offset must
	 * not be negative.
	 */
	if ((bp->b_bcount % secsize) != 0 || bp->b_blkno < 0) {
		bp->b_error = EINVAL;
		goto done;
	}

	/* If there is nothing to do, then we are done */
	if (bp->b_bcount == 0)
		goto done;

	wlabel = dksc->sc_flags & (DKF_WLABEL|DKF_LABELLING);
	if (part == RAW_PART) {
		/* Raw partition: bound against the whole medium, in DEV_BSIZE units. */
		uint64_t numblocks = btodb(numsecs * secsize);
		if (bounds_check_with_mediasize(bp, DEV_BSIZE, numblocks) <= 0)
			goto done;
	} else {
		/* Regular partition: label check also honors write-label flags. */
		if (bounds_check_with_label(&dksc->sc_dkdev, bp, wlabel) <= 0)
			goto done;
	}

	/*
	 * Convert the block number to absolute and put it in terms
	 * of the device's logical block size.
	 */
	if (secsize >= DEV_BSIZE)
		blkno = bp->b_blkno / (secsize / DEV_BSIZE);
	else
		blkno = bp->b_blkno * (DEV_BSIZE / secsize);

	if (part != RAW_PART)
		blkno += lp->d_partitions[DISKPART(bp->b_dev)].p_offset;

	bp->b_rawblkno = blkno;

	return -1;

done:
	bp->b_resid = bp->b_bcount;
	return bp->b_error;
}
/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 *
 * On success the buffer is queued on the per-disk bufq and the SCSI
 * transfer handler is scheduled; on failure the buffer is completed
 * here with B_ERROR set.  The reference taken by sdlookup() is dropped
 * on every path.
 */
void
sdstrategy(struct buf *bp)
{
	struct sd_softc *sc;
	int s;

	sc = sdlookup(DISKUNIT(bp->b_dev));
	if (sc == NULL) {
		/* No such unit attached. */
		bp->b_error = ENXIO;
		goto bad;
	}
	if (sc->flags & SDF_DYING) {
		/* Device is being detached; refuse new I/O. */
		bp->b_error = ENXIO;
		goto bad;
	}

	SC_DEBUG(sc->sc_link, SDEV_DB2, ("sdstrategy: %ld bytes @ blk %lld\n",
	    bp->b_bcount, (long long)bp->b_blkno));
	/*
	 * If the device has been made invalid, error out
	 */
	if ((sc->sc_link->flags & SDEV_MEDIA_LOADED) == 0) {
		/* EIO if it was once open (media changed), ENODEV otherwise. */
		if (sc->sc_link->flags & SDEV_OPEN)
			bp->b_error = EIO;
		else
			bp->b_error = ENODEV;
		goto bad;
	}

	/* Validate the request. */
	if (bounds_check_with_label(bp, sc->sc_dk.dk_label) == -1)
		goto done;

	/* Place it in the queue of disk activities for this disk. */
	bufq_queue(&sc->sc_bufq, bp);

	/*
	 * Tell the device to get going on the transfer if it's
	 * not doing anything, otherwise just wait for completion
	 */
	scsi_xsh_add(&sc->sc_xsh);

	device_unref(&sc->sc_dev);
	return;

bad:
	bp->b_flags |= B_ERROR;
	bp->b_resid = bp->b_bcount;
done:
	/* Complete the buffer here; drop the lookup reference if we hold one. */
	s = splbio();
	biodone(bp);
	splx(s);
	if (sc != NULL)
		device_unref(&sc->sc_dev);
}
void dk_strategy(struct dk_intf *di, struct dk_softc *dksc, struct buf *bp) { int s; int wlabel; daddr_t blkno; DPRINTF_FOLLOW(("dk_strategy(%s, %p, %p)\n", di->di_dkname, dksc, bp)); if (!(dksc->sc_flags & DKF_INITED)) { DPRINTF_FOLLOW(("dk_strategy: not inited\n")); bp->b_error = ENXIO; biodone(bp); return; } /* XXX look for some more errors, c.f. ld.c */ bp->b_resid = bp->b_bcount; /* If there is nothing to do, then we are done */ if (bp->b_bcount == 0) { biodone(bp); return; } wlabel = dksc->sc_flags & (DKF_WLABEL|DKF_LABELLING); if (DISKPART(bp->b_dev) != RAW_PART && bounds_check_with_label(&dksc->sc_dkdev, bp, wlabel) <= 0) { biodone(bp); return; } blkno = bp->b_blkno; if (DISKPART(bp->b_dev) != RAW_PART) { struct partition *pp; pp = &dksc->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)]; blkno += pp->p_offset; } bp->b_rawblkno = blkno; /* * Start the unit by calling the start routine * provided by the individual driver. */ s = splbio(); bufq_put(dksc->sc_bufq, bp); dk_start(di, dksc); splx(s); return; }
/*
 * Read/write routine for a buffer.  Validates the arguments and schedules the
 * transfer.  Does not wait for the transfer to complete.
 *
 * The reference taken by wdlookup() is dropped on every path.
 */
void
wdstrategy(struct buf *bp)
{
	struct wd_softc *wd;
	int s;

	wd = wdlookup(DISKUNIT(bp->b_dev));
	if (wd == NULL) {
		/* No such unit attached. */
		bp->b_error = ENXIO;
		goto bad;
	}

	WDCDEBUG_PRINT(("wdstrategy (%s)\n", wd->sc_dev.dv_xname),
	    DEBUG_XFERS);

	/* If device invalidated (e.g. media change, door open), error. */
	if ((wd->sc_flags & WDF_LOADED) == 0) {
		bp->b_error = EIO;
		goto bad;
	}

	/* Validate the request. */
	if (bounds_check_with_label(bp, wd->sc_dk.dk_label) == -1)
		goto done;

	/*
	 * Check that the number of sectors can fit in a byte
	 * (the per-command sector count is limited to 8 bits).
	 */
	if ((bp->b_bcount / wd->sc_dk.dk_label->d_secsize) >=
	    (1 << NBBY)) {
		bp->b_error = EINVAL;
		goto bad;
	}

	/* Queue transfer on drive, activate drive and controller if idle. */
	bufq_queue(&wd->sc_bufq, bp);
	s = splbio();
	wdstart(wd);
	splx(s);
	device_unref(&wd->sc_dev);
	return;

bad:
	bp->b_flags |= B_ERROR;
	bp->b_resid = bp->b_bcount;
done:
	/* Complete the buffer here; drop the lookup reference if held. */
	s = splbio();
	biodone(bp);
	splx(s);
	if (wd != NULL)
		device_unref(&wd->sc_dev);
}
/*
 * Strategy routine for an OpenFirmware-backed disk: perform the transfer
 * synchronously through the firmware read/write primitives.
 *
 * NOTE(review): `of' is not checked for NULL after device_lookup_private()
 * — confirm callers guarantee a valid unit number.
 */
void
ofdisk_strategy(struct buf *bp)
{
	struct ofdisk_softc *of =
		device_lookup_private(&ofdisk_cd, DISKUNIT(bp->b_dev));
	struct partition *p;
	u_quad_t off;
	int read;
	int (*OF_io)(int, void *, int);
	daddr_t blkno = bp->b_blkno;

	bp->b_resid = 0;
	/* A zero-length transfer completes immediately. */
	if (bp->b_bcount == 0)
		goto done;

	/* Select the firmware primitive matching the transfer direction. */
	OF_io = bp->b_flags & B_READ ? OF_read :
		(int(*)(int, void*, int))OF_write;

	if (DISKPART(bp->b_dev) != RAW_PART) {
		/* Clip to the partition, then make the block number absolute. */
		if (bounds_check_with_label(&of->sc_dk, bp, 0) <= 0) {
			bp->b_resid = bp->b_bcount;
			goto done;
		}
		p = &of->sc_dk.dk_label->d_partitions[DISKPART(bp->b_dev)];
		blkno = bp->b_blkno + p->p_offset;
	}

	disk_busy(&of->sc_dk);

	off = (u_quad_t)blkno * DEV_BSIZE;
	read = -1;
	do {
		/* Seek then transfer; a return of -2 from OF_io retries. */
		if (OF_seek(of->sc_ihandle, off) < 0)
			break;
		read = OF_io(of->sc_ihandle, bp->b_data, bp->b_bcount);
	} while (read == -2);

	if (read < 0) {
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
	} else
		bp->b_resid = bp->b_bcount - read;

	disk_unbusy(&of->sc_dk, bp->b_bcount - bp->b_resid,
	    (bp->b_flags & B_READ));

done:
	biodone(bp);
}
/* * Queue a transfer request, and if possible, hand it to the controller. */ void rastrategy(struct buf *bp) { struct ra_softc *ra = mscp_device_lookup(bp->b_dev); int b; /* * Make sure this is a reasonable drive to use. */ if (ra == NULL) { bp->b_error = ENXIO; goto done; } /* * If drive is open `raw' or reading label, let it at it. */ if (ra->ra_state == DK_RDLABEL) { /* Make some statistics... /bqt */ b = splbio(); disk_busy(&ra->ra_disk); splx(b); mscp_strategy(bp, device_parent(ra->ra_dev)); return; } /* If disk is not online, try to put it online */ if (ra->ra_state == DK_CLOSED) if (ra_putonline(bp->b_dev, ra) == MSCP_FAILED) { bp->b_error = EIO; goto done; } /* * Determine the size of the transfer, and make sure it is * within the boundaries of the partition. */ if (bounds_check_with_label(&ra->ra_disk, bp, ra->ra_wlabel) <= 0) goto done; /* Make some statistics... /bqt */ b = splbio(); disk_busy(&ra->ra_disk); splx(b); mscp_strategy(bp, device_parent(ra->ra_dev)); return; done: biodone(bp); }
void rdstrategy(struct buf *bp) { struct rd_softc *sc; struct partition *p; size_t off, xfer; caddr_t addr; int s; sc = rdlookup(DISKUNIT(bp->b_dev)); if (sc == NULL) { bp->b_error = ENXIO; goto bad; } /* Validate the request. */ if (bounds_check_with_label(bp, sc->sc_dk.dk_label) == -1) goto done; /* Do the transfer. */ /* XXX: Worry about overflow when computing off? */ p = &sc->sc_dk.dk_label->d_partitions[DISKPART(bp->b_dev)]; off = DL_GETPOFFSET(p) * sc->sc_dk.dk_label->d_secsize + (u_int64_t)bp->b_blkno * DEV_BSIZE; if (off > rd_root_size) off = rd_root_size; xfer = bp->b_bcount; if (xfer > rd_root_size - off) xfer = rd_root_size - off; addr = rd_root_image + off; if (bp->b_flags & B_READ) memcpy(bp->b_data, addr, xfer); else memcpy(addr, bp->b_data, xfer); bp->b_resid = bp->b_bcount - xfer; goto done; bad: bp->b_flags |= B_ERROR; bp->b_resid = bp->b_bcount; done: s = splbio(); biodone(bp); splx(s); if (sc != NULL) device_unref(&sc->sc_dev); }
/*
 * Strategy routine for the concatenated disk: validate the request,
 * bounds-check it against the label, and hand it to ccdstart().
 *
 * NOTE(review): `unit' is not range-checked before indexing ccd_softc[]
 * — confirm the caller/device node layout guarantees a valid unit.
 */
void
ccdstrategy(struct buf *bp)
{
	int unit = ccdunit(bp->b_dev);
	struct ccd_softc *cs = &ccd_softc[unit];
	int s;
	int wlabel;
	struct disklabel *lp;

	CCD_DPRINTF(CCDB_FOLLOW, ("ccdstrategy(%p): unit %d\n", bp, unit));

	/* Refuse I/O until the concatenated disk has been configured. */
	if ((cs->sc_flags & CCDF_INITED) == 0) {
		bp->b_error = ENXIO;
		bp->b_resid = bp->b_bcount;
		bp->b_flags |= B_ERROR;
		goto done;
	}

	/* If it's a nil transfer, wake up the top half now. */
	if (bp->b_bcount == 0)
		goto done;

	lp = cs->sc_dkdev.dk_label;

	/*
	 * Do bounds checking and adjust transfer.  If there's an
	 * error, the bounds check will flag that for us.
	 */
	wlabel = cs->sc_flags & (CCDF_WLABEL|CCDF_LABELLING);
	if (DISKPART(bp->b_dev) != RAW_PART &&
	    bounds_check_with_label(bp, lp, cs->sc_dkdev.dk_cpulabel,
	    wlabel) <= 0)
		goto done;

	bp->b_resid = bp->b_bcount;

	/*
	 * "Start" the unit.
	 */
	s = splbio();
	ccdstart(cs, bp);
	splx(s);
	return;

done:
	s = splbio();
	biodone(bp);
	splx(s);
}
void prestostrategy(struct buf *bp) { int unit, part; struct presto_softc *sc; size_t offset, count; int s; unit = DISKUNIT(bp->b_dev); sc = (struct presto_softc *)device_lookup(&presto_cd, unit); /* Sort rogue requests out */ if (sc == NULL || bp->b_blkno < 0 || (bp->b_bcount % sc->sc_dk.dk_label->d_secsize) != 0) { bp->b_error = EINVAL; goto bad; } /* Do not write on "no trespassing" areas... */ part = DISKPART(bp->b_dev); if (part != RAW_PART && bounds_check_with_label(bp, sc->sc_dk.dk_label, sc->sc_dk.dk_cpulabel, 1) <= 0) goto bad; /* Bound the request size, then move data between buf and nvram */ bp->b_resid = bp->b_bcount; offset = (bp->b_blkno << DEV_BSHIFT) + PSERVE_OFFSET; count = bp->b_bcount; if (count > (sc->sc_memsize - offset)) count = (sc->sc_memsize - offset); if (ISSET(bp->b_flags, B_READ)) bcopy(sc->sc_mem + offset, bp->b_data, count); else bcopy(bp->b_data, sc->sc_mem + offset, count); bp->b_resid -= count; goto done; bad: bp->b_flags |= B_ERROR; bp->b_resid = bp->b_bcount; done: s = splbio(); biodone(bp); splx(s); }
/*
 * Strategy routine for the HDC-attached disk: bounds-check the request,
 * translate to an absolute block, and queue it for DMA.
 */
void
rdstrategy(struct buf *bp)
{
	struct rdsoftc *rd;
	struct hdcsoftc *sc;
	struct disklabel *lp;
	int s;

	if ((rd = device_lookup_private(&rd_cd, DISKUNIT(bp->b_dev))) == NULL) {
		bp->b_error = ENXIO;
		goto done;
	}
	sc = rd->sc_hdc;

	lp = rd->sc_disk.dk_label;
	/* Clip the transfer to the partition (write-label forced on here). */
	if ((bounds_check_with_label(&rd->sc_disk, bp, 1)) <= 0)
		goto done;

	/* A zero-length transfer completes immediately. */
	if (bp->b_bcount == 0)
		goto done;

	/* Translate to an absolute block and derive the cylinder for sorting. */
	bp->b_rawblkno =
	    bp->b_blkno + lp->d_partitions[DISKPART(bp->b_dev)].p_offset;
	bp->b_cylinder = bp->b_rawblkno / lp->d_secpercyl;

	s = splbio();
	BUFQ_PUT(sc->sc_q, bp);
	/*
	 * NOTE(review): `inq' appears to be a file-scope "DMA in progress"
	 * flag shared with the completion path — confirm every access is
	 * made under splbio().
	 */
	if (inq == 0) {
		inq = 1;
		vsbus_dma_start(&sc->sc_vd);
	}
	splx(s);
	return;

done:
	biodone(bp);
}
static int ldstart(struct ld_softc *sc, struct buf *bp) { struct disklabel *lp; int part, s, rv; if ((sc->sc_flags & LDF_DETACH) != 0) { bp->b_error = EIO; bp->b_flags |= B_ERROR; bp->b_resid = bp->b_bcount; biodone(bp); return (-1); } part = DISKPART(bp->b_dev); lp = sc->sc_dk.dk_label; /* * The transfer must be a whole number of blocks and the offset must * not be negative. */ if ((bp->b_bcount % lp->d_secsize) != 0 || bp->b_blkno < 0) { bp->b_flags |= B_ERROR; biodone(bp); return (-1); } /* * If it's a null transfer, return. */ if (bp->b_bcount == 0) { bp->b_resid = bp->b_bcount; biodone(bp); return (-1); } /* * Do bounds checking and adjust the transfer. If error, process. * If past the end of partition, just return. */ if (part != RAW_PART && bounds_check_with_label(bp, lp, (sc->sc_flags & (LDF_WLABEL | LDF_LABELLING)) != 0) <= 0) { bp->b_resid = bp->b_bcount; biodone(bp); return (-1); } /* * Convert the logical block number to a physical one and put it in * terms of the device's logical block size. */ if (lp->d_secsize >= DEV_BSIZE) bp->b_rawblkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE); else bp->b_rawblkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize); if (part != RAW_PART) bp->b_rawblkno += lp->d_partitions[part].p_offset; s = splbio(); disk_busy(&sc->sc_dk); sc->sc_queuecnt++; splx(s); if ((rv = (*sc->sc_start)(sc, bp)) != 0) { bp->b_error = rv; bp->b_flags |= B_ERROR; bp->b_resid = bp->b_bcount; s = splbio(); lddone(sc, bp); splx(s); } return (0); }
void mcdstrategy(struct buf *bp) { struct mcd_softc *sc; struct disklabel *lp; daddr_t blkno; int s; sc = device_lookup_private(&mcd_cd, MCDUNIT(bp->b_dev)); lp = sc->sc_dk.dk_label; /* Test validity. */ MCD_TRACE("strategy: buf=0x%p blkno=%d bcount=%d\n", bp, (int) bp->b_blkno, bp->b_bcount); if (bp->b_blkno < 0 || (bp->b_bcount % sc->blksize) != 0) { printf("%s: strategy: blkno = %" PRId64 " bcount = %d\n", device_xname(sc->sc_dev), bp->b_blkno, bp->b_bcount); bp->b_error = EINVAL; goto done; } /* If device invalidated (e.g. media change, door open), error. */ if ((sc->flags & MCDF_LOADED) == 0) { MCD_TRACE("strategy: drive not valid%s", "\n"); bp->b_error = EIO; goto done; } /* No data to read. */ if (bp->b_bcount == 0) goto done; /* * Do bounds checking, adjust transfer. if error, process. * If end of partition, just return. */ if (MCDPART(bp->b_dev) != RAW_PART && bounds_check_with_label(&sc->sc_dk, bp, (sc->flags & (MCDF_WLABEL|MCDF_LABELLING)) != 0) <= 0) goto done; /* * Now convert the block number to absolute and put it in * terms of the device's logical block size. */ blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE); if (MCDPART(bp->b_dev) != RAW_PART) blkno += lp->d_partitions[MCDPART(bp->b_dev)].p_offset; bp->b_rawblkno = blkno; /* Queue it. */ s = splbio(); bufq_put(sc->buf_queue, bp); splx(s); if (!sc->active) mcdstart(sc); return; done: bp->b_resid = bp->b_bcount; biodone(bp); }
/*
 * Actually translate the requested transfer into one the physical driver can
 * understand.  The transfer is described by a buf and will include only one
 * physical transfer.
 *
 * The reference taken by cdlookup() is dropped on every path.
 */
void
cdstrategy(struct buf *bp)
{
	struct cd_softc *cd;
	int s;

	if ((cd = cdlookup(DISKUNIT(bp->b_dev))) == NULL) {
		/* No such unit attached. */
		bp->b_error = ENXIO;
		goto bad;
	}

	SC_DEBUG(cd->sc_link, SDEV_DB2,
	    ("cdstrategy: %ld bytes @ blk %d\n", bp->b_bcount, bp->b_blkno));
	/*
	 * If the device has been made invalid, error out
	 * maybe the media changed, or no media loaded
	 */
	if ((cd->sc_link->flags & SDEV_MEDIA_LOADED) == 0) {
		bp->b_error = EIO;
		goto bad;
	}

	/*
	 * The transfer must be a whole number of blocks.
	 */
	if ((bp->b_bcount % cd->sc_dk.dk_label->d_secsize) != 0) {
		bp->b_error = EINVAL;
		goto bad;
	}

	/*
	 * If it's a null transfer, return immediately
	 */
	if (bp->b_bcount == 0)
		goto done;

	/*
	 * Do bounds checking, adjust transfer.  if error, process.
	 * If end of partition, just return.
	 */
	if (bounds_check_with_label(bp, cd->sc_dk.dk_label,
	    (cd->flags & (CDF_WLABEL|CDF_LABELLING)) != 0) <= 0)
		goto done;

	s = splbio();

	/*
	 * Place it in the queue of disk activities for this disk
	 */
	disksort(&cd->buf_queue, bp);

	/*
	 * Tell the device to get going on the transfer if it's
	 * not doing anything, otherwise just wait for completion
	 */
	cdstart(cd);

	device_unref(&cd->sc_dev);
	splx(s);
	return;

bad:
	bp->b_flags |= B_ERROR;
done:
	/*
	 * Correctly set the buf to indicate a completed xfer
	 */
	bp->b_resid = bp->b_bcount;
	s = splbio();
	biodone(bp);
	splx(s);
	if (cd != NULL)
		device_unref(&cd->sc_dev);
}
void marustrategy(struct buf *bp) { struct maru_softc *sc; struct disklabel *lp; struct partition *pp; int len; int err = ENXIO; m_u64 offset; DB("marustrategy(%p)\n", bp); maru_printbuf(bp); DB("ms:1\n"); sc = &maru_softc[maruunit(bp->b_dev)]; if (num_maru<1 || maruunit(bp->b_dev) >= num_maru || !(sc->sc_flags&MUF_INITED) || !sc->sc_kapi) { err: DB("ms:2\n"); maru_berror(bp, err); DB("ms:3\n"); return; } DB("ms:4\n"); len = bp->b_bcount; bp->b_resid = len; if (len<1) { DB("ms:5\n"); biodone(bp); DB("ms:6\n"); return; } DB("ms:6.1\n"); offset = dbtob(bp->b_blkno); lp = sc->sc_dkdev.dk_label; /* the transfer must be a whole number of blocks */ if (len % lp->d_secsize != 0) { maru_berror(bp, EINVAL); return; } /* * Do bounds checking and adjust transfer. If there's an error, * the bounds check will flag that for us. */ DB("ms:6.2\n"); if (DISKPART(bp->b_dev) != RAW_PART && bounds_check_with_label(bp, lp, sc->sc_flags&MUF_WLABEL) <= 0) { biodone(bp); return; } /* * Translate the partition-relative block number to an absolute. 
*/ DB("ms:6.3\n"); if (DISKPART(bp->b_dev) != RAW_PART) { pp = &sc->sc_dkdev.dk_label->d_partitions[DISKPART(bp->b_dev)]; offset += pp->p_offset * lp->d_secsize; } if (bp->b_flags & B_READ) { struct maru_message *msg; DB("ms:7\n"); msg = malloc(sizeof *msg, M_DEVBUF, M_NOWAIT); if (!msg) goto err; msg->mm_flags = MARU_READ_REQ; DB("ms:8\n"); msg->mm_id = maru_acquire_token(sc, bp); msg->mm_len = len; msg->mm_offset = offset; DB("ms:9\n"); if ((err = sc->sc_kapi->ka_inject(sc->sc_kapi, msg, sizeof *msg))) { DB("ms:10\n"); free(msg, M_DEVBUF); goto err; } DB("ms:11\n"); sc->sc_reading++; return; } else /* B_WRITE */ { struct maru_message *msg; DB("ms:13\n"); msg = malloc(sizeof *msg, M_DEVBUF, M_NOWAIT); if (!msg) goto err; msg->mm_flags = MARU_WRITE; msg->mm_id = maru_acquire_token(sc, bp); msg->mm_len = len; msg->mm_offset = offset; DB("ms:14\n"); if ((err = sc->sc_kapi->ka_inject(sc->sc_kapi, msg, sizeof(msg)+msg->mm_len))) { DB("ms:15\n"); free(msg, M_DEVBUF); goto err; } DB("ms:16\n"); sc->sc_writing++; return; } DB("ms:17\n"); }
/*
 * Strategy routine for the vnode pseudo-disk: perform the transfer
 * synchronously through vn_rdwr() on the backing vnode, with optional
 * per-buffer encryption.
 */
void
vndstrategy(struct buf *bp)
{
	int unit = DISKUNIT(bp->b_dev);
	struct vnd_softc *sc;
	struct partition *p;
	off_t off;
	long origbcount;
	int s;

	DNPRINTF(VDB_FOLLOW, "vndstrategy(%p): unit %d\n", bp, unit);

	if (unit >= numvnd) {
		/* No such unit. */
		bp->b_error = ENXIO;
		goto bad;
	}
	sc = &vnd_softc[unit];

	if ((sc->sc_flags & VNF_HAVELABEL) == 0) {
		/* Not configured with a disklabel yet. */
		bp->b_error = ENXIO;
		goto bad;
	}

	/*
	 * Many of the distrib scripts assume they can issue arbitrary
	 * sized requests to raw vnd devices irrespective of the
	 * emulated disk geometry.
	 *
	 * To continue supporting this, round the block count up to a
	 * multiple of d_secsize for bounds_check_with_label(), and
	 * then restore afterwards.
	 *
	 * We only do this for non-encrypted vnd, because encryption
	 * requires operating on blocks at a time.
	 */
	origbcount = bp->b_bcount;
	if (sc->sc_keyctx == NULL) {
		u_int32_t secsize = sc->sc_dk.dk_label->d_secsize;
		bp->b_bcount = ((origbcount + secsize - 1) & ~(secsize - 1));
#ifdef DIAGNOSTIC
		if (bp->b_bcount != origbcount) {
			struct proc *pr = curproc;
			printf("%s: sloppy %s from proc %d (%s): "
			    "blkno %lld bcount %ld\n", sc->sc_dev.dv_xname,
			    (bp->b_flags & B_READ) ? "read" : "write",
			    pr->p_pid, pr->p_comm, (long long)bp->b_blkno,
			    origbcount);
		}
#endif
	}

	if (bounds_check_with_label(bp, sc->sc_dk.dk_label) == -1) {
		/* Restore the caller's bcount before completing. */
		bp->b_resid = bp->b_bcount = origbcount;
		goto done;
	}

	/* Undo the round-up unless the bounds check shrank the request. */
	if (origbcount < bp->b_bcount)
		bp->b_bcount = origbcount;

	/* Absolute byte offset of the request within the backing file. */
	p = &sc->sc_dk.dk_label->d_partitions[DISKPART(bp->b_dev)];
	off = DL_GETPOFFSET(p) * sc->sc_dk.dk_label->d_secsize +
	    (u_int64_t)bp->b_blkno * DEV_BSIZE;

	/* Encrypt in place before writing out. */
	if (sc->sc_keyctx && !(bp->b_flags & B_READ))
		vndencryptbuf(sc, bp, 1);

	/*
	 * Use IO_NOLIMIT because upper layer has already checked I/O
	 * for limits, so there is no need to do it again.
	 */
	bp->b_error = vn_rdwr((bp->b_flags & B_READ) ? UIO_READ : UIO_WRITE,
	    sc->sc_vp, bp->b_data, bp->b_bcount, off, UIO_SYSSPACE, IO_NOLIMIT,
	    sc->sc_cred, &bp->b_resid, curproc);
	if (bp->b_error)
		bp->b_flags |= B_ERROR;

	/* Data in buffer cache needs to be in clear */
	if (sc->sc_keyctx)
		vndencryptbuf(sc, bp, 0);

	goto done;

bad:
	bp->b_flags |= B_ERROR;
	bp->b_resid = bp->b_bcount;
done:
	s = splbio();
	biodone(bp);
	splx(s);
}
/* * Read/write routine for a buffer. Validates the arguments and schedules the * transfer. Does not wait for the transfer to complete. */ void edmcastrategy(struct buf *bp) { struct ed_softc *ed; struct disklabel *lp; daddr_t blkno; ed = device_lookup_private(&ed_cd, DISKUNIT(bp->b_dev)); lp = ed->sc_dk.dk_label; ATADEBUG_PRINT(("edmcastrategy (%s)\n", device_xname(ed->sc_dev)), DEBUG_XFERS); /* Valid request? */ if (bp->b_blkno < 0 || (bp->b_bcount % lp->d_secsize) != 0 || (bp->b_bcount / lp->d_secsize) >= (1 << NBBY)) { bp->b_error = EINVAL; goto done; } /* If device invalidated (e.g. media change, door open), error. */ if ((ed->sc_flags & WDF_LOADED) == 0) { bp->b_error = EIO; goto done; } /* If it's a null transfer, return immediately. */ if (bp->b_bcount == 0) goto done; /* * Do bounds checking, adjust transfer. if error, process. * If end of partition, just return. */ if (DISKPART(bp->b_dev) != RAW_PART && bounds_check_with_label(&ed->sc_dk, bp, (ed->sc_flags & (WDF_WLABEL|WDF_LABELLING)) != 0) <= 0) goto done; /* * Now convert the block number to absolute and put it in * terms of the device's logical block size. */ if (lp->d_secsize >= DEV_BSIZE) blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE); else blkno = bp->b_blkno * (DEV_BSIZE / lp->d_secsize); if (DISKPART(bp->b_dev) != RAW_PART) blkno += lp->d_partitions[DISKPART(bp->b_dev)].p_offset; bp->b_rawblkno = blkno; /* Queue transfer on drive, activate drive and controller if idle. */ mutex_enter(&ed->sc_q_lock); bufq_put(ed->sc_q, bp); mutex_exit(&ed->sc_q_lock); /* Ring the worker thread */ wakeup(ed->edc_softc); return; done: /* Toss transfer; we're done early. */ bp->b_resid = bp->b_bcount; biodone(bp); }
/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 *
 * The reference taken by sdlookup() is dropped on every path.
 */
void
sdstrategy(struct buf *bp)
{
	struct sd_softc *sc;
	int s;

	sc = sdlookup(DISKUNIT(bp->b_dev));
	if (sc == NULL) {
		/* No such unit attached. */
		bp->b_error = ENXIO;
		goto bad;
	}
	if (sc->flags & SDF_DYING) {
		/* Device is being detached; refuse new I/O. */
		bp->b_error = ENXIO;
		goto bad;
	}

	SC_DEBUG(sc->sc_link, SDEV_DB2, ("sdstrategy: %ld bytes @ blk %d\n",
	    bp->b_bcount, bp->b_blkno));
	/*
	 * If the device has been made invalid, error out
	 */
	if ((sc->sc_link->flags & SDEV_MEDIA_LOADED) == 0) {
		/* EIO if it was once open (media changed), ENODEV otherwise. */
		if (sc->sc_link->flags & SDEV_OPEN)
			bp->b_error = EIO;
		else
			bp->b_error = ENODEV;
		goto bad;
	}
	/*
	 * If it's a null transfer, return immediately
	 */
	if (bp->b_bcount == 0)
		goto done;

	/*
	 * The transfer must be a whole number of sectors.
	 */
	if ((bp->b_bcount % sc->sc_dk.dk_label->d_secsize) != 0) {
		bp->b_error = EINVAL;
		goto bad;
	}
	/*
	 * Do bounds checking, adjust transfer.  if error, process.
	 * If end of partition, just return.
	 */
	if (bounds_check_with_label(bp, sc->sc_dk.dk_label,
	    (sc->flags & (SDF_WLABEL|SDF_LABELLING)) != 0) <= 0)
		goto done;

	/*
	 * Place it in the queue of disk activities for this disk
	 */
	mtx_enter(&sc->sc_buf_mtx);
	disksort(&sc->sc_buf_queue, bp);
	mtx_leave(&sc->sc_buf_mtx);

	/*
	 * Tell the device to get going on the transfer if it's
	 * not doing anything, otherwise just wait for completion
	 */
	sdstart(sc);

	device_unref(&sc->sc_dev);
	return;

bad:
	bp->b_flags |= B_ERROR;
done:
	/*
	 * Correctly set the buf to indicate a completed xfer
	 */
	bp->b_resid = bp->b_bcount;
	s = splbio();
	biodone(bp);
	splx(s);
	if (sc != NULL)
		device_unref(&sc->sc_dev);
}
/*
 * Queue the request, and wakeup the kernel thread to handle it.
 *
 * Validates the request, translates the block number to an absolute
 * one in the device's logical block size (bp->b_rawblkno), and places
 * the buffer on sc_tab for the worker thread.
 */
static void
vndstrategy(struct buf *bp)
{
	int unit = vndunit(bp->b_dev);
	struct vnd_softc *vnd =
	    device_lookup_private(&vnd_cd, unit);
	struct disklabel *lp;
	daddr_t blkno;
	int s = splbio();

	if (vnd == NULL) {
		/* No such unit configured. */
		bp->b_error = ENXIO;
		goto done;
	}
	lp = vnd->sc_dkdev.dk_label;

	if ((vnd->sc_flags & VNF_INITED) == 0) {
		bp->b_error = ENXIO;
		goto done;
	}

	/*
	 * The transfer must be a whole number of blocks.
	 */
	if ((bp->b_bcount % lp->d_secsize) != 0) {
		bp->b_error = EINVAL;
		goto done;
	}

	/*
	 * check if we're read-only.
	 */
	if ((vnd->sc_flags & VNF_READONLY) && !(bp->b_flags & B_READ)) {
		bp->b_error = EACCES;
		goto done;
	}

	/* If it's a nil transfer, wake up the top half now. */
	if (bp->b_bcount == 0) {
		goto done;
	}

	/*
	 * Do bounds checking and adjust transfer.  If there's an error,
	 * the bounds check will flag that for us.
	 */
	if (DISKPART(bp->b_dev) == RAW_PART) {
		/* Raw partition: bound against the whole medium. */
		if (bounds_check_with_mediasize(bp, DEV_BSIZE,
		    vnd->sc_size) <= 0)
			goto done;
	} else {
		if (bounds_check_with_label(&vnd->sc_dkdev,
		    bp, vnd->sc_flags & (VNF_WLABEL|VNF_LABELLING)) <= 0)
			goto done;
	}

	/*
	 * Put the block number in terms of the logical blocksize
	 * of the "device".
	 */
	blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);

	/*
	 * Translate the partition-relative block number to an absolute.
	 */
	if (DISKPART(bp->b_dev) != RAW_PART) {
		struct partition *pp;

		pp = &vnd->sc_dkdev.dk_label->d_partitions[
		    DISKPART(bp->b_dev)];
		blkno += pp->p_offset;
	}
	bp->b_rawblkno = blkno;

#ifdef DEBUG
	if (vnddebug & VDB_FOLLOW)
		printf("vndstrategy(%p): unit %d\n", bp, unit);
#endif
	if ((vnd->sc_flags & VNF_USE_VN_RDWR)) {
		/*
		 * Limit the number of pending requests for the vn_rdwr
		 * path; sleep until the worker drains the queue.
		 */
		KASSERT(vnd->sc_pending >= 0 &&
		    vnd->sc_pending <= VND_MAXPENDING(vnd));
		while (vnd->sc_pending == VND_MAXPENDING(vnd))
			tsleep(&vnd->sc_pending, PRIBIO, "vndpc", 0);
		vnd->sc_pending++;
	}
	bufq_put(vnd->sc_tab, bp);
	wakeup(&vnd->sc_tab);
	splx(s);
	return;

done:
	bp->b_resid = bp->b_bcount;
	biodone(bp);
	splx(s);
}