int
dk_discard(struct dk_softc *dksc, dev_t dev, off_t pos, off_t len)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	unsigned secsize = dksc->sc_dkdev.dk_geom.dg_secsize;
	struct buf tmp, *bp = &tmp;
	int error;

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", %jd, %jd)\n", __func__,
	    dksc->sc_xname, dksc, dev, (intmax_t)pos, (intmax_t)len));

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
		return ENXIO;
	}

	if (secsize == 0 || (pos % secsize) != 0)
		return EINVAL;

	/* enough data to please the bounds checking code */
	bp->b_dev = dev;
	bp->b_blkno = (daddr_t)(pos / secsize);
	bp->b_bcount = len;
	bp->b_flags = B_WRITE;

	error = dk_translate(dksc, bp);
	if (error >= 0)
		return error;

	error = dkd->d_discard(dksc->sc_dev,
	    (off_t)bp->b_rawblkno * secsize,
	    (off_t)bp->b_bcount);

	return error;
}
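/*
 * Note (added commentary, not from the original source): dk_discard
 * above relies on the dk_translate() return convention also used by
 * dk_strategy1() below: a return value >= 0 means the request was
 * fully handled (or rejected) and carries the error code, while a
 * negative return means the buffer was translated successfully and
 * b_rawblkno now holds the absolute block number to operate on.
 */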
void
dk_strategy(struct dk_intf *di, struct dk_softc *dksc, struct buf *bp)
{
	int	s;
	int	wlabel;
	daddr_t	blkno;

	DPRINTF_FOLLOW(("dk_strategy(%s, %p, %p)\n",
	    di->di_dkname, dksc, bp));

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("dk_strategy: not inited\n"));
		bp->b_error = ENXIO;
		biodone(bp);
		return;
	}

	/* XXX look for some more errors, c.f. ld.c */

	bp->b_resid = bp->b_bcount;

	/* If there is nothing to do, then we are done */
	if (bp->b_bcount == 0) {
		biodone(bp);
		return;
	}

	wlabel = dksc->sc_flags & (DKF_WLABEL|DKF_LABELLING);
	if (DISKPART(bp->b_dev) != RAW_PART &&
	    bounds_check_with_label(&dksc->sc_dkdev, bp, wlabel) <= 0) {
		biodone(bp);
		return;
	}

	blkno = bp->b_blkno;
	if (DISKPART(bp->b_dev) != RAW_PART) {
		struct partition *pp;

		pp = &dksc->sc_dkdev.dk_label->
		    d_partitions[DISKPART(bp->b_dev)];
		blkno += pp->p_offset;
	}
	bp->b_rawblkno = blkno;

	/*
	 * Start the unit by calling the start routine
	 * provided by the individual driver.
	 */
	s = splbio();
	bufq_put(dksc->sc_bufq, bp);
	dk_start(di, dksc);
	splx(s);
	return;
}
static struct xbd_softc *
getxbd_softc(dev_t dev)
{
	int unit = XBDUNIT(dev);

	DPRINTF_FOLLOW(("getxbd_softc(0x%x): major = %d unit = %d\n",
	    dev, major(dev), unit));
#if NXBD > 0
	if (major(dev) == xbd_major)
		return device_lookup(&xbd_cd, unit);
#endif
#if NWD > 0
	if (major(dev) == xbd_wd_major || major(dev) == xbd_wd_cdev_major)
		return device_lookup(&wd_cd, unit);
#endif
#if NSD > 0
	if (major(dev) == xbd_sd_major || major(dev) == xbd_sd_cdev_major)
		return device_lookup(&sd_cd, unit);
#endif
#if NCD > 0
	if (major(dev) == xbd_cd_major || major(dev) == xbd_cd_cdev_major)
		return device_lookup(&cd_cd, unit);
#endif
	return NULL;
}
/* ARGSUSED */
int
dk_close(struct dk_intf *di, struct dk_softc *dksc, dev_t dev,
    int flags, int fmt, struct lwp *l)
{
	int	part = DISKPART(dev);
	int	pmask = 1 << part;
	struct disk *dk = &dksc->sc_dkdev;

	DPRINTF_FOLLOW(("dk_close(%s, %p, 0x%"PRIx64", 0x%x)\n",
	    di->di_dkname, dksc, dev, flags));

	mutex_enter(&dk->dk_openlock);

	switch (fmt) {
	case S_IFCHR:
		dk->dk_copenmask &= ~pmask;
		break;
	case S_IFBLK:
		dk->dk_bopenmask &= ~pmask;
		break;
	}
	dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;

	mutex_exit(&dk->dk_openlock);
	return 0;
}
static void
cgdstrategy(struct buf *bp)
{
	struct cgd_softc *cs = getcgd_softc(bp->b_dev);
	struct dk_softc *dksc = &cs->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;

	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
	    (long)bp->b_bcount));

	/*
	 * Reject unaligned writes.  We can encrypt and decrypt only
	 * complete disk sectors, and we let the ciphers require their
	 * buffers to be aligned to 32-bit boundaries.
	 */
	if (bp->b_blkno < 0 ||
	    (bp->b_bcount % dg->dg_secsize) != 0 ||
	    ((uintptr_t)bp->b_data & 3) != 0) {
		bp->b_error = EINVAL;
		bp->b_resid = bp->b_bcount;
		biodone(bp);
		return;
	}

	/* XXXrcd: Should we test for (cs != NULL)? */

	dk_strategy(&cs->sc_dksc, bp);
	return;
}
/* ARGSUSED */
int
dk_open(struct dk_softc *dksc, dev_t dev,
    int flags, int fmt, struct lwp *l)
{
	struct disklabel *lp = dksc->sc_dkdev.dk_label;
	int	part = DISKPART(dev);
	int	pmask = 1 << part;
	int	ret = 0;
	struct disk *dk = &dksc->sc_dkdev;

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%x)\n", __func__,
	    dksc->sc_xname, dksc, dev, flags));

	mutex_enter(&dk->dk_openlock);

	/*
	 * If there are wedges, and this is not RAW_PART, then we
	 * need to fail.
	 */
	if (dk->dk_nwedges != 0 && part != RAW_PART) {
		ret = EBUSY;
		goto done;
	}

	/*
	 * If we're init'ed and there are no other open partitions then
	 * update the in-core disklabel.
	 */
	if ((dksc->sc_flags & DKF_INITED)) {
		if ((dksc->sc_flags & DKF_VLABEL) == 0) {
			dksc->sc_flags |= DKF_VLABEL;
			dk_getdisklabel(dksc, dev);
		}
	}

	/* Fail if we can't find the partition. */
	if (part != RAW_PART &&
	    ((dksc->sc_flags & DKF_VLABEL) == 0 ||
	     part >= lp->d_npartitions ||
	     lp->d_partitions[part].p_fstype == FS_UNUSED)) {
		ret = ENXIO;
		goto done;
	}

	/* Mark our unit as open. */
	switch (fmt) {
	case S_IFCHR:
		dk->dk_copenmask |= pmask;
		break;
	case S_IFBLK:
		dk->dk_bopenmask |= pmask;
		break;
	}

	dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;

done:
	mutex_exit(&dk->dk_openlock);
	return ret;
}
/* ARGSUSED */
int
dk_close(struct dk_softc *dksc, dev_t dev,
    int flags, int fmt, struct lwp *l)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	int	part = DISKPART(dev);
	int	pmask = 1 << part;
	struct disk *dk = &dksc->sc_dkdev;

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%x)\n", __func__,
	    dksc->sc_xname, dksc, dev, flags));

	mutex_enter(&dk->dk_openlock);

	switch (fmt) {
	case S_IFCHR:
		dk->dk_copenmask &= ~pmask;
		break;
	case S_IFBLK:
		dk->dk_bopenmask &= ~pmask;
		break;
	}
	dk->dk_openmask = dk->dk_copenmask | dk->dk_bopenmask;

	if (dk->dk_openmask == 0) {
		if (dkd->d_lastclose != NULL)
			(*dkd->d_lastclose)(dksc->sc_dev);
		if ((dksc->sc_flags & DKF_KLABEL) == 0)
			dksc->sc_flags &= ~DKF_VLABEL;
	}

	mutex_exit(&dk->dk_openlock);
	return 0;
}
static int
cgd_diskstart(device_t dev, struct buf *bp)
{
	struct	cgd_softc *cs = device_private(dev);
	struct	dk_softc *dksc = &cs->sc_dksc;
	struct	buf *nbp;
	void	*addr;
	void	*newaddr;
	daddr_t	bn;
	struct	vnode *vp;

	DPRINTF_FOLLOW(("cgd_diskstart(%p, %p)\n", dksc, bp));

	bn = bp->b_rawblkno;

	/*
	 * We attempt to allocate all of our resources up front, so that
	 * we can fail quickly if they are unavailable.
	 */
	nbp = getiobuf(cs->sc_tvn, false);
	if (nbp == NULL)
		return EAGAIN;

	/*
	 * If we are writing, then we need to encrypt the outgoing
	 * block into a new block of memory.
	 */
	newaddr = addr = bp->b_data;
	if ((bp->b_flags & B_READ) == 0) {
		newaddr = cgd_getdata(dksc, bp->b_bcount);
		if (!newaddr) {
			putiobuf(nbp);
			return EAGAIN;
		}
		cgd_cipher(cs, newaddr, addr, bp->b_bcount, bn,
		    DEV_BSIZE, CGD_CIPHER_ENCRYPT);
	}

	nbp->b_data = newaddr;
	nbp->b_flags = bp->b_flags;
	nbp->b_oflags = bp->b_oflags;
	nbp->b_cflags = bp->b_cflags;
	nbp->b_iodone = cgdiodone;
	nbp->b_proc = bp->b_proc;
	nbp->b_blkno = bn;
	nbp->b_bcount = bp->b_bcount;
	nbp->b_private = bp;

	BIO_COPYPRIO(nbp, bp);

	if ((nbp->b_flags & B_READ) == 0) {
		vp = nbp->b_vp;
		mutex_enter(vp->v_interlock);
		vp->v_numoutput++;
		mutex_exit(vp->v_interlock);
	}

	VOP_STRATEGY(cs->sc_tvn, nbp);

	return 0;
}
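/*
 * Note (added commentary, not from the original source): cgd_diskstart
 * keys the cipher to bp->b_rawblkno, the absolute block number on the
 * underlying device, so a block encrypts and decrypts identically no
 * matter which partition it is accessed through.  The write path must
 * encrypt into a scratch buffer obtained from cgd_getdata() because
 * the caller's data must not be modified in place.
 */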
static int
cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct	cgd_softc *cs;
	struct	dk_softc *dksc;
	int	part = DISKPART(dev);
	int	pmask = 1 << part;

	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
	    dev, cmd, data, flag, l));

	switch (cmd) {
	case CGDIOCGET:
		return cgd_ioctl_get(dev, data, l);
	case CGDIOCSET:
	case CGDIOCCLR:
		if ((flag & FWRITE) == 0)
			return EBADF;
		/* FALLTHROUGH */
	default:
		GETCGD_SOFTC(cs, dev);
		dksc = &cs->sc_dksc;
		break;
	}

	switch (cmd) {
	case CGDIOCSET:
		if (DK_ATTACHED(dksc))
			return EBUSY;
		return cgd_ioctl_set(cs, data, l);
	case CGDIOCCLR:
		if (DK_BUSY(&cs->sc_dksc, pmask))
			return EBUSY;
		return cgd_ioctl_clr(cs, l);
	case DIOCCACHESYNC:
		/*
		 * XXX Do we really need to care about having a writable
		 * file descriptor here?
		 */
		if ((flag & FWRITE) == 0)
			return (EBADF);

		/*
		 * We pass this call down to the underlying disk.
		 */
		return VOP_IOCTL(cs->sc_tvn, cmd, data, flag, l->l_cred);
	case DIOCGSTRATEGY:
	case DIOCSSTRATEGY:
		if (!DK_ATTACHED(dksc))
			return ENOENT;
		/*FALLTHROUGH*/
	default:
		return dk_ioctl(dksc, dev, cmd, data, flag, l);
	case CGDIOCGET:
		KASSERT(0);
		return EINVAL;
	}
}
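/*
 * Note (added commentary, not from the original source): the first
 * switch in cgdioctl handles CGDIOCGET before the softc lookup so the
 * ioctl can query a unit that is not yet configured, and performs the
 * FWRITE permission check for CGDIOCSET/CGDIOCCLR; the second switch
 * dispatches with a valid softc in hand.  The CGDIOCGET label in the
 * second switch is unreachable, hence the KASSERT(0).
 */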
static int
cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct	cgd_softc *cs;

	DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));
	GETCGD_SOFTC(cs, dev);
	return dk_open(&cs->sc_dksc, dev, flags, fmt, l);
}
void
dk_iodone(struct dk_intf *di, struct dk_softc *dksc)
{

	DPRINTF_FOLLOW(("dk_iodone(%s, %p)\n", di->di_dkname, dksc));

	/* We kick the queue in case we are able to get more work done */
	dk_start(di, dksc);
}
static int
cgdsize(dev_t dev)
{
	struct cgd_softc *cs = getcgd_softc(dev);

	DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
	if (!cs)
		return -1;
	return dk_size(&cs->sc_dksc, dev);
}
int
xbdsize(dev_t dev)
{
	struct	xbd_softc *xs = getxbd_softc(dev);

	DPRINTF_FOLLOW(("xbdsize(%d)\n", dev));
	if (xs == NULL || xs->sc_shutdown)
		return -1;
	return dk_size(xs->sc_di, &xs->sc_dksc, dev);
}
static int
cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
{
	struct	cgd_softc *cs;

	DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
	    dev, blkno, va, (unsigned long)size));
	GETCGD_SOFTC(cs, dev);
	return dk_dump(&cs->sc_dksc, dev, blkno, va, size);
}
static struct cgd_softc *
getcgd_softc(dev_t dev)
{
	int	unit = CGDUNIT(dev);
	struct cgd_softc *sc;

	DPRINTF_FOLLOW(("getcgd_softc(0x%"PRIx64"): unit = %d\n",
	    dev, unit));

	sc = device_lookup_private(&cgd_cd, unit);
	if (sc == NULL)
		sc = cgd_spawn(unit);
	return sc;
}
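/*
 * Note (added commentary, not from the original source): when the unit
 * has no attached softc yet, getcgd_softc() falls back to cgd_spawn()
 * to create the pseudo-device on first reference, so opening /dev/cgdN
 * does not require a prior explicit attach.
 */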
/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdwrite(dev_t dev, struct uio *uio, int flags)
{
	struct	cgd_softc *cs;
	struct	dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n",
	    dev, uio, flags));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	if (!DK_ATTACHED(dksc))
		return ENXIO;
	return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
}
static int
dk_strategy1(struct dk_softc *dksc, struct buf *bp)
{
	int error;

	DPRINTF_FOLLOW(("%s(%s, %p, %p)\n", __func__,
	    dksc->sc_xname, dksc, bp));

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
		bp->b_error = ENXIO;
		biodone(bp);
		return 1;
	}

	error = dk_translate(dksc, bp);
	if (error >= 0) {
		biodone(bp);
		return 1;
	}

	return 0;
}
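/*
 * Note (added commentary, not from the original source): dk_strategy1()
 * returns 1 when it has completed the buffer itself (device not
 * initialized, or dk_translate() handled/rejected the request, with
 * biodone() already called) and 0 when the caller should go on to
 * queue the now-translated buffer.
 */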
/* XXX: we should probably put these into dksubr.c, mostly */
static int
cgdread(dev_t dev, struct uio *uio, int flags)
{
	struct	cgd_softc *cs;
	struct	dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
	    (unsigned long long)dev, uio, flags));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	if (!DK_ATTACHED(dksc))
		return ENXIO;
	return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
}
static void
cgdiodone(struct buf *nbp)
{
	struct	buf *obp = nbp->b_private;
	struct	cgd_softc *cs = getcgd_softc(obp->b_dev);
	struct	dk_softc *dksc = &cs->sc_dksc;
	struct	disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	daddr_t	bn;

	KDASSERT(cs);

	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
	    obp, obp->b_bcount, obp->b_resid));
	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64
	    " addr %p bcnt %d\n", nbp->b_dev, nbp, nbp->b_blkno,
	    nbp->b_data, nbp->b_bcount));
	if (nbp->b_error != 0) {
		obp->b_error = nbp->b_error;
		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
		    obp->b_error));
	}

	/* Perform the decryption if we are reading.
	 *
	 * Note: use the blocknumber from nbp, since it is what
	 *       we used to encrypt the blocks.
	 */

	if (nbp->b_flags & B_READ) {
		bn = dbtob(nbp->b_blkno) / dg->dg_secsize;
		cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount,
		    bn, dg->dg_secsize, CGD_CIPHER_DECRYPT);
	}

	/* If we allocated memory, free it now... */
	if (nbp->b_data != obp->b_data)
		cgd_putdata(dksc, nbp->b_data);

	putiobuf(nbp);

	/* Request is complete for whatever reason */
	obp->b_resid = 0;
	if (obp->b_error != 0)
		obp->b_resid = obp->b_bcount;

	dk_done(dksc, obp);
	dk_start(dksc, NULL);
}
void
dk_start(struct dk_intf *di, struct dk_softc *dksc)
{
	struct	buf *bp;

	DPRINTF_FOLLOW(("dk_start(%s, %p)\n", di->di_dkname, dksc));

	/* Process the work queue */
	while ((bp = bufq_get(dksc->sc_bufq)) != NULL) {
		if (di->di_diskstart(dksc, bp) != 0) {
			bufq_put(dksc->sc_bufq, bp);
			break;
		}
	}
}
int
dk_strategy_pending(struct dk_softc *dksc)
{
	struct buf *bp;

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
		return 0;
	}

	mutex_enter(&dksc->sc_iolock);
	bp = bufq_peek(dksc->sc_bufq);
	mutex_exit(&dksc->sc_iolock);

	return bp != NULL;
}
int
xbdclose(dev_t dev, int flags, int fmt, struct proc *p)
{
	struct	xbd_softc *xs;

	DPRINTF_FOLLOW(("xbdclose(%d, %d)\n", dev, flags));
	switch (fmt) {
	case S_IFCHR:
		GETXBD_SOFTC_CDEV(xs, dev);
		break;
	case S_IFBLK:
		GETXBD_SOFTC(xs, dev);
		break;
	default:
		return ENXIO;
	}
	return dk_close(xs->sc_di, &xs->sc_dksc, dev, flags, fmt, p);
}
void
xbdstrategy(struct buf *bp)
{
	struct	xbd_softc *xs = getxbd_softc(bp->b_dev);

	DPRINTF_FOLLOW(("xbdstrategy(%p): b_bcount = %ld\n", bp,
	    (long)bp->b_bcount));

	if (xs == NULL || xs->sc_shutdown) {
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		biodone(bp);
		return;
	}

	dk_strategy(xs->sc_di, &xs->sc_dksc, bp);
	return;
}
static int
cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	int	error;
	struct	cgd_softc *cs;
	struct	dk_softc *dksc;

	DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));
	GETCGD_SOFTC(cs, dev);
	dksc = &cs->sc_dksc;
	if ((error = dk_close(dksc, dev, flags, fmt, l)) != 0)
		return error;

	if (!DK_ATTACHED(dksc)) {
		if ((error = cgd_destroy(cs->sc_dksc.sc_dev)) != 0) {
			aprint_error_dev(dksc->sc_dev,
			    "unable to detach instance\n");
			return error;
		}
	}
	return 0;
}
static int
cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
{
	struct cgd_softc *cs;
	struct cgd_user *cgu;
	int unit;

	unit = CGDUNIT(dev);
	cgu = (struct cgd_user *)data;

	DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
	    dev, unit, data, l));

	if (cgu->cgu_unit == -1)
		cgu->cgu_unit = unit;

	if (cgu->cgu_unit < 0)
		return EINVAL;	/* XXX: should this be ENXIO? */

	/*
	 * Look the unit up directly; do not take a stale softc/dksc
	 * reference before the lookup has succeeded.
	 */
	cs = device_lookup_private(&cgd_cd, unit);
	if (cs == NULL || !DK_ATTACHED(&cs->sc_dksc)) {
		cgu->cgu_dev = 0;
		cgu->cgu_alg[0] = '\0';
		cgu->cgu_blocksize = 0;
		cgu->cgu_mode = 0;
		cgu->cgu_keylen = 0;
	} else {
		cgu->cgu_dev = cs->sc_tdev;
		strlcpy(cgu->cgu_alg, cs->sc_cfuncs->cf_name,
		    sizeof(cgu->cgu_alg));
		cgu->cgu_blocksize = cs->sc_cdata.cf_blocksize;
		cgu->cgu_mode = cs->sc_cdata.cf_mode;
		cgu->cgu_keylen = cs->sc_cdata.cf_keylen;
	}
	return 0;
}
int
dk_ioctl(struct dk_softc *dksc, dev_t dev,
    u_long cmd, void *data, int flag, struct lwp *l)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	struct	disklabel *lp;
	struct	disk *dk = &dksc->sc_dkdev;
#ifdef __HAVE_OLD_DISKLABEL
	struct	disklabel newlabel;
#endif
	int	error;

	DPRINTF_FOLLOW(("%s(%s, %p, 0x%"PRIx64", 0x%lx)\n", __func__,
	    dksc->sc_xname, dksc, dev, cmd));

	/* ensure that the pseudo disk is open for writes for these commands */
	switch (cmd) {
	case DIOCSDINFO:
	case DIOCWDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCSDINFO:
	case ODIOCWDINFO:
#endif
	case DIOCKLABEL:
	case DIOCWLABEL:
	case DIOCAWEDGE:
	case DIOCDWEDGE:
	case DIOCSSTRATEGY:
		if ((flag & FWRITE) == 0)
			return EBADF;
	}

	/* ensure that the pseudo-disk is initialized for these */
	switch (cmd) {
	case DIOCGDINFO:
	case DIOCSDINFO:
	case DIOCWDINFO:
	case DIOCGPARTINFO:
	case DIOCKLABEL:
	case DIOCWLABEL:
	case DIOCGDEFLABEL:
	case DIOCAWEDGE:
	case DIOCDWEDGE:
	case DIOCLWEDGES:
	case DIOCMWEDGES:
	case DIOCCACHESYNC:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDINFO:
	case ODIOCSDINFO:
	case ODIOCWDINFO:
	case ODIOCGDEFLABEL:
#endif
		if ((dksc->sc_flags & DKF_INITED) == 0)
			return ENXIO;
	}

	error = disk_ioctl(dk, dev, cmd, data, flag, l);
	if (error != EPASSTHROUGH)
		return error;
	else
		error = 0;

	switch (cmd) {
	case DIOCWDINFO:
	case DIOCSDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCWDINFO:
	case ODIOCSDINFO:
#endif
#ifdef __HAVE_OLD_DISKLABEL
		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
			memset(&newlabel, 0, sizeof newlabel);
			memcpy(&newlabel, data, sizeof (struct olddisklabel));
			lp = &newlabel;
		} else
#endif
		lp = (struct disklabel *)data;

		mutex_enter(&dk->dk_openlock);
		dksc->sc_flags |= DKF_LABELLING;

		error = setdisklabel(dksc->sc_dkdev.dk_label,
		    lp, 0, dksc->sc_dkdev.dk_cpulabel);
		if (error == 0) {
			if (cmd == DIOCWDINFO
#ifdef __HAVE_OLD_DISKLABEL
			    || cmd == ODIOCWDINFO
#endif
			   )
				error = writedisklabel(DKLABELDEV(dev),
				    dkd->d_strategy, dksc->sc_dkdev.dk_label,
				    dksc->sc_dkdev.dk_cpulabel);
		}

		dksc->sc_flags &= ~DKF_LABELLING;
		mutex_exit(&dk->dk_openlock);
		break;

	case DIOCKLABEL:
		if (*(int *)data != 0)
			dksc->sc_flags |= DKF_KLABEL;
		else
			dksc->sc_flags &= ~DKF_KLABEL;
		break;

	case DIOCWLABEL:
		if (*(int *)data != 0)
			dksc->sc_flags |= DKF_WLABEL;
		else
			dksc->sc_flags &= ~DKF_WLABEL;
		break;

	case DIOCGDEFLABEL:
		dk_getdefaultlabel(dksc, (struct disklabel *)data);
		break;

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDEFLABEL:
		dk_getdefaultlabel(dksc, &newlabel);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(data, &newlabel, sizeof (struct olddisklabel));
		break;
#endif

	case DIOCGSTRATEGY:
	    {
		struct disk_strategy *dks = (void *)data;

		mutex_enter(&dksc->sc_iolock);
		strlcpy(dks->dks_name, bufq_getstrategyname(dksc->sc_bufq),
		    sizeof(dks->dks_name));
		mutex_exit(&dksc->sc_iolock);
		dks->dks_paramlen = 0;

		return 0;
	    }

	case DIOCSSTRATEGY:
	    {
		struct disk_strategy *dks = (void *)data;
		struct bufq_state *new;
		struct bufq_state *old;

		if (dks->dks_param != NULL) {
			return EINVAL;
		}
		dks->dks_name[sizeof(dks->dks_name) - 1] = 0; /* ensure term */
		error = bufq_alloc(&new, dks->dks_name,
		    BUFQ_EXACT|BUFQ_SORT_RAWBLOCK);
		if (error) {
			return error;
		}
		mutex_enter(&dksc->sc_iolock);
		old = dksc->sc_bufq;
		bufq_move(new, old);
		dksc->sc_bufq = new;
		mutex_exit(&dksc->sc_iolock);
		bufq_free(old);

		return 0;
	    }

	default:
		error = ENOTTY;
	}

	return error;
}
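/*
 * Illustrative userland sketch (not part of the original source):
 * exercising the DIOCGSTRATEGY/DIOCSSTRATEGY handlers above.  The
 * strategy name "fcfs" and the helper name are assumptions for the
 * example; the set ioctl requires a descriptor opened for writing.
 */
#if 0	/* example only, not compiled with the driver */
#include <sys/ioctl.h>
#include <sys/disk.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int
example_set_bufq_strategy(const char *devpath)
{
	struct disk_strategy dks;
	int fd, error;

	fd = open(devpath, O_RDWR);		/* FWRITE needed for set */
	if (fd == -1)
		return -1;

	memset(&dks, 0, sizeof(dks));		/* dks_param must be NULL */
	strlcpy(dks.dks_name, "fcfs", sizeof(dks.dks_name));
	error = ioctl(fd, DIOCSSTRATEGY, &dks);	/* swap in the new bufq */
	close(fd);
	return error;
}
#endif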
void
dk_start(struct dk_softc *dksc, struct buf *bp)
{
	const struct dkdriver *dkd = dksc->sc_dkdev.dk_driver;
	int error;

	if (!(dksc->sc_flags & DKF_INITED)) {
		DPRINTF_FOLLOW(("%s: not inited\n", __func__));
		return;
	}

	mutex_enter(&dksc->sc_iolock);

	if (bp != NULL)
		bufq_put(dksc->sc_bufq, bp);

	if (dksc->sc_busy)
		goto done;
	dksc->sc_busy = true;

	/*
	 * Peeking at the buffer queue and committing the operation
	 * only after success isn't atomic.
	 *
	 * So when a diskstart fails, the buffer is saved
	 * and tried again before the next buffer is fetched.
	 * dk_drain() handles flushing of a saved buffer.
	 *
	 * This keeps order of I/O operations, unlike bufq_put.
	 */

	bp = dksc->sc_deferred;
	dksc->sc_deferred = NULL;

	if (bp == NULL)
		bp = bufq_get(dksc->sc_bufq);

	while (bp != NULL) {

		disk_busy(&dksc->sc_dkdev);
		mutex_exit(&dksc->sc_iolock);
		error = dkd->d_diskstart(dksc->sc_dev, bp);
		mutex_enter(&dksc->sc_iolock);
		if (error == EAGAIN) {
			dksc->sc_deferred = bp;
			disk_unbusy(&dksc->sc_dkdev, 0,
			    (bp->b_flags & B_READ));
			break;
		}

		if (error != 0) {
			bp->b_error = error;
			bp->b_resid = bp->b_bcount;
			dk_done1(dksc, bp, false);
		}

		bp = bufq_get(dksc->sc_bufq);
	}

	dksc->sc_busy = false;
done:
	mutex_exit(&dksc->sc_iolock);
}
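/*
 * Illustrative sketch (not part of the original source) of the
 * d_diskstart contract that dk_start above depends on.  All
 * "example_*" names are hypothetical.  Returning 0 transfers ownership
 * of the buffer to the driver, which must eventually call dk_done();
 * returning EAGAIN makes dk_start save the buffer in sc_deferred and
 * retry it first on the next kick; any other error fails the transfer
 * immediately.
 */
#if 0	/* example only */
static int
example_diskstart(device_t dev, struct buf *bp)
{
	struct example_softc *sc = device_private(dev);

	if (sc->sc_slots_free == 0)
		return EAGAIN;		/* deferred, retried in order */

	sc->sc_slots_free--;
	example_hw_submit(sc, bp);	/* hypothetical hw submission */
	return 0;			/* completion calls dk_done() */
}
#endif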
static int
xbdstart(struct dk_softc *dksc, struct buf *bp)
{
	struct	xbd_softc *xs;
	struct	xbdreq *pxr, *xr;
	struct	partition *pp;
	daddr_t	bn;
	int	ret, runqueue;

	DPRINTF_FOLLOW(("xbdstart(%p, %p)\n", dksc, bp));

	runqueue = 1;
	ret = -1;

	xs = getxbd_softc(bp->b_dev);
	if (xs == NULL || xs->sc_shutdown) {
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		biodone(bp);
		return 0;
	}
	dksc = &xs->sc_dksc;

	/* XXXrcd:
	 * Translate partition relative blocks to absolute blocks,
	 * this probably belongs (somehow) in dksubr.c, since it
	 * is independent of the underlying code...  This will require
	 * that the interface be expanded slightly, though.
	 */
	bn = bp->b_blkno;
	if (DISKPART(bp->b_dev) != RAW_PART) {
		pp = &xs->sc_dksc.sc_dkdev.dk_label->
		    d_partitions[DISKPART(bp->b_dev)];
		bn += pp->p_offset;
	}

	DPRINTF(XBDB_IO, ("xbdstart: addr %p, sector %llu, "
	    "count %ld [%s]\n", bp->b_data, (unsigned long long)bn,
	    bp->b_bcount, bp->b_flags & B_READ ? "read" : "write"));

	GET_XBDREQ(pxr);
	if (__predict_false(pxr == NULL))
		goto out;

	disk_busy(&dksc->sc_dkdev); /* XXX: put in dksubr.c */
	/*
	 * We have a request slot, return 0 to make dk_start remove
	 * the bp from the work queue.
	 */
	ret = 0;

	pxr->xr_bp = bp;
	pxr->xr_parent = pxr;
	pxr->xr_bn = bn;
	pxr->xr_bqueue = bp->b_bcount;
	pxr->xr_bdone = bp->b_bcount;
	pxr->xr_data = (vaddr_t)bp->b_data;
	pxr->xr_sc = xs;

	if (pxr->xr_data & (XEN_BSIZE - 1))
		map_align(pxr);

	fill_ring(pxr);

	while (__predict_false(pxr->xr_bqueue > 0)) {
		GET_XBDREQ(xr);
		if (__predict_false(xr == NULL))
			break;
		xr->xr_parent = pxr;
		fill_ring(xr);
	}

	if (__predict_false(pxr->xr_bqueue > 0)) {
		SIMPLEQ_INSERT_TAIL(&xbdr_suspended, pxr,
		    xr_suspended);
		DPRINTF(XBDB_IO, ("xbdstart: suspended xbdreq %p "
		    "for bp %p\n", pxr, bp));
	} else if (CANGET_XBDREQ() && BUFQ_PEEK(&bufq) != NULL) {
		/*
		 * We have enough resources to start another bp and
		 * there are additional bps on the queue, dk_start
		 * will call us again and we'll run the queue then.
		 */
		runqueue = 0;
	}

out:
	if (runqueue && last_req_prod != req_prod)
		signal_requests_to_xen();

	return ret;
}
int
dk_ioctl(struct dk_intf *di, struct dk_softc *dksc, dev_t dev,
    u_long cmd, void *data, int flag, struct lwp *l)
{
	struct	disklabel *lp;
	struct	disk *dk;
#ifdef __HAVE_OLD_DISKLABEL
	struct	disklabel newlabel;
#endif
	int	error = 0;

	DPRINTF_FOLLOW(("dk_ioctl(%s, %p, 0x%"PRIx64", 0x%lx)\n",
	    di->di_dkname, dksc, dev, cmd));

	/* ensure that the pseudo disk is open for writes for these commands */
	switch (cmd) {
	case DIOCSDINFO:
	case DIOCWDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCSDINFO:
	case ODIOCWDINFO:
#endif
	case DIOCWLABEL:
	case DIOCAWEDGE:
	case DIOCDWEDGE:
		if ((flag & FWRITE) == 0)
			return EBADF;
	}

	/* ensure that the pseudo-disk is initialized for these */
	switch (cmd) {
#ifdef DIOCGSECTORSIZE
	case DIOCGSECTORSIZE:
	case DIOCGMEDIASIZE:
#endif
	case DIOCGDINFO:
	case DIOCSDINFO:
	case DIOCWDINFO:
	case DIOCGPART:
	case DIOCWLABEL:
	case DIOCGDEFLABEL:
	case DIOCAWEDGE:
	case DIOCDWEDGE:
	case DIOCLWEDGES:
	case DIOCCACHESYNC:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDINFO:
	case ODIOCSDINFO:
	case ODIOCWDINFO:
	case ODIOCGDEFLABEL:
#endif
		if ((dksc->sc_flags & DKF_INITED) == 0)
			return ENXIO;
	}

	switch (cmd) {
#ifdef DIOCGSECTORSIZE
	case DIOCGSECTORSIZE:
		*(u_int *)data = dksc->sc_dkdev.dk_geom.dg_secsize;
		return 0;
	case DIOCGMEDIASIZE:
		*(off_t *)data = (off_t)dksc->sc_dkdev.dk_geom.dg_secsize *
		    dksc->sc_dkdev.dk_geom.dg_nsectors;
		return 0;
#endif
	case DIOCGDINFO:
		*(struct disklabel *)data = *(dksc->sc_dkdev.dk_label);
		break;

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDINFO:
		newlabel = *(dksc->sc_dkdev.dk_label);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(data, &newlabel, sizeof (struct olddisklabel));
		break;
#endif

	case DIOCGPART:
		((struct partinfo *)data)->disklab = dksc->sc_dkdev.dk_label;
		((struct partinfo *)data)->part =
		    &dksc->sc_dkdev.dk_label->d_partitions[DISKPART(dev)];
		break;

	case DIOCWDINFO:
	case DIOCSDINFO:
#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCWDINFO:
	case ODIOCSDINFO:
#endif
#ifdef __HAVE_OLD_DISKLABEL
		if (cmd == ODIOCSDINFO || cmd == ODIOCWDINFO) {
			memset(&newlabel, 0, sizeof newlabel);
			memcpy(&newlabel, data, sizeof (struct olddisklabel));
			lp = &newlabel;
		} else
#endif
		lp = (struct disklabel *)data;

		dk = &dksc->sc_dkdev;
		mutex_enter(&dk->dk_openlock);
		dksc->sc_flags |= DKF_LABELLING;

		error = setdisklabel(dksc->sc_dkdev.dk_label,
		    lp, 0, dksc->sc_dkdev.dk_cpulabel);
		if (error == 0) {
			if (cmd == DIOCWDINFO
#ifdef __HAVE_OLD_DISKLABEL
			    || cmd == ODIOCWDINFO
#endif
			   )
				error = writedisklabel(DKLABELDEV(dev),
				    di->di_strategy, dksc->sc_dkdev.dk_label,
				    dksc->sc_dkdev.dk_cpulabel);
		}

		dksc->sc_flags &= ~DKF_LABELLING;
		mutex_exit(&dk->dk_openlock);
		break;

	case DIOCWLABEL:
		if (*(int *)data != 0)
			dksc->sc_flags |= DKF_WLABEL;
		else
			dksc->sc_flags &= ~DKF_WLABEL;
		break;

	case DIOCGDEFLABEL:
		dk_getdefaultlabel(di, dksc, (struct disklabel *)data);
		break;

#ifdef __HAVE_OLD_DISKLABEL
	case ODIOCGDEFLABEL:
		dk_getdefaultlabel(di, dksc, &newlabel);
		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
			return ENOTTY;
		memcpy(data, &newlabel, sizeof (struct olddisklabel));
		break;
#endif

	case DIOCAWEDGE:
	    {
		struct dkwedge_info *dkw = (void *)data;

		if ((flag & FWRITE) == 0)
			return (EBADF);

		/* If the ioctl happens here, the parent is us. */
		strcpy(dkw->dkw_parent, dksc->sc_dkdev.dk_name);
		return (dkwedge_add(dkw));
	    }

	case DIOCDWEDGE:
	    {
		struct dkwedge_info *dkw = (void *)data;

		if ((flag & FWRITE) == 0)
			return (EBADF);

		/* If the ioctl happens here, the parent is us. */
		strcpy(dkw->dkw_parent, dksc->sc_dkdev.dk_name);
		return (dkwedge_del(dkw));
	    }

	case DIOCLWEDGES:
	    {
		struct dkwedge_list *dkwl = (void *)data;

		return (dkwedge_list(&dksc->sc_dkdev, dkwl, l));
	    }

	case DIOCGSTRATEGY:
	    {
		struct disk_strategy *dks = (void *)data;
		int s;

		s = splbio();
		strlcpy(dks->dks_name, bufq_getstrategyname(dksc->sc_bufq),
		    sizeof(dks->dks_name));
		splx(s);
		dks->dks_paramlen = 0;

		return 0;
	    }

	case DIOCSSTRATEGY:
	    {
		struct disk_strategy *dks = (void *)data;
		struct bufq_state *new;
		struct bufq_state *old;
		int s;

		if ((flag & FWRITE) == 0) {
			return EBADF;
		}
		if (dks->dks_param != NULL) {
			return EINVAL;
		}
		dks->dks_name[sizeof(dks->dks_name) - 1] = 0; /* ensure term */
		error = bufq_alloc(&new, dks->dks_name,
		    BUFQ_EXACT|BUFQ_SORT_RAWBLOCK);
		if (error) {
			return error;
		}
		s = splbio();
		old = dksc->sc_bufq;
		bufq_move(new, old);
		dksc->sc_bufq = new;
		splx(s);
		bufq_free(old);

		return 0;
	    }

	default:
		error = ENOTTY;
	}

	return error;
}
static void
cgd_cipher(struct cgd_softc *cs, void *dstv, void *srcv,
    size_t len, daddr_t blkno, size_t secsize, int dir)
{
	char		*dst = dstv;
	char		*src = srcv;
	cfunc_cipher	*cipher = cs->sc_cfuncs->cf_cipher;
	struct uio	dstuio;
	struct uio	srcuio;
	struct iovec	dstiov[2];
	struct iovec	srciov[2];
	size_t		blocksize = cs->sc_cdata.cf_blocksize;
	size_t		todo;
	char		sink[CGD_MAXBLOCKSIZE];
	char		zero_iv[CGD_MAXBLOCKSIZE];
	char		blkno_buf[CGD_MAXBLOCKSIZE];

	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));

	DIAGCONDPANIC(len % blocksize != 0,
	    ("cgd_cipher: len %% blocksize != 0"));

	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
	DIAGCONDPANIC(sizeof(daddr_t) > blocksize,
	    ("cgd_cipher: sizeof(daddr_t) > blocksize"));

	memset(zero_iv, 0x0, blocksize);

	dstuio.uio_iov = dstiov;
	dstuio.uio_iovcnt = 2;

	srcuio.uio_iov = srciov;
	srcuio.uio_iovcnt = 2;

	dstiov[0].iov_base = sink;
	dstiov[0].iov_len  = blocksize;
	srciov[0].iov_base = blkno_buf;
	srciov[0].iov_len  = blocksize;

	for (; len > 0; len -= todo) {
		todo = MIN(len, secsize);

		dstiov[1].iov_base = dst;
		srciov[1].iov_base = src;
		dstiov[1].iov_len  = todo;
		srciov[1].iov_len  = todo;

		memset(blkno_buf, 0x0, blocksize);
		blkno2blkno_buf(blkno_buf, blkno);
		if (dir == CGD_CIPHER_DECRYPT) {
			dstuio.uio_iovcnt = 1;
			srcuio.uio_iovcnt = 1;
			IFDEBUG(CGDB_CRYPTO, hexprint("step 0: blkno_buf",
			    blkno_buf, blocksize));
			cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio,
			    zero_iv, CGD_CIPHER_ENCRYPT);
			memcpy(blkno_buf, sink, blocksize);
			dstuio.uio_iovcnt = 2;
			srcuio.uio_iovcnt = 2;
		}

		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
		    blkno_buf, blocksize));
		cipher(cs->sc_cdata.cf_priv, &dstuio, &srcuio, zero_iv, dir);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 2: sink",
		    sink, blocksize));

		dst += todo;
		src += todo;
		blkno++;
	}
}
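/*
 * Note (added commentary, not from the original source): the loop
 * above implements the "encblkno" IV method.  On encryption, the
 * two-element iovec [blkno_buf | data] is run through the cipher in
 * CBC fashion with a zero IV, so the data is effectively chained off
 * E_k(blkno) while the encrypted block number itself is discarded
 * into "sink".  On decryption, blkno_buf is first encrypted alone to
 * reproduce E_k(blkno), which then serves as the preceding
 * "ciphertext" block when [blkno_buf | data] is decrypted.
 */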