static int
cgd_diskstart(device_t dev, struct buf *bp)
{
	struct cgd_softc *cs = device_private(dev);
	struct dk_softc *dksc = &cs->sc_dksc;
	struct buf *nbp;
	void *addr;
	void *newaddr;
	daddr_t bn;
	struct vnode *vp;

	DPRINTF_FOLLOW(("cgd_diskstart(%p, %p)\n", dksc, bp));

	bn = bp->b_rawblkno;

	/*
	 * We attempt to allocate all of our resources up front, so that
	 * we can fail quickly if they are unavailable.
	 */
	nbp = getiobuf(cs->sc_tvn, false);
	if (nbp == NULL)
		return EAGAIN;

	/*
	 * If we are writing, then we need to encrypt the outgoing
	 * block into a new block of memory.
	 */
	newaddr = addr = bp->b_data;
	if ((bp->b_flags & B_READ) == 0) {
		newaddr = cgd_getdata(dksc, bp->b_bcount);
		if (!newaddr) {
			putiobuf(nbp);
			return EAGAIN;
		}
		cgd_cipher(cs, newaddr, addr, bp->b_bcount, bn,
		    DEV_BSIZE, CGD_CIPHER_ENCRYPT);
	}

	nbp->b_data = newaddr;
	nbp->b_flags = bp->b_flags;
	nbp->b_oflags = bp->b_oflags;
	nbp->b_cflags = bp->b_cflags;
	nbp->b_iodone = cgdiodone;
	nbp->b_proc = bp->b_proc;
	nbp->b_blkno = bn;
	nbp->b_bcount = bp->b_bcount;
	nbp->b_private = bp;

	BIO_COPYPRIO(nbp, bp);

	if ((nbp->b_flags & B_READ) == 0) {
		vp = nbp->b_vp;
		mutex_enter(vp->v_interlock);
		vp->v_numoutput++;
		mutex_exit(vp->v_interlock);
	}

	VOP_STRATEGY(cs->sc_tvn, nbp);

	return 0;
}
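
/*
 * The completion side referenced above (cgdiodone) is not shown.  The
 * following is only a minimal sketch of what such a callback has to do,
 * not the actual driver code: the softc lookup, the cgd_putdata() helper
 * (assumed to pair with cgd_getdata()) and the dk_done() completion hook
 * are assumptions for illustration.  Reads come back as ciphertext and
 * are decrypted in place; writes only need their bounce buffer released.
 */
static void
cgdiodone_sketch(struct buf *nbp)
{
	struct buf *obp = nbp->b_private;		/* original request */
	struct cgd_softc *cs = getcgd_softc(obp->b_dev); /* assumed lookup */
	struct dk_softc *dksc = &cs->sc_dksc;

	/* Propagate status from the lower request to the original one. */
	obp->b_error = nbp->b_error;
	obp->b_resid = nbp->b_resid;

	/* Reads were issued into the caller's buffer; decrypt in place. */
	if ((obp->b_flags & B_READ) != 0 && obp->b_error == 0)
		cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount,
		    obp->b_rawblkno, DEV_BSIZE, CGD_CIPHER_DECRYPT);

	/* Writes used a separate ciphertext buffer; give it back. */
	if (nbp->b_data != obp->b_data)
		cgd_putdata(dksc, nbp->b_data);		/* assumed helper */

	putiobuf(nbp);
	dk_done(dksc, obp);				/* finish original I/O */
}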
static void
vndthread(void *arg)
{
	struct vnd_softc *vnd = arg;
	int s;

	/*
	 * Determine whether we can *use* VOP_BMAP and VOP_STRATEGY to
	 * directly access the backing vnode.  If we can, use these two
	 * operations to avoid messing with the local buffer cache.
	 * Otherwise fall back to regular VOP_READ/VOP_WRITE operations
	 * which are guaranteed to work with any file system.
	 */
	if ((vnd->sc_flags & VNF_USE_VN_RDWR) == 0 &&
	    !vnode_has_strategy(vnd))
		vnd->sc_flags |= VNF_USE_VN_RDWR;

#ifdef DEBUG
	if (vnddebug & VDB_INIT)
		printf("vndthread: vp %p, %s\n", vnd->sc_vp,
		    (vnd->sc_flags & VNF_USE_VN_RDWR) == 0 ?
		    "using bmap/strategy operations" :
		    "using read/write operations");
#endif

	s = splbio();
	vnd->sc_flags |= VNF_KTHREAD;
	wakeup(&vnd->sc_kthread);

	/*
	 * Dequeue requests and serve them depending on the available
	 * vnode operations.
	 */
	while ((vnd->sc_flags & VNF_VUNCONF) == 0) {
		struct vndxfer *vnx;
		int flags;
		struct buf *obp;
		struct buf *bp;

		obp = bufq_get(vnd->sc_tab);
		if (obp == NULL) {
			tsleep(&vnd->sc_tab, PRIBIO, "vndbp", 0);
			continue;
		}
		if ((vnd->sc_flags & VNF_USE_VN_RDWR)) {
			KASSERT(vnd->sc_pending > 0 &&
			    vnd->sc_pending <= VND_MAXPENDING(vnd));
			if (vnd->sc_pending-- == VND_MAXPENDING(vnd))
				wakeup(&vnd->sc_pending);
		}
		splx(s);
		flags = obp->b_flags;
#ifdef DEBUG
		if (vnddebug & VDB_FOLLOW)
			printf("vndthread(%p)\n", obp);
#endif

		if (vnd->sc_vp->v_mount == NULL) {
			obp->b_error = ENXIO;
			goto done;
		}
#ifdef VND_COMPRESSION
		/* handle a compressed read */
		if ((flags & B_READ) != 0 && (vnd->sc_flags & VNF_COMP)) {
			off_t bn;

			/* Convert to a byte offset within the file. */
			bn = obp->b_rawblkno *
			    vnd->sc_dkdev.dk_label->d_secsize;

			compstrategy(obp, bn);
			goto done;
		}
#endif /* VND_COMPRESSION */

		/*
		 * Allocate a header for this transfer and link it to the
		 * buffer
		 */
		s = splbio();
		vnx = VND_GETXFER(vnd);
		splx(s);
		vnx->vx_vnd = vnd;

		s = splbio();
		while (vnd->sc_active >= vnd->sc_maxactive) {
			tsleep(&vnd->sc_tab, PRIBIO, "vndac", 0);
		}
		vnd->sc_active++;
		splx(s);

		/* Instrumentation. */
		disk_busy(&vnd->sc_dkdev);

		bp = &vnx->vx_buf;
		buf_init(bp);
		bp->b_flags = (obp->b_flags & B_READ);
		bp->b_oflags = obp->b_oflags;
		bp->b_cflags = obp->b_cflags;
		bp->b_iodone = vndiodone;
		bp->b_private = obp;
		bp->b_vp = vnd->sc_vp;
		bp->b_objlock = bp->b_vp->v_interlock;
		bp->b_data = obp->b_data;
		bp->b_bcount = obp->b_bcount;
		BIO_COPYPRIO(bp, obp);

		/* Handle the request using the appropriate operations. */
		if ((vnd->sc_flags & VNF_USE_VN_RDWR) == 0)
			handle_with_strategy(vnd, obp, bp);
		else
			handle_with_rdwr(vnd, obp, bp);

		s = splbio();
		continue;

done:
		biodone(obp);
		s = splbio();
	}

	vnd->sc_flags &= (~VNF_KTHREAD | VNF_VUNCONF);
	wakeup(&vnd->sc_kthread);
	splx(s);
	kthread_exit(0);
}
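
/*
 * The read/write fallback dispatched above (handle_with_rdwr) is not
 * shown.  The following is only a minimal sketch of that path under
 * stated assumptions, not the actual driver code: it assumes an sc_cred
 * credential field on the softc and reuses the sector-to-byte conversion
 * seen in the compressed-read case above.  vn_rdwr() builds a uio and
 * goes through VOP_READ/VOP_WRITE, so it works on any file system, at
 * the cost of passing through the buffer cache.
 */
static void
handle_with_rdwr_sketch(struct vnd_softc *vnd, const struct buf *obp,
    struct buf *bp)
{
	bool doread = (bp->b_flags & B_READ) != 0;
	off_t offset = obp->b_rawblkno *
	    vnd->sc_dkdev.dk_label->d_secsize;	/* byte offset in the file */
	struct vnode *vp = vnd->sc_vp;
	size_t resid;

	/* Issue the transfer through the generic vnode read/write path. */
	bp->b_error = vn_rdwr(doread ? UIO_READ : UIO_WRITE, vp,
	    bp->b_data, bp->b_bcount, offset, UIO_SYSSPACE, 0,
	    vnd->sc_cred, &resid, NULL);	/* sc_cred: assumed field */
	bp->b_resid = resid;

	/* Writes must be accounted as vnode output before completion. */
	if (!doread) {
		mutex_enter(vp->v_interlock);
		vp->v_numoutput++;
		mutex_exit(vp->v_interlock);
	}

	/* Completion runs bp->b_iodone (vndiodone) via biodone(). */
	biodone(bp);
}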