/*
 * vnstrategy: strategy routine for the vnode (file-backed) disk driver.
 * Validates and clips the request against the size of the backing vnode,
 * then performs the transfer through vn_readwrite_io().
 */
static void
vnstrategy(struct buf *bp)
{
    struct vn_softc *vn;
    int error = 0;
    long sz;                    /* in sc_secsize chunks */
    daddr64_t blk_num;
    struct vnode *shadow_vp = NULL;
    struct vnode *vp = NULL;
    struct vfs_context context;

    vn = vn_table + vnunit(buf_device(bp));
    if ((vn->sc_flags & VNF_INITED) == 0) {
        error = ENXIO;
        goto done;
    }

    context.vc_thread = current_thread();
    context.vc_ucred = vn->sc_cred;

    buf_setresid(bp, buf_count(bp));

    /*
     * Check for required alignment.  Transfers must be a valid
     * multiple of the sector size.
     */
    blk_num = buf_blkno(bp);
    if (buf_count(bp) % vn->sc_secsize != 0) {
        error = EINVAL;
        goto done;
    }
    sz = howmany(buf_count(bp), vn->sc_secsize);

    /*
     * If out of bounds return an error.  If at the EOF point,
     * simply read or write less.
     */
    if (blk_num >= 0 && (u_int64_t)blk_num >= vn->sc_size) {
        if (blk_num > 0 && (u_int64_t)blk_num > vn->sc_size) {
            error = EINVAL;
        }
        goto done;
    }
    /*
     * If the request crosses EOF, truncate the request.
     */
    if ((blk_num + sz) > 0 && ((u_int64_t)(blk_num + sz)) > vn->sc_size) {
        buf_setcount(bp, (vn->sc_size - blk_num) * vn->sc_secsize);
        buf_setresid(bp, buf_count(bp));
    }
    vp = vn->sc_vp;
    if (vp == NULL) {
        error = ENXIO;
        goto done;
    }

    error = vnode_getwithvid(vp, vn->sc_vid);
    if (error != 0) {
        /* the vnode is no longer available, abort */
        error = ENXIO;
        vnclear(vn, &context);
        goto done;
    }
    shadow_vp = vn->sc_shadow_vp;
    if (shadow_vp != NULL) {
        error = vnode_getwithvid(shadow_vp, vn->sc_shadow_vid);
        if (error != 0) {
            /* the vnode is no longer available, abort */
            error = ENXIO;
            vnode_put(vn->sc_vp);
            vnclear(vn, &context);
            goto done;
        }
    }

    error = vn_readwrite_io(vn, bp, &context);
    vnode_put(vp);
    if (shadow_vp != NULL) {
        vnode_put(shadow_vp);
    }

done:
    if (error) {
        buf_seterror(bp, error);
    }
    buf_biodone(bp);
    return;
}
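/*
 * mdevstrategy: strategy routine for the memory-backed (RAM) disk driver.
 * Bounds-checks the request against the device size, then copies data
 * between the buffer and the backing memory, using page-wise physical
 * copies when the device is backed by physical (mdPhys) pages.
 */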
static void
mdevstrategy(struct buf *bp)
{
    unsigned int left, lop, csize;
    vm_offset_t vaddr, blkoff;
    int devid;
    addr64_t paddr, fvaddr;
    ppnum_t pp;

    devid = minor(buf_device(bp));                  /* Get minor device number */

    if ((mdev[devid].mdFlags & mdInited) == 0) {    /* Have we actually been defined yet? */
        buf_seterror(bp, ENXIO);
        buf_biodone(bp);
        return;
    }

    buf_setresid(bp, buf_count(bp));                /* Set byte count */

    blkoff = buf_blkno(bp) * mdev[devid].mdSecsize; /* Get offset into file */

    /*
     * Note that reading past end is an error, but reading at end is an EOF.  For these
     * we just return with resid == count.
     */
    if (blkoff >= (mdev[devid].mdSize << 12)) {     /* Are they trying to read/write at/after end? */
        if (blkoff != (mdev[devid].mdSize << 12)) { /* Are we trying to read after EOF? */
            buf_seterror(bp, EINVAL);               /* Yeah, this is an error */
        }
        buf_biodone(bp);                            /* Return */
        return;
    }

    if ((blkoff + buf_count(bp)) > (mdev[devid].mdSize << 12)) {    /* Will this read go past end? */
        buf_setcount(bp, ((mdev[devid].mdSize << 12) - blkoff));    /* Yes, trim to max */
    }

    /*
     * make sure the buffer's data area is
     * accessible
     */
    if (buf_map(bp, (caddr_t *)&vaddr))
        panic("ramstrategy: buf_map failed\n");

    fvaddr = (mdev[devid].mdBase << 12) + blkoff;   /* Point to offset into ram disk */

    if (buf_flags(bp) & B_READ) {                   /* Is this a read? */
        if (!(mdev[devid].mdFlags & mdPhys)) {      /* Physical mapped disk? */
            bcopy((void *)((uintptr_t)fvaddr), (void *)vaddr, (size_t)buf_count(bp));   /* This is virtual, just get the data */
        } else {
            left = buf_count(bp);                   /* Init the amount left to copy */

            while (left) {                          /* Go until it is all copied */
                lop = min((4096 - (vaddr & 4095)), (4096 - (fvaddr & 4095)));   /* Get smallest amount left on sink and source */
                csize = min(lop, left);             /* Don't move more than we need to */

                pp = pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)vaddr)); /* Get the sink physical address */
                if (!pp) {                          /* Not found, what gives? */
                    panic("mdevstrategy: sink address %016llX not mapped\n", (addr64_t)((uintptr_t)vaddr));
                }
                paddr = (addr64_t)(((addr64_t)pp << 12) | (addr64_t)(vaddr & 4095));    /* Get actual address */
                bcopy_phys(fvaddr, paddr, csize);   /* Copy this on in */
                mapping_set_mod(paddr >> 12);       /* Make sure we know that it is modified */

                left = left - csize;                /* Calculate what is left */
                vaddr = vaddr + csize;              /* Move to next sink address */
                fvaddr = fvaddr + csize;            /* Bump to next physical address */
            }
        }
    } else {                                        /* This is a write */
        if (!(mdev[devid].mdFlags & mdPhys)) {      /* Physical mapped disk? */
            bcopy((void *)vaddr, (void *)((uintptr_t)fvaddr), (size_t)buf_count(bp));   /* This is virtual, just put the data */
        } else {
            left = buf_count(bp);                   /* Init the amount left to copy */

            while (left) {                          /* Go until it is all copied */
                lop = min((4096 - (vaddr & 4095)), (4096 - (fvaddr & 4095)));   /* Get smallest amount left on source and sink */
                csize = min(lop, left);             /* Don't move more than we need to */

                pp = pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)vaddr)); /* Get the source physical address */
                if (!pp) {                          /* Not found, what gives? */
                    panic("mdevstrategy: source address %016llX not mapped\n", (addr64_t)((uintptr_t)vaddr));
                }
                paddr = (addr64_t)(((addr64_t)pp << 12) | (addr64_t)(vaddr & 4095));    /* Get actual address */
                bcopy_phys(paddr, fvaddr, csize);   /* Move it on out */

                left = left - csize;                /* Calculate what is left */
                vaddr = vaddr + csize;              /* Move to next source address */
                fvaddr = fvaddr + csize;            /* Bump to next physical address */
            }
        }
    }

    /*
     * buf_unmap handles all the cases; it unmaps the buffer from kernel
     * virtual space if that was the state when we mapped it.
     */
    buf_unmap(bp);

    buf_setresid(bp, 0);                            /* Set byte count to zero */
    buf_biodone(bp);                                /* Return */
}
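/*
 * spec_strategy: VNOP_STRATEGY entry point for special (device) vnodes.
 * Records kdebug trace information, applies I/O throttling state, updates
 * the mount's pending-I/O accounting, and passes the buffer to the block
 * device's strategy routine via the bdevsw table.
 */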
int
spec_strategy(struct vnop_strategy_args *ap)
{
    buf_t bp;
    int bflags;
    int policy;
    dev_t bdev;
    uthread_t ut;
    size_t devbsdunit;
    mount_t mp;

    bp = ap->a_bp;
    bdev = buf_device(bp);
    bflags = buf_flags(bp);
    mp = buf_vnode(bp)->v_mount;

    if (kdebug_enable) {
        int code = 0;

        if (bflags & B_READ)
            code |= DKIO_READ;
        if (bflags & B_ASYNC)
            code |= DKIO_ASYNC;

        if (bflags & B_META)
            code |= DKIO_META;
        else if (bflags & B_PAGEIO)
            code |= DKIO_PAGING;

        KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE,
                              bp, bdev, (int)buf_blkno(bp), buf_count(bp), 0);
    }

    /* Synchronous paging reads against the root device turn on hard throttling. */
    if (((bflags & (B_IOSTREAMING | B_PAGEIO | B_READ)) == (B_PAGEIO | B_READ)) &&
        mp && (mp->mnt_kern_flag & MNTK_ROOTDEV))
        hard_throttle_on_root = 1;

    if (mp != NULL)
        devbsdunit = mp->mnt_devbsdunit;
    else
        devbsdunit = LOWPRI_MAX_NUM_DEV - 1;

    throttle_info_update(&_throttle_io_info[devbsdunit], bflags);
    if ((policy = throttle_get_io_policy(&ut)) == IOPOL_THROTTLE) {
        bp->b_flags |= B_THROTTLED_IO;
    }

    /* Track pending read/write byte counts on the mount. */
    if ((bflags & B_READ) == 0) {
        microuptime(&_throttle_io_info[devbsdunit].last_IO_timestamp);
        if (mp) {
            INCR_PENDING_IO(buf_count(bp), mp->mnt_pending_write_size);
        }
    } else if (mp) {
        INCR_PENDING_IO(buf_count(bp), mp->mnt_pending_read_size);
    }

    /* Hand the buffer to the block device driver's strategy routine. */
    (*bdevsw[major(bdev)].d_strategy)(bp);

    return (0);
}