/*
 * Forward a vnode-layer strategy request to the underlying block device.
 * If the buffer carries soft-dependency records, give the dependency code
 * a chance to prepare it before the device sees the I/O.
 */
int
spec_strategy(void *v)
{
	struct vop_strategy_args *args = v;
	struct buf *bufp = args->a_bp;
	int devmaj = major(bufp->b_dev);

	/* Let soft-updates set up the buffer before issuing the I/O. */
	if (LIST_FIRST(&bufp->b_dep) != NULL)
		buf_start(bufp);

	/* Dispatch through the block-device switch table. */
	(*bdevsw[devmaj].d_strategy)(bufp);
	return (0);
}
/*
 * Convert a vnode strategy call into a device strategy call.  Vnode strategy
 * calls are not limited to device DMA limits so we have to deal with the
 * case: I/Os larger than the device's maximum are split into a chain of
 * chunk-sized transfers driven by devfs_spec_strategy_done().
 *
 * spec_strategy(struct vnode *a_vp, struct bio *a_bio)
 */
static int
devfs_spec_strategy(struct vop_strategy_args *ap)
{
	struct bio *bio = ap->a_bio;
	struct buf *bp = bio->bio_buf;
	struct buf *nbp;		/* cloned buf heading the I/O chain */
	struct vnode *vp;
	struct mount *mp;
	int chunksize;			/* per-transfer size for the chain */
	int maxiosize;			/* device's max single-I/O size */

	/*
	 * Give the soft-dependency code a chance to prepare the buffer.
	 * Reads never carry dependencies, so they skip this.
	 */
	if (bp->b_cmd != BUF_CMD_READ && LIST_FIRST(&bp->b_dep) != NULL)
		buf_start(bp);

	/*
	 * Collect statistics on synchronous and asynchronous read
	 * and write counts for disks that have associated filesystems.
	 *
	 * NOTE(review): BIO_SYNC is tested against b_flags here; it looks
	 * like a bio-level flag — confirm this is the intended test and not
	 * a leftover from a b_flags-era flag of the same value.
	 */
	vp = ap->a_vp;
	KKASSERT(vp->v_rdev != NULL);	/* XXX */
	if (vn_isdisk(vp, NULL) && (mp = vp->v_rdev->si_mountpoint) != NULL) {
		if (bp->b_cmd == BUF_CMD_READ) {
			if (bp->b_flags & BIO_SYNC)
				mp->mnt_stat.f_syncreads++;
			else
				mp->mnt_stat.f_asyncreads++;
		} else {
			if (bp->b_flags & BIO_SYNC)
				mp->mnt_stat.f_syncwrites++;
			else
				mp->mnt_stat.f_asyncwrites++;
		}
	}

	/*
	 * Device iosize limitations only apply to read and write.  Shortcut
	 * the I/O if it fits.  A driver that never set si_iosize_max gets
	 * the conservative MAXPHYS default (and a debug complaint).
	 */
	if ((maxiosize = vp->v_rdev->si_iosize_max) == 0) {
		devfs_debug(DEVFS_DEBUG_DEBUG,
			    "%s: si_iosize_max not set!\n",
			    dev_dname(vp->v_rdev));
		maxiosize = MAXPHYS;
	}
#if SPEC_CHAIN_DEBUG & 2
	/* Force tiny chunks so the chaining path is exercised in testing. */
	maxiosize = 4096;
#endif
	if (bp->b_bcount <= maxiosize ||
	    (bp->b_cmd != BUF_CMD_READ && bp->b_cmd != BUF_CMD_WRITE)) {
		dev_dstrategy_chain(vp->v_rdev, bio);
		return (0);
	}

	/*
	 * Clone the buffer and set up an I/O chain to chunk up the I/O.
	 * The clone shares the original's data pointer; the original bio is
	 * stashed in bio_caller_info1 so the done-callback can advance the
	 * chain and ultimately complete the parent request.
	 */
	nbp = kmalloc(sizeof(*bp), M_DEVBUF, M_INTWAIT|M_ZERO);
	initbufbio(nbp);
	buf_dep_init(nbp);
	BUF_LOCK(nbp, LK_EXCLUSIVE);
	BUF_KERNPROC(nbp);
	nbp->b_vp = vp;
	nbp->b_flags = B_PAGING | (bp->b_flags & B_BNOCLIP);
	nbp->b_data = bp->b_data;
	nbp->b_bio1.bio_done = devfs_spec_strategy_done;
	nbp->b_bio1.bio_offset = bio->bio_offset;
	nbp->b_bio1.bio_caller_info1.ptr = bio;

	/*
	 * Start the first transfer.  Chunk size is the largest multiple of
	 * the device's physical block size (DEV_BSIZE for non-disks) that
	 * still fits within maxiosize.
	 */
	if (vn_isdisk(vp, NULL))
		chunksize = vp->v_rdev->si_bsize_phys;
	else
		chunksize = DEV_BSIZE;
	chunksize = maxiosize / chunksize * chunksize;
#if SPEC_CHAIN_DEBUG & 1
	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "spec_strategy chained I/O chunksize=%d\n",
		    chunksize);
#endif
	nbp->b_cmd = bp->b_cmd;
	nbp->b_bcount = chunksize;
	nbp->b_bufsize = chunksize;	/* used to detect a short I/O */
	nbp->b_bio1.bio_caller_info2.index = chunksize;

#if SPEC_CHAIN_DEBUG & 1
	devfs_debug(DEVFS_DEBUG_DEBUG,
		    "spec_strategy: chain %p offset %d/%d bcount %d\n",
		    bp, 0, bp->b_bcount, nbp->b_bcount);
#endif
	dev_dstrategy(vp->v_rdev, &nbp->b_bio1);

	/*
	 * Touch the devfs node's timestamps.  NOTE(review): both atime and
	 * mtime are updated regardless of read/write direction — confirm
	 * that is intended rather than atime-on-read / mtime-on-write.
	 */
	if (DEVFS_NODE(vp)) {
		nanotime(&DEVFS_NODE(vp)->atime);
		nanotime(&DEVFS_NODE(vp)->mtime);
	}
	return (0);
}