/*
 * Core device strategy call, used to issue I/O on a device.  There are
 * two versions, a non-chained version and a chained version.  The chained
 * version reuses a BIO set up by vn_strategy().  The only difference is
 * that, for now, we do not push a new tracking structure when chaining
 * from vn_strategy.  XXX this will ultimately have to change.
 */
void
dev_dstrategy(cdev_t dev, struct bio *bio)
{
	struct dev_strategy_args ap;
	struct bio_track *track;
	int needmplock = dev_needmplock(dev);

	ap.a_head.a_desc = &dev_strategy_desc;
	ap.a_head.a_dev = dev;
	ap.a_bio = bio;

	KKASSERT(bio->bio_track == NULL);
	KKASSERT(bio->bio_buf->b_cmd != BUF_CMD_DONE);
	if (bio->bio_buf->b_cmd == BUF_CMD_READ)
		track = &dev->si_track_read;
	else
		track = &dev->si_track_write;
	bio_track_ref(track);
	bio->bio_track = track;

	if (dsched_is_clear_buf_priv(bio->bio_buf))
		dsched_new_buf(bio->bio_buf);

	KKASSERT((bio->bio_flags & BIO_DONE) == 0);
	if (needmplock) {
		get_mplock();
		++mplock_strategies;
	} else {
		++mpsafe_strategies;
	}
	(void)dev->si_ops->d_strategy(&ap);
	if (needmplock)
		rel_mplock();
}
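/*
 * Illustrative sketch only (not part of this file): a hypothetical
 * translation layer "foo" re-issuing I/O on an underlying raw device.
 * It pushes a fresh bio with push_bio(), so the new bio carries no
 * track and dev_dstrategy() can attach the lower device's own tracking
 * structure.  The foo_* names, softc layout and done callback are
 * assumptions made for the example.
 */
#if 0
static int
foo_strategy(struct dev_strategy_args *ap)
{
	struct foo_softc *sc = ap->a_head.a_dev->si_drv1;
	struct bio *bio = ap->a_bio;
	struct bio *nbio;

	nbio = push_bio(bio);			/* fresh bio, no track yet */
	nbio->bio_offset = bio->bio_offset + sc->foo_doffset;
	nbio->bio_done = foo_strategy_done;	/* assumed completion hook */
	dev_dstrategy(sc->foo_rawdev, nbio);	/* lower dev gets its own track */
	return (0);
}
#endif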
int
dev_dwrite(cdev_t dev, struct uio *uio, int ioflag, struct file *fp)
{
	struct dev_write_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	dev->si_lastwrite = time_uptime;
	ap.a_head.a_desc = &dev_write_desc;
	ap.a_head.a_dev = dev;
	ap.a_uio = uio;
	ap.a_ioflag = ioflag;
	ap.a_fp = fp;

	if (needmplock) {
		get_mplock();
		++mplock_writes;
	} else {
		++mpsafe_writes;
	}
	error = dev->si_ops->d_write(&ap);
	if (needmplock)
		rel_mplock();
	return (error);
}
int
dev_dread(cdev_t dev, struct uio *uio, int ioflag, struct file *fp)
{
	struct dev_read_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_read_desc;
	ap.a_head.a_dev = dev;
	ap.a_uio = uio;
	ap.a_ioflag = ioflag;
	ap.a_fp = fp;

	if (needmplock) {
		get_mplock();
		++mplock_reads;
	} else {
		++mpsafe_reads;
	}
	error = dev->si_ops->d_read(&ap);
	if (needmplock)
		rel_mplock();
	if (error == 0)
		dev->si_lastread = time_uptime;
	return (error);
}
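/*
 * Illustrative sketch only: what a driver's d_read handler typically
 * looks like on the other side of this wrapper.  The handler pulls its
 * arguments out of dev_read_args and moves data to userland with
 * uiomove(); a d_write handler is symmetric.  The foo_* names and the
 * fixed backing buffer are assumptions made for the example.
 */
#if 0
static int
foo_read(struct dev_read_args *ap)
{
	struct foo_softc *sc = ap->a_head.a_dev->si_drv1;
	struct uio *uio = ap->a_uio;
	size_t n;

	if (uio->uio_offset >= sizeof(sc->foo_buf))
		return (0);				/* EOF */
	n = sizeof(sc->foo_buf) - uio->uio_offset;
	if (n > uio->uio_resid)
		n = uio->uio_resid;
	return (uiomove(sc->foo_buf + uio->uio_offset, n, uio));
}
#endif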
int
dev_dclone(cdev_t dev)
{
	struct dev_clone_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_clone_desc;
	ap.a_head.a_dev = dev;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_clone(&ap);
	if (needmplock)
		rel_mplock();
	return (error);
}
void
dev_dstrategy_chain(cdev_t dev, struct bio *bio)
{
	struct dev_strategy_args ap;
	int needmplock = dev_needmplock(dev);

	ap.a_head.a_desc = &dev_strategy_desc;
	ap.a_head.a_dev = dev;
	ap.a_bio = bio;

	KKASSERT(bio->bio_track != NULL);
	KKASSERT((bio->bio_flags & BIO_DONE) == 0);
	if (needmplock)
		get_mplock();
	(void)dev->si_ops->d_strategy(&ap);
	if (needmplock)
		rel_mplock();
}
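/*
 * Illustrative sketch only: a hypothetical pass-through layer that
 * forwards the caller's bio unmodified.  Because the bio already
 * carries a track (set up by vn_strategy()/dev_dstrategy()), the
 * chained entry point is used and no new tracking structure is
 * pushed.  The foopass_* names are assumptions made for the example.
 */
#if 0
static int
foopass_strategy(struct dev_strategy_args *ap)
{
	struct foopass_softc *sc = ap->a_head.a_dev->si_drv1;

	dev_dstrategy_chain(sc->foopass_target, ap->a_bio);
	return (0);
}
#endif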
int
dev_dclose(cdev_t dev, int fflag, int devtype)
{
	struct dev_close_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_close_desc;
	ap.a_head.a_dev = dev;
	ap.a_fflag = fflag;
	ap.a_devtype = devtype;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_close(&ap);
	if (needmplock)
		rel_mplock();
	return (error);
}
/************************************************************************
 *			GENERAL DEVICE API FUNCTIONS			*
 ************************************************************************
 *
 * The MPSAFEness of these depends on dev->si_ops->head.flags
 */
int
dev_dopen(cdev_t dev, int oflags, int devtype, struct ucred *cred)
{
	struct dev_open_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_open_desc;
	ap.a_head.a_dev = dev;
	ap.a_oflags = oflags;
	ap.a_devtype = devtype;
	ap.a_cred = cred;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_open(&ap);
	if (needmplock)
		rel_mplock();
	return (error);
}
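/*
 * Illustrative sketch only: how a driver typically supplies the dev_ops
 * that these wrappers dispatch to, and how it is bound to a device node
 * with make_dev().  Assuming the D_MPSAFE head flag is what
 * dev_needmplock() tests, setting it lets the wrappers skip the MP lock;
 * omit it and every entry point is called with the MP lock held.  The
 * foo_* names, major/minor numbers and permissions are assumptions made
 * for the example.
 */
#if 0
static struct dev_ops foo_ops = {
	{ "foo", 0, D_MPSAFE },
	.d_open =	foo_open,
	.d_close =	foo_close,
	.d_read =	foo_read,
	.d_write =	foo_write,
	.d_ioctl =	foo_ioctl,
};

static void
foo_attach(void)
{
	make_dev(&foo_ops, 0, UID_ROOT, GID_WHEEL, 0600, "foo0");
}
#endif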
int
dev_dmmap(cdev_t dev, vm_offset_t offset, int nprot)
{
	struct dev_mmap_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_mmap_desc;
	ap.a_head.a_dev = dev;
	ap.a_offset = offset;
	ap.a_nprot = nprot;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_mmap(&ap);
	if (needmplock)
		rel_mplock();
	if (error == 0)
		return(ap.a_result);
	return(-1);
}
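/*
 * Illustrative sketch only: a d_mmap handler reports its result through
 * ap->a_result rather than its return value, which is why the wrapper
 * above returns ap.a_result on success and -1 on error.  The direct
 * physical mapping, FOO_MEM_SIZE and foo_paddr are assumptions made for
 * the example.
 */
#if 0
static int
foo_mmap(struct dev_mmap_args *ap)
{
	if (ap->a_offset >= FOO_MEM_SIZE)
		return (EINVAL);
	ap->a_result = atop(foo_paddr + ap->a_offset);	/* page index */
	return (0);
}
#endif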
int
dev_dioctl(cdev_t dev, u_long cmd, caddr_t data, int fflag, struct ucred *cred,
    struct sysmsg *msg)
{
	struct dev_ioctl_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_ioctl_desc;
	ap.a_head.a_dev = dev;
	ap.a_cmd = cmd;
	ap.a_data = data;
	ap.a_fflag = fflag;
	ap.a_cred = cred;
	ap.a_sysmsg = msg;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_ioctl(&ap);
	if (needmplock)
		rel_mplock();
	return (error);
}
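/*
 * Illustrative sketch only: a d_ioctl handler dispatches on a_cmd and
 * reads or writes the kernel-side a_data buffer that the upper layers
 * have already copied in and will copy back out.  FOOGETCNT and the
 * softc field are assumptions made for the example.
 */
#if 0
static int
foo_ioctl(struct dev_ioctl_args *ap)
{
	struct foo_softc *sc = ap->a_head.a_dev->si_drv1;

	switch (ap->a_cmd) {
	case FOOGETCNT:
		*(int *)ap->a_data = sc->foo_count;
		return (0);
	default:
		return (ENOTTY);
	}
}
#endif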
int
dev_dmmap_single(cdev_t dev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **object, int nprot)
{
	struct dev_mmap_single_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_mmap_single_desc;
	ap.a_head.a_dev = dev;
	ap.a_offset = offset;
	ap.a_size = size;
	ap.a_object = object;
	ap.a_nprot = nprot;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_mmap_single(&ap);
	if (needmplock)
		rel_mplock();
	return(error);
}
/*
 * note: the disk layer is expected to set count, blkno, and secsize before
 * forwarding the message.
 */
int
dev_ddump(cdev_t dev, void *virtual, vm_offset_t physical, off_t offset,
    size_t length)
{
	struct dev_dump_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_dump_desc;
	ap.a_head.a_dev = dev;
	ap.a_count = 0;
	ap.a_blkno = 0;
	ap.a_secsize = 0;
	ap.a_virtual = virtual;
	ap.a_physical = physical;
	ap.a_offset = offset;
	ap.a_length = length;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_dump(&ap);
	if (needmplock)
		rel_mplock();
	return (error);
}
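/*
 * Illustrative sketch only, tied to the note above: a disk-layer dump
 * handler is expected to fill in a_count, a_blkno and a_secsize (which
 * dev_ddump() zeroes) before forwarding the request to the raw device.
 * The foodisk_* names and the partition/sector-size fields are
 * assumptions made for the example.
 */
#if 0
static int
foodisk_dump(struct dev_dump_args *ap)
{
	struct foodisk_softc *sc = ap->a_head.a_dev->si_drv1;

	ap->a_secsize = sc->foo_secsize;
	ap->a_blkno = sc->foo_part_start + ap->a_offset / sc->foo_secsize;
	ap->a_count = ap->a_length / sc->foo_secsize;
	ap->a_head.a_dev = sc->foo_rawdev;

	return (sc->foo_rawdev->si_ops->d_dump(ap));
}
#endif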