static int
zvol_read(struct bio *bio)
{
	zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
	uint64_t offset = BIO_BI_SECTOR(bio) << 9;
	uint64_t size = BIO_BI_SIZE(bio);
	int error;
	rl_t *rl;
	uio_t uio;

	if (size == 0)
		return (0);

	uio.uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
	uio.uio_skip = BIO_BI_SKIP(bio);
	uio.uio_resid = size;
	uio.uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
	uio.uio_loffset = offset;
	uio.uio_limit = MAXOFFSET_T;
	uio.uio_segflg = UIO_BVEC;

	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, &uio, size);

	zfs_range_unlock(rl);

	/* convert checksum errors into IO errors */
	if (error == ECKSUM)
		error = SET_ERROR(EIO);

	return (error);
}
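/*
 * The seven uio_t assignments above are repeated verbatim in zvol_write()
 * and zvol_request() below.  That mapping of a bio's bvec segments onto a
 * uio can be factored into a small helper; the sketch below only restates
 * the assignments already shown, and the name uio_from_bio() is
 * illustrative rather than taken from this section.
 */
static inline void
uio_from_bio(uio_t *uio, struct bio *bio)
{
	uio->uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
	uio->uio_skip = BIO_BI_SKIP(bio);
	uio->uio_resid = BIO_BI_SIZE(bio);
	uio->uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
	uio->uio_loffset = BIO_BI_SECTOR(bio) << 9;
	uio->uio_limit = MAXOFFSET_T;
	uio->uio_segflg = UIO_BVEC;
}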
static int
zvol_write(struct bio *bio)
{
	zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
	uint64_t offset = BIO_BI_SECTOR(bio) << 9;
	uint64_t size = BIO_BI_SIZE(bio);
	int error = 0;
	dmu_tx_t *tx;
	rl_t *rl;
	uio_t uio;

	if (bio->bi_rw & VDEV_REQ_FLUSH)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	/*
	 * Some requests are just for flush and nothing else.
	 */
	if (size == 0)
		goto out;

	uio.uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
	uio.uio_skip = BIO_BI_SKIP(bio);
	uio.uio_resid = size;
	uio.uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
	uio.uio_loffset = offset;
	uio.uio_limit = MAXOFFSET_T;
	uio.uio_segflg = UIO_BVEC;

	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, size);

	/* This will only fail for ENOSPC */
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_range_unlock(rl);
		goto out;
	}

	error = dmu_write_uio(zv->zv_objset, ZVOL_OBJ, &uio, size, tx);
	if (error == 0)
		zvol_log_write(zv, tx, offset, size,
		    !!(bio->bi_rw & VDEV_REQ_FUA));

	dmu_tx_commit(tx);
	zfs_range_unlock(rl);

	if ((bio->bi_rw & VDEV_REQ_FUA) ||
	    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

out:
	return (error);
}
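/*
 * zvol_request() below routes discard and secure-erase bios to
 * zvol_discard(), which is not reproduced in this section.  The sketch
 * below shows roughly what that path looks like, reusing the range-lock
 * and DMU transaction pattern from zvol_write(); the calls to
 * dmu_tx_mark_netfree(), zvol_log_truncate() and dmu_free_long_range()
 * are assumptions about the surrounding code, not taken from this section.
 */
static int
zvol_discard(struct bio *bio)
{
	zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
	uint64_t start = BIO_BI_SECTOR(bio) << 9;
	uint64_t size = BIO_BI_SIZE(bio);
	uint64_t end = start + size;
	int error;
	rl_t *rl;
	dmu_tx_t *tx;

	if (end > zv->zv_volsize)
		return (SET_ERROR(EIO));

	/*
	 * Unless a secure erase was requested, align the range to the
	 * volume block size so no partial blocks have to be zeroed.
	 */
	if (!bio_is_secure_erase(bio)) {
		start = P2ROUNDUP(start, zv->zv_volblocksize);
		end = P2ALIGN(end, zv->zv_volblocksize);
		size = end - start;
	}

	if (start >= end)
		return (0);

	rl = zfs_range_lock(&zv->zv_znode, start, size, RL_WRITER);

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);
	} else {
		zvol_log_truncate(zv, tx, start, size, B_TRUE);
		dmu_tx_commit(tx);
		error = dmu_free_long_range(zv->zv_objset,
		    ZVOL_OBJ, start, size);
	}

	zfs_range_unlock(rl);

	return (error);
}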
static MAKE_REQUEST_FN_RET
zvol_request(struct request_queue *q, struct bio *bio)
{
	uio_t uio;
	zvol_state_t *zv = q->queuedata;
	fstrans_cookie_t cookie = spl_fstrans_mark();
	int rw = bio_data_dir(bio);
#ifdef HAVE_GENERIC_IO_ACCT
	unsigned long start = jiffies;
#endif
	int error = 0;

	uio.uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
	uio.uio_skip = BIO_BI_SKIP(bio);
	uio.uio_resid = BIO_BI_SIZE(bio);
	uio.uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
	uio.uio_loffset = BIO_BI_SECTOR(bio) << 9;
	uio.uio_limit = MAXOFFSET_T;
	uio.uio_segflg = UIO_BVEC;

	if (bio_has_data(bio) && uio.uio_loffset + uio.uio_resid >
	    zv->zv_volsize) {
		printk(KERN_INFO
		    "%s: bad access: offset=%llu, size=%lu\n",
		    zv->zv_disk->disk_name,
		    (long long unsigned)uio.uio_loffset,
		    (long unsigned)uio.uio_resid);
		error = SET_ERROR(EIO);
		goto out1;
	}

	generic_start_io_acct(rw, bio_sectors(bio), &zv->zv_disk->part0);

	if (rw == WRITE) {
		if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
			error = SET_ERROR(EROFS);
			goto out2;
		}

		if (bio_is_discard(bio) || bio_is_secure_erase(bio)) {
			error = zvol_discard(bio);
			goto out2;
		}

		/*
		 * Some requests are just for flush and nothing else.
		 */
		if (uio.uio_resid == 0) {
			if (bio_is_flush(bio))
				zil_commit(zv->zv_zilog, ZVOL_OBJ);
			goto out2;
		}

		error = zvol_write(zv, &uio,
		    bio_is_flush(bio) || bio_is_fua(bio) ||
		    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
	} else
		error = zvol_read(zv, &uio);

out2:
	generic_end_io_acct(rw, &zv->zv_disk->part0, start);
out1:
	BIO_END_IO(bio, -error);
	spl_fstrans_unmark(cookie);

#ifdef HAVE_MAKE_REQUEST_FN_RET_INT
	return (0);
#elif defined(HAVE_MAKE_REQUEST_FN_RET_QC)
	return (BLK_QC_T_NONE);
#endif
}
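/*
 * For context, zvol_request() only runs once it has been installed as the
 * make_request function of the zvol's request queue.  A minimal sketch of
 * that hookup follows, assuming the older blk_alloc_queue() /
 * blk_queue_make_request() block-layer API that this code targets; the
 * zvol_alloc() name and error-handling details are illustrative, not taken
 * from this section.
 */
static zvol_state_t *
zvol_alloc(dev_t dev, const char *name)
{
	zvol_state_t *zv;

	zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);

	/* Bio-based queue: every submitted bio is handed to zvol_request(). */
	zv->zv_queue = blk_alloc_queue(GFP_KERNEL);
	if (zv->zv_queue == NULL)
		goto out_kmem;

	blk_queue_make_request(zv->zv_queue, zvol_request);
	zv->zv_queue->queuedata = zv;	/* what q->queuedata resolves to above */

	zv->zv_disk = alloc_disk(ZVOL_MINORS);
	if (zv->zv_disk == NULL)
		goto out_queue;

	zv->zv_disk->private_data = zv;	/* used by zvol_read()/zvol_write() */
	zv->zv_disk->queue = zv->zv_queue;

	return (zv);

out_queue:
	blk_cleanup_queue(zv->zv_queue);
out_kmem:
	kmem_free(zv, sizeof (zvol_state_t));
	return (NULL);
}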