static int
vdev_disk_io_start(zio_t *zio)
{
    vdev_t *vd = zio->io_vd;
    vdev_disk_t *dvd = vd->vdev_tsd;
    struct buf *bp;
    vfs_context_t context;
    int flags, error = 0;

    if (zio->io_type == ZIO_TYPE_IOCTL) {
        zio_vdev_io_bypass(zio);

        /* XXPOLICY */
        if (vdev_is_dead(vd)) {
            zio->io_error = ENXIO;
            //zio_next_stage_async(zio);
            return (ZIO_PIPELINE_CONTINUE);
            //return;
        }

        switch (zio->io_cmd) {
        case DKIOCFLUSHWRITECACHE:
            if (zfs_nocacheflush)
                break;

            if (vd->vdev_nowritecache) {
                zio->io_error = SET_ERROR(ENOTSUP);
                break;
            }

            context = vfs_context_create((vfs_context_t)0);
            error = VNOP_IOCTL(dvd->vd_devvp, DKIOCSYNCHRONIZECACHE,
                NULL, FWRITE, context);
            (void) vfs_context_rele(context);

            if (error == 0)
                vdev_disk_ioctl_done(zio, error);
            else
                error = ENOTSUP;

            if (error == 0) {
                /*
                 * The ioctl will be done asynchronously,
                 * and will call vdev_disk_ioctl_done()
                 * upon completion.
                 */
                return (ZIO_PIPELINE_STOP);
            } else if (error == ENOTSUP || error == ENOTTY) {
                /*
                 * If we get ENOTSUP or ENOTTY, we know that
                 * no future attempts will ever succeed.
                 * In this case we set a persistent bit so
                 * that we don't bother with the ioctl in the
                 * future.
                 */
                vd->vdev_nowritecache = B_TRUE;
            }

            zio->io_error = error;
            break;

        default:
            zio->io_error = SET_ERROR(ENOTSUP);
        }

        //zio_next_stage_async(zio);
        return (ZIO_PIPELINE_CONTINUE);
    }

    if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio) == 0)
        return (ZIO_PIPELINE_STOP);
        // return;

    if ((zio = vdev_queue_io(zio)) == NULL)
        return (ZIO_PIPELINE_CONTINUE);
        // return;

    flags = (zio->io_type == ZIO_TYPE_READ ? B_READ : B_WRITE);
    //flags |= B_NOCACHE;

    if (zio->io_flags & ZIO_FLAG_FAILFAST)
        flags |= B_FAILFAST;

    /*
     * Check the state of this device to see if it has been offlined or
     * is in an error state.  If the device was offlined or closed,
     * dvd will be NULL and buf_alloc below will fail.
     */
    //error = vdev_is_dead(vd) ? ENXIO : vdev_error_inject(vd, zio);
    if (vdev_is_dead(vd)) {
        error = ENXIO;
    }

    if (error) {
        zio->io_error = error;
        //zio_next_stage_async(zio);
        return (ZIO_PIPELINE_CONTINUE);
    }

    bp = buf_alloc(dvd->vd_devvp);

    ASSERT(bp != NULL);
    ASSERT(zio->io_data != NULL);
    ASSERT(zio->io_size != 0);

    buf_setflags(bp, flags);
    buf_setcount(bp, zio->io_size);
    buf_setdataptr(bp, (uintptr_t)zio->io_data);

    if (dvd->vd_ashift) {
        buf_setlblkno(bp, zio->io_offset >> dvd->vd_ashift);
        buf_setblkno(bp, zio->io_offset >> dvd->vd_ashift);
    } else {
        /*
         * The original listing was truncated at this point; the
         * remainder is an assumed reconstruction modeled on the newer
         * version below: fall back to the fixed 512-byte lbtodb()
         * mapping, register the completion callback, and issue the I/O.
         */
        buf_setlblkno(bp, lbtodb(zio->io_offset));
        buf_setblkno(bp, lbtodb(zio->io_offset));
    }

    buf_setsize(bp, zio->io_size);

    if (buf_setcallback(bp, vdev_disk_io_intr, zio) != 0)
        panic("vdev_disk_io_start: buf_setcallback failed\n");

    error = VNOP_STRATEGY(bp);
    ASSERT(error == 0);

    return (ZIO_PIPELINE_STOP);
}
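Both versions of vdev_disk_io_start() complete a successful cache flush through vdev_disk_ioctl_done(), which is not part of this listing. A minimal sketch of that helper, assuming it follows the upstream OpenSolaris vdev_disk.c pattern of recording the result and re-entering the ZIO pipeline:

/*
 * Assumed helper (not in the listing above): record the ioctl result on
 * the zio and hand it back to the pipeline. The "asynchronously" comment
 * in vdev_disk_io_start() implies the zio must be completed here, since
 * the caller returns without executing it further.
 */
static void
vdev_disk_ioctl_done(void *zio_arg, int error)
{
    zio_t *zio = zio_arg;

    zio->io_error = error;
    zio_interrupt(zio);
}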
static void
vdev_disk_io_start(zio_t *zio)
{
    vdev_t *vd = zio->io_vd;
    vdev_disk_t *dvd = vd->vdev_tsd;
    struct buf *bp;
    vfs_context_t context;
    int flags, error = 0;

    /*
     * If the vdev is closed, it's likely in the REMOVED or FAULTED state.
     * Nothing to be done here but return failure.
     */
    if (dvd == NULL || (dvd->vd_offline) || dvd->vd_devvp == NULL) {
        zio->io_error = ENXIO;
        zio_interrupt(zio);
        return;
    }

    switch (zio->io_type) {

    case ZIO_TYPE_IOCTL:

        if (!vdev_readable(vd)) {
            zio->io_error = SET_ERROR(ENXIO);
            zio_interrupt(zio);
            return;
        }

        switch (zio->io_cmd) {
        case DKIOCFLUSHWRITECACHE:

            if (zfs_nocacheflush)
                break;

            if (vd->vdev_nowritecache) {
                zio->io_error = SET_ERROR(ENOTSUP);
                break;
            }

            context = vfs_context_create(spl_vfs_context_kernel());
            error = VNOP_IOCTL(dvd->vd_devvp, DKIOCSYNCHRONIZECACHE,
                NULL, FWRITE, context);
            (void) vfs_context_rele(context);

            if (error == 0)
                vdev_disk_ioctl_done(zio, error);
            else
                error = ENOTSUP;

            if (error == 0) {
                /*
                 * The ioctl will be done asynchronously,
                 * and will call vdev_disk_ioctl_done()
                 * upon completion.
                 */
                return;
            } else if (error == ENOTSUP || error == ENOTTY) {
                /*
                 * If we get ENOTSUP or ENOTTY, we know that
                 * no future attempts will ever succeed.
                 * In this case we set a persistent bit so
                 * that we don't bother with the ioctl in the
                 * future.
                 */
                vd->vdev_nowritecache = B_TRUE;
            }

            zio->io_error = error;
            break;

        default:
            zio->io_error = SET_ERROR(ENOTSUP);
        } /* io_cmd */

        zio_execute(zio);
        return;

    case ZIO_TYPE_WRITE:
        if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE)
            flags = B_WRITE;
        else
            flags = B_WRITE | B_ASYNC;
        break;

    case ZIO_TYPE_READ:
        if (zio->io_priority == ZIO_PRIORITY_SYNC_READ)
            flags = B_READ;
        else
            flags = B_READ | B_ASYNC;
        break;

    default:
        zio->io_error = SET_ERROR(ENOTSUP);
        zio_interrupt(zio);
        return;
    } /* io_type */

    ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);

    /* Stop OS X from also caching our data. */
    flags |= B_NOCACHE;

    if (zio->io_flags & ZIO_FLAG_FAILFAST)
        flags |= B_FAILFAST;

    zio->io_target_timestamp = zio_handle_io_delay(zio);

    bp = buf_alloc(dvd->vd_devvp);

    ASSERT(bp != NULL);
    ASSERT(zio->io_data != NULL);
    ASSERT(zio->io_size != 0);

    buf_setflags(bp, flags);
    buf_setcount(bp, zio->io_size);
    buf_setdataptr(bp, (uintptr_t)zio->io_data);

    /*
     * Map the zio's byte offset to a block number in the device's
     * physical block size (512, 4096, ...) using vd_ashift; lbtodb()
     * is fixed at 512, so it cannot be used here.
     */
    buf_setblkno(bp, zio->io_offset >> dvd->vd_ashift);
    buf_setlblkno(bp, zio->io_offset >> dvd->vd_ashift);

    buf_setsize(bp, zio->io_size);

    if (buf_setcallback(bp, vdev_disk_io_intr, zio) != 0)
        panic("vdev_disk_io_start: buf_setcallback failed\n");

    if (zio->io_type == ZIO_TYPE_WRITE) {
        vnode_startwrite(dvd->vd_devvp);
    }

    error = VNOP_STRATEGY(bp);
    ASSERT(error == 0);

    if (error) {
        zio->io_error = error;
        zio_interrupt(zio);
        return;
    }
}
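The newer version registers vdev_disk_io_intr() with buf_setcallback() instead of blocking in the pipeline. That callback is not shown in this listing; a minimal sketch, assuming only XNU's buf KPI (buf_error(), buf_resid(), buf_free()) and the zio_delay_interrupt() entry point that consumes the io_target_timestamp set above:

/*
 * Assumed completion handler for the buf issued by vdev_disk_io_start():
 * pull the error and residual count out of the buf, free it, and re-enter
 * the ZIO pipeline. zio_delay_interrupt() honors the io_target_timestamp
 * recorded by zio_handle_io_delay() for fault-injected delays.
 */
static void
vdev_disk_io_intr(struct buf *bp, void *arg)
{
    zio_t *zio = (zio_t *)arg;

    zio->io_error = buf_error(bp);
    if (zio->io_error == 0 && buf_resid(bp) != 0)
        zio->io_error = SET_ERROR(EIO);

    buf_free(bp);
    zio_delay_interrupt(zio);
}

A short transfer with no error is still a failure from ZFS's point of view, hence the buf_resid() check before handing the zio back.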
int
physio(void (*f_strategy)(buf_t),
    buf_t bp,
    dev_t dev,
    int flags,
    u_int (*f_minphys)(buf_t),
    struct uio *uio,
    int blocksize)
{
    struct proc *p = current_proc();
    int error, i, buf_allocated, todo, iosize;
    int orig_bflags = 0;
    int64_t done;

    error = 0;
    flags &= B_READ | B_WRITE;
    buf_allocated = 0;

    /*
     * [check user read/write access to the data buffer]
     *
     * Check each iov one by one.  Note that we know if we're reading or
     * writing, so we ignore the uio's rw parameter.  Also note that if
     * we're doing a read, that's a *write* to user-space.
     */
    for (i = 0; i < uio->uio_iovcnt; i++) {
        if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) {
            user_addr_t base;
            user_size_t len;

            if (uio_getiov(uio, i, &base, &len) ||
                !useracc(base, len,
                (flags == B_READ) ? B_WRITE : B_READ))
                return (EFAULT);
        }
    }

    /*
     * Make sure we have a buffer, creating one if necessary.
     */
    if (bp == NULL) {
        bp = buf_alloc((vnode_t)0);
        buf_allocated = 1;
    } else
        orig_bflags = buf_flags(bp);

    /*
     * At this point we should have a buffer that is marked BL_BUSY...
     * we either acquired it via buf_alloc, or it was passed into us...
     * if it was passed in, it needs to already be owned by the caller
     * (i.e. BL_BUSY is set).
     */
    assert(bp->b_lflags & BL_BUSY);

    /*
     * [set up the fixed part of the buffer for a transfer]
     */
    bp->b_dev = dev;
    bp->b_proc = p;

    /*
     * [mark the buffer busy for physical I/O]
     * (i.e. set B_PHYS (because it's an I/O to user
     * memory), and B_RAW, because B_RAW is to be
     * "Set by physio for raw transfers.", in addition
     * to the read/write flag.)
     */
    buf_setflags(bp, B_PHYS | B_RAW);

    /*
     * [while there is data to transfer and no I/O error]
     * Note that I/O errors are handled with a 'goto' at the bottom
     * of the 'while' loop.
     */
    while (uio_resid(uio) > 0) {
        if ((iosize = uio_curriovlen(uio)) > MAXPHYSIO_WIRED)
            iosize = MAXPHYSIO_WIRED;

        /*
         * Make sure we're set to issue a fresh I/O
         * in the right direction.
         */
        buf_reset(bp, flags);

        /* [set up the buffer for a maximum-sized transfer] */
        buf_setblkno(bp, uio_offset(uio) / blocksize);
        buf_setcount(bp, iosize);
        buf_setdataptr(bp,
            (uintptr_t)CAST_DOWN(caddr_t, uio_curriovbase(uio)));

        /*
         * [call f_minphys to bound the transfer size]
         * and remember the amount of data to transfer,
         * for later comparison.
         */
        (*f_minphys)(bp);
        todo = buf_count(bp);

        /*
         * [lock the part of the user address space involved
         *    in the transfer]
         */
        if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg)) {
            error = vslock(CAST_USER_ADDR_T(buf_dataptr(bp)),
                (user_size_t)todo);
            if (error)
                goto done;
        }

        /* [call f_strategy to start the transfer] */
        (*f_strategy)(bp);

        /* [wait for the transfer to complete] */
        error = (int)buf_biowait(bp);

        /*
         * [unlock the part of the address space previously
         *    locked]
         */
        if (UIO_SEG_IS_USER_SPACE(uio->uio_segflg))
            vsunlock(CAST_USER_ADDR_T(buf_dataptr(bp)),
                (user_size_t)todo, (flags & B_READ));

        /*
         * [deduct the transferred size from the total amount
         * of data to transfer]
         */
        done = buf_count(bp) - buf_resid(bp);
        uio_update(uio, done);

        /*
         * Now, check for an error.
         * Also, handle weird end-of-disk semantics.
         */
        if (error || done < todo)
            goto done;
    }

done:
    if (buf_allocated)
        buf_free(bp);
    else
        buf_setflags(bp, orig_bflags);

    return (error);
}
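physio() is the bridge between a character device's read/write entry points and its strategy routine: it validates and wires the user pages, loops a bounce buf over the uio, and waits on each strategy call. A usage sketch under assumed names (mydev_read(), mydev_strategy(), and mydev_minphys() are all hypothetical; only physio() and the buf accessors above are real KPIs):

/* Driver-supplied strategy routine; hypothetical. */
static void mydev_strategy(buf_t bp);

/*
 * Hypothetical clamp in the style of minphys(): bound each transfer to
 * 128 KB (a typical MAXPHYS value) before physio() wires the pages.
 */
static u_int
mydev_minphys(buf_t bp)
{
    if (buf_count(bp) > (128 * 1024))
        buf_setcount(bp, 128 * 1024);
    return (buf_count(bp));
}

/*
 * Hypothetical character-device read entry point. Passing a NULL bp
 * makes physio() allocate (and free) the buf itself; B_READ sets the
 * direction; the 512 feeds the uio_offset(uio) / blocksize conversion
 * in the loop above.
 */
int
mydev_read(dev_t dev, struct uio *uio, int ioflag)
{
    return (physio(mydev_strategy, NULL, dev, B_READ,
        mydev_minphys, uio, 512));
}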