/*
 * Asynchronous vnode_close() worker.
 *
 * During export/reboot IOKit may invoke our termination handler while
 * already holding one of its locks; closing the vnode synchronously there
 * deadlocks against ourselves inside IOKit.  Deferring the vnode_close()
 * to this dedicated thread avoids the self-deadlock.
 */
static void
vdev_disk_close_thread(void *arg)
{
	struct vnode *target_vp = arg;

	(void) vnode_close(target_vp, 0, spl_vfs_context_kernel());

	thread_exit();
}
/*
 * Close the vnode backing a cfs kernel file handle, drop the vfs context
 * reference taken at open time, and free the handle itself.
 * Always returns 0.
 */
int
kern_file_close(struct cfs_kern_file *fp)
{
	(void) vnode_close(fp->f_vp, fp->f_flags, fp->f_ctxt);
	vfs_context_rele(fp->f_ctxt);
	_FREE(fp, M_TEMP);

	return 0;
}
void VNodeDiskDeviceClass::closeVNode() { if (m_vnode != NULL) { IOLog("Closing the file node\n"); vfs_context_t vfsContext = vfs_context_create((vfs_context_t) 0); vnode_close(m_vnode, 0, vfsContext); vfs_context_rele(vfsContext); m_vnode = NULL; } }
/*
 * Solaris-style VOP_CLOSE shim on top of Darwin's vnode_close().
 *
 * Only the FWRITE bit of 'flag' is forwarded to vnode_close(); the
 * count/off/cr/k arguments exist purely for interface compatibility and
 * are ignored.  Returns the vnode_close() status.
 */
int
VOP_CLOSE(struct vnode *vp, int flag, int count, offset_t off, void *cr, void *k)
{
	vfs_context_t ctx = vfs_context_create((vfs_context_t)0);
	int rc = vnode_close(vp, flag & FWRITE, ctx);

	(void) vfs_context_rele(ctx);
	return (rc);
}
/*
 * Flush and close the shutdown log vnode, if one was opened.
 * Returns the vnode_close() status, or 0 when no log file is open.
 */
static int
sd_closelog(vfs_context_t ctx)
{
	if (sd_logvp == NULLVP)
		return 0;

	VNOP_FSYNC(sd_logvp, MNT_WAIT, ctx);
	return vnode_close(sd_logvp, FWRITE, ctx);
}
void kern_close_file_for_direct_io(struct kern_direct_file_io_ref_t * ref, off_t write_offset, caddr_t addr, vm_size_t write_length, off_t discard_offset, off_t discard_end) { int error; kprintf("kern_close_file_for_direct_io\n"); if (!ref) return; if (ref->vp) { int (*do_ioctl)(void * p1, void * p2, u_long theIoctl, caddr_t result); void * p1; void * p2; if (ref->vp->v_type == VREG) { p1 = &ref->device; p2 = kernproc; do_ioctl = &file_ioctl; } else { /* Partition. */ p1 = ref->vp; p2 = ref->ctx; do_ioctl = &device_ioctl; } (void) do_ioctl(p1, p2, DKIOCUNLOCKPHYSICALEXTENTS, NULL); if (addr && write_length) { (void) kern_write_file(ref, write_offset, addr, write_length); } if (discard_offset && discard_end && !ref->pinned) { (void) kern_ioctl_file_extents(ref, DKIOCUNMAP, discard_offset, discard_end); } error = vnode_close(ref->vp, FWRITE, ref->ctx); ref->vp = NULLVP; kprintf("vnode_close(%d)\n", error); } vfs_context_rele(ref->ctx); ref->ctx = NULL; kfree(ref, sizeof(struct kern_direct_file_io_ref_t)); }
/*
 * Close and delete a swap file: take a reference for the close, close the
 * vnode, then unlink the file by path.
 * NOTE(review): return values of vnode_getwithref()/vnode_close()/unlink1()
 * are ignored here; 'error' is assigned but never consumed.
 */
void
vm_swapfile_close(uint64_t path_addr, vnode_t vp)
{
	struct nameidata nd;
	vfs_context_t context = vfs_context_current();
	int error = 0;

	vnode_getwithref(vp);
	vnode_close(vp, 0, context);

	NDINIT(&nd, DELETE, OP_UNLINK, AUDITVNPATH1, UIO_SYSSPACE,
	    path_addr, context);

	error = unlink1(context, &nd, 0);
}
/*
 * Close an osi_file handle.
 *
 * On Darwin 8.0+ the underlying vnode is closed with vnode_close() using
 * the AFS global context; on older kernels the vnode reference is simply
 * released.  The handle itself is always freed.  Always returns 0.
 */
int
osi_UFSClose(struct osi_file *afile)
{
    AFS_STATCNT(osi_Close);
    if (afile->vnode) {
#ifdef AFS_DARWIN80_ENV
	vnode_close(afile->vnode, O_RDWR, afs_osi_ctxtp);
#else
	AFS_RELE(afile->vnode);
#endif
    }

    osi_FreeSmallSpace(afile);
    return 0;
}
bool VNodeDiskDeviceClass::setupVNode() { int vapError = -1; struct vnode_attr vap; if (m_vnode != NULL) return true; vfs_context_t vfsContext = vfs_context_create((vfs_context_t) 0); int vnodeError = vnode_open(m_filePath->getCStringNoCopy(), (FREAD | FWRITE), 0, 0, &m_vnode, vfsContext); if (vnodeError || m_vnode == NULL) { IOLog("Error when opening file %s: error %d\n", m_filePath->getCStringNoCopy(), vnodeError); goto failure; } if (!vnode_isreg(m_vnode)) { IOLog("Error when opening file %s: not a regular file\n", m_filePath->getCStringNoCopy()); vnode_close(m_vnode, (FREAD | FWRITE), vfsContext); goto failure; } VATTR_INIT(&vap); VATTR_WANTED(&vap, va_data_size); vapError = vnode_getattr(m_vnode, &vap, vfsContext); if (vapError) { IOLog("Error when retrieving vnode's attributes with error code %d\n", vapError); goto failure; } if (vap.va_data_size < m_blockSize * m_blockNum) { IOLog("Error file %s is too small, actual size is %llu\n", m_filePath->getCStringNoCopy(), vap.va_data_size); goto failure; } vfs_context_rele(vfsContext); return true; failure: vfs_context_rele(vfsContext); return false; }
/*
 * Close and delete a swap file (newer unlink1() signature variant).
 * The vnode is re-referenced for the close, then the file is removed by
 * path.  Close errors are ignored; unlink failures are only reported on
 * DEVELOPMENT/DEBUG kernels.
 */
void
vm_swapfile_close(uint64_t path_addr, vnode_t vp)
{
	vfs_context_t context = vfs_context_current();
	int error;

	vnode_getwithref(vp);
	vnode_close(vp, 0, context);

	error = unlink1(context, NULLVP, CAST_USER_ADDR_T(path_addr),
	    UIO_SYSSPACE, 0);

#if DEVELOPMENT || DEBUG
	if (error)
		printf("%s : unlink of %s failed with error %d", __FUNCTION__,
		    (char *)path_addr, error);
#endif
}
/*
 * Persist the NVRAM plist buffer to FILE_NVRAM_PATH.
 *
 * Opens (creating/truncating) the backing file, verifies it is a regular
 * file, writes strlen(aBuffer) bytes, and closes the vnode.  Returns 0 on
 * success, an errno-style code on failure, or 0xFFFF when aCtx is NULL.
 *
 * BUG FIX: the non-regular-file branch previously returned without closing
 * the vnode obtained from vnode_open(), leaking an open vnode on every call
 * that hit that path.
 */
IOReturn FileNVRAM::write_buffer(char* aBuffer, vfs_context_t aCtx)
{
	IOReturn error = 0;
	int length = (int)strlen(aBuffer);
	struct vnode * vp;

	if (aCtx) {
		if ((error = vnode_open(FILE_NVRAM_PATH, (O_TRUNC | O_CREAT | FWRITE | O_NOFOLLOW),
		    S_IRUSR | S_IWUSR, VNODE_LOOKUP_NOFOLLOW, &vp, aCtx))) {
			printf("FileNVRAM.kext: Error, vnode_open(%s) failed with error %d!\n", FILE_NVRAM_PATH, error);
			return error;
		} else {
			/* NOTE(review): vnode_isreg() returns 1 for a regular file,
			 * which only matches VREG because VREG happens to equal 1. */
			if ((error = vnode_isreg(vp)) == VREG) {
				if ((error = vn_rdwr(UIO_WRITE, vp, aBuffer, length, 0, UIO_SYSSPACE,
				    IO_NOCACHE|IO_NODELOCKED|IO_UNIT, vfs_context_ucred(aCtx),
				    (int *) 0, vfs_context_proc(aCtx)))) {
					printf("FileNVRAM.kext: Error, vn_rdwr(%s) failed with error %d!\n", FILE_NVRAM_PATH, error);
				}

				if ((error = vnode_close(vp, FWASWRITTEN, aCtx))) {
					printf("FileNVRAM.kext: Error, vnode_close(%s) failed with error %d!\n", FILE_NVRAM_PATH, error);
				}
			} else {
				printf("FileNVRAM.kext: Error, vnode_isreg(%s) failed with error %d!\n", FILE_NVRAM_PATH, error);
				/* BUG FIX: close the open vnode on this path too. */
				(void) vnode_close(vp, 0, aCtx);
			}
		}
	} else {
		printf("FileNVRAM.kext: aCtx == NULL!\n");
		error = 0xFFFF; // EINVAL;
	}

	return error;
}
/*
 * Close the device vnode backing a disk vdev and free the per-vdev state.
 * A vdev whose tsd was never allocated is ignored.
 */
static void
vdev_disk_close(vdev_t *vd)
{
	vdev_disk_t *dvd = vd->vdev_tsd;

	if (dvd == NULL)
		return;

	if (dvd->vd_devvp != NULL) {
		vfs_context_t ctx = vfs_context_create((vfs_context_t)0);

		(void) vnode_close(dvd->vd_devvp, spa_mode(vd->vdev_spa), ctx);
		(void) vfs_context_rele(ctx);
	}

	kmem_free(dvd, sizeof (vdev_disk_t));
	vd->vdev_tsd = NULL;
}
/*
 * Destructor helper: write a terminator record, flush and close the shadow
 * file vnode, then release the path buffer and the rw-lock.
 * Must be called with preemption enabled (sleeping operations follow).
 */
void
DldIOShadowFile::free()
{
    assert( preemption_enabled() );

    if( NULLVP != this->vnode ){

        //
        // write a terminator
        //
        UInt64 terminator = DLD_SHADOW_FILE_TERMINATOR;

        this->write( &terminator, sizeof( terminator ), DLD_IGNR_FSIZE );

        //
        // TO DO - ponder the vfs context retrieval as it seems vfs_context_current might be
        // an arbitrary one which differs from the open context
        //
        vfs_context_t vfs_context;

        vfs_context = vfs_context_create( NULL );
        assert( vfs_context );
        if( vfs_context ){

            // flush everything written so far before closing
            VNOP_FSYNC( this->vnode, MNT_WAIT, vfs_context );

            // mark the file as written-to only if any payload bytes went out
            vnode_close( this->vnode, ( this->bytesWritten )? FWASWRITTEN: 0x0, vfs_context );

            vfs_context_rele( vfs_context );

        }// end if( vfs_context )
    }

    if( this->path ){

        assert( this->pathLength );
        IOFree( this->path, this->pathLength );
    }

    if( this->rwLock )
        IORWLockFree( this->rwLock );
}
/*
 * Tear down a direct-I/O file reference (newer variant): unlock physical
 * extents, unpin/unmap extents as appropriate for CoreStorage / pinned
 * files, thaw APFS extents, release any write-back-cache range, optionally
 * write one final buffer, then close the vnode and free the reference.
 * A NULL ref is tolerated.  The discard range is rounded inward to device
 * block boundaries before use.
 */
void
kern_close_file_for_direct_io(struct kern_direct_file_io_ref_t * ref,
			      off_t write_offset, void * addr, size_t write_length,
			      off_t discard_offset, off_t discard_end)
{
    int error;
    printf("kern_close_file_for_direct_io(%p)\n", ref);

    if (!ref) return;

    if (ref->vp)
    {
        /* ioctl dispatch: regular file -> file_ioctl() on the containing
         * device; block/char node -> device_ioctl() on the vnode. */
        int (*do_ioctl)(void * p1, void * p2, u_long theIoctl, caddr_t result);
        void * p1;
        void * p2;

        /* Round the discard range inward to whole device blocks. */
        discard_offset = ((discard_offset + ref->blksize - 1) & ~(((off_t) ref->blksize) - 1));
        discard_end    = ((discard_end)                       & ~(((off_t) ref->blksize) - 1));

        if (ref->vp->v_type == VREG)
        {
            p1 = &ref->device;
            p2 = kernproc;
            do_ioctl = &file_ioctl;
        }
        else
        {
            /* Partition. */
            p1 = ref->vp;
            p2 = ref->ctx;
            do_ioctl = &device_ioctl;
        }
        (void) do_ioctl(p1, p2, DKIOCUNLOCKPHYSICALEXTENTS, NULL);

        //XXX If unmapping extents then don't also need to unpin; except ...
        //XXX if file unaligned (HFS 4k / Fusion 128k) then pin is superset and
        //XXX unmap is subset, so save extra walk over file extents (and the risk
        //XXX that CF drain starts) vs leaving partial units pinned to SSD
        //XXX (until whatever was sharing also unmaps).  Err on cleaning up fully.
        boolean_t will_unmap = (!ref->pinned || ref->cf) && (discard_end > discard_offset);
        boolean_t will_unpin = (ref->pinned && ref->cf /* && !will_unmap */);

        (void) kern_ioctl_file_extents(ref, _DKIOCCSUNPINEXTENT, 0, (will_unpin) ? ref->filelength : 0);

        if (will_unmap)
        {
            (void) kern_ioctl_file_extents(ref, DKIOCUNMAP, discard_offset, (ref->cf) ? ref->filelength : discard_end);
        }

        if (ref->frozen)
        {
            (void) VNOP_IOCTL(ref->vp, FSCTL_THAW_EXTENTS, NULL, 0, ref->ctx);
        }
        if (ref->wbcranged)
        {
            (void) do_ioctl(p1, p2, DKIOCAPFSRELEASEWBCRANGE, (caddr_t) NULL);
        }

        /* Final payload write, bypassing content encryption. */
        if (addr && write_length)
        {
            (void) kern_write_file(ref, write_offset, addr, write_length, IO_SKIP_ENCRYPTION);
        }

        error = vnode_close(ref->vp, FWRITE, ref->ctx);
        ref->vp = NULLVP;
        kprintf("vnode_close(%d)\n", error);
    }
    /* ctx is the shared kernel context here, so it is not released. */
    ref->ctx = NULL;
    kfree(ref, sizeof(struct kern_direct_file_io_ref_t));
}
/*
 * Open a file-backed vdev: open the file relative to the global root,
 * verify it is a regular file, and report its size and minimum ashift.
 * On failure vd->vdev_stat.vs_aux records the cause.
 * NOTE(review): the ENODEV (not-a-regular-file) and getattr-failure paths
 * return without closing vf->vf_vnode — vdev_file_close() is presumably
 * expected to clean up via vd->vdev_tsd; confirm against callers.
 */
static int
vdev_file_open(vdev_t *vd, uint64_t *psize, uint64_t *ashift)
{
	vdev_file_t *vf;
#ifdef __APPLE__
	struct vnode *vp, *rootdir;
	struct vnode_attr vattr;
	vfs_context_t context;
#else
	vnode_t *vp;
	vattr_t vattr;
#endif
	int error;

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (EINVAL);
	}

	vf = vd->vdev_tsd = kmem_zalloc(sizeof (vdev_file_t), KM_SLEEP);

	/*
	 * We always open the files from the root of the global zone, even if
	 * we're in a local zone.  If the user has gotten to this point, the
	 * administrator has already decided that the pool should be available
	 * to local zone users, so the underlying devices should be as well.
	 */
	ASSERT(vd->vdev_path != NULL && vd->vdev_path[0] == '/');

#ifdef __APPLE__
	rootdir = getrootdir();
#endif

	error = vn_openat(vd->vdev_path + 1, UIO_SYSSPACE, spa_mode | FOFFMAX,
	    0, &vp, 0, 0, rootdir);

	if (error) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (error);
	}

	vf->vf_vnode = vp;

#ifdef _KERNEL
	/*
	 * Make sure it's a regular file.
	 */
#ifdef __APPLE__
	if (!vnode_isreg(vp)) {
#else
	if (vp->v_type != VREG) {
#endif
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (ENODEV);
	}
#endif

	/*
	 * Determine the physical size of the file.
	 */
#ifdef __APPLE__
	VATTR_INIT(&vattr);
	VATTR_WANTED(&vattr, va_data_size);

	context = vfs_context_create((vfs_context_t)0);
	error = vnode_getattr(vp, &vattr, context);
	(void) vfs_context_rele(context);

	if (error || !VATTR_IS_SUPPORTED(&vattr, va_data_size)) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (error);
	}
	*psize = vattr.va_data_size;
#else
	vattr.va_mask = AT_SIZE;
	error = VOP_GETATTR(vp, &vattr, 0, kcred);
	if (error) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (error);
	}
	*psize = vattr.va_size;
#endif

	*ashift = SPA_MINBLOCKSHIFT;

	return (0);
}

/*
 * Close a file-backed vdev: close the vnode (Darwin) or putpage/close/
 * release it (illumos), then free the per-vdev state.
 */
static void
vdev_file_close(vdev_t *vd)
{
	vdev_file_t *vf = vd->vdev_tsd;

	if (vf == NULL)
		return;

	if (vf->vf_vnode != NULL) {
#ifdef __APPLE__
		vfs_context_t context;

		context = vfs_context_create((vfs_context_t)0);
		/* ### APPLE TODO #### */
		// (void) VOP_PUTPAGE(vf->vf_vnode, 0, 0, B_INVAL, kcred);
		(void) vnode_close(vf->vf_vnode, spa_mode, context);
		(void) vfs_context_rele(context);
#else
		(void) VOP_PUTPAGE(vf->vf_vnode, 0, 0, B_INVAL, kcred);
		(void) VOP_CLOSE(vf->vf_vnode, spa_mode, 1, 0, kcred);
		VN_RELE(vf->vf_vnode);
#endif
	}

	kmem_free(vf, sizeof (vdev_file_t));
	vd->vdev_tsd = NULL;
}
/*
 * Open 'name' for raw direct I/O (hibernation image write path).
 *
 * Opens/creates the file, optionally writes an initial buffer, walks the
 * file's physical extents (pinning and locking them), reports each extent
 * via 'callback', and collects the underlying device identifiers, partition
 * base and maximum-I/O constraints for the caller.  Returns the reference
 * on success, NULL on failure (all partial state is unwound at 'out').
 */
struct kern_direct_file_io_ref_t *
kern_open_file_for_direct_io(const char * name,
			     kern_get_file_extents_callback_t callback,
			     void * callback_ref,
			     dev_t * partition_device_result,
			     dev_t * image_device_result,
			     uint64_t * partitionbase_result,
			     uint64_t * maxiocount_result,
			     uint32_t * oflags,
			     off_t offset,
			     caddr_t addr,
			     vm_size_t len)
{
    struct kern_direct_file_io_ref_t * ref;

    proc_t		p;
    struct vnode_attr	va;
    int			error;
    off_t		f_offset;
    uint64_t		fileblk;
    size_t		filechunk;
    uint64_t		physoffset;
    dev_t		device;
    dev_t		target = 0;
    int			isssd = 0;
    uint32_t		flags = 0;
    uint32_t		blksize;
    off_t		maxiocount, count;
    boolean_t		locked = FALSE;

    /* ioctl dispatch: regular file -> file_ioctl(device, proc),
     * block/char node -> device_ioctl(vnode, ctx). */
    int (*do_ioctl)(void * p1, void * p2, u_long theIoctl, caddr_t result);
    void * p1 = NULL;
    void * p2 = NULL;

    error = EFAULT;

    ref = (struct kern_direct_file_io_ref_t *) kalloc(sizeof(struct kern_direct_file_io_ref_t));
    if (!ref)
    {
	error = EFAULT;
	goto out;
    }

    bzero(ref, sizeof(*ref));
    p = kernproc;
    ref->ctx = vfs_context_create(vfs_context_current());

    if ((error = vnode_open(name, (O_CREAT | FWRITE), (0), 0, &ref->vp, ref->ctx)))
	goto out;

    /* Optional initial payload write. */
    if (addr && len)
    {
	if ((error = kern_write_file(ref, offset, addr, len)))
	    goto out;
    }

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_rdev);
    VATTR_WANTED(&va, va_fsid);
    VATTR_WANTED(&va, va_data_size);
    VATTR_WANTED(&va, va_nlink);
    error = EFAULT;
    if (vnode_getattr(ref->vp, &va, ref->ctx))
	goto out;

    kprintf("vp va_rdev major %d minor %d\n", major(va.va_rdev), minor(va.va_rdev));
    kprintf("vp va_fsid major %d minor %d\n", major(va.va_fsid), minor(va.va_fsid));
    kprintf("vp size %qd\n", va.va_data_size);

    if (ref->vp->v_type == VREG)
    {
	/* Don't dump files with links. */
	if (va.va_nlink != 1)
	    goto out;

	device = va.va_fsid;
	p1 = &device;
	p2 = p;
	do_ioctl = &file_ioctl;
    }
    else if ((ref->vp->v_type == VBLK) || (ref->vp->v_type == VCHR))
    {
	/* Partition. */
	device = va.va_rdev;

	p1 = ref->vp;
	p2 = ref->ctx;
	do_ioctl = &device_ioctl;
    }
    else
    {
	/* Don't dump to non-regular files. */
	error = EFAULT;
	goto out;
    }
    ref->device = device;

    // get block size

    error = do_ioctl(p1, p2, DKIOCGETBLOCKSIZE, (caddr_t) &ref->blksize);
    if (error)
	goto out;

    if (ref->vp->v_type == VREG)
	ref->filelength = va.va_data_size;
    else
    {
	error = do_ioctl(p1, p2, DKIOCGETBLOCKCOUNT, (caddr_t) &fileblk);
	if (error)
	    goto out;
	ref->filelength = fileblk * ref->blksize;
    }

    // pin logical extents

    error = kern_ioctl_file_extents(ref, _DKIOCCSPINEXTENT, 0, ref->filelength);
    if (error && (ENOTTY != error)) goto out;
    ref->pinned = (error == 0);

    // generate the block list

    error = do_ioctl(p1, p2, DKIOCLOCKPHYSICALEXTENTS, NULL);
    if (error)
	goto out;
    locked = TRUE;

    /* Walk the file in (up to) 1 GB logical chunks, resolving each chunk to
     * physical extents and reporting them to the caller's callback. */
    f_offset = 0;
    while (f_offset < ref->filelength)
    {
	if (ref->vp->v_type == VREG)
	{
	    filechunk = 1*1024*1024*1024;
	    daddr64_t blkno;
	    error = VNOP_BLOCKMAP(ref->vp, f_offset, filechunk, &blkno, &filechunk, NULL, 0, NULL);
	    if (error)
		goto out;

	    fileblk = blkno * ref->blksize;
	}
	else if ((ref->vp->v_type == VBLK) || (ref->vp->v_type == VCHR))
	{
	    /* Whole device: one chunk covering everything on the first pass. */
	    fileblk = f_offset;
	    filechunk = f_offset ? 0 : ref->filelength;
	}

	physoffset = 0;
	while (physoffset < filechunk)
	{
	    dk_physical_extent_t getphysreq;
	    bzero(&getphysreq, sizeof(getphysreq));

	    getphysreq.offset = fileblk + physoffset;
	    getphysreq.length = (filechunk - physoffset);
	    error = do_ioctl(p1, p2, DKIOCGETPHYSICALEXTENT, (caddr_t) &getphysreq);
	    if (error)
		goto out;
	    if (!target)
	    {
		target = getphysreq.dev;
	    }
	    else if (target != getphysreq.dev)
	    {
		/* All extents must live on a single physical device. */
		error = ENOTSUP;
		goto out;
	    }
	    callback(callback_ref, getphysreq.offset, getphysreq.length);
	    physoffset += getphysreq.length;
	}
	f_offset += filechunk;
    }
    /* Zero-length sentinel terminates the extent list. */
    callback(callback_ref, 0ULL, 0ULL);

    /* NOTE(review): restored from mojibake "⌖" in the source; the original
     * xnu code reads "p1 = &target;" here (query the physical device). */
    if (ref->vp->v_type == VREG)
	p1 = &target;

    // get partition base

    error = do_ioctl(p1, p2, DKIOCGETBASE, (caddr_t) partitionbase_result);
    if (error)
	goto out;

    // get block size & constraints

    error = do_ioctl(p1, p2, DKIOCGETBLOCKSIZE, (caddr_t) &blksize);
    if (error)
	goto out;

    /* Take the most restrictive of all the device's transfer limits. */
    maxiocount = 1*1024*1024*1024;

    error = do_ioctl(p1, p2, DKIOCGETMAXBLOCKCOUNTREAD, (caddr_t) &count);
    if (error)
	count = 0;
    count *= blksize;
    if (count && (count < maxiocount))
	maxiocount = count;

    error = do_ioctl(p1, p2, DKIOCGETMAXBLOCKCOUNTWRITE, (caddr_t) &count);
    if (error)
	count = 0;
    count *= blksize;
    if (count && (count < maxiocount))
	maxiocount = count;

    error = do_ioctl(p1, p2, DKIOCGETMAXBYTECOUNTREAD, (caddr_t) &count);
    if (error)
	count = 0;
    if (count && (count < maxiocount))
	maxiocount = count;

    error = do_ioctl(p1, p2, DKIOCGETMAXBYTECOUNTWRITE, (caddr_t) &count);
    if (error)
	count = 0;
    if (count && (count < maxiocount))
	maxiocount = count;

    error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTBYTECOUNTREAD, (caddr_t) &count);
    if (error)
	count = 0;
    if (count && (count < maxiocount))
	maxiocount = count;

    error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTBYTECOUNTWRITE, (caddr_t) &count);
    if (error)
	count = 0;
    if (count && (count < maxiocount))
	maxiocount = count;

    kprintf("max io 0x%qx bytes\n", maxiocount);
    if (maxiocount_result)
	*maxiocount_result = maxiocount;

    error = do_ioctl(p1, p2, DKIOCISSOLIDSTATE, (caddr_t)&isssd);
    if (!error && isssd)
	flags |= kIOHibernateOptionSSD;

    if (partition_device_result)
	*partition_device_result = device;
    if (image_device_result)
	*image_device_result = target;
    if (flags)
	*oflags = flags;

out:
    kprintf("kern_open_file_for_direct_io(%d)\n", error);

    /* Unwind on failure: unlock extents, close the vnode, free the ref. */
    if (error && locked)
    {
	p1 = &device;
	(void) do_ioctl(p1, p2, DKIOCUNLOCKPHYSICALEXTENTS, NULL);
    }

    if (error && ref)
    {
	if (ref->vp)
	{
	    vnode_close(ref->vp, FWRITE, ref->ctx);
	    ref->vp = NULLVP;
	}
	vfs_context_rele(ref->ctx);
	kfree(ref, sizeof(struct kern_direct_file_io_ref_t));
	ref = NULL;
    }

    return(ref);
}
/*
 * Open (or re-open) the block device backing a disk vdev.
 *
 * For a reopen (vdev_tsd already present and not offline) the existing
 * devvp is reused and only the size/ashift are refreshed via skip_open.
 * A fresh open obtains a referenced vnode by path, verifies it is an
 * unmounted block device, flushes stale buffers, and records the resolved
 * path.  Reports *psize/*max_psize/*ashift from the device's block size
 * and count, and probes DKIOCISSOLIDSTATE to set vdev_nonrot.
 */
static int
vdev_disk_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *ashift)
{
	spa_t *spa = vd->vdev_spa;
	vdev_disk_t *dvd = vd->vdev_tsd;
	vnode_t *devvp = NULLVP;
	vfs_context_t context = NULL;
	uint64_t blkcnt;
	uint32_t blksize;
	int fmode = 0;
	int error = 0;
	int isssd;

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Reopen the device if it's not currently open.  Otherwise,
	 * just update the physical size of the device.
	 */
	if (dvd != NULL) {
		if (dvd->vd_offline) {
			/*
			 * If we are opening a device in its offline notify
			 * context, the LDI handle was just closed.  Clean
			 * up the LDI event callbacks and free vd->vdev_tsd.
			 */
			vdev_disk_free(vd);
		} else {
			ASSERT(vd->vdev_reopening);
			devvp = dvd->vd_devvp;
			goto skip_open;
		}
	}

	/*
	 * Create vd->vdev_tsd.
	 */
	vdev_disk_alloc(vd);
	dvd = vd->vdev_tsd;

	/*
	 * When opening a disk device, we want to preserve the user's original
	 * intent.  We always want to open the device by the path the user gave
	 * us, even if it is one of multiple paths to the same device.  But we
	 * also want to be able to survive disks being removed/recabled.
	 * Therefore the sequence of opening devices is:
	 *
	 * 1. Try opening the device by path.  For legacy pools without the
	 *    'whole_disk' property, attempt to fix the path by appending 's0'.
	 *
	 * 2. If the devid of the device matches the stored value, return
	 *    success.
	 *
	 * 3. Otherwise, the device may have moved.  Try opening the device
	 *    by the devid instead.
	 */
	/* ### APPLE TODO ### */
#ifdef illumos
	if (vd->vdev_devid != NULL) {
		if (ddi_devid_str_decode(vd->vdev_devid, &dvd->vd_devid,
		    &dvd->vd_minor) != 0) {
			vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
			return (SET_ERROR(EINVAL));
		}
	}
#endif

	error = EINVAL;		/* presume failure */

	if (vd->vdev_path != NULL) {

		context = vfs_context_create( spl_vfs_context_kernel() );

		/* Obtain an opened/referenced vnode for the device. */
		if ((error = vnode_open(vd->vdev_path, spa_mode(spa), 0, 0,
		    &devvp, context))) {
			goto out;
		}
		if (!vnode_isblk(devvp)) {
			error = ENOTBLK;
			goto out;
		}
		/*
		 * ### APPLE TODO ###
		 * vnode_authorize devvp for KAUTH_VNODE_READ_DATA and
		 * KAUTH_VNODE_WRITE_DATA
		 */

		/*
		 * Disallow opening of a device that is currently in use.
		 * Flush out any old buffers remaining from a previous use.
		 */
		if ((error = vfs_mountedon(devvp))) {
			goto out;
		}
		if (VNOP_FSYNC(devvp, MNT_WAIT, context) != 0) {
			error = ENOTBLK;
			goto out;
		}
		if ((error = buf_invalidateblks(devvp, BUF_WRITE_DATA, 0, 0))) {
			goto out;
		}
	} else {
		goto out;
	}

	/* Remember the fully-resolved device name for later reporting. */
	int len = MAXPATHLEN;
	if (vn_getpath(devvp, dvd->vd_readlinkname, &len) == 0) {
		dprintf("ZFS: '%s' resolved name is '%s'\n",
		    vd->vdev_path, dvd->vd_readlinkname);
	} else {
		dvd->vd_readlinkname[0] = 0;
	}

skip_open:
	/*
	 * Determine the actual size of the device.
	 */
	if (VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, (caddr_t)&blksize, 0,
	    context) != 0 ||
	    VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&blkcnt, 0,
	    context) != 0) {
		error = EINVAL;
		goto out;
	}

	*psize = blkcnt * (uint64_t)blksize;
	*max_psize = *psize;

	dvd->vd_ashift = highbit(blksize) - 1;
	dprintf("vdev_disk: Device %p ashift set to %d\n", devvp,
	    dvd->vd_ashift);

	*ashift = highbit(MAX(blksize, SPA_MINBLOCKSIZE)) - 1;

	/*
	 * ### APPLE TODO ###
	 */
#ifdef illumos
	if (vd->vdev_wholedisk == 1) {
		int wce = 1;

		if (error == 0) {
			/*
			 * If we have the capability to expand, we'd have
			 * found out via success from DKIOCGMEDIAINFO{,EXT}.
			 * Adjust max_psize upward accordingly since we know
			 * we own the whole disk now.
			 */
			*max_psize = capacity * blksz;
		}

		/*
		 * Since we own the whole disk, try to enable disk write
		 * caching.  We ignore errors because it's OK if we can't do it.
		 */
		(void) ldi_ioctl(dvd->vd_lh, DKIOCSETWCE, (intptr_t)&wce,
		    FKIOCTL, kcred, NULL);
	}
#endif

	/*
	 * Clear the nowritecache bit, so that on a vdev_reopen() we will
	 * try again.
	 */
	vd->vdev_nowritecache = B_FALSE;

	/* Inform the ZIO pipeline that we are non-rotational */
	vd->vdev_nonrot = B_FALSE;
	if (VNOP_IOCTL(devvp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0,
	    context) == 0) {
		if (isssd)
			vd->vdev_nonrot = B_TRUE;
	}
	dprintf("ZFS: vdev_disk(%s) isSSD %d\n", vd->vdev_path ? vd->vdev_path : "",
	    isssd);

	dvd->vd_devvp = devvp;

out:
	if (error) {
		if (devvp) {
			vnode_close(devvp, fmode, context);
			dvd->vd_devvp = NULL;
		}
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
	}
	if (context)
		(void) vfs_context_rele(context);

	if (error)
		printf("ZFS: vdev_disk_open('%s') failed error %d\n",
		    vd->vdev_path ? vd->vdev_path : "", error);
	return (error);
}
/*
 * Open 'name' for raw direct I/O (newer hibernation / polled-file variant).
 *
 * Opens or creates the file with raw-encryption data protection, marks it
 * VSWAP, optionally writes an initial buffer, optionally grows it to
 * set_file_size (respecting fs_free_size headroom), pins/freezes and walks
 * its physical extents — reporting each via 'callback' — and collects the
 * device identifiers, partition base and max-I/O constraints.  For a
 * block/char target the vnode is closed before returning since all further
 * I/O goes straight to the device.  Returns NULL on failure after fully
 * unwinding (unpin, thaw, WBC release, close, free).
 */
struct kern_direct_file_io_ref_t *
kern_open_file_for_direct_io(const char * name,
			     uint32_t iflags,
			     kern_get_file_extents_callback_t callback,
			     void * callback_ref,
			     off_t set_file_size,
			     off_t fs_free_size,
			     off_t write_file_offset,
			     void * write_file_addr,
			     size_t write_file_len,
			     dev_t * partition_device_result,
			     dev_t * image_device_result,
			     uint64_t * partitionbase_result,
			     uint64_t * maxiocount_result,
			     uint32_t * oflags)
{
    struct kern_direct_file_io_ref_t * ref;

    proc_t		p;
    struct vnode_attr	va;
    dk_apfs_wbc_range_t	wbc_range;
    int			error;
    off_t		f_offset;
    uint64_t		fileblk;
    size_t		filechunk;
    uint64_t		physoffset, minoffset;
    dev_t		device;
    dev_t		target = 0;
    int			isssd = 0;
    uint32_t		flags = 0;
    uint32_t		blksize;
    off_t		maxiocount, count, segcount, wbctotal;
    boolean_t		locked = FALSE;
    int			fmode, cmode;
    struct nameidata	nd;
    u_int32_t		ndflags;
    off_t		mpFree;

    /* ioctl dispatch: regular file -> file_ioctl(device, proc),
     * block/char node -> device_ioctl(vnode, ctx). */
    int (*do_ioctl)(void * p1, void * p2, u_long theIoctl, caddr_t result);
    void * p1 = NULL;
    void * p2 = NULL;

    error = EFAULT;

    ref = (struct kern_direct_file_io_ref_t *) kalloc(sizeof(struct kern_direct_file_io_ref_t));
    if (!ref)
    {
	error = EFAULT;
	goto out;
    }

    bzero(ref, sizeof(*ref));
    p = kernproc;
    ref->ctx = vfs_context_kernel();

    fmode  = (kIOPolledFileCreate & iflags) ? (O_CREAT | FWRITE) : FWRITE;
    cmode  = S_IRUSR | S_IWUSR;
    ndflags = NOFOLLOW;
    NDINIT(&nd, LOOKUP, OP_OPEN, ndflags, UIO_SYSSPACE, CAST_USER_ADDR_T(name), ref->ctx);
    VATTR_INIT(&va);
    VATTR_SET(&va, va_mode, cmode);
    /* Raw-encrypted, class-D protection so the image is readable pre-unlock. */
    VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWENCRYPTED);
    VATTR_SET(&va, va_dataprotect_class, PROTECTION_CLASS_D);
    if ((error = vn_open_auth(&nd, &fmode, &va))) {
	kprintf("vn_open_auth(fmode: %d, cmode: %d) failed with error: %d\n", fmode, cmode, error);
	goto out;
    }

    ref->vp = nd.ni_vp;
    if (ref->vp->v_type == VREG)
    {
	vnode_lock_spin(ref->vp);
	SET(ref->vp->v_flag, VSWAP);
	vnode_unlock(ref->vp);
    }

    /* Optional initial payload write, bypassing content encryption. */
    if (write_file_addr && write_file_len)
    {
	if ((error = kern_write_file(ref, write_file_offset, write_file_addr, write_file_len, IO_SKIP_ENCRYPTION))) {
	    kprintf("kern_write_file() failed with error: %d\n", error);
	    goto out;
	}
    }

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_rdev);
    VATTR_WANTED(&va, va_fsid);
    VATTR_WANTED(&va, va_devid);
    VATTR_WANTED(&va, va_data_size);
    VATTR_WANTED(&va, va_data_alloc);
    VATTR_WANTED(&va, va_nlink);
    error = EFAULT;
    if (vnode_getattr(ref->vp, &va, ref->ctx))
	goto out;

    wbctotal = 0;
    mpFree = freespace_mb(ref->vp);
    mpFree <<= 20;
    kprintf("kern_direct_file(%s): vp size %qd, alloc %qd, mp free %qd, keep free %qd\n",
	    name, va.va_data_size, va.va_data_alloc, mpFree, fs_free_size);

    if (ref->vp->v_type == VREG)
    {
	/* Don't dump files with links. */
	if (va.va_nlink != 1)
	    goto out;

	device = (VATTR_IS_SUPPORTED(&va, va_devid)) ? va.va_devid : va.va_fsid;
	ref->filelength = va.va_data_size;

	p1 = &device;
	p2 = p;
	do_ioctl = &file_ioctl;

	/* Hibernation may reuse the APFS write-back cache range. */
	if (kIOPolledFileHibernate & iflags)
	{
	    error = do_ioctl(p1, p2, DKIOCAPFSGETWBCRANGE, (caddr_t) &wbc_range);
	    ref->wbcranged = (error == 0);
	}
	if (ref->wbcranged)
	{
	    uint32_t idx;
	    assert(wbc_range.count <= (sizeof(wbc_range.extents) / sizeof(wbc_range.extents[0])));
	    for (idx = 0; idx < wbc_range.count; idx++)
		wbctotal += wbc_range.extents[idx].length;
	    kprintf("kern_direct_file(%s): wbc %qd\n", name, wbctotal);
	    if (wbctotal)
		target = wbc_range.dev;
	}

	if (set_file_size)
	{
	    /* WBC space counts toward the image, so the file can shrink. */
	    if (wbctotal)
	    {
		if (wbctotal >= set_file_size)
		    set_file_size = HIBERNATE_MIN_FILE_SIZE;
		else
		{
		    set_file_size -= wbctotal;
		    if (set_file_size < HIBERNATE_MIN_FILE_SIZE)
			set_file_size = HIBERNATE_MIN_FILE_SIZE;
		}
	    }
	    /* Refuse to consume the caller's required free-space headroom. */
	    if (fs_free_size)
	    {
		mpFree += va.va_data_alloc;
		if ((mpFree < set_file_size) || ((mpFree - set_file_size) < fs_free_size))
		{
		    error = ENOSPC;
		    goto out;
		}
	    }
	    error = vnode_setsize(ref->vp, set_file_size, IO_NOZEROFILL | IO_NOAUTH, ref->ctx);
	    if (error)
		goto out;
	    ref->filelength = set_file_size;
	}
    }
    else if ((ref->vp->v_type == VBLK) || (ref->vp->v_type == VCHR))
    {
	/* Partition. */
	device = va.va_rdev;

	p1 = ref->vp;
	p2 = ref->ctx;
	do_ioctl = &device_ioctl;
    }
    else
    {
	/* Don't dump to non-regular files. */
	error = EFAULT;
	goto out;
    }
    ref->device = device;

    // probe for CF
    dk_corestorage_info_t cs_info;
    memset(&cs_info, 0, sizeof(dk_corestorage_info_t));
    error = do_ioctl(p1, p2, DKIOCCORESTORAGE, (caddr_t)&cs_info);
    ref->cf = (error == 0) && (cs_info.flags & DK_CORESTORAGE_ENABLE_HOTFILES);

    // get block size

    error = do_ioctl(p1, p2, DKIOCGETBLOCKSIZE, (caddr_t) &ref->blksize);
    if (error)
	goto out;

    minoffset = HIBERNATE_MIN_PHYSICAL_LBA * ref->blksize;

    if (ref->vp->v_type != VREG)
    {
	error = do_ioctl(p1, p2, DKIOCGETBLOCKCOUNT, (caddr_t) &fileblk);
	if (error)
	    goto out;
	ref->filelength = fileblk * ref->blksize;
    }

    // pin logical extents, CS version

    error = kern_ioctl_file_extents(ref, _DKIOCCSPINEXTENT, 0, ref->filelength);
    if (error && (ENOTTY != error)) goto out;
    ref->pinned = (error == 0);

    // pin logical extents, apfs version

    error = VNOP_IOCTL(ref->vp, FSCTL_FREEZE_EXTENTS, NULL, 0, ref->ctx);
    if (error && (ENOTTY != error)) goto out;
    ref->frozen = (error == 0);

    // generate the block list

    error = do_ioctl(p1, p2, DKIOCLOCKPHYSICALEXTENTS, NULL);
    if (error)
	goto out;
    locked = TRUE;

    /* Walk the file in (up to) 1 GB logical chunks, resolving each chunk to
     * physical extents and reporting them to the caller's callback. */
    f_offset = 0;
    for (; f_offset < ref->filelength; f_offset += filechunk)
    {
	if (ref->vp->v_type == VREG)
	{
	    filechunk = 1*1024*1024*1024;
	    daddr64_t blkno;
	    error = VNOP_BLOCKMAP(ref->vp, f_offset, filechunk, &blkno,
				  &filechunk, NULL, VNODE_WRITE | VNODE_BLOCKMAP_NO_TRACK, NULL);
	    if (error)
		goto out;
	    /* Sparse hole: nothing physical to report for this chunk. */
	    if (-1LL == blkno)
		continue;
	    fileblk = blkno * ref->blksize;
	}
	else if ((ref->vp->v_type == VBLK) || (ref->vp->v_type == VCHR))
	{
	    /* Whole device: one chunk covering everything on the first pass. */
	    fileblk = f_offset;
	    filechunk = f_offset ? 0 : ref->filelength;
	}

	physoffset = 0;
	while (physoffset < filechunk)
	{
	    dk_physical_extent_t getphysreq;
	    bzero(&getphysreq, sizeof(getphysreq));

	    getphysreq.offset = fileblk + physoffset;
	    getphysreq.length = (filechunk - physoffset);
	    error = do_ioctl(p1, p2, DKIOCGETPHYSICALEXTENT, (caddr_t) &getphysreq);
	    if (error)
		goto out;
	    if (!target)
	    {
		target = getphysreq.dev;
	    }
	    else if (target != getphysreq.dev)
	    {
		/* All extents must live on a single physical device. */
		error = ENOTSUP;
		goto out;
	    }

	    assert(getphysreq.offset >= minoffset);

#if HIBFRAGMENT
	    uint64_t rev;
	    for (rev = 4096; rev <= getphysreq.length; rev += 4096)
	    {
		callback(callback_ref, getphysreq.offset + getphysreq.length - rev, 4096);
	    }
#else
	    callback(callback_ref, getphysreq.offset, getphysreq.length);
#endif
	    physoffset += getphysreq.length;
	}
    }
    /* Append the write-back-cache extents, if any were claimed. */
    if (ref->wbcranged)
    {
	uint32_t idx;
	for (idx = 0; idx < wbc_range.count; idx++)
	{
	    assert(wbc_range.extents[idx].offset >= minoffset);
	    callback(callback_ref, wbc_range.extents[idx].offset, wbc_range.extents[idx].length);
	}
    }
    /* Zero-length sentinel terminates the extent list. */
    callback(callback_ref, 0ULL, 0ULL);

    /* NOTE(review): restored from mojibake "⌖" in the source; the original
     * xnu code reads "p1 = &target;" in both arms (query the physical dev). */
    if (ref->vp->v_type == VREG)
	p1 = &target;
    else
    {
	p1 = &target;
	p2 = p;
	do_ioctl = &file_ioctl;
    }

    // get partition base

    if (partitionbase_result)
    {
	error = do_ioctl(p1, p2, DKIOCGETBASE, (caddr_t) partitionbase_result);
	if (error)
	    goto out;
    }

    // get block size & constraints

    error = do_ioctl(p1, p2, DKIOCGETBLOCKSIZE, (caddr_t) &blksize);
    if (error)
	goto out;

    /* Take the most restrictive of all the device's transfer limits. */
    maxiocount = 1*1024*1024*1024;

    error = do_ioctl(p1, p2, DKIOCGETMAXBLOCKCOUNTREAD, (caddr_t) &count);
    if (error)
	count = 0;
    count *= blksize;
    if (count && (count < maxiocount))
	maxiocount = count;

    error = do_ioctl(p1, p2, DKIOCGETMAXBLOCKCOUNTWRITE, (caddr_t) &count);
    if (error)
	count = 0;
    count *= blksize;
    if (count && (count < maxiocount))
	maxiocount = count;

    error = do_ioctl(p1, p2, DKIOCGETMAXBYTECOUNTREAD, (caddr_t) &count);
    if (error)
	count = 0;
    if (count && (count < maxiocount))
	maxiocount = count;

    error = do_ioctl(p1, p2, DKIOCGETMAXBYTECOUNTWRITE, (caddr_t) &count);
    if (error)
	count = 0;
    if (count && (count < maxiocount))
	maxiocount = count;

    error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTBYTECOUNTREAD, (caddr_t) &count);
    if (!error)
	error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTCOUNTREAD, (caddr_t) &segcount);
    if (error)
	count = segcount = 0;
    count *= segcount;
    if (count && (count < maxiocount))
	maxiocount = count;

    error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTBYTECOUNTWRITE, (caddr_t) &count);
    if (!error)
	error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTCOUNTWRITE, (caddr_t) &segcount);
    if (error)
	count = segcount = 0;
    count *= segcount;
    if (count && (count < maxiocount))
	maxiocount = count;

    kprintf("max io 0x%qx bytes\n", maxiocount);
    if (maxiocount_result)
	*maxiocount_result = maxiocount;

    error = do_ioctl(p1, p2, DKIOCISSOLIDSTATE, (caddr_t)&isssd);
    if (!error && isssd)
	flags |= kIOPolledFileSSD;

    if (partition_device_result)
	*partition_device_result = device;
    if (image_device_result)
	*image_device_result = target;
    if (oflags)
	*oflags = flags;

    /* For raw devices further I/O goes direct to the device; the vnode is
     * no longer needed. */
    if ((ref->vp->v_type == VBLK) || (ref->vp->v_type == VCHR))
    {
	vnode_close(ref->vp, FWRITE, ref->ctx);
	ref->vp = NULLVP;
	ref->ctx = NULL;
    }

out:
    printf("kern_open_file_for_direct_io(%p, %d)\n", ref, error);

    /* Unwind on failure: unlock, unpin, thaw, release WBC, close, free. */
    if (error && locked)
    {
	p1 = &device;
	(void) do_ioctl(p1, p2, DKIOCUNLOCKPHYSICALEXTENTS, NULL);
    }

    if (error && ref)
    {
	if (ref->vp)
	{
	    (void) kern_ioctl_file_extents(ref, _DKIOCCSUNPINEXTENT, 0, (ref->pinned && ref->cf) ? ref->filelength : 0);

	    if (ref->frozen)
	    {
		(void) VNOP_IOCTL(ref->vp, FSCTL_THAW_EXTENTS, NULL, 0, ref->ctx);
	    }
	    if (ref->wbcranged)
	    {
		(void) do_ioctl(p1, p2, DKIOCAPFSRELEASEWBCRANGE, (caddr_t) NULL);
	    }
	    vnode_close(ref->vp, FWRITE, ref->ctx);
	    ref->vp = NULLVP;
	}
	ref->ctx = NULL;
	kfree(ref, sizeof(struct kern_direct_file_io_ref_t));
	ref = NULL;
    }

    return(ref);
}
/*
 * Open the block device backing a disk vdev (older ZEVO-era variant).
 *
 * Obtains a referenced vnode for vdev_path, verifies it is an unmounted
 * block device, flushes stale buffers, and reports *size and *ashift from
 * the device's block size and count.  On success vd->vdev_tsd owns the
 * open devvp; on failure everything is unwound and vs_aux records the
 * cause.
 */
static int
vdev_disk_open(vdev_t *vd, uint64_t *size, uint64_t *max_size, uint64_t *ashift)
{
	vdev_disk_t *dvd = NULL;
	vnode_t *devvp = NULLVP;
	vfs_context_t context = NULL;
	uint64_t blkcnt;
	uint32_t blksize;
	int fmode = 0;
	int error = 0;

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (EINVAL);
	}

	dvd = kmem_zalloc(sizeof (vdev_disk_t), KM_SLEEP);
	if (dvd == NULL)
		return ENOMEM;

	/*
	 * When opening a disk device, we want to preserve the user's original
	 * intent.  We always want to open the device by the path the user gave
	 * us, even if it is one of multiple paths to the save device.  But we
	 * also want to be able to survive disks being removed/recabled.
	 * Therefore the sequence of opening devices is:
	 *
	 * 1. Try opening the device by path.  For legacy pools without the
	 *    'whole_disk' property, attempt to fix the path by appending 's0'.
	 *
	 * 2. If the devid of the device matches the stored value, return
	 *    success.
	 *
	 * 3. Otherwise, the device may have moved.  Try opening the device
	 *    by the devid instead.
	 *
	 */
	/* ### APPLE TODO ### */
	/* ddi_devid_str_decode */

	context = vfs_context_create((vfs_context_t)0);

	/* Obtain an opened/referenced vnode for the device. */
	error = vnode_open(vd->vdev_path, spa_mode(vd->vdev_spa), 0, 0,
	    &devvp, context);
	if (error) {
		goto out;
	}
	if (!vnode_isblk(devvp)) {
		error = ENOTBLK;
		goto out;
	}

	/* ### APPLE TODO ### */
	/* vnode_authorize devvp for KAUTH_VNODE_READ_DATA and
	 * KAUTH_VNODE_WRITE_DATA */

	/*
	 * Disallow opening of a device that is currently in use.
	 * Flush out any old buffers remaining from a previous use.
	 */
	if ((error = vfs_mountedon(devvp))) {
		goto out;
	}
	if (VNOP_FSYNC(devvp, MNT_WAIT, context) != 0) {
		error = ENOTBLK;
		goto out;
	}
	if ((error = buf_invalidateblks(devvp, BUF_WRITE_DATA, 0, 0))) {
		goto out;
	}

	/*
	 * Determine the actual size of the device.
	 */
	if (VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, (caddr_t)&blksize, 0,
	    context) != 0 ||
	    VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&blkcnt, 0,
	    context) != 0) {
		error = EINVAL;
		goto out;
	}

	*size = blkcnt * (uint64_t)blksize;

	/*
	 *  ### APPLE TODO ###
	 * If we own the whole disk, try to enable disk write caching.
	 */

	/*
	 * Take the device's minimum transfer size into account.
	 */
	*ashift = highbit(MAX(blksize, SPA_MINBLOCKSIZE)) - 1;

	/*
	 * Setting the vdev_ashift did in fact break the pool for import
	 * on ZEVO. This puts the logic into question. It appears that vdev_top
	 * will also then change. It then panics in space_map from metaslab_alloc
	 */
	//vd->vdev_ashift = *ashift;
	dvd->vd_ashift = *ashift;

	/*
	 * Clear the nowritecache bit, so that on a vdev_reopen() we will
	 * try again.
	 */
	vd->vdev_nowritecache = B_FALSE;

	vd->vdev_tsd = dvd;
	dvd->vd_devvp = devvp;

out:
	if (error) {
		if (devvp)
			vnode_close(devvp, fmode, context);
		if (dvd)
			kmem_free(dvd, sizeof (vdev_disk_t));

		/*
		 * Since the open has failed, vd->vdev_tsd should
		 * be NULL when we get here, signaling to the
		 * rest of the spa not to try and reopen or close this device
		 */
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
	}
	if (context) {
		(void) vfs_context_rele(context);
	}
	return (error);
}
/*
 * Read the whole NVRAM backing file into a freshly IOMalloc'd buffer.
 *
 * On success *aBuffer owns the allocation (caller frees) and *aLength, if
 * supplied, receives the file size.  Returns 0 on success, an errno-style
 * code on failure, or 0xFFFF when aCtx is NULL.
 *
 * BUG FIXES vs. the previous revision:
 *  - the non-regular-file branch leaked the open vnode (never closed);
 *  - the IOMalloc() result was passed to vn_rdwr() unchecked;
 *  - a successful vnode_close() silently reset 'error' to 0, masking an
 *    earlier getattr/read failure from the caller.
 */
IOReturn FileNVRAM::read_buffer(char** aBuffer, uint64_t* aLength, vfs_context_t aCtx)
{
	IOReturn error = 0;
	struct vnode * vp;
	struct vnode_attr va;

	if (aCtx) {
		if ((error = vnode_open(FILE_NVRAM_PATH, (O_RDONLY | FREAD | O_NOFOLLOW), S_IRUSR,
		    VNODE_LOOKUP_NOFOLLOW, &vp, aCtx))) {
			printf("failed opening vnode at path %s, errno %d\n", FILE_NVRAM_PATH, error);
			return error;
		} else {
			/* NOTE(review): vnode_isreg() returns 1 for a regular file,
			 * which only matches VREG because VREG happens to equal 1. */
			if ((error = vnode_isreg(vp)) == VREG) {
				VATTR_INIT(&va);
				VATTR_WANTED(&va, va_data_size); /* size in bytes of the fork managed by current vnode */

				// Determine size of vnode
				if ((error = vnode_getattr(vp, &va, aCtx))) {
					printf("FileNVRAM.kext: Error, failed to determine file size of %s, errno %d.\n", FILE_NVRAM_PATH, error);
				} else {
					if (aLength) {
						*aLength = va.va_data_size;
					}

					*aBuffer = (char *)IOMalloc((size_t)va.va_data_size);
					if (*aBuffer == NULL) {
						/* BUG FIX: don't hand a NULL buffer to vn_rdwr(). */
						error = ENOMEM;
					} else {
						int len = (int)va.va_data_size;

						if ((error = vn_rdwr(UIO_READ, vp, *aBuffer, len, 0, UIO_SYSSPACE,
						    IO_NOCACHE|IO_NODELOCKED|IO_UNIT, vfs_context_ucred(aCtx),
						    (int *) 0, vfs_context_proc(aCtx)))) {
							printf("FileNVRAM.kext: Error, writing to vnode(%s) failed with error %d!\n", FILE_NVRAM_PATH, error);
						}
					}
				}

				/* BUG FIX: preserve the first failure across the close. */
				int closeError;
				if ((closeError = vnode_close(vp, 0, aCtx))) {
					printf("FileNVRAM.kext: Error, vnode_close(%s) failed with error %d!\n", FILE_NVRAM_PATH, closeError);
					if (!error) {
						error = closeError;
					}
				}
			} else {
				printf("FileNVRAM.kext: Error, vnode_isreg(%s) failed with error %d!\n", FILE_NVRAM_PATH, error);
				/* BUG FIX: close the open vnode on this path too. */
				(void) vnode_close(vp, 0, aCtx);
			}
		}
	} else {
		printf("FileNVRAM.kext: aCtx == NULL!\n");
		error = 0xFFFF; // EINVAL;
	}

	return error;
}