/*
 *	Routine:	macx_swapon
 *	Function:
 *		Syscall interface to add a file to backing store
 */
int
macx_swapon(
    struct macx_swapon_args *args)
{
    int                     size = args->size;
    vnode_t                 vp = (vnode_t)NULL;
    struct nameidata        nd, *ndp;
    register int            error;
    kern_return_t           kr;
    mach_port_t             backing_store;
    memory_object_default_t default_pager;
    int                     i;
    boolean_t               funnel_state;
    off_t                   file_size;
    vfs_context_t           ctx = vfs_context_current();
    struct proc             *p = current_proc();
    int                     dp_cluster_size;

    AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPON);
    AUDIT_ARG(value32, args->priority);

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    ndp = &nd;

    if ((error = suser(kauth_cred_get(), 0)))
        goto swapon_bailout;

    /*
     * Get a vnode for the paging area.
     */
    NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
           ((IS_64BIT_PROCESS(p)) ? UIO_USERSPACE64 : UIO_USERSPACE32),
           (user_addr_t) args->filename, ctx);

    if ((error = namei(ndp)))
        goto swapon_bailout;
    nameidone(ndp);
    vp = ndp->ni_vp;

    if (vp->v_type != VREG) {
        error = EINVAL;
        goto swapon_bailout;
    }

    /* get file size */
    if ((error = vnode_size(vp, &file_size, ctx)) != 0)
        goto swapon_bailout;

#if CONFIG_MACF
    vnode_lock(vp);
    error = mac_system_check_swapon(vfs_context_ucred(ctx), vp);
    vnode_unlock(vp);
    if (error)
        goto swapon_bailout;
#endif

    /* resize to desired size if it's too small */
    if ((file_size < (off_t)size) &&
        ((error = vnode_setsize(vp, (off_t)size, 0, ctx)) != 0))
        goto swapon_bailout;

    if (default_pager_init_flag == 0) {
        start_def_pager(NULL);
        default_pager_init_flag = 1;
    }

    /* add new backing store to list */
    i = 0;
    while (bs_port_table[i].vp != 0) {
        if (i == MAX_BACKING_STORE)
            break;
        i++;
    }
    if (i == MAX_BACKING_STORE) {
        error = ENOMEM;
        goto swapon_bailout;
    }

    /* remember the vnode. This vnode has namei() reference */
    bs_port_table[i].vp = vp;

    /*
     * Look to see if we are already paging to this file.
     */
    /* make certain the copy send of kernel call will work */
    default_pager = MEMORY_OBJECT_DEFAULT_NULL;
    kr = host_default_memory_manager(host_priv_self(), &default_pager, 0);
    if (kr != KERN_SUCCESS) {
        error = EAGAIN;
        bs_port_table[i].vp = 0;
        goto swapon_bailout;
    }

    if (vp->v_mount->mnt_kern_flag & MNTK_SSD) {
        /*
         * keep the cluster size small since the
         * seek cost is effectively 0 which means
         * we don't care much about fragmentation
         */
        dp_isssd = TRUE;
        dp_cluster_size = 2 * PAGE_SIZE;
    } else {
        /*
         * use the default cluster size
         */
        dp_isssd = FALSE;
        dp_cluster_size = 0;
    }
    kr = default_pager_backing_store_create(default_pager,
                                            -1, /* default priority */
                                            dp_cluster_size,
                                            &backing_store);
    memory_object_default_deallocate(default_pager);

    if (kr != KERN_SUCCESS) {
        error = ENOMEM;
        bs_port_table[i].vp = 0;
        goto swapon_bailout;
    }

    /* Mark this vnode as being used for swapfile */
    vnode_lock_spin(vp);
    SET(vp->v_flag, VSWAP);
    vnode_unlock(vp);

    /*
     * NOTE: we are able to supply PAGE_SIZE here instead of
     *	an actual record size or block number because:
     *	a: we do not support offsets from the beginning of the
     *	   file (allowing for non page size/record modulo offsets).
     *	b: all paging will be done modulo page size
     */
    kr = default_pager_add_file(backing_store, (vnode_ptr_t) vp,
                                PAGE_SIZE, (int)(file_size / PAGE_SIZE));
    if (kr != KERN_SUCCESS) {
        bs_port_table[i].vp = 0;
        if (kr == KERN_INVALID_ARGUMENT)
            error = EINVAL;
        else
            error = ENOMEM;

        /* This vnode is not to be used for swapfile */
        vnode_lock_spin(vp);
        CLR(vp->v_flag, VSWAP);
        vnode_unlock(vp);

        goto swapon_bailout;
    }
    bs_port_table[i].bs = (void *)backing_store;
    error = 0;

    ubc_setthreadcred(vp, p, current_thread());

    /*
     * take a long term reference on the vnode to keep
     * vnreclaim() away from this vnode.
     */
    vnode_ref(vp);

swapon_bailout:
    if (vp) {
        vnode_put(vp);
    }
    (void) thread_funnel_set(kernel_flock, FALSE);
    AUDIT_MACH_SYSCALL_EXIT(error);
    return(error);
}
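/*
 * For context, a minimal user-space sketch of how these syscalls are
 * driven (macx_swapon historically being issued by dynamic_pager). The
 * extern declarations follow the syscalls.master entries for
 * macx_swapon/macx_swapoff; treat the prototypes, the path, and the
 * size below as illustrative assumptions, and note that the suser()
 * check above means the caller must be root.
 */
#include <stdint.h>
#include <stdio.h>

extern int macx_swapon(uint64_t filename, int flags, int size, int priority);
extern int macx_swapoff(uint64_t filename, int flags);

int
main(void)
{
    const char *path = "/private/var/vm/swapfile1";  /* illustrative path */
    int size = 64 * 1024 * 1024;                     /* 64MB, illustrative */

    /* add the file to backing store */
    if (macx_swapon((uint64_t)(uintptr_t)path, 0, size, 0) != 0) {
        perror("macx_swapon");
        return 1;
    }

    /* ... later, retire the file from backing store */
    if (macx_swapoff((uint64_t)(uintptr_t)path, 0) != 0)
        perror("macx_swapoff");

    return 0;
}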
/*
 *	Routine:	macx_swapoff
 *	Function:
 *		Syscall interface to remove a file from backing store
 */
int
macx_swapoff(
    struct macx_swapoff_args *args)
{
    __unused int        flags = args->flags;
    kern_return_t       kr;
    mach_port_t         backing_store;

    struct vnode        *vp = 0;
    struct nameidata    nd, *ndp;
    struct proc         *p = current_proc();
    int                 i;
    int                 error;
    boolean_t           funnel_state;
    vfs_context_t       ctx = vfs_context_current();
    struct uthread      *ut;
    int                 orig_iopol_disk;

    AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPOFF);

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    backing_store = NULL;
    ndp = &nd;

    if ((error = suser(kauth_cred_get(), 0)))
        goto swapoff_bailout;

    /*
     * Get the vnode for the paging area.
     */
    NDINIT(ndp, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
           ((IS_64BIT_PROCESS(p)) ? UIO_USERSPACE64 : UIO_USERSPACE32),
           (user_addr_t) args->filename, ctx);

    if ((error = namei(ndp)))
        goto swapoff_bailout;
    nameidone(ndp);
    vp = ndp->ni_vp;

    if (vp->v_type != VREG) {
        error = EINVAL;
        goto swapoff_bailout;
    }
#if CONFIG_MACF
    vnode_lock(vp);
    error = mac_system_check_swapoff(vfs_context_ucred(ctx), vp);
    vnode_unlock(vp);
    if (error)
        goto swapoff_bailout;
#endif

    for (i = 0; i < MAX_BACKING_STORE; i++) {
        if (bs_port_table[i].vp == vp) {
            break;
        }
    }
    if (i == MAX_BACKING_STORE) {
        error = EINVAL;
        goto swapoff_bailout;
    }
    backing_store = (mach_port_t)bs_port_table[i].bs;

    ut = get_bsdthread_info(current_thread());

    orig_iopol_disk = proc_get_thread_selfdiskacc();
    proc_apply_thread_selfdiskacc(IOPOL_THROTTLE);

    kr = default_pager_backing_store_delete(backing_store);

    proc_apply_thread_selfdiskacc(orig_iopol_disk);

    switch (kr) {
    case KERN_SUCCESS:
        error = 0;
        bs_port_table[i].vp = 0;
        /* This vnode is no longer used for swapfile */
        vnode_lock_spin(vp);
        CLR(vp->v_flag, VSWAP);
        vnode_unlock(vp);

        /* get rid of macx_swapon() "long term" reference */
        vnode_rele(vp);
        break;
    case KERN_FAILURE:
        error = EAGAIN;
        break;
    default:
        error = EAGAIN;
        break;
    }

swapoff_bailout:
    /* get rid of macx_swapoff() namei() reference */
    if (vp)
        vnode_put(vp);
    (void) thread_funnel_set(kernel_flock, FALSE);
    AUDIT_MACH_SYSCALL_EXIT(error);

    if (error)
        printf("macx_swapoff FAILED - %d\n", error);
    else
        printf("macx_swapoff SUCCESS\n");

    return(error);
}
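/*
 * Both syscalls above index the same bookkeeping table. The real
 * definitions live elsewhere in xnu; this sketch is inferred from the
 * usage above (vp == 0 marks a free slot, .bs holds the backing-store
 * port returned by default_pager_backing_store_create), so treat the
 * exact layout and the table size as assumptions.
 */
#define MAX_BACKING_STORE 100           /* assumed table size */

struct bs_map {
    vnode_t  vp;    /* swap file vnode; 0 when the slot is free */
    void    *bs;    /* backing-store port from the default pager */
};

struct bs_map bs_port_table[MAX_BACKING_STORE];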
int
vm_swapfile_preallocate(vnode_t vp, uint64_t *size)
{
    int             error = 0;
    uint64_t        file_size = 0;
    vfs_context_t   ctx = NULL;

    ctx = vfs_context_current();

#if CONFIG_PROTECT
    {
#if 0	// <rdar://11771612>
        if ((error = cp_vnode_setclass(vp, PROTECTION_CLASS_F))) {
            if (config_protect_bug) {
                printf("swap protection class set failed with %d\n", error);
            } else {
                panic("swap protection class set failed with %d\n", error);
            }
        }
#endif
        /* initialize content protection keys manually */
        if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) {
            printf("Content Protection key failure on swap: %d\n", error);
            vnode_put(vp);
            vp = NULL;
            goto done;
        }
    }
#endif

    /*
     * This check exists because dynamic_pager creates the 1st swapfile,
     * swapfile0, for us from user-space in a supported manner (with
     * IO_NOZEROFILL etc).
     *
     * If dynamic_pager, in the future, discontinues creating that file,
     * then we need to change this check to a panic / assert or return an
     * error. That's because we can't be sure if the file has been created
     * correctly.
     */
    if ((error = vnode_size(vp, (off_t *)&file_size, ctx)) != 0) {
        printf("vnode_size (existing files) for swap files failed: %d\n", error);
        goto done;
    } else {
        if (file_size == 0) {
            error = vnode_setsize(vp, *size, IO_NOZEROFILL, ctx);
            if (error) {
                printf("vnode_setsize for swap files failed: %d\n", error);
                goto done;
            }
        } else {
            *size = file_size;
        }
    }

    vnode_lock_spin(vp);
    SET(vp->v_flag, VSWAP);
    vnode_unlock(vp);
done:
    return error;
}
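/*
 * Illustrative call of the variant above, showing its adopt-or-create
 * behavior; swap_vp and the requested size are hypothetical.
 */
uint64_t swapsize = 128ULL << 20;       /* ask for 128MB */

if (vm_swapfile_preallocate(swap_vp, &swapsize) == 0) {
    /*
     * If the file already existed (e.g. swapfile0 created by
     * dynamic_pager), swapsize now reports its actual size; otherwise
     * the file was grown to 128MB with IO_NOZEROFILL.
     */
}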
int
vm_swapfile_preallocate(vnode_t vp, uint64_t *size, boolean_t *pin)
{
    int             error = 0;
    uint64_t        file_size = 0;
    vfs_context_t   ctx = NULL;

    ctx = vfs_context_current();

#if CONFIG_PROTECT
    {
#if 0	// <rdar://11771612>
        if ((error = cp_vnode_setclass(vp, PROTECTION_CLASS_F))) {
            if (config_protect_bug) {
                printf("swap protection class set failed with %d\n", error);
            } else {
                panic("swap protection class set failed with %d\n", error);
            }
        }
#endif
        /* initialize content protection keys manually */
        if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) {
            printf("Content Protection key failure on swap: %d\n", error);
            vnode_put(vp);
            vp = NULL;
            goto done;
        }
    }
#endif
    error = vnode_setsize(vp, *size, IO_NOZEROFILL, ctx);
    if (error) {
        printf("vnode_setsize for swap files failed: %d\n", error);
        goto done;
    }
    error = vnode_size(vp, (off_t *)&file_size, ctx);
    if (error) {
        printf("vnode_size (new file) for swap file failed: %d\n", error);
        goto done;
    }
    assert(file_size == *size);

    if (pin != NULL && *pin != FALSE) {
        assert(vnode_tag(vp) == VT_HFS);
        error = hfs_pin_vnode(VTOHFS(vp), vp, HFS_PIN_IT | HFS_DATALESS_PIN, NULL, ctx);
        if (error) {
            printf("hfs_pin_vnode for swap files failed: %d\n", error);
            /* this is not fatal, carry on with files wherever they landed */
            *pin = FALSE;
            error = 0;
        }
    }

    vnode_lock_spin(vp);
    SET(vp->v_flag, VSWAP);
    vnode_unlock(vp);
done:
    return error;
}
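/*
 * Illustrative call of the pinned variant; note that pin is an in/out
 * parameter which the function clears if HFS pinning fails, since a
 * swap file that landed outside the pinned (fast) region still works.
 * swap_vp and the size are hypothetical.
 */
uint64_t swapsize = 128ULL << 20;
boolean_t pin = TRUE;                   /* request HFS_PIN_IT placement */

if (vm_swapfile_preallocate(swap_vp, &swapsize, &pin) == 0 && !pin) {
    /* created and marked VSWAP, but not pinned; proceed anyway */
}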
struct kern_direct_file_io_ref_t *
kern_open_file_for_direct_io(const char * name,
                             uint32_t iflags,
                             kern_get_file_extents_callback_t callback,
                             void * callback_ref,
                             off_t set_file_size,
                             off_t fs_free_size,
                             off_t write_file_offset,
                             void * write_file_addr,
                             size_t write_file_len,
                             dev_t * partition_device_result,
                             dev_t * image_device_result,
                             uint64_t * partitionbase_result,
                             uint64_t * maxiocount_result,
                             uint32_t * oflags)
{
    struct kern_direct_file_io_ref_t * ref;

    proc_t                  p;
    struct vnode_attr       va;
    dk_apfs_wbc_range_t     wbc_range;
    int                     error;
    off_t                   f_offset;
    uint64_t                fileblk;
    size_t                  filechunk;
    uint64_t                physoffset, minoffset;
    dev_t                   device;
    dev_t                   target = 0;
    int                     isssd = 0;
    uint32_t                flags = 0;
    uint32_t                blksize;
    off_t                   maxiocount, count, segcount, wbctotal;
    boolean_t               locked = FALSE;
    int                     fmode, cmode;
    struct nameidata        nd;
    u_int32_t               ndflags;
    off_t                   mpFree;

    int (*do_ioctl)(void * p1, void * p2, u_long theIoctl, caddr_t result);
    void * p1 = NULL;
    void * p2 = NULL;

    error = EFAULT;

    ref = (struct kern_direct_file_io_ref_t *) kalloc(sizeof(struct kern_direct_file_io_ref_t));
    if (!ref) {
        error = EFAULT;
        goto out;
    }

    bzero(ref, sizeof(*ref));
    p = kernproc;
    ref->ctx = vfs_context_kernel();

    fmode   = (kIOPolledFileCreate & iflags) ? (O_CREAT | FWRITE) : FWRITE;
    cmode   = S_IRUSR | S_IWUSR;
    ndflags = NOFOLLOW;
    NDINIT(&nd, LOOKUP, OP_OPEN, ndflags, UIO_SYSSPACE, CAST_USER_ADDR_T(name), ref->ctx);
    VATTR_INIT(&va);
    VATTR_SET(&va, va_mode, cmode);
    VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWENCRYPTED);
    VATTR_SET(&va, va_dataprotect_class, PROTECTION_CLASS_D);
    if ((error = vn_open_auth(&nd, &fmode, &va))) {
        kprintf("vn_open_auth(fmode: %d, cmode: %d) failed with error: %d\n", fmode, cmode, error);
        goto out;
    }

    ref->vp = nd.ni_vp;
    if (ref->vp->v_type == VREG) {
        vnode_lock_spin(ref->vp);
        SET(ref->vp->v_flag, VSWAP);
        vnode_unlock(ref->vp);
    }

    if (write_file_addr && write_file_len) {
        if ((error = kern_write_file(ref, write_file_offset, write_file_addr, write_file_len, IO_SKIP_ENCRYPTION))) {
            kprintf("kern_write_file() failed with error: %d\n", error);
            goto out;
        }
    }

    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_rdev);
    VATTR_WANTED(&va, va_fsid);
    VATTR_WANTED(&va, va_devid);
    VATTR_WANTED(&va, va_data_size);
    VATTR_WANTED(&va, va_data_alloc);
    VATTR_WANTED(&va, va_nlink);
    error = EFAULT;
    if (vnode_getattr(ref->vp, &va, ref->ctx))
        goto out;

    wbctotal = 0;
    mpFree = freespace_mb(ref->vp);
    mpFree <<= 20;
    kprintf("kern_direct_file(%s): vp size %qd, alloc %qd, mp free %qd, keep free %qd\n",
            name, va.va_data_size, va.va_data_alloc, mpFree, fs_free_size);

    if (ref->vp->v_type == VREG) {
        /* Don't dump files with links. */
        if (va.va_nlink != 1)
            goto out;

        device = (VATTR_IS_SUPPORTED(&va, va_devid)) ? va.va_devid : va.va_fsid;
        ref->filelength = va.va_data_size;

        p1 = &device;
        p2 = p;
        do_ioctl = &file_ioctl;

        if (kIOPolledFileHibernate & iflags) {
            error = do_ioctl(p1, p2, DKIOCAPFSGETWBCRANGE, (caddr_t) &wbc_range);
            ref->wbcranged = (error == 0);
        }
        if (ref->wbcranged) {
            uint32_t idx;
            assert(wbc_range.count <= (sizeof(wbc_range.extents) / sizeof(wbc_range.extents[0])));
            for (idx = 0; idx < wbc_range.count; idx++)
                wbctotal += wbc_range.extents[idx].length;
            kprintf("kern_direct_file(%s): wbc %qd\n", name, wbctotal);
            if (wbctotal)
                target = wbc_range.dev;
        }

        if (set_file_size) {
            if (wbctotal) {
                if (wbctotal >= set_file_size)
                    set_file_size = HIBERNATE_MIN_FILE_SIZE;
                else {
                    set_file_size -= wbctotal;
                    if (set_file_size < HIBERNATE_MIN_FILE_SIZE)
                        set_file_size = HIBERNATE_MIN_FILE_SIZE;
                }
            }
            if (fs_free_size) {
                mpFree += va.va_data_alloc;
                if ((mpFree < set_file_size) || ((mpFree - set_file_size) < fs_free_size)) {
                    error = ENOSPC;
                    goto out;
                }
            }
            error = vnode_setsize(ref->vp, set_file_size, IO_NOZEROFILL | IO_NOAUTH, ref->ctx);
            if (error)
                goto out;
            ref->filelength = set_file_size;
        }
    } else if ((ref->vp->v_type == VBLK) || (ref->vp->v_type == VCHR)) {
        /* Partition. */
        device = va.va_rdev;

        p1 = ref->vp;
        p2 = ref->ctx;
        do_ioctl = &device_ioctl;
    } else {
        /* Don't dump to non-regular files. */
        error = EFAULT;
        goto out;
    }
    ref->device = device;

    // probe for CF
    dk_corestorage_info_t cs_info;
    memset(&cs_info, 0, sizeof(dk_corestorage_info_t));
    error = do_ioctl(p1, p2, DKIOCCORESTORAGE, (caddr_t)&cs_info);
    ref->cf = (error == 0) && (cs_info.flags & DK_CORESTORAGE_ENABLE_HOTFILES);

    // get block size
    error = do_ioctl(p1, p2, DKIOCGETBLOCKSIZE, (caddr_t) &ref->blksize);
    if (error)
        goto out;

    minoffset = HIBERNATE_MIN_PHYSICAL_LBA * ref->blksize;

    if (ref->vp->v_type != VREG) {
        error = do_ioctl(p1, p2, DKIOCGETBLOCKCOUNT, (caddr_t) &fileblk);
        if (error)
            goto out;
        ref->filelength = fileblk * ref->blksize;
    }

    // pin logical extents, CS version
    error = kern_ioctl_file_extents(ref, _DKIOCCSPINEXTENT, 0, ref->filelength);
    if (error && (ENOTTY != error))
        goto out;
    ref->pinned = (error == 0);

    // pin logical extents, apfs version
    error = VNOP_IOCTL(ref->vp, FSCTL_FREEZE_EXTENTS, NULL, 0, ref->ctx);
    if (error && (ENOTTY != error))
        goto out;
    ref->frozen = (error == 0);

    // generate the block list
    error = do_ioctl(p1, p2, DKIOCLOCKPHYSICALEXTENTS, NULL);
    if (error)
        goto out;
    locked = TRUE;

    f_offset = 0;
    for (; f_offset < ref->filelength; f_offset += filechunk) {
        if (ref->vp->v_type == VREG) {
            filechunk = 1*1024*1024*1024;
            daddr64_t blkno;

            error = VNOP_BLOCKMAP(ref->vp, f_offset, filechunk, &blkno,
                                  &filechunk, NULL, VNODE_WRITE | VNODE_BLOCKMAP_NO_TRACK, NULL);
            if (error)
                goto out;
            if (-1LL == blkno)
                continue;
            fileblk = blkno * ref->blksize;
        } else if ((ref->vp->v_type == VBLK) || (ref->vp->v_type == VCHR)) {
            fileblk = f_offset;
            filechunk = f_offset ? 0 : ref->filelength;
        }

        physoffset = 0;
        while (physoffset < filechunk) {
            dk_physical_extent_t getphysreq;
            bzero(&getphysreq, sizeof(getphysreq));

            getphysreq.offset = fileblk + physoffset;
            getphysreq.length = (filechunk - physoffset);
            error = do_ioctl(p1, p2, DKIOCGETPHYSICALEXTENT, (caddr_t) &getphysreq);
            if (error)
                goto out;
            if (!target) {
                target = getphysreq.dev;
            } else if (target != getphysreq.dev) {
                error = ENOTSUP;
                goto out;
            }

            assert(getphysreq.offset >= minoffset);

#if HIBFRAGMENT
            uint64_t rev;
            for (rev = 4096; rev <= getphysreq.length; rev += 4096) {
                callback(callback_ref, getphysreq.offset + getphysreq.length - rev, 4096);
            }
#else
            callback(callback_ref, getphysreq.offset, getphysreq.length);
#endif
            physoffset += getphysreq.length;
        }
    }
    if (ref->wbcranged) {
        uint32_t idx;
        for (idx = 0; idx < wbc_range.count; idx++) {
            assert(wbc_range.extents[idx].offset >= minoffset);
            callback(callback_ref, wbc_range.extents[idx].offset, wbc_range.extents[idx].length);
        }
    }
    callback(callback_ref, 0ULL, 0ULL);

    if (ref->vp->v_type == VREG)
        p1 = &target;
    else {
        p1 = &target;
        p2 = p;
        do_ioctl = &file_ioctl;
    }

    // get partition base
    if (partitionbase_result) {
        error = do_ioctl(p1, p2, DKIOCGETBASE, (caddr_t) partitionbase_result);
        if (error)
            goto out;
    }

    // get block size & constraints
    error = do_ioctl(p1, p2, DKIOCGETBLOCKSIZE, (caddr_t) &blksize);
    if (error)
        goto out;

    maxiocount = 1*1024*1024*1024;

    error = do_ioctl(p1, p2, DKIOCGETMAXBLOCKCOUNTREAD, (caddr_t) &count);
    if (error)
        count = 0;
    count *= blksize;
    if (count && (count < maxiocount))
        maxiocount = count;

    error = do_ioctl(p1, p2, DKIOCGETMAXBLOCKCOUNTWRITE, (caddr_t) &count);
    if (error)
        count = 0;
    count *= blksize;
    if (count && (count < maxiocount))
        maxiocount = count;

    error = do_ioctl(p1, p2, DKIOCGETMAXBYTECOUNTREAD, (caddr_t) &count);
    if (error)
        count = 0;
    if (count && (count < maxiocount))
        maxiocount = count;

    error = do_ioctl(p1, p2, DKIOCGETMAXBYTECOUNTWRITE, (caddr_t) &count);
    if (error)
        count = 0;
    if (count && (count < maxiocount))
        maxiocount = count;

    error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTBYTECOUNTREAD, (caddr_t) &count);
    if (!error)
        error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTCOUNTREAD, (caddr_t) &segcount);
    if (error)
        count = segcount = 0;
    count *= segcount;
    if (count && (count < maxiocount))
        maxiocount = count;

    error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTBYTECOUNTWRITE, (caddr_t) &count);
    if (!error)
        error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTCOUNTWRITE, (caddr_t) &segcount);
    if (error)
        count = segcount = 0;
    count *= segcount;
    if (count && (count < maxiocount))
        maxiocount = count;

    kprintf("max io 0x%qx bytes\n", maxiocount);
    if (maxiocount_result)
        *maxiocount_result = maxiocount;

    error = do_ioctl(p1, p2, DKIOCISSOLIDSTATE, (caddr_t)&isssd);
    if (!error && isssd)
        flags |= kIOPolledFileSSD;

    if (partition_device_result)
        *partition_device_result = device;
    if (image_device_result)
        *image_device_result = target;
    if (oflags)
        *oflags = flags;

    if ((ref->vp->v_type == VBLK) || (ref->vp->v_type == VCHR)) {
        vnode_close(ref->vp, FWRITE, ref->ctx);
        ref->vp = NULLVP;
        ref->ctx = NULL;
    }

out:
    printf("kern_open_file_for_direct_io(%p, %d)\n", ref, error);

    if (error && locked) {
        p1 = &device;
        (void) do_ioctl(p1, p2, DKIOCUNLOCKPHYSICALEXTENTS, NULL);
    }

    if (error && ref) {
        if (ref->vp) {
            (void) kern_ioctl_file_extents(ref, _DKIOCCSUNPINEXTENT, 0,
                                           (ref->pinned && ref->cf) ? ref->filelength : 0);
            if (ref->frozen) {
                (void) VNOP_IOCTL(ref->vp, FSCTL_THAW_EXTENTS, NULL, 0, ref->ctx);
            }
            if (ref->wbcranged) {
                (void) do_ioctl(p1, p2, DKIOCAPFSRELEASEWBCRANGE, (caddr_t) NULL);
            }
            vnode_close(ref->vp, FWRITE, ref->ctx);
            ref->vp = NULLVP;
        }
        ref->ctx = NULL;
        kfree(ref, sizeof(struct kern_direct_file_io_ref_t));
        ref = NULL;
    }

    return(ref);
}
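/*
 * Minimal sketch of an extents callback for kern_open_file_for_direct_io().
 * The signature and the (0, 0) terminator convention are inferred from the
 * calls visible above; the accumulator type and its capacity are
 * illustrative, not from xnu.
 */
#define MAX_EXTENTS 64                  /* illustrative capacity */

struct extent_list {
    uint32_t count;
    struct {
        uint64_t start;                 /* physical byte offset on the device */
        uint64_t length;                /* extent length in bytes */
    } ext[MAX_EXTENTS];
};

static void
collect_extent(void *ref, uint64_t start, uint64_t length)
{
    struct extent_list *list = (struct extent_list *)ref;

    if (start == 0 && length == 0)
        return;                         /* terminator: block list complete */

    if (list->count < MAX_EXTENTS) {
        list->ext[list->count].start  = start;
        list->ext[list->count].length = length;
        list->count++;
    }
}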
/*
 * Caller holds I/O reference on vnode
 */
int
vnode_label(struct mount *mp, struct vnode *dvp, struct vnode *vp,
            struct componentname *cnp, int flags, vfs_context_t ctx)
{
    int error = 0;

    /* fast path checks... */

    /* are we labeling vnodes? If not still notify of create */
    if (mac_label_vnodes == 0) {
        if (flags & VNODE_LABEL_CREATE)
            error = mac_vnode_notify_create(ctx, mp, dvp, vp, cnp);
        return 0;
    }

    /* if already VL_LABELED */
    if (vp->v_lflag & VL_LABELED)
        return (0);

    vnode_lock_spin(vp);

    /*
     * must revalidate state once we hold the lock
     * since we could have blocked and someone else
     * has since labeled this vnode
     */
    if (vp->v_lflag & VL_LABELED) {
        vnode_unlock(vp);
        return (0);
    }

    if ((vp->v_lflag & VL_LABEL) == 0) {
        vp->v_lflag |= VL_LABEL;

        /* Could sleep on disk I/O, drop lock. */
        vnode_unlock(vp);

        if (vp->v_label == NULL)
            vp->v_label = mac_vnode_label_alloc();

        if (flags & VNODE_LABEL_CREATE)
            error = mac_vnode_notify_create(ctx, mp, dvp, vp, cnp);
        else
            error = mac_vnode_label_associate(mp, vp, ctx);

        vnode_lock_spin(vp);

        if ((error == 0) && (vp->v_flag & VNCACHEABLE))
            vp->v_lflag |= VL_LABELED;
        vp->v_lflag &= ~VL_LABEL;

        if (vp->v_lflag & VL_LABELWAIT) {
            vp->v_lflag &= ~VL_LABELWAIT;
            wakeup(&vp->v_label);
        }
    } else {
        struct timespec ts;

        ts.tv_sec = 10;
        ts.tv_nsec = 0;

        while (vp->v_lflag & VL_LABEL) {
            vp->v_lflag |= VL_LABELWAIT;

            error = msleep(&vp->v_label, &vp->v_lock, PVFS|PDROP,
                           "vnode_label", &ts);
            vnode_lock_spin(vp);

            if (error == EWOULDBLOCK) {
                vprint("vnode label timeout", vp);
                break;
            }
        }
        /* XXX: what should be done if labeling failed (above)? */
    }
    vnode_unlock(vp);

    return (error);
}
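/*
 * The VL_LABEL / VL_LABELWAIT dance above is an instance of the classic
 * busy-flag pattern: one thread claims the work, drops the lock for the
 * slow part, and wakes any waiters when done. A generic sketch of the
 * pattern follows, with all names illustrative (not xnu's).
 */
#define F_BUSY   0x1                    /* init in progress */
#define F_WANTED 0x2                    /* someone is waiting for it */

struct obj {
    lck_mtx_t lock;
    uint32_t  flags;
};

void
obj_slow_init(struct obj *o)
{
    lck_mtx_lock(&o->lock);
    while (o->flags & F_BUSY) {
        o->flags |= F_WANTED;
        /* msleep() atomically drops and retakes the mutex around the wait */
        msleep(o, &o->lock, PVFS, "obj_slow_init", NULL);
    }
    /* real code would recheck a "done" flag here (cf. VL_LABELED above) */
    o->flags |= F_BUSY;
    lck_mtx_unlock(&o->lock);

    /* ... slow work that may block on disk I/O, as in vnode_label() ... */

    lck_mtx_lock(&o->lock);
    o->flags &= ~F_BUSY;
    if (o->flags & F_WANTED) {
        o->flags &= ~F_WANTED;
        wakeup(o);                      /* release every waiter */
    }
    lck_mtx_unlock(&o->lock);
}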