/*
 * vm_swapfile_preallocate:
 *	Prepare a vnode for use as a swap file: set up content protection
 *	(when configured), size the file, and tag the vnode with VSWAP.
 *
 *	On entry *size is the desired swapfile size; on return it holds the
 *	size actually in effect (an existing, non-empty file is reused as-is
 *	and its size is reported back through *size).
 *
 *	Returns 0 on success, or an errno-style code from the failing KPI.
 */
int
vm_swapfile_preallocate(vnode_t vp, uint64_t *size)
{
	int		err = 0;
	uint64_t	existing_size = 0;
	vfs_context_t	ctx = vfs_context_current();

#if CONFIG_PROTECT
	{
#if 0	// <rdar://11771612>
		if ((err = cp_vnode_setclass(vp, PROTECTION_CLASS_F))) {
			if (config_protect_bug) {
				printf("swap protection class set failed with %d\n", err);
			} else {
				panic("swap protection class set failed with %d\n", err);
			}
		}
#endif
		/* initialize content protection keys manually */
		err = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0);
		if (err != 0) {
			printf("Content Protection key failure on swap: %d\n", err);
			vnode_put(vp);
			vp = NULL;
			goto done;
		}
	}
#endif

	/*
	 * This check exists because dynamic_pager creates the 1st swapfile,
	 * swapfile0, for us from user-space in a supported manner (with
	 * IO_NOZEROFILL etc).
	 *
	 * If dynamic_pager, in the future, discontinues creating that file,
	 * then we need to change this check to a panic / assert or return an
	 * error.  That's because we can't be sure if the file has been
	 * created correctly.
	 */
	err = vnode_size(vp, (off_t *) &existing_size, ctx);
	if (err != 0) {
		printf("vnode_size (existing files) for swap files failed: %d\n", err);
		goto done;
	}

	if (existing_size != 0) {
		/* Non-empty file already created for us: adopt its size. */
		*size = existing_size;
	} else {
		/* Fresh file: grow it to the requested size without zero-fill. */
		err = vnode_setsize(vp, *size, IO_NOZEROFILL, ctx);
		if (err) {
			printf("vnode_setsize for swap files failed: %d\n", err);
			goto done;
		}
	}

	/* Mark the vnode so the rest of the system treats it as swap. */
	vnode_lock_spin(vp);
	SET(vp->v_flag, VSWAP);
	vnode_unlock(vp);

done:
	return err;
}
/*
 *	Routine:	macx_swapon
 *	Function:
 *		Syscall interface to add a file to backing store
 */
int
macx_swapon(
	struct macx_swapon_args *args)
{
	int size = args->size;
	vnode_t vp = (vnode_t)NULL;
	struct nameidata nd, *ndp;
	register int error;
	kern_return_t kr;
	mach_port_t backing_store;
	memory_object_default_t default_pager;
	int i;
	boolean_t funnel_state;
	off_t file_size;
	vfs_context_t ctx = vfs_context_current();
	struct proc *p = current_proc();
	int dp_cluster_size;

	AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPON);
	AUDIT_ARG(value32, args->priority);

	/* take the kernel funnel; released on every exit path below */
	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	ndp = &nd;

	/* adding backing store is a privileged operation */
	if ((error = suser(kauth_cred_get(), 0)))
		goto swapon_bailout;

	/*
	 * Get a vnode for the paging area.
	 */
	NDINIT(ndp, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
	       ((IS_64BIT_PROCESS(p)) ? UIO_USERSPACE64 : UIO_USERSPACE32),
	       (user_addr_t) args->filename, ctx);

	if ((error = namei(ndp)))
		goto swapon_bailout;
	nameidone(ndp);
	vp = ndp->ni_vp;	/* holds the namei() reference until bailout */

	/* only regular files may back swap */
	if (vp->v_type != VREG) {
		error = EINVAL;
		goto swapon_bailout;
	}

	/* get file size */
	if ((error = vnode_size(vp, &file_size, ctx)) != 0)
		goto swapon_bailout;

#if CONFIG_MACF
	/* MAC policy check performed under the vnode lock */
	vnode_lock(vp);
	error = mac_system_check_swapon(vfs_context_ucred(ctx), vp);
	vnode_unlock(vp);
	if (error)
		goto swapon_bailout;
#endif

	/* resize to desired size if it's too small */
	if ((file_size < (off_t)size) &&
	    ((error = vnode_setsize(vp, (off_t)size, 0, ctx)) != 0))
		goto swapon_bailout;

#if CONFIG_PROTECT
	{
		/* initialize content protection keys manually */
		if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) {
			goto swapon_bailout;
		}
	}
#endif

	/* lazily start the default pager the first time swap is added */
	if (default_pager_init_flag == 0) {
		start_def_pager(NULL);
		default_pager_init_flag = 1;
	}

	/* add new backing store to list: find the first free table slot */
	i = 0;
	while(bs_port_table[i].vp != 0) {
		if(i == MAX_BACKING_STORE)
			break;
		i++;
	}
	if(i == MAX_BACKING_STORE) {
		error = ENOMEM;
		goto swapon_bailout;
	}

	/* remember the vnode.  This vnode has namei() reference */
	bs_port_table[i].vp = vp;

	/*
	 * Look to see if we are already paging to this file.
	 */
	/* make certain the copy send of kernel call will work */
	default_pager = MEMORY_OBJECT_DEFAULT_NULL;
	kr = host_default_memory_manager(host_priv_self(), &default_pager, 0);
	if(kr != KERN_SUCCESS) {
		error = EAGAIN;
		/* roll back the table claim taken above */
		bs_port_table[i].vp = 0;
		goto swapon_bailout;
	}

#if CONFIG_EMBEDDED
	dp_cluster_size = 1 * PAGE_SIZE;
#else
	/* NOTE(review): dp_isssd appears to be a file-scope global — defined outside this view */
	if ((dp_isssd = vnode_pager_isSSD(vp)) == TRUE) {
		/*
		 * keep the cluster size small since the
		 * seek cost is effectively 0 which means
		 * we don't care much about fragmentation
		 */
		dp_cluster_size = 2 * PAGE_SIZE;
	} else {
		/*
		 * use the default cluster size
		 */
		dp_cluster_size = 0;
	}
#endif
	kr = default_pager_backing_store_create(default_pager,
					-1, /* default priority */
					dp_cluster_size,
					&backing_store);
	/* drop the send right obtained from host_default_memory_manager() */
	memory_object_default_deallocate(default_pager);

	if(kr != KERN_SUCCESS) {
		error = ENOMEM;
		bs_port_table[i].vp = 0;
		goto swapon_bailout;
	}

	/* Mark this vnode as being used for swapfile */
	vnode_lock_spin(vp);
	SET(vp->v_flag, VSWAP);
	vnode_unlock(vp);

	/*
	 * NOTE: we are able to supply PAGE_SIZE here instead of
	 *	an actual record size or block number because:
	 *	a: we do not support offsets from the beginning of the
	 *		file (allowing for non page size/record modulo offsets.
	 *	b: because allow paging will be done modulo page size
	 */
	kr = default_pager_add_file(backing_store, (vnode_ptr_t) vp,
				PAGE_SIZE, (int)(file_size/PAGE_SIZE));
	if(kr != KERN_SUCCESS) {
		bs_port_table[i].vp = 0;
		if(kr == KERN_INVALID_ARGUMENT)
			error = EINVAL;
		else
			error = ENOMEM;

		/* This vnode is not to be used for swapfile */
		vnode_lock_spin(vp);
		CLR(vp->v_flag, VSWAP);
		vnode_unlock(vp);

		goto swapon_bailout;
	}
	bs_port_table[i].bs = (void *)backing_store;
	error = 0;

	ubc_setthreadcred(vp, p, current_thread());

	/*
	 * take a long term reference on the vnode to keep
	 * vnreclaim() away from this vnode.
	 */
	vnode_ref(vp);

swapon_bailout:
	/* drop the namei() reference; the long-term ref (if taken) persists */
	if (vp) {
		vnode_put(vp);
	}
	(void) thread_funnel_set(kernel_flock, FALSE);
	AUDIT_MACH_SYSCALL_EXIT(error);

	if (error)
		printf("macx_swapon FAILED - %d\n", error);
	else
		printf("macx_swapon SUCCESS\n");

	return(error);
}
/*
 * vm_swapfile_preallocate:
 *	Prepare a freshly created vnode for use as a swap file: set up
 *	content protection (when configured), grow the file to *size
 *	without zero-filling, optionally pin it on HFS, and tag the vnode
 *	with VSWAP.
 *
 *	pin (in/out, may be NULL): request pinning of the file on media;
 *	cleared to FALSE if pinning fails (pin failure is non-fatal).
 *
 *	Returns 0 on success, or an errno-style code from the failing KPI.
 */
int
vm_swapfile_preallocate(vnode_t vp, uint64_t *size, boolean_t *pin)
{
	int		err = 0;
	uint64_t	actual_size = 0;
	vfs_context_t	ctx = vfs_context_current();

#if CONFIG_PROTECT
	{
#if 0	// <rdar://11771612>
		if ((err = cp_vnode_setclass(vp, PROTECTION_CLASS_F))) {
			if (config_protect_bug) {
				printf("swap protection class set failed with %d\n", err);
			} else {
				panic("swap protection class set failed with %d\n", err);
			}
		}
#endif
		/* initialize content protection keys manually */
		err = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0);
		if (err != 0) {
			printf("Content Protection key failure on swap: %d\n", err);
			vnode_put(vp);
			vp = NULL;
			goto done;
		}
	}
#endif

	/* Grow the file to the requested size without zero-filling it. */
	err = vnode_setsize(vp, *size, IO_NOZEROFILL, ctx);
	if (err) {
		printf("vnode_setsize for swap files failed: %d\n", err);
		goto done;
	}

	/* Read the size back to confirm the resize took effect. */
	err = vnode_size(vp, (off_t *) &actual_size, ctx);
	if (err) {
		printf("vnode_size (new file) for swap file failed: %d\n", err);
		goto done;
	}
	assert(actual_size == *size);

	if (pin && *pin) {
		/* Pinning is HFS-only; the tag must match. */
		assert(vnode_tag(vp) == VT_HFS);

		err = hfs_pin_vnode(VTOHFS(vp), vp,
		    HFS_PIN_IT | HFS_DATALESS_PIN, NULL, ctx);
		if (err) {
			printf("hfs_pin_vnode for swap files failed: %d\n", err);
			/* this is not fatal, carry on with files wherever they landed */
			*pin = FALSE;
			err = 0;
		}
	}

	/* Mark the vnode so the rest of the system treats it as swap. */
	vnode_lock_spin(vp);
	SET(vp->v_flag, VSWAP);
	vnode_unlock(vp);

done:
	return err;
}