int
xfs_fhlookup (d_thread_t *proc,
	      struct xfs_fhandle_t *fhp,
	      struct vnode **vpp)
{
    int error;
    struct mount *mp;
#if !(defined(HAVE_GETFH) && defined(HAVE_FHOPEN))
    struct ucred *cred = proc->p_ucred;
    struct vattr vattr;
    fsid_t fsid;
    struct xfs_fh_args *fh_args = (struct xfs_fh_args *)fhp->fhdata;

    NNPFSDEB(XDEBVFOPS, ("xfs_fhlookup (xfs)\n"));

    error = xfs_suser (proc);
    if (error)
	return EPERM;

    if (fhp->len < sizeof(struct xfs_fh_args))
	return EINVAL;

    fsid = SCARG(fh_args, fsid);

    mp = xfs_vfs_getvfs (&fsid);
    if (mp == NULL)
	return ENXIO;

#ifdef __APPLE__
    {
	uint32_t ino = SCARG(fh_args, fileid);
	error = VFS_VGET(mp, &ino, vpp);
    }
#else
    error = VFS_VGET(mp, SCARG(fh_args, fileid), vpp);
#endif

    if (error)
	return error;
    if (*vpp == NULL)
	return ENOENT;

    error = VOP_GETATTR(*vpp, &vattr, cred, proc);
    if (error) {
	vput(*vpp);
	return error;
    }

    if (vattr.va_gen != SCARG(fh_args, gen)) {
	vput(*vpp);
	return ENOENT;
    }
#else /* HAVE_GETFH && HAVE_FHOPEN */
    {
	fhandle_t *fh = (fhandle_t *) fhp;

	NNPFSDEB(XDEBVFOPS, ("xfs_fhlookup (native)\n"));

	mp = xfs_vfs_getvfs (&fh->fh_fsid);
	if (mp == NULL)
	    return ESTALE;

	if ((error = VFS_FHTOVP(mp, &fh->fh_fid, vpp)) != 0) {
	    *vpp = NULL;
	    return error;
	}
    }
#endif /* HAVE_GETFH && HAVE_FHOPEN */

#ifdef HAVE_KERNEL_VFS_OBJECT_CREATE
    if ((*vpp)->v_type == VREG && (*vpp)->v_object == NULL)
#ifdef HAVE_FREEBSD_THREAD
	xfs_vfs_object_create (*vpp, proc, proc->td_proc->p_ucred);
#else
	xfs_vfs_object_create (*vpp, proc, proc->p_ucred);
#endif
#elif __APPLE__
    if ((*vpp)->v_type == VREG && (!UBCINFOEXISTS(*vpp))) {
	ubc_info_init(*vpp);
    }
    ubc_hold(*vpp);
#endif
    return 0;
}
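/*
 * Illustrative sketch (not part of the original source): roughly how an
 * fhopen-style caller might use xfs_fhlookup() to turn a handle passed in
 * from userland into an open, referenced vnode. The helper name, the
 * FREAD-only open mode, and the direct use of proc->p_ucred are assumptions
 * for the example; a real caller would also install the vnode in a file
 * descriptor.
 */
#if 0
static int
xfs_fhopen_sketch (d_thread_t *proc, struct xfs_fhandle_t *fhp)
{
    struct vnode *vp;
    struct ucred *cred = proc->p_ucred;	/* assumed credential source */
    int error;

    /* resolve the handle; on success vp comes back locked and referenced */
    error = xfs_fhlookup (proc, fhp, &vp);
    if (error)
	return error;

    /* open the vnode read-only; drop the reference again if the open fails */
    error = VOP_OPEN(vp, FREAD, cred, proc);
    if (error) {
	vput(vp);
	return error;
    }
    /* ... associate vp with a file descriptor here ... */
    return 0;
}
#endif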
/*
 *	Routine:	macx_swapon
 *	Function:
 *		Syscall interface to add a file to backing store
 */
int
macx_swapon(
	char	*filename,
	int	flags,
	long	size,
	long	priority)
{
	struct vnode		*vp = 0;
	struct nameidata	nd, *ndp;
	struct proc		*p = current_proc();
	pager_file_t		pf;
	register int		error;
	kern_return_t		kr;
	mach_port_t		backing_store;
	memory_object_default_t	default_pager;
	int			i;
	boolean_t		funnel_state;

	struct vattr	vattr;

	AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPON);
	AUDIT_ARG(value, priority);

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	ndp = &nd;

	if ((error = suser(p->p_ucred, &p->p_acflag)))
		goto swapon_bailout;

	if (default_pager_init_flag == 0) {
		start_def_pager(NULL);
		default_pager_init_flag = 1;
	}

	/*
	 * Get a vnode for the paging area.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
	    UIO_USERSPACE, filename, p);

	if ((error = namei(ndp)))
		goto swapon_bailout;
	vp = ndp->ni_vp;

	if (vp->v_type != VREG) {
		error = EINVAL;
		VOP_UNLOCK(vp, 0, p);
		goto swapon_bailout;
	}
	UBCINFOCHECK("macx_swapon", vp);

	if ((error = VOP_GETATTR(vp, &vattr, p->p_ucred, p))) {
		VOP_UNLOCK(vp, 0, p);
		goto swapon_bailout;
	}

	if (vattr.va_size < (u_quad_t)size) {
		vattr_null(&vattr);
		vattr.va_size = (u_quad_t)size;
		error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
		if (error) {
			VOP_UNLOCK(vp, 0, p);
			goto swapon_bailout;
		}
	}

	/* add new backing store to list; check the bound before indexing */
	i = 0;
	while (i < MAX_BACKING_STORE && bs_port_table[i].vp != 0)
		i++;
	if (i == MAX_BACKING_STORE) {
		error = ENOMEM;
		VOP_UNLOCK(vp, 0, p);
		goto swapon_bailout;
	}

	/* remember the vnode. This vnode has namei() reference */
	bs_port_table[i].vp = vp;

	/*
	 * Look to see if we are already paging to this file.
	 */
	/* make certain the copy send of kernel call will work */
	default_pager = MEMORY_OBJECT_DEFAULT_NULL;
	kr = host_default_memory_manager(host_priv_self(), &default_pager, 0);
	if (kr != KERN_SUCCESS) {
		error = EAGAIN;
		VOP_UNLOCK(vp, 0, p);
		bs_port_table[i].vp = 0;
		goto swapon_bailout;
	}

	kr = default_pager_backing_store_create(default_pager,
					-1, /* default priority */
					0, /* default cluster size */
					&backing_store);
	memory_object_default_deallocate(default_pager);

	if (kr != KERN_SUCCESS) {
		error = ENOMEM;
		VOP_UNLOCK(vp, 0, p);
		bs_port_table[i].vp = 0;
		goto swapon_bailout;
	}

	/*
	 * NOTE: we are able to supply PAGE_SIZE here instead of
	 *	an actual record size or block number because:
	 *	a: we do not support offsets from the beginning of the
	 *	   file (allowing for non page size/record modulo offsets)
	 *	b: paging will be done modulo page size
	 */

	VOP_UNLOCK(vp, 0, p);
	kr = default_pager_add_file(backing_store, vp,
				PAGE_SIZE, ((int)vattr.va_size)/PAGE_SIZE);
	if (kr != KERN_SUCCESS) {
		bs_port_table[i].vp = 0;
		if (kr == KERN_INVALID_ARGUMENT)
			error = EINVAL;
		else
			error = ENOMEM;
		goto swapon_bailout;
	}
	bs_port_table[i].bs = (void *)backing_store;
	error = 0;
	if (!ubc_hold(vp))
		panic("macx_swapon: hold");

	/* Mark this vnode as being used for swapfile */
	SET(vp->v_flag, VSWAP);

	ubc_setcred(vp, p);

	/*
	 * take an extra reference on the vnode to keep
	 * vnreclaim() away from this vnode.
	 */
	VREF(vp);

	/* Hold on to the namei reference to the paging file vnode */
	vp = 0;

swapon_bailout:
	if (vp) {
		vrele(vp);
	}
	(void) thread_funnel_set(kernel_flock, FALSE);
	AUDIT_MACH_SYSCALL_EXIT(error);
	return(error);
}
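/*
 * Illustrative sketch (not part of the original source): how a user-space
 * swap manager in the style of dynamic_pager might call the macx_swapon()
 * trap above to register a pre-created swap file as backing store. The
 * file path, size, helper name, and the extern declaration are assumptions
 * for the example only; the file must already exist and be a regular file
 * (VREG), and it is grown to the requested size if it is smaller.
 */
#if 0
extern int macx_swapon(char *filename, int flags, long size, long priority);

static int
add_swapfile_sketch(void)
{
	char *path = "/private/var/vm/swapfile0";	/* hypothetical path */
	long size = 64L * 1024 * 1024;			/* grow to 64 MB if smaller */

	/* flags are unused here; priority is only recorded for auditing */
	return macx_swapon(path, 0, size, 0);
}
#endif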