/*
 * XXX Liang:
 *
 * The kern_file_*() routines are not thread-safe yet. They are
 * currently needed only by tracefiled, so making them MT-safe
 * is not a priority.
 */
int
kern_file_size(struct cfs_kern_file *fp, off_t *psize)
{
    int error;
    off_t size;

    error = vnode_size(fp->f_vp, &size, fp->f_ctxt);
    if (error)
        return error;

    if (psize)
        *psize = size;
    return 0;
}
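/*
 * Usage sketch (not part of the original source): a hypothetical
 * tracefiled-style caller that consults the current file size before
 * appending a record.  trace_file_append() and the size limit are
 * illustrative assumptions; fp must already be open via the
 * kern_file_*() API.
 */
static int
trace_append_if_room(struct cfs_kern_file *fp, off_t size_limit,
                     const void *buf, size_t len)
{
    off_t size;
    int rc;

    rc = kern_file_size(fp, &size);
    if (rc != 0)
        return rc;

    /* drop the record rather than growing past the limit */
    if (size + (off_t)len > size_limit)
        return ENOSPC;

    return trace_file_append(fp, buf, len); /* hypothetical helper */
}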
static int
vniocattach_shadow(struct vn_softc *vn, struct vn_ioctl_64 *vniop,
    __unused dev_t dev, int in_kernel, proc_t p)
{
    vfs_context_t ctx = vfs_context_current();
    struct nameidata nd;
    int error, flags;
    shadow_map_t *map;
    off_t file_size;

    flags = FREAD | FWRITE;
    if (in_kernel) {
        NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW, UIO_SYSSPACE,
            vniop->vn_file, ctx);
    } else {
        NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW,
            (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
            vniop->vn_file, ctx);
    }
    /* vn_open gives both long- and short-term references */
    error = vn_open(&nd, flags, 0);
    if (error) {
        /* shadow MUST be writable! */
        return (error);
    }
    if (nd.ni_vp->v_type != VREG
        || (error = vnode_size(nd.ni_vp, &file_size, ctx))) {
        (void) vn_close(nd.ni_vp, flags, ctx);
        vnode_put(nd.ni_vp);
        return (error ? error : EINVAL);
    }
    map = shadow_map_create(vn->sc_fsize, file_size, 0, vn->sc_secsize);
    if (map == NULL) {
        (void) vn_close(nd.ni_vp, flags, ctx);
        vnode_put(nd.ni_vp);
        vn->sc_shadow_vp = NULL;
        return (ENOMEM);
    }
    vn->sc_shadow_vp = nd.ni_vp;
    vn->sc_shadow_vid = vnode_vid(nd.ni_vp);
    vn->sc_shadow_vp->v_flag |= VNOCACHE_DATA;
    vn->sc_shadow_map = map;
    vn->sc_flags &= ~VNF_READONLY;  /* we're now read/write */

    /* lose the short-term reference */
    vnode_put(nd.ni_vp);
    return (0);
}
int
vm_swapfile_preallocate(vnode_t vp, uint64_t *size, boolean_t *pin)
{
    int error = 0;
    uint64_t file_size = 0;
    vfs_context_t ctx = NULL;

    ctx = vfs_context_current();

    error = vnode_setsize(vp, *size, IO_NOZEROFILL, ctx);
    if (error) {
        printf("vnode_setsize for swap files failed: %d\n", error);
        goto done;
    }

    error = vnode_size(vp, (off_t *) &file_size, ctx);
    if (error) {
        printf("vnode_size (new file) for swap file failed: %d\n", error);
        goto done;
    }
    assert(file_size == *size);

    if (pin != NULL && *pin != FALSE) {
        error = VNOP_IOCTL(vp, FIOPINSWAP, NULL, 0, ctx);
        if (error) {
            printf("pin for swap files failed: %d, file_size = %lld\n",
                error, file_size);
            /* this is not fatal, carry on with files wherever they landed */
            *pin = FALSE;
            error = 0;
        }
    }

    vnode_lock_spin(vp);
    SET(vp->v_flag, VSWAP);
    vnode_unlock(vp);
done:
    return error;
}
static int
vniocattach_file(struct vn_softc *vn, struct vn_ioctl_64 *vniop,
    dev_t dev, int in_kernel, proc_t p)
{
    dev_t cdev;
    vfs_context_t ctx = vfs_context_current();
    kauth_cred_t cred;
    struct nameidata nd;
    off_t file_size;
    int error, flags;

    flags = FREAD | FWRITE;
    if (in_kernel) {
        NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW, UIO_SYSSPACE,
            vniop->vn_file, ctx);
    } else {
        NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW,
            (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
            vniop->vn_file, ctx);
    }
    /* vn_open gives both long- and short-term references */
    error = vn_open(&nd, flags, 0);
    if (error) {
        if (error != EACCES && error != EPERM && error != EROFS) {
            return (error);
        }
        /* the file isn't writable; retry the open read-only */
        flags &= ~FWRITE;
        if (in_kernel) {
            NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW, UIO_SYSSPACE,
                vniop->vn_file, ctx);
        } else {
            NDINIT(&nd, LOOKUP, OP_OPEN, FOLLOW,
                (IS_64BIT_PROCESS(p) ? UIO_USERSPACE64 : UIO_USERSPACE32),
                vniop->vn_file, ctx);
        }
        error = vn_open(&nd, flags, 0);
        if (error) {
            return (error);
        }
    }
    if (nd.ni_vp->v_type != VREG) {
        error = EINVAL;
    } else {
        error = vnode_size(nd.ni_vp, &file_size, ctx);
    }
    if (error != 0) {
        (void) vn_close(nd.ni_vp, flags, ctx);
        vnode_put(nd.ni_vp);
        return (error);
    }
    cred = kauth_cred_proc_ref(p);
    nd.ni_vp->v_flag |= VNOCACHE_DATA;
    error = setcred(nd.ni_vp, cred);
    if (error) {
        (void) vn_close(nd.ni_vp, flags, ctx);
        vnode_put(nd.ni_vp);
        kauth_cred_unref(&cred);
        return (error);
    }
    vn->sc_secsize = DEV_BSIZE;
    vn->sc_fsize = file_size;
    vn->sc_size = file_size / vn->sc_secsize;
    vn->sc_vp = nd.ni_vp;
    vn->sc_vid = vnode_vid(nd.ni_vp);
    vn->sc_open_flags = flags;
    vn->sc_cred = cred;
    cdev = makedev(vndevice_cdev_major, minor(dev));
    vn->sc_cdev = devfs_make_node(cdev, DEVFS_CHAR,
        UID_ROOT, GID_OPERATOR, 0600, "rvn%d", minor(dev));
    vn->sc_flags |= VNF_INITED;
    if (flags == FREAD)
        vn->sc_flags |= VNF_READONLY;
    /* lose the short-term reference */
    vnode_put(nd.ni_vp);
    return (0);
}
/*
 * Routine:	macx_swapon
 * Function:
 *	Syscall interface to add a file to backing store
 */
int
macx_swapon(
    struct macx_swapon_args *args)
{
    int size = args->size;
    vnode_t vp = (vnode_t)NULL;
    struct nameidata nd, *ndp;
    register int error;
    kern_return_t kr;
    mach_port_t backing_store;
    memory_object_default_t default_pager;
    int i;
    boolean_t funnel_state;
    off_t file_size;
    vfs_context_t ctx = vfs_context_current();
    struct proc *p = current_proc();
    int dp_cluster_size;

    AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPON);
    AUDIT_ARG(value32, args->priority);

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    ndp = &nd;

    if ((error = suser(kauth_cred_get(), 0)))
        goto swapon_bailout;

    /*
     * Get a vnode for the paging area.
     */
    NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
        ((IS_64BIT_PROCESS(p)) ? UIO_USERSPACE64 : UIO_USERSPACE32),
        (user_addr_t) args->filename, ctx);

    if ((error = namei(ndp)))
        goto swapon_bailout;
    nameidone(ndp);
    vp = ndp->ni_vp;

    if (vp->v_type != VREG) {
        error = EINVAL;
        goto swapon_bailout;
    }

    /* get file size */
    if ((error = vnode_size(vp, &file_size, ctx)) != 0)
        goto swapon_bailout;

#if CONFIG_MACF
    vnode_lock(vp);
    error = mac_system_check_swapon(vfs_context_ucred(ctx), vp);
    vnode_unlock(vp);
    if (error)
        goto swapon_bailout;
#endif

    /* resize to desired size if it's too small */
    if ((file_size < (off_t)size) &&
        ((error = vnode_setsize(vp, (off_t)size, 0, ctx)) != 0))
        goto swapon_bailout;

    if (default_pager_init_flag == 0) {
        start_def_pager(NULL);
        default_pager_init_flag = 1;
    }

    /* add new backing store to list */
    i = 0;
    while (bs_port_table[i].vp != 0) {
        if (i == MAX_BACKING_STORE)
            break;
        i++;
    }
    if (i == MAX_BACKING_STORE) {
        error = ENOMEM;
        goto swapon_bailout;
    }

    /* remember the vnode. This vnode has namei() reference */
    bs_port_table[i].vp = vp;

    /*
     * Look to see if we are already paging to this file.
     */
    /* make certain the copy send of kernel call will work */
    default_pager = MEMORY_OBJECT_DEFAULT_NULL;
    kr = host_default_memory_manager(host_priv_self(), &default_pager, 0);
    if (kr != KERN_SUCCESS) {
        error = EAGAIN;
        bs_port_table[i].vp = 0;
        goto swapon_bailout;
    }

    if (vp->v_mount->mnt_kern_flag & MNTK_SSD) {
        /*
         * keep the cluster size small since the
         * seek cost is effectively 0 which means
         * we don't care much about fragmentation
         */
        dp_isssd = TRUE;
        dp_cluster_size = 2 * PAGE_SIZE;
    } else {
        /*
         * use the default cluster size
         */
        dp_isssd = FALSE;
        dp_cluster_size = 0;
    }
    kr = default_pager_backing_store_create(default_pager,
        -1, /* default priority */
        dp_cluster_size,
        &backing_store);
    memory_object_default_deallocate(default_pager);

    if (kr != KERN_SUCCESS) {
        error = ENOMEM;
        bs_port_table[i].vp = 0;
        goto swapon_bailout;
    }

    /* Mark this vnode as being used for swapfile */
    vnode_lock_spin(vp);
    SET(vp->v_flag, VSWAP);
    vnode_unlock(vp);

    /*
     * NOTE: we are able to supply PAGE_SIZE here instead of
     *	an actual record size or block number because:
     *	a: we do not support offsets from the beginning of the
     *		file (allowing for non page size/record modulo offsets).
     *	b: paging will be done modulo page size
     */

    kr = default_pager_add_file(backing_store, (vnode_ptr_t) vp,
        PAGE_SIZE, (int)(file_size / PAGE_SIZE));
    if (kr != KERN_SUCCESS) {
        bs_port_table[i].vp = 0;
        if (kr == KERN_INVALID_ARGUMENT)
            error = EINVAL;
        else
            error = ENOMEM;

        /* This vnode is not to be used for swapfile */
        vnode_lock_spin(vp);
        CLR(vp->v_flag, VSWAP);
        vnode_unlock(vp);

        goto swapon_bailout;
    }
    bs_port_table[i].bs = (void *)backing_store;
    error = 0;

    ubc_setthreadcred(vp, p, current_thread());

    /*
     * take a long term reference on the vnode to keep
     * vnreclaim() away from this vnode.
     */
    vnode_ref(vp);

swapon_bailout:
    if (vp) {
        vnode_put(vp);
    }
    (void) thread_funnel_set(kernel_flock, FALSE);
    AUDIT_MACH_SYSCALL_EXIT(error);
    return (error);
}
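/*
 * Caller-side sketch (an assumption, not from the original source):
 * dynamic_pager was the historical user-space client of this syscall.
 * The wrapper prototype below follows the older libsystem declaration,
 * which varied across releases, so treat it as illustrative only.
 */
#include <stdio.h>

extern int macx_swapon(char *filename, int flags, int size, int priority);

static int
add_swap_file_example(char *path, int swap_file_size)
{
    /* the kernel grows the file to `size` bytes if it is smaller */
    if (macx_swapon(path, 0, swap_file_size, 0) != 0) {
        perror("macx_swapon");
        return -1;
    }
    return 0;
}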
/*
 * shared_region_map_np()
 *
 * This system call is intended for dyld.
 *
 * dyld uses this to map a shared cache file into a shared region.
 * This is usually done only the first time a shared cache is needed.
 * Subsequent processes will just use the populated shared region without
 * requiring any further setup.
 */
int
shared_region_map_np(
    struct proc *p,
    struct shared_region_map_np_args *uap,
    __unused int *retvalp)
{
    int error;
    kern_return_t kr;
    int fd;
    struct fileproc *fp;
    struct vnode *vp, *root_vp;
    struct vnode_attr va;
    off_t fs;
    memory_object_size_t file_size;
    user_addr_t user_mappings;
    struct shared_file_mapping_np *mappings;
#define SFM_MAX_STACK	8
    struct shared_file_mapping_np stack_mappings[SFM_MAX_STACK];
    unsigned int mappings_count;
    vm_size_t mappings_size;
    memory_object_control_t file_control;
    struct vm_shared_region *shared_region;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: %p [%d(%s)] -> map\n",
         current_thread(), p->p_pid, p->p_comm));

    shared_region = NULL;
    mappings_count = 0;
    mappings_size = 0;
    mappings = NULL;
    fp = NULL;
    vp = NULL;

    /* get file descriptor for shared region cache file */
    fd = uap->fd;

    /* get file structure from file descriptor */
    error = fp_lookup(p, fd, &fp, 0);
    if (error) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map: "
             "fd=%d lookup failed (error=%d)\n",
             current_thread(), p->p_pid, p->p_comm, fd, error));
        goto done;
    }

    /* make sure we're attempting to map a vnode */
    if (fp->f_fglob->fg_type != DTYPE_VNODE) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map: "
             "fd=%d not a vnode (type=%d)\n",
             current_thread(), p->p_pid, p->p_comm,
             fd, fp->f_fglob->fg_type));
        error = EINVAL;
        goto done;
    }

    /* we need at least read permission on the file */
    if (! (fp->f_fglob->fg_flag & FREAD)) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map: "
             "fd=%d not readable\n",
             current_thread(), p->p_pid, p->p_comm, fd));
        error = EPERM;
        goto done;
    }

    /* get vnode from file structure */
    error = vnode_getwithref((vnode_t) fp->f_fglob->fg_data);
    if (error) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map: "
             "fd=%d getwithref failed (error=%d)\n",
             current_thread(), p->p_pid, p->p_comm, fd, error));
        goto done;
    }
    vp = (struct vnode *) fp->f_fglob->fg_data;

    /* make sure the vnode is a regular file */
    if (vp->v_type != VREG) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map(%p:'%s'): "
             "not a file (type=%d)\n",
             current_thread(), p->p_pid, p->p_comm,
             vp, vp->v_name, vp->v_type));
        error = EINVAL;
        goto done;
    }

    /* make sure vnode is on the process's root volume */
    root_vp = p->p_fd->fd_rdir;
    if (root_vp == NULL) {
        root_vp = rootvnode;
    }
    if (vp->v_mount != root_vp->v_mount) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map(%p:'%s'): "
             "not on process's root volume\n",
             current_thread(), p->p_pid, p->p_comm,
             vp, vp->v_name));
        error = EPERM;
        goto done;
    }

    /* make sure vnode is owned by "root" */
    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_uid);
    error = vnode_getattr(vp, &va, vfs_context_current());
    if (error) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map(%p:'%s'): "
             "vnode_getattr(%p) failed (error=%d)\n",
             current_thread(), p->p_pid, p->p_comm,
             vp, vp->v_name, vp, error));
        goto done;
    }
    if (va.va_uid != 0) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map(%p:'%s'): "
             "owned by uid=%d instead of 0\n",
             current_thread(), p->p_pid, p->p_comm,
             vp, vp->v_name, va.va_uid));
        error = EPERM;
        goto done;
    }

    /* get vnode size */
    error = vnode_size(vp, &fs, vfs_context_current());
    if (error) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map(%p:'%s'): "
             "vnode_size(%p) failed (error=%d)\n",
             current_thread(), p->p_pid, p->p_comm,
             vp, vp->v_name, vp, error));
        goto done;
    }
    file_size = fs;

    /* get the file's memory object handle */
    file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
    if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map(%p:'%s'): "
             "no memory object\n",
             current_thread(), p->p_pid, p->p_comm,
             vp, vp->v_name));
        error = EINVAL;
        goto done;
    }

    /* get the list of mappings the caller wants us to establish */
    mappings_count = uap->count;	/* number of mappings */
    mappings_size = (vm_size_t) (mappings_count * sizeof (mappings[0]));
    if (mappings_count == 0) {
        SHARED_REGION_TRACE_INFO(
            ("shared_region: %p [%d(%s)] map(%p:'%s'): "
             "no mappings\n",
             current_thread(), p->p_pid, p->p_comm,
             vp, vp->v_name));
        error = 0;	/* no mappings: we're done ! */
        goto done;
    } else if (mappings_count <= SFM_MAX_STACK) {
        mappings = &stack_mappings[0];
    } else {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map(%p:'%s'): "
             "too many mappings (%d)\n",
             current_thread(), p->p_pid, p->p_comm,
             vp, vp->v_name, mappings_count));
        error = EINVAL;
        goto done;
    }

    user_mappings = uap->mappings;	/* the mappings, in user space */
    error = copyin(user_mappings, mappings, mappings_size);
    if (error) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map(%p:'%s'): "
             "copyin(0x%llx, %d) failed (error=%d)\n",
             current_thread(), p->p_pid, p->p_comm,
             vp, vp->v_name, (uint64_t)user_mappings,
             mappings_count, error));
        goto done;
    }

    /* get the process's shared region (setup in vm_map_exec()) */
    shared_region = vm_shared_region_get(current_task());
    if (shared_region == NULL) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map(%p:'%s'): "
             "no shared region\n",
             current_thread(), p->p_pid, p->p_comm,
             vp, vp->v_name));
        goto done;
    }

    /* map the file into that shared region's submap */
    kr = vm_shared_region_map_file(shared_region,
        mappings_count,
        mappings,
        file_control,
        file_size,
        (void *) p->p_fd->fd_rdir);
    if (kr != KERN_SUCCESS) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map(%p:'%s'): "
             "vm_shared_region_map_file() failed kr=0x%x\n",
             current_thread(), p->p_pid, p->p_comm,
             vp, vp->v_name, kr));
        switch (kr) {
        case KERN_INVALID_ADDRESS:
            error = EFAULT;
            break;
        case KERN_PROTECTION_FAILURE:
            error = EPERM;
            break;
        case KERN_NO_SPACE:
            error = ENOMEM;
            break;
        case KERN_FAILURE:
        case KERN_INVALID_ARGUMENT:
        default:
            error = EINVAL;
            break;
        }
        goto done;
    }

    /*
     * The mapping was successful.  Let the buffer cache know
     * that we've mapped that file with these protections.  This
     * prevents the vnode from getting recycled while it's mapped.
     */
    (void) ubc_map(vp, VM_PROT_READ);
    error = 0;

    /* update the vnode's access time */
    if (! (vnode_vfsvisflags(vp) & MNT_NOATIME)) {
        VATTR_INIT(&va);
        nanotime(&va.va_access_time);
        VATTR_SET_ACTIVE(&va, va_access_time);
        vnode_setattr(vp, &va, vfs_context_current());
    }

    if (p->p_flag & P_NOSHLIB) {
        /* signal that this process is now using split libraries */
        OSBitAndAtomic(~((uint32_t)P_NOSHLIB), (UInt32 *)&p->p_flag);
    }

done:
    if (vp != NULL) {
        /*
         * release the vnode...
         * ubc_map() still holds it for us in the non-error case
         */
        (void) vnode_put(vp);
        vp = NULL;
    }
    if (fp != NULL) {
        /* release the file descriptor */
        fp_drop(p, fd, fp, 0);
        fp = NULL;
    }
    if (shared_region != NULL) {
        vm_shared_region_deallocate(shared_region);
    }

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: %p [%d(%s)] <- map\n",
         current_thread(), p->p_pid, p->p_comm));

    return error;
}
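/*
 * Caller-side sketch (hedged; dyld is the intended client).  The
 * private wrapper name __shared_region_map_np and the mapping layout
 * follow the SDK-era headers and may differ between releases; the
 * base address, size, and offset below are placeholders.
 */
#include <fcntl.h>
#include <unistd.h>
#include <mach/shared_region.h>
#include <mach/vm_prot.h>

extern int __shared_region_map_np(int fd, uint32_t count,
    const struct shared_file_mapping_np mappings[]);

static int
map_shared_cache_example(const char *cache_path)
{
    struct shared_file_mapping_np mapping;
    int fd, ret;

    /* must be a root-owned regular file on the process's root volume */
    fd = open(cache_path, O_RDONLY);
    if (fd < 0)
        return -1;

    /* map the first page of the cache read-only (placeholder values) */
    mapping.sfm_address = SHARED_REGION_BASE_X86_64;
    mapping.sfm_size = 0x1000;
    mapping.sfm_file_offset = 0;
    mapping.sfm_max_prot = VM_PROT_READ;
    mapping.sfm_init_prot = VM_PROT_READ;

    ret = __shared_region_map_np(fd, 1, &mapping);
    close(fd);  /* the kernel copies the mappings in; fd is no longer needed */
    return ret;
}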
int
vm_swapfile_preallocate(vnode_t vp, uint64_t *size)
{
    int error = 0;
    uint64_t file_size = 0;
    vfs_context_t ctx = NULL;

    ctx = vfs_context_current();

#if CONFIG_PROTECT
    {
#if 0	// <rdar://11771612>
        if ((error = cp_vnode_setclass(vp, PROTECTION_CLASS_F))) {
            if (config_protect_bug) {
                printf("swap protection class set failed with %d\n", error);
            } else {
                panic("swap protection class set failed with %d\n", error);
            }
        }
#endif
        /* initialize content protection keys manually */
        if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) {
            printf("Content Protection key failure on swap: %d\n", error);
            vnode_put(vp);
            vp = NULL;
            goto done;
        }
    }
#endif

    /*
     * This check exists because dynamic_pager creates the 1st swapfile,
     * swapfile0, for us from user-space in a supported manner (with
     * IO_NOZEROFILL etc).
     *
     * If dynamic_pager, in the future, discontinues creating that file,
     * then we need to change this check to a panic / assert or return
     * an error.  That's because we can't be sure if the file has been
     * created correctly.
     */

    if ((error = vnode_size(vp, (off_t *) &file_size, ctx)) != 0) {
        printf("vnode_size (existing files) for swap files failed: %d\n",
            error);
        goto done;
    } else {
        if (file_size == 0) {
            error = vnode_setsize(vp, *size, IO_NOZEROFILL, ctx);
            if (error) {
                printf("vnode_setsize for swap files failed: %d\n", error);
                goto done;
            }
        } else {
            *size = file_size;
        }
    }

    vnode_lock_spin(vp);
    SET(vp->v_flag, VSWAP);
    vnode_unlock(vp);
done:
    return error;
}
int
vm_swapfile_preallocate(vnode_t vp, uint64_t *size, boolean_t *pin)
{
    int error = 0;
    uint64_t file_size = 0;
    vfs_context_t ctx = NULL;

    ctx = vfs_context_current();

#if CONFIG_PROTECT
    {
#if 0	// <rdar://11771612>
        if ((error = cp_vnode_setclass(vp, PROTECTION_CLASS_F))) {
            if (config_protect_bug) {
                printf("swap protection class set failed with %d\n", error);
            } else {
                panic("swap protection class set failed with %d\n", error);
            }
        }
#endif
        /* initialize content protection keys manually */
        if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) {
            printf("Content Protection key failure on swap: %d\n", error);
            vnode_put(vp);
            vp = NULL;
            goto done;
        }
    }
#endif

    error = vnode_setsize(vp, *size, IO_NOZEROFILL, ctx);
    if (error) {
        printf("vnode_setsize for swap files failed: %d\n", error);
        goto done;
    }

    error = vnode_size(vp, (off_t *) &file_size, ctx);
    if (error) {
        printf("vnode_size (new file) for swap file failed: %d\n", error);
        goto done;
    }
    assert(file_size == *size);

    if (pin != NULL && *pin != FALSE) {
        assert(vnode_tag(vp) == VT_HFS);
        error = hfs_pin_vnode(VTOHFS(vp), vp,
            HFS_PIN_IT | HFS_DATALESS_PIN, NULL, ctx);
        if (error) {
            printf("hfs_pin_vnode for swap files failed: %d\n", error);
            /* this is not fatal, carry on with files wherever they landed */
            *pin = FALSE;
            error = 0;
        }
    }

    vnode_lock_spin(vp);
    SET(vp->v_flag, VSWAP);
    vnode_unlock(vp);
done:
    return error;
}
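/*
 * Kernel caller sketch (an assumption modeled on the VM swap path,
 * not a verbatim excerpt): the swapfile-create code opens the file
 * with vnode_open() and then preallocates it, honoring the pin hint.
 * The path and segment size are placeholders.
 */
static void
swapfile_create_example(void)
{
    vnode_t vp = NULL;
    uint64_t size = 64ULL * 1024 * 1024;  /* placeholder segment size */
    boolean_t pin = FALSE;

    if (vnode_open("/private/var/vm/swapfile1",
        (O_CREAT | O_TRUNC | FREAD | FWRITE), S_IRUSR | S_IWUSR,
        0, &vp, vfs_context_current()))
        return;

    if (vm_swapfile_preallocate(vp, &size, &pin) == 0) {
        /* on success the vnode is marked VSWAP and sized to `size` */
    }
    /* keep the file open for pager I/O, but drop the iocount */
    vnode_put(vp);
}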
/*
 * This routine exists to support the load_dylinker().
 *
 * This routine has its own, separate, understanding of the FAT file format,
 * which is terrifically unfortunate.
 */
static
load_return_t
get_macho_vnode(
    char *path,
    integer_t archbits,
    struct mach_header *mach_header,
    off_t *file_offset,
    off_t *macho_size,
    struct vnode **vpp
)
{
    struct vnode *vp;
    vfs_context_t ctx = vfs_context_current();
    proc_t p = vfs_context_proc(ctx);
    kauth_cred_t kerncred;
    struct nameidata nid, *ndp;
    boolean_t is_fat;
    struct fat_arch fat_arch;
    int error = LOAD_SUCCESS;
    int resid;
    union {
        struct mach_header mach_header;
        struct fat_header fat_header;
        char pad[512];
    } header;
    off_t fsize = (off_t)0;
    int err2;

    /*
     * Capture the kernel credential for use in the actual read of the
     * file, since the user doing the execution may have execute rights
     * but not read rights, but to exec something, we have to either map
     * or read it into the new process address space, which requires
     * read rights.  This is to deal with lack of common credential
     * serialization code which would treat NOCRED as "serialize 'root'".
     */
    kerncred = vfs_context_ucred(vfs_context_kernel());

    ndp = &nid;

    /* init the namei data to point the file user's program name */
    NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE32,
        CAST_USER_ADDR_T(path), ctx);

    if ((error = namei(ndp)) != 0) {
        if (error == ENOENT) {
            error = LOAD_ENOENT;
        } else {
            error = LOAD_FAILURE;
        }
        return (error);
    }
    nameidone(ndp);
    vp = ndp->ni_vp;

    /* check for regular file */
    if (vp->v_type != VREG) {
        error = LOAD_PROTECT;
        goto bad1;
    }

    /* get size */
    if ((error = vnode_size(vp, &fsize, ctx)) != 0) {
        error = LOAD_FAILURE;
        goto bad1;
    }

    /* Check mount point */
    if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
        error = LOAD_PROTECT;
        goto bad1;
    }

    /* check access */
    if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_EXECUTE, ctx)) != 0) {
        error = LOAD_PROTECT;
        goto bad1;
    }

    /* try to open it */
    if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
        error = LOAD_PROTECT;
        goto bad1;
    }

    if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)&header, sizeof(header), 0,
        UIO_SYSSPACE32, IO_NODELOCKED, kerncred, &resid, p)) != 0) {
        error = LOAD_IOERROR;
        goto bad2;
    }

    if (header.mach_header.magic == MH_MAGIC ||
        header.mach_header.magic == MH_MAGIC_64)
        is_fat = FALSE;
    else if (header.fat_header.magic == FAT_MAGIC ||
             header.fat_header.magic == FAT_CIGAM)
        is_fat = TRUE;
    else {
        error = LOAD_BADMACHO;
        goto bad2;
    }

    if (is_fat) {
        /* Look up our architecture in the fat file. */
        error = fatfile_getarch_with_bits(vp, archbits,
            (vm_offset_t)(&header.fat_header), &fat_arch);
        if (error != LOAD_SUCCESS)
            goto bad2;

        /* Read the Mach-O header out of it */
        error = vn_rdwr(UIO_READ, vp, (caddr_t)&header.mach_header,
            sizeof(header.mach_header), fat_arch.offset,
            UIO_SYSSPACE32, IO_NODELOCKED, kerncred, &resid, p);
        if (error) {
            error = LOAD_IOERROR;
            goto bad2;
        }

        /* Is this really a Mach-O? */
        if (header.mach_header.magic != MH_MAGIC &&
            header.mach_header.magic != MH_MAGIC_64) {
            error = LOAD_BADMACHO;
            goto bad2;
        }

        *file_offset = fat_arch.offset;
        *macho_size = fat_arch.size;
    } else {
        /*
         * Force get_macho_vnode() to fail if the architecture bits
         * do not match the expected architecture bits.  This in
         * turn causes load_dylinker() to fail for the same reason,
         * so it ensures the dynamic linker and the binary are in
         * lock-step.  This is potentially bad, if we ever add to
         * the CPU_ARCH_* bits any bits that are desirable but not
         * required, since the dynamic linker might work, but we will
         * refuse to load it because of this check.
         */
        if ((cpu_type_t)(header.mach_header.cputype & CPU_ARCH_MASK) !=
            archbits) {
            /* close and release the vnode rather than leaking it */
            error = LOAD_BADARCH;
            goto bad2;
        }

        *file_offset = 0;
        *macho_size = fsize;
    }

    *mach_header = header.mach_header;
    *vpp = vp;

    ubc_setsize(vp, fsize);

    return (error);

bad2:
    err2 = VNOP_CLOSE(vp, FREAD, ctx);
    vnode_put(vp);
    return (error);

bad1:
    vnode_put(vp);
    return (error);
}
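/*
 * Caller sketch (hedged): load_dylinker() is the sole client.  This
 * condenses how it resolves the dylinker path to a vnode and releases
 * the references get_macho_vnode() hands back; names are placeholders.
 */
static load_return_t
load_dylinker_lookup_example(char *path, integer_t archbits)
{
    struct mach_header header;
    struct vnode *vp = NULL;
    off_t file_offset = 0;
    off_t macho_size = 0;
    load_return_t ret;

    ret = get_macho_vnode(path, archbits, &header,
        &file_offset, &macho_size, &vp);
    if (ret != LOAD_SUCCESS)
        return ret;

    /* ... parse_machfile() would consume the header/offsets here ... */

    /* the vnode comes back opened FREAD and with an iocount held */
    (void) VNOP_CLOSE(vp, FREAD, vfs_context_current());
    vnode_put(vp);
    return ret;
}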