/*
 * VnodeToIno -- return the inode/file number backing cache vnode 'avp'.
 *
 * The lookup strategy depends on the build target and cache filesystem:
 *   - AFS_DARWIN80_ENV: query va_fileid via the vnode_getattr() KPI.
 *   - pre-Darwin80, UFS cache: read i_number straight from the inode.
 *   - pre-Darwin80, HFS cache without VTOH: fall back to VOP_GETATTR.
 *   - pre-Darwin80, HFS cache with VTOH: read the hfsnode's file id.
 * Panics rather than returning a bogus number on any failure, since a
 * wrong inode number would corrupt cache bookkeeping.
 */
ino_t
VnodeToIno(vnode_t avp)
{
    unsigned long ret;

#ifndef AFS_DARWIN80_ENV
    if (afs_CacheFSType == AFS_APPL_UFS_CACHE) {
	/* UFS cache: the inode number is directly available. */
	struct inode *ip = VTOI(avp);
	ret = ip->i_number;
    } else if (afs_CacheFSType == AFS_APPL_HFS_CACHE) {
#endif
#if defined(AFS_DARWIN80_ENV)
	struct vattr va;
	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_fileid);
	if (vnode_getattr(avp, &va, afs_osi_ctxtp))
	    osi_Panic("VOP_GETATTR failed in VnodeToIno\n");
	/* getattr may succeed yet not fill va_fileid; treat that as fatal too */
	if (!VATTR_ALL_SUPPORTED(&va))
	    osi_Panic("VOP_GETATTR unsupported fileid in VnodeToIno\n");
	ret = va.va_fileid;
#elif !defined(VTOH)
	/* No hfsnode accessor available: go through the generic attr path. */
	struct vattr va;
	if (VOP_GETATTR(avp, &va, &afs_osi_cred, current_proc()))
	    osi_Panic("VOP_GETATTR failed in VnodeToIno\n");
	ret = va.va_fileid;
#else
	/* HFS with private headers: read the file id off the hfsnode. */
	struct hfsnode *hp = VTOH(avp);
	ret = H_FILEID(hp);
#endif
#ifndef AFS_DARWIN80_ENV
    } else
	osi_Panic("VnodeToIno called before cacheops initialized\n");
#endif
    return ret;
}
/*
 * VnodeToDev -- return the device id of the filesystem holding cache
 * vnode 'avp'.  Mirrors VnodeToIno's per-build lookup strategy:
 *   - AFS_DARWIN80_ENV: query va_fsid via vnode_getattr().
 *   - pre-Darwin80, UFS cache: read i_dev from the inode.
 *   - pre-Darwin80, HFS without VTOH: VOP_GETATTR fallback.
 *   - pre-Darwin80, HFS with VTOH: read the hfsnode's device.
 * Panics on failure; a wrong device id would corrupt cache bookkeeping.
 */
dev_t
VnodeToDev(vnode_t avp)
{
#ifndef AFS_DARWIN80_ENV
    if (afs_CacheFSType == AFS_APPL_UFS_CACHE) {
	struct inode *ip = VTOI(avp);
	return ip->i_dev;
    } else if (afs_CacheFSType == AFS_APPL_HFS_CACHE) {
#endif
#if defined(AFS_DARWIN80_ENV)
	struct vattr va;
	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_fsid);
	if (vnode_getattr(avp, &va, afs_osi_ctxtp))
	    osi_Panic("VOP_GETATTR failed in VnodeToDev\n");
	/* BUG FIX: panic message previously said "VnodeToIno" here, which
	 * would misdirect anyone debugging this path. */
	if (!VATTR_ALL_SUPPORTED(&va))
	    osi_Panic("VOP_GETATTR unsupported fsid in VnodeToDev\n");
	return va.va_fsid;	/* XXX they say it's the dev.... */
#elif !defined(VTOH)
	struct vattr va;
	if (VOP_GETATTR(avp, &va, &afs_osi_cred, current_proc()))
	    osi_Panic("VOP_GETATTR failed in VnodeToDev\n");
	return va.va_fsid;	/* XXX they say it's the dev.... */
#else
	struct hfsnode *hp = VTOH(avp);
	return H_DEV(hp);
#endif
#ifndef AFS_DARWIN80_ENV
    } else
	osi_Panic("VnodeToDev called before cacheops initialized\n");
    /* NOTREACHED -- osi_Panic does not return */
#endif
}
/* get lvp's parent, if possible, even if it isn't set.
 *
 * lvp is expected to have an iocount before and after this call.
 *
 * If a dvpp is populated the returned vnode has an iocount.
 *
 * Returns 0 on success (with *dvpp set), or an errno.  ENOENT means the
 * parent is neither cached nor derivable via VFS_VGET. */
static int
null_get_lowerparent(vnode_t lvp, vnode_t * dvpp, vfs_context_t ctx)
{
	int error = 0;
	struct vnode_attr va;
	mount_t mp = vnode_mount(lvp);
	vnode_t dvp = vnode_parent(lvp);

	if (dvp) {
		/* Cached parent: just take an iocount on it. */
		error = vnode_get(dvp);
		goto end;
	}

	error = ENOENT;
	/* Without MNTK_PATH_FROM_ID the lower fs can't map an id back to a
	 * vnode, so there is no way to recover the parent. */
	if (!(mp->mnt_kern_flag & MNTK_PATH_FROM_ID)) {
		goto end;
	}

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_parentid);

	error = vnode_getattr(lvp, &va, ctx);
	if (error) {
		goto end;
	}
	/* BUG FIX: previously a successful getattr that did not support
	 * va_parentid fell through with error == 0, handing the caller a
	 * "success" with *dvpp set to NULL.  Report ENOENT instead. */
	if (!VATTR_IS_SUPPORTED(&va, va_parentid)) {
		error = ENOENT;
		goto end;
	}

	/* Map the parent id back to a vnode; on success dvp carries an
	 * iocount as required by our contract. */
	error = VFS_VGET(mp, (ino64_t)va.va_parentid, &dvp, ctx);

end:
	if (error == 0) {
		*dvpp = dvp;
	}
	return error;
}
/*
 * afs_osi_Stat -- fill 'astat' (size, mtime, atime) for the open cache
 * file 'afile'.
 *
 * Serializes against other osi stat/size operations via afs_xosi, and
 * drops the global AFS lock around the (potentially sleeping) getattr
 * call.  NOTE(review): the GUNLOCK/getattr/GLOCK ordering is deliberate;
 * do not reorder.
 *
 * Returns 0 on success or an errno from the getattr path.
 */
int
afs_osi_Stat(struct osi_file *afile, struct osi_stat *astat)
{
    afs_int32 code;
    struct vattr tvattr;
    AFS_STATCNT(osi_Stat);
    ObtainWriteLock(&afs_xosi, 320);
    AFS_GUNLOCK();
#ifdef AFS_DARWIN80_ENV
    /* Darwin 8+: request exactly the attributes we consume below. */
    VATTR_INIT(&tvattr);
    VATTR_WANTED(&tvattr, va_size);
    VATTR_WANTED(&tvattr, va_blocksize);
    VATTR_WANTED(&tvattr, va_mtime);
    VATTR_WANTED(&tvattr, va_atime);
    code = vnode_getattr(afile->vnode, &tvattr, afs_osi_ctxtp);
    /* A getattr that "succeeds" without filling every requested field
     * would leave us copying garbage below -- treat it as a failure. */
    if (code == 0 && !VATTR_ALL_SUPPORTED(&tvattr))
	code = EINVAL;
#else
    code = VOP_GETATTR(afile->vnode, &tvattr, &afs_osi_cred, current_proc());
#endif
    AFS_GLOCK();
    if (code == 0) {
	astat->size = tvattr.va_size;
	astat->mtime = tvattr.va_mtime.tv_sec;
	astat->atime = tvattr.va_atime.tv_sec;
    }
    ReleaseWriteLock(&afs_xosi);
    return code;
}
/*
 * vnode_size() is not exported by the kernel, so provide a local
 * equivalent: fetch the data-fork size of 'vp' into *sizep.
 * Returns 0 on success or the errno from vnode_getattr(); *sizep is
 * untouched on failure.
 */
static errno_t
vnode_size(vnode_t vp, off_t *sizep, vfs_context_t ctx)
{
	struct vnode_attr attrs;
	errno_t err;

	VATTR_INIT(&attrs);
	VATTR_WANTED(&attrs, va_data_size);

	err = vnode_getattr(vp, &attrs, ctx);
	if (err != 0) {
		return err;
	}

	*sizep = attrs.va_data_size;
	return 0;
}
/*
 * Compatibility shim implementing the legacy VOP_GETATTR interface on
 * top of the vnode_getattr() KPI.  The 'flags', 'x3' and 'x4' arguments
 * are accepted for signature compatibility but ignored.  A transient
 * vfs_context is created for the call and released before returning.
 */
int
VOP_GETATTR(struct vnode *vp, vattr_t *vap, int flags, void *x3, void *x4)
{
	vfs_context_t ctx = vfs_context_create((vfs_context_t)0);
	int result = vnode_getattr(vp, vap, ctx);

	(void)vfs_context_rele(ctx);
	return result;
}
bool VNodeDiskDeviceClass::setupVNode() { int vapError = -1; struct vnode_attr vap; if (m_vnode != NULL) return true; vfs_context_t vfsContext = vfs_context_create((vfs_context_t) 0); int vnodeError = vnode_open(m_filePath->getCStringNoCopy(), (FREAD | FWRITE), 0, 0, &m_vnode, vfsContext); if (vnodeError || m_vnode == NULL) { IOLog("Error when opening file %s: error %d\n", m_filePath->getCStringNoCopy(), vnodeError); goto failure; } if (!vnode_isreg(m_vnode)) { IOLog("Error when opening file %s: not a regular file\n", m_filePath->getCStringNoCopy()); vnode_close(m_vnode, (FREAD | FWRITE), vfsContext); goto failure; } VATTR_INIT(&vap); VATTR_WANTED(&vap, va_data_size); vapError = vnode_getattr(m_vnode, &vap, vfsContext); if (vapError) { IOLog("Error when retrieving vnode's attributes with error code %d\n", vapError); goto failure; } if (vap.va_data_size < m_blockSize * m_blockNum) { IOLog("Error file %s is too small, actual size is %llu\n", m_filePath->getCStringNoCopy(), vap.va_data_size); goto failure; } vfs_context_rele(vfsContext); return true; failure: vfs_context_rele(vfsContext); return false; }
/*
 * MAC policy hook fired when a process's credential label is updated by
 * execve.  Captures a process-start event (argv/envv snippets, ids,
 * file metadata, exec path) into the osquery circular queue.
 *
 * Always returns 0: this is an observer, never a veto.
 */
static int process_cred_label_update_execvew(kauth_cred_t old_cred,
                                             kauth_cred_t new_cred,
                                             struct proc *p,
                                             struct vnode *vp,
                                             off_t offset,
                                             struct vnode *scriptvp,
                                             struct label *vnodelabel,
                                             struct label *scriptvnodelabel,
                                             struct label *execlabel,
                                             u_int *csflags,
                                             void *macpolicyattr,
                                             size_t macpolicyattrlen,
                                             int *disjointp) {
  int path_len = MAXPATHLEN;

  /* Only regular files can be exec'd images we care about. */
  if (!vnode_isreg(vp)) {
    goto error_exit;
  }

  // Determine address of image_params based off of csflags pointer. (HACKY)
  // NOTE(review): this assumes csflags is the ip_csflags member of a live
  // struct image_params -- true for this MAC hook, but brittle across
  // kernel versions; verify the struct layout when rebasing.
  struct image_params *img =
      (struct image_params *)((char *)csflags -
                              offsetof(struct image_params, ip_csflags));

  // Find the length of arg and env we will copy.
  // The vectors are contiguous: [ip_startargv, ip_endargv) then
  // [ip_endargv, ip_endenvv); each is clamped to MAX_VECTOR_LENGTH.
  size_t arg_length =
      MIN(MAX_VECTOR_LENGTH, img->ip_endargv - img->ip_startargv);
  size_t env_length =
      MIN(MAX_VECTOR_LENGTH, img->ip_endenvv - img->ip_endargv);

  /* Reserve a variable-size slot in the queue; the flexible_data tail
   * holds argv followed by envv. */
  osquery_process_event_t *e =
      (osquery_process_event_t *)osquery_cqueue_reserve(
          cqueue,
          OSQUERY_PROCESS_EVENT,
          sizeof(osquery_process_event_t) + arg_length + env_length);
  if (!e) {
    /* Queue full: drop the event silently. */
    goto error_exit;
  }

  // Copy the arg and env vectors.
  e->argv_offset = 0;
  e->envv_offset = arg_length;
  e->arg_length = arg_length;
  e->env_length = env_length;
  memcpy(&(e->flexible_data[e->argv_offset]), img->ip_startargv, arg_length);
  memcpy(&(e->flexible_data[e->envv_offset]), img->ip_endargv, env_length);

  e->actual_argc = img->ip_argc;
  e->actual_envc = img->ip_envc;

  // Calculate our argc and envc based on the number of null bytes we find
  // in the buffer (the copies may have been truncated by the clamp above).
  e->argc = MIN(e->actual_argc,
                str_num(&(e->flexible_data[e->argv_offset]), arg_length));
  e->envc = MIN(e->actual_envc,
                str_num(&(e->flexible_data[e->envv_offset]), env_length));

  e->pid = proc_pid(p);
  e->ppid = proc_ppid(p);

  /* Defaults in case the getattr below fails: ids 0, mode -1. */
  e->owner_uid = 0;
  e->owner_gid = 0;
  e->mode = -1;
  vfs_context_t context = vfs_context_create(NULL);
  if (context) {
    struct vnode_attr vattr = {0};
    VATTR_INIT(&vattr);
    VATTR_WANTED(&vattr, va_uid);
    VATTR_WANTED(&vattr, va_gid);
    VATTR_WANTED(&vattr, va_mode);
    VATTR_WANTED(&vattr, va_create_time);
    VATTR_WANTED(&vattr, va_access_time);
    VATTR_WANTED(&vattr, va_modify_time);
    VATTR_WANTED(&vattr, va_change_time);

    if (vnode_getattr(vp, &vattr, context) == 0) {
      e->owner_uid = vattr.va_uid;
      e->owner_gid = vattr.va_gid;
      e->mode = vattr.va_mode;
      e->create_time = vattr.va_create_time.tv_sec;
      e->access_time = vattr.va_access_time.tv_sec;
      e->modify_time = vattr.va_modify_time.tv_sec;
      e->change_time = vattr.va_change_time.tv_sec;
    }
    vfs_context_rele(context);
  }

  /* Real and effective ids from the post-exec credential. */
  e->uid = kauth_cred_getruid(new_cred);
  e->euid = kauth_cred_getuid(new_cred);
  e->gid = kauth_cred_getrgid(new_cred);
  e->egid = kauth_cred_getgid(new_cred);

  /* Path of the executed image (best effort; return value ignored). */
  vn_getpath(vp, e->path, &path_len);

  osquery_cqueue_commit(cqueue, e);
error_exit:
  return 0;
}
/*
 * Read the whole NVRAM backing file (FILE_NVRAM_PATH) into a freshly
 * IOMalloc'd buffer.
 *
 * On success *aBuffer owns the allocation (caller frees) and, if
 * aLength is non-NULL, *aLength holds its size.  On read failure the
 * buffer is freed and *aBuffer is set to NULL.
 *
 * BUG FIXES vs. original:
 *  - IOMalloc result was never checked;
 *  - the buffer leaked when vn_rdwr failed (and the message said
 *    "writing" for a read);
 *  - the vnode leaked on the not-a-regular-file branch.
 */
IOReturn FileNVRAM::read_buffer(char** aBuffer, uint64_t* aLength, vfs_context_t aCtx)
{
	IOReturn error = 0;

	struct vnode * vp;
	struct vnode_attr va;

	if (aCtx)
	{
		if ((error = vnode_open(FILE_NVRAM_PATH, (O_RDONLY | FREAD | O_NOFOLLOW), S_IRUSR, VNODE_LOOKUP_NOFOLLOW, &vp, aCtx)))
		{
			printf("failed opening vnode at path %s, errno %d\n", FILE_NVRAM_PATH, error);

			return error;
		}
		else
		{
			/* NOTE(review): vnode_isreg() returns a boolean; comparing to
			 * VREG (== 1) happens to work but is fragile. */
			if ((error = vnode_isreg(vp)) == VREG)
			{
				VATTR_INIT(&va);
				VATTR_WANTED(&va, va_data_size);	/* size in bytes of the fork managed by current vnode */

				// Determine size of vnode
				if ((error = vnode_getattr(vp, &va, aCtx)))
				{
					printf("FileNVRAM.kext: Error, failed to determine file size of %s, errno %d.\n", FILE_NVRAM_PATH, error);
				}
				else
				{
					if (aLength)
					{
						*aLength = va.va_data_size;
					}

					*aBuffer = (char *)IOMalloc((size_t)va.va_data_size);
					int len = (int)va.va_data_size;

					if (*aBuffer == NULL)
					{
						// IOMalloc can fail; don't hand a NULL buffer to vn_rdwr
						printf("FileNVRAM.kext: Error, IOMalloc(%d) failed!\n", len);
						error = ENOMEM;
					}
					else if ((error = vn_rdwr(UIO_READ, vp, *aBuffer, len, 0, UIO_SYSSPACE, IO_NOCACHE|IO_NODELOCKED|IO_UNIT, vfs_context_ucred(aCtx), (int *) 0, vfs_context_proc(aCtx))))
					{
						printf("FileNVRAM.kext: Error, reading from vnode(%s) failed with error %d!\n", FILE_NVRAM_PATH, error);
						// don't leak the buffer on a failed read
						IOFree(*aBuffer, (size_t)va.va_data_size);
						*aBuffer = NULL;
					}
				}

				if ((error = vnode_close(vp, 0, aCtx)))
				{
					printf("FileNVRAM.kext: Error, vnode_close(%s) failed with error %d!\n", FILE_NVRAM_PATH, error);
				}
			}
			else
			{
				printf("FileNVRAM.kext: Error, vnode_isreg(%s) failed with error %d!\n", FILE_NVRAM_PATH, error);
				// was leaked before: close the vnode we successfully opened
				(void)vnode_close(vp, 0, aCtx);
			}
		}
	}
	else
	{
		printf("FileNVRAM.kext: aCtx == NULL!\n");
		error = 0xFFFF;	// EINVAL;
	}

	return error;
}
/*
 * shared_region_map_np()
 *
 * This system call is intended for dyld.
 *
 * dyld uses this to map a shared cache file into a shared region.
 * This is usually done only the first time a shared cache is needed.
 * Subsequent processes will just use the populated shared region without
 * requiring any further setup.
 *
 * Validation performed before mapping: the fd must be a readable vnode
 * of a regular file, on the process's root volume, owned by root.
 * On success the file is ubc_map'd (pinning the vnode) and its atime is
 * refreshed.  All resources are released through the single 'done' exit.
 */
int
shared_region_map_np(
	struct proc				*p,
	struct shared_region_map_np_args	*uap,
	__unused int				*retvalp)
{
	int				error;
	kern_return_t			kr;
	int				fd;
	struct fileproc			*fp;
	struct vnode			*vp, *root_vp;
	struct vnode_attr		va;
	off_t				fs;
	memory_object_size_t		file_size;
	user_addr_t			user_mappings;
	struct shared_file_mapping_np	*mappings;
#define SFM_MAX_STACK	8
	struct shared_file_mapping_np	stack_mappings[SFM_MAX_STACK];
	unsigned int			mappings_count;
	vm_size_t			mappings_size;
	memory_object_control_t		file_control;
	struct vm_shared_region		*shared_region;

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: %p [%d(%s)] -> map\n",
		 current_thread(), p->p_pid, p->p_comm));

	/* Initialize everything the 'done' path inspects. */
	shared_region = NULL;
	mappings_count = 0;
	mappings_size = 0;
	mappings = NULL;
	fp = NULL;
	vp = NULL;

	/* get file descriptor for shared region cache file */
	fd = uap->fd;

	/* get file structure from file descriptor */
	error = fp_lookup(p, fd, &fp, 0);
	if (error) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map: "
			 "fd=%d lookup failed (error=%d)\n",
			 current_thread(), p->p_pid, p->p_comm, fd, error));
		goto done;
	}

	/* make sure we're attempting to map a vnode */
	if (fp->f_fglob->fg_type != DTYPE_VNODE) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map: "
			 "fd=%d not a vnode (type=%d)\n",
			 current_thread(), p->p_pid, p->p_comm,
			 fd, fp->f_fglob->fg_type));
		error = EINVAL;
		goto done;
	}

	/* we need at least read permission on the file */
	if (! (fp->f_fglob->fg_flag & FREAD)) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map: "
			 "fd=%d not readable\n",
			 current_thread(), p->p_pid, p->p_comm, fd));
		error = EPERM;
		goto done;
	}

	/* get vnode from file structure */
	error = vnode_getwithref((vnode_t) fp->f_fglob->fg_data);
	if (error) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map: "
			 "fd=%d getwithref failed (error=%d)\n",
			 current_thread(), p->p_pid, p->p_comm, fd, error));
		goto done;
	}
	vp = (struct vnode *) fp->f_fglob->fg_data;

	/* make sure the vnode is a regular file */
	if (vp->v_type != VREG) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map(%p:'%s'): "
			 "not a file (type=%d)\n",
			 current_thread(), p->p_pid, p->p_comm,
			 vp, vp->v_name, vp->v_type));
		error = EINVAL;
		goto done;
	}

	/* make sure vnode is on the process's root volume */
	root_vp = p->p_fd->fd_rdir;
	if (root_vp == NULL) {
		/* not chroot'ed: use the system-wide root */
		root_vp = rootvnode;
	}
	if (vp->v_mount != root_vp->v_mount) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map(%p:'%s'): "
			 "not on process's root volume\n",
			 current_thread(), p->p_pid, p->p_comm,
			 vp, vp->v_name));
		error = EPERM;
		goto done;
	}

	/* make sure vnode is owned by "root" */
	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_uid);
	error = vnode_getattr(vp, &va, vfs_context_current());
	if (error) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map(%p:'%s'): "
			 "vnode_getattr(%p) failed (error=%d)\n",
			 current_thread(), p->p_pid, p->p_comm,
			 vp, vp->v_name, vp, error));
		goto done;
	}
	if (va.va_uid != 0) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map(%p:'%s'): "
			 "owned by uid=%d instead of 0\n",
			 current_thread(), p->p_pid, p->p_comm,
			 vp, vp->v_name, va.va_uid));
		error = EPERM;
		goto done;
	}

	/* get vnode size */
	error = vnode_size(vp, &fs, vfs_context_current());
	if (error) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map(%p:'%s'): "
			 "vnode_size(%p) failed (error=%d)\n",
			 current_thread(), p->p_pid, p->p_comm,
			 vp, vp->v_name, vp, error));
		goto done;
	}
	file_size = fs;

	/* get the file's memory object handle */
	file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map(%p:'%s'): "
			 "no memory object\n",
			 current_thread(), p->p_pid, p->p_comm,
			 vp, vp->v_name));
		error = EINVAL;
		goto done;
	}

	/* get the list of mappings the caller wants us to establish */
	mappings_count = uap->count;	/* number of mappings */
	mappings_size = (vm_size_t) (mappings_count * sizeof (mappings[0]));
	if (mappings_count == 0) {
		SHARED_REGION_TRACE_INFO(
			("shared_region: %p [%d(%s)] map(%p:'%s'): "
			 "no mappings\n",
			 current_thread(), p->p_pid, p->p_comm,
			 vp, vp->v_name));
		error = 0;	/* no mappings: we're done ! */
		goto done;
	} else if (mappings_count <= SFM_MAX_STACK) {
		/* small request: use the on-stack array, no allocation */
		mappings = &stack_mappings[0];
	} else {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map(%p:'%s'): "
			 "too many mappings (%d)\n",
			 current_thread(), p->p_pid, p->p_comm,
			 vp, vp->v_name, mappings_count));
		error = EINVAL;
		goto done;
	}

	user_mappings = uap->mappings;	/* the mappings, in user space */
	error = copyin(user_mappings,
		       mappings,
		       mappings_size);
	if (error) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map(%p:'%s'): "
			 "copyin(0x%llx, %d) failed (error=%d)\n",
			 current_thread(), p->p_pid, p->p_comm,
			 vp, vp->v_name, (uint64_t)user_mappings, mappings_count, error));
		goto done;
	}

	/* get the process's shared region (setup in vm_map_exec()) */
	shared_region = vm_shared_region_get(current_task());
	if (shared_region == NULL) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map(%p:'%s'): "
			 "no shared region\n",
			 current_thread(), p->p_pid, p->p_comm,
			 vp, vp->v_name));
		goto done;
	}

	/* map the file into that shared region's submap */
	kr = vm_shared_region_map_file(shared_region,
				       mappings_count,
				       mappings,
				       file_control,
				       file_size,
				       (void *) p->p_fd->fd_rdir);
	if (kr != KERN_SUCCESS) {
		SHARED_REGION_TRACE_ERROR(
			("shared_region: %p [%d(%s)] map(%p:'%s'): "
			 "vm_shared_region_map_file() failed kr=0x%x\n",
			 current_thread(), p->p_pid, p->p_comm,
			 vp, vp->v_name, kr));
		/* translate Mach status into an errno for user space */
		switch (kr) {
		case KERN_INVALID_ADDRESS:
			error = EFAULT;
			break;
		case KERN_PROTECTION_FAILURE:
			error = EPERM;
			break;
		case KERN_NO_SPACE:
			error = ENOMEM;
			break;
		case KERN_FAILURE:
		case KERN_INVALID_ARGUMENT:
		default:
			error = EINVAL;
			break;
		}
		goto done;
	}

	/*
	 * The mapping was successful.  Let the buffer cache know
	 * that we've mapped that file with these protections.  This
	 * prevents the vnode from getting recycled while it's mapped.
	 */
	(void) ubc_map(vp, VM_PROT_READ);
	error = 0;

	/* update the vnode's access time */
	if (! (vnode_vfsvisflags(vp) & MNT_NOATIME)) {
		VATTR_INIT(&va);
		nanotime(&va.va_access_time);
		VATTR_SET_ACTIVE(&va, va_access_time);
		vnode_setattr(vp, &va, vfs_context_current());
	}

	if (p->p_flag & P_NOSHLIB) {
		/* signal that this process is now using split libraries */
		OSBitAndAtomic(~((uint32_t)P_NOSHLIB), (UInt32 *)&p->p_flag);
	}

done:
	if (vp != NULL) {
		/*
		 * release the vnode...
		 * ubc_map() still holds it for us in the non-error case
		 */
		(void) vnode_put(vp);
		vp = NULL;
	}
	if (fp != NULL) {
		/* release the file descriptor */
		fp_drop(p, fd, fp, 0);
		fp = NULL;
	}
	if (shared_region != NULL) {
		vm_shared_region_deallocate(shared_region);
	}

	SHARED_REGION_TRACE_DEBUG(
		("shared_region: %p [%d(%s)] <- map\n",
		 current_thread(), p->p_pid, p->p_comm));

	return error;
}
struct kern_direct_file_io_ref_t * kern_open_file_for_direct_io(const char * name, kern_get_file_extents_callback_t callback, void * callback_ref, dev_t * partition_device_result, dev_t * image_device_result, uint64_t * partitionbase_result, uint64_t * maxiocount_result, uint32_t * oflags, off_t offset, caddr_t addr, vm_size_t len) { struct kern_direct_file_io_ref_t * ref; proc_t p; struct vnode_attr va; int error; off_t f_offset; uint64_t fileblk; size_t filechunk; uint64_t physoffset; dev_t device; dev_t target = 0; int isssd = 0; uint32_t flags = 0; uint32_t blksize; off_t maxiocount, count; boolean_t locked = FALSE; int (*do_ioctl)(void * p1, void * p2, u_long theIoctl, caddr_t result); void * p1 = NULL; void * p2 = NULL; error = EFAULT; ref = (struct kern_direct_file_io_ref_t *) kalloc(sizeof(struct kern_direct_file_io_ref_t)); if (!ref) { error = EFAULT; goto out; } bzero(ref, sizeof(*ref)); p = kernproc; ref->ctx = vfs_context_create(vfs_context_current()); if ((error = vnode_open(name, (O_CREAT | FWRITE), (0), 0, &ref->vp, ref->ctx))) goto out; if (addr && len) { if ((error = kern_write_file(ref, offset, addr, len))) goto out; } VATTR_INIT(&va); VATTR_WANTED(&va, va_rdev); VATTR_WANTED(&va, va_fsid); VATTR_WANTED(&va, va_data_size); VATTR_WANTED(&va, va_nlink); error = EFAULT; if (vnode_getattr(ref->vp, &va, ref->ctx)) goto out; kprintf("vp va_rdev major %d minor %d\n", major(va.va_rdev), minor(va.va_rdev)); kprintf("vp va_fsid major %d minor %d\n", major(va.va_fsid), minor(va.va_fsid)); kprintf("vp size %qd\n", va.va_data_size); if (ref->vp->v_type == VREG) { /* Don't dump files with links. */ if (va.va_nlink != 1) goto out; device = va.va_fsid; p1 = &device; p2 = p; do_ioctl = &file_ioctl; } else if ((ref->vp->v_type == VBLK) || (ref->vp->v_type == VCHR)) { /* Partition. */ device = va.va_rdev; p1 = ref->vp; p2 = ref->ctx; do_ioctl = &device_ioctl; } else { /* Don't dump to non-regular files. 
*/ error = EFAULT; goto out; } ref->device = device; // get block size error = do_ioctl(p1, p2, DKIOCGETBLOCKSIZE, (caddr_t) &ref->blksize); if (error) goto out; if (ref->vp->v_type == VREG) ref->filelength = va.va_data_size; else { error = do_ioctl(p1, p2, DKIOCGETBLOCKCOUNT, (caddr_t) &fileblk); if (error) goto out; ref->filelength = fileblk * ref->blksize; } // pin logical extents error = kern_ioctl_file_extents(ref, _DKIOCCSPINEXTENT, 0, ref->filelength); if (error && (ENOTTY != error)) goto out; ref->pinned = (error == 0); // generate the block list error = do_ioctl(p1, p2, DKIOCLOCKPHYSICALEXTENTS, NULL); if (error) goto out; locked = TRUE; f_offset = 0; while (f_offset < ref->filelength) { if (ref->vp->v_type == VREG) { filechunk = 1*1024*1024*1024; daddr64_t blkno; error = VNOP_BLOCKMAP(ref->vp, f_offset, filechunk, &blkno, &filechunk, NULL, 0, NULL); if (error) goto out; fileblk = blkno * ref->blksize; } else if ((ref->vp->v_type == VBLK) || (ref->vp->v_type == VCHR)) { fileblk = f_offset; filechunk = f_offset ? 
0 : ref->filelength; } physoffset = 0; while (physoffset < filechunk) { dk_physical_extent_t getphysreq; bzero(&getphysreq, sizeof(getphysreq)); getphysreq.offset = fileblk + physoffset; getphysreq.length = (filechunk - physoffset); error = do_ioctl(p1, p2, DKIOCGETPHYSICALEXTENT, (caddr_t) &getphysreq); if (error) goto out; if (!target) { target = getphysreq.dev; } else if (target != getphysreq.dev) { error = ENOTSUP; goto out; } callback(callback_ref, getphysreq.offset, getphysreq.length); physoffset += getphysreq.length; } f_offset += filechunk; } callback(callback_ref, 0ULL, 0ULL); if (ref->vp->v_type == VREG) p1 = ⌖ // get partition base error = do_ioctl(p1, p2, DKIOCGETBASE, (caddr_t) partitionbase_result); if (error) goto out; // get block size & constraints error = do_ioctl(p1, p2, DKIOCGETBLOCKSIZE, (caddr_t) &blksize); if (error) goto out; maxiocount = 1*1024*1024*1024; error = do_ioctl(p1, p2, DKIOCGETMAXBLOCKCOUNTREAD, (caddr_t) &count); if (error) count = 0; count *= blksize; if (count && (count < maxiocount)) maxiocount = count; error = do_ioctl(p1, p2, DKIOCGETMAXBLOCKCOUNTWRITE, (caddr_t) &count); if (error) count = 0; count *= blksize; if (count && (count < maxiocount)) maxiocount = count; error = do_ioctl(p1, p2, DKIOCGETMAXBYTECOUNTREAD, (caddr_t) &count); if (error) count = 0; if (count && (count < maxiocount)) maxiocount = count; error = do_ioctl(p1, p2, DKIOCGETMAXBYTECOUNTWRITE, (caddr_t) &count); if (error) count = 0; if (count && (count < maxiocount)) maxiocount = count; error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTBYTECOUNTREAD, (caddr_t) &count); if (error) count = 0; if (count && (count < maxiocount)) maxiocount = count; error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTBYTECOUNTWRITE, (caddr_t) &count); if (error) count = 0; if (count && (count < maxiocount)) maxiocount = count; kprintf("max io 0x%qx bytes\n", maxiocount); if (maxiocount_result) *maxiocount_result = maxiocount; error = do_ioctl(p1, p2, DKIOCISSOLIDSTATE, (caddr_t)&isssd); if 
(!error && isssd) flags |= kIOHibernateOptionSSD; if (partition_device_result) *partition_device_result = device; if (image_device_result) *image_device_result = target; if (flags) *oflags = flags; out: kprintf("kern_open_file_for_direct_io(%d)\n", error); if (error && locked) { p1 = &device; (void) do_ioctl(p1, p2, DKIOCUNLOCKPHYSICALEXTENTS, NULL); } if (error && ref) { if (ref->vp) { vnode_close(ref->vp, FWRITE, ref->ctx); ref->vp = NULLVP; } vfs_context_rele(ref->ctx); kfree(ref, sizeof(struct kern_direct_file_io_ref_t)); ref = NULL; } return(ref); }
static int nullfs_special_getattr(struct vnop_getattr_args * args) { mount_t mp = vnode_mount(args->a_vp); struct null_mount * null_mp = MOUNTTONULLMOUNT(mp); ino_t ino = NULL_ROOT_INO; struct vnode_attr covered_rootattr; vnode_t checkvp = null_mp->nullm_lowerrootvp; VATTR_INIT(&covered_rootattr); VATTR_WANTED(&covered_rootattr, va_uid); VATTR_WANTED(&covered_rootattr, va_gid); VATTR_WANTED(&covered_rootattr, va_create_time); VATTR_WANTED(&covered_rootattr, va_modify_time); VATTR_WANTED(&covered_rootattr, va_access_time); /* prefer to get this from the lower root vp, but if not (i.e. forced unmount * of lower fs) try the mount point covered vnode */ if (vnode_getwithvid(checkvp, null_mp->nullm_lowerrootvid)) { checkvp = vfs_vnodecovered(mp); if (checkvp == NULL) { return EIO; } } int error = vnode_getattr(checkvp, &covered_rootattr, args->a_context); vnode_put(checkvp); if (error) { /* we should have been able to get attributes fore one of the two choices so * fail if we didn't */ return error; } /* we got the attributes of the vnode we cover so plow ahead */ if (args->a_vp == null_mp->nullm_secondvp) { ino = NULL_SECOND_INO; } VATTR_RETURN(args->a_vap, va_type, vnode_vtype(args->a_vp)); VATTR_RETURN(args->a_vap, va_rdev, 0); VATTR_RETURN(args->a_vap, va_nlink, 3); /* always just ., .., and the child */ VATTR_RETURN(args->a_vap, va_total_size, 0); // hoping this is ok VATTR_RETURN(args->a_vap, va_data_size, 0); // hoping this is ok VATTR_RETURN(args->a_vap, va_data_alloc, 0); VATTR_RETURN(args->a_vap, va_iosize, vfs_statfs(mp)->f_iosize); VATTR_RETURN(args->a_vap, va_fileid, ino); VATTR_RETURN(args->a_vap, va_linkid, ino); VATTR_RETURN(args->a_vap, va_fsid, vfs_statfs(mp)->f_fsid.val[0]); // return the fsid of the mount point VATTR_RETURN(args->a_vap, va_filerev, 0); VATTR_RETURN(args->a_vap, va_gen, 0); VATTR_RETURN(args->a_vap, va_flags, UF_HIDDEN); /* mark our fake directories as hidden. 
People shouldn't be enocouraged to poke around in them */ if (ino == NULL_SECOND_INO) { VATTR_RETURN(args->a_vap, va_parentid, NULL_ROOT_INO); /* no parent at the root, so the only other vnode that goes through this path is second and its parent is 1.*/ } if (VATTR_IS_ACTIVE(args->a_vap, va_mode)) { /* force dr_xr_xr_x */ VATTR_RETURN(args->a_vap, va_mode, S_IFDIR | S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH); } if (VATTR_IS_ACTIVE(args->a_vap, va_uid)) { VATTR_RETURN(args->a_vap, va_uid, covered_rootattr.va_uid); } if (VATTR_IS_ACTIVE(args->a_vap, va_gid)) { VATTR_RETURN(args->a_vap, va_gid, covered_rootattr.va_gid); } if (VATTR_IS_ACTIVE(args->a_vap, va_create_time)) { VATTR_SET_SUPPORTED(args->a_vap, va_create_time); args->a_vap->va_create_time.tv_sec = covered_rootattr.va_create_time.tv_sec; args->a_vap->va_create_time.tv_nsec = covered_rootattr.va_create_time.tv_nsec; } if (VATTR_IS_ACTIVE(args->a_vap, va_modify_time)) { VATTR_SET_SUPPORTED(args->a_vap, va_modify_time); args->a_vap->va_modify_time.tv_sec = covered_rootattr.va_modify_time.tv_sec; args->a_vap->va_modify_time.tv_nsec = covered_rootattr.va_modify_time.tv_nsec; } if (VATTR_IS_ACTIVE(args->a_vap, va_access_time)) { VATTR_SET_SUPPORTED(args->a_vap, va_access_time); args->a_vap->va_modify_time.tv_sec = covered_rootattr.va_access_time.tv_sec; args->a_vap->va_modify_time.tv_nsec = covered_rootattr.va_access_time.tv_nsec; } return 0; }
struct kern_direct_file_io_ref_t * kern_open_file_for_direct_io(const char * name, uint32_t iflags, kern_get_file_extents_callback_t callback, void * callback_ref, off_t set_file_size, off_t fs_free_size, off_t write_file_offset, void * write_file_addr, size_t write_file_len, dev_t * partition_device_result, dev_t * image_device_result, uint64_t * partitionbase_result, uint64_t * maxiocount_result, uint32_t * oflags) { struct kern_direct_file_io_ref_t * ref; proc_t p; struct vnode_attr va; dk_apfs_wbc_range_t wbc_range; int error; off_t f_offset; uint64_t fileblk; size_t filechunk; uint64_t physoffset, minoffset; dev_t device; dev_t target = 0; int isssd = 0; uint32_t flags = 0; uint32_t blksize; off_t maxiocount, count, segcount, wbctotal; boolean_t locked = FALSE; int fmode, cmode; struct nameidata nd; u_int32_t ndflags; off_t mpFree; int (*do_ioctl)(void * p1, void * p2, u_long theIoctl, caddr_t result); void * p1 = NULL; void * p2 = NULL; error = EFAULT; ref = (struct kern_direct_file_io_ref_t *) kalloc(sizeof(struct kern_direct_file_io_ref_t)); if (!ref) { error = EFAULT; goto out; } bzero(ref, sizeof(*ref)); p = kernproc; ref->ctx = vfs_context_kernel(); fmode = (kIOPolledFileCreate & iflags) ? 
(O_CREAT | FWRITE) : FWRITE; cmode = S_IRUSR | S_IWUSR; ndflags = NOFOLLOW; NDINIT(&nd, LOOKUP, OP_OPEN, ndflags, UIO_SYSSPACE, CAST_USER_ADDR_T(name), ref->ctx); VATTR_INIT(&va); VATTR_SET(&va, va_mode, cmode); VATTR_SET(&va, va_dataprotect_flags, VA_DP_RAWENCRYPTED); VATTR_SET(&va, va_dataprotect_class, PROTECTION_CLASS_D); if ((error = vn_open_auth(&nd, &fmode, &va))) { kprintf("vn_open_auth(fmode: %d, cmode: %d) failed with error: %d\n", fmode, cmode, error); goto out; } ref->vp = nd.ni_vp; if (ref->vp->v_type == VREG) { vnode_lock_spin(ref->vp); SET(ref->vp->v_flag, VSWAP); vnode_unlock(ref->vp); } if (write_file_addr && write_file_len) { if ((error = kern_write_file(ref, write_file_offset, write_file_addr, write_file_len, IO_SKIP_ENCRYPTION))) { kprintf("kern_write_file() failed with error: %d\n", error); goto out; } } VATTR_INIT(&va); VATTR_WANTED(&va, va_rdev); VATTR_WANTED(&va, va_fsid); VATTR_WANTED(&va, va_devid); VATTR_WANTED(&va, va_data_size); VATTR_WANTED(&va, va_data_alloc); VATTR_WANTED(&va, va_nlink); error = EFAULT; if (vnode_getattr(ref->vp, &va, ref->ctx)) goto out; wbctotal = 0; mpFree = freespace_mb(ref->vp); mpFree <<= 20; kprintf("kern_direct_file(%s): vp size %qd, alloc %qd, mp free %qd, keep free %qd\n", name, va.va_data_size, va.va_data_alloc, mpFree, fs_free_size); if (ref->vp->v_type == VREG) { /* Don't dump files with links. */ if (va.va_nlink != 1) goto out; device = (VATTR_IS_SUPPORTED(&va, va_devid)) ? 
va.va_devid : va.va_fsid; ref->filelength = va.va_data_size; p1 = &device; p2 = p; do_ioctl = &file_ioctl; if (kIOPolledFileHibernate & iflags) { error = do_ioctl(p1, p2, DKIOCAPFSGETWBCRANGE, (caddr_t) &wbc_range); ref->wbcranged = (error == 0); } if (ref->wbcranged) { uint32_t idx; assert(wbc_range.count <= (sizeof(wbc_range.extents) / sizeof(wbc_range.extents[0]))); for (idx = 0; idx < wbc_range.count; idx++) wbctotal += wbc_range.extents[idx].length; kprintf("kern_direct_file(%s): wbc %qd\n", name, wbctotal); if (wbctotal) target = wbc_range.dev; } if (set_file_size) { if (wbctotal) { if (wbctotal >= set_file_size) set_file_size = HIBERNATE_MIN_FILE_SIZE; else { set_file_size -= wbctotal; if (set_file_size < HIBERNATE_MIN_FILE_SIZE) set_file_size = HIBERNATE_MIN_FILE_SIZE; } } if (fs_free_size) { mpFree += va.va_data_alloc; if ((mpFree < set_file_size) || ((mpFree - set_file_size) < fs_free_size)) { error = ENOSPC; goto out; } } error = vnode_setsize(ref->vp, set_file_size, IO_NOZEROFILL | IO_NOAUTH, ref->ctx); if (error) goto out; ref->filelength = set_file_size; } } else if ((ref->vp->v_type == VBLK) || (ref->vp->v_type == VCHR)) { /* Partition. */ device = va.va_rdev; p1 = ref->vp; p2 = ref->ctx; do_ioctl = &device_ioctl; } else { /* Don't dump to non-regular files. 
*/ error = EFAULT; goto out; } ref->device = device; // probe for CF dk_corestorage_info_t cs_info; memset(&cs_info, 0, sizeof(dk_corestorage_info_t)); error = do_ioctl(p1, p2, DKIOCCORESTORAGE, (caddr_t)&cs_info); ref->cf = (error == 0) && (cs_info.flags & DK_CORESTORAGE_ENABLE_HOTFILES); // get block size error = do_ioctl(p1, p2, DKIOCGETBLOCKSIZE, (caddr_t) &ref->blksize); if (error) goto out; minoffset = HIBERNATE_MIN_PHYSICAL_LBA * ref->blksize; if (ref->vp->v_type != VREG) { error = do_ioctl(p1, p2, DKIOCGETBLOCKCOUNT, (caddr_t) &fileblk); if (error) goto out; ref->filelength = fileblk * ref->blksize; } // pin logical extents, CS version error = kern_ioctl_file_extents(ref, _DKIOCCSPINEXTENT, 0, ref->filelength); if (error && (ENOTTY != error)) goto out; ref->pinned = (error == 0); // pin logical extents, apfs version error = VNOP_IOCTL(ref->vp, FSCTL_FREEZE_EXTENTS, NULL, 0, ref->ctx); if (error && (ENOTTY != error)) goto out; ref->frozen = (error == 0); // generate the block list error = do_ioctl(p1, p2, DKIOCLOCKPHYSICALEXTENTS, NULL); if (error) goto out; locked = TRUE; f_offset = 0; for (; f_offset < ref->filelength; f_offset += filechunk) { if (ref->vp->v_type == VREG) { filechunk = 1*1024*1024*1024; daddr64_t blkno; error = VNOP_BLOCKMAP(ref->vp, f_offset, filechunk, &blkno, &filechunk, NULL, VNODE_WRITE | VNODE_BLOCKMAP_NO_TRACK, NULL); if (error) goto out; if (-1LL == blkno) continue; fileblk = blkno * ref->blksize; } else if ((ref->vp->v_type == VBLK) || (ref->vp->v_type == VCHR)) { fileblk = f_offset; filechunk = f_offset ? 
0 : ref->filelength; } physoffset = 0; while (physoffset < filechunk) { dk_physical_extent_t getphysreq; bzero(&getphysreq, sizeof(getphysreq)); getphysreq.offset = fileblk + physoffset; getphysreq.length = (filechunk - physoffset); error = do_ioctl(p1, p2, DKIOCGETPHYSICALEXTENT, (caddr_t) &getphysreq); if (error) goto out; if (!target) { target = getphysreq.dev; } else if (target != getphysreq.dev) { error = ENOTSUP; goto out; } assert(getphysreq.offset >= minoffset); #if HIBFRAGMENT uint64_t rev; for (rev = 4096; rev <= getphysreq.length; rev += 4096) { callback(callback_ref, getphysreq.offset + getphysreq.length - rev, 4096); } #else callback(callback_ref, getphysreq.offset, getphysreq.length); #endif physoffset += getphysreq.length; } } if (ref->wbcranged) { uint32_t idx; for (idx = 0; idx < wbc_range.count; idx++) { assert(wbc_range.extents[idx].offset >= minoffset); callback(callback_ref, wbc_range.extents[idx].offset, wbc_range.extents[idx].length); } } callback(callback_ref, 0ULL, 0ULL); if (ref->vp->v_type == VREG) p1 = ⌖ else { p1 = ⌖ p2 = p; do_ioctl = &file_ioctl; } // get partition base if (partitionbase_result) { error = do_ioctl(p1, p2, DKIOCGETBASE, (caddr_t) partitionbase_result); if (error) goto out; } // get block size & constraints error = do_ioctl(p1, p2, DKIOCGETBLOCKSIZE, (caddr_t) &blksize); if (error) goto out; maxiocount = 1*1024*1024*1024; error = do_ioctl(p1, p2, DKIOCGETMAXBLOCKCOUNTREAD, (caddr_t) &count); if (error) count = 0; count *= blksize; if (count && (count < maxiocount)) maxiocount = count; error = do_ioctl(p1, p2, DKIOCGETMAXBLOCKCOUNTWRITE, (caddr_t) &count); if (error) count = 0; count *= blksize; if (count && (count < maxiocount)) maxiocount = count; error = do_ioctl(p1, p2, DKIOCGETMAXBYTECOUNTREAD, (caddr_t) &count); if (error) count = 0; if (count && (count < maxiocount)) maxiocount = count; error = do_ioctl(p1, p2, DKIOCGETMAXBYTECOUNTWRITE, (caddr_t) &count); if (error) count = 0; if (count && (count < maxiocount)) 
maxiocount = count; error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTBYTECOUNTREAD, (caddr_t) &count); if (!error) error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTCOUNTREAD, (caddr_t) &segcount); if (error) count = segcount = 0; count *= segcount; if (count && (count < maxiocount)) maxiocount = count; error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTBYTECOUNTWRITE, (caddr_t) &count); if (!error) error = do_ioctl(p1, p2, DKIOCGETMAXSEGMENTCOUNTWRITE, (caddr_t) &segcount); if (error) count = segcount = 0; count *= segcount; if (count && (count < maxiocount)) maxiocount = count; kprintf("max io 0x%qx bytes\n", maxiocount); if (maxiocount_result) *maxiocount_result = maxiocount; error = do_ioctl(p1, p2, DKIOCISSOLIDSTATE, (caddr_t)&isssd); if (!error && isssd) flags |= kIOPolledFileSSD; if (partition_device_result) *partition_device_result = device; if (image_device_result) *image_device_result = target; if (oflags) *oflags = flags; if ((ref->vp->v_type == VBLK) || (ref->vp->v_type == VCHR)) { vnode_close(ref->vp, FWRITE, ref->ctx); ref->vp = NULLVP; ref->ctx = NULL; } out: printf("kern_open_file_for_direct_io(%p, %d)\n", ref, error); if (error && locked) { p1 = &device; (void) do_ioctl(p1, p2, DKIOCUNLOCKPHYSICALEXTENTS, NULL); } if (error && ref) { if (ref->vp) { (void) kern_ioctl_file_extents(ref, _DKIOCCSUNPINEXTENT, 0, (ref->pinned && ref->cf) ? ref->filelength : 0); if (ref->frozen) { (void) VNOP_IOCTL(ref->vp, FSCTL_THAW_EXTENTS, NULL, 0, ref->ctx); } if (ref->wbcranged) { (void) do_ioctl(p1, p2, DKIOCAPFSRELEASEWBCRANGE, (caddr_t) NULL); } vnode_close(ref->vp, FWRITE, ref->ctx); ref->vp = NULLVP; } ref->ctx = NULL; kfree(ref, sizeof(struct kern_direct_file_io_ref_t)); ref = NULL; } return(ref); }
/* kauth file scope listener
 * this allows to detect files written to the filesystem
 * arg2 contains a flag KAUTH_FILEOP_CLOSE which is set if a modified file is being closed
 * this way we don't need to trace every close(), only the ones writing to the filesystem
 *
 * Returns KAUTH_RESULT_DEFER on every path: this listener only observes and
 * never denies access.
 */
static int fileop_scope_listener(kauth_cred_t credential,
                                 void * idata,
                                 kauth_action_t action,
                                 uintptr_t arg0, /* vnode reference */
                                 uintptr_t arg1, /* full path to file being closed */
                                 uintptr_t arg2, /* flags */
                                 uintptr_t arg3)
{
    /* ignore all actions except FILE_CLOSE */
    if (action != KAUTH_FILEOP_CLOSE) {
        return KAUTH_RESULT_DEFER;
    }
    /* ignore operations with bad data */
    if (credential == NULL || (vnode_t)arg0 == NULL || (char*)arg1 == NULL) {
        ERROR_MSG("Arguments contain null pointers!");
        return KAUTH_RESULT_DEFER;
    }
    /* ignore closes on folders, character and block devices */
    switch ( vnode_vtype((vnode_t)arg0) ) {
        case VDIR:
        case VCHR:
        case VBLK:
            return KAUTH_RESULT_DEFER;
        default:
            break;
    }
    /* we are only interested when a modified file is being closed */
    if ((int)arg2 != KAUTH_FILEOP_CLOSE_MODIFIED) {
        return KAUTH_RESULT_DEFER;
    }

    char *file_path = (char*)arg1;

    /* get information from current proc trying to write to the vnode */
    proc_t proc = current_proc();
    pid_t mypid = proc_pid(proc);
    char myprocname[MAXCOMLEN+1] = {0};
    proc_name(mypid, myprocname, sizeof(myprocname));

    /* retrieve the vnode attributes, we can get a lot of vnode information from here */
    struct vnode_attr vap = {0};
    vfs_context_t context = vfs_context_create(NULL);
    /* initialize the structure fields we are interested in
     * reference vn_stat_noauth() xnu/bsd/vfs/vfs_vnops.c */
    VATTR_INIT(&vap);
    VATTR_WANTED(&vap, va_mode);
    VATTR_WANTED(&vap, va_type);
    VATTR_WANTED(&vap, va_uid);
    VATTR_WANTED(&vap, va_gid);
    VATTR_WANTED(&vap, va_data_size);
    VATTR_WANTED(&vap, va_flags);
    int attr_ok = 1;
    if ( vnode_getattr((vnode_t)arg0, &vap, context) != 0 ) {
        /* in case of error permissions and filesize will be bogus */
        ERROR_MSG("failed to vnode_getattr");
        attr_ok = 0;
    }
    /* release the context we created, else kab00m! */
    vfs_context_rele(context);

    int error = 0;
    /* make sure we :
     * - were able to read the attributes
     * - file size is at least uint32_t
     * - path starts with /Users
     */
    if ( attr_ok == 1 &&
         vap.va_data_size >= sizeof(uint32_t) &&
         strprefix(file_path, "/Users/") )
    {
        uint32_t magic = 0;
        /* read the first 4 bytes of the target vnode to match against a Mach-O header */
        uio_t uio = NULL;
        /* read from offset 0 */
        uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
        if (uio == NULL) {
            ERROR_MSG("uio_create returned null!");
            return KAUTH_RESULT_DEFER;
        }
        /* we just want to read 4 bytes to match the header */
        if ( (error = uio_addiov(uio, CAST_USER_ADDR_T(&magic), sizeof(uint32_t))) ) {
            ERROR_MSG("uio_addiov returned error %d!", error);
            /* BUG FIX: uio was leaked on this error path */
            uio_free(uio);
            return KAUTH_RESULT_DEFER;
        }
        if ( (error = VNOP_READ((vnode_t)arg0, uio, 0, NULL)) ) {
            ERROR_MSG("VNOP_READ failed %d!", error);
            /* BUG FIX: uio was leaked on this error path */
            uio_free(uio);
            return KAUTH_RESULT_DEFER;
        }
        if (uio_resid(uio)) {
            ERROR_MSG("uio_resid!");
            /* BUG FIX: uio was leaked on this error path */
            uio_free(uio);
            return KAUTH_RESULT_DEFER;
        }
        /* BUG FIX: the uio was also leaked on the success path; done with it now */
        uio_free(uio);

        /* verify if it's a Mach-O file */
        if (magic == MH_MAGIC || magic == MH_MAGIC_64 || magic == FAT_CIGAM) {
            char *token = NULL;
            char *string = NULL;
            char *tofree = NULL;
            int library = 0;
            int preferences = 0;
            /* walk the path components looking for .../Library/Preferences/... */
            tofree = string = STRDUP(file_path, M_TEMP);
            while ((token = strsep(&string, "/")) != NULL) {
                if (strcmp(token, "Library") == 0) {
                    library = 1;
                }
                else if (library == 1 && strcmp(token, "Preferences") == 0) {
                    preferences = 1;
                }
            }
            _FREE(tofree, M_TEMP);
            /* we got a match into /Users/username/Library/Preferences, warn user about it */
            if (library == 1 && preferences == 1) {
                DEBUG_MSG("Found Mach-O written to %s by %s.", file_path, myprocname);
                char alert_msg[1025] = {0};
                snprintf(alert_msg, sizeof(alert_msg),
                         "Process \"%s\" wrote Mach-O binary %s.\n This could be Hacking Team's malware!",
                         myprocname, file_path);
                alert_msg[sizeof(alert_msg)-1] = '\0';
                /* log to syslog */
                printf("[WARNING] Process \"%s\" wrote Mach-O binary %s.\n This could be Hacking Team's malware!",
                       myprocname, file_path);
                /* deprecated but still usable to display the alert */
                KUNCUserNotificationDisplayNotice(10,       // Timeout
                                                  0,        // Flags - default is Stop alert level
                                                  NULL,     // iconpath
                                                  NULL,     // soundpath
                                                  NULL,     // localization path
                                                  "Security Alert",  // alert header
                                                  alert_msg,         // alert message
                                                  "OK");             // button title
            }
        }
    }
    /* don't deny access, we are just here to observe */
    return KAUTH_RESULT_DEFER;
}
/*
 * Open the file backing a file vdev and report its size and minimum
 * allocation shift.
 *
 * vd     - the vdev to open; vd->vdev_path must be an absolute path.
 * psize  - out: physical size of the backing file in bytes.
 * ashift - out: allocation shift (always SPA_MINBLOCKSHIFT for files).
 *
 * Returns 0 on success, or an errno value with vd->vdev_stat.vs_aux set
 * to the matching VDEV_AUX_* failure code.  On the post-open error paths
 * the vnode is left in vf->vf_vnode for vdev_file_close() to release.
 */
static int
vdev_file_open(vdev_t *vd, uint64_t *psize, uint64_t *ashift)
{
	vdev_file_t *vf;
#ifdef __APPLE__
	struct vnode *vp, *rootdir;
	struct vnode_attr vattr;
	vfs_context_t context;
#else
	vnode_t *vp;
	vattr_t vattr;
#endif
	int error;

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (EINVAL);
	}

	vf = vd->vdev_tsd = kmem_zalloc(sizeof (vdev_file_t), KM_SLEEP);

	/*
	 * We always open the files from the root of the global zone, even if
	 * we're in a local zone.  If the user has gotten to this point, the
	 * administrator has already decided that the pool should be available
	 * to local zone users, so the underlying devices should be as well.
	 */
	ASSERT(vd->vdev_path != NULL && vd->vdev_path[0] == '/');
#ifdef __APPLE__
	rootdir = getrootdir();
#endif
	error = vn_openat(vd->vdev_path + 1, UIO_SYSSPACE, spa_mode | FOFFMAX,
	    0, &vp, 0, 0, rootdir);
	if (error) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (error);
	}

	vf->vf_vnode = vp;

#ifdef _KERNEL
	/*
	 * Make sure it's a regular file.
	 */
#ifdef __APPLE__
	if (!vnode_isreg(vp)) {
#else
	if (vp->v_type != VREG) {
#endif
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (ENODEV);
	}
#endif

	/*
	 * Determine the physical size of the file.
	 */
#ifdef __APPLE__
	VATTR_INIT(&vattr);
	VATTR_WANTED(&vattr, va_data_size);

	context = vfs_context_create((vfs_context_t)0);
	error = vnode_getattr(vp, &vattr, context);
	(void) vfs_context_rele(context);

	if (error || !VATTR_IS_SUPPORTED(&vattr, va_data_size)) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		/*
		 * BUG FIX: when va_data_size is simply unsupported,
		 * vnode_getattr() returned 0 and the old code returned that
		 * 0, signalling success without ever setting *psize.  Report
		 * a real error instead.
		 */
		return (error ? error : EINVAL);
	}
	*psize = vattr.va_data_size;
#else
	vattr.va_mask = AT_SIZE;
	error = VOP_GETATTR(vp, &vattr, 0, kcred);
	if (error) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (error);
	}
	*psize = vattr.va_size;
#endif

	*ashift = SPA_MINBLOCKSHIFT;

	return (0);
}

/*
 * Close the file backing a file vdev and free the per-vdev state.
 * Safe to call when the vdev was never (fully) opened: both the tsd and
 * the vnode pointer are checked before use.
 */
static void
vdev_file_close(vdev_t *vd)
{
	vdev_file_t *vf = vd->vdev_tsd;

	if (vf == NULL)
		return;

	if (vf->vf_vnode != NULL) {
#ifdef __APPLE__
		vfs_context_t context;

		context = vfs_context_create((vfs_context_t)0);
		/* ### APPLE TODO #### */
		// (void) VOP_PUTPAGE(vf->vf_vnode, 0, 0, B_INVAL, kcred);
		(void) vnode_close(vf->vf_vnode, spa_mode, context);
		(void) vfs_context_rele(context);
#else
		(void) VOP_PUTPAGE(vf->vf_vnode, 0, 0, B_INVAL, kcred);
		(void) VOP_CLOSE(vf->vf_vnode, spa_mode, 1, 0, kcred);
		VN_RELE(vf->vf_vnode);
#endif
	}

	kmem_free(vf, sizeof (vdev_file_t));
	vd->vdev_tsd = NULL;
}