/*
 * Gets the first page of kernel binary at disk into a buffer.
 * Uses KPI VFS functions and a ripped uio_createwithbuffer() from XNU.
 *
 * buffer  - destination; must hold at least HEADER_SIZE bytes
 * k_vnode - vnode of the kernel binary on disk
 * kinfo   - kinfo->fat_offset is set to the file offset of the matching
 *           architecture slice (0 for thin binaries)
 *
 * Returns KERN_SUCCESS on success, KERN_FAILURE otherwise.
 *
 * Fixes vs. original: the uio was leaked on every path (including success),
 * and the second read's uio_create/uio_addiov/VNOP_READ results were never
 * checked.
 */
static kern_return_t
get_k_mh(void *buffer, vnode_t k_vnode, struct kernel_info *kinfo)
{
    kern_return_t result = KERN_FAILURE;
    uio_t uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
    if (uio == NULL) {
        return KERN_FAILURE;
    }
    /* imitate the kernel and read a single page from the header */
    if (uio_addiov(uio, CAST_USER_ADDR_T(buffer), HEADER_SIZE) != 0) {
        goto out;
    }
    /* read kernel vnode into the buffer; a non-zero resid means short read */
    if (VNOP_READ(k_vnode, uio, 0, NULL) != 0 || uio_resid(uio)) {
        goto out;
    }
    /* process the header */
    uint32_t magic = *(uint32_t *)buffer;
    if (magic == FAT_CIGAM) {
        /* fat binary: locate the slice matching this kernel's pointer size.
         * Fat header fields are big-endian on disk, hence the ntohl()s. */
        struct fat_header *fh = (struct fat_header *)buffer;
        struct fat_arch *fa = (struct fat_arch *)((char *)buffer + sizeof(struct fat_header));
        uint32_t file_off = 0;
        for (uint32_t i = 0; i < ntohl(fh->nfat_arch); i++) {
            if (sizeof(void *) == 8 && ntohl(fa->cputype) == CPU_TYPE_X86_64) {
                file_off = ntohl(fa->offset);
                break;
            } else if (sizeof(void *) == 4 && ntohl(fa->cputype) == CPU_TYPE_X86) {
                file_off = ntohl(fa->offset);
                break;
            }
            fa++;
        }
        /* read again, this time starting at the matching slice.
         * Free the first uio before creating the second (original leaked it). */
        uio_free(uio);
        uio = uio_create(1, file_off, UIO_SYSSPACE, UIO_READ);
        if (uio == NULL) {
            return KERN_FAILURE;
        }
        if (uio_addiov(uio, CAST_USER_ADDR_T(buffer), HEADER_SIZE) != 0) {
            goto out;
        }
        if (VNOP_READ(k_vnode, uio, 0, NULL) != 0) {
            goto out;
        }
        kinfo->fat_offset = file_off;
    } else {
        kinfo->fat_offset = 0;
    }
    result = KERN_SUCCESS;
out:
    uio_free(uio);
    return result;
}
/*
 * Clone the leading 'size' bytes worth of iovecs from 'auio' into a freshly
 * created uio with the same offset, direction and address-space type.
 * The last copied iovec is truncated so the total never exceeds 'size'.
 *
 * Returns the new uio, or NULL if uio_create() fails (the original
 * dereferenced the result without checking for allocation failure).
 * Caller owns the returned uio and must uio_free() it.
 */
struct uio *
afsio_partialcopy(struct uio *auio, size_t size)
{
    struct uio *res;
    int i;
    user_addr_t iovaddr;
    user_size_t iovsize;

    /* pick the 64/32-bit user segment flavour to match the current process */
    if (proc_is64bit(current_proc())) {
	res = uio_create(uio_iovcnt(auio), uio_offset(auio),
			 uio_isuserspace(auio) ? UIO_USERSPACE64 : UIO_SYSSPACE32,
			 uio_rw(auio));
    } else {
	res = uio_create(uio_iovcnt(auio), uio_offset(auio),
			 uio_isuserspace(auio) ? UIO_USERSPACE32 : UIO_SYSSPACE32,
			 uio_rw(auio));
    }
    /* BUG FIX: uio_create can fail; don't hand a NULL uio to uio_addiov */
    if (res == NULL)
	return NULL;

    for (i = 0; i < uio_iovcnt(auio) && size > 0; i++) {
	if (uio_getiov(auio, i, &iovaddr, &iovsize))
	    break;
	if (iovsize > size)
	    iovsize = size;	/* clamp the final iovec to the remaining budget */
	if (uio_addiov(res, iovaddr, iovsize))
	    break;
	size -= iovsize;
    }
    return res;
}
/*
 * Solaris-style vn_rdwr() shim over the XNU KPI: performs a single
 * read or write of 'len' bytes at 'offset' against vnode 'vp'.
 *
 * rw      - UIO_READ or UIO_WRITE
 * base    - kernel or user buffer, per 'seg'
 * residp  - if non-NULL receives the residual byte count; if NULL, a
 *           short transfer with no other error is reported as EIO
 * ioflag/ulimit/cr are accepted for interface compatibility; ulimit and
 * cr are not consulted here (a context created from the current thread
 * supplies credentials).
 *
 * Returns 0 on success or an errno.
 */
int
zfs_vn_rdwr(enum uio_rw rw, struct vnode *vp, caddr_t base, ssize_t len,
    offset_t offset, enum uio_seg seg, int ioflag, rlim64_t ulimit,
    cred_t *cr, ssize_t *residp)
{
	uio_t *auio;
	int spacetype;
	int error = 0;
	vfs_context_t vctx;

	spacetype = UIO_SEG_IS_USER_SPACE(seg) ? UIO_USERSPACE32 : UIO_SYSSPACE;
	vctx = vfs_context_create((vfs_context_t)0);
	/* create the uio at the target offset directly; the original created
	 * it at offset 0, immediately uio_reset() it, and never checked the
	 * allocation result before dereferencing it */
	auio = uio_create(1, offset, spacetype, rw);
	if (auio == NULL) {
		vfs_context_rele(vctx);
		return (ENOMEM);
	}
	uio_addiov(auio, (uint64_t)(uintptr_t)base, len);

	if (rw == UIO_READ) {
		error = VNOP_READ(vp, auio, ioflag, vctx);
	} else {
		error = VNOP_WRITE(vp, auio, ioflag, vctx);
	}

	if (residp) {
		*residp = uio_resid(auio);
	} else {
		/* no resid out-param: treat a short transfer as an I/O error */
		if (uio_resid(auio) && error == 0)
			error = EIO;
	}

	uio_free(auio);
	vfs_context_rele(vctx);
	return (error);
}
/*
 * Read 'size' bytes at 'offset' from the file at path 'file' into 'buffer'.
 *
 * Returns 0 on success, ENOENT if the path cannot be looked up, and EIO on
 * any other failure (allocation, short read, VNOP_READ error).
 *
 * Fix vs. original: the uio was never uio_free()'d, leaking it on the
 * success path and on every error path after creation.
 */
int
readFile(char *file, uint8_t *buffer, off_t offset, user_size_t size)
{
    int res = EIO;
    uio_t uio = NULL;
    vnode_t fileVnode = NULLVP;

    vfs_context_t vfsContext = vfs_context_create(NULL);
    if (vfsContext == NULL) {
        return EIO;
    }

    if (vnode_lookup(file, 0, &fileVnode, vfsContext) != 0) {
        /* nothing to put: lookup failed, so no vnode iocount was taken */
        vfs_context_rele(vfsContext);
        return ENOENT;
    }

    uio = uio_create(1, offset, UIO_SYSSPACE, UIO_READ);
    if (uio == NULL) {
        goto exit;
    }
    if (uio_addiov(uio, CAST_USER_ADDR_T(buffer), size)) {
        goto exit;
    }
    if (VNOP_READ(fileVnode, uio, 0, vfsContext)) {
        goto exit;
    }
    /* a non-zero residual means we got a short read */
    if (uio_resid(uio)) {
        goto exit;
    }
    res = 0;

exit:
    if (uio != NULL) {
        uio_free(uio);
    }
    vnode_put(fileVnode);
    vfs_context_rele(vfsContext);
    return res;
}
/*
 * retrieve the first page of kernel binary at disk into a buffer
 * version that uses KPI VFS functions and a ripped uio_createwithbuffer() from XNU
 *
 * buffer - destination; must hold at least HEADER_SIZE bytes
 * vnode  - vnode of the kernel binary on disk
 * ctxt   - vfs context to read with
 *
 * Returns 0 (== KERN_SUCCESS) on success; on failure returns the errno from
 * the failing KPI call, EINVAL on a short read, or KERN_FAILURE when the
 * header is not a thin 64-bit Mach-O (same caller-visible semantics as
 * before: zero means success, non-zero means failure).
 *
 * Fix vs. original: the uio was leaked on every path.
 */
kern_return_t
get_mach_header(void *buffer, vnode_t vnode, vfs_context_t ctxt)
{
    int error = 0;
    uio_t uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
    if (uio == NULL) {
        return KERN_FAILURE;
    }
    // imitate the kernel and read a single page from the header
    error = uio_addiov(uio, CAST_USER_ADDR_T(buffer), HEADER_SIZE);
    if (error) {
        goto out;
    }
    // read kernel vnode into the buffer
    error = VNOP_READ(vnode, uio, 0, ctxt);
    if (error) {
        goto out;
    }
    // a residual byte count means the read came up short
    if (uio_resid(uio)) {
        error = EINVAL;
        goto out;
    }
    // process the header: only a thin 64-bit Mach-O is accepted here
    uint32_t magic = *(uint32_t *)buffer;
    if (magic != MH_MAGIC_64) {
        error = KERN_FAILURE;
    }
out:
    uio_free(uio);
    return error;
}
/*
 * Write extended attribute 'name' (len bytes from buf) on vnode 'vp',
 * bypassing the security-attribute restriction (XATTR_NOSECURITY).
 * On success, posts an FSE_XATTR_MODIFIED fsevent when CONFIG_FSE is built.
 * Returns 0 or an errno (EROFS for read-only mounts).
 */
int
mac_vnop_setxattr(struct vnode *vp, const char *name, char *buf, size_t len)
{
	char uiospace[ UIO_SIZEOF(1) ];
	vfs_context_t ctx;
	uio_t io;
	int rc;

	/* refuse writes on read-only filesystems up front */
	if (vfs_isrdonly(vp->v_mount))
		return (EROFS);

	ctx = vfs_context_current();
	/* stack-backed single-iovec uio: nothing to free afterwards */
	io = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_WRITE,
	    &uiospace[0], sizeof(uiospace));
	uio_addiov(io, CAST_USER_ADDR_T(buf), len);

	rc = vn_setxattr(vp, name, io, XATTR_NOSECURITY, ctx);
#if CONFIG_FSE
	if (rc == 0) {
		add_fsevent(FSE_XATTR_MODIFIED, ctx,
		    FSE_ARG_VNODE, vp,
		    FSE_ARG_DONE);
	}
#endif
	return (rc);
}
/*
 * VNOP_STRATEGY handler for the 9p filesystem.
 *
 * Maps the buf's data, wraps it in a uio positioned at the buf's on-disk
 * byte offset (block number * filesystem block size), and hands it to the
 * 9p read/write helpers.  On a short read at EOF the remainder of the
 * mapped region is zero-filled.
 *
 * Note: the success path deliberately falls through into the 'error' label
 * for shared cleanup; 'e' is 0 there (set by the successful buf_map), so
 * buf_seterror(bp, 0) is a no-op error-wise.  Always calls buf_biodone().
 */
static int
vnop_strategy_9p(struct vnop_strategy_args *ap)
{
	mount_t mp;
	struct buf *bp;
	node_9p *np;
	caddr_t addr;
	uio_t uio;
	int e, flags;

	TRACE();
	bp = ap->a_bp;
	np = NTO9P(buf_vnode(bp));
	flags = buf_flags(bp);
	/* initialized so the cleanup path can tell what needs releasing */
	uio = NULL;
	addr = NULL;

	mp = vnode_mount(buf_vnode(bp));
	if (mp == NULL)
		return ENXIO;

	/* map the buffer's pages so we can address them directly */
	if ((e=buf_map(bp, &addr)))
		goto error;

	/* byte offset = logical block number * block size of this mount */
	uio = uio_create(1, buf_blkno(bp) * vfs_statfs(mp)->f_bsize, UIO_SYSSPACE,
	    ISSET(flags, B_READ)? UIO_READ: UIO_WRITE);
	if (uio == NULL) {
		e = ENOMEM;
		goto error;
	}

	uio_addiov(uio, CAST_USER_ADDR_T(addr), buf_count(bp));
	if (ISSET(flags, B_READ)) {
		if((e=nread_9p(np, uio)))
			goto error;
		/* zero the rest of the page if we reached EOF */
		if (uio_resid(uio) > 0) {
			bzero(addr+buf_count(bp)-uio_resid(uio), uio_resid(uio));
			/* advance the uio past the zeroed tail so the resid
			 * recorded below reflects a complete transfer */
			uio_update(uio, uio_resid(uio));
		}
	} else {
		if ((e=nwrite_9p(np, uio)))
			goto error;
	}
	buf_setresid(bp, uio_resid(uio));
error:
	if (uio)
		uio_free(uio);
	if (addr)
		buf_unmap(bp);
	buf_seterror(bp, e);
	buf_biodone(bp);
	return e;
}
/*
 * Fetch the Finder Info extended attribute for znode 'zp' into '*fip'.
 *
 * Looks up the hidden xattr directory, resolves the Finder Info xattr
 * entry inside it, and reads its contents directly from the DMU into the
 * caller's buffer via a uio.  On any failure (no xattr dir, missing entry,
 * allocation or read error) '*fip' is zeroed instead — callers always get
 * a defined finderinfo_t; there is no error return.
 */
void
getfinderinfo(znode_t *zp, cred_t *cr, finderinfo_t *fip)
{
	vnode_t *xdvp = NULLVP;
	vnode_t *xvp = NULLVP;
	struct uio *auio = NULL;
	struct componentname cn;
	int error;
	uint64_t xattr = 0;

	/* bail out fast if this file has no xattr directory at all */
	if (sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zp->z_zfsvfs), &xattr,
	    sizeof(xattr)) || (xattr == 0)) {
		goto nodata;
	}

	auio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
	if (auio == NULL) {
		goto nodata;
	}
	/* the uio targets the caller's buffer directly */
	uio_addiov(auio, CAST_USER_ADDR_T(fip), sizeof (finderinfo_t));

	/*
	 * Grab the hidden attribute directory vnode.
	 *
	 * XXX - switch to embedded Finder Info when it becomes available
	 */
	if ((error = zfs_get_xattrdir(zp, &xdvp, cr, 0))) {
		goto out;
	}

	/* build a minimal componentname to look up the Finder Info entry */
	bzero(&cn, sizeof (cn));
	cn.cn_nameiop = LOOKUP;
	cn.cn_flags = ISLASTCN;
	cn.cn_nameptr = XATTR_FINDERINFO_NAME;
	cn.cn_namelen = strlen(cn.cn_nameptr);

	if ((error = zfs_dirlook(VTOZ(xdvp), cn.cn_nameptr, &xvp, 0, NULL, &cn))) {
		goto out;
	}

	/* read the xattr file's contents straight from the DMU */
	error = dmu_read_uio(zp->z_zfsvfs->z_os, VTOZ(xvp)->z_id, auio,
	    sizeof (finderinfo_t));
out:
	if (auio)
		uio_free(auio);
	if (xvp)
		vnode_put(xvp);
	if (xdvp)
		vnode_put(xdvp);
	if (error == 0)
		return;
nodata:
	/* any failure: hand back zeroed Finder Info rather than garbage */
	bzero(fip, sizeof (finderinfo_t));
}
/* Generic read interface
 *
 * Reads 'asize' bytes from 'afile' into 'aptr', at 'offset' when offset
 * is not -1, otherwise at the file's current offset.  On success returns
 * the number of bytes actually read and advances afile->offset by that
 * amount; on failure returns a negative error code.
 */
int
afs_osi_Read(struct osi_file *afile, int offset, void *aptr,
	     afs_int32 asize)
{
    afs_ucred_t *oldCred;	/* unused on this path; presumably used behind
				 * other platforms' ifdefs — TODO confirm */
    afs_size_t resid;
    afs_int32 code;
#ifdef AFS_DARWIN80_ENV
    uio_t uio;
#endif
    AFS_STATCNT(osi_Read);

    /**
     * If the osi_file passed in is NULL, panic only if AFS is not shutting
     * down. No point in crashing when we are already shutting down
     */
    if (!afile) {
	if (afs_shuttingdown == AFS_RUNNING)
	    osi_Panic("osi_Read called with null param");
	else
	    return -EIO;
    }

    if (offset != -1)
	afile->offset = offset;
    /* drop the global AFS lock around the blocking I/O */
    AFS_GUNLOCK();
#ifdef AFS_DARWIN80_ENV
    /* Darwin 8+: use the XNU uio KPI and VNOP_READ directly */
    uio=uio_create(1, afile->offset, AFS_UIOSYS, UIO_READ);
    uio_addiov(uio, CAST_USER_ADDR_T(aptr), asize);
    code = VNOP_READ(afile->vnode, uio, IO_UNIT, afs_osi_ctxtp);
    resid = AFS_UIO_RESID(uio);
    uio_free(uio);
#else
    code =
	gop_rdwr(UIO_READ, afile->vnode, (caddr_t) aptr, asize, afile->offset,
		 AFS_UIOSYS, IO_UNIT, &afs_osi_cred, &resid);
#endif
    AFS_GLOCK();
    if (code == 0) {
	/* success: report bytes transferred and advance the file offset */
	code = asize - resid;
	afile->offset += code;
	osi_DisableAtimes(afile->vnode);
    } else {
	afs_Trace2(afs_iclSetp, CM_TRACE_READFAILED, ICL_TYPE_INT32, resid,
		   ICL_TYPE_INT32, code);
	/* normalize failures to a negative return value */
	if (code > 0) {
	    code *= -1;
	}
    }
    return code;
}
/*
 * Function: receive_packet
 * Purpose:
 *   Return a received packet or an error if none available.
 *   Blocks until 'psize' bytes arrive (MSG_WAITALL); the number of bytes
 *   actually copied into 'pp' is stored in '*actual_size'.
 */
static int
receive_packet(struct socket *so, void *pp, int psize, int *actual_size)
{
    char uiospace[ UIO_SIZEOF(1) ];
    uio_t io;
    int flags;
    int result;

    /* stack-backed uio: no allocation, nothing to free */
    io = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
                              &uiospace[0], sizeof(uiospace));
    uio_addiov(io, CAST_USER_ADDR_T(pp), psize);

    flags = MSG_WAITALL;
    result = soreceive(so, (struct sockaddr **)0, io, 0, 0, &flags);

    /* whatever was not left in the uio was delivered to the caller */
    *actual_size = psize - uio_resid(io);
    return (result);
}
/* Generic write interface
 *
 * Writes 'asize' bytes from 'aptr' to 'afile', at 'offset' when offset is
 * not -1, otherwise at the file's current offset.  On success returns the
 * number of bytes written and advances afile->offset; on failure returns
 * a negative error code.  If afile->proc is set, it is invoked with the
 * result after every write.
 */
int
afs_osi_Write(struct osi_file *afile, afs_int32 offset, void *aptr,
	      afs_int32 asize)
{
    afs_ucred_t *oldCred;	/* unused on this path; presumably used behind
				 * other platforms' ifdefs — TODO confirm */
    afs_size_t resid;
    afs_int32 code;
#ifdef AFS_DARWIN80_ENV
    uio_t uio;
#endif
    AFS_STATCNT(osi_Write);
    if (!afile)
	osi_Panic("afs_osi_Write called with null param");
    if (offset != -1)
	afile->offset = offset;
    {
	/* drop the global AFS lock around the blocking I/O */
	AFS_GUNLOCK();
#ifdef AFS_DARWIN80_ENV
	/* Darwin 8+: use the XNU uio KPI and VNOP_WRITE directly */
	uio=uio_create(1, afile->offset, AFS_UIOSYS, UIO_WRITE);
	uio_addiov(uio, CAST_USER_ADDR_T(aptr), asize);
	code = VNOP_WRITE(afile->vnode, uio, IO_UNIT, afs_osi_ctxtp);
	resid = AFS_UIO_RESID(uio);
	uio_free(uio);
#else
	code =
	    gop_rdwr(UIO_WRITE, afile->vnode, (caddr_t) aptr, asize,
		     afile->offset, AFS_UIOSYS, IO_UNIT, &afs_osi_cred,
		     &resid);
#endif
	AFS_GLOCK();
    }
    if (code == 0) {
	/* success: report bytes transferred and advance the file offset */
	code = asize - resid;
	afile->offset += code;
    } else {
	/* normalize failures to a negative return value */
	if (code > 0) {
	    code *= -1;
	}
    }
    if (afile->proc) {
	(*afile->proc) (afile, code);
    }
    return code;
}
/*
 * retrieve the whole linkedit segment into target buffer from kernel binary at disk
 * we keep this buffer until we don't need to solve symbols anymore
 *
 * Reads kinfo->linkedit_size bytes at file offset kinfo->linkedit_fileoff
 * into kinfo->linkedit_buf (which the caller must have allocated).
 *
 * Returns 0 (== KERN_SUCCESS) on success; on failure returns the failing
 * call's errno, EINVAL on a short read, or KERN_FAILURE when the uio
 * cannot be created (non-zero means failure, as before).
 *
 * Fix vs. original: the uio was leaked on every path.
 */
static kern_return_t
get_kernel_linkedit(vnode_t kernel_vnode, vfs_context_t ctxt, kernel_info *kinfo)
{
    int error = 0;
    uio_t uio = uio_create(1, kinfo->linkedit_fileoff, UIO_SYSSPACE, UIO_READ);
    if (uio == NULL) {
        return KERN_FAILURE;
    }
    error = uio_addiov(uio, CAST_USER_ADDR_T(kinfo->linkedit_buf), kinfo->linkedit_size);
    if (error) {
        goto out;
    }
    error = VNOP_READ(kernel_vnode, uio, 0, ctxt);
    /* a residual byte count with no error means a short read */
    if (error == 0 && uio_resid(uio)) {
        error = EINVAL;
    }
out:
    uio_free(uio);
    return (error == 0) ? KERN_SUCCESS : error;
}
/*
 * Read extended attribute 'name' from vnode 'vp' into 'buf' (up to 'len'
 * bytes), bypassing the security-attribute restriction (XATTR_NOSECURITY).
 * '*attrlen' receives the number of bytes actually copied out.
 * Returns 0 or an errno from vn_getxattr().
 */
int
mac_vnop_getxattr(struct vnode *vp, const char *name, char *buf, size_t len,
    size_t *attrlen)
{
	char uiospace[ UIO_SIZEOF(1) ];
	vfs_context_t ctx = vfs_context_current();
	uio_t io;
	int rc;

	/* wrap the caller's buffer in a stack-backed, single-iovec uio */
	io = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
	    &uiospace[0], sizeof(uiospace));
	uio_addiov(io, CAST_USER_ADDR_T(buf), len);

	rc = vn_getxattr(vp, name, io, attrlen, XATTR_NOSECURITY, ctx);
	/* report how much of the buffer was actually filled */
	*attrlen = len - uio_resid(io);
	return (rc);
}
/*
 * Our version of vn_rdwr, here "vp" is not actually a vnode, but a ptr
 * to the node allocated in getf(). We use the "fp" part of the node to
 * be able to issue IO.
 * You must call getf() before calling spl_vn_rdwr().
 *
 * rw      - UIO_READ or UIO_WRITE
 * residp  - if non-NULL receives the residual byte count; if NULL, a
 *           short transfer with no other error is reported as EIO
 * ioflag/ulimit/cr are accepted for interface compatibility.
 *
 * Returns 0 on success or an errno.
 */
int
spl_vn_rdwr(enum uio_rw rw,
    struct vnode *vp,
    caddr_t base,
    ssize_t len,
    offset_t offset,
    enum uio_seg seg,
    int ioflag,
    rlim64_t ulimit,    /* meaningful only if rw is UIO_WRITE */
    cred_t *cr,
    ssize_t *residp)
{
	struct spl_fileproc *sfp = (struct spl_fileproc *)vp;
	uio_t *auio;
	int spacetype;
	int error = 0;
	vfs_context_t vctx;

	spacetype = UIO_SEG_IS_USER_SPACE(seg) ? UIO_USERSPACE32 : UIO_SYSSPACE;
	vctx = vfs_context_create((vfs_context_t)0);
	/* create the uio at the target offset directly; the original created
	 * it at offset 0, immediately uio_reset() it, and never checked the
	 * allocation result before dereferencing it */
	auio = uio_create(1, offset, spacetype, rw);
	if (auio == NULL) {
		vfs_context_rele(vctx);
		return (ENOMEM);
	}
	uio_addiov(auio, (uint64_t)(uintptr_t)base, len);

	if (rw == UIO_READ) {
		error = fo_read(sfp->f_fp, auio, ioflag, vctx);
	} else {
		error = fo_write(sfp->f_fp, auio, ioflag, vctx);
		sfp->f_writes = 1;	/* remember this node was written to */
	}

	if (residp) {
		*residp = uio_resid(auio);
	} else {
		/* no resid out-param: treat a short transfer as an I/O error */
		if (uio_resid(auio) && error == 0)
			error = EIO;
	}

	uio_free(auio);
	vfs_context_rele(vctx);
	return (error);
}
/*
 * Get the whole __LINKEDIT segment into target buffer from kernel binary at disk.
 * This buffer is kept until we don't need to solve symbols anymore.
 *
 * Reads kinfo->linkedit_size bytes at file offset kinfo->linkedit_fileoff
 * into kinfo->linkedit_buf (which the caller must have allocated).
 *
 * Returns KERN_SUCCESS on success, KERN_FAILURE otherwise.
 *
 * Fix vs. original: the uio was leaked on every path (including success).
 */
static kern_return_t
get_k_linkedit(vnode_t k_vnode, struct kernel_info *kinfo)
{
    kern_return_t result = KERN_FAILURE;
    uio_t uio = uio_create(1, kinfo->linkedit_fileoff, UIO_SYSSPACE, UIO_READ);
    if (uio == NULL) {
        return KERN_FAILURE;
    }
    if (uio_addiov(uio, CAST_USER_ADDR_T(kinfo->linkedit_buf), kinfo->linkedit_size) != 0) {
        goto out;
    }
    if (VNOP_READ(k_vnode, uio, 0, NULL) != 0) {
        goto out;
    }
    /* a residual byte count means the read came up short */
    if (uio_resid(uio)) {
        goto out;
    }
    result = KERN_SUCCESS;
out:
    uio_free(uio);
    return result;
}
/*
 * Perform a synchronous read or write of 'count' bytes at 'offset' against
 * vnode 'vp', using the caller's buffer 'base'.
 *
 * op    - UIO_READ or UIO_WRITE
 * resid - if non-NULL, receives the residual (untransferred) byte count
 *
 * Returns 0 or an errno from VNOP_READ/VNOP_WRITE.
 */
static int
file_io(struct vnode *vp, vfs_context_t ctx, enum uio_rw op,
    char *base, off_t offset, user_ssize_t count, user_ssize_t *resid)
{
	char uiospace[UIO_SIZEOF(1)];
	uio_t io;
	int rc;

	/* single-iovec uio backed by stack storage; nothing to free */
	io = uio_createwithbuffer(1, offset, UIO_SYSSPACE, op,
	    &uiospace[0], sizeof(uiospace));
	uio_addiov(io, CAST_USER_ADDR_T(base), count);

	rc = (op == UIO_READ)
	    ? VNOP_READ(vp, io, IO_SYNC, ctx)
	    : VNOP_WRITE(vp, io, IO_SYNC, ctx);

	if (resid != NULL) {
		*resid = uio_resid(io);
	}
	return (rc);
}
/* * Convert a pathname into a pointer to a locked inode. * * The FOLLOW flag is set when symbolic links are to be followed * when they occur at the end of the name translation process. * Symbolic links are always followed for all other pathname * components other than the last. * * The segflg defines whether the name is to be copied from user * space or kernel space. * * Overall outline of namei: * * copy in name * get starting directory * while (!done && !error) { * call lookup to search path. * if symbolic link, massage name in buffer and continue * } * * Returns: 0 Success * ENOENT No such file or directory * ELOOP Too many levels of symbolic links * ENAMETOOLONG Filename too long * copyinstr:EFAULT Bad address * copyinstr:ENAMETOOLONG Filename too long * lookup:EBADF Bad file descriptor * lookup:EROFS * lookup:EACCES * lookup:EPERM * lookup:ERECYCLE vnode was recycled from underneath us in lookup. * This means we should re-drive lookup from this point. * lookup: ??? * VNOP_READLINK:??? 
*/
/*
 * namei: translate the pathname in *ndp into vnode(s), following symlinks
 * as described in the block comment above.  On success returns 0 with the
 * result vnode(s) referenced in ndp; on failure returns an errno with all
 * temporary references and name buffers released.
 */
int
namei(struct nameidata *ndp)
{
	struct filedesc *fdp;	/* pointer to file descriptor state */
	char *cp;		/* pointer into pathname argument */
	struct vnode *dp;	/* the directory we are searching */
	struct vnode *usedvp = ndp->ni_dvp;  /* store pointer to vp in case we must loop due to heavy vnode pressure */
	u_long cnpflags = ndp->ni_cnd.cn_flags; /* store in case we have to restore after loop */
	uio_t auio;
	int error;
	struct componentname *cnp = &ndp->ni_cnd;
	vfs_context_t ctx = cnp->cn_context;
	proc_t p = vfs_context_proc(ctx);
	/* XXX ut should be from context */
	uthread_t ut = (struct uthread *)get_bsdthread_info(current_thread());
	char *tmppn;
	char uio_buf[ UIO_SIZEOF(1) ];	/* stack storage for the readlink uio */

#if DIAGNOSTIC
	if (!vfs_context_ucred(ctx) || !p)
		panic ("namei: bad cred/proc");
	if (cnp->cn_nameiop & (~OPMASK))
		panic ("namei: nameiop contaminated with flags");
	if (cnp->cn_flags & OPMASK)
		panic ("namei: flags contaminated with nameiops");
#endif
	fdp = p->p_fd;

vnode_recycled:
	/*
	 * Get a buffer for the name to be translated, and copy the
	 * name into the buffer.
	 */
	if ((cnp->cn_flags & HASBUF) == 0) {
		cnp->cn_pnbuf = ndp->ni_pathbuf;
		cnp->cn_pnlen = PATHBUFLEN;
	}
#if LP64_DEBUG
	if (IS_VALID_UIO_SEGFLG(ndp->ni_segflg) == 0) {
		panic("%s :%d - invalid ni_segflg\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

retry_copy:
	/* copy the pathname in from user or kernel space as appropriate */
	if (UIO_SEG_IS_USER_SPACE(ndp->ni_segflg)) {
		error = copyinstr(ndp->ni_dirp, cnp->cn_pnbuf,
			    cnp->cn_pnlen, (size_t *)&ndp->ni_pathlen);
	} else {
		error = copystr(CAST_DOWN(void *, ndp->ni_dirp), cnp->cn_pnbuf,
			    cnp->cn_pnlen, (size_t *)&ndp->ni_pathlen);
	}
	/* name didn't fit in the small embedded buffer: upgrade to a
	 * MAXPATHLEN zone allocation and retry the copy once */
	if (error == ENAMETOOLONG && !(cnp->cn_flags & HASBUF)) {
		MALLOC_ZONE(cnp->cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
		if (cnp->cn_pnbuf == NULL) {
			error = ENOMEM;
			goto error_out;
		}
		cnp->cn_flags |= HASBUF;
		cnp->cn_pnlen = MAXPATHLEN;
		goto retry_copy;
	}
	if (error)
		goto error_out;

#if CONFIG_VOLFS
	/*
	 * Check for legacy volfs style pathnames.
	 *
	 * For compatibility reasons we currently allow these paths,
	 * but future versions of the OS may not support them.
	 */
	if (ndp->ni_pathlen >= VOLFS_MIN_PATH_LEN &&
	    cnp->cn_pnbuf[0] == '/' &&
	    cnp->cn_pnbuf[1] == '.' &&
	    cnp->cn_pnbuf[2] == 'v' &&
	    cnp->cn_pnbuf[3] == 'o' &&
	    cnp->cn_pnbuf[4] == 'l' &&
	    cnp->cn_pnbuf[5] == '/' ) {
		char * realpath;
		int realpath_err;
		/* Attempt to resolve a legacy volfs style pathname. */
		MALLOC_ZONE(realpath, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
		if (realpath) {
			if ((realpath_err= vfs_getrealpath(&cnp->cn_pnbuf[6], realpath, MAXPATHLEN, ctx))) {
				FREE_ZONE(realpath, MAXPATHLEN, M_NAMEI);
				if (realpath_err == ENOSPC){
					error = ENAMETOOLONG;
					goto error_out;
				}
			} else {
				/* resolved: swap the resolved path in as the
				 * working pathname buffer */
				if (cnp->cn_flags & HASBUF) {
					FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
				}
				cnp->cn_pnbuf = realpath;
				cnp->cn_pnlen = MAXPATHLEN;
				ndp->ni_pathlen = strlen(realpath) + 1;
				cnp->cn_flags |= HASBUF | CN_VOLFSPATH;
			}
		}
	}
#endif /* CONFIG_VOLFS */

	/* If we are auditing the kernel pathname, save the user pathname */
	if (cnp->cn_flags & AUDITVNPATH1)
		AUDIT_ARG(upath, ut->uu_cdir, cnp->cn_pnbuf, ARG_UPATH1);
	if (cnp->cn_flags & AUDITVNPATH2)
		AUDIT_ARG(upath, ut->uu_cdir, cnp->cn_pnbuf, ARG_UPATH2);

	/*
	 * Do not allow empty pathnames
	 */
	if (*cnp->cn_pnbuf == '\0') {
		error = ENOENT;
		goto error_out;
	}
	ndp->ni_loopcnt = 0;

	/*
	 * determine the starting point for the translation.
	 */
	if ((ndp->ni_rootdir = fdp->fd_rdir) == NULLVP) {
		if ( !(fdp->fd_flags & FD_CHROOT))
			ndp->ni_rootdir = rootvnode;
	}
	cnp->cn_nameptr = cnp->cn_pnbuf;

	ndp->ni_usedvp = NULLVP;

	/* absolute path: skip leading slashes and start at the root;
	 * otherwise start at the caller-supplied dvp or the cwd */
	if (*(cnp->cn_nameptr) == '/') {
		while (*(cnp->cn_nameptr) == '/') {
			cnp->cn_nameptr++;
			ndp->ni_pathlen--;
		}
		dp = ndp->ni_rootdir;
	} else if (cnp->cn_flags & USEDVP) {
		dp = ndp->ni_dvp;
		ndp->ni_usedvp = dp;
	} else
		dp = vfs_context_cwd(ctx);

	if (dp == NULLVP || (dp->v_lflag & VL_DEAD)) {
		error = ENOENT;
		goto error_out;
	}
	ndp->ni_dvp = NULLVP;
	ndp->ni_vp  = NULLVP;

	/* main translation loop: one iteration per symlink encountered */
	for (;;) {
		int need_newpathbuf;
		int linklen;

		ndp->ni_startdir = dp;

		if ( (error = lookup(ndp)) ) {
			goto error_out;
		}
		/*
		 * Check for symbolic link
		 */
		if ((cnp->cn_flags & ISSYMLINK) == 0) {
			return (0);
		}
		if ((cnp->cn_flags & FSNODELOCKHELD)) {
			cnp->cn_flags &= ~FSNODELOCKHELD;
			unlock_fsnode(ndp->ni_dvp, NULL);
		}
		if (ndp->ni_loopcnt++ >= MAXSYMLINKS) {
			error = ELOOP;
			break;
		}
#if CONFIG_MACF
		if ((error = mac_vnode_check_readlink(ctx, ndp->ni_vp)) != 0)
			break;
#endif /* MAC */
		/* need a fresh buffer unless the remaining path is empty and
		 * we already own a zone-allocated buffer we may overwrite */
		if (ndp->ni_pathlen > 1 || !(cnp->cn_flags & HASBUF))
			need_newpathbuf = 1;
		else
			need_newpathbuf = 0;

		if (need_newpathbuf) {
			MALLOC_ZONE(cp, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
			if (cp == NULL) {
				error = ENOMEM;
				break;
			}
		} else {
			cp = cnp->cn_pnbuf;
		}
		/* read the link target into cp via a stack-backed uio */
		auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf));

		uio_addiov(auio, CAST_USER_ADDR_T(cp), MAXPATHLEN);

		error = VNOP_READLINK(ndp->ni_vp, auio, ctx);
		if (error) {
			if (need_newpathbuf)
				FREE_ZONE(cp, MAXPATHLEN, M_NAMEI);
			break;
		}
		// LP64todo - fix this
		linklen = MAXPATHLEN - uio_resid(auio);
		if (linklen + ndp->ni_pathlen > MAXPATHLEN) {
			if (need_newpathbuf)
				FREE_ZONE(cp, MAXPATHLEN, M_NAMEI);

			error = ENAMETOOLONG;
			break;
		}
		if (need_newpathbuf) {
			long len = cnp->cn_pnlen;

			/* splice the unconsumed remainder of the original
			 * path after the link target, then adopt cp */
			tmppn = cnp->cn_pnbuf;
			bcopy(ndp->ni_next, cp + linklen, ndp->ni_pathlen);
			cnp->cn_pnbuf = cp;
			cnp->cn_pnlen = MAXPATHLEN;

			if ( (cnp->cn_flags & HASBUF) )
				FREE_ZONE(tmppn, len, M_NAMEI);
			else
				cnp->cn_flags |= HASBUF;
		} else
			cnp->cn_pnbuf[linklen] = '\0';

		ndp->ni_pathlen += linklen;
		cnp->cn_nameptr = cnp->cn_pnbuf;

		/*
		 * starting point for 'relative'
		 * symbolic link path
		 */
		dp = ndp->ni_dvp;
		/*
		 * get rid of references returned via 'lookup'
		 */
		vnode_put(ndp->ni_vp);
		vnode_put(ndp->ni_dvp);

		ndp->ni_vp = NULLVP;
		ndp->ni_dvp = NULLVP;

		/*
		 * Check if symbolic link restarts us at the root
		 */
		if (*(cnp->cn_nameptr) == '/') {
			while (*(cnp->cn_nameptr) == '/') {
				cnp->cn_nameptr++;
				ndp->ni_pathlen--;
			}
			if ((dp = ndp->ni_rootdir) == NULLVP) {
				error = ENOENT;
				goto error_out;
			}
		}
	}
	/*
	 * only come here if we fail to handle a SYMLINK...
	 * if either ni_dvp or ni_vp is non-NULL, then
	 * we need to drop the iocount that was picked
	 * up in the lookup routine
	 */
	if (ndp->ni_dvp)
		vnode_put(ndp->ni_dvp);
	if (ndp->ni_vp)
		vnode_put(ndp->ni_vp);
error_out:
	if ( (cnp->cn_flags & HASBUF) ) {
		cnp->cn_flags &= ~HASBUF;
		FREE_ZONE(cnp->cn_pnbuf, cnp->cn_pnlen, M_NAMEI);
	}
	cnp->cn_pnbuf = NULL;
	ndp->ni_vp = NULLVP;

	if (error == ERECYCLE){ /* vnode was recycled underneath us. re-drive lookup to start at the beginning again, since recycling invalidated last lookup*/
		ndp->ni_cnd.cn_flags = cnpflags;
		ndp->ni_dvp = usedvp;
		goto vnode_recycled;
	}
	return (error);
}
/* kauth file scope listener
 * this allows to detect files written to the filesystem
 * arg2 contains a flag KAUTH_FILEOP_CLOSE which is set if a modified file is being closed
 * this way we don't need to trace every close(), only the ones writing to the filesystem
 *
 * Always returns KAUTH_RESULT_DEFER — this listener only observes, it never
 * denies access.
 *
 * Fix vs. original: the uio created to sniff the file's magic was never
 * uio_free()'d, leaking it on the success path and on every error return.
 */
static int fileop_scope_listener(kauth_cred_t credential,
                                 void *idata,
                                 kauth_action_t action,
                                 uintptr_t arg0,   /* vnode reference */
                                 uintptr_t arg1,   /* full path to file being closed */
                                 uintptr_t arg2,   /* flags */
                                 uintptr_t arg3)
{
    /* ignore all actions except FILE_CLOSE */
    if (action != KAUTH_FILEOP_CLOSE) {
        return KAUTH_RESULT_DEFER;
    }
    /* ignore operations with bad data */
    if (credential == NULL || (vnode_t)arg0 == NULL || (char*)arg1 == NULL) {
        ERROR_MSG("Arguments contain null pointers!");
        return KAUTH_RESULT_DEFER;
    }
    /* ignore closes on folders, character and block devices */
    switch ( vnode_vtype((vnode_t)arg0) ) {
        case VDIR:
        case VCHR:
        case VBLK:
            return KAUTH_RESULT_DEFER;
        default:
            break;
    }
    /* we are only interested when a modified file is being closed */
    if ((int)arg2 != KAUTH_FILEOP_CLOSE_MODIFIED) {
        return KAUTH_RESULT_DEFER;
    }

    char *file_path = (char*)arg1;
    /* get information from current proc trying to write to the vnode */
    proc_t proc = current_proc();
    pid_t mypid = proc_pid(proc);
    char myprocname[MAXCOMLEN+1] = {0};
    proc_name(mypid, myprocname, sizeof(myprocname));

    /* retrieve the vnode attributes, we can get a lot of vnode information from here */
    struct vnode_attr vap = {0};
    vfs_context_t context = vfs_context_create(NULL);
    /* initialize the structure fields we are interested in
     * reference vn_stat_noauth() xnu/bsd/vfs/vfs_vnops.c */
    VATTR_INIT(&vap);
    VATTR_WANTED(&vap, va_mode);
    VATTR_WANTED(&vap, va_type);
    VATTR_WANTED(&vap, va_uid);
    VATTR_WANTED(&vap, va_gid);
    VATTR_WANTED(&vap, va_data_size);
    VATTR_WANTED(&vap, va_flags);
    int attr_ok = 1;
    if ( vnode_getattr((vnode_t)arg0, &vap, context) != 0 ) {
        /* in case of error permissions and filesize will be bogus */
        ERROR_MSG("failed to vnode_getattr");
        attr_ok = 0;
    }
    /* release the context we created, else kab00m! */
    vfs_context_rele(context);

    int error = 0;
    /* make sure we :
     * - were able to read the attributes
     * - file size is at least uint32_t
     * - path starts with /Users
     */
    if ( attr_ok == 1 &&
         vap.va_data_size >= sizeof(uint32_t) &&
         strprefix(file_path, "/Users/") ) {
        uint32_t magic = 0;
        /* read target vnode, from offset 0 */
        uio_t uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ);
        if (uio == NULL) {
            ERROR_MSG("uio_create returned null!");
            return KAUTH_RESULT_DEFER;
        }
        /* we just want to read 4 bytes to match the header */
        if ( (error = uio_addiov(uio, CAST_USER_ADDR_T(&magic), sizeof(uint32_t))) ) {
            ERROR_MSG("uio_addiov returned error %d!", error);
            uio_free(uio);  /* fix: original leaked the uio here */
            return KAUTH_RESULT_DEFER;
        }
        error = VNOP_READ((vnode_t)arg0, uio, 0, NULL);
        /* capture the short-read condition before releasing the uio */
        int short_read = (error == 0 && uio_resid(uio) != 0);
        uio_free(uio);      /* fix: free on every path, including success */
        if (error) {
            ERROR_MSG("VNOP_READ failed %d!", error);
            return KAUTH_RESULT_DEFER;
        }
        else if (short_read) {
            ERROR_MSG("uio_resid!");
            return KAUTH_RESULT_DEFER;
        }
        /* verify if it's a Mach-O file */
        if (magic == MH_MAGIC || magic == MH_MAGIC_64 || magic == FAT_CIGAM) {
            char *token = NULL;
            char *string = NULL;
            char *tofree = NULL;
            int library = 0;
            int preferences = 0;
            /* walk the path components looking for Library/Preferences */
            tofree = string = STRDUP(file_path, M_TEMP);
            while ((token = strsep(&string, "/")) != NULL) {
                if (strcmp(token, "Library") == 0) {
                    library = 1;
                }
                else if (library == 1 && strcmp(token, "Preferences") == 0) {
                    preferences = 1;
                }
            }
            _FREE(tofree, M_TEMP);
            /* we got a match into /Users/username/Library/Preferences, warn user about it */
            if (library == 1 && preferences == 1) {
                DEBUG_MSG("Found Mach-O written to %s by %s.", file_path, myprocname);
                char alert_msg[1025] = {0};
                snprintf(alert_msg, sizeof(alert_msg),
                         "Process \"%s\" wrote Mach-O binary %s.\n This could be Hacking Team's malware!",
                         myprocname, file_path);
                alert_msg[sizeof(alert_msg)-1] = '\0';
                /* log to syslog */
                printf("[WARNING] Process \"%s\" wrote Mach-O binary %s.\n This could be Hacking Team's malware!",
                       myprocname, file_path);
                /* deprecated but still usable to display the alert */
                KUNCUserNotificationDisplayNotice(10,       // Timeout
                                                  0,        // Flags - default is Stop alert level
                                                  NULL,     // iconpath
                                                  NULL,     // soundpath
                                                  NULL,     // localization path
                                                  "Security Alert", // alert header
                                                  alert_msg,        // alert message
                                                  "OK");            // button title
            }
        }
    }
    /* don't deny access, we are just here to observe */
    return KAUTH_RESULT_DEFER;
}
/*
 * Takes ni_vp and ni_dvp non-NULL.  Returns with *new_dp set to the location
 * at which to start a lookup with a resolved path, and all other iocounts dropped.
 *
 * Reads the symlink target of ndp->ni_vp, splices the unconsumed remainder
 * of the original path after it (growing into a fresh MAXPATHLEN buffer when
 * needed), and drops the iocounts on ni_vp/ni_dvp.  Returns 0 on success or
 * ELOOP/ENOMEM/ENAMETOOLONG/ENOENT (or a MAC/readlink errno) on failure.
 */
int
lookup_handle_symlink(struct nameidata *ndp, vnode_t *new_dp, vfs_context_t ctx)
{
	int error;
	char *cp;		/* pointer into pathname argument */
	uio_t auio;
	char uio_buf[ UIO_SIZEOF(1) ];	/* stack storage for the readlink uio */
	int need_newpathbuf;
	u_int linklen;
	struct componentname *cnp = &ndp->ni_cnd;
	vnode_t dp;
	char *tmppn;

#if CONFIG_VFS_FUNNEL
	if ((cnp->cn_flags & FSNODELOCKHELD)) {
		cnp->cn_flags &= ~FSNODELOCKHELD;
		unlock_fsnode(ndp->ni_dvp, NULL);
	}
#endif /* CONFIG_VFS_FUNNEL */

	/* bound the number of symlinks we will chase */
	if (ndp->ni_loopcnt++ >= MAXSYMLINKS) {
		return ELOOP;
	}
#if CONFIG_MACF
	if ((error = mac_vnode_check_readlink(ctx, ndp->ni_vp)) != 0)
		return error;
#endif /* MAC */
	/* need a fresh buffer unless the remaining path is empty and we
	 * already own a zone-allocated buffer we may overwrite in place */
	if (ndp->ni_pathlen > 1 || !(cnp->cn_flags & HASBUF))
		need_newpathbuf = 1;
	else
		need_newpathbuf = 0;

	if (need_newpathbuf) {
		MALLOC_ZONE(cp, char *, MAXPATHLEN, M_NAMEI, M_WAITOK);
		if (cp == NULL) {
			return ENOMEM;
		}
	} else {
		cp = cnp->cn_pnbuf;
	}
	/* read the link target into cp */
	auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ, &uio_buf[0], sizeof(uio_buf));

	uio_addiov(auio, CAST_USER_ADDR_T(cp), MAXPATHLEN);

	error = VNOP_READLINK(ndp->ni_vp, auio, ctx);
	if (error) {
		if (need_newpathbuf)
			FREE_ZONE(cp, MAXPATHLEN, M_NAMEI);
		return error;
	}

	/*
	 * Safe to set unsigned with a [larger] signed type here
	 * because 0 <= uio_resid <= MAXPATHLEN and MAXPATHLEN
	 * is only 1024.
	 */
	linklen = MAXPATHLEN - (u_int)uio_resid(auio);
	if (linklen + ndp->ni_pathlen > MAXPATHLEN) {
		if (need_newpathbuf)
			FREE_ZONE(cp, MAXPATHLEN, M_NAMEI);
		return ENAMETOOLONG;
	}
	if (need_newpathbuf) {
		long len = cnp->cn_pnlen;

		/* splice the unconsumed remainder of the original path after
		 * the link target, then adopt cp as the working buffer */
		tmppn = cnp->cn_pnbuf;
		bcopy(ndp->ni_next, cp + linklen, ndp->ni_pathlen);
		cnp->cn_pnbuf = cp;
		cnp->cn_pnlen = MAXPATHLEN;

		if ( (cnp->cn_flags & HASBUF) )
			FREE_ZONE(tmppn, len, M_NAMEI);
		else
			cnp->cn_flags |= HASBUF;
	} else
		cnp->cn_pnbuf[linklen] = '\0';

	ndp->ni_pathlen += linklen;
	cnp->cn_nameptr = cnp->cn_pnbuf;

	/*
	 * starting point for 'relative'
	 * symbolic link path
	 */
	dp = ndp->ni_dvp;

	/*
	 * get rid of references returned via 'lookup'
	 */
	vnode_put(ndp->ni_vp);
	vnode_put(ndp->ni_dvp);	/* ALWAYS have a dvp for a symlink */

	ndp->ni_vp = NULLVP;
	ndp->ni_dvp = NULLVP;

	/*
	 * Check if symbolic link restarts us at the root
	 */
	if (*(cnp->cn_nameptr) == '/') {
		while (*(cnp->cn_nameptr) == '/') {
			cnp->cn_nameptr++;
			ndp->ni_pathlen--;
		}
		if ((dp = ndp->ni_rootdir) == NULLVP) {
			return ENOENT;
		}
	}
	*new_dp = dp;

	return 0;
}
/*
 * Issue an FSCTL_DFS_GET_REFERRALS ioctl for the user-supplied path.
 *
 * Copies the user's file name in, converts it to a network ('\\'-separated)
 * path, wires up the receive buffer as a uio, and performs the SMB2 ioctl.
 * If the server rejects a large OutputBufferLength with
 * STATUS_INVALID_PARAMETER, retries once with the 63Kb cap.
 *
 * Fixes vs. original:
 *  - the 'again' retry label sat before smb_memdupin()/SMB_MALLOC()/path
 *    conversion, so a retry leaked both buffers and reused a
 *    network_path_len already clobbered by the first conversion; the label
 *    now covers only the per-attempt ioctl setup.
 *  - network_pathp was allocated with tag M_TEMP but freed with M_SMBSTR;
 *    the free now uses the matching tag.
 */
int
smb_usr_get_dfs_referral(struct smb_share *share, struct smb_vc *vcp,
                         struct smb2ioc_get_dfs_referral *get_dfs_refer_ioc,
                         vfs_context_t context)
{
	int error;
	struct smb2_ioctl_rq *ioctlp = NULL;
	struct smb2_get_dfs_referral dfs_referral;
	char *local_pathp = NULL;
	uint32_t local_path_len = get_dfs_refer_ioc->ioc_file_name_len;
	size_t network_path_len = PATH_MAX + 1;
	char *network_pathp = NULL;

	SMB_MALLOC(ioctlp,
	           struct smb2_ioctl_rq *,
	           sizeof(struct smb2_ioctl_rq),
	           M_SMBTEMP,
	           M_WAITOK | M_ZERO);
	if (ioctlp == NULL) {
		SMBERROR("SMB_MALLOC failed\n");
		error = ENOMEM;
		goto bad;
	}

	/* Take the 32 bit world pointers and convert them to user_addr_t. */
	bzero(&dfs_referral, sizeof(dfs_referral));
	dfs_referral.file_namep = NULL;
	dfs_referral.max_referral_level = get_dfs_refer_ioc->ioc_max_referral_level;

	if (!vfs_context_is64bit(context)) {
		get_dfs_refer_ioc->ioc_kern_file_name = CAST_USER_ADDR_T(get_dfs_refer_ioc->ioc_file_name);
	}
	if (!(get_dfs_refer_ioc->ioc_kern_file_name)) {
		error = EINVAL;
		goto bad;
	}

	/* ioc_file_name_len includes the null byte, ioc_kern_file_name is a c-style string */
	if (get_dfs_refer_ioc->ioc_kern_file_name && get_dfs_refer_ioc->ioc_file_name_len) {
		local_pathp = smb_memdupin(get_dfs_refer_ioc->ioc_kern_file_name,
		                           local_path_len);
	}
	if (local_pathp == NULL) {
		SMBERROR("smb_memdupin failed\n");
		error = ENOMEM;
		goto bad;
	}

	/*
	 * Need to convert from local path to a network path
	 */
	SMB_MALLOC(network_pathp, char *, network_path_len, M_TEMP, M_WAITOK | M_ZERO);
	if (network_pathp == NULL) {
		error = ENOMEM;
		goto bad;
	}

	error = smb_convert_path_to_network(local_pathp, local_path_len,
	                                    network_pathp, &network_path_len,
	                                    '\\', SMB_UTF_SFM_CONVERSIONS,
	                                    SMB_UNICODE_STRINGS(vcp));
	if (error) {
		SMBERROR("smb_convert_path_to_network failed : %d\n", error);
		goto bad;
	}
	dfs_referral.file_namep = network_pathp;
	dfs_referral.file_name_len = (uint32_t) network_path_len;

again:
	/* Take 32 bit world pointers and convert them to user_addr_t. */
	if (get_dfs_refer_ioc->ioc_rcv_output_len > 0) {
		if (vfs_context_is64bit(context)) {
			ioctlp->rcv_output_uio = uio_create(1, 0, UIO_USERSPACE64, UIO_READ);
		} else {
			get_dfs_refer_ioc->ioc_kern_rcv_output = CAST_USER_ADDR_T(get_dfs_refer_ioc->ioc_rcv_output);
			ioctlp->rcv_output_uio = uio_create(1, 0, UIO_USERSPACE32, UIO_READ);
		}
		if (ioctlp->rcv_output_uio) {
			uio_addiov(ioctlp->rcv_output_uio,
			           get_dfs_refer_ioc->ioc_kern_rcv_output,
			           get_dfs_refer_ioc->ioc_rcv_output_len);
		} else {
			error = ENOMEM;
			SMBERROR("uio_create failed\n");
			goto bad;
		}
	}

	ioctlp->share = share;
	ioctlp->ctl_code = FSCTL_DFS_GET_REFERRALS;
	ioctlp->fid = -1;
	ioctlp->snd_input_buffer = (uint8_t *) &dfs_referral;
	ioctlp->snd_input_len = sizeof(struct smb2_get_dfs_referral);
	ioctlp->snd_output_len = 0;
	ioctlp->rcv_input_len = 0;
	/* Handle servers that dislike large output buffer lengths */
	if (vcp->vc_misc_flags & SMBV_63K_IOCTL) {
		ioctlp->rcv_output_len = kSMB_63K;
	} else {
		ioctlp->rcv_output_len = get_dfs_refer_ioc->ioc_rcv_output_len;
	}

	/* Now do the real work */
	error = smb2_smb_ioctl(share, ioctlp, NULL, context);

	if ((error) && (ioctlp->ret_ntstatus == STATUS_INVALID_PARAMETER) &&
	    !(vcp->vc_misc_flags & SMBV_63K_IOCTL)) {
		/*
		 * <14281932> Could this be a server that can not handle
		 * larger than 65535 bytes in an IOCTL?
		 */
		SMBWARNING("SMB 2/3 server cant handle large OutputBufferLength in DFS Referral. Reducing to 63Kb.\n");
		vcp->vc_misc_flags |= SMBV_63K_IOCTL;
		ioctlp->ret_ntstatus = 0;
		/* release the per-attempt uios before retrying */
		if (ioctlp->snd_input_uio != NULL) {
			uio_free(ioctlp->snd_input_uio);
			ioctlp->snd_input_uio = NULL;
		}
		if (ioctlp->snd_output_uio != NULL) {
			uio_free(ioctlp->snd_output_uio);
			ioctlp->snd_output_uio = NULL;
		}
		if (ioctlp->rcv_input_uio != NULL) {
			uio_free(ioctlp->rcv_input_uio);
			ioctlp->rcv_input_uio = NULL;
		}
		if (ioctlp->rcv_output_uio != NULL) {
			uio_free(ioctlp->rcv_output_uio);
			ioctlp->rcv_output_uio = NULL;
		}
		goto again;
	}

	/* always return the ntstatus error */
	get_dfs_refer_ioc->ioc_ret_ntstatus = ioctlp->ret_ntstatus;
	if (error) {
		goto bad;
	}
	/* Fill in actual bytes returned */
	get_dfs_refer_ioc->ioc_ret_output_len = ioctlp->ret_output_len;

bad:
	if (ioctlp != NULL) {
		if (ioctlp->snd_input_uio != NULL) {
			uio_free(ioctlp->snd_input_uio);
		}
		if (ioctlp->snd_output_uio != NULL) {
			uio_free(ioctlp->snd_output_uio);
		}
		if (ioctlp->rcv_input_uio != NULL) {
			uio_free(ioctlp->rcv_input_uio);
		}
		if (ioctlp->rcv_output_uio != NULL) {
			uio_free(ioctlp->rcv_output_uio);
		}
		SMB_FREE(ioctlp, M_SMBTEMP);
	}
	if (local_pathp) {
		SMB_FREE(local_pathp, M_SMBSTR);
	}
	if (network_pathp) {
		/* allocated above with tag M_TEMP; free with the same tag */
		SMB_FREE(network_pathp, M_TEMP);
	}
	return error;
}
/*
 * smb_usr_ioctl
 *
 * Execute an arbitrary SMB 2/3 IOCTL (FSCTL) on behalf of a user process.
 * The four user buffers described by ioctl_ioc (send input/output, receive
 * input/output) are each wrapped in a uio and attached to a freshly
 * allocated smb2_ioctl_rq, which is then handed to smb2_smb_ioctl().
 *
 * If the server rejects the request with STATUS_INVALID_PARAMETER and we
 * have not yet tried the smaller size, assume it cannot handle output
 * buffer lengths larger than 65535 bytes (<14281932>): set SMBV_63K_IOCTL
 * on the VC, free all four uios, and retry once via the "again" label.
 *
 * On return, ioc_ret_ntstatus always carries the server's ntstatus; on
 * success the actual input/output lengths are copied back as well.
 *
 * Returns 0 on success or an errno value.
 */
int smb_usr_ioctl(struct smb_share *share, struct smb_vc *vcp,
                  struct smb2ioc_ioctl *ioctl_ioc, vfs_context_t context)
{
    int error;
    struct smb2_ioctl_rq *ioctlp = NULL;

    /* M_ZERO matters: the "bad" cleanup path relies on unused uio
     * pointers being NULL. */
    SMB_MALLOC(ioctlp,
               struct smb2_ioctl_rq *,
               sizeof(struct smb2_ioctl_rq),
               M_SMBTEMP,
               M_WAITOK | M_ZERO);
    if (ioctlp == NULL) {
        SMBERROR("SMB_MALLOC failed\n");
        error = ENOMEM;
        goto bad;
    }

again:
    /* (Re)fill the request; on the 63K retry these are set again after the
     * uios have been freed and NULLed below. */
    ioctlp->share = share;
    ioctlp->ctl_code = ioctl_ioc->ioc_ctl_code;
    ioctlp->fid = ioctl_ioc->ioc_fid;
    ioctlp->snd_input_len = ioctl_ioc->ioc_snd_input_len;
    ioctlp->snd_output_len = ioctl_ioc->ioc_snd_output_len;
    ioctlp->rcv_input_len = ioctl_ioc->ioc_rcv_input_len;

    /* Handle servers that dislike large output buffer lengths */
    if (vcp->vc_misc_flags & SMBV_63K_IOCTL) {
        ioctlp->rcv_output_len = kSMB_63K;
    }
    else {
        ioctlp->rcv_output_len = ioctl_ioc->ioc_rcv_output_len;
    }

    /* Take 32 bit world pointers and convert them to user_addr_t.
     * 64-bit callers already filled in ioc_kern_snd_input. */
    if (ioctl_ioc->ioc_snd_input_len > 0) {
        if (vfs_context_is64bit(context)) {
            ioctlp->snd_input_uio = uio_create(1, 0, UIO_USERSPACE64, UIO_WRITE);
        }
        else {
            ioctl_ioc->ioc_kern_snd_input =
                CAST_USER_ADDR_T(ioctl_ioc->ioc_snd_input);
            ioctlp->snd_input_uio = uio_create(1, 0, UIO_USERSPACE32, UIO_WRITE);
        }

        if (ioctlp->snd_input_uio) {
            uio_addiov(ioctlp->snd_input_uio,
                       ioctl_ioc->ioc_kern_snd_input,
                       ioctl_ioc->ioc_snd_input_len);
        }
        else {
            SMBERROR("uio_create failed\n");
            error = ENOMEM;
            goto bad;
        }
    }

    /* Take 32 bit world pointers and convert them to user_addr_t. */
    if (ioctl_ioc->ioc_snd_output_len > 0) {
        if (vfs_context_is64bit(context)) {
            ioctlp->snd_output_uio = uio_create(1, 0, UIO_USERSPACE64, UIO_WRITE);
        }
        else {
            ioctl_ioc->ioc_kern_snd_output =
                CAST_USER_ADDR_T(ioctl_ioc->ioc_snd_output);
            ioctlp->snd_output_uio = uio_create(1, 0, UIO_USERSPACE32, UIO_WRITE);
        }

        if (ioctlp->snd_output_uio) {
            uio_addiov(ioctlp->snd_output_uio,
                       ioctl_ioc->ioc_kern_snd_output,
                       ioctl_ioc->ioc_snd_output_len);
        }
        else {
            SMBERROR("uio_create failed\n");
            error = ENOMEM;
            goto bad;
        }
    }

    /* Take 32 bit world pointers and convert them to user_addr_t. */
    if (ioctl_ioc->ioc_rcv_input_len > 0) {
        if (vfs_context_is64bit(context)) {
            ioctlp->rcv_input_uio = uio_create(1, 0, UIO_USERSPACE64, UIO_READ);
        }
        else {
            ioctl_ioc->ioc_kern_rcv_input =
                CAST_USER_ADDR_T(ioctl_ioc->ioc_rcv_input);
            ioctlp->rcv_input_uio = uio_create(1, 0, UIO_USERSPACE32, UIO_READ);
        }

        if (ioctlp->rcv_input_uio) {
            uio_addiov(ioctlp->rcv_input_uio,
                       ioctl_ioc->ioc_kern_rcv_input,
                       ioctl_ioc->ioc_rcv_input_len);
        }
        else {
            SMBERROR("uio_create failed\n");
            error = ENOMEM;
            goto bad;
        }
    }

    /* Take 32 bit world pointers and convert them to user_addr_t. */
    if (ioctl_ioc->ioc_rcv_output_len > 0) {
        if (vfs_context_is64bit(context)) {
            ioctlp->rcv_output_uio = uio_create(1, 0, UIO_USERSPACE64, UIO_READ);
        }
        else {
            ioctl_ioc->ioc_kern_rcv_output =
                CAST_USER_ADDR_T(ioctl_ioc->ioc_rcv_output);
            ioctlp->rcv_output_uio = uio_create(1, 0, UIO_USERSPACE32, UIO_READ);
        }

        if (ioctlp->rcv_output_uio) {
            uio_addiov(ioctlp->rcv_output_uio,
                       ioctl_ioc->ioc_kern_rcv_output,
                       ioctl_ioc->ioc_rcv_output_len);
        }
        else {
            SMBERROR("uio_create failed\n");
            error = ENOMEM;
            goto bad;
        }
    }

    /* Now do the real work */
    error = smb2_smb_ioctl(share, ioctlp, NULL, context);

    if ((error) &&
        (ioctlp->ret_ntstatus == STATUS_INVALID_PARAMETER) &&
        !(vcp->vc_misc_flags & SMBV_63K_IOCTL)) {
        /*
         * <14281932> Could this be a server that can not handle
         * larger than 65535 bytes in an IOCTL?
         */
        SMBWARNING("SMB 2/3 server cant handle large OutputBufferLength in IOCTL. Reducing to 63Kb.\n");
        vcp->vc_misc_flags |= SMBV_63K_IOCTL;

        /* Clear the saved status and drop every uio so the retry rebuilds
         * them from scratch; NULLing prevents a double free in "bad". */
        ioctlp->ret_ntstatus = 0;

        if (ioctlp->snd_input_uio != NULL) {
            uio_free(ioctlp->snd_input_uio);
            ioctlp->snd_input_uio = NULL;
        }
        if (ioctlp->snd_output_uio != NULL) {
            uio_free(ioctlp->snd_output_uio);
            ioctlp->snd_output_uio = NULL;
        }
        if (ioctlp->rcv_input_uio != NULL) {
            uio_free(ioctlp->rcv_input_uio);
            ioctlp->rcv_input_uio = NULL;
        }
        if (ioctlp->rcv_output_uio != NULL) {
            uio_free(ioctlp->rcv_output_uio);
            ioctlp->rcv_output_uio = NULL;
        }

        goto again;
    }

    /* always return the ntstatus error */
    ioctl_ioc->ioc_ret_ntstatus = ioctlp->ret_ntstatus;
    if (error) {
        goto bad;
    }

    /* Fill in actual bytes returned */
    ioctl_ioc->ioc_ret_input_len = ioctlp->ret_input_len;
    ioctl_ioc->ioc_ret_output_len = ioctlp->ret_output_len;

bad:
    /* Common exit: free whichever uios were created plus the request. */
    if (ioctlp != NULL) {
        if (ioctlp->snd_input_uio != NULL) {
            uio_free(ioctlp->snd_input_uio);
        }
        if (ioctlp->snd_output_uio != NULL) {
            uio_free(ioctlp->snd_output_uio);
        }
        if (ioctlp->rcv_input_uio != NULL) {
            uio_free(ioctlp->rcv_input_uio);
        }
        if (ioctlp->rcv_output_uio != NULL) {
            uio_free(ioctlp->rcv_output_uio);
        }
        SMB_FREE(ioctlp, M_SMBTEMP);
    }
    return error;
}
/*
 * Called from user land so we always have a reference on the share.
 *
 * Build and send an SMB 2/3 Query Directory request for a user process.
 * The receive buffer described by query_dir_ioc is wrapped in a uio, the
 * search pattern is copied in from user space, and the request is handed
 * to smb2_smb_query_dir(). The server's ntstatus and the returned lengths
 * are copied back into query_dir_ioc. Returns 0 or an errno value.
 */
int smb_usr_query_dir(struct smb_share *share,
                      struct smb2ioc_query_dir *query_dir_ioc,
                      vfs_context_t context)
{
    struct smb2_query_dir_rq *qdp = NULL;
    int error;

    /* Zero-filled so the cleanup path can test pointers safely. */
    SMB_MALLOC(qdp,
               struct smb2_query_dir_rq *,
               sizeof(struct smb2_query_dir_rq),
               M_SMBTEMP,
               M_WAITOK | M_ZERO);
    if (qdp == NULL) {
        SMBERROR("SMB_MALLOC failed\n");
        error = ENOMEM;
        goto bad;
    }

    /*
     * Wrap the caller's receive buffer in a uio. 32-bit callers pass a
     * 32-bit pointer which must be widened to a user_addr_t first.
     */
    if (query_dir_ioc->ioc_rcv_output_len > 0) {
        if (vfs_context_is64bit(context)) {
            qdp->rcv_output_uio = uio_create(1, 0, UIO_USERSPACE64, UIO_READ);
        }
        else {
            query_dir_ioc->ioc_kern_rcv_output =
                CAST_USER_ADDR_T(query_dir_ioc->ioc_rcv_output);
            qdp->rcv_output_uio = uio_create(1, 0, UIO_USERSPACE32, UIO_READ);
        }

        if (qdp->rcv_output_uio == NULL) {
            SMBERROR("uio_create failed\n");
            error = ENOMEM;
            goto bad;
        }

        uio_addiov(qdp->rcv_output_uio,
                   query_dir_ioc->ioc_kern_rcv_output,
                   query_dir_ioc->ioc_rcv_output_len);
    }

    /* Widen the 32-bit name pointer to a user_addr_t as well. */
    if (!vfs_context_is64bit(context)) {
        query_dir_ioc->ioc_kern_name = CAST_USER_ADDR_T(query_dir_ioc->ioc_name);
    }

    /* ioc_name_len includes the null byte, ioc_kern_name is a c-style string */
    if (query_dir_ioc->ioc_kern_name && query_dir_ioc->ioc_name_len) {
        qdp->name_len = query_dir_ioc->ioc_name_len;
        qdp->namep = smb_memdupin(query_dir_ioc->ioc_kern_name,
                                  query_dir_ioc->ioc_name_len);
        if (qdp->namep == NULL) {
            SMBERROR("smb_memdupin failed\n");
            error = ENOMEM;
            goto bad;
        }
    }

    /* Copy the remaining request parameters straight across. */
    qdp->file_info_class = query_dir_ioc->ioc_file_info_class;
    qdp->flags = query_dir_ioc->ioc_flags;
    qdp->file_index = query_dir_ioc->ioc_file_index;
    qdp->output_buffer_len = query_dir_ioc->ioc_rcv_output_len;
    qdp->fid = query_dir_ioc->ioc_fid;
    qdp->name_len = query_dir_ioc->ioc_name_len;
    qdp->name_flags = query_dir_ioc->ioc_name_flags;

    /*
     * Never used for user ioctl query dir. User must have already opened
     * the dir to be searched.
     */
    qdp->dnp = NULL;

    /*
     * Since this is from user space, there is no mounted file system, so
     * there are no vnodes and thus no dnp. That means the name must have
     * been supplied. If ioc_rcv_output_len is not 0, results are copied
     * directly into the user buffer for the caller to parse.
     */
    if ((qdp->namep == NULL) || (qdp->name_len == 0)) {
        SMBERROR("missing name \n");
        error = EINVAL;
        goto bad;
    }

    /* Now do the real work */
    error = smb2_smb_query_dir(share, qdp, NULL, context);

    /* always return the ntstatus error */
    query_dir_ioc->ioc_ret_ntstatus = qdp->ret_ntstatus;
    if (error) {
        goto bad;
    }

    /* Amount of data in the Query Dir reply, then actual data returned. */
    query_dir_ioc->ioc_ret_output_len = qdp->ret_buffer_len;
    query_dir_ioc->ioc_rcv_output_len = qdp->output_buffer_len;

bad:
    if (qdp != NULL) {
        if (qdp->ret_rqp != NULL) {
            smb_rq_done(qdp->ret_rqp);
        }
        if (qdp->namep) {
            SMB_FREE(qdp->namep, M_SMBSTR);
        }
        if (qdp->rcv_output_uio != NULL) {
            uio_free(qdp->rcv_output_uio);
        }
        SMB_FREE(qdp, M_SMBTEMP);
    }
    return error;
}
/*
 * Called from user land so we always have a reference on the share.
 *
 * Perform a single synchronous SMB 2/3 READ or WRITE for a user process.
 * cmd selects the direction (SMB2IOC_READ reads, anything else writes);
 * the user buffer described by rw_ioc is wrapped in a uio and handed to
 * smb2_smb_read()/smb2_smb_write(). On return ioc_ret_ntstatus carries the
 * server's status and, on success, ioc_ret_len the bytes transferred.
 *
 * Returns 0 on success or an errno value.
 */
int smb_usr_read_write(struct smb_share *share, u_long cmd,
                       struct smb2ioc_rw *rw_ioc, vfs_context_t context)
{
    int error;
    struct smb2_rw_rq *read_writep = NULL;

    SMB_MALLOC(read_writep,
               struct smb2_rw_rq *,
               sizeof(struct smb2_rw_rq),
               M_SMBTEMP,
               M_WAITOK | M_ZERO);
    if (read_writep == NULL) {
        SMBERROR("SMB_MALLOC failed\n");
        error = ENOMEM;
        goto bad;
    }

    /* Take 32 bit world pointers and convert them to user_addr_t. */
    if (vfs_context_is64bit(context)) {
        read_writep->auio = uio_create(1, rw_ioc->ioc_offset, UIO_USERSPACE64,
                                       (cmd == SMB2IOC_READ) ? UIO_READ : UIO_WRITE);
    }
    else {
        rw_ioc->ioc_kern_base = CAST_USER_ADDR_T(rw_ioc->ioc_base);
        read_writep->auio = uio_create(1, rw_ioc->ioc_offset, UIO_USERSPACE32,
                                       (cmd == SMB2IOC_READ) ? UIO_READ : UIO_WRITE);
    }

    if (read_writep->auio) {
        /* <14516550> All IO requests from user space are done synchronously */
        read_writep->flags |= SMB2_SYNC_IO;

        uio_addiov(read_writep->auio, rw_ioc->ioc_kern_base, rw_ioc->ioc_len);

        read_writep->remaining = rw_ioc->ioc_remaining;
        read_writep->write_flags = rw_ioc->ioc_write_flags;
        read_writep->fid = rw_ioc->ioc_fid;

        /* Now do the real work */
        if (cmd == SMB2IOC_READ) {
            error = smb2_smb_read(share, read_writep, context);
        }
        else {
            error = smb2_smb_write(share, read_writep, context);
        }

        /* always return the ntstatus error */
        rw_ioc->ioc_ret_ntstatus = read_writep->ret_ntstatus;
        if (error) {
            goto bad;
        }

        /* Fill in actual bytes read or written */
        rw_ioc->ioc_ret_len = read_writep->ret_len;
    }
    else {
        /*
         * Fix: log the failure like every other smb_usr_* routine;
         * previously this path returned ENOMEM silently.
         */
        SMBERROR("uio_create failed\n");
        error = ENOMEM;
    }

bad:
    if (read_writep != NULL) {
        if (read_writep->auio != NULL) {
            uio_free(read_writep->auio);
        }
        SMB_FREE(read_writep, M_SMBTEMP);
    }
    return error;
}
/*
 * vn_rdwr_64FromKernelCode
 *
 * Kernel-space variant of vn_rdwr(): read from or write to vnode 'vp'
 * using a 64-bit base address and length, always through a UIO_SYSSPACE
 * uio (the segflg and proc arguments are currently unused and kept only
 * for interface compatibility).
 *
 * If 'aresid' is non-NULL the residual byte count is returned through it
 * (LP64todo: it is narrowed to int); otherwise a short transfer with no
 * other error is reported as EIO.
 *
 * Fixes over the previous version:
 *  - 'context' was passed to VNOP_READ/VNOP_WRITE *uninitialized*
 *    (undefined behavior); we now use the calling thread's context via
 *    vfs_context_current().
 *  - the uio_create() result is NULL-checked and freed, plugging a leak.
 *
 * Returns 0 on success or an errno value.
 */
int vn_rdwr_64FromKernelCode(
    enum uio_rw rw,
    struct vnode *vp,
    uint64_t base,
    int64_t len,
    off_t offset,
    enum uio_seg segflg,
    int ioflg,
    int *aresid,
    proc_t p)
{
    uio_t auio;
    vfs_context_t context;
    int error = 0;

    (void)segflg;   /* always UIO_SYSSPACE below */
    (void)p;

    /* Use the current thread's VFS context (was left uninitialized). */
    context = vfs_context_current();

    auio = uio_create(1, offset, UIO_SYSSPACE, rw);
    if (auio == NULL) {
        return (ENOMEM);
    }
    uio_addiov(auio, base, len);

    if (rw == UIO_READ) {
        error = VNOP_READ(vp, auio, ioflg, context);
    }
    else {
        error = VNOP_WRITE(vp, auio, ioflg, context);
    }

    if (aresid) {
        // LP64todo - fix this: uio_resid() is user_ssize_t, *aresid is int
        *aresid = uio_resid(auio);
    }
    else if (uio_resid(auio) && error == 0) {
        error = EIO;
    }

    /* uio_create() allocates; the previous version leaked this. */
    uio_free(auio);

    return (error);
}
/* relies on v1 paging */
/*
 * nullfs_pagein
 *
 * VNOP_PAGEIN for nullfs: satisfy a page-in against the shadow vnode by
 * reading the corresponding range from the lower (real) vnode directly
 * into the mapped UPL, then committing the pages that were filled and
 * aborting the rest.
 *
 * Only regular files are supported (ENOTSUP otherwise). Returns 0 on
 * success or an errno value (default EIO).
 */
static int nullfs_pagein(struct vnop_pagein_args * ap)
{
	int error = EIO;
	struct vnode *vp, *lvp;

	NULLFSDEBUG("%s %p\n", __FUNCTION__, ap->a_vp);

	vp = ap->a_vp;
	lvp = NULLVPTOLOWERVP(vp);

	if (vnode_vtype(vp) != VREG) {
		return ENOTSUP;
	}

	/*
	 * Ask VM/UBC/VFS to do our bidding
	 */
	if (vnode_getwithvid(lvp, NULLVPTOLOWERVID(vp)) == 0) {
		vm_offset_t ioaddr;
		uio_t auio;
		kern_return_t kret;
		off_t bytes_to_commit;
		off_t lowersize;
		upl_t upl = ap->a_pl;
		/* 0 means "everything read"; also the value used when the read
		 * is skipped entirely via exit_no_unmap. */
		user_ssize_t bytes_remaining = 0;

		auio = uio_create(1, ap->a_f_offset, UIO_SYSSPACE, UIO_READ);
		if (auio == NULL) {
			error = EIO;
			goto exit_no_unmap;
		}

		/* Map the UPL into the kernel so the lower read can target it. */
		kret = ubc_upl_map(upl, &ioaddr);
		if (KERN_SUCCESS != kret) {
			panic("nullfs_pagein: ubc_upl_map() failed with (%d)", kret);
		}

		ioaddr += ap->a_pl_offset;

		error = uio_addiov(auio, (user_addr_t)ioaddr, ap->a_size);
		if (error) {
			goto exit;
		}

		/* Keep the shadow vnode's ubc size in sync with the lower file. */
		lowersize = ubc_getsize(lvp);
		if (lowersize != ubc_getsize(vp)) {
			(void)ubc_setsize(vp, lowersize); /* ignore failures, nothing can be done */
		}

		error = VNOP_READ(lvp, auio, ((ap->a_flags & UPL_IOSYNC) ? IO_SYNC : 0), ap->a_context);

		bytes_remaining = uio_resid(auio);
		if (bytes_remaining > 0 && bytes_remaining <= (user_ssize_t)ap->a_size) {
			/* zero bytes that weren't read in to the upl */
			bzero((void*)((uintptr_t)(ioaddr + ap->a_size - bytes_remaining)), (size_t) bytes_remaining);
		}

	exit:
		kret = ubc_upl_unmap(upl);
		if (KERN_SUCCESS != kret) {
			panic("nullfs_pagein: ubc_upl_unmap() failed with (%d)", kret);
		}

		if (auio != NULL) {
			uio_free(auio);
		}

	exit_no_unmap:
		/* Unless the caller asked us not to, commit the pages that were
		 * filled and abort the rest so the UPL is fully disposed of. */
		if ((ap->a_flags & UPL_NOCOMMIT) == 0) {
			if (!error && (bytes_remaining >= 0) && (bytes_remaining <= (user_ssize_t)ap->a_size)) {
				/* only commit what was read in (page aligned)*/
				bytes_to_commit = ap->a_size - bytes_remaining;
				if (bytes_to_commit) {
					/* need to make sure bytes_to_commit and byte_remaining are page aligned before calling ubc_upl_commit_range*/
					if (bytes_to_commit & PAGE_MASK) {
						/* round the commit up to a page boundary and shrink
						 * the remainder accordingly */
						bytes_to_commit = (bytes_to_commit & (~PAGE_MASK)) + (PAGE_MASK + 1);
						assert(bytes_to_commit <= (off_t)ap->a_size);
						bytes_remaining = ap->a_size - bytes_to_commit;
					}
					ubc_upl_commit_range(upl, ap->a_pl_offset, (upl_size_t)bytes_to_commit, UPL_COMMIT_FREE_ON_EMPTY);
				}

				/* abort anything thats left */
				if (bytes_remaining) {
					ubc_upl_abort_range(upl, ap->a_pl_offset + bytes_to_commit, (upl_size_t)bytes_remaining, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
				}
			} else {
				/* read failed (or remainder out of range): abort it all */
				ubc_upl_abort_range(upl, ap->a_pl_offset, (upl_size_t)ap->a_size, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
			}
		}
		vnode_put(lvp);
	} else if((ap->a_flags & UPL_NOCOMMIT) == 0) {
		/* couldn't get the lower vnode: abort the whole UPL */
		ubc_upl_abort_range(ap->a_pl, ap->a_pl_offset, (upl_size_t)ap->a_size, UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
	}
	return error;
}