/*
 * Truncate the cache file behind afile to asize bytes (Darwin flavor).
 * Only ever shrinks a file; returns 0 or the error from afs_osi_Stat()
 * / the vnode setattr operation.
 *
 * Fix: removed the unused local `afs_ucred_t *oldCred` — this variant
 * never swaps credentials (unlike the u.u_procp-based port), so the
 * declaration was dead.
 */
int
osi_UFSTruncate(struct osi_file *afile, afs_int32 asize)
{
    struct vattr tvattr;
    afs_int32 code;
    struct osi_stat tstat;

    AFS_STATCNT(osi_Truncate);

    /*
     * This routine only shrinks files, and most systems
     * have very slow truncates, even when the file is already
     * small enough.  Check now and save some time.
     */
    code = afs_osi_Stat(afile, &tstat);
    if (code || tstat.size <= asize)
	return code;

    /* afs_xosi serializes attribute changes on cache files. */
    ObtainWriteLock(&afs_xosi, 321);
    /* Drop the AFS global lock around the potentially blocking vnode op. */
    AFS_GUNLOCK();
#ifdef AFS_DARWIN80_ENV
    VATTR_INIT(&tvattr);
    VATTR_SET(&tvattr, va_size, asize);
    code = vnode_setattr(afile->vnode, &tvattr, afs_osi_ctxtp);
#else
    VATTR_NULL(&tvattr);
    tvattr.va_size = asize;
    code = VOP_SETATTR(afile->vnode, &tvattr, &afs_osi_cred, current_proc());
#endif
    AFS_GLOCK();
    ReleaseWriteLock(&afs_xosi);
    return code;
}
/*
 * Truncate the cache file behind afile to asize bytes.
 * Only ever shrinks a file.  On success the cached afile->size is
 * updated to match.  Returns 0 or the error from afs_osi_Stat() /
 * VOP_SETATTR().
 */
int
osi_UFSTruncate(struct osi_file *afile, afs_int32 asize)
{
    struct vattr tvattr;
    afs_int32 code;
    struct osi_stat tstat;

    AFS_STATCNT(osi_Truncate);

    /*
     * This routine only shrinks files, and most systems
     * have very slow truncates, even when the file is already
     * small enough.  Check now and save some time.
     */
    code = afs_osi_Stat(afile, &tstat);
    if (code || tstat.size <= asize)
	return code;

    /* afs_xosi serializes attribute changes on cache files. */
    ObtainWriteLock(&afs_xosi, 321);
    VATTR_NULL(&tvattr);
    tvattr.va_size = asize;
    /*
     * Drop the AFS global lock before taking the vnode lock and calling
     * the (possibly blocking) setattr, to preserve lock ordering.
     */
    AFS_GUNLOCK();
    VOP_LOCK(afile->vnode, LK_EXCLUSIVE | LK_RETRY, curproc);
    code = VOP_SETATTR(afile->vnode, &tvattr, afs_osi_credp, curproc);
    VOP_UNLOCK(afile->vnode, 0, curproc);
    AFS_GLOCK();
    if (code == 0)
	afile->size = asize;	/* keep the cached size in step */
    ReleaseWriteLock(&afs_xosi);
    return code;
}
/*
 * Truncate the cache file behind afile to asize bytes.
 * Only ever shrinks a file.  This port temporarily installs the AFS
 * credentials on the current process around the VOP_SETATTR call
 * (see comment below).  Returns 0 or an error code.
 */
int
osi_UFSTruncate(struct osi_file *afile, afs_int32 asize)
{
    afs_ucred_t *oldCred;
    struct vattr tvattr;
    afs_int32 code;
    struct osi_stat tstat;

    AFS_STATCNT(osi_Truncate);

    /*
     * This routine only shrinks files, and most systems
     * have very slow truncates, even when the file is already
     * small enough.  Check now and save some time.
     */
    code = afs_osi_Stat(afile, &tstat);
    if (code || tstat.size <= asize)
	return code;

    ObtainWriteLock(&afs_xosi, 321);
    VATTR_NULL(&tvattr);
    /*
     * Note that this credential swapping stuff is only necessary because
     * of ufs's references directly to u.u_cred instead of to the
     * credentials parameter.  Probably should fix ufs some day.
     */
    oldCred = p_cred(u.u_procp);
    set_p_cred(u.u_procp, &afs_osi_cred);
    tvattr.va_size = asize;
    AFS_GUNLOCK();
    code = VOP_SETATTR(afile->vnode, &tvattr, &afs_osi_cred, 0);
    AFS_GLOCK();
    set_p_cred(u.u_procp, oldCred);	/* restore */
    ReleaseWriteLock(&afs_xosi);
    return code;
}
/*
 * Truncate the cache file behind afile to asize bytes (FreeBSD flavor).
 * Only ever shrinks a file.  The vnode lock is held across the
 * getattr/setattr pair; the AFS global lock, if held on entry, is
 * dropped around the vnode operations and retaken before return.
 * Returns 0 or the error from VOP_GETATTR()/VOP_SETATTR().
 */
int
osi_UFSTruncate(register struct osi_file *afile, afs_int32 asize)
{
    struct vattr tvattr;
    struct vnode *vp;
    register afs_int32 code, glocked;

    AFS_STATCNT(osi_Truncate);

    ObtainWriteLock(&afs_xosi, 321);
    vp = afile->vnode;
    /*
     * This routine only shrinks files, and most systems
     * have very slow truncates, even when the file is already
     * small enough.  Check now and save some time.
     */
    glocked = ISAFS_GLOCK();
    if (glocked)
	AFS_GUNLOCK();

    /* Lock the vnode and stat it; API shape varies by FreeBSD release. */
#if defined(AFS_FBSD80_ENV)
    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    code = VOP_GETATTR(afile->vnode, &tvattr, afs_osi_credp);
#elif defined(AFS_FBSD50_ENV)
    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
    code = VOP_GETATTR(afile->vnode, &tvattr, afs_osi_credp, curthread);
#else
    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
    code = VOP_GETATTR(afile->vnode, &tvattr, afs_osi_credp, curproc);
#endif
    if (code != 0 || tvattr.va_size <= asize)
	goto out;	/* error, or already small enough */

    VATTR_NULL(&tvattr);
    tvattr.va_size = asize;
#if defined(AFS_FBSD80_ENV)
    code = VOP_SETATTR(vp, &tvattr, afs_osi_credp);
#elif defined(AFS_FBSD50_ENV)
    code = VOP_SETATTR(vp, &tvattr, afs_osi_credp, curthread);
#else
    code = VOP_SETATTR(vp, &tvattr, afs_osi_credp, curproc);
#endif
out:
#if defined(AFS_FBSD80_ENV)
    VOP_UNLOCK(vp, 0);
#elif defined(AFS_FBSD50_ENV)
    VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
#else
    VOP_UNLOCK(vp, LK_EXCLUSIVE, curproc);
#endif
    if (glocked)
	AFS_GLOCK();
    ReleaseWriteLock(&afs_xosi);
    return code;
}
/* * Invent attributes for ptyfsnode (vp) and store * them in (vap). * Directories lengths are returned as zero since * any real length would require the genuine size * to be computed, and nothing cares anyway. * * this is relatively minimal for ptyfs. */ int ptyfs_getattr(void *v) { struct vop_getattr_args /* { struct vnode *a_vp; struct vattr *a_vap; kauth_cred_t a_cred; } */ *ap = v; struct ptyfsnode *ptyfs = VTOPTYFS(ap->a_vp); struct vattr *vap = ap->a_vap; PTYFS_ITIMES(ptyfs, NULL, NULL, NULL); /* start by zeroing out the attributes */ VATTR_NULL(vap); /* next do all the common fields */ vap->va_type = ap->a_vp->v_type; vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsidx.__fsid_val[0]; vap->va_fileid = ptyfs->ptyfs_fileno; vap->va_gen = 0; vap->va_flags = 0; vap->va_nlink = 1; vap->va_blocksize = PAGE_SIZE; vap->va_atime = ptyfs->ptyfs_atime; vap->va_mtime = ptyfs->ptyfs_mtime; vap->va_ctime = ptyfs->ptyfs_ctime; vap->va_birthtime = ptyfs->ptyfs_birthtime; vap->va_mode = ptyfs->ptyfs_mode; vap->va_flags = ptyfs->ptyfs_flags; vap->va_uid = ptyfs->ptyfs_uid; vap->va_gid = ptyfs->ptyfs_gid; switch (ptyfs->ptyfs_type) { case PTYFSpts: case PTYFSptc: if (pty_isfree(ptyfs->ptyfs_pty, 1)) return ENOENT; vap->va_bytes = vap->va_size = 0; vap->va_rdev = ap->a_vp->v_rdev; break; case PTYFSroot: vap->va_rdev = 0; vap->va_bytes = vap->va_size = DEV_BSIZE; break; default: return EOPNOTSUPP; } return 0; }
/*
 * Populate a vattr_t from replay-log fields.  Only the attributes named
 * in `mask` (AT_TYPE/AT_MODE/AT_UID/AT_GID) are translated; va_rdev and
 * va_nodeid are always set.  Ephemeral (mapped) uids/gids are recorded
 * as -1.
 *
 * Fix: the original wrote `(uid_t)(IS_EPHEMERAL(uid)) ? -1 : uid`,
 * which casts the *condition*, not the result, and reads as if it cast
 * the ternary.  Rewritten with the casts on the values; behavior is
 * identical (the assignment converted to uid_t/gid_t anyway).
 */
static void
zfs_init_vattr(vattr_t *vap, uint64_t mask, uint64_t mode,
    uint64_t uid, uint64_t gid, uint64_t rdev, uint64_t nodeid)
{
	VATTR_NULL(vap);
	vap->va_mask = (uint_t)mask;
	if (mask & AT_TYPE)
		vap->va_type = IFTOVT(mode);
	if (mask & AT_MODE)
		vap->va_mode = mode & MODEMASK;
	if (mask & AT_UID)
		vap->va_uid = IS_EPHEMERAL(uid) ? (uid_t)-1 : (uid_t)uid;
	if (mask & AT_GID)
		vap->va_gid = IS_EPHEMERAL(gid) ? (gid_t)-1 : (gid_t)gid;
	vap->va_rdev = zfs_cmpldev(rdev);
	vap->va_nodeid = nodeid;
}
/*
 * Hacked up version of vn_open. We _only_ handle ptys and only open
 * them with FREAD|FWRITE and never deal with creat or stuff like that.
 *
 * We need it because we have to fake up root credentials to open the pty.
 */
static int
ptm_vn_open(struct nameidata *ndp)
{
    struct proc *p = ndp->ni_cnd.cn_proc;
    struct ucred *cred;
    struct vattr vattr;
    struct vnode *vp;
    int error;

    if ((error = namei(ndp)) != 0)
	return (error);
    vp = ndp->ni_vp;
    /* ptys are character devices; anything else is a setup error. */
    if (vp->v_type != VCHR) {
	error = EINVAL;
	goto bad;
    }
    /*
     * Get us a fresh cred with root privileges.
     * (crget() returns a new credential; freed below whether or not
     * the open succeeded.)
     */
    cred = crget();
    error = VOP_OPEN(vp, FREAD|FWRITE, cred, p);
    if (!error) {
	/* update atime/mtime */
	VATTR_NULL(&vattr);
	getnanotime(&vattr.va_atime);
	vattr.va_mtime = vattr.va_atime;
	vattr.va_vaflags |= VA_UTIMES_NULL;
	/* Best effort: the setattr result is deliberately ignored. */
	(void)VOP_SETATTR(vp, &vattr, p->p_ucred, p);
    }
    crfree(cred);
    if (error)
	goto bad;
    vp->v_writecount++;		/* we opened FWRITE */
    return (0);
bad:
    vput(vp);			/* unlock + release the namei reference */
    return (error);
}
/*
 * afs_notify_change
 * Linux version of setattr call.  What to change is described by the
 * iattr struct; the change is applied to the AFS vcache and, on
 * success, the fresh attributes are copied back into the Linux inode.
 * Returns a negated AFS error code (Linux convention).
 */
int
afs_notify_change(struct dentry *dp, struct iattr *iattrp)
{
    struct vattr vattr;
    struct inode *ip = dp->d_inode;
    cred_t *credp = crref();
    int code;

    VATTR_NULL(&vattr);
    iattr2vattr(&vattr, iattrp);	/* Convert for AFS vnodeops call. */

    AFS_GLOCK();
    code = afs_setattr(VTOAFS(ip), &vattr, credp);
    if (code == 0) {
	/* Refresh the Linux inode from the now-current AFS attributes. */
	afs_getattr(VTOAFS(ip), &vattr, credp);
	vattr2inode(ip, &vattr);
    }
    AFS_GUNLOCK();
    crfree(credp);
    return -code;
}
/*
 * Translate a Linux struct iattr (src) into the vnode-layer VATTR_T
 * (dst).  Starts from a null vattr and sets only the fields whose
 * ATTR_* bits are present in src->ia_valid, accumulating the matching
 * AT_* bits in dst->va_mask.
 */
STATIC void
vnode_iop_iattr2vattr(
    struct iattr *src,
    VATTR_T *dst
)
{
    struct timespec curtime = CURRENT_TIME;

    VATTR_NULL(dst);
    if (src->ia_valid & ATTR_MODE) {
        /* Mode carries both the file type and the permission bits. */
        VATTR_SET_TYPE(dst, vnlayer_mode_to_vtype(src->ia_mode));
        VATTR_SET_MODE_RIGHTS(dst, src->ia_mode);
        dst->va_mask |= AT_MODE|AT_TYPE;
    }
/*
 * Token-pasting helper: for each ATTR_<UUU> bit set, copy ia_<lll>
 * via VATTR_SET_<UUU>() and record AT_<UUU> in the mask.
 */
#define SET(UUU,lll) \
 if (src->ia_valid & ATTR_ ## UUU) { \
     VATTR_SET_ ## UUU(dst, src->ia_ ## lll); \
     dst->va_mask |= AT_ ## UUU; \
 }
    SET(UID,uid)
    SET(GID,gid)
    SET(SIZE,size)
#undef SET
    if (src->ia_valid & ATTR_ATIME) {
        /*
         * If ATTR_ATIME is provided, but not ATTR_ATIME_SET, then we need
         * the current time. We have to pass ATTR_ATIME_SET through because
         * Linux uses it to control its validation.
         */
        if (src->ia_valid & ATTR_ATIME_SET) {
            VATTR_SET_ATIME_TS(dst, &(src->ia_atime));
        } else {
            VATTR_SET_ATIME_TS(dst, &curtime);
        }
        dst->va_mask |= AT_ATIME;
    }
    if (src->ia_valid & ATTR_ATIME_SET)
        dst->va_mask |= AT_ATIME_SET;
    if (src->ia_valid & ATTR_MTIME) {
        /*
         * If ATTR_MTIME is provided, but not ATTR_MTIME_SET, then we need
         * the current time. We have to pass ATTR_MTIME_SET through because
         * Linux uses it to control its validation.
         */
        if (src->ia_valid & ATTR_MTIME_SET) {
            VATTR_SET_MTIME_TS(dst, &(src->ia_mtime));
        } else {
            VATTR_SET_MTIME_TS(dst, &curtime);
        }
        dst->va_mask |= AT_MTIME;
    }
    if (src->ia_valid & ATTR_MTIME_SET)
        dst->va_mask |= AT_MTIME_SET;
    /* No current time hack needed for ctime. */
    if (src->ia_valid & ATTR_CTIME) {
        VATTR_SET_CTIME_TS(dst, &(src->ia_ctime));
        dst->va_mask |= AT_CTIME;
    }
    if (src->ia_valid & ATTR_ATTR_FLAG) {
        /* Not translated; log so the gap is visible. */
        MDKI_VFS_LOG(VFS_LOG_ERR,
                     "What to do with ATTR_FLAGs?: caller %p\n",
                     mdki_getmycaller());
    }
    return;
}
/*
 * Invent attributes for pfsnode (vp) and store
 * them in (vap).
 * Directories lengths are returned as zero since
 * any real length would require the genuine size
 * to be computed, and nothing cares anyway.
 *
 * this is relatively minimal for procfs.
 */
int
procfs_getattr(void *v)
{
	struct vop_getattr_args *ap = v;
	struct pfsnode *pfs = VTOPFS(ap->a_vp);
	struct vattr *vap = ap->a_vap;
	struct proc *procp;
	int error;

	/* first check the process still exists */
	switch (pfs->pfs_type) {
	case Proot:
	case Pcurproc:
	case Pcpuinfo:
	case Pmeminfo:
		/* These nodes are not tied to a particular process. */
		procp = 0;
		break;
	default:
		procp = pfind(pfs->pfs_pid);
		if (procp == 0)
			return (ENOENT);
	}

	error = 0;

	/* start by zeroing out the attributes */
	VATTR_NULL(vap);

	/* next do all the common fields */
	vap->va_type = ap->a_vp->v_type;
	vap->va_mode = pfs->pfs_mode;
	vap->va_fileid = pfs->pfs_fileno;
	vap->va_flags = 0;
	vap->va_blocksize = PAGE_SIZE;
	vap->va_bytes = vap->va_size = 0;

	/*
	 * Make all times be current TOD.
	 * It would be possible to get the process start
	 * time from the p_stat structure, but there's
	 * no "file creation" time stamp anyway, and the
	 * p_stat structure is not addressible if u. gets
	 * swapped out for that process.
	 */
	getnanotime(&vap->va_ctime);
	vap->va_atime = vap->va_mtime = vap->va_ctime;

	/* Per-type ownership/permissions.  procp is non-NULL for every
	 * case here that dereferences it (the processless types break). */
	switch (pfs->pfs_type) {
	case Pregs:
	case Pfpregs:
#ifndef PTRACE
		break;
#endif
		/* with PTRACE, Pregs/Pfpregs fall through to Pmem */
	case Pmem:
		/*
		 * If the process has exercised some setuid or setgid
		 * privilege, then rip away read/write permission so
		 * that only root can gain access.
		 */
		if (procp->p_flag & P_SUGID)
			vap->va_mode &= ~(S_IRUSR|S_IWUSR);
		/* FALLTHROUGH */
	case Pctl:
	case Pstatus:
	case Pnote:
	case Pnotepg:
	case Pcmdline:
		vap->va_nlink = 1;
		vap->va_uid = procp->p_ucred->cr_uid;
		vap->va_gid = procp->p_ucred->cr_gid;
		break;
	case Pmeminfo:
	case Pcpuinfo:
		vap->va_nlink = 1;
		vap->va_uid = vap->va_gid = 0;
		break;
	case Pproc:
	case Pfile:
	case Proot:
	case Pcurproc:
	case Pself:
		break;
	}

	/*
	 * now do the object specific fields
	 *
	 * The size could be set from struct reg, but it's hardly
	 * worth the trouble, and it puts some (potentially) machine
	 * dependent data into this machine-independent code.  If it
	 * becomes important then this function should break out into
	 * a per-file stat function in the corresponding .c file.
	 */
	switch (pfs->pfs_type) {
	case Proot:
		/*
		 * Set nlink to 1 to tell fts(3) we don't actually know.
		 */
		vap->va_nlink = 1;
		vap->va_uid = 0;
		vap->va_gid = 0;
		vap->va_size = vap->va_bytes = DEV_BSIZE;
		break;

	case Pcurproc: {
		/* Size is the length of the printed target pid. */
		char buf[16];	/* should be enough */
		int len;

		len = snprintf(buf, sizeof buf, "%ld",
		    (long)curproc->p_pid);
		if (len == -1 || len >= sizeof buf) {
			error = EINVAL;
			break;
		}
		vap->va_nlink = 1;
		vap->va_uid = 0;
		vap->va_gid = 0;
		vap->va_size = vap->va_bytes = len;
		break;
	}

	case Pself:
		vap->va_nlink = 1;
		vap->va_uid = 0;
		vap->va_gid = 0;
		vap->va_size = vap->va_bytes = sizeof("curproc");
		break;

	case Pproc:
		vap->va_nlink = 2;
		vap->va_uid = procp->p_ucred->cr_uid;
		vap->va_gid = procp->p_ucred->cr_gid;
		vap->va_size = vap->va_bytes = DEV_BSIZE;
		break;

	case Pfile:
		error = EOPNOTSUPP;
		break;

	case Pmem:
		/* Size of the whole VM image: text + data + stack. */
		vap->va_bytes = vap->va_size =
		    ptoa(procp->p_vmspace->vm_tsize +
			procp->p_vmspace->vm_dsize +
			procp->p_vmspace->vm_ssize);
		break;

	case Pregs:
#ifdef PTRACE
		vap->va_bytes = vap->va_size = sizeof(struct reg);
#endif
		break;

	case Pfpregs:
#if defined(PT_GETFPREGS) || defined(PT_SETFPREGS)
#ifdef PTRACE
		vap->va_bytes = vap->va_size = sizeof(struct fpreg);
#endif
#endif
		break;

	case Pctl:
	case Pstatus:
	case Pnote:
	case Pnotepg:
	case Pcmdline:
	case Pmeminfo:
	case Pcpuinfo:
		vap->va_bytes = vap->va_size = 0;
		break;

#ifdef DIAGNOSTIC
	default:
		panic("procfs_getattr");
#endif
	}

	return (error);
}
/*
 * Invent attributes for pfsnode (vp) and store
 * them in (vap).
 * Directories lengths are returned as zero since
 * any real length would require the genuine size
 * to be computed, and nothing cares anyway.
 *
 * this is relatively minimal for procfs.
 *
 * procfs_getattr(struct vnode *a_vp, struct vattr *a_vap)
 *
 * NOTE(review): this copy of the function appears truncated here — the
 * code after the P_SUGID switch (the per-type size switch, the `done:`
 * label targeted by the goto above, and the return) is not present in
 * this chunk.
 */
static int
procfs_getattr(struct vop_getattr_args *ap)
{
	struct pfsnode *pfs = VTOPFS(ap->a_vp);
	struct vattr *vap = ap->a_vap;
	struct proc *procp;
	int error;

	/*
	 * First make sure that the process and its credentials
	 * still exist.
	 */
	switch (pfs->pfs_type) {
	case Proot:
	case Pcurproc:
		procp = NULL;
		break;
	default:
		procp = pfs_pfind(pfs->pfs_pid);
		if (procp == NULL || procp->p_ucred == NULL) {
			error = ENOENT;
			goto done;
		}
		break;
	}

	error = 0;

	/* start by zeroing out the attributes */
	VATTR_NULL(vap);

	/* next do all the common fields */
	vap->va_type = ap->a_vp->v_type;
	vap->va_mode = pfs->pfs_mode;
	vap->va_fileid = pfs->pfs_fileno;
	vap->va_flags = 0;
	vap->va_blocksize = PAGE_SIZE;
	vap->va_bytes = vap->va_size = 0;
	vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];

	/*
	 * Make all times be current TOD.
	 * It would be possible to get the process start
	 * time from the p_stat structure, but there's
	 * no "file creation" time stamp anyway, and the
	 * p_stat structure is not addressible if u. gets
	 * swapped out for that process.
	 */
	nanotime(&vap->va_ctime);
	vap->va_atime = vap->va_mtime = vap->va_ctime;

	/*
	 * If the process has exercised some setuid or setgid
	 * privilege, then rip away read/write permission so
	 * that only root can gain access.
	 */
	switch (pfs->pfs_type) {
	case Pctl:
	case Pregs:
	case Pfpregs:
	case Pdbregs:
	case Pmem:
		if (procp->p_flags & P_SUGID) {
			vap->va_mode &= ~((VREAD|VWRITE)|
					  ((VREAD|VWRITE)>>3)|
					  ((VREAD|VWRITE)>>6));
		}
		break;
	default:
		break;
	}
/*
 * Fill in a Linux superblock for an MVFS mount: wire up the superblock
 * operations, allocate and initialize the wrapping VFS_T, perform the
 * vnode-layer mount, and install the root dentry.  Returns 0 on
 * success or a negative Linux errno; on failure the partially built
 * state (vfsp, mount) is torn down.
 */
int
vnlayer_fill_super(
    SUPER_T *super_p,
    void *data_p,
    int silent
)
{
    INODE_T *ino_p;
    VNODE_T *rootvp;
    VATTR_T va;
    VFS_T *vfsp;
    int err = 0;
    CALL_DATA_T cd;

    ASSERT_KERNEL_LOCKED();          /* sys_mount() */
    ASSERT_SB_MOUNT_LOCKED_W(super_p);

    /* can't assert on mount_sem, we don't have access to it. */

    if (vnlayer_vfs_opvec == NULL) {
        MDKI_VFS_LOG(VFS_LOG_ERR,
                     "%s: VFS operation not set yet "
                     "(no file system module loaded?)\n", __func__);
        err = -ENODATA;
        goto return_NULL;
    }

    if (MDKI_INOISOURS(vnlayer_get_urdir_inode())) {
        /* can't handle this case */
        MDKI_VFS_LOG(VFS_LOG_ERR,
                     "%s: can't handle mounts inside setview.\n", __func__);
        err = -EINVAL;
        goto return_NULL;
    }

    /*
     * The only fields we have coming in are s_type and s_flags.
     */
    /* Verify this */

    super_p->s_blocksize = MVFS_DEF_BLKSIZE;
    super_p->s_blocksize_bits = MVFS_DEF_BLKSIZE_BITS;
    super_p->s_maxbytes = MVFS_DEF_MAX_FILESIZE;
    super_p->s_op = &mvfs_super_ops;
    super_p->s_export_op = &vnlayer_export_ops;
    super_p->dq_op = NULL;
    super_p->s_magic = MVFS_SUPER_MAGIC;

    /*
     * XXX This module is currently restricted to one client file system
     * type at a time, as registered via the vnlayer_vfs_opvec.
     */
    vfsp = KMEM_ALLOC(sizeof(*vfsp), KM_SLEEP);
    if (vfsp == NULL) {
        MDKI_VFS_LOG(VFS_LOG_ERR, "%s failed: no memory\n", __func__);
        SET_SBTOVFS(super_p, NULL);
        err = -ENOMEM;
        goto return_NULL;
    }
    BZERO(vfsp, sizeof(*vfsp));
    /* Cross-link superblock and VFS_T before mounting. */
    SET_VFSTOSB(vfsp, super_p);
    SET_SBTOVFS(super_p, vfsp);
    vfsp->vfs_op = vnlayer_vfs_opvec;
    /* XXX fill in more of vfsp (flag?) */
    if (super_p->s_flags & MS_RDONLY)
        vfsp->vfs_flag |= VFS_RDONLY;
    if (super_p->s_flags & MS_NOSUID)
        vfsp->vfs_flag |= VFS_NOSUID;

    err = vnlayer_linux_mount(vfsp, data_p);
    if (err) {
        goto bailout;
    }

    /*
     * Now create our dentry and set that up in the superblock.  Get
     * the inode from the vnode at the root of the file system, and
     * attach it to a new dentry.
     */
    mdki_linux_init_call_data(&cd);
    err = VFS_ROOT(SBTOVFS(super_p), &rootvp);
    if (err) {
        err = mdki_errno_unix_to_linux(err);
        (void) VFS_UNMOUNT(vfsp,&cd);
        mdki_linux_destroy_call_data(&cd);
        goto bailout;
    }

    ino_p = VTOI(rootvp);

#ifdef CONFIG_FS_POSIX_ACL
    /* If the system supports ACLs, we set the flag in the superblock
     * depending on the ability of the underlying filesystem
     */
    if (vfsp->vfs_flag & VFS_POSIXACL) {
        super_p->s_flags |= MS_POSIXACL;
    }
#endif

    /*
     * Call getattr() to prime this inode with real attributes via the
     * callback to mdki_linux_vattr_pullup()
     */
    VATTR_NULL(&va);
    /* ignore error code, we're committed */
    (void) VOP_GETATTR(rootvp, &va, 0, &cd);

    /* This will allocate a dentry with a name of /, which is
     * what Linux uses in all filesystem roots.  The dentry is
     * also not put on the hash chains because Linux does not
     * hash file system roots.  It finds them through the super
     * blocks.
     */
    super_p->s_root = VNODE_D_ALLOC_ROOT(ino_p);
    if (super_p->s_root) {
        if (VFSTOSB(vnlayer_looproot_vp->v_vfsp) == super_p) {
            /* loopback names are done with regular dentry ops */
            MDKI_SET_DOPS(super_p->s_root, &vnode_dentry_ops);
        } else {
            /*
             * setview names come in via VOB mounts, they're marked
             * with setview dentry ops
             */
            MDKI_SET_DOPS(super_p->s_root, &vnode_setview_dentry_ops);
        }
        super_p->s_root->d_fsdata = NULL;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
        atomic_set(&super_p->s_root->d_count, 1);
#endif
        /* d_alloc_root assumes that the caller will take care of
         * bumping the inode count for the dentry.  So we will oblige
         */
        igrab(ino_p);
    } else {
        VN_RELE(rootvp);
        (void) VFS_UNMOUNT(vfsp,&cd);
        mdki_linux_destroy_call_data(&cd);
        err = -ENOMEM;
        goto bailout;
    }
    mdki_linux_destroy_call_data(&cd);
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0)
    super_p->s_dirt = 1;        /* we want to be called on write_super/sync() */
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,38)
    /* write back is delegated to the undelying fs */
    super_p->s_bdi = &noop_backing_dev_info;
#endif
    /*
     * release reference on rootvp--super block holds appropriate
     * references now
     */
    VN_RELE(rootvp);
    return(0);

  bailout:
    MDKI_VFS_LOG(VFS_LOG_ERR,
                 "%s failed: error %d\n", __func__,
                 vnlayer_errno_linux_to_unix(err));
    SET_SBTOVFS(super_p, NULL);
    KMEM_FREE(vfsp, sizeof(*vfsp));

  return_NULL:
    return(err);
}
/*
 * getattr for devfs nodes.  Builds attributes from the devfs_node
 * under the devfs lock; for symlinks va_size is the link length, and
 * for disk-type devices va_size is the media size so lseek() works.
 */
static int
devfs_vop_getattr(struct vop_getattr_args *ap)
{
	struct devfs_node *node = DEVFS_NODE(ap->a_vp);
	struct vattr *vap = ap->a_vap;
	struct partinfo pinfo;
	int error = 0;

#if 0
	if (!devfs_node_is_accessible(node))
		return ENOENT;
#endif
	node_sync_dev_get(node);

	lockmgr(&devfs_lock, LK_EXCLUSIVE);

	/* start by zeroing out the attributes */
	VATTR_NULL(vap);

	/* next do all the common fields */
	vap->va_type = ap->a_vp->v_type;
	vap->va_mode = node->mode;
	vap->va_fileid = DEVFS_NODE(ap->a_vp)->d_dir.d_ino;
	vap->va_flags = 0;
	vap->va_blocksize = DEV_BSIZE;
	vap->va_bytes = vap->va_size = 0;

	vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];

	vap->va_atime = node->atime;
	vap->va_mtime = node->mtime;
	vap->va_ctime = node->ctime;

	vap->va_nlink = 1; /* number of references to file */

	vap->va_uid = node->uid;
	vap->va_gid = node->gid;

	vap->va_rmajor = 0;
	vap->va_rminor = 0;

	if ((node->node_type == Ndev) && node->d_dev)  {
		/* hold the device while we read its minor number */
		reference_dev(node->d_dev);
		vap->va_rminor = node->d_dev->si_uminor;
		release_dev(node->d_dev);
	}

	/* For a softlink the va_size is the length of the softlink */
	if (node->symlink_name != 0) {
		vap->va_bytes = vap->va_size = node->symlink_namelen;
	}

	/*
	 * For a disk-type device, va_size is the size of the underlying
	 * device, so that lseek() works properly.
	 */
	if ((node->d_dev) && (dev_dflags(node->d_dev) & D_DISK)) {
		bzero(&pinfo, sizeof(pinfo));
		error = dev_dioctl(node->d_dev, DIOCGPART, (void *)&pinfo,
				   0, proc0.p_ucred, NULL, NULL);
		if ((error == 0) && (pinfo.media_blksize != 0)) {
			vap->va_size = pinfo.media_size;
		} else {
			/* ioctl failure is not fatal; report size 0 */
			vap->va_size = 0;
			error = 0;
		}
	}

	lockmgr(&devfs_lock, LK_RELEASE);

	return (error);
}
/*
 * fdesc_getattr(struct vnode *a_vp, struct vattr *a_vap, struct ucred *a_cred)
 *
 * Froot: synthesize fixed directory attributes.
 * Fdesc: stat the underlying open file and translate the result.
 *
 * NOTE(review): this chunk appears truncated — the closing of the
 * function (final return) after the switch is not present here.
 */
static int
fdesc_getattr(struct vop_getattr_args *ap)
{
	struct proc *p = curproc;
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	struct file *fp;
	struct stat stb;
	u_int fd;
	int error = 0;

	KKASSERT(p);

	switch (VTOFDESC(vp)->fd_type) {
	case Froot:
		VATTR_NULL(vap);

		vap->va_mode = S_IRUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH;
		vap->va_type = VDIR;
		vap->va_nlink = 2;
		vap->va_size = DEV_BSIZE;
		vap->va_fileid = VTOFDESC(vp)->fd_ix;
		vap->va_uid = 0;
		vap->va_gid = 0;
		vap->va_blocksize = DEV_BSIZE;
		vap->va_atime.tv_sec = boottime.tv_sec;
		vap->va_atime.tv_nsec = 0;
		vap->va_mtime = vap->va_atime;
		vap->va_ctime = vap->va_mtime;
		vap->va_gen = 0;
		vap->va_flags = 0;
		vap->va_rmajor = VNOVAL;
		vap->va_rminor = VNOVAL;
		vap->va_bytes = 0;
		break;

	case Fdesc:
		fd = VTOFDESC(vp)->fd_fd;
		/* take a hold on the descriptor's file for the fo_stat */
		fp = holdfp(p->p_fd, fd, -1);
		if (fp == NULL)
			return (EBADF);

		bzero(&stb, sizeof(stb));
		error = fo_stat(fp, &stb, curproc->p_ucred);
		fdrop(fp);

		if (error == 0) {
			VATTR_NULL(vap);
			vap->va_type = IFTOVT(stb.st_mode);
			vap->va_mode = stb.st_mode;
#define FDRX (VREAD|VEXEC)
			/* directories lose r/x in all three permission sets */
			if (vap->va_type == VDIR)
				vap->va_mode &= ~((FDRX)|(FDRX>>3)|(FDRX>>6));
#undef FDRX
			vap->va_nlink = 1;
			vap->va_flags = 0;
			vap->va_bytes = stb.st_blocks * stb.st_blksize;
			vap->va_fileid = VTOFDESC(vp)->fd_ix;
			vap->va_size = stb.st_size;
			vap->va_blocksize = stb.st_blksize;
			vap->va_rmajor = umajor(stb.st_rdev);
			vap->va_rminor = uminor(stb.st_rdev);

			/*
			 * If no time data is provided, use the current time.
			 */
			if (stb.st_atimespec.tv_sec == 0 &&
			    stb.st_atimespec.tv_nsec == 0)
				nanotime(&stb.st_atimespec);

			if (stb.st_ctimespec.tv_sec == 0 &&
			    stb.st_ctimespec.tv_nsec == 0)
				nanotime(&stb.st_ctimespec);

			if (stb.st_mtimespec.tv_sec == 0 &&
			    stb.st_mtimespec.tv_nsec == 0)
				nanotime(&stb.st_mtimespec);

			vap->va_atime = stb.st_atimespec;
			vap->va_mtime = stb.st_mtimespec;
			vap->va_ctime = stb.st_ctimespec;
			vap->va_uid = stb.st_uid;
			vap->va_gid = stb.st_gid;
		}
		break;

	default:
		panic("fdesc_getattr");
		break;
	}
/*
 * Common code for vnode open operations.
 * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
 *
 * On success, ndp->ni_vp holds the opened (locked) vnode and 0 is
 * returned; on failure the vnode reference/lock has been released.
 */
int
vn_open(struct nameidata *ndp, int fmode, int cmode)
{
	struct vnode *vp;
	struct proc *p = ndp->ni_cnd.cn_proc;
	struct ucred *cred = p->p_ucred;
	struct vattr va;
	int error;

	/* must open for at least one of read/write */
	if ((fmode & (FREAD|FWRITE)) == 0)
		return (EINVAL);
	/* O_TRUNC without write access makes no sense */
	if ((fmode & (O_TRUNC | FWRITE)) == O_TRUNC)
		return (EINVAL);
	if (fmode & O_CREAT) {
		ndp->ni_cnd.cn_nameiop = CREATE;
		ndp->ni_cnd.cn_flags = LOCKPARENT | LOCKLEAF;
		if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
			ndp->ni_cnd.cn_flags |= FOLLOW;
		if ((error = namei(ndp)) != 0)
			return (error);
		if (ndp->ni_vp == NULL) {
			/* target does not exist: create it */
			VATTR_NULL(&va);
			va.va_type = VREG;
			va.va_mode = cmode;
			error = VOP_CREATE(ndp->ni_dvp, &ndp->ni_vp,
			    &ndp->ni_cnd, &va);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;	/* fresh file, nothing to truncate */
			vp = ndp->ni_vp;
		} else {
			/* target already exists */
			VOP_ABORTOP(ndp->ni_dvp, &ndp->ni_cnd);
			if (ndp->ni_dvp == ndp->ni_vp)
				vrele(ndp->ni_dvp);
			else
				vput(ndp->ni_dvp);
			ndp->ni_dvp = NULL;
			vp = ndp->ni_vp;
			if (fmode & O_EXCL) {
				error = EEXIST;
				goto bad;
			}
			fmode &= ~O_CREAT;
		}
	} else {
		ndp->ni_cnd.cn_nameiop = LOOKUP;
		ndp->ni_cnd.cn_flags =
		    ((fmode & O_NOFOLLOW) ? NOFOLLOW : FOLLOW) | LOCKLEAF;
		if ((error = namei(ndp)) != 0)
			return (error);
		vp = ndp->ni_vp;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	/* only reachable with O_NOFOLLOW: refuse to open a symlink */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		/* existing file: check access against the requested modes */
		if (fmode & FREAD) {
			if ((error = VOP_ACCESS(vp, VREAD, cred, p)) != 0)
				goto bad;
		}
		if (fmode & FWRITE) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			if ((error = vn_writechk(vp)) != 0 ||
			    (error = VOP_ACCESS(vp, VWRITE, cred, p)) != 0)
				goto bad;
		}
	}
	if ((fmode & O_TRUNC) && vp->v_type == VREG) {
		/* truncate to zero length via setattr */
		VATTR_NULL(&va);
		va.va_size = 0;
		if ((error = VOP_SETATTR(vp, &va, cred, p)) != 0)
			goto bad;
	}
	if ((error = VOP_OPEN(vp, fmode, cred, p)) != 0)
		goto bad;

	if (vp->v_flag & VCLONED) {
		/* the open produced a cloned vnode; hand that one back */
		struct cloneinfo *cip = (struct cloneinfo *) vp->v_data;

		vp->v_flag &= ~VCLONED;
		ndp->ni_vp = cip->ci_vp;	/* return cloned vnode */
		vp->v_data = cip->ci_data;	/* restore v_data */
		VOP_UNLOCK(vp, 0, p);		/* keep a reference */
		vp = ndp->ni_vp;		/* for the increment below */
		free(cip, M_TEMP);
	}

	if (fmode & FWRITE)
		vp->v_writecount++;
	return (0);
bad:
	vput(vp);
	return (error);
}
/*
 * Translate an nnpfs_attr (xa) into a BSD struct vattr (va).
 * Only fields whose XA_VALID_* bit is set in xa are copied; when
 * clear_node is non-zero the vattr is nulled first.  va_flags and
 * va_blocksize are always (re)set at the end.
 */
void
nnpfs_attr2vattr(const struct nnpfs_attr *xa, struct vattr *va, int clear_node)
{
    if (clear_node)
	VATTR_NULL(va);
    if (XA_VALID_MODE(xa))
	va->va_mode = xa->xa_mode;
    if (XA_VALID_NLINK(xa))
	va->va_nlink = xa->xa_nlink;
    if (XA_VALID_SIZE(xa)) {
	va->va_size = xa->xa_size;
	/* byte count mirrors the logical size */
	va->va_bytes = va->va_size;
    }
    if (XA_VALID_UID(xa))
	va->va_uid = xa->xa_uid;
    if (XA_VALID_GID(xa))
	va->va_gid = xa->xa_gid;
    if (XA_VALID_ATIME(xa)) {
	SET_TIMEVAL(&va->va_atime, xa->xa_atime, 0);
    }
    if (XA_VALID_MTIME(xa)) {
	SET_TIMEVAL(&va->va_mtime, xa->xa_mtime, 0);
    }
    if (XA_VALID_CTIME(xa)) {
	SET_TIMEVAL(&va->va_ctime, xa->xa_ctime, 0);
    }
    if (XA_VALID_FILEID(xa)) {
	va->va_fileid = xa->xa_fileid;
    }
    if (XA_VALID_TYPE(xa)) {
	/* map protocol file types onto vnode types */
	switch (xa->xa_type) {
	case NNPFS_FILE_NON:
	    va->va_type = VNON;
	    break;
	case NNPFS_FILE_REG:
	    va->va_type = VREG;
	    break;
	case NNPFS_FILE_DIR:
	    va->va_type = VDIR;
	    break;
	case NNPFS_FILE_BLK:
	    va->va_type = VBLK;
	    break;
	case NNPFS_FILE_CHR:
	    va->va_type = VCHR;
	    break;
	case NNPFS_FILE_LNK:
	    va->va_type = VLNK;
	    break;
	case NNPFS_FILE_SOCK:
	    va->va_type = VSOCK;
	    break;
	case NNPFS_FILE_FIFO:
	    va->va_type = VFIFO;
	    break;
	case NNPFS_FILE_BAD:
	    va->va_type = VBAD;
	    break;
	default:
	    /* unknown wire value: programming error upstream */
	    panic("nnpfs_attr2vattr: bad value");
	}
    }
    va->va_flags = 0;
    va->va_blocksize = 8192;
}
/*
 * Invent attributes for pfsnode (vp) and store
 * them in (vap).
 * Directories lengths are returned as zero since
 * any real length would require the genuine size
 * to be computed, and nothing cares anyway.
 *
 * this is relatively minimal for procfs.
 *
 * procfs_getattr(struct vnode *a_vp, struct vattr *a_vap)
 */
static int
procfs_getattr(struct vop_getattr_args *ap)
{
	struct pfsnode *pfs = VTOPFS(ap->a_vp);
	struct vattr *vap = ap->a_vap;
	struct proc *procp;
	int error;

	/*
	 * First make sure that the process and its credentials
	 * still exist.
	 */
	switch (pfs->pfs_type) {
	case Proot:
	case Pcurproc:
		procp = NULL;
		break;
	default:
		/* pfs_pfind holds the proc; released at `done` via PRELE */
		procp = pfs_pfind(pfs->pfs_pid);
		if (procp == NULL || procp->p_ucred == NULL) {
			error = ENOENT;
			goto done;
		}
	}

	error = 0;

	/* start by zeroing out the attributes */
	VATTR_NULL(vap);

	/* next do all the common fields */
	vap->va_type = ap->a_vp->v_type;
	vap->va_mode = pfs->pfs_mode;
	vap->va_fileid = pfs->pfs_fileno;
	vap->va_flags = 0;
	vap->va_blocksize = PAGE_SIZE;
	vap->va_bytes = vap->va_size = 0;
	vap->va_fsid = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];

	/*
	 * Make all times be current TOD.
	 * It would be possible to get the process start
	 * time from the p_stat structure, but there's
	 * no "file creation" time stamp anyway, and the
	 * p_stat structure is not addressible if u. gets
	 * swapped out for that process.
	 */
	nanotime(&vap->va_ctime);
	vap->va_atime = vap->va_mtime = vap->va_ctime;

	/*
	 * If the process has exercised some setuid or setgid
	 * privilege, then rip away read/write permission so
	 * that only root can gain access.
	 */
	switch (pfs->pfs_type) {
	case Pctl:
	case Pregs:
	case Pfpregs:
	case Pdbregs:
	case Pmem:
		if (procp->p_flag & P_SUGID)
			vap->va_mode &= ~((VREAD|VWRITE)|
					  ((VREAD|VWRITE)>>3)|
					  ((VREAD|VWRITE)>>6));
		break;
	default:
		break;
	}

	/*
	 * now do the object specific fields
	 *
	 * The size could be set from struct reg, but it's hardly
	 * worth the trouble, and it puts some (potentially) machine
	 * dependent data into this machine-independent code.  If it
	 * becomes important then this function should break out into
	 * a per-file stat function in the corresponding .c file.
	 */
	vap->va_nlink = 1;
	if (procp) {
		vap->va_uid = procp->p_ucred->cr_uid;
		vap->va_gid = procp->p_ucred->cr_gid;
	}

	switch (pfs->pfs_type) {
	case Proot:
		/*
		 * Set nlink to 1 to tell fts(3) we don't actually know.
		 */
		vap->va_nlink = 1;
		vap->va_uid = 0;
		vap->va_gid = 0;
		vap->va_size = vap->va_bytes = DEV_BSIZE;
		break;

	case Pcurproc: {
		/* size is the length of the printed current pid */
		char buf[16];		/* should be enough */
		vap->va_uid = 0;
		vap->va_gid = 0;
		vap->va_size = vap->va_bytes =
		    ksnprintf(buf, sizeof(buf), "%ld", (long)curproc->p_pid);
		break;
	}

	case Pproc:
		vap->va_nlink = nproc_targets;
		vap->va_size = vap->va_bytes = DEV_BSIZE;
		break;

	case Pfile: {
		char *fullpath, *freepath;
		/* size is the length of the executable's path, if known */
		error = cache_fullpath(procp, &procp->p_textnch,
		    &fullpath, &freepath, 0);
		if (error == 0) {
			vap->va_size = strlen(fullpath);
			kfree(freepath, M_TEMP);
		} else {
			vap->va_size = sizeof("unknown") - 1;
			error = 0;	/* best effort, not fatal */
		}
		vap->va_bytes = vap->va_size;
		break;
	}

	case Pmem:
		/*
		 * If we denied owner access earlier, then we have to
		 * change the owner to root - otherwise 'ps' and friends
		 * will break even though they are setgid kmem. *SIGH*
		 */
		if (procp->p_flag & P_SUGID)
			vap->va_uid = 0;
		else
			vap->va_uid = procp->p_ucred->cr_uid;
		break;

	case Pregs:
		vap->va_bytes = vap->va_size = sizeof(struct reg);
		break;

	case Pfpregs:
		vap->va_bytes = vap->va_size = sizeof(struct fpreg);
		break;

	case Pdbregs:
		vap->va_bytes = vap->va_size = sizeof(struct dbreg);
		break;

	case Ptype:
	case Pmap:
	case Pctl:
	case Pstatus:
	case Pnote:
	case Pnotepg:
	case Pcmdline:
	case Prlimit:
		break;

	default:
		panic("procfs_getattr");
	}

done:
	if (procp)
		PRELE(procp);
	return (error);
}
/*
 * ioctl handler for the pty multiplexor device.
 *
 * PTMGET: allocate a free master/slave pty pair, open both sides, fix
 * the slave's ownership/mode, revoke any stale users of the slave, and
 * return the two file descriptors plus both device names in the
 * caller-supplied struct ptmget.  Any other cmd yields EINVAL.
 *
 * Returns 0 on success or an errno.  On failure both temporary file
 * descriptors are backed out (bad: path).
 */
int
ptmioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p)
{
	dev_t newdev;
	/*
	 * BUGFIX: 'error' was previously declared together with newdev
	 * as dev_t.  It carries errno values and is returned as the
	 * function's int result, so it must be a plain int.
	 */
	int error;
	struct pt_softc * pti;
	struct nameidata cnd, snd;
	struct filedesc *fdp = p->p_fd;
	struct file *cfp = NULL, *sfp = NULL;
	int cindx, sindx;
	uid_t uid;
	gid_t gid;
	struct vattr vattr;
	struct ucred *cred;
	struct ptmget *ptm = (struct ptmget *)data;

	error = 0;
	switch (cmd) {
	case PTMGET:
		fdplock(fdp);
		/* Grab two filedescriptors. */
		if ((error = falloc(p, &cfp, &cindx)) != 0) {
			fdpunlock(fdp);
			break;
		}
		if ((error = falloc(p, &sfp, &sindx)) != 0) {
			fdremove(fdp, cindx);
			closef(cfp, p);
			fdpunlock(fdp);
			break;
		}
retry:
		/* Find and open a free master pty. */
		newdev = pty_getfree();
		if ((error = check_pty(minor(newdev))))
			goto bad;
		pti = pt_softc[minor(newdev)];
		NDINIT(&cnd, LOOKUP, NOFOLLOW|LOCKLEAF, UIO_SYSSPACE,
		    pti->pty_pn, p);
		if ((error = ptm_vn_open(&cnd)) != 0) {
			/*
			 * Check if the master open failed because we lost
			 * the race to grab it.
			 */
			if (error == EIO && !pty_isfree(minor(newdev)))
				goto retry;
			goto bad;
		}
		cfp->f_flag = FREAD|FWRITE;
		cfp->f_type = DTYPE_VNODE;
		cfp->f_ops = &vnops;
		cfp->f_data = (caddr_t) cnd.ni_vp;
		VOP_UNLOCK(cnd.ni_vp, 0, p);

		/*
		 * Open the slave.
		 * namei -> setattr -> unlock -> revoke -> vrele ->
		 * namei -> open -> unlock
		 * Three stage rocket:
		 * 1. Change the owner and permissions on the slave.
		 * 2. Revoke all the users of the slave.
		 * 3. open the slave.
		 */
		NDINIT(&snd, LOOKUP, NOFOLLOW|LOCKLEAF, UIO_SYSSPACE,
		    pti->pty_sn, p);
		if ((error = namei(&snd)) != 0)
			goto bad;
		if ((snd.ni_vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
			gid = tty_gid;
			/* get real uid */
			uid = p->p_cred->p_ruid;
			VATTR_NULL(&vattr);
			vattr.va_uid = uid;
			vattr.va_gid = gid;
			vattr.va_mode = (S_IRUSR|S_IWUSR|S_IWGRP) & ALLPERMS;
			/* Get a fake cred to pretend we're root. */
			cred = crget();
			error = VOP_SETATTR(snd.ni_vp, &vattr, cred, p);
			crfree(cred);
			if (error) {
				vput(snd.ni_vp);
				goto bad;
			}
		}
		VOP_UNLOCK(snd.ni_vp, 0, p);
		if (snd.ni_vp->v_usecount > 1 ||
		    (snd.ni_vp->v_flag & (VALIASED)))
			VOP_REVOKE(snd.ni_vp, REVOKEALL);

		/*
		 * The vnode is useless after the revoke, we need to
		 * namei again.
		 */
		vrele(snd.ni_vp);

		NDINIT(&snd, LOOKUP, NOFOLLOW|LOCKLEAF, UIO_SYSSPACE,
		    pti->pty_sn, p);
		/* now open it */
		if ((error = ptm_vn_open(&snd)) != 0)
			goto bad;
		sfp->f_flag = FREAD|FWRITE;
		sfp->f_type = DTYPE_VNODE;
		sfp->f_ops = &vnops;
		sfp->f_data = (caddr_t) snd.ni_vp;
		VOP_UNLOCK(snd.ni_vp, 0, p);

		/* now, put the indexen and names into struct ptmget */
		ptm->cfd = cindx;
		ptm->sfd = sindx;
		memcpy(ptm->cn, pti->pty_pn, sizeof(pti->pty_pn));
		memcpy(ptm->sn, pti->pty_sn, sizeof(pti->pty_sn));

		/* mark the files mature now that we've passed all errors */
		FILE_SET_MATURE(cfp);
		FILE_SET_MATURE(sfp);
		fdpunlock(fdp);
		break;
	default:
		error = EINVAL;
		break;
	}
	return (error);
bad:
	/* back out both descriptors; closef drops any vnode in f_data */
	fdremove(fdp, cindx);
	closef(cfp, p);
	fdremove(fdp, sindx);
	closef(sfp, p);
	fdpunlock(fdp);
	return (error);
}
extern int vnode_iop_create( INODE_T * parent, struct dentry * dentry, int mode, struct nameidata *nd ) { int err = 0; VATTR_T *vap; VNODE_T *newvp; struct create_ctx ctx; CALL_DATA_T cd; ASSERT_I_SEM_MINE(parent); ASSERT(MDKI_INOISMVFS(parent)); vap = VATTR_ALLOC(); if (vap == NULL) return -ENOMEM; VATTR_NULL(vap); mdki_linux_init_call_data(&cd); /* * Solaris sends only type, mode, size, so we will too. */ vap->va_type = VREG; vap->va_mode = mode & ~S_IFMT; vap->va_size = 0; vap->va_mask = AT_TYPE|AT_MODE|AT_SIZE; newvp = NULL; dentry->d_inode = NULL; ctx.dentry = dentry; ctx.parent = parent; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38) /* break any rcu-walk in progress */ # if defined(MRG) write_seqlock_barrier(&dentry->d_lock); # else /* defined (MRG) */ write_seqcount_barrier(&dentry->d_seq); # endif /* else defined (MRG) */ #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38) */ err = VOP_CREATE(ITOV(parent), (/* drop const */ char *) dentry->d_name.name, vap, NONEXCL, /* XXX handled by generic layer? */ mode, /* not used except for passthrough, see vap->va_mode */ &newvp, &cd, &ctx); err = mdki_errno_unix_to_linux(err); /* dentry reference uses the hold count from a successful create */ if (!err) { if (dentry->d_inode == NULL) { /* Not a shadow object */ ASSERT(newvp != NULL); ASSERT(VTOI(newvp) != NULL); VNODE_D_INSTANTIATE(dentry, VTOI(newvp)); VATTR_SET_MASK(vap, AT_ALL); if (VOP_GETATTR(newvp, vap, 0, &cd) == 0) mdki_linux_vattr_pullup(newvp, vap, AT_ALL); } else { /* drop the extra ref returned in newvp */ VN_RELE(newvp); } /* I nuked the code checking not VCHR, VREG--we are always VREG */ } else { ASSERT(!dentry->d_inode); ASSERT(!newvp); } VATTR_FREE(vap); mdki_linux_destroy_call_data(&cd); return(err); }
/*
 * Common code for vnode open operations.  Check permissions, and call
 * the VOP_NOPEN or VOP_NCREATE routine.
 *
 * The caller is responsible for setting up nd with nlookup_init() and
 * for cleaning it up with nlookup_done(), whether we return an error
 * or not.
 *
 * On success nd->nl_open_vp will hold a referenced and, if requested,
 * locked vnode.  A locked vnode is requested via NLC_LOCKVP.  If fp
 * is non-NULL the vnode will be installed in the file pointer.
 *
 * NOTE: The vnode is referenced just once on return whether or not it
 * is also installed in the file pointer.
 */
int
vn_open(struct nlookupdata *nd, struct file *fp, int fmode, int cmode)
{
	struct vnode *vp;
	struct ucred *cred = nd->nl_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int error;
	u_int flags;
	uint64_t osize;
	struct mount *mp;

	/*
	 * Certain combinations are illegal: O_TRUNC without write intent
	 * makes no sense.
	 */
	if ((fmode & (FWRITE | O_TRUNC)) == O_TRUNC)
		return(EACCES);

	/*
	 * Lookup the path and create or obtain the vnode.  After a
	 * successful lookup a locked nd->nl_nch will be returned.
	 *
	 * The result of this section should be a locked vnode.
	 *
	 * XXX with only a little work we should be able to avoid locking
	 * the vnode if FWRITE, O_CREAT, and O_TRUNC are *not* set.
	 */
	nd->nl_flags |= NLC_OPEN;
	if (fmode & O_APPEND)
		nd->nl_flags |= NLC_APPEND;
	if (fmode & O_TRUNC)
		nd->nl_flags |= NLC_TRUNCATE;
	if (fmode & FREAD)
		nd->nl_flags |= NLC_READ;
	if (fmode & FWRITE)
		nd->nl_flags |= NLC_WRITE;
	if ((fmode & O_EXCL) == 0 && (fmode & O_NOFOLLOW) == 0)
		nd->nl_flags |= NLC_FOLLOW;

	if (fmode & O_CREAT) {
		/*
		 * CONDITIONAL CREATE FILE CASE
		 *
		 * Setting NLC_CREATE causes a negative hit to store
		 * the negative hit ncp and not return an error.  Then
		 * nc_error or nc_vp may be checked to see if the ncp
		 * represents a negative hit.  NLC_CREATE also requires
		 * write permission on the governing directory or EPERM
		 * is returned.
		 */
		nd->nl_flags |= NLC_CREATE;
		nd->nl_flags |= NLC_REFDVP;
		bwillinode(1);
		error = nlookup(nd);
	} else {
		/*
		 * NORMAL OPEN FILE CASE
		 */
		error = nlookup(nd);
	}

	if (error)
		return (error);

	/*
	 * split case to allow us to re-resolve and retry the ncp in case
	 * we get ESTALE.
	 */
again:
	if (fmode & O_CREAT) {
		if (nd->nl_nch.ncp->nc_vp == NULL) {
			/* negative hit: actually create the file */
			if ((error = ncp_writechk(&nd->nl_nch)) != 0)
				return (error);
			VATTR_NULL(vap);
			vap->va_type = VREG;
			vap->va_mode = cmode;
			if (fmode & O_EXCL)
				vap->va_vaflags |= VA_EXCLUSIVE;
			error = VOP_NCREATE(&nd->nl_nch, nd->nl_dvp, &vp,
					    nd->nl_cred, vap);
			if (error)
				return (error);
			fmode &= ~O_TRUNC;
			/* locked vnode is returned */
		} else {
			/* file already exists */
			if (fmode & O_EXCL) {
				error = EEXIST;
			} else {
				error = cache_vget(&nd->nl_nch, cred,
						    LK_EXCLUSIVE, &vp);
			}
			if (error)
				return (error);
			fmode &= ~O_CREAT;
		}
	} else {
		error = cache_vget(&nd->nl_nch, cred, LK_EXCLUSIVE, &vp);
		if (error)
			return (error);
	}

	/*
	 * We have a locked vnode and ncp now.  Note that the ncp will
	 * be cleaned up by the caller if nd->nl_nch is left intact.
	 */
	if (vp->v_type == VLNK) {
		error = EMLINK;
		goto bad;
	}
	if (vp->v_type == VSOCK) {
		error = EOPNOTSUPP;
		goto bad;
	}
	if ((fmode & O_CREAT) == 0) {
		if (fmode & (FWRITE | O_TRUNC)) {
			if (vp->v_type == VDIR) {
				error = EISDIR;
				goto bad;
			}
			error = vn_writechk(vp, &nd->nl_nch);
			if (error) {
				/*
				 * Special stale handling, re-resolve the
				 * vnode.
				 */
				if (error == ESTALE) {
					vput(vp);
					vp = NULL;
					cache_setunresolved(&nd->nl_nch);
					error = cache_resolve(&nd->nl_nch, cred);
					if (error == 0)
						goto again;
				}
				goto bad;
			}
		}
	}
	if (fmode & O_TRUNC) {
		vn_unlock(vp);				/* XXX */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX */
		osize = vp->v_filesize;
		VATTR_NULL(vap);
		vap->va_size = 0;
		error = VOP_SETATTR(vp, vap, cred);
		if (error)
			goto bad;
		error = VOP_GETATTR(vp, vap);
		if (error)
			goto bad;
		/* account the freed space to the owning uid/gid */
		mp = vq_vptomp(vp);
		VFS_ACCOUNT(mp, vap->va_uid, vap->va_gid, -osize);
	}

	/*
	 * Set or clear VNSWAPCACHE on the vp based on nd->nl_nch.ncp->nc_flag.
	 * These particular bits a tracked all the way from the root.
	 *
	 * NOTE: Might not work properly on NFS servers due to the
	 * disconnected namecache.
	 */
	flags = nd->nl_nch.ncp->nc_flag;
	if ((flags & (NCF_UF_CACHE | NCF_UF_PCACHE)) &&
	    (flags & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE)) == 0) {
		vsetflags(vp, VSWAPCACHE);
	} else {
		vclrflags(vp, VSWAPCACHE);
	}

	/*
	 * Setup the fp so VOP_OPEN can override it.  No descriptor has been
	 * associated with the fp yet so we own it clean.
	 *
	 * f_nchandle inherits nl_nch.  This used to be necessary only for
	 * directories but now we do it unconditionally so f*() ops
	 * such as fchmod() can access the actual namespace that was
	 * used to open the file.
	 */
	if (fp) {
		if (nd->nl_flags & NLC_APPENDONLY)
			fmode |= FAPPENDONLY;
		fp->f_nchandle = nd->nl_nch;
		cache_zero(&nd->nl_nch);
		cache_unlock(&fp->f_nchandle);
	}

	/*
	 * Get rid of nl_nch.  vn_open does not return it (it returns the
	 * vnode or the file pointer).  Note: we can't leave nl_nch locked
	 * through the VOP_OPEN anyway since the VOP_OPEN may block, e.g.
	 * on /dev/ttyd0
	 */
	if (nd->nl_nch.ncp)
		cache_put(&nd->nl_nch);

	error = VOP_OPEN(vp, fmode, cred, fp);
	if (error) {
		/*
		 * setting f_ops to &badfileops will prevent the descriptor
		 * code from trying to close and release the vnode, since
		 * the open failed we do not want to call close.
		 */
		if (fp) {
			fp->f_data = NULL;
			fp->f_ops = &badfileops;
		}
		goto bad;
	}

#if 0
	/*
	 * Assert that VREG files have been setup for vmio.
	 */
	KASSERT(vp->v_type != VREG || vp->v_object != NULL,
		("vn_open: regular file was not VMIO enabled!"));
#endif

	/*
	 * Return the vnode.  XXX needs some cleaning up.  The vnode is
	 * only returned in the fp == NULL case.
	 */
	if (fp == NULL) {
		nd->nl_open_vp = vp;
		nd->nl_vp_fmode = fmode;
		if ((nd->nl_flags & NLC_LOCKVP) == 0)
			vn_unlock(vp);
	} else {
		vput(vp);
	}
	return (0);
bad:
	if (vp)
		vput(vp);
	return (error);
}
/*
 * Create a snapshot of the specified {parent, ochain} with the specified
 * label.  The originating hammer2_inode must be exclusively locked for
 * safety.
 *
 * The ioctl code has already synced the filesystem.
 *
 * Returns 0 on success or an errno from hammer2_inode_create().
 * NOTE(review): 'error' is only set via hammer2_inode_create()'s &error
 * out-parameter; confirm that it is written on every path (including
 * nip == NULL) before relying on the return value.
 */
int
hammer2_cluster_snapshot(hammer2_trans_t *trans, hammer2_cluster_t *ocluster,
		       hammer2_ioc_pfs_t *pfs)
{
	hammer2_mount_t *hmp;
	hammer2_cluster_t *ncluster;
	const hammer2_inode_data_t *ipdata;
	hammer2_inode_data_t *wipdata;
	hammer2_inode_t *nip;
	size_t name_len;
	hammer2_key_t lhc;
	struct vattr vat;
	uuid_t opfs_clid;
	int error;

	kprintf("snapshot %s\n", pfs->name);

	name_len = strlen(pfs->name);
	/* NOTE(review): lhc is computed here but never used below */
	lhc = hammer2_dirhash(pfs->name, name_len);

	ipdata = &hammer2_cluster_data(ocluster)->ipdata;
	opfs_clid = ipdata->pfs_clid;
	hmp = ocluster->focus->hmp;

	/*
	 * Create the snapshot directory under the super-root
	 *
	 * Set PFS type, generate a unique filesystem id, and generate
	 * a cluster id.  Use the same clid when snapshotting a PFS root,
	 * which theoretically allows the snapshot to be used as part of
	 * the same cluster (perhaps as a cache).
	 *
	 * Copy the (flushed) blockref array.  Theoretically we could use
	 * chain_duplicate() but it becomes difficult to disentangle
	 * the shared core so for now just brute-force it.
	 */
	VATTR_NULL(&vat);
	vat.va_type = VDIR;
	vat.va_mode = 0755;
	ncluster = NULL;
	nip = hammer2_inode_create(trans, hmp->spmp->iroot, &vat, proc0.p_ucred,
				   pfs->name, name_len, &ncluster, &error);

	if (nip) {
		/* mark the new inode as a snapshot PFS and give it ids */
		wipdata = hammer2_cluster_modify_ip(trans, nip, ncluster, 0);
		wipdata->pfs_type = HAMMER2_PFSTYPE_SNAPSHOT;
		kern_uuidgen(&wipdata->pfs_fsid, 1);
		if (ocluster->focus->flags & HAMMER2_CHAIN_PFSROOT)
			wipdata->pfs_clid = opfs_clid;
		else
			kern_uuidgen(&wipdata->pfs_clid, 1);
		hammer2_cluster_set_chainflags(ncluster, HAMMER2_CHAIN_PFSROOT);

		/* XXX hack blockset copy */
		/* XXX doesn't work with real cluster */
		KKASSERT(ocluster->nchains == 1);
		wipdata->u.blockset = ocluster->focus->data->ipdata.u.blockset;

		hammer2_cluster_modsync(ncluster);
		hammer2_inode_unlock_ex(nip, ncluster);
	}
	return (error);
}
/* question: does afs_create need to set CDirty in the adp or the avc?
 * I think we can get away without it, but I'm not sure.  Note that
 * afs_setattr is called in here for truncation.
 */
/*
 * Create a file 'aname' in directory 'adp' with attributes 'attrs'.
 * On success *avcp holds a referenced vcache for the new (or existing,
 * for non-exclusive opens) file and attrs is refilled with its status.
 * Returns 0 or an errno.  Handles O_TRUNC on an existing file via
 * afs_setattr, exclusive-create semantics, and disconnected operation.
 */
#ifdef AFS_SGI64_ENV
int
afs_create(OSI_VC_DECL(adp), char *aname, struct vattr *attrs, int flags,
	   int amode, struct vcache **avcp, afs_ucred_t *acred)
#else /* AFS_SGI64_ENV */
int
afs_create(OSI_VC_DECL(adp), char *aname, struct vattr *attrs,
	   enum vcexcl aexcl, int amode, struct vcache **avcp,
	   afs_ucred_t *acred)
#endif /* AFS_SGI64_ENV */
{
    afs_int32 origCBs, origZaps, finalZaps;
    struct vrequest *treq = NULL;
    afs_int32 code;
    struct afs_conn *tc;
    struct VenusFid newFid;
    struct AFSStoreStatus InStatus;
    struct AFSFetchStatus *OutFidStatus, *OutDirStatus;
    struct AFSVolSync tsync;
    struct AFSCallBack CallBack;
    afs_int32 now;
    struct dcache *tdc;
    afs_size_t offset, len;
    struct server *hostp = 0;
    struct vcache *tvc;
    struct volume *volp = 0;
    struct afs_fakestat_state fakestate;
    struct rx_connection *rxconn;
    XSTATS_DECLS;
    OSI_VC_CONVERT(adp);

    AFS_STATCNT(afs_create);
    OutFidStatus = osi_AllocSmallSpace(sizeof(struct AFSFetchStatus));
    OutDirStatus = osi_AllocSmallSpace(sizeof(struct AFSFetchStatus));
    memset(&InStatus, 0, sizeof(InStatus));

    if ((code = afs_CreateReq(&treq, acred)))
	goto done2;

    afs_Trace3(afs_iclSetp, CM_TRACE_CREATE, ICL_TYPE_POINTER, adp,
	       ICL_TYPE_STRING, aname, ICL_TYPE_INT32, amode);

    afs_InitFakeStat(&fakestate);

#ifdef AFS_SGI65_ENV
    /* If avcp is passed not null, it's the old reference to this file.
     * We can use this to avoid create races. For now, just decrement
     * the reference count on it.
     */
    if (*avcp) {
	AFS_RELE(AFSTOV(*avcp));
	*avcp = NULL;
    }
#endif

    /* basic name validation before touching the network */
    if (strlen(aname) > AFSNAMEMAX) {
	code = ENAMETOOLONG;
	goto done3;
    }

    if (!afs_ENameOK(aname)) {
	code = EINVAL;
	goto done3;
    }
    switch (attrs->va_type) {
    case VBLK:
    case VCHR:
#if !defined(AFS_SUN5_ENV)
    case VSOCK:
#endif
    case VFIFO:
	/* We don't support special devices or FIFOs */
	code = EINVAL;
	goto done3;
    default:
	;
    }
    AFS_DISCON_LOCK();

    code = afs_EvalFakeStat(&adp, &fakestate, treq);
    if (code)
	goto done;
  tagain:
    code = afs_VerifyVCache(adp, treq);
    if (code)
	goto done;

    /** If the volume is read-only, return error without making an RPC to the
     * fileserver
     */
    if (adp->f.states & CRO) {
	code = EROFS;
	goto done;
    }

    if (AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW) {
	code = ENETDOWN;
	goto done;
    }

    tdc = afs_GetDCache(adp, (afs_size_t) 0, treq, &offset, &len, 1);
    ObtainWriteLock(&adp->lock, 135);
    if (tdc)
	ObtainSharedLock(&tdc->lock, 630);

    /*
     * Make sure that the data in the cache is current.  We may have
     * received a callback while we were waiting for the write lock.
     */
    if (!(adp->f.states & CStatd)
	|| (tdc && !hsame(adp->f.m.DataVersion, tdc->f.versionNo))) {
	ReleaseWriteLock(&adp->lock);
	if (tdc) {
	    ReleaseSharedLock(&tdc->lock);
	    afs_PutDCache(tdc);
	}
	goto tagain;
    }
    if (tdc) {
	/* see if file already exists.  If it does, we only set
	 * the size attributes (to handle O_TRUNC) */
	code = afs_dir_Lookup(tdc, aname, &newFid.Fid);	/* use dnlc first xxx */
	if (code == 0) {
	    ReleaseSharedLock(&tdc->lock);
	    afs_PutDCache(tdc);
	    ReleaseWriteLock(&adp->lock);
#ifdef AFS_SGI64_ENV
	    if (flags & VEXCL) {
#else
	    if (aexcl != NONEXCL) {
#endif
		code = EEXIST;	/* file exists in excl mode open */
		goto done;
	    }
	    /* found the file, so use it */
	    newFid.Cell = adp->f.fid.Cell;
	    newFid.Fid.Volume = adp->f.fid.Fid.Volume;
	    tvc = NULL;
	    if (newFid.Fid.Unique == 0) {
		tvc = afs_LookupVCache(&newFid, treq, NULL, adp, aname);
	    }
	    if (!tvc)		/* lookup failed or wasn't called */
		tvc = afs_GetVCache(&newFid, treq, NULL, NULL);

	    if (tvc) {
		/* if the thing exists, we need the right access to open it.
		 * we must check that here, since no other checks are
		 * made by the open system call */
		len = attrs->va_size;	/* only do the truncate */
		/*
		 * We used to check always for READ access before; the
		 * problem is that we will fail if the existing file
		 * has mode -w-w-w, which is wrong.
		 */
		if ((amode & VREAD)
		    && !afs_AccessOK(tvc, PRSFS_READ, treq, CHECK_MODE_BITS)) {
		    afs_PutVCache(tvc);
		    code = EACCES;
		    goto done;
		}
#if defined(AFS_DARWIN80_ENV)
		if ((amode & VWRITE) || VATTR_IS_ACTIVE(attrs, va_data_size))
#elif defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
		if ((amode & VWRITE) || (attrs->va_mask & AT_SIZE))
#else
		if ((amode & VWRITE) || len != 0xffffffff)
#endif
		{
		    /* needed for write access check */
		    tvc->f.parent.vnode = adp->f.fid.Fid.Vnode;
		    tvc->f.parent.unique = adp->f.fid.Fid.Unique;
		    /* need write mode for these guys */
		    if (!afs_AccessOK
			(tvc, PRSFS_WRITE, treq, CHECK_MODE_BITS)) {
			afs_PutVCache(tvc);
			code = EACCES;
			goto done;
		    }
		}
#if defined(AFS_DARWIN80_ENV)
		if (VATTR_IS_ACTIVE(attrs, va_data_size))
#elif defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
		if (attrs->va_mask & AT_SIZE)
#else
		if (len != 0xffffffff)
#endif
		{
		    if (vType(tvc) != VREG) {
			afs_PutVCache(tvc);
			code = EISDIR;
			goto done;
		    }
		    /* do a truncate */
#if defined(AFS_DARWIN80_ENV)
		    VATTR_INIT(attrs);
		    VATTR_SET_SUPPORTED(attrs, va_data_size);
		    VATTR_SET_ACTIVE(attrs, va_data_size);
#elif defined(UKERNEL)
		    attrs->va_mask = ATTR_SIZE;
#elif defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
		    attrs->va_mask = AT_SIZE;
#else
		    VATTR_NULL(attrs);
#endif
		    attrs->va_size = len;
		    /* CCreating tells other paths a create-truncate is
		     * in progress on this vcache */
		    ObtainWriteLock(&tvc->lock, 136);
		    tvc->f.states |= CCreating;
		    ReleaseWriteLock(&tvc->lock);
#if defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
#if defined(AFS_SGI64_ENV)
		    code =
			afs_setattr(VNODE_TO_FIRST_BHV((vnode_t *) tvc),
				    attrs, 0, acred);
#else
		    code = afs_setattr(tvc, attrs, 0, acred);
#endif /* AFS_SGI64_ENV */
#else /* SUN5 || SGI */
		    code = afs_setattr(tvc, attrs, acred);
#endif /* SUN5 || SGI */
		    ObtainWriteLock(&tvc->lock, 137);
		    tvc->f.states &= ~CCreating;
		    ReleaseWriteLock(&tvc->lock);
		    if (code) {
			afs_PutVCache(tvc);
			goto done;
		    }
		}
		*avcp = tvc;
	    } else
		code = ENOENT;	/* shouldn't get here */
	    /* make sure vrefCount bumped only if code == 0 */
	    goto done;
	}
    }

    /* if we create the file, we don't do any access checks, since
     * that's how O_CREAT is supposed to work */
    if (adp->f.states & CForeign) {
	origCBs = afs_allCBs;
	origZaps = afs_allZaps;
    } else {
	origCBs = afs_evenCBs;	/* if changes, we don't really have a callback */
	origZaps = afs_evenZaps;	/* number of even numbered vnodes discarded */
    }
    InStatus.Mask = AFS_SETMODTIME | AFS_SETMODE | AFS_SETGROUP;
    InStatus.ClientModTime = osi_Time();
    InStatus.Group = (afs_int32) afs_cr_gid(acred);
    if (AFS_NFSXLATORREQ(acred)) {
	/*
	 * XXX The following is mainly used to fix a bug in the HP-UX
	 * nfs client where they create files with mode of 0 without
	 * doing any setattr later on to fix it.  * XXX
	 */
#if	defined(AFS_AIX_ENV)
	if (attrs->va_mode != -1) {
#else
#if	defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
	if (attrs->va_mask & AT_MODE) {
#else
	if (attrs->va_mode != ((unsigned short)-1)) {
#endif
#endif
	    if (!attrs->va_mode)
		attrs->va_mode = 0x1b6;	/* XXX default mode: rw-rw-rw XXX */
	}
    }

    if (!AFS_IS_DISCONNECTED) {
	/* If not disconnected, connect to the server.*/

	InStatus.UnixModeBits = attrs->va_mode & 0xffff;	/* only care about protection bits */
	do {
	    tc = afs_Conn(&adp->f.fid, treq, SHARED_LOCK, &rxconn);
	    if (tc) {
		hostp = tc->srvr->server;	/* remember for callback processing */
		now = osi_Time();
		XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_CREATEFILE);
		RX_AFS_GUNLOCK();
		code =
		    RXAFS_CreateFile(rxconn, (struct AFSFid *)&adp->f.fid.Fid,
				     aname, &InStatus, (struct AFSFid *)
				     &newFid.Fid, OutFidStatus, OutDirStatus,
				     &CallBack, &tsync);
		RX_AFS_GLOCK();
		XSTATS_END_TIME;
		CallBack.ExpirationTime += now;
	    } else
		code = -1;
	} while (afs_Analyze
		 (tc, rxconn, code, &adp->f.fid, treq,
		  AFS_STATS_FS_RPCIDX_CREATEFILE, SHARED_LOCK, NULL));

	if ((code == EEXIST || code == UAEEXIST) &&
#ifdef AFS_SGI64_ENV
	    !(flags & VEXCL)
#else /* AFS_SGI64_ENV */
	    aexcl == NONEXCL
#endif
	    ) {
	    /* if we get an EEXIST in nonexcl mode, just do a lookup */
	    if (tdc) {
		ReleaseSharedLock(&tdc->lock);
		afs_PutDCache(tdc);
	    }
	    ReleaseWriteLock(&adp->lock);
#if defined(AFS_SGI64_ENV)
	    code =
		afs_lookup(VNODE_TO_FIRST_BHV((vnode_t *) adp), aname, avcp,
			   NULL, 0, NULL, acred);
#elif defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
	    code = afs_lookup(adp, aname, avcp, NULL, 0, NULL, acred);
#elif defined(UKERNEL)
	    code = afs_lookup(adp, aname, avcp, acred, 0);
#elif !defined(AFS_DARWIN_ENV)
	    code = afs_lookup(adp, aname, avcp, acred);
#endif
	    goto done;
	}

	if (code) {
	    /* negative codes mean the directory status is suspect */
	    if (code < 0) {
		ObtainWriteLock(&afs_xcbhash, 488);
		afs_DequeueCallback(adp);
		adp->f.states &= ~CStatd;
		ReleaseWriteLock(&afs_xcbhash);
		osi_dnlc_purgedp(adp);
	    }
	    ReleaseWriteLock(&adp->lock);
	    if (tdc) {
		ReleaseSharedLock(&tdc->lock);
		afs_PutDCache(tdc);
	    }
	    goto done;
	}

    } else {
	/* Generate a fake FID for disconnected mode. */
	newFid.Cell = adp->f.fid.Cell;
	newFid.Fid.Volume = adp->f.fid.Fid.Volume;
	afs_GenFakeFid(&newFid, VREG, 1);
    }				/* if (!AFS_IS_DISCON_RW) */

    /* otherwise, we should see if we can make the change to the dir locally */
    if (tdc)
	UpgradeSToWLock(&tdc->lock, 631);
    if (AFS_IS_DISCON_RW || afs_LocalHero(adp, tdc, OutDirStatus, 1)) {
	/* we can do it locally */
	ObtainWriteLock(&afs_xdcache, 291);
	code = afs_dir_Create(tdc, aname, &newFid.Fid);
	ReleaseWriteLock(&afs_xdcache);
	if (code) {
	    ZapDCE(tdc);
	    DZap(tdc);
	}
    }
    if (tdc) {
	ReleaseWriteLock(&tdc->lock);
	afs_PutDCache(tdc);
    }
    if (AFS_IS_DISCON_RW)
	adp->f.m.LinkCount++;

    newFid.Cell = adp->f.fid.Cell;
    newFid.Fid.Volume = adp->f.fid.Fid.Volume;
    ReleaseWriteLock(&adp->lock);
    volp = afs_FindVolume(&newFid, READ_LOCK);

    /* New tricky optimistic callback handling algorithm for file creation works
     * as follows. We create the file essentially with no locks set at all. File
     * server may thus handle operations from others cache managers as well as from
     * this very own cache manager that reference the file in question before
     * we managed to create the cache entry. However, if anyone else changes
     * any of the status information for a file, we'll see afs_evenCBs increase
     * (files always have even fids). If someone on this workstation manages
     * to do something to the file, they'll end up having to create a cache
     * entry for the new file. Either we'll find it once we've got the afs_xvcache
     * lock set, or it was also *deleted* the vnode before we got there, in which case
     * we will find evenZaps has changed, too. Thus, we only assume we have the right
     * status information if no callbacks or vnode removals have occurred to even
     * numbered files from the time the call started until the time that we got the xvcache
     * lock set. Of course, this also assumes that any call that modifies a file first
     * gets a write lock on the file's vnode, but if that weren't true, the whole cache manager
     * would fail, since no call would be able to update the local vnode status after modifying
     * a file on a file server. */
    ObtainWriteLock(&afs_xvcache, 138);
    if (adp->f.states & CForeign)
	finalZaps = afs_allZaps;	/* do this before calling newvcache */
    else
	finalZaps = afs_evenZaps;	/* do this before calling newvcache */
    /* don't need to call RemoveVCB, since only path leaving a callback is the
     * one where we pass through afs_NewVCache.  Can't have queued a VCB unless
     * we created and freed an entry between file creation time and here, and the
     * freeing of the vnode will change evenZaps.  Don't need to update the VLRU
     * queue, since the find will only succeed in the event of a create race, and
     * then the vcache will be at the front of the VLRU queue anyway...  */
    if (!(tvc = afs_FindVCache(&newFid, 0, DO_STATS))) {
	tvc = afs_NewVCache(&newFid, hostp);
	if (tvc) {
	    int finalCBs;
	    ObtainWriteLock(&tvc->lock, 139);

	    ObtainWriteLock(&afs_xcbhash, 489);
	    finalCBs = afs_evenCBs;
	    /* add the callback in */
	    if (adp->f.states & CForeign) {
		tvc->f.states |= CForeign;
		finalCBs = afs_allCBs;
	    }
	    if (origCBs == finalCBs && origZaps == finalZaps) {
		tvc->f.states |= CStatd;	/* we've fake entire thing, so don't stat */
		tvc->f.states &= ~CBulkFetching;
		if (!AFS_IS_DISCON_RW) {
		    tvc->cbExpires = CallBack.ExpirationTime;
		    afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime),
				      volp);
		}
	    } else {
		/* lost the optimistic race; force a fresh stat later */
		afs_DequeueCallback(tvc);
		tvc->f.states &= ~(CStatd | CUnique);
		tvc->callback = 0;
		if (tvc->f.fid.Fid.Vnode & 1 || (vType(tvc) == VDIR))
		    osi_dnlc_purgedp(tvc);
	    }
	    ReleaseWriteLock(&afs_xcbhash);
	    if (AFS_IS_DISCON_RW) {
		afs_DisconAddDirty(tvc, VDisconCreate, 0);
		afs_GenDisconStatus(adp, tvc, &newFid, attrs, treq, VREG);
	    } else {
		afs_ProcessFS(tvc, OutFidStatus, treq);
	    }

	    tvc->f.parent.vnode = adp->f.fid.Fid.Vnode;
	    tvc->f.parent.unique = adp->f.fid.Fid.Unique;
#if !defined(UKERNEL)
	    if (volp && (volp->states & VPartVisible))
		tvc->f.states |= CPartVisible;
#endif
	    ReleaseWriteLock(&tvc->lock);
	    *avcp = tvc;
	    code = 0;
	} else
	    code = ENOENT;
    } else {
	/* otherwise cache entry already exists, someone else must
	 * have created it.  Comments used to say:  "don't need write
	 * lock to *clear* these flags" but we should do it anyway.
	 * Code used to clear stat bit and callback, but I don't see
	 * the point -- we didn't have a create race, somebody else just
	 * snuck into NewVCache before we got here, probably a racing
	 * lookup.
	 */
	*avcp = tvc;
	code = 0;
    }
    ReleaseWriteLock(&afs_xvcache);

  done:
    AFS_DISCON_UNLOCK();

  done3:
    if (volp)
	afs_PutVolume(volp, READ_LOCK);

    if (code == 0) {
	if (afs_mariner)
	    afs_AddMarinerName(aname, *avcp);
	/* return the new status in vattr */
	afs_CopyOutAttrs(*avcp, attrs);
	if (afs_mariner)
	    afs_MarinerLog("store$Creating", *avcp);
    }

    afs_PutFakeStat(&fakestate);
    code = afs_CheckCode(code, treq, 20);
    afs_DestroyReq(treq);

  done2:
    osi_FreeSmallSpace(OutFidStatus);
    osi_FreeSmallSpace(OutDirStatus);
    return code;
}


/*
 * Check to see if we can track the change locally: requires that
 * we have sufficiently recent info in data cache.  If so, we
 * know the new DataVersion number, and place it correctly in both the
 * data and stat cache entries.  This routine returns 1 if we should
 * do the operation locally, and 0 otherwise.
 *
 * This routine must be called with the stat cache entry write-locked,
 * and dcache entry write-locked.
 */
int
afs_LocalHero(struct vcache *avc, struct dcache *adc,
	      AFSFetchStatus * astat, int aincr)
{
    afs_int32 ok;
    afs_hyper_t avers;

    AFS_STATCNT(afs_LocalHero);
    hset64(avers, astat->dataVersionHigh, astat->DataVersion);
    /* avers *is* the version number now, no matter what */

    if (adc) {
	/* does what's in the dcache *now* match what's in the vcache *now*,
	 * and do we have a valid callback? if not, our local copy is not "ok" */
	ok = (hsame(avc->f.m.DataVersion, adc->f.versionNo) && avc->callback
	      && (avc->f.states & CStatd) && avc->cbExpires >= osi_Time());
    } else {
	ok = 0;
    }
    if (ok) {
	/* check that the DV on the server is what we expect it to be */
	afs_hyper_t newDV;
	hset(newDV, adc->f.versionNo);
	hadd32(newDV, aincr);
	if (!hsame(avers, newDV)) {
	    ok = 0;
	}
    }
#if defined(AFS_SGI_ENV)
    osi_Assert(avc->v.v_type == VDIR);
#endif
    /* The bulk status code used the length as a sequence number.  */
    /* Don't update the vcache entry unless the stats are current. */
    if (avc->f.states & CStatd) {
	hset(avc->f.m.DataVersion, avers);
#ifdef AFS_64BIT_CLIENT
	FillInt64(avc->f.m.Length, astat->Length_hi, astat->Length);
#else /* AFS_64BIT_CLIENT */
	avc->f.m.Length = astat->Length;
#endif /* AFS_64BIT_CLIENT */
	avc->f.m.Date = astat->ClientModTime;
    }
    if (ok) {
	/* we've been tracking things correctly */
	adc->dflags |= DFEntryMod;
	adc->f.versionNo = avers;
	return 1;
    } else {
	if (adc) {
	    ZapDCE(adc);
	    DZap(adc);
	}
	if (avc->f.states & CStatd) {
	    osi_dnlc_purgedp(avc);
	}
	return 0;
    }
}
/*
 * Dump core, into a file named "progname.core" or "core" (depending on the
 * value of shortcorename), unless the process was setuid/setgid.
 *
 * Returns 0 on success or an errno (EFBIG if the dump would exceed
 * RLIMIT_CORE, EPERM for no-coredump mounts or disallowed set-id dumps,
 * or any error from name construction, open, or the dumper itself).
 */
int
coredump(struct lwp *l, const char *pattern)
{
	struct vnode		*vp;
	struct proc		*p;
	struct vmspace		*vm;
	kauth_cred_t		cred;
	struct nameidata	nd;
	struct vattr		vattr;
	struct coredump_iostate	io;
	struct plimit		*lim;
	int			error, error1;
	char			*name;

	name = PNBUF_GET();

	p = l->l_proc;
	vm = p->p_vmspace;

	mutex_enter(proc_lock);		/* p_session */
	mutex_enter(p->p_lock);

	/*
	 * Refuse to core if the data + stack + user size is larger than
	 * the core dump limit.  XXX THIS IS WRONG, because of mapped
	 * data.
	 */
	if (USPACE + ctob(vm->vm_dsize + vm->vm_ssize) >=
	    p->p_rlimit[RLIMIT_CORE].rlim_cur) {
		error = EFBIG;		/* better error code? */
		mutex_exit(p->p_lock);
		mutex_exit(proc_lock);
		goto done;
	}

	/*
	 * It may well not be curproc, so grab a reference to its current
	 * credentials.
	 *
	 * NOTE(review): the reference taken here is not visibly released
	 * on any path in this function — verify whether the caller (or a
	 * kauth_cred_free elsewhere) balances this hold, otherwise it
	 * leaks a credential reference per coredump.
	 */
	kauth_cred_hold(p->p_cred);
	cred = p->p_cred;

	/*
	 * The core dump will go in the current working directory.  Make
	 * sure that the directory is still there and that the mount flags
	 * allow us to write core dumps there.
	 *
	 * XXX: this is partially bogus, it should be checking the directory
	 * into which the file is actually written - which probably needs
	 * a flag on namei()
	 */
	vp = p->p_cwdi->cwdi_cdir;
	if (vp->v_mount == NULL ||
	    (vp->v_mount->mnt_flag & MNT_NOCOREDUMP) != 0) {
		error = EPERM;
		mutex_exit(p->p_lock);
		mutex_exit(proc_lock);
		goto done;
	}

	/*
	 * Make sure the process has not set-id, to prevent data leaks,
	 * unless it was specifically requested to allow set-id coredumps.
	 */
	if (p->p_flag & PK_SUGID) {
		if (!security_setidcore_dump) {
			error = EPERM;
			mutex_exit(p->p_lock);
			mutex_exit(proc_lock);
			goto done;
		}
		pattern = security_setidcore_path;
	}

	/* It is (just) possible for p_limit and pl_corename to change */
	lim = p->p_limit;
	mutex_enter(&lim->pl_lock);
	if (pattern == NULL)
		pattern = lim->pl_corename;
	error = coredump_buildname(p, name, pattern, MAXPATHLEN);
	mutex_exit(&lim->pl_lock);
	mutex_exit(p->p_lock);
	mutex_exit(proc_lock);
	if (error)
		goto done;

	NDINIT(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, name);
	if ((error = vn_open(&nd, O_CREAT | O_NOFOLLOW | FWRITE,
	    S_IRUSR | S_IWUSR)) != 0)
		goto done;
	vp = nd.ni_vp;

	/* Don't dump to non-regular files or files with links. */
	if (vp->v_type != VREG ||
	    VOP_GETATTR(vp, &vattr, cred) || vattr.va_nlink != 1) {
		error = EINVAL;
		goto out;
	}
	/* truncate any previous core and, for set-id dumps, force the
	 * configured owner/group/mode */
	VATTR_NULL(&vattr);
	vattr.va_size = 0;

	if ((p->p_flag & PK_SUGID) && security_setidcore_dump) {
		vattr.va_uid = security_setidcore_owner;
		vattr.va_gid = security_setidcore_group;
		vattr.va_mode = security_setidcore_mode;
	}

	/* NOTE(review): VOP_SETATTR's return value is deliberately (?)
	 * ignored here — a failed truncate still proceeds to dump */
	VOP_SETATTR(vp, &vattr, cred);
	p->p_acflag |= ACORE;

	io.io_lwp = l;
	io.io_vp = vp;
	io.io_cred = cred;
	io.io_offset = 0;

	/* Now dump the actual core file. */
	error = (*p->p_execsw->es_coredump)(l, &io);
out:
	VOP_UNLOCK(vp, 0);
	error1 = vn_close(vp, FWRITE, cred);
	if (error == 0)
		error = error1;
done:
	if (name != NULL)
		PNBUF_PUT(name);
	return error;
}