/*
 * Solaris-compat shim: vnode access override policy.
 * The vnode, owner and mode arguments are ignored here; access is
 * granted exactly when the credentials imply superuser.
 */
int
secpolicy_vnode_access(kauth_cred_t cred, struct vnode *vp, uid_t owner,
    int mode)
{
	int error;

	error = kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER, NULL);
	return error;
}
/*
 * Console device ioctl.  Handles TIOCCONS takeover locally; everything
 * else is redirected to the underlying console device.
 */
int
cnioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	int error = 0;

	/*
	 * Superuser can always use TIOCCONS to wrest control of console
	 * output back from the "virtual" console.
	 */
	if (cmd == TIOCCONS && constty != NULL) {
		error = kauth_authorize_generic(l->l_cred,
		    KAUTH_GENERIC_ISSUSER, NULL);
		if (error == 0)
			constty = NULL;
		return error;
	}

	/*
	 * Redirect the ioctl if appropriate.  Strange things can happen
	 * when a program does ioctls on /dev/console while the console
	 * is redirected out from under it.
	 */
	if (!cn_redirect(&dev, 0, &error))
		return error;
	return cdev_ioctl(dev, cmd, data, flag, l);
}
/*
 * Perform chown operation on inode ip; inode must be locked prior to call.
 *
 * Unchanged fields may be passed as VNOVAL.  A caller who does not own
 * the node, is changing the owner, or is not a member of the target
 * group must hold superuser credentials.
 */
static int
ptyfs_chown(struct vnode *vp, uid_t uid, gid_t gid, kauth_cred_t cred,
    struct lwp *l)
{
	struct ptyfsnode *ptyfs = VTOPTYFS(vp);
	int error, ismember = 0;
	int need_suser = 0;

	/* VNOVAL means "leave as is". */
	if (uid == (uid_t)VNOVAL)
		uid = ptyfs->ptyfs_uid;
	if (gid == (gid_t)VNOVAL)
		gid = ptyfs->ptyfs_gid;

	/*
	 * Decide whether superuser privilege is required, preserving the
	 * original short-circuit order: only consult group membership if
	 * the caller owns the node and is not changing the owner.
	 */
	if (kauth_cred_geteuid(cred) != ptyfs->ptyfs_uid ||
	    uid != ptyfs->ptyfs_uid)
		need_suser = 1;
	else if (gid != ptyfs->ptyfs_gid &&
	    kauth_cred_getegid(cred) != gid &&
	    !(kauth_cred_ismember_gid(cred, gid, &ismember) == 0 && ismember))
		need_suser = 1;

	if (need_suser) {
		error = kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER,
		    NULL);
		if (error != 0)
			return error;
	}

	ptyfs->ptyfs_gid = gid;
	ptyfs->ptyfs_uid = uid;
	return 0;
}
/*
 * Open the CMOS device.  Restricted to the superuser; the dev, flags
 * and ifmt arguments are not examined here.
 */
int
cmos_open(dev_t dev, int flags, int ifmt, struct lwp *l)
{
	kauth_cred_t cred = kauth_cred_get();

	return kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER, NULL);
}
/*
 * Solaris-compat shim: may the caller modify DAC attributes?
 * The owner always may; anyone else needs superuser credentials.
 */
int
secpolicy_vnode_setdac(kauth_cred_t cred, uid_t owner)
{
	if (kauth_cred_getuid(cred) == owner)
		return 0;
	return kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER, NULL);
}
/*
 * Solaris-compat shim: may the caller set the set-gid bit for group gid?
 * Members of the group always may; anyone else needs superuser
 * credentials.
 */
int
secpolicy_vnode_setids_setgids(kauth_cred_t cred, gid_t gid)
{
	if (groupmember(gid, cred))
		return 0;
	return kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER, NULL);
}
/*
 * Solaris-compat shim: strip set-id bits on write by an unprivileged
 * caller.
 *
 * Upstream (illumos) semantics: a privileged caller may RETAIN
 * S_ISUID/S_ISGID across a write; everyone else has them cleared.
 * The previous version had the check inverted — it returned early for
 * non-superuser callers (letting unprivileged writers keep set-id
 * bits) and cleared them only for the superuser.  This also disagreed
 * with secpolicy_vnode_setid_retain() in this file, which grants
 * retention only to the superuser.
 */
void
secpolicy_setid_clear(struct vattr *vap, kauth_cred_t cred)
{
	/* Superuser is allowed to retain the set-id bits. */
	if (kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER, NULL) == 0)
		return;

	if ((vap->va_mode & (S_ISUID | S_ISGID)) != 0) {
		vap->va_mask |= AT_MODE;
		vap->va_mode &= ~(S_ISUID|S_ISGID);
	}
	return;
}
/*
 * Change the mode on a file.  Inode must be locked before calling.
 * A caller who does not own the node needs superuser credentials.
 */
static int
ptyfs_chmod(struct vnode *vp, mode_t mode, kauth_cred_t cred, struct lwp *l)
{
	struct ptyfsnode *ptyfs = VTOPTYFS(vp);
	int error;

	if (kauth_cred_geteuid(cred) != ptyfs->ptyfs_uid) {
		error = kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER,
		    NULL);
		if (error != 0)
			return error;
	}

	/* Replace the permission bits, leaving the format bits alone. */
	ptyfs->ptyfs_mode = (ptyfs->ptyfs_mode & ~ALLPERMS) |
	    (mode & ALLPERMS);
	return 0;
}
/*
 * Strip S_ISUID/S_ISGID on write unless the caller holds the
 * PRIV_VFS_RETAINSUGID privilege (FreeBSD-style variant).
 *
 * The previous version opened with an early return whenever
 * kauth_authorize_generic(KAUTH_GENERIC_ISSUSER) failed; combined with
 * priv_check_cred() succeeding (returning 0) for the superuser, the
 * function never cleared the bits for anyone.  Upstream FreeBSD
 * (sys/cddl/compat/opensolaris/kern/opensolaris_policy.c) has no such
 * early return; the privilege check alone decides retention.
 */
void
secpolicy_setid_clear(struct vattr *vap, kauth_cred_t cred)
{
	if ((vap->va_mode & (S_ISUID | S_ISGID)) != 0) {
		/* Non-zero means the caller lacks the retain privilege. */
		if (priv_check_cred(cred, PRIV_VFS_RETAINSUGID, 0)) {
			vap->va_mask |= AT_MODE;
			vap->va_mode &= ~(S_ISUID|S_ISGID);
		}
	}
}
/*
 * Check the inode limit, applying corrective action.
 *
 * change > 0: charge the inodes against each of the inode's dquots,
 * enforcing limits via chkiqchg() unless FORCE is set or the caller is
 * the superuser.  change < 0: release inodes, clamping the count at
 * zero.  Each dquot is updated under its own dq_interlock.
 */
int
chkiq(struct inode *ip, int32_t change, kauth_cred_t cred, int flags)
{
	struct dquot *dq;
	int i;
	int ncurinodes, error;

	if ((error = getinoquota(ip)) != 0)
		return error;
	if (change == 0)
		return (0);

	/* Releasing inodes: credit each dquot, never going below zero. */
	if (change < 0) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			mutex_enter(&dq->dq_interlock);
			ncurinodes = dq->dq_curinodes + change;
			if (ncurinodes >= 0)
				dq->dq_curinodes = ncurinodes;
			else
				dq->dq_curinodes = 0;
			/* No longer over the inode limit. */
			dq->dq_flags &= ~DQ_INODS;
			dq->dq_flags |= DQ_MOD;
			mutex_exit(&dq->dq_interlock);
		}
		return (0);
	}

	/*
	 * Allocating inodes: enforce limits first, unless forced or the
	 * caller is the superuser.  Any limit violation aborts before
	 * any dquot has been charged.
	 */
	if ((flags & FORCE) == 0 &&
	    kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER,
	    NULL) != 0) {
		for (i = 0; i < MAXQUOTAS; i++) {
			if ((dq = ip->i_dquot[i]) == NODQUOT)
				continue;
			mutex_enter(&dq->dq_interlock);
			error = chkiqchg(ip, change, cred, i);
			mutex_exit(&dq->dq_interlock);
			if (error != 0)
				return (error);
		}
	}

	/* All checks passed (or bypassed): apply the charge. */
	for (i = 0; i < MAXQUOTAS; i++) {
		if ((dq = ip->i_dquot[i]) == NODQUOT)
			continue;
		mutex_enter(&dq->dq_interlock);
		dq->dq_curinodes += change;
		dq->dq_flags |= DQ_MOD;
		mutex_exit(&dq->dq_interlock);
	}
	return (0);
}
/*
 * Solaris-compat shim: validate sticky and set-gid bit changes.
 *
 * Only privileged processes may set the sticky bit on non-directories;
 * setting the set-gid bit requires membership in the file's group or
 * superuser credentials (checked by secpolicy_vnode_setids_setgids()).
 */
int
secpolicy_setid_setsticky_clear(struct vnode *vp, struct vattr *vap,
    const struct vattr *ovap, kauth_cred_t cred)
{
	/* Sticky bit on a non-directory: privileged only. */
	if (vp->v_type != VDIR && (vap->va_mode & S_ISTXT) != 0 &&
	    kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER, NULL) != 0)
		return EFTYPE;

	/* Set-gid bit: defer to the set-ids policy for the file's group. */
	if ((vap->va_mode & S_ISGID) != 0)
		return secpolicy_vnode_setids_setgids(cred, ovap->va_gid);

	return 0;
}
/*
 * Solaris-compat shim: may the caller modify file times arbitrarily?
 * Superuser only.
 */
int
secpolicy_vnode_utime_modify(kauth_cred_t cred)
{
	int error;

	error = kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER, NULL);
	return error;
}
/*
 * Solaris-compat shim: may the caller change file ownership?
 * Superuser only; check_self is not consulted by this implementation.
 */
int
secpolicy_vnode_chown(kauth_cred_t cred, boolean_t check_self)
{
	int error;

	error = kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER, NULL);
	return error;
}
/*
 * Solaris-compat shim: may the caller retain set-id bits across a
 * write?  Superuser only; issuidroot is unused here.
 */
int
secpolicy_vnode_setid_retain(kauth_cred_t cred, boolean_t issuidroot __unused)
{
	int error;

	error = kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER, NULL);
	return error;
}
/*
 * Solaris-compat shim: blanket ZFS administrative privilege check.
 * Superuser only.
 */
int
secpolicy_zfs(kauth_cred_t cred)
{
	int error;

	error = kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER, NULL);
	return error;
}
/*
 * VFS mount entry point for HFS.
 *
 * Handles MNT_GETARGS queries, validates the block device named in
 * args->fspec, performs the device-permission check for non-superuser
 * mounts, and hands off to hfs_mountfs().  Remounting (MNT_UPDATE) is
 * compiled out below and rejected.
 */
int
hfs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
{
	struct lwp *l = curlwp;
	struct nameidata nd;
	struct hfs_args *args = data;
	struct vnode *devvp;
	struct hfsmount *hmp;
	int error;
	int update;
	mode_t accessmode;

	if (*data_len < sizeof *args)
		return EINVAL;

#ifdef HFS_DEBUG
	printf("vfsop = hfs_mount()\n");
#endif /* HFS_DEBUG */

	/* MNT_GETARGS: report current mount arguments and return. */
	if (mp->mnt_flag & MNT_GETARGS) {
		hmp = VFSTOHFS(mp);
		if (hmp == NULL)
			return EIO;
		args->fspec = NULL;
		*data_len = sizeof *args;
		return 0;
	}

	if (data == NULL)
		return EINVAL;

/* FIXME: For development ONLY - disallow remounting for now */
#if 0
	update = mp->mnt_flag & MNT_UPDATE;
#else
	update = 0;
#endif

	/* Check arguments */
	if (args->fspec != NULL) {
		/*
		 * Look up the name and verify that it's sane.
		 * On namei() success, error is set to 0 here.
		 */
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, args->fspec);
		if ((error = namei(&nd)) != 0)
			return error;
		devvp = nd.ni_vp;

		if (!update) {
			/*
			 * Be sure this is a valid block device
			 */
			if (devvp->v_type != VBLK)
				error = ENOTBLK;
			else if (bdevsw_lookup(devvp->v_rdev) == NULL)
				error = ENXIO;
		} else {
			/*
			 * Be sure we're still naming the same device
			 * used for our initial mount
			 */
			hmp = VFSTOHFS(mp);
			if (devvp != hmp->hm_devvp)
				error = EINVAL;
		}
	} else {
		if (update) {
			/*
			 * Use the extant mount.
			 * NOTE(review): error is not assigned on this
			 * path before the "error == 0" test below —
			 * latent uninitialized read, currently
			 * unreachable while update is forced to 0
			 * above; confirm before re-enabling remount.
			 */
			hmp = VFSTOHFS(mp);
			devvp = hmp->hm_devvp;
			vref(devvp);
		} else {
			/* New mounts must have a filename for the device */
			return EINVAL;
		}
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	if (error == 0 && kauth_authorize_generic(l->l_cred,
	    KAUTH_GENERIC_ISSUSER, NULL) != 0) {
		accessmode = VREAD;
		if (update ?
		    (mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
		    (mp->mnt_flag & MNT_RDONLY) == 0)
			accessmode |= VWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_ACCESS(devvp, accessmode, l->l_cred);
		VOP_UNLOCK(devvp, 0);
	}

	if (error != 0)
		goto error;

	if (update) {
		printf("HFS: live remounting not yet supported!\n");
		error = EINVAL;
		goto error;
	}

	if ((error = hfs_mountfs(devvp, mp, l, args->fspec)) != 0)
		goto error;

	error = set_statvfs_info(path, UIO_USERSPACE, args->fspec,
	    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);

#ifdef HFS_DEBUG
	if(!update) {
		char* volname;

		hmp = VFSTOHFS(mp);
		volname = malloc(hmp->hm_vol.name.length + 1, M_TEMP,
		    M_WAITOK);
		if (volname == NULL)
			printf("could not allocate volname; ignored\n");
		else {
			if (hfs_unicode_to_ascii(hmp->hm_vol.name.unicode,
			    hmp->hm_vol.name.length, volname) == NULL)
				printf("could not convert volume name to ascii; ignored\n");
			else
				printf("mounted volume \"%s\"\n", volname);
			free(volname, M_TEMP);
		}
	}
#endif /* HFS_DEBUG */

	return error;

	/* Failure after devvp was obtained: drop our reference. */
error:
	vrele(devvp);
	return error;
}
/*
 * Set attributes on a ptyfs node (VOP_SETATTR).
 *
 * Validates each vattr field that is not VNOVAL and applies it:
 * flags (with BSD file-flag semantics and SF_SNAPSHOT restrictions),
 * owner/group via ptyfs_chown(), times, and mode via ptyfs_chmod().
 * Read-only mounts reject every modification with EROFS.
 */
/*ARGSUSED*/
int
ptyfs_setattr(void *v)
{
	struct vop_setattr_args /* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct ptyfsnode *ptyfs = VTOPTYFS(vp);
	struct vattr *vap = ap->a_vap;
	kauth_cred_t cred = ap->a_cred;
	struct lwp *l = curlwp;
	int error;

	/* Size changes only make sense on pts/ptc nodes, never the root. */
	if (vap->va_size != VNOVAL) {
		switch (ptyfs->ptyfs_type) {
		case PTYFSroot:
			return EISDIR;
		case PTYFSpts:
		case PTYFSptc:
			break;
		default:
			return EINVAL;
		}
	}

	if (vap->va_flags != VNOVAL) {
		if (vp->v_mount->mnt_flag & MNT_RDONLY)
			return EROFS;
		/* Non-owners need superuser credentials to touch flags. */
		if (kauth_cred_geteuid(cred) != ptyfs->ptyfs_uid &&
		    (error = kauth_authorize_generic(cred,
		    KAUTH_GENERIC_ISSUSER, NULL)) != 0)
			return error;
		/* Immutable and append-only flags are not supported on ptyfs. */
		if (vap->va_flags & (IMMUTABLE | APPEND))
			return EINVAL;
		if (kauth_authorize_generic(cred,
		    KAUTH_GENERIC_ISSUSER, NULL) == 0) {
			/* Superuser may set any flag except SF_SNAPSHOT,
			 * which cannot be set or cleared. */
			if ((vap->va_flags & SF_SNAPSHOT) !=
			    (ptyfs->ptyfs_flags & SF_SNAPSHOT))
				return EPERM;
			ptyfs->ptyfs_flags = vap->va_flags;
		} else {
			/* Owner may change only the user-settable flags
			 * and must leave the superuser flags untouched. */
			if ((ptyfs->ptyfs_flags & SF_SETTABLE) !=
			    (vap->va_flags & SF_SETTABLE))
				return EPERM;
			ptyfs->ptyfs_flags &= SF_SETTABLE;
			ptyfs->ptyfs_flags |= (vap->va_flags & UF_SETTABLE);
		}
		ptyfs->ptyfs_flag |= PTYFS_CHANGE;
	}

	/*
	 * Go through the fields and update iff not VNOVAL.
	 */
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		if (vp->v_mount->mnt_flag & MNT_RDONLY)
			return EROFS;
		if (ptyfs->ptyfs_type == PTYFSroot)
			return EPERM;
		error = ptyfs_chown(vp, vap->va_uid, vap->va_gid, cred, l);
		if (error)
			return error;
	}

	if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL ||
	    vap->va_birthtime.tv_sec != VNOVAL) {
		if (vp->v_mount->mnt_flag & MNT_RDONLY)
			return EROFS;
		if ((ptyfs->ptyfs_flags & SF_SNAPSHOT) != 0)
			return EPERM;
		/*
		 * Non-owners need either superuser credentials or, for a
		 * utimes(NULL)-style call, write access to the node.
		 */
		if (kauth_cred_geteuid(cred) != ptyfs->ptyfs_uid &&
		    (error = kauth_authorize_generic(cred,
		    KAUTH_GENERIC_ISSUSER, NULL)) &&
		    ((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
		    (error = VOP_ACCESS(vp, VWRITE, cred)) != 0))
			return (error);
		if (vap->va_atime.tv_sec != VNOVAL)
			if (!(vp->v_mount->mnt_flag & MNT_NOATIME))
				ptyfs->ptyfs_flag |= PTYFS_ACCESS;
		if (vap->va_mtime.tv_sec != VNOVAL)
			ptyfs->ptyfs_flag |= PTYFS_CHANGE | PTYFS_MODIFY;
		if (vap->va_birthtime.tv_sec != VNOVAL)
			ptyfs->ptyfs_birthtime = vap->va_birthtime;
		ptyfs->ptyfs_flag |= PTYFS_CHANGE;
		error = ptyfs_update(vp, &vap->va_atime, &vap->va_mtime, 0);
		if (error)
			return error;
	}

	if (vap->va_mode != (mode_t)VNOVAL) {
		if (vp->v_mount->mnt_flag & MNT_RDONLY)
			return EROFS;
		if (ptyfs->ptyfs_type == PTYFSroot)
			return EPERM;
		/* Snapshots may not gain write or execute bits. */
		if ((ptyfs->ptyfs_flags & SF_SNAPSHOT) != 0 &&
		    (vap->va_mode & (S_IXUSR|S_IWUSR|S_IXGRP|S_IWGRP|
		    S_IXOTH|S_IWOTH)))
			return EPERM;
		error = ptyfs_chmod(vp, vap->va_mode, cred, l);
		if (error)
			return error;
	}
	VN_KNOTE(vp, NOTE_ATTRIB);
	return 0;
}
/*
 * Bind an IPv6 PCB to a local address and/or port.
 *
 * Validates the sockaddr_in6 in nam (family, scope, multicast /
 * v4-mapped / unicast address classes), enforces the reserved-port
 * privilege check, detects address/port collisions honoring
 * SO_REUSEADDR/SO_REUSEPORT, then records the binding and rehashes the
 * PCB on the local-port hash list.
 */
int
in6_pcbbind(void *v, struct mbuf *nam, struct lwp *l)
{
	struct in6pcb *in6p = v;
	struct socket *so = in6p->in6p_socket;
	struct inpcbtable *table = in6p->in6p_table;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)NULL;
	u_int16_t lport = 0;
	int wild = 0, reuseport = (so->so_options & SO_REUSEPORT);

	if (in6p->in6p_af != AF_INET6)
		return (EINVAL);

	/* Already bound? */
	if (in6p->in6p_lport || !IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_laddr))
		return (EINVAL);

	/* Wildcard matching applies unless reuse options are set or the
	 * socket is a listening connection-oriented socket. */
	if ((so->so_options & (SO_REUSEADDR|SO_REUSEPORT)) == 0 &&
	    ((so->so_proto->pr_flags & PR_CONNREQUIRED) == 0 ||
	    (so->so_options & SO_ACCEPTCONN) == 0))
		wild = 1;

	if (nam) {
		int error;

		sin6 = mtod(nam, struct sockaddr_in6 *);
		if (nam->m_len != sizeof(*sin6))
			return (EINVAL);
		/*
		 * We should check the family, but old programs
		 * incorrectly fail to initialize it.
		 */
		if (sin6->sin6_family != AF_INET6)
			return (EAFNOSUPPORT);
#ifndef INET
		/* Without INET, v4-mapped addresses cannot be serviced. */
		if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr))
			return (EADDRNOTAVAIL);
#endif
		if ((error = sa6_embedscope(sin6, ip6_use_defzone)) != 0)
			return (error);
		lport = sin6->sin6_port;
		if (IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
			/*
			 * Treat SO_REUSEADDR as SO_REUSEPORT for multicast;
			 * allow complete duplication of binding if
			 * SO_REUSEPORT is set, or if SO_REUSEADDR is set
			 * and a multicast address is bound on both
			 * new and duplicated sockets.
			 */
			if (so->so_options & SO_REUSEADDR)
				reuseport = SO_REUSEADDR|SO_REUSEPORT;
		} else if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			/* v4-mapped binds require a v6-capable socket and
			 * a locally configured IPv4 address. */
			if ((in6p->in6p_flags & IN6P_IPV6_V6ONLY) != 0)
				return (EINVAL);
			if (sin6->sin6_addr.s6_addr32[3]) {
				struct sockaddr_in sin;

				bzero(&sin, sizeof(sin));
				sin.sin_len = sizeof(sin);
				sin.sin_family = AF_INET;
				bcopy(&sin6->sin6_addr.s6_addr32[3],
				    &sin.sin_addr, sizeof(sin.sin_addr));
				if (ifa_ifwithaddr(
				    (struct sockaddr *)&sin) == 0)
					return EADDRNOTAVAIL;
			}
		} else if (!IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
			struct ifaddr *ia = NULL;

			sin6->sin6_port = 0; /* yech... */
			if ((in6p->in6p_flags & IN6P_FAITH) == 0 &&
			    (ia = ifa_ifwithaddr(
			    (struct sockaddr *)sin6)) == 0)
				return (EADDRNOTAVAIL);

			/*
			 * bind to an anycast address might accidentally
			 * cause sending a packet with an anycast source
			 * address, so we forbid it.
			 *
			 * We should allow to bind to a deprecated address,
			 * since the application dares to use it.
			 * But, can we assume that they are careful enough
			 * to check if the address is deprecated or not?
			 * Maybe, as a safeguard, we should have a setsockopt
			 * flag to control the bind(2) behavior against
			 * deprecated addresses (default: forbid bind(2)).
			 */
			if (ia &&
			    ((struct in6_ifaddr *)ia)->ia6_flags &
			    (IN6_IFF_ANYCAST|IN6_IFF_NOTREADY|
			    IN6_IFF_DETACHED))
				return (EADDRNOTAVAIL);
		}
		if (lport) {
#ifndef IPNOPRIVPORTS
			int priv;

			/*
			 * NOTE: all operating systems use suser() for
			 * privilege check! do not rewrite it into SS_PRIV.
			 */
			priv = (l && !kauth_authorize_generic(l->l_cred,
			    KAUTH_GENERIC_ISSUSER, NULL)) ? 1 : 0;
			/* GROSS */
			if (ntohs(lport) < IPV6PORT_RESERVED && !priv)
				return (EACCES);
#endif
			if (IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
#ifdef INET
				/* Check the IPv4 port space as well. */
				struct inpcb *t;

				t = in_pcblookup_port(table,
				    *(struct in_addr *)
				    &sin6->sin6_addr.s6_addr32[3],
				    lport, wild);
				if (t &&
				    (reuseport &
				    t->inp_socket->so_options) == 0)
					return (EADDRINUSE);
#else
				return (EADDRNOTAVAIL);
#endif
			}
			{
				/* And the IPv6 port space. */
				struct in6pcb *t;

				t = in6_pcblookup_port(table,
				    &sin6->sin6_addr, lport, wild);
				if (t &&
				    (reuseport &
				    t->in6p_socket->so_options) == 0)
					return (EADDRINUSE);
			}
		}
		in6p->in6p_laddr = sin6->sin6_addr;
	}

	if (lport == 0) {
		/* Pick an ephemeral port. */
		int e;
		e = in6_pcbsetport(&in6p->in6p_laddr, in6p, l);
		if (e != 0)
			return (e);
	} else {
		in6p->in6p_lport = lport;
		in6_pcbstate(in6p, IN6P_BOUND);
	}

	/* Rehash onto the local-port hash chain. */
	LIST_REMOVE(&in6p->in6p_head, inph_lhash);
	LIST_INSERT_HEAD(IN6PCBHASH_PORT(table, in6p->in6p_lport),
	    &in6p->in6p_head, inph_lhash);

#if 0
	in6p->in6p_flowinfo = 0; /* XXX */
#endif
	return (0);
}
/*
 * Do operations associated with quotas.
 *
 * Decodes the quotactl(2) command word, performs the privilege check
 * (Q_SYNC and self-targeted Q_GETQUOTA are unprivileged; everything
 * else needs superuser), then dispatches to the per-command helpers
 * while holding the mount busy and mnt_updating.  Compiled to
 * EOPNOTSUPP when the kernel lacks QUOTA support.
 */
int
ufs_quotactl(struct mount *mp, int cmds, uid_t uid, void *arg)
{
	struct lwp *l = curlwp;

#ifndef QUOTA
	(void) mp;
	(void) cmds;
	(void) uid;
	(void) arg;
	(void) l;
	return (EOPNOTSUPP);
#else
	int cmd, type, error;

	/* uid == -1 means "the caller's own uid". */
	if (uid == -1)
		uid = kauth_cred_getuid(l->l_cred);
	cmd = cmds >> SUBCMDSHIFT;

	switch (cmd) {
	case Q_SYNC:
		break;
	case Q_GETQUOTA:
		/* Anyone may query their own quota. */
		if (uid == kauth_cred_getuid(l->l_cred))
			break;
		/* fall through */
	default:
		if ((error = kauth_authorize_generic(l->l_cred,
		    KAUTH_GENERIC_ISSUSER, NULL)) != 0)
			return (error);
	}

	type = cmds & SUBCMDMASK;
	if ((u_int)type >= MAXQUOTAS)
		return (EINVAL);

	error = vfs_busy(mp, NULL);
	if (error != 0)
		return (error);

	mutex_enter(&mp->mnt_updating);
	switch (cmd) {

	case Q_QUOTAON:
		error = quotaon(l, mp, type, arg);
		break;

	case Q_QUOTAOFF:
		error = quotaoff(l, mp, type);
		break;

	case Q_SETQUOTA:
		error = setquota(mp, uid, type, arg);
		break;

	case Q_SETUSE:
		error = setuse(mp, uid, type, arg);
		break;

	case Q_GETQUOTA:
		error = getquota(mp, uid, type, arg);
		break;

	case Q_SYNC:
		error = qsync(mp);
		break;

	default:
		error = EINVAL;
	}
	mutex_exit(&mp->mnt_updating);
	vfs_unbusy(mp, false, NULL);
	return (error);
#endif
}
/*
 * Solaris-compat shim: system configuration privilege check.
 * Superuser only; checkonly is unused here.
 */
int
secpolicy_sys_config(kauth_cred_t cred, int checkonly __unused)
{
	int error;

	error = kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER, NULL);
	return error;
}
/*
 * Solaris-compat shim: extended-vattr modification policy.
 * The xvattr, owner and vtype arguments are ignored; superuser only.
 */
int
secpolicy_xvattr(xvattr_t *xvap, uid_t owner, kauth_cred_t cred,
    vtype_t vtype)
{
	int error;

	error = kauth_authorize_generic(cred, KAUTH_GENERIC_ISSUSER, NULL);
	return error;
}