uid_t
zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid,
    cred_t *cr, zfs_fuid_type_t type)
{
	uint32_t index = FUID_INDEX(fuid);
	char *domain;
	uid_t id;

	if (index == 0)
		return (fuid);

	domain = zfs_fuid_find_by_idx(zfsvfs, index);
	ASSERT(domain != NULL);

#ifdef TODO
	if (type == ZFS_OWNER || type == ZFS_ACE_USER) {
		(void) kidmap_getuidbysid(crgetzone(cr), domain,
		    FUID_RID(fuid), &id);
	} else {
		(void) kidmap_getgidbysid(crgetzone(cr), domain,
		    FUID_RID(fuid), &id);
	}
#else
	panic(__func__);
#endif
	return (id);
}
/*
 * If the credential user SID or group SID is mapped to an ephemeral
 * ID, map the credential to nobody.
 */
cred_t *
crgetmapped(const cred_t *cr)
{
	ephemeral_zsd_t *eph_zsd;

	/*
	 * Someone incorrectly passed a NULL cred to a vnode operation
	 * either on purpose or by calling CRED() in interrupt context.
	 */
	if (cr == NULL)
		return (NULL);

	if (cr->cr_ksid != NULL) {
		if (cr->cr_ksid->kr_sidx[KSID_USER].ks_id > MAXUID) {
			eph_zsd = get_ephemeral_zsd(crgetzone(cr));
			return (eph_zsd->eph_nobody);
		}
		if (cr->cr_ksid->kr_sidx[KSID_GROUP].ks_id > MAXUID) {
			eph_zsd = get_ephemeral_zsd(crgetzone(cr));
			return (eph_zsd->eph_nobody);
		}
	}

	return ((cred_t *)cr);
}
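/*
 * Hedged usage sketch, not from the source above: a hypothetical
 * consumer that wants a credential safe for POSIX-id based work.
 * It folds ephemeral-ID creds to "nobody" via crgetmapped() before
 * extracting ids; vattr_from_cred() is an invented helper name, and
 * crgetuid()/crgetgid() are the standard cred accessors.
 */
static void
vattr_from_cred(const cred_t *cr, vattr_t *vap)
{
	cred_t *mcr = crgetmapped(cr);	/* ephemeral ids become "nobody" */

	vap->va_uid = crgetuid(mcr);
	vap->va_gid = crgetgid(mcr);
	vap->va_mask |= AT_UID | AT_GID;
}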
uid_t
zfs_fuid_map_id(zfsvfs_t *zfsvfs, uint64_t fuid,
    cred_t *cr, zfs_fuid_type_t type)
{
#ifdef HAVE_ZPL
	uint32_t index = FUID_INDEX(fuid);
	const char *domain;
	uid_t id;

	if (index == 0)
		return (fuid);

	domain = zfs_fuid_find_by_idx(zfsvfs, index);
	ASSERT(domain != NULL);

	if (type == ZFS_OWNER || type == ZFS_ACE_USER) {
		(void) kidmap_getuidbysid(crgetzone(cr), domain,
		    FUID_RID(fuid), &id);
	} else {
		(void) kidmap_getgidbysid(crgetzone(cr), domain,
		    FUID_RID(fuid), &id);
	}
	return (id);
#else
	if (type == ZFS_OWNER || type == ZFS_ACE_USER)
		return (crgetuid(cr));
	else
		return (crgetgid(cr));
#endif
}
uid_t
zfs_fuid_map_id(zfs_sb_t *zsb, uint64_t fuid,
    cred_t *cr, zfs_fuid_type_t type)
{
#ifdef HAVE_KSID
	uint32_t index = FUID_INDEX(fuid);
	const char *domain;
	uid_t id;

	if (index == 0)
		return (fuid);

	domain = zfs_fuid_find_by_idx(zsb, index);
	ASSERT(domain != NULL);

	if (type == ZFS_OWNER || type == ZFS_ACE_USER) {
		(void) kidmap_getuidbysid(crgetzone(cr), domain,
		    FUID_RID(fuid), &id);
	} else {
		(void) kidmap_getgidbysid(crgetzone(cr), domain,
		    FUID_RID(fuid), &id);
	}
	return (id);
#else
	/*
	 * The Linux port only supports POSIX IDs, use the passed id.
	 */
	return (fuid);
#endif /* HAVE_KSID */
}
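/*
 * Hedged usage sketch (an assumption, not from the source): how an
 * access-check path might resolve a znode's on-disk owner FUID to a
 * POSIX uid before comparing it against the caller's credential.
 * zfs_is_owner() and the z_uid parameter are illustrative names only.
 */
static boolean_t
zfs_is_owner(zfs_sb_t *zsb, uint64_t z_uid, cred_t *cr)
{
	/* Map the stored FUID (or POSIX id) into the caller's id space. */
	uid_t owner = zfs_fuid_map_id(zsb, z_uid, cr, ZFS_OWNER);

	return (owner == crgetuid(cr));
}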
/*
 * Returns 0 on success, EACCES on permission failure.
 */
static int
sctp_select_port(sctp_t *sctp, in_port_t *requested_port, int *user_specified)
{
	/*
	 * Get a valid port (within the anonymous range and should not
	 * be a privileged one) to use if the user has not given a port.
	 * If multiple threads are here, they may all start with
	 * the same initial port. But, it should be fine as long as
	 * sctp_bindi will ensure that no two threads will be assigned
	 * the same port.
	 */
	if (*requested_port == 0) {
		*requested_port = sctp_update_next_port(sctp_next_port_to_try,
		    crgetzone(sctp->sctp_credp));
		if (*requested_port == 0)
			return (EACCES);
		*user_specified = 0;
	} else {
		int i;
		boolean_t priv = B_FALSE;

		/*
		 * If the requested_port is in the well-known privileged range,
		 * verify that the stream was opened by a privileged user.
		 * Note: No locks are held when inspecting sctp_g_*epriv_ports
		 * but instead the code relies on:
		 * - the fact that the address of the array and its size never
		 *   changes
		 * - the atomic assignment of the elements of the array
		 */
		if (*requested_port < sctp_smallest_nonpriv_port) {
			priv = B_TRUE;
		} else {
			for (i = 0; i < sctp_g_num_epriv_ports; i++) {
				if (*requested_port == sctp_g_epriv_ports[i]) {
					priv = B_TRUE;
					break;
				}
			}
		}
		if (priv) {
			/*
			 * sctp_bind() should take a cred_t argument so that
			 * we can use it here.
			 */
			if (secpolicy_net_privaddr(sctp->sctp_credp,
			    *requested_port) != 0) {
				dprint(1, ("sctp_bind(x): no priv for port %d",
				    *requested_port));
				return (EACCES);
			}
		}
		*user_specified = 1;
	}

	return (0);
}
/*
 * Interface to effectively set the PRIV_ALL for
 * a credential; this interface does no security checks and is
 * intended for kernel (file)servers to extend the user credentials
 * to be ALL, like either kcred or zcred.
 */
void
crset_zone_privall(cred_t *cr)
{
	zone_t	*zone = crgetzone(cr);

	priv_fillset(&CR_LPRIV(cr));
	CR_EPRIV(cr) = CR_PPRIV(cr) = CR_IPRIV(cr) = CR_LPRIV(cr);
	priv_intersect(zone->zone_privset, &CR_LPRIV(cr));
	priv_intersect(zone->zone_privset, &CR_EPRIV(cr));
	priv_intersect(zone->zone_privset, &CR_IPRIV(cr));
	priv_intersect(zone->zone_privset, &CR_PPRIV(cr));
}
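/*
 * Hedged sketch (an assumption, not in the source): how a kernel file
 * server might build an all-privilege service credential clamped to
 * its zone.  crdup() is the standard cred-copy interface; the helper
 * name is hypothetical.
 */
static cred_t *
make_zone_allpriv_cred(cred_t *base)
{
	cred_t *cr = crdup(base);	/* private copy we are free to modify */

	crset_zone_privall(cr);		/* PRIV_ALL, limited to the zone's set */
	return (cr);
}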
int
crsetugid(cred_t *cr, uid_t uid, gid_t gid)
{
	zone_t	*zone = crgetzone(cr);

	ASSERT(cr->cr_ref <= 2);

	if (!VALID_UID(uid, zone) || !VALID_GID(gid, zone))
		return (-1);

	cr->cr_uid = cr->cr_ruid = cr->cr_suid = uid;
	cr->cr_gid = cr->cr_rgid = cr->cr_sgid = gid;

	return (0);
}
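/*
 * Hedged sketch (an assumption, not in the source): crsetugid()
 * expects a freshly allocated, unshared cred (note the cr_ref <= 2
 * assertion) and rejects ids invalid in the cred's zone.  A
 * hypothetical consumer, using the global kcred as the template:
 */
static cred_t *
make_user_cred(uid_t uid, gid_t gid)
{
	cred_t *cr = crdup(kcred);	/* unshared copy, safe to modify */

	if (crsetugid(cr, uid, gid) != 0) {
		crfree(cr);		/* uid/gid invalid in this zone */
		return (NULL);
	}
	return (cr);
}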
static int
idmap_unreg(int did)
{
	door_handle_t dh = door_ki_lookup(did);
	int res;
	zone_t *zone;

	if (dh == NULL)
		return (set_errno(EINVAL));

	zone = crgetzone(CRED());
	res = idmap_unreg_dh(zone, dh);
	door_ki_rele(dh);

	if (res != 0)
		return (set_errno(res));
	return (0);
}
static uint64_t
allocids(int flag, int nuids, int ngids)
{
	rval_t r;
	uid_t su = 0;
	gid_t sg = 0;
	struct door_info di;
	door_handle_t dh;
	int err;
	zone_t *zone = crgetzone(CRED());

	dh = idmap_get_door(zone);

	if (dh == NULL)
		return (set_errno(EPERM));

	if ((err = door_ki_info(dh, &di)) != 0) {
		door_ki_rele(dh);
		return (set_errno(err));
	}
	door_ki_rele(dh);

	if (curproc->p_pid != di.di_target)
		return (set_errno(EPERM));

	if (flag)
		idmap_purge_cache(zone);

	if (nuids < 0 || ngids < 0)
		return (set_errno(EINVAL));

	if (flag != 0 || nuids > 0)
		err = eph_uid_alloc(zone, flag, &su, nuids);
	if (err == 0 && (flag != 0 || ngids > 0))
		err = eph_gid_alloc(zone, flag, &sg, ngids);

	if (err != 0)
		return (set_errno(EOVERFLOW));

	r.r_val1 = su;
	r.r_val2 = sg;
	return (r.r_vals);
}
int
crsetresgid(cred_t *cr, gid_t r, gid_t e, gid_t s)
{
	zone_t	*zone = crgetzone(cr);

	ASSERT(cr->cr_ref <= 2);

	if (BADGID(r, zone) || BADGID(e, zone) || BADGID(s, zone))
		return (-1);

	if (r != -1)
		cr->cr_rgid = r;
	if (e != -1)
		cr->cr_gid = e;
	if (s != -1)
		cr->cr_sgid = s;

	return (0);
}
static int
idmap_reg(int did)
{
	door_handle_t dh;
	int err;
	cred_t *cr = CRED();

	if ((err = secpolicy_idmap(cr)) != 0)
		return (set_errno(err));

	dh = door_ki_lookup(did);

	if (dh == NULL)
		return (set_errno(EBADF));

	if ((err = idmap_reg_dh(crgetzone(cr), dh)) != 0)
		return (set_errno(err));

	return (0);
}
/*
 * Change ownership of file.
 */
int
fchownat(int fd, char *path, uid_t uid, gid_t gid, int flag)
{
	struct vattr vattr;
	int error;
	struct zone *zone = crgetzone(CRED());

	if ((uid != (uid_t)-1 && !VALID_UID(uid, zone)) ||
	    (gid != (gid_t)-1 && !VALID_GID(gid, zone))) {
		return (set_errno(EINVAL));
	}
	vattr.va_uid = uid;
	vattr.va_gid = gid;
	vattr.va_mask = 0;
	if (vattr.va_uid != -1)
		vattr.va_mask |= AT_UID;
	if (vattr.va_gid != -1)
		vattr.va_mask |= AT_GID;

	error = fsetattrat(fd, path, flag, &vattr);
	if (error)
		return (set_errno(error));
	return (0);
}
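/*
 * Hedged sketch (an assumption, not shown in the source): the classic
 * chown()/lchown() entry points can be expressed as thin wrappers over
 * fchownat(), in the usual *at()-layering style.  AT_FDCWD and
 * AT_SYMLINK_NOFOLLOW are the standard constants.
 */
int
chown(char *path, uid_t uid, gid_t gid)
{
	return (fchownat(AT_FDCWD, path, uid, gid, 0));
}

int
lchown(char *path, uid_t uid, gid_t gid)
{
	return (fchownat(AT_FDCWD, path, uid, gid, AT_SYMLINK_NOFOLLOW));
}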
/*
 * Create a file system FUID for an ACL ace
 * or a chown/chgrp of the file.
 * This is similar to zfs_fuid_create_cred, except that
 * we can't find the domain + rid information in the
 * cred.  Instead we have to query Winchester for the
 * domain and rid.
 *
 * During replay operations the domain+rid information is
 * found in the zfs_fuid_info_t that the replay code has
 * attached to the zfsvfs of the file system.
 */
uint64_t
zfs_fuid_create(zfsvfs_t *zfsvfs, uint64_t id, cred_t *cr,
    zfs_fuid_type_t type, dmu_tx_t *tx, zfs_fuid_info_t **fuidpp)
{
	const char *domain;
	char *kdomain;
	uint32_t fuid_idx = FUID_INDEX(id);
	uint32_t rid;
	idmap_stat status;
	uint64_t idx;
	boolean_t is_replay = (zfsvfs->z_assign >= TXG_INITIAL);
	zfs_fuid_t *zfuid = NULL;
	zfs_fuid_info_t *fuidp;

	/*
	 * If POSIX ID, or entry is already a FUID then
	 * just return the id.
	 *
	 * We may also be handed an already FUID'ized id via
	 * chmod.
	 */
	if (!zfsvfs->z_use_fuids || !IS_EPHEMERAL(id) || fuid_idx != 0)
		return (id);

	if (is_replay) {
		fuidp = zfsvfs->z_fuid_replay;

		/*
		 * If we are passed an ephemeral id, but no
		 * fuid_info was logged then return NOBODY.
		 * This is most likely a result of idmap service
		 * not being available.
		 */
		if (fuidp == NULL)
			return (UID_NOBODY);

		switch (type) {
		case ZFS_ACE_USER:
		case ZFS_ACE_GROUP:
			zfuid = list_head(&fuidp->z_fuids);
			rid = FUID_RID(zfuid->z_logfuid);
			idx = FUID_INDEX(zfuid->z_logfuid);
			break;
		case ZFS_OWNER:
			rid = FUID_RID(fuidp->z_fuid_owner);
			idx = FUID_INDEX(fuidp->z_fuid_owner);
			break;
		case ZFS_GROUP:
			rid = FUID_RID(fuidp->z_fuid_group);
			idx = FUID_INDEX(fuidp->z_fuid_group);
			break;
		}
		domain = fuidp->z_domain_table[idx - 1];
	} else {
		if (type == ZFS_OWNER || type == ZFS_ACE_USER)
			status = kidmap_getsidbyuid(crgetzone(cr), id,
			    &domain, &rid);
		else
			status = kidmap_getsidbygid(crgetzone(cr), id,
			    &domain, &rid);

		if (status != 0) {
			/*
			 * When returning nobody we will need to
			 * make a dummy fuid table entry for logging
			 * purposes.
			 */
			rid = UID_NOBODY;
			domain = "";
		}
	}

	idx = zfs_fuid_find_by_domain(zfsvfs, domain, &kdomain, tx);

	if (!is_replay)
		zfs_fuid_node_add(fuidpp, kdomain, rid, idx, id, type);
	else if (zfuid != NULL) {
		list_remove(&fuidp->z_fuids, zfuid);
		kmem_free(zfuid, sizeof (zfs_fuid_t));
	}
	return (FUID_ENCODE(idx, rid));
}
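/*
 * Hedged usage sketch (an assumption, not from the source): a
 * create-time caller, in the style of zfs_mknode(), resolves the
 * owner and group FUIDs inside an open transaction; the accumulated
 * fuid info is later attached to the intent-log record.  The helper
 * name and out-parameters are illustrative only.
 */
static void
example_create_fuids(zfsvfs_t *zfsvfs, cred_t *cr, dmu_tx_t *tx,
    uint64_t *fuid, uint64_t *fgid, zfs_fuid_info_t **fuidpp)
{
	/* Owner and group FUIDs from the caller's credential ids. */
	*fuid = zfs_fuid_create(zfsvfs, (uint64_t)crgetuid(cr), cr,
	    ZFS_OWNER, tx, fuidpp);
	*fgid = zfs_fuid_create(zfsvfs, (uint64_t)crgetgid(cr), cr,
	    ZFS_GROUP, tx, fuidpp);
}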
/* ARGSUSED */
static int
xattr_file_write(vnode_t *vp, uio_t *uiop, int ioflag, cred_t *cr,
    caller_context_t *ct)
{
	int error = 0;
	char *buf;
	char *domain;
	uint32_t rid;
	ssize_t size = uiop->uio_resid;
	nvlist_t *nvp;
	nvpair_t *pair = NULL;
	vnode_t *ppvp;
	xvattr_t xvattr;
	xoptattr_t *xoap = NULL;	/* Pointer to optional attributes */

	if (vfs_has_feature(vp->v_vfsp, VFSFT_XVATTR) == 0)
		return (EINVAL);

	/*
	 * Validate file offset and size.
	 */
	if (uiop->uio_loffset < (offset_t)0)
		return (EINVAL);

	if (size == 0)
		return (EINVAL);

	xva_init(&xvattr);

	if ((xoap = xva_getxoptattr(&xvattr)) == NULL)
		return (EINVAL);

	/*
	 * Copy and unpack the nvlist.
	 */
	buf = kmem_alloc(size, KM_SLEEP);
	if (uiomove((caddr_t)buf, size, UIO_WRITE, uiop)) {
		kmem_free(buf, size);	/* don't leak the staging buffer */
		return (EFAULT);
	}

	if (nvlist_unpack(buf, size, &nvp, KM_SLEEP) != 0) {
		kmem_free(buf, size);
		uiop->uio_resid = size;
		return (EINVAL);
	}
	kmem_free(buf, size);

	/*
	 * Fasttrack empty writes (nvlist with no nvpairs).
	 */
	if (nvlist_next_nvpair(nvp, NULL) == NULL) {
		nvlist_free(nvp);	/* don't leak the unpacked nvlist */
		return (0);
	}

	ppvp = gfs_file_parent(gfs_file_parent(vp));

	while ((pair = nvlist_next_nvpair(nvp, pair)) != NULL) {
		data_type_t type;
		f_attr_t attr;
		boolean_t value;
		uint64_t *time, *times;
		uint_t elem, nelems;
		nvlist_t *nvp_sid;
		uint8_t *scanstamp;

		/*
		 * Validate the name and type of each attribute.
		 * Log any unknown names and continue.  This will
		 * help if additional attributes are added later.
		 */
		type = nvpair_type(pair);
		if ((attr = name_to_attr(nvpair_name(pair))) == F_ATTR_INVAL) {
			cmn_err(CE_WARN, "Unknown attribute %s",
			    nvpair_name(pair));
			continue;
		}

		/*
		 * Verify nvlist type matches required type and view is OK.
		 */
		if (type != attr_to_data_type(attr) ||
		    (attr_to_xattr_view(attr) == XATTR_VIEW_READONLY)) {
			nvlist_free(nvp);
			return (EINVAL);
		}

		/*
		 * For OWNERSID/GROUPSID make sure the target
		 * file system supports ephemeral IDs.
		 */
		if ((attr == F_OWNERSID || attr == F_GROUPSID) &&
		    (!(vp->v_vfsp->vfs_flag & VFS_XID))) {
			nvlist_free(nvp);
			return (EINVAL);
		}

		/*
		 * Retrieve data from nvpair.
		 */
		switch (type) {
		case DATA_TYPE_BOOLEAN_VALUE:
			if (nvpair_value_boolean_value(pair, &value)) {
				nvlist_free(nvp);
				return (EINVAL);
			}
			break;
		case DATA_TYPE_UINT64_ARRAY:
			if (nvpair_value_uint64_array(pair, &times, &nelems)) {
				nvlist_free(nvp);
				return (EINVAL);
			}
			break;
		case DATA_TYPE_NVLIST:
			if (nvpair_value_nvlist(pair, &nvp_sid)) {
				nvlist_free(nvp);
				return (EINVAL);
			}
			break;
		case DATA_TYPE_UINT8_ARRAY:
			if (nvpair_value_uint8_array(pair, &scanstamp,
			    &nelems)) {
				nvlist_free(nvp);
				return (EINVAL);
			}
			break;
		default:
			nvlist_free(nvp);
			return (EINVAL);
		}

		switch (attr) {
		/*
		 * If we have several similar optional attributes to
		 * process then we should do it all together here so that
		 * xoap and the requested bitmap can be set in one place.
		 */
		case F_READONLY:
			XVA_SET_REQ(&xvattr, XAT_READONLY);
			xoap->xoa_readonly = value;
			break;
		case F_HIDDEN:
			XVA_SET_REQ(&xvattr, XAT_HIDDEN);
			xoap->xoa_hidden = value;
			break;
		case F_SYSTEM:
			XVA_SET_REQ(&xvattr, XAT_SYSTEM);
			xoap->xoa_system = value;
			break;
		case F_ARCHIVE:
			XVA_SET_REQ(&xvattr, XAT_ARCHIVE);
			xoap->xoa_archive = value;
			break;
		case F_IMMUTABLE:
			XVA_SET_REQ(&xvattr, XAT_IMMUTABLE);
			xoap->xoa_immutable = value;
			break;
		case F_NOUNLINK:
			XVA_SET_REQ(&xvattr, XAT_NOUNLINK);
			xoap->xoa_nounlink = value;
			break;
		case F_APPENDONLY:
			XVA_SET_REQ(&xvattr, XAT_APPENDONLY);
			xoap->xoa_appendonly = value;
			break;
		case F_NODUMP:
			XVA_SET_REQ(&xvattr, XAT_NODUMP);
			xoap->xoa_nodump = value;
			break;
		case F_AV_QUARANTINED:
			XVA_SET_REQ(&xvattr, XAT_AV_QUARANTINED);
			xoap->xoa_av_quarantined = value;
			break;
		case F_AV_MODIFIED:
			XVA_SET_REQ(&xvattr, XAT_AV_MODIFIED);
			xoap->xoa_av_modified = value;
			break;
		case F_CRTIME:
			XVA_SET_REQ(&xvattr, XAT_CREATETIME);
			time = (uint64_t *)&(xoap->xoa_createtime);
			for (elem = 0; elem < nelems; elem++)
				*time++ = times[elem];
			break;
		case F_OWNERSID:
		case F_GROUPSID:
			if (nvlist_lookup_string(nvp_sid, SID_DOMAIN,
			    &domain) || nvlist_lookup_uint32(nvp_sid, SID_RID,
			    &rid)) {
				nvlist_free(nvp);
				return (EINVAL);
			}

			/*
			 * Now map domain+rid to ephemeral id's
			 *
			 * If mapping fails, then the uid/gid will
			 * be set to UID_NOBODY by Winchester.
			 */
			if (attr == F_OWNERSID) {
				(void) kidmap_getuidbysid(crgetzone(cr), domain,
				    rid, &xvattr.xva_vattr.va_uid);
				xvattr.xva_vattr.va_mask |= AT_UID;
			} else {
				(void) kidmap_getgidbysid(crgetzone(cr), domain,
				    rid, &xvattr.xva_vattr.va_gid);
				xvattr.xva_vattr.va_mask |= AT_GID;
			}
			break;
		case F_AV_SCANSTAMP:
			if (ppvp->v_type == VREG) {
				XVA_SET_REQ(&xvattr, XAT_AV_SCANSTAMP);
				(void) memcpy(xoap->xoa_av_scanstamp,
				    scanstamp, nelems);
			} else {
				nvlist_free(nvp);
				return (EINVAL);
			}
			break;
		case F_REPARSE:
			XVA_SET_REQ(&xvattr, XAT_REPARSE);
			xoap->xoa_reparse = value;
			break;
		case F_OFFLINE:
			XVA_SET_REQ(&xvattr, XAT_OFFLINE);
			xoap->xoa_offline = value;
			break;
		case F_SPARSE:
			XVA_SET_REQ(&xvattr, XAT_SPARSE);
			xoap->xoa_sparse = value;
			break;
		default:
			break;
		}
	}

	ppvp = gfs_file_parent(gfs_file_parent(vp));
	error = VOP_SETATTR(ppvp, &xvattr.xva_vattr, 0, cr, ct);
	if (error)
		uiop->uio_resid = size;

	nvlist_free(nvp);
	return (error);
}
int
seteuid(uid_t uid)
{
	proc_t *p;
	int error = EPERM;
	int do_nocd = 0;
	cred_t	*cr, *newcr;
	ksid_t ksid, *ksp;
	zone_t	*zone = crgetzone(CRED());

	if (!VALID_UID(uid, zone))
		return (set_errno(EINVAL));

	if (uid > MAXUID) {
		if (ksid_lookupbyuid(zone, uid, &ksid) != 0)
			return (set_errno(EINVAL));
		ksp = &ksid;
	} else {
		ksp = NULL;
	}

	/*
	 * Need to pre-allocate the new cred structure before grabbing
	 * the p_crlock mutex.
	 */
	newcr = cralloc_ksid();
	p = ttoproc(curthread);
	mutex_enter(&p->p_crlock);
retry:
	crhold(cr = p->p_cred);
	mutex_exit(&p->p_crlock);

	if (uid == cr->cr_ruid || uid == cr->cr_uid || uid == cr->cr_suid ||
	    (error = secpolicy_allow_setid(cr, uid, B_FALSE)) == 0) {
		/*
		 * A privileged process that makes itself look like a
		 * set-uid process must be marked to produce no core dump
		 * if the effective uid has changed.
		 */
		mutex_enter(&p->p_crlock);
		crfree(cr);
		if (cr != p->p_cred)
			goto retry;
		if (cr->cr_uid != uid && error == 0)
			do_nocd = 1;
		error = 0;
		crcopy_to(cr, newcr);
		p->p_cred = newcr;
		newcr->cr_uid = uid;
		crsetsid(newcr, ksp, KSID_USER);
		priv_reset_PA(newcr, B_FALSE);
		mutex_exit(&p->p_crlock);

		if (do_nocd) {
			mutex_enter(&p->p_lock);
			p->p_flag |= SNOCD;
			mutex_exit(&p->p_lock);
		}
		crset(p, newcr);	/* broadcast to process threads */
		return (0);
	}

	crfree(newcr);
	crfree(cr);
	if (ksp != NULL)
		ksid_rele(ksp);
	return (set_errno(error));
}
int
klpd_unreg(int did, idtype_t type, id_t id)
{
	door_handle_t dh;
	int res = 0;
	proc_t *p;
	pid_t pid;
	projid_t proj;
	kproject_t *kpp = NULL;
	credklpd_t *ckp;

	switch (type) {
	case P_PID:
		pid = (pid_t)id;
		break;
	case P_PROJID:
		proj = (projid_t)id;
		kpp = project_hold_by_id(proj, crgetzone(CRED()),
		    PROJECT_HOLD_FIND);
		if (kpp == NULL)
			return (set_errno(ESRCH));
		break;
	default:
		return (set_errno(ENOTSUP));
	}

	dh = door_ki_lookup(did);
	if (dh == NULL) {
		if (kpp != NULL)
			project_rele(kpp);
		return (set_errno(EINVAL));
	}

	if (kpp != NULL) {
		mutex_enter(&klpd_mutex);
		if (kpp->kpj_klpd == NULL)
			res = ESRCH;
		else
			klpd_freelist(&kpp->kpj_klpd);
		mutex_exit(&klpd_mutex);
		project_rele(kpp);
		goto out;
	} else if ((int)pid > 0) {
		mutex_enter(&pidlock);
		p = prfind(pid);
		if (p == NULL) {
			mutex_exit(&pidlock);
			door_ki_rele(dh);
			return (set_errno(ESRCH));
		}
		mutex_enter(&p->p_crlock);
		mutex_exit(&pidlock);
	} else if (pid == 0) {
		p = curproc;
		mutex_enter(&p->p_crlock);
	} else {
		res = klpd_unreg_dh(dh);
		goto out;
	}

	ckp = crgetcrklpd(p->p_cred);
	if (ckp != NULL) {
		crklpd_setreg(ckp, NULL);
	} else {
		res = ESRCH;
	}
	mutex_exit(&p->p_crlock);

out:
	door_ki_rele(dh);

	if (res != 0)
		return (set_errno(res));
	return (0);
}
/* ARGSUSED */
static int
xattr_fill_nvlist(vnode_t *vp, xattr_view_t xattr_view, nvlist_t *nvlp,
    cred_t *cr, caller_context_t *ct)
{
	int error;
	f_attr_t attr;
	uint64_t fsid;
	xvattr_t xvattr;
	xoptattr_t *xoap;	/* Pointer to optional attributes */
	vnode_t *ppvp;
	const char *domain;
	uint32_t rid;

	xva_init(&xvattr);

	if ((xoap = xva_getxoptattr(&xvattr)) == NULL)
		return (EINVAL);

	/*
	 * For detecting ephemeral uid/gid.
	 */
	xvattr.xva_vattr.va_mask |= (AT_UID|AT_GID);

	/*
	 * We need to access the real fs object.
	 * vp points to a GFS file; ppvp points to the real object.
	 */
	ppvp = gfs_file_parent(gfs_file_parent(vp));

	/*
	 * Iterate through the attrs associated with this view.
	 */
	for (attr = 0; attr < F_ATTR_ALL; attr++) {
		if (xattr_view != attr_to_xattr_view(attr)) {
			continue;
		}

		switch (attr) {
		case F_SYSTEM:
			XVA_SET_REQ(&xvattr, XAT_SYSTEM);
			break;
		case F_READONLY:
			XVA_SET_REQ(&xvattr, XAT_READONLY);
			break;
		case F_HIDDEN:
			XVA_SET_REQ(&xvattr, XAT_HIDDEN);
			break;
		case F_ARCHIVE:
			XVA_SET_REQ(&xvattr, XAT_ARCHIVE);
			break;
		case F_IMMUTABLE:
			XVA_SET_REQ(&xvattr, XAT_IMMUTABLE);
			break;
		case F_APPENDONLY:
			XVA_SET_REQ(&xvattr, XAT_APPENDONLY);
			break;
		case F_NOUNLINK:
			XVA_SET_REQ(&xvattr, XAT_NOUNLINK);
			break;
		case F_OPAQUE:
			XVA_SET_REQ(&xvattr, XAT_OPAQUE);
			break;
		case F_NODUMP:
			XVA_SET_REQ(&xvattr, XAT_NODUMP);
			break;
		case F_AV_QUARANTINED:
			XVA_SET_REQ(&xvattr, XAT_AV_QUARANTINED);
			break;
		case F_AV_MODIFIED:
			XVA_SET_REQ(&xvattr, XAT_AV_MODIFIED);
			break;
		case F_AV_SCANSTAMP:
			if (ppvp->v_type == VREG)
				XVA_SET_REQ(&xvattr, XAT_AV_SCANSTAMP);
			break;
		case F_CRTIME:
			XVA_SET_REQ(&xvattr, XAT_CREATETIME);
			break;
		case F_FSID:
			fsid = (((uint64_t)vp->v_vfsp->vfs_fsid.val[0] << 32) |
			    (uint64_t)(vp->v_vfsp->vfs_fsid.val[1] &
			    0xffffffff));
			VERIFY(nvlist_add_uint64(nvlp, attr_to_name(attr),
			    fsid) == 0);
			break;
		case F_REPARSE:
			XVA_SET_REQ(&xvattr, XAT_REPARSE);
			break;
		case F_GEN:
			XVA_SET_REQ(&xvattr, XAT_GEN);
			break;
		case F_OFFLINE:
			XVA_SET_REQ(&xvattr, XAT_OFFLINE);
			break;
		case F_SPARSE:
			XVA_SET_REQ(&xvattr, XAT_SPARSE);
			break;
		default:
			break;
		}
	}

	error = VOP_GETATTR(ppvp, &xvattr.xva_vattr, 0, cr, ct);
	if (error)
		return (error);

	/*
	 * Process all the optional attributes together here.  Notice that
	 * xoap was set when the optional attribute bits were set above.
	 */
	if ((xvattr.xva_vattr.va_mask & AT_XVATTR) && xoap) {
		if (XVA_ISSET_RTN(&xvattr, XAT_READONLY)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_READONLY),
			    xoap->xoa_readonly) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_HIDDEN)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_HIDDEN),
			    xoap->xoa_hidden) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_SYSTEM)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_SYSTEM),
			    xoap->xoa_system) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_ARCHIVE)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_ARCHIVE),
			    xoap->xoa_archive) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_IMMUTABLE)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_IMMUTABLE),
			    xoap->xoa_immutable) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_NOUNLINK)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_NOUNLINK),
			    xoap->xoa_nounlink) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_APPENDONLY)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_APPENDONLY),
			    xoap->xoa_appendonly) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_NODUMP)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_NODUMP),
			    xoap->xoa_nodump) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_OPAQUE)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_OPAQUE),
			    xoap->xoa_opaque) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_AV_QUARANTINED)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_AV_QUARANTINED),
			    xoap->xoa_av_quarantined) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_AV_MODIFIED)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_AV_MODIFIED),
			    xoap->xoa_av_modified) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_AV_SCANSTAMP)) {
			VERIFY(nvlist_add_uint8_array(nvlp,
			    attr_to_name(F_AV_SCANSTAMP),
			    xoap->xoa_av_scanstamp,
			    sizeof (xoap->xoa_av_scanstamp)) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_CREATETIME)) {
			VERIFY(nvlist_add_uint64_array(nvlp,
			    attr_to_name(F_CRTIME),
			    (uint64_t *)&(xoap->xoa_createtime),
			    sizeof (xoap->xoa_createtime) /
			    sizeof (uint64_t)) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_REPARSE)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_REPARSE),
			    xoap->xoa_reparse) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_GEN)) {
			VERIFY(nvlist_add_uint64(nvlp,
			    attr_to_name(F_GEN),
			    xoap->xoa_generation) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_OFFLINE)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_OFFLINE),
			    xoap->xoa_offline) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_SPARSE)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_SPARSE),
			    xoap->xoa_sparse) == 0);
		}
	}

	/*
	 * Check for optional ownersid/groupsid.
	 */
	if (xvattr.xva_vattr.va_uid > MAXUID) {
		nvlist_t *nvl_sid;

		if (nvlist_alloc(&nvl_sid, NV_UNIQUE_NAME, KM_SLEEP))
			return (ENOMEM);

		if (kidmap_getsidbyuid(crgetzone(cr), xvattr.xva_vattr.va_uid,
		    &domain, &rid) == 0) {
			VERIFY(nvlist_add_string(nvl_sid,
			    SID_DOMAIN, domain) == 0);
			VERIFY(nvlist_add_uint32(nvl_sid, SID_RID, rid) == 0);
			VERIFY(nvlist_add_nvlist(nvlp,
			    attr_to_name(F_OWNERSID), nvl_sid) == 0);
		}
		nvlist_free(nvl_sid);
	}
	if (xvattr.xva_vattr.va_gid > MAXUID) {
		nvlist_t *nvl_sid;

		if (nvlist_alloc(&nvl_sid, NV_UNIQUE_NAME, KM_SLEEP))
			return (ENOMEM);

		if (kidmap_getsidbygid(crgetzone(cr), xvattr.xva_vattr.va_gid,
		    &domain, &rid) == 0) {
			VERIFY(nvlist_add_string(nvl_sid,
			    SID_DOMAIN, domain) == 0);
			VERIFY(nvlist_add_uint32(nvl_sid, SID_RID, rid) == 0);
			VERIFY(nvlist_add_nvlist(nvlp,
			    attr_to_name(F_GROUPSID), nvl_sid) == 0);
		}
		nvlist_free(nvl_sid);
	}

	return (0);
}
int
klpd_call(const cred_t *cr, const priv_set_t *req, va_list ap)
{
	klpd_reg_t *p;
	int rv = -1;
	credklpd_t *ckp;
	zone_t *ckzone;

	/*
	 * These locks must not be held when this code is called;
	 * callbacks to userland with these locks held will result
	 * in issues.  That said, the code at the call sites was
	 * restructured not to call with any of the locks held and
	 * no policies operate by default on most processes.
	 */
	if (mutex_owned(&pidlock) || mutex_owned(&curproc->p_lock) ||
	    mutex_owned(&curproc->p_crlock)) {
		atomic_inc_32(&klpd_bad_locks);
		return (-1);
	}

	/*
	 * Enforce the limit set for the call process (still).
	 */
	if (!priv_issubset(req, &CR_LPRIV(cr)))
		return (-1);

	/* Try 1: get the credential specific klpd */
	if ((ckp = crgetcrklpd(cr)) != NULL) {
		mutex_enter(&ckp->crkl_lock);
		if ((p = ckp->crkl_reg) != NULL &&
		    p->klpd_indel == 0 &&
		    priv_issubset(req, &p->klpd_pset)) {
			klpd_hold(p);
			mutex_exit(&ckp->crkl_lock);
			rv = klpd_do_call(p, req, ap);
			mutex_enter(&ckp->crkl_lock);
			klpd_rele(p);
			mutex_exit(&ckp->crkl_lock);
			if (rv != -1)
				return (rv == 0 ? 0 : -1);
		} else {
			mutex_exit(&ckp->crkl_lock);
		}
	}

	/* Try 2: get the project specific klpd */
	mutex_enter(&klpd_mutex);

	if ((p = curproj->kpj_klpd) != NULL) {
		klpd_hold(p);
		mutex_exit(&klpd_mutex);
		if (p->klpd_indel == 0 &&
		    priv_issubset(req, &p->klpd_pset)) {
			rv = klpd_do_call(p, req, ap);
		}
		mutex_enter(&klpd_mutex);
		klpd_rele(p);
		mutex_exit(&klpd_mutex);

		if (rv != -1)
			return (rv == 0 ? 0 : -1);
	} else {
		mutex_exit(&klpd_mutex);
	}

	/* Try 3: get the global klpd list */
	ckzone = crgetzone(cr);
	mutex_enter(&klpd_mutex);

	for (p = klpd_list; p != NULL; ) {
		zone_t *kkzone = crgetzone(p->klpd_cred);

		if ((kkzone == &zone0 || kkzone == ckzone) &&
		    p->klpd_indel == 0 &&
		    priv_issubset(req, &p->klpd_pset)) {
			klpd_hold(p);
			mutex_exit(&klpd_mutex);
			rv = klpd_do_call(p, req, ap);
			mutex_enter(&klpd_mutex);

			p = klpd_rele_next(p);

			if (rv != -1)
				break;
		} else {
			p = p->klpd_next;
		}
	}
	mutex_exit(&klpd_mutex);

	return (rv == 0 ? 0 : -1);
}
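/*
 * Hedged sketch (an assumption, not from the source): how a
 * priv_policy-style check might delegate to the klpd when the
 * credential carries the PRIV_XPOLICY flag.  The helper name is
 * hypothetical; priv_emptyset()/priv_addset() are the standard
 * privilege-set primitives.
 */
static int
example_xpolicy_check(const cred_t *cr, int priv, va_list ap)
{
	priv_set_t req;

	/* Build the single-privilege request set. */
	priv_emptyset(&req);
	priv_addset(&req, priv);

	if (CR_FLAGS(cr) & PRIV_XPOLICY)
		return (klpd_call(cr, &req, ap));
	return (-1);	/* no delegation configured */
}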
/*
 * Register the klpd.
 * If the pid_t passed in is positive, update the registration for
 * the specific process; that is only possible if the process already
 * has a registration on it.  This change of registration will affect
 * all processes which share common ancestry.
 *
 * MY_PID (pid 0) can be used to create or change the context for
 * the current process, typically done after fork().
 *
 * A negative value can be used to register a klpd globally.
 *
 * The per-credential klpd needs to be cleaned up when entering
 * a zone or unsetting the flag.
 */
int
klpd_reg(int did, idtype_t type, id_t id, priv_set_t *psetbuf)
{
	cred_t *cr = CRED();
	door_handle_t dh;
	klpd_reg_t *kpd;
	priv_set_t pset;
	door_info_t di;
	credklpd_t *ckp = NULL;
	pid_t pid = -1;
	projid_t proj = -1;
	kproject_t *kpp = NULL;

	if (CR_FLAGS(cr) & PRIV_XPOLICY)
		return (set_errno(EINVAL));

	if (copyin(psetbuf, &pset, sizeof (priv_set_t)))
		return (set_errno(EFAULT));

	if (!priv_issubset(&pset, &CR_OEPRIV(cr)))
		return (set_errno(EPERM));

	switch (type) {
	case P_PID:
		pid = (pid_t)id;
		if (pid == P_MYPID)
			pid = curproc->p_pid;
		if (pid == curproc->p_pid)
			ckp = crklpd_alloc();
		break;
	case P_PROJID:
		proj = (projid_t)id;
		kpp = project_hold_by_id(proj, crgetzone(cr),
		    PROJECT_HOLD_FIND);
		if (kpp == NULL)
			return (set_errno(ESRCH));
		break;
	default:
		return (set_errno(ENOTSUP));
	}

	/*
	 * Verify the door passed in; it must be a door and we won't
	 * allow processes to be called on their own behalf.
	 */
	dh = door_ki_lookup(did);
	if (dh == NULL || door_ki_info(dh, &di) != 0) {
		if (ckp != NULL)
			crklpd_rele(ckp);
		if (kpp != NULL)
			project_rele(kpp);
		return (set_errno(EBADF));
	}
	if (type == P_PID && pid == di.di_target) {
		if (ckp != NULL)
			crklpd_rele(ckp);
		ASSERT(kpp == NULL);
		return (set_errno(EINVAL));
	}

	kpd = kmem_zalloc(sizeof (*kpd), KM_SLEEP);
	crhold(kpd->klpd_cred = cr);
	kpd->klpd_door = dh;
	kpd->klpd_door_pid = di.di_target;
	kpd->klpd_ref = 1;
	kpd->klpd_pset = pset;

	if (kpp != NULL) {
		mutex_enter(&klpd_mutex);
		kpd = klpd_link(kpd, &kpp->kpj_klpd, B_TRUE);
		mutex_exit(&klpd_mutex);
		if (kpd != NULL)
			klpd_rele(kpd);
		project_rele(kpp);
	} else if ((int)pid < 0) {
		/* Global daemon */
		mutex_enter(&klpd_mutex);
		(void) klpd_link(kpd, &klpd_list, B_FALSE);
		mutex_exit(&klpd_mutex);
	} else if (pid == curproc->p_pid) {
		proc_t *p = curproc;
		cred_t *newcr = cralloc();

		/* No need to lock, sole reference to ckp */
		kpd = klpd_link(kpd, &ckp->crkl_reg, B_TRUE);
		if (kpd != NULL)
			klpd_rele(kpd);

		mutex_enter(&p->p_crlock);
		cr = p->p_cred;
		crdup_to(cr, newcr);
		crsetcrklpd(newcr, ckp);
		p->p_cred = newcr;	/* Already held for p_cred */
		crhold(newcr);		/* Hold once for the current thread */
		mutex_exit(&p->p_crlock);
		crfree(cr);		/* One for the p_cred */
		crset(p, newcr);
	} else {
		proc_t *p;
		cred_t *pcr;

		mutex_enter(&pidlock);
		p = prfind(pid);
		if (p == NULL || !prochasprocperm(p, curproc, CRED())) {
			mutex_exit(&pidlock);
			klpd_rele(kpd);
			return (set_errno(p == NULL ? ESRCH : EPERM));
		}
		mutex_enter(&p->p_crlock);
		crhold(pcr = p->p_cred);
		mutex_exit(&pidlock);
		mutex_exit(&p->p_crlock);

		/*
		 * We're going to update the credential's ckp in place;
		 * this requires that it exists.
		 */
		ckp = crgetcrklpd(pcr);
		if (ckp == NULL) {
			crfree(pcr);
			klpd_rele(kpd);
			return (set_errno(EINVAL));
		}
		crklpd_setreg(ckp, kpd);
		crfree(pcr);
	}

	return (0);
}
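/*
 * Hedged illustration (an assumption, not from the source): the three
 * registration scopes described in the comment above, as the handler
 * might be exercised.  klpd_reg() copyin()s psetbuf, so upsetbuf must
 * be a user-space address; door_fd, upsetbuf and projid are presumed
 * to have been set up by the caller, and the helper name is invented.
 */
static void
example_klpd_registrations(int door_fd, priv_set_t *upsetbuf, projid_t projid)
{
	(void) klpd_reg(door_fd, P_PID, (id_t)-1, upsetbuf);	/* global */
	(void) klpd_reg(door_fd, P_PID, P_MYPID, upsetbuf);	/* this proc */
	(void) klpd_reg(door_fd, P_PROJID, (id_t)projid, upsetbuf); /* project */
}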
int
setgid(gid_t gid)
{
	proc_t *p;
	int error;
	int do_nocd = 0;
	cred_t	*cr, *newcr;
	ksid_t ksid, *ksp;
	zone_t	*zone = crgetzone(CRED());

	if (!VALID_GID(gid, zone))
		return (set_errno(EINVAL));

	if (gid > MAXUID) {
		if (ksid_lookupbygid(zone, gid, &ksid) != 0)
			return (set_errno(EINVAL));
		ksp = &ksid;
	} else {
		ksp = NULL;
	}

	/*
	 * Need to pre-allocate the new cred structure before grabbing
	 * the p_crlock mutex.  We cannot hold the mutex across the
	 * secpolicy functions.
	 */
	newcr = cralloc_ksid();
	p = ttoproc(curthread);
	mutex_enter(&p->p_crlock);
retry:
	cr = p->p_cred;
	crhold(cr);
	mutex_exit(&p->p_crlock);

	if ((gid == cr->cr_rgid || gid == cr->cr_sgid) &&
	    secpolicy_allow_setid(cr, -1, B_TRUE) != 0) {
		mutex_enter(&p->p_crlock);
		crfree(cr);
		if (cr != p->p_cred)
			goto retry;
		error = 0;
		crcopy_to(cr, newcr);
		p->p_cred = newcr;
		newcr->cr_gid = gid;
		crsetsid(newcr, ksp, KSID_GROUP);
		mutex_exit(&p->p_crlock);
	} else if ((error = secpolicy_allow_setid(cr, -1, B_FALSE)) == 0) {
		mutex_enter(&p->p_crlock);
		crfree(cr);
		if (cr != p->p_cred)
			goto retry;
		/*
		 * A privileged process that makes itself look like a
		 * set-gid process must be marked to produce no core dump.
		 */
		if (cr->cr_gid != gid || cr->cr_rgid != gid ||
		    cr->cr_sgid != gid)
			do_nocd = 1;
		crcopy_to(cr, newcr);
		p->p_cred = newcr;
		newcr->cr_gid = gid;
		newcr->cr_rgid = gid;
		newcr->cr_sgid = gid;
		crsetsid(newcr, ksp, KSID_GROUP);
		mutex_exit(&p->p_crlock);
	} else {
		crfree(newcr);
		crfree(cr);
		if (ksp != NULL)
			ksid_rele(ksp);
	}

	if (error == 0) {
		if (do_nocd) {
			mutex_enter(&p->p_lock);
			p->p_flag |= SNOCD;
			mutex_exit(&p->p_lock);
		}
		crset(p, newcr);	/* broadcast to process threads */
		return (0);
	}
	return (set_errno(error));
}
/*
 * The typical call consists of:
 * - priv_set_t
 * - some integer data (type, value)
 * For now, it's just one bit.
 */
static klpd_head_t *
klpd_marshall(klpd_reg_t *p, const priv_set_t *rq, va_list ap)
{
	char	*tmp;
	uint_t	type;
	vnode_t *vp;
	size_t	len = sizeof (priv_set_t) + sizeof (klpd_head_t);
	size_t	plen, clen;
	int	proto;
	klpd_arg_t *kap = NULL;
	klpd_head_t *khp;

	type = va_arg(ap, uint_t);
	switch (type) {
	case KLPDARG_NOMORE:
		khp = kmem_zalloc(len, KM_SLEEP);
		khp->klh_argoff = 0;
		break;
	case KLPDARG_VNODE:
		len += offsetof(klpd_arg_t, kla_str);
		vp = va_arg(ap, vnode_t *);
		if (vp == NULL)
			return (NULL);

		tmp = va_arg(ap, char *);

		if (tmp != NULL && *tmp != '\0')
			clen = strlen(tmp) + 1;
		else
			clen = 0;

		len += ROUNDUP(MAXPATHLEN, sizeof (uint_t));
		khp = kmem_zalloc(len, KM_SLEEP);
		khp->klh_argoff = sizeof (klpd_head_t) + sizeof (priv_set_t);
		kap = KLH_ARG(khp);

		if (vnodetopath(crgetzone(p->klpd_cred)->zone_rootvp,
		    vp, kap->kla_str, MAXPATHLEN, p->klpd_cred) != 0) {
			kmem_free(khp, len);
			return (NULL);
		}
		if (clen != 0) {
			plen = strlen(kap->kla_str);
			if (plen + clen + 1 >= MAXPATHLEN) {
				kmem_free(khp, len);
				return (NULL);
			}
			/* Don't make root into a double "/" */
			if (plen <= 2)
				plen = 0;
			kap->kla_str[plen] = '/';
			bcopy(tmp, &kap->kla_str[plen + 1], clen);
		}
		break;
	case KLPDARG_PORT:
		proto = va_arg(ap, int);
		switch (proto) {
		case IPPROTO_TCP:
			type = KLPDARG_TCPPORT;
			break;
		case IPPROTO_UDP:
			type = KLPDARG_UDPPORT;
			break;
		case IPPROTO_SCTP:
			type = KLPDARG_SCTPPORT;
			break;
		case PROTO_SDP:
			type = KLPDARG_SDPPORT;
			break;
		}
		/* FALLTHROUGH */
	case KLPDARG_INT:
	case KLPDARG_TCPPORT:
	case KLPDARG_UDPPORT:
	case KLPDARG_SCTPPORT:
	case KLPDARG_SDPPORT:
		len += sizeof (*kap);
		khp = kmem_zalloc(len, KM_SLEEP);
		khp->klh_argoff = sizeof (klpd_head_t) + sizeof (priv_set_t);
		kap = KLH_ARG(khp);
		kap->kla_int = va_arg(ap, int);
		break;
	default:
		return (NULL);
	}
	khp->klh_vers = KLPDCALL_VERS;
	khp->klh_len = len;
	khp->klh_privoff = sizeof (*khp);
	*KLH_PRIVSET(khp) = *rq;
	if (kap != NULL) {
		kap->kla_type = type;
		kap->kla_dlen = len - khp->klh_argoff;
	}
	return (khp);
}
/*
 * Buy-back from SunOS 4.x
 *
 * Like setgid() and setegid() combined -except- that non-root users
 * can change cr_rgid to cr_gid, and the semantics of cr_sgid are
 * subtly different.
 */
int
setregid(gid_t rgid, gid_t egid)
{
	proc_t *p;
	int error = EPERM;
	int do_nocd = 0;
	cred_t *cr, *newcr;
	ksid_t ksid, *ksp;
	zone_t	*zone = crgetzone(CRED());

	if ((rgid != -1 && !VALID_GID(rgid, zone)) ||
	    (egid != -1 && !VALID_GID(egid, zone)))
		return (set_errno(EINVAL));

	if (egid != -1 && egid > MAXUID) {
		if (ksid_lookupbygid(zone, egid, &ksid) != 0)
			return (set_errno(EINVAL));
		ksp = &ksid;
	} else {
		ksp = NULL;
	}

	/*
	 * Need to pre-allocate the new cred structure before grabbing
	 * the p_crlock mutex.
	 */
	newcr = cralloc_ksid();

	p = ttoproc(curthread);
	mutex_enter(&p->p_crlock);
	cr = p->p_cred;

	if ((rgid == -1 || rgid == cr->cr_rgid || rgid == cr->cr_gid ||
	    rgid == cr->cr_sgid) &&
	    (egid == -1 || egid == cr->cr_rgid || egid == cr->cr_gid ||
	    egid == cr->cr_sgid) ||
	    (error = secpolicy_allow_setid(cr, -1, B_FALSE)) == 0) {
		crhold(cr);
		crcopy_to(cr, newcr);
		p->p_cred = newcr;

		if (egid != -1) {
			newcr->cr_gid = egid;
			crsetsid(newcr, ksp, KSID_GROUP);
		}
		if (rgid != -1)
			newcr->cr_rgid = rgid;

		/*
		 * "If the real gid is being changed, or the effective gid is
		 * being changed to a value not equal to the real gid, the
		 * saved gid is set to the new effective gid."
		 */
		if (rgid != -1 ||
		    (egid != -1 && newcr->cr_gid != newcr->cr_rgid))
			newcr->cr_sgid = newcr->cr_gid;

		/*
		 * A privileged process that makes itself look like a
		 * set-gid process must be marked to produce no core dump.
		 */
		if ((cr->cr_gid != newcr->cr_gid ||
		    cr->cr_rgid != newcr->cr_rgid ||
		    cr->cr_sgid != newcr->cr_sgid) && error == 0)
			do_nocd = 1;

		error = 0;
		crfree(cr);
	}
	mutex_exit(&p->p_crlock);

	if (error == 0) {
		if (do_nocd) {
			mutex_enter(&p->p_lock);
			p->p_flag |= SNOCD;
			mutex_exit(&p->p_lock);
		}
		crset(p, newcr);	/* broadcast to process threads */
		return (0);
	}
	crfree(newcr);
	if (ksp != NULL)
		ksid_rele(ksp);
	return (set_errno(error));
}
/*
 * Buy-back from SunOS 4.x
 *
 * Like setuid() and seteuid() combined -except- that non-root users
 * can change cr_ruid to cr_uid, and the semantics of cr_suid are
 * subtly different.
 */
int
setreuid(uid_t ruid, uid_t euid)
{
	proc_t *p;
	int error = 0;
	int do_nocd = 0;
	int uidchge = 0;
	uid_t oldruid = ruid;
	cred_t *cr, *newcr;
	zoneid_t zoneid = getzoneid();
	ksid_t ksid, *ksp;
	zone_t	*zone = crgetzone(CRED());

	if ((ruid != -1 && !VALID_UID(ruid, zone)) ||
	    (euid != -1 && !VALID_UID(euid, zone)))
		return (set_errno(EINVAL));

	if (euid != -1 && euid > MAXUID) {
		if (ksid_lookupbyuid(zone, euid, &ksid) != 0)
			return (set_errno(EINVAL));
		ksp = &ksid;
	} else {
		ksp = NULL;
	}

	/*
	 * Need to pre-allocate the new cred structure before grabbing
	 * the p_crlock mutex.
	 */
	newcr = cralloc_ksid();

	p = ttoproc(curthread);

retry:
	mutex_enter(&p->p_crlock);
retry_locked:
	crhold(cr = p->p_cred);
	mutex_exit(&p->p_crlock);

	if (ruid != -1 && ruid != cr->cr_ruid && ruid != cr->cr_uid &&
	    secpolicy_allow_setid(cr, ruid, B_FALSE) != 0) {
		mutex_enter(&p->p_crlock);
		crfree(cr);
		if (cr != p->p_cred)
			goto retry_locked;
		error = EPERM;
	} else if (euid != -1 &&
	    euid != cr->cr_ruid && euid != cr->cr_uid &&
	    euid != cr->cr_suid && secpolicy_allow_setid(cr, euid, B_FALSE)) {
		mutex_enter(&p->p_crlock);
		crfree(cr);
		if (cr != p->p_cred)
			goto retry_locked;
		error = EPERM;
	} else {
		mutex_enter(&p->p_crlock);
		crfree(cr);
		if (cr != p->p_cred)
			goto retry_locked;
		if (!uidchge && ruid != -1 && cr->cr_ruid != ruid) {
			/*
			 * The ruid of the process is going to change. In order
			 * to avoid a race condition involving the
			 * process-count associated with the newly given ruid,
			 * we increment the count before assigning the
			 * credential to the process.
			 * To do that, we'll have to take pidlock, so we first
			 * release p_crlock.
			 */
			mutex_exit(&p->p_crlock);
			uidchge = 1;
			mutex_enter(&pidlock);
			upcount_inc(ruid, zoneid);
			mutex_exit(&pidlock);
			/*
			 * As we released p_crlock we can't rely on the cr
			 * we read. So retry the whole thing.
			 */
			goto retry;
		}
		crhold(cr);
		crcopy_to(cr, newcr);
		p->p_cred = newcr;

		if (euid != -1) {
			newcr->cr_uid = euid;
			crsetsid(newcr, ksp, KSID_USER);
		}
		if (ruid != -1) {
			oldruid = newcr->cr_ruid;
			newcr->cr_ruid = ruid;
			ASSERT(ruid != oldruid ? uidchge : 1);
		}
		/*
		 * "If the real uid is being changed, or the effective uid is
		 * being changed to a value not equal to the real uid, the
		 * saved uid is set to the new effective uid."
		 */
		if (ruid != -1 ||
		    (euid != -1 && newcr->cr_uid != newcr->cr_ruid))
			newcr->cr_suid = newcr->cr_uid;
		/*
		 * A process that gives up its privilege
		 * must be marked to produce no core dump.
		 */
		if ((cr->cr_uid != newcr->cr_uid ||
		    cr->cr_ruid != newcr->cr_ruid ||
		    cr->cr_suid != newcr->cr_suid))
			do_nocd = 1;

		priv_reset_PA(newcr, ruid != -1 && euid != -1 && ruid == euid);
		crfree(cr);
	}
	mutex_exit(&p->p_crlock);

	/*
	 * We decrement the number of processes associated with the oldruid
	 * to match the increment above, even if the ruid of the process
	 * did not change or an error occurred (oldruid == ruid).
	 */
	if (uidchge) {
		ASSERT(oldruid != -1 && ruid != -1);
		mutex_enter(&pidlock);
		upcount_dec(oldruid, zoneid);
		mutex_exit(&pidlock);
	}

	if (error == 0) {
		if (do_nocd) {
			mutex_enter(&p->p_lock);
			p->p_flag |= SNOCD;
			mutex_exit(&p->p_lock);
		}
		crset(p, newcr);	/* broadcast to process threads */
		return (0);
	}
	crfree(newcr);
	if (ksp != NULL)
		ksid_rele(ksp);
	return (set_errno(error));
}
/*
 * Returns 0 for success, errno value otherwise.
 *
 * If the "bind_to_req_port_only" parameter is set and the requested port
 * number is available, then set allocated_port to it.  If not available,
 * return an error.
 *
 * If the "bind_to_req_port_only" parameter is not set and the requested port
 * number is available, then set allocated_port to it.  If not available,
 * find the first anonymous port we can and set allocated_port to that.  If no
 * anonymous ports are available, return an error.
 *
 * In either case, when succeeding, update the sctp_t to record the port number
 * and insert it in the bind hash table.
 */
int
sctp_bindi(sctp_t *sctp, in_port_t port, boolean_t bind_to_req_port_only,
    int user_specified, in_port_t *allocated_port)
{
	/* number of times we have run around the loop */
	int count = 0;
	/* maximum number of times to run around the loop */
	int loopmax;
	zoneid_t zoneid = sctp->sctp_zoneid;
	zone_t *zone = crgetzone(sctp->sctp_credp);

	/*
	 * Lookup for free addresses is done in a loop and "loopmax"
	 * influences how long we spin in the loop.
	 */
	if (bind_to_req_port_only) {
		/*
		 * If the requested port is busy, don't bother to look
		 * for a new one.  Setting loop maximum count to 1 has
		 * that effect.
		 */
		loopmax = 1;
	} else {
		/*
		 * If the requested port is busy, look for a free one
		 * in the anonymous port range.
		 * Set loopmax appropriately so that one does not look
		 * forever in the case all of the anonymous ports are in use.
		 */
		loopmax = (sctp_largest_anon_port -
		    sctp_smallest_anon_port + 1);
	}
	do {
		uint16_t	lport;
		sctp_tf_t	*tbf;
		sctp_t		*lsctp;
		int		addrcmp;

		lport = htons(port);

		/*
		 * Ensure that the sctp_t is not currently in the bind hash.
		 * Hold the lock on the hash bucket to ensure that
		 * the duplicate check plus the insertion is an atomic
		 * operation.
		 *
		 * This function does an inline lookup on the bind hash list
		 * Make sure that we access only members of sctp_t
		 * and that we don't look at sctp_sctp, since we are not
		 * doing a SCTPB_REFHOLD. For more details please see the notes
		 * in sctp_compress()
		 */
		sctp_bind_hash_remove(sctp);
		tbf = &sctp_bind_fanout[SCTP_BIND_HASH(port)];
		mutex_enter(&tbf->tf_lock);
		for (lsctp = tbf->tf_sctp; lsctp != NULL;
		    lsctp = lsctp->sctp_bind_hash) {

			if (lport != lsctp->sctp_lport ||
			    lsctp->sctp_state < SCTPS_BOUND)
				continue;

			/*
			 * On a labeled system, we must treat bindings to ports
			 * on shared IP addresses by sockets with MAC exemption
			 * privilege as being in all zones, as there's
			 * otherwise no way to identify the right receiver.
			 */
			if (lsctp->sctp_zoneid != zoneid &&
			    !lsctp->sctp_mac_exempt && !sctp->sctp_mac_exempt)
				continue;

			addrcmp = sctp_compare_saddrs(sctp, lsctp);
			if (addrcmp != SCTP_ADDR_DISJOINT) {
				if (!sctp->sctp_reuseaddr) {
					/* in use */
					break;
				} else if (lsctp->sctp_state == SCTPS_BOUND ||
				    lsctp->sctp_state == SCTPS_LISTEN) {
					/*
					 * socket option SO_REUSEADDR is set
					 * on the binding sctp_t.
					 *
					 * We have found a match of IP source
					 * address and source port, which is
					 * refused regardless of the
					 * SO_REUSEADDR setting, so we break.
					 */
					break;
				}
			}
		}
		if (lsctp != NULL) {
			/* The port number is busy */
			mutex_exit(&tbf->tf_lock);
		} else {
			conn_t *connp = sctp->sctp_connp;

			if (is_system_labeled()) {
				mlp_type_t addrtype, mlptype;

				/*
				 * On a labeled system we must check the type
				 * of the binding requested by the user (either
				 * MLP or SLP on shared and private addresses),
				 * and that the user's requested binding
				 * is permitted.
				 */
				addrtype = tsol_mlp_addr_type(zone->zone_id,
				    sctp->sctp_ipversion,
				    sctp->sctp_ipversion == IPV4_VERSION ?
				    (void *)&sctp->sctp_ipha->ipha_src :
				    (void *)&sctp->sctp_ip6h->ip6_src);

				/*
				 * tsol_mlp_addr_type returns the possibilities
				 * for the selected address.  Since all local
				 * addresses are either private or shared, the
				 * return value mlptSingle means "local address
				 * not valid (interface not present)."
				 */
				if (addrtype == mlptSingle) {
					mutex_exit(&tbf->tf_lock);
					return (EADDRNOTAVAIL);
				}
				mlptype = tsol_mlp_port_type(zone,
				    IPPROTO_SCTP, port, addrtype);
				if (mlptype != mlptSingle) {
					if (secpolicy_net_bindmlp(connp->
					    conn_cred) != 0) {
						mutex_exit(&tbf->tf_lock);
						return (EACCES);
					}
					/*
					 * If we're binding a shared MLP, then
					 * make sure that this zone is the one
					 * that owns that MLP.  Shared MLPs can
					 * be owned by at most one zone.
					 */
					if (mlptype == mlptShared &&
					    addrtype == mlptShared &&
					    connp->conn_zoneid !=
					    tsol_mlp_findzone(IPPROTO_SCTP,
					    lport)) {
						mutex_exit(&tbf->tf_lock);
						return (EACCES);
					}
					connp->conn_mlp_type = mlptype;
				}
			}
			/*
			 * This port is ours. Insert in fanout and mark as
			 * bound to prevent others from getting the port
			 * number.
			 */
			sctp->sctp_state = SCTPS_BOUND;
			sctp->sctp_lport = lport;
			sctp->sctp_sctph->sh_sport = lport;

			ASSERT(&sctp_bind_fanout[SCTP_BIND_HASH(port)] == tbf);
			sctp_bind_hash_insert(tbf, sctp, 1);

			mutex_exit(&tbf->tf_lock);

			/*
			 * We don't want sctp_next_port_to_try to "inherit"
			 * a port number supplied by the user in a bind.
			 *
			 * This is the only place where sctp_next_port_to_try
			 * is updated. After the update, it may or may not
			 * be in the valid range.
			 */
			if (user_specified == 0)
				sctp_next_port_to_try = port + 1;

			*allocated_port = port;
			return (0);
		}

		if ((count == 0) && (user_specified)) {
			/*
			 * We may have to return an anonymous port. So
			 * get one to start with.
			 */
			port = sctp_update_next_port(sctp_next_port_to_try,
			    zone);
			user_specified = 0;
		} else {
			port = sctp_update_next_port(port + 1, zone);
		}
		if (port == 0)
			break;

		/*
		 * Don't let this loop run forever in the case where
		 * all of the anonymous ports are in use.
		 */
	} while (++count < loopmax);

	return (bind_to_req_port_only ? EADDRINUSE : EADDRNOTAVAIL);
}
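/*
 * Hedged sketch (an assumption, not from the source): the call
 * sequence an sctp_bind()-style caller would use, per the function
 * comments above.  Port selection feeds directly into sctp_bindi();
 * address setup and error handling are elided, and the helper name
 * is illustrative.
 */
static int
example_sctp_bind(sctp_t *sctp, in_port_t requested_port,
    boolean_t bind_to_req_port_only, in_port_t *allocated_port)
{
	int user_specified;
	int err;

	/* Pick or validate a port (privilege-checked for low ports). */
	err = sctp_select_port(sctp, &requested_port, &user_specified);
	if (err != 0)
		return (err);

	/* Atomically check the bind hash and claim the port. */
	return (sctp_bindi(sctp, requested_port, bind_to_req_port_only,
	    user_specified, allocated_port));
}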
int
setuid(uid_t uid)
{
	proc_t *p;
	int error;
	int do_nocd = 0;
	int uidchge = 0;
	cred_t	*cr, *newcr;
	uid_t oldruid = uid;
	zoneid_t zoneid = getzoneid();
	ksid_t ksid, *ksp;
	zone_t	*zone = crgetzone(CRED());

	if (!VALID_UID(uid, zone))
		return (set_errno(EINVAL));

	if (uid > MAXUID) {
		if (ksid_lookupbyuid(zone, uid, &ksid) != 0)
			return (set_errno(EINVAL));
		ksp = &ksid;
	} else {
		ksp = NULL;
	}

	/*
	 * Need to pre-allocate the new cred structure before grabbing
	 * the p_crlock mutex.  We can't hold on to the p_crlock for most
	 * of this though, now that we allow kernel upcalls from the
	 * policy routines.
	 */
	newcr = cralloc_ksid();

	p = ttoproc(curthread);

retry:
	mutex_enter(&p->p_crlock);
retry_locked:
	cr = p->p_cred;
	crhold(cr);
	mutex_exit(&p->p_crlock);

	if ((uid == cr->cr_ruid || uid == cr->cr_suid) &&
	    secpolicy_allow_setid(cr, uid, B_TRUE) != 0) {
		mutex_enter(&p->p_crlock);
		crfree(cr);
		if (cr != p->p_cred)
			goto retry_locked;
		error = 0;
		crcopy_to(cr, newcr);
		p->p_cred = newcr;
		newcr->cr_uid = uid;
		crsetsid(newcr, ksp, KSID_USER);
		mutex_exit(&p->p_crlock);
	} else if ((error = secpolicy_allow_setid(cr, uid, B_FALSE)) == 0) {
		mutex_enter(&p->p_crlock);
		crfree(cr);
		if (cr != p->p_cred)
			goto retry_locked;
		if (!uidchge && uid != cr->cr_ruid) {
			/*
			 * The ruid of the process is going to change. In order
			 * to avoid a race condition involving the
			 * process-count associated with the newly given ruid,
			 * we increment the count before assigning the
			 * credential to the process.
			 * To do that, we'll have to take pidlock, so we first
			 * release p_crlock.
			 */
			mutex_exit(&p->p_crlock);
			uidchge = 1;
			mutex_enter(&pidlock);
			upcount_inc(uid, zoneid);
			mutex_exit(&pidlock);
			/*
			 * As we released p_crlock we can't rely on the cr
			 * we read. So retry the whole thing.
			 */
			goto retry;
		}
		/*
		 * A privileged process that gives up its privilege
		 * must be marked to produce no core dump.
		 */
		if (cr->cr_uid != uid ||
		    cr->cr_ruid != uid ||
		    cr->cr_suid != uid)
			do_nocd = 1;
		oldruid = cr->cr_ruid;
		crcopy_to(cr, newcr);
		p->p_cred = newcr;
		newcr->cr_ruid = uid;
		newcr->cr_suid = uid;
		newcr->cr_uid = uid;
		crsetsid(newcr, ksp, KSID_USER);
		priv_reset_PA(newcr, B_TRUE);

		ASSERT(uid != oldruid ? uidchge : 1);
		mutex_exit(&p->p_crlock);
	} else {
		crfree(newcr);
		crfree(cr);
		if (ksp != NULL)
			ksid_rele(ksp);
	}

	/*
	 * We decrement the number of processes associated with the oldruid
	 * to match the increment above, even if the ruid of the process
	 * did not change or an error occurred (oldruid == uid).
	 */
	if (uidchge) {
		mutex_enter(&pidlock);
		upcount_dec(oldruid, zoneid);
		mutex_exit(&pidlock);
	}

	if (error == 0) {
		if (do_nocd) {
			mutex_enter(&p->p_lock);
			p->p_flag |= SNOCD;
			mutex_exit(&p->p_lock);
		}
		crset(p, newcr);	/* broadcast to process threads */
		return (0);
	}
	return (set_errno(error));
}
int
setgroups(int gidsetsize, gid_t *gidset)
{
	proc_t	*p;
	cred_t	*cr, *newcr;
	int	i;
	int	n = gidsetsize;
	int	error;
	int	scnt = 0;
	ksidlist_t *ksl = NULL;
	zone_t	*zone;
	struct credgrp *grps = NULL;

	/* Perform the cheapest tests before grabbing p_crlock */
	if (n > ngroups_max || n < 0)
		return (set_errno(EINVAL));

	zone = crgetzone(CRED());
	if (n != 0) {
		const gid_t *groups;

		grps = crgrpcopyin(n, gidset);

		if (grps == NULL)
			return (set_errno(EFAULT));

		groups = crgetggroups(grps);

		for (i = 0; i < n; i++) {
			if (!VALID_GID(groups[i], zone)) {
				crgrprele(grps);
				return (set_errno(EINVAL));
			}
			if (groups[i] > MAXUID)
				scnt++;
		}
		if (scnt > 0) {
			ksl = kcrsid_gidstosids(zone, n, (gid_t *)groups);
			if (ksl == NULL) {
				crgrprele(grps);
				return (set_errno(EINVAL));
			}
		}
	}

	/*
	 * Need to pre-allocate the new cred structure before acquiring
	 * the p_crlock mutex.
	 */
	newcr = cralloc_ksid();
	p = ttoproc(curthread);
	mutex_enter(&p->p_crlock);
retry:
	cr = p->p_cred;
	crhold(cr);
	mutex_exit(&p->p_crlock);

	if ((error = secpolicy_allow_setid(cr, -1, B_FALSE)) != 0) {
		if (grps != NULL)
			crgrprele(grps);
		if (ksl != NULL)
			ksidlist_rele(ksl);
		crfree(newcr);
		crfree(cr);
		return (set_errno(error));
	}
	mutex_enter(&p->p_crlock);
	crfree(cr);
	if (cr != p->p_cred)
		goto retry;

	crdup_to(cr, newcr);
	crsetsidlist(newcr, ksl);
	crsetcredgrp(newcr, grps);

	p->p_cred = newcr;
	crhold(newcr);			/* hold for the current thread */
	crfree(cr);			/* free the old one */
	mutex_exit(&p->p_crlock);

	/*
	 * Broadcast new cred to process threads (including the current one).
	 */
	crset(p, newcr);

	return (0);
}