/*
 * Log an nvlist of history data to the pool's on-disk history.
 *
 * The caller's nvlist is duplicated; the duplicate (nvarg) is handed to
 * spa_history_log_sync via an async sync task, so the caller retains
 * ownership of nvl.  Returns 0 on success or an errno value.
 */
int spa_history_log_nvl(spa_t *spa, nvlist_t *nvl)
{
	int err = 0;
	dmu_tx_t *tx;
	nvlist_t *nvarg;

	/* History requires a new-enough pool version and a writeable pool. */
	if (spa_version(spa) < SPA_VERSION_ZPOOL_HISTORY || !spa_writeable(spa))
		return (SET_ERROR(EINVAL));

	tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err) {
		dmu_tx_abort(tx);
		return (err);
	}

	/* Duplicate so the sync task owns its own copy of the data. */
	VERIFY0(nvlist_dup(nvl, &nvarg, KM_SLEEP));
	if (spa_history_zone() != NULL) {
		fnvlist_add_string(nvarg, ZPOOL_HIST_ZONE,
		    spa_history_zone());
	}
	fnvlist_add_uint64(nvarg, ZPOOL_HIST_WHO, crgetruid(CRED()));

	/* Kick this off asynchronously; errors are ignored. */
	dsl_sync_task_nowait(spa_get_dsl(spa), spa_history_log_sync,
	    nvarg, 0, ZFS_SPACE_CHECK_NONE, tx);
	dmu_tx_commit(tx);

	/* spa_history_log_sync will free nvarg (the duplicate of nvl) */
	return (err);
}
static int splat_cred_test1(struct file *file, void *arg) { char str[GROUP_STR_SIZE]; uid_t uid, ruid, suid; gid_t gid, rgid, sgid, *groups; int ngroups, i, count = 0; uid = crgetuid(CRED()); ruid = crgetruid(CRED()); suid = crgetsuid(CRED()); gid = crgetgid(CRED()); rgid = crgetrgid(CRED()); sgid = crgetsgid(CRED()); crhold(CRED()); ngroups = crgetngroups(CRED()); groups = crgetgroups(CRED()); memset(str, 0, GROUP_STR_SIZE); for (i = 0; i < ngroups; i++) { count += sprintf(str + count, "%d ", groups[i]); if (count > (GROUP_STR_SIZE - GROUP_STR_REDZONE)) { splat_vprint(file, SPLAT_CRED_TEST1_NAME, "Failed too many group entries for temp " "buffer: %d, %s\n", ngroups, str); return -ENOSPC; } } crfree(CRED()); splat_vprint(file, SPLAT_CRED_TEST1_NAME, "uid: %d ruid: %d suid: %d " "gid: %d rgid: %d sgid: %d\n", uid, ruid, suid, gid, rgid, sgid); splat_vprint(file, SPLAT_CRED_TEST1_NAME, "ngroups: %d groups: %s\n", ngroups, str); if (uid || ruid || suid || gid || rgid || sgid) { splat_vprint(file, SPLAT_CRED_TEST1_NAME, "Failed expected all uids+gids to be %d\n", 0); return -EIDRM; } if (ngroups > NGROUPS_MAX) { splat_vprint(file, SPLAT_CRED_TEST1_NAME, "Failed ngroups must not exceed NGROUPS_MAX: " "%d > %d\n", ngroups, NGROUPS_MAX); return -EIDRM; } splat_vprint(file, SPLAT_CRED_TEST1_NAME, "Success sane CRED(): %d\n", 0); return 0; } /* splat_cred_test1() */
/*
 * Cachefs used to know too much about how creds looked; since it's
 * committed to persistent storage, we can't change the layout so
 * it now has a "dl_cred_t" which (unsurprisingly) looks exactly like
 * an old credential.
 *
 * The dst argument needs to point to:
 *	struct dl_cred_t;
 *	<buffer space>		buffer for groups
 *
 * The source is a proper kernel cred_t.
 *
 * Returns the total number of bytes written at dst.
 */
static size_t
copy_cred(cred_t *src, dl_cred_t *dst)
{
	int n;
	const gid_t *sgrp = crgetgroups(src);

	/* The legacy layout can hold at most NGROUPS_MAX_DEFAULT groups. */
	n = MIN(NGROUPS_MAX_DEFAULT, crgetngroups(src));

	/* copy the fixed fields */
	dst->cr_uid = crgetuid(src);
	dst->cr_ruid = crgetruid(src);
	dst->cr_suid = crgetsuid(src);
	dst->cr_gid = crgetgid(src);
	dst->cr_rgid = crgetrgid(src);
	dst->cr_sgid = crgetsgid(src);
	dst->cr_groups[0] = sgrp[0];
	dst->cr_ngroups = n;

	/*
	 * Fix: the first group already lives in cr_groups[0]; the remaining
	 * n - 1 groups go into the buffer immediately after the structure.
	 * Copying from sgrp (index 0) would duplicate the first group and
	 * silently drop the last one, so copy from &sgrp[1].
	 */
	bcopy(&sgrp[1], (void *)(dst + 1), (n - 1) * sizeof (gid_t));

	return (sizeof (dl_cred_t) + (n - 1) * sizeof (gid_t));
}
/*
 * Remove a temporary symlink entry from /afs.
 *
 * Only root may remove entries.  Returns 0 on success, EPERM for
 * non-root callers, EROFS if the name is a real cell or alias, and
 * ENOENT if no such entry exists.
 */
int
afs_DynrootVOPRemove(struct vcache *avc, afs_ucred_t *acred, char *aname)
{
    struct afs_dynSymlink **linkp;
    struct afs_dynSymlink *cur;
    int removed = 0;

    /* Refuse anything but uid 0. */
#if defined(AFS_SUN510_ENV)
    if (crgetruid(acred))
#else
    if (afs_cr_uid(acred))
#endif
	return EPERM;

    ObtainWriteLock(&afs_dynSymlinkLock, 97);
    for (linkp = &afs_dynSymlinkBase; (cur = *linkp) != NULL;
	 linkp = &cur->next) {
	if (afs_strcasecmp(aname, cur->name) != 0)
	    continue;
	/* Unlink the matching entry and release its storage. */
	afs_osi_Free(cur->name, strlen(cur->name) + 1);
	afs_osi_Free(cur->target, strlen(cur->target) + 1);
	*linkp = cur->next;
	afs_osi_Free(cur, sizeof(*cur));
	afs_dynSymlinkIndex++;
	removed = 1;
	break;
    }
    ReleaseWriteLock(&afs_dynSymlinkLock);

    if (removed) {
	afs_DynrootInvalidate();
	return 0;
    }

    return afs_CellOrAliasExists(aname) ? EROFS : ENOENT;
}
/*
 * Q_GETQUOTA - return current values in a dqblk structure.
 *
 * A caller may query its own real uid's quota; querying another uid
 * requires quota privilege.  Returns 0 on success, EPERM, ESRCH (quotas
 * disabled or no quota set for uid), EFAULT, or a getdiskquota error.
 */
static int
getquota(uid_t uid, struct ufsvfs *ufsvfsp, caddr_t addr, cred_t *cr)
{
	struct dquot *dqp;
	struct dquot *xdqp;
	struct dqblk dqb;
	int error = 0;

	/* Permission: own uid, or privileged for this filesystem. */
	if (uid != crgetruid(cr) &&
	    secpolicy_fs_quota(cr, ufsvfsp->vfs_vfs) != 0)
		return (EPERM);

	rw_enter(&ufsvfsp->vfs_dqrwlock, RW_READER);
	if (!(ufsvfsp->vfs_qflags & MQ_ENABLED)) {
		rw_exit(&ufsvfsp->vfs_dqrwlock);
		return (ESRCH);
	}
	error = getdiskquota(uid, ufsvfsp, 0, &xdqp);
	if (error != 0) {
		rw_exit(&ufsvfsp->vfs_dqrwlock);
		return (error);
	}
	dqp = xdqp;

	mutex_enter(&dqp->dq_lock);
	if (dqp->dq_fhardlimit != 0 || dqp->dq_fsoftlimit != 0 ||
	    dqp->dq_bhardlimit != 0 || dqp->dq_bsoftlimit != 0) {
		/* Snapshot the on-disk quota block while holding dq_lock. */
		bcopy(&dqp->dq_dqb, &dqb, sizeof (struct dqblk));
	} else {
		/* All limits zero means no quota is set for this uid. */
		error = ESRCH;
	}
	dqput(dqp);
	mutex_exit(&dqp->dq_lock);
	rw_exit(&ufsvfsp->vfs_dqrwlock);

	if (error == 0 && copyout(&dqb, addr, sizeof (struct dqblk)) != 0)
		error = EFAULT;

	return (error);
}
/*
 * Allocate a new lxproc node
 *
 * This also allocates the vnode associated with it.
 *
 * dp   - parent directory vnode (held for the lifetime of the node)
 * type - the lxpr_nodetype_t of the node being created
 * p    - owning process, or NULL for process-independent files
 * fd   - file descriptor number, used only for fd-specific inode numbers
 *
 * The node is deallocated in vop_inactive.
 */
lxpr_node_t *
lxpr_getnode(vnode_t *dp, lxpr_nodetype_t type, proc_t *p, int fd)
{
	lxpr_node_t *lxpnp;
	vnode_t *vp;
	user_t *up;
	timestruc_t now;

	/*
	 * Allocate a new node. It is deallocated in vop_innactive
	 */
	lxpnp = kmem_cache_alloc(lxpr_node_cache, KM_SLEEP);

	/*
	 * Set defaults (may be overridden below)
	 */
	gethrestime(&now);
	lxpnp->lxpr_type = type;
	lxpnp->lxpr_realvp = NULL;
	lxpnp->lxpr_parent = dp;
	VN_HOLD(dp);
	if (p != NULL) {
		/* The zone's init process appears as pid 1, Linux-style. */
		lxpnp->lxpr_pid = ((p->p_pid ==
		    curproc->p_zone->zone_proc_initpid) ? 1 : p->p_pid);

		lxpnp->lxpr_time = PTOU(p)->u_start;
		/* Node is owned by the process's real uid/gid. */
		lxpnp->lxpr_uid = crgetruid(p->p_cred);
		lxpnp->lxpr_gid = crgetrgid(p->p_cred);
		lxpnp->lxpr_ino = lxpr_inode(type, p->p_pid, fd);
	} else {
		/* Pretend files without a proc belong to sched */
		lxpnp->lxpr_pid = 0;
		lxpnp->lxpr_time = now;
		lxpnp->lxpr_uid = lxpnp->lxpr_gid = 0;
		lxpnp->lxpr_ino = lxpr_inode(type, 0, 0);
	}

	/* initialize the vnode data */
	vp = lxpnp->lxpr_vnode;
	vn_reinit(vp);
	vp->v_flag = VNOCACHE|VNOMAP|VNOSWAP|VNOMOUNT;
	vp->v_vfsp = dp->v_vfsp;

	/*
	 * Do node specific stuff
	 */
	switch (type) {
	case LXPR_PROCDIR:
		vp->v_flag |= VROOT;
		vp->v_type = VDIR;
		lxpnp->lxpr_mode = 0555;	/* read-search by everyone */
		break;

	case LXPR_PID_CURDIR:
		ASSERT(p != NULL);

		/*
		 * Zombie check.  p_stat is officially protected by pidlock,
		 * but we can't grab pidlock here because we already hold
		 * p_lock.  Luckily if we look at the process exit code
		 * we see that p_stat only transisions from SRUN to SZOMB
		 * while p_lock is held.  Aside from this, the only other
		 * p_stat transition that we need to be aware about is
		 * SIDL to SRUN, but that's not a problem since lxpr_lock()
		 * ignores nodes in the SIDL state so we'll never get a node
		 * that isn't already in the SRUN state.
		 */
		if (p->p_stat == SZOMB) {
			lxpnp->lxpr_realvp = NULL;
		} else {
			up = PTOU(p);
			lxpnp->lxpr_realvp = up->u_cdir;
			ASSERT(lxpnp->lxpr_realvp != NULL);
			VN_HOLD(lxpnp->lxpr_realvp);
		}
		vp->v_type = VLNK;
		lxpnp->lxpr_mode = 0777;	/* anyone does anything ! */
		break;

	case LXPR_PID_ROOTDIR:
		ASSERT(p != NULL);
		/* Zombie check.  see locking comment above */
		if (p->p_stat == SZOMB) {
			lxpnp->lxpr_realvp = NULL;
		} else {
			up = PTOU(p);
			/* Fall back to the system root if no chroot. */
			lxpnp->lxpr_realvp = up->u_rdir != NULL ?
			    up->u_rdir : rootdir;
			ASSERT(lxpnp->lxpr_realvp != NULL);
			VN_HOLD(lxpnp->lxpr_realvp);
		}
		vp->v_type = VLNK;
		lxpnp->lxpr_mode = 0777;	/* anyone does anything ! */
		break;

	case LXPR_PID_EXE:
		ASSERT(p != NULL);
		lxpnp->lxpr_realvp = p->p_exec;
		if (lxpnp->lxpr_realvp != NULL) {
			VN_HOLD(lxpnp->lxpr_realvp);
		}
		vp->v_type = VLNK;
		lxpnp->lxpr_mode = 0777;
		break;

	case LXPR_SELF:
		vp->v_type = VLNK;
		lxpnp->lxpr_mode = 0777;	/* anyone does anything ! */
		break;

	case LXPR_PID_FD_FD:
		ASSERT(p != NULL);
		/* lxpr_realvp is set after we return */
		vp->v_type = VLNK;
		lxpnp->lxpr_mode = 0700;	/* read-write-exe owner only */
		break;

	case LXPR_PID_FDDIR:
		ASSERT(p != NULL);
		vp->v_type = VDIR;
		lxpnp->lxpr_mode = 0500;	/* read-search by owner only */
		break;

	case LXPR_PIDDIR:
		ASSERT(p != NULL);
		vp->v_type = VDIR;
		lxpnp->lxpr_mode = 0511;
		break;

	case LXPR_SYSDIR:
	case LXPR_SYS_FSDIR:
	case LXPR_SYS_FS_INOTIFYDIR:
	case LXPR_SYS_KERNELDIR:
	case LXPR_NETDIR:
		vp->v_type = VDIR;
		lxpnp->lxpr_mode = 0555;	/* read-search by all */
		break;

	case LXPR_PID_ENV:
	case LXPR_PID_MEM:
		ASSERT(p != NULL);
		/*FALLTHRU*/
	case LXPR_KCORE:
		vp->v_type = VREG;
		lxpnp->lxpr_mode = 0400;	/* read-only by owner only */
		break;

	default:
		vp->v_type = VREG;
		lxpnp->lxpr_mode = 0444;	/* read-only by all */
		break;
	}

	return (lxpnp);
}
/**
 * open() worker.
 *
 * Validates the minor/device type, allocates a per-open soft-state slot,
 * creates a support-driver session recording the caller's real uid/gid,
 * and encodes the open instance into *pDev.  Minor 0 is the unrestricted
 * device; minor 1 is the restricted one.
 */
static int VBoxDrvSolarisOpen(dev_t *pDev, int fFlag, int fType, cred_t *pCred)
{
    const bool fUnrestricted = getminor(*pDev) == 0;
    PSUPDRVSESSION pSession;
    int rc;
    LogFlowFunc(("VBoxDrvSolarisOpen: pDev=%p:%#x\n", pDev, *pDev));

    /*
     * Validate input
     */
    if (   (getminor(*pDev) != 0 && getminor(*pDev) != 1)
        || fType != OTYP_CHR)
        return EINVAL; /* See mmopen for precedent. */

#ifndef USE_SESSION_HASH
    /*
     * Locate a new device open instance.
     *
     * For each open call we'll allocate an item in the soft state of the device.
     * The item index is stored in the dev_t. I hope this is ok...
     */
    vbox_devstate_t *pState = NULL;
    unsigned iOpenInstance;
    for (iOpenInstance = 0; iOpenInstance < 4096; iOpenInstance++)
    {
        if (    !ddi_get_soft_state(g_pVBoxDrvSolarisState, iOpenInstance) /* faster */
            &&  ddi_soft_state_zalloc(g_pVBoxDrvSolarisState, iOpenInstance) == DDI_SUCCESS)
        {
            pState = ddi_get_soft_state(g_pVBoxDrvSolarisState, iOpenInstance);
            break;
        }
    }
    if (!pState)
    {
        LogRel(("VBoxDrvSolarisOpen: too many open instances.\n"));
        return ENXIO;
    }

    /*
     * Create a new session.
     */
    rc = supdrvCreateSession(&g_DevExt, true /* fUser */, fUnrestricted, &pSession);
    if (RT_SUCCESS(rc))
    {
        /* Record the caller's real credentials on the session. */
        pSession->Uid = crgetruid(pCred);
        pSession->Gid = crgetrgid(pCred);

        pState->pSession = pSession;
        /* Hand the instance number back to the caller via the dev_t minor. */
        *pDev = makedevice(getmajor(*pDev), iOpenInstance);
        LogFlow(("VBoxDrvSolarisOpen: Dev=%#x pSession=%p pid=%d r0proc=%p thread=%p\n",
                 *pDev, pSession, RTProcSelf(), RTR0ProcHandleSelf(), RTThreadNativeSelf() ));
        return 0;
    }

    /* failed - clean up */
    ddi_soft_state_free(g_pVBoxDrvSolarisState, iOpenInstance);
#else
    /*
     * Create a new session.
     * Sessions in Solaris driver are mostly useless. It's however needed
     * in VBoxDrvSolarisIOCtlSlow() while calling supdrvIOCtl()
     */
    rc = supdrvCreateSession(&g_DevExt, true /* fUser */, fUnrestricted, &pSession);
    if (RT_SUCCESS(rc))
    {
        unsigned iHash;

        pSession->Uid = crgetruid(pCred);
        pSession->Gid = crgetrgid(pCred);

        /*
         * Insert it into the hash table.
         * NOTE(review): deliberate compile-time stop — this USE_SESSION_HASH
         * variant is incomplete (only one hash entry per process is handled).
         */
# error "Only one entry per process!"
        iHash = SESSION_HASH(pSession->Process);
        RTSpinlockAcquire(g_Spinlock);
        pSession->pNextHash = g_apSessionHashTab[iHash];
        g_apSessionHashTab[iHash] = pSession;
        RTSpinlockRelease(g_Spinlock);
        LogFlow(("VBoxDrvSolarisOpen success\n"));
    }

    int instance;
    for (instance = 0; instance < DEVICE_MAXINSTANCES; instance++)
    {
        vbox_devstate_t *pState = ddi_get_soft_state(g_pVBoxDrvSolarisState, instance);
        if (pState)
            break;
    }

    if (instance >= DEVICE_MAXINSTANCES)
    {
        LogRel(("VBoxDrvSolarisOpen: All instances exhausted\n"));
        return ENXIO;
    }

    *pDev = makedevice(getmajor(*pDev), instance);
#endif

    return VBoxSupDrvErr2SolarisErr(rc);
}
/*
 * Fill in a proc_usage_t for process p from its accounting state.
 *
 * p     - the process (caller must hold p->p_lock)
 * pu    - usage structure to populate
 * mask  - AC_PROC_* bit mask selecting which resource groups to compute
 * flag  - EW_PARTIAL for a still-running process, EW_FINAL at exit
 * wstat - wait status recorded in pu_wstat
 */
void
exacct_calculate_proc_usage(proc_t *p, proc_usage_t *pu, ulong_t *mask,
    int flag, int wstat)
{
	timestruc_t ts, ts_run;

	ASSERT(MUTEX_HELD(&p->p_lock));

	/*
	 * Convert CPU and execution times to sec/nsec format.
	 */
	if (BT_TEST(mask, AC_PROC_CPU)) {
		hrt2ts(mstate_aggr_state(p, LMS_USER), &ts);
		pu->pu_utimesec = (uint64_t)(ulong_t)ts.tv_sec;
		pu->pu_utimensec = (uint64_t)(ulong_t)ts.tv_nsec;
		hrt2ts(mstate_aggr_state(p, LMS_SYSTEM), &ts);
		pu->pu_stimesec = (uint64_t)(ulong_t)ts.tv_sec;
		pu->pu_stimensec = (uint64_t)(ulong_t)ts.tv_nsec;
	}
	if (BT_TEST(mask, AC_PROC_TIME)) {
		gethrestime(&ts);
		pu->pu_finishsec = (uint64_t)(ulong_t)ts.tv_sec;
		pu->pu_finishnsec = (uint64_t)(ulong_t)ts.tv_nsec;
		/*
		 * Derive start time as "now minus elapsed run time",
		 * then normalize tv_nsec back into [0, NANOSEC).
		 */
		hrt2ts(gethrtime() - p->p_mstart, &ts_run);
		ts.tv_sec -= ts_run.tv_sec;
		ts.tv_nsec -= ts_run.tv_nsec;
		if (ts.tv_nsec < 0) {
			ts.tv_sec--;
			if ((ts.tv_nsec = ts.tv_nsec + NANOSEC) >= NANOSEC) {
				ts.tv_sec++;
				ts.tv_nsec -= NANOSEC;
			}
		}
		pu->pu_startsec = (uint64_t)(ulong_t)ts.tv_sec;
		pu->pu_startnsec = (uint64_t)(ulong_t)ts.tv_nsec;
	}

	/* Identity and session bookkeeping. */
	pu->pu_pid = p->p_pidp->pid_id;
	pu->pu_acflag = p->p_user.u_acflag;
	pu->pu_projid = p->p_task->tk_proj->kpj_id;
	pu->pu_taskid = p->p_task->tk_tkid;
	pu->pu_major = getmajor(p->p_sessp->s_dev);
	pu->pu_minor = getminor(p->p_sessp->s_dev);
	pu->pu_ancpid = p->p_ancpid;
	pu->pu_wstat = wstat;

	/*
	 * Compute average RSS in K.  The denominator is the number of
	 * samples: the number of clock ticks plus the initial value.
	 */
	pu->pu_mem_rss_avg = (PTOU(p)->u_mem / (p->p_stime + p->p_utime + 1)) *
	    (PAGESIZE / 1024);
	pu->pu_mem_rss_max = PTOU(p)->u_mem_max * (PAGESIZE / 1024);

	/* p_crlock protects p_cred while we read the real uid/gid. */
	mutex_enter(&p->p_crlock);
	pu->pu_ruid = crgetruid(p->p_cred);
	pu->pu_rgid = crgetrgid(p->p_cred);
	mutex_exit(&p->p_crlock);

	bcopy(p->p_user.u_comm, pu->pu_command, strlen(p->p_user.u_comm) + 1);
	bcopy(p->p_zone->zone_name, pu->pu_zonename,
	    strlen(p->p_zone->zone_name) + 1);
	bcopy(p->p_zone->zone_nodename, pu->pu_nodename,
	    strlen(p->p_zone->zone_nodename) + 1);

	/*
	 * Calculate microstate accounting data for a process that is still
	 * running.  Presently, we explicitly collect all of the LWP usage into
	 * the proc usage structure here.
	 */
	if (flag & EW_PARTIAL)
		exacct_calculate_proc_mstate(p, pu);
	if (flag & EW_FINAL)
		exacct_copy_proc_mstate(p, pu);
}
/**
 * User context entry points
 *
 * @remarks fFlags are the flags passed to open() or to ldi_open_by_name.  In
 *          the latter case the FKLYR flag is added to indicate that the caller
 *          is a kernel component rather than user land.
 */
static int vgdrvSolarisOpen(dev_t *pDev, int fFlags, int fType, cred_t *pCred)
{
    int rc;
    PVBOXGUESTSESSION pSession = NULL;
    LogFlow(("vgdrvSolarisOpen:\n"));

    /*
     * Verify we are being opened as a character device.
     */
    if (fType != OTYP_CHR)
        return EINVAL;

    /*
     * Allocate a per-open soft-state slot; the slot index is encoded into
     * the dev_t minor below so later entry points can find it again.
     */
    vboxguest_state_t *pState = NULL;
    unsigned iOpenInstance;
    for (iOpenInstance = 0; iOpenInstance < 4096; iOpenInstance++)
    {
        if (    !ddi_get_soft_state(g_pvgdrvSolarisState, iOpenInstance) /* faster */
            &&  ddi_soft_state_zalloc(g_pvgdrvSolarisState, iOpenInstance) == DDI_SUCCESS)
        {
            pState = ddi_get_soft_state(g_pvgdrvSolarisState, iOpenInstance);
            break;
        }
    }
    if (!pState)
    {
        Log(("vgdrvSolarisOpen: too many open instances."));
        return ENXIO;
    }

    /*
     * Create a new session.
     *
     * Note! The devfs inode with the gid isn't readily available here, so we cannot easily
     *       to the vbox group detection like on linux.  Read config instead?
     */
    if (!(fFlags & FKLYR))
    {
        /* Build requestor info from the caller's credentials. */
        uint32_t fRequestor = VMMDEV_REQUESTOR_USERMODE | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN;
        if (crgetruid(pCred) == 0)
            fRequestor |= VMMDEV_REQUESTOR_USR_ROOT;
        else
            fRequestor |= VMMDEV_REQUESTOR_USR_USER;
        if (secpolicy_coreadm(pCred) == 0)
            fRequestor |= VMMDEV_REQUESTOR_GRP_WHEEL;
        /** @todo is there any way of detecting that the process belongs to someone on the physical console?
         * secpolicy_console() [== PRIV_SYS_DEVICES] doesn't look quite right, or does it? */
        fRequestor |= VMMDEV_REQUESTOR_CON_DONT_KNOW;
        fRequestor |= VMMDEV_REQUESTOR_NO_USER_DEVICE; /** @todo implement vboxuser device node. */
        rc = VGDrvCommonCreateUserSession(&g_DevExt, fRequestor, &pSession);
    }
    else
        rc = VGDrvCommonCreateKernelSession(&g_DevExt, &pSession);
    if (RT_SUCCESS(rc))
    {
        /* User sessions hold a process reference; kernel sessions do not. */
        if (!(fFlags & FKLYR))
            pState->pvProcRef = proc_ref();
        else
            pState->pvProcRef = NULL;
        pState->pSession = pSession;
        *pDev = makedevice(getmajor(*pDev), iOpenInstance);
        Log(("vgdrvSolarisOpen: pSession=%p pState=%p pid=%d\n", pSession, pState, (int)RTProcSelf()));
        return 0;
    }

    /* Failed, clean up. */
    ddi_soft_state_free(g_pvgdrvSolarisState, iOpenInstance);

    LogRel((DEVICE_NAME "::Open: VGDrvCommonCreateUserSession failed. rc=%d\n", rc));
    return EFAULT;
}
/*
 * Remove zombie children from the process table.
 *
 * Caller must hold pidlock; p must be a zombie (SZOMB) with no threads.
 * Releases the process's credential, core-file state, task/project
 * membership and pid slot, after rolling its accounting into the
 * next-of-kin's child totals where applicable.
 */
void
freeproc(proc_t *p)
{
	proc_t *q;
	task_t *tk;

	ASSERT(p->p_stat == SZOMB);
	ASSERT(p->p_tlist == NULL);
	ASSERT(MUTEX_HELD(&pidlock));

	sigdelq(p, NULL, 0);
	if (p->p_killsqp) {
		siginfofree(p->p_killsqp);
		p->p_killsqp = NULL;
	}

	prfree(p);	/* inform /proc */

	/*
	 * Don't free the init processes.
	 * Other dying processes will access it.
	 */
	if (p == proc_init)
		return;

	/*
	 * We wait until now to free the cred structure because a
	 * zombie process's credentials may be examined by /proc.
	 * No cred locking needed because there are no threads at this point.
	 */
	upcount_dec(crgetruid(p->p_cred), crgetzoneid(p->p_cred));
	crfree(p->p_cred);
	if (p->p_corefile != NULL) {
		corectl_path_rele(p->p_corefile);
		p->p_corefile = NULL;
	}
	if (p->p_content != NULL) {
		corectl_content_rele(p->p_content);
		p->p_content = NULL;
	}

	if (p->p_nextofkin && !((p->p_nextofkin->p_flag & SNOWAIT) ||
	    (PTOU(p->p_nextofkin)->u_signal[SIGCLD - 1] == SIG_IGN))) {
		/*
		 * This should still do the right thing since p_utime/stime
		 * get set to the correct value on process exit, so it
		 * should get properly updated
		 */
		p->p_nextofkin->p_cutime += p->p_utime;
		p->p_nextofkin->p_cstime += p->p_stime;

		p->p_nextofkin->p_cacct[LMS_USER] += p->p_acct[LMS_USER];
		p->p_nextofkin->p_cacct[LMS_SYSTEM] += p->p_acct[LMS_SYSTEM];
		p->p_nextofkin->p_cacct[LMS_TRAP] += p->p_acct[LMS_TRAP];
		p->p_nextofkin->p_cacct[LMS_TFAULT] += p->p_acct[LMS_TFAULT];
		p->p_nextofkin->p_cacct[LMS_DFAULT] += p->p_acct[LMS_DFAULT];
		p->p_nextofkin->p_cacct[LMS_KFAULT] += p->p_acct[LMS_KFAULT];
		p->p_nextofkin->p_cacct[LMS_USER_LOCK] +=
		    p->p_acct[LMS_USER_LOCK];
		p->p_nextofkin->p_cacct[LMS_SLEEP] += p->p_acct[LMS_SLEEP];
		p->p_nextofkin->p_cacct[LMS_WAIT_CPU] +=
		    p->p_acct[LMS_WAIT_CPU];
		p->p_nextofkin->p_cacct[LMS_STOPPED] += p->p_acct[LMS_STOPPED];

		p->p_nextofkin->p_cru.minflt += p->p_ru.minflt;
		p->p_nextofkin->p_cru.majflt += p->p_ru.majflt;
		p->p_nextofkin->p_cru.nswap += p->p_ru.nswap;
		p->p_nextofkin->p_cru.inblock += p->p_ru.inblock;
		p->p_nextofkin->p_cru.oublock += p->p_ru.oublock;
		p->p_nextofkin->p_cru.msgsnd += p->p_ru.msgsnd;
		p->p_nextofkin->p_cru.msgrcv += p->p_ru.msgrcv;
		p->p_nextofkin->p_cru.nsignals += p->p_ru.nsignals;
		p->p_nextofkin->p_cru.nvcsw += p->p_ru.nvcsw;
		p->p_nextofkin->p_cru.nivcsw += p->p_ru.nivcsw;
		p->p_nextofkin->p_cru.sysc += p->p_ru.sysc;
		p->p_nextofkin->p_cru.ioch += p->p_ru.ioch;
	}

	/* Unlink p from its next-of-kin's orphan list. */
	q = p->p_nextofkin;
	if (q && q->p_orphan == p)
		q->p_orphan = p->p_nextorph;
	else if (q) {
		for (q = q->p_orphan; q; q = q->p_nextorph)
			if (q->p_nextorph == p)
				break;
		ASSERT(q && q->p_nextorph == p);
		q->p_nextorph = p->p_nextorph;
	}

	/*
	 * The process table slot is being freed, so it is now safe to give up
	 * task and project membership.
	 */
	mutex_enter(&p->p_lock);
	tk = p->p_task;
	task_detach(p);
	mutex_exit(&p->p_lock);

	proc_detach(p);
	pid_exit(p, tk);	/* frees pid and proc structure */

	task_rele(tk);
}
/*
 * Dispatch a quotactl(2) request for a UFS filesystem.
 *
 * Copies the quotctl request in from userland (handling the ILP32
 * layout when built with _SYSCALL32_IMPL), defaults a negative uid to
 * the caller's real uid, resolves the target ufsvfs, and dispatches to
 * the per-operation handler.  Returns 0 or an errno value.
 */
/*ARGSUSED*/
int
quotactl(struct vnode *vp, intptr_t arg, int flag, struct cred *cr)
{
	struct quotctl qc;
	struct ufsvfs *ufsp;
	int error = 0;

	if ((flag & DATAMODEL_MASK) == DATAMODEL_NATIVE) {
		if (copyin((caddr_t)arg, &qc, sizeof (struct quotctl)))
			return (EFAULT);
	}
#ifdef _SYSCALL32_IMPL
	else {
		/* quotctl struct from ILP32 callers */
		struct quotctl32 qc32;

		if (copyin((caddr_t)arg, &qc32, sizeof (struct quotctl32)))
			return (EFAULT);
		qc.op = qc32.op;
		qc.uid = qc32.uid;
		qc.addr = (caddr_t)(uintptr_t)qc32.addr;
	}
#endif	/* _SYSCALL32_IMPL */

	/* A negative uid means "the caller's own real uid". */
	if (qc.uid < 0)
		qc.uid = crgetruid(cr);

	/* Q_ALLSYNC needs no vfs; a NULL vp with Q_SYNC means "sync all". */
	if (qc.op == Q_SYNC && vp == NULL)
		ufsp = NULL;
	else if (qc.op != Q_ALLSYNC)
		ufsp = (struct ufsvfs *)(vp->v_vfsp->vfs_data);

	switch (qc.op) {
	case Q_QUOTAON:
		rw_enter(&dq_rwlock, RW_WRITER);
		if (quotas_initialized == 0) {
			qtinit2();
			quotas_initialized = 1;
		}
		rw_exit(&dq_rwlock);
		error = opendq(ufsp, vp, cr);
		break;
	case Q_QUOTAOFF:
		error = closedq(ufsp, cr);
		if (error == 0)
			invalidatedq(ufsp);
		break;
	case Q_SETQUOTA:
	case Q_SETQLIM:
		error = setquota(qc.op, (uid_t)qc.uid, ufsp, qc.addr, cr);
		break;
	case Q_GETQUOTA:
		error = getquota((uid_t)qc.uid, ufsp, (caddr_t)qc.addr, cr);
		break;
	case Q_SYNC:
		error = qsync(ufsp);
		break;
	case Q_ALLSYNC:
		(void) qsync(NULL);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}
/*
 * signotify() system call worker.
 *
 * cmd      - SN_PROC (register a queued-signal notification for the
 *            calling process), SN_SEND (deliver it to the target), or
 *            SN_CANCEL (revoke it)
 * siginfo  - user siginfo describing the signal (SN_PROC only)
 * sn_id    - user pointer to the signotify id; read on entry and
 *            updated via copyout on success
 *
 * Returns 0 on success or sets errno (EFAULT, EINVAL, EBUSY, EAGAIN).
 */
int
signotify(int cmd, siginfo_t *siginfo, signotify_id_t *sn_id)
{
	k_siginfo_t info;
	signotify_id_t id;
	proc_t *p;
	proc_t *cp = curproc;
	signotifyq_t *snqp;
	struct cred *cr;
	sigqueue_t *sqp;
	sigqhdr_t *sqh;
	u_longlong_t sid;
	model_t datamodel = get_udatamodel();

	if (copyin(sn_id, &id, sizeof (signotify_id_t)))
		return (set_errno(EFAULT));

	if (id.sn_index >= _SIGNOTIFY_MAX || id.sn_index < 0)
		return (set_errno(EINVAL));

	switch (cmd) {
	case SN_PROC:
		/* get snid for the given user address of signotifyid_t */
		sid = get_sigid(cp, (caddr_t)sn_id);

		if (id.sn_pid > 0) {
			mutex_enter(&pidlock);
			if ((p = prfind(id.sn_pid)) != NULL) {
				mutex_enter(&p->p_lock);
				if (p->p_signhdr != NULL) {
					snqp = SIGN_PTR(p, id.sn_index);
					/* Already registered for this id? */
					if (snqp->sn_snid == sid) {
						mutex_exit(&p->p_lock);
						mutex_exit(&pidlock);
						return (set_errno(EBUSY));
					}
				}
				mutex_exit(&p->p_lock);
			}
			mutex_exit(&pidlock);
		}

		if (copyin_siginfo(datamodel, siginfo, &info))
			return (set_errno(EFAULT));

		/* The si_code value must indicate the signal will be queued */
		if (!sigwillqueue(info.si_signo, info.si_code))
			return (set_errno(EINVAL));

		if (cp->p_signhdr == NULL) {
			/* Allocate signotify pool first time */
			sqh = sigqhdralloc(sizeof (signotifyq_t),
			    _SIGNOTIFY_MAX);
			mutex_enter(&cp->p_lock);
			if (cp->p_signhdr == NULL) {
				/* hang the pool head on proc */
				cp->p_signhdr = sqh;
			} else {
				/* another lwp allocated the pool, free ours */
				sigqhdrfree(sqh);
			}
		} else {
			mutex_enter(&cp->p_lock);
		}

		sqp = sigqalloc(cp->p_signhdr);
		if (sqp == NULL) {
			mutex_exit(&cp->p_lock);
			return (set_errno(EAGAIN));
		}
		cr = CRED();
		sqp->sq_info = info;
		sqp->sq_info.si_pid = cp->p_pid;
		sqp->sq_info.si_ctid = PRCTID(cp);
		sqp->sq_info.si_zoneid = getzoneid();
		sqp->sq_info.si_uid = crgetruid(cr);

		/* fill the signotifyq_t fields */
		((signotifyq_t *)sqp)->sn_snid = sid;

		mutex_exit(&cp->p_lock);

		/* complete the signotify_id_t fields */
		id.sn_index = (signotifyq_t *)sqp - SIGN_PTR(cp, 0);
		id.sn_pid = cp->p_pid;

		break;

	case SN_CANCEL:
	case SN_SEND:

		sid = get_sigid(cp, (caddr_t)sn_id);
		mutex_enter(&pidlock);
		if ((id.sn_pid <= 0) || ((p = prfind(id.sn_pid)) == NULL)) {
			mutex_exit(&pidlock);
			return (set_errno(EINVAL));
		}
		mutex_enter(&p->p_lock);
		mutex_exit(&pidlock);

		if (p->p_signhdr == NULL) {
			mutex_exit(&p->p_lock);
			return (set_errno(EINVAL));
		}

		snqp = SIGN_PTR(p, id.sn_index);

		if (snqp->sn_snid == 0) {
			mutex_exit(&p->p_lock);
			return (set_errno(EINVAL));
		}

		/* The registration must match the caller's id. */
		if (snqp->sn_snid != sid) {
			mutex_exit(&p->p_lock);
			return (set_errno(EINVAL));
		}

		snqp->sn_snid = 0;

		/* cmd == SN_CANCEL or signo == 0 (SIGEV_NONE) */
		if (((sigqueue_t *)snqp)->sq_info.si_signo <= 0)
			cmd = SN_CANCEL;

		sigqsend(cmd, p, 0, (sigqueue_t *)snqp);
		mutex_exit(&p->p_lock);

		id.sn_pid = 0;
		id.sn_index = 0;

		break;

	default :
		return (set_errno(EINVAL));
	}

	if (copyout(&id, sn_id, sizeof (signotify_id_t)))
		return (set_errno(EFAULT));

	return (0);
}