Example #1
/*
 * Bind the lwp with id 'id' in the process with pid 'pid' to the
 * processor set 'pset'.
 */
static int
pset_bind_lwp(psetid_t pset, id_t id, pid_t pid, psetid_t *opset)
{
	kthread_t	*tp;
	proc_t		*pp;
	psetid_t	oldpset;
	void		*projbuf, *zonebuf;
	int		error = 0;

	pool_lock();
	mutex_enter(&cpu_lock);
	projbuf = fss_allocbuf(FSS_NPROJ_BUF, FSS_ALLOC_PROJ);
	zonebuf = fss_allocbuf(FSS_NPROJ_BUF, FSS_ALLOC_ZONE);

	mutex_enter(&pidlock);
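	/*
	 * Fast path: the caller is binding its own lwp (id == P_MYID) in
	 * its own process, whether the process is named explicitly by pid
	 * or via P_MYID.
	 */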
	if ((pid == P_MYID && id == P_MYID) ||
	    (pid == curproc->p_pid && id == P_MYID)) {
		pp = curproc;
		tp = curthread;
		mutex_enter(&pp->p_lock);
	} else {
		if (pid == P_MYID) {
			pp = curproc;
		} else if ((pp = prfind(pid)) == NULL) {
			error = ESRCH;
			goto err;
		}
		if (pp != curproc && id == P_MYID) {
			error = EINVAL;
			goto err;
		}
		mutex_enter(&pp->p_lock);
		if ((tp = idtot(pp, id)) == NULL) {
			mutex_exit(&pp->p_lock);
			error = ESRCH;
			goto err;
		}
	}

	error = pset_bind_thread(tp, pset, &oldpset, projbuf, zonebuf);
	mutex_exit(&pp->p_lock);
err:
	mutex_exit(&pidlock);

	fss_freebuf(projbuf, FSS_ALLOC_PROJ);
	fss_freebuf(zonebuf, FSS_ALLOC_ZONE);
	mutex_exit(&cpu_lock);
	pool_unlock();
	/*
	 * Check for errors first; oldpset is only valid when the bind (or
	 * query) succeeded.
	 */
	if (error != 0)
		return (set_errno(error));
	if (opset != NULL) {
		if (copyout(&oldpset, opset, sizeof (psetid_t)) != 0)
			return (set_errno(EFAULT));
	}
	return (0);
}
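For context, here is a minimal user-level sketch of the call this kernel path services, assuming the pset_bind_lwp(3C) libc wrapper with the same four arguments; the target set id of 1 is purely illustrative, and the call requires an existing set and sufficient privilege.

#include <sys/pset.h>
#include <sys/procset.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

int
main(void)
{
	psetid_t opset;

	/*
	 * Bind the calling lwp of the calling process to processor set 1,
	 * saving the previous binding.  Passing P_MYID for both the lwp id
	 * and the pid takes the fast path at the top of pset_bind_lwp().
	 */
	if (pset_bind_lwp(1, P_MYID, P_MYID, &opset) != 0) {
		(void) fprintf(stderr, "pset_bind_lwp: %s\n", strerror(errno));
		return (1);
	}
	(void) printf("previous binding: %d\n", (int)opset);
	return (0);
}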
Example #2
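/*
 * Kernel side of pset_bind(2): bind (or, for the PS_QUERY* values, merely
 * query) the threads selected by (idtype, id) to processor set 'pset',
 * copying the previous binding out through 'opset'.
 */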
static int
pset_bind(psetid_t pset, idtype_t idtype, id_t id, psetid_t *opset)
{
	kthread_t	*tp;
	proc_t		*pp;
	task_t		*tk;
	kproject_t	*kpj;
	contract_t	*ct;
	zone_t		*zptr;
	psetid_t	oldpset;
	int		error = 0;
	void		*projbuf, *zonebuf;

	pool_lock();
	if ((pset != PS_QUERY) && (pset != PS_SOFT) &&
	    (pset != PS_HARD) && (pset != PS_QUERY_TYPE)) {
		/*
		 * Check that the set actually exists before checking
		 * permissions; this preserves the historical error
		 * precedence.  Note that if pset was PS_MYID, the
		 * cpupart_get_cpus call will change it to the
		 * processor set id of the caller (or PS_NONE if the
		 * caller is not bound to a processor set).
		 */
		if (pool_state == POOL_ENABLED) {
			pool_unlock();
			return (set_errno(ENOTSUP));
		}
		if (cpupart_get_cpus(&pset, NULL, NULL) != 0) {
			pool_unlock();
			return (set_errno(EINVAL));
		} else if (pset != PS_NONE && secpolicy_pset(CRED()) != 0) {
			pool_unlock();
			return (set_errno(EPERM));
		}
	}

	/*
	 * Pre-allocate enough buffers for FSS for all active projects
	 * and for all active zones on the system.  Unused buffers will
	 * be freed later by fss_freebuf().
	 */
	mutex_enter(&cpu_lock);
	projbuf = fss_allocbuf(FSS_NPROJ_BUF, FSS_ALLOC_PROJ);
	zonebuf = fss_allocbuf(FSS_NPROJ_BUF, FSS_ALLOC_ZONE);

	switch (idtype) {
	case P_LWPID:
		pp = curproc;
		mutex_enter(&pidlock);
		mutex_enter(&pp->p_lock);
		if (id == P_MYID) {
			tp = curthread;
		} else {
			if ((tp = idtot(pp, id)) == NULL) {
				mutex_exit(&pp->p_lock);
				mutex_exit(&pidlock);
				error = ESRCH;
				break;
			}
		}
		error = pset_bind_thread(tp, pset, &oldpset, projbuf, zonebuf);
		mutex_exit(&pp->p_lock);
		mutex_exit(&pidlock);
		break;

	case P_PID:
		mutex_enter(&pidlock);
		if (id == P_MYID) {
			pp = curproc;
		} else if ((pp = prfind(id)) == NULL) {
			mutex_exit(&pidlock);
			error = ESRCH;
			break;
		}
		error = pset_bind_process(pp, pset, &oldpset, projbuf, zonebuf);
		mutex_exit(&pidlock);
		break;

	case P_TASKID:
		mutex_enter(&pidlock);
		if (id == P_MYID)
			id = curproc->p_task->tk_tkid;
		if ((tk = task_hold_by_id(id)) == NULL) {
			mutex_exit(&pidlock);
			error = ESRCH;
			break;
		}
		error = pset_bind_task(tk, pset, &oldpset, projbuf, zonebuf);
		mutex_exit(&pidlock);
		task_rele(tk);
		break;

	case P_PROJID:
		pp = curproc;
		if (id == P_MYID)
			id = curprojid();
		if ((kpj = project_hold_by_id(id, pp->p_zone,
		    PROJECT_HOLD_FIND)) == NULL) {
			error = ESRCH;
			break;
		}
		mutex_enter(&pidlock);
		error = pset_bind_project(kpj, pset, &oldpset, projbuf,
		    zonebuf);
		mutex_exit(&pidlock);
		project_rele(kpj);
		break;

	case P_ZONEID:
		if (id == P_MYID)
			id = getzoneid();
		if ((zptr = zone_find_by_id(id)) == NULL) {
			error = ESRCH;
			break;
		}
		mutex_enter(&pidlock);
		error = pset_bind_zone(zptr, pset, &oldpset, projbuf, zonebuf);
		mutex_exit(&pidlock);
		zone_rele(zptr);
		break;

	case P_CTID:
		if (id == P_MYID)
			id = PRCTID(curproc);
		if ((ct = contract_type_ptr(process_type, id,
		    curproc->p_zone->zone_uniqid)) == NULL) {
			error = ESRCH;
			break;
		}
		mutex_enter(&pidlock);
		error = pset_bind_contract(ct->ct_data, pset, &oldpset, projbuf,
		    zonebuf);
		mutex_exit(&pidlock);
		contract_rele(ct);
		break;

	case P_PSETID:
		if (id == P_MYID || pset != PS_NONE || !INGLOBALZONE(curproc)) {
			error = EINVAL;
			break;
		}
		error = pset_unbind(id, projbuf, zonebuf, idtype);
		break;

	case P_ALL:
		if (id == P_MYID || pset != PS_NONE || !INGLOBALZONE(curproc)) {
			error = EINVAL;
			break;
		}
		error = pset_unbind(PS_NONE, projbuf, zonebuf, idtype);
		break;

	default:
		error = EINVAL;
		break;
	}

	fss_freebuf(projbuf, FSS_ALLOC_PROJ);
	fss_freebuf(zonebuf, FSS_ALLOC_ZONE);
	mutex_exit(&cpu_lock);
	pool_unlock();

	if (error != 0)
		return (set_errno(error));
	if (opset != NULL) {
		if (copyout(&oldpset, opset, sizeof (psetid_t)) != 0)
			return (set_errno(EFAULT));
	}
	return (0);
}
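As with the previous example, a minimal user-level sketch of the matching libc call may help; this assumes the documented pset_bind(3C)/pset_bind(2) interface, and it uses PS_QUERY and PS_NONE only to query and then clear the calling process's binding.

#include <sys/pset.h>
#include <sys/procset.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

int
main(void)
{
	psetid_t opset;

	/* Query the current binding of the calling process. */
	if (pset_bind(PS_QUERY, P_PID, P_MYID, &opset) != 0) {
		(void) fprintf(stderr, "query: %s\n", strerror(errno));
		return (1);
	}
	(void) printf("bound to: %d\n", (int)opset);

	/* Clear any processor set binding for the calling process. */
	if (pset_bind(PS_NONE, P_PID, P_MYID, &opset) != 0) {
		(void) fprintf(stderr, "unbind: %s\n", strerror(errno));
		return (1);
	}
	return (0);
}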
Example #3
/*
 * task_join()
 *
 * Overview
 *   task_join() contains the actions that must be executed when the first
 *   member (curproc) of a newly created task joins it.  It must never fail.
 *
 *   The caller must make sure holdlwps() is called so that all other lwps are
 *   stopped prior to calling this function.
 *
 *   NB: It returns with curproc->p_lock held.
 *
 * Return values
 *   Pointer to the old task.
 *
 * Caller's context
 *   cpu_lock must be held entering the function.  It will acquire pidlock,
 *   p_crlock and p_lock during execution.
 */
task_t *
task_join(task_t *tk, uint_t flags)
{
	proc_t *p = ttoproc(curthread);
	task_t *prev_tk;
	void *projbuf, *zonebuf;
	zone_t *zone = tk->tk_zone;
	projid_t projid = tk->tk_proj->kpj_id;
	cred_t *oldcr;

	/*
	 * We can't know for sure if holdlwps() was called, but we can check to
	 * ensure we're single-threaded.
	 */
	ASSERT(curthread == p->p_agenttp || p->p_lwprcnt == 1);

	/*
	 * Changing the credential is always hard because we cannot
	 * allocate memory while holding locks, yet we don't know in
	 * advance whether the credential needs to change.  We first take
	 * a reference on the current cred if it does need to change, then
	 * build a duplicate carrying the updated project id.  Finally we
	 * install it: we drop the reference we took the first time we
	 * held p_crlock, and once the new credential has been swapped in
	 * under p_crlock a second time, we drop the process's reference
	 * to its old p_cred.
	 */
	mutex_enter(&p->p_crlock);
	if (crgetprojid(p->p_cred) == projid)
		oldcr = NULL;
	else
		crhold(oldcr = p->p_cred);
	mutex_exit(&p->p_crlock);

	if (oldcr != NULL) {
		cred_t *newcr = crdup(oldcr);
		crsetprojid(newcr, projid);
		crfree(oldcr);

		mutex_enter(&p->p_crlock);
		oldcr = p->p_cred;
		p->p_cred = newcr;
		mutex_exit(&p->p_crlock);
		crfree(oldcr);
	}

	/*
	 * Make sure that the number of processor sets is constant
	 * across this operation.
	 */
	ASSERT(MUTEX_HELD(&cpu_lock));

	projbuf = fss_allocbuf(FSS_NPSET_BUF, FSS_ALLOC_PROJ);
	zonebuf = fss_allocbuf(FSS_NPSET_BUF, FSS_ALLOC_ZONE);

	mutex_enter(&pidlock);
	mutex_enter(&p->p_lock);

	prev_tk = p->p_task;
	task_change(tk, p);

	/*
	 * Now move threads one by one to their new project.
	 */
	changeproj(p, tk->tk_proj, zone, projbuf, zonebuf);
	if (flags & TASK_FINAL)
		p->p_task->tk_flags |= TASK_FINAL;

	mutex_exit(&pidlock);

	fss_freebuf(zonebuf, FSS_ALLOC_ZONE);
	fss_freebuf(projbuf, FSS_ALLOC_PROJ);
	return (prev_tk);
}
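The locking contract in the block comment above is easier to see from the caller's side. The following is a hypothetical sketch, not lifted from the source tree: it shows only what the stated contract requires (other lwps already stopped, cpu_lock held across the call, p_lock held on return), and the assumption that the caller releases its hold on the returned old task is mine.

/*
 * Hypothetical caller (illustrative only): it exercises just the contract
 * documented above for task_join().
 */
static void
join_task_sketch(task_t *newtk)
{
	proc_t *p = curproc;
	task_t *prev;

	/* The caller has already stopped all other lwps, e.g. via holdlwps(). */

	mutex_enter(&cpu_lock);		/* cpu_lock must be held on entry */
	prev = task_join(newtk, 0);	/* returns with p->p_lock held */
	mutex_exit(&p->p_lock);		/* drop the lock task_join() left held */
	mutex_exit(&cpu_lock);

	task_rele(prev);		/* assumed: caller drops its hold on the old task */
}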
Example #4
/*
 * Move threads from the specified partition to cp_default.  If `unbind_all'
 * is set, move all threads; otherwise move only soft-bound threads.
 */
static int
cpupart_unbind_threads(cpupart_t *pp, boolean_t unbind_all)
{
	void 	*projbuf, *zonebuf;
	kthread_t *t;
	proc_t	*p;
	int	err = 0;
	psetid_t psid = pp->cp_id;

	ASSERT(pool_lock_held());
	ASSERT(MUTEX_HELD(&cpu_lock));

	if (pp == NULL || pp == &cp_default) {
		return (EINVAL);
	}

	/*
	 * Pre-allocate enough buffers for FSS for all active projects and
	 * for all active zones on the system.  Unused buffers will be
	 * freed later by fss_freebuf().
	 */
	projbuf = fss_allocbuf(FSS_NPROJ_BUF, FSS_ALLOC_PROJ);
	zonebuf = fss_allocbuf(FSS_NPROJ_BUF, FSS_ALLOC_ZONE);

	mutex_enter(&pidlock);
	t = curthread;
	do {
		if (t->t_bind_pset == psid) {
again:			p = ttoproc(t);
			mutex_enter(&p->p_lock);
			if (ttoproc(t) != p) {
				/*
				 * lwp_exit has changed this thread's process
				 * pointer before we grabbed its p_lock.
				 */
				mutex_exit(&p->p_lock);
				goto again;
			}

			/*
			 * Only threads with a revocable (soft) binding may be
			 * unbound, unless unbinding of all threads was
			 * requested.
			 */
			if (unbind_all || TB_PSET_IS_SOFT(t)) {
				err = cpupart_bind_thread(t, PS_NONE, 1,
				    projbuf, zonebuf);
				if (err) {
					mutex_exit(&p->p_lock);
					mutex_exit(&pidlock);
					fss_freebuf(projbuf, FSS_ALLOC_PROJ);
					fss_freebuf(zonebuf, FSS_ALLOC_ZONE);
					return (err);
				}
				t->t_bind_pset = PS_NONE;
			}
			mutex_exit(&p->p_lock);
		}
		t = t->t_next;
	} while (t != curthread);

	mutex_exit(&pidlock);
	fss_freebuf(projbuf, FSS_ALLOC_PROJ);
	fss_freebuf(zonebuf, FSS_ALLOC_ZONE);
	return (err);
}
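The all-threads walk above (start at curthread and follow t_next under pidlock until the list wraps around) is a reusable idiom. Below is a hypothetical helper, not taken from the source tree, that applies the same walk to count the threads still bound to a given set; the function name is illustrative.

/*
 * Hypothetical helper (illustrative only): count the threads currently
 * bound to processor set 'psid' using the same circular all-threads walk
 * as cpupart_unbind_threads() above.
 */
static int
pset_count_bound_threads(psetid_t psid)
{
	kthread_t *t;
	int cnt = 0;

	mutex_enter(&pidlock);		/* keeps the thread list stable */
	t = curthread;
	do {
		if (t->t_bind_pset == psid)
			cnt++;
		t = t->t_next;
	} while (t != curthread);
	mutex_exit(&pidlock);

	return (cnt);
}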