Example #1
static int
putacct(idtype_t idtype, id_t id, void *buf, size_t bufsize, int flags)
{
	int error;
	taskid_t tkid;
	proc_t *p;
	task_t *tk;
	void *kbuf;
	struct exacct_globals *acg;

	if (bufsize == 0 || bufsize > EXACCT_MAX_BUFSIZE)
		return (set_errno(EINVAL));

	kbuf = kmem_alloc(bufsize, KM_SLEEP);
	if (copyin(buf, kbuf, bufsize) != 0) {
		error = EFAULT;
		goto out;
	}

	acg = zone_getspecific(exacct_zone_key, curproc->p_zone);
	switch (idtype) {
	case P_PID:
		mutex_enter(&pidlock);
		if ((p = prfind(id)) == NULL) {
			mutex_exit(&pidlock);
			error = ESRCH;
		} else {
			zone_t *zone = p->p_zone;

			tkid = p->p_task->tk_tkid;
			zone_hold(zone);
			mutex_exit(&pidlock);

			error = exacct_tag_proc(&acg->ac_proc, id, tkid, kbuf,
			    bufsize, flags, zone->zone_nodename);
			zone_rele(zone);
		}
		break;
	case P_TASKID:
		if ((tk = task_hold_by_id(id)) != NULL) {
			error = exacct_tag_task(&acg->ac_task, tk, kbuf,
			    bufsize, flags);
			task_rele(tk);
		} else {
			error = ESRCH;
		}
		break;
	default:
		error = EINVAL;
		break;
	}
out:
	kmem_free(kbuf, bufsize);
	return (error == 0 ? error : set_errno(error));
}
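The putacct() handler above is the kernel side of the putacct(2) system call, which tags a process or task with a caller-supplied accounting record. The following is a minimal userland sketch, assuming the documented <sys/exacct.h> interface (EP_RAW marks the payload as opaque bytes); the tag string is purely illustrative. Per the checks above, the buffer must be non-empty and no larger than EXACCT_MAX_BUFSIZE, and the call fails unless extended process accounting has been enabled (for example with acctadm).

#include <sys/exacct.h>
#include <sys/procset.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	/* Illustrative payload; any opaque bytes are accepted with EP_RAW. */
	const char tag[] = "example-annotation";

	if (putacct(P_PID, getpid(), (void *)tag, sizeof (tag), EP_RAW) != 0) {
		perror("putacct");
		return (1);
	}
	return (0);
}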
Example #2
static int
wracct_task(ac_info_t *ac_task, taskid_t tkid, int flag, size_t *sizep)
{
	task_t *tk;
	int error;

	mutex_enter(&ac_task->ac_lock);
	if (ac_task->ac_state == AC_OFF || ac_task->ac_vnode == NULL) {
		mutex_exit(&ac_task->ac_lock);
		return (ENOTACTIVE);
	}
	mutex_exit(&ac_task->ac_lock);

	if ((tk = task_hold_by_id(tkid)) == NULL)
		return (ESRCH);
	error = exacct_assemble_task_usage(ac_task, tk, exacct_commit_callback,
	    NULL, 0, sizep, flag);
	task_rele(tk);

	return (error);
}
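wracct_task() services the task case of the wracct(2) system call, which writes a usage record for a process or task to the open extended-accounting file. Below is a minimal userland sketch, assuming the documented <sys/exacct.h> and <sys/task.h> interfaces; EW_PARTIAL_RECORD is assumed to be the documented flag for requesting a partial record, and the call reports ENOTACTIVE when task accounting is not enabled (the ac_state check above).

#include <sys/exacct.h>
#include <sys/procset.h>
#include <sys/task.h>
#include <stdio.h>

int
main(void)
{
	/*
	 * Ask for a partial usage record for the caller's own task.
	 * EW_PARTIAL_RECORD is an assumption about the public flag name;
	 * the kernel path above ultimately calls
	 * exacct_assemble_task_usage() for the held task.
	 */
	if (wracct(P_TASKID, gettaskid(), EW_PARTIAL_RECORD) != 0) {
		perror("wracct");
		return (1);
	}
	return (0);
}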
Example #3
static int
getacct_task(ac_info_t *ac_task, taskid_t tkid, void *buf, size_t bufsize,
    size_t *sizep)
{
	task_t *tk;
	int error;

	mutex_enter(&ac_task->ac_lock);
	if (ac_task->ac_state == AC_OFF) {
		mutex_exit(&ac_task->ac_lock);
		return (ENOTACTIVE);
	}
	mutex_exit(&ac_task->ac_lock);

	if ((tk = task_hold_by_id(tkid)) == NULL)
		return (ESRCH);
	error = exacct_assemble_task_usage(ac_task, tk,
	    getacct_callback, buf, bufsize, sizep, EW_PARTIAL);
	task_rele(tk);

	return (error);
}
Example #4
static int
pset_bind(psetid_t pset, idtype_t idtype, id_t id, psetid_t *opset)
{
	kthread_t	*tp;
	proc_t		*pp;
	task_t		*tk;
	kproject_t	*kpj;
	contract_t	*ct;
	zone_t		*zptr;
	psetid_t	oldpset;
	int		error = 0;
	void		*projbuf, *zonebuf;

	pool_lock();
	if ((pset != PS_QUERY) && (pset != PS_SOFT) &&
	    (pset != PS_HARD) && (pset != PS_QUERY_TYPE)) {
		/*
		 * Check if the set actually exists before checking
		 * permissions.  This is the historical error
		 * precedence.  Note that if pset was PS_MYID, the
		 * cpupart_get_cpus call will change it to the
		 * processor set id of the caller (or PS_NONE if the
		 * caller is not bound to a processor set).
		 */
		if (pool_state == POOL_ENABLED) {
			pool_unlock();
			return (set_errno(ENOTSUP));
		}
		if (cpupart_get_cpus(&pset, NULL, NULL) != 0) {
			pool_unlock();
			return (set_errno(EINVAL));
		} else if (pset != PS_NONE && secpolicy_pset(CRED()) != 0) {
			pool_unlock();
			return (set_errno(EPERM));
		}
	}

	/*
	 * Pre-allocate enough buffers for FSS for all active projects
	 * and for all active zones on the system.  Unused buffers will
	 * be freed later by fss_freebuf().
	 */
	mutex_enter(&cpu_lock);
	projbuf = fss_allocbuf(FSS_NPROJ_BUF, FSS_ALLOC_PROJ);
	zonebuf = fss_allocbuf(FSS_NPROJ_BUF, FSS_ALLOC_ZONE);

	switch (idtype) {
	case P_LWPID:
		pp = curproc;
		mutex_enter(&pidlock);
		mutex_enter(&pp->p_lock);
		if (id == P_MYID) {
			tp = curthread;
		} else {
			if ((tp = idtot(pp, id)) == NULL) {
				mutex_exit(&pp->p_lock);
				mutex_exit(&pidlock);
				error = ESRCH;
				break;
			}
		}
		error = pset_bind_thread(tp, pset, &oldpset, projbuf, zonebuf);
		mutex_exit(&pp->p_lock);
		mutex_exit(&pidlock);
		break;

	case P_PID:
		mutex_enter(&pidlock);
		if (id == P_MYID) {
			pp = curproc;
		} else if ((pp = prfind(id)) == NULL) {
			mutex_exit(&pidlock);
			error = ESRCH;
			break;
		}
		error = pset_bind_process(pp, pset, &oldpset, projbuf, zonebuf);
		mutex_exit(&pidlock);
		break;

	case P_TASKID:
		mutex_enter(&pidlock);
		if (id == P_MYID)
			id = curproc->p_task->tk_tkid;
		if ((tk = task_hold_by_id(id)) == NULL) {
			mutex_exit(&pidlock);
			error = ESRCH;
			break;
		}
		error = pset_bind_task(tk, pset, &oldpset, projbuf, zonebuf);
		mutex_exit(&pidlock);
		task_rele(tk);
		break;

	case P_PROJID:
		pp = curproc;
		if (id == P_MYID)
			id = curprojid();
		if ((kpj = project_hold_by_id(id, pp->p_zone,
		    PROJECT_HOLD_FIND)) == NULL) {
			error = ESRCH;
			break;
		}
		mutex_enter(&pidlock);
		error = pset_bind_project(kpj, pset, &oldpset, projbuf,
		    zonebuf);
		mutex_exit(&pidlock);
		project_rele(kpj);
		break;

	case P_ZONEID:
		if (id == P_MYID)
			id = getzoneid();
		if ((zptr = zone_find_by_id(id)) == NULL) {
			error = ESRCH;
			break;
		}
		mutex_enter(&pidlock);
		error = pset_bind_zone(zptr, pset, &oldpset, projbuf, zonebuf);
		mutex_exit(&pidlock);
		zone_rele(zptr);
		break;

	case P_CTID:
		if (id == P_MYID)
			id = PRCTID(curproc);
		if ((ct = contract_type_ptr(process_type, id,
		    curproc->p_zone->zone_uniqid)) == NULL) {
			error = ESRCH;
			break;
		}
		mutex_enter(&pidlock);
		error = pset_bind_contract(ct->ct_data, pset, &oldpset, projbuf,
		    zonebuf);
		mutex_exit(&pidlock);
		contract_rele(ct);
		break;

	case P_PSETID:
		if (id == P_MYID || pset != PS_NONE || !INGLOBALZONE(curproc)) {
			error = EINVAL;
			break;
		}
		error = pset_unbind(id, projbuf, zonebuf, idtype);
		break;

	case P_ALL:
		if (id == P_MYID || pset != PS_NONE || !INGLOBALZONE(curproc)) {
			error = EINVAL;
			break;
		}
		error = pset_unbind(PS_NONE, projbuf, zonebuf, idtype);
		break;

	default:
		error = EINVAL;
		break;
	}

	fss_freebuf(projbuf, FSS_ALLOC_PROJ);
	fss_freebuf(zonebuf, FSS_ALLOC_ZONE);
	mutex_exit(&cpu_lock);
	pool_unlock();

	if (error != 0)
		return (set_errno(error));
	if (opset != NULL) {
		if (copyout(&oldpset, opset, sizeof (psetid_t)) != 0)
			return (set_errno(EFAULT));
	}
	return (0);
}
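The function above is the kernel side of pset_bind(2). As a usage sketch, the documented <sys/pset.h> interface can query the calling LWP's current binding: PS_QUERY bypasses the pool-state and permission checks above and returns the bound set, or PS_NONE, through opset. A minimal sketch, assuming the standard libc declaration of pset_bind():

#include <sys/pset.h>
#include <sys/procset.h>
#include <stdio.h>

int
main(void)
{
	psetid_t opset;

	/*
	 * PS_QUERY reports the processor set the calling LWP is bound to
	 * without changing the binding; opset receives PS_NONE if the LWP
	 * is unbound.
	 */
	if (pset_bind(PS_QUERY, P_LWPID, P_MYID, &opset) != 0) {
		perror("pset_bind");
		return (1);
	}
	(void) printf("current pset binding: %d\n", (int)opset);
	return (0);
}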
Example #5
/*
 * task_t *task_create(projid_t, zone *)
 *
 * Overview
 *   A process constructing a new task calls task_create() to construct and
 *   preinitialize the task for the appropriate destination project.  Only one
 *   task, the primordial task0, is not created with task_create().
 *
 * Return values
 *   Pointer to the new, preinitialized task.
 *
 * Caller's context
 *   Caller's context should be safe for KM_SLEEP allocations.
 *   The caller should appropriately bump the kpj_ntasks counter on the
 *   project that contains this task.
 */
task_t *
task_create(projid_t projid, zone_t *zone)
{
	task_t *tk = kmem_cache_alloc(task_cache, KM_SLEEP);
	task_t *ancestor_tk;
	taskid_t tkid;
	task_usage_t *tu = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP);
	mod_hash_hndl_t hndl;
	rctl_set_t *set = rctl_set_create();
	rctl_alloc_gp_t *gp;
	rctl_entity_p_t e;

	bzero(tk, sizeof (task_t));

	tk->tk_tkid = tkid = id_alloc(taskid_space);
	tk->tk_nlwps = 0;
	tk->tk_nlwps_ctl = INT_MAX;
	tk->tk_nprocs = 0;
	tk->tk_nprocs_ctl = INT_MAX;
	tk->tk_usage = tu;
	tk->tk_inherited = kmem_zalloc(sizeof (task_usage_t), KM_SLEEP);
	tk->tk_proj = project_hold_by_id(projid, zone, PROJECT_HOLD_INSERT);
	tk->tk_flags = TASK_NORMAL;
	tk->tk_commit_next = NULL;

	/*
	 * Copy ancestor task's resource controls.
	 */
	zone_task_hold(zone);
	mutex_enter(&curproc->p_lock);
	ancestor_tk = curproc->p_task;
	task_hold(ancestor_tk);
	tk->tk_zone = zone;
	mutex_exit(&curproc->p_lock);

	for (;;) {
		gp = rctl_set_dup_prealloc(ancestor_tk->tk_rctls);

		mutex_enter(&ancestor_tk->tk_rctls->rcs_lock);
		if (rctl_set_dup_ready(ancestor_tk->tk_rctls, gp))
			break;

		mutex_exit(&ancestor_tk->tk_rctls->rcs_lock);

		rctl_prealloc_destroy(gp);
	}

	/*
	 * At this point, curproc does not have the appropriate linkage
	 * through the task to the project. So, rctl_set_dup should only
	 * copy the rctls, and leave the callbacks for later.
	 */
	e.rcep_p.task = tk;
	e.rcep_t = RCENTITY_TASK;
	tk->tk_rctls = rctl_set_dup(ancestor_tk->tk_rctls, curproc, curproc, &e,
	    set, gp, RCD_DUP);
	mutex_exit(&ancestor_tk->tk_rctls->rcs_lock);

	rctl_prealloc_destroy(gp);

	/*
	 * Record the ancestor task's ID for use by extended accounting.
	 */
	tu->tu_anctaskid = ancestor_tk->tk_tkid;
	task_rele(ancestor_tk);

	/*
	 * Put new task structure in the hash table.
	 */
	(void) mod_hash_reserve(task_hash, &hndl);
	mutex_enter(&task_hash_lock);
	ASSERT(task_find(tkid, zone->zone_id) == NULL);
	if (mod_hash_insert_reserve(task_hash, (mod_hash_key_t)(uintptr_t)tkid,
	    (mod_hash_val_t *)tk, hndl) != 0) {
		mod_hash_cancel(task_hash, &hndl);
		panic("unable to insert task %d(%p)", tkid, (void *)tk);
	}
	mutex_exit(&task_hash_lock);

	tk->tk_nprocs_kstat = task_kstat_create(tk, zone);
	return (tk);
}
Example #6
/*
 * Remove zombie children from the process table.
 */
void
freeproc(proc_t *p)
{
	proc_t *q;
	task_t *tk;

	ASSERT(p->p_stat == SZOMB);
	ASSERT(p->p_tlist == NULL);
	ASSERT(MUTEX_HELD(&pidlock));

	sigdelq(p, NULL, 0);
	if (p->p_killsqp) {
		siginfofree(p->p_killsqp);
		p->p_killsqp = NULL;
	}

	prfree(p);	/* inform /proc */

	/*
	 * Don't free the init process.
	 * Other dying processes will access it.
	 */
	if (p == proc_init)
		return;


	/*
	 * We wait until now to free the cred structure because a
	 * zombie process's credentials may be examined by /proc.
	 * No cred locking needed because there are no threads at this point.
	 */
	upcount_dec(crgetruid(p->p_cred), crgetzoneid(p->p_cred));
	crfree(p->p_cred);
	if (p->p_corefile != NULL) {
		corectl_path_rele(p->p_corefile);
		p->p_corefile = NULL;
	}
	if (p->p_content != NULL) {
		corectl_content_rele(p->p_content);
		p->p_content = NULL;
	}

	if (p->p_nextofkin && !((p->p_nextofkin->p_flag & SNOWAIT) ||
	    (PTOU(p->p_nextofkin)->u_signal[SIGCLD - 1] == SIG_IGN))) {
		/*
		 * p_utime and p_stime are set to their final values at
		 * process exit, so the totals accumulated here are
		 * correct.
		 */
		p->p_nextofkin->p_cutime += p->p_utime;
		p->p_nextofkin->p_cstime += p->p_stime;

		p->p_nextofkin->p_cacct[LMS_USER] += p->p_acct[LMS_USER];
		p->p_nextofkin->p_cacct[LMS_SYSTEM] += p->p_acct[LMS_SYSTEM];
		p->p_nextofkin->p_cacct[LMS_TRAP] += p->p_acct[LMS_TRAP];
		p->p_nextofkin->p_cacct[LMS_TFAULT] += p->p_acct[LMS_TFAULT];
		p->p_nextofkin->p_cacct[LMS_DFAULT] += p->p_acct[LMS_DFAULT];
		p->p_nextofkin->p_cacct[LMS_KFAULT] += p->p_acct[LMS_KFAULT];
		p->p_nextofkin->p_cacct[LMS_USER_LOCK]
		    += p->p_acct[LMS_USER_LOCK];
		p->p_nextofkin->p_cacct[LMS_SLEEP] += p->p_acct[LMS_SLEEP];
		p->p_nextofkin->p_cacct[LMS_WAIT_CPU]
		    += p->p_acct[LMS_WAIT_CPU];
		p->p_nextofkin->p_cacct[LMS_STOPPED] += p->p_acct[LMS_STOPPED];

		p->p_nextofkin->p_cru.minflt	+= p->p_ru.minflt;
		p->p_nextofkin->p_cru.majflt	+= p->p_ru.majflt;
		p->p_nextofkin->p_cru.nswap	+= p->p_ru.nswap;
		p->p_nextofkin->p_cru.inblock	+= p->p_ru.inblock;
		p->p_nextofkin->p_cru.oublock	+= p->p_ru.oublock;
		p->p_nextofkin->p_cru.msgsnd	+= p->p_ru.msgsnd;
		p->p_nextofkin->p_cru.msgrcv	+= p->p_ru.msgrcv;
		p->p_nextofkin->p_cru.nsignals	+= p->p_ru.nsignals;
		p->p_nextofkin->p_cru.nvcsw	+= p->p_ru.nvcsw;
		p->p_nextofkin->p_cru.nivcsw	+= p->p_ru.nivcsw;
		p->p_nextofkin->p_cru.sysc	+= p->p_ru.sysc;
		p->p_nextofkin->p_cru.ioch	+= p->p_ru.ioch;

	}

	q = p->p_nextofkin;
	if (q && q->p_orphan == p)
		q->p_orphan = p->p_nextorph;
	else if (q) {
		for (q = q->p_orphan; q; q = q->p_nextorph)
			if (q->p_nextorph == p)
				break;
		ASSERT(q && q->p_nextorph == p);
		q->p_nextorph = p->p_nextorph;
	}

	/*
	 * The process table slot is being freed, so it is now safe to give up
	 * task and project membership.
	 */
	mutex_enter(&p->p_lock);
	tk = p->p_task;
	task_detach(p);
	mutex_exit(&p->p_lock);

	proc_detach(p);
	pid_exit(p, tk);	/* frees pid and proc structure */

	task_rele(tk);
}
/*
 * taskid_t tasksys_settaskid(projid_t projid, uint_t flags);
 *
 * Overview
 *   Place the calling process in a new task if sufficiently privileged.  If the
 *   present task is finalized, the process may not create a new task.
 *
 * Return values
 *   0 on success, errno on failure.
 */
static long
tasksys_settaskid(projid_t projid, uint_t flags)
{
	proc_t *p = ttoproc(curthread);
	kproject_t *oldpj;
	kproject_t *kpj;
	task_t *tk, *oldtk;
	rctl_entity_p_t e;
	zone_t *zone;
	int rctlfail = 0;

	if (secpolicy_tasksys(CRED()) != 0)
		return (set_errno(EPERM));

	if (projid < 0 || projid > MAXPROJID)
		return (set_errno(EINVAL));

	if (flags & ~TASK_FINAL)
		return (set_errno(EINVAL));

	mutex_enter(&pidlock);
	if (p->p_task->tk_flags & TASK_FINAL) {
		mutex_exit(&pidlock);
		return (set_errno(EACCES));
	}
	mutex_exit(&pidlock);

	/*
	 * Try to stop all other lwps in the process while we're changing
	 * our project.  This way, curthread doesn't need to grab its own
	 * thread_lock to find its project ID (see curprojid()).  If this
	 * is the /proc agent lwp, we know that the other lwps are already
	 * held.  If we failed to hold all lwps, bail out and return EINTR.
	 */
	if (curthread != p->p_agenttp && !holdlwps(SHOLDFORK1))
		return (set_errno(EINTR));
	/*
	 * Put a hold on our new project and make sure that nobody is
	 * trying to bind it to a pool while we're joining.
	 */
	kpj = project_hold_by_id(projid, p->p_zone, PROJECT_HOLD_INSERT);
	e.rcep_p.proj = kpj;
	e.rcep_t = RCENTITY_PROJECT;

	mutex_enter(&p->p_lock);
	oldpj = p->p_task->tk_proj;
	zone = p->p_zone;

	mutex_enter(&zone->zone_nlwps_lock);
	mutex_enter(&zone->zone_mem_lock);

	if (kpj->kpj_nlwps + p->p_lwpcnt > kpj->kpj_nlwps_ctl)
		if (rctl_test_entity(rc_project_nlwps, kpj->kpj_rctls, p, &e,
		    p->p_lwpcnt, 0) & RCT_DENY)
			rctlfail = 1;

	if (kpj->kpj_ntasks + 1 > kpj->kpj_ntasks_ctl)
		if (rctl_test_entity(rc_project_ntasks, kpj->kpj_rctls, p, &e,
		    1, 0) & RCT_DENY)
			rctlfail = 1;

	if (kpj->kpj_data.kpd_locked_mem + p->p_locked_mem >
	    kpj->kpj_data.kpd_locked_mem_ctl)
		if (rctl_test_entity(rc_project_locked_mem, kpj->kpj_rctls, p,
		    &e, p->p_locked_mem, 0) & RCT_DENY)
			rctlfail = 1;

	mutex_enter(&(kpj->kpj_data.kpd_crypto_lock));
	if (kpj->kpj_data.kpd_crypto_mem + p->p_crypto_mem >
	    kpj->kpj_data.kpd_crypto_mem_ctl)
		if (rctl_test_entity(rc_project_crypto_mem, kpj->kpj_rctls, p,
		    &e, p->p_crypto_mem, 0) & RCT_DENY)
			rctlfail = 1;

	if (rctlfail) {
		mutex_exit(&(kpj->kpj_data.kpd_crypto_lock));
		mutex_exit(&zone->zone_mem_lock);
		mutex_exit(&zone->zone_nlwps_lock);
		if (curthread != p->p_agenttp)
			continuelwps(p);
		mutex_exit(&p->p_lock);
		return (set_errno(EAGAIN));
	}
	kpj->kpj_data.kpd_crypto_mem += p->p_crypto_mem;
	mutex_exit(&(kpj->kpj_data.kpd_crypto_lock));
	kpj->kpj_data.kpd_locked_mem += p->p_locked_mem;
	kpj->kpj_nlwps += p->p_lwpcnt;
	kpj->kpj_ntasks++;

	oldpj->kpj_data.kpd_locked_mem -= p->p_locked_mem;
	mutex_enter(&(oldpj->kpj_data.kpd_crypto_lock));
	oldpj->kpj_data.kpd_crypto_mem -= p->p_crypto_mem;
	mutex_exit(&(oldpj->kpj_data.kpd_crypto_lock));
	oldpj->kpj_nlwps -= p->p_lwpcnt;

	mutex_exit(&zone->zone_mem_lock);
	mutex_exit(&zone->zone_nlwps_lock);
	mutex_exit(&p->p_lock);

	mutex_enter(&kpj->kpj_poolbind);
	tk = task_create(projid, curproc->p_zone);
	mutex_enter(&cpu_lock);
	/*
	 * Returns with p_lock held.
	 */
	oldtk = task_join(tk, flags);
	if (curthread != p->p_agenttp)
		continuelwps(p);
	mutex_exit(&p->p_lock);
	mutex_exit(&cpu_lock);
	mutex_exit(&kpj->kpj_poolbind);
	task_rele(oldtk);
	project_rele(kpj);
	return (tk->tk_tkid);
}
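tasksys_settaskid() above backs the settaskid(2) system call. The following is a minimal userland sketch, assuming the documented <sys/task.h> declarations of settaskid(), getprojid(), and the TASK_FINAL flag: it creates a new task in the caller's current project and marks it final, which is the TASK_FINAL state that makes the EACCES check above reject any later settaskid() from that task. The caller needs the privilege verified by secpolicy_tasksys(), so an unprivileged run reports EPERM.

#include <sys/task.h>
#include <sys/types.h>
#include <stdio.h>

int
main(void)
{
	taskid_t tkid;

	/*
	 * Enter a new, final task in the current project.  An unprivileged
	 * caller fails the secpolicy_tasksys() check above and gets EPERM;
	 * TASK_FINAL prevents processes in the new task from calling
	 * settaskid() again (the EACCES path above).
	 */
	tkid = settaskid(getprojid(), TASK_FINAL);
	if (tkid == (taskid_t)-1) {
		perror("settaskid");
		return (1);
	}
	(void) printf("now in task %d\n", (int)tkid);
	return (0);
}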