Code example #1
File: dr_quiesce.c  Project: andreiw/polaris
static void
dr_start_user_threads(void)
{
	kthread_id_t tp;

	mutex_enter(&pidlock);

	/* walk all threads and release them */
	for (tp = curthread->t_next; tp != curthread; tp = tp->t_next) {
		proc_t *p = ttoproc(tp);

		/* skip kernel threads */
		if (ttoproc(tp)->p_as == &kas)
			continue;

		mutex_enter(&p->p_lock);
		tp->t_proc_flag &= ~TP_CHKPT;
		mutex_exit(&p->p_lock);

		thread_lock(tp);
		if (CPR_ISTOPPED(tp)) {
			/* back on the runq */
			tp->t_schedflag |= TS_RESUME;
			setrun_locked(tp);
		}
		thread_unlock(tp);
	}

	mutex_exit(&pidlock);
}
Code example #2
File: namevno.c  Project: apprisi/illumos-gate
/*
 * Close a mounted file descriptor.
 * Remove any locks and apply the VOP_CLOSE operation to the vnode for
 * the file descriptor.
 */
static int
nm_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *crp,
	caller_context_t *ct)
{
	struct namenode *nodep = VTONM(vp);
	int error = 0;

	(void) cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
	cleanshares(vp, ttoproc(curthread)->p_pid);
	error = VOP_CLOSE(nodep->nm_filevp, flag, count, offset, crp, ct);
	if (count == 1) {
		(void) VOP_FSYNC(nodep->nm_filevp, FSYNC, crp, ct);
		/*
		 * Before VN_RELE() we need to remove the vnode from
		 * the hash table.  We should only do so in the  NMNMNT case.
		 * In other cases, nodep->nm_filep keeps a reference
		 * to nm_filevp and the entry in the hash table doesn't
		 * hurt.
		 */
		if ((nodep->nm_flag & NMNMNT) != 0) {
			mutex_enter(&ntable_lock);
			nameremove(nodep);
			mutex_exit(&ntable_lock);
		}
		VN_RELE(nodep->nm_filevp);
	}
	return (error);
}
Code example #3
File: sockcommon_vnops.c  Project: pcd1193182/openzfs
/*ARGSUSED*/
static int
socket_vop_close(struct vnode *vp, int flag, int count, offset_t offset,
    struct cred *cr, caller_context_t *ct)
{
	struct sonode *so;
	int error = 0;

	so = VTOSO(vp);
	ASSERT(vp->v_type == VSOCK);

	cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
	cleanshares(vp, ttoproc(curthread)->p_pid);

	if (vp->v_stream)
		strclean(vp);

	if (count > 1) {
		dprint(2, ("socket_vop_close: count %d\n", count));
		return (0);
	}

	mutex_enter(&so->so_lock);
	if (--so->so_count == 0) {
		/*
		 * Initiate connection shutdown.
		 */
		mutex_exit(&so->so_lock);
		error = socket_close_internal(so, flag, cr);
	} else {
		mutex_exit(&so->so_lock);
	}

	return (error);
}
Code example #4
File: xmem_vnops.c  Project: andreiw/polaris
/* ARGSUSED1 */
static int
xmem_close(struct vnode *vp, int flag, int count, offset_t offset,
	struct cred *cred)
{
	cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
	cleanshares(vp, ttoproc(curthread)->p_pid);
	return (0);
}
Code example #5
File: devfs_vnops.c  Project: andreiw/polaris
/*ARGSUSED1*/
static int
devfs_close(struct vnode *vp, int flag, int count,
    offset_t offset, struct cred *cred)
{
	struct dv_node	*dv = VTODV(vp);

	dcmn_err2(("devfs_close %s\n", dv->dv_name));
	ASSERT(vp->v_type == VDIR);

	cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
	cleanshares(vp, ttoproc(curthread)->p_pid);
	return (0);
}
Code example #6
File: schedctl.c  Project: apprisi/illumos-gate
/*
 * Clean up scheduler activations state associated with an exiting
 * (or execing) lwp.  t is always the current thread.
 */
void
schedctl_lwp_cleanup(kthread_t *t)
{
	sc_shared_t	*ssp = t->t_schedctl;
	proc_t		*p = ttoproc(t);
	sc_page_ctl_t	*pagep;
	index_t		index;

	ASSERT(MUTEX_NOT_HELD(&p->p_lock));

	thread_lock(t);		/* protect against ts_tick and ts_update */
	t->t_schedctl = NULL;
	t->t_sc_uaddr = 0;
	thread_unlock(t);

	/*
	 * Remove the context op to avoid the final call to
	 * schedctl_save when switching away from this lwp.
	 */
	(void) removectx(t, ssp, schedctl_save, schedctl_restore,
	    schedctl_fork, NULL, NULL, NULL);

	/*
	 * Do not unmap the shared page until the process exits.
	 * User-level library code relies on this for adaptive mutex locking.
	 */
	mutex_enter(&p->p_sc_lock);
	ssp->sc_state = SC_FREE;
	pagep = schedctl_page_lookup(ssp);
	index = (index_t)(ssp - pagep->spc_base);
	BT_CLEAR(pagep->spc_map, index);
	pagep->spc_space += sizeof (sc_shared_t);
	mutex_exit(&p->p_sc_lock);
}
Code example #7
File: cpr_uthread.c  Project: andreiw/polaris
/*
 * CPR user thread related support routines
 */
void
cpr_signal_user(int sig)
{
	/*
	 * The signals SIGTHAW and SIGFREEZE cannot yet be sent to every
	 * thread, since openwin catches every signal and its default
	 * action is to exit.  We also still need to implement true
	 * SIGFREEZE and SIGTHAW handling to stop threads.
	 */
	struct proc *p;

	mutex_enter(&pidlock);

	for (p = practive; p; p = p->p_next) {
		/* only user threads */
		if (p->p_exec == NULL || p->p_stat == SZOMB ||
			p == proc_init || p == ttoproc(curthread))
			continue;

		mutex_enter(&p->p_lock);
		sigtoproc(p, NULL, sig);
		mutex_exit(&p->p_lock);
	}
	mutex_exit(&pidlock);

	DELAY(MICROSEC);
}
Code example #8
/*
 * Call to check if can have access after a cache miss has occurred.
 * Only read access is allowed, do not call this routine if want
 * to write.
 * Returns 1 if yes, 0 if no.
 */
int
cachefs_cd_access_miss(fscache_t *fscp)
{
	cachefscache_t *cachep;
	pid_t pid;

#ifdef CFS_CD_DEBUG
	ASSERT(curthread->t_flag & T_CD_HELD);
#endif

	/* should not get called if connected */
	ASSERT(fscp->fs_cdconnected != CFS_CD_CONNECTED);

	/* if no back file system, then no */
	if (fscp->fs_backvfsp == NULL)
		return (0);

	/* if daemon is not running, then yes */
	if (fscp->fs_cddaemonid == 0) {
		return (1);
	}

	pid = ttoproc(curthread)->p_pid;
	cachep = fscp->fs_cache;

	/* if daemon is running, only daemon is allowed to have access */
	if ((fscp->fs_cddaemonid != pid) &&
	    (cachep->c_rootdaemonid != pid)) {
		return (0);
	}

	return (1);
}
Code example #9
static kthread_t *
getlwpptr(id_t id)
{
	proc_t		*p;
	kthread_t	*t;

	ASSERT(MUTEX_HELD(&(ttoproc(curthread)->p_lock)));

	if (id == P_MYID)
		t = curthread;
	else {
		p = ttoproc(curthread);
		t = idtot(p, id);
	}

	return (t);
}
Code example #10
File: lwp_create.c  Project: andreiw/polaris
/*
 * Exit the calling lwp
 */
void
syslwp_exit()
{
	proc_t *p = ttoproc(curthread);

	mutex_enter(&p->p_lock);
	lwp_exit();
	/* NOTREACHED */
}
Code example #11
File: condvar.c  Project: BjoKaSH/mac-zfs
/*
 * Like cv_wait_sig_swap but allows the caller to indicate (with a
 * non-NULL sigret) that they will take care of signalling the cv
 * after wakeup, if necessary.  This is a vile hack that should only
 * be used when no other option is available; almost all callers
 * should just use cv_wait_sig_swap (which takes care of the cv_signal
 * stuff automatically) instead.
 */
int
cv_wait_sig_swap_core(kcondvar_t *cvp, kmutex_t *mp, int *sigret)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int rval = 1;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return (rval);
	}

	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	t->t_kpri_req = 0;	/* don't need kernel priority */
	cv_block_sig(t, (condvar_impl_t *)cvp);
	/* I can be swapped now */
	curthread->t_schedflag &= ~TS_DONT_SWAP;
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t))
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	/* TS_DONT_SWAP set by disp() */
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	mutex_enter(mp);
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0) {
		if (sigret != NULL)
			*sigret = signalled;	/* just tell the caller */
		else if (signalled)
			cv_signal(cvp);	/* avoid consuming the cv_signal() */
	}
	return (rval);
}
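The header comment on cv_wait_sig_swap_core() describes a contract that is easy to misuse: a caller that passes a non-NULL sigret takes over responsibility for re-issuing any cv_signal() it may have consumed. The following is a hypothetical caller written only against the interface shown above; the wait loop, the ready() predicate, and the surrounding mutex are illustrative assumptions, not code from the source.

/*
 * Hypothetical caller of cv_wait_sig_swap_core(), sketching the sigret
 * contract described in the example above.  The condition variable,
 * mutex, and "work is ready" predicate are assumptions for this sketch.
 */
static int
wait_for_work(kcondvar_t *cvp, kmutex_t *mp, int (*ready)(void *), void *arg)
{
	int signalled = 0;

	mutex_enter(mp);
	while (!ready(arg)) {
		if (cv_wait_sig_swap_core(cvp, mp, &signalled) == 0) {
			/*
			 * Interrupted by a signal.  Because we passed a
			 * non-NULL sigret, we must hand any consumed
			 * wakeup back to another waiter ourselves.
			 */
			if (signalled)
				cv_signal(cvp);
			mutex_exit(mp);
			return (EINTR);
		}
	}
	mutex_exit(mp);
	return (0);
}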
Code example #12
File: pset.c  Project: MatiasNAmendola/AuroraUX-SunOS
static int
pset_bind_thread(kthread_t *tp, psetid_t pset, psetid_t *oldpset, void *projbuf,
    void *zonebuf)
{
	int error = 0;

	ASSERT(pool_lock_held());
	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(MUTEX_HELD(&ttoproc(tp)->p_lock));

	*oldpset = tp->t_bind_pset;

	switch (pset) {
	case PS_SOFT:
		TB_PSET_SOFT_SET(tp);
		break;

	case PS_HARD:
		TB_PSET_HARD_SET(tp);
		break;

	case PS_QUERY:
		break;

	case PS_QUERY_TYPE:
		*oldpset = TB_PSET_IS_SOFT(tp) ? PS_SOFT : PS_HARD;
		break;

	default:
		/*
		 * Must have the same UID as the target process or
		 * have PRIV_PROC_OWNER privilege.
		 */
		if (!hasprocperm(tp->t_cred, CRED()))
			return (EPERM);
		/*
		 * Unbinding of an unbound thread should always succeed.
		 */
		if (*oldpset == PS_NONE && pset == PS_NONE)
			return (0);
		/*
		 * Only privileged processes can move threads from psets with
		 * PSET_NOESCAPE attribute.
		 */
		if ((tp->t_cpupart->cp_attr & PSET_NOESCAPE) &&
		    secpolicy_pset(CRED()) != 0)
			return (EPERM);
		if ((error = cpupart_bind_thread(tp, pset, 0,
		    projbuf, zonebuf)) == 0)
			tp->t_bind_pset = pset;

		break;
	}

	return (error);
}
Code example #13
File: xmem_vnops.c  Project: andreiw/polaris
static int
xmem_freesp(struct vnode *vp, struct flock64 *lp, int flag)
{
	register int i;
	register struct xmemnode *xp = VTOXN(vp);
	int error;

	ASSERT(vp->v_type == VREG);
	ASSERT(lp->l_start >= 0);

	if (lp->l_len != 0)
		return (EINVAL);

	rw_enter(&xp->xn_rwlock, RW_WRITER);
	if (xp->xn_size == lp->l_start) {
		rw_exit(&xp->xn_rwlock);
		return (0);
	}

	/*
	 * Check for any mandatory locks on the range
	 */
	if (MANDLOCK(vp, xp->xn_mode)) {
		long save_start;

		save_start = lp->l_start;

		if (xp->xn_size < lp->l_start) {
			/*
			 * "Truncate up" case: need to make sure there
			 * is no lock beyond current end-of-file. To
			 * do so, we need to set l_start to the size
			 * of the file temporarily.
			 */
			lp->l_start = xp->xn_size;
		}
		lp->l_type = F_WRLCK;
		lp->l_sysid = 0;
		lp->l_pid = ttoproc(curthread)->p_pid;
		i = (flag & (FNDELAY|FNONBLOCK)) ? 0 : SLPFLCK;
		if ((i = reclock(vp, lp, i, 0, lp->l_start, NULL)) != 0 ||
		    lp->l_type != F_UNLCK) {
			rw_exit(&xp->xn_rwlock);
			return (i ? i : EAGAIN);
		}

		lp->l_start = save_start;
	}

	rw_enter(&xp->xn_contents, RW_WRITER);
	error = xmemnode_trunc((struct xmount *)VFSTOXM(vp->v_vfsp),
						xp, lp->l_start);
	rw_exit(&xp->xn_contents);
	rw_exit(&xp->xn_rwlock);
	return (error);
}
Code example #14
File: afs_vnop_flock.c  Project: jblaine/openafs
static int
lockIdcmp2(struct AFS_FLOCK *flock1, struct vcache *vp,
	   struct SimpleLocks *alp, int onlymine, int clid)
{
    struct SimpleLocks *slp;
#if	defined(AFS_SUN5_ENV)
    proc_t *procp = ttoproc(curthread);
#else
#if !defined(AFS_AIX41_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_SGI65_ENV) && !defined(AFS_DARWIN_ENV) && !defined(AFS_XBSD_ENV)
#ifdef AFS_SGI64_ENV
    afs_proc_t *procp = curprocp;
#elif defined(UKERNEL)
    afs_proc_t *procp = get_user_struct()->u_procp;
#else
    afs_proc_t *procp = u.u_procp;
#endif /* AFS_SGI64_ENV */
#endif
#endif

    if (alp) {
#if	defined(AFS_AIX_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
	if (flock1->l_sysid != alp->sysid) {
	    return 1;
	}
#endif
	if ((flock1->l_pid == alp->pid) ||
#if defined(AFS_AIX41_ENV) || defined(AFS_LINUX20_ENV) || defined(AFS_HPUX_ENV)
	    (!onlymine && (flock1->l_pid == getppid()))
#else
#if defined(AFS_SGI65_ENV) || defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
	    /* XXX check this. used to be *only* irix for some reason. */
	    (!onlymine && (flock1->l_pid == clid))
#else
	    (!onlymine && (flock1->l_pid == procp->p_ppid))
#endif
#endif
	    ) {
	    return 0;
	}
	return 1;
    }

    for (slp = vp->slocks; slp; slp = slp->next) {
#if defined(AFS_HAVE_FLOCK_SYSID)
	if (flock1->l_sysid != slp->sysid) {
	    continue;
	}
#endif
	if (flock1->l_pid == slp->pid) {
	    return 0;
	}
    }
    return (1);			/* failure */
}
Code example #15
void
tnf_thread_create(kthread_t *t)
{
	/* If the allocation fails, this thread doesn't trace */
	t->t_tnf_tpdp = kmem_zalloc(sizeof (tnf_ops_t), KM_NOSLEEP);

	TNF_PROBE_3(thread_create, "thread", /* CSTYLED */,
		tnf_kthread_id,	tid,		t,
		tnf_pid,	pid,		ttoproc(t)->p_pid,
		tnf_symbol,	start_pc,	t->t_startpc);
}
Code example #16
/*
 * projid_t tasksys_getprojid(void);
 *
 * Overview
 *   Return the current project ID for this process.
 *
 * Return value
 *   The ID for the project to which the current process belongs.
 */
static long
tasksys_getprojid()
{
	long ret;
	proc_t *p = ttoproc(curthread);

	mutex_enter(&pidlock);
	ret = p->p_task->tk_proj->kpj_id;
	mutex_exit(&pidlock);
	return (ret);
}
Code example #17
File: cred.c  Project: MatiasNAmendola/AuroraUX-SunOS
/*
 * Return the (held) credentials for the current running process.
 */
cred_t *
crgetcred(void)
{
	cred_t *cr;
	proc_t *p;

	p = ttoproc(curthread);
	mutex_enter(&p->p_crlock);
	crhold(cr = p->p_cred);
	mutex_exit(&p->p_crlock);
	return (cr);
}
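crgetcred() hands back a credential with an extra hold on it, so every caller must drop that hold when finished. Below is a minimal sketch of the pairing; it assumes the standard illumos crfree() and crgetuid() credential routines, which are not part of the excerpt above.

/*
 * Sketch only: release the hold taken by crgetcred() with crfree()
 * once the credential has been consulted.
 */
static uid_t
current_proc_uid(void)
{
	cred_t *cr = crgetcred();
	uid_t uid = crgetuid(cr);

	crfree(cr);
	return (uid);
}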
Code example #18
File: schedctl.c  Project: andreiw/polaris
/*
 * If the sc_sigblock field is set for the specified thread, set
 * its signal mask to block all maskable signals, then clear the
 * sc_sigblock field.  This finishes what user-level code requested
 * to be done when it set tdp->sc_shared->sc_sigblock non-zero.
 * Called by signal-related code that holds the process's p_lock.
 */
void
schedctl_finish_sigblock(kthread_t *t)
{
	sc_shared_t *tdp = t->t_schedctl;

	ASSERT(MUTEX_HELD(&ttoproc(t)->p_lock));

	if (tdp && tdp->sc_sigblock) {
		t->t_hold.__sigbits[0] = FILLSET0 & ~CANTMASK0;
		t->t_hold.__sigbits[1] = FILLSET1 & ~CANTMASK1;
		tdp->sc_sigblock = 0;
	}
}
Code example #19
File: schedctl.c  Project: apprisi/illumos-gate
/*
 * On fork, remove inherited mappings from the child's address space.
 * The child's threads must call schedctl() to get new shared mappings.
 */
static void
schedctl_fork(kthread_t *pt, kthread_t *ct)
{
	proc_t *pp = ttoproc(pt);
	proc_t *cp = ttoproc(ct);
	sc_page_ctl_t *pagep;

	ASSERT(ct->t_schedctl == NULL);

	/*
	 * Do this only once, whether we are doing fork1() or forkall().
	 * Don't do it at all if the child process is a child of vfork()
	 * because a child of vfork() borrows the parent's address space.
	 */
	if (pt != curthread || (cp->p_flag & SVFORK))
		return;

	mutex_enter(&pp->p_sc_lock);
	for (pagep = pp->p_pagep; pagep != NULL; pagep = pagep->spc_next)
		(void) as_unmap(cp->p_as, pagep->spc_uaddr, PAGESIZE);
	mutex_exit(&pp->p_sc_lock);
}
Code example #20
File: afs_vnop_flock.c  Project: jblaine/openafs
void
lockIdSet(struct AFS_FLOCK *flock, struct SimpleLocks *slp, int clid)
{
    proc_t *procp = ttoproc(curthread);

    if (slp) {
	slp->sysid = 0;
	slp->pid = procp->p_pid;
    } else {
	flock->l_sysid = 0;
	flock->l_pid = procp->p_pid;
    }
}
Code example #21
File: cpr_uthread.c  Project: andreiw/polaris
/*
 * start all threads that were stopped for checkpoint.
 */
void
cpr_start_user_threads()
{
	kthread_id_t tp;
	proc_t *p;

	mutex_enter(&pidlock);
	tp = curthread->t_next;
	do {
		p = ttoproc(tp);
		/*
		 * kernel threads are callback'ed rather than setrun.
		 */
		if (ttoproc(tp)->p_as == &kas)
			continue;
		/*
		 * t_proc_flag should already have been cleared; clear it
		 * again here just to be sure.
		 */
		mutex_enter(&p->p_lock);
		tp->t_proc_flag &= ~TP_CHKPT;
		mutex_exit(&p->p_lock);

		thread_lock(tp);
		if (CPR_ISTOPPED(tp)) {

			/*
			 * put it back on the runq
			 */
			tp->t_schedflag |= TS_RESUME;
			setrun_locked(tp);
		}
		thread_unlock(tp);
		/*
		 * DEBUG - Keep track of current and next thread pointer.
		 */
	} while ((tp = tp->t_next) != curthread);

	mutex_exit(&pidlock);
}
Code example #22
File: syscall.c  Project: andreiw/polaris
/*
 * Called from post_syscall() when a deferred singlestep is to be taken.
 */
void
deferred_singlestep_trap(caddr_t pc)
{
	proc_t *p = ttoproc(curthread);
	klwp_t *lwp = ttolwp(curthread);
	pcb_t *pcb = &lwp->lwp_pcb;
	uint_t fault = 0;
	k_siginfo_t siginfo;

	bzero(&siginfo, sizeof (siginfo));

	/*
	 * If both NORMAL_STEP and WATCH_STEP are in
	 * effect, give precedence to NORMAL_STEP.
	 * If neither is set, user must have set the
	 * PS_T bit in %efl; treat this as NORMAL_STEP.
	 */
	if ((pcb->pcb_flags & NORMAL_STEP) ||
	    !(pcb->pcb_flags & WATCH_STEP)) {
		siginfo.si_signo = SIGTRAP;
		siginfo.si_code = TRAP_TRACE;
		siginfo.si_addr  = pc;
		fault = FLTTRACE;
		if (pcb->pcb_flags & WATCH_STEP)
			(void) undo_watch_step(NULL);
	} else {
		fault = undo_watch_step(&siginfo);
	}
	pcb->pcb_flags &= ~(DEBUG_PENDING|NORMAL_STEP|WATCH_STEP);

	if (fault) {
		/*
		 * Remember the fault and fault address
		 * for real-time (SIGPROF) profiling.
		 */
		lwp->lwp_lastfault = fault;
		lwp->lwp_lastfaddr = siginfo.si_addr;
		/*
		 * If a debugger has declared this fault to be an
		 * event of interest, stop the lwp.  Otherwise just
		 * deliver the associated signal.
		 */
		if (prismember(&p->p_fltmask, fault) &&
		    stop_on_fault(fault, &siginfo) == 0)
			siginfo.si_signo = 0;
	}

	if (siginfo.si_signo)
		trapsig(&siginfo, 1);
}
Code example #23
File: sigqueue.c  Project: andreiw/polaris
static int
sigqkill(pid_t pid, sigsend_t *sigsend)
{
	proc_t *p;
	int error;

	if ((uint_t)sigsend->sig >= NSIG)
		return (EINVAL);

	if (pid == -1) {
		procset_t set;

		setprocset(&set, POP_AND, P_ALL, P_MYID, P_ALL, P_MYID);
		error = sigsendset(&set, sigsend);
	} else if (pid > 0) {
		mutex_enter(&pidlock);
		if ((p = prfind(pid)) == NULL || p->p_stat == SIDL)
			error = ESRCH;
		else {
			error = sigsendproc(p, sigsend);
			if (error == 0 && sigsend->perm == 0)
				error = EPERM;
		}
		mutex_exit(&pidlock);
	} else {
		int nfound = 0;
		pid_t pgid;

		if (pid == 0)
			pgid = ttoproc(curthread)->p_pgrp;
		else
			pgid = -pid;

		error = 0;
		mutex_enter(&pidlock);
		for (p = pgfind(pgid); p && !error; p = p->p_pglink) {
			if (p->p_stat != SIDL) {
				nfound++;
				error = sigsendproc(p, sigsend);
			}
		}
		mutex_exit(&pidlock);
		if (nfound == 0)
			error = ESRCH;
		else if (error == 0 && sigsend->perm == 0)
			error = EPERM;
	}

	return (error);
}
Code example #24
/*
 * Check for common cases of procsets which specify only the
 * current process.  cur_inset_only() returns B_TRUE when
 * the current process is the only one in the set.  B_FALSE
 * is returned to indicate that this may not be the case.
 */
boolean_t
cur_inset_only(procset_t *psp)
{
	if (((psp->p_lidtype == P_PID &&
	    (psp->p_lid == P_MYID ||
	    psp->p_lid == ttoproc(curthread)->p_pid)) ||
	    ((psp->p_lidtype == P_LWPID) &&
	    (psp->p_lid == P_MYID ||
	    psp->p_lid == curthread->t_tid))) &&
	    psp->p_op == POP_AND && psp->p_ridtype == P_ALL)
		return (B_TRUE);

	if (((psp->p_ridtype == P_PID &&
	    (psp->p_rid == P_MYID ||
	    psp->p_rid == ttoproc(curthread)->p_pid)) ||
	    ((psp->p_ridtype == P_LWPID) &&
	    (psp->p_rid == P_MYID ||
	    psp->p_rid == curthread->t_tid))) &&
	    psp->p_op == POP_AND && psp->p_lidtype == P_ALL)
		return (B_TRUE);

	return (B_FALSE);
}
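cur_inset_only() is only a fast-path hint: B_TRUE guarantees the procset names nothing but the calling process, while B_FALSE merely means that could not be proven, so callers still need the general path. A hypothetical caller illustrating that split follows; do_op_on_self() and do_op_on_set() are placeholder names, not functions from the source.

/*
 * Hypothetical caller of cur_inset_only().  do_op_on_self() and
 * do_op_on_set() stand in for whatever operation the real caller
 * performs; only the fast-path/general-path split is the point here.
 */
static int
apply_to_procset(procset_t *psp)
{
	if (cur_inset_only(psp) == B_TRUE) {
		/* Set provably contains only the current process. */
		return (do_op_on_self(ttoproc(curthread)));
	}

	/* May or may not include other processes; take the general path. */
	return (do_op_on_set(psp));
}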
Code example #25
File: condvar.c  Project: BjoKaSH/mac-zfs
int
cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int rval = 1;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return (rval);
	}

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t))
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	mutex_enter(mp);
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0 && signalled)	/* avoid consuming the cv_signal() */
		cv_signal(cvp);
	return (rval);
}
Code example #26
File: cpr_uthread.c  Project: andreiw/polaris
/*
 * Stop kernel threads by using the callback mechanism.  If any thread
 * cannot be stopped, return failure.
 */
int
cpr_stop_kernel_threads(void)
{
	caddr_t	name;
	kthread_id_t tp;
	proc_t *p;

	callb_lock_table();	/* Note: we unlock the table in resume. */

	DEBUG1(errp("stopping kernel daemons..."));
	if ((name = callb_execute_class(CB_CL_CPR_DAEMON,
	    CB_CODE_CPR_CHKPT)) != (caddr_t)NULL) {
		cpr_err(CE_WARN,
		    "Could not stop \"%s\" kernel thread.  "
		    "Please try again later.", name);
		return (EBUSY);
	}

	/*
	 * We think we stopped all the kernel threads.  Just in case
	 * someone is not playing by the rules, take a spin through
	 * the threadlist and see if we can account for everybody.
	 */
	mutex_enter(&pidlock);
	tp = curthread->t_next;
	do {
		p = ttoproc(tp);
		if (p->p_as != &kas)
			continue;

		if (tp->t_flag & T_INTR_THREAD)
			continue;

		if (! callb_is_stopped(tp, &name)) {
			mutex_exit(&pidlock);
			cpr_err(CE_WARN,
			    "\"%s\" kernel thread not stopped.", name);
			return (EBUSY);
		}
	} while ((tp = tp->t_next) != curthread);
	mutex_exit(&pidlock);

	DEBUG1(errp("done\n"));
	return (0);
}
Code example #27
File: cpupart.c  Project: apprisi/illumos-gate
/*
 * This function binds a thread to a partition.  Must be called with the
 * p_lock of the containing process held (to keep the thread from going
 * away), and thus also with cpu_lock held (since cpu_lock must be
 * acquired before p_lock).  If ignore is non-zero, then CPU bindings
 * should be ignored (this is used when destroying a partition).
 */
int
cpupart_bind_thread(kthread_id_t tp, psetid_t psid, int ignore, void *projbuf,
    void *zonebuf)
{
	cpupart_t	*newpp;

	ASSERT(pool_lock_held());
	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(MUTEX_HELD(&pidlock));
	ASSERT(MUTEX_HELD(&ttoproc(tp)->p_lock));

	if (psid == PS_NONE)
		newpp = &cp_default;
	else {
		newpp = cpupart_find(psid);
		if (newpp == NULL) {
			return (EINVAL);
		}
	}
	return (cpupart_move_thread(tp, newpp, ignore, projbuf, zonebuf));
}
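The ASSERTs in cpupart_bind_thread() encode the lock order a caller must already hold: the pool lock, then cpu_lock, then pidlock, then the target process's p_lock. A hedged sketch of that ordering around a single call follows; the wrapper name is illustrative, and the projbuf/zonebuf preparation is assumed to have been done elsewhere.

/*
 * Illustrative wrapper showing the lock ordering cpupart_bind_thread()
 * asserts: pool lock -> cpu_lock -> pidlock -> p_lock.  This is a
 * sketch of the ordering only, not the actual pset binding path.
 */
static int
bind_one_thread(kthread_id_t tp, psetid_t psid, void *projbuf, void *zonebuf)
{
	proc_t *p = ttoproc(tp);
	int error;

	pool_lock();
	mutex_enter(&cpu_lock);
	mutex_enter(&pidlock);
	mutex_enter(&p->p_lock);

	error = cpupart_bind_thread(tp, psid, 0, projbuf, zonebuf);

	mutex_exit(&p->p_lock);
	mutex_exit(&pidlock);
	mutex_exit(&cpu_lock);
	pool_unlock();

	return (error);
}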
Code example #28
File: tnf_trace.c  Project: andreiw/polaris
tnf_record_p
tnf_kernel_schedule(tnf_ops_t *ops, tnf_schedule_t *sched)
{
	tnf_tag_data_t *metatag_data;
	tnf_record_p metatag_index;
	tnf_schedule_prototype_t *buffer;
	kthread_t *t;

	t = curthread;

	/* Cannot be called when writing into tag space */
	ASSERT(ops->mode == TNF_ALLOC_REUSABLE);

	ALLOC(ops, sizeof (*buffer), buffer, sched->record_p,
	    TNF_ALLOC_REUSABLE); /* XXX see comment above */

	metatag_data = TAG_DATA(tnf_kernel_schedule);
	metatag_index = metatag_data->tag_index ?
		metatag_data->tag_index :
		metatag_data->tag_desc(ops, metatag_data);

	ASSIGN(buffer,	tag, 		metatag_index);
	ASSIGN2(buffer, tid, 		t,		kthread_id);
	ASSIGN(buffer,	lwpid, 		t->t_tid);
	ASSIGN(buffer,	pid, 		ttoproc(t)->p_pid);
	ASSIGN(buffer,	time_base, 	sched->time_base);
	ASSIGN(buffer,	cpuid, 		sched->cpuid);

	/*
	 * Remember schedule record generation number so the distance
	 * in virtual space can be calculated from an event record
	 */
	sched->record_gen = ((tnf_block_header_t *)
	    ((uintptr_t)buffer & TNF_BLOCK_MASK))->generation;
	/* Cannot have been written into tag space */
	ASSERT(sched->record_gen != TNF_TAG_GENERATION_NUM);

	return ((tnf_record_p)buffer);
}
Code example #29
File: cpr_uthread.c  Project: andreiw/polaris
/*
 * Checks and makes sure all user threads are stopped
 */
static int
cpr_check_user_threads()
{
	kthread_id_t tp;
	int rc = 0;

	mutex_enter(&pidlock);
	tp = curthread->t_next;
	do {
		if (ttoproc(tp)->p_as == &kas || ttoproc(tp)->p_stat == SZOMB)
			continue;

		thread_lock(tp);
		/*
		 * make sure that we are off all the queues and in a stopped
		 * state.
		 */
		if (!CPR_ISTOPPED(tp)) {
			thread_unlock(tp);
			mutex_exit(&pidlock);

			/* `count' is a file-scope retry counter (not shown in this excerpt) */
			if (count == CPR_UTSTOP_RETRY) {
				DEBUG1(errp("Suspend failed: cannot stop "
				    "uthread\n"));
				cpr_err(CE_WARN, "Suspend cannot stop "
				    "process %s (%p:%x).",
				    ttoproc(tp)->p_user.u_psargs, (void *)tp,
				    tp->t_state);
				cpr_err(CE_WARN, "Process may be waiting for"
				    " network request, please try again.");
			}

			DEBUG2(errp("can't stop t=%p state=%x pfg=%x sched=%x\n",
			    tp, tp->t_state, tp->t_proc_flag, tp->t_schedflag));
			DEBUG2(errp("proc %p state=%x pid=%d\n",
			    ttoproc(tp), ttoproc(tp)->p_stat,
			    ttoproc(tp)->p_pidp->pid_id));
			return (1);
		}
		thread_unlock(tp);

	} while ((tp = tp->t_next) != curthread && rc == 0);

	mutex_exit(&pidlock);
	return (0);
}
Code example #30
File: syscall.c  Project: andreiw/polaris
/*
 * Arrange for the real time profiling signal to be dispatched.
 */
void
realsigprof(int sysnum, int error)
{
	proc_t *p;
	klwp_t *lwp;

	if (curthread->t_rprof->rp_anystate == 0)
		return;
	p = ttoproc(curthread);
	lwp = ttolwp(curthread);
	mutex_enter(&p->p_lock);
	if (sigismember(&p->p_ignore, SIGPROF) ||
	    signal_is_blocked(curthread, SIGPROF)) {
		mutex_exit(&p->p_lock);
		return;
	}
	lwp->lwp_siginfo.si_signo = SIGPROF;
	lwp->lwp_siginfo.si_code = PROF_SIG;
	lwp->lwp_siginfo.si_errno = error;
	hrt2ts(gethrtime(), &lwp->lwp_siginfo.si_tstamp);
	lwp->lwp_siginfo.si_syscall = sysnum;
	lwp->lwp_siginfo.si_nsysarg = (sysnum > 0 && sysnum < NSYSCALL) ?
		LWP_GETSYSENT(lwp)[sysnum].sy_narg : 0;
	lwp->lwp_siginfo.si_fault = lwp->lwp_lastfault;
	lwp->lwp_siginfo.si_faddr = lwp->lwp_lastfaddr;
	lwp->lwp_lastfault = 0;
	lwp->lwp_lastfaddr = NULL;
	sigtoproc(p, curthread, SIGPROF);
	mutex_exit(&p->p_lock);
	ASSERT(lwp->lwp_cursig == 0);
	if (issig(FORREAL)) {
		psig();
	}
	mutex_enter(&p->p_lock);
	lwp->lwp_siginfo.si_signo = 0;
	bzero(curthread->t_rprof, sizeof (*curthread->t_rprof));
	mutex_exit(&p->p_lock);
}