Example #1
int
dmu_objset_userspace_upgrade(objset_t *os)
{
	uint64_t obj;
	int err = 0;

	if (dmu_objset_userspace_present(os))
		return (0);
	if (!dmu_objset_userused_enabled(os->os))
		return (ENOTSUP);
	if (dmu_objset_is_snapshot(os))
		return (EINVAL);

	/*
	 * We simply need to mark every object dirty, so that it will be
	 * synced out and thus accounted for.  If this is called
	 * concurrently, or if we already did some work before crashing,
	 * that's fine, since we track each object's accounted state
	 * independently.
	 */

	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
		dmu_tx_t *tx;
		dmu_buf_t *db;
		int objerr;

		if (issig(JUSTLOOKING) && issig(FORREAL))
			return (EINTR);

		objerr = dmu_bonus_hold(os, obj, FTAG, &db);
		if (objerr)
			continue;
		tx = dmu_tx_create(os);
		dmu_tx_hold_bonus(tx, obj);
		objerr = dmu_tx_assign(tx, TXG_WAIT);
		if (objerr) {
			dmu_tx_abort(tx);
			continue;
		}
		dmu_buf_will_dirty(db, tx);
		dmu_buf_rele(db, FTAG);
		dmu_tx_commit(tx);
	}

	os->os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}
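
The issig(JUSTLOOKING) && issig(FORREAL) pair above is the usual Solaris/illumos idiom for making a long-running kernel loop interruptible: JUSTLOOKING is a cheap, non-destructive probe, and only when it reports a possibly pending signal does the FORREAL pass perform the full check. A minimal sketch of the idiom, assuming a hypothetical process_one_object() for the per-object work:

int
walk_all_objects(objset_t *os)
{
	uint64_t obj;
	int err = 0;

	for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
		/* cheap probe first; full (destructive) check only if needed */
		if (issig(JUSTLOOKING) && issig(FORREAL))
			return (EINTR);	/* let the caller restart or abort */

		process_one_object(os, obj);	/* hypothetical per-object work */
	}
	return (0);
}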
Example #2
/*
 * Like cv_wait_sig_swap but allows the caller to indicate (with a
 * non-NULL sigret) that they will take care of signalling the cv
 * after wakeup, if necessary.  This is a vile hack that should only
 * be used when no other option is available; almost all callers
 * should just use cv_wait_sig_swap (which takes care of the cv_signal
 * stuff automatically) instead.
 */
int
cv_wait_sig_swap_core(kcondvar_t *cvp, kmutex_t *mp, int *sigret)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int rval = 1;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return (rval);
	}

	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	t->t_kpri_req = 0;	/* don't need kernel priority */
	cv_block_sig(t, (condvar_impl_t *)cvp);
	/* I can be swapped now */
	curthread->t_schedflag &= ~TS_DONT_SWAP;
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t))
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	/* TS_DONT_SWAP set by disp() */
	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	mutex_enter(mp);
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0) {
		if (sigret != NULL)
			*sigret = signalled;	/* just tell the caller */
		else if (signalled)
			cv_signal(cvp);	/* avoid consuming the cv_signal() */
	}
	return (rval);
}
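
The sigret parameter described in the header comment shifts responsibility for re-posting a consumed cv_signal() onto the caller. A minimal sketch of that rare mode, assuming a hypothetical ready flag protected by mp:

static int
wait_example(kcondvar_t *cvp, kmutex_t *mp, volatile int *ready)
{
	int sigret;

	/* mp is held on entry, as for any condition-variable wait */
	while (!*ready) {
		if (cv_wait_sig_swap_core(cvp, mp, &sigret) == 0) {
			/* interrupted: caller-specific teardown goes here */
			if (sigret)		/* we consumed a cv_signal() */
				cv_signal(cvp);	/* re-post it ourselves */
			return (EINTR);
		}
	}
	return (0);
}

Passing NULL for sigret instead makes the routine behave like cv_wait_sig_swap and re-post the wakeup automatically.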
Example #3
/* ARGSUSED */
static int
backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	dmu_sendarg_t *dsp = arg;
	dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
	int err = 0;

	if (issig(JUSTLOOKING) && issig(FORREAL))
		return (EINTR);

	if (zb->zb_object != DMU_META_DNODE_OBJECT &&
	    DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
		return (0);
	} else if (bp == NULL && zb->zb_object == DMU_META_DNODE_OBJECT) {
		uint64_t span = BP_SPAN(dnp, zb->zb_level);
		uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
		err = dump_freeobjects(dsp, dnobj, span >> DNODE_SHIFT);
	} else if (bp == NULL) {
Example #4
/*
 * Give up the processor till a wakeup occurs
 * on chan, at which time the process
 * enters the scheduling queue at priority pri.
 * The most important effect of pri is that when
 * pri<0 a signal cannot disturb the sleep;
 * if pri>=0 signals will be processed.
 * Callers of this routine must be prepared for
 * premature return, and check that the reason for
 * sleeping has gone away.
 */
sleep(chan, pri)
{
    register *rp, s;

    s = PS->integ;
    rp = u.u_procp;
    if(pri >= 0) {
        if(issig())
            goto psig;
        spl6();
        rp->p_wchan = chan;
        rp->p_stat = SWAIT;
        rp->p_pri = pri;
        spl0();
        if(runin != 0) {
            runin = 0;
            wakeup(&runin);
        }
        swtch();
        if(issig())
            goto psig;
    } else {
        spl6();
        rp->p_wchan = chan;
        rp->p_stat = SSLEEP;
        rp->p_pri = pri;
        spl0();
        swtch();
    }
    PS->integ = s;
    return;

    /*
     * If priority was low (>=0) and
     * there has been a signal,
     * execute non-local goto to
     * the qsav location.
     * (see trap1/trap.c)
     */
psig:
    aretu(u.u_qsav);
}
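
Since sleep() can return prematurely, callers re-test their condition in a loop, as Examples #6, #8, and #9 below do. A hypothetical V6-style caller, sketched in the same dialect (the buffer flag and priority are assumptions):

await(bp)
struct buf *bp;
{
    spl6();                     /* block interrupts around the test */
    while((bp->b_flags&B_DONE) == 0)
        sleep(bp, PRIBIO);      /* PRIBIO < 0: signals cannot interrupt */
    spl0();
}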
Example #5
/* ARGSUSED */
static int
diff_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct diffarg *da = arg;
	int err = 0;

	if (issig(JUSTLOOKING) && issig(FORREAL))
		return (EINTR);

	if (zb->zb_object != DMU_META_DNODE_OBJECT)
		return (0);

	if (bp == NULL) {
		uint64_t span = DBP_SPAN(dnp, zb->zb_level);
		uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;

		err = report_free_dnode_range(da, dnobj,
		    dnobj + (span >> DNODE_SHIFT) - 1);
		if (err)
			return (err);
	} else if (zb->zb_level == 0) {
Example #6
/**
 * @brief Sleeps if the raw input buffer of a TTY device is empty.
 * 
 * @details Puts the calling process to sleep if the raw input buffer of the TTY
 *          device pointed to by @p ttyp is empty.
 * 
 * @param ttyp TTY device to sleep for.
 * 
 * @returns Upon successful completion, zero is returned, meaning that the
 *          input buffer of the target TTY device is no longer empty. If,
 *          while sleeping, the process is awakened by the delivery of a
 *          signal, -#EINTR is returned instead. In this latter case, it is
 *          undefined whether the buffer is still empty.
 * 
 * @note @p ttyp must point to a valid TTY device.
 */
PRIVATE int tty_sleep_empty(struct tty *ttyp)
{
	/* Sleep while raw input buffer is empty. */
	while (KBUFFER_EMPTY(ttyp->rinput))
	{
		sleep(&ttyp->rinput.chain, PRIO_TTY);
		
		/* Awaken by signal. */
		if (issig() != SIGNULL)
			return (-EINTR);
	}
	
	return (0);
}
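
A hedged sketch of how a read path might call tty_sleep_empty(), propagating -EINTR so an interrupted read fails cleanly; everything except tty_sleep_empty() is hypothetical:

PRIVATE int tty_read_chars(struct tty *ttyp, char *buf, size_t n)
{
	int err;
	
	/* Wait for raw input; an interrupting signal yields -EINTR. */
	if ((err = tty_sleep_empty(ttyp)) < 0)
		return (err);
	
	/* The raw input buffer is non-empty here; drain it. */
	return (tty_drain(ttyp, buf, n));	/* hypothetical helper */
}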
Example #7
int
cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	int rval = 1;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr) {
		cv_wait(cvp, mp);
		return (rval);
	}

	ASSERT(curthread->t_schedflag & TS_DONT_SWAP);
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t))
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	mutex_enter(mp);
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval == 0 && signalled)	/* avoid consuming the cv_signal() */
		cv_signal(cvp);
	return (rval);
}
Example #8
/*
 * Suspends the calling process until a signal is received.
 */
PUBLIC int sys_pause()
{	
	/* Suspend the process. */
	while (1)
	{
		sleep(&chain, PRIO_USER);
		
		/* Wake up on signal receipt. */
		if (issig() != SIGNULL)
			goto awaken;
	}

awaken:

	return (-EINTR);
}
Example #9
/**
 * @brief Sleeps if the output buffer of a TTY device is full.
 * 
 * @details Puts the calling process to sleep if the output buffer of the TTY
 *          device pointed to by @p ttyp is full.
 * 
 * @param ttyp TTY device to sleep for.
 * 
 * @returns Upon successful completion, zero is returned, meaning that the
 *          output buffer of the target TTY device is no longer full. If,
 *          while sleeping, the process is awakened by the delivery of a
 *          signal, -#EINTR is returned instead. In this latter case, it is
 *          undefined whether the buffer is still full.
 */
PRIVATE int tty_sleep_full(struct tty *ttyp)
{
	/* Sleep while output buffer is full. */
	while (KBUFFER_FULL(ttyp->output))
	{
		sleep(&ttyp->output.chain, PRIO_TTY);
		
		/* Awaken by signal. */
		if (issig() != SIGNULL)
			return (-EINTR);
		
		/* Awaken by START character. */
		disable_interrupts();
		console_write(&ttyp->output);
		enable_interrupts();
	}
	
	return (0);
}
Example #10
/*
 * Arrange for the real time profiling signal to be dispatched.
 */
void
realsigprof(int sysnum, int error)
{
	proc_t *p;
	klwp_t *lwp;

	if (curthread->t_rprof->rp_anystate == 0)
		return;
	p = ttoproc(curthread);
	lwp = ttolwp(curthread);
	mutex_enter(&p->p_lock);
	if (sigismember(&p->p_ignore, SIGPROF) ||
	    signal_is_blocked(curthread, SIGPROF)) {
		mutex_exit(&p->p_lock);
		return;
	}
	lwp->lwp_siginfo.si_signo = SIGPROF;
	lwp->lwp_siginfo.si_code = PROF_SIG;
	lwp->lwp_siginfo.si_errno = error;
	hrt2ts(gethrtime(), &lwp->lwp_siginfo.si_tstamp);
	lwp->lwp_siginfo.si_syscall = sysnum;
	lwp->lwp_siginfo.si_nsysarg = (sysnum > 0 && sysnum < NSYSCALL) ?
		LWP_GETSYSENT(lwp)[sysnum].sy_narg : 0;
	lwp->lwp_siginfo.si_fault = lwp->lwp_lastfault;
	lwp->lwp_siginfo.si_faddr = lwp->lwp_lastfaddr;
	lwp->lwp_lastfault = 0;
	lwp->lwp_lastfaddr = NULL;
	sigtoproc(p, curthread, SIGPROF);
	mutex_exit(&p->p_lock);
	ASSERT(lwp->lwp_cursig == 0);
	if (issig(FORREAL)) {
		psig();
	}
	mutex_enter(&p->p_lock);
	lwp->lwp_siginfo.si_signo = 0;
	bzero(curthread->t_rprof, sizeof (*curthread->t_rprof));
	mutex_exit(&p->p_lock);
}
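
The notable pattern here is synchronous self-delivery: sigtoproc() queues SIGPROF for the current thread, then issig(FORREAL) fetches the pending signal and psig() runs its disposition before the routine returns. A condensed sketch, with the p_lock dance and siginfo setup elided:

static void
deliver_to_self(proc_t *p, int sig)
{
	mutex_enter(&p->p_lock);
	sigtoproc(p, curthread, sig);	/* queue sig for this thread */
	mutex_exit(&p->p_lock);

	if (issig(FORREAL))		/* fetch the pending signal ... */
		psig();			/* ... and dispatch it right now */
}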
Example #11
/*
 * Post-syscall processing.  Perform abnormal system call completion
 * actions such as /proc tracing, profiling, signals, preemption, etc.
 *
 * This routine is called only if t_post_sys, t_sig_check, or t_astflag is set.
 * Any condition requiring pre-syscall handling must set one of these.
 * If the condition is persistent, this routine will repost t_post_sys.
 */
void
post_syscall(long rval1, long rval2)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	struct regs *rp = lwptoregs(lwp);
	uint_t	error;
	uint_t	code = t->t_sysnum;
	int	repost = 0;
	int	proc_stop = 0;		/* non-zero if stopping */
	int	sigprof = 0;		/* non-zero if sending SIGPROF */

	t->t_post_sys = 0;

	error = lwp->lwp_errno;

	/*
	 * Code can be zero if this is a new LWP returning after a forkall(),
	 * other than the one which matches the one in the parent which called
	 * forkall().  In these LWPs, skip most of post-syscall activity.
	 */
	if (code == 0)
		goto sig_check;
	/*
	 * If the trace flag is set, mark the lwp to take a single-step trap
	 * on return to user level (below). The x86 lcall interface and
	 * sysenter has already done this, and turned off the flag, but
	 * amd64 syscall interface has not.
	 */
	if (rp->r_ps & PS_T) {
		lwp->lwp_pcb.pcb_flags |= DEBUG_PENDING;
		rp->r_ps &= ~PS_T;
		aston(curthread);
	}
#ifdef C2_AUDIT
	if (audit_active) {	/* put out audit record for this syscall */
		rval_t	rval;

		/* XX64 -- truncation of 64-bit return values? */
		rval.r_val1 = (int)rval1;
		rval.r_val2 = (int)rval2;
		audit_finish(T_SYSCALL, code, error, &rval);
		repost = 1;
	}
#endif /* C2_AUDIT */

	if (curthread->t_pdmsg != NULL) {
		char *m = curthread->t_pdmsg;

		uprintf("%s", m);
		kmem_free(m, strlen(m) + 1);
		curthread->t_pdmsg = NULL;
	}

	/*
	 * If we're going to stop for /proc tracing, set the flag and
	 * save the arguments so that the return values don't smash them.
	 */
	if (PTOU(p)->u_systrap) {
		if (prismember(&PTOU(p)->u_exitmask, code)) {
			if (lwp_getdatamodel(lwp) == DATAMODEL_LP64)
				(void) save_syscall_args();
			proc_stop = 1;
		}
		repost = 1;
	}

	/*
	 * Similarly check to see if SIGPROF might be sent.
	 */
	if (curthread->t_rprof != NULL &&
	    curthread->t_rprof->rp_anystate != 0) {
		if (lwp_getdatamodel(lwp) == DATAMODEL_LP64)
			(void) save_syscall_args();
		sigprof = 1;
	}

	if (lwp->lwp_eosys == NORMALRETURN) {
		if (error == 0) {
#ifdef SYSCALLTRACE
			if (syscalltrace) {
				mutex_enter(&systrace_lock);
				printf(
				    "%d: r_val1=0x%lx, r_val2=0x%lx, id 0x%p\n",
				    p->p_pid, rval1, rval2, curthread);
				mutex_exit(&systrace_lock);
			}
#endif /* SYSCALLTRACE */
			rp->r_ps &= ~PS_C;
			rp->r_r0 = rval1;
			rp->r_r1 = rval2;
		} else {
			int sig;
#ifdef SYSCALLTRACE
			if (syscalltrace) {
				mutex_enter(&systrace_lock);
				printf("%d: error=%d, id 0x%p\n",
				    p->p_pid, error, curthread);
				mutex_exit(&systrace_lock);
			}
#endif /* SYSCALLTRACE */
			if (error == EINTR && t->t_activefd.a_stale)
				error = EBADF;
			if (error == EINTR &&
			    (sig = lwp->lwp_cursig) != 0 &&
			    sigismember(&PTOU(p)->u_sigrestart, sig) &&
			    PTOU(p)->u_signal[sig - 1] != SIG_DFL &&
			    PTOU(p)->u_signal[sig - 1] != SIG_IGN)
				error = ERESTART;
			rp->r_r0 = error;
			rp->r_ps |= PS_C;
		}
	}

	/*
	 * From the proc(4) manual page:
	 * When exit from a system call is being traced, the traced process
	 * stops on completion of the system call just prior to checking for
	 * signals and returning to user level.  At this point all return
	 * values have been stored into the traced process's saved registers.
	 */
	if (proc_stop) {
		mutex_enter(&p->p_lock);
		if (PTOU(p)->u_systrap &&
		    prismember(&PTOU(p)->u_exitmask, code))
			stop(PR_SYSEXIT, code);
		mutex_exit(&p->p_lock);
	}

	/*
	 * If we are the parent returning from a successful
	 * vfork, wait for the child to exec or exit.
	 * This code must be here and not in the bowels of the system
	 * so that /proc can intercept exit from vfork in a timely way.
	 */
	if (code == SYS_vfork && rp->r_r1 == 0 && error == 0)
		vfwait((pid_t)rval1);

	/*
	 * If profiling is active, bill the current PC in user-land
	 * and keep reposting until profiling is disabled.
	 */
	if (p->p_prof.pr_scale) {
		if (lwp->lwp_oweupc)
			profil_tick(rp->r_pc);
		repost = 1;
	}

sig_check:
	/*
	 * Reset flag for next time.
	 * We must do this after stopping on PR_SYSEXIT
	 * because /proc uses the information in lwp_eosys.
	 */
	lwp->lwp_eosys = NORMALRETURN;
	clear_stale_fd();
	t->t_flag &= ~T_FORKALL;

	if (t->t_astflag | t->t_sig_check) {
		/*
		 * Turn off the AST flag before checking all the conditions that
		 * may have caused an AST.  This flag is on whenever a signal or
		 * unusual condition should be handled after the next trap or
		 * syscall.
		 */
		astoff(t);
		/*
		 * If a single-step trap occurred on a syscall (see trap())
		 * recognize it now.  Do this before checking for signals
		 * because deferred_singlestep_trap() may generate a SIGTRAP to
		 * the LWP or may otherwise mark the LWP to call issig(FORREAL).
		 */
		if (lwp->lwp_pcb.pcb_flags & DEBUG_PENDING)
			deferred_singlestep_trap((caddr_t)rp->r_pc);

		t->t_sig_check = 0;

		/*
		 * The following check is legal for the following reasons:
		 *	1) The thread we are checking, is ourselves, so there is
		 *	   no way the proc can go away.
		 *	2) The only time we need to be protected by the
		 *	   lock is if the binding is changed.
		 *
		 *	Note we will still take the lock and check the binding
		 *	if the condition was true without the lock held.  This
		 *	prevents lock contention among threads owned by the
		 * 	same proc.
		 */

		if (curthread->t_proc_flag & TP_CHANGEBIND) {
			mutex_enter(&p->p_lock);
			if (curthread->t_proc_flag & TP_CHANGEBIND) {
				timer_lwpbind();
				curthread->t_proc_flag &= ~TP_CHANGEBIND;
			}
			mutex_exit(&p->p_lock);
		}

		/*
		 * for kaio requests on the special kaio poll queue,
		 * copyout their results to user memory.
		 */
		if (p->p_aio)
			aio_cleanup(0);
		/*
		 * If this LWP was asked to hold, call holdlwp(), which will
		 * stop.  holdlwps() sets this up and calls pokelwps() which
		 * sets the AST flag.
		 *
		 * Also check TP_EXITLWP, since this is used by fresh new LWPs
		 * through lwp_rtt().  That flag is set if the lwp_create(2)
		 * syscall failed after creating the LWP.
		 */
		if (ISHOLD(p) || (t->t_proc_flag & TP_EXITLWP))
			holdlwp();

		/*
		 * All code that sets signals and makes ISSIG_PENDING
		 * evaluate true must set t_sig_check afterwards.
		 */
		if (ISSIG_PENDING(t, lwp, p)) {
			if (issig(FORREAL))
				psig();
			t->t_sig_check = 1;	/* recheck next time */
		}

		if (sigprof) {
			realsigprof(code, error);
			t->t_sig_check = 1;	/* recheck next time */
		}

		/*
		 * If a performance counter overflow interrupt was
		 * delivered *during* the syscall, then re-enable the
		 * AST so that we take a trip through trap() to cause
		 * the SIGEMT to be delivered.
		 */
		if (lwp->lwp_pcb.pcb_flags & CPC_OVERFLOW)
			aston(t);

		/*
		 * /proc can't enable/disable the trace bit itself
		 * because that could race with the call gate used by
		 * system calls via "lcall". If that happened, an
		 * invalid EFLAGS would result. prstep()/prnostep()
		 * therefore schedule an AST for the purpose.
		 */
		if (lwp->lwp_pcb.pcb_flags & REQUEST_STEP) {
			lwp->lwp_pcb.pcb_flags &= ~REQUEST_STEP;
			rp->r_ps |= PS_T;
		}
		if (lwp->lwp_pcb.pcb_flags & REQUEST_NOSTEP) {
			lwp->lwp_pcb.pcb_flags &= ~REQUEST_NOSTEP;
			rp->r_ps &= ~PS_T;
		}
	}

	lwp->lwp_errno = 0;		/* clear error for next time */

#ifndef NPROBE
	/* Kernel probe */
	if (tnf_tracing_active) {
		TNF_PROBE_3(syscall_end, "syscall thread", /* CSTYLED */,
			tnf_long,	rval1,		rval1,
			tnf_long,	rval2,		rval2,
			tnf_long,	errno,		(long)error);
		repost = 1;
	}
#endif /* NPROBE */

	/*
	 * Set state to LWP_USER here so preempt won't give us a kernel
	 * priority if it occurs after this point.  Call CL_TRAPRET() to
	 * restore the user-level priority.
	 *
	 * It is important that no locks (other than spinlocks) be entered
	 * after this point before returning to user mode (unless lwp_state
	 * is set back to LWP_SYS).
	 *
	 * XXX Sampled times past this point are charged to the user.
	 */
	lwp->lwp_state = LWP_USER;

	if (t->t_trapret) {
		t->t_trapret = 0;
		thread_lock(t);
		CL_TRAPRET(t);
		thread_unlock(t);
	}
	if (CPU->cpu_runrun)
		preempt();

	lwp->lwp_errno = 0;		/* clear error for next time */

	/*
	 * The thread lock must be held in order to clear sysnum and reset
	 * lwp_ap atomically with respect to other threads in the system that
	 * may be looking at the args via lwp_ap from get_syscall_args().
	 */

	thread_lock(t);
	t->t_sysnum = 0;		/* no longer in a system call */

	if (lwp_getdatamodel(lwp) == DATAMODEL_NATIVE) {
#if defined(_LP64)
		/*
		 * In case the args were copied to the lwp, reset the
		 * pointer so the next syscall will have the right
		 * lwp_ap pointer.
		 */
		lwp->lwp_ap = (long *)&rp->r_rdi;
	} else {
#endif
		lwp->lwp_ap = NULL;	/* reset on every syscall entry */
	}
	thread_unlock(t);

	lwp->lwp_argsaved = 0;

	/*
	 * If there was a continuing reason for post-syscall processing,
	 * set the t_post_sys flag for the next system call.
	 */
	if (repost)
		t->t_post_sys = 1;

	/*
	 * If there is a ustack registered for this lwp, and the stack rlimit
	 * has been altered, read in the ustack. If the saved stack rlimit
	 * matches the bounds of the ustack, update the ustack to reflect
	 * the new rlimit. If the new stack rlimit is RLIM_INFINITY, disable
	 * stack checking by setting the size to 0.
	 */
	if (lwp->lwp_ustack != 0 && lwp->lwp_old_stk_ctl != 0) {
		rlim64_t new_size;
		caddr_t top;
		stack_t stk;
		struct rlimit64 rl;

		mutex_enter(&p->p_lock);
		new_size = p->p_stk_ctl;
		top = p->p_usrstack;
		(void) rctl_rlimit_get(rctlproc_legacy[RLIMIT_STACK], p, &rl);
		mutex_exit(&p->p_lock);

		if (rl.rlim_cur == RLIM64_INFINITY)
			new_size = 0;

		if (copyin((stack_t *)lwp->lwp_ustack, &stk,
		    sizeof (stack_t)) == 0 &&
		    (stk.ss_size == lwp->lwp_old_stk_ctl ||
			stk.ss_size == 0) &&
		    stk.ss_sp == top - stk.ss_size) {
			stk.ss_sp = (void *)((uintptr_t)stk.ss_sp +
			    stk.ss_size - (uintptr_t)new_size);
			stk.ss_size = new_size;

			(void) copyout(&stk, (stack_t *)lwp->lwp_ustack,
			    sizeof (stack_t));
		}

		lwp->lwp_old_stk_ctl = 0;
	}
}
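
Worth isolating is the repost mechanic: post_syscall() consumes the one-shot t_post_sys flag on entry, and every persistent condition (auditing, /proc tracing, profiling, TNF probes) sets repost so the flag is rearmed for the next syscall. A stripped-down skeleton of that control flow, with placeholder condition checks:

void
post_syscall_skeleton(void)
{
	kthread_t *t = curthread;
	int repost = 0;

	t->t_post_sys = 0;		/* consume the one-shot flag */

	if (tracing_this_lwp())		/* hypothetical persistent condition */
		repost = 1;
	if (profiling_this_proc())	/* hypothetical persistent condition */
		repost = 1;

	/* ... signal checks, CL_TRAPRET(), preemption ... */

	if (repost)
		t->t_post_sys = 1;	/* rearm for the next syscall */
}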
Example #12
static void b_whatis(char **av) {
	bool ess, eff, vee, pee, bee;
	bool f, found;
	int i, ac, c;
	List *s;
	Node *n;
	char *e;
	for (rc_optind = ac = 0; av[ac] != NULL; ac++)
		; /* count the arguments for getopt */
	ess = eff = vee = pee = bee = FALSE;
	while ((c = rc_getopt(ac, av, "sfvpb")) != -1)
		switch (c) {
		default: set(FALSE); return;
		case 's': ess = TRUE; break;
		case 'f': eff = TRUE; break;
		case 'v': vee = TRUE; break;
		case 'p': pee = TRUE; break;
		case 'b': bee = TRUE; break;
		}
	av += rc_optind;
	if (*av == NULL) {
		if (vee|eff)
			whatare_all_vars(eff, vee);
		if (ess)
			whatare_all_signals();
		if (bee)
			for (i = 0; i < arraysize(builtins); i++)
				fprint(1, "builtin %s\n", builtins[i].name);
		if (pee)
			fprint(2, "whatis -p: must specify argument\n");
		if (show(FALSE)) /* no options? */
			whatare_all_vars(TRUE, TRUE);
		set(TRUE);
		return;
	}
	found = TRUE;
	for (i = 0; av[i] != NULL; i++) {
		f = FALSE;
		errno = ENOENT;
		if (show(vee) && (s = varlookup(av[i])) != NULL) {
			f = TRUE;
			prettyprint_var(1, av[i], s);
		}
		if (((show(ess)&&issig(av[i])) || show(eff)) && (n = fnlookup(av[i])) != NULL) {
			f = TRUE;
			prettyprint_fn(1, av[i], n);
		} else if (show(bee) && isbuiltin(av[i]) != NULL) {
			f = TRUE;
			fprint(1, "builtin %s\n", av[i]);
		} else if (show(pee) && (e = which(av[i], FALSE)) != NULL) {
			f = TRUE;
			fprint(1, "%S\n", e);
		}
		if (!f) {
			found = FALSE;
			if (errno != ENOENT)
				uerror(av[i]);
			else
				fprint(2, "%s not found\n", av[i]);
		}
	}
	set(found);
}
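
Note that issig() here is rc's own helper, unrelated to the kernel routine in the other examples: it takes a string and reports whether it names a signal, so "whatis -s" only lists functions that act as signal handlers. A hedged sketch of such a predicate (the signals[] table and its name field are assumptions about rc's internals):

static bool issig_sketch(char *s) {
	int i;
	for (i = 0; i < arraysize(signals); i++)
		if (streq(s, signals[i].name))
			return TRUE;
	return FALSE;
}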
Example #13
/*
 * Returns:
 * 	Function result in order of precedence:
 *		 0 if a signal was received
 *		-1 if timeout occurred
 *		>0 if awakened via cv_signal() or cv_broadcast().
 *		   (returns time remaining)
 *
 * cv_timedwait_sig() is now part of the DDI.
 */
clock_t
cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t tim)
{
	kthread_t *t = curthread;
	proc_t *p = ttoproc(t);
	klwp_t *lwp = ttolwp(t);
	timeout_id_t id;
	clock_t rval = 1;
	clock_t timeleft;
	int signalled = 0;

	if (panicstr)
		return (rval);

	/*
	 * If there is no lwp, then we don't need to wait for a signal.
	 * The check for t_intr is to catch an interrupt thread
	 * that has not yet unpinned the thread underneath.
	 */
	if (lwp == NULL || t->t_intr)
		return (cv_timedwait(cvp, mp, tim));

	/*
	 * If tim is less than or equal to lbolt, then the timeout
	 * has already occurred.  So just check to see if there is a signal
	 * pending.  If so, return 0 to indicate a pending signal;
	 * otherwise return -1 to indicate that the timeout occurred.
	 * No need to wait on anything.
	 */
	timeleft = tim - lbolt;
	if (timeleft <= 0) {
		lwp->lwp_asleep = 1;
		lwp->lwp_sysabort = 0;
		rval = -1;
		goto out;
	}

	/*
	 * Set the timeout and wait.
	 */
	id = realtime_timeout((void (*)(void *))setrun, t, timeleft);
	lwp->lwp_asleep = 1;
	lwp->lwp_sysabort = 0;
	thread_lock(t);
	cv_block_sig(t, (condvar_impl_t *)cvp);
	thread_unlock_nopreempt(t);
	mutex_exit(mp);
	if (ISSIG(t, JUSTLOOKING) || MUSTRETURN(p, t) || (tim - lbolt <= 0))
		setrun(t);
	/* ASSERT(no locks are held) */
	swtch();
	signalled = (t->t_schedflag & TS_SIGNALLED);
	t->t_flag &= ~T_WAKEABLE;
	mutex_enter(mp);

	/*
	 * Untimeout the thread.  untimeout() returns -1 if the timeout
	 * has already occurred, or else the time remaining.  If the time
	 * remaining is zero, the timeout occurred between when we were
	 * awoken and when we called untimeout.  We treat this as if the
	 * timeout had occurred and set rval to -1.
	 */
	rval = untimeout(id);
	if (rval <= 0)
		rval = -1;

	/*
	 * Check to see if a signal is pending.  If so, regardless of whether
	 * or not we were awoken due to the signal, the signal is now pending
	 * and a return of 0 has the highest priority.
	 */
out:
	if (ISSIG_PENDING(t, lwp, p)) {
		mutex_exit(mp);
		if (issig(FORREAL))
			rval = 0;
		mutex_enter(mp);
	}
	if (lwp->lwp_sysabort || MUSTRETURN(p, t))
		rval = 0;
	lwp->lwp_asleep = 0;
	lwp->lwp_sysabort = 0;
	if (rval <= 0 && signalled)	/* avoid consuming the cv_signal() */
		cv_signal(cvp);
	return (rval);
}
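
A hedged sketch of a caller dispatching on the three documented return classes; the condition flag and error choices are hypothetical:

static int
wait_ready(kcondvar_t *cvp, kmutex_t *mp, volatile int *ready, clock_t ticks)
{
	clock_t rv;

	/* mp is held on entry; tim is absolute, hence lbolt + ticks */
	while (!*ready) {
		rv = cv_timedwait_sig(cvp, mp, lbolt + ticks);
		if (rv == 0)
			return (EINTR);		/* signal received */
		if (rv == -1)
			return (ETIME);		/* timed out */
		/* rv > 0: woken by cv_signal()/cv_broadcast(); re-test */
	}
	return (0);
}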