Example #1
/*
 * Return value:
 *   1 - exitlwps() failed, call (or continue) lwp_exit()
 *   0 - restarting init.  Return through system call path
 */
int
proc_exit(int why, int what)
{
	kthread_t *t = curthread;
	klwp_t *lwp = ttolwp(t);
	proc_t *p = ttoproc(t);
	zone_t *z = p->p_zone;
	timeout_id_t tmp_id;
	int rv;
	proc_t *q;
	task_t *tk;
	vnode_t *exec_vp, *execdir_vp, *cdir, *rdir;
	sigqueue_t *sqp;
	lwpdir_t *lwpdir;
	uint_t lwpdir_sz;
	tidhash_t *tidhash;
	uint_t tidhash_sz;
	ret_tidhash_t *ret_tidhash;
	refstr_t *cwd;
	hrtime_t hrutime, hrstime;
	int evaporate;

	/*
	 * Stop and discard the process's lwps except for the current one,
	 * unless some other lwp beat us to it.  If exitlwps() fails then
	 * return and the calling lwp will call (or continue in) lwp_exit().
	 */
	proc_is_exiting(p);
	if (exitlwps(0) != 0)
		return (1);

	mutex_enter(&p->p_lock);
	if (p->p_ttime > 0) {
		/*
		 * Account any remaining ticks charged to this process
		 * on its way out.
		 */
		(void) task_cpu_time_incr(p->p_task, p->p_ttime);
		p->p_ttime = 0;
	}
	mutex_exit(&p->p_lock);

	DTRACE_PROC(lwp__exit);
	DTRACE_PROC1(exit, int, why);

	/*
	 * Perform any brand-specific process exit processing.  Since this
	 * is always the last lwp, this also performs the brand's lwp exit
	 * processing and frees the brand data.
	 */
	if (PROC_IS_BRANDED(p)) {
		lwp_detach_brand_hdlrs(lwp);
		brand_clearbrand(p, B_FALSE);
	}

	/*
	 * Don't let init exit unless zone_start_init() failed its exec, or
	 * we are shutting down the zone or the machine.
	 *
	 * Since we are single threaded, we don't need to lock the
	 * following accesses to zone_proc_initpid.
	 */
	if (p->p_pid == z->zone_proc_initpid) {
		if (z->zone_boot_err == 0 &&
		    zone_status_get(z) < ZONE_IS_SHUTTING_DOWN &&
		    zone_status_get(global_zone) < ZONE_IS_SHUTTING_DOWN &&
		    z->zone_restart_init == B_TRUE &&
		    restart_init(what, why) == 0)
			return (0);
		/*
		 * Since we didn't or couldn't restart init, we clear
		 * the zone's init state and proceed with exit
		 * processing.
		 */
		z->zone_proc_initpid = -1;
	}

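	/*
	 * Release any machine-dependent pcb state still held by this lwp.
	 */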
	lwp_pcb_exit();

	/*
	 * Allocate a sigqueue now, before we grab locks.
	 * It will be given to sigcld(), below.
	 * Special case:  If we will be making the process disappear
	 * without a trace because it is either:
	 *	* an exiting SSYS process, or
	 *	* a posix_spawn() vfork child who requests it,
	 * we don't bother to allocate a useless sigqueue.
	 */
	evaporate = (p->p_flag & SSYS) || ((p->p_flag & SVFORK) &&
	    why == CLD_EXITED && what == _EVAPORATE);
	if (!evaporate)
		sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);

	/*
	 * Revoke any doors created by the process.
	 */
	if (p->p_door_list)
		door_exit();

	/*
	 * Release schedctl data structures.
	 */
	if (p->p_pagep)
		schedctl_proc_cleanup();

	/*
	 * Make sure all pending kaio has completed.
	 */
	if (p->p_aio)
		aio_cleanup_exit();

	/*
	 * Discard the lwpchan cache.
	 */
	if (p->p_lcp != NULL)
		lwpchan_destroy_cache(0);

	/*
	 * Clean up any DTrace helper actions or probes for the process.
	 */
	if (p->p_dtrace_helpers != NULL) {
		ASSERT(dtrace_helpers_cleanup != NULL);
		(*dtrace_helpers_cleanup)();
	}

	/* untimeout the realtime timers */
	if (p->p_itimer != NULL)
		timer_exit();

	if ((tmp_id = p->p_alarmid) != 0) {
		p->p_alarmid = 0;
		(void) untimeout(tmp_id);
	}

	/*
	 * Remove any fpollinfo_t's for this (last) thread from our file
	 * descriptors so closeall() can ASSERT() that they're all gone.
	 */
	pollcleanup();

	if (p->p_rprof_cyclic != CYCLIC_NONE) {
		mutex_enter(&cpu_lock);
		cyclic_remove(p->p_rprof_cyclic);
		mutex_exit(&cpu_lock);
	}

	mutex_enter(&p->p_lock);

	/*
	 * Clean up any DTrace probes associated with this process.
	 */
	if (p->p_dtrace_probes) {
		ASSERT(dtrace_fasttrap_exit_ptr != NULL);
		dtrace_fasttrap_exit_ptr(p);
	}

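	/*
	 * untimeout() waits for a running timeout handler to finish, and
	 * the handler may itself need p_lock, so drop p_lock around the
	 * call and then re-check under the lock.
	 */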
	while ((tmp_id = p->p_itimerid) != 0) {
		p->p_itimerid = 0;
		mutex_exit(&p->p_lock);
		(void) untimeout(tmp_id);
		mutex_enter(&p->p_lock);
	}

	lwp_cleanup();

	/*
	 * We are about to exit; prevent our resource associations from
	 * being changed.
	 */
	pool_barrier_enter();

	/*
	 * Block the process against /proc now that we have really
	 * acquired p->p_lock (to manipulate p_tlist at least).
	 */
	prbarrier(p);

	sigfillset(&p->p_ignore);
	sigemptyset(&p->p_siginfo);
	sigemptyset(&p->p_sig);
	sigemptyset(&p->p_extsig);
	sigemptyset(&t->t_sig);
	sigemptyset(&t->t_extsig);
	sigemptyset(&p->p_sigmask);
	sigdelq(p, t, 0);
	lwp->lwp_cursig = 0;
	lwp->lwp_extsig = 0;
	p->p_flag &= ~(SKILLED | SEXTKILLED);
	if (lwp->lwp_curinfo) {
		siginfofree(lwp->lwp_curinfo);
		lwp->lwp_curinfo = NULL;
	}

	t->t_proc_flag |= TP_LWPEXIT;
	ASSERT(p->p_lwpcnt == 1 && p->p_zombcnt == 0);
	prlwpexit(t);		/* notify /proc */
	lwp_hash_out(p, t->t_tid);
	prexit(p);

	p->p_lwpcnt = 0;
	p->p_tlist = NULL;
	sigqfree(p);
	term_mstate(t);
	p->p_mterm = gethrtime();

	exec_vp = p->p_exec;
	execdir_vp = p->p_execdir;
	p->p_exec = NULLVP;
	p->p_execdir = NULLVP;
	mutex_exit(&p->p_lock);

	pr_free_watched_pages(p);

	closeall(P_FINFO(p));

	/* Free the controlling tty.  (freectty() always assumes curproc.) */
	ASSERT(p == curproc);
	(void) freectty(B_TRUE);

#if defined(__sparc)
	if (p->p_utraps != NULL)
		utrap_free(p);
#endif
	if (p->p_semacct)			/* IPC semaphore exit */
		semexit(p);
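
	/*
	 * Compose the wait(2) status word from the exit reason (why)
	 * and exit status or signal number (what).
	 */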
	rv = wstat(why, what);

	acct(rv & 0xff);
	exacct_commit_proc(p, rv);

	/*
	 * Release any resources associated with C2 auditing
	 */
	if (AU_AUDITING()) {
		/*
		 * audit exit system call
		 */
		audit_exit(why, what);
	}

	/*
	 * Free address space.
	 */
	relvm();

	if (exec_vp) {
		/*
		 * Close this executable which has been opened when the process
		 * was created by getproc().
		 */
		(void) VOP_CLOSE(exec_vp, FREAD, 1, (offset_t)0, CRED(), NULL);
		VN_RELE(exec_vp);
	}
	if (execdir_vp)
		VN_RELE(execdir_vp);

	/*
	 * Release held contracts.
	 */
	contract_exit(p);

	/*
	 * Depart our encapsulating process contract.
	 */
	if ((p->p_flag & SSYS) == 0) {
		ASSERT(p->p_ct_process);
		contract_process_exit(p->p_ct_process, p, rv);
	}

	/*
	 * Remove pool association, and block if requested by pool_do_bind.
	 */
	mutex_enter(&p->p_lock);
	ASSERT(p->p_pool->pool_ref > 0);
	atomic_add_32(&p->p_pool->pool_ref, -1);
	p->p_pool = pool_default;
	/*
	 * Now that our address space has been freed and all other threads
	 * in this process have exited, set the PEXITED pool flag.  This
	 * tells the pools subsystem to ignore this process if a request
	 * was made to rebind it to a new pool.
	 */
	p->p_poolflag |= PEXITED;
	pool_barrier_exit();
	mutex_exit(&p->p_lock);

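	/*
	 * pidlock serializes changes to the process tree; hold it while
	 * we reparent our orphans and children and become a zombie.
	 */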
	mutex_enter(&pidlock);

	/*
	 * Delete this process from the newstate list of its parent.
	 * sigcld(), below, will put it in the right place.
	 */
	delete_ns(p->p_parent, p);

	/*
	 * Reassign the orphans to the next of kin.
	 * Don't rearrange init's orphanage.
	 */
	if ((q = p->p_orphan) != NULL && p != proc_init) {

		proc_t *nokp = p->p_nextofkin;

		for (;;) {
			q->p_nextofkin = nokp;
			if (q->p_nextorph == NULL)
				break;
			q = q->p_nextorph;
		}
		q->p_nextorph = nokp->p_orphan;
		nokp->p_orphan = p->p_orphan;
		p->p_orphan = NULL;
	}

	/*
	 * Reassign the children to init.
	 * Don't try to assign init's children to init.
	 */
	if ((q = p->p_child) != NULL && p != proc_init) {
		struct proc	*np;
		struct proc	*initp = proc_init;
		boolean_t	setzonetop = B_FALSE;

		if (!INGLOBALZONE(curproc))
			setzonetop = B_TRUE;

		pgdetach(p);

		do {
			np = q->p_sibling;
			/*
			 * Delete it from its current parent's newstate
			 * list and add it to init's newstate list.
			 */
			delete_ns(q->p_parent, q);

			q->p_ppid = 1;
			q->p_pidflag &= ~(CLDNOSIGCHLD | CLDWAITPID);
			if (setzonetop) {
				mutex_enter(&q->p_lock);
				q->p_flag |= SZONETOP;
				mutex_exit(&q->p_lock);
			}
			q->p_parent = initp;

			/*
			 * Since q will be the first child,
			 * it will not have a previous sibling.
			 */
			q->p_psibling = NULL;
			if (initp->p_child) {
				initp->p_child->p_psibling = q;
			}
			q->p_sibling = initp->p_child;
			initp->p_child = q;
			if (q->p_proc_flag & P_PR_PTRACE) {
				mutex_enter(&q->p_lock);
				sigtoproc(q, NULL, SIGKILL);
				mutex_exit(&q->p_lock);
			}
			/*
			 * sigcld() will add the child to the parent's
			 * newstate list.
			 */
			if (q->p_stat == SZOMB)
				sigcld(q, NULL);
		} while ((q = np) != NULL);

		p->p_child = NULL;
		ASSERT(p->p_child_ns == NULL);
	}

	TRACE_1(TR_FAC_PROC, TR_PROC_EXIT, "proc_exit: %p", p);

	mutex_enter(&p->p_lock);
	CL_EXIT(curthread); /* tell the scheduler that curthread is exiting */

	/*
	 * Have our task accumulate our resource usage data before they
	 * become contaminated by p_cacct etc., and before we renounce
	 * membership of the task.
	 *
	 * We do this regardless of whether or not task accounting is active.
	 * This is to avoid having nonsense data reported for this task if
	 * task accounting is subsequently enabled. The overhead is minimal;
	 * by this point, this process has accounted for the usage of all its
	 * LWPs. We nonetheless do the work here, and under the protection of
	 * pidlock, so that the movement of the process's usage to the task
	 * happens at the same time as the removal of the process from the
	 * task, from the point of view of exacct_snapshot_task_usage().
	 */
	exacct_update_task_mstate(p);

	hrutime = mstate_aggr_state(p, LMS_USER);
	hrstime = mstate_aggr_state(p, LMS_SYSTEM);
	p->p_utime = (clock_t)NSEC_TO_TICK(hrutime) + p->p_cutime;
	p->p_stime = (clock_t)NSEC_TO_TICK(hrstime) + p->p_cstime;

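	/*
	 * Fold the accumulated usage of our deceased children (p_cacct,
	 * p_cru) into our own totals for the final accounting record.
	 */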
	p->p_acct[LMS_USER]	+= p->p_cacct[LMS_USER];
	p->p_acct[LMS_SYSTEM]	+= p->p_cacct[LMS_SYSTEM];
	p->p_acct[LMS_TRAP]	+= p->p_cacct[LMS_TRAP];
	p->p_acct[LMS_TFAULT]	+= p->p_cacct[LMS_TFAULT];
	p->p_acct[LMS_DFAULT]	+= p->p_cacct[LMS_DFAULT];
	p->p_acct[LMS_KFAULT]	+= p->p_cacct[LMS_KFAULT];
	p->p_acct[LMS_USER_LOCK] += p->p_cacct[LMS_USER_LOCK];
	p->p_acct[LMS_SLEEP]	+= p->p_cacct[LMS_SLEEP];
	p->p_acct[LMS_WAIT_CPU]	+= p->p_cacct[LMS_WAIT_CPU];
	p->p_acct[LMS_STOPPED]	+= p->p_cacct[LMS_STOPPED];

	p->p_ru.minflt	+= p->p_cru.minflt;
	p->p_ru.majflt	+= p->p_cru.majflt;
	p->p_ru.nswap	+= p->p_cru.nswap;
	p->p_ru.inblock	+= p->p_cru.inblock;
	p->p_ru.oublock	+= p->p_cru.oublock;
	p->p_ru.msgsnd	+= p->p_cru.msgsnd;
	p->p_ru.msgrcv	+= p->p_cru.msgrcv;
	p->p_ru.nsignals += p->p_cru.nsignals;
	p->p_ru.nvcsw	+= p->p_cru.nvcsw;
	p->p_ru.nivcsw	+= p->p_cru.nivcsw;
	p->p_ru.sysc	+= p->p_cru.sysc;
	p->p_ru.ioch	+= p->p_cru.ioch;

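	/*
	 * The process is now a zombie: record the wait(2) data for the
	 * parent to collect and drop any lingering ptrace indication.
	 */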
	p->p_stat = SZOMB;
	p->p_proc_flag &= ~P_PR_PTRACE;
	p->p_wdata = what;
	p->p_wcode = (char)why;

	cdir = PTOU(p)->u_cdir;
	rdir = PTOU(p)->u_rdir;
	cwd = PTOU(p)->u_cwd;

	ASSERT(cdir != NULL || p->p_parent == &p0);

	/*
	 * Release resource controls, as they are no longer enforceable.
	 */
	rctl_set_free(p->p_rctls);

	/*
	 * Decrement tk_nlwps counter for our task.max-lwps resource control.
	 * An extended accounting record, if that facility is active, is
	 * scheduled to be written.  We cannot give up task and project
	 * membership at this point because that would allow zombies to escape
	 * from the max-processes resource controls.  Zombies stay in their
	 * current task and project until the process table slot is released
	 * in freeproc().
	 */
	tk = p->p_task;

	mutex_enter(&p->p_zone->zone_nlwps_lock);
	tk->tk_nlwps--;
	tk->tk_proj->kpj_nlwps--;
	p->p_zone->zone_nlwps--;
	mutex_exit(&p->p_zone->zone_nlwps_lock);

	/*
	 * Clear the lwp directory and the lwpid hash table
	 * now that /proc can't bother us any more.
	 * We free the memory below, after dropping p->p_lock.
	 */
	lwpdir = p->p_lwpdir;
	lwpdir_sz = p->p_lwpdir_sz;
	tidhash = p->p_tidhash;
	tidhash_sz = p->p_tidhash_sz;
	ret_tidhash = p->p_ret_tidhash;
	p->p_lwpdir = NULL;
	p->p_lwpfree = NULL;
	p->p_lwpdir_sz = 0;
	p->p_tidhash = NULL;
	p->p_tidhash_sz = 0;
	p->p_ret_tidhash = NULL;

	/*
	 * If the process has context ops installed, call the exit routine
	 * on behalf of this last remaining thread. Normally exitpctx() is
	 * called during thread_exit() or lwp_exit(), but because this is the
	 * last thread in the process, we must call it here. By the time
	 * thread_exit() is called (below), the association with the relevant
	 * process has been lost.
	 *
	 * We also free the context here.
	 */
	if (p->p_pctx) {
		kpreempt_disable();
		exitpctx(p);
		kpreempt_enable();

		freepctx(p, 0);
	}

	/*
	 * curthread's proc pointer is changed to point to the 'sched'
	 * process for the corresponding zone, except in the case when
	 * the exiting process is in fact a zsched instance, in which
	 * case the proc pointer is set to p0.  We do this so that the
	 * process still points at the right zone when we call VN_RELE()
	 * below.
	 *
	 * This is because curthread's original proc pointer can be freed as
	 * soon as the child sends a SIGCLD to its parent.  We use zsched so
	 * that for user processes, even in the final moments of death, the
	 * process is still associated with its zone.
	 */
	if (p != t->t_procp->p_zone->zone_zsched)
		t->t_procp = t->t_procp->p_zone->zone_zsched;
	else
		t->t_procp = &p0;

	mutex_exit(&p->p_lock);
	if (!evaporate) {
		p->p_pidflag &= ~CLDPEND;
		sigcld(p, sqp);
	} else {
		/*
		 * Do what sigcld() would do if the disposition
		 * of the SIGCHLD signal were set to be ignored.
		 */
		cv_broadcast(&p->p_srwchan_cv);
		freeproc(p);
	}
	mutex_exit(&pidlock);

	/*
	 * We don't release u_cdir and u_rdir until SZOMB is set.
	 * This protects us against dofusers().
	 */
	if (cdir)
		VN_RELE(cdir);
	if (rdir)
		VN_RELE(rdir);
	if (cwd)
		refstr_rele(cwd);

	/*
	 * task_rele() may ultimately cause the zone to go away (or
	 * may cause the last user process in a zone to go away, which
	 * signals zsched to go away).  So prior to this call, we must
	 * no longer point at zsched.
	 */
	t->t_procp = &p0;

	kmem_free(lwpdir, lwpdir_sz * sizeof (lwpdir_t));
	kmem_free(tidhash, tidhash_sz * sizeof (tidhash_t));
	while (ret_tidhash != NULL) {
		ret_tidhash_t *next = ret_tidhash->rth_next;
		kmem_free(ret_tidhash->rth_tidhash,
		    ret_tidhash->rth_tidhash_sz * sizeof (tidhash_t));
		kmem_free(ret_tidhash, sizeof (*ret_tidhash));
		ret_tidhash = next;
	}

	thread_exit();
	/* NOTREACHED */
}
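
A minimal sketch of how a caller might honor proc_exit()'s return contract,
per the comment at the top of this example.  The wrapper below is
illustrative only, not the kernel's actual exit() implementation:

	void
	example_exit(int why, int what)
	{
		/*
		 * Nonzero: exitlwps() failed because another lwp is already
		 * exiting the process, so finish by exiting this lwp alone.
		 * Zero: init is being restarted; return through the system
		 * call path.
		 */
		if (proc_exit(why, what) != 0) {
			mutex_enter(&curproc->p_lock);
			lwp_exit();
			/* NOTREACHED */
		}
	}
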
Example #2
/*
 * System call to create an lwp.
 *
 * Notes on the LWP_DETACHED and LWP_DAEMON flags:
 *
 * A detached lwp (LWP_DETACHED) cannot be the specific target of
 * lwp_wait() (it is not joinable), but lwp_wait(0, ...) is required
 * to sleep until all non-daemon detached lwps have terminated before
 * returning EDEADLK because a detached lwp might create a non-detached lwp
 * that could then be returned by lwp_wait(0, ...).  See also lwp_detach().
 *
 * A daemon lwp (LWP_DAEMON) is a detached lwp that has the additional
 * property that it does not affect the termination condition of the
 * process:  The last non-daemon lwp to call lwp_exit() causes the process
 * to exit and lwp_wait(0, ...) does not sleep waiting for daemon lwps
 * to terminate.  See the block comment before lwp_wait().
 */
int
syslwp_create(ucontext_t *ucp, int flags, id_t *new_lwp)
{
	klwp_t *lwp;
	proc_t *p = ttoproc(curthread);
	kthread_t *t;
	ucontext_t uc;
#ifdef _SYSCALL32_IMPL
	ucontext32_t uc32;
#endif /* _SYSCALL32_IMPL */
	k_sigset_t sigmask;
	int	tid;
	model_t model = get_udatamodel();
	uintptr_t thrptr = 0;

	if (flags & ~(LWP_DAEMON|LWP_DETACHED|LWP_SUSPENDED))
		return (set_errno(EINVAL));

	/*
	 * lwp_create() is disallowed for the /proc agent lwp.
	 */
	if (curthread == p->p_agenttp)
		return (set_errno(ENOTSUP));

	if (model == DATAMODEL_NATIVE) {
		if (copyin(ucp, &uc, sizeof (ucontext_t)))
			return (set_errno(EFAULT));
		sigutok(&uc.uc_sigmask, &sigmask);
#if defined(__i386)
		/*
		 * libc stashed thrptr into unused kernel %sp.
		 * See setup_context() in libc.
		 */
		thrptr = (uint32_t)uc.uc_mcontext.gregs[ESP];
#endif
	}
#ifdef _SYSCALL32_IMPL
	else {
		if (copyin(ucp, &uc32, sizeof (ucontext32_t)))
			return (set_errno(EFAULT));
		sigutok(&uc32.uc_sigmask, &sigmask);
#if defined(__sparc)
		ucontext_32ton(&uc32, &uc, NULL, NULL);
#else	/* __amd64 */
		ucontext_32ton(&uc32, &uc);
		/*
		 * libc stashed thrptr into unused kernel %sp.
		 * See setup_context() in libc.
		 */
		thrptr = (uint32_t)uc32.uc_mcontext.gregs[ESP];
#endif
	}
#endif /* _SYSCALL32_IMPL */

	(void) save_syscall_args();	/* save args for tracing first */

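	/*
	 * Enter the pool barrier so that our resource pool associations
	 * cannot change while the new lwp is being created.
	 */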
	mutex_enter(&curproc->p_lock);
	pool_barrier_enter();
	mutex_exit(&curproc->p_lock);
	lwp = lwp_create(lwp_rtt, NULL, NULL, curproc, TS_STOPPED,
	    curthread->t_pri, &sigmask, curthread->t_cid, 0);
	mutex_enter(&curproc->p_lock);
	pool_barrier_exit();
	mutex_exit(&curproc->p_lock);
	if (lwp == NULL)
		return (set_errno(EAGAIN));

	lwp_load(lwp, uc.uc_mcontext.gregs, thrptr);

	t = lwptot(lwp);
	/*
	 * Copy the new lwp's lwpid into the caller's specified buffer.
	 */
	if (new_lwp && copyout(&t->t_tid, new_lwp, sizeof (id_t))) {
		/*
		 * The caller's buffer is not writable; return
		 * EFAULT and terminate the new lwp.
		 */
		mutex_enter(&p->p_lock);
		t->t_proc_flag |= TP_EXITLWP;
		t->t_sig_check = 1;
		t->t_sysnum = 0;
		t->t_proc_flag &= ~TP_HOLDLWP;
		lwp_create_done(t);
		mutex_exit(&p->p_lock);
		return (set_errno(EFAULT));
	}

	/*
	 * Clone the caller's context, if any.  This must be done
	 * while -not- holding p_lock.
	 */
	if (curthread->t_ctx)
		lwp_createctx(curthread, t);

	/*
	 * Copy the current contract templates.
	 */
	lwp_ctmpl_copy(lwp, ttolwp(curthread));

	mutex_enter(&p->p_lock);
	/*
	 * Copy the syscall arguments to the new lwp's arg area
	 * for the benefit of debuggers.
	 */
	t->t_sysnum = SYS_lwp_create;
	lwp->lwp_ap = lwp->lwp_arg;
	lwp->lwp_arg[0] = (long)ucp;
	lwp->lwp_arg[1] = (long)flags;
	lwp->lwp_arg[2] = (long)new_lwp;
	lwp->lwp_argsaved = 1;

	if (!(flags & (LWP_DETACHED|LWP_DAEMON)))
		t->t_proc_flag |= TP_TWAIT;
	if (flags & LWP_DAEMON) {
		t->t_proc_flag |= TP_DAEMON;
		p->p_lwpdaemon++;
	}

	tid = (int)t->t_tid;	/* for /proc debuggers */

	/*
	 * We now set the newly-created lwp running.
	 * If it is being created as LWP_SUSPENDED, we leave its
	 * TP_HOLDLWP flag set so it will stop in system call exit.
	 */
	if (!(flags & LWP_SUSPENDED))
		t->t_proc_flag &= ~TP_HOLDLWP;
	lwp_create_done(t);
	mutex_exit(&p->p_lock);

	return (tid);
}
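
A user-level sketch of driving this system call: capture a ucontext, give
it a stack and a start routine, then trap into the kernel.  The
__syslwp_create() stub and EXAMPLE_STACKSIZE are hypothetical names used
for illustration; real libc reaches this code via thr_create() and
setup_context():

	#include <stdlib.h>
	#include <ucontext.h>
	#include <sys/lwp.h>

	#define	EXAMPLE_STACKSIZE	(64 * 1024)

	/* Hypothetical trap stub; returns the new lwpid or -1 on error. */
	extern id_t __syslwp_create(ucontext_t *, int, id_t *);

	id_t
	example_lwp_spawn(void (*start)(void))
	{
		ucontext_t uc;
		id_t tid;

		(void) getcontext(&uc);		/* inherit our signal mask */
		if ((uc.uc_stack.ss_sp = malloc(EXAMPLE_STACKSIZE)) == NULL)
			return (-1);
		uc.uc_stack.ss_size = EXAMPLE_STACKSIZE;
		uc.uc_link = NULL;
		makecontext(&uc, start, 0);	/* new lwp begins in start() */

		/* LWP_DETACHED: not joinable; see the block comment above. */
		if (__syslwp_create(&uc, LWP_DETACHED, &tid) == -1)
			return (-1);
		return (tid);
	}
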
Example #3
/* ARGSUSED */
static int64_t
cfork(int isvfork, int isfork1, int flags)
{
	proc_t *p = ttoproc(curthread);
	struct as *as;
	proc_t *cp, **orphpp;
	klwp_t *clone;
	kthread_t *t;
	task_t *tk;
	rval_t	r;
	int error;
	int i;
	rctl_set_t *dup_set;
	rctl_alloc_gp_t *dup_gp;
	rctl_entity_p_t e;
	lwpdir_t *ldp;
	lwpent_t *lep;
	lwpent_t *clep;

	/*
	 * Allow only these two flags.
	 */
	if ((flags & ~(FORK_NOSIGCHLD | FORK_WAITPID)) != 0) {
		error = EINVAL;
		goto forkerr;
	}

	/*
	 * fork is not supported for the /proc agent lwp.
	 */
	if (curthread == p->p_agenttp) {
		error = ENOTSUP;
		goto forkerr;
	}

	if ((error = secpolicy_basic_fork(CRED())) != 0)
		goto forkerr;

	/*
	 * If the calling lwp is doing a fork1() then the
	 * other lwps in this process are not duplicated and
	 * don't need to be held where their kernel stacks can be
	 * cloned.  If doing forkall(), the process is held with
	 * SHOLDFORK, so that the lwps are at a point where their
	 * stacks can be copied, which is on entry to or exit from
	 * the kernel.
	 */
	if (!holdlwps(isfork1 ? SHOLDFORK1 : SHOLDFORK)) {
		aston(curthread);
		error = EINTR;
		goto forkerr;
	}

#if defined(__sparc)
	/*
	 * Ensure that the user stack is fully constructed
	 * before creating the child process structure.
	 */
	(void) flush_user_windows_to_stack(NULL);
#endif

	mutex_enter(&p->p_lock);
	/*
	 * If this is vfork(), cancel any suspend request we might
	 * have gotten from some other thread via lwp_suspend().
	 * Otherwise we could end up with a deadlock on return
	 * from the vfork() in both the parent and the child.
	 */
	if (isvfork)
		curthread->t_proc_flag &= ~TP_HOLDLWP;
	/*
	 * Prevent our resource set associations from being changed
	 * during fork.
	 */
	pool_barrier_enter();
	mutex_exit(&p->p_lock);

	/*
	 * Create a child proc struct. Place a VN_HOLD on appropriate vnodes.
	 */
	if (getproc(&cp, 0) < 0) {
		mutex_enter(&p->p_lock);
		pool_barrier_exit();
		continuelwps(p);
		mutex_exit(&p->p_lock);
		error = EAGAIN;
		goto forkerr;
	}

	TRACE_2(TR_FAC_PROC, TR_PROC_FORK, "proc_fork:cp %p p %p", cp, p);

	/*
	 * Assign an address space to the child.
	 */
	if (isvfork) {
		/*
		 * Clear any watched areas and remember the
		 * watched pages for restoring in vfwait().
		 */
		as = p->p_as;
		if (avl_numnodes(&as->a_wpage) != 0) {
			AS_LOCK_ENTER(as, &as->a_lock, RW_WRITER);
			as_clearwatch(as);
			p->p_wpage = as->a_wpage;
			avl_create(&as->a_wpage, wp_compare,
			    sizeof (struct watched_page),
			    offsetof(struct watched_page, wp_link));
			AS_LOCK_EXIT(as, &as->a_lock);
		}
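		/*
		 * The vfork child borrows the parent's address space
		 * until it calls exec() or exits; see vfwait().
		 */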
		cp->p_as = as;
		cp->p_flag |= SVFORK;
	} else {