static void
bcmmdio_ccb_attach(device_t parent, device_t self, void *aux)
{
	struct bcmmdio_softc * const sc = device_private(self);
	struct bcmccb_attach_args * const ccbaa = aux;
	const struct bcm_locators * const loc = &ccbaa->ccbaa_loc;

	sc->sc_dev = self;

	sc->sc_bst = ccbaa->ccbaa_ccb_bst;
	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_VM);

	bus_space_subregion(sc->sc_bst, ccbaa->ccbaa_ccb_bsh,
	    loc->loc_offset, loc->loc_size, &sc->sc_bsh);

	uint32_t miimgt = bcmmdio_read_4(sc, MIIMGT);
	uint32_t div = __SHIFTOUT(miimgt, MIIMGT_MDCDIV);
	if (div == 0) {
		div = 33;			/* divisor to get 2 MHz */
		miimgt &= ~MIIMGT_MDCDIV;
		miimgt |= __SHIFTIN(div, MIIMGT_MDCDIV);
		bcmmdio_write_4(sc, MIIMGT, miimgt);
	}
	uint32_t freq = 66000000 / div;

	aprint_naive("\n");
	aprint_normal(": MDIO bus @ %u MHz\n", freq / 1000000);
}
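
The MDC divider arithmetic above: the management clock is derived from a
66 MHz reference, so MDC = 66 MHz / divisor, and the fallback divisor of
33 gives the 2 MHz noted in the comment.  Below is a minimal sketch of
the same __SHIFTIN/__SHIFTOUT read-modify-write idiom, assuming the
standard NetBSD bitfield macros from <sys/param.h>; EXAMPLE_DIV is a
hypothetical field mask used for illustration only.

#include <sys/param.h>

#define EXAMPLE_DIV	__BITS(6, 0)	/* hypothetical 7-bit divisor field */

static uint32_t
example_set_div(uint32_t reg, uint32_t div)
{
	/* Rewrite the field only if it is currently unprogrammed. */
	if (__SHIFTOUT(reg, EXAMPLE_DIV) == 0) {
		reg &= ~EXAMPLE_DIV;			/* clear the field */
		reg |= __SHIFTIN(div, EXAMPLE_DIV);	/* insert new value */
	}
	return reg;
}
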
Example #2
void
rump_scheduler_init(int numcpu)
{
	struct rumpcpu *rcpu;
	struct cpu_info *ci;
	int i;

	rumpuser_mutex_init(&lwp0mtx, RUMPUSER_MTX_SPIN);
	rumpuser_cv_init(&lwp0cv);
	for (i = 0; i < numcpu; i++) {
		if (i == 0) {
			/* The boot CPU's cpu_info is statically allocated. */
			ci = &rump_bootcpu;
		} else {
			ci = kmem_zalloc(sizeof(*ci), KM_SLEEP);
			ci->ci_index = i;
		}

		rcpu = &rcpu_storage[i];
		rcpu->rcpu_ci = ci;
		rcpu->rcpu_wanted = 0;
		rumpuser_cv_init(&rcpu->rcpu_cv);
		rumpuser_mutex_init(&rcpu->rcpu_mtx, RUMPUSER_MTX_SPIN);

		ci->ci_schedstate.spc_mutex =
		    mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
		ci->ci_schedstate.spc_flags = SPCF_RUNNING;
	}

	mutex_init(&unruntime_lock, MUTEX_DEFAULT, IPL_SCHED);
}
Example #3
/*
 * sleeptab_init:
 *
 *	Initialize a sleep table.
 */
void
sleeptab_init(sleeptab_t *st)
{
	sleepq_t *sq;
	int i;

	for (i = 0; i < SLEEPTAB_HASH_SIZE; i++) {
		sq = &st->st_queues[i].st_queue;
		st->st_queues[i].st_mutex =
		    mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
		sleepq_init(sq);
	}
}
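
sleeptab_init() pairs each of the SLEEPTAB_HASH_SIZE sleep queues with
its own IPL_SCHED mutex.  A hedged sketch of how a lookup over this
table typically works follows; the hash shown (pointer bits folded into
a bucket index) is illustrative, not necessarily the kernel's exact
SLEEPTAB_HASH.

static sleepq_t *
example_sleeptab_lookup(sleeptab_t *st, wchan_t wchan, kmutex_t **mp)
{
	/* Illustrative hash: fold the wait channel address. */
	u_int hash = ((uintptr_t)wchan >> 8) % SLEEPTAB_HASH_SIZE;

	*mp = st->st_queues[hash].st_mutex;
	mutex_spin_enter(*mp);		/* bucket lock is a spin mutex */
	return &st->st_queues[hash].st_queue;
}
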
/*
 * callout_startup:
 *
 *	Initialize the callout facility, called at system startup time.
 *	Do just enough to allow callouts to be safely registered.
 */
void
callout_startup(void)
{
	struct callout_cpu *cc;
	int b;

	KASSERT(curcpu()->ci_data.cpu_callout == NULL);

	cc = &callout_cpu0;
	cc->cc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
	CIRCQ_INIT(&cc->cc_todo);
	for (b = 0; b < BUCKETS; b++)
		CIRCQ_INIT(&cc->cc_wheel[b]);
	curcpu()->ci_data.cpu_callout = cc;
}
/*
 * callout_init_cpu:
 *
 *	Per-CPU initialization.
 */
void
callout_init_cpu(struct cpu_info *ci)
{
	struct callout_cpu *cc;
	int b;

	CTASSERT(sizeof(callout_impl_t) <= sizeof(callout_t));

	if ((cc = ci->ci_data.cpu_callout) == NULL) {
		cc = kmem_zalloc(sizeof(*cc), KM_SLEEP);
		if (cc == NULL)
			panic("callout_init_cpu (1)");
		cc->cc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
		CIRCQ_INIT(&cc->cc_todo);
		for (b = 0; b < BUCKETS; b++)
			CIRCQ_INIT(&cc->cc_wheel[b]);
	} else {
		/* Boot CPU, one time only. */
		callout_sih = softint_establish(SOFTINT_CLOCK | SOFTINT_MPSAFE,
		    callout_softclock, NULL);
		if (callout_sih == NULL)
			panic("callout_init_cpu (2)");
	}

	sleepq_init(&cc->cc_sleepq);

	snprintf(cc->cc_name1, sizeof(cc->cc_name1), "late/%u",
	    cpu_index(ci));
	evcnt_attach_dynamic(&cc->cc_ev_late, EVCNT_TYPE_MISC,
	    NULL, "callout", cc->cc_name1);

	snprintf(cc->cc_name2, sizeof(cc->cc_name2), "wait/%u",
	    cpu_index(ci));
	evcnt_attach_dynamic(&cc->cc_ev_block, EVCNT_TYPE_MISC,
	    NULL, "callout", cc->cc_name2);

	ci->ci_data.cpu_callout = cc;
}
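
Once the per-CPU state exists, consumers see only the callout_t
interface.  A sketch of typical periodic usage against the standard
callout(9) API:

#include <sys/callout.h>
#include <sys/kernel.h>			/* for hz */

static callout_t example_co;

static void
example_tick(void *arg)
{
	/* ... periodic work ... */
	callout_schedule(&example_co, hz);	/* re-arm one second out */
}

static void
example_start(void)
{
	callout_init(&example_co, CALLOUT_MPSAFE);
	callout_setfunc(&example_co, example_tick, NULL);
	callout_schedule(&example_co, hz);
}

A consumer tears this down with callout_halt() and callout_destroy()
when it detaches.
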
Example #6
void
rump_scheduler_init(int numcpu)
{
	struct rumpcpu *rcpu;
	struct cpu_info *ci;
	int i;

	rumpuser_mutex_init(&lwp0mtx, RUMPUSER_MTX_SPIN);
	rumpuser_cv_init(&lwp0cv);
	for (i = 0; i < numcpu; i++) {
		rcpu = &rcpu_storage[i];
		ci = &rump_cpus[i];
		rcpu->rcpu_ci = ci;
		ci->ci_schedstate.spc_mutex =
		    mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
		ci->ci_schedstate.spc_flags = SPCF_RUNNING;
		rcpu->rcpu_wanted = 0;
		rumpuser_cv_init(&rcpu->rcpu_cv);
		rumpuser_mutex_init(&rcpu->rcpu_mtx, RUMPUSER_MTX_SPIN);
	}

	mutex_init(&unruntime_lock, MUTEX_DEFAULT, IPL_SCHED);
}
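
A heavily hedged sketch of how the per-virtual-CPU state initialized
above can gate entry to a CPU using the rumpuser(3) hypercall
primitives.  The protocol shown is illustrative only: the real rump
scheduler uses a lock-free fastpath, and rcpu_wanted is treated here as
a simple busy flag.

static void
example_wait_for_cpu(struct rumpcpu *rcpu)
{
	rumpuser_mutex_enter(rcpu->rcpu_mtx);
	while (rcpu->rcpu_wanted)		/* sketch: CPU is busy */
		rumpuser_cv_wait(rcpu->rcpu_cv, rcpu->rcpu_mtx);
	rcpu->rcpu_wanted = 1;			/* sketch: claim the CPU */
	rumpuser_mutex_exit(rcpu->rcpu_mtx);
}
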
void
sscom_attach_subr(struct sscom_softc *sc)
{
	int unit = sc->sc_unit;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct tty *tp;

	callout_init(&sc->sc_diag_callout, 0);
#if (defined(MULTIPROCESSOR) || defined(LOCKDEBUG)) && defined(SSCOM_MPLOCK)
	sc->sc_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SERIAL);
#endif

	sc->sc_ucon = UCON_RXINT_ENABLE|UCON_TXINT_ENABLE;

	/*
	 * set default for modem control hook
	 */
	if (sc->sc_set_modem_control == NULL)
		sc->sc_set_modem_control = sscom_set_modem_control;
	if (sc->sc_read_modem_status == NULL)
		sc->sc_read_modem_status = sscom_read_modem_status;

	/* Disable interrupts before configuring the device. */
	KASSERT(sc->sc_change_txrx_interrupts != NULL);
	KASSERT(sc->sc_clear_interrupts != NULL);
	sscom_disable_txrxint(sc);

#ifdef KGDB
	/*
	 * Allow kgdb to "take over" this port.  If this is
	 * the kgdb device, it has exclusive use.
	 */
	if (unit == sscom_kgdb_unit) {
		SET(sc->sc_hwflags, SSCOM_HW_KGDB);
		sc->sc_ucon = UCON_DEBUGPORT;
	}
#endif

	if (unit == sscomconsunit) {
		uint32_t stat;
		int timo;

		sscomconsattached = 1;
		sscomconstag = iot;
		sscomconsioh = ioh;

		/* wait for this transmission to complete */
		timo = 1500000;
		do {
			stat = bus_space_read_4(iot, ioh, SSCOM_UTRSTAT);
		} while ((stat & UTRSTAT_TXEMPTY) == 0 && --timo > 0);

		/* Make sure the console is always "hardwired". */
		SET(sc->sc_hwflags, SSCOM_HW_CONSOLE);
		SET(sc->sc_swflags, TIOCFLAG_SOFTCAR);

		sc->sc_ucon = UCON_DEBUGPORT;
	}

	/* set RX/TX trigger to half values */
	bus_space_write_4(iot, ioh, SSCOM_UFCON,
	    __SHIFTIN(4, UFCON_TXTRIGGER) |
	    __SHIFTIN(4, UFCON_RXTRIGGER) |
	    UFCON_FIFO_ENABLE |
	    UFCON_TXFIFO_RESET |
	    UFCON_RXFIFO_RESET);
	/* tx/rx fifo reset are auto-cleared */

	bus_space_write_4(iot, ioh, SSCOM_UCON, sc->sc_ucon);

#ifdef KGDB
	if (ISSET(sc->sc_hwflags, SSCOM_HW_KGDB)) {
		sscom_kgdb_attached = 1;
		printf("%s: kgdb\n", device_xname(sc->sc_dev));
		sscom_enable_debugport(sc);
		return;
	}
#endif

	tp = tty_alloc();
	tp->t_oproc = sscomstart;
	tp->t_param = sscomparam;
	tp->t_hwiflow = sscomhwiflow;

	sc->sc_tty = tp;
	/* Each ring slot holds a status byte and a data byte, hence << 1. */
	sc->sc_rbuf = malloc(sscom_rbuf_size << 1, M_DEVBUF, M_NOWAIT);
	if (sc->sc_rbuf == NULL) {
		printf("%s: unable to allocate ring buffer\n",
		    device_xname(sc->sc_dev));
		return;
	}
	sc->sc_ebuf = sc->sc_rbuf + (sscom_rbuf_size << 1);
	sc->sc_rbput = sc->sc_rbget = sc->sc_rbuf;
	sc->sc_rbavail = sscom_rbuf_size;

	tty_attach(tp);

	if (ISSET(sc->sc_hwflags, SSCOM_HW_CONSOLE)) {
		int maj;

		/* locate the major number */
		maj = cdevsw_lookup_major(&sscom_cdevsw);

		cn_tab->cn_dev = makedev(maj, device_unit(sc->sc_dev));

		printf("%s: console (major=%d)\n", device_xname(sc->sc_dev), maj);
	}

	sc->sc_si = softint_establish(SOFTINT_SERIAL, sscomsoft, sc);

#ifdef RND_COM
	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
			  RND_TYPE_TTY, RND_FLAG_DEFAULT);
#endif

	/*
	 * If this is the console, keep the debug port enabled;
	 * otherwise leave tx/rx interrupts disabled until the
	 * device is opened.
	 */
	if (ISSET(sc->sc_hwflags, SSCOM_HW_CONSOLE))
		sscom_enable_debugport(sc);
	else
		sscom_disable_txrxint(sc);

	SET(sc->sc_hwflags, SSCOM_HW_DEV_OK);
}
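
sscom_attach_subr() defers bulk character processing to a soft
interrupt (sscomsoft, established with SOFTINT_SERIAL).  A hedged
sketch of that softint(9) split follows; the example_* names are
hypothetical.

struct example_softc {
	void *sc_si;			/* cookie from softint_establish() */
};

static int
example_intr(void *arg)
{
	struct example_softc *sc = arg;

	/* ... drain the RX FIFO into a ring buffer ... */
	softint_schedule(sc->sc_si);	/* defer heavy work to soft handler */
	return 1;			/* interrupt was ours */
}
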
Example #8
static void
bt_init(void)
{

	bt_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
}
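
bt_lock is allocated once at subsystem initialization.  A sketch of the
full mutex_obj lifecycle per mutex(9): such reference-counted mutexes
may be shared with mutex_obj_hold(), and each reference is released
with mutex_obj_free().

static void
example_mutex_obj_lifecycle(void)
{
	kmutex_t *mtx = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

	mutex_enter(mtx);
	/* ... critical section ... */
	mutex_exit(mtx);

	mutex_obj_hold(mtx);		/* take a second reference */
	mutex_obj_free(mtx);		/* drop one reference */
	mutex_obj_free(mtx);		/* last reference: mutex destroyed */
}
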
Example #9
/*
 * General fork call.  Note that another LWP in the process may call exec()
 * or exit() while we are forking.  It's safe to continue here, because
 * neither operation will complete until all LWPs have exited the process.
 */
int
fork1(struct lwp *l1, int flags, int exitsig, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, register_t *retval,
    struct proc **rnewprocp)
{
	struct proc	*p1, *p2, *parent;
	struct plimit   *p1_lim;
	uid_t		uid;
	struct lwp	*l2;
	int		count;
	vaddr_t		uaddr;
	int		tnprocs;
	int		tracefork;
	int		error = 0;

	p1 = l1->l_proc;
	uid = kauth_cred_getuid(l1->l_cred);
	tnprocs = atomic_inc_uint_nv(&nprocs);

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create.
	 */
	if (__predict_false(tnprocs >= maxproc))
		error = -1;
	else
		error = kauth_authorize_process(l1->l_cred,
		    KAUTH_PROCESS_FORK, p1, KAUTH_ARG(tnprocs), NULL, NULL);

	if (error) {
		static struct timeval lasttfm;
		atomic_dec_uint(&nprocs);
		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("proc", "increase kern.maxproc or NPROC");
		if (forkfsleep)
			kpause("forkmx", false, forkfsleep, NULL);
		return EAGAIN;
	}

	/*
	 * Enforce limits.
	 */
	count = chgproccnt(uid, 1);
	if (__predict_false(count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur)) {
		if (kauth_authorize_process(l1->l_cred, KAUTH_PROCESS_RLIMIT,
		    p1, KAUTH_ARG(KAUTH_REQ_PROCESS_RLIMIT_BYPASS),
		    &p1->p_rlimit[RLIMIT_NPROC], KAUTH_ARG(RLIMIT_NPROC)) != 0) {
			(void)chgproccnt(uid, -1);
			atomic_dec_uint(&nprocs);
			if (forkfsleep)
				kpause("forkulim", false, forkfsleep, NULL);
			return EAGAIN;
		}
	}

	/*
	 * Allocate virtual address space for the U-area now, while it
	 * is still easy to abort the fork operation if we're out of
	 * kernel virtual address space.
	 */
	uaddr = uvm_uarea_alloc();
	if (__predict_false(uaddr == 0)) {
		(void)chgproccnt(uid, -1);
		atomic_dec_uint(&nprocs);
		return ENOMEM;
	}

	/*
	 * We are now committed to the fork.  From here on, we may
	 * block on resources, but resource allocation may NOT fail.
	 */

	/* Allocate new proc. */
	p2 = proc_alloc();

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	memset(&p2->p_startzero, 0,
	    (unsigned) ((char *)&p2->p_endzero - (char *)&p2->p_startzero));
	memcpy(&p2->p_startcopy, &p1->p_startcopy,
	    (unsigned) ((char *)&p2->p_endcopy - (char *)&p2->p_startcopy));

	TAILQ_INIT(&p2->p_sigpend.sp_info);

	LIST_INIT(&p2->p_lwps);
	LIST_INIT(&p2->p_sigwaiters);

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * Inherit flags we want to keep.  The flags related to SIGCHLD
	 * handling are important in order to keep a consistent behaviour
	 * for the child after the fork.  If we are a 32-bit process, the
	 * child will be too.
	 */
	p2->p_flag =
	    p1->p_flag & (PK_SUGID | PK_NOCLDWAIT | PK_CLDSIGIGN | PK_32);
	p2->p_emul = p1->p_emul;
	p2->p_execsw = p1->p_execsw;

	if (flags & FORK_SYSTEM) {
		/*
		 * Mark it as a system process.  Set P_NOCLDWAIT so that
		 * children are reparented to init(8) when they exit.
		 * init(8) can easily wait them out for us.
		 */
		p2->p_flag |= (PK_SYSTEM | PK_NOCLDWAIT);
	}

	mutex_init(&p2->p_stmutex, MUTEX_DEFAULT, IPL_HIGH);
	mutex_init(&p2->p_auxlock, MUTEX_DEFAULT, IPL_NONE);
	rw_init(&p2->p_reflock);
	cv_init(&p2->p_waitcv, "wait");
	cv_init(&p2->p_lwpcv, "lwpwait");

	/*
	 * Share a lock between the processes if they are to share signal
	 * state: we must synchronize access to it.
	 */
	if (flags & FORK_SHARESIGS) {
		p2->p_lock = p1->p_lock;
		mutex_obj_hold(p1->p_lock);
	} else
		p2->p_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

	kauth_proc_fork(p1, p2);

	p2->p_raslist = NULL;
#if defined(__HAVE_RAS)
	ras_fork(p1, p2);
#endif

	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		vref(p2->p_textvp);

	if (flags & FORK_SHAREFILES)
		fd_share(p2);
	else if (flags & FORK_CLEANFILES)
		p2->p_fd = fd_init(NULL);
	else
		p2->p_fd = fd_copy();

	/* XXX racy */
	p2->p_mqueue_cnt = p1->p_mqueue_cnt;

	if (flags & FORK_SHARECWD)
		cwdshare(p2);
	else
		p2->p_cwdi = cwdinit();

	/*
	 * Note: p_limit (rlimit stuff) is copy-on-write, so normally
	 * we just need increase pl_refcnt.
	 */
	p1_lim = p1->p_limit;
	if (!p1_lim->pl_writeable) {
		lim_addref(p1_lim);
		p2->p_limit = p1_lim;
	} else {
		p2->p_limit = lim_copy(p1_lim);
	}

	if (flags & FORK_PPWAIT) {
		/* Mark ourselves as waiting for a child. */
		l1->l_pflag |= LP_VFORKWAIT;
		p2->p_lflag = PL_PPWAIT;
		p2->p_vforklwp = l1;
	} else {
		p2->p_lflag = 0;
	}
	p2->p_sflag = 0;
	p2->p_slflag = 0;
	parent = (flags & FORK_NOWAIT) ? initproc : p1;
	p2->p_pptr = parent;
	p2->p_ppid = parent->p_pid;
	LIST_INIT(&p2->p_children);

	p2->p_aio = NULL;

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		mutex_enter(&ktrace_lock);
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracep = p1->p_tracep) != NULL)
			ktradref(p2);
		mutex_exit(&ktrace_lock);
	}
#endif

	/*
	 * Create signal actions for the child process.
	 */
	p2->p_sigacts = sigactsinit(p1, flags & FORK_SHARESIGS);
	mutex_enter(p1->p_lock);
	p2->p_sflag |=
	    (p1->p_sflag & (PS_STOPFORK | PS_STOPEXEC | PS_NOCLDSTOP));
	sched_proc_fork(p1, p2);
	mutex_exit(p1->p_lock);

	p2->p_stflag = p1->p_stflag;

	/*
	 * p_stats.
	 * Copy parts of p_stats, and zero out the rest.
	 */
	p2->p_stats = pstatscopy(p1->p_stats);

	/*
	 * Set up the new process address space.
	 */
	uvm_proc_fork(p1, p2, (flags & FORK_SHAREVM) ? true : false);

	/*
	 * Finish creating the child process.
	 * It will return through a different path later.
	 */
	lwp_create(l1, p2, uaddr, (flags & FORK_PPWAIT) ? LWP_VFORK : 0,
	    stack, stacksize, (func != NULL) ? func : child_return, arg, &l2,
	    l1->l_class);

	/*
	 * Inherit l_private from the parent.
	 * Note that we cannot use lwp_setprivate() here since that
	 * also sets the CPU TLS register, which is incorrect if the
	 * process has changed that without letting the kernel know.
	 */
	l2->l_private = l1->l_private;

	/*
	 * If emulation has a process fork hook, call it now.
	 */
	if (p2->p_emul->e_proc_fork)
		(*p2->p_emul->e_proc_fork)(p2, l1, flags);

	/*
	 * ...and finally, any other random fork hooks that subsystems
	 * might have registered.
	 */
	doforkhooks(p2, p1);

	SDT_PROBE(proc,,,create, p2, p1, flags, 0, 0);

	/*
	 * It's now safe for the scheduler and other processes to see the
	 * child process.
	 */
	mutex_enter(proc_lock);

	if (p1->p_session->s_ttyvp != NULL && p1->p_lflag & PL_CONTROLT)
		p2->p_lflag |= PL_CONTROLT;

	LIST_INSERT_HEAD(&parent->p_children, p2, p_sibling);
	p2->p_exitsig = exitsig;		/* signal for parent on exit */

	/*
	 * We don't want to tracefork vfork()ed processes because they
	 * will not receive the SIGTRAP until it is too late.
	 */
	tracefork = (p1->p_slflag & (PSL_TRACEFORK|PSL_TRACED)) ==
	    (PSL_TRACEFORK|PSL_TRACED) && (flags & FORK_PPWAIT) == 0;
	if (tracefork) {
		p2->p_slflag |= PSL_TRACED;
		p2->p_opptr = p2->p_pptr;
		if (p2->p_pptr != p1->p_pptr) {
			struct proc *parent1 = p2->p_pptr;

			if (parent1->p_lock < p2->p_lock) {
				if (!mutex_tryenter(parent1->p_lock)) {
					mutex_exit(p2->p_lock);
					mutex_enter(parent1->p_lock);
				}
			} else if (parent1->p_lock > p2->p_lock) {
				mutex_enter(parent1->p_lock);
			}
			parent1->p_slflag |= PSL_CHTRACED;
			proc_reparent(p2, p1->p_pptr);
			if (parent1->p_lock != p2->p_lock)
				mutex_exit(parent1->p_lock);
		}

		/*
		 * Set ptrace status.
		 */
		p1->p_fpid = p2->p_pid;
		p2->p_fpid = p1->p_pid;
	}

	LIST_INSERT_AFTER(p1, p2, p_pglist);
	LIST_INSERT_HEAD(&allproc, p2, p_list);

	p2->p_trace_enabled = trace_is_enabled(p2);
#ifdef __HAVE_SYSCALL_INTERN
	(*p2->p_emul->e_syscall_intern)(p2);
#endif

	/*
	 * Update stats now that we know the fork was successful.
	 */
	uvmexp.forks++;
	if (flags & FORK_PPWAIT)
		uvmexp.forks_ppwait++;
	if (flags & FORK_SHAREVM)
		uvmexp.forks_sharevm++;

	/*
	 * Pass a pointer to the new process to the caller.
	 */
	if (rnewprocp != NULL)
		*rnewprocp = p2;

	if (ktrpoint(KTR_EMUL))
		p2->p_traceflag |= KTRFAC_TRC_EMUL;

	/*
	 * Notify any interested parties about the new process.
	 */
	if (!SLIST_EMPTY(&p1->p_klist)) {
		mutex_exit(proc_lock);
		KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);
		mutex_enter(proc_lock);
	}

	/*
	 * Make child runnable, set start time, and add to run queue except
	 * if the parent requested the child to start in SSTOP state.
	 */
	mutex_enter(p2->p_lock);

	/*
	 * Start profiling.
	 */
	if ((p2->p_stflag & PST_PROFIL) != 0) {
		mutex_spin_enter(&p2->p_stmutex);
		startprofclock(p2);
		mutex_spin_exit(&p2->p_stmutex);
	}

	getmicrotime(&p2->p_stats->p_start);
	p2->p_acflag = AFORK;
	lwp_lock(l2);
	KASSERT(p2->p_nrlwps == 1);
	if (p2->p_sflag & PS_STOPFORK) {
		struct schedstate_percpu *spc = &l2->l_cpu->ci_schedstate;
		p2->p_nrlwps = 0;
		p2->p_stat = SSTOP;
		p2->p_waited = 0;
		p1->p_nstopchild++;
		l2->l_stat = LSSTOP;
		KASSERT(l2->l_wchan == NULL);
		lwp_unlock_to(l2, spc->spc_lwplock);
	} else {
		p2->p_nrlwps = 1;
		p2->p_stat = SACTIVE;
		l2->l_stat = LSRUN;
		sched_enqueue(l2, false);
		lwp_unlock(l2);
	}

	/*
	 * Return child pid to parent process,
	 * marking us as parent via retval[1].
	 */
	if (retval != NULL) {
		retval[0] = p2->p_pid;
		retval[1] = 0;
	}
	mutex_exit(p2->p_lock);

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, sleep until it clears LP_VFORKWAIT.
	 */
#if 0
	while (l1->l_pflag & LP_VFORKWAIT) {
		cv_wait(&l1->l_waitcv, proc_lock);
	}
#else
	while (p2->p_lflag & PL_PPWAIT)
		cv_wait(&p1->p_waitcv, proc_lock);
#endif

	/*
	 * Let the parent know that we are tracing its child.
	 */
	if (tracefork) {
		ksiginfo_t ksi;

		KSI_INIT_EMPTY(&ksi);
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_lid = l1->l_lid;
		kpsignal(p1, &ksi, NULL);
	}
	mutex_exit(proc_lock);

	return 0;
}
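
fork1() is the common backend for the fork-family system calls.  A
hedged sketch of a plain fork(2) front end; the argument pattern (no
flags, SIGCHLD as the exit signal, default stack and child_return path)
follows the parameters above, with the syscall glue simplified.

int
example_sys_fork(struct lwp *l, const void *uap, register_t *retval)
{
	/* No flags, SIGCHLD on exit, default stack, default return path. */
	return fork1(l, 0, SIGCHLD, NULL, 0, NULL, NULL, retval, NULL);
}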