Example #1
/*
 * The CPU ends up here when it's ready to run.
 * XXX should share some of this with init386 in machdep.c
 * It finishes by handing the CPU to the scheduler via cpu_switchto().
 */
void
cpu_hatch(void *v)
{
	struct cpu_info *ci = (struct cpu_info *)v;
	int s;

	cpu_init_idt();
	lapic_enable();
	lapic_startclock();
	lapic_set_lvt();
	gdt_init_cpu(ci);

	lldt(0);

	npxinit(ci);

	cpu_init(ci);

	/* Re-initialise memory range handling on AP */
	if (mem_range_softc.mr_op != NULL)
		mem_range_softc.mr_op->initAP(&mem_range_softc);

	s = splhigh();		/* XXX prevent softints from running here.. */
	lapic_tpr = 0;
	enable_intr();
	if (mp_verbose)
		printf("%s: CPU at apid %ld running\n",
		    ci->ci_dev.dv_xname, ci->ci_cpuid);
	nanouptime(&ci->ci_schedstate.spc_runtime);
	splx(s);

	SCHED_LOCK(s);
	cpu_switchto(NULL, sched_chooseproc());
}
Example #2
/*
 * Further secondary CPU initialization.
 *
 * We are now running on our startup stack, with proper page tables.
 * There is nothing to do but display some details about the CPU and its CMMUs.
 */
void
secondary_main()
{
	struct cpu_info *ci = curcpu();
	int s;

	cpu_configuration_print(0);
	ncpus++;

	sched_init_cpu(ci);
	nanouptime(&ci->ci_schedstate.spc_runtime);
	ci->ci_curproc = NULL;
	ci->ci_randseed = (arc4random() & 0x7fffffff) + 1;

	/*
	 * Release cpu_hatch_mutex to let other secondary processors
	 * have a chance to run.
	 */
	hatch_pending_count--;
	__cpu_simple_unlock(&cpu_hatch_mutex);

	/* wait for cpu_boot_secondary_processors() */
	__cpu_simple_lock(&cpu_boot_mutex);
	__cpu_simple_unlock(&cpu_boot_mutex);

	spl0();
	SCHED_LOCK(s);
	set_psr(get_psr() & ~PSR_IND);

	SET(ci->ci_flags, CIF_ALIVE);

	cpu_switchto(NULL, sched_chooseproc());
}
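
For context, here is a minimal sketch of the boot-processor side of the handshake above. This is an assumption, not the verbatim kernel source: only cpu_boot_mutex, hatch_pending_count, __cpu_simple_unlock() and delay() are taken from the code above; the function shape and loop structure are illustrative.

/*
 * Hypothetical boot-processor counterpart: wait until every secondary
 * has hatched, then open the cpu_boot_mutex barrier that the
 * secondaries block on in secondary_main().
 */
void
cpu_boot_secondary_processors(void)
{
	/* wait for all secondaries to finish their secondary_main() setup */
	while (hatch_pending_count != 0)
		delay(10);

	/* release the secondaries spinning on cpu_boot_mutex */
	__cpu_simple_unlock(&cpu_boot_mutex);
}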
Example #3
/*
 * The CPU ends up here when it's ready to run.
 * This is called from code in mptramp.s; at this point, we are running
 * in the idle pcb/idle stack of the new cpu.  When this function returns,
 * this processor will enter the idle loop and start looking for work.
 *
 * XXX should share some of this with init386 in machdep.c
 */
void
cpu_hatch(void *v)
{
	struct cpu_info *ci = (struct cpu_info *)v;
	int s;

	cpu_init_msrs(ci);

	cpu_probe_features(ci);
	cpu_feature &= ci->ci_feature_flags;

#ifdef DEBUG
	if (ci->ci_flags & CPUF_PRESENT)
		panic("%s: already running!?", ci->ci_dev->dv_xname);
#endif

	ci->ci_flags |= CPUF_PRESENT;

	lapic_enable();
	lapic_initclocks();

	while ((ci->ci_flags & CPUF_GO) == 0)
		delay(10);
#ifdef DEBUG
	if (ci->ci_flags & CPUF_RUNNING)
		panic("%s: already running!?", ci->ci_dev->dv_xname);
#endif

	lcr0(ci->ci_idle_pcb->pcb_cr0);
	cpu_init_idt();
	lapic_set_lvt();
	gdt_init_cpu(ci);
	fpuinit(ci);

	lldt(GSYSSEL(GLDT_SEL, SEL_KPL));

	cpu_init(ci);

	s = splhigh();
	lcr8(0);
	enable_intr();

	microuptime(&ci->ci_schedstate.spc_runtime);
	splx(s);

	SCHED_LOCK(s);
	cpu_switchto(NULL, sched_chooseproc());
}
Example #4
/*
 * Request the syncer daemon to speed up its work.
 * We never push it to speed up more than half of its
 * normal turn time, otherwise it could take over the cpu.
 */
int
speedup_syncer(void)
{
	int s;

	SCHED_LOCK(s);
	if (syncerproc && syncerproc->p_wchan == &lbolt)
		setrunnable(syncerproc);
	SCHED_UNLOCK(s);
	if (rushjob < syncdelay / 2) {
		rushjob += 1;
		stat_rush_requests += 1;
		return 1;
	}
	return 0;
}
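
A hypothetical call site for speedup_syncer(), to make the capping concrete (the names numdirtybufs and hidirtybufs are illustrative, not from this file): with the usual syncdelay of 30 seconds, rushjob can be pushed to at most 15, i.e. half a turn.

	/* Hypothetical caller: too many dirty buffers, nudge the syncer. */
	if (numdirtybufs > hidirtybufs && speedup_syncer() == 0) {
		/* syncer is already running at its capped speed */
	}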
Example #5
/*
 * Process debugging system call.
 */
int
sys_ptrace(struct proc *p, void *v, register_t *retval)
{
	struct sys_ptrace_args /* {
		syscallarg(int) req;
		syscallarg(pid_t) pid;
		syscallarg(caddr_t) addr;
		syscallarg(int) data;
	} */ *uap = v;
	struct proc *t;				/* target thread */
	struct process *tr;			/* target process */
	struct uio uio;
	struct iovec iov;
	struct ptrace_io_desc piod;
	struct ptrace_event pe;
	struct ptrace_thread_state pts;
	struct reg *regs;
#if defined (PT_SETFPREGS) || defined (PT_GETFPREGS)
	struct fpreg *fpregs;
#endif
#if defined (PT_SETXMMREGS) || defined (PT_GETXMMREGS)
	struct xmmregs *xmmregs;
#endif
#ifdef PT_WCOOKIE
	register_t wcookie;
#endif
	int error, write;
	int temp;
	int req = SCARG(uap, req);
	int s;

	/* "A foolish consistency..." XXX */
	switch (req) {
	case PT_TRACE_ME:
		t = p;
		break;

	/* calls that only operate on the PID */
	case PT_READ_I:
	case PT_READ_D:
	case PT_WRITE_I:
	case PT_WRITE_D:
	case PT_KILL:
	case PT_ATTACH:
	case PT_IO:
	case PT_SET_EVENT_MASK:
	case PT_GET_EVENT_MASK:
	case PT_GET_PROCESS_STATE:
	case PT_GET_THREAD_FIRST:
	case PT_GET_THREAD_NEXT:
	default:
		/* Find the process we're supposed to be operating on. */
		if ((t = pfind(SCARG(uap, pid))) == NULL)
			return (ESRCH);
		if (t->p_flag & P_THREAD)
			return (ESRCH);
		break;

	/* calls that accept a PID or a thread ID */
	case PT_CONTINUE:
	case PT_DETACH:
#ifdef PT_STEP
	case PT_STEP:
#endif
	case PT_GETREGS:
	case PT_SETREGS:
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
#endif
#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
#endif
#ifdef PT_GETXMMREGS
	case PT_GETXMMREGS:
#endif
#ifdef PT_SETXMMREGS
	case PT_SETXMMREGS:
#endif
		if (SCARG(uap, pid) > THREAD_PID_OFFSET) {
			t = pfind(SCARG(uap, pid) - THREAD_PID_OFFSET);
			if (t == NULL)
				return (ESRCH);
		} else {
			if ((t = pfind(SCARG(uap, pid))) == NULL)
				return (ESRCH);
			if (t->p_flag & P_THREAD)
				return (ESRCH);
		}
		break;
	}
	tr = t->p_p;

	if ((tr->ps_flags & PS_INEXEC) != 0)
		return (EAGAIN);

	/* Make sure we can operate on it. */
	switch (req) {
	case  PT_TRACE_ME:
		/* Saying that you're being traced is always legal. */
		break;

	case  PT_ATTACH:
		/*
		 * You can't attach to a process if:
		 *	(1) it's the process that's doing the attaching,
		 */
		if (tr == p->p_p)
			return (EINVAL);

		/*
		 *	(2) it's a system process
		 */
		if (ISSET(tr->ps_flags, PS_SYSTEM))
			return (EPERM);

		/*
		 *	(3) it's already being traced, or
		 */
		if (ISSET(tr->ps_flags, PS_TRACED))
			return (EBUSY);

		/*
		 *	(4) it's not owned by you, or the last exec
		 *	    gave us setuid/setgid privs (unless
		 *	    you're root), or...
		 * 
		 *      [Note: once PS_SUGID or PS_SUGIDEXEC gets set in
		 *	execve(), they stay set until the process does
		 *	another execve().  Hence this prevents a setuid
		 *	process which revokes its special privileges using
		 *	setuid() from being traced.  This is good security.]
		 */
		if ((tr->ps_ucred->cr_ruid != p->p_ucred->cr_ruid ||
		    ISSET(tr->ps_flags, PS_SUGIDEXEC | PS_SUGID)) &&
		    (error = suser(p, 0)) != 0)
			return (error);

		/*
		 * 	(4.5) it's not a child of the tracing process.
		 */
		if (global_ptrace == 0 && !inferior(tr, p->p_p) &&
		    (error = suser(p, 0)) != 0)
			return (error);

		/*
		 *	(5) ...it's init, which controls the security level
		 *	    of the entire system, and the system was not
		 *          compiled with permanently insecure mode turned
		 *	    on.
		 */
		if ((tr->ps_pid == 1) && (securelevel > -1))
			return (EPERM);

		/*
		 *	(6) it's an ancestor of the current process and
		 *	    not init (because that would create a loop in
		 *	    the process graph).
		 */
		if (tr->ps_pid != 1 && inferior(p->p_p, tr))
			return (EINVAL);
		break;

	case  PT_READ_I:
	case  PT_READ_D:
	case  PT_WRITE_I:
	case  PT_WRITE_D:
	case  PT_IO:
	case  PT_CONTINUE:
	case  PT_KILL:
	case  PT_DETACH:
#ifdef PT_STEP
	case  PT_STEP:
#endif
	case  PT_SET_EVENT_MASK:
	case  PT_GET_EVENT_MASK:
	case  PT_GET_PROCESS_STATE:
	case  PT_GETREGS:
	case  PT_SETREGS:
#ifdef PT_GETFPREGS
	case  PT_GETFPREGS:
#endif
#ifdef PT_SETFPREGS
	case  PT_SETFPREGS:
#endif
#ifdef PT_GETXMMREGS
	case  PT_GETXMMREGS:
#endif
#ifdef PT_SETXMMREGS
	case  PT_SETXMMREGS:
#endif
#ifdef PT_WCOOKIE
	case  PT_WCOOKIE:
#endif
		/*
		 * You can't do what you want to the process if:
		 *	(1) It's not being traced at all,
		 */
		if (!ISSET(tr->ps_flags, PS_TRACED))
			return (EPERM);

		/*
		 *	(2) it's not being traced by _you_, or
		 */
		if (tr->ps_pptr != p->p_p)
			return (EBUSY);

		/*
		 *	(3) it's not currently stopped.
		 */
		if (t->p_stat != SSTOP || !ISSET(tr->ps_flags, PS_WAITED))
			return (EBUSY);
		break;

	case  PT_GET_THREAD_FIRST:
	case  PT_GET_THREAD_NEXT:
		/*
		 * You can't do what you want to the process if:
		 *	(1) It's not being traced at all,
		 */
		if (!ISSET(tr->ps_flags, PS_TRACED))
			return (EPERM);

		/*
		 *	(2) it's not being traced by _you_, or
		 */
		if (tr->ps_pptr != p->p_p)
			return (EBUSY);

		/*
		 * Do the work here because the request isn't actually
		 * associated with 't'
		 */
		if (SCARG(uap, data) != sizeof(pts))
			return (EINVAL);

		if (req == PT_GET_THREAD_NEXT) {
			error = copyin(SCARG(uap, addr), &pts, sizeof(pts));
			if (error)
				return (error);

			t = pfind(pts.pts_tid - THREAD_PID_OFFSET);
			if (t == NULL || ISSET(t->p_flag, P_WEXIT))
				return (ESRCH);
			if (t->p_p != tr)
				return (EINVAL);
			t = TAILQ_NEXT(t, p_thr_link);
		} else {
			t = TAILQ_FIRST(&tr->ps_threads);
		}

		if (t == NULL)
			pts.pts_tid = -1;
		else
			pts.pts_tid = t->p_pid + THREAD_PID_OFFSET;
		return (copyout(&pts, SCARG(uap, addr), sizeof(pts)));

	default:			/* It was not a legal request. */
		return (EINVAL);
	}

	/* Do single-step fixup if needed. */
	FIX_SSTEP(t);

	/* Now do the operation. */
	write = 0;
	*retval = 0;

	switch (req) {
	case  PT_TRACE_ME:
		/* Just set the trace flag. */
		atomic_setbits_int(&tr->ps_flags, PS_TRACED);
		tr->ps_oppid = tr->ps_pptr->ps_pid;
		if (tr->ps_ptstat == NULL)
			tr->ps_ptstat = malloc(sizeof(*tr->ps_ptstat),
			    M_SUBPROC, M_WAITOK);
		memset(tr->ps_ptstat, 0, sizeof(*tr->ps_ptstat));
		return (0);

	case  PT_WRITE_I:		/* XXX no separate I and D spaces */
	case  PT_WRITE_D:
		write = 1;
		temp = SCARG(uap, data);
		/* FALLTHROUGH */
	case  PT_READ_I:		/* XXX no separate I and D spaces */
	case  PT_READ_D:
		/* write = 0 done above. */
		iov.iov_base = (caddr_t)&temp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(vaddr_t)SCARG(uap, addr);
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_procp = p;
		error = process_domem(p, t, &uio, write ? PT_WRITE_I :
				PT_READ_I);
		if (write == 0)
			*retval = temp;
		return (error);
	case  PT_IO:
		error = copyin(SCARG(uap, addr), &piod, sizeof(piod));
		if (error)
			return (error);
		iov.iov_base = piod.piod_addr;
		iov.iov_len = piod.piod_len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(vaddr_t)piod.piod_offs;
		uio.uio_resid = piod.piod_len;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_procp = p;
		switch (piod.piod_op) {
		case PIOD_READ_I:
			req = PT_READ_I;
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_READ_D:
			req = PT_READ_D;
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_I:
			req = PT_WRITE_I;
			uio.uio_rw = UIO_WRITE;
			break;
		case PIOD_WRITE_D:
			req = PT_WRITE_D;
			uio.uio_rw = UIO_WRITE;
			break;
		case PIOD_READ_AUXV:
			req = PT_READ_D;
			uio.uio_rw = UIO_READ;
			temp = tr->ps_emul->e_arglen * sizeof(char *);
			if (uio.uio_offset > temp)
				return (EIO);
			if (uio.uio_resid > temp - uio.uio_offset)
				uio.uio_resid = temp - uio.uio_offset;
			piod.piod_len = iov.iov_len = uio.uio_resid;
			error = process_auxv_offset(p, t, &uio);
			if (error)
				return (error);
			break;
		default:
			return (EINVAL);
		}
		error = process_domem(p, t, &uio, req);
		piod.piod_len -= uio.uio_resid;
		(void) copyout(&piod, SCARG(uap, addr), sizeof(piod));
		return (error);
#ifdef PT_STEP
	case  PT_STEP:
		/*
		 * From the 4.4BSD PRM:
		 * "Execution continues as in request PT_CONTINUE; however
		 * as soon as possible after execution of at least one
		 * instruction, execution stops again. [ ... ]"
		 */
		/* FALLTHROUGH */
#endif
	case  PT_CONTINUE:
		/*
		 * From the 4.4BSD PRM:
		 * "The data argument is taken as a signal number and the
		 * child's execution continues at location addr as if it
		 * incurred that signal.  Normally the signal number will
		 * be either 0 to indicate that the signal that caused the
		 * stop should be ignored, or that value fetched out of
		 * the process's image indicating which signal caused
		 * the stop.  If addr is (int *)1 then execution continues
		 * from where it stopped."
		 */

		if (SCARG(uap, pid) < THREAD_PID_OFFSET && tr->ps_single)
			t = tr->ps_single;

		/* Check that the data is a valid signal number or zero. */
		if (SCARG(uap, data) < 0 || SCARG(uap, data) >= NSIG)
			return (EINVAL);

		/* If the address parameter is not (int *)1, set the pc. */
		if ((int *)SCARG(uap, addr) != (int *)1)
			if ((error = process_set_pc(t, SCARG(uap, addr))) != 0)
				goto relebad;

#ifdef PT_STEP
		/*
		 * Arrange for a single-step, if that's requested and possible.
		 */
		error = process_sstep(t, req == PT_STEP);
		if (error)
			goto relebad;
#endif
		goto sendsig;

	case  PT_DETACH:
		/*
		 * From the 4.4BSD PRM:
		 * "The data argument is taken as a signal number and the
		 * child's execution continues at location addr as if it
		 * incurred that signal.  Normally the signal number will
		 * be either 0 to indicate that the signal that caused the
		 * stop should be ignored, or that value fetched out of
		 * the process's image indicating which signal caused
		 * the stop.  If addr is (int *)1 then execution continues
		 * from where it stopped."
		 */

		if (SCARG(uap, pid) < THREAD_PID_OFFSET && tr->ps_single)
			t = tr->ps_single;

		/* Check that the data is a valid signal number or zero. */
		if (SCARG(uap, data) < 0 || SCARG(uap, data) >= NSIG)
			return (EINVAL);

#ifdef PT_STEP
		/*
		 * Arrange for a single-step, if that's requested and possible.
		 */
		error = process_sstep(t, req == PT_STEP);
		if (error)
			goto relebad;
#endif

		/* give process back to original parent or init */
		if (tr->ps_oppid != tr->ps_pptr->ps_pid) {
			struct process *ppr;

			ppr = prfind(tr->ps_oppid);
			proc_reparent(tr, ppr ? ppr : initprocess);
		}

		/* not being traced any more */
		tr->ps_oppid = 0;
		atomic_clearbits_int(&tr->ps_flags, PS_TRACED|PS_WAITED);

	sendsig:
		memset(tr->ps_ptstat, 0, sizeof(*tr->ps_ptstat));

		/* Finally, deliver the requested signal (or none). */
		if (t->p_stat == SSTOP) {
			t->p_xstat = SCARG(uap, data);
			SCHED_LOCK(s);
			setrunnable(t);
			SCHED_UNLOCK(s);
		} else {
			if (SCARG(uap, data) != 0)
				psignal(t, SCARG(uap, data));
		}

		return (0);

	relebad:
		return (error);

	case  PT_KILL:
		if (SCARG(uap, pid) < THREAD_PID_OFFSET && tr->ps_single)
			t = tr->ps_single;

		/* just send the process a KILL signal. */
		SCARG(uap, data) = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE, above. */

	case  PT_ATTACH:
		/*
		 * As was done in procfs:
		 * Go ahead and set the trace flag.
		 * Save the old parent (it's reset in
		 *   _DETACH, and also in kern_exit.c:wait4()).
		 * Reparent the process so that the tracing
		 *   proc gets to see all the action.
		 * Stop the target.
		 */
		atomic_setbits_int(&tr->ps_flags, PS_TRACED);
		tr->ps_oppid = tr->ps_pptr->ps_pid;
		if (tr->ps_pptr != p->p_p)
			proc_reparent(tr, p->p_p);
		if (tr->ps_ptstat == NULL)
			tr->ps_ptstat = malloc(sizeof(*tr->ps_ptstat),
			    M_SUBPROC, M_WAITOK);
		SCARG(uap, data) = SIGSTOP;
		goto sendsig;

	case  PT_GET_EVENT_MASK:
		if (SCARG(uap, data) != sizeof(pe))
			return (EINVAL);
		memset(&pe, 0, sizeof(pe));
		pe.pe_set_event = tr->ps_ptmask;
		return (copyout(&pe, SCARG(uap, addr), sizeof(pe)));
	case  PT_SET_EVENT_MASK:
		if (SCARG(uap, data) != sizeof(pe))
			return (EINVAL);
		if ((error = copyin(SCARG(uap, addr), &pe, sizeof(pe))))
			return (error);
		tr->ps_ptmask = pe.pe_set_event;
		return (0);

	case  PT_GET_PROCESS_STATE:
		if (SCARG(uap, data) != sizeof(*tr->ps_ptstat))
			return (EINVAL);

		if (tr->ps_single)
			tr->ps_ptstat->pe_tid =
			    tr->ps_single->p_pid + THREAD_PID_OFFSET;

		return (copyout(tr->ps_ptstat, SCARG(uap, addr),
		    sizeof(*tr->ps_ptstat)));

	case  PT_SETREGS:
		KASSERT((p->p_flag & P_SYSTEM) == 0);
		if ((error = process_checkioperm(p, tr)) != 0)
			return (error);

		regs = malloc(sizeof(*regs), M_TEMP, M_WAITOK);
		error = copyin(SCARG(uap, addr), regs, sizeof(*regs));
		if (error == 0) {
			error = process_write_regs(t, regs);
		}
		free(regs, M_TEMP, sizeof(*regs));
		return (error);
	case  PT_GETREGS:
		KASSERT((p->p_flag & P_SYSTEM) == 0);
		if ((error = process_checkioperm(p, tr)) != 0)
			return (error);

		regs = malloc(sizeof(*regs), M_TEMP, M_WAITOK);
		error = process_read_regs(t, regs);
		if (error == 0)
			error = copyout(regs,
			    SCARG(uap, addr), sizeof (*regs));
		free(regs, M_TEMP, sizeof(*regs));
		return (error);
#ifdef PT_SETFPREGS
	case  PT_SETFPREGS:
		KASSERT((p->p_flag & P_SYSTEM) == 0);
		if ((error = process_checkioperm(p, tr)) != 0)
			return (error);

		fpregs = malloc(sizeof(*fpregs), M_TEMP, M_WAITOK);
		error = copyin(SCARG(uap, addr), fpregs, sizeof(*fpregs));
		if (error == 0) {
			error = process_write_fpregs(t, fpregs);
		}
		free(fpregs, M_TEMP, sizeof(*fpregs));
		return (error);
#endif
#ifdef PT_GETFPREGS
	case  PT_GETFPREGS:
		KASSERT((p->p_flag & P_SYSTEM) == 0);
		if ((error = process_checkioperm(p, tr)) != 0)
			return (error);

		fpregs = malloc(sizeof(*fpregs), M_TEMP, M_WAITOK);
		error = process_read_fpregs(t, fpregs);
		if (error == 0)
			error = copyout(fpregs,
			    SCARG(uap, addr), sizeof(*fpregs));
		free(fpregs, M_TEMP, sizeof(*fpregs));
		return (error);
#endif
#ifdef PT_SETXMMREGS
	case  PT_SETXMMREGS:
		KASSERT((p->p_flag & P_SYSTEM) == 0);
		if ((error = process_checkioperm(p, tr)) != 0)
			return (error);

		xmmregs = malloc(sizeof(*xmmregs), M_TEMP, M_WAITOK);
		error = copyin(SCARG(uap, addr), xmmregs, sizeof(*xmmregs));
		if (error == 0) {
			error = process_write_xmmregs(t, xmmregs);
		}
		free(xmmregs, M_TEMP, sizeof(*xmmregs));
		return (error);
#endif
#ifdef PT_GETXMMREGS
	case  PT_GETXMMREGS:
		KASSERT((p->p_flag & P_SYSTEM) == 0);
		if ((error = process_checkioperm(p, tr)) != 0)
			return (error);

		xmmregs = malloc(sizeof(*xmmregs), M_TEMP, M_WAITOK);
		error = process_read_xmmregs(t, xmmregs);
		if (error == 0)
			error = copyout(xmmregs,
			    SCARG(uap, addr), sizeof(*xmmregs));
		free(xmmregs, M_TEMP, sizeof(*xmmregs));
		return (error);
#endif
#ifdef PT_WCOOKIE
	case  PT_WCOOKIE:
		wcookie = process_get_wcookie(t);
		return (copyout(&wcookie, SCARG(uap, addr),
		    sizeof (register_t)));
#endif
	}

#ifdef DIAGNOSTIC
	panic("ptrace: impossible");
#endif
	return (0);
}
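
To make the request flow above concrete, here is a hypothetical userland sketch exercising the PT_ATTACH, PT_READ_D and PT_DETACH paths of the switch statements above (error handling elided; addr is an assumed address in the target's address space):

#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

void
trace_once(pid_t pid, caddr_t addr)
{
	int status, val;

	/* PT_ATTACH: the kernel substitutes SIGSTOP and reparents the target */
	ptrace(PT_ATTACH, pid, (caddr_t)0, 0);
	waitpid(pid, &status, 0);	/* target is now stopped and waited on */

	/* PT_READ_D: read one int from the target's address space */
	val = ptrace(PT_READ_D, pid, addr, 0);
	(void)val;	/* sketch: a real tracer would use the word read */

	/* PT_DETACH: (caddr_t)1 resumes where stopped, data 0 = no signal */
	ptrace(PT_DETACH, pid, (caddr_t)1, 0);
}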
Example #6
/*
 * The CPU ends up here when it's ready to run.
 * This is called from code in mptramp.s; at this point, we are running
 * in the idle pcb/idle stack of the new cpu.  When this function returns,
 * this processor will enter the idle loop and start looking for work.
 *
 * XXX should share some of this with init386 in machdep.c
 */
void
cpu_hatch(void *v)
{
	struct cpu_info *ci = (struct cpu_info *)v;
	int s;

	cpu_init_msrs(ci);

#ifdef DEBUG
	if (ci->ci_flags & CPUF_PRESENT)
		panic("%s: already running!?", ci->ci_dev->dv_xname);
#endif

	ci->ci_flags |= CPUF_PRESENT;

	lapic_enable();
	lapic_startclock();

	if ((ci->ci_flags & CPUF_IDENTIFIED) == 0) {
		/*
		 * We need to wait until we can identify, otherwise dmesg
		 * output will be messy.
		 */
		while ((ci->ci_flags & CPUF_IDENTIFY) == 0)
			delay(10);

		identifycpu(ci);

		/* Signal we're done */
		atomic_clearbits_int(&ci->ci_flags, CPUF_IDENTIFY);
		/* Prevent identifycpu() from running again */
		atomic_setbits_int(&ci->ci_flags, CPUF_IDENTIFIED);
	}

	while ((ci->ci_flags & CPUF_GO) == 0)
		delay(10);
#ifdef DEBUG
	if (ci->ci_flags & CPUF_RUNNING)
		panic("%s: already running!?", ci->ci_dev->dv_xname);
#endif

	lcr0(ci->ci_idle_pcb->pcb_cr0);
	cpu_init_idt();
	lapic_set_lvt();
	gdt_init_cpu(ci);
	fpuinit(ci);

	lldt(0);

	cpu_init(ci);

	s = splhigh();
	lcr8(0);
	enable_intr();

	microuptime(&ci->ci_schedstate.spc_runtime);
	splx(s);

	SCHED_LOCK(s);
	cpu_switchto(NULL, sched_chooseproc());
}
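
For the CPUF_IDENTIFY/CPUF_GO spin loops above to terminate, the boot processor must raise those bits from the other side. A hypothetical sketch of that sequencing (only the flag names and the atomic helpers come from the code above; the function shape is assumed):

void
cpu_start_secondary(struct cpu_info *ci)
{
	/* let the AP run identifycpu(), and wait until it has finished */
	atomic_setbits_int(&ci->ci_flags, CPUF_IDENTIFY);
	while (ci->ci_flags & CPUF_IDENTIFY)
		delay(10);

	/* release the AP into the scheduler */
	atomic_setbits_int(&ci->ci_flags, CPUF_GO);
}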
Example #7
int
fork1(struct proc *p1, int exitsig, int flags, void *stack, size_t stacksize,
    void (*func)(void *), void *arg, register_t *retval,
    struct proc **rnewprocp)
{
	struct proc *p2;
	uid_t uid;
	struct vmspace *vm;
	int count;
	vaddr_t uaddr;
	int s;
	extern void endtsleep(void *);
	extern void realitexpire(void *);

	/*
	 * Although process entries are dynamically created, we still keep
	 * a global limit on the maximum number we will create. We reserve
	 * the last 5 processes to root. The variable nprocs is the current
	 * number of processes, maxproc is the limit.
	 */
	uid = p1->p_cred->p_ruid;
	if ((nprocs >= maxproc - 5 && uid != 0) || nprocs >= maxproc) {
		static struct timeval lasttfm;

		if (ratecheck(&lasttfm, &fork_tfmrate))
			tablefull("proc");
		return (EAGAIN);
	}
	nprocs++;

	/*
	 * Increment the count of procs running with this uid. Don't allow
	 * a nonprivileged user to exceed their current limit.
	 */
	count = chgproccnt(uid, 1);
	if (uid != 0 && count > p1->p_rlimit[RLIMIT_NPROC].rlim_cur) {
		(void)chgproccnt(uid, -1);
		nprocs--;
		return (EAGAIN);
	}

	uaddr = uvm_km_alloc1(kernel_map, USPACE, USPACE_ALIGN, 1);
	if (uaddr == 0) {
		chgproccnt(uid, -1);
		nprocs--;
		return (ENOMEM);
	}

	/*
	 * From now on, we're committed to the fork and cannot fail.
	 */

	/* Allocate new proc. */
	p2 = pool_get(&proc_pool, PR_WAITOK);

	p2->p_stat = SIDL;			/* protect against others */
	p2->p_exitsig = exitsig;
	p2->p_forw = p2->p_back = NULL;

#ifdef RTHREADS
	if (flags & FORK_THREAD) {
		atomic_setbits_int(&p2->p_flag, P_THREAD);
		p2->p_p = p1->p_p;
		TAILQ_INSERT_TAIL(&p2->p_p->ps_threads, p2, p_thr_link);
	} else {
		process_new(p2, p1);
	}
#else
	process_new(p2, p1);
#endif

	/*
	 * Make a proc table entry for the new process.
	 * Start by zeroing the section of proc that is zero-initialized,
	 * then copy the section that is copied directly from the parent.
	 */
	bzero(&p2->p_startzero,
	    (unsigned) ((caddr_t)&p2->p_endzero - (caddr_t)&p2->p_startzero));
	bcopy(&p1->p_startcopy, &p2->p_startcopy,
	    (unsigned) ((caddr_t)&p2->p_endcopy - (caddr_t)&p2->p_startcopy));

	/*
	 * Initialize the timeouts.
	 */
	timeout_set(&p2->p_sleep_to, endtsleep, p2);
	timeout_set(&p2->p_realit_to, realitexpire, p2);

#if defined(__HAVE_CPUINFO)
	p2->p_cpu = p1->p_cpu;
#endif

	/*
	 * Duplicate sub-structures as needed.
	 * Increase reference counts on shared objects.
	 * The p_stats and p_sigacts substructs are set in vm_fork.
	 */
	p2->p_flag = 0;
	p2->p_emul = p1->p_emul;
	if (p1->p_flag & P_PROFIL)
		startprofclock(p2);
	atomic_setbits_int(&p2->p_flag, p1->p_flag & (P_SUGID | P_SUGIDEXEC));
	if (flags & FORK_PTRACE)
		atomic_setbits_int(&p2->p_flag, p1->p_flag & P_TRACED);
#ifdef RTHREADS
	if (flags & FORK_THREAD) {
		/* nothing */
	} else
#endif
	{
		p2->p_p->ps_cred = pool_get(&pcred_pool, PR_WAITOK);
		bcopy(p1->p_p->ps_cred, p2->p_p->ps_cred, sizeof(*p2->p_p->ps_cred));
		p2->p_p->ps_cred->p_refcnt = 1;
		crhold(p1->p_ucred);
	}

	TAILQ_INIT(&p2->p_selects);

	/* bump references to the text vnode (for procfs) */
	p2->p_textvp = p1->p_textvp;
	if (p2->p_textvp)
		VREF(p2->p_textvp);

	if (flags & FORK_CLEANFILES)
		p2->p_fd = fdinit(p1);
	else if (flags & FORK_SHAREFILES)
		p2->p_fd = fdshare(p1);
	else
		p2->p_fd = fdcopy(p1);

	/*
	 * If ps_limit is still copy-on-write, bump refcnt,
	 * otherwise get a copy that won't be modified.
	 * (If PL_SHAREMOD is clear, the structure is shared
	 * copy-on-write.)
	 */
#ifdef RTHREADS
	if (flags & FORK_THREAD) {
		/* nothing */
	} else
#endif
	{
		if (p1->p_p->ps_limit->p_lflags & PL_SHAREMOD)
			p2->p_p->ps_limit = limcopy(p1->p_p->ps_limit);
		else {
			p2->p_p->ps_limit = p1->p_p->ps_limit;
			p2->p_p->ps_limit->p_refcnt++;
		}
	}

	if (p1->p_session->s_ttyvp != NULL && p1->p_flag & P_CONTROLT)
		atomic_setbits_int(&p2->p_flag, P_CONTROLT);
	if (flags & FORK_PPWAIT)
		atomic_setbits_int(&p2->p_flag, P_PPWAIT);
	p2->p_pptr = p1;
	if (flags & FORK_NOZOMBIE)
		atomic_setbits_int(&p2->p_flag, P_NOZOMBIE);
	LIST_INIT(&p2->p_children);

#ifdef KTRACE
	/*
	 * Copy traceflag and tracefile if enabled.
	 * If not inherited, these were zeroed above.
	 */
	if (p1->p_traceflag & KTRFAC_INHERIT) {
		p2->p_traceflag = p1->p_traceflag;
		if ((p2->p_tracep = p1->p_tracep) != NULL)
			VREF(p2->p_tracep);
	}
#endif

	/*
	 * set priority of child to be that of parent
	 * XXX should move p_estcpu into the region of struct proc which gets
	 * copied.
	 */
	scheduler_fork_hook(p1, p2);

	/*
	 * Create signal actions for the child process.
	 */
	if (flags & FORK_SIGHAND)
		sigactsshare(p1, p2);
	else
		p2->p_sigacts = sigactsinit(p1);

	/*
	 * If emulation has process fork hook, call it now.
	 */
	if (p2->p_emul->e_proc_fork)
		(*p2->p_emul->e_proc_fork)(p2, p1);

	p2->p_addr = (struct user *)uaddr;

	/*
	 * Finish creating the child process.  It will return through a
	 * different path later.
	 */
	uvm_fork(p1, p2, ((flags & FORK_SHAREVM) ? TRUE : FALSE), stack,
	    stacksize, func ? func : child_return, arg ? arg : p2);

	timeout_set(&p2->p_stats->p_virt_to, virttimer_trampoline, p2);
	timeout_set(&p2->p_stats->p_prof_to, proftimer_trampoline, p2);

	vm = p2->p_vmspace;

	if (flags & FORK_FORK) {
		forkstat.cntfork++;
		forkstat.sizfork += vm->vm_dsize + vm->vm_ssize;
	} else if (flags & FORK_VFORK) {
		forkstat.cntvfork++;
		forkstat.sizvfork += vm->vm_dsize + vm->vm_ssize;
	} else if (flags & FORK_RFORK) {
		forkstat.cntrfork++;
		forkstat.sizrfork += vm->vm_dsize + vm->vm_ssize;
	} else {
		forkstat.cntkthread++;
		forkstat.sizkthread += vm->vm_dsize + vm->vm_ssize;
	}

	/* Find an unused pid satisfying 1 <= lastpid <= PID_MAX */
	do {
		lastpid = 1 + (randompid ? arc4random() : lastpid) % PID_MAX;
	} while (pidtaken(lastpid));
	p2->p_pid = lastpid;

	LIST_INSERT_HEAD(&allproc, p2, p_list);
	LIST_INSERT_HEAD(PIDHASH(p2->p_pid), p2, p_hash);
	LIST_INSERT_HEAD(&p1->p_children, p2, p_sibling);
	LIST_INSERT_AFTER(p1, p2, p_pglist);
	if (p2->p_flag & P_TRACED) {
		p2->p_oppid = p1->p_pid;
		if (p2->p_pptr != p1->p_pptr)
			proc_reparent(p2, p1->p_pptr);

		/*
		 * Set ptrace status.
		 */
		if (flags & FORK_FORK) {
			p2->p_ptstat = malloc(sizeof(*p2->p_ptstat),
			    M_SUBPROC, M_WAITOK);
			p1->p_ptstat->pe_report_event = PTRACE_FORK;
			p2->p_ptstat->pe_report_event = PTRACE_FORK;
			p1->p_ptstat->pe_other_pid = p2->p_pid;
			p2->p_ptstat->pe_other_pid = p1->p_pid;
		}
	}

#if NSYSTRACE > 0
	if (ISSET(p1->p_flag, P_SYSTRACE))
		systrace_fork(p1, p2);
#endif

	/*
	 * Make child runnable, set start time, and add to run queue.
	 */
	SCHED_LOCK(s);
	getmicrotime(&p2->p_stats->p_start);
	p2->p_acflag = AFORK;
	p2->p_stat = SRUN;
	setrunqueue(p2);
	SCHED_UNLOCK(s);

	/*
	 * Notify any interested parties about the new process.
	 */
	KNOTE(&p1->p_klist, NOTE_FORK | p2->p_pid);

	/*
	 * Update stats now that we know the fork was successful.
	 */
	uvmexp.forks++;
	if (flags & FORK_PPWAIT)
		uvmexp.forks_ppwait++;
	if (flags & FORK_SHAREVM)
		uvmexp.forks_sharevm++;

	/*
	 * Pass a pointer to the new process to the caller.
	 */
	if (rnewprocp != NULL)
		*rnewprocp = p2;

	/*
	 * Preserve synchronization semantics of vfork.  If waiting for
	 * child to exec or exit, set P_PPWAIT on child, and sleep on our
	 * proc (in case of exit).
	 */
	if (flags & FORK_PPWAIT)
		while (p2->p_flag & P_PPWAIT)
			tsleep(p1, PWAIT, "ppwait", 0);

	/*
	 * If we're tracing the child, alert the parent too.
	 */
	if ((flags & FORK_PTRACE) && (p1->p_flag & P_TRACED))
		psignal(p1, SIGTRAP);

	/*
	 * Return child pid to parent process,
	 * marking us as parent via retval[1].
	 */
	if (retval != NULL) {
		retval[0] = p2->p_pid;
		retval[1] = 0;
	}
	return (0);
}
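
For reference, a hypothetical sketch of how a plain fork(2) could funnel into fork1(); the exact flag combination is an assumption based on the FORK_FORK accounting above:

int
sys_fork(struct proc *p, void *v, register_t *retval)
{
	/* child: exit signal SIGCHLD, default stack, default entry point */
	return (fork1(p, SIGCHLD, FORK_FORK, NULL, 0, NULL, NULL,
	    retval, NULL));
}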