Example #1
void
init0(void)
{
	int i;
	char buf[2*KNAMELEN];

	up->nerrlab = 0;

	spllo();

	/*
	 * These are o.k. because rootinit is null.
	 * Then early kproc's will have a root and dot.
	 */
	up->slash = namec("#/", Atodir, 0, 0);
	pathclose(up->slash->path);
	up->slash->path = newpath("/");
	up->dot = cclone(up->slash);

	chandevinit();

	if(!waserror()){
		snprint(buf, sizeof(buf), "%s %s", arch->id, conffile);
		ksetenv("terminal", buf, 0);
		ksetenv("cputype", "386", 0);
		if(cpuserver)
			ksetenv("service", "cpu", 0);
		else
			ksetenv("service", "terminal", 0);
		for(i = 0; i < nconf; i++){
			if(confname[i][0] != '*')
				ksetenv(confname[i], confval[i], 0);
			ksetenv(confname[i], confval[i], 1);
		}
		poperror();
	}
	kproc("alarm", alarmkproc, 0);
	touser(sp);
}
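
A note on the error-handling idiom in this example (and throughout this page): waserror() pushes an error label, setjmp-style, and returns 0; a later error() unwinds to the most recent label, making waserror() appear to return 1, and every successful path must call poperror() exactly once per waserror(). A minimal sketch, with smalloc() and dosomething() as hypothetical stand-ins:

	char *buf;

	buf = smalloc(n);
	if(waserror()){
		/* error() fired somewhere below: release resources, rethrow */
		free(buf);
		nexterror();
	}
	dosomething(buf);	/* may call error() */
	poperror();		/* pop the label pushed by waserror() */
	free(buf);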
Example #2
/*
 *  pick a process to run.
 *  most of this is used in AMP sched.
 *  (on a quad core or less, we use SMP).
 *  In the case of core 0 we always return nil, but
 *  schedule the picked process at any other available TC.
 *  In the case of other cores we wait until a process is given
 *  by core 0.
 */
Proc*
runproc(void)
{
	Schedq *rq;
	Proc *p;
	uint32_t start, now;

	if(sys->nmach <= AMPmincores)
		return smprunproc();

	start = perfticks();
	run.preempts++;
	rq = nil;
	if(machp()->machno != 0){
		do{
			spllo();
			while(m->proc == nil)
				idlehands();
			now = perfticks();
			m->perf.inidle += now-start;
			start = now;
			splhi();
			p = m->proc;
		}while(p == nil);
		p->state = Scheding;
		p->mp = sys->machptr[machp()->machno];

		if(edflock(p)){
			edfrun(p, rq == &run.runq[PriEdf]);	/* start deadline timer and do admin */
			edfunlock();
		}
		if(p->trace)
			proctrace(p, SRun, 0);
		return p;
	}

	mach0sched();
	return nil;	/* not reached */
}
Example #3
void
init0(void)
{
	Osenv *o;
	char buf[2*KNAMELEN];

	up->nerrlab = 0;

	//print("Starting init0()\n");
	spllo();

	if(waserror())
		panic("init0 %r");

	/* These are o.k. because rootinit is null.
	 * Then early kproc's will have a root and dot. */

	o = up->env;
	o->pgrp->slash = namec("#/", Atodir, 0, 0);
	cnameclose(o->pgrp->slash->name);
	o->pgrp->slash->name = newcname("/");
	o->pgrp->dot = cclone(o->pgrp->slash);

	chandevinit();

	if(!waserror()){
		ksetenv("cputype", "arm", 0);
		snprint(buf, sizeof(buf), "arm %s", conffile);
		ksetenv("terminal", buf, 0);
		snprint(buf, sizeof(buf), "%s", getethermac());
		ksetenv("ethermac", buf, 0);
		poperror();
	}

	poperror();

	disinit("/osinit.dis");
}
Example #4
static void
shutdown(int ispanic)
{
	Mach *m = machp();
	int ms, once;

	lock(&active);
	if(ispanic)
		active.ispanic = ispanic;
	else if(m->machno == 0 && m->online == 0)
		active.ispanic = 0;
	once = m->online;
	m->online = 0;
	adec(&active.nonline);
	active.exiting = 1;
	unlock(&active);

	if(once)
		iprint("cpu%d: exiting\n", m->machno);

	spllo();
	for(ms = 5*1000; ms > 0; ms -= TK2MS(2)){
		delay(TK2MS(2));
		if(active.nonline == 0)
			break;
	}

	if(active.ispanic && m->machno == 0){
		if(cpuserver)
			delay(30000);
		else
			for(;;)
				halt();
	}
	else
		delay(1000);
}
Example #5
void
init0(void)
{
	char buf[2*KNAMELEN];

	up->nerrlab = 0;

//	if(consuart == nil)
//		i8250console("0");
	spllo();

	/*
	 * These are o.k. because rootinit is null.
	 * Then early kproc's will have a root and dot.
	 */
	up->slash = namec("#/", Atodir, 0, 0);
	pathclose(up->slash->path);
	up->slash->path = newpath("/");
	up->dot = cclone(up->slash);

	devtabinit();

	if(!waserror()){
		snprint(buf, sizeof(buf), "%s %s", "AMD64", conffile);
		ksetenv("terminal", buf, 0);
		ksetenv("cputype", "amd64", 0);
		if(cpuserver)
			ksetenv("service", "cpu", 0);
		else
			ksetenv("service", "terminal", 0);
		ksetenv("pgsz", "2097152", 0);
		confsetenv();
		poperror();
	}
	kproc("alarm", alarmkproc, 0);
	touser(sp);
}
Example #6
static void
shutdown(int ispanic)
{
	int ms, once;

	lock(&active);
	if(ispanic)
		active.ispanic = ispanic;
	else if(m->machno == 0 && (active.machs & (1<<m->machno)) == 0)
		active.ispanic = 0;
	once = active.machs & (1<<m->machno);
	/*
	 * setting exiting will make hzclock() on each processor call exit(0),
	 * which calls shutdown(0) and idles non-bootstrap cpus and returns
	 * on bootstrap processors (to permit a reboot).  clearing our bit
	 * in machs avoids calling exit(0) from hzclock() on this processor.
	 */
	active.machs &= ~(1<<m->machno);
	active.exiting = 1;
	unlock(&active);

	if(once) {
		delay(m->machno*1000);		/* stagger them */
		iprint("cpu%d: exiting\n", m->machno);
	}
	spllo();
	if (m->machno == 0)
		ms = 5*1000;
	else
		ms = 2*1000;
	for(; ms > 0; ms -= TK2MS(2)){
		delay(TK2MS(2));
		if(active.machs == 0 && consactive() == 0)
			break;
	}
	delay(500);
}
Example #7
/*
 *  if the controller or a specific drive is in a confused state,
 *  reset it and get back to a known state
 */
static void
floppyrevive(void)
{
    FDrive *dp;

    /*
     *  reset the controller if it's confused
     */
    if(fl.confused) {
        DPRINT("floppyrevive in\n");
        fldump();

        /* reset controller and turn all motors off */
        splhi();
        fl.ncmd = 1;
        fl.cmd[0] = 0;
        outb(Pdor, 0);
        delay(10);
        outb(Pdor, Fintena|Fena);
        delay(10);
        spllo();
        fl.motor = 0;
        fl.confused = 0;
        floppywait(0);

        /* mark all drives in an unknown state */
        for(dp = fl.d; dp < &fl.d[fl.ndrive]; dp++)
            dp->confused = 1;

        /* set rate to a known value */
        outb(Pdsr, 0);
        fl.rate = 0;

        DPRINT("floppyrevive out\n");
        fldump();
    }
}
Example #8
int
fault(uintptr addr, int read)
{
	Segment *s;
	char *sps;

	if(up == nil)
		panic("fault: nil up");
	if(up->nlocks.ref)
		print("fault: addr %#p: nlocks %ld\n", addr, up->nlocks.ref);

	sps = up->psstate;
	up->psstate = "Fault";
	spllo();

	m->pfault++;
	for(;;) {
		s = seg(up, addr, 1);		/* leaves s->lk qlocked if seg != nil */
		if(s == 0) {
			up->psstate = sps;
			return -1;
		}

		if(!read && (s->type&SG_RONLY)) {
			qunlock(&s->lk);
			up->psstate = sps;
			return -1;
		}

		if(fixfault(s, addr, read, 1) == 0)	/* qunlocks s->lk */
			break;
	}

	up->psstate = sps;
	return 0;
}
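
For context, a hedged sketch of how a trap handler typically consumes fault()'s return value (the note text and panic wording vary by port; addr, read, user, and buf are as in the trap handlers elsewhere on this page):

	if(fault(addr, read) < 0){
		if(user){
			/* unfixable user-mode fault: post a note to the process */
			snprint(buf, sizeof buf, "sys: trap: fault %s addr=%#p",
				read? "read": "write", addr);
			postnote(up, 1, buf, NDebug);
		}else
			panic("kernel fault: addr %#p", addr);
	}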
Example #9
void
shutdown(int ispanic)
{
	int ms, once;

	lock(&active);
	if(ispanic)
		active.ispanic = ispanic;
	else if(m->machno == 0 && (active.machs & (1<<m->machno)) == 0)
		active.ispanic = 0;
	once = active.machs & (1<<m->machno);
	active.machs &= ~(1<<m->machno);
	active.exiting = 1;
	unlock(&active);

	if(once)
		print("cpu%d: exiting\n", m->machno);
	spllo();
	for(ms = 5*1000; ms > 0; ms -= TK2MS(2)){
		delay(TK2MS(2));
		if(active.machs == 0 && consactive() == 0)
			break;
	}

#ifdef notdef
	if(active.ispanic && m->machno == 0){
		if(cpuserver)
			delay(30000);
		else
			for(;;)
				halt();
	}
	else
#endif /* notdef */
		delay(1000);
}
Example #10
/*
 *  sleep if a condition is not true.  Another process will
 *  awaken us after it sets the condition.  When we awaken
 *  the condition may no longer be true.
 *
 *  we lock both the process and the rendezvous to keep r->p
 *  and p->r synchronized.
 */
void
sleep(Rendez *r, int (*f)(void*), void *arg)
{
	int s;
	void (*pt)(Proc*, int, vlong);

	s = splhi();

	if(up->nlocks)
		print("process %lud sleeps with %d locks held, last lock %#p locked at pc %#p, sleep called from %#p\n",
			up->pid, up->nlocks, up->lastlock, up->lastlock->pc, getcallerpc(&r));
	lock(r);
	lock(&up->rlock);
	if(r->p != nil){
		print("double sleep called from %#p, %lud %lud\n", getcallerpc(&r), r->p->pid, up->pid);
		dumpstack();
	}

	/*
	 *  Wakeup only knows there may be something to do by testing
	 *  r->p in order to get something to lock on.
	 *  Flush that information out to memory in case the sleep is
	 *  committed.
	 */
	r->p = up;

	if((*f)(arg) || up->notepending){
		/*
		 *  if condition happened or a note is pending
		 *  never mind
		 */
		r->p = nil;
		unlock(&up->rlock);
		unlock(r);
	} else {
		/*
		 *  now we are committed to
		 *  change state and call scheduler
		 */
		pt = proctrace;
		if(pt != nil)
			pt(up, SSleep, 0);
		up->state = Wakeme;
		up->r = r;

		/* statistics */
		m->cs++;

		procsave(up);
		if(setlabel(&up->sched)) {
			/*
			 *  here when the process is awakened
			 */
			procrestore(up);
			spllo();
		} else {
			/*
			 *  here to go to sleep (i.e. stop Running)
			 */
			unlock(&up->rlock);
			unlock(r);
			gotolabel(&m->sched);
		}
	}

	if(up->notepending) {
		up->notepending = 0;
		splx(s);
		interrupted();
	}

	splx(s);
}
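
The other half of this protocol is wakeup(), which takes the same locks, inspects r->p, and readies the sleeper. A minimal usage sketch, assuming a hypothetical Queue type with an embedded Rendez r and a count field; the waiter loops because the condition may already be false again by the time it runs:

static int
haschar(void *a)
{
	Queue *q;

	q = a;
	return q->count > 0;
}

void
consume(Queue *q)
{
	while(!haschar(q))
		sleep(&q->r, haschar, q);	/* re-test the condition on wakeup */
	/* ... remove one item ... */
}

void
produce(Queue *q)
{
	q->count++;
	wakeup(&q->r);	/* finds r->p under the Rendez lock and readies it */
}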
Example #11
/*
 *  pick a process to run
 */
Proc*
runproc(void)
{
	Schedq *rq;
	Proc *p;
	ulong start, now;
	int i;
	void (*pt)(Proc*, int, vlong);

	start = perfticks();

	/* cooperative scheduling until the clock ticks */
	if((p = m->readied) != nil && p->mach == nil && p->state == Ready
	&& (p->wired == nil || p->wired == MACHP(m->machno))
	&& runq[Nrq-1].head == nil && runq[Nrq-2].head == nil){
		skipscheds++;
		rq = &runq[p->priority];
		goto found;
	}

	preempts++;

loop:
	/*
	 *  find a process that last ran on this processor (affinity),
	 *  or one that hasn't moved in a while (load balancing).  Every
	 *  time around the loop affinity goes down.
	 */
	spllo();
	for(i = 0;; i++){
		/*
		 *  find the highest priority target process that this
		 *  processor can run given affinity constraints.
		 *
		 */
		for(rq = &runq[Nrq-1]; rq >= runq; rq--){
			for(p = rq->head; p != nil; p = p->rnext){
				if(p->mp == nil || p->mp == MACHP(m->machno)
				|| (p->wired == nil && i > 0))
					goto found;
			}
		}

		/* waste time or halt the CPU */
		idlehands();

		/* remember how much time we're here */
		now = perfticks();
		m->perf.inidle += now-start;
		start = now;
	}

found:
	splhi();
	p = dequeueproc(rq, p);
	if(p == nil)
		goto loop;

	p->state = Scheding;
	p->mp = MACHP(m->machno);

	if(edflock(p)){
		edfrun(p, rq == &runq[PriEdf]);	/* start deadline timer and do admin */
		edfunlock();
	}
	pt = proctrace;
	if(pt != nil)
		pt(p, SRun, 0);
	return p;
}
Example #12
/*
 *  If changing this routine, look also at sleep().  It
 *  contains a copy of the guts of sched().
 */
void
sched(void)
{
	Proc *p;

	if(m->ilockdepth)
		panic("cpu%d: ilockdepth %d, last lock %#p at %#p, sched called from %#p",
			m->machno,
			m->ilockdepth,
			up != nil ? up->lastilock: nil,
			(up != nil && up->lastilock != nil) ? up->lastilock->pc: 0,
			getcallerpc(&p+2));
	if(up != nil) {
		/*
		 * Delay the sched until the process gives up the locks
		 * it is holding.  This avoids dumb lock loops.
		 * Don't delay if the process is Moribund.
		 * It called sched to die.
		 * But do sched eventually.  This avoids a missing unlock
		 * from hanging the entire kernel. 
		 * But don't reschedule procs holding palloc or procalloc.
		 * Those are far too important to be holding while asleep.
		 *
		 * This test is not exact.  There can still be a few instructions
		 * in the middle of taslock when a process holds a lock
		 * but Lock.p has not yet been initialized.
		 */
		if(up->nlocks)
		if(up->state != Moribund)
		if(up->delaysched < 20
		|| palloc.Lock.p == up
		|| procalloc.Lock.p == up){
			up->delaysched++;
 			delayedscheds++;
			return;
		}
		up->delaysched = 0;

		splhi();

		/* statistics */
		m->cs++;

		procsave(up);
		if(setlabel(&up->sched)){
			procrestore(up);
			spllo();
			return;
		}
		gotolabel(&m->sched);
	}
	p = runproc();
	if(p->edf == nil){
		updatecpu(p);
		p->priority = reprioritize(p);
	}
	if(p != m->readied)
		m->schedticks = m->ticks + HZ/10;
	m->readied = nil;
	up = p;
	up->state = Running;
	up->mach = MACHP(m->machno);
	m->proc = up;
	mmuswitch(up);
	gotolabel(&up->sched);
}
Example #13
/*
 *  All traps come here.  It is slower to have all traps call trap()
 *  rather than directly vectoring the handler.  However, this avoids a
 *  lot of code duplication and possible bugs.  The only exception is
 *  VectorSYSCALL.
 *  Trap is called with interrupts disabled via interrupt-gates.
 */
void
trap(Ureg* ureg)
{
	int clockintr, vno, user;
	// cache the previous vno to see what might be causing
	// trouble
	static int lastvno;
	vno = ureg->type;
	uint64_t gsbase = rdmsr(GSbase);
	//if (sce > scx) iprint("====================");
	if (vno == 8) {
		iprint("Lstar is %p\n", (void *)rdmsr(Lstar));
		iprint("GSbase is %p\n", (void *)gsbase);
		iprint("ire %d irx %d sce %d scx %d lastvno %d\n", 
			ire, irx, sce, scx, lastvno);
		iprint("irxe %d \n",
			irxe);
		die("8");
	}
	lastvno = vno;
	if (gsbase < 1ULL<<63)
		die("bogus gsbase");
	Mach *m = machp();
	char buf[ERRMAX];
	Vctl *ctl, *v;

	if (0 && m && m->externup && m->externup->pid == 6) {
		//iprint("type %x\n", ureg->type);
		if (ureg->type != 0x49)
			die("6\n");
	}
	m->perf.intrts = perfticks();
	user = userureg(ureg);
	if(user && (m->nixtype == NIXTC)){
		m->externup->dbgreg = ureg;
		cycles(&m->externup->kentry);
	}

	clockintr = 0;

	//_pmcupdate(m);

	if(ctl = vctl[vno]){
		if(ctl->isintr){
			m->intr++;
			if(vno >= VectorPIC && vno != VectorSYSCALL)
				m->lastintr = ctl->irq;
		}else
			if(m->externup)
				m->externup->nqtrap++;

		if(ctl->isr)
			ctl->isr(vno);
		for(v = ctl; v != nil; v = v->next){
			if(v->f)
				v->f(ureg, v->a);
		}
		if(ctl->eoi)
			ctl->eoi(vno);
		intrtime(vno);
		if(ctl->isintr){
			if(ctl->irq == IrqCLOCK || ctl->irq == IrqTIMER)
				clockintr = 1;

			if(m->externup && !clockintr)
				preempted();
		}
	}
	else if(vno < nelem(excname) && user){
		spllo();
		snprint(buf, sizeof buf, "sys: trap: %s", excname[vno]);
		postnote(m->externup, 1, buf, NDebug);
	}
	else if(vno >= VectorPIC && vno != VectorSYSCALL){
		/*
		 * An unknown interrupt.
		 * Check for a default IRQ7. This can happen when
		 * the IRQ input goes away before the acknowledge.
		 * In this case, a 'default IRQ7' is generated, but
		 * the corresponding bit in the ISR isn't set.
		 * In fact, just ignore all such interrupts.
		 */

		/* clear the interrupt */
		i8259isr(vno);

		iprint("cpu%d: spurious interrupt %d, last %d\n",
			m->machno, vno, m->lastintr);
		intrtime(vno);
		if(user)
			kexit(ureg);
		return;
	}
	else{
		if(vno == VectorNMI){
			nmienable();
			if(m->machno != 0){
				iprint("cpu%d: PC %#llux\n",
					m->machno, ureg->ip);
				for(;;);
			}
		}
		dumpregs(ureg);
		if(!user){
			ureg->sp = PTR2UINT(&ureg->sp);
			dumpstackwithureg(ureg);
		}
		if(vno < nelem(excname))
			panic("%s", excname[vno]);
		panic("unknown trap/intr: %d\n", vno);
	}
	splhi();

	/* delaysched set because we held a lock or because our quantum ended */
	if(m->externup && m->externup->delaysched && clockintr){
		if(0)
		if(user && m->externup->ac == nil && m->externup->nqtrap == 0 && m->externup->nqsyscall == 0){
			if(!waserror()){
				m->externup->ac = getac(m->externup, -1);
				poperror();
				runacore();
				return;
			}
		}
		sched();
		splhi();
	}


	if(user){
		if(m->externup && (m->externup->procctl || m->externup->nnote))
			notify(ureg);
		kexit(ureg);
	}
}
Example #14
static inline void 
enable_ints_and_unlock(void)
{
	thread_call_unlock();
	(void)spllo();
}
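
Presumably the acquire path pairs these in the opposite order, raising spl before taking the lock so that an interrupt handler can never spin on a lock held by the context it interrupted. A sketch; the helper and lock-acquire names below are assumptions, not confirmed XNU API:

static inline spl_t
disable_ints_and_lock(void)
{
	spl_t	s;

	s = splsched();			/* mask scheduling interrupts first */
	thread_call_lock_spin();	/* assumed acquire counterpart of thread_call_unlock() */
	return s;
}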
Example #15
void
trap(Ureg *ureg)
{
	int rem, itype, t;

	if(up != nil)
		rem = ((char*)ureg)-up->kstack;
	else rem = ((char*)ureg)-(char*)m->stack;

	if(ureg->type != PsrMfiq && rem < 256) {
		dumpregs(ureg);
		panic("trap %d stack bytes remaining (%s), "
			  "up=#%8.8lux ureg=#%8.8lux pc=#%8.8ux"
			  ,rem, up?up->text:"", up, ureg, ureg->pc);
		for(;;);
	}

	itype = ureg->type;
	/*	All interrupts/exceptions should be resumed at ureg->pc-4,
		except for Data Abort which resumes at ureg->pc-8. */
	if(itype == PsrMabt+1)
		ureg->pc -= 8;
	else ureg->pc -= 4;

	if(up){
		up->pc = ureg->pc;
		up->dbgreg = ureg;
	}

	switch(itype) {
	case PsrMirq:
		t = m->ticks;		/* CPU time per proc */
		up = nil;		/* no process at interrupt level */
		irq(ureg);
		up = m->proc;
		preemption(m->ticks - t);
		m->intr++;
		break;

	case PsrMund:
		if(*(ulong*)ureg->pc == BREAK && breakhandler) {
			int s;
			Proc *p;

			p = up;
			s = breakhandler(ureg, p);
			if(s == BrkSched) {
				p->preempted = 0;
				sched();
			} else if(s == BrkNoSched) {
				/* stop it being preempted until next instruction */
				p->preempted = 1;
				if(up)
					up->dbgreg = 0;
				return;
			}
			break;
		}
		if(up == nil) goto faultpanic;
		spllo();
		if(waserror()) {
			if(waslo(ureg->psr) && up->type == Interp)
				disfault(ureg, up->env->errstr);
			setpanic();
			dumpregs(ureg);
			panic("%s", up->env->errstr);
		}
		if(!fpiarm(ureg)) {
			dumpregs(ureg);
			sys_trap_error(ureg->type);
		}
		poperror();
		break;

	case PsrMsvc: /* Jump through 0 or SWI */
		if(waslo(ureg->psr) && up && up->type == Interp) {
			spllo();
			dumpregs(ureg);
			sys_trap_error(ureg->type);
		}
		setpanic();
		dumpregs(ureg);
		panic("SVC/SWI exception");
		break;

	case PsrMabt: /* Prefetch abort */
		if(catchdbg && catchdbg(ureg, 0))
			break;
		/* FALL THROUGH */
	case PsrMabt+1: /* Data abort */
		if(waslo(ureg->psr) && up && up->type == Interp) {
			spllo();
			faultarm(ureg);
		}
		print("Data Abort\n");
		/* FALL THROUGH */

	default:
faultpanic:
		setpanic();
		dumpregs(ureg);
		panic("exception %uX %s\n", ureg->type, trapname(ureg->type));
		break;
	}

	splhi();
	if(up)
		up->dbgreg = 0;		/* becomes invalid after return from trap */
}
Example #16
File: proc.c Project: 8l/inferno
Proc*
runproc(void)
{
	Proc *p, *l;
	Schedq *rq, *erq;

	erq = runq + Nrq - 1;
loop:
	splhi();
	for(rq = runq; rq->head == 0; rq++)
		if(rq >= erq) {
			idlehands();
			spllo();
			goto loop;
		}

	if(!canlock(runq))
		goto loop;
	/* choose first one we last ran on this processor at this level or hasn't moved recently */
	l = nil;
	for(p = rq->head; p != nil; p = p->rnext){
		if(p->mp == nil || p->mp == MACHP(m->machno) || p->movetime < MACHP(0)->ticks)
			break;
		l = p;	/* remember predecessor for the unlink below */
	}
	if(p == nil)
		p = rq->head;
	/* p->mach==0 only when process state is saved */
	if(p == 0 || p->mach) {
		unlock(runq);
		goto loop;
	}
	if(p->rnext == nil)
		rq->tail = l;
	if(l)
		l->rnext = p->rnext;
	else
		rq->head = p->rnext;
	if(rq->head == nil){
		rq->tail = nil;
		occupied &= ~(1<<p->pri);
	}
	nrdy--;
	if(p->dbgstop){
		p->state = Stopped;
		unlock(runq);
		goto loop;
	}
	if(p->state != Ready)
		print("runproc %s %lud %s\n", p->text, p->pid, statename[p->state]);
	unlock(runq);
	p->state = Scheding;
	if(p->mp != MACHP(m->machno))
		p->movetime = MACHP(0)->ticks + HZ/10;
	p->mp = MACHP(m->machno);

/*
	if(edflock(p)){
		edfrun(p, rq == &runq[PriEdf]);	// start deadline timer and do admin
		edfunlock();
	}
*/
	return p;
}
Example #17
/*
 *	thread_terminate_daemon:
 *
 *	Perform final clean up for terminating threads.
 */
static void
thread_terminate_daemon(void)
{
	thread_t	self, thread;
	task_t		task;

	self = current_thread();
	self->options |= TH_OPT_SYSTEM_CRITICAL;

	(void)splsched();
	simple_lock(&thread_terminate_lock);

	while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
		simple_unlock(&thread_terminate_lock);
		(void)spllo();

		task = thread->task;

		task_lock(task);
		task->total_user_time += timer_grab(&thread->user_timer);
		if (thread->precise_user_kernel_time) {
			task->total_system_time += timer_grab(&thread->system_timer);
		} else {
			task->total_user_time += timer_grab(&thread->system_timer);
		}

		task->c_switch += thread->c_switch;
		task->p_switch += thread->p_switch;
		task->ps_switch += thread->ps_switch;

		task->syscalls_unix += thread->syscalls_unix;
		task->syscalls_mach += thread->syscalls_mach;

		queue_remove(&task->threads, thread, thread_t, task_threads);
		task->thread_count--;

		/* 
		 * If the task is being halted, and there is only one thread
		 * left in the task after this one, then wakeup that thread.
		 */
		if (task->thread_count == 1 && task->halting)
			thread_wakeup((event_t)&task->halting);

		task_unlock(task);

		lck_mtx_lock(&tasks_threads_lock);
		queue_remove(&threads, thread, thread_t, threads);
		threads_count--;
		lck_mtx_unlock(&tasks_threads_lock);

		thread_deallocate(thread);

		(void)splsched();
		simple_lock(&thread_terminate_lock);
	}

	assert_wait((event_t)&thread_terminate_queue, THREAD_UNINT);
	simple_unlock(&thread_terminate_lock);
	/* splsched */

	self->options &= ~TH_OPT_SYSTEM_CRITICAL;
	thread_block((thread_continue_t)thread_terminate_daemon);
	/*NOTREACHED*/
}
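
The spl/lock discipline above, distilled into a sketch (work_lock, work_queue, work_t, and process() are hypothetical): the queue lock is only held at splsched(), and both the lock and the spl are dropped around the expensive per-item work, then retaken before the next dequeue.

	work_t *item;

	(void)splsched();
	simple_lock(&work_lock);
	while((item = dequeue_head(&work_queue)) != NULL){
		simple_unlock(&work_lock);
		(void)spllo();

		process(item);		/* heavy work, interrupts enabled */

		(void)splsched();
		simple_lock(&work_lock);
	}
	simple_unlock(&work_lock);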
Example #18
/*
 * Now running in a thread.  Kick off other services,
 * invoke user bootstrap, enter pageout loop.
 */
static void
kernel_bootstrap_thread(void)
{
    processor_t		processor = current_processor();

#define kernel_bootstrap_thread_kprintf(x...) /* kprintf("kernel_bootstrap_thread: " x) */
    kernel_bootstrap_thread_kprintf("calling idle_thread_create\n");
    /*
     * Create the idle processor thread.
     */
    idle_thread_create(processor);

    /*
     * N.B. Do not stick anything else
     * before this point.
     *
     * Start up the scheduler services.
     */
    kernel_bootstrap_thread_kprintf("calling sched_startup\n");
    sched_startup();

    /*
     * Thread lifecycle maintenance (teardown, stack allocation)
     */
    kernel_bootstrap_thread_kprintf("calling thread_daemon_init\n");
    thread_daemon_init();

    /*
     * Thread callout service.
     */
    kernel_bootstrap_thread_kprintf("calling thread_call_initialize\n");
    thread_call_initialize();

    /*
     * Remain on current processor as
     * additional processors come online.
     */
    kernel_bootstrap_thread_kprintf("calling thread_bind\n");
    thread_bind(processor);

    /*
     * Kick off memory mapping adjustments.
     */
    kernel_bootstrap_thread_kprintf("calling mapping_adjust\n");
    mapping_adjust();

    /*
     *	Create the clock service.
     */
    kernel_bootstrap_thread_kprintf("calling clock_service_create\n");
    clock_service_create();

    /*
     *	Create the device service.
     */
    device_service_create();

    kth_started = 1;

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
    /*
     * Create and initialize the physical copy window for processor 0
     * This is required before kicking off IOKit.
     */
    cpu_physwindow_init(0);
#endif

    vm_kernel_reserved_entry_init();

#if MACH_KDP
    kernel_bootstrap_kprintf("calling kdp_init\n");
    kdp_init();
#endif

#if CONFIG_COUNTERS
    pmc_bootstrap();
#endif

#if (defined(__i386__) || defined(__x86_64__))
    if (turn_on_log_leaks && !new_nkdbufs)
        new_nkdbufs = 200000;
    start_kern_tracing(new_nkdbufs);
    if (turn_on_log_leaks)
        log_leaks = 1;
#endif

#ifdef	IOKIT
    PE_init_iokit();
#endif

    (void) spllo();		/* Allow interruptions */

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
    /*
     * Create and initialize the copy window for processor 0
     * This also allocates window space for all other processors.
     * However, this is dependent on the number of processors - so this call
     * must be after IOKit has been started because IOKit performs processor
     * discovery.
     */
    cpu_userwindow_init(0);
#endif

#if (!defined(__i386__) && !defined(__x86_64__))
    if (turn_on_log_leaks && !new_nkdbufs)
        new_nkdbufs = 200000;
    start_kern_tracing(new_nkdbufs);
    if (turn_on_log_leaks)
        log_leaks = 1;
#endif

    /*
     *	Initialize the shared region module.
     */
    vm_shared_region_init();
    vm_commpage_init();
    vm_commpage_text_init();

#if CONFIG_MACF
    mac_policy_initmach();
#endif

    /*
     * Initialize the global used for permuting kernel
     * addresses that may be exported to userland as tokens
     * using VM_KERNEL_ADDRPERM(). Force the random number
     * to be odd to avoid mapping a non-zero
     * word-aligned address to zero via addition.
     */
    vm_kernel_addrperm = (vm_offset_t)early_random() | 1;

    /*
     *	Start the user bootstrap.
     */
#ifdef	MACH_BSD
    bsd_init();
#endif

    /*
     * Get rid of segments used to bootstrap kext loading. This removes
     * the KLD, PRELINK symtab, LINKEDIT, and symtab segments/load commands.
     */
#if 0
    OSKextRemoveKextBootstrap();
#endif

    serial_keyboard_init();		/* Start serial keyboard if wanted */

    vm_page_init_local_q();

    thread_bind(PROCESSOR_NULL);

    /*
     *	Become the pageout daemon.
     */
    vm_pageout();
    /*NOTREACHED*/
}
Example #19
/*
 *  If changing this routine, look also at sleep().  It
 *  contains a copy of the guts of sched().
 */
void
sched(void)
{
	Mach *m = machp();
	Proc *p;

	if(m->ilockdepth)
		panic("cpu%d: ilockdepth %d, last lock %#p at %#p, sched called from %#p",
			m->machno,
			m->ilockdepth,
			m->externup? m->externup->lastilock: nil,
			(m->externup && m->externup->lastilock)? m->externup->lastilock->_pc: 0,
			getcallerpc(&p+2));

	kstackok();
	if(m->externup){
		/*
		 * Delay the sched until the process gives up the locks
		 * it is holding.  This avoids dumb lock loops.
		 * Don't delay if the process is Moribund.
		 * It called sched to die.
		 * But do sched eventually.  This avoids a missing unlock
		 * from hanging the entire kernel.
		 * But don't reschedule procs holding palloc or procalloc.
		 * Those are far too important to be holding while asleep.
		 *
		 * This test is not exact.  There can still be a few
		 * instructions in the middle of taslock when a process
		 * holds a lock but Lock.p has not yet been initialized.
		 */
		if(m->externup->nlocks)
		if(m->externup->state != Moribund)
		if(m->externup->delaysched < 20
		|| pga.Lock.p == m->externup
		|| procalloc.Lock.p == m->externup){
			m->externup->delaysched++;
 			run.delayedscheds++;
			return;
		}
		m->externup->delaysched = 0;

		splhi();
		/* statistics */
		if(m->externup->nqtrap == 0 && m->externup->nqsyscall == 0)
			m->externup->nfullq++;
		m->cs++;

		procsave(m->externup);
		mmuflushtlb(m->pml4->pa);
		if(setlabel(&m->externup->sched)){
			procrestore(m->externup);
			spllo();
			return;
		}
		/*debug*/gotolabel(&m->sched);
	}

	m->inidle = 1;
	p = runproc();	/* core 0 never returns */
	m->inidle = 0;

	if(!p->edf){
		updatecpu(p);
		p->priority = reprioritize(p);
	}
	if(nosmp){
		if(p != m->readied)
			m->schedticks = m->ticks + HZ/10;
		m->readied = 0;
	}
	m->externup = p;
	m->qstart = m->ticks;
	m->externup->nqtrap = 0;
	m->externup->nqsyscall = 0;
	m->externup->state = Running;
	//m->externup->mach = m;
	m->externup->mach = sys->machptr[m->machno];
	m->proc = m->externup;
//	iprint("m->externup->sched.sp %p * %p\n", up->sched.sp,
//		*(void **) m->externup->sched.sp);
	mmuswitch(m->externup);

	assert(!m->externup->wired || m->externup->wired == m);
	if (0) hi("gotolabel\n");
	/*debug*/gotolabel(&m->externup->sched);
}
Example #20
/*
 *  sleep if a condition is not true.  Another process will
 *  awaken us after it sets the condition.  When we awaken
 *  the condition may no longer be true.
 *
 *  we lock both the process and the rendezvous to keep r->p
 *  and p->r synchronized.
 */
void
sleep(Rendez *r, int (*f)(void*), void *arg)
{
	Mach *m = machp();
	Mpl pl;

	pl = splhi();

	if(m->externup->nlocks)
		print("process %d sleeps with %d locks held, last lock %#p locked at pc %#p, sleep called from %#p\n",
			m->externup->pid, m->externup->nlocks, m->externup->lastlock, m->externup->lastlock->_pc, getcallerpc(&r));
	lock(r);
	lock(&m->externup->rlock);
	if(r->_p){
		print("double sleep called from %#p, %d %d\n",
			getcallerpc(&r), r->_p->pid, m->externup->pid);
		dumpstack();
	}

	/*
	 *  Wakeup only knows there may be something to do by testing
	 *  r->p in order to get something to lock on.
	 *  Flush that information out to memory in case the sleep is
	 *  committed.
	 */
	r->_p = m->externup;

	if((*f)(arg) || m->externup->notepending){
		/*
		 *  if condition happened or a note is pending
		 *  never mind
		 */
		r->_p = nil;
		unlock(&m->externup->rlock);
		unlock(r);
	} else {
		/*
		 *  now we are committed to
		 *  change state and call scheduler
		 */
		if(m->externup->trace)
			proctrace(m->externup, SSleep, 0);
		m->externup->state = Wakeme;
		m->externup->r = r;

		/* statistics */
		m->cs++;

		procsave(m->externup);
		mmuflushtlb(m->pml4->pa);
		if(setlabel(&m->externup->sched)) {
			/*
			 *  here when the process is awakened
			 */
			procrestore(m->externup);
			spllo();
		} else {
			/*
			 *  here to go to sleep (i.e. stop Running)
			 */
			unlock(&m->externup->rlock);
			unlock(r);
			/*debug*/gotolabel(&m->sched);
		}
	}

	if(m->externup->notepending) {
		m->externup->notepending = 0;
		splx(pl);
		if(m->externup->procctl == Proc_exitme && m->externup->closingfgrp)
			forceclosefgrp();
		error(Eintr);
	}

	splx(pl);
}
Example #21
/* it should be unsigned. FIXME */
void
syscall(int badscallnr, Ureg* ureg)
{
	unsigned int scallnr = (unsigned int) badscallnr;
	char *e;
	uintptr	sp;
	int s;
	vlong startns, stopns;
	Ar0 ar0;
	static Ar0 zar0;

	if(!userureg(ureg))
		panic("syscall: cs %#llux\n", ureg->cs);

	cycles(&up->kentry);

	m->syscall++;
	up->nsyscall++;
	up->nqsyscall++;
	up->insyscall = 1;
	up->pc = ureg->ip;
	up->dbgreg = ureg;
	sp = ureg->sp;
	startns = 0;

	if(up->procctl == Proc_tracesyscall){
		/*
		 * Redundant validaddr.  Do we care?
		 * Tracing syscalls is not exactly a fast path...
		 * Beware, validaddr currently does a pexit rather
		 * than an error if there's a problem; that might
		 * change in the future.
		 */
		if(sp < (USTKTOP-BIGPGSZ) || sp > (USTKTOP-sizeof(up->arg)-BY2SE))
			validaddr(UINT2PTR(sp), sizeof(up->arg)+BY2SE, 0);

		syscallfmt(scallnr, (va_list)(sp+BY2SE));	
		up->procctl = Proc_stopme;
		procctl(up);
		if(up->syscalltrace)
			free(up->syscalltrace);
		up->syscalltrace = nil;
		startns = todget(nil);		
	}

	up->scallnr = scallnr;
	if(scallnr == RFORK)
		fpusysrfork(ureg);
	spllo();

	sp = ureg->sp;
	up->nerrlab = 0;
	ar0 = zar0;
	if(!waserror()){
		if(scallnr >= nsyscall || systab[scallnr].f == nil){
			pprint("bad sys call number %d pc %#llux\n",
				scallnr, ureg->ip);
			postnote(up, 1, "sys: bad sys call", NDebug);
			error(Ebadarg);
		}

		if(sp < (USTKTOP-BIGPGSZ) || sp > (USTKTOP-sizeof(up->arg)-BY2SE))
			validaddr(UINT2PTR(sp), sizeof(up->arg)+BY2SE, 0);

		memmove(up->arg, UINT2PTR(sp+BY2SE), sizeof(up->arg));
		up->psstate = systab[scallnr].n;

		systab[scallnr].f(&ar0, (va_list)up->arg);
		if(scallnr == SYSR1){
			/*
			 * BUG: must go when ron binaries go.
			 * NIX: Returning from execac().
			 * This means that the process is back to the
			 * time sharing core. However, the process did
			 * already return from the system call, when dispatching
			 * the user code to the AC. The only thing left is to
			 * return. The user registers should be ok, because
			 * up->dbgreg has been the user context for the process.
			 */
			return;
		}
		poperror();
	}
	else{
		/* failure: save the error buffer for errstr */
		e = up->syserrstr;
		up->syserrstr = up->errstr;
		up->errstr = e;
		if(DBGFLG && up->pid == 1)
			iprint("%s: syscall %s error %s\n",
				up->text, systab[scallnr].n, up->syserrstr);
		ar0 = systab[scallnr].r;
	}

	/*
	 * NIX: for the execac() syscall, what follows is done within
	 * the system call, because it never returns.
	 * See acore.c:/^retfromsyscall
	 */

	noerrorsleft();

	/*
	 * Put return value in frame.
	 */
	ureg->ax = ar0.p;

	if(up->procctl == Proc_tracesyscall){
		stopns = todget(nil);
		up->procctl = Proc_stopme;
		sysretfmt(scallnr, (va_list)(sp+BY2SE), &ar0, startns, stopns);
		s = splhi();
		procctl(up);
		splx(s);
		if(up->syscalltrace)
			free(up->syscalltrace);
		up->syscalltrace = nil;		
	}else if(up->procctl == Proc_totc || up->procctl == Proc_toac)
		procctl(up);


	up->insyscall = 0;
	up->psstate = 0;

	if(scallnr == NOTED)
		noted(ureg, *(uintptr*)(sp+BY2SE));

	splhi();
	if(scallnr != RFORK && (up->procctl || up->nnote))
		notify(ureg);

	/* if we delayed sched because we held a lock, sched now */
	if(up->delaysched){
		sched();
		splhi();
	}
	kexit(ureg);
}
Example #22
/*
 *  Call user, if necessary, with note.
 *  Pass user the Ureg struct and the note on his stack.
 */
int
notify(Ureg* ureg)
{
	int l;
	Mpl pl;
	Note note;
	uintptr sp;
	NFrame *nf;

	/*
	 * Calls procctl splhi, see comment in procctl for the reasoning.
	 */
	if(up->procctl)
		procctl(up);
	if(up->nnote == 0)
		return 0;

	fpunotify(ureg);

	pl = spllo();
	qlock(&up->debug);

	up->notepending = 0;
	memmove(&note, &up->note[0], sizeof(Note));
	if(strncmp(note.msg, "sys:", 4) == 0){
		l = strlen(note.msg);
		if(l > ERRMAX-sizeof(" pc=0x0123456789abcdef"))
			l = ERRMAX-sizeof(" pc=0x0123456789abcdef");
		sprint(note.msg+l, " pc=%#p", ureg->ip);
	}

	if(note.flag != NUser && (up->notified || up->notify == nil)){
		qunlock(&up->debug);
		if(note.flag == NDebug)
			pprint("suicide: %s\n", note.msg);
		pexit(note.msg, note.flag != NDebug);
	}

	if(up->notified){
		qunlock(&up->debug);
		splhi();
		return 0;
	}

	if(up->notify == nil){
		qunlock(&up->debug);
		pexit(note.msg, note.flag != NDebug);
	}
	if(!okaddr(PTR2UINT(up->notify), sizeof(ureg->ip), 0)){
		qunlock(&up->debug);
		pprint("suicide: bad function address %#p in notify\n",
			up->notify);
		pexit("Suicide", 0);
	}

	sp = ureg->sp - sizeof(NFrame);
	if(!okaddr(sp, sizeof(NFrame), 1)){
		qunlock(&up->debug);
		pprint("suicide: bad stack address %#p in notify\n", sp);
		pexit("Suicide", 0);
	}

	nf = UINT2PTR(sp);
	memmove(&nf->ureg, ureg, sizeof(Ureg));
	nf->old = up->ureg;
	up->ureg = nf;	/* actually the NFrame, for noted */
	memmove(nf->msg, note.msg, ERRMAX);
	nf->arg1 = nf->msg;
	nf->arg0 = &nf->ureg;
	ureg->bp = PTR2UINT(nf->arg0);
	nf->ip = 0;

	ureg->sp = sp;
	ureg->ip = PTR2UINT(up->notify);
	up->notified = 1;
	up->nnote--;
	memmove(&up->lastnote, &note, sizeof(Note));
	memmove(&up->note[0], &up->note[1], up->nnote*sizeof(Note));

	qunlock(&up->debug);
	splx(pl);

	return 1;
}
Example #23
void	halt(void) { spllo(); for(;;); }
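
The spllo() here is the point: the idle spin must run with interrupts enabled, otherwise the halted CPU could never take the interrupt that lets the rest of the system make progress. A sketch of a gentler variant, reusing the idlehands() helper seen in the scheduler examples above:

void
halt(void)
{
	spllo();
	for(;;)
		idlehands();	/* e.g. wait-for-interrupt instead of a hot spin */
}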
Example #24
void
clockinit(void)
{
	int i, s;
	Timerregs *tn;

	clockshutdown();

	/* turn cycle counter on */
	cpwrsc(0, CpCLD, CpCLDena, CpCLDenacyc, 1<<31);

	/* turn all counters on and clear the cycle counter */
	cpwrsc(0, CpCLD, CpCLDena, CpCLDenapmnc, 1<<2 | 1);

	/* let users read the cycle counter directly */
	cpwrsc(0, CpCLD, CpCLDena, CpCLDenapmnc, 1);

	ilock(&clklck);
	m->fastclock = 1;
	m->ticks = ticks = 0;

	/*
	 * T0 is a freerunning timer (cycle counter); it wraps,
	 * automatically reloads, and does not dispatch interrupts.
	 */
	tn = (Timerregs *)Tn0;
	tn->tcrr = Freebase;			/* count up to 0 */
	tn->tldr = Freebase;
	coherence();
	tn->tclr = Ar | St;
	iunlock(&clklck);

	/*
	 * T1 is the interrupting timer and does not participate
	 * in measuring time.  It is initially set to HZ.
	 */
	tn = (Timerregs *)Tn1;
	irqenable(Tn0irq+1, clockintr, tn, "clock");
	ilock(&clklck);
	tn->tcrr = -Tcycles;			/* approx.; count up to 0 */
	tn->tldr = -Tcycles;
	coherence();
	tn->tclr = Ar | St;
	coherence();
	tn->tier = Ovf_it;
	coherence();
	iunlock(&clklck);

	/*
	 * verify sanity of timer1
	 */
	s = spllo();			/* risky */
	for (i = 0; i < 5 && ticks == 0; i++) {
		delay(10);
		cachedwbinvse(&ticks, sizeof ticks);
	}
	splx(s);
	if (ticks == 0) {
		if (tn->tcrr == 0)
			panic("clock not interrupting");
		else if (tn->tcrr == tn->tldr)
			panic("clock not ticking at all");
#ifdef PARANOID
		else
			panic("clock running very slowly");
#endif
	}

	guessmips(issue1loop, "single");
	if (Debug)
		iprint(", ");
	guessmips(issue2loop, "dual");
	if (Debug)
		iprint("\n");

	/*
	 * m->delayloop should be the number of delay loop iterations
	 * needed to consume 1 ms.  2 is min. instructions in the delay loop.
	 */
	m->delayloop = m->cpuhz / (1000 * 2);
//	iprint("m->delayloop = %lud\n", m->delayloop);

	/*
	 *  desynchronize the processor clocks so that they don't all
	 *  try to resched at the same time.
	 */
	delay(m->machno*2);
}
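
A worked instance of the m->delayloop formula above, assuming a hypothetical 1 GHz core:

	/* with m->cpuhz == 1000000000 (assumed): */
	m->delayloop = 1000000000UL / (1000 * 2);	/* = 500000 iterations per ms */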
Example #25
/*
 *  Call user, if necessary, with note.
 *  Pass user the Ureg struct and the note on his stack.
 */
int
notify(Ureg* ureg)
{
	int l;
	ulong s, sp;
	Note *n;

	if(up->procctl)
		procctl(up);
	if(up->nnote == 0)
		return 0;

	if(up->fpstate == FPactive){
		fpsave(&up->fpsave);
		up->fpstate = FPinactive;
	}
	up->fpstate |= FPillegal;

	s = spllo();
	qlock(&up->debug);
	up->notepending = 0;
	n = &up->note[0];
	if(strncmp(n->msg, "sys:", 4) == 0){
		l = strlen(n->msg);
		if(l > ERRMAX-15)	/* " pc=0x12345678\0" */
			l = ERRMAX-15;
		sprint(n->msg+l, " pc=0x%.8lux", ureg->pc);
	}

	if(n->flag!=NUser && (up->notified || up->notify==0)){
		if(n->flag == NDebug)
			pprint("suicide: %s\n", n->msg);
		qunlock(&up->debug);
		pexit(n->msg, n->flag!=NDebug);
	}

	if(up->notified){
		qunlock(&up->debug);
		splhi();
		return 0;
	}

	if(!up->notify){
		qunlock(&up->debug);
		pexit(n->msg, n->flag!=NDebug);
	}
	sp = ureg->usp;
	sp -= 256;	/* debugging: preserve context causing problem */
	sp -= sizeof(Ureg);
if(0) print("%s %lud: notify %.8lux %.8lux %.8lux %s\n", 
	up->text, up->pid, ureg->pc, ureg->usp, sp, n->msg);

	if(!okaddr((ulong)up->notify, 1, 0)
	|| !okaddr(sp-ERRMAX-4*BY2WD, sizeof(Ureg)+ERRMAX+4*BY2WD, 1)){
		pprint("suicide: bad address in notify\n");
		qunlock(&up->debug);
		pexit("Suicide", 0);
	}

	memmove((Ureg*)sp, ureg, sizeof(Ureg));
	*(Ureg**)(sp-BY2WD) = up->ureg;	/* word under Ureg is old up->ureg */
	up->ureg = (void*)sp;
	sp -= BY2WD+ERRMAX;
	memmove((char*)sp, up->note[0].msg, ERRMAX);
	sp -= 3*BY2WD;
	*(ulong*)(sp+2*BY2WD) = sp+3*BY2WD;		/* arg 2 is string */
	*(ulong*)(sp+1*BY2WD) = (ulong)up->ureg;	/* arg 1 is ureg* */
	*(ulong*)(sp+0*BY2WD) = 0;			/* arg 0 is pc */
	ureg->usp = sp;
	ureg->pc = (ulong)up->notify;
	up->notified = 1;
	up->nnote--;
	memmove(&up->lastnote, &up->note[0], sizeof(Note));
	memmove(&up->note[0], &up->note[1], up->nnote*sizeof(Note));

	qunlock(&up->debug);
	splx(s);
	return 1;
}
Example #26
/*
 * Now running in a thread.  Kick off other services,
 * invoke user bootstrap, enter pageout loop.
 */
static void
kernel_bootstrap_thread(void)
{
	processor_t		processor = current_processor();

#define kernel_bootstrap_thread_kprintf(x...) /* kprintf("kernel_bootstrap_thread: " x) */
	kernel_bootstrap_thread_log("idle_thread_create");
	/*
	 * Create the idle processor thread.
	 */
	idle_thread_create(processor);

	/*
	 * N.B. Do not stick anything else
	 * before this point.
	 *
	 * Start up the scheduler services.
	 */
	kernel_bootstrap_thread_log("sched_startup");
	sched_startup();

	/*
	 * Thread lifecycle maintenance (teardown, stack allocation)
	 */
	kernel_bootstrap_thread_log("thread_daemon_init");
	thread_daemon_init();

	/* Create kernel map entry reserve */
	vm_kernel_reserved_entry_init();

	/*
	 * Thread callout service.
	 */
	kernel_bootstrap_thread_log("thread_call_initialize");
	thread_call_initialize();

	/*
	 * Remain on current processor as
	 * additional processors come online.
	 */
	kernel_bootstrap_thread_log("thread_bind");
	thread_bind(processor);

	/*
	 * Initialize ipc thread call support.
	 */
	kernel_bootstrap_thread_log("ipc_thread_call_init");
	ipc_thread_call_init();

	/*
	 * Kick off memory mapping adjustments.
	 */
	kernel_bootstrap_thread_log("mapping_adjust");
	mapping_adjust();

	/*
	 *	Create the clock service.
	 */
	kernel_bootstrap_thread_log("clock_service_create");
	clock_service_create();

	/*
	 *	Create the device service.
	 */
	device_service_create();

	kth_started = 1;
		
#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
	/*
	 * Create and initialize the physical copy window for processor 0
	 * This is required before kicking off IOKit.
	 */
	cpu_physwindow_init(0);
#endif


	
#if MACH_KDP 
	kernel_bootstrap_log("kdp_init");
	kdp_init();
#endif

#if ALTERNATE_DEBUGGER
	alternate_debugger_init();
#endif

#if KPC
	kpc_init();
#endif

#if CONFIG_ECC_LOGGING
	ecc_log_init();
#endif 

#if KPERF
	kperf_bootstrap();
#endif

#if HYPERVISOR
	hv_support_init();
#endif

#if CONFIG_TELEMETRY
	kernel_bootstrap_log("bootprofile_init");
	bootprofile_init();
#endif

#if (defined(__i386__) || defined(__x86_64__)) && CONFIG_VMX
	vmx_init();
#endif

#if (defined(__i386__) || defined(__x86_64__))
	if (kdebug_serial) {
		new_nkdbufs = 1;
		if (trace_typefilter == 0)
			trace_typefilter = 1;
	}
	if (turn_on_log_leaks && !new_nkdbufs)
		new_nkdbufs = 200000;
	if (trace_typefilter)
		start_kern_tracing_with_typefilter(new_nkdbufs,
						   FALSE,
						   trace_typefilter);
	else
		start_kern_tracing(new_nkdbufs, FALSE);
	if (turn_on_log_leaks)
		log_leaks = 1;

#endif

	kernel_bootstrap_log("prng_init");
	prng_cpu_init(master_cpu);

#ifdef	IOKIT
	PE_init_iokit();
#endif

	assert(ml_get_interrupts_enabled() == FALSE);
	(void) spllo();		/* Allow interruptions */

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
	/*
	 * Create and initialize the copy window for processor 0
	 * This also allocates window space for all other processors.
	 * However, this is dependent on the number of processors - so this call
	 * must be after IOKit has been started because IOKit performs processor
	 * discovery.
	 */
	cpu_userwindow_init(0);
#endif

#if (!defined(__i386__) && !defined(__x86_64__))
	if (turn_on_log_leaks && !new_nkdbufs)
		new_nkdbufs = 200000;
	if (trace_typefilter)
		start_kern_tracing_with_typefilter(new_nkdbufs, FALSE, trace_typefilter);
	else
		start_kern_tracing(new_nkdbufs, FALSE);
	if (turn_on_log_leaks)
		log_leaks = 1;
#endif

	/*
	 *	Initialize the shared region module.
	 */
	vm_shared_region_init();
	vm_commpage_init();
	vm_commpage_text_init();


#if CONFIG_MACF
	kernel_bootstrap_log("mac_policy_initmach");
	mac_policy_initmach();
#endif

#if CONFIG_SCHED_SFI
	kernel_bootstrap_log("sfi_init");
	sfi_init();
#endif

	/*
	 * Initialize the globals used for permuting kernel
	 * addresses that may be exported to userland as tokens
	 * using VM_KERNEL_ADDRPERM()/VM_KERNEL_ADDRPERM_EXTERNAL().
	 * Force the random number to be odd to avoid mapping a non-zero
	 * word-aligned address to zero via addition.
	 * Note: at this stage we can use the cryptographically secure PRNG
	 * rather than early_random().
	 */
	read_random(&vm_kernel_addrperm, sizeof(vm_kernel_addrperm));
	vm_kernel_addrperm |= 1;
	read_random(&buf_kernel_addrperm, sizeof(buf_kernel_addrperm));
	buf_kernel_addrperm |= 1;
	read_random(&vm_kernel_addrperm_ext, sizeof(vm_kernel_addrperm_ext));
	vm_kernel_addrperm_ext |= 1;

	vm_set_restrictions();



	/*
	 *	Start the user bootstrap.
	 */
#ifdef	MACH_BSD
	bsd_init();
#endif

    /*
     * Get rid of segments used to bootstrap kext loading. This removes
     * the KLD, PRELINK symtab, LINKEDIT, and symtab segments/load commands.
     */
	OSKextRemoveKextBootstrap();

	serial_keyboard_init();		/* Start serial keyboard if wanted */

	vm_page_init_local_q();

	thread_bind(PROCESSOR_NULL);

	/*
	 *	Become the pageout daemon.
	 */
	vm_pageout();
	/*NOTREACHED*/
}
Example #27
static Proc*
singlerunproc(void)
{
	Mach *m = machp();
	Schedq *rq;
	Proc *p;
	uint32_t start, now, skipscheds;
	int i;

	start = perfticks();
	skipscheds = 0;

	/* cooperative scheduling until the clock ticks */
	if((p=m->readied) && p->mach==0 && p->state==Ready
	&& run.runq[Nrq-1].head == nil && run.runq[Nrq-2].head == nil){
		skipscheds++;
		rq = &run.runq[p->priority];
		if(0)hi("runproc going to found before loop...\n");
		goto found;
	}

	run.preempts++;

loop:
	/*
	 *  find a process that last ran on this processor (affinity),
	 *  or one that hasn't moved in a while (load balancing).  Every
	 *  time around the loop affinity goes down.
	 */
	spllo();
	for(i = 0;; i++){
		/*
		 *  find the highest priority target process that this
		 *  processor can run given affinity constraints.
		 *
		 */
		for(rq = &run.runq[Nrq-1]; rq >= run.runq; rq--){
			for(p = rq->head; p; p = p->rnext){
				if(p->mp == nil || p->mp == sys->machptr[m->machno]
				|| (!p->wired && i > 0))
				{
					if(0)hi("runproc going to found inside loop...\n");
					goto found;
				}
			}
		}

		/* waste time or halt the CPU */
		idlehands();

		/* remember how much time we're here */
		now = perfticks();
		m->perf.inidle += now-start;
		start = now;
	}

found:
	splhi();
	if(0)hi("runproc into found...\n");
	p = dequeueproc(&run, rq, p);
	if(p == nil)
	{
		if(0)hi("runproc p=nil :(\n");
			goto loop;
	}

	p->state = Scheding;
	if(0)hi("runproc, pm->mp = sys->machptr[m->machno]\n");
	p->mp = sys->machptr[m->machno];
	if(0){hi("runproc, sys->machptr[m->machno] = "); put64((uint64_t)p->mp); hi("\n");}

	if(edflock(p)){
		edfrun(p, rq == &run.runq[PriEdf]);	/* start deadline timer and do admin */
		edfunlock();
	}
	if(p->trace)
		proctrace(p, SRun, 0);
	/* avoiding warnings, this will be removed */
	USED(mach0sched); USED(smprunproc);
	if(0){hi("runproc, returning p ");
	put64((uint64_t)p);
	hi("\n");}

	
	return p;
}
Example #28
void
setpanic(void)
{
	spllo();
	consoleprint = 1;
}
Example #29
/*
 * Scheduling thread run as the main loop of cpu 0
 * Used in AMP sched.
 */
static void
mach0sched(void)
{
	Mach *m = machp();
	Schedq *rq;
	Proc *p;
	Mach *mp;
	uint32_t start, now;
	int n, i; //, j;

	assert(m->machno == 0);
	acmodeset(NIXKC);		/* we don't time share any more */
	n = 0;
	start = perfticks();
loop:

	/*
	 * find a ready process that we might run.
	 */
	spllo();
	for(rq = &run.runq[Nrq-1]; rq >= run.runq; rq--)
		for(p = rq->head; p; p = p->rnext){
			/*
			 * wired processes may only run when their core is available.
			 */
			if(p->wired != nil){
				if(p->wired->proc == nil)
					goto found;
				continue;
			}
			/*
			 * find a ready process that did run at an available core
			 * or one that has not moved for some time.
			 */
			if(p->mp == nil || p->mp->proc == nil || n>0){
				goto found;
			}
		}
	/* waste time or halt the CPU */
	idlehands();
	/* remember how much time we're here */
	now = perfticks();
	m->perf.inidle += now-start;
	start = now;
	n++;
	goto loop;

found:
	assert(m->machno == 0);
	splhi();
	/*
	 * find a core for this process, but honor wiring.
	 */
	mp = p->wired;
	if(mp != nil){
		if(mp->proc != nil)
			goto loop;
	}else{
		for(i = 0; i < MACHMAX; i++){
			/*j = pickcore(p->color, i);
			if((mp = sys->machptr[j]) != nil && mp->online && mp->nixtype == NIXTC){*/
			if((mp = sys->machptr[i]) != nil){ // && mp->online && mp->nixtype == NIXTC){
				if(mp != m && mp->proc == nil)
					break;
			}
		}
		if(i == MACHMAX){
			preemptfor(p);
			goto loop;
		}
	}

	p = dequeueproc(&run, rq, p);
	mp->proc = p;
	if(p != nil){
		p->state = Scheding;
		p->mp = mp;
	}

	n = 0;
	goto loop;
}
Example #30
/*
 * Now running in a thread.  Create the rest of the kernel threads
 * and the bootstrap task.
 */
void
start_kernel_threads(void)
{
	register int	i;

	/*
	 *	Create the idle threads and the other
	 *	service threads.
	 */
	for (i = 0; i < NCPUS; i++) {
	    if (machine_slot[i].is_cpu) {
		thread_t	th;
		spl_t	s;
		processor_t processor = cpu_to_processor(i);

		(void) thread_create_at(kernel_task, &th, idle_thread);
		s=splsched();
		thread_lock(th);
		thread_bind_locked(th, processor);
		processor->idle_thread = th;
		/*(void) thread_resume(th->top_act);*/
		th->state |= TH_RUN;
		thread_setrun( th, TRUE, TAIL_Q);
		thread_unlock( th );
		splx(s);
	    }
	}

	(void) kernel_thread(kernel_task, reaper_thread, (char *) 0);
#if	THREAD_SWAPPER
	(void) kernel_thread(kernel_task, swapin_thread, (char *) 0);
	(void) kernel_thread(kernel_task, swapout_thread, (char *) 0);
#endif	/* THREAD_SWAPPER */
#if	TASK_SWAPPER
	if (task_swap_on) {
		(void) kernel_thread(kernel_task, task_swapper, (char *) 0);
		(void) kernel_thread(kernel_task, task_swap_swapout_thread,
				     (char *) 0);
	}
#endif	/* TASK_SWAPPER */
	(void) kernel_thread(kernel_task, sched_thread, (char *) 0);
	(void) kernel_thread(kernel_task, timeout_thread, (char *) 0);
#if	NORMA_VM
	(void) kernel_thread(kernel_task, vm_object_thread, (char *) 0);
#endif	/* NORMA_VM */

	/*
	 *	Create the clock service.
	 */
	clock_service_create();

	/*
	 *	Create the device service.
	 */
	device_service_create();

	/*
	 *	Initialize distributed services, starting
	 *	with distributed IPC and progressing to any
	 *	services layered on top of that.
	 *
	 *	This stub exists even in non-NORMA systems.
	 */
	norma_bootstrap();

	/*
	 *	Initialize any testing services blocking the main kernel
	 *	thread so that the in-kernel tests run without interference
	 *	from other boot time activities. We will resume this thread
	 *	in kernel_test_thread(). 
	 */

#if	KERNEL_TEST

	/*
	 *	Initialize the lock that will be used to guard
	 *	variables that will be used in the test synchronization
	 *	scheme.
	 */ 
		simple_lock_init(&kernel_test_lock, ETAP_MISC_KERNEL_TEST);

#if	PARAGON860
	{
		char		*s;
		unsigned int	firstnode;

		/*
		 *	Only start up loopback tests on boot node.
		 */
		if ((s = (char *) getbootenv("BOOT_FIRST_NODE")) == 0)
			panic("startup");
		firstnode = atoi(s);
		(void) kernel_thread(kernel_task, kernel_test_thread,
				     (char * )(dipc_node_self() == 
						       (node_name) firstnode));
	}
#else	/* PARAGON860 */
	(void) kernel_thread(kernel_task, kernel_test_thread, (char *) 0);
#endif	/* PARAGON860 */
	{

		/*
		 *	The synchronization scheme uses a simple lock, two
		 *	booleans and the wakeup event. The wakeup event will
		 *	be posted by kernel_test_thread().
		 */
		spl_t s;
		s = splsched();
		simple_lock(&kernel_test_lock);
		while(!kernel_test_thread_sync_done){
			assert_wait((event_t) &start_kernel_threads, FALSE);
			start_kernel_threads_blocked = TRUE;
			simple_unlock(&kernel_test_lock);
			splx(s);
			thread_block((void (*)(void)) 0);
			s = splsched();
			simple_lock(&kernel_test_lock);
			start_kernel_threads_blocked = FALSE;
		}
		kernel_test_thread_sync_done = FALSE; /* Reset for next use */
		simple_unlock(&kernel_test_lock);
		splx(s);
	}


#endif	/* KERNEL_TEST */
	/*
	 *	Start the user bootstrap.
	 */
	bootstrap_create();

#if	XPR_DEBUG
	xprinit();		/* XXX */
#endif	/* XPR_DEBUG */

#if	NCPUS > 1
	/*
	 *	Create the shutdown thread.
	 */
	(void) kernel_thread(kernel_task, action_thread, (char *) 0);

	/*
	 *	Allow other CPUs to run.
	 *
	 * (this must be last, to allow bootstrap_create to fiddle with
	 *  its child thread before some cpu tries to run it)
	 */
	start_other_cpus();
#endif	/* NCPUS > 1 */

	/*
	 *	Become the pageout daemon.
	 */
	(void) spllo();
	vm_pageout();
	/*NOTREACHED*/
}