long
tseconds(void)
{
	vlong x;
	int i;

	x = todget(nil);
	x = x/TODFREQ;
	i = x;
	return i;
}
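/* Illustrative only: what tseconds() above reduces a todget(nil) sample to,
 * assuming the usual TODFREQ of 1000000000 (the time-of-day clock counts
 * nanoseconds). The sample value below is made up. */
#include <stdio.h>

enum { TODFREQ = 1000000000 };

int
main(void)
{
	long long todns = 1700000000123456789LL;	/* hypothetical todget(nil) reading */
	long sec = todns / TODFREQ;			/* nanoseconds -> whole seconds */

	printf("%ld\n", sec);				/* prints 1700000000 */
	return 0;
}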
void
edfinit(Proc *p)
{
	if(!edfinited){
		fmtinstall('t', timeconv);
		edfinited++;
	}
	now = todget(nil);
	DPRINT("%t edfinit %lud[%s]\n", now, p->pid, statename[p->state]);
	p->edf = malloc(sizeof(Edf));
	if(p->edf == nil)
		error(Enomem);
	return;
}
Edf*
edflock(Proc *p)
{
	Edf *e;

	if (p->edf == nil)
		return nil;
	ilock(&thelock);
	if ((e = p->edf) && (e->flags & Admitted)){
		now = todget(nil);
		return e;
	}
	iunlock(&thelock);
	return nil;
}
static void
release(Proc *p)
{
	/* Called with edflock held */
	Edf *e;
	void (*pt)(Proc*, int, vlong, vlong);
	long n;
	vlong nowns;

	e = p->edf;
	e->flags &= ~Yield;
	if(e->d - now < 0){
		e->periods++;
		e->r = now;
		if((e->flags & Sporadic) == 0){
			/*
			 * Non sporadic processes stay true to their period;
			 * calculate next release time.
			 * Second test limits duration of while loop.
			 */
			if((n = now - e->t) > 0){
				if(n < e->T)
					e->t += e->T;
				else
					e->t = now + e->T - (n % e->T);
			}
		}else{
			/* Sporadic processes may not be released earlier than
			 * one period after this release
			 */
			e->t = e->r + e->T;
		}
		e->d = e->r + e->D;
		e->S = e->C;
		DPRINT("%lud release %d[%s], r=%lud, d=%lud, t=%lud, S=%lud\n",
			now, p->pid, statename[p->state], e->r, e->d, e->t, e->S);
		if(pt = proctrace){
			nowns = todget(nil);
			pt(p, SRelease, nowns, 0);
			pt(p, SDeadline, nowns + 1000LL*e->D, 0);
		}
	}else{
		DPRINT("%lud release %d[%s], too late t=%lud, called from %#p\n",
			now, p->pid, statename[p->state], e->t, getcallerpc(&p));
	}
}
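/* Illustrative only: the next-release-time arithmetic used by release()
 * above for non-sporadic processes. The names t, T and now are local
 * stand-ins for the Edf fields (all in the same time unit), not kernel
 * symbols. */
#include <stdio.h>

static long
nextrelease(long t, long T, long now)
{
	long n;

	n = now - t;
	if(n <= 0)
		return t;			/* previous release time still in the future */
	if(n < T)
		return t + T;			/* less than one period behind: advance once */
	return now + T - (n % T);		/* several periods behind: realign to the old phase */
}

int
main(void)
{
	/* period 100, last release at 0, current time 350:
	 * the next release lands at 400, preserving the original phase */
	printf("%ld\n", nextrelease(0, 100, 350));
	return 0;
}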
/*
 * like the old #c/time but with added info.  Return
 *
 *	secs	nanosecs	fastticks	fasthz
 */
static int
readtime(uint32_t off, char *buf, int n)
{
	int64_t nsec, ticks;
	int32_t sec;
	char str[7*NUMSIZE];

	nsec = todget(&ticks);
	if(fasthz == 0LL)
		fastticks((uint64_t*)&fasthz);
	sec = nsec/1000000000ULL;
	snprint(str, sizeof(str), "%*lu %*llu %*llu %*llu ",
		NUMSIZE-1, sec,
		VLNUMSIZE-1, nsec,
		VLNUMSIZE-1, ticks,
		VLNUMSIZE-1, fasthz);
	return readstr(off, buf, n, str);
}
/*
 * like the old #c/time but with added info.  Return
 *
 *	secs	nanosecs	fastticks	fasthz
 */
static int
readtime(ulong off, char *buf, int n)
{
	vlong nsec, ticks;
	long sec;
	char str[7*NUMSIZE];

	nsec = todget(&ticks);
	if(fasthz == 0LL)
		fastticks((uvlong*)&fasthz);
	sec = nsec/1000000000ULL;
	snprint(str, sizeof(str), "%*lud %*llud %*llud %*llud ",
		NUMSIZE-1, sec,
		VLNUMSIZE-1, nsec,
		VLNUMSIZE-1, ticks,
		VLNUMSIZE-1, fasthz);
	return readstr(off, buf, n, str);
}
void
edfrun(Proc *p, int edfpri)
{
	Edf *e;
	int32_t tns;
	Sched *sch;

	e = p->edf;
	sch = procsched(p);
	/* Called with edflock held */
	if(edfpri){
		tns = e->d - now;
		if(tns <= 0 || e->S == 0){
			/* Deadline reached or resources exhausted,
			 * deschedule forthwith
			 */
			p->delaysched++;
			sch->delayedscheds++;
			e->s = now;
			return;
		}
		if(e->S < tns)
			tns = e->S;
		if(tns < 20)
			tns = 20;
		e->Timer.tns = 1000LL * tns;	/* µs to ns */
		if(e->Timer.tt == nil || e->Timer.tf != deadlineintr){
			DPRINT("%lu edfrun, deadline=%lu\n", now, tns);
		}else{
			DPRINT("v");
		}
		if(p->trace)
			proctrace(p, SInte, todget(nil) + e->Timer.tns);
		e->Timer.tmode = Trelative;
		e->Timer.tf = deadlineintr;
		e->Timer.ta = p;
		timeradd(&e->Timer);
	}else{
		DPRINT("<");
	}
	e->s = now;
}
/*
 * like the old #c/time but with added info.  Return
 *
 *	secs	nanosecs	fastticks	fasthz
 */
static int readtime(uint32_t off, char *buf, int n)
{
	int64_t nsec, ticks;
	long sec;
	char str[7 * NUMSIZE];

	if (fasthz == 0LL)
		fasthz = system_timing.tsc_freq;
#if 0
	fastticks((uint64_t *) & fasthz);
	nsec = todget(&ticks);
#endif
	ticks = read_tsc();
	nsec = tsc2nsec(ticks);
	sec = nsec / 1000000000ULL;
	snprintf(str, sizeof(str), "%*lud %*llud %*llud %*llud ",
		 NUMSIZE - 1, sec,
		 VLNUMSIZE - 1, nsec,
		 VLNUMSIZE - 1, ticks,
		 VLNUMSIZE - 1, fasthz);
	return consreadstr(off, buf, n, str);
}
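/* A minimal user-space sketch of consuming the format produced by the
 * readtime() variants above. It assumes a POSIX environment with the
 * kernel's time file visible at /dev/time; the field order
 * (seconds, nanoseconds, fastticks, fasthz) comes from the code above. */
#include <stdio.h>

int
main(void)
{
	long long sec, nsec, ticks, hz;
	FILE *f;

	f = fopen("/dev/time", "r");
	if(f == NULL)
		return 1;
	if(fscanf(f, "%lld %lld %lld %lld", &sec, &nsec, &ticks, &hz) == 4)
		printf("time of day: %lld s (%lld ns), fast clock at %lld Hz\n",
			sec, nsec, hz);
	fclose(f);
	return 0;
}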
void
edfrun(Proc *p, int edfpri)
{
	Edf *e;
	void (*pt)(Proc*, int, vlong, vlong);
	long tns;

	e = p->edf;
	/* Called with edflock held */
	if(edfpri){
		tns = e->d - now;
		if(tns <= 0 || e->S == 0){
			/* Deadline reached or resources exhausted,
			 * deschedule forthwith
			 */
			p->delaysched++;
			delayedscheds++;
			e->s = now;
			return;
		}
		if(e->S < tns)
			tns = e->S;
		if(tns < 20)
			tns = 20;
		e->tns = 1000LL * tns;	/* µs to ns */
		if(e->tt == nil || e->tf != deadlineintr){
			DPRINT("%lud edfrun, deadline=%lud\n", now, tns);
		}else{
			DPRINT("v");
		}
		if(p->trace && (pt = proctrace))
			pt(p, SInte, todget(nil) + e->tns, 0);
		e->tmode = Trelative;
		e->tf = deadlineintr;
		e->ta = p;
		timeradd(e);
	}else{
		DPRINT("<");
	}
	e->s = now;
}
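/* Illustrative only: the interval the edfrun() variants above arm before
 * the deadline interrupt, in nanoseconds. d, now and S are local stand-ins
 * for the Edf fields (all in µs), not kernel symbols. */
#include <stdio.h>

static long long
edfinterval(long d, long now, long S)
{
	long tus;

	tus = d - now;			/* time left until the deadline */
	if(S < tus)
		tus = S;		/* but never more than the remaining slice */
	if(tus < 20)
		tus = 20;		/* floor so the timer isn't armed in the past */
	return 1000LL * tus;		/* µs to ns, as the Timer expects */
}

int
main(void)
{
	/* 150 µs to the deadline but only 80 µs of slice left: arm for 80 µs */
	printf("%lld ns\n", edfinterval(1150, 1000, 80));
	return 0;
}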
int
lock(Lock *l)
{
	Proc *up = externup();
	int i;
	uintptr_t pc;
	uint64_t t0;

	pc = getcallerpc();

	lockstats.locks++;
	if(up)
		ainc(&up->nlocks);	/* prevent being scheded */
	if(TAS(&l->key) == 0){
		if(up)
			up->lastlock = l;
		l->_pc = pc;
		l->p = up;
		l->isilock = 0;
		if(LOCKCYCLES)
			cycles(&l->lockcycles);
		return 0;
	}
	if(up)
		adec(&up->nlocks);

	cycles(&t0);
	lockstats.glare++;
	for(;;){
		lockstats.inglare++;
		i = 0;
		while(l->key){
			if(sys->nmach < 2 && up && up->edf && (up->edf->flags & Admitted)){
				/*
				 * Priority inversion, yield on a uniprocessor; on a
				 * multiprocessor, the other processor will unlock
				 */
				print("inversion %#p pc %#p proc %d held by pc %#p proc %d\n",
					l, pc, up ? up->pid : 0, l->_pc, l->p ? l->p->pid : 0);
				up->edf->d = todget(nil);	/* yield to process with lock */
			}
			if(i++ > 100000000){
				i = 0;
				lockloop(l, pc);
			}
		}
		if(up)
			ainc(&up->nlocks);
		if(TAS(&l->key) == 0){
			if(up)
				up->lastlock = l;
			l->_pc = pc;
			l->p = up;
			l->isilock = 0;
			if(LOCKCYCLES)
				cycles(&l->lockcycles);
			if(l != &waitstatslk)
				addwaitstat(pc, t0, WSlock);
			return 1;
		}
		if(up)
			adec(&up->nlocks);
	}
}
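/* Illustrative only: the test-and-set acquire loop that lock() above is
 * built around, written with C11 atomics rather than the kernel's TAS()
 * primitive. A sketch of the idea, not the kernel code: it omits the
 * statistics, priority-inversion detection and lockloop() backstop. */
#include <stdatomic.h>
#include <stdio.h>

typedef struct {
	atomic_flag key;
} SpinLock;

static void
spinlock(SpinLock *l)
{
	/* test-and-set returns the old value: 0 means we took the lock */
	while(atomic_flag_test_and_set(&l->key))
		;	/* spin until the holder clears it */
}

static void
spinunlock(SpinLock *l)
{
	atomic_flag_clear(&l->key);
}

int
main(void)
{
	SpinLock l = { ATOMIC_FLAG_INIT };

	spinlock(&l);
	printf("held\n");
	spinunlock(&l);
	return 0;
}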
void
todinit(void)
{
	start = todget(nil);
}
static int
yfn(void *)
{
	return up->trend == nil || todget(nil) >= up->edf->r;
}
/* it should be unsigned. FIXME */
void
syscall(int badscallnr, Ureg* ureg)
{
	unsigned int scallnr = (unsigned int) badscallnr;
	char *e;
	uintptr sp;
	int s;
	vlong startns, stopns;
	Ar0 ar0;
	static Ar0 zar0;

	if(!userureg(ureg))
		panic("syscall: cs %#llux\n", ureg->cs);

	cycles(&up->kentry);

	m->syscall++;
	up->nsyscall++;
	up->nqsyscall++;
	up->insyscall = 1;
	up->pc = ureg->ip;
	up->dbgreg = ureg;
	sp = ureg->sp;
	startns = 0;
	if(up->procctl == Proc_tracesyscall){
		/*
		 * Redundant validaddr.  Do we care?
		 * Tracing syscalls is not exactly a fast path...
		 * Beware, validaddr currently does a pexit rather
		 * than an error if there's a problem; that might
		 * change in the future.
		 */
		if(sp < (USTKTOP-BIGPGSZ) || sp > (USTKTOP-sizeof(up->arg)-BY2SE))
			validaddr(UINT2PTR(sp), sizeof(up->arg)+BY2SE, 0);

		syscallfmt(scallnr, (va_list)(sp+BY2SE));
		up->procctl = Proc_stopme;
		procctl(up);
		if(up->syscalltrace)
			free(up->syscalltrace);
		up->syscalltrace = nil;
		startns = todget(nil);
	}
	up->scallnr = scallnr;
	if(scallnr == RFORK)
		fpusysrfork(ureg);
	spllo();

	sp = ureg->sp;
	up->nerrlab = 0;
	ar0 = zar0;
	if(!waserror()){
		if(scallnr >= nsyscall || systab[scallnr].f == nil){
			pprint("bad sys call number %d pc %#llux\n",
				scallnr, ureg->ip);
			postnote(up, 1, "sys: bad sys call", NDebug);
			error(Ebadarg);
		}

		if(sp < (USTKTOP-BIGPGSZ) || sp > (USTKTOP-sizeof(up->arg)-BY2SE))
			validaddr(UINT2PTR(sp), sizeof(up->arg)+BY2SE, 0);

		memmove(up->arg, UINT2PTR(sp+BY2SE), sizeof(up->arg));
		up->psstate = systab[scallnr].n;

		systab[scallnr].f(&ar0, (va_list)up->arg);
		if(scallnr == SYSR1){
			/*
			 * BUG: must go when ron binaries go.
			 * NIX: Returning from execac().
			 * This means that the process is back to the
			 * time sharing core. However, the process did
			 * already return from the system call, when dispatching
			 * the user code to the AC. The only thing left is to
			 * return. The user registers should be ok, because
			 * up->dbgreg has been the user context for the process.
			 */
			return;
		}
		poperror();
	}
	else{
		/* failure: save the error buffer for errstr */
		e = up->syserrstr;
		up->syserrstr = up->errstr;
		up->errstr = e;
		if(DBGFLG && up->pid == 1)
			iprint("%s: syscall %s error %s\n",
				up->text, systab[scallnr].n, up->syserrstr);
		ar0 = systab[scallnr].r;
	}

	/*
	 * NIX: for the execac() syscall, what follows is done within
	 * the system call, because it never returns.
	 * See acore.c:/^retfromsyscall
	 */

	noerrorsleft();

	/*
	 * Put return value in frame.
	 */
	ureg->ax = ar0.p;

	if(up->procctl == Proc_tracesyscall){
		stopns = todget(nil);
		up->procctl = Proc_stopme;
		sysretfmt(scallnr, (va_list)(sp+BY2SE), &ar0, startns, stopns);
		s = splhi();
		procctl(up);
		splx(s);
		if(up->syscalltrace)
			free(up->syscalltrace);
		up->syscalltrace = nil;
	}else if(up->procctl == Proc_totc || up->procctl == Proc_toac)
		procctl(up);

	up->insyscall = 0;
	up->psstate = 0;

	if(scallnr == NOTED)
		noted(ureg, *(uintptr*)(sp+BY2SE));

	splhi();
	if(scallnr != RFORK && (up->procctl || up->nnote))
		notify(ureg);

	/* if we delayed sched because we held a lock, sched now */
	if(up->delaysched){
		sched();
		splhi();
	}
	kexit(ureg);
}
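/* Illustrative only: how the tracing path in the syscall() variants here
 * brackets a call with two time-of-day samples so sysretfmt() can report
 * its duration. gettodns() is a hypothetical user-space stand-in for the
 * kernel's todget(nil); the body between the samples is made up. */
#include <stdio.h>
#include <time.h>

static long long
gettodns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int
main(void)
{
	long long startns, stopns;

	startns = gettodns();		/* sampled before dispatching the handler */
	/* ... the system call handler would run here ... */
	stopns = gettodns();		/* sampled after it returns */
	printf("call took %lld ns\n", stopns - startns);
	return 0;
}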
long
seconds(void)
{
	return (vlong)todget(nil) / TODFREQ;
}
/*
 *  Syscall is called directly from assembler without going through trap().
 */
void
syscall(Ureg* ureg)
{
	char *e;
	ulong sp;
	long ret;
	int i, s;
	ulong scallnr;
	vlong startns, stopns;

	if(!userureg(ureg))
		panic("syscall: cs 0x%4.4luX", ureg->cs);

	cycles(&up->kentry);

	m->syscall++;
	up->insyscall = 1;
	up->pc = ureg->pc;
	up->dbgreg = ureg;

	sp = ureg->usp;
	scallnr = ureg->ax;
	up->scallnr = scallnr;

	spllo();

	up->nerrlab = 0;
	ret = -1;
	if(!waserror()){
		if(sp<(USTKTOP-BY2PG) || sp>(USTKTOP-sizeof(Sargs)-BY2WD))
			validaddr(sp, sizeof(Sargs)+BY2WD, 0);

		up->s = *((Sargs*)(sp+BY2WD));

		if(up->procctl == Proc_tracesyscall){
			syscallfmt(scallnr, ureg->pc, (va_list)up->s.args);
			s = splhi();
			up->procctl = Proc_stopme;
			procctl();
			splx(s);
			startns = todget(nil);
		}

		if(scallnr >= nsyscall || systab[scallnr] == 0){
			pprint("bad sys call number %lud pc %lux\n",
				scallnr, ureg->pc);
			postnote(up, 1, "sys: bad sys call", NDebug);
			error(Ebadarg);
		}
		up->psstate = sysctab[scallnr];

		ret = systab[scallnr]((va_list)up->s.args);
		poperror();
	}else{
		/* failure: save the error buffer for errstr */
		e = up->syserrstr;
		up->syserrstr = up->errstr;
		up->errstr = e;
		if(0 && up->pid == 1)
			print("syscall %lud error %s\n", scallnr, up->syserrstr);
	}
	if(up->nerrlab){
		print("bad errstack [%lud]: %d extra\n", scallnr, up->nerrlab);
		for(i = 0; i < NERR; i++)
			print("sp=%lux pc=%lux\n",
				up->errlab[i].sp, up->errlab[i].pc);
		panic("error stack");
	}

	/*
	 *  Put return value in frame.  On the x86 the syscall is
	 *  just another trap and the return value from syscall is
	 *  ignored.  On other machines the return value is put into
	 *  the results register by caller of syscall.
	 */
	ureg->ax = ret;

	if(up->procctl == Proc_tracesyscall){
		stopns = todget(nil);
		sysretfmt(scallnr, (va_list)up->s.args, ret, startns, stopns);
		s = splhi();
		up->procctl = Proc_stopme;
		procctl();
		splx(s);
	}

	up->insyscall = 0;
	up->psstate = 0;

	if(scallnr == NOTED)
		noted(ureg, *((ulong*)up->s.args));

	if(scallnr!=RFORK && (up->procctl || up->nnote)){
		splhi();
		notify(ureg);
	}
	/* if we delayed sched because we held a lock, sched now */
	if(up->delaysched)
		sched();
	kexit(ureg);
}
void
deepsleep(void)
{
	static int power_pl;
	ulong xsp, xlink;
//	ulong mecr;
	ulong clkd;
	vlong savedtod;
	extern void power_resume(void);

	power_pl = splhi();
	xlink = getcallerpc(&xlink);

	/* Power down */
	pcmciapower(0);
	irpower(0);
	audiopower(0);
	screenpower(0);
	µcpower(0);
	iprint("entering suspend mode, sp = %#p, pc = 0x%lux, psw = 0x%ux\n",
		&xsp, xlink, power_pl);
//	dumpitall();
	delay(1000);
	uartpower(0);
	rs232power(0);
	clockpower(0);
	gpiosave(&savedgpioregs, gpioregs);
	intrcpy(&savedintrregs, intrregs);
	cacheflush();
	delay(50);
	if(setpowerlabel()){
		/* return here with mmu back on */
		trapresume();
		gpiorestore(gpioregs, &savedgpioregs);
		delay(50);
		intrcpy(intrregs, &savedintrregs);
		if(intrregs->icip & (1<<IRQgpio0)){
			// don't want to sleep now.  clear on/off irq.
			gpioregs->edgestatus = (1<<IRQgpio0);
			intrregs->icip = (1<<IRQgpio0);
		}
		clkd = clockpower(1);
		gpclkregs->r0 = 1<<0;
		todset(savedtod + clkd * TODFREQ, 0LL, 0);
		resetsuspendtimer();
		rs232power(1);
		uartpower(1);
		delay(100);
		xlink = getcallerpc(&xlink);
		iprint("\nresuming execution, sp = %#p, pc = 0x%lux, psw = 0x%ux\n",
			&xsp, xlink, splhi());
//		dumpitall();
		delay(1000);
//		irpower(1);
		audiopower(1);
		µcpower(1);
		screenpower(1);
		pcmciapower(1);
		splx(power_pl);
		return;
	}
	cacheflush();
	delay(100);
	savedtod = todget(nil);
	power_down();
	/* no return */
}
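/* Illustrative only: the clock adjustment deepsleep() makes on resume.
 * savedtod (ns) was sampled with todget(nil) just before power_down();
 * clockpower(1) is assumed here to return the seconds spent suspended,
 * so the time of day is advanced by that many TODFREQ (ns-per-second)
 * units before todset(). The values below are made up. */
#include <stdio.h>

int
main(void)
{
	long long TODFREQ = 1000000000LL;
	long long savedtod = 5000000000LL;	/* TOD in ns when we suspended */
	long clkd = 42;				/* seconds asleep, per the real-time clock */

	printf("new tod = %lld ns\n", savedtod + clkd * TODFREQ);
	return 0;
}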
void
syscall(Ureg* ureg)
{
	char *e;
	u32int s;
	ulong sp;
	long ret;
	int i, scallnr;
	vlong startns, stopns;

	if(!userureg(ureg))
		panic("syscall: from kernel: pc %#lux r14 %#lux psr %#lux",
			ureg->pc, ureg->r14, ureg->psr);

	cycles(&up->kentry);

	m->syscall++;
	up->insyscall = 1;
	up->pc = ureg->pc;
	up->dbgreg = ureg;

	scallnr = ureg->r0;
	up->scallnr = scallnr;
	if(scallnr == RFORK)
		fpusysrfork(ureg);
	spllo();

	sp = ureg->sp;
	if(up->procctl == Proc_tracesyscall){
		/*
		 * Redundant validaddr.  Do we care?
		 * Tracing syscalls is not exactly a fast path...
		 * Beware, validaddr currently does a pexit rather
		 * than an error if there's a problem; that might
		 * change in the future.
		 */
		if(sp < (USTKTOP-BY2PG) || sp > (USTKTOP-sizeof(Sargs)-BY2WD))
			validaddr(sp, sizeof(Sargs)+BY2WD, 0);

		syscallfmt(scallnr, ureg->pc, (va_list)(sp+BY2WD));
		up->procctl = Proc_stopme;
		procctl(up);
		if (up->syscalltrace)
			free(up->syscalltrace);
		up->syscalltrace = nil;
	}

	up->nerrlab = 0;
	ret = -1;
	startns = todget(nil);
	if(!waserror()){
		if(scallnr >= nsyscall){
			pprint("bad sys call number %d pc %#lux\n",
				scallnr, ureg->pc);
			postnote(up, 1, "sys: bad sys call", NDebug);
			error(Ebadarg);
		}

		if(sp < (USTKTOP-BY2PG) || sp > (USTKTOP-sizeof(Sargs)-BY2WD))
			validaddr(sp, sizeof(Sargs)+BY2WD, 0);

		up->s = *((Sargs*)(sp+BY2WD));
		up->psstate = sysctab[scallnr];

		/* iprint("%s: syscall %s\n", up->text, sysctab[scallnr]?sysctab[scallnr]:"huh?"); */
		ret = systab[scallnr](up->s.args);
		poperror();
	}else{
		/* failure: save the error buffer for errstr */
		e = up->syserrstr;
		up->syserrstr = up->errstr;
		up->errstr = e;
	}
	if(up->nerrlab){
		print("bad errstack [%d]: %d extra\n", scallnr, up->nerrlab);
		for(i = 0; i < NERR; i++)
			print("sp=%#p pc=%#p\n",
				up->errlab[i].sp, up->errlab[i].pc);
		panic("error stack");
	}

	/*
	 * Put return value in frame.  On the x86 the syscall is
	 * just another trap and the return value from syscall is
	 * ignored.  On other machines the return value is put into
	 * the results register by caller of syscall.
	 */
	ureg->r0 = ret;

	if(up->procctl == Proc_tracesyscall){
		stopns = todget(nil);
		up->procctl = Proc_stopme;
		sysretfmt(scallnr, (va_list)(sp+BY2WD), ret, startns, stopns);
		s = splhi();
		procctl(up);
		splx(s);
		if(up->syscalltrace)
			free(up->syscalltrace);
		up->syscalltrace = nil;
	}

	up->insyscall = 0;
	up->psstate = 0;

	if(scallnr == NOTED)
		noted(ureg, *(ulong*)(sp+BY2WD));

	splhi();
	if(scallnr != RFORK && (up->procctl || up->nnote))
		notify(ureg);

	/* if we delayed sched because we held a lock, sched now */
	if(up->delaysched){
		sched();
		splhi();
	}
	kexit(ureg);
}
int
lock(Lock *l)
{
	int i;
	ulong pc;

	pc = getcallerpc(&l);

	lockstats.locks++;
	if(up)
		inccnt(&up->nlocks);	/* prevent being scheded */
	if(tas(&l->key) == 0){
		if(up)
			up->lastlock = l;
		l->pc = pc;
		l->p = up;
		l->isilock = 0;
#ifdef LOCKCYCLES
		l->lockcycles = -lcycles();
#endif
		return 0;
	}
	if(up)
		deccnt(&up->nlocks);

	lockstats.glare++;
	for(;;){
		lockstats.inglare++;
		i = 0;
		while(l->key){
			if(conf.nmach < 2 && up && up->edf && (up->edf->flags & Admitted)){
				/*
				 * Priority inversion, yield on a uniprocessor; on a
				 * multiprocessor, the other processor will unlock
				 */
				print("inversion %#p pc %#lux proc %lud held by pc %#lux proc %lud\n",
					l, pc, up ? up->pid : 0, l->pc, l->p ? l->p->pid : 0);
				up->edf->d = todget(nil);	/* yield to process with lock */
			}
			if(i++ > 100000000){
				i = 0;
				lockloop(l, pc);
			}
		}
		if(up)
			inccnt(&up->nlocks);
		if(tas(&l->key) == 0){
			if(up)
				up->lastlock = l;
			l->pc = pc;
			l->p = up;
			l->isilock = 0;
#ifdef LOCKCYCLES
			l->lockcycles = -lcycles();
#endif
			return 1;
		}
		if(up)
			deccnt(&up->nlocks);
	}
}