/*
 * Measure the CPU clock rate against the free-running system timer
 * and enable the clock interrupt on system timer channel 3.
 */
void
clockinit(void)
{
	Systimers *tn;
	Armtimer *tm;
	u32int t0, t1, tstart, tend;

	tn = (Systimers*)SYSTIMERS;
	tm = (Armtimer*)ARMTIMER;
	/* set up the ARM timer as a free-running 32-bit cycle counter */
	tm->load = 0;
	tm->ctl = TmrPrescale1|CntEnable|CntWidth32;

	/* wait for a tick edge so the interval starts exactly on a tick */
	tstart = tn->clo;
	do{
		t0 = lcycles();
	}while(tn->clo == tstart);
	/* count local cycles over the next 10000 timer ticks */
	tend = tstart + 10000;
	do{
		t1 = lcycles();
	}while(tn->clo != tend);
	t1 -= t0;
	/* NOTE(review): scaling assumes the system timer ticks at 1MHz,
	 * i.e. t1 is the cycle count over 10ms — confirm against the SoC doc */
	m->cpuhz = 100 * t1;
	m->cpumhz = (m->cpuhz + Mhz/2 - 1) / Mhz;	/* rounded up to whole MHz */
	m->cyclefreq = m->cpuhz;

	/* set compare just behind the counter to get an immediate interrupt */
	tn->c3 = tn->clo - 1;
	intrenable(IRQtimer3, clockintr, nil, 0, "clock");
}
/*
 * Release an interrupt-disabling lock acquired with ilock and
 * restore the interrupt state saved at acquisition time.
 */
void
iunlock(Lock *l)
{
	ulong sr;

#ifdef LOCKCYCLES
	/* lockcycles was set to -lcycles() at acquisition, so adding
	 * lcycles() now yields the cycles spent holding the lock */
	l->lockcycles += lcycles();
	cumilockcycles += l->lockcycles;
	if(l->lockcycles > maxilockcycles){
		maxilockcycles = l->lockcycles;
		maxilockpc = l->pc;
	}
	if(l->lockcycles > 2400)	/* record pcs of notably long holds */
		ilockpcs[n++ & 0xff] = l->pc;
#endif
	/* sanity checks: must be held, as an ilock, with interrupts off */
	if(l->key == 0)
		print("iunlock: not locked: pc %#p\n", getcallerpc(&l));
	if(!l->isilock)
		print("iunlock of lock: pc %#p, held by %#lux\n", getcallerpc(&l), l->pc);
	if(islo())
		print("iunlock while lo: pc %#p, held by %#lux\n", getcallerpc(&l), l->pc);

	sr = l->sr;	/* read saved interrupt state before releasing the lock */
	l->m = nil;
	coherence();
	l->key = 0;	/* release; barriers order the stores around the release */
	coherence();
	m->ilockdepth--;
	if(up)
		up->lastilock = nil;
	splx(sr);	/* restore interrupt state from the matching ilock */
}
/*
 * Release a spinlock acquired with lock or canlock.
 * May call sched() if a reschedule was deferred while locks were held.
 */
void
unlock(Lock *l)
{
#ifdef LOCKCYCLES
	/* lockcycles was set to -lcycles() at acquisition, so adding
	 * lcycles() now yields the cycles spent holding the lock */
	l->lockcycles += lcycles();
	cumlockcycles += l->lockcycles;
	if(l->lockcycles > maxlockcycles){
		maxlockcycles = l->lockcycles;
		maxlockpc = l->pc;
	}
#endif
	/* sanity checks: must be held, not as an ilock, by this process */
	if(l->key == 0)
		print("unlock: not locked: pc %#p\n", getcallerpc(&l));
	if(l->isilock)
		print("unlock of ilock: pc %lux, held by %lux\n", getcallerpc(&l), l->pc);
	if(l->p != up)
		print("unlock: up changed: pc %#p, acquired at pc %lux, lock p %#p, unlock up %#p\n",
			getcallerpc(&l), l->pc, l->p, up);
	l->m = nil;
	coherence();
	l->key = 0;	/* release; barriers order the stores around the release */
	coherence();

	if(up && deccnt(&up->nlocks) == 0 && up->delaysched && islo()){
		/*
		 * Call sched if the need arose while locks were held
		 * But, don't do it from interrupt routines, hence the islo() test
		 */
		sched();
	}
}
/*
 * Release the global EDF scheduler lock, accumulating the cycles
 * spent inside the scheduler when cycle profiling is enabled.
 */
void
edfunlock(void)
{
#ifdef EDFCYCLES
	/* edfcycles was decremented by lcycles() in edflock;
	 * adding lcycles() here accumulates the elapsed cycles */
	edfcycles += lcycles();
#endif
	edfnrun++;	/* count scheduler invocations */
	iunlock(&thelock);
}
/*
 * Restore per-process machine state as p is about to run:
 * restart its cycle accounting and reload FPU state.
 * Kernel processes carry no such state and are skipped.
 */
void
procrestore(Proc* p)
{
	if(p->kp)
		return;
	/* subtract the current cycle counter; presumably the matching
	 * procsave added the stop time so the difference accumulates
	 * cycles actually used — verify against procsave */
	p->pcycles -= lcycles();
	fpuprocrestore(p);
}
/*
 * Per-cpu clock initialisation for SoCs with the ARM generic timer.
 * Cpu0 drives the global clock from system timer channel 3; other
 * cpus take local clock interrupts from their generic timer.
 */
void
clockinit(void)
{
	Systimers *tn;
	Armtimer *tm;
	u32int t0, t1, tstart, tend;

	if(((cprdsc(0, CpID, CpIDfeat, 1) >> 16) & 0xF) != 0) {
		/* generic timer supported */
		if(m->machno == 0){
			*(ulong*)(ARMLOCAL + Localctl) = 0;	/* magic */
			*(ulong*)(ARMLOCAL + Prescaler) = 0x06aaaaab;	/* magic for 1 Mhz */
		}
		/* mask the physical timer interrupt until a deadline is set */
		cpwrsc(0, CpTIMER, CpTIMERphys, CpTIMERphysctl, Imask);
	}

	tn = (Systimers*)SYSTIMERS;
	/* wait for a tick edge so the interval starts exactly on a tick */
	tstart = tn->clo;
	do{
		t0 = lcycles();
	}while(tn->clo == tstart);
	/* count local cycles over the next 10000 timer ticks */
	tend = tstart + 10000;
	do{
		t1 = lcycles();
	}while(tn->clo != tend);
	t1 -= t0;
	/* NOTE(review): scaling assumes a 1MHz tick (see prescaler above),
	 * i.e. t1 is the cycle count over 10ms */
	m->cpuhz = 100 * t1;
	m->cpumhz = (m->cpuhz + Mhz/2 - 1) / Mhz;	/* rounded up to whole MHz */
	m->cyclefreq = m->cpuhz;

	if(m->machno == 0){
		/* set compare just behind the counter for an immediate interrupt */
		tn->c3 = tn->clo - 1;
		/* set up the ARM timer as a free-running 32-bit cycle counter */
		tm = (Armtimer*)ARMTIMER;
		tm->load = 0;
		tm->ctl = TmrPrescale1|CntEnable|CntWidth32;
		intrenable(IRQtimer3, clockintr, nil, 0, "clock");
	}else
		intrenable(IRQcntpns, localclockintr, nil, 0, "clock");
}
/*
 * Take the global EDF lock on behalf of p.
 * Returns p's Edf with the lock held if p has been admitted;
 * otherwise returns nil with the lock released.
 */
Edf*
edflock(Proc *p)
{
	Edf *e;

	if(p->edf == nil)
		return nil;
	ilock(&thelock);
	e = p->edf;
	if(e == nil || (e->flags & Admitted) == 0){
		/* not (or no longer) an admitted EDF process */
		iunlock(&thelock);
		return nil;
	}
	thelock._pc = getcallerpc();
#ifdef EDFCYCLES
	edfcycles -= lcycles();	/* start cycle accounting; edfunlock adds the end time */
#endif
	now = ms();	/* snapshot the current time for scheduling computations */
	return e;
}
/*
 * Acquire a spinlock with interrupts disabled.  Unlike lock(),
 * ilock never yields to the scheduler: it spins, briefly
 * re-enabling interrupts so pending ones can be serviced.
 * Paired with iunlock, which restores the saved interrupt state.
 */
void
ilock(Lock *l)
{
	ulong x;
	ulong pc;

	pc = getcallerpc(&l);
	lockstats.locks++;

	x = splhi();	/* disable interrupts; state saved in l->sr for iunlock */
	if(tas(&l->key) != 0){
		lockstats.glare++;
		/*
		 * Cannot also check l->pc, l->m, or l->isilock here
		 * because they might just not be set yet, or
		 * (for pc and m) the lock might have just been unlocked.
		 */
		for(;;){
			lockstats.inglare++;
			splx(x);	/* let interrupts in while spinning */
			while(l->key)
				;
			x = splhi();
			if(tas(&l->key) == 0)
				goto acquire;
		}
	}
acquire:
	m->ilockdepth++;
	if(up)
		up->lastilock = l;
	/* bookkeeping for diagnostics: who holds the lock and from where */
	l->sr = x;
	l->pc = pc;
	l->p = up;
	l->isilock = 1;
	l->m = MACHP(m->machno);
#ifdef LOCKCYCLES
	l->lockcycles = -lcycles();	/* iunlock adds lcycles() to get hold time */
#endif
}
/*
 * Try to acquire l without blocking.
 * Returns 1 on success (lock held on return), 0 if already held.
 */
int
canlock(Lock *l)
{
	if(up)
		inccnt(&up->nlocks);	/* prevent being scheded while holding */
	if(tas(&l->key)){
		if(up)
			deccnt(&up->nlocks);	/* undo: we did not get the lock */
		return 0;
	}

	if(up)
		up->lastlock = l;
	/* bookkeeping for diagnostics: owner and acquisition pc */
	l->pc = getcallerpc(&l);
	l->p = up;
	l->m = MACHP(m->machno);
	l->isilock = 0;
#ifdef LOCKCYCLES
	l->lockcycles = -lcycles();	/* unlock adds lcycles() to get hold time */
#endif
	return 1;
}
/*
 * Acquire spinlock l, spinning until it is free.  On a
 * uniprocessor a held lock implies priority inversion for an
 * admitted EDF process, which is told to yield to the holder.
 * Returns 0 if acquired uncontended, 1 if it had to spin.
 */
int
lock(Lock *l)
{
	int i;
	ulong pc;

	pc = getcallerpc(&l);

	lockstats.locks++;
	if(up)
		inccnt(&up->nlocks);	/* prevent being scheded */
	if(tas(&l->key) == 0){
		if(up)
			up->lastlock = l;
		/* bookkeeping for diagnostics: owner and acquisition pc */
		l->pc = pc;
		l->p = up;
		l->isilock = 0;
#ifdef LOCKCYCLES
		l->lockcycles = -lcycles();	/* unlock adds lcycles() for hold time */
#endif
		return 0;	/* acquired uncontended */
	}
	if(up)
		deccnt(&up->nlocks);	/* undo while we spin without the lock */

	lockstats.glare++;
	for(;;){
		lockstats.inglare++;
		i = 0;
		while(l->key){
			if(conf.nmach < 2 && up && up->edf && (up->edf->flags & Admitted)){
				/*
				 * Priority inversion, yield on a uniprocessor; on a
				 * multiprocessor, the other processor will unlock
				 */
				print("inversion %#p pc %#lux proc %lud held by pc %#lux proc %lud\n",
					l, pc, up ? up->pid : 0, l->pc, l->p ? l->p->pid : 0);
				up->edf->d = todget(nil);	/* yield to process with lock */
			}
			if(i++ > 100000000){	/* apparently stuck: diagnose */
				i = 0;
				lockloop(l, pc);
			}
		}
		if(up)
			inccnt(&up->nlocks);
		if(tas(&l->key) == 0){
			if(up)
				up->lastlock = l;
			/* bookkeeping for diagnostics: owner and acquisition pc */
			l->pc = pc;
			l->p = up;
			l->isilock = 0;
#ifdef LOCKCYCLES
			l->lockcycles = -lcycles();	/* unlock adds lcycles() for hold time */
#endif
			return 1;	/* acquired after spinning */
		}
		if(up)
			deccnt(&up->nlocks);
	}
}