/*
 * Acquire a spinlock for use at interrupt level.  Raises the
 * processor priority level (splhi) before taking the lock and
 * saves the previous level in l->pl — presumably restored by the
 * matching iunlock (not visible in this chunk; confirm there).
 * If the lock is held, spins with interrupts re-enabled so the
 * holder (possibly an interrupt handler on this CPU) can run.
 * Never fails; returns only once the lock is held.
 */
void ilock(Lock *l)
{
	Proc *up = externup();
	Mpl pl;
	uintptr_t pc;
	uint64_t t0;

	pc = getcallerpc();		/* caller's PC, recorded for debugging */
	lockstats.locks++;

	pl = splhi();			/* disable interrupts before trying the lock */
	if(TAS(&l->key) != 0){
		/* contended: start timing the wait for lock statistics */
		cycles(&t0);
		lockstats.glare++;
		/*
		 * Cannot also check l->pc, l->m, or l->isilock here
		 * because they might just not be set yet, or
		 * (for pc and m) the lock might have just been unlocked.
		 */
		for(;;){
			lockstats.inglare++;
			/* re-enable interrupts while spinning on the key */
			splx(pl);
			while(l->key)
				;
			/* key looked free: mask interrupts again and retry */
			pl = splhi();
			if(TAS(&l->key) == 0){
				/*
				 * Record wait time, except for the wait-stats
				 * lock itself (would recurse on that lock).
				 */
				if(l != &waitstatslk)
					addwaitstat(pc, t0, WSlock);
				goto acquire;
			}
		}
	}
acquire:
	/* lock held: record ownership/debug info on the lock */
	machp()->ilockdepth++;
	if(up)
		up->lastilock = l;
	l->pl = pl;			/* saved priority level for release */
	l->_pc = pc;
	l->p = up;
	l->isilock = 1;
	l->m = machp();
	if(LOCKCYCLES)
		cycles(&l->lockcycles);
}
/*
 * Acquire a spinlock from process context (interrupts stay enabled).
 * Returns 0 if the lock was taken without contention, 1 if it was
 * taken after spinning.  While waiting it detects apparent deadlock
 * (lockloop after ~1e8 spins) and, on a uniprocessor with an
 * admitted EDF process, flags priority inversion and yields.
 */
int lock(Lock *l)
{
	Proc *up = externup();
	int i;
	uintptr_t pc;
	uint64_t t0;

	pc = getcallerpc();		/* caller's PC, recorded for debugging */
	lockstats.locks++;
	if(up)
		ainc(&up->nlocks);	/* prevent being scheded */
	if(TAS(&l->key) == 0){
		/* uncontended fast path: record ownership and return */
		if(up)
			up->lastlock = l;
		l->_pc = pc;
		l->p = up;
		l->isilock = 0;
		if(LOCKCYCLES)
			cycles(&l->lockcycles);
		return 0;
	}
	/* contended: allow scheduling again while we spin */
	if(up)
		adec(&up->nlocks);

	cycles(&t0);			/* start timing the wait */
	lockstats.glare++;
	for(;;){
		lockstats.inglare++;
		i = 0;
		while(l->key){
			if(sys->nmach < 2 && up && up->edf && (up->edf->flags & Admitted)){
				/*
				 * Priority inversion, yield on a uniprocessor; on a
				 * multiprocessor, the other processor will unlock
				 */
				print("inversion %#p pc %#p proc %d held by pc %#p proc %d\n",
					l, pc, up ? up->pid : 0, l->_pc, l->p ? l->p->pid : 0);
				up->edf->d = todget(nil);	/* yield to process with lock */
			}
			/* spun too long: probable deadlock, report it */
			if(i++ > 100000000){
				i = 0;
				lockloop(l, pc);
			}
		}
		/* key looked free: pin ourselves again and retry */
		if(up)
			ainc(&up->nlocks);
		if(TAS(&l->key) == 0){
			if(up)
				up->lastlock = l;
			l->_pc = pc;
			l->p = up;
			l->isilock = 0;
			if(LOCKCYCLES)
				cycles(&l->lockcycles);
			/*
			 * Record wait time, except for the wait-stats lock
			 * itself (would recurse on that lock).
			 */
			if(l != &waitstatslk)
				addwaitstat(pc, t0, WSlock);
			return 1;
		}
		/* lost the race: unpin and spin again */
		if(up)
			adec(&up->nlocks);
	}
}
/*
 * Fold one spin-wait sample into the slock wait statistics:
 * caller PC and the cycle timestamp at which the wait began.
 */
static void
slockstat(uintptr_t callerpc, uint64_t waitstart)
{
	addwaitstat(callerpc, waitstart, WSslock);
}