void
copen(Chan *c)
{
	int h;
	Mntcache *m, *f, **l;

	/* directories aren't cacheable and append-only files confuse us */
	if(c->qid.type&(QTDIR|QTAPPEND))
		return;

	h = c->qid.path%NHASH;
	lock(&cache);
	for(m = cache.hash[h]; m; m = m->hash) {
		if(m->qid.path == c->qid.path)
		if(m->qid.type == c->qid.type)
		if(m->dev == c->dev && m->type == c->type) {
			/* File was updated, invalidate cache */
			if(m->qid.vers != c->qid.vers){
				if(!canqlock(m))
					goto Busy;
				m->qid.vers = c->qid.vers;
				goto Update;
			}
			ctail(m);
			c->mcp = m;
			unlock(&cache);
			return;
		}
	}

	/* LRU the cache headers */
	m = cache.head;
	if(!canqlock(m))
		goto Busy;

	l = &cache.hash[m->qid.path%NHASH];
	for(f = *l; f; f = f->hash) {
		if(f == m) {
			*l = m->hash;
			break;
		}
		l = &f->hash;
	}

	m->qid = c->qid;
	m->dev = c->dev;
	m->type = c->type;

	l = &cache.hash[h];
	m->hash = *l;
	*l = m;

Update:
	ctail(m);
	c->mcp = m;
	unlock(&cache);
	cnodata(m);
	qunlock(m);
	return;

Busy:
	unlock(&cache);
	c->mcp = 0;
}
/* same as rlock but punts if there are any writers waiting */
int
canrlock(RWlock *l)
{
	if (!canqlock(&l->x))
		return 0;
	lock(&l->l);
	l->readers++;
	canqlock(&l->k);	/* block writers if we are the first reader */
	unlock(&l->l);
	qunlock(&l->x);
	return 1;
}
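/*
 * Hypothetical caller sketch (not from the sources in this listing): canrlock
 * lets a best-effort reader back off instead of queueing behind waiting
 * writers. The helper name trysnapshot and the Stats type are illustrative
 * only; the RWlock, canrlock, and runlock primitives are assumed to be the
 * ones shown alongside this example.
 */
static int
trysnapshot(RWlock *l, Stats *dst, Stats *src)
{
	if(!canrlock(l))
		return 0;	/* lock busy or writers pending; caller retries later */
	*dst = *src;		/* shared state cannot change while the read lock is held */
	runlock(l);
	return 1;
}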
void
alarmkproc(void*)
{
	Proc *rp;
	ulong now;

	for(;;){
		now = MACHP(0)->ticks;
		qlock(&alarms);
		while((rp = alarms.head) && rp->alarm <= now){
			if(rp->alarm != 0L){
				if(canqlock(&rp->debug)){
					if(!waserror()){
						postnote(rp, 0, "alarm", NUser);
						poperror();
					}
					qunlock(&rp->debug);
					rp->alarm = 0L;
				}else
					break;
			}
			alarms.head = rp->palarm;
		}
		qunlock(&alarms);

		sleep(&alarmr, return0, 0);
	}
}
/*
 * since close can block, this has to be called outside of
 * spin locks.
 */
static void
imagechanreclaim(void)
{
	Chan *c;

	/* Somebody is already cleaning the image chans */
	if(!canqlock(&imagealloc.fcreclaim))
		return;

	/*
	 * We don't have to recheck that nfreechan > 0 after we
	 * acquire the lock, because we're the only ones who decrement
	 * it (the other lock contender increments it), and there's only
	 * one of us thanks to the qlock above.
	 */
	while(imagealloc.nfreechan > 0){
		lock(&imagealloc);
		imagealloc.nfreechan--;
		c = imagealloc.freechan[imagealloc.nfreechan];
		unlock(&imagealloc);
		cclose(c);
	}

	qunlock(&imagealloc.fcreclaim);
}
void
alarmkproc(void*)
{
	Proc *rp;
	ulong now, when;

	while(waserror())
		;

	for(;;){
		now = MACHP(0)->ticks;
		qlock(&alarms);
		for(rp = alarms.head; rp != nil; rp = rp->palarm){
			if((when = rp->alarm) == 0)
				continue;
			if((long)(now - when) < 0)
				break;
			if(!canqlock(&rp->debug))
				break;
			if(rp->alarm != 0){
				postnote(rp, 0, "alarm", NUser);
				rp->alarm = 0;
			}
			qunlock(&rp->debug);
		}
		alarms.head = rp;
		qunlock(&alarms);

		sleep(&alarmr, return0, 0);
	}
}
void
alarmkproc(void*)
{
	Proc *rp;
	ulong now;

	for(;;){
		now = MACHP(0)->ticks;
		qlock(&alarms);
		/*
		 * the odd test of now vs. rp->alarm is to cope with
		 * now wrapping around.
		 */
		while((rp = alarms.head) && (long)(now - rp->alarm) >= 0){
			if(rp->alarm != 0L){
				if(canqlock(&rp->debug)){
					if(!waserror()){
						postnote(rp, 0, "alarm", NUser);
						poperror();
					}
					qunlock(&rp->debug);
					rp->alarm = 0L;
				}else
					break;
			}
			alarms.head = rp->palarm;
		}
		qunlock(&alarms);

		sleep(&alarmr, return0, 0);
	}
}
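/*
 * Hypothetical standalone sketch (not kernel code) of why the signed test
 * "(long)(now - rp->alarm) >= 0" above tolerates the tick counter wrapping.
 * Fixed-width types stand in for the kernel's ulong ticks; the due() helper
 * and the sample values are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static int
due(uint32_t now, uint32_t alarm)
{
	/*
	 * unsigned subtraction wraps modulo 2^32; the signed cast turns
	 * "slightly past the deadline" into a small positive value even
	 * when now has wrapped around zero and alarm has not
	 */
	return (int32_t)(now - alarm) >= 0;
}

int
main(void)
{
	uint32_t alarm = 0xfffffff0u;		/* deadline set just before the wrap */

	printf("%d\n", due(0xffffffe0u, alarm));	/* 0: 16 ticks early */
	printf("%d\n", due(0x00000010u, alarm));	/* 1: 32 ticks late, across the wrap */
	return 0;
}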
/*
 * Need to lock drawlock for ourselves.
 */
void
swenable(VGAscr *v)
{
	swenabled = 1;
	if(canqlock(&drawlock)){
		swcursordraw();
		qunlock(&drawlock);
	}
}
void
swdisable(VGAscr *v)
{
	swenabled = 0;
	if(canqlock(&drawlock)){
		swcursorhide();
		qunlock(&drawlock);
	}
}
void
killbig(char *why)
{
	int i, x;
	Segment *s;
	uintptr_t l, max;
	Proc *p, *kp;

	max = 0;
	kp = nil;
	for(x = 0; (p = psincref(x)) != nil; x++) {
		if(p->state == Dead || p->kp){
			psdecref(p);
			continue;
		}
		l = 0;
		for(i=1; i<NSEG; i++) {
			s = p->seg[i];
			if(s != 0)
				l += s->top - s->base;
		}
		if(l > max && ((p->procmode&0222) || strcmp(eve, p->user)!=0)) {
			if(kp != nil)
				psdecref(kp);
			kp = p;
			max = l;
		} else
			psdecref(p);
	}
	if(kp == nil)
		return;

	print("%d: %s killed: %s\n", kp->pid, kp->text, why);
	for(x = 0; (p = psincref(x)) != nil; x++) {
		if(p->state == Dead || p->kp){
			psdecref(p);
			continue;
		}
		/* TODO(aki): figure out what this was for. the oom killer is broken anyway though?
		if(p != kp && p->seg[BSEG] && p->seg[BSEG] == kp->seg[BSEG])
			p->procctl = Proc_exitbig;
		*/
		psdecref(p);
	}
	kp->procctl = Proc_exitbig;
	for(i = 0; i < NSEG; i++) {
		s = kp->seg[i];
		if(s != 0 && canqlock(&s->lk)) {
			mfreeseg(s, s->base, (s->top - s->base)/BIGPGSZ);
			qunlock(&s->lk);
		}
	}
	psdecref(kp);
}
static int
pageout(Proc *p, Segment *s)
{
	Proc *up = externup();
	int i, size, n;
	Pte *l;
	Page **pg, *entry;

	if((s->type&SG_TYPE) != SG_LOAD && (s->type&SG_TYPE) != SG_TEXT)
		panic("pageout");

	if(!canqlock(&s->lk))	/* We cannot afford to wait, we will surely deadlock */
		return 0;

	if(s->steal){		/* Protected by /dev/proc */
		qunlock(&s->lk);
		return 0;
	}

	if(!canflush(p, s)){	/* Able to invalidate all tlbs with references */
		qunlock(&s->lk);
		putseg(s);
		return 0;
	}

	if(waserror()){
		qunlock(&s->lk);
		putseg(s);
		return 0;
	}

	/* Pass through the pte tables looking for text memory pages to put */
	n = 0;
	size = s->mapsize;
	for(i = 0; i < size; i++){
		l = s->map[i];
		if(l == 0)
			continue;
		for(pg = l->first; pg < l->last; pg++){
			entry = *pg;
			if(pagedout(entry))
				continue;
			n++;
			if(entry->modref & PG_REF){
				entry->modref &= ~PG_REF;
				continue;
			}
			putpage(*pg);
			*pg = nil;
		}
	}
	poperror();
	qunlock(&s->lk);
	putseg(s);
	return n;
}
void
rlock(RWlock *l)
{
	qlock(&l->x);		/* wait here for writers and exclusion */
	lock(&l->l);
	l->readers++;
	canqlock(&l->k);	/* block writers if we are the first reader */
	unlock(&l->l);
	qunlock(&l->x);
}
static void
pageout(Proc *p, Segment *s)
{
	int type, i, size;
	Pte *l;
	Page **pg, *entry;

	if(!canqlock(&s->lk))	/* We cannot afford to wait, we will surely deadlock */
		return;

	if(s->steal) {		/* Protected by /dev/proc */
		qunlock(&s->lk);
		return;
	}

	if(!canflush(p, s)) {	/* Able to invalidate all tlbs with references */
		qunlock(&s->lk);
		putseg(s);
		return;
	}

	if(waserror()) {
		qunlock(&s->lk);
		putseg(s);
		return;
	}

	/* Pass through the pte tables looking for memory pages to swap out */
	type = s->type&SG_TYPE;
	size = s->mapsize;
	for(i = 0; i < size; i++) {
		l = s->map[i];
		if(l == 0)
			continue;
		for(pg = l->first; pg < l->last; pg++) {
			entry = *pg;
			if(pagedout(entry))
				continue;

			if(entry->modref & PG_REF) {
				entry->modref &= ~PG_REF;
				continue;
			}

			pagepte(type, pg);

			if(ioptr >= conf.nswppo)
				goto out;
		}
	}
out:
	poperror();
	qunlock(&s->lk);
	putseg(s);
}
void
lockdisplay(Display *disp)
{
	if(debuglockdisplay){
		/* avoid busy looping; it's rare we collide anyway */
		while(!canqlock(&disp->qlock)){
			fprint(1, "proc %d waiting for display lock...\n", getpid());
			sleep(1000);
		}
	}else
		qlock(&disp->qlock);
}
static void
imagereclaim(void)
{
	Image *i;
	uvlong ticks0, ticks;

	irstats.calls++;
	/* Somebody is already cleaning the page cache */
	if(!canqlock(&imagealloc.ireclaim))
		return;
	DBG("imagereclaim maxt %ulld noluck %d nolock %d\n",
		irstats.maxt, irstats.noluck, irstats.nolock);
	ticks0 = fastticks(nil);
	if(!canlock(&imagealloc)){
		/* never happen in the experiments I made */
		qunlock(&imagealloc.ireclaim);
		return;
	}

	for(i = imagealloc.lru; i != nil; i = i->prev){
		if(canlock(i)){
			i->ref++;	/* make sure it does not go away */
			unlock(i);
			pagereclaim(i);
			lock(i);
			DBG("imagereclaim: image %p(c%p, r%d)\n", i, i->c, i->ref);
			if(i->ref == 1){	/* no pages referring to it, it's ours */
				unlock(i);
				unlock(&imagealloc);
				putimage(i);
				break;
			}else
				--i->ref;
			unlock(i);
		}
	}

	if(i == nil){
		irstats.noluck++;
		unlock(&imagealloc);
	}
	irstats.loops++;
	ticks = fastticks(nil) - ticks0;
	irstats.ticks += ticks;
	if(ticks > irstats.maxt)
		irstats.maxt = ticks;
	//print("T%llud+", ticks);
	qunlock(&imagealloc.ireclaim);
}
void
putbuf(Iobuf *p)
{
	if(canqlock(p))
		print("buffer not locked %Z(%lld)\n", p->dev, (Wideoff)p->addr);
	if(p->flags & Bimm) {
		if(!(p->flags & Bmod))
			print("imm and no mod %Z(%lld)\n", p->dev, (Wideoff)p->addr);
		if(!devwrite(p->dev, p->addr, p->iobuf))
			p->flags &= ~(Bmod|Bimm);
	}
	iobufunmap(p);
	qunlock(p);
}
static void
pageouttext(int pgszi, int color)
{
	Proc *p;
	Pgsza *pa;
	int i, n, np, x;
	Segment *s;
	int prepaged;

	USED(color);
	pa = &pga.pgsza[pgszi];
	n = x = 0;
	prepaged = 0;

	/*
	 * Try first to steal text pages from non-prepaged processes,
	 * then from anyone.
	 */
Again:
	do{
		if((p = psincref(x)) == nil)
			break;
		np = 0;
		if(p->prepagemem == 0 || prepaged != 0)
		if(p->state != Dead && p->noswap == 0 && canqlock(&p->seglock)){
			for(i = 0; i < NSEG; i++){
				if((s = p->seg[i]) == nil)
					continue;
				if((s->type&SG_TYPE) == SG_TEXT)
					np = pageout(p, s);
			}
			qunlock(&p->seglock);
		}
		/*
		 * else process dead or locked or changing its segments
		 */
		psdecref(p);
		n += np;
		if(np > 0)
			DBG("pager: %d from proc #%d %#p\n", np, x, p);
		x++;
	}while(pa->freecount < Minpages);

	if(pa->freecount < Minpages && prepaged++ == 0)
		goto Again;
}
static void
aoeinit(void)
{
	static int init;
	static QLock l;

	if(!canqlock(&l))
		return;
	if(init == 0){
		fmtinstall(L'æ', fmtæ);
		events.rp = events.wp = events.buf;
		kproc("aoesweep", aoesweepproc, nil);
		aoecfg();
		init = 1;
	}
	qunlock(&l);
}
/*
 * syncblock tries to put out a block per hashline
 * returns 0 all done,
 * returns 1 if it missed something
 */
int
syncblock(void)
{
	Iobuf *p, *s, *q;
	Hiob *hp;
	int32_t h;
	int flag;

	flag = 0;
	for(h=0; h<nhiob; h++) {
		q = 0;
		hp = &hiob[h];
		lock(hp);
		s = hp->link;
		for(p=s;;) {
			if(p->flags & Bmod) {
				if(q)
					flag = 1;	/* more than 1 mod/line */
				q = p;
			}
			p = p->fore;
			if(p == s)
				break;
		}
		unlock(hp);
		if(q) {
			if(!canqlock(q)) {
				flag = 1;	/* missed -- was locked */
				continue;
			}
			if(!(q->flags & Bmod)) {
				qunlock(q);
				continue;
			}
			if(iobufmap(q)) {
				if(!devwrite(q->dev, q->addr, q->iobuf))
					q->flags &= ~(Bmod|Bimm);
				iobufunmap(q);
			} else
				flag = 1;
			qunlock(q);
		}
	}
	return flag;
}
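/*
 * Hypothetical caller sketch (not from the file server source): the return
 * convention documented above suggests a bounded sync loop that retries
 * while syncblock reports that modified blocks were missed. The function
 * name syncall and the retry limit are illustrative only.
 */
static void
syncall(void)
{
	int i;

	for(i = 0; i < 10; i++)
		if(!syncblock())	/* 0: every dirty block was written */
			break;
}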
int
cursoron(int dolock)
{
	int retry;

	if (dolock)
		lock(&cursor);
	if (canqlock(&drawlock)) {
		retry = 0;
		swcursorhide();
		swcursordraw();
		qunlock(&drawlock);
	} else
		retry = 1;
	if (dolock)
		unlock(&cursor);
	return retry;
}
void
swcursordraw(void)
{
	if(swvisible)
		return;
	if(swenabled == 0)
		return;
	if(swback == nil || swimg1 == nil || swmask1 == nil)
		return;
	assert(!canqlock(&drawlock));
	swvispt = swpt;
	swvisvers = swvers;
	swrect = rectaddpt(Rect(0,0,16,16), swvispt);
	memimagedraw(swback, swback->r, gscreen, swpt, memopaque, ZP, S);
	memimagedraw(gscreen, swrect, swimg1, ZP, swmask1, ZP, SoverD);
	flushmemscreen(swrect);
	swvisible = 1;
}
static Conv*
cmdclone(char *user)
{
	Conv *c, **pp, **ep;
	int i;

	c = nil;
	ep = &cmd.conv[cmd.maxconv];
	for(pp = cmd.conv; pp < ep; pp++) {
		c = *pp;
		if(c == nil) {
			c = malloc(sizeof(Conv));
			if(c == nil)
				error(Enomem);
			qlock(&c->l);
			c->inuse = 1;
			c->x = pp - cmd.conv;
			cmd.nc++;
			*pp = c;
			break;
		}
		if(canqlock(&c->l)){
			if(c->inuse == 0 && c->child == nil)
				break;
			qunlock(&c->l);
		}
	}
	if(pp >= ep)
		return nil;

	c->inuse = 1;
	kstrdup(&c->owner, user);
	kstrdup(&c->dir, "FIXME");
	c->perm = 0660;
	c->state = "Closed";
	c->esz = 0;
	for(i=0; i<nelem(c->fd); i++)
		c->fd[i] = -1;
	// XXX: this should go somewhere else.
	c->p = setupseg(0);
	qunlock(&c->l);

	return c;
}
void
swcursorclock(void)
{
	int x;

	if(!swenabled)
		return;
	if(swvisible && eqpt(swpt, swvispt) && swvers==swvisvers)
		return;

	x = splhi();
	if(swenabled)
	if(!swvisible || !eqpt(swpt, swvispt) || swvers!=swvisvers)
	if(canqlock(&drawlock)){
		swcursorhide();
		swcursordraw();
		qunlock(&drawlock);
	}
	splx(x);
}
static ISum*
scacheevict(void)
{
	ISum *s;
	int i;

	for(i=icache.nsum-1; i>=0; i--){
		s = icache.sum[i];
		if(canqlock(&s->lock)){
			if(i > 0){
				memmove(icache.sum+1, icache.sum, i*sizeof icache.sum[0]);
				icache.sum[0] = s;
			}
			sumclear(s);
			return s;
		}
	}
	return nil;
}
void
killbig(char *why)
{
	int i;
	Segment *s;
	ulong l, max;
	Proc *p, *ep, *kp;

	max = 0;
	kp = 0;
	ep = procalloc.arena+conf.nproc;
	for(p = procalloc.arena; p < ep; p++) {
		if(p->state == Dead || p->kp)
			continue;
		l = 0;
		for(i=1; i<NSEG; i++) {
			s = p->seg[i];
			if(s != 0)
				l += s->top - s->base;
		}
		if(l > max && ((p->procmode&0222) || strcmp(eve, p->user)!=0)) {
			kp = p;
			max = l;
		}
	}
	if(kp == 0)	/* no candidate found; avoid dereferencing nil below */
		return;
	print("%lud: %s killed: %s\n", kp->pid, kp->text, why);
	for(p = procalloc.arena; p < ep; p++) {
		if(p->state == Dead || p->kp)
			continue;
		if(p != kp && p->seg[BSEG] && p->seg[BSEG] == kp->seg[BSEG])
			p->procctl = Proc_exitbig;
	}
	kp->procctl = Proc_exitbig;
	for(i = 0; i < NSEG; i++) {
		s = kp->seg[i];
		if(s != 0 && canqlock(&s->lk)) {
			mfreeseg(s, s->base, (s->top - s->base)/BY2PG);
			qunlock(&s->lk);
		}
	}
}
static void
floppykproc(void *)
{
	FDrive *dp;

	while(waserror())
		;
	for(;;){
		for(dp = fl.d; dp < &fl.d[fl.ndrive]; dp++){
			if((fl.motor&MOTORBIT(dp->dev))
			&& TK2SEC(m->ticks - dp->lasttouched) > 5
			&& canqlock(&fl)){
				if(TK2SEC(m->ticks - dp->lasttouched) > 5)
					floppyoff(dp);
				qunlock(&fl);
			}
		}
		tsleep(&up->sleep, return0, 0, 1000);
	}
}
static Chan*
pointeropen(Chan* c, int omode)
{
	c = devopen(c, omode, pointertab, nelem(pointertab), devgen);
	if((ulong)c->qid.path == Qpointer){
		if(waserror()){
			c->flag &= ~COPEN;
			nexterror();
		}
		if(!canqlock(&mouse.q))
			error(Einuse);
		if(incref(&mouse.ref) != 1){
			qunlock(&mouse.q);
			error(Einuse);
		}
		cursorenable();
		qunlock(&mouse.q);
		poperror();
	}
	return c;
}
int
pwait(Waitmsg *w)
{
	Mach *m = machp();
	int cpid;
	Waitq *wq;

	if(!canqlock(&m->externup->qwaitr))
		error(Einuse);

	if(waserror()) {
		qunlock(&m->externup->qwaitr);
		nexterror();
	}

	lock(&m->externup->exl);
	if(m->externup->nchild == 0 && m->externup->waitq == 0) {
		unlock(&m->externup->exl);
		error(Enochild);
	}
	unlock(&m->externup->exl);

	sleep(&m->externup->waitr, haswaitq, m->externup);

	lock(&m->externup->exl);
	wq = m->externup->waitq;
	m->externup->waitq = wq->next;
	m->externup->nwait--;
	unlock(&m->externup->exl);

	qunlock(&m->externup->qwaitr);
	poperror();

	if(w)
		memmove(w, &wq->w, sizeof(Waitmsg));
	cpid = wq->w.pid;
	free(wq);
	return cpid;
}
static long
powerread(Chan* c, void* a, long n, vlong offset)
{
	Puser *p;
	char *msg;

	switch(c->qid.path & ~CHDIR){
	case Qdir:
		return devdirread(c, a, n, powertab, nelem(powertab), devgen);
	case Qdata:
		p = c->aux;
		for(;;){
			if(!canqlock(&p->rl))
				error(Einuse);	/* only one reader at a time */
			if(waserror()){
				qunlock(&p->rl);
				nexterror();
			}
			sleep(&p->r, isshutdown, p);
			poperror();
			qunlock(&p->rl);
			msg = nil;
			lock(p);
			if(p->state == Pwroff){
				msg = "power off";
				p->state = Pwrack;
			}
			unlock(p);
			if(msg != nil)
				return readstr(offset, a, n, msg);
		}
		break;
	case Qctl:
	default:
		n=0;
		break;
	}
	return n;
}
ulong
pwait(Waitmsg *w)
{
	ulong cpid;
	Waitq *wq;

	if(!canqlock(&up->qwaitr))
		error(Einuse);

	if(waserror()) {
		qunlock(&up->qwaitr);
		nexterror();
	}

	lock(&up->exl);
	while(up->waitq == nil) {
		if(up->nchild == 0) {
			unlock(&up->exl);
			error(Enochild);
		}
		unlock(&up->exl);
		sleep(&up->waitr, haswaitq, up);
		lock(&up->exl);
	}
	wq = up->waitq;
	up->waitq = wq->next;
	up->nwait--;
	unlock(&up->exl);

	qunlock(&up->qwaitr);
	poperror();

	if(w != nil)
		memmove(w, &wq->w, sizeof(Waitmsg));
	cpid = wq->w.pid;
	free(wq);
	return cpid;
}
static void
imagereclaim(void)
{
	int n;
	Page *p;
	uvlong ticks;

	irstats.calls++;
	/* Somebody is already cleaning the page cache */
	if(!canqlock(&imagealloc.ireclaim))
		return;

	lock(&palloc);
	ticks = fastticks(nil);
	n = 0;
	/*
	 * All the pages with images backing them are at the
	 * end of the list (see putpage) so start there and work
	 * backward.
	 */
	for(p = palloc.tail; p && p->image && (n<1000 || !imagealloc.free); p = p->prev) {
		if(p->ref == 0 && canlock(p)) {
			if(p->ref == 0 && p->image && !p->image->notext) {
				n++;
				uncachepage(p);
			}
			unlock(p);
		}
	}
	ticks = fastticks(nil) - ticks;
	unlock(&palloc);
	irstats.loops++;
	irstats.ticks += ticks;
	if(ticks > irstats.maxt)
		irstats.maxt = ticks;
	//print("T%llud+", ticks);
	qunlock(&imagealloc.ireclaim);
}