/*
 * Release a queueing lock.  l->key counts the holder plus any
 * waiters, so if our decrement brings it to 0 nobody was queued
 * behind us and no kernel wakeup is needed; otherwise wake one
 * waiter parked on the semaphore.
 */
void
unlock(Lock *l)
{
	if(adec(&l->key) != 0)
		semrelease(&l->sem, 1);
}
/*
 * Try to take the lock without blocking.
 * Returns 1 on success, 0 if the lock is already held.
 */
int
canlock(Lock *l)
{
	if(ainc(&l->key) == 1)
		return 1;	/* key went 0 -> 1: we now own the lock */

	/*
	 * Lost the race: back out our increment.  If the decrement
	 * does not bring key back to 0, another thread may have queued
	 * on the semaphore in the meantime, so pass the wakeup along
	 * rather than lose it.
	 */
	if(adec(&l->key) != 0)
		semrelease(&l->sem, 1);
	return 0;
}
/*
 * Worker loop for the semaphore test: perform nloops
 * acquire/increment/release cycles on the shared semaphore x.
 * A failed acquire is fatal (sysfatal does not return).
 */
void
semloop(void)
{
	int n;

	for(n = 0; n < nloops; n++){
		if(!semacquire(&x, 1))
			sysfatal("semacquire failed");
		incr++;
		semrelease(&x, 1);
	}
}
/*
 * Emulated semrelease system call: fetch (addr, count) from the
 * guest's syscall arguments, translate the guest address into a
 * host pointer, forward to the host semrelease, and store the
 * error-mapped result in the guest's R0.
 */
static void
syssemrelease(void)
{
	u32int semaddr, n;
	long *sp;
	Segment *sg;

	semaddr = arg(0);
	n = arg(1);
	if(systrace)
		fprint(2, "semrelease(%#ux, %ud)\n", semaddr, n);
	sp = vaddr(semaddr, 4, &sg);
	P->R[0] = noteerr(semrelease(sp, n), 0);
	segunlock(sg);	/* vaddr left the segment locked */
}
/*
 * semrelease system call: validate the user semaphore address and
 * alignment, find its segment, sanity-check the delta, and release.
 * Raises Ebadarg on a bad address, negative delta, or a corrupted
 * (negative) semaphore value.
 */
long
syssemrelease(ulong *arg)
{
	long *sp, delta;
	Segment *s;

	validaddr(arg[0], sizeof(long), 1);
	evenaddr(arg[0]);
	sp = (long*)arg[0];
	delta = arg[1];
	if((s = seg(up, (ulong)sp, 0)) == nil)
		error(Ebadarg);
	if(delta < 0 || *sp < 0)
		error(Ebadarg);
	return semrelease(s, sp, delta);
}
void tsemloop(void) { int i; i = 0; while(i < nloops){ if(tsemacquire(&x, 10)){ if((i % 1000) == 0) sleep(10); incr++; i++; semrelease(&x, 1); } else { //print("pid %d timeout\n", getpid()); } } exits(nil); }
// func RaceSemrelease(s *uint32) void runtime·RaceSemrelease(uint32 *s) { runtime·semrelease(s); }
// Run one garbage collection: stop the world, mark and sweep if the
// heap has grown past the next_gc trigger (or force != 0), restart
// the world, and kick the finalizer goroutine if finalizers queued.
// Serialized on gcsema so only one collection runs at a time.
void gc(int32 force) { int64 t0, t1; byte *p; Finalizer *fp; // The gc is turned off (via enablegc) until // the bootstrap has completed. // Also, malloc gets called in the guts // of a number of libraries that might be // holding locks. To avoid priority inversion // problems, don't bother trying to run gc // while holding a lock. The next mallocgc // without a lock will do the gc instead. if(!mstats.enablegc || m->locks > 0 || panicking) return; if(gcpercent == -2) { // first time through: read policy from $GOGC p = getenv("GOGC"); if(p == nil || p[0] == '\0') gcpercent = 100; else if(strcmp(p, (byte*)"off") == 0) gcpercent = -1; else gcpercent = atoi(p); } if(gcpercent < 0) return; // GOGC=off: collection disabled semacquire(&gcsema); t0 = nanotime(); m->gcing = 1; stoptheworld(); if(mheap.Lock.key != 0) throw("mheap locked during gc"); if(force || mstats.heap_alloc >= mstats.next_gc) { mark(); sweep(); stealcache(); // next trigger: current live heap scaled by gcpercent mstats.next_gc = mstats.heap_alloc+mstats.heap_alloc*gcpercent/100; } m->gcing = 0; m->locks++; // disable gc during the mallocs in newproc fp = finq; if(fp != nil) { // kick off or wake up goroutine to run queued finalizers if(fing == nil) fing = newproc1((byte*)runfinq, nil, 0, 0); else if(fingwait) { fingwait = 0; ready(fing); } } m->locks--; t1 = nanotime(); mstats.numgc++; mstats.pause_ns += t1 - t0; if(mstats.debuggc) printf("pause %D\n", t1-t0); semrelease(&gcsema); starttheworld(); // give the queued finalizers, if any, a chance to run if(fp != nil) gosched(); }