// Profiling helper loop (Windows): on every tick of the profiling timer,
// walk all M's, suspend each M's OS thread, and sample it with profilem.
// Runs on a dedicated high-priority thread outside the Go scheduler.
void
runtime·profileloop1(void)
{
	M *mp, *allm;
	void *thread;

	// Raise our own priority so we can preempt running Go threads.
	// (uintptr)-2 is the Windows pseudo-handle for the current thread.
	runtime·stdcall(runtime·SetThreadPriority, 2, (uintptr)-2, (uintptr)THREAD_PRIORITY_HIGHEST);
	for(;;) {
		// Block until the profiling timer fires ((uintptr)-1 = INFINITE wait).
		runtime·stdcall(runtime·WaitForSingleObject, 2, profiletimer, (uintptr)-1);
		allm = runtime·atomicloadp(&runtime·allm);
		for(mp = allm; mp != nil; mp = mp->alllink) {
			thread = runtime·atomicloadp(&mp->thread);
			// Do not profile threads blocked on Notes,
			// this includes idle worker threads,
			// idle timer thread, idle heap scavenger, etc.
			if(thread == nil || mp->profilehz == 0 || mp->blocked)
				continue;
			runtime·stdcall(runtime·SuspendThread, 1, thread);
			// Re-check under suspension: profilehz/blocked may have
			// changed between the test above and the suspend.
			if(mp->profilehz != 0 && !mp->blocked)
				profilem(mp);
			runtime·stdcall(runtime·ResumeThread, 1, thread);
		}
	}
}
// runtime·lock acquires l for the current M (semaphore-based Lock
// implementation).  l->key holds 0 (unlocked), LOCKED, or a pointer to
// the head of a list of waiting M's with the low LOCKED bit set.
void
runtime·lock(Lock *l)
{
	uintptr v;
	uint32 i, spin;

	if(m->locks++ < 0)
		runtime·throw("runtime·lock: lock count");

	// Speculative grab for lock.
	if(runtime·casp((void**)&l->key, nil, (void*)LOCKED))
		return;

	if(m->waitsema == 0)
		m->waitsema = runtime·semacreate();

	// On uniprocessor's, no point spinning.
	// On multiprocessors, spin for ACTIVE_SPIN attempts.
	spin = 0;
	if(runtime·ncpu > 1)
		spin = ACTIVE_SPIN;

	for(i=0;; i++) {
		v = (uintptr)runtime·atomicloadp((void**)&l->key);
		if((v&LOCKED) == 0) {
unlocked:
			// Try to set the LOCKED bit while preserving any
			// waiter list already recorded in v.
			if(runtime·casp((void**)&l->key, (void*)v, (void*)(v|LOCKED)))
				return;
			i = 0;
		}
		if(i<spin)
			runtime·procyield(ACTIVE_SPIN_CNT);
		else if(i<spin+PASSIVE_SPIN)
			runtime·osyield();
		else {
			// Someone else has it.
			// l->waitm points to a linked list of M's waiting
			// for this lock, chained through m->nextwaitm.
			// Queue this M.
			for(;;) {
				m->nextwaitm = (void*)(v&~LOCKED);
				if(runtime·casp((void**)&l->key, (void*)v, (void*)((uintptr)m|LOCKED)))
					break;
				v = (uintptr)runtime·atomicloadp((void**)&l->key);
				if((v&LOCKED) == 0)
					goto unlocked;
			}
			if(v&LOCKED) {
				// Queued.  Wait.
				runtime·semasleep(-1);
				i = 0;
			}
		}
	}
}
// runtime·unlock releases l.  If other M's are queued on the lock,
// dequeue the head of the list and wake it via its semaphore.
void
runtime·unlock(Lock *l)
{
	uintptr v;
	M *mp;

	if(--m->locks < 0)
		runtime·throw("runtime·unlock: lock count");

	for(;;) {
		v = (uintptr)runtime·atomicloadp((void**)&l->key);
		if(v == LOCKED) {
			// No waiters; just clear the lock word.
			if(runtime·casp((void**)&l->key, (void*)LOCKED, nil))
				break;
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = (void*)(v&~LOCKED);
			if(runtime·casp((void**)&l->key, (void*)v, mp->nextwaitm)) {
				// Dequeued an M.  Wake it.
				runtime·semawakeup(mp);
				break;
			}
		}
	}
}
// runtime·NumCgoCall sums ncgocall across all M's.
// goc2c-style signature: the int64 parameter is the out-going result
// slot of Go's func NumCgoCall() int64, flushed via FLUSH.
void
runtime·NumCgoCall(int64 ret)
{
	// Renamed from "m": a local named m shadows the runtime's global
	// current-M register variable, which is an accident waiting to
	// happen (sibling code here already uses mp for this purpose).
	M *mp;

	ret = 0;
	for(mp=runtime·atomicloadp(&runtime·allm); mp; mp=mp->alllink)
		ret += mp->ncgocall;
	FLUSH(&ret);
}
// runtime·goexitsall posts the given exit note to every other M's OS
// process (Plan 9), so all runtime threads exit together.
void
runtime·goexitsall(int8 *status)
{
	M *it;
	int32 self;

	self = getpid();
	it = runtime·atomicloadp(&runtime·allm);
	while(it != nil) {
		if(it->procid != self)
			runtime·postnote(it->procid, status);
		it = it->alllink;
	}
}
// goexitsall posts the "gointr" note to every other M's OS process
// (early Plan 9 runtime), interrupting them so the program exits.
void
goexitsall(void)
{
	// Renamed from "m": a local named m shadows the runtime's global
	// current-M register variable; later runtime code uses mp.
	M *mp;
	int32 pid;

	pid = getpid();
	for(mp=runtime·atomicloadp(&runtime·allm); mp; mp=mp->alllink)
		if(mp->procid != pid)
			runtime·postnote(mp->procid, "gointr");
}
void runtime·profileloop1(void) { M *mp, *allm; void *thread; runtime·stdcall(runtime·SetThreadPriority, 2, (uintptr)-2, (uintptr)THREAD_PRIORITY_HIGHEST); for(;;) { runtime·stdcall(runtime·WaitForSingleObject, 2, profiletimer, (uintptr)-1); allm = runtime·atomicloadp(&runtime·allm); for(mp = allm; mp != nil; mp = mp->alllink) { thread = runtime·atomicloadp(&mp->thread); if(thread == nil) continue; runtime·stdcall(runtime·SuspendThread, 1, thread); if(mp->profilehz != 0) profilem(mp); runtime·stdcall(runtime·ResumeThread, 1, thread); } } }
// goc2c-generated output (see #line directive referencing runtime1.goc);
// do not hand-edit.  NumCgoCall sums ncgocall across all M's; the int64
// parameter is the result slot of Go's func NumCgoCall() int64.
void
runtime·NumCgoCall(int64 ret)
{
	// Generated preamble: zero the result slot before the body runs.
	ret = 0;
	FLUSH(&ret);
#line 18 "/home/14/ren/source/golang/go/src/pkg/runtime/runtime1.goc"
	M *mp;
	ret = 0;
	for(mp=runtime·atomicloadp(&runtime·allm); mp; mp=mp->alllink)
		ret += mp->ncgocall;
	FLUSH(&ret);
}
// goc2c-generated output (see #line directive: Windows release build
// tree); do not hand-edit.  NumCgoCall sums ncgocall across all M's;
// the int64 parameter is the result slot of Go's func NumCgoCall() int64.
void
runtime·NumCgoCall(int64 ret)
{
	// Generated preamble: zero the result slot before the body runs.
	ret = 0;
	FLUSH(&ret);
#line 18 "C:\Users\ADMINI~1\AppData\Local\Temp\2\makerelease686069423\go\src\pkg\runtime\runtime1.goc"
	M *mp;
	ret = 0;
	for(mp=runtime·atomicloadp(&runtime·allm); mp; mp=mp->alllink)
		ret += mp->ncgocall;
	FLUSH(&ret);
}
// goc2c-generated output (see #line directive referencing iface.goc).
// convT2I converts the value of concrete type t stored at elem to the
// interface type inter, memoizing the computed Itab in *cache.
void
runtime·convT2I(Type* t, InterfaceType* inter, Itab** cache, byte* elem, Iface ret)
{
#line 178 "/home/pi/go_build/hg/go/src/pkg/runtime/iface.goc"
	Itab *tab;

	// Fast path: reuse a previously computed itab, if any.
	tab = runtime·atomicloadp(cache);
	if(!tab) {
		tab = itab(inter, t, 0);
		runtime·atomicstorep(cache, tab);
	}
	ret.tab = tab;
	copyin(t, elem, &ret.data);
	FLUSH(&ret);
}
// convT2I, hand-written variadic-ABI variant: the concrete value and the
// Iface result slot are passed on the stack after the fixed arguments,
// so they are located by pointer arithmetic relative to &cache.
void
runtime·convT2I(Type *t, InterfaceType *inter, Itab **cache, ...)
{
	byte *elem;
	Iface *ret;
	Itab *tab;
	int32 wid;

	// The value of type t begins immediately after the last named
	// argument (assumes the goc calling convention lays arguments out
	// contiguously on the stack).
	elem = (byte*)(&cache+1);
	wid = t->size;
	// The result slot follows the value, rounded up to Structrnd.
	ret = (Iface*)(elem + ROUND(wid, Structrnd));
	// Fast path: reuse a previously computed itab, if any.
	tab = runtime·atomicloadp(cache);
	if(!tab) {
		tab = itab(inter, t, 0);
		runtime·atomicstorep(cache, tab);
	}
	ret->tab = tab;
	copyin(t, elem, &ret->data);
}
// goc2c-generated output (see #line directive referencing iface.goc).
// convT2I converts the value of concrete type t stored at elem to the
// interface type inter, memoizing the computed Itab in *cache.
void
runtime·convT2I(Type* t, InterfaceType* inter, Itab** cache, byte* elem, Iface ret)
{
	// Generated preamble: zero the result slot before the body runs.
	ret.tab = 0;
	ret.data = 0;
	FLUSH(&ret);
#line 192 "/home/14/ren/source/golang/go/src/pkg/runtime/iface.goc"
	Itab *tab;

	// Fast path: reuse a previously computed itab, if any.
	tab = runtime·atomicloadp(cache);
	if(!tab) {
		tab = itab(inter, t, 0);
		runtime·atomicstorep(cache, tab);
	}
	ret.tab = tab;
	copyin(t, elem, &ret.data);
	FLUSH(&ret);
}
// runtime·notewakeup wakes the M (if any) sleeping on note n.
// A note may be woken at most once; a second wakeup throws.
void
runtime·notewakeup(Note *n)
{
	M *mp;

	// Atomically swap LOCKED into n->key, capturing the prior value.
	for(;;) {
		mp = runtime·atomicloadp((void**)&n->key);
		if(runtime·casp((void**)&n->key, mp, (void*)LOCKED))
			break;
	}

	// Successfully set waitm to LOCKED.
	// What was it before?
	if(mp == (M*)LOCKED) {
		// Two notewakeups!  Not allowed.
		runtime·throw("notewakeup - double wakeup");
	} else if(mp != nil) {
		// Must be the waiting m.  Wake it up.
		runtime·semawakeup(mp);
	}
	// else: nothing was waiting, done.
}
// itab builds (or fetches from the global hash cache) the Itab for the
// pair (inter, type).  canfail!=0 makes a missing method return nil
// (the ",ok" form) instead of panicking.
// goc2c-generated output; #line directives refer to iface.goc.
// NOTE(review): this chunk is truncated — the "out:" label targeted by
// "goto out" lies outside the visible text.
static Itab*
itab(InterfaceType *inter, Type *type, int32 canfail)
{
	int32 locked;
	int32 ni;
	Method *t, *et;
	IMethod *i, *ei;
	uint32 h;
	String *iname, *ipkgPath;
	Itab *m;
	UncommonType *x;
	Type *itype;
	Eface err;
#line 38 "/home/pi/go_build/hg/go/src/pkg/runtime/iface.goc"
	if(inter->mhdr.len == 0)
		runtime·throw("internal error - misuse of itab");
#line 41 "/home/pi/go_build/hg/go/src/pkg/runtime/iface.goc"
	locked = 0;
#line 44 "/home/pi/go_build/hg/go/src/pkg/runtime/iface.goc"
	// A type with no method table cannot satisfy a non-empty interface.
	x = type->x;
	if(x == nil) {
		if(canfail)
			return nil;
		iname = inter->m[0].name;
		goto throw;
	}
#line 53 "/home/pi/go_build/hg/go/src/pkg/runtime/iface.goc"
	h = inter->hash;
	h += 17 * type->hash;
#line 56 "/home/pi/go_build/hg/go/src/pkg/runtime/iface.goc"
	h %= nelem(hash);
#line 60 "/home/pi/go_build/hg/go/src/pkg/runtime/iface.goc"
	// Look twice: first without the lock, then with it.
	for(locked=0; locked<2; locked++) {
		if(locked)
			runtime·lock(&ifacelock);
		for(m=runtime·atomicloadp(&hash[h]); m!=nil; m=m->link) {
			if(m->inter == inter && m->type == type) {
				if(m->bad) {
					m = nil;
					if(!canfail) {
#line 75 "/home/pi/go_build/hg/go/src/pkg/runtime/iface.goc"
						goto search;
					}
				}
				if(locked)
					runtime·unlock(&ifacelock);
				return m;
			}
		}
	}
#line 85 "/home/pi/go_build/hg/go/src/pkg/runtime/iface.goc"
	ni = inter->mhdr.len;
	m = runtime·persistentalloc(sizeof(*m) + ni*sizeof m->fun[0], 0, &mstats.other_sys);
	m->inter = inter;
	m->type = type;
#line 90 "/home/pi/go_build/hg/go/src/pkg/runtime/iface.goc"
search:
#line 95 "/home/pi/go_build/hg/go/src/pkg/runtime/iface.goc"
	// Walk the two sorted method lists in lock step.
	i = inter->m;
	ei = i + inter->mhdr.len;
	t = x->m;
	et = t + x->mhdr.len;
	for(; i < ei; i++) {
		itype = i->type;
		iname = i->name;
		ipkgPath = i->pkgPath;
		for(;; t++) {
			if(t >= et) {
				if(!canfail) {
throw:
#line 108 "/home/pi/go_build/hg/go/src/pkg/runtime/iface.goc"
					runtime·newTypeAssertionError(nil, type->string, inter->string, iname, &err);
					if(locked)
						runtime·unlock(&ifacelock);
					runtime·panic(err);
					return nil;
				}
				m->bad = 1;
				goto out;
			}
			if(t->mtyp == itype && t->name == iname && t->pkgPath == ipkgPath)
				break;
		}
		if(m)
			m->fun[i - inter->m] = t->ifn;
	}
// itab returns the Itab describing how concrete type "type" implements
// interface type "inter", consulting a global hash of previously built
// tables.  canfail!=0 makes a missing method return nil (the ",ok"
// form) instead of panicking.
// NOTE(review): this chunk is truncated — the "out:" label targeted by
// "goto out" lies outside the visible text.
static Itab*
itab(InterfaceType *inter, Type *type, int32 canfail)
{
	int32 locked;
	int32 ni;
	Method *t, *et;
	IMethod *i, *ei;
	uint32 h;
	String *iname, *ipkgPath;
	Itab *m;
	UncommonType *x;
	Type *itype;
	Eface err;

	if(inter->mhdr.len == 0)
		runtime·throw("internal error - misuse of itab");
	locked = 0;

	// easy case
	x = type->x;
	if(x == nil) {
		if(canfail)
			return nil;
		iname = inter->m[0].name;
		goto throw;
	}

	// compiler has provided some good hash codes for us.
	h = inter->hash;
	h += 17 * type->hash;
	// TODO(rsc): h += 23 * x->mhash ?
	h %= nelem(hash);

	// look twice - once without lock, once with.
	// common case will be no lock contention.
	for(locked=0; locked<2; locked++) {
		if(locked)
			runtime·lock(&ifacelock);
		for(m=runtime·atomicloadp(&hash[h]); m!=nil; m=m->link) {
			if(m->inter == inter && m->type == type) {
				if(m->bad) {
					m = nil;
					if(!canfail) {
						// this can only happen if the conversion
						// was already done once using the , ok form
						// and we have a cached negative result.
						// the cached result doesn't record which
						// interface function was missing, so jump
						// down to the interface check, which will
						// do more work but give a better error.
						goto search;
					}
				}
				if(locked)
					runtime·unlock(&ifacelock);
				return m;
			}
		}
	}

	ni = inter->mhdr.len;
	m = runtime·persistentalloc(sizeof(*m) + ni*sizeof m->fun[0], 0, &mstats.other_sys);
	m->inter = inter;
	m->type = type;

search:
	// both inter and type have method sorted by name,
	// and interface names are unique,
	// so can iterate over both in lock step;
	// the loop is O(ni+nt) not O(ni*nt).
	i = inter->m;
	ei = i + inter->mhdr.len;
	t = x->m;
	et = t + x->mhdr.len;
	for(; i < ei; i++) {
		itype = i->type;
		iname = i->name;
		ipkgPath = i->pkgPath;
		for(;; t++) {
			if(t >= et) {
				if(!canfail) {
				throw:
					// didn't find method
					runtime·newTypeAssertionError(
						nil, type->string, inter->string,
						iname, &err);
					if(locked)
						runtime·unlock(&ifacelock);
					runtime·panic(err);
					return nil;	// not reached
				}
				m->bad = 1;
				goto out;
			}
			if(t->mtyp == itype && t->name == iname && t->pkgPath == ipkgPath)
				break;
		}
		if(m)
			m->fun[i - inter->m] = t->ifn;
	}
// runtime·notetsleep sleeps on note n for at most ns nanoseconds
// (ns < 0 means sleep until notewakeup).  It registers this M in
// n->key, sleeps on the M's semaphore, and on timeout carefully
// unregisters so that a racing notewakeup cannot leave the semaphore
// out of sync with the note state.
void
runtime·notetsleep(Note *n, int64 ns)
{
	M *mp;
	int64 deadline, now;

	if(ns < 0) {
		runtime·notesleep(n);
		return;
	}

	if(m->waitsema == 0)
		m->waitsema = runtime·semacreate();

	// Register for wakeup on n->waitm.
	if(!runtime·casp((void**)&n->key, nil, m)) {	// must be LOCKED (got wakeup already)
		if(n->key != LOCKED)
			runtime·throw("notetsleep - waitm out of sync");
		return;
	}

	// Disable profiling around the sleep — presumably so the profiler
	// does not sample this thread while it is blocked here; confirm
	// against the setprof implementation.
	if(m->profilehz > 0)
		runtime·setprof(false);
	deadline = runtime·nanotime() + ns;
	for(;;) {
		// Registered.  Sleep.
		if(runtime·semasleep(ns) >= 0) {
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			if(m->profilehz > 0)
				runtime·setprof(true);
			return;
		}

		// Interrupted or timed out.  Still registered.  Semaphore not acquired.
		now = runtime·nanotime();
		if(now >= deadline)
			break;

		// Deadline hasn't arrived.  Keep sleeping.
		ns = deadline - now;
	}
	if(m->profilehz > 0)
		runtime·setprof(true);

	// Deadline arrived.  Still registered.  Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for(;;) {
		mp = runtime·atomicloadp((void**)&n->key);
		if(mp == m) {
			// No wakeup yet; unregister if possible.
			if(runtime·casp((void**)&n->key, mp, nil))
				return;
		} else if(mp == (M*)LOCKED) {
			// Wakeup happened so semaphore is available.
			// Grab it to avoid getting out of sync.
			if(runtime·semasleep(-1) < 0)
				runtime·throw("runtime: unable to acquire - semaphore out of sync");
			return;
		} else {
			runtime·throw("runtime: unexpected waitm - semaphore out of sync");
		}
	}
}