// runtime·resetcpuprofiler sets the CPU profiling interrupt rate to hz
// samples per second (hz <= 0 disarms the profile timer).
// Windows implementation: profiling ticks are delivered by a waitable
// timer serviced by a dedicated thread running runtime·profileloop.
void
runtime·resetcpuprofiler(int32 hz)
{
	static Lock lock;
	void *timer, *thread;
	int32 ms;
	int64 due;

	// Lazily create the shared timer and its servicing thread, once.
	runtime·lock(&lock);
	if(profiletimer == nil) {
		timer = runtime·stdcall(runtime·CreateWaitableTimer, 3, nil, nil, nil);
		// Publish atomically so other threads never see a torn pointer.
		runtime·atomicstorep(&profiletimer, timer);
		thread = runtime·stdcall(runtime·CreateThread, 6, nil, nil, runtime·profileloop, nil, nil, nil);
		// The thread runs for the life of the process; drop our handle.
		runtime·stdcall(runtime·CloseHandle, 1, thread);
	}
	runtime·unlock(&lock);

	ms = 0;
	// Default due time is the most negative int64: effectively "never",
	// which leaves the timer disarmed when hz <= 0.
	due = 1LL<<63;
	if(hz > 0) {
		ms = 1000 / hz;
		if(ms == 0)
			ms = 1;	// clamp to the 1ms minimum period
		// Negative due time is relative, in 100ns units
		// (per SetWaitableTimer): ms * -10000 == -ms milliseconds.
		due = ms * -10000;
	}
	runtime·stdcall(runtime·SetWaitableTimer, 6, profiletimer, &due, (uintptr)ms, nil, nil, nil);
	runtime·atomicstore((uint32*)&m->profilehz, hz);
}
// net·runtime_pollUnblock marks pd as closing, cancels its deadline
// timers, and wakes any goroutines parked on it for read or write.
// Throws if the descriptor is already closing.
void
net·runtime_pollUnblock(PollDesc* pd)
{
#line 228 "/home/14/ren/source/golang/go/src/pkg/runtime/netpoll.goc"
	G *rg, *wg;

	runtime·lock(pd);
	if(pd->closing)
		runtime·throw("runtime_pollUnblock: already closing");
	pd->closing = true;
	// Bump seq so any deadline timer still in flight sees a stale
	// sequence number and becomes a no-op.
	pd->seq++;
	runtime·atomicstorep(&rg, nil); // full memory barrier between store to closing and read of rg/wg in netpollunblock
	rg = netpollunblock(pd, 'r', false);
	wg = netpollunblock(pd, 'w', false);
	// Cancel any armed read/write deadline timers.
	if(pd->rt.fv) {
		runtime·deltimer(&pd->rt);
		pd->rt.fv = nil;
	}
	if(pd->wt.fv) {
		runtime·deltimer(&pd->wt);
		pd->wt.fv = nil;
	}
	runtime·unlock(pd);
	// Make the unblocked goroutines runnable outside the lock.
	if(rg)
		runtime·ready(rg);
	if(wg)
		runtime·ready(wg);
}
// net·runtime_pollUnblock marks pd as closing, cancels its deadline
// timers, and wakes any goroutines parked on it for read or write.
// Throws if the descriptor is already closing.
void
net·runtime_pollUnblock(PollDesc* pd)
{
#line 228 "C:\Users\gopher\AppData\Local\Temp\1\makerelease745458658\go\src\pkg\runtime\netpoll.goc"
	G *rg, *wg;

	runtime·lock(pd);
	if(pd->closing)
		runtime·throw("runtime_pollUnblock: already closing");
	pd->closing = true;
	// Bump seq so any deadline timer still in flight sees a stale
	// sequence number and becomes a no-op.
	pd->seq++;
	runtime·atomicstorep(&rg, nil); // full memory barrier between store to closing and read of rg/wg in netpollunblock
	rg = netpollunblock(pd, 'r', false);
	wg = netpollunblock(pd, 'w', false);
	// Cancel any armed read/write deadline timers.
	if(pd->rt.fv) {
		runtime·deltimer(&pd->rt);
		pd->rt.fv = nil;
	}
	if(pd->wt.fv) {
		runtime·deltimer(&pd->wt);
		pd->wt.fv = nil;
	}
	runtime·unlock(pd);
	// Make the unblocked goroutines runnable outside the lock.
	if(rg)
		runtime·ready(rg);
	if(wg)
		runtime·ready(wg);
}
// runtime·typ2Itab computes the itab for the pair (t, inter), publishes
// it into *cache, and returns it to the Go caller through the result
// slot tab (goc calling convention: results are frame slots, flushed
// with FLUSH).
void
runtime·typ2Itab(Type* t, InterfaceType* inter, Itab** cache, Itab* tab)
{
#line 172 "/home/pi/go_build/hg/go/src/pkg/runtime/iface.goc"
	tab = itab(inter, t, 0);
	// Atomic store so concurrent readers of *cache never observe a
	// partially published pointer.
	runtime·atomicstorep(cache, tab);
	FLUSH(&tab);
}
// runtime·typ2Itab computes the itab for the pair (t, inter), publishes
// it into *cache, and hands it back to the Go caller through the result
// slot ret (flushed with FLUSH per the runtime's calling convention).
void
runtime·typ2Itab(Type *t, InterfaceType *inter, Itab **cache, Itab *ret)
{
	Itab *result;

	result = itab(inter, t, 0);
	// Publish atomically so concurrent readers of *cache never see a
	// torn pointer.
	runtime·atomicstorep(cache, result);
	ret = result;
	FLUSH(&ret);
}
// Called to initialize a new m (including the bootstrap m). // Called on the new thread, can not allocate memory. void runtime·minit(void) { void *thandle; // -1 = current process, -2 = current thread runtime·stdcall(runtime·DuplicateHandle, 7, (uintptr)-1, (uintptr)-2, (uintptr)-1, &thandle, (uintptr)0, (uintptr)0, (uintptr)DUPLICATE_SAME_ACCESS); runtime·atomicstorep(&m->thread, thandle); }
// runtime·typ2Itab computes the itab for the pair (t, inter), publishes
// it into *cache, and returns it to the Go caller through the result
// slot tab.
void
runtime·typ2Itab(Type* t, InterfaceType* inter, Itab** cache, Itab* tab)
{
	// goc-generated prologue: zero the result slot before the body runs.
	tab = 0;
	FLUSH(&tab);
#line 186 "/home/14/ren/source/golang/go/src/pkg/runtime/iface.goc"
	tab = itab(inter, t, 0);
	// Atomic store so concurrent readers of *cache never observe a
	// partially published pointer.
	runtime·atomicstorep(cache, tab);
	FLUSH(&tab);
}
// runtime·typ2Itab computes the itab for the pair (t, inter), publishes
// it into *cache, and returns it to the Go caller through the result
// slot tab.
void
runtime·typ2Itab(Type* t, InterfaceType* inter, Itab** cache, Itab* tab)
{
	// goc-generated prologue: zero the result slot before the body runs.
	tab = 0;
	FLUSH(&tab);
#line 186 "/tmp/makerelease197226928/go/src/pkg/runtime/iface.goc"
	tab = itab(inter, t, 0);
	// Atomic store so concurrent readers of *cache never observe a
	// partially published pointer.
	runtime·atomicstorep(cache, tab);
	FLUSH(&tab);
}
// runtime·convT2I converts a value of concrete type t (at elem) to the
// interface type inter, producing the Iface result ret. The itab is
// memoized in *cache so repeated conversions skip the itab lookup.
void
runtime·convT2I(Type* t, InterfaceType* inter, Itab** cache, byte* elem, Iface ret)
{
#line 178 "/home/pi/go_build/hg/go/src/pkg/runtime/iface.goc"
	Itab *tab;

	// Fast path: reuse a previously published itab from the cache.
	tab = runtime·atomicloadp(cache);
	if(!tab) {
		tab = itab(inter, t, 0);
		// Atomic publish so readers never see a torn pointer.
		runtime·atomicstorep(cache, tab);
	}
	ret.tab = tab;
	// Copy the value (or allocate and copy, inside copyin) into the
	// interface data word.
	copyin(t, elem, &ret.data);
	FLUSH(&ret);
}
// runtime·newosproc creates a new OS thread to run mstart on behalf of
// m. The stack and entry function arguments are fixed by convention
// (g = m->g0, fn = mstart), so stk/g/fn are accepted but unused here.
// Throws if the thread cannot be created.
void
runtime·newosproc(M *m, G *g, void *stk, void (*fn)(void))
{
	void *h;

	USED(stk);
	USED(g);	// assuming g = m->g0
	USED(fn);	// assuming fn = mstart

	// 0x20000 reservation with STACK_SIZE_PARAM_IS_A_RESERVATION:
	// reserve rather than commit the stack up front.
	h = runtime·stdcall(runtime·CreateThread, 6,
		nil, (uintptr)0x20000, runtime·tstart_stdcall, m,
		STACK_SIZE_PARAM_IS_A_RESERVATION, nil);
	if(h == nil) {
		runtime·printf("runtime: failed to create new OS thread (have %d already; errno=%d)\n", runtime·mcount(), runtime·getlasterror());
		runtime·throw("runtime.newosproc");
	}
	// Publish the handle atomically; other threads may read m->thread.
	runtime·atomicstorep(&m->thread, h);
}
// mcommoninit performs the m initialization common to all m's:
// assigns an id, seeds fastrand, allocates the mcache if needed,
// records the creation stack, and links mp onto runtime·allm.
static void
mcommoninit(M *mp)
{
	mp->id = runtime·sched.mcount++;
	// Seed per-m random state; mixing in id and cputicks keeps the
	// streams distinct across m's.
	mp->fastrand = 0x49f6428aUL + mp->id + runtime·cputicks();

	if(mp->mcache == nil)
		mp->mcache = runtime·allocmcache();

	// Record the stack that created this m, for debugging.
	runtime·callers(1, mp->createstack, nelem(mp->createstack));

	// Add to runtime·allm so garbage collector doesn't free m
	// when it is just in a register or thread-local storage.
	mp->alllink = runtime·allm;
	// runtime·NumCgoCall() iterates over allm w/o schedlock,
	// so we need to publish it safely.
	runtime·atomicstorep(&runtime·allm, mp);
}
// runtime·convT2I converts a value of concrete type t to interface
// type inter. Plan 9 C vararg convention: the value and the Iface
// result live in the caller's argument frame after the fixed
// parameters, so their addresses are computed by hand below.
void
runtime·convT2I(Type *t, InterfaceType *inter, Itab **cache, ...)
{
	byte *elem;
	Iface *ret;
	Itab *tab;
	int32 wid;

	// elem starts immediately after the last fixed argument.
	elem = (byte*)(&cache+1);
	wid = t->size;
	// The result slot follows elem, rounded up to structure alignment.
	ret = (Iface*)(elem + ROUND(wid, Structrnd));

	// Fast path: reuse a previously published itab from the cache.
	tab = runtime·atomicloadp(cache);
	if(!tab) {
		tab = itab(inter, t, 0);
		// Atomic publish so readers never see a torn pointer.
		runtime·atomicstorep(cache, tab);
	}
	ret->tab = tab;
	copyin(t, elem, &ret->data);
}
// runtime·convT2I converts a value of concrete type t (at elem) to the
// interface type inter, producing the Iface result ret. The itab is
// memoized in *cache so repeated conversions skip the itab lookup.
void
runtime·convT2I(Type* t, InterfaceType* inter, Itab** cache, byte* elem, Iface ret)
{
	// goc-generated prologue: zero the result slot before the body runs.
	ret.tab = 0;
	ret.data = 0;
	FLUSH(&ret);
#line 192 "/home/14/ren/source/golang/go/src/pkg/runtime/iface.goc"
	Itab *tab;

	// Fast path: reuse a previously published itab from the cache.
	tab = runtime·atomicloadp(cache);
	if(!tab) {
		tab = itab(inter, t, 0);
		// Atomic publish so readers never see a torn pointer.
		runtime·atomicstorep(cache, tab);
	}
	ret.tab = tab;
	copyin(t, elem, &ret.data);
	FLUSH(&ret);
}
// mcommoninit performs the m initialization common to all m's:
// assigns an id, seeds fastrand, sets up the per-m stack allocator and
// mcache, records the creation stack, and links m onto runtime·allm.
static void
mcommoninit(M *m)
{
	m->id = runtime·sched.mcount++;
	// Seed per-m random state; mixing in id and cputicks keeps the
	// streams distinct across m's.
	m->fastrand = 0x49f6428aUL + m->id + runtime·cputicks();

	// Per-m fixed-size allocator for goroutine stack segments.
	m->stackalloc = runtime·malloc(sizeof(*m->stackalloc));
	runtime·FixAlloc_Init(m->stackalloc, FixedStack, runtime·SysAlloc, nil, nil);

	if(m->mcache == nil)
		m->mcache = runtime·allocmcache();

	// Record the stack that created this m, for debugging.
	runtime·callers(1, m->createstack, nelem(m->createstack));

	// Add to runtime·allm so garbage collector doesn't free m
	// when it is just in a register or thread-local storage.
	m->alllink = runtime·allm;
	// runtime·NumCgoCall() iterates over allm w/o schedlock,
	// so we need to publish it safely.
	runtime·atomicstorep(&runtime·allm, m);
}
// net·runtime_pollSetDeadline installs read and/or write deadlines on
// pd. mode selects which: 'r', 'w', or 'r'+'w' for both. d is an
// absolute time in the runtime·nanotime() scale; d == 0 clears the
// deadline, and a d already in the past unblocks pending IO at once.
void
net·runtime_pollSetDeadline(PollDesc* pd, int64 d, intgo mode)
{
#line 164 "C:\Users\gopher\AppData\Local\Temp\1\makerelease745458658\go\src\pkg\runtime\netpoll.goc"
	G *rg, *wg;

	runtime·lock(pd);
	if(pd->closing) {
		runtime·unlock(pd);
		return;
	}
	pd->seq++; // invalidate current timers
	// Reset current timers.
	if(pd->rt.fv) {
		runtime·deltimer(&pd->rt);
		pd->rt.fv = nil;
	}
	if(pd->wt.fv) {
		runtime·deltimer(&pd->wt);
		pd->wt.fv = nil;
	}
	// Setup new timers.
	// A nonzero deadline at or before "now" is normalized to -1,
	// meaning "already expired": no timer is armed and pending IO is
	// unblocked below.
	if(d != 0 && d <= runtime·nanotime())
		d = -1;
	if(mode == 'r' || mode == 'r'+'w')
		pd->rd = d;
	if(mode == 'w' || mode == 'r'+'w')
		pd->wd = d;
	if(pd->rd > 0 && pd->rd == pd->wd) {
		// Identical read and write deadlines share one combined timer.
		pd->rt.fv = &deadlineFn;
		pd->rt.when = pd->rd;
		// Copy current seq into the timer arg.
		// Timer func will check the seq against current descriptor seq,
		// if they differ the descriptor was reused or timers were reset.
		pd->rt.arg.type = (Type*)pd->seq;
		pd->rt.arg.data = pd;
		runtime·addtimer(&pd->rt);
	} else {
		if(pd->rd > 0) {
			pd->rt.fv = &readDeadlineFn;
			pd->rt.when = pd->rd;
			pd->rt.arg.type = (Type*)pd->seq;
			pd->rt.arg.data = pd;
			runtime·addtimer(&pd->rt);
		}
		if(pd->wd > 0) {
			pd->wt.fv = &writeDeadlineFn;
			pd->wt.when = pd->wd;
			pd->wt.arg.type = (Type*)pd->seq;
			pd->wt.arg.data = pd;
			runtime·addtimer(&pd->wt);
		}
	}
	// If we set the new deadline in the past, unblock currently pending IO if any.
	rg = nil;
	runtime·atomicstorep(&wg, nil); // full memory barrier between stores to rd/wd and load of rg/wg in netpollunblock
	if(pd->rd < 0)
		rg = netpollunblock(pd, 'r', false);
	if(pd->wd < 0)
		wg = netpollunblock(pd, 'w', false);
	runtime·unlock(pd);
	// Make the unblocked goroutines runnable outside the lock.
	if(rg)
		runtime·ready(rg);
	if(wg)
		runtime·ready(wg);
}