// Arrange to call fn with a traceback hz times a second.
void
runtime_setcpuprofilerate(void (*fn)(uintptr*, int32), int32 hz)
{
	// Force sane arguments.
	if(hz < 0)
		hz = 0;
	if(hz == 0)
		fn = nil;
	if(fn == nil)
		hz = 0;

	// Stop profiler on this cpu so that it is safe to lock prof.
	// if a profiling signal came in while we had prof locked,
	// it would deadlock.
	runtime_resetcpuprofiler(0);

	runtime_lock(&prof);
	prof.fn = fn;
	prof.hz = hz;
	runtime_unlock(&prof);
	runtime_lock(&runtime_sched);
	runtime_sched.profilehz = hz;
	runtime_unlock(&runtime_sched);

	if(hz != 0)
		runtime_resetcpuprofiler(hz);
}
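// The three "force sane arguments" checks above collapse any caller mistake
// into one of two states: (fn != nil, hz > 0) meaning "on", or (nil, 0)
// meaning "off".  A minimal standalone sketch of that normalization follows;
// it is illustrative only (proffn, normalize, and dummy are hypothetical
// names, not runtime code).
#include <stdio.h>
#include <stdint.h>

typedef void (*proffn)(uintptr_t*, int32_t);

static void
normalize(proffn *fn, int32_t *hz)
{
	if(*hz < 0)
		*hz = 0;	// negative rates are meaningless
	if(*hz == 0)
		*fn = NULL;	// rate 0 means "off", so drop the callback
	if(*fn == NULL)
		*hz = 0;	// no callback means "off", so drop the rate
}

static void dummy(uintptr_t *pc, int32_t n) { (void)pc; (void)n; }

int
main(void)
{
	proffn fn = dummy;
	int32_t hz = -5;	// nonsense input from a hypothetical caller
	normalize(&fn, &hz);
	printf("fn==NULL: %d, hz: %d\n", fn == NULL, hz);	// 1, 0: "off"
	return 0;
}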
// Free n objects from a span s back into the central free list c.
// Called during sweep.
// Returns true if the span was returned to heap.
bool
runtime_MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end)
{
	int32 size;

	runtime_lock(c);

	// Move to nonempty if necessary.
	if(s->freelist == nil) {
		runtime_MSpanList_Remove(s);
		runtime_MSpanList_Insert(&c->nonempty, s);
	}

	// Add the objects back to s's free list.
	end->next = s->freelist;
	s->freelist = start;
	s->ref -= n;
	c->nfree += n;

	if(s->ref != 0) {
		runtime_unlock(c);
		return false;
	}

	// s is completely freed, return it to the heap.
	size = runtime_class_to_size[c->sizeclass];
	runtime_MSpanList_Remove(s);
	s->needzero = 1;
	s->freelist = nil;
	c->nfree -= (s->npages << PageShift) / size;
	runtime_unlock(c);
	runtime_unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
	runtime_MHeap_Free(&runtime_mheap, s, 0);
	return true;
}
bool
runtime_addfinalizer(void *p, FuncVal *f, const struct __go_func_type *ft)
{
	Fintab *tab;
	byte *base;

	if(debug) {
		if(!runtime_mlookup(p, &base, nil, nil) || p != base)
			runtime_throw("addfinalizer on invalid pointer");
	}

	tab = TAB(p);
	runtime_lock(tab);
	if(f == nil) {
		lookfintab(tab, p, true, nil);
		runtime_unlock(tab);
		return true;
	}

	if(lookfintab(tab, p, false, nil)) {
		runtime_unlock(tab);
		return false;
	}

	if(tab->nkey >= tab->max/2+tab->max/4) {
		// keep table at most 3/4 full:
		// allocate new table and rehash.
		resizefintab(tab);
	}

	addfintab(tab, p, f, ft);
	runtime_setblockspecial(p, true);
	runtime_unlock(tab);
	return true;
}
// Allocate up to n objects from the central free list.
// Return the number of objects allocated.
// The objects are linked together by their first words.
// On return, *pfirst points at the first object.
int32
runtime_MCentral_AllocList(MCentral *c, int32 n, MLink **pfirst)
{
	MLink *first, *last, *v;
	int32 i;

	runtime_lock(c);
	// Replenish central list if empty.
	if(runtime_MSpanList_IsEmpty(&c->nonempty)) {
		if(!MCentral_Grow(c)) {
			runtime_unlock(c);
			*pfirst = nil;
			return 0;
		}
	}

	// Copy from list, up to n.
	// First one is guaranteed to work, because we just grew the list.
	first = MCentral_Alloc(c);
	last = first;
	for(i=1; i<n && (v = MCentral_Alloc(c)) != nil; i++) {
		last->next = v;
		last = v;
	}
	last->next = nil;
	c->nfree -= i;
	runtime_unlock(c);
	*pfirst = first;
	return i;
}
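// "Linked together by their first words": a free object's first word is
// reused as the MLink next pointer, so the free list needs no side storage.
// A minimal standalone sketch of that intrusive-list idea (Link, arena, and
// the sizes are illustrative, not runtime code):
#include <stdio.h>
#include <stdlib.h>

typedef struct Link { struct Link *next; } Link;

int
main(void)
{
	enum { N = 4, OBJSIZE = 32 };	// OBJSIZE must be >= sizeof(Link*)
	unsigned char *arena = malloc(N * OBJSIZE);
	Link *freelist = NULL;
	int i;

	// Thread every object onto the free list through its first word.
	for(i = N-1; i >= 0; i--) {
		Link *obj = (Link*)(arena + i*OBJSIZE);
		obj->next = freelist;
		freelist = obj;
	}

	// Pop two objects, as MCentral_Alloc pops from a span's freelist.
	for(i = 0; i < 2; i++) {
		Link *obj = freelist;
		freelist = obj->next;
		printf("allocated object at offset %d\n",
			(int)((unsigned char*)obj - arena));
	}
	free(arena);
	return 0;
}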
// Return span from an MCache.
void
runtime_MCentral_UncacheSpan(MCentral *c, MSpan *s)
{
	MLink *v;
	int32 cap, n;

	runtime_lock(&c->lock);

	s->incache = false;

	// Move any explicitly freed items from the freebuf to the freelist.
	while((v = s->freebuf) != nil) {
		s->freebuf = v->next;
		runtime_markfreed(v);
		v->next = s->freelist;
		s->freelist = v;
		s->ref--;
	}

	if(s->ref == 0) {
		// Free back to heap.  Unlikely, but possible.
		MCentral_ReturnToHeap(c, s);	// unlocks c
		return;
	}

	cap = (s->npages << PageShift) / s->elemsize;
	n = cap - s->ref;
	if(n > 0) {
		c->nfree += n;
		runtime_MSpanList_Remove(s);
		runtime_MSpanList_Insert(&c->nonempty, s);
	}
	runtime_unlock(&c->lock);
}
int32
runtime_helpgc(bool *extra)
{
	M *mp;
	int32 n, max;

	// Figure out how many CPUs to use.
	// Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
	max = runtime_gomaxprocs;
	if(max > runtime_ncpu)
		max = runtime_ncpu > 0 ? runtime_ncpu : 1;
	if(max > MaxGcproc)
		max = MaxGcproc;

	// We're going to use one CPU no matter what.
	// Figure out the max number of additional CPUs.
	max--;

	runtime_lock(&runtime_sched);
	n = 0;
	while(n < max && (mp = mget(nil)) != nil) {
		n++;
		mp->helpgc = 1;
		mp->waitnextg = 0;
		runtime_notewakeup(&mp->havenextg);
	}
	runtime_unlock(&runtime_sched);
	if(extra)
		*extra = n != max;
	return n;
}
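// The clamp above computes min(gomaxprocs, max(ncpu, 1), MaxGcproc) and then
// subtracts the CPU the collector itself occupies.  A standalone sketch of
// the same arithmetic (the MaxGcproc value and the function name are
// illustrative):
#include <stdio.h>

enum { MaxGcproc = 4 };

static int
gchelpers(int gomaxprocs, int ncpu)
{
	int max = gomaxprocs;
	if(max > ncpu)
		max = ncpu > 0 ? ncpu : 1;
	if(max > MaxGcproc)
		max = MaxGcproc;
	return max - 1;	// one CPU is used no matter what
}

int
main(void)
{
	printf("%d\n", gchelpers(8, 2));	// 1: ncpu is the limit
	printf("%d\n", gchelpers(16, 16));	// 3: MaxGcproc is the limit
	printf("%d\n", gchelpers(1, 0));	// 0: unknown ncpu counts as 1
	return 0;
}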
void
runtime_walkfintab(void (*fn)(void*), void (*scan)(byte *, int64))
{
	void **key;
	void **ekey;
	int32 i;

	if(!__sync_bool_compare_and_swap(&m->holds_finlock, 0, 1))
		runtime_throw("finalizer deadlock");

	for(i=0; i<TABSZ; i++) {
		runtime_lock(&fintab[i]);
		key = fintab[i].fkey;
		ekey = key + fintab[i].max;
		for(; key < ekey; key++)
			if(*key != nil && *key != ((void*)-1))
				fn(*key);
		scan((byte*)&fintab[i].fkey, sizeof(void*));
		scan((byte*)&fintab[i].val, sizeof(void*));
		runtime_unlock(&fintab[i]);
	}

	__sync_bool_compare_and_swap(&m->holds_finlock, 1, 0);

	if(__sync_bool_compare_and_swap(&m->gcing_for_finlock, 1, 0)) {
		runtime_throw("walkfintab not called from gc");
	}
}
// Free n objects from a span s back into the central free list c.
// Called during sweep.
// Returns true if the span was returned to heap.  Sets sweepgen to
// the latest generation.
bool
runtime_MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end)
{
	if(s->incache)
		runtime_throw("freespan into cached span");
	runtime_lock(&c->lock);

	// Move to nonempty if necessary.
	if(s->freelist == nil) {
		runtime_MSpanList_Remove(s);
		runtime_MSpanList_Insert(&c->nonempty, s);
	}

	// Add the objects back to s's free list.
	end->next = s->freelist;
	s->freelist = start;
	s->ref -= n;
	c->nfree += n;

	// Delay updating sweepgen until here.  This is the signal that
	// the span may be used in an MCache, so it must come after the
	// linked list operations above (actually, just after the
	// lock of c above.)
	runtime_atomicstore(&s->sweepgen, runtime_mheap.sweepgen);

	if(s->ref != 0) {
		runtime_unlock(&c->lock);
		return false;
	}

	// s is completely freed, return it to the heap.
	MCentral_ReturnToHeap(c, s);	// unlocks c
	return true;
}
// get finalizer; if del, delete finalizer.
// caller is responsible for updating RefHasFinalizer (special) bit.
bool
runtime_getfinalizer(void *p, bool del, void (**fn)(void*), const struct __go_func_type **ft)
{
	Fintab *tab;
	bool res;
	Fin f;

	if(!__sync_bool_compare_and_swap(&m->holds_finlock, 0, 1))
		runtime_throw("finalizer deadlock");

	tab = TAB(p);
	runtime_lock(tab);
	res = lookfintab(tab, p, del, &f);
	runtime_unlock(tab);

	__sync_bool_compare_and_swap(&m->holds_finlock, 1, 0);
	if(__sync_bool_compare_and_swap(&m->gcing_for_finlock, 1, 0)) {
		__go_run_goroutine_gc(201);
	}

	if(res==false)
		return false;
	*fn = f.fn;
	*ft = f.ft;
	return true;
}
// Sweeps spans in list until it reclaims at least npages pages into the heap.
// Returns the actual number of pages reclaimed.
static uintptr
MHeap_ReclaimList(MHeap *h, MSpan *list, uintptr npages)
{
	MSpan *s;
	uintptr n;
	uint32 sg;

	n = 0;
	sg = runtime_mheap.sweepgen;
retry:
	for(s = list->next; s != list; s = s->next) {
		if(s->sweepgen == sg-2 && runtime_cas(&s->sweepgen, sg-2, sg-1)) {
			runtime_MSpanList_Remove(s);
			// swept spans are at the end of the list
			runtime_MSpanList_InsertBack(list, s);
			runtime_unlock(h);
			n += runtime_MSpan_Sweep(s);
			runtime_lock(h);
			if(n >= npages)
				return n;
			// the span could have been moved elsewhere
			goto retry;
		}
		if(s->sweepgen == sg-1) {
			// the span is being swept by background sweeper, skip
			continue;
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break;
	}
	return n;
}
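// The sweepgen comparisons encode a small state machine relative to the
// heap's current generation sg: sg-2 means "needs sweeping", sg-1 means
// "being swept", sg means "swept".  A standalone sketch of claiming a span
// with the same CAS idiom (Span and claim_for_sweep are illustrative names;
// the GCC builtin is the one this runtime code uses):
#include <stdio.h>
#include <stdint.h>

typedef struct { uint32_t sweepgen; } Span;

// Advance sg-2 -> sg-1 atomically, so exactly one sweeper wins the span.
static int
claim_for_sweep(Span *s, uint32_t sg)
{
	return __sync_bool_compare_and_swap(&s->sweepgen, sg-2, sg-1);
}

int
main(void)
{
	uint32_t sg = 10;
	Span s = { sg - 2 };	// unswept span
	printf("first claim: %d\n", claim_for_sweep(&s, sg));	// 1: we won
	printf("second claim: %d\n", claim_for_sweep(&s, sg));	// 0: lost
	s.sweepgen = sg;	// sweeping done: mark swept
	return 0;
}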
int64
runtime_tickspersecond(void)
{
	int64 res, t0, t1, c0, c1;

	res = (int64)runtime_atomicload64((uint64*)&ticks);
	if(res != 0)
		return ticks;
	runtime_lock(&ticksLock);
	res = ticks;
	if(res == 0) {
		t0 = runtime_nanotime();
		c0 = runtime_cputicks();
		runtime_usleep(100*1000);
		t1 = runtime_nanotime();
		c1 = runtime_cputicks();
		if(t1 == t0)
			t1++;
		res = (c1-c0)*1000*1000*1000/(t1-t0);
		if(res == 0)
			res++;
		runtime_atomicstore64((uint64*)&ticks, res);
	}
	runtime_unlock(&ticksLock);
	return res;
}
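// The calibration arithmetic: (c1-c0) CPU ticks elapsed in (t1-t0)
// nanoseconds, so ticks per second is (c1-c0)*1e9/(t1-t0).  A worked
// standalone example with made-up measurements:
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	int64_t t0 = 0, t1 = 100000000;	// nanotime pair: a 100ms window
	int64_t c0 = 0, c1 = 250000000;	// cputicks pair: 250M ticks
	int64_t res;

	if(t1 == t0)	// guard against a zero-length interval, as above
		t1++;
	res = (c1-c0)*1000*1000*1000/(t1-t0);
	printf("%lld ticks/sec\n", (long long)res);	// 2500000000: 2.5 GHz
	return 0;
}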
// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero)
{
	MSpan *s;

	runtime_lock(h);
	mstats.heap_alloc += (intptr)runtime_m()->mcache->local_cachealloc;
	runtime_m()->mcache->local_cachealloc = 0;
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(large) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
			// Swept spans are at the end of lists.
			if(s->npages < nelem(h->free))
				runtime_MSpanList_InsertBack(&h->busy[s->npages], s);
			else
				runtime_MSpanList_InsertBack(&h->busylarge, s);
		}
	}
	runtime_unlock(h);
	if(s != nil) {
		if(needzero && s->needzero)
			runtime_memclr((byte*)(s->start<<PageShift), s->npages<<PageShift);
		s->needzero = 0;
	}
	return s;
}
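// Zeroing happens after the heap lock is dropped, and only when the caller
// wants zeroed memory AND the span is dirty: freshly mapped pages are
// already zero, so their spans start with needzero == 0, and the free paths
// above (runtime_MCentral_FreeSpan, MCentral_ReturnToHeap) set needzero = 1
// when a span goes back dirty.  A standalone sketch of that flag protocol
// (Span and span_alloc are illustrative names, not runtime code):
#include <stdio.h>
#include <string.h>

typedef struct {
	unsigned char mem[64];
	int needzero;	// set when the span was dirtied, cleared after memset
} Span;

static void
span_alloc(Span *s, int needzero)
{
	int cleared = needzero && s->needzero;
	if(cleared)
		memset(s->mem, 0, sizeof s->mem);	// the expensive path
	s->needzero = 0;
	printf("cleared=%d\n", cleared);
}

int
main(void)
{
	Span s = { {0}, 0 };	// fresh span: pages arrive zeroed from the OS
	span_alloc(&s, 1);	// cleared=0: nothing to do
	s.mem[0] = 42;
	s.needzero = 1;		// span came back dirty
	span_alloc(&s, 1);	// cleared=1: memset required
	span_alloc(&s, 0);	// cleared=0: caller doesn't need zeroed memory
	return 0;
}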
// Called if we receive a SIGPROF signal.
void
runtime_sigprof()
{
	int32 n;

	if(prof.fn == nil || prof.hz == 0)
		return;

	runtime_lock(&prof);
	if(prof.fn == nil) {
		runtime_unlock(&prof);
		return;
	}
	n = runtime_callers(0, prof.pcbuf, nelem(prof.pcbuf));
	if(n > 0)
		prof.fn(prof.pcbuf, n);
	runtime_unlock(&prof);
}
void
runtime_freemcache(MCache *c)
{
	runtime_MCache_ReleaseAll(c);
	runtime_lock(&runtime_mheap);
	runtime_purgecachedstats(c);
	runtime_FixAlloc_Free(&runtime_mheap.cachealloc, c);
	runtime_unlock(&runtime_mheap);
}
// get finalizer; if del, delete finalizer.
// caller is responsible for updating RefHasFinalizer bit.
Finalizer*
runtime_getfinalizer(void *p, bool del)
{
	Finalizer *f;

	runtime_lock(&finlock);
	f = lookfintab(&fintab, p, del);
	runtime_unlock(&finlock);
	return f;
}
// Unlock the scheduler.
static void
schedunlock(void)
{
	M *m;

	m = mwakeup;
	mwakeup = nil;
	runtime_unlock(&runtime_sched);
	if(m != nil)
		runtime_notewakeup(&m->havenextg);
}
void *
__go_allocate_trampoline (uintptr_t size, void *closure)
{
  uintptr_t ptr_size;
  uintptr_t full_size;
  unsigned char *ret;

  /* Because the garbage collector only looks at aligned addresses, we
     need to store the closure at an aligned address to ensure that it
     sees it.  */
  ptr_size = sizeof (void *);
  full_size = (((size + ptr_size - 1) / ptr_size) * ptr_size);
  full_size += ptr_size;

  runtime_lock (&trampoline_lock);

  /* Discard the current page if it does not have enough room left;
     otherwise the trampoline would run off the end of the page.  */
  if (full_size > trampoline_page_size - trampoline_page_used)
    trampoline_page = NULL;

  if (trampoline_page == NULL)
    {
      uintptr_t page_size;
      unsigned char *page;

      page_size = getpagesize ();
      __go_assert (page_size >= full_size);
      page = (unsigned char *) runtime_mallocgc (2 * page_size - 1, 0, 0, 0);
      page = (unsigned char *) (((uintptr_t) page + page_size - 1)
				& ~ (page_size - 1));

#ifdef HAVE_SYS_MMAN_H
      {
	int i;

	i = mprotect (page, page_size, PROT_READ | PROT_WRITE | PROT_EXEC);
	__go_assert (i == 0);
      }
#endif

      trampoline_page = page;
      trampoline_page_size = page_size;
      trampoline_page_used = 0;
    }

  ret = trampoline_page + trampoline_page_used;
  trampoline_page_used += full_size;

  runtime_unlock (&trampoline_lock);

  __builtin_memcpy (ret + full_size - ptr_size, &closure, ptr_size);

  return (void *) ret;
}
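/* The size arithmetic rounds size up to a multiple of the pointer size,
   then reserves one extra pointer-sized slot at the end for the closure
   word.  A standalone check of that round-up formula (the function name is
   illustrative, not part of the runtime):  */
#include <stdio.h>
#include <stdint.h>

static uintptr_t
trampoline_full_size (uintptr_t size)
{
  uintptr_t ptr_size = sizeof (void *);
  uintptr_t full_size = ((size + ptr_size - 1) / ptr_size) * ptr_size;
  return full_size + ptr_size;
}

int
main (void)
{
  /* With 8-byte pointers: 13 rounds up to 16, plus 8 for the closure = 24;
     16 is already aligned, so it also yields 24.  */
  printf ("%llu\n", (unsigned long long) trampoline_full_size (13));
  printf ("%llu\n", (unsigned long long) trampoline_full_size (16));
  return 0;
}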
// Free the list of objects back into the central free list.
void
runtime_MCentral_FreeList(MCentral *c, MLink *start)
{
	MLink *next;

	runtime_lock(c);
	for(; start != nil; start = next) {
		next = start->next;
		MCentral_Free(c, start);
	}
	runtime_unlock(c);
}
int32
runtime_netpollopen(uintptr fd, PollDesc *pd)
{
	byte b;

	runtime_lock(&selectlock);

	if((int)fd >= allocated) {
		int c;
		PollDesc **n;

		c = allocated;

		runtime_unlock(&selectlock);

		while((int)fd >= c)
			c *= 2;
		n = runtime_mallocgc(c * sizeof(PollDesc *), 0,
				     FlagNoScan|FlagNoProfiling|FlagNoInvokeGC);

		runtime_lock(&selectlock);

		if(c > allocated) {
			__builtin_memcpy(n, data, allocated * sizeof(PollDesc *));
			allocated = c;
			data = n;
		}
	}
	FD_SET(fd, &fds);
	data[fd] = pd;

	runtime_unlock(&selectlock);

	b = 0;
	write(wrwake, &b, sizeof b);

	return 0;
}
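// The grow path above drops selectlock around the allocation (which may
// itself take locks or trigger a collection) and re-checks c > allocated
// after relocking, because another thread may have grown the table in the
// window.  A standalone sketch of that drop-allocate-recheck pattern (a
// pthread mutex stands in for the runtime lock; all names are illustrative):
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t tablelock = PTHREAD_MUTEX_INITIALIZER;
static void **data;
static int allocated = 16;

static void
ensure_capacity(int fd)
{
	pthread_mutex_lock(&tablelock);
	if(fd >= allocated) {
		int c = allocated;
		void **n;

		pthread_mutex_unlock(&tablelock);	// never allocate under the lock
		while(fd >= c)
			c *= 2;		// double until fd fits
		n = calloc(c, sizeof(void *));

		pthread_mutex_lock(&tablelock);
		if(c > allocated) {	// still the largest: install our table
			memcpy(n, data, allocated * sizeof(void *));
			allocated = c;
			data = n;	// the runtime leaves the old table to the
					// GC; this sketch simply leaks it
		} else
			free(n);	// lost the race: someone grew it further
	}
	pthread_mutex_unlock(&tablelock);
}

int
main(void)
{
	data = calloc(allocated, sizeof(void *));
	ensure_capacity(100);	// grows 16 -> 128
	free(data);
	return 0;
}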
bool
runtime_addfinalizer(void *p, void (*f)(void*), const struct __go_func_type *ft)
{
	Fintab *tab;
	byte *base;
	bool ret = false;

	if(debug) {
		if(!runtime_mlookup(p, &base, nil, nil) || p != base)
			runtime_throw("addfinalizer on invalid pointer");
	}

	if(!__sync_bool_compare_and_swap(&m->holds_finlock, 0, 1))
		runtime_throw("finalizer deadlock");

	tab = TAB(p);
	runtime_lock(tab);
	if(f == nil) {
		if(lookfintab(tab, p, true, nil))
			runtime_setblockspecial(p, false);
		ret = true;
		goto unlock;
	}

	if(lookfintab(tab, p, false, nil)) {
		ret = false;
		goto unlock;
	}

	if(tab->nkey >= tab->max/2+tab->max/4) {
		// keep table at most 3/4 full:
		// allocate new table and rehash.
		resizefintab(tab);
	}

	addfintab(tab, p, f, ft);
	runtime_setblockspecial(p, true);
	ret = true;

unlock:
	runtime_unlock(tab);

	__sync_bool_compare_and_swap(&m->holds_finlock, 1, 0);

	if(__sync_bool_compare_and_swap(&m->gcing_for_finlock, 1, 0)) {
		__go_run_goroutine_gc(200);
	}

	return ret;
}
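// The resize trigger nkey >= max/2 + max/4 keeps the table at most 3/4
// full; writing 3/4 as two divisions also avoids the overflow that 3*max/4
// could hit for a very large max.  A standalone check of the threshold
// (should_resize is an illustrative name):
#include <stdio.h>
#include <stdint.h>

static int
should_resize(int32_t nkey, int32_t max)
{
	return nkey >= max/2 + max/4;
}

int
main(void)
{
	printf("%d\n", should_resize(5, 8));	// 0: 5 < 4+2, still room
	printf("%d\n", should_resize(6, 8));	// 1: 6 >= 6, time to rehash
	return 0;
}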
void
runtime_walkfintab(void (*fn)(void*), void (*scan)(byte *, int64))
{
	void **key;
	void **ekey;

	scan((byte*)&fintab, sizeof fintab);
	runtime_lock(&finlock);
	key = fintab.key;
	ekey = key + fintab.max;
	for(; key < ekey; key++)
		if(*key != nil && *key != ((void*)-1))
			fn(*key);
	runtime_unlock(&finlock);
}
// Return s to the heap.  s must be unused (s->ref == 0).  Unlocks c.
static void
MCentral_ReturnToHeap(MCentral *c, MSpan *s)
{
	int32 size;

	size = runtime_class_to_size[c->sizeclass];
	runtime_MSpanList_Remove(s);
	s->needzero = 1;
	s->freelist = nil;
	if(s->ref != 0)
		runtime_throw("ref wrong");
	c->nfree -= (s->npages << PageShift) / size;
	runtime_unlock(&c->lock);
	runtime_unmarkspan((byte*)(s->start<<PageShift), s->npages<<PageShift);
	runtime_MHeap_Free(&runtime_mheap, s, 0);
}
// get finalizer; if del, delete finalizer.
// caller is responsible for updating RefHasFinalizer (special) bit.
bool
runtime_getfinalizer(void *p, bool del, FuncVal **fn, const struct __go_func_type **ft)
{
	Fintab *tab;
	bool res;
	Fin f;

	tab = TAB(p);
	runtime_lock(tab);
	res = lookfintab(tab, p, del, &f);
	runtime_unlock(tab);
	if(res==false)
		return false;
	*fn = f.fn;
	*ft = f.ft;
	return true;
}
int32
runtime_netpollclose(uintptr fd)
{
	byte b;

	runtime_lock(&selectlock);

	FD_CLR(fd, &fds);
	data[fd] = nil;

	runtime_unlock(&selectlock);

	b = 0;
	write(wrwake, &b, sizeof b);

	return 0;
}
// Free n objects back into the central free list.
// The objects are linked together by their first words.
void
runtime_MCentral_FreeList(MCentral *c, int32 n, MLink *start)
{
	MLink *v, *next;

	// Assume next == nil marks end of list.
	// n and end would be useful if we implemented
	// the transfer cache optimization in the TODO above.
	USED(n);

	runtime_lock(c);
	for(v=start; v; v=next) {
		next = v->next;
		MCentral_Free(c, v);
	}
	runtime_unlock(c);
}
// Allocate a new span of npage pages from the heap
// and record its size class in the HeapMap and HeapMapCache.
MSpan*
runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct)
{
	MSpan *s;

	runtime_lock(h);
	runtime_purgecachedstats(m);
	s = MHeap_AllocLocked(h, npage, sizeclass);
	if(s != nil) {
		mstats.heap_inuse += npage<<PageShift;
		if(acct) {
			mstats.heap_objects++;
			mstats.heap_alloc += npage<<PageShift;
		}
	}
	runtime_unlock(h);
	return s;
}
void
runtime_dopanic(int32 unused __attribute__ ((unused)))
{
	G *g;
	static bool didothers;
	bool crash;
	int32 t;

	g = runtime_g();
	if(g->sig != 0)
		runtime_printf("[signal %x code=%p addr=%p]\n",
			g->sig, (void*)g->sigcode0, (void*)g->sigcode1);

	if((t = runtime_gotraceback(&crash)) > 0){
		if(g != runtime_m()->g0) {
			runtime_printf("\n");
			runtime_goroutineheader(g);
			runtime_traceback();
			runtime_printcreatedby(g);
		} else if(t >= 2 || runtime_m()->throwing > 0) {
			runtime_printf("\nruntime stack:\n");
			runtime_traceback();
		}
		if(!didothers) {
			didothers = true;
			runtime_tracebackothers(g);
		}
	}

	runtime_unlock(&paniclk);
	if(runtime_xadd(&runtime_panicking, -1) != 0) {
		// Some other m is panicking too.
		// Let it print what it needs to print.
		// Wait forever without chewing up cpu.
		// It will exit when it's done.
		static Lock deadlock;
		runtime_lock(&deadlock);
		runtime_lock(&deadlock);
	}

	if(crash)
		runtime_crash();

	runtime_exit(2);
}
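// The static `deadlock` Lock acquired twice parks the losing thread forever
// without spinning: the runtime's Lock simply never grants the second
// acquire.  Relocking a default POSIX mutex is formally undefined behavior,
// so a well-defined standalone equivalent parks on a semaphore that is never
// posted (wait_forever and parked are illustrative names, not runtime code):
#include <semaphore.h>
#include <stdio.h>

static sem_t parked;

static void
wait_forever(void)
{
	sem_init(&parked, 0, 0);	// count 0: the first wait blocks
	for(;;)
		sem_wait(&parked);	// never posted, so never returns
}

int
main(void)
{
	printf("parking this thread forever...\n");
	wait_forever();
	return 0;	// unreachable
}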
void
runtime_walkfintab(void (*fn)(void*), void (*addroot)(Obj))
{
	void **key;
	void **ekey;
	int32 i;

	for(i=0; i<TABSZ; i++) {
		runtime_lock(&fintab[i]);
		key = fintab[i].fkey;
		ekey = key + fintab[i].max;
		for(; key < ekey; key++)
			if(*key != nil && *key != ((void*)-1))
				fn(*key);
		addroot((Obj){(byte*)&fintab[i].fkey, sizeof(void*), 0});
		addroot((Obj){(byte*)&fintab[i].val, sizeof(void*), 0});
		runtime_unlock(&fintab[i]);
	}
}
void
runtime_walkfintab(void (*fn)(void*), void (*scan)(byte *, int64))
{
	void **key;
	void **ekey;
	int32 i;

	for(i=0; i<TABSZ; i++) {
		runtime_lock(&fintab[i]);
		key = fintab[i].fkey;
		ekey = key + fintab[i].max;
		for(; key < ekey; key++)
			if(*key != nil && *key != ((void*)-1))
				fn(*key);
		scan((byte*)&fintab[i].fkey, sizeof(void*));
		scan((byte*)&fintab[i].val, sizeof(void*));
		runtime_unlock(&fintab[i]);
	}
}
// get finalizer; if del, delete finalizer.
// caller is responsible for updating RefHasFinalizer bit.
Finalizer*
runtime_getfinalizer(void *p, bool del)
{
	Finalizer *f;

	if(!__sync_bool_compare_and_swap(&m->holds_finlock, 0, 1))
		runtime_throw("finalizer deadlock");

	runtime_lock(&finlock);
	f = lookfintab(&fintab, p, del);
	runtime_unlock(&finlock);

	__sync_bool_compare_and_swap(&m->holds_finlock, 1, 0);
	if(__sync_bool_compare_and_swap(&m->gcing_for_finlock, 1, 0)) {
		__go_run_goroutine_gc(201);
	}

	return f;
}
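// The holds_finlock CAS pair acts as a reentrancy guard: the 0 -> 1 swap
// fails if this M already holds the finalizer lock, turning a would-be
// self-deadlock into an immediate throw.  A standalone sketch of the idiom
// (the thread-local flag and the function names are illustrative):
#include <stdio.h>

static __thread int holds_finlock;

static void
enter(void)
{
	if(!__sync_bool_compare_and_swap(&holds_finlock, 0, 1)) {
		printf("finalizer deadlock detected\n");	// the runtime throws here
		return;
	}
	printf("flag acquired, safe to take finlock\n");
}

static void
leave(void)
{
	__sync_bool_compare_and_swap(&holds_finlock, 1, 0);
}

int
main(void)
{
	enter();	// acquires the flag
	enter();	// reentry: deadlock detected
	leave();
	return 0;
}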