runtime·findfunc(uintptr addr) { Func *f; int32 nf, n; // Use atomic double-checked locking, // because when called from pprof signal // handler, findfunc must run without // grabbing any locks. // (Before enabling the signal handler, // SetCPUProfileRate calls findfunc to trigger // the initialization outside the handler.) // Avoid deadlock on fault during malloc // by not calling buildfuncs if we're already in malloc. if(!m->mallocing && !m->gcing) { if(runtime·atomicload(&funcinit) == 0) { runtime·lock(&funclock); if(funcinit == 0) { buildfuncs(); runtime·atomicstore(&funcinit, 1); } runtime·unlock(&funclock); } } if(nfunc == 0) return nil; if(addr < func[0].entry || addr >= func[nfunc].entry) return nil; // binary search to find func with entry <= addr. f = func; nf = nfunc; while(nf > 0) { n = nf/2; if(f[n].entry <= addr && addr < f[n+1].entry) return &f[n]; else if(addr < f[n].entry) nf = n; else { f += n+1; nf -= n+1; } } // can't get here -- we already checked above // that the address was in the table bounds. // this can only happen if the table isn't sorted // by address or if the binary search above is buggy. runtime·prints("findfunc unreachable\n"); return nil; }
// findfunc locates the Func whose entry point is the closest one at
// or below addr. Returns nil when the table is empty or addr falls
// outside [func[0].entry, func[nfunc].entry).
Func*
findfunc(uintptr addr)
{
	Func *lo;
	int32 remain, mid;

	// Build the function table on first use, guarded by funclock.
	lock(&funclock);
	if(func == nil)
		buildfuncs();
	unlock(&funclock);

	if(nfunc == 0)
		return nil;
	if(addr < func[0].entry || addr >= func[nfunc].entry)
		return nil;

	// Binary search over [lo, lo+remain) for the entry satisfying
	// entry <= addr < next entry.
	lo = func;
	remain = nfunc;
	while(remain > 0) {
		mid = remain/2;
		if(lo[mid].entry <= addr && addr < lo[mid+1].entry)
			return &lo[mid];
		if(addr < lo[mid].entry) {
			// Target is in the left half.
			remain = mid;
		} else {
			// Target is in the right half, past mid.
			lo += mid+1;
			remain -= mid+1;
		}
	}

	// Unreachable: the range check above guarantees some entry
	// brackets addr, unless the table is unsorted by address or
	// the search above is buggy.
	prints("findfunc unreachable\n");
	return nil;
}