/* Try to find a symbol table in either MOD->main.elf or MOD->debug.elf.  */
static void
find_symtab (Dwfl_Module *mod)
{
  if (mod->symdata != NULL	/* Already done.  */
      || mod->symerr != DWFL_E_NOERROR)	/* Cached previous failure.  */
    return;

  find_file (mod);
  mod->symerr = mod->elferr;
  if (mod->symerr != DWFL_E_NOERROR)
    return;

  /* First see if the main ELF file has the debugging information.  */
  Elf_Scn *symscn = NULL, *xndxscn = NULL;
  GElf_Word strshndx = 0;
  mod->symerr = load_symtab (&mod->main, &mod->symfile, &symscn,
			     &xndxscn, &mod->syments, &strshndx);
  switch (mod->symerr)
    {
    default:
      return;

    case DWFL_E_NOERROR:
      break;

    case DWFL_E_NO_SYMTAB:
      /* Now we have to look for a separate debuginfo file.  */
      mod->symerr = find_debuginfo (mod);
      switch (mod->symerr)
	{
	default:
	  return;

	case DWFL_E_NOERROR:
	  mod->symerr = load_symtab (&mod->debug, &mod->symfile, &symscn,
				     &xndxscn, &mod->syments, &strshndx);
	  break;

	case DWFL_E_CB:		/* The find_debuginfo hook failed.  */
	  mod->symerr = DWFL_E_NO_SYMTAB;
	  break;
	}

      switch (mod->symerr)
	{
	default:
	  return;

	case DWFL_E_NOERROR:
	  break;

	case DWFL_E_NO_SYMTAB:
	  if (symscn != NULL)
	    {
	      /* We still have the dynamic symbol table.  */
	      mod->symerr = DWFL_E_NOERROR;
	      break;
	    }

	  /* Last ditch, look for dynamic symbols without section headers.  */
	  find_dynsym (mod);
	  return;
	}
      break;
    }

  /* This does some sanity checks on the string table section.  */
  if (elf_strptr (mod->symfile->elf, strshndx, 0) == NULL)
    {
    elferr:
      mod->symerr = DWFL_E (LIBELF, elf_errno ());
      return;
    }

  /* Cache the data; MOD->syments was set above.  */

  mod->symstrdata = elf_getdata (elf_getscn (mod->symfile->elf, strshndx),
				 NULL);
  if (mod->symstrdata == NULL)
    goto elferr;

  if (xndxscn == NULL)
    mod->symxndxdata = NULL;
  else
    {
      mod->symxndxdata = elf_getdata (xndxscn, NULL);
      if (mod->symxndxdata == NULL)
	goto elferr;
    }

  mod->symdata = elf_getdata (symscn, NULL);
  if (mod->symdata == NULL)
    goto elferr;
}
unsigned long plthook_entry(unsigned long *ret_addr, unsigned long child_idx,
			    unsigned long module_id, struct mcount_regs *regs)
{
	struct sym *sym;
	unsigned long child_ip;
	struct mcount_thread_data *mtdp;
	struct mcount_ret_stack *rstack;
	struct ftrace_trigger tr = {
		.flags = 0,
	};
	bool skip = false;
	enum filter_result filtered;

	if (unlikely(mcount_should_stop()))
		return 0;

	mtd.recursion_guard = true;

	mtdp = get_thread_data();
	if (unlikely(check_thread_data(mtdp))) {
		mcount_prepare();

		mtdp = get_thread_data();
		assert(mtdp);
	}

	/*
	 * There was a recursion like below:
	 *
	 * plthook_entry -> mcount_entry -> mcount_prepare -> xmalloc
	 *   -> plthook_entry
	 */
	if (mtdp->plthook_guard)
		goto out;

	if (check_dynsym_idxlist(&skip_idxlist, child_idx))
		goto out;

	sym = find_dynsym(&symtabs, child_idx);
	child_ip = sym ? sym->addr : 0;

	if (child_ip == 0) {
		pr_err_ns("invalid function idx found! (idx: %d, %#lx)\n",
			  (int) child_idx, child_idx);
	}

	pr_dbg3("[%d] enter %lx: %s\n", (int) child_idx, sym->addr, sym->name);

	filtered = mcount_entry_filter_check(mtdp, sym->addr, &tr);
	if (filtered != FILTER_IN) {
		/*
		 * Skip recording but still hook the return address,
		 * otherwise further invocations cannot be traced because
		 * the resolver function overwrites the PLT entry.
		 */
		skip = true;

		/* but if we don't have rstack, just bail out */
		if (filtered == FILTER_RSTACK)
			goto out;
	}

	mtdp->plthook_guard = true;

	rstack = &mtdp->rstack[mtdp->idx++];

	rstack->depth      = mtdp->record_idx;
	rstack->dyn_idx    = child_idx;
	rstack->parent_loc = ret_addr;
	rstack->parent_ip  = *ret_addr;
	rstack->child_ip   = child_ip;
	rstack->start_time = skip ? 0 : mcount_gettime();
	rstack->end_time   = 0;
	rstack->flags      = skip ? MCOUNT_FL_NORECORD : 0;

	mcount_entry_filter_record(mtdp, rstack, &tr, regs);

	*ret_addr = (unsigned long)plthook_return;

	if (check_dynsym_idxlist(&setjmp_idxlist, child_idx))
		setup_jmpbuf_rstack(mtdp, mtdp->idx - 1);
	else if (check_dynsym_idxlist(&longjmp_idxlist, child_idx))
		rstack->flags |= MCOUNT_FL_LONGJMP;
	else if (check_dynsym_idxlist(&vfork_idxlist, child_idx)) {
		rstack->flags |= MCOUNT_FL_VFORK;
		prepare_vfork(mtdp, rstack);
	}

	/* force flush rstack on some special functions */
	if (check_dynsym_idxlist(&flush_idxlist, child_idx)) {
		record_trace_data(mtdp, rstack, NULL);
	}

	if (plthook_dynsym_resolved[child_idx]) {
		volatile unsigned long *resolved_addr =
			plthook_dynsym_addr + child_idx;

		/* ensure resolved address was set */
		while (!*resolved_addr)
			cpu_relax();

		mtdp->recursion_guard = false;
		return *resolved_addr;
	}

	/* GOT[0..2] are reserved for the dynamic linker; PLT slots start at 3 */
	mtdp->plthook_addr = plthook_got_ptr[3 + child_idx];

out:
	mtdp->recursion_guard = false;
	return 0;
}

unsigned long plthook_exit(long *retval)
{
	int dyn_idx;
	unsigned long new_addr;
	struct mcount_thread_data *mtdp;
	struct mcount_ret_stack *rstack;

	mtdp = get_thread_data();
	assert(mtdp);

	mtdp->recursion_guard = true;

again:
	rstack = &mtdp->rstack[mtdp->idx - 1];

	if (unlikely(rstack->flags & (MCOUNT_FL_LONGJMP | MCOUNT_FL_VFORK))) {
		if (rstack->flags & MCOUNT_FL_LONGJMP) {
			restore_jmpbuf_rstack(mtdp, mtdp->idx + 1);
			rstack->flags &= ~MCOUNT_FL_LONGJMP;
			goto again;
		}

		if (rstack->flags & MCOUNT_FL_VFORK)
			setup_vfork(mtdp);
	}

	if (unlikely(vfork_parent))
		rstack = restore_vfork(mtdp, rstack);

	dyn_idx = rstack->dyn_idx;
	if (dyn_idx == MCOUNT_INVALID_DYNIDX)
		pr_err_ns("<%d> invalid dynsym idx: %d\n", mtdp->idx, dyn_idx);

	pr_dbg3("[%d] exit %lx: %s\n", dyn_idx,
		plthook_dynsym_addr[dyn_idx],
		find_dynsym(&symtabs, dyn_idx)->name);

	if (!(rstack->flags & MCOUNT_FL_NORECORD))
		rstack->end_time = mcount_gettime();

	mcount_exit_filter_record(mtdp, rstack, retval);

	if (!plthook_dynsym_resolved[dyn_idx]) {
#ifndef SINGLE_THREAD
		static pthread_mutex_t resolver_mutex = PTHREAD_MUTEX_INITIALIZER;

		pthread_mutex_lock(&resolver_mutex);
#endif
		if (!plthook_dynsym_resolved[dyn_idx]) {
			new_addr = plthook_got_ptr[3 + dyn_idx];

			/* restore the GOT entry so plt_hooker keeps getting called */
			plthook_got_ptr[3 + dyn_idx] = mtdp->plthook_addr;

			plthook_dynsym_addr[dyn_idx] = new_addr;
			plthook_dynsym_resolved[dyn_idx] = true;
		}
#ifndef SINGLE_THREAD
		pthread_mutex_unlock(&resolver_mutex);
#endif
	}

	compiler_barrier();

	mtdp->idx--;
	mtdp->recursion_guard = false;
	mtdp->plthook_guard = false;

	return rstack->parent_ip;
}