int lastpc_platform_reg_dump(char *buf) { /* Get core numbers */ int cnt = num_possible_cpus(); char *ptr = buf; unsigned long pc_value; unsigned long fp_value; unsigned long sp_value; unsigned long size = 0; unsigned long offset = 0; char str[KSYM_SYMBOL_LEN]; int i; int cluster; int cpu_in_cluster; int mcu_base; #ifdef CONFIG_OF mcu_base=(unsigned int )MCUSYS_CFGREG_BASE; mcu_base = mcu_base + 0x410; #else mcu_base=reg_dump_driver_data.mcu_regs; #endif #if 0 if(cnt < 0) return ret; #endif #ifdef CONFIG_64BIT /* Get PC, FP, SP and save to buf */ for (i = 0; i < cnt; i++) { cluster = i / 4; cpu_in_cluster = i % 4; pc_value = readl(IOMEM((mcu_base+0x0) + (cpu_in_cluster << 5) + (0x100 * cluster))); fp_value = readl(IOMEM((mcu_base+0x10) + (cpu_in_cluster << 5) + (0x100 * cluster))); sp_value = readl(IOMEM((mcu_base+0x18) + (cpu_in_cluster << 5) + (0x100 * cluster))); kallsyms_lookup((unsigned long)pc_value, &size, &offset, NULL, str); ptr += sprintf(ptr, "[LAST PC] CORE_%d PC = 0x%lx(%s + 0x%lx), FP = 0x%lx, SP = 0x%lx\n", i, pc_value, str, offset, fp_value, sp_value); printk("[LAST PC] CORE_%d PC = 0x%lx(%s), FP = 0x%lx, SP = 0x%lx\n", i, pc_value, str, fp_value, sp_value); } #else printk("[LAST PC] mcu_base 0x%x,cnt 0x%x\n",mcu_base,cnt); /* Get PC, FP, SP and save to buf */ for (i = 0; i < cnt; i++) { cluster = i / 4; cpu_in_cluster = i % 4; pc_value = readl(IOMEM((mcu_base+0x0) + (cpu_in_cluster << 5) + (0x100 * cluster))); fp_value = readl(IOMEM((mcu_base+0x8) + (cpu_in_cluster << 5) + (0x100 * cluster))); sp_value = readl(IOMEM((mcu_base+0xc) + (cpu_in_cluster << 5) + (0x100 * cluster))); kallsyms_lookup((unsigned long)pc_value, &size, &offset, NULL, str); ptr += sprintf(ptr, "[LAST PC] CORE_%d PC = 0x%lx(%s + 0x%lx), FP = 0x%lx, SP = 0x%lx\n", i, pc_value, str, offset, fp_value, sp_value); printk("[LAST PC] CORE_%d PC = 0x%lx(%s), FP = 0x%lx, SP = 0x%lx\n", i, pc_value, str, fp_value, sp_value); } #endif return 0; }
/*
 * lastpc_plt_dump - format the last PC/FP/SP latched for every possible CPU.
 * @plt: platform descriptor; latch registers start at common->base + 0x300.
 * @buf: output text buffer, filled with unbounded sprintf() - caller sizes it.
 * @len: buffer length; NOTE(review): currently unused, no bounds check is done.
 *
 * Each core owns a 0x10-byte register stride: PC @ +0x0, FP @ +0x4, SP @ +0x8.
 * Returns 0 on success, -1 if the possible-CPU count were negative
 * (defensive check; cannot happen in practice).
 */
static int lastpc_plt_dump(struct lastpc_plt *plt, char *buf, int len)
{
	void __iomem *mcu_base = plt->common->base + 0x300;
	int ret = -1, cnt = num_possible_cpus();
	char *ptr = buf;
	unsigned long pc_value;
	unsigned long fp_value;
	unsigned long sp_value;
	unsigned long size = 0;
	unsigned long offset = 0;
	char str[KSYM_SYMBOL_LEN];
	int i;

	if (cnt < 0)
		return ret;

	/* Get PC, FP, SP and save to buf */
	for (i = 0; i < cnt; i++) {
		pc_value = readl(IOMEM(mcu_base + (i * 0x10)));
		fp_value = readl(IOMEM(mcu_base + 0x4 + (i * 0x10)));
		sp_value = readl(IOMEM(mcu_base + 0x8 + (i * 0x10)));
		/* Resolve the latched PC to a kernel symbol name. */
		kallsyms_lookup((unsigned long)pc_value, &size, &offset, NULL, str);
		ptr += sprintf(ptr, "[LAST PC] CORE_%d PC = 0x%lx(%s + 0x%lx), FP = 0x%lx, SP = 0x%lx\n",
			       i, pc_value, str, offset, fp_value, sp_value);
		pr_notice("[LAST PC] CORE_%d PC = 0x%lx(%s), FP = 0x%lx, SP = 0x%lx\n",
			  i, pc_value, str, fp_value, sp_value);
	}

	return 0;
}
/*
 * reg_dump_platform - append each possible CPU's latched PC/FP/SP to @buf.
 * @buf: caller-supplied text buffer (unbounded sprintf - caller sizes it).
 *
 * Layout: 12 bytes per core within a cluster (PC @ +0x0, FP @ +0x4,
 * SP @ +0x8), clusters 0x100 apart, based at reg_dump_driver_data.mcu_regs.
 * Returns 0 on success; -1 if the CPU count were negative (defensive).
 */
int reg_dump_platform(char *buf)
{
	/* Get core numbers */
	int ret = -1, cnt = num_possible_cpus();
	char *ptr = buf;
	unsigned int pc_value;
	unsigned int fp_value;
	unsigned int sp_value;
	unsigned long size = 0;
	unsigned long offset = 0;
	char str[KSYM_SYMBOL_LEN];
	int i;
	int cluster;
	int cpu_in_cluster;

	if (cnt < 0)
		return ret;

	/* Get PC, FP, SP and save to buf */
	for (i = 0; i < cnt; i++) {
		cluster = i / 4;		/* four cores per cluster */
		cpu_in_cluster = i % 4;
		pc_value = readl(IOMEM((reg_dump_driver_data.mcu_regs + 0x0) + (cpu_in_cluster * 12) + (0x100 * cluster)));
		fp_value = readl(IOMEM((reg_dump_driver_data.mcu_regs + 0x4) + (cpu_in_cluster * 12) + (0x100 * cluster)));
		sp_value = readl(IOMEM((reg_dump_driver_data.mcu_regs + 0x8) + (cpu_in_cluster * 12) + (0x100 * cluster)));
		/* Symbolize the PC for the human-readable dump. */
		kallsyms_lookup((unsigned long)pc_value, &size, &offset, NULL, str);
		ptr += sprintf(ptr, "CORE_%d PC = 0x%x(%s + 0x%lx), FP = 0x%x, SP = 0x%x\n",
			       i, pc_value, str, offset, fp_value, sp_value);
		/* Matching printk kept disabled in the original. */
		//printk("CORE_%d PC = 0x%x(%s), FP = 0x%x, SP = 0x%x\n", i, pc_value, str, fp_value, sp_value);
	}

	return 0;
}
/* Need to know about CPUs going up/down? */
/*
 * register_cpu_notifier - add @nb to the CPU hotplug notifier chain.
 * Returns the raw_notifier_chain_register() result (0 on success).
 * With MTK_CPU_HOTPLUG_DEBUG set, logs each registration with a running
 * index and, when kallsyms is available, the symbolized callback name.
 */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
#ifdef MTK_CPU_HOTPLUG_DEBUG
	static int index = 0;	/* registration counter, debug output only */
#ifdef CONFIG_KALLSYMS
	char namebuf[128] = {0};
	const char *symname;

	symname = kallsyms_lookup((unsigned long)nb->notifier_call, NULL, NULL, NULL, namebuf);
	if (symname)
		printk("[cpu_ntf] <%02d>%08lx (%s)\n", index++,
		       (unsigned long)nb->notifier_call, symname);
	else
		printk("[cpu_ntf] <%02d>%08lx\n", index++,
		       (unsigned long)nb->notifier_call);
#else /* #ifdef CONFIG_KALLSYMS */
	printk("[cpu_ntf] <%02d>%08lx\n", index++,
	       (unsigned long)nb->notifier_call);
#endif /* #ifdef CONFIG_KALLSYMS */
#endif /* #ifdef MTK_CPU_HOTPLUG_DEBUG */
	/* Serialize against CPU hotplug while mutating the chain. */
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}
/*
 * frame_info_init - precompute frame layouts for functions in the
 * scheduler and lock text sections, used later by the stack unwinder.
 *
 * With CONFIG_KALLSYMS the code walks symbols from __sched_text_start,
 * jumping to __lock_text_start when the sched section ends, recording
 * each function's start and size into mfinfo[]; the entry matching
 * schedule() is remembered in schedule_frame. Without kallsyms only
 * schedule() itself is registered.
 *
 * Returns 0.
 */
static int __init frame_info_init(void)
{
	int i;
#ifdef CONFIG_KALLSYMS
	char *modname;
	char namebuf[KSYM_NAME_LEN + 1];
	unsigned long start, size, ofs;
	extern char __sched_text_start[], __sched_text_end[];
	extern char __lock_text_start[], __lock_text_end[];

	start = (unsigned long)__sched_text_start;
	for (i = 0; i < ARRAY_SIZE(mfinfo); i++) {
		if (start == (unsigned long)schedule)
			schedule_frame = &mfinfo[i];
		if (!kallsyms_lookup(start, &size, &ofs, &modname, namebuf))
			break;
		mfinfo[i].func = (void *)(start + ofs);
		mfinfo[i].func_size = size;
		/* Advance to the next symbol in the section. */
		start += size - ofs;
		if (start >= (unsigned long)__lock_text_end)
			break;
		if (start == (unsigned long)__sched_text_end)
			start = (unsigned long)__lock_text_start;
	}
#else
	mfinfo[0].func = schedule;
	schedule_frame = &mfinfo[0];
#endif
	/* Analyze the prologue of every function we recorded. */
	for (i = 0; i < ARRAY_SIZE(mfinfo) && mfinfo[i].func; i++)
		get_frame_info(&mfinfo[i]);
	mfinfo_num = i;
	return 0;
}
/*
 * check_memleak - report allocation call sites whose alloc and free counts
 * do not match, restricted to callers within [startAddr, endAddr].
 * @startAddr: lowest caller address to report (inclusive).
 * @endAddr:   highest caller address to report (inclusive).
 *
 * Scans the whole kma_caller[] table; for each mismatched entry inside the
 * range, prints totals plus the symbolized caller (with kallsyms) or the
 * raw caller pointer (without).
 */
void check_memleak(const void *startAddr, const void *endAddr)
{
	int i;
	struct kma_caller *c;

	for (i = 0; i < MAX_CALLER_TABLE; i++) {
		/* Only call sites with outstanding allocations. */
		if (kma_caller[i].frees != kma_caller[i].allocs) {
			if ((void *)startAddr <= (void *)(kma_caller[i].caller) &&
			    (void *)(kma_caller[i].caller) <= (void *)endAddr) {
#ifdef CONFIG_KALLSYMS
				char *modname;
				const char *name;
				unsigned long offset = 0, size;
				char namebuf[128];

				c = &kma_caller[i];
				/* Fix: cast through unsigned long, not int -
				 * an int cast truncates the caller address
				 * on 64-bit kernels before symbol lookup. */
				name = kallsyms_lookup((unsigned long)c->caller,
						       &size, &offset, &modname, namebuf);
				printk("%8ld %8ld %8ld %5d/%-5d %s+0x%lx\n",
				       c->total, c->slack, c->net,
				       c->allocs, c->frees, name, offset);
#else
				/* Fix: this branch indexed kma_caller[n] with
				 * an undeclared 'n'; the loop variable is 'i',
				 * so the !KALLSYMS build did not compile. */
				c = &kma_caller[i];
				printk("%8d %8d %8d %5d/%-5d %p\n",
				       c->total, c->slack, c->net,
				       c->allocs, c->frees, c->caller);
#endif
			}
		}
	}
}
/*
 * This method wraps the backtracer's more generic support.
 * It is only invoked from the architecture-specific code; show_stack()
 * and dump_stack() (in entry.S) are architecture-independent entry points.
 *
 * @kbt:     initialized backtrace iterator to walk.
 * @headers: when non-zero, print banner/footer lines around the dump.
 */
void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
{
	int i;

	if (headers) {
		/*
		 * Add a blank line since if we are called from panic(),
		 * then bust_spinlocks() spit out a space in front of us
		 * and it will mess up our KERN_ERR.
		 */
		pr_err("\n");
		pr_err("Starting stack dump of tid %d, pid %d (%s)"
		       " on cpu %d at cycle %lld\n",
		       kbt->task->pid, kbt->task->tgid, kbt->task->comm,
		       smp_processor_id(), get_cycles());
	}
	kbt->verbose = 1;
	i = 0;
	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
		char *modname;
		const char *name;
		unsigned long address = kbt->it.pc;
		unsigned long offset, size;
		char namebuf[KSYM_NAME_LEN+100];

		/* Only kernel-space PCs can be symbolized. */
		if (address >= PAGE_OFFSET)
			name = kallsyms_lookup(address, &size, &offset,
					       &modname, namebuf);
		else
			name = NULL;

		if (!name)
			namebuf[0] = '\0';
		else {
			/* Append "+offset/size " and optionally "[module] "
			 * after the symbol name already in namebuf. */
			size_t namelen = strlen(namebuf);
			size_t remaining = (sizeof(namebuf) - 1) - namelen;
			char *p = namebuf + namelen;
			int rc = snprintf(p, remaining, "+%#lx/%#lx ",
					  offset, size);
			if (modname && rc < remaining)
				snprintf(p + rc, remaining - rc,
					 "[%s] ", modname);
			namebuf[sizeof(namebuf)-1] = '\0';
		}

		pr_err(" frame %d: 0x%lx %s(sp 0x%lx)\n",
		       i++, address, namebuf, (unsigned long)(kbt->it.sp));

		/* Cap runaway backtraces. */
		if (i >= 100) {
			pr_err("Stack dump truncated"
			       " (%d frames)\n", i);
			break;
		}
	}
	if (kbt->end == KBT_LOOP)
		pr_err("Stack dump stopped; next frame identical to this one\n");
	if (headers)
		pr_err("Stack dump complete\n");
}
/*
 * find_syscall_meta - map a syscall entry address back to its metadata
 * record in the __syscalls_metadata section.
 * @syscall: kernel address of the syscall handler.
 *
 * The handler address is symbolized, then matched by name against every
 * metadata entry. The first three characters of both names are skipped in
 * the comparison - presumably to tolerate differing prefix spellings
 * (e.g. "sys" vs "SyS"); confirm against this tree's naming convention.
 *
 * Returns the matching metadata, or NULL if none matches.
 */
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata *start;
	struct syscall_metadata *stop;
	char str[KSYM_SYMBOL_LEN];

	start = (struct syscall_metadata *)__start_syscalls_metadata;
	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	for ( ; start < stop; start++) {
		if (start->name && !strcmp(start->name + 3, str + 3))
			return start;
	}
	return NULL;
}
/*
 * printk_address - print a kernel address, symbolized when possible.
 * @address: kernel text address to describe.
 *
 * Prints "[<addr>]" when the symbol cannot be resolved, otherwise
 * "<addr>{:module:symbol+offset}" (module and its delimiters collapse to
 * empty strings for core-kernel symbols). Returns the printk() result.
 * No trailing newline - intended to be embedded in a larger oops line.
 */
int printk_address(unsigned long address)
{
	unsigned long offset = 0, symsize;
	const char *symname;
	char *modname;
	char *delim = ":";
	char namebuf[128];

	symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);
	if (!symname)
		return printk("[<%016lx>]", address);
	if (!modname)
		modname = delim = "";
	/*
	 * Fix: 'offset' is unsigned long but was passed to %+ld - a printf
	 * format/argument type mismatch (undefined behavior per the printf
	 * contract). Cast to long; output text is unchanged for all
	 * realistic offsets.
	 */
	return printk("<%016lx>{%s%s%s%s%+ld}", address, delim, modname,
		      delim, symname, (long)offset);
}
/*
 * reg_dump_platform - append each possible CPU's last PC (and FP/SP for
 * cluster 0) to @buf, reading through the debug MUX registers.
 * @buf: caller-supplied text buffer (unbounded sprintf - caller sizes it).
 *
 * Cluster 0 (CA7): a selector (LASTPC/LASTSP/LASTFP + core index) is
 * written to the MUX control register, then the value is read back.
 * Other clusters (CA17): only two PC samples (i0/i1) are read, selected
 * via LASTPC_MAGIC_NUM[]. The write-then-read ordering is the hardware
 * protocol - do not reorder.
 *
 * Returns 0 on success; -1 if the CPU count were negative (defensive).
 */
int reg_dump_platform(char *buf)
{
	/* Get core numbers */
	int ret = -1, cnt = num_possible_cpus();
	char *ptr = buf;
	unsigned int pc_value;
	unsigned int pc_i1_value;
	unsigned int fp_value;
	unsigned int sp_value;
	unsigned long size = 0;
	unsigned long offset = 0;
	char str[KSYM_SYMBOL_LEN];
	int i;
	int cluster, cpu_in_cluster;

	if (cnt < 0)
		return ret;

	/* Get PC, FP, SP and save to buf */
	for (i = 0; i < cnt; i++) {
		cluster = i / 4;		/* four cores per cluster */
		cpu_in_cluster = i % 4;
		if (cluster == 0) {
			/* Select each register through the CA7 MUX, then read. */
			writel(LASTPC + i, MUX_CONTOL_CA7_REG);
			pc_value = readl(MUX_READ_CA7_REG);
			writel(LASTSP + i, MUX_CONTOL_CA7_REG);
			sp_value = readl(MUX_READ_CA7_REG);
			writel(LASTFP + i, MUX_CONTOL_CA7_REG);
			fp_value = readl(MUX_READ_CA7_REG);
			kallsyms_lookup((unsigned long)pc_value, &size, &offset, NULL, str);
			ptr += sprintf(ptr, "CORE_%d PC = 0x%x(%s + 0x%lx), FP = 0x%x, SP = 0x%x\n",
				       i, pc_value, str, offset, fp_value, sp_value);
		} else {
			/* CA17 cluster exposes two raw PC samples only. */
			writel(LASTPC_MAGIC_NUM[cpu_in_cluster], MUX_CONTOL_CA17_REG);
			pc_value = readl(MUX_READ_CA17_REG);
			writel(LASTPC_MAGIC_NUM[cpu_in_cluster] + 1, MUX_CONTOL_CA17_REG);
			pc_i1_value = readl(MUX_READ_CA17_REG);
			ptr += sprintf(ptr, "CORE_%d PC_i0 = 0x%x, PC_i1 = 0x%x\n",
				       i, pc_value, pc_i1_value);
		}
	}
	/* Matching printk kept disabled in the original. */
	//printk("CORE_%d PC = 0x%x(%s), FP = 0x%x, SP = 0x%x\n", i, pc_value, str, fp_value, sp_value);
	return 0;
}
/*
 * aop_decode_cookie_kernel_symbol - decode a cookie (kernel address) into
 * a symbol string, based on the logic of __print_symbol() in kallsyms.c.
 * @buffer:  output buffer for the decoded name.
 * @buf_len: capacity of @buffer; must be at least AOP_MAX_SYM_LEN, else
 *           only the raw hex address is stored.
 * @address: kernel address to decode.
 * @offset:  out - offset of @address within the resolved symbol (zeroed
 *           first; left 0 when the lookup fails or length is short).
 */
static void aop_decode_cookie_kernel_symbol(char *buffer, int buf_len,
	unsigned long address, unsigned long *offset)
{
	char *modname;
	const char *name;
	unsigned long size;
	char namebuf[KSYM_NAME_LEN + 1];

	/* validate the buffer length */
	if (buf_len < (AOP_MAX_SYM_LEN)) {
		aop_printk("insufficient length buf_len= %d\n", buf_len);
		snprintf(buffer, AOP_MAX_SYM_LEN, "0x%lx", address);
		return;
	}

	*offset = 0;
	/* get the symbol name, module name etc for given kernel address */
	name = kallsyms_lookup(address, &size, offset, &modname, namebuf);
	if (!name)
		/* we require only 11 bytes for storing this, safely assume
		   that buf_len is sufficiently more than 11, considering
		   KSYM_NAME_LEN is itself 127 (normally) and we already
		   checked above */
		snprintf(buffer, AOP_MAX_SYM_LEN, "0x%lx", address);
	else {
		/* Note: the precision values are hard-coded here, any change
		   in the related macro should be corrected here also. */
		if (modname)
			snprintf(buffer, AOP_MAX_SYM_LEN, "[%.10s] %.22s",
				 modname, name);
		else
			snprintf(buffer, AOP_MAX_SYM_LEN, "%.32s", name);
	}

	/* On overflow of buffer, it would have already corrupted memory, but
	   anyway this check may be helpful. BTW, it should never overflow
	   because we already handled that in the beginning. */
	WARN_ON(strlen(buffer) >= buf_len);
}
/* used by show_backtrace() */
/*
 * unwind_stack - compute the return PC of the frame containing @pc and
 * advance @sp past that frame.
 * @task: task whose kernel stack is being walked.
 * @sp:   in/out - current stack pointer; advanced by the frame size.
 * @pc:   current program counter.
 * @ra:   current return-address register value (used for leaf functions).
 *
 * Returns the caller's PC, or 0 when unwinding cannot continue (no stack
 * page, unresolvable symbol, PC at function entry, un-analyzable prologue,
 * SP out of bounds, or a non-text result).
 */
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
			   unsigned long pc, unsigned long ra)
{
	unsigned long stack_page;
	struct mips_frame_info info;
	char *modname;
	char namebuf[KSYM_NAME_LEN + 1];
	unsigned long size, ofs;
	int leaf;

	stack_page = (unsigned long)task_stack_page(task);
	if (!stack_page)
		return 0;

	/* A PC that doesn't resolve to a symbol can't be analyzed. */
	if (!kallsyms_lookup(pc, &size, &ofs, &modname, namebuf))
		return 0;
	/* PC at the function's first instruction: no prologue has run. */
	if (ofs == 0)
		return 0;

	info.func = (void *)(pc - ofs);
	info.func_size = ofs;	/* analyze from start to ofs */
	leaf = get_frame_info(&info);
	if (leaf < 0)
		return 0;

	/* Reject stack pointers outside this task's stack area. */
	if (*sp < stack_page ||
	    *sp + info.frame_size > stack_page + THREAD_SIZE - 32)
		return 0;

	if (leaf)
		/*
		 * For some extreme cases, get_frame_info() can
		 * consider wrongly a nested function as a leaf
		 * one. In that cases avoid to return always the
		 * same value.
		 */
		pc = pc != ra ? ra : 0;
	else
		/* Saved return address lives at a fixed slot in the frame. */
		pc = ((unsigned long *)(*sp))[info.pc_offset];

	*sp += info.frame_size;
	return __kernel_text_address(pc) ? pc : 0;
}
/*
 * set_idtvec_handler - install @handler for IDT vector @vector.
 *
 * Logs the installation (with the handler symbolized) unless the handler
 * is the generic unhandled-vector stub, then updates idtvec_table[] under
 * idtvec_table_lock with interrupts disabled.
 *
 * NOTE(review): this kallsyms_lookup() takes four arguments
 * (addr, &size, &offset, namebuf) - a local variant, not mainline Linux's
 * five-argument signature; confirm against this tree's kallsyms header.
 */
void set_idtvec_handler(unsigned int vector, idtvec_handler_t handler)
{
	char namebuf[KSYM_NAME_LEN+1];
	unsigned long symsize, offset;
	unsigned long irqstate;

	ASSERT(vector < NUM_IDT_ENTRIES);

	if (handler != &do_unhandled_idt_vector) {
		printk(KERN_DEBUG "IDT Vector %3u -> %s()\n",
		       vector,
		       kallsyms_lookup((unsigned long)handler,
				       &symsize, &offset, namebuf));
	}

	spin_lock_irqsave(&idtvec_table_lock, irqstate);
	idtvec_table[vector] = handler;
	spin_unlock_irqrestore(&idtvec_table_lock, irqstate);
}
/*
 * frame_info_init - precompute frame layouts for functions in the
 * scheduler and lock text sections, for use by the stack unwinder.
 *
 * With CONFIG_KALLSYMS the code walks symbols from __sched_text_start,
 * switching to __lock_text_start when the sched section ends, recording
 * each function's bounds into mfinfo[]; the entry matching schedule() is
 * remembered in schedule_frame. Without kallsyms only schedule() itself
 * is registered. Returns 0.
 */
static int __init frame_info_init(void)
{
	int i;
#ifdef CONFIG_KALLSYMS
	char *modname;
	char namebuf[KSYM_NAME_LEN + 1];
	unsigned long start, size, ofs;
	extern char __sched_text_start[], __sched_text_end[];
	extern char __lock_text_start[], __lock_text_end[];

	start = (unsigned long)__sched_text_start;
	for (i = 0; i < ARRAY_SIZE(mfinfo); i++) {
		if (start == (unsigned long)schedule)
			schedule_frame = &mfinfo[i];
		if (!kallsyms_lookup(start, &size, &ofs, &modname, namebuf))
			break;
		mfinfo[i].func = (void *)(start + ofs);
		mfinfo[i].func_size = size;
		/* Advance to the next symbol in the section. */
		start += size - ofs;
		if (start >= (unsigned long)__lock_text_end)
			break;
		if (start == (unsigned long)__sched_text_end)
			start = (unsigned long)__lock_text_start;
	}
#else
	mfinfo[0].func = schedule;
	schedule_frame = &mfinfo[0];
#endif
	/* Analyze the prologue of every function we recorded. */
	for (i = 0; i < ARRAY_SIZE(mfinfo) && mfinfo[i].func; i++)
		get_frame_info(mfinfo + i);
	/*
	 * Without schedule() frame info, result given by
	 * thread_saved_pc() and get_wchan() are not reliable.
	 *
	 * Fix: schedule_frame may still be NULL here if the kallsyms walk
	 * broke out before ever matching schedule(); guard the dereference
	 * (the warning is still printed, since the info is indeed missing).
	 */
	if (!schedule_frame || schedule_frame->pc_offset < 0)
		printk("Can't analyze schedule() prologue at %p\n", schedule);
	mfinfo_num = i;
	return 0;
}
/*
 * find_syscall_meta - map a syscall entry address to its metadata record.
 * @syscall: kernel address of the syscall handler.
 *
 * The handler address is symbolized, the not-implemented stub
 * (sys_ni_syscall) is rejected early, and the remaining name is matched
 * against every entry in the __syscalls_metadata pointer table using the
 * arch-aware name comparator.
 *
 * Returns the matching metadata pointer, or NULL.
 */
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata **start;
	struct syscall_metadata **stop;
	char str[KSYM_SYMBOL_LEN];

	start = __start_syscalls_metadata;
	stop = __stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	/* Unimplemented syscalls share one stub - no metadata for them. */
	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
		return NULL;

	for ( ; start < stop; start++) {
		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
			return *start;
	}
	return NULL;
}
/* Look up a kernel symbol and return it in a text buffer. */
/*
 * @buffer:        output buffer; may also receive the raw name from lookup.
 * @address:       address to symbolize.
 * @symbol_offset: bias added to @address before lookup and subtracted from
 *                 the reported offset (used to point inside a call site).
 *
 * Produces "name+0xOFF/0xSIZE", or "0xADDR" when the lookup fails.
 * Returns the formatted length.
 *
 * NOTE(review): this kallsyms_lookup() is a four-argument variant (no
 * modname out-parameter) - confirm against this tree's kallsyms API.
 */
static __notrace int __sprint_symbol(char *buffer, unsigned long address,
				     int symbol_offset)
{
	const char *name;
	unsigned long offset, size;
	int len;

	address += symbol_offset;
	name = kallsyms_lookup(address, &size, &offset, buffer);
	if (!name)
		return vmm_sprintf(buffer, "0x%lx", address);

	/* The lookup may resolve into its own storage; copy if needed. */
	if (name != buffer)
		strcpy(buffer, name);
	len = strlen(buffer);
	buffer += len;
	/* Report the offset relative to the unbiased address. */
	offset -= symbol_offset;
	len += vmm_sprintf(buffer, "+%#lx/%#lx", offset, size);

	return len;
}
/*
 * ftrace_trace_onoff_print - seq_file formatter for a traceon/traceoff
 * probe: "symbol:traceon|traceoff[:unlimited|:count=N]".
 * @m:    output seq_file.
 * @ip:   probed instruction address (symbolized for display).
 * @ops:  probe ops, used to tell traceon from traceoff.
 * @data: remaining trigger count encoded as a pointer (-1 = unlimited).
 *
 * Returns 0.
 */
static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
			 struct ftrace_probe_ops *ops, void *data)
{
	char str[KSYM_SYMBOL_LEN];
	long count = (long)data;

	kallsyms_lookup(ip, NULL, NULL, NULL, str);
	seq_printf(m, "%s:", str);

	/* Idiom fix: constant strings go through seq_puts(), which skips
	 * needless format parsing (and avoids treating literal text as a
	 * format string). */
	if (ops == &traceon_probe_ops)
		seq_puts(m, "traceon");
	else
		seq_puts(m, "traceoff");

	if (count == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}
/* Check if paddr is at an instruction boundary */
/*
 * Decodes forward from the start of the containing function (found via
 * kallsyms offset) until reaching @paddr; the probe point is valid only
 * if decoding lands exactly on it. Breakpoint bytes planted by other
 * kprobes are transparently replaced with the saved original instruction
 * before decoding. Returns non-zero when @paddr is on a boundary.
 */
static int __kprobes can_probe(unsigned long paddr)
{
	int ret;
	unsigned long addr, offset = 0;
	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	/* Unresolvable addresses are not probeable kernel text. */
	if (!kallsyms_lookup(paddr, NULL, &offset, NULL, __dummy_buf))
		return 0;

	/* Decode instructions */
	addr = paddr - offset;	/* start of the containing function */
	while (addr < paddr) {
		kernel_insn_init(&insn, (void *)addr);
		insn_get_opcode(&insn);

		/*
		 * Check if the instruction has been modified by another
		 * kprobe, in which case we replace the breakpoint by the
		 * original instruction in our buffer.
		 */
		if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
			ret = recover_probed_instruction(buf, addr);
			if (ret)
				/*
				 * Another debugging subsystem might insert
				 * this breakpoint. In that case, we can't
				 * recover it.
				 */
				return 0;
			kernel_insn_init(&insn, buf);
		}
		insn_get_length(&insn);
		addr += insn.length;
	}

	return (addr == paddr);
}
/*
 * dis_printaddr_flg - print @addr for disassembly output, symbolized when
 * possible, tracking 'spaces' for column alignment.
 *
 * NOTE(review): this block appears truncated in the source (it ends at an
 * opening "} else {"); reproduced as-is. Also note: sym_name is kzalloc'd
 * without a NULL check and is never freed in the visible code - likely an
 * allocation-failure crash and a memory leak; confirm against the full file.
 */
static void dis_printaddr_flg(bfd_vma addr, disassemble_info *dip, int flag)
{
	unsigned long sym_offset;
	char *sym_name = kzalloc(256, GFP_KERNEL);
	int spaces = 5;

	/*
	 * Print a symbol name or address as necessary.
	 */
	kallsyms_lookup(addr, NULL, &sym_offset, NULL, sym_name);
	if (sym_name[0] != '\0') {
		dip->fprintf_func(dip->stream, "0x%0*lx %s",
				  (int)(2*sizeof(addr)), addr, sym_name);
		if (sym_offset == 0) {
			spaces += 4;
		} else {
			/* Shrink padding by one per hex digit of the offset. */
			unsigned long o = sym_offset;
			while (o >>= 4)
				--spaces;
			dip->fprintf_func(dip->stream, "+0x%lx", sym_offset);
		}
	} else {
/*
 * print_reset_handler - format one registered RCU reset handler into @buf.
 * @buf:     output buffer (sprintf-appended; caller must size it).
 * @i:       ordinal printed as the entry's list prefix.
 * @handler: handler entry to describe.
 *
 * With kallsyms available the callback is symbolized; otherwise only its
 * raw address is printed. Returns the number of characters written.
 */
static inline int print_reset_handler(char *buf, int i, ifx_rcu_handler_t *handler)
{
	int len = 0;
	const char *name = NULL;
#if defined(CONFIG_KALLSYMS)
	unsigned long offset, size;
	char *modname;
	char namebuf[KSYM_NAME_LEN+1];
#endif

#if defined(CONFIG_KALLSYMS)
	/* Symbolize the callback so the dump is human readable. */
	name = kallsyms_lookup((unsigned long)handler->fn, &size, &offset, &modname, namebuf);
#endif
	len += sprintf(buf + len, " %d. next - %#08x\n", i, (unsigned int)handler->next);
	if ( name != NULL )
		len += sprintf(buf + len, " fn - %s (%#08x)\n", name, (unsigned int)handler->fn);
	else
		len += sprintf(buf + len, " fn - %#08x\n", (unsigned int)handler->fn);
	len += sprintf(buf + len, " arg - %#08lx\n", handler->arg);
	len += sprintf(buf + len, " module - %s\n", g_rcu_module_name[handler->module_id]);
	return len;
}
/* Look up a kernel symbol and return it in a text buffer. */
/*
 * Fills @buffer with "name+0xOFF/0xSIZE" (plus " [module]" for module
 * symbols), or with "0xADDR" when @address cannot be resolved.
 * Returns the length of the string written.
 */
int sprint_symbol(char *buffer, unsigned long address)
{
	unsigned long sym_size, sym_offset;
	char *module;
	const char *sym;
	int written;

	sym = kallsyms_lookup(address, &sym_size, &sym_offset, &module, buffer);

	/* Unresolvable address: emit it raw. */
	if (!sym)
		return sprintf(buffer, "0x%lx", address);

	/* The lookup may have resolved into static storage rather than
	 * our buffer - normalize so the name is always in @buffer. */
	if (sym != buffer)
		strcpy(buffer, sym);
	written = strlen(buffer);

	if (module)
		written += sprintf(buffer + written, "+%#lx/%#lx [%s]",
				   sym_offset, sym_size, module);
	else
		written += sprintf(buffer + written, "+%#lx/%#lx",
				   sym_offset, sym_size);

	return written;
}
int mt_reg_dump(char *buf) { /* Get core numbers */ int ret = -1, cnt = num_possible_cpus(); char *ptr = buf; unsigned int pc_value; unsigned int fp_value; unsigned int sp_value; unsigned long size = 0; unsigned long offset = 0; char str[KSYM_SYMBOL_LEN]; int i; if(cnt < 0) return ret; if(is_reg_dump_device_registered) { #if !defined(CONFIG_ARCH_MT6572) && !defined(CONFIG_ARCH_MT6572) /* Get PC, FP, SP and save to buf */ for (i = 0; i < cnt; i++) { pc_value = readl(IOMEM(mcu_reg_base + (i << 4))); fp_value = readl(IOMEM((mcu_reg_base+0x4) + (i << 4))); sp_value = readl(IOMEM((mcu_reg_base+0x8) + (i << 4))); kallsyms_lookup((unsigned long)pc_value, &size, &offset, NULL, str); ptr += sprintf(ptr, "CORE_%d PC = 0x%x(%s + 0x%lx), FP = 0x%x, SP = 0x%x\n", i, pc_value, str, offset, fp_value, sp_value); //printk("CORE_%d PC = 0x%x(%s), FP = 0x%x, SP = 0x%x\n", i, pc_value, str, fp_value, sp_value); } #endif reg_dump_platform(ptr); return 0; } return 1; }
/*
 * as_show - seq_file show callback for the allocation-statistics table.
 * @m: output seq_file.
 * @p: iterator value; encodes the 1-based kma_caller[] index as a pointer.
 *
 * Prints totals, slack, net, alloc/free counts and the (symbolized, when
 * kallsyms is available) caller for one table entry. Returns 0.
 */
static int as_show(struct seq_file *m, void *p)
{
	/* Fix: decode the pointer-encoded index via long, not int - a direct
	 * (int) cast of a pointer is a narrowing conversion on 64-bit. */
	int n = (long)p - 1;
	struct kma_caller *c;
#ifdef CONFIG_KALLSYMS
	char *modname;
	const char *name;
	unsigned long offset = 0, size;
	char namebuf[128];

	c = &kma_caller[n];
	/* Fix: cast through unsigned long, not int - an int cast truncates
	 * the caller address on 64-bit kernels before symbol lookup. */
	name = kallsyms_lookup((unsigned long)c->caller, &size, &offset,
			       &modname, namebuf);
	seq_printf(m, "%8d %8d %8d %5d/%-5d %s+0x%lx\n",
		   c->total, c->slack, c->net, c->allocs, c->frees,
		   name, offset);
#else
	c = &kma_caller[n];
	seq_printf(m, "%8d %8d %8d %5d/%-5d %p\n",
		   c->total, c->slack, c->net, c->allocs, c->frees, c->caller);
#endif
	return 0;
}
/*
 * kdbnearsym - Return the name of the symbol with the nearest address
 * less than 'addr'.
 *
 * Parameters:
 *	addr	Address to check for symbol near
 *	symtab	Structure to receive results
 * Returns:
 *	0	No sections contain this address, symtab zero filled
 *	1	Address mapped to module/symbol/section, data in symtab
 * Remarks:
 *	2.6 kallsyms has a "feature" where it unpacks the name into a
 *	string. If that string is reused before the caller expects it
 *	then the caller sees its string change without warning. To
 *	avoid cluttering up the main kdb code with lots of kdb_strdup,
 *	tests and kfree calls, kdbnearsym maintains an LRU list of the
 *	last few unique strings. The list is sized large enough to
 *	hold active strings, no kdb caller of kdbnearsym makes more
 *	than ~20 later calls before using a saved value.
 */
int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
{
	int ret = 0;
	unsigned long symbolsize = 0;
	unsigned long offset = 0;
#define knt1_size 128 /* must be >= kallsyms table size */
	char *knt1 = NULL;

	if (KDB_DEBUG(AR))
		kdb_printf("kdbnearsym: addr=0x%lx, symtab=%p\n", addr, symtab);
	memset(symtab, 0, sizeof(*symtab));

	/* Addresses in the first page are never valid symbols. */
	if (addr < 4096)
		goto out;
	knt1 = debug_kmalloc(knt1_size, GFP_ATOMIC);
	if (!knt1) {
		kdb_printf("kdbnearsym: addr=0x%lx cannot kmalloc knt1\n",
			   addr);
		goto out;
	}
	symtab->sym_name = kallsyms_lookup(addr, &symbolsize, &offset,
					   (char **)(&symtab->mod_name), knt1);
	/* An implausibly large offset means the match is bogus; discard. */
	if (offset > 8*1024*1024) {
		symtab->sym_name = NULL;
		addr = offset = symbolsize = 0;
	}
	symtab->sym_start = addr - offset;
	symtab->sym_end = symtab->sym_start + symbolsize;
	ret = symtab->sym_name != NULL && *(symtab->sym_name) != '\0';

	if (ret) {
		int i;
		/* Another 2.6 kallsyms "feature". Sometimes the sym_name is
		 * set but the buffer passed into kallsyms_lookup is not used,
		 * so it contains garbage. The caller has to work out which
		 * buffer needs to be saved.
		 */
		if (symtab->sym_name != knt1) {
			strncpy(knt1, symtab->sym_name, knt1_size);
			knt1[knt1_size-1] = '\0';
		}
		/* Probe the LRU string cache for an existing copy. */
		for (i = 0; i < ARRAY_SIZE(kdb_name_table); ++i) {
			if (kdb_name_table[i] &&
			    strcmp(kdb_name_table[i], knt1) == 0)
				break;
		}
		if (i >= ARRAY_SIZE(kdb_name_table)) {
			/* Miss: evict the oldest entry and shift the rest. */
			debug_kfree(kdb_name_table[0]);
			memcpy(kdb_name_table, kdb_name_table+1,
			       sizeof(kdb_name_table[0]) *
			       (ARRAY_SIZE(kdb_name_table)-1));
		} else {
			/* Hit: reuse the cached string, close the gap. */
			debug_kfree(knt1);
			knt1 = kdb_name_table[i];
			memcpy(kdb_name_table+i, kdb_name_table+i+1,
			       sizeof(kdb_name_table[0]) *
			       (ARRAY_SIZE(kdb_name_table)-i-1));
		}
		/* Install the string as the most recently used entry. */
		i = ARRAY_SIZE(kdb_name_table) - 1;
		kdb_name_table[i] = knt1;
		symtab->sym_name = kdb_name_table[i];
		knt1 = NULL;	/* ownership moved to the cache */
	}

	if (symtab->mod_name == NULL)
		symtab->mod_name = "kernel";
	if (KDB_DEBUG(AR))
		kdb_printf("kdbnearsym: returns %d symtab->sym_start=0x%lx, "
			   "symtab->mod_name=%p, symtab->sym_name=%p (%s)\n",
			   ret, symtab->sym_start, symtab->mod_name,
			   symtab->sym_name, symtab->sym_name);

out:
	debug_kfree(knt1);
	return ret;
}
/*
 * decode_address - describe @address into @buf for oops/trace output.
 *
 * Resolution order: kernel symbol (kallsyms) -> known Blackfin memory
 * regions (fixed code, MMRs, L1 banks, ROM, scratchpad, etc.) -> user
 * mappings, found by walking every process's VMA rbtree. During an oops
 * the VMA walk is skipped entirely (locks may be held by the crashed
 * context). 'in_atomic' selects a lock-free p->mm access instead of
 * get_task_mm()/mmput().
 */
void decode_address(char *buf, unsigned long address)
{
	struct task_struct *p;
	struct mm_struct *mm;
	unsigned long flags, offset;
	unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
	struct rb_node *n;
#ifdef CONFIG_KALLSYMS
	unsigned long symsize;
	const char *symname;
	char *modname;
	char *delim = ":";
	char namebuf[128];
#endif

	buf += sprintf(buf, "<0x%08lx> ", address);

#ifdef CONFIG_KALLSYMS
	/* Kernel text? Print "{ :module:symbol + offset }" and stop. */
	symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf);
	if (symname) {
		if (!modname)
			modname = delim = "";
		sprintf(buf, "{ %s%s%s%s + 0x%lx }",
			delim, modname, delim, symname,
			(unsigned long)offset);
		return;
	}
#endif

	/* Not a symbol - classify by Blackfin memory map region. */
	if (address >= FIXED_CODE_START && address < FIXED_CODE_END) {
		strcat(buf, "/* Maybe fixed code section */");
		return;
	} else if (address < CONFIG_BOOT_LOAD) {
		strcat(buf, "/* Maybe null pointer? */");
		return;
	} else if (address >= COREMMR_BASE) {
		strcat(buf, "/* core mmrs */");
		return;
	} else if (address >= SYSMMR_BASE) {
		strcat(buf, "/* system mmrs */");
		return;
	} else if (address >= L1_ROM_START && address < L1_ROM_START + L1_ROM_LENGTH) {
		strcat(buf, "/* on-chip L1 ROM */");
		return;
	} else if (address >= L1_SCRATCH_START && address < L1_SCRATCH_START + L1_SCRATCH_LENGTH) {
		strcat(buf, "/* on-chip scratchpad */");
		return;
	} else if (address >= physical_mem_end && address < ASYNC_BANK0_BASE) {
		strcat(buf, "/* unconnected memory */");
		return;
	} else if (address >= ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE && address < BOOT_ROM_START) {
		strcat(buf, "/* reserved memory */");
		return;
	} else if (address >= L1_DATA_A_START && address < L1_DATA_A_START + L1_DATA_A_LENGTH) {
		strcat(buf, "/* on-chip Data Bank A */");
		return;
	} else if (address >= L1_DATA_B_START && address < L1_DATA_B_START + L1_DATA_B_LENGTH) {
		strcat(buf, "/* on-chip Data Bank B */");
		return;
	}

	/* During an oops the VMA walk below is unsafe - bail out early. */
	if (oops_in_progress) {
		strcat(buf, "/* kernel dynamic memory (maybe user-space) */");
		return;
	}

	/* Search every process's mappings for one covering @address. */
	write_lock_irqsave(&tasklist_lock, flags);
	for_each_process(p) {
		mm = (in_atomic ? p->mm : get_task_mm(p));
		if (!mm)
			continue;

		if (!down_read_trylock(&mm->mmap_sem)) {
			if (!in_atomic)
				mmput(mm);
			continue;
		}

		for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
			struct vm_area_struct *vma;

			vma = rb_entry(n, struct vm_area_struct, vm_rb);

			if (address >= vma->vm_start && address < vma->vm_end) {
				char _tmpbuf[256];
				char *name = p->comm;
				struct file *file = vma->vm_file;

				/* Prefer the backing file's path over comm. */
				if (file) {
					char *d_name = d_path(&file->f_path, _tmpbuf,
						      sizeof(_tmpbuf));
					if (!IS_ERR(d_name))
						name = d_name;
				}

				/* FLAT does not have its text aligned to the start of
				 * the map while FDPIC ELF does ... sanity-check
				 * 'current' before poking at its mm. */
				if ((unsigned long)current >= FIXED_CODE_START &&
				    !((unsigned long)current & 0x3)) {
					if (current->mm &&
					    (address > current->mm->start_code) &&
					    (address < current->mm->end_code))
						offset = address - current->mm->start_code;
					else
						offset = (address - vma->vm_start) +
							 (vma->vm_pgoff << PAGE_SHIFT);

					sprintf(buf, "[ %s + 0x%lx ]", name, offset);
				} else
					sprintf(buf, "[ %s vma:0x%lx-0x%lx]",
						name, vma->vm_start, vma->vm_end);

				up_read(&mm->mmap_sem);
				if (!in_atomic)
					mmput(mm);

				if (buf[0] == '\0')
					sprintf(buf, "[ %s ] dynamic memory", name);

				goto done;
			}
		}

		up_read(&mm->mmap_sem);
		if (!in_atomic)
			mmput(mm);
	}

	/* Nothing matched anywhere. */
	sprintf(buf, "/* kernel dynamic memory */");

done:
	write_unlock_irqrestore(&tasklist_lock, flags);
}
static void decode_address(char *buf, unsigned long address) { struct vm_list_struct *vml; struct task_struct *p; struct mm_struct *mm; unsigned long flags, offset; unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic(); #ifdef CONFIG_KALLSYMS unsigned long symsize; const char *symname; char *modname; char *delim = ":"; char namebuf[128]; /* look up the address and see if we are in kernel space */ symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf); if (symname) { /* yeah! kernel space! */ if (!modname) modname = delim = ""; sprintf(buf, "<0x%p> { %s%s%s%s + 0x%lx }", (void *)address, delim, modname, delim, symname, (unsigned long)offset); return; } #endif /* Problem in fixed code section? */ if (address >= FIXED_CODE_START && address < FIXED_CODE_END) { sprintf(buf, "<0x%p> /* Maybe fixed code section */", (void *)address); return; } /* Problem somewhere before the kernel start address */ if (address < CONFIG_BOOT_LOAD) { sprintf(buf, "<0x%p> /* Maybe null pointer? */", (void *)address); return; } /* looks like we're off in user-land, so let's walk all the * mappings of all our processes and see if we can't be a whee * bit more specific */ write_lock_irqsave(&tasklist_lock, flags); for_each_process(p) { mm = (in_atomic ? p->mm : get_task_mm(p)); if (!mm) continue; vml = mm->context.vmlist; while (vml) { struct vm_area_struct *vma = vml->vma; if (address >= vma->vm_start && address < vma->vm_end) { char _tmpbuf[256]; char *name = p->comm; struct file *file = vma->vm_file; if (file) name = d_path(&file->f_path, _tmpbuf, sizeof(_tmpbuf)); /* FLAT does not have its text aligned to the start of * the map while FDPIC ELF does ... 
*/ /* before we can check flat/fdpic, we need to * make sure current is valid */ if ((unsigned long)current >= FIXED_CODE_START && !((unsigned long)current & 0x3)) { if (current->mm && (address > current->mm->start_code) && (address < current->mm->end_code)) offset = address - current->mm->start_code; else offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT); sprintf(buf, "<0x%p> [ %s + 0x%lx ]", (void *)address, name, offset); } else sprintf(buf, "<0x%p> [ %s vma:0x%lx-0x%lx]", (void *)address, name, vma->vm_start, vma->vm_end); if (!in_atomic) mmput(mm); if (!strlen(buf)) sprintf(buf, "<0x%p> [ %s ] dynamic memory", (void *)address, name); goto done; } vml = vml->next; } if (!in_atomic) mmput(mm); } /* we were unable to find this address anywhere */ sprintf(buf, "<0x%p> /* kernel dynamic memory */", (void *)address); done: write_unlock_irqrestore(&tasklist_lock, flags); }