#if 0
/* Displays a short user-stack dump. Compiled out since it would
 * clash with the two-argument show_stack() defined below.
 */
int
show_stack(void)
{
	unsigned long *sp = (unsigned long *)rdusp();
	int i;

	raw_printk("Stack dump [0x%08lx]:\n", (unsigned long)sp);
	for (i = 0; i < 16; i++)
		raw_printk("sp + %d: 0x%08lx\n", i * 4, sp[i]);
	return 0;
}
#endif
void
show_trace(unsigned long *stack)
{
	unsigned long addr, module_start, module_end;
	extern char _stext, _etext;
	int i;

	raw_printk("\nCall Trace: ");

	i = 1;
	module_start = VMALLOC_START;
	module_end = VMALLOC_END;

	while (((long)stack & (THREAD_SIZE - 1)) != 0) {
		if (__get_user(addr, stack)) {
			/* This message matches "failing address" marked
			   s390 in ksymoops, so lines containing it will
			   not be filtered out by ksymoops. */
			raw_printk("Failing address 0x%lx\n",
				   (unsigned long)stack);
			break;
		}
		stack++;

		/*
		 * If the address is either in the text segment of the
		 * kernel, or in the region which contains vmalloc'ed
		 * memory, it *may* be the address of a calling
		 * routine; if so, print it so that someone tracing
		 * down the cause of the crash will be able to figure
		 * out the call path that was taken.
		 */
		if (((addr >= (unsigned long)&_stext) &&
		     (addr <= (unsigned long)&_etext)) ||
		    ((addr >= module_start) && (addr <= module_end))) {
			if (i && ((i % 8) == 0))
				raw_printk("\n       ");
			raw_printk("[<%08lx>] ", addr);
			i++;
		}
	}
}
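/*
 * Illustrative note (an addition, not from the original file): the loop
 * above stops when the stack pointer reaches a THREAD_SIZE boundary.
 * This works because the kernel stack is THREAD_SIZE-aligned, so the
 * mask (THREAD_SIZE - 1) yields zero exactly at the top of the stack:
 *
 *	stack = 0xc001e000, THREAD_SIZE = 0x2000
 *	0xc001e000 & 0x1fff == 0   ->  end of stack, stop walking
 */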
void
show_stack(struct task_struct *task, unsigned long *sp)
{
	unsigned long *stack, addr;
	int i;

	/*
	 * debugging aid: "show_stack(NULL, NULL);" prints a
	 * back trace for the current context.
	 */
	if (sp == NULL) {
		if (task)
			sp = (unsigned long *)task->thread.ksp;
		else
			sp = (unsigned long *)rdsp();
	}

	stack = sp;

	raw_printk("\nStack from %08lx:\n       ", (unsigned long)stack);
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (((long)stack & (THREAD_SIZE - 1)) == 0)
			break;
		if (i && ((i % 8) == 0))
			raw_printk("\n       ");
		if (__get_user(addr, stack)) {
			/* This message matches "failing address" marked
			   s390 in ksymoops, so lines containing it will
			   not be filtered out by ksymoops. */
			raw_printk("Failing address 0x%lx\n",
				   (unsigned long)stack);
			break;
		}
		stack++;
		raw_printk("%08lx ", addr);
	}
	show_trace(sp);
}
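/*
 * Usage note (added, not from the original file): calling
 *
 *	show_stack(NULL, NULL);
 *
 * dumps and traces the current kernel stack, while passing a task
 * pointer dumps that task's saved kernel stack (thread.ksp) instead.
 */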
void
handle_watchdog_bite(struct pt_regs *regs)
{
#if defined(CONFIG_ETRAX_WATCHDOG)
	extern int cause_of_death;

	raw_printk("Watchdog bite\n");

	/* Check if forced restart or unexpected watchdog */
	if (cause_of_death == 0xbedead) {
		/* Forced restart: just wait for the watchdog to reset us. */
		while (1)
			;
	}

	/* Unexpected watchdog, stop the watchdog and dump registers. */
	stop_watchdog();
	raw_printk("Oops: bitten by watchdog\n");
	show_registers(regs);

#ifndef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY
	reset_watchdog();
#endif
	while (1)
		/* nothing */;
#endif
}
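/*
 * Added note (an assumption, not from the original file): the magic
 * value 0xbedead is written to cause_of_death by the reboot path
 * before it deliberately lets the watchdog fire; that is how a forced
 * restart is told apart from an unexpected watchdog bite above.
 */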
/* This is normally the 'Oops' routine */
void
die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
	if (user_mode(regs))
		return;

#ifdef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY
	/* This printout might take too long and normally trigger the
	 * watchdog. If we're in the nice-doggy development mode, stop
	 * the watchdog during the printout.
	 */
	stop_watchdog();
#endif

	raw_printk("%s: %04lx\n", str, err & 0xffff);

	show_registers(regs);

#ifdef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY
	reset_watchdog();
#endif
	do_exit(SIGSEGV);
}
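/*
 * A minimal usage sketch (hypothetical, not part of this file): a trap
 * handler typically calls die_if_kernel() first, which returns for
 * user-mode faults and oopses for kernel-mode ones, and then signals
 * the offending user process. The handler name and the trap chosen
 * here are illustrative assumptions.
 */
static void
handle_illegal_instruction(struct pt_regs *regs)
{
	die_if_kernel("Illegal instruction", regs, 0);

	/* Only reached for user mode: kill the faulting process. */
	force_sig(SIGILL, current);
}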
void
spinning_cpu(void *addr)
{
	/* Cast the pointer for printing; %X expects an unsigned int. */
	raw_printk("CPU %d spinning on 0x%lx\n",
		   smp_processor_id(), (unsigned long)addr);
	dump_stack();
}
void
oops_nmi_handler(struct pt_regs *regs)
{
	stop_watchdog();
	raw_printk("NMI!\n");
	show_registers(regs);
}
asmlinkage void
do_page_fault(unsigned long address, struct pt_regs *regs,
	      int protection, int writeaccess)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	siginfo_t info;

	D(printk("Page fault for %lX on %X at %lX, prot %d write %d\n",
		 address, smp_processor_id(), instruction_pointer(regs),
		 protection, writeaccess));

	tsk = current;

	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * NOTE2: This is done so that, when updating the vmalloc
	 * mappings we don't have to walk all processes' pgdirs and
	 * add the high mappings all at once. Instead we do it as they
	 * are used. However vmalloc'ed page entries have the PAGE_GLOBAL
	 * bit set so sometimes the TLB can use a lingering entry.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was not a protection error.
	 */

	if (address >= VMALLOC_START &&
	    !protection &&
	    !user_mode(regs))
		goto vmalloc_fault;

	/* When stack execution is not allowed we store the signal
	 * trampolines in the reserved cris_signal_return_page.
	 * Handle this in the exact same way as vmalloc (we know
	 * that the mapping is there and is valid so no need to
	 * call handle_mm_fault).
	 */
	if (cris_signal_return_page &&
	    address == cris_signal_return_page &&
	    !protection && user_mode(regs))
		goto vmalloc_fault;

	/* we can and should enable interrupts at this point */
	local_irq_enable();

	mm = tsk->mm;
	info.si_code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (user_mode(regs)) {
		/*
		 * accessing the stack below usp is always a bug.
		 * we get page-aligned addresses so we can only check
		 * if we're within a page from usp, but that might be
		 * enough to catch brutal errors at least.
		 */
		if (address + PAGE_SIZE < rdusp())
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;

	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */

 good_area:
	info.si_code = SEGV_ACCERR;

	/* first do some preliminary protection checks */

	if (writeaccess == 2) {
		/* instruction fetch */
		if (!(vma->vm_flags & VM_EXEC))
			goto bad_area;
	} else if (writeaccess == 1) {
		/* write access */
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
	} else {
		/* read access */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	switch (handle_mm_fault(mm, vma, address, writeaccess & 1)) {
	case VM_FAULT_MINOR:
		tsk->min_flt++;
		break;
	case VM_FAULT_MAJOR:
		tsk->maj_flt++;
		break;
	case VM_FAULT_SIGBUS:
		goto do_sigbus;
	default:
		goto out_of_memory;
	}

	up_read(&mm->mmap_sem);
	return;

	/*
	 * Something tried to access memory that isn't in our memory map.
	 * Fix it, but check if it's kernel or user first..
	 */

 bad_area:
	up_read(&mm->mmap_sem);

 bad_area_nosemaphore:
	DPG(show_registers(regs));

	/* User mode accesses just cause a SIGSEGV */

	if (user_mode(regs)) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

 no_context:

	/* Are we prepared to handle this kernel fault?
	 *
	 * (The kernel has valid exception-points in the source
	 *  when it accesses user-memory. When it fails in one
	 *  of those points, we find it in a table and do a jump
	 *  to some fixup code that loads an appropriate error
	 *  code)
	 */
	if (find_fixup_code(regs))
		return;

	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	if ((unsigned long)address < PAGE_SIZE)
		raw_printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
	else
		raw_printk(KERN_ALERT "Unable to handle kernel access");

	raw_printk(" at virtual address %08lx\n", address);

	die_if_kernel("Oops", regs, (writeaccess << 1) | protection);

	do_exit(SIGKILL);

	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */

 out_of_memory:
	up_read(&mm->mmap_sem);
	printk("VM: killing process %s\n", tsk->comm);
	if (user_mode(regs))
		do_exit(SIGKILL);
	goto no_context;

 do_sigbus:
	up_read(&mm->mmap_sem);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if (!user_mode(regs))
		goto no_context;
	return;

 vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Use current_pgd instead of tsk->active_mm->pgd
		 * since the latter might be unavailable if this
		 * code is executed in an unfortunately timed IRQ
		 * (like inside schedule() between switch_mm and
		 * switch_to...).
		 */

		int offset = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd = (pgd_t *)per_cpu(current_pgd, smp_processor_id()) + offset;
		pgd_k = init_mm.pgd + offset;

		/* Since we're two-level, we don't need to do both
		 * set_pgd and set_pmd (they do the same thing). If
		 * we go three-level at some point, do the right thing
		 * with pgd_present and set_pgd here.
		 *
		 * Also, since the vmalloc area is global, we don't
		 * need to copy individual PTEs, it is enough to
		 * copy the pgd pointer into the pte page of the
		 * root task. If that is there, we'll find our pte if
		 * it exists.
		 */

		pud = pud_offset(pgd, address);
		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd = pmd_offset(pud, address);
		pmd_k = pmd_offset(pud_k, address);

		if (!pmd_present(*pmd_k))
			goto bad_area_nosemaphore;

		set_pmd(pmd, *pmd_k);

		/* Make sure the actual PTE exists as well to
		 * catch kernel vmalloc-area accesses to non-mapped
		 * addresses. If we don't do this, this will just
		 * silently loop forever.
		 */
		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;

		return;
	}
}
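/*
 * A minimal sketch (an assumption, not this file's code) of what
 * find_fixup_code() in the fault path above conceptually does: look
 * the faulting instruction address up in the kernel's exception table
 * and, if an entry exists, resume execution at its fixup stub instead
 * of oopsing. The function name and entry layout shown here are
 * illustrative.
 */
static int
find_fixup_code_sketch(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	/* Was the faulting instruction a known user-access point? */
	fixup = search_exception_tables(instruction_pointer(regs));
	if (fixup) {
		/* Redirect execution to the fixup code, which loads
		   an error code (e.g. -EFAULT) and continues. */
		instruction_pointer(regs) = fixup->fixup;
		return 1;
	}
	return 0;
}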
void
show_registers(struct pt_regs *regs)
{
	/* We either use rdusp() - the USP register, which might not
	 * correspond to the current process in all the cases we're
	 * called from, or we use current->thread.usp, which is not up
	 * to date for the current process. Experience shows we want
	 * the USP register.
	 */
	unsigned long usp = rdusp();

	raw_printk("IRP: %08lx SRP: %08lx DCCR: %08lx USP: %08lx MOF: %08lx\n",
		   regs->irp, regs->srp, regs->dccr, usp, regs->mof);
	raw_printk(" r0: %08lx  r1: %08lx   r2: %08lx  r3: %08lx\n",
		   regs->r0, regs->r1, regs->r2, regs->r3);
	raw_printk(" r4: %08lx  r5: %08lx   r6: %08lx  r7: %08lx\n",
		   regs->r4, regs->r5, regs->r6, regs->r7);
	raw_printk(" r8: %08lx  r9: %08lx  r10: %08lx r11: %08lx\n",
		   regs->r8, regs->r9, regs->r10, regs->r11);
	raw_printk("r12: %08lx r13: %08lx oR10: %08lx  sp: %08lx\n",
		   regs->r12, regs->r13, regs->orig_r10,
		   (unsigned long)regs);
	raw_printk("R_MMU_CAUSE: %08lx\n", (unsigned long)*R_MMU_CAUSE);
	raw_printk("Process %s (pid: %d, stackpage=%08lx)\n",
		   current->comm, current->pid, (unsigned long)current);

	/*
	 * When in kernel mode, we also print out the stack and code
	 * at the time of the fault..
	 */
	if (!user_mode(regs)) {
		int i;

		show_stack(NULL, (unsigned long *)usp);

		/* Dump kernel stack if the previous dump wasn't one. */
		if (usp != 0)
			show_stack(NULL, NULL);

		raw_printk("\nCode: ");

		if (regs->irp < PAGE_OFFSET)
			goto bad;

		/* Often enough the value at regs->irp doesn't point to
		   the interesting instruction, which is most often the
		   _previous_ instruction. So we dump at an offset large
		   enough that instruction decoding should be in sync at
		   the interesting point, but small enough to fit on a
		   row (sort of). We point out the regs->irp location in
		   a ksymoops-friendly way by wrapping the byte for that
		   address in parentheses. */
		for (i = -12; i < 12; i++) {
			unsigned char c;

			if (__get_user(c, &((unsigned char *)regs->irp)[i])) {
 bad:
				raw_printk(" Bad IP value.");
				break;
			}

			if (i == 0)
				raw_printk("(%02x) ", c);
			else
				raw_printk("%02x ", c);
		}
		raw_printk("\n");
	}
}