static enum stack_type analyze_stack(int cpu, struct task_struct *task, unsigned long *stack, unsigned long **stack_end, unsigned long *irq_stack, unsigned *used, char **id) { unsigned long addr; addr = ((unsigned long)stack & (~(THREAD_SIZE - 1))); if ((unsigned long)task_stack_page(task) == addr) return STACK_IS_NORMAL; *stack_end = in_exception_stack(cpu, (unsigned long)stack, used, id); if (*stack_end) return STACK_IS_EXCEPTION; if (!irq_stack) return STACK_IS_NORMAL; *stack_end = irq_stack; irq_stack = irq_stack - irq_stack_size; if (in_irq_stack(stack, irq_stack, *stack_end)) return STACK_IS_IRQ; return STACK_IS_UNKNOWN; }
int get_stack_info(unsigned long *stack, struct task_struct *task, struct stack_info *info, unsigned long *visit_mask) { if (!stack) goto unknown; task = task ? : current; if (in_task_stack(stack, task, info)) goto recursion_check; if (task != current) goto unknown; if (in_exception_stack(stack, info)) goto recursion_check; if (in_irq_stack(stack, info)) goto recursion_check; if (in_sysenter_stack(stack, info)) goto recursion_check; goto unknown; recursion_check: /* * Make sure we don't iterate through any given stack more than once. * If it comes up a second time then there's something wrong going on: * just break out and report an unknown stack type. */ if (visit_mask) { if (*visit_mask & (1UL << info->type)) { printk_deferred_once(KERN_WARNING "WARNING: stack recursion on stack type %d\n", info->type); goto unknown; } *visit_mask |= 1UL << info->type; } return 0; unknown: info->type = STACK_TYPE_UNKNOWN; return -EINVAL; }
/*
 * PaX: verify that a dynamic stack allocation of @size bytes, made by the
 * caller, fits on whichever stack the caller is currently running on while
 * leaving at least 256 bytes of headroom; BUG() if it does not, or if the
 * current stack cannot be identified at all.
 */
void __used pax_check_alloca(unsigned long size)
{
	/* &sp is the address of a local, i.e. (roughly) the current stack pointer. */
	unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
	unsigned cpu, used;
	char *id;

	/* check the process stack first */
	stack_start = (unsigned long)task_stack_page(current);
	stack_end = stack_start + THREAD_SIZE;
	if (likely(stack_start <= sp && sp < stack_end)) {
		/* Bytes remaining below sp within the THREAD_SIZE-aligned stack. */
		unsigned long stack_left = sp & (THREAD_SIZE - 1);
		BUG_ON(stack_left < 256 || size >= stack_left - 256);
		return;
	}

	/* Pin the cpu: the irq/exception stacks below are per-cpu. */
	cpu = get_cpu();

	/* check the irq stacks */
	stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
	stack_start = stack_end - IRQ_STACK_SIZE;
	if (stack_start <= sp && sp < stack_end) {
		unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
		/* Drop the cpu pin before potentially BUG()ing. */
		put_cpu();
		BUG_ON(stack_left < 256 || size >= stack_left - 256);
		return;
	}

	/* check the exception stacks */
	used = 0;
	stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
	stack_start = stack_end - EXCEPTION_STKSZ;
	if (stack_end && stack_start <= sp && sp < stack_end) {
		unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
		put_cpu();
		BUG_ON(stack_left < 256 || size >= stack_left - 256);
		return;
	}

	put_cpu();

	/* unknown stack */
	BUG();
}
/*
 * Walk every stack @task may be executing on — nested exception stacks,
 * then the per-cpu IRQ stack, finally the normal process stack — invoking
 * the @ops callbacks (stack() for stack-boundary markers, walk_stack() for
 * the entries themselves) with @data for each.
 *
 * @regs may be NULL; @stack may be NULL, in which case a starting point is
 * derived from the current frame (or task->thread.sp for a sleeping task).
 */
void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, const struct stacktrace_ops *ops,
		void *data)
{
	/* get_cpu() pins us: irq_stack_ptr and the exception stacks are per-cpu. */
	const unsigned cpu = get_cpu();
	unsigned long *irq_stack_end =
		(unsigned long *)per_cpu(irq_stack_ptr, cpu);
	unsigned used = 0;	/* bitmask of exception stacks already visited */
	struct thread_info *tinfo;
	int graph = 0;		/* ftrace graph-return index, threaded through walk_stack */
	unsigned long bp;

	if (!task)
		task = current;

	if (!stack) {
		/* Start from our own frame, or the saved sp of a sleeping task. */
		unsigned long dummy;
		stack = &dummy;
		if (task && task != current)
			stack = (unsigned long *)task->thread.sp;
	}

	bp = stack_frame(task, regs);

	/*
	 * Print function call entries in all stacks, starting at the
	 * current stack address. If the stacks consist of nested
	 * exceptions
	 */
	tinfo = task_thread_info(task);
	for (;;) {
		char *id;
		unsigned long *estack_end;

		estack_end = in_exception_stack(cpu, (unsigned long)stack,
						&used, &id);
		if (estack_end) {
			if (ops->stack(data, id) < 0)
				break;
			bp = ops->walk_stack(tinfo, stack, bp, ops,
					     data, estack_end, &graph);
			ops->stack(data, "<EOE>");
			/*
			 * We link to the next stack via the
			 * second-to-last pointer (index -2 to end) in the
			 * exception stack:
			 */
			stack = (unsigned long *) estack_end[-2];
			continue;
		}
		if (irq_stack_end) {
			unsigned long *irq_stack;
			/* Base of the usable IRQ stack (64 bytes reserved at the bottom). */
			irq_stack = irq_stack_end -
				(IRQ_STACK_SIZE - 64) / sizeof(*irq_stack);

			if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
				if (ops->stack(data, "IRQ") < 0)
					break;
				bp = ops->walk_stack(tinfo, stack, bp,
					ops, data, irq_stack_end, &graph);
				/*
				 * We link to the next stack (which would be
				 * the process stack normally) the last
				 * pointer (index -1 to end) in the IRQ stack:
				 */
				stack = (unsigned long *) (irq_stack_end[-1]);
				/* Repair bp across the irq -> process stack transition. */
				bp = fixup_bp_irq_link(bp, stack, irq_stack,
						       irq_stack_end);
				/* Only visit the IRQ stack once. */
				irq_stack_end = NULL;
				ops->stack(data, "EOI");
				continue;
			}
		}
		break;
	}

	/*
	 * This handles the process stack:
	 */
	bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
	put_cpu();
}
/*
 * Walk every stack @task may be executing on — nested exception stacks,
 * then the per-cpu IRQ stack, finally the normal process stack — calling
 * ops->stack() at stack boundaries and print_context_stack() for the
 * entries, passing @data through to the callbacks.
 *
 * @bp is the caller-supplied frame pointer; if zero and frame pointers are
 * configured, it is recovered from the current registers (or from the
 * saved sp of a sleeping task).
 */
void dump_trace(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp,
		const struct stacktrace_ops *ops, void *data)
{
	/* get_cpu() pins us: irq_stack_ptr and the exception stacks are per-cpu. */
	const unsigned cpu = get_cpu();
	unsigned long *irq_stack_end =
		(unsigned long *)per_cpu(irq_stack_ptr, cpu);
	unsigned used = 0;	/* bitmask of exception stacks already visited */
	struct thread_info *tinfo;
	int graph = 0;		/* ftrace graph-return index, threaded through the walker */

	if (!task)
		task = current;

	if (!stack) {
		/* Start from our own frame, or the saved sp of a sleeping task. */
		unsigned long dummy;
		stack = &dummy;
		if (task && task != current)
			stack = (unsigned long *)task->thread.sp;
	}

#ifdef CONFIG_FRAME_POINTER
	if (!bp) {
		if (task == current) {
			/* Grab bp right from our regs */
			get_bp(bp);
		} else {
			/* bp is the last reg pushed by switch_to */
			bp = *(unsigned long *) task->thread.sp;
		}
	}
#endif

	/*
	 * Print function call entries in all stacks, starting at the
	 * current stack address. If the stacks consist of nested
	 * exceptions
	 */
	tinfo = task_thread_info(task);
	for (;;) {
		char *id;
		unsigned long *estack_end;

		estack_end = in_exception_stack(cpu, (unsigned long)stack,
						&used, &id);
		if (estack_end) {
			if (ops->stack(data, id) < 0)
				break;
			bp = print_context_stack(tinfo, stack, bp, ops,
						 data, estack_end, &graph);
			ops->stack(data, "<EOE>");
			/*
			 * We link to the next stack via the
			 * second-to-last pointer (index -2 to end) in the
			 * exception stack:
			 */
			stack = (unsigned long *) estack_end[-2];
			continue;
		}
		if (irq_stack_end) {
			unsigned long *irq_stack;
			/* Base of the usable IRQ stack (64 bytes reserved at the bottom). */
			irq_stack = irq_stack_end -
				(IRQ_STACK_SIZE - 64) / sizeof(*irq_stack);

			if (stack >= irq_stack && stack < irq_stack_end) {
				if (ops->stack(data, "IRQ") < 0)
					break;
				bp = print_context_stack(tinfo, stack, bp,
					ops, data, irq_stack_end, &graph);
				/*
				 * We link to the next stack (which would be
				 * the process stack normally) the last
				 * pointer (index -1 to end) in the IRQ stack:
				 */
				stack = (unsigned long *) (irq_stack_end[-1]);
				/* Only visit the IRQ stack once. */
				irq_stack_end = NULL;
				ops->stack(data, "EOI");
				continue;
			}
		}
		break;
	}

	/*
	 * This handles the process stack:
	 */
	bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
	put_cpu();
}
/*
 * Print a raw call trace starting at @stack: scan nested exception stacks,
 * then the per-cpu IRQ stack, then the process stack, printing every word
 * that looks like a kernel text address.  Boundary markers (<%s>, <EOE>,
 * <IRQ>, <EOI>) show which stack each group of addresses came from.
 */
void show_trace(unsigned long *stack)
{
	unsigned long addr;
	const unsigned cpu = safe_smp_processor_id();
	unsigned long *irqstack_end =
		(unsigned long *)cpu_pda[cpu].irqstackptr;
	int i;			/* current output column, for line wrapping */
	unsigned used = 0;	/* bitmask of exception stacks already visited */

	printk("\nCall Trace:");

/*
 * Scan words off the stack while `cond` holds, printing each one that is a
 * kernel text address.  NOTE: this deliberately reads `do while (cond)
 * { ... } while (0)` — the do/while(0) wrapper encloses an inner while
 * loop, so the macro is a single statement that iterates on `cond`.
 * Uses/updates the enclosing `addr`, `stack` and `i` variables.
 */
#define HANDLE_STACK(cond) \
	do while (cond) { \
		addr = *stack++; \
		if (kernel_text_address(addr)) { \
			/* \
			 * If the address is either in the text segment of the \
			 * kernel, or in the region which contains vmalloc'ed \
			 * memory, it *may* be the address of a calling \
			 * routine; if so, print it so that someone tracing \
			 * down the cause of the crash will be able to figure \
			 * out the call path that was taken. \
			 */ \
			i += printk_address(addr); \
			if (i > 50) { \
				printk("\n   "); \
				i = 0; \
			} \
			else \
				i += printk(" "); \
		} \
	} while (0)

	for(i = 0; ; ) {
		const char *id;
		unsigned long *estack_end;

		estack_end = in_exception_stack(cpu, (unsigned long)stack,
						&used, &id);
		if (estack_end) {
			i += printk(" <%s> ", id);
			HANDLE_STACK (stack < estack_end);
			i += printk(" <EOE> ");
			/* Link to the interrupted stack: second-to-last word. */
			stack = (unsigned long *) estack_end[-2];
			continue;
		}
		if (irqstack_end) {
			unsigned long *irqstack;
			/* Base of the usable IRQ stack (64 bytes reserved at the bottom). */
			irqstack = irqstack_end -
				(IRQSTACKSIZE - 64) / sizeof(*irqstack);

			if (stack >= irqstack && stack < irqstack_end) {
				i += printk(" <IRQ> ");
				HANDLE_STACK (stack < irqstack_end);
				/* Link to the process stack: last word of the IRQ stack. */
				stack = (unsigned long *) (irqstack_end[-1]);
				/* Only visit the IRQ stack once. */
				irqstack_end = NULL;
				i += printk(" <EOI> ");
				continue;
			}
		}
		break;
	}

	/* Finally, walk the process stack up to its THREAD_SIZE boundary. */
	HANDLE_STACK (((long) stack & (THREAD_SIZE-1)) != 0);
#undef HANDLE_STACK
	printk("\n");
}