Пример #1
0
/*ARGSUSED*/
/*
 * dtrace_getpcstack() - capture the current kernel call stack into
 * pcstack[] (at most pcstack_limit entries), padding the unused tail
 * with NULLs.  'aframes' and 'ignored' are part of the DTrace probe
 * interface and are unused in this implementation.
 */
void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
    uint32_t *ignored)
{
	int	depth;
#if !defined(HAVE_STACKTRACE_OPS)
	/***********************************************/
	/*   This  is  a basic stack walker - we dont  */
	/*   care  about  omit-frame-pointer,  and we  */
	/*   can  have  false positives. We also dont  */
	/*   handle  exception  stacks properly - but  */
	/*   this  is  for  older  kernels, where the  */
	/*   kernel  wont  help  us,  so they may not  */
	/*   have exception stacks anyhow.	       */
	/***********************************************/

	cpu_core_t	*this_cpu = cpu_get_this();
	struct pt_regs *regs = this_cpu->cpuc_regs;
	uintptr_t *sp;
	uintptr_t *spend;

	/***********************************************/
	/*   For  syscalls,  cpuc_regs may be NULL -   */
	/*   fall  back  to  walking  from  our  own   */
	/*   frame.  BUG FIX: derive sp from regs only */
	/*   after the NULL check, not before it.      */
	/***********************************************/
	if (regs == NULL)
		sp = (uintptr_t *) &depth;
	else
		sp = (uintptr_t *) &regs->r_rsp;

	spend = sp + THREAD_SIZE / sizeof(uintptr_t);

	for (depth = 0; depth < pcstack_limit && sp < spend; sp++) {
		/***********************************************/
		/*   BUG FIX: the old test "if (sp && ...)"    */
		/*   checked the iterator pointer, which is    */
		/*   always non-NULL inside the loop; the      */
		/*   intent is to skip NULL stack slots, so    */
		/*   test the slot value *sp instead.	       */
		/***********************************************/
		if (*sp && is_kernel_text((unsigned long) *sp)) {
			pcstack[depth++] = *sp;
		}
	}
#else

	/* Serialize use of the g_* globals shared with print_trace_ops. */
	dmutex_enter(&dtrace_stack_mutex);
	g_depth = 0;
	g_pcstack = pcstack;
	g_pcstack_limit = pcstack_limit;

#if FUNC_DUMP_TRACE_ARGS == 6
	dump_trace(NULL, NULL, NULL, 0, &print_trace_ops, NULL);
#else
	dump_trace(NULL, NULL, NULL, &print_trace_ops, NULL);
#endif
	depth = g_depth;
	dmutex_exit(&dtrace_stack_mutex);
#endif

	/* Pad the remainder so consumers always see pcstack_limit slots. */
	while (depth < pcstack_limit)
		pcstack[depth++] = (pc_t) NULL;
}
Пример #2
0
/*
 * Print a stack backtrace at the given printk log level.
 * @task:    task whose stack is walked (NULL for current, per dump_trace)
 * @regs:    register snapshot to start from, or NULL
 * @stack:   stack pointer to begin scanning at, or NULL
 * @log_lvl: printk level prefix string (e.g. KERN_ERR)
 *
 * Emits a "Call Trace:" header, then delegates the actual walk to
 * dump_trace() with the shared print_trace_ops callbacks.
 */
void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, char *log_lvl)
{
	printk("%sCall Trace:\n", log_lvl);
	dump_trace(task, regs, stack, &print_trace_ops, log_lvl);
}
Пример #3
0
/* Used for kernel backtrace printing when other mechanisms fail.
 * Fills a print_stack_data control block and hands the walk to
 * dump_trace() with the print_stack_ops callbacks; the dump_trace()
 * signature differs by kernel version, hence the #if below. */
static void _stp_stack_print_fallback(unsigned long stack,
				      int sym_flags, int levels, int skip)
{
        struct print_stack_data print_data;

        print_data.flags  = sym_flags;
        print_data.levels = levels;
        print_data.skip   = skip;

#if defined(STAPCONF_KERNEL_STACKTRACE)
        /* Newer kernels: dump_trace() takes an extra bp argument. */
        dump_trace(current, NULL, (long *)stack, 0, &print_stack_ops,
                   &print_data);
#else
	/* STAPCONF_KERNEL_STACKTRACE_NO_BP */
        dump_trace(current, NULL, (long *)stack, &print_stack_ops,
                   &print_data);
#endif
}
Пример #4
0
/*
 * Dump a kernel stack backtrace for @tsk (or for the current task when
 * @tsk is NULL), choosing the best mechanism the running kernel offers:
 * dump_trace() when available, the (ancient) exported show_task(), or
 * plain dump_stack() as a last resort for the current task only.
 */
void libcfs_debug_dumpstack(struct task_struct *tsk)
{
#if defined(HAVE_DUMP_TRACE)
        /* dump_stack() */
        /* show_trace() */
        if (tsk == NULL)
                tsk = current;
	printk("Pid: %d, comm: %.20s\n", tsk->pid, tsk->comm);
        /* show_trace_log_lvl() */
	printk("\nCall Trace:\n");
	/* some kernels insert a 'bp' argument into dump_trace() */
	dump_trace(tsk, NULL, NULL,
#ifdef HAVE_DUMP_TRACE_ADDRESS
                   0,
#endif /* HAVE_DUMP_TRACE_ADDRESS */
                   &print_trace_ops, NULL);
	printk("\n");
#elif defined(HAVE_SHOW_TASK)
        /* this is exported by lustre kernel version 42 */
        extern void show_task(struct task_struct *);

        if (tsk == NULL)
                tsk = current;
        CWARN("showing stack for process %d\n", tsk->pid);
        show_task(tsk);
#else
        /* dump_stack() can only describe the task we are running as */
        if ((tsk == NULL) || (tsk == current))
                dump_stack();
        else
                CWARN("can't show stack: kernel doesn't export show_task\n");
#endif
}
Пример #5
0
/*
 * Record a kernel-mode callchain for a perf sample.  User-mode register
 * snapshots carry no kernel stack to walk, so they are skipped; for
 * kernel mode the walk starts at the saved stack pointer (gpr 15 on
 * s390) and each frame is fed to __perf_callchain_kernel().
 */
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			   struct pt_regs *regs)
{
	if (!user_mode(regs))
		dump_trace(__perf_callchain_kernel, entry, NULL,
			   regs->gprs[15]);
}
Пример #6
0
/**
 * segvhandler()
 * ----------------
 * Handler for the SIGSEGV, Segmentation Fault.  Tries to issue a better
 * message than "Segmentation fault".  May not always clean up properly.
 *
 * FIX: signal handlers must have the signature void (*)(int); the old
 * declaration took no parameters and hard-coded "signal 11" in the
 * message even though the delivered signal number is available.
 *
 * NOTE(review): fprintf()/exit() are not async-signal-safe; tolerated
 * here only because the process is terminating anyway.
 **/
void segvhandler(int sig)
{
  dump_trace();
  
  fprintf(stderr, "Fatal: memory allocation error\n\n");
  fprintf(stderr, "If you suspect a bug, please report the exact circumstances under which this\n");
  fprintf(stderr, "error was generated.  If possible, include gdb or strace data which may point\n");
  fprintf(stderr, "out where the error occured.  Bug reports may be sent in to [email protected].\n\n");
  fprintf(stderr, "AirTraf process %u aborting on signal %d.\n\n", getpid(), sig);
  
  exit(2);
}
/*
 * Walk the kernel stack described by @regs and record each frame into
 * the trace arrays via backtrace_ops.  Returns the number of entries
 * recorded (info.pos, which starts at 1 for the leading IP entry).
 */
static int
trace_kernel(struct pt_regs *regs, struct trace_array *tr,
	     struct trace_array_cpu *data)
{
	struct backtrace_info info;
	unsigned long frame_ptr;
	char *stack_start;

	info.tr = tr;
	info.data = data;
	info.pos = 1;

	/* Record the interrupted instruction pointer first. */
	__trace_special(info.tr, info.data, 1, regs->ip, 0);

	/* The walkable stack begins right past the saved register frame. */
	stack_start = (char *)regs + sizeof(struct pt_regs);

#ifdef CONFIG_FRAME_POINTER
	frame_ptr = regs->bp;
#else
	frame_ptr = 0;	/* no frame pointer: unwinder falls back to scanning */
#endif

	dump_trace(NULL, regs, (void *)stack_start, frame_ptr,
		   &backtrace_ops, &info);

	return info.pos;
}
Пример #8
0
/*
 * Print a stack backtrace at the given printk log level.
 * @task:    task whose stack is walked (NULL for current, per dump_trace)
 * @regs:    register snapshot to start from, or NULL
 * @stack:   stack pointer to begin scanning at, or NULL
 * @bp:      frame pointer to seed the unwinder (0 if unknown)
 * @log_lvl: printk level prefix string (e.g. KERN_ERR)
 */
void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp, char *log_lvl)
{
	printk("%sCall Trace:\n", log_lvl);
	/* FIXME this should also dump into a buffer */
	dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
}
Пример #9
0
/*
 * Save the current task's stack-backtrace addresses into @trace,
 * starting from the live stack pointer.  An ULONG_MAX terminator is
 * appended when the buffer has room left.
 */
void save_stack_trace(struct stack_trace *trace)
{
	unsigned long stack_ptr = current_stack_pointer();

	dump_trace(save_address, trace, NULL, stack_ptr);
	if (trace->max_entries > trace->nr_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
Пример #10
0
/*
 * Save the stack-backtrace addresses described by @regs into @trace,
 * starting from the stack pointer captured in the register snapshot.
 * An ULONG_MAX terminator is appended when the buffer has room left.
 */
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	unsigned long stack_ptr = kernel_stack_pointer(regs);

	dump_trace(save_address, trace, NULL, stack_ptr);
	if (trace->max_entries > trace->nr_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
Пример #11
0
/*
 * Replay the instruction trace @rips against the probe CFG rooted at
 * the trace's final rip.  The trace is logged to invalid_log (final
 * rip missing from the type database), fail_log (the CFG cannot follow
 * the trace), or success_log, with the matching counter incremented.
 */
static void
validate_trace(Oracle *oracle, const std::vector<unsigned long> &rips)
{
	CfgLabelAllocator allocLabel;

	assert(!rips.empty());

	if (!oracle->type_db->ripPresent(DynAnalysisRip(VexRip::invent_vex_rip(rips.back())))) {
		dump_trace(invalid_log, rips);
		nr_invalid++;
		return;
	}

	HashedSet<HashedPtr<CFGNode> > roots;
	HashedSet<HashedPtr<const CFGNode> > targetNodes;
	getProbeCFGs(allocLabel, oracle, VexRip::invent_vex_rip(rips.back()),
		     roots, targetNodes, rips.size());

	/* Frontier of CFG nodes that could correspond to the current
	   trace position; seeded with every root. */
	std::set<CFGNode *> live;
	for (auto it = roots.begin(); !it.finished(); it.advance())
		live.insert(*it);
	for (auto it = rips.begin(); it != rips.end(); it++) {
		if (live.empty()) {
			dump_trace(fail_log, rips);
			nr_fail++;
			return;
		}
		std::set<CFGNode *> newLive;
		for (auto it2 = live.begin(); it2 != live.end(); it2++) {
			CFGNode *n = *it2;
			if (n->rip.unwrap_vexrip() == *it) {
				for (auto it3 = n->successors.begin();
				     it3 != n->successors.end();
				     it3++)
					newLive.insert(it3->instr);
			}
		}
		/* BUG FIX: advance the frontier.  The original computed
		   newLive and then discarded it, so @live never changed,
		   the empty() check could only fire for empty roots, and
		   no trace could ever fail mid-walk. */
		live = newLive;
	}

	dump_trace(success_log, rips);
	nr_success++;
}
Пример #12
0
/*
 * Save @tsk's stack-backtrace addresses into @trace.  For the running
 * task the live stack pointer is used; for a sleeping task the walk
 * starts from its saved kernel stack pointer (thread.ksp).  An
 * ULONG_MAX terminator is appended when the buffer has room left.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long stack_ptr;

	stack_ptr = (tsk == current) ? current_stack_pointer()
				     : tsk->thread.ksp;
	dump_trace(save_address_nosched, trace, tsk, stack_ptr);
	if (trace->max_entries > trace->nr_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
Пример #13
0
/*
 * Run NMC Monte Carlo proton histories and accumulate the hits into
 * collector @C.  Each particle starts with energy drawn from a normal
 * distribution N(emean, estdv); when @sigma > 0 the launch direction is
 * additionally smeared by N(0, sigma) around @angle and @azimuth.
 *
 * FIXES vs. the original:
 *  - malloc() result is checked before use;
 *  - the particle buffer is freed (it leaked on every call);
 *  - the two near-identical loops are merged (they differed only in
 *    the direction smearing);
 *  - the three normal() draws are explicitly sequenced as energy,
 *    angle, azimuth — the old MakeParticle(P, normal(...),
 *    angle+normal(...), ...) call left the RNG consumption order to
 *    the compiler's unspecified argument-evaluation order.
 */
void MonteCarlo(collector *C, int NMC, REAL emean, REAL estdv, REAL sigma, REAL angle, REAL azimuth) {
    particle	    *P;
    trace	    *T;
    int		    NF;
    unsigned int seed;
    int		    smear = (sigma > AS_REAL(0.0));
    NF = 0;
#if DEBUG
    printf("entering MonteCarlo for %d samples and storing into %lx (%d)\n",
	   NMC, (long unsigned int) C, C->NTOTAL);
#endif
/* create the trace and particle */
    fast_srand(&seed);
    T = alloc_trace();
    P = (particle *) malloc(sizeof(particle));
    if (P == NULL)
	return;		/* allocation failed; nothing to simulate */
/* the Monte Carlo loop */
    while (NF++ < NMC) {
	REAL e = normal(emean, estdv, &seed);
	REAL a = angle;
	REAL z = azimuth;
	if (smear) {
	    a += normal(AS_REAL(0.0), sigma, &seed);
	    z += normal(AS_REAL(0.0), sigma, &seed);
	}
	MakeParticle(P, e, a, z, &seed);
	reset_trace(T);
	while (P->energy > AS_REAL(0.0)) {
	    proton_event(P, T, &seed); /* stepping() */
	}
	if (TRACE) dump_trace(stdout, T, P);
	collect(C, P, T); /* Hits collection */
    }
    free(P);	/* was leaked by the original */
}
Пример #14
0
/*
 * Record a backtrace of at most @depth frames for an oprofile sample.
 * Kernel-mode samples are walked with dump_trace(); user-mode samples
 * follow the saved frame-pointer chain one frame at a time.
 */
void
x86_backtrace(struct pt_regs * const regs, unsigned int depth)
{
	struct frame_head *head = (struct frame_head *)frame_pointer(regs);

	if (user_mode_vm(regs)) {
		/* User mode: chase the frame-pointer linked list. */
		while (depth-- && head)
			head = dump_user_backtrace(head);
		return;
	}

	/* Kernel mode: let the generic unwinder do the walk. */
	if (depth) {
		unsigned long stack = kernel_stack_pointer(regs);

		dump_trace(NULL, regs, (unsigned long *)stack, 0,
			   &backtrace_ops, &depth);
	}
}
Пример #15
0
/*
 * Walk the kernel stack described by @regs and record each frame into
 * the trace arrays via backtrace_ops.  Returns the number of entries
 * recorded (info.pos, which starts at 1 for the leading IP entry).
 */
static int
trace_kernel(struct pt_regs *regs, struct trace_array *tr,
             struct trace_array_cpu *data)
{
    struct backtrace_info info;
    char *stack_start;

    info.tr = tr;
    info.data = data;
    info.pos = 1;

    /* Record the interrupted instruction pointer first. */
    __trace_special(info.tr, info.data, 1, regs->ip, 0);

    /* The walkable stack begins right past the saved register frame. */
    stack_start = (char *)regs + sizeof(struct pt_regs);
    dump_trace(NULL, regs, (void *)stack_start, &backtrace_ops, &info);

    return info.pos;
}
Пример #16
0
/*
 * Print a call trace for @tsk.  With stacktrace ops available the walk
 * goes through dump_trace() (whose signature varies by kernel version);
 * otherwise only the current task can be dumped, via dump_stack().
 */
static void libcfs_call_trace(struct task_struct *tsk)
{
#ifdef HAVE_STACKTRACE_OPS
	printk("Pid: %d, comm: %.20s\n", tsk->pid, tsk->comm);
	printk("\nCall Trace:\n");
	/* some kernels insert a 'bp' argument into dump_trace() */
	dump_trace(tsk, NULL, NULL,
#ifdef HAVE_DUMP_TRACE_ADDRESS
		   0,
#endif /* HAVE_DUMP_TRACE_ADDRESS */
		   &print_trace_ops, NULL);
	printk("\n");
#else /* !HAVE_STACKTRACE_OPS */
	if (tsk == current)
		dump_stack();
	else
		CWARN("can't show stack: kernel doesn't export show_task\n");
#endif /* HAVE_STACKTRACE_OPS */
}
Пример #17
0
/*
 * Record a backtrace of at most @depth frames for an oprofile sample.
 * Kernel-mode samples go through the generic unwinder; user-mode
 * samples try the 32-bit compat walker first and otherwise follow the
 * 64-bit frame-pointer chain.
 */
void
x86_backtrace(struct pt_regs * const regs, unsigned int depth)
{
	struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);

	if (!user_mode_vm(regs)) {
		unsigned long ksp = kernel_stack_pointer(regs);

		/* An SP exactly at the thread-stack base carries no
		 * frames; signal that to dump_trace() with NULL. */
		if (!(ksp & (THREAD_SIZE - 1)))
			ksp = 0;

		if (depth)
			dump_trace(NULL, regs, (unsigned long *)ksp, 0,
				   &backtrace_ops, &depth);
		return;
	}

	/* 32-bit compat task?  Its walker consumes the sample fully. */
	if (x86_backtrace_32(regs, depth))
		return;

	while (depth-- && head)
		head = dump_user_backtrace(head);
}
Пример #18
0
/*
 * Check the assertion guarding @instruction in symbolic state @state.
 * Builds the formula "state guard AND NOT(assertion condition)"; if it
 * is satisfiable the assertion can be violated, so a counterexample
 * trace is computed, dumped, and error_state_found is set.
 */
void simulator_ctt::execute_assert(
  statet &state,
  const program_formulat::formula_goto_programt::instructiont &instruction)
{
  std::cout << "CHECKING ASSERTION\n";

  // the assertion condition, instantiated for the current state
  formulat condition=
    instantiate(state, 0, instruction.guard);
    
  // reachable AND assertion false
  formulat property=
    formula_container.gen_and(
      state.data().guard,
      formula_container.gen_not(condition));

  // see if it is reachable
  if(!property.is_false() &&
     is_satisfiable(property))
  {
    tracet trace;

    compute_trace(state, trace, true);
    dump_trace(trace, instruction);
    std::cout << "Assertion violated" << std::endl;
    std::cout << std::endl;

    error_state_found=true;
  }
  
  #if 0
  else
  {
    // otherwise, treat this like an assumption
    state.data_w().guard=
      formula_container.gen_and(state.data().guard, condition);
  }
  #endif
}
Пример #19
0
/*
 * Record a kernel backtrace of at most @depth frames for an oprofile
 * sample.  User-mode samples carry no kernel stack and are skipped;
 * the walk starts at the saved stack pointer (gpr 15 on s390).
 */
static void s390_backtrace(struct pt_regs *regs, unsigned int depth)
{
	if (!user_mode(regs))
		dump_trace(__s390_backtrace, &depth, NULL, regs->gprs[15]);
}
Пример #20
0
/*
 * Capture and unwind the user-mode stack of @task into @stk.
 *
 * Steps: (1) optionally unwind the kernel stack to recover the user
 * frame pointer; (2) obtain user-mode registers; (3) validate IP/SP
 * against the task's VMAs and fix up the stack base; (4) run the
 * unwinder under a locked user-VM accessor, growing the buffer on
 * VTSS_ERR_NOMEMORY.  Returns 0 on success or a negative errno-style
 * code; diagnostic text is pushed through vtss_record_debug_info().
 */
int vtss_stack_dump(struct vtss_transport_data* trnd, stack_control_t* stk, struct task_struct* task, struct pt_regs* regs, void* reg_fp, int in_irq)
{
    int rc;
    user_vm_accessor_t* acc;
    void* stack_base = stk->bp.vdp;
    void *reg_ip, *reg_sp;

    /* Without a register snapshot there is nothing to unwind. */
    if (unlikely(regs == NULL)) {
        rc = snprintf(stk->dbgmsg, sizeof(stk->dbgmsg)-1, "tid=0x%08x, cpu=0x%08x: incorrect regs",
                        task->pid, smp_processor_id());
        if (rc > 0 && rc < sizeof(stk->dbgmsg)-1) {
            stk->dbgmsg[rc] = '\0';
            vtss_record_debug_info(trnd, stk->dbgmsg, 0);
        }
        return -EFAULT;
    }
    stk->dbgmsg[0] = '\0';

    /* Get IP and SP registers from current space */
    reg_ip = (void*)REG(ip, regs);
    reg_sp = (void*)REG(sp, regs);

#if defined(CONFIG_X86_64) && defined(VTSS_AUTOCONF_STACKTRACE_OPS_WALK_STACK)
    { /* Unwind kernel stack and get user BP if possible */
        unsigned long bp = 0UL;
        unsigned long kstart = (unsigned long)__START_KERNEL_map + ((CONFIG_PHYSICAL_START + (CONFIG_PHYSICAL_ALIGN - 1)) & ~(CONFIG_PHYSICAL_ALIGN - 1));

#ifdef VTSS_AUTOCONF_DUMP_TRACE_HAVE_BP
        dump_trace(task, NULL, NULL, 0, &vtss_stack_ops, &bp);
#else
        dump_trace(task, NULL, NULL, &vtss_stack_ops, &bp);
#endif
//        TRACE("bp=0x%p <=> fp=0x%p", (void*)bp, reg_fp);
        /* Prefer the bp recovered by the walker over the caller's fp. */
        reg_fp = bp ? (void*)bp : reg_fp;
#ifdef VTSS_DEBUG_TRACE
        /* A "user" frame pointer above kstart is still in kernel space —
         * report it, it indicates a bad unwind. */
        if (reg_fp > (void*)kstart) {
            printk("Warning: bp=0x%p in kernel\n", reg_fp);
            dump_stack();
            rc = snprintf(stk->dbgmsg, sizeof(stk->dbgmsg)-1, "tid=0x%08x, cpu=0x%08x, ip=0x%p, sp=[0x%p,0x%p]: User bp=0x%p inside kernel space",
                            task->pid, smp_processor_id(), reg_ip, reg_sp, stack_base, reg_fp);
            if (rc > 0 && rc < sizeof(stk->dbgmsg)-1) {
                stk->dbgmsg[rc] = '\0';
                vtss_record_debug_info(trnd, stk->dbgmsg, 0);
            }
        }
#endif
    }
#endif /* CONFIG_X86_64 && VTSS_AUTOCONF_STACKTRACE_OPS_WALK_STACK */

    if (unlikely(!user_mode_vm(regs))) {
        /* kernel mode regs, so get a user mode regs */
#if defined(CONFIG_X86_64) || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38)
        regs = task_pt_regs(task); /*< get user mode regs */
        if (regs == NULL || !user_mode_vm(regs))
#endif
        {
#ifdef VTSS_DEBUG_TRACE
            strcat(stk->dbgmsg, "Cannot get user mode regs");
            vtss_record_debug_info(trnd, stk->dbgmsg, 0);
            printk("Warning: %s\n", stk->dbgmsg);
            dump_stack();
#endif
            return -EFAULT;
        }
    }

    /* Get IP and SP registers from user space */
    reg_ip = (void*)REG(ip, regs);
    reg_sp = (void*)REG(sp, regs);

    { /* Check for correct stack range in task->mm */
        struct vm_area_struct* vma;

#ifdef VTSS_CHECK_IP_IN_MAP
        /* Check IP in module map: it must fall in an executable file
         * mapping or in the vdso, otherwise the sample is rejected. */
        vma = find_vma(task->mm, (unsigned long)reg_ip);
        if (likely(vma != NULL)) {
            unsigned long vm_start = vma->vm_start;
            unsigned long vm_end   = vma->vm_end;

            if ((unsigned long)reg_ip < vm_start ||
                (!((vma->vm_flags & (VM_EXEC | VM_WRITE)) == VM_EXEC &&
                    vma->vm_file && vma->vm_file->f_dentry) &&
                 !(vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)))
            {
#ifdef VTSS_DEBUG_TRACE
                rc = snprintf(stk->dbgmsg, sizeof(stk->dbgmsg)-1, "tid=0x%08x, cpu=0x%08x, ip=0x%p, sp=[0x%p,0x%p], fp=0x%p, found_vma=[0x%lx,0x%lx]: Unable to find executable module",
                                task->pid, smp_processor_id(), reg_ip, reg_sp, stack_base, reg_fp, vm_start, vm_end);
                if (rc > 0 && rc < sizeof(stk->dbgmsg)-1) {
                    stk->dbgmsg[rc] = '\0';
                    vtss_record_debug_info(trnd, stk->dbgmsg, 0);
                }
#endif
                return -EFAULT;
            }
        } else {
#ifdef VTSS_DEBUG_TRACE
            rc = snprintf(stk->dbgmsg, sizeof(stk->dbgmsg)-1, "tid=0x%08x, cpu=0x%08x, ip=0x%p, sp=[0x%p,0x%p], fp=0x%p: Unable to find executable region",
                            task->pid, smp_processor_id(), reg_ip, reg_sp, stack_base, reg_fp);
            if (rc > 0 && rc < sizeof(stk->dbgmsg)-1) {
                stk->dbgmsg[rc] = '\0';
                vtss_record_debug_info(trnd, stk->dbgmsg, 0);
            }
#endif
            return -EFAULT;
        }
#endif /* VTSS_CHECK_IP_IN_MAP */

        /* Check SP in module map: it must sit in a readable+writable
         * mapping (skipping the guard page of a grows-down stack). */
        vma = find_vma(task->mm, (unsigned long)reg_sp);
        if (likely(vma != NULL)) {
            unsigned long vm_start = vma->vm_start + ((vma->vm_flags & VM_GROWSDOWN) ? PAGE_SIZE : 0UL);
            unsigned long vm_end   = vma->vm_end;

//            TRACE("vma=[0x%lx - 0x%lx], flags=0x%lx", vma->vm_start, vma->vm_end, vma->vm_flags);
            if ((unsigned long)reg_sp < vm_start ||
                (vma->vm_flags & (VM_READ | VM_WRITE)) != (VM_READ | VM_WRITE))
            {
#ifdef VTSS_DEBUG_TRACE
                rc = snprintf(stk->dbgmsg, sizeof(stk->dbgmsg)-1, "tid=0x%08x, cpu=0x%08x, ip=0x%p, sp=[0x%p,0x%p], fp=0x%p, found_vma=[0x%lx,0x%lx]: Unable to find user stack boundaries",
                                task->pid, smp_processor_id(), reg_ip, reg_sp, stack_base, reg_fp, vm_start, vm_end);
                if (rc > 0 && rc < sizeof(stk->dbgmsg)-1) {
                    stk->dbgmsg[rc] = '\0';
                    vtss_record_debug_info(trnd, stk->dbgmsg, 0);
                }
#endif
                return -EFAULT;
            }
            /* Cached stack base outside this VMA (or not above SP):
             * re-derive it from the VMA end and drop the cached state. */
            if (!((unsigned long)stack_base >= vm_start &&
                  (unsigned long)stack_base <= vm_end)  ||
                 ((unsigned long)stack_base <= (unsigned long)reg_sp))
            {
                if ((unsigned long)stack_base != 0UL) {
                    TRACE("Fixup stack base to 0x%lx instead of 0x%lx", vm_end, (unsigned long)stack_base);
                }
                stack_base = (void*)vm_end;
                stk->clear(stk);
#ifdef VTSS_STACK_LIMIT
                stack_base = (void*)min((unsigned long)reg_sp + VTSS_STACK_LIMIT, vm_end);
                if ((unsigned long)stack_base != vm_end) {
                    TRACE("Limiting stack base to 0x%lx instead of 0x%lx, drop 0x%lx bytes", (unsigned long)stack_base, vm_end, (vm_end - (unsigned long)stack_base));
                }
            } else {
                stack_base = (void*)min((unsigned long)reg_sp + VTSS_STACK_LIMIT, vm_end);
                if ((unsigned long)stack_base != vm_end) {
                    TRACE("Limiting stack base to 0x%lx instead of 0x%lx, drop 0x%lx bytes", (unsigned long)stack_base, vm_end, (vm_end - (unsigned long)stack_base));
                }
#endif /* VTSS_STACK_LIMIT */
            }
        }
    }

#ifdef VTSS_DEBUG_TRACE
    /* Create a common header for debug message */
    rc = snprintf(stk->dbgmsg, sizeof(stk->dbgmsg)-1, "tid=0x%08x, cpu=0x%08x, ip=0x%p, sp=[0x%p,0x%p], fp=0x%p: USER STACK: ",
                    task->pid, smp_processor_id(), reg_ip, reg_sp, stack_base, reg_fp);
    if (!(rc > 0 && rc < sizeof(stk->dbgmsg)-1))
        rc = 0;
    stk->dbgmsg[rc] = '\0';
#else
    stk->dbgmsg[0] = '\0';
#endif

    /* Unchanged context since the previous sample: skip the unwind. */
    if (stk->ip.vdp == reg_ip &&
        stk->sp.vdp == reg_sp &&
        stk->bp.vdp == stack_base &&
        stk->fp.vdp == reg_fp)
    {
        strcat(stk->dbgmsg, "The same context");
        vtss_record_debug_info(trnd, stk->dbgmsg, 0);
        return 0; /* Assume that nothing was changed */
    }

    /* Try to lock vm accessor */
    acc = vtss_user_vm_accessor_init(in_irq, vtss_time_limit);
    if (unlikely((acc == NULL) || acc->trylock(acc, task))) {
        vtss_user_vm_accessor_fini(acc);
        strcat(stk->dbgmsg, "Unable to lock vm accessor");
        vtss_record_debug_info(trnd, stk->dbgmsg, 0);
        return -EBUSY;
    }

    /* stk->setup(stk, acc, reg_ip, reg_sp, stack_base, reg_fp, stk->wow64); */
    stk->acc    = acc;
    stk->ip.vdp = reg_ip;
    stk->sp.vdp = reg_sp;
    stk->bp.vdp = stack_base;
    stk->fp.vdp = reg_fp;
    VTSS_PROFILE(unw, rc = stk->unwind(stk));
    /* Check unwind result */
    if (unlikely(rc == VTSS_ERR_NOMEMORY)) {
        /* Try again with realloced buffer */
        while (rc == VTSS_ERR_NOMEMORY && !stk->realloc(stk)) {
            VTSS_PROFILE(unw, rc = stk->unwind(stk));
        }
        if (rc == VTSS_ERR_NOMEMORY) {
            strcat(stk->dbgmsg, "Not enough memory - ");
        }
    }
    vtss_user_vm_accessor_fini(acc);
    if (unlikely(rc)) {
        stk->clear(stk);
        strcat(stk->dbgmsg, "Unwind error");
        vtss_record_debug_info(trnd, stk->dbgmsg, 0);
    }
    return rc;
}
Пример #21
0
/*
 * Save the stack-backtrace addresses described by @regs into @trace
 * for the current task.  An ULONG_MAX terminator is appended when the
 * buffer has room left.
 */
void save_stack_trace_regs(struct stack_trace *trace, struct pt_regs *regs)
{
	dump_trace(current, regs, NULL, 0, &save_stack_ops, trace);

	if (trace->max_entries > trace->nr_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
Пример #22
0
/*
 * Save @tsk's stack-backtrace addresses into @trace, skipping scheduler
 * frames (save_stack_ops_nosched).  An ULONG_MAX terminator is appended
 * when the buffer has room left.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);

	if (trace->max_entries > trace->nr_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
Пример #23
0
/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace, struct task_struct *task)
{
	dump_trace(task, NULL, NULL, &save_stack_ops, trace);
	/* BUG FIX: guard the terminator write.  The unconditional store
	 * could write one slot past the end when dump_trace() filled the
	 * buffer exactly; every sibling variant performs this check. */
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
Пример #24
0
/* 测试函数 2 */
/* Test function 2: exercises dump_trace() with no surrounding state. */
void test_meloner() {
    dump_trace();
}
Пример #25
0
/*
 * Save the current task's stack-backtrace addresses into @trace (no
 * frame-pointer seed).  An ULONG_MAX terminator is appended when the
 * buffer has room left.
 */
void save_stack_trace(struct stack_trace *trace)
{
	dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace);

	if (trace->max_entries > trace->nr_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
Пример #26
0
/*
 * Save the current task's stack-backtrace addresses into @trace,
 * seeding the unwinder with frame pointer @bp.  An ULONG_MAX
 * terminator is appended when the buffer has room left.
 */
void save_stack_trace_bp(struct stack_trace *trace, unsigned long bp)
{
	dump_trace(current, NULL, NULL, bp, &save_stack_ops, trace);

	if (trace->max_entries > trace->nr_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
Пример #27
0
/*ARGSUSED*/
/*
 * dtrace_getpcstack() - capture the current kernel call stack into
 * pcstack[] (at most pcstack_limit entries), padding the unused tail
 * with NULLs.  'aframes' and 'ignored' are part of the DTrace probe
 * interface and are unused in this implementation.
 */
void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
                  uint32_t *ignored)
{
    int	depth;
#if !defined(HAVE_STACKTRACE_OPS)
    int	lim;
    /***********************************************/
    /*   This  is  a basic stack walker - we dont  */
    /*   care  about  omit-frame-pointer,  and we  */
    /*   can  have  false positives. We also dont  */
    /*   handle  exception  stacks properly - but  */
    /*   this  is  for  older  kernels, where the  */
    /*   kernel  wont  help  us,  so they may not  */
    /*   have exception stacks anyhow.	       */
    /***********************************************/

    /***********************************************/
    /*   20121125 Lets use this always - it avoid  */
    /*   kernel  specific  issues in the official  */
    /*   stack  walker and will give us a vehicle  */
    /*   later  for adding reliable vs guess-work  */
    /*   stack entries.			       */
    /***********************************************/
    cpu_core_t	*this_cpu = cpu_get_this();
    struct pt_regs *regs = this_cpu->cpuc_regs;
    struct thread_info *context;
    uintptr_t *sp;
    uintptr_t *spend;

    /***********************************************/
    /*   For   syscalls,  we  will  have  a  null  */
    /*   cpuc_regs,  since  we dont intercept the  */
    /*   trap,   but   instead  intercept  the  C  */
    /*   syscall function.			       */
    /***********************************************/
    if (regs == NULL)
        sp = (uintptr_t *) &depth;
    else
        sp = (uintptr_t *) regs->r_rsp;

    /***********************************************/
    /*   Daisy  chain the interrupt and any other  */
    /*   stacks.  Limit  ourselves in case of bad  */
    /*   corruptions.			       */
    /***********************************************/
    /* NOFAULT: a bad pointer during the scan sets CPU_DTRACE_FAULT
       instead of oopsing; we poll for it inside the loop. */
    DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
    depth = 0;
    /* lim < 3: walk at most three chained stacks. */
    for (lim = 0; lim < 3 && depth < pcstack_limit; lim++) {
        int	ndepth = depth;
        uintptr_t *prev_esp;

        /* context = base of this stack; spend = its top (THREAD_SIZE
           aligned/rounded from the current scan pointer). */
        context = (struct thread_info *) ((unsigned long) sp & (~(THREAD_SIZE - 1)));
        spend = (uintptr_t *) ((unsigned long) sp | (THREAD_SIZE - 1));
        for ( ; depth < pcstack_limit && sp < spend; sp++) {
            if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
                goto end_stack;
            /* Keep only slots that look like kernel text addresses. */
            if (*sp && is_kernel_text((unsigned long) *sp)) {
                pcstack[depth++] = *sp;
            }
        }
        /* Stop if the buffer is full or this stack yielded nothing. */
        if (depth >= pcstack_limit || ndepth == depth)
            break;

        /* Saved previous stack pointer lives just past thread_info;
           follow it to the next (e.g. interrupted) stack. */
        prev_esp = (uintptr_t *) ((char *) context + sizeof(struct thread_info));
        if ((sp = prev_esp) == NULL)
            break;
        /***********************************************/
        /*   Special signal to mark the IRQ stack.     */
        /***********************************************/
        if (depth < pcstack_limit) {
            pcstack[depth++] = 1;
        }
    }
end_stack:
    DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
    DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_FAULT);
#else

    /***********************************************/
    /*   I'm  a  little tired of the kernel dying  */
    /*   in  the  callback, so lets avoid relying  */
    /*   on the kernel stack walker.	       */
    /***********************************************/
    /* Serialize use of the g_* globals shared with print_trace_ops. */
    dmutex_enter(&dtrace_stack_mutex);
    g_depth = 0;
    g_pcstack = pcstack;
    g_pcstack_limit = pcstack_limit;

#if FUNC_DUMP_TRACE_ARGS == 6
    dump_trace(NULL, NULL, NULL, 0, &print_trace_ops, NULL);
#else
    dump_trace(NULL, NULL, NULL, &print_trace_ops, NULL);
#endif
    depth = g_depth;
    dmutex_exit(&dtrace_stack_mutex);
#endif

    /* Pad the remainder so consumers always see pcstack_limit slots. */
    while (depth < pcstack_limit)
        pcstack[depth++] = (pc_t) NULL;
}
Пример #28
0
/*
 * Fatal-signal handler: print a short notice plus a backtrace, then
 * terminate with exit status 3.
 *
 * FIX: the parameter was named 'signal', shadowing the signal()
 * function from <signal.h> inside the handler body.
 *
 * NOTE(review): printf()/exit() are not async-signal-safe; tolerated
 * here only because the process is terminating anyway.
 */
void handler(int sig)
{
	printf("===> handle segfault \n");
	dump_trace();
	exit(3);
}