Example #1
int
dtrace_getustackdepth(void)
{
	proc_t *p = curproc;
	struct reg *tf;
	uintptr_t pc, fp, sp;
	int n = 0;

	if (p == NULL || (tf = curthread->tf) == NULL)
		return (0);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);
		
#ifdef __amd64
	if (p->p_model == DATAMODEL_NATIVE) {	
		CONTEXT ct;
		uint64_t pcstack[100];
		int pcstack_limit = 100;
		
		winos_reg_to_context(&ct, tf);
		n += winos_unwind_user_stack(&ct, pcstack_limit, (uintptr_t) pcstack);
	} else {
#endif /* __amd64 */
	pc = tf->r_rip;
	fp = tf->r_rbp;
	sp = tf->r_rsp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * In an entry probe.  The frame pointer has not yet been
		 * pushed (that happens in the function prologue).  The
		 * best approach is to add the current pc as a missing top
		 * of stack and back the pc up to the caller, which is stored
		 * at the current stack pointer address since the call 
		 * instruction puts it there right before the branch.
		 */

		pc = dtrace_fuword32((void *) sp);
		n++;
	}

	n += dtrace_getustack_common(NULL, 0, pc, fp);
#ifdef __amd64
	}
#endif
	return (n);
}
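
All of the depth walkers on this page bottom out in dtrace_getustack_common(), which is not shown in these examples. As orientation, here is a minimal sketch of the shape such a helper typically has when walking a 32-bit frame-pointer chain; this is an assumption-laden sketch, not any platform's actual implementation, and the "+ 4" offset from the saved frame pointer to the saved return address is specific to 32-bit x86 frames.

/*
 * Hypothetical sketch only: follow the saved frame-pointer chain, reading
 * each word with the fault-tolerant fuword helpers.  When pcstack is NULL
 * the caller only wants a depth count.
 */
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
    uintptr_t fp)
{
	int ret = 0;

	while (pc != 0) {
		ret++;
		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (fp == 0)
			break;

		/* The return address sits just above the saved frame pointer. */
		pc = dtrace_fuword32((void *)(fp + 4));
		fp = dtrace_fuword32((void *)fp);
	}

	return (ret);
}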
Example #2
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, sp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
	int n;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (p == NULL || (tf = curthread->td_frame) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = tf->srr0;
	sp = tf->fixreg[1];

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * In an entry probe.  The stack frame has not yet been set
		 * up (that happens in the function prologue), so the best
		 * approach is to add the current pc as a missing top of
		 * stack and back the pc up to the caller.  On PowerPC the
		 * caller's address is still in the link register at this
		 * point rather than on the stack, so take it from there.
		 */

		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = tf->lr;
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
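
The output convention above is shared by every collector on this page: slot 0 holds the pid, the walked frames follow, and any unused tail is zero-filled. A purely hypothetical consumer, for illustration only (in the real system the buffer is handed back to the DTrace framework, not printed):

	uint64_t buf[16];
	int i;

	dtrace_getupcstack(buf, 16);
	/* buf[0] is the pid; frames follow; trailing slots are 0. */
	for (i = 1; i < 16 && buf[i] != 0; i++)
		printf("frame %d: 0x%llx\n", i - 1, (unsigned long long)buf[i]);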
Example #3
/*
 * The return value indicates if we've modified the stack.
 */
static int
dtrace_adjust_stack(uint64_t **pcstack, int *pcstack_limit, user_addr_t *pc,
                    user_addr_t sp)
{
    int64_t missing_tos;
    int rc = 0;
    boolean_t is64Bit = proc_is64bit(current_proc());

    ASSERT(pc != NULL);

    if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
        /*
         * If we found ourselves in an entry probe, the frame pointer has
         * not yet been pushed (that happens in the function prologue).
         * The best approach is to add the current pc as a missing top of
         * stack, and back the pc up to the caller, which is stored at the
         * current stack pointer address since the call instruction puts
         * it there right before the branch.
         */

        missing_tos = *pc;

        if (is64Bit)
            *pc = dtrace_fuword64(sp);
        else
            *pc = dtrace_fuword32(sp);
    } else {
        /*
         * We might have a top of stack override, in which case we just
         * add that frame without question to the top.  This happens in
         * return probes where you have a valid frame pointer, but it's
         * for the caller's frame and you'd like to add the pc of the
         * return site to the frame.
         */
        missing_tos = cpu_core[CPU->cpu_id].cpuc_missing_tos;
    }

    if (missing_tos != 0) {
        if (pcstack != NULL && pcstack_limit != NULL) {
            /*
             * If the missing top of stack has been filled out, then
             * we add it and adjust the size.
             */
            *(*pcstack)++ = missing_tos;
            (*pcstack_limit)--;
        }
        /*
         * Return 1 because we would have changed the stack whether or
         * not it was passed in.  This ensures the stack count is
         * correct.
         */
        rc = 1;
    }
    return rc;
}
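
dtrace_adjust_stack() is deliberately callable with or without an output buffer. A hypothetical fragment showing both modes (Example #5 below shows the real depth-only use; pc, sp, n, pcstack, and pcstack_limit are assumed to be set up as in the surrounding functions):

	/*
	 * Depth counting: no buffer is supplied, but the pc fix-up side
	 * effect still matters before walking the remaining frames.
	 */
	if (dtrace_adjust_stack(NULL, NULL, &pc, sp) == 1)
		n++;

	/*
	 * Collection: the helper stores the missing frame itself and
	 * advances both the cursor and the remaining limit.
	 */
	n += dtrace_adjust_stack(&pcstack, &pcstack_limit, &pc, sp);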
Example #4
int
dtrace_getustackdepth(void)
{
	thread_t thread = current_thread();
	ppc_saved_state_t *regs;
	user_addr_t pc, sp;
	int n = 0;
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (thread == NULL)
		return 0;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	regs = (ppc_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		return 0;

	pc = regs->REGPC;
	sp = regs->REGSP;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		n++;
		pc = regs->save_lr;
	}
	
	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_USTACK_FP)) {
		/*
		 * If the ustack fp flag is set, the stack frame from sp to
		 * fp contains no valid call information. Start with the fp.
		 */
		if (is64Bit)
			sp = dtrace_fuword64(sp);
		else
			sp = (user_addr_t)dtrace_fuword32(sp);
	}

	n += dtrace_getustack_common(NULL, 0, pc, sp);

	return (n);
}
Example #5
int
dtrace_getustackdepth(void)
{
	thread_t thread = current_thread();
	x86_saved_state_t *regs;
	user_addr_t pc, sp, fp;
	int n = 0;
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (thread == NULL)
		return 0;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	pal_register_cache_state(thread, VALID);
	regs = (x86_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		return 0;

	if (is64Bit) {
		pc = regs->ss_64.isf.rip;
		sp = regs->ss_64.isf.rsp;
		fp = regs->ss_64.rbp;
	} else {
		pc = regs->ss_32.eip;
		sp = regs->ss_32.uesp;
		fp = regs->ss_32.ebp;
	}

	if (dtrace_adjust_stack(NULL, NULL, &pc, sp) == 1) {
	    /*
	     * we would have adjusted the stack if we had
	     * supplied one (that is what rc == 1 means).
	     * Also, as a side effect, the pc might have
	     * been fixed up, which is good for calling
	     * in to dtrace_getustack_common.
	     */
	    n++;
	}
	
	/*
	 * Note that unlike ppc, the x86 code does not use
	 * CPU_DTRACE_USTACK_FP. This is because x86 always
	 * traces from the fp, even in syscall/profile/fbt
	 * providers.
	 */

	n += dtrace_getustack_common(NULL, 0, pc, fp);

	return (n);
}
Example #6
int
dtrace_getustackdepth(void)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, sp;
	int n = 0;

	if (p == NULL || (tf = curthread->td_frame) == NULL)
		return (0);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	pc = tf->srr0;
	sp = tf->fixreg[1];

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/* 
		 * In an entry probe.  The frame pointer has not yet been
		 * pushed (that happens in the function prologue).  The
		 * best approach is to add the current pc as a missing top
		 * of stack and back the pc up to the caller, which is stored
		 * at the current stack pointer address since the call 
		 * instruction puts it there right before the branch.
		 */

		if (SV_PROC_FLAG(p, SV_ILP32))
			pc = dtrace_fuword32((void *)sp);
		else
			pc = dtrace_fuword64((void *)sp);
		n++;
	}

	n += dtrace_getustack_common(NULL, 0, pc, sp);

	return (n);
}
Example #7
int
dtrace_getustackdepth(void)
{
    printk("need to do this dtrace_getustackdepth\n");
# if 0
    klwp_t *lwp = ttolwp(curthread);
    proc_t *p = curproc;
    struct regs *rp;
    uintptr_t pc, sp;
    int n = 0;

    if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
        return (0);

    if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
        return (-1);

    pc = rp->r_pc;
    sp = rp->r_fp;

    if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
        n++;

        if (dtrace_data_model(p) == DATAMODEL_NATIVE)
            pc = dtrace_fulword((void *)rp->r_sp);
        else
            pc = dtrace_fuword32((void *)rp->r_sp);
    }

    n += dtrace_getustack_common(NULL, 0, pc, sp);
    return (n);
# else
    TODO();
    return 0;
# endif

}
Example #8
/*ARGSUSED*/
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
    printk("need to do this dtrace_getufpstack\n");
# if 0
    klwp_t *lwp = ttolwp(curthread);
    proc_t *p = ttoproc(curthread);
    struct regs *rp;
    uintptr_t pc, sp, oldcontext;
    volatile uint8_t *flags =
        (volatile uint8_t *)&cpu_core[cpu_get_id()].cpuc_dtrace_flags;
    size_t s1, s2;

    if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
        return;

    if (*flags & CPU_DTRACE_FAULT)
        return;

    if (pcstack_limit <= 0)
        return;

    *pcstack++ = (uint64_t)p->p_pid;
    pcstack_limit--;

    if (pcstack_limit <= 0)
        return;

    pc = rp->r_pc;
    sp = rp->r_fp;
    oldcontext = lwp->lwp_oldcontext;

    if (dtrace_data_model(p) == DATAMODEL_NATIVE) {
        s1 = sizeof (struct frame) + 2 * sizeof (long);
        s2 = s1 + sizeof (siginfo_t);
    } else {
        s1 = sizeof (struct frame32) + 3 * sizeof (int);
        s2 = s1 + sizeof (siginfo32_t);
    }

    if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
        *pcstack++ = (uint64_t)pc;
        *fpstack++ = 0;
        pcstack_limit--;
        if (pcstack_limit <= 0)
            return;

        if (dtrace_data_model(p) == DATAMODEL_NATIVE)
            pc = dtrace_fulword((void *)rp->r_sp);
        else
            pc = dtrace_fuword32((void *)rp->r_sp);
    }

    while (pc != 0 && sp != 0) {
        *pcstack++ = (uint64_t)pc;
        *fpstack++ = sp;
        pcstack_limit--;
        if (pcstack_limit <= 0)
            break;

        if (oldcontext == sp + s1 || oldcontext == sp + s2) {
            if (dtrace_data_model(p) == DATAMODEL_NATIVE) {
                ucontext_t *ucp = (ucontext_t *)oldcontext;
                greg_t *gregs = ucp->uc_mcontext.gregs;

                sp = dtrace_fulword(&gregs[REG_FP]);
                pc = dtrace_fulword(&gregs[REG_PC]);

                oldcontext = dtrace_fulword(&ucp->uc_link);
            } else {
                ucontext_t *ucp = (ucontext_t *)oldcontext;
                greg_t *gregs = ucp->uc_mcontext.gregs;

                sp = dtrace_fuword32(&gregs[EBP]);
                pc = dtrace_fuword32(&gregs[EIP]);

                oldcontext = dtrace_fuword32(&ucp->uc_link);
            }
        } else {
            if (dtrace_data_model(p) == DATAMODEL_NATIVE) {
                struct frame *fr = (struct frame *)sp;

                pc = dtrace_fulword(&fr->fr_savpc);
                sp = dtrace_fulword(&fr->fr_savfp);
            } else {
                struct frame32 *fr = (struct frame32 *)sp;

                pc = dtrace_fuword32(&fr->fr_savpc);
                sp = dtrace_fuword32(&fr->fr_savfp);
            }
        }

        /*
         * This is totally bogus:  if we faulted, we're going to clear
         * the fault and break.  This is to deal with the apparently
         * broken Java stacks on x86.
         */
        if (*flags & CPU_DTRACE_FAULT) {
            *flags &= ~CPU_DTRACE_FAULT;
            break;
        }
    }

    while (pcstack_limit-- > 0)
        *pcstack++ = 0;
# endif
}
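
The s1/s2 arithmetic above is what detects a signal trampoline: when the kernel delivers a signal it builds, below the saved ucontext, a dummy struct frame plus the handler's arguments, and for SA_SIGINFO handlers a siginfo_t as well, while lwp_oldcontext remembers where the ucontext was left. A hypothetical restatement of the test, assuming the Solaris-style native types the code above uses:

static int
looks_like_signal_frame(uintptr_t sp, uintptr_t oldcontext)
{
	size_t s1 = sizeof (struct frame) + 2 * sizeof (long);	/* frame + args */
	size_t s2 = s1 + sizeof (siginfo_t);			/* ... + siginfo */

	/* The saved ucontext sits directly above the pushed handler frame. */
	return (oldcontext == sp + s1 || oldcontext == sp + s2);
}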
Example #9
/*ARGSUSED*/
void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
                  uint32_t *ignored)
{
    int	depth;
#if !defined(HAVE_STACKTRACE_OPS)
    int	lim;
    /***********************************************/
    /*   This  is  a basic stack walker - we don't */
    /*   care  about  omit-frame-pointer,  and we  */
    /*   can  have  false positives. We also don't */
    /*   handle  exception  stacks  properly - but */
    /*   this  is  for  older  kernels, where the  */
    /*   kernel  won't  help  us,  so they may not */
    /*   have exception stacks anyhow.             */
    /***********************************************/

    /***********************************************/
    /*   20121125 Let's use this always - it       */
    /*   avoids  kernel-specific  issues  in  the  */
    /*   official stack walker and will give us a  */
    /*   vehicle  later  for adding reliable vs    */
    /*   guess-work stack entries.                 */
    /***********************************************/
    cpu_core_t	*this_cpu = cpu_get_this();
    struct pt_regs *regs = this_cpu->cpuc_regs;
    struct thread_info *context;
    uintptr_t *sp;
    uintptr_t *spend;

    /***********************************************/
    /*   For   syscalls,  we  will  have  a  null  */
    /*   cpuc_regs,  since we don't intercept the  */
    /*   trap,   but   instead  intercept  the  C  */
    /*   syscall function.                         */
    /***********************************************/
    if (regs == NULL)
        sp = (uintptr_t *) &depth;
    else
        sp = (uintptr_t *) regs->r_rsp;

    /***********************************************/
    /*   Daisy-chain  the interrupt and any other  */
    /*   stacks.  Limit ourselves in case of stack */
    /*   corruption.                               */
    /***********************************************/
    DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
    depth = 0;
    for (lim = 0; lim < 3 && depth < pcstack_limit; lim++) {
        int	ndepth = depth;
        uintptr_t *prev_esp;

        context = (struct thread_info *) ((unsigned long) sp & (~(THREAD_SIZE - 1)));
        spend = (uintptr_t *) ((unsigned long) sp | (THREAD_SIZE - 1));
        for ( ; depth < pcstack_limit && sp < spend; sp++) {
            if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
                goto end_stack;
            if (*sp && is_kernel_text((unsigned long) *sp)) {
                pcstack[depth++] = *sp;
            }
        }
        if (depth >= pcstack_limit || ndepth == depth)
            break;

        prev_esp = (uintptr_t *) ((char *) context + sizeof(struct thread_info));
        if ((sp = prev_esp) == NULL)
            break;
        /***********************************************/
        /*   Special signal to mark the IRQ stack.     */
        /***********************************************/
        if (depth < pcstack_limit) {
            pcstack[depth++] = 1;
        }
    }
end_stack:
    DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
    DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_FAULT);
#else

    /***********************************************/
    /*   I'm  a  little tired of the kernel dying  */
    /*   in the callback, so let's avoid relying   */
    /*   on the kernel stack walker.               */
    /***********************************************/
    dmutex_enter(&dtrace_stack_mutex);
    g_depth = 0;
    g_pcstack = pcstack;
    g_pcstack_limit = pcstack_limit;

#if FUNC_DUMP_TRACE_ARGS == 6
    dump_trace(NULL, NULL, NULL, 0, &print_trace_ops, NULL);
#else
    dump_trace(NULL, NULL, NULL, &print_trace_ops, NULL);
#endif
    depth = g_depth;
    dmutex_exit(&dtrace_stack_mutex);
#endif

    while (depth < pcstack_limit)
        pcstack[depth++] = (pc_t) NULL;
}
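
Note that this walker is heuristic by design: it scans every word in the current thread-stack window and keeps anything that points into kernel text, which is exactly why false positives are possible. is_kernel_text() itself is not shown in the example; a minimal sketch of such a predicate, assuming the linker-provided _stext/_etext section bounds (recent kernels export core_kernel_text() for the same job, and a real helper would also need to cover module text):

static int
is_kernel_text(unsigned long addr)
{
	extern char _stext[], _etext[];	/* linker symbols bounding kernel text */

	return (addr >= (unsigned long)_stext &&
	    addr < (unsigned long)_etext);
}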
Example #10
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, sp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
#ifdef notyet	/* XXX signal stack */
	uintptr_t oldcontext;
	size_t s1, s2;
#endif

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (p == NULL || (tf = curthread->td_frame) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = tf->srr0;
	sp = tf->fixreg[1];

#ifdef notyet /* XXX signal stack */
	oldcontext = lwp->lwp_oldcontext;
	s1 = sizeof (struct xframe) + 2 * sizeof (long);
	s2 = s1 + sizeof (siginfo_t);
#endif

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (SV_PROC_FLAG(p, SV_ILP32))
			pc = dtrace_fuword32((void *)sp);
		else
			pc = dtrace_fuword64((void *)sp);
	}

	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (sp == 0)
			break;

#ifdef notyet /* XXX signal stack */
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			ucontext_t *ucp = (ucontext_t *)oldcontext;
			greg_t *gregs = ucp->uc_mcontext.gregs;

			sp = dtrace_fulword(&gregs[REG_FP]);
			pc = dtrace_fulword(&gregs[REG_PC]);

			oldcontext = dtrace_fulword(&ucp->uc_link);
		} else
#endif /* XXX */
		{
			if (SV_PROC_FLAG(p, SV_ILP32)) {
				pc = dtrace_fuword32((void *)(sp + RETURN_OFFSET));
				sp = dtrace_fuword32((void *)sp);
			} else {
				pc = dtrace_fuword64((void *)(sp + RETURN_OFFSET64));
				sp = dtrace_fuword64((void *)sp);
			}
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
Example #11
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	thread_t thread = current_thread();
	ppc_saved_state_t *regs;
	user_addr_t pc, sp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
#if 0
	uintptr_t oldcontext;
	size_t s1, s2;
#endif
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (thread == NULL)
		goto zero;

	regs = (ppc_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		goto zero;
		
	*pcstack++ = (uint64_t)proc_selfpid();
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = regs->REGPC;
	sp = regs->REGSP;
	
#if 0 /* XXX signal stack crawl*/
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}
#endif

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		/*
		 * XXX This is wrong, but we do not yet support stack helpers.
		 */
		if (is64Bit)
			pc = dtrace_fuword64(sp);
		else
			pc = dtrace_fuword32(sp);
	}

	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (sp == 0)
			break;

#if 0 /* XXX signal stack crawl*/
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} 
		else
#endif
		{
			if (is64Bit) {
				pc = dtrace_fuword64((sp + RETURN_OFFSET64));
				sp = dtrace_fuword64(sp);
			} else {
				pc = dtrace_fuword32((sp + RETURN_OFFSET));
				sp = dtrace_fuword32(sp);
			}
		}
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
Example #12
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	thread_t thread = current_thread();
	ppc_saved_state_t *regs;
	user_addr_t pc, sp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	int n;
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (thread == NULL)
		goto zero;

	regs = (ppc_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		goto zero;
		
	*pcstack++ = (uint64_t)proc_selfpid();
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = regs->REGPC;
	sp = regs->REGSP;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = regs->save_lr;
	}
	
	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_USTACK_FP)) {
		/*
		 * If the ustack fp flag is set, the stack frame from sp to
		 * fp contains no valid call information. Start with the fp.
		 */
		if (is64Bit)
			sp = dtrace_fuword64(sp);
		else
			sp = (user_addr_t)dtrace_fuword32(sp);
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
Example #13
/*
 * Get user stack entries up to the pcstack_limit; return the number of entries
 * acquired.  If pcstack is NULL, return the number of entries potentially
 * acquirable.
 */
unsigned long dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack,
				 int pcstack_limit)
{
	struct task_struct	*p = current;
	struct mm_struct	*mm = p->mm;
	unsigned long		tos, bos, fpc;
	unsigned long		*sp;
	unsigned long		depth = 0;
	struct vm_area_struct	*stack_vma;
	struct page		*stack_page = NULL;
	struct pt_regs		*regs = current_pt_regs();

	if (pcstack) {
		if (unlikely(pcstack_limit < 2)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return 0;
		}
		*pcstack++ = (uint64_t)p->pid;
		*pcstack++ = (uint64_t)p->tgid;
		pcstack_limit -= 2;
	}

	if (!user_mode(regs))
		goto out;

	/*
	 * There is always at least one address to report: the instruction
	 * pointer itself (frame 0).
	 */
	depth++;

	fpc = instruction_pointer(regs);
	if (pcstack) {
		*pcstack++ = (uint64_t)fpc;
		pcstack_limit--;
	}

	/*
	 * We cannot ustack() if this task has no mm, if this task is a kernel
	 * thread, or when someone else has the mmap_sem or the page_table_lock
	 * (because find_user_vma() ultimately does a __get_user_pages() and
	 * thence a follow_page(), which can take that lock).
	 */
	if (mm == NULL || (p->flags & PF_KTHREAD) ||
	    spin_is_locked(&mm->page_table_lock))
		goto out;

	if (!down_read_trylock(&mm->mmap_sem))
		goto out;
	atomic_inc(&mm->mm_users);

	/*
	 * The following construct can be replaced with:
	 * 	tos = current_user_stack_pointer();
	 * once support for 4.0 is no longer necessary.
	 */
#ifdef CONFIG_X86_64
	tos = current_pt_regs()->sp;
#else
	tos = user_stack_pointer(current_pt_regs());
#endif
	stack_vma = find_user_vma(p, mm, NULL, (unsigned long) tos, 0);
	if (!stack_vma ||
	    stack_vma->vm_start > (unsigned long) tos)
		goto unlock_out;

#ifdef CONFIG_STACK_GROWSUP
#error This code does not yet work on STACK_GROWSUP platforms.
#endif
	bos = stack_vma->vm_end;
	if (stack_guard_page_end(stack_vma, bos))
		bos -= PAGE_SIZE;

	/*
	 * If we have a pcstack, loop as long as we are within the stack limit.
	 * Otherwise, loop until we run out of stack.
	 */
	for (sp = (unsigned long *)tos;
	     sp <= (unsigned long *)bos &&
		     ((pcstack && pcstack_limit > 0) ||
		      !pcstack);
	     sp++) {
		struct vm_area_struct	*code_vma;
		unsigned long		addr;

		/*
		 * Recheck for faultedness and pin at page boundaries.
		 */
		if (!stack_page || (((unsigned long)sp & PAGE_MASK) == 0)) {
			if (stack_page) {
				put_page(stack_page);
				stack_page = NULL;
			}

			if (!find_user_vma(p, mm, &stack_page,
					   (unsigned long) sp, 1))
				break;
		}

		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
		get_user(addr, sp);
		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

		if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT)) {
			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_BADADDR);
			break;
		}

		if (addr == fpc)
			continue;

		code_vma = find_user_vma(p, mm, NULL, addr, 0);

		if (!code_vma || code_vma->vm_start > addr)
			continue;

		if ((addr >= tos && addr <= bos) ||
		    (code_vma->vm_flags & VM_GROWSDOWN)) {
			/* stack address - may need it for the fpstack. */
		} else if (code_vma->vm_flags & VM_EXEC) {
			if (pcstack) {
				*pcstack++ = addr;
				pcstack_limit--;
			}
			depth++;
		}
	}
	if (stack_page != NULL)
		put_page(stack_page);

unlock_out:
	atomic_dec(&mm->mm_users);
	up_read(&mm->mmap_sem);

out:
	if (pcstack)
		while (pcstack_limit--)
			*pcstack++ = 0;

	return depth;
}
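
Because the return value counts entries even when no buffer is supplied, a hypothetical caller can size a buffer first and fill it second. Note that the fpstack argument is accepted but never written in this variant, and that slots 0 and 1 of a supplied buffer receive the pid and tgid before any frames:

	uint64_t buf[32];
	unsigned long n;

	/* Pass 1: pcstack == NULL, just count what a walk would yield. */
	n = dtrace_getufpstack(NULL, NULL, 0);

	/* Pass 2: collect up to 32 entries (the limit must be at least 2). */
	n = dtrace_getufpstack(buf, NULL, 32);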
Example #14
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	proc_t *p = curproc;
	thread_t *td = curthread;
	
	struct reg *tf;
	uintptr_t pc, sp, fp;
	
	volatile uint16_t *flags =
#if defined(sun)
	   (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
#else	   
	   (volatile uint16_t *)&cpu_core[KeGetCurrentProcessorNumber()].cpuc_dtrace_flags;
#endif	
	int n = 0;
	CONTEXT ct;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (p == NULL || (tf = td->tf) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;
		
#ifdef __amd64
	if (p->p_model == DATAMODEL_NATIVE) {	
		winos_reg_to_context(&ct, tf);
		n = winos_unwind_user_stack(&ct, pcstack_limit, (uintptr_t) pcstack);
		pcstack = &pcstack[n];
		pcstack_limit -= n;
	} else {
#endif /* __amd64 */
	pc = tf->r_rip;
	fp = tf->r_rbp;
	sp = tf->r_rsp;

#ifdef notyet /* XXX signal stack */
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}
#endif

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = dtrace_fuword32((void *)sp);
	}

	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = fp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (fp == 0)
			break;

#ifdef notyet /* XXX signal stack */
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else
#endif /* XXX */
		{
			pc = dtrace_fuword32((void *)(fp + 4));
			fp = dtrace_fuword32((void *)fp);
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}
#ifdef __amd64
	}
#endif

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
	return;
}
Example #15
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	proc_t *p = curproc;
	thread_t *td = curthread;
	struct reg *tf;
	uintptr_t pc, sp, fp;
	volatile uint16_t *flags =
#if defined(sun)
	   (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
#else	   
	   (volatile uint16_t *)&cpu_core[KeGetCurrentProcessorNumber()].cpuc_dtrace_flags;
#endif	
	int n = 0;
	CONTEXT ct;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (p == NULL || (tf = td->tf) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;
		
#ifdef __amd64
	if (p->p_model == DATAMODEL_NATIVE) {	
		winos_reg_to_context(&ct, tf);
		n = winos_unwind_user_stack(&ct, pcstack_limit, (uintptr_t) pcstack);
		pcstack = &pcstack[n];
		pcstack_limit -= n;
	} else {
#endif /* __amd64 */
	pc = tf->r_rip;
	fp = tf->r_rbp;
	sp = tf->r_rsp;
	
	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * In an entry probe.  The frame pointer has not yet been
		 * pushed (that happens in the function prologue).  The
		 * best approach is to add the current pc as a missing top
		 * of stack and back the pc up to the caller, which is stored
		 * at the current stack pointer address since the call 
		 * instruction puts it there right before the branch.
		 */

		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = dtrace_fuword32((void *) sp);
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, fp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

#ifdef __amd64
	}
#endif

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}