Example #1
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
    uintptr_t sp)
{
	proc_t *p = curproc;
	int ret = 0;

	ASSERT(pcstack == NULL || pcstack_limit > 0);

	while (pc != 0) {
		ret++;
		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (sp == 0)
			break;

		if (SV_PROC_FLAG(p, SV_ILP32)) {
			pc = dtrace_fuword32((void *)(sp + RETURN_OFFSET));
			sp = dtrace_fuword32((void *)sp);
		} else {
			pc = dtrace_fuword64((void *)(sp + RETURN_OFFSET64));
			sp = dtrace_fuword64((void *)sp);
		}
	}

	return (ret);
}
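
A minimal userland sketch of the same frame-pointer walk as Example #1, assuming the conventional frame layout that RETURN_OFFSET/RETURN_OFFSET64 encode (saved frame pointer at *sp, return address one word above it). The names below are illustrative and not part of the DTrace sources; build with -fno-omit-frame-pointer so the chain is intact.

#include <stdint.h>
#include <stdio.h>

/* Walk this process's own frame-pointer chain, mirroring the loop above. */
static int
walk_own_stack(uint64_t *pcstack, int pcstack_limit)
{
	uintptr_t fp = (uintptr_t)__builtin_frame_address(0);
	int ret = 0;

	while (fp != 0 && ret < pcstack_limit) {
		/* The return address sits one word above the saved frame pointer. */
		uintptr_t pc = *(uintptr_t *)(fp + sizeof(uintptr_t));
		uintptr_t next = *(uintptr_t *)fp;

		if (pc == 0)
			break;
		pcstack[ret++] = (uint64_t)pc;
		/* Sanity check: caller frames live at higher addresses. */
		if (next <= fp)
			break;
		fp = next;
	}
	return (ret);
}

int
main(void)
{
	uint64_t pcs[16];
	int i, n = walk_own_stack(pcs, 16);

	for (i = 0; i < n; i++)
		printf("frame %d: %#llx\n", i, (unsigned long long)pcs[i]);
	return (0);
}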
Example #2
static uint64_t
fasttrap_anarg(struct reg *rp, int function_entry, int argno)
{
	uint64_t value = 0;
	int shift = function_entry ? 1 : 0;

#ifdef __amd64
	if (curproc->p_model == DATAMODEL_LP64) {
		uintptr_t *stack;

		/*
		 * In 64-bit mode, the first six arguments are stored in
		 * registers.
		 */
		if (argno < 6)
			return ((&rp->r_rdi)[argno]);

		stack = (uintptr_t *)rp->r_rsp;
		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
		value = dtrace_fulword(&stack[argno - 6 + shift]);
		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT | CPU_DTRACE_BADADDR);
	} else {
#endif
#ifdef __i386
		uint32_t *stack = (uint32_t *)rp->r_esp;
		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
		value = dtrace_fuword32(&stack[argno + shift]);
		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT | CPU_DTRACE_BADADDR);
#endif
#ifdef __amd64
	}
#endif

	return (value);
}
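
The amd64 branch above relies on struct reg storing the six SysV integer-argument registers contiguously in ABI order; the illustrative snippet below (not part of the fasttrap sources) just spells out that order and the stack-slot arithmetic the "shift" variable performs.

/*
 * Illustrative only: SysV amd64 passes integer/pointer arguments 0..5 in
 * these registers, in this order, so (&rp->r_rdi)[argno] is correct only
 * if struct reg lays the members out contiguously in exactly this order.
 */
static const char *const amd64_arg_regs[] = {
	"rdi", "rsi", "rdx", "rcx", "r8", "r9"
};

/*
 * Arguments 6 and up live on the stack.  At a function-entry probe the
 * return address has just been pushed, so every stack argument sits one
 * slot further from %rsp; that is what "shift" accounts for above.
 */
static inline int
stack_arg_slot(int argno, int function_entry)
{
	return (argno - 6 + (function_entry ? 1 : 0));
}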
Example #3
File: dtrace_isa.c  Project: 0xffea/xnu
/*
 * The return value indicates if we've modified the stack.
 */
static int
dtrace_adjust_stack(uint64_t **pcstack, int *pcstack_limit, user_addr_t *pc,
                    user_addr_t sp)
{
    int64_t missing_tos;
    int rc = 0;
    boolean_t is64Bit = proc_is64bit(current_proc());

    ASSERT(pc != NULL);

    if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
        /*
         * If we found ourselves in an entry probe, the frame pointer has not
         * yet been pushed (that happens in the function prologue).  The best
         * approach is to add the current pc as a missing top of stack and
         * back the pc up to the caller, which is stored at the current stack
         * pointer address since the call instruction puts it there right
         * before the branch.
         */

        missing_tos = *pc;

        if (is64Bit)
            *pc = dtrace_fuword64(sp);
        else
            *pc = dtrace_fuword32(sp);
    } else {
        /*
         * We might have a top of stack override, in which case we just add
         * that frame without question to the top.  This happens in return
         * probes where you have a valid frame pointer, but it's for the
         * caller's frame and you'd like to add the pc of the return site to
         * the frame.
         */
        missing_tos = cpu_core[CPU->cpu_id].cpuc_missing_tos;
    }

    if (missing_tos != 0) {
        if (pcstack != NULL && pcstack_limit != NULL) {
            /*
             * If the missing top of stack has been filled out, then
             * we add it and adjust the size.
             */
            *(*pcstack)++ = missing_tos;
            (*pcstack_limit)--;
        }
        /*
         * Return 1 because we would have changed the stack whether or not
         * it was passed in.  This ensures the stack count is correct.
         */
        rc = 1;
    }
    return rc;
}
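
For how this helper is invoked, see dtrace_getufpstack in Example #10 further down; the call site there, condensed:

	if (dtrace_adjust_stack(&pcstack, &pcstack_limit, &pc, sp) == 1) {
		/* A missing top-of-stack entry was added; keep fpstack in step. */
		*fpstack++ = 0;
		if (pcstack_limit <= 0)
			return;
	}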
Example #4
int
dtrace_getustackdepth(void)
{
	proc_t *p = curproc;
	struct reg *tf;
	uintptr_t pc, fp, sp;
	int n = 0;

	if (p == NULL || (tf = curthread->tf) == NULL)
		return (0);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);
		
#ifdef __amd64
	if (p->p_model == DATAMODEL_NATIVE) {	
		CONTEXT ct;
		uint64_t pcstack[100];
		int pcstack_limit = 100;
		
		winos_reg_to_context(&ct, tf);
		n += winos_unwind_user_stack(&ct, pcstack_limit, (uintptr_t) pcstack);
	} else {
#endif /* __amd64 */
	pc = tf->r_rip;
	fp = tf->r_rbp;
	sp = tf->r_rsp;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * In an entry probe.  The frame pointer has not yet been
		 * pushed (that happens in the function prologue).  The
		 * best approach is to add the current pc as a missing top
		 * of stack and back the pc up to the caller, which is stored
		 * at the current stack pointer address since the call 
		 * instruction puts it there right before the branch.
		 */

		pc = dtrace_fuword32((void *) sp);
		n++;
	}

	n += dtrace_getustack_common(NULL, 0, pc, fp);
#ifdef __amd64
	}
#endif
	return (n);
}
Example #5
int
dtrace_getustackdepth(void)
{
	thread_t thread = current_thread();
	ppc_saved_state_t *regs;
	user_addr_t pc, sp;
	int n = 0;
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (thread == NULL)
		return 0;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	regs = (ppc_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		return 0;

	pc = regs->REGPC;
	sp = regs->REGSP;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		n++;
		pc = regs->save_lr;
	}
	
	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_USTACK_FP)) {
		/*
		 * If the ustack fp flag is set, the stack frame from sp to
		 * fp contains no valid call information. Start with the fp.
		 */
		if (is64Bit)
			sp = dtrace_fuword64(sp);
		else
			sp = (user_addr_t)dtrace_fuword32(sp);
	}

	n += dtrace_getustack_common(NULL, 0, pc, sp);

	return (n);
}
Example #6
int
dtrace_getustackdepth(void)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, sp;
	int n = 0;

	if (p == NULL || (tf = curthread->td_frame) == NULL)
		return (0);

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	pc = tf->srr0;
	sp = tf->fixreg[1];

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/* 
		 * In an entry probe.  The frame pointer has not yet been
		 * pushed (that happens in the function prologue).  The
		 * best approach is to add the current pc as a missing top
		 * of stack and back the pc up to the caller, which is stored
		 * at the current stack pointer address since the call 
		 * instruction puts it there right before the branch.
		 */

		if (SV_PROC_FLAG(p, SV_ILP32)) {
			pc = dtrace_fuword32((void *) sp);
		} else {
			pc = dtrace_fuword64((void *) sp);
		}
		n++;
	}

	n += dtrace_getustack_common(NULL, 0, pc, sp);

	return (n);
}
Example #7
int
dtrace_getustackdepth(void)
{
    printk("need to do this dtrace_getustackdepth\n");
# if 0
    klwp_t *lwp = ttolwp(curthread);
    proc_t *p = curproc;
    struct regs *rp;
    uintptr_t pc, sp;
    int n = 0;

    if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
        return (0);

    if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
        return (-1);

    pc = rp->r_pc;
    sp = rp->r_fp;

    if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
        n++;

        if (dtrace_data_model(p) == DATAMODEL_NATIVE)
            pc = dtrace_fulword((void *)rp->r_sp);
        else
            pc = dtrace_fuword32((void *)rp->r_sp);
    }

    n += dtrace_getustack_common(NULL, 0, pc, sp);
    return (n);
# else
    TODO();
    return 0;
# endif

}
Example #8
/*ARGSUSED*/
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
    printk("need to do this dtrace_getufpstack\n");
# if 0
    klwp_t *lwp = ttolwp(curthread);
    proc_t *p = ttoproc(curthread);
    struct regs *rp;
    uintptr_t pc, sp, oldcontext;
    volatile uint8_t *flags =
        (volatile uint8_t *)&cpu_core[cpu_get_id()].cpuc_dtrace_flags;
    size_t s1, s2;

    if (lwp == NULL || p == NULL || (rp = lwp->lwp_regs) == NULL)
        return;

    if (*flags & CPU_DTRACE_FAULT)
        return;

    if (pcstack_limit <= 0)
        return;

    *pcstack++ = (uint64_t)p->p_pid;
    pcstack_limit--;

    if (pcstack_limit <= 0)
        return;

    pc = rp->r_pc;
    sp = rp->r_fp;
    oldcontext = lwp->lwp_oldcontext;

    if (dtrace_data_model(p) == DATAMODEL_NATIVE) {
        s1 = sizeof (struct frame) + 2 * sizeof (long);
        s2 = s1 + sizeof (siginfo_t);
    } else {
        s1 = sizeof (struct frame32) + 3 * sizeof (int);
        s2 = s1 + sizeof (siginfo32_t);
    }

    if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
        *pcstack++ = (uint64_t)pc;
        *fpstack++ = 0;
        pcstack_limit--;
        if (pcstack_limit <= 0)
            return;

        if (dtrace_data_model(p) == DATAMODEL_NATIVE)
            pc = dtrace_fulword((void *)rp->r_sp);
        else
            pc = dtrace_fuword32((void *)rp->r_sp);
    }

    while (pc != 0 && sp != 0) {
        *pcstack++ = (uint64_t)pc;
        *fpstack++ = sp;
        pcstack_limit--;
        if (pcstack_limit <= 0)
            break;

        if (oldcontext == sp + s1 || oldcontext == sp + s2) {
            if (dtrace_data_model(p) == DATAMODEL_NATIVE) {
                ucontext_t *ucp = (ucontext_t *)oldcontext;
                greg_t *gregs = ucp->uc_mcontext.gregs;

                sp = dtrace_fulword(&gregs[REG_FP]);
                pc = dtrace_fulword(&gregs[REG_PC]);

                oldcontext = dtrace_fulword(&ucp->uc_link);
            } else {
                ucontext_t *ucp = (ucontext_t *)oldcontext;
                greg_t *gregs = ucp->uc_mcontext.gregs;

                sp = dtrace_fuword32(&gregs[EBP]);
                pc = dtrace_fuword32(&gregs[EIP]);

                oldcontext = dtrace_fuword32(&ucp->uc_link);
            }
        } else {
            if (dtrace_data_model(p) == DATAMODEL_NATIVE) {
                struct frame *fr = (struct frame *)sp;

                pc = dtrace_fulword(&fr->fr_savpc);
                sp = dtrace_fulword(&fr->fr_savfp);
            } else {
                struct frame32 *fr = (struct frame32 *)sp;

                pc = dtrace_fuword32(&fr->fr_savpc);
                sp = dtrace_fuword32(&fr->fr_savfp);
            }
        }

        /*
         * This is totally bogus:  if we faulted, we're going to clear
         * the fault and break.  This is to deal with the apparently
         * broken Java stacks on x86.
         */
        if (*flags & CPU_DTRACE_FAULT) {
            *flags &= ~CPU_DTRACE_FAULT;
            break;
        }
    }

    while (pcstack_limit-- > 0)
        *pcstack++ = 0;
# endif
}
Example #9
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	proc_t *p = curproc;
	struct trapframe *tf;
	uintptr_t pc, sp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
#ifdef notyet	/* XXX signal stack */
	uintptr_t oldcontext;
	size_t s1, s2;
#endif

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (p == NULL || (tf = curthread->td_frame) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->p_pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = tf->srr0;
	sp = tf->fixreg[1];

#ifdef notyet /* XXX signal stack */
	oldcontext = lwp->lwp_oldcontext;
	s1 = sizeof (struct xframe) + 2 * sizeof (long);
	s2 = s1 + sizeof (siginfo_t);
#endif

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		if (SV_PROC_FLAG(p, SV_ILP32)) {
			pc = dtrace_fuword32((void *)sp);
		} else {
			pc = dtrace_fuword64((void *)sp);
		}
	}

	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (sp == 0)
			break;

#ifdef notyet /* XXX signal stack */
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			ucontext_t *ucp = (ucontext_t *)oldcontext;
			greg_t *gregs = ucp->uc_mcontext.gregs;

			sp = dtrace_fulword(&gregs[REG_FP]);
			pc = dtrace_fulword(&gregs[REG_PC]);

			oldcontext = dtrace_fulword(&ucp->uc_link);
		} else
#endif /* XXX */
		{
			if (SV_PROC_FLAG(p, SV_ILP32)) {
				pc = dtrace_fuword32((void *)(sp + RETURN_OFFSET));
				sp = dtrace_fuword32((void *)sp);
			} else {
				pc = dtrace_fuword64((void *)(sp + RETURN_OFFSET64));
				sp = dtrace_fuword64((void *)sp);
			}
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
Example #10
File: dtrace_isa.c  Project: 0xffea/xnu
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	thread_t thread = current_thread();
	savearea_t *regs;
	user_addr_t pc, sp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
#if 0
	uintptr_t oldcontext;
	size_t s1, s2;
#endif
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (thread == NULL)
		goto zero;

	regs = (savearea_t *)find_user_regs(thread);
	if (regs == NULL)
		goto zero;
		
	*pcstack++ = (uint64_t)proc_selfpid();
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = regs->ss_32.eip;
	sp = regs->ss_32.ebp;
	
#if 0 /* XXX signal stack crawl */
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}
#endif

	if (dtrace_adjust_stack(&pcstack, &pcstack_limit, &pc, sp) == 1) {
		/*
		 * We made a change.
		 */
		*fpstack++ = 0;
		if (pcstack_limit <= 0)
			return;
	}

	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (sp == 0)
			break;

#if 0 /* XXX signal stack crawl */
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} 
		else
#endif
		{
			if (is64Bit) {
				pc = dtrace_fuword64((sp + RETURN_OFFSET64));
				sp = dtrace_fuword64(sp);
			} else {
				pc = dtrace_fuword32((sp + RETURN_OFFSET));
				sp = dtrace_fuword32(sp);
			}
		}

#if 0 /* XXX */
		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
#endif
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
Example #11
File: dtrace_isa.c  Project: 0xffea/xnu
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, user_addr_t pc,
    user_addr_t sp)
{
#if 0
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;

	uintptr_t oldcontext = lwp->lwp_oldcontext; /* XXX signal stack crawl */
	size_t s1, s2;
#endif
	int ret = 0;
	boolean_t is64Bit = proc_is64bit(current_proc());

	ASSERT(pcstack == NULL || pcstack_limit > 0);
	
#if 0 /* XXX signal stack crawl */
	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}
#endif

	while (pc != 0) {
		ret++;
		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (sp == 0)
			break;

#if 0 /* XXX signal stack crawl */
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} 
		else
#endif
		{
			if (is64Bit) {
				pc = dtrace_fuword64((sp + RETURN_OFFSET64));
				sp = dtrace_fuword64(sp);
			} else {
				pc = dtrace_fuword32((sp + RETURN_OFFSET));
				sp = dtrace_fuword32(sp);
			}
		}

#if 0 /* XXX */
		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
#endif
	}

	return (ret);
}
Example #12
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	thread_t thread = current_thread();
	ppc_saved_state_t *regs;
	user_addr_t pc, sp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
#if 0
	uintptr_t oldcontext;
	size_t s1, s2;
#endif
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (thread == NULL)
		goto zero;

	regs = (ppc_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		goto zero;
		
	*pcstack++ = (uint64_t)proc_selfpid();
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = regs->REGPC;
	sp = regs->REGSP;
	
#if 0 /* XXX signal stack crawl*/
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}
#endif

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		/*
		 * XXX This is wrong, but we do not yet support stack helpers.
		 */
		if (is64Bit)
			pc = dtrace_fuword64(sp);
		else
			pc = dtrace_fuword32(sp);
	}

	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (sp == 0)
			break;

#if 0 /* XXX signal stack crawl*/
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} 
		else
#endif
		{
			if (is64Bit) {
				pc = dtrace_fuword64((sp + RETURN_OFFSET64));
				sp = dtrace_fuword64(sp);
			} else {
				pc = dtrace_fuword32((sp + RETURN_OFFSET));
				sp = dtrace_fuword32(sp);
			}
		}
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
Example #13
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	thread_t thread = current_thread();
	ppc_saved_state_t *regs;
	user_addr_t pc, sp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	int n;
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (thread == NULL)
		goto zero;

	regs = (ppc_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		goto zero;
		
	*pcstack++ = (uint64_t)proc_selfpid();
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = regs->REGPC;
	sp = regs->REGSP;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = regs->save_lr;
	}
	
	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_USTACK_FP)) {
		/*
		 * If the ustack fp flag is set, the stack frame from sp to
		 * fp contains no valid call information. Start with the fp.
		 */
		if (is64Bit)
			sp = dtrace_fuword64(sp);
		else
			sp = (user_addr_t)dtrace_fuword32(sp);
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
Example #14
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	proc_t *p = curproc;
	thread_t *td = curthread;
	
	struct reg *tf;
	uintptr_t pc, sp, fp;
	
	volatile uint16_t *flags =
#if defined(sun)
	   (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
#else	   
	   (volatile uint16_t *)&cpu_core[KeGetCurrentProcessorNumber()].cpuc_dtrace_flags;
#endif	
	int n = 0;
	CONTEXT ct;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (p == NULL || (tf = td->tf) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;
		
#ifdef __amd64
	if (p->p_model == DATAMODEL_NATIVE) {	
		winos_reg_to_context(&ct, tf);
		n = winos_unwind_user_stack(&ct, pcstack_limit, (uintptr_t) pcstack);
		pcstack = &pcstack[n];
		pcstack_limit -= n;
	} else {
#endif /* __amd64 */
	pc = tf->r_rip;
	fp = tf->r_rbp;
	sp = tf->r_rsp;

#ifdef notyet /* XXX signal stack */
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}
#endif

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = dtrace_fuword32((void *)sp);
	}

	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = fp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (fp == 0)
			break;

#ifdef notyet /* XXX signal stack */
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else
#endif /* XXX */
		{
			pc = dtrace_fuword32((void *)(fp + 4));
			fp = dtrace_fuword32((void *)fp);
		}

		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}
#ifdef __amd64
	}
#endif

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
	return;
}
Example #15
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	proc_t *p = curproc;
	thread_t *td = curthread;
	struct reg *tf;
	uintptr_t pc, sp, fp;
	volatile uint16_t *flags =
#if defined(sun)
	   (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
#else	   
	   (volatile uint16_t *)&cpu_core[KeGetCurrentProcessorNumber()].cpuc_dtrace_flags;
#endif	
	int n = 0;
	CONTEXT ct;

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (p == NULL || (tf = td->tf) == NULL)
		goto zero;

	*pcstack++ = (uint64_t)p->pid;
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;
		
#ifdef __amd64
	if (p->p_model == DATAMODEL_NATIVE) {	
		winos_reg_to_context(&ct, tf);
		n = winos_unwind_user_stack(&ct, pcstack_limit, (uintptr_t) pcstack);
		pcstack = &pcstack[n];
		pcstack_limit -= n;
	} else {
#endif /* __amd64 */
	pc = tf->r_rip;
	fp = tf->r_rbp;
	sp = tf->r_rsp;
	
	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		/*
		 * In an entry probe.  The frame pointer has not yet been
		 * pushed (that happens in the function prologue).  The
		 * best approach is to add the current pc as a missing top
		 * of stack and back the pc up to the caller, which is stored
		 * at the current stack pointer address since the call 
		 * instruction puts it there right before the branch.
		 */

		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = dtrace_fuword32((void *) sp);
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, fp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

#ifdef __amd64
	}
#endif

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
Example #16
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, uintptr_t pc,
    uintptr_t sp)
{
#ifdef notyet
	proc_t *p = curproc;
	uintptr_t oldcontext = lwp->lwp_oldcontext; /* XXX signal stack. */
	size_t s1, s2;
#endif
	volatile uint16_t *flags =
#if defined(sun)
	   (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;
#else	   
	   (volatile uint16_t *)&cpu_core[KeGetCurrentProcessorNumber()].cpuc_dtrace_flags;
#endif	
	int ret = 0;

	ASSERT(pcstack == NULL || pcstack_limit > 0);

#ifdef notyet /* XXX signal stack. */
	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}
#endif

	while (pc != 0) {
		ret++;
		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}
		if (sp == 0)
			break;
			
#if defined(sun) 	 /* XXX signal stack. */ 
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} else {
			if (p->p_model == DATAMODEL_NATIVE) {
				struct frame *fr = (struct frame *)sp;

				pc = dtrace_fulword(&fr->fr_savpc);
				sp = dtrace_fulword(&fr->fr_savfp);
			} else {
				struct frame32 *fr = (struct frame32 *)sp;

				pc = dtrace_fuword32(&fr->fr_savpc);
				sp = dtrace_fuword32(&fr->fr_savfp);
			}
		}
#else
		pc = dtrace_fuword32((void *)(sp + 4));
		sp = dtrace_fuword32((void *)sp);
#endif
		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
	}
	return (ret);
}