Example #1
static void
profile_tick(void *arg)
{
	profile_probe_t *prof = arg;

#if defined(__x86_64__)
	x86_saved_state_t *kern_regs = find_kern_regs(current_thread());

	if (NULL != kern_regs) {
		/* Kernel was interrupted. */
		dtrace_probe(prof->prof_id, saved_state64(kern_regs)->isf.rip,  0x0, 0, 0, 0);
	} else {
		pal_register_cache_state(current_thread(), VALID);
		/* Possibly a user interrupt */
		x86_saved_state_t   *tagged_regs = (x86_saved_state_t *)find_user_regs(current_thread());

		if (NULL == tagged_regs) {
			/* Too bad, so sad, no useful interrupt state. */
			dtrace_probe(prof->prof_id, 0xcafebabe,
			    0x0, 0, 0, 0); /* XXX_BOGUS also see profile_usermode() below. */
		} else if (is_saved_state64(tagged_regs)) {
			x86_saved_state64_t *regs = saved_state64(tagged_regs);

			dtrace_probe(prof->prof_id, 0x0, regs->isf.rip, 0, 0, 0);
		} else {
			x86_saved_state32_t *regs = saved_state32(tagged_regs);

			dtrace_probe(prof->prof_id, 0x0, regs->eip, 0, 0, 0);
		}
	}
#else
#error Unknown architecture
#endif
}
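
Every example on this page funnels into the same entry point. For reference, a minimal sketch of the prototype follows, matching the illumos/FreeBSD declaration in sys/dtrace.h; on xnu the five payload arguments are declared as uint64_t rather than uintptr_t.

/*
 * Fire the probe identified by `id', passing up to five word-sized
 * values that surface as arg0..arg4 in D scripts.
 */
extern void dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1,
    uintptr_t arg2, uintptr_t arg3, uintptr_t arg4);
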
Example #2
File: fbt.c Project: coolgoose85/FreeBSD
static int
fbt_invop(uintptr_t addr, uintptr_t *stack, uintptr_t rval)
{
	solaris_cpu_t *cpu = &solaris_cpu[curcpu];
	uintptr_t stack0, stack1, stack2, stack3, stack4;
	fbt_probe_t *fbt = fbt_probetab[FBT_ADDR2NDX(addr)];

	for (; fbt != NULL; fbt = fbt->fbtp_hashnext) {
		if ((uintptr_t)fbt->fbtp_patchpoint == addr) {
			fbt->fbtp_invop_cnt++;
			if (fbt->fbtp_roffset == 0) {
				int i = 0;
				/*
				 * When accessing the arguments on the stack,
				 * we must protect against accessing beyond
				 * the stack.  We can safely set NOFAULT here
				 * -- we know that interrupts are already
				 * disabled.
				 */
				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
				cpu->cpu_dtrace_caller = stack[i++];
				stack0 = stack[i++];
				stack1 = stack[i++];
				stack2 = stack[i++];
				stack3 = stack[i++];
				stack4 = stack[i++];
				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT |
				    CPU_DTRACE_BADADDR);

				dtrace_probe(fbt->fbtp_id, stack0, stack1,
				    stack2, stack3, stack4);

				cpu->cpu_dtrace_caller = 0;
			} else {
#ifdef __amd64__
				/*
				 * On amd64, we instrument the ret, not the
				 * leave.  We therefore need to set the caller
				 * to assure that the top frame of a stack()
				 * action is correct.
				 */
				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
				cpu->cpu_dtrace_caller = stack[0];
				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT |
				    CPU_DTRACE_BADADDR);
#endif

				dtrace_probe(fbt->fbtp_id, fbt->fbtp_roffset,
				    rval, 0, 0, 0);
				cpu->cpu_dtrace_caller = 0;
			}

			return (fbt->fbtp_rval);
		}
	}

	return (0);
}
Example #3
/**
 * interface_method_impl{SUPDRVTRACERREG,pfnProbeFireKernel}
 */
static DECLCALLBACK(void) vboxDtTOps_ProbeFireKernel(struct VTGPROBELOC *pVtgProbeLoc, uintptr_t uArg0, uintptr_t uArg1, uintptr_t uArg2,
        uintptr_t uArg3, uintptr_t uArg4)
{
    AssertPtrReturnVoid(pVtgProbeLoc);
    LOG_DTRACE(("%s: %p / %p\n", __FUNCTION__, pVtgProbeLoc, pVtgProbeLoc->idProbe));
    AssertPtrReturnVoid(pVtgProbeLoc->pProbe);
    AssertPtrReturnVoid(pVtgProbeLoc->pszFunction);

    VBDT_SETUP_STACK_DATA(kVBoxDtCaller_ProbeFireKernel);

    pStackData->u.ProbeFireKernel.pauStackArgs  = &uArg4 + 1;

#if defined(RT_OS_DARWIN) && ARCH_BITS == 32
    /*
     * Convert arguments from uintptr_t to uint64_t.
     */
    PVTGDESCPROBE   pProbe   = (PVTGDESCPROBE)((PVTGPROBELOC)pVtgProbeLoc)->pProbe;
    AssertPtrReturnVoid(pProbe);
    PVTGOBJHDR      pVtgHdr  = (PVTGOBJHDR)((uintptr_t)pProbe + pProbe->offObjHdr);
    AssertPtrReturnVoid(pVtgHdr);
    PVTGDESCARGLIST pArgList = (PVTGDESCARGLIST)((uintptr_t)pVtgHdr + pVtgHdr->offArgLists + pProbe->offArgList);
    AssertPtrReturnVoid(pArgList);
    if (!pArgList->fHaveLargeArgs)
        dtrace_probe(pVtgProbeLoc->idProbe, uArg0, uArg1, uArg2, uArg3, uArg4);
    else
    {
        uintptr_t *auSrcArgs = &uArg0;
        uint32_t   iSrcArg   = 0;
        uint32_t   iDstArg   = 0;
        uint64_t   au64DstArgs[5];

        while (   iDstArg < RT_ELEMENTS(au64DstArgs)
                  && iSrcArg < pArgList->cArgs)
        {
            au64DstArgs[iDstArg] = auSrcArgs[iSrcArg];
            if (VTG_TYPE_IS_LARGE(pArgList->aArgs[iDstArg].fType))
                au64DstArgs[iDstArg] |= (uint64_t)auSrcArgs[++iSrcArg] << 32;
            iSrcArg++;
            iDstArg++;
        }
        while (iDstArg < RT_ELEMENTS(au64DstArgs))
            au64DstArgs[iDstArg++] = auSrcArgs[iSrcArg++];

        pStackData->u.ProbeFireKernel.pauStackArgs = &auSrcArgs[iSrcArg];
        dtrace_probe(pVtgProbeLoc->idProbe, au64DstArgs[0], au64DstArgs[1], au64DstArgs[2], au64DstArgs[3], au64DstArgs[4]);
    }
#else
    dtrace_probe(pVtgProbeLoc->idProbe, uArg0, uArg1, uArg2, uArg3, uArg4);
#endif

    VBDT_CLEAR_STACK_DATA();
    LOG_DTRACE(("%s: returns\n", __FUNCTION__));
}
Example #4
File: fbt_isa.c Project: Lxg1582/freebsd
int
fbt_invop(uintptr_t addr, uintptr_t *stack, uintptr_t rval)
{
	struct trapframe *frame = (struct trapframe *)stack;
	solaris_cpu_t *cpu = &solaris_cpu[curcpu];
	fbt_probe_t *fbt = fbt_probetab[FBT_ADDR2NDX(addr)];
	uintptr_t tmp;

	for (; fbt != NULL; fbt = fbt->fbtp_hashnext) {
		if ((uintptr_t)fbt->fbtp_patchpoint == addr) {
			fbt->fbtp_invop_cnt++;
			if (fbt->fbtp_roffset == 0) {
				cpu->cpu_dtrace_caller = addr;

				dtrace_probe(fbt->fbtp_id, frame->fixreg[3],
				    frame->fixreg[4], frame->fixreg[5],
				    frame->fixreg[6], frame->fixreg[7]);

				cpu->cpu_dtrace_caller = 0;
			} else {

				dtrace_probe(fbt->fbtp_id, fbt->fbtp_roffset,
				    rval, 0, 0, 0);
				/*
				 * The caller doesn't have the fbt item, so
				 * fixup tail calls here.
				 */
				if (fbt->fbtp_rval == DTRACE_INVOP_JUMP) {
					frame->srr0 = (uintptr_t)fbt->fbtp_patchpoint;
					tmp = fbt->fbtp_savedval & FBT_BR_MASK;
					/* Sign extend. */
					if (tmp & 0x02000000)
#ifdef __powerpc64__
						tmp |= 0xfffffffffc000000ULL;
#else
						tmp |= 0xfc000000UL;
#endif
					frame->srr0 += tmp;
				}
				cpu->cpu_dtrace_caller = 0;
			}

			return (fbt->fbtp_rval);
		}
	}

	return (0);
}
Example #5
/*
 * Probe callback function.
 *
 * Note: This function is called for _all_ syscalls, regardless of which sysent
 *       array the syscall comes from. It could be a standard syscall or a
 *       compat syscall from something like Linux.
 */
static void
systrace_probe(u_int32_t id, int sysnum, struct sysent *sysent, void *params)
{
	int		n_args	= 0;
	u_int64_t	uargs[8];

	/*
	 * Check if this syscall has an argument conversion function
	 * registered.
	 */
	if (sysent->sy_systrace_args_func != NULL)
		/*
		 * Convert the syscall parameters using the registered
		 * function.
		 */
		(*sysent->sy_systrace_args_func)(sysnum, params, uargs, &n_args);
	else
		/*
		 * Use the built-in system call argument conversion
		 * function to translate the syscall structure fields
		 * into the array of 64-bit values that DTrace 
		 * expects.
		 */
		systrace_args(sysnum, params, uargs, &n_args);

	/* Process the probe using the converted arguments. */
	dtrace_probe(id, uargs[0], uargs[1], uargs[2], uargs[3], uargs[4]);
}
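
The conversion callbacks invoked above all share the shape (sysnum, params, uargs, &n_args). A hypothetical per-syscall converter is sketched below; struct example_read_args and example_read_args_func are illustrative names only, not taken from any real sysent table.

/*
 * Hypothetical converter: flatten a read(2)-style argument structure
 * into the array of 64-bit values that dtrace_probe() consumes.
 */
struct example_read_args {
	int	fd;
	void	*buf;
	size_t	nbyte;
};

static void
example_read_args_func(int sysnum, void *params, u_int64_t *uargs, int *n_args)
{
	struct example_read_args *p = params;

	(void)sysnum;	/* a real converter would switch on the syscall number */
	uargs[0] = (u_int64_t)p->fd;
	uargs[1] = (u_int64_t)(uintptr_t)p->buf;
	uargs[2] = (u_int64_t)p->nbyte;
	*n_args = 3;
}
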
Example #6
File: fbt_isa.c Project: Enstone/freebsd
int
fbt_invop(uintptr_t addr, uintptr_t *stack, uintptr_t rval)
{
	struct trapframe *frame = (struct trapframe *)stack;
	solaris_cpu_t *cpu = &solaris_cpu[curcpu];
	fbt_probe_t *fbt = fbt_probetab[FBT_ADDR2NDX(addr)];
	register_t fifthparam;

	for (; fbt != NULL; fbt = fbt->fbtp_hashnext) {
		if ((uintptr_t)fbt->fbtp_patchpoint == addr) {
			cpu->cpu_dtrace_caller = addr;

			/* Get 5th parameter from stack */
			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
			fifthparam = *(register_t *)frame->tf_usr_sp;
			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT | CPU_DTRACE_BADADDR);

			dtrace_probe(fbt->fbtp_id, frame->tf_r0,
			    frame->tf_r1, frame->tf_r2,
			    frame->tf_r3, fifthparam);

			cpu->cpu_dtrace_caller = 0;

			return (fbt->fbtp_rval | (fbt->fbtp_savedval << DTRACE_INVOP_SHIFT));
		}
	}

	return (0);
}
Example #7
File: profile.c Project: Alkzndr/freebsd
static void
profile_tick(void *arg)
{
	profile_probe_t *prof = arg;

	dtrace_probe(prof->prof_id, CPU->cpu_profile_pc,
	    CPU->cpu_profile_upc, 0, 0, 0);
}
Example #8
void
dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which,
    int fault, int fltoffs, uintptr_t illval)
{

	dtrace_probe(dtrace_probeid_error, (uint64_t)(uintptr_t)state,
	    (uintptr_t)epid,
	    (uintptr_t)which, (uintptr_t)fault, (uintptr_t)fltoffs);
}
Example #9
static void
fasttrap_return_common(struct reg *rp, uintptr_t pc, pid_t pid,
    uintptr_t new_pc)
{
	fasttrap_tracepoint_t *tp;
	fasttrap_bucket_t *bucket;
	fasttrap_id_t *id;
#if defined(sun)
	kmutex_t *pid_mtx;
#endif

#if defined(sun)
	pid_mtx = &cpu_core[CPU->cpu_id].cpuc_pid_lock;
	mutex_enter(pid_mtx);
#endif
	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (pid == tp->ftt_pid && pc == tp->ftt_pc &&
		    tp->ftt_proc->ftpc_acount != 0)
			break;
	}

	/*
	 * Don't sweat it if we can't find the tracepoint again; unlike
	 * when we're in fasttrap_pid_probe(), finding the tracepoint here
	 * is not essential to the correct execution of the process.
	 */
	if (tp == NULL) {
#if defined(sun)
		mutex_exit(pid_mtx);
#endif
		return;
	}

	for (id = tp->ftt_retids; id != NULL; id = id->fti_next) {
		/*
		 * If there's a branch that could act as a return site, we
		 * need to trace it, and check here if the program counter is
		 * external to the function.
		 */
		if (tp->ftt_type != FASTTRAP_T_RET &&
		    tp->ftt_type != FASTTRAP_T_RET16 &&
		    new_pc - id->fti_probe->ftp_faddr <
		    id->fti_probe->ftp_fsize)
			continue;

		dtrace_probe(id->fti_probe->ftp_id,
		    pc - id->fti_probe->ftp_faddr,
		    rp->r_rax, rp->r_rbx, 0, 0);
	}

#if defined(sun)
	mutex_exit(pid_mtx);
#endif
}
Example #10
File: dtrace_isa.c Project: 0xffea/xnu
void
dtrace_probe_error(dtrace_state_t *state, dtrace_epid_t epid, int which,
    int fltoffs, int fault, uint64_t illval)
{
    /*
     * For the case of the error probe firing, let's
     * stash away "illval" here, and special-case retrieving it in DIF_VARIABLE_ARG.
     */
    state->dts_arg_error_illval = illval;
    dtrace_probe( dtrace_probeid_error, (uint64_t)(uintptr_t)state, epid, which, fltoffs, fault );
}
Example #11
File: profile.c Project: Alkzndr/freebsd
static void
profile_fire(void *arg)
{
	profile_probe_percpu_t *pcpu = arg;
	profile_probe_t *prof = pcpu->profc_probe;
	hrtime_t late;

	late = dtrace_gethrtime() - pcpu->profc_expected;
	pcpu->profc_expected += pcpu->profc_interval;

	dtrace_probe(prof->prof_id, CPU->cpu_profile_pc,
	    CPU->cpu_profile_upc, late, 0, 0);
}
Example #12
/*ARGSUSED*/
static int
sdt_invop(uintptr_t addr, uintptr_t *stack, uintptr_t eax, trap_instr_t *tinfo)
{
	uintptr_t stack0, stack1, stack2, stack3, stack4;
	int i = 0;
	sdt_probe_t *sdt = sdt_probetab[SDT_ADDR2NDX(addr)];

#ifdef __amd64
	/*
	 * On amd64, stack[0] contains the dereferenced stack pointer,
	 * stack[1] contains savfp, stack[2] contains savpc.  We want
	 * to step over these entries.
	 */
	i += 3;
#endif

	for (; sdt != NULL; sdt = sdt->sdp_hashnext) {
		if ((uintptr_t)sdt->sdp_patchpoint == addr) {
			/***********************************************/
			/*   Dont fire probe if this is unsafe.	       */
			/***********************************************/
			if (!tinfo->t_doprobe)
				return (DTRACE_INVOP_NOP);
			/*
			 * When accessing the arguments on the stack, we must
			 * protect against accessing beyond the stack.  We can
			 * safely set NOFAULT here -- we know that interrupts
			 * are already disabled.
			 */
			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
			stack0 = stack[i++];
			stack1 = stack[i++];
			stack2 = stack[i++];
			stack3 = stack[i++];
			stack4 = stack[i++];
			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT |
			    CPU_DTRACE_BADADDR);

			dtrace_probe(sdt->sdp_id, stack0, stack1,
			    stack2, stack3, stack4);

			return (DTRACE_INVOP_NOP);
		}
	}

	return (0);
}
Example #13
/*
 * Probe callback function.
 *
 * Note: This function is called for _all_ syscalls, regardless of which sysent
 *       array the syscall comes from. It could be a standard syscall or a
 *       compat syscall from something like Linux.
 */
static void
systrace_probe(struct syscall_args *sa, enum systrace_probe_t type, int retval)
{
	uint64_t uargs[nitems(sa->args)];
	dtrace_id_t id;
	int n_args, sysnum;

	sysnum = sa->code;
	memset(uargs, 0, sizeof(uargs));

	if (type == SYSTRACE_ENTRY) {
		if ((id = sa->callp->sy_entry) == DTRACE_IDNONE)
			return;

		if (sa->callp->sy_systrace_args_func != NULL)
			/*
			 * Convert the syscall parameters using the registered
			 * function.
			 */
			(*sa->callp->sy_systrace_args_func)(sysnum, sa->args,
			    uargs, &n_args);
		else
			/*
			 * Use the built-in system call argument conversion
			 * function to translate the syscall structure fields
			 * into the array of 64-bit values that DTrace expects.
			 */
			systrace_args(sysnum, sa->args, uargs, &n_args);
		/*
		 * Save probe arguments now so that we can retrieve them if
		 * the getargval method is called from further down the stack.
		 */
		curthread->t_dtrace_systrace_args = uargs;
	} else {
		if ((id = sa->callp->sy_return) == DTRACE_IDNONE)
			return;

		curthread->t_dtrace_systrace_args = NULL;
		/* Set arg0 and arg1 as the return value of this syscall. */
		uargs[0] = uargs[1] = retval;
	}

	/* Process the probe using the converted arguments. */
	dtrace_probe(id, uargs[0], uargs[1], uargs[2], uargs[3], uargs[4]);
}
Example #14
File: sdt_x86.c Project: aglab2/darwin-xnu
/*ARGSUSED*/
int
sdt_invop(uintptr_t addr, uintptr_t *stack, uintptr_t eax)
{
#pragma unused(eax)
	sdt_probe_t *sdt = sdt_probetab[SDT_ADDR2NDX(addr)];

	for (; sdt != NULL; sdt = sdt->sdp_hashnext) {
		if ((uintptr_t)sdt->sdp_patchpoint == addr) {
			x86_saved_state64_t *regs = (x86_saved_state64_t *)stack;

			dtrace_probe(sdt->sdp_id, regs->rdi, regs->rsi, regs->rdx, regs->rcx, regs->r8);

			return (DTRACE_INVOP_NOP);
		}
	}

	return (0);
}
Example #15
static long dt_perf_ioctl(struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	unsigned long	i;
	ktime_t		tm0, tm1;

	if (!enabled)
		return -EAGAIN;

	switch (cmd) {
	case _IOW(1, 1, int):
		tm0 = dtrace_gethrtime();
		for (i = 0; i < arg; i++)
		    dtrace_probe(invoke_pid, cmd, arg, 2, 3, 4);

		tm1 = dtrace_gethrtime();
		tm1.tv64 -= tm0.tv64;

		dtrace_probe(result_pid, cmd, arg, tm1.tv64, tm1.tv64 >> 32, 0);
		break;

	case _IOW(1, 2, int): {
		extern void dtrace_sdt_perf(void);

		tm0 = dtrace_gethrtime();
		for (i = 0; i < arg; i++)
		    dtrace_sdt_perf();

		tm1 = dtrace_gethrtime();
		tm1.tv64 -= tm0.tv64;

		dtrace_probe(result_pid, cmd, arg, tm1.tv64, tm1.tv64 >> 32, 0);
		break;
	}

	default:
		return -EINVAL;
	}

	return 0;
}
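
From user space, the benchmark above is driven by an ioctl against whatever device node the dt_perf module registers; the device path in the sketch below is an assumption, and only the command encoding _IOW(1, 1, int) comes from the handler itself.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int
main(void)
{
	/* Device path is a guess; the dt_perf module decides where it appears. */
	int fd = open("/dev/dtrace/dt_perf", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Matches the first case above: fire the invoke_pid probe `arg'
	 * times, then report the elapsed time through the result_pid probe. */
	if (ioctl(fd, _IOW(1, 1, int), 1000000UL) < 0)
		perror("ioctl");
	close(fd);
	return 0;
}
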
Example #16
/*
 * Probe callback function.
 *
 * Note: This function is called for _all_ syscalls, regardless of which sysent
 *       array the syscall comes from. It could be a standard syscall or a
 *       compat syscall from something like Linux.
 */
static void
systrace_probe(uint32_t id, register_t sysnum, const struct sysent *se,
    const void *params, const register_t *ret, int error)
{
	size_t		n_args	= 0;
	uintptr_t	uargs[SYS_MAXSYSARGS + 3];

	memset(uargs, 0, sizeof(uargs));
	if (ret == NULL) {
		/* entry syscall, convert params */
		systrace_args(sysnum, params, uargs, &n_args);
	} else {
		/* return syscall, set values and params: */
		uargs[0] = ret[0];
		uargs[1] = ret[1];
		uargs[2] = error;
		systrace_args(sysnum, params, uargs + 3, &n_args);
	}
	/* Process the probe using the converted arguments. */
	/* XXX: fix for more arguments! */
	dtrace_probe(id, uargs[0], uargs[1], uargs[2], uargs[3], uargs[4]);
}
Example #17
/*
 * Probe callback function.
 *
 * Note: This function is called for _all_ syscalls, regardless of which sysent
 *       array the syscall comes from. It could be a standard syscall or a
 *       compat syscall from something like Linux.
 */
static void
systrace_probe(u_int32_t id, int sysnum, struct sysent *sysent, void *params,
               int ret)
{
    int		n_args	= 0;
    u_int64_t	uargs[8];

    memset(uargs, 0, sizeof(uargs));
    /*
     * Check if this syscall has an argument conversion function
     * registered.
     */
    if (params && sysent->sy_systrace_args_func != NULL) {
        /*
         * Convert the syscall parameters using the registered
         * function.
         */
        (*sysent->sy_systrace_args_func)(sysnum, params, uargs, &n_args);
    } else if (params) {
        /*
         * Use the built-in system call argument conversion
         * function to translate the syscall structure fields
         * into the array of 64-bit values that DTrace
         * expects.
         */
        systrace_args(sysnum, params, uargs, &n_args);
    } else {
        /*
         * Since params is NULL, this is a 'return' probe.
         * Set arg0 and arg1 as the return value of this syscall.
         */
        uargs[0] = uargs[1] = ret;
    }

    /* Process the probe using the converted arguments. */
    dtrace_probe(id, uargs[0], uargs[1], uargs[2], uargs[3], uargs[4]);
}
Example #18
int
fbt_invop(uintptr_t addr, struct trapframe *frame, uintptr_t rval)
{
	solaris_cpu_t *cpu;
	fbt_probe_t *fbt;

	cpu = &solaris_cpu[curcpu];
	fbt = fbt_probetab[FBT_ADDR2NDX(addr)];

	for (; fbt != NULL; fbt = fbt->fbtp_hashnext) {
		if ((uintptr_t)fbt->fbtp_patchpoint == addr) {
			cpu->cpu_dtrace_caller = addr;

			dtrace_probe(fbt->fbtp_id, frame->a0,
			    frame->a1, frame->a2,
			    frame->a3, frame->a4);

			cpu->cpu_dtrace_caller = 0;
			return (fbt->fbtp_savedval);
		}
	}

	return (0);
}
Example #19
File: fasttrap_isa.c Project: argp/xnu
static void
fasttrap_return_common(proc_t *p, arm_saved_state_t *regs, user_addr_t pc, user_addr_t new_pc)
{
	pid_t pid = p->p_pid;
	fasttrap_tracepoint_t *tp;
	fasttrap_bucket_t *bucket;
	fasttrap_id_t *id;
	lck_mtx_t *pid_mtx;
	int retire_tp = 1;

	pid_mtx = &cpu_core[CPU->cpu_id].cpuc_pid_lock;
	lck_mtx_lock(pid_mtx);
	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (pid == tp->ftt_pid && pc == tp->ftt_pc &&
		    tp->ftt_proc->ftpc_acount != 0)
			break;
	}

	/*
	 * Don't sweat it if we can't find the tracepoint again; unlike
	 * when we're in fasttrap_pid_probe(), finding the tracepoint here
	 * is not essential to the correct execution of the process.
 	 */
	if (tp == NULL) {
		lck_mtx_unlock(pid_mtx);
		return;
	}

	for (id = tp->ftt_retids; id != NULL; id = id->fti_next) {
		fasttrap_probe_t *probe = id->fti_probe;
		/*
		 * If there's a branch that could act as a return site, we
		 * need to trace it, and check here if the program counter is
		 * external to the function.
		 */
		if (tp->ftt_type != FASTTRAP_T_LDM_PC &&
		    tp->ftt_type != FASTTRAP_T_POP_PC &&
		    new_pc - probe->ftp_faddr < probe->ftp_fsize)
			continue;

		if (probe->ftp_prov->ftp_provider_type == DTFTP_PROVIDER_ONESHOT) {
			uint8_t already_triggered = atomic_or_8(&probe->ftp_triggered, 1);
			if (already_triggered) {
				continue;
			}
		}
		/*
		 * If we have at least one probe associated that
		 * is not a oneshot probe, don't remove the
		 * tracepoint
		 */
		else {
			retire_tp = 0;
		}
#ifndef CONFIG_EMBEDDED
		if (ISSET(current_proc()->p_lflag, P_LNOATTACH)) {
			dtrace_probe(dtrace_probeid_error, 0 /* state */, id->fti_probe->ftp_id,
				     1 /* ndx */, -1 /* offset */, DTRACEFLT_UPRIV);
#else
		if (FALSE) {
#endif
		} else {
			dtrace_probe(id->fti_probe->ftp_id,
				     pc - id->fti_probe->ftp_faddr,
				     regs->r[0], 0, 0, 0);
		}
	}
	if (retire_tp) {
		fasttrap_tracepoint_retire(p, tp);
	}

	lck_mtx_unlock(pid_mtx);
}

static void
fasttrap_sigsegv(proc_t *p, uthread_t t, user_addr_t addr, arm_saved_state_t *regs)
{
	/* TODO: This function isn't implemented yet. In debug mode, panic the system to
	 * find out why we're hitting this point. In other modes, kill the process.
	 */
#if DEBUG
#pragma unused(p,t,addr,arm_saved_state)
	panic("fasttrap: sigsegv not yet implemented");
#else
#pragma unused(p,t,addr)
	/* Kill the process */
	regs->pc = 0;
#endif

#if 0
	proc_lock(p);

	/* Set fault address and mark signal */
	t->uu_code = addr;
	t->uu_siglist |= sigmask(SIGSEGV);

	/* 
	 * XXX These two line may be redundant; if not, then we need
	 * XXX to potentially set the data address in the machine
	 * XXX specific thread state structure to indicate the address.
	 */         
	t->uu_exception = KERN_INVALID_ADDRESS;         /* SIGSEGV */
	t->uu_subcode = 0;      /* XXX pad */
                
	proc_unlock(p); 
                                     
	/* raise signal */
	signal_setast(t->uu_context.vc_thread);
#endif
}
Example #20
int
fasttrap_pid_probe(struct reg *rp)
{
	proc_t *p = curproc;
	uintptr_t pc = rp->r_rip - 1;
	uintptr_t new_pc = 0;
	fasttrap_bucket_t *bucket;
#if defined(sun)
	kmutex_t *pid_mtx;
#endif
	fasttrap_tracepoint_t *tp, tp_local;
	pid_t pid;
	dtrace_icookie_t cookie;
	uint_t is_enabled = 0;

	/*
	 * It's possible that a user (in a veritable orgy of bad planning)
	 * could redirect this thread's flow of control before it reached the
	 * return probe fasttrap. In this case we need to kill the process
	 * since it's in a unrecoverable state.
	 */
	if (curthread->t_dtrace_step) {
		ASSERT(curthread->t_dtrace_on);
		fasttrap_sigtrap(p, curthread, pc);
		return (0);
	}

	/*
	 * Clear all user tracing flags.
	 */
	curthread->t_dtrace_ft = 0;
	curthread->t_dtrace_pc = 0;
	curthread->t_dtrace_npc = 0;
	curthread->t_dtrace_scrpc = 0;
	curthread->t_dtrace_astpc = 0;
#ifdef __amd64
	curthread->t_dtrace_regv = 0;
#endif

#if defined(sun)
	/*
	 * Treat a child created by a call to vfork(2) as if it were its
	 * parent. We know that there's only one thread of control in such a
	 * process: this one.
	 */
	while (p->p_flag & SVFORK) {
		p = p->p_parent;
	}
#endif

	PROC_LOCK(p);
	_PHOLD(p);
	pid = p->p_pid;
#if defined(sun)
	pid_mtx = &cpu_core[CPU->cpu_id].cpuc_pid_lock;
	mutex_enter(pid_mtx);
#endif
	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	/*
	 * Lookup the tracepoint that the process just hit.
	 */
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (pid == tp->ftt_pid && pc == tp->ftt_pc &&
		    tp->ftt_proc->ftpc_acount != 0)
			break;
	}

	/*
	 * If we couldn't find a matching tracepoint, either a tracepoint has
	 * been inserted without using the pid<pid> ioctl interface (see
	 * fasttrap_ioctl), or somehow we have mislaid this tracepoint.
	 */
	if (tp == NULL) {
#if defined(sun)
		mutex_exit(pid_mtx);
#endif
		_PRELE(p);
		PROC_UNLOCK(p);
		return (-1);
	}

	/*
	 * Set the program counter to the address of the traced instruction
	 * so that it looks right in ustack() output.
	 */
	rp->r_rip = pc;

	if (tp->ftt_ids != NULL) {
		fasttrap_id_t *id;

#ifdef __amd64
		if (p->p_model == DATAMODEL_LP64) {
			for (id = tp->ftt_ids; id != NULL; id = id->fti_next) {
				fasttrap_probe_t *probe = id->fti_probe;

				if (id->fti_ptype == DTFTP_ENTRY) {
					/*
					 * We note that this was an entry
					 * probe to help ustack() find the
					 * first caller.
					 */
					cookie = dtrace_interrupt_disable();
					DTRACE_CPUFLAG_SET(CPU_DTRACE_ENTRY);
					dtrace_probe(probe->ftp_id, rp->r_rdi,
					    rp->r_rsi, rp->r_rdx, rp->r_rcx,
					    rp->r_r8);
					DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_ENTRY);
					dtrace_interrupt_enable(cookie);
				} else if (id->fti_ptype == DTFTP_IS_ENABLED) {
					/*
					 * Note that in this case, we don't
					 * call dtrace_probe() since it's only
					 * an artificial probe meant to change
					 * the flow of control so that it
					 * encounters the true probe.
					 */
					is_enabled = 1;
				} else if (probe->ftp_argmap == NULL) {
					dtrace_probe(probe->ftp_id, rp->r_rdi,
					    rp->r_rsi, rp->r_rdx, rp->r_rcx,
					    rp->r_r8);
				} else {
					uintptr_t t[5];

					fasttrap_usdt_args64(probe, rp,
					    sizeof (t) / sizeof (t[0]), t);

					dtrace_probe(probe->ftp_id, t[0], t[1],
					    t[2], t[3], t[4]);
				}
			}
		} else {
#else /* __amd64 */
			uintptr_t s0, s1, s2, s3, s4, s5;
			uint32_t *stack = (uint32_t *)rp->r_esp;

			/*
			 * In 32-bit mode, all arguments are passed on the
			 * stack. If this is a function entry probe, we need
			 * to skip the first entry on the stack as it
			 * represents the return address rather than a
			 * parameter to the function.
			 */
			s0 = fasttrap_fuword32_noerr(&stack[0]);
			s1 = fasttrap_fuword32_noerr(&stack[1]);
			s2 = fasttrap_fuword32_noerr(&stack[2]);
			s3 = fasttrap_fuword32_noerr(&stack[3]);
			s4 = fasttrap_fuword32_noerr(&stack[4]);
			s5 = fasttrap_fuword32_noerr(&stack[5]);

			for (id = tp->ftt_ids; id != NULL; id = id->fti_next) {
				fasttrap_probe_t *probe = id->fti_probe;

				if (id->fti_ptype == DTFTP_ENTRY) {
					/*
					 * We note that this was an entry
					 * probe to help ustack() find the
					 * first caller.
					 */
					cookie = dtrace_interrupt_disable();
					DTRACE_CPUFLAG_SET(CPU_DTRACE_ENTRY);
					dtrace_probe(probe->ftp_id, s1, s2,
					    s3, s4, s5);
					DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_ENTRY);
					dtrace_interrupt_enable(cookie);
				} else if (id->fti_ptype == DTFTP_IS_ENABLED) {
					/*
					 * Note that in this case, we don't
					 * call dtrace_probe() since it's only
					 * an artificial probe meant to change
					 * the flow of control so that it
					 * encounters the true probe.
					 */
					is_enabled = 1;
				} else if (probe->ftp_argmap == NULL) {
					dtrace_probe(probe->ftp_id, s0, s1,
					    s2, s3, s4);
				} else {
					uint32_t t[5];

					fasttrap_usdt_args32(probe, rp,
					    sizeof (t) / sizeof (t[0]), t);

					dtrace_probe(probe->ftp_id, t[0], t[1],
					    t[2], t[3], t[4]);
				}
			}
#endif /* __amd64 */
#ifdef __amd64
		}
#endif
	}

	/*
	 * We're about to do a bunch of work so we cache a local copy of
	 * the tracepoint to emulate the instruction, and then find the
	 * tracepoint again later if we need to light up any return probes.
	 */
	tp_local = *tp;
	PROC_UNLOCK(p);
#if defined(sun)
	mutex_exit(pid_mtx);
#endif
	tp = &tp_local;

	/*
	 * Set the program counter to appear as though the traced instruction
	 * had completely executed. This ensures that fasttrap_getreg() will
	 * report the expected value for REG_RIP.
	 */
	rp->r_rip = pc + tp->ftt_size;

	/*
	 * If there's an is-enabled probe connected to this tracepoint it
	 * means that there was a 'xorl %eax, %eax' or 'xorq %rax, %rax'
	 * instruction that was placed there by DTrace when the binary was
	 * linked. As this probe is, in fact, enabled, we need to stuff 1
	 * into %eax or %rax. Accordingly, we can bypass all the instruction
	 * emulation logic since we know the inevitable result. It's possible
	 * that a user could construct a scenario where the 'is-enabled'
	 * probe was on some other instruction, but that would be a rather
	 * exotic way to shoot oneself in the foot.
	 */
	if (is_enabled) {
		rp->r_rax = 1;
		new_pc = rp->r_rip;
		goto done;
	}

	/*
	 * We emulate certain types of instructions to ensure correctness
	 * (in the case of position dependent instructions) or optimize
	 * common cases. The rest we have the thread execute back in user-
	 * land.
	 */
	switch (tp->ftt_type) {
	case FASTTRAP_T_RET:
	case FASTTRAP_T_RET16:
	{
		uintptr_t dst = 0;
		uintptr_t addr = 0;
		int ret = 0;

		/*
		 * We have to emulate _every_ facet of the behavior of a ret
		 * instruction including what happens if the load from %esp
		 * fails; in that case, we send a SIGSEGV.
		 */
#ifdef __amd64
		if (p->p_model == DATAMODEL_NATIVE) {
			ret = dst = fasttrap_fulword((void *)rp->r_rsp);
			addr = rp->r_rsp + sizeof (uintptr_t);
		} else {
#endif
#ifdef __i386__
			uint32_t dst32;
			ret = dst32 = fasttrap_fuword32((void *)rp->r_esp);
			dst = dst32;
			addr = rp->r_esp + sizeof (uint32_t);
#endif
#ifdef __amd64
		}
#endif

		if (ret == -1) {
			fasttrap_sigsegv(p, curthread, rp->r_rsp);
			new_pc = pc;
			break;
		}

		if (tp->ftt_type == FASTTRAP_T_RET16)
			addr += tp->ftt_dest;

		rp->r_rsp = addr;
		new_pc = dst;
		break;
	}

	case FASTTRAP_T_JCC:
	{
		uint_t taken = 0;

		switch (tp->ftt_code) {
		case FASTTRAP_JO:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_OF) != 0;
			break;
		case FASTTRAP_JNO:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_OF) == 0;
			break;
		case FASTTRAP_JB:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_CF) != 0;
			break;
		case FASTTRAP_JAE:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_CF) == 0;
			break;
		case FASTTRAP_JE:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_ZF) != 0;
			break;
		case FASTTRAP_JNE:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_ZF) == 0;
			break;
		case FASTTRAP_JBE:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_CF) != 0 ||
			    (rp->r_rflags & FASTTRAP_EFLAGS_ZF) != 0;
			break;
		case FASTTRAP_JA:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_CF) == 0 &&
			    (rp->r_rflags & FASTTRAP_EFLAGS_ZF) == 0;
			break;
		case FASTTRAP_JS:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_SF) != 0;
			break;
		case FASTTRAP_JNS:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_SF) == 0;
			break;
		case FASTTRAP_JP:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_PF) != 0;
			break;
		case FASTTRAP_JNP:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_PF) == 0;
			break;
		case FASTTRAP_JL:
			taken = ((rp->r_rflags & FASTTRAP_EFLAGS_SF) == 0) !=
			    ((rp->r_rflags & FASTTRAP_EFLAGS_OF) == 0);
			break;
		case FASTTRAP_JGE:
			taken = ((rp->r_rflags & FASTTRAP_EFLAGS_SF) == 0) ==
			    ((rp->r_rflags & FASTTRAP_EFLAGS_OF) == 0);
			break;
		case FASTTRAP_JLE:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_ZF) != 0 ||
			    ((rp->r_rflags & FASTTRAP_EFLAGS_SF) == 0) !=
			    ((rp->r_rflags & FASTTRAP_EFLAGS_OF) == 0);
			break;
		case FASTTRAP_JG:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_ZF) == 0 &&
			    ((rp->r_rflags & FASTTRAP_EFLAGS_SF) == 0) ==
			    ((rp->r_rflags & FASTTRAP_EFLAGS_OF) == 0);
			break;

		}

		if (taken)
			new_pc = tp->ftt_dest;
		else
			new_pc = pc + tp->ftt_size;
		break;
	}

	case FASTTRAP_T_LOOP:
	{
		uint_t taken = 0;
#ifdef __amd64
		greg_t cx = rp->r_rcx--;
#else
		greg_t cx = rp->r_ecx--;
#endif

		switch (tp->ftt_code) {
		case FASTTRAP_LOOPNZ:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_ZF) == 0 &&
			    cx != 0;
			break;
		case FASTTRAP_LOOPZ:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_ZF) != 0 &&
			    cx != 0;
			break;
		case FASTTRAP_LOOP:
			taken = (cx != 0);
			break;
		}

		if (taken)
			new_pc = tp->ftt_dest;
		else
			new_pc = pc + tp->ftt_size;
		break;
	}

	case FASTTRAP_T_JCXZ:
	{
#ifdef __amd64
		greg_t cx = rp->r_rcx;
#else
		greg_t cx = rp->r_ecx;
#endif

		if (cx == 0)
			new_pc = tp->ftt_dest;
		else
			new_pc = pc + tp->ftt_size;
		break;
	}

	case FASTTRAP_T_PUSHL_EBP:
	{
		int ret = 0;
		uintptr_t addr = 0;

#ifdef __amd64
		if (p->p_model == DATAMODEL_NATIVE) {
			addr = rp->r_rsp - sizeof (uintptr_t);
			ret = fasttrap_sulword((void *)addr, &rp->r_rsp);
		} else {
#endif
#ifdef __i386__
			addr = rp->r_rsp - sizeof (uint32_t);
			ret = fasttrap_suword32((void *)addr, &rp->r_rsp);
#endif
#ifdef __amd64
		}
#endif

		if (ret == -1) {
			fasttrap_sigsegv(p, curthread, addr);
			new_pc = pc;
			break;
		}

		rp->r_rsp = addr;
		new_pc = pc + tp->ftt_size;
		break;
	}

	case FASTTRAP_T_NOP:
		new_pc = pc + tp->ftt_size;
		break;

	case FASTTRAP_T_JMP:
	case FASTTRAP_T_CALL:
		if (tp->ftt_code == 0) {
			new_pc = tp->ftt_dest;
		} else {
#ifdef __amd64
			uintptr_t value;
#endif
			uintptr_t addr = tp->ftt_dest;

			if (tp->ftt_base != FASTTRAP_NOREG)
				addr += fasttrap_getreg(rp, tp->ftt_base);
			if (tp->ftt_index != FASTTRAP_NOREG)
				addr += fasttrap_getreg(rp, tp->ftt_index) <<
				    tp->ftt_scale;

			if (tp->ftt_code == 1) {
				/*
				 * If there's a segment prefix for this
				 * instruction, we'll need to check permissions
				 * and bounds on the given selector, and adjust
				 * the address accordingly.
				 */
				if (tp->ftt_segment != FASTTRAP_SEG_NONE &&
				    fasttrap_do_seg(tp, rp, &addr) != 0) {
					fasttrap_sigsegv(p, curthread, addr);
					new_pc = pc;
					break;
				}

#ifdef __amd64
				if (p->p_model == DATAMODEL_NATIVE) {
					if ((value = fasttrap_fulword((void *)addr))
					     == -1) {
						fasttrap_sigsegv(p, curthread,
						    addr);
						new_pc = pc;
						break;
					}
					new_pc = value;
				} else {
#endif
#ifdef __i386__
					uint32_t value32;
					addr = (uintptr_t)(uint32_t)addr;
					if ((value32 = fasttrap_fuword32((void *)addr))
					    == -1) {
						fasttrap_sigsegv(p, curthread,
						    addr);
						new_pc = pc;
						break;
					}
					new_pc = value32;
#endif
				}
#ifdef __amd64
			} else {
				new_pc = addr;
			}
#endif
		}

		/*
		 * If this is a call instruction, we need to push the return
		 * address onto the stack. If this fails, we send the process
		 * a SIGSEGV and reset the pc to emulate what would happen if
		 * this instruction weren't traced.
		 */
		if (tp->ftt_type == FASTTRAP_T_CALL) {
			int ret = 0;
			uintptr_t addr = 0, pcps;
#ifdef __amd64
			if (p->p_model == DATAMODEL_NATIVE) {
				addr = rp->r_rsp - sizeof (uintptr_t);
				pcps = pc + tp->ftt_size;
				ret = fasttrap_sulword((void *)addr, &pcps);
			} else {
#endif
#ifdef __i386__
				addr = rp->r_rsp - sizeof (uint32_t);
				pcps = (uint32_t)(pc + tp->ftt_size);
				ret = fasttrap_suword32((void *)addr, &pcps);
#endif
#ifdef __amd64
			}
#endif

			if (ret == -1) {
				fasttrap_sigsegv(p, curthread, addr);
				new_pc = pc;
				break;
			}

			rp->r_rsp = addr;
		}

		break;

	case FASTTRAP_T_COMMON:
	{
		uintptr_t addr;
#if defined(__amd64)
		uint8_t scratch[2 * FASTTRAP_MAX_INSTR_SIZE + 22];
#else
		uint8_t scratch[2 * FASTTRAP_MAX_INSTR_SIZE + 7];
#endif
		uint_t i = 0;
#if defined(sun)
		klwp_t *lwp = ttolwp(curthread);
#endif

		/*
		 * Compute the address of the ulwp_t and step over the
		 * ul_self pointer. The method used to store the user-land
		 * thread pointer is very different on 32- and 64-bit
		 * kernels.
		 */
#if defined(sun)
#if defined(__amd64)
		if (p->p_model == DATAMODEL_LP64) {
			addr = lwp->lwp_pcb.pcb_fsbase;
			addr += sizeof (void *);
		} else {
			addr = lwp->lwp_pcb.pcb_gsbase;
			addr += sizeof (caddr32_t);
		}
#else
		addr = USD_GETBASE(&lwp->lwp_pcb.pcb_gsdesc);
		addr += sizeof (void *);
#endif
#endif /* sun */
#ifdef __i386__
		addr = USD_GETBASE(&curthread->td_pcb->pcb_gsd);
#else
		addr = curthread->td_pcb->pcb_gsbase;
#endif
		addr += sizeof (void *);

		/*
		 * Generic Instruction Tracing
		 * ---------------------------
		 *
		 * This is the layout of the scratch space in the user-land
		 * thread structure for our generated instructions.
		 *
		 *	32-bit mode			bytes
		 *	------------------------	-----
		 * a:	<original instruction>		<= 15
		 *	jmp	<pc + tp->ftt_size>	    5
		 * b:	<original instruction>		<= 15
		 *	int	T_DTRACE_RET		    2
		 *					-----
		 *					<= 37
		 *
		 *	64-bit mode			bytes
		 *	------------------------	-----
		 * a:	<original instruction>		<= 15
		 *	jmp	0(%rip)			    6
		 *	<pc + tp->ftt_size>		    8
		 * b:	<original instruction>		<= 15
		 * 	int	T_DTRACE_RET		    2
		 * 					-----
		 * 					<= 46
		 *
		 * The %pc is set to a, and curthread->t_dtrace_astpc is set
		 * to b. If we encounter a signal on the way out of the
		 * kernel, trap() will set %pc to curthread->t_dtrace_astpc
		 * so that we execute the original instruction and re-enter
		 * the kernel rather than redirecting to the next instruction.
		 *
		 * If there are return probes (so we know that we're going to
		 * need to reenter the kernel after executing the original
		 * instruction), the scratch space will just contain the
		 * original instruction followed by an interrupt -- the same
		 * data as at b.
		 *
		 * %rip-relative Addressing
		 * ------------------------
		 *
		 * There's a further complication in 64-bit mode due to %rip-
		 * relative addressing. While this is clearly a beneficial
		 * architectural decision for position independent code, it's
		 * hard not to see it as a personal attack against the pid
		 * provider since before there was a relatively small set of
		 * instructions to emulate; with %rip-relative addressing,
		 * almost every instruction can potentially depend on the
		 * address at which it's executed. Rather than emulating
		 * the broad spectrum of instructions that can now be
		 * position dependent, we emulate jumps and others as in
		 * 32-bit mode, and take a different tack for instructions
		 * using %rip-relative addressing.
		 *
		 * For every instruction that uses the ModRM byte, the
		 * in-kernel disassembler reports its location. We use the
		 * ModRM byte to identify that an instruction uses
		 * %rip-relative addressing and to see what other registers
		 * the instruction uses. To emulate those instructions,
		 * we modify the instruction to be %rax-relative rather than
		 * %rip-relative (or %rcx-relative if the instruction uses
		 * %rax; or %r8- or %r9-relative if the REX.B is present so
		 * we don't have to rewrite the REX prefix). We then load
		 * the value that %rip would have been into the scratch
		 * register and generate an instruction to reset the scratch
		 * register back to its original value. The instruction
		 * sequence looks like this:
		 *
		 *	64-mode %rip-relative		bytes
		 *	------------------------	-----
		 * a:	<modified instruction>		<= 15
		 *	movq	$<value>, %<scratch>	    6
		 *	jmp	0(%rip)			    6
		 *	<pc + tp->ftt_size>		    8
		 * b:	<modified instruction>  	<= 15
		 * 	int	T_DTRACE_RET		    2
		 * 					-----
		 *					   52
		 *
		 * We set curthread->t_dtrace_regv so that upon receiving
		 * a signal we can reset the value of the scratch register.
		 */

		ASSERT(tp->ftt_size < FASTTRAP_MAX_INSTR_SIZE);

		curthread->t_dtrace_scrpc = addr;
		bcopy(tp->ftt_instr, &scratch[i], tp->ftt_size);
		i += tp->ftt_size;

#ifdef __amd64
		if (tp->ftt_ripmode != 0) {
			greg_t *reg = NULL;

			ASSERT(p->p_model == DATAMODEL_LP64);
			ASSERT(tp->ftt_ripmode &
			    (FASTTRAP_RIP_1 | FASTTRAP_RIP_2));

			/*
			 * If this was a %rip-relative instruction, we change
			 * it to be either a %rax- or %rcx-relative
			 * instruction (depending on whether those registers
			 * are used as another operand; or %r8- or %r9-
			 * relative depending on the value of REX.B). We then
			 * set that register and generate a movq instruction
			 * to reset the value.
			 */
			if (tp->ftt_ripmode & FASTTRAP_RIP_X)
				scratch[i++] = FASTTRAP_REX(1, 0, 0, 1);
			else
				scratch[i++] = FASTTRAP_REX(1, 0, 0, 0);

			if (tp->ftt_ripmode & FASTTRAP_RIP_1)
				scratch[i++] = FASTTRAP_MOV_EAX;
			else
				scratch[i++] = FASTTRAP_MOV_ECX;

			switch (tp->ftt_ripmode) {
			case FASTTRAP_RIP_1:
				reg = &rp->r_rax;
				curthread->t_dtrace_reg = REG_RAX;
				break;
			case FASTTRAP_RIP_2:
				reg = &rp->r_rcx;
				curthread->t_dtrace_reg = REG_RCX;
				break;
			case FASTTRAP_RIP_1 | FASTTRAP_RIP_X:
				reg = &rp->r_r8;
				curthread->t_dtrace_reg = REG_R8;
				break;
			case FASTTRAP_RIP_2 | FASTTRAP_RIP_X:
				reg = &rp->r_r9;
				curthread->t_dtrace_reg = REG_R9;
				break;
			}

			/* LINTED - alignment */
			*(uint64_t *)&scratch[i] = *reg;
			curthread->t_dtrace_regv = *reg;
			*reg = pc + tp->ftt_size;
			i += sizeof (uint64_t);
		}
#endif

		/*
		 * Generate the branch instruction to what would have
		 * normally been the subsequent instruction. In 32-bit mode,
		 * this is just a relative branch; in 64-bit mode this is a
		 * %rip-relative branch that loads the 64-bit pc value
		 * immediately after the jmp instruction.
		 */
#ifdef __amd64
		if (p->p_model == DATAMODEL_LP64) {
			scratch[i++] = FASTTRAP_GROUP5_OP;
			scratch[i++] = FASTTRAP_MODRM(0, 4, 5);
			/* LINTED - alignment */
			*(uint32_t *)&scratch[i] = 0;
			i += sizeof (uint32_t);
			/* LINTED - alignment */
			*(uint64_t *)&scratch[i] = pc + tp->ftt_size;
			i += sizeof (uint64_t);
		} else {
#endif
#ifdef __i386__
			/*
			 * Set up the jmp to the next instruction; note that
			 * the size of the traced instruction cancels out.
			 */
			scratch[i++] = FASTTRAP_JMP32;
			/* LINTED - alignment */
			*(uint32_t *)&scratch[i] = pc - addr - 5;
			i += sizeof (uint32_t);
#endif
#ifdef __amd64
		}
#endif

		curthread->t_dtrace_astpc = addr + i;
		bcopy(tp->ftt_instr, &scratch[i], tp->ftt_size);
		i += tp->ftt_size;
		scratch[i++] = FASTTRAP_INT;
		scratch[i++] = T_DTRACE_RET;

		ASSERT(i <= sizeof (scratch));

#if defined(sun)
		if (fasttrap_copyout(scratch, (char *)addr, i)) {
#else
		if (uwrite(curproc, scratch, i, addr)) {
#endif
			fasttrap_sigtrap(p, curthread, pc);
			new_pc = pc;
			break;
		}
		if (tp->ftt_retids != NULL) {
			curthread->t_dtrace_step = 1;
			curthread->t_dtrace_ret = 1;
			new_pc = curthread->t_dtrace_astpc;
		} else {
			new_pc = curthread->t_dtrace_scrpc;
		}

		curthread->t_dtrace_pc = pc;
		curthread->t_dtrace_npc = pc + tp->ftt_size;
		curthread->t_dtrace_on = 1;
		break;
	}

	default:
		panic("fasttrap: mishandled an instruction");
	}

done:
	/*
	 * If there were no return probes when we first found the tracepoint,
	 * we should feel no obligation to honor any return probes that were
	 * subsequently enabled -- they'll just have to wait until the next
	 * time around.
	 */
	if (tp->ftt_retids != NULL) {
		/*
		 * We need to wait until the results of the instruction are
		 * apparent before invoking any return probes. If this
		 * instruction was emulated we can just call
		 * fasttrap_return_common(); if it needs to be executed, we
		 * need to wait until the user thread returns to the kernel.
		 */
		if (tp->ftt_type != FASTTRAP_T_COMMON) {
			/*
			 * Set the program counter to the address of the traced
			 * instruction so that it looks right in ustack()
			 * output. We had previously set it to the end of the
			 * instruction to simplify %rip-relative addressing.
			 */
			rp->r_rip = pc;

			fasttrap_return_common(rp, pc, pid, new_pc);
		} else {
			ASSERT(curthread->t_dtrace_ret != 0);
			ASSERT(curthread->t_dtrace_pc == pc);
			ASSERT(curthread->t_dtrace_scrpc != 0);
			ASSERT(new_pc == curthread->t_dtrace_astpc);
		}
	}

	rp->r_rip = new_pc;

	PROC_LOCK(p);
	proc_write_regs(curthread, rp);
	_PRELE(p);
	PROC_UNLOCK(p);

	return (0);
}

int
fasttrap_return_probe(struct reg *rp)
{
	proc_t *p = curproc;
	uintptr_t pc = curthread->t_dtrace_pc;
	uintptr_t npc = curthread->t_dtrace_npc;

	curthread->t_dtrace_pc = 0;
	curthread->t_dtrace_npc = 0;
	curthread->t_dtrace_scrpc = 0;
	curthread->t_dtrace_astpc = 0;

#if defined(sun)
	/*
	 * Treat a child created by a call to vfork(2) as if it were its
	 * parent. We know that there's only one thread of control in such a
	 * process: this one.
	 */
	while (p->p_flag & SVFORK) {
		p = p->p_parent;
	}
#endif

	/*
	 * We set rp->r_rip to the address of the traced instruction so
	 * that it appears to dtrace_probe() that we're on the original
	 * instruction, and so that the user can't easily detect our
	 * complex web of lies. dtrace_return_probe() (our caller)
	 * will correctly set %pc after we return.
	 */
	rp->r_rip = pc;

	fasttrap_return_common(rp, pc, p->p_pid, npc);

	return (0);
}
Example #21
static int
instr_invop(uintptr_t addr, uintptr_t *stack, uintptr_t rval, trap_instr_t *tinfo)
{
	uintptr_t stack0, stack1, stack2, stack3, stack4;
	instr_probe_t *fbt = instr_probetab[INSTR_ADDR2NDX(addr)];

//HERE();
	for (; fbt != NULL; fbt = fbt->insp_hashnext) {
		if ((uintptr_t)fbt->insp_patchpoint == addr) {
			tinfo->t_opcode = fbt->insp_savedval;
			tinfo->t_inslen = fbt->insp_inslen;
			tinfo->t_modrm = fbt->insp_modrm;
			if (!tinfo->t_doprobe)
				return DTRACE_INVOP_ANY;
			if (fbt->insp_roffset == 0) {
				/*
				 * When accessing the arguments on the stack,
				 * we must protect against accessing beyond
				 * the stack.  We can safely set NOFAULT here
				 * -- we know that interrupts are already
				 * disabled.
				 */
				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
				CPU->cpu_dtrace_caller = stack[0];
				stack0 = stack[1];
				stack1 = stack[2];
				stack2 = stack[3];
				stack3 = stack[4];
				stack4 = stack[5];
				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT |
				    CPU_DTRACE_BADADDR);

				dtrace_probe(fbt->insp_id, stack0, stack1,
				    stack2, stack3, stack4);

				CPU->cpu_dtrace_caller = NULL;
			} else {
#ifdef __amd64
				/*
				 * On amd64, we instrument the ret, not the
				 * leave.  We therefore need to set the caller
				 * to assure that the top frame of a stack()
				 * action is correct.
				 */
				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
				CPU->cpu_dtrace_caller = stack[0];
				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT |
				    CPU_DTRACE_BADADDR);
#endif

				dtrace_probe(fbt->insp_id, fbt->insp_roffset,
				    rval, 0, 0, 0);
				CPU->cpu_dtrace_caller = NULL;
			}

			return DTRACE_INVOP_ANY;
		}
	}
//HERE();

	return (0);
}
Example #22
File: sdt_linux.c Project: msabramo/linux
/*ARGSUSED*/
static int
sdt_invop(uintptr_t addr, uintptr_t *stack, uintptr_t eax, trap_instr_t *tinfo)
{
	uintptr_t stack0, stack1, stack2, stack3, stack4;
	sdt_probe_t *sdt = sdt_probetab[SDT_ADDR2NDX(addr)];
	struct pt_regs *regs;

	for (; sdt != NULL; sdt = sdt->sdp_hashnext) {
//printk("sdt_invop %p %p\n", sdt->sdp_patchpoint, addr);
		if (sdt->sdp_enabled && (uintptr_t)sdt->sdp_patchpoint == addr) {
			tinfo->t_opcode = sdt->sdp_savedval;
			tinfo->t_inslen = sdt->sdp_inslen;
			tinfo->t_modrm = sdt->sdp_modrm;
			/***********************************************/
			/*   Dont fire probe if this is unsafe.	       */
			/***********************************************/
			if (!tinfo->t_doprobe)
				return (DTRACE_INVOP_NOP);
			/*
			 * When accessing the arguments on the stack, we must
			 * protect against accessing beyond the stack.  We can
			 * safely set NOFAULT here -- we know that interrupts
			 * are already disabled.
			 */
			regs = (struct pt_regs *) stack;
			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
			stack0 = regs->c_arg0;
			stack1 = regs->c_arg1;
			stack2 = regs->c_arg2;
			stack3 = regs->c_arg3;
			stack4 = regs->c_arg4;

			/***********************************************/
			/*   Not  sure  if  this  is re-entrant safe.  */
			/*   Might   need   a   per-cpu   buffer   to  */
			/*   write/read from.			       */
			/***********************************************/

			/***********************************************/
			/*   Dont  do this for the return probe - the  */
			/*   arguments  are  going  to be junk and we  */
			/*   will  hang/panic  the  kernel.  At  some  */
			/*   point  we  need  something better than a  */
			/*   entry/return  indicator -- maybe an enum  */
			/*   type.				       */
			/***********************************************/
			if (sdt->sdp_entry) {
				stack0 = (uintptr_t) create_buf_t((struct file *) stack0, 
					(void *) stack1,  /* uaddr */
					(size_t) stack2,  /* size */
					(long long) stack3 /* offset */);
			}

			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT |
			    CPU_DTRACE_BADADDR);
//printk("probe %p: %p %p %p %p %p\n", &addr, stack0, stack1, stack2, stack3, stack4);
			dtrace_probe(sdt->sdp_id, stack0, stack0, stack0, 0, 0);
//			dtrace_probe(sdt->sdp_id, stack0, stack1,
//			    stack2, stack3, stack4);

			return (DTRACE_INVOP_NOP);
		}
	}
//printk("none in invop for dsdt\n");

	return (0);
}
Example #23
/**
 * interface_method_impl{SUPDRVTRACERREG,pfnProbeFireUser}
 */
static DECLCALLBACK(void) vboxDtTOps_ProbeFireUser(PCSUPDRVTRACERREG pThis, PSUPDRVSESSION pSession, PCSUPDRVTRACERUSRCTX pCtx,
        PCVTGOBJHDR pVtgHdr, PCVTGPROBELOC pProbeLocRO)
{
    LOG_DTRACE(("%s: %p / %p\n", __FUNCTION__, pCtx, pCtx->idProbe));
    AssertPtrReturnVoid(pProbeLocRO);
    AssertPtrReturnVoid(pVtgHdr);

    VBDT_SETUP_STACK_DATA(kVBoxDtCaller_ProbeFireUser);

    if (pCtx->cBits == 32)
    {
        pStackData->u.ProbeFireUser.pCtx   = pCtx;
        pStackData->u.ProbeFireUser.offArg = 0;

#if ARCH_BITS == 64 || defined(RT_OS_DARWIN)
        /*
         * Combine two 32-bit arguments into one 64-bit argument where needed.
         */
        PVTGDESCPROBE   pProbeDesc = pProbeLocRO->pProbe;
        AssertPtrReturnVoid(pProbeDesc);
        PVTGDESCARGLIST pArgList   = (PVTGDESCARGLIST)((uintptr_t)pVtgHdr + pVtgHdr->offArgLists + pProbeDesc->offArgList);
        AssertPtrReturnVoid(pArgList);

        if (!pArgList->fHaveLargeArgs)
            dtrace_probe(pCtx->idProbe,
                         pCtx->u.X86.aArgs[0],
                         pCtx->u.X86.aArgs[1],
                         pCtx->u.X86.aArgs[2],
                         pCtx->u.X86.aArgs[3],
                         pCtx->u.X86.aArgs[4]);
        else
        {
            uint32_t const *auSrcArgs = &pCtx->u.X86.aArgs[0];
            uint32_t        iSrcArg   = 0;
            uint32_t        iDstArg   = 0;
            uint64_t        au64DstArgs[5];

            while (   iDstArg < RT_ELEMENTS(au64DstArgs)
                      && iSrcArg < pArgList->cArgs)
            {
                au64DstArgs[iDstArg] = auSrcArgs[iSrcArg];
                if (VTG_TYPE_IS_LARGE(pArgList->aArgs[iDstArg].fType))
                    au64DstArgs[iDstArg] |= (uint64_t)auSrcArgs[++iSrcArg] << 32;
                iSrcArg++;
                iDstArg++;
            }
            while (iDstArg < RT_ELEMENTS(au64DstArgs))
                au64DstArgs[iDstArg++] = auSrcArgs[iSrcArg++];

            pStackData->u.ProbeFireUser.offArg = iSrcArg - RT_ELEMENTS(au64DstArgs);
            dtrace_probe(pCtx->idProbe, au64DstArgs[0], au64DstArgs[1], au64DstArgs[2], au64DstArgs[3], au64DstArgs[4]);
        }
#else
        dtrace_probe(pCtx->idProbe,
                     pCtx->u.X86.aArgs[0],
                     pCtx->u.X86.aArgs[1],
                     pCtx->u.X86.aArgs[2],
                     pCtx->u.X86.aArgs[3],
                     pCtx->u.X86.aArgs[4]);
#endif
    }
    else if (pCtx->cBits == 64)
    {
        pStackData->u.ProbeFireUser.pCtx   = pCtx;
        pStackData->u.ProbeFireUser.offArg = 0;
        dtrace_probe(pCtx->idProbe,
                     pCtx->u.Amd64.aArgs[0],
                     pCtx->u.Amd64.aArgs[1],
                     pCtx->u.Amd64.aArgs[2],
                     pCtx->u.Amd64.aArgs[3],
                     pCtx->u.Amd64.aArgs[4]);
    }
    else
        AssertFailed();

    VBDT_CLEAR_STACK_DATA();
    LOG_DTRACE(("%s: returns\n", __FUNCTION__));
}
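
The argument-widening loops in Example #3 and Example #23 implement the same rule; reduced to a single argument it is just the sketch below (widen_arg is an illustrative helper, with fLarge standing in for VTG_TYPE_IS_LARGE() and lo/hi being two consecutive 32-bit argument slots).

#include <stdint.h>

/* Sketch: rebuild one 64-bit probe argument on a 32-bit host. */
static uint64_t
widen_arg(int fLarge, uint32_t lo, uint32_t hi)
{
	return fLarge ? (((uint64_t)hi << 32) | lo) : lo;
}
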
Example #24
File: fbt_linux.c Project: HackLinux/linux
static int
fbt_invop(uintptr_t addr, uintptr_t *stack, uintptr_t rval, trap_instr_t *tinfo)
{
	uintptr_t stack0, stack1, stack2, stack3, stack4;
	fbt_probe_t *fbt = fbt_probetab[FBT_ADDR2NDX(addr)];

//HERE();
//int dtrace_here = 1;
//if (dtrace_here) printk("fbt_invop:addr=%lx stack=%p eax=%lx\n", addr, stack, (long) rval);
	for (; fbt != NULL; fbt = fbt->fbtp_hashnext) {
//if (dtrace_here) printk("patchpoint: %p rval=%x\n", fbt->fbtp_patchpoint, fbt->fbtp_rval);
		if ((uintptr_t)fbt->fbtp_patchpoint != addr)
			continue;

		/***********************************************/
		/*   If  probe  is  not  enabled,  but  still  */
		/*   fired,  then it *might have* overrun. Likely cause is  */
		/*   cpu cache consistency issue - some other  */
		/*   CPU  hasnt  seen  the  breakpoint  being  */
		/*   removed.  Flag  it  so  we  can  show in  */
		/*   /proc/dtrace/fbt.			       */
		/*   Additionally, another provider (eg prov)  */
		/*   is  sitting  on the same function, so we  */
		/*   have to broadcast to both/all providers.  */
		/***********************************************/
		if (!fbt->fbtp_enabled) {
			fbt->fbtp_overrun = TRUE;
		}

		/***********************************************/
		/*   Always  handle  the  probe - if we dont,  */
		/*   nobody  else  will  know what to do with  */
		/*   it. Its possible somebody else does want  */
		/*   it,  e.g.  INSTR,  but we cant know who.  */
		/*   Probably  need  to  call  all providers,  */
		/*   rather than just the first one.	       */
		/***********************************************/
		if (1) {
			tinfo->t_opcode = fbt->fbtp_savedval;
//printk("fbt: opc=%p %p\n", tinfo->t_opcode, fbt->fbtp_savedval);
			tinfo->t_inslen = fbt->fbtp_inslen;
			tinfo->t_modrm = fbt->fbtp_modrm;
			if (!tinfo->t_doprobe)
				return fbt->fbtp_rval;
			fbt->fbtp_fired++;
			if (fbt->fbtp_roffset == 0) {
				struct pt_regs *ptregs = (struct pt_regs *) stack;
				/*
				 * When accessing the arguments on the stack,
				 * we must protect against accessing beyond
				 * the stack.  We can safely set NOFAULT here
				 * -- we know that interrupts are already
				 * disabled.
				 */
				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
				CPU->cpu_dtrace_caller = ptregs->r_pc;
				stack0 = ptregs->c_arg0;
				stack1 = ptregs->c_arg1;
				stack2 = ptregs->c_arg2;
				stack3 = ptregs->c_arg3;
				stack4 = ptregs->c_arg4;
				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT |
				    CPU_DTRACE_BADADDR);

				dtrace_probe(fbt->fbtp_id, stack0, stack1,
				    stack2, stack3, stack4);

				CPU->cpu_dtrace_caller = NULL;
			} else {
#ifdef __amd64
				/*
				 * On amd64, we instrument the ret, not the
				 * leave.  We therefore need to set the caller
				 * to assure that the top frame of a stack()
				 * action is correct.
				 */
				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
				CPU->cpu_dtrace_caller = stack[0];
				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT |
				    CPU_DTRACE_BADADDR);
#endif

				dtrace_probe(fbt->fbtp_id, fbt->fbtp_roffset,
				    rval, 0, 0, 0);
				CPU->cpu_dtrace_caller = NULL;
			}

			return (fbt->fbtp_rval);
		}
	}
//HERE();

	return (0);
}