Example #1
static uint64_t
fasttrap_anarg(struct reg *rp, int function_entry, int argno)
{
	uint64_t value = 0;
	int shift = function_entry ? 1 : 0;

#ifdef __amd64
	if (curproc->p_model == DATAMODEL_LP64) {
		uintptr_t *stack;

		/*
		 * In 64-bit mode, the first six arguments are stored in
		 * registers.
		 */
		if (argno < 6)
			return ((&rp->r_rdi)[argno]);

		stack = (uintptr_t *)rp->r_rsp;
		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
		value = dtrace_fulword(&stack[argno - 6 + shift]);
		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT | CPU_DTRACE_BADADDR);
	} else {
#endif
#ifdef __i386
		uint32_t *stack = (uint32_t *)rp->r_esp;
		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
		value = dtrace_fuword32(&stack[argno + shift]);
		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT | CPU_DTRACE_BADADDR);
#endif
#ifdef __amd64
	}
#endif

	return (value);
}
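
A note on the arithmetic above, with a minimal user-space sketch: under the SysV amd64 ABI, arguments 0-5 arrive in registers starting at %rdi, and argument n >= 6 sits in stack slot n - 6, with one extra slot skipped at function entry to step over the pushed return address. The helper below is hypothetical, not part of fasttrap.

#include <assert.h>

/* Hypothetical helper mirroring fasttrap_anarg() above: compute the
 * stack slot holding argument 'argno' of an LP64 process. */
static int
lp64_arg_slot(int argno, int function_entry)
{
	int shift = function_entry ? 1 : 0;	/* skip the return address */

	assert(argno >= 6);	/* arguments 0-5 live in registers */
	return (argno - 6 + shift);
}

int
main(void)
{
	assert(lp64_arg_slot(6, 1) == 1);	/* first stack arg at entry */
	assert(lp64_arg_slot(8, 0) == 2);
	return (0);
}
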
Example #2
static int
fbt_invop(uintptr_t addr, uintptr_t *stack, uintptr_t rval)
{
	solaris_cpu_t *cpu = &solaris_cpu[curcpu];
	uintptr_t stack0, stack1, stack2, stack3, stack4;
	fbt_probe_t *fbt = fbt_probetab[FBT_ADDR2NDX(addr)];

	for (; fbt != NULL; fbt = fbt->fbtp_hashnext) {
		if ((uintptr_t)fbt->fbtp_patchpoint == addr) {
			fbt->fbtp_invop_cnt++;
			if (fbt->fbtp_roffset == 0) {
				int i = 0;
				/*
				 * When accessing the arguments on the stack,
				 * we must protect against accessing beyond
				 * the stack.  We can safely set NOFAULT here
				 * -- we know that interrupts are already
				 * disabled.
				 */
				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
				cpu->cpu_dtrace_caller = stack[i++];
				stack0 = stack[i++];
				stack1 = stack[i++];
				stack2 = stack[i++];
				stack3 = stack[i++];
				stack4 = stack[i++];
				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT |
				    CPU_DTRACE_BADADDR);

				dtrace_probe(fbt->fbtp_id, stack0, stack1,
				    stack2, stack3, stack4);

				cpu->cpu_dtrace_caller = 0;
			} else {
#ifdef __amd64__
				/*
				 * On amd64, we instrument the ret, not the
				 * leave.  We therefore need to set the caller
				 * to assure that the top frame of a stack()
				 * action is correct.
				 */
				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
				cpu->cpu_dtrace_caller = stack[0];
				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT |
				    CPU_DTRACE_BADADDR);
#endif

				dtrace_probe(fbt->fbtp_id, fbt->fbtp_roffset,
				    rval, 0, 0, 0);
				cpu->cpu_dtrace_caller = 0;
			}

			return (fbt->fbtp_rval);
		}
	}

	return (0);
}
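
fbt_invop() resolves the trapping address through a chained hash table: FBT_ADDR2NDX() maps the patch point to a bucket, and the chain is walked comparing exact addresses. A self-contained sketch of the same lookup pattern; the names and the shift-and-mask hash are illustrative, not the kernel's.

#include <stddef.h>
#include <stdint.h>

#define NDX_SIZE	64	/* power of two, as in the real table */
#define ADDR2NDX(a)	((((uintptr_t)(a)) >> 4) & (NDX_SIZE - 1))

struct probe {
	uintptr_t	 p_patchpoint;
	struct probe	*p_hashnext;
};

static struct probe *probetab[NDX_SIZE];

/* Walk the chain for the bucket 'addr' hashes to; NULL if absent. */
static struct probe *
probe_lookup(uintptr_t addr)
{
	struct probe *p;

	for (p = probetab[ADDR2NDX(addr)]; p != NULL; p = p->p_hashnext)
		if (p->p_patchpoint == addr)
			return (p);
	return (NULL);
}

int
main(void)
{
	struct probe pr = { 0x401000, NULL };

	probetab[ADDR2NDX(pr.p_patchpoint)] = &pr;
	return (probe_lookup(0x401000) == &pr ? 0 : 1);
}
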
Example #3
uint64_t
dtrace_getreg(struct regs *savearea, uint_t reg)
{
	ppc_saved_state_t *regs = (ppc_saved_state_t *)savearea;
	uint64_t mask = (_cpu_capabilities & k64Bit) ?
	    0xffffffffffffffffULL : 0x00000000ffffffffULL;
	
	/* See osfmk/ppc/savearea.h */
	if (reg > 68) { /* beyond mmcr2 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}
	
	switch (reg) {
		/* The first 38 registers are saved as 64-bit values:
		 * r0-r31, srr0, srr1, xer, lr, ctr, dar. */
		default:
			return (((uint64_t *)(&(regs->save_r0)))[reg]) & mask;

		/* Handle the 32-bit registers */
		case 38: case 39: case 40: case 41: /* cr, dsisr, exception, vrsave */
		case 42: case 43: case 44: case 45: /* vscr[4] */
		case 46: case 47: case 48: case 49:     /* fpscrpad, fpscr, save_1d8[2] */
		case 50: case 51: case 52: case 53: /* save_1E0[8] */
		case 54: case 55: case 56: case 57: 
		case 58: case 59: case 60: case 61: /* save_pmc[8] */
		case 62: case 63: case 64: case 65: 
			return (uint64_t)(((unsigned int *)(&(regs->save_cr)))[reg - 38]);
			
		case 66:
			return regs->save_mmcr0 & mask;
		case 67:
			return regs->save_mmcr1 & mask;
		case 68:
			return regs->save_mmcr2 & mask;
	}
}
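
The save area is split into a 64-bit region (indices 0-37, indexed off save_r0) and a 32-bit region (indices 38-65, indexed off save_cr), with the k64Bit mask truncating saved slots on 32-bit hardware. A minimal sketch of the width selection, assuming nothing beyond standard C:

#include <assert.h>
#include <stdint.h>

/* Mirror of the width selection above: on 32-bit hardware the saved
 * 64-bit slots are truncated to their low word. */
static uint64_t
regval(uint64_t raw, int is64bit)
{
	uint64_t mask = is64bit ? 0xffffffffffffffffULL
	    : 0x00000000ffffffffULL;

	return (raw & mask);
}

int
main(void)
{
	assert(regval(0x1122334455667788ULL, 1) == 0x1122334455667788ULL);
	assert(regval(0x1122334455667788ULL, 0) == 0x55667788ULL);
	return (0);
}
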
Example #4
int dtrace_getstackdepth(dtrace_mstate_t *mstate, int aframes)
{
	uintptr_t		old = mstate->dtms_scratch_ptr;
	size_t			size;
	struct stacktrace_state	st = {
					NULL,
					NULL,
					0,
					aframes,
					STACKTRACE_KERNEL
				     };

	st.pcs = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
	size = (uintptr_t)st.pcs - mstate->dtms_scratch_ptr +
			  aframes * sizeof(uint64_t);
	if (mstate->dtms_scratch_ptr + size >
	    mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
		return 0;
	}

	dtrace_stacktrace(&st);

	mstate->dtms_scratch_ptr = old;

	return st.depth;
}
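
st.pcs is aligned with P2ROUNDUP(), the illumos power-of-two round-up macro. A user-space sketch of the arithmetic, assuming the standard definition:

#include <assert.h>
#include <stdint.h>

/* Round x up to the next multiple of align (align must be a power of
 * two); this is the usual illumos P2ROUNDUP definition. */
#define P2ROUNDUP(x, align)	(-(-(x) & -(align)))

int
main(void)
{
	assert(P2ROUNDUP((uintptr_t)13, (uintptr_t)8) == 16);
	assert(P2ROUNDUP((uintptr_t)16, (uintptr_t)8) == 16);
	return (0);
}
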
Example #5
int
fbt_invop(uintptr_t addr, uintptr_t *stack, uintptr_t rval)
{
	struct trapframe *frame = (struct trapframe *)stack;
	solaris_cpu_t *cpu = &solaris_cpu[curcpu];
	fbt_probe_t *fbt = fbt_probetab[FBT_ADDR2NDX(addr)];
	register_t fifthparam;

	for (; fbt != NULL; fbt = fbt->fbtp_hashnext) {
		if ((uintptr_t)fbt->fbtp_patchpoint == addr) {
			cpu->cpu_dtrace_caller = addr;

			/* Get 5th parameter from stack */
			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
			fifthparam = *(register_t *)frame->tf_usr_sp;
			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT | CPU_DTRACE_BADADDR);

			dtrace_probe(fbt->fbtp_id, frame->tf_r0,
			    frame->tf_r1, frame->tf_r2,
			    frame->tf_r3, fifthparam);

			cpu->cpu_dtrace_caller = 0;

			return (fbt->fbtp_rval | (fbt->fbtp_savedval << DTRACE_INVOP_SHIFT));
		}
	}

	return (0);
}
Example #6
ulong_t
dtrace_getreg(struct trapframe *rp, uint_t reg)
{
	if (reg < 32)
		return (rp->fixreg[reg]);

	switch (reg) {
	case 33:
		return (rp->lr);
	case 34:
		return (rp->cr);
	case 35:
		return (rp->xer);
	case 36:
		return (rp->ctr);
	case 37:
		return (rp->srr0);
	case 38:
		return (rp->srr1);
	case 39:
		return (rp->exc);
	default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}
}
Example #7
uint32_t
dtrace_fuword32(void *uaddr)
{
	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (fuword32(uaddr));
}
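
Every fuword-style accessor in these excerpts follows the same shape: range-check the user address and, on failure, latch it into cpuc_dtrace_illval and raise CPU_DTRACE_BADADDR before returning 0. A condensed user-space mock of that control flow; the limit and the two globals are stand-ins:

#include <stdint.h>
#include <stdio.h>

#define MOCK_MAXUSER	0x00007fffffffffffULL	/* stand-in address limit */

static uintptr_t	illval;		/* mock cpuc_dtrace_illval */
static int		badaddr_flag;	/* mock CPU_DTRACE_BADADDR */

static uint32_t
mock_fuword32(uintptr_t uaddr)
{
	if (uaddr > MOCK_MAXUSER) {
		badaddr_flag = 1;	/* flag the fault ... */
		illval = uaddr;		/* ... and remember where */
		return (0);
	}
	/* A real implementation would fetch from user space here. */
	return (0xdeadbeef);
}

int
main(void)
{
	(void) mock_fuword32(~(uintptr_t)0);
	printf("badaddr=%d illval=%#lx\n", badaddr_flag,
	    (unsigned long)illval);
	return (0);
}
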
Example #8
void
dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	if (dtrace_copycheck(uaddr, kaddr, size))
		if (copyin((const void *)uaddr, (void *)kaddr, size)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		}
}
Example #9
uint16_t
dtrace_fuword16(void *uaddr)
{
	if ((uintptr_t)uaddr >= (uintptr_t)MM_HIGHEST_USER_ADDRESS || 
	    (uintptr_t)uaddr <= (uintptr_t) MM_LOWEST_USER_ADDRESS ||
	    MmIsAddressValid((PVOID) uaddr) == 0) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[KeGetCurrentProcessorNumber()].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (dtrace_fuword16_nocheck(uaddr));
}
Example #10
static int
dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	size_t i;
	UNREFERENCED_PARAMETER(kaddr);
	ASSERT(kaddr >= (uintptr_t) kernelbase && kaddr + size >= kaddr);

	if (uaddr + size >= (uintptr_t) kernelbase || uaddr + size < uaddr) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[KeGetCurrentProcessorNumber()].cpuc_dtrace_illval = uaddr;
		return (0);
	}
	for (i = 0; i < size; i++) {
		if (MmIsAddressValid((PVOID) uaddr) == FALSE) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[KeGetCurrentProcessorNumber()].cpuc_dtrace_illval = uaddr;
			return (0);
		}
		uaddr++;
	}
	return (1);
}
Example #11
uintptr_t
dtrace_fulword(void *uaddr)
{
	uintptr_t ret = 0;

	if (dtrace_copycheck((uintptr_t)uaddr, (uintptr_t)&ret, sizeof(ret))) {
		if (copyin((const void *)uaddr, (void *)&ret, sizeof(ret))) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		}
	}
	return ret;
}
Example #12
static int
dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{
	ASSERT(INKERNEL(kaddr) && kaddr + size >= kaddr);

	if (uaddr + size > VM_MAXUSER_ADDRESS || uaddr + size < uaddr) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = uaddr;
		return (0);
	}

	return (1);
}
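
Both dtrace_copycheck() variants guard against integer wraparound: uaddr + size < uaddr can only hold if the addition overflowed, so a huge size cannot slip a kernel address past the upper-bound test. A small sketch of the idiom:

#include <assert.h>
#include <stdint.h>

/* Returns 1 iff [uaddr, uaddr + size) stays below 'limit' and the
 * end-of-range computation did not wrap around. */
static int
range_ok(uintptr_t uaddr, uintptr_t size, uintptr_t limit)
{
	if (uaddr + size > limit || uaddr + size < uaddr)
		return (0);
	return (1);
}

int
main(void)
{
	assert(range_ok(0x1000, 0x10, 0x7fffffff));
	assert(!range_ok(~(uintptr_t)0 - 4, 0x10, 0x7fffffff)); /* wraps */
	return (0);
}
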
Example #13
/*ARGSUSED*/
static int
sdt_invop(uintptr_t addr, uintptr_t *stack, uintptr_t eax, trap_instr_t *tinfo)
{
	uintptr_t stack0, stack1, stack2, stack3, stack4;
	int i = 0;
	sdt_probe_t *sdt = sdt_probetab[SDT_ADDR2NDX(addr)];

#ifdef __amd64
	/*
	 * On amd64, stack[0] contains the dereferenced stack pointer,
	 * stack[1] contains savfp, stack[2] contains savpc.  We want
	 * to step over these entries.
	 */
	i += 3;
#endif

	for (; sdt != NULL; sdt = sdt->sdp_hashnext) {
		if ((uintptr_t)sdt->sdp_patchpoint == addr) {
			/***********************************************/
			/*   Don't fire the probe if this is unsafe.   */
			/***********************************************/
			if (!tinfo->t_doprobe)
				return (DTRACE_INVOP_NOP);
			/*
			 * When accessing the arguments on the stack, we must
			 * protect against accessing beyond the stack.  We can
			 * safely set NOFAULT here -- we know that interrupts
			 * are already disabled.
			 */
			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
			stack0 = stack[i++];
			stack1 = stack[i++];
			stack2 = stack[i++];
			stack3 = stack[i++];
			stack4 = stack[i++];
			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT |
			    CPU_DTRACE_BADADDR);

			dtrace_probe(sdt->sdp_id, stack0, stack1,
			    stack2, stack3, stack4);

			return (DTRACE_INVOP_NOP);
		}
	}

	return (0);
}
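
The SET/CLEAR bracket around the stack reads recurs throughout these excerpts: NOFAULT is raised before the risky dereference and cleared together with BADADDR afterwards, so a recorded fault does not leak into the next probe. A user-space rendering of the bracket; the flag word stands in for the real per-CPU flags:

#include <stdint.h>
#include <stdio.h>

#define MOCK_NOFAULT	0x01
#define MOCK_BADADDR	0x02

static volatile uint16_t mock_cpuflags;	/* stand-in per-CPU flags */

#define FLAG_SET(f)	(mock_cpuflags |= (f))
#define FLAG_CLEAR(f)	(mock_cpuflags &= (uint16_t)~(f))

/* Read one word under the NOFAULT bracket, as the invop handlers do;
 * in the kernel a fault inside the bracket is swallowed by the trap
 * handler instead of panicking. */
static uintptr_t
guarded_read(const uintptr_t *p)
{
	uintptr_t v;

	FLAG_SET(MOCK_NOFAULT);
	v = *p;
	FLAG_CLEAR(MOCK_NOFAULT | MOCK_BADADDR);
	return (v);
}

int
main(void)
{
	uintptr_t word = 42;

	printf("%lu\n", (unsigned long)guarded_read(&word));
	return (0);
}
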
Example #14
/*
 * The bulk of this function could be replaced to match dtrace_copyinstr() 
 * if we ever implement a copyoutstr().
 */
void
dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
    volatile uint16_t *flags)
{
	size_t len;

	if (dtrace_copycheck(uaddr, kaddr, size)) {
		len = strlen((const char *)kaddr);
		if (len > size)
			len = size;

		if (copyout((const void *)kaddr, (void *)uaddr, len)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		}
	}
}
Example #15
void
dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
    volatile uint16_t *flags)
{
	size_t actual;
	int    error;

	if (dtrace_copycheck(uaddr, kaddr, size)) {
		error = copyinstr((const void *)uaddr, (void *)kaddr,
		    size, &actual);
		
		/* ENAMETOOLONG is not a fault condition. */
		if (error && error != ENAMETOOLONG) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		}
	}
}
Example #16
static uint64_t
machtrace_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes)
{
#pragma unused(arg,id,parg,aframes)     /* __APPLE__ */
	uint64_t val = 0;
	syscall_arg_t *stack = (syscall_arg_t *)NULL;

	uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread());
	
	if (uthread)
		stack = (syscall_arg_t *)uthread->t_dtrace_syscall_args;
	
	if (!stack)
		return(0);

	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	/* dtrace_probe arguments arg0 .. arg4 are 64 bits wide */
	val = (uint64_t)*(stack+argno);	
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
	return (val);
}
Example #17
static int
dtrace_copycheck(uintptr_t uaddr, uintptr_t kaddr, size_t size)
{

    if (dtrace_here) {
        printk("copycheck: uaddr=%p kaddr=%p size=%d\n", (void *) uaddr, (void*) kaddr, (int) size);
    }
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)
    if (__range_not_ok(uaddr, size)) {
#else
    if (!addr_valid(uaddr) || !addr_valid(uaddr + size)) {
#endif
//printk("uaddr=%p size=%d\n", uaddr, size);
        DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
        cpu_core[cpu_get_id()].cpuc_dtrace_illval = uaddr;
        return (0);
    }
    return (1);
}

void
dtrace_copyin(uintptr_t uaddr, uintptr_t kaddr, size_t size,
              volatile uint16_t *flags)
{
    if (dtrace_memcpy_with_error((void *) kaddr, (void *) uaddr, size) == 0) {
        DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
        return;
    }
}

void
dtrace_copyout(uintptr_t kaddr, uintptr_t uaddr, size_t size,
               volatile uint16_t *flags)
{
    if (dtrace_memcpy_with_error((void *) uaddr, (void *) kaddr, size) == 0) {
        DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
        return;
    }
}

void
dtrace_copyinstr(uintptr_t uaddr, uintptr_t kaddr, size_t size,
                 volatile uint16_t *flags)
{
    if (dtrace_memcpy_with_error((void *) kaddr, (void *) uaddr, size) == 0) {
        DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
        return;
    }
}

void
dtrace_copyoutstr(uintptr_t kaddr, uintptr_t uaddr, size_t size,
                  volatile uint16_t *flags)
{
    if (dtrace_memcpy_with_error((void *) kaddr, (void *) uaddr, size) == 0) {
        DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
        return;
    }
}

uint8_t
dtrace_fuword8(void *uaddr)
{
    extern uint8_t dtrace_fuword8_nocheck(void *);
    if (!access_ok(VERIFY_READ, uaddr, 1)) {
        DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
        printk("dtrace_fuword8: uaddr=%p CPU_DTRACE_BADADDR\n", uaddr);
        cpu_core[cpu_get_id()].cpuc_dtrace_illval = (uintptr_t)uaddr;
        return (0);
    }
    return (dtrace_fuword8_nocheck(uaddr));
}

uint16_t
dtrace_fuword16(void *uaddr)
{
    extern uint16_t dtrace_fuword16_nocheck(void *);
    if (!access_ok(VERIFY_READ, uaddr, 2)) {
        printk("dtrace_fuword16: uaddr=%p CPU_DTRACE_BADADDR\n", uaddr);
        DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
        cpu_core[cpu_get_id()].cpuc_dtrace_illval = (uintptr_t)uaddr;
        return (0);
    }
    return (dtrace_fuword16_nocheck(uaddr));
}

uint32_t
dtrace_fuword32(void *uaddr)
{
    extern uint32_t dtrace_fuword32_nocheck(void *);
    if (!addr_valid(uaddr)) {
        HERE2();
        DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
        cpu_core[cpu_get_id()].cpuc_dtrace_illval = (uintptr_t)uaddr;
        return (0);
    }
    return (dtrace_fuword32_nocheck(uaddr));
}

uint64_t
dtrace_fuword64(void *uaddr)
{
    extern uint64_t dtrace_fuword64_nocheck(void *);
    if (!addr_valid(uaddr)) {
        HERE2();
        DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
        cpu_core[cpu_get_id()].cpuc_dtrace_illval = (uintptr_t)uaddr;
        return (0);
    }
    return (dtrace_fuword64_nocheck(uaddr));
}
Example #18
/*ARGSUSED*/
uint64_t
dtrace_getarg(int arg, int aframes)
{
    uintptr_t val;
    struct frame *fp;
    uintptr_t *stack;
    int i;

#if defined(__amd64)
    /***********************************************/
    /*   The first 6 args are passed in registers. */
    /***********************************************/
    int inreg = 5;
    /*int inreg = offsetof(struct regs, r_r9) / sizeof (greg_t);*/
#endif

    DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
    fp = (struct frame *)dtrace_getfp();
    DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
//printk("dtrace_getarg: fp=%p %p\n", fp, fp->fr_savfp);

//printk("arg=%d aframes=%d\n", arg, aframes);

    for (i = 1; i <= aframes; i++) {
        pc_t savpc;
        DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
        fp = (struct frame *)(fp->fr_savfp);
        savpc = fp->fr_savpc;
        DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
//printk("fp=%p savpc=%p\n", fp, savpc);
        if (savpc == (pc_t)dtrace_invop_callsite) {
#if !defined(__amd64)
            /*
             * If we pass through the invalid op handler, we will
             * use the pointer that it passed to the stack as the
             * second argument to dtrace_invop() as the pointer to
             * the stack.  When using this stack, we must step
             * beyond the EIP/RIP that was pushed when the trap was
             * taken -- hence the "+ 1" below.
             */
            stack = ((uintptr_t **)&fp[1])[1] + 1;
#else
            /*
             * In the case of amd64, we will use the pointer to the
             * regs structure that was pushed when we took the
             * trap.  To get this structure, we must increment
             * beyond the frame structure, and then again beyond
             * the calling RIP stored in dtrace_invop().  If the
             * argument that we're seeking is passed on the stack,
             * we'll pull the true stack pointer out of the saved
             * registers and decrement our argument by the number
             * of arguments passed in registers; if the argument
             * we're seeking is passed in registers, we can just
             * load it directly.
             */
            struct regs *rp = (struct regs *)((uintptr_t)&fp[1] +
                                              sizeof (uintptr_t));

            if (arg <= inreg) {
                stack = (uintptr_t *)&rp->r_rdi;
            } else {
                stack = (uintptr_t *)(rp->r_rsp);
                arg -= inreg;
            }
#endif
            goto load;
        }

    }

    /*
     * We know that we did not come through a trap to get into
     * dtrace_probe() -- the provider simply called dtrace_probe()
     * directly.  As this is the case, we need to shift the argument
     * that we're looking for:  the probe ID is the first argument to
     * dtrace_probe(), so the argument n will actually be found where
     * one would expect to find argument (n + 1).
     */
    arg++;

#if defined(__amd64)
    if (arg <= inreg) {
        /*
         * This shouldn't happen.  If the argument is passed in a
         * register then it should have been, well, passed in a
         * register...
         */
        DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
        return (0);
    }

    arg -= (inreg + 1);
#endif
    stack = (uintptr_t *)&fp[1];

load:
    DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
    val = stack[arg];
    DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
    return (val);
}
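
In the direct-call path above, probe argument n is argument n + 1 of dtrace_probe(), and on amd64 the first six of those travel in registers, so a stack-resident argument lands at slot arg - (inreg + 1). A sketch of that adjustment; the helper name is hypothetical:

#include <assert.h>

#define INREG	5	/* r_rdi..r_r9: dtrace_probe() args 0-5 in registers */

/* Hypothetical helper mirroring the direct-call path above: map a
 * probe argument index to its slot above the saved frame. */
static int
direct_call_slot(int arg)
{
	arg++;			/* step over the probe ID argument */
	assert(arg > INREG);	/* register args never reach the stack */
	return (arg - (INREG + 1));
}

int
main(void)
{
	assert(direct_call_slot(5) == 0);  /* first stack-resident probe arg */
	assert(direct_call_slot(7) == 2);
	return (0);
}
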
Example #19
ulong_t
dtrace_getreg(struct pt_regs *rp, uint_t reg)
{
#if defined(__amd64)
    int regmap[] = {
        REG_GS,		/* GS */
        REG_FS,		/* FS */
        REG_ES,		/* ES */
        REG_DS,		/* DS */
        REG_RDI,	/* EDI */
        REG_RSI,	/* ESI */
        REG_RBP,	/* EBP */
        REG_RSP,	/* ESP */
        REG_RBX,	/* EBX */
        REG_RDX,	/* EDX */
        REG_RCX,	/* ECX */
        REG_RAX,	/* EAX */
        REG_TRAPNO,	/* TRAPNO */
        REG_ERR,	/* ERR */
        REG_RIP,	/* EIP */
        REG_CS,		/* CS */
        REG_RFL,	/* EFL */
        REG_RSP,	/* UESP */
        REG_SS		/* SS */
    };

    if (reg <= SS) {
        if (reg >= sizeof (regmap) / sizeof (int)) {
            DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
            return (0);
        }

        reg = regmap[reg];
    } else {
        reg -= SS + 1;
    }

    switch (reg) {
    case REG_RDI:
        return (rp->r_rdi);
    case REG_RSI:
        return (rp->r_rsi);
    case REG_RDX:
        return (rp->r_rdx);
    case REG_RCX:
        return (rp->r_rcx);
    case REG_R8:
        return (rp->r_r8);
    case REG_R9:
        return (rp->r_r9);
    case REG_RAX:
        return (rp->r_rax);
    case REG_RBX:
        return (rp->r_rbx);
    case REG_RBP:
        return (rp->r_rbp);
    case REG_R10:
        return (rp->r_r10);
    case REG_R11:
        return (rp->r_r11);
    case REG_R12:
        return (rp->r_r12);
    case REG_R13:
        return (rp->r_r13);
    case REG_R14:
        return (rp->r_r14);
    case REG_R15:
        return (rp->r_r15);
    case REG_DS:
        return (rp->r_ds);
    case REG_ES:
        return (rp->r_es);
    case REG_FS:
        return (rp->r_fs);
    case REG_GS:
        return (rp->r_gs);
    case REG_TRAPNO:
        return (rp->r_trapno);
# if defined(sun)
    case REG_ERR:
        return (rp->r_err);
# endif
    case REG_RIP:
        return (rp->r_rip);
    case REG_CS:
        return (rp->r_cs);
    case REG_SS:
        return (rp->r_ss);
    case REG_RFL:
        return (rp->r_rfl);
    case REG_RSP:
        return (rp->r_rsp);
    default:
        DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
        return (0);
    }

#elif defined(__i386)
    if (reg > SS) {
        DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
        return (0);
    }

    /***********************************************/
    /*   This is likely to be wrong and we should  */
    /*   likely have code like the above to avoid  */
    /*   the quirks of the Linux register layout.  */
    /***********************************************/
    return ((&rp->r_gs)[reg]);
# elif defined(__arm__)
    return 0;
# else
#	error "dtrace_isa.c: Unsupported CPU architecture"
#endif
}
Example #20
static int
instr_invop(uintptr_t addr, uintptr_t *stack, uintptr_t rval, trap_instr_t *tinfo)
{
	uintptr_t stack0, stack1, stack2, stack3, stack4;
	instr_probe_t *fbt = instr_probetab[INSTR_ADDR2NDX(addr)];

//HERE();
	for (; fbt != NULL; fbt = fbt->insp_hashnext) {
		if ((uintptr_t)fbt->insp_patchpoint == addr) {
			tinfo->t_opcode = fbt->insp_savedval;
			tinfo->t_inslen = fbt->insp_inslen;
			tinfo->t_modrm = fbt->insp_modrm;
			if (!tinfo->t_doprobe)
				return DTRACE_INVOP_ANY;
			if (fbt->insp_roffset == 0) {
				/*
				 * When accessing the arguments on the stack,
				 * we must protect against accessing beyond
				 * the stack.  We can safely set NOFAULT here
				 * -- we know that interrupts are already
				 * disabled.
				 */
				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
				CPU->cpu_dtrace_caller = stack[0];
				stack0 = stack[1];
				stack1 = stack[2];
				stack2 = stack[3];
				stack3 = stack[4];
				stack4 = stack[5];
				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT |
				    CPU_DTRACE_BADADDR);

				dtrace_probe(fbt->insp_id, stack0, stack1,
				    stack2, stack3, stack4);

				CPU->cpu_dtrace_caller = NULL;
			} else {
#ifdef __amd64
				/*
				 * On amd64, we instrument the ret, not the
				 * leave.  We therefore need to set the caller
				 * to assure that the top frame of a stack()
				 * action is correct.
				 */
				DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
				CPU->cpu_dtrace_caller = stack[0];
				DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT |
				    CPU_DTRACE_BADADDR);
#endif

				dtrace_probe(fbt->insp_id, fbt->insp_roffset,
				    rval, 0, 0, 0);
				CPU->cpu_dtrace_caller = NULL;
			}

			return DTRACE_INVOP_ANY;
		}
	}
//HERE();

	return (0);
}
Example #21
/*ARGSUSED*/
void
dtrace_getpcstack(pc_t *pcstack, int pcstack_limit, int aframes,
                  uint32_t *ignored)
{
    int	depth;
#if !defined(HAVE_STACKTRACE_OPS)
    int	lim;
    /***********************************************/
    /*   This  is  a basic stack walker - we don't */
    /*   care  about  omit-frame-pointer,  and we  */
    /*   can  have  false positives. We also don't */
    /*   handle  exception  stacks properly - but  */
    /*   this  is  for  older  kernels, where the  */
    /*   kernel  won't  help  us, so they may not  */
    /*   have exception stacks anyhow.	       */
    /***********************************************/

    /***********************************************/
    /*   20121125 Let's use this always - it       */
    /*   avoids  kernel-specific  issues  in  the  */
    /*   official stack walker and will give us a  */
    /*   vehicle  later for adding reliable vs     */
    /*   guess-work stack entries.		       */
    /***********************************************/
    cpu_core_t	*this_cpu = cpu_get_this();
    struct pt_regs *regs = this_cpu->cpuc_regs;
    struct thread_info *context;
    uintptr_t *sp;
    uintptr_t *spend;

    /***********************************************/
    /*   For   syscalls,  we  will  have  a  null  */
    /*   cpuc_regs,  since we don't intercept the  */
    /*   trap,   but   instead  intercept  the  C  */
    /*   syscall function.			       */
    /***********************************************/
    if (regs == NULL)
        sp = (uintptr_t *) &depth;
    else
        sp = (uintptr_t *) regs->r_rsp;

    /***********************************************/
    /*   Daisy  chain the interrupt and any other  */
    /*   stacks.  Limit  ourselves  in  case  of   */
    /*   stack corruption.			       */
    /***********************************************/
    DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
    depth = 0;
    for (lim = 0; lim < 3 && depth < pcstack_limit; lim++) {
        int	ndepth = depth;
        uintptr_t *prev_esp;

        context = (struct thread_info *) ((unsigned long) sp & (~(THREAD_SIZE - 1)));
        spend = (uintptr_t *) ((unsigned long) sp | (THREAD_SIZE - 1));
        for ( ; depth < pcstack_limit && sp < spend; sp++) {
            if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
                goto end_stack;
            if (*sp && is_kernel_text((unsigned long) *sp)) {
                pcstack[depth++] = *sp;
            }
        }
        if (depth >= pcstack_limit || ndepth == depth)
            break;

        prev_esp = (uintptr_t *) ((char *) context + sizeof(struct thread_info));
        if ((sp = prev_esp) == NULL)
            break;
        /***********************************************/
        /*   Special signal to mark the IRQ stack.     */
        /***********************************************/
        if (depth < pcstack_limit) {
            pcstack[depth++] = 1;
        }
    }
end_stack:
    DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
    DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_FAULT);
#else

    /***********************************************/
    /*   I'm  a  little tired of the kernel dying  */
    /*   in  the callback, so let's avoid relying  */
    /*   on the kernel stack walker.	       */
    /***********************************************/
    dmutex_enter(&dtrace_stack_mutex);
    g_depth = 0;
    g_pcstack = pcstack;
    g_pcstack_limit = pcstack_limit;

#if FUNC_DUMP_TRACE_ARGS == 6
    dump_trace(NULL, NULL, NULL, 0, &print_trace_ops, NULL);
#else
    dump_trace(NULL, NULL, NULL, &print_trace_ops, NULL);
#endif
    depth = g_depth;
    dmutex_exit(&dtrace_stack_mutex);
#endif

    while (depth < pcstack_limit)
        pcstack[depth++] = (pc_t) NULL;
}
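
The older walker derives the stack bounds purely from alignment: Linux kernel stacks are THREAD_SIZE-aligned, so masking the stack pointer down yields the thread_info base and OR-ing in the low bits yields the stack end. A user-space demonstration; the THREAD_SIZE value is illustrative:

#include <assert.h>
#include <stdint.h>

#define THREAD_SIZE	(4 * 4096UL)	/* illustrative; arch-dependent */

int
main(void)
{
	uintptr_t sp = 0x100000 + 0x123;	/* an address inside a stack */
	uintptr_t base = sp & ~(THREAD_SIZE - 1);
	uintptr_t end  = sp | (THREAD_SIZE - 1);

	assert(base == 0x100000);		/* stack is THREAD_SIZE-aligned */
	assert(end == 0x100000 + THREAD_SIZE - 1);
	return (0);
}
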
Example #22
/*
 * Get user stack entries up to the pcstack_limit; return the number of entries
 * acquired.  If pcstack is NULL, return the number of entries potentially
 * acquirable.
 */
unsigned long dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack,
				 int pcstack_limit)
{
	struct task_struct	*p = current;
	struct mm_struct	*mm = p->mm;
	unsigned long		tos, bos, fpc;
	unsigned long		*sp;
	unsigned long		depth = 0;
	struct vm_area_struct	*stack_vma;
	struct page		*stack_page = NULL;
	struct pt_regs		*regs = current_pt_regs();

	if (pcstack) {
		if (unlikely(pcstack_limit < 2)) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return 0;
		}
		*pcstack++ = (uint64_t)p->pid;
		*pcstack++ = (uint64_t)p->tgid;
		pcstack_limit -= 2;
	}

	if (!user_mode(regs))
		goto out;

	/*
	 * There is always at least one address to report: the instruction
	 * pointer itself (frame 0).
	 */
	depth++;

	fpc = instruction_pointer(regs);
	if (pcstack) {
		*pcstack++ = (uint64_t)fpc;
		pcstack_limit--;
	}

	/*
	 * We cannot ustack() if this task has no mm, if this task is a kernel
	 * thread, or when someone else has the mmap_sem or the page_table_lock
	 * (because find_user_vma() ultimately does a __get_user_pages() and
	 * thence a follow_page(), which can take that lock).
	 */
	if (mm == NULL || (p->flags & PF_KTHREAD) ||
	    spin_is_locked(&mm->page_table_lock))
		goto out;

	if (!down_read_trylock(&mm->mmap_sem))
		goto out;
	atomic_inc(&mm->mm_users);

	/*
	 * The following construct can be replaced with:
	 * 	tos = current_user_stack_pointer();
	 * once support for 4.0 is no longer necessary.
	 */
#ifdef CONFIG_X86_64
	tos = current_pt_regs()->sp;
#else
	tos = user_stack_pointer(current_pt_regs());
#endif
	stack_vma = find_user_vma(p, mm, NULL, (unsigned long) tos, 0);
	if (!stack_vma ||
	    stack_vma->vm_start > (unsigned long) tos)
		goto unlock_out;

#ifdef CONFIG_STACK_GROWSUP
#error This code does not yet work on STACK_GROWSUP platforms.
#endif
	bos = stack_vma->vm_end;
	if (stack_guard_page_end(stack_vma, bos))
		bos -= PAGE_SIZE;

	/*
	 * If we have a pcstack, loop as long as we are within the stack limit.
	 * Otherwise, loop until we run out of stack.
	 */
	for (sp = (unsigned long *)tos;
	     sp <= (unsigned long *)bos &&
		     ((pcstack && pcstack_limit > 0) ||
		      !pcstack);
	     sp++) {
		struct vm_area_struct	*code_vma;
		unsigned long		addr;

		/*
		 * Recheck for faultedness and pin at page boundaries.
		 */
		if (!stack_page || (((unsigned long)sp & ~PAGE_MASK) == 0)) {
			if (stack_page) {
				put_page(stack_page);
				stack_page = NULL;
			}

			if (!find_user_vma(p, mm, &stack_page,
					   (unsigned long) sp, 1))
				break;
		}

		DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
		get_user(addr, sp);
		DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

		if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT)) {
			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_BADADDR);
			break;
		}

		if (addr == fpc)
			continue;

		code_vma = find_user_vma(p, mm, NULL, addr, 0);

		if (!code_vma || code_vma->vm_start > addr)
			continue;

		if ((addr >= tos && addr <= bos) ||
		    (code_vma->vm_flags & VM_GROWSDOWN)) {
			/* stack address - may need it for the fpstack. */
		} else if (code_vma->vm_flags & VM_EXEC) {
			if (pcstack) {
				*pcstack++ = addr;
				pcstack_limit--;
			}
			depth++;
		}
	}
	if (stack_page != NULL)
		put_page(stack_page);

unlock_out:
	atomic_dec(&mm->mm_users);
	up_read(&mm->mmap_sem);

out:
	if (pcstack)
		while (pcstack_limit--)
			*pcstack++ = 0;

	return depth;
}
Example #23
int
fasttrap_pid_probe(struct reg *rp)
{
	proc_t *p = curproc;
	uintptr_t pc = rp->r_rip - 1;
	uintptr_t new_pc = 0;
	fasttrap_bucket_t *bucket;
#if defined(sun)
	kmutex_t *pid_mtx;
#endif
	fasttrap_tracepoint_t *tp, tp_local;
	pid_t pid;
	dtrace_icookie_t cookie;
	uint_t is_enabled = 0;

	/*
	 * It's possible that a user (in a veritable orgy of bad planning)
	 * could redirect this thread's flow of control before it reached the
	 * return probe fasttrap. In this case we need to kill the process
	 * since it's in an unrecoverable state.
	 */
	if (curthread->t_dtrace_step) {
		ASSERT(curthread->t_dtrace_on);
		fasttrap_sigtrap(p, curthread, pc);
		return (0);
	}

	/*
	 * Clear all user tracing flags.
	 */
	curthread->t_dtrace_ft = 0;
	curthread->t_dtrace_pc = 0;
	curthread->t_dtrace_npc = 0;
	curthread->t_dtrace_scrpc = 0;
	curthread->t_dtrace_astpc = 0;
#ifdef __amd64
	curthread->t_dtrace_regv = 0;
#endif

#if defined(sun)
	/*
	 * Treat a child created by a call to vfork(2) as if it were its
	 * parent. We know that there's only one thread of control in such a
	 * process: this one.
	 */
	while (p->p_flag & SVFORK) {
		p = p->p_parent;
	}
#endif

	PROC_LOCK(p);
	_PHOLD(p);
	pid = p->p_pid;
#if defined(sun)
	pid_mtx = &cpu_core[CPU->cpu_id].cpuc_pid_lock;
	mutex_enter(pid_mtx);
#endif
	bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)];

	/*
	 * Lookup the tracepoint that the process just hit.
	 */
	for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) {
		if (pid == tp->ftt_pid && pc == tp->ftt_pc &&
		    tp->ftt_proc->ftpc_acount != 0)
			break;
	}

	/*
	 * If we couldn't find a matching tracepoint, either a tracepoint has
	 * been inserted without using the pid<pid> ioctl interface (see
	 * fasttrap_ioctl), or somehow we have mislaid this tracepoint.
	 */
	if (tp == NULL) {
#if defined(sun)
		mutex_exit(pid_mtx);
#endif
		_PRELE(p);
		PROC_UNLOCK(p);
		return (-1);
	}

	/*
	 * Set the program counter to the address of the traced instruction
	 * so that it looks right in ustack() output.
	 */
	rp->r_rip = pc;

	if (tp->ftt_ids != NULL) {
		fasttrap_id_t *id;

#ifdef __amd64
		if (p->p_model == DATAMODEL_LP64) {
			for (id = tp->ftt_ids; id != NULL; id = id->fti_next) {
				fasttrap_probe_t *probe = id->fti_probe;

				if (id->fti_ptype == DTFTP_ENTRY) {
					/*
					 * We note that this was an entry
					 * probe to help ustack() find the
					 * first caller.
					 */
					cookie = dtrace_interrupt_disable();
					DTRACE_CPUFLAG_SET(CPU_DTRACE_ENTRY);
					dtrace_probe(probe->ftp_id, rp->r_rdi,
					    rp->r_rsi, rp->r_rdx, rp->r_rcx,
					    rp->r_r8);
					DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_ENTRY);
					dtrace_interrupt_enable(cookie);
				} else if (id->fti_ptype == DTFTP_IS_ENABLED) {
					/*
					 * Note that in this case, we don't
					 * call dtrace_probe() since it's only
					 * an artificial probe meant to change
					 * the flow of control so that it
					 * encounters the true probe.
					 */
					is_enabled = 1;
				} else if (probe->ftp_argmap == NULL) {
					dtrace_probe(probe->ftp_id, rp->r_rdi,
					    rp->r_rsi, rp->r_rdx, rp->r_rcx,
					    rp->r_r8);
				} else {
					uintptr_t t[5];

					fasttrap_usdt_args64(probe, rp,
					    sizeof (t) / sizeof (t[0]), t);

					dtrace_probe(probe->ftp_id, t[0], t[1],
					    t[2], t[3], t[4]);
				}
			}
		} else {
#else /* __amd64 */
			uintptr_t s0, s1, s2, s3, s4, s5;
			uint32_t *stack = (uint32_t *)rp->r_esp;

			/*
			 * In 32-bit mode, all arguments are passed on the
			 * stack. If this is a function entry probe, we need
			 * to skip the first entry on the stack as it
			 * represents the return address rather than a
			 * parameter to the function.
			 */
			s0 = fasttrap_fuword32_noerr(&stack[0]);
			s1 = fasttrap_fuword32_noerr(&stack[1]);
			s2 = fasttrap_fuword32_noerr(&stack[2]);
			s3 = fasttrap_fuword32_noerr(&stack[3]);
			s4 = fasttrap_fuword32_noerr(&stack[4]);
			s5 = fasttrap_fuword32_noerr(&stack[5]);

			for (id = tp->ftt_ids; id != NULL; id = id->fti_next) {
				fasttrap_probe_t *probe = id->fti_probe;

				if (id->fti_ptype == DTFTP_ENTRY) {
					/*
					 * We note that this was an entry
					 * probe to help ustack() find the
					 * first caller.
					 */
					cookie = dtrace_interrupt_disable();
					DTRACE_CPUFLAG_SET(CPU_DTRACE_ENTRY);
					dtrace_probe(probe->ftp_id, s1, s2,
					    s3, s4, s5);
					DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_ENTRY);
					dtrace_interrupt_enable(cookie);
				} else if (id->fti_ptype == DTFTP_IS_ENABLED) {
					/*
					 * Note that in this case, we don't
					 * call dtrace_probe() since it's only
					 * an artificial probe meant to change
					 * the flow of control so that it
					 * encounters the true probe.
					 */
					is_enabled = 1;
				} else if (probe->ftp_argmap == NULL) {
					dtrace_probe(probe->ftp_id, s0, s1,
					    s2, s3, s4);
				} else {
					uint32_t t[5];

					fasttrap_usdt_args32(probe, rp,
					    sizeof (t) / sizeof (t[0]), t);

					dtrace_probe(probe->ftp_id, t[0], t[1],
					    t[2], t[3], t[4]);
				}
			}
#endif /* __amd64 */
#ifdef __amd64
		}
#endif
	}

	/*
	 * We're about to do a bunch of work so we cache a local copy of
	 * the tracepoint to emulate the instruction, and then find the
	 * tracepoint again later if we need to light up any return probes.
	 */
	tp_local = *tp;
	PROC_UNLOCK(p);
#if defined(sun)
	mutex_exit(pid_mtx);
#endif
	tp = &tp_local;

	/*
	 * Set the program counter to appear as though the traced instruction
	 * had completely executed. This ensures that fasttrap_getreg() will
	 * report the expected value for REG_RIP.
	 */
	rp->r_rip = pc + tp->ftt_size;

	/*
	 * If there's an is-enabled probe connected to this tracepoint it
	 * means that there was a 'xorl %eax, %eax' or 'xorq %rax, %rax'
	 * instruction that was placed there by DTrace when the binary was
	 * linked. As this probe is, in fact, enabled, we need to stuff 1
	 * into %eax or %rax. Accordingly, we can bypass all the instruction
	 * emulation logic since we know the inevitable result. It's possible
	 * that a user could construct a scenario where the 'is-enabled'
	 * probe was on some other instruction, but that would be a rather
	 * exotic way to shoot oneself in the foot.
	 */
	if (is_enabled) {
		rp->r_rax = 1;
		new_pc = rp->r_rip;
		goto done;
	}

	/*
	 * We emulate certain types of instructions to ensure correctness
	 * (in the case of position dependent instructions) or optimize
	 * common cases. The rest we have the thread execute back in user-
	 * land.
	 */
	switch (tp->ftt_type) {
	case FASTTRAP_T_RET:
	case FASTTRAP_T_RET16:
	{
		uintptr_t dst = 0;
		uintptr_t addr = 0;
		int ret = 0;

		/*
		 * We have to emulate _every_ facet of the behavior of a ret
		 * instruction including what happens if the load from %esp
		 * fails; in that case, we send a SIGSEGV.
		 */
#ifdef __amd64
		if (p->p_model == DATAMODEL_NATIVE) {
			ret = dst = fasttrap_fulword((void *)rp->r_rsp);
			addr = rp->r_rsp + sizeof (uintptr_t);
		} else {
#endif
#ifdef __i386__
			uint32_t dst32;
			ret = dst32 = fasttrap_fuword32((void *)rp->r_esp);
			dst = dst32;
			addr = rp->r_esp + sizeof (uint32_t);
#endif
#ifdef __amd64
		}
#endif

		if (ret == -1) {
			fasttrap_sigsegv(p, curthread, rp->r_rsp);
			new_pc = pc;
			break;
		}

		if (tp->ftt_type == FASTTRAP_T_RET16)
			addr += tp->ftt_dest;

		rp->r_rsp = addr;
		new_pc = dst;
		break;
	}

	case FASTTRAP_T_JCC:
	{
		uint_t taken = 0;

		switch (tp->ftt_code) {
		case FASTTRAP_JO:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_OF) != 0;
			break;
		case FASTTRAP_JNO:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_OF) == 0;
			break;
		case FASTTRAP_JB:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_CF) != 0;
			break;
		case FASTTRAP_JAE:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_CF) == 0;
			break;
		case FASTTRAP_JE:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_ZF) != 0;
			break;
		case FASTTRAP_JNE:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_ZF) == 0;
			break;
		case FASTTRAP_JBE:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_CF) != 0 ||
			    (rp->r_rflags & FASTTRAP_EFLAGS_ZF) != 0;
			break;
		case FASTTRAP_JA:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_CF) == 0 &&
			    (rp->r_rflags & FASTTRAP_EFLAGS_ZF) == 0;
			break;
		case FASTTRAP_JS:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_SF) != 0;
			break;
		case FASTTRAP_JNS:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_SF) == 0;
			break;
		case FASTTRAP_JP:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_PF) != 0;
			break;
		case FASTTRAP_JNP:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_PF) == 0;
			break;
		case FASTTRAP_JL:
			taken = ((rp->r_rflags & FASTTRAP_EFLAGS_SF) == 0) !=
			    ((rp->r_rflags & FASTTRAP_EFLAGS_OF) == 0);
			break;
		case FASTTRAP_JGE:
			taken = ((rp->r_rflags & FASTTRAP_EFLAGS_SF) == 0) ==
			    ((rp->r_rflags & FASTTRAP_EFLAGS_OF) == 0);
			break;
		case FASTTRAP_JLE:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_ZF) != 0 ||
			    ((rp->r_rflags & FASTTRAP_EFLAGS_SF) == 0) !=
			    ((rp->r_rflags & FASTTRAP_EFLAGS_OF) == 0);
			break;
		case FASTTRAP_JG:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_ZF) == 0 &&
			    ((rp->r_rflags & FASTTRAP_EFLAGS_SF) == 0) ==
			    ((rp->r_rflags & FASTTRAP_EFLAGS_OF) == 0);
			break;

		}

		if (taken)
			new_pc = tp->ftt_dest;
		else
			new_pc = pc + tp->ftt_size;
		break;
	}

	case FASTTRAP_T_LOOP:
	{
		uint_t taken = 0;
#ifdef __amd64
		greg_t cx = rp->r_rcx--;
#else
		greg_t cx = rp->r_ecx--;
#endif

		switch (tp->ftt_code) {
		case FASTTRAP_LOOPNZ:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_ZF) == 0 &&
			    cx != 0;
			break;
		case FASTTRAP_LOOPZ:
			taken = (rp->r_rflags & FASTTRAP_EFLAGS_ZF) != 0 &&
			    cx != 0;
			break;
		case FASTTRAP_LOOP:
			taken = (cx != 0);
			break;
		}

		if (taken)
			new_pc = tp->ftt_dest;
		else
			new_pc = pc + tp->ftt_size;
		break;
	}

	case FASTTRAP_T_JCXZ:
	{
#ifdef __amd64
		greg_t cx = rp->r_rcx;
#else
		greg_t cx = rp->r_ecx;
#endif

		if (cx == 0)
			new_pc = tp->ftt_dest;
		else
			new_pc = pc + tp->ftt_size;
		break;
	}

	case FASTTRAP_T_PUSHL_EBP:
	{
		int ret = 0;
		uintptr_t addr = 0;

#ifdef __amd64
		if (p->p_model == DATAMODEL_NATIVE) {
			addr = rp->r_rsp - sizeof (uintptr_t);
			ret = fasttrap_sulword((void *)addr, &rp->r_rsp);
		} else {
#endif
#ifdef __i386__
			addr = rp->r_rsp - sizeof (uint32_t);
			ret = fasttrap_suword32((void *)addr, &rp->r_rsp);
#endif
#ifdef __amd64
		}
#endif

		if (ret == -1) {
			fasttrap_sigsegv(p, curthread, addr);
			new_pc = pc;
			break;
		}

		rp->r_rsp = addr;
		new_pc = pc + tp->ftt_size;
		break;
	}

	case FASTTRAP_T_NOP:
		new_pc = pc + tp->ftt_size;
		break;

	case FASTTRAP_T_JMP:
	case FASTTRAP_T_CALL:
		if (tp->ftt_code == 0) {
			new_pc = tp->ftt_dest;
		} else {
#ifdef __amd64
			uintptr_t value;
#endif
			uintptr_t addr = tp->ftt_dest;

			if (tp->ftt_base != FASTTRAP_NOREG)
				addr += fasttrap_getreg(rp, tp->ftt_base);
			if (tp->ftt_index != FASTTRAP_NOREG)
				addr += fasttrap_getreg(rp, tp->ftt_index) <<
				    tp->ftt_scale;

			if (tp->ftt_code == 1) {
				/*
				 * If there's a segment prefix for this
				 * instruction, we'll need to check permissions
				 * and bounds on the given selector, and adjust
				 * the address accordingly.
				 */
				if (tp->ftt_segment != FASTTRAP_SEG_NONE &&
				    fasttrap_do_seg(tp, rp, &addr) != 0) {
					fasttrap_sigsegv(p, curthread, addr);
					new_pc = pc;
					break;
				}

#ifdef __amd64
				if (p->p_model == DATAMODEL_NATIVE) {
					if ((value = fasttrap_fulword((void *)addr))
					     == -1) {
						fasttrap_sigsegv(p, curthread,
						    addr);
						new_pc = pc;
						break;
					}
					new_pc = value;
				} else {
#endif
#ifdef __i386__
					uint32_t value32;
					addr = (uintptr_t)(uint32_t)addr;
					if ((value32 = fasttrap_fuword32((void *)addr))
					    == -1) {
						fasttrap_sigsegv(p, curthread,
						    addr);
						new_pc = pc;
						break;
					}
					new_pc = value32;
#endif
#ifdef __amd64
				}
#endif
			} else {
				new_pc = addr;
			}
		}

		/*
		 * If this is a call instruction, we need to push the return
		 * address onto the stack. If this fails, we send the process
		 * a SIGSEGV and reset the pc to emulate what would happen if
		 * this instruction weren't traced.
		 */
		if (tp->ftt_type == FASTTRAP_T_CALL) {
			int ret = 0;
			uintptr_t addr = 0, pcps;
#ifdef __amd64
			if (p->p_model == DATAMODEL_NATIVE) {
				addr = rp->r_rsp - sizeof (uintptr_t);
				pcps = pc + tp->ftt_size;
				ret = fasttrap_sulword((void *)addr, &pcps);
			} else {
#endif
#ifdef __i386__
				addr = rp->r_rsp - sizeof (uint32_t);
				pcps = (uint32_t)(pc + tp->ftt_size);
				ret = fasttrap_suword32((void *)addr, &pcps);
#endif
#ifdef __amd64
			}
#endif

			if (ret == -1) {
				fasttrap_sigsegv(p, curthread, addr);
				new_pc = pc;
				break;
			}

			rp->r_rsp = addr;
		}

		break;

	case FASTTRAP_T_COMMON:
	{
		uintptr_t addr;
#if defined(__amd64)
		uint8_t scratch[2 * FASTTRAP_MAX_INSTR_SIZE + 22];
#else
		uint8_t scratch[2 * FASTTRAP_MAX_INSTR_SIZE + 7];
#endif
		uint_t i = 0;
#if defined(sun)
		klwp_t *lwp = ttolwp(curthread);
#endif

		/*
		 * Compute the address of the ulwp_t and step over the
		 * ul_self pointer. The method used to store the user-land
		 * thread pointer is very different on 32- and 64-bit
		 * kernels.
		 */
#if defined(sun)
#if defined(__amd64)
		if (p->p_model == DATAMODEL_LP64) {
			addr = lwp->lwp_pcb.pcb_fsbase;
			addr += sizeof (void *);
		} else {
			addr = lwp->lwp_pcb.pcb_gsbase;
			addr += sizeof (caddr32_t);
		}
#else
		addr = USD_GETBASE(&lwp->lwp_pcb.pcb_gsdesc);
		addr += sizeof (void *);
#endif
#endif /* sun */
#ifdef __i386__
		addr = USD_GETBASE(&curthread->td_pcb->pcb_gsd);
#else
		addr = curthread->td_pcb->pcb_gsbase;
#endif
		addr += sizeof (void *);

		/*
		 * Generic Instruction Tracing
		 * ---------------------------
		 *
		 * This is the layout of the scratch space in the user-land
		 * thread structure for our generated instructions.
		 *
		 *	32-bit mode			bytes
		 *	------------------------	-----
		 * a:	<original instruction>		<= 15
		 *	jmp	<pc + tp->ftt_size>	    5
		 * b:	<original instruction>		<= 15
		 *	int	T_DTRACE_RET		    2
		 *					-----
		 *					<= 37
		 *
		 *	64-bit mode			bytes
		 *	------------------------	-----
		 * a:	<original instruction>		<= 15
		 *	jmp	0(%rip)			    6
		 *	<pc + tp->ftt_size>		    8
		 * b:	<original instruction>		<= 15
		 * 	int	T_DTRACE_RET		    2
		 * 					-----
		 * 					<= 46
		 *
		 * The %pc is set to a, and curthread->t_dtrace_astpc is set
		 * to b. If we encounter a signal on the way out of the
		 * kernel, trap() will set %pc to curthread->t_dtrace_astpc
		 * so that we execute the original instruction and re-enter
		 * the kernel rather than redirecting to the next instruction.
		 *
		 * If there are return probes (so we know that we're going to
		 * need to reenter the kernel after executing the original
		 * instruction), the scratch space will just contain the
		 * original instruction followed by an interrupt -- the same
		 * data as at b.
		 *
		 * %rip-relative Addressing
		 * ------------------------
		 *
		 * There's a further complication in 64-bit mode due to %rip-
		 * relative addressing. While this is clearly a beneficial
		 * architectural decision for position independent code, it's
		 * hard not to see it as a personal attack against the pid
		 * provider since before there was a relatively small set of
		 * instructions to emulate; with %rip-relative addressing,
		 * almost every instruction can potentially depend on the
		 * address at which it's executed. Rather than emulating
		 * the broad spectrum of instructions that can now be
		 * position dependent, we emulate jumps and others as in
		 * 32-bit mode, and take a different tack for instructions
		 * using %rip-relative addressing.
		 *
		 * For every instruction that uses the ModRM byte, the
		 * in-kernel disassembler reports its location. We use the
		 * ModRM byte to identify that an instruction uses
		 * %rip-relative addressing and to see what other registers
		 * the instruction uses. To emulate those instructions,
		 * we modify the instruction to be %rax-relative rather than
		 * %rip-relative (or %rcx-relative if the instruction uses
		 * %rax; or %r8- or %r9-relative if the REX.B is present so
		 * we don't have to rewrite the REX prefix). We then load
		 * the value that %rip would have been into the scratch
		 * register and generate an instruction to reset the scratch
		 * register back to its original value. The instruction
		 * sequence looks like this:
		 *
		 *	64-mode %rip-relative		bytes
		 *	------------------------	-----
		 * a:	<modified instruction>		<= 15
		 *	movq	$<value>, %<scratch>	    6
		 *	jmp	0(%rip)			    6
		 *	<pc + tp->ftt_size>		    8
		 * b:	<modified instruction>  	<= 15
		 * 	int	T_DTRACE_RET		    2
		 * 					-----
		 *					   52
		 *
		 * We set curthread->t_dtrace_regv so that upon receiving
		 * a signal we can reset the value of the scratch register.
		 */

		ASSERT(tp->ftt_size < FASTTRAP_MAX_INSTR_SIZE);

		curthread->t_dtrace_scrpc = addr;
		bcopy(tp->ftt_instr, &scratch[i], tp->ftt_size);
		i += tp->ftt_size;

#ifdef __amd64
		if (tp->ftt_ripmode != 0) {
			greg_t *reg = NULL;

			ASSERT(p->p_model == DATAMODEL_LP64);
			ASSERT(tp->ftt_ripmode &
			    (FASTTRAP_RIP_1 | FASTTRAP_RIP_2));

			/*
			 * If this was a %rip-relative instruction, we change
			 * it to be either a %rax- or %rcx-relative
			 * instruction (depending on whether those registers
			 * are used as another operand; or %r8- or %r9-
			 * relative depending on the value of REX.B). We then
			 * set that register and generate a movq instruction
			 * to reset the value.
			 */
			if (tp->ftt_ripmode & FASTTRAP_RIP_X)
				scratch[i++] = FASTTRAP_REX(1, 0, 0, 1);
			else
				scratch[i++] = FASTTRAP_REX(1, 0, 0, 0);

			if (tp->ftt_ripmode & FASTTRAP_RIP_1)
				scratch[i++] = FASTTRAP_MOV_EAX;
			else
				scratch[i++] = FASTTRAP_MOV_ECX;

			switch (tp->ftt_ripmode) {
			case FASTTRAP_RIP_1:
				reg = &rp->r_rax;
				curthread->t_dtrace_reg = REG_RAX;
				break;
			case FASTTRAP_RIP_2:
				reg = &rp->r_rcx;
				curthread->t_dtrace_reg = REG_RCX;
				break;
			case FASTTRAP_RIP_1 | FASTTRAP_RIP_X:
				reg = &rp->r_r8;
				curthread->t_dtrace_reg = REG_R8;
				break;
			case FASTTRAP_RIP_2 | FASTTRAP_RIP_X:
				reg = &rp->r_r9;
				curthread->t_dtrace_reg = REG_R9;
				break;
			}

			/* LINTED - alignment */
			*(uint64_t *)&scratch[i] = *reg;
			curthread->t_dtrace_regv = *reg;
			*reg = pc + tp->ftt_size;
			i += sizeof (uint64_t);
		}
#endif

		/*
		 * Generate the branch instruction to what would have
		 * normally been the subsequent instruction. In 32-bit mode,
		 * this is just a relative branch; in 64-bit mode this is a
		 * %rip-relative branch that loads the 64-bit pc value
		 * immediately after the jmp instruction.
		 */
#ifdef __amd64
		if (p->p_model == DATAMODEL_LP64) {
			scratch[i++] = FASTTRAP_GROUP5_OP;
			scratch[i++] = FASTTRAP_MODRM(0, 4, 5);
			/* LINTED - alignment */
			*(uint32_t *)&scratch[i] = 0;
			i += sizeof (uint32_t);
			/* LINTED - alignment */
			*(uint64_t *)&scratch[i] = pc + tp->ftt_size;
			i += sizeof (uint64_t);
		} else {
#endif
#ifdef __i386__
			/*
			 * Set up the jmp to the next instruction; note that
			 * the size of the traced instruction cancels out.
			 */
			scratch[i++] = FASTTRAP_JMP32;
			/* LINTED - alignment */
			*(uint32_t *)&scratch[i] = pc - addr - 5;
			i += sizeof (uint32_t);
#endif
#ifdef __amd64
		}
#endif

		curthread->t_dtrace_astpc = addr + i;
		bcopy(tp->ftt_instr, &scratch[i], tp->ftt_size);
		i += tp->ftt_size;
		scratch[i++] = FASTTRAP_INT;
		scratch[i++] = T_DTRACE_RET;

		ASSERT(i <= sizeof (scratch));

#if defined(sun)
		if (fasttrap_copyout(scratch, (char *)addr, i)) {
#else
		if (uwrite(curproc, scratch, i, addr)) {
#endif
			fasttrap_sigtrap(p, curthread, pc);
			new_pc = pc;
			break;
		}
		if (tp->ftt_retids != NULL) {
			curthread->t_dtrace_step = 1;
			curthread->t_dtrace_ret = 1;
			new_pc = curthread->t_dtrace_astpc;
		} else {
			new_pc = curthread->t_dtrace_scrpc;
		}

		curthread->t_dtrace_pc = pc;
		curthread->t_dtrace_npc = pc + tp->ftt_size;
		curthread->t_dtrace_on = 1;
		break;
	}

	default:
		panic("fasttrap: mishandled an instruction");
	}

done:
	/*
	 * If there were no return probes when we first found the tracepoint,
	 * we should feel no obligation to honor any return probes that were
	 * subsequently enabled -- they'll just have to wait until the next
	 * time around.
	 */
	if (tp->ftt_retids != NULL) {
		/*
		 * We need to wait until the results of the instruction are
		 * apparent before invoking any return probes. If this
		 * instruction was emulated we can just call
		 * fasttrap_return_common(); if it needs to be executed, we
		 * need to wait until the user thread returns to the kernel.
		 */
		if (tp->ftt_type != FASTTRAP_T_COMMON) {
			/*
			 * Set the program counter to the address of the traced
			 * instruction so that it looks right in ustack()
			 * output. We had previously set it to the end of the
			 * instruction to simplify %rip-relative addressing.
			 */
			rp->r_rip = pc;

			fasttrap_return_common(rp, pc, pid, new_pc);
		} else {
			ASSERT(curthread->t_dtrace_ret != 0);
			ASSERT(curthread->t_dtrace_pc == pc);
			ASSERT(curthread->t_dtrace_scrpc != 0);
			ASSERT(new_pc == curthread->t_dtrace_astpc);
		}
	}

	rp->r_rip = new_pc;

	PROC_LOCK(p);
	proc_write_regs(curthread, rp);
	_PRELE(p);
	PROC_UNLOCK(p);

	return (0);
}
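
The scratch buffer sizes in the FASTTRAP_T_COMMON case follow from the layout comments inside it: two copies of a <=15-byte instruction plus 5+2 control bytes in 32-bit mode, or plus 6+8+2 bytes (and 6 more for the movq) in 64-bit mode. A sanity check of that accounting, using the byte counts exactly as the comment states them:

#include <assert.h>

#define FASTTRAP_MAX_INSTR_SIZE	15	/* longest x86 instruction */

int
main(void)
{
	/* Byte counts as given in the scratch-space layout comment. */
	int jmp32 = 5, jmp64 = 6 + 8, int_ret = 2, movq = 6;

	/* 32-bit: a + jmp + b + int fits in 2*15 + 7 bytes. */
	assert(2 * FASTTRAP_MAX_INSTR_SIZE + jmp32 + int_ret <=
	    2 * FASTTRAP_MAX_INSTR_SIZE + 7);

	/* 64-bit, including the %rip-relative movq, fits in 2*15 + 22. */
	assert(2 * FASTTRAP_MAX_INSTR_SIZE + movq + jmp64 + int_ret <=
	    2 * FASTTRAP_MAX_INSTR_SIZE + 22);
	return (0);
}
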

int
fasttrap_return_probe(struct reg *rp)
{
	proc_t *p = curproc;
	uintptr_t pc = curthread->t_dtrace_pc;
	uintptr_t npc = curthread->t_dtrace_npc;

	curthread->t_dtrace_pc = 0;
	curthread->t_dtrace_npc = 0;
	curthread->t_dtrace_scrpc = 0;
	curthread->t_dtrace_astpc = 0;

#if defined(sun)
	/*
	 * Treat a child created by a call to vfork(2) as if it were its
	 * parent. We know that there's only one thread of control in such a
	 * process: this one.
	 */
	while (p->p_flag & SVFORK) {
		p = p->p_parent;
	}
#endif

	/*
	 * We set rp->r_rip to the address of the traced instruction so
	 * that it appears to dtrace_probe() that we're on the original
	 * instruction, and so that the user can't easily detect our
	 * complex web of lies. dtrace_return_probe() (our caller)
	 * will correctly set %pc after we return.
	 */
	rp->r_rip = pc;

	fasttrap_return_common(rp, pc, p->p_pid, npc);

	return (0);
}
Example #24
/*ARGSUSED*/
uint64_t
sdt_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes)
{
	uintptr_t val;
//	struct frame *fp = (struct frame *)dtrace_getfp();
	uintptr_t *stack = NULL;
//	int i;
#if defined(__amd64)
	/*
	 * A total of 6 arguments are passed via registers; any argument with
	 * index of 5 or lower is therefore in a register.
	 */
//	int inreg = 5;
#endif

# if defined(TODOxxx)
	// TODO ... we don't have the struct frame in scope... disable for now
	for (i = 1; i <= aframes; i++) {
		fp = (struct frame *)(fp->fr_savfp);

		if (fp->fr_savpc == (pc_t)dtrace_invop_callsite) {
#if !defined(__amd64)
			/*
			 * If we pass through the invalid op handler, we will
			 * use the pointer that it passed to the stack as the
			 * second argument to dtrace_invop() as the pointer to
			 * the stack.
			 */
			stack = ((uintptr_t **)&fp[1])[1];
#else
			/*
			 * In the case of amd64, we will use the pointer to the
			 * regs structure that was pushed when we took the
			 * trap.  To get this structure, we must increment
			 * beyond the frame structure.  If the argument that
			 * we're seeking is passed on the stack, we'll pull
			 * the true stack pointer out of the saved registers
			 * and decrement our argument by the number of
			 * arguments passed in registers; if the argument
			 * we're seeking is passed in registers, we can just
			 * load it directly.
			 */
			struct regs *rp = (struct regs *)((uintptr_t)&fp[1] +
			    sizeof (uintptr_t));

			if (argno <= inreg) {
				stack = (uintptr_t *)&rp->r_rdi;
			} else {
				stack = (uintptr_t *)(rp->r_rsp);
				argno -= (inreg + 1);
			}
#endif
			goto load;
		}
	}

	/*
	 * We know that we did not come through a trap to get into
	 * dtrace_probe() -- the provider simply called dtrace_probe()
	 * directly.  As this is the case, we need to shift the argument
	 * that we're looking for:  the probe ID is the first argument to
	 * dtrace_probe(), so the argument n will actually be found where
	 * one would expect to find argument (n + 1).
	 */
	argno++;

#if defined(__amd64)
	if (argno <= inreg) {
		/*
		 * This shouldn't happen.  If the argument is passed in a
		 * register then it should have been, well, passed in a
		 * register...
		 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	argno -= (inreg + 1);
#endif
	stack = (uintptr_t *)&fp[1];

load:
#endif /* TODOxxx*/
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	val = stack[argno];
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return (val);
}
Example #25
uint64_t
dtrace_getarg(int arg, int aframes)
{
	uint64_t val;
	struct frame *fp = (struct frame *)__builtin_frame_address(0);
	uintptr_t *stack;
	uintptr_t pc;
	int i;

	/*
	 * A total of 6 arguments are passed via registers; any argument with
	 * index of 5 or lower is therefore in a register.
	 */
	int inreg = 5;

	for (i = 1; i <= aframes; i++) {
		fp = fp->backchain;
		pc = fp->retaddr;

		if (dtrace_invop_callsite_pre != NULL
			&& pc  >  (uintptr_t)dtrace_invop_callsite_pre
			&& pc  <= (uintptr_t)dtrace_invop_callsite_post) {
			/*
			 * In the case of x86_64, we will use the pointer to the
			 * save area structure that was pushed when we took the
			 * trap.  To get this structure, we must increment
			 * beyond the frame structure. If the
			 * argument that we're seeking is passed on the stack,
			 * we'll pull the true stack pointer out of the saved
			 * registers and decrement our argument by the number
			 * of arguments passed in registers; if the argument
			 * we're seeking is passed in registers, we can just
			 * load it directly.
			 */

			/* fp points to frame of dtrace_invop() activation. */
			fp = fp->backchain; /* to fbt_perfcallback() activation. */
			fp = fp->backchain; /* to kernel_trap() activation. */
			fp = fp->backchain; /* to trap_from_kernel() activation. */
			
			x86_saved_state_t   *tagged_regs = (x86_saved_state_t *)&fp[1];
			x86_saved_state64_t *saved_state = saved_state64(tagged_regs);

			if (arg <= inreg) {
				stack = (uintptr_t *)&saved_state->rdi;
			} else {
				fp = (struct frame *)(saved_state->isf.rsp);
				stack = (uintptr_t *)&fp[1]; /* Find marshalled
								arguments */
				arg -= inreg + 1;
			}
			goto load;
		}
	}

	/*
	 * We know that we did not come through a trap to get into
	 * dtrace_probe() --  We arrive here when the provider has
	 * called dtrace_probe() directly.
	 * The probe ID is the first argument to dtrace_probe().
	 * We must advance beyond that to get the argX.
	 */
	arg++; /* Advance past probeID */

	if (arg <= inreg) {
		/*
		 * This shouldn't happen.  If the argument is passed in a
		 * register then it should have been, well, passed in a
		 * register...
		 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	arg -= (inreg + 1);
	stack = (uintptr_t *)&fp[1]; /* Find marshalled arguments */

load:
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	/* dtrace_probe arguments arg0 ... arg4 are 64 bits wide */
	val = (uint64_t)(*(((uintptr_t *)stack) + arg));
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return (val);
}
Example #26
/*ARGSUSED*/
uint64_t
sdt_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes)
{
	uintptr_t val;
	struct frame *fp = (struct frame *)dtrace_getfp();
	uintptr_t *stack = NULL;
	int i;
#if defined(__amd64)
	/*
	 * A total of 6 arguments are passed via registers; any argument with
	 * index of 5 or lower is therefore in a register.
	 */
	int inreg = 5;
#endif


	for (i = 1; i <= aframes; i++) {
//printk("i=%d fp=%p aframes=%d pc:%p\n", i, fp, aframes, fp->fr_savpc);
		fp = (struct frame *)(fp->fr_savfp);
#if defined(linux)
		/***********************************************/
		/*   Temporary hack - not sure which stack we  */
		/*   have here and it is faulting us.          */
		/***********************************************/
		if (fp == NULL)
			return 0;
#endif

		if (fp->fr_savpc == (pc_t)dtrace_invop_callsite) {
#if !defined(__amd64)
			/*
			 * If we pass through the invalid op handler, we will
			 * use the pointer that it passed to the stack as the
			 * second argument to dtrace_invop() as the pointer to
			 * the stack.
			 */
			stack = ((uintptr_t **)&fp[1])[1];
#else
			/*
			 * In the case of amd64, we will use the pointer to the
			 * regs structure that was pushed when we took the
			 * trap.  To get this structure, we must increment
			 * beyond the frame structure.  If the argument that
			 * we're seeking is passed on the stack, we'll pull
			 * the true stack pointer out of the saved registers
			 * and decrement our argument by the number of
			 * arguments passed in registers; if the argument
			 * we're seeking is passed in registers, we can just
			 * load it directly.
			 */
			struct regs *rp = (struct regs *)((uintptr_t)&fp[1] +
			    sizeof (uintptr_t));

			if (argno <= inreg) {
				stack = (uintptr_t *)&rp->r_rdi;
			} else {
				stack = (uintptr_t *)(rp->r_rsp);
				argno -= (inreg + 1);
			}
#endif
			goto load;
		}
	}
//printk("stack %d: %p %p %p %p\n", i, ((long *) fp)[0], ((long *) fp)[1], ((long *) fp)[2], ((long *) fp)[3], ((long *) fp)[4]);

	/*
	 * We know that we did not come through a trap to get into
	 * dtrace_probe() -- the provider simply called dtrace_probe()
	 * directly.  As this is the case, we need to shift the argument
	 * that we're looking for:  the probe ID is the first argument to
	 * dtrace_probe(), so the argument n will actually be found where
	 * one would expect to find argument (n + 1).
	 */
	argno++;

#if defined(__amd64)
	if (argno <= inreg) {
		/*
		 * This shouldn't happen.  If the argument is passed in a
		 * register then it should have been, well, passed in a
		 * register...
		 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	argno -= (inreg + 1);
#endif
	stack = (uintptr_t *)&fp[1];

load:
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	val = stack[argno];
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
printk("sdt_getarg#%d: %lx aframes=%d\n", argno, val, aframes);

	return (val);
}
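
/*
 * A sketch of the NOFAULT bracket wrapped around every speculative
 * stack read in these examples.  The DEMO_* macros are stand-ins for
 * the real DTRACE_CPUFLAG_SET/CLEAR, which toggle a per-CPU flag so the
 * trap handler converts a bad dereference into CPU_DTRACE_BADADDR
 * instead of panicking; the single flag variable here is a
 * simplification of the per-CPU state.
 */
#include <stdint.h>

static volatile unsigned int demo_cpuflags;	/* per-CPU in reality */

#define	DEMO_NOFAULT		0x01
#define	DEMO_SET(f)		(demo_cpuflags |= (f))
#define	DEMO_CLEAR(f)		(demo_cpuflags &= ~(f))

static uintptr_t
read_arg_nofault(uintptr_t *stack, int argno)
{
	uintptr_t val;

	DEMO_SET(DEMO_NOFAULT);		/* faults become survivable */
	val = stack[argno];		/* may touch an unmapped page */
	DEMO_CLEAR(DEMO_NOFAULT);	/* restore normal fault handling */
	return (val);
}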
Exemple #27
/*ARGSUSED*/
static int
sdt_invop(uintptr_t addr, uintptr_t *stack, uintptr_t eax, trap_instr_t *tinfo)
{
	uintptr_t stack0, stack1, stack2, stack3, stack4;
	sdt_probe_t *sdt = sdt_probetab[SDT_ADDR2NDX(addr)];
	struct pt_regs *regs;

	for (; sdt != NULL; sdt = sdt->sdp_hashnext) {
//printk("sdt_invop %p %p\n", sdt->sdp_patchpoint, addr);
		if (sdt->sdp_enabled && (uintptr_t)sdt->sdp_patchpoint == addr) {
			tinfo->t_opcode = sdt->sdp_savedval;
			tinfo->t_inslen = sdt->sdp_inslen;
			tinfo->t_modrm = sdt->sdp_modrm;
			/***********************************************/
			/*   Don't fire the probe if this is unsafe.   */
			/***********************************************/
			if (!tinfo->t_doprobe)
				return (DTRACE_INVOP_NOP);
			/*
			 * When accessing the arguments on the stack, we must
			 * protect against accessing beyond the stack.  We can
			 * safely set NOFAULT here -- we know that interrupts
			 * are already disabled.
			 */
			regs = (struct pt_regs *) stack;
			DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
			stack0 = regs->c_arg0;
			stack1 = regs->c_arg1;
			stack2 = regs->c_arg2;
			stack3 = regs->c_arg3;
			stack4 = regs->c_arg4;

			/***********************************************/
			/*   Not  sure  if  this  is re-entrant safe.  */
			/*   Might   need   a   per-cpu   buffer   to  */
			/*   write/read from.			       */
			/***********************************************/

			/***********************************************/
			/*   Don't do this for the return probe - the  */
			/*   arguments  are  going  to be junk and we  */
			/*   will  hang/panic  the  kernel.  At  some  */
			/*   point  we need something better than an   */
			/*   entry/return  indicator -- maybe an enum  */
			/*   type.                                     */
			/***********************************************/
			if (sdt->sdp_entry) {
				stack0 = (uintptr_t) create_buf_t((struct file *) stack0, 
					(void *) stack1,  /* uaddr */
					(size_t) stack2,  /* size */
					(long long) stack3 /* offset */);
			}

			DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT |
			    CPU_DTRACE_BADADDR);
//printk("probe %p: %p %p %p %p %p\n", &addr, stack0, stack1, stack2, stack3, stack4);
			dtrace_probe(sdt->sdp_id, stack0, stack0, stack0, 0, 0);
//			dtrace_probe(sdt->sdp_id, stack0, stack1,
//			    stack2, stack3, stack4);

			return (DTRACE_INVOP_NOP);
		}
	}
//printk("none in invop for dsdt\n");

	return (0);
}
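
/*
 * A sketch of the patchpoint lookup pattern in sdt_invop() above:
 * probes hang off a hash table keyed by the patched address, chained
 * through sdp_hashnext on collision.  The DEMO_* names and the shift
 * in the hash function are assumptions, not the original SDT_ADDR2NDX.
 */
#include <stddef.h>
#include <stdint.h>

#define	DEMO_HASHSIZE		0x1000	/* must be a power of two */
#define	DEMO_ADDR2NDX(a)	((((uintptr_t)(a)) >> 4) & (DEMO_HASHSIZE - 1))

struct demo_probe {
	uintptr_t		 dp_patchpoint;	/* patched instruction addr */
	struct demo_probe	*dp_hashnext;	/* collision chain */
};

static struct demo_probe *demo_probetab[DEMO_HASHSIZE];

static struct demo_probe *
demo_lookup(uintptr_t addr)
{
	struct demo_probe *dp;

	for (dp = demo_probetab[DEMO_ADDR2NDX(addr)]; dp != NULL;
	    dp = dp->dp_hashnext) {
		if (dp->dp_patchpoint == addr)
			return (dp);
	}
	return (NULL);
}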
Exemple #28
uint64_t
dtrace_getarg(int arg, int aframes)
{
	uint64_t val;
	struct frame *fp = (struct frame *)__builtin_frame_address(0);
	uintptr_t *stack;
	uintptr_t pc;
	int i;

#if defined(__x86_64__)
	/*
	 * A total of 6 arguments are passed via registers; any argument with
	 * index of 5 or lower is therefore in a register.
	 */
	int inreg = 5;
#endif

	for (i = 1; i <= aframes; i++) {
		fp = fp->backchain;
		pc = fp->retaddr;

		if (pc == (uintptr_t)dtrace_invop_callsite) {
#if defined(__i386__)
			/*
			 * If we pass through the invalid op handler, we will
			 * use the pointer that it passed to the stack as the
			 * second argument to dtrace_invop() as the pointer to
			 * the frame we're hunting for.
			 */

			stack = (uintptr_t *)&fp[1]; /* Find marshalled arguments */
			fp = (struct frame *)stack[1]; /* Grab *second* argument */
			stack = (uintptr_t *)&fp[1]; /* Find marshalled arguments */
#elif defined(__x86_64__)
			/*
			 * In the case of x86_64, we will use the pointer to
			 * the save area structure that was pushed when we
			 * took the trap.  To get this structure, we must
			 * increment beyond the frame structure.  If the
			 * argument that we're seeking is passed on the stack,
			 * we'll pull the true stack pointer out of the saved
			 * registers and decrement our argument by the number
			 * of arguments passed in registers; if the argument
			 * we're seeking is passed in registers, we can just
			 * load it directly.
			 */

			/* fp points to frame of dtrace_invop() activation. */
			fp = fp->backchain; /* to fbt_perfcallback() activation. */
			fp = fp->backchain; /* to kernel_trap() activation. */
			fp = fp->backchain; /* to trap_from_kernel() activation. */
			
			x86_saved_state_t   *tagged_regs = (x86_saved_state_t *)&fp[1];
			x86_saved_state64_t *saved_state = saved_state64(tagged_regs);

			if (arg <= inreg) {
				stack = (uintptr_t *)&saved_state->rdi;
			} else {
				stack = (uintptr_t *)(saved_state->isf.rsp);
				arg -= inreg;
			}
#else
#error Unknown arch
#endif
			goto load;
		}
	}

	/*
	 * Arrive here when the provider has called dtrace_probe() directly.
	 */
	arg++; /* Advance past probeID */

#if defined(__x86_64__)
	if (arg <= inreg) {
		/*
		 * This shouldn't happen.  If the argument is passed in a
		 * register then it should have been, well, passed in a
		 * register...
		 */
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	}

	arg -= (inreg + 1);
#endif
	stack = (uintptr_t *)&fp[1]; /* Find marshalled arguments */

load:
	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	val = *(((uint64_t *)stack) + arg); /* dtrace_probe arguments arg0 .. arg4 are 64bits wide */
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);

	return (val);
}
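
/*
 * Why `stack = (uintptr_t *)&saved_state->rdi` works above: the code
 * relies on the six integer-argument registers being saved
 * contiguously, in System V AMD64 argument order, so register argument
 * n is reachable as ((uintptr_t *)&save->rdi)[n] for n <= 5.  The
 * struct below is a simplified stand-in, not the real
 * x86_saved_state64_t layout.
 */
#include <stdint.h>

struct demo_saved_state {
	uint64_t rdi;		/* arg 0 */
	uint64_t rsi;		/* arg 1 */
	uint64_t rdx;		/* arg 2 */
	uint64_t rcx;		/* arg 3 */
	uint64_t r8;		/* arg 4 */
	uint64_t r9;		/* arg 5 */
	/* ... remaining saved registers elided ... */
};

static uint64_t
demo_reg_arg(struct demo_saved_state *save, int arg)
{
	return (((uint64_t *)&save->rdi)[arg]);	/* valid for arg 0..5 */
}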
Exemple #29
uint64_t
dtrace_getreg(struct regs *savearea, uint_t reg)
{
	boolean_t is64Bit = proc_is64bit(current_proc());
	x86_saved_state_t *regs = (x86_saved_state_t *)savearea;

	if (is64Bit) {
	    if (reg <= SS) {
		reg = regmap[reg];
	    } else {
		reg -= (SS + 1);
	    }

	    switch (reg) {
	    case REG_RDI:
		return (uint64_t)(regs->ss_64.rdi);
	    case REG_RSI:
		return (uint64_t)(regs->ss_64.rsi);
	    case REG_RDX:
		return (uint64_t)(regs->ss_64.rdx);
	    case REG_RCX:
		return (uint64_t)(regs->ss_64.rcx);
	    case REG_R8:
		return (uint64_t)(regs->ss_64.r8);
	    case REG_R9:
		return (uint64_t)(regs->ss_64.r9);
	    case REG_RAX:
		return (uint64_t)(regs->ss_64.rax);
	    case REG_RBX:
		return (uint64_t)(regs->ss_64.rbx);
	    case REG_RBP:
		return (uint64_t)(regs->ss_64.rbp);
	    case REG_R10:
		return (uint64_t)(regs->ss_64.r10);
	    case REG_R11:
		return (uint64_t)(regs->ss_64.r11);
	    case REG_R12:
		return (uint64_t)(regs->ss_64.r12);
	    case REG_R13:
		return (uint64_t)(regs->ss_64.r13);
	    case REG_R14:
		return (uint64_t)(regs->ss_64.r14);
	    case REG_R15:
		return (uint64_t)(regs->ss_64.r15);
	    case REG_FS:
		return (uint64_t)(regs->ss_64.fs);
	    case REG_GS:
		return (uint64_t)(regs->ss_64.gs);
	    case REG_TRAPNO:
		return (uint64_t)(regs->ss_64.isf.trapno);
	    case REG_ERR:
		return (uint64_t)(regs->ss_64.isf.err);
	    case REG_RIP:
		return (uint64_t)(regs->ss_64.isf.rip);
	    case REG_CS:
		return (uint64_t)(regs->ss_64.isf.cs);
	    case REG_SS:
		return (uint64_t)(regs->ss_64.isf.ss);
	    case REG_RFL:
		return (uint64_t)(regs->ss_64.isf.rflags);
	    case REG_RSP:
		return (uint64_t)(regs->ss_64.isf.rsp);
	    case REG_DS:
	    case REG_ES:
	    default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	    }
	
	} else {   /* is 32bit user */
		/* beyond register SS */
		if (reg > x86_SAVED_STATE32_COUNT - 1) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return (0);
		}
		return (uint64_t)((unsigned int *)(&(regs->ss_32.gs)))[reg];
	}
}
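
/*
 * A sketch of the index scheme dtrace_getreg() uses above, with
 * hypothetical values: indices up to SS follow the legacy 32-bit
 * numbering and are translated through regmap[], while anything past
 * SS is a 64-bit extension index, rebased onto a compact enumeration
 * for the switch.  DEMO_SS and the regmap parameter are assumptions.
 */
#define	DEMO_SS		18	/* assumed position of SS in the legacy map */

static unsigned int
demo_canonical_reg(unsigned int reg, const unsigned int *regmap)
{
	if (reg <= DEMO_SS)
		return (regmap[reg]);		/* legacy i386 index */
	return (reg - (DEMO_SS + 1));		/* rebased 64-bit index */
}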
Exemple #30
static int
machtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	switch (cmd) {
		case DDI_ATTACH:
			break;
		case DDI_RESUME:
			return (DDI_SUCCESS);
		default:
			return (DDI_FAILURE);
	}

#if !defined(__APPLE__)
	machtrace_probe = (void (*)())dtrace_probe;
	membar_enter();

	if (ddi_create_minor_node(devi, "machtrace", S_IFCHR, 0,
				DDI_PSEUDO, NULL) == DDI_FAILURE ||
			dtrace_register("mach_trap", &machtrace_attr, DTRACE_PRIV_USER, NULL,
				&machtrace_pops, NULL, &machtrace_id) != 0) {
		machtrace_probe = systrace_stub;
#else
	machtrace_probe = dtrace_probe;
	membar_enter();
	
	if (ddi_create_minor_node(devi, "machtrace", S_IFCHR, 0,
				DDI_PSEUDO, 0) == DDI_FAILURE ||
			dtrace_register("mach_trap", &machtrace_attr, DTRACE_PRIV_USER, NULL,
				&machtrace_pops, NULL, &machtrace_id) != 0) {
		machtrace_probe = (void (*))&systrace_stub;
#endif /* __APPLE__ */		
		ddi_remove_minor_node(devi, NULL);
		return (DDI_FAILURE);
	}

	ddi_report_dev(devi);
	machtrace_devi = devi;

	return (DDI_SUCCESS);
}

d_open_t _systrace_open;

int _systrace_open(dev_t dev, int flags, int devtype, struct proc *p)
{
#pragma unused(dev,flags,devtype,p)
	return 0;
}

#define SYSTRACE_MAJOR  -24 /* let the kernel pick the device number */

/*
 * A struct describing which functions will get invoked for certain
 * actions.
 */
static struct cdevsw systrace_cdevsw =
{
	_systrace_open,		/* open */
	eno_opcl,		/* close */
	eno_rdwrt,		/* read */
	eno_rdwrt,		/* write */
	eno_ioctl,		/* ioctl */
	(stop_fcn_t *)nulldev,	/* stop */
	(reset_fcn_t *)nulldev,	/* reset */
	NULL,			/* tty's */
	eno_select,		/* select */
	eno_mmap,		/* mmap */
	eno_strat,		/* strategy */
	eno_getc,		/* getc */
	eno_putc,		/* putc */
	0			/* type */
};

static int gSysTraceInited = 0;

void systrace_init( void );

void systrace_init( void )
{
	if (0 == gSysTraceInited) {
		int majdevno = cdevsw_add(SYSTRACE_MAJOR, &systrace_cdevsw);

		if (majdevno < 0) {
			printf("systrace_init: failed to allocate a major number!\n");
			gSysTraceInited = 0;
			return;
		}

		systrace_attach((dev_info_t *)(uintptr_t)majdevno, DDI_ATTACH);
		machtrace_attach((dev_info_t *)(uintptr_t)majdevno, DDI_ATTACH);

		gSysTraceInited = 1;
	} else
		panic("systrace_init: called twice!\n");
}
#undef SYSTRACE_MAJOR
#endif /* __APPLE__ */

static uint64_t
systrace_getarg(void *arg, dtrace_id_t id, void *parg, int argno, int aframes)
{
#pragma unused(arg,id,parg,aframes)     /* __APPLE__ */
	uint64_t val = 0;
	syscall_arg_t *stack = (syscall_arg_t *)NULL;

	uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread());	

	if (uthread)
		stack = (syscall_arg_t *)uthread->t_dtrace_syscall_args;

	if (!stack)
		return (0);

	DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
	/* dtrace_probe arguments arg0 .. arg4 are 64bits wide */
	val = (uint64_t)*(stack+argno);
	DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
	return (val);
}
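
/*
 * A sketch of the hand-off that makes systrace_getarg() above work,
 * with stand-in types: the syscall provider stashes a pointer to the
 * live argument vector in the current uthread before firing the probe
 * and clears it afterwards, so getarg can read arguments without
 * re-walking the stack.  demo_uthread and its field are assumptions
 * modeled on the uthread_t usage in the example.
 */
#include <stddef.h>
#include <stdint.h>

typedef uint64_t demo_syscall_arg_t;

struct demo_uthread {
	void	*t_dtrace_syscall_args;	/* set around the probe firing */
};

static uint64_t
demo_getarg(struct demo_uthread *ut, int argno)
{
	demo_syscall_arg_t *args;

	if (ut == NULL || ut->t_dtrace_syscall_args == NULL)
		return (0);
	args = (demo_syscall_arg_t *)ut->t_dtrace_syscall_args;
	return ((uint64_t)args[argno]);
}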