Example #1
/*
 * Get the list of available data link types for the interface.
 */
static int
bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl, struct proc *p)
{
	u_int		n;
	int		error;
	struct ifnet	*ifp;
	struct bpf_if	*bp;
	user_addr_t	dlist;

	if (proc_is64bit(p)) {
		dlist = (user_addr_t)bfl->bfl_u.bflu_pad;
	} else {
		dlist = CAST_USER_ADDR_T(bfl->bfl_u.bflu_list);
	}

	ifp = d->bd_bif->bif_ifp;
	n = 0;
	error = 0;
	for (bp = bpf_iflist; bp; bp = bp->bif_next) {
		if (bp->bif_ifp != ifp)
			continue;
		if (dlist != USER_ADDR_NULL) {
			if (n >= bfl->bfl_len) {
				return (ENOMEM);
			}
			error = copyout(&bp->bif_dlt, dlist,
			    sizeof (bp->bif_dlt));
			dlist += sizeof (bp->bif_dlt);
		}
		n++;
	}
	bfl->bfl_len = n;
	return (error);
}
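For context, userspace reaches this handler through the BIOCGDLTLIST ioctl, usually in two passes: first with a NULL list pointer so the kernel only reports the count in bfl_len (the USER_ADDR_NULL branch above), then again with an allocated buffer. A minimal sketch, assuming an open /dev/bpfN descriptor that already has an interface bound via BIOCSETIF:

#include <sys/types.h>
#include <sys/ioctl.h>
#include <net/bpf.h>
#include <stdio.h>
#include <stdlib.h>

/* Two-pass BIOCGDLTLIST query against an open, bound bpf descriptor. */
int
print_dlt_list(int fd)
{
	struct bpf_dltlist bfl;

	bfl.bfl_len = 0;
	bfl.bfl_list = NULL;			/* first pass: count only */
	if (ioctl(fd, BIOCGDLTLIST, &bfl) < 0)
		return -1;
	if (bfl.bfl_len == 0)
		return 0;

	bfl.bfl_list = calloc(bfl.bfl_len, sizeof(u_int));
	if (bfl.bfl_list == NULL)
		return -1;
	if (ioctl(fd, BIOCGDLTLIST, &bfl) < 0) {	/* second pass: fill */
		free(bfl.bfl_list);
		return -1;
	}
	for (u_int i = 0; i < bfl.bfl_len; i++)
		printf("DLT %u\n", bfl.bfl_list[i]);
	free(bfl.bfl_list);
	return 0;
}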
Example #2
/*
 * Build a new uio covering at most the first 'size' bytes of 'auio'.
 */
struct uio *
afsio_partialcopy(struct uio *auio, size_t size)
{
    struct uio *res;
    int i;
    user_addr_t iovaddr;
    user_size_t iovsize;

    if (proc_is64bit(current_proc())) {
	res = uio_create(uio_iovcnt(auio), uio_offset(auio),
			 uio_isuserspace(auio) ? UIO_USERSPACE64 : UIO_SYSSPACE32,
			 uio_rw(auio));
    } else {
	res = uio_create(uio_iovcnt(auio), uio_offset(auio),
			 uio_isuserspace(auio) ? UIO_USERSPACE32 : UIO_SYSSPACE32,
			 uio_rw(auio));
    }

    for (i = 0; i < uio_iovcnt(auio) && size > 0; i++) {
	if (uio_getiov(auio, i, &iovaddr, &iovsize))
	    break;
	if (iovsize > size)
	    iovsize = size;
	if (uio_addiov(res, iovaddr, iovsize))
	    break;
	size -= iovsize;
    }
    return res;
}
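A hedged usage sketch for the helper above: clone only a prefix of a caller's uio before handing it to a consumer that advances its copy. Note that uio_create can fail under memory pressure and the helper does not check for that, so a defensive caller should; some_writer below is a hypothetical stand-in for any routine that drains a uio.

/* Hypothetical consumer; stands in for any routine that drains a uio. */
extern int some_writer(struct uio *uio);

static int
write_prefix(struct uio *auio)
{
	struct uio *head;
	int code;

	head = afsio_partialcopy(auio, 4096);	/* clone the first 4 KiB */
	if (head == NULL)			/* uio_create can return NULL */
		return ENOMEM;
	code = some_writer(head);
	uio_free(head);				/* the clone is ours to free */
	return code;
}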
Example #3
/*
 * The return value indicates if we've modified the stack.
 */
static int
dtrace_adjust_stack(uint64_t **pcstack, int *pcstack_limit, user_addr_t *pc,
                    user_addr_t sp)
{
    int64_t missing_tos;
    int rc = 0;
    boolean_t is64Bit = proc_is64bit(current_proc());

    ASSERT(pc != NULL);

    if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
        /*
         * If we found ourselves in an entry probe, the frame pointer has
         * not yet been pushed (that happens in the function prologue).
         * The best approach is to add the current pc as a missing top of
         * stack, and back the pc up to the caller, which is stored at the
         * current stack pointer address since the call instruction puts
         * it there right before the branch.
         */

        missing_tos = *pc;

        if (is64Bit)
            *pc = dtrace_fuword64(sp);
        else
            *pc = dtrace_fuword32(sp);
    } else {
        /*
         * We might have a top-of-stack override, in which case we just
         * add that frame to the top without question.  This happens in
         * return probes where you have a valid frame pointer, but it is
         * for the caller's frame, and you would like to add the pc of
         * the return site to the frame.
         */
        missing_tos = cpu_core[CPU->cpu_id].cpuc_missing_tos;
    }

    if (missing_tos != 0) {
        if (pcstack != NULL && pcstack_limit != NULL) {
            /*
             * If the missing top of stack has been filled out, then
             * we add it and adjust the size.
             */
            *(*pcstack)++ = missing_tos;
            (*pcstack_limit)--;
        }
        /*
         * Return 1 because we would have changed the stack whether or
         * not it was passed in; this ensures the stack count is correct.
         */
        rc = 1;
    }
    return rc;
}
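The later examples in this collection call this helper in two modes: a counting mode with NULL buffers (Example #4) and a collection mode with live cursors (Example #10). Distilled, using the same variable names as those functions:

/* Counting mode: no buffers, just learn whether a synthetic frame exists. */
if (dtrace_adjust_stack(NULL, NULL, &pc, sp) == 1)
	n++;	/* one extra frame will appear in the real walk */

/* Collection mode: the helper stores the missing frame through *pcstack
 * and decrements *pcstack_limit before the regular walk begins. */
(void)dtrace_adjust_stack(&pcstack, &pcstack_limit, &pc, sp);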
Example #4
int
dtrace_getustackdepth(void)
{
	thread_t thread = current_thread();
	x86_saved_state_t *regs;
	user_addr_t pc, sp, fp;
	int n = 0;
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (thread == NULL)
		return 0;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	pal_register_cache_state(thread, VALID);
	regs = (x86_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		return 0;

	if (is64Bit) {
		pc = regs->ss_64.isf.rip;
		sp = regs->ss_64.isf.rsp;
		fp = regs->ss_64.rbp;
	} else {
		pc = regs->ss_32.eip;
		sp = regs->ss_32.uesp;
		fp = regs->ss_32.ebp;
	}

	if (dtrace_adjust_stack(NULL, NULL, &pc, sp) == 1) {
		/*
		 * We would have adjusted the stack if we had supplied one
		 * (that is what rc == 1 means).  Also, as a side effect,
		 * the pc might have been fixed up, which is good for
		 * calling in to dtrace_getustack_common.
		 */
		n++;
	}
	
	/*
	 * Note that unlike ppc, the x86 code does not use
	 * CPU_DTRACE_USTACK_FP. This is because x86 always
	 * traces from the fp, even in syscall/profile/fbt
	 * providers.
	 */

	n += dtrace_getustack_common(NULL, 0, pc, fp);

	return (n);
}
Example #5
int
dtrace_getustackdepth(void)
{
	thread_t thread = current_thread();
	ppc_saved_state_t *regs;
	user_addr_t pc, sp;
	int n = 0;
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (thread == NULL)
		return 0;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (-1);

	regs = (ppc_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		return 0;

	pc = regs->REGPC;
	sp = regs->REGSP;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		n++;
		pc = regs->save_lr;
	}
	
	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_USTACK_FP)) {
		/*
		 * If the ustack fp flag is set, the stack frame from sp to
		 * fp contains no valid call information. Start with the fp.
		 */
		if (is64Bit)
			sp = dtrace_fuword64(sp);
		else
			sp = (user_addr_t)dtrace_fuword32(sp);
	}

	n += dtrace_getustack_common(NULL, 0, pc, sp);

	return (n);
}
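The two ABI variants above differ mainly in where the caller's pc lives during an entry probe. A compressed, illustrative sketch of that difference, reusing the identifiers from the two functions:

if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
#if defined(__i386__) || defined(__x86_64__)
	/* x86: the call instruction pushed the return pc at *sp */
	pc = is64Bit ? dtrace_fuword64(sp) : dtrace_fuword32(sp);
#else	/* ppc */
	/* ppc: the return pc is in the saved link register */
	pc = regs->save_lr;
#endif
	n++;	/* account for the frame that has no pushed fp yet */
}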
Example #6
int
afs_cdev_ioctl(dev_t dev, u_long cmd, caddr_t data, int fflag, struct proc *p) {
    unsigned int retval = 0;
    int code, is64 = proc_is64bit(p);
    struct afssysargs *a = (struct afssysargs *)data;
    struct afssysargs64 *a64 = (struct afssysargs64 *)data;

    if (((unsigned int)cmd != VIOC_SYSCALL) &&
	((unsigned int)cmd != VIOC_SYSCALL64))
	return EINVAL;

    if (((unsigned int)cmd == VIOC_SYSCALL64) && (is64 == 0))
	return EINVAL;

    if (((unsigned int)cmd == VIOC_SYSCALL) && (is64 != 0))
	return EINVAL;
    
    code = afs3_syscall(p, data, &retval);
    if (code)
	return code;

    if ((!is64) && retval && a->syscall != AFSCALL_CALL
	&& a->param1 != AFSOP_CACHEINODE)
    {
	printf("SSCall(%d,%d) is returning non-error value %d\n", a->syscall, a->param1, retval);
    }
    if ((is64) && retval && a64->syscall != AFSCALL_CALL
	&& a64->param1 != AFSOP_CACHEINODE)
    {
	printf("SSCall(%d,%llx) is returning non-error value %d\n", a64->syscall, a64->param1, retval);
    }

    if (!is64)
	a->retval = retval;
    else
	a64->retval = retval;
    return 0; 
}
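Since the handler rejects VIOC_SYSCALL64 from 32-bit callers and VIOC_SYSCALL from 64-bit ones, a userspace shim has to pick the command that matches its own ABI. A minimal sketch; VIOC_SYSCALL, VIOC_SYSCALL64, and the argument structs come from the OpenAFS headers, and the caller supplies an already-open device descriptor:

#include <sys/ioctl.h>

/* Hypothetical wrapper: choose the syscall command for this binary's ABI. */
static int
afs_syscall_ioctl(int fd, void *args)
{
#if defined(__LP64__)
	return ioctl(fd, VIOC_SYSCALL64, args);	/* args: struct afssysargs64 */
#else
	return ioctl(fd, VIOC_SYSCALL, args);	/* args: struct afssysargs */
#endif
}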
Example #7
/* ARGSUSED */
static	int
vnioctl(dev_t dev, u_long cmd, caddr_t data,
	__unused int flag, proc_t p,
	int is_char)
{
	struct vn_softc *vn;
	struct vn_ioctl_64 *viop;
	int error;
	u_int32_t *f;
	u_int64_t * o;
	int unit;
	struct vfsioattr ioattr;
	struct vn_ioctl_64 user_vnio;
	struct vfs_context  	context;

	unit = vnunit(dev);
	if (vnunit(dev) >= NVNDEVICE) {
		return (ENXIO);
	}

	vn = vn_table + unit;
	error = proc_suser(p);
	if (error) {
		goto done;
	}

	context.vc_thread = current_thread();
	context.vc_ucred = vn->sc_cred;

	viop = (struct vn_ioctl_64 *)data;
	f = (u_int32_t *)data;
	o = (u_int64_t *)data;
	switch (cmd) {
#ifdef __LP64__
	case VNIOCDETACH32:
	case VNIOCDETACH:
#else
	case VNIOCDETACH:
	case VNIOCDETACH64:
#endif
	case DKIOCGETBLOCKSIZE:
	case DKIOCSETBLOCKSIZE:
	case DKIOCGETMAXBLOCKCOUNTREAD:
	case DKIOCGETMAXBLOCKCOUNTWRITE:
	case DKIOCGETMAXSEGMENTCOUNTREAD:
	case DKIOCGETMAXSEGMENTCOUNTWRITE:
	case DKIOCGETMAXSEGMENTBYTECOUNTREAD:
	case DKIOCGETMAXSEGMENTBYTECOUNTWRITE:
	case DKIOCGETBLOCKCOUNT:
	case DKIOCGETBLOCKCOUNT32:
		if ((vn->sc_flags & VNF_INITED) == 0) {
			error = ENXIO;
			goto done;
		}
		break;
	default:
		break;
	}

	if (vn->sc_vp != NULL)
		vfs_ioattr(vnode_mount(vn->sc_vp), &ioattr);
	else
		bzero(&ioattr, sizeof(ioattr));

	switch (cmd) {
	case DKIOCISVIRTUAL:
		*f = 1;
		break;
	case DKIOCGETMAXBLOCKCOUNTREAD:
		*o = ioattr.io_maxreadcnt / vn->sc_secsize;
		break;
	case DKIOCGETMAXBLOCKCOUNTWRITE:
		*o = ioattr.io_maxwritecnt / vn->sc_secsize;
		break;
	case DKIOCGETMAXBYTECOUNTREAD:
		*o = ioattr.io_maxreadcnt;
		break;
	case DKIOCGETMAXBYTECOUNTWRITE:
		*o = ioattr.io_maxwritecnt;
		break;
	case DKIOCGETMAXSEGMENTCOUNTREAD:
		*o = ioattr.io_segreadcnt;
		break;
	case DKIOCGETMAXSEGMENTCOUNTWRITE:
		*o = ioattr.io_segwritecnt;
		break;
	case DKIOCGETMAXSEGMENTBYTECOUNTREAD:
		*o = ioattr.io_maxsegreadsize;
		break;
	case DKIOCGETMAXSEGMENTBYTECOUNTWRITE:
		*o = ioattr.io_maxsegwritesize;
		break;
	case DKIOCGETBLOCKSIZE:
	        *f = vn->sc_secsize;
		break;
	case DKIOCSETBLOCKSIZE:
		if (is_char) {
			/* can only set block size on block device */
			error = ENODEV;
			break;
		}
		if (*f < DEV_BSIZE) {
			error = EINVAL;
			break;
		}
		if (vn->sc_shadow_vp != NULL) {
			if (*f == (unsigned)vn->sc_secsize) {
				break;
			}
			/* can't change the block size if already shadowing */
			error = EBUSY;
			break;
		}
		vn->sc_secsize = *f;
		/* recompute the size in terms of the new blocksize */
		vn->sc_size = vn->sc_fsize / vn->sc_secsize;
		break;
	case DKIOCISWRITABLE:
		*f = 1;
		break;
	case DKIOCGETBLOCKCOUNT32:
		*f = vn->sc_size;
		break;
	case DKIOCGETBLOCKCOUNT:
		*o = vn->sc_size;
		break;
#ifdef __LP64__
	case VNIOCSHADOW32:
	case VNIOCSHADOW:
#else
	case VNIOCSHADOW:
	case VNIOCSHADOW64:
#endif
		if (vn->sc_shadow_vp != NULL) {
			error = EBUSY;
			break;
		}
		if (vn->sc_vp == NULL) {
			/* must be attached before we can shadow */
			error = EINVAL;
			break;
		}
		if (!proc_is64bit(p)) {
			/* downstream code expects LP64 version of vn_ioctl structure */
			vn_ioctl_to_64((struct vn_ioctl_32 *)viop, &user_vnio);
			viop = &user_vnio;
		}
		if (viop->vn_file == USER_ADDR_NULL) {
			error = EINVAL;
			break;
		}
		error = vniocattach_shadow(vn, viop, dev, 0, p);
		break;

#ifdef __LP64__
	case VNIOCATTACH32:
	case VNIOCATTACH:
#else
	case VNIOCATTACH:
	case VNIOCATTACH64:
#endif
		if (is_char) {
			/* attach only on block device */
			error = ENODEV;
			break;
		}
		if (vn->sc_flags & VNF_INITED) {
			error = EBUSY;
			break;
		}
		if (!proc_is64bit(p)) {
			/* downstream code expects LP64 version of vn_ioctl structure */
			vn_ioctl_to_64((struct vn_ioctl_32 *)viop, &user_vnio);
			viop = &user_vnio;
		}
		if (viop->vn_file == USER_ADDR_NULL) {
			error = EINVAL;
			break;
		}
		error = vniocattach_file(vn, viop, dev, 0, p);
		break;

#ifdef __LP64__
	case VNIOCDETACH32:
	case VNIOCDETACH:
#else
	case VNIOCDETACH:
	case VNIOCDETACH64:
#endif
		if (is_char) {
			/* detach only on block device */
			error = ENODEV;
			break;
		}
		/* Note: spec_open won't open a mounted block device */

		/*
		 * XXX handle i/o in progress.  Return EBUSY, or wait, or
		 * flush the i/o.
		 * XXX handle multiple opens of the device.  Return EBUSY,
		 * or revoke the fd's.
		 * How are these problems handled for removable and failing
		 * hardware devices? (Hint: They are not)
		 */
		vnclear(vn, &context);
		break;

	case VNIOCGSET:
		vn_options |= *f;
		*f = vn_options;
		break;

	case VNIOCGCLEAR:
		vn_options &= ~(*f);
		*f = vn_options;
		break;

	case VNIOCUSET:
		vn->sc_options |= *f;
		*f = vn->sc_options;
		break;

	case VNIOCUCLEAR:
		vn->sc_options &= ~(*f);
		*f = vn->sc_options;
		break;

	default:
		error = ENOTTY;
		break;
	}
 done:
	return(error);
}
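Both attach paths rely on vn_ioctl_to_64() to widen the 32-bit ioctl payload in place before the common code runs. A plausible shape for that thunk, sketched under the assumption that vn_ioctl carries a user pathname pointer plus size and control words (only vn_file is confirmed by the usage above; the other field names are guesses):

/* Sketch only: widen a 32-bit vn_ioctl payload to the 64-bit layout the
 * downstream attach code expects.  Field names beyond vn_file assumed. */
static void
vn_ioctl_to_64(struct vn_ioctl_32 *from, struct vn_ioctl_64 *to)
{
	to->vn_file = CAST_USER_ADDR_T(from->vn_file);	/* user pathname ptr */
	to->vn_size = from->vn_size;
	to->vn_control = from->vn_control;
}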
Example #8
void
unix_syscall_return(int error)
{
	thread_t		thread;
	struct uthread		*uthread;
	struct proc *p;
	unsigned int code;
	struct sysent *callp;

	thread = current_thread();
	uthread = get_bsdthread_info(thread);

	pal_register_cache_state(thread, DIRTY);

	p = current_proc();

	if (proc_is64bit(p)) {
		x86_saved_state64_t *regs;

		regs = saved_state64(find_user_regs(thread));

		code = uthread->syscall_code;
		callp = (code >= NUM_SYSENT) ? &sysent[63] : &sysent[code];

#if CONFIG_DTRACE
		if (callp->sy_call == dtrace_systrace_syscall)
			dtrace_systrace_syscall_return( code, error, uthread->uu_rval );
#endif /* CONFIG_DTRACE */
		AUDIT_SYSCALL_EXIT(code, p, uthread, error);

		if (error == ERESTART) {
			/*
			 * repeat the syscall
			 */
			pal_syscall_restart( thread, find_user_regs(thread) );
		}
		else if (error != EJUSTRETURN) {
			if (error) {
				regs->rax = error;
				regs->isf.rflags |= EFL_CF;	/* carry bit */
			} else { /* (not error) */

				switch (callp->sy_return_type) {
				case _SYSCALL_RET_INT_T:
					regs->rax = uthread->uu_rval[0];
					regs->rdx = uthread->uu_rval[1];
					break;
				case _SYSCALL_RET_UINT_T:
					regs->rax = ((u_int)uthread->uu_rval[0]);
					regs->rdx = ((u_int)uthread->uu_rval[1]);
					break;
				case _SYSCALL_RET_OFF_T:
				case _SYSCALL_RET_ADDR_T:
				case _SYSCALL_RET_SIZE_T:
				case _SYSCALL_RET_SSIZE_T:
				case _SYSCALL_RET_UINT64_T:
					regs->rax = *((uint64_t *)(&uthread->uu_rval[0]));
					regs->rdx = 0;
					break;
				case _SYSCALL_RET_NONE:
					break;
				default:
					panic("unix_syscall: unknown return type");
					break;
				}
				regs->isf.rflags &= ~EFL_CF;
			} 
		}
		DEBUG_KPRINT_SYSCALL_UNIX(
			"unix_syscall_return: error=%d retval=(%llu,%llu)\n",
			error, regs->rax, regs->rdx);
	} else {
		x86_saved_state32_t	*regs;

		regs = saved_state32(find_user_regs(thread));

		regs->efl &= ~(EFL_CF);

		code = uthread->syscall_code;
		callp = (code >= NUM_SYSENT) ? &sysent[63] : &sysent[code];

#if CONFIG_DTRACE
		if (callp->sy_call == dtrace_systrace_syscall)
			dtrace_systrace_syscall_return( code, error, uthread->uu_rval );
#endif /* CONFIG_DTRACE */
		AUDIT_SYSCALL_EXIT(code, p, uthread, error);

		if (error == ERESTART) {
			pal_syscall_restart( thread, find_user_regs(thread) );
		}
		else if (error != EJUSTRETURN) {
			if (error) {
				regs->eax = error;
				regs->efl |= EFL_CF;	/* carry bit */
			} else { /* (not error) */
				regs->eax = uthread->uu_rval[0];
				regs->edx = uthread->uu_rval[1];
			} 
		}
		DEBUG_KPRINT_SYSCALL_UNIX(
			"unix_syscall_return: error=%d retval=(%u,%u)\n",
			error, regs->eax, regs->edx);
	}


	uthread->uu_flag &= ~UT_NOTCANCELPT;

	if (uthread->uu_lowpri_window) {
	        /*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		throttle_lowpri_io(1);
	}
	if (code != 180)
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
			BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
			error, uthread->uu_rval[0], uthread->uu_rval[1], p->p_pid, 0);

	thread_exception_return();
	/* NOTREACHED */
}
Example #9
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	thread_t thread = current_thread();
	savearea_t *regs;
	user_addr_t pc, sp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
#if 0
	uintptr_t oldcontext;
	size_t s1, s2;
#endif
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (thread == NULL)
		goto zero;

	regs = (savearea_t *)find_user_regs(thread);
	if (regs == NULL)
		goto zero;
		
	*pcstack++ = (uint64_t)proc_selfpid();
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = regs->ss_32.eip;
	sp = regs->ss_32.ebp;
	
#if 0 /* XXX signal stack crawl */
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}
#endif

	if (dtrace_adjust_stack(&pcstack, &pcstack_limit, &pc, sp) == 1) {
		/*
		 * We made a change.
		 */
		*fpstack++ = 0;
		if (pcstack_limit <= 0)
			return;
	}

	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (sp == 0)
			break;

#if 0 /* XXX signal stack crawl */
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} 
		else
#endif
		{
			if (is64Bit) {
				pc = dtrace_fuword64((sp + RETURN_OFFSET64));
				sp = dtrace_fuword64(sp);
			} else {
				pc = dtrace_fuword32((sp + RETURN_OFFSET));
				sp = dtrace_fuword32(sp);
			}
		}

#if 0 /* XXX */
		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
#endif
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
Example #10
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	thread_t thread = current_thread();
	x86_saved_state_t *regs;
	user_addr_t pc, sp, fp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	int n;
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (thread == NULL)
		goto zero;

	regs = (x86_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		goto zero;
		
	*pcstack++ = (uint64_t)proc_selfpid();
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	if (is64Bit) {
		pc = regs->ss_64.isf.rip;
		sp = regs->ss_64.isf.rsp;
		fp = regs->ss_64.rbp;
	} else {
		pc = regs->ss_32.eip;
		sp = regs->ss_32.uesp;
		fp = regs->ss_32.ebp;
	}

	/*
	 * The return value indicates if we've modified the stack.
	 * Since there is nothing else to fix up in either case,
	 * we can safely ignore it here.
	 */
	(void)dtrace_adjust_stack(&pcstack, &pcstack_limit, &pc, sp);

	if (pcstack_limit <= 0)
		return;

	/*
	 * Note that unlike ppc, the x86 code does not use
	 * CPU_DTRACE_USTACK_FP. This is because x86 always
	 * traces from the fp, even in syscall/profile/fbt
	 * providers.
	 */
	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, fp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
Example #11
static int
dtrace_getustack_common(uint64_t *pcstack, int pcstack_limit, user_addr_t pc,
    user_addr_t sp)
{
#if 0
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;

	uintptr_t oldcontext = lwp->lwp_oldcontext; /* XXX signal stack crawl */
	size_t s1, s2;
#endif
	int ret = 0;
	boolean_t is64Bit = proc_is64bit(current_proc());

	ASSERT(pcstack == NULL || pcstack_limit > 0);
	
#if 0 /* XXX signal stack crawl */
	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}
#endif

	while (pc != 0) {
		ret++;
		if (pcstack != NULL) {
			*pcstack++ = (uint64_t)pc;
			pcstack_limit--;
			if (pcstack_limit <= 0)
				break;
		}

		if (sp == 0)
			break;

#if 0 /* XXX signal stack crawl */
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext32_t *ucp = (ucontext32_t *)oldcontext;
				greg32_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} 
		else
#endif
		{
			if (is64Bit) {
				pc = dtrace_fuword64((sp + RETURN_OFFSET64));
				sp = dtrace_fuword64(sp);
			} else {
				pc = dtrace_fuword32((sp + RETURN_OFFSET));
				sp = dtrace_fuword32(sp);
			}
		}

#if 0 /* XXX */
		/*
		 * This is totally bogus:  if we faulted, we're going to clear
		 * the fault and break.  This is to deal with the apparently
		 * broken Java stacks on x86.
		 */
		if (*flags & CPU_DTRACE_FAULT) {
			*flags &= ~CPU_DTRACE_FAULT;
			break;
		}
#endif
	}

	return (ret);
}
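The walker assumes the conventional x86 frame layout: the saved frame pointer sits at *fp and the return address just above it, which is what RETURN_OFFSET (4 on i386) and RETURN_OFFSET64 (8 on x86_64) encode in the XNU source. Note that the sp parameter here actually carries frame pointer values; each iteration is effectively:

/*
 * Assumed x86 frame layout, with 'sp' holding the current frame pointer:
 *
 *   sp + RETURN_OFFSET(64) -> return pc of the caller
 *   sp                     -> caller's saved frame pointer
 */
pc = is64Bit ? dtrace_fuword64(sp + RETURN_OFFSET64)	/* next pc */
             : dtrace_fuword32(sp + RETURN_OFFSET);
sp = is64Bit ? dtrace_fuword64(sp)			/* next frame */
             : dtrace_fuword32(sp);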
Example #12
uint64_t
dtrace_getreg(struct regs *savearea, uint_t reg)
{
	boolean_t is64Bit = proc_is64bit(current_proc());
	x86_saved_state_t *regs = (x86_saved_state_t *)savearea;

	if (is64Bit) {
	    if (reg <= SS) {
		reg = regmap[reg];
	    } else {
		reg -= (SS + 1);
	    }

	    switch (reg) {
	    case REG_RDI:
		return (uint64_t)(regs->ss_64.rdi);
	    case REG_RSI:
		return (uint64_t)(regs->ss_64.rsi);
	    case REG_RDX:
		return (uint64_t)(regs->ss_64.rdx);
	    case REG_RCX:
		return (uint64_t)(regs->ss_64.rcx);
	    case REG_R8:
		return (uint64_t)(regs->ss_64.r8);
	    case REG_R9:
		return (uint64_t)(regs->ss_64.r9);
	    case REG_RAX:
		return (uint64_t)(regs->ss_64.rax);
	    case REG_RBX:
		return (uint64_t)(regs->ss_64.rbx);
	    case REG_RBP:
		return (uint64_t)(regs->ss_64.rbp);
	    case REG_R10:
		return (uint64_t)(regs->ss_64.r10);
	    case REG_R11:
		return (uint64_t)(regs->ss_64.r11);
	    case REG_R12:
		return (uint64_t)(regs->ss_64.r12);
	    case REG_R13:
		return (uint64_t)(regs->ss_64.r13);
	    case REG_R14:
		return (uint64_t)(regs->ss_64.r14);
	    case REG_R15:
		return (uint64_t)(regs->ss_64.r15);
	    case REG_FS:
		return (uint64_t)(regs->ss_64.fs);
	    case REG_GS:
		return (uint64_t)(regs->ss_64.gs);
	    case REG_TRAPNO:
		return (uint64_t)(regs->ss_64.isf.trapno);
	    case REG_ERR:
		return (uint64_t)(regs->ss_64.isf.err);
	    case REG_RIP:
		return (uint64_t)(regs->ss_64.isf.rip);
	    case REG_CS:
		return (uint64_t)(regs->ss_64.isf.cs);
	    case REG_SS:
		return (uint64_t)(regs->ss_64.isf.ss);
	    case REG_RFL:
		return (uint64_t)(regs->ss_64.isf.rflags);
	    case REG_RSP:
		return (uint64_t)(regs->ss_64.isf.rsp);
	    case REG_DS:
	    case REG_ES:
	    default:
		DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
		return (0);
	    }
	
	} else {   /* is 32bit user */
		/* beyond register SS */
		if (reg > x86_SAVED_STATE32_COUNT - 1) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
			return (0);
		}
		return (uint64_t)((unsigned int *)(&(regs->ss_32.gs)))[reg];
	}
}
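The 32-bit branch leans on x86_saved_state32_t being a packed run of 32-bit words starting at gs, so register N is simply word N. A self-contained mock of the same trick (the struct here is illustrative, not the kernel's real layout):

#include <stdint.h>

/* Mock register file: any struct that is purely consecutive uint32_t
 * members can be indexed as a flat word array from its first field. */
struct mock_state32 {
	uint32_t gs, fs, es, ds;
	uint32_t edi, esi, ebp, ebx, edx, ecx, eax;
};

static uint64_t
mock_getreg32(const struct mock_state32 *s, unsigned int reg)
{
	/* reg must be bounded by the word count, as the kernel code does
	 * with x86_SAVED_STATE32_COUNT. */
	return (uint64_t)((const uint32_t *)&s->gs)[reg];
}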
Example #13
void
dtrace_getufpstack(uint64_t *pcstack, uint64_t *fpstack, int pcstack_limit)
{
	thread_t thread = current_thread();
	ppc_saved_state_t *regs;
	user_addr_t pc, sp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
#if 0
	uintptr_t oldcontext;
	size_t s1, s2;
#endif
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (thread == NULL)
		goto zero;

	regs = (ppc_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		goto zero;
		
	*pcstack++ = (uint64_t)proc_selfpid();
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = regs->REGPC;
	sp = regs->REGSP;
	
#if 0 /* XXX signal stack crawl*/
	oldcontext = lwp->lwp_oldcontext;

	if (p->p_model == DATAMODEL_NATIVE) {
		s1 = sizeof (struct frame) + 2 * sizeof (long);
		s2 = s1 + sizeof (siginfo_t);
	} else {
		s1 = sizeof (struct frame32) + 3 * sizeof (int);
		s2 = s1 + sizeof (siginfo32_t);
	}
#endif

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = 0;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		/*
		 * XXX This is wrong, but we do not yet support stack helpers.
		 */
		if (is64Bit)
			pc = dtrace_fuword64(sp);
		else
			pc = dtrace_fuword32(sp);
	}

	while (pc != 0) {
		*pcstack++ = (uint64_t)pc;
		*fpstack++ = sp;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			break;

		if (sp == 0)
			break;

#if 0 /* XXX signal stack crawl*/
		if (oldcontext == sp + s1 || oldcontext == sp + s2) {
			if (p->p_model == DATAMODEL_NATIVE) {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fulword(&gregs[REG_FP]);
				pc = dtrace_fulword(&gregs[REG_PC]);

				oldcontext = dtrace_fulword(&ucp->uc_link);
			} else {
				ucontext_t *ucp = (ucontext_t *)oldcontext;
				greg_t *gregs = ucp->uc_mcontext.gregs;

				sp = dtrace_fuword32(&gregs[EBP]);
				pc = dtrace_fuword32(&gregs[EIP]);

				oldcontext = dtrace_fuword32(&ucp->uc_link);
			}
		} 
		else
#endif
		{
			if (is64Bit) {
				pc = dtrace_fuword64((sp + RETURN_OFFSET64));
				sp = dtrace_fuword64(sp);
			} else {
				pc = dtrace_fuword32((sp + RETURN_OFFSET));
				sp = dtrace_fuword32(sp);
			}
		}
	}

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
Example #14
void
dtrace_getupcstack(uint64_t *pcstack, int pcstack_limit)
{
	thread_t thread = current_thread();
	ppc_saved_state_t *regs;
	user_addr_t pc, sp;
	volatile uint16_t *flags =
	    (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
	int n;
	boolean_t is64Bit = proc_is64bit(current_proc());

	if (*flags & CPU_DTRACE_FAULT)
		return;

	if (pcstack_limit <= 0)
		return;

	/*
	 * If there's no user context we still need to zero the stack.
	 */
	if (thread == NULL)
		goto zero;

	regs = (ppc_saved_state_t *)find_user_regs(thread);
	if (regs == NULL)
		goto zero;
		
	*pcstack++ = (uint64_t)proc_selfpid();
	pcstack_limit--;

	if (pcstack_limit <= 0)
		return;

	pc = regs->REGPC;
	sp = regs->REGSP;

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_ENTRY)) {
		*pcstack++ = (uint64_t)pc;
		pcstack_limit--;
		if (pcstack_limit <= 0)
			return;

		pc = regs->save_lr;
	}
	
	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_USTACK_FP)) {
		/*
		 * If the ustack fp flag is set, the stack frame from sp to
		 * fp contains no valid call information. Start with the fp.
		 */
		if (is64Bit)
			sp = dtrace_fuword64(sp);
		else
			sp = (user_addr_t)dtrace_fuword32(sp);
	}

	n = dtrace_getustack_common(pcstack, pcstack_limit, pc, sp);
	ASSERT(n >= 0);
	ASSERT(n <= pcstack_limit);

	pcstack += n;
	pcstack_limit -= n;

zero:
	while (pcstack_limit-- > 0)
		*pcstack++ = 0;
}
Example #15
/* -----------------------------------------------------------------------------
----------------------------------------------------------------------------- */
int ppp_comp_setcompressor(struct ppp_if *wan, struct ppp_option_data *odp)
{
    int error = 0;
    u_int32_t nb;
    struct ppp_comp *cp;
    u_char ccp_option[CCP_MAX_OPTION_LENGTH];
    user_addr_t ptr;
    int transmit;
    
    if (proc_is64bit(current_proc())) {
        struct ppp_option_data64 *odp64 = (struct ppp_option_data64 *)odp;

        nb = odp64->length;
        ptr = odp64->ptr;
        transmit = odp64->transmit;
    } else {
        struct ppp_option_data32 *odp32 = (struct ppp_option_data32 *)odp;

        nb = odp32->length;
        ptr = CAST_USER_ADDR_T(odp32->ptr);
        transmit = odp32->transmit;
    }
    
    if (nb > sizeof(ccp_option))
        nb = sizeof(ccp_option);

    if ((error = copyin(ptr, ccp_option, nb)))
        return (error);

    if (ccp_option[1] < 2)	/* preliminary check on the length byte */
        return (EINVAL);

    cp = ppp_comp_find(ccp_option[0]);
    if (cp == 0) {
        LOGDBG(wan->net, ("ppp%d: no compressor for [%x %x %x], %x\n",
                ifnet_unit(wan->net), ccp_option[0], ccp_option[1],
                ccp_option[2], nb));

        return EINVAL;	/* no handler found */
    }

    if (transmit) {
        if (wan->xc_state)
            (*wan->xcomp->comp_free)(wan->xc_state);
        wan->xcomp = cp;
        wan->xc_state = cp->comp_alloc(ccp_option, nb);
        if (!wan->xc_state) {
            error = ENOMEM;
            LOGDBG(wan->net, ("ppp%d: comp_alloc failed\n", ifnet_unit(wan->net)));
        }
        wan->sc_flags &= ~SC_COMP_RUN;
    }
    else {
        if (wan->rc_state)
            (*wan->rcomp->decomp_free)(wan->rc_state);
        wan->rcomp = cp;
        wan->rc_state = cp->decomp_alloc(ccp_option, nb);
        if (!wan->rc_state) {
            error = ENOMEM;
            LOGDBG(wan->net, ("ppp%d: decomp_alloc failed\n", ifnet_unit(wan->net)));
        }
        wan->sc_flags &= ~SC_DECOMP_RUN;
    }
    
    return error;
}
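From userspace this path is reached through the PPPIOCSCOMPRESS ioctl with a struct ppp_option_data describing the raw CCP option: a type byte, a length byte of at least 2 (the check above), then method-specific data. A hedged sketch, assuming the classic ppp headers define the ioctl and the struct:

#include <sys/ioctl.h>
#include <string.h>

/* Illustrative only: install a CCP (de)compressor on an open ppp fd.
 * 'opt' must hold a valid CCP option: opt[0] = type, opt[1] = total
 * option length (>= 2), followed by method-specific data. */
static int
set_compressor(int fd, const unsigned char *opt, int transmit)
{
	struct ppp_option_data odp;

	memset(&odp, 0, sizeof(odp));
	odp.ptr = (u_char *)opt;
	odp.length = opt[1];
	odp.transmit = transmit;	/* 1 = transmit side, 0 = receive */
	return ioctl(fd, PPPIOCSCOMPRESS, &odp);
}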
Beispiel #16
0
static int
copyin_afs_ioctl(caddr_t cmarg, struct afs_ioctl *dst)
{
    int code;
#if defined(AFS_DARWIN100_ENV)
    struct afs_ioctl32 dst32;

    if (!proc_is64bit(current_proc())) {
	AFS_COPYIN(cmarg, (caddr_t) & dst32, sizeof dst32, code);
	if (!code)
	    afs_ioctl32_to_afs_ioctl(&dst32, dst);
	return code;
    }
#endif
#if defined(AFS_AIX51_ENV) && defined(AFS_64BIT_KERNEL)
    struct afs_ioctl32 dst32;

    if (!(IS64U)) {
	AFS_COPYIN(cmarg, (caddr_t) & dst32, sizeof dst32, code);
	if (!code)
	    afs_ioctl32_to_afs_ioctl(&dst32, dst);
	return code;
    }
#endif /* defined(AFS_AIX51_ENV) && defined(AFS_64BIT_KERNEL) */


#if defined(AFS_HPUX_64BIT_ENV)
    struct afs_ioctl32 dst32;

    if (is_32bit(u.u_procp)) {	/* is_32bit() in proc_iface.h */
	AFS_COPYIN(cmarg, (caddr_t) & dst32, sizeof dst32, code);
	if (!code)
	    afs_ioctl32_to_afs_ioctl(&dst32, dst);
	return code;
    }
#endif /* defined(AFS_HPUX_64BIT_ENV) */

#if defined(AFS_SUN5_64BIT_ENV)
    struct afs_ioctl32 dst32;

    if (get_udatamodel() == DATAMODEL_ILP32) {
	AFS_COPYIN(cmarg, (caddr_t) & dst32, sizeof dst32, code);
	if (!code)
	    afs_ioctl32_to_afs_ioctl(&dst32, dst);
	return code;
    }
#endif /* defined(AFS_SUN5_64BIT_ENV) */

#if defined(AFS_SGI_ENV) && (_MIPS_SZLONG==64)
    struct afs_ioctl32 dst32;

    if (!ABI_IS_64BIT(get_current_abi())) {
	AFS_COPYIN(cmarg, (caddr_t) & dst32, sizeof dst32, code);
	if (!code)
	    afs_ioctl32_to_afs_ioctl(&dst32, dst);
	return code;
    }
#endif /* defined(AFS_SGI_ENV) && (_MIPS_SZLONG==64) */

#if defined(AFS_LINUX_64BIT_KERNEL) && !defined(AFS_ALPHA_LINUX20_ENV) && !defined(AFS_IA64_LINUX20_ENV)
    struct afs_ioctl32 dst32;

#ifdef AFS_SPARC64_LINUX26_ENV
    if (test_thread_flag(TIF_32BIT))
#elif defined(AFS_SPARC64_LINUX24_ENV)
    if (current->thread.flags & SPARC_FLAG_32BIT)
#elif defined(AFS_SPARC64_LINUX20_ENV)
    if (current->tss.flags & SPARC_FLAG_32BIT)

#elif defined(AFS_AMD64_LINUX26_ENV)
    if (test_thread_flag(TIF_IA32))
#elif defined(AFS_AMD64_LINUX20_ENV)
    if (current->thread.flags & THREAD_IA32)

#elif defined(AFS_PPC64_LINUX26_ENV)
#if defined(STRUCT_TASK_STRUCT_HAS_THREAD_INFO)
    if (current->thread_info->flags & _TIF_32BIT)
#else
    if (task_thread_info(current)->flags & _TIF_32BIT)
#endif
#elif defined(AFS_PPC64_LINUX20_ENV)
    if (current->thread.flags & PPC_FLAG_32BIT)

#elif defined(AFS_S390X_LINUX26_ENV)
    if (test_thread_flag(TIF_31BIT))
#elif defined(AFS_S390X_LINUX20_ENV)
    if (current->thread.flags & S390_FLAG_31BIT)

#else
#error pioctl32 not done for this linux
#endif
    {
	AFS_COPYIN(cmarg, (caddr_t) & dst32, sizeof dst32, code);
	if (!code)
	    afs_ioctl32_to_afs_ioctl(&dst32, dst);
	return code;
    }
#endif /* defined(AFS_LINUX_64BIT_KERNEL) */

    AFS_COPYIN(cmarg, (caddr_t) dst, sizeof *dst, code);
    return code;
}
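Every 32-bit branch above funnels through afs_ioctl32_to_afs_ioctl() to widen the copied-in struct. A plausible sketch of that thunk, assuming the classic pioctl blob layout of in/out pointers plus short sizes, with the 32-bit variant storing pointers as 32-bit integers (field names are assumptions):

#include <stdint.h>

/* Sketch only: widen the 32-bit pioctl argument blob to the native one. */
static void
afs_ioctl32_to_afs_ioctl(const struct afs_ioctl32 *src, struct afs_ioctl *dst)
{
	dst->in = (caddr_t)(uintptr_t)src->in;		/* input buffer */
	dst->out = (caddr_t)(uintptr_t)src->out;	/* output buffer */
	dst->in_size = src->in_size;
	dst->out_size = src->out_size;
}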