Example #1
int
pmc_save_user_callchain(uintptr_t *cc, int maxsamples,
    struct trapframe *tf)
{
	uintptr_t *osp, *sp;
	int frames = 0;

	cc[frames++] = PMC_TRAPFRAME_TO_PC(tf);
	sp = (uintptr_t *)PMC_TRAPFRAME_TO_FP(tf);
	osp = NULL;

	for (; frames < maxsamples; frames++) {
		if (sp <= osp)
			break;
		osp = sp;
#ifdef __powerpc64__
		/* Check if 32-bit mode. */
		if (!(tf->srr1 & PSL_SF)) {
			cc[frames] = fuword32((uint32_t *)sp + 1);
			sp = (uintptr_t *)(uintptr_t)fuword32(sp);
		} else {
			cc[frames] = fuword(sp + 2);
			sp = (uintptr_t *)fuword(sp);
		}
#else
		cc[frames] = fuword32((uint32_t *)sp + 1);
		sp = (uintptr_t *)fuword32(sp);
#endif
	}

	return (frames);
}
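
The offsets above come straight from the PowerPC ABI stack-frame layout: the saved back chain sits at the frame base, with the saved LR one word above it on 32-bit (hence (uint32_t *)sp + 1) and two pointer-words above it on 64-bit ELF, past the CR save slot (hence sp + 2). A minimal, hedged sketch of that assumed layout; field names are illustrative, not from the kernel:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ppc32_frame {
	uint32_t backchain;	/* fuword32(sp): caller's frame */
	uint32_t lr_save;	/* fuword32((uint32_t *)sp + 1): return PC */
};

struct ppc64_frame {
	uint64_t backchain;	/* fuword(sp): caller's frame */
	uint64_t cr_save;
	uint64_t lr_save;	/* fuword(sp + 2): return PC */
};

int main(void)
{
	printf("LR save offset: %zu (32-bit), %zu (64-bit)\n",
	    offsetof(struct ppc32_frame, lr_save),
	    offsetof(struct ppc64_frame, lr_save));
	return 0;
}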
Example #2
int
ia32_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
{
	struct proc *p;
	struct trapframe *frame;
	caddr_t params;
	u_int32_t args[8];
	int error, i;

	p = td->td_proc;
	frame = td->td_frame;

	params = (caddr_t)frame->tf_rsp + sizeof(u_int32_t);
	sa->code = frame->tf_rax;

	/*
	 * Need to check if this is a 32-bit or 64-bit syscall.
	 */
	if (sa->code == SYS_syscall) {
		/*
		 * Code is first argument, followed by actual args.
		 */
		sa->code = fuword32(params);
		params += sizeof(int);
	} else if (sa->code == SYS___syscall) {
		/*
		 * Like syscall, but code is a quad, so as to maintain
		 * quad alignment for the rest of the arguments.
		 * We use a 32-bit fetch in case params is not
		 * aligned.
		 */
		sa->code = fuword32(params);
		params += sizeof(quad_t);
	}
	if (p->p_sysent->sv_mask)
		sa->code &= p->p_sysent->sv_mask;
	if (sa->code >= p->p_sysent->sv_size)
		sa->callp = &p->p_sysent->sv_table[0];
	else
		sa->callp = &p->p_sysent->sv_table[sa->code];
	sa->narg = sa->callp->sy_narg;

	if (params != NULL && sa->narg != 0)
		error = copyin(params, (caddr_t)args,
		    (u_int)(sa->narg * sizeof(int)));
	else
		error = 0;

	for (i = 0; i < sa->narg; i++)
		sa->args[i] = args[i];

	if (error == 0) {
		td->td_retval[0] = 0;
		td->td_retval[1] = frame->tf_rdx;
	}

	return (error);
}
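
For SYS_syscall the kernel re-reads the real syscall number from the user stack with fuword32() and advances params past it. From the caller's side this is just the indirect syscall(2) interface; a quick sketch, assuming the libc syscall() wrapper enters through the SYS_syscall gate as it does on 32-bit FreeBSD:

#include <sys/syscall.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* SYS_getpid travels as the first stack argument of SYS_syscall;
	 * the kernel-side fetch above reads it back with fuword32(). */
	printf("pid via indirect syscall: %ld\n", (long)syscall(SYS_getpid));
	return 0;
}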
Example #3
/* read the user instruction */
enum ftt_type
_fp_read_inst(
	const uint32_t *address,	/* FPU instruction address. */
	uint32_t *pvalue,		/* Place for instruction value. */
	fp_simd_type *pfpsd)		/* Pointer to fpu simulator data. */
{
	if (((uintptr_t)address & 0x3) != 0)
		return (ftt_alignment);	/* Must be word-aligned. */

	if (get_udatamodel() == DATAMODEL_ILP32) {
		/*
		 * If this is a 32-bit program, chop the address accordingly.
		 * The intermediate uintptr_t casts prevent warnings under a
		 * certain compiler, and the temporary 32 bit storage is
		 * intended to force proper code generation and break up what
		 * would otherwise be a quadruple cast.
		 */
		caddr32_t address32 = (caddr32_t)(uintptr_t)address;
		address = (uint32_t *)(uintptr_t)address32;
	}

	if (fuword32(address, pvalue) == -1) {
		pfpsd->fp_trapaddr = (caddr_t)address;
		pfpsd->fp_traprw = S_READ;
		return (ftt_fault);
	}
	return (ftt_none);
}
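
The ILP32 branch deliberately truncates the address to 32 bits before using it. A standalone sketch of what the caddr32_t round trip does (the address value is made up for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr   = 0xffffffff80001234ULL;
	uint32_t addr32 = (uint32_t)addr;	/* the caddr32_t step */
	uint64_t back   = (uint64_t)addr32;	/* widened again */

	/* Only the low 32 bits survive, which is the point for a
	 * 32-bit program's view of its own address space. */
	printf("%#llx -> %#llx\n", (unsigned long long)addr,
	    (unsigned long long)back);
	return 0;
}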
Example #4
uint32_t
dtrace_fuword32(void *uaddr)
{
	if ((uintptr_t)uaddr > VM_MAXUSER_ADDRESS) {
		DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
		cpu_core[curcpu].cpuc_dtrace_illval = (uintptr_t)uaddr;
		return (0);
	}
	return (fuword32(uaddr));
}
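
DTrace probe context must never take a fault it didn't plan for, so the address is range-checked and a flag is raised before any fetch is attempted. A hedged user-space sketch of the same guard-then-fetch shape; USER_ADDR_MAX and the plain load are stand-ins (the real VM_MAXUSER_ADDRESS is per-architecture, and the real fuword32() is fault-safe):

#include <stdint.h>
#include <stdio.h>

#define USER_ADDR_MAX ((uintptr_t)0x00007fffffffffffULL)

static int fetch_bad;			/* mirrors CPU_DTRACE_BADADDR */

static uint32_t
checked_fetch32(const void *uaddr)
{
	if ((uintptr_t)uaddr > USER_ADDR_MAX) {
		fetch_bad = 1;		/* flag it, return a dummy value */
		return (0);
	}
	return (*(const volatile uint32_t *)uaddr);
}

int main(void)
{
	uint32_t v = 42;

	printf("%u bad=%d\n", checked_fetch32(&v), fetch_bad);
	return 0;
}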
Example #5
int
fuword32_nowatch(const void *addr, uint32_t *value)
{
	int watched, ret;

	watched = watch_disable_addr(addr, sizeof (*value), S_READ);
	ret = fuword32(addr, value);
	if (watched)
		watch_enable_addr(addr, sizeof (*value), S_READ);

	return (ret);
}
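
Note that this collection mixes two incompatible fuword32() flavors: FreeBSD's single-argument form returns the fetched value directly (with -1 doubling as the error indicator), while the illumos/Solaris form used here takes a result pointer and returns 0 or -1. A sketch of adapting one to the other; fetch32_2arg is a stand-in for the real fault-safe fetch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the illumos-style fetch: 0 on success, -1 on fault. */
static int
fetch32_2arg(const void *addr, uint32_t *value)
{
	memcpy(value, addr, sizeof (*value));
	return (0);
}

/* FreeBSD-style wrapper: the value comes back directly, so a fault
 * (-1) is indistinguishable from a legitimate 0xffffffff word. */
static long
fetch32_1arg(const void *addr)
{
	uint32_t v;

	if (fetch32_2arg(addr, &v) != 0)
		return (-1);
	return ((long)v);
}

int main(void)
{
	uint32_t x = 7;

	printf("%ld\n", fetch32_1arg(&x));
	return 0;
}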
Example #6
/*
 * Go to sleep until somebody does a WAKE operation on this futex, we get a
 * signal, or the timeout expires.
 */
static int
futex_wait(memid_t *memid, caddr_t addr, int val, timespec_t *timeout)
{
	int err, ret;
	int32_t curval;
	fwaiter_t fw;
	int index;

	fw.fw_woken = 0;
	MEMID_COPY(memid, &fw.fw_memid);
	cv_init(&fw.fw_cv, NULL, CV_DEFAULT, NULL);

	index = HASH_FUNC(&fw.fw_memid);
	mutex_enter(&futex_hash_lock[index]);

	if (fuword32(addr, (uint32_t *)&curval)) {
		err = set_errno(EFAULT);
		goto out;
	}
	if (curval != val) {
		err = set_errno(EWOULDBLOCK);
		goto out;
	}

	futex_hashin(&fw);

	err = 0;
	while ((fw.fw_woken == 0) && (err == 0)) {
		ret = cv_waituntil_sig(&fw.fw_cv, &futex_hash_lock[index],
		    timeout, timechanged);
		if (ret < 0)
			err = set_errno(ETIMEDOUT);
		else if (ret == 0)
			err = set_errno(EINTR);
	}

	/*
	 * The futex is normally hashed out in wakeup.  If we timed out or
	 * got a signal, we need to hash it out here instead.
	 */
	if (fw.fw_woken == 0)
		futex_hashout(&fw);

out:
	mutex_exit(&futex_hash_lock[index]);

	return (err);
}
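
The fuword32() re-check under the hash lock is what makes the protocol race-free: userland can test the futex word without any lock, because the kernel sleeps only if the word still equals the expected value when the kernel looks. This example is illumos's lx-brand emulation of Linux futexes; on Linux itself the caller's side of the same contract looks roughly like this (futex_wait_word is a hypothetical wrapper):

#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

/* The kernel re-reads *uaddr under its internal lock, as futex_wait()
 * above does with fuword32(), and sleeps only if it still equals val;
 * otherwise the call fails immediately with EWOULDBLOCK/EAGAIN. */
static long
futex_wait_word(uint32_t *uaddr, uint32_t val)
{
	return (syscall(SYS_futex, uaddr, FUTEX_WAIT, val, NULL, NULL, 0));
}

int main(void)
{
	uint32_t word = 1;

	/* The word already differs from 0, so this returns at once. */
	return (futex_wait_word(&word, 0) == -1 ? 0 : 1);
}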
Example #7
void
dtrace_pid_probe(struct regs *rp)
{
	krwlock_t *rwp = &CPU->cpu_ft_lock;
	uint32_t instr;

	/*
	 * This trap should only be invoked if there's a corresponding
	 * enabled dtrace probe. If there isn't, send SIGILL as though
	 * the process had executed an invalid trap instruction.
	 */
	rw_enter(rwp, RW_READER);
	if (dtrace_pid_probe_ptr != NULL && (*dtrace_pid_probe_ptr)(rp) == 0) {
		rw_exit(rwp);
		return;
	}
	rw_exit(rwp);

	/*
	 * It is possible that we were preempted after entering the kernel,
	 * and the tracepoint was removed. If it appears that the process hit
	 * our reserved trap instruction, we send SIGILL just as though
	 * the user had executed an unused trap instruction.
	 */
	if (fuword32((void *)rp->r_pc, &instr) != 0 ||
	    instr == FASTTRAP_INSTR) {
		sigqueue_t *sqp = kmem_zalloc(sizeof (sigqueue_t), KM_SLEEP);
		proc_t *p = curproc;

		sqp->sq_info.si_signo = SIGILL;
		sqp->sq_info.si_code = ILL_ILLTRP;
		sqp->sq_info.si_addr = (caddr_t)rp->r_pc;
		sqp->sq_info.si_trapno = 0x38;

		mutex_enter(&p->p_lock);
		sigaddqa(p, curthread, sqp);
		mutex_exit(&p->p_lock);
		aston(curthread);
	}
}
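
The tail of the handler builds a siginfo by hand and queues it at the thread. A rough userland analogue of that delivery, for illustration only; the kernel path above fills si_code/si_trapno directly, whereas sigqueue(3) can only carry a sigval payload:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	union sigval v = { .sival_int = 0 };

	signal(SIGILL, SIG_IGN);	/* keep the demo alive */
	sigqueue(getpid(), SIGILL, v);
	puts("queued SIGILL");
	return 0;
}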
Example #8
/*
 * fp_disabled normally occurs when the first floating point in a non-threaded
 * program causes an fp_disabled trap. For threaded programs, the ILP32 threads
 * library calls the .setpsr fasttrap, which has been modified to also set the
 * appropriate bits in fpu_en and fpu_fprs, as well as to enable the %fprs,
 * as before. The LP64 threads library will write to the %fprs directly,
 * so fpu_en will never get updated for LP64 threaded programs,
 * although fpu_fprs will, via resume.
 */
void
fp_disabled(struct regs *rp)
{
	klwp_id_t lwp;
	kfpu_t *fp;
	int ftt;

#ifdef SF_ERRATA_30 /* call causes fp-disabled */
	/*
	 * This code is here because sometimes the call instruction
	 * generates an fp_disabled trap when the call offset is large.
	 */
	if (spitfire_call_bug) {
		uint_t instr = 0;
		extern void trap(struct regs *rp, caddr_t addr, uint32_t type,
		    uint32_t mmu_fsr);

		if (USERMODE(rp->r_tstate)) {
			(void) fuword32((void *)rp->r_pc, &instr);
		} else {
			instr = *(uint_t *)(rp->r_pc);
		}
		if ((instr & 0xc0000000) == 0x40000000) {
			ill_fpcalls++;
			trap(rp, NULL, T_UNIMP_INSTR, 0);
			return;
		}
	}
#endif /* SF_ERRATA_30 - call causes fp-disabled */

#ifdef CHEETAH_ERRATUM_109 /* interrupts not taken during fpops */
	/*
	 * UltraSPARC III will report spurious fp-disabled exceptions when
	 * the pipe is full of fpops and an interrupt is triggered.  By the
	 * time we get here the interrupt has been taken and we just need
	 * to return to where we came from and try again.
	 */
	if (fpu_exists && _fp_read_fprs() & FPRS_FEF)
		return;
#endif /* CHEETAH_ERRATUM_109 */

	lwp = ttolwp(curthread);
	ASSERT(lwp != NULL);
	fp = lwptofpu(lwp);
	if (fpu_exists) {
		kpreempt_disable();
		if (fp->fpu_en) {
#ifdef DEBUG
			if (fpdispr)
				cmn_err(CE_NOTE,
				    "fpu disabled, but already enabled\n");
#endif
			if ((fp->fpu_fprs & FPRS_FEF) != FPRS_FEF) {
				fp->fpu_fprs = FPRS_FEF;
#ifdef DEBUG
				if (fpdispr)
					cmn_err(CE_NOTE,
					"fpu disabled, saved fprs disabled\n");
#endif
			}
			_fp_write_fprs(FPRS_FEF);
			fp_restore(fp);
		} else {
			fp->fpu_en = 1;
			fp->fpu_fsr = 0;
			fp->fpu_fprs = FPRS_FEF;
			_fp_write_fprs(FPRS_FEF);
			fp_clearregs(fp);
		}
		kpreempt_enable();
	} else {
		fp_simd_type fpsd;
		int i;

		(void) flush_user_windows_to_stack(NULL);
		if (!fp->fpu_en) {
			fp->fpu_en = 1;
			fp->fpu_fsr = 0;
			for (i = 0; i < 32; i++)
				fp->fpu_fr.fpu_regs[i] = (uint_t)-1; /* NaN */
			for (i = 16; i < 32; i++)		/* NaN */
				fp->fpu_fr.fpu_dregs[i] = (uint64_t)-1;
		}
		if ((ftt = fp_emulator(&fpsd, (fp_inst_type *)rp->r_pc,
		    rp, (ulong_t *)rp->r_sp, fp)) != 0) {
			fp->fpu_q_entrysize = sizeof (struct _fpq);
			fp_traps(&fpsd, ftt, rp);
		}
	}
}
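
The SF_ERRATA_30 workaround decides "is this a call?" by masking the top two opcode bits: SPARC's format-1 CALL has op == 01 in bits 31:30, with the remaining 30 bits holding the word displacement. A small decode sketch:

#include <stdint.h>
#include <stdio.h>

/* SPARC format-1 CALL: bits 31:30 == 01, bits 29:0 = displacement. */
static int
is_sparc_call(uint32_t instr)
{
	return ((instr & 0xc0000000U) == 0x40000000U);
}

int main(void)
{
	printf("%d\n", is_sparc_call(0x40000123U));	/* 1: a call */
	printf("%d\n", is_sparc_call(0x01000000U));	/* 0: a nop */
	return 0;
}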
Example #9
static int
pmc_next_uframe(register_t *pc, register_t *sp, register_t *ra)
{
	int offset, registers_on_stack;
	uint32_t opcode, mask;
	register_t function_start;
	int stksize;
	InstFmt i;

	registers_on_stack = 0;
	mask = 0;
	function_start = 0;
	offset = 0;
	stksize = 0;

	while (offset < MAX_FUNCTION_SIZE) {
		opcode = fuword32((void *)(*pc - offset));

		/* [d]addiu sp, sp, -X */
		if (((opcode & 0xffff8000) == 0x27bd8000)
		    || ((opcode & 0xffff8000) == 0x67bd8000)) {
			function_start = *pc - offset;
			registers_on_stack = 1;
			break;
		}

		/* lui gp, X */
		if ((opcode & 0xffff8000) == 0x3c1c0000) {
			/*
			 * The function might start with this instruction.
			 * Keep an eye out for "jr ra" and a positive sp
			 * correction further on.
			 */
			function_start = *pc - offset;
		}

		if (function_start) {
			/*
			 * Stop looking further. A possible end-of-function
			 * instruction here means there are no stack
			 * modifications and sp is unchanged.
			 */

			/* [d]addiu sp,sp,X */
			if (((opcode & 0xffff8000) == 0x27bd0000)
			    || ((opcode & 0xffff8000) == 0x67bd0000))
				break;

			if (opcode == 0x03e00008)
				break;
		}

		offset += sizeof(int);
	}

	if (!function_start)
		return (-1);

	if (registers_on_stack) {
		offset = 0;
		while ((offset < MAX_PROLOGUE_SIZE)
		    && ((function_start + offset) < *pc)) {
			i.word = fuword32((void *)(function_start + offset));
			switch (i.JType.op) {
			case OP_SW:
				/* look for saved registers on the stack */
				if (i.IType.rs != 29)
					break;
				/* only restore the first one */
				if (mask & (1 << i.IType.rt))
					break;
				mask |= (1 << i.IType.rt);
				if (i.IType.rt == 31)
					*ra = fuword32((void *)(*sp + (short)i.IType.imm));
				break;

#if defined(__mips_n64)
			case OP_SD:
				/* look for saved registers on the stack */
				if (i.IType.rs != 29)
					break;
				/* only restore the first one */
				if (mask & (1 << i.IType.rt))
					break;
				mask |= (1 << i.IType.rt);
				/* ra */
				if (i.IType.rt == 31)
					*ra = fuword64((void *)(*sp + (short)i.IType.imm));
				break;
#endif

			case OP_ADDI:
			case OP_ADDIU:
			case OP_DADDI:
			case OP_DADDIU:
				/* look for stack pointer adjustment */
				if (i.IType.rs != 29 || i.IType.rt != 29)
					break;
				stksize = -((short)i.IType.imm);
			}

			offset += sizeof(int);
		}
	}

	/*
	 * We reached the end of the backtrace.
	 */
	if (*pc == *ra)
		return (-1);

	*pc = *ra;
	*sp += stksize;

	return (0);
}
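
The prologue scanner matches "[d]addiu sp, sp, -X" against 0x27bd8000/0x67bd8000: the opcode and rs/rt fields are pinned to $sp, and the 0x8000 in the mask requires the immediate's sign bit, i.e. a frame that grows. Decoding the frame size from such an opcode in isolation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t opcode = 0x27bdffe0;		/* addiu sp, sp, -32 */
	int16_t imm = (int16_t)(opcode & 0xffff);

	/* A negative immediate grows the frame; its magnitude is the
	 * stksize the unwinder above adds back to sp. */
	printf("stack adjustment: %d, frame size: %d\n", imm, -imm);
	return 0;
}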
Example #10
/*
 * 	Save the system call arguments in a safe place.
 *	lwp->lwp_ap normally points to the out regs in the reg structure.
 *	If the user is going to change the out registers, g1, or the stack,
 *	and might want to get the args (for /proc tracing), it must copy
 *	the args elsewhere via save_syscall_args().
 *
 *	This may be called from stop() even when we're not in a system call.
 *	Since there's no easy way to tell, this must be safe (not panic).
 *	If the copyins get data faults, return non-zero.
 */
int
save_syscall_args()
{
	kthread_t	*t = curthread;
	klwp_t		*lwp = ttolwp(t);
	struct regs	*rp = lwptoregs(lwp);
	uint_t		code = t->t_sysnum;
	uint_t		nargs;
	int		i;
	caddr_t		ua;
	model_t		datamodel;

	if (lwp->lwp_argsaved || code == 0)
		return (0);		/* args already saved or not needed */

	if (code >= NSYSCALL) {
		nargs = 0;		/* illegal syscall */
	} else {
		struct sysent *se = LWP_GETSYSENT(lwp);
		struct sysent *callp = se + code;

		nargs = callp->sy_narg;
		if (LOADABLE_SYSCALL(callp) && nargs == 0) {
			krwlock_t	*module_lock;

			/*
			 * Find out how many arguments the system
			 * call uses.
			 *
			 * We have the property that loaded syscalls
			 * never change the number of arguments they
			 * use after they've been loaded once.  This
			 * allows us to stop for /proc tracing without
			 * holding the module lock.
			 * /proc is assured that sy_narg is valid.
			 */
			module_lock = lock_syscall(se, code);
			nargs = callp->sy_narg;
			rw_exit(module_lock);
		}
	}

	/*
	 * Fetch the system call arguments.
	 */
	if (nargs == 0)
		goto out;


	ASSERT(nargs <= MAXSYSARGS);

	if ((datamodel = lwp_getdatamodel(lwp)) == DATAMODEL_ILP32) {

		if (rp->r_g1 == 0) {	/* indirect syscall */

			lwp->lwp_arg[0] = (uint32_t)rp->r_o1;
			lwp->lwp_arg[1] = (uint32_t)rp->r_o2;
			lwp->lwp_arg[2] = (uint32_t)rp->r_o3;
			lwp->lwp_arg[3] = (uint32_t)rp->r_o4;
			lwp->lwp_arg[4] = (uint32_t)rp->r_o5;
			if (nargs > 5) {
				ua = (caddr_t)(uintptr_t)(caddr32_t)(uintptr_t)
				    (rp->r_sp + MINFRAME32);
				for (i = 5; i < nargs; i++) {
					uint32_t a;
					if (fuword32(ua, &a) != 0)
						return (-1);
					lwp->lwp_arg[i] = a;
					ua += sizeof (a);
				}
			}
		} else {
			lwp->lwp_arg[0] = (uint32_t)rp->r_o0;
			lwp->lwp_arg[1] = (uint32_t)rp->r_o1;
			lwp->lwp_arg[2] = (uint32_t)rp->r_o2;
			lwp->lwp_arg[3] = (uint32_t)rp->r_o3;
			lwp->lwp_arg[4] = (uint32_t)rp->r_o4;
			lwp->lwp_arg[5] = (uint32_t)rp->r_o5;
			if (nargs > 6) {
				ua = (caddr_t)(uintptr_t)(caddr32_t)(uintptr_t)
				    (rp->r_sp + MINFRAME32);
				for (i = 6; i < nargs; i++) {
					uint32_t a;
					if (fuword32(ua, &a) != 0)
						return (-1);
					lwp->lwp_arg[i] = a;
					ua += sizeof (a);
				}
			}
		}
	} else {
		ASSERT(datamodel == DATAMODEL_LP64);
		lwp->lwp_arg[0] = rp->r_o0;
		lwp->lwp_arg[1] = rp->r_o1;
		lwp->lwp_arg[2] = rp->r_o2;
		lwp->lwp_arg[3] = rp->r_o3;
		lwp->lwp_arg[4] = rp->r_o4;
		lwp->lwp_arg[5] = rp->r_o5;
		if (nargs > 6) {
			ua = (caddr_t)rp->r_sp + MINFRAME + STACK_BIAS;
			for (i = 6; i < nargs; i++) {
				unsigned long a;
				if (fulword(ua, &a) != 0)
					return (-1);
				lwp->lwp_arg[i] = a;
				ua += sizeof (a);
			}
		}
	}

out:
	lwp->lwp_ap = lwp->lwp_arg;
	lwp->lwp_argsaved = 1;
	t->t_post_sys = 1;	/* so lwp_ap will be reset */
	return (0);
}
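
In the LP64 branch, arguments past the six out-registers are fetched from the caller's frame, and the explicit STACK_BIAS appears because a SPARC V9 %sp is biased by 2047 bytes. A small sketch of that address computation; the MINFRAME64 value is an assumption standing in for the real MINFRAME:

#include <stdint.h>
#include <stdio.h>

#define STACK_BIAS	2047	/* SPARC V9 biased stack pointer */
#define MINFRAME64	176	/* assumption: V9 minimum frame size */

/* Where the LP64 path above looks for the 7th argument. */
static uintptr_t
arg7_addr(uintptr_t sp)
{
	return (sp + STACK_BIAS + MINFRAME64);
}

int main(void)
{
	printf("%#lx\n", (unsigned long)arg7_addr(0xffffffff7ffff001UL));
	return 0;
}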
Example #11
File: trap.c Project: OPSF/freebsd
void
trap(struct trapframe *frame)
{
	struct thread	*td;
	struct proc	*p;
#ifdef KDTRACE_HOOKS
	uint32_t inst;
#endif
	int		sig, type, user;
	u_int		ucode;
	ksiginfo_t	ksi;

	PCPU_INC(cnt.v_trap);

	td = curthread;
	p = td->td_proc;

	type = ucode = frame->exc;
	sig = 0;
	user = frame->srr1 & PSL_PR;

	CTR3(KTR_TRAP, "trap: %s type=%s (%s)", td->td_name,
	    trapname(type), user ? "user" : "kernel");

#ifdef KDTRACE_HOOKS
	/*
	 * A trap can occur while DTrace executes a probe. Before
	 * executing the probe, DTrace blocks re-scheduling and sets
	 * a flag in its per-cpu flags to indicate that it doesn't
	 * want to fault. On returning from the probe, the no-fault
	 * flag is cleared and finally re-scheduling is enabled.
	 *
	 * If the DTrace kernel module has registered a trap handler,
	 * call it and if it returns non-zero, assume that it has
	 * handled the trap and modified the trap frame so that this
	 * function can return normally.
	 */
	if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type) != 0)
		return;
#endif

	if (user) {
		td->td_pticks = 0;
		td->td_frame = frame;
		if (td->td_cowgen != p->p_cowgen)
			thread_cow_update(td);

		/* User Mode Traps */
		switch (type) {
		case EXC_RUNMODETRC:
		case EXC_TRC:
			frame->srr1 &= ~PSL_SE;
			sig = SIGTRAP;
			ucode = TRAP_TRACE;
			break;

#ifdef __powerpc64__
		case EXC_ISE:
		case EXC_DSE:
			if (handle_user_slb_spill(&p->p_vmspace->vm_pmap,
			    (type == EXC_ISE) ? frame->srr0 : frame->dar) != 0){
				sig = SIGSEGV;
				ucode = SEGV_MAPERR;
			}
			break;
#endif
		case EXC_DSI:
		case EXC_ISI:
			sig = trap_pfault(frame, 1);
			if (sig == SIGSEGV)
				ucode = SEGV_MAPERR;
			break;

		case EXC_SC:
			syscall(frame);
			break;

		case EXC_FPU:
			KASSERT((td->td_pcb->pcb_flags & PCB_FPU) != PCB_FPU,
			    ("FPU already enabled for thread"));
			enable_fpu(td);
			break;

		case EXC_VEC:
			KASSERT((td->td_pcb->pcb_flags & PCB_VEC) != PCB_VEC,
			    ("Altivec already enabled for thread"));
			enable_vec(td);
			break;

		case EXC_VSX:
			KASSERT((td->td_pcb->pcb_flags & PCB_VSX) != PCB_VSX,
			    ("VSX already enabled for thread"));
			if (!(td->td_pcb->pcb_flags & PCB_VEC))
				enable_vec(td);
			if (!(td->td_pcb->pcb_flags & PCB_FPU))
				save_fpu(td);
			td->td_pcb->pcb_flags |= PCB_VSX;
			enable_fpu(td);
			break;

		case EXC_VECAST_G4:
		case EXC_VECAST_G5:
			/*
			 * We get a VPU assist exception for IEEE mode
			 * vector operations on denormalized floats.
			 * Emulating this is a giant pain, so for now,
			 * just switch off IEEE mode and treat them as
			 * zero.
			 */

			save_vec(td);
			td->td_pcb->pcb_vec.vscr |= ALTIVEC_VSCR_NJ;
			enable_vec(td);
			break;

		case EXC_ALI:
			if (fix_unaligned(td, frame) != 0) {
				sig = SIGBUS;
				ucode = BUS_ADRALN;
			}
			else
				frame->srr0 += 4;
			break;

		case EXC_DEBUG:	/* Single stepping */
			mtspr(SPR_DBSR, mfspr(SPR_DBSR));
			frame->srr1 &= ~PSL_DE;
			frame->cpu.booke.dbcr0 &= ~(DBCR0_IDM | DBCR0_IC);
			sig = SIGTRAP;
			ucode = TRAP_TRACE;
			break;

		case EXC_PGM:
			/* Identify the trap reason */
#ifdef AIM
			if (frame->srr1 & EXC_PGM_TRAP) {
#else
			if (frame->cpu.booke.esr & ESR_PTR) {
#endif
#ifdef KDTRACE_HOOKS
				inst = fuword32((const void *)frame->srr0);
				if (inst == 0x0FFFDDDD &&
				    dtrace_pid_probe_ptr != NULL) {
					struct reg regs;
					fill_regs(td, &regs);
					(*dtrace_pid_probe_ptr)(&regs);
					break;
				}
#endif
				sig = SIGTRAP;
				ucode = TRAP_BRKPT;
			} else {
				sig = ppc_instr_emulate(frame, td->td_pcb);
				if (sig == SIGILL) {
					if (frame->srr1 & EXC_PGM_PRIV)
						ucode = ILL_PRVOPC;
					else if (frame->srr1 & EXC_PGM_ILLEGAL)
						ucode = ILL_ILLOPC;
				} else if (sig == SIGFPE)
					ucode = FPE_FLTINV;	/* Punt for now, invalid operation. */
			}
			break;

		case EXC_MCHK:
			/*
			 * Note that this may not be recoverable for the user
			 * process, depending on the type of machine check,
			 * but it at least prevents the kernel from dying.
			 */
			sig = SIGBUS;
			ucode = BUS_OBJERR;
			break;

		default:
			trap_fatal(frame);
		}
	} else {
		/* Kernel Mode Traps */

		KASSERT(cold || td->td_ucred != NULL,
		    ("kernel trap doesn't have ucred"));
		switch (type) {
#ifdef KDTRACE_HOOKS
		case EXC_PGM:
			if (frame->srr1 & EXC_PGM_TRAP) {
				if (*(uint32_t *)frame->srr0 == EXC_DTRACE) {
					if (dtrace_invop_jump_addr != NULL) {
						dtrace_invop_jump_addr(frame);
						return;
					}
				}
			}
			break;
#endif
#ifdef __powerpc64__
		case EXC_DSE:
			if ((frame->dar & SEGMENT_MASK) == USER_ADDR) {
				__asm __volatile ("slbmte %0, %1" ::
					"r"(td->td_pcb->pcb_cpu.aim.usr_vsid),
					"r"(USER_SLB_SLBE));
				return;
			}
			break;
#endif
		case EXC_DSI:
			if (trap_pfault(frame, 0) == 0)
				return;
			break;
		case EXC_MCHK:
			if (handle_onfault(frame))
				return;
			break;
		default:
			break;
		}
		trap_fatal(frame);
	}

	if (sig != 0) {
		if (p->p_sysent->sv_transtrap != NULL)
			sig = (p->p_sysent->sv_transtrap)(sig, type);
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = sig;
		ksi.ksi_code = (int)ucode;
		ksi.ksi_trapno = type;
		trapsignal(td, &ksi);
	}

	userret(td, frame);
}
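
Both dtrace_trap_func at the top of trap() and dtrace_invop_jump_addr in the kernel-mode path follow the same shape: an optionally installed hook pointer is tried first, and a nonzero result means the event was consumed. A stripped-down sketch of that dispatch; all names here are illustrative:

#include <stdio.h>

struct frame_stub { int exc; };

/* Optional hook a module may install (dtrace_trap_func-style). */
static int (*trap_hook)(struct frame_stub *, int);

static int
dispatch_trap(struct frame_stub *f, int type)
{
	if (trap_hook != NULL && trap_hook(f, type) != 0)
		return (1);	/* consumed by the hook */
	return (0);		/* fall through to normal handling */
}

static int
claim_all(struct frame_stub *f, int type)
{
	(void)f; (void)type;
	return (1);
}

int main(void)
{
	struct frame_stub f = { 0 };

	printf("%d", dispatch_trap(&f, 3));	/* 0: no hook installed */
	trap_hook = claim_all;
	printf("%d\n", dispatch_trap(&f, 3));	/* 1: hook claimed it */
	return 0;
}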
Example #12
/*
 * Wake up to wake_threads waiting on the futex at memid.  If there are
 * more than that many threads waiting, requeue the remaining threads on
 * the futex at requeue_memid.
 */
static int
futex_requeue(memid_t *memid, memid_t *requeue_memid, int wake_threads,
	ulong_t requeue_threads, caddr_t addr, int *cmpval)
{
	fwaiter_t *fwp, *next;
	int index1, index2;
	int ret = 0;
	int32_t curval;
	kmutex_t *l1, *l2;

	/*
	 * To ensure that we don't miss a wakeup if the value of cmpval
	 * changes, we need to grab locks on both the original and new hash
	 * buckets.  To avoid deadlock, we always grab the lower-indexed
	 * lock first.
	 */
	index1 = HASH_FUNC(memid);
	index2 = HASH_FUNC(requeue_memid);

	if (index1 == index2) {
		l1 = &futex_hash_lock[index1];
		l2 = NULL;
	} else if (index1 < index2) {
		l1 = &futex_hash_lock[index1];
		l2 = &futex_hash_lock[index2];
	} else {
		l1 = &futex_hash_lock[index2];
		l2 = &futex_hash_lock[index1];
	}

	mutex_enter(l1);
	if (l2 != NULL)
		mutex_enter(l2);

	if (cmpval != NULL) {
		if (fuword32(addr, (uint32_t *)&curval)) {
			ret = -EFAULT;
			goto out;
		}
		if (curval != *cmpval) {
			ret = -EAGAIN;
			goto out;
		}
	}

	for (fwp = futex_hash[index1]; fwp; fwp = next) {
		next = fwp->fw_next;
		if (!MEMID_EQUAL(&fwp->fw_memid, memid))
			continue;

		futex_hashout(fwp);
		if (ret++ < wake_threads) {
			fwp->fw_woken = 1;
			cv_signal(&fwp->fw_cv);
		} else {
			MEMID_COPY(requeue_memid, &fwp->fw_memid);
			futex_hashin(fwp);

			if ((ret - wake_threads) >= requeue_threads)
				break;
		}
	}

out:
	if (l2 != NULL)
		mutex_exit(l2);
	mutex_exit(l1);

	if (ret < 0)
		return (set_errno(-ret));
	return (ret);
}
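
The lock-ordering comment above is the classic deadlock-avoidance rule, shown here in isolation: when two hash buckets must be held at once, always take the lower-indexed lock first, and take only one when the indices collide. A self-contained pthread sketch:

#include <pthread.h>
#include <stdio.h>

static void
lock_pair(pthread_mutex_t *locks, int i1, int i2)
{
	if (i1 == i2) {
		pthread_mutex_lock(&locks[i1]);
	} else if (i1 < i2) {
		pthread_mutex_lock(&locks[i1]);
		pthread_mutex_lock(&locks[i2]);
	} else {
		pthread_mutex_lock(&locks[i2]);
		pthread_mutex_lock(&locks[i1]);
	}
}

int main(void)
{
	pthread_mutex_t locks[4] = { PTHREAD_MUTEX_INITIALIZER,
	    PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
	    PTHREAD_MUTEX_INITIALIZER };

	lock_pair(locks, 3, 1);		/* acquires 1, then 3 */
	puts("locked in index order");
	return 0;
}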
Example #13
kern_return_t
dtrace_user_probe(arm_saved_state_t *regs, unsigned int instr)
{
	/*
	 * FIXME
	 *
	 * The only call path into this method is a user trap.
	 * We don't need to test for that, but we should assert it.
	 */

	lck_rw_t *rwp;
	struct proc *p = current_proc();

	uthread_t uthread = (uthread_t)get_bsdthread_info(current_thread());

	kauth_cred_uthread_update(uthread, p);

	if (((regs->cpsr & PSR_TF) && ((uint16_t) instr) == FASTTRAP_THUMB_RET_INSTR) ||
	    ((uint32_t) instr == FASTTRAP_ARM_RET_INSTR)) {
		uint8_t step = uthread->t_dtrace_step;
		uint8_t ret = uthread->t_dtrace_ret;
		user_addr_t npc = uthread->t_dtrace_npc;

		if (uthread->t_dtrace_ast) {
			printf("dtrace_user_probe() should be calling aston()\n");
			// aston(thread);
			// uthread->t_sig_check = 1;
		}

		/*
		 * Clear all user tracing flags.
		 */
		uthread->t_dtrace_ft = 0;

		/*
		 * If we weren't expecting to take a return probe trap, kill
		 * the process as though it had just executed an unassigned
		 * trap instruction.
		 */
		if (step == 0) {
			/*
			 * APPLE NOTE: We're returning KERN_FAILURE, which causes
			 * the generic signal handling code to take over, which
			 * will effectively deliver an EXC_BAD_INSTRUCTION to the
			 * user process.
			 */
			return KERN_FAILURE;
		} 

		/*
		 * If we hit this trap unrelated to a return probe, we're
		 * just here to reset the AST flag since we deferred a signal
		 * until after we logically single-stepped the instruction we
		 * copied out.
		 */
		if (ret == 0) {
			regs->pc = npc;
			return KERN_SUCCESS;
		}

		/*
		 * We need to wait until after we've called the
		 * dtrace_return_probe_ptr function pointer to step the pc.
		 */
		rwp = &CPU->cpu_ft_lock;
		lck_rw_lock_shared(rwp);

		if (dtrace_return_probe_ptr != NULL)
			(void) (*dtrace_return_probe_ptr)(regs);
		lck_rw_unlock_shared(rwp);

		regs->pc = npc;

		return KERN_SUCCESS;
	} else {
		rwp = &CPU->cpu_ft_lock;

		/*
		 * The DTrace fasttrap provider uses a trap,
		 * FASTTRAP_{ARM,THUMB}_INSTR. We let
		 * DTrace take the first crack at handling
		 * this trap; if it's not a probe that DTrace knows about,
		 * we call into the trap() routine to handle it like a
		 * breakpoint placed by a conventional debugger.
		 */

		/*
		 * APPLE NOTE: I believe the purpose of the reader/writers lock
		 * is this: there are times when dtrace needs to prevent calls
		 * to dtrace_pid_probe_ptr(). Sun's original impl grabbed a plain
		 * mutex here. However, that serialized all probe calls, and
		 * destroyed MP behavior. So now they use a RW lock, with probes
		 * as readers, and the top level synchronization as a writer.
		 */
		lck_rw_lock_shared(rwp);
		if (dtrace_pid_probe_ptr != NULL &&
		    (*dtrace_pid_probe_ptr)(regs) == 0) {
			lck_rw_unlock_shared(rwp);
			return KERN_SUCCESS;
		}
		lck_rw_unlock_shared(rwp);

		/*
		 * If the instruction that caused the breakpoint trap doesn't
		 * look like our trap anymore, it may be that this tracepoint
		 * was removed just after the user thread executed it. In
		 * that case, return to user land to retry the instruction.
		 *
		 * Note that the PC points to the instruction that caused the fault.
		 */
		if (regs->cpsr & PSR_TF) {
			uint16_t instr_check;
			if (fuword16(regs->pc, &instr_check) == 0 && instr_check != FASTTRAP_THUMB_INSTR) {
				return KERN_SUCCESS;
			}
		} else {
			uint32_t instr_check;
			if (fuword32(regs->pc, &instr_check) == 0 && instr_check != FASTTRAP_ARM_INSTR) {
				return KERN_SUCCESS;
			}
		}
	}

	return KERN_FAILURE;
}
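
The width of the final re-check is chosen by the CPU state: in Thumb state a breakpoint is a 16-bit fetch (fuword16), in ARM state a 32-bit one (fuword32). A tiny sketch of that selection; PSR_T_BIT is an assumption standing in for the kernel's PSR_TF:

#include <stdint.h>
#include <stdio.h>

#define PSR_T_BIT (1u << 5)	/* assumption: CPSR Thumb-state bit */

static unsigned
trap_instr_width(uint32_t cpsr)
{
	return ((cpsr & PSR_T_BIT) ? 16 : 32);
}

int main(void)
{
	printf("%u %u\n", trap_instr_width(PSR_T_BIT), trap_instr_width(0));
	return 0;
}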
Example #14
void
ia32_syscall(struct trapframe *frame)
{
	caddr_t params;
	int i;
	struct sysent *callp;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	register_t orig_tf_rflags;
	int error;
	int narg;
	u_int32_t args[8];
	u_int64_t args64[8];
	u_int code;
	ksiginfo_t ksi;

	PCPU_INC(cnt.v_syscall);
	td->td_pticks = 0;
	td->td_frame = frame;
	if (td->td_ucred != p->p_ucred) 
		cred_update_thread(td);
	params = (caddr_t)frame->tf_rsp + sizeof(u_int32_t);
	code = frame->tf_rax;
	orig_tf_rflags = frame->tf_rflags;

	if (p->p_sysent->sv_prepsyscall) {
		/*
		 * The prep code is MP aware.
		 */
		(*p->p_sysent->sv_prepsyscall)(frame, args, &code, &params);
	} else {
		/*
		 * Need to check if this is a 32-bit or 64-bit syscall.
		 * fuword is MP aware.
		 */
		if (code == SYS_syscall) {
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = fuword32(params);
			params += sizeof(int);
		} else if (code == SYS___syscall) {
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 * We use a 32-bit fetch in case params is not
			 * aligned.
			 */
			code = fuword32(params);
			params += sizeof(quad_t);
		}
	}

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg;

	/*
	 * copyin and the ktrsyscall()/ktrsysret() code is MP-aware
	 */
	if (params != NULL && narg != 0)
		error = copyin(params, (caddr_t)args,
		    (u_int)(narg * sizeof(int)));
	else
		error = 0;

	for (i = 0; i < narg; i++)
		args64[i] = args[i];

#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSCALL))
		ktrsyscall(code, narg, args64);
#endif
	CTR4(KTR_SYSC, "syscall enter thread %p pid %d proc %s code %d", td,
	    td->td_proc->p_pid, td->td_proc->p_comm, code);

	if (error == 0) {
		td->td_retval[0] = 0;
		td->td_retval[1] = frame->tf_rdx;

		STOPEVENT(p, S_SCE, narg);

		PTRACESTOP_SC(p, td, S_PT_SCE);

		AUDIT_SYSCALL_ENTER(code, td);
		error = (*callp->sy_call)(td, args64);
		AUDIT_SYSCALL_EXIT(error, td);
	}

	switch (error) {
	case 0:
		frame->tf_rax = td->td_retval[0];
		frame->tf_rdx = td->td_retval[1];
		frame->tf_rflags &= ~PSL_C;
		break;

	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
		 * int 0x80 is 2 bytes. We saved this in tf_err.
		 */
		frame->tf_rip -= frame->tf_err;
		break;

	case EJUSTRETURN:
		break;

	default:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame->tf_rax = error;
		frame->tf_rflags |= PSL_C;
		break;
	}

	/*
	 * Traced syscall.
	 */
	if (orig_tf_rflags & PSL_T) {
		frame->tf_rflags &= ~PSL_T;
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_code = TRAP_TRACE;
		ksi.ksi_addr = (void *)frame->tf_rip;
		trapsignal(td, &ksi);
	}

	/*
	 * Check for misbehavior.
	 */
	WITNESS_WARN(WARN_PANIC, NULL, "System call %s returning",
	    (code >= 0 && code < SYS_MAXSYSCALL) ? freebsd32_syscallnames[code] : "???");
	KASSERT(td->td_critnest == 0,
	    ("System call %s returning in a critical section",
	    (code >= 0 && code < SYS_MAXSYSCALL) ? freebsd32_syscallnames[code] : "???"));
	KASSERT(td->td_locks == 0,
	    ("System call %s returning with %d locks held",
	    (code >= 0 && code < SYS_MAXSYSCALL) ? freebsd32_syscallnames[code] : "???",
	    td->td_locks));

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	userret(td, frame);

	CTR4(KTR_SYSC, "syscall exit thread %p pid %d proc %s code %d", td,
	    td->td_proc->p_pid, td->td_proc->p_comm, code);
#ifdef KTRACE
	if (KTRPOINT(td, KTR_SYSRET))
		ktrsysret(code, error, td->td_retval[0]);
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	PTRACESTOP_SC(p, td, S_PT_SCX);
}
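
The ERESTART case rewinds the pc by the length of the syscall instruction, which the entry path stashed in tf_err (2 bytes for "int $0x80", 7 for "lcall"), so the call re-executes on return to user mode. The arithmetic in isolation, with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rip = 0x400812;	/* pc just past "int $0x80" */
	uint64_t len = 2;		/* what tf_err would hold */

	printf("restart at %#llx\n", (unsigned long long)(rip - len));
	return 0;
}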