Example No. 1
File: trap.c Project: rodero95/sys
/*
 * dab_align() handles the following data aborts:
 *
 *  FAULT_ALIGN_0 - Alignment fault
 *  FAULT_ALIGN_1 - Alignment fault
 *
 * These faults are fatal if they happen in kernel mode. Otherwise, we
 * deliver a bus error to the process.
 */
static int
dab_align(trapframe_t *tf, u_int fsr, u_int far, struct thread *td, struct ksig *ksig)
{

	/* Alignment faults are always fatal if they occur in kernel mode */
	if (!TRAP_USERMODE(tf)) {
		if (!td || !td->td_pcb->pcb_onfault)
			dab_fatal(tf, fsr, far, td, ksig);
		tf->tf_r0 = EFAULT;
		tf->tf_pc = (int)td->td_pcb->pcb_onfault;
		return (0);
	}

	/* pcb_onfault *must* be NULL at this point */

	/* See if the cpu state needs to be fixed up */
	(void) data_abort_fixup(tf, fsr, far, td, ksig);

	/* Deliver a bus error signal to the process */
	ksig->code = 0;
	ksig->signb = SIGBUS;
	td->td_frame = tf;

	return (1);
}
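
The kernel-mode branch above shows the pcb_onfault convention: a routine that touches user memory registers a resume address, and the abort handler redirects execution there with EFAULT instead of panicking. Below is a minimal user-space sketch of that recovery pattern using setjmp/longjmp in place of the real pcb_onfault/trapframe machinery; every name in it is hypothetical and not part of trap.c.

#include <errno.h>
#include <setjmp.h>
#include <stdio.h>

/* Hypothetical stand-in for pcb_onfault: the place execution resumes
 * when a fault is taken while the flag is set. */
static jmp_buf onfault;
static int have_onfault;

/* Plays the role of the abort handler's recovery branch: if a resume
 * point is registered, redirect execution there; otherwise the real
 * kernel would treat the fault as fatal (dab_fatal). */
static void
simulated_abort(void)
{
	if (have_onfault)
		longjmp(onfault, 1);
	/* no recovery point: nothing sensible to do in this sketch */
}

/* Sketch of a copyin()-style helper: register the recovery point, do
 * the risky access, and report EFAULT if the "abort" fires. */
static int
copy_with_recovery(int trigger_fault)
{
	if (setjmp(onfault) != 0) {
		/* Resumed here by simulated_abort(), the moral
		 * equivalent of tf->tf_r0 = EFAULT in the kernel. */
		have_onfault = 0;
		return EFAULT;
	}
	have_onfault = 1;

	if (trigger_fault)
		simulated_abort();	/* pretend the user access faulted */

	have_onfault = 0;
	return 0;
}

int
main(void)
{
	printf("clean copy  -> %d\n", copy_with_recovery(0));
	printf("faulty copy -> %d (EFAULT is %d)\n",
	    copy_with_recovery(1), EFAULT);
	return 0;
}
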
Example No. 2
File: trap.c Project: rodero95/sys
static __inline int
prefetch_abort_fixup(trapframe_t *tf, struct ksig *ksig)
{
#ifdef CPU_ABORT_FIXUP_REQUIRED
	int error;

	/* Call the cpu specific prefetch abort fixup routine */
	error = cpu_prefetchabt_fixup(tf);
	if (__predict_true(error != ABORT_FIXUP_FAILED))
		return (error);

	/*
	 * Oops, couldn't fix up the instruction
	 */
	printf(
	    "prefetch_abort_fixup: fixup for %s mode prefetch abort failed.\n",
	    TRAP_USERMODE(tf) ? "user" : "kernel");
	printf("pc = 0x%08x, opcode 0x%08x, insn = ", tf->tf_pc,
	    *((u_int *)tf->tf_pc));
	disassemble(tf->tf_pc);

	/* Die now if this happened in kernel mode */
	if (!TRAP_USERMODE(tf))
		dab_fatal(tf, 0, tf->tf_pc, NULL, ksig);

	return (error);
#else
	return (ABORT_FIXUP_OK);
#endif /* CPU_ABORT_FIXUP_REQUIRED */
}
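
The fixup routine communicates one of three outcomes to its caller: the state was repaired (ABORT_FIXUP_OK), the trapframe was rewritten so the handler must return at once (ABORT_FIXUP_RETURN), or the instruction could not be fixed (ABORT_FIXUP_FAILED). The sketch below only illustrates that dispatch; the enum values and the fake_fixup() stub are made up and need not match the real ARM definitions.

#include <stdio.h>

/* Illustrative values only; the real ABORT_FIXUP_* constants come from
 * the ARM headers. */
enum abort_fixup_result {
	ABORT_FIXUP_OK = 0,	/* state repaired, keep handling the fault */
	ABORT_FIXUP_RETURN,	/* trapframe rewritten, return immediately  */
	ABORT_FIXUP_FAILED	/* could not fix up, deliver SIGILL         */
};

/* Hypothetical stub standing in for cpu_prefetchabt_fixup() so the
 * dispatch below is runnable. */
static enum abort_fixup_result
fake_fixup(int scenario)
{
	return (enum abort_fixup_result)scenario;
}

/* Mirrors the switch the abort handlers use on the fixup result. */
static const char *
dispatch(int scenario)
{
	switch (fake_fixup(scenario)) {
	case ABORT_FIXUP_RETURN:
		return "return from the handler immediately";
	case ABORT_FIXUP_FAILED:
		return "deliver SIGILL to the process";
	default:
		return "continue normal fault handling";
	}
}

int
main(void)
{
	for (int s = 0; s < 3; s++)
		printf("fixup result %d -> %s\n", s, dispatch(s));
	return 0;
}
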
Example No. 3
static inline int
prefetch_abort_fixup(trapframe_t *tf)
{
#ifdef CPU_ABORT_FIXUP_REQUIRED
	int error;

	/* Call the CPU specific prefetch abort fixup routine */
	error = cpu_prefetchabt_fixup(tf);
	if (__predict_true(error != ABORT_FIXUP_FAILED))
		return (error);

	/*
	 * Oops, couldn't fix up the instruction
	 */
	printf("%s: fixup for %s mode prefetch abort failed.\n", __func__,
	    TRAP_USERMODE(tf) ? "user" : "kernel");
#ifdef THUMB_CODE
	if (tf->tf_spsr & PSR_T_bit) {
		printf("pc = 0x%08x, opcode 0x%04x, 0x%04x, insn = ",
		    tf->tf_pc, *((uint16_t *)(tf->tf_pc & ~1)),
		    *((uint16_t *)((tf->tf_pc + 2) & ~1)));
	}
	else
#endif
	{
		printf("pc = 0x%08x, opcode 0x%08x, insn = ", tf->tf_pc,
		    *((u_int *)tf->tf_pc));
	}
	disassemble(tf->tf_pc);

	/* Die now if this happened in kernel mode */
	if (!TRAP_USERMODE(tf))
		dab_fatal(tf, 0, tf->tf_pc, NULL, NULL);

	return (error);
#else
	return (ABORT_FIXUP_OK);
#endif /* CPU_ABORT_FIXUP_REQUIRED */
}
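
The Thumb branch added in this variant masks off bit 0 of the PC (the Thumb state bit) and prints two 16-bit halfwords, presumably so 32-bit Thumb-2 encodings are visible too, while the ARM path reads a single 32-bit word. A small user-space sketch of the same fetch logic over a local little-endian buffer follows; the sample encodings and helper name are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Fake instruction memory: one 32-bit ARM word, then two Thumb halfwords. */
static const uint8_t text[] = {
	0x04, 0x00, 0x9d, 0xe4,	/* e49d0004  ldr r0, [sp], #4 (ARM)   */
	0x08, 0xb5,		/* b508      push {r3, lr}    (Thumb) */
	0x08, 0xbd		/* bd08      pop  {r3, pc}    (Thumb) */
};

/* Prints the opcode(s) at byte offset 'pc' the way the fixup diagnostic
 * does: two 16-bit halfwords in Thumb state (low bit of the address
 * masked off first), one 32-bit word in ARM state.  Little-endian. */
static void
dump_insn(unsigned pc, int thumb)
{
	if (thumb) {
		unsigned off = pc & ~1u;
		uint16_t w0 = (uint16_t)(text[off] | text[off + 1] << 8);
		uint16_t w1 = (uint16_t)(text[off + 2] | text[off + 3] << 8);
		printf("pc = %u, opcode 0x%04x, 0x%04x\n", pc, w0, w1);
	} else {
		uint32_t w = (uint32_t)text[pc] | (uint32_t)text[pc + 1] << 8 |
		    (uint32_t)text[pc + 2] << 16 | (uint32_t)text[pc + 3] << 24;
		printf("pc = %u, opcode 0x%08x\n", pc, w);
	}
}

int
main(void)
{
	dump_insn(0, 0);	/* ARM word */
	dump_insn(5, 1);	/* Thumb: low bit of the address carries the state bit */
	return 0;
}
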
Example No. 4
/*
 * dab_align() handles the following data aborts:
 *
 *  FAULT_ALIGN_0 - Alignment fault
 *  FAULT_ALIGN_1 - Alignment fault
 *
 * These faults are fatal if they happen in kernel mode. Otherwise, we
 * deliver a bus error to the process.
 */
static int
dab_align(trapframe_t *tf, u_int fsr, u_int far, struct lwp *l, ksiginfo_t *ksi)
{
	/* Alignment faults are always fatal if they occur in kernel mode */
	if (!TRAP_USERMODE(tf))
		dab_fatal(tf, fsr, far, l, NULL);

	/* pcb_onfault *must* be NULL at this point */
	KDASSERT(((struct pcb *)lwp_getpcb(l))->pcb_onfault == NULL);

	/* See if the CPU state needs to be fixed up */
	(void) data_abort_fixup(tf, fsr, far, l);

	/* Deliver a bus error signal to the process */
	KSI_INIT_TRAP(ksi);
	ksi->ksi_signo = SIGBUS;
	ksi->ksi_code = BUS_ADRALN;
	ksi->ksi_addr = (uint32_t *)(intptr_t)far;
	ksi->ksi_trap = fsr;

	lwp_settrapframe(l, tf);

	return (1);
}
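
From the process side, the bus error assembled here arrives as an ordinary POSIX signal whose siginfo_t carries the code and address filled in above (BUS_ADRALN and the FAR value). The following is a minimal userland sketch of a handler that inspects those fields; it only installs the handler and does not provoke a real alignment fault.

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Prints the fields a dab_align()-style kernel path fills in before
 * posting the signal: si_code (BUS_ADRALN for an alignment fault) and
 * si_addr (the faulting address taken from the FAR). */
static void
bus_handler(int sig, siginfo_t *si, void *ctx)
{
	(void)sig;
	(void)ctx;
	/* printf is not async-signal-safe; used here purely for illustration. */
	printf("SIGBUS: si_code=%d (%s), si_addr=%p\n", si->si_code,
	    si->si_code == BUS_ADRALN ? "BUS_ADRALN" : "other", si->si_addr);
	_exit(1);
}

int
main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = bus_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGBUS, &sa, NULL) != 0) {
		perror("sigaction");
		return 1;
	}

	/* On a strict-alignment ARM configuration, a misaligned access would
	 * now land in bus_handler() via the kernel path above; this sketch
	 * stops short of provoking one. */
	puts("SIGBUS handler installed; no fault provoked in this sketch.");
	return 0;
}
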
Example No. 5
/*
 * void prefetch_abort_handler(struct trapframe *tf)
 *
 * Abort handler called when instruction execution occurs at
 * a non-existent or restricted (access permissions) memory page.
 * If the address is invalid and we were in SVC mode then panic as
 * the kernel should never prefetch abort.
 * If the address is invalid and the page is mapped then the user process
 * does not have read permission, so send it a signal.
 * Otherwise fault the page in and try again.
 */
static void
prefetch_abort_handler(struct trapframe *tf)
{
	struct thread *td;
	struct proc * p;
	struct vm_map *map;
	vm_offset_t fault_pc, va;
	int error = 0;
	struct ksig ksig;


#if 0
	/* Update vmmeter statistics */
	uvmexp.traps++;
#endif
#if 0
	printf("prefetch abort handler: %p %p\n", (void*)tf->tf_pc,
	    (void*)tf->tf_usr_lr);
#endif

 	td = curthread;
	p = td->td_proc;
	PCPU_INC(cnt.v_trap);

	if (TRAP_USERMODE(tf)) {
		td->td_frame = tf;
		if (td->td_cowgen != td->td_proc->p_cowgen)
			thread_cow_update(td);
	}
	fault_pc = tf->tf_pc;
	if (td->td_md.md_spinlock_count == 0) {
		if (__predict_true(tf->tf_spsr & PSR_I) == 0)
			enable_interrupts(PSR_I);
		if (__predict_true(tf->tf_spsr & PSR_F) == 0)
			enable_interrupts(PSR_F);
	}

	/* Prefetch aborts cannot happen in kernel mode */
	if (__predict_false(!TRAP_USERMODE(tf)))
		dab_fatal(tf, 0, tf->tf_pc, NULL, &ksig);
	td->td_pticks = 0;


	/* Ok validate the address, can only execute in USER space */
	if (__predict_false(fault_pc >= VM_MAXUSER_ADDRESS ||
	    (fault_pc < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW))) {
		ksig.signb = SIGSEGV;
		ksig.code = 0;
		goto do_trapsignal;
	}

	map = &td->td_proc->p_vmspace->vm_map;
	va = trunc_page(fault_pc);

	/*
	 * See if the pmap can handle this fault on its own...
	 */
#ifdef DEBUG
	last_fault_code = -1;
#endif
	if (pmap_fault_fixup(map->pmap, va, VM_PROT_READ, 1))
		goto out;

	error = vm_fault(map, va, VM_PROT_READ | VM_PROT_EXECUTE,
	    VM_FAULT_NORMAL);
	if (__predict_true(error == 0))
		goto out;

	if (error == ENOMEM) {
		printf("VM: pid %d (%s), uid %d killed: "
		    "out of swap\n", td->td_proc->p_pid, td->td_name,
		    (td->td_proc->p_ucred) ?
		     td->td_proc->p_ucred->cr_uid : -1);
		ksig.signb = SIGKILL;
	} else {
		ksig.signb = SIGSEGV;
	}
	ksig.code = 0;

do_trapsignal:
	call_trapsignal(td, ksig.signb, ksig.code);

out:
	userret(td, tf);

}
Example No. 6
/*
 * dab_buserr() handles the following data aborts:
 *
 *  FAULT_BUSERR_0 - External Abort on Linefetch -- Section
 *  FAULT_BUSERR_1 - External Abort on Linefetch -- Page
 *  FAULT_BUSERR_2 - External Abort on Non-linefetch -- Section
 *  FAULT_BUSERR_3 - External Abort on Non-linefetch -- Page
 *  FAULT_BUSTRNL1 - External abort on Translation -- Level 1
 *  FAULT_BUSTRNL2 - External abort on Translation -- Level 2
 *
 * If pcb_onfault is set, flag the fault and return to the handler.
 * If the fault occurred in user mode, give the process a SIGBUS.
 *
 * Note: On XScale, FAULT_BUSERR_0, FAULT_BUSERR_1, and FAULT_BUSERR_2
 * can be flagged as imprecise in the FSR. This causes a real headache
 * since some of the machine state is lost. In this case, tf->tf_pc
 * may not actually point to the offending instruction. In fact, if
 * we've taken a double abort fault, it generally points somewhere near
 * the top of "data_abort_entry" in exception.S.
 *
 * In all other cases, these data aborts are considered fatal.
 */
static int
dab_buserr(struct trapframe *tf, u_int fsr, u_int far, struct thread *td,
    struct ksig *ksig)
{
	struct pcb *pcb = td->td_pcb;

#ifdef __XSCALE__
	if ((fsr & FAULT_IMPRECISE) != 0 &&
	    (tf->tf_spsr & PSR_MODE) == PSR_ABT32_MODE) {
		/*
		 * Oops, an imprecise, double abort fault. We've lost the
		 * r14_abt/spsr_abt values corresponding to the original
		 * abort, and the spsr saved in the trapframe indicates
		 * ABT mode.
		 */
		tf->tf_spsr &= ~PSR_MODE;

		/*
		 * We use a simple heuristic to determine if the double abort
		 * happened as a result of a kernel or user mode access.
		 * If the current trapframe is at the top of the kernel stack,
		 * the fault _must_ have come from user mode.
		 */
		if (tf != ((struct trapframe *)pcb->pcb_regs.sf_sp) - 1) {
			/*
			 * Kernel mode. We're either about to die a
			 * spectacular death, or pcb_onfault will come
			 * to our rescue. Either way, the current value
			 * of tf->tf_pc is irrelevant.
			 */
			tf->tf_spsr |= PSR_SVC32_MODE;
			if (pcb->pcb_onfault == NULL)
				printf("\nKernel mode double abort!\n");
		} else {
			/*
			 * User mode. We've lost the program counter at the
			 * time of the fault (not that it was accurate anyway;
			 * it's not called an imprecise fault for nothing).
			 * About all we can do is copy r14_usr to tf_pc and
			 * hope for the best. The process is about to get a
			 * SIGBUS, so it's probably history anyway.
			 */
			tf->tf_spsr |= PSR_USR32_MODE;
			tf->tf_pc = tf->tf_usr_lr;
		}
	}

	/* FAR is invalid for imprecise exceptions */
	if ((fsr & FAULT_IMPRECISE) != 0)
		far = 0;
#endif /* __XSCALE__ */

	if (pcb->pcb_onfault) {
		tf->tf_r0 = EFAULT;
		tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault;
		return (0);
	}

	/*
	 * At this point, if the fault happened in kernel mode, we're toast
	 */
	if (!TRAP_USERMODE(tf))
		dab_fatal(tf, fsr, far, td, ksig);

	/* Deliver a bus error signal to the process */
	ksig->signb = SIGBUS;
	ksig->code = 0;
	td->td_frame = tf;

	return (1);
}
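
The double-abort heuristic above relies on one stack-layout fact: a trap taken from user mode deposits its trapframe at the very top of the kernel stack, so a frame found anywhere else must belong to a nested kernel-mode abort. A toy pointer-arithmetic sketch of that comparison is shown below; the stack size and structure layout are invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the ARM trapframe; the real one holds saved registers. */
struct trapframe {
	uint32_t regs[17];
};

#define KSTACK_WORDS	1024	/* invented stack size */

int
main(void)
{
	/* Pretend this array is a thread's kernel stack.  A trap taken from
	 * user mode pushes its frame at the very top (pcb_regs.sf_sp). */
	static uint32_t kstack[KSTACK_WORDS];
	uint32_t *stack_top = kstack + KSTACK_WORDS;

	struct trapframe *user_tf =
	    (struct trapframe *)stack_top - 1;		/* topmost frame */
	struct trapframe *nested_tf = user_tf - 1;	/* one frame deeper */

	/* The same comparison dab_buserr() uses to classify a double abort:
	 * only the topmost frame can have come from user mode. */
	printf("user_tf   looks like a user-mode frame: %s\n",
	    user_tf == (struct trapframe *)stack_top - 1 ? "yes" : "no");
	printf("nested_tf looks like a user-mode frame: %s\n",
	    nested_tf == (struct trapframe *)stack_top - 1 ? "yes" : "no");
	return 0;
}
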
Example No. 7
void
abort_handler(struct trapframe *tf, int type)
{
	struct vm_map *map;
	struct pcb *pcb;
	struct thread *td;
	u_int user, far, fsr;
	vm_prot_t ftype;
	void *onfault;
	vm_offset_t va;
	int error = 0;
	struct ksig ksig;
	struct proc *p;

	if (type == 1)
		return (prefetch_abort_handler(tf));

	/* Grab FAR/FSR before enabling interrupts */
	far = cpu_faultaddress();
	fsr = cpu_faultstatus();
#if 0
	printf("data abort: fault address=%p (from pc=%p lr=%p)\n",
	       (void*)far, (void*)tf->tf_pc, (void*)tf->tf_svc_lr);
#endif

	/* Update vmmeter statistics */
#if 0
	vmexp.traps++;
#endif

	td = curthread;
	p = td->td_proc;

	PCPU_INC(cnt.v_trap);
	/* Data abort came from user mode? */
	user = TRAP_USERMODE(tf);

	if (user) {
		td->td_pticks = 0;
		td->td_frame = tf;
		if (td->td_cowgen != td->td_proc->p_cowgen)
			thread_cow_update(td);

	}
	/* Grab the current pcb */
	pcb = td->td_pcb;
	/* Re-enable interrupts if they were enabled previously */
	if (td->td_md.md_spinlock_count == 0) {
		if (__predict_true(tf->tf_spsr & PSR_I) == 0)
			enable_interrupts(PSR_I);
		if (__predict_true(tf->tf_spsr & PSR_F) == 0)
			enable_interrupts(PSR_F);
	}


	/* Invoke the appropriate handler, if necessary */
	if (__predict_false(data_aborts[fsr & FAULT_TYPE_MASK].func != NULL)) {
		if ((data_aborts[fsr & FAULT_TYPE_MASK].func)(tf, fsr, far,
		    td, &ksig)) {
			goto do_trapsignal;
		}
		goto out;
	}

	/*
	 * At this point, we're dealing with one of the following data aborts:
	 *
	 *  FAULT_TRANS_S  - Translation -- Section
	 *  FAULT_TRANS_P  - Translation -- Page
	 *  FAULT_DOMAIN_S - Domain -- Section
	 *  FAULT_DOMAIN_P - Domain -- Page
	 *  FAULT_PERM_S   - Permission -- Section
	 *  FAULT_PERM_P   - Permission -- Page
	 *
	 * These are the main virtual memory-related faults signalled by
	 * the MMU.
	 */

	/*
	 * Make sure the Program Counter is sane. We could fall foul of
	 * someone executing Thumb code, in which case the PC might not
	 * be word-aligned. This would cause a kernel alignment fault
	 * further down if we have to decode the current instruction.
	 * XXX: It would be nice to be able to support Thumb at some point.
	 */
	if (__predict_false((tf->tf_pc & 3) != 0)) {
		if (user) {
			/*
			 * Give the user an illegal instruction signal.
			 */
			/* Deliver a SIGILL to the process */
			ksig.signb = SIGILL;
			ksig.code = 0;
			goto do_trapsignal;
		}

		/*
		 * The kernel never executes Thumb code.
		 */
		printf("\ndata_abort_fault: Misaligned Kernel-mode "
		    "Program Counter\n");
		dab_fatal(tf, fsr, far, td, &ksig);
	}

	va = trunc_page((vm_offset_t)far);

	/*
	 * It is only a kernel address space fault iff:
	 *	1. user == 0  and
	 *	2. pcb_onfault not set or
	 *	3. pcb_onfault set and not LDRT/LDRBT/STRT/STRBT instruction.
	 */
	if (user == 0 && (va >= VM_MIN_KERNEL_ADDRESS ||
	    (va < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW)) &&
	    __predict_true((pcb->pcb_onfault == NULL ||
	     (ReadWord(tf->tf_pc) & 0x05200000) != 0x04200000))) {
		map = kernel_map;

		/* Was the fault due to the FPE/IPKDB ? */
		if (__predict_false((tf->tf_spsr & PSR_MODE)==PSR_UND32_MODE)) {

			/*
			 * Force exit via userret()
			 * This is necessary as the FPE is an extension to
			 * userland that actually runs in a privileged mode
			 * but uses USR mode permissions for its accesses.
			 */
			user = 1;
			ksig.signb = SIGSEGV;
			ksig.code = 0;
			goto do_trapsignal;
		}
	} else {
		map = &td->td_proc->p_vmspace->vm_map;
	}

	/*
	 * We need to know whether the page should be mapped as R or R/W.
	 * On armv4, the fault status register does not indicate whether
	 * the access was a read or write.  We know that a permission fault
	 * can only be the result of a write to a read-only location, so we
	 * can deal with those quickly.  Otherwise we need to disassemble
	 * the faulting instruction to determine if it was a write.
	 */
	if (IS_PERMISSION_FAULT(fsr))
		ftype = VM_PROT_WRITE;
	else {
		u_int insn = ReadWord(tf->tf_pc);

		if (((insn & 0x0c100000) == 0x04000000) ||	/* STR/STRB */
		    ((insn & 0x0e1000b0) == 0x000000b0) ||	/* STRH/STRD */
		    ((insn & 0x0a100000) == 0x08000000)) {	/* STM/CDT */
			ftype = VM_PROT_WRITE;
		} else {
			if ((insn & 0x0fb00ff0) == 0x01000090)	/* SWP */
				ftype = VM_PROT_READ | VM_PROT_WRITE;
			else
				ftype = VM_PROT_READ;
		}
	}

	/*
	 * See if the fault is as a result of ref/mod emulation,
	 * or domain mismatch.
	 */
#ifdef DEBUG
	last_fault_code = fsr;
#endif
	if (td->td_critnest != 0 || WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK,
	    NULL, "Kernel page fault") != 0)
		goto fatal_pagefault;

	if (pmap_fault_fixup(vmspace_pmap(td->td_proc->p_vmspace), va, ftype,
	    user)) {
		goto out;
	}

	onfault = pcb->pcb_onfault;
	pcb->pcb_onfault = NULL;
	error = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	pcb->pcb_onfault = onfault;
	if (__predict_true(error == 0))
		goto out;
fatal_pagefault:
	if (user == 0) {
		if (pcb->pcb_onfault) {
			tf->tf_r0 = error;
			tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault;
			return;
		}

		printf("\nvm_fault(%p, %x, %x, 0) -> %x\n", map, va, ftype,
		    error);
		dab_fatal(tf, fsr, far, td, &ksig);
	}


	if (error == ENOMEM) {
		printf("VM: pid %d (%s), uid %d killed: "
		    "out of swap\n", td->td_proc->p_pid, td->td_name,
		    (td->td_proc->p_ucred) ?
		     td->td_proc->p_ucred->cr_uid : -1);
		ksig.signb = SIGKILL;
	} else {
		ksig.signb = SIGSEGV;
	}
	ksig.code = 0;
do_trapsignal:
	call_trapsignal(td, ksig.signb, ksig.code);
out:
	/* If returning to user mode, make sure to invoke userret() */
	if (user)
		userret(td, tf);
}
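
On armv4 the handler above has to decode the faulting instruction itself to decide between VM_PROT_READ and VM_PROT_WRITE, using mask tests over the major opcode fields: single-register stores, halfword/doubleword stores, store-multiple and coprocessor transfers, and SWP, which both reads and writes. The same masks can be exercised outside the kernel; the helper below (fault_prot) is a made-up sketch, and the sample encodings are hand-picked examples, not taken from trap.c.

#include <stdint.h>
#include <stdio.h>

/* Classifies an ARM (A32) instruction word with the same mask tests the
 * data abort handler uses to pick VM_PROT_READ vs VM_PROT_WRITE. */
static const char *
fault_prot(uint32_t insn)
{
	if ((insn & 0x0c100000) == 0x04000000 ||	/* STR/STRB   */
	    (insn & 0x0e1000b0) == 0x000000b0 ||	/* STRH/STRD  */
	    (insn & 0x0a100000) == 0x08000000)		/* STM/CDT    */
		return "write";
	if ((insn & 0x0fb00ff0) == 0x01000090)		/* SWP/SWPB   */
		return "read|write";
	return "read";
}

int
main(void)
{
	/* A few hand-picked A32 encodings and their disassembly. */
	static const struct { uint32_t insn; const char *dis; } samples[] = {
		{ 0xe5832000, "str r2, [r3]"        },
		{ 0xe5d21000, "ldrb r1, [r2]"       },
		{ 0xe1c320b0, "strh r2, [r3]"       },
		{ 0xe8ad000f, "stmia sp!, {r0-r3}"  },
		{ 0xe1032091, "swp r2, r1, [r3]"    },
		{ 0xe5932000, "ldr r2, [r3]"        },
	};

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%08x  %-20s -> %s\n", samples[i].insn,
		    samples[i].dis, fault_prot(samples[i].insn));
	return 0;
}
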
Example No. 8
File: trap.c Project: rodero95/sys
/*
 * void prefetch_abort_handler(trapframe_t *tf)
 *
 * Abort handler called when instruction execution occurs at
 * a non-existent or restricted (access permissions) memory page.
 * If the address is invalid and we were in SVC mode then panic as
 * the kernel should never prefetch abort.
 * If the address is invalid and the page is mapped then the user process
 * does not have read permission, so send it a signal.
 * Otherwise fault the page in and try again.
 */
void
prefetch_abort_handler(trapframe_t *tf)
{
	struct thread *td;
	struct proc * p;
	struct vm_map *map;
	vm_offset_t fault_pc, va;
	int error = 0;
	struct ksig ksig;


#if 0
	/* Update vmmeter statistics */
	uvmexp.traps++;
#endif
#if 0
	printf("prefetch abort handler: %p %p\n", (void*)tf->tf_pc,
	    (void*)tf->tf_usr_lr);
#endif
	
 	td = curthread;
	p = td->td_proc;
	PCPU_INC(cnt.v_trap);

	if (TRAP_USERMODE(tf)) {
		td->td_frame = tf;
		if (td->td_ucred != td->td_proc->p_ucred)
			cred_update_thread(td);
	}
	fault_pc = tf->tf_pc;
	if (td->td_md.md_spinlock_count == 0) {
		if (__predict_true(tf->tf_spsr & I32_bit) == 0)
			enable_interrupts(I32_bit);
		if (__predict_true(tf->tf_spsr & F32_bit) == 0)
			enable_interrupts(F32_bit);
	}

	/* See if the cpu state needs to be fixed up */
	switch (prefetch_abort_fixup(tf, &ksig)) {
	case ABORT_FIXUP_RETURN:
		return;
	case ABORT_FIXUP_FAILED:
		/* Deliver a SIGILL to the process */
		ksig.signb = SIGILL;
		ksig.code = 0;
		td->td_frame = tf;
		goto do_trapsignal;
	default:
		break;
	}

	/* Prefetch aborts cannot happen in kernel mode */
	if (__predict_false(!TRAP_USERMODE(tf)))
		dab_fatal(tf, 0, tf->tf_pc, NULL, &ksig);
	td->td_pticks = 0;


	/* Ok validate the address, can only execute in USER space */
	if (__predict_false(fault_pc >= VM_MAXUSER_ADDRESS ||
	    (fault_pc < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW))) {
		ksig.signb = SIGSEGV;
		ksig.code = 0;
		goto do_trapsignal;
	}

	map = &td->td_proc->p_vmspace->vm_map;
	va = trunc_page(fault_pc);

	/*
	 * See if the pmap can handle this fault on its own...
	 */
#ifdef DEBUG
	last_fault_code = -1;
#endif
	if (pmap_fault_fixup(map->pmap, va, VM_PROT_READ, 1))
		goto out;

	if (map != kernel_map) {
		PROC_LOCK(p);
		p->p_lock++;
		PROC_UNLOCK(p);
	}

	error = vm_fault(map, va, VM_PROT_READ | VM_PROT_EXECUTE,
	    VM_FAULT_NORMAL);
	if (map != kernel_map) {
		PROC_LOCK(p);
		p->p_lock--;
		PROC_UNLOCK(p);
	}

	if (__predict_true(error == 0))
		goto out;

	if (error == ENOMEM) {
		printf("VM: pid %d (%s), uid %d killed: "
		    "out of swap\n", td->td_proc->p_pid, td->td_name,
		    (td->td_proc->p_ucred) ?
		     td->td_proc->p_ucred->cr_uid : -1);
		ksig.signb = SIGKILL;
	} else {
		ksig.signb = SIGSEGV;
	}
	ksig.code = 0;

do_trapsignal:
	call_trapsignal(td, ksig.signb, ksig.code);

out:
	userret(td, tf);

}
Example No. 9
/*
 * void prefetch_abort_handler(trapframe_t *tf)
 *
 * Abort handler called when instruction execution occurs at
 * a non-existent or restricted (access permissions) memory page.
 * If the address is invalid and we were in SVC mode then panic as
 * the kernel should never prefetch abort.
 * If the address is invalid and the page is mapped then the user process
 * does not have read permission, so send it a signal.
 * Otherwise fault the page in and try again.
 */
void
prefetch_abort_handler(trapframe_t *tf)
{
	struct lwp *l;
	struct pcb *pcb __diagused;
	struct vm_map *map;
	vaddr_t fault_pc, va;
	ksiginfo_t ksi;
	int error, user;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLED(maphist);

	/* Update vmmeter statistics */
	curcpu()->ci_data.cpu_ntrap++;

	l = curlwp;
	pcb = lwp_getpcb(l);

	if ((user = TRAP_USERMODE(tf)) != 0)
		LWP_CACHE_CREDS(l, l->l_proc);

	/*
	 * Enable IRQ's (disabled by the abort) This always comes
	 * from user mode so we know interrupts were not disabled.
	 * But we check anyway.
	 */
	KASSERT(!TRAP_USERMODE(tf) || (tf->tf_spsr & IF32_bits) == 0);
	if (__predict_true((tf->tf_spsr & I32_bit) != IF32_bits))
		restore_interrupts(tf->tf_spsr & IF32_bits);

	/* See if the CPU state needs to be fixed up */
	switch (prefetch_abort_fixup(tf)) {
	case ABORT_FIXUP_RETURN:
		KASSERT(!TRAP_USERMODE(tf) || (tf->tf_spsr & IF32_bits) == 0);
		return;
	case ABORT_FIXUP_FAILED:
		/* Deliver a SIGILL to the process */
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGILL;
		ksi.ksi_code = ILL_ILLOPC;
		ksi.ksi_addr = (uint32_t *)(intptr_t) tf->tf_pc;
		lwp_settrapframe(l, tf);
		goto do_trapsignal;
	default:
		break;
	}

	/* Prefetch aborts cannot happen in kernel mode */
	if (__predict_false(!user))
		dab_fatal(tf, 0, tf->tf_pc, NULL, NULL);

	/* Get fault address */
	fault_pc = tf->tf_pc;
	lwp_settrapframe(l, tf);
	UVMHIST_LOG(maphist, " (pc=0x%x, l=0x%x, tf=0x%x)",
	    fault_pc, l, tf, 0);

	/* Ok validate the address, can only execute in USER space */
	if (__predict_false(fault_pc >= VM_MAXUSER_ADDRESS ||
	    (fault_pc < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW))) {
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGSEGV;
		ksi.ksi_code = SEGV_ACCERR;
		ksi.ksi_addr = (uint32_t *)(intptr_t) fault_pc;
		ksi.ksi_trap = fault_pc;
		goto do_trapsignal;
	}

	map = &l->l_proc->p_vmspace->vm_map;
	va = trunc_page(fault_pc);

	/*
	 * See if the pmap can handle this fault on its own...
	 */
#ifdef DEBUG
	last_fault_code = -1;
#endif
	if (pmap_fault_fixup(map->pmap, va, VM_PROT_READ|VM_PROT_EXECUTE, 1)) {
		UVMHIST_LOG (maphist, " <- emulated", 0, 0, 0, 0);
		goto out;
	}

#ifdef DIAGNOSTIC
	if (__predict_false(curcpu()->ci_intr_depth > 0)) {
		printf("\nNon-emulated prefetch abort with intr_depth > 0\n");
		dab_fatal(tf, 0, tf->tf_pc, NULL, NULL);
	}
#endif

	KASSERT(pcb->pcb_onfault == NULL);
	error = uvm_fault(map, va, VM_PROT_READ|VM_PROT_EXECUTE);

	if (__predict_true(error == 0)) {
		UVMHIST_LOG (maphist, " <- uvm", 0, 0, 0, 0);
		goto out;
	}
	KSI_INIT_TRAP(&ksi);

	UVMHIST_LOG (maphist, " <- fatal (%d)", error, 0, 0, 0);

	if (error == ENOMEM) {
		printf("UVM: pid %d (%s), uid %d killed: "
		    "out of swap\n", l->l_proc->p_pid, l->l_proc->p_comm,
		    l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1);
		ksi.ksi_signo = SIGKILL;
	} else
		ksi.ksi_signo = SIGSEGV;

	ksi.ksi_code = SEGV_MAPERR;
	ksi.ksi_addr = (uint32_t *)(intptr_t) fault_pc;
	ksi.ksi_trap = fault_pc;

do_trapsignal:
	call_trapsignal(l, tf, &ksi);

out:
	KASSERT(!TRAP_USERMODE(tf) || (tf->tf_spsr & IF32_bits) == 0);
	userret(l);
}
Example No. 10
/*
 * dab_buserr() handles the following data aborts:
 *
 *  FAULT_BUSERR_0 - External Abort on Linefetch -- Section
 *  FAULT_BUSERR_1 - External Abort on Linefetch -- Page
 *  FAULT_BUSERR_2 - External Abort on Non-linefetch -- Section
 *  FAULT_BUSERR_3 - External Abort on Non-linefetch -- Page
 *  FAULT_BUSTRNL1 - External abort on Translation -- Level 1
 *  FAULT_BUSTRNL2 - External abort on Translation -- Level 2
 *
 * If pcb_onfault is set, flag the fault and return to the handler.
 * If the fault occurred in user mode, give the process a SIGBUS.
 *
 * Note: On XScale, FAULT_BUSERR_0, FAULT_BUSERR_1, and FAULT_BUSERR_2
 * can be flagged as imprecise in the FSR. This causes a real headache
 * since some of the machine state is lost. In this case, tf->tf_pc
 * may not actually point to the offending instruction. In fact, if
 * we've taken a double abort fault, it generally points somewhere near
 * the top of "data_abort_entry" in exception.S.
 *
 * In all other cases, these data aborts are considered fatal.
 */
static int
dab_buserr(trapframe_t *tf, u_int fsr, u_int far, struct lwp *l,
    ksiginfo_t *ksi)
{
	struct pcb *pcb = lwp_getpcb(l);

#ifdef __XSCALE__
	if ((fsr & FAULT_IMPRECISE) != 0 &&
	    (tf->tf_spsr & PSR_MODE) == PSR_ABT32_MODE) {
		/*
		 * Oops, an imprecise, double abort fault. We've lost the
		 * r14_abt/spsr_abt values corresponding to the original
		 * abort, and the spsr saved in the trapframe indicates
		 * ABT mode.
		 */
		tf->tf_spsr &= ~PSR_MODE;

		/*
		 * We use a simple heuristic to determine if the double abort
		 * happened as a result of a kernel or user mode access.
		 * If the current trapframe is at the top of the kernel stack,
		 * the fault _must_ have come from user mode.
		 */
		if (tf != ((trapframe_t *)pcb->pcb_ksp) - 1) {
			/*
			 * Kernel mode. We're either about to die a
			 * spectacular death, or pcb_onfault will come
			 * to our rescue. Either way, the current value
			 * of tf->tf_pc is irrelevant.
			 */
			tf->tf_spsr |= PSR_SVC32_MODE;
			if (pcb->pcb_onfault == NULL)
				printf("\nKernel mode double abort!\n");
		} else {
			/*
			 * User mode. We've lost the program counter at the
			 * time of the fault (not that it was accurate anyway;
			 * it's not called an imprecise fault for nothing).
			 * About all we can do is copy r14_usr to tf_pc and
			 * hope for the best. The process is about to get a
			 * SIGBUS, so it's probably history anyway.
			 */
			tf->tf_spsr |= PSR_USR32_MODE;
			tf->tf_pc = tf->tf_usr_lr;
#ifdef THUMB_CODE
			tf->tf_spsr &= ~PSR_T_bit;
			if (tf->tf_usr_lr & 1)
				tf->tf_spsr |= PSR_T_bit;
#endif
		}
	}

	/* FAR is invalid for imprecise exceptions */
	if ((fsr & FAULT_IMPRECISE) != 0)
		far = 0;
#endif /* __XSCALE__ */

	if (pcb->pcb_onfault) {
		KDASSERT(TRAP_USERMODE(tf) == 0);
		tf->tf_r0 = EFAULT;
		tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault;
		return (0);
	}

	/* See if the CPU state needs to be fixed up */
	(void) data_abort_fixup(tf, fsr, far, l);

	/*
	 * At this point, if the fault happened in kernel mode, we're toast
	 */
	if (!TRAP_USERMODE(tf))
		dab_fatal(tf, fsr, far, l, NULL);

	/* Deliver a bus error signal to the process */
	KSI_INIT_TRAP(ksi);
	ksi->ksi_signo = SIGBUS;
	ksi->ksi_code = BUS_ADRERR;
	ksi->ksi_addr = (uint32_t *)(intptr_t)far;
	ksi->ksi_trap = fsr;

	lwp_settrapframe(l, tf);

	return (1);
}
Example No. 11
void
data_abort_handler(trapframe_t *tf)
{
	struct vm_map *map;
	struct lwp * const l = curlwp;
	struct cpu_info * const ci = curcpu();
	u_int far, fsr;
	vm_prot_t ftype;
	void *onfault;
	vaddr_t va;
	int error;
	ksiginfo_t ksi;

	UVMHIST_FUNC(__func__);
	UVMHIST_CALLED(maphist);

	/* Grab FAR/FSR before enabling interrupts */
	far = cpu_faultaddress();
	fsr = cpu_faultstatus();

	/* Update vmmeter statistics */
	ci->ci_data.cpu_ntrap++;

	/* Re-enable interrupts if they were enabled previously */
	KASSERT(!TRAP_USERMODE(tf) || (tf->tf_spsr & IF32_bits) == 0);
	if (__predict_true((tf->tf_spsr & IF32_bits) != IF32_bits))
		restore_interrupts(tf->tf_spsr & IF32_bits);

	/* Get the current lwp structure */

	UVMHIST_LOG(maphist, " (l=%#x, far=%#x, fsr=%#x",
	    l, far, fsr, 0);
	UVMHIST_LOG(maphist, "  tf=%#x, pc=%#x)",
	    tf, tf->tf_pc, 0, 0);

	/* Data abort came from user mode? */
	bool user = (TRAP_USERMODE(tf) != 0);
	if (user)
		LWP_CACHE_CREDS(l, l->l_proc);

	/* Grab the current pcb */
	struct pcb * const pcb = lwp_getpcb(l);

	curcpu()->ci_abt_evs[fsr & FAULT_TYPE_MASK].ev_count++;

	/* Invoke the appropriate handler, if necessary */
	if (__predict_false(data_aborts[fsr & FAULT_TYPE_MASK].func != NULL)) {
#ifdef DIAGNOSTIC
		printf("%s: data_aborts fsr=0x%x far=0x%x\n",
		    __func__, fsr, far);
#endif
		if ((data_aborts[fsr & FAULT_TYPE_MASK].func)(tf, fsr, far,
		    l, &ksi))
			goto do_trapsignal;
		goto out;
	}

	/*
	 * At this point, we're dealing with one of the following data aborts:
	 *
	 *  FAULT_TRANS_S  - Translation -- Section
	 *  FAULT_TRANS_P  - Translation -- Page
	 *  FAULT_DOMAIN_S - Domain -- Section
	 *  FAULT_DOMAIN_P - Domain -- Page
	 *  FAULT_PERM_S   - Permission -- Section
	 *  FAULT_PERM_P   - Permission -- Page
	 *
	 * These are the main virtual memory-related faults signalled by
	 * the MMU.
	 */

	/* fusubailout is used by [fs]uswintr to avoid page faulting */
	if (__predict_false(pcb->pcb_onfault == fusubailout)) {
		tf->tf_r0 = EFAULT;
		tf->tf_pc = (intptr_t) pcb->pcb_onfault;
		return;
	}

	if (user) {
		lwp_settrapframe(l, tf);
	}

	/*
	 * Make sure the Program Counter is sane. We could fall foul of
	 * someone executing Thumb code, in which case the PC might not
	 * be word-aligned. This would cause a kernel alignment fault
	 * further down if we have to decode the current instruction.
	 */
#ifdef THUMB_CODE
	/*
	 * XXX: It would be nice to be able to support Thumb in the kernel
	 * at some point.
	 */
	if (__predict_false(!user && (tf->tf_pc & 3) != 0)) {
		printf("\n%s: Misaligned Kernel-mode Program Counter\n",
		    __func__);
		dab_fatal(tf, fsr, far, l, NULL);
	}
#else
	if (__predict_false((tf->tf_pc & 3) != 0)) {
		if (user) {
			/*
			 * Give the user an illegal instruction signal.
			 */
			/* Deliver a SIGILL to the process */
			KSI_INIT_TRAP(&ksi);
			ksi.ksi_signo = SIGILL;
			ksi.ksi_code = ILL_ILLOPC;
			ksi.ksi_addr = (uint32_t *)(intptr_t) far;
			ksi.ksi_trap = fsr;
			goto do_trapsignal;
		}

		/*
		 * The kernel never executes Thumb code.
		 */
		printf("\n%s: Misaligned Kernel-mode Program Counter\n",
		    __func__);
		dab_fatal(tf, fsr, far, l, NULL);
	}
#endif

	/* See if the CPU state needs to be fixed up */
	switch (data_abort_fixup(tf, fsr, far, l)) {
	case ABORT_FIXUP_RETURN:
		return;
	case ABORT_FIXUP_FAILED:
		/* Deliver a SIGILL to the process */
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGILL;
		ksi.ksi_code = ILL_ILLOPC;
		ksi.ksi_addr = (uint32_t *)(intptr_t) far;
		ksi.ksi_trap = fsr;
		goto do_trapsignal;
	default:
		break;
	}

	va = trunc_page((vaddr_t)far);

	/*
	 * It is only a kernel address space fault iff:
	 *	1. user == 0  and
	 *	2. pcb_onfault not set or
	 *	3. pcb_onfault set and not LDRT/LDRBT/STRT/STRBT instruction.
	 */
	if (!user && (va >= VM_MIN_KERNEL_ADDRESS ||
	    (va < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW)) &&
	    __predict_true((pcb->pcb_onfault == NULL ||
	     (read_insn(tf->tf_pc, false) & 0x05200000) != 0x04200000))) {
		map = kernel_map;

		/* Was the fault due to the FPE/IPKDB ? */
		if (__predict_false((tf->tf_spsr & PSR_MODE)==PSR_UND32_MODE)) {
			KSI_INIT_TRAP(&ksi);
			ksi.ksi_signo = SIGSEGV;
			ksi.ksi_code = SEGV_ACCERR;
			ksi.ksi_addr = (uint32_t *)(intptr_t) far;
			ksi.ksi_trap = fsr;

			/*
			 * Force exit via userret()
			 * This is necessary as the FPE is an extension to
			 * userland that actually runs in a privileged mode
			 * but uses USR mode permissions for its accesses.
			 */
			user = true;
			goto do_trapsignal;
		}
	} else {
		map = &l->l_proc->p_vmspace->vm_map;
	}

	/*
	 * We need to know whether the page should be mapped as R or R/W.
	 * Before ARMv6, the MMU did not give us the info as to whether the
	 * fault was caused by a read or a write.
	 *
	 * However, we know that a permission fault can only be the result of
	 * a write to a read-only location, so we can deal with those quickly.
	 *
	 * Otherwise we need to disassemble the instruction responsible to
	 * determine if it was a write.
	 */
	if (CPU_IS_ARMV6_P() || CPU_IS_ARMV7_P()) {
		ftype = (fsr & FAULT_WRITE) ? VM_PROT_WRITE : VM_PROT_READ;
	} else if (IS_PERMISSION_FAULT(fsr)) {
		ftype = VM_PROT_WRITE;
	} else {
#ifdef THUMB_CODE
		/* Fast track the ARM case.  */
		if (__predict_false(tf->tf_spsr & PSR_T_bit)) {
			u_int insn = read_thumb_insn(tf->tf_pc, user);
			u_int insn_f8 = insn & 0xf800;
			u_int insn_fe = insn & 0xfe00;

			if (insn_f8 == 0x6000 || /* STR(1) */
			    insn_f8 == 0x7000 || /* STRB(1) */
			    insn_f8 == 0x8000 || /* STRH(1) */
			    insn_f8 == 0x9000 || /* STR(3) */
			    insn_f8 == 0xc000 || /* STM */
			    insn_fe == 0x5000 || /* STR(2) */
			    insn_fe == 0x5200 || /* STRH(2) */
			    insn_fe == 0x5400)   /* STRB(2) */
				ftype = VM_PROT_WRITE;
			else
				ftype = VM_PROT_READ;
		}
		else
#endif
		{
			u_int insn = read_insn(tf->tf_pc, user);

			if (((insn & 0x0c100000) == 0x04000000) || /* STR[B] */
			    ((insn & 0x0e1000b0) == 0x000000b0) || /* STR[HD]*/
			    ((insn & 0x0a100000) == 0x08000000) || /* STM/CDT*/
			    ((insn & 0x0f9000f0) == 0x01800090))   /* STREX[BDH] */
				ftype = VM_PROT_WRITE;
			else if ((insn & 0x0fb00ff0) == 0x01000090)/* SWP */
				ftype = VM_PROT_READ | VM_PROT_WRITE;
			else
				ftype = VM_PROT_READ;
		}
	}

	/*
	 * See if the fault is as a result of ref/mod emulation,
	 * or domain mismatch.
	 */
#ifdef DEBUG
	last_fault_code = fsr;
#endif
	if (pmap_fault_fixup(map->pmap, va, ftype, user)) {
		UVMHIST_LOG(maphist, " <- ref/mod emul", 0, 0, 0, 0);
		goto out;
	}

	if (__predict_false(curcpu()->ci_intr_depth > 0)) {
		if (pcb->pcb_onfault) {
			tf->tf_r0 = EINVAL;
			tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault;
			return;
		}
		printf("\nNon-emulated page fault with intr_depth > 0\n");
		dab_fatal(tf, fsr, far, l, NULL);
	}

	onfault = pcb->pcb_onfault;
	pcb->pcb_onfault = NULL;
	error = uvm_fault(map, va, ftype);
	pcb->pcb_onfault = onfault;

	if (__predict_true(error == 0)) {
		if (user)
			uvm_grow(l->l_proc, va); /* Record any stack growth */
		else
			ucas_ras_check(tf);
		UVMHIST_LOG(maphist, " <- uvm", 0, 0, 0, 0);
		goto out;
	}

	if (user == 0) {
		if (pcb->pcb_onfault) {
			tf->tf_r0 = error;
			tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault;
			return;
		}

		printf("\nuvm_fault(%p, %lx, %x) -> %x\n", map, va, ftype,
		    error);
		dab_fatal(tf, fsr, far, l, NULL);
	}

	KSI_INIT_TRAP(&ksi);

	if (error == ENOMEM) {
		printf("UVM: pid %d (%s), uid %d killed: "
		    "out of swap\n", l->l_proc->p_pid, l->l_proc->p_comm,
		    l->l_cred ? kauth_cred_geteuid(l->l_cred) : -1);
		ksi.ksi_signo = SIGKILL;
	} else
		ksi.ksi_signo = SIGSEGV;

	ksi.ksi_code = (error == EACCES) ? SEGV_ACCERR : SEGV_MAPERR;
	ksi.ksi_addr = (uint32_t *)(intptr_t) far;
	ksi.ksi_trap = fsr;
	UVMHIST_LOG(maphist, " <- error (%d)", error, 0, 0, 0);

do_trapsignal:
	call_trapsignal(l, tf, &ksi);
out:
	/* If returning to user mode, make sure to invoke userret() */
	if (user)
		userret(l);
}
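
When the fault was taken in Thumb state, the pre-ARMv6 path above applies an analogous set of mask tests to the 16-bit encoding to spot the store forms (STR, STRB, STRH, their register-offset variants, the sp-relative STR, and STMIA). A user-space sketch of that classification follows; the helper name and the sample encodings are illustrative, not taken from the kernel source.

#include <stdint.h>
#include <stdio.h>

/* Classifies a 16-bit Thumb instruction with the same mask tests the
 * pre-ARMv6 path of data_abort_handler() uses when the fault was taken
 * in Thumb state. */
static const char *
thumb_fault_prot(uint16_t insn)
{
	uint16_t f8 = insn & 0xf800;
	uint16_t fe = insn & 0xfe00;

	if (f8 == 0x6000 ||	/* STR(1)  immediate offset  */
	    f8 == 0x7000 ||	/* STRB(1) immediate offset  */
	    f8 == 0x8000 ||	/* STRH(1) immediate offset  */
	    f8 == 0x9000 ||	/* STR(3)  sp-relative       */
	    f8 == 0xc000 ||	/* STMIA                     */
	    fe == 0x5000 ||	/* STR(2)  register offset   */
	    fe == 0x5200 ||	/* STRH(2) register offset   */
	    fe == 0x5400)	/* STRB(2) register offset   */
		return "write";
	return "read";
}

int
main(void)
{
	/* Illustrative 16-bit Thumb encodings and their disassembly. */
	static const struct { uint16_t insn; const char *dis; } samples[] = {
		{ 0x6013, "str  r3, [r2]"       },
		{ 0x7013, "strb r3, [r2]"       },
		{ 0x8013, "strh r3, [r2]"       },
		{ 0x9300, "str  r3, [sp]"       },
		{ 0xc00e, "stmia r0!, {r1-r3}"  },
		{ 0x680b, "ldr  r3, [r1]"       },
	};

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%04x  %-20s -> %s\n", samples[i].insn,
		    samples[i].dis, thumb_fault_prot(samples[i].insn));
	return 0;
}
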