int
hardclockintr(struct trapframe *frame)
{

	if (PCPU_GET(cpuid) == 0)
		hardclock(TRAPF_USERMODE(frame), TRAPF_PC(frame));
	else
		hardclock_cpu(TRAPF_USERMODE(frame));
	return (FILTER_HANDLED);
}
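
All of the examples on this page revolve around two machine-dependent macros: TRAPF_USERMODE(), which reports whether a trap frame was captured in user mode, and TRAPF_PC(), which extracts the interrupted program counter. As a rough, illustrative sketch only (the real definitions live in each architecture's machine/cpu.h and differ per platform):

/*
 * Illustrative only -- not the authoritative definitions.
 * amd64: user mode iff the saved %cs selector has user privilege;
 * ARM:   user mode iff the saved PSR mode bits select USR32 mode.
 */
#define	TRAPF_USERMODE(tf)	(ISPL((tf)->tf_cs) == SEL_UPL)	/* amd64 */
#define	TRAPF_PC(tf)		((tf)->tf_rip)			/* amd64 */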
Example #2
/*
 * abort_align() handles the following data abort:
 *
 *  FAULT_ALIGN - Alignment fault
 *
 * Every memory access in the kernel should be correctly aligned, including
 * user-to-kernel and kernel-to-user data copying, so we ignore pcb_onfault
 * and the fault is always fatal. We generate a signal for aborts taken in
 * user mode.
 */
static int
abort_align(struct trapframe *tf, u_int idx, u_int fsr, u_int far, u_int prefetch,
    struct thread *td, struct ksig *ksig)
{
	u_int usermode;

	usermode = TRAPF_USERMODE(tf);

	/*
	 * Alignment faults are always fatal if they occur in any but user mode.
	 *
	 * XXX The old trap code honored pcb_onfault even for alignment traps.
	 * Unfortunately, we don't know why, or whether that is still needed.
	 */
	if (!usermode) {
		if (td != NULL && td->td_intr_nesting_level == 0 &&
		    td->td_pcb->pcb_onfault != NULL) {
			printf("%s: Got alignment fault with pcb_onfault set"
			    ", please report this issue\n", __func__);
			tf->tf_r0 = EFAULT;
			tf->tf_pc = (int)td->td_pcb->pcb_onfault;
			return (0);
		}
		abort_fatal(tf, idx, fsr, far, prefetch, td, ksig);
	}
	/* Deliver a bus error signal to the process */
	ksig->code = 0;
	ksig->sig = SIGBUS;
	ksig->addr = far;
	return (1);
}
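
The return convention above (0 means the fault was recovered via pcb_onfault, 1 means the caller should deliver the signal described by *ksig) implies a call site along the following lines; this is a hedged sketch modeled on the abort_handler() example later on this page, not verbatim source:

	if (abort_align(tf, idx, fsr, far, prefetch, td, &ksig))
		call_trapsignal(td, ksig.sig, ksig.code, ksig.addr);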
Example #3
/* Per-CPU timer2 handler. */
static int
statclockhandler(struct trapframe *frame)
{

	timer2clock(TRAPF_USERMODE(frame), TRAPF_PC(frame));
	return (FILTER_HANDLED);
}
int
statclockintr(struct trapframe *frame)
{

	profclockintr(frame);
	statclock(TRAPF_USERMODE(frame));
	return (FILTER_HANDLED);
}
Example #5
/*
 * abort_fatal() handles the following data aborts:
 *
 *  FAULT_DEBUG		- Debug Event
 *  FAULT_ACCESS_xx	- Access Bit
 *  FAULT_EA_PREC	- Precise External Abort
 *  FAULT_DOMAIN_xx	- Domain Fault
 *  FAULT_EA_TRAN_xx	- External Translation Abort
 *  FAULT_EA_IMPREC	- Imprecise External Abort
 *  + all undefined codes for ABORT
 *
 * We should never see these on a properly functioning system.
 *
 * This function is also called by the other handlers if they
 * detect a fatal problem.
 *
 * Note: If 'td' is NULL, we assume we're dealing with a prefetch abort.
 */
static int
abort_fatal(struct trapframe *tf, u_int idx, u_int fsr, u_int far,
    u_int prefetch, struct thread *td, struct ksig *ksig)
{
	bool usermode;
	const char *mode;
	const char *rw_mode;

	usermode = TRAPF_USERMODE(tf);
#ifdef KDTRACE_HOOKS
	if (!usermode) {
		if (dtrace_trap_func != NULL && (*dtrace_trap_func)(tf, far))
			return (0);
	}
#endif

	mode = usermode ? "user" : "kernel";
	rw_mode = (fsr & FSR_WNR) ? "write" : "read";
	disable_interrupts(PSR_I|PSR_F);

	if (td != NULL) {
		printf("Fatal %s mode data abort: '%s' on %s\n", mode,
		    aborts[idx].desc, rw_mode);
		printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr);
		if (idx != FAULT_EA_IMPREC)
			printf("%08x, ", far);
		else
			printf("Invalid,  ");
		printf("spsr=%08x\n", tf->tf_spsr);
	} else {
		printf("Fatal %s mode prefetch abort at 0x%08x\n",
		    mode, tf->tf_pc);
		printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr);
	}

	printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n",
	    tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3);
	printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n",
	    tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7);
	printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n",
	    tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11);
	printf("r12=%08x, ", tf->tf_r12);

	if (usermode)
		printf("usp=%08x, ulr=%08x",
		    tf->tf_usr_sp, tf->tf_usr_lr);
	else
		printf("ssp=%08x, slr=%08x",
		    tf->tf_svc_sp, tf->tf_svc_lr);
	printf(", pc =%08x\n\n", tf->tf_pc);

#ifdef KDB
	if (debugger_on_panic || kdb_active)
		kdb_trap(fsr, 0, tf);
#endif
	panic("Fatal abort");
	/*NOTREACHED*/
}
int
profclockintr(struct trapframe *frame)
{

	if (!using_atrtc_timer)
		hardclockintr(frame);
	if (profprocs != 0)
		profclock(TRAPF_USERMODE(frame), TRAPF_PC(frame));
	return (FILTER_HANDLED);
}
Example #7
static int
clock_intr(void *arg)
{
	struct trapframe *fp = arg;

	atomic_add_32(&s3c24x0_base, timer4_reload_value);

	hardclock(TRAPF_USERMODE(fp), TRAPF_PC(fp));
	return (FILTER_HANDLED);
}
Example #8
int
pmc_save_user_callchain(uintptr_t *cc, int nframes, struct trapframe *tf)
{
	int n;
	uint32_t instr;
	uintptr_t fp, oldfp, pc, r, sp;

	KASSERT(TRAPF_USERMODE(tf), ("[x86,%d] Not a user trap frame tf=%p",
	    __LINE__, (void *) tf));

	pc = PMC_TRAPFRAME_TO_PC(tf);
	oldfp = fp = PMC_TRAPFRAME_TO_FP(tf);
	sp = PMC_TRAPFRAME_TO_USER_SP(tf);

	*cc++ = pc; n = 1;

	r = fp + sizeof(uintptr_t); /* points to return address */

	if (!PMC_IN_USERSPACE(pc))
		return (n);

	if (copyin((void *) pc, &instr, sizeof(instr)) != 0)
		return (n);

	if (PMC_AT_FUNCTION_PROLOGUE_PUSH_BP(instr) ||
	    PMC_AT_FUNCTION_EPILOGUE_RET(instr)) { /* ret */
		if (copyin((void *) sp, &pc, sizeof(pc)) != 0)
			return (n);
	} else if (PMC_AT_FUNCTION_PROLOGUE_MOV_SP_BP(instr)) {
		sp += sizeof(uintptr_t);
		if (copyin((void *) sp, &pc, sizeof(pc)) != 0)
			return (n);
	} else if (copyin((void *) r, &pc, sizeof(pc)) != 0 ||
	    copyin((void *) fp, &fp, sizeof(fp)) != 0)
		return (n);

	for (; n < nframes;) {
		if (pc == 0 || !PMC_IN_USERSPACE(pc))
			break;

		*cc++ = pc; n++;

		if (fp < oldfp)
			break;

		r = fp + sizeof(uintptr_t); /* address of return address */
		oldfp = fp;

		if (copyin((void *) r, &pc, sizeof(pc)) != 0 ||
		    copyin((void *) fp, &fp, sizeof(fp)) != 0)
			break;
	}

	return (n);
}
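
Note how every access to the user stack above goes through copyin(9) instead of a direct dereference: the addresses are derived from an untrusted user frame pointer and may be unmapped. A minimal helper capturing the pattern (hypothetical; the name read_user_word is ours, not the kernel's):

/* Hypothetical helper: fetch one word from user space, 0 on success. */
static int
read_user_word(uintptr_t uaddr, uintptr_t *val)
{

	return (copyin((const void *)uaddr, val, sizeof(*val)));
}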
Example #9
/*
 * ixpclk_intr:
 *
 *	Handle the hardclock interrupt.
 */
int
ixpclk_intr(void *arg)
{
    struct ixpclk_softc *sc = ixpclk_sc;
    struct trapframe *frame = arg;

    bus_space_write_4(sc->sc_iot, sc->sc_ioh, IXP425_OST_STATUS,
                      OST_TIM0_INT);

    hardclock(TRAPF_USERMODE(frame), TRAPF_PC(frame));
    return (FILTER_HANDLED);
}
Example #10
static int
clock_intr(void *arg)
{
	struct trapframe *fp = arg;

	/* The interrupt is shared, so we have to make sure it's for us. */
	if (RD4(ST_SR) & ST_SR_PITS) {
		hardclock(TRAPF_USERMODE(fp), TRAPF_PC(fp));
		return (FILTER_HANDLED);
	}
	return (FILTER_STRAY);
}
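
These clock filters receive the trap frame through their void * argument: FreeBSD's interrupt dispatch historically passes the current trapframe to a filter that was registered with a NULL argument, which is why clock_intr() can cast arg directly. A hedged registration sketch (dev, sc->irq_res, and sc->intrhand are assumed names):

	/*
	 * Sketch: hook clock_intr() up as a fast interrupt filter.
	 * The NULL argument makes the interrupt code hand the filter
	 * the current trapframe instead.
	 */
	bus_setup_intr(dev, sc->irq_res, INTR_TYPE_CLK,
	    clock_intr, NULL, NULL, &sc->intrhand);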
Example #11
int
pmc_save_kernel_callchain(uintptr_t *cc, int maxsamples,
    struct trapframe *tf)
{
	uintptr_t pc, r, stackstart, stackend, fp;
	struct thread *td;
	int count;

	KASSERT(TRAPF_USERMODE(tf) == 0, ("[arm,%d] not a kernel backtrace",
	    __LINE__));

	pc = PMC_TRAPFRAME_TO_PC(tf);
	*cc++ = pc;

	if ((td = curthread) == NULL)
		return (1);

	if (maxsamples <= 1)
		return (1);

	stackstart = (uintptr_t) td->td_kstack;
	stackend = (uintptr_t) td->td_kstack + td->td_kstack_pages * PAGE_SIZE;
	fp = PMC_TRAPFRAME_TO_FP(tf);

	if (!PMC_IN_KERNEL(pc) ||
	    !PMC_IN_KERNEL_STACK(fp, stackstart, stackend))
		return (1);

	for (count = 1; count < maxsamples; count++) {
		/* Use saved lr as pc. */
		r = fp - sizeof(uintptr_t);
		if (!PMC_IN_KERNEL_STACK(r, stackstart, stackend))
			break;
		pc = *(uintptr_t *)r;
		if (!PMC_IN_KERNEL(pc))
			break;

		*cc++ = pc;

		/* Switch to next frame up */
		r = fp - 3 * sizeof(uintptr_t);
		if (!PMC_IN_KERNEL_STACK(r, stackstart, stackend))
			break;
		fp = *(uintptr_t *)r;
		if (!PMC_IN_KERNEL_STACK(fp, stackstart, stackend))
			break;
	}

	return (count);
}
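
The offsets used above (saved lr at fp - 4, caller's fp at fp - 12 on a 32-bit kernel) follow the classic APCS frame layout, sketched here for reference; this assumes the usual four-register prologue, which not every compiler emits:

/*
 * APCS frame as laid down by the common ARM prologue
 * "mov ip, sp; stmfd sp!, {fp, ip, lr, pc}; sub fp, ip, #4":
 *
 *	fp + 0  : saved pc
 *	fp - 4  : saved lr  (caller's return address)
 *	fp - 8  : saved ip  (caller's sp)
 *	fp - 12 : saved fp  (caller's frame pointer)
 */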
Example #12
static int
clock_intr(void *arg)
{
	struct trapframe *fp = arg;

	/* The interrupt is shared, so we have to make sure it's for us. */
	if (RD4(ST_SR) & ST_SR_PITS) {
#ifdef SKYEYE_WORKAROUNDS
		tot_count += 32768 / hz;
#endif
		hardclock(TRAPF_USERMODE(fp), TRAPF_PC(fp));
		return (FILTER_HANDLED);
	}
	return (FILTER_STRAY);
}
Example #13
static int
pit_intr(void *arg)
{
	struct trapframe *fp = arg;
	uint32_t icnt;

	if (RD4(sc, PIT_SR) & PIT_PITS_DONE) {
		icnt = RD4(sc, PIT_PIVR) >> 20;

		/* Just add in the overflows we just read */
		timecount += PIT_PIV(RD4(sc, PIT_MR)) * icnt;

		hardclock(TRAPF_USERMODE(fp), TRAPF_PC(fp));
		return (FILTER_HANDLED);
	}
Example #14
int
pmc_save_user_callchain(uintptr_t *cc, int maxsamples,
    struct trapframe *tf)
{
	uintptr_t pc, r, oldfp, fp;
	struct thread *td;
	int count;

	KASSERT(TRAPF_USERMODE(tf), ("[x86,%d] Not a user trap frame tf=%p",
	    __LINE__, (void *) tf));

	pc = PMC_TRAPFRAME_TO_PC(tf);
	*cc++ = pc;

	if ((td = curthread) == NULL)
		return (1);

	if (maxsamples <= 1)
		return (1);

	oldfp = fp = PMC_TRAPFRAME_TO_FP(tf);

	if (!PMC_IN_USERSPACE(pc) ||
	    !PMC_IN_USERSPACE(fp))
		return (1);

	for (count = 1; count < maxsamples; count++) {
		/* Use saved lr as pc. */
		r = fp - sizeof(uintptr_t);
		if (copyin((void *)r, &pc, sizeof(pc)) != 0)
			break;
		if (!PMC_IN_USERSPACE(pc))
			break;

		*cc++ = pc;

		/* Switch to next frame up */
		oldfp = fp;
		r = fp - 3 * sizeof(uintptr_t);
		if (copyin((void *)r, &fp, sizeof(fp)) != 0)
			break;
		if (fp < oldfp || !PMC_IN_USERSPACE(fp))
			break;
	}

	return (count);
}
Example #15
int
pmc_soft_intr(struct pmckern_soft *ks)
{
	struct pmc *pm;
	struct soft_cpu *pc;
	int ri, processed, error, user_mode;

	KASSERT(ks->pm_cpu >= 0 && ks->pm_cpu < pmc_cpu_max(),
	    ("[soft,%d] CPU %d out of range", __LINE__, ks->pm_cpu));

	processed = 0;
	pc = soft_pcpu[ks->pm_cpu];

	for (ri = 0; ri < SOFT_NPMCS; ri++) {

		pm = pc->soft_hw[ri].phw_pmc;
		if (pm == NULL ||
		    pm->pm_state != PMC_STATE_RUNNING ||
		    pm->pm_event != ks->pm_ev) {
			continue;
		}

		processed = 1;
		pc->soft_values[ri]++;
		if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
			user_mode = TRAPF_USERMODE(ks->pm_tf);
			error = pmc_process_interrupt(ks->pm_cpu, PMC_SR, pm,
			    ks->pm_tf, user_mode);
			if (error) {
				soft_stop_pmc(ks->pm_cpu, ri);
				continue;
			}

			if (user_mode) {
				/*
				 * If in user mode, set up an AST to process
				 * the callchain out of interrupt context.
				 */
				curthread->td_flags |= TDF_ASTPENDING;
			}
		}
	}

	atomic_add_int(processed ? &pmc_stats.pm_intr_processed :
	    &pmc_stats.pm_intr_ignored, 1);

	return (processed);
}
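
For context, a caller of pmc_soft_intr() fills in a struct pmckern_soft with the fields consumed above. A hypothetical sketch, where the event id ev and the trap frame tf are assumed to come from the surrounding trap or interrupt code:

	struct pmckern_soft ks;

	ks.pm_ev = ev;			/* software event id (assumed) */
	ks.pm_cpu = PCPU_GET(cpuid);	/* CPU taking the event */
	ks.pm_tf = tf;			/* trap frame at the event point */
	(void)pmc_soft_intr(&ks);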
Example #16
static int
arm64_intr(int cpu, struct trapframe *tf)
{
	struct arm64_cpu *pc;
	int retval, ri;
	struct pmc *pm;
	int error;
	int reg;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[arm64,%d] CPU %d out of range", __LINE__, cpu));

	retval = 0;
	pc = arm64_pcpu[cpu];

	for (ri = 0; ri < arm64_npmcs; ri++) {
		pm = arm64_pcpu[cpu]->pc_arm64pmcs[ri].phw_pmc;
		if (pm == NULL)
			continue;
		if (!PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
			continue;

		/* Check if counter is overflowed */
		reg = (1 << ri);
		if ((READ_SPECIALREG(PMOVSCLR_EL0) & reg) == 0)
			continue;
		/* Clear Overflow Flag */
		WRITE_SPECIALREG(PMOVSCLR_EL0, reg);

		isb();

		retval = 1; /* Found an interrupting PMC. */
		if (pm->pm_state != PMC_STATE_RUNNING)
			continue;

		error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
		    TRAPF_USERMODE(tf));
		if (error)
			arm64_stop_pmc(cpu, ri);

		/* Reload sampling count */
		arm64_write_pmc(cpu, ri, pm->pm_sc.pm_reloadcount);
	}

	return (retval);
}
Example #17
static int
clock_intr(void *arg)
{

	struct trapframe *fp = arg;

	if (RD4(PIT_SR) & PIT_PITS_DONE) {
		uint32_t pivr = RD4(PIT_PIVR);
		if (PIT_CNT(pivr) > 1) {
			printf("cnt = %d\n", PIT_CNT(pivr));
		}
		pit_counter += pit_cycle;
		hardclock(TRAPF_USERMODE(fp), TRAPF_PC(fp));
		return (FILTER_HANDLED);
	}
	return (FILTER_STRAY);
}
Example #18
/* Per-CPU timer1 handler. */
static int
hardclockhandler(struct trapframe *frame)
{

#ifdef KDTRACE_HOOKS
	/*
	 * If the DTrace hooks are configured and a callback function
	 * has been registered, then call it to process the high speed
	 * timers.
	 */
	int cpu = curcpu;
	if (cyclic_clock_func[cpu] != NULL)
		(*cyclic_clock_func[cpu])(frame);
#endif

	timer1clock(TRAPF_USERMODE(frame), TRAPF_PC(frame));
	return (FILTER_HANDLED);
}
Example #19
/*
 * abort_align() handles the following data abort:
 *
 *  FAULT_ALIGN - Alignment fault
 *
 * Everything in the kernel should be aligned, with the exception of
 * user-to-kernel and kernel-to-user data copying, so if pcb_onfault is not
 * set, the fault is fatal. We generate a signal for aborts taken in user
 * mode.
 */
static int
abort_align(struct trapframe *tf, u_int idx, u_int fsr, u_int far,
    u_int prefetch, struct thread *td, struct ksig *ksig)
{
	bool usermode;

	usermode = TRAPF_USERMODE(tf);
	if (!usermode) {
		if (td != NULL && td->td_intr_nesting_level == 0 &&
		    td->td_pcb->pcb_onfault != NULL) {
			tf->tf_r0 = EFAULT;
			tf->tf_pc = (int)td->td_pcb->pcb_onfault;
			return (0);
		}
		abort_fatal(tf, idx, fsr, far, prefetch, td, ksig);
	}
	/* Deliver a bus error signal to the process */
	ksig->code = BUS_ADRALN;
	ksig->sig = SIGBUS;
	ksig->addr = far;
	return (1);
}
/*
 * This function is the one registered by the machine-dependent
 * initialiser as the callback for high-speed timer events.
 */
static void
cyclic_clock(struct trapframe *frame)
{
    cpu_t *c = &solaris_cpu[curcpu];

    if (c->cpu_cyclic != NULL && gethrtime() >= exp_due[curcpu]) {
        if (TRAPF_USERMODE(frame)) {
            c->cpu_profile_pc = 0;
            c->cpu_profile_upc = TRAPF_PC(frame);
        } else {
            c->cpu_profile_pc = TRAPF_PC(frame);
            c->cpu_profile_upc = 0;
        }

        c->cpu_intr_actv = 1;

        /* Fire any timers that are due. */
        cyclic_fire(c);

        c->cpu_intr_actv = 0;
    }
}
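
cyclic_clock() above is the callback that the hardclockhandler() example (Example #18) invokes through cyclic_clock_func[cpu]. Registration is presumably a per-CPU assignment along these lines (a sketch, not the verbatim machine-dependent initialiser):

	/* Sketch: install the high speed timer callback consumed by
	 * hardclockhandler() via cyclic_clock_func[]. */
	cyclic_clock_func[curcpu] = cyclic_clock;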
Example #21
size_t
md_stack_capture_curthread(caddr_t *pcs, size_t size)
{
        struct trapframe *tf = curthread->td_intr_frame;
	pmap_t pmap = vmspace_pmap(curthread->td_proc->p_vmspace);
	
        size_t num_kstacks = 0, num_ustacks = 0;

#if SAMPLE_DEBUG > 2
	printf("%s(%d):  curthread pid %u, tid %u, name %s\n", __FUNCTION__, __LINE__, curthread->td_proc->p_pid, curthread->td_tid, curthread->td_name);
#endif
	
        if (tf == NULL) {
#if SAMPLE_DEBUG > 5
		printf("%s(%d):  intr frame is NULL?\n", __FUNCTION__, __LINE__);
#endif
//		tf = curthread->td_frame;
		/*
		 * I'm not sure what this means.  This should be done via interrupt,
		 * so if curthread wasn't interrupted, then I don't think we can get
		 * anything here?  Please tell me what I'm doing wrong.
		 */
		return 0;
	}

        if (!TRAPF_USERMODE(tf)) {
                // Start with kernel mode
                unsigned long callpc;
                struct amd64_frame *frame;

                frame = (struct amd64_frame*)tf->tf_rbp;

                while (num_kstacks < size) {
                        if (!INKERNEL((long)frame))
                                break;
                        callpc = frame->f_retaddr;
                        if (!INKERNEL(callpc))
                                break;
                        pcs[num_kstacks++] = (caddr_t)callpc;
                        if (frame->f_frame <= frame ||
                            (vm_offset_t)frame->f_frame >= (vm_offset_t)tf->tf_rbp + KSTACK_PAGES * PAGE_SIZE)
                                break;
                        frame = frame->f_frame;
                }
                tf = curthread->td_frame;
        }
        if (TRAPF_USERMODE(tf)) {
                caddr_t *start_pc = pcs + num_kstacks;
                struct amd64_frame frame;

#if SAMPLE_DEBUG > 3
		printf("%s(%d):  doing user mode crawl\n", __FUNCTION__, __LINE__);
#endif
                frame.f_frame = (struct amd64_frame*)tf->tf_rbp;
                if (tf != curthread->td_intr_frame) {
                        start_pc[num_ustacks++] = (caddr_t)tf->tf_rip;
                }
                while ((num_kstacks + num_ustacks) < size) {
                        void *bp = frame.f_frame;
			/*
			 * The calls to GET_WORD replace the non-functional:
			 * copyin_nofault((void*)frame.f_frame, &frame, sizeof(frame));
			 */
			
			frame.f_frame = (struct amd64_frame*)GET_WORD(pmap, (caddr_t)frame.f_frame);
			if (frame.f_frame == 0) {
#if SAMPLE_DEBUG > 4
				printf("%s(%d):  %s:  frame is 0\n", __FUNCTION__, __LINE__, curthread->td_name);
#endif
				break;
			}
			frame.f_retaddr = (long)GET_WORD(pmap, (caddr_t)frame.f_frame + offsetof(struct amd64_frame, f_retaddr));
			if (frame.f_retaddr == 0) {
#if SAMPLE_DEBUG > 4
				printf("%s(%d):  %s:  retaddr from frame %p is 0\n", __FUNCTION__, __LINE__, curthread->td_name, (void*)frame.f_frame);
#endif
				break;
			}
#if SAMPLE_DEBUG > 2
			printf("%s(%d):  f_frame = %ld, f_retaddr = %ld\n", __FUNCTION__, __LINE__, (long)frame.f_frame, (long)frame.f_retaddr);
#endif
			start_pc[num_ustacks++] = (caddr_t)frame.f_retaddr;
			if ((void*)frame.f_frame < bp) {
				break;
			}
                }
        } else {
Example #22
/*
 * Abort handler.
 *
 * FAR, FSR, and everything else that can be lost once interrupts are
 * enabled must be read before enabling them. Note that once interrupts
 * are enabled, we could even migrate to another CPU ...
 *
 * TODO: move quick cases to ASM
 */
void
abort_handler(struct trapframe *tf, int prefetch)
{
	struct thread *td;
	vm_offset_t far, va;
	int idx, rv;
	uint32_t fsr;
	struct ksig ksig;
	struct proc *p;
	struct pcb *pcb;
	struct vm_map *map;
	struct vmspace *vm;
	vm_prot_t ftype;
	bool usermode;
#ifdef INVARIANTS
	void *onfault;
#endif
	td = curthread;
	fsr = (prefetch) ? cp15_ifsr_get(): cp15_dfsr_get();
#if __ARM_ARCH >= 7
	far = (prefetch) ? cp15_ifar_get() : cp15_dfar_get();
#else
	far = (prefetch) ? TRAPF_PC(tf) : cp15_dfar_get();
#endif

	idx = FSR_TO_FAULT(fsr);
	usermode = TRAPF_USERMODE(tf);	/* Abort came from user mode? */
	if (usermode)
		td->td_frame = tf;

	CTR6(KTR_TRAP, "%s: fsr %#x (idx %u) far %#x prefetch %u usermode %d",
	    __func__, fsr, idx, far, prefetch, usermode);

	/*
	 * Firstly, handle aborts that are not directly related to mapping.
	 */
	if (__predict_false(idx == FAULT_EA_IMPREC)) {
		abort_imprecise(tf, fsr, prefetch, usermode);
		return;
	}

	if (__predict_false(idx == FAULT_DEBUG)) {
		abort_debug(tf, fsr, prefetch, usermode, far);
		return;
	}

	/*
	 * ARM has a set of unprivileged load and store instructions
	 * (LDRT/LDRBT/STRT/STRBT ...) which are supposed to be used in modes
	 * other than user mode; the OS should recognize their aborts and
	 * behave appropriately. However, there is no reasonable way to do
	 * that in general unless we restrict the handling somehow.
	 *
	 * For now, these instructions are used only in copyin()/copyout()
	 * like functions, where user-mode buffers are checked in advance to
	 * ensure they are not from KVA space. Thus, no action is needed here.
	 */

#ifdef ARM_NEW_PMAP
	rv = pmap_fault(PCPU_GET(curpmap), far, fsr, idx, usermode);
	if (rv == 0) {
		return;
	} else if (rv == EFAULT) {
		call_trapsignal(td, SIGSEGV, SEGV_MAPERR, far);
		userret(td, tf);
		return;
	}
#endif
	/*
	 * Now, when we handled imprecise and debug aborts, the rest of
	 * aborts should be really related to mapping.
	 */

	PCPU_INC(cnt.v_trap);

#ifdef KDB
	if (kdb_active) {
		kdb_reenter();
		goto out;
	}
#endif
	if (__predict_false((td->td_pflags & TDP_NOFAULTING) != 0)) {
		/*
		 * Due to both processor errata and lazy TLB invalidation when
		 * access restrictions are removed from virtual pages, memory
		 * accesses that are allowed by the physical mapping layer may
		 * nonetheless cause one spurious page fault per virtual page.
		 * When the thread is executing a "no faulting" section that
		 * is bracketed by vm_fault_{disable,enable}_pagefaults(),
		 * every page fault is treated as a spurious page fault,
		 * unless it accesses the same virtual address as the most
		 * recent page fault within the same "no faulting" section.
		 */
		if (td->td_md.md_spurflt_addr != far ||
		    (td->td_pflags & TDP_RESETSPUR) != 0) {
			td->td_md.md_spurflt_addr = far;
			td->td_pflags &= ~TDP_RESETSPUR;

			tlb_flush_local(far & ~PAGE_MASK);
			return;
		}
	} else {
		/*
		 * If we get a page fault while in a critical section, then
		 * it is most likely a fatal kernel page fault.  The kernel
		 * is already going to panic trying to get a sleep lock to
		 * do the VM lookup, so just consider it a fatal trap so the
		 * kernel can print out a useful trap message and even get
		 * to the debugger.
		 *
		 * If we get a page fault while holding a non-sleepable
		 * lock, then it is most likely a fatal kernel page fault.
		 * If WITNESS is enabled, then it's going to whine about
		 * bogus LORs with various VM locks, so just skip to the
		 * fatal trap handling directly.
		 */
		if (td->td_critnest != 0 ||
		    WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL,
		    "Kernel page fault") != 0) {
			abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
			return;
		}
	}

	/* Re-enable interrupts if they were enabled previously. */
	if (td->td_md.md_spinlock_count == 0) {
		if (__predict_true((tf->tf_spsr & PSR_I) == 0))
			enable_interrupts(PSR_I);
		if (__predict_true((tf->tf_spsr & PSR_F) == 0))
			enable_interrupts(PSR_F);
	}

	p = td->td_proc;
	if (usermode) {
		td->td_pticks = 0;
		if (td->td_cowgen != p->p_cowgen)
			thread_cow_update(td);
	}

	/* Invoke the appropriate handler, if necessary. */
	if (__predict_false(aborts[idx].func != NULL)) {
		if ((aborts[idx].func)(tf, idx, fsr, far, prefetch, td, &ksig))
			goto do_trapsignal;
		goto out;
	}

	/*
	 * Don't pass faulting cache operation to vm_fault(). We don't want
	 * to handle all vm stuff at this moment.
	 */
	pcb = td->td_pcb;
	if (__predict_false(pcb->pcb_onfault == cachebailout)) {
		tf->tf_r0 = far;		/* return failing address */
		tf->tf_pc = (register_t)pcb->pcb_onfault;
		return;
	}

	/* Handle remaining I-cache aborts. */
	if (idx == FAULT_ICACHE) {
		if (abort_icache(tf, idx, fsr, far, prefetch, td, &ksig))
			goto do_trapsignal;
		goto out;
	}

	/*
	 * At this point, we're dealing with one of the following aborts:
	 *
	 *  FAULT_TRAN_xx  - Translation
	 *  FAULT_PERM_xx  - Permission
	 *
	 * These are the main virtual memory-related faults signalled by
	 * the MMU.
	 */

	/* fusubailout is used by [fs]uswintr to avoid page faulting. */
	pcb = td->td_pcb;
	if (__predict_false(pcb->pcb_onfault == fusubailout)) {
		tf->tf_r0 = EFAULT;
		tf->tf_pc = (register_t)pcb->pcb_onfault;
		return;
	}

	va = trunc_page(far);
	if (va >= KERNBASE) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode)
			goto nogo;

		map = kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory. If curproc
		 * is NULL or curproc->p_vmspace is NULL the fault is fatal.
		 */
		vm = (p != NULL) ? p->p_vmspace : NULL;
		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;
		if (!usermode && (td->td_intr_nesting_level != 0 ||
		    pcb->pcb_onfault == NULL)) {
			abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
			return;
		}
	}

	ftype = (fsr & FSR_WNR) ? VM_PROT_WRITE : VM_PROT_READ;
	if (prefetch)
		ftype |= VM_PROT_EXECUTE;

#ifdef DEBUG
	last_fault_code = fsr;
#endif

#ifndef ARM_NEW_PMAP
	if (pmap_fault_fixup(vmspace_pmap(td->td_proc->p_vmspace), va, ftype,
	    usermode)) {
		goto out;
	}
#endif

#ifdef INVARIANTS
	onfault = pcb->pcb_onfault;
	pcb->pcb_onfault = NULL;
#endif

	/* Fault in the page. */
	rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);

#ifdef INVARIANTS
	pcb->pcb_onfault = onfault;
#endif

	if (__predict_true(rv == KERN_SUCCESS))
		goto out;
nogo:
	if (!usermode) {
		if (td->td_intr_nesting_level == 0 &&
		    pcb->pcb_onfault != NULL) {
			tf->tf_r0 = rv;
			tf->tf_pc = (int)pcb->pcb_onfault;
			return;
		}
		CTR2(KTR_TRAP, "%s: vm_fault() failed with %d", __func__, rv);
		abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
		return;
	}

	ksig.sig = SIGSEGV;
	ksig.code = (rv == KERN_PROTECTION_FAILURE) ? SEGV_ACCERR : SEGV_MAPERR;
	ksig.addr = far;

do_trapsignal:
	call_trapsignal(td, ksig.sig, ksig.code, ksig.addr);
out:
	if (usermode)
		userret(td, tf);
}
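
abort_handler() and abort_fatal() dispatch through an aborts[] table indexed by the FSR fault code (aborts[idx].func, aborts[idx].desc). The entries plausibly have this shape, matching the handler signatures shown on this page; treat it as a sketch rather than the real definition:

/* Hypothetical shape of the dispatch table used above. */
struct abort {
	int	(*func)(struct trapframe *, u_int, u_int, u_int, u_int,
		    struct thread *, struct ksig *);
	const char *desc;
};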
Example #23
static int
p4_intr(int cpu, struct trapframe *tf)
{
	uint32_t cccrval, ovf_mask, ovf_partner;
	int did_interrupt, error, ri;
	struct p4_cpu *pc;
	struct pmc *pm;
	pmc_value_t v;

	PMCDBG(MDP,INT, 1, "cpu=%d tf=0x%p um=%d", cpu, (void *) tf,
	    TRAPF_USERMODE(tf));

	pc = p4_pcpu[P4_TO_HTT_PRIMARY(cpu)];

	ovf_mask = P4_CPU_IS_HTT_SECONDARY(cpu) ?
	    P4_CCCR_OVF_PMI_T1 : P4_CCCR_OVF_PMI_T0;
	ovf_mask |= P4_CCCR_OVF;
	if (p4_system_has_htt)
		ovf_partner = P4_CPU_IS_HTT_SECONDARY(cpu) ?
		    P4_CCCR_OVF_PMI_T0 : P4_CCCR_OVF_PMI_T1;
	else
		ovf_partner = 0;
	did_interrupt = 0;

	if (p4_system_has_htt)
		P4_PCPU_ACQ_INTR_SPINLOCK(pc);

	/*
	 * Loop through all CCCRs, looking for ones that have
	 * interrupted this CPU.
	 */
	for (ri = 0; ri < P4_NPMCS; ri++) {

		/*
		 * Check if our partner logical CPU has already marked
		 * this PMC as having interrupted it.  If so, reset
		 * the flag and process the interrupt, but leave the
		 * hardware alone.
		 */
		if (p4_system_has_htt && P4_PCPU_GET_INTRFLAG(pc,ri)) {
			P4_PCPU_SET_INTRFLAG(pc,ri,0);
			did_interrupt = 1;

			/*
			 * Ignore de-configured or stopped PMCs.
			 * Ignore PMCs not in sampling mode.
			 */
			pm = pc->pc_p4pmcs[ri].phw_pmc;
			if (pm == NULL ||
			    pm->pm_state != PMC_STATE_RUNNING ||
			    !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
				continue;
			}
			(void) pmc_process_interrupt(cpu, PMC_HR, pm, tf,
			    TRAPF_USERMODE(tf));
			continue;
		}

		/*
		 * Fresh interrupt.  Look for the CCCR_OVF bit
		 * and the OVF_Tx bit for this logical
		 * processor being set.
		 */
		cccrval = rdmsr(P4_CCCR_MSR_FIRST + ri);

		if ((cccrval & ovf_mask) != ovf_mask)
			continue;

		/*
		 * If the other logical CPU would also have been
		 * interrupted due to the PMC being shared, record
		 * this fact in the per-cpu saved interrupt flag
		 * bitmask.
		 */
		if (p4_system_has_htt && (cccrval & ovf_partner))
			P4_PCPU_SET_INTRFLAG(pc, ri, 1);

		v = rdmsr(P4_PERFCTR_MSR_FIRST + ri);

		PMCDBG(MDP,INT, 2, "ri=%d v=%jx", ri, v);

		/* Stop the counter and reset the overflow bit. */
		cccrval &= ~(P4_CCCR_OVF | P4_CCCR_ENABLE);
		wrmsr(P4_CCCR_MSR_FIRST + ri, cccrval);

		did_interrupt = 1;

		/*
		 * Ignore de-configured or stopped PMCs.  Ignore PMCs
		 * not in sampling mode.
		 */
		pm = pc->pc_p4pmcs[ri].phw_pmc;

		if (pm == NULL ||
		    pm->pm_state != PMC_STATE_RUNNING ||
		    !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
			continue;
		}

		/*
		 * Process the interrupt.  Re-enable the PMC if
		 * processing was successful.
		 */
		error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
		    TRAPF_USERMODE(tf));

		/*
		 * Only the first processor executing the NMI handler
		 * in a HTT pair will restart a PMC, and that too
		 * only if there were no errors.
		 */
		v = P4_RELOAD_COUNT_TO_PERFCTR_VALUE(
			pm->pm_sc.pm_reloadcount);
		wrmsr(P4_PERFCTR_MSR_FIRST + ri, v);
		if (error == 0)
			wrmsr(P4_CCCR_MSR_FIRST + ri,
			    cccrval | P4_CCCR_ENABLE);
	}

	/* allow the other CPU to proceed */
	if (p4_system_has_htt)
		P4_PCPU_REL_INTR_SPINLOCK(pc);

	/*
	 * On Intel P4 CPUs, the PMC 'pcint' entry in the LAPIC gets
	 * masked when a PMC interrupts the CPU.  We need to unmask
	 * the interrupt source explicitly.
	 */

	if (did_interrupt)
		lapic_reenable_pmc();

	atomic_add_int(did_interrupt ? &pmc_stats.pm_intr_processed :
	    &pmc_stats.pm_intr_ignored, 1);

	return (did_interrupt);
}
Example #24
/*
 * Process an asynchronous software trap.
 * This is relatively easy.
 * This function will return with preemption disabled.
 */
void
ast(struct trapframe *framep)
{
	struct thread *td;
	struct proc *p;
	int flags;
	int sig;

	td = curthread;
	p = td->td_proc;

	CTR3(KTR_SYSC, "ast: thread %p (pid %d, %s)", td, p->p_pid,
	    p->p_comm);
	KASSERT(TRAPF_USERMODE(framep), ("ast in kernel mode"));
	WITNESS_WARN(WARN_PANIC, NULL, "Returning to user mode");
	mtx_assert(&Giant, MA_NOTOWNED);
	THREAD_LOCK_ASSERT(td, MA_NOTOWNED);
	td->td_frame = framep;
	td->td_pticks = 0;

	/*
	 * This updates the td_flag's for the checks below in one
	 * "atomic" operation with turning off the astpending flag.
	 * If another AST is triggered while we are handling the
	 * AST's saved in flags, the astpending flag will be set and
	 * ast() will be called again.
	 */
	thread_lock(td);
	flags = td->td_flags;
	td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK |
	    TDF_NEEDRESCHED | TDF_ALRMPEND | TDF_PROFPEND | TDF_MACPEND);
	thread_unlock(td);
	PCPU_INC(cnt.v_trap);

	if (td->td_ucred != p->p_ucred) 
		cred_update_thread(td);
	if (td->td_pflags & TDP_OWEUPC && p->p_flag & P_PROFIL) {
		addupc_task(td, td->td_profil_addr, td->td_profil_ticks);
		td->td_profil_ticks = 0;
		td->td_pflags &= ~TDP_OWEUPC;
	}
#ifdef HWPMC_HOOKS
	/* Handle Software PMC callchain capture. */
	if (PMC_IS_PENDING_CALLCHAIN(td))
		PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_USER_CALLCHAIN_SOFT, (void *) framep);
#endif
	if (flags & TDF_ALRMPEND) {
		PROC_LOCK(p);
		kern_psignal(p, SIGVTALRM);
		PROC_UNLOCK(p);
	}
	if (flags & TDF_PROFPEND) {
		PROC_LOCK(p);
		kern_psignal(p, SIGPROF);
		PROC_UNLOCK(p);
	}
#ifdef MAC
	if (flags & TDF_MACPEND)
		mac_thread_userret(td);
#endif
	if (flags & TDF_NEEDRESCHED) {
#ifdef KTRACE
		if (KTRPOINT(td, KTR_CSW))
			ktrcsw(1, 1, __func__);
#endif
		thread_lock(td);
		sched_prio(td, td->td_user_pri);
		mi_switch(SW_INVOL | SWT_NEEDRESCHED, NULL);
		thread_unlock(td);
#ifdef KTRACE
		if (KTRPOINT(td, KTR_CSW))
			ktrcsw(0, 1, __func__);
#endif
	}

	/*
	 * Check for signals. Unlocked reads of p_pendingcnt or
	 * p_siglist might cause a process-directed signal to be handled
	 * later.
	 */
	if (flags & TDF_NEEDSIGCHK || p->p_pendingcnt > 0 ||
	    !SIGISEMPTY(p->p_siglist)) {
		PROC_LOCK(p);
		mtx_lock(&p->p_sigacts->ps_mtx);
		while ((sig = cursig(td)) != 0)
			postsig(sig);
		mtx_unlock(&p->p_sigacts->ps_mtx);
		PROC_UNLOCK(p);
	}
	/*
	 * We need to check to see if we have to exit or wait due to a
	 * single threading requirement or some other STOP condition.
	 */
	if (flags & TDF_NEEDSUSPCHK) {
		PROC_LOCK(p);
		thread_suspend_check(0);
		PROC_UNLOCK(p);
	}

	if (td->td_pflags & TDP_OLDMASK) {
		td->td_pflags &= ~TDP_OLDMASK;
		kern_sigprocmask(td, SIG_SETMASK, &td->td_oldsigmask, NULL, 0);
	}

	userret(td, framep);
}
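
ast() only runs when something has set TDF_ASTPENDING on the thread, as pmc_soft_intr() (Example #15) does from interrupt context. A minimal sketch of requesting an AST, assuming td_flags is protected by the thread lock as in ast() itself:

	thread_lock(td);
	td->td_flags |= TDF_ASTPENDING;
	thread_unlock(td);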
Example #25
static int
mips_pmc_intr(int cpu, struct trapframe *tf)
{
	int error;
	int retval, ri;
	struct pmc *pm;
	struct mips_cpu *pc;
	uint32_t r0, r2;
	pmc_value_t r;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[mips,%d] CPU %d out of range", __LINE__, cpu));

	retval = 0;
	pc = mips_pcpu[cpu];

	/* Stop PMCs without clearing the counter */
	r0 = mips_rd_perfcnt0();
	mips_wr_perfcnt0(r0 & ~(0x1f));
	r2 = mips_rd_perfcnt2();
	mips_wr_perfcnt2(r2 & ~(0x1f));

	for (ri = 0; ri < mips_npmcs; ri++) {
		pm = mips_pcpu[cpu]->pc_mipspmcs[ri].phw_pmc;
		if (pm == NULL)
			continue;
		if (! PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
			continue;

		r = mips_pmcn_read(ri);

		/* If bit 31 is set, the counter has overflowed */
		if ((r & (1UL << (mips_pmc_spec.ps_counter_width - 1))) == 0)
			continue;

		retval = 1;
		if (pm->pm_state != PMC_STATE_RUNNING)
			continue;
		error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
		    TRAPF_USERMODE(tf));
		if (error) {
			/* Clear/disable the relevant counter */
			if (ri == 0)
				r0 = 0;
			else if (ri == 1)
				r2 = 0;
			mips_stop_pmc(cpu, ri);
		}

		/* Reload sampling count */
		mips_write_pmc(cpu, ri, pm->pm_sc.pm_reloadcount);
	}

	/*
	 * Re-enable the PMC counters where they left off.
	 *
	 * Any counter which overflowed will have its sample count
	 * reloaded in the loop above.
	 */
	mips_wr_perfcnt0(r0);
	mips_wr_perfcnt2(r2);

	return retval;
}
Example #26
int
pmc_save_kernel_callchain(uintptr_t *cc, int nframes, struct trapframe *tf)
{
	int n;
	uint32_t instr;
	uintptr_t fp, pc, r, sp, stackstart, stackend;
	struct thread *td;

	KASSERT(TRAPF_USERMODE(tf) == 0, ("[x86,%d] not a kernel backtrace",
	    __LINE__));

	td = curthread;
	pc = PMC_TRAPFRAME_TO_PC(tf);
	fp = PMC_TRAPFRAME_TO_FP(tf);
	sp = PMC_TRAPFRAME_TO_KERNEL_SP(tf);

	*cc++ = pc;
	r = fp + sizeof(uintptr_t); /* points to return address */

	if (nframes <= 1)
		return (1);

	stackstart = (uintptr_t) td->td_kstack;
	stackend = (uintptr_t) td->td_kstack + td->td_kstack_pages * PAGE_SIZE;

	if (PMC_IN_TRAP_HANDLER(pc) ||
	    !PMC_IN_KERNEL(pc) ||
	    !PMC_IN_KERNEL_STACK(r, stackstart, stackend) ||
	    !PMC_IN_KERNEL_STACK(sp, stackstart, stackend) ||
	    !PMC_IN_KERNEL_STACK(fp, stackstart, stackend))
		return (1);

	instr = *(uint32_t *) pc;

	/*
	 * Determine whether the interrupted function was in the
	 * processing of either laying down its stack frame or taking
	 * it off.
	 *
	 * If we haven't started laying down a stack frame, or are
	 * just about to return, then our caller's address is at
	 * *sp, and we don't have a frame to unwind.
	 */
	if (PMC_AT_FUNCTION_PROLOGUE_PUSH_BP(instr) ||
	    PMC_AT_FUNCTION_EPILOGUE_RET(instr))
		pc = *(uintptr_t *) sp;
	else if (PMC_AT_FUNCTION_PROLOGUE_MOV_SP_BP(instr)) {
		/*
		 * The code was midway through laying down a frame.
		 * At this point sp[0] has a frame back pointer,
		 * and the caller's address is therefore at sp[1].
		 */
		sp += sizeof(uintptr_t);
		if (!PMC_IN_KERNEL_STACK(sp, stackstart, stackend))
			return (1);
		pc = *(uintptr_t *) sp;
	} else {
		/*
		 * Not in the function prologue or epilogue.
		 */
		pc = *(uintptr_t *) r;
		fp = *(uintptr_t *) fp;
	}

	for (n = 1; n < nframes; n++) {
		*cc++ = pc;

		if (PMC_IN_TRAP_HANDLER(pc))
			break;

		r = fp + sizeof(uintptr_t);
		if (!PMC_IN_KERNEL_STACK(fp, stackstart, stackend) ||
		    !PMC_IN_KERNEL_STACK(r, stackstart, stackend))
			break;
		pc = *(uintptr_t *) r;
		fp = *(uintptr_t *) fp;
	}

	return (n);
}
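
The PMC_AT_FUNCTION_PROLOGUE_*/EPILOGUE_* macros above classify the instruction bytes fetched from pc. Purely as an illustration of the idea on x86 (these are not the real macro definitions):

/* Illustrative x86 byte checks: "push %rbp" is 0x55, "ret" is 0xc3. */
#define	AT_PROLOGUE_PUSH_BP(instr)	(((instr) & 0xff) == 0x55)
#define	AT_EPILOGUE_RET(instr)		(((instr) & 0xff) == 0xc3)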
Example #27
static int
mpc7xxx_intr(int cpu, struct trapframe *tf)
{
	int i, error, retval;
	uint32_t config;
	struct pmc *pm;
	struct powerpc_cpu *pac;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[powerpc,%d] out of range CPU %d", __LINE__, cpu));

	PMCDBG(MDP,INT,1, "cpu=%d tf=%p um=%d", cpu, (void *) tf,
	    TRAPF_USERMODE(tf));

	retval = 0;

	pac = powerpc_pcpu[cpu];

	config  = mfspr(SPR_MMCR0) & ~SPR_MMCR0_FC;

	/*
	 * look for all PMCs that have interrupted:
	 * - look for a running, sampling PMC which has overflowed
	 *   and which has a valid 'struct pmc' association
	 *
	 * If found, we call a helper to process the interrupt.
	 */

	for (i = 0; i < MPC7XXX_MAX_PMCS; i++) {
		if ((pm = pac->pc_ppcpmcs[i].phw_pmc) == NULL ||
		    !PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm))) {
			continue;
		}

		if (!MPC7XXX_PMC_HAS_OVERFLOWED(i))
			continue;

		retval = 1;	/* Found an interrupting PMC. */

		if (pm->pm_state != PMC_STATE_RUNNING)
			continue;

		/* Stop the counter if logging fails. */
		error = pmc_process_interrupt(cpu, PMC_HR, pm, tf,
		    TRAPF_USERMODE(tf));
		if (error != 0)
			mpc7xxx_stop_pmc(cpu, i);

		/* reload count. */
		mpc7xxx_write_pmc(cpu, i, pm->pm_sc.pm_reloadcount);
	}

	atomic_add_int(retval ? &pmc_stats.pm_intr_processed :
	    &pmc_stats.pm_intr_ignored, 1);

	/* Re-enable PERF exceptions. */
	if (retval)
		mtspr(SPR_MMCR0, config | SPR_MMCR0_PMXE);

	return (retval);
}
Example #28
void
trap(struct trapframe *frame)
{
#ifdef KDTRACE_HOOKS
	struct reg regs;
#endif
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
#ifdef KDB
	register_t dr6;
#endif
	int i = 0, ucode = 0;
	u_int type;
	register_t addr = 0;
	ksiginfo_t ksi;

	VM_CNT_INC(v_trap);
	type = frame->tf_trapno;

#ifdef SMP
	/* Handler for NMI IPIs used for stopping CPUs. */
	if (type == T_NMI) {
		if (ipi_nmi_handler() == 0)
			goto out;
	}
#endif /* SMP */

#ifdef KDB
	if (kdb_active) {
		kdb_reenter();
		goto out;
	}
#endif

	if (type == T_RESERVED) {
		trap_fatal(frame, 0);
		goto out;
	}

	if (type == T_NMI) {
#ifdef HWPMC_HOOKS
		/*
		 * CPU PMCs interrupt using an NMI.  If the PMC module is
		 * active, pass the 'rip' value to the PMC module's interrupt
		 * handler.  A non-zero return value from the handler means that
		 * the NMI was consumed by it and we can return immediately.
		 */
		if (pmc_intr != NULL &&
		    (*pmc_intr)(PCPU_GET(cpuid), frame) != 0)
			goto out;
#endif

#ifdef STACK
		if (stack_nmi_handler(frame) != 0)
			goto out;
#endif
	}

	if (type == T_MCHK) {
		mca_intr();
		goto out;
	}

	if ((frame->tf_rflags & PSL_I) == 0) {
		/*
		 * Buggy application or kernel code has disabled
		 * interrupts and then trapped.  Enabling interrupts
		 * now is wrong, but it is better than running with
		 * interrupts disabled until they are accidentally
		 * enabled later.
		 */
		if (TRAPF_USERMODE(frame))
			uprintf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curthread->td_name, type);
		else if (type != T_NMI && type != T_BPTFLT &&
		    type != T_TRCTRAP) {
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			printf("kernel trap %d with interrupts disabled\n",
			    type);

			/*
			 * We shouldn't enable interrupts while holding a
			 * spin lock.
			 */
			if (td->td_md.md_spinlock_count == 0)
				enable_intr();
		}
	}

	if (TRAPF_USERMODE(frame)) {
		/* user trap */

		td->td_pticks = 0;
		td->td_frame = frame;
		addr = frame->tf_rip;
		if (td->td_cowgen != p->p_cowgen)
			thread_cow_update(td);

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			i = SIGILL;
			ucode = ILL_PRVOPC;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			enable_intr();
#ifdef KDTRACE_HOOKS
			if (type == T_BPTFLT) {
				fill_frame_regs(frame, &regs);
				if (dtrace_pid_probe_ptr != NULL &&
				    dtrace_pid_probe_ptr(&regs) == 0)
					goto out;
			}
#endif
			frame->tf_rflags &= ~PSL_T;
			i = SIGTRAP;
			ucode = (type == T_TRCTRAP ? TRAP_TRACE : TRAP_BRKPT);
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = fputrap_x87();
			if (ucode == -1)
				goto userout;
			i = SIGFPE;
			break;

		case T_PROTFLT:		/* general protection fault */
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;
		case T_STKFLT:		/* stack fault */
		case T_SEGNPFLT:	/* segment not present fault */
			i = SIGBUS;
			ucode = BUS_ADRERR;
			break;
		case T_TSSFLT:		/* invalid TSS fault */
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;
		case T_ALIGNFLT:
			i = SIGBUS;
			ucode = BUS_ADRALN;
			break;
		case T_DOUBLEFLT:	/* double fault */
		default:
			i = SIGBUS;
			ucode = BUS_OBJERR;
			break;

		case T_PAGEFLT:		/* page fault */
			/*
			 * Can the emulator take care of this trap?
			 */
			if (*p->p_sysent->sv_trap != NULL &&
			    (*p->p_sysent->sv_trap)(td) == 0)
				goto userout;

			addr = frame->tf_addr;
			i = trap_pfault(frame, TRUE);
			if (i == -1)
				goto userout;
			if (i == 0)
				goto user;

			if (i == SIGSEGV)
				ucode = SEGV_MAPERR;
			else {
				if (prot_fault_translation == 0) {
					/*
					 * Autodetect.
					 * This check also covers the images
					 * without the ABI-tag ELF note.
					 */
					if (SV_CURPROC_ABI() == SV_ABI_FREEBSD
					    && p->p_osrel >= P_OSREL_SIGSEGV) {
						i = SIGSEGV;
						ucode = SEGV_ACCERR;
					} else {
						i = SIGBUS;
						ucode = BUS_PAGE_FAULT;
					}
				} else if (prot_fault_translation == 1) {
					/*
					 * Always compat mode.
					 */
					i = SIGBUS;
					ucode = BUS_PAGE_FAULT;
				} else {
					/*
					 * Always SIGSEGV mode.
					 */
					i = SIGSEGV;
					ucode = SEGV_ACCERR;
				}
			}
			break;

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV;
			i = SIGFPE;
			break;

#ifdef DEV_ISA
		case T_NMI:
			nmi_handle_intr(type, frame);
			break;
#endif /* DEV_ISA */

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_FLTSUB;
			i = SIGFPE;
			break;

		case T_DNA:
			/* transparent fault (due to context switch "late") */
			KASSERT(PCB_USER_FPU(td->td_pcb),
			    ("kernel FPU ctx has leaked"));
			fpudna();
			goto userout;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = ILL_COPROC;
			i = SIGILL;
			break;

		case T_XMMFLT:		/* SIMD floating-point exception */
			ucode = fputrap_sse();
			if (ucode == -1)
				goto userout;
			i = SIGFPE;
			break;
#ifdef KDTRACE_HOOKS
		case T_DTRACE_RET:
			enable_intr();
			fill_frame_regs(frame, &regs);
			if (dtrace_return_probe_ptr != NULL &&
			    dtrace_return_probe_ptr(&regs) == 0)
				goto out;
			break;
#endif
		}
	} else {
		/* kernel trap */

		KASSERT(cold || td->td_ucred != NULL,
		    ("kernel trap doesn't have ucred"));
		switch (type) {
		case T_PAGEFLT:			/* page fault */
			(void) trap_pfault(frame, FALSE);
			goto out;

		case T_DNA:
			if (PCB_USER_FPU(td->td_pcb))
				panic("Unregistered use of FPU in kernel");
			fpudna();
			goto out;

		case T_ARITHTRAP:	/* arithmetic trap */
		case T_XMMFLT:		/* SIMD floating-point exception */
		case T_FPOPFLT:		/* FPU operand fetch fault */
			/*
			 * For now, supporting kernel handler
			 * registration for FPU traps is overkill.
			 */
			trap_fatal(frame, 0);
			goto out;

		case T_STKFLT:		/* stack fault */
		case T_PROTFLT:		/* general protection fault */
		case T_SEGNPFLT:	/* segment not present fault */
			if (td->td_intr_nesting_level != 0)
				break;

			/*
			 * Invalid segment selectors and out of bounds
			 * %rip's and %rsp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
			if (frame->tf_rip == (long)doreti_iret) {
				frame->tf_rip = (long)doreti_iret_fault;
				goto out;
			}
			if (frame->tf_rip == (long)ld_ds) {
				frame->tf_rip = (long)ds_load_fault;
				goto out;
			}
			if (frame->tf_rip == (long)ld_es) {
				frame->tf_rip = (long)es_load_fault;
				goto out;
			}
			if (frame->tf_rip == (long)ld_fs) {
				frame->tf_rip = (long)fs_load_fault;
				goto out;
			}
			if (frame->tf_rip == (long)ld_gs) {
				frame->tf_rip = (long)gs_load_fault;
				goto out;
			}
			if (frame->tf_rip == (long)ld_gsbase) {
				frame->tf_rip = (long)gsbase_load_fault;
				goto out;
			}
			if (frame->tf_rip == (long)ld_fsbase) {
				frame->tf_rip = (long)fsbase_load_fault;
				goto out;
			}
			if (curpcb->pcb_onfault != NULL) {
				frame->tf_rip = (long)curpcb->pcb_onfault;
				goto out;
			}
			break;

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame->tf_rflags & PSL_NT) {
				frame->tf_rflags &= ~PSL_NT;
				goto out;
			}
			break;

		case T_TRCTRAP:	 /* trace trap */
			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap()) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't
				 */
				load_dr6(rdr6() & ~0xf);
				goto out;
			}
			/*
			 * FALLTHROUGH (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If KDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
#ifdef KDB
			/* XXX %dr6 is not quite reentrant. */
			dr6 = rdr6();
			load_dr6(dr6 & ~0x4000);
			if (kdb_trap(type, dr6, frame))
				goto out;
#endif
			break;

#ifdef DEV_ISA
		case T_NMI:
			nmi_handle_intr(type, frame);
			goto out;
#endif /* DEV_ISA */
		}

		trap_fatal(frame, 0);
		goto out;
	}

	/* Translate fault for emulators (e.g. Linux) */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	ksiginfo_init_trap(&ksi);
	ksi.ksi_signo = i;
	ksi.ksi_code = ucode;
	ksi.ksi_trapno = type;
	ksi.ksi_addr = (void *)addr;
	if (uprintf_signal) {
		uprintf("pid %d comm %s: signal %d err %lx code %d type %d "
		    "addr 0x%lx rsp 0x%lx rip 0x%lx "
		    "<%02x %02x %02x %02x %02x %02x %02x %02x>\n",
		    p->p_pid, p->p_comm, i, frame->tf_err, ucode, type, addr,
		    frame->tf_rsp, frame->tf_rip,
		    fubyte((void *)(frame->tf_rip + 0)),
		    fubyte((void *)(frame->tf_rip + 1)),
		    fubyte((void *)(frame->tf_rip + 2)),
		    fubyte((void *)(frame->tf_rip + 3)),
		    fubyte((void *)(frame->tf_rip + 4)),
		    fubyte((void *)(frame->tf_rip + 5)),
		    fubyte((void *)(frame->tf_rip + 6)),
		    fubyte((void *)(frame->tf_rip + 7)));
	}
	KASSERT((read_rflags() & PSL_I) != 0, ("interrupts disabled"));
	trapsignal(td, &ksi);

user:
	userret(td, frame);
	KASSERT(PCB_USER_FPU(td->td_pcb),
	    ("Return from trap with kernel FPU ctx leaked"));
userout:
out:
	return;
}
Example #29
void
interrupt(u_int64_t vector, struct trapframe *framep)
{
	struct thread *td;
	volatile struct ia64_interrupt_block *ib = IA64_INTERRUPT_BLOCK;

	td = curthread;
	atomic_add_int(&td->td_intr_nesting_level, 1);

	/*
	 * Handle ExtINT interrupts by generating an INTA cycle to
	 * read the vector.
	 */
	if (vector == 0) {
		vector = ib->ib_inta;
		printf("ExtINT interrupt: vector=%ld\n", vector);
	}

	if (vector == 255) {		/* clock interrupt */
		/* CTR0(KTR_INTR, "clock interrupt"); */

		cnt.v_intr++;
#ifdef EVCNT_COUNTERS
		clock_intr_evcnt.ev_count++;
#else
		intrcnt[INTRCNT_CLOCK]++;
#endif
		critical_enter();
#ifdef SMP
		clks[PCPU_GET(cpuid)]++;
		/* Only the BSP runs the real clock */
		if (PCPU_GET(cpuid) == 0) {
#endif
			handleclock(framep);
			/* divide hz (1024) by 8 to get stathz (128) */
			if ((++schedclk2 & 0x7) == 0)
				statclock((struct clockframe *)framep);
#ifdef SMP
		} else {
			ia64_set_itm(ia64_get_itc() + itm_reload);
			mtx_lock_spin(&sched_lock);
			hardclock_process(curthread, TRAPF_USERMODE(framep));
			if ((schedclk2 & 0x7) == 0)
				statclock_process(curkse, TRAPF_PC(framep),
				    TRAPF_USERMODE(framep));
			mtx_unlock_spin(&sched_lock);
		}
#endif
		critical_exit();
#ifdef SMP
	} else if (vector == ipi_vector[IPI_AST]) {
		asts[PCPU_GET(cpuid)]++;
		CTR1(KTR_SMP, "IPI_AST, cpuid=%d", PCPU_GET(cpuid));
	} else if (vector == ipi_vector[IPI_RENDEZVOUS]) {
		rdvs[PCPU_GET(cpuid)]++;
		CTR1(KTR_SMP, "IPI_RENDEZVOUS, cpuid=%d", PCPU_GET(cpuid));
		smp_rendezvous_action();
	} else if (vector == ipi_vector[IPI_STOP]) {
		u_int32_t mybit = PCPU_GET(cpumask);

		CTR1(KTR_SMP, "IPI_STOP, cpuid=%d", PCPU_GET(cpuid));
		savectx(PCPU_GET(pcb));
		stopped_cpus |= mybit;
		while ((started_cpus & mybit) == 0)
			/* spin */;
		started_cpus &= ~mybit;
		stopped_cpus &= ~mybit;
		if (PCPU_GET(cpuid) == 0 && cpustop_restartfunc != NULL) {
			void (*f)(void) = cpustop_restartfunc;
			cpustop_restartfunc = NULL;
			(*f)();
		}
	} else if (vector == ipi_vector[IPI_TEST]) {
		CTR1(KTR_SMP, "IPI_TEST, cpuid=%d", PCPU_GET(cpuid));
		mp_ipi_test++;
#endif
	} else {
		ints[PCPU_GET(cpuid)]++;
		ia64_dispatch_intr(framep, vector);
	}

	atomic_subtract_int(&td->td_intr_nesting_level, 1);
}