/* Per-CPU timer2 handler. */
static int
statclockhandler(struct trapframe *frame)
{

	timer2clock(TRAPF_USERMODE(frame), TRAPF_PC(frame));
	return (FILTER_HANDLED);
}
int
hardclockintr(struct trapframe *frame)
{

	if (PCPU_GET(cpuid) == 0)
		hardclock(TRAPF_USERMODE(frame), TRAPF_PC(frame));
	else
		hardclock_cpu(TRAPF_USERMODE(frame));
	return (FILTER_HANDLED);
}
static int
clock_intr(void *arg)
{
	struct trapframe *fp = arg;

	atomic_add_32(&s3c24x0_base, timer4_reload_value);
	hardclock(TRAPF_USERMODE(fp), TRAPF_PC(fp));
	return (FILTER_HANDLED);
}
int
profclockintr(struct trapframe *frame)
{

	if (!using_atrtc_timer)
		hardclockintr(frame);
	if (profprocs != 0)
		profclock(TRAPF_USERMODE(frame), TRAPF_PC(frame));
	return (FILTER_HANDLED);
}
/*
 * ixpclk_intr:
 *
 *	Handle the hardclock interrupt.
 */
int
ixpclk_intr(void *arg)
{
	struct ixpclk_softc *sc = ixpclk_sc;
	struct trapframe *frame = arg;

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IXP425_OST_STATUS,
	    OST_TIM0_INT);

	hardclock(TRAPF_USERMODE(frame), TRAPF_PC(frame));
	return (FILTER_HANDLED);
}
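/*
 * Context (added sketch, not from the original source): a filter handler
 * like ixpclk_intr() is normally installed through the newbus interrupt
 * API.  The helper below is only an illustration -- the softc fields
 * sc_irq_res/sc_intrhand and the helper name are assumptions -- and it
 * relies on the convention that a filter registered with a NULL argument
 * is handed the current trapframe as its "arg".
 */
static int
ixpclk_setup_intr_sketch(device_t dev, struct ixpclk_softc *sc)
{
	int error;

	/* INTR_TYPE_CLK marks this as a clock-class interrupt filter. */
	error = bus_setup_intr(dev, sc->sc_irq_res,
	    INTR_TYPE_CLK | INTR_MPSAFE,
	    ixpclk_intr,	/* filter, runs in primary interrupt context */
	    NULL,		/* no threaded handler */
	    NULL,		/* NULL arg => trapframe is passed to filter */
	    &sc->sc_intrhand);
	if (error != 0)
		device_printf(dev, "could not set up clock interrupt\n");
	return (error);
}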
static int
clock_intr(void *arg)
{
	struct trapframe *fp = arg;

	/* The interrupt is shared, so we have to make sure it's for us. */
	if (RD4(ST_SR) & ST_SR_PITS) {
		hardclock(TRAPF_USERMODE(fp), TRAPF_PC(fp));
		return (FILTER_HANDLED);
	}
	return (FILTER_STRAY);
}
/*
 * Define the code needed before returning to user mode, for trap and
 * syscall.
 */
void
userret(struct thread *td, struct trapframe *frame)
{
	struct proc *p = td->td_proc;

	CTR3(KTR_SYSC, "userret: thread %p (pid %d, %s)", td, p->p_pid,
	    td->td_name);
	KASSERT((p->p_flag & P_WEXIT) == 0,
	    ("Exiting process returns to usermode"));
#if 0
#ifdef DIAGNOSTIC
	/* Check that we called signotify() enough. */
	PROC_LOCK(p);
	thread_lock(td);
	if (SIGPENDING(td) && ((td->td_flags & TDF_NEEDSIGCHK) == 0 ||
	    (td->td_flags & TDF_ASTPENDING) == 0))
		printf("failed to set signal flags properly for ast()\n");
	thread_unlock(td);
	PROC_UNLOCK(p);
#endif
#endif
#ifdef KTRACE
	KTRUSERRET(td);
#endif

	/*
	 * If this thread tickled GEOM, we need to wait for the giggling to
	 * stop before we return to userland.
	 */
	if (td->td_pflags & TDP_GEOM)
		g_waitidle();

	/*
	 * Charge system time if profiling.
	 */
	if (p->p_flag & P_PROFIL)
		addupc_task(td, TRAPF_PC(frame), td->td_pticks * psratio);

	/*
	 * Let the scheduler adjust our priority etc.
	 */
	sched_userret(td);

	KASSERT(td->td_locks == 0,
	    ("userret: Returning with %d locks held.", td->td_locks));
#ifdef VIMAGE
	/* Unfortunately td_vnet_lpush needs VNET_DEBUG. */
	VNET_ASSERT(curvnet == NULL,
	    ("%s: Returning on td %p (pid %d, %s) with vnet %p set in %s",
	    __func__, td, p->p_pid, td->td_name, curvnet,
	    (td->td_vnet_lpush != NULL) ? td->td_vnet_lpush : "N/A"));
#endif
#ifdef XEN
	PT_UPDATES_FLUSH();
#endif
}
/*
 * This function is the one registered by the machine-dependent
 * initialiser as the callback for high speed timer events.
 */
static void
cyclic_clock(struct trapframe *frame)
{
	cpu_t *c = &solaris_cpu[curcpu];

	if (c->cpu_cyclic != NULL && gethrtime() >= exp_due[curcpu]) {
		if (TRAPF_USERMODE(frame)) {
			c->cpu_profile_pc = 0;
			c->cpu_profile_upc = TRAPF_PC(frame);
		} else {
			c->cpu_profile_pc = TRAPF_PC(frame);
			c->cpu_profile_upc = 0;
		}

		c->cpu_intr_actv = 1;

		/* Fire any timers that are due. */
		cyclic_fire(c);

		c->cpu_intr_actv = 0;
	}
}
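/*
 * Added sketch, not from the original source: cyclic_clock() above compares
 * gethrtime() against exp_due[curcpu], the per-CPU deadline kept by the
 * cyclic backend.  A plausible machine-dependent "reprogram" callback that
 * fills that array might look like this; the function name is illustrative
 * and the cyb_arg_t/hrtime_t types follow the cyclic backend interface.
 */
static void
cyclic_reprogram_sketch(cyb_arg_t arg __unused, hrtime_t exp)
{

	/* Record when the next cyclic on this CPU is due to fire. */
	exp_due[curcpu] = exp;
}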
static int
pit_intr(void *arg)
{
	struct trapframe *fp = arg;
	uint32_t icnt;

	if (RD4(sc, PIT_SR) & PIT_PITS_DONE) {
		icnt = RD4(sc, PIT_PIVR) >> 20;

		/* Just add in the overflows we just read. */
		timecount += PIT_PIV(RD4(sc, PIT_MR)) * icnt;

		hardclock(TRAPF_USERMODE(fp), TRAPF_PC(fp));
		return (FILTER_HANDLED);
	}
	return (FILTER_STRAY);
}
static int
clock_intr(void *arg)
{
	struct trapframe *fp = arg;

	/* The interrupt is shared, so we have to make sure it's for us. */
	if (RD4(ST_SR) & ST_SR_PITS) {
#ifdef SKYEYE_WORKAROUNDS
		tot_count += 32768 / hz;
#endif
		hardclock(TRAPF_USERMODE(fp), TRAPF_PC(fp));
		return (FILTER_HANDLED);
	}
	return (FILTER_STRAY);
}
static int
clock_intr(void *arg)
{
	struct trapframe *fp = arg;

	if (RD4(PIT_SR) & PIT_PITS_DONE) {
		uint32_t pivr = RD4(PIT_PIVR);

		if (PIT_CNT(pivr) > 1)
			printf("cnt = %d\n", PIT_CNT(pivr));
		pit_counter += pit_cycle;
		hardclock(TRAPF_USERMODE(fp), TRAPF_PC(fp));
		return (FILTER_HANDLED);
	}
	return (FILTER_STRAY);
}
/* Per-CPU timer1 handler. */
static int
hardclockhandler(struct trapframe *frame)
{

#ifdef KDTRACE_HOOKS
	/*
	 * If the DTrace hooks are configured and a callback function
	 * has been registered, then call it to process the high speed
	 * timers.
	 */
	int cpu = curcpu;

	if (cyclic_clock_func[cpu] != NULL)
		(*cyclic_clock_func[cpu])(frame);
#endif

	timer1clock(TRAPF_USERMODE(frame), TRAPF_PC(frame));
	return (FILTER_HANDLED);
}
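/*
 * Added sketch, not from the original source: cyclic_clock_func[] is the
 * per-CPU hook array that hardclockhandler() consults.  A cyclic backend
 * could install and remove its callback (cyclic_clock() above) roughly as
 * follows; the helper names are illustrative.
 */
void (*cyclic_clock_func[MAXCPU])(struct trapframe *);

static void
cyclic_hook_enable(void)
{

	/* hardclockhandler() starts forwarding fast-timer ticks. */
	cyclic_clock_func[curcpu] = cyclic_clock;
}

static void
cyclic_hook_disable(void)
{

	/* Stop forwarding ticks to the cyclic subsystem. */
	cyclic_clock_func[curcpu] = NULL;
}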
/*
 * Define the code needed before returning to user mode, for trap and
 * syscall.
 */
void
userret(struct thread *td, struct trapframe *frame)
{
	struct proc *p = td->td_proc;

	CTR3(KTR_SYSC, "userret: thread %p (pid %d, %s)", td, p->p_pid,
	    td->td_name);
	KASSERT((p->p_flag & P_WEXIT) == 0,
	    ("Exiting process returns to usermode"));
#if 0
#ifdef DIAGNOSTIC
	/* Check that we called signotify() enough. */
	PROC_LOCK(p);
	thread_lock(td);
	if (SIGPENDING(td) && ((td->td_flags & TDF_NEEDSIGCHK) == 0 ||
	    (td->td_flags & TDF_ASTPENDING) == 0))
		printf("failed to set signal flags properly for ast()\n");
	thread_unlock(td);
	PROC_UNLOCK(p);
#endif
#endif
#ifdef KTRACE
	KTRUSERRET(td);
#endif

	/*
	 * If this thread tickled GEOM, we need to wait for the giggling to
	 * stop before we return to userland.
	 */
	if (td->td_pflags & TDP_GEOM)
		g_waitidle();

	/*
	 * Charge system time if profiling.
	 */
	if (p->p_flag & P_PROFIL)
		addupc_task(td, TRAPF_PC(frame), td->td_pticks * psratio);

	/*
	 * Let the scheduler adjust our priority etc.
	 */
	sched_userret(td);

#ifdef XEN
	PT_UPDATES_FLUSH();
#endif

	/*
	 * Check for misbehavior.
	 *
	 * If callchain tracing is in progress because of hwpmc(4), skip
	 * the scheduler pinning check; the hwpmc(4) subsystem will, in
	 * fact, collect the callchain at the ast() checkpoint, which is
	 * past userret().
	 */
	WITNESS_WARN(WARN_PANIC, NULL, "userret: returning");
	KASSERT(td->td_critnest == 0,
	    ("userret: Returning in a critical section"));
	KASSERT(td->td_locks == 0,
	    ("userret: Returning with %d locks held", td->td_locks));
	KASSERT(td->td_rw_rlocks == 0,
	    ("userret: Returning with %d rwlocks held in read mode",
	    td->td_rw_rlocks));
	KASSERT((td->td_pflags & TDP_NOFAULTING) == 0,
	    ("userret: Returning with pagefaults disabled"));
	KASSERT(td->td_no_sleeping == 0,
	    ("userret: Returning with sleep disabled"));
	KASSERT(td->td_pinned == 0 || (td->td_pflags & TDP_CALLCHAIN) != 0,
	    ("userret: Returning with pinned thread"));
	KASSERT(td->td_vp_reserv == 0,
	    ("userret: Returning while holding vnode reservation"));
	KASSERT((td->td_flags & TDF_SBDRY) == 0,
	    ("userret: Returning with stop signals deferred"));
#ifdef VIMAGE
	/* Unfortunately td_vnet_lpush needs VNET_DEBUG. */
	VNET_ASSERT(curvnet == NULL,
	    ("%s: Returning on td %p (pid %d, %s) with vnet %p set in %s",
	    __func__, td, p->p_pid, td->td_name, curvnet,
	    (td->td_vnet_lpush != NULL) ? td->td_vnet_lpush : "N/A"));
#endif
#ifdef RACCT
	PROC_LOCK(p);
	while (p->p_throttled == 1)
		msleep(p->p_racct, &p->p_mtx, 0, "racct", 0);
	PROC_UNLOCK(p);
#endif
}
/*
 * Abort handler.
 *
 * FAR, FSR, and everything else that can be lost after enabling
 * interrupts must be grabbed before the interrupts are enabled.
 * Note that once interrupts are enabled, we could even migrate
 * to another CPU ...
 *
 * TODO: move quick cases to ASM
 */
void
abort_handler(struct trapframe *tf, int prefetch)
{
	struct thread *td;
	vm_offset_t far, va;
	int idx, rv;
	uint32_t fsr;
	struct ksig ksig;
	struct proc *p;
	struct pcb *pcb;
	struct vm_map *map;
	struct vmspace *vm;
	vm_prot_t ftype;
	bool usermode;
#ifdef INVARIANTS
	void *onfault;
#endif

	td = curthread;
	fsr = (prefetch) ? cp15_ifsr_get() : cp15_dfsr_get();
#if __ARM_ARCH >= 7
	far = (prefetch) ? cp15_ifar_get() : cp15_dfar_get();
#else
	far = (prefetch) ? TRAPF_PC(tf) : cp15_dfar_get();
#endif

	idx = FSR_TO_FAULT(fsr);
	usermode = TRAPF_USERMODE(tf);	/* Abort came from user mode? */
	if (usermode)
		td->td_frame = tf;

	CTR6(KTR_TRAP, "%s: fsr %#x (idx %u) far %#x prefetch %u usermode %d",
	    __func__, fsr, idx, far, prefetch, usermode);

	/*
	 * Firstly, handle aborts that are not directly related to mapping.
	 */
	if (__predict_false(idx == FAULT_EA_IMPREC)) {
		abort_imprecise(tf, fsr, prefetch, usermode);
		return;
	}

	if (__predict_false(idx == FAULT_DEBUG)) {
		abort_debug(tf, fsr, prefetch, usermode, far);
		return;
	}

	/*
	 * ARM has a set of unprivileged load and store instructions
	 * (LDRT/LDRBT/STRT/STRBT ...) which are supposed to be used in modes
	 * other than user mode, and the OS should recognize their aborts and
	 * behave appropriately.  However, there is no way to do that
	 * reasonably in general unless we restrict the handling somehow.
	 *
	 * For now, these instructions are used only in copyin()/copyout()
	 * like functions, where usermode buffers are checked in advance so
	 * that they are not from KVA space.  Thus, no action is needed here.
	 */

#ifdef ARM_NEW_PMAP
	rv = pmap_fault(PCPU_GET(curpmap), far, fsr, idx, usermode);
	if (rv == 0) {
		return;
	} else if (rv == EFAULT) {
		call_trapsignal(td, SIGSEGV, SEGV_MAPERR, far);
		userret(td, tf);
		return;
	}
#endif

	/*
	 * Now that we have handled imprecise and debug aborts, the remaining
	 * aborts should really be related to mapping.
	 */

	PCPU_INC(cnt.v_trap);

#ifdef KDB
	if (kdb_active) {
		kdb_reenter();
		goto out;
	}
#endif
	if (__predict_false((td->td_pflags & TDP_NOFAULTING) != 0)) {
		/*
		 * Due to both processor errata and lazy TLB invalidation when
		 * access restrictions are removed from virtual pages, memory
		 * accesses that are allowed by the physical mapping layer may
		 * nonetheless cause one spurious page fault per virtual page.
		 * When the thread is executing a "no faulting" section that
		 * is bracketed by vm_fault_{disable,enable}_pagefaults(),
		 * every page fault is treated as a spurious page fault,
		 * unless it accesses the same virtual address as the most
		 * recent page fault within the same "no faulting" section.
		 */
		if (td->td_md.md_spurflt_addr != far ||
		    (td->td_pflags & TDP_RESETSPUR) != 0) {
			td->td_md.md_spurflt_addr = far;
			td->td_pflags &= ~TDP_RESETSPUR;
			tlb_flush_local(far & ~PAGE_MASK);
			return;
		}
	} else {
		/*
		 * If we get a page fault while in a critical section, then
		 * it is most likely a fatal kernel page fault.  The kernel
		 * is already going to panic trying to get a sleep lock to
		 * do the VM lookup, so just consider it a fatal trap so the
		 * kernel can print out a useful trap message and even get
		 * to the debugger.
		 *
		 * If we get a page fault while holding a non-sleepable
		 * lock, then it is most likely a fatal kernel page fault.
		 * If WITNESS is enabled, then it's going to whine about
		 * bogus LORs with various VM locks, so just skip to the
		 * fatal trap handling directly.
		 */
		if (td->td_critnest != 0 ||
		    WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL,
		    "Kernel page fault") != 0) {
			abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
			return;
		}
	}

	/* Re-enable interrupts if they were enabled previously. */
	if (td->td_md.md_spinlock_count == 0) {
		if (__predict_true((tf->tf_spsr & PSR_I) == 0))
			enable_interrupts(PSR_I);
		if (__predict_true((tf->tf_spsr & PSR_F) == 0))
			enable_interrupts(PSR_F);
	}

	p = td->td_proc;
	if (usermode) {
		td->td_pticks = 0;
		if (td->td_cowgen != p->p_cowgen)
			thread_cow_update(td);
	}

	/* Invoke the appropriate handler, if necessary. */
	if (__predict_false(aborts[idx].func != NULL)) {
		if ((aborts[idx].func)(tf, idx, fsr, far, prefetch, td, &ksig))
			goto do_trapsignal;
		goto out;
	}

	/*
	 * Don't pass a faulting cache operation to vm_fault().  We don't
	 * want to handle all vm stuff at this moment.
	 */
	pcb = td->td_pcb;
	if (__predict_false(pcb->pcb_onfault == cachebailout)) {
		tf->tf_r0 = far;		/* return failing address */
		tf->tf_pc = (register_t)pcb->pcb_onfault;
		return;
	}

	/* Handle remaining I-cache aborts. */
	if (idx == FAULT_ICACHE) {
		if (abort_icache(tf, idx, fsr, far, prefetch, td, &ksig))
			goto do_trapsignal;
		goto out;
	}

	/*
	 * At this point, we're dealing with one of the following aborts:
	 *
	 *  FAULT_TRAN_xx  - Translation
	 *  FAULT_PERM_xx  - Permission
	 *
	 * These are the main virtual memory-related faults signalled by
	 * the MMU.
	 */

	/* fusubailout is used by [fs]uswintr to avoid page faulting. */
	pcb = td->td_pcb;
	if (__predict_false(pcb->pcb_onfault == fusubailout)) {
		tf->tf_r0 = EFAULT;
		tf->tf_pc = (register_t)pcb->pcb_onfault;
		return;
	}

	va = trunc_page(far);
	if (va >= KERNBASE) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode)
			goto nogo;

		map = kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.  If curproc
		 * is NULL or curproc->p_vmspace is NULL the fault is fatal.
		 */
		vm = (p != NULL) ? p->p_vmspace : NULL;
		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;
		if (!usermode && (td->td_intr_nesting_level != 0 ||
		    pcb->pcb_onfault == NULL)) {
			abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
			return;
		}
	}

	ftype = (fsr & FSR_WNR) ? VM_PROT_WRITE : VM_PROT_READ;
	if (prefetch)
		ftype |= VM_PROT_EXECUTE;

#ifdef DEBUG
	last_fault_code = fsr;
#endif

#ifndef ARM_NEW_PMAP
	if (pmap_fault_fixup(vmspace_pmap(td->td_proc->p_vmspace), va, ftype,
	    usermode)) {
		goto out;
	}
#endif

#ifdef INVARIANTS
	onfault = pcb->pcb_onfault;
	pcb->pcb_onfault = NULL;
#endif

	/* Fault in the page. */
	rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);

#ifdef INVARIANTS
	pcb->pcb_onfault = onfault;
#endif

	if (__predict_true(rv == KERN_SUCCESS))
		goto out;
nogo:
	if (!usermode) {
		if (td->td_intr_nesting_level == 0 &&
		    pcb->pcb_onfault != NULL) {
			tf->tf_r0 = rv;
			tf->tf_pc = (int)pcb->pcb_onfault;
			return;
		}
		CTR2(KTR_TRAP, "%s: vm_fault() failed with %d", __func__, rv);
		abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
		return;
	}

	ksig.sig = SIGSEGV;
	ksig.code = (rv == KERN_PROTECTION_FAILURE) ? SEGV_ACCERR :
	    SEGV_MAPERR;
	ksig.addr = far;

do_trapsignal:
	call_trapsignal(td, ksig.sig, ksig.code, ksig.addr);
out:
	if (usermode)
		userret(td, tf);
}
void
interrupt(u_int64_t vector, struct trapframe *framep)
{
	struct thread *td;
	volatile struct ia64_interrupt_block *ib = IA64_INTERRUPT_BLOCK;

	td = curthread;
	atomic_add_int(&td->td_intr_nesting_level, 1);

	/*
	 * Handle ExtINT interrupts by generating an INTA cycle to
	 * read the vector.
	 */
	if (vector == 0) {
		vector = ib->ib_inta;
		printf("ExtINT interrupt: vector=%ld\n", vector);
	}

	if (vector == 255) {		/* clock interrupt */
		/* CTR0(KTR_INTR, "clock interrupt"); */
		cnt.v_intr++;
#ifdef EVCNT_COUNTERS
		clock_intr_evcnt.ev_count++;
#else
		intrcnt[INTRCNT_CLOCK]++;
#endif
		critical_enter();
#ifdef SMP
		clks[PCPU_GET(cpuid)]++;
		/* Only the BSP runs the real clock. */
		if (PCPU_GET(cpuid) == 0) {
#endif
			handleclock(framep);
			/* Divide hz (1024) by 8 to get stathz (128). */
			if ((++schedclk2 & 0x7) == 0)
				statclock((struct clockframe *)framep);
#ifdef SMP
		} else {
			ia64_set_itm(ia64_get_itc() + itm_reload);
			mtx_lock_spin(&sched_lock);
			hardclock_process(curthread, TRAPF_USERMODE(framep));
			if ((schedclk2 & 0x7) == 0)
				statclock_process(curkse, TRAPF_PC(framep),
				    TRAPF_USERMODE(framep));
			mtx_unlock_spin(&sched_lock);
		}
#endif
		critical_exit();
#ifdef SMP
	} else if (vector == ipi_vector[IPI_AST]) {
		asts[PCPU_GET(cpuid)]++;
		CTR1(KTR_SMP, "IPI_AST, cpuid=%d", PCPU_GET(cpuid));
	} else if (vector == ipi_vector[IPI_RENDEZVOUS]) {
		rdvs[PCPU_GET(cpuid)]++;
		CTR1(KTR_SMP, "IPI_RENDEZVOUS, cpuid=%d", PCPU_GET(cpuid));
		smp_rendezvous_action();
	} else if (vector == ipi_vector[IPI_STOP]) {
		u_int32_t mybit = PCPU_GET(cpumask);

		CTR1(KTR_SMP, "IPI_STOP, cpuid=%d", PCPU_GET(cpuid));
		savectx(PCPU_GET(pcb));
		stopped_cpus |= mybit;
		while ((started_cpus & mybit) == 0)
			/* spin */;
		started_cpus &= ~mybit;
		stopped_cpus &= ~mybit;
		if (PCPU_GET(cpuid) == 0 && cpustop_restartfunc != NULL) {
			void (*f)(void) = cpustop_restartfunc;

			cpustop_restartfunc = NULL;
			(*f)();
		}
	} else if (vector == ipi_vector[IPI_TEST]) {
		CTR1(KTR_SMP, "IPI_TEST, cpuid=%d", PCPU_GET(cpuid));
		mp_ipi_test++;
#endif
	} else {
		ints[PCPU_GET(cpuid)]++;
		ia64_dispatch_intr(framep, vector);
	}

	atomic_subtract_int(&td->td_intr_nesting_level, 1);
}