/*
 * Spin for at least 'usec' microseconds.
 *
 * Before the jz4780 timer has attached we fall back to a roughly
 * calibrated busy loop; once the timecounter is available we poll it
 * until the requested number of timer ticks has elapsed.
 */
void
DELAY(int usec)
{
	uint32_t elapsed, t_now, t_prev, ticks_left;
	uint32_t spin;

	/* Timer has not yet been initialized */
	if (jz4780_timer_sc == NULL) {
		while (usec-- > 0)
			for (spin = 200; spin > 0; spin--) {
				/* Prevent gcc from optimizing out the loop */
				mips_rd_cause();
			}
		return;
	}

	/*
	 * Some of the other timers in the source tree do this calculation as:
	 *
	 * usec * ((sc->tc.tc_frequency / 1000000) + 1)
	 *
	 * which gives a fairly pessimistic result when tc_frequency is an exact
	 * multiple of 1000000. Given the data type and typical values for
	 * tc_frequency adding 999999 shouldn't overflow.
	 */
	ticks_left = usec * ((jz4780_timer_sc->tc.tc_frequency + 999999) /
	    1000000);

	/*
	 * We add one since the first iteration may catch the counter just
	 * as it is changing.
	 */
	ticks_left += 1;

	t_prev = jz4780_get_timecount(&jz4780_timer_sc->tc);
	for (;;) {
		t_now = jz4780_get_timecount(&jz4780_timer_sc->tc);

		/*
		 * If the timer has rolled over, then we have the case:
		 *
		 *	if (t_prev > t_now) {
		 *		elapsed = (0 - t_prev) + t_now
		 *	}
		 *
		 * which is really no different then the normal case.
		 * Both cases are simply:
		 *
		 *	elapsed = t_now - t_prev.
		 */
		elapsed = t_now - t_prev;
		if (elapsed >= ticks_left)
			break;
		t_prev = t_now;
		ticks_left -= elapsed;
	}
}
void cpu_intr(struct trapframe *tf) { struct intr_event *event; register_t cause, status; int hard, i, intr; critical_enter(); cause = mips_rd_cause(); status = mips_rd_status(); intr = (cause & MIPS_INT_MASK) >> 8; /* * Do not handle masked interrupts. They were masked by * pre_ithread function (mips_mask_XXX_intr) and will be * unmasked once ithread is through with handler */ intr &= (status & MIPS_INT_MASK) >> 8; while ((i = fls(intr)) != 0) { intr &= ~(1 << (i - 1)); switch (i) { case 1: case 2: /* Software interrupt. */ i--; /* Get a 0-offset interrupt. */ hard = 0; event = softintr_events[i]; mips_intrcnt_inc(mips_intr_counters[i]); break; default: /* Hardware interrupt. */ i -= 2; /* Trim software interrupt bits. */ i--; /* Get a 0-offset interrupt. */ hard = 1; event = hardintr_events[i]; mips_intrcnt_inc(mips_intr_counters[NSOFT_IRQS + i]); break; } if (!event || TAILQ_EMPTY(&event->ie_handlers)) { printf("stray %s interrupt %d\n", hard ? "hard" : "soft", i); continue; } if (intr_event_handle(event, tf) != 0) { printf("stray %s interrupt %d\n", hard ? "hard" : "soft", i); } } KASSERT(i == 0, ("all interrupts handled")); critical_exit(); #ifdef HWPMC_HOOKS if (pmc_hook && (PCPU_GET(curthread)->td_pflags & TDP_CALLCHAIN)) pmc_hook(PCPU_GET(curthread), PMC_FN_USER_CALLCHAIN, tf); #endif }
int mips_pic_intr(void *arg) { struct mips_pic_softc *sc = arg; register_t cause, status; struct intr_irqsrc *isrc; int i, intr; cause = mips_rd_cause(); status = mips_rd_status(); intr = (cause & MIPS_INT_MASK) >> 8; /* * Do not handle masked interrupts. They were masked by * pre_ithread function (mips_mask_XXX_intr) and will be * unmasked once ithread is through with handler */ intr &= (status & MIPS_INT_MASK) >> 8; while ((i = fls(intr)) != 0) { i--; /* Get a 0-offset interrupt. */ intr &= ~(1 << i); isrc = sc->pic_irqs[i]; if (isrc == NULL) { device_printf(sc->pic_dev, "Stray interrupt %u detected\n", i); pic_irq_mask(sc, i); continue; } intr_irq_dispatch(isrc, curthread->td_intr_frame); } KASSERT(i == 0, ("all interrupts handled")); #ifdef HWPMC_HOOKS if (pmc_hook && (PCPU_GET(curthread)->td_pflags & TDP_CALLCHAIN)) { struct trapframe *tf = PCPU_GET(curthread)->td_intr_frame; pmc_hook(PCPU_GET(curthread), PMC_FN_USER_CALLCHAIN, tf); } #endif return (FILTER_HANDLED); }
/*
 * Watchdog NMI handler: log which core tripped, its exception PC and
 * CP0 status/cause, then spin until the impending soft reset fires.
 */
void
octeon_wdog_nmi(void)
{
	int self;

	self = cvmx_get_core_num();

	printf("cpu%u: NMI detected\n", self);
	printf("cpu%u: Exception PC: %p\n", self, (void *)mips_rd_excpc());
	printf("cpu%u: status %#x cause %#x\n", self, mips_rd_status(),
	    mips_rd_cause());

	/*
	 * This is the end
	 * Beautiful friend
	 *
	 * Just wait for Soft Reset to come and take us
	 */
	for (;;)
		continue;
}