/*
 * Disable a non-PBM interrupt by clearing the VALID bit in its interrupt
 * mapping register.  If `wait' is non-zero, busy-wait until the interrupt
 * state machine reports the interrupt is no longer pending (i.e. any
 * in-flight handler has completed), bounded by pcmu_intrpend_timeout.
 *
 * Caller must hold pcb_p->pcb_intr_lock.
 */
static void
pcmu_cb_disable_nintr_reg(pcmu_cb_t *pcb_p, pcmu_ib_ino_t ino, int wait)
{
	uint64_t tmp, map_reg_pa = pcmu_cb_ino_to_map_pa(pcb_p, ino);

	ASSERT(MUTEX_HELD(&pcb_p->pcb_intr_lock));

	/* mark interrupt invalid in mapping register */
	tmp = lddphysio(map_reg_pa) & ~PCMU_INTR_MAP_REG_VALID;
	stdphysio(map_reg_pa, tmp);
	(void) lddphysio(map_reg_pa);	/* flush previous write */

	if (wait) {
		hrtime_t start_time;
		hrtime_t prev, curr, interval, jump;
		hrtime_t intr_timeout;
		uint64_t state_reg_pa = pcb_p->pcb_obsta_pa;
		/* two state bits per ino in the observability/state register */
		uint_t shift = (ino & 0x1f) << 1;

		/* busy wait if there is interrupt being processed */
		/* unless panic or timeout for interrupt pending is reached */
		intr_timeout = pcmu_intrpend_timeout;
		jump = TICK_TO_NSEC(xc_tick_jump_limit);
		start_time = curr = gethrtime();
		while ((((lddphysio(state_reg_pa) >> shift) &
		    PCMU_CLEAR_INTR_REG_MASK) ==
		    PCMU_CLEAR_INTR_REG_PENDING) && !panicstr) {
			/*
			 * If we have a really large jump in hrtime, it is most
			 * probably because we entered the debugger (or OBP,
			 * in general). So, we adjust the timeout accordingly
			 * to prevent declaring an interrupt timeout. The
			 * master-interrupt mechanism in OBP should deliver
			 * the interrupts properly.
			 */
			prev = curr;
			curr = gethrtime();
			interval = curr - prev;
			if (interval > jump)
				intr_timeout += interval;
			if (curr - start_time > intr_timeout) {
				/* give up: warn and leave the line disabled */
				cmn_err(CE_WARN, "pcmu@%x "
				    "pcmu_cb_disable_nintr_reg(%lx,%x) timeout",
				    pcb_p->pcb_pcmu_p->pcmu_id,
				    map_reg_pa,
				    PCMU_CB_INO_TO_MONDO(pcb_p, ino));
				break;
			}
		}
	}
}
/*
 * Write `value' to the clear-interrupt register of the given ino, then
 * read it back so the store is flushed to the hardware before returning.
 */
static void
pcmu_cb_set_nintr_reg(pcmu_cb_t *pcb_p, pcmu_ib_ino_t ino, uint64_t value)
{
	uint64_t clr_pa;

	clr_pa = pcmu_cb_ino_to_clr_pa(pcb_p, ino);
	PCMU_DBG3(PCMU_DBG_CB|PCMU_DBG_CONT, NULL,
	    "pci-%x pcmu_cb_set_nintr_reg: ino=%x PA=%016llx\n",
	    pcb_p->pcb_pcmu_p->pcmu_id, ino, clr_pa);

	stdphysio(clr_pa, value);
	/* read back to push the write out to the device */
	(void) lddphysio(clr_pa);
}
/*
 * kstat update function using physical addresses.
 *
 * On KSTAT_WRITE, stores the event-select (PCR) value supplied by the
 * consumer into the hardware PCR register.  On read, refreshes the PCR
 * snapshot and splits the 64-bit PIC register into its two 32-bit
 * counters (pic0 = low half, pic1 = high half).  Always returns 0.
 */
int
pcmu_cntr_kstat_pa_update(kstat_t *ksp, int rw)
{
	pcmu_cntr_pa_t *cntr_pa_p = (pcmu_cntr_pa_t *)ksp->ks_private;
	struct kstat_named *data_p = (struct kstat_named *)ksp->ks_data;
	uint64_t pic;

	if (rw == KSTAT_WRITE) {
		stdphysio(cntr_pa_p->pcr_pa, data_p[0].value.ui64);
		return (0);
	}

	pic = lddphysio(cntr_pa_p->pic_pa);
	data_p[0].value.ui64 = lddphysio(cntr_pa_p->pcr_pa);

	/* pic0 : lo 32 bits */
	data_p[1].value.ui64 = pic & 0xffffffffull;
	/* pic1 : hi 32 bits */
	data_p[2].value.ui64 = pic >> 32;

	return (0);
}
/*
 * Interrupt redistribution callback: retarget each shared (CB-owned)
 * interrupt to a CPU chosen by the interrupt-distribution framework.
 * Each enabled ino is disabled (waiting for any in-flight handler),
 * its mapping register rewritten with the new target, and the write
 * flushed with a read-back.
 */
void
pcmu_cb_intr_dist(void *arg)
{
	int i;
	pcmu_cb_t *pcb_p = (pcmu_cb_t *)arg;

	mutex_enter(&pcb_p->pcb_intr_lock);
	for (i = 0; i < pcb_p->pcb_no_of_inos; i++) {
		uint64_t mr_pa;
		volatile uint64_t imr;
		pcmu_ib_mondo_t mondo;
		uint32_t cpu_id;
		pcmu_ib_t *pib_p = pcb_p->pcb_pcmu_p->pcmu_ib_p;
		volatile uint64_t *imr_p;
		pcmu_ib_ino_t ino = pcb_p->pcb_inos[i];

		if (!ino)	/* skip non-shared interrupts */
			continue;

		mr_pa = pcmu_cb_ino_to_map_pa(pcb_p, ino);
		imr = lddphysio(mr_pa);
		/* leave disabled interrupts alone */
		if (!PCMU_IB_INO_INTR_ISON(imr))
			continue;

		mondo = PCMU_CB_INO_TO_MONDO(pcb_p, ino);
		/* pick the new target CPU for this mondo */
		cpu_id = intr_dist_cpuid();
		imr_p = ib_intr_map_reg_addr(pib_p, ino);
		/*
		 * NOTE(review): u2u_translate_tgtid appears to map the
		 * logical CPU id to the hardware target id used by the
		 * mapping register — confirm against its definition.
		 */
		cpu_id = u2u_translate_tgtid(pib_p->pib_pcmu_p, cpu_id, imr_p);
		/* quiesce the ino before rewriting its mapping register */
		pcmu_cb_disable_nintr_reg(pcb_p, ino, PCMU_IB_INTR_WAIT);
		stdphysio(mr_pa, ib_get_map_reg(mondo, cpu_id));
		(void) lddphysio(mr_pa);	/* flush previous write */
	}
	mutex_exit(&pcb_p->pcb_intr_lock);
}
/*
 * Suspend support: snapshot the contents of every internal interrupt's
 * mapping register into a freshly allocated save area so they can be
 * restored on resume.  Slots whose ino is 0 are left untouched.
 */
void
pcmu_cb_suspend(pcmu_cb_t *pcb_p)
{
	int i;
	int inos = pcb_p->pcb_no_of_inos;

	ASSERT(!pcb_p->pcb_imr_save);
	pcb_p->pcb_imr_save = kmem_alloc(inos * sizeof (uint64_t), KM_SLEEP);

	/*
	 * save the internal interrupts' mapping registers content
	 *
	 * The PBM IMR really doesn't need to be saved, as it is
	 * different per side and is handled by pcmu_pbm_suspend/resume.
	 * But it complicates the logic.
	 */
	for (i = 0; i < inos; i++) {
		pcmu_ib_ino_t ino = pcb_p->pcb_inos[i];

		if (ino != 0) {
			pcb_p->pcb_imr_save[i] =
			    lddphysio(pcmu_cb_ino_to_map_pa(pcb_p, ino));
		}
	}
}