void
sbus_type3_pcmcia_intr_reinstall(void)
{
	u_int16_t r = _reg_read_2(SBUS_PCMCIA3_TIMR_REG16);

	/* Toggle the interrupt mask register to re-arm the interrupt. */
	_reg_write_2(SBUS_PCMCIA3_TIMR_REG16, 1);
	_reg_write_2(SBUS_PCMCIA3_TIMR_REG16, r);
}
int
smap_intr(void *arg)
{
	struct smap_softc *sc = arg;
	struct ifnet *ifp;
	u_int16_t cause, disable, r;

	cause = _reg_read_2(SPD_INTR_STATUS_REG16) &
	    _reg_read_2(SPD_INTR_ENABLE_REG16);

	disable = cause & (SPD_INTR_RXDNV | SPD_INTR_TXDNV);
	if (disable) {
		r = _reg_read_2(SPD_INTR_ENABLE_REG16);
		r &= ~disable;
		_reg_write_2(SPD_INTR_ENABLE_REG16, r);

		printf("%s: invalid descriptor. (%c%c)\n", DEVNAME,
		    disable & SPD_INTR_RXDNV ? 'R' : '_',
		    disable & SPD_INTR_TXDNV ? 'T' : '_');

		if (disable & SPD_INTR_RXDNV)
			smap_rxeof(arg);
		_reg_write_2(SPD_INTR_CLEAR_REG16, disable);
	}

	if (cause & SPD_INTR_TXEND) {
		_reg_write_2(SPD_INTR_CLEAR_REG16, SPD_INTR_TXEND);
		if (_reg_read_1(SMAP_RXFIFO_FRAME_REG8) > 0)
			cause |= SPD_INTR_RXEND;
		smap_txeof(arg);
	}

	if (cause & SPD_INTR_RXEND) {
		_reg_write_2(SPD_INTR_CLEAR_REG16, SPD_INTR_RXEND);
		smap_rxeof(arg);
		if (sc->tx_desc_cnt > 0 &&
		    sc->tx_desc_cnt > _reg_read_1(SMAP_TXFIFO_FRAME_REG8))
			smap_txeof(arg);
	}

	if (cause & SPD_INTR_EMAC3)
		emac3_intr(arg);

	/* if transmission is pending, start here */
	ifp = &sc->ethercom.ec_if;
	if (IFQ_IS_EMPTY(&ifp->if_snd) == 0)
		smap_start(ifp);

#if NRND > 0
	rnd_add_uint32(&sc->rnd_source, cause | sc->tx_fifo_ptr << 16);
#endif

	return (1);
}
void
sh_rtc_set(void *cookie, struct clock_ymdhms *dt)
{
	uint8_t r;

	/* stop clock */
	r = _reg_read_1(SH_(RCR2));
	r |= SH_RCR2_RESET;
	r &= ~SH_RCR2_START;
	_reg_write_1(SH_(RCR2), r);

	/* set time */
	if (CPU_IS_SH3)
		_reg_write_1(SH3_RYRCNT, TOBCD(dt->dt_year % 100));
	else
		_reg_write_2(SH4_RYRCNT, TOBCD(dt->dt_year % 100));
#define	RTCSET(x, y)	_reg_write_1(SH_(R ## x ## CNT), TOBCD(dt->dt_ ## y))
	RTCSET(MON, mon);
	RTCSET(WK, wday);
	RTCSET(DAY, day);
	RTCSET(HR, hour);
	RTCSET(MIN, min);
	RTCSET(SEC, sec);
#undef RTCSET

	/* start clock */
	_reg_write_1(SH_(RCR2), r | SH_RCR2_START);
}
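/*
 * TOBCD() above is not part of this excerpt; it comes from the MI clock
 * helpers.  A minimal sketch of the assumed binary-to-BCD conversion
 * (the RTC count registers store each time field as two BCD digits):
 */
#define	TOBCD(x)	(((x) / 10 * 16) + ((x) % 10))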
/* SCPH-18000 */
void
sbus_type2_pcmcia_intr_clear(void)
{

	if (_reg_read_2(SBUS_PCMCIA_CSC1_REG16) & 0x080)
		_reg_write_2(SBUS_PCMCIA_CSC1_REG16, 0xffff);
}
void
cpu_exit(struct proc *p)
{

	/* Disable the UBC channel used for single-stepping, if armed. */
	if (p->p_md.md_flags & MDP_STEP)
		_reg_write_2(SH_(BBRB), 0);

	pmap_deactivate(p);
	sched_exit(p);
}
void
__wdc_spd_disable(void)
{
	u_int16_t r;

	r = _reg_read_2(SPD_INTR_ENABLE_REG16);
	r &= ~SPD_INTR_HDD;
	_reg_write_2(SPD_INTR_ENABLE_REG16, r);
}
/*
 * Prepare context switch from oproc to nproc.
 * This code is used by cpu_switchto.
 */
void
cpu_switch_prepare(struct proc *oproc, struct proc *nproc)
{

	nproc->p_stat = SONPROC;

	if (oproc && (oproc->p_md.md_flags & MDP_STEP))
		_reg_write_2(SH_(BBRB), 0);

	curpcb = nproc->p_md.md_pcb;
	pmap_activate(nproc);

	if (nproc->p_md.md_flags & MDP_STEP) {
		int pm_asid = nproc->p_vmspace->vm_map.pmap->pm_asid;

		/* Re-arm UBC channel B to break on the incoming
		   process's next instruction. */
		_reg_write_2(SH_(BBRB), 0);
		_reg_write_4(SH_(BARB), nproc->p_md.md_regs->tf_spc);
		_reg_write_1(SH_(BASRB), pm_asid);
		_reg_write_1(SH_(BAMRB), 0);
		_reg_write_2(SH_(BRCR), 0x0040);
		_reg_write_2(SH_(BBRB), 0x0014);
	}

	curproc = nproc;
}
// INTC
void
SH4dev::icu_dump()
{
#define ON(x, c)	((x) & (c) ? check[1] : check[0])
#define _(n)	DPRINTF((TEXT("%S %S "), #n, ON(r, SH4_ICR_ ## n)))
	static const char *check[] = { "[_]", "[x]" };
	u_int16_t r;

	super::icu_dump_priority(_ipr_table);

	r = _reg_read_2(SH4_ICR);
	DPRINTF((TEXT("ICR: ")));
	_(NMIL); _(MAI); _(NMIB); _(NMIE); _(IRLM);
	DPRINTF((TEXT("0x%04x\n"), r));

#if 0
	// Monitor SH4 interrupt requests.
	// Disable SH4 internal device interrupts.
	suspendIntr();
	_reg_write_2(SH4_IPRA, 0);
	_reg_write_2(SH4_IPRB, 0);
	_reg_write_2(SH4_IPRC, 0);
//	_reg_write_2(SH4_IPRD, 0);	// SH7709S only.
	resumeIntr(0);	// enable all interrupts.

	while (1) {
		DPRINTF((TEXT("%04x "), _reg_read_2(HD64465_NIRR)));
		bitdisp(_reg_read_4(SH4_INTEVT));
	}
	/* NOTREACHED */
#endif
#undef _
#undef ON
}
void
SH4dev::mq100_dump()
{
	u_int32_t a, e;
	int i;

	// Test for the HPW650PA: 640 x 480, 1280 bytes per line.
	DPRINTF((TEXT("<<<MQ100/HD64464>>>\n")));
	a = MQ100_FB_BASE + 0x4b000;
	e = a + 640 * 480 * sizeof(u_int16_t);
	// Invert every 16bpp pixel in the frame buffer.
	while (a < e) {
		for (i = 0; i < 640; i++, a += sizeof(u_int16_t))
			_reg_write_2(a, ~_reg_read_2(a) & 0xffff);
	}
}
int
smap_init(struct ifnet *ifp)
{
	struct smap_softc *sc = ifp->if_softc;
	u_int16_t r16;
	int rc;

	smap_fifo_init(sc);
	emac3_reset(&sc->emac3);
	smap_desc_init(sc);

	_reg_write_2(SPD_INTR_CLEAR_REG16,
	    SPD_INTR_RXEND | SPD_INTR_TXEND | SPD_INTR_RXDNV);
	emac3_intr_clear();

	r16 = _reg_read_2(SPD_INTR_ENABLE_REG16);
	r16 |= SPD_INTR_EMAC3 | SPD_INTR_RXEND | SPD_INTR_TXEND |
	    SPD_INTR_RXDNV;
	_reg_write_2(SPD_INTR_ENABLE_REG16, r16);
	emac3_intr_enable();

	emac3_enable();

	/* Program the multicast filter, if necessary. */
	emac3_setmulti(&sc->emac3, &sc->ethercom);

	/* Set current media. */
	if ((rc = mii_mediachg(&sc->emac3.mii)) == ENXIO)
		rc = 0;
	else if (rc != 0)
		return rc;

	ifp->if_flags |= IFF_RUNNING;

	return (0);
}
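/*
 * intc_intr() below maps the pending exception event code to its
 * registered handler with EVTCODE_IH().  The macro is outside this
 * excerpt; a hypothetical reconstruction, assuming a handler table
 * __intc_intrhand indexed by event code (codes start at 0x200 and
 * step by 0x20):
 */
#define	EVTCODE_TO_MAP_INDEX(x)	(((x) >> 5) - 0x10)
#define	EVTCODE_IH(x)		(&__intc_intrhand[EVTCODE_TO_MAP_INDEX(x)])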
void
intc_intr(int ssr, int spc, int ssp)
{
	struct intc_intrhand *ih;
	int evtcode;
	u_int16_t r;

	evtcode = _reg_read_4(CPU_IS_SH3 ? SH7709_INTEVT2 : SH4_INTEVT);
	ih = EVTCODE_IH(evtcode);
	KDASSERT(ih->ih_func);

	/*
	 * On entry, all interrupts are disabled, and exceptions are
	 * enabled for P3 access.  (The kernel stack is in P3; SH3 may
	 * or may not take a TLB miss when accessing the stack.)
	 * Enable higher-level interrupts here.
	 */
	r = _reg_read_2(HD6446X_NIRR);
	splx(ih->ih_level);

	if (evtcode == SH_INTEVT_TMU0_TUNI0) {
		struct clockframe cf;

		cf.spc = spc;
		cf.ssr = ssr;
		cf.ssp = ssp;
		(*ih->ih_func)(&cf);
		__dbg_heart_beat(HEART_BEAT_RED);
	} else if (evtcode ==
	    (CPU_IS_SH3 ? SH7709_INTEVT2_IRQ4 : SH_INTEVT_IRL11)) {
		int cause = r & hd6446x_ienable;
		struct hd6446x_intrhand *hh =
		    &hd6446x_intrhand[ffs(cause) - 1];

		if (cause == 0) {
			printf("masked HD6446x interrupt. 0x%04x\n", r);
			_reg_write_2(HD6446X_NIRR, 0x0000);
			return;
		}
		/* Enable higher-level interrupts */
		hd6446x_intr_resume(hh->hh_ipl);
		KDASSERT(hh->hh_func != NULL);
		(*hh->hh_func)(hh->hh_arg);
		__dbg_heart_beat(HEART_BEAT_GREEN);
	} else {
		(*ih->ih_func)(ih->ih_arg);
		__dbg_heart_beat(HEART_BEAT_BLUE);
	}
}
void
sbus_type2_pcmcia_intr_enable(void)
{

	_reg_write_2(SBUS_PCMCIA_TIMR_REG16, 0);
}
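/*
 * The smap routines below round packet sizes and buffer addresses with
 * ROUND4()/ROUND16(), which are defined elsewhere in the driver and are
 * not part of this excerpt.  They are assumed to be the usual
 * power-of-two align-up macros:
 */
#define	ROUND4(x)	(((x) + 3) & ~3)
#define	ROUND16(x)	(((x) + 15) & ~15)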
void
smap_start(struct ifnet *ifp)
{
	struct smap_softc *sc = ifp->if_softc;
	struct smap_desc *d;
	struct mbuf *m0, *m;
	u_int8_t *p, *q;
	u_int32_t *r;
	int i, sz, pktsz;
	u_int16_t fifop;
	u_int16_t r16;

	KDASSERT(ifp->if_flags & IFF_RUNNING);
	FUNC_ENTER();

	while (1) {
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			goto end;

		pktsz = m0->m_pkthdr.len;
		KDASSERT(pktsz <= ETHER_MAX_LEN - ETHER_CRC_LEN);
		sz = ROUND4(pktsz);

		if (sz > sc->tx_buf_freesize ||
		    sc->tx_desc_cnt >= SMAP_DESC_MAX ||
		    emac3_tx_done() != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			goto end;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		KDASSERT(m0 != NULL);
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);

		p = (u_int8_t *)sc->tx_buf;
		q = p + sz;

		/* copy to temporary buffer area */
		for (m = m0; m != NULL; m = m->m_next) {
			memcpy(p, mtod(m, void *), m->m_len);
			p += m->m_len;
		}
		m_freem(m0);

		/* zero padding area */
		for (; p < q; p++)
			*p = 0;

		/* put to FIFO */
		fifop = sc->tx_fifo_ptr;
		KDASSERT((fifop & 3) == 0);
		_reg_write_2(SMAP_TXFIFO_PTR_REG16, fifop);
		sc->tx_fifo_ptr = (fifop + sz) & 0xfff;

		r = sc->tx_buf;
		for (i = 0; i < sz; i += sizeof(u_int32_t))
			*(volatile u_int32_t *)SMAP_TXFIFO_DATA_REG = *r++;
		_wbflush();

		/* put FIFO to EMAC3 */
		d = &sc->tx_desc[sc->tx_start_index];
		KDASSERT((d->stat & SMAP_TXDESC_READY) == 0);
		d->sz = pktsz;
		d->ptr = fifop + SMAP_TXBUF_BASE;
		d->stat = SMAP_TXDESC_READY | SMAP_TXDESC_GENFCS |
		    SMAP_TXDESC_GENPAD;
		_wbflush();

		sc->tx_buf_freesize -= sz;
		sc->tx_desc_cnt++;
		sc->tx_start_index = (sc->tx_start_index + 1) & 0x3f;
		_reg_write_1(SMAP_TXFIFO_FRAME_INC_REG8, 1);

		emac3_tx_kick();
		r16 = _reg_read_2(SPD_INTR_ENABLE_REG16);
		if ((r16 & SPD_INTR_TXDNV) == 0) {
			r16 |= SPD_INTR_TXDNV;
			_reg_write_2(SPD_INTR_ENABLE_REG16, r16);
		}
	}
 end:
	/* set watchdog timer */
	ifp->if_timer = 5;

	FUNC_EXIT();
}
void
smap_rxeof(void *arg)
{
	struct smap_softc *sc = arg;
	struct smap_desc *d;
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct mbuf *m;
	u_int16_t r16, stat;
	u_int32_t *p;
	int i, j, sz, rxsz, cnt;

	FUNC_ENTER();

	i = sc->rx_done_index;

	for (cnt = 0;; cnt++, i = (i + 1) & 0x3f) {
		m = NULL;
		d = &sc->rx_desc[i];
		stat = d->stat;

		if ((stat & SMAP_RXDESC_EMPTY) != 0) {
			break;
		} else if (stat & 0x7fff) {
			ifp->if_ierrors++;
			goto next_packet;
		}

		sz = d->sz;
		rxsz = ROUND4(sz);
		KDASSERT(sz >= ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN);
		KDASSERT(sz <= ETHER_MAX_LEN);

		/* load data from FIFO */
		_reg_write_2(SMAP_RXFIFO_PTR_REG16, d->ptr & 0x3ffc);
		p = sc->rx_buf;
		for (j = 0; j < rxsz; j += sizeof(u_int32_t))
			*p++ = _reg_read_4(SMAP_RXFIFO_DATA_REG);

		/* put to mbuf */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			printf("%s: unable to allocate Rx mbuf\n", DEVNAME);
			ifp->if_ierrors++;
			goto next_packet;
		}

		if (sz > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				printf("%s: unable to allocate Rx cluster\n",
				    DEVNAME);
				m_freem(m);
				m = NULL;
				ifp->if_ierrors++;
				goto next_packet;
			}
		}

		m->m_data += 2;	/* for alignment */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = sz;
		memcpy(mtod(m, void *), (void *)sc->rx_buf, sz);

	next_packet:
		ifp->if_ipackets++;
		_reg_write_1(SMAP_RXFIFO_FRAME_DEC_REG8, 1);

		/* free descriptor */
		d->sz = 0;
		d->ptr = 0;
		d->stat = SMAP_RXDESC_EMPTY;
		_wbflush();

		if (m != NULL) {
			if (ifp->if_bpf)
				bpf_mtap(ifp->if_bpf, m);
			(*ifp->if_input)(ifp, m);
		}
	}
	sc->rx_done_index = i;

	r16 = _reg_read_2(SPD_INTR_ENABLE_REG16);
	if (((r16 & SPD_INTR_RXDNV) == 0) && cnt > 0) {
		r16 |= SPD_INTR_RXDNV;
		_reg_write_2(SPD_INTR_ENABLE_REG16, r16);
	}
	FUNC_EXIT();
}
void
smap_attach(struct device *parent, struct device *self, void *aux)
{
	struct spd_attach_args *spa = aux;
	struct smap_softc *sc = (void *)self;
	struct emac3_softc *emac3 = &sc->emac3;
	struct ifnet *ifp = &sc->ethercom.ec_if;
	struct mii_data *mii = &emac3->mii;
	void *txbuf, *rxbuf;
	u_int16_t r;

#ifdef SMAP_DEBUG
	__sc = sc;
#endif

	printf(": %s\n", spa->spa_product_name);

	/* SPD EEPROM */
	if (smap_get_eaddr(sc, emac3->eaddr) != 0)
		return;

	printf("%s: Ethernet address %s\n", DEVNAME,
	    ether_sprintf(emac3->eaddr));

	/* disable interrupts */
	r = _reg_read_2(SPD_INTR_ENABLE_REG16);
	r &= ~(SPD_INTR_RXEND | SPD_INTR_TXEND | SPD_INTR_RXDNV |
	    SPD_INTR_EMAC3);
	_reg_write_2(SPD_INTR_ENABLE_REG16, r);
	emac3_intr_disable();

	/* clear pending interrupts */
	_reg_write_2(SPD_INTR_CLEAR_REG16, SPD_INTR_RXEND | SPD_INTR_TXEND |
	    SPD_INTR_RXDNV);
	emac3_intr_clear();

	/* buffer descriptor mode */
	_reg_write_1(SMAP_DESC_MODE_REG8, 0);

	if (smap_fifo_init(sc) != 0)
		return;

	if (emac3_init(&sc->emac3) != 0)
		return;
	emac3_intr_disable();
	emac3_disable();

	smap_desc_init(sc);

	/* allocate temporary buffer */
	txbuf = malloc(ETHER_MAX_LEN - ETHER_CRC_LEN + SMAP_FIFO_ALIGN + 16,
	    M_DEVBUF, M_NOWAIT);
	if (txbuf == NULL) {
		printf("%s: no memory.\n", DEVNAME);
		return;
	}

	rxbuf = malloc(ETHER_MAX_LEN + SMAP_FIFO_ALIGN + 16,
	    M_DEVBUF, M_NOWAIT);
	if (rxbuf == NULL) {
		printf("%s: no memory.\n", DEVNAME);
		free(txbuf, M_DEVBUF);
		return;
	}

	sc->tx_buf = (u_int32_t *)ROUND16((vaddr_t)txbuf);
	sc->rx_buf = (u_int32_t *)ROUND16((vaddr_t)rxbuf);

	/*
	 * setup MI layer
	 */
	strcpy(ifp->if_xname, DEVNAME);
	ifp->if_softc = sc;
	ifp->if_start = smap_start;
	ifp->if_ioctl = smap_ioctl;
	ifp->if_init = smap_init;
	ifp->if_stop = smap_stop;
	ifp->if_watchdog = smap_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS |
	    IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* ifmedia setup. */
	mii->mii_ifp = ifp;
	mii->mii_readreg = emac3_phy_readreg;
	mii->mii_writereg = emac3_phy_writereg;
	mii->mii_statchg = emac3_phy_statchg;
	sc->ethercom.ec_mii = mii;
	ifmedia_init(&mii->mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(&emac3->dev, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	/* Choose a default media. */
	if (LIST_FIRST(&mii->mii_phys) == NULL) {
		ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
	} else {
		ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);
	}

	if_attach(ifp);
	ether_ifattach(ifp, emac3->eaddr);

	spd_intr_establish(SPD_NIC, smap_intr, sc);

#if NRND > 0
	rnd_attach_source(&sc->rnd_source, DEVNAME, RND_TYPE_NET,
	    RND_FLAG_DEFAULT);
#endif
}
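/*
 * sh_clock_init() below calibrates with TMU_START()/TMU_ELAPSED() and
 * #undefs them on exit; the definitions are not part of this excerpt.
 * A minimal sketch of what they are assumed to look like (the TMU
 * channels count down, so elapsed time is the distance from the
 * 0xffffffff reload value):
 */
#define	TMU_START(x)							\
do {									\
	_reg_bclr_1(SH_(TSTR), TSTR_STR ## x);				\
	_reg_write_4(SH_(TCNT ## x), 0xffffffff);			\
	_reg_bset_1(SH_(TSTR), TSTR_STR ## x);				\
} while (/*CONSTCOND*/0)
#define	TMU_ELAPSED(x)	(0xffffffff - _reg_read_4(SH_(TCNT ## x)))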
void
sh_clock_init(int flags, struct rtc_ops *rtc)
{
	uint32_t s, t0, cnt_1s;

	sh_clock.flags = flags;
	if (rtc != NULL)
		sh_clock.rtc = *rtc;	/* structure copy */

	/* Initialize TMU */
	_reg_write_2(SH_(TCR0), 0);
	_reg_write_2(SH_(TCR1), 0);
	_reg_write_2(SH_(TCR2), 0);

	/* Reset RTC alarm and interrupt */
	_reg_write_1(SH_(RCR1), 0);

	/* Stop all counters */
	_reg_write_1(SH_(TSTR), 0);

	/*
	 * Estimate CPU clock.
	 */
	if (sh_clock.flags & SH_CLOCK_NORTC) {
		/* Set TMU channel 0 source to PCLOCK / 16 */
		_reg_write_2(SH_(TCR0), TCR_TPSC_P16);
		sh_clock.tmuclk = sh_clock.pclock / 16;
	} else {
		/* Set TMU channel 0 source to RTC counter clock (16.384kHz) */
		_reg_write_2(SH_(TCR0),
		    CPU_IS_SH3 ? SH3_TCR_TPSC_RTC : SH4_TCR_TPSC_RTC);
		sh_clock.tmuclk = SH_RTC_CLOCK;

		/* Make sure the RTC oscillator is enabled */
		_reg_bset_1(SH_(RCR2), SH_RCR2_ENABLE);
	}

	s = _cpu_exception_suspend();
	_cpu_spin(1);	/* load the function into the cache. */
	TMU_START(0);
	_cpu_spin(10000000);
	t0 = TMU_ELAPSED(0);
	_cpu_exception_resume(s);

	sh_clock.cpucycle_1us = (sh_clock.tmuclk * 10) / t0;
	cnt_1s = ((uint64_t)sh_clock.tmuclk * 10000000 * 10 + t0 / 2) / t0;

	if (CPU_IS_SH4)
		sh_clock.cpuclock = cnt_1s / 2;	/* two-issue */
	else
		sh_clock.cpuclock = cnt_1s;

	/*
	 * Estimate PCLOCK
	 */
	if (sh_clock.pclock == 0) {
		uint32_t t1;

		/* set TMU channel 1 source to PCLOCK / 4 */
		_reg_write_2(SH_(TCR1), TCR_TPSC_P4);

		s = _cpu_exception_suspend();
		_cpu_spin(1);	/* load the function into the cache. */
		TMU_START(0);
		TMU_START(1);
		_cpu_spin(cnt_1s);	/* 1 sec. */
		t0 = TMU_ELAPSED(0);
		t1 = TMU_ELAPSED(1);
		_cpu_exception_resume(s);

		sh_clock.pclock =
		    ((uint64_t)t1 * 4 * SH_RTC_CLOCK + t0 / 2) / t0;
	}

	/* Stop all counters */
	_reg_write_1(SH_(TSTR), 0);

#undef TMU_START
#undef TMU_ELAPSED
}
void
InitializeBsc(void)
{

	/*
	 * Drive RAS, CAS in standby mode and bus release mode.
	 * Area0 = normal memory, Area5, 6 = normal (no burst)
	 * Area2 = normal memory, Area3 = SDRAM, Area5 = normal memory
	 * Area4 = normal memory
	 * Area6 = normal memory
	 */
	_reg_write_4(SH4_BCR1, BSC_BCR1_VAL);

	/*
	 * Bus width
	 * Area4: bus width = 16bit
	 * Area6, 5 = 16bit
	 * Area1 = 8bit
	 * Area2, 3: bus width = 32bit
	 */
	_reg_write_2(SH4_BCR2, BSC_BCR2_VAL);

#if defined(SH4) && defined(SH7751R)
	if (cpu_product == CPU_PRODUCT_7751R) {
#ifdef BSC_BCR3_VAL
		_reg_write_2(SH4_BCR3, BSC_BCR3_VAL);
#endif
#ifdef BSC_BCR4_VAL
		_reg_write_4(SH4_BCR4, BSC_BCR4_VAL);
#endif
	}
#endif /* SH4 && SH7751R */

	/*
	 * Number of idle cycles in transition areas and from read to write:
	 * Area6 = 3, Area5 = 3, Area4 = 3, Area3 = 3, Area2 = 3
	 * Area1 = 3, Area0 = 3
	 */
	_reg_write_4(SH4_WCR1, BSC_WCR1_VAL);

	/*
	 * Wait cycles
	 * Area 6 = 6
	 * Area 5 = 2
	 * Area 4 = 10
	 * Area 3 = 3
	 * Area 2, 1 = 3
	 * Area 0 = 6
	 */
	_reg_write_4(SH4_WCR2, BSC_WCR2_VAL);

#ifdef BSC_WCR3_VAL
	_reg_write_4(SH4_WCR3, BSC_WCR3_VAL);
#endif

	/*
	 * RAS pre-charge = 2 cycles, RAS-CAS delay = 3 cycles,
	 * write pre-charge = 1 cycle,
	 * CAS-before-RAS refresh RAS assert time = 3 cycles.
	 * Disable burst, bus size = 32bit, column address = 10bit,
	 * refresh ON, CAS-before-RAS refresh ON, EDO DRAM.
	 */
	_reg_write_4(SH4_MCR, BSC_MCR_VAL);

#ifdef BSC_SDMR2_VAL
	_reg_write_1(BSC_SDMR2_VAL, 0);
#endif
#ifdef BSC_SDMR3_VAL
	_reg_write_1(BSC_SDMR3_VAL, 0);
#endif /* BSC_SDMR3_VAL */

	/*
	 * PCMCIA Control Register
	 * OE/WE assert delay 3.5 cycles
	 * OE/WE negate-address delay 3.5 cycles
	 */
#ifdef BSC_PCR_VAL
	_reg_write_2(SH4_PCR, BSC_PCR_VAL);
#endif

	/*
	 * Refresh Timer Control/Status Register
	 * Disable CMF interrupt, clock 1/16, disable OVF interrupt,
	 * count limit = 1024.
	 * The high byte of the value written must be 0xa5 (0xa4 for RFCR):
	 * these registers require that magic code in the upper byte of
	 * every write.
	 */
	_reg_write_2(SH4_RTCSR, BSC_RTCSR_VAL);

	/*
	 * Refresh Timer Counter
	 * Initialize to 0
	 */
#ifdef BSC_RTCNT_VAL
	_reg_write_2(SH4_RTCNT, BSC_RTCNT_VAL);
#endif

	/* set Refresh Time Constant Register */
	_reg_write_2(SH4_RTCOR, BSC_RTCOR_VAL);

	/* init Refresh Count Register */
#ifdef BSC_RFCR_VAL
	_reg_write_2(SH4_RFCR, BSC_RFCR_VAL);
#endif

	/*
	 * Clock Pulse Generator
	 */
	/* Set clock mode (make internal clock double speed) */
	_reg_write_2(SH4_FRQCR, FRQCR_VAL);
}
/*
 * Start the clock interrupt.
 */
void
cpu_initclocks(void)
{

	if (sh_clock.pclock == 0)
		panic("No PCLOCK information.");

	/* Set global variables. */
	hz = HZ;
	tick = 1000000 / hz;

	/*
	 * Use TMU channel 0 as the hard clock.
	 */
	_reg_bclr_1(SH_(TSTR), TSTR_STR0);

	if (sh_clock.flags & SH_CLOCK_NORTC) {
		/* use PCLOCK/16 as TMU0 source */
		_reg_write_2(SH_(TCR0), TCR_UNIE | TCR_TPSC_P16);
	} else {
		/* use RTC clock as TMU0 source */
		_reg_write_2(SH_(TCR0), TCR_UNIE |
		    (CPU_IS_SH3 ? SH3_TCR_TPSC_RTC : SH4_TCR_TPSC_RTC));
	}
	sh_clock.hz_cnt = sh_clock.tmuclk / hz - 1;
	_reg_write_4(SH_(TCOR0), sh_clock.hz_cnt);
	_reg_write_4(SH_(TCNT0), sh_clock.hz_cnt);

	intc_intr_establish(SH_INTEVT_TMU0_TUNI0, IST_LEVEL, IPL_CLOCK,
	    CPU_IS_SH3 ? sh3_clock_intr : sh4_clock_intr, NULL, "clock");

	/* start the hard clock */
	_reg_bset_1(SH_(TSTR), TSTR_STR0);

	/*
	 * TMU channel 1 is a one-shot timer for soft interrupts.
	 */
	_reg_write_2(SH_(TCR1), TCR_UNIE | TCR_TPSC_P4);
	_reg_write_4(SH_(TCOR1), 0xffffffff);

	/*
	 * TMU channel 2 is a free-running counter for the timecounter.
	 */
	_reg_write_2(SH_(TCR2), TCR_TPSC_P4);
	_reg_write_4(SH_(TCOR2), 0xffffffff);

	/*
	 * Start and initialize the timecounter.
	 */
	_reg_bset_1(SH_(TSTR), TSTR_STR2);

	sh_clock.tc.tc_get_timecount = sh_timecounter_get;
	sh_clock.tc.tc_frequency = sh_clock.pclock / 4;
	sh_clock.tc.tc_name = "tmu_pclock_4";
	sh_clock.tc.tc_quality = 100;
	sh_clock.tc.tc_counter_mask = 0xffffffff;
	tc_init(&sh_clock.tc);

	/* Make sure to start the RTC */
	if (sh_clock.rtc.init != NULL)
		sh_clock.rtc.init(sh_clock.rtc._cookie);
}
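/*
 * sh_timecounter_get() is registered above but not part of this
 * excerpt.  A minimal sketch of the assumed implementation: TMU
 * channel 2 counts down from 0xffffffff, so the value is inverted to
 * produce the monotonically increasing count the timecounter
 * framework expects.
 */
static u_int
sh_timecounter_get(struct timecounter *tc)
{

	return 0xffffffff - _reg_read_4(SH_(TCNT2));
}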
void
sbus_type3_pcmcia_intr_disable(void)
{

	_reg_write_2(SBUS_PCMCIA3_TIMR_REG16, 1);
}
void
sh_cpu_init(int arch, int product)
{

	/* CPU type */
	cpu_arch = arch;
	cpu_product = product;

#if defined(SH3) && defined(SH4)
	/* Set register addresses */
	sh_devreg_init();
#endif
	/* Cache access ops. */
	sh_cache_init();

	/* MMU access ops. */
	sh_mmu_init();

	/* Hardclock, RTC initialize. */
	machine_clock_init();

	/* ICU initialize. */
	curcpu()->ci_idepth = -1;
	intc_init();

	/* Exception vector. */
	memcpy(VBR + 0x100, sh_vector_generic,
	    sh_vector_generic_end - sh_vector_generic);
#ifdef SH3
	if (CPU_IS_SH3)
		memcpy(VBR + 0x400, sh3_vector_tlbmiss,
		    sh3_vector_tlbmiss_end - sh3_vector_tlbmiss);
#endif
#ifdef SH4
	if (CPU_IS_SH4)
		memcpy(VBR + 0x400, sh4_vector_tlbmiss,
		    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss);
#endif
	memcpy(VBR + 0x600, sh_vector_interrupt,
	    sh_vector_interrupt_end - sh_vector_interrupt);

	if (!SH_HAS_UNIFIED_CACHE)
		sh_icache_sync_all();

	__asm volatile("ldc %0, vbr" :: "r"(VBR));

	/* kernel stack setup */
	__sh_switch_resume = CPU_IS_SH3 ? sh3_switch_resume :
	    sh4_switch_resume;

	/* Set page size (4KB) */
	uvm_setpagesize();

	/* setup UBC channel A for single-stepping */
#if defined(PTRACE) || defined(DDB)
	_reg_write_2(SH_(BBRA), 0);	/* disable channel A */
	_reg_write_2(SH_(BBRB), 0);	/* disable channel B */

#ifdef SH3
	if (CPU_IS_SH3) {
		/* A: break after execution, ignore ASID */
		_reg_write_4(SH3_BRCR,
		    (UBC_CTL_A_AFTER_INSN | SH3_UBC_CTL_A_MASK_ASID));

		/* A: compare all address bits */
		_reg_write_4(SH3_BAMRA, 0x00000000);
	}
#endif /* SH3 */

#ifdef SH4
	if (CPU_IS_SH4) {
		/* A: break after execution */
		_reg_write_2(SH4_BRCR, UBC_CTL_A_AFTER_INSN);

		/* A: compare all address bits, ignore ASID */
		_reg_write_1(SH4_BAMRA, SH4_UBC_MASK_NONE | SH4_UBC_MASK_ASID);
	}
#endif /* SH4 */
#endif
}