void
__sh4_itlb_invalidate_all()
{
        _reg_write_4(SH4_ITLB_AA, 0);
        _reg_write_4(SH4_ITLB_AA | (1 << SH4_ITLB_E_SHIFT), 0);
        _reg_write_4(SH4_ITLB_AA | (2 << SH4_ITLB_E_SHIFT), 0);
        _reg_write_4(SH4_ITLB_AA | (3 << SH4_ITLB_E_SHIFT), 0);
}
void
timer_init()
{
        /* Clear compare-equal and overflow flags on all four EE timers. */
        _reg_write_4(T0_MODE_REG, (T_MODE_EQUF | T_MODE_OVFF));
        _reg_write_4(T1_MODE_REG, (T_MODE_EQUF | T_MODE_OVFF));
        _reg_write_4(T2_MODE_REG, (T_MODE_EQUF | T_MODE_OVFF));
        _reg_write_4(T3_MODE_REG, (T_MODE_EQUF | T_MODE_OVFF));
}
void
timer_one_shot(int timer)
{
        KDASSERT(LEGAL_TIMER(timer) && timer != 0);

        _reg_write_4(T_COUNT_REG(timer), 0);
        _reg_write_4(T_COMP_REG(timer), 1);
        _reg_write_4(T_MODE_REG(timer), T_MODE_CUE | T_MODE_CMPE);
}
void
timer_clock_init()
{
        /* clock interrupt: (294.912MHz / 2 / 256) / 5760 = 100Hz */
        intc_intr_establish(I_CH9_TIMER0, IPL_CLOCK, timer0_intr, 0);

        _reg_write_4(T0_COUNT_REG, 0);
        _reg_write_4(T0_COMP_REG, 5760);
        _reg_write_4(T0_MODE_REG, T_MODE_CLKS_BUSCLK256 |
            T_MODE_ZRET | T_MODE_CUE | T_MODE_CMPE);
}
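/*
 * Illustrative sketch only, not part of the original source: the compare
 * value written above can be derived from the desired tick rate.  The names
 * BUSCLK_HZ, HZ_WANTED and timer0_compare_value() are hypothetical, and
 * BUSCLK is assumed to be half of the 294.912MHz EE core clock.
 */
#define BUSCLK_HZ       147456000       /* assumed: 294.912MHz / 2 */
#define HZ_WANTED       100

static uint32_t
timer0_compare_value(void)
{
        /* 147456000 / 256 / 100 = 5760, the value written to T0_COMP_REG */
        return (BUSCLK_HZ / 256 / HZ_WANTED);
}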
void
shpcic_conf_write(void *v, pcitag_t tag, int reg, pcireg_t data)
{
        int s;

        s = splhigh();
        _reg_write_4(SH4_PCIPAR, tag | reg);
        _reg_write_4(SH4_PCIPDR, data);
        _reg_write_4(SH4_PCIPAR, 0);
        splx(s);
}
pcireg_t
shpcic_conf_read(void *v, pcitag_t tag, int reg)
{
        pcireg_t data;
        int s;

        s = splhigh();
        _reg_write_4(SH4_PCIPAR, tag | reg);
        data = _reg_read_4(SH4_PCIPDR);
        _reg_write_4(SH4_PCIPAR, 0);
        splx(s);

        return data;
}
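/*
 * Illustrative sketch only, not from the original source: how a tag for the
 * two config-space accessors above might be formed.  This assumes SH4_PCIPAR
 * takes the standard PCI configuration-address layout (bus[23:16],
 * device[15:11], function[10:8]); the helper name below is hypothetical.
 */
static pcitag_t
shpcic_make_tag_sketch(int bus, int device, int function)
{
        return ((bus << 16) | (device << 11) | (function << 8));
}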
void
sh3_tlb_invalidate_all()
{
        uint32_t aw, a;
        int e, w;

        /* Zero-clear all TLB entries to avoid unexpected VPN matches. */
        for (w = 0; w < SH3_MMU_WAY; w++) {
                aw = (w << SH3_MMU_WAY_SHIFT);
                for (e = 0; e < SH3_MMU_ENTRY; e++) {
                        a = aw | (e << SH3_MMU_VPN_SHIFT);
                        _reg_write_4(SH3_MMUAA | a, 0);
                        _reg_write_4(SH3_MMUDA | a, 0);
                }
        }
}
void
sh4_tlb_invalidate_addr(int asid, vaddr_t va)
{
        u_int32_t pteh;

        va &= SH4_PTEH_VPN_MASK;

        /* Save current ASID */
        pteh = _reg_read_4(SH4_PTEH);

        /* Set ASID for associative write */
        _reg_write_4(SH4_PTEH, asid);

        /*
         * Associative write to the UTLB also searches the ITLB, so no
         * separate ITLB invalidation is required.
         */
        RUN_P2;
        _reg_write_4(SH4_UTLB_AA | SH4_UTLB_A, va);     /* Clear D, V */
        RUN_P1;

        /* Restore ASID */
        _reg_write_4(SH4_PTEH, pteh);
}
void
sh3_mmu_start()
{
        /* Zero-clear all TLB entries */
        sh3_tlb_invalidate_all();

        /* Set current ASID to 0 */
        sh_tlb_set_asid(0);

        _reg_write_4(SH3_MMUCR, SH3_MMUCR_AT | SH3_MMUCR_TF);
}
void
sh4_mmu_start()
{
        /* Zero-clear all TLB entries */
        _reg_write_4(SH4_MMUCR, 0);     /* zero wired entries */
        sh_tlb_invalidate_all();

        /* Set current ASID to 0 */
        sh_tlb_set_asid(0);

        /*
         * Disable user access to the store queues and reserve wired
         * entries for the u-area.
         */
        _reg_write_4(SH4_MMUCR, SH4_MMUCR_AT | SH4_MMUCR_TI | SH4_MMUCR_SQMD |
            (SH4_UTLB_ENTRY - UPAGES) << SH4_MMUCR_URB_SHIFT);

        SH4_MMU_HAZARD;
}
int
timer2_intr(void *arg)
{
        _reg_write_4(T2_MODE_REG, T_MODE_EQUF | T_MODE_OVFF);

#ifdef __HAVE_FAST_SOFTINTS
        softintr_dispatch(1);   /* IPL_SOFTCLOCK */
#endif

        return (1);
}
/*
 * Jump to reset vector.
 */
void
cpu_reset()
{
        _cpu_exception_suspend();
        _reg_write_4(SH_(EXPEVT), EXPEVT_RESET_MANUAL);

#ifndef __lint__
        goto *(void *)0xa0000000;
#endif
        /* NOTREACHED */
}
void
machine_reset(void)
{
        _cpu_exception_suspend();
        _reg_write_4(SH_(EXPEVT), EXPEVT_RESET_MANUAL);
        (void)*(volatile uint32_t *)0x80000001; /* CPU shutdown */
        /* NOTREACHED */
        for (;;) {
                continue;
        }
}
/*
 * Interrupt handler for the clock interrupt (100Hz).
 */
int
timer0_intr(void *arg)
{
        _reg_write_4(T0_MODE_REG, _reg_read_4(T0_MODE_REG) | T_MODE_EQUF);

        _playstation2_evcnt.clock.ev_count++;
        hardclock(&playstation2_clockframe);

        return (1);
}
int
timer3_intr(void *arg)
{
        _reg_write_4(T3_MODE_REG, T_MODE_EQUF | T_MODE_OVFF);

#ifdef __HAVE_FAST_SOFTINTS
        softintr_dispatch(3);   /* IPL_SOFTSERIAL */
        softintr_dispatch(2);   /* IPL_SOFTNET */
#endif

        return (1);
}
void
sh3_tlb_update(int asid, vaddr_t va, uint32_t pte)
{
        uint32_t oasid;

        KDASSERT(asid < 0x100 && (pte & ~PGOFSET) != 0 && va != 0);

        /* Save old ASID */
        oasid = _reg_read_4(SH3_PTEH) & SH3_PTEH_ASID_MASK;

        /* Invalidate old entry (if any) */
        sh3_tlb_invalidate_addr(asid, va);

        /* Load new entry */
        _reg_write_4(SH3_PTEH, (va & ~PGOFSET) | asid);
        _reg_write_4(SH3_PTEL, pte & PG_HW_BITS);
        __asm volatile("ldtlb");

        /* Restore old ASID */
        if (asid != oasid)
                _reg_write_4(SH3_PTEH, oasid);
}
void
sh4_tlb_update(int asid, vaddr_t va, u_int32_t pte)
{
        u_int32_t oasid;
        u_int32_t ptel;

        KDASSERT(asid < 0x100 && (pte & ~PGOFSET) != 0 && va != 0);

        /* Save old ASID */
        oasid = _reg_read_4(SH4_PTEH) & SH4_PTEH_ASID_MASK;

        /* Invalidate old entry (if any) */
        sh4_tlb_invalidate_addr(asid, va);

        _reg_write_4(SH4_PTEH, asid);

        /* Load new entry */
        _reg_write_4(SH4_PTEH, (va & ~PGOFSET) | asid);
        ptel = pte & PG_HW_BITS;
        if (pte & _PG_PCMCIA) {
                _reg_write_4(SH4_PTEA,
                    (pte >> _PG_PCMCIA_SHIFT) & SH4_PTEA_SA_MASK);
        } else {
                _reg_write_4(SH4_PTEA, 0);
        }
        _reg_write_4(SH4_PTEL, ptel);
        __asm volatile("ldtlb");

        /* Restore old ASID */
        if (asid != oasid)
                _reg_write_4(SH4_PTEH, oasid);
}
int
sbus_intr(void *arg)
{
        u_int32_t stat;

        _playstation2_evcnt.sbus.ev_count++;

        stat = _reg_read_4(SBUS_SMFLG_REG);

        if (stat & SMFLG_PCMCIA_INT) {
                (*sbus_pcmcia_intr_clear)();
                _reg_write_4(SBUS_SMFLG_REG, SMFLG_PCMCIA_INT);
                (*sbus_pcmcia_intr)(sbus_pcmcia_context);
        }

        if (stat & SMFLG_USB_INT) {
                _reg_write_4(SBUS_SMFLG_REG, SMFLG_USB_INT);
                (*sbus_usb_intr)(sbus_usb_context);
        }

        (*sbus_pcmcia_intr_reinstall)();

        return (1);
}
void
sbus_init(int type)
{
        /* install model dependent hook */
#define SET_PCMCIA_INTR_OPS(x)                                          \
        sbus_pcmcia_intr_clear = sbus_type##x##_pcmcia_intr_clear;      \
        sbus_pcmcia_intr_enable = sbus_type##x##_pcmcia_intr_enable;    \
        sbus_pcmcia_intr_disable = sbus_type##x##_pcmcia_intr_disable;  \
        sbus_pcmcia_intr_reinstall = sbus_type##x##_pcmcia_intr_reinstall

        switch (type) {
        default:
                panic("unknown pcmcia controller type = %d", type);
                break;
        case 0:
                /* FALLTHROUGH */
        case 1:
                /* FALLTHROUGH */
        case 2:
                SET_PCMCIA_INTR_OPS(2);
                break;
        case 3:
                SET_PCMCIA_INTR_OPS(3);
                break;
        }
#undef SET_PCMCIA_INTR_OPS

        /* disable interrupt */
        (*sbus_pcmcia_intr_disable)();

        /* clear interrupt */
        (*sbus_pcmcia_intr_clear)();
        _reg_write_4(SBUS_SMFLG_REG, SMFLG_PCMCIA_INT);
        _reg_write_4(SBUS_SMFLG_REG, SMFLG_USB_INT);

        /* connect to INTC */
        intc_intr_establish(I_CH1_SBUS, IPL_BIO, sbus_intr, 0);
}
void
sh4_tlb_invalidate_asid(int asid)
{
        u_int32_t a;
        int e;

        /* Invalidate every UTLB entry whose ASID matches */
        RUN_P2;
        for (e = 0; e < SH4_UTLB_ENTRY; e++) {
                a = SH4_UTLB_AA | (e << SH4_UTLB_E_SHIFT);
                if ((_reg_read_4(a) & SH4_UTLB_AA_ASID_MASK) == asid)
                        _reg_write_4(a, 0);
        }

        __sh4_itlb_invalidate_all();
        RUN_P1;
}
//
// Get a physical address from the memory-mapped TLB.
// SH3 version.  This method can't be used on SH4, because the SH4
// address/data arrays must be accessed from P2.
//
paddr_t
MemoryManager_SHMMU::searchPage(vaddr_t vaddr)
{
        u_int32_t vpn, idx, s, dum, aae, dae, entry_idx, asid;
        paddr_t paddr = ~0;
        int way, kmode;

        vpn = vaddr & SH3_PAGE_MASK;

        // Windows CE uses VPN-only index-mode.
        idx = vaddr & SH3_MMU_VPN_MASK;

        kmode = SetKMode(1);

        // Get current ASID.
        asid = _reg_read_4(SH3_PTEH) & SH3_PTEH_ASID_MASK;

        // To avoid another TLB access, disable external interrupts.
        s = suspendIntr();

        do {
                // Load the target address's page into the TLB.
                dum = _reg_read_4(vaddr);
                _reg_write_4(vaddr, dum);

                for (way = 0; way < SH3_MMU_WAY; way++) {
                        entry_idx = idx | (way << SH3_MMU_WAY_SHIFT);

                        // Inquire MMU address array.
                        aae = _reg_read_4(SH3_MMUAA | entry_idx);

                        if (!(aae & SH3_MMU_D_VALID) ||
                            ((aae & SH3_MMUAA_D_ASID_MASK) != asid) ||
                            (((aae | idx) & SH3_PAGE_MASK) != vpn))
                                continue;

                        // Entry found.  Inquire MMU data array to get its
                        // physical address.
                        dae = _reg_read_4(SH3_MMUDA | entry_idx);
                        paddr = (dae & SH3_PAGE_MASK) |
                            (vaddr & ~SH3_PAGE_MASK);
                        break;
                }
        } while (paddr == ~0);

        resumeIntr(s);
        SetKMode(kmode);

        return paddr;
}
void
sh3_tlb_invalidate_asid(int asid)
{
        uint32_t aw, a;
        int e, w;

        /* Invalidate every entry whose ASID matches */
        for (w = 0; w < SH3_MMU_WAY; w++) {
                aw = (w << SH3_MMU_WAY_SHIFT);
                for (e = 0; e < SH3_MMU_ENTRY; e++) {
                        a = aw | (e << SH3_MMU_VPN_SHIFT);
                        if ((_reg_read_4(SH3_MMUAA | a) &
                            SH3_MMUAA_D_ASID_MASK) == asid) {
                                _reg_write_4(SH3_MMUAA | a, 0);
                        }
                }
        }
}
void
sh4_tlb_invalidate_all()
{
        u_int32_t a;
        int e, eend;

        /*
         * If the non-wired entry limit (URB) is zero, clear all entries;
         * otherwise clear only the non-wired region.
         */
        a = _reg_read_4(SH4_MMUCR) & SH4_MMUCR_URB_MASK;
        eend = a ? (a >> SH4_MMUCR_URB_SHIFT) : SH4_UTLB_ENTRY;

        RUN_P2;
        for (e = 0; e < eend; e++) {
                a = SH4_UTLB_AA | (e << SH4_UTLB_E_SHIFT);
                _reg_write_4(a, 0);
        }
        __sh4_itlb_invalidate_all();
        RUN_P1;
}
void
sh3_tlb_invalidate_addr(int asid, vaddr_t va)
{
        uint32_t a, d;
        int w;

        d = (va & SH3_MMUAA_D_VPN_MASK_4K) | asid;      /* 4K page */
        va = va & SH3_MMU_VPN_MASK;     /* [16:12] entry index */

        /* Probe entry and invalidate it. */
        for (w = 0; w < SH3_MMU_WAY; w++) {
                a = va | (w << SH3_MMU_WAY_SHIFT);      /* way [9:8] */
                if ((_reg_read_4(SH3_MMUAA | a) &
                    (SH3_MMUAA_D_VPN_MASK_4K | SH3_MMUAA_D_ASID_MASK)) == d) {
                        _reg_write_4(SH3_MMUAA | a, 0);
                        break;
                }
        }
}
/*
 * Prepare context switch from oproc to nproc.
 * This code is used by cpu_switchto.
 */
void
cpu_switch_prepare(struct proc *oproc, struct proc *nproc)
{
        nproc->p_stat = SONPROC;

        if (oproc && (oproc->p_md.md_flags & MDP_STEP))
                _reg_write_2(SH_(BBRB), 0);

        curpcb = nproc->p_md.md_pcb;
        pmap_activate(nproc);

        if (nproc->p_md.md_flags & MDP_STEP) {
                int pm_asid = nproc->p_vmspace->vm_map.pmap->pm_asid;

                _reg_write_2(SH_(BBRB), 0);
                _reg_write_4(SH_(BARB), nproc->p_md.md_regs->tf_spc);
                _reg_write_1(SH_(BASRB), pm_asid);
                _reg_write_1(SH_(BAMRB), 0);
                _reg_write_2(SH_(BRCR), 0x0040);
                _reg_write_2(SH_(BBRB), 0x0014);
        }

        curproc = nproc;
}
/*
 * Start the clock interrupt.
 */
void
cpu_initclocks()
{
        if (sh_clock.pclock == 0)
                panic("No PCLOCK information.");

        /* Set global variables. */
        hz = HZ;
        tick = 1000000 / hz;

        /*
         * Use TMU channel 0 as hard clock.
         */
        _reg_bclr_1(SH_(TSTR), TSTR_STR0);

        if (sh_clock.flags & SH_CLOCK_NORTC) {
                /* use PCLOCK/16 as TMU0 source */
                _reg_write_2(SH_(TCR0), TCR_UNIE | TCR_TPSC_P16);
        } else {
                /* use RTC clock as TMU0 source */
                _reg_write_2(SH_(TCR0), TCR_UNIE |
                    (CPU_IS_SH3 ? SH3_TCR_TPSC_RTC : SH4_TCR_TPSC_RTC));
        }
        sh_clock.hz_cnt = sh_clock.tmuclk / hz - 1;
        _reg_write_4(SH_(TCOR0), sh_clock.hz_cnt);
        _reg_write_4(SH_(TCNT0), sh_clock.hz_cnt);

        intc_intr_establish(SH_INTEVT_TMU0_TUNI0, IST_LEVEL, IPL_CLOCK,
            CPU_IS_SH3 ? sh3_clock_intr : sh4_clock_intr, NULL, "clock");

        /* start hardclock */
        _reg_bset_1(SH_(TSTR), TSTR_STR0);

        /*
         * TMU channel 1 is a one-shot timer for soft interrupts.
         */
        _reg_write_2(SH_(TCR1), TCR_UNIE | TCR_TPSC_P4);
        _reg_write_4(SH_(TCOR1), 0xffffffff);

        /*
         * TMU channel 2 is a free-running counter for the timecounter.
         */
        _reg_write_2(SH_(TCR2), TCR_TPSC_P4);
        _reg_write_4(SH_(TCOR2), 0xffffffff);

        /*
         * Start and initialize the timecounter.
         */
        _reg_bset_1(SH_(TSTR), TSTR_STR2);

        sh_clock.tc.tc_get_timecount = sh_timecounter_get;
        sh_clock.tc.tc_frequency = sh_clock.pclock / 4;
        sh_clock.tc.tc_name = "tmu_pclock_4";
        sh_clock.tc.tc_quality = 100;
        sh_clock.tc.tc_counter_mask = 0xffffffff;
        tc_init(&sh_clock.tc);

        /* Make sure to start the RTC */
        if (sh_clock.rtc.init != NULL)
                sh_clock.rtc.init(sh_clock.rtc._cookie);
}
static void
shpcic_attach(device_t parent, device_t self, void *aux)
{
        struct pcibus_attach_args pba;
#ifdef PCI_NETBSD_CONFIGURE
        struct extent *ioext, *memext;
#endif
        pcireg_t id, class;
        char devinfo[256];

        shpcic_found = 1;

        aprint_naive("\n");

        id = _reg_read_4(SH4_PCICONF0);
        class = _reg_read_4(SH4_PCICONF2);
        pci_devinfo(id, class, 1, devinfo, sizeof(devinfo));
        aprint_normal(": %s\n", devinfo);

        /* allow PCIC request */
        _reg_write_4(SH4_BCR1, _reg_read_4(SH4_BCR1) | BCR1_BREQEN);

        /* Initialize PCIC */
        _reg_write_4(SH4_PCICR, PCICR_BASE | PCICR_RSTCTL);
        delay(10 * 1000);
        _reg_write_4(SH4_PCICR, PCICR_BASE);

        /* Class: Host-Bridge */
        _reg_write_4(SH4_PCICONF2,
            PCI_CLASS_CODE(PCI_CLASS_BRIDGE, PCI_SUBCLASS_BRIDGE_HOST, 0x00));

#if !defined(DONT_INIT_PCIBSC)
#if defined(PCIBCR_BCR1_VAL)
        _reg_write_4(SH4_PCIBCR1, PCIBCR_BCR1_VAL);
#else
        _reg_write_4(SH4_PCIBCR1, _reg_read_4(SH4_BCR1) | BCR1_MASTER);
#endif
#if defined(PCIBCR_BCR2_VAL)
        _reg_write_4(SH4_PCIBCR2, PCIBCR_BCR2_VAL);
#else
        _reg_write_4(SH4_PCIBCR2, _reg_read_2(SH4_BCR2));
#endif
#if defined(SH4) && defined(SH7751R)
        if (cpu_product == CPU_PRODUCT_7751R) {
#if defined(PCIBCR_BCR3_VAL)
                _reg_write_4(SH4_PCIBCR3, PCIBCR_BCR3_VAL);
#else
                _reg_write_4(SH4_PCIBCR3, _reg_read_2(SH4_BCR3));
#endif
        }
#endif  /* SH4 && SH7751R && PCIBCR_BCR3_VAL */
#if defined(PCIBCR_WCR1_VAL)
        _reg_write_4(SH4_PCIWCR1, PCIBCR_WCR1_VAL);
#else
        _reg_write_4(SH4_PCIWCR1, _reg_read_4(SH4_WCR1));
#endif
#if defined(PCIBCR_WCR2_VAL)
        _reg_write_4(SH4_PCIWCR2, PCIBCR_WCR2_VAL);
#else
        _reg_write_4(SH4_PCIWCR2, _reg_read_4(SH4_WCR2));
#endif
#if defined(PCIBCR_WCR3_VAL)
        _reg_write_4(SH4_PCIWCR3, PCIBCR_WCR3_VAL);
#else
        _reg_write_4(SH4_PCIWCR3, _reg_read_4(SH4_WCR3));
#endif
#if defined(PCIBCR_MCR_VAL)
        _reg_write_4(SH4_PCIMCR, PCIBCR_MCR_VAL);
#else
        _reg_write_4(SH4_PCIMCR, _reg_read_4(SH4_MCR));
#endif
#endif  /* !DONT_INIT_PCIBSC */

        /* set PCI I/O, memory base address */
        _reg_write_4(SH4_PCIIOBR, SH4_PCIC_IO);
        _reg_write_4(SH4_PCIMBR, SH4_PCIC_MEM);

        /* set PCI local address 0 */
        _reg_write_4(SH4_PCILSR0, (64 - 1) << 20);
        _reg_write_4(SH4_PCILAR0, 0xac000000);
        _reg_write_4(SH4_PCICONF5, 0xac000000);

        /* set PCI local address 1 */
        _reg_write_4(SH4_PCILSR1, (64 - 1) << 20);
        _reg_write_4(SH4_PCILAR1, 0xac000000);
        _reg_write_4(SH4_PCICONF6, 0x8c000000);

        /* Enable I/O, memory, bus-master */
        _reg_write_4(SH4_PCICONF1, PCI_COMMAND_IO_ENABLE |
            PCI_COMMAND_MEM_ENABLE |
            PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_STEPPING_ENABLE |
            PCI_STATUS_DEVSEL_MEDIUM);

        /* Initialize done. */
        _reg_write_4(SH4_PCICR, PCICR_BASE | PCICR_CFINIT);

        /* set PCI controller interrupt priority */
        intpri_intr_priority(SH4_INTEVT_PCIERR, shpcic_intr_priority[0]);
        intpri_intr_priority(SH4_INTEVT_PCISERR, shpcic_intr_priority[1]);

        /* PCI bus */
#ifdef PCI_NETBSD_CONFIGURE
        ioext = extent_create("pciio", SH4_PCIC_IO,
            SH4_PCIC_IO + SH4_PCIC_IO_SIZE - 1,
            M_DEVBUF, NULL, 0, EX_NOWAIT);
        memext = extent_create("pcimem", SH4_PCIC_MEM,
            SH4_PCIC_MEM + SH4_PCIC_MEM_SIZE - 1,
            M_DEVBUF, NULL, 0, EX_NOWAIT);

        pci_configure_bus(NULL, ioext, memext, NULL, 0, sh_cache_line_size);

        extent_destroy(ioext);
        extent_destroy(memext);
#endif

        /* PCI bus */
        memset(&pba, 0, sizeof(pba));
        pba.pba_iot = shpcic_get_bus_io_tag();
        pba.pba_memt = shpcic_get_bus_mem_tag();
        pba.pba_dmat = shpcic_get_bus_dma_tag();
        pba.pba_dmat64 = NULL;
        pba.pba_pc = NULL;
        pba.pba_bus = 0;
        pba.pba_bridgetag = NULL;
        pba.pba_flags = PCI_FLAGS_IO_ENABLED | PCI_FLAGS_MEM_ENABLED;
        config_found(self, &pba, NULL);
}
void
InitializeBsc(void)
{
        /*
         * Drive RAS, CAS in standby mode and bus release mode
         * Area0 = Normal memory, Area5,6 = Normal (no burst)
         * Area2 = Normal memory, Area3 = SDRAM, Area5 = Normal memory
         * Area4 = Normal memory
         * Area6 = Normal memory
         */
        _reg_write_4(SH4_BCR1, BSC_BCR1_VAL);

        /*
         * Bus width
         * Area4: Bus width = 16bit
         * Area6,5 = 16bit
         * Area1 = 8bit
         * Area2,3: Bus width = 32bit
         */
        _reg_write_2(SH4_BCR2, BSC_BCR2_VAL);

#if defined(SH4) && defined(SH7751R)
        if (cpu_product == CPU_PRODUCT_7751R) {
#ifdef BSC_BCR3_VAL
                _reg_write_2(SH4_BCR3, BSC_BCR3_VAL);
#endif
#ifdef BSC_BCR4_VAL
                _reg_write_4(SH4_BCR4, BSC_BCR4_VAL);
#endif
        }
#endif  /* SH4 && SH7751R */

        /*
         * Idle cycle number in transition area and read to write
         * Area6 = 3, Area5 = 3, Area4 = 3, Area3 = 3, Area2 = 3
         * Area1 = 3, Area0 = 3
         */
        _reg_write_4(SH4_WCR1, BSC_WCR1_VAL);

        /*
         * Wait cycles
         * Area6 = 6
         * Area5 = 2
         * Area4 = 10
         * Area3 = 3
         * Area2,1 = 3
         * Area0 = 6
         */
        _reg_write_4(SH4_WCR2, BSC_WCR2_VAL);

#ifdef BSC_WCR3_VAL
        _reg_write_4(SH4_WCR3, BSC_WCR3_VAL);
#endif

        /*
         * RAS pre-charge = 2 cycles, RAS-CAS delay = 3 cycles,
         * write pre-charge = 1 cycle
         * CAS-before-RAS refresh RAS assert time = 3 cycles
         * Disable burst, bus size = 32bit, column address = 10bit,
         * refresh ON, CAS-before-RAS refresh ON, EDO DRAM
         */
        _reg_write_4(SH4_MCR, BSC_MCR_VAL);

#ifdef BSC_SDMR2_VAL
        _reg_write_1(BSC_SDMR2_VAL, 0);
#endif
#ifdef BSC_SDMR3_VAL
        _reg_write_1(BSC_SDMR3_VAL, 0);
#endif  /* BSC_SDMR3_VAL */

        /*
         * PCMCIA Control Register
         * OE/WE assert delay 3.5 cycles
         * OE/WE negate-address delay 3.5 cycles
         */
#ifdef BSC_PCR_VAL
        _reg_write_2(SH4_PCR, BSC_PCR_VAL);
#endif

        /*
         * Refresh Timer Control/Status Register
         * Disable interrupt by CMF, clock 1/16, disable OVF interrupt
         * Count limit = 1024
         * In the following statements the high byte must be 0xa5
         * (0xa4 for RFCR); that is the rule for writing these registers.
         */
        _reg_write_2(SH4_RTCSR, BSC_RTCSR_VAL);

        /*
         * Refresh Timer Counter
         * Initialize to 0
         */
#ifdef BSC_RTCNT_VAL
        _reg_write_2(SH4_RTCNT, BSC_RTCNT_VAL);
#endif

        /* set Refresh Time Constant Register */
        _reg_write_2(SH4_RTCOR, BSC_RTCOR_VAL);

        /* init Refresh Count Register */
#ifdef BSC_RFCR_VAL
        _reg_write_2(SH4_RFCR, BSC_RFCR_VAL);
#endif

        /*
         * Clock Pulse Generator
         * Set clock mode (make the internal clock double speed).
         */
        _reg_write_2(SH4_FRQCR, FRQCR_VAL);
}
void
sh_tlb_set_asid(int asid)
{
        _reg_write_4(SH_(PTEH), asid);
}
void
sh_cpu_init(int arch, int product)
{
        /* CPU type */
        cpu_arch = arch;
        cpu_product = product;

#if defined(SH3) && defined(SH4)
        /* Set register addresses */
        sh_devreg_init();
#endif
        /* Cache access ops. */
        sh_cache_init();

        /* MMU access ops. */
        sh_mmu_init();

        /* Hardclock, RTC initialize. */
        machine_clock_init();

        /* ICU initialize. */
        curcpu()->ci_idepth = -1;
        intc_init();

        /* Exception vector. */
        memcpy(VBR + 0x100, sh_vector_generic,
            sh_vector_generic_end - sh_vector_generic);
#ifdef SH3
        if (CPU_IS_SH3)
                memcpy(VBR + 0x400, sh3_vector_tlbmiss,
                    sh3_vector_tlbmiss_end - sh3_vector_tlbmiss);
#endif
#ifdef SH4
        if (CPU_IS_SH4)
                memcpy(VBR + 0x400, sh4_vector_tlbmiss,
                    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss);
#endif
        memcpy(VBR + 0x600, sh_vector_interrupt,
            sh_vector_interrupt_end - sh_vector_interrupt);

        if (!SH_HAS_UNIFIED_CACHE)
                sh_icache_sync_all();

        __asm volatile("ldc %0, vbr" :: "r"(VBR));

        /* kernel stack setup */
        __sh_switch_resume = CPU_IS_SH3 ? sh3_switch_resume : sh4_switch_resume;

        /* Set page size (4KB) */
        uvm_setpagesize();

        /* setup UBC channel A for single-stepping */
#if defined(PTRACE) || defined(DDB)
        _reg_write_2(SH_(BBRA), 0);     /* disable channel A */
        _reg_write_2(SH_(BBRB), 0);     /* disable channel B */

#ifdef SH3
        if (CPU_IS_SH3) {
                /* A: break after execution, ignore ASID */
                _reg_write_4(SH3_BRCR,
                    (UBC_CTL_A_AFTER_INSN | SH3_UBC_CTL_A_MASK_ASID));

                /* A: compare all address bits */
                _reg_write_4(SH3_BAMRA, 0x00000000);
        }
#endif  /* SH3 */

#ifdef SH4
        if (CPU_IS_SH4) {
                /* A: break after execution */
                _reg_write_2(SH4_BRCR, UBC_CTL_A_AFTER_INSN);

                /* A: compare all address bits, ignore ASID */
                _reg_write_1(SH4_BAMRA, SH4_UBC_MASK_NONE | SH4_UBC_MASK_ASID);
        }
#endif  /* SH4 */
#endif
}