int interrupts_init(void)
{
	int i;

	/* initialize irq list */
	for (i = 0; i < CONFIG_SYS_INTC_0_NUM; i++) {
		vecs[i].handler = (interrupt_handler_t *)def_hdlr;
		vecs[i].arg = (void *)i;
		vecs[i].count = 0;
	}

	/* initialize intc controller */
	intc_init();

	enable_interrupts();

	return 0;
}
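/*
 * A minimal sketch of the pieces interrupts_init() above relies on: a
 * default handler that just reports stray interrupts, and the routine a
 * driver would call to claim a vector. The bodies here are illustrative
 * assumptions, not the actual U-Boot microblaze implementation (the cast
 * above suggests def_hdlr's native signature differs from
 * interrupt_handler_t).
 */
static void def_hdlr(void *arg)
{
	/* arg is the vector number stored by interrupts_init() */
	printf("unhandled interrupt %d\n", (int)(unsigned long)arg);
}

void install_interrupt_handler(int irq, interrupt_handler_t *hdlr, void *arg)
{
	if (irq < 0 || irq >= CONFIG_SYS_INTC_0_NUM)
		return;
	vecs[irq].handler = hdlr;	/* driver's handler */
	vecs[irq].arg = arg;		/* cookie passed back on dispatch */
	vecs[irq].count = 0;
}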
// Interrupt-related initialization
void system_initexception(void)
{
	// Set up the exception vector table
	pExceptionUNDEF  = (unsigned long)exceptionundef;
	pExceptionSWI    = (unsigned long)exceptionswi;
	pExceptionPABORT = (unsigned long)exceptionpabort;
	pExceptionDABORT = (unsigned long)exceptiondabort;
	pExceptionIRQ    = (unsigned long)OS_CPU_IRQ_ISR;
	pExceptionFIQ    = (unsigned long)OS_CPU_IRQ_ISR;

	// Initialize the interrupt controller
	intc_init();
}
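/*
 * Sketch of what the pException* slots written above could be, assuming
 * the common ARM scheme where each hardware vector is an
 * "ldr pc, [pc, #offset]" branching through a word of RAM: each slot is
 * then just a fixed RAM word holding a handler address. The base address
 * and offsets below are assumptions for illustration only.
 */
#define VECTOR_SLOT_BASE	0x00000020UL	/* hypothetical: after the 8 vectors */

#define pExceptionUNDEF		(*(volatile unsigned long *)(VECTOR_SLOT_BASE + 0x04))
#define pExceptionSWI		(*(volatile unsigned long *)(VECTOR_SLOT_BASE + 0x08))
#define pExceptionPABORT	(*(volatile unsigned long *)(VECTOR_SLOT_BASE + 0x0c))
#define pExceptionDABORT	(*(volatile unsigned long *)(VECTOR_SLOT_BASE + 0x10))
#define pExceptionIRQ		(*(volatile unsigned long *)(VECTOR_SLOT_BASE + 0x18))
#define pExceptionFIQ		(*(volatile unsigned long *)(VECTOR_SLOT_BASE + 0x1c))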
void interrupt_init_bootstrap(void)
{
	int i;

	/* initialize interrupt masks (all masked) */
	for (i = 0; i < _IPL_N; i++)
		__icu_mask[i] = 0xffffffff;

	/* initialize the EE embedded device */
	timer_init();

	/* clear all pending interrupts and disable all */
	intc_init();	/* INT0 */
	dmac_init();	/* INT1 */
}
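/*
 * Sketch of how the __icu_mask[] table initialized above is typically
 * consumed: raising the interrupt priority level just loads the
 * precomputed mask for that level into the controller. The function and
 * the register-write helper below are assumptions for illustration.
 */
static int current_ipl;

int _splraise(int ipl)
{
	int old = current_ipl;

	if (ipl > old) {
		current_ipl = ipl;
		/* hypothetical helper: program the INTC mask register */
		intc_set_mask(__icu_mask[ipl]);
	}
	return old;
}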
int __cpuinit arch_host_irq_init(void)
{
	int rc;
	physical_addr_t intc_pa;
	struct vmm_devtree_node *node;

	node = vmm_devtree_find_compatible(NULL, NULL, "ti,omap2-intc");
	if (!node) {
		return VMM_ENODEV;
	}

	rc = vmm_devtree_regaddr(node, &intc_pa, 0);
	if (rc) {
		return rc;
	}

	return intc_init(intc_pa, OMAP3_MPU_INTC_NRIRQ);
}
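/*
 * Assumed contract of the intc_init() called above: map the controller
 * found in the device tree and register its IRQ lines with the host IRQ
 * layer. This is a sketch of that shape, not Xvisor's actual OMAP code;
 * vmm_host_iomap() is used on the assumption that a 4 KiB register
 * window is enough.
 */
int intc_init(physical_addr_t base_pa, u32 nr_irqs)
{
	virtual_addr_t base_va;

	/* map the controller registers into the hypervisor address space */
	base_va = vmm_host_iomap(base_pa, 0x1000);
	if (!base_va) {
		return VMM_EFAIL;
	}

	/* ... mask all lines, then hand each of the nr_irqs to the core ... */

	return VMM_OK;
}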
int interrupts_init(void)
{
	int i;

	/* initialize irq list */
	for (i = 0; i < CFG_INTC_0_NUM; i++) {
		vecs[i].handler = (interrupt_handler_t *)def_hdlr;
		vecs[i].arg = (void *)i;
		vecs[i].count = 0;
	}

	/* initialize intc controller */
	intc_init();

#ifdef CFG_TIMER_0
	timer_init();
#endif
#ifdef CFG_FSL_2
	fsl_init2();
#endif

	enable_interrupts();

	return 0;
}
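/*
 * Usage sketch: with CFG_TIMER_0 set, the timer_init() above would claim
 * its vector through the registration path roughly like this. The
 * register fields, constants, and ack sequence are assumptions for
 * illustration, modeled on the usual microblaze timer layout.
 */
static volatile ulong timestamp;

static void timer_isr(void *arg)
{
	microblaze_timer_t *tmr = arg;

	timestamp++;
	tmr->control |= TIMER_INTERRUPT;	/* write T0INT back to ack */
}

void timer_init(void)
{
	microblaze_timer_t *tmr = (microblaze_timer_t *)CFG_TIMER_0_ADDR;

	tmr->loadreg = CFG_TIMER_0_PRELOAD;
	tmr->control = TIMER_INTERRUPT | TIMER_RESET;
	tmr->control = TIMER_ENABLE | TIMER_ENABLE_INTR | TIMER_RELOAD;
	install_interrupt_handler(CFG_TIMER_0_IRQ,
	    (interrupt_handler_t *)timer_isr, (void *)tmr);
}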
int main(void)
{
	struct task *next;

	/* Set the CPU speed */
	uint32_t skuid = read32(DEVICEID_BASE + DEVICEID_SKUID_OFFSET);
	uint32_t cpuspeed_id = skuid & DEVICEID_SKUID_CPUSPEED_MASK;
	uint32_t clksel_val = (1 << 19) | 12;

	if (cpuspeed_id == DEVICEID_SKUID_CPUSPEED_720)
		clksel_val |= (720 << 8);
	else if (cpuspeed_id == DEVICEID_SKUID_CPUSPEED_600)
		clksel_val |= (600 << 8);
	else
		panic("Unsupported CPU!");
	write32(CM_MPU_BASE + PRM_CLKSEL1_PLL_MPU_OFFSET, clksel_val);

	/* Basic hardware initialization */
	init_cpumodes();	// set up CPU modes for interrupt handling
	intc_init();		// initialize interrupt controller
	gpio_init();		// initialize gpio interrupt system

	/* Start up hardware */
	timers_init();		// must come first, since it initializes the watchdog
	eth_init();
	uart_init();

	/*
	 * For some reason, turning on the caches causes the kernel to hang
	 * after finishing the third invocation. Maybe we have to clear the
	 * caches here, or enable the MMU.
	 */
	printk("mmu init\n");
	prep_pagetable();
	init_mmu();
	printk("cache init\n");
	init_cache();

	/* Initialize other interrupts */
	init_interrupts();

	/* Initialize task queues */
	init_tasks();

	/* Initialize idle task */
	syscall_spawn(NULL, 7, idle_task, NULL, 0, SPAWN_DAEMON);

	pmu_enable();
	trace_init();

	printk("userspace init\n");
	/* Initialize first user program */
	syscall_spawn(NULL, 6, init_task, NULL, 0, 0);

	while (nondaemon_count > 0) {
		next = schedule();
		task_activate(next);
		check_stack(next);
	}

	pmu_disable();
	intc_reset();
	eth_deinit();
	deinit_mmu();

	return 0;
}
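/*
 * Worked example of the clksel_val packing above, assuming the OMAP3
 * CM_CLKSEL1_PLL_MPU layout: the DPLL multiplier M occupies bits 8..18
 * and the divider N the low bits. With a 26 MHz reference, M = 720 and
 * N = 12 give 26 MHz * 720 / (12 + 1) = 1440 MHz at the DPLL, which the
 * M2 output divider brings to 720 MHz (assuming M2 = 2); M = 600 lands
 * at 600 MHz the same way. A hypothetical helper making this explicit:
 */
static uint32_t mpu_clksel(uint32_t mult_m, uint32_t div_n)
{
	return (1u << 19)	/* clock-source field, kept at the original value */
	    | (mult_m << 8)	/* M: DPLL multiplier, bits 8..18 */
	    | div_n;		/* N: DPLL divider, bits 0..6 */
}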
void sh_cpu_init(int arch, int product)
{
	/* CPU type */
	cpu_arch = arch;
	cpu_product = product;

#if defined(SH3) && defined(SH4)
	/* Set register addresses */
	sh_devreg_init();
#endif
	/* Cache access ops. */
	sh_cache_init();

	/* MMU access ops. */
	sh_mmu_init();

	/* Hardclock, RTC initialize. */
	machine_clock_init();

	/* ICU initialize. */
	curcpu()->ci_idepth = -1;
	intc_init();

	/* Exception vector. */
	memcpy(VBR + 0x100, sh_vector_generic,
	    sh_vector_generic_end - sh_vector_generic);
#ifdef SH3
	if (CPU_IS_SH3)
		memcpy(VBR + 0x400, sh3_vector_tlbmiss,
		    sh3_vector_tlbmiss_end - sh3_vector_tlbmiss);
#endif
#ifdef SH4
	if (CPU_IS_SH4)
		memcpy(VBR + 0x400, sh4_vector_tlbmiss,
		    sh4_vector_tlbmiss_end - sh4_vector_tlbmiss);
#endif
	memcpy(VBR + 0x600, sh_vector_interrupt,
	    sh_vector_interrupt_end - sh_vector_interrupt);

	if (!SH_HAS_UNIFIED_CACHE)
		sh_icache_sync_all();

	__asm volatile("ldc %0, vbr" :: "r"(VBR));

	/* kernel stack setup */
	__sh_switch_resume = CPU_IS_SH3 ? sh3_switch_resume : sh4_switch_resume;

	/* Set page size (4KB) */
	uvm_setpagesize();

	/* setup UBC channel A for single-stepping */
#if defined(PTRACE) || defined(DDB)
	_reg_write_2(SH_(BBRA), 0);	/* disable channel A */
	_reg_write_2(SH_(BBRB), 0);	/* disable channel B */

#ifdef SH3
	if (CPU_IS_SH3) {
		/* A: break after execution, ignore ASID */
		_reg_write_4(SH3_BRCR,
		    (UBC_CTL_A_AFTER_INSN | SH3_UBC_CTL_A_MASK_ASID));
		/* A: compare all address bits */
		_reg_write_4(SH3_BAMRA, 0x00000000);
	}
#endif /* SH3 */
#ifdef SH4
	if (CPU_IS_SH4) {
		/* A: break after execution */
		_reg_write_2(SH4_BRCR, UBC_CTL_A_AFTER_INSN);
		/* A: compare all address bits, ignore ASID */
		_reg_write_1(SH4_BAMRA, SH4_UBC_MASK_NONE | SH4_UBC_MASK_ASID);
	}
#endif /* SH4 */
#endif
}
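/*
 * Sketch of how the UBC channel A configured above gets armed for a
 * single step: point the break address register at the target
 * instruction and enable an instruction-fetch break in BBRA; with
 * UBC_CTL_A_AFTER_INSN set, the debug exception fires after that
 * instruction executes. The register macros follow the style used above,
 * but the BBRA bit names and this function are assumptions for
 * illustration, not the kernel's exact single-step code.
 */
static void sh_ubc_arm_single_step(vaddr_t pc)
{
	/* A: match this virtual address */
	_reg_write_4(SH_(BARA), pc);
	/* A: enable the channel for instruction fetch cycles */
	_reg_write_2(SH_(BBRA), UBC_CYCLE_INSN | UBC_CYCLE_READ);
}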
void board_init(void)
{
	intc_init();
	switch_init();
	uart_init();
}