/*
 * Restore the saved e500 MMU-assist context (MAS0..MAS3) from the per-CPU
 * extra state.  Counterpart of the save path; called during context restore.
 * NOTE(review): write order is kept as-is — reordering privileged SPR writes
 * may matter on this core; confirm before changing.
 */
static void extra_e500_restore(struct cpu_extra_state *state) {
	set_spr(PPCBKEM_SPR_MAS0, state->u.ppcbkem.spr.mas[0]);
	set_spr(PPCBKEM_SPR_MAS1, state->u.ppcbkem.spr.mas[1]);
	set_spr(PPCBKEM_SPR_MAS2, state->u.ppcbkem.spr.mas[2]);
	set_spr(PPCBKEM_SPR_MAS3, state->u.ppcbkem.spr.mas[3]);
}
/*
 * Restore CPU-specific "extra" register state.
 *
 * MMU registers are reloaded only when a previous save marked them dirty
 * via mmu_needs_restoring; chip-level extra state is always restored.
 * The dirty flag is cleared on the way out.
 */
void cpu_restore_extra(struct cpu_extra_state *state) {
	if (mmu_needs_restoring) {
		// Only the classic 600 family keeps DBAT state in the extra block.
		if (SYSPAGE_CPU_ENTRY(ppc, kerinfo)->ppc_family == PPC_FAMILY_600) {
			set_spr(PPC_SPR_DBAT3L, state->u.ppc600.dbat[3].lo);
			set_spr(PPC_SPR_DBAT3U, state->u.ppc600.dbat[3].up);
		}
	}
	extra_chip_restore(state);
	mmu_needs_restoring = 0;
}
/*
 * MM_TlbSetup - program MMU TLB entries from a descriptor table.
 *
 * Walks tblTable until the terminator element (entry == -1UL) is reached.
 * Each descriptor is loaded into the MAS0..MAS3 assist registers and then
 * committed with tlbwe, with msync/isync ordering before the write.
 */
void MM_TlbSetup( const struct TlbEntry *tblTable ) {
	for (int32_t idx = 0; tblTable[idx].entry != (-1UL); idx++) {
		const struct TlbEntry *ent = &tblTable[idx];

		set_spr(SPR_MAS0, ent->mas0);
		set_spr(SPR_MAS1, ent->mas1);
		set_spr(SPR_MAS2, ent->mas2);
		set_spr(SPR_MAS3, ent->mas3);
		msync();
		isync();
		tlbwe();
	}
}
/*
 * set_l1csr0 - write the L1 Cache Control and Status Register 0.
 *
 * The write is bracketed with sync instructions, as cache-control register
 * updates on these cores require synchronization around the mtspr
 * (see the core's EREF documentation).
 */
static void set_l1csr0(uint32_t value) {
	// SPR number of L1CSR0 on e500-class cores; previously a bare magic
	// number (1010) with no name.
	enum { SPRN_L1CSR0 = 1010 };

	sync();
	set_spr(SPRN_L1CSR0, value);
	sync();
}
/*
 * asm_write_tlb - commit one TLB entry on a Book E core.
 *
 * Loads MAS0..MAS3 (SPRs 624..627) with the caller's values, then executes
 * a tlbwe instruction to write the selected TLB entry.  0x7C0007A4 is the
 * machine encoding of tlbwe (the __ghs__ branch uses the mnemonic directly);
 * it is hand-encoded here, presumably because the GCC/Diab assemblers in use
 * did not accept the mnemonic — confirm before simplifying.
 */
static void asm_write_tlb(uint32_t mas0, uint32_t mas1, uint32_t mas2, uint32_t mas3) {
	set_spr(624,mas0);	// MAS0
	set_spr(625,mas1);	// MAS1
	set_spr(626,mas2);	// MAS2
	set_spr(627,mas3);	// MAS3
#if defined __GNUC__
	__asm__ __volatile__(".long 0x7C0007A4");	/* tlbwe */
#elif defined __DCC__
	__asm(".long 0x7C0007A4");	/* tlbwe */
#elif defined __ghs__
	__asm(" tlbwe");
#else
#error Unsupported compiler
#endif
}
/*
 * Restore the MPC7450 performance-monitor registers from *pcr.
 * Counterpart of save_perfregs_7450().  The write order is significant:
 * see the MMCR0 note at the bottom.
 */
static void rdecl restore_perfregs_7450(PPC_PERFREGS *pcr) {
	set_spr( PPC7450_SPR_PMC1, pcr->mpc7450.pmc[0] );
	set_spr( PPC7450_SPR_PMC2, pcr->mpc7450.pmc[1] );
	set_spr( PPC7450_SPR_PMC3, pcr->mpc7450.pmc[2] );
	set_spr( PPC7450_SPR_PMC4, pcr->mpc7450.pmc[3] );
	set_spr( PPC7450_SPR_PMC5, pcr->mpc7450.pmc[4] );
	set_spr( PPC7450_SPR_PMC6, pcr->mpc7450.pmc[5] );
	set_spr( PPC7450_SPR_MMCR1, pcr->mpc7450.mmc[1] );
	set_spr( PPC7450_SPR_MMCR2, pcr->mpc7450.mmc[2] );
	set_spr( PPC7450_SPR_SIA, pcr->mpc7450.sia );
	/*
	 * MMC0 contains the FREEZE bits.  This register should be restored
	 * last so that counting resumes only after all other registers have
	 * been restored.
	 */
	set_spr( PPC7450_SPR_MMCR0, pcr->mpc7450.mmc[0] );
}
/*
 * Save the MPC7450 performance-monitor registers into *pcr.
 * MMCR0 is read first and then overwritten with its freeze bit so that
 * counting stops (and no PMI fires) while the kernel runs; the remaining
 * registers are then captured with counters frozen.
 */
static void rdecl save_perfregs_7450(PPC_PERFREGS *pcr) {
	pcr->mpc7450.mmc[0] = get_spr( PPC7450_SPR_MMCR0 );
	/* Disable counting so that a PMI isn't generated in the kernel code. */
	set_spr( PPC7450_SPR_MMCR0, 0x80000000);	// freeze bit (disables counting)
	pcr->mpc7450.mmc[1] = get_spr( PPC7450_SPR_MMCR1 );
	pcr->mpc7450.mmc[2] = get_spr( PPC7450_SPR_MMCR2 );
	pcr->mpc7450.pmc[0] = get_spr( PPC7450_SPR_PMC1 );
	pcr->mpc7450.pmc[1] = get_spr( PPC7450_SPR_PMC2 );
	pcr->mpc7450.pmc[2] = get_spr( PPC7450_SPR_PMC3 );
	pcr->mpc7450.pmc[3] = get_spr( PPC7450_SPR_PMC4 );
	pcr->mpc7450.pmc[4] = get_spr( PPC7450_SPR_PMC5 );
	pcr->mpc7450.pmc[5] = get_spr( PPC7450_SPR_PMC6 );
	pcr->mpc7450.sia = get_spr( PPC7450_SPR_SIA );
}
/*
 * fam_pte_init - one-time MMU/cache bring-up for the MPC8xx (mmu 800) family.
 *
 * Preconditions (per the original note): interrupts off, MMU off, called
 * exactly once.  Sets up the instruction/data MMU control registers, the
 * kernel address-space ID, access-protection groups, flushes the TLBs,
 * initializes the cache control registers, and pins a set of reserved TLB
 * entries (indexes 28..31) for kernel/proc, syspage, and uncached mappings.
 *
 * NOTE(review): the `phase` parameter is unused in the visible body —
 * presumably kept for interface compatibility with other families; confirm.
 */
void fam_pte_init(int phase) {
	// mmu 800 initialization. assuming int off, mmu off, called once only
	// MI_CTR & MD_CTR
	set_spr(PPC800_SPR_MI_CTR, 0/*PPC800_MICTR_CIDEF*/);
	set_spr(PPC800_SPR_MD_CTR, /*PPC800_MDCTR_WTDEF |*/ PPC800_MDCTR_TWAN | PPC800_MDCTR_CIDEF);
	// PID: M_CASID -- kernel
	// set_spr(PPC800_SPR_M_CASID, 1);
	set_spr(PPC800_SPR_M_CASID, 0);
	// ZONE: MI_AP & MD_AP
	set_spr(PPC800_SPR_MI_AP, 0x55555555);//PPC800_AP_PPCMODE_PGPERM(15) | PPC800_AP_PPCMODE_PGPERM(0));
	set_spr(PPC800_SPR_MD_AP, 0x55555555);//PPC800_AP_PPCMODE_PGPERM(0) |PPC800_AP_PPCMODE_PGPERM(15));
	fam_pte_flush_all();
	// set up cache control registers (command sequence written to IC_CST/DC_CST)
	set_spr(PPC800_SPR_IC_CST, 0x0a000000);
	set_spr(PPC800_SPR_IC_CST, 0x0c000000);
	set_spr(PPC800_SPR_IC_CST, 0x02000000);
	ppc_isync();
	set_spr(PPC800_SPR_DC_CST, 0x0a000000);
	set_spr(PPC800_SPR_DC_CST, 0x0c000000);
	set_spr(PPC800_SPR_DC_CST, 0x02000000);
	ppc_isync();
#if 1
	// map in reserved tlb entries
	{
		unsigned twc;
		unsigned rpn;
		unsigned epn;

		// add first 8M mapping for both ker and proc to both itlb and dtlb (31)
		set_spr(PPC800_SPR_MI_CTR, (31<<PPC800_MICTR_INDX_SHIFT));
		set_spr(PPC800_SPR_MD_CTR, (31<<PPC800_MDCTR_INDX_SHIFT) | PPC800_MDCTR_TWAN | PPC800_MDCTR_CIDEF);
		epn = 0 | PPC800_EPN_EV;
		twc = PPC800_TWC_V | PPC800_TWC_PS_8M;
		rpn = 0 | PPC800_RPN_SH | PPC800_RPN_V | PPC800_RPN_LPS | 0xf0 | (0x1<<PPC800_RPN_PP2_SHIFT);
		add_tlb800( epn, twc, rpn, 1);
		ppc_isync();
		// add third 8M mapping for itlb (28), and second 8M for dtlb (28)
		set_spr(PPC800_SPR_MI_CTR, 28<<PPC800_MICTR_INDX_SHIFT);
		set_spr(PPC800_SPR_MD_CTR, (28<<PPC800_MDCTR_INDX_SHIFT) | PPC800_MDCTR_TWAN | PPC800_MDCTR_CIDEF);
		epn = 0x1000000 | PPC800_EPN_EV;
		// twc still holds PPC800_TWC_V | PPC800_TWC_PS_8M from the entry above
		rpn = 0x1000000 | PPC800_RPN_SH | PPC800_RPN_V | PPC800_RPN_LPS | 0xf0 | (0x1<<PPC800_RPN_PP2_SHIFT);
		add_tlb800( epn, twc, rpn, 1);
		set_spr(PPC800_SPR_MD_CTR, (28<<PPC800_MDCTR_INDX_SHIFT) | PPC800_MDCTR_TWAN | PPC800_MDCTR_CIDEF);
		epn = 0x800000 | PPC800_EPN_EV;
		rpn = 0x800000 | PPC800_RPN_SH | PPC800_RPN_V | PPC800_RPN_LPS | 0xf0 | (0x1<<PPC800_RPN_PP2_SHIFT);
		add_tlb800( epn, twc, rpn, 0);
		ppc_isync();
		// It seems that tlb search takes much time for valid entries. So, keep tlb buffer simple.
		// add syspage for dtlb and itlb (29)
		set_spr(PPC800_SPR_MI_CTR, 29<<PPC800_MICTR_INDX_SHIFT);
		set_spr(PPC800_SPR_MD_CTR, (29<<PPC800_MDCTR_INDX_SHIFT) | PPC800_MDCTR_TWAN | PPC800_MDCTR_CIDEF);
		epn = VM_SYSPAGE_ADDR | PPC800_EPN_EV;
		twc = PPC800_TWC_V;
		rpn = (unsigned)_syspage_ptr | PPC800_RPN_SH | PPC800_RPN_V | 0xf0 | (0x3<<PPC800_RPN_PP1_SHIFT) | (0x1<<PPC800_RPN_PP2_SHIFT);
		add_tlb800( epn, twc, rpn, 1);
		// add uncached 8M for dtlb, and second 8M for itlb (30)
		set_spr(PPC800_SPR_MI_CTR, 30<<PPC800_MICTR_INDX_SHIFT);
		set_spr(PPC800_SPR_MD_CTR, (30<<PPC800_MDCTR_INDX_SHIFT) | PPC800_MDCTR_TWAN | PPC800_MDCTR_CIDEF);
		epn = 0x800000 | PPC800_EPN_EV;
		twc = PPC800_TWC_V | PPC800_TWC_PS_8M;
		rpn = 0x800000 | PPC800_RPN_SH | PPC800_RPN_V | PPC800_RPN_LPS | 0xf0 | (0x1<<PPC800_RPN_PP2_SHIFT);
		add_tlb800( epn, twc, rpn, 1);
		set_spr(PPC800_SPR_MD_CTR, (30<<PPC800_MDCTR_INDX_SHIFT) | PPC800_MDCTR_TWAN | PPC800_MDCTR_CIDEF);
		epn = 0x30000000 | PPC800_EPN_EV;
		twc = PPC800_TWC_V | PPC800_TWC_PS_8M;
		rpn = 0x30000000 | PPC800_RPN_SH | PPC800_RPN_V | PPC800_RPN_LPS | 0xf0 | (0x1<<PPC800_RPN_PP2_SHIFT) | PPC800_RPN_CI;
		add_tlb800( epn, twc, rpn, 0);
		ppc_isync();
		// set reserved area boundry
		set_spr(PPC800_SPR_MI_CTR, PPC800_MICTR_PPCS | PPC800_MICTR_RSV2I );
		set_spr(PPC800_SPR_MD_CTR, PPC800_MDCTR_PPCS | PPC800_MDCTR_RSV2D | PPC800_MDCTR_TWAN | PPC800_MDCTR_CIDEF);
	}
#endif
	zp_flags &= ~ZP_CACHE_OFF;
	pgszlist[2] = __PAGESIZE;
}
/*
 * config_cpu - per-family Book E CPU configuration at boot.
 *
 * Based on the PVR family/member, selects the TLB handling variant,
 * installs the family-specific trap set, and appends context save/restore
 * copy blocks to the entry/exitlocal lists (both are advanced and
 * NULL-terminated before return, so callers must pass arrays with room).
 * Also hooks the FPU-unavailable exception to either the emulator or the
 * real FPU path, and redirects TLB-miss vectors to the storage exceptions
 * until virtual mode is set up.
 */
void config_cpu(unsigned pvr, const struct exc_copy_block **entry, const struct exc_copy_block **exitlocal) {
	// Start with debug control in a known (all-disabled) state.
	set_spr( PPCBKE_SPR_DBCR0, 0 );
#ifdef VARIANT_booke
	SYSPAGE_CPU_ENTRY(ppc,kerinfo)->init_msr |= PPC_MSR_DE;
#endif
	switch(PPC_GET_FAM_MEMBER(pvr)) {
	case PPC_440GP:
		ppcbke_tlb_select = PPCBKE_TLB_SELECT_IBM;
		trap_install_set(traps_440gp, NUM_ELTS(traps_440gp));
		fix_pgsizes();
		break;
	case PPC_440GX:
		ppcbke_tlb_select = PPCBKE_TLB_SELECT_IBM;
		trap_install_set(traps_440gx, NUM_ELTS(traps_440gx));
		fix_pgsizes();
		break;
	case PPC_E500V2:
		// Same as PPC_E500 below except for the v2 TLB select.
		ppcbke_tlb_select = PPCBKE_TLB_SELECT_E500v2;
		trap_install_set(traps_e500, NUM_ELTS(traps_e500));
		alt_souls.size = sizeof(PPC_SPE_REGISTERS);
		*entry++ = &ctx_save_e500_extra;
		*exitlocal++ = &ctx_restore_e500_extra;
		break;
	case PPC_E500:
		ppcbke_tlb_select = PPCBKE_TLB_SELECT_E500;
		trap_install_set(traps_e500, NUM_ELTS(traps_e500));
		alt_souls.size = sizeof(PPC_SPE_REGISTERS);
		*entry++ = &ctx_save_e500_extra;
		*exitlocal++ = &ctx_restore_e500_extra;
		break;
	default:
		kprintf("Unsupported PVR value: %x\n", pvr);
		crash();
		break;
	}
	// Common Book E traps are installed for every supported family.
	trap_install_set(traps_booke, NUM_ELTS(traps_booke));
	if(__cpu_flags & CPU_FLAG_FPU) {
		if(fpuemul) {
			// Emulation
			trap_install(PPCBKE_SPR_IVOR7, __exc_fpu_emulation, &__common_exc_entry);
		} else {
			// Real floating point
			trap_install(PPCBKE_SPR_IVOR7, __exc_fpu_unavail, &__exc_ffpu);
		}
	}
	// Make data & instruction TLB misses go to the
	// data and instruction storage exceptions. This will
	// be changed if/when copy_vm_code() gets called and
	// we know we're running in virtual mode.
	set_spr(PPCBKE_SPR_IVOR13, get_spr(PPCBKE_SPR_IVOR2));
	set_spr(PPCBKE_SPR_IVOR14, get_spr(PPCBKE_SPR_IVOR3));
	*entry++ = &ctx_save_usprg0;
	*exitlocal++ = &ctx_restore_usprg0;
	*entry = NULL;
	*exitlocal = NULL;
	// Allow critical and machine-check interrupts to be enabled.
	ppc_ienable_bits |= PPC_MSR_CE | PPC_MSR_ME;
}
// write 0 to pop INTC stack
/*
 * Irq_Init - set up exception vectors and the INTC interrupt controller.
 *
 * Programs IVPR (and, on parts whose cores need explicit offsets, the
 * individual IVORn registers) to point at exception_tbl, then configures
 * the INTC for software vector mode, drains its priority FIFO by writing
 * EOIR 15 times, and finally lowers the current priority to 0 so
 * interrupts are accepted.
 */
void Irq_Init( void ) {
	// Check alignment for the exception table
	assert(((uint32)exception_tbl & 0xfff)==0);

	set_spr(SPR_IVPR,(uint32)exception_tbl);

	// TODO: The 5516 simulator still thinks it's a 5554 so setup the rest
#if (defined(CFG_SIMULATOR) && defined(CFG_MPC5516)) || defined(CFG_MPC5567) || defined(CFG_MPC5554) || defined(CFG_MPC5668) || defined(CFG_MPC563XM)
	// Each vector is 0x10 bytes apart within exception_tbl.
	set_spr(SPR_IVOR0,((uint32_t)&exception_tbl+0x0) );
	set_spr(SPR_IVOR1,((uint32_t)&exception_tbl+0x10) );
	set_spr(SPR_IVOR2,((uint32_t)&exception_tbl+0x20) );
	set_spr(SPR_IVOR3,((uint32_t)&exception_tbl+0x30) );
	set_spr(SPR_IVOR4,((uint32_t)&exception_tbl+0x40) );
	set_spr(SPR_IVOR5,((uint32_t)&exception_tbl+0x50) );
	set_spr(SPR_IVOR6,((uint32_t)&exception_tbl+0x60) );
	set_spr(SPR_IVOR7,((uint32_t)&exception_tbl+0x70) );
	set_spr(SPR_IVOR8,((uint32_t)&exception_tbl+0x80) );
	set_spr(SPR_IVOR9,((uint32_t)&exception_tbl+0x90) );
	set_spr(SPR_IVOR10,((uint32_t)&exception_tbl+0xa0) );
	set_spr(SPR_IVOR11,((uint32_t)&exception_tbl+0xb0) );
	set_spr(SPR_IVOR12,((uint32_t)&exception_tbl+0xc0) );
	set_spr(SPR_IVOR13,((uint32_t)&exception_tbl+0xd0) );
	set_spr(SPR_IVOR14,((uint32_t)&exception_tbl+0xe0) );
	set_spr(SPR_IVOR15,((uint32_t)&exception_tbl+0xf0) );
#if defined(CFG_SPE)
	// SPE exceptions
	set_spr(SPR_IVOR32,((uint32_t)&exception_tbl+0x100) );
	set_spr(SPR_IVOR33,((uint32_t)&exception_tbl+0x110) );
	set_spr(SPR_IVOR34,((uint32_t)&exception_tbl+0x120) );
#endif
#endif
	//
	// Setup INTC
	//
	// according to manual
	//
	// 1. configure VTES_PRC0,VTES_PRC1,HVEN_PRC0 and HVEN_PRC1 in INTC_MCR
	// 2. configure VTBA_PRCx in INTC_IACKR_PRCx
	// 3. raise the PRIx fields and set the PRC_SELx fields to the desired processor in INTC_PSRx_x
	// 4. set the enable bits or clear the mask bits for the peripheral interrupt requests
	// 5. lower PRI in INTC_CPR_PRCx to zero
	// 6. enable processor(s) recognition of interrupts

#if defined(CFG_MPC5516) || defined(CFG_MPC5668)
	INTC.MCR.B.HVEN_PRC0 = 0; // Soft vector mode
	INTC.MCR.B.VTES_PRC0 = 0; // 4 byte offset between entries
#elif defined(CFG_MPC5554) || defined(CFG_MPC5567) || defined(CFG_MPC560X) || defined(CFG_MPC563XM)
	INTC.MCR.B.HVEN = 0; // Soft vector mode
	INTC.MCR.B.VTES = 0; // 4 byte offset between entries
#endif

	// Pop the FIFO queue (writing 0 to EOIR pops one level of the
	// INTC priority LIFO; 15 writes empties it)
	for (int i = 0; i < 15; i++) {
#if defined(CFG_MPC5516) || defined(CFG_MPC5668)
		INTC.EOIR_PRC0.R = 0;
#elif defined(CFG_MPC5554) || defined(CFG_MPC5567) || defined(CFG_MPC560X) || defined(CFG_MPC563XM)
		INTC.EOIR.R = 0;
#endif
	}

	// Accept interrupts
#if defined(CFG_MPC5516) || defined(CFG_MPC5668)
	INTC.CPR_PRC0.B.PRI = 0;
#elif defined(CFG_MPC5554) || defined(CFG_MPC5567) || defined(CFG_MPC560X) || defined(CFG_MPC563XM)
	INTC.CPR.B.PRI = 0;
#endif
}
/*
 * Restore the 440-specific MMU control register (MMUCR) from the saved
 * per-CPU extra state.
 */
static void extra_440_restore(struct cpu_extra_state *state) {
	set_spr(PPC440_SPR_MMUCR, state->u.ppc440.spr.mmucr);
}
/*
 * e500v2 context restore: the common e500 state (MAS0..MAS3) plus MAS7,
 * which is present on the v2 core only (presumably holds the upper
 * physical-address bits — confirm against the core reference manual).
 */
static void extra_e500v2_restore(struct cpu_extra_state *state) {
	extra_e500_restore(state);
	set_spr(PPCBKEM_SPR_MAS7, state->u.ppcbkem.spr.mas[7]);
}