/*
 * Load the LWP's saved SPE/vector state into the hardware registers and
 * arrange for the vector unit to be enabled on return to user mode.
 * If the LWP has never used the vector unit, its save area is zeroed
 * first so it starts from a clean register image.
 * ("flags" is accepted for interface compatibility; it is not used here.)
 */
void vec_state_load(lwp_t *l, u_int flags)
{
	struct pcb * const pcb = lwp_getpcb(l);

	/* First use by this LWP: present an all-zero register set. */
	if (__predict_false(!vec_used_p(l))) {
		memset(&pcb->pcb_vr, 0, sizeof(pcb->pcb_vr));
		vec_mark_used(l);
	}

	/*
	 * Enable SPE temporarily (and disable interrupts).
	 */
	const register_t msr = mfmsr();
	mtmsr((msr & ~PSL_EE) | PSL_SPV);
	__asm volatile ("isync");	/* context-synchronize the MSR write */

	/*
	 * Call an assembly routine to do load everything.
	 */
	vec_load_from_vreg(&pcb->pcb_vr);
	__asm volatile ("sync");	/* ensure loads complete before SPE goes off */

	/*
	 * Restore MSR (turn off SPE)
	 */
	mtmsr(msr);
	__asm volatile ("isync");

	/*
	 * Set PSL_SPV so vectors will be enabled on return to user.
	 */
	l->l_md.md_utf->tf_srr1 |= PSL_SPV;
}
/**
 * Writes the data to the APU UDI register at the specified APU address.
 *
 * @param DcrBase is the base of the block of DCR registers
 * @param UDInum  is the target APU UDI register number
 * @param Data    is the value to be placed into the specified APU register
 *
 * @return None
 *
 * @note
 * C-style signature:
 *    void XIo_DcrWriteAPUUDIReg(u32 DcrRegister, u32 UDInum, u32 Data)
 *
 * Since writing an APU UDI DCR requires a dummy write to the same DCR,
 * the target UDI number is required. In order to make this operation atomic,
 * interrupts are disabled before and enabled after the DCR accesses.
 * Because an APU UDI access involves two DCR accesses, the DCR bus must be
 * locked to ensure that another master doesn't access the APU UDI register
 * at the same time.
 * Care must be taken to not write a '1' to either timeout bit because
 * it will be cleared.
 * Steps:
 *  - save old MSR
 *  - disable interrupts by writing mask to MSR
 *  - acquire lock; since the PPC440 supports timeout wait, it will wait
 *    until it successfully acquires the DCR bus lock
 *  - shift and mask the UDI number to its bit position of [22:25]
 *  - add DCR base address to UDI number offset and perform the write
 *  - release DCR bus lock
 *  - restore MSR
 *
 ******************************************************************************/
inline void XIo_DcrWriteAPUUDIReg(u32 DcrBase, u32 UDInum, u32 Data)
{
	u32 oldMSR = mfmsr();

	/* Mask exceptions so the two DCR accesses are atomic on this CPU. */
	mtmsr(oldMSR & XDCR_DISABLE_EXCEPTIONS);
	/* Lock the DCR bus against other masters (blocks until acquired). */
	XIo_DcrLock(DcrBase);

	/*
	 * mtdcr() takes its DCR number as an instruction immediate, so each
	 * base address needs its own literal case; unknown bases fall back
	 * to block 0.  The first (dummy) write selects the UDI number
	 * (shifted into bits [22:25]); the second write carries the data.
	 */
	switch (DcrBase) {
	case XDCR_0_BASEADDR:
		mtdcr(XDCR_0_BASEADDR | XDCR_APU_UDI,
		      (((UDInum) << 6) & 0x000003c0) | 0x00000030);
		mtdcr(XDCR_0_BASEADDR | XDCR_APU_UDI, (Data));
		break;
	case XDCR_1_BASEADDR:
		mtdcr(XDCR_1_BASEADDR | XDCR_APU_UDI,
		      (((UDInum) << 6) & 0x000003c0) | 0x00000030);
		mtdcr(XDCR_1_BASEADDR | XDCR_APU_UDI, (Data));
		break;
	case XDCR_2_BASEADDR:
		mtdcr(XDCR_2_BASEADDR | XDCR_APU_UDI,
		      (((UDInum) << 6) & 0x000003c0) | 0x00000030);
		mtdcr(XDCR_2_BASEADDR | XDCR_APU_UDI, (Data));
		break;
	case XDCR_3_BASEADDR:
		mtdcr(XDCR_3_BASEADDR | XDCR_APU_UDI,
		      (((UDInum) << 6) & 0x000003c0) | 0x00000030);
		mtdcr(XDCR_3_BASEADDR | XDCR_APU_UDI, (Data));
		break;
	default:
		mtdcr(XDCR_0_BASEADDR | XDCR_APU_UDI,
		      (((UDInum) << 6) & 0x000003c0) | 0x00000030);
		mtdcr(XDCR_0_BASEADDR | XDCR_APU_UDI, (Data));
		break;
	}

	XIo_DcrUnlock(DcrBase);
	mtmsr(oldMSR);	/* restore original exception state */
}
/**
 * Writes the register at specified DCR address using the indirect addressing
 * method.
 *
 * @param DcrBase     is the base of the block of DCR registers
 * @param DcrRegister is the intended destination DCR register
 * @param Data        is the value to be placed into the specified DCR register
 *
 * @return None
 *
 * @note
 * C-style signature:
 *    void XIo_DcrIndirectAddrWriteReg(u32 DcrBase, u32 DcrRegister, u32 Data)
 *
 * Assumes auto-buslocking feature is ON (no explicit DCR bus lock is taken
 * here, unlike the APU UDI accessors).
 * In order to make this operation atomic, interrupts are disabled before
 * and enabled after the DCR accesses.
 ******************************************************************************/
inline void XIo_DcrIndirectAddrWriteReg(u32 DcrBase, u32 DcrRegister, u32 Data)
{
	unsigned int oldMSR = mfmsr();

	/* Mask exceptions so the address/access register pair is written atomically. */
	mtmsr(oldMSR & XDCR_DISABLE_EXCEPTIONS);

	/*
	 * Indirect addressing: first write the target register's full DCR
	 * address into the IDA_ADDR register, then write the data through
	 * IDA_ACC.  Each base address needs its own literal case because
	 * the DCR number is an instruction immediate; unknown bases fall
	 * back to block 0.
	 */
	switch (DcrBase) {
	case XDCR_0_BASEADDR:
		XIo_mDcrWriteReg(XDCR_0_BASEADDR | XDCR_IDA_ADDR,
				 XDCR_0_BASEADDR | DcrRegister);
		XIo_mDcrWriteReg(XDCR_0_BASEADDR | XDCR_IDA_ACC, Data);
		break;
	case XDCR_1_BASEADDR:
		XIo_mDcrWriteReg(XDCR_1_BASEADDR | XDCR_IDA_ADDR,
				 XDCR_1_BASEADDR | DcrRegister);
		XIo_mDcrWriteReg(XDCR_1_BASEADDR | XDCR_IDA_ACC, Data);
		break;
	case XDCR_2_BASEADDR:
		XIo_mDcrWriteReg(XDCR_2_BASEADDR | XDCR_IDA_ADDR,
				 XDCR_2_BASEADDR | DcrRegister);
		XIo_mDcrWriteReg(XDCR_2_BASEADDR | XDCR_IDA_ACC, Data);
		break;
	case XDCR_3_BASEADDR:
		XIo_mDcrWriteReg(XDCR_3_BASEADDR | XDCR_IDA_ADDR,
				 XDCR_3_BASEADDR | DcrRegister);
		XIo_mDcrWriteReg(XDCR_3_BASEADDR | XDCR_IDA_ACC, Data);
		break;
	default:
		XIo_mDcrWriteReg(XDCR_0_BASEADDR | XDCR_IDA_ADDR,
				 XDCR_0_BASEADDR | DcrRegister);
		XIo_mDcrWriteReg(XDCR_0_BASEADDR | XDCR_IDA_ACC, Data);
		break;
	}

	mtmsr(oldMSR);	/* restore original exception state */
	return;
}
/*
 * Make the AltiVec unit available to thread "td": record this CPU as the
 * thread's vector CPU, restore VSCR and v0-v31 from the PCB save area
 * (zeroing them on first use), and set PSL_VEC in the trapframe so the
 * unit is enabled when the thread returns from the exception.
 */
void
enable_vec(struct thread *td)
{
	int msr;
	struct pcb *pcb;
	struct trapframe *tf;

	pcb = td->td_pcb;
	tf = trapframe(td);

	/*
	 * Save the thread's Altivec CPU number, and set the CPU's current
	 * vector thread
	 */
	td->td_pcb->pcb_veccpu = PCPU_GET(cpuid);
	PCPU_SET(vecthread, td);

	/*
	 * Enable the vector unit for when the thread returns from the
	 * exception. If this is the first time the unit has been used by
	 * the thread, initialise the vector registers and VSCR to 0, and
	 * set the flag to indicate that the vector unit is in use.
	 */
	tf->srr1 |= PSL_VEC;
	if (!(pcb->pcb_flags & PCB_VEC)) {
		memset(&pcb->pcb_vec, 0, sizeof pcb->pcb_vec);
		pcb->pcb_flags |= PCB_VEC;
	}

	/*
	 * Temporarily enable the vector unit so the registers
	 * can be restored.
	 */
	msr = mfmsr();
	mtmsr(msr | PSL_VEC);
	isync();

	/*
	 * Restore VSCR by first loading it into a vector and then into VSCR.
	 * (this needs to done before loading the user's vector registers
	 * since we need to use a scratch vector register)
	 */
	__asm __volatile("vxor 0,0,0; lvewx 0,0,%0; mtvscr 0" \
	    :: "b"(&pcb->pcb_vec.vscr));

	/* Reload all 32 vector registers from the PCB save area. */
#define LVX(n) __asm ("lvx " #n ",0,%0" \
	    :: "b"(&pcb->pcb_vec.vr[n]));
	LVX(0);  LVX(1);  LVX(2);  LVX(3);
	LVX(4);  LVX(5);  LVX(6);  LVX(7);
	LVX(8);  LVX(9);  LVX(10); LVX(11);
	LVX(12); LVX(13); LVX(14); LVX(15);
	LVX(16); LVX(17); LVX(18); LVX(19);
	LVX(20); LVX(21); LVX(22); LVX(23);
	LVX(24); LVX(25); LVX(26); LVX(27);
	LVX(28); LVX(29); LVX(30); LVX(31);
#undef LVX

	/* Turn the unit back off here; tf->srr1 re-enables it at return. */
	isync();
	mtmsr(msr);
}
/**
 * Reads the APU UDI register at the specified APU address.
 *
 * @param DcrBase is the base of the block of DCR registers
 * @param UDInum  is the intended source APU register
 *
 * @return Contents of the specified APU register.
 *
 * @note
 * C-style signature:
 *    u32 XIo_DcrReadAPUUDIReg(u32 DcrRegister, u32 UDInum)
 *
 * Since reading an APU UDI DCR requires a dummy write to the same DCR,
 * the target UDI number is required. In order to make this operation atomic,
 * interrupts are disabled before and enabled after the DCR accesses.
 * Because an APU UDI access involves two DCR accesses, the DCR bus must be
 * locked to ensure that another master doesn't access the APU UDI register
 * at the same time.
 * Care must be taken to not write a '1' to either timeout bit because
 * it will be cleared.
 * Steps:
 *  - save old MSR
 *  - disable interrupts by writing mask to MSR
 *  - acquire lock; since the PPC440 supports timeout wait, it will wait
 *    until it successfully acquires the DCR bus lock
 *  - shift and mask the UDI number to its bit position of [22:25]
 *  - add the DCR base address to the UDI number and perform the read
 *  - release DCR bus lock
 *  - restore MSR
 *  - return value read
 *
 ******************************************************************************/
inline u32 XIo_DcrReadAPUUDIReg(u32 DcrBase, u32 UDInum)
{
	u32 rVal;
	u32 oldMSR = mfmsr();

	/* Mask exceptions so the dummy-write/read pair is atomic on this CPU. */
	mtmsr(oldMSR & XDCR_DISABLE_EXCEPTIONS);
	/* Lock the DCR bus against other masters (blocks until acquired). */
	XIo_DcrLock(DcrBase);

	/*
	 * mtdcr()/mfdcr() take the DCR number as an instruction immediate,
	 * so each base address needs its own literal case; unknown bases
	 * fall back to block 0.  The dummy write selects the UDI number
	 * (shifted into bits [22:25]); the read then returns its contents.
	 */
	switch (DcrBase) {
	case XDCR_0_BASEADDR:
		mtdcr(XDCR_0_BASEADDR | XDCR_APU_UDI,
		      (((UDInum) << 6) & 0x000003c0) | 0x00000030);
		rVal = mfdcr(XDCR_0_BASEADDR | XDCR_APU_UDI);
		break;
	case XDCR_1_BASEADDR:
		mtdcr(XDCR_1_BASEADDR | XDCR_APU_UDI,
		      (((UDInum) << 6) & 0x000003c0) | 0x00000030);
		rVal = mfdcr(XDCR_1_BASEADDR | XDCR_APU_UDI);
		break;
	case XDCR_2_BASEADDR:
		mtdcr(XDCR_2_BASEADDR | XDCR_APU_UDI,
		      (((UDInum) << 6) & 0x000003c0) | 0x00000030);
		rVal = mfdcr(XDCR_2_BASEADDR | XDCR_APU_UDI);
		break;
	case XDCR_3_BASEADDR:
		mtdcr(XDCR_3_BASEADDR | XDCR_APU_UDI,
		      (((UDInum) << 6) & 0x000003c0) | 0x00000030);
		rVal = mfdcr(XDCR_3_BASEADDR | XDCR_APU_UDI);
		break;
	default:
		mtdcr(XDCR_0_BASEADDR | XDCR_APU_UDI,
		      (((UDInum) << 6) & 0x000003c0) | 0x00000030);
		rVal = mfdcr(XDCR_0_BASEADDR | XDCR_APU_UDI);
		break;
	}

	XIo_DcrUnlock(DcrBase);
	mtmsr(oldMSR);	/* restore original exception state */
	return (rVal);
}
/*
 * Enter a "fast" critical region and return the previous protection level.
 * Called only around very short critical sections; must support recursive
 * use (calling it while already protected), in which case the returned
 * value simply reflects the already-protected state.  On MicroBlaze the
 * interrupt-enable bit (0x2) of the MSR is cleared; on PowerPC all
 * exception-enable bits (XEXC_ALL) are cleared.  Only required when the
 * port runs on top of an operating system.
 */
sys_prot_t sys_arch_protect()
{
	sys_prot_t prev;

	prev = mfmsr();
#ifdef __MICROBLAZE__
	mtmsr(prev & ~0x2);
#elif __PPC__
	mtmsr(prev & ~XEXC_ALL);
#endif
	return prev;
}
/*
 * Idle the PPC 44x core: set MSR[WE] (wait enable) together with the
 * external, critical and debug interrupt enables so the core sleeps
 * until an interrupt arrives, then restore the original MSR.
 */
static void ppc44x_idle(void)
{
	unsigned long msr_save;

	msr_save = mfmsr();
	/* set wait state MSR */
	mtmsr(msr_save|MSR_WE|MSR_EE|MSR_CE|MSR_DE);
	isync();	/* context-synchronize; core now waits for an interrupt */
	/* return to initial state */
	mtmsr(msr_save);
	isync();
}
/*
 * Park this CPU in H_JOIN for an RTAS partition suspend.
 *
 * Loops on the H_JOIN hcall (with MSR[EE] off, as H_JOIN requires) until
 * either the suspend completes (H_SUCCESS after being prodded) or the
 * hypervisor elects this CPU to perform the suspend itself (H_CONTINUE,
 * delegated to __rtas_suspend_last_cpu()).  On any other return code the
 * error is recorded and, if "wake_when_done" is set, all other CPUs are
 * prodded out of their join state.
 *
 * Returns the hcall status (H_SUCCESS on normal completion).
 */
static int __rtas_suspend_cpu(struct rtas_suspend_me_data *data, int wake_when_done)
{
	long rc = H_SUCCESS;
	unsigned long msr_save;
	int cpu;

	stop_topology_update();

	/* Count ourselves in so the last one out can signal completion. */
	atomic_inc(&data->working);

	/* really need to ensure MSR.EE is off for H_JOIN */
	msr_save = mfmsr();
	mtmsr(msr_save & ~(MSR_EE));

	/* Stay joined until the suspend is done or the hcall fails. */
	while (rc == H_SUCCESS && !data->done)
		rc = plpar_hcall_norets(H_JOIN);

	mtmsr(msr_save);

	if (rc == H_SUCCESS) {
		/* This cpu was prodded and the suspend is complete. */
		goto out;
	} else if (rc == H_CONTINUE) {
		/* All other cpus are in H_JOIN, this cpu does
		 * the suspend.
		 */
		return __rtas_suspend_last_cpu(data, wake_when_done);
	}

	printk(KERN_ERR "H_JOIN on cpu %i failed with rc = %ld\n",
	       smp_processor_id(), rc);
	data->error = rc;

	if (wake_when_done) {
		smp_wmb();
		data->done = 1;

		/* Ensure data->done is seen on all CPUs that are about to
		   wake up as a result of the H_PROD below */
		mb();
		start_topology_update();

		/* This cpu did the suspend or got an error; in either case,
		 * we need to prod all other cpus out of join state.
		 * Extra prods are harmless.
		 */
		for_each_online_cpu(cpu)
			plpar_hcall_norets(H_PROD,
					   get_hard_smp_processor_id(cpu));
	}
out:
	/* Last CPU to leave signals whoever is waiting on the suspend. */
	if (atomic_dec_return(&data->working) == 0)
		complete(data->complete);
	return rc;
}
/*
 * Record hardware interrupt "hwirq" as pending on the current CPU.
 * The update of ci_ipending is bracketed by masking PSL_EE so the
 * read-modify-write cannot be interrupted.  An unmapped hwirq
 * (virtual IRQ 0) is reported but still marked.
 */
void pic_mark_pending(int hwirq)
{
	struct cpu_info * const ci = curcpu();
	const int virq = virq_map[hwirq];

	if (virq == 0)
		printf("IRQ %d maps to 0\n", hwirq);

	const register_t saved_msr = mfmsr();
	mtmsr(saved_msr & ~PSL_EE);
	ci->ci_ipending |= PIC_VIRQ_TO_MASK(virq);
	mtmsr(saved_msr);
}
/*
 * Install handler "ihf" with argument "iha" for hardware interrupt "irq".
 * External interrupts (PSL_EE) are masked while the table entry is
 * written so a half-initialized entry can never be observed from
 * interrupt context.
 */
void intr_setup(u_int irq, ih_func_t *ihf, void *iha)
{
	u_int32_t saved;

	saved = mfmsr();
	mtmsr(saved & ~PSL_EE);

	intr_handlers[irq].ih_irq = irq;
	intr_handlers[irq].ih_arg = iha;
	intr_handlers[irq].ih_func = ihf;

	mtmsr(saved);
}
/*
 * Dispatch loop for one interrupt controller ("cookie" is its pic_ops).
 * Repeatedly fetches the highest-priority pending IRQ from the PIC and
 * either delivers it (with interrupts re-enabled and the spl raised to
 * the source's IPL) or, if the current spl masks it, records it as
 * pending and disables it at the PIC.  255 from pic_get_irq means
 * "nothing pending".  Returns 0 if no interrupt was found, 1 otherwise.
 */
int pic_handle_intr(void *cookie)
{
	struct pic_ops *pic = cookie;
	struct cpu_info *ci = curcpu();
	int picirq;

	picirq = pic->pic_get_irq(pic, PIC_GET_IRQ);
	if (picirq == 255)
		return 0;	/* spurious - nothing pending */

	const register_t msr = mfmsr();
	const int pcpl = ci->ci_cpl;	/* spl to restore when done */

	do {
		const int virq = virq_map[picirq + pic->pic_intrbase];
		KASSERT(virq != 0);
		KASSERT(picirq < pic->pic_numintrs);
		imask_t v_imen = PIC_VIRQ_TO_MASK(virq);
		struct intr_source * const is = &intrsources[virq];

		if ((imask[pcpl] & v_imen) != 0) {
			/* Masked! Mark this as pending */
			ci->ci_ipending |= v_imen;
			pic->pic_disable_irq(pic, picirq);
		} else {
			/* this interrupt is no longer pending */
			ci->ci_ipending &= ~v_imen;
			ci->ci_idepth++;
			splraise(is->is_ipl);
			/* Run the handlers with interrupts back on. */
			mtmsr(msr | PSL_EE);
			intr_deliver(is, virq);
			mtmsr(msr);
			ci->ci_cpl = pcpl;

			ci->ci_data.cpu_nintr++;
			ci->ci_idepth--;
		}
		pic->pic_ack_irq(pic, picirq);
	} while ((picirq = pic->pic_get_irq(pic, PIC_GET_RECHECK)) != 255);

	mtmsr(msr | PSL_EE);
	splx(pcpl);	/* Process pendings. */
	mtmsr(msr);

	return 0;
}
/*
 * A very short dispatch, to try and maximise assembler code use
 * between all exception types. Maybe 'true' interrupts should go
 * here, and the trap code can come in separately
 *
 * External interrupts (EXC_EXI) and decrementer interrupts (EXC_DECR)
 * are dispatched directly with the interrupt nesting level bumped;
 * everything else re-enables interrupts and falls through to trap().
 */
void
powerpc_interrupt(struct trapframe *framep)
{
	struct thread *td;
	struct clockframe ckframe;

	td = curthread;

	switch (framep->exc) {
	case EXC_EXI:
		atomic_add_int(&td->td_intr_nesting_level, 1);
		(*powerpc_extintr_handler)();
		atomic_subtract_int(&td->td_intr_nesting_level, 1);
		break;

	case EXC_DECR:
		atomic_add_int(&td->td_intr_nesting_level, 1);
		/* Build a clockframe from the saved PC/MSR for decr_intr(). */
		ckframe.srr0 = framep->srr0;
		ckframe.srr1 = framep->srr1;
		decr_intr(&ckframe);
		atomic_subtract_int(&td->td_intr_nesting_level, 1);
		break;

	default:
		/*
		 * Re-enable interrupts and call the generic trap code
		 */
#if 0
		printf("powerpc_interrupt: got trap\n");
#endif
		mtmsr(mfmsr() | PSL_EE);
		isync();
		trap(framep);
	}
}
/*
 * FSL embedded performance-monitor interrupt handler: for every counter
 * that has overflowed (reads negative), log a sample and reload it if
 * profiling is active for that counter, otherwise zero it.  The PMM bit
 * is deliberately set only AFTER the interrupt work (see comment below).
 */
static void fsl_emb_handle_interrupt(struct pt_regs *regs,
				     struct op_counter_config *ctr)
{
	unsigned long pc;
	int is_kernel;
	int val;
	int i;

	pc = regs->nip;
	is_kernel = is_kernel_addr(pc);

	for (i = 0; i < num_counters; ++i) {
		val = ctr_read(i);
		if (val < 0) {	/* counter overflowed */
			if (oprofile_running && ctr[i].enabled) {
				oprofile_add_ext_sample(pc, regs, i, is_kernel);
				ctr_write(i, reset_value[i]);
			} else {
				ctr_write(i, 0);
			}
		}
	}

	/* The freeze bit was set by the interrupt. */
	/* Clear the freeze bit, and reenable the interrupt.  The
	 * counters won't actually start until the rfi clears the PMM
	 * bit.  The PMM bit should not be set until after the interrupt
	 * is cleared to avoid it getting lost in some hypervisor
	 * environments.
	 */
	mtmsr(mfmsr() | MSR_PMM);
	pmc_start_ctrs(1);
}
/*
 * Reset the CPU.  Systems with a dedicated reset control register
 * (MPC8548/8572 variants, identified via the SVR) are reset by writing
 * HRESET_REQ; everything else triggers a system reset through the debug
 * facility (DBCR0[RST] with debug interrupts enabled in the MSR).
 * Does not return; spins if the reset fails to take effect.
 */
void
cpu_reset(void)
{
	uint32_t ver = SVR_VER(mfspr(SPR_SVR));

	if (ver == SVR_MPC8572E || ver == SVR_MPC8572 ||
	    ver == SVR_MPC8548E || ver == SVR_MPC8548)
		/* Systems with dedicated reset register */
		ccsr_write4(OCP85XX_RSTCR, 2);
	else {
		/* Clear DBCR0, disables debug interrupts and events. */
		mtspr(SPR_DBCR0, 0);
		__asm __volatile("isync");

		/* Enable Debug Interrupts in MSR. */
		mtmsr(mfmsr() | PSL_DE);

		/* Enable debug interrupts and issue reset. */
		mtspr(SPR_DBCR0, mfspr(SPR_DBCR0) | DBCR0_IDM |
		    DBCR0_RST_SYSTEM);
	}

	printf("Reset failed...\n");
	while (1)
		;
}
/*
 * Take this (85xx) CPU offline for hotplug: mark it dead, stop its
 * timers, flush and disable the L1 cache, then put the core into NAP
 * low-power mode via HID0 and MSR[WE].  Never returns.
 */
static void __cpuinit smp_85xx_mach_cpu_die(void)
{
	unsigned int cpu = smp_processor_id();
	u32 tmp;

	local_irq_disable();
	idle_task_exit();
	generic_set_cpu_dead(cpu);
	mb();	/* make the dead flag visible before we stop responding */

	mtspr(SPRN_TCR, 0);	/* stop decrementer/watchdog interrupts */

	/* L1 must be clean before the core naps. */
	__flush_disable_L1();
	tmp = (mfspr(SPRN_HID0) & ~(HID0_DOZE|HID0_SLEEP)) | HID0_NAP;
	mtspr(SPRN_HID0, tmp);
	isync();

	/* Enter NAP mode. */
	tmp = mfmsr();
	tmp |= MSR_WE;
	mb();
	mtmsr(tmp);
	isync();

	while (1)
		;
}
/*
 * Early bootstrap for an application processor (AP): enable the L1
 * data/instruction caches if not already on, set a minimal MSR
 * (machine-check enable only), wire up this AP's per-CPU thread/PCB
 * pointers, and return the stack pointer of the idle thread's kstack
 * for the caller to switch onto.
 */
uint32_t
cpudep_ap_bootstrap()
{
	uint32_t msr, sp, csr;

	/* Enable L1 caches */
	csr = mfspr(SPR_L1CSR0);
	if ((csr & L1CSR0_DCE) == 0) {
		dcache_inval();
		dcache_enable();
	}

	csr = mfspr(SPR_L1CSR1);
	if ((csr & L1CSR1_ICE) == 0) {
		icache_inval();
		icache_enable();
	}

	/* Set MSR */
	msr = PSL_ME;
	mtmsr(msr);

	/* Assign pcpu fields, return ptr to this AP's idle thread kstack */
	pcpup->pc_curthread = pcpup->pc_idlethread;
	pcpup->pc_curpcb = pcpup->pc_curthread->td_pcb;
	sp = pcpup->pc_curpcb->pcb_sp;

	/* XXX shouldn't the pcb_sp be checked/forced for alignment here?? */

	return (sp);
}
/*
 * Start FSL embedded performance-monitor profiling: prime each enabled
 * counter with its reset value (counting only unmarked events) and stop
 * the disabled ones, then unfreeze the counters.  MSR[PMM] is set first
 * so counting does not actually begin until the rfi clears it.
 * Returns 0.
 */
static int fsl_emb_start(struct op_counter_config *ctr)
{
	int i;

	mtmsr(mfmsr() | MSR_PMM);

	for (i = 0; i < num_counters; ++i) {
		if (ctr[i].enabled) {
			ctr_write(i, reset_value[i]);
			/* Set each enabled counter to only
			 * count when the Mark bit is *not* set */
			set_pmc_marked(i, 1, 0);
			pmc_start_ctr(i, 1);
		} else {
			ctr_write(i, 0);

			/* Set the ctr to be stopped */
			pmc_start_ctr(i, 0);
		}
	}

	/* Clear the freeze bit, and enable the interrupt.
	 * The counters won't actually start until the rfi clears
	 * the PMM bit */
	pmc_start_ctrs(1);

	oprofile_running = 1;

	pr_debug("start on cpu %d, pmgc0 %x\n", smp_processor_id(),
		 mfpmr(PMRN_PMGC0));

	return 0;
}
/*
 * U-Boot "reset" command.  On e500 v2 cores (odd PVR version) a hard
 * reset is requested through the reset control register; if that does
 * not take effect (or on other cores) a core reset is initiated via
 * DBCR0 with MSR[DE] set so the debug reset request is honoured.
 * Returns 1 (command status) if the resets fail to take effect.
 */
int do_reset (cmd_tbl_t *cmdtp, bd_t *bd, int flag, int argc, char *argv[])
{
	uint pvr;
	uint ver;
	unsigned long val, msr;

	pvr = get_pvr();
	ver = PVR_VER(pvr);

	if (ver & 1){
		/* e500 v2 core has reset control register */
		volatile unsigned int * rstcr;
		rstcr = (volatile unsigned int *)(CFG_IMMR + 0xE00B0);
		*rstcr = 0x2;	/* HRESET_REQ */
		udelay(100);
	}

	/*
	 * Fallthrough if the code above failed
	 * Initiate hard reset in debug control register DBCR0
	 * Make sure MSR[DE] = 1
	 */
	msr = mfmsr ();
	msr |= MSR_DE;
	mtmsr (msr);

	/* 0x70000000: reset-request bits in DBCR0 — TODO confirm exact field */
	val = mfspr(DBCR0);
	val |= 0x70000000;
	mtspr(DBCR0,val);

	return 1;
}
/*
 * FSL embedded performance-monitor interrupt handler (PMM-first variant):
 * sets MSR[PMM] up front so the counters stay held off until the rfi,
 * then for every overflowed counter (negative value) logs a sample and
 * reloads it if profiling is active, otherwise zeroes it, and finally
 * unfreezes the counters.
 */
static void fsl_emb_handle_interrupt(struct pt_regs *regs,
				     struct op_counter_config *ctr)
{
	unsigned long pc;
	int is_kernel;
	int val;
	int i;

	/* set the PMM bit (see comment below) */
	mtmsr(mfmsr() | MSR_PMM);

	pc = regs->nip;
	is_kernel = is_kernel_addr(pc);

	for (i = 0; i < num_counters; ++i) {
		val = ctr_read(i);
		if (val < 0) {	/* counter overflowed */
			if (oprofile_running && ctr[i].enabled) {
				oprofile_add_ext_sample(pc, regs, i, is_kernel);
				ctr_write(i, reset_value[i]);
			} else {
				ctr_write(i, 0);
			}
		}
	}

	/* The freeze bit was set by the interrupt. */
	/* Clear the freeze bit, and reenable the interrupt.
	 * The counters won't actually start until the rfi clears
	 * the PMM bit */
	pmc_start_ctrs(1);
}
/*
 * This optional function does a "fast" critical region protection and returns
 * the previous protection level. This function is only called during very short
 * critical regions. An embedded system which supports ISR-based drivers might
 * want to implement this function by disabling interrupts. Task-based systems
 * might want to implement this by using a mutex or disabling tasking. This
 * function should support recursive calls from the same task or interrupt. In
 * other words, sys_arch_protect() could be called while already protected. In
 * that case the return value indicates that it is already protected.
 * sys_arch_protect() is only required if your port is supporting an operating
 * system.
 *
 * Implementation: reads the current status register and masks interrupts —
 * the MSR interrupt-enable bit on MicroBlaze, all exception enables
 * (XEXC_ALL) on PowerPC, or the IRQ bit of the CPSR on ARM.
 */
sys_prot_t sys_arch_protect()
{
	sys_prot_t cur;
#ifdef __MICROBLAZE__
	cur = mfmsr();
	mtmsr(cur & ~0x2);
#elif __PPC__
	cur = mfmsr();
	mtmsr(cur & ~XEXC_ALL);
#elif __arm__
	cur = mfcpsr();
	mtcpsr(cur & ~XIL_EXCEPTION_IRQ);
#endif
	return cur;
}
/*
 * 7450-family performance-monitor interrupt handler: for every PMC that
 * has overflowed (reads negative), log a sample (using SIAR as the
 * sampled PC) and reload the counter if profiling is active for it,
 * otherwise zero it, then restart the counters.
 */
static void fsl7450_handle_interrupt(struct pt_regs *regs,
				     struct op_counter_config *ctr)
{
	unsigned long pc;
	int is_kernel;
	int val;
	int i;

	/* Set MSR[PMM] before touching the counters — presumably, as in the
	 * other handlers here, so they stay held off until the rfi; confirm
	 * against the 7450 PMC documentation. */
	mtmsr(mfmsr() | MSR_PMM);

	/* SIAR holds the address of the sampled instruction. */
	pc = mfspr(SPRN_SIAR);
	is_kernel = is_kernel_addr(pc);

	for (i = 0; i < num_pmcs; ++i) {
		val = classic_ctr_read(i);
		if (val < 0) {	/* counter overflowed */
			if (oprofile_running && ctr[i].enabled) {
				oprofile_add_ext_sample(pc, regs, i, is_kernel);
				classic_ctr_write(i, reset_value[i]);
			} else {
				classic_ctr_write(i, 0);
			}
		}
	}

	/* Restart the counters (they were frozen by the interrupt). */
	pmc_start_ctrs();
}
/*
 * Simple SDRAM memory test: walk the first CFG_KBYTES_SDRAM KiB of RAM
 * (starting at address 0), filling each 1 KiB chunk with alternating
 * bit patterns (0xAA... then 0x55...) and reading it back.
 *
 * Runs with the MSR cleared so the test touches raw memory with all
 * interrupts off.
 *
 * Returns 0 if the whole range verifies, 1 on the first mismatch
 * (after printing the failing address).
 */
int testdram(void)
{
	unsigned long *mem = (unsigned long *)0;
	const unsigned long kend = (1024 / sizeof(unsigned long));
	unsigned long k, n;

	mtmsr(0);

	for (k = 0; k < CFG_KBYTES_SDRAM;
	     ++k, mem += (1024 / sizeof(unsigned long))) {
		if ((k & 1023) == 0) {
			/*
			 * k counts KiB, so report progress once per MiB.
			 * %lu, not %d: k is unsigned long — the old %3d
			 * was a format/argument mismatch (UB on LP64).
			 */
			printf("%3lu MB\r", k / 1024);
		}

		/*
		 * memset() only uses the low byte of its fill argument;
		 * pass 0xaa explicitly (the repeated byte yields
		 * 0xaaaaaaaa per word).  The previous 0xaaaaaaaa argument
		 * relied on implicit truncation to the same byte.
		 */
		memset(mem, 0xaa, 1024);
		for (n = 0; n < kend; ++n) {
			if (mem[n] != 0xaaaaaaaa) {
				printf("SDRAM test fails at: %08x\n",
				       (uint) & mem[n]);
				return 1;
			}
		}

		memset(mem, 0x55, 1024);
		for (n = 0; n < kend; ++n) {
			if (mem[n] != 0x55555555) {
				printf("SDRAM test fails at: %08x\n",
				       (uint) & mem[n]);
				return 1;
			}
		}
	}
	printf("SDRAM test passes\n");
	return 0;
}
/*
 * U-Boot "reset" command for MPC85xx.  First-generation PQ3 parts
 * (8540/8541/8555/8560) have no reset control register, so they get a
 * core-only reset through DBCR0 with MSR[DE] set; everything newer is
 * hard-reset by writing HRESET_REQ to the global utilities RSTCR.
 * Returns 1 (command status) if the reset does not take effect.
 */
int
do_reset (cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
{
/* Everything after the first generation of PQ3 parts has RSTCR */
#if defined(CONFIG_MPC8540) || defined(CONFIG_MPC8541) || \
	defined(CONFIG_MPC8555) || defined(CONFIG_MPC8560)
	unsigned long val, msr;

	/*
	 * Initiate hard reset in debug control register DBCR0
	 * Make sure MSR[DE] = 1. This only resets the core.
	 */
	msr = mfmsr ();
	msr |= MSR_DE;
	mtmsr (msr);

	val = mfspr(DBCR0);
	val |= 0x70000000;	/* reset-request bits — TODO confirm exact field */
	mtspr(DBCR0,val);
#else
	volatile ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);

	/* Attempt board-specific reset */
	out_be32(&gur->rstcr, 0x2);	/* HRESET_REQ */
	udelay(100);
#endif

	return 1;
}
/*
 * Shutdown the CPU as much as possible: mask critical, external,
 * machine-check and debug interrupts in the MSR, then spin forever.
 */
void
cpu_halt(void)
{

	mtmsr(mfmsr() & ~(PSL_CE | PSL_EE | PSL_ME | PSL_DE));
	for (;;)
		;
}
/*
 * Set the MSR of a (stopped) hardware thread by ramming instructions
 * into it.  The opcodes[] entries are instruction *encodings* built by
 * the mfspr()/mtmsr() macros (not native executions): presumably the
 * first stages "value" via SPR 277 (the scratch register used by the
 * ramming mechanism) into r0, and mtmsr(0) then moves r0 into the MSR
 * — confirm against the pdbg ram_instructions() contract.
 * Returns 0 on success; CHECK_ERR propagates a ramming failure.
 */
int ram_putmsr(struct pdbg_target *thread, uint64_t value)
{
	uint64_t opcodes[] = {mfspr(0, 277), mtmsr(0)};
	uint64_t results[] = {value, 0};

	CHECK_ERR(ram_instructions(thread, opcodes, results,
				   ARRAY_SIZE(opcodes), 0));
	return 0;
}
/*
 * Save thread "td"'s AltiVec state (v0-v31 and VSCR) into its PCB.
 * The vector unit is enabled temporarily around the stores, then
 * disabled again, and the thread's vector-CPU binding is cleared.
 */
void
save_vec(struct thread *td)
{
	int msr;
	struct pcb *pcb;

	pcb = td->td_pcb;

	/*
	 * Temporarily re-enable the vector unit during the save
	 */
	msr = mfmsr();
	mtmsr(msr | PSL_VEC);
	isync();

	/*
	 * Save the vector registers and VSCR to the PCB
	 */
#define STVX(n)   __asm ("stvx %1,0,%0" \
		:: "b"(pcb->pcb_vec.vr[n]), "n"(n));
	STVX(0);	STVX(1);	STVX(2);	STVX(3);
	STVX(4);	STVX(5);	STVX(6);	STVX(7);
	STVX(8);	STVX(9);	STVX(10);	STVX(11);
	STVX(12);	STVX(13);	STVX(14);	STVX(15);
	STVX(16);	STVX(17);	STVX(18);	STVX(19);
	STVX(20);	STVX(21);	STVX(22);	STVX(23);
	STVX(24);	STVX(25);	STVX(26);	STVX(27);
	STVX(28);	STVX(29);	STVX(30);	STVX(31);
#undef STVX

	/* VSCR can only be read via a vector register (v0 is scratch here). */
	__asm __volatile("mfvscr 0; stvewx 0,0,%0" :: "b"(&pcb->pcb_vec.vscr));

	/*
	 * Disable vector unit again
	 */
	isync();
	mtmsr(msr);

	/*
	 * Clear the current vec thread and pcb's CPU id
	 * XXX should this be left clear to allow lazy save/restore ?
	 */
	pcb->pcb_veccpu = INT_MAX;
	PCPU_SET(vecthread, NULL);
}
/*
 * This optional function does a "fast" set of critical region protection to the
 * value specified by pval. See the documentation for sys_arch_protect() for
 * more information. This function is only required if your port is supporting
 * an operating system.
 *
 * Restores the status register saved by sys_arch_protect(): CPSR on ARM,
 * MSR elsewhere (MicroBlaze/PowerPC).
 */
void sys_arch_unprotect(sys_prot_t lev)
{
#ifdef __arm__
	mtcpsr(lev);
#else
	mtmsr(lev);
#endif
}
/*
 * Fatal-exception handler: re-enable translation and mark interrupts
 * recoverable (PSL_IR | PSL_DR | PSL_RI) so printf can run, report the
 * exception, then panic the logical partition via the hypervisor.
 * NOTE(review): the message says "5 seconds" but delay() is called with
 * 10000000 — the delay unit isn't visible here; confirm they agree.
 */
void
ppc_exception(int code, vm_offset_t where, register_t msr)
{
	mtmsr(PSL_IR | PSL_DR | PSL_RI);
	printf("Exception %x at %#lx!\n", code, where);
	printf("Rebooting in 5 seconds...\n");
	delay(10000000);
	lv1_panic(1);
}
uintptr_t powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry, void *mdp) { struct pcpu *pc; vm_offset_t startkernel, endkernel; void *kmdp; char *env; bool ofw_bootargs = false; #ifdef DDB vm_offset_t ksym_start; vm_offset_t ksym_end; #endif kmdp = NULL; /* First guess at start/end kernel positions */ startkernel = __startkernel; endkernel = __endkernel; /* Check for ePAPR loader, which puts a magic value into r6 */ if (mdp == (void *)0x65504150) mdp = NULL; #ifdef AIM /* * If running from an FDT, make sure we are in real mode to avoid * tromping on firmware page tables. Everything in the kernel assumes * 1:1 mappings out of firmware, so this won't break anything not * already broken. This doesn't work if there is live OF, since OF * may internally use non-1:1 mappings. */ if (ofentry == 0) mtmsr(mfmsr() & ~(PSL_IR | PSL_DR)); #endif /* * Parse metadata if present and fetch parameters. Must be done * before console is inited so cninit gets the right value of * boothowto. */ if (mdp != NULL) { preload_metadata = mdp; kmdp = preload_search_by_type("elf kernel"); if (kmdp != NULL) { boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int); init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *), 0); endkernel = ulmax(endkernel, MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t)); #ifdef DDB ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t); ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t); db_fetch_ksymtab(ksym_start, ksym_end); #endif }
/*
 * Restart a PQ2 (MPC82xx) board: with local interrupts off, arm the
 * checkstop-reset enable in the clock/reset module, drop translation
 * and machine-check/external-interrupt enables from the MSR, then touch
 * the reset region to trigger the reset.  Panics if that returns.
 */
void pq2_restart(char *cmd)
{
	local_irq_disable();
	setbits32(&cpm2_immr->im_clkrst.car_rmr, RMR_CSRE);

	/* Clear the ME,EE,IR & DR bits in MSR to cause checkstop */
	mtmsr(mfmsr() & ~(MSR_ME | MSR_EE | MSR_IR | MSR_DR));
	in_8(&cpm2_immr->im_clkrst.res[0]);

	panic("Restart failed\n");
}