void cpu_reset(void)
{
    uint32_t ver = SVR_VER(mfspr(SPR_SVR));

    if (ver == SVR_MPC8572E || ver == SVR_MPC8572 ||
        ver == SVR_MPC8548E || ver == SVR_MPC8548)
        /* Systems with dedicated reset register */
        ccsr_write4(OCP85XX_RSTCR, 2);
    else {
        /* Clear DBCR0, disables debug interrupts and events. */
        mtspr(SPR_DBCR0, 0);
        __asm __volatile("isync");

        /* Enable Debug Interrupts in MSR. */
        mtmsr(mfmsr() | PSL_DE);

        /* Enable debug interrupts and issue reset. */
        mtspr(SPR_DBCR0, mfspr(SPR_DBCR0) | DBCR0_IDM | DBCR0_RST_SYSTEM);
    }

    printf("Reset failed...\n");
    while (1)
        ;
}
int do_reset (cmd_tbl_t *cmdtp, bd_t *bd, int flag, int argc, char *argv[])
{
    uint pvr;
    uint ver;
    unsigned long val, msr;

    pvr = get_pvr();
    ver = PVR_VER(pvr);

    if (ver & 1) {
        /* e500 v2 core has reset control register */
        volatile unsigned int *rstcr;

        rstcr = (volatile unsigned int *)(CFG_IMMR + 0xE00B0);
        *rstcr = 0x2;    /* HRESET_REQ */
        udelay(100);
    }

    /*
     * Fallthrough if the code above failed
     * Initiate hard reset in debug control register DBCR0
     * Make sure MSR[DE] = 1
     */
    msr = mfmsr ();
    msr |= MSR_DE;
    mtmsr (msr);

    val = mfspr(DBCR0);
    val |= 0x70000000;
    mtspr(DBCR0, val);

    return 1;
}
static void rs64_start(struct op_counter_config *ctr)
{
    int i;
    unsigned int mmcr0;

    /* set the PMM bit (see comment below) */
    mtmsrd(mfmsr() | MSR_PMM);

    for (i = 0; i < num_counters; ++i) {
        if (ctr[i].enabled) {
            ctr_write(i, reset_value[i]);
            ctrl_write(i, ctr[i].event);
        } else {
            ctr_write(i, 0);
        }
    }

    mmcr0 = mfspr(SPRN_MMCR0);

    /*
     * now clear the freeze bit, counting will not start until we
     * rfid from this exception, because only at that point will
     * the PMM bit be cleared
     */
    mmcr0 &= ~MMCR0_FC;
    mtspr(SPRN_MMCR0, mmcr0);

    dbg("start on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);
}
static void fsl_emb_handle_interrupt(struct pt_regs *regs,
                                     struct op_counter_config *ctr)
{
    unsigned long pc;
    int is_kernel;
    int val;
    int i;

    /* set the PMM bit (see comment below) */
    mtmsr(mfmsr() | MSR_PMM);

    pc = regs->nip;
    is_kernel = is_kernel_addr(pc);

    for (i = 0; i < num_counters; ++i) {
        val = ctr_read(i);
        if (val < 0) {
            if (oprofile_running && ctr[i].enabled) {
                oprofile_add_ext_sample(pc, regs, i, is_kernel);
                ctr_write(i, reset_value[i]);
            } else {
                ctr_write(i, 0);
            }
        }
    }

    /* The freeze bit was set by the interrupt. */

    /* Clear the freeze bit, and reenable the interrupt.
     * The counters won't actually start until the rfi clears
     * the PMM bit. */
    pmc_start_ctrs(1);
}
/**
 * Writes the register at the specified DCR address using the indirect
 * addressing method.
 *
 * @param DcrBase is the base of the block of DCR registers
 * @param DcrRegister is the intended destination DCR register
 * @param Data is the value to be placed into the specified DCR register
 *
 * @return None
 *
 * @note
 * C-style signature:
 *   void XIo_DcrIndirectAddrWriteReg(u32 DcrBase, u32 DcrRegister, u32 Data)
 *
 * Assumes the auto-buslocking feature is ON.
 * In order to make this operation atomic, interrupts are disabled before
 * and enabled after the DCR accesses.
 *
 ******************************************************************************/
inline void XIo_DcrIndirectAddrWriteReg(u32 DcrBase, u32 DcrRegister, u32 Data)
{
    unsigned int oldMSR = mfmsr();

    mtmsr(oldMSR & XDCR_DISABLE_EXCEPTIONS);

    switch (DcrBase) {
    case XDCR_0_BASEADDR:
        XIo_mDcrWriteReg(XDCR_0_BASEADDR | XDCR_IDA_ADDR,
                         XDCR_0_BASEADDR | DcrRegister);
        XIo_mDcrWriteReg(XDCR_0_BASEADDR | XDCR_IDA_ACC, Data);
        break;
    case XDCR_1_BASEADDR:
        XIo_mDcrWriteReg(XDCR_1_BASEADDR | XDCR_IDA_ADDR,
                         XDCR_1_BASEADDR | DcrRegister);
        XIo_mDcrWriteReg(XDCR_1_BASEADDR | XDCR_IDA_ACC, Data);
        break;
    case XDCR_2_BASEADDR:
        XIo_mDcrWriteReg(XDCR_2_BASEADDR | XDCR_IDA_ADDR,
                         XDCR_2_BASEADDR | DcrRegister);
        XIo_mDcrWriteReg(XDCR_2_BASEADDR | XDCR_IDA_ACC, Data);
        break;
    case XDCR_3_BASEADDR:
        XIo_mDcrWriteReg(XDCR_3_BASEADDR | XDCR_IDA_ADDR,
                         XDCR_3_BASEADDR | DcrRegister);
        XIo_mDcrWriteReg(XDCR_3_BASEADDR | XDCR_IDA_ACC, Data);
        break;
    default:
        XIo_mDcrWriteReg(XDCR_0_BASEADDR | XDCR_IDA_ADDR,
                         XDCR_0_BASEADDR | DcrRegister);
        XIo_mDcrWriteReg(XDCR_0_BASEADDR | XDCR_IDA_ACC, Data);
        break;
    }

    mtmsr(oldMSR);
    return;
}
/**
 * Writes the data to the APU UDI register at the specified APU address.
 *
 * @param DcrBase is the base of the block of DCR registers
 * @param UDInum is the intended destination APU register
 * @param Data is the value to be placed into the specified APU register
 *
 * @return None
 *
 * @note
 * C-style signature:
 *   void XIo_DcrWriteAPUUDIReg(u32 DcrBase, u32 UDInum, u32 Data)
 *
 * Since writing an APU UDI DCR requires a dummy write to the same DCR,
 * the target UDI number is required. In order to make this operation atomic,
 * interrupts are disabled before and enabled after the DCR accesses.
 * Because an APU UDI access involves two DCR accesses, the DCR bus must be
 * locked to ensure that another master doesn't access the APU UDI register
 * at the same time.
 * Care must be taken to not write a '1' to either timeout bit because
 * it will be cleared.
 * Steps:
 *   - save old MSR
 *   - disable interrupts by writing mask to MSR
 *   - acquire lock; since the PPC440 supports timeout wait, it will wait
 *     until it successfully acquires the DCR bus lock
 *   - shift and mask the UDI number to its bit position of [22:25]
 *   - add DCR base address to UDI number offset and perform the write
 *   - release DCR bus lock
 *   - restore MSR
 *
 ******************************************************************************/
inline void XIo_DcrWriteAPUUDIReg(u32 DcrBase, u32 UDInum, u32 Data)
{
    u32 oldMSR = mfmsr();

    mtmsr(oldMSR & XDCR_DISABLE_EXCEPTIONS);
    XIo_DcrLock(DcrBase);

    switch (DcrBase) {
    case XDCR_0_BASEADDR:
        mtdcr(XDCR_0_BASEADDR | XDCR_APU_UDI,
              (((UDInum) << 6) & 0x000003c0) | 0x00000030);
        mtdcr(XDCR_0_BASEADDR | XDCR_APU_UDI, (Data));
        break;
    case XDCR_1_BASEADDR:
        mtdcr(XDCR_1_BASEADDR | XDCR_APU_UDI,
              (((UDInum) << 6) & 0x000003c0) | 0x00000030);
        mtdcr(XDCR_1_BASEADDR | XDCR_APU_UDI, (Data));
        break;
    case XDCR_2_BASEADDR:
        mtdcr(XDCR_2_BASEADDR | XDCR_APU_UDI,
              (((UDInum) << 6) & 0x000003c0) | 0x00000030);
        mtdcr(XDCR_2_BASEADDR | XDCR_APU_UDI, (Data));
        break;
    case XDCR_3_BASEADDR:
        mtdcr(XDCR_3_BASEADDR | XDCR_APU_UDI,
              (((UDInum) << 6) & 0x000003c0) | 0x00000030);
        mtdcr(XDCR_3_BASEADDR | XDCR_APU_UDI, (Data));
        break;
    default:
        mtdcr(XDCR_0_BASEADDR | XDCR_APU_UDI,
              (((UDInum) << 6) & 0x000003c0) | 0x00000030);
        mtdcr(XDCR_0_BASEADDR | XDCR_APU_UDI, (Data));
        break;
    }

    XIo_DcrUnlock(DcrBase);
    mtmsr(oldMSR);
}
/**
 * Reads the APU UDI register at the specified APU address.
 *
 * @param DcrBase is the base of the block of DCR registers
 * @param UDInum is the intended source APU register
 *
 * @return Contents of the specified APU register.
 *
 * @note
 * C-style signature:
 *   u32 XIo_DcrReadAPUUDIReg(u32 DcrBase, u32 UDInum)
 *
 * Since reading an APU UDI DCR requires a dummy write to the same DCR,
 * the target UDI number is required. In order to make this operation atomic,
 * interrupts are disabled before and enabled after the DCR accesses.
 * Because an APU UDI access involves two DCR accesses, the DCR bus must be
 * locked to ensure that another master doesn't access the APU UDI register
 * at the same time.
 * Care must be taken to not write a '1' to either timeout bit because
 * it will be cleared.
 * Steps:
 *   - save old MSR
 *   - disable interrupts by writing mask to MSR
 *   - acquire lock; since the PPC440 supports timeout wait, it will wait
 *     until it successfully acquires the DCR bus lock
 *   - shift and mask the UDI number to its bit position of [22:25]
 *   - add the DCR base address to the UDI number and perform the read
 *   - release DCR bus lock
 *   - restore MSR
 *   - return value read
 *
 ******************************************************************************/
inline u32 XIo_DcrReadAPUUDIReg(u32 DcrBase, u32 UDInum)
{
    u32 rVal;
    u32 oldMSR = mfmsr();

    mtmsr(oldMSR & XDCR_DISABLE_EXCEPTIONS);
    XIo_DcrLock(DcrBase);

    switch (DcrBase) {
    case XDCR_0_BASEADDR:
        mtdcr(XDCR_0_BASEADDR | XDCR_APU_UDI,
              (((UDInum) << 6) & 0x000003c0) | 0x00000030);
        rVal = mfdcr(XDCR_0_BASEADDR | XDCR_APU_UDI);
        break;
    case XDCR_1_BASEADDR:
        mtdcr(XDCR_1_BASEADDR | XDCR_APU_UDI,
              (((UDInum) << 6) & 0x000003c0) | 0x00000030);
        rVal = mfdcr(XDCR_1_BASEADDR | XDCR_APU_UDI);
        break;
    case XDCR_2_BASEADDR:
        mtdcr(XDCR_2_BASEADDR | XDCR_APU_UDI,
              (((UDInum) << 6) & 0x000003c0) | 0x00000030);
        rVal = mfdcr(XDCR_2_BASEADDR | XDCR_APU_UDI);
        break;
    case XDCR_3_BASEADDR:
        mtdcr(XDCR_3_BASEADDR | XDCR_APU_UDI,
              (((UDInum) << 6) & 0x000003c0) | 0x00000030);
        rVal = mfdcr(XDCR_3_BASEADDR | XDCR_APU_UDI);
        break;
    default:
        mtdcr(XDCR_0_BASEADDR | XDCR_APU_UDI,
              (((UDInum) << 6) & 0x000003c0) | 0x00000030);
        rVal = mfdcr(XDCR_0_BASEADDR | XDCR_APU_UDI);
        break;
    }

    XIo_DcrUnlock(DcrBase);
    mtmsr(oldMSR);

    return (rVal);
}
static int fsl_emb_start(struct op_counter_config *ctr)
{
    int i;

    mtmsr(mfmsr() | MSR_PMM);

    for (i = 0; i < num_counters; ++i) {
        if (ctr[i].enabled) {
            ctr_write(i, reset_value[i]);
            /* Set each enabled counter to only
             * count when the Mark bit is *not* set */
            set_pmc_marked(i, 1, 0);
            pmc_start_ctr(i, 1);
        } else {
            ctr_write(i, 0);
            /* Set the ctr to be stopped */
            pmc_start_ctr(i, 0);
        }
    }

    /* Clear the freeze bit, and enable the interrupt.
     * The counters won't actually start until the rfi clears
     * the PMM bit */
    pmc_start_ctrs(1);

    oprofile_running = 1;

    pr_debug("start on cpu %d, pmgc0 %x\n", smp_processor_id(),
             mfpmr(PMRN_PMGC0));

    return 0;
}
static void fsl_emb_handle_interrupt(struct pt_regs *regs,
                                     struct op_counter_config *ctr)
{
    unsigned long pc;
    int is_kernel;
    int val;
    int i;

    pc = regs->nip;
    is_kernel = is_kernel_addr(pc);

    for (i = 0; i < num_counters; ++i) {
        val = ctr_read(i);
        if (val < 0) {
            if (oprofile_running && ctr[i].enabled) {
                oprofile_add_ext_sample(pc, regs, i, is_kernel);
                ctr_write(i, reset_value[i]);
            } else {
                ctr_write(i, 0);
            }
        }
    }

    /* The freeze bit was set by the interrupt. */

    /* Clear the freeze bit, and reenable the interrupt. The
     * counters won't actually start until the rfi clears the PMM
     * bit. The PMM bit should not be set until after the interrupt
     * is cleared to avoid it getting lost in some hypervisor
     * environments. */
    mtmsr(mfmsr() | MSR_PMM);
    pmc_start_ctrs(1);
}
void enable_vec(struct thread *td)
{
    int msr;
    struct pcb *pcb;
    struct trapframe *tf;

    pcb = td->td_pcb;
    tf = trapframe(td);

    /*
     * Save the thread's Altivec CPU number, and set the CPU's current
     * vector thread
     */
    td->td_pcb->pcb_veccpu = PCPU_GET(cpuid);
    PCPU_SET(vecthread, td);

    /*
     * Enable the vector unit for when the thread returns from the
     * exception. If this is the first time the unit has been used by
     * the thread, initialise the vector registers and VSCR to 0, and
     * set the flag to indicate that the vector unit is in use.
     */
    tf->srr1 |= PSL_VEC;
    if (!(pcb->pcb_flags & PCB_VEC)) {
        memset(&pcb->pcb_vec, 0, sizeof pcb->pcb_vec);
        pcb->pcb_flags |= PCB_VEC;
    }

    /*
     * Temporarily enable the vector unit so the registers
     * can be restored.
     */
    msr = mfmsr();
    mtmsr(msr | PSL_VEC);
    isync();

    /*
     * Restore VSCR by first loading it into a vector and then into VSCR.
     * (this needs to be done before loading the user's vector registers
     * since we need to use a scratch vector register)
     */
    __asm __volatile("vxor 0,0,0; lvewx 0,0,%0; mtvscr 0" \
        :: "b"(&pcb->pcb_vec.vscr));

#define LVX(n)   __asm ("lvx " #n ",0,%0" \
        :: "b"(&pcb->pcb_vec.vr[n]));
    LVX(0);     LVX(1);     LVX(2);     LVX(3);
    LVX(4);     LVX(5);     LVX(6);     LVX(7);
    LVX(8);     LVX(9);     LVX(10);    LVX(11);
    LVX(12);    LVX(13);    LVX(14);    LVX(15);
    LVX(16);    LVX(17);    LVX(18);    LVX(19);
    LVX(20);    LVX(21);    LVX(22);    LVX(23);
    LVX(24);    LVX(25);    LVX(26);    LVX(27);
    LVX(28);    LVX(29);    LVX(30);    LVX(31);
#undef LVX

    isync();
    mtmsr(msr);
}
int do_reset (cmd_tbl_t *cmdtp, int flag, int argc, char * const argv[])
{
/* Everything after the first generation of PQ3 parts has RSTCR */
#if defined(CONFIG_MPC8540) || defined(CONFIG_MPC8541) || \
    defined(CONFIG_MPC8555) || defined(CONFIG_MPC8560)
    unsigned long val, msr;

    /*
     * Initiate hard reset in debug control register DBCR0
     * Make sure MSR[DE] = 1. This only resets the core.
     */
    msr = mfmsr ();
    msr |= MSR_DE;
    mtmsr (msr);

    val = mfspr(DBCR0);
    val |= 0x70000000;
    mtspr(DBCR0, val);
#else
    volatile ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);

    out_be32(&gur->rstcr, 0x2);    /* HRESET_REQ */
    udelay(100);
#endif

    return 1;
}
int opal_check(void)
{
    phandle_t opal;
    cell_t val[2];

    if (opal_initialized)
        return (0);

    opal = OF_finddevice("/ibm,opal");
    if (opal == -1)
        return (ENOENT);

    if (!OF_hasprop(opal, "opal-base-address") ||
        !OF_hasprop(opal, "opal-entry-address"))
        return (ENOENT);

    OF_getencprop(opal, "opal-base-address", val, sizeof(val));
    opal_data = ((uint64_t)val[0] << 32) | val[1];
    OF_getencprop(opal, "opal-entry-address", val, sizeof(val));
    opal_entrypoint = ((uint64_t)val[0] << 32) | val[1];

    opal_msr = mfmsr() & ~(PSL_EE | PSL_IR | PSL_DR | PSL_SE);

    opal_initialized = 1;

    return (0);
}
static int rtaspci_attach(device_t dev)
{
    struct rtaspci_softc *sc;

    sc = device_get_softc(dev);

    sc->read_pci_config = rtas_token_lookup("read-pci-config");
    sc->write_pci_config = rtas_token_lookup("write-pci-config");
    sc->ex_read_pci_config = rtas_token_lookup("ibm,read-pci-config");
    sc->ex_write_pci_config = rtas_token_lookup("ibm,write-pci-config");

    sc->sc_extended_config = 0;
    OF_getprop(ofw_bus_get_node(dev), "ibm,pci-config-space-type",
        &sc->sc_extended_config, sizeof(sc->sc_extended_config));

    bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR,
        BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
        BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, 0, NULL, NULL,
        &sc->dma_tag);
#ifdef __powerpc64__
    if (!(mfmsr() & PSL_HV))
        phyp_iommu_set_dma_tag(dev, dev, sc->dma_tag);
#endif

    return (ofw_pci_attach(dev));
}
static int chrp_attach(platform_t plat)
{
#ifdef __powerpc64__
    /* XXX: check for /rtas/ibm,hypertas-functions? */
    if (!(mfmsr() & PSL_HV)) {
        struct mem_region *phys, *avail;
        int nphys, navail;

        mem_regions(&phys, &nphys, &avail, &navail);
        realmaxaddr = phys[0].mr_size;

        pmap_mmu_install("mmu_phyp", BUS_PROBE_SPECIFIC);
        cpu_idle_hook = phyp_cpu_idle;

        /* Set up important VPA fields */
        bzero(splpar_vpa, sizeof(splpar_vpa));
        splpar_vpa[4] = (uint8_t)((sizeof(splpar_vpa) >> 8) & 0xff);
        splpar_vpa[5] = (uint8_t)(sizeof(splpar_vpa) & 0xff);
        splpar_vpa[0xba] = 1;       /* Maintain FPRs */
        splpar_vpa[0xbb] = 1;       /* Maintain PMCs */
        splpar_vpa[0xfc] = 0xff;    /* Maintain full SLB */
        splpar_vpa[0xfd] = 0xff;
        splpar_vpa[0xff] = 1;       /* Maintain Altivec */
        mb();

        /* Set up hypervisor CPU stuff */
        chrp_smp_ap_init(plat);
    }
#endif

    /* Some systems (e.g. QEMU) need Open Firmware to stand down */
    ofw_quiesce();

    return (0);
}
static int power4_start(struct op_counter_config *ctr)
{
    int i;
    unsigned int mmcr0;

    mtmsrd(mfmsr() | MSR_PMM);

    for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
        if (ctr[i].enabled) {
            classic_ctr_write(i, reset_value[i]);
        } else {
            classic_ctr_write(i, 0);
        }
    }

    mmcr0 = mfspr(SPRN_MMCR0);
    mmcr0 &= ~MMCR0_PMAO;
    mmcr0 &= ~MMCR0_FC;
    mtspr(SPRN_MMCR0, mmcr0);

    oprofile_running = 1;

    dbg("start on cpu %d, mmcr0 %x\n", smp_processor_id(), mmcr0);

    return 0;
}
static void __cpuinit smp_85xx_mach_cpu_die(void)
{
    unsigned int cpu = smp_processor_id();
    u32 tmp;

    local_irq_disable();
    idle_task_exit();
    generic_set_cpu_dead(cpu);
    mb();

    mtspr(SPRN_TCR, 0);

    __flush_disable_L1();
    tmp = (mfspr(SPRN_HID0) & ~(HID0_DOZE | HID0_SLEEP)) | HID0_NAP;
    mtspr(SPRN_HID0, tmp);
    isync();

    /* Enter NAP mode. */
    tmp = mfmsr();
    tmp |= MSR_WE;
    mb();
    mtmsr(tmp);
    isync();

    while (1)
        ;
}
void vec_state_load(lwp_t *l, u_int flags)
{
    struct pcb * const pcb = lwp_getpcb(l);

    if (__predict_false(!vec_used_p(l))) {
        memset(&pcb->pcb_vr, 0, sizeof(pcb->pcb_vr));
        vec_mark_used(l);
    }

    /*
     * Enable SPE temporarily (and disable interrupts).
     */
    const register_t msr = mfmsr();
    mtmsr((msr & ~PSL_EE) | PSL_SPV);
    __asm volatile ("isync");

    /*
     * Call an assembly routine to load everything.
     */
    vec_load_from_vreg(&pcb->pcb_vr);
    __asm volatile ("sync");

    /*
     * Restore MSR (turn off SPE)
     */
    mtmsr(msr);
    __asm volatile ("isync");

    /*
     * Set PSL_SPV so vectors will be enabled on return to user.
     */
    l->l_md.md_utf->tf_srr1 |= PSL_SPV;
}
static void fsl7450_handle_interrupt(struct pt_regs *regs,
                                     struct op_counter_config *ctr)
{
    unsigned long pc;
    int is_kernel;
    int val;
    int i;

    /* set the PMM bit (see comment below) */
    mtmsr(mfmsr() | MSR_PMM);

    pc = mfspr(SPRN_SIAR);
    is_kernel = is_kernel_addr(pc);

    for (i = 0; i < num_pmcs; ++i) {
        val = classic_ctr_read(i);
        if (val < 0) {
            if (oprofile_running && ctr[i].enabled) {
                oprofile_add_ext_sample(pc, regs, i, is_kernel);
                classic_ctr_write(i, reset_value[i]);
            } else {
                classic_ctr_write(i, 0);
            }
        }
    }

    /* The freeze bit was set by the interrupt. */

    /* Clear the freeze bit, and reenable the interrupt.
     * The counters won't actually start until the rfi clears
     * the PMM bit. */
    pmc_start_ctrs();
}
/*
 * A very short dispatch, to try and maximise assembler code use
 * between all exception types. Maybe 'true' interrupts should go
 * here, and the trap code can come in separately
 */
void powerpc_interrupt(struct trapframe *framep)
{
    struct thread *td;
    struct clockframe ckframe;

    td = curthread;

    switch (framep->exc) {
    case EXC_EXI:
        atomic_add_int(&td->td_intr_nesting_level, 1);
        (*powerpc_extintr_handler)();
        atomic_subtract_int(&td->td_intr_nesting_level, 1);
        break;

    case EXC_DECR:
        atomic_add_int(&td->td_intr_nesting_level, 1);
        ckframe.srr0 = framep->srr0;
        ckframe.srr1 = framep->srr1;
        decr_intr(&ckframe);
        atomic_subtract_int(&td->td_intr_nesting_level, 1);
        break;

    default:
        /*
         * Re-enable interrupts and call the generic trap code
         */
#if 0
        printf("powerpc_interrupt: got trap\n");
#endif
        mtmsr(mfmsr() | PSL_EE);
        isync();
        trap(framep);
    }
}
/*
 * This optional function does a "fast" critical region protection and returns
 * the previous protection level. This function is only called during very short
 * critical regions. An embedded system which supports ISR-based drivers might
 * want to implement this function by disabling interrupts. Task-based systems
 * might want to implement this by using a mutex or disabling tasking. This
 * function should support recursive calls from the same task or interrupt. In
 * other words, sys_arch_protect() could be called while already protected. In
 * that case the return value indicates that it is already protected.
 * sys_arch_protect() is only required if your port is supporting an operating
 * system.
 */
sys_prot_t sys_arch_protect()
{
    sys_prot_t cur;
#ifdef __MICROBLAZE__
    cur = mfmsr();
    mtmsr(cur & ~0x2);
#elif __PPC__
    cur = mfmsr();
    mtmsr(cur & ~XEXC_ALL);
#elif __arm__
    cur = mfcpsr();
    mtcpsr(cur & ~XIL_EXCEPTION_IRQ);
#endif
    return cur;
}
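/*
 * Illustrative sketch (not from the original sources): an lwIP port that
 * defines sys_arch_protect() as above also needs the matching
 * sys_arch_unprotect(), which restores the protection level previously
 * returned.  This assumes the same mfmsr/mtmsr, mfcpsr/mtcpsr intrinsics
 * and macros used by the snippet above.
 */
void sys_arch_unprotect(sys_prot_t pval)
{
#ifdef __MICROBLAZE__
    mtmsr(pval);        /* restore the saved MSR, including the IE bit */
#elif __PPC__
    mtmsr(pval);        /* restore the saved MSR exception-enable bits */
#elif __arm__
    mtcpsr(pval);       /* restore the saved CPSR interrupt mask */
#endif
}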
BOOLEAN
EFIAPI
PpcGetInterruptState (
  VOID
  )
{
  return (mfmsr() & MSR_EE) != 0;
}
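/*
 * Illustrative sketch (not from the original sources): a typical caller
 * pairs PpcGetInterruptState() with explicit MSR updates to save, mask and
 * conditionally restore external interrupts.  ExampleCriticalSection is a
 * hypothetical name used only for this example.
 */
VOID
ExampleCriticalSection (VOID)
{
  BOOLEAN  WasEnabled;

  WasEnabled = PpcGetInterruptState ();
  mtmsr (mfmsr () & ~MSR_EE);      /* mask external interrupts */
  /* ... critical work ... */
  if (WasEnabled) {
    mtmsr (mfmsr () | MSR_EE);     /* restore only if previously enabled */
  }
}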
/* Shutdown the CPU as much as possible. */
void cpu_halt(void)
{
    mtmsr(mfmsr() & ~(PSL_CE | PSL_EE | PSL_ME | PSL_DE));
    while (1)
        ;
}
void init_idle_task (void)
{
    idle_task_pid = proc_create (PRIO_LOWEST);   // Idle task (PID 0).

    // Idle task assumed to be running as soon as the kernel starts
    ptable[idle_task_pid].state = PROC_RUN;

#ifndef PPC_CPU_440
    ptable[idle_task_pid].pcontext.regs[CTX_INDEX_MSR] =
        mfmsr () | XIL_EXCEPTION_NON_CRITICAL;
#else
    // We set MSR[DS] = 1 here, because that is the TLB scheme we use to
    // separate instruction and data space on the 440.
    ptable[idle_task_pid].pcontext.regs[CTX_INDEX_MSR] =
        mfmsr () | XIL_EXCEPTION_NON_CRITICAL | XREG_MSR_TLB_DATA_TS;
#endif

    ptable[idle_task_pid].pcontext.regs[CTX_INDEX_PC] = (unsigned int)idle_task;
    ptable[idle_task_pid].pcontext.regs[CTX_INDEX_GPR(1)]  = mfgpr (1);
    ptable[idle_task_pid].pcontext.regs[CTX_INDEX_GPR(2)]  = mfgpr (2);
    ptable[idle_task_pid].pcontext.regs[CTX_INDEX_GPR(13)] = mfgpr (13);

    SET_CURRENT_PROCESS (idle_task_pid);
}
int ram_getmsr(struct pdbg_target *thread, uint64_t *value)
{
    uint64_t opcodes[] = { mfmsr(0), mtspr(277, 0) };
    uint64_t results[] = { 0, 0 };

    CHECK_ERR(ram_instructions(thread, opcodes, results,
                               ARRAY_SIZE(opcodes), 0));
    *value = results[1];

    return 0;
}
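/*
 * Illustrative sketch (not from the original sources): reading the MSR of an
 * already-selected pdbg thread target and printing it.  "thread" stands for
 * a hypothetical, previously obtained struct pdbg_target pointer; the printf
 * format needs <stdio.h> and <inttypes.h>.
 */
uint64_t msr;

if (ram_getmsr(thread, &msr) == 0)
    printf("MSR = 0x%016" PRIx64 "\n", msr);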
uintptr_t powerpc_init(vm_offset_t fdt, vm_offset_t toc, vm_offset_t ofentry,
    void *mdp)
{
    struct pcpu *pc;
    vm_offset_t startkernel, endkernel;
    void *kmdp;
    char *env;
    bool ofw_bootargs = false;
#ifdef DDB
    vm_offset_t ksym_start;
    vm_offset_t ksym_end;
#endif

    kmdp = NULL;

    /* First guess at start/end kernel positions */
    startkernel = __startkernel;
    endkernel = __endkernel;

    /* Check for ePAPR loader, which puts a magic value into r6 */
    if (mdp == (void *)0x65504150)
        mdp = NULL;

#ifdef AIM
    /*
     * If running from an FDT, make sure we are in real mode to avoid
     * tromping on firmware page tables. Everything in the kernel assumes
     * 1:1 mappings out of firmware, so this won't break anything not
     * already broken. This doesn't work if there is live OF, since OF
     * may internally use non-1:1 mappings.
     */
    if (ofentry == 0)
        mtmsr(mfmsr() & ~(PSL_IR | PSL_DR));
#endif

    /*
     * Parse metadata if present and fetch parameters. Must be done
     * before console is inited so cninit gets the right value of
     * boothowto.
     */
    if (mdp != NULL) {
        preload_metadata = mdp;
        kmdp = preload_search_by_type("elf kernel");
        if (kmdp != NULL) {
            boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
            init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *), 0);
            endkernel = ulmax(endkernel, MD_FETCH(kmdp,
                MODINFOMD_KERNEND, vm_offset_t));
#ifdef DDB
            ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
            ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
            db_fetch_ksymtab(ksym_start, ksym_end);
#endif
        }
static void mvme5100_restart(char *cmd)
{
    local_irq_disable();
    mtmsr(mfmsr() | MSR_IP);

    out_8((u_char *) restart, 0x01);

    while (1)
        ;
}
void pq2_restart(char *cmd)
{
    local_irq_disable();
    setbits32(&cpm2_immr->im_clkrst.car_rmr, RMR_CSRE);

    mtmsr(mfmsr() & ~(MSR_ME | MSR_EE | MSR_IR | MSR_DR));
    in_8(&cpm2_immr->im_clkrst.res[0]);
    panic("Restart failed\n");
}
void pq2_restart(char *cmd)
{
    local_irq_disable();
    setbits32(&cpm2_immr->im_clkrst.car_rmr, RMR_CSRE);

    /* Clear the ME,EE,IR & DR bits in MSR to cause checkstop */
    mtmsr(mfmsr() & ~(MSR_ME | MSR_EE | MSR_IR | MSR_DR));
    in_8(&cpm2_immr->im_clkrst.res[0]);
    panic("Restart failed\n");
}
static void chrp_smp_ap_init(platform_t platform)
{
    if (!(mfmsr() & PSL_HV)) {
        /* Set interrupt priority */
        phyp_hcall(H_CPPR, 0xff);

        /* Register VPA */
        phyp_hcall(H_REGISTER_VPA, 1UL, PCPU_GET(cpuid), splpar_vpa);
    }
}
/*
 * This optional function does a "fast" critical region protection and returns
 * the previous protection level. This function is only called during very short
 * critical regions. An embedded system which supports ISR-based drivers might
 * want to implement this function by disabling interrupts. Task-based systems
 * might want to implement this by using a mutex or disabling tasking. This
 * function should support recursive calls from the same task or interrupt. In
 * other words, sys_arch_protect() could be called while already protected. In
 * that case the return value indicates that it is already protected.
 * sys_arch_protect() is only required if your port is supporting an operating
 * system.
 */
sys_prot_t sys_arch_protect()
{
    sys_prot_t cur;
#ifdef __MICROBLAZE__
    cur = mfmsr();
    mtmsr(cur & ~0x2);
#elif __PPC__
    cur = mfmsr();
    mtmsr(cur & ~XEXC_ALL);
#elif __arm__
#ifdef PEEP
    EmacDisableIntr();
#else
    cur = mfcpsr();
    mtcpsr(cur | 0xC0);
#endif
#endif
    return cur;
}