/*
 * Write a shadow TLB entry into the host TLB.
 *
 * @stlbe: shadow entry whose MAS values are loaded into the MAS SPRs
 * @mas0:  MAS0 value selecting the TLB/entry to write
 * @lpid:  current LPID, folded into MAS8 on HV-capable hardware
 */
static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
				     uint32_t mas0, uint32_t *lpid)
{
	unsigned long flags;

	/* The MAS SPRs are per-CPU scratch state; keep the whole
	 * load + tlbwe sequence atomic w.r.t. interrupts. */
	local_irq_save(flags);
	mtspr(SPRN_MAS0, mas0);
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
	/* mas7_3 packs MAS7 in the high 32 bits and MAS3 in the low 32. */
	mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
	mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
#ifdef CONFIG_KVM_BOOKE_HV
	/* populate mas8 with latest LPID */
	stlbe->mas8 = MAS8_TGS | *lpid;
	mtspr(SPRN_MAS8, stlbe->mas8);
#endif
	asm volatile("isync; tlbwe" : : : "memory");

#ifdef CONFIG_KVM_BOOKE_HV
	/* Must clear mas8 for other host tlbwe's */
	mtspr(SPRN_MAS8, 0);
	isync();
#endif
	local_irq_restore(flags);

	/* NOTE: without CONFIG_KVM_BOOKE_HV, stlbe->mas8 traces whatever
	 * value the caller left there (it is not written above). */
	trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
				      stlbe->mas2, stlbe->mas7_3);
}
/*
 * Park a CPU that is being offlined: either spin in the generic path or,
 * when nap powersave is enabled, loop in hardware idle until the CPU is
 * asked to restart (come back online).
 */
static void pnv_smp_cpu_kill_self(void)
{
	unsigned int cpu;

	/* If powersave_nap is enabled, use NAP mode, else just
	 * spin aimlessly
	 */
	if (!powersave_nap) {
		generic_mach_cpu_die();
		return;
	}

	/* Standard hot unplug procedure */
	local_irq_disable();
	idle_task_exit();
	current->active_mm = NULL; /* for sanity */
	cpu = smp_processor_id();
	DBG("CPU%d offline\n", cpu);
	generic_set_cpu_dead(cpu);
	/* Publish the dead state before anyone polls for it. */
	smp_wmb();

	/* We don't want to take decrementer interrupts while we are offline,
	 * so clear LPCR:PECE1. We keep PECE2 enabled.
	 */
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
	while (!generic_check_cpu_restart(cpu)) {
		power7_idle();
		if (!generic_check_cpu_restart(cpu)) {
			DBG("CPU%d Unexpected exit while offline !\n", cpu);
			/* We may be getting an IPI, so we re-enable
			 * interrupts to process it, it will be ignored
			 * since we aren't online (hopefully)
			 */
			local_irq_enable();
			local_irq_disable();
		}
	}
	/* Restart requested: restore decrementer wakeups before returning. */
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
	DBG("CPU%d coming online...\n", cpu);
}
/*
 * Program PA6T performance counter @i with @val.  The SPR number must be
 * a compile-time constant for mtspr, hence the explicit per-counter
 * branches instead of arithmetic on the SPR number.
 */
static inline void ctr_write(unsigned int i, u64 val)
{
	if (i == 0)
		mtspr(SPRN_PA6T_PMC0, val);
	else if (i == 1)
		mtspr(SPRN_PA6T_PMC1, val);
	else if (i == 2)
		mtspr(SPRN_PA6T_PMC2, val);
	else if (i == 3)
		mtspr(SPRN_PA6T_PMC3, val);
	else if (i == 4)
		mtspr(SPRN_PA6T_PMC4, val);
	else if (i == 5)
		mtspr(SPRN_PA6T_PMC5, val);
	else
		printk(KERN_ERR "ctr_write called with bad arg %u\n", i);
}
static void tau_timeout(void * info) { unsigned long cpu = smp_processor_id(); unsigned long flags; int size; int shrink; /* disabling interrupts *should* be okay */ save_flags(flags); cli(); #ifndef CONFIG_TAU_INT TAUupdate(cpu); #endif size = tau[cpu].high - tau[cpu].low; if (size > min_window && ! tau[cpu].grew) { /* do an exponential shrink of half the amount currently over size */ shrink = (2 + size - min_window) / 4; if (shrink) { tau[cpu].low += shrink; tau[cpu].high -= shrink; } else { /* size must have been min_window + 1 */ tau[cpu].low += 1; #if 1 /* debug */ if ((tau[cpu].high - tau[cpu].low) != min_window){ printk(KERN_ERR "temp.c: line %d, logic error\n", __LINE__); } #endif } } tau[cpu].grew = 0; set_thresholds(cpu); /* * Do the enable every time, since otherwise a bunch of (relatively) * complex sleep code needs to be added. One mtspr every time * tau_timeout is called is probably not a big deal. * * Enable thermal sensor and set up sample interval timer * need 20 us to do the compare.. until a nice 'cpu_speed' function * call is implemented, just assume a 500 mhz clock. It doesn't really * matter if we take too long for a compare since it's all interrupt * driven anyway. * * use a extra long time.. (60 us @ 500 mhz) */ mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E); restore_flags(flags); }
/*
 * Enable Pause(0) power management on the calling CPU and route DEC/EE
 * interrupt wakeups to its hardware thread.
 *
 * Fix: the regs check used cbe_pervasive->regs, i.e. always array entry 0,
 * instead of this CPU's own entry p->regs.
 */
static void __init cbe_enable_pause_zero(void)
{
	unsigned long thread_switch_control;
	unsigned long temp_register;
	struct cbe_pervasive *p;
	int thread;

	spin_lock_irq(&cbe_pervasive_lock);
	p = &cbe_pervasive[smp_processor_id()];

	/* Check this CPU's own register mapping, not entry 0. */
	if (!p->regs)
		goto out;

	pr_debug("Power Management: CPU %d\n", smp_processor_id());

	/* Enable Pause(0) control bit */
	temp_register = in_be64(&p->regs->pm_control);

	out_be64(&p->regs->pm_control,
		 temp_register | PMD_PAUSE_ZERO_CONTROL);

	/* Enable DEC and EE interrupt request */
	thread_switch_control = mfspr(SPRN_TSC_CELL);
	thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST;

	/* CTRL[CT] tells us which hardware thread we are running on. */
	switch ((mfspr(SPRN_CTRLF) & CTRL_CT)) {
	case CTRL_CT0:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_0;
		thread = 0;
		break;
	case CTRL_CT1:
		thread_switch_control |= TSC_CELL_DEC_ENABLE_1;
		thread = 1;
		break;
	default:
		printk(KERN_WARNING "%s: unknown configuration\n",
		       __FUNCTION__);
		thread = -1;
		break;
	}

	/* Cross-check the detected thread against the device tree. */
	if (p->thread != thread)
		printk(KERN_WARNING "%s: device tree inconsistant, "
		       "cpu %i: %d/%d\n", __FUNCTION__,
		       smp_processor_id(), p->thread, thread);

	mtspr(SPRN_TSC_CELL, thread_switch_control);

out:
	spin_unlock_irq(&cbe_pervasive_lock);
}
/*
 * Set up the boot CPU's cpu_spec and baseline SPR state for the
 * device-tree CPU-features boot path.
 */
static void __init cpufeatures_setup_cpu(void)
{
	set_cur_cpu_spec(&base_cpu_spec);

	/* Match any PVR; record the real value for reporting. */
	cur_cpu_spec->pvr_mask = -1;
	cur_cpu_spec->pvr_value = mfspr(SPRN_PVR);

	/* Initialize the base environment -- clear FSCR/HFSCR.  */
	hv_mode = !!(mfmsr() & MSR_HV);
	if (hv_mode) {
		/* CPU_FTR_HVMODE is used early in PACA setup */
		cur_cpu_spec->cpu_features |= CPU_FTR_HVMODE;
		/* HFSCR is a hypervisor resource; only touch it in HV mode. */
		mtspr(SPRN_HFSCR, 0);
	}
	mtspr(SPRN_FSCR, 0);

	/*
	 * LPCR does not get cleared, to match behaviour with secondaries
	 * in __restore_cpu_cpufeatures. Once the idle code is fixed, this
	 * could clear LPCR too.
	 */
}
/*
 * This sets up the IRQ domain for the PIC built in to the OpenRISC
 * 1000 CPU.  This is the "root" domain as these are the interrupts
 * that directly trigger an exception in the CPU.
 *
 * Returns 0 on success, -ENODEV if the irq domain cannot be created.
 */
static int __init or1k_pic_init(struct device_node *node,
				struct or1k_pic_dev *pic)
{
	/* Disable all interrupts until explicitly requested */
	mtspr(SPR_PICMR, (0UL));

	root_domain = irq_domain_add_linear(node, 32, &or1k_irq_domain_ops,
					    pic);
	/* Fix: irq_domain_add_linear() can fail (returns NULL); don't
	 * install the flow handler with no domain behind it. */
	if (!root_domain)
		return -ENODEV;

	set_handle_irq(or1k_pic_handle_irq);

	return 0;
}
/* Enable all PECE wakeup sources in the LPCR for the ISAv3.0B stop idle
 * state.  Always reports the feature as enabled. */
static int __init feat_enable_idle_stop(struct dt_cpu_feature *f)
{
	u64 wakeup_modes;

	/* Set PECE wakeup modes for ISAv3.0B */
	wakeup_modes = mfspr(SPRN_LPCR);
	wakeup_modes |= LPCR_PECE0 | LPCR_PECE1 | LPCR_PECE2;
	mtspr(SPRN_LPCR, wakeup_modes);

	return 1;
}
/* Stop oprofile sampling on this CPU by freezing the PA6T counters. */
static void pa6t_stop(void)
{
	u64 frozen;

	/* freeze counters */
	frozen = mfspr(SPRN_PA6T_MMCR0) | PA6T_MMCR0_FCM0;
	mtspr(SPRN_PA6T_MMCR0, frozen);

	oprofile_running = 0;

	pr_debug("stop on cpu %d, mmcr0 %llx\n", smp_processor_id(), frozen);
}
void holly_restart(char *cmd) { __be32 __iomem *ocn_bar1 = NULL; unsigned long bar; struct device_node *bridge = NULL; const void *prop; int size; phys_addr_t addr = 0xc0000000; local_irq_disable(); bridge = of_find_node_by_type(NULL, "tsi-bridge"); if (bridge) { prop = of_get_property(bridge, "reg", &size); addr = of_translate_address(bridge, prop); } addr += (TSI108_PB_OFFSET + 0x414); ocn_bar1 = ioremap(addr, 0x4); /* Turn on the BOOT bit so the addresses are correctly * routed to the HLP interface */ bar = ioread32be(ocn_bar1); bar |= 2; iowrite32be(bar, ocn_bar1); iosync(); /* Set SRR0 to the reset vector and turn on MSR_IP */ mtspr(SPRN_SRR0, 0xfff00100); mtspr(SPRN_SRR1, MSR_IP); /* Do an rfi to jump back to firmware. Somewhat evil, * but it works */ __asm__ __volatile__("rfi" : : : "memory"); /* Spin until reset happens. Shouldn't really get here */ for (;;) ; }
/* Freeze all RS64 performance counters on this CPU. */
static void rs64_stop(void)
{
	unsigned int frozen;

	/* freeze counters */
	frozen = mfspr(SPRN_MMCR0) | MMCR0_FC;
	mtspr(SPRN_MMCR0, frozen);

	dbg("stop on cpu %d, mmcr0 %x\n", smp_processor_id(), frozen);

	/* Make sure the freeze is visible before anything that follows. */
	mb();
}
/*
 * /proc write handler: parse a value from the user buffer, record PMC
 * usage in the hypervisor-shared paca area, and load the value into MMCR0.
 * Returns the byte count consumed.
 */
int proc_pmc_set_mmcr0( struct file *file, const char *buffer,
			unsigned long count, void *data )
{
	unsigned long v;

	/* proc_pmc_conv_int() presumably parses an integer from the user
	 * buffer -- defined elsewhere in this file; confirm semantics. */
	v = proc_pmc_conv_int( buffer, count );
	v = v & ~0x04000000;	/* Don't allow interrupts for now */
	/* NOTE(review): this tests any bit other than 0x80000000 (the
	 * freeze bit) being set -- i.e. "counters configured to do
	 * something"; verify that is the intended condition. */
	if ( v & ~0x80000000 )	/* Inform hypervisor we are using PMCs */
		get_paca()->xLpPacaPtr->xPMCRegsInUse = 1;
	else
		get_paca()->xLpPacaPtr->xPMCRegsInUse = 0;
	mtspr( MMCR0, v );

	return count;
}
/*
 * Load new cpc0_sys0/cpc0_sys1 clock settings and force the chip reset
 * that is required for them to take effect.  The reset is requested via
 * DBCR0; on success the chip reboots, so the return value is nominal.
 */
static int do_chip_reset (unsigned long sys0, unsigned long sys1)
{
	/* Changes to cpc0_sys0 and cpc0_sys1 require chip
	 * reset.
	 */
	mtdcr (cntrl0, mfdcr (cntrl0) | 0x80000000);	/* Set SWE */
	mtdcr (cpc0_sys0, sys0);
	mtdcr (cpc0_sys1, sys1);
	mtdcr (cntrl0, mfdcr (cntrl0) & ~0x80000000);	/* Clr SWE */
	mtspr (dbcr0, 0x20000000);	/* Reset the chip */

	return 1;
}
/* Enable the DSCR facility and program the default prefetch depth (4)
 * into LPCR[DPFD].  Always reports the feature as enabled. */
static int __init feat_enable_dscr(struct dt_cpu_feature *f)
{
	u64 val;

	feat_enable(f);

	/* Replace the DPFD field with the default depth of 4. */
	val = mfspr(SPRN_LPCR);
	val = (val & ~LPCR_DPFD) | (4UL << LPCR_DPFD_SH);
	mtspr(SPRN_LPCR, val);

	return 1;
}
/* Write a trace bookmark value to the BKMK SPR. */
void ps3_set_bookmark(u64 bookmark)
{
	/*
	 * As per the PPE book IV, to avoid bookmark loss there must
	 * not be a traced branch within 10 cycles of setting the
	 * SPRN_BKMK register. The actual text is unclear if 'within'
	 * includes cycles before the call.
	 */
	/* The nop padding on both sides covers either reading; do not
	 * remove or shorten it. */
	asm volatile("nop;nop;nop;nop;nop;nop;nop;nop;nop;");
	mtspr(SPRN_BKMK, bookmark);
	asm volatile("nop;nop;nop;nop;nop;nop;nop;nop;nop;");
}
/* The decrementer counts at the system (internal) clock freq divided by 8 */
void __init mpc86xx_calibrate_decr(void)
{
	bd_t *binfo = (bd_t *) __res;
	unsigned int freq, divisor, temp;

	/* get the core frequency */
	freq = binfo->bi_busfreq;

	/* The timebase is updated every 4 bus clocks */
	divisor = 4;
	tb_ticks_per_jiffy = freq / divisor / HZ;
	tb_to_us = mulhwu_scale_factor(freq / divisor, 1000000);

	/* Set the time base to zero */
	mtspr(SPRN_TBWL, 0);
	mtspr(SPRN_TBWU, 0);

	/* Enable the timebase only after both halves are zeroed. */
	temp = mfspr(SPRN_HID0);
	temp |= HID0_TBEN;
	mtspr(SPRN_HID0, temp);
}
/* Program the hotplug LPCR value for @cpu, both directly and -- when deep
 * stop states can lose hypervisor context -- via the stop-api so firmware
 * can restore it on wakeup. */
static void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
{
	mtspr(SPRN_LPCR, lpcr_val);

	/*
	 * Program the LPCR via stop-api only if the deepest stop state
	 * can lose hypervisor context.
	 */
	if (supported_cpuidle_states & OPAL_PM_LOSE_FULL_CONTEXT) {
		u64 pir = get_hard_smp_processor_id(cpu);

		opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
	}
}
/*
 * Load new CPC0_SYS0/CPC0_SYS1 clock settings and force the chip reset
 * that is required for them to take effect.  The reset is requested via
 * DBCR0; on success the chip reboots, so the return value is nominal.
 */
static int do_chip_reset (unsigned long sys0, unsigned long sys1)
{
	/* Changes to CPC0_SYS0 and CPC0_SYS1 require chip
	 * reset.
	 */
	mtdcr (CPC0_CR0, mfdcr (CPC0_CR0) | 0x80000000);	/* Set SWE */
	mtdcr (CPC0_SYS0, sys0);
	mtdcr (CPC0_SYS1, sys1);
	mtdcr (CPC0_CR0, mfdcr (CPC0_CR0) & ~0x80000000);	/* Clr SWE */
	mtspr (SPRN_DBCR0, 0x20000000);	/* Reset the chip */

	return 1;
}
/*
 * Perform one XSCOM write of @val to chip @gcid at PCB address @pcb_addr,
 * retrying while the interconnect reports busy.  Returns an OPAL_* status.
 */
static int __xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val)
{
	uint64_t hmer;
	int64_t ret, retries = 0;
	int64_t xscom_clear_retries = XSCOM_CLEAR_MAX_RETRIES;

	if (!xscom_gcid_ok(gcid)) {
		prerror("%s: invalid XSCOM gcid 0x%x\n", __func__, gcid);
		return OPAL_PARAMETER;
	}

	for (retries = 0; retries <= XSCOM_BUSY_MAX_RETRIES; retries++) {
		/* Clear status bits in HMER (HMER is special
		 * writing to it *ands* bits
		 */
		mtspr(SPR_HMER, HMER_CLR_MASK);

		/* Write value to SCOM */
		out_be64(xscom_addr(gcid, pcb_addr), val);

		/* Wait for done bit */
		hmer = xscom_wait_done();

		/* Check for error */
		if (!(hmer & SPR_HMER_XSCOM_FAIL))
			return OPAL_SUCCESS;

		/* Handle error and possibly eventually retry.  Any status
		 * other than OPAL_BUSY terminates the retry loop. */
		ret = xscom_handle_error(hmer, gcid, pcb_addr, true, retries,
					 &xscom_clear_retries);
		if (ret != OPAL_BUSY)
			break;
	}

	/* Do not print error message for multicast SCOMS */
	if (xscom_is_multicast_addr(pcb_addr) && ret == OPAL_XSCOM_CHIPLET_OFF)
		return ret;

	/*
	 * Workaround on P9: PRD does operations it *knows* will fail with this
	 * error to work around a hardware issue where accesses via the PIB
	 * (FSI or OCC) work as expected, accesses via the ADU (what xscom goes
	 * through) do not. The chip logic will always return all FFs if there
	 * is any error on the scom.
	 */
	if (proc_gen == proc_gen_p9 && ret == OPAL_XSCOM_CHIPLET_OFF)
		return ret;

	prerror("XSCOM: Write failed, ret =  %lld\n", ret);
	return ret;
}
static long booke_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { u32 tmp = 0; u32 __user *p = (u32 __user *)arg; switch (cmd) { case WDIOC_GETSUPPORT: if (copy_to_user((void *)arg, &ident, sizeof(ident))) return -EFAULT; case WDIOC_GETSTATUS: return put_user(0, p); case WDIOC_GETBOOTSTATUS: /* XXX: something is clearing TSR */ tmp = mfspr(SPRN_TSR) & TSR_WRS(3); /* returns CARDRESET if last reset was caused by the WDT */ return (tmp ? WDIOF_CARDRESET : 0); case WDIOC_SETOPTIONS: if (get_user(tmp, p)) return -EINVAL; if (tmp == WDIOS_ENABLECARD) { booke_wdt_ping(); break; } else return -EINVAL; return 0; case WDIOC_KEEPALIVE: booke_wdt_ping(); return 0; case WDIOC_SETTIMEOUT: if (get_user(tmp, p)) return -EFAULT; #ifdef CONFIG_FSL_BOOKE /* period of 1 gives the largest possible timeout */ if (tmp > period_to_sec(1)) return -EINVAL; booke_wdt_period = sec_to_period(tmp); #else booke_wdt_period = tmp; #endif mtspr(SPRN_TCR, (mfspr(SPRN_TCR) & ~WDTP_MASK) | WDTP(booke_wdt_period)); return 0; case WDIOC_GETTIMEOUT: return put_user(booke_wdt_period, p); default: return -ENOTTY; } return 0; }
/* Arch hook run when a vcpu is scheduled onto a host CPU: restore
 * guest SPR state, then hand off to the core-specific loader. */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}
static int mpc7xxx_pcpu_fini(struct pmc_mdep *md, int cpu) { uint32_t mmcr0 = mfspr(SPR_MMCR0); mtmsr(mfmsr() & ~PSL_PMM); mmcr0 |= SPR_MMCR0_FC; mtspr(SPR_MMCR0, mmcr0); free(powerpc_pcpu[cpu]->pc_ppcpmcs, M_PMC); free(powerpc_pcpu[cpu], M_PMC); return 0; }
/* Zero all eight performance-monitor counters.  The SPR number passed to
 * mtspr must be a compile-time constant, hence one call per counter. */
static inline void proc_pmc_reset(void)
{
	/* Clear all the PMCs to zeros
	 * Assume a "stop" has already frozen the counters
	 * Clear all the PMCs
	 */
	mtspr( PMC1, 0 );
	mtspr( PMC2, 0 );
	mtspr( PMC3, 0 );
	mtspr( PMC4, 0 );
	mtspr( PMC5, 0 );
	mtspr( PMC6, 0 );
	mtspr( PMC7, 0 );
	mtspr( PMC8, 0 );
}
/* Main interrupt handler */ void int_main() { unsigned long picsr = mfspr(SPR_PICSR); //process only the interrupts asserted at signal catch, ignore all during process unsigned long i = 0; while(i < 32) { if((picsr & (0x01L << i)) && (int_handlers[i].handler != 0)) { (*int_handlers[i].handler)(int_handlers[i].arg); } i++; } mtspr(SPR_PICSR, 0); //clear interrupt status: all modules have level interrupts, which have to be cleared by software, } //thus this is safe, since non processed interrupts will get re-asserted soon enough
/* cpuidle callback for the fastsleep state: mask decrementer wakeups
 * (timer events are offloaded), sleep, then restore the saved LPCR. */
static int fastsleep_loop(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	unsigned long saved_lpcr = mfspr(SPRN_LPCR);

	if (unlikely(system_state < SYSTEM_RUNNING))
		return index;

	/* Do not exit powersave upon decrementer as we've setup the timer
	 * offload.
	 */
	mtspr(SPRN_LPCR, saved_lpcr & ~LPCR_PECE1);

	power7_sleep();

	mtspr(SPRN_LPCR, saved_lpcr);

	return index;
}
static void do_store_pw20_state(void *val) { u32 *value = val; u32 pw20_state; pw20_state = mfspr(SPRN_PWRMGTCR0); if (*value) pw20_state |= PWRMGTCR0_PW20_WAIT; else pw20_state &= ~PWRMGTCR0_PW20_WAIT; mtspr(SPRN_PWRMGTCR0, pw20_state); }
/* Configure the ISAv3 MMU for hash translation: clear the radix/ISL/UPRT
 * LPCR bits (recording them for later restore) and advertise the hash MMU
 * feature bits.  Always reports the feature as enabled. */
static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
{
	const u64 clear_bits = LPCR_ISL | LPCR_UPRT | LPCR_HR;

	system_registers.lpcr_clear |= clear_bits;
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~clear_bits);

	cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
	cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;

	return 1;
}
/* ITLB miss exception handler */
void itlb_miss_handler (void)
{
	unsigned long ea;
	int set, way = 0;
	int i;

	/* Get EA that cause the exception */
	ea = mfspr (SPR_EEAR_BASE);

	/* Find TLB set and LRU way */
	set = (ea / PAGE_SIZE) % ITLB_SETS;
	for (i = 0; i < ITLB_WAYS; i++) {
		if ((mfspr (SPR_ITLBMR_BASE(i) + set) & SPR_ITLBMR_LRU) == 0) {
			way = i;
			break;
		}
	}
	/* If no way had a clear LRU bit, way 0 is evicted by default. */

	/* Install match/translate for the faulting page; both registers are
	 * derived from ea, giving an identity (1:1) mapping. */
	mtspr (SPR_ITLBMR_BASE(way) + set, (ea & SPR_ITLBMR_VPN) | SPR_ITLBMR_V);
	mtspr (SPR_ITLBTR_BASE(way) + set, (ea & SPR_ITLBTR_PPN) | itlb_val);

	/* Bookkeeping consumed elsewhere (test/diagnostic counters). */
	except_mask |= 1 << V_ITLB_MISS;
	except_count++;
}
/* Reset the POWER8 PMU control registers to a clean (all-zero) state.
 * MMCRC/MMCRH are hypervisor resources and are only written in HV mode. */
static void init_pmu_power8(void)
{
	if (hv_mode) {
		mtspr(SPRN_MMCRC, 0);
		mtspr(SPRN_MMCRH, 0);
	}

	mtspr(SPRN_MMCRA, 0);
	mtspr(SPRN_MMCR0, 0);
	mtspr(SPRN_MMCR1, 0);
	mtspr(SPRN_MMCR2, 0);
	mtspr(SPRN_MMCRS, 0);
}
int main() { int uart0_core = 0; int uart1_core = 1; uart0_tx_ctrl.busy = 0; /* Set up interrupt handler */ int_init(); /* Install UART core 0 interrupt handler */ int_add(UART0_IRQ, uart_int_handler,(void*) &uart0_core); /* Install UART core 1 interrupt handler */ //int_add(UART1_IRQ, uart_int_handler,(void*) &uart1_core); /* Enable interrupts in supervisor register */ mtspr (SPR_SR, mfspr (SPR_SR) | SPR_SR_IEE); uart_init(uart0_core); //uart_init(uart1_core); //uart_rxint_enable(uart1_core); uart_rxint_enable(uart0_core); char* teststring = "\n\tHello world from UART 0\n\0"; uart0_tx_buffer(teststring); // Do other things while we transmit float f1, f2, f3; int i; f1 = 0.2382; f2 = 4342.65; f3=0; for(i=0;i<32;i++) f3 += f1*f3 + f2; report(f3); report(0x4aaaaa1f); char* done_calculating = "\tDone with the number crunching!\n\0"; uart0_tx_buffer(done_calculating); // Character '*', which will be received in the interrupt handler and cause // the simulation to exit. char* finish = "*\n\0"; uart0_tx_buffer(finish); while(1); // will exit in the rx interrupt routine }