IOReturn IOInterruptController::disableInterrupt(IOService *nub, int source)
{
    IOInterruptSource *interruptSources;
    long              vectorNumber;
    IOInterruptVector *vector;
    OSData            *vectorData;

    interruptSources = nub->_interruptSources;
    vectorData = interruptSources[source].vectorData;
    vectorNumber = *(long *)vectorData->getBytesNoCopy();
    vector = &vectors[vectorNumber];

    vector->interruptDisabledSoft = 1;
#if __ppc__
    sync();
    isync();
#endif

    if (!getPlatform()->atInterruptLevel()) {
        while (vector->interruptActive);
#if __ppc__
        isync();
#endif
    }

    return kIOReturnSuccess;
}

IOReturn IOSharedInterruptController::disableInterrupt(IOService *nub,
                                                       int source)
{
    IOInterruptSource *interruptSources;
    long              vectorNumber;
    IOInterruptVector *vector;
    OSData            *vectorData;
    IOInterruptState  interruptState;

    interruptSources = nub->_interruptSources;
    vectorData = interruptSources[source].vectorData;
    vectorNumber = *(long *)vectorData->getBytesNoCopy();
    vector = &vectors[vectorNumber];

    interruptState = IOSimpleLockLockDisableInterrupt(controllerLock);
    if (!vector->interruptDisabledSoft) {
        vector->interruptDisabledSoft = 1;
#if __ppc__
        sync();
        isync();
#endif
        vectorsEnabled--;
    }
    IOSimpleLockUnlockEnableInterrupt(controllerLock, interruptState);

    if (!getPlatform()->atInterruptLevel()) {
        while (vector->interruptActive);
#if __ppc__
        isync();
#endif
    }

    return kIOReturnSuccess;
}

static void __cpuinit smp_85xx_mach_cpu_die(void)
{
    unsigned int cpu = smp_processor_id();
    u32 tmp;

    local_irq_disable();
    idle_task_exit();
    generic_set_cpu_dead(cpu);
    mb();

    mtspr(SPRN_TCR, 0);

    __flush_disable_L1();
    tmp = (mfspr(SPRN_HID0) & ~(HID0_DOZE|HID0_SLEEP)) | HID0_NAP;
    mtspr(SPRN_HID0, tmp);
    isync();

    /* Enter NAP mode. */
    tmp = mfmsr();
    tmp |= MSR_WE;
    mb();
    mtmsr(tmp);
    isync();

    while (1)
        ;
}

void
enable_vec(struct thread *td)
{
    int msr;
    struct pcb *pcb;
    struct trapframe *tf;

    pcb = td->td_pcb;
    tf = trapframe(td);

    /*
     * Save the thread's Altivec CPU number, and set the CPU's current
     * vector thread
     */
    td->td_pcb->pcb_veccpu = PCPU_GET(cpuid);
    PCPU_SET(vecthread, td);

    /*
     * Enable the vector unit for when the thread returns from the
     * exception. If this is the first time the unit has been used by
     * the thread, initialise the vector registers and VSCR to 0, and
     * set the flag to indicate that the vector unit is in use.
     */
    tf->srr1 |= PSL_VEC;
    if (!(pcb->pcb_flags & PCB_VEC)) {
        memset(&pcb->pcb_vec, 0, sizeof pcb->pcb_vec);
        pcb->pcb_flags |= PCB_VEC;
    }

    /*
     * Temporarily enable the vector unit so the registers
     * can be restored.
     */
    msr = mfmsr();
    mtmsr(msr | PSL_VEC);
    isync();

    /*
     * Restore VSCR by first loading it into a vector and then into VSCR.
     * (this needs to be done before loading the user's vector registers
     * since we need to use a scratch vector register)
     */
    __asm __volatile("vxor 0,0,0; lvewx 0,0,%0; mtvscr 0" \
        :: "b"(&pcb->pcb_vec.vscr));

#define LVX(n) __asm ("lvx " #n ",0,%0" \
        :: "b"(&pcb->pcb_vec.vr[n]));
    LVX(0);  LVX(1);  LVX(2);  LVX(3);
    LVX(4);  LVX(5);  LVX(6);  LVX(7);
    LVX(8);  LVX(9);  LVX(10); LVX(11);
    LVX(12); LVX(13); LVX(14); LVX(15);
    LVX(16); LVX(17); LVX(18); LVX(19);
    LVX(20); LVX(21); LVX(22); LVX(23);
    LVX(24); LVX(25); LVX(26); LVX(27);
    LVX(28); LVX(29); LVX(30); LVX(31);
#undef LVX

    isync();
    mtmsr(msr);
}

void SegmentManager::_initSLB()
{
    // Flush SLB.
    asm volatile("slbia" ::: "memory");
    isync(); // Ensure slbia completes prior to slbmtes.

    register uint64_t slbRS, slbRB;

    // Default segment descriptors.
    // ESID = 0, V = 1, Index = 1.
    slbRB = 0x0000000008000001;
    // B = 01 (1TB), VSID = 0, Ks = 0, Kp = 1, NLCLP = 0
    slbRS = 0x4000000000000400;

    // Add all segments to SLB.
    for (size_t i = 0; i < MAX_SEGMENTS; i++)
    {
        // Add segment to SLB.
        if (NULL != iv_segments[i])
        {
            asm volatile("slbmte %0, %1" :: "r"(slbRS), "r"(slbRB) : "memory");
        }

        // Increment ESID, VSID, Index.
        slbRB += 0x0000010000000001;
        slbRS += 0x0000000001000000;
    }

    isync(); // Ensure slbmtes complete prior to continuing on.
}

void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
    if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
        isync();
        mtspr(SPRN_PID, next->context.id);
        isync();
        asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
    } else {
        /* Non-DD1 path: the snippet was cut off here; the usual sequence
         * is a PID switch followed by a context-synchronizing isync. */
        mtspr(SPRN_PID, next->context.id);
        isync();
    }
}

static void ppc44x_idle(void)
{
    unsigned long msr_save;

    msr_save = mfmsr();
    /* set wait state MSR */
    mtmsr(msr_save|MSR_WE|MSR_EE|MSR_CE|MSR_DE);
    isync();
    /* return to initial state */
    mtmsr(msr_save);
    isync();
}

static inline void createSendGIPulseThread(int numloops)
{
    int loop;
    for (loop = 0; loop < numloops; loop++)
    {
        mtspr(SPRN_TENC, 1 << ProcessorThreadID());
        isync();

        MUSPI_GISend(&GI);
        ppc_msync();
        MUSPI_GISendClear(&GI);
        ppc_msync();
    }
    mtspr(SPRN_TENC, 1 << ProcessorThreadID());
    isync();
}

/*
 * writing shadow tlb entry to host TLB
 */
static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
                                     uint32_t mas0, uint32_t *lpid)
{
    unsigned long flags;

    local_irq_save(flags);
    mtspr(SPRN_MAS0, mas0);
    mtspr(SPRN_MAS1, stlbe->mas1);
    mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
    mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
    mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
#ifdef CONFIG_KVM_BOOKE_HV
    /* populate mas8 with latest LPID */
    stlbe->mas8 = MAS8_TGS | *lpid;
    mtspr(SPRN_MAS8, stlbe->mas8);
#endif
    asm volatile("isync; tlbwe" : : : "memory");

#ifdef CONFIG_KVM_BOOKE_HV
    /* Must clear mas8 for other host tlbwe's */
    mtspr(SPRN_MAS8, 0);
    isync();
#endif
    local_irq_restore(flags);

    trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
                                  stlbe->mas2, stlbe->mas7_3);
}

/*
 * A very short dispatch, to try and maximise assembler code use
 * between all exception types. Maybe 'true' interrupts should go
 * here, and the trap code can come in separately
 */
void
powerpc_interrupt(struct trapframe *framep)
{
    struct thread *td;
    struct clockframe ckframe;

    td = curthread;

    switch (framep->exc) {
    case EXC_EXI:
        atomic_add_int(&td->td_intr_nesting_level, 1);
        (*powerpc_extintr_handler)();
        atomic_subtract_int(&td->td_intr_nesting_level, 1);
        break;

    case EXC_DECR:
        atomic_add_int(&td->td_intr_nesting_level, 1);
        ckframe.srr0 = framep->srr0;
        ckframe.srr1 = framep->srr1;
        decr_intr(&ckframe);
        atomic_subtract_int(&td->td_intr_nesting_level, 1);
        break;

    default:
        /*
         * Re-enable interrupts and call the generic trap code
         */
#if 0
        printf("powerpc_interrupt: got trap\n");
#endif
        mtmsr(mfmsr() | PSL_EE);
        isync();
        trap(framep);
    }
}

static int
dfs_set(device_t dev, const struct cf_setting *set)
{
    register_t hid1;

    if (set == NULL)
        return (EINVAL);

    hid1 = mfspr(SPR_HID1);
    hid1 &= ~(HID1_DFS2 | HID1_DFS4);

    if (set->freq == 5000)
        hid1 |= HID1_DFS2;
    else if (set->freq == 2500)
        hid1 |= HID1_DFS4;

    /*
     * Now set the HID1 register with new values. Calling sequence
     * taken from page 2-26 of the MPC7450 family CPU manual.
     */
    powerpc_sync();
    mtspr(SPR_HID1, hid1);
    powerpc_sync();
    isync();

    return (0);
}

void init_mmu(void)
{
    uint32_t msr;
    int n;

    /* switch to ts == 1 if ts == 0, else bail */
    msr = get_msr();
    if (msr & (MSR_IS|MSR_DS)) {
        return;
    }

    /* setup a valid ts1 entry and switch ts */
    write_tlb1_entry(TMP_ENTRY, TS1, 0x00000000, SIZE_256MB, FULL_SUPER, 0);
    write_tlb1_entry(TMP_ENTRY+1, TS1, 0x40000000, SIZE_256KB, FULL_SUPER, 0);
    sync();
    set_msr(msr | MSR_IS | MSR_DS);
    isync();

    for (n = 0; tlb[n].base != LAST_ENTRY; n++) {
        write_tlb1_entry(n, TS0, tlb[n].base, tlb[n].size, tlb[n].prot,
                         tlb[n].wimge);
    }

    /* invalidate the rest of the entries */
    for (; n < 16; n++) {
        if (n != TMP_ENTRY) {
            invalidate_tlb1_entry(n);
        }
    }

    /* switch back to ts == 0 */
    sync();
    set_msr(msr);
    isync();

    invalidate_tlb1_entry(TMP_ENTRY);
    invalidate_tlb1_entry(TMP_ENTRY+1);
    sync();

    /* invalidate cache */
    set_l1csr0(L1CSRX_CFI);
    while (get_l1csr0() & L1CSRX_CFI) {
        ;
    }

    /* enable */
    set_l1csr0(L1CSRX_CE);
}

void
save_vec(struct thread *td)
{
    int msr;
    struct pcb *pcb;

    pcb = td->td_pcb;

    /*
     * Temporarily re-enable the vector unit during the save
     */
    msr = mfmsr();
    mtmsr(msr | PSL_VEC);
    isync();

    /*
     * Save the vector registers and VSCR to the PCB
     */
#define STVX(n) __asm ("stvx %1,0,%0" \
        :: "b"(pcb->pcb_vec.vr[n]), "n"(n));
    STVX(0);  STVX(1);  STVX(2);  STVX(3);
    STVX(4);  STVX(5);  STVX(6);  STVX(7);
    STVX(8);  STVX(9);  STVX(10); STVX(11);
    STVX(12); STVX(13); STVX(14); STVX(15);
    STVX(16); STVX(17); STVX(18); STVX(19);
    STVX(20); STVX(21); STVX(22); STVX(23);
    STVX(24); STVX(25); STVX(26); STVX(27);
    STVX(28); STVX(29); STVX(30); STVX(31);
#undef STVX

    __asm __volatile("mfvscr 0; stvewx 0,0,%0" :: "b"(&pcb->pcb_vec.vscr));

    /*
     * Disable vector unit again
     */
    isync();
    mtmsr(msr);

    /*
     * Clear the current vec thread and pcb's CPU id
     * XXX should this be left clear to allow lazy save/restore ?
     */
    pcb->pcb_veccpu = INT_MAX;
    PCPU_SET(vecthread, NULL);
}

void os_mousemove(usbdevice* kb, int x, int y){
    struct input_event event;
    memset(&event, 0, sizeof(event));
    event.type = EV_REL;
    if(x != 0){
        event.code = REL_X;
        event.value = x;
        if(write(kb->uinput_mouse - 1, &event, sizeof(event)) <= 0)
            ckb_warn("uinput write failed: %s\n", strerror(errno));
        else
            isync(kb);
    }
    if(y != 0){
        event.code = REL_Y;
        event.value = y;
        if(write(kb->uinput_mouse - 1, &event, sizeof(event)) <= 0)
            ckb_warn("uinput write failed: %s\n", strerror(errno));
        else
            isync(kb);
    }
}

IOReturn IOSharedInterruptController::handleInterrupt(void * /*refCon*/,
                                                      IOService * nub,
                                                      int /*source*/)
{
    long              vectorNumber;
    IOInterruptVector *vector;

    for (vectorNumber = 0; vectorNumber < numVectors; vectorNumber++) {
        vector = &vectors[vectorNumber];

        vector->interruptActive = 1;
#if __ppc__
        sync();
        isync();
#endif
        if (!vector->interruptDisabledSoft) {
#if __ppc__
            isync();
#endif

            // Call the handler if it exists.
            if (vector->interruptRegistered) {
                vector->handler(vector->target, vector->refCon,
                                vector->nub, vector->source);
            }
        }

        vector->interruptActive = 0;
    }

    // If any of the vectors are disabled, then disable this controller.
    IOSimpleLockLock(controllerLock);
    if (vectorsEnabled != vectorsRegistered) {
        nub->disableInterrupt(0);
        controllerDisabled = 1;
    }
    IOSimpleLockUnlock(controllerLock);

    return kIOReturnSuccess;
}

static void
save_vec_int(struct thread *td)
{
    int msr;
    struct pcb *pcb;

    pcb = td->td_pcb;

    /*
     * Temporarily re-enable the vector unit during the save
     */
    msr = mfmsr();
    mtmsr(msr | PSL_VEC);
    isync();

    /*
     * Save the vector registers and VSCR to the PCB
     */
#define STVX(n) __asm ("stvx %1,0,%0" \
        :: "b"(pcb->pcb_vec.vr[n]), "n"(n));
    STVX(0);  STVX(1);  STVX(2);  STVX(3);
    STVX(4);  STVX(5);  STVX(6);  STVX(7);
    STVX(8);  STVX(9);  STVX(10); STVX(11);
    STVX(12); STVX(13); STVX(14); STVX(15);
    STVX(16); STVX(17); STVX(18); STVX(19);
    STVX(20); STVX(21); STVX(22); STVX(23);
    STVX(24); STVX(25); STVX(26); STVX(27);
    STVX(28); STVX(29); STVX(30); STVX(31);
#undef STVX

    __asm __volatile("mfvscr 0; stvewx 0,0,%0" :: "b"(&pcb->pcb_vec.vscr));

    /*
     * Disable vector unit again
     */
    isync();
    mtmsr(msr);
}

static void mpc85xx_take_timebase(void)
{
    unsigned long flags;

    local_irq_save(flags);

    tb_req = 1;
    while (!tb_valid)
        barrier();

    set_tb(timebase >> 32, timebase & 0xffffffff);
    isync();
    tb_valid = 0;

    local_irq_restore(flags);
}

void MM_TlbSetup( const struct TlbEntry *tblTable )
{
    int32_t i = 0;

    /* Setup the TLBs */
    while( tblTable[i].entry != (-1UL) )
    {
        set_spr(SPR_MAS0, tblTable[i].mas0);
        set_spr(SPR_MAS1, tblTable[i].mas1);
        set_spr(SPR_MAS2, tblTable[i].mas2);
        set_spr(SPR_MAS3, tblTable[i].mas3);
        msync();
        isync();
        tlbwe();
        i++;
    }
}

int Speculation_CleanupJob()
{
    SPEC_SetSpeculationIDSelf_priv(0x400); // clear interrupt state, clear lower 9 bits as well

    // Restore the default system-call and standard-interrupt code sequences.
    // See Speculation_EnableFastSpeculationPath() for commentary on this
    // process. In this case we need an IPI only for the "system" core,
    // because this routine is called on every application hardware thread.
    uint64_t ici_needed = 0;
    Kernel_Lock(&FastPathsLock);
    if (FastPathsEnabled)
    {
        extern uint32_t Vector_EI_trampoline;
        extern uint32_t Vector_SC_trampoline;
        uint64_t exceptionVector = mfspr(SPRN_IVPR);

        *((uint32_t *) (exceptionVector + IVO_EI)) = Vector_EI_trampoline;
        *((uint32_t *) (exceptionVector + IVO_SC)) = Vector_SC_trampoline;
        ppc_msync(); // make sure the stores have taken effect

        FastPathsEnabled = 0;
        ici_needed = 1; // we can't hold the lock while sending IPI's
    }
    Kernel_Unlock(&FastPathsLock);

    // Flush the icache whether or not we're the thread that did the patching.
    // We only need to do this from one thread on each core.
    if (ProcessorThreadID() == 0)
    {
        isync();
        ici();
    }

    if (ici_needed)
    {
        // We still need an IPI for the "system" core.
        IPI_invalidate_icache(NodeState.NumCoresEnabled - 1);
        Kernel_WriteFlightLog(FLIGHTLOG_high, FL_SPCFEPDIS, 0,0,0,0);
    }

    // bqcbugs 1620
    l2_set_overlock_threshold(0);
    l2_set_spec_threshold(0);
    l2_set_prefetch_enables(1);
    // --

    return 0;
}

void *
copy_out(void *dest, const void *src, uval n)
{
    uval i = 0;
    void *p = memcpy(dest, src, n);

    /*
     * Should we always do this?
     * Perhaps best to integrate directly into memcpy.
     */
    while (i < n) {
        icbi(((uval)p) + i);
        sync();
        isync();
        i += CACHE_LINE_SIZE;
    }
    return p;
}

int set_dabr(unsigned long dabr)
{
    __get_cpu_var(current_dabr) = dabr;

    if (ppc_md.set_dabr)
        return ppc_md.set_dabr(dabr);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
    mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
    isync();
#endif
#elif defined(CONFIG_PPC_BOOK3S)
    mtspr(SPRN_DABR, dabr);
#endif

    return 0;
}

static __inline void
TLBIE(uint64_t vpn) {
#ifndef __powerpc64__
    register_t vpn_hi, vpn_lo;
    register_t msr;
    register_t scratch, intr;
#endif

    static volatile u_int tlbie_lock = 0;

    vpn <<= ADDR_PIDX_SHFT;
    vpn &= ~(0xffffULL << 48);

    /* Hobo spinlock: we need stronger guarantees than mutexes provide */
    while (!atomic_cmpset_int(&tlbie_lock, 0, 1));
    isync(); /* Flush instruction queue once lock acquired */

#ifdef __powerpc64__
    __asm __volatile("tlbie %0" :: "r"(vpn) : "memory");
    __asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
#else
    vpn_hi = (uint32_t)(vpn >> 32);
    vpn_lo = (uint32_t)vpn;

    intr = intr_disable();
    __asm __volatile("\
        mfmsr %0; \
        mr %1, %0; \
        insrdi %1,%5,1,0; \
        mtmsrd %1; isync; \
        \
        sld %1,%2,%4; \
        or %1,%1,%3; \
        tlbie %1; \
        \
        mtmsrd %0; isync; \
        eieio; \
        tlbsync; \
        ptesync;"
        : "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
        : "memory");
    /* The snippet was truncated here; the asm clobber list, interrupt
     * restore, and lock release below complete the usual sequence. */
    intr_restore(intr);
#endif

    tlbie_lock = 0;
}

int Speculation_EnterJailMode(bool longRunningSpec)
{
    AppProcess_t* process = GetMyProcess();
    if (process != GetProcessByProcessorID(ProcessorID()))
    {
        Speculation_Restart(SPEC_GetSpeculationIDSelf_priv(),
                            Kernel_SpecReturnCode_INVALID,
                            &GetMyKThread()->Reg_State);
        return Kernel_SpecReturnCode_INVALID;
    }

    if(longRunningSpec)
    {
        uint64_t SpecPID;
        uint32_t ProcessOvercommit = 64 / GetMyAppState()->Active_Processes;
        if(ProcessOvercommit > 4)
            ProcessOvercommit = 4;
        vmm_getSpecPID(process->Tcoord,
                       ProcessorThreadID() % ProcessOvercommit, &SpecPID);
        if(SpecPID)
        {
            mtspr(SPRN_PID, SpecPID);
            isync();

            // A2 does not reliably notify A2 of DCI
#if 0
            volatile uint64_t* pf_sys_p = (volatile uint64_t*)
                (SPEC_GetL1PBase_priv() + L1P_CFG_PF_SYS - L1P_ESR);
            uint64_t pf_sys = *pf_sys_p;
            *pf_sys_p = pf_sys |  L1P_CFG_PF_SYS_pf_invalidate_all;
            *pf_sys_p = pf_sys & ~L1P_CFG_PF_SYS_pf_invalidate_all;
            dci();
#else
            asm volatile ("dci 2");
#endif
            ppc_msync();
        }
        else
        {
            Speculation_Restart(SPEC_GetSpeculationIDSelf_priv(),
                                Kernel_SpecReturnCode_INVALID,
                                &GetMyKThread()->Reg_State);
            return Kernel_SpecReturnCode_INVALID;
        }
    }

void os_keypress(usbdevice* kb, int scancode, int down){
    struct input_event event;
    memset(&event, 0, sizeof(event));
    int is_mouse = 0;
    if(scancode == BTN_WHEELUP || scancode == BTN_WHEELDOWN){
        // The mouse wheel is a relative axis
        if(!down)
            return;
        event.type = EV_REL;
        event.code = REL_WHEEL;
        event.value = (scancode == BTN_WHEELUP ? 1 : -1);
        is_mouse = 1;
    } else {
        // Mouse buttons and key events are both EV_KEY. The scancodes are already correct, just remove the ckb bit
        event.type = EV_KEY;
        event.code = scancode & ~SCAN_MOUSE;
        event.value = down;
        is_mouse = !!(scancode & SCAN_MOUSE);
    }
    if(write((is_mouse ? kb->uinput_mouse : kb->uinput_kb) - 1, &event, sizeof(event)) <= 0)
        ckb_warn("uinput write failed: %s\n", strerror(errno));
    else
        isync(kb);
}

bool ErrlManager::errlCommittedThisBoot()
{
    isync();
    return theErrlManager::instance().iv_nonInfoCommitted;
}

/*
 * Breathe some life into the CPU...
 *
 * Set up the memory map,
 * initialize a bunch of registers,
 * initialize the UPM's
 */
void cpu_init_f (volatile immap_t * im)
{
    __be32 acr_mask =
#ifdef CONFIG_SYS_ACR_PIPE_DEP /* Arbiter pipeline depth */
        ACR_PIPE_DEP |
#endif
#ifdef CONFIG_SYS_ACR_RPTCNT /* Arbiter repeat count */
        ACR_RPTCNT |
#endif
        0;
    __be32 acr_val =
#ifdef CONFIG_SYS_ACR_PIPE_DEP /* Arbiter pipeline depth */
        (CONFIG_SYS_ACR_PIPE_DEP << ACR_PIPE_DEP_SHIFT) |
#endif
#ifdef CONFIG_SYS_ACR_RPTCNT /* Arbiter repeat count */
        (CONFIG_SYS_ACR_RPTCNT << ACR_RPTCNT_SHIFT) |
#endif
        0;
    __be32 spcr_mask =
#ifdef CONFIG_SYS_SPCR_TSECEP /* all eTSEC's Emergency priority */
        SPCR_TSECEP |
#endif
        0;
    __be32 spcr_val = 0;
    __be32 sccr_mask =
#ifdef CONFIG_SYS_SCCR_USBDRCM /* USB DR clock mode */
        SCCR_USBDRCM |
#endif
        0;
    __be32 sccr_val = 0;
    __be32 lcrr_mask =
#ifdef CONFIG_SYS_LCRR_DBYP /* PLL bypass */
        LCRR_DBYP |
#endif
#ifdef CONFIG_SYS_LCRR_CLKDIV /* system clock divider */
        LCRR_CLKDIV |
#endif
        0;
    __be32 lcrr_val =
#ifdef CONFIG_SYS_LCRR_DBYP /* PLL bypass */
        CONFIG_SYS_LCRR_DBYP |
#endif
#ifdef CONFIG_SYS_LCRR_CLKDIV /* system clock divider */
        CONFIG_SYS_LCRR_CLKDIV |
#endif
        0;

    /* Pointer is writable since we allocated a register for it */
    gd = (gd_t *) (CONFIG_SYS_INIT_RAM_ADDR + CONFIG_SYS_GBL_DATA_OFFSET);

    /* Clear initial global data */
    memset ((void *) gd, 0, sizeof (gd_t));

    /* system performance tweaking */
    clrsetbits_be32(&im->arbiter.acr, acr_mask, acr_val);
    clrsetbits_be32(&im->sysconf.spcr, spcr_mask, spcr_val);
    clrsetbits_be32(&im->clk.sccr, sccr_mask, sccr_val);

    /* RSR - Reset Status Register - clear all status (4.6.1.3) */
    gd->reset_status = __raw_readl(&im->reset.rsr);
    __raw_writel(~(RSR_RES), &im->reset.rsr);

    /* AER - Arbiter Event Register - store status */
    gd->arbiter_event_attributes = __raw_readl(&im->arbiter.aeatr);
    gd->arbiter_event_address = __raw_readl(&im->arbiter.aeadr);

    /*
     * RMR - Reset Mode Register
     * contains checkstop reset enable (4.6.1.4)
     */
    __raw_writel(RMR_CSRE & (1<<RMR_CSRE_SHIFT), &im->reset.rmr);

    /* LCRR - Clock Ratio Register (10.3.1.16)
     * write, read, and isync per MPC8379ERM rev.1 CLKDEV field description
     */
    clrsetbits_be32(&im->im_lbc.lcrr, lcrr_mask, lcrr_val);
    __raw_readl(&im->im_lbc.lcrr);
    isync();

    /* Enable Time Base & Decrementer ( so we will have udelay() ) */
    setbits_be32(&im->sysconf.spcr, SPCR_TBEN);

    /* System General Purpose Register */
#ifdef CONFIG_SYS_SICRH
    __raw_writel(CONFIG_SYS_SICRH, &im->sysconf.sicrh);
#endif
#ifdef CONFIG_SYS_SICRL
    __raw_writel(CONFIG_SYS_SICRL, &im->sysconf.sicrl);
#endif

    /* Set up preliminary BR/OR regs */
    init_early_memctl_regs();

    /* Local Access window setup */
#if defined(CONFIG_SYS_LBLAWBAR0_PRELIM) && defined(CONFIG_SYS_LBLAWAR0_PRELIM)
    im->sysconf.lblaw[0].bar = CONFIG_SYS_LBLAWBAR0_PRELIM;
    im->sysconf.lblaw[0].ar = CONFIG_SYS_LBLAWAR0_PRELIM;
#else
#error CONFIG_SYS_LBLAWBAR0_PRELIM & CONFIG_SYS_LBLAWAR0_PRELIM must be defined
#endif

#if defined(CONFIG_SYS_LBLAWBAR1_PRELIM) && defined(CONFIG_SYS_LBLAWAR1_PRELIM)
    im->sysconf.lblaw[1].bar = CONFIG_SYS_LBLAWBAR1_PRELIM;
    im->sysconf.lblaw[1].ar = CONFIG_SYS_LBLAWAR1_PRELIM;
#endif

/*
#if defined(CONFIG_USB_EHCI_FSL) && defined(CONFIG_MPC831x)
    uint32_t temp;
    struct usb_ehci *ehci = (struct usb_ehci *)CONFIG_SYS_FSL_USB_ADDR;

    setbits_be32(&ehci->control, REFSEL_16MHZ | UTMI_PHY_EN);
    do {
        temp = __raw_readl(&ehci->control);
        udelay(1000);
    } while (!(temp & PHY_CLK_VALID));
#endif
*/
}

void Daemon::collectTracePages()
{
    // Clear indication from clients.
    iv_service->iv_daemon->clearSignal();

    // Collect buffer pages from front-end.
    BufferPage* srcPages[BUFFER_COUNT];
    for (size_t i = 0; i < BUFFER_COUNT; i++)
    {
        iv_curPages[i] = srcPages[i] = iv_service->iv_buffers[i]->claimPages();
        iv_curOffset[i] = 0;
    }

    char* contBuffer = NULL;
    size_t contBufferSize = 0;

    // Process buffer pages.
    do
    {
        size_t whichBuffer = BUFFER_COUNT;
        Entry* whichEntry = NULL;
        uint64_t minTimeStamp = UINT64_MAX;

        // Find the entry with the earliest timestamp.
        for (size_t i = 0; i < BUFFER_COUNT; i++)
        {
            if (NULL == iv_curPages[i]) continue;

            Entry* entry = reinterpret_cast<Entry*>(
                &((&(iv_curPages[i]->data[0]))[iv_curOffset[i]]) );

            trace_bin_entry_t* binEntry = reinterpret_cast<trace_bin_entry_t*>(
                &(entry->data[0]) );

            // Wait for entry to be committed.
            while(unlikely(entry->committed == 0))
            {
                task_yield();
            }
            isync();

            uint64_t curTimeStamp =
                TWO_UINT32_TO_UINT64(binEntry->stamp.tbh, binEntry->stamp.tbl);

            if (curTimeStamp < minTimeStamp)
            {
                whichBuffer = i;
                whichEntry = entry;
                minTimeStamp = curTimeStamp;
            }
        }

        // Did not find another entry, our work is done.
        if (whichBuffer == BUFFER_COUNT)
        {
            break;
        }

        // Increment pointers to next buffer entry.
        iv_curOffset[whichBuffer] += whichEntry->size + sizeof(Entry);
        if (iv_curOffset[whichBuffer] >= iv_curPages[whichBuffer]->usedSize)
        {
            iv_curPages[whichBuffer] = iv_curPages[whichBuffer]->next;
            iv_curOffset[whichBuffer] = 0;
        }

        trace_bin_entry_t* contEntry =
            reinterpret_cast<trace_bin_entry_t*>(&whichEntry->data[0]);

        // Calculate the sizes of the entry.
        size_t contEntryDataLength =
            contEntry->head.length + sizeof(trace_bin_entry_t);
        size_t contEntrySize =
            whichEntry->comp->iv_compNameLen + contEntryDataLength;

        // Allocate a new continuous trace page if needed.
        if ((NULL == contBuffer) ||
            ((contBufferSize + contEntrySize) >= PAGESIZE))
        {
            if (NULL != contBuffer)
            {
                sendContBuffer(contBuffer, contBufferSize);
                // contBuffer pointer is transferred to mailbox now.
            }

            contBuffer = reinterpret_cast<char*>(malloc(PAGESIZE));
            memset(contBuffer, '\0', PAGESIZE);
            contBuffer[0] = TRACE_BUF_CONT;
            contBufferSize = 1;
        }

        // Add entry to continuous trace.
        memcpy(&contBuffer[contBufferSize],
               whichEntry->comp->iv_compName,
               whichEntry->comp->iv_compNameLen);
        contBufferSize += whichEntry->comp->iv_compNameLen;

        memcpy(&contBuffer[contBufferSize],
               &whichEntry->data[0],
               contEntryDataLength);
        contBufferSize += contEntryDataLength;

        // Allocate a new back-end entry.
        Entry* mainBuffEntry = NULL;
        while (NULL ==
               (mainBuffEntry =
                    iv_last->claimEntry(whichEntry->size + sizeof(Entry))))
        {
            BufferPage* n = BufferPage::allocate(true);

            n->next = iv_last;
            iv_last->prev = n;
            iv_last = n;
        }

        // Move entry from front-end buffer to back-end.
        replaceEntry(whichEntry, mainBuffEntry);

    } while(1);

    // Send remainder of continuous trace buffer.
    if (NULL != contBuffer)
    {
        if (contBufferSize > 1)
        {
            sendContBuffer(contBuffer, contBufferSize);
            // contBuffer pointer is transferred to mailbox now.
        }
        else
        {
            free(contBuffer);
        }
    }

    // Release pages.
    for (size_t i = 0; i < BUFFER_COUNT; i++)
    {
        // Toggle lock to ensure no trace extract currently going on.
        iv_service->iv_buffers[i]->consumerOp();

        while(srcPages[i])
        {
            BufferPage* tmp = srcPages[i]->next;
            BufferPage::deallocate(srcPages[i]);
            srcPages[i] = tmp;
        }
    }
}

void flush_instruction_cache(void)
{
    isync();
    mtspr(SPRN_IC_CST, IDC_INVALL);
    isync();
}

int fw_sync_timebase( void )
{
    uint64_t numloops = 10;
    uint64_t value;
    uint64_t rc;
    Personality_t *pers = &FW_Personality;
    uint64_t numthreads;
    uint64_t msr;
    uint64_t geamap8 = 0;

    if(!PERS_ENABLED(PERS_ENABLE_MU)) return 0;
    if(!PERS_ENABLED(PERS_ENABLE_ND)) return 0;

    msr = mfmsr();
    mtmsr(msr & ~(MSR_EE | MSR_CE | MSR_ME));
    isync();

    numthreads = popcnt64(DCRReadPriv(TESTINT_DCR(THREAD_ACTIVE0))) +
                 popcnt64(DCRReadPriv(TESTINT_DCR(THREAD_ACTIVE1)));

    if(PhysicalThreadID() == 0)
    {
#define WU_MMIO_PRIV_BASE ((volatile unsigned long *)0x3ffe8001c00)
#define SET_THREAD(i) ((0x300 + (i)*0x40) / sizeof (unsigned long))
        WU_MMIO_PRIV_BASE[SET_THREAD(0)] =
            WU_DCR__THREAD0_WU_EVENT_SET__GEA_WU_EN_set(0x8);

        if(ProcessorID() == 0)
        {
            // Setup classroute 14. Identical to classroute 15.
            value = DCRReadPriv(ND_500_DCR(CTRL_GI_CLASS_14_15));
            ND_500_DCR__CTRL_GI_CLASS_14_15__CLASS14_UP_PORT_I_insert(value,
                ND_500_DCR__CTRL_GI_CLASS_14_15__CLASS15_UP_PORT_I_get(value));
            ND_500_DCR__CTRL_GI_CLASS_14_15__CLASS14_UP_PORT_O_insert(value,
                ND_500_DCR__CTRL_GI_CLASS_14_15__CLASS15_UP_PORT_O_get(value));
            DCRWritePriv(ND_500_DCR(CTRL_GI_CLASS_14_15), value);
            ppc_msync();

            // Initialize GI pulse
            MUSPI_GIInit (&GI, 14, 0);

            // Initialize the GI barrier interrupt on classroute 14
            DCRWritePriv(MU_DCR(BARRIER_INT_EN),
                         MU_DCR__BARRIER_INT_EN__CLASS14_set(4));

            // Route MU MAP4 interrupt to GEA lane 12 (wakeup unit bit 0)
            geamap8 = DCRReadPriv(GEA_DCR(GEA_INTERRUPT_MAP8));
            DCRWritePriv(GEA_DCR(GEA_INTERRUPT_MAP8),
                         GEA_DCR__GEA_INTERRUPT_MAP8__MU_MAP4_set(12));

            rc = MUSPI_GIBarrierInit(&GIBarrier, 15);
        }

        // do local barrier
        BeDRAM_ReadIncSat(BeDRAM_LOCKNUM_TIMESYNC_BARRIER);
        while(BeDRAM_Read(BeDRAM_LOCKNUM_TIMESYNC_BARRIER) != numthreads)
        {
        }

        if(ProcessorID() == 0)
        {
            // Perform a barrier across all nodes.
            MUSPI_GIBarrierEnterAndWait(&GIBarrier);
            if ( rc != 0 )
            {
                FW_Warning("MUSPI_GIBarrierInit for class route 15 returned rc = %ld.", rc);
                return -1;
            }

            // Start gsync counter (for debug)
            DCRWritePriv(TESTINT_DCR(GSYNC_CTR), -1);
        }

        doTimeSync(numloops);

        mtspr(SPRN_TENS, 0xf);
    }
    else if((ProcessorID() == 1) &&
            (pers->Network_Config.PrimordialClassRoute.GlobIntUpPortOutputs == 0))
    {
        BeDRAM_ReadIncSat(BeDRAM_LOCKNUM_TIMESYNC_BARRIER);
        createSendGIPulseThread(numloops);
    }
    else
    {
        BeDRAM_ReadIncSat(BeDRAM_LOCKNUM_TIMESYNC_BARRIER);
        mtspr(SPRN_TENC, 1 << ProcessorThreadID());
        isync();
    }

    // Wait for all hwthreads on node
    BeDRAM_ReadIncSat(BeDRAM_LOCKNUM_TIMESYNC_BARRIER);
    while(BeDRAM_Read(BeDRAM_LOCKNUM_TIMESYNC_BARRIER) != numthreads * 2)
    {
    }

    if(ProcessorID() == 0)
    {
        value = DCRReadPriv(ND_500_DCR(CTRL_GI_CLASS_14_15));
        ND_500_DCR__CTRL_GI_CLASS_14_15__CLASS14_UP_PORT_I_insert(value, 0);
        ND_500_DCR__CTRL_GI_CLASS_14_15__CLASS14_UP_PORT_O_insert(value, 0);
        DCRWritePriv(ND_500_DCR(CTRL_GI_CLASS_14_15), value);
        ppc_msync();

        // Initialize the barrier structure.
        DCRWritePriv(MU_DCR(BARRIER_INT_EN),
                     MU_DCR__BARRIER_INT_EN__CLASS14_set(0));
        DCRWritePriv(GEA_DCR(GEA_INTERRUPT_MAP8), geamap8);
    }

    WU_MMIO_PRIV_BASE[SET_THREAD(0)] =
        WU_DCR__THREAD0_WU_EVENT_SET__GEA_WU_EN_set(0);

    BeDRAM_ReadIncSat(BeDRAM_LOCKNUM_TIMESYNC_BARRIER);
    while(BeDRAM_Read(BeDRAM_LOCKNUM_TIMESYNC_BARRIER) != numthreads * 3)
    {
    }

    mtmsr(msr);
    isync();

    return 0;
}

void radix__switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
{
    mtspr(SPRN_PID, next->context.id);
    isync();
}