void set_core_timer(uint32_t usec, bool periodic) { // we could implement periodic timers using one-shot timers, // but for now we only support one-shot assert(!periodic); if (usec) { uint32_t clocks = (uint64_t)usec*TSC_HZ/1000000; int8_t irq_state = 0; disable_irqsave(&irq_state); mtpcr(PCR_COUNT, 0); mtpcr(PCR_COMPARE, clocks); mtpcr(PCR_SR, mfpcr(PCR_SR) | (1 << (IRQ_TIMER+SR_IM_SHIFT))); enable_irqsave(&irq_state); } else { mtpcr(PCR_SR, mfpcr(PCR_SR) & ~(1 << (IRQ_TIMER+SR_IM_SHIFT))); } }
// could consider having an API to allow these to dynamically change // MTRRs are for physical, static ranges. PAT are linear, more granular, and // more dynamic void setup_default_mtrrs(barrier_t* smp_barrier) { // disable interrupts int8_t state = 0; disable_irqsave(&state); // barrier - if we're meant to do this for all cores, we'll be // passed a pointer to an initialized barrier if (smp_barrier) waiton_barrier(smp_barrier); // disable caching cr0: set CD and clear NW lcr0((rcr0() | CR0_CD) & ~CR0_NW); // flush caches cache_flush(); // flush tlb tlb_flush_global(); // disable MTRRs, and sets default type to WB (06) #ifndef CONFIG_NOMTRRS write_msr(IA32_MTRR_DEF_TYPE, 0x00000006); // Now we can actually safely adjust the MTRRs // MTRR for IO Holes (note these are 64 bit values we are writing) // 0x000a0000 - 0x000c0000 : VGA - WC 0x01 write_msr(IA32_MTRR_PHYSBASE0, PTE_ADDR(VGAPHYSMEM) | 0x01); // if we need to have a full 64bit val, use the UINT64 macro write_msr(IA32_MTRR_PHYSMASK0, 0x0000000ffffe0800); // 0x000c0000 - 0x00100000 : IO devices (and ROM BIOS) - UC 0x00 write_msr(IA32_MTRR_PHYSBASE1, PTE_ADDR(DEVPHYSMEM) | 0x00); write_msr(IA32_MTRR_PHYSMASK1, 0x0000000ffffc0800); // APIC/IOAPIC holes /* Going to skip them, since we set their mode using PAT when we * map them in */ // make sure all other MTRR ranges are disabled (should be unnecessary) write_msr(IA32_MTRR_PHYSMASK2, 0); write_msr(IA32_MTRR_PHYSMASK3, 0); write_msr(IA32_MTRR_PHYSMASK4, 0); write_msr(IA32_MTRR_PHYSMASK5, 0); write_msr(IA32_MTRR_PHYSMASK6, 0); write_msr(IA32_MTRR_PHYSMASK7, 0); // keeps default type to WB (06), turns MTRRs on, and turns off fixed ranges write_msr(IA32_MTRR_DEF_TYPE, 0x00000806); #endif // reflush caches and TLB cache_flush(); tlb_flush_global(); // turn on caching lcr0(rcr0() & ~(CR0_CD | CR0_NW)); // barrier if (smp_barrier) waiton_barrier(smp_barrier); // enable interrupts enable_irqsave(&state); }
/* Runs 'handler(data)' on every core, including the calling one, via
 * immediate kernel messages.
 *
 * If 'wait_wrapper' is non-NULL, a wrapper is allocated and returned
 * through it with every core's wait_list slot set, so the caller can later
 * wait for all cores to finish.  Returns 0 on success or -ENOMEM if no
 * wrapper could be allocated. */
int smp_call_function_all(isr_t handler, void* data,
                          handler_wrapper_t** wait_wrapper)
{
	int8_t irq_state = 0;
	int i;
	handler_wrapper_t* wrapper = 0;

	if (wait_wrapper) {
		wrapper = *wait_wrapper = smp_make_wrapper();
		if (!wrapper)
			return -ENOMEM;
		/* Mark every core as pending before any message goes out. */
		for (i = 0; i < num_cores(); i++)
			wrapper->wait_list[i] = 1;
	}

	/* Interrupts must be on so this core can take its own message. */
	enable_irqsave(&irq_state);
	/* Fan out to every other core first... */
	for (i = 0; i < num_cores(); i++) {
		if (i == core_id())
			continue;
		send_kernel_message(i, (amr_t)smp_call_wrapper, handler,
		                    wrapper, data, KMSG_IMMEDIATE);
	}
	/* ...then deliver to ourselves. */
	send_kernel_message(core_id(), (amr_t)smp_call_wrapper, handler,
	                    wrapper, data, KMSG_IMMEDIATE);
	cpu_relax();	// wait to get the interrupt
	disable_irqsave(&irq_state);
	return 0;
}
/* Interrupt-safe console printf: formats into a stack buffer with
 * interrupts disabled, best-effort grabs the iprint lock, and pushes the
 * text to the screen.
 *
 * fmt: printf-style format string, followed by its arguments.
 * Returns the number of bytes actually emitted.
 *
 * Fix: the raw vsnprintf result was passed straight to screenputs.
 * vsnprintf may return a negative value on error, or the length the full
 * string *would* have had when it is >= sizeof(buf) (truncation) — either
 * way an out-of-bounds length.  Clamp it to what actually fits in buf. */
int iprint(char *fmt, ...)
{
	int8_t s = 0;
	int n, locked;
	va_list arg;
	char buf[PRINTSIZE];

	disable_irqsave(&s);
	va_start(arg, fmt);
	n = vsnprintf(buf, sizeof(buf), fmt, arg);
	va_end(arg);
	/* Defensive clamp: never report/emit more bytes than buf holds. */
	if (n < 0)
		n = 0;
	else if ((size_t)n >= sizeof(buf))
		n = sizeof(buf) - 1;
	/* Best-effort lock: if it's already held (e.g. we interrupted the
	 * holder), print anyway rather than deadlock. */
	locked = iprintcanlock(&iprintlock);
	if (screenputs != NULL && iprintscreenputs)
		screenputs(buf, n);
#if 0
	uartputs(buf, n);
#endif
	if (locked)
		spin_unlock(&iprintlock);
	enable_irqsave(&s);
	return n;
}
/* Runs 'handler(data)' on core 'dest' via an immediate kernel message.
 *
 * If 'wait_wrapper' is non-NULL, a wrapper is allocated and returned
 * through it with dest's wait_list slot set, so the caller can later wait
 * for completion.  Returns 0 on success or -ENOMEM if no wrapper could be
 * allocated. */
int smp_call_function_single(uint32_t dest, isr_t handler, void* data,
                             handler_wrapper_t** wait_wrapper)
{
	int8_t irq_state = 0;
	handler_wrapper_t* wrapper = 0;

	if (wait_wrapper) {
		wrapper = *wait_wrapper = smp_make_wrapper();
		if (!wrapper)
			return -ENOMEM;
		wrapper->wait_list[dest] = 1;
	}

	/* Interrupts must be on in case dest is this very core. */
	enable_irqsave(&irq_state);
	send_kernel_message(dest, (amr_t)smp_call_wrapper, handler, wrapper,
	                    data, KMSG_IMMEDIATE);
	cpu_relax();	// wait to get the interrupt, if it's to this core
	disable_irqsave(&irq_state);
	return 0;
}
/* Echoes n bytes of keyboard input from buf: scans for the ^T^T<cmd>
 * console-escape sequence, then forwards the input to the kbd queue, the
 * kernel message log, the screen, and the serial output queue.
 *
 * 'ctrlt' is static so the ^T count persists across calls; two consecutive
 * ^T's arm the escape, and the next character is the command. */
static void echo(char *buf, int n)
{
	static int ctrlt, pid;	/* NOTE(review): pid appears unused here —
				 * likely a leftover from disabled code. */
	char *e, *p;

	if (n == 0)
		return;
	e = buf + n;
	for (p = buf; p < e; p++) {
		switch (*p) {
#if 0
		case 0x10:	/* ^P */
			if (cpuserver && !kbd.ctlpoff) {
				active.exiting = 1;
				return;
			}
			break;
#endif
		case 0x14:	/* ^T */
			/* Count consecutive ^T's, saturating at 2. */
			ctrlt++;
			if (ctrlt > 2)
				ctrlt = 2;
			continue;
		}
		/* Any non-^T byte before two ^T's resets nothing; only a
		 * fully-armed escape (ctrlt == 2) is interpreted below. */
		if (ctrlt != 2)
			continue;

		/* ^T escapes: consume the armed state and run the command. */
		ctrlt = 0;
		switch (*p) {
#if 0
		case 'S':
		{
			int8_t x = 0;
			disable_irqsave(&x);
			dumpstack();
			procdump();
			enable_irqsave(&x);
			return;
		}
#endif
		case 's':
			/* ^T^Ts: dump the kernel stack. */
			dumpstack();
			return;
#if 0
		case 'x':
			xsummary();
			ixsummary();
			mallocsummary();
			memorysummary();
			pagersummary();
			return;
		case 'd':
			if (consdebug == NULL)
				consdebug = rdb;
			else
				consdebug = NULL;
			printd("consdebug now %#p\n", consdebug);
			return;
		case 'D':
			if (consdebug == NULL)
				consdebug = rdb;
			consdebug();
			return;
		case 'p':
			x = spllo();
			procdump();
			splx(x);
			return;
		case 'q':
			scheddump();
			return;
		case 'k':
			killbig("^t ^t k");
			return;
#endif
		case 'r':
			/* ^T^Tr: shut down. */
			exit(0);
			return;
		}
	}
	/* Not an escape: hand the input to the keyboard queue. */
	qproduce(kbdq, buf, n);
	if (kbd.raw)
		return;		/* raw mode: no echo */
	kmesgputs(buf, n);
	if (screenputs != NULL)
		echoscreen(buf, n);
	if (serialoq)
		echoserialoq(buf, n);
}