static int
elf64_exec(struct preloaded_file *fp)
{
    struct file_metadata *md;
    Elf_Ehdr *hdr;
    pt_entry_t pte;
    uint64_t bi_addr;

    md = file_findmetadata(fp, MODINFOMD_ELFHDR);
    if (md == NULL)
        return (EINVAL);
    hdr = (Elf_Ehdr *)&(md->md_data);

    bi_load(fp, &bi_addr);

    printf("Entering %s at 0x%lx...\n", fp->f_name, hdr->e_entry);

    ldr_enter(fp->f_name);

    __asm __volatile("rsm psr.ic|psr.i;;");
    __asm __volatile("srlz.i;;");

    /*
     * Region 6 is direct mapped UC and region 7 is direct mapped
     * WC. The details of this are controlled by the Alt {I,D}TLB
     * handlers. Here we just make sure that they have the largest
     * possible page size to minimise TLB usage.
     */
    ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (28 << 2));
    ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (28 << 2));

    pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
        PTE_PL_KERN | PTE_AR_RWX | PTE_ED;

    /* Insert a 2^28-byte translation for region 7 into ITR/DTR slot 0. */
    __asm __volatile("mov cr.ifa=%0" :: "r"(IA64_RR_BASE(7)));
    __asm __volatile("mov cr.itir=%0" :: "r"(28 << 2));
    __asm __volatile("ptr.i %0,%1" :: "r"(IA64_RR_BASE(7)), "r"(28 << 2));
    __asm __volatile("ptr.d %0,%1" :: "r"(IA64_RR_BASE(7)), "r"(28 << 2));
    __asm __volatile("srlz.i;;");
    __asm __volatile("itr.i itr[%0]=%1;;" :: "r"(0), "r"(pte));
    __asm __volatile("srlz.i;;");
    __asm __volatile("itr.d dtr[%0]=%1;;" :: "r"(0), "r"(pte));
    __asm __volatile("srlz.i;;");

    enter_kernel(hdr->e_entry, bi_addr);

    /* NOTREACHED */
    return (0);
}
void
platform_reset(void)
{
    volatile unsigned int *p = (void *)0xb8008000;

    /*
     * TODO: we should take care of TLB stuff here. Otherwise the
     * board does not boot properly next time.
     */

    /* Write 0x8000_0001 to the Reset register. */
    *p = 0x80000001;

    /* Jump to the reset vector. */
    __asm __volatile("li $25, 0xbfc00000");
    __asm __volatile("j $25");
}
int
_pthread_spin_lock(pthread_spinlock_t *lock)
{
    struct pthread_spinlock *lck;
    struct pthread *self = _pthread_self();
    int count, oldval, ret;

    if (lock == NULL || (lck = *lock) == NULL)
        ret = EINVAL;
    else if (lck->s_owner == self)
        ret = EDEADLK;
    else {
        do {
            count = SPIN_COUNT;
            while (lck->s_lock) {
#ifdef __i386__
                /* tell cpu we are spinning */
                __asm __volatile("pause");
#endif
                if (--count <= 0) {
                    count = SPIN_COUNT;
                    _pthread_yield();
                }
            }
            atomic_swap_int(&(lck)->s_lock, 1, &oldval);
        } while (oldval);
        lck->s_owner = self;
        ret = 0;
    }
    return (ret);
}
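/*
 * For contrast, a minimal sketch of the matching release path. The
 * struct fields mirror the acquire above; the EPERM check and the
 * atomic_swap_int() release are assumptions, not the library's code.
 */
static int
_pthread_spin_unlock_sketch(pthread_spinlock_t *lock)
{
    struct pthread_spinlock *lck;
    int oldval;

    if (lock == NULL || (lck = *lock) == NULL)
        return (EINVAL);
    if (lck->s_owner != _pthread_self())
        return (EPERM);                         /* caller does not hold it */
    lck->s_owner = NULL;
    atomic_swap_int(&lck->s_lock, 0, &oldval);  /* release the flag */
    return (0);
}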
void
_BSP_Fatal_error(unsigned int v)
{
    _BSP_GPLED0_on();
    _BSP_GPLED1_on();
    printk("%s PANIC ERROR %x\n", _RTEMS_version, v);
    __asm__ __volatile("sc");
}
int
pthread_spin_unlock(pthread_spinlock_t *lock)
{
    /* Order all earlier loads and stores before the releasing store. */
    __asm __volatile("membar #StoreStore | #LoadStore");
    *lock = 0;
    return 0;
}
void
cpu_reset(void)
{
    uint32_t ver = SVR_VER(mfspr(SPR_SVR));

    if (ver == SVR_MPC8572E || ver == SVR_MPC8572 ||
        ver == SVR_MPC8548E || ver == SVR_MPC8548)
        /* Systems with dedicated reset register */
        ccsr_write4(OCP85XX_RSTCR, 2);
    else {
        /* Clear DBCR0, disables debug interrupts and events. */
        mtspr(SPR_DBCR0, 0);
        __asm __volatile("isync");

        /* Enable Debug Interrupts in MSR. */
        mtmsr(mfmsr() | PSL_DE);

        /* Enable debug interrupts and issue reset. */
        mtspr(SPR_DBCR0, mfspr(SPR_DBCR0) | DBCR0_IDM | DBCR0_RST_SYSTEM);
    }

    printf("Reset failed...\n");
    while (1)
        ;
}
void
r4k_icache_sync_range_index_16(vaddr_t va, vsize_t size)
{
    vaddr_t eva;

    eva = round_line(va + size);
    va = trunc_line(va);

    mips_dcache_wbinv_range_index(va, (eva - va));

    __asm __volatile("sync");

    /*
     * Since we're doing Index ops, we expect to not be able
     * to access the address we've been given. So, get the
     * bits that determine the cache index, and make a KSEG0
     * address out of them.
     */
    va = MIPS_PHYS_TO_KSEG0(va & mips_picache_way_mask);

    eva = round_line(va + size);
    va = trunc_line(va);

    while ((eva - va) >= (32 * 16)) {
        cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
        va += (32 * 16);
    }

    while (va < eva) {
        cache_op_r4k_line(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
        va += 16;
    }
}
int
badaddr_read(void *addr, size_t size, int *rptr)
{
    struct thread *td;
    faultbuf env;
    int x;

    /* Get rid of any stale machine checks that have been waiting. */
    __asm __volatile("sync; isync");

    td = curthread;
    if (setfault(env)) {
        td->td_pcb->pcb_onfault = 0;
        __asm __volatile("sync");
        return (1);
    }

    __asm __volatile("sync");

    switch (size) {
    case 1:
        x = *(volatile int8_t *)addr;
        break;
    case 2:
        x = *(volatile int16_t *)addr;
        break;
    case 4:
        x = *(volatile int32_t *)addr;
        break;
    default:
        panic("badaddr: invalid size (%zd)", size);
    }

    /* Make sure we took the machine check, if we caused one. */
    __asm __volatile("sync; isync");

    td->td_pcb->pcb_onfault = 0;
    __asm __volatile("sync");   /* To be sure. */

    /* Use the value to avoid reorder. */
    if (rptr)
        *rptr = x;

    return (0);
}
void
ccsr_write4(uintptr_t addr, uint32_t val)
{
    volatile uint32_t *ptr = (void *)addr;

    *ptr = val;
    __asm __volatile("eieio; sync");
}
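/*
 * A plausible read-side counterpart with the same barrier discipline;
 * this is a sketch mirroring ccsr_write4() above, not quoted from the
 * platform code.
 */
uint32_t
ccsr_read4(uintptr_t addr)
{
    volatile uint32_t *ptr = (void *)addr;
    uint32_t val;

    val = *ptr;
    __asm __volatile("eieio; sync");    /* order against later accesses */
    return (val);
}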
/**
 * Returns the next character or blocks indefinitely. The related input_t
 * structure (scancode, flags, keycode value) is copied into the supplied ptr.
 */
char getchar_ext(input_t *input)
{
    char c;

    while ((c = getch_ext(input)) == -1) {
        /*
         * Sleep until the next interrupt. "sti" takes effect after the
         * following instruction, so no interrupt can slip in between
         * enabling and halting.
         */
        __asm __volatile("sti; hlt");
    }
    return c;
}
/*
 * Dump the machine specific header information at the start of a core dump.
 */
int
cpu_coredump(struct lwp *l, struct coredump_iostate *iocookie,
    struct core *chdr)
{
    /* XXX implement, pause in ski */
    __asm __volatile("break.i 1");
    return -1;
}
void
cpu_idle(void)
{
    struct thread *td = curthread;
    struct mdglobaldata *gd = mdcpu;
    int reqflags;

    crit_exit();
    KKASSERT(td->td_critcount == 0);
    cpu_enable_intr();

    for (;;) {
        /*
         * See if there are any LWKTs ready to go.
         */
        lwkt_switch();

        /*
         * The idle loop halts only if no threads are schedulable
         * and no signals have occurred.
         */
        if (cpu_idle_hlt &&
            (td->td_gd->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) {
            splz();
#ifdef SMP
            KKASSERT(MP_LOCK_HELD() == 0);
#endif
            if ((td->td_gd->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0) {
#ifdef DEBUGIDLE
                struct timeval tv1, tv2;

                gettimeofday(&tv1, NULL);
#endif
                reqflags = gd->mi.gd_reqflags & ~RQF_IDLECHECK_WK_MASK;
                umtx_sleep(&gd->mi.gd_reqflags, reqflags, 1000000);
#ifdef DEBUGIDLE
                gettimeofday(&tv2, NULL);
                if (tv2.tv_usec - tv1.tv_usec +
                    (tv2.tv_sec - tv1.tv_sec) * 1000000 > 500000) {
                    kprintf("cpu %d idlelock %08x %08x\n",
                        gd->mi.gd_cpuid, gd->mi.gd_reqflags,
                        gd->gd_fpending);
                }
#endif
            }
            ++cpu_idle_hltcnt;
        } else {
            splz();
#ifdef SMP
            __asm __volatile("pause");
#endif
            ++cpu_idle_spincnt;
        }
    }
}
void apicHardSleep(uint32_t millisecond) {
    // Start the count-down timer from its maximum value.
    apic::registers[APIC_TIMER_CURRENT_COUNT_REGISTER] = 0xFFFFFFFFu;
    uint32_t target = (apic::busFrequency * millisecond) / 1000;
    uint32_t current;
    do {
        __asm__ __volatile ("HLT");
        current = apic::registers[APIC_TIMER_CURRENT_COUNT_REGISTER];
    } while (0xFFFFFFFFu - current < target);   // elapsed ticks < target
}
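// apicHardSleep() above presumes apic::busFrequency is already known. A
// hedged sketch of one way to obtain it: let the APIC timer free-run for
// an interval measured by another clock and scale. The divide/initial
// count register names and pitSleepMs() are assumptions about this
// kernel, not APIs it is known to have.
static uint32_t calibrateApicBus(void) {
    apic::registers[APIC_TIMER_DIVIDE_CONFIGURATION_REGISTER] = 0x3;  // divide by 16
    apic::registers[APIC_TIMER_INITIAL_COUNT_REGISTER] = 0xFFFFFFFFu; // free-run
    pitSleepMs(10);                                       // 10 ms on the PIT
    uint32_t ticks = 0xFFFFFFFFu
        - apic::registers[APIC_TIMER_CURRENT_COUNT_REGISTER];
    return ticks * 16 * 100;    // undo the divider, scale 10 ms to 1 s
}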
static int
badaddr(void *addr, size_t size)
{
    struct thread *td;
    jmp_buf env, *oldfaultbuf;
    int x;

    /* Get rid of any stale machine checks that have been waiting. */
    __asm __volatile("sync; isync");

    td = curthread;
    oldfaultbuf = td->td_pcb->pcb_onfault;
    td->td_pcb->pcb_onfault = &env;
    if (setjmp(env)) {
        td->td_pcb->pcb_onfault = oldfaultbuf;
        __asm __volatile("sync");
        return 1;
    }

    __asm __volatile("sync");

    switch (size) {
    case 1:
        x = *(volatile int8_t *)addr;
        break;
    case 2:
        x = *(volatile int16_t *)addr;
        break;
    case 4:
        x = *(volatile int32_t *)addr;
        break;
    default:
        panic("badaddr: invalid size (%zd)", size);
    }

    /* Make sure we took the machine check, if we caused one. */
    __asm __volatile("sync; isync");

    td->td_pcb->pcb_onfault = oldfaultbuf;
    __asm __volatile("sync");   /* To be sure. */

    return (0);
}
void
cpu_halt(void)
{
    /* We should have shut down by now; if not, enter a low power sleep. */
    intr_disable();
    while (1) {
        __asm __volatile("wfi");
    }
}
static void
socfpga_trampoline(void)
{
    __asm __volatile(
        "ldr pc, 1f\n"
        ".globl mpentry_addr\n"
        "mpentry_addr:\n"
        "1: .space 4\n");
}
static void
rk30xx_boot2(void)
{
    __asm __volatile(
        "ldr pc, 1f\n"
        ".globl mpentry_addr\n"
        "mpentry_addr:\n"
        "1: .space 4\n");
}
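/*
 * Both trampolines above share one pattern: "1: .space 4" reserves a
 * word in the instruction stream, exported as mpentry_addr, and
 * "ldr pc, 1f" jumps through it. Platform startup code is expected to
 * patch that word with the secondary-entry address before the extra
 * cores are released, roughly (a sketch; names are assumed):
 *
 *     extern uint32_t mpentry_addr;
 *     mpentry_addr = (uint32_t)secondary_entry_paddr;
 */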
/*
 * Entered with psr.ic and psr.i both zero.
 */
void
enter_kernel(uint64_t start, uint64_t bi)
{
    __asm __volatile("srlz.i;;");
    __asm __volatile("mov cr.ipsr=%0"
        :: "r"(IA64_PSR_IC | IA64_PSR_DT | IA64_PSR_RT |
            IA64_PSR_IT | IA64_PSR_BN));
    __asm __volatile("mov cr.iip=%0" :: "r"(start));
    __asm __volatile("mov cr.ifs=r0;;");
    __asm __volatile("mov ar.rsc=0;; flushrs;;");
    __asm __volatile("mov r8=%0" :: "r" (bi));
    __asm __volatile("rfi;;");

    /* NOTREACHED */
}
/*
 * Put the CPU in C1 in a machine-dependent way.
 * XXX: shouldn't be here!
 */
static void
acpi_cpu_c1(void)
{
#ifdef __ia64__
    ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0);
#else
    splz();
#ifdef SMP
    if ((mycpu->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0)
        __asm __volatile("sti; hlt");
    else
        __asm __volatile("sti; pause");
#else
    if ((mycpu->gd_reqflags & RQF_IDLECHECK_WK_MASK) == 0)
        __asm __volatile("sti; hlt");
    else
        __asm __volatile("sti");
#endif
#endif /* !__ia64__ */
}
void kmain(uint32_t magic, multiboot_info_t* mbi)
{
    clear();
    printf("Kernel is on!\n");

    if (magic != MULTIBOOT_BOOTLOADER_MAGIC) {
        printf("Invalid magic code: %x", magic);
        return;
    }

    // Check if the cpuid instruction is available
    if (check_cpuid()) {
        printf("CPUID available\n");
        if (check_apic()) {
            printf("APIC available\n");
        } else {
            printf("APIC not available\n");
        }
    } else {
        printf("CPUID not available\n");
    }

    // Init the floating point unit
    init_fpu();

    // Initialize the Interrupt Descriptor Table and Interrupt Service Routines
    init_idt();

    // Print (if available) the memory map. Note the bitwise test:
    // MULTIBOOT_INFO_MEM_MAP is a flag bit, so `&&` here would be a bug.
    if (mbi->flags & MULTIBOOT_INFO_MEM_MAP) {
        uint32_t mmap_entries = mbi->mmap_length / 24;
        printf("## Memory map ##\n");
        printf("Entries: %u\n", mmap_entries);
        multiboot_memory_map_t* mmap_entry =
            (multiboot_memory_map_t *) mbi->mmap_addr;
        for (uint32_t i = 0; i < mmap_entries; ++i, ++mmap_entry) {
            printf("Entry %u\n", i);
            printf("\t.addr: %x\n", mmap_entry->addr);
            printf("\t.len: %u\n", mmap_entry->len);
            printf("\t.type: ");
            if (mmap_entry->type == MULTIBOOT_MEMORY_AVAILABLE) {
                printf("available\n");
            } else {
                printf("reserved\n");
            }
        }
    }

    // Test breakpoint interrupt
    __asm __volatile("int $0x3");

    init_timer(50); // Initialise timer to 50Hz
}
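// The loop above assumes every mmap entry is exactly 24 bytes. The
// Multiboot spec instead prefixes each entry with its own size field
// (which does not count the field itself), so the portable walk from
// the spec's example looks like this (a sketch using the same mbi):
static void walk_mmap(multiboot_info_t* mbi)
{
    multiboot_memory_map_t* entry =
        (multiboot_memory_map_t *) mbi->mmap_addr;
    while ((uint32_t) entry < mbi->mmap_addr + mbi->mmap_length) {
        // inspect entry->addr, entry->len, entry->type here
        entry = (multiboot_memory_map_t *)
            ((uint32_t) entry + entry->size + sizeof(entry->size));
    }
}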
void
enter_kernel(const char* filename, u_int64_t start, struct bootinfo *bi)
{
    printf("Entering %s at 0x%lx...\n", filename, start);

    while (*filename == '/')
        filename++;
    ssc(0, (u_int64_t) filename, 0, 0, SSC_LOAD_SYMBOLS);

    __asm __volatile("mov cr.ipsr=%0"
        :: "r"(IA64_PSR_IC | IA64_PSR_DT | IA64_PSR_RT |
            IA64_PSR_IT | IA64_PSR_BN));
    __asm __volatile("mov cr.iip=%0" :: "r"(start));
    __asm __volatile("mov cr.ifs=r0;;");
    __asm __volatile("mov r8=%0" :: "r" (bi));
    __asm __volatile("rfi;;");
}
FASTRUN static void portc_interrupt(void)
{
    // TODO: these are inefficient. Use CLZ somehow....
    uint32_t isfr = PORTC_ISFR;
    PORTC_ISFR = isfr;

    // john 2015/06/13
#define ISR_PIN     12
#define IRQ_PIN     11
#define TRIGGER_PIN 10
    digitalWriteFast(ISR_PIN, HIGH);
    digitalWriteFast(TRIGGER_PIN, LOW);
    __asm__ __volatile("NOP");
    __asm__ __volatile("NOP");
    __asm__ __volatile("NOP");
    __asm__ __volatile("NOP");
    __asm__ __volatile("NOP");
    digitalWriteFast(ISR_PIN, LOW);
}
/* serial_writebyte
   Write a byte out to the serial port
   => c = character to output
*/
void serial_writebyte(unsigned char c)
{
    /* loop waiting for bit 5 of the line status register to set,
       indicating data can be written */
    while ((x86_inportb(SERIAL_HW + 5) & 0x20) == 0)
        __asm__ __volatile("pause");

    x86_outportb(SERIAL_HW + 0, c);

    /* in case whatever's connected to the serial port expects CRLF */
#ifdef DEBUG_ENDING_CRLF
    if (c == '\n')
        serial_writebyte('\r');
#endif
}
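/* For reference, the port accessors used above are typically thin
   inb/outb wrappers like the ones below; a sketch, since this kernel's
   own definitions aren't shown here. */
static inline unsigned char x86_inportb(unsigned short port)
{
    unsigned char v;
    __asm__ __volatile("inb %1, %0" : "=a"(v) : "Nd"(port));
    return v;
}

static inline void x86_outportb(unsigned short port, unsigned char v)
{
    __asm__ __volatile("outb %0, %1" : : "a"(v), "Nd"(port));
}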
void
__exec(caddr_t addr, ...)
{
    /* XXX this is wrong */
    __asm __volatile("movl %cr0, %eax");        /* clear PG: paging off */
    __asm __volatile("andl $0x7fffffff, %eax");
    __asm __volatile("mov %eax, %cr0");
    __asm __volatile("xorl %eax, %eax");        /* reload CR3: flush TLB */
    __asm __volatile("mov %eax, %cr3");
    __asm __volatile("movl %cr0, %eax");        /* clear PE: leave protected mode */
    __asm __volatile("andl $0xfffffffe, %eax");
    __asm __volatile("movl %eax, %cr0");
    __asm __volatile("jmp %0" :: "r" (addr));
}
void
bootmain(void)
{
    int sectnum = 64;
    uint8_t *dst = 0;
    int i = 1;

    /* Copy sectors 1..64 to physical address 0, one sector at a time. */
    for (; i <= sectnum; ++i) {
        readsect(dst, i);
        dst += SECTSIZE;
    }

    /* Far jump into the freshly loaded code through GDT selector 0x8. */
    __asm__ __volatile("ljmp $0x8, $0x0");
}
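/* readsect() above is assumed to be the classic single-sector ATA PIO
   helper; a sketch of that shape, with inb/outb/insl as the usual port
   primitives (not defined in this fragment). */
static void
waitdisk(void)
{
    /* wait for the disk to report ready (busy clear, ready set) */
    while ((inb(0x1F7) & 0xC0) != 0x40)
        ;
}

static void
readsect(void *dst, uint32_t secno)
{
    waitdisk();
    outb(0x1F2, 1);                              /* read one sector */
    outb(0x1F3, secno & 0xFF);                   /* LBA bits 0-7 */
    outb(0x1F4, (secno >> 8) & 0xFF);            /* LBA bits 8-15 */
    outb(0x1F5, (secno >> 16) & 0xFF);           /* LBA bits 16-23 */
    outb(0x1F6, ((secno >> 24) & 0x0F) | 0xE0);  /* LBA mode, bits 24-27 */
    outb(0x1F7, 0x20);                           /* command: read sectors */
    waitdisk();
    insl(0x1F0, dst, SECTSIZE / 4);              /* pull the sector in */
}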
/*
 * Perform a board-level soft-reset.
 */
void
platform_reset(void)
{
    /* XXX SMP will likely require us to do more. */
    __asm__ __volatile__(
        "mfc0 $k0, $12\n\t"
        "li $k1, 0x00100000\n\t"
        "or $k0, $k0, $k1\n\t"
        "mtc0 $k0, $12\n");

    for ( ; ; )
        __asm__ __volatile("wait");
}
static void
ralink_cnputc(dev_t dv, int c)
{
    int timo = 150000;

    /* Wait until the transmitter can accept the character. */
    while ((uart_read(RA_UART_LSR) & LSR_TXRDY) == 0 && --timo > 0)
        ;

    uart_write(RA_UART_TBR, c);
    __asm __volatile("sync");

    /* Wait for the transmit shift register to drain. */
    timo = 150000;
    while ((uart_read(RA_UART_LSR) & LSR_TSRE) == 0 && --timo > 0)
        ;
}
void
r4k_icache_sync_all_16(void)
{
    vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
    vaddr_t eva = va + mips_picache_size;

    mips_dcache_wbinv_all();

    __asm __volatile("sync");

    while (va < eva) {
        cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
        va += (32 * 16);
    }
}
void shell_loop()
{
    while (shell_quit == false) {
        if (shell_wait == true) {
            __asm__ __volatile("nop");
        } else {
            shell_parse();
            shell_wait_cmd();
        }
    }
}
void
cpu_idle(int busy)
{
    spinlock_enter();
    if (!busy)
        cpu_idleclock();
    if (!sched_runnable())
        __asm __volatile(
            "dsb sy \n"
            "wfi    \n");
    if (!busy)
        cpu_activeclock();
    spinlock_exit();
}