/*
 * Blocking console input via the PROM.  Reads one byte from the
 * OpenFirmware stdin handle, retrying until a byte actually arrives.
 * CR is normalized to NL and BS to DEL.  With DDB, a run of five
 * consecutive '+' characters drops into the kernel debugger.
 */
int
prom_cngetc(dev_t dev)
{
	unsigned char c = '\0';
	int nread;
#ifdef DDB
	static int plus_run = 0;
#endif

	/* Spin until OF_read() hands us exactly one byte. */
	do {
		nread = OF_read(stdin, &c, 1);
	} while (nread != 1);

#ifdef DDB
	if (c != '+')
		plus_run = 0;
	else if (plus_run++ > 3)
		Debugger();
#endif

	/* Map carriage return to newline and backspace to delete. */
	switch (c) {
	case '\r':
		c = '\n';
		break;
	case '\b':
		c = '\177';
		break;
	}
	return c;
}
/* * Report critical errors. ip may be NULL. */ void hammer_critical_error(hammer_mount_t hmp, hammer_inode_t ip, int error, const char *msg) { hmp->flags |= HAMMER_MOUNT_CRITICAL_ERROR; krateprintf(&hmp->krate, "HAMMER(%s): Critical error inode=%jd error=%d %s\n", hmp->mp->mnt_stat.f_mntfromname, (intmax_t)(ip ? ip->obj_id : -1), error, msg); if (hmp->ronly == 0) { hmp->ronly = 2; /* special errored read-only mode */ hmp->mp->mnt_flag |= MNT_RDONLY; RB_SCAN(hammer_vol_rb_tree, &hmp->rb_vols_root, NULL, hammer_adjust_volume_mode, NULL); kprintf("HAMMER(%s): Forcing read-only mode\n", hmp->mp->mnt_stat.f_mntfromname); } hmp->error = error; if (hammer_debug_critical) Debugger("Entering debugger"); }
/*
 * Obtain a shared lock
 *
 * We do not give pending exclusive locks priority over shared locks as
 * doing so could lead to a deadlock.
 */
void
hammer_lock_sh(struct hammer_lock *lock)
{
	thread_t td = curthread;
	u_int lv;
	u_int nlv;
	const char *ident = "hmrlck";

	/* Caller must already hold a reference on the lock structure. */
	KKASSERT(lock->refs);
	for (;;) {
		/*
		 * Snapshot the lock word.  Every transition below is
		 * applied with a CAS against this snapshot; if the CAS
		 * fails the whole loop retries from a fresh snapshot.
		 */
		lv = lock->lockval;

		if ((lv & HAMMER_LOCKF_EXCLUSIVE) == 0) {
			/* Not exclusively held: bump the share count. */
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
				break;
		} else if (lock->lowner == td) {
			/*
			 * Disallowed case, drop into kernel debugger for
			 * now.  A cont continues w/ an exclusive lock.
			 */
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				if (hammer_debug_critical)
					Debugger("hammer_lock_sh: holding ex");
				break;
			}
		} else {
			/*
			 * Exclusively held by another thread: advertise
			 * that we are waiting, then sleep.  The
			 * tsleep_interlock()/CAS ordering closes the
			 * window where a concurrent release could
			 * otherwise miss waking us (lost-wakeup
			 * avoidance); if the CAS fails we simply retry
			 * without sleeping.
			 */
			nlv = lv | HAMMER_LOCKF_WANTED;
			++hammer_contention_count;
			tsleep_interlock(&lock->lockval, 0);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
				tsleep(&lock->lockval, PINTERLOCKED, ident, 0);
		}
	}
}
void cpu_boot_secondary(struct cpu_info *ci) { struct pcb *pcb; int i; struct pmap *kpm = pmap_kernel(); extern u_int32_t mp_pdirpa; if (mp_verbose) printf("%s: starting", ci->ci_dev.dv_xname); /* XXX move elsewhere, not per CPU. */ mp_pdirpa = kpm->pm_pdirpa; pcb = ci->ci_idle_pcb; if (mp_verbose) printf(", init idle stack ptr is 0x%x\n", pcb->pcb_esp); CPU_STARTUP(ci); /* * wait for it to become ready */ for (i = 100000; (!(ci->ci_flags & CPUF_RUNNING)) && i > 0; i--) { delay(10); } if (!(ci->ci_flags & CPUF_RUNNING)) { printf("%s failed to become ready\n", ci->ci_dev.dv_xname); #ifdef DDB Debugger(); #endif } CPU_START_CLEANUP(ci); }
/*
 * lck_rw_lock_shared_to_exclusive: attempt to upgrade a shared hold on
 * a read/write lock to an exclusive hold.
 *
 * The caller's own shared reference is dropped up front.  If another
 * thread has already claimed the upgrade, FALSE is returned and the
 * caller no longer holds the lock at all (it must re-acquire from
 * scratch).  Otherwise this thread claims lck_rw_want_upgrade,
 * spins/blocks until the remaining readers drain, and returns TRUE
 * holding the lock exclusively.
 */
boolean_t
lck_rw_lock_shared_to_exclusive(
	lck_rw_t	*lck)
{
	int	i;
	boolean_t	do_wakeup = FALSE;
	wait_result_t	res;
#if	MACH_LDEBUG
	int	decrementer;
#endif	/* MACH_LDEBUG */
	boolean_t	istate;
#if	CONFIG_DTRACE
	uint64_t wait_interval = 0;
	int slept = 0;
	int readers_at_sleep = 0;
#endif

	istate = lck_interlock_lock(lck);

	/* Drop our own shared reference while holding the interlock. */
	lck->lck_rw_shared_count--;

	if (lck->lck_rw_want_upgrade) {
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_START,
			     (int)lck, lck->lck_rw_shared_count, lck->lck_rw_want_upgrade, 0, 0);

		/*
		 * Someone else has requested upgrade.
		 * Since we've released a read lock, wake
		 * him up.
		 */
		if (lck->lck_w_waiting && (lck->lck_rw_shared_count == 0)) {
			lck->lck_w_waiting = FALSE;
			do_wakeup = TRUE;
		}

		lck_interlock_unlock(lck, istate);

		if (do_wakeup)
			thread_wakeup(RW_LOCK_WRITER_EVENT(lck));

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX_CODE) | DBG_FUNC_END,
			     (int)lck, lck->lck_rw_shared_count, lck->lck_rw_want_upgrade, 0, 0);

		return (FALSE);
	}

	/* The upgrade is ours; now wait out the remaining readers. */
	lck->lck_rw_want_upgrade = TRUE;

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */
	while (lck->lck_rw_shared_count != 0) {
#if	CONFIG_DTRACE
		if (lockstat_probemap[LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN] && wait_interval == 0) {
			wait_interval = mach_absolute_time();
			readers_at_sleep = lck->lck_rw_shared_count;
		} else {
			wait_interval = -1;
		}
#endif
		i = lock_wait_time[lck->lck_rw_can_sleep ? 1 : 0];

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_START,
			     (int)lck, lck->lck_rw_shared_count, i, 0, 0);

		if (i != 0) {
			/* Spin briefly with the interlock released. */
			lck_interlock_unlock(lck, istate);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - lck_rw_shared_count");
#endif	/* MACH_LDEBUG */
			while (--i != 0 && lck->lck_rw_shared_count != 0)
				lck_rw_lock_pause(istate);
			istate = lck_interlock_lock(lck);
		}

		if (lck->lck_rw_can_sleep && lck->lck_rw_shared_count != 0) {
			/* Still readers after spinning: block instead. */
			lck->lck_w_waiting = TRUE;
			res = assert_wait(RW_LOCK_WRITER_EVENT(lck), THREAD_UNINT);
			if (res == THREAD_WAITING) {
				lck_interlock_unlock(lck, istate);
				res = thread_block(THREAD_CONTINUE_NULL);
#if	CONFIG_DTRACE
				slept = 1;
#endif
				istate = lck_interlock_lock(lck);
			}
		}
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SH_TO_EX1_CODE) | DBG_FUNC_END,
			     (int)lck, lck->lck_rw_shared_count, 0, 0, 0);
	}

	lck_interlock_unlock(lck, istate);
#if	CONFIG_DTRACE
	/*
	 * We infer whether we took the sleep/spin path above by checking readers_at_sleep.
	 */
	if (wait_interval != 0 && wait_interval != (unsigned) -1 && readers_at_sleep) {
		if (slept == 0) {
			LOCKSTAT_RECORD2(LS_LCK_RW_LOCK_SHARED_TO_EXCL_SPIN, lck,
			    mach_absolute_time() - wait_interval, 0);
		} else {
			LOCKSTAT_RECORD4(LS_LCK_RW_LOCK_SHARED_TO_EXCL_BLOCK, lck,
			    mach_absolute_time() - wait_interval, 1,
			    (readers_at_sleep == 0 ? 1 : 0), readers_at_sleep);
		}
	}
	LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE, lck, 1);
#endif
	return (TRUE);
}
/*
 * Stop: flag that we are entering the debugger, then trap into it.
 *
 * The flag is set before the trap so code examined from inside the
 * debugger can see that the debugger is active.
 *
 * Fixed: the declarator used empty parentheses "()", which declares a
 * function with an unspecified parameter list (obsolescent, removed in
 * C23); "(void)" gives a proper prototype without changing any caller.
 */
void
Stop(void)
{
	Debugging = 1;
	Debugger();
}
/*
 * Panic is called on unresolvable fatal errors.  It prints "panic: mesg",
 * and then reboots.  If we are called twice, then we avoid trying to sync
 * the disks as this often leads to recursive panics.
 */
void
panic(const char *fmt, ...)
{
	int bootopt, newpanic;
	globaldata_t gd = mycpu;
	thread_t td = gd->gd_curthread;
	__va_list ap;
	static char buf[256];

#ifdef SMP
	/*
	 * If a panic occurs on multiple cpus before the first is able to
	 * halt the other cpus, only one cpu is allowed to take the panic.
	 * Attempt to be verbose about this situation but if the kprintf()
	 * itself panics don't let us overrun the kernel stack.
	 *
	 * Be very nasty about descheduling our thread at the lowest
	 * level possible in an attempt to freeze the thread without
	 * inducing further panics.
	 *
	 * Bumping gd_trap_nesting_level will also bypass assertions in
	 * lwkt_switch() and allow us to switch away even if we are a
	 * FAST interrupt or IPI.
	 *
	 * The setting of panic_cpu_gd also determines how kprintf()
	 * spin-locks itself.  DDB can set panic_cpu_gd as well.
	 */
	for (;;) {
		globaldata_t xgd = panic_cpu_gd;

		/*
		 * Someone else got the panic cpu
		 */
		if (xgd && xgd != gd) {
			crit_enter();
			++mycpu->gd_trap_nesting_level;
			if (mycpu->gd_trap_nesting_level < 25) {
				kprintf("SECONDARY PANIC ON CPU %d THREAD %p\n",
					mycpu->gd_cpuid, td);
			}
			td->td_release = NULL;	/* be a grinch */
			/* Park this cpu forever; the panic cpu owns it now. */
			for (;;) {
				lwkt_deschedule_self(td);
				lwkt_switch();
			}
			/* NOT REACHED */
			/* --mycpu->gd_trap_nesting_level */
			/* crit_exit() */
		}

		/*
		 * Reentrant panic
		 */
		if (xgd && xgd == gd)
			break;

		/*
		 * We got it
		 */
		if (atomic_cmpset_ptr(&panic_cpu_gd, NULL, gd))
			break;
	}
#else
	panic_cpu_gd = gd;
#endif
	/*
	 * Try to get the system into a working state.  Save information
	 * we are about to destroy.
	 */
	kvcreinitspin();
	if (panicstr == NULL) {
		/* First panic: preserve held tokens for post-mortem. */
		bcopy(td->td_toks_array, panic_tokens, sizeof(panic_tokens));
		panic_tokens_count = td->td_toks_stop - &td->td_toks_base;
	}
	lwkt_relalltokens(td);
	td->td_toks_stop = &td->td_toks_base;

	/*
	 * Setup
	 */
	bootopt = RB_AUTOBOOT | RB_DUMP;
	if (sync_on_panic == 0)
		bootopt |= RB_NOSYNC;
	newpanic = 0;
	if (panicstr) {
		/* Recursive panic: never try to sync the disks again. */
		bootopt |= RB_NOSYNC;
	} else {
		panicstr = fmt;
		newpanic = 1;
	}

	/*
	 * Format the panic string.
	 */
	__va_start(ap, fmt);
	kvsnprintf(buf, sizeof(buf), fmt, ap);
	if (panicstr == fmt)
		panicstr = buf;
	__va_end(ap);
	kprintf("panic: %s\n", buf);
#ifdef SMP
	/* two separate prints in case of an unmapped page and trap */
	kprintf("cpuid = %d\n", mycpu->gd_cpuid);
#endif

#if (NGPIO > 0) && defined(ERROR_LED_ON_PANIC)
	led_switch("error", 1);
#endif

#if defined(WDOG_DISABLE_ON_PANIC) && defined(WATCHDOG_ENABLE)
	wdog_disable();
#endif

	/*
	 * Enter the debugger or fall through & dump.  Entering the
	 * debugger will stop cpus.  If not entering the debugger stop
	 * cpus here.
	 */
#if defined(DDB)
	if (newpanic && trace_on_panic)
		print_backtrace(-1);
	if (debugger_on_panic)
		Debugger("panic");
	else
#endif
#ifdef SMP
	if (newpanic)
		stop_cpus(mycpu->gd_other_cpus);
#else
	;
#endif
	boot(bootopt);
}
/*
 * Shift *chainp up to the specified directory, change the filename
 * to "0xINODENUMBER", and adjust the key.  The chain becomes the
 * invisible hardlink target.
 *
 * The original *chainp has already been marked deleted.
 *
 * NOTE(review): this function is clearly work-in-progress -- the
 * hammer2_chain_create() call is #if 0'd out and a collision on
 * lookup sets ENOSPC, which then panics below.  The comments flag
 * the open issues rather than asserting intended behavior.
 */
static
void
hammer2_hardlink_shiftup(hammer2_trans_t *trans, hammer2_chain_t **chainp,
			hammer2_inode_t *dip, hammer2_chain_t **dchainp,
			int nlinks, int *errorp)
{
	hammer2_inode_data_t *nipdata;
	hammer2_chain_t *chain;
	hammer2_chain_t *xchain;
	hammer2_key_t key_dummy;
	hammer2_key_t lhc;
	hammer2_blockref_t bref;
	int cache_index = -1;

	chain = *chainp;
	lhc = chain->data->ipdata.inum;
	/* Hardlink targets live in the invisible key space. */
	KKASSERT((lhc & HAMMER2_DIRHASH_VISIBLE) == 0);

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  lhc represents the inode number so there is
	 * no collision iteration.
	 *
	 * There should be no key collisions with invisible inode keys.
	 *
	 * WARNING! Must use inode_lock_ex() on dip to handle a stale
	 *	    dip->chain cache.
	 */
retry:
	*errorp = 0;
	xchain = hammer2_chain_lookup(dchainp, &key_dummy,
				      lhc, lhc, &cache_index, 0);
	if (xchain) {
		/* Unexpected collision on an invisible inode key. */
		kprintf("X3 chain %p dip %p dchain %p dip->chain %p\n",
			xchain, dip, *dchainp, dip->chain);
		hammer2_chain_unlock(xchain);
		xchain = NULL;
		*errorp = ENOSPC;
#if 0
		Debugger("X3");
#endif
	}

	/*
	 * Create entry in common parent directory using the seek position
	 * calculated above.
	 *
	 * We must refactor chain because it might have been shifted into
	 * an indirect chain by the create.
	 */
	if (*errorp == 0) {
		KKASSERT(xchain == NULL);
#if 0
		*errorp = hammer2_chain_create(trans, dchainp, &xchain,
					       lhc, 0,
					       HAMMER2_BREF_TYPE_INODE,/* n/a */
					       HAMMER2_INODE_BYTES);   /* n/a */
#endif
		/*XXX this somehow isn't working on chain XXX*/
		/*KKASSERT(xxx)*/
	}

	/*
	 * Cleanup and handle retries.
	 *
	 * NOTE(review): the EAGAIN path drops its *dchainp reference
	 * before looping back to retry, which then uses *dchainp again
	 * in the lookup -- confirm the drop is balanced elsewhere or
	 * this underflows the ref count.  (With the create #if 0'd out,
	 * EAGAIN cannot currently be produced here.)
	 */
	if (*errorp == EAGAIN) {
		kprintf("R");
		hammer2_chain_wait(*dchainp);
		hammer2_chain_drop(*dchainp);
		goto retry;
	}

	/*
	 * Handle the error case
	 */
	if (*errorp) {
		panic("error2");
		KKASSERT(xchain == NULL);
		return;
	}

	/*
	 * Use xchain as a placeholder for (lhc).  Duplicate chain to the
	 * same target bref as xchain and then delete xchain.  The duplication
	 * occurs after xchain in flush order even though xchain is deleted
	 * after the duplication. XXX
	 *
	 * WARNING! Duplications (to a different parent) can cause indirect
	 *	    blocks to be inserted, refactor xchain.
	 */
	bref = chain->bref;
	bref.key = lhc;			/* invisible dir entry key */
	bref.keybits = 0;
	hammer2_chain_duplicate(trans, dchainp, &chain, &bref, 0, 2);

	/*
	 * chain is now 'live' again.. adjust the filename.
	 *
	 * Directory entries are inodes but this is a hidden hardlink
	 * target.  The name isn't used but to ease debugging give it
	 * a name after its inode number.
	 */
	hammer2_chain_modify(trans, &chain, 0);
	nipdata = &chain->data->ipdata;
	ksnprintf(nipdata->filename, sizeof(nipdata->filename),
		  "0x%016jx", (intmax_t)nipdata->inum);
	nipdata->name_len = strlen(nipdata->filename);
	nipdata->name_key = lhc;
	nipdata->nlinks += nlinks;
	*chainp = chain;
}
/*
 * 32-bit compat sigreturn: restore the register state saved by sendsig()
 * from the user-supplied sigcontext.  Validates alignment and the pc/npc
 * values before touching the trapframe, then restores the signal stack
 * flag and signal mask.
 *
 * Fixed: the line "scp = &sc;" had been corrupted to "scp = ≻" (an
 * HTML-entity unescape of "&sc;" -- U+227B).  Without it, subsequent
 * DEBUG printf()s of scp would show the stale user pointer instead of
 * referring to the validated kernel copy.
 */
/* ARGSUSED */
int
compat_16_netbsd32___sigreturn14(struct lwp *l,
    const struct compat_16_netbsd32___sigreturn14_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(struct sigcontext *) sigcntxp;
	} */
	struct netbsd32_sigcontext sc, *scp;
	struct trapframe64 *tf;
	struct proc *p = l->l_proc;

	/* First ensure consistent stack state (see sendsig). */
	write_user_windows();
	if (rwindow_save(l)) {
#ifdef DEBUG
		printf("netbsd32_sigreturn14: rwindow_save(%p) failed, sending SIGILL\n", p);
		Debugger();
#endif
		mutex_enter(p->p_lock);
		sigexit(l, SIGILL);
	}
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW) {
		printf("netbsd32_sigreturn14: %s[%d], sigcntxp %p\n",
		    p->p_comm, p->p_pid, SCARG(uap, sigcntxp));
		if (sigdebug & SDB_DDB)
			Debugger();
	}
#endif
	/* Copy the user's sigcontext into the kernel; reject misalignment. */
	scp = (struct netbsd32_sigcontext *)(u_long)SCARG(uap, sigcntxp);
	if ((vaddr_t)scp & 3 || (copyin((void *)scp, &sc, sizeof sc) != 0)) {
#ifdef DEBUG
		printf("netbsd32_sigreturn14: copyin failed: scp=%p\n", scp);
		Debugger();
#endif
		return (EINVAL);
	}
	/* Work from the validated kernel copy from here on. */
	scp = &sc;

	tf = l->l_md.md_tf;
	/*
	 * Only the icc bits in the psr are used, so it need not be
	 * verified.  pc and npc must be multiples of 4.  This is all
	 * that is required; if it holds, just do it.
	 */
	if (((sc.sc_pc | sc.sc_npc) & 3) != 0 ||
	    (sc.sc_pc == 0) || (sc.sc_npc == 0))
#ifdef DEBUG
	{
		printf("netbsd32_sigreturn14: pc %p or npc %p invalid\n",
		    sc.sc_pc, sc.sc_npc);
		Debugger();
		return (EINVAL);
	}
#else
		return (EINVAL);
#endif
	/* take only psr ICC field */
	tf->tf_tstate = (int64_t)(tf->tf_tstate & ~TSTATE_CCR) |
	    PSRCC_TO_TSTATE(sc.sc_psr);
	tf->tf_pc = (int64_t)sc.sc_pc;
	tf->tf_npc = (int64_t)sc.sc_npc;
	tf->tf_global[1] = (int64_t)sc.sc_g1;
	tf->tf_out[0] = (int64_t)sc.sc_o0;
	tf->tf_out[6] = (int64_t)sc.sc_sp;
#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW) {
		printf("netbsd32_sigreturn14: return trapframe pc=%p sp=%p tstate=%llx\n",
		    (vaddr_t)tf->tf_pc, (vaddr_t)tf->tf_out[6], tf->tf_tstate);
		if (sigdebug & SDB_DDB)
			Debugger();
	}
#endif

	/* Restore signal stack. */
	mutex_enter(p->p_lock);
	if (sc.sc_onstack & SS_ONSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	else
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;

	/* Restore signal mask. */
	(void) sigprocmask1(l, SIG_SETMASK, &sc.sc_mask, 0);
	mutex_exit(p->p_lock);

	return (EJUSTRETURN);
}
/*
 * mips_init: early machine-dependent bootstrap for SGI systems.
 *
 * Runs on the boot CPU while the ARCBios firmware is still usable:
 * clears BSS, locates the kernel symbol table, identifies the system
 * and CPU/cache/TLB configuration, hands physical memory to UVM,
 * installs the exception vectors and TLB refill trampolines, and
 * returns the initial kernel stack pointer for proc0.
 */
caddr_t
mips_init(int argc, void *argv, caddr_t boot_esym)
{
	char *cp;
	int i;
	caddr_t sd;
	u_int cputype;
	vaddr_t tlb_handler, xtlb_handler;
	extern char start[], edata[], end[];
	extern char exception[], e_exception[];
	extern char *hw_vendor, *hw_prod;
	extern void tlb_miss;
	extern void tlb_miss_err_r5k;
	extern void xtlb_miss;
	extern void xtlb_miss_err_r5k;

	/*
	 * Make sure we can access the extended address space.
	 * Note that r10k and later do not allow XUSEG accesses
	 * from kernel mode unless SR_UX is set.
	 */
	setsr(getsr() | SR_KX | SR_UX);

#ifdef notyet
	/*
	 * Make sure KSEG0 cacheability match what we intend to use.
	 *
	 * XXX This does not work as expected on IP30. Does ARCBios
	 * XXX depend on this?
	 */
	cp0_setcfg((cp0_getcfg() & ~0x07) | CCA_CACHED);
#endif

	/*
	 * Clear the compiled BSS segment in OpenBSD code.
	 */
	bzero(edata, end - edata);

	/*
	 * Reserve space for the symbol table, if it exists.
	 */
	ssym = (char *)*(u_int64_t *)end;

	/* Attempt to locate ELF header and symbol table after kernel. */
	if (end[0] == ELFMAG0 && end[1] == ELFMAG1 &&
	    end[2] == ELFMAG2 && end[3] == ELFMAG3) {
		/* ELF header exists directly after kernel. */
		ssym = end;
		esym = boot_esym;
		ekern = esym;
	} else if (((long)ssym - (long)end) >= 0 &&
	    ((long)ssym - (long)end) <= 0x1000 &&
	    ssym[0] == ELFMAG0 && ssym[1] == ELFMAG1 &&
	    ssym[2] == ELFMAG2 && ssym[3] == ELFMAG3) {
		/* Pointers exist directly after kernel. */
		esym = (char *)*((u_int64_t *)end + 1);
		ekern = esym;
	} else {
		/* Pointers aren't setup either... */
		ssym = NULL;
		esym = NULL;
		ekern = end;
	}

	/*
	 * Initialize the system type and set up memory layout.
	 * Note that some systems have a more complex memory setup.
	 */
	bios_ident();

	/*
	 * Determine system type and set up configuration record data.
	 */
	hw_vendor = "SGI";
	switch (sys_config.system_type) {
#if defined(TGT_O2)
	case SGI_O2:
		bios_printf("Found SGI-IP32, setting up.\n");
		hw_prod = "O2";
		strlcpy(cpu_model, "IP32", sizeof(cpu_model));
		ip32_setup();
		sys_config.cpu[0].clock = 180000000;  /* Reasonable default */
		cp = Bios_GetEnvironmentVariable("cpufreq");
		if (cp && atoi(cp, 10, NULL) > 100)
			sys_config.cpu[0].clock = atoi(cp, 10, NULL) * 1000000;
		break;
#endif
#if defined(TGT_ORIGIN200) || defined(TGT_ORIGIN2000)
	case SGI_O200:
		bios_printf("Found SGI-IP27, setting up.\n");
		hw_prod = "Origin 200";
		strlcpy(cpu_model, "IP27", sizeof(cpu_model));
		ip27_setup();
		break;
	case SGI_O300:
		bios_printf("Found SGI-IP35, setting up.\n");
		hw_prod = "Origin 300";
		/* IP27 is intentional, we use the same kernel */
		strlcpy(cpu_model, "IP27", sizeof(cpu_model));
		ip27_setup();
		break;
#endif
#if defined(TGT_OCTANE)
	case SGI_OCTANE:
		bios_printf("Found SGI-IP30, setting up.\n");
		hw_prod = "Octane";
		strlcpy(cpu_model, "IP30", sizeof(cpu_model));
		ip30_setup();
		sys_config.cpu[0].clock = 175000000;  /* Reasonable default */
		cp = Bios_GetEnvironmentVariable("cpufreq");
		if (cp && atoi(cp, 10, NULL) > 100)
			sys_config.cpu[0].clock = atoi(cp, 10, NULL) * 1000000;
		break;
#endif
	default:
		bios_printf("Kernel doesn't support this system type!\n");
		bios_printf("Halting system.\n");
		Bios_Halt();
		while(1);
	}

	/*
	 * Read and store console type.
	 */
	cp = Bios_GetEnvironmentVariable("ConsoleOut");
	if (cp != NULL && *cp != '\0')
		strlcpy(bios_console, cp, sizeof bios_console);

	/* Disable serial console if ARCS is telling us to use video. */
	if (strncmp(bios_console, "video", 5) == 0)
		comconsaddr = 0;

	/*
	 * Look at arguments passed to us and compute boothowto.
	 */
	boothowto = RB_AUTOBOOT;
	dobootopts(argc, argv);

	/*
	 * Figure out where we supposedly booted from.
	 */
	cp = Bios_GetEnvironmentVariable("OSLoadPartition");
	if (cp == NULL)
		cp = "unknown";
	if (strlcpy(osloadpartition, cp, sizeof osloadpartition) >=
	    sizeof osloadpartition)
		bios_printf("Value of `OSLoadPartition' is too large.\n"
		    "The kernel might not be able to find out its root device.\n");

	/*
	 * Read platform-specific environment variables.
	 */
	switch (sys_config.system_type) {
#if defined(TGT_O2)
	case SGI_O2:
		/* Get Ethernet address from ARCBIOS. */
		cp = Bios_GetEnvironmentVariable("eaddr");
		if (cp != NULL && strlen(cp) > 0)
			strlcpy(bios_enaddr, cp, sizeof bios_enaddr);
		break;
#endif
	default:
		break;
	}

	/*
	 * Set pagesize to enable use of page macros and functions.
	 * Commit available memory to UVM system.
	 */
	uvmexp.pagesize = PAGE_SIZE;
	uvm_setpagesize();

	/*
	 * Hand each memory segment to UVM, carving out the pages
	 * occupied by the kernel image and symbol table.
	 */
	for (i = 0; i < MAXMEMSEGS && mem_layout[i].mem_first_page != 0; i++) {
		u_int32_t fp, lp;
		u_int32_t firstkernpage, lastkernpage;
		unsigned int freelist;
		paddr_t firstkernpa, lastkernpa;

		if (IS_XKPHYS((vaddr_t)start))
			firstkernpa = XKPHYS_TO_PHYS((vaddr_t)start);
		else
			firstkernpa = KSEG0_TO_PHYS((vaddr_t)start);
		if (IS_XKPHYS((vaddr_t)ekern))
			lastkernpa = XKPHYS_TO_PHYS((vaddr_t)ekern);
		else
			lastkernpa = KSEG0_TO_PHYS((vaddr_t)ekern);

		firstkernpage = atop(trunc_page(firstkernpa));
		lastkernpage = atop(round_page(lastkernpa));

		fp = mem_layout[i].mem_first_page;
		lp = mem_layout[i].mem_last_page;
		freelist = mem_layout[i].mem_freelist;

		/* Account for kernel and kernel symbol table. */
		if (fp >= firstkernpage && lp < lastkernpage)
			continue;	/* In kernel. */

		if (lp < firstkernpage || fp > lastkernpage) {
			uvm_page_physload(fp, lp, fp, lp, freelist);
			continue;	/* Outside kernel. */
		}

		if (fp >= firstkernpage)
			fp = lastkernpage;
		else if (lp < lastkernpage)
			lp = firstkernpage;
		else { /* Need to split! */
			u_int32_t xp = firstkernpage;
			uvm_page_physload(fp, xp, fp, xp, freelist);
			fp = lastkernpage;
		}
		if (lp > fp)
			uvm_page_physload(fp, lp, fp, lp, freelist);
	}

	switch (sys_config.system_type) {
#if defined(TGT_O2) || defined(TGT_OCTANE)
	case SGI_O2:
	case SGI_OCTANE:
		/* Decode CPU/FPU revision identifiers from coprocessors. */
		sys_config.cpu[0].type = (cp0_get_prid() >> 8) & 0xff;
		sys_config.cpu[0].vers_maj = (cp0_get_prid() >> 4) & 0x0f;
		sys_config.cpu[0].vers_min = cp0_get_prid() & 0x0f;
		sys_config.cpu[0].fptype = (cp1_get_prid() >> 8) & 0xff;
		sys_config.cpu[0].fpvers_maj = (cp1_get_prid() >> 4) & 0x0f;
		sys_config.cpu[0].fpvers_min = cp1_get_prid() & 0x0f;

		/*
		 * Configure TLB.
		 */
		switch(sys_config.cpu[0].type) {
		case MIPS_RM7000:
			/* Rev A (version >= 2) CPU's have 64 TLB entries. */
			if (sys_config.cpu[0].vers_maj < 2) {
				sys_config.cpu[0].tlbsize = 48;
			} else {
				sys_config.cpu[0].tlbsize = 64;
			}
			break;

		case MIPS_R10000:
		case MIPS_R12000:
		case MIPS_R14000:
			sys_config.cpu[0].tlbsize = 64;
			break;

		default:
			sys_config.cpu[0].tlbsize = 48;
			break;
		}
		break;
#endif
	default:
		break;
	}

	/*
	 * Configure cache.
	 */
	switch(sys_config.cpu[0].type) {
	case MIPS_R10000:
	case MIPS_R12000:
	case MIPS_R14000:
		cputype = MIPS_R10000;
		break;
	case MIPS_R5000:
	case MIPS_RM7000:
	case MIPS_RM52X0:
	case MIPS_RM9000:
		cputype = MIPS_R5000;
		break;
	default:
		/*
		 * If we can't identify the cpu type, it must be
		 * r10k-compatible on Octane and Origin families, and
		 * it is likely to be r5k-compatible on O2.
		 */
		switch (sys_config.system_type) {
		case SGI_O2:
			cputype = MIPS_R5000;
			break;
		default:
		case SGI_OCTANE:
		case SGI_O200:
		case SGI_O300:
			cputype = MIPS_R10000;
			break;
		}
		break;
	}
	switch (cputype) {
	case MIPS_R10000:
		Mips10k_ConfigCache();
		sys_config._SyncCache = Mips10k_SyncCache;
		sys_config._InvalidateICache = Mips10k_InvalidateICache;
		sys_config._InvalidateICachePage = Mips10k_InvalidateICachePage;
		sys_config._SyncDCachePage = Mips10k_SyncDCachePage;
		sys_config._HitSyncDCache = Mips10k_HitSyncDCache;
		sys_config._IOSyncDCache = Mips10k_IOSyncDCache;
		sys_config._HitInvalidateDCache = Mips10k_HitInvalidateDCache;
		break;
	default:
	case MIPS_R5000:
		Mips5k_ConfigCache();
		sys_config._SyncCache = Mips5k_SyncCache;
		sys_config._InvalidateICache = Mips5k_InvalidateICache;
		sys_config._InvalidateICachePage = Mips5k_InvalidateICachePage;
		sys_config._SyncDCachePage = Mips5k_SyncDCachePage;
		sys_config._HitSyncDCache = Mips5k_HitSyncDCache;
		sys_config._IOSyncDCache = Mips5k_IOSyncDCache;
		sys_config._HitInvalidateDCache = Mips5k_HitInvalidateDCache;
		break;
	}

	/*
	 * Last chance to call the BIOS. Wiping the TLB means the BIOS' data
	 * areas are demapped on most systems.
	 */
	delay(20*1000);		/* Let any UART FIFO drain... */

	sys_config.cpu[0].tlbwired = UPAGES / 2;
	tlb_set_wired(0);
	tlb_flush(sys_config.cpu[0].tlbsize);
	tlb_set_wired(sys_config.cpu[0].tlbwired);

	/*
	 * Get a console, very early but after initial mapping setup.
	 */
	consinit();
	printf("Initial setup done, switching console.\n");

	/*
	 * Init message buffer.
	 */
	msgbufbase = (caddr_t)pmap_steal_memory(MSGBUFSIZE, NULL,NULL);
	initmsgbuf(msgbufbase, MSGBUFSIZE);

	/*
	 * Allocate U page(s) for proc[0], pm_tlbpid 1.
	 */
	proc0.p_addr = proc0paddr = curprocpaddr =
	    (struct user *)pmap_steal_memory(USPACE, NULL, NULL);
	proc0.p_md.md_regs = (struct trap_frame *)&proc0paddr->u_pcb.pcb_regs;
	tlb_set_pid(1);

	/*
	 * Allocate system data structures.
	 */
	i = (vsize_t)allocsys(NULL);
	sd = (caddr_t)pmap_steal_memory(i, NULL, NULL);
	allocsys(sd);

	/*
	 * Bootstrap VM system.
	 */
	pmap_bootstrap();

	/*
	 * Copy down exception vector code.
	 */
	bcopy(exception, (char *)CACHE_ERR_EXC_VEC, e_exception - exception);
	bcopy(exception, (char *)GEN_EXC_VEC, e_exception - exception);

	/*
	 * Build proper TLB refill handler trampolines.
	 */
	switch (cputype) {
	case MIPS_R5000:
		/*
		 * R5000 processors need a specific chip bug workaround
		 * in their tlb handlers.  Theoretically only revision 1
		 * of the processor need it, but there is evidence
		 * later versions also need it.
		 *
		 * This is also necessary on RM52x0; we test on the `rounded'
		 * cputype value instead of sys_config.cpu[0].type; this
		 * causes RM7k and RM9k to be included, just to be on the
		 * safe side.
		 */
		tlb_handler = (vaddr_t)&tlb_miss_err_r5k;
		xtlb_handler = (vaddr_t)&xtlb_miss_err_r5k;
		break;
	default:
		tlb_handler = (vaddr_t)&tlb_miss;
		xtlb_handler = (vaddr_t)&xtlb_miss;
		break;
	}

	build_trampoline(TLB_MISS_EXC_VEC, tlb_handler);
	build_trampoline(XTLB_MISS_EXC_VEC, xtlb_handler);

	/*
	 * Turn off bootstrap exception vectors.
	 */
	setsr(getsr() & ~SR_BOOT_EXC_VEC);
	proc0.p_md.md_regs->sr = getsr();

	/*
	 * Clear out the I and D caches.
	 */
	Mips_SyncCache();

#ifdef DDB
	db_machine_init();
	if (boothowto & RB_KDB)
		Debugger();
#endif

	/*
	 * Return new stack pointer.
	 */
	return ((caddr_t)proc0paddr + USPACE - 64);
}
/*
 * Do all the stuff that locore normally does before calling main().
 *
 * Runs very early on the boot CPU with the CFE firmware still
 * available: sets up KSEG0 cacheability, zeroes BSS, copies or
 * synthesizes the bootinfo structure, enumerates physical memory via
 * CFE (or falls back to a hardwired 32MB), parses boot flags, hands
 * memory to UVM, allocates proc0's U-area and bootstraps pmap.
 */
void
mach_init(long fwhandle, long magic, long bootdata, long reserved)
{
	void *kernend, *p0;
	u_long first, last;
	extern char edata[], end[];
	int i;
	uint32_t config;

	/* XXX this code must run on the target CPU */
	config = mips3_cp0_config_read();
	config &= ~MIPS3_CONFIG_K0_MASK;
	config |= 0x05;			/* XXX.  cacheable coherent */
	mips3_cp0_config_write(config);

	/* Zero BSS.  XXXCGD: uh, is this really necessary still? */
	memset(edata, 0, end - edata);

	/*
	 * Copy the bootinfo structure from the boot loader.
	 * this has to be done before mips_vector_init is
	 * called because we may need CFE's TLB handler
	 */
	if (magic == BOOTINFO_MAGIC)
		memcpy(&bootinfo, (struct bootinfo_v1 *)bootdata,
		    sizeof bootinfo);
	else if (reserved == CFE_EPTSEAL) {
		/* Booted by CFE without a bootinfo; synthesize one. */
		magic = BOOTINFO_MAGIC;
		bzero(&bootinfo, sizeof bootinfo);
		bootinfo.version = BOOTINFO_VERSION;
		bootinfo.fwhandle = fwhandle;
		bootinfo.fwentry = bootdata;
		bootinfo.ssym = (vaddr_t)end;
		bootinfo.esym = (vaddr_t)end;
	}

	kernend = (void *)mips_round_page(end);
#if NKSYMS || defined(DDB) || defined(LKM)
	if (magic == BOOTINFO_MAGIC) {
		ksym_start = (void *)bootinfo.ssym;
		ksym_end = (void *)bootinfo.esym;
		kernend = (void *)mips_round_page((vaddr_t)ksym_end);
	}
#endif

	consinit();

	uvm_setpagesize();

	/*
	 * Copy exception-dispatch code down to exception vector.
	 * Initialize locore-function vector.
	 * Clear out the I and D caches.
	 */
	mips_vector_init();

#ifdef DEBUG
	printf("fwhandle=%08X magic=%08X bootdata=%08X reserved=%08X\n",
	    (u_int)fwhandle, (u_int)magic, (u_int)bootdata, (u_int)reserved);
#endif

	strcpy(cpu_model, "sb1250");

	if (magic == BOOTINFO_MAGIC) {
		int idx;
		int added;
		uint64_t start, len, type;

		cfe_init(bootinfo.fwhandle, bootinfo.fwentry);
		cfe_present = 1;

		/* Enumerate physical memory from CFE's memory map. */
		idx = 0;
		physmem = 0;
		mem_cluster_cnt = 0;
		while (cfe_enummem(idx, 0, &start, &len, &type) == 0) {
			added = 0;
			printf("Memory Block #%d start %08"PRIx64"X len %08"PRIx64"X: %s: ",
			    idx, start, len, (type == CFE_MI_AVAILABLE) ?
			    "Available" : "Reserved");
			if ((type == CFE_MI_AVAILABLE) &&
			    (mem_cluster_cnt < VM_PHYSSEG_MAX)) {
				/*
				 * XXX Ignore memory above 256MB for now, it
				 * XXX needs special handling.
				 */
				if (start < (256*1024*1024)) {
					physmem += btoc(((int) len));
					mem_clusters[mem_cluster_cnt].start =
					    (long) start;
					mem_clusters[mem_cluster_cnt].size =
					    (long) len;
					mem_cluster_cnt++;
					added = 1;
				}
			}
			if (added)
				printf("added to map\n");
			else
				printf("not added to map\n");
			idx++;
		}
	} else {
		/*
		 * Handle the case of not being called from the firmware.
		 */
		/* XXX hardwire to 32MB; should be kernel config option */
		physmem = 32 * 1024 * 1024 / 4096;
		mem_clusters[0].start = 0;
		mem_clusters[0].size = ctob(physmem);
		mem_cluster_cnt = 1;
	}

	/*
	 * Parse boot flags ("-ads" style words) into boothowto.
	 * NOTE(review): the '\0' case only breaks out of the switch,
	 * not the for loop; the loop then scans the remaining (zeroed)
	 * bytes of boot_flags to completion.  Harmless but worth
	 * confirming it is intentional.
	 */
	for (i = 0; i < sizeof(bootinfo.boot_flags); i++) {
		switch (bootinfo.boot_flags[i]) {
		case '\0':
			break;
		case ' ':
			continue;
		case '-':
			while (bootinfo.boot_flags[i] != ' ' &&
			    bootinfo.boot_flags[i] != '\0') {
				switch (bootinfo.boot_flags[i]) {
				case 'a':
					boothowto |= RB_ASKNAME;
					break;
				case 'd':
					boothowto |= RB_KDB;
					break;
				case 's':
					boothowto |= RB_SINGLE;
					break;
				}
				i++;
			}
		}
	}

	/*
	 * Load the rest of the available pages into the VM system.
	 * The first chunk is tricky because we have to avoid the
	 * kernel, but the rest are easy.
	 */
	first = round_page(MIPS_KSEG0_TO_PHYS(kernend));
	last = mem_clusters[0].start + mem_clusters[0].size;
	uvm_page_physload(atop(first), atop(last), atop(first), atop(last),
	    VM_FREELIST_DEFAULT);

	for (i = 1; i < mem_cluster_cnt; i++) {
		first = round_page(mem_clusters[i].start);
		last = mem_clusters[i].start + mem_clusters[i].size;
		uvm_page_physload(atop(first), atop(last), atop(first),
		    atop(last), VM_FREELIST_DEFAULT);
	}

	/*
	 * Initialize error message buffer (at end of core).
	 */
	mips_init_msgbuf();

	/*
	 * Allocate space for proc0's USPACE
	 */
	p0 = (void *)pmap_steal_memory(USPACE, NULL, NULL);
	lwp0.l_addr = proc0paddr = (struct user *)p0;
	lwp0.l_md.md_regs = (struct frame *)((char *)p0 + USPACE) - 1;
	proc0paddr->u_pcb.pcb_context[11] =
	    MIPS_INT_MASK | MIPS_SR_INT_IE; /* SR */

	pmap_bootstrap();

	/*
	 * Initialize debuggers, and break into them, if appropriate.
	 */
#if NKSYMS || defined(DDB) || defined(LKM)
	ksyms_init(((uintptr_t)ksym_end - (uintptr_t)ksym_start),
	    ksym_start, ksym_end);
#endif

	if (boothowto & RB_KDB) {
#if defined(DDB)
		Debugger();
#endif
	}
}
/*
 * trap_pfault: attempt to resolve a page fault described by the
 * trapframe.  Returns 0 when the fault was resolved (or recovered via
 * the pcb_onfault trampoline), -1 after reporting a fatal kernel
 * fault, or the signal number (SIGBUS/SIGSEGV) to deliver for an
 * unresolved user fault.
 */
static int
trap_pfault(struct trapframe *frame, int usermode)
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map;
	int rv = 0;
	int fault_flags;
	vm_prot_t ftype;
	thread_t td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;

	va = trunc_page(frame->tf_addr);
	if (va >= VM_MIN_KERNEL_ADDRESS) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode) {
			fault_flags = -1;
			ftype = -1;
			goto nogo;
		}
		map = &kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (lp != NULL)
			vm = lp->lwp_vmspace;

		if (vm == NULL) {
			fault_flags = -1;
			ftype = -1;
			goto nogo;
		}

		/*
		 * Debugging, try to catch kernel faults on the user address
		 * space when not inside on onfault (e.g. copyin/copyout)
		 * routine.
		 */
		if (usermode == 0 && (td->td_pcb == NULL ||
		    td->td_pcb->pcb_onfault == NULL)) {
#ifdef DDB
			if (freeze_on_seg_fault) {
				kprintf("trap_pfault: user address fault from kernel mode "
					"%016lx\n", (long)frame->tf_addr);
				while (freeze_on_seg_fault)
					tsleep(&freeze_on_seg_fault, 0, "frzseg", hz * 20);
			}
#endif
		}
		map = &vm->vm_map;
	}

	/*
	 * PGEX_I is defined only if the execute disable bit capability is
	 * supported and enabled.
	 */
	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else if (frame->tf_err & PGEX_I)
		ftype = VM_PROT_EXECUTE;
	else
		ftype = VM_PROT_READ;

	if (map != &kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PHOLD(lp->lwp_proc);

		/*
		 * Issue fault
		 */
		fault_flags = 0;
		if (usermode)
			fault_flags |= VM_FAULT_BURST | VM_FAULT_USERMODE;
		if (ftype & VM_PROT_WRITE)
			fault_flags |= VM_FAULT_DIRTY;
		else
			fault_flags |= VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, fault_flags);

		PRELE(lp->lwp_proc);
	} else {
		/*
		 * Don't have to worry about process locking or stacks in the
		 * kernel.
		 */
		fault_flags = VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}
	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		/*
		 * Recoverable kernel fault: divert control to the
		 * registered onfault handler (copyin/copyout style),
		 * but only from the same stack frame and outside of
		 * interrupt context.
		 *
		 * NOTE: in 64-bit mode traps push rsp/ss
		 * even if no ring change occurs.
		 */
		if (td->td_pcb->pcb_onfault &&
		    td->td_pcb->pcb_onfault_sp == frame->tf_rsp &&
		    td->td_gd->gd_intr_nesting_level == 0) {
			frame->tf_rip = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, frame->tf_addr);
		return (-1);
	}

	/*
	 * NOTE: on x86_64 we have a tf_addr field in the trapframe, no
	 * kludge is needed to pass the fault address to signal handlers.
	 */
	p = td->td_proc;
#ifdef DDB
	if (td->td_lwp->lwp_vkernel == NULL) {
		while (freeze_on_seg_fault) {
			tsleep(p, 0, "freeze", hz * 20);
		}
		if (ddb_on_seg_fault)
			Debugger("ddb_on_seg_fault");
	}
#endif
	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
/*
 * Handle a page fault described by *frame (variant without the
 * execute-disable path; the PGEX_I branch is compiled out under #if JG).
 *
 * Returns 0 if the fault was resolved (or recovered via pcb_onfault),
 * -1 after reporting a fatal kernel-mode fault via trap_fatal(), or the
 * signal number (SIGBUS/SIGSEGV) to deliver for an unresolved user fault.
 */
static int
trap_pfault(struct trapframe *frame, int usermode)
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map;
	int rv = 0;
	int fault_flags;
	vm_prot_t ftype;
	thread_t td = curthread;
	struct lwp *lp = td->td_lwp;
	struct proc *p;

	va = trunc_page(frame->tf_addr);
	if (va >= VM_MIN_KERNEL_ADDRESS) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode) {
			fault_flags = -1;
			ftype = -1;
			goto nogo;
		}
		map = &kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (lp != NULL)
			vm = lp->lwp_vmspace;

		if (vm == NULL) {
			fault_flags = -1;
			ftype = -1;
			goto nogo;
		}

		/*
		 * Debugging aid: try to catch kernel faults on the user
		 * address space when not inside an onfault
		 * (e.g. copyin/copyout) routine.
		 */
		if (usermode == 0 && (td->td_pcb == NULL ||
		    td->td_pcb->pcb_onfault == NULL)) {
#ifdef DDB
			if (freeze_on_seg_fault) {
				kprintf("trap_pfault: user address fault from kernel mode "
					"%016lx\n", (long)frame->tf_addr);
				/* Park until the sysctl knob is cleared. */
				while (freeze_on_seg_fault)
					tsleep(&freeze_on_seg_fault, 0,
					       "frzseg", hz * 20);
			}
#endif
		}
		map = &vm->vm_map;
	}

	/*
	 * Derive the required protection from the page-fault error code.
	 * PGEX_I is defined only if the execute disable bit capability is
	 * supported and enabled.
	 */
	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
#if JG
	else if ((frame->tf_err & PGEX_I) && pg_nx != 0)
		ftype = VM_PROT_EXECUTE;
#endif
	else
		ftype = VM_PROT_READ;

	if (map != &kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PHOLD(lp->lwp_proc);

		/*
		 * Issue the fault against the process's map.
		 */
		fault_flags = 0;
		if (usermode)
			fault_flags |= VM_FAULT_BURST;
		if (ftype & VM_PROT_WRITE)
			fault_flags |= VM_FAULT_DIRTY;
		else
			fault_flags |= VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, fault_flags);

		PRELE(lp->lwp_proc);
	} else {
		/*
		 * Don't have to worry about process locking or stacks in the
		 * kernel.
		 */
		fault_flags = VM_FAULT_NORMAL;
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}
	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		/*
		 * Recover via the pcb onfault vector when not nested inside
		 * an interrupt.
		 */
		if (td->td_gd->gd_intr_nesting_level == 0 &&
		    td->td_pcb->pcb_onfault) {
			frame->tf_rip = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, frame->tf_addr);
		return (-1);
	}

	/*
	 * NOTE: on x86_64 we have a tf_addr field in the trapframe, no
	 * kludge is needed to pass the fault address to signal handlers.
	 */
	p = td->td_proc;
	if (td->td_lwp->lwp_vkernel == NULL) {
		/* Diagnostic dump of the unresolved user fault. */
#ifdef DDB
		if (bootverbose || freeze_on_seg_fault || ddb_on_seg_fault) {
#else
		if (bootverbose) {
#endif
			kprintf("seg-fault ft=%04x ff=%04x addr=%p rip=%p "
				"pid=%d cpu=%d p_comm=%s\n",
				ftype, fault_flags,
				(void *)frame->tf_addr,
				(void *)frame->tf_rip,
				p->p_pid, mycpu->gd_cpuid, p->p_comm);
		}
#ifdef DDB
		/* Optionally freeze the process or enter the debugger. */
		while (freeze_on_seg_fault) {
			tsleep(p, 0, "freeze", hz * 20);
		}
		if (ddb_on_seg_fault)
			Debugger("ddb_on_seg_fault");
#endif
	}

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}

/*
 * Report an unrecoverable trap on the console and panic (or enter the
 * kernel debugger when debugger_on_panic/db_active allow it and
 * kdb_trap() handles the event).
 *
 * frame is the trap frame at the time of the fault; eva is the fault
 * virtual address (meaningful for T_PAGEFLT only).
 */
static void
trap_fatal(struct trapframe *frame, vm_offset_t eva)
{
	int code, ss;
	u_int type;
	long rsp;
	struct soft_segment_descriptor softseg;
	char *msg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	/* Decode the code segment descriptor for the report below. */
	sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)], &softseg);

	if (type <= MAX_TRAP_MSG)
		msg = trap_msg[type];
	else
		msg = "UNKNOWN";
	kprintf("\n\nFatal trap %d: %s while in %s mode\n", type, msg,
	    ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
#ifdef SMP
	/* three separate prints in case of a trap on an unmapped page */
	kprintf("cpuid = %d; ", mycpu->gd_cpuid);
	kprintf("lapic->id = %08x\n", lapic->id);
#endif
	if (type == T_PAGEFLT) {
		kprintf("fault virtual address = 0x%lx\n", eva);
		kprintf("fault code = %s %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_I ? "instruction" : "data",
			code & PGEX_P ?
			"protection violation" : "page not present");
	}
	kprintf("instruction pointer = 0x%lx:0x%lx\n",
	    frame->tf_cs & 0xffff, frame->tf_rip);
	if (ISPL(frame->tf_cs) == SEL_UPL) {
		ss = frame->tf_ss & 0xffff;
		rsp = frame->tf_rsp;
	} else {
		/* Kernel-mode trap: report the frame's own location. */
		ss = GSEL(GDATA_SEL, SEL_KPL);
		rsp = (long)&frame->tf_rsp;
	}
	kprintf("stack pointer = 0x%x:0x%lx\n", ss, rsp);
	kprintf("frame pointer = 0x%x:0x%lx\n", ss, frame->tf_rbp);
	kprintf("code segment = base 0x%lx, limit 0x%lx, type 0x%x\n",
	    softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	kprintf(" = DPL %d, pres %d, long %d, def32 %d, gran %d\n",
	    softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_long,
	    softseg.ssd_def32, softseg.ssd_gran);
	kprintf("processor eflags = ");
	if (frame->tf_rflags & PSL_T)
		kprintf("trace trap, ");
	if (frame->tf_rflags & PSL_I)
		kprintf("interrupt enabled, ");
	if (frame->tf_rflags & PSL_NT)
		kprintf("nested task, ");
	if (frame->tf_rflags & PSL_RF)
		kprintf("resume, ");
	kprintf("IOPL = %ld\n", (frame->tf_rflags & PSL_IOPL) >> 12);
	kprintf("current process = ");
	if (curproc) {
		kprintf("%lu\n", (u_long)curproc->p_pid);
	} else {
		kprintf("Idle\n");
	}
	kprintf("current thread = pri %d ", curthread->td_pri);
	if (curthread->td_critcount)
		kprintf("(CRIT)");
	kprintf("\n");

#ifdef DDB
	/* Give the debugger a chance to take the trap instead of panicing. */
	if ((debugger_on_panic || db_active) && kdb_trap(type, code, frame))
		return;
#endif
	kprintf("trap number = %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic("%s", trap_msg[type]);
	else
		panic("unknown/reserved trap");
}
void cpu_startup() { vaddr_t minaddr, maxaddr; extern char cpu_model[]; /* * Initialize error message buffer. */ initmsgbuf((caddr_t)msgbufp, round_page(MSGBUFSIZE)); /* * Good {morning,afternoon,evening,night}. * Also call CPU init on systems that need that. */ printf("%s%s [%08X %08X]\n", version, cpu_model, vax_cpudata, vax_siedata); if (dep_call->cpu_conf) (*dep_call->cpu_conf)(); printf("real mem = %u (%uMB)\n", ptoa(physmem), ptoa(physmem)/1024/1024); mtpr(AST_NO, PR_ASTLVL); spl0(); /* * Allocate a submap for exec arguments. This map effectively limits * the number of processes exec'ing at any time. */ minaddr = vm_map_min(kernel_map); exec_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, 16 * NCARGS, VM_MAP_PAGEABLE, FALSE, NULL); #if VAX46 || VAX48 || VAX49 || VAX53 || VAX60 /* * Allocate a submap for physio. This map effectively limits the * number of processes doing physio at any one time. * * Note that machines on which all mass storage I/O controllers * can perform address translation, do not need this. */ if (vax_boardtype == VAX_BTYP_46 || vax_boardtype == VAX_BTYP_48 || vax_boardtype == VAX_BTYP_49 || vax_boardtype == VAX_BTYP_1303 || vax_boardtype == VAX_BTYP_60) phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr, VM_PHYS_SIZE, 0, FALSE, NULL); #endif printf("avail mem = %lu (%luMB)\n", ptoa(uvmexp.free), ptoa(uvmexp.free)/1024/1024); /* * Set up buffers, so they can be used to read disk labels. */ bufinit(); #ifdef DDB if (boothowto & RB_KDB) Debugger(); #endif /* * Configure the system. */ if (boothowto & RB_CONFIG) { #ifdef BOOT_CONFIG user_config(); #else printf("kernel does not support -c; continuing..\n"); #endif } }
/*
 * ioctl routine for all vinum device types.
 *
 * The device minor encodes the object type (DEVTYPE): the superdevice
 * accepts the configuration/management commands; subdisk, plex and
 * volume devices accept a small set of disklabel-style ioctls.
 *
 * Returns 0 on success or an errno.  Several configuration commands
 * additionally report detailed status through the _ioctl_reply struct
 * overlaid on 'data' (in which case 0 is returned so the reply reaches
 * userland intact).
 *
 * Fix vs. previous revision: removed the unreachable 'return 0;' that
 * followed 'return vinum_finddaemon();' in the VINUM_FINDDAEMON case.
 */
int
vinumioctl(dev_t dev,
    u_long cmd,
    caddr_t data,
    int flag,
    struct thread *td)
{
    unsigned int objno;
    int error = 0;
    struct sd *sd;
    struct plex *plex;
    struct volume *vol;
    unsigned int index;				/* for transferring config info */
    unsigned int sdno;				/* for transferring config info */
    int fe;					/* free list element number */
    struct _ioctl_reply *ioctl_reply = (struct _ioctl_reply *) data;	/* struct to return */

    /* First, decide what we're looking at */
    switch (DEVTYPE(dev)) {
    case VINUM_SUPERDEV_TYPE:			/* ordinary super device */
	ioctl_reply = (struct _ioctl_reply *) data;	/* save the address to reply to */
	switch (cmd) {
#ifdef VINUMDEBUG
	case VINUM_DEBUG:
	    if (((struct debuginfo *) data)->changeit)	/* change debug settings */
		debug = (((struct debuginfo *) data)->param);
	    else {
		if (debug & DEBUG_REMOTEGDB)
		    boothowto |= RB_GDB;	/* serial debug line */
		else
		    boothowto &= ~RB_GDB;	/* local ddb */
		Debugger("vinum debug");
	    }
	    ioctl_reply = (struct _ioctl_reply *) data;	/* reinstate the address to reply to */
	    ioctl_reply->error = 0;
	    return 0;
#endif

	case VINUM_CREATE:			/* create a vinum object */
	    error = lock_config();		/* get the config for us alone */
	    if (error)				/* can't do it, */
		return error;			/* give up */
	    error = setjmp(command_fail);	/* come back here on error */
	    if (error == 0)			/* first time, */
		ioctl_reply->error = parse_user_config((char *) data,	/* update the config */
		    &keyword_set);
	    else if (ioctl_reply->error == 0) {	/* longjmp, but no error status */
		ioctl_reply->error = EINVAL;	/* note that something's up */
		ioctl_reply->msg[0] = '\0';	/* no message? */
	    }
	    unlock_config();
	    return 0;				/* must be 0 to return the real error info */

	case VINUM_GETCONFIG:			/* get the configuration information */
	    bcopy(&vinum_conf, data, sizeof(vinum_conf));
	    return 0;

	    /* start configuring the subsystem */
	case VINUM_STARTCONFIG:
	    return start_config(*(int *) data);	/* just lock it.  Parameter is 'force' */

	    /*
	     * Move the individual parts of the config to user space.
	     *
	     * Specify the index of the object in the first word of data,
	     * and return the object there.
	     */
	case VINUM_DRIVECONFIG:
	    index = *(int *) data;		/* get the index */
	    if (index >= (unsigned) vinum_conf.drives_allocated)	/* can't do it */
		return ENXIO;			/* bang */
	    bcopy(&DRIVE[index], data, sizeof(struct _drive));	/* copy the config item out */
	    return 0;

	case VINUM_SDCONFIG:
	    index = *(int *) data;		/* get the index */
	    if (index >= (unsigned) vinum_conf.subdisks_allocated)	/* can't do it */
		return ENXIO;			/* bang */
	    bcopy(&SD[index], data, sizeof(struct _sd));	/* copy the config item out */
	    return 0;

	case VINUM_PLEXCONFIG:
	    index = *(int *) data;		/* get the index */
	    if (index >= (unsigned) vinum_conf.plexes_allocated)	/* can't do it */
		return ENXIO;			/* bang */
	    bcopy(&PLEX[index], data, sizeof(struct _plex));	/* copy the config item out */
	    return 0;

	case VINUM_VOLCONFIG:
	    index = *(int *) data;		/* get the index */
	    if (index >= (unsigned) vinum_conf.volumes_allocated)	/* can't do it */
		return ENXIO;			/* bang */
	    bcopy(&VOL[index], data, sizeof(struct _volume));	/* copy the config item out */
	    return 0;

	case VINUM_PLEXSDCONFIG:
	    index = *(int *) data;		/* get the plex index */
	    sdno = ((int *) data)[1];		/* and the sd index */
	    if ((index >= (unsigned) vinum_conf.plexes_allocated)	/* plex doesn't exist */
		||(sdno >= PLEX[index].subdisks))	/* or it doesn't have this many subdisks */
		return ENXIO;			/* bang */
	    bcopy(&SD[PLEX[index].sdnos[sdno]],	/* copy the config item out */
		data, sizeof(struct _sd));
	    return 0;

	    /*
	     * We get called in two places: one from the
	     * userland config routines, which call us
	     * to complete the config and save it.  This
	     * call supplies the value 0 as a parameter.
	     *
	     * The other place is from the user "saveconfig"
	     * routine, which can only work if we're *not*
	     * configuring.  In this case, supply parameter 1.
	     */
	case VINUM_SAVECONFIG:
	    if (VFLAGS & VF_CONFIGURING) {	/* must be us, the others are asleep */
		if (*(int *) data == 0)		/* finish config */
		    finish_config(1);		/* finish the configuration and update it */
		else
		    return EBUSY;		/* can't do it now */
	    }
	    save_config();			/* save configuration to disk */
	    return 0;

	case VINUM_RELEASECONFIG:		/* release the config */
	    if (VFLAGS & VF_CONFIGURING) {	/* must be us, the others are asleep */
		finish_config(0);		/* finish the configuration, don't change it */
		save_config();			/* save configuration to disk */
	    } else
		error = EINVAL;			/* release what config? */
	    return error;

	case VINUM_INIT:
	    ioctl_reply = (struct _ioctl_reply *) data;	/* reinstate the address to reply to */
	    ioctl_reply->error = 0;
	    return 0;

	case VINUM_RESETCONFIG:
	    if (vinum_inactive(0)) {		/* if the volumes are not active */
		/*
		 * Note the open count.  We may be called from v, so we'll be open.
		 * Keep the count so we don't underflow.
		 */
		free_vinum(1);			/* clean up everything */
		log(LOG_NOTICE, "vinum: CONFIGURATION OBLITERATED\n");
		ioctl_reply = (struct _ioctl_reply *) data;	/* reinstate the address to reply to */
		ioctl_reply->error = 0;
		return 0;
	    }
	    return EBUSY;

	case VINUM_SETSTATE:
	    setstate((struct vinum_ioctl_msg *) data);	/* set an object state */
	    return 0;

	    /*
	     * Set state by force, without changing
	     * anything else.
	     */
	case VINUM_SETSTATE_FORCE:
	    setstate_by_force((struct vinum_ioctl_msg *) data);	/* set an object state */
	    return 0;

#ifdef VINUMDEBUG
	case VINUM_MEMINFO:
	    vinum_meminfo(data);
	    return 0;

	case VINUM_MALLOCINFO:
	    return vinum_mallocinfo(data);

	case VINUM_RQINFO:
	    return vinum_rqinfo(data);
#endif

	case VINUM_LABEL:			/* label a volume */
	    ioctl_reply->error = write_volume_label(*(int *) data);	/* index of the volume to label */
	    ioctl_reply->msg[0] = '\0';		/* no message */
	    return 0;

	case VINUM_REMOVE:
	    remove((struct vinum_ioctl_msg *) data);	/* remove an object */
	    return 0;

	case VINUM_GETFREELIST:			/* get a drive free list element */
	    index = *(int *) data;		/* get the drive index */
	    fe = ((int *) data)[1];		/* and the free list element */
	    if ((index >= (unsigned) vinum_conf.drives_allocated)	/* plex doesn't exist */
		||(DRIVE[index].state == drive_unallocated))
		return ENODEV;
	    if (fe >= DRIVE[index].freelist_entries)	/* no such entry */
		return ENOENT;
	    bcopy(&DRIVE[index].freelist[fe],
		data,
		sizeof(struct drive_freelist));
	    return 0;

	case VINUM_RESETSTATS:
	    resetstats((struct vinum_ioctl_msg *) data);	/* reset object stats */
	    return 0;

	    /* attach an object to a superordinate object */
	case VINUM_ATTACH:
	    attachobject((struct vinum_ioctl_msg *) data);
	    return 0;

	    /* detach an object from a superordinate object */
	case VINUM_DETACH:
	    detachobject((struct vinum_ioctl_msg *) data);
	    return 0;

	    /* rename an object */
	case VINUM_RENAME:
	    renameobject((struct vinum_rename_msg *) data);
	    return 0;

	    /* replace an object */
	case VINUM_REPLACE:
	    replaceobject((struct vinum_ioctl_msg *) data);
	    return 0;

	case VINUM_DAEMON:
	    vinum_daemon();			/* perform the daemon */
	    return 0;

	case VINUM_FINDDAEMON:			/* check for presence of daemon */
	    return vinum_finddaemon();

	case VINUM_SETDAEMON:			/* set daemon flags */
	    return vinum_setdaemonopts(*(int *) data);

	case VINUM_GETDAEMON:			/* get daemon flags */
	    *(int *) data = daemon_options;
	    return 0;

	case VINUM_PARITYOP:			/* check/rebuild RAID-4/5 parity */
	    parityops((struct vinum_ioctl_msg *) data);
	    return 0;

	    /* move an object */
	case VINUM_MOVE:
	    moveobject((struct vinum_ioctl_msg *) data);
	    return 0;

	default:
	    /* FALLTHROUGH */
	    break;
	}

	/* Unknown superdevice command falls through to the error report. */
    case VINUM_DRIVE_TYPE:
    default:
	log(LOG_WARNING,
	    "vinumioctl: invalid ioctl from process %d (%s): %lx\n",
	    curthread->td_proc->p_pid,
	    curthread->td_proc->p_comm,
	    cmd);
	return EINVAL;

    case VINUM_SD_TYPE:
    case VINUM_RAWSD_TYPE:
	objno = Sdno(dev);

	sd = &SD[objno];

	switch (cmd) {
	case DIOCGDINFO:			/* get disk label */
	    get_volume_label(sd->name, 1, sd->sectors,
		(struct disklabel *) data);
	    break;

	    /*
	     * We don't have this stuff on hardware,
	     * so just pretend to do it so that
	     * utilities don't get upset.
	     */
	case DIOCWDINFO:			/* write partition info */
	case DIOCSDINFO:			/* set partition info */
	    return 0;				/* not a titty */

	default:
	    return ENOTTY;			/* not my kind of ioctl */
	}

	return 0;				/* pretend we did it */

    case VINUM_RAWPLEX_TYPE:
    case VINUM_PLEX_TYPE:
	objno = Plexno(dev);

	plex = &PLEX[objno];

	switch (cmd) {
	case DIOCGDINFO:			/* get disk label */
	    get_volume_label(plex->name, 1, plex->length,
		(struct disklabel *) data);
	    break;

	    /*
	     * We don't have this stuff on hardware,
	     * so just pretend to do it so that
	     * utilities don't get upset.
	     */
	case DIOCWDINFO:			/* write partition info */
	case DIOCSDINFO:			/* set partition info */
	    return 0;				/* not a titty */

	default:
	    return ENOTTY;			/* not my kind of ioctl */
	}

	return 0;				/* pretend we did it */

    case VINUM_VOLUME_TYPE:
	objno = Volno(dev);

	if ((unsigned) objno >= (unsigned) vinum_conf.volumes_allocated)	/* not a valid volume */
	    return ENXIO;
	vol = &VOL[objno];
	if (vol->state != volume_up)		/* not up, */
	    return EIO;				/* I/O error */

	switch (cmd) {
	case DIOCGMEDIASIZE:
	    *(off_t *)data = vol->size << DEV_BSHIFT;
	    break;

	case DIOCGSECTORSIZE:
	    *(u_int *)data = DEV_BSIZE;
	    break;

	    /*
	     * We don't have this stuff on hardware,
	     * so just pretend to do it so that
	     * utilities don't get upset.
	     */
	case DIOCWDINFO:			/* write partition info */
	case DIOCSDINFO:			/* set partition info */
	    return 0;				/* not a titty */

	case DIOCWLABEL:			/* set or reset label writeable */
	    if ((flag & FWRITE) == 0)		/* not writeable? */
		return EACCES;			/* no, die */
	    if (*(int *) data != 0)		/* set it? */
		vol->flags |= VF_WLABEL;	/* yes */
	    else
		vol->flags &= ~VF_WLABEL;	/* no, reset */
	    break;

	default:
	    return ENOTTY;			/* not my kind of ioctl */
	}
	break;
    }
    return 0;					/* XXX */
}
/*
 * u_int initarm(...)
 *
 * Initial entry point on startup for the OSK5912 board.  This gets
 * called before main() is entered.
 * It should be responsible for setting up everything that must be
 * in place when main is called.
 * This includes
 *   Taking a copy of the boot configuration structure.
 *   Initialising the physical console so characters can be printed.
 *   Setting up page tables for the kernel.
 *   Relocating the kernel to the bottom of physical memory.
 *
 * Returns the initial kernel stack pointer for the SVC mode stack.
 * The call order below is significant; do not reorder.
 */
u_int
initarm(void *arg)
{
	/*
	 * When we enter here, we are using a temporary first level
	 * translation table with section entries in it to cover the TIPB
	 * peripherals and SDRAM.  The temporary first level translation
	 * table is at the end of SDRAM.
	 */

	/* Heads up ... Setup the CPU / MMU / TLB functions. */
	if (set_cpufuncs())
		panic("cpu not recognized!");

	init_clocks();

	/* The console is going to try to map things.  Give pmap a devmap. */
	pmap_devmap_register(devmap);
	consinit();
#ifdef KGDB
	kgdb_port_init();
#endif

#ifdef VERBOSE_INIT_ARM
	/* Talk to the user */
	printf("\nNetBSD/evbarm (OSK5912) booting ...\n");
#endif

#ifdef BOOT_ARGS
	char mi_bootargs[] = BOOT_ARGS;
	parse_mi_bootargs(mi_bootargs);
#endif

#ifdef VERBOSE_INIT_ARM
	printf("initarm: Configuring system ...\n");
#endif

	/*
	 * Set up the variables that define the availability of physical
	 * memory.
	 */
	physical_start = KERNEL_BASE_PHYS;
	physical_end = physical_start + MEMSIZE_BYTES;
	physmem = MEMSIZE_BYTES / PAGE_SIZE;

	/* Fake bootconfig structure for the benefit of pmap.c. */
	bootconfig.dramblocks = 1;
	bootconfig.dram[0].address = physical_start;
	bootconfig.dram[0].pages = physmem;

	/*
	 * Our kernel is at the beginning of memory, so set our free space to
	 * all the memory after the kernel.
	 */
	physical_freestart = KERN_VTOPHYS(round_page((vaddr_t) _end));
	physical_freeend = physical_end;
	free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;

	/*
	 * This is going to do all the hard work of setting up the first and
	 * and second level page tables.  Pages of memory will be allocated
	 * and mapped for other structures that are required for system
	 * operation.  When it returns, physical_freestart and free_pages will
	 * have been updated to reflect the allocations that were made.  In
	 * addition, kernel_l1pt, kernel_pt_table[], systempage, irqstack,
	 * abtstack, undstack, kernelstack, msgbufphys will be set to point to
	 * the memory that was allocated for them.
	 */
	setup_real_page_tables();

	/*
	 * Moved from cpu_startup() as data_abort_handler() references
	 * this during uvm init.
	 */
	proc0paddr = (struct user *)kernelstack.pv_va;
	lwp0.l_addr = proc0paddr;

#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

	arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("init subsystems: stacks ");
#endif
	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

	/*
	 * Well we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler.
	 * Until then we will use a handler that just panics but tells us
	 * why.
	 * Initialisation of the vectors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("vectors ");
#endif
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;

	/* Initialise the undefined instruction handlers */
#ifdef VERBOSE_INIT_ARM
	printf("undefined ");
#endif
	undefined_init();

	/* Load memory into UVM. */
#ifdef VERBOSE_INIT_ARM
	printf("page ");
#endif
	uvm_setpagesize();	/* initialize PAGE_SIZE-dependent variables */
	uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
	    atop(physical_freestart), atop(physical_freeend),
	    VM_FREELIST_DEFAULT);

	/* Boot strap pmap telling it where the kernel page table is */
#ifdef VERBOSE_INIT_ARM
	printf("pmap ");
#endif
	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);

#ifdef VERBOSE_INIT_ARM
	printf("done.\n");
#endif

#ifdef KGDB
	if (boothowto & RB_KDB) {
		kgdb_debug_init = 1;
		kgdb_connect(1);
	}
#endif

#ifdef DDB
	db_machine_init();

	/* Firmware doesn't load symbols. */
	ddb_init(0, NULL, NULL);

	if (boothowto & RB_KDB)
		Debugger();
#endif

	/* We return the new stack pointer address */
	return(kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}
void x86_ipi_db(struct cpu_info *ci) { Debugger(); }
/*
 * ochain represents the target file inode.  We need to move it to the
 * specified common parent directory (dip) and rename it to a special
 * invisible "0xINODENUMBER" filename.
 *
 * We use chain_duplicate and duplicate ochain at the new location,
 * renaming it appropriately.  We create a temporary chain and
 * then delete it to placemark where the duplicate will go.  Both of
 * these use the inode number for (lhc) (the key), generating the
 * invisible filename.
 *
 * Returns the new (duplicated) chain on success, or NULL with *errorp
 * set on failure.  *ochainp may be updated via hammer2_chain_refactor().
 */
static
hammer2_chain_t *
hammer2_hardlink_shiftup(hammer2_trans_t *trans, hammer2_chain_t **ochainp,
			 hammer2_inode_t *dip, int *errorp)
{
	hammer2_inode_data_t *nipdata;
	hammer2_chain_t *parent;
	hammer2_chain_t *ochain;
	hammer2_chain_t *nchain;
	hammer2_chain_t *tmp;
	hammer2_key_t lhc;
	hammer2_blockref_t bref;

	ochain = *ochainp;
	*errorp = 0;
	/* The inode number doubles as the (invisible) directory key. */
	lhc = ochain->data->ipdata.inum;
	KKASSERT((lhc & HAMMER2_DIRHASH_VISIBLE) == 0);

	/*
	 * Locate the inode or indirect block to create the new
	 * entry in.  lhc represents the inode number so there is
	 * no collision iteration.
	 *
	 * There should be no key collisions with invisible inode keys.
	 */
retry:
	parent = hammer2_chain_lookup_init(dip->chain, 0);
	nchain = hammer2_chain_lookup(&parent, lhc, lhc, 0);
	if (nchain) {
		/* Unexpected collision on the invisible key; report it. */
		kprintf("X3 chain %p parent %p dip %p dip->chain %p\n",
			nchain, parent, dip, dip->chain);
		hammer2_chain_unlock(nchain);
		nchain = NULL;
		*errorp = ENOSPC;
#if 1
		Debugger("X3");
#endif
	}

	/*
	 * Create entry in common parent directory using the seek position
	 * calculated above.
	 */
	if (*errorp == 0) {
		KKASSERT(nchain == NULL);
		*errorp = hammer2_chain_create(trans, &parent, &nchain,
					       lhc, 0,
					       HAMMER2_BREF_TYPE_INODE,/* n/a */
					       HAMMER2_INODE_BYTES);   /* n/a */
		hammer2_chain_refactor(&ochain);
		*ochainp = ochain;
	}

	/*
	 * Cleanup and handle retries.
	 */
	if (*errorp == EAGAIN) {
		hammer2_chain_ref(parent);
		hammer2_chain_lookup_done(parent);
		hammer2_chain_wait(parent);
		hammer2_chain_drop(parent);
		goto retry;
	}

	/*
	 * Handle the error case.
	 */
	if (*errorp) {
		KKASSERT(nchain == NULL);
		hammer2_chain_lookup_done(parent);
		return (NULL);
	}

	/*
	 * Use chain as a placeholder for (lhc), delete it and replace
	 * it with our duplication.
	 *
	 * Gain a second lock on ochain for the duplication function to
	 * unlock, maintain the caller's original lock across the call.
	 *
	 * This is a bit messy.
	 */
	hammer2_chain_delete(trans, nchain);
	hammer2_chain_lock(ochain, HAMMER2_RESOLVE_ALWAYS);
	tmp = ochain;
	bref = tmp->bref;
	bref.key = lhc;			/* invisible dir entry key */
	bref.keybits = 0;
	hammer2_chain_duplicate(trans, parent, nchain->index, &tmp, &bref);
	hammer2_chain_lookup_done(parent);
	hammer2_chain_unlock(nchain);	/* no longer needed */

	/*
	 * Now set chain to our duplicate and modify it appropriately.
	 *
	 * Directory entries are inodes but this is a hidden hardlink
	 * target.  The name isn't used but to ease debugging give it
	 * a name after its inode number.
	 */
	nchain = tmp;
	tmp = NULL;	/* safety */
	hammer2_chain_modify(trans, &nchain, HAMMER2_MODIFY_ASSERTNOCOPY);
	nipdata = &nchain->data->ipdata;
	ksnprintf(nipdata->filename, sizeof(nipdata->filename),
		  "0x%016jx", (intmax_t)nipdata->inum);
	nipdata->name_len = strlen(nipdata->filename);
	nipdata->name_key = lhc;

	return (nchain);
}
/*
 * Build and deliver a 32-bit (netbsd32) sigcontext-style signal frame
 * on a 64-bit SPARC trapframe.
 *
 * Picks the signal stack or the current user stack, constructs the
 * sparc32_sigframe in kernel space, copies it out (together with a
 * linking register window so the handler's %fp chains to the
 * interrupted frame), and redirects the trapframe to the signal
 * trampoline.  On copyout failure the process is killed with SIGILL.
 *
 * Called with p->p_lock held; the lock is dropped around copyout and
 * reacquired (see mutex_exit/mutex_enter pairs below).
 */
static void
netbsd32_sendsig_sigcontext(const ksiginfo_t *ksi, const sigset_t *mask)
{
	int sig = ksi->ksi_signo;
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct sparc32_sigframe *fp;
	struct trapframe64 *tf;
	int addr, onstack, error;
	struct rwindow32 *oldsp, *newsp;
	register32_t sp;
	sig_t catcher = SIGACTION(p, sig).sa_handler;
	struct sparc32_sigframe sf;
	extern char netbsd32_sigcode[], netbsd32_esigcode[];
#define	szsigcode	(netbsd32_esigcode - netbsd32_sigcode)

	tf = l->l_md.md_tf;
	/* Need to attempt to zero extend this 32-bit pointer */
	oldsp = (struct rwindow32 *)(u_long)(u_int)tf->tf_out[6];
	/* Do we need to jump onto the signal stack? */
	onstack =
	    (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;
	if (onstack) {
		fp = (struct sparc32_sigframe *)((char *)l->l_sigstk.ss_sp +
		    l->l_sigstk.ss_size);
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	} else
		fp = (struct sparc32_sigframe *)oldsp;
	/* Step below the frame and force 8-byte alignment. */
	fp = (struct sparc32_sigframe *)((u_long)(fp - 1) & ~7);

#ifdef DEBUG
	sigpid = p->p_pid;
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid) {
		printf("sendsig: %s[%d] sig %d newusp %p scp %p oldsp %p\n",
		    p->p_comm, p->p_pid, sig, fp, &fp->sf_sc, oldsp);
		if (sigdebug & SDB_DDB)
			Debugger();
	}
#endif
	/*
	 * Now set up the signal frame.  We build it in kernel space
	 * and then copy it out.  We probably ought to just build it
	 * directly in user space....
	 */
	sf.sf_signo = sig;
	sf.sf_code = (u_int)ksi->ksi_trap;
#if defined(COMPAT_SUNOS) || defined(MODULAR)
	sf.sf_scp = (u_long)&fp->sf_sc;
#endif
	sf.sf_addr = 0;			/* XXX */

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	sf.sf_sc.sc_onstack = onstack;
	sf.sf_sc.sc_mask = *mask;
	sf.sf_sc.sc_sp = (u_long)oldsp;
	sf.sf_sc.sc_pc = tf->tf_pc;
	sf.sf_sc.sc_npc = tf->tf_npc;
	sf.sf_sc.sc_psr = TSTATECCR_TO_PSR(tf->tf_tstate); /* XXX */
	sf.sf_sc.sc_g1 = tf->tf_global[1];
	sf.sf_sc.sc_o0 = tf->tf_out[0];

	/*
	 * Put the stack in a consistent state before we whack away
	 * at it.  Note that write_user_windows may just dump the
	 * registers into the pcb; we need them in the process's memory.
	 * We also need to make sure that when we start the signal handler,
	 * its %i6 (%fp), which is loaded from the newly allocated stack area,
	 * joins seamlessly with the frame it was in when the signal occurred,
	 * so that the debugger and _longjmp code can back up through it.
	 */
	sendsig_reset(l, sig);
	mutex_exit(p->p_lock);
	newsp = (struct rwindow32 *)((long)fp - sizeof(struct rwindow32));
	write_user_windows();
#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK))
		printf("sendsig: saving sf to %p, setting stack pointer %p to %p\n",
		    fp, &(((struct rwindow32 *)newsp)->rw_in[6]), oldsp);
#endif
	sp = NETBSD32PTR32I(oldsp);
	/* Frame copyout plus the %i6 link; any failure is fatal below. */
	error = (rwindow_save(l) ||
	    copyout(&sf, fp, sizeof sf) ||
	    copyout(&sp, &(((struct rwindow32 *)newsp)->rw_in[6]),
		sizeof(sp)));
	mutex_enter(p->p_lock);
	if (error) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
#ifdef DEBUG
		mutex_exit(p->p_lock);
		if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid)
			printf("sendsig: window save or copyout error\n");
		printf("sendsig: stack was trashed trying to send sig %d, sending SIGILL\n", sig);
		if (sigdebug & SDB_DDB)
			Debugger();
		mutex_enter(p->p_lock);
#endif
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

#ifdef DEBUG
	if (sigdebug & SDB_FOLLOW) {
		printf("sendsig: %s[%d] sig %d scp %p\n",
		    p->p_comm, p->p_pid, sig, &fp->sf_sc);
	}
#endif

	/*
	 * Arrange to continue execution at the code copied out in exec().
	 * It needs the function to call in %g1, and a new stack pointer.
	 */
	addr = p->p_psstrp - szsigcode;
	tf->tf_global[1] = (long)catcher;
	tf->tf_pc = addr;
	tf->tf_npc = addr + 4;
	tf->tf_out[6] = (uint64_t)(u_int)(u_long)newsp;

	/* Remember that we're now on the signal stack. */
	if (onstack)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
#ifdef DEBUG
	if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid) {
		mutex_exit(p->p_lock);
		printf("sendsig: about to return to catcher %p thru %p\n",
		    catcher, addr);
		if (sigdebug & SDB_DDB)
			Debugger();
		mutex_enter(p->p_lock);
	}
#endif
}
int ddb_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp, size_t newlen, struct proc *p) { int error, ctlval; /* All sysctl names at this level are terminal. */ if (namelen != 1) return (ENOTDIR); switch (name[0]) { case DBCTL_RADIX: return sysctl_int(oldp, oldlenp, newp, newlen, &db_radix); case DBCTL_MAXWIDTH: return sysctl_int(oldp, oldlenp, newp, newlen, &db_max_width); case DBCTL_TABSTOP: return sysctl_int(oldp, oldlenp, newp, newlen, &db_tab_stop_width); case DBCTL_MAXLINE: return sysctl_int(oldp, oldlenp, newp, newlen, &db_max_line); case DBCTL_PANIC: if (securelevel > 0) return (sysctl_int_lower(oldp, oldlenp, newp, newlen, &db_panic)); else { ctlval = db_panic; if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &ctlval)) || newp == NULL) return (error); if (ctlval != 1 && ctlval != 0) return (EINVAL); db_panic = ctlval; return (0); } break; case DBCTL_CONSOLE: if (securelevel > 0) return (sysctl_int_lower(oldp, oldlenp, newp, newlen, &db_console)); else { ctlval = db_console; if ((error = sysctl_int(oldp, oldlenp, newp, newlen, &ctlval)) || newp == NULL) return (error); if (ctlval != 1 && ctlval != 0) return (EINVAL); db_console = ctlval; return (0); } break; case DBCTL_LOG: return (sysctl_int(oldp, oldlenp, newp, newlen, &db_log)); case DBCTL_TRIGGER: if (newp && db_console) { struct process *pr = curproc->p_p; if (securelevel < 1 || (pr->ps_flags & PS_CONTROLT && cn_tab && cn_tab->cn_dev == pr->ps_session->s_ttyp->t_dev)) { Debugger(); newp = NULL; } else return (ENODEV); } return (sysctl_rdint(oldp, oldlenp, newp, 0)); default: return (EOPNOTSUPP); } /* NOTREACHED */ }
/*
 * u_int initarm(...)
 *
 * Initial entry point on startup. This gets called before main() is
 * entered.
 * It should be responsible for setting up everything that must be
 * in place when main is called.
 * This includes
 *   Taking a copy of the boot configuration structure.
 *   Initialising the physical console so characters can be printed.
 *   Setting up page tables for the kernel
 *   Relocating the kernel to the bottom of physical memory
 *
 * Returns the initial kernel stack pointer (top of the SVC stack).
 */
u_int
initarm(void *arg)
{
	extern vaddr_t xscale_cache_clean_addr;
#ifdef DIAGNOSTIC
	extern vsize_t xscale_minidata_clean_size;
#endif
	int loop;
	int loop1;
	u_int l1pagetable;
	paddr_t memstart;
	psize_t memsize;

	/*
	 * Clear out the 7-segment display.  Whee, the first visual
	 * indication that we're running kernel code.
	 */
	iq80321_7seg(' ', ' ');

	/* Calibrate the delay loop. */
	i80321_calibrate_delay();

	i80321_hardclock_hook = iq80321_hardclock_hook;

	/*
	 * Since we map the on-board devices VA==PA, and the kernel
	 * is running VA==PA, it's possible for us to initialize
	 * the console now.
	 */
	consinit();

#ifdef VERBOSE_INIT_ARM
	/* Talk to the user */
	printf("\nNetBSD/evbarm (IQ80321) booting ...\n");
#endif

	/*
	 * Heads up ... Setup the CPU / MMU / TLB functions
	 */
	if (set_cpufuncs())
		panic("CPU not recognized!");

	/*
	 * We are currently running with the MMU enabled and the
	 * entire address space mapped VA==PA, except for the
	 * first 64M of RAM is also double-mapped at 0xc0000000.
	 * There is an L1 page table at 0xa0004000.
	 */

	/*
	 * Fetch the SDRAM start/size from the i80321 SDRAM configuration
	 * registers.
	 */
	i80321_sdram_bounds(&obio_bs_tag, VERDE_PMMR_BASE + VERDE_MCU_BASE,
	    &memstart, &memsize);

#ifdef VERBOSE_INIT_ARM
	printf("initarm: Configuring system ...\n");
#endif

	/* Fake bootconfig structure for the benefit of pmap.c */
	/* XXX must make the memory description h/w independent */
	bootconfig.dramblocks = 1;
	bootconfig.dram[0].address = memstart;
	bootconfig.dram[0].pages = memsize / PAGE_SIZE;

	/*
	 * Set up the variables that define the availability of
	 * physical memory.  For now, we're going to set
	 * physical_freestart to 0xa0200000 (where the kernel
	 * was loaded), and allocate the memory we need downwards.
	 * If we get too close to the L1 table that we set up, we
	 * will panic.  We will update physical_freestart and
	 * physical_freeend later to reflect what pmap_bootstrap()
	 * wants to see.
	 *
	 * XXX pmap_bootstrap() needs an enema.
	 */
	physical_start = bootconfig.dram[0].address;
	physical_end = physical_start + (bootconfig.dram[0].pages * PAGE_SIZE);

	/*
	 * NOTE(review): 0xa0009000 is presumably chosen to sit just above
	 * the boot firmware's L1 table at 0xa0004000 (see comment above) —
	 * confirm against the board's RedBoot memory layout.
	 */
	physical_freestart = 0xa0009000UL;
	physical_freeend = 0xa0200000UL;

	physmem = (physical_end - physical_start) / PAGE_SIZE;

#ifdef VERBOSE_INIT_ARM
	/* Tell the user about the memory */
	printf("physmemory: %d pages at 0x%08lx -> 0x%08lx\n", physmem,
	    physical_start, physical_end - 1);
#endif

	/*
	 * Okay, the kernel starts 2MB in from the bottom of physical
	 * memory.  We are going to allocate our bootstrap pages downwards
	 * from there.
	 *
	 * We need to allocate some fixed page tables to get the kernel
	 * going.  We allocate one page directory and a number of page
	 * tables and store the physical addresses in the kernel_pt_table
	 * array.
	 *
	 * The kernel page directory must be on a 16K boundary.  The page
	 * tables must be on 4K boundaries.  What we do is allocate the
	 * page directory on the first 16K boundary that we encounter, and
	 * the page tables on 4K boundaries otherwise.  Since we allocate
	 * at least 3 L2 page tables, we are guaranteed to encounter at
	 * least one 16K aligned region.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Allocating page tables\n");
#endif

	free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;

#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%08x)\n",
	    physical_freestart, free_pages, free_pages);
#endif

	/*
	 * Define a macro to simplify memory allocation.
	 *
	 * NOTE(review): these are deliberately multi-statement macros that
	 * are NOT do{}while(0)-wrapped; they must only ever be used as
	 * full statements, never as an un-braced if/else body.  All uses
	 * below respect that.
	 */
#define	valloc_pages(var, np)				\
	alloc_pages((var).pv_pa, (np));			\
	(var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start;

#define alloc_pages(var, np)				\
	physical_freeend -= ((np) * PAGE_SIZE);		\
	if (physical_freeend < physical_freestart)	\
		panic("initarm: out of memory");	\
	(var) = physical_freeend;			\
	free_pages -= (np);				\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

	loop1 = 0;
	kernel_l1pt.pv_pa = 0;
	kernel_l1pt.pv_va = 0;
	for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
		/* Are we 16KB aligned for an L1 ? */
		if (((physical_freeend - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) == 0
		    && kernel_l1pt.pv_pa == 0) {
			valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
		} else {
			valloc_pages(kernel_pt_table[loop1],
			    L2_TABLE_SIZE / PAGE_SIZE);
			++loop1;
		}
	}

	/* This should never be able to happen but better confirm that. */
	if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0)
		panic("initarm: Failed to align the kernel page directory");

	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	alloc_pages(systempage.pv_pa, 1);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

	/* Allocate enough pages for cleaning the Mini-Data cache. */
	KASSERT(xscale_minidata_clean_size <= PAGE_SIZE);
	valloc_pages(minidataclean, 1);

#ifdef VERBOSE_INIT_ARM
	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
	    irqstack.pv_va);
	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
	    abtstack.pv_va);
	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
	    undstack.pv_va);
	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
	    kernelstack.pv_va);
#endif

	/*
	 * XXX Defer this to later so that we can reclaim the memory
	 * XXX used by the RedBoot page tables.
	 */
	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);

	/*
	 * Ok we have allocated physical pages for the primary kernel
	 * page tables
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
#endif

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_pa;

	/* Map the L2 pages tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00400000 - 1),
	    &kernel_pt_table[KERNEL_PT_SYS]);
	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_KERNEL + loop]);
	pmap_link_l2pt(l1pagetable, IQ80321_IOPXS_VBASE,
	    &kernel_pt_table[KERNEL_PT_IOPXS]);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);

#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	/* Now we fill in the L2 pagetable for the kernel static code/data */
	{
		extern char etext[], _end[];
		size_t textsize = (uintptr_t) etext - KERNEL_TEXT_BASE;
		size_t totalsize = (uintptr_t) _end - KERNEL_TEXT_BASE;
		u_int logical;

		/* Round both sizes up to whole pages. */
		textsize = (textsize + PGOFSET) & ~PGOFSET;
		totalsize = (totalsize + PGOFSET) & ~PGOFSET;

		logical = 0x00200000;	/* offset of kernel in RAM */

		/*
		 * NOTE(review): text is mapped writable here (RW, not RX);
		 * presumably intentional at bootstrap time — confirm.
		 */
		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, totalsize - textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	}

#ifdef VERBOSE_INIT_ARM
	printf("Constructing L2 page tables\n");
#endif

	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* The L1 table itself must be mapped as page-table memory. */
	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	}

	/* Map the Mini-Data cache clean area. */
	xscale_setup_minidata(l1pagetable, minidataclean.pv_va,
	    minidataclean.pv_pa);

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* Map the statically mapped devices. */
	pmap_devmap_bootstrap(l1pagetable, iq80321_devmap);

	/*
	 * Give the XScale global cache clean code an appropriately
	 * sized chunk of unmapped VA space starting at 0xff000000
	 * (our device mappings end before this address).
	 */
	xscale_cache_clean_addr = 0xff000000U;

	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

	/*
	 * Update the physical_freestart/physical_freeend/free_pages
	 * variables.
	 */
	{
		extern char _end[];

		physical_freestart = physical_start +
		    (((((uintptr_t) _end) + PGOFSET) & ~PGOFSET) -
		     KERNEL_BASE);
		physical_freeend = physical_end;
		free_pages =
		    (physical_freeend - physical_freestart) / PAGE_SIZE;
	}

	/* Switch tables */
#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%x)\n",
	    physical_freestart, free_pages, free_pages);
	printf("switching to new L1 page table  @%#lx...", kernel_l1pt.pv_pa);
#endif

	/*
	 * Install the new translation table base and flush the TLB.
	 * Domain access is opened up for the switch and then restricted
	 * to client access for the kernel domain only.
	 */
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Moved from cpu_startup() as data_abort_handler() references
	 * this during uvm init
	 */
	proc0paddr = (struct user *)kernelstack.pv_va;
	lwp0.l_addr = proc0paddr;

#ifdef VERBOSE_INIT_ARM
	printf("done!\n");
#endif

#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

	arm32_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("init subsystems: stacks ");
#endif

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

	/*
	 * Well we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler.
	 * Until then we will use a handler that just panics but tells us
	 * why.
	 * Initialisation of the vectors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("vectors ");
#endif
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;

	/* Initialise the undefined instruction handlers */
#ifdef VERBOSE_INIT_ARM
	printf("undefined ");
#endif
	undefined_init();

	/* Load memory into UVM. */
#ifdef VERBOSE_INIT_ARM
	printf("page ");
#endif
	uvm_setpagesize();	/* initialize PAGE_SIZE-dependent variables */
	uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
	    atop(physical_freestart), atop(physical_freeend),
	    VM_FREELIST_DEFAULT);

	/* Boot strap pmap telling it where the kernel page table is */
#ifdef VERBOSE_INIT_ARM
	printf("pmap ");
#endif
	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);

	/* Setup the IRQ system */
#ifdef VERBOSE_INIT_ARM
	printf("irq ");
#endif
	i80321_intr_init();
#ifdef VERBOSE_INIT_ARM
	printf("done.\n");
#endif

#ifdef BOOTHOWTO
	boothowto = BOOTHOWTO;
#endif

#if NKSYMS || defined(DDB) || defined(LKM)
	/* Firmware doesn't load symbols. */
	ksyms_init(0, NULL, NULL);
#endif

#ifdef DDB
	db_machine_init();
	if (boothowto & RB_KDB)
		Debugger();
#endif

	/* We return the new stack pointer address */
	return(kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}
/*
 * kdp_call: unconditionally drop into the kernel debugger.
 *
 * Thin wrapper so machine_startup has a single named entry point for
 * the "enter debugger at boot" path; the string is shown as the reason.
 */
void
kdp_call(void)
{
	Debugger("inline call to debugger(machine_startup)");
}
/*
 * Go through the rigmarole of shutting down..
 * this used to be in machdep.c but I'll be dammned if I could see
 * anything machine dependant in it.
 *
 * howto is a mask of RB_* flags (RB_NOSYNC, RB_HALT, RB_DUMP, ...).
 * Never returns: ends in the shutdown_final event handlers (which are
 * expected to reset/halt the machine) or a safety spin loop.
 */
static void
boot(int howto)
{
	/*
	 * Get rid of any user scheduler baggage and then give
	 * us a high priority.
	 */
	if (curthread->td_release)
		curthread->td_release(curthread);
	lwkt_setpri_self(TDPRI_MAX);

	/* collect extra flags that shutdown_nice might have set */
	howto |= shutdown_howto;

#ifdef SMP
	/*
	 * We really want to shutdown on the BSP.  Subsystems such as ACPI
	 * can't power-down the box otherwise.
	 */
	if (smp_active_mask > 1) {
		kprintf("boot() called on cpu#%d\n", mycpu->gd_cpuid);
	}
	if (panicstr == NULL && mycpu->gd_cpuid != 0) {
		kprintf("Switching to cpu #0 for shutdown\n");
		lwkt_setcpu_self(globaldata_find(0));
	}
#endif
	/*
	 * Do any callouts that should be done BEFORE syncing the filesystems.
	 */
	EVENTHANDLER_INVOKE(shutdown_pre_sync, howto);

	/*
	 * Try to get rid of any remaining FS references.  The calling
	 * process, proc0, and init may still hold references.  The
	 * VFS cache subsystem may still hold a root reference to root.
	 *
	 * XXX this needs work.  We really need to SIGSTOP all remaining
	 * processes in order to avoid blowups due to proc0's filesystem
	 * references going away.  For now just make sure that the init
	 * process is stopped.
	 */
	if (panicstr == NULL) {
		shutdown_cleanup_proc(curproc);
		shutdown_cleanup_proc(&proc0);
		if (initproc) {
			if (initproc != curproc) {
				ksignal(initproc, SIGSTOP);
				tsleep(boot, 0, "shutdn", hz / 20);
			}
			shutdown_cleanup_proc(initproc);
		}
		vfs_cache_setroot(NULL, NULL);
	}

	/*
	 * Now sync filesystems.  waittime doubles as a "have we synced
	 * already" latch so a recursive shutdown doesn't re-sync.
	 */
	if (!cold && (howto & RB_NOSYNC) == 0 && waittime < 0) {
		int iter, nbusy, pbusy;

		waittime = 0;
		kprintf("\nsyncing disks... ");

		sys_sync(NULL);	/* YYY was sync(&proc0, NULL). why proc0 ? */

		/*
		 * With soft updates, some buffers that are
		 * written will be remarked as dirty until other
		 * buffers are written.
		 *
		 * The iteration counter is reset whenever progress is
		 * made (busy count dropped), so we only give up after
		 * 20 consecutive iterations without progress.
		 */
		for (iter = pbusy = 0; iter < 20; iter++) {
			nbusy = scan_all_buffers(shutdown_busycount1, NULL);
			if (nbusy == 0)
				break;
			kprintf("%d ", nbusy);
			if (nbusy < pbusy)
				iter = 0;
			pbusy = nbusy;
			/*
			 * XXX:
			 * Process soft update work queue if buffers don't sync
			 * after 6 iterations by permitting the syncer to run.
			 */
			if (iter > 5)
				bio_ops_sync(NULL);

			sys_sync(NULL); /* YYY was sync(&proc0, NULL). why proc0 ? */
			tsleep(boot, 0, "shutdn", hz * iter / 20 + 1);
		}
		kprintf("\n");
		/*
		 * Count only busy local buffers to prevent forcing
		 * a fsck if we're just a client of a wedged NFS server
		 */
		nbusy = scan_all_buffers(shutdown_busycount2, NULL);
		if (nbusy) {
			/*
			 * Failed to sync all blocks. Indicate this and don't
			 * unmount filesystems (thus forcing an fsck on reboot).
			 */
			kprintf("giving up on %d buffers\n", nbusy);
#ifdef DDB
			if (debugger_on_panic)
				Debugger("busy buffer problem");
#endif /* DDB */
			tsleep(boot, 0, "shutdn", hz * 5 + 1);
		} else {
			kprintf("done\n");
			/*
			 * Unmount filesystems
			 */
			if (panicstr == NULL)
				vfs_unmountall();
		}
		tsleep(boot, 0, "shutdn", hz / 10 + 1);
	}

	print_uptime();

	/*
	 * Dump before doing post_sync shutdown ops
	 */
	crit_enter();
	if ((howto & (RB_HALT|RB_DUMP)) == RB_DUMP && !cold) {
		dumpsys();
	}

	/*
	 * Ok, now do things that assume all filesystem activity has
	 * been completed.  This will also call the device shutdown
	 * methods.
	 */
	EVENTHANDLER_INVOKE(shutdown_post_sync, howto);

	/* Now that we're going to really halt the system... */
	EVENTHANDLER_INVOKE(shutdown_final, howto);

	for(;;) ;	/* safety against shutdown_reset not working */
	/* NOTREACHED */
}
/*
 * u_int initarm(...)
 *
 * Initial entry point on startup. This gets called before main() is
 * entered.
 * It should be responsible for setting up everything that must be
 * in place when main is called.
 * This includes
 *   Taking a copy of the boot configuration structure.
 *   Initialising the physical console so characters can be printed.
 *   Setting up page tables for the kernel
 *   Relocating the kernel to the bottom of physical memory
 *
 * Returns the initial kernel stack pointer (top of the SVC stack).
 */
u_int
initarm(void *arg0, void *arg1, void *arg2)
{
	extern vaddr_t xscale_cache_clean_addr;
	extern cpu_kcore_hdr_t cpu_kcore_hdr;
	int loop;
	int loop1;
	u_int l1pagetable;
	pv_addr_t kernel_l1pt;
	paddr_t memstart;
	psize_t memsize;
	extern u_int32_t esym;	/* &_end if no symbols are loaded */

#ifdef DIAGNOSTIC
	extern vsize_t xscale_minidata_clean_size; /* used in KASSERT */
#endif

	/* setup a serial console for very early boot */
	consinit();

	/*
	 * Heads up ... Setup the CPU / MMU / TLB functions
	 */
	if (set_cpufuncs())
		panic("cpu not recognized!");

	/*
	 * Examine the boot args string for options we need to know about
	 * now.
	 */
	/* XXX should really be done after setting up the console, but we
	 * XXX need to parse the console selection flags right now. */
	process_kernel_args((char *)0xa0200000 - MAX_BOOT_STRING - 1);

	/* Calibrate the delay loop. */
#if 1
	i80321_calibrate_delay();
#endif

	/* Talk to the user */
	printf("\nOpenBSD/armish booting ...\n");

	/*
	 * Reset the secondary PCI bus.  RedBoot doesn't stop devices
	 * on the PCI bus before handing us control, so we have to
	 * do this.
	 *
	 * XXX This is arguably a bug in RedBoot, and doing this reset
	 * XXX could be problematic in the future if we encounter an
	 * XXX application where the PPB in the i80312 is used as a
	 * XXX PPB.
	 */

//#define VERBOSE_INIT_ARM

	/*
	 * Fetch the SDRAM start/size from the i80312 SDRAM configuration
	 * registers.
	 */
	i80321_sdram_bounds(&obio_bs_tag, VERDE_PMMR_BASE + VERDE_MCU_BASE,
	    &memstart, &memsize);

/*
 * NOTE(review): this unconditional "#define DEBUG" looks like leftover
 * debugging — it force-enables all the DEBUG printfs below.  Confirm
 * whether it should be removed or moved to the build configuration.
 */
#define DEBUG
#ifdef DEBUG
	printf("initarm: Configuring system ...\n");
#endif

	/* Fake bootconfig structure for the benefit of pmap.c */
	/* XXX must make the memory description h/w independant */
	bootconfig.dramblocks = 1;
	bootconfig.dram[0].address = memstart;
	bootconfig.dram[0].pages = memsize / PAGE_SIZE;

	/*
	 * Set up the variables that define the availability of
	 * physical memory.  For now, we're going to set
	 * physical_freestart to 0xa0200000 (where the kernel
	 * was loaded), and allocate the memory we need downwards.
	 * If we get too close to the page tables that RedBoot
	 * set up, we will panic.  We will update physical_freestart
	 * and physical_freeend later to reflect what pmap_bootstrap()
	 * wants to see.
	 *
	 * XXX pmap_bootstrap() needs an enema.
	 */
	physical_start = bootconfig.dram[0].address;
	physical_end = physical_start + (bootconfig.dram[0].pages * PAGE_SIZE);

	physical_freestart = 0xa0009000UL;
	physical_freeend = 0xa0200000UL;

	physmem = (physical_end - physical_start) / PAGE_SIZE;

#ifdef DEBUG
	/* Tell the user about the memory */
	printf("physmemory: %d pages at 0x%08lx -> 0x%08lx\n", physmem,
	    physical_start, physical_end - 1);
#endif

	/*
	 * Okay, the kernel starts 2MB in from the bottom of physical
	 * memory.  We are going to allocate our bootstrap pages downwards
	 * from there.
	 *
	 * We need to allocate some fixed page tables to get the kernel
	 * going.  We allocate one page directory and a number of page
	 * tables and store the physical addresses in the kernel_pt_table
	 * array.
	 *
	 * The kernel page directory must be on a 16K boundary.  The page
	 * tables must be on 4K boundaries.  What we do is allocate the
	 * page directory on the first 16K boundary that we encounter, and
	 * the page tables on 4K boundaries otherwise.  Since we allocate
	 * at least 3 L2 page tables, we are guaranteed to encounter at
	 * least one 16K aligned region.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Allocating page tables\n");
#endif

	free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;

#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%08x)\n",
	    physical_freestart, free_pages, free_pages);
#endif

	/*
	 * Define a macro to simplify memory allocation.
	 *
	 * NOTE(review): multi-statement macros, not do{}while(0)-wrapped;
	 * only ever use them as full statements (all uses below do).
	 */
#define	valloc_pages(var, np)				\
	alloc_pages((var).pv_pa, (np));			\
	(var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start;

#define alloc_pages(var, np)				\
	physical_freeend -= ((np) * PAGE_SIZE);		\
	if (physical_freeend < physical_freestart)	\
		panic("initarm: out of memory");	\
	(var) = physical_freeend;			\
	free_pages -= (np);				\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

	loop1 = 0;
	kernel_l1pt.pv_pa = 0;
	for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
		/* Are we 16KB aligned for an L1 ? */
		if (((physical_freeend - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) == 0
		    && kernel_l1pt.pv_pa == 0) {
			valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
		} else {
			valloc_pages(kernel_pt_table[loop1],
			    L2_TABLE_SIZE / PAGE_SIZE);
			++loop1;
		}
	}

	/* This should never be able to happen but better confirm that. */
	if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0)
		panic("initarm: Failed to align the kernel page directory");

	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	alloc_pages(systempage.pv_pa, 1);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

	/* Allocate enough pages for cleaning the Mini-Data cache. */
	KASSERT(xscale_minidata_clean_size <= PAGE_SIZE);
	valloc_pages(minidataclean, 1);

#ifdef VERBOSE_INIT_ARM
	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
	    irqstack.pv_va);
	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
	    abtstack.pv_va);
	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
	    undstack.pv_va);
	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
	    kernelstack.pv_va);
#endif

	/*
	 * XXX Defer this to later so that we can reclaim the memory
	 * XXX used by the RedBoot page tables.
	 */
	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);

	/*
	 * Ok we have allocated physical pages for the primary kernel
	 * page tables
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
#endif

	/*
	 * Now we start construction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_pa;

#ifdef HIGH_VECT
	/* Map the L2 pages tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00400000 - 1),
	    &kernel_pt_table[KERNEL_PT_SYS]);
#else
	/* Map the L2 pages tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, 0x00000000,
	    &kernel_pt_table[KERNEL_PT_SYS]);
#endif

	for (loop = 0; loop < KERNEL_PT_KERNEL_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_KERNEL + loop]);

	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; loop++)
		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);

#if 0
	pmap_link_l2pt(l1pagetable, IQ80321_IOPXS_VBASE,
	    &kernel_pt_table[KERNEL_PT_IOPXS]);
#endif

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);

#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	/* Now we fill in the L2 pagetable for the kernel static code/data
	 * and the symbol table. */
	{
		extern char etext[];
#ifdef VERBOSE_INIT_ARM
		extern char _end[];
#endif
		size_t textsize = (u_int32_t) etext - KERNEL_TEXT_BASE;
		/* esym covers the loaded symbol table as well as _end */
		size_t totalsize = esym - KERNEL_TEXT_BASE;
		u_int logical;

#ifdef VERBOSE_INIT_ARM
		printf("kernelsize text %x total %x end %xesym %x\n",
		    textsize, totalsize, _end, esym);
#endif

		textsize = round_page(textsize);
		totalsize = round_page(totalsize);

		logical = 0x00200000;	/* offset of kernel in RAM */

		/* Update dump information */
		cpu_kcore_hdr.kernelbase = KERNEL_BASE;
		cpu_kcore_hdr.kerneloffs = logical;
		cpu_kcore_hdr.staticsize = totalsize;

		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, textsize,
		    PROT_READ | PROT_WRITE, PTE_CACHE);
		pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, totalsize - textsize,
		    PROT_READ | PROT_WRITE, PTE_CACHE);
	}

#ifdef VERBOSE_INIT_ARM
	printf("Constructing L2 page tables\n");
#endif

	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, PROT_READ | PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, PROT_READ | PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, PROT_READ | PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * PAGE_SIZE, PROT_READ | PROT_WRITE, PTE_CACHE);

	/* The L1 table itself is mapped as page-table memory. */
	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, PROT_READ | PROT_WRITE, PTE_PAGETABLE);

	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    PROT_READ | PROT_WRITE, PTE_PAGETABLE);
	}

	/* Map the Mini-Data cache clean area. */
	xscale_setup_minidata(l1pagetable, minidataclean.pv_va,
	    minidataclean.pv_pa);

	/* Map the vector page. */
#ifdef HIGH_VECT
	pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa,
	    PROT_READ | PROT_WRITE, PTE_CACHE);
#else
	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
	    PROT_READ | PROT_WRITE, PTE_CACHE);
#endif

	/* Map the statically mapped devices. */
	pmap_devmap_bootstrap(l1pagetable, iq80321_devmap);

	/*
	 * Give the XScale global cache clean code an appropriately
	 * sized chunk of unmapped VA space starting at 0xff000000
	 * (our device mappings end before this address).
	 */
	xscale_cache_clean_addr = 0xff000000U;

	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

	/*
	 * Update the physical_freestart/physical_freeend/free_pages
	 * variables.
	 */
	{
		physical_freestart = physical_start - KERNEL_BASE +
		    round_page(esym);
		physical_freeend = physical_end;
		free_pages =
		    (physical_freeend - physical_freestart) / PAGE_SIZE;
	}
#ifdef VERBOSE_INIT_ARM
	printf("physical_freestart %x end %x\n", physical_freestart,
	    physical_freeend);
#endif

	/* be a client to all domains */
	cpu_domains(0x55555555);
	/* Switch tables */
#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%x)\n",
	    physical_freestart, free_pages, free_pages);
	printf("switching to new L1 page table  @%#lx...", kernel_l1pt.pv_pa);
#endif

	/* Install new TTB, flush TLB, then restrict domain access. */
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	setttb(kernel_l1pt.pv_pa);
	cpu_tlb_flushID();
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Moved from cpu_startup() as data_abort_handler() references
	 * this during uvm init
	 */
	proc0paddr = (struct user *)kernelstack.pv_va;
	proc0.p_addr = proc0paddr;

#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

#ifdef HIGH_VECT
	arm32_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL);
#else
	arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);
#endif

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("init subsystems: stacks ");
#endif

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

	/*
	 * Well we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler.
	 * Until then we will use a handler that just panics but tells us
	 * why.
	 * Initialisation of the vectors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
#ifdef VERBOSE_INIT_ARM
	printf("vectors ");
#endif
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;

	/* Initialise the undefined instruction handlers */
#ifdef VERBOSE_INIT_ARM
	printf("undefined ");
#endif
	undefined_init();

	/* Load memory into UVM. */
#ifdef VERBOSE_INIT_ARM
	printf("page ");
#endif
	uvm_setpagesize();	/* initialize PAGE_SIZE-dependent variables */
	uvm_page_physload(atop(physical_freestart), atop(physical_freeend),
	    atop(physical_freestart), atop(physical_freeend), 0);

	/* Boot strap pmap telling it where the kernel page table is */
#ifdef VERBOSE_INIT_ARM
	printf("pmap ");
#endif
	pmap_bootstrap((pd_entry_t *)kernel_l1pt.pv_va, KERNEL_VM_BASE,
	    KERNEL_VM_BASE + KERNEL_VM_SIZE);

	/* Update dump information */
	cpu_kcore_hdr.pmap_kernel_l1 = (u_int32_t)pmap_kernel()->pm_l1;
	cpu_kcore_hdr.pmap_kernel_l2 = (u_int32_t)&(pmap_kernel()->pm_l2);

	/* Setup the IRQ system */
#ifdef VERBOSE_INIT_ARM
	printf("irq ");
#endif
	i80321intc_intr_init();

#ifdef VERBOSE_INIT_ARM
	printf("done.\n");
#endif

#ifdef DDB
	db_machine_init();

	/* Firmware doesn't load symbols. */
	ddb_init();

	if (boothowto & RB_KDB)
		Debugger();
#endif

	/* We return the new stack pointer address */
	return(kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}
/*
 * Halt or reboot the machine after syncing/dumping according to howto.
 *
 * howto: mask of RB_* flags (RB_NOSYNC, RB_DUMP, RB_HALT, RB_POWERDOWN,
 *        RB_SINGLE, RB_KDB).
 * what:  optional boot string handed to the next boot; "-s"/"-d" flags
 *        are appended from howto.
 * Never returns.
 */
void
cpu_reboot(int howto, char *what)
{
	/* syncing latches so a reboot from within shutdown won't re-sync */
	static int syncing;
	static char str[256];
	char *ap = str, *ap1 = ap;

	boothowto = howto;
	if (!cold && !(howto & RB_NOSYNC) && !syncing) {
		syncing = 1;
		vfs_shutdown();		/* sync */
		resettodr();		/* set wall clock */
	}

	/* Block interrupts for the rest of shutdown. */
	splhigh();

	if (!cold && (howto & RB_DUMP))
		ibm4xx_dumpsys();

	doshutdownhooks();

	pmf_system_shutdown(boothowto);

	if ((howto & RB_POWERDOWN) == RB_POWERDOWN) {
	  /* Power off here if we know how...*/
	}

	if (howto & RB_HALT) {
		printf("halted\n\n");

#if 0
		goto reboot;	/* XXX for now... */
#endif
#ifdef DDB
		printf("dropping to debugger\n");
		while(1)
			Debugger();
#endif
	}

	printf("rebooting\n\n");

	/*
	 * Build the boot string: copy "what" (if it fits, leaving room
	 * for " -sd\0"), then append flag characters.  ap1 remembers
	 * where the flags start so an empty "-" can be stripped below.
	 */
	if (what && *what) {
		if (strlen(what) > sizeof str - 5)
			printf("boot string too large, ignored\n");
		else {
			strcpy(str, what);
			ap1 = ap = str + strlen(str);
			*ap++ = ' ';
		}
	}

	*ap++ = '-';
	if (howto & RB_SINGLE)
		*ap++ = 's';
	if (howto & RB_KDB)
		*ap++ = 'd';
	*ap++ = 0;
	/* If no flag chars were added, erase the dangling " -". */
	if (ap[-2] == '-')
		*ap1 = 0;

	/* flush cache for msgbuf */
	__syncicache((void *)msgbuf_paddr, round_page(MSGBUFSIZE));

#if 0
 reboot:
#endif
	ppc4xx_reset();

	printf("ppc4xx_reset() failed!\n");
#ifdef DDB
	while(1)
		Debugger();
#else
	while (1)
		/* nothing */;
#endif
}
/*
 * mach_init:
 *
 *	Do all the stuff that locore normally does before calling main().
 *
 *	memsize32	RAM size handed in by the bootstrap (sign-extended
 *			to intptr_t below; the top 512 bytes hold the boot
 *			command string — see the memcpy near the end).
 *	bim		bootinfo magic value from the bootstrap.
 *	bip32		32-bit pointer to the bootinfo block.
 *
 *	Side effects: clears BSS when firmware-loaded, installs exception
 *	vectors, sets cpu model/speed globals, initializes the console,
 *	loads physical memory into UVM and bootstraps pmap.
 */
void
mach_init(int32_t memsize32, u_int bim, int32_t bip32)
{
	intptr_t memsize = (int32_t)memsize32;
	char *kernend;
	char *bip = (char *)(intptr_t)(int32_t)bip32;
	u_long first, last;
	extern char edata[], end[];
	const char *bi_msg;
#if NKSYMS || defined(DDB) || defined(MODULAR)
	char *ssym = 0;
	struct btinfo_symtab *bi_syms;
#endif
	struct btinfo_howto *bi_howto;

	/*
	 * Clear the BSS segment (if needed).
	 *
	 * If a valid ELF header sits at end[], a bootloader appended a
	 * symbol table and already cleared BSS for us; otherwise assume
	 * the firmware "bfd" load path, which does not clear BSS.
	 */
	if (memcmp(((Elf_Ehdr *)end)->e_ident, ELFMAG, SELFMAG) == 0 &&
	    ((Elf_Ehdr *)end)->e_ident[EI_CLASS] == ELFCLASS) {
		esym = end;
#if NKSYMS || defined(DDB) || defined(MODULAR)
		/* e_entry here carries the symbol-table size, not an
		 * entry point — TODO confirm against the bootloader. */
		esym += ((Elf_Ehdr *)end)->e_entry;
#endif
		kernend = (char *)mips_round_page(esym);
		/*
		 * We don't have to clear BSS here
		 * since our bootloader already does it.
		 */
#if 0
		memset(edata, 0, end - edata);
#endif
	} else {
		kernend = (void *)mips_round_page(end);
		/*
		 * No symbol table, so assume we are loaded by
		 * the firmware directly with "bfd" command.
		 * The firmware loader doesn't clear BSS of
		 * a loaded kernel, so do it here.
		 */
		memset(edata, 0, kernend - edata);
	}

	/*
	 * Copy exception-dispatch code down to exception vector.
	 * Initialize locore-function vector.
	 * Clear out the I and D caches.
	 */
	mips_vector_init(NULL, false);

	/* Check for valid bootinfo passed from bootstrap */
	if (bim == BOOTINFO_MAGIC) {
		struct btinfo_magic *bi_magic;

		bootinfo = bip;
		bi_magic = lookup_bootinfo(BTINFO_MAGIC);
		if (bi_magic == NULL) {
			bi_msg = "missing bootinfo structure";
			/* reuse bim to carry the bad value for the
			 * diagnostic printf below */
			bim = (uintptr_t)bip;
		} else if (bi_magic->magic != BOOTINFO_MAGIC) {
			bi_msg = "invalid bootinfo structure";
			bim = bi_magic->magic;
		} else
			bi_msg = NULL;
	} else {
		bi_msg = "invalid bootinfo (standalone boot?)";
	}

#if NKSYMS || defined(DDB) || defined(MODULAR)
	bi_syms = lookup_bootinfo(BTINFO_SYMTAB);

	/* Load symbol table if present */
	if (bi_syms != NULL) {
		ssym = (void *)(intptr_t)bi_syms->ssym;
		esym = (void *)(intptr_t)bi_syms->esym;
		kernend = (void *)mips_round_page(esym);
	}
#endif

	bi_howto = lookup_bootinfo(BTINFO_HOWTO);
	if (bi_howto != NULL)
		boothowto = bi_howto->bi_howto;

	/* Identify the board and set the model string accordingly. */
	cobalt_id = read_board_id();
	if (cobalt_id >= COBALT_MODELS || cobalt_model[cobalt_id] == NULL)
		cpu_setmodel("Cobalt unknown model (board ID %u)",
		    cobalt_id);
	else
		cpu_setmodel("%s", cobalt_model[cobalt_id]);

	switch (cobalt_id) {
	case COBALT_ID_QUBE2700:
	case COBALT_ID_RAQ:
		cpuspeed = 150; /* MHz */
		break;
	case COBALT_ID_QUBE2:
	case COBALT_ID_RAQ2:
		cpuspeed = 250; /* MHz */
		break;
	default:
		/* assume the fastest, so that delay(9) works */
		cpuspeed = 250;
		break;
	}
	curcpu()->ci_cpu_freq = cpuspeed * 1000 * 1000;
	curcpu()->ci_cycles_per_hz = (curcpu()->ci_cpu_freq + hz / 2) / hz;
	curcpu()->ci_divisor_delay =
	    ((curcpu()->ci_cpu_freq + (1000000 / 2)) / 1000000);
	/* all models have Rm5200, which is CPU_MIPS_DOUBLE_COUNT */
	curcpu()->ci_cycles_per_hz /= 2;
	curcpu()->ci_divisor_delay /= 2;

	physmem = btoc(memsize - MIPS_KSEG0_START);

	consinit();

	KASSERT(&lwp0 == curlwp);
	/* Now that we have a console, report any earlier bootinfo problem. */
	if (bi_msg != NULL)
		printf("%s: magic=%#x bip=%p\n", bi_msg, bim, bip);

	uvm_setpagesize();

	/*
	 * The boot command is passed in the top 512 bytes,
	 * so don't clobber that.
	 */
	mem_clusters[0].start = 0;
	mem_clusters[0].size = ctob(physmem) - 512;
	mem_cluster_cnt = 1;

	/* Save the boot command string, then scrub its memory. */
	memcpy(bootstring, (char *)(memsize - 512), 512);
	memset((char *)(memsize - 512), 0, 512);
	bootstring[511] = '\0';

	decode_bootstring();

#if NKSYMS || defined(DDB) || defined(MODULAR)
	/* init symbols if present */
	if ((bi_syms != NULL) && (esym != NULL))
		ksyms_addsyms_elf(esym - ssym, ssym, esym);
#endif
	KASSERT(&lwp0 == curlwp);
#ifdef DDB
	if (boothowto & RB_KDB)
		Debugger();
#endif
#ifdef KGDB
	if (boothowto & RB_KDB)
		kgdb_connect(0);
#endif

	/*
	 * Load the rest of the available pages into the VM system.
	 * "first" skips everything up to the end of the kernel image.
	 */
	first = round_page(MIPS_KSEG0_TO_PHYS(kernend));
	last = mem_clusters[0].start + mem_clusters[0].size;
	uvm_page_physload(atop(first), atop(last), atop(first), atop(last),
	    VM_FREELIST_DEFAULT);

	/*
	 * Initialize error message buffer (at end of core).
	 */
	mips_init_msgbuf();

	pmap_bootstrap();

	/*
	 * Allocate space for proc0's USPACE.
	 */
	mips_init_lwp0_uarea();
}
/*
 * Routine:	lck_rw_lock_shared_gen
 *
 * Acquire a read (shared) hold on lck.  Generic C slow path: spins for
 * lock_wait_time[] iterations and then, if the lock permits sleeping,
 * blocks on RW_LOCK_READER_EVENT until no writer/upgrader stands in the
 * way.  The lock's interlock is held whenever the lock words are
 * examined or modified.  With lck_rw_priv_excl set, waiting writers are
 * honored even while readers are present; otherwise readers may pile on.
 */
void
lck_rw_lock_shared_gen(
	lck_rw_t	*lck)
{
	int		i;
	wait_result_t	res;
#if	MACH_LDEBUG
	int		decrementer;
#endif	/* MACH_LDEBUG */
	boolean_t	istate;
#if	CONFIG_DTRACE
	uint64_t wait_interval = 0;	/* 0 = no wait; -1 = probes disabled */
	int slept = 0;
	int readers_at_sleep;
#endif

	istate = lck_interlock_lock(lck);
#if	CONFIG_DTRACE
	readers_at_sleep = lck->lck_rw_shared_count;
#endif

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */
	/* Wait until no writer owns or wants the lock (subject to
	 * lck_rw_priv_excl semantics). */
	while ((lck->lck_rw_want_write || lck->lck_rw_want_upgrade) &&
	       ((lck->lck_rw_shared_count == 0) || lck->lck_rw_priv_excl)) {

		i = lock_wait_time[lck->lck_rw_can_sleep ? 1 : 0];

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_START,
			     (int)lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, i, 0);
#if	CONFIG_DTRACE
		/*
		 * Start timing the delay if lockstat probes are armed.
		 * NOTE(review): this arm assigns plain -1 but the final
		 * check below compares against (unsigned)-1; on LP64 the
		 * uint64_t value 0xFFFF...FFFF != 0xFFFFFFFF, so the
		 * "disabled" marker would not be recognized — confirm
		 * against the sibling lck_rw_lock_exclusive.
		 */
		if ((lockstat_probemap[LS_LCK_RW_LOCK_SHARED_SPIN] || lockstat_probemap[LS_LCK_RW_LOCK_SHARED_BLOCK]) && wait_interval == 0) {
			wait_interval = mach_absolute_time();
		} else {
			wait_interval = -1;
		}
#endif

		if (i != 0) {
			/* Spin phase: drop the interlock while busy-waiting. */
			lck_interlock_unlock(lck, istate);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - wait no writers");
#endif	/* MACH_LDEBUG */
			while (--i != 0 &&
			       (lck->lck_rw_want_write || lck->lck_rw_want_upgrade) &&
			       ((lck->lck_rw_shared_count == 0) || lck->lck_rw_priv_excl))
				lck_rw_lock_pause(istate);
			istate = lck_interlock_lock(lck);
		}

		/* Sleep phase: recheck under the interlock, then block. */
		if (lck->lck_rw_can_sleep &&
		    (lck->lck_rw_want_write || lck->lck_rw_want_upgrade) &&
		    ((lck->lck_rw_shared_count == 0) || lck->lck_rw_priv_excl)) {
			lck->lck_r_waiting = TRUE;
			res = assert_wait(RW_LOCK_READER_EVENT(lck), THREAD_UNINT);
			if (res == THREAD_WAITING) {
				lck_interlock_unlock(lck, istate);
				res = thread_block(THREAD_CONTINUE_NULL);
#if	CONFIG_DTRACE
				slept = 1;
#endif
				istate = lck_interlock_lock(lck);
			}
		}

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_SHARED_CODE) | DBG_FUNC_END,
			     (int)lck, lck->lck_rw_want_write, lck->lck_rw_want_upgrade, res, 0);
	}

	/* Path is clear: take our shared reference. */
	lck->lck_rw_shared_count++;

	lck_interlock_unlock(lck, istate);
#if	CONFIG_DTRACE
	/* Record spin/block latency only if we timed a real wait. */
	if (wait_interval != 0 && wait_interval != (unsigned) -1) {
		if (slept == 0) {
			LOCKSTAT_RECORD2(LS_LCK_RW_LOCK_SHARED_SPIN, lck, mach_absolute_time() - wait_interval, 0);
		} else {
			LOCKSTAT_RECORD4(LS_LCK_RW_LOCK_SHARED_BLOCK, lck,
			    mach_absolute_time() - wait_interval, 0,
			    (readers_at_sleep == 0 ? 1 : 0), readers_at_sleep);
		}
	}
	LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_ACQUIRE, lck, 0);
#endif
}
/*
 * initarm:
 *
 *	Machine-dependent bootstrap for the NetWinder (NetBSD/arm).
 *	Runs with the NeTTrom-provided MMU tables still active
 *	(VA==PA plus a double map at 0xf0000000 per the comment below).
 *
 *	Responsibilities, in order: sanity-check the firmware boot info,
 *	carve the kernel's L1/L2 page tables and mode stacks out of the
 *	free memory after _end[], build the real kernel mappings, switch
 *	to them, then bring up vectors, UVM, pmap, and interrupts.
 *
 *	Returns the new SVC-mode stack pointer for the caller to adopt.
 */
u_int
initarm(void *arg)
{
	int loop;
	int loop1;
	u_int l1pagetable;
	extern char _end[];

	/*
	 * Turn the led off, then turn it yellow.
	 * 0x80 - red; 0x04 - fan; 0x02 - green.
	 */
	ISA_PUTBYTE(0x338, 0x04);
	ISA_PUTBYTE(0x338, 0x86);

	/*
	 * Set up a diagnostic console so we can see what's going
	 * on.
	 */
	cn_tab = &kcomcons;

	/* Talk to the user */
	printf("\nNetBSD/netwinder booting ...\n");

	/*
	 * Heads up ... Setup the CPU / MMU / TLB functions
	 */
	if (set_cpufuncs())
		panic("CPU not recognized!");

	/*
	 * We are currently running with the MMU enabled and the
	 * entire address space mapped VA==PA, except for the
	 * first 64MB of RAM is also double-mapped at 0xf0000000.
	 * There is an L1 page table at 0x00008000.
	 *
	 * We also have the 21285's PCI I/O space mapped where
	 * we expect it.
	 */

	printf("initarm: Configuring system ...\n");

	/*
	 * Copy out the boot info passed by the firmware.  Note that
	 * early versions of NeTTrom fill this in with bogus values,
	 * so we need to sanity check it.
	 */
	memcpy(&nwbootinfo, (void *)(KERNEL_BASE + 0x100),
	    sizeof(nwbootinfo));
#ifdef VERBOSE_INIT_ARM
	printf("NeTTrom boot info:\n");
	printf("\tpage size = 0x%08lx\n", nwbootinfo.bi_pagesize);
	printf("\tnpages = %ld (0x%08lx)\n", nwbootinfo.bi_nrpages,
	    nwbootinfo.bi_nrpages);
	printf("\trootdev = 0x%08lx\n", nwbootinfo.bi_rootdev);
	printf("\tcmdline = %s\n", nwbootinfo.bi_cmdline);
#endif
	/*
	 * Only 32MB/64MB/128MB/256MB page counts are plausible; anything
	 * else means corrupt boot info, so fall back to a safe 16MB and
	 * tag bi_pagesize with a sentinel so we can warn later.
	 */
	if (nwbootinfo.bi_nrpages != 0x02000 &&
	    nwbootinfo.bi_nrpages != 0x04000 &&
	    nwbootinfo.bi_nrpages != 0x08000 &&
	    nwbootinfo.bi_nrpages != 0x10000) {
		nwbootinfo.bi_pagesize = 0xdeadbeef;
		nwbootinfo.bi_nrpages = 0x01000;	/* 16MB */
		nwbootinfo.bi_rootdev = 0;
	}

	/* Fake bootconfig structure for the benefit of pmap.c */
	/* XXX must make the memory description h/w independent */
	bootconfig.dramblocks = 1;
	bootconfig.dram[0].address = 0;
	bootconfig.dram[0].pages = nwbootinfo.bi_nrpages;

	/*
	 * Set up the variables that define the availablilty of
	 * physical memory.
	 *
	 * Since the NetWinder NeTTrom doesn't load ELF symbols
	 * for us, we can safely assume that everything after end[]
	 * is free.  We start there and allocate upwards.
	 */
	physical_start = bootconfig.dram[0].address;
	physical_end = physical_start + (bootconfig.dram[0].pages * PAGE_SIZE);

	/* First free physical byte: page-rounded _end[], de-relocated. */
	physical_freestart = ((((vaddr_t) _end) + PGOFSET) & ~PGOFSET) -
	    KERNEL_BASE;
	physical_freeend = physical_end;
	free_pages = (physical_freeend - physical_freestart) / PAGE_SIZE;

#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%x)\n",
	    physical_freestart, free_pages, free_pages);
#endif

	physmem = (physical_end - physical_start) / PAGE_SIZE;

	/* Tell the user about the memory */
	printf("physmemory: %d pages at 0x%08lx -> 0x%08lx\n", physmem,
	    physical_start, physical_end - 1);

	/*
	 * Okay, we need to allocate some fixed page tables to get the
	 * kernel going.  We allocate one page directory and a number
	 * of page tables and store the physical addresses in the
	 * kernel_pt_table array.
	 *
	 * The kernel page directory must be on a 16K boundary.  The page
	 * tables must be on 4K boundaries.  What we do is allocate the
	 * page directory on the first 16K boundary that we encounter,
	 * and the page tables on 4K boundaries otherwise.  Since we
	 * allocate at least 3 L2 page tables, we are guaranteed to
	 * encounter at least one 16K aligned region.
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Allocating page tables\n");
#endif

	/* Define a macro to simplify memory allocation */
#define	valloc_pages(var, np)				\
	alloc_pages((var).pv_pa, (np));			\
	(var).pv_va = KERNEL_BASE + (var).pv_pa - physical_start;

	/* Bump-allocate np zeroed pages from physical_freestart. */
#define	alloc_pages(var, np)				\
	(var) = physical_freestart;			\
	physical_freestart += ((np) * PAGE_SIZE);	\
	free_pages -= (np);				\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));

	loop1 = 0;
	for (loop = 0; loop <= NUM_KERNEL_PTS; ++loop) {
		/* Are we 16KB aligned for an L1 ? */
		if ((physical_freestart & (L1_TABLE_SIZE - 1)) == 0
		    && kernel_l1pt.pv_pa == 0) {
			valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
		} else {
			valloc_pages(kernel_pt_table[loop1],
			    L2_TABLE_SIZE / PAGE_SIZE);
			++loop1;
		}
	}

	/* This should never be able to happen but better confirm that. */
	if (!kernel_l1pt.pv_pa || (kernel_l1pt.pv_pa & (L1_TABLE_SIZE-1)) != 0)
		panic("initarm: Failed to align the kernel page directory");

	/*
	 * Allocate a page for the system page mapped to V0x00000000
	 * This page will just contain the system vectors and can be
	 * shared by all processes.
	 */
	alloc_pages(systempage.pv_pa, 1);

	/* Allocate stacks for all modes */
	valloc_pages(irqstack, IRQ_STACK_SIZE);
	valloc_pages(abtstack, ABT_STACK_SIZE);
	valloc_pages(undstack, UND_STACK_SIZE);
	valloc_pages(kernelstack, UPAGES);

#ifdef VERBOSE_INIT_ARM
	printf("IRQ stack: p0x%08lx v0x%08lx\n", irqstack.pv_pa,
	    irqstack.pv_va);
	printf("ABT stack: p0x%08lx v0x%08lx\n", abtstack.pv_pa,
	    abtstack.pv_va);
	printf("UND stack: p0x%08lx v0x%08lx\n", undstack.pv_pa,
	    undstack.pv_va);
	printf("SVC stack: p0x%08lx v0x%08lx\n", kernelstack.pv_pa,
	    kernelstack.pv_va);
#endif

	alloc_pages(msgbufphys, round_page(MSGBUFSIZE) / PAGE_SIZE);

	/*
	 * Ok we have allocated physical pages for the primary kernel
	 * page tables
	 */

#ifdef VERBOSE_INIT_ARM
	printf("Creating L1 page table at 0x%08lx\n", kernel_l1pt.pv_pa);
#endif

	/*
	 * Now we start consturction of the L1 page table
	 * We start by mapping the L2 page tables into the L1.
	 * This means that we can replace L1 mappings later on if necessary
	 */
	l1pagetable = kernel_l1pt.pv_pa;

	/* Map the L2 pages tables in the L1 page table */
	pmap_link_l2pt(l1pagetable, 0x00000000,
	    &kernel_pt_table[KERNEL_PT_SYS]);
	pmap_link_l2pt(l1pagetable, KERNEL_BASE,
	    &kernel_pt_table[KERNEL_PT_KERNEL]);
	for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
		pmap_link_l2pt(l1pagetable, KERNEL_VM_BASE + loop * 0x00400000,
		    &kernel_pt_table[KERNEL_PT_VMDATA + loop]);

	/* update the top of the kernel VM */
	pmap_curmaxkvaddr =
	    KERNEL_VM_BASE + (KERNEL_PT_VMDATA_NUM * 0x00400000);

#ifdef VERBOSE_INIT_ARM
	printf("Mapping kernel\n");
#endif

	/* Now we fill in the L2 pagetable for the kernel static code/data */
	{
		/*
		 * The kernel starts in the first 1MB of RAM, and we'd
		 * like to use a section mapping for text, so we'll just
		 * map from KERNEL_BASE to etext[] to _end[].
		 */
		extern char etext[];
		size_t textsize = (uintptr_t) etext - KERNEL_BASE;
		size_t totalsize = (uintptr_t) _end - KERNEL_BASE;
		u_int logical;

		/*
		 * NOTE(review): the second pair of statements below is
		 * redundant — both values are already page-aligned after
		 * the first pair, so re-masking/re-rounding is a no-op.
		 * Left in place to keep this change comment-only.
		 */
		textsize = (textsize + PGOFSET) & ~PGOFSET;
		totalsize = (totalsize + PGOFSET) & ~PGOFSET;
		textsize = textsize & ~PGOFSET;
		totalsize = (totalsize + PGOFSET) & ~PGOFSET;

		logical = 0;	/* offset into RAM */

		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
		logical += pmap_map_chunk(l1pagetable, KERNEL_BASE + logical,
		    physical_start + logical, totalsize - textsize,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	}

#ifdef VERBOSE_INIT_ARM
	printf("Constructing L2 page tables\n");
#endif

	/* Map the stack pages */
	pmap_map_chunk(l1pagetable, irqstack.pv_va, irqstack.pv_pa,
	    IRQ_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, abtstack.pv_va, abtstack.pv_pa,
	    ABT_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, undstack.pv_va, undstack.pv_pa,
	    UND_STACK_SIZE * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
	pmap_map_chunk(l1pagetable, kernelstack.pv_va, kernelstack.pv_pa,
	    UPAGES * PAGE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/* The page tables themselves get page-table cache attributes. */
	pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa,
	    L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);

	for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
		pmap_map_chunk(l1pagetable, kernel_pt_table[loop].pv_va,
		    kernel_pt_table[loop].pv_pa, L2_TABLE_SIZE,
		    VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
	}

	/* Map the vector page. */
	pmap_map_entry(l1pagetable, vector_page, systempage.pv_pa,
	    VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);

	/*
	 * Map devices we can map w/ section mappings.
	 */
	loop = 0;
	while (l1_sec_table[loop].size) {
		vsize_t sz;

#ifdef VERBOSE_INIT_ARM
		printf("%08lx -> %08lx @ %08lx\n", l1_sec_table[loop].pa,
		    l1_sec_table[loop].pa + l1_sec_table[loop].size - 1,
		    l1_sec_table[loop].va);
#endif
		for (sz = 0; sz < l1_sec_table[loop].size; sz += L1_S_SIZE)
			pmap_map_section(l1pagetable,
			    l1_sec_table[loop].va + sz,
			    l1_sec_table[loop].pa + sz,
			    l1_sec_table[loop].prot,
			    l1_sec_table[loop].cache);
		++loop;
	}

	/*
	 * Now we have the real page tables in place so we can switch to them.
	 * Once this is done we will be running with the REAL kernel page
	 * tables.
	 */

	/* Switch tables */
#ifdef VERBOSE_INIT_ARM
	printf("freestart = 0x%08lx, free_pages = %d (0x%x)\n",
	    physical_freestart, free_pages, free_pages);
	printf("switching to new L1 page table @%#lx...", kernel_l1pt.pv_pa);
#endif
	cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT);
	cpu_setttb(kernel_l1pt.pv_pa, true);
	cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2));

	/*
	 * Moved from cpu_startup() as data_abort_handler() references
	 * this during uvm init
	 */
	uvm_lwp_setuarea(&lwp0, kernelstack.pv_va);

#ifdef VERBOSE_INIT_ARM
	printf("done!\n");
#endif

	/*
	 * XXX this should only be done in main() but it useful to
	 * have output earlier ...
	 */
	consinit();

#ifdef VERBOSE_INIT_ARM
	printf("bootstrap done.\n");
#endif

	arm32_vector_init(ARM_VECTORS_LOW, ARM_VEC_ALL);

	/*
	 * Pages were allocated during the secondary bootstrap for the
	 * stacks for different CPU modes.
	 * We must now set the r13 registers in the different CPU modes to
	 * point to these stacks.
	 * Since the ARM stacks use STMFD etc. we must set r13 to the top end
	 * of the stack memory.
	 */
	printf("init subsystems: stacks ");

	set_stackptr(PSR_IRQ32_MODE,
	    irqstack.pv_va + IRQ_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_ABT32_MODE,
	    abtstack.pv_va + ABT_STACK_SIZE * PAGE_SIZE);
	set_stackptr(PSR_UND32_MODE,
	    undstack.pv_va + UND_STACK_SIZE * PAGE_SIZE);

	/*
	 * Well we should set a data abort handler.
	 * Once things get going this will change as we will need a proper
	 * handler.
	 * Until then we will use a handler that just panics but tells us
	 * why.
	 * Initialisation of the vectors will just panic on a data abort.
	 * This just fills in a slightly better one.
	 */
	printf("vectors ");
	data_abort_handler_address = (u_int)data_abort_handler;
	prefetch_abort_handler_address = (u_int)prefetch_abort_handler;
	undefined_handler_address = (u_int)undefinedinstruction_bounce;

	/* Initialise the undefined instruction handlers */
	printf("undefined ");
	undefined_init();

	/* Load memory into UVM. */
	printf("page ");
	uvm_setpagesize();	/* initialize PAGE_SIZE-dependent variables */

	/* XXX Always one RAM block -- nuke the loop. */
	for (loop = 0; loop < bootconfig.dramblocks; loop++) {
		paddr_t start = (paddr_t)bootconfig.dram[loop].address;
		paddr_t end = start + (bootconfig.dram[loop].pages * PAGE_SIZE);
#if NISADMA > 0
		paddr_t istart, isize;
		extern struct arm32_dma_range *footbridge_isa_dma_ranges;
		extern int footbridge_isa_dma_nranges;
#endif

		/* Clamp to the region we have not already consumed. */
		if (start < physical_freestart)
			start = physical_freestart;
		if (end > physical_freeend)
			end = physical_freeend;
#if 0
		printf("%d: %lx -> %lx\n", loop, start, end - 1);
#endif
#if NISADMA > 0
		if (arm32_dma_range_intersect(footbridge_isa_dma_ranges,
		    footbridge_isa_dma_nranges, start, end - start,
		    &istart, &isize)) {
			/*
			 * Place the pages that intersect with the
			 * ISA DMA range onto the ISA DMA free list.
			 */
#if 0
			printf(" ISADMA 0x%lx -> 0x%lx\n", istart,
			    istart + isize - 1);
#endif
			uvm_page_physload(atop(istart),
			    atop(istart + isize), atop(istart),
			    atop(istart + isize), VM_FREELIST_ISADMA);

			/*
			 * Load the pieces that come before the
			 * intersection onto the default free list.
			 */
			if (start < istart) {
#if 0
				printf(" BEFORE 0x%lx -> 0x%lx\n",
				    start, istart - 1);
#endif
				uvm_page_physload(atop(start),
				    atop(istart), atop(start),
				    atop(istart), VM_FREELIST_DEFAULT);
			}

			/*
			 * Load the pieces that come after the
			 * intersection onto the default free list.
			 */
			if ((istart + isize) < end) {
#if 0
				printf(" AFTER 0x%lx -> 0x%lx\n",
				    (istart + isize), end - 1);
#endif
				uvm_page_physload(atop(istart + isize),
				    atop(end), atop(istart + isize),
				    atop(end), VM_FREELIST_DEFAULT);
			}
		} else {
			uvm_page_physload(atop(start), atop(end),
			    atop(start), atop(end), VM_FREELIST_DEFAULT);
		}
#else	/* NISADMA > 0 */
		uvm_page_physload(atop(start), atop(end),
		    atop(start), atop(end), VM_FREELIST_DEFAULT);
#endif	/* NISADMA > 0 */
	}

	/* Boot strap pmap telling it where the kernel page table is */
	printf("pmap ");
	pmap_bootstrap(KERNEL_VM_BASE, KERNEL_VM_BASE + KERNEL_VM_SIZE);

	/* Now that pmap is inited, we can set cpu_reset_address */
	cpu_reset_address_paddr = vtophys((vaddr_t)netwinder_reset);

	/* Setup the IRQ system */
	printf("irq ");
	footbridge_intr_init();
	printf("done.\n");

	/*
	 * Warn the user if the bootinfo was bogus.  We already
	 * faked up some safe values.
	 */
	if (nwbootinfo.bi_pagesize == 0xdeadbeef)
		printf("WARNING: NeTTrom boot info corrupt\n");

#ifdef DDB
	db_machine_init();
	if (boothowto & RB_KDB)
		Debugger();
#endif

	/* Turn the led green */
	ISA_PUTBYTE(0x338, 0x06);

	/* We return the new stack pointer address */
	return(kernelstack.pv_va + USPACE_SVC_STACK_TOP);
}
/*
 * Routine:	lck_rw_lock_exclusive
 *
 * Acquire lck for writing.  Two-phase generic C slow path:
 *   1) win the lck_rw_want_write bit (spin then optionally sleep);
 *   2) wait for existing readers and any upgrader to drain.
 * The lock's interlock is held whenever the lock words are examined
 * or modified; it is dropped across spins and sleeps.
 */
void
lck_rw_lock_exclusive(
	lck_rw_t	*lck)
{
	int		i;
	wait_result_t	res;
#if	MACH_LDEBUG
	int		decrementer;
#endif	/* MACH_LDEBUG */
	boolean_t	istate;
#if	CONFIG_DTRACE
	uint64_t wait_interval = 0;	/* 0 = no wait; -1 = probes disabled */
	int slept = 0;
	int readers_at_sleep;
#endif

	istate = lck_interlock_lock(lck);
#if	CONFIG_DTRACE
	readers_at_sleep = lck->lck_rw_shared_count;
#endif

#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */

	/*
	 *	Try to acquire the lck_rw_want_write bit.
	 */
	while (lck->lck_rw_want_write) {

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_START, (int)lck, 0, 0, 0, 0);
		/*
		 * Either sleeping or spinning is happening, start
		 * a timing of our delay interval now.
		 *
		 * NOTE(review): this arm assigns plain -1 while the
		 * final check compares against (unsigned)-1 — on LP64
		 * those differ for a uint64_t; confirm intended.
		 */
#if	CONFIG_DTRACE
		if ((lockstat_probemap[LS_LCK_RW_LOCK_EXCL_SPIN] || lockstat_probemap[LS_LCK_RW_LOCK_EXCL_BLOCK]) && wait_interval == 0) {
			wait_interval = mach_absolute_time();
		} else {
			wait_interval = -1;
		}
#endif

		i = lock_wait_time[lck->lck_rw_can_sleep ? 1 : 0];

		if (i != 0) {
			/* Spin phase: busy-wait without the interlock. */
			lck_interlock_unlock(lck, istate);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - lck_rw_want_write");
#endif	/* MACH_LDEBUG */
			while (--i != 0 && lck->lck_rw_want_write)
				lck_rw_lock_pause(istate);
			istate = lck_interlock_lock(lck);
		}

		/* Sleep phase: recheck under the interlock, then block. */
		if (lck->lck_rw_can_sleep && lck->lck_rw_want_write) {
			lck->lck_w_waiting = TRUE;
			res = assert_wait(RW_LOCK_WRITER_EVENT(lck), THREAD_UNINT);
			if (res == THREAD_WAITING) {
				lck_interlock_unlock(lck, istate);
				res = thread_block(THREAD_CONTINUE_NULL);
#if	CONFIG_DTRACE
				slept = 1;
#endif
				istate = lck_interlock_lock(lck);
			}
		}
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE_CODE) | DBG_FUNC_END, (int)lck, res, 0, 0, 0);
	}
	/* Phase 1 complete: we own the write-intent bit. */
	lck->lck_rw_want_write = TRUE;

	/* Wait for readers (and upgrades) to finish */
#if	MACH_LDEBUG
	decrementer = DECREMENTER_TIMEOUT;
#endif	/* MACH_LDEBUG */
	while ((lck->lck_rw_shared_count != 0) || lck->lck_rw_want_upgrade) {

		i = lock_wait_time[lck->lck_rw_can_sleep ? 1 : 0];

		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_START,
			     (int)lck, lck->lck_rw_shared_count, lck->lck_rw_want_upgrade, i, 0);

#if	CONFIG_DTRACE
		/*
		 * Either sleeping or spinning is happening, start
		 * a timing of our delay interval now.  If we set it
		 * to -1 we don't have accurate data so we cannot later
		 * decide to record a dtrace spin or sleep event.
		 */
		if ((lockstat_probemap[LS_LCK_RW_LOCK_EXCL_SPIN] || lockstat_probemap[LS_LCK_RW_LOCK_EXCL_BLOCK]) && wait_interval == 0) {
			wait_interval = mach_absolute_time();
		} else {
			wait_interval = (unsigned) -1;
		}
#endif

		if (i != 0) {
			/* Spin phase for the drain wait. */
			lck_interlock_unlock(lck, istate);
#if	MACH_LDEBUG
			if (!--decrementer)
				Debugger("timeout - wait for readers");
#endif	/* MACH_LDEBUG */
			while (--i != 0 && (lck->lck_rw_shared_count != 0 ||
					    lck->lck_rw_want_upgrade))
				lck_rw_lock_pause(istate);
			istate = lck_interlock_lock(lck);
		}

		/* Sleep phase for the drain wait. */
		if (lck->lck_rw_can_sleep && (lck->lck_rw_shared_count != 0 || lck->lck_rw_want_upgrade)) {
			lck->lck_w_waiting = TRUE;
			res = assert_wait(RW_LOCK_WRITER_EVENT(lck), THREAD_UNINT);
			if (res == THREAD_WAITING) {
				lck_interlock_unlock(lck, istate);
				res = thread_block(THREAD_CONTINUE_NULL);
#if	CONFIG_DTRACE
				slept = 1;
#endif
				istate = lck_interlock_lock(lck);
			}
		}
		KERNEL_DEBUG(MACHDBG_CODE(DBG_MACH_LOCKS, LCK_RW_LCK_EXCLUSIVE1_CODE) | DBG_FUNC_END,
			     (int)lck, lck->lck_rw_shared_count, lck->lck_rw_want_upgrade, res, 0);
	}

	lck_interlock_unlock(lck, istate);
#if	CONFIG_DTRACE
	/*
	 * Decide what latencies we suffered that are Dtrace events.
	 * If we have set wait_interval, then we either spun or slept.
	 * At least we get out from under the interlock before we record
	 * which is the best we can do here to minimize the impact
	 * of the tracing.
	 * If we have set wait_interval to -1, then dtrace was not enabled when we
	 * started sleeping/spinning so we don't record this event.
	 */
	if (wait_interval != 0 && wait_interval != (unsigned) -1) {
		if (slept == 0) {
			LOCKSTAT_RECORD2(LS_LCK_RW_LOCK_EXCL_SPIN, lck,
			    mach_absolute_time() - wait_interval, 1);
		} else {
			/*
			 * For the blocking case, we also record if when we blocked
			 * it was held for read or write, and how many readers.
			 * Notice that above we recorded this before we dropped
			 * the interlock so the count is accurate.
			 */
			LOCKSTAT_RECORD4(LS_LCK_RW_LOCK_EXCL_BLOCK, lck,
			    mach_absolute_time() - wait_interval, 1,
			    (readers_at_sleep == 0 ? 1 : 0), readers_at_sleep);
		}
	}
	LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_ACQUIRE, lck, 1);
#endif
}
/*
 * get_bootpath_from_prom:
 *
 *	Extract the boot device path and boot arguments from the
 *	OpenFirmware /chosen node (or from a BTINFO_BOOTDEV bootinfo
 *	record when the bootloader supplied one), and parse the boot
 *	flags into boothowto.
 *
 *	Side effects: fills ofbootpath/ofbootargs and points
 *	ofbootpartition, ofboottarget, ofbootfile and ofbootflags into
 *	those buffers; sets ofbootpackage; may enter the debugger or
 *	enable trap tracing for the 'd'/'t' flags.
 *
 *	NOTE(review): the strcpy()s into ofbootpath/ofbootargs are
 *	unbounded — presumably those buffers are at least OFPATHLEN
 *	bytes like sbuf; confirm their declarations before trusting
 *	PROM-supplied lengths.
 */
static void
get_bootpath_from_prom(void)
{
	struct btinfo_bootdev *bdev = NULL;
	char sbuf[OFPATHLEN], *cp;
	int chosen;

	/*
	 * Grab boot path from PROM
	 */
	if ((chosen = OF_finddevice("/chosen")) == -1)
		return;

	/* Prefer the bootloader-provided device name over the PROM's. */
	bdev = lookup_bootinfo(BTINFO_BOOTDEV);
	if (bdev != NULL) {
		strcpy(ofbootpath, bdev->name);
	} else {
		if (OF_getprop(chosen, "bootpath", sbuf, sizeof(sbuf)) < 0)
			return;
		strcpy(ofbootpath, sbuf);
	}
	DPRINTF(ACDB_BOOTDEV, ("bootpath: %s\n", ofbootpath));
	ofbootpackage = prom_finddevice(ofbootpath);

	/*
	 * Strip partition or boot protocol
	 */
	cp = strrchr(ofbootpath, ':');
	if (cp) {
		*cp = '\0';
		ofbootpartition = cp+1;
	}
	/* Locate the last path component containing '@' to find the
	 * boot target name (text between the final '/' and the '@'). */
	cp = strrchr(ofbootpath, '@');
	if (cp) {
		for (; cp != ofbootpath; cp--) {
			if (*cp == '/') {
				ofboottarget = cp+1;
				break;
			}
		}
	}

	DPRINTF(ACDB_BOOTDEV, ("bootpath phandle: 0x%x\n", ofbootpackage));
	DPRINTF(ACDB_BOOTDEV, ("boot target: %s\n",
	    ofboottarget ? ofboottarget : "<none>"));
	DPRINTF(ACDB_BOOTDEV, ("boot partition: %s\n",
	    ofbootpartition ? ofbootpartition : "<none>"));

	/* Setup pointer to boot flags */
	if (OF_getprop(chosen, "bootargs", sbuf, sizeof(sbuf)) == -1)
		return;
	strcpy(ofbootargs, sbuf);

	cp = ofbootargs;

	/* Find start of boot flags */
	while (*cp) {
		/* skip leading whitespace */
		while(*cp == ' ' || *cp == '\t') cp++;
		if (*cp == '-' || *cp == '\0')
			break;
		/* skip over (and NUL-terminate) a non-flag word, which
		 * is taken to be the boot file name */
		while(*cp != ' ' && *cp != '\t' && *cp != '\0') cp++;
		if (*cp != '\0')
			*cp++ = '\0';
	}
	if (cp != ofbootargs)
		ofbootfile = ofbootargs;
	ofbootflags = cp;
	if (*cp != '-')
		return;

	/* Translate each flag character after the '-' into RB_* bits. */
	for (;*++cp;) {
		int fl;

		fl = 0;
		BOOT_FLAG(*cp, fl);
		if (!fl) {
			printf("unknown option `%c'\n", *cp);
			continue;
		}
		boothowto |= fl;

		/* specialties */
		if (*cp == 'd') {
#if defined(KGDB)
			kgdb_break_at_attach = 1;
#elif defined(DDB)
			Debugger();
#else
			printf("kernel has no debugger\n");
#endif
		} else if (*cp == 't') {
			/* turn on traptrace w/o breaking into kdb */
			extern int trap_trace_dis;

			trap_trace_dis = 0;
		}
	}
}