ACPI_STATUS
AcpiOsSignal(UINT32 Function, void *Info)
{
	ACPI_SIGNAL_FATAL_INFO *fatal;

	switch (Function) {
	case ACPI_SIGNAL_FATAL:
		fatal = (ACPI_SIGNAL_FATAL_INFO *)Info;
		printf("ACPI fatal signal, type 0x%x code 0x%x argument 0x%x\n",
		    fatal->Type, fatal->Code, fatal->Argument);
#ifdef ACPI_DEBUG
		kdb_enter(KDB_WHY_ACPI, "AcpiOsSignal");
#endif
		break;

	case ACPI_SIGNAL_BREAKPOINT:
#ifdef ACPI_DEBUG
		kdb_enter(KDB_WHY_ACPI, (char *)Info);
#endif
		break;

	default:
		return (AE_BAD_PARAMETER);
	}

	return (AE_OK);
}
static int
dcons_check_break(struct dcons_softc *dc, int c)
{
	int kdb_brk;

	if (c < 0)
		return (c);

	if ((kdb_brk = kdb_alt_break(c, &dc->brk_state)) != 0) {
		switch (kdb_brk) {
		case KDB_REQ_DEBUGGER:
			if ((dc->flags & DC_GDB) != 0) {
#ifdef GDB
				if (gdb_cur == &dcons_gdb_dbgport) {
					kdb_dbbe_select("gdb");
					kdb_enter(KDB_WHY_BREAK,
					    "Break sequence on dcons gdb port");
				}
#endif
			} else
				kdb_enter(KDB_WHY_BREAK,
				    "Break sequence on dcons console port");
			break;
		case KDB_REQ_PANIC:
			kdb_panic("Panic sequence on dcons console port");
			break;
		case KDB_REQ_REBOOT:
			kdb_reboot();
			break;
		}
	}
	return (c);
}
static int
ofw_cngetc(struct consdev *cp)
{
	unsigned char ch;

	if (OF_read(stdin, &ch, 1) > 0) {
#if defined(KDB) && defined(ALT_BREAK_TO_DEBUGGER)
		int kdb_brk;

		if ((kdb_brk = kdb_alt_break(ch, &alt_break_state)) != 0) {
			switch (kdb_brk) {
			case KDB_REQ_DEBUGGER:
				kdb_enter(KDB_WHY_BREAK,
				    "Break sequence on console");
				break;
			case KDB_REQ_PANIC:
				kdb_panic("Panic sequence on console");
				break;
			case KDB_REQ_REBOOT:
				kdb_reboot();
				break;
			}
		}
#endif
		return (ch);
	}
	return (-1);
}
static void
mips_init(void)
{
	int i;

	printf("entry: mips_init()\n");

	bootverbose = 1;
	realmem = btoc(32 << 20);

	for (i = 0; i < 10; i++)
		phys_avail[i] = 0;

	/* phys_avail regions are in bytes */
	dump_avail[0] = phys_avail[0] = MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
	dump_avail[1] = phys_avail[1] = ctob(realmem);

	physmem = realmem;

	init_param1();
	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
static int
kdb_alt_break_internal(int key, int *state, int force_gdb)
{
	int brk;

	if (!kdb_alt_break_to_debugger)
		return (0);
	brk = kdb_alt_break_state(key, state);
	switch (brk) {
	case KDB_REQ_DEBUGGER:
		if (force_gdb)
			kdb_dbbe_select("gdb");
		kdb_enter(KDB_WHY_BREAK, "Break to debugger");
		break;
	case KDB_REQ_PANIC:
		if (force_gdb)
			kdb_dbbe_select("gdb");
		kdb_panic("Panic sequence on console");
		break;
	case KDB_REQ_REBOOT:
		kdb_reboot();
		break;
	}
	return (0);
}
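/*
 * For context, a plausible pair of thin public wrappers over
 * kdb_alt_break_internal(), matching the kdb_alt_break() calls made by the
 * console drivers above.  The name of the gdb-forcing variant is an
 * assumption; treat this as a sketch, not the authoritative source.
 */
int
kdb_alt_break(int key, int *state)
{

	return (kdb_alt_break_internal(key, state, 0));
}

int
kdb_alt_break_gdb(int key, int *state)
{

	/* As above, but force selection of the gdb backend on entry. */
	return (kdb_alt_break_internal(key, state, 1));
}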
/*
 * ap_watchdog() is called by the SMP idle loop code.  It works on the same
 * premise that the disabling of logical processors does: that if the cpu is
 * idle, then it can ignore the world from then on, as nothing will be
 * scheduled on it.  Leaving aside multi-runqueue schedulers (SCHED_ULE) and
 * explicit process migration (sched_bind()), this is not an unreasonable
 * assumption.
 */
void
ap_watchdog(u_int cpuid)
{
	char old_pcomm[MAXCOMLEN + 1];
	struct proc *p;

	if (watchdog_cpu != cpuid)
		return;

	printf("watchdog started on cpu %d\n", cpuid);
	p = curproc;
	bcopy(p->p_comm, old_pcomm, MAXCOMLEN + 1);
	snprintf(p->p_comm, MAXCOMLEN + 1, "mp_watchdog cpu %d", cpuid);
	while (1) {
		DELAY(1000000);			/* One second. */
		if (watchdog_cpu != cpuid)
			break;
		atomic_subtract_int(&watchdog_timer, 1);
		if (watchdog_timer < 4)
			printf("Watchdog timer: %d\n", watchdog_timer);
		if (watchdog_timer == 0 && watchdog_dontfire == 0) {
			printf("Watchdog firing!\n");
			watchdog_dontfire = 1;
			if (watchdog_nmi)
				watchdog_ipi_nmi();
			else
				kdb_enter(KDB_WHY_WATCHDOG, "mp_watchdog");
		}
	}
	bcopy(old_pcomm, p->p_comm, MAXCOMLEN + 1);
	printf("watchdog stopped on cpu %d\n", cpuid);
}
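/*
 * Control sketch: watchdog_cpu is presumably armed through a sysctl.  A
 * minimal handler might look like the following; the handler name, the
 * initial countdown value, and the mp_maxid bound are assumptions, not the
 * original mp_watchdog source.
 */
static int
sysctl_watchdog(SYSCTL_HANDLER_ARGS)
{
	int error, cpu;

	cpu = watchdog_cpu;
	error = sysctl_handle_int(oidp, &cpu, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (cpu >= 0 && (u_int)cpu <= mp_maxid) {
		watchdog_timer = 10;	/* Assumed initial countdown. */
		watchdog_dontfire = 0;
		watchdog_cpu = cpu;	/* ap_watchdog() picks this up. */
	}
	return (0);
}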
static void
mips_init(void)
{
	int i;
#ifdef FDT
	struct mem_region mr[FDT_MEM_REGIONS];
	uint64_t val;
	int mr_cnt;
	int j;
#endif

	for (i = 0; i < 10; i++)
		phys_avail[i] = 0;

	/* phys_avail regions are in bytes */
	phys_avail[0] = MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
	phys_avail[1] = ctob(realmem);

	dump_avail[0] = phys_avail[0];
	dump_avail[1] = phys_avail[1];

	physmem = realmem;

#ifdef FDT
	if (fdt_get_mem_regions(mr, &mr_cnt, &val) == 0) {
		physmem = btoc(val);

		KASSERT((phys_avail[0] >= mr[0].mr_start) &&
		    (phys_avail[0] < (mr[0].mr_start + mr[0].mr_size)),
		    ("First region is not within FDT memory range"));

		/* Limit size of the first region */
		phys_avail[1] = (mr[0].mr_start +
		    MIN(mr[0].mr_size, ctob(realmem)));
		dump_avail[1] = phys_avail[1];

		/* Add the rest of regions */
		for (i = 1, j = 2; i < mr_cnt; i++, j += 2) {
			phys_avail[j] = mr[i].mr_start;
			phys_avail[j + 1] = (mr[i].mr_start + mr[i].mr_size);
			dump_avail[j] = phys_avail[j];
			dump_avail[j + 1] = phys_avail[j + 1];
		}
	}
#endif

	init_param1();
	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
static int
hvcn_cncheckc(struct consdev *cp)
{
	unsigned char ch;
	int l;

	if ((l = hv_cons_getchar(&ch)) == H_EOK) {
#if defined(KDB)
		if (l == H_BREAK || l == H_HUP)
			kdb_enter(KDB_WHY_BREAK, "Break sequence on console");
		if (kdb_alt_break(ch, &alt_break_state))
			kdb_enter(KDB_WHY_BREAK, "Break sequence on console");
#endif
		return (ch);
	}

	return (-1);
}
int
kdb_break(void)
{

	if (!kdb_break_to_debugger)
		return (0);
	kdb_enter(KDB_WHY_BREAK, "Break to debugger");
	return (KDB_REQ_DEBUGGER);
}
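/*
 * Usage sketch: a UART driver that detects a received BREAK condition would
 * typically give kdb_break() first refusal on the event.  The driver, softc
 * and helper names here are hypothetical.
 */
static void
mydev_rxintr(struct mydev_softc *sc, int ch, int brk_detected)
{

	if (brk_detected && kdb_break() != 0)
		return;			/* Consumed by the debugger. */
	mydev_rx_enqueue(sc, ch);	/* Hypothetical input path. */
}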
static int
pswitch_intr(void *arg)
{
	device_t dev;

	dev = (device_t)arg;
	kdb_enter(KDB_WHY_POWERPC, device_get_nameunit(dev));

	return (FILTER_HANDLED);
}
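/*
 * Registration sketch: pswitch_intr() is an interrupt filter, so it would
 * be hooked up with bus_setup_intr() during attach, passing the device_t as
 * the filter argument.  The softc layout and attach routine are assumptions
 * made for illustration.
 */
struct pswitch_softc {
	struct resource	*sc_irq;
	int		 sc_rid;
	void		*sc_ih;
};

static int
pswitch_attach(device_t dev)
{
	struct pswitch_softc *sc = device_get_softc(dev);

	sc->sc_rid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_rid,
	    RF_ACTIVE);
	if (sc->sc_irq == NULL)
		return (ENXIO);
	/* Filter only, no ithread handler; 'dev' becomes the filter arg. */
	return (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC,
	    pswitch_intr, NULL, dev, &sc->sc_ih));
}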
static int
hvcn_cngetc(struct consdev *cp)
{
	unsigned char ch;
	int l;

	ch = '\0';
	while ((l = hv_cons_getchar(&ch)) != H_EOK) {
#if defined(KDB)
		int kdb_brk;

		if (l == H_BREAK || l == H_HUP)
			kdb_enter(KDB_WHY_BREAK, "Break sequence on console");

		if ((kdb_brk = kdb_alt_break(ch, &alt_break_state)) != 0) {
			switch (kdb_brk) {
			case KDB_REQ_DEBUGGER:
				kdb_enter(KDB_WHY_BREAK,
				    "Break sequence on console");
				break;
			case KDB_REQ_PANIC:
				kdb_panic("Panic sequence on console");
				break;
			case KDB_REQ_REBOOT:
				kdb_reboot();
				break;
			}
		}
#endif
		if (l != -2 && l != 0)
			return (-1);
	}

	return (ch);
}
/*
 * Called by KASSERT, this decides if we will panic
 * or if we will log via printf and/or ktr.
 */
void
kassert_panic(const char *fmt, ...)
{
	static char buf[256];
	va_list ap;

	va_start(ap, fmt);
	(void)vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);

	/*
	 * panic if we're not just warning, or if we've exceeded
	 * kassert_log_panic_at warnings.
	 */
	if (!kassert_warn_only ||
	    (kassert_log_panic_at > 0 &&
	    kassert_warnings >= kassert_log_panic_at)) {
		va_start(ap, fmt);
		vpanic(fmt, ap);
		/* NORETURN */
	}
#ifdef KTR
	if (kassert_do_ktr)
		CTR0(ktr_mask, buf);
#endif /* KTR */

	/*
	 * log if we've not yet met the mute limit.
	 */
	if (kassert_do_log &&
	    (kassert_log_mute_at == 0 ||
	    kassert_warnings < kassert_log_mute_at)) {
		static struct timeval lasterr;
		static int curerr;

		if (ppsratecheck(&lasterr, &curerr, kassert_log_pps_limit)) {
			printf("KASSERT failed: %s\n", buf);
			kdb_backtrace();
		}
	}
#ifdef KDB
	if (kassert_do_kdb)
		kdb_enter(KDB_WHY_KASSERT, buf);
#endif
	atomic_add_int(&kassert_warnings, 1);
}
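/*
 * Tuning sketch: the kassert_* variables read above are presumably exposed
 * as tunables/sysctls.  A minimal declaration for one of them might look
 * like this; the OID layout and description strings are assumptions.
 */
static SYSCTL_NODE(_debug, OID_AUTO, kassert, CTLFLAG_RW, NULL,
    "kassert options");

static int kassert_warn_only = 0;
TUNABLE_INT("debug.kassert.warn_only", &kassert_warn_only);
SYSCTL_INT(_debug_kassert, OID_AUTO, warn_only, CTLFLAG_RW,
    &kassert_warn_only, 0,
    "KASSERT triggers a printf/KTR record rather than a panic");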
static int
kdb_sysctl_enter(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error == 0) {
		i = 0;
		error = sysctl_handle_int(oidp, &i, 0, req);
	}
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (kdb_active)
		return (EBUSY);
	kdb_enter(KDB_WHY_SYSCTL, "sysctl debug.kdb.enter");
	return (0);
}
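/*
 * Registration sketch: the handler above is presumably attached under a
 * debug.kdb node so that `sysctl debug.kdb.enter=1` drops into the
 * debugger.  The exact flags are assumptions; CTLFLAG_SECURE is a plausible
 * guard against use once securelevel has been raised.
 */
static SYSCTL_NODE(_debug, OID_AUTO, kdb, CTLFLAG_RW, NULL, "KDB nodes");

SYSCTL_PROC(_debug_kdb, OID_AUTO, enter,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_SECURE, NULL, 0,
    kdb_sysctl_enter, "I", "Enter the debugger");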
static void
wd_timeout_cb(void *arg)
{
	const char *type = arg;

#ifdef DDB
	if ((wd_pretimeout_act & WD_SOFT_DDB)) {
		char kdb_why[80];

		snprintf(kdb_why, sizeof(kdb_why), "watchdog %s timeout",
		    type);
		kdb_backtrace();
		kdb_enter(KDB_WHY_WATCHDOG, kdb_why);
	}
#endif
	if ((wd_pretimeout_act & WD_SOFT_LOG))
		log(LOG_EMERG, "watchdog %s-timeout, WD_SOFT_LOG", type);
	if ((wd_pretimeout_act & WD_SOFT_PRINTF))
		printf("watchdog %s-timeout, WD_SOFT_PRINTF\n", type);
	if ((wd_pretimeout_act & WD_SOFT_PANIC))
		panic("watchdog %s-timeout, WD_SOFT_PANIC set", type);
}
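/*
 * Scheduling sketch: a pre-timeout callback like wd_timeout_cb() would
 * typically be armed with a callout when the watchdog is configured.  The
 * callout variable, helper name and "pre" argument are assumptions made to
 * match the `const char *type = arg` usage above.
 */
static struct callout wd_pretimeo_handle;

static void
wd_arm_pretimeout(int pretimeout_secs)
{

	callout_init(&wd_pretimeo_handle, 1);	/* MP-safe callout. */
	callout_reset(&wd_pretimeo_handle, pretimeout_secs * hz,
	    wd_timeout_cb, "pre");
}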
static int
vt_machine_kbdevent(int c)
{

	switch (c) {
	case SPCLKEY | DBG:
		kdb_enter(KDB_WHY_BREAK, "manual escape to debugger");
		return (1);
	case SPCLKEY | RBT:
		/* XXX: Make this configurable! */
		shutdown_nice(0);
		return (1);
	case SPCLKEY | HALT:
		shutdown_nice(RB_HALT);
		return (1);
	case SPCLKEY | PDWN:
		shutdown_nice(RB_HALT | RB_POWEROFF);
		return (1);
	}

	return (0);
}
/*
 * Only allow most system calls from either ambient authority, or from
 * sandboxes that have been explicitly delegated CHERI_PERM_SYSCALL via
 * their code capability.  Note that CHERI_PERM_SYSCALL effectively implies
 * ambient authority, as the kernel does not [currently] interpret
 * pointers/lengths via userspace $ddc.
 */
int
cheri_syscall_authorize(struct thread *td, u_int code, int nargs,
    register_t *args)
{
	uintmax_t c_perms;

	/*
	 * Allow the cycle counter to be read via sysarch.
	 *
	 * XXXRW: Now that we support a userspace cycle counter, we should
	 * remove this.
	 */
	if (code == SYS_sysarch && args[0] == MIPS_GET_COUNT)
		return (0);

	/*
	 * Check whether userspace holds the rights defined in
	 * cheri_capability_set_user() in $pcc.  Note that object type
	 * doesn't come into play here.
	 *
	 * XXXRW: Possibly ECAPMODE should be EPROT or ESANDBOX?
	 */
	CHERI_CLC(CHERI_CR_CTEMP0, CHERI_CR_KDC, &td->td_pcb->pcb_regs.pcc,
	    0);
	CHERI_CGETPERM(c_perms, CHERI_CR_CTEMP0);
	if ((c_perms & CHERI_PERM_SYSCALL) == 0) {
		atomic_add_int(&security_cheri_syscall_violations, 1);

#if DDB
		if (security_cheri_debugger_on_sandbox_syscall)
			kdb_enter(KDB_WHY_CHERI,
			    "Syscall rejected in CHERI sandbox");
#endif
		return (ECAPMODE);
	}
	return (0);
}
void
platform_start(__register_t a0, __register_t a1, __register_t a2 __unused,
    __register_t a3 __unused)
{
	uint64_t platform_counter_freq;
	vm_offset_t kernend;
	int argc = a0;
	char **argv = (char **)a1;
	int i, mem;

	/* clear the BSS and SBSS segments */
	kernend = (vm_offset_t)&end;
	memset(&edata, 0, kernend - (vm_offset_t)(&edata));

	mips_postboot_fixup();

	/* Initialize pcpu stuff */
	mips_pcpu0_init();

	/*
	 * Look for a mem=XXM argument.
	 */
	mem = 0;			/* Just something to start with */
	for (i = 0; i < argc; i++) {
		if (strncmp(argv[i], "mem=", 4) == 0) {
			mem = strtol(argv[i] + 4, NULL, 0);
			break;
		}
	}

	bootverbose = 1;
	if (mem > 0)
		realmem = btoc(mem << 20);
	else
		realmem = btoc(32 << 20);

	for (i = 0; i < 10; i++)
		phys_avail[i] = 0;

	/* phys_avail regions are in bytes */
	phys_avail[0] = MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
	phys_avail[1] = ctob(realmem);

	dump_avail[0] = phys_avail[0];
	dump_avail[1] = phys_avail[1];

	physmem = realmem;

	/*
	 * The ns8250 uart code uses DELAY, so the ticker should be
	 * initialized before cninit.  And tick_init_params refers to hz,
	 * so init_param1 should be called first.
	 */
	init_param1();

	/* TODO: parse argc,argv */
	platform_counter_freq = 330000000UL;
	mips_timer_init_params(platform_counter_freq, 1);
	cninit();

	/* Panic here, after cninit */
	if (mem == 0)
		panic("No mem=XX parameter in arguments");

	printf("cmd line: ");
	for (i = 0; i < argc; i++)
		printf("%s ", argv[i]);
	printf("\n");

	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
void
platform_start(__register_t a0 __unused, __register_t a1 __unused,
    __register_t a2 __unused, __register_t a3 __unused)
{
	uint64_t platform_counter_freq;
	uint32_t reg;
	int argc, i, count = 0;
	char **argv, **envp;
	vm_offset_t kernend;

	/*
	 * Clear the BSS and SBSS segments; this should be the first call
	 * in the function.
	 */
	kernend = (vm_offset_t)&end;
	memset(&edata, 0, kernend - (vm_offset_t)(&edata));

	mips_postboot_fixup();

	/* Initialize pcpu stuff */
	mips_pcpu0_init();

	argc = a0;
	argv = (char **)a1;
	envp = (char **)a2;

	/*
	 * Protect ourselves from garbage in registers.
	 */
	if (MIPS_IS_VALID_PTR(envp)) {
		for (i = 0; envp[i]; i += 2) {
			if (strcmp(envp[i], "memsize") == 0)
				realmem = btoc(strtoul(envp[i + 1], NULL,
				    16));
			else if (strcmp(envp[i], "ethaddr") == 0) {
				count = sscanf(envp[i + 1],
				    "%x.%x.%x.%x.%x.%x",
				    &ar711_base_mac[0], &ar711_base_mac[1],
				    &ar711_base_mac[2], &ar711_base_mac[3],
				    &ar711_base_mac[4], &ar711_base_mac[5]);
				if (count < 6)
					memset(ar711_base_mac, 0,
					    sizeof(ar711_base_mac));
			}
		}
	}

	/*
	 * Just a wild guess.  RedBoot let us down and didn't report the
	 * memory size.
	 */
	if (realmem == 0)
		realmem = btoc(32 * 1024 * 1024);

	/* phys_avail regions are in bytes */
	phys_avail[0] = MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
	phys_avail[1] = ctob(realmem);

	physmem = realmem;

	/*
	 * The ns8250 uart code uses DELAY, so the ticker should be
	 * initialized before cninit.  And tick_init_params refers to hz,
	 * so init_param1 should be called first.
	 */
	init_param1();
	platform_counter_freq = ar71xx_cpu_freq();
	mips_timer_init_params(platform_counter_freq, 1);
	cninit();
	init_static_kenv(boot1_env, sizeof(boot1_env));

	printf("platform frequency: %lld\n", platform_counter_freq);
	printf("arguments: \n");
	printf(" a0 = %08x\n", a0);
	printf(" a1 = %08x\n", a1);
	printf(" a2 = %08x\n", a2);
	printf(" a3 = %08x\n", a3);

	printf("Cmd line:");
	if (MIPS_IS_VALID_PTR(argv)) {
		for (i = 0; i < argc; i++) {
			printf(" %s", argv[i]);
			parse_argv(argv[i]);
		}
	} else
		printf("argv is invalid");
	printf("\n");

	printf("Environment:\n");
	if (MIPS_IS_VALID_PTR(envp)) {
		for (i = 0; envp[i]; i += 2) {
			printf(" %s = %s\n", envp[i], envp[i + 1]);
			setenv(envp[i], envp[i + 1]);
		}
	} else
		printf("envp is invalid\n");

	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();

	/*
	 * Reset USB devices.
	 */
	reg = ATH_READ_REG(AR71XX_RST_RESET);
	reg |= RST_RESET_USB_OHCI_DLL | RST_RESET_USB_HOST |
	    RST_RESET_USB_PHY;
	ATH_WRITE_REG(AR71XX_RST_RESET, reg);
	DELAY(1000);
	reg &= ~(RST_RESET_USB_OHCI_DLL | RST_RESET_USB_HOST |
	    RST_RESET_USB_PHY);
	ATH_WRITE_REG(AR71XX_RST_RESET, reg);

	ATH_WRITE_REG(AR71XX_USB_CTRL_CONFIG,
	    USB_CTRL_CONFIG_OHCI_DES_SWAP | USB_CTRL_CONFIG_OHCI_BUF_SWAP |
	    USB_CTRL_CONFIG_EHCI_DES_SWAP | USB_CTRL_CONFIG_EHCI_BUF_SWAP);

	ATH_WRITE_REG(AR71XX_USB_CTRL_FLADJ,
	    (32 << USB_CTRL_FLADJ_HOST_SHIFT) |
	    (3 << USB_CTRL_FLADJ_A5_SHIFT));
	DELAY(1000);

	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
/*
 * Panic is called on unresolvable fatal errors.  It prints "panic: mesg",
 * and then reboots.  If we are called twice, then we avoid trying to sync
 * the disks as this often leads to recursive panics.
 */
void
panic(const char *fmt, ...)
{
#ifdef SMP
	static volatile u_int panic_cpu = NOCPU;
	cpuset_t other_cpus;
#endif
	struct thread *td = curthread;
	int bootopt, newpanic;
	va_list ap;
	static char buf[256];

	if (stop_scheduler_on_panic)
		spinlock_enter();
	else
		critical_enter();

#ifdef SMP
	/*
	 * We don't want multiple CPU's to panic at the same time, so we
	 * use panic_cpu as a simple spinlock.  We have to keep checking
	 * panic_cpu if we are spinning in case the panic on the first
	 * CPU is canceled.
	 */
	if (panic_cpu != PCPU_GET(cpuid))
		while (atomic_cmpset_int(&panic_cpu, NOCPU,
		    PCPU_GET(cpuid)) == 0)
			while (panic_cpu != NOCPU)
				; /* nothing */

	if (stop_scheduler_on_panic) {
		if (panicstr == NULL && !kdb_active) {
			other_cpus = all_cpus;
			CPU_CLR(PCPU_GET(cpuid), &other_cpus);
			stop_cpus_hard(other_cpus);
		}

		/*
		 * We set stop_scheduler here and not in the block above,
		 * because we want to ensure that if panic has been called
		 * and stop_scheduler_on_panic is true, then stop_scheduler
		 * will always be set.  Even if panic has been entered from
		 * kdb.
		 */
		td->td_stopsched = 1;
	}
#endif

	bootopt = RB_AUTOBOOT;
	newpanic = 0;
	if (panicstr)
		bootopt |= RB_NOSYNC;
	else {
		bootopt |= RB_DUMP;
		panicstr = fmt;
		newpanic = 1;
	}

	va_start(ap, fmt);
	if (newpanic) {
		(void)vsnprintf(buf, sizeof(buf), fmt, ap);
		panicstr = buf;
		cngrab();
		printf("panic: %s\n", buf);
	} else {
		printf("panic: ");
		vprintf(fmt, ap);
		printf("\n");
	}
	va_end(ap);
#ifdef SMP
	printf("cpuid = %d\n", PCPU_GET(cpuid));
#endif

#ifdef KDB
	if (newpanic && trace_on_panic)
		kdb_backtrace();
	if (debugger_on_panic)
		kdb_enter(KDB_WHY_PANIC, "panic");
#endif
	/*thread_lock(td); */
	td->td_flags |= TDF_INPANIC;
	/* thread_unlock(td); */
	if (!sync_on_panic)
		bootopt |= RB_NOSYNC;
	if (!stop_scheduler_on_panic)
		critical_exit();
	kern_reboot(bootopt);
}
/*
 * Panic is called on unresolvable fatal errors.  It prints "panic: mesg",
 * and then reboots.  If we are called twice, then we avoid trying to sync
 * the disks as this often leads to recursive panics.
 */
void
panic(const char *fmt, ...)
{
#ifdef SMP
	static volatile u_int panic_cpu = NOCPU;
#endif
	struct thread *td = curthread;
	int bootopt, newpanic;
	va_list ap;
	static char buf[256];

	critical_enter();
#ifdef SMP
	/*
	 * We don't want multiple CPU's to panic at the same time, so we
	 * use panic_cpu as a simple spinlock.  We have to keep checking
	 * panic_cpu if we are spinning in case the panic on the first
	 * CPU is canceled.
	 */
	if (panic_cpu != PCPU_GET(cpuid))
		while (atomic_cmpset_int(&panic_cpu, NOCPU,
		    PCPU_GET(cpuid)) == 0)
			while (panic_cpu != NOCPU)
				; /* nothing */
#endif

	bootopt = RB_AUTOBOOT;
	newpanic = 0;
	if (panicstr)
		bootopt |= RB_NOSYNC;
	else {
		bootopt |= RB_DUMP;
		panicstr = fmt;
		newpanic = 1;
	}

	va_start(ap, fmt);
	if (newpanic) {
		(void)vsnprintf(buf, sizeof(buf), fmt, ap);
		panicstr = buf;
		printf("panic: %s\n", buf);
	} else {
		printf("panic: ");
		vprintf(fmt, ap);
		printf("\n");
	}
	va_end(ap);
#ifdef SMP
	printf("cpuid = %d\n", PCPU_GET(cpuid));
#endif

#ifdef KDB
	if (newpanic && trace_on_panic)
		kdb_backtrace();
	if (debugger_on_panic)
		kdb_enter(KDB_WHY_PANIC, "panic");
#endif
	/*thread_lock(td); */
	td->td_flags |= TDF_INPANIC;
	/* thread_unlock(td); */
	if (!sync_on_panic)
		bootopt |= RB_NOSYNC;
	critical_exit();
	kern_reboot(bootopt);
}
static void
mips_init(void)
{
	struct mem_region mr[FDT_MEM_REGIONS];
	uint64_t val;
	int i, j, mr_cnt;
	char *memsize;

	printf("entry: mips_init()\n");

	bootverbose = 1;

	for (i = 0; i < 10; i++)
		phys_avail[i] = 0;

	dump_avail[0] = phys_avail[0] = MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);

	/*
	 * The maximum amount of low memory MT7621 can have.  Currently
	 * MT7621 is the chip that supports the most memory, so that seems
	 * reasonable.
	 */
	realmem = btoc(448 * 1024 * 1024);

	if (fdt_get_mem_regions(mr, &mr_cnt, &val) == 0) {
		physmem = btoc(val);
		printf("RAM size: %ldMB (from FDT)\n",
		    ctob(physmem) / (1024 * 1024));
		KASSERT((phys_avail[0] >= mr[0].mr_start) &&
		    (phys_avail[0] < (mr[0].mr_start + mr[0].mr_size)),
		    ("First region is not within FDT memory range"));

		/* Limit size of the first region */
		phys_avail[1] = (mr[0].mr_start +
		    MIN(mr[0].mr_size, ctob(realmem)));
		dump_avail[1] = phys_avail[1];

		/* Add the rest of the regions */
		for (i = 1, j = 2; i < mr_cnt; i++, j += 2) {
			phys_avail[j] = mr[i].mr_start;
			phys_avail[j + 1] = (mr[i].mr_start + mr[i].mr_size);
			dump_avail[j] = phys_avail[j];
			dump_avail[j + 1] = phys_avail[j + 1];
		}
	} else {
		if ((memsize = kern_getenv("memsize")) != NULL) {
			physmem = btoc(strtol(memsize, NULL, 0) << 20);
			printf("RAM size: %ldMB (from memsize)\n",
			    ctob(physmem) / (1024 * 1024));
		} else {
			/* All else failed, assume 32MB */
			physmem = btoc(32 * 1024 * 1024);
			printf("RAM size: %ldMB (assumed)\n",
			    ctob(physmem) / (1024 * 1024));
		}

		if (ctob(physmem) < (448 * 1024 * 1024)) {
			/*
			 * Anything up to 448MB is assumed to be directly
			 * mappable as low memory...
			 */
			dump_avail[1] = phys_avail[1] = ctob(physmem);
		} else if (mtk_soc_get_socid() == MTK_SOC_MT7621) {
			/*
			 * On MT7621 the low memory is limited to 448MB, the
			 * rest is high memory, mapped at 0x20000000
			 */
			phys_avail[1] = 448 * 1024 * 1024;
			phys_avail[2] = 0x20000000;
			phys_avail[3] = phys_avail[2] + ctob(physmem) -
			    phys_avail[1];
			dump_avail[1] = phys_avail[1] - phys_avail[0];
			dump_avail[2] = phys_avail[2];
			dump_avail[3] = phys_avail[3] - phys_avail[2];
		} else {
			/*
			 * We have > 448MB RAM and we're not MT7621?
			 * Currently there is no such chip, so we'll just
			 * limit the RAM to 32MB and let the user know...
			 */
			printf("Unknown chip, assuming 32MB RAM\n");
			physmem = btoc(32 * 1024 * 1024);
			dump_avail[1] = phys_avail[1] = ctob(physmem);
		}
	}

	if (physmem < realmem)
		realmem = physmem;

	init_param1();
	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
/*
 * If a signal is delivered while in a sandbox, forcibly unwind the trusted
 * stack, simulating a CReturn.  Clear the regular and capability register
 * files.
 *
 * When a signal is thrown in a sandbox, one option is for the kernel to
 * forcibly unwind the stack by a frame.
 *
 * Note that the callee has not had a chance to clean up the mess -- and in
 * particular, can't clear the register file before returning.  We
 * therefore have to do that for the callee, or information/rights may
 * leak!
 *
 * XXXRW: Really we want to delegate this to userspace via SIGSANDBOX or
 * similar, but in the meantime.
 *
 * XXXRW: We don't yet handle floating point.
 */
int
cheri_stack_sandboxexception(struct thread *td, struct trapframe *tf,
    int signum)
{
	struct cheri_stack_frame *csfp;
	struct pcb *pcb = td->td_pcb;
	register_t s, sr, badvaddr, cause;
	f_register_t fsr;

	if (pcb->pcb_cheristack.cs_tsp == CHERI_STACK_SIZE)
		return (0);

	printf("%s: processing sandbox exception signal %d, pid %d\n",
	    __func__, signum, td->td_proc->p_pid);
#if DDB
	if (security_cheri_debugger_on_sandbox_exception)
		kdb_enter(KDB_WHY_CHERI, "CHERI sandbox exception");
#endif

	/*
	 * XXXRW: It is my belief that the trap frame in a thread is always
	 * a pointer to the PCB.  Check this is true, however, because I
	 * rely on it.
	 */
	KASSERT(td->td_frame == &pcb->pcb_regs,
	    ("%s: td_frame != pcb_regs", __func__));
	KASSERT(td->td_frame == tf, ("%s: td_frame != tf", __func__));

	/*
	 * XXXRW: It would actually be quite nice to print out some
	 * exception information here.  Otherwise all the state required to
	 * debug the sandbox failure will be lost.
	 *
	 * XXXRW: Or, has it all been sent to printf?
	 *
	 * XXXRW: Or, maybe that is actually a bad thing, since printf is
	 * quite slow and noisy, and not something we want to do on every
	 * sandbox failure.
	 */

	/*
	 * Clear the regular and capability register files to ensure no
	 * state (information, rights) is returned to the caller that
	 * shouldn't be when the callee exits unexpectedly.  Save and
	 * restore kernel-side registers, however.
	 *
	 * XXXRW: What about floating-point registers?
	 */
	sr = pcb->pcb_regs.sr;
	badvaddr = pcb->pcb_regs.badvaddr;
	cause = pcb->pcb_regs.cause;
	fsr = pcb->pcb_regs.fsr;
	bzero(&pcb->pcb_regs, sizeof(pcb->pcb_regs));
	bzero(&pcb->pcb_cheriframe, sizeof(pcb->pcb_cheriframe));
	pcb->pcb_regs.sr = sr;
	pcb->pcb_regs.badvaddr = badvaddr;
	pcb->pcb_regs.cause = cause;
	pcb->pcb_regs.fsr = fsr;

	/*
	 * Reproduce CReturn.
	 */
	csfp = &pcb->pcb_cheristack.cs_frames[pcb->pcb_cheristack.cs_tsp /
	    CHERI_FRAME_SIZE];
	pcb->pcb_cheristack.cs_tsp += CHERI_FRAME_SIZE;

	/*
	 * Pop IDC, PCC.
	 */
	s = intr_disable();
	cheri_capability_load(CHERI_CR_CTEMP, &csfp->csf_idc);
	cheri_capability_store(CHERI_CR_CTEMP, &pcb->pcb_cheriframe.cf_idc);
	cheri_capability_load(CHERI_CR_CTEMP, &csfp->csf_pcc);
	cheri_capability_store(CHERI_CR_CTEMP, &pcb->pcb_cheriframe.cf_pcc);
	intr_restore(s);

	/*
	 * Pop SP, PC (+4 already done).
	 */
	pcb->pcb_regs.sp = csfp->csf_sp;
	pcb->pcb_regs.pc = csfp->csf_pc;

	/*
	 * Set 'v0' to -1, and 'v1' to the signal number so that the
	 * consumer of CCall can handle the error.
	 *
	 * XXXRW: That isn't really quite what we want, however.  What about
	 * CCall failures themselves, and what if CReturn returns a -1 --
	 * how should the caller interpret that?
	 */
	pcb->pcb_regs.v0 = -1;
	pcb->pcb_regs.v1 = signum;
	return (1);
}
void
platform_start(__register_t a0, __register_t a1, __register_t a2 __unused,
    __register_t a3)
{
	const struct octeon_feature_description *ofd;
	uint64_t platform_counter_freq;
	int rv;

	mips_postboot_fixup();

	/*
	 * Initialize boot parameters so that we can determine things like
	 * which console we should use, etc.
	 */
	octeon_boot_params_init(a3);

	/* Initialize pcpu stuff */
	mips_pcpu0_init();
	mips_timer_early_init(cvmx_sysinfo_get()->cpu_clock_hz);

	/* Initialize console. */
	cninit();

	/*
	 * Display information about the CPU.
	 */
#if !defined(OCTEON_MODEL)
	printf("Using runtime CPU model checks.\n");
#else
	printf("Compiled for CPU model: " __XSTRING(OCTEON_MODEL) "\n");
#endif
	strcpy(cpu_model, octeon_model_get_string(cvmx_get_proc_id()));
	printf("CPU Model: %s\n", cpu_model);
	printf("CPU clock: %uMHz Core Mask: %#x\n",
	    cvmx_sysinfo_get()->cpu_clock_hz / 1000000,
	    cvmx_sysinfo_get()->core_mask);
	rv = octeon_model_version_check(cvmx_get_proc_id());
	if (rv == -1)
		panic("%s: kernel not compatible with this processor.",
		    __func__);

	/*
	 * Display information about the board.
	 */
#if defined(OCTEON_BOARD_CAPK_0100ND)
	strcpy(cpu_board, "CAPK-0100ND");
	if (cvmx_sysinfo_get()->board_type !=
	    CVMX_BOARD_TYPE_CN3010_EVB_HS5) {
		panic("Compiled for %s, but board type is %s.", cpu_board,
		    cvmx_board_type_to_string(
		    cvmx_sysinfo_get()->board_type));
	}
#else
	strcpy(cpu_board,
	    cvmx_board_type_to_string(cvmx_sysinfo_get()->board_type));
#endif
	printf("Board: %s\n", cpu_board);
	printf("Board Type: %u Revision: %u/%u\n",
	    cvmx_sysinfo_get()->board_type,
	    cvmx_sysinfo_get()->board_rev_major,
	    cvmx_sysinfo_get()->board_rev_minor);
	printf("Serial number: %s\n",
	    cvmx_sysinfo_get()->board_serial_number);

	/*
	 * Additional on-chip hardware/settings.
	 *
	 * XXX Display PCI host/target?  What else?
	 */
	printf("MAC address base: %6D (%u configured)\n",
	    cvmx_sysinfo_get()->mac_addr_base, ":",
	    cvmx_sysinfo_get()->mac_addr_count);

	octeon_ciu_reset();

	/*
	 * Convert U-Boot 'bootoctlinux' loader command line arguments into
	 * boot flags and kernel environment variables.
	 */
	bootverbose = 1;
	octeon_init_kenv(a3);

	/*
	 * For some reason on the cn38xx simulator the ebase register is set
	 * to 0x80001000 at bootup time.  Move it back to the default, but
	 * when we move to having support for multiple executives, we need
	 * to rethink this.
	 */
	mips_wr_ebase(0x80000000);

	octeon_memory_init();
	init_param1();
	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif

	cpu_clock = cvmx_sysinfo_get()->cpu_clock_hz;
	platform_counter_freq = cpu_clock;
	octeon_timecounter.tc_frequency = cpu_clock;
	platform_timecounter = &octeon_timecounter;
	mips_timer_init_params(platform_counter_freq, 0);
	set_cputicker(octeon_get_ticks, cpu_clock, 0);

#ifdef SMP
	/*
	 * Clear any pending IPIs.
	 */
	cvmx_write_csr(CVMX_CIU_MBOX_CLRX(0), 0xffffffff);
#endif

	printf("Octeon SDK: %s\n", OCTEON_SDK_VERSION_STRING);
	printf("Available Octeon features:");
	for (ofd = octeon_feature_descriptions; ofd->ofd_string != NULL;
	    ofd++)
		if (octeon_has_feature(ofd->ofd_feature))
			printf(" %s", ofd->ofd_string);
	printf("\n");
}
static int
ct_poll(void *arg)
{
	struct ct_softc *ct = arg;
	struct scsi_low_softc *slp = &ct->sc_sclow;
	struct ct_bus_access_handle *chp = &ct->sc_ch;
	struct targ_info *ti;
	struct buf *bp;
	u_int derror, flags;
	int len, satgo, error;
	u_int8_t scsi_status, regv;

again:
	if (slp->sl_flags & HW_INACTIVE)
		return 0;

	/**************************************************
	 * Get status & bus phase
	 **************************************************/
	if ((ct_stat_read_1(chp) & STR_INT) == 0)
		return 0;

	scsi_status = ct_cr_read_1(chp, wd3s_stat);
	if (scsi_status == ((u_int8_t) -1))
		return 1;

	/**************************************************
	 * Check reselection, or nexus
	 **************************************************/
	if (scsi_status == BSR_RESEL || scsi_status == BSR_AFM_RESEL) {
		if (ct_reselected(ct, scsi_status) == EJUSTRETURN)
			return 1;
	}

	if ((ti = slp->sl_Tnexus) == NULL)
		return 1;

	/**************************************************
	 * Debug section
	 **************************************************/
#ifdef CT_DEBUG
	if (ct_debug > 0) {
		scsi_low_print(slp, NULL);
		device_printf(slp->sl_dev, "scsi_status 0x%x\n\n",
		    (u_int) scsi_status);
#ifdef KDB
		if (ct_debug > 1)
			kdb_enter(KDB_WHY_CAM, "ct");
#endif /* KDB */
	}
#endif /* CT_DEBUG */

	/**************************************************
	 * Internal scsi phase
	 **************************************************/
	satgo = ct->sc_satgo;
	ct->sc_satgo &= ~CT_SAT_GOING;

	switch (ti->ti_phase) {
	case PH_SELSTART:
		if ((satgo & CT_SAT_GOING) == 0) {
			if (scsi_status != BSR_SELECTED) {
				ct_phase_error(ct, scsi_status);
				return 1;
			}
			scsi_low_arbit_win(slp);
			SCSI_LOW_SETUP_PHASE(ti, PH_SELECTED);
			return 1;
		} else {
			scsi_low_arbit_win(slp);
			SCSI_LOW_SETUP_PHASE(ti, PH_MSGOUT); /* XXX */
		}
		break;

	case PH_RESEL:
		if ((scsi_status & BSR_PHVALID) == 0 ||
		    (scsi_status & BSR_PM) != BSR_MSGIN) {
			scsi_low_restart(slp, SCSI_LOW_RESTART_HARD,
			    "phase miss after reselect");
			return 1;
		}
		break;

	default:
		if (slp->sl_flags & HW_PDMASTART) {
			slp->sl_flags &= ~HW_PDMASTART;
			if (ct->sc_dma & CT_DMA_DMASTART) {
				(*ct->ct_dma_xfer_stop) (ct);
				ct->sc_dma &= ~CT_DMA_DMASTART;
			} else if (ct->sc_dma & CT_DMA_PIOSTART) {
				(*ct->ct_pio_xfer_stop) (ct);
				ct->sc_dma &= ~CT_DMA_PIOSTART;
			} else {
				scsi_low_data_finish(slp);
			}
		}
		break;
	}

	/**************************************************
	 * parse scsi phase
	 **************************************************/
	if (scsi_status & BSR_PHVALID) {
		/**************************************************
		 * Normal SCSI phase.
		 **************************************************/
		if ((scsi_status & BSR_CM) == BSR_CMDABT) {
			ct_phase_error(ct, scsi_status);
			return 1;
		}

		switch (scsi_status & BSR_PM) {
		case BSR_DATAOUT:
			SCSI_LOW_SETUP_PHASE(ti, PH_DATA);
			if (scsi_low_data(slp, ti, &bp, SCSI_LOW_WRITE) != 0)
				ct_attention(ct);
			goto common_data_phase;

		case BSR_DATAIN:
			SCSI_LOW_SETUP_PHASE(ti, PH_DATA);
			if (scsi_low_data(slp, ti, &bp, SCSI_LOW_READ) != 0)
				ct_attention(ct);

common_data_phase:
			if (slp->sl_scp.scp_datalen > 0) {
				slp->sl_flags |= HW_PDMASTART;
				if ((ct->sc_xmode & CT_XMODE_PIO) != 0) {
					error = (*ct->ct_pio_xfer_start) (ct);
					if (error == 0) {
						ct->sc_dma |= CT_DMA_PIOSTART;
						return 1;
					}
				}
				if ((ct->sc_xmode & CT_XMODE_DMA) != 0) {
					error = (*ct->ct_dma_xfer_start) (ct);
					if (error == 0) {
						ct->sc_dma |= CT_DMA_DMASTART;
						return 1;
					}
				}
			} else {
				if (slp->sl_scp.scp_direction ==
				    SCSI_LOW_READ) {
					if (!(slp->sl_flags &
					    HW_READ_PADDING)) {
						device_printf(slp->sl_dev,
						    "read padding required\n");
						return 1;
					}
				} else {
					if (!(slp->sl_flags &
					    HW_WRITE_PADDING)) {
						device_printf(slp->sl_dev,
						    "write padding required\n");
						return 1;
					}
				}
				slp->sl_flags |= HW_PDMASTART;
			}
			ct_io_xfer(ct);
			return 1;

		case BSR_CMDOUT:
			SCSI_LOW_SETUP_PHASE(ti, PH_CMD);
			if (scsi_low_cmd(slp, ti) != 0)
				ct_attention(ct);

			if (ct_xfer(ct, slp->sl_scp.scp_cmd,
			    slp->sl_scp.scp_cmdlen, SCSI_LOW_WRITE,
			    &derror) != 0) {
				device_printf(slp->sl_dev,
				    "scsi cmd xfer short\n");
			}
			return 1;

		case BSR_STATIN:
			SCSI_LOW_SETUP_PHASE(ti, PH_STAT);
			if ((ct_io_control & CT_USE_CCSEQ) != 0) {
				if (scsi_low_is_msgout_continue(ti, 0) != 0 ||
				    ct->sc_atten != 0) {
					ct_xfer(ct, &regv, 1, SCSI_LOW_READ,
					    &derror);
					scsi_low_statusin(slp, ti,
					    regv | derror);
				} else {
					ct->sc_satgo |= CT_SAT_GOING;
					cthw_set_count(chp, 0);
					cthw_phase_bypass(ct, 0x41);
				}
			} else {
				ct_xfer(ct, &regv, 1, SCSI_LOW_READ,
				    &derror);
				scsi_low_statusin(slp, ti, regv | derror);
			}
			return 1;

		case BSR_UNSPINFO0:
		case BSR_UNSPINFO1:
			device_printf(slp->sl_dev,
			    "illegal bus phase (0x%x)\n",
			    (u_int) scsi_status);
			scsi_low_print(slp, ti);
			return 1;

		case BSR_MSGOUT:
			SCSI_LOW_SETUP_PHASE(ti, PH_MSGOUT);
			flags = SCSI_LOW_MSGOUT_UNIFY;
			if (ti->ti_ophase != ti->ti_phase)
				flags |= SCSI_LOW_MSGOUT_INIT;
			len = scsi_low_msgout(slp, ti, flags);

			if (len > 1 && slp->sl_atten == 0)
				ct_attention(ct);

			if (ct_xfer(ct, ti->ti_msgoutstr, len,
			    SCSI_LOW_WRITE, &derror) != 0) {
				device_printf(slp->sl_dev,
				    "scsi msgout xfer short\n");
			}
			SCSI_LOW_DEASSERT_ATN(slp);
			ct->sc_atten = 0;
			return 1;

		case BSR_MSGIN:	/* msg in */
			SCSI_LOW_SETUP_PHASE(ti, PH_MSGIN);

			ct_xfer(ct, &regv, 1, SCSI_LOW_READ, &derror);
			if (scsi_low_msgin(slp, ti, regv | derror) == 0) {
				if (scsi_low_is_msgout_continue(ti, 0) != 0) {
					/* XXX: scsi_low_attention */
					scsi_low_attention(slp);
				}
			}

			if ((ct_io_control & CT_FAST_INTR) != 0) {
				if (ct_catch_intr(ct) == 0)
					goto again;
			}
			return 1;
		}
	} else {
		/**************************************************
		 * Special SCSI phase
		 **************************************************/
		switch (scsi_status) {
		case BSR_SATSDP:	/* SAT with save data pointer */
			SCSI_LOW_SETUP_PHASE(ti, PH_MSGIN);
			ct->sc_satgo |= CT_SAT_GOING;
			scsi_low_msgin(slp, ti, MSG_SAVESP);
			cthw_phase_bypass(ct, 0x41);
			return 1;

		case BSR_SATFIN:	/* SAT COMPLETE */
			/*
			 * emulate statusin => msgin
			 */
			SCSI_LOW_SETUP_PHASE(ti, PH_STAT);
			scsi_low_statusin(slp, ti,
			    ct_cr_read_1(chp, wd3s_lun));

			SCSI_LOW_SETUP_PHASE(ti, PH_MSGIN);
			scsi_low_msgin(slp, ti, MSG_COMP);

			scsi_low_disconnected(slp, ti);
			return 1;

		case BSR_ACKREQ:	/* negate ACK */
			if (ct->sc_atten != 0)
				ct_attention(ct);

			ct_cr_write_1(chp, wd3s_cmd, WD3S_NEGATE_ACK);
			if ((ct_io_control & CT_FAST_INTR) != 0) {
				/*
				 * XXX: Should clear a pending interrupt and
				 * sync with a next interrupt!
				 */
				ct_catch_intr(ct);
			}
			return 1;

		case BSR_DISC:	/* disconnect */
			if (slp->sl_msgphase == MSGPH_NULL &&
			    (satgo & CT_SAT_GOING) != 0) {
				/*
				 * emulate disconnect msg
				 */
				SCSI_LOW_SETUP_PHASE(ti, PH_MSGIN);
				scsi_low_msgin(slp, ti, MSG_DISCON);
			}
			scsi_low_disconnected(slp, ti);
			return 1;

		default:
			break;
		}
	}

	ct_phase_error(ct, scsi_status);
	return 1;
}
static void
mips_init(void)
{
	int i, j;

	printf("entry: mips_init()\n");

#ifdef CFE
	/*
	 * Query DRAM memory map from CFE.
	 */
	physmem = 0;
	for (i = 0; i < 10; i += 2) {
		int result;
		uint64_t addr, len, type;

		result = cfe_enummem(i / 2, 0, &addr, &len, &type);
		if (result < 0) {
			BCM_TRACE("There is no phys memory for: %d\n", i);
			phys_avail[i] = phys_avail[i + 1] = 0;
			break;
		}
		if (type != CFE_MI_AVAILABLE) {
			BCM_TRACE("phys memory is not available: %d\n", i);
			continue;
		}

		phys_avail[i] = addr;
		if (i == 0 && addr == 0) {
			/*
			 * If this is the first physical memory segment
			 * probed from CFE, omit the region at the start of
			 * physical memory where the kernel has been loaded.
			 */
			phys_avail[i] += MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
		}

		BCM_TRACE("phys memory is available for: %d\n", i);
		BCM_TRACE(" => addr = %jx\n", addr);
		BCM_TRACE(" => len = %jd\n", len);

		phys_avail[i + 1] = addr + len;
		physmem += len;
	}

	BCM_TRACE("Total phys memory is : %ld\n", physmem);
	realmem = btoc(physmem);
#endif

	for (j = 0; j < i; j++)
		dump_avail[j] = phys_avail[j];

	physmem = realmem;

	init_param1();
	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
void
platform_start(__register_t a0 __unused, __register_t a1 __unused,
    __register_t a2 __unused, __register_t a3 __unused)
{
	uint64_t platform_counter_freq;
	int argc = 0, i;
	char **argv = NULL, **envp = NULL;
	vm_offset_t kernend;

	/*
	 * Clear the BSS and SBSS segments; this should be the first call
	 * in the function.
	 */
	kernend = (vm_offset_t)&end;
	memset(&edata, 0, kernend - (vm_offset_t)(&edata));

	mips_postboot_fixup();

	/* Initialize pcpu stuff */
	mips_pcpu0_init();

	/*
	 * Until there are some more sensible abstractions for uboot/redboot
	 * environment handling, we have to make this a compile-time hack.
	 * The existing code handles the uboot environment very incorrectly,
	 * so we should just ignore initialising the relevant pointers.
	 */
#ifndef AR71XX_ENV_UBOOT
	argc = a0;
	argv = (char **)a1;
	envp = (char **)a2;
#endif

	/*
	 * Protect ourselves from garbage in registers.
	 */
	if (MIPS_IS_VALID_PTR(envp)) {
		for (i = 0; envp[i]; i += 2) {
			if (strcmp(envp[i], "memsize") == 0)
				realmem = btoc(strtoul(envp[i + 1], NULL,
				    16));
		}
	}

	/*
	 * Just a wild guess.  RedBoot let us down and didn't report the
	 * memory size.
	 */
	if (realmem == 0)
		realmem = btoc(32 * 1024 * 1024);

	/*
	 * Allow a build-time override in case Redboot lies, or in other
	 * situations (eg where there's u-boot) where there isn't (yet) a
	 * convenient method of being told how much RAM is available.
	 *
	 * This happens on at least the Ubiquiti LS-SR71A board, where
	 * redboot says there's 16mb of RAM but in fact there's 32mb.
	 */
#if defined(AR71XX_REALMEM)
	realmem = btoc(AR71XX_REALMEM);
#endif

	/* phys_avail regions are in bytes */
	phys_avail[0] = MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
	phys_avail[1] = ctob(realmem);

	dump_avail[0] = phys_avail[0];
	dump_avail[1] = phys_avail[1] - phys_avail[0];

	physmem = realmem;

	/*
	 * The ns8250 uart code uses DELAY, so the ticker should be
	 * initialized before cninit.  And tick_init_params refers to hz,
	 * so init_param1 should be called first.
	 */
	init_param1();

	/* Detect the system type - this is needed for subsequent chipset-specific calls */
	ar71xx_detect_sys_type();
	ar71xx_detect_sys_frequency();

	platform_counter_freq = ar71xx_cpu_freq();
	mips_timer_init_params(platform_counter_freq, 1);
	cninit();
	init_static_kenv(boot1_env, sizeof(boot1_env));

	printf("CPU platform: %s\n", ar71xx_get_system_type());
	printf("CPU Frequency=%d MHz\n", u_ar71xx_cpu_freq / 1000000);
	printf("CPU DDR Frequency=%d MHz\n", u_ar71xx_ddr_freq / 1000000);
	printf("CPU AHB Frequency=%d MHz\n", u_ar71xx_ahb_freq / 1000000);
	printf("platform frequency: %lld\n", platform_counter_freq);
	printf("CPU reference clock: %d MHz\n", u_ar71xx_refclk / 1000000);
	printf("arguments: \n");
	printf(" a0 = %08x\n", a0);
	printf(" a1 = %08x\n", a1);
	printf(" a2 = %08x\n", a2);
	printf(" a3 = %08x\n", a3);

	/*
	 * XXX this code is very redboot specific.
	 */
	printf("Cmd line:");
	if (MIPS_IS_VALID_PTR(argv)) {
		for (i = 0; i < argc; i++) {
			printf(" %s", argv[i]);
			parse_argv(argv[i]);
		}
	} else
		printf("argv is invalid");
	printf("\n");

	printf("Environment:\n");
	if (MIPS_IS_VALID_PTR(envp)) {
		for (i = 0; envp[i]; i += 2) {
			printf(" %s = %s\n", envp[i], envp[i + 1]);
			setenv(envp[i], envp[i + 1]);
		}
	} else
		printf("envp is invalid\n");

	/* Redboot if_arge MAC address is in the environment */
	ar71xx_redboot_get_macaddr();

	init_param2(physmem);
	mips_cpu_init();
	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();

	/*
	 * Reset USB devices.
	 */
	ar71xx_init_usb_peripheral();

	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
static void
mips_init(void)
{
	int i, j, cfe_mem_idx, tmp;
	uint64_t maxmem;

#ifdef CFE_ENV
	cfe_env_init();
#endif

	TUNABLE_INT_FETCH("boothowto", &boothowto);

	if (boothowto & RB_VERBOSE)
		bootverbose++;

#ifdef MAXMEM
	tmp = MAXMEM;
#else
	tmp = 0;
#endif
	TUNABLE_INT_FETCH("hw.physmem", &tmp);
	maxmem = (uint64_t)tmp * 1024;

	/*
	 * XXX
	 * If we used vm_paddr_t consistently in pmap, etc., we could
	 * use 64-bit page numbers on !n64 systems, too, like i386
	 * does with PAE.
	 */
#if !defined(__mips_n64)
	if (maxmem == 0 || maxmem > 0xffffffff)
		maxmem = 0xffffffff;
#endif

#ifdef CFE
	/*
	 * Query DRAM memory map from CFE.
	 */
	physmem = 0;
	cfe_mem_idx = 0;
	for (i = 0; i < 10; i += 2) {
		int result;
		uint64_t addr, len, type;

		result = cfe_enummem(cfe_mem_idx++, 0, &addr, &len, &type);
		if (result < 0) {
			phys_avail[i] = phys_avail[i + 1] = 0;
			break;
		}

		KASSERT(type == CFE_MI_AVAILABLE,
		    ("CFE DRAM region is not available?"));

		if (bootverbose)
			printf("cfe_enummem: 0x%016jx/%ju.\n", addr, len);

		if (maxmem != 0) {
			if (addr >= maxmem) {
				printf("Ignoring %ju bytes of memory at "
				    "0x%jx that is above maxmem %dMB\n",
				    len, addr,
				    (int)(maxmem / (1024 * 1024)));
				continue;
			}

			if (addr + len > maxmem) {
				printf("Ignoring %ju bytes of memory "
				    "that is above maxmem %dMB\n",
				    (addr + len) - maxmem,
				    (int)(maxmem / (1024 * 1024)));
				len = maxmem - addr;
			}
		}

		phys_avail[i] = addr;
		if (i == 0 && addr == 0) {
			/*
			 * If this is the first physical memory segment
			 * probed from CFE, omit the region at the start of
			 * physical memory where the kernel has been loaded.
			 */
			phys_avail[i] += MIPS_KSEG0_TO_PHYS(kernel_kseg0_end);
		}
		phys_avail[i + 1] = addr + len;
		physmem += len;
	}

	realmem = btoc(physmem);
#endif

	for (j = 0; j < i; j++)
		dump_avail[j] = phys_avail[j];

	physmem = realmem;

	init_param1();
	init_param2(physmem);
	mips_cpu_init();

	/*
	 * Sibyte has a L1 data cache coherent with DMA.  This includes
	 * on-chip network interfaces as well as PCI/HyperTransport bus
	 * masters.
	 */
	cpuinfo.cache_coherent_dma = TRUE;

	/*
	 * XXX
	 * The kernel is running in 32-bit mode but the CFE is running in
	 * 64-bit mode.  So the SR_KX bit in the status register is turned
	 * on by the CFE every time we call into it - for e.g. CFE_CONSOLE.
	 *
	 * This means that if we get a TLB miss for any address above
	 * 0xc0000000 and the SR_KX bit is set, then we will end up in the
	 * XTLB exception vector.
	 *
	 * For now work around this by copying the TLB exception handling
	 * code to the XTLB exception vector.
	 */
	{
		bcopy(MipsTLBMiss, (void *)MIPS3_XTLB_MISS_EXC_VEC,
		    MipsTLBMissEnd - MipsTLBMiss);

		mips_icache_sync_all();
		mips_dcache_wbinv_all();
	}

	pmap_bootstrap();
	mips_proc0_init();
	mutex_init();
	kdb_init();
#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger");
#endif
}
void
vpanic(const char *fmt, va_list ap)
{
#ifdef SMP
	cpuset_t other_cpus;
#endif
	struct thread *td = curthread;
	int bootopt, newpanic;
	static char buf[256];

	spinlock_enter();

#ifdef SMP
	/*
	 * stop_cpus_hard(other_cpus) should prevent multiple CPUs from
	 * concurrently entering panic.  Only the winner will proceed
	 * further.
	 */
	if (panicstr == NULL && !kdb_active) {
		other_cpus = all_cpus;
		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
		stop_cpus_hard(other_cpus);
	}

	/*
	 * Ensure that the scheduler is stopped while panicking, even if
	 * panic has been entered from kdb.
	 */
	td->td_stopsched = 1;
#endif

	bootopt = RB_AUTOBOOT;
	newpanic = 0;
	if (panicstr)
		bootopt |= RB_NOSYNC;
	else {
		bootopt |= RB_DUMP;
		panicstr = fmt;
		newpanic = 1;
	}

	if (newpanic) {
		(void)vsnprintf(buf, sizeof(buf), fmt, ap);
		panicstr = buf;
		cngrab();
		printf("panic: %s\n", buf);
	} else {
		printf("panic: ");
		vprintf(fmt, ap);
		printf("\n");
	}
#ifdef SMP
	printf("cpuid = %d\n", PCPU_GET(cpuid));
#endif

#ifdef KDB
	if (newpanic && trace_on_panic)
		kdb_backtrace();
	if (debugger_on_panic)
		kdb_enter(KDB_WHY_PANIC, "panic");
#endif
	/*thread_lock(td); */
	td->td_flags |= TDF_INPANIC;
	/* thread_unlock(td); */
	if (!sync_on_panic)
		bootopt |= RB_NOSYNC;
	kern_reboot(bootopt);
}
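/*
 * For context: once vpanic() is the core implementation, the panic() entry
 * point shown earlier plausibly reduces to a thin va_list wrapper along
 * these lines.
 */
void
panic(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vpanic(fmt, ap);
	/* NOTREACHED: vpanic() does not return. */
}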
/*
 * The CheriABI version of sendsig(9) largely borrows from the MIPS version,
 * and it is important to keep them in sync.  It differs primarily in that
 * it must also be aware of user stack-handling ABIs, so is also sensitive
 * to our (fluctuating) design choices in how $stc and $sp interact.  The
 * current design uses ($stc + $sp) for stack-relative references, so early
 * on we have to calculate a 'relocated' version of $sp that we can then
 * use for MIPS-style access.
 *
 * This code, as with the CHERI-aware MIPS code, makes a privilege
 * determination in order to decide whether to trust the stack exposed by
 * the user code for the purposes of signal handling.  We must use the
 * alternative stack if there is any indication that using the user
 * thread's stack state might violate the userspace compartmentalisation
 * model.
 */
static void
cheriabi_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct proc *p;
	struct thread *td;
	struct trapframe *regs;
	struct sigacts *psp;
	struct sigframe_c sf, *sfp;
	uintptr_t stackbase;
	vm_offset_t sp;
	int cheri_is_sandboxed;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);

	regs = td->td_frame;

	/*
	 * In CheriABI, $sp is $stc relative, so calculate a relocation base
	 * that must be combined with regs->sp from this point onwards.
	 * Unfortunately, we won't retain bounds and permissions information
	 * (as is the case elsewhere in CheriABI).  While 'stackbase'
	 * suggests that $stc's offset isn't included, in practice it will
	 * be, although we may reasonably assume that it will be zero.
	 *
	 * If it turns out we will be delivering to the alternative signal
	 * stack, we'll recalculate stackbase later.
	 */
	CHERI_CLC(CHERI_CR_CTEMP0, CHERI_CR_KDC,
	    &td->td_pcb->pcb_regs.stc, 0);
	CHERI_CTOPTR(stackbase, CHERI_CR_CTEMP0, CHERI_CR_KDC);
	oonstack = sigonstack(stackbase + regs->sp);

	/*
	 * CHERI affects signal delivery in the following ways:
	 *
	 * (1) Additional capability-coprocessor state is exposed via
	 *     extensions to the context frame placed on the stack.
	 *
	 * (2) If the user $pcc doesn't include CHERI_PERM_SYSCALL, then we
	 *     consider user state to be 'sandboxed' and therefore to
	 *     require special delivery handling which includes a
	 *     domain-switch to the thread's context-switch domain.  (This
	 *     is done by cheri_sendsig()).
	 *
	 * (3) If an alternative signal stack is not defined, and we are in
	 *     a 'sandboxed' state, then we have two choices: (a) if the
	 *     signal is of type SA_SANDBOX_UNWIND, we will automatically
	 *     unwind the trusted stack by one frame; (b) otherwise, we
	 *     will terminate the process unconditionally.
	 */
	cheri_is_sandboxed = cheri_signal_sandboxed(td);

	/*
	 * We provide the ability to drop into the debugger in two
	 * different circumstances: (1) if the code running is sandboxed;
	 * and (2) if the fault is a CHERI protection fault.  Handle both
	 * here for the non-unwind case.  Do this before we rewrite any
	 * general-purpose or capability register state for the thread.
	 */
#if DDB
	if (cheri_is_sandboxed && security_cheri_debugger_on_sandbox_signal)
		kdb_enter(KDB_WHY_CHERI, "Signal delivery to CHERI sandbox");
	else if (sig == SIGPROT && security_cheri_debugger_on_sigprot)
		kdb_enter(KDB_WHY_CHERI,
		    "SIGPROT delivered outside sandbox");
#endif

	/*
	 * If a thread is running sandboxed, we can't rely on $sp which may
	 * not point at a valid stack in the ambient context, or even be
	 * maliciously manipulated.  We must therefore always use the
	 * alternative stack.  We are also therefore unable to tell whether
	 * we are on the alternative stack, so must clear 'oonstack' here.
	 *
	 * XXXRW: This requires significant further thinking; however, the
	 * net upshot is that it is not a good idea to do an
	 * object-capability invoke() from a signal handler, as with so
	 * many other things in life.
	 */
	if (cheri_is_sandboxed != 0)
		oonstack = 0;

	/* save user context */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
#if 0
	/*
	 * XXX-BD: stack_t type differs and we can't just fake a
	 * capability.  We don't restore the value so what purpose does it
	 * serve?
	 */
	sf.sf_uc.uc_stack = td->td_sigstk;
#endif
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_pc = regs->pc;
	sf.sf_uc.uc_mcontext.mullo = regs->mullo;
	sf.sf_uc.uc_mcontext.mulhi = regs->mulhi;
	cheri_capability_copy(&sf.sf_uc.uc_mcontext.mc_tls,
	    &td->td_md.md_tls_cap);
	sf.sf_uc.uc_mcontext.mc_regs[0] = UCONTEXT_MAGIC;	/* magic number */
	bcopy((void *)&regs->ast, (void *)&sf.sf_uc.uc_mcontext.mc_regs[1],
	    sizeof(sf.sf_uc.uc_mcontext.mc_regs) - sizeof(register_t));
	sf.sf_uc.uc_mcontext.mc_fpused = td->td_md.md_flags & MDTD_FPUSED;
#if defined(CPU_HAVEFPU)
	if (sf.sf_uc.uc_mcontext.mc_fpused) {
		/* if FPU has current state, save it first */
		if (td == PCPU_GET(fpcurthread))
			MipsSaveCurFPState(td);
		bcopy((void *)&td->td_frame->f0,
		    (void *)sf.sf_uc.uc_mcontext.mc_fpregs,
		    sizeof(sf.sf_uc.uc_mcontext.mc_fpregs));
	}
#endif
	/* XXXRW: sf.sf_uc.uc_mcontext.sr seems never to be set? */
	sf.sf_uc.uc_mcontext.cause = regs->cause;
	cheri_trapframe_to_cheriframe(&td->td_pcb->pcb_regs,
	    &sf.sf_uc.uc_mcontext.mc_cheriframe);

	/*
	 * Allocate and validate space for the signal handler context.  For
	 * CheriABI purposes, 'sp' from this point forward is relocated
	 * relative to any pertinent stack capability.  For an alternative
	 * signal context, we need to recalculate stackbase for later use
	 * in calculating a new $sp for the signal-handling context.
	 *
	 * XXXRW: It seems like it would be nice to do both the regular and
	 * alternative stack calculations in the same place.  However, we
	 * need oonstack sooner.  We should clean this up later.
	 */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		stackbase = (vm_offset_t)td->td_sigstk.ss_sp;
		sp = (vm_offset_t)(stackbase + td->td_sigstk.ss_size);
	} else {
		/*
		 * Signals delivered when a CHERI sandbox is present must
		 * be delivered on the alternative stack rather than a
		 * local one.  If an alternative stack isn't present, then
		 * terminate or risk leaking capabilities (and control) to
		 * the sandbox (or just crashing the sandbox).
		 */
		if (cheri_is_sandboxed) {
			mtx_unlock(&psp->ps_mtx);
			printf("pid %d, tid %d: signal in sandbox without "
			    "alternative stack defined\n",
			    td->td_proc->p_pid, td->td_tid);
			sigexit(td, SIGILL);
			/* NOTREACHED */
		}
		sp = (vm_offset_t)(stackbase + regs->sp);
	}
	sp -= sizeof(struct sigframe_c);
	/* For CHERI, keep the stack pointer capability aligned. */
	sp &= ~(CHERICAP_SIZE - 1);
	sfp = (void *)sp;

	/* Build the argument list for the signal handler. */
	regs->a0 = sig;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/*
		 * Signal handler installed with SA_SIGINFO.
		 *
		 * XXXRW: We would ideally synthesise these from the
		 * user-originated stack capability, rather than $kdc, to
		 * be on the safe side.
		 */
		cheri_capability_set(&regs->c3, CHERI_CAP_USER_DATA_PERMS,
		    (void *)(intptr_t)&sfp->sf_si, sizeof(sfp->sf_si), 0);
		cheri_capability_set(&regs->c4, CHERI_CAP_USER_DATA_PERMS,
		    (void *)(intptr_t)&sfp->sf_uc, sizeof(sfp->sf_uc), 0);
		/* sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; */

		/* fill siginfo structure */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = ksi->ksi_code;

		/*
		 * Write out badvaddr, but don't create a valid capability
		 * since that might allow privilege amplification.
		 *
		 * XXX-BD: This probably isn't the right method.
		 * XXX-BD: Do we want to set base or offset?
		 *
		 * XXXRW: I think there's some argument that anything
		 * receiving this signal is fairly privileged.  But we
		 * could generate a $ddc-relative (or $pcc-relative)
		 * capability, if possible.  (Using versions of $ddc and
		 * $pcc for the signal-handling context rather than that
		 * which caused the signal.)  I'd be tempted to deliver
		 * badvaddr as the offset of that capability.  If badvaddr
		 * is not in range, then we should just deliver an untagged
		 * NULL-derived version (perhaps)?
		 */
		*((uintptr_t *)&sf.sf_si.si_addr) =
		    (uintptr_t)(void *)regs->badvaddr;
	}
	/*
	 * XXX: No support for undocumented arguments to old style
	 * handlers.
	 */

	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyoutcap(&sf, (void *)sfp, sizeof(sf)) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		PROC_LOCK(p);
		printf("pid %d, tid %d: could not copy out sigframe\n",
		    td->td_proc->p_pid, td->td_tid);
		sigexit(td, SIGILL);
		/* NOTREACHED */
	}

	/*
	 * Re-acquire process locks necessary to access suitable pcb
	 * fields.  However, arguably, these operations should be atomic
	 * with the initial inspection of 'psp'.
	 */
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);

	/*
	 * Install CHERI signal-delivery register state for handler to run
	 * in.  As we don't install this in the CHERI frame on the user
	 * stack, it will (generally) be removed automatically on
	 * sigreturn().
	 */
	/* XXX-BD: this isn't quite right */
	cheri_sendsig(td);

	/*
	 * Note that $sp must be installed relative to $stc, so re-subtract
	 * the stack base here.
	 */
	regs->pc = (register_t)(intptr_t)catcher;
	regs->sp = (register_t)((intptr_t)sfp - stackbase);

	cheri_capability_copy(&regs->c12, &psp->ps_sigcap[_SIG_IDX(sig)]);
	cheri_capability_copy(&regs->c17,
	    &td->td_pcb->pcb_cherisignal.csig_sigcode);
}
static void
cheriabi_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct proc *p;
	struct thread *td;
	struct trapframe *regs;
	struct cheri_frame *capreg;
	struct sigacts *psp;
	struct sigframe_c sf, *sfp;
	vm_offset_t sp;
	int cheri_is_sandboxed;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);

	regs = td->td_frame;
	capreg = &td->td_pcb->pcb_cheriframe;
	oonstack = sigonstack(regs->sp);

	/*
	 * CHERI affects signal delivery in the following ways:
	 *
	 * (1) Additional capability-coprocessor state is exposed via
	 *     extensions to the context frame placed on the stack.
	 *
	 * (2) If the user $pcc doesn't include CHERI_PERM_SYSCALL, then we
	 *     consider user state to be 'sandboxed' and therefore to
	 *     require special delivery handling which includes a
	 *     domain-switch to the thread's context-switch domain.  (This
	 *     is done by cheri_sendsig()).
	 *
	 * (3) If an alternative signal stack is not defined, and we are in
	 *     a 'sandboxed' state, then we have two choices: (a) if the
	 *     signal is of type SA_SANDBOX_UNWIND, we will automatically
	 *     unwind the trusted stack by one frame; (b) otherwise, we
	 *     will terminate the process unconditionally.
	 */
	cheri_is_sandboxed = cheri_signal_sandboxed(td);

	/*
	 * We provide the ability to drop into the debugger in two
	 * different circumstances: (1) if the code running is sandboxed;
	 * and (2) if the fault is a CHERI protection fault.  Handle both
	 * here for the non-unwind case.  Do this before we rewrite any
	 * general-purpose or capability register state for the thread.
	 */
#if DDB
	if (cheri_is_sandboxed && security_cheri_debugger_on_sandbox_signal)
		kdb_enter(KDB_WHY_CHERI, "Signal delivery to CHERI sandbox");
	else if (sig == SIGPROT && security_cheri_debugger_on_sigprot)
		kdb_enter(KDB_WHY_CHERI,
		    "SIGPROT delivered outside sandbox");
#endif

	/*
	 * If a thread is running sandboxed, we can't rely on $sp which may
	 * not point at a valid stack in the ambient context, or even be
	 * maliciously manipulated.  We must therefore always use the
	 * alternative stack.  We are also therefore unable to tell whether
	 * we are on the alternative stack, so must clear 'oonstack' here.
	 *
	 * XXXRW: This requires significant further thinking; however, the
	 * net upshot is that it is not a good idea to do an
	 * object-capability invoke() from a signal handler, as with so
	 * many other things in life.
	 */
	if (cheri_is_sandboxed != 0)
		oonstack = 0;

	/* save user context */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
#if 0
	/*
	 * XXX-BD: stack_t type differs and we can't just fake a
	 * capability.  We don't restore the value so what purpose does it
	 * serve?
	 */
	sf.sf_uc.uc_stack = td->td_sigstk;
#endif
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_pc = regs->pc;
	sf.sf_uc.uc_mcontext.mullo = regs->mullo;
	sf.sf_uc.uc_mcontext.mulhi = regs->mulhi;
#if 0
	/* XXX-BD: what actually makes sense here? */
	sf.sf_uc.uc_mcontext.mc_tls = td->td_md.md_tls;
#endif
	sf.sf_uc.uc_mcontext.mc_regs[0] = UCONTEXT_MAGIC;	/* magic number */
	bcopy((void *)&regs->ast, (void *)&sf.sf_uc.uc_mcontext.mc_regs[1],
	    sizeof(sf.sf_uc.uc_mcontext.mc_regs) - sizeof(register_t));
	sf.sf_uc.uc_mcontext.mc_fpused = td->td_md.md_flags & MDTD_FPUSED;
	if (sf.sf_uc.uc_mcontext.mc_fpused) {
		/* if FPU has current state, save it first */
		if (td == PCPU_GET(fpcurthread))
			MipsSaveCurFPState(td);
		bcopy((void *)&td->td_frame->f0,
		    (void *)sf.sf_uc.uc_mcontext.mc_fpregs,
		    sizeof(sf.sf_uc.uc_mcontext.mc_fpregs));
	}
	/* XXXRW: sf.sf_uc.uc_mcontext.sr seems never to be set? */
	sf.sf_uc.uc_mcontext.cause = regs->cause;
	cheri_memcpy(&sf.sf_uc.uc_mcontext.mc_cheriframe,
	    &td->td_pcb->pcb_cheriframe, sizeof(struct cheri_frame));

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = (vm_offset_t)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
	} else {
		/*
		 * Signals delivered when a CHERI sandbox is present must
		 * be delivered on the alternative stack rather than a
		 * local one.  If an alternative stack isn't present, then
		 * terminate or risk leaking capabilities (and control) to
		 * the sandbox (or just crashing the sandbox).
		 */
		if (cheri_is_sandboxed) {
			mtx_unlock(&psp->ps_mtx);
			printf("pid %d, tid %d: signal in sandbox without "
			    "alternative stack defined\n",
			    td->td_proc->p_pid, td->td_tid);
			sigexit(td, SIGILL);
			/* NOTREACHED */
		}
		sp = (vm_offset_t)regs->sp;
	}
	sp -= sizeof(struct sigframe_c);
	/* For CHERI, keep the stack pointer capability aligned. */
	sp &= ~(CHERICAP_SIZE - 1);
	sfp = (void *)sp;

	/* Build the argument list for the signal handler. */
	regs->a0 = sig;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		cheri_capability_set(&capreg->cf_c3,
		    CHERI_CAP_USER_DATA_PERMS, CHERI_CAP_USER_DATA_OTYPE,
		    (void *)(intptr_t)&sfp->sf_si, sizeof(sfp->sf_si), 0);
		cheri_capability_set(&capreg->cf_c4,
		    CHERI_CAP_USER_DATA_PERMS, CHERI_CAP_USER_DATA_OTYPE,
		    (void *)(intptr_t)&sfp->sf_uc, sizeof(sfp->sf_uc), 0);
		/* sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; */

		/* fill siginfo structure */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = ksi->ksi_code;

		/*
		 * Write out badvaddr, but don't create a valid capability
		 * since that might allow privilege amplification.
		 *
		 * XXX-BD: This probably isn't the right method.
		 * XXX-BD: Do we want to set base or offset?
		 */
		*((uintptr_t *)&sf.sf_si.si_addr) =
		    (uintptr_t)(void *)regs->badvaddr;
	}
	/*
	 * XXX: No support for undocumented arguments to old style
	 * handlers.
	 */

	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyoutcap(&sf, sfp, sizeof(sf)) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		PROC_LOCK(p);
		sigexit(td, SIGILL);
		/* NOTREACHED */
	}

	/*
	 * Install CHERI signal-delivery register state for handler to run
	 * in.  As we don't install this in the CHERI frame on the user
	 * stack, it will (generally) be removed automatically on
	 * sigreturn().
	 */
	/* XXX-BD: this isn't quite right */
	cheri_sendsig(td);

	regs->pc = (register_t)(intptr_t)catcher;
	regs->sp = (register_t)(intptr_t)sfp;

	cheri_capability_copy(&capreg->cf_c12,
	    &psp->ps_sigcap[_SIG_IDX(sig)]);
	cheri_capability_copy(&capreg->cf_c17,
	    &td->td_pcb->pcb_cherisignal.csig_sigcode);

	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}