/* * Start up secondary cpus. Called from boot(). */ void thread_start_cpus(void) { char buf[64]; unsigned i; cpu_identify(buf, sizeof(buf)); kprintf("cpu0: %s\n", buf); cpu_startup_sem = sem_create("cpu_hatch", 0); thread_count_wchan = wchan_create("thread_count"); mainbus_start_cpus(); num_cpus = cpuarray_num(&allcpus); for (i=0; i<num_cpus - 1; i++) { P(cpu_startup_sem); } sem_destroy(cpu_startup_sem); if (i == 0) { kprintf("1 CPU online\n"); } else { kprintf("%d CPUs online\n", i + 1); } cpu_startup_sem = NULL; // Gross hack to deal with os/161 "idle" threads. Hardcode the thread count // to 1 so the inc/dec properly works in thread_[fork/exit]. The one thread // is the cpu0 boot thread (menu), which is the only thread that hasn't // exited yet. thread_count = 1; }
/*
 * Identify the 6502-family CPU variant (NMOS, CMOS or 65C816?) and
 * record the vendor, address size and known silicon quirks.
 */
static void cpu_ident(void)
{
	cpu_id = cpu_identify();

	if (cpu_id == 0) {
		/* Original NMOS part: collect the known hardware bugs. */
		if (cpu_rortest() == 0x02)
			cpu_bugs = "brk ror jmpff invread rmw";
		else
			cpu_bugs = "brk jmpff invread rmw";
		/* Check if BCD mode works - 0A v 10 */
		/* 2A03: Just in case anyone ever ports Fuzix to a NES */
		/* NOTE(review): this overwrite discards the "ror" flag set
		   above when BCD is also broken — confirm that is intended. */
		if (cpu_bcdtest() != 0x10)
			cpu_bugs = "brk jmpff invread rmw nobcd";
		cpu_flags = "nmos";
	} else if (cpu_id == 1) {
		/* Q : How to safely check for rockwell bit ops ? */
		/* Could also check here for HuC6820, Renesas 740 I guess ? */
		cpu_flags = "bcd cmos";
	} else if (cpu_id == 2) {
		/* 65C816: different vendor, 24-bit addressing. */
		cpu_vendor = 1;
		cpu_flags = "bcd cmos ai16";
		cpu_psize = 24;
	}
}
/*
 * Late-boot hook: size physical memory from the firmware memory regions,
 * initialize kernel VM submaps and the buffer cache, register the final
 * shutdown handler, and print the memory/CPU banner.
 * NOTE(review): `arg' is unused; presumably registered via SYSINIT — confirm.
 */
static void
cpu_startup(void *arg)
{
	vm_paddr_t physsz;
	int i;

	/* Sum all physical memory regions reported by the firmware. */
	physsz = 0;
	for (i = 0; i < sparc64_nmemreg; i++)
		physsz += sparc64_memreg[i].mr_size;
	printf("real memory = %lu (%lu MB)\n", physsz,
	    physsz / (1024 * 1024));
	/* Here realmem is stored in pages (physsz is bytes). */
	realmem = (long)physsz / PAGE_SIZE;

	vm_ksubmap_init(&kmi);
	bufinit();
	vm_pager_bufferinit();

	EVENTHANDLER_REGISTER(shutdown_final, sparc64_shutdown_final, NULL,
	    SHUTDOWN_PRI_LAST);

	printf("avail memory = %lu (%lu MB)\n",
	    cnt.v_free_count * PAGE_SIZE,
	    cnt.v_free_count / ((1024 * 1024) / PAGE_SIZE));

	if (bootverbose)
		printf("machine: %s\n", sparc64_model);

	/* Report CPU implementation, clock and id of the current CPU. */
	cpu_identify(rdpr(ver), PCPU_GET(clock), curcpu);
}
/*
 * Autoconf attach hook for the cpu device: print the ": " separator
 * after the device name, then let cpu_identify() report the CPU type.
 */
void
cpu_attach(struct device *parent, struct device *dev, void *aux)
{
	printf(": ");
	cpu_identify();
}
/*
 * Autoconf attach hook for the cpu device: print the ": " separator,
 * then report the CPU type for this device instance.
 */
static void
cpuattach(device_t parent, device_t self, void *aux)
{
	printf(": ");
	cpu_identify(self);
}
/*!
 * \brief Kinetis Start
 * \return None
 *
 * This function calls all of the needed startup routines and then
 * branches to the main process.  Runs before main(); statement order
 * is significant (watchdog must be disabled first).
 */
void start(void)
{
	/* Disable the watchdog timer */
	wdog_disable();

	/* Copy any vector or data sections that need to be in RAM */
	common_startup();

	/* Perform processor initialization */
	sysinit();

	printf("\n\n");

	/* Determine the last cause(s) of reset */
	outSRS();

	/* Determine specific Kinetis device and revision */
	cpu_identify();

	/* Jump to main process */
	main();

	/* No actions to perform after this so wait forever */
	while(1);
}
/*
 * Start one secondary (application) processor and synchronize its
 * tick/stick counters with the boot CPU, then allocate and fill its
 * per-CPU data.  The AP runs the mp_tramp trampoline and communicates
 * through the shared cpu_start_args structure; the busy-wait loops
 * below implement that handshake, so statement order is critical.
 */
static void
ap_start(phandle_t node, u_int mid, u_int cpu_impl)
{
	volatile struct cpu_start_args *csa;
	struct pcpu *pc;
	register_t s;
	vm_offset_t va;
	u_int cpuid;
	uint32_t clock;

	/* Refuse to start more CPUs than the kernel supports. */
	if (cpuids > mp_maxid)
		return;

	if (OF_getprop(node, "clock-frequency", &clock, sizeof(clock)) <= 0)
		panic("%s: couldn't determine CPU frequency", __func__);
	/*
	 * If this CPU's frequency differs from the boot CPU's, the tick
	 * counters aren't comparable across CPUs; switch to the stick.
	 */
	if (clock != PCPU_GET(clock))
		tick_et_use_stick = 1;

	csa = &cpu_start_args;
	csa->csa_state = 0;
	sun4u_startcpu(node, (void *)mp_tramp, 0);
	s = intr_disable();
	/* Hand the AP a tick value while it spins in the trampoline. */
	while (csa->csa_state != CPU_TICKSYNC)
		;
	membar(StoreLoad);
	csa->csa_tick = rd(tick);
	/* CPUs with a system tick register synchronize that one too. */
	if (cpu_impl == CPU_IMPL_SPARC64V ||
	    cpu_impl >= CPU_IMPL_ULTRASPARCIII) {
		while (csa->csa_state != CPU_STICKSYNC)
			;
		membar(StoreLoad);
		csa->csa_stick = rdstick();
	}
	/* Wait until the AP finishes its init, then clear the shared state. */
	while (csa->csa_state != CPU_INIT)
		;
	csa->csa_tick = csa->csa_stick = 0;
	intr_restore(s);

	/* Assign a logical cpuid and record the MID mapping. */
	cpuid = cpuids++;
	cpuid_to_mid[cpuid] = mid;
	cpu_identify(csa->csa_ver, clock, cpuid);

	/* Per-CPU area: the pcpu struct lives at the top of the allocation. */
	va = kmem_malloc(kernel_arena, PCPU_PAGES * PAGE_SIZE,
	    M_WAITOK | M_ZERO);
	pc = (struct pcpu *)(va + (PCPU_PAGES * PAGE_SIZE)) - 1;
	pcpu_init(pc, cpuid, sizeof(*pc));
	dpcpu_init((void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
	    M_WAITOK | M_ZERO), cpuid);
	pc->pc_addr = va;
	pc->pc_clock = clock;
	pc->pc_impl = cpu_impl;
	pc->pc_mid = mid;
	pc->pc_node = node;

	cache_init(pc);

	CPU_SET(cpuid, &all_cpus);
	intr_add_cpu(cpuid);
}
/*!
 * \brief Kinetis Start
 * \return None
 *
 * This function calls all of the needed startup routines and then
 * branches to the main process.  With DEBUG printing enabled it also
 * reports clock frequencies, the cause(s) of the last reset, and
 * whether the board booted from flash or SRAM.
 */
void start(void)
{
//#ifdef DEBUG
	/* Disable the watchdog */
	wdog_disable();
//#endif

	/* Copy the vector table, initialized data and __ramfunc code to RAM */
	common_startup();

	/* CPU initialization; set up the clock frequencies */
	sysinit();

#if (defined(DEBUG) && defined(DEBUG_PRINT))
	printf("\n\n\t\t野火kinetis核心板测试程序\n");
	printf("内核频率:%dMHz\t总线频率 :%dMHz\nflex频率:%dMHz \tflash频率:%dMHz\n\n",\
		core_clk_mhz,core_clk_mhz/(mcg_div.bus_div+1),core_clk_mhz/(mcg_div.flex_div+1),core_clk_mhz/(mcg_div.flash_div+1));

	/* Determine the last cause(s) of reset */
	if (MC_SRSH & MC_SRSH_SW_MASK)
		printf("Software Reset\n");
	if (MC_SRSH & MC_SRSH_LOCKUP_MASK)
		printf("Core Lockup Event Reset\n");
	if (MC_SRSH & MC_SRSH_JTAG_MASK)
		printf("JTAG Reset\n");
	if (MC_SRSL & MC_SRSL_POR_MASK)
		printf("Power-on Reset\n");
	if (MC_SRSL & MC_SRSL_PIN_MASK)
		printf("External Pin Reset\n");
	if (MC_SRSL & MC_SRSL_COP_MASK)
		printf("Watchdog(COP) Reset\n");
	if (MC_SRSL & MC_SRSL_LOC_MASK)
		printf("Loss of Clock Reset\n");
	if (MC_SRSL & MC_SRSL_LVD_MASK)
		printf("Low-voltage Detect Reset\n");
	if (MC_SRSL & MC_SRSL_WAKEUP_MASK)
		printf("LLWU Reset\n");

	/* These two arrays are defined in the linker (ICF) file */
	extern uint32 __VECTOR_TABLE[];
	extern uint32 __VECTOR_RAM[];

	/* If the vector table had to be copied, we booted from ROM;
	   otherwise we are running from RAM already. */
	printf("\n野火Kinetis开发板启动方式:");
	if (__VECTOR_RAM != __VECTOR_TABLE)
		printf("flash启动\n");
	else
		printf("SRAM启动\n");

	/* Determine specific Kinetis device and revision */
	cpu_identify();
#endif //DUBUG && DEBUG_PRINT

	/* Jump into main() */
	main();

	/* Make sure the CPU never falls off the end */
	while(1);
}
/*
 * Autoconf attach hook for the cpu device: mark that a CPU has
 * attached, then report the CPU type.
 * NOTE(review): `__attached' is a reserved identifier (leading double
 * underscore); presumably checked elsewhere in this file — confirm.
 */
static void
cpuattach(struct device *parent, struct device *dev, void *aux)
{
	printf(": ");
	__attached = 1;
	cpu_identify();
}
/*
 * Autoconf attach hook for the cpu device: record that the CPU has
 * attached, then report the CPU type.
 */
void
cpuattach(device_t parent, device_t self, void *aux)
{
	aprint_normal(": ");
	cpu_attached = 1;
	cpu_identify();
}
/*
 * Return a pointer to a process-wide cached cpu_id_t, identifying the
 * CPU on first use.  If identification fails the cached structure is
 * zeroed, and that zeroed result is what later callers see.
 * NOTE(review): the lazy init is not thread-safe — confirm callers.
 */
struct cpu_id_t* get_cached_cpuid(void)
{
	static int initialized = 0;
	static struct cpu_id_t id;

	if (!initialized) {
		if (cpu_identify(NULL, &id))
			memset(&id, 0, sizeof(id));
		initialized = 1;
	}
	return &id;
}
/*
 * Autoconf attach hook for the cpu device: link the device_t with the
 * current CPU's cpu_info, then report the CPU type.
 */
static void
cpuattach(device_t parent, device_t self, void *aux)
{
	struct cpu_info * const ci = curcpu();

	/* Cross-link device and per-CPU state. */
	ci->ci_dev = self;
	self->dv_private = ci;

	aprint_normal(": ");
	cpu_identify(self);
}
/*
 * Autoconf attach hook for the cpu device: print the CPU frequency and
 * derived timing parameters, then report the CPU type.
 */
static void
cpu_attach(struct device *parent, struct device *self, void *aux)
{
	printf(": %lu.%02luMHz (hz cycles = %lu, delay divisor = %lu)\n",
	    curcpu()->ci_cpu_freq / 1000000,
	    (curcpu()->ci_cpu_freq % 1000000) / 10000,
	    curcpu()->ci_cycles_per_hz, curcpu()->ci_divisor_delay);
	printf("%s: ", self->dv_xname);
	cpu_identify();
}
/*
 * Identify the CPU variant and record the vendor for the known ids
 * (0, 1, 2).  Any other id leaves cpu_vendor untouched.
 */
static void cpu_ident(void)
{
	cpu_id = cpu_identify();

	if (cpu_id == 1)
		cpu_vendor = 1;
	else if (cpu_id == 0 || cpu_id == 2)
		cpu_vendor = 0;
}
/*
 * New CPUs come here once MD initialization is finished. curthread
 * and curcpu should already be initialized.
 *
 * Other than clearing thread_start_cpus() to continue, we don't need
 * to do anything. The startup thread can just exit; we only need it
 * to be able to get into thread_switch() properly.
 */
void
cpu_hatch(unsigned software_number)
{
	char buf[64];

	/* MD code must already have set up curcpu/curthread for us. */
	KASSERT(curcpu != NULL);
	KASSERT(curthread != NULL);
	KASSERT(curcpu->c_number == software_number);

	/* Enable interrupts on this CPU. */
	spl0();
	cpu_identify(buf, sizeof(buf));
	/* NOTE(review): buf is filled but never printed here. */

	/* Signal thread_start_cpus() that we're up, then get out of the way. */
	V(cpu_startup_sem);
	thread_exit();
}
/*
 * Autoconf attach hook for the cpu device: link the device_t with the
 * current CPU's cpu_info, report the CPU type, and hook in a platform
 * idle routine when one is provided.
 */
static void
cpuattach(device_t parent, device_t self, void *aux)
{
	struct cpu_info * const ci = curcpu();

	/* Cross-link device and per-CPU state. */
	ci->ci_dev = self;
	self->dv_private = ci;

	aprint_normal(": ");
	cpu_identify(self);

	/* install CPU specific idle routine if any. */
	if (platform.cpu_idle != NULL)
		mips_locoresw.lsw_cpu_idle = platform.cpu_idle;
}
/*
 * Late-boot hook: register the tick timecounter, size physical memory,
 * initialize kernel VM submaps and the buffer cache, register the final
 * shutdown handler, and print the memory banner.
 * NOTE(review): `arg' is unused; presumably registered via SYSINIT — confirm.
 */
static void
cpu_startup(void *arg)
{
	vm_paddr_t physsz;
	int i;

	/* Set up the %tick register as a timecounter. */
	tick_tc.tc_get_timecount = tick_get_timecount;
	tick_tc.tc_poll_pps = NULL;
	tick_tc.tc_counter_mask = ~0u;
	tick_tc.tc_frequency = tick_freq;
	tick_tc.tc_name = "tick";
	tick_tc.tc_quality = UP_TICK_QUALITY;
#ifdef SMP
	/*
	 * We do not know if each CPU's tick counter is synchronized.
	 */
	if (cpu_mp_probe())
		tick_tc.tc_quality = MP_TICK_QUALITY;
#endif

	tc_init(&tick_tc);

	/* Sum all physical memory regions reported by the firmware. */
	physsz = 0;
	for (i = 0; i < sparc64_nmemreg; i++)
		physsz += sparc64_memreg[i].mr_size;
	printf("real memory = %lu (%lu MB)\n", physsz,
	    physsz / (1024 * 1024));
	/* Here realmem is stored in bytes. */
	realmem = (long)physsz;

	vm_ksubmap_init(&kmi);
	bufinit();
	vm_pager_bufferinit();

	EVENTHANDLER_REGISTER(shutdown_final, sparc64_shutdown_final, NULL,
	    SHUTDOWN_PRI_LAST);

	printf("avail memory = %lu (%lu MB)\n",
	    cnt.v_free_count * PAGE_SIZE,
	    cnt.v_free_count / ((1024 * 1024) / PAGE_SIZE));

	if (bootverbose)
		printf("machine: %s\n", sparc64_model);

#ifdef notyet
	cpu_identify(rdpr(ver), tick_freq, PCPU_GET(cpuid));
#endif
}
/** Initialize CPUs
 *
 * Initialize kernel CPUs support.
 *
 * On SMP only the first CPU to become active (config.cpu_active == 1)
 * allocates and zeroes the global cpus[] array and its run queues;
 * every CPU then initializes its own CPU-local structure and runs
 * identification and architecture-specific setup.
 */
void cpu_init(void) {
#ifdef CONFIG_SMP
	if (config.cpu_active == 1) {
#endif /* CONFIG_SMP */

		cpus = (cpu_t *) malloc(sizeof(cpu_t) * config.cpu_count,
		    FRAME_ATOMIC);
		if (!cpus)
			panic("Cannot allocate CPU structures.");

		/* Initialize everything */
		memsetb(cpus, sizeof(cpu_t) * config.cpu_count, 0);

		size_t i;
		for (i = 0; i < config.cpu_count; i++) {
			/* Per-CPU kernel stack and id. */
			cpus[i].stack = (uint8_t *) frame_alloc(STACK_FRAMES,
			    FRAME_LOWMEM | FRAME_KA | FRAME_ATOMIC);
			cpus[i].id = i;

			irq_spinlock_initialize(&cpus[i].lock, "cpus[].lock");

			/* One run queue per priority level. */
			unsigned int j;
			for (j = 0; j < RQ_COUNT; j++) {
				irq_spinlock_initialize(&cpus[i].rq[j].lock, "cpus[].rq[].lock");
				list_initialize(&cpus[i].rq[j].rq);
			}
		}

#ifdef CONFIG_SMP
	}
#endif /* CONFIG_SMP */

	/* Point the CPU-local pointer at this CPU's slot and mark it live. */
	CPU = &cpus[config.cpu_active - 1];

	CPU->active = true;
	CPU->tlb_active = true;

	CPU->idle = false;
	CPU->last_cycle = get_cycle();
	CPU->idle_cycles = 0;
	CPU->busy_cycles = 0;

	cpu_identify();
	cpu_arch_init();
}
/*
 * Return the number of CPU cores available, or -1 when it cannot be
 * determined.  Tries, in order: sysconf(_SC_NPROCESSORS_ONLN), the
 * libcpuid library, and finally parsing /proc/cpuinfo.
 *
 * Fixes relative to the previous version of the /proc/cpuinfo branch:
 *  - the FILE* returned by fopen() was never assigned to `f', so the
 *    subsequent feof(f)/fgets(...) read an uninitialized pointer (UB)
 *    and the open handle leaked;
 *  - strncmp() compared only 8 of the 9 characters of "cpu cores";
 *  - the feof()-controlled loop ignored fgets() failures; scan the
 *    stream with fgets() directly instead;
 *  - sscanf %d now targets a plain int rather than INTVAL, whose
 *    width may differ.
 */
INTVAL
Parrot_get_num_cpus(SHIM_INTERP)
{
    INTVAL nprocs = -1;
#ifdef _SC_NPROCESSORS_ONLN
    nprocs = sysconf(_SC_NPROCESSORS_ONLN);
#elif defined(PARROT_HAS_HEADER_LIBCPUID)
    struct cpu_raw_data_t raw;
    struct cpu_id_t data;
    if (!cpuid_present()) {
        printf("cpuid_present failed\n");
        exit(EXIT_FAILURE);
    }
    if (cpuid_get_raw_data(&raw) < 0) {
        printf("cpuid_get_raw_data failed\n");
        printf("Error: %s\n", cpuid_error());
        exit(EXIT_FAILURE);
    }
    if (cpu_identify(&raw, &data) < 0) {
        printf("cpu_identify failed\n");
        printf("Error: %s\n", cpuid_error());
        exit(EXIT_FAILURE);
    }
    nprocs = data.num_cores;
#else
    FILE *f;
    char line[128];
    int cores;

    f = fopen("/proc/cpuinfo", "rb");
    if (!f)
        return nprocs;
    while (fgets(line, sizeof line, f) != NULL) {
        if (strlen(line) == 1)
            continue;
        if (strncmp(line, "cpu cores", 9) == 0) {
            if (sscanf(line, "cpu cores\t: %d", &cores) == 1)
                nprocs = cores;
            break;
        }
    }
    fclose(f);
#endif
    return nprocs;
}
/*
 * Start up secondary cpus. Called from boot().
 *
 * Prints the boot CPU's identification, kicks off the secondary CPUs,
 * and blocks on a semaphore until each one signals from its hatch path.
 */
void
thread_start_cpus(void)
{
	char desc[64];
	unsigned n;

	/* Describe the boot CPU first. */
	cpu_identify(desc, sizeof(desc));
	kprintf("cpu0: %s\n", desc);

	cpu_startup_sem = sem_create("cpu_hatch", 0);
	mainbus_start_cpus();

	/* One V() arrives per secondary CPU; cpu0 is already running. */
	for (n = 0; n < cpuarray_num(&allcpus) - 1; n++) {
		P(cpu_startup_sem);
	}
	sem_destroy(cpu_startup_sem);
	cpu_startup_sem = NULL;
}
/*!
 * \brief Kinetis Start
 * \return None
 *
 * This function calls all of the needed startup routines, reports the
 * cause(s) of the last reset, and then branches to the main process.
 * Runs before main(); statement order is significant.
 */
void start(void)
{
	/* Disable the watchdog timer */
	wdog_disable();

	/* Copy any vector or data sections that need to be in RAM */
	common_startup();

	/* Perform processor initialization */
	sysinit();

	printf("\n\n");

	/* Determine the last cause(s) of reset */
	if (MC_SRSH & MC_SRSH_SW_MASK)
		printf("Software Reset\n");
	if (MC_SRSH & MC_SRSH_LOCKUP_MASK)
		printf("Core Lockup Event Reset\n");
	if (MC_SRSH & MC_SRSH_JTAG_MASK)
		printf("JTAG Reset\n");
	if (MC_SRSL & MC_SRSL_POR_MASK)
		printf("Power-on Reset\n");
	if (MC_SRSL & MC_SRSL_PIN_MASK)
		printf("External Pin Reset\n");
	if (MC_SRSL & MC_SRSL_COP_MASK)
		printf("Watchdog(COP) Reset\n");
	if (MC_SRSL & MC_SRSL_LOC_MASK)
		printf("Loss of Clock Reset\n");
	if (MC_SRSL & MC_SRSL_LVD_MASK)
		printf("Low-voltage Detect Reset\n");
	if (MC_SRSL & MC_SRSL_WAKEUP_MASK)
		printf("LLWU Reset\n");

	/* Determine specific Kinetis device and revision */
	cpu_identify();

	/* Jump to main process */
	main();

	/* No actions to perform after this so wait forever */
	while(1);
}
/*
 * Description: Kinetis startup code
 * Returns: nothing
 *
 * This function calls all of the needed startup routines and then
 * branches to the main process.
 */
void start(void)
{
	/* Disable the watchdog timer */
	wdog_disable();

	/* Copy the needed vector table and data sections into RAM */
	common_startup();

	/* Perform processor initialization */
	sysinit();

#if(defined(DEBUG_PRINT))
	/* Report the cause(s) of the last reset. */
	if (MC_SRSH & MC_SRSH_SW_MASK)
		printf("Software Reset\r\n");
	if (MC_SRSH & MC_SRSH_LOCKUP_MASK)
		printf("Core Lockup Event Reset\r\n");
	if (MC_SRSH & MC_SRSH_JTAG_MASK)
		printf("JTAG Reset\r\n");
	if (MC_SRSL & MC_SRSL_POR_MASK)
		printf("Power-on Reset\r\n");
	if (MC_SRSL & MC_SRSL_PIN_MASK)
		printf("External Pin Reset\r\n");
	if (MC_SRSL & MC_SRSL_COP_MASK)
		printf("Watchdog(COP) Reset\r\n");
	if (MC_SRSL & MC_SRSL_LOC_MASK)
		printf("Loss of Clock Reset\r\n");
	if (MC_SRSL & MC_SRSL_LVD_MASK)
		printf("Low-voltage Detect Reset\r\n");
	if (MC_SRSL & MC_SRSL_WAKEUP_MASK)
		printf("LLWU Reset\r\n");

	/* Determine specific Kinetis device and revision */
	cpu_identify();
#endif

	/* Enter the main function */
	main();

	/* Wait forever */
	while(1);
}
/*
 * cpuid_tool entry point: parse the command line, optionally read or
 * dump raw CPUID data, print a full decoded CPU report and/or answer
 * individual queries, as directed by the global option flags set by
 * parse_cmdline().
 * NOTE(review): two string literals below were split across lines in
 * the extracted source and have been reconstructed to match their
 * parallel siblings ("Cannot deserialize raw data from " and the other
 * "assoc." report lines) — confirm against upstream cpuid_tool.c.
 */
int main(int argc, char** argv)
{
	int parseres = parse_cmdline(argc, argv);
	int i, readres, writeres;
	int only_clock_queries;
	struct cpu_raw_data_t raw;
	struct cpu_id_t data;

	if (parseres != 1)
		return parseres;

	/* In quiet mode, disable libcpuid warning messages: */
	if (need_quiet)
		cpuid_set_warn_function(NULL);

	cpuid_set_verbosiness_level(verbose_level);

	/* Redirect output, if necessary: */
	if (strcmp(out_file, "") && strcmp(out_file, "-")) {
		fout = fopen(out_file, "wt");
		if (!fout) {
			if (!need_quiet)
				fprintf(stderr, "Cannot open `%s' for writing!\n", out_file);
			return -1;
		}
		atexit(close_out);
	} else {
		fout = stdout;
	}

	/* If requested, print library version: */
	if (need_version)
		fprintf(fout, "%s\n", cpuid_lib_version());

	if (need_input) {
		/* We have a request to input raw CPUID data from file: */
		if (!strcmp(raw_data_file, "-"))
			/* Input from stdin */
			readres = cpuid_deserialize_raw_data(&raw, "");
		else
			/* Input from file */
			readres = cpuid_deserialize_raw_data(&raw, raw_data_file);
		if (readres < 0) {
			if (!need_quiet) {
				fprintf(stderr, "Cannot deserialize raw data from ");
				if (!strcmp(raw_data_file, "-"))
					fprintf(stderr, "stdin\n");
				else
					fprintf(stderr, "file `%s'\n", raw_data_file);
				/* Print the error message */
				fprintf(stderr, "Error: %s\n", cpuid_error());
			}
			return -1;
		}
	} else {
		if (check_need_raw_data()) {
			/* Try to obtain raw CPUID data from the CPU: */
			readres = cpuid_get_raw_data(&raw);
			if (readres < 0) {
				if (!need_quiet) {
					fprintf(stderr, "Cannot obtain raw CPU data!\n");
					fprintf(stderr, "Error: %s\n", cpuid_error());
				}
				return -1;
			}
		}
	}

	/* Need to dump raw CPUID data to file: */
	if (need_output) {
		if (verbose_level >= 1)
			printf("Writing raw CPUID dump to `%s'\n", raw_data_file);
		if (!strcmp(raw_data_file, "-"))
			/* Serialize to stdout */
			writeres = cpuid_serialize_raw_data(&raw, "");
		else
			/* Serialize to file */
			writeres = cpuid_serialize_raw_data(&raw, raw_data_file);
		if (writeres < 0) {
			if (!need_quiet) {
				fprintf(stderr, "Cannot serialize raw data to ");
				if (!strcmp(raw_data_file, "-"))
					fprintf(stderr, "stdout\n");
				else
					fprintf(stderr, "file `%s'\n", raw_data_file);
				/* Print the error message */
				fprintf(stderr, "Error: %s\n", cpuid_error());
			}
			return -1;
		}
	}
	if (need_report) {
		if (verbose_level >= 1) {
			printf("Writing decoded CPU report to `%s'\n", out_file);
		}
		/* Write a thorough report of cpu_id_t structure to output (usually stdout) */
		fprintf(fout, "CPUID is present\n");
		/*
		 * Try CPU identification
		 * (this fill the `data' structure with decoded CPU features)
		 */
		if (cpu_identify(&raw, &data) < 0)
			fprintf(fout, "Error identifying the CPU: %s\n", cpuid_error());

		/* OK, now write what we have in `data'...: */
		fprintf(fout, "CPU Info:\n------------------\n");
		fprintf(fout, " vendor_str : `%s'\n", data.vendor_str);
		fprintf(fout, " vendor id : %d\n", (int) data.vendor);
		fprintf(fout, " brand_str : `%s'\n", data.brand_str);
		fprintf(fout, " family : %d (%02Xh)\n", data.family, data.family);
		fprintf(fout, " model : %d (%02Xh)\n", data.model, data.model);
		fprintf(fout, " stepping : %d (%02Xh)\n", data.stepping, data.stepping);
		fprintf(fout, " ext_family : %d (%02Xh)\n", data.ext_family, data.ext_family);
		fprintf(fout, " ext_model : %d (%02Xh)\n", data.ext_model, data.ext_model);
		fprintf(fout, " num_cores : %d\n", data.num_cores);
		fprintf(fout, " num_logical: %d\n", data.num_logical_cpus);
		fprintf(fout, " tot_logical: %d\n", data.total_logical_cpus);
		fprintf(fout, " L1 D cache : %d KB\n", data.l1_data_cache);
		fprintf(fout, " L1 I cache : %d KB\n", data.l1_instruction_cache);
		fprintf(fout, " L2 cache : %d KB\n", data.l2_cache);
		fprintf(fout, " L3 cache : %d KB\n", data.l3_cache);
		fprintf(fout, " L4 cache : %d KB\n", data.l4_cache);
		fprintf(fout, " L1D assoc. : %d-way\n", data.l1_assoc);
		fprintf(fout, " L2 assoc. : %d-way\n", data.l2_assoc);
		fprintf(fout, " L3 assoc. : %d-way\n", data.l3_assoc);
		fprintf(fout, " L4 assoc. : %d-way\n", data.l4_assoc);
		fprintf(fout, " L1D line sz: %d bytes\n", data.l1_cacheline);
		fprintf(fout, " L2 line sz : %d bytes\n", data.l2_cacheline);
		fprintf(fout, " L3 line sz : %d bytes\n", data.l3_cacheline);
		fprintf(fout, " L4 line sz : %d bytes\n", data.l4_cacheline);
		fprintf(fout, " SSE units : %d bits (%s)\n", data.sse_size,
		    data.detection_hints[CPU_HINT_SSE_SIZE_AUTH] ? "authoritative" : "non-authoritative");
		fprintf(fout, " code name : `%s'\n", data.cpu_codename);
		fprintf(fout, " features :");
		/*
		 * Here we enumerate all CPU feature bits, and when a feature
		 * is present output its name:
		 */
		for (i = 0; i < NUM_CPU_FEATURES; i++)
			if (data.flags[i])
				fprintf(fout, " %s", cpu_feature_str(i));
		fprintf(fout, "\n");

		/* Is CPU clock info requested? */
		if (need_clockreport) {
			if (need_timed_clockreport) {
				/* Here we use the RDTSC-based routine */
				fprintf(fout, " cpu clock : %d MHz\n",
				    cpu_clock_measure(400, 1));
			} else {
				/* Here we use the OS-provided info */
				fprintf(fout, " cpu clock : %d MHz\n",
				    cpu_clock());
			}
		}
	}
	/*
	 * Check if we have any queries to process.
	 * We have to handle the case when `--clock' or `--clock-rdtsc' options
	 * are present.
	 * If in report mode, this will generate spurious output after the
	 * report, if not handled explicitly.
	 */
	only_clock_queries = 1;
	for (i = 0; i < num_requests; i++)
		if (requests[i] != NEED_CLOCK && requests[i] != NEED_CLOCK_RDTSC) {
			only_clock_queries = 0;
			break;
		}
	/* OK, process all queries. */
	if ((!need_report || !only_clock_queries) && num_requests > 0) {
		/* Identify the CPU. Make it do cpuid_get_raw_data() itself */
		if (check_need_raw_data() && cpu_identify(&raw, &data) < 0) {
			if (!need_quiet)
				fprintf(stderr,
				    "Error identifying the CPU: %s\n", cpuid_error());
			return -1;
		}
		for (i = 0; i < num_requests; i++)
			print_info(requests[i], &raw, &data);
	}
	if (need_cpulist) {
		print_cpulist();
	}

	return 0;
}
/*
 * Attach an RMI XL CPU core: print its clock/timing parameters and
 * identification, then attach one cpu child device per hardware thread
 * that the firmware's userapp_cpu_map enables, tracking which threads
 * end up enabled or disabled.
 * NOTE(review): the string literal in the disabled-thread list print
 * was split across lines in the extracted source; reconstructed as
 * (d==0) ? "" : "," — confirm against upstream.
 */
static void
cpucore_rmixl_attach(device_t parent, device_t self, void *aux)
{
	struct cpucore_softc * const sc = device_private(self);
	struct cpunode_attach_args *na = aux;
	struct cpucore_attach_args ca;
	u_int nthreads;
	struct rmixl_config *rcp = &rmixl_configuration;

	sc->sc_dev = self;
	sc->sc_core = na->na_core;
	KASSERT(sc->sc_hatched == false);

#if 0
#ifdef MULTIPROCESSOR
	/*
	 * Create the TLB structure needed - one per core and core0 uses the
	 * default one for the system.
	 */
	if (sc->sc_core == 0) {
		sc->sc_tlbinfo = &pmap_tlb0_info;
	} else {
		const vaddr_t va = (vaddr_t)&sc->sc_tlbinfo0;
		paddr_t pa;

		if (! pmap_extract(pmap_kernel(), va, &pa))
			panic("%s: pmap_extract fail, va %#"PRIxVADDR,
			    __func__, va);
#ifdef _LP64
		sc->sc_tlbinfo = (struct pmap_tlb_info *)
		    MIPS_PHYS_TO_XKPHYS_CACHED(pa);
#else
		sc->sc_tlbinfo = (struct pmap_tlb_info *)
		    MIPS_PHYS_TO_KSEG0(pa);
#endif
		pmap_tlb_info_init(sc->sc_tlbinfo);
	}
#endif
#endif

	aprint_normal("\n");
	aprint_normal_dev(self, "%lu.%02luMHz (hz cycles = %lu, "
	    "delay divisor = %lu)\n",
	    curcpu()->ci_cpu_freq / 1000000,
	    (curcpu()->ci_cpu_freq % 1000000) / 10000,
	    curcpu()->ci_cycles_per_hz, curcpu()->ci_divisor_delay);
	aprint_normal("%s: ", device_xname(self));
	cpu_identify(self);

	nthreads = MIPS_CIDFL_RMI_NTHREADS(mips_options.mips_cpu->cpu_cidflags);
	aprint_normal_dev(self, "%d %s on core\n", nthreads,
	    nthreads == 1 ? "thread" : "threads");

	/*
	 * Attach CPU (RMI thread contexts) devices
	 * according to userapp_cpu_map bitmask.
	 */
	u_int thread_mask = (1 << nthreads) - 1;
	u_int core_shft = sc->sc_core * nthreads;
	u_int threads_enb =
	    (u_int)(rcp->rc_psb_info.userapp_cpu_map >> core_shft) & thread_mask;
	u_int threads_dis = (~threads_enb) & thread_mask;

	sc->sc_threads_dis = threads_dis;
	if (threads_dis != 0) {
		/* List the firmware-disabled thread numbers. */
		aprint_normal_dev(self, "threads");
		u_int d = threads_dis;
		while (d != 0) {
			const u_int t = ffs(d) - 1;
			d ^= (1 << t);
			aprint_normal(" %d%s", t, (d==0) ? "" : ",");
		}
		aprint_normal(" offline (disabled by firmware)\n");
	}

	/* Try to attach a cpu device for each enabled thread context. */
	u_int threads_try_attach = threads_enb;
	while (threads_try_attach != 0) {
		const u_int t = ffs(threads_try_attach) - 1;
		const u_int bit = 1 << t;
		threads_try_attach ^= bit;

		ca.ca_name = "cpu";
		ca.ca_thread = t;
		ca.ca_core = sc->sc_core;
		if (config_found(self, &ca, cpucore_rmixl_print) == NULL) {
			/*
			 * thread did not attach, e.g. not configured
			 * arrange to have it disabled in THREADEN PCR
			 */
			threads_enb ^= bit;
			threads_dis |= bit;
		}
	}
	sc->sc_threads_enb = threads_enb;
	sc->sc_threads_dis = threads_dis;

	/*
	 * when attaching the core of the primary cpu,
	 * do the post-running initialization here
	 */
	if (sc->sc_core == RMIXL_CPU_CORE((curcpu()->ci_cpuid)))
		cpucore_rmixl_run(self);
}
/*
 * Final boot stage on the boot CPU (BSP): identify the CPU, set up
 * CPU-local bookkeeping, release the boot-image processes, start the
 * clock and FPU, and switch to userspace.  Does not return.
 */
void bsp_finish_booting(void)
{
	int i;
#if SPROFILE
	sprofiling = 0;      /* we're not profiling until instructed to */
#endif /* SPROFILE */
	cprof_procs_no = 0;  /* init nr of hash table slots used */

	cpu_identify();

	vm_running = 0;
	krandom.random_sources = RANDOM_SOURCES;
	krandom.random_elements = RANDOM_ELEMENTS;

	/* MINIX is now ready. All boot image processes are on the ready queue.
	 * Return to the assembly code to start running the current process.
	 */

	/* it should point somewhere */
	get_cpulocal_var(bill_ptr) = get_cpulocal_var_ptr(idle_proc);
	get_cpulocal_var(proc_ptr) = get_cpulocal_var_ptr(idle_proc);
	announce();				/* print MINIX startup banner */

	/*
	 * we have access to the cpu local run queue, only now schedule the processes.
	 * We ignore the slots for the former kernel tasks
	 */
	for (i=0; i < NR_BOOT_PROCS - NR_TASKS; i++) {
		RTS_UNSET(proc_addr(i), RTS_PROC_STOP);
	}

	/*
	 * enable timer interrupts and clock task on the boot CPU
	 */
	if (boot_cpu_init_timer(system_hz)) {
		panic("FATAL : failed to initialize timer interrupts, "
				"cannot continue without any clock source!");
	}

	fpu_init();

	/* Warnings for sanity checks that take time. These warnings are printed
	 * so it's a clear warning no full release should be done with them
	 * enabled.
	 */
#if DEBUG_SCHED_CHECK
	FIXME("DEBUG_SCHED_CHECK enabled");
#endif
#if DEBUG_VMASSERT
	FIXME("DEBUG_VMASSERT enabled");
#endif
#if DEBUG_PROC_CHECK
	FIXME("PROC check enabled");
#endif

	DEBUGEXTRA(("cycles_accounting_init()... "));
	cycles_accounting_init();
	DEBUGEXTRA(("done\n"));

#ifdef CONFIG_SMP
	cpu_set_flag(bsp_cpu_id, CPU_IS_READY);
	machine.processors_count = ncpus;
	machine.bsp_id = bsp_cpu_id;
#else
	machine.processors_count = 1;
	machine.bsp_id = 0;
#endif

	/* Kernel may no longer use bits of memory as VM will be running soon */
	kernel_may_alloc = 0;

	switch_to_user();
	NOT_REACHABLE;
}