/*
 * cpu_startup_common:
 *
 *	Common tail of CPU startup: attach the primary TLB event counters,
 *	set up the HWRENA configuration, print the kernel banner and the
 *	total/available memory figures, and carve the physio submap out of
 *	kernel_map.
 *
 * NOTE(review): a second, more complete definition of cpu_startup_common
 * appears later in this file (it additionally creates the MULTIPROCESSOR
 * kcpusets, prints the CPU model, and sets module_machine for n32).  Two
 * definitions of the same external function cannot coexist in one
 * translation unit — confirm which revision is intended and drop the other.
 */
void
cpu_startup_common(void)
{
	vaddr_t minaddr, maxaddr;
	char pbuf[9];	/* "99999 MB" — 8 chars + NUL fits exactly */

	/* Attach event counters for the boot CPU's TLB before any output. */
	pmap_tlb_info_evcnt_attach(&pmap_tlb0_info);

	cpu_hwrena_setup();

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;
	/*
	 * Allocate a submap for physio.
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

	/*
	 * (No need to allocate an mbuf cluster submap.  Mbuf clusters
	 * are allocated via the pool allocator, and we use KSEG/XKPHYS to
	 * map those pages.)
	 */

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);
}
/*
 * cpu_startup_common:
 *
 *	Common tail of CPU startup: attach the primary TLB event counters,
 *	create the MULTIPROCESSOR CPU-state kcpusets and mark the boot CPU
 *	hatched/running, set up HWRENA, print the banner / model / memory
 *	figures, and carve the physio submap out of kernel_map.
 *
 * NOTE(review): this duplicates the earlier definition of
 * cpu_startup_common in this file; two definitions of the same external
 * function cannot coexist in one translation unit.  This appears to be
 * the newer/fuller revision — confirm and remove the other.
 */
void
cpu_startup_common(void)
{
	vaddr_t minaddr, maxaddr;
	char pbuf[9];	/* "99999 MB" — 8 chars + NUL fits exactly */

	/* Attach event counters for the boot CPU's TLB before any output. */
	pmap_tlb_info_evcnt_attach(&pmap_tlb0_info);

#ifdef MULTIPROCESSOR
	/*
	 * Create the per-state CPU sets, then mark the (only) running CPU —
	 * the boot CPU — as hatched and running.  Creation must precede the
	 * kcpuset_set() calls below.
	 */
	kcpuset_create(&cpus_halted, true);
	KASSERT(cpus_halted != NULL);
	kcpuset_create(&cpus_hatched, true);
	KASSERT(cpus_hatched != NULL);
	kcpuset_create(&cpus_paused, true);
	KASSERT(cpus_paused != NULL);
	kcpuset_create(&cpus_resumed, true);
	KASSERT(cpus_resumed != NULL);
	kcpuset_create(&cpus_running, true);
	KASSERT(cpus_running != NULL);
	kcpuset_set(cpus_hatched, cpu_number());
	kcpuset_set(cpus_running, cpu_number());
#endif

	cpu_hwrena_setup();

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf("%s%s", copyright, version);
	printf("%s\n", cpu_getmodel());
	format_bytes(pbuf, sizeof(pbuf), ctob(physmem));
	printf("total memory = %s\n", pbuf);

	minaddr = 0;
	/*
	 * Allocate a submap for physio.
	 */
	phys_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
	    VM_PHYS_SIZE, 0, FALSE, NULL);

	/*
	 * (No need to allocate an mbuf cluster submap.  Mbuf clusters
	 * are allocated via the pool allocator, and we use KSEG/XKPHYS to
	 * map those pages.)
	 */

	format_bytes(pbuf, sizeof(pbuf), ptoa(uvmexp.free));
	printf("avail memory = %s\n", pbuf);

#if defined(__mips_n32)
	/* n32 kernels load n32 modules, not plain "mips" ones. */
	module_machine = "mips-n32";
#endif
}
/*
 * cpu_attach_common:
 *
 *	Common autoconf attachment for a CPU: cross-link the cpu_info with
 *	its device, attach the per-CPU event counters, and (on secondary
 *	CPUs under MULTIPROCESSOR) link the cpu_info onto the global list
 *	and bring up the IPI machinery.
 */
void
cpu_attach_common(device_t self, struct cpu_info *ci)
{
	const char * const xname = device_xname(self);

	/*
	 * Cross link cpu_info and its device together
	 */
	ci->ci_dev = self;
	self->dv_private = ci;
	KASSERT(ci->ci_idepth == 0);

	/* Per-CPU event counters, attached in a fixed, table-driven order. */
	const struct {
		struct evcnt *te_evcnt;
		int te_type;
		const char *te_desc;
	} evtab[] = {
		{ &ci->ci_ev_count_compare,	  EVCNT_TYPE_INTR,
		  "int 5 (clock)" },
		{ &ci->ci_ev_count_compare_missed, EVCNT_TYPE_INTR,
		  "int 5 (clock) missed" },
		{ &ci->ci_ev_fpu_loads,		  EVCNT_TYPE_MISC,
		  "fpu loads" },
		{ &ci->ci_ev_fpu_saves,		  EVCNT_TYPE_MISC,
		  "fpu saves" },
		{ &ci->ci_ev_dsp_loads,		  EVCNT_TYPE_MISC,
		  "dsp loads" },
		{ &ci->ci_ev_dsp_saves,		  EVCNT_TYPE_MISC,
		  "dsp saves" },
		{ &ci->ci_ev_tlbmisses,		  EVCNT_TYPE_TRAP,
		  "tlb misses" },
	};
	for (size_t i = 0; i < sizeof(evtab) / sizeof(evtab[0]); i++) {
		evcnt_attach_dynamic(evtab[i].te_evcnt, evtab[i].te_type,
		    NULL, xname, evtab[i].te_desc);
	}

	/* The boot CPU's TLB counters are attached here, not in startup. */
	if (ci == &cpu_info_store)
		pmap_tlb_info_evcnt_attach(ci->ci_tlb_info);

#ifdef MULTIPROCESSOR
	if (ci != &cpu_info_store) {
		/*
		 * Tail insert this onto the list of cpu_info's.
		 */
		KASSERT(ci->ci_next == NULL);
		KASSERT(cpu_info_last->ci_next == NULL);
		cpu_info_last->ci_next = ci;
		cpu_info_last = ci;
	}

	/* syncicache request counters (MP only). */
	const struct {
		struct evcnt *se_evcnt;
		const char *se_desc;
	} synctab[] = {
		{ &ci->ci_evcnt_synci_activate_rqst,
		  "syncicache activate request" },
		{ &ci->ci_evcnt_synci_deferred_rqst,
		  "syncicache deferred request" },
		{ &ci->ci_evcnt_synci_ipi_rqst,
		  "syncicache ipi request" },
		{ &ci->ci_evcnt_synci_onproc_rqst,
		  "syncicache onproc request" },
	};
	for (size_t i = 0; i < sizeof(synctab) / sizeof(synctab[0]); i++) {
		evcnt_attach_dynamic(synctab[i].se_evcnt, EVCNT_TYPE_MISC,
		    NULL, xname, synctab[i].se_desc);
	}

	/*
	 * Initialize IPI framework for this cpu instance
	 */
	ipi_init(ci);
#endif
}