struct cpu_info *
cpu_info_alloc(struct pmap_tlb_info *ti, cpuid_t cpu_id, cpuid_t cpu_package_id,
	cpuid_t cpu_core_id, cpuid_t cpu_smt_id)
{
	KASSERT(cpu_id < MAXCPUS);

#ifdef MIPS64_OCTEON
	vaddr_t exc_page = MIPS_UTLB_MISS_EXC_VEC + 0x1000*cpu_id;
	__CTASSERT(sizeof(struct cpu_info) + sizeof(struct pmap_tlb_info)
	    <= 0x1000 - 0x280);

	struct cpu_info * const ci = ((struct cpu_info *)(exc_page + 0x1000)) - 1;
	memset((void *)exc_page, 0, PAGE_SIZE);

	if (ti == NULL) {
		ti = ((struct pmap_tlb_info *)ci) - 1;
		pmap_tlb_info_init(ti);
	}
#else
	const vaddr_t cpu_info_offset = (vaddr_t)&cpu_info_store & PAGE_MASK;
	struct pglist pglist;
	int error;

	/*
	 * Grab a page from the first 512MB (mappable by KSEG0) to use to store
	 * exception vectors and cpu_info for this cpu.
	 */
	error = uvm_pglistalloc(PAGE_SIZE,
	    0, MIPS_KSEG1_START - MIPS_KSEG0_START,
	    PAGE_SIZE, PAGE_SIZE, &pglist, 1, false);
	if (error)
		return NULL;

	const paddr_t pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
	const vaddr_t va = MIPS_PHYS_TO_KSEG0(pa);
	struct cpu_info * const ci = (void *) (va + cpu_info_offset);
	memset((void *)va, 0, PAGE_SIZE);

	/*
	 * If we weren't passed a pmap_tlb_info to use, the caller wants us
	 * to take care of that for him.  Since we have room left over in the
	 * page we just allocated, just use a piece of that for it.
	 */
	if (ti == NULL) {
		if (cpu_info_offset >= sizeof(*ti)) {
			ti = (void *) va;
		} else {
			KASSERT(PAGE_SIZE - cpu_info_offset + sizeof(*ci) >= sizeof(*ti));
			ti = (struct pmap_tlb_info *)(va + PAGE_SIZE) - 1;
		}
		pmap_tlb_info_init(ti);
	}

	/*
	 * Attach its TLB info (which must be direct-mapped)
	 */
#ifdef _LP64
	KASSERT(MIPS_KSEG0_P(ti) || MIPS_XKPHYS_P(ti));
#else
	KASSERT(MIPS_KSEG0_P(ti));
#endif
#endif /* MIPS64_OCTEON */

	KASSERT(cpu_id != 0);
	ci->ci_cpuid = cpu_id;
	ci->ci_pmap_kern_segtab = &pmap_kern_segtab;
	ci->ci_data.cpu_package_id = cpu_package_id;
	ci->ci_data.cpu_core_id = cpu_core_id;
	ci->ci_data.cpu_smt_id = cpu_smt_id;
	ci->ci_cpu_freq = cpu_info_store.ci_cpu_freq;
	ci->ci_cctr_freq = cpu_info_store.ci_cctr_freq;
	ci->ci_cycles_per_hz = cpu_info_store.ci_cycles_per_hz;
	ci->ci_divisor_delay = cpu_info_store.ci_divisor_delay;
	ci->ci_divisor_recip = cpu_info_store.ci_divisor_recip;
	ci->ci_cpuwatch_count = cpu_info_store.ci_cpuwatch_count;

	pmap_md_alloc_ephemeral_address_space(ci);

	mi_cpu_attach(ci);

	pmap_tlb_info_attach(ti, ci);

	return ci;
}
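
/*
 * Aside (illustrative, not part of the function above): the OCTEON branch
 * places cpu_info at the very end of its 4KB exception page by casting the
 * page-end address to (struct cpu_info *) and backing up one element; the
 * __CTASSERT guarantees cpu_info plus pmap_tlb_info fit in the page while
 * leaving the first 0x280 bytes free for the exception vectors.  Below is
 * a minimal user-space sketch of that end-of-page placement idiom;
 * demo_obj, demo_place_at_page_end and DEMO_PAGE_SIZE are hypothetical
 * names, not kernel API.
 */
#if 0	/* sketch only, never compiled */
#include <assert.h>
#include <stdint.h>
#include <string.h>

#define DEMO_PAGE_SIZE	0x1000

struct demo_obj {
	uint64_t field[8];
};

static struct demo_obj *
demo_place_at_page_end(void *demo_page)
{
	/* One byte past the end of the page, then back up one element. */
	uintptr_t end = (uintptr_t)demo_page + DEMO_PAGE_SIZE;
	struct demo_obj *obj = (struct demo_obj *)end - 1;

	/* The object must land entirely inside the page. */
	assert((uintptr_t)obj >= (uintptr_t)demo_page);
	memset(obj, 0, sizeof(*obj));
	return obj;
}
#endif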
/*
 * Attach the CPU.
 * Discover interesting goop about the virtual address cache
 * (slightly funny place to do it, but this is where it is to be found).
 */
void
cpu_attach(device_t parent, device_t dev, void *aux)
{
	int node;
	long clk, sclk = 0;
	struct mainbus_attach_args *ma = aux;
	struct cpu_info *ci;
	const char *sep;
	register int i, l;
	int bigcache, cachesize;
	char buf[100];
	int totalsize = 0;
	int linesize, dcachesize, icachesize;

	/* tell them what we have */
	node = ma->ma_node;

	/*
	 * Allocate cpu_info structure if needed.
	 */
	ci = alloc_cpuinfo((u_int)node);

	/*
	 * Only do this on the boot cpu.  Other CPUs call
	 * cpu_reset_fpustate() from cpu_hatch() before they
	 * call into the idle loop.
	 * For other CPUs, we need to call mi_cpu_attach()
	 * and complete setting up cpcb.
	 */
	if (ci->ci_flags & CPUF_PRIMARY) {
		fpstate_cache = pool_cache_init(sizeof(struct fpstate64),
		    SPARC64_BLOCK_SIZE, 0, 0, "fpstate", NULL, IPL_NONE,
		    NULL, NULL, NULL);
		cpu_reset_fpustate();
	}
#ifdef MULTIPROCESSOR
	else {
		mi_cpu_attach(ci);
		ci->ci_cpcb = lwp_getpcb(ci->ci_data.cpu_idlelwp);
	}
	for (i = 0; i < IPI_EVCNT_NUM; ++i)
		evcnt_attach_dynamic(&ci->ci_ipi_evcnt[i], EVCNT_TYPE_INTR,
		    NULL, device_xname(dev), ipi_evcnt_names[i]);
#endif
	evcnt_attach_dynamic(&ci->ci_tick_evcnt, EVCNT_TYPE_INTR, NULL,
	    device_xname(dev), "timer");
	mutex_init(&ci->ci_ctx_lock, MUTEX_SPIN, IPL_VM);

	clk = prom_getpropint(node, "clock-frequency", 0);
	if (clk == 0) {
		/*
		 * Try to find it in the OpenPROM root...
		 */
		clk = prom_getpropint(findroot(), "clock-frequency", 0);
	}
	if (clk) {
		/* Tell OS what frequency we run on */
		ci->ci_cpu_clockrate[0] = clk;
		ci->ci_cpu_clockrate[1] = clk / 1000000;
	}

	sclk = prom_getpropint(findroot(), "stick-frequency", 0);

	ci->ci_system_clockrate[0] = sclk;
	ci->ci_system_clockrate[1] = sclk / 1000000;

	snprintf(buf, sizeof buf, "%s @ %s MHz",
	    prom_getpropstring(node, "name"), clockfreq(clk));
	snprintf(cpu_model, sizeof cpu_model, "%s (%s)", machine_model, buf);

	aprint_normal(": %s, UPA id %d\n", buf, ci->ci_cpuid);
	aprint_naive("\n");

	if (ci->ci_system_clockrate[0] != 0) {
		aprint_normal_dev(dev, "system tick frequency %d MHz\n",
		    (int)ci->ci_system_clockrate[1]);
	}
	aprint_normal_dev(dev, "");

	bigcache = 0;

	icachesize = prom_getpropint(node, "icache-size", 0);
	if (icachesize > icache_size)
		icache_size = icachesize;
	linesize = l = prom_getpropint(node, "icache-line-size", 0);
	if (linesize > icache_line_size)
		icache_line_size = linesize;

	for (i = 0; (1 << i) < l && l; i++)
		/* void */;
	if ((1 << i) != l && l)
		panic("bad icache line size %d", l);
	totalsize = icachesize;
	if (totalsize == 0)
		totalsize = l *
		    prom_getpropint(node, "icache-nlines", 64) *
		    prom_getpropint(node, "icache-associativity", 1);

	cachesize = totalsize /
	    prom_getpropint(node, "icache-associativity", 1);
	bigcache = cachesize;

	sep = "";
	if (totalsize > 0) {
		aprint_normal("%s%ldK instruction (%ld b/l)", sep,
		    (long)totalsize/1024, (long)linesize);
		sep = ", ";
	}

	dcachesize = prom_getpropint(node, "dcache-size", 0);
	if (dcachesize > dcache_size)
		dcache_size = dcachesize;
	linesize = l = prom_getpropint(node, "dcache-line-size", 0);
	if (linesize > dcache_line_size)
		dcache_line_size = linesize;

	for (i = 0; (1 << i) < l && l; i++)
		/* void */;
	if ((1 << i) != l && l)
		panic("bad dcache line size %d", l);
	totalsize = dcachesize;
	if (totalsize == 0)
		totalsize = l *
		    prom_getpropint(node, "dcache-nlines", 128) *
		    prom_getpropint(node, "dcache-associativity", 1);

	cachesize = totalsize /
	    prom_getpropint(node, "dcache-associativity", 1);
	if (cachesize > bigcache)
		bigcache = cachesize;

	if (totalsize > 0) {
		aprint_normal("%s%ldK data (%ld b/l)", sep,
		    (long)totalsize/1024, (long)linesize);
		sep = ", ";
	}

	linesize = l = prom_getpropint(node, "ecache-line-size", 0);
	for (i = 0; (1 << i) < l && l; i++)
		/* void */;
	if ((1 << i) != l && l)
		panic("bad ecache line size %d", l);
	totalsize = prom_getpropint(node, "ecache-size", 0);
	if (totalsize == 0)
		totalsize = l *
		    prom_getpropint(node, "ecache-nlines", 32768) *
		    prom_getpropint(node, "ecache-associativity", 1);

	cachesize = totalsize /
	    prom_getpropint(node, "ecache-associativity", 1);
	if (cachesize > bigcache)
		bigcache = cachesize;

	if (totalsize > 0) {
		aprint_normal("%s%ldK external (%ld b/l)", sep,
		    (long)totalsize/1024, (long)linesize);
	}
	aprint_normal("\n");

	if (ecache_min_line_size == 0 ||
	    linesize < ecache_min_line_size)
		ecache_min_line_size = linesize;

	/*
	 * Now that we know the size of the largest cache on this CPU,
	 * re-color our pages.
	 */
	uvm_page_recolor(atop(bigcache)); /* XXX */
}
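
/*
 * Aside (illustrative, not part of the function above): each of the
 * "bad ... line size" checks in cpu_attach() walks i upward until
 * (1 << i) reaches l and panics if it overshoots, i.e. it verifies that
 * a nonzero line size reported by the PROM is a power of two (l == 0,
 * meaning a missing property, is allowed through).  A minimal sketch of
 * the same predicate using the usual bit trick; demo_linesize_ok is a
 * hypothetical name, and l is assumed non-negative as PROM values are.
 */
#if 0	/* sketch only, never compiled */
static int
demo_linesize_ok(int l)
{
	/* A power of two has exactly one bit set, so l & (l - 1) == 0. */
	return l == 0 || (l & (l - 1)) == 0;
}
#endif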
struct cpu_info *
cpu_info_alloc(struct pmap_tlb_info *ti, cpuid_t cpu_id, cpuid_t cpu_package_id,
	cpuid_t cpu_core_id, cpuid_t cpu_smt_id)
{
	vaddr_t cpu_info_offset = (vaddr_t)&cpu_info_store & PAGE_MASK;
	struct pglist pglist;
	int error;

	/*
	 * Grab a page from the first 512MB (mappable by KSEG0) to use to store
	 * exception vectors and cpu_info for this cpu.
	 */
	error = uvm_pglistalloc(PAGE_SIZE,
	    0, MIPS_KSEG1_START - MIPS_KSEG0_START,
	    PAGE_SIZE, PAGE_SIZE, &pglist, 1, false);
	if (error)
		return NULL;

	const paddr_t pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
	const vaddr_t va = MIPS_PHYS_TO_KSEG0(pa);
	struct cpu_info * const ci = (void *) (va + cpu_info_offset);
	memset((void *)va, 0, PAGE_SIZE);

	/*
	 * If we weren't passed a pmap_tlb_info to use, the caller wants us
	 * to take care of that for him.  Since we have room left over in the
	 * page we just allocated, just use a piece of that for it.
	 */
	if (ti == NULL) {
		if (cpu_info_offset >= sizeof(*ti)) {
			ti = (void *) va;
		} else {
			KASSERT(PAGE_SIZE - cpu_info_offset + sizeof(*ci) >= sizeof(*ti));
			ti = (struct pmap_tlb_info *)(va + PAGE_SIZE) - 1;
		}
		pmap_tlb_info_init(ti);
	}

	ci->ci_cpuid = cpu_id;
	ci->ci_data.cpu_package_id = cpu_package_id;
	ci->ci_data.cpu_core_id = cpu_core_id;
	ci->ci_data.cpu_smt_id = cpu_smt_id;
	ci->ci_cpu_freq = cpu_info_store.ci_cpu_freq;
	ci->ci_cctr_freq = cpu_info_store.ci_cctr_freq;
	ci->ci_cycles_per_hz = cpu_info_store.ci_cycles_per_hz;
	ci->ci_divisor_delay = cpu_info_store.ci_divisor_delay;
	ci->ci_divisor_recip = cpu_info_store.ci_divisor_recip;
	ci->ci_cpuwatch_count = cpu_info_store.ci_cpuwatch_count;

	/*
	 * Attach its TLB info (which must be direct-mapped)
	 */
#ifdef _LP64
	KASSERT(MIPS_KSEG0_P(ti) || MIPS_XKPHYS_P(ti));
#else
	KASSERT(MIPS_KSEG0_P(ti));
#endif

#ifndef _LP64
	/*
	 * If we have more memory than can be mapped by KSEG0, we need to
	 * allocate enough VA so we can map pages with the right color
	 * (to avoid cache alias problems).
	 */
	if (mips_avail_end > MIPS_KSEG1_START - MIPS_KSEG0_START) {
		ci->ci_pmap_dstbase = uvm_km_alloc(kernel_map,
		    uvmexp.ncolors * PAGE_SIZE, 0, UVM_KMF_VAONLY);
		KASSERT(ci->ci_pmap_dstbase);
		ci->ci_pmap_srcbase = uvm_km_alloc(kernel_map,
		    uvmexp.ncolors * PAGE_SIZE, 0, UVM_KMF_VAONLY);
		KASSERT(ci->ci_pmap_srcbase);
	}
#endif

	mi_cpu_attach(ci);

	pmap_tlb_info_attach(ti, ci);

	return ci;
}
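
/*
 * Aside (illustrative, not part of the function above): the #ifndef _LP64
 * block reserves uvmexp.ncolors page-sized VA slots for each of src and
 * dst so that any physical page can later be mapped at a virtual address
 * whose cache color matches the page's own, which is what avoids aliases
 * in a virtually indexed cache.  A minimal sketch of picking the slot for
 * a given physical address; demo_colored_va is a hypothetical name, and
 * ncolors is assumed to be a power of two, as UVM keeps it.
 */
#if 0	/* sketch only, never compiled */
static vaddr_t
demo_colored_va(vaddr_t base, paddr_t pa, u_int ncolors)
{
	/* The page's color is the low bits of its physical page number. */
	u_int color = (u_int)(pa >> PAGE_SHIFT) & (ncolors - 1);

	/* Each color owns one page-sized slot in the reserved window. */
	return base + color * PAGE_SIZE;
}
#endif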