/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap.
 *
 * Under Xen, PTEs cannot be written directly; each update goes through
 * pte_atomic_update() using the PTE's machine address (vtomach).  The
 * destination PTE is installed before the source PTE is zeroed, so the
 * page is never unmapped from both addresses at once.
 */
void
pagemove(caddr_t from, caddr_t to, size_t size)
{
	pt_entry_t *fpte, *tpte, *mafpte, *matpte;
	pt_entry_t ofpte, otpte;	/* previous contents of each PTE */
#ifdef MULTIPROCESSOR
	u_int32_t cpumask = 0;		/* CPUs needing a TLB shootdown */
#endif

#ifdef DIAGNOSTIC
	/* Caller must pass a whole number of pages. */
	if ((size & PAGE_MASK) != 0)
		panic("pagemove");
#endif
	fpte = kvtopte((vaddr_t)from);
	tpte = kvtopte((vaddr_t)to);
	while (size > 0) {
		/* Machine addresses of the PTEs, required by the hypervisor. */
		mafpte = (pt_entry_t *)vtomach((vaddr_t)fpte);
		matpte = (pt_entry_t *)vtomach((vaddr_t)tpte);
		/* Copy the mapping to the destination, then unmap the source. */
		otpte = pte_atomic_update(tpte, matpte, *fpte);
		ofpte = pte_atomic_update(fpte, mafpte, 0);
		tpte++;
		fpte++;
#if defined(I386_CPU) && !defined(MULTIPROCESSOR)
		/*
		 * 80386 CPUs lack per-page invalidation (no invlpg); for a
		 * UP 386-capable kernel we skip per-page flushes here and do
		 * one full tlbflush() at the end instead.
		 */
		if (cpu_class != CPUCLASS_386)
#endif
		{
			/* Only flush addresses that were actually mapped. */
			if (otpte & PG_V)
#ifdef MULTIPROCESSOR
				pmap_tlb_shootdown(pmap_kernel(),
				    (vaddr_t)to, otpte, &cpumask);
#else
				pmap_update_pg((vaddr_t)to);
#endif
			if (ofpte & PG_V)
#ifdef MULTIPROCESSOR
				pmap_tlb_shootdown(pmap_kernel(),
				    (vaddr_t)from, ofpte, &cpumask);
#else
				pmap_update_pg((vaddr_t)from);
#endif
		}
		from += PAGE_SIZE;
		to += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
#ifdef MULTIPROCESSOR
	/* Deliver all queued shootdowns in one batch. */
	pmap_tlb_shootnow(cpumask);
#else
#if defined(I386_CPU)
	/* See above: 386 has no invlpg, so flush the whole TLB once. */
	if (cpu_class == CPUCLASS_386)
		tlbflush();
#endif
#endif
}
/*
 * Load appropriate gdt descriptor; we better be running on *ci
 * (for the most part, this is how a cpu knows who it is).
 *
 * Xen requires that GDT pages be mapped read-only before they are
 * handed to the hypervisor, so each backing page's PTE has PG_RW
 * cleared (through its machine address) before HYPERVISOR_set_gdt().
 *
 * NOTE(review): frames[] is sized as len >> PAGE_SHIFT; this assumes
 * len (gdt_size[0] * sizeof(gdt[0])) is a multiple of PAGE_SIZE,
 * otherwise the loop writes one more entry than the array holds —
 * TODO confirm gdt_size[0] guarantees page alignment.
 */
void
gdt_init_cpu(struct cpu_info *ci)
{
	size_t len = gdt_size[0] * sizeof(gdt[0]);
	/* Machine frame numbers of the GDT pages, passed to the hypervisor. */
	unsigned long frames[len >> PAGE_SHIFT];
	vaddr_t va;
	pt_entry_t *ptp;
	pt_entry_t *maptp;	/* machine address of the PTE */
	int f;

	for (va = (vaddr_t)ci->ci_gdt, f = 0;
	     va < (vaddr_t)ci->ci_gdt + len;
	     va += PAGE_SIZE, f++) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		/*
		 * Record the machine frame (PTE holds the machine address
		 * under Xen, so the frame number is the PTE >> PAGE_SHIFT).
		 */
		frames[f] = *ptp >> PAGE_SHIFT;
		maptp = (pt_entry_t *)vtomach((vaddr_t)ptp);
		/* GDT pages must be read-only before set_gdt. */
		PTE_CLEARBITS(ptp, maptp, PG_RW);
	}
	/* Make sure all PTE updates are visible before the hypercall. */
	PTE_UPDATES_FLUSH();

	if (HYPERVISOR_set_gdt(frames, gdt_size[0]))
		panic("HYPERVISOR_set_gdt failed!\n");
	/* Reload the segment registers from the new GDT. */
	lgdt_finish();
}
/*
 * Start all application processors (APs): for each AP, set up its
 * per-CPU data area and private GDT, then attempt to boot it via
 * start_ap().  Returns the number of APs that actually started.
 *
 * Each AP's GDT lives in the shared gdt[] array (512 descriptors per
 * CPU); its backing page is temporarily remapped writable, filled in,
 * then remapped read-only as Xen requires for GDT pages.
 */
int
start_all_aps(void)
{
	int x,apic_id, cpu;
	struct pcpu *pc;

	mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);

	/* set up temporary P==V mapping for AP boot */
	/* XXX this is a hack, we should boot the AP on its own stack/PTD */

	/* start each AP */
	for (cpu = 1; cpu < mp_ncpus; cpu++) {
		apic_id = cpu_apic_ids[cpu];

		/* Tell the trampoline which CPU is booting. */
		bootAP = cpu;
		bootAPgdt = gdt + (512*cpu);	/* 512 descriptors per CPU */

		/* Get per-cpu data */
		pc = &__pcpu[bootAP];
		pcpu_init(pc, bootAP, sizeof(struct pcpu));
		/*
		 * NOTE(review): kmem_alloc() result is passed to dpcpu_init()
		 * unchecked — presumably early-boot allocation cannot fail
		 * here; verify against kmem_alloc's contract.
		 */
		dpcpu_init((void *)kmem_alloc(kernel_map, DPCPU_SIZE), bootAP);
		pc->pc_apic_id = cpu_apic_ids[bootAP];
		pc->pc_prvspace = pc;
		pc->pc_curthread = 0;

		/* Point the private-data and TSS descriptors at this AP. */
		gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
		gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;

		/*
		 * Map the AP's GDT page writable, build the descriptors,
		 * then remap it read-only (without PG_RW) as Xen requires.
		 * NOTE(review): the first call uses VTOM() and the second
		 * vtomach() — presumably equivalent macros; confirm.
		 */
		PT_SET_MA(bootAPgdt, VTOM(bootAPgdt) | PG_V | PG_RW);
		bzero(bootAPgdt, PAGE_SIZE);
		for (x = 0; x < NGDT; x++)
			ssdtosd(&gdt_segs[x], &bootAPgdt[x].sd);
		PT_SET_MA(bootAPgdt, vtomach(bootAPgdt) | PG_V);
#ifdef notyet

                if (HYPERVISOR_vcpu_op(VCPUOP_get_physid, cpu, &cpu_id) == 0) {
                        apicid = xen_vcpu_physid_to_x86_apicid(cpu_id.phys_id);
                        acpiid = xen_vcpu_physid_to_x86_acpiid(cpu_id.phys_id);
#ifdef CONFIG_ACPI
                        if (acpiid != 0xff)
                                x86_acpiid_to_apicid[acpiid] = apicid;
#endif
                }
#endif

		/* attempt to start the Application Processor */
		if (!start_ap(cpu)) {
			printf("AP #%d (PHY# %d) failed!\n", cpu, apic_id);
			/* better panic as the AP may be running loose */
			printf("panic y/n? [y] ");
			if (cngetc() != 'n')
				panic("bye-bye");
		}

		CPU_SET(cpu, &all_cpus);	/* record AP in CPU map */
	}

	/* Flush any stale kernel mappings the APs may have cached. */
	pmap_invalidate_range(kernel_pmap, 0, NKPT * NBPDR - 1);

	/* number of APs actually started */
	return mp_naps;
}