Example #1
File: smpboot.c Project: abligh/xen
void __init
make_cpus_ready(unsigned int max_cpus, unsigned long boot_phys_offset)
{
    unsigned long *gate;
    paddr_t gate_pa;
    int i;

    printk("Waiting for %i other CPUs to be ready\n", max_cpus - 1);
    /* We use the unrelocated copy of smp_up_cpu as that's the one the
     * others can see. */ 
    gate_pa = ((paddr_t) (unsigned long) &smp_up_cpu) + boot_phys_offset;
    gate = map_domain_page(gate_pa >> PAGE_SHIFT) + (gate_pa & ~PAGE_MASK); 
    for ( i = 1; i < max_cpus; i++ )
    {
        /* Tell the next CPU to get ready */
        /* TODO: handle boards where CPUIDs are not contiguous */
        *gate = i;
        flush_xen_dcache(*gate);
        isb();
        sev();
        /* And wait for it to respond */
        while ( ready_cpus < i )
            smp_rmb();
    }
    unmap_domain_page(gate);
}
Example #2
File: smpboot.c Project: avsm/xen-1
/* Bring up a remote CPU */
int __cpu_up(unsigned int cpu)
{
    int rc;

    printk("Bringing up CPU%d\n", cpu);

    rc = init_secondary_pagetables(cpu);
    if ( rc < 0 )
        return rc;

    console_start_sync(); /* Secondary may use early_printk */

    /* Tell the remote CPU which stack to boot on. */
    init_data.stack = idle_vcpu[cpu]->arch.stack;

    /* Tell the remote CPU what its logical CPU ID is */
    init_data.cpuid = cpu;

    /* Open the gate for this CPU */
    smp_up_cpu = cpu_logical_map(cpu);
    flush_xen_dcache(smp_up_cpu);

    rc = arch_cpu_up(cpu);

    console_end_sync();

    if ( rc < 0 )
    {
        printk("Failed to bring up CPU%d\n", cpu);
        return rc;
    }

    while ( !cpu_online(cpu) )
    {
        cpu_relax();
        process_pending_softirqs();
    }

    return 0;
}
Example #3
File: smpboot.c Project: abligh/xen
/* Bring up a remote CPU */
int __cpu_up(unsigned int cpu)
{
    /* Tell the remote CPU which stack to boot on. */
    init_stack = idle_vcpu[cpu]->arch.stack;

    /* Unblock the CPU.  It should be waiting in the loop in head.S
     * for an event to arrive when smp_up_cpu matches its cpuid. */
    smp_up_cpu = cpu;
    /* we need to make sure that the change to smp_up_cpu is visible to
     * secondary cpus with D-cache off */
    flush_xen_dcache(smp_up_cpu);
    isb();
    sev();

    while ( !cpu_online(cpu) )
    {
        cpu_relax();
        process_pending_softirqs();
    }

    return 0;
}
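
Note: examples #1-#3 show only the boot-CPU side of the wakeup handshake; the secondary side is the assembly wait loop in head.S referenced by the comment in example #3 and is not shown on this page. The following is a hypothetical, self-contained C model of that protocol, using C11 threads and atomics in place of Xen's sev()/wfe() and cache-flush primitives; apart from smp_up_cpu and ready_cpus, every name here is invented for illustration and is not Xen code.

/* Hypothetical model, not Xen source: the boot CPU opens the gate for
 * each secondary in turn and waits for an acknowledgement, mirroring
 * make_cpus_ready() (example #1) and the head.S wait loop. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <threads.h>

static atomic_uint smp_up_cpu;   /* gate written by the boot CPU */
static atomic_uint ready_cpus;   /* acks written by secondaries  */

static int secondary(void *arg)
{
    unsigned int my_id = (unsigned int)(uintptr_t)arg;

    /* head.S equivalent: spin until the boot CPU opens our gate. */
    while ( atomic_load(&smp_up_cpu) != my_id )
        thrd_yield();

    /* ... per-CPU setup would happen here ... */

    atomic_fetch_add(&ready_cpus, 1);   /* cf. ready_cpus / cpu_online() */
    return 0;
}

int main(void)
{
    enum { MAX_CPUS = 4 };
    thrd_t t[MAX_CPUS];
    unsigned int i;

    for ( i = 1; i < MAX_CPUS; i++ )
        thrd_create(&t[i], secondary, (void *)(uintptr_t)i);

    /* make_cpus_ready() equivalent: open each gate, wait for the ack. */
    for ( i = 1; i < MAX_CPUS; i++ )
    {
        atomic_store(&smp_up_cpu, i);
        while ( atomic_load(&ready_cpus) < i )
            thrd_yield();
    }

    for ( i = 1; i < MAX_CPUS; i++ )
        thrd_join(t[i], NULL);

    printf("All %d secondary CPUs reported ready\n", MAX_CPUS - 1);
    return 0;
}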
Example #4
File: mm.c Project: abligh/xen
/* Boot-time pagetable setup.
 * Changes here may need matching changes in head.S */
void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
{
    unsigned long dest_va;
    lpae_t pte, *p;
    int i;

    /* Map the destination in the boot misc area. */
    dest_va = BOOT_MISC_VIRT_START;
    pte = mfn_to_xen_entry(xen_paddr >> PAGE_SHIFT);
    write_pte(xen_second + second_table_offset(dest_va), pte);
    flush_xen_data_tlb_range_va(dest_va, SECOND_SIZE);

    /* Calculate virt-to-phys offset for the new location */
    phys_offset = xen_paddr - (unsigned long) _start;

    /* Copy */
    memcpy((void *) dest_va, _start, _end - _start);

    /* Beware!  Any state we modify between now and the PT switch may be
     * discarded when we switch over to the copy. */

    /* Update the copy of xen_pgtable to use the new paddrs */
    p = (void *) xen_pgtable + dest_va - (unsigned long) _start;
#ifdef CONFIG_ARM_64
    p[0].pt.base += (phys_offset - boot_phys_offset) >> PAGE_SHIFT;
    p = (void *) xen_first + dest_va - (unsigned long) _start;
#endif
    for ( i = 0; i < 4; i++)
        p[i].pt.base += (phys_offset - boot_phys_offset) >> PAGE_SHIFT;

    p = (void *) xen_second + dest_va - (unsigned long) _start;
    if ( boot_phys_offset != 0 )
    {
        /* Remove the old identity mapping of the boot paddr */
        vaddr_t va = (vaddr_t)_start + boot_phys_offset;
        p[second_linear_offset(va)].bits = 0;
    }
    for ( i = 0; i < 4 * LPAE_ENTRIES; i++)
        if ( p[i].pt.valid )
            p[i].pt.base += (phys_offset - boot_phys_offset) >> PAGE_SHIFT;

    /* Change pagetables to the copy in the relocated Xen */
    boot_ttbr = (uintptr_t) xen_pgtable + phys_offset;
    flush_xen_dcache(boot_ttbr);
    flush_xen_dcache_va_range((void*)dest_va, _end - _start);
    flush_xen_text_tlb();

    WRITE_SYSREG64(boot_ttbr, TTBR0_EL2);
    dsb();                         /* Ensure visibility of HTTBR update */
    flush_xen_text_tlb();

    /* Undo the temporary map */
    pte.bits = 0;
    write_pte(xen_second + second_table_offset(dest_va), pte);
    flush_xen_text_tlb();

    /* Link in the fixmap pagetable */
    pte = mfn_to_xen_entry((((unsigned long) xen_fixmap) + phys_offset)
                           >> PAGE_SHIFT);
    pte.pt.table = 1;
    write_pte(xen_second + second_table_offset(FIXMAP_ADDR(0)), pte);
    /*
     * No flush required here. Individual flushes are done in
     * set_fixmap as entries are used.
     */

    /* Break up the Xen mapping into 4k pages and protect them separately. */
    for ( i = 0; i < LPAE_ENTRIES; i++ )
    {
        unsigned long mfn = paddr_to_pfn(xen_paddr) + i;
        unsigned long va = XEN_VIRT_START + (i << PAGE_SHIFT);
        if ( !is_kernel(va) )
            break;
        pte = mfn_to_xen_entry(mfn);
        pte.pt.table = 1; /* 4k mappings always have this bit set */
        if ( is_kernel_text(va) || is_kernel_inittext(va) )
        {
            pte.pt.xn = 0;
            pte.pt.ro = 1;
        }
        if ( is_kernel_rodata(va) )
            pte.pt.ro = 1;
        write_pte(xen_xenmap + i, pte);
        /* No flush required here as page table is not hooked in yet. */
    }
    pte = mfn_to_xen_entry((((unsigned long) xen_xenmap) + phys_offset)
                           >> PAGE_SHIFT);
    pte.pt.table = 1;
    write_pte(xen_second + second_linear_offset(XEN_VIRT_START), pte);
    /* TLBFLUSH and ISB would be needed here, but wait until we set WXN */

    /* From now on, no mapping may be both writable and executable. */
    WRITE_SYSREG32(READ_SYSREG32(SCTLR_EL2) | SCTLR_WXN, SCTLR_EL2);
    /* Flush everything after setting WXN bit. */
    flush_xen_text_tlb();
}
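
Note: the repeated "p[i].pt.base += (phys_offset - boot_phys_offset) >> PAGE_SHIFT" updates in example #4 re-base page-table entries after the image is copied. Both offsets are paddr-minus-vaddr deltas for _start, so their difference is exactly how far Xen moved, and the shift converts that byte distance into page frames. Below is a hypothetical, self-contained illustration of that arithmetic; the addresses and the rebase_pfn() helper are made up and are not part of Xen.

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12

/* Hypothetical helper mirroring the pt.base updates in setup_pagetables(). */
static uint64_t rebase_pfn(uint64_t pfn, uint64_t boot_phys_offset,
                           uint64_t phys_offset)
{
    return pfn + ((phys_offset - boot_phys_offset) >> PAGE_SHIFT);
}

int main(void)
{
    /* Made-up addresses, purely for illustration. */
    const uint64_t virt_start = 0x00200000;  /* link-time _start     */
    const uint64_t boot_pa    = 0x80000000;  /* where Xen was loaded */
    const uint64_t xen_paddr  = 0x90000000;  /* relocation target    */

    uint64_t boot_phys_offset = boot_pa   - virt_start;
    uint64_t phys_offset      = xen_paddr - virt_start;

    /* An entry that mapped the second page of the old image now maps
     * the second page of the relocated image. */
    assert(rebase_pfn((boot_pa >> PAGE_SHIFT) + 1,
                      boot_phys_offset, phys_offset)
           == (xen_paddr >> PAGE_SHIFT) + 1);
    return 0;
}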