/*
 * Create a temporary mapping so the code at restore_jump_address is
 * executable during resume: map that virtual address to jump_address_phys,
 * using a large page if the CPU supports PSE.
 */
static int set_up_temporary_text_mapping(pgd_t *pgd_base)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;

        pgd = pgd_base + pgd_index(restore_jump_address);

        pmd = resume_one_md_table_init(pgd);
        if (!pmd)
                return -ENOMEM;

        if (boot_cpu_has(X86_FEATURE_PSE)) {
                set_pmd(pmd + pmd_index(restore_jump_address),
                        __pmd((jump_address_phys & PMD_MASK) |
                              pgprot_val(PAGE_KERNEL_LARGE_EXEC)));
        } else {
                pte = resume_one_page_table_init(pmd);
                if (!pte)
                        return -ENOMEM;

                set_pte(pte + pte_index(restore_jump_address),
                        __pte((jump_address_phys & PAGE_MASK) |
                              pgprot_val(PAGE_KERNEL_EXEC)));
        }

        return 0;
}
/*
 * Walk init_level4_pgt by hand down to the PTE that maps 'va' and clear
 * _PAGE_RW on it via the hypervisor.  No-op if the guest has the given
 * Xen feature (e.g. writable page tables).
 */
static void __meminit early_make_page_readonly(void *va, unsigned int feature)
{
        unsigned long addr, _va = (unsigned long)va;
        pte_t pte, *ptep;
        unsigned long *page = (unsigned long *) init_level4_pgt;

        BUG_ON(after_bootmem);

        if (xen_feature(feature))
                return;

        addr = (unsigned long) page[pgd_index(_va)];
        addr_to_page(addr, page);

        addr = page[pud_index(_va)];
        addr_to_page(addr, page);

        addr = page[pmd_index(_va)];
        addr_to_page(addr, page);

        ptep = (pte_t *) &page[pte_index(_va)];
        pte.pte = ptep->pte & ~_PAGE_RW;
        if (HYPERVISOR_update_va_mapping(_va, pte, 0))
                BUG();
}
/* Follow the PGD to the PTE (no mid-level for !PAE). */
static unsigned long gpte_addr(struct lg_cpu *cpu,
                               pgd_t gpgd, unsigned long vaddr)
{
        unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;

        BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));

        return gpage + pte_index(vaddr) * sizeof(pte_t);
}
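/*
 * Illustration only: on 32-bit x86 without PAE the split is 10/10/12, so
 * pte_index() is just bits 12..21 of the address, and the byte offset that
 * gpte_addr() adds to the PTE page is that index times sizeof(pte_t) (4).
 * This is a standalone userspace sketch of that arithmetic with the standard
 * !PAE constants, not the kernel's actual macros.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT         12
#define DEMO_PTRS_PER_PTE       1024    /* 10 index bits */
#define DEMO_PTE_SIZE           4       /* sizeof(pte_t) without PAE */

int main(void)
{
        uint32_t vaddr = 0x0804a123;    /* arbitrary example address */
        uint32_t pte_idx = (vaddr >> DEMO_PAGE_SHIFT) & (DEMO_PTRS_PER_PTE - 1);

        printf("pte_index = %u, byte offset into PTE page = %u\n",
               (unsigned)pte_idx, (unsigned)(pte_idx * DEMO_PTE_SIZE));
        return 0;
}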
unsigned long virtaddr_to_physaddr(struct mm_struct *mm, unsigned long vaddr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long paddr = 0;

        pgd = pgd_offset(mm, vaddr);
        printk("pgd_val = 0x%lx\n", pgd_val(*pgd));
        printk("pgd_index = %lu\n", pgd_index(vaddr));
        if (pgd_none(*pgd)) {
                printk("not mapped in pgd\n");
                return INVALID_ADDR;
        }

        pud = pud_offset(pgd, vaddr);
        printk("pud_val = 0x%lx\n", pud_val(*pud));
        printk("pud_index = %lu\n", pud_index(vaddr));
        if (pud_none(*pud)) {
                printk("not mapped in pud\n");
                return INVALID_ADDR;
        }

        pmd = pmd_offset(pud, vaddr);
        printk("pmd_val = 0x%lx\n", pmd_val(*pmd));
        printk("pmd_index = %lu\n", pmd_index(vaddr));
        if (pmd_none(*pmd)) {
                printk("not mapped in pmd\n");
                return INVALID_ADDR;
        }

        /* If pmd_large() is true, the PMD is the last level: a 2MB huge page. */
        if (pmd_large(*pmd)) {
                paddr = pmd_val(*pmd) & PMD_MASK;       /* 2MB-aligned frame base */
                paddr |= vaddr & ~PMD_MASK;             /* offset within the 2MB page */
                return paddr;
        }

        /*
         * Walk the fourth level of the page table.
         * PAGE_MASK (0xfffffffffffff000) strips the low 12 offset bits.
         */
        pte = pte_offset_kernel(pmd, vaddr);
        printk("pte_val = 0x%lx\n", pte_val(*pte));
        printk("pte_index = %lu\n", pte_index(vaddr));
        if (pte_none(*pte)) {
                printk("not mapped in pte\n");
                return INVALID_ADDR;
        }

        paddr = pte_val(*pte) & PAGE_MASK;      /* 4KB-aligned frame base */
        paddr |= vaddr & ~PAGE_MASK;            /* offset within the page */
        printk("paddr = %lx\n", paddr);
        printk("__pa = %lx\n", __pa(vaddr));    /* the kernel's own translation macro */

        return paddr;
}
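/*
 * Hypothetical harness for the walker above, for illustration only: it
 * assumes virtaddr_to_physaddr() and INVALID_ADDR are visible in this
 * translation unit, and compares the walked result against __pa() for a
 * directly-mapped kmalloc() buffer.  Kernel mappings are shared across
 * processes, so current->mm's page tables are sufficient here.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>

static int __init vtop_demo_init(void)
{
        void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        unsigned long paddr;

        if (!buf)
                return -ENOMEM;

        paddr = virtaddr_to_physaddr(current->mm, (unsigned long)buf);
        if (paddr != INVALID_ADDR)
                pr_info("walked paddr = %lx, __pa = %lx\n", paddr, __pa(buf));

        kfree(buf);
        return 0;
}

static void __exit vtop_demo_exit(void)
{
}

module_init(vtop_demo_init);
module_exit(vtop_demo_exit);
MODULE_LICENSE("GPL");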
/*
 * for Xen extraction
 */
unsigned long long kvtop_xen_x86_64(unsigned long kvaddr)
{
        unsigned long long dirp, entry;

        if (!is_xen_vaddr(kvaddr))
                return NOT_PADDR;

        if (is_xen_text(kvaddr))
                return (unsigned long)kvaddr - XEN_VIRT_START + info->xen_phys_start;

        if (is_direct(kvaddr))
                return (unsigned long)kvaddr - DIRECTMAP_VIRT_START;

        if ((dirp = kvtop_xen_x86_64(SYMBOL(pgd_l4))) == NOT_PADDR)
                return NOT_PADDR;
        dirp += pml4_index(kvaddr) * sizeof(unsigned long long);
        if (!readmem(MADDR_XEN, dirp, &entry, sizeof(entry)))
                return NOT_PADDR;

        if (!(entry & _PAGE_PRESENT))
                return NOT_PADDR;

        dirp = entry & ENTRY_MASK;
        dirp += pgd_index(kvaddr) * sizeof(unsigned long long);
        if (!readmem(MADDR_XEN, dirp, &entry, sizeof(entry)))
                return NOT_PADDR;

        if (!(entry & _PAGE_PRESENT))
                return NOT_PADDR;

        dirp = entry & ENTRY_MASK;
        dirp += pmd_index(kvaddr) * sizeof(unsigned long long);
        if (!readmem(MADDR_XEN, dirp, &entry, sizeof(entry)))
                return NOT_PADDR;

        if (!(entry & _PAGE_PRESENT))
                return NOT_PADDR;

        if (entry & _PAGE_PSE) {
                entry = (entry & ENTRY_MASK) + (kvaddr & ((1UL << PMD_SHIFT) - 1));
                return entry;
        }

        dirp = entry & ENTRY_MASK;
        dirp += pte_index(kvaddr) * sizeof(unsigned long long);
        if (!readmem(MADDR_XEN, dirp, &entry, sizeof(entry)))
                return NOT_PADDR;

        if (!(entry & _PAGE_PRESENT))
                return NOT_PADDR;

        entry = (entry & ENTRY_MASK) + (kvaddr & ((1UL << PTE_SHIFT) - 1));

        return entry;
}
/*
 * Translate a virtual address to a physical address for x86 PAE
 * (3-level) paging.
 */
unsigned long long vtop_x86_PAE(unsigned long vaddr)
{
        unsigned long long page_dir, pgd_pte, pmd_paddr, pmd_pte;
        unsigned long long pte_paddr, pte;

        if (SYMBOL(swapper_pg_dir) == NOT_FOUND_SYMBOL) {
                ERRMSG("Can't get the symbol of swapper_pg_dir.\n");
                return NOT_PADDR;
        }

        page_dir = SYMBOL(swapper_pg_dir);
        page_dir += pgd_index_PAE(vaddr) * sizeof(unsigned long long);
        if (!readmem(VADDR, page_dir, &pgd_pte, sizeof(pgd_pte))) {
                ERRMSG("Can't get pgd_pte (page_dir:%llx).\n", page_dir);
                return NOT_PADDR;
        }
        if (!(pgd_pte & _PAGE_PRESENT))
                return NOT_PADDR;

        if (info->vaddr_for_vtop == vaddr)
                MSG("  PGD : %16llx => %16llx\n", page_dir, pgd_pte);

        pmd_paddr = pgd_pte & ENTRY_MASK;
        pmd_paddr += pmd_index(vaddr) * sizeof(unsigned long long);
        if (!readmem(PADDR, pmd_paddr, &pmd_pte, sizeof(pmd_pte))) {
                ERRMSG("Can't get pmd_pte (pmd_paddr:%llx).\n", pmd_paddr);
                return NOT_PADDR;
        }
        if (!(pmd_pte & _PAGE_PRESENT))
                return NOT_PADDR;

        if (info->vaddr_for_vtop == vaddr)
                MSG("  PMD : %16llx => %16llx\n", pmd_paddr, pmd_pte);

        if (pmd_pte & _PAGE_PSE)
                return (pmd_pte & ENTRY_MASK) + (vaddr & ((1UL << PMD_SHIFT) - 1));

        pte_paddr = pmd_pte & ENTRY_MASK;
        pte_paddr += pte_index(vaddr) * sizeof(unsigned long long);
        if (!readmem(PADDR, pte_paddr, &pte, sizeof(pte)))
                return NOT_PADDR;

        if (!(pte & _PAGE_PRESENT))
                return NOT_PADDR;

        if (info->vaddr_for_vtop == vaddr)
                MSG("  PTE : %16llx => %16llx\n", pte_paddr, pte);

        return (pte & ENTRY_MASK) + (vaddr & ((1UL << PTE_SHIFT) - 1));
}
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
        pte_t *pte;
#endif

        /* Get the top-level page table entry. */
        pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);

        /* We don't have four levels. */
        pud = pud_offset(pgd, addr);
#ifndef __PAGETABLE_PUD_FOLDED
# error support fourth page table level
#endif
        if (!pud_present(*pud))
                return NULL;

        /* Check for an L0 huge PTE, if we have three levels. */
#ifndef __PAGETABLE_PMD_FOLDED
        if (pud_huge(*pud))
                return (pte_t *)pud;

        pmd = (pmd_t *)get_pte((pte_t *)pud_page_vaddr(*pud),
                               pmd_index(addr), 1);
        if (!pmd_present(*pmd))
                return NULL;
#else
        pmd = pmd_offset(pud, addr);
#endif

        /* Check for an L1 huge PTE. */
        if (pmd_huge(*pmd))
                return (pte_t *)pmd;

#ifdef CONFIG_HUGETLB_SUPER_PAGES
        /* Check for an L2 huge PTE. */
        pte = get_pte((pte_t *)pmd_page_vaddr(*pmd), pte_index(addr), 2);
        if (!pte_present(*pte))
                return NULL;
        if (pte_super(*pte))
                return pte;
#endif

        return NULL;
}
/*
 * This routine then takes the page directory entry returned above, which
 * contains the address of the page table entry (PTE) page.  It then returns
 * a pointer to the PTE for the given address.
 */
static pte_t *spte_addr(struct lg_cpu *cpu, pgd_t spgd, unsigned long vaddr)
{
#ifdef CONFIG_X86_PAE
        pmd_t *pmd = spmd_addr(cpu, spgd, vaddr);
        pte_t *page = __va(pmd_pfn(*pmd) << PAGE_SHIFT);

        /* You should never call this if the PMD entry wasn't valid */
        BUG_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT));
#else
        pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);

        /* You should never call this if the PGD entry wasn't valid */
        BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
#endif

        return &page[pte_index(vaddr)];
}
/*H:480
 * (vi) Mapping the Switcher when the Guest is about to run.
 *
 * The Switcher and the two pages for this CPU need to be visible in the
 * Guest (and not the pages for other CPUs).  We have the appropriate PTE pages
 * for each CPU already set up, we just need to hook them in now we know which
 * Guest is about to run on this CPU.
 */
void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages)
{
        pte_t *switcher_pte_page = __this_cpu_read(switcher_pte_pages);
        pte_t regs_pte;

#ifdef CONFIG_X86_PAE
        pmd_t switcher_pmd;
        pmd_t *pmd_table;

        switcher_pmd = pfn_pmd(__pa(switcher_pte_page) >> PAGE_SHIFT,
                               PAGE_KERNEL_EXEC);

        /* Figure out where the pmd page is, by reading the PGD, and converting
         * it to a virtual address. */
        pmd_table = __va(pgd_pfn(
                cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX])
                                                        << PAGE_SHIFT);
        /* Now write it into the shadow page table. */
        set_pmd(&pmd_table[SWITCHER_PMD_INDEX], switcher_pmd);
#else
        pgd_t switcher_pgd;

        /*
         * Make the last PGD entry for this Guest point to the Switcher's PTE
         * page for this CPU (with appropriate flags).
         */
        switcher_pgd = __pgd(__pa(switcher_pte_page) | __PAGE_KERNEL_EXEC);

        cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd;
#endif

        /*
         * We also change the Switcher PTE page.  When we're running the Guest,
         * we want the Guest's "regs" page to appear where the first Switcher
         * page for this CPU is.  This is an optimization: when the Switcher
         * saves the Guest registers, it saves them into the first page of this
         * CPU's "struct lguest_pages": if we make sure the Guest's register
         * page is already mapped there, we don't have to copy them out
         * again.
         */
        regs_pte = pfn_pte(__pa(cpu->regs_page) >> PAGE_SHIFT, PAGE_KERNEL);
        set_pte(&switcher_pte_page[pte_index((unsigned long)pages)], regs_pte);
}
/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 *
 * This routine transitions us from using a set of compiled-in large
 * pages to using some more precise caching, including removing access
 * to code pages mapped at PAGE_OFFSET (executed only at MEM_SV_START),
 * marking read-only data as locally cacheable, striping the remaining
 * .data and .bss across all the available tiles, and removing access
 * to pages above the top of RAM (thus ensuring a page fault from a bad
 * virtual address rather than a hypervisor shoot down for accessing
 * memory outside the assigned limits).
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
        unsigned long long irqmask;
        unsigned long address, pfn;
        pmd_t *pmd;
        pte_t *pte;
        int pte_ofs;
        const struct cpumask *my_cpu_mask = cpumask_of(smp_processor_id());
        struct cpumask kstripe_mask;
        int rc, i;

#if CHIP_HAS_CBOX_HOME_MAP()
        if (ktext_arg_seen && ktext_hash) {
                pr_warning("warning: \"ktext\" boot argument ignored"
                           " if \"kcache_hash\" sets up text hash-for-home\n");
                ktext_small = 0;
        }

        if (kdata_arg_seen && kdata_hash) {
                pr_warning("warning: \"kdata\" boot argument ignored"
                           " if \"kcache_hash\" sets up data hash-for-home\n");
        }

        if (kdata_huge && !hash_default) {
                pr_warning("warning: disabling \"kdata=huge\"; requires"
                           " kcache_hash=all or =allbutstack\n");
                kdata_huge = 0;
        }
#endif

        /*
         * Set up a mask for cpus to use for kernel striping.
         * This is normally all cpus, but minus dataplane cpus if any.
         * If the dataplane covers the whole chip, we stripe over
         * the whole chip too.
         */
        cpumask_copy(&kstripe_mask, cpu_possible_mask);
        if (!kdata_arg_seen)
                kdata_mask = kstripe_mask;

        /* Allocate and fill in L2 page tables */
        for (i = 0; i < MAX_NUMNODES; ++i) {
#ifdef CONFIG_HIGHMEM
                unsigned long end_pfn = node_lowmem_end_pfn[i];
#else
                unsigned long end_pfn = node_end_pfn[i];
#endif
                unsigned long end_huge_pfn = 0;

                /* Pre-shatter the last huge page to allow per-cpu pages. */
                if (kdata_huge)
                        end_huge_pfn = end_pfn - (HPAGE_SIZE >> PAGE_SHIFT);

                pfn = node_start_pfn[i];

                /* Allocate enough memory to hold L2 page tables for node. */
                init_prealloc_ptes(i, end_pfn - pfn);

                address = (unsigned long) pfn_to_kaddr(pfn);
                while (pfn < end_pfn) {
                        BUG_ON(address & (HPAGE_SIZE-1));
                        pmd = get_pmd(pgtables, address);
                        pte = get_prealloc_pte(pfn);
                        if (pfn < end_huge_pfn) {
                                pgprot_t prot = init_pgprot(address);
                                *(pte_t *)pmd = pte_mkhuge(pfn_pte(pfn, prot));
                                for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
                                     pfn++, pte_ofs++, address += PAGE_SIZE)
                                        pte[pte_ofs] = pfn_pte(pfn, prot);
                        } else {
                                if (kdata_huge)
                                        printk(KERN_DEBUG "pre-shattered huge"
                                               " page at %#lx\n", address);
                                for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
                                     pfn++, pte_ofs++, address += PAGE_SIZE) {
                                        pgprot_t prot = init_pgprot(address);
                                        pte[pte_ofs] = pfn_pte(pfn, prot);
                                }
                                assign_pte(pmd, pte);
                        }
                }
        }

        /*
         * Set or check ktext_map now that we have cpu_possible_mask
         * and kstripe_mask to work with.
         */
        if (ktext_all)
                cpumask_copy(&ktext_mask, cpu_possible_mask);
        else if (ktext_nondataplane)
                ktext_mask = kstripe_mask;
        else if (!cpumask_empty(&ktext_mask)) {
                /* Sanity-check any mask that was requested */
                struct cpumask bad;
                cpumask_andnot(&bad, &ktext_mask, cpu_possible_mask);
                cpumask_and(&ktext_mask, &ktext_mask, cpu_possible_mask);
                if (!cpumask_empty(&bad)) {
                        char buf[NR_CPUS * 5];
                        cpulist_scnprintf(buf, sizeof(buf), &bad);
                        pr_info("ktext: not using unavailable cpus %s\n", buf);
                }
                if (cpumask_empty(&ktext_mask)) {
                        pr_warning("ktext: no valid cpus; caching on %d.\n",
                                   smp_processor_id());
                        cpumask_copy(&ktext_mask,
                                     cpumask_of(smp_processor_id()));
                }
        }

        address = MEM_SV_INTRPT;
        pmd = get_pmd(pgtables, address);
        pfn = 0; /* code starts at PA 0 */
        if (ktext_small) {
                /* Allocate an L2 PTE for the kernel text */
                int cpu = 0;
                pgprot_t prot = construct_pgprot(PAGE_KERNEL_EXEC,
                                                 PAGE_HOME_IMMUTABLE);

                if (ktext_local) {
                        if (ktext_nocache)
                                prot = hv_pte_set_mode(prot,
                                                       HV_PTE_MODE_UNCACHED);
                        else
                                prot = hv_pte_set_mode(prot,
                                                       HV_PTE_MODE_CACHE_NO_L3);
                } else {
                        prot = hv_pte_set_mode(prot,
                                               HV_PTE_MODE_CACHE_TILE_L3);
                        cpu = cpumask_first(&ktext_mask);

                        prot = ktext_set_nocache(prot);
                }

                BUG_ON(address != (unsigned long)_stext);
                pte = NULL;
                for (; address < (unsigned long)_einittext;
                     pfn++, address += PAGE_SIZE) {
                        pte_ofs = pte_index(address);
                        if (pte_ofs == 0) {
                                if (pte)
                                        assign_pte(pmd++, pte);
                                pte = alloc_pte();
                        }
                        if (!ktext_local) {
                                prot = set_remote_cache_cpu(prot, cpu);
                                cpu = cpumask_next(cpu, &ktext_mask);
                                if (cpu == NR_CPUS)
                                        cpu = cpumask_first(&ktext_mask);
                        }
                        pte[pte_ofs] = pfn_pte(pfn, prot);
                }
                if (pte)
                        assign_pte(pmd, pte);
        } else {
                pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
                pteval = pte_mkhuge(pteval);
#if CHIP_HAS_CBOX_HOME_MAP()
                if (ktext_hash) {
                        pteval = hv_pte_set_mode(pteval,
                                                 HV_PTE_MODE_CACHE_HASH_L3);
                        pteval = ktext_set_nocache(pteval);
                } else
#endif /* CHIP_HAS_CBOX_HOME_MAP() */
                if (cpumask_weight(&ktext_mask) == 1) {
                        pteval = set_remote_cache_cpu(pteval,
                                        cpumask_first(&ktext_mask));
                        pteval = hv_pte_set_mode(pteval,
                                                 HV_PTE_MODE_CACHE_TILE_L3);
                        pteval = ktext_set_nocache(pteval);
                } else if (ktext_nocache)
                        pteval = hv_pte_set_mode(pteval,
                                                 HV_PTE_MODE_UNCACHED);
                else
                        pteval = hv_pte_set_mode(pteval,
                                                 HV_PTE_MODE_CACHE_NO_L3);
                for (; address < (unsigned long)_einittext;
                     pfn += PFN_DOWN(HPAGE_SIZE), address += HPAGE_SIZE)
                        *(pte_t *)(pmd++) = pfn_pte(pfn, pteval);
        }

        /* Set swapper_pgprot here so it is flushed to memory right away. */
        swapper_pgprot = init_pgprot((unsigned long)swapper_pg_dir);

        /*
         * Since we may be changing the caching of the stack and page
         * table itself, we invoke an assembly helper to do the
         * following steps:
         *
         *  - flush the cache so we start with an empty slate
         *  - install pgtables[] as the real page table
         *  - flush the TLB so the new page table takes effect
         */
        irqmask = interrupt_mask_save_mask();
        interrupt_mask_set_mask(-1ULL);
        rc = flush_and_install_context(__pa(pgtables),
                                       init_pgprot((unsigned long)pgtables),
                                       __get_cpu_var(current_asid),
                                       cpumask_bits(my_cpu_mask));
        interrupt_mask_restore_mask(irqmask);
        BUG_ON(rc != 0);

        /* Copy the page table back to the normal swapper_pg_dir. */
        memcpy(pgd_base, pgtables, sizeof(pgtables));
        __install_page_table(pgd_base, __get_cpu_var(current_asid),
                             swapper_pgprot);

        /*
         * We just read swapper_pgprot and thus brought it into the cache,
         * with its new home & caching mode.  When we start the other CPUs,
         * they're going to reference swapper_pgprot via their initial fake
         * VA-is-PA mappings, which cache everything locally.  At that
         * time, if it's in our cache with a conflicting home, the
         * simulator's coherence checker will complain.  So, flush it out
         * of our cache; we're not going to ever use it again anyway.
         */
        __insn_finv(&swapper_pgprot);
}
/*
 * Translate a virtual address to a physical address using 4-level paging.
 */
unsigned long long vtop4_x86_64(unsigned long vaddr)
{
        unsigned long page_dir, pml4, pgd_paddr, pgd_pte, pmd_paddr, pmd_pte;
        unsigned long pte_paddr, pte;

        if (SYMBOL(init_level4_pgt) == NOT_FOUND_SYMBOL) {
                ERRMSG("Can't get the symbol of init_level4_pgt.\n");
                return NOT_PADDR;
        }

        /*
         * Get PGD.
         */
        page_dir = SYMBOL(init_level4_pgt);
        page_dir += pml4_index(vaddr) * sizeof(unsigned long);
        if (!readmem(VADDR, page_dir, &pml4, sizeof pml4)) {
                ERRMSG("Can't get pml4 (page_dir:%lx).\n", page_dir);
                return NOT_PADDR;
        }
        if (info->vaddr_for_vtop == vaddr)
                MSG("  PGD : %16lx => %16lx\n", page_dir, pml4);

        if (!(pml4 & _PAGE_PRESENT)) {
                ERRMSG("Can't get a valid pml4.\n");
                return NOT_PADDR;
        }

        /*
         * Get PUD.
         */
        pgd_paddr = pml4 & PHYSICAL_PAGE_MASK;
        pgd_paddr += pgd_index(vaddr) * sizeof(unsigned long);
        if (!readmem(PADDR, pgd_paddr, &pgd_pte, sizeof pgd_pte)) {
                ERRMSG("Can't get pgd_pte (pgd_paddr:%lx).\n", pgd_paddr);
                return NOT_PADDR;
        }
        if (info->vaddr_for_vtop == vaddr)
                MSG("  PUD : %16lx => %16lx\n", pgd_paddr, pgd_pte);

        if (!(pgd_pte & _PAGE_PRESENT)) {
                ERRMSG("Can't get a valid pgd_pte.\n");
                return NOT_PADDR;
        }

        /*
         * Get PMD.
         */
        pmd_paddr = pgd_pte & PHYSICAL_PAGE_MASK;
        pmd_paddr += pmd_index(vaddr) * sizeof(unsigned long);
        if (!readmem(PADDR, pmd_paddr, &pmd_pte, sizeof pmd_pte)) {
                ERRMSG("Can't get pmd_pte (pmd_paddr:%lx).\n", pmd_paddr);
                return NOT_PADDR;
        }
        if (info->vaddr_for_vtop == vaddr)
                MSG("  PMD : %16lx => %16lx\n", pmd_paddr, pmd_pte);

        if (!(pmd_pte & _PAGE_PRESENT)) {
                ERRMSG("Can't get a valid pmd_pte.\n");
                return NOT_PADDR;
        }
        if (pmd_pte & _PAGE_PSE)        /* 2MB page */
                return (PAGEBASE(pmd_pte) & PHYSICAL_PAGE_MASK)
                        + (vaddr & ~_2MB_PAGE_MASK);

        /*
         * Get PTE.
         */
        pte_paddr = pmd_pte & PHYSICAL_PAGE_MASK;
        pte_paddr += pte_index(vaddr) * sizeof(unsigned long);
        if (!readmem(PADDR, pte_paddr, &pte, sizeof pte)) {
                ERRMSG("Can't get pte (pte_paddr:%lx).\n", pte_paddr);
                return NOT_PADDR;
        }
        if (info->vaddr_for_vtop == vaddr)
                MSG("  PTE : %16lx => %16lx\n", pte_paddr, pte);

        if (!(pte & _PAGE_PRESENT)) {
                ERRMSG("Can't get a valid pte.\n");
                return NOT_PADDR;
        }
        return (PAGEBASE(pte) & PHYSICAL_PAGE_MASK) + PAGEOFFSET(vaddr);
}
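/*
 * The level indices that vtop4_x86_64() multiplies by sizeof(unsigned long)
 * are plain shift-and-mask operations.  Below is a standalone userspace
 * sketch of that decomposition for the standard x86_64 4KB layout (four
 * 9-bit indices over a 12-bit offset); idx() is an illustrative stand-in
 * for the pml4_index()/pgd_index()/pmd_index()/pte_index() macros, not the
 * tool's actual definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT         12
#define DEMO_PMD_SHIFT          21
#define DEMO_PUD_SHIFT          30
#define DEMO_PGDIR_SHIFT        39
#define DEMO_PTRS_PER_LEVEL     512

static unsigned idx(uint64_t vaddr, unsigned shift)
{
        return (vaddr >> shift) & (DEMO_PTRS_PER_LEVEL - 1);
}

int main(void)
{
        uint64_t vaddr = 0xffff880012345678ULL; /* arbitrary example address */

        printf("pml4 %u, pud %u, pmd %u, pte %u, offset 0x%03llx\n",
               idx(vaddr, DEMO_PGDIR_SHIFT), idx(vaddr, DEMO_PUD_SHIFT),
               idx(vaddr, DEMO_PMD_SHIFT), idx(vaddr, DEMO_PAGE_SHIFT),
               (unsigned long long)(vaddr & ((1ULL << DEMO_PAGE_SHIFT) - 1)));
        return 0;
}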
pte_t *_pte_offset_map(pmd_t *dir, unsigned long address, enum km_type type)
{
        /* Map the page holding the page table, then add the sub-page offset
         * of the table within that page (the low bits of the page-table
         * frame address given by pmd_ptfn). */
        pte_t *pte = kmap_atomic(pmd_page(*dir), type) +
                ((pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK);
        return &pte[pte_index(address)];
}
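/*
 * With CONFIG_HIGHPTE the PTE page may live in highmem, so the pointer
 * returned through the pte_offset_map() wrapper is a temporary atomic
 * mapping and must be released with pte_unmap().  A hypothetical caller
 * (pte_is_present() is illustrative, not a kernel function) might look
 * like this:
 */
static int pte_is_present(pmd_t *pmd, unsigned long address)
{
        pte_t *ptep = pte_offset_map(pmd, address);     /* wraps _pte_offset_map() */
        int present = pte_present(*ptep);

        pte_unmap(ptep);        /* undo the kmap_atomic */
        return present;
}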
void __init xen_start_kernel(void)
{
        unsigned int i;
        struct xen_machphys_mapping mapping;
        unsigned long machine_to_phys_nr_ents;
#ifdef CONFIG_X86_32
        struct xen_platform_parameters pp;
        extern pte_t swapper_pg_fixmap[PTRS_PER_PTE];
        unsigned long addr;
#endif

        xen_setup_features();

        if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
                machine_to_phys_mapping = (unsigned long *)mapping.v_start;
                machine_to_phys_nr_ents = mapping.max_mfn + 1;
        } else
                machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
        while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents)
                machine_to_phys_order++;

        if (!xen_feature(XENFEAT_auto_translated_physmap))
                phys_to_machine_mapping =
                        (unsigned long *)xen_start_info->mfn_list;

        WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
                                     VMASST_TYPE_writable_pagetables));

        reserve_early(ALIGN(__pa_symbol(&_end), PAGE_SIZE),
                      __pa(xen_start_info->pt_base)
                      + (xen_start_info->nr_pt_frames << PAGE_SHIFT),
                      "Xen provided");

#ifdef CONFIG_X86_32
        WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
                                     VMASST_TYPE_4gb_segments));

        init_mm.pgd = swapper_pg_dir = (pgd_t *)xen_start_info->pt_base;

        if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) {
                hypervisor_virt_start = pp.virt_start;
                reserve_top_address(0UL - pp.virt_start);
        }

        BUG_ON(pte_index(hypervisor_virt_start));

        /* Do an early initialization of the fixmap area */
        make_lowmem_page_readonly(swapper_pg_fixmap,
                                  XENFEAT_writable_page_tables);
        addr = __fix_to_virt(FIX_EARLYCON_MEM_BASE);
        set_pmd(pmd_offset(pud_offset(swapper_pg_dir + pgd_index(addr), addr),
                           addr),
                __pmd(__pa_symbol(swapper_pg_fixmap) | _PAGE_TABLE));
#else
        check_efer();
        xen_init_pt();
#endif

#define __FIXADDR_TOP (-PAGE_SIZE)
#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
#define FIX_BUG_ON(fix) BUILD_BUG_ON(pmd_index(__fix_to_virt(FIX_##fix)) \
        != pmd_index(__fix_to_virt(FIX_EARLYCON_MEM_BASE)))
        FIX_BUG_ON(SHARED_INFO);
        FIX_BUG_ON(ISAMAP_BEGIN);
        FIX_BUG_ON(ISAMAP_END);
#undef pmd_index
#undef __FIXADDR_TOP

        /* Switch to the real shared_info page, and clear the dummy page. */
        set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
        HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
        memset(empty_zero_page, 0, sizeof(empty_zero_page));

        setup_vcpu_info(0);

        /* Set up mapping of lowest 1MB of physical memory. */
        for (i = 0; i < NR_FIX_ISAMAPS; i++)
                if (is_initial_xendomain())
                        set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
                else
                        __set_fixmap(FIX_ISAMAP_BEGIN - i,
                                     virt_to_machine(empty_zero_page),
                                     PAGE_KERNEL_RO);
}