/*
 * Map the page (or large page) holding the restore code at the virtual
 * address the image kernel expects to jump to, so execution can continue
 * after the page tables are switched (restore_jump_address is the virtual
 * address, jump_address_phys the corresponding physical address).
 */
static int set_up_temporary_text_mapping(pgd_t *pgd_base)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_base + pgd_index(restore_jump_address);

	pmd = resume_one_md_table_init(pgd);
	if (!pmd)
		return -ENOMEM;

	if (boot_cpu_has(X86_FEATURE_PSE)) {
		set_pmd(pmd + pmd_index(restore_jump_address),
			__pmd((jump_address_phys & PMD_MASK) |
			      pgprot_val(PAGE_KERNEL_LARGE_EXEC)));
	} else {
		pte = resume_one_page_table_init(pmd);
		if (!pte)
			return -ENOMEM;

		set_pte(pte + pte_index(restore_jump_address),
			__pte((jump_address_phys & PAGE_MASK) |
			      pgprot_val(PAGE_KERNEL_EXEC)));
	}

	return 0;
}
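/*
 * Illustrative sketch, not part of the original file: the two branches
 * above differ only in mapping granularity.  With PSE available, the
 * restore code is covered by a single PMD_SIZE large page, so the
 * physical address is rounded down with PMD_MASK; otherwise a single
 * 4K page suffices and PAGE_MASK is used.  The hypothetical helper
 * below just restates that arithmetic for clarity.
 */
static inline unsigned long __maybe_unused
temp_text_mapping_base(unsigned long phys, bool use_large_page)
{
	/* Base of the physical region the temporary text mapping covers. */
	return use_large_page ? (phys & PMD_MASK) : (phys & PAGE_MASK);
}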
/*
 * This maps physical memory into the kernel virtual address space, a
 * total of max_low_pfn pages, by creating page tables starting at
 * PAGE_OFFSET.  The page tables are allocated from resume-safe pages.
 */
static int resume_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = resume_one_md_table_init(pgd);
		if (!pmd)
			return -ENOMEM;

		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
			if (pfn >= max_low_pfn)
				break;

			/*
			 * Map with big pages if possible, otherwise create
			 * normal page tables.
			 * NOTE: We can mark everything as executable here.
			 */
			if (boot_cpu_has(X86_FEATURE_PSE)) {
				set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
				pfn += PTRS_PER_PTE;
			} else {
				pte_t *max_pte;

				pte = resume_one_page_table_init(pmd);
				if (!pte)
					return -ENOMEM;

				max_pte = pte + PTRS_PER_PTE;
				for (; pte < max_pte; pte++, pfn++) {
					if (pfn >= max_low_pfn)
						break;

					set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
				}
			}
		}
	}

	resume_map_numa_kva(pgd_base);

	return 0;
}
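/*
 * A minimal sketch of how the helpers above are typically driven from
 * swsusp_arch_resume() in arch/x86/power/hibernate_32.c.  The exact
 * sequence and the surrounding globals (resume_pg_dir, temp_pgt,
 * restore_image) vary between kernel versions, so treat this as an
 * outline rather than verbatim kernel code.
 */
asmlinkage int swsusp_arch_resume(void)
{
	int error;

	/* The tables must live in pages the restored image won't overwrite. */
	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!resume_pg_dir)
		return -ENOMEM;

	resume_init_first_level_page_table(resume_pg_dir);

	error = resume_physical_mapping_init(resume_pg_dir);
	if (error)
		return error;

	error = set_up_temporary_text_mapping(resume_pg_dir);
	if (error)
		return error;

	temp_pgt = __pa(resume_pg_dir);

	/* We have got enough memory and from now on we cannot recover. */
	restore_image();

	return 0;
}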