static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
			 unsigned long kernel)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, proc);
	pud = pud_alloc(mm, pgd, proc);
	if (!pud)
		goto out;

	pmd = pmd_alloc(mm, pud, proc);
	if (!pmd)
		goto out_pmd;

	pte = pte_alloc_map(mm, NULL, pmd, proc);
	if (!pte)
		goto out_pte;

	/* Map the kernel page holding the stub at 'proc', read-only. */
	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
	*pte = pte_mkread(*pte);
	return 0;

	/* Unwind in reverse allocation order; the labels fall through. */
 out_pte:
	pmd_free(mm, pmd);
 out_pmd:
	pud_free(mm, pud);
 out:
	return -ENOMEM;
}
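/*
 * For context, a sketch of how init_stub_pte() is used: in UML's skas0
 * mode, the syscall stub's code page and a data/stack page are wired into
 * every new address space when its mm context is created.  The helper
 * name init_stub_tables is hypothetical, and STUB_CODE, STUB_DATA and
 * __syscall_stub_start follow the UML sources of this era but vary in
 * spelling across kernel versions; treat this as an illustration, not
 * the verbatim call site.
 */
static int init_stub_tables(struct mm_struct *mm)
{
	unsigned long stack;
	int ret;

	/* Map the stub's code page at its fixed address. */
	ret = init_stub_pte(mm, STUB_CODE,
			    (unsigned long) &__syscall_stub_start);
	if (ret)
		return ret;

	/* Give the stub a freshly zeroed data/stack page as well.  The
	 * real code also records this page in the mm context so it can
	 * be freed at teardown.
	 */
	stack = get_zeroed_page(GFP_KERNEL);
	if (stack == 0)
		return -ENOMEM;

	ret = init_stub_pte(mm, STUB_DATA, stack);
	if (ret)
		free_page(stack);
	return ret;
}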
static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
			 unsigned long kernel)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset(mm, proc);
	pud = pud_alloc(mm, pgd, proc);
	if (!pud)
		goto out;

	pmd = pmd_alloc(mm, pud, proc);
	if (!pmd)
		goto out_pmd;

	pte = pte_alloc_map(mm, pmd, proc);
	if (!pte)
		goto out_pte;

	/* There's an interaction between the skas0 stub pages, stack
	 * randomization, and the BUG at the end of exit_mmap.  exit_mmap
	 * checks that the number of page tables freed is the same as had
	 * been allocated.  If the stack is on the last page table page,
	 * then the stack pte page will be freed, and if not, it won't.
	 * To avoid having to know where the stack is, or whether the
	 * process mapped something at the top of its address space for
	 * some other reason, we set TASK_SIZE to end at the start of the
	 * last page table.  This keeps exit_mmap off the last page, but
	 * introduces a leak of that page.  So, we hang onto it here and
	 * free it in destroy_context_skas.
	 */
	mm->context.skas.last_page_table = pmd_page_vaddr(*pmd);
#ifdef CONFIG_3_LEVEL_PGTABLES
	mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud));
#endif

	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
	*pte = pte_mkread(*pte);
	return 0;

	/* Unwind in reverse allocation order; the labels fall through. */
 out_pte:
	pmd_free(pmd);
 out_pmd:
	pud_free(pud);
 out:
	return -ENOMEM;
}
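/*
 * The comment above defers freeing the deliberately leaked page-table
 * page to destroy_context_skas().  A minimal sketch of that counterpart
 * follows, showing only the reclamation of the fields saved in
 * init_stub_pte(); it assumes this era's mmu_context_skas layout and
 * the old one-argument pte_free_kernel()/pmd_free() helpers, and the
 * real function also tears down the host-side process backing this
 * address space.
 */
void destroy_context_skas(struct mm_struct *mm)
{
	struct mmu_context_skas *mmu = &mm->context.skas;

	/* exit_mmap was kept off this page by the TASK_SIZE trick, so
	 * the pte page (and, with three-level page tables, the pmd
	 * page) must be freed by hand here.
	 */
	pte_free_kernel((pte_t *) mmu->last_page_table);
#ifdef CONFIG_3_LEVEL_PGTABLES
	pmd_free((pmd_t *) mmu->last_pmd);
#endif
}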