/* common code for old and new mmaps */ inline long do_mmap2( unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) { int error = -EINVAL; struct file * file = NULL; flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); /* * If we are doing a fixed mapping, and address < PAGE_SIZE, * then deny it. */ if (flags & MAP_FIXED && addr < PAGE_SIZE && vectors_base() == 0) goto out; error = -EBADF; if (!(flags & MAP_ANONYMOUS)) { file = fget(fd); if (!file) goto out; } down_write(¤t->mm->mmap_sem); error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff); up_write(¤t->mm->mmap_sem); if (file) fput(file); out: return error; }
/*
 * need to get a 16k page for level 1
 *
 * Allocate and initialise a new first-level page table (pgd) for @mm.
 * On ARM the pgd spans 16KB, hence the order-2 page allocation.
 *
 * Returns the new pgd, or NULL on allocation failure.  All partially
 * allocated resources are released on the error paths below.
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	/* order-2 => 4 pages = 16KB, the size of an ARM level-1 table */
	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	/* Clear the user portion; kernel entries are copied in below. */
	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	init_pgd = pgd_offset_k(0);

	if (vectors_base() == 0) {
		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset(init_pmd, 0);

		/*
		 * This lock is here just to satisfy pmd_alloc and pte_lock
		 */
		spin_lock(&mm->page_table_lock);

		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		/* Share the vector page's mapping with the kernel's init tables. */
		set_pte(new_pte, *init_pte);

		spin_unlock(&mm->page_table_lock);
	}

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
		(PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	/*
	 * FIXME: this should not be necessary
	 */
	clean_cache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	return new_pgd;

	/*
	 * Error unwinding: each label releases exactly what was acquired
	 * before the corresponding failure point.  pte_alloc/pmd_alloc fail
	 * with page_table_lock still held, so both paths drop it first.
	 */
no_pte:
	spin_unlock(&mm->page_table_lock);
	pmd_free(new_pmd);
	free_pages((unsigned long)new_pgd, 2);
	return NULL;

no_pmd:
	spin_unlock(&mm->page_table_lock);
	free_pages((unsigned long)new_pgd, 2);
	return NULL;

no_pgd:
	return NULL;
}