void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
{
	unsigned int idx, cpu = smp_processor_id();
	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
	unsigned long vaddr, flags;
	pte_t pte, *ptep;

	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	ptep = TOP_PTE(vaddr);
	pte = mk_pte(page, kmap_prot);

	/* the slot must still map this page, and we must be nested */
	BUG_ON(pte_val(*ptep) != pte_val(pte));
	BUG_ON(*depth <= 0);

	raw_local_irq_save(flags);
	(*depth)--;
	/*
	 * If an outer user of the slot remains, restore the PTE that was
	 * saved when this mapping was set up, so the interrupted mapping
	 * becomes visible again.
	 */
	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
		set_pte_ext(ptep, saved_pte, 0);
		local_flush_tlb_kernel_page(vaddr);
	}
	raw_local_irq_restore(flags);

	if (!in_interrupt())
		preempt_enable();
}
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx, type;

	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

		if (cache_is_vivt())
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
		local_flush_tlb_kernel_page(vaddr);
#else
		(void) idx;  /* to kill a warning */
#endif
		kmap_atomic_idx_pop();
	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		/* this address was obtained through kmap_high_get() */
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
	pagefault_enable();
}
void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned int idx;
	unsigned long vaddr;
	void *kmap;

	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);

	kmap = kmap_high_get(page);
	if (kmap)
		return kmap;

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * With debugging enabled, kunmap_atomic forces that entry to 0.
	 * Make sure it was indeed properly unmapped.
	 */
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
	/*
	 * When debugging is off, kunmap_atomic leaves the previous mapping
	 * in place, so this TLB flush ensures the TLB is updated with the
	 * new mapping.
	 */
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}
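/*
 * Hedged usage sketch, not part of the source above: how a caller of that
 * era might pair the type-based kmap_atomic()/kunmap_atomic() API to zero
 * one highmem page. "zero_one_page" is a hypothetical helper name; the
 * kunmap_atomic(addr, type) form is assumed to match this old API.
 */
static void zero_one_page(struct page *page)
{
	/* map the page into this CPU's KM_USER0 fixmap slot */
	void *kaddr = kmap_atomic(page, KM_USER0);

	memset(kaddr, 0, PAGE_SIZE);

	/* tear down the temporary mapping; re-enables pagefaults */
	kunmap_atomic(kaddr, KM_USER0);
}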
void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
{
	unsigned int idx, cpu = smp_processor_id();
	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
	unsigned long vaddr, flags;
	pte_t pte, *ptep;

	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	ptep = TOP_PTE(vaddr);
	pte = mk_pte(page, kmap_prot);

	if (!in_interrupt())
		preempt_disable();

	raw_local_irq_save(flags);
	(*depth)++;
	if (pte_val(*ptep) == pte_val(pte)) {
		/* the slot already maps this page (nested use): keep it */
		*saved_pte = pte;
	} else {
		/* save the current occupant so kunmap can restore it */
		*saved_pte = *ptep;
		set_pte_ext(ptep, pte, 0);
		local_flush_tlb_kernel_page(vaddr);
	}
	raw_local_irq_restore(flags);

	return (void *)vaddr;
}
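/*
 * Hedged usage sketch, an assumption rather than code from the source: the
 * intended calling pattern for the kmap_high_l1_vipt()/kunmap_high_l1_vipt()
 * pair above, as used for L1 VIPT cache maintenance on a possibly-highmem
 * page. "flush_page_data" is a hypothetical helper name.
 */
static void flush_page_data(struct page *page)
{
	pte_t saved_pte;
	/* map (or reuse) this CPU's KM_L1_CACHE slot; nestable from IRQs */
	void *vaddr = kmap_high_l1_vipt(page, &saved_pte);

	__cpuc_flush_dcache_area(vaddr, PAGE_SIZE);

	/* restore whatever mapping an interrupted user had in the slot */
	kunmap_high_l1_vipt(page, saved_pte);
}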
/*
 * need to get a 16k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;
	unsigned long flags;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	pgd_list_lock(flags);
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
	pgd_list_add(new_pgd);
	pgd_list_unlock(flags);

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
#ifdef CONFIG_ARM_FCSE
		/* FCSE does not work without high vectors. */
		BUG();
#endif /* CONFIG_ARM_FCSE */
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map_nested(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap_nested(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}
static inline void set_fixmap_pte(int idx, pte_t pte)
{
	unsigned long vaddr = __fix_to_virt(idx);
	pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);

	set_pte_ext(ptep, pte, 0);
	local_flush_tlb_kernel_page(vaddr);
}
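/*
 * Hedged usage sketch (assumption): set_fixmap_pte() above is the helper
 * later ARM kernels use in place of raw TOP_PTE() writes. A kmap-style
 * caller holding a fixmap index would install and clear a slot roughly
 * like this; "example_fixmap_use" is a hypothetical name.
 */
static void example_fixmap_use(struct page *page, unsigned int idx)
{
	/* install the mapping for fixmap slot idx */
	set_fixmap_pte(idx, mk_pte(page, kmap_prot));

	/* ... access the page through __fix_to_virt(idx) ... */

	/* clear the slot again */
	set_fixmap_pte(idx, __pte(0));
}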
/*
 * need to get a 16k page for level 1
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pud = pud_alloc(mm, new_pgd, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pud = pud_offset(init_pgd, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}
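/*
 * Hedged context sketch (assumption, not from the source above): pgd_alloc()
 * is the arch hook invoked from the generic fork path; the body below
 * mirrors what kernel/fork.c's mm_alloc_pgd() looks like in that era.
 */
static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);	/* 16K, order-2 allocation on ARM */
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}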
/* Taken from __dma_update_pte */
static int bralloc_mem_update_pte(pte_t *pte, pgtable_t token,
				  unsigned long addr, void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}
static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	struct arm_vmregion *c;
	size_t align;
	int bit;

	if (!consistent_pte) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	/*
	 * Align the virtual region allocation - maximum alignment is
	 * a section size, minimum is a page size.  This helps reduce
	 * fragmentation of the DMA space, and also prevents allocations
	 * smaller than a section from crossing a section boundary.
	 */
	bit = fls(size - 1);
	if (bit > SECTION_SHIFT)
		bit = SECTION_SHIFT;
	align = 1 << bit;

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = arm_vmregion_alloc(&consistent_head, align, size,
			       gfp & ~(__GFP_DMA | __GFP_HIGHMEM), caller);
	if (c) {
		pte_t *pte;
		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

		pte = consistent_pte[idx] + off;
		c->vm_pages = page;

		do {
			BUG_ON(!pte_none(*pte));

			set_pte_ext(pte, mk_pte(page, prot), 0);
			page++;
			pte++;
			off++;
			if (off >= PTRS_PER_PTE) {
				off = 0;
				pte = consistent_pte[++idx];
			}
		} while (size -= PAGE_SIZE);

		dsb();

		return (void *)c->vm_start;
	}
	return NULL;
}
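/*
 * Hedged context sketch (assumption): roughly how the ARM
 * dma_alloc_coherent() path of that era hands freshly allocated pages to
 * __dma_alloc_remap(). This is abbreviated; the real path also zeroes and
 * cache-flushes the buffer, and "alloc_coherent_sketch" is a hypothetical
 * name.
 */
static void *alloc_coherent_sketch(struct device *dev, size_t size,
				   dma_addr_t *handle, gfp_t gfp)
{
	struct page *page = alloc_pages(gfp, get_order(size));

	if (!page)
		return NULL;
	*handle = page_to_phys(page);	/* ignoring per-device DMA offsets */
	/* remap with an uncacheable pgprot so the CPU view stays coherent */
	return __dma_alloc_remap(page, size, gfp,
				 pgprot_dmacoherent(PAGE_KERNEL),
				 __builtin_return_address(0));
}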
/*
 * get_pgd_slow: allocate a new pgd.
 * Note: a pgd occupies four page frames; each pgd entry is 8 bytes.
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	/* the pgd occupies four page frames */
	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and I/O PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		/* map pte 0 of the new pmd: entry 0 maps the interrupt vectors */
		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		/*
		 * Get the page-table entry for the interrupt vectors, which
		 * live at the bottom of the address space (starting at
		 * address 0) when the vectors are low.
		 */
		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map_nested(init_pmd, 0);
		/* copy the vector page mapping into the new page table */
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap_nested(init_pte);
		/* drop the (possibly highmem) kmap of new_pte */
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	unsigned int idx;
	unsigned long vaddr;

	pagefault_disable();

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}
void *__kmap_atomic(struct page *page)
{
	unsigned int idx;
	unsigned long vaddr;
	void *kmap;
	int type;

	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * There is no cache coherency issue when non VIVT, so force the
	 * dedicated kmap usage for better debugging purposes in that case.
	 */
	if (!cache_is_vivt())
		kmap = NULL;
	else
#endif
	kmap = kmap_high_get(page);
	if (kmap)
		return kmap;

	type = kmap_atomic_idx_push();

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * With debugging enabled, kunmap_atomic forces that entry to 0.
	 * Make sure it was indeed properly unmapped.
	 */
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
	/*
	 * When debugging is off, kunmap_atomic leaves the previous mapping
	 * in place, so this TLB flush ensures the TLB is updated with the
	 * new mapping.
	 */
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}
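/*
 * Hedged usage sketch (assumption): with the stack-based API shown in
 * __kmap_atomic() above, slots are pushed and popped per CPU, so two pages
 * can be mapped at once as long as unmaps happen in reverse order.
 * "copy_highmem_page" is a hypothetical helper name.
 */
static void copy_highmem_page(struct page *dst, struct page *src)
{
	void *vdst = __kmap_atomic(dst);	/* pushes slot 0 */
	void *vsrc = __kmap_atomic(src);	/* pushes slot 1 */

	memcpy(vdst, vsrc, PAGE_SIZE);

	__kunmap_atomic(vsrc);			/* pops slot 1 */
	__kunmap_atomic(vdst);			/* pops slot 0 */
}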
static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
{
	struct arm_vmregion *c;

	if (!consistent_pte[0]) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = arm_vmregion_alloc(&consistent_head, size,
			       gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		pte_t *pte;
		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

		pte = consistent_pte[idx] + off;
		c->vm_pages = page;

		do {
			BUG_ON(!pte_none(*pte));

			set_pte_ext(pte, mk_pte(page, prot), 0);
			page++;
			pte++;
			off++;
			if (off >= PTRS_PER_PTE) {
				off = 0;
				pte = consistent_pte[++idx];
			}
		} while (size -= PAGE_SIZE);

		return (void *)c->vm_start;
	}
	return NULL;
}
/*
 * need to get a 16k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries.
	 * #define pgd_offset_k(addr) pgd_offset(&init_mm, addr) --
	 * in effect we are copying the kernel-space page-table entries.
	 */
	init_pgd = pgd_offset_k(0);
	/* copy the kernel-space pgd entries into the newly created pgd */
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	/*
	 * Write the D-cache lines back to main memory and clear their dirty
	 * flags. Arguments: the address identifying the cache lines, and
	 * the length to write back.
	 */
	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	/* are the exception vectors at a high or a low address? */
	if (!vectors_high()) {
		/* if low, the vector table lives in the first page */
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		/*
		 * Find the entry for address 0 in the PGD table that new_pgd
		 * points to, allocating a page for it if needed, and fill in
		 * the PGD entry (the PMD). The PUD level is folded away; on
		 * ARM the PMD is likewise derived directly from the pgd.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		/* if empty, allocate a page and set the PGD's page-table entry */
		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		/* init_pgd is init's page table; get its PGD entry (PMD) for address 0 */
		init_pmd = pmd_offset(init_pgd, 0);
		/* from init_pmd get the PMD table, then its entry (PTE) for address 0 */
		init_pte = pte_offset_map_nested(init_pmd, 0);
		/* new_pte is where the PTE is stored; *init_pte is the PTE value to write */
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap_nested(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}
/*
 * need to get a 16k page for level 1
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = __pgd_alloc();
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_ARM_LPAE
	/*
	 * Allocate PMD table for modules and pkmap mappings.
	 */
	new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
			    MODULES_VADDR);
	if (!new_pud)
		goto no_pud;

	new_pmd = pmd_alloc(mm, new_pud, 0);
	if (!new_pmd)
		goto no_pmd;
#endif

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors. The vectors are always high
		 * with LPAE.
		 */
		new_pud = pud_alloc(mm, new_pgd, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

#ifndef CONFIG_ARM_LPAE
		/*
		 * Modify the PTE pointer to have the correct domain. This
		 * needs to be the vectors domain to avoid the low vectors
		 * being unmapped.
		 */
		pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
		pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
#endif

		init_pud = pud_offset(init_pgd, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte + 0, init_pte[0], 0);
		set_pte_ext(new_pte + 1, init_pte[1], 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
	mm_dec_nr_pmds(mm);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
	__pgd_free(new_pgd);
no_pgd:
	return NULL;
}
/*
 * need to get a 16k page for level 1
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

#if defined(CONFIG_SYNO_ARMADA_ARCH)
	new_pgd = __pgd_alloc();
#elif defined(CONFIG_SYNO_COMCERTO)
	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, get_order(16384));
#else
	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
#endif
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

#if defined(CONFIG_SYNO_ARMADA_ARCH) && defined(CONFIG_ARM_LPAE)
	/*
	 * Allocate PMD table for modules and pkmap mappings.
	 */
	new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
			    MODULES_VADDR);
	if (!new_pud)
		goto no_pud;

	new_pmd = pmd_alloc(mm, new_pud, 0);
	if (!new_pmd)
		goto no_pmd;
#endif

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors (with LPAE the vectors are
		 * always high).
		 */
		new_pud = pud_alloc(mm, new_pgd, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pud = pud_offset(init_pgd, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
#if defined(CONFIG_SYNO_ARMADA_ARCH)
	__pgd_free(new_pgd);
#elif defined(CONFIG_SYNO_COMCERTO)
	free_pages((unsigned long)new_pgd, get_order(16384));
#else
	free_pages((unsigned long)new_pgd, 2);
#endif
no_pgd:
	return NULL;
}
/*
 * need to get a 16k page for level 1
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = __pgd_alloc();
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_ARM_LPAE
	/*
	 * Allocate PMD table for modules and pkmap mappings.
	 */
	new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
			    MODULES_VADDR);
	if (!new_pud)
		goto no_pud;

	new_pmd = pmd_alloc(mm, new_pud, 0);
	if (!new_pmd)
		goto no_pmd;
#endif

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors. The vectors are always high
		 * with LPAE.
		 */
		new_pud = pud_alloc(mm, new_pgd, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pud = pud_offset(init_pgd, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
	__pgd_free(new_pgd);
no_pgd:
	return NULL;
}
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = __pgd_alloc();
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/* copy over the kernel and IO PGD entries */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_ARM_LPAE
	/* allocate a PMD table for the modules and pkmap mappings */
	new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
			    MODULES_VADDR);
	if (!new_pud)
		goto no_pud;

	new_pmd = pmd_alloc(mm, new_pud, 0);
	if (!new_pmd)
		goto no_pmd;
#endif

	if (!vectors_high()) {
		/* low vectors: the first page must map the machine vectors */
		new_pud = pud_alloc(mm, new_pgd, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pud = pud_offset(init_pgd, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
	__pgd_free(new_pgd);
no_pgd:
	return NULL;
}