static int __init exynos4_l2x0_cache_init(void)
{
	int ret;

	if (soc_is_exynos5250())
		return 0;

	ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK);
	if (!ret) {
		l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);
		clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
		return 0;
	}

	if (!(__raw_readl(S5P_VA_L2CC + L2X0_CTRL) & 0x1)) {
		l2x0_saved_regs.phy_base = EXYNOS4_PA_L2CC;
		/* TAG, Data Latency Control: 2 cycles */
		l2x0_saved_regs.tag_latency = 0x110;

		if (soc_is_exynos4212() || soc_is_exynos4412())
			l2x0_saved_regs.data_latency = 0x120;
		else
			l2x0_saved_regs.data_latency = 0x110;

		l2x0_saved_regs.prefetch_ctrl = 0x30000007;
		l2x0_saved_regs.pwr_ctrl =
			(L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN);

		l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);

		__raw_writel(l2x0_saved_regs.tag_latency,
				S5P_VA_L2CC + L2X0_TAG_LATENCY_CTRL);
		__raw_writel(l2x0_saved_regs.data_latency,
				S5P_VA_L2CC + L2X0_DATA_LATENCY_CTRL);

		/* L2X0 Prefetch Control */
		__raw_writel(l2x0_saved_regs.prefetch_ctrl,
				S5P_VA_L2CC + L2X0_PREFETCH_CTRL);

		/* L2X0 Power Control */
		__raw_writel(l2x0_saved_regs.pwr_ctrl,
				S5P_VA_L2CC + L2X0_POWER_CTRL);

		clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
		clean_dcache_area(&l2x0_saved_regs, sizeof(struct l2x0_regs));
	}

	l2x0_init(S5P_VA_L2CC, L2_AUX_VAL, L2_AUX_MASK);
	return 0;
}
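/*
 * A minimal sketch of how an L2 cache setup routine like the one above is
 * typically hooked into the boot sequence: registered as an early initcall
 * so it runs before ordinary device initcalls. early_initcall() is standard
 * kernel infrastructure; assuming it is the registration used for this
 * particular function is based on the usual Exynos pattern, not shown in the
 * snippet itself.
 */
early_initcall(exynos4_l2x0_cache_init);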
void __cpuinit platform_secondary_init(unsigned int cpu)
{
	trace_hardirqs_off();

	/*
	 * if any interrupts are already enabled for the primary
	 * core (e.g. timer irq), then they will not have been enabled
	 * for us: do so
	 */
	mpcore_cpu_init();

	/*
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	pen_release = -1;
	smp_wmb();
	clean_dcache_area((void *)&pen_release, sizeof(pen_release));

	/*
	 * Synchronise with the boot thread.
	 */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include <asm/cputype.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/system_info.h>

pgd_t *idmap_pgd;

#ifdef CONFIG_ARM_LPAE
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
		pmd = pmd_alloc_one(&init_mm, addr);
		if (!pmd) {
			pr_warning("Failed to allocate identity pmd.\n");
			return;
		}
		pud_populate(&init_mm, pud, pmd);
		pmd += pmd_index(addr);
	} else
		pmd = pmd_offset(pud, addr);

	do {
		next = pmd_addr_end(addr, end);
		*pmd = __pmd((addr & PMD_MASK) | prot);
		flush_pmd_entry(pmd);
	} while (pmd++, addr = next, addr != end);
}
#else	/* !CONFIG_ARM_LPAE */
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	unsigned long prot)
{
#ifdef CONFIG_TIMA_RKP_L1_TABLES
	unsigned long cmd_id = 0x3f809221;
#endif
	pmd_t *pmd = pmd_offset(pud, addr);

	addr = (addr & PMD_MASK) | prot;

#ifdef CONFIG_TIMA_RKP_L1_TABLES
	if (tima_is_pg_protected((unsigned long)pmd) != 0) {
#ifndef CONFIG_TIMA_RKP_COHERENT_TT
		clean_dcache_area(pmd, 8);
		tima_cache_flush((unsigned long)pmd);
#endif
		tima_send_cmd5((unsigned long)__pa(pmd),
				(unsigned long)__pmd(addr),
				(unsigned long)__pmd(addr + SECTION_SIZE),
				0, 0, cmd_id);
#ifndef CONFIG_TIMA_RKP_COHERENT_TT
		tima_cache_inval((unsigned long)pmd);
#endif
		tima_tlb_inval_is(0);
	} else {
		pmd[0] = __pmd(addr);
		addr += SECTION_SIZE;
		pmd[1] = __pmd(addr);
	}
#else	/* CONFIG_TIMA_RKP_L1_TABLES */
	pmd[0] = __pmd(addr);
	addr += SECTION_SIZE;
	pmd[1] = __pmd(addr);
#endif	/* CONFIG_TIMA_RKP_L1_TABLES */
	flush_pmd_entry(pmd);
}
/*
 * need to get a 16k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;
	unsigned long flags;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	pgd_list_lock(flags);
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
	pgd_list_add(new_pgd);
	pgd_list_unlock(flags);

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
#ifdef CONFIG_ARM_FCSE
		/* FCSE does not work without high vectors. */
		BUG();
#endif /* CONFIG_ARM_FCSE */

		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map_nested(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap_nested(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}
/*
 * need to get a 16k page for level 1
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pud = pud_alloc(mm, new_pgd, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pud = pud_offset(init_pgd, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}
/*
 * get_pgd_slow: allocate a new pgd
 * note: a pgd occupies four page frames; each pgd entry is 8 bytes
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	/* the pgd occupies four page frames */
	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and I/O PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		/* get pte entry 0 of the pmd, since entry 0 maps the vector table */
		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		/*
		 * Get the pte of the vector table, which lives in the low
		 * address space (starting at address 0).
		 */
		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map_nested(init_pmd, 0);
		/* copy the vector table's pte into the new pte */
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap_nested(init_pte);
		/* drop the temporary (highmem) mapping of new_pte */
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting pen_release.
	 *
	 * Note that "pen_release" is the hardware CPU ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	pen_release = cpu;
	smp_wmb();
	clean_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release),
			  __pa(&pen_release + sizeof(pen_release)));
	dsb_sev();

	/*
	 * Timeout set on purpose in jiffies so that on slow processors
	 * that must also have low HZ it will wait longer.
	 */
	timeout = jiffies + 128;

	udelay(100);

	/*
	 * If the secondary CPU was waiting on WFE, it should
	 * be already watching <pen_release>, or it could be
	 * waiting in WFI, send it an IPI to be sure it wakes.
	 */
	if (pen_release != -1)
		smp_cross_call(cpumask_of(cpu));

	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (pen_release == -1)
			break;

		udelay(10);
	}

	if (arch_is_coherent()) {
		outer_cache.inv_range = NULL;
		outer_cache.clean_range = NULL;
		outer_cache.flush_range = NULL;
		outer_cache.sync = NULL;
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return pen_release != -1 ? -ENOSYS : 0;
}
/*
 * need to get a 16k page for level 1
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	/*
	 * #define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
	 * i.e. we are copying the kernel-space page table entries.
	 */
	init_pgd = pgd_offset_k(0);

	/* copy the kernel-space pgd entries into the newly created pgd */
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	/*
	 * Write the D-cache lines covering this area back to main memory and
	 * clear their dirty flags.  Arguments: the start address of the area
	 * (which determines the cache lines) and the length to write back.
	 */
	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	/* is the exception vector table at a high or a low address? */
	if (!vectors_high()) {
		/* low vectors: the exception vector table lives in the first page */
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		/*
		 * Find the entry for address 0 in the PGD table pointed to by
		 * new_pgd and allocate a page for it, then fill in the PGD
		 * entry (the PMD).  The PUD level is folded here; on ARM the
		 * PMD is effectively the pgd entry itself.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		/* if empty, allocate a page table and set the PGD (PMD) entry */
		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		/* init_pgd is the init_mm page table; get the PGD entry (PMD) for address 0 */
		init_pmd = pmd_offset(init_pgd, 0);
		/* from init_pmd get the PMD table, then the entry (PTE) for address 0 */
		init_pte = pte_offset_map_nested(init_pmd, 0);
		/* new_pte is where the PTE is stored; *init_pte is the PTE value */
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap_nested(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}
static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}
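/*
 * A minimal sketch of how a slab constructor like the one above is usually
 * wired up: passed as the ctor argument to kmem_cache_create(), so every
 * freshly allocated IOPTE table object is cleaned to the point of coherency
 * before use.  The cache name, the size/alignment choice and the
 * iopte_cachep variable below are assumptions based on the common pattern,
 * not necessarily the exact registration in the driver this snippet
 * comes from.
 */
static struct kmem_cache *iopte_cachep;

static int __init iopte_cache_init(void)
{
	iopte_cachep = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE,
					 IOPTE_TABLE_SIZE, SLAB_HWCACHE_ALIGN,
					 iopte_cachep_ctor);
	return iopte_cachep ? 0 : -ENOMEM;
}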
/*
 * OMAP Device MMU(IOMMU) detection
 */
static int __devinit omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	void *p;
	int irq;
	struct iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (pdev->num_resources != 2)
		return -EINVAL;

	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->clk = clk_get(&pdev->dev, pdata->clk_name);
	if (IS_ERR(obj->clk))
		goto err_clk;

	obj->nr_tlb_entries = pdata->nr_tlb_entries;
	obj->name = pdata->name;
	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);

	mutex_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto err_mem;
	}
	obj->regbase = ioremap(res->start, resource_size(res));
	if (!obj->regbase) {
		err = -ENOMEM;
		goto err_mem;
	}

	res = request_mem_region(res->start, resource_size(res),
				 dev_name(&pdev->dev));
	if (!res) {
		err = -EIO;
		goto err_mem;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENODEV;
		goto err_irq;
	}
	err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
			  dev_name(&pdev->dev), obj);
	if (err < 0)
		goto err_irq;
	platform_set_drvdata(pdev, obj);

	p = (void *)__get_free_pages(GFP_KERNEL, get_order(IOPGD_TABLE_SIZE));
	if (!p) {
		err = -ENOMEM;
		goto err_pgd;
	}
	memset(p, 0, IOPGD_TABLE_SIZE);
	clean_dcache_area(p, IOPGD_TABLE_SIZE);
	obj->iopgd = p;

	BUG_ON(!IS_ALIGNED((unsigned long)obj->iopgd, IOPGD_TABLE_SIZE));

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;

err_pgd:
	free_irq(irq, obj);
err_irq:
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);
err_mem:
	clk_put(obj->clk);
err_clk:
	kfree(obj);
	return err;
}
/*
 * need to get a 16k page for level 1
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = __pgd_alloc();
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_ARM_LPAE
	/*
	 * Allocate PMD table for modules and pkmap mappings.
	 */
	new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
			    MODULES_VADDR);
	if (!new_pud)
		goto no_pud;

	new_pmd = pmd_alloc(mm, new_pud, 0);
	if (!new_pmd)
		goto no_pmd;
#endif

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors. The vectors are always high
		 * with LPAE.
		 */
		new_pud = pud_alloc(mm, new_pgd, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pud = pud_offset(init_pgd, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
	__pgd_free(new_pgd);
no_pgd:
	return NULL;
}
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = __pgd_alloc();
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_ARM_LPAE
	new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
			    MODULES_VADDR);
	if (!new_pud)
		goto no_pud;

	new_pmd = pmd_alloc(mm, new_pud, 0);
	if (!new_pmd)
		goto no_pmd;
#endif

	if (!vectors_high()) {
		new_pud = pud_alloc(mm, new_pgd, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pud = pud_offset(init_pgd, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
	__pgd_free(new_pgd);
no_pgd:
	return NULL;
}
/*
 * need to get a 16k page for level 1
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

#if defined(CONFIG_SYNO_ARMADA_ARCH)
	new_pgd = __pgd_alloc();
#elif defined(CONFIG_SYNO_COMCERTO)
	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, get_order(16384));
#else
	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
#endif
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

#if defined(CONFIG_SYNO_ARMADA_ARCH) && defined(CONFIG_ARM_LPAE)
	/*
	 * Allocate PMD table for modules and pkmap mappings.
	 */
	new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
			    MODULES_VADDR);
	if (!new_pud)
		goto no_pud;

	new_pmd = pmd_alloc(mm, new_pud, 0);
	if (!new_pmd)
		goto no_pmd;
#endif

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors. The vectors are always high
		 * with LPAE.
		 */
		new_pud = pud_alloc(mm, new_pgd, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pud = pud_offset(init_pgd, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte, *init_pte, 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
#if defined(CONFIG_SYNO_ARMADA_ARCH)
	__pgd_free(new_pgd);
#elif defined(CONFIG_SYNO_COMCERTO)
	free_pages((unsigned long)new_pgd, get_order(16384));
#else
	free_pages((unsigned long)new_pgd, 2);
#endif
no_pgd:
	return NULL;
}
/*
 * need to get a 16k page for level 1
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pud_t *new_pud, *init_pud;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = __pgd_alloc();
	if (!new_pgd)
		goto no_pgd;

	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_ARM_LPAE
	/*
	 * Allocate PMD table for modules and pkmap mappings.
	 */
	new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
			    MODULES_VADDR);
	if (!new_pud)
		goto no_pud;

	new_pmd = pmd_alloc(mm, new_pud, 0);
	if (!new_pmd)
		goto no_pmd;
#endif

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors. The vectors are always high
		 * with LPAE.
		 */
		new_pud = pud_alloc(mm, new_pgd, 0);
		if (!new_pud)
			goto no_pud;

		new_pmd = pmd_alloc(mm, new_pud, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

#ifndef CONFIG_ARM_LPAE
		/*
		 * Modify the PTE pointer to have the correct domain. This
		 * needs to be the vectors domain to avoid the low vectors
		 * being unmapped.
		 */
		pmd_val(*new_pmd) &= ~PMD_DOMAIN_MASK;
		pmd_val(*new_pmd) |= PMD_DOMAIN(DOMAIN_VECTORS);
#endif

		init_pud = pud_offset(init_pgd, 0);
		init_pmd = pmd_offset(init_pud, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte_ext(new_pte + 0, init_pte[0], 0);
		set_pte_ext(new_pte + 1, init_pte[1], 0);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(mm, new_pmd);
	mm_dec_nr_pmds(mm);
no_pmd:
	pud_free(mm, new_pud);
no_pud:
	__pgd_free(new_pgd);
no_pgd:
	return NULL;
}