void *kmap_atomic_pfn(unsigned long pfn)
{
    unsigned long vaddr;
    int idx, type;

    pagefault_disable();

    type = kmap_atomic_idx_push();
    idx = type + KM_TYPE_NR * smp_processor_id();
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
    BUG_ON(!pte_none(get_top_pte(vaddr)));
#endif
    set_top_pte(vaddr, pfn_pte(pfn, kmap_prot));

    return (void *)vaddr;
}
void *kmap_atomic(struct page *page)
{
    unsigned int idx;
    unsigned long vaddr;
    void *kmap;
    int type;

    pagefault_disable();
    // POS (Cheolhee Lee)
    if (!(PageHighMem(page) || PageNVRAM(page)))
        return page_address(page);

#ifdef CONFIG_DEBUG_HIGHMEM
    /*
     * There is no cache coherency issue when non VIVT, so force the
     * dedicated kmap usage for better debugging purposes in that case.
     */
    if (!cache_is_vivt())
        kmap = NULL;
    else
#endif
        kmap = kmap_high_get(page);
    if (kmap)
        return kmap;

    type = kmap_atomic_idx_push();

    idx = type + KM_TYPE_NR * smp_processor_id();
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
    /*
     * With debugging enabled, kunmap_atomic forces that entry to 0.
     * Make sure it was indeed properly unmapped.
     */
    BUG_ON(!pte_none(get_top_pte(vaddr)));
#endif
    /*
     * When debugging is off, kunmap_atomic leaves the previous mapping
     * in place, so the contained TLB flush ensures the TLB is updated
     * with the new mapping.
     */
    set_top_pte(vaddr, mk_pte(page, kmap_prot));

    return (void *)vaddr;
}
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
    pgd_t *pgd;
    pud_t *pud;
    pte_t *pte = NULL;

    /* We do not yet support multiple huge page sizes. */
    BUG_ON(sz != PMD_SIZE);

    pgd = pgd_offset(mm, addr);
    pud = pud_alloc(mm, pgd, addr);
    if (pud)
        pte = (pte_t *)pmd_alloc(mm, pud, addr);
    BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

    return pte;
}
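/*
 * Hypothetical caller sketch (not part of the source above): a hugetlb
 * fault path would typically obtain the PMD-level slot with
 * huge_pte_alloc() and then decide whether a huge page still needs to be
 * instantiated. HPAGE_MASK, HPAGE_SIZE, huge_ptep_get() and
 * huge_pte_none() are standard hugetlb helpers assumed to be available;
 * example_huge_slot_state() itself is illustrative only.
 */
static int example_huge_slot_state(struct mm_struct *mm, unsigned long addr)
{
    pte_t *ptep = huge_pte_alloc(mm, addr & HPAGE_MASK, HPAGE_SIZE);

    if (!ptep)
        return -ENOMEM;                 /* page-table allocation failed */
    if (huge_pte_none(huge_ptep_get(ptep)))
        return 1;                       /* slot empty: fault in a huge page */
    return 0;                           /* slot already populated */
}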
static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
                       phys_addr_t addr, phys_addr_t end)
{
    phys_addr_t start_addr = addr;
    pte_t *pte, *start_pte;

    start_pte = pte = pte_offset_kernel(pmd, addr);
    do {
        if (!pte_none(*pte)) {
            kvm_set_pte(pte, __pte(0));
            put_page(virt_to_page(pte));
            kvm_tlb_flush_vmid_ipa(kvm, addr);
        }
    } while (pte++, addr += PAGE_SIZE, addr != end);

    if (kvm_pte_table_empty(kvm, start_pte))
        clear_pmd_entry(kvm, pmd, start_addr);
}
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
    pgd_t *pgd;
    pud_t *pud;
    pte_t *pte = NULL;

    BUG_ON(sz != PMD_SIZE);

    pgd = pgd_offset(mm, addr);
    pud = pud_alloc(mm, pgd, addr);
    if (pud)
        pte = (pte_t *)pmd_alloc(mm, pud, addr);
    BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

    return pte;
}
void *kmap_atomic(struct page *page, enum km_type type)
{
    unsigned int idx;
    unsigned long vaddr;
    void *kmap;

    pagefault_disable();
    if (!PageHighMem(page))
        return page_address(page);

    debug_kmap_atomic(type);

#ifdef CONFIG_DEBUG_HIGHMEM
    /*
     * There is no cache coherency issue when non VIVT, so force the
     * dedicated kmap usage for better debugging purposes in that case.
     */
    if (!cache_is_vivt())
        kmap = NULL;
    else
#endif
        kmap = kmap_high_get(page);
    if (kmap)
        return kmap;

    idx = type + KM_TYPE_NR * smp_processor_id();
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
    /*
     * With debugging enabled, kunmap_atomic forces that entry to 0.
     * Make sure it was indeed properly unmapped.
     */
    BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
    set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
    /*
     * When debugging is off, kunmap_atomic leaves the previous mapping
     * in place, so this TLB flush ensures the TLB is updated with the
     * new mapping.
     */
    local_flush_tlb_kernel_page(vaddr);

    return (void *)vaddr;
}
/*
 * free page(s) as defined by the above mapping.
 */
void consistent_free(size_t size, void *vaddr)
{
    struct page *page;

    if (in_interrupt())
        BUG();

    size = PAGE_ALIGN(size);

#ifndef CONFIG_MMU
    /* Clear SHADOW_MASK bit in address, and free as per usual */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
    vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
# endif
    page = virt_to_page(vaddr);

    do {
        __free_reserved_page(page);
        page++;
    } while (size -= PAGE_SIZE);
#else
    do {
        pte_t *ptep;
        unsigned long pfn;

        ptep = pte_offset_kernel(pmd_offset(pgd_offset_k((unsigned int)vaddr),
                                            (unsigned int)vaddr),
                                 (unsigned int)vaddr);
        if (!pte_none(*ptep) && pte_present(*ptep)) {
            pfn = pte_pfn(*ptep);
            pte_clear(&init_mm, (unsigned int)vaddr, ptep);
            if (pfn_valid(pfn)) {
                page = pfn_to_page(pfn);
                __free_reserved_page(page);
            }
        }
        vaddr += PAGE_SIZE;
    } while (size -= PAGE_SIZE);

    /* flush tlb */
    flush_tlb_all();
#endif
}
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
    unsigned long vaddr;
    int idx, type;

    /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
    pagefault_disable();

    if (!PageHighMem(page))
        return page_address(page);

    type = kmap_atomic_idx_push();
    idx = type + KM_TYPE_NR*smp_processor_id();
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
    BUG_ON(!pte_none(*(kmap_pte-idx)));
    set_pte(kmap_pte-idx, mk_pte(page, prot));

    return (void *)vaddr;
}
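/*
 * Hypothetical usage sketch (not part of the source above): the typical
 * caller pattern for the atomic-kmap variants collected here, assuming the
 * single-argument kmap_atomic()/kunmap_atomic() API. No sleeping or
 * faulting is allowed between the map and the unmap.
 */
static void example_copy_into_page(struct page *page, const void *src, size_t len)
{
    void *dst = kmap_atomic(page);      /* temporary, per-CPU kernel mapping */

    memcpy(dst, src, len);              /* must not sleep while mapped */
    kunmap_atomic(dst);                 /* drop mapping, re-enable pagefaults */
}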
static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
{
    pgd_t *pgdp;
    pud_t *pudp;
    pmd_t *pmdp;
    pte_t *ptep, pte;
    pgprot_t prot;
    unsigned long flags = 1; /* 1 = CB0-1 device */

    pr_debug("shmedia_mapiopage pa %08lx va %08lx\n", pa, va);

    pgdp = pgd_offset_k(va);
    if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
        pudp = (pud_t *)sh64_get_page();
        set_pgd(pgdp, __pgd((unsigned long)pudp | _KERNPG_TABLE));
    }

    pudp = pud_offset(pgdp, va);
    if (pud_none(*pudp) || !pud_present(*pudp)) {
        pmdp = (pmd_t *)sh64_get_page();
        set_pud(pudp, __pud((unsigned long)pmdp | _KERNPG_TABLE));
    }

    pmdp = pmd_offset(pudp, va);
    if (pmd_none(*pmdp) || !pmd_present(*pmdp)) {
        ptep = (pte_t *)sh64_get_page();
        set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
    }

    prot = __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE |
                    _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SHARED | flags);

    pte = pfn_pte(pa >> PAGE_SHIFT, prot);
    ptep = pte_offset_kernel(pmdp, va);

    if (!pte_none(*ptep) && pte_val(*ptep) != pte_val(pte))
        pte_ERROR(*ptep);

    set_pte(ptep, pte);

    /* flush the single page just mapped (end address, not a length) */
    flush_tlb_kernel_range(va, va + PAGE_SIZE);
}
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
    enum fixed_addresses idx;
    unsigned long vaddr;
    int type;

    pagefault_disable();

    type = kmap_atomic_idx_push();
    idx = type + KM_TYPE_NR * smp_processor_id();
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
    BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
    set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
    flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);

    return (void *)vaddr;
}
static inline int callback_page_walk(pte_t *pte, unsigned long addr,
                                     unsigned long next_addr, struct mm_walk *walk)
{
    if (pte_none(*pte) || !pte_present(*pte)
        /* || !pte_young(*pte) || pte_special(*pte) */)
        return 0;

    proc[(long)walk->private].next_addr = addr;

    /* TODO: try pte_mknuma on 3.8 */
    *pte = pte_clear_flags(*pte, _PAGE_PRESENT);
    spcd_pf_extra++;

    return 1;
}
void *kmap_atomic(struct page *page)
{
    enum fixed_addresses idx;
    unsigned long vaddr;

    pagefault_disable();
    if (!PageHighMem(page))
        return page_address(page);

    idx = kmap_idx(kmap_atomic_idx_push(),
                   DCACHE_ALIAS(page_to_phys(page)));
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
    BUG_ON(!pte_none(*(kmap_pte + idx)));
#endif
    set_pte(kmap_pte + idx, mk_pte(page, PAGE_KERNEL_EXEC));

    return (void *)vaddr;
}
static inline void remap_area_pte(pte_t *pte, unsigned long address,
                                  unsigned long size, unsigned long phys_addr,
                                  unsigned long flags)
{
    unsigned long end;

    address &= ~PMD_MASK;
    end = address + size;
    if (end > PMD_SIZE)
        end = PMD_SIZE;
    do {
        if (!pte_none(*pte))
            printk("remap_area_pte: page already exists\n");
        set_pte(pte, mk_pte_phys(phys_addr,
                __pgprot(_PAGE_PRESENT | _PAGE_DIRTY |
                         _PAGE_ACCESSED | flags)));
        address += PAGE_SIZE;
        phys_addr += PAGE_SIZE;
        pte++;
    } while (address < end);
}
static int get_gate_page(struct mm_struct *mm, unsigned long address,
                         unsigned int gup_flags, struct vm_area_struct **vma,
                         struct page **page)
{
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;
    int ret = -EFAULT;

    /* user gate pages are read-only */
    if (gup_flags & FOLL_WRITE)
        return -EFAULT;
    if (address > TASK_SIZE)
        pgd = pgd_offset_k(address);
    else
        pgd = pgd_offset_gate(mm, address);
    BUG_ON(pgd_none(*pgd));
    pud = pud_offset(pgd, address);
    BUG_ON(pud_none(*pud));
    pmd = pmd_offset(pud, address);
    if (pmd_none(*pmd))
        return -EFAULT;
    VM_BUG_ON(pmd_trans_huge(*pmd));
    pte = pte_offset_map(pmd, address);
    if (pte_none(*pte))
        goto unmap;
    *vma = get_gate_vma(mm);
    if (!page)
        goto out;
    *page = vm_normal_page(*vma, address, *pte);
    if (!*page) {
        if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
            goto unmap;
        *page = pte_page(*pte);
    }
    get_page(*page);
out:
    ret = 0;
unmap:
    pte_unmap(pte);
    return ret;
}
/*
 * We are called with the MM semaphore and page_table_lock
 * spinlock held to protect against concurrent faults in
 * multithreaded programs.
 */
static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                             pte_t *page_table, int write_access,
                             unsigned long addr)
{
    pte_t entry;

    /* Read-only mapping of ZERO_PAGE. */
    entry = pte_wrprotect(mk_pte(ZERO_PAGE(addr), vma->vm_page_prot));

    /* ..except if it's a write access */
    if (write_access) {
        struct page *page;

        /* Allocate our own private page. */
        spin_unlock(&mm->page_table_lock);

        page = alloc_page(GFP_HIGHUSER);
        if (!page)
            goto no_mem;
        clear_user_highpage(page, addr);

        spin_lock(&mm->page_table_lock);
        if (!pte_none(*page_table)) {
            page_cache_release(page);
            spin_unlock(&mm->page_table_lock);
            return 1;
        }
        mm->rss++;
        flush_page_to_ram(page);
        entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
        lru_cache_add(page);
        mark_page_accessed(page);
    }

    set_pte(page_table, entry);

    /* No need to invalidate - it was non-present before */
    update_mmu_cache(vma, addr, entry);
    spin_unlock(&mm->page_table_lock);
    return 1;   /* Minor fault */

no_mem:
    return -1;
}
/*
 * Again because of the changes in page table walking, a 2.4 and 2.5
 * version is supplied
 */
inline unsigned long forall_pte_pmd(struct mm_struct *mm, pmd_t *pmd,
                                    unsigned long start, unsigned long end,
                                    unsigned long *sched_count, void *data,
                                    unsigned long (*func)(pte_t *, unsigned long, void *))
{
    pte_t *ptep, pte;
    unsigned long pmd_end;
    unsigned long ret = 0;

    if (pmd_none(*pmd))
        return 0;

    pmd_end = (start + PMD_SIZE) & PMD_MASK;
    if (end > pmd_end)
        end = pmd_end;

    do {
        preempt_disable();
        ptep = pte_offset_map(pmd, start);
        pte = *ptep;
        pte_unmap(ptep);
        preempt_enable();

        /* Call the function if a PTE is available */
        if (!pte_none(pte)) {
            /*
             * Call schedule if necessary.
             * Can func() block or be preempted?
             * It seems the sched_count won't be guaranteed
             * accurate.
             */
            spin_unlock(&mm->page_table_lock);
            check_resched(sched_count);
            ret += func(&pte, start, data);
            spin_lock(&mm->page_table_lock);
        }
        start += PAGE_SIZE;
    } while (start && (start < end));

    return ret;
}
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
    enum fixed_addresses idx;
    unsigned long vaddr;

    pagefault_disable();
    if (!PageHighMem(page))
        return page_address(page);

    debug_kmap_atomic(type);

    idx = type + KM_TYPE_NR*smp_processor_id();
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
    BUG_ON(!pte_none(*(kmap_pte-idx)));
    set_pte(kmap_pte-idx, mk_pte(page, prot));

    return (void *)vaddr;
}
static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
                          unsigned long phys_addr, const struct mem_type *type)
{
    pgprot_t prot = __pgprot(type->prot_pte);
    pte_t *pte;

    pte = pte_alloc_kernel(pmd, addr);
    if (!pte)
        return -ENOMEM;

    do {
        if (!pte_none(*pte))
            goto bad;

        set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot), 0);
        phys_addr += PAGE_SIZE;
#ifdef CONFIG_PAGE_SIZE_64K
    } while (pte += PTE_STEP, addr += PAGE_SIZE, addr != end);
#else /* #ifdef CONFIG_PAGE_SIZE_64K */
    } while (pte++, addr += PAGE_SIZE, addr != end);
#endif /* #ifdef CONFIG_PAGE_SIZE_64K */
    return 0;

bad:
    /*
     * Tail restored per the standard ARM remap_area_pte() (assumption):
     * the snippet was truncated after the loop.
     */
    printk(KERN_CRIT "remap_area_pte: page already exists\n");
    BUG();
}
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
static void *__kmap_atomic_xen(struct page *page, enum km_type type, pgprot_t prot)
{
    enum fixed_addresses idx;
    unsigned long vaddr;

    /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
    inc_preempt_count();
    if (page < highmem_start_page)
        return page_address(page);

    idx = type + KM_TYPE_NR*smp_processor_id();
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
    if (!pte_none(*(kmap_pte-idx)))
        BUG();
#endif
    set_pte_at_sync(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));

    return (void *)vaddr;
}
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
    unsigned long vaddr;
    int idx, type;

    preempt_disable();
    pagefault_disable();

    if (!PageHighMem(page))
        return page_address(page);

    type = kmap_atomic_idx_push();
    idx = type + KM_TYPE_NR*smp_processor_id();
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
    BUG_ON(!pte_none(*(kmap_pte-idx)));
    set_pte(kmap_pte-idx, mk_pte(page, prot));
    arch_flush_lazy_mmu_mode();

    return (void *)vaddr;
}
static void shmedia_unmapioaddr(unsigned long vaddr)
{
    pgd_t *pgdp;
    pmd_t *pmdp;
    pte_t *ptep;

    pgdp = pgd_offset_k(vaddr);
    pmdp = pmd_offset(pgdp, vaddr);

    if (pmd_none(*pmdp) || pmd_bad(*pmdp))
        return;

    ptep = pte_offset(pmdp, vaddr);

    if (pte_none(*ptep) || !pte_present(*ptep))
        return;

    clear_page((void *)ptep);
    pte_clear(ptep);
}
void *kmap_atomic_pfn(unsigned long pfn)
{
    unsigned long vaddr;
    int idx, type;
    struct page *page = pfn_to_page(pfn);

    pagefault_disable();
    if (!PageHighMem(page))
        return page_address(page);

    type = kmap_atomic_idx_push();
    idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
    vaddr = __fix_to_virt(idx);
#ifdef CONFIG_DEBUG_HIGHMEM
    BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
#endif
    set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));

    return (void *)vaddr;
}
// walk_page_table, modified
static pte_t *walk_page_table(unsigned long addr)
{
    pgd_t *pgdp;
    pud_t *pudp;
    pmd_t *pmdp;
    pte_t *ptep;

    pgdp = pgd_offset_k(addr);
    if (pgd_none(*pgdp))
        return NULL;

    pudp = pud_offset(pgdp, addr);
    if (pud_none(*pudp) || pud_large(*pudp))
        return NULL;

    pmdp = pmd_offset(pudp, addr);
    if (pmd_none(*pmdp) || pmd_large(*pmdp))
        return NULL;

    ptep = pte_offset_kernel(pmdp, addr);
    if (pte_none(*ptep))
        return NULL;

    return ptep;
}
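/*
 * Hypothetical usage sketch (not part of the source above): resolving a
 * kernel virtual address to a physical address through the PTE returned by
 * walk_page_table(). pte_present(), pte_pfn(), PAGE_SHIFT and PAGE_MASK are
 * standard helpers assumed to be available; the wrapper itself is
 * illustrative only.
 */
static phys_addr_t example_virt_to_phys_via_walk(unsigned long addr)
{
    pte_t *ptep = walk_page_table(addr);

    if (!ptep || !pte_present(*ptep))
        return 0;                       /* not mapped at the PTE level */

    return ((phys_addr_t)pte_pfn(*ptep) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
}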
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *__kmap_atomic(struct page *page, enum km_type type)
{
    enum fixed_addresses idx;
    unsigned long vaddr;

    preempt_disable();
    pagefault_disable();

    idx = type + KM_TYPE_NR*smp_processor_id();
    WARN_ON_ONCE(!pte_none(*(kmap_pte-idx)));
    if (!PageHighMem(page))
        return page_address(page);

    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
    set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
    arch_flush_lazy_mmu_mode();

    return (void *)vaddr;
}
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
    enum fixed_addresses idx;
    unsigned long vaddr;

    /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
    pagefault_disable();

    idx = type + KM_TYPE_NR*smp_processor_id();
    BUG_ON(!pte_none(*(kmap_pte-idx)));
    if (!PageHighMem(page))
        return page_address(page);

    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
    set_pte(kmap_pte-idx, mk_pte(page, prot));
    arch_flush_lazy_mmu_mode();

    return (void *)vaddr;
}
static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                              unsigned long addr, unsigned long end,
                              unsigned char *vec)
{
    unsigned long next;
    spinlock_t *ptl;
    pte_t *ptep;

    ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
    do {
        pte_t pte = *ptep;
        pgoff_t pgoff;

        next = addr + PAGE_SIZE;
        if (pte_none(pte))
            mincore_unmapped_range(vma, addr, next, vec);
        else if (pte_present(pte))
            *vec = 1;
        else if (pte_file(pte)) {
            pgoff = pte_to_pgoff(pte);
            *vec = mincore_page(vma->vm_file->f_mapping, pgoff);
        } else { /* pte is a swap entry */
            swp_entry_t entry = pte_to_swp_entry(pte);

            if (is_migration_entry(entry)) {
                /* migration entries are always uptodate */
                *vec = 1;
            } else {
#ifdef CONFIG_SWAP
                pgoff = entry.val;
                *vec = mincore_page(&swapper_space, pgoff);
#else
                WARN_ON(1);
                *vec = 1;
#endif
            }
        }
        vec++;
    } while (ptep++, addr = next, addr != end);
    pte_unmap_unlock(ptep - 1, ptl);
}
static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
{
    struct arm_vmregion *c;

    if (!consistent_pte[0]) {
        printk(KERN_ERR "%s: not initialised\n", __func__);
        dump_stack();
        return NULL;
    }

    /*
     * Allocate a virtual address in the consistent mapping region.
     */
    c = arm_vmregion_alloc(&consistent_head, size,
                           gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
    if (c) {
        pte_t *pte;
        int idx = CONSISTENT_PTE_INDEX(c->vm_start);
        u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

        pte = consistent_pte[idx] + off;
        c->vm_pages = page;

        do {
            BUG_ON(!pte_none(*pte));

            set_pte_ext(pte, mk_pte(page, prot), 0);
            page++;
            pte++;
            off++;
            if (off >= PTRS_PER_PTE) {
                off = 0;
                pte = consistent_pte[++idx];
            }
        } while (size -= PAGE_SIZE);

        return (void *)c->vm_start;
    }
    return NULL;
}
void *__kmap_atomic(struct page *page, enum km_type type)
{
    enum fixed_addresses idx;
    unsigned long vaddr;

    /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
    pagefault_disable();
    if (!PageHighMem(page))
        return page_address(page);

    debug_kmap_atomic(type);
    idx = type + KM_TYPE_NR*smp_processor_id();
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
    BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
    set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
    local_flush_tlb_one((unsigned long)vaddr);

    return (void *)vaddr;
}
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic(struct page *page, enum km_type type)
{
    enum fixed_addresses idx;
    unsigned long vaddr;

    /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
    inc_preempt_count();
    if (!PageHighMem(page))
        return page_address(page);

    idx = type + KM_TYPE_NR*smp_processor_id();
    vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
    if (!pte_none(*(kmap_pte-idx)))
        BUG();
#endif
    set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
    __flush_tlb_one(vaddr);

    return (void *)vaddr;
}
static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
                          unsigned long end, pgprot_t prot,
                          struct page **pages, int *nr)
{
    pte_t *pte;

    pte = pte_alloc_kernel(pmd, addr);
    if (!pte)
        return -ENOMEM;
    do {
        struct page *page = pages[*nr];

        if (WARN_ON(!pte_none(*pte)))
            return -EBUSY;
        if (WARN_ON(!page))
            return -ENOMEM;
        set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
        (*nr)++;
    } while (pte++, addr += PAGE_SIZE, addr != end);
    return 0;
}
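/*
 * Hypothetical companion sketch (not part of the source above): the
 * PMD-level walker that would typically drive vmap_pte_range(), modelled on
 * the usual mm/vmalloc.c structure. pmd_alloc() and pmd_addr_end() are
 * assumed to be the standard page-table helpers; the function name is
 * illustrative only.
 */
static int example_vmap_pmd_range(pud_t *pud, unsigned long addr,
                                  unsigned long end, pgprot_t prot,
                                  struct page **pages, int *nr)
{
    pmd_t *pmd;
    unsigned long next;

    pmd = pmd_alloc(&init_mm, pud, addr);
    if (!pmd)
        return -ENOMEM;
    do {
        next = pmd_addr_end(addr, end);     /* clamp to this PMD's span */
        if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
            return -ENOMEM;
    } while (pmd++, addr = next, addr != end);
    return 0;
}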