/*
 * maps a range of vmalloc()ed memory into the requested pages. the old
 * mappings are removed.
 */
static inline void vmap_pte_range (pte_t *pte, unsigned long address,
                                   unsigned long size, unsigned long vaddr)
{
        unsigned long end;
        pgd_t *vdir;
        pmd_t *vpmd;
        pte_t *vpte;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t oldpage = *pte;
                struct page * page;

                pte_clear(pte);

                vdir = pgd_offset_k (vaddr);
                vpmd = pmd_offset (vdir, vaddr);
                vpte = pte_offset (vpmd, vaddr);
                page = pte_page (*vpte);

                set_pte(pte, mk_pte(page, PAGE_USERIO));
                forget_pte(oldpage);

                address += PAGE_SIZE;
                vaddr += PAGE_SIZE;
                pte++;
        } while (address < end);
}
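/*
 * A minimal standalone sketch of the lookup the loop body above performs:
 * walk the kernel page tables to find the struct page backing a vmalloc()
 * address.  It uses the same 2.4-era three-level helpers as the function
 * above; the helper name itself is hypothetical (later kernels grew
 * vmalloc_to_page() for this job).
 */
static struct page *vmalloc_addr_to_page(unsigned long vaddr)
{
        pgd_t *pgd = pgd_offset_k(vaddr);       /* kernel pgd entry   */
        pmd_t *pmd = pmd_offset(pgd, vaddr);    /* middle level       */
        pte_t *pte = pte_offset(pmd, vaddr);    /* leaf pte           */

        return pte_page(*pte);                  /* pte -> struct page */
}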
/* Remap IO memory, the same way as remap_page_range(), but use
 * the obio memory space.
 *
 * They use a pgprot that sets PAGE_IO and does not check the
 * mem_map table as this is independent of normal memory.
 *
 * As a special hack, if the lowest bit of offset is set, the
 * side-effect bit will be turned off.  This is used as a
 * performance improvement on FFB/AFB. -DaveM
 */
static inline void io_remap_pte_range(pte_t * pte, unsigned long address,
                                      unsigned long size, unsigned long offset,
                                      pgprot_t prot, int space)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t oldpage;
                pte_t entry;
                unsigned long curend = address + PAGE_SIZE;

                entry = mk_pte_io((offset & ~(0x1UL)), prot, space);
                if (!(address & 0xffff)) {
                        if (!(address & 0x3fffff) && !(offset & 0x3ffffe) &&
                            end >= address + 0x400000) {
                                entry = mk_pte_io((offset & ~(0x1UL)),
                                                  __pgprot(pgprot_val (prot) | _PAGE_SZ4MB),
                                                  space);
                                curend = address + 0x400000;
                                offset += 0x400000;
                        } else if (!(address & 0x7ffff) && !(offset & 0x7fffe) &&
                                   end >= address + 0x80000) {
                                entry = mk_pte_io((offset & ~(0x1UL)),
                                                  __pgprot(pgprot_val (prot) | _PAGE_SZ512K),
                                                  space);
                                curend = address + 0x80000;
                                offset += 0x80000;
                        } else if (!(offset & 0xfffe) &&
                                   end >= address + 0x10000) {
                                entry = mk_pte_io((offset & ~(0x1UL)),
                                                  __pgprot(pgprot_val (prot) | _PAGE_SZ64K),
                                                  space);
                                curend = address + 0x10000;
                                offset += 0x10000;
                        } else
                                offset += PAGE_SIZE;
                } else
                        offset += PAGE_SIZE;

                if (offset & 0x1UL)
                        pte_val(entry) &= ~(_PAGE_E);

                do {
                        oldpage = *pte;
                        pte_clear(pte);
                        set_pte(pte, entry);
                        forget_pte(oldpage);
                        address += PAGE_SIZE;
                        pte++;
                } while (address < curend);
        } while (address < end);
}
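/*
 * Illustration (not kernel code) of the size selection above: the magic
 * masks are just size-minus-one values with bit 0 of the offset excluded,
 * since bit 0 carries the FFB/AFB side-effect flag rather than an address
 * bit.  Both the virtual address and the physical offset must be aligned
 * to a candidate size, and the remaining range must cover it, before the
 * matching _PAGE_SZ* TTE size is used.  The helper name and the
 * standalone form are hypothetical.
 */
static unsigned long pick_io_map_size(unsigned long address,
                                      unsigned long offset,
                                      unsigned long end)
{
        if (!(address & 0x3fffff) && !(offset & 0x3ffffe) &&
            end >= address + 0x400000)
                return 0x400000;        /* 4MB   (_PAGE_SZ4MB)  */
        if (!(address & 0x7ffff) && !(offset & 0x7fffe) &&
            end >= address + 0x80000)
                return 0x80000;         /* 512KB (_PAGE_SZ512K) */
        if (!(address & 0xffff) && !(offset & 0xfffe) &&
            end >= address + 0x10000)
                return 0x10000;         /* 64KB  (_PAGE_SZ64K)  */
        return PAGE_SIZE;               /* fall back to base pages */
}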
/*
 * Fills a range of ptes with the caller-supplied zero_pte, a pre-built,
 * write-protected mapping of the shared zero page.
 */
static inline void zeromap_pte_range(pte_t * pte, unsigned long address,
                                     unsigned long size, pte_t zero_pte)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t oldpage = *pte;
                set_pte(pte, zero_pte);
                forget_pte(oldpage);
                address += PAGE_SIZE;
                pte++;
        } while (address < end);
}
/*
 * Later variant of the above: it builds the zero pte per iteration
 * (ZERO_PAGE() takes the address so architectures with virtually indexed
 * caches can hand out a suitably coloured zero page) and uses
 * ptep_get_and_clear() so the old pte is read and cleared in one step.
 */
static inline void zeromap_pte_range(pte_t * pte, unsigned long address,
                                     unsigned long size, pgprot_t prot)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot));
                pte_t oldpage = ptep_get_and_clear(pte);
                set_pte(pte, zero_pte);
                forget_pte(oldpage);
                address += PAGE_SIZE;
                pte++;
                /* the "address &&" test guards against wrap-around to 0
                 * at the top of the address space */
        } while (address && (address < end));
}
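/*
 * What the zero-map pte built in both variants above means, step by step
 * (same 2.4-era macros; a sketch only, the function name is hypothetical):
 */
void zero_pte_example(unsigned long address, pgprot_t prot)
{
        pte_t pte;

        pte = mk_pte(ZERO_PAGE(address), prot); /* point at the shared zero page   */
        pte = pte_wrprotect(pte);               /* read-only: reads all see zeroes */

        /*
         * A later write to the mapping takes a write-protect fault, and the
         * fault handler (do_wp_page() in this era) substitutes a private
         * writable page - the classic copy-on-write step that makes one
         * global zero page safe to share.
         */
        (void)pte;
}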
/* Remap IO memory, the same way as remap_page_range(), but use
 * the obio memory space.
 *
 * They use a pgprot that sets PAGE_IO and does not check the
 * mem_map table as this is independent of normal memory.
 */
static inline void io_remap_pte_range(pte_t * pte, unsigned long address,
                                      unsigned long size, unsigned long offset,
                                      pgprot_t prot, int space)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t oldpage = *pte;
                pte_clear(pte);
                set_pte(pte, mk_pte_io(offset, prot, space));
                forget_pte(oldpage);
                address += PAGE_SIZE;
                offset += PAGE_SIZE;
                pte++;
        } while (address < end);
}
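/*
 * Hedged usage sketch: how a sparc device driver's mmap() handler might
 * end up in io_remap_pte_range(), via the top-level io_remap_page_range()
 * walk.  The 2.4-era signature (from, offset, size, prot, space) is
 * assumed; mydev, its fields, and mydev_mmap are hypothetical.
 */
static struct { unsigned long phys_base; int iospace; } mydev; /* hypothetical */

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;

        if (io_remap_page_range(vma->vm_start, mydev.phys_base, size,
                                vma->vm_page_prot, mydev.iospace))
                return -EAGAIN;
        return 0;
}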
/*
 * maps a range of physical memory into the requested pages. the old
 * mappings are removed. any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access")
 */
static inline void remap_pte_range(pte_t * pte, unsigned long address,
                                   unsigned long size, unsigned long offset,
                                   pgprot_t prot)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t oldpage = *pte;
                pte_clear(pte);
                /* only map physical addresses that lie beyond managed RAM
                 * or are explicitly reserved; anything else is left as a
                 * null mapping */
                if (offset >= high_memory || PageReserved(mem_map+MAP_NR(offset)))
                        set_pte(pte, mk_pte(offset, prot));
                forget_pte(oldpage);
                address += PAGE_SIZE;
                offset += PAGE_SIZE;
                pte++;
        } while (address < end);
}
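/*
 * The test above, restated as a predicate (sketch, same 2.2-era macros):
 * a physical address may be remapped only if it lies beyond the end of
 * managed RAM (high_memory) or its mem_map entry is marked reserved,
 * i.e. the page is not under normal allocator control and needs no
 * reference counting.  The helper name is hypothetical.
 */
static inline int remappable(unsigned long offset)
{
        return offset >= high_memory ||
               PageReserved(mem_map + MAP_NR(offset));
}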
static inline void zeromap_pte_range(struct mm_struct *mm, pte_t * pte,
                                     unsigned long address, unsigned long size,
                                     pgprot_t prot)
{
        unsigned long end;

        /* lock-break hooks (low-latency patches): let mm->page_table_lock
         * be dropped and retaken here so long pte loops do not hold it
         * for unbounded time */
        debug_lock_break(1);
        break_spin_lock(&mm->page_table_lock);

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t zero_pte = pte_wrprotect(mk_pte(ZERO_PAGE(address), prot));
                pte_t oldpage = ptep_get_and_clear(pte);
                set_pte(pte, zero_pte);
                forget_pte(oldpage);
                address += PAGE_SIZE;
                pte++;
        } while (address && (address < end));
}
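/*
 * Conceptual sketch (hypothetical, modeled on the 2.4 low-latency
 * patches) of what a lock-break helper boils down to: momentarily
 * release the spinlock so a pending reschedule can run, then retake it
 * before continuing the long pte loop.  The real patch's definition may
 * differ in detail.
 */
static inline void break_spin_lock_sketch(spinlock_t *lock)
{
        if (current->need_resched) {
                spin_unlock(lock);      /* preemption point            */
                spin_lock(lock);        /* retake before touching ptes */
        }
}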
/*
 * maps a range of physical memory into the requested pages. the old
 * mappings are removed. any references to nonexistent pages result
 * in null mappings (currently treated as "copy-on-access")
 */
static inline void remap_pte_range(pte_t * pte, unsigned long address,
                                   unsigned long size, unsigned long phys_addr,
                                   pgprot_t prot)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                struct page *page;
                pte_t oldpage;

                oldpage = ptep_get_and_clear(pte);

                /* same policy as the older variant above, expressed with
                 * struct page: map only addresses with no valid mem_map
                 * entry, or whose page is reserved */
                page = virt_to_page(__va(phys_addr));
                if ((!VALID_PAGE(page)) || PageReserved(page))
                        set_pte(pte, mk_pte_phys(phys_addr, prot));

                forget_pte(oldpage);
                address += PAGE_SIZE;
                phys_addr += PAGE_SIZE;
                pte++;
        } while (address && (address < end));
}
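/*
 * All of the *_pte_range() helpers above are driven by the same kind of
 * walk one level up.  A sketch modeled on the 2.4-era remap_pmd_range()
 * shows the pattern: the pmd level clamps to PGDIR_SIZE, allocates pte
 * tables, and hands PMD_SIZE-sized chunks down; the pgd level does the
 * same once more above it.  pte_alloc(mm, pmd, address) is the 2.4
 * signature; details vary slightly across the versions shown.
 */
static inline int remap_pmd_range(struct mm_struct *mm, pmd_t * pmd,
                                  unsigned long address, unsigned long size,
                                  unsigned long phys_addr, pgprot_t prot)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        phys_addr -= address;   /* so address + phys_addr is the target */
        do {
                pte_t * pte = pte_alloc(mm, pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_pte_range(pte, address, end - address,
                                address + phys_addr, prot);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
        return 0;
}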