/*
 * Fill in all PTEs covered by one pmd range: clamp the range to the
 * current pgd slot, then hand each pmd-sized chunk to remap_area_pte().
 */
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
	phys_t size, phys_t phys_addr, unsigned long flags)
{
	phys_t end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address,
			       address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}
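/*
 * Context sketch (assumption, not part of the original listing): the
 * classic caller of remap_area_pmd() walks the kernel page directory one
 * pgd slot at a time. The version below follows the shape of the
 * traditional MIPS remap_area_pages(); exact details vary by kernel
 * version.
 */
static int remap_area_pages(unsigned long address, phys_t phys_addr,
	phys_t size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}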
/*
 * Older ARM variant: takes a ready-made pgprot_t and always passes 0 as
 * the extension bits to set_pte_ext().
 */
static int remap_area_pte(pmd_t *pmd, unsigned long addr,
			  unsigned long end, unsigned long phys_addr,
			  pgprot_t prot)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;

	do {
		if (!pte_none(*pte))
			goto bad;

		set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot), 0);
		phys_addr += PAGE_SIZE;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;

 bad:
	printk(KERN_CRIT "remap_area_pte: page already exists\n");
	BUG();
}
/*
 * Later ARM variant: derives the PTE protection from a struct mem_type,
 * which also carries the hardware extension bits (prot_pte_ext) that
 * set_pte_ext() writes into the page tables.
 */
static int remap_area_pte(pmd_t *pmd, unsigned long addr,
			  unsigned long end, unsigned long phys_addr,
			  const struct mem_type *type)
{
	pgprot_t prot = __pgprot(type->prot_pte);
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;

	do {
		if (!pte_none(*pte)) {
			printk(KERN_CRIT "remap_area_pte: page already exists\n");
			BUG();
			return -EFAULT;	/* unreachable: BUG() does not return */
		}
		set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot),
			    type->prot_pte_ext);
		phys_addr += PAGE_SIZE;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}
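/*
 * Usage sketch (assumption, simplified): on ARM the mem_type is looked up
 * once and threaded down the page-table walk. get_mem_type() and
 * MT_DEVICE are real ARM identifiers, and pmd_off_k() is the ARM helper
 * returning the kernel pmd entry for an address; example_map_one_pmd()
 * itself is hypothetical, and real callers iterate pmd by pmd rather than
 * mapping a single pmd's worth.
 */
static int example_map_one_pmd(unsigned long addr, unsigned long end,
			       unsigned long phys_addr)
{
	const struct mem_type *type = get_mem_type(MT_DEVICE);

	if (!type)
		return -EINVAL;

	/* map [addr, end); caller must keep the range within one pmd */
	return remap_area_pte(pmd_off_k(addr), addr, end, phys_addr, type);
}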
void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
	struct vm_struct *area;
	unsigned long virtaddr, retaddr;
	long offset;
	pgd_t *pgd_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	/*
	 * Don't allow mappings that wrap..
	 */
	if (!size || size > physaddr + size)
		return NULL;

#ifdef CONFIG_AMIGA
	if (MACH_IS_AMIGA) {
		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
		    && (cacheflag == IOMAP_NOCACHE_SER))
			return (void *)physaddr;
	}
#endif

#ifdef DEBUG
	printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
	/*
	 * Mappings have to be aligned
	 */
	offset = physaddr & (IO_SIZE - 1);
	physaddr &= -IO_SIZE;
	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;

	/*
	 * Ok, go for it..
	 */
	area = get_io_area(size);
	if (!area)
		return NULL;

	virtaddr = (unsigned long)area->addr;
	retaddr = virtaddr + offset;
#ifdef DEBUG
	printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif

	/*
	 * add cache and table flags to physical address
	 */
	if (CPU_IS_040_OR_060) {
		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
			     _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_FULL_CACHING:
			physaddr |= _PAGE_CACHE040;
			break;
		case IOMAP_NOCACHE_SER:
		default:
			physaddr |= _PAGE_NOCACHE_S;
			break;
		case IOMAP_NOCACHE_NONSER:
			physaddr |= _PAGE_NOCACHE;
			break;
		case IOMAP_WRITETHROUGH:
			physaddr |= _PAGE_CACHE040W;
			break;
		}
	} else {
		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
		switch (cacheflag) {
		case IOMAP_NOCACHE_SER:
		case IOMAP_NOCACHE_NONSER:
		default:
			physaddr |= _PAGE_NOCACHE030;
			break;
		case IOMAP_FULL_CACHING:
		case IOMAP_WRITETHROUGH:
			break;
		}
	}

	while ((long)size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PTRTREESIZE-1)))
			printk("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
		if (!pmd_dir) {
			printk("ioremap: no mem for pmd_dir\n");
			return NULL;
		}

		if (CPU_IS_020_OR_030) {
			pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
			physaddr += PTRTREESIZE;
			virtaddr += PTRTREESIZE;
			size -= PTRTREESIZE;
		} else {
			pte_dir = pte_alloc_kernel(&init_mm, pmd_dir, virtaddr);
			if (!pte_dir) {
				printk("ioremap: no mem for pte_dir\n");
				return NULL;
			}

			pte_val(*pte_dir) = physaddr;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
	flush_tlb_all();

	return (void *)retaddr;
}
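/*
 * Usage sketch (assumption): a m68k driver mapping a 4 KiB device
 * register window uncached. The physical address and example_map_regs()
 * are made up for illustration; IOMAP_NOCACHE_SER is the real m68k
 * cache-mode constant.
 */
static void __iomem *example_map_regs(void)
{
	return (void __iomem *)__ioremap(0x80000000, 0x1000,
					 IOMAP_NOCACHE_SER);
}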
void *nvmap_mmap(struct nvmap_handle_ref *ref)
{
	struct nvmap_handle *h;
	pgprot_t prot;
	unsigned long adj_size;
	unsigned long offs;
	struct vm_struct *v;
	void *p;

	h = nvmap_handle_get(ref->handle);
	if (!h)
		return NULL;

	prot = nvmap_pgprot(h, pgprot_kernel);

	if (h->heap_pgalloc)
		return vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT,
				  -1, prot);

	/* carveout - explicitly map the pfns into a vmalloc area */
	nvmap_usecount_inc(h);

	adj_size = h->carveout->base & ~PAGE_MASK;
	adj_size += h->size;
	adj_size = PAGE_ALIGN(adj_size);

	v = alloc_vm_area(adj_size);
	if (!v) {
		nvmap_usecount_dec(h);
		nvmap_handle_put(h);
		return NULL;
	}

	p = v->addr + (h->carveout->base & ~PAGE_MASK);

	for (offs = 0; offs < adj_size; offs += PAGE_SIZE) {
		unsigned long addr = (unsigned long) v->addr + offs;
		unsigned int pfn;
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pfn = __phys_to_pfn(h->carveout->base + offs);
		pgd = pgd_offset_k(addr);
		pud = pud_alloc(&init_mm, pgd, addr);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, addr);
		if (!pmd)
			break;
		pte = pte_alloc_kernel(pmd, addr);
		if (!pte)
			break;
		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
		flush_tlb_kernel_page(addr);
	}

	if (offs != adj_size) {
		free_vm_area(v);
		nvmap_usecount_dec(h);
		nvmap_handle_put(h);
		return NULL;
	}

	/* leave the handle ref count incremented by 1, so that
	 * the handle will not be freed while the kernel mapping exists.
	 * nvmap_handle_put will be called by unmapping this address */
	return p;
}
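/*
 * Sketch of the matching teardown (assumption): modeled on the nvmap
 * driver's nvmap_munmap(), which drops the extra reference taken by
 * nvmap_mmap() above. Details may differ between driver versions.
 */
void nvmap_munmap(struct nvmap_handle_ref *ref, void *addr)
{
	struct nvmap_handle *h;

	if (!ref)
		return;

	h = ref->handle;

	if (h->heap_pgalloc) {
		vm_unmap_ram(addr, h->size >> PAGE_SHIFT);
	} else {
		struct vm_struct *vm;

		/* back up to the page-aligned start of the vmalloc area */
		addr -= (h->carveout->base & ~PAGE_MASK);
		vm = remove_vm_area(addr);
		BUG_ON(!vm);
		kfree(vm);
		nvmap_usecount_dec(h);
	}
	nvmap_handle_put(h);
}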