void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size) { unsigned long last_addr, addr; unsigned long offset = phys_addr & ~PAGE_MASK; struct vm_struct *area; pgprot_t prot = __pgprot(_PAGE_PRESENT|_PAGE_READ|_PAGE_WRITE |(__HEXAGON_C_DEV << 6)); last_addr = phys_addr + size - 1; if (!size || (last_addr < phys_addr)) return NULL; size = PAGE_ALIGN(offset + size); area = get_vm_area(size, VM_IOREMAP); if (!area) return NULL; addr = (unsigned long)area->addr; if (ioremap_page_range(addr, addr+size, phys_addr, prot)) { vunmap((void *)addr); return NULL; } return (void __iomem *) (offset + addr); }
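/*
 * A minimal usage sketch for a mapping routine like the one above. The device
 * base address, window size and register offset below are illustrative
 * assumptions, not taken from any real board.
 */
#define EXAMPLE_DEV_PHYS	0xfe000000UL	/* assumed register block base */
#define EXAMPLE_DEV_SIZE	0x1000UL
#define EXAMPLE_STATUS_REG	0x04

static int example_read_status(u32 *status)
{
	void __iomem *regs = ioremap_nocache(EXAMPLE_DEV_PHYS, EXAMPLE_DEV_SIZE);

	if (!regs)
		return -ENOMEM;

	*status = readl(regs + EXAMPLE_STATUS_REG);	/* uncached MMIO read */
	iounmap(regs);
	return 0;
}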
void __iomem * __ioremap_prot(unsigned long phys_addr, unsigned long size, pgprot_t prot) { void __iomem * addr; struct vm_struct * area; unsigned long offset, last_addr; last_addr = phys_addr + size - 1; if (!size || last_addr < phys_addr) return NULL; offset = phys_addr & ~PAGE_MASK; phys_addr &= PAGE_MASK; size = PAGE_ALIGN(last_addr+1) - phys_addr; area = get_vm_area(size, VM_IOREMAP); if (!area) return NULL; addr = (void __iomem *)area->addr; if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size, phys_addr, prot)) { vfree((void __force *)addr); return NULL; } return (void __iomem *) (offset + (char __iomem *)addr); }
/* * Remap an arbitrary physical address space into the kernel virtual * address space. Needed when the kernel wants to access high addresses * directly. * * NOTE! We need to allow non-page-aligned mappings too: we will obviously * have to convert them into an offset in a page-aligned mapping, but the * caller shouldn't need to know that small detail. */ void __iomem * __ioremap_prot(unsigned long phys_addr, unsigned long size, pgprot_t prot) { void __iomem * addr; struct vm_struct * area; unsigned long offset, last_addr; /* Don't allow wraparound or zero size */ last_addr = phys_addr + size - 1; if (!size || last_addr < phys_addr) return NULL; /* * Mappings have to be page-aligned */ offset = phys_addr & ~PAGE_MASK; phys_addr &= PAGE_MASK; size = PAGE_ALIGN(last_addr+1) - phys_addr; /* * Ok, go for it.. */ area = get_vm_area(size, VM_IOREMAP); if (!area) return NULL; addr = (void __iomem *)area->addr; if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size, phys_addr, prot)) { vfree((void __force *)addr); return NULL; } return (void __iomem *) (offset + (char __iomem *)addr); }
static void * __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, const void *caller) { struct vm_struct *area; unsigned long addr; /* * DMA allocation can be mapped to user space, so lets * set VM_USERMAP flags too. */ area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP, caller); if (!area) return NULL; addr = (unsigned long)area->addr; area->phys_addr = __pfn_to_phys(page_to_pfn(page)); #ifdef CONFIG_L4 area->phys_addr = virt_to_phys((void *)(page_to_pfn(page) << PAGE_SHIFT)); #endif if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) { vunmap((void *)addr); return NULL; } l4x_map_pages(addr, page_to_pfn(page) << PAGE_SHIFT, size); return (void *)addr; }
/* * Remap an arbitrary physical address space into the kernel virtual * address space. Needed when the kernel wants to access high addresses * directly. * * NOTE! We need to allow non-page-aligned mappings too: we will obviously * have to convert them into an offset in a page-aligned mapping, but the * caller shouldn't need to know that small detail. */ static void __iomem *__ioremap_caller(phys_addr_t addr, size_t size, pgprot_t prot, void *caller) { phys_addr_t last_addr; unsigned long offset, vaddr; struct vm_struct *area; /* Disallow wrap-around or zero size */ last_addr = addr + size - 1; if (!size || last_addr < addr) return NULL; /* Page-align mappings */ offset = addr & (~PAGE_MASK); addr &= PAGE_MASK; size = PAGE_ALIGN(size + offset); area = get_vm_area_caller(size, VM_IOREMAP, caller); if (!area) return NULL; vaddr = (unsigned long)area->addr; if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) { free_vm_area(area); return NULL; } return (void __iomem *)(vaddr + offset); }
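/*
 * Variants that take an explicit caller, like __ioremap_caller() above, are
 * normally reached through thin public wrappers that pass
 * __builtin_return_address(0) so /proc/vmallocinfo can attribute the mapping
 * to the real caller. A plausible wrapper is sketched below; the wrapper name
 * and the device pgprot are architecture specific and assumed here.
 */
void __iomem *ioremap(phys_addr_t phys_addr, size_t size)
{
	/* pgprot_noncached(PAGE_KERNEL) stands in for the port's device protection */
	return __ioremap_caller(phys_addr, size,
				pgprot_noncached(PAGE_KERNEL),
				__builtin_return_address(0));
}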
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset, size_t size, unsigned int mtype, void *caller) { const struct mem_type *type; int err; unsigned long addr; struct vm_struct * area; /* * High mappings must be supersection aligned */ if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK)) return NULL; /* * Don't allow RAM to be mapped - this causes problems with ARMv6+ */ #ifndef CONFIG_SQUASHFS_DEBUGGER_AUTO_DIAGNOSE if (WARN_ON(pfn_valid(pfn))) return NULL; #endif type = get_mem_type(mtype); if (!type) return NULL; /* * Page align the mapping size, taking account of any offset. */ size = PAGE_ALIGN(offset + size); area = get_vm_area_caller(size, VM_IOREMAP, caller); if (!area) return NULL; addr = (unsigned long)area->addr; #ifndef CONFIG_SMP if (DOMAIN_IO == 0 && (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) || cpu_is_xsc3()) && pfn >= 0x100000 && !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) { area->flags |= VM_ARM_SECTION_MAPPING; err = remap_area_supersections(addr, pfn, size, type); } else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) { area->flags |= VM_ARM_SECTION_MAPPING; err = remap_area_sections(addr, pfn, size, type); } else #endif err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn), __pgprot(type->prot_pte)); if (err) { vunmap((void *)addr); return NULL; } flush_cache_vmap(addr, addr + size); return (void __iomem *) (offset + addr); }
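/*
 * A sketch of the kind of wrapper that reaches the pfn-based helper above: it
 * splits a physical address into pfn plus in-page offset and forwards the
 * original caller. Shown for illustration only; argument checking is kept
 * minimal.
 */
void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
				   unsigned int mtype, void *caller)
{
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);
	unsigned long last_addr = phys_addr + size - 1;

	/* Reject zero size and address wraparound */
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype, caller);
}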
int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr) { BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT); return ioremap_page_range(PCI_IO_VIRT_BASE + offset, PCI_IO_VIRT_BASE + offset + SZ_64K, phys_addr, __pgprot(get_mem_type(MT_DEVICE)->prot_pte)); }
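/*
 * Illustrative caller of pci_ioremap_io(): a host-bridge driver hands each
 * 64 KiB PCI I/O window to it at consecutive offsets. The resource and the
 * window number are hypothetical.
 */
static int example_map_io_window(struct resource *io_res, int nr)
{
	/* window nr lands at PCI_IO_VIRT_BASE + nr * SZ_64K */
	return pci_ioremap_io(nr * SZ_64K, io_res->start);
}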
static void __iomem *ghes_ioremap_pfn_irq(u64 pfn) { unsigned long vaddr; vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr); ioremap_page_range(vaddr, vaddr + PAGE_SIZE, pfn << PAGE_SHIFT, PAGE_KERNEL); return (void __iomem *)vaddr; }
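/*
 * Illustrative only: how a mapping like the one above is typically consumed
 * in the GHES driver - copy the firmware error record out through the
 * __iomem window, then tear the page down again. ghes_iounmap_irq() is
 * assumed to be the matching unmap helper, and len is assumed to stay within
 * the single page mapped here.
 */
static void example_read_estatus(u64 pfn, void *buf, size_t len)
{
	void __iomem *vaddr = ghes_ioremap_pfn_irq(pfn);

	memcpy_fromio(buf, vaddr, len);
	ghes_iounmap_irq(vaddr);
}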
void __iomem *__uc32_ioremap_pfn_caller(unsigned long pfn, unsigned long offset, size_t size, unsigned int mtype, void *caller) { const struct mem_type *type; int err; unsigned long addr; struct vm_struct *area; /* * High mappings must be section aligned */ if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SECTION_MASK)) return NULL; /* * Don't allow RAM to be mapped */ if (pfn_valid(pfn)) { WARN(1, "BUG: Your driver calls ioremap() on\n" "system memory. This leads to architecturally\n" "unpredictable behaviour, and ioremap() will fail in\n" "the next kernel release. Please fix your driver.\n"); return NULL; } type = get_mem_type(mtype); if (!type) return NULL; /* * Page align the mapping size, taking account of any offset. */ size = PAGE_ALIGN(offset + size); area = get_vm_area_caller(size, VM_IOREMAP, caller); if (!area) return NULL; addr = (unsigned long)area->addr; if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) { area->flags |= VM_UNICORE_SECTION_MAPPING; err = remap_area_sections(addr, pfn, size, type); } else err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn), __pgprot(type->prot_pte)); if (err) { vunmap((void *)addr); return NULL; } flush_cache_vmap(addr, addr + size); return (void __iomem *) (offset + addr); }
/* * Re-map an arbitrary physical address space into the kernel virtual * address space. Needed when the kernel wants to access physical * memory directly. */ void __iomem *__ioremap(unsigned long phys_addr, size_t size, unsigned long flags) { unsigned long addr; struct vm_struct *area; unsigned long offset, last_addr; pgprot_t prot; /* * Check if we can simply use the P4 segment. This area is * uncacheable, so if caching/buffering is requested, we can't * use it. */ if ((phys_addr >= P4SEG) && (flags == 0)) return (void __iomem *)phys_addr; /* Don't allow wraparound or zero size */ last_addr = phys_addr + size - 1; if (!size || last_addr < phys_addr) return NULL; /* * XXX: When mapping regular RAM, we'd better make damn sure * it's never used for anything else. But this is really the * caller's responsibility... */ if (PHYSADDR(P2SEGADDR(phys_addr)) == phys_addr) return (void __iomem *)P2SEGADDR(phys_addr); /* Mappings have to be page-aligned */ offset = phys_addr & ~PAGE_MASK; phys_addr &= PAGE_MASK; size = PAGE_ALIGN(last_addr + 1) - phys_addr; prot = __pgprot(_PAGE_PRESENT | _PAGE_GLOBAL | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_TYPE_SMALL | flags); /* * Ok, go for it.. */ area = get_vm_area(size, VM_IOREMAP); if (!area) return NULL; area->phys_addr = phys_addr; addr = (unsigned long )area->addr; if (ioremap_page_range(addr, addr + size, phys_addr, prot)) { vunmap((void *)addr); return NULL; } return (void __iomem *)(offset + (char *)addr); }
static void __iomem *ghes_ioremap_pfn_irq(u64 pfn) { unsigned long vaddr, paddr; pgprot_t prot; vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr); paddr = pfn << PAGE_SHIFT; prot = arch_apei_get_mem_attribute(paddr); ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot); return (void __iomem *)vaddr; }
/* * Remap an arbitrary physical address space into the kernel virtual * address space. Needed when the kernel wants to access high addresses * directly. * * NOTE! We need to allow non-page-aligned mappings too: we will obviously * have to convert them into an offset in a page-aligned mapping, but the * caller shouldn't need to know that small detail. */ void __iomem * __init_refok __ioremap_caller(phys_addr_t phys_addr, unsigned long size, pgprot_t pgprot, void *caller) { struct vm_struct *area; unsigned long offset, last_addr, addr, orig_addr; void __iomem *mapped; /* Don't allow wraparound or zero size */ last_addr = phys_addr + size - 1; if (!size || last_addr < phys_addr) return NULL; /* * If we can't yet use the regular approach, go the fixmap route. */ if (!mem_init_done) return ioremap_fixed(phys_addr, size, pgprot); /* * First try to remap through the PMB. * PMB entries are all pre-faulted. */ mapped = pmb_remap_caller(phys_addr, size, pgprot, caller); if (mapped && !IS_ERR(mapped)) return mapped; /* * Mappings have to be page-aligned */ offset = phys_addr & ~PAGE_MASK; phys_addr &= PAGE_MASK; size = PAGE_ALIGN(last_addr+1) - phys_addr; /* * Ok, go for it.. */ area = get_vm_area_caller(size, VM_IOREMAP, caller); if (!area) return NULL; area->phys_addr = phys_addr; orig_addr = addr = (unsigned long)area->addr; if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) { vunmap((void *)orig_addr); return NULL; } return (void __iomem *)(offset + (char *)orig_addr); }
static void* MV_SHM_ioremap(unsigned long phys_start, unsigned long size) { struct vm_struct *area; area = get_vm_area(size, VM_IOREMAP); if (area == NULL) return NULL; area->phys_addr = phys_start; if (ioremap_page_range((unsigned long)area->addr, (unsigned long)area->addr + size, phys_start, PAGE_KERNEL)) { free_vm_area(area); return NULL; } return area->addr; }
void *fmem_map_virtual_area(int cacheability) { unsigned long addr; const struct mem_type *type; int ret; addr = (unsigned long) fmem_data.area->addr; type = get_mem_type(cacheability); ret = ioremap_page_range(addr, addr + fmem_data.size, fmem_data.phys, __pgprot(type->prot_pte)); if (ret) return ERR_PTR(ret); fmem_data.virt = fmem_data.area->addr; return fmem_data.virt; }
static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size, pgprot_t prot, void *caller) { unsigned long last_addr; unsigned long offset = phys_addr & ~PAGE_MASK; int err; unsigned long addr; struct vm_struct *area; /* * Page align the mapping address and size, taking account of any * offset. */ phys_addr &= PAGE_MASK; size = PAGE_ALIGN(size + offset); /* * Don't allow wraparound, zero size or outside PHYS_MASK. */ last_addr = phys_addr + size - 1; if (!size || last_addr < phys_addr || (last_addr & ~PHYS_MASK)) return NULL; /* * Don't allow RAM to be mapped. */ if (WARN_ON(pfn_valid(__phys_to_pfn(phys_addr)))) return NULL; area = get_vm_area_caller(size, VM_IOREMAP, caller); if (!area) return NULL; addr = (unsigned long)area->addr; area->phys_addr = phys_addr; err = ioremap_page_range(addr, addr + size, phys_addr, prot); if (err) { vunmap((void *)addr); return NULL; } return (void __iomem *)(offset + addr); }
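/*
 * A plausible counterpart that tears such a mapping down again, sketched under
 * the assumption that mappings which merely reused the linear map must be left
 * alone - hence the is_vmalloc_addr() check.
 */
void __iounmap(volatile void __iomem *io_addr)
{
	unsigned long addr = (unsigned long)io_addr & PAGE_MASK;

	/* only unmap addresses that actually live in the vmalloc area */
	if (is_vmalloc_addr((void *)addr))
		vunmap((void *)addr);
}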
static void * __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, const void *caller) { struct vm_struct *area; unsigned long addr; area = get_vm_area_caller(size, VM_DMA | VM_USERMAP, caller); if (!area) return NULL; addr = (unsigned long)area->addr; area->phys_addr = __pfn_to_phys(page_to_pfn(page)); if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) { vunmap((void *)addr); return NULL; } return (void *)addr; }
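/*
 * A sketch of the matching teardown for the remapped DMA buffer above;
 * find_vm_area() is used to sanity-check that the address really came from
 * this allocator. The VM_DMA | VM_USERMAP flag pair mirrors the variant shown
 * here and is an assumption.
 */
static void __dma_free_remap(void *cpu_addr)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || (area->flags & (VM_DMA | VM_USERMAP)) !=
		     (VM_DMA | VM_USERMAP)) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	/* vunmap() removes the page-table entries and releases the vm area */
	vunmap(cpu_addr);
}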
/* * Remap an arbitrary physical address space into the kernel virtual * address space. Needed when the kernel wants to access high addresses * directly. * * NOTE! We need to allow non-page-aligned mappings too: we will obviously * have to convert them into an offset in a page-aligned mapping, but the * caller shouldn't need to know that small detail. */ void __iomem *__ioremap(unsigned long phys_addr, size_t size, unsigned long flags) { unsigned long addr; struct vm_struct *area; unsigned long offset, last_addr; pgprot_t prot; /* Don't allow wraparound or zero size */ last_addr = phys_addr + size - 1; if (!size || last_addr < phys_addr) return NULL; /* Custom region addresses are accessible and uncached by default. */ if (phys_addr >= LINSYSCUSTOM_BASE && phys_addr < (LINSYSCUSTOM_BASE + LINSYSCUSTOM_LIMIT)) return (__force void __iomem *) phys_addr; /* * Mappings have to be page-aligned */ offset = phys_addr & ~PAGE_MASK; phys_addr &= PAGE_MASK; size = PAGE_ALIGN(last_addr+1) - phys_addr; prot = __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_KERNEL | _PAGE_CACHE_WIN0 | flags); /* * Ok, go for it.. */ area = get_vm_area(size, VM_IOREMAP); if (!area) return NULL; area->phys_addr = phys_addr; addr = (unsigned long) area->addr; if (ioremap_page_range(addr, addr + size, phys_addr, prot)) { vunmap((void *) addr); return NULL; } return (__force void __iomem *) (offset + (char *)addr); }
/** * The caller has to make sure that there is enough guard * vm area allocated, so that the alignment adjustment done here * does not overflow the vm area. Unlike ioremap, this function can't * take care of this, as the vm area is pre-allocated * by calling plat_get_vm_area. */ void __iomem *plat_ioremap_ns(unsigned long vaddr, unsigned long size, phys_addr_t phys_addr) { unsigned long pfn; unsigned long offset; pfn = __phys_to_pfn(phys_addr); offset = phys_addr & ~PAGE_MASK; size = PAGE_ALIGN(offset + size); if (ioremap_page_range(vaddr, vaddr + size, __pfn_to_phys(pfn), __pgprot(PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED | L_PTE_SHARED))) { pr_err("ERROR: ns_ioremap failed\n"); return (void __iomem *)NULL; } return (void __iomem *)(vaddr + offset); }
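/*
 * Usage sketch for the helper above: the caller reserves an oversized,
 * guard-padded virtual window first and then maps into it. plat_get_vm_area()
 * is the pre-allocation hook named in the comment; its exact signature is not
 * shown there and is assumed here.
 */
void __iomem *example_map_ns_region(phys_addr_t phys, unsigned long size)
{
	/* one extra page of slack for the alignment adjustment */
	struct vm_struct *area = plat_get_vm_area(size + PAGE_SIZE);

	if (!area)
		return NULL;

	return plat_ioremap_ns((unsigned long)area->addr, size, phys);
}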
/* * Remap an arbitrary physical address space into the kernel virtual * address space. Needed when the kernel wants to access high addresses * directly. * * NOTE! We need to allow non-page-aligned mappings too: we will obviously * have to convert them into an offset in a page-aligned mapping, but the * caller shouldn't need to know that small detail. */ void *__ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags) { void * addr; struct vm_struct * area; unsigned long offset, last_addr; pgprot_t pgprot; /* Don't allow wraparound or zero size */ last_addr = phys_addr + size - 1; if (!size || last_addr < phys_addr) return NULL; pgprot = __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SHARED | flags); /* * Mappings have to be page-aligned */ offset = phys_addr & ~PAGE_MASK; phys_addr &= PAGE_MASK; size = PAGE_ALIGN(last_addr + 1) - phys_addr; /* * Ok, go for it.. */ area = get_vm_area(size, VM_IOREMAP); if (!area) return NULL; pr_debug("Get vm_area returns %p addr %p\n", area, area->addr); area->phys_addr = phys_addr; addr = area->addr; if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size, phys_addr, pgprot)) { vunmap(addr); return NULL; } return (void *) (offset + (char *)addr); }
/* * ioremap with access flags * Cache semantics wise it is the same as ioremap - "forced" uncached. * However, unlike vanilla ioremap, which bypasses the ARC MMU for addresses in * the ARC hardware uncached region, this one still goes through the MMU as the * caller might need finer access control (R/W/X) */ void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size, unsigned long flags) { void __iomem *vaddr; struct vm_struct *area; unsigned long off, end; pgprot_t prot = __pgprot(flags); /* Don't allow wraparound, zero size */ end = paddr + size - 1; if ((!size) || (end < paddr)) return NULL; /* An early platform driver might end up here */ if (!slab_is_available()) return NULL; /* force uncached */ prot = pgprot_noncached(prot); /* Mappings have to be page-aligned */ off = paddr & ~PAGE_MASK; paddr &= PAGE_MASK; size = PAGE_ALIGN(end + 1) - paddr; /* * Ok, go for it.. */ area = get_vm_area(size, VM_IOREMAP); if (!area) return NULL; area->phys_addr = paddr; vaddr = (void __iomem *)area->addr; if (ioremap_page_range((unsigned long)vaddr, (unsigned long)vaddr + size, paddr, prot)) { vunmap((void __force *)vaddr); return NULL; } return (void __iomem *)(off + (char __iomem *)vaddr); }
/* Map an arbitrary MMIO address, homed according to pgprot, into VA space. */ void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size, pgprot_t home) { void *addr; struct vm_struct *area; unsigned long offset, last_addr; pgprot_t pgprot; /* Don't allow wraparound or zero size */ last_addr = phys_addr + size - 1; if (!size || last_addr < phys_addr) return NULL; /* Create a read/write, MMIO VA mapping homed at the requested shim. */ pgprot = PAGE_KERNEL; pgprot = hv_pte_set_mode(pgprot, HV_PTE_MODE_MMIO); pgprot = hv_pte_set_lotar(pgprot, hv_pte_get_lotar(home)); /* * Mappings have to be page-aligned */ offset = phys_addr & ~PAGE_MASK; phys_addr &= PAGE_MASK; size = PAGE_ALIGN(last_addr+1) - phys_addr; /* * Ok, go for it.. */ area = get_vm_area(size, VM_IOREMAP /* | other flags? */); if (!area) return NULL; area->phys_addr = phys_addr; addr = area->addr; if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size, phys_addr, pgprot)) { remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr)); return NULL; } return (__force void __iomem *) (offset + (char *)addr); }
void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base, void *virt_base, unsigned long flags) { int ret; unsigned int offset = buffer->priv_phys - phys_base; unsigned long start = ((unsigned long)virt_base) + offset; const struct mem_type *type = ION_IS_CACHED(flags) ? get_mem_type(MT_DEVICE_CACHED) : get_mem_type(MT_DEVICE); if (phys_base > buffer->priv_phys) return NULL; ret = ioremap_page_range(start, start + buffer->size, buffer->priv_phys, __pgprot(type->prot_pte)); if (!ret) return (void *)start; else return NULL; }
/* * Remap an arbitrary physical address space into the kernel virtual * address space. Needed when the kernel wants to access high addresses * directly. */ void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags) { void * addr; struct vm_struct * area; if (phys_addr < virt_to_phys(high_memory)) return phys_to_virt(phys_addr); if (phys_addr & ~PAGE_MASK) return NULL; size = PAGE_ALIGN(size); if (!size || size > phys_addr + size) return NULL; area = get_vm_area(size, VM_IOREMAP); if (!area) return NULL; addr = area->addr; if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size, phys_addr, __pgprot(flags))) { vfree(addr); return NULL; } return addr; }
void __iomem * __init_refok __ioremap_caller(phys_addr_t phys_addr, unsigned long size, pgprot_t pgprot, void *caller) { struct vm_struct *area; unsigned long offset, last_addr, addr, orig_addr; void __iomem *mapped; last_addr = phys_addr + size - 1; if (!size || last_addr < phys_addr) return NULL; if (!mem_init_done) return ioremap_fixed(phys_addr, size, pgprot); mapped = pmb_remap_caller(phys_addr, size, pgprot, caller); if (mapped && !IS_ERR(mapped)) return mapped; offset = phys_addr & ~PAGE_MASK; phys_addr &= PAGE_MASK; size = PAGE_ALIGN(last_addr+1) - phys_addr; area = get_vm_area_caller(size, VM_IOREMAP, caller); if (!area) return NULL; area->phys_addr = phys_addr; orig_addr = addr = (unsigned long)area->addr; if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) { vunmap((void *)orig_addr); return NULL; } return (void __iomem *)(offset + (char *)orig_addr); }
int ioremap_page(unsigned long virt, unsigned long phys, const struct mem_type *mtype) { return ioremap_page_range(virt, virt + PAGE_SIZE, phys, __pgprot(mtype->prot_pte)); }
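/*
 * ioremap_page() maps exactly one page at a virtual address the caller has
 * already reserved. A sketch of such a caller; MT_DEVICE is the usual memory
 * type for registers, and the wrapper name is hypothetical.
 */
static void __iomem *example_map_one_page(unsigned long phys)
{
	struct vm_struct *area = get_vm_area(PAGE_SIZE, VM_IOREMAP);

	if (!area)
		return NULL;

	if (ioremap_page((unsigned long)area->addr, phys,
			 get_mem_type(MT_DEVICE))) {
		free_vm_area(area);
		return NULL;
	}

	return (void __iomem *)area->addr;
}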
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset, size_t size, unsigned int mtype, void *caller) { const struct mem_type *type; int err; unsigned long addr; struct vm_struct * area; #ifndef CONFIG_ARM_LPAE /* * High mappings must be supersection aligned */ if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK)) return NULL; #endif type = get_mem_type(mtype); if (!type) return NULL; /* * Page align the mapping size, taking account of any offset. */ size = PAGE_ALIGN(offset + size); /* * Try to reuse one of the static mapping whenever possible. */ read_lock(&vmlist_lock); for (area = vmlist; area; area = area->next) { if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) break; if (!(area->flags & VM_ARM_STATIC_MAPPING)) continue; if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype)) continue; if (__phys_to_pfn(area->phys_addr) > pfn || __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1) continue; /* we can drop the lock here as we know *area is static */ read_unlock(&vmlist_lock); addr = (unsigned long)area->addr; addr += __pfn_to_phys(pfn) - area->phys_addr; return (void __iomem *) (offset + addr); } read_unlock(&vmlist_lock); #if 0 /* HACK - do allow RAM to be mapped, the problems are a bit overrated */ /* * Don't allow RAM to be mapped - this causes problems with ARMv6+ */ if (WARN_ON(pfn_valid(pfn))) return NULL; #endif area = get_vm_area_caller(size, VM_IOREMAP, caller); if (!area) return NULL; addr = (unsigned long)area->addr; #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) if (DOMAIN_IO == 0 && (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) || cpu_is_xsc3()) && pfn >= 0x100000 && !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) { area->flags |= VM_ARM_SECTION_MAPPING; err = remap_area_supersections(addr, pfn, size, type); } else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) { area->flags |= VM_ARM_SECTION_MAPPING; err = remap_area_sections(addr, pfn, size, type); } else #endif err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn), __pgprot(type->prot_pte)); if (err) { vunmap((void *)addr); return NULL; } flush_cache_vmap(addr, addr + size); return (void __iomem *) (offset + addr); }
/* * Remap an arbitrary physical address space into the kernel virtual * address space. Needed when the kernel wants to access high addresses * directly. * * NOTE! We need to allow non-page-aligned mappings too: we will obviously * have to convert them into an offset in a page-aligned mapping, but the * caller shouldn't need to know that small detail. */ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags) { struct vm_struct * area; unsigned long offset, last_addr, addr, orig_addr; pgprot_t pgprot; /* Don't allow wraparound or zero size */ last_addr = phys_addr + size - 1; if (!size || last_addr < phys_addr) return NULL; /* * If we're on an SH7751 or SH7780 PCI controller, PCI memory is * mapped at the end of the address space (typically 0xfd000000) * in a non-translatable area, so mapping through page tables for * this area is not only pointless, but also fundamentally * broken. Just return the physical address instead. * * For boards that map a small PCI memory aperture somewhere in * P1/P2 space, ioremap() will already do the right thing, * and we'll never get this far. */ if (is_pci_memaddr(phys_addr) && is_pci_memaddr(last_addr)) return (void __iomem *)phys_addr; /* * Don't allow anybody to remap normal RAM that we're using.. */ if (phys_addr < virt_to_phys(high_memory)) return NULL; /* * Mappings have to be page-aligned */ offset = phys_addr & ~PAGE_MASK; phys_addr &= PAGE_MASK; size = PAGE_ALIGN(last_addr+1) - phys_addr; /* * Ok, go for it.. */ area = get_vm_area(size, VM_IOREMAP); if (!area) return NULL; area->phys_addr = phys_addr; orig_addr = addr = (unsigned long)area->addr; #ifdef CONFIG_32BIT /* * First try to remap through the PMB once a valid VMA has been * established. Smaller allocations (or the rest of the size * remaining after a PMB mapping due to the size not being * perfectly aligned on a PMB size boundary) are then mapped * through the UTLB using conventional page tables. * * PMB entries are all pre-faulted. */ if (unlikely(size >= 0x1000000)) { unsigned long mapped = pmb_remap(addr, phys_addr, size, flags); if (likely(mapped)) { addr += mapped; phys_addr += mapped; size -= mapped; } } #endif pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags); if (likely(size)) if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) { vunmap((void *)orig_addr); return NULL; } return (void __iomem *)(offset + (char *)orig_addr); }
/* * Remap an arbitrary physical address space into the kernel virtual * address space. It transparently creates kernel huge I/O mapping when * the physical address is aligned by a huge page size (1GB or 2MB) and * the requested size is at least the huge page size. * * NOTE: MTRRs can override PAT memory types with a 4KB granularity. * Therefore, the mapping code falls back to use a smaller page toward 4KB * when a mapping range is covered by non-WB type of MTRRs. * * NOTE! We need to allow non-page-aligned mappings too: we will obviously * have to convert them into an offset in a page-aligned mapping, but the * caller shouldn't need to know that small detail. */ static void __iomem *__ioremap_caller(resource_size_t phys_addr, unsigned long size, enum page_cache_mode pcm, void *caller) { unsigned long offset, vaddr; resource_size_t pfn, last_pfn, last_addr; const resource_size_t unaligned_phys_addr = phys_addr; const unsigned long unaligned_size = size; struct vm_struct *area; enum page_cache_mode new_pcm; pgprot_t prot; int retval; void __iomem *ret_addr; /* Don't allow wraparound or zero size */ last_addr = phys_addr + size - 1; if (!size || last_addr < phys_addr) return NULL; if (!phys_addr_valid(phys_addr)) { printk(KERN_WARNING "ioremap: invalid physical address %llx\n", (unsigned long long)phys_addr); WARN_ON_ONCE(1); return NULL; } /* * Don't remap the low PCI/ISA area, it's always mapped.. */ if (is_ISA_range(phys_addr, last_addr)) return (__force void __iomem *)phys_to_virt(phys_addr); /* * Don't allow anybody to remap normal RAM that we're using.. */ pfn = phys_addr >> PAGE_SHIFT; last_pfn = last_addr >> PAGE_SHIFT; if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL, __ioremap_check_ram) == 1) { WARN_ONCE(1, "ioremap on RAM at 0x%llx - 0x%llx\n", phys_addr, last_addr); return NULL; } /* * Mappings have to be page-aligned */ offset = phys_addr & ~PAGE_MASK; phys_addr &= PHYSICAL_PAGE_MASK; size = PAGE_ALIGN(last_addr+1) - phys_addr; retval = reserve_memtype(phys_addr, (u64)phys_addr + size, pcm, &new_pcm); if (retval) { printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval); return NULL; } if (pcm != new_pcm) { if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) { printk(KERN_ERR "ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n", (unsigned long long)phys_addr, (unsigned long long)(phys_addr + size), pcm, new_pcm); goto err_free_memtype; } pcm = new_pcm; } prot = PAGE_KERNEL_IO; switch (pcm) { case _PAGE_CACHE_MODE_UC: default: prot = __pgprot(pgprot_val(prot) | cachemode2protval(_PAGE_CACHE_MODE_UC)); break; case _PAGE_CACHE_MODE_UC_MINUS: prot = __pgprot(pgprot_val(prot) | cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)); break; case _PAGE_CACHE_MODE_WC: prot = __pgprot(pgprot_val(prot) | cachemode2protval(_PAGE_CACHE_MODE_WC)); break; case _PAGE_CACHE_MODE_WB: break; } /* * Ok, go for it.. */ area = get_vm_area_caller(size, VM_IOREMAP, caller); if (!area) goto err_free_memtype; area->phys_addr = phys_addr; vaddr = (unsigned long) area->addr; if (kernel_map_sync_memtype(phys_addr, size, pcm)) goto err_free_area; if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) goto err_free_area; ret_addr = (void __iomem *) (vaddr + offset); mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr); /* * Check if the request spans more than any BAR in the iomem resource * tree. */ WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size), KERN_INFO "Info: mapping multiple BARs. Your kernel is fine."); return ret_addr; err_free_area: free_vm_area(area); err_free_memtype: free_memtype(phys_addr, phys_addr + size); return NULL; }
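/*
 * On x86 the public entry points are thin wrappers around __ioremap_caller()
 * that pick a cache mode and forward the caller for vmallocinfo attribution.
 * A sketch consistent with the enum page_cache_mode values used above; the
 * exact wrapper set varies between kernel versions.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/* UC- can still be overridden by a WC MTRR, unlike plain UC */
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_UC_MINUS,
				__builtin_return_address(0));
}

void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
				__builtin_return_address(0));
}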
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags) { void __iomem * addr; struct vm_struct * area; unsigned long offset, last_addr; pgprot_t pgprot; /* Don't allow wraparound or zero size */ last_addr = phys_addr + size - 1; if (!size || last_addr < phys_addr) return NULL; /* * Map objects in the low 512mb of address space using KSEG1, otherwise * map using page tables. */ if (IS_LOW512(phys_addr) && IS_LOW512(phys_addr + size - 1)) return (void *) KSEG1ADDR(phys_addr); /* * Don't allow anybody to remap normal RAM that we're using.. */ if (phys_addr < virt_to_phys(high_memory)) { char *t_addr, *t_end; struct page *page; t_addr = __va(phys_addr); t_end = t_addr + (size - 1); for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++) if(!PageReserved(page)) return NULL; } pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | flags); /* * Mappings have to be page-aligned */ offset = phys_addr & ~PAGE_MASK; phys_addr &= PAGE_MASK; size = PAGE_ALIGN(last_addr + 1) - phys_addr; /* * Ok, go for it.. */ area = get_vm_area(size, VM_IOREMAP); if (!area) return NULL; area->phys_addr = phys_addr; addr = (void __iomem *) area->addr; if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size, phys_addr, pgprot)) { vunmap((void __force *) addr); return NULL; } return (void __iomem *) (offset + (char __iomem *)addr); }
/* * Remap an arbitrary physical address space into the kernel virtual * address space. Needed when the kernel wants to access high addresses * directly. * * NOTE! We need to allow non-page-aligned mappings too: we will obviously * have to convert them into an offset in a page-aligned mapping, but the * caller shouldn't need to know that small detail. */ void __iomem * __init_refok __ioremap_caller(phys_addr_t phys_addr, unsigned long size, pgprot_t pgprot, void *caller) { struct vm_struct *area; unsigned long offset, last_addr, addr, orig_addr; void __iomem *mapped; int simple = (pgprot_val(pgprot) == pgprot_val(PAGE_KERNEL)) || (pgprot_val(pgprot) == pgprot_val(PAGE_KERNEL_NOCACHE)); int cached = pgprot_val(pgprot) & _PAGE_CACHABLE; /* Don't allow wraparound or zero size */ last_addr = phys_addr + size - 1; if (!size || last_addr < phys_addr) return NULL; /* This code is shared with SH5, hence the ifdef. Respect if you * have an SH5 */ #ifdef P4SEG /* If the address is in P4 and it is uncached we can just * use the address directly */ if ((PXSEG(phys_addr) == P4SEG) && simple && !cached) return (void __iomem *) phys_addr; #endif /* * If we can't yet use the regular approach, go the fixmap route. */ if (!mem_init_done) return ioremap_fixed(phys_addr, size, pgprot); /* * First try to remap through the PMB. * PMB entries are all pre-faulted. */ mapped = pmb_remap_caller(phys_addr, size, pgprot, caller); if (mapped && !IS_ERR(mapped)) return mapped; /* * Mappings have to be page-aligned */ offset = phys_addr & ~PAGE_MASK; phys_addr &= PAGE_MASK; size = PAGE_ALIGN(last_addr+1) - phys_addr; /* * Ok, go for it.. */ area = get_vm_area_caller(size, VM_IOREMAP, caller); if (!area) return NULL; area->phys_addr = phys_addr; orig_addr = addr = (unsigned long)area->addr; if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) { vunmap((void *)orig_addr); return NULL; } return (void __iomem *)(offset + (char *)orig_addr); }