void __iomem *
ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_WRITE)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_EXEC leak out */
	flags &= ~_PAGE_EXEC;
	/*
	 * Force kernel mapping.
	 */
#if defined(CONFIG_PPC_BOOK3S_64)
	flags |= _PAGE_PRIVILEGED;
#else
	flags &= ~_PAGE_USER;
#endif

#ifdef _PAGE_BAP_SR
	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
	 * which means that we just cleared supervisor access... oops ;-) This
	 * restores it
	 */
	flags |= _PAGE_BAP_SR;
#endif

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
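/*
 * Usage sketch (illustrative only, not part of the listing above; the
 * EXAMPLE_MMIO_* constants and example_map_noncached() are made up):
 * ioremap_prot() takes raw page-protection bits from the caller, e.g. a
 * non-cached mapping derived from PAGE_KERNEL. Any exec/user bits passed
 * in are stripped by the function, as shown in the variant above.
 */
#include <linux/io.h>
#include <asm/pgtable.h>

#define EXAMPLE_MMIO_BASE	0xc0000000UL	/* hypothetical device window */
#define EXAMPLE_MMIO_SIZE	0x1000UL

static void __iomem *example_map_noncached(void)
{
	unsigned long flags = pgprot_val(pgprot_noncached(PAGE_KERNEL));

	return ioremap_prot(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE, flags);
}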
void __iomem *ioremap_coherent(phys_addr_t addr, unsigned long size)
{
	pgprot_t prot = pgprot_cached(PAGE_KERNEL);

	return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
}
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0));
}
/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
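/*
 * Usage sketch (not from the listing above; the device and BAR index are
 * hypothetical): a driver might map a write-combined region such as a
 * framebuffer with ioremap_wc() for faster streaming writes, and must
 * later release it with iounmap().
 */
#include <linux/pci.h>
#include <linux/io.h>

static void __iomem *example_map_framebuffer(struct pci_dev *pdev)
{
	resource_size_t start = pci_resource_start(pdev, 0);	/* hypothetical BAR 0 */
	resource_size_t len = pci_resource_len(pdev, 0);
	void __iomem *fb;

	fb = ioremap_wc(start, len);	/* write-combined mapping */
	if (!fb)
		return NULL;

	/* ... stream pixel data with memset_io()/writel() ... */
	return fb;	/* caller must iounmap(fb) when done */
}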
void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
{
	/* For normal memory we already have a cacheable mapping. */
	if (pfn_valid(__phys_to_pfn(phys_addr)))
		return (void __iomem *)__phys_to_virt(phys_addr);

	return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
				__builtin_return_address(0));
}
void __iomem *ioremap_wc(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached_wc(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
void __iomem *
ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_RW)
		flags |= _PAGE_DIRTY | _PAGE_HWWRITE;

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	flags &= ~(_PAGE_USER | _PAGE_EXEC | _PAGE_HWEXEC);

	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}
/**
 * ioremap_nocache	-	map bus memory into CPU space
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
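/*
 * Usage sketch (illustrative, not part of the code above; CTRL_REG_* and
 * the status-register offset are hypothetical): control registers are
 * typically mapped uncached and then accessed through the mmio helpers.
 */
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/types.h>

#define CTRL_REG_BASE	0xfe000000UL	/* hypothetical register block */
#define CTRL_REG_SIZE	0x1000UL

static int example_read_status(void)
{
	void __iomem *regs;
	u32 status;

	regs = ioremap_nocache(CTRL_REG_BASE, CTRL_REG_SIZE);
	if (!regs)
		return -ENOMEM;

	status = readl(regs + 0x04);	/* hypothetical status register */
	iounmap(regs);

	return status ? 0 : -EIO;
}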
void __iomem *
ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	pte_t pte = __pte(flags);

	/* writeable implies dirty for kernel addresses */
	if (pte_write(pte))
		pte = pte_mkdirty(pte);

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	pte = pte_exprotect(pte);
	pte = pte_mkprivileged(pte);

	return __ioremap_caller(addr, size, pte_pgprot(pte),
				__builtin_return_address(0));
}
void __iomem *
ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	/* writeable implies dirty for kernel addresses */
	if ((flags & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO)
		flags |= _PAGE_DIRTY | _PAGE_HWWRITE;

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	flags &= ~(_PAGE_USER | _PAGE_EXEC);

#ifdef _PAGE_BAP_SR
	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format
	 * which means that we just cleared supervisor access... oops ;-) This
	 * restores it
	 */
	flags |= _PAGE_BAP_SR;
#endif

	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}
void __iomem *
ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_WRITE)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_EXEC leak out */
	flags &= ~_PAGE_EXEC;
	/*
	 * Force kernel mapping.
	 */
	flags &= ~_PAGE_USER;
	flags |= _PAGE_PRIVILEGED;

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
void __iomem *
__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}
void __iomem *ioremap_wc(phys_addr_t addr, unsigned long size)
{
	return __ioremap_caller(addr, size, _PAGE_NO_CACHE,
				__builtin_return_address(0));
}
void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
{
	return __ioremap_caller(phys_addr, size, prot,
				__builtin_return_address(0));
}
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0));
}
/*
 * ioremap	-	map bus memory into CPU space
 * @offset:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap(phys_addr_t offset, unsigned long size)
{
	return __ioremap_caller(offset, size, PAGE_KERNEL,
				__builtin_return_address(0));
}
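/*
 * Usage sketch (assumed pattern, not from the listing above; the
 * DEV_MMIO_* constants and "example-dev" name are hypothetical): claim
 * the physical range with request_mem_region() before mapping it, and
 * pair ioremap() with iounmap() plus release_mem_region() on teardown.
 */
#include <linux/io.h>
#include <linux/ioport.h>

#define DEV_MMIO_BASE	0xd0000000UL	/* hypothetical device window */
#define DEV_MMIO_SIZE	0x100UL

static void __iomem *example_map_device(void)
{
	void __iomem *base;

	if (!request_mem_region(DEV_MMIO_BASE, DEV_MMIO_SIZE, "example-dev"))
		return NULL;

	base = ioremap(DEV_MMIO_BASE, DEV_MMIO_SIZE);
	if (!base) {
		release_mem_region(DEV_MMIO_BASE, DEV_MMIO_SIZE);
		return NULL;
	}

	return base;	/* pair with iounmap() + release_mem_region() */
}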