/*
 * Map 'pfn' using protections 'prot'
 */
void __iomem *
iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	/*
	 * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
	 * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the
	 * MTRR is UC or WC.  UC_MINUS gets the real intention of the user,
	 * which is "WC if the MTRR is WC, UC if you can't do that."
	 */
	if (!pat_enabled && pgprot_val(prot) ==
	    (__PAGE_KERNEL | cachemode2protval(_PAGE_CACHE_MODE_WC)))
		prot = __pgprot(__PAGE_KERNEL |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));

	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
}
int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_WC;
	int ret;

	if (!is_io_mapping_possible(base, size))
		return -EINVAL;

	ret = io_reserve_memtype(base, base + size, &pcm);
	if (ret)
		return ret;

	*prot = __pgprot(__PAGE_KERNEL | cachemode2protval(pcm));
	return 0;
}
/*
 * Map 'pfn' using protections 'prot'
 */
void __iomem *
iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	/*
	 * For non-PAT systems, translate a non-WB request to UC- just in
	 * case the caller set the PWT bit in prot directly without using
	 * pgprot_writecombine().  UC- translates to uncached if the MTRR
	 * is UC or WC.  UC- gets the real intention of the user, which is
	 * "WC if the MTRR is WC, UC if you can't do that."
	 */
	if (!pat_enabled() && pgprot2cachemode(prot) != _PAGE_CACHE_MODE_WB)
		prot = __pgprot(__PAGE_KERNEL |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));

	return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
}
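To show how these primitives fit together, here is a minimal, hypothetical usage sketch, not taken from the source above: a driver-style helper that reserves a PCI BAR as write-combining with iomap_create_wc(), atomically maps the single page containing a register with iomap_atomic_prot_pfn(), writes it, and tears everything down. The function name example_write_wc() and its parameters are illustrative assumptions; in-tree users normally reach these routines through the io_mapping helpers in <linux/io-mapping.h>.

/*
 * Hypothetical usage sketch -- not part of the kernel source quoted above.
 * All names and parameter values are illustrative assumptions.
 */
#include <linux/io.h>
#include <asm/iomap.h>	/* iomap_create_wc, iomap_atomic_prot_pfn, iomap_free */

static int example_write_wc(resource_size_t bar_base, unsigned long bar_size,
			    unsigned long offset, u32 value)
{
	void __iomem *vaddr;
	unsigned long pfn;
	pgprot_t prot;
	int ret;

	/* Reserve the whole BAR as WC (the reservation may be demoted, e.g. to UC-). */
	ret = iomap_create_wc(bar_base, bar_size, &prot);
	if (ret)
		return ret;

	/* Atomically map only the 4KB page that contains 'offset'. */
	pfn = (unsigned long)((bar_base + offset) >> PAGE_SHIFT);
	vaddr = iomap_atomic_prot_pfn(pfn, prot);

	writel(value, vaddr + (offset & ~PAGE_MASK));

	iounmap_atomic(vaddr);		/* undo the atomic kmap */
	iomap_free(bar_base, bar_size);	/* release the memtype reservation */
	return 0;
}

Because iomap_atomic_prot_pfn() ends up in kmap_atomic_prot_pfn(), the mapping disables preemption; the caller must not sleep between the map and the iounmap_atomic() call.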
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to using smaller pages, down to
 * 4KB, when a mapping range is covered by non-WB type MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	pfn      = phys_addr >> PAGE_SHIFT;
	last_pfn = last_addr >> PAGE_SHIFT;
	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
				  __ioremap_check_ram) == 1) {
		WARN_ONCE(1, "ioremap on RAM at 0x%llx - 0x%llx\n",
			  phys_addr, last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	prot = PAGE_KERNEL_IO;
	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}