/*
 * __iounmap - tear down a mapping previously created by ioremap()
 * @addr: cookie handed back by ioremap()
 *
 * Addresses that were never given a VMA-backed translation -- anything
 * outside the P3 window, or a PCI memory window -- are silently ignored.
 * On 32-bit (PMB) configurations, any PMB entries covering the region
 * are purged before the VMA itself is removed and freed.
 */
void __iounmap(void __iomem *addr)
{
	unsigned long va = (unsigned long __force)addr;
	unsigned long seg = PXSEG(va);
	struct vm_struct *area;

	/* Untranslatable segments and PCI windows have nothing to undo. */
	if (seg < P3SEG || seg >= P3_ADDR_MAX || is_pci_memaddr(va))
		return;

#ifdef CONFIG_32BIT
	/*
	 * Purge any PMB entries that may have been established for this
	 * mapping, then proceed with conventional VMA teardown.
	 *
	 * XXX: Note that due to the way that remove_vm_area() does
	 * matching of the resultant VMA, we aren't able to fast-forward
	 * the address past the PMB space until the end of the VMA where
	 * the page tables reside. As such, unmap_vm_area() will be
	 * forced to linearly scan over the area until it finds the page
	 * tables where PTEs that need to be unmapped actually reside,
	 * which is far from optimal. Perhaps we need to use a separate
	 * VMA for the PMB mappings?
	 *
	 *	-- PFM.
	 */
	pmb_unmap(va);
#endif

	area = remove_vm_area((void *)(va & PAGE_MASK));
	if (!area) {
		printk(KERN_ERR "%s: bad address %p\n", __func__, addr);
		return;
	}

	kfree(area);
}
/*
 * __iounmap - release a mapping obtained from ioremap()
 * @addr: cookie handed back by ioremap()
 *
 * The checks below are order-dependent: a fixed (early) mapping has no
 * VMA and no PMB entry, and a PMB-handled mapping has no VMA either, so
 * each successful early-out means there is nothing further to tear down.
 */
void __iounmap(void __iomem *addr)
{
	unsigned long va = (unsigned long __force)addr;
	struct vm_struct *area;

	/* No translatable mapping means nothing to do. */
	if (iomapping_nontranslatable(va))
		return;

	/* Early fixed mappings carry no VMA; done if this was one. */
	if (iounmap_fixed(addr) == 0)
		return;

	/* Likewise, if the PMB owned the mapping we are finished. */
	if (pmb_unmap(addr) == 0)
		return;

	/* Conventional teardown: pull the VMA and free its descriptor. */
	area = remove_vm_area((void *)(va & PAGE_MASK));
	if (!area) {
		printk(KERN_ERR "%s: bad address %p\n", __func__, addr);
		return;
	}

	kfree(area);
}
/*
 * __iounmap - release a mapping obtained from ioremap()
 * @addr: cookie handed back by ioremap()
 *
 * NOTE(review): the three early returns appear order-dependent --
 * fixed mappings and PMB-handled mappings carry no VMA, so each
 * successful check means there is nothing left to tear down; confirm
 * against iounmap_fixed()/pmb_unmap() return conventions.
 */
void __iounmap(void __iomem *addr)
{
	unsigned long vaddr = (unsigned long __force)addr;
	struct vm_struct *p;

	/* Nothing to do if there is no translatable mapping. */
	if (iomapping_nontranslatable(vaddr))
		return;

	/* A return of 0 here indicates an early fixed mapping: no VMA. */
	if (iounmap_fixed(addr) == 0)
		return;

	/* A return of 0 here indicates the PMB handled the unmap. */
	if (pmb_unmap(addr) == 0)
		return;

	/* Conventional VMA teardown: remove the area and free it. */
	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
	if (!p) {
		printk(KERN_ERR "%s: bad address %p\n", __func__, addr);
		return;
	}

	kfree(p);
}