static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}
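/*
 * Illustrative sketch, not part of the function above: a caller such as
 * __arm_ioremap_pfn_caller() may only use remap_area_sections() when the
 * physical base, the virtual base, and the size are all section aligned.
 * The helper name below is hypothetical; the test itself mirrors the
 * "& ~PMD_MASK" check used on the ARM ioremap path.
 */
static bool can_use_section_mapping(unsigned long addr,
				    unsigned long pfn, size_t size)
{
	/* all three quantities must be multiples of the section size */
	return !((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK);
}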
void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct vm_struct *vm;

	read_lock(&vmlist_lock);
	for (vm = vmlist; vm; vm = vm->next) {
		if (vm->addr > addr)
			break;
		if (!(vm->flags & VM_IOREMAP))
			continue;
		/* If this is a static mapping we must leave it alone */
		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
			read_unlock(&vmlist_lock);
			return;
		}
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if ((vm->addr == addr) &&
		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
			unmap_area_sections((unsigned long)vm->addr, vm->size);
			break;
		}
#endif
	}
	read_unlock(&vmlist_lock);

	vunmap(addr);
}
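/*
 * Usage sketch under stated assumptions: __iounmap() is reached through the
 * generic iounmap() that drivers pair with ioremap() (both from
 * <linux/io.h>). example_probe() is a hypothetical function, and the base
 * address, size, and register offset below are placeholders.
 */
static int example_probe(void)
{
	void __iomem *regs;

	regs = ioremap(0x10000000, SZ_4K);	/* hypothetical device base */
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x4);		/* hypothetical register write */

	iounmap(regs);				/* lands in __iounmap() on ARM */
	return 0;
}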
void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
#ifndef CONFIG_SMP
	struct vm_struct **p, *tmp;

	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast. We need the lock here because we need to clear
	 * all the mappings before the area can be reclaimed by someone
	 * else.
	 */
	write_lock(&vmlist_lock);
	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
				unmap_area_sections((unsigned long)tmp->addr,
						    tmp->size);
			}
			break;
		}
	}
	write_unlock(&vmlist_lock);
#endif

	vunmap(addr);
}
void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
#ifndef CONFIG_SMP
	struct vm_struct *vm;

	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast.
	 */
	read_lock(&vmlist_lock);
	for (vm = vmlist; vm; vm = vm->next) {
		if ((vm->flags & VM_IOREMAP) && (vm->addr == addr)) {
			if (vm->flags & VM_ARM_SECTION_MAPPING) {
				unmap_area_sections((unsigned long)vm->addr,
						    vm->size);
			}
			break;
		}
	}
	read_unlock(&vmlist_lock);
#endif

	vunmap(addr);
}
void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct vm_struct *vm;

	read_lock(&vmlist_lock);
	for (vm = vmlist; vm; vm = vm->next) {
		if (vm->addr > addr)
			break;
		if (!(vm->flags & VM_IOREMAP))
			continue;
		/* static mappings must be left in place */
		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
			read_unlock(&vmlist_lock);
			return;
		}
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
		/* section mappings need their PMD entries torn down by hand */
		if ((vm->addr == addr) &&
		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
			unmap_area_sections((unsigned long)vm->addr, vm->size);
			break;
		}
#endif
	}
	read_unlock(&vmlist_lock);

	vunmap(addr);
}
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		/* bits 32-35 of the physical address go in bits 20-23 */
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		/* a supersection descriptor is replicated in 16 entries */
		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 unsigned long size, unsigned long flags)
{
	unsigned long prot, addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	prot = PMD_TYPE_SECT | PMD_SECT_SUPER | PMD_SECT_AP_WRITE |
	       PMD_DOMAIN(DOMAIN_IO) |
	       (flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));

	/*
	 * ARMv6 and above need XN set to prevent speculative prefetches
	 * hitting IO.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6)
		prot |= PMD_SECT_XN;

	pgd = pgd_offset_k(virt);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | prot;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd_t *pmd = pmd_offset(pgd, addr);

			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PGDIR_SIZE;
			pgd++;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
void __uc32_iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct vm_struct **p, *tmp;

	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast. We need the lock here because we need to clear
	 * all the mappings before the area can be reclaimed by someone
	 * else.
	 */
	write_lock(&vmlist_lock);
	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
			if (tmp->flags & VM_UNICORE_SECTION_MAPPING) {
				unmap_area_sections((unsigned long)tmp->addr,
						    tmp->size);
			}
			break;
		}
	}
	write_unlock(&vmlist_lock);

	vunmap(addr);
}
void __uc32_iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct vm_struct *vm;

	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast. find_vm_area() takes the vmap locking internally,
	 * so no list lock is needed here.
	 */
	vm = find_vm_area(addr);
	if (vm && (vm->flags & VM_IOREMAP) &&
	    (vm->flags & VM_UNICORE_SECTION_MAPPING))
		unmap_area_sections((unsigned long)vm->addr, vm->size);

	vunmap(addr);
}
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset((pud_t *)pgd, addr);

		set_pmd(pmd, __pmd(__pfn_to_phys(pfn) | type->prot_sect));
		pfn += SZ_4M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		/* bits 32-35 of the physical address go in bits 20-23 */
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		/* a supersection descriptor is replicated in 16 entries */
		for (i = 0; i < 8; i++) {
			pmd_t *pmd = pmd_offset(pgd, addr);

			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PGDIR_SIZE;
			pgd++;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
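/*
 * Illustrative sketch, not taken from the code above: the ARM ioremap path
 * only selects remap_area_supersections() when the whole mapping is 16MiB
 * aligned, mirroring the "& ~SUPERSECTION_MASK" test in
 * __arm_ioremap_pfn_caller(). The helper name is hypothetical.
 */
static bool can_use_supersection(unsigned long addr,
				 unsigned long pfn, size_t size)
{
	/* physical base, virtual base, and size must all be 16MiB aligned */
	return !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK);
}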