/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 * @ea:   kernel virtual address of the start of the mapping
 * @size: size of the mapping in bytes
 *
 * Only removes the kernel page-table entries via unmap_kernel_range();
 * it does not release any vmalloc area or other bookkeeping, which is
 * why it is suitable for partially unmapping a manually-built mapping.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	/* Both the address and the size must be page-aligned. */
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}
/*
 * Tear down a coherent DMA remapping previously created in the vmalloc
 * space.  The area must carry both VM_ARM_DMA_CONSISTENT and VM_USERMAP,
 * otherwise the address is rejected with a warning and nothing is freed.
 */
static void __dma_free_remap(void *cpu_addr, size_t size)
{
	const unsigned int required = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
	struct vm_struct *area = find_vm_area(cpu_addr);
	bool valid = area && (area->flags & required) == required;

	if (!valid) {
		WARN(1, "trying to free invalid coherent area: %p\n",
		     cpu_addr);
		return;
	}

	/* Remove the page-table entries, then release the vmalloc area. */
	unmap_kernel_range((unsigned long)cpu_addr, size);
	vunmap(cpu_addr);
}
/*
 * Release a coherent DMA remapping.  Only areas flagged VM_DMA are
 * accepted; anything else logs an error with a stack trace and is
 * left untouched.
 */
static void __dma_free_remap(void *cpu_addr, size_t size)
{
	struct vm_struct *vm = find_vm_area(cpu_addr);

	if (!vm || !(vm->flags & VM_DMA)) {
		pr_err("%s: trying to free invalid coherent area: %p\n",
		       __func__, cpu_addr);
		dump_stack();
		return;
	}

	/* Unmap the kernel page tables, then free the vmalloc area. */
	unmap_kernel_range((unsigned long)cpu_addr, size);
	vunmap(cpu_addr);
}
void ion_cp_heap_unmap_kernel(struct ion_heap *heap, struct ion_buffer *buffer) { struct ion_cp_heap *cp_heap = container_of(heap, struct ion_cp_heap, heap); if (cp_heap->reusable) unmap_kernel_range((unsigned long)buffer->vaddr, buffer->size); else __arm_iounmap(buffer->vaddr); buffer->vaddr = NULL; mutex_lock(&cp_heap->lock); if (ION_IS_CACHED(buffer->flags)) --cp_heap->kmap_cached_count; else --cp_heap->kmap_uncached_count; ion_cp_release_region(cp_heap); mutex_unlock(&cp_heap->lock); return; }
/*
 * Remove the kernel page tables covering the fmem region and clear the
 * cached virtual address so later users see the region as unmapped.
 */
void fmem_unmap_virtual_area(void)
{
	unsigned long addr = (unsigned long)fmem_data.virt;

	unmap_kernel_range(addr, fmem_data.size);
	fmem_data.virt = NULL;
}
/**
 * binder_update_page_range() - allocate or release pages in a binder
 *                              buffer range.
 * @alloc:    binder_alloc whose buffer space is being updated
 * @allocate: non-zero to allocate/map pages, 0 to release them
 * @start:    first kernel address of the range (page-aligned)
 * @end:      first kernel address past the end of the range
 *
 * On allocation, each page in [start, end) is either pulled back off the
 * shrinker LRU (if it is still resident) or freshly allocated, mapped
 * into the kernel, and inserted into the user VMA.  On release, pages
 * are not freed immediately but parked on the binder LRU for the
 * shrinker to reclaim.
 *
 * NOTE(review): the err_* labels live *inside* the free_range loop body;
 * a goto from the allocation loop jumps into the teardown loop so that
 * already-processed pages are walked backwards and returned to the LRU.
 * This layout is load-bearing — do not hoist the labels out of the loop.
 *
 * Return: 0 on success; -ENOMEM when the mapping failed with a live VMA,
 *         -ESRCH when no VMA was available.
 */
static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void *start, void *end)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct binder_lru_page *page;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = NULL;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: %s pages %pK-%pK\n", alloc->pid,
			   allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (allocate == 0)
		goto free_range;

	/*
	 * The mm is only needed if at least one page must be freshly
	 * allocated; pages still on the LRU are just reclaimed in place.
	 */
	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}

	/* Pin the mm so it cannot go away while we hold mmap_sem. */
	if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
		mm = alloc->vma_vm_mm;

	if (mm) {
		down_read(&mm->mmap_sem);
		vma = alloc->vma;
	}

	if (!vma && need_mm) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
		       alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			/* Page is still resident: just take it off the LRU. */
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;

		trace_binder_alloc_page_start(alloc, index);
		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		/* Map the new page into the kernel side of the double mapping. */
		ret = map_kernel_range_noflush((unsigned long)page_addr,
					       PAGE_SIZE, PAGE_KERNEL,
					       &page->page_ptr);
		flush_cache_vmap((unsigned long)page_addr,
				 (unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
			       alloc->pid, page_addr);
			goto err_map_kernel_failed;
		}

		/* Mirror the page into the userspace VMA. */
		user_page_addr =
			(uintptr_t)page_addr + alloc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}

		/* High-water mark of mapped pages, for statistics. */
		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;

		trace_binder_alloc_page_end(alloc, index);
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	/*
	 * Walk backwards so that a goto from the allocation loop above
	 * lands on the correct partial-teardown steps for the page that
	 * failed, then falls through to LRU-park the earlier pages.
	 */
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		bool ret;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		trace_binder_free_lru_start(alloc, index);

		/* Defer the actual free: hand the page to the shrinker LRU. */
		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
		continue;

err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(page->page_ptr);
		page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
		;
	}
err_no_vma:
	if (mm) {
		up_read(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}