static int filemap_sync(struct vm_area_struct * vma, unsigned long address,
	size_t size, unsigned int flags)
{
	pgd_t * dir;
	unsigned long end = address + size;
	int error = 0;

	/* Acquire the lock early; it may be possible to avoid dropping
	 * and reacquiring it repeatedly.
	 */
	spin_lock(&vma->vm_mm->page_table_lock);

	dir = pgd_offset(vma->vm_mm, address);
	flush_cache_range(vma, address, end);
	if (address >= end)
		BUG();
	do {
		error |= filemap_sync_pmd_range(dir, address, end, vma, flags);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_range(vma, end - size, end);

	spin_unlock(&vma->vm_mm->page_table_lock);

	return error;
}
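The real work happens one level down, in filemap_sync_pmd_range(), which is not shown here. The following is a minimal sketch of its likely shape under the three-level layout this version assumes; the clamping, the masking, and the filemap_sync_pte_range() helper it dispatches to are assumptions for illustration, not verbatim kernel source.

/* Sketch (assumed shape): walk the pmds under one pgd entry and sync
 * each pte range, mirroring the clamp-and-step pattern used above. */
static inline int filemap_sync_pmd_range(pgd_t * dir,
	unsigned long address, unsigned long end,
	struct vm_area_struct *vma, unsigned int flags)
{
	pmd_t * pmd;
	int error;

	if (pgd_none(*dir))
		return 0;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return 0;
	}
	pmd = pmd_offset(dir, address);
	/* Clamp the walk to the span covered by this pgd entry. */
	if ((address & PGDIR_MASK) != (end & PGDIR_MASK))
		end = (address & PGDIR_MASK) + PGDIR_SIZE;
	error = 0;
	do {
		/* filemap_sync_pte_range() (assumed helper) writes back the
		 * dirty ptes in one pmd and flushes their TLB entries. */
		error |= filemap_sync_pte_range(pmd, address, end, vma, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return error;
}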
static inline int filemap_sync_pud_range(pgd_t *pgd,
	unsigned long address, unsigned long end,
	struct vm_area_struct *vma, unsigned int flags)
{
	pud_t *pud;
	int error;

	if (pgd_none(*pgd))
		return 0;
	if (pgd_bad(*pgd)) {
		pgd_ERROR(*pgd);
		pgd_clear(pgd);
		return 0;
	}
	pud = pud_offset(pgd, address);
	if ((address & PGDIR_MASK) != (end & PGDIR_MASK))
		end = (address & PGDIR_MASK) + PGDIR_SIZE;
	error = 0;
	do {
		error |= filemap_sync_pmd_range(pud, address, end, vma, flags);
		address = (address + PUD_SIZE) & PUD_MASK;
		pud++;
	} while (address && (address < end));
	return error;
}
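Once the pud level is inserted, the top-level pgd loop in filemap_sync() would dispatch to this helper instead of calling filemap_sync_pmd_range() directly. A rough sketch of what that caller's walk would look like, assuming it otherwise keeps the structure shown above (not verbatim source):

	/* Sketch (assumed): the pgd-level walk in filemap_sync() with a
	 * pud level present; only the loop body changes. */
	dir = pgd_offset(vma->vm_mm, address);
	do {
		error |= filemap_sync_pud_range(dir, address, end, vma, flags);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));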
static int filemap_sync(struct vm_area_struct * vma, unsigned long address,
	size_t size, unsigned int flags)
{
	pgd_t * dir;
	unsigned long end = address + size;
	int error = 0;

	/* Acquire the lock early; it may be possible to avoid dropping
	 * and reacquiring it repeatedly.
	 */
	spin_lock(&vma->vm_mm->page_table_lock);

	dir = pgd_offset(vma->vm_mm, address);
	flush_cache_range(vma, address, end);

	/* For hugepages we can't go walking the page table normally,
	 * but that's ok, hugetlbfs is memory based, so we don't need
	 * to do anything more on an msync() */
	if (is_vm_hugetlb_page(vma))
		goto out;

	if (address >= end)
		BUG();
	do {
		error |= filemap_sync_pmd_range(dir, address, end, vma, flags);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	/*
	 * Why flush ? filemap_sync_pte already flushed the tlbs with the
	 * dirty bits.
	 */
	flush_tlb_range(vma, end - size, end);
out:
	spin_unlock(&vma->vm_mm->page_table_lock);
	return error;
}
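For reference, kernels of this vintage reach this code from userspace via msync() on a shared file mapping. A minimal usage example follows; the file name is hypothetical and error handling is trimmed for brevity.

#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t len = 4096;
	int fd = open("data.bin", O_RDWR);	/* hypothetical data file */
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	memcpy(p, "hello", 5);			/* dirty a mapped page */

	/* Write the dirty pages back; MS_SYNC blocks until the data has been
	 * written out. For a MAP_SHARED file mapping this is the request that
	 * ends up walking the page tables in filemap_sync() above. */
	msync(p, len, MS_SYNC);

	munmap(p, len);
	close(fd);
	return 0;
}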