/* mm->page_table_lock is held. mmap_sem is not held */
static inline int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long address, int count, zone_t * classzone)
{
	pgd_t *pgdir;
	unsigned long end;

	/* Don't swap out areas which are reserved */
	if (vma->vm_flags & VM_RESERVED)
		return count;

	pgdir = pgd_offset(mm, address);

	end = vma->vm_end;
	if (address >= end)
		BUG();
	do {
		count = swap_out_pgd(mm, vma, pgdir, address, end, count, classzone);
		if (!count)
			break;
		/* lock depth can be 1 or 2 */
		if (conditional_schedule_needed())
			return count;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		pgdir++;
	} while (address && (address < end));
	return count;
}
static int swap_out_vma(struct mm_struct * mm, struct vm_area_struct * vma, unsigned long address, int gfp_mask)
{
	pgd_t *pgdir;
	unsigned long end;

	/* Don't swap out areas which are locked down */
	if (vma->vm_flags & (VM_LOCKED|VM_RESERVED))
		return 0;

	pgdir = pgd_offset(mm, address);

	end = vma->vm_end;
	if (address >= end)
		BUG();
	do {
		int result = swap_out_pgd(mm, vma, pgdir, address, end, gfp_mask);
		if (result)
			return result;
		if (!mm->swap_cnt)
			return 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		pgdir++;
	} while (address && (address < end));
	return 0;
}
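/*
 * For context, a minimal sketch of how a swap_out_vma() variant is
 * typically driven: a swap_out_mm()-style walker resumes at
 * mm->swap_address, finds the covering VMA, and calls swap_out_vma()
 * on it and each following VMA until progress is made or the address
 * space is exhausted. This sketch is modeled on the gfp_mask variant
 * above under assumed 2.4 semantics; the name swap_out_mm_sketch is
 * hypothetical and the real mm/vmscan.c code differs across revisions.
 */
static int swap_out_mm_sketch(struct mm_struct * mm, int gfp_mask)
{
	unsigned long address;
	struct vm_area_struct * vma;

	/* Resume where the previous scan of this mm left off. */
	address = mm->swap_address;

	vma = find_vma(mm, address);
	if (vma) {
		if (address < vma->vm_start)
			address = vma->vm_start;

		for (;;) {
			int result = swap_out_vma(mm, vma, address, gfp_mask);
			if (result)
				return result;	/* made progress on this VMA */
			vma = vma->vm_next;
			if (!vma)
				break;
			address = vma->vm_start;
		}
	}
	/* Whole address space scanned: start over next time. */
	mm->swap_address = 0;
	return 0;
}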