Example #1
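/*
 * Move a mapping to a freshly chosen address: allocate a new
 * vm_area_struct, pick a new area with get_unmapped_area(), move the
 * page tables, insert and merge the new VMA, unmap the old range and
 * update the total_vm/locked_vm accounting.
 */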
static inline unsigned long move_vma(struct vm_area_struct * vma,
	unsigned long addr, unsigned long old_len, unsigned long new_len)
{
	struct vm_area_struct * new_vma;

	new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (new_vma) {
		unsigned long new_addr = get_unmapped_area(addr, new_len);

		if (new_addr && !move_page_tables(current->mm, new_addr, addr, old_len)) {
			*new_vma = *vma;
			new_vma->vm_start = new_addr;
			new_vma->vm_end = new_addr+new_len;
			new_vma->vm_offset = vma->vm_offset + (addr - vma->vm_start);
			if (new_vma->vm_file)
				new_vma->vm_file->f_count++;
			if (new_vma->vm_ops && new_vma->vm_ops->open)
				new_vma->vm_ops->open(new_vma);
			insert_vm_struct(current->mm, new_vma);
			merge_segments(current->mm, new_vma->vm_start, new_vma->vm_end);
			do_munmap(addr, old_len);
			current->mm->total_vm += new_len >> PAGE_SHIFT;
			if (new_vma->vm_flags & VM_LOCKED) {
				current->mm->locked_vm += new_len >> PAGE_SHIFT;
				make_pages_present(new_vma->vm_start,
						   new_vma->vm_end);
			}
			return new_addr;
		}
		kmem_cache_free(vm_area_cachep, new_vma);
	}
	return -1;
}
Example #2
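/*
 * Change the VM_LOCKED state of [start, end): dispatch to the
 * all/start/end/middle fixup helpers depending on how the range covers
 * the VMA, then adjust the locked_vm counter and fault the pages in
 * when locking.
 */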
static int mlock_fixup(struct vm_area_struct * vma, 
	unsigned long start, unsigned long end, unsigned int newflags)
{
	int pages, retval;

	if (newflags == vma->vm_flags)
		return 0;

	if (start == vma->vm_start) {
		if (end == vma->vm_end)
			retval = mlock_fixup_all(vma, newflags);
		else
			retval = mlock_fixup_start(vma, end, newflags);
	} else {
		if (end == vma->vm_end)
			retval = mlock_fixup_end(vma, start, newflags);
		else
			retval = mlock_fixup_middle(vma, start, end, newflags);
	}
	if (!retval) {
		/* keep track of amount of locked VM */
		pages = (end - start) >> PAGE_SHIFT;
		if (newflags & VM_LOCKED) {
			pages = -pages;
			make_pages_present(start, end);
		}
		vma->vm_mm->locked_vm -= pages;
	}
	return retval;
}
Example #3
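/*
 * Same fixup dispatch as above, but make_pages_present() is called on
 * the range whether locking or unlocking; on CONFIG_SA1100_EMPEG builds
 * it is skipped when the region is not readable, writable or executable.
 */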
static int mlock_fixup(struct vm_area_struct * vma, 
	unsigned long start, unsigned long end, unsigned int newflags)
{
	int pages, retval;

	if (newflags == vma->vm_flags)
		return 0;

	if (start == vma->vm_start) {
		if (end == vma->vm_end)
			retval = mlock_fixup_all(vma, newflags);
		else
			retval = mlock_fixup_start(vma, end, newflags);
	} else {
		if (end == vma->vm_end)
			retval = mlock_fixup_end(vma, start, newflags);
		else
			retval = mlock_fixup_middle(vma, start, end, newflags);
	}
	if (!retval) {
		/* keep track of amount of locked VM */
		pages = (end - start) >> PAGE_SHIFT;
		if (!(newflags & VM_LOCKED))
			pages = -pages;
		vma->vm_mm->locked_vm += pages;
#ifdef CONFIG_SA1100_EMPEG
		if(newflags & (VM_READ | VM_WRITE | VM_EXEC))
#endif
		make_pages_present(start, end);
		//		printk("mlock_fixup: locked_vm = %d\n", vma->vm_mm->locked_vm);
	}
	return retval;
}
Example #4
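/*
 * split_vma()-based fixup: split the VMA so that [start, end) is covered
 * by exactly one VMA, update vm_flags (protected by mmap_sem held for
 * writing), then adjust locked_vm and make the pages present when
 * locking.
 */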
static int mlock_fixup(struct vm_area_struct * vma, 
	unsigned long start, unsigned long end, unsigned int newflags)
{
	struct mm_struct * mm = vma->vm_mm;
	int pages;
	int ret = 0;

	if (newflags == vma->vm_flags)
		goto out;

	if (start != vma->vm_start) {
		if (split_vma(mm, vma, start, 1)) {
			ret = -EAGAIN;
			goto out;
		}
	}

	if (end != vma->vm_end) {
		if (split_vma(mm, vma, end, 0)) {
			ret = -EAGAIN;
			goto out;
		}
	}

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, make_pages_present below will bring it back.
	 */
	vma->vm_flags = newflags;

	/*
	 * Keep track of amount of locked VM.
	 */
	pages = (end - start) >> PAGE_SHIFT;
	if (newflags & VM_LOCKED) {
		pages = -pages;
		ret = make_pages_present(start, end);
	}

	vma->vm_mm->locked_vm -= pages;
out:
	return ret;
}
Example #5
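/*
 * Same split_vma()-based fixup, except the vm_flags update is performed
 * under mm->page_table_lock.
 */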
static int mlock_fixup(struct vm_area_struct * vma, 
	unsigned long start, unsigned long end, unsigned int newflags)
{
	struct mm_struct * mm = vma->vm_mm;
	int pages;
	int ret = 0;

	if (newflags == vma->vm_flags)
		goto out;

	if (start != vma->vm_start) {
		if (split_vma(mm, vma, start, 1)) {
			ret = -EAGAIN;
			goto out;
		}
	}

	if (end != vma->vm_end) {
		if (split_vma(mm, vma, end, 0)) {
			ret = -EAGAIN;
			goto out;
		}
	}
	
	spin_lock(&mm->page_table_lock);
	vma->vm_flags = newflags;
	spin_unlock(&mm->page_table_lock);

	/*
	 * Keep track of amount of locked VM.
	 */
	pages = (end - start) >> PAGE_SHIFT;
	if (newflags & VM_LOCKED) {
		pages = -pages;
		ret = make_pages_present(start, end);
	}

	vma->vm_mm->locked_vm -= pages;
out:
	return ret;
}
Example #6
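/*
 * Move an existing mapping to new_addr: first try to extend an adjacent
 * mergeable VMA, otherwise allocate a fresh vm_area_struct; then move the
 * page tables, unmap the old range and update the VM accounting.
 */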
static inline unsigned long move_vma(struct vm_area_struct * vma,
	unsigned long addr, unsigned long old_len, unsigned long new_len,
	unsigned long new_addr)
{
	struct mm_struct * mm = vma->vm_mm;
	struct vm_area_struct * new_vma, * next, * prev;
	int allocated_vma;

	new_vma = NULL;
	next = find_vma_prev(mm, new_addr, &prev);
	if (next) {
		if (prev && prev->vm_end == new_addr &&
		    can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
			spin_lock(&mm->page_table_lock);
			prev->vm_end = new_addr + new_len;
			spin_unlock(&mm->page_table_lock);
			new_vma = prev;
			if (next != prev->vm_next)
				BUG();
			if (prev->vm_end == next->vm_start && can_vma_merge(next, prev->vm_flags)) {
				spin_lock(&mm->page_table_lock);
				prev->vm_end = next->vm_end;
				__vma_unlink(mm, next, prev);
				spin_unlock(&mm->page_table_lock);

				mm->map_count--;
				kmem_cache_free(vm_area_cachep, next);
			}
		} else if (next->vm_start == new_addr + new_len &&
			   can_vma_merge(next, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
			spin_lock(&mm->page_table_lock);
			next->vm_start = new_addr;
			spin_unlock(&mm->page_table_lock);
			new_vma = next;
		}
	} else {
		prev = find_vma(mm, new_addr-1);
		if (prev && prev->vm_end == new_addr &&
		    can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
			spin_lock(&mm->page_table_lock);
			prev->vm_end = new_addr + new_len;
			spin_unlock(&mm->page_table_lock);
			new_vma = prev;
		}
	}

	allocated_vma = 0;
	if (!new_vma) {
		new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (!new_vma)
			goto out;
		allocated_vma = 1;
	}

	if (!move_page_tables(current->mm, new_addr, addr, old_len)) {
		unsigned long vm_locked = vma->vm_flags & VM_LOCKED;

		if (allocated_vma) {
			*new_vma = *vma;
			new_vma->vm_start = new_addr;
			new_vma->vm_end = new_addr+new_len;
			new_vma->vm_pgoff += (addr-vma->vm_start) >> PAGE_SHIFT;
			new_vma->vm_raend = 0;
			if (new_vma->vm_file)
				get_file(new_vma->vm_file);
			if (new_vma->vm_ops && new_vma->vm_ops->open)
				new_vma->vm_ops->open(new_vma);
			insert_vm_struct(current->mm, new_vma);
		}

		/* XXX: possible errors masked, mapping might remain */
		do_munmap(current->mm, addr, old_len);

		current->mm->total_vm += new_len >> PAGE_SHIFT;
		if (vm_locked) {
			current->mm->locked_vm += new_len >> PAGE_SHIFT;
			if (new_len > old_len)
				make_pages_present(new_addr + old_len,
						   new_addr + new_len);
		}
		return new_addr;
	}
	if (allocated_vma)
		kmem_cache_free(vm_area_cachep, new_vma);
out:
	return -1;
}