Example #1
/*
 * Munmap is split into 2 main parts -- this part which finds
 * what needs doing, and the areas themselves, which do the
 * work.  This now handles partial unmappings.
 * Jeremy Fitzhardinge <*****@*****.**>
 */
int do_munmap(unsigned long addr, unsigned long len)
{
	struct vm_area_struct *vma, *free, **p;

	if ((addr & ~PAGE_MASK) || addr > KERNEL_BASE || len > KERNEL_BASE-addr)
		return -EINVAL;
	if ((len = PAGE_ALIGN(len)) == 0)
		return 0;
	/*
	 * Check if this memory area is ok - put it on the temporary
	 * list if so..  The checks here are pretty simple --
	 * every area affected in some way (by any overlap) is put
	 * on the list.  If nothing is put on, nothing is affected.
	 */
	free = NULL;
	for (vma = current->mm->mmap, p = &current->mm->mmap; vma; ) {
		if (vma->vm_start >= addr+len)
			break;
		if (vma->vm_end <= addr) {
			/* No overlap yet: advance the link pointer as
			 * well, or the unlink below would splice out
			 * the wrong area. */
			p = &vma->vm_next;
			vma = *p;
			continue;
		}
		*p = vma->vm_next;
		vma->vm_next = free;
		free = vma;
		vma = *p;
	}
	if (!free)
		return 0;
	/*
	 * Ok - we have the memory areas we should free on the 'free' list,
	 * so release them, and unmap the page range..
	 * If one of the segments is only partially unmapped, unmap_fixup()
	 * will put new vm_area_struct(s) into the address space.
	 */
	while (free) {
		unsigned long st, end;

		vma = free;
		free = free->vm_next;

		remove_shared_vm_struct(vma);

		st = addr < vma->vm_start ? vma->vm_start : addr;
		end = addr+len;
		end = end > vma->vm_end ? vma->vm_end : end;

		if (vma->vm_ops && vma->vm_ops->unmap)
			vma->vm_ops->unmap(vma, st, end-st);

		unmap_fixup(vma, st, end-st);
		kfree(vma);
	}
	unmap_page_range(addr, len);
	return 0;
}
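
Both listings clamp the unmap request to each overlapping area with the same pair of ternaries (the st/end computation above), which is just an interval intersection. As a sanity check, here is a minimal user-space sketch of that step; struct range and clamp_unmap() are hypothetical names for illustration, not kernel API:

#include <assert.h>
#include <stdio.h>

/* Hypothetical model of the clamping step: intersect the request
 * [addr, addr+len) with one area [vm_start, vm_end). */
struct range { unsigned long start, end; };

static struct range clamp_unmap(unsigned long addr, unsigned long len,
				unsigned long vm_start, unsigned long vm_end)
{
	struct range r;

	r.start = addr < vm_start ? vm_start : addr;		/* max(addr, vm_start) */
	r.end = addr + len > vm_end ? vm_end : addr + len;	/* min(addr+len, vm_end) */
	return r;
}

int main(void)
{
	/* Area [0x2000, 0x6000), request [0x1000, 0x3000): only the
	 * sub-range [0x2000, 0x3000) actually lies inside the area. */
	struct range r = clamp_unmap(0x1000, 0x2000, 0x2000, 0x6000);

	assert(r.start == 0x2000 && r.end == 0x3000);
	printf("unmap [%#lx, %#lx)\n", r.start, r.end);
	return 0;
}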
Example #2
int do_munmap(unsigned long addr, size_t len)
{
    struct vm_area_struct *mpnt, *prev, *next, **npp, *free;
    /* Reject unaligned or out-of-range requests; 'len > PAGE_OFFSET - addr'
     * avoids the overflow that 'addr + len > PAGE_OFFSET' allows. */
    if ((addr & ~PAGE_MASK) || addr > PAGE_OFFSET || len > PAGE_OFFSET - addr)
        return -EINVAL;
    if ((len = PAGE_ALIGN(len)) == 0)
        return 0;

    /* Find the first area that can overlap [addr, addr+len):
     * find_vma() returns the first vma with vm_end > addr. */
    mpnt = find_vma(current, addr);
    if (!mpnt)
        return 0;

    avl_neighbours(mpnt, current->mm->mmap_avl, &prev, &next);

    /* npp always points at the link that leads to mpnt. */
    npp = (prev ? &prev->vm_next : &current->mm->mmap);
    free = NULL;

    /* Unlink every affected area from both the list and the AVL tree,
     * collecting it on the temporary 'free' list. */
    for (; mpnt && mpnt->vm_start < addr + len; mpnt = *npp)
    {
        *npp = mpnt->vm_next;
        mpnt->vm_next = free;
        free = mpnt;
        avl_remove(mpnt, &current->mm->mmap_avl);
    }
    if (free == NULL)
        return 0;

    /* Release each area on the 'free' list; a partial unmap is patched
     * up by unmap_fixup(), which may insert new vm_area_struct(s). */
    while (free)
    {
        unsigned long st, end;

        mpnt = free;
        free = free->vm_next;

        remove_shared_vm_struct(mpnt);

        /* Clamp the unmap request to this area. */
        st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
        end = addr + len;
        end = end > mpnt->vm_end ? mpnt->vm_end : end;

        if (mpnt->vm_ops && mpnt->vm_ops->unmap)
            mpnt->vm_ops->unmap(mpnt, st, end - st);

        unmap_fixup(mpnt, st, end - st);
        kfree(mpnt);
    }

    unmap_page_range(addr, len);
    return 0;
}
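
The heavy lifting for a partial unmap happens in unmap_fixup(), which both listings call but neither shows. The real kernel function also fixes up flags, file offsets and sharing; the sketch below is only a simplified user-space model of the four geometric cases it has to handle, with hypothetical names (struct area, fixup()):

#include <assert.h>
#include <stdlib.h>

struct area {
    unsigned long start, end;
    struct area *next;
};

/* Remove the hole [st, end) (already clamped inside [a->start, a->end))
 * from 'a'; returns what is left of the area, or NULL if nothing is. */
static struct area *fixup(struct area *a, unsigned long st, unsigned long end)
{
    struct area *tail;

    if (st == a->start && end == a->end) {  /* 1: whole area unmapped */
        free(a);
        return NULL;
    }
    if (st == a->start) {                   /* 2: head trimmed */
        a->start = end;
        return a;
    }
    if (end == a->end) {                    /* 3: tail trimmed */
        a->end = st;
        return a;
    }
    /* 4: hole in the middle -- split the area in two; this is why a
     * partial unmap can *add* vm_area_struct(s) to the address space. */
    tail = malloc(sizeof(*tail));
    assert(tail != NULL);                   /* real code must handle failure */
    tail->start = end;
    tail->end = a->end;
    tail->next = a->next;
    a->end = st;
    a->next = tail;
    return a;
}

int main(void)
{
    struct area *a = malloc(sizeof(*a));

    assert(a != NULL);
    a->start = 0x2000; a->end = 0x6000; a->next = NULL;
    a = fixup(a, 0x3000, 0x4000);           /* punch a hole in the middle */
    assert(a->end == 0x3000 && a->next->start == 0x4000);
    free(a->next);
    free(a);
    return 0;
}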