Beispiel #1
0
/*
 * Munmap is split into 2 main parts -- this part which finds
 * what needs doing, and the areas themselves, which do the
 * work.  This now handles partial unmappings.
 * Jeremy Fitzhardinge <*****@*****.**>
 *
 * Returns 0 on success (including the no-op cases), -EINVAL on a
 * misaligned or out-of-range request.
 */
int do_munmap(unsigned long addr, unsigned long len)
{
	struct vm_area_struct *vma, *free, **p;

	/*
	 * Reject unaligned addresses and ranges reaching past the user
	 * address space.  The "len > KERNEL_BASE-addr" form avoids
	 * overflow in addr+len.
	 */
	if ((addr & ~PAGE_MASK) || addr > KERNEL_BASE || len > KERNEL_BASE-addr)
		return -EINVAL;
	if ((len = PAGE_ALIGN(len)) == 0)
		return 0;
	/*
	 * Check if this memory area is ok - put it on the temporary
	 * list if so..  The checks here are pretty simple --
	 * every area affected in some way (by any overlap) is put
	 * on the list.  If nothing is put on, nothing is affected.
	 */
	free = NULL;
	for (vma = current->mm->mmap, p = &current->mm->mmap; vma; ) {
		if (vma->vm_start >= addr+len)
			break;
		if (vma->vm_end <= addr) {
			/*
			 * Area lies entirely below the range: keep it.
			 * p must follow along -- if it kept pointing at
			 * an earlier link, the unlink below (*p = ...)
			 * would detach every area between here and the
			 * first overlap.
			 */
			p = &vma->vm_next;
			vma = vma->vm_next;
			continue;
		}
		/* Overlap: unlink from the mmap list, push on 'free'. */
		*p = vma->vm_next;
		vma->vm_next = free;
		free = vma;
		vma = *p;
	}
	if (!free)
		return 0;
	/*
	 * Ok - we have the memory areas we should free on the 'free' list,
	 * so release them, and unmap the page range..
	 * If the one of the segments is only being partially unmapped,
	 * it will put new vm_area_struct(s) into the address space.
	 */
	while (free) {
		unsigned long st, end;

		vma = free;
		free = free->vm_next;

		remove_shared_vm_struct(vma);

		/* Clamp the unmapped span to this area's bounds. */
		st = addr < vma->vm_start ? vma->vm_start : addr;
		end = addr+len;
		end = end > vma->vm_end ? vma->vm_end : end;

		if (vma->vm_ops && vma->vm_ops->unmap)
			vma->vm_ops->unmap(vma, st, end-st);

		/* unmap_fixup re-inserts the surviving partial pieces. */
		unmap_fixup(vma, st, end-st);
		kfree(vma);
	}
	unmap_page_range(addr, len);
	return 0;
}
Beispiel #2
0
/*
 * Merge the list of memory segments if possible.  Redundant
 * vm_area_structs are freed.  This assumes that the list is
 * ordered by address.  We don't need to traverse the entire list,
 * only those segments which intersect or are adjacent to a given
 * interval.
 */
void merge_segments (struct task_struct * task, unsigned long start_addr, unsigned long end_addr)
{
    struct vm_area_struct *prev, *mpnt, *next;

    mpnt = find_vma(task, start_addr);
    if(!mpnt)
        return;
    avl_neighbours(mpnt, task->mm->mmap_avl, &prev, &next);
    /* Here prev->vm_next == mpnt and mpnt->vm_next == next. */

    if(!prev)
    {
        prev = mpnt;
        mpnt = next;
    }
    /* Walk forward, trying to fold each mpnt into prev. */
    for(; mpnt && prev->vm_start < end_addr; prev = mpnt, mpnt = next)
    {
        next = mpnt->vm_next;

        /* Mergeable only if inode, pte, ops and flags all agree
         * and the areas are exactly adjacent. */
        if(mpnt->vm_inode != prev->vm_inode)
            continue;
        if(mpnt->vm_pte != prev->vm_pte)
            continue;
        if(mpnt->vm_ops != prev->vm_ops)
            continue;
        if(mpnt->vm_flags != prev->vm_flags)
            continue;
        if(mpnt->vm_start != prev->vm_end)
            continue;

        /* For file/shm mappings the file offsets must be contiguous too. */
        if((mpnt->vm_inode != NULL) || (mpnt->vm_flags & VM_SHM))
        {
            if(prev->vm_offset + prev->vm_end - prev->vm_start != mpnt->vm_offset)
                continue;
        }

        /* Extend prev over mpnt and unlink mpnt from tree and list. */
        avl_remove(mpnt, &task->mm->mmap_avl);
        prev->vm_end = mpnt->vm_end;
        prev->vm_next = mpnt->vm_next;
        if(mpnt->vm_ops && mpnt->vm_ops->close)
        {
            /* Shrink mpnt to an empty area at its end before close(),
             * so the close callback sees a zero-length region. */
            mpnt->vm_offset += mpnt->vm_end - mpnt->vm_start;
            mpnt->vm_start = mpnt->vm_end;
            mpnt->vm_ops->close(mpnt);
        }

        remove_shared_vm_struct(mpnt);
        /* mpnt held a reference on the inode; release it (the old
         * i_count++ here leaked one reference per merge). */
        if(mpnt->vm_inode)
            mpnt->vm_inode->i_count--;

        kfree_s(mpnt,sizeof(*mpnt));
        /* Re-try merging the grown prev with the following area. */
        mpnt = prev;
    }
    return;
}
Beispiel #3
0
/*
 * Unmap [addr, addr+len) from the current process, using the AVL
 * tree to find the first affected area.  Partially covered areas
 * are fixed up by unmap_fixup().  Returns 0 on success (including
 * the no-op cases), -EINVAL on a bad request.
 */
unsigned long do_munmap(unsigned long addr, int len)
{
    struct vm_area_struct *mpnt, *prev, *next, **npp, *free;

    /*
     * Validate alignment and range.  Use the subtraction form so a
     * negative or huge len cannot wrap addr+len back into range
     * (e.g. len == -1 passed the old (addr + len) > PAGE_OFFSET test).
     */
    if((addr & ~PAGE_MASK) || (addr > PAGE_OFFSET) ||
       (unsigned long) len > PAGE_OFFSET - addr)
        return -EINVAL;
    if((len = PAGE_ALIGN(len)) == 0)
        return 0;

    mpnt = find_vma(current, addr);
    if(!mpnt)
        return 0;

    avl_neighbours(mpnt, current->mm->mmap_avl, &prev, &next);
    /* Here prev->vm_next == mpnt and mpnt->vm_next == next. */

    npp = (prev? &prev->vm_next: &current->mm->mmap);
    free = NULL;

    /* Unlink every area that starts inside the range onto 'free'. */
    for(; mpnt && mpnt->vm_start < addr + len; mpnt = *npp)
    {
        *npp = mpnt->vm_next;
        mpnt->vm_next = free;
        free = mpnt;
        avl_remove(mpnt, &current->mm->mmap_avl);
    }
    if(free == NULL)
        return 0;

    /* Release each affected area, clamping to its own bounds. */
    while(free)
    {
        unsigned long st, end;

        mpnt = free;
        free = free->vm_next;

        remove_shared_vm_struct(mpnt);

        st = addr < mpnt->vm_start?mpnt->vm_start:addr;
        end = addr + len;
        end = end > mpnt->vm_end? mpnt->vm_end:end;

        if(mpnt->vm_ops && mpnt->vm_ops->unmap)
            mpnt->vm_ops->unmap(mpnt, st, end-st);

        /* unmap_fixup re-inserts surviving partial pieces. */
        unmap_fixup(mpnt, st, end-st);
        kfree(mpnt);
    }

    unmap_page_range(addr, len);
    return 0;
}
Beispiel #4
0
/*
 * Tear down every memory area of an exiting task: detach the whole
 * mmap list first, then close, unshare and free each area in turn.
 */
void exit_mmap(struct task_struct *t)
{
	struct vm_area_struct *mpnt = t->mm->mmap;

	t->mm->mmap = NULL;
	for (; mpnt; ) {
		struct vm_area_struct *following = mpnt->vm_next;

		/* Give the driver/fs a chance to clean up first. */
		if (mpnt->vm_ops && mpnt->vm_ops->close)
			mpnt->vm_ops->close(mpnt);
		remove_shared_vm_struct(mpnt);
		/* Drop the inode reference held by a file mapping. */
		if (mpnt->vm_inode)
			iput(mpnt->vm_inode);
		kfree(mpnt);
		mpnt = following;
	}
}