Example #1
static inline int mlock_fixup_middle(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, int newflags)
{
	struct vm_area_struct * left, * right;

	left = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!left)
		return -EAGAIN;
	right = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!right) {
		kmem_cache_free(vm_area_cachep, left);
		return -EAGAIN;
	}
	*left = *vma;
	*right = *vma;
	left->vm_end = start;
	vma->vm_start = start;
	vma->vm_end = end;
	right->vm_start = end;
	vma->vm_offset += vma->vm_start - left->vm_start;
	right->vm_offset += right->vm_start - left->vm_start;
	vma->vm_flags = newflags;
	if (vma->vm_file)
		vma->vm_file->f_count += 2;

	if (vma->vm_ops && vma->vm_ops->open) {
		vma->vm_ops->open(left);
		vma->vm_ops->open(right);
	}
	insert_vm_struct(current->mm, left);
	insert_vm_struct(current->mm, right);
	return 0;
}
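
Note on the split arithmetic above: each piece's vm_offset must move in lockstep with its vm_start, so that any virtual address keeps mapping to the same file offset after the split. A minimal sketch of that invariant, reusing the 2.2-era field names from the example (split_offsets is a hypothetical helper, not a kernel function):

static void split_offsets(struct vm_area_struct *left,
			  struct vm_area_struct *mid,
			  struct vm_area_struct *right,
			  unsigned long start, unsigned long end)
{
	unsigned long old_start = left->vm_start;	/* all three start as copies of one vma */

	left->vm_end = start;
	mid->vm_start = start;
	mid->vm_end = end;
	right->vm_start = end;
	/* same math as mlock_fixup_middle above */
	mid->vm_offset += start - old_start;
	right->vm_offset += end - old_start;
}
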
Example #2
void unmap_fixup(struct vm_area_struct *area, unsigned long addr, size_t len)
{
    struct vm_area_struct *mpnt;
    unsigned long end = addr + len;

    if(addr < area->vm_start || addr >= area->vm_end ||
       end <= area->vm_start || end > area->vm_end || end < addr)
    {
        printk("unmap_fixup: area = %lx-%lx, unmap %lx-%lx!!\n",
                area->vm_start, area->vm_end, addr, end);
        return;
    }

    if(addr == area->vm_start && end == area->vm_end)
    {
        if(area->vm_ops && area->vm_ops->close)
            area->vm_ops->close(area);
        if(area->vm_inode)
            iput(area->vm_inode);
        return;
    }

    if(end == area->vm_end)
        area->vm_end = addr;
    else if(addr == area->vm_start)
    {
        area->vm_offset += (end - area->vm_start);
        area->vm_start = end;
    }
    else
    {
        mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
        if(!mpnt)
            return;
        *mpnt = *area;
        mpnt->vm_offset += (end - area->vm_start);
        mpnt->vm_start = end;
        if(mpnt->vm_inode)
            mpnt->vm_inode->i_count++;
        if(mpnt->vm_ops && mpnt->vm_ops->open)
            mpnt->vm_ops->open(mpnt);
        area->vm_end = addr;	/* truncate the original area */
        insert_vm_struct(current,mpnt);
    }
    mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
    if(!mpnt)
        return;
    *mpnt = *area;

    if(mpnt->vm_ops && mpnt->vm_ops->open)
        mpnt->vm_ops->open(mpnt);
    if(area->vm_ops && area->vm_ops->close)
    {
        area->vm_end = area->vm_start;
        area->vm_ops->close(area);
    }
    insert_vm_struct(current,mpnt);
}
Example #3
File: mem.c  Project: wanggx/Linux1.0
static int mmap_mem(struct inode * inode, struct file * file,
	unsigned long addr, size_t len, int prot, unsigned long off)
{
	struct vm_area_struct * mpnt;

	if (off & 0xfff || off + len < off)
		return -ENXIO;
	if (x86 > 3 && off >= high_memory)
		prot |= PAGE_PCD;
	if (remap_page_range(addr, off, len, prot))
		return -EAGAIN;
/* try to create a dummy vmm-structure so that the rest of the kernel knows we are here */
	mpnt = (struct vm_area_struct * ) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!mpnt)
		return 0;

	mpnt->vm_task = current;
	mpnt->vm_start = addr;
	mpnt->vm_end = addr + len;
	mpnt->vm_page_prot = prot;
	mpnt->vm_share = NULL;
	mpnt->vm_inode = inode;
	inode->i_count++;
	mpnt->vm_offset = off;
	mpnt->vm_ops = NULL;
	insert_vm_struct(current, mpnt);
	merge_segments(current->mmap, NULL, NULL);
	return 0;
}
Example #4
File: mem.c  Project: wanggx/Linux1.0
static int mmap_zero(struct inode * inode, struct file * file,
	unsigned long addr, size_t len, int prot, unsigned long off)
{
	struct vm_area_struct *mpnt;

	if (prot & PAGE_RW)
		return -EINVAL;
	if (zeromap_page_range(addr, len, prot))
		return -EAGAIN;
	/*
	 * try to create a dummy vmm-structure so that the
	 * rest of the kernel knows we are here
	 */
	mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
	if (!mpnt)
		return 0;

	mpnt->vm_task = current;
	mpnt->vm_start = addr;
	mpnt->vm_end = addr + len;
	mpnt->vm_page_prot = prot;
	mpnt->vm_share = NULL;
	mpnt->vm_inode = NULL;
	mpnt->vm_offset = off;
	mpnt->vm_ops = NULL;
	insert_vm_struct(current, mpnt);
	merge_segments(current->mmap, ignoff_mergep, inode);
	return 0;
}
Example #5
static inline unsigned long move_vma(struct vm_area_struct * vma,
	unsigned long addr, unsigned long old_len, unsigned long new_len)
{
	struct vm_area_struct * new_vma;

	new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (new_vma) {
		unsigned long new_addr = get_unmapped_area(addr, new_len);

		if (new_addr && !move_page_tables(current->mm, new_addr, addr, old_len)) {
			*new_vma = *vma;
			new_vma->vm_start = new_addr;
			new_vma->vm_end = new_addr+new_len;
			new_vma->vm_offset = vma->vm_offset + (addr - vma->vm_start);
			if (new_vma->vm_file)
				new_vma->vm_file->f_count++;
			if (new_vma->vm_ops && new_vma->vm_ops->open)
				new_vma->vm_ops->open(new_vma);
			insert_vm_struct(current->mm, new_vma);
			merge_segments(current->mm, new_vma->vm_start, new_vma->vm_end);
			do_munmap(addr, old_len);
			current->mm->total_vm += new_len >> PAGE_SHIFT;
			if (new_vma->vm_flags & VM_LOCKED) {
				current->mm->locked_vm += new_len >> PAGE_SHIFT;
				make_pages_present(new_vma->vm_start,
						   new_vma->vm_end);
			}
			return new_addr;
		}
		kmem_cache_free(vm_area_cachep, new_vma);
	}
	return -ENOMEM;
}
Example #6
File: mlock.c  Project: rohsaini/mkunity
static inline int mlock_fixup_end(struct vm_area_struct * vma,
	unsigned long start, int newflags)
{
	struct vm_area_struct * n;

	n = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!n)
		return -EAGAIN;
	*n = *vma;
	vma->vm_end = start;
	n->vm_start = start;
	n->vm_offset += n->vm_start - vma->vm_start;
#ifdef	CONFIG_OSFMACH3
	osfmach3_mlock_fixup(n, newflags);
#endif	/* CONFIG_OSFMACH3 */
	n->vm_flags = newflags;
	if (n->vm_inode)
		n->vm_inode->i_count++;
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
#ifdef	CONFIG_OSFMACH3
	n->vm_flags |= VM_REMAPPING;
#endif	/* CONFIG_OSFMACH3 */
	insert_vm_struct(current->mm, n);
#ifdef	CONFIG_OSFMACH3
	n->vm_flags &= ~VM_REMAPPING;
#endif	/* CONFIG_OSFMACH3 */
	return 0;
}
Example #7
/*  We don't use the ordinary `setup_arg_pages()' because
   svr3 has a different stack start address.
*/
static unsigned long svr3_setup_arg_pages (unsigned long p,
						struct linux_binprm * bprm) {
	unsigned long stack_base;
	struct vm_area_struct *mpnt;
	int i;

	stack_base = SVR3_STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE;

	p += stack_base;

	mpnt = (struct vm_area_struct *) kmalloc (sizeof (*mpnt), GFP_KERNEL);
	if (mpnt) {
		mpnt->vm_mm = current->mm;
		mpnt->vm_start = PAGE_MASK & (unsigned long) p;
		mpnt->vm_end = SVR3_STACK_TOP;
		mpnt->vm_page_prot = PAGE_COPY;
		mpnt->vm_flags = VM_STACK_FLAGS;
		mpnt->vm_ops = NULL;
		mpnt->vm_offset = 0;
		mpnt->vm_inode = NULL;
		mpnt->vm_pte = 0;
		insert_vm_struct (current->mm, mpnt);
		current->mm->total_vm =
			(mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
	}

	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		if (bprm->page[i]) {
			current->mm->rss++;
			put_dirty_page(current,bprm->page[i],stack_base);
		}
		stack_base += PAGE_SIZE;
	}
	return p;
}
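
Note: a quick check of the address arithmetic above, assuming MAX_ARG_PAGES = 32 and a 4 KiB PAGE_SIZE (both vary by kernel and architecture):

/* stack_base = SVR3_STACK_TOP - 32 * 4096 = SVR3_STACK_TOP - 0x20000.
 * p arrives as an offset inside the MAX_ARG_PAGES staging window, so
 * p += stack_base turns it into the final user address just below
 * SVR3_STACK_TOP; vm_start = PAGE_MASK & p then rounds down to the
 * containing page boundary. */
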
Example #8
File: mm.c  Project: feng-lei/mario
/*
 * By the time this function is called, the area struct has been
 * removed from the process mapping list, so it needs to be
 * reinserted if necessary.
 *
 * The 4 main cases are:
 *    Unmapping the whole area
 *    Unmapping from the start of the segment to a point in it
 *    Unmapping from an intermediate point to the end
 *    Unmapping between two intermediate points, making a hole.
 *
 * Case 4 involves the creation of 2 new areas, for each side of
 * the hole.
 */
void unmap_fixup(struct vm_area_struct *vma, 
	unsigned long addr, unsigned long len)
{
	struct vm_area_struct *mpnt;
	unsigned long end = addr + len;

	/* Unmapping the whole area */
	if (addr == vma->vm_start && end == vma->vm_end) {
		if (vma->vm_ops && vma->vm_ops->close)
			vma->vm_ops->close(vma);
		if (vma->vm_inode)
			iput(vma->vm_inode);
		return;
	}
	/* Work out to one of the ends */
	if (end == vma->vm_end) {
		vma->vm_end = addr;
	} else if (addr == vma->vm_start) {
		vma->vm_offset += (end - vma->vm_start);
		vma->vm_start = end;
	} else {
		mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt));
		if (!mpnt)
			return;
		*mpnt = *vma;
		mpnt->vm_offset += (end - vma->vm_start);
		mpnt->vm_start = end;
		if (mpnt->vm_inode)
			iref(mpnt->vm_inode);
		if (mpnt->vm_ops && mpnt->vm_ops->open)
			mpnt->vm_ops->open(mpnt);
		vma->vm_end = addr;	/* Truncate area */
		insert_vm_struct(current->mm, mpnt);
	}
	/* construct whatever mapping is needed */
	mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt));
	if (!mpnt)
		return;
	*mpnt = *vma;
	if (mpnt->vm_ops && mpnt->vm_ops->open)
		mpnt->vm_ops->open(mpnt);
	if (vma->vm_ops && vma->vm_ops->close) {
		vma->vm_end = vma->vm_start;
		vma->vm_ops->close(vma);
	}
	insert_vm_struct(current->mm, mpnt);
}
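
Note: the hole case (case 4 in the comment above) is the easiest to get wrong, so here is a concrete trace with made-up addresses:

/* vma:   vm_start = 0x8000000, vm_end = 0x8005000, vm_offset = 0
 * unmap: addr = 0x8001000, len = 0x2000  =>  end = 0x8003000
 * tail piece (first kmalloc):  vm_start = 0x8003000, vm_end = 0x8005000,
 *                              vm_offset = 0x3000
 * head piece (second kmalloc): vm_start = 0x8000000, vm_end = 0x8001000,
 *                              vm_offset = 0
 * Both pieces are reinserted; the original vma is closed and dropped. */
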
Example #9
int
ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
{
	unsigned long stack_base;
	struct vm_area_struct *mpnt;
	struct mm_struct *mm = current->mm;
	int i, ret;

	stack_base = IA32_STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE;
	mm->arg_start = bprm->p + stack_base;

	bprm->p += stack_base;
	if (bprm->loader)
		bprm->loader += stack_base;
	bprm->exec += stack_base;

	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!mpnt)
		return -ENOMEM;

	memset(mpnt, 0, sizeof(*mpnt));

	down_write(&current->mm->mmap_sem);
	{
		mpnt->vm_mm = current->mm;
		mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
		mpnt->vm_end = IA32_STACK_TOP;
		if (executable_stack == EXSTACK_ENABLE_X)
			mpnt->vm_flags = VM_STACK_FLAGS |  VM_EXEC;
		else if (executable_stack == EXSTACK_DISABLE_X)
			mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
		else
			mpnt->vm_flags = VM_STACK_FLAGS;
		mpnt->vm_page_prot = (mpnt->vm_flags & VM_EXEC)?
					PAGE_COPY_EXEC: PAGE_COPY;
		if ((ret = insert_vm_struct(current->mm, mpnt))) {
			up_write(&current->mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, mpnt);
			return ret;
		}
		current->mm->stack_vm = current->mm->total_vm = vma_pages(mpnt);
	}

	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page *page = bprm->page[i];
		if (page) {
			bprm->page[i] = NULL;
			install_arg_page(mpnt, page, stack_base);
		}
		stack_base += PAGE_SIZE;
	}
	up_write(&current->mm->mmap_sem);

	/* Can't do it in ia64_elf32_init(). Needs to be done before calls to
	   elf32_map() */
	current->thread.ppl = ia32_init_pp_list();

	return 0;
}
Example #10
File: mlock.c  Project: rohsaini/mkunity
static inline int mlock_fixup_middle(struct vm_area_struct * vma,
	unsigned long start, unsigned long end, int newflags)
{
	struct vm_area_struct * left, * right;

	left = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!left)
		return -EAGAIN;
	right = (struct vm_area_struct *) kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!right) {
		kfree(left);
		return -EAGAIN;
	}
	*left = *vma;
	*right = *vma;
	left->vm_end = start;
	vma->vm_start = start;
	vma->vm_end = end;
	right->vm_start = end;
	vma->vm_offset += vma->vm_start - left->vm_start;
	right->vm_offset += right->vm_start - left->vm_start;
#ifdef	CONFIG_OSFMACH3
	osfmach3_mlock_fixup(vma, newflags);
#endif	/* CONFIG_OSFMACH3 */
	vma->vm_flags = newflags;
	if (vma->vm_inode)
		vma->vm_inode->i_count += 2;
	if (vma->vm_ops && vma->vm_ops->open) {
		vma->vm_ops->open(left);
		vma->vm_ops->open(right);
	}
#ifdef	CONFIG_OSFMACH3
	left->vm_flags |= VM_REMAPPING;
#endif	/* CONFIG_OSFMACH3 */
	insert_vm_struct(current->mm, left);
#ifdef	CONFIG_OSFMACH3
	left->vm_flags &= ~VM_REMAPPING;
	right->vm_flags |= VM_REMAPPING;
#endif	/* CONFIG_OSFMACH3 */
	insert_vm_struct(current->mm, right);
#ifdef	CONFIG_OSFMACH3
	right->vm_flags &= ~VM_REMAPPING;
#endif	/* CONFIG_OSFMACH3 */
	return 0;
}
Example #11
int ia32_setup_arg_pages(struct linux_binprm *bprm)
{
	unsigned long stack_base;
	struct vm_area_struct *mpnt;
	int i, ret;

	stack_base = IA32_STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE;

	bprm->p += stack_base;
	if (bprm->loader)
		bprm->loader += stack_base;
	bprm->exec += stack_base;

	mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!mpnt) 
		return -ENOMEM; 
	
	down_write(&current->mm->mmap_sem);
	{
		mpnt->vm_mm = current->mm;
		mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
		mpnt->vm_end = IA32_STACK_TOP;
		mpnt->vm_flags = vm_stack_flags32; 
		mpnt->vm_page_prot = (mpnt->vm_flags & VM_EXEC) ? 
			PAGE_COPY_EXEC : PAGE_COPY;
		mpnt->vm_ops = NULL;
		mpnt->vm_pgoff = 0;
		mpnt->vm_file = NULL;
		mpnt->vm_private_data = (void *) 0;
		if ((ret = insert_vm_struct(current->mm, mpnt))) {
			up_write(&current->mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, mpnt);
			return ret;
		}
		current->mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
	} 

	for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
		struct page *page = bprm->page[i];
		if (page) {
			bprm->page[i] = NULL;
			current->mm->rss++;
			put_dirty_page(current,page,stack_base);
		}
		stack_base += PAGE_SIZE;
	}
	up_write(&current->mm->mmap_sem);
	
	return 0;
}
Example #12
static int __bprm_mm_init(struct linux_binprm *bprm)
{
	int err = -ENOMEM;
	struct vm_area_struct *vma = NULL;
	struct mm_struct *mm = bprm->mm;

	bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		goto err;

	down_write(&mm->mmap_sem);
	vma->vm_mm = mm;

	/*
	 * Place the stack at the largest stack address the architecture
	 * supports. Later, we'll move this to an appropriate place. We don't
	 * use STACK_TOP because that can depend on attributes which aren't
	 * configured yet.
	 */
	vma->vm_end = STACK_TOP_MAX;
	vma->vm_start = vma->vm_end - PAGE_SIZE;

	vma->vm_flags = VM_STACK_FLAGS;
	vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];

	err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
	if (err)
		goto err;

	err = insert_vm_struct(mm, vma);
	if (err) {
		up_write(&mm->mmap_sem);
		goto err;
	}

	mm->stack_vm = mm->total_vm = 1;
	up_write(&mm->mmap_sem);

	bprm->p = vma->vm_end - sizeof(void *);

	return 0;

err:
	if (vma) {
		bprm->vma = NULL;
		kmem_cache_free(vm_area_cachep, vma);
	}

	return err;
}
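
Note: the resulting initial stack is a single page; assuming a 4 KiB page and 64-bit pointers (illustrative values), the layout is:

/* vma->vm_end   = STACK_TOP_MAX
 * vma->vm_start = STACK_TOP_MAX - 4096   (one page, hence total_vm = 1)
 * bprm->p       = STACK_TOP_MAX - 8      (sizeof(void *) reserved)
 * The stack VMA is moved to its final address later in the exec path,
 * as the in-code comment explains. */
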
Example #13
/*
 * ensure page tables exist
 * mark page table entries with shm_sgn.
 */
static int shm_map (struct vm_area_struct *shmd)
{
    unsigned long tmp;

    /* clear old mappings */
    do_munmap(shmd->vm_start, shmd->vm_end - shmd->vm_start);

    /* add new mapping */
    tmp = shmd->vm_end - shmd->vm_start;
    if ((current->rlim[RLIMIT_AS].rlim_cur < RLIM_INFINITY) &&
            ((current->mm->total_vm << PAGE_SHIFT) + tmp
             > current->rlim[RLIMIT_AS].rlim_cur))
        return -ENOMEM;
    current->mm->total_vm += tmp >> PAGE_SHIFT;
    insert_vm_struct(current->mm, shmd);
    merge_segments(current->mm, shmd->vm_start, shmd->vm_end);

    return 0;
}
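
Note: the RLIMIT_AS test mixes units deliberately: total_vm is kept in pages while rlim_cur and tmp are in bytes, hence the shifts. With 4 KiB pages (an assumption) and total_vm = 100:

/* (100 << 12) = 409600 bytes of existing mappings; for tmp = 0x3000 the
 * request is refused when 409600 + 0x3000 > rlim_cur, and on success
 * total_vm grows by tmp >> 12 = 3 pages. */
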
Example #14
unsigned long setup_arg_pages(unsigned long p, struct linux_binprm * bprm)
{
	unsigned long stack_base;
	struct vm_area_struct *mpnt;
	int i;

	stack_base = STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE;

	p += stack_base;
	if (bprm->loader)
		bprm->loader += stack_base;
	bprm->exec += stack_base;

	mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
	if (mpnt) {
		mpnt->vm_mm = current->mm;
		mpnt->vm_start = PAGE_MASK & (unsigned long) p;
		mpnt->vm_end = STACK_TOP;
		mpnt->vm_page_prot = PAGE_COPY;
		mpnt->vm_flags = VM_STACK_FLAGS;
		mpnt->vm_ops = NULL;
		mpnt->vm_offset = 0;
		mpnt->vm_inode = NULL;
		mpnt->vm_pte = 0;
		insert_vm_struct(current->mm, mpnt);
		current->mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;

		for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
			if (bprm->page[i]) {
				current->mm->rss++;
				put_dirty_page(current,bprm->page[i],stack_base);
			}
			stack_base += PAGE_SIZE;
		}
	} else {
		/*
		 * This one is tricky. We are already in the new context, so we cannot
		 * return with -ENOMEM. So we _have_ to deallocate argument pages here,
		 * if there is no VMA, they won't be freed at exit_mmap() -> memory leak.
		 *
		 * User space then gets a SIGSEGV when it tries to access argument pages.
		 */
		for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
			if (bprm->page[i]) {
				free_page(bprm->page[i]);
				bprm->page[i] = 0;
			}
		}
	}
	return p;
}
Example #15
static inline int mlock_fixup_end(struct vm_area_struct * vma,
	unsigned long start, int newflags)
{
	struct vm_area_struct * n;

	n = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (!n)
		return -EAGAIN;
	*n = *vma;
	vma->vm_end = start;
	n->vm_start = start;
	n->vm_offset += n->vm_start - vma->vm_start;
	n->vm_flags = newflags;
	if (n->vm_file)
		n->vm_file->f_count++;
	if (n->vm_ops && n->vm_ops->open)
		n->vm_ops->open(n);
	insert_vm_struct(current->mm, n);
	return 0;
}
Example #16
File: syscall32.c  Project: ivucica/linux
/* Setup a VMA at program startup for the vsyscall page */
int syscall32_setup_pages(struct linux_binprm *bprm, int exstack)
{
	int npages = (VSYSCALL32_END - VSYSCALL32_BASE) >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	int ret;

	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!vma)
		return -ENOMEM;

	memset(vma, 0, sizeof(struct vm_area_struct));
	/* Could randomize here */
	vma->vm_start = VSYSCALL32_BASE;
	vma->vm_end = VSYSCALL32_END;
	/* MAYWRITE to allow gdb to COW and set breakpoints */
	vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
	/*
	 * Make sure the vDSO gets into every core dump.
	 * Dumping its contents makes post-mortem fully interpretable later
	 * without matching up the same kernel and hardware config to see
	 * what PC values meant.
	 */
	vma->vm_flags |= VM_ALWAYSDUMP;
	vma->vm_flags |= mm->def_flags;
	vma->vm_page_prot = protection_map[vma->vm_flags & 7];
	vma->vm_ops = &syscall32_vm_ops;
	vma->vm_mm = mm;

	down_write(&mm->mmap_sem);
	if ((ret = insert_vm_struct(mm, vma))) {
		up_write(&mm->mmap_sem);
		kmem_cache_free(vm_area_cachep, vma);
		return ret;
	}
	mm->total_vm += npages;
	up_write(&mm->mmap_sem);
	return 0;
}
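
Note: protection_map here is indexed by the low three vm_flags bits (VM_READ, VM_WRITE, VM_EXEC); the VM_MAY* bits never reach the index:

/* For the flags above, VM_READ|VM_EXEC gives index 0b101 = 5, selecting
 * a readable, executable, non-writable protection.  gdb can still write
 * via VM_MAYWRITE plus COW, as the MAYWRITE comment in the code notes. */
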
Example #17
void
ia64_elf32_init (struct pt_regs *regs)
{
	struct vm_area_struct *vma;

	/*
	 * Map GDT below 4GB, where the processor can find it.  We need to map
	 * it with privilege level 3 because the IVE uses non-privileged accesses to these
	 * tables.  IA-32 segmentation is used to protect against IA-32 accesses to them.
	 */
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (vma) {
		memset(vma, 0, sizeof(*vma));
		vma->vm_mm = current->mm;
		vma->vm_start = IA32_GDT_OFFSET;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_page_prot = PAGE_SHARED;
		vma->vm_flags = VM_READ|VM_MAYREAD|VM_RESERVED;
		vma->vm_ops = &ia32_shared_page_vm_ops;
		down_write(&current->mm->mmap_sem);
		{
			if (insert_vm_struct(current->mm, vma)) {
				kmem_cache_free(vm_area_cachep, vma);
				up_write(&current->mm->mmap_sem);
				BUG();
			}
		}
		up_write(&current->mm->mmap_sem);
	}

	/*
	 * When user stack is not executable, push sigreturn code to stack makes
	 * segmentation fault raised when returning to kernel. So now sigreturn
	 * code is locked in specific gate page, which is pointed by pretcode
	 * when setup_frame_ia32
	 */
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (vma) {
		memset(vma, 0, sizeof(*vma));
		vma->vm_mm = current->mm;
		vma->vm_start = IA32_GATE_OFFSET;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_page_prot = PAGE_COPY_EXEC;
		vma->vm_flags = VM_READ | VM_MAYREAD | VM_EXEC
				| VM_MAYEXEC | VM_RESERVED;
		vma->vm_ops = &ia32_gate_page_vm_ops;
		down_write(&current->mm->mmap_sem);
		{
			if (insert_vm_struct(current->mm, vma)) {
				kmem_cache_free(vm_area_cachep, vma);
				up_write(&current->mm->mmap_sem);
				BUG();
			}
		}
		up_write(&current->mm->mmap_sem);
	}

	/*
	 * Install LDT as anonymous memory.  This gives us all-zero segment descriptors
	 * until a task modifies them via modify_ldt().
	 */
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (vma) {
		memset(vma, 0, sizeof(*vma));
		vma->vm_mm = current->mm;
		vma->vm_start = IA32_LDT_OFFSET;
		vma->vm_end = vma->vm_start + PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
		vma->vm_page_prot = PAGE_SHARED;
		vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE;
		down_write(&current->mm->mmap_sem);
		{
			if (insert_vm_struct(current->mm, vma)) {
				kmem_cache_free(vm_area_cachep, vma);
				up_write(&current->mm->mmap_sem);
				BUG();
			}
		}
		up_write(&current->mm->mmap_sem);
	}

	ia64_psr(regs)->ac = 0;		/* turn off alignment checking */
	regs->loadrs = 0;
	/*
	 *  According to the ABI %edx points to an `atexit' handler.  Since we don't have
	 *  one we'll set it to 0 and initialize all the other registers just to make
	 *  things more deterministic, ala the i386 implementation.
	 */
	regs->r8 = 0;	/* %eax */
	regs->r11 = 0;	/* %ebx */
	regs->r9 = 0;	/* %ecx */
	regs->r10 = 0;	/* %edx */
	regs->r13 = 0;	/* %ebp */
	regs->r14 = 0;	/* %esi */
	regs->r15 = 0;	/* %edi */

	current->thread.eflag = IA32_EFLAG;
	current->thread.fsr = IA32_FSR_DEFAULT;
	current->thread.fcr = IA32_FCR_DEFAULT;
	current->thread.fir = 0;
	current->thread.fdr = 0;

	/*
	 * Setup GDTD.  Note: GDTD is the descrambled version of the pseudo-descriptor
	 * format defined by Figure 3-11 "Pseudo-Descriptor Format" in the IA-32
	 * architecture manual. Also note that the only fields that are not ignored are
	 * `base', `limit', 'G', `P' (must be 1) and `S' (must be 0).
	 */
	regs->r31 = IA32_SEG_UNSCRAMBLE(IA32_SEG_DESCRIPTOR(IA32_GDT_OFFSET, IA32_PAGE_SIZE - 1,
							    0, 0, 0, 1, 0, 0, 0));
	/* Setup the segment selectors */
	regs->r16 = (__USER_DS << 16) | __USER_DS; /* ES == DS, GS, FS are zero */
	regs->r17 = (__USER_DS << 16) | __USER_CS; /* SS, CS; ia32_load_state() sets TSS and LDT */

	ia32_load_segment_descriptors(current);
	ia32_load_state(current);
}
Example #18
unsigned long * create_elf_tables(char * p,int argc,int envc,struct elfhdr * exec, unsigned int load_addr, int ibcs)
{
	unsigned long *argv,*envp, *dlinfo;
	unsigned long * sp;
	struct vm_area_struct *mpnt;

	mpnt = (struct vm_area_struct *)kmalloc(sizeof(*mpnt), GFP_KERNEL);
	if (mpnt) {
		mpnt->vm_task = current;
		mpnt->vm_start = PAGE_MASK & (unsigned long) p;
		mpnt->vm_end = TASK_SIZE;
		mpnt->vm_page_prot = PAGE_PRIVATE|PAGE_DIRTY;
		mpnt->vm_flags = VM_STACK_FLAGS;
		mpnt->vm_share = NULL;
		mpnt->vm_ops = NULL;
		mpnt->vm_inode = NULL;
		mpnt->vm_offset = 0;
		mpnt->vm_pte = 0;
		insert_vm_struct(current, mpnt);
	}
	sp = (unsigned long *) (0xfffffffc & (unsigned long) p);
	if(exec) sp -= DLINFO_ITEMS*2;
	dlinfo = sp;
	sp -= envc+1;
	envp = sp;
	sp -= argc+1;
	argv = sp;
	if (!ibcs) {
		put_fs_long((unsigned long)envp,--sp);
		put_fs_long((unsigned long)argv,--sp);
	}

	/* The constant numbers (0-9) that we are writing here are
	   described in the header file sys/auxv.h on at least
	   some versions of SVr4 */
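	/* For reference, these are the standard ELF auxiliary vector tags:
	   3 = AT_PHDR, 4 = AT_PHENT, 5 = AT_PHNUM, 9 = AT_ENTRY,
	   7 = AT_BASE, 8 = AT_FLAGS, 6 = AT_PAGESZ, 0 = AT_NULL. */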
	if(exec) { /* Put this here for an ELF program interpreter */
	  struct elf_phdr * eppnt;
	  eppnt = (struct elf_phdr *) exec->e_phoff;
	  put_fs_long(3,dlinfo++); put_fs_long(load_addr + exec->e_phoff,dlinfo++);
	  put_fs_long(4,dlinfo++); put_fs_long(sizeof(struct elf_phdr),dlinfo++);
	  put_fs_long(5,dlinfo++); put_fs_long(exec->e_phnum,dlinfo++);
	  put_fs_long(9,dlinfo++); put_fs_long((unsigned long) exec->e_entry,dlinfo++);
	  put_fs_long(7,dlinfo++); put_fs_long(SHM_RANGE_START,dlinfo++);
	  put_fs_long(8,dlinfo++); put_fs_long(0,dlinfo++);
	  put_fs_long(6,dlinfo++); put_fs_long(PAGE_SIZE,dlinfo++);
	  put_fs_long(0,dlinfo++); put_fs_long(0,dlinfo++);
	};

	put_fs_long((unsigned long)argc,--sp);
	current->mm->arg_start = (unsigned long) p;
	while (argc-->0) {
		put_fs_long((unsigned long) p,argv++);
		while (get_fs_byte(p++)) /* nothing */ ;
	}
	put_fs_long(0,argv);
	current->mm->arg_end = current->mm->env_start = (unsigned long) p;
	while (envc-->0) {
		put_fs_long((unsigned long) p,envp++);
		while (get_fs_byte(p++)) /* nothing */ ;
	}
	put_fs_long(0,envp);
	current->mm->env_end = (unsigned long) p;
	return sp;
}
Example #19
unsigned long do_mmap(struct file *file,unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long off)
{
    int error;
    struct vm_area_struct *vma;

    if(len <= 0)
        return -EINVAL;

    if((len = PAGE_ALIGN(len)) == 0)
        return addr;

    if(addr > PAGE_OFFSET || len > PAGE_OFFSET || (addr + len) > PAGE_OFFSET)
        return -EINVAL;

    if(file)
    {
        switch (flags & MAP_TYPE)
        {
            case MAP_SHARED:
                if((prot & PROT_WRITE) && !(file->f_mode & FILE_WRITE))
                    return -EACCES;
                break;
            case MAP_PRIVATE:
                if(!(file->f_mode & FILE_READ))
                    return -EACCES;
                break;
            default:
                return -EINVAL;
        }
        if(file->f_inode->i_count > 0 && flags & MAP_DENYWRITE)
            return -ETXTBSY;
    }
    else if((flags & MAP_TYPE) != MAP_PRIVATE)
        return -EINVAL;

    if(flags & MAP_FIXED)
    {
        if(addr & ~ PAGE_MASK)
            return -EINVAL;
        if(len > PAGE_OFFSET || addr + len > PAGE_OFFSET)
            return -EINVAL;
    }
    else
    {
        addr = get_unmmapped_area(len);
        if(!addr)
            return -ENOMEM;
    }

    if(file && (!file->f_op || !file->f_op->mmap))
        return -ENODEV;

    vma = (struct vm_area_struct *)kmalloc(sizeof(struct vm_area_struct),GFP_KERNEL);
    if(!vma)
        return -ENOMEM;
    vma->vm_task = current;
    vma->vm_start = addr;
    vma->vm_end = addr + len;
    vma->vm_flags = prot & (VM_READ | VM_WRITE | VM_EXEC);
    vma->vm_flags |= flags & (VM_GROWSDOWN | VM_DENYWRITE | VM_EXECUTABLE);

    if(file)
    {
        if(file->f_mode & FILE_READ)
            vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
        if(flags & MAP_SHARED)
            vma->vm_flags |= VM_SHARED | VM_MAYSHARE;

        if(!(file->f_mode & FILE_WRITE))
            vma->vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
    }
    else
        vma->vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;


    //	vma->vm_page_prot = protection_map[vma->vm_flags & 0X0F];
    vma->vm_ops = NULL;
    vma->vm_offset = off;
    vma->vm_inode = NULL;
    vma->vm_pte = 0;

    do_munmap(addr, len);

    if(file)
        error = file->f_op->mmap(file->f_inode, file, vma);
    else 
        error = anon_map(NULL, NULL, vma);
    if(error)
    {
        kfree(vma);
        return error;
    }

    insert_vm_struct(current, vma);
    merge_segments(current, vma->vm_start, vma->vm_end);
    return addr;

}
Example #20
File: mmap.c  Project: rohsaini/mkunity
int expand_stack(struct vm_area_struct * vma, unsigned long address)
{
	unsigned long grow;
#if 0
	vm_address_t mach_addr;
	kern_return_t kr;
#else
	struct vm_area_struct *new_vma;
#endif

#ifdef STACK_GROWTH_UP
	unsigned long top;

	top = PAGE_MASK & (address + PAGE_SIZE);
	address = vma->vm_end;
	grow = top - address;
	if (top - vma->vm_start 
	    > (unsigned long) current->rlim[RLIMIT_STACK].rlim_cur)
		return -ENOMEM;
#else
	address &= PAGE_MASK;
	grow = vma->vm_start - address;
	if (vma->vm_end - address
	    > (unsigned long) current->rlim[RLIMIT_STACK].rlim_cur ||
	    (vma->vm_mm->total_vm << PAGE_SHIFT) + grow
	    > (unsigned long) current->rlim[RLIMIT_AS].rlim_cur)
		return -ENOMEM;
#endif /* STACK_GROWTH_UP */

#if	VMA_DEBUG
	if (vma_debug) {
		printk("expand_stack(vma=%p): "
		       "address=0x%lx, grow=0x%lx\n",
		       vma, address, grow);
	}
#endif	/* VMA_DEBUG */

#if 0
	mach_addr = (vm_address_t) address;
	kr = vm_allocate(current->osfmach3.task->mach_task_port,
			 &mach_addr,
			 (vm_size_t) grow,
			 FALSE);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(1, kr,
			    ("expand_stack: vm_allocate(0x%x,0x%x)",
			     mach_addr, (vm_size_t) grow));
		return -ENOMEM;
	}
	if (mach_addr != (vm_address_t) address) {
		printk("expand_stack: "
		       "stack expanded at 0x%x instead of 0x%lx\n",
		       mach_addr, address);
		kr = vm_deallocate(current->osfmach3.task->mach_task_port,
				   mach_addr,
				   (vm_size_t) grow);
		if (kr != KERN_SUCCESS) {
			MACH3_DEBUG(1, kr,
				    ("expand_stack: "
				     "can't deallocate bogus stack "
				     "addr=0x%x size=0x%x",
				     mach_addr, (vm_size_t) grow));
		}
		return -ENOMEM;
	}

#ifdef STACK_GROWTH_UP
	vma->vm_end = top;
	vma->vm_offset += grow;
#else
	vma->vm_start = address;
	vma->vm_offset -= grow;
#endif 
#else	/* 0 */
	new_vma = (struct vm_area_struct *)
		kmalloc(sizeof *new_vma, GFP_KERNEL);
	if (!new_vma) {
		return -ENOMEM;
	}
	*new_vma = *vma;
#ifdef	STACK_GROWTH_UP
	new_vma->vm_start = vma->vm_end;
	new_vma->vm_end = vma->vm_end + grow;
	new_vma->vm_offset = vma->vm_offset + (vma->vm_end - vma->vm_start);
	insert_vm_struct(current->mm, new_vma);
	/* merge_segments(current->mm, vma->vm_start, new_vma->vm_end); */
#else	/* STACK_GROWTH_UP */
	new_vma->vm_start = vma->vm_start - grow;
	new_vma->vm_end = vma->vm_start;
	new_vma->vm_offset = 0;
	vma->vm_offset = grow;
	insert_vm_struct(current->mm, new_vma);
	/* merge_segments(current->mm, new_vma->vm_start, vma->vm_end); */
#endif	/* STACK_GROWTH_UP */
	vma = find_vma(current->mm, address);
#endif	/* 0 */

	vma->vm_mm->total_vm += grow >> PAGE_SHIFT;
	if (vma->vm_flags & VM_LOCKED)
		vma->vm_mm->locked_vm += grow >> PAGE_SHIFT;

#if	VMA_DEBUG
	if (vma_debug) {
		printk("expand_stack(vma=%p): "
		       "start=0x%lx,end=0x%lx,off=0x%lx\n",
		       vma, vma->vm_start, vma->vm_end, vma->vm_offset);
	}
#endif	/* VMA_DEBUG */

	return 0;
}
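
Note: for the usual grow-down case (the #else branch), the arithmetic with illustrative values and 4 KiB pages:

/* vma->vm_start = 0xbf800000, faulting address = 0xbf7ff123
 * address &= PAGE_MASK       ->  0xbf7ff000
 * grow = vm_start - address  ->  0x1000 (one page)
 * new_vma covers [0xbf7ff000, 0xbf800000) and total_vm grows by
 * grow >> PAGE_SHIFT = 1 page. */
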
Example #21
/*
 * This is called from binfmt_elf, we create the special vma for the
 * vDSO and insert it into the mm struct tree
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long vdso_pages;
	unsigned long vdso_base;

	if (test_thread_flag(TIF_32BIT)) {
		vdso_pages = vdso32_pages;
		vdso_base = VDSO32_MBASE;
	} else {
		vdso_pages = vdso64_pages;
		vdso_base = VDSO64_MBASE;
	}

	/* vDSO has a problem and was disabled, just don't "enable" it for the
	 * process
	 */
	if (vdso_pages == 0) {
		current->thread.vdso_base = 0;
		return 0;
	}
	vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
	if (vma == NULL)
		return -ENOMEM;
	if (security_vm_enough_memory(vdso_pages)) {
		kmem_cache_free(vm_area_cachep, vma);
		return -ENOMEM;
	}
	memset(vma, 0, sizeof(*vma));

	/*
	 * pick a base address for the vDSO in process space. We have a default
	 * base of 1Mb on which we had a random offset up to 1Mb.
	 * XXX: Add possibility for a program header to specify that location
	 */
	current->thread.vdso_base = vdso_base;
	/*  + ((unsigned long)vma & 0x000ff000); */

	vma->vm_mm = mm;
	vma->vm_start = current->thread.vdso_base;

	/*
	 * the VMA size is one page more than the vDSO since systemcfg
	 * is mapped in the last one
	 */
	vma->vm_end = vma->vm_start + ((vdso_pages + 1) << PAGE_SHIFT);

	/*
	 * our vma flags don't have VM_WRITE so by default, the process isn't allowed
	 * to write those pages.
	 * gdb can break that with ptrace interface, and thus trigger COW on those
	 * pages but it's then your responsibility to never do that on the "data" page
	 * of the vDSO or you'll stop getting kernel updates and your nice userland
	 * gettimeofday will be totally dead. It's fine to use that for setting
	 * breakpoints in the vDSO code pages though
	 */
	vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	vma->vm_flags |= mm->def_flags;
	vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
	vma->vm_ops = &vdso_vmops;

	down_write(&mm->mmap_sem);
	insert_vm_struct(mm, vma);
	mm->total_vm += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	up_write(&mm->mmap_sem);

	return 0;
}
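
Note: the "one page more" comment works out as follows, assuming 4 KiB pages and, say, vdso_pages = 2 (illustrative value):

/* vm_end - vm_start = (vdso_pages + 1) << PAGE_SHIFT
 *                   = (2 + 1) * 4096 = 12 KiB:
 * two vDSO pages plus the trailing systemcfg page, so total_vm grows
 * by 3 pages. */
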
Example #22
File: mm.c  Project: feng-lei/mario
unsigned long do_mmap(unsigned long addr, unsigned long len, unsigned long prot, 
	unsigned long flags, int fd, unsigned long off)
{
	int error;
	struct file *file = NULL;
	struct vm_area_struct *vma;

	if (flags & MAP_ANONYMOUS) {
		if (fd != (unsigned long)-1)
			return -EINVAL;
	} else {
		if (fd >= NR_OPEN || !(file = current->files->fd[fd]))
			return -EBADF;
	}

	if ((len = PAGE_ALIGN(len)) == 0)
		return -EINVAL;
	if (addr > KERNEL_BASE || len > KERNEL_BASE || addr > KERNEL_BASE-len)
		return -EINVAL;

	if (flags & MAP_FIXED) {
		if (addr & ~PAGE_MASK)
			return -EINVAL;
		if (len > KERNEL_BASE || addr > KERNEL_BASE - len)
			return -EINVAL;
	} else {
		addr = get_unmapped_area(addr, len);
		if (!addr)
			return -ENOMEM;
	}

	if (file) {
		if (!file->f_op || !file->f_op->mmap)
			return -ENODEV;
		if (off & ~PAGE_MASK)
			return -EINVAL;
		/* offset overflow? */
		if (off + len < off)
			return -EINVAL;
	}


	switch (flags & MAP_TYPE) {
	case MAP_SHARED:
		if (file && (prot & PROT_WRITE) && !(file->f_mode & 2))
			return -EACCES;
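		/* fall through: a shared mapping must also pass the read check */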
	case MAP_PRIVATE:
		if (file && !(file->f_mode & 1))
			return -EACCES;
		break;
	default:
		return -EINVAL;
	}
	
	vma = (struct vm_area_struct *)kmalloc(sizeof(*vma));
	if (!vma)
		return -ENOMEM;
	vma->vm_mm= current->mm;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_flags = prot & (VM_READ | VM_WRITE | VM_EXEC);
	if (flags & VM_GROWSDOWN)
		vma->vm_flags |= VM_GROWSDOWN;
	if ((flags & MAP_TYPE) == MAP_SHARED)
		vma->vm_flags |= VM_SHARED;

	/* initialize the share ring */
	vma->vm_next_share = vma->vm_prev_share = vma;
	vma->vm_page_prot = get_page_prot(vma->vm_flags);
	vma->vm_ops = NULL;
	if (file)
		vma->vm_offset = off;
	else
		vma->vm_offset = 0;
	vma->vm_inode = NULL;
	do_munmap(addr, len);	/* Clear old maps */
	if (file) {
		error = file->f_op->mmap(file->f_inode, file, vma);
		if (error) {
			kfree(vma);
			return error;
		}
	}
	insert_vm_struct(current->mm, vma);
	/* merge_segments(current->mm, vma->vm_start, vma->vm_end); */
	return addr;
}
Example #23
static inline unsigned long move_vma(struct vm_area_struct * vma,
	unsigned long addr, unsigned long old_len, unsigned long new_len,
	unsigned long new_addr)
{
	struct mm_struct * mm = vma->vm_mm;
	struct vm_area_struct * new_vma, * next, * prev;
	int allocated_vma;

	new_vma = NULL;
	next = find_vma_prev(mm, new_addr, &prev);
	if (next) {
		if (prev && prev->vm_end == new_addr &&
		    can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
			spin_lock(&mm->page_table_lock);
			prev->vm_end = new_addr + new_len;
			spin_unlock(&mm->page_table_lock);
			new_vma = prev;
			if (next != prev->vm_next)
				BUG();
			if (prev->vm_end == next->vm_start && can_vma_merge(next, prev->vm_flags)) {
				spin_lock(&mm->page_table_lock);
				prev->vm_end = next->vm_end;
				__vma_unlink(mm, next, prev);
				spin_unlock(&mm->page_table_lock);

				mm->map_count--;
				kmem_cache_free(vm_area_cachep, next);
			}
		} else if (next->vm_start == new_addr + new_len &&
			   can_vma_merge(next, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
			spin_lock(&mm->page_table_lock);
			next->vm_start = new_addr;
			spin_unlock(&mm->page_table_lock);
			new_vma = next;
		}
	} else {
		prev = find_vma(mm, new_addr-1);
		if (prev && prev->vm_end == new_addr &&
		    can_vma_merge(prev, vma->vm_flags) && !vma->vm_file && !(vma->vm_flags & VM_SHARED)) {
			spin_lock(&mm->page_table_lock);
			prev->vm_end = new_addr + new_len;
			spin_unlock(&mm->page_table_lock);
			new_vma = prev;
		}
	}

	allocated_vma = 0;
	if (!new_vma) {
		new_vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
		if (!new_vma)
			goto out;
		allocated_vma = 1;
	}

	if (!move_page_tables(current->mm, new_addr, addr, old_len)) {
		unsigned long vm_locked = vma->vm_flags & VM_LOCKED;

		if (allocated_vma) {
			*new_vma = *vma;
			new_vma->vm_start = new_addr;
			new_vma->vm_end = new_addr+new_len;
			new_vma->vm_pgoff += (addr-vma->vm_start) >> PAGE_SHIFT;
			new_vma->vm_raend = 0;
			if (new_vma->vm_file)
				get_file(new_vma->vm_file);
			if (new_vma->vm_ops && new_vma->vm_ops->open)
				new_vma->vm_ops->open(new_vma);
			insert_vm_struct(current->mm, new_vma);
		}

		/* XXX: possible errors masked, mapping might remain */
		do_munmap(current->mm, addr, old_len);

		current->mm->total_vm += new_len >> PAGE_SHIFT;
		if (vm_locked) {
			current->mm->locked_vm += new_len >> PAGE_SHIFT;
			if (new_len > old_len)
				make_pages_present(new_addr + old_len,
						   new_addr + new_len);
		}
		return new_addr;
	}
	if (allocated_vma)
		kmem_cache_free(vm_area_cachep, new_vma);
out:
	return -ENOMEM;
}