Example #1
static unsigned long load_aout_interp(struct exec * interp_ex,
			     struct file * interpreter)
{
	unsigned long text_data, elf_entry = ~0UL;
	char * addr;
	loff_t offset;

	current->mm->end_code = interp_ex->a_text;
	text_data = interp_ex->a_text + interp_ex->a_data;
	current->mm->end_data = text_data;
	current->mm->brk = interp_ex->a_bss + text_data;

	switch (N_MAGIC(*interp_ex)) {
	case OMAGIC:
		offset = 32;
		addr = (char *) 0;
		break;
	case ZMAGIC:
	case QMAGIC:
		offset = N_TXTOFF(*interp_ex);
		addr = (char *) N_TXTADDR(*interp_ex);
		break;
	default:
		goto out;
	}

	down_write(&current->mm->mmap_sem);
	do_brk(0, text_data);
	up_write(&current->mm->mmap_sem);

	if (!interpreter->f_op || !interpreter->f_op->read)
		goto out;
	if (interpreter->f_op->read(interpreter, addr, text_data, &offset) < 0)
		goto out;
	flush_icache_range((unsigned long)addr,
	                   (unsigned long)addr + text_data);

	down_write(&current->mm->mmap_sem);
	do_brk(ELF_PAGESTART(text_data + ELF_MIN_ALIGN - 1),
		interp_ex->a_bss);
	up_write(&current->mm->mmap_sem);

	elf_entry = interp_ex->a_entry;

out:
	return elf_entry;
}
static void set_brk(unsigned long start, unsigned long end)
{
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
	if (end <= start)
		return;
	do_brk(start, end - start);
}
Example #3
static void set_brk(unsigned long start, unsigned long end)
{
	start = PAGE_ALIGN(start);
	end = PAGE_ALIGN(end);
	if (end <= start)
		return;
	down_write(&current->mm->mmap_sem);
	do_brk(start, end - start);
	up_write(&current->mm->mmap_sem);
}
Example #4
asmlinkage unsigned long
ia64_brk (unsigned long brk, long arg1, long arg2, long arg3,
	  long arg4, long arg5, long arg6, long arg7, long stack)
{
	extern int vm_enough_memory (long pages);
	struct pt_regs *regs = (struct pt_regs *) &stack;
	unsigned long rlim, retval, newbrk, oldbrk;
	struct mm_struct *mm = current->mm;

	/*
	 * Most of this replicates the code in sys_brk() except for an additional safety
	 * check and the clearing of r8.  However, we can't call sys_brk() because we need
	 * to acquire the mmap_sem before we can do the test...
	 */
	down_write(&mm->mmap_sem);

	if (brk < mm->end_code)
		goto out;
	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk)
		goto set_brk;

	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		if (!do_munmap(mm, newbrk, oldbrk-newbrk,1))
			goto set_brk;
		goto out;
	}

	/* Check against unimplemented/unmapped addresses: */
	if ((newbrk - oldbrk) > RGN_MAP_LIMIT || rgn_offset(newbrk) > RGN_MAP_LIMIT)
		goto out;

	/* Check against rlimit.. */
	rlim = current->rlim[RLIMIT_DATA].rlim_cur;
	if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
		goto out;

	/* Check against existing mmap mappings. */
	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
		goto out;

	/* Ok, looks good - let it rip. */
	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
		goto out;
set_brk:
	mm->brk = brk;
out:
	retval = mm->brk;
	up_write(&mm->mmap_sem);
	regs->r8 = 0;		/* ensure large retval isn't mistaken as error code */
	return retval;
}
Example #5
static void *sys_brk(void *addr) {
	void *ret;
	int err;

	if (0 == (err = do_brk(addr, &ret))) {
		return ret;
	} else {
		curthr->kt_errno = -err;
		return (void *) -1;
	}
}
Example #6
asmlinkage int sunos_brk(u32 baddr)
{
	int freepages, retval = -ENOMEM;
	unsigned long rlim;
	unsigned long newbrk, oldbrk, brk = (unsigned long) baddr;

	down_write(&current->mm->mmap_sem);
	if (brk < current->mm->end_code)
		goto out;
	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(current->mm->brk);
	retval = 0;
	if (oldbrk == newbrk) {
		current->mm->brk = brk;
		goto out;
	}
	/* Always allow shrinking brk. */
	if (brk <= current->mm->brk) {
		current->mm->brk = brk;
		do_munmap(current->mm, newbrk, oldbrk-newbrk);
		goto out;
	}
	/* Check against rlimit and stack.. */
	retval = -ENOMEM;
	rlim = current->rlim[RLIMIT_DATA].rlim_cur;
	if (rlim >= RLIM_INFINITY)
		rlim = ~0;
	if (brk - current->mm->end_code > rlim)
		goto out;
	/* Check against existing mmap mappings. */
	if (find_vma_intersection(current->mm, oldbrk, newbrk+PAGE_SIZE))
		goto out;
	/* stupid algorithm to decide if we have enough memory: while
	 * simple, it hopefully works in most obvious cases.. Easy to
	 * fool it, but this should catch most mistakes.
	 */
	freepages = get_page_cache_size();
	freepages >>= 1;
	freepages += nr_free_pages();
	freepages += nr_swap_pages;
	freepages -= num_physpages >> 4;
	freepages -= (newbrk-oldbrk) >> PAGE_SHIFT;
	if (freepages < 0)
		goto out;
	/* Ok, we have probably got enough memory - let it rip. */
	current->mm->brk = brk;
	do_brk(oldbrk, newbrk-oldbrk);
	retval = 0;
out:
	up_write(&current->mm->mmap_sem);
	return retval;
}
Example #7
asmlinkage unsigned long
ia64_brk (unsigned long brk)
{
	unsigned long rlim, retval, newbrk, oldbrk;
	struct mm_struct *mm = current->mm;

	/*
	 * Most of this replicates the code in sys_brk() except for an additional safety
	 * check and the clearing of r8.  However, we can't call sys_brk() because we need
	 * to acquire the mmap_sem before we can do the test...
	 */
	down_write(&mm->mmap_sem);

	if (brk < mm->end_code)
		goto out;
	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk)
		goto set_brk;

	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		if (!do_munmap(mm, newbrk, oldbrk-newbrk))
			goto set_brk;
		goto out;
	}

	/* Check against unimplemented/unmapped addresses: */
	if ((newbrk - oldbrk) > RGN_MAP_LIMIT || REGION_OFFSET(newbrk) > RGN_MAP_LIMIT)
		goto out;

	/* Check against rlimit.. */
	rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
	if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
		goto out;

	/* Check against existing mmap mappings. */
	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
		goto out;

	/* Ok, looks good - let it rip. */
	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
		goto out;
set_brk:
	mm->brk = brk;
out:
	retval = mm->brk;
	up_write(&mm->mmap_sem);
	force_successful_syscall_return();
	return retval;
}
Example #8
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	unsigned long rlim, retval;
	unsigned long newbrk, oldbrk;
	struct mm_struct *mm = current->mm;
	unsigned long min_brk;

	down_write(&mm->mmap_sem);

#ifdef CONFIG_COMPAT_BRK
	if (current->brk_randomized)
		min_brk = mm->start_brk;
	else
		min_brk = mm->end_data;
#else
	min_brk = mm->start_brk;
#endif
	if (brk < min_brk)
		goto out;

	rlim = rlimit(RLIMIT_DATA);
	if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
			(mm->end_data - mm->start_data) > rlim)
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk)
		goto set_brk;

	
	if (brk <= mm->brk) {
		if (!do_munmap(mm, newbrk, oldbrk-newbrk))
			goto set_brk;
		goto out;
	}

	
	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
		goto out;

	
	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
		goto out;
set_brk:
	mm->brk = brk;
out:
	retval = mm->brk;
	up_write(&mm->mmap_sem);
	return retval;
}
Example #9
static int set_brk(unsigned long start, unsigned long end)
{
	start = PAGE_ALIGN(start);
	end = PAGE_ALIGN(end);
	if (end > start) {
		unsigned long addr;
		down_write(&current->mm->mmap_sem);
		addr = do_brk(start, end - start);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(addr))
			return addr;
	}
	return 0;
}
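Unlike the void set_brk() helpers shown earlier, this variant propagates do_brk()'s result so the caller can abort the exec on failure. A minimal sketch of how a binfmt loader might consume it; the surrounding error handling is illustrative, not taken from the examples on this page:

	retval = set_brk(current->mm->start_brk, current->mm->brk);
	if (retval < 0) {
		/* brk area could not be set up; kill the half-constructed image */
		send_sig(SIGKILL, current, 0);
		return retval;
	}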
/*
 * IRIX maps a page at 0x200000 that holds information about the
 * process and the system, here we map the page and fill the
 * structure
 */
void irix_map_prda_page (void)
{
	unsigned long v;
	struct prda *pp;

	v =  do_brk (PRDA_ADDRESS, PAGE_SIZE);

	if (v < 0)
		return;

	pp = (struct prda *) v;
	pp->prda_sys.t_pid  = current->pid;
	pp->prda_sys.t_prid = read_c0_prid();
	pp->prda_sys.t_rpid = current->pid;

	/* We leave the rest set to zero */
}
Example #11
/*
 * IRIX maps a page at 0x200000 that holds information about the
 * process and the system, here we map the page and fill the
 * structure
 */
static void irix_map_prda_page(void)
{
	unsigned long v;
	struct prda *pp;

	down_write(&current->mm->mmap_sem);
	v =  do_brk(PRDA_ADDRESS, PAGE_SIZE);
	up_write(&current->mm->mmap_sem);

	if (v < 0)
		return;

	pp = (struct prda *) v;
	pp->prda_sys.t_pid  = task_pid_vnr(current);
	pp->prda_sys.t_prid = read_c0_prid();
	pp->prda_sys.t_rpid = task_pid_vnr(current);

	/* We leave the rest set to zero */
}
Example #12
/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  in this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
asmlinkage unsigned long sys_brk(unsigned long brk)
{
	unsigned long rlim, retval;
	unsigned long newbrk, oldbrk;
	struct mm_struct *mm = current->mm;

	down_write(&mm->mmap_sem);

	if (brk < mm->end_code)
		goto out;
	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk)
		goto set_brk;

	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		if (!do_munmap(mm, newbrk, oldbrk-newbrk))
			goto set_brk;
		goto out;
	}

	/* Check against rlimit.. */
	rlim = current->rlim[RLIMIT_DATA].rlim_cur;
	if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
		goto out;

	/* Check against existing mmap mappings. */
	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
		goto out;

	/* Ok, looks good - let it rip. */
	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
		goto out;
set_brk:
	mm->brk = brk;
out:
	retval = mm->brk;
	up_write(&mm->mmap_sem);
	return retval;
}
static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
				     struct file * interpreter,
				     unsigned long *interp_load_addr)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	unsigned long error = ~0UL;
	int retval, i, size;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op || !interpreter->f_op->mmap)
		goto out;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;

	/* Now read in all of the header information */

	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
	error = retval;
	if (retval < 0)
		goto out_close;

	eppnt = elf_phdata;
	for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
	  if (eppnt->p_type == PT_LOAD) {
	    int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
	    int elf_prot = 0;
	    unsigned long vaddr = 0;
	    unsigned long k, map_addr;

	    if (eppnt->p_flags & PF_R) elf_prot =  PROT_READ;
	    if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
	    if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
	    vaddr = eppnt->p_vaddr;
	    if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
	    	elf_type |= MAP_FIXED;

	    map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type);
	    if (BAD_ADDR(map_addr))
	    	goto out_close;

	    if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
		load_addr = map_addr - ELF_PAGESTART(vaddr);
		load_addr_set = 1;
	    }

	    /*
	     * Find the end of the file mapping for this phdr, and keep
	     * track of the largest address we see for this.
	     */
	    k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
	    if (k > elf_bss)
		elf_bss = k;

	    /*
	     * Do the same thing for the memory mapping - between
	     * elf_bss and last_bss is the bss section.
	     */
	    k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
	    if (k > last_bss)
		last_bss = k;
	  }
	}

	/* Now use mmap to map the library into memory. */

	/*
	 * Now fill out the bss section.  First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the 
	 * last bss page.
	 */
	padzero(elf_bss);
	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);	/* What we have mapped so far */

	/* Map the last of the bss segment */
	if (last_bss > elf_bss)
		do_brk(elf_bss, last_bss - elf_bss);

	*interp_load_addr = load_addr;
	error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;

out_close:
	kfree(elf_phdata);
out:
	return error;
}
Example #14
/* This is really simpleminded and specialized - we are loading an
 * a.out library that is given an ELF header.
 */
static int load_irix_library(struct file *file)
{
	struct elfhdr elf_ex;
	struct elf_phdr *elf_phdata  =  NULL;
	unsigned int len = 0;
	int elf_bss = 0;
	int retval;
	unsigned int bss;
	int error;
	int i, j, k;

	error = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
	if (error != sizeof(elf_ex))
		return -ENOEXEC;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		return -ENOEXEC;

	/* First of all, some simple consistency checks. */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	   !file->f_op->mmap)
		return -ENOEXEC;

	/* Now read in all of the header information. */
	if (sizeof(struct elf_phdr) * elf_ex.e_phnum > PAGE_SIZE)
		return -ENOEXEC;

	elf_phdata = kmalloc(sizeof(struct elf_phdr) * elf_ex.e_phnum, GFP_KERNEL);
	if (elf_phdata == NULL)
		return -ENOMEM;

	retval = kernel_read(file, elf_ex.e_phoff, (char *) elf_phdata,
			   sizeof(struct elf_phdr) * elf_ex.e_phnum);

	j = 0;
	for (i=0; i<elf_ex.e_phnum; i++)
		if ((elf_phdata + i)->p_type == PT_LOAD) j++;

	if (j != 1)  {
		kfree(elf_phdata);
		return -ENOEXEC;
	}

	while (elf_phdata->p_type != PT_LOAD) elf_phdata++;

	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file,
			elf_phdata->p_vaddr & 0xfffff000,
			elf_phdata->p_filesz + (elf_phdata->p_vaddr & 0xfff),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			elf_phdata->p_offset & 0xfffff000);
	up_write(&current->mm->mmap_sem);

	k = elf_phdata->p_vaddr + elf_phdata->p_filesz;
	if (k > elf_bss) elf_bss = k;

	if (error != (elf_phdata->p_vaddr & 0xfffff000)) {
		kfree(elf_phdata);
		return error;
	}

	padzero(elf_bss);

	len = (elf_phdata->p_filesz + elf_phdata->p_vaddr+ 0xfff) & 0xfffff000;
	bss = elf_phdata->p_memsz + elf_phdata->p_vaddr;
	if (bss > len) {
	  down_write(&current->mm->mmap_sem);
	  do_brk(len, bss-len);
	  up_write(&current->mm->mmap_sem);
	}
	kfree(elf_phdata);
	return 0;
}
Example #15
void do_m68k_simcall(CPUM68KState *env, int nr)
{
    uint32_t *args;

    args = (uint32_t *)(env->aregs[7] + 4);
    switch (nr) {
    case SYS_EXIT:
        exit(ARG(0));
    case SYS_READ:
        check_err(env, read(ARG(0), (void *)ARG(1), ARG(2)));
        break;
    case SYS_WRITE:
        check_err(env, write(ARG(0), (void *)ARG(1), ARG(2)));
        break;
    case SYS_OPEN:
        check_err(env, open((char *)ARG(0), translate_openflags(ARG(1)),
                            ARG(2)));
        break;
    case SYS_CLOSE:
        {
            /* Ignore attempts to close stdin/out/err.  */
            int fd = ARG(0);
            if (fd > 2)
              check_err(env, close(fd));
            else
              check_err(env, 0);
            break;
        }
    case SYS_BRK:
        {
            int32_t ret;

            ret = do_brk((void *)ARG(0));
            if (ret == -ENOMEM)
                ret = -1;
            check_err(env, ret);
        }
        break;
    case SYS_FSTAT:
        {
            struct stat s;
            int rc;
            struct m86k_sim_stat *p;
            rc = check_err(env, fstat(ARG(0), &s));
            if (rc == 0) {
                p = (struct m86k_sim_stat *)ARG(1);
                p->sim_st_dev = tswap16(s.st_dev);
                p->sim_st_ino = tswap16(s.st_ino);
                p->sim_st_mode = tswap32(s.st_mode);
                p->sim_st_nlink = tswap16(s.st_nlink);
                p->sim_st_uid = tswap16(s.st_uid);
                p->sim_st_gid = tswap16(s.st_gid);
                p->sim_st_rdev = tswap16(s.st_rdev);
                p->sim_st_size = tswap32(s.st_size);
                p->sim_st_atime = tswap32(s.st_atime);
                p->sim_st_mtime = tswap32(s.st_mtime);
                p->sim_st_ctime = tswap32(s.st_ctime);
                p->sim_st_blksize = tswap32(s.st_blksize);
                p->sim_st_blocks = tswap32(s.st_blocks);
            }
        }
        break;
    case SYS_ISATTY:
        check_err(env, isatty(ARG(0)));
        break;
    case SYS_LSEEK:
        check_err(env, lseek(ARG(0), (int32_t)ARG(1), ARG(2)));
        break;
    default:
        cpu_abort(env, "Unsupported m68k sim syscall %d\n", nr);
    }
}
Example #16
/**
 * <Ring 1> The main loop of TASK MM.
 * 
 *****************************************************************************/
PUBLIC void task_mm()
{
	init_mm();

	while (1) {
		send_recv(RECEIVE, ANY, &mm_msg);
		int src = mm_msg.source;
		int reply = 1;

		int msgtype = mm_msg.type;

		switch (msgtype) {
		case FORK:
			mm_msg.RETVAL = do_fork();
			break;
		case EXIT:
			do_exit(mm_msg.STATUS);
			reply = 0;
			break;
		case EXEC:
			mm_msg.RETVAL = do_exec();
			break;
		case WAIT:
			do_wait();
			reply = 0;
			break;
		case KILL:
			mm_msg.RETVAL = do_kill();
			break; 
		case RAISE:
			mm_msg.RETVAL = do_raise();
			break;
		case BRK:
			mm_msg.RETVAL = do_brk();
			break;
		case ACCT:
			mm_msg.RETVAL = do_acct();
			break;
		case GETUID:
			mm_msg.RETVAL = do_getuid();
			break;
		case SETUID:
            mm_msg.RETVAL = do_setuid();
			break;
		case GETGID:
			mm_msg.RETVAL = do_getgid();
			break;
		case SETGID:
			mm_msg.RETVAL = do_setgid();
			break;
		case GETEUID:
			mm_msg.RETVAL = do_geteuid();
			break;
		case GETEGID:
			mm_msg.RETVAL = do_getegid();
			break;
		case SIGACTION:
			mm_msg.RETVAL = do_sigaction();
			break;
		case ALARM:
			mm_msg.RETVAL = do_alarm();
			break;
		default:
			dump_msg("MM::unknown msg", &mm_msg);
			assert(0);
			break;
		}

		if (reply) {
			mm_msg.type = SYSCALL_RET;
			send_recv(SEND, src, &mm_msg);
		}
	}
}
Example #17
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	unsigned long rlim, retval;
	unsigned long newbrk, oldbrk;
	struct mm_struct *mm = current->mm;
	unsigned long min_brk;

	down_write(&mm->mmap_sem);

#ifdef CONFIG_COMPAT_BRK
	/*
	 * CONFIG_COMPAT_BRK can still be overridden by setting
	 * randomize_va_space to 2, which will still cause mm->start_brk
	 * to be arbitrarily shifted
	 */
	if (current->brk_randomized)
		min_brk = mm->start_brk;
	else
		min_brk = mm->end_data;
#else
	min_brk = mm->start_brk;
#endif
	if (brk < min_brk)
		goto out;

	/*
	 * Check against rlimit here. If this check is done later after the test
	 * of oldbrk with newbrk then it can escape the test and let the data
	 * segment grow beyond its set limit the in case where the limit is
	 * not page aligned -Ram Gupta
	 */
	rlim = rlimit(RLIMIT_DATA);
	if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
			(mm->end_data - mm->start_data) > rlim)
		goto out;

	newbrk = PAGE_ALIGN(brk);
	oldbrk = PAGE_ALIGN(mm->brk);
	if (oldbrk == newbrk)
		goto set_brk;

	/* Always allow shrinking brk. */
	if (brk <= mm->brk) {
		if (!do_munmap(mm, newbrk, oldbrk-newbrk))
			goto set_brk;
		goto out;
	}

	/* Check against existing mmap mappings. */
	if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
		goto out;

	/* Ok, looks good - let it rip. */
	if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
		goto out;
set_brk:
	mm->brk = brk;
out:
	retval = mm->brk;
	up_write(&mm->mmap_sem);
	return retval;
}
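All of the sys_brk()/ia64_brk() variants above sit behind the user-space brk()/sbrk() interface. For context, a minimal self-contained sketch of that interface from the caller's side (illustrative values; not taken from any example here):

#define _DEFAULT_SOURCE          /* expose brk()/sbrk() prototypes in glibc */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	void *old_brk = sbrk(0);             /* query the current program break */

	if (sbrk(4096) == (void *) -1) {     /* ask the kernel to extend the heap by one page */
		perror("sbrk");
		return 1;
	}
	printf("break moved from %p to %p\n", old_brk, sbrk(0));

	brk(old_brk);                        /* shrinking the break is always allowed */
	return 0;
}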
Example #18
static int load_aout32_binary(struct linux_binprm * bprm, struct pt_regs * regs)
{
	struct exec ex;
	unsigned long error;
	unsigned long fd_offset;
	unsigned long rlim;
	unsigned long orig_thr_flags;
	int retval;

	ex = *((struct exec *) bprm->buf);		/* exec-header */
	if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != OMAGIC &&
	     N_MAGIC(ex) != QMAGIC && N_MAGIC(ex) != NMAGIC) ||
	    N_TRSIZE(ex) || N_DRSIZE(ex) ||
	    bprm->file->f_path.dentry->d_inode->i_size < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
		return -ENOEXEC;
	}

	fd_offset = N_TXTOFF(ex);

	/* Check initial limits. This avoids letting people circumvent
	 * size limits imposed on them by creating programs with large
	 * arrays in the data or bss.
	 */
	rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
	if (rlim >= RLIM_INFINITY)
		rlim = ~0;
	if (ex.a_data + ex.a_bss > rlim)
		return -ENOMEM;

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		return retval;

	/* OK, This is the point of no return */
	set_personality(PER_SUNOS);

	current->mm->end_code = ex.a_text +
		(current->mm->start_code = N_TXTADDR(ex));
	current->mm->end_data = ex.a_data +
		(current->mm->start_data = N_DATADDR(ex));
	current->mm->brk = ex.a_bss +
		(current->mm->start_brk = N_BSSADDR(ex));
	current->mm->free_area_cache = current->mm->mmap_base;
	current->mm->cached_hole_size = 0;

	current->mm->mmap = NULL;
	compute_creds(bprm);
 	current->flags &= ~PF_FORKNOEXEC;
	if (N_MAGIC(ex) == NMAGIC) {
		loff_t pos = fd_offset;
		/* F**k me plenty... */
		down_write(&current->mm->mmap_sem);	
		error = do_brk(N_TXTADDR(ex), ex.a_text);
		up_write(&current->mm->mmap_sem);
		bprm->file->f_op->read(bprm->file, (char __user *)N_TXTADDR(ex),
			  ex.a_text, &pos);
		down_write(&current->mm->mmap_sem);
		error = do_brk(N_DATADDR(ex), ex.a_data);
		up_write(&current->mm->mmap_sem);
		bprm->file->f_op->read(bprm->file, (char __user *)N_DATADDR(ex),
			  ex.a_data, &pos);
		goto beyond_if;
	}

	if (N_MAGIC(ex) == OMAGIC) {
		loff_t pos = fd_offset;
		down_write(&current->mm->mmap_sem);
		do_brk(N_TXTADDR(ex) & PAGE_MASK,
			ex.a_text+ex.a_data + PAGE_SIZE - 1);
		up_write(&current->mm->mmap_sem);
		bprm->file->f_op->read(bprm->file, (char __user *)N_TXTADDR(ex),
			  ex.a_text+ex.a_data, &pos);
	} else {
		static unsigned long error_time;
		if ((ex.a_text & 0xfff || ex.a_data & 0xfff) &&
		    (N_MAGIC(ex) != NMAGIC) && (jiffies-error_time) > 5*HZ)
		{
			printk(KERN_NOTICE "executable not page aligned\n");
			error_time = jiffies;
		}

		if (!bprm->file->f_op->mmap) {
			loff_t pos = fd_offset;
			down_write(&current->mm->mmap_sem);
			do_brk(0, ex.a_text+ex.a_data);
			up_write(&current->mm->mmap_sem);
			bprm->file->f_op->read(bprm->file,
				  (char __user *)N_TXTADDR(ex),
				  ex.a_text+ex.a_data, &pos);
			goto beyond_if;
		}

	        down_write(&current->mm->mmap_sem);
		error = do_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
			PROT_READ | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
			fd_offset);
	        up_write(&current->mm->mmap_sem);

		if (error != N_TXTADDR(ex)) {
			send_sig(SIGKILL, current, 0);
			return error;
		}

	        down_write(&current->mm->mmap_sem);
 		error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
				PROT_READ | PROT_WRITE | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
				fd_offset + ex.a_text);
	        up_write(&current->mm->mmap_sem);
		if (error != N_DATADDR(ex)) {
			send_sig(SIGKILL, current, 0);
			return error;
		}
	}
beyond_if:
	set_binfmt(&aout32_format);

	set_brk(current->mm->start_brk, current->mm->brk);

	/* Make sure STACK_TOP returns the right thing.  */
	orig_thr_flags = current_thread_info()->flags;
	current_thread_info()->flags |= _TIF_32BIT;

	retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
	if (retval < 0) { 
		current_thread_info()->flags = orig_thr_flags;

		/* Someone check-me: is this error path enough? */ 
		send_sig(SIGKILL, current, 0); 
		return retval;
	}

	current->mm->start_stack =
		(unsigned long) create_aout32_tables((char __user *)bprm->p, bprm);
	tsb_context_switch(current->mm);

	start_thread32(regs, ex.a_entry, current->mm->start_stack);
	if (current->ptrace & PT_PTRACED)
		send_sig(SIGTRAP, current, 0);
	return 0;
}
Example #19
void do_m68k_semihosting(CPUM68KState *env, int nr)
{
    uint32_t args;
    void *p;
    void *q;
    uint32_t len;
    uint32_t result;

    args = env->dregs[1];
    switch (nr) {
    case HOSTED_EXIT:
        gdb_exit(env, env->dregs[0]);
        exit(env->dregs[0]);
    case HOSTED_OPEN:
        if (use_gdb_syscalls()) {
            gdb_do_syscall(m68k_semi_cb, "open,%s,%x,%x", ARG(0), (int)ARG(1),
                           ARG(2), ARG(3));
            return;
        } else {
            if (!(p = lock_user_string(ARG(0)))) {
                /* FIXME - check error code? */
                result = -1;
            } else {
                result = open(p, translate_openflags(ARG(2)), ARG(3));
                unlock_user(p, ARG(0), 0);
            }
        }
        break;
    case HOSTED_CLOSE:
        {
            /* Ignore attempts to close stdin/out/err.  */
            int fd = ARG(0);
            if (fd > 2) {
                if (use_gdb_syscalls()) {
                    gdb_do_syscall(m68k_semi_cb, "close,%x", ARG(0));
                    return;
                } else {
                    result = close(fd);
                }
            } else {
                result = 0;
            }
            break;
        }
    case HOSTED_READ:
        len = ARG(2);
        if (use_gdb_syscalls()) {
            gdb_do_syscall(m68k_semi_cb, "read,%x,%x,%x",
                           ARG(0), ARG(1), len);
            return;
        } else {
            if (!(p = lock_user(VERIFY_WRITE, ARG(1), len, 0))) {
                /* FIXME - check error code? */
                result = -1;
            } else {
                result = read(ARG(0), p, len);
                unlock_user(p, ARG(1), len);
            }
        }
        break;
    case HOSTED_WRITE:
        len = ARG(2);
        if (use_gdb_syscalls()) {
            gdb_do_syscall(m68k_semi_cb, "write,%x,%x,%x",
                           ARG(0), ARG(1), len);
            return;
        } else {
            if (!(p = lock_user(VERIFY_READ, ARG(1), len, 1))) {
                /* FIXME - check error code? */
                result = -1;
            } else {
                result = write(ARG(0), p, len);
                unlock_user(p, ARG(0), 0);
            }
        }
        break;
    case HOSTED_LSEEK:
        {
            uint64_t off;
            off = (uint32_t)ARG(2) | ((uint64_t)ARG(1) << 32);
            if (use_gdb_syscalls()) {
                m68k_semi_is_fseek = 1;
                gdb_do_syscall(m68k_semi_cb, "fseek,%x,%lx,%x",
                               ARG(0), off, ARG(3));
            } else {
                off = lseek(ARG(0), off, ARG(3));
                /* FIXME - handle put_user() failure */
                put_user_u32(off >> 32, args);
                put_user_u32(off, args + 4);
                put_user_u32(errno, args + 8);
            }
            return;
        }
    case HOSTED_RENAME:
        if (use_gdb_syscalls()) {
            gdb_do_syscall(m68k_semi_cb, "rename,%s,%s",
                           ARG(0), (int)ARG(1), ARG(2), (int)ARG(3));
            return;
        } else {
            p = lock_user_string(ARG(0));
            q = lock_user_string(ARG(2));
            if (!p || !q) {
                /* FIXME - check error code? */
                result = -1;
            } else {
                result = rename(p, q);
            }
            unlock_user(p, ARG(0), 0);
            unlock_user(q, ARG(2), 0);
        }
        break;
    case HOSTED_UNLINK:
        if (use_gdb_syscalls()) {
            gdb_do_syscall(m68k_semi_cb, "unlink,%s",
                           ARG(0), (int)ARG(1));
            return;
        } else {
            if (!(p = lock_user_string(ARG(0)))) {
                /* FIXME - check error code? */
                result = -1;
            } else {
                result = unlink(p);
                unlock_user(p, ARG(0), 0);
            }
        }
        break;
    case HOSTED_STAT:
        if (use_gdb_syscalls()) {
            gdb_do_syscall(m68k_semi_cb, "stat,%s,%x",
                           ARG(0), (int)ARG(1), ARG(2));
            return;
        } else {
            struct stat s;
            if (!(p = lock_user_string(ARG(0)))) {
                /* FIXME - check error code? */
                result = -1;
            } else {
                result = stat(p, &s);
                unlock_user(p, ARG(0), 0);
            }
            if (result == 0) {
                translate_stat(env, ARG(2), &s);
            }
        }
        break;
    case HOSTED_FSTAT:
        if (use_gdb_syscalls()) {
            gdb_do_syscall(m68k_semi_cb, "fstat,%x,%x",
                           ARG(0), ARG(1));
            return;
        } else {
            struct stat s;
            result = fstat(ARG(0), &s);
            if (result == 0) {
                translate_stat(env, ARG(1), &s);
            }
        }
        break;
    case HOSTED_GETTIMEOFDAY:
        if (use_gdb_syscalls()) {
            gdb_do_syscall(m68k_semi_cb, "gettimeofday,%x,%x",
                           ARG(0), ARG(1));
            return;
        } else {
            qemu_timeval tv;
            struct gdb_timeval *p;
            result = qemu_gettimeofday(&tv);
            if (result != 0) {
                if (!(p = lock_user(VERIFY_WRITE,
                                    ARG(0), sizeof(struct gdb_timeval), 0))) {
                    /* FIXME - check error code? */
                    result = -1;
                } else {
                    p->tv_sec = cpu_to_be32(tv.tv_sec);
                    p->tv_usec = cpu_to_be64(tv.tv_usec);
                    unlock_user(p, ARG(0), sizeof(struct gdb_timeval));
                }
            }
        }
        break;
    case HOSTED_ISATTY:
        if (use_gdb_syscalls()) {
            gdb_do_syscall(m68k_semi_cb, "isatty,%x", ARG(0));
            return;
        } else {
            result = isatty(ARG(0));
        }
        break;
    case HOSTED_SYSTEM:
        if (use_gdb_syscalls()) {
            gdb_do_syscall(m68k_semi_cb, "system,%s",
                           ARG(0), (int)ARG(1));
            return;
        } else {
            if (!(p = lock_user_string(ARG(0)))) {
                /* FIXME - check error code? */
                result = -1;
            } else {
                result = system(p);
                unlock_user(p, ARG(0), 0);
            }
        }
        break;
    case HOSTED_INIT_SIM:
#if defined(CONFIG_USER_ONLY)
        {
        TaskState *ts = env->opaque;
        /* Allocate the heap using sbrk.  */
        if (!ts->heap_limit) {
            abi_ulong ret;
            uint32_t size;
            uint32_t base;

            base = do_brk(0);
            size = SEMIHOSTING_HEAP_SIZE;
            /* Try a big heap, and reduce the size if that fails.  */
            for (;;) {
                ret = do_brk(base + size);
                if (ret >= (base + size)) {
                    break;
                }
                size >>= 1;
            }
            ts->heap_limit = base + size;
        }
        /* This call may happen before we have writable memory, so return
           values directly in registers.  */
        env->dregs[1] = ts->heap_limit;
        env->aregs[7] = ts->stack_base;
        }
#else
        /* FIXME: This is wrong for boards where RAM does not start at
           address zero.  */
        env->dregs[1] = ram_size;
        env->aregs[7] = ram_size;
#endif
        return;
    default:
        cpu_abort(env, "Unsupported semihosting syscall %d\n", nr);
        result = 0;
    }
Example #20
size_t sys_brk(size_t pos)
{
  return do_brk(pos);
}
Example #21
/*
 * Helper function to process the load operation.
 */
static int
coff_load_object(struct linux_binprm *bprm, struct pt_regs *regs, int binary)
{
	COFF_FILHDR		*coff_hdr = NULL;
	COFF_SCNHDR		*text_sect = NULL, *data_sect = NULL,
				*bss_sect = NULL, *sect_bufr = NULL,
				*sect_ptr = NULL;
	int			text_count = 0, data_count = 0,
				bss_count = 0, lib_count = 0;
	coff_section		text, data, bss;
	u_long			start_addr = 0, p = bprm->p;
	short			flags, aout_size = 0;
	int			pageable = 1, sections = 0, status = 0, i;
	int			coff_exec_fileno;
	mm_segment_t		old_fs;


	coff_hdr = (COFF_FILHDR *)bprm->buf;

	/*
	 * Validate the magic value for the object file.
	 */
	if (COFF_I386BADMAG(*coff_hdr))
		return -ENOEXEC;

	flags = COFF_SHORT(coff_hdr->f_flags);

	/*
	 * The object file should have 32 BIT little endian format. Do not allow
	 * it to have the 16 bit object file flag set as Linux is not able to run
	 * on the 80286/80186/8086.
	 */
	if ((flags & (COFF_F_AR32WR | COFF_F_AR16WR)) != COFF_F_AR32WR)
		return -ENOEXEC;

	/*
	 * If the file is not executable then reject the execution. This means
	 * that there must not be external references.
	 */
	if ((flags & COFF_F_EXEC) == 0)
		return -ENOEXEC;

	/*
	 * Extract the header information which we need.
	 */
	sections = COFF_SHORT(coff_hdr->f_nscns);	/* Number of sections */
	aout_size = COFF_SHORT(coff_hdr->f_opthdr);	/* Size of opt. headr */

	/*
	 * There must be at least one section.
	 */
	if (!sections)
		return -ENOEXEC;

	if (!bprm->file->f_op->mmap)
		pageable = 0;

	if (!(sect_bufr = kmalloc(sections * COFF_SCNHSZ, GFP_KERNEL))) {
		printk(KERN_WARNING "coff: kmalloc failed\n");
		return -ENOMEM;
	}

	status = kernel_read(bprm->file, aout_size + COFF_FILHSZ,
			(char *)sect_bufr, sections * COFF_SCNHSZ);
	if (status < 0) {
		printk(KERN_WARNING "coff: unable to read\n");
		goto out_free_buf;
	}

	status = get_unused_fd();
	if (status < 0) {
		printk(KERN_WARNING "coff: unable to get free fs\n");
		goto out_free_buf;
	}

	get_file(bprm->file);
	fd_install(coff_exec_fileno = status, bprm->file);

	/*
	 *  Loop through the sections and find the various types
	 */
	sect_ptr = sect_bufr;

	for (i = 0; i < sections; i++) {
		long int sect_flags = COFF_LONG(sect_ptr->s_flags);

		switch (sect_flags) {
		case COFF_STYP_TEXT:
			status |= coff_isaligned(sect_ptr);
			text_sect = sect_ptr;
			text_count++;
			break;

		case COFF_STYP_DATA:
			status |= coff_isaligned(sect_ptr);
			data_sect = sect_ptr;
			data_count++;
			break;

		case COFF_STYP_BSS:
			bss_sect = sect_ptr;
			bss_count++;
			break;

		case COFF_STYP_LIB:
			lib_count++;
			break;

		default:
			break;
		}

		sect_ptr = (COFF_SCNHDR *) & ((char *) sect_ptr)[COFF_SCNHSZ];
	}

	/*
	 * If any of the sections weren't properly aligned we aren't
	 * going to be able to demand page this executable. Note that
	 * at this stage the *only* excuse for having status <= 0 is if
	 * the alignment test failed.
	 */
	if (status < 0)
		pageable = 0;

	/*
	 * Ensure that there are the required sections.  There must be one
	 * text sections and one each of the data and bss sections for an
	 * executable.  A library may or may not have a data / bss section.
	 */
	if (text_count != 1) {
		status = -ENOEXEC;
		goto out_free_file;
	}
	if (binary && (data_count != 1 || bss_count != 1)) {
		status = -ENOEXEC;
		goto out_free_file;
	}

	/*
	 * If there is no additional header then assume the file starts
	 * at the first byte of the text section.  This may not be the
	 * proper place, so the best solution is to include the optional
	 * header.  A shared library __MUST__ have an optional header to
	 * indicate that it is a shared library.
	 */
	if (aout_size == 0) {
		if (!binary) {
			status = -ENOEXEC;
			goto out_free_file;
		}
		start_addr = COFF_LONG(text_sect->s_vaddr);
	} else if (aout_size < (short) COFF_AOUTSZ) {
		status = -ENOEXEC;
		goto out_free_file;
	} else {
		COFF_AOUTHDR	*aout_hdr;
		short		aout_magic;

		aout_hdr = (COFF_AOUTHDR *) &((char *)coff_hdr)[COFF_FILHSZ];
		aout_magic = COFF_SHORT(aout_hdr->magic);

		/*
		 * Validate the magic number in the a.out header. If it is valid then
		 * update the starting symbol location. Do not accept these file formats
		 * when loading a shared library.
		 */
		switch (aout_magic) {
		case COFF_OMAGIC:
		case COFF_ZMAGIC:
		case COFF_STMAGIC:
			if (!binary) {
				status = -ENOEXEC;
				goto out_free_file;
			}
			start_addr = (u_int)COFF_LONG(aout_hdr->entry);
			break;
		/*
		 * Magic value for a shared library. This is valid only when
		 * loading a shared library.
		 *
		 * (There is no need for a start_addr. It won't be used.)
		 */
		case COFF_SHMAGIC:
			if (!binary)
				break;
			/* FALLTHROUGH */
		default:
			status = -ENOEXEC;
			goto out_free_file;
		}
	}

	/*
	 *  Generate the proper values for the text fields
	 *
	 *  THIS IS THE POINT OF NO RETURN. THE NEW PROCESS WILL TRAP OUT SHOULD
	 *  SOMETHING FAIL IN THE LOAD SEQUENCE FROM THIS POINT ONWARD.
	 */

	text.scnptr = COFF_LONG(text_sect->s_scnptr);
	text.size = COFF_LONG(text_sect->s_size);
	text.vaddr = COFF_LONG(text_sect->s_vaddr);

	/*
	 *  Generate the proper values for the data fields
	 */

	if (data_sect != NULL) {
		data.scnptr = COFF_LONG(data_sect->s_scnptr);
		data.size = COFF_LONG(data_sect->s_size);
		data.vaddr = COFF_LONG(data_sect->s_vaddr);
	} else {
		data.scnptr = 0;
		data.size = 0;
		data.vaddr = 0;
	}

	/*
	 *  Generate the proper values for the bss fields
	 */

	if (bss_sect != NULL) {
		bss.size = COFF_LONG(bss_sect->s_size);
		bss.vaddr = COFF_LONG(bss_sect->s_vaddr);
	} else {
		bss.size = 0;
		bss.vaddr = 0;
	}

	/*
	 * Flush the executable from memory. At this point the executable is
	 * committed to being defined or a segmentation violation will occur.
	 */

	if (binary) {
		COFF_SCNHDR	*sect_ptr2 = sect_bufr;
		u_long		personality = PER_SVR3;
		int		i;

		if ((status = flush_old_exec(bprm)))
			goto out_free_file;

		/*
		 * Look for clues as to the system this binary was compiled
		 * on in the comments section(s).
		 *
		 * Only look at the main binary, not the shared libraries
		 * (or would it be better to prefer shared libraries over
		 * binaries?  Or could they be different???)
	 	 */
		for (i = 0; i < sections; i++) {
			long	sect_flags = COFF_LONG(sect_ptr2->s_flags);

			if (sect_flags == COFF_STYP_INFO &&
			   (status = coff_parse_comments(bprm->file,
						sect_ptr2, &personality)) > 0)
				goto found;

			sect_ptr2 = (COFF_SCNHDR *) &((char *)sect_ptr2)[COFF_SCNHSZ];
		}

		/*
		 * If no .comments section was found there is no way to
		 * figure out the personality. Odds on it is SCO though...
		 */
		personality = abi_defhandler_coff;

found:
		set_personality(personality);

		current->mm->start_data = 0;
		current->mm->end_data = 0;
		current->mm->end_code = 0;
		current->mm->mmap = NULL;
		current->flags &= ~PF_FORKNOEXEC;
		current->mm->_rss = 0;

		/*
		 * Construct the parameter and environment
		 * string table entries.
		 */
		if ((status = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT)) < 0)
			goto sigsegv;

		p = (u_long)coff_mktables((char *)bprm->p,
				bprm->argc, bprm->envc);

		current->mm->end_code = text.size +
		    (current->mm->start_code = text.vaddr);
		current->mm->end_data = data.size +
		    (current->mm->start_data = data.vaddr);
		current->mm->brk = bss.size +
		    (current->mm->start_brk = bss.vaddr);

		current->mm->start_stack = p;
		compute_creds(bprm);
		start_thread(regs, start_addr, p);
	}

	old_fs = get_fs();
	set_fs(get_ds());

	if (!pageable) {
		/*
		 * Read the file from disk...
		 *
		 * XXX: untested.
		 */
		loff_t pos = data.scnptr;
		status = do_brk(text.vaddr, text.size);
		bprm->file->f_op->read(bprm->file,
				(char *)data.vaddr, data.scnptr, &pos);
		status = do_brk(data.vaddr, data.size);
		bprm->file->f_op->read(bprm->file,
				(char *)text.vaddr, text.scnptr, &pos);
		status = 0;
	} else {
		/* map the text pages...*/
		status = map_coff(bprm->file, &text, PROT_READ | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
			text.scnptr & PAGE_MASK);

		if (status != (text.vaddr & PAGE_MASK)) {
			status = -ENOEXEC;
			goto out_free_file;
		}

		/* map the data pages */
		if (data.size != 0) {
			status = map_coff(bprm->file, &data,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
			    data.scnptr & PAGE_MASK);

			if (status != (data.vaddr & PAGE_MASK)) {
				status = -ENOEXEC;
				goto out_free_file;
			}
		}

		status = 0;
	}

	/*
	 * Construct the bss data for the process. The bss ranges from the
	 * end of the data (which may not be on a page boundary) to the end
	 * of the bss section. Allocate any necessary pages for the data.
	 */
	if (bss.size != 0) {
		down_write(&current->mm->mmap_sem);
		do_mmap(NULL, PAGE_ALIGN(bss.vaddr),
			bss.size + bss.vaddr -
			PAGE_ALIGN(bss.vaddr),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);

		if ((status = coff_clear_memory(bss.vaddr, bss.size)) < 0)
			goto out_free_file;
	}

	set_fs(old_fs);

	if (!binary)
		goto out_free_file;

	/*
	 * Load any shared library for the executable.
	 */
	if (lib_count)
		status = coff_preload_shlibs(bprm, sect_bufr, sections);

	set_binfmt(&coff_format);

	/*
	 * Generate any needed trap for this process. If an error occured then
	 * generate a segmentation violation. If the process is being debugged
	 * then generate the load trap. (Note: If this is a library load then
	 * do not generate the trap here. Pass the error to the caller who
	 * will do it for the process in the outer lay of this procedure call.)
	 */
	if (status < 0) {
sigsegv:
		printk(KERN_WARNING "coff: trapping process with SEGV\n");
		send_sig(SIGSEGV, current, 0);	/* Generate the error trap  */
	} else if (current->ptrace & PT_PTRACED)
		send_sig(SIGTRAP, current, 0);

	/* We are committed. It can't fail */
	status = 0;

out_free_file:
	sys_close(coff_exec_fileno);

out_free_buf:
	kfree(sect_bufr);
	return (status);
}
Example #22
static uint32_t sys_brk(uint32_t arg[])
{
	uintptr_t *brk_store = (uintptr_t *) arg[0];
	return do_brk(brk_store);
}
Example #23
static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
{
	struct exec ex;
	unsigned long error;
	unsigned long fd_offset;
	unsigned long rlim;
	int retval;

	ex = *((struct exec *) bprm->buf);		/* exec-header */
	if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != OMAGIC &&
	     N_MAGIC(ex) != QMAGIC && N_MAGIC(ex) != NMAGIC) ||
	    N_TRSIZE(ex) || N_DRSIZE(ex) ||
	    bprm->file->f_dentry->d_inode->i_size < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
		return -ENOEXEC;
	}

	fd_offset = N_TXTOFF(ex);

	/* Check initial limits. This avoids letting people circumvent
	 * size limits imposed on them by creating programs with large
	 * arrays in the data or bss.
	 */
	rlim = current->rlim[RLIMIT_DATA].rlim_cur;
	if (rlim >= RLIM_INFINITY)
		rlim = ~0;
	if (ex.a_data + ex.a_bss > rlim)
		return -ENOMEM;

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		return retval;

	/* OK, This is the point of no return */
#if defined(__alpha__)
	SET_AOUT_PERSONALITY(bprm, ex);
#elif defined(__sparc__)
	set_personality(PER_SUNOS);
#if !defined(__sparc_v9__)
	memcpy(&current->thread.core_exec, &ex, sizeof(struct exec));
#endif
#else
	set_personality(PER_LINUX);
#endif

	current->mm->end_code = ex.a_text +
		(current->mm->start_code = N_TXTADDR(ex));
	current->mm->end_data = ex.a_data +
		(current->mm->start_data = N_DATADDR(ex));
	current->mm->brk = ex.a_bss +
		(current->mm->start_brk = N_BSSADDR(ex));

	current->mm->rss = 0;
	current->mm->mmap = NULL;

#ifdef CONFIG_ARM_FASS
	arch_new_mm(current, current->mm);
#endif

	compute_creds(bprm);
 	current->flags &= ~PF_FORKNOEXEC;
#ifdef __sparc__
	if (N_MAGIC(ex) == NMAGIC) {
		loff_t pos = fd_offset;
		/* F**k me plenty... */
		/* <AOL></AOL> */
		error = do_brk(N_TXTADDR(ex), ex.a_text);
		bprm->file->f_op->read(bprm->file, (char *) N_TXTADDR(ex),
			  ex.a_text, &pos);
		error = do_brk(N_DATADDR(ex), ex.a_data);
		bprm->file->f_op->read(bprm->file, (char *) N_DATADDR(ex),
			  ex.a_data, &pos);
		goto beyond_if;
	}
#endif

	if (N_MAGIC(ex) == OMAGIC) {
		unsigned long text_addr, map_size;
		loff_t pos;

		text_addr = N_TXTADDR(ex);

#if defined(__alpha__) || defined(__sparc__)
		pos = fd_offset;
		map_size = ex.a_text+ex.a_data + PAGE_SIZE - 1;
#else
		pos = 32;
		map_size = ex.a_text+ex.a_data;
#endif

		error = do_brk(text_addr & PAGE_MASK, map_size);
		if (error != (text_addr & PAGE_MASK)) {
			send_sig(SIGKILL, current, 0);
			return error;
		}

		error = bprm->file->f_op->read(bprm->file, (char *)text_addr,
			  ex.a_text+ex.a_data, &pos);
		if ((signed long)error < 0) {
			send_sig(SIGKILL, current, 0);
			return error;
		}
			 
		flush_icache_range(text_addr, text_addr+ex.a_text+ex.a_data);
	} else {
		static unsigned long error_time, error_time2;
		if ((ex.a_text & 0xfff || ex.a_data & 0xfff) &&
		    (N_MAGIC(ex) != NMAGIC) && (jiffies-error_time2) > 5*HZ)
		{
			printk(KERN_NOTICE "executable not page aligned\n");
			error_time2 = jiffies;
		}

		if ((fd_offset & ~PAGE_MASK) != 0 &&
		    (jiffies-error_time) > 5*HZ)
		{
			printk(KERN_WARNING 
			       "fd_offset is not page aligned. Please convert program: %s\n",
			       bprm->file->f_dentry->d_name.name);
			error_time = jiffies;
		}

		if (!bprm->file->f_op->mmap||((fd_offset & ~PAGE_MASK) != 0)) {
			loff_t pos = fd_offset;
			do_brk(N_TXTADDR(ex), ex.a_text+ex.a_data);
			bprm->file->f_op->read(bprm->file,(char *)N_TXTADDR(ex),
					ex.a_text+ex.a_data, &pos);
			flush_icache_range((unsigned long) N_TXTADDR(ex),
					   (unsigned long) N_TXTADDR(ex) +
					   ex.a_text+ex.a_data);
			goto beyond_if;
		}

		down_write(&current->mm->mmap_sem);
		error = do_mmap(bprm->file, N_TXTADDR(ex), ex.a_text,
			PROT_READ | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
			fd_offset);
		up_write(&current->mm->mmap_sem);

		if (error != N_TXTADDR(ex)) {
			send_sig(SIGKILL, current, 0);
			return error;
		}

		down_write(&current->mm->mmap_sem);
 		error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
				PROT_READ | PROT_WRITE | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
				fd_offset + ex.a_text);
		up_write(&current->mm->mmap_sem);
		if (error != N_DATADDR(ex)) {
			send_sig(SIGKILL, current, 0);
			return error;
		}
	}
beyond_if:
	set_binfmt(&aout_format);

	set_brk(current->mm->start_brk, current->mm->brk);

	retval = setup_arg_pages(bprm); 
	if (retval < 0) { 
		/* Someone check-me: is this error path enough? */ 
		send_sig(SIGKILL, current, 0); 
		return retval;
	}

	current->mm->start_stack =
		(unsigned long) create_aout_tables((char *) bprm->p, bprm);
#ifdef __alpha__
	regs->gp = ex.a_gpvalue;
#endif
	start_thread(regs, ex.a_entry, current->mm->start_stack);
	if (current->ptrace & PT_PTRACED)
		send_sig(SIGTRAP, current, 0);
#ifndef __arm__
	return 0;
#else
	return regs->ARM_r0;
#endif
}
Example #24
/* N.B. Move to .h file and use code in fs/binfmt_aout.c? */
static int load_aout32_library(struct file *file)
{
	struct inode * inode;
	unsigned long bss, start_addr, len;
	unsigned long error;
	int retval;
	struct exec ex;

	inode = file->f_path.dentry->d_inode;

	retval = -ENOEXEC;
	error = kernel_read(file, 0, (char *) &ex, sizeof(ex));
	if (error != sizeof(ex))
		goto out;

	/* We come in here for the regular a.out style of shared libraries */
	if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != QMAGIC) || N_TRSIZE(ex) ||
	    N_DRSIZE(ex) || ((ex.a_entry & 0xfff) && N_MAGIC(ex) == ZMAGIC) ||
	    inode->i_size < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
		goto out;
	}

	if (N_MAGIC(ex) == ZMAGIC && N_TXTOFF(ex) &&
	    (N_TXTOFF(ex) < inode->i_sb->s_blocksize)) {
		printk("N_TXTOFF < BLOCK_SIZE. Please convert library\n");
		goto out;
	}

	if (N_FLAGS(ex))
		goto out;

	/* For  QMAGIC, the starting address is 0x20 into the page.  We mask
	   this off to get the starting address for the page */

	start_addr =  ex.a_entry & 0xfffff000;

	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file, start_addr, ex.a_text + ex.a_data,
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			N_TXTOFF(ex));
	up_write(&current->mm->mmap_sem);
	retval = error;
	if (error != start_addr)
		goto out;

	len = PAGE_ALIGN(ex.a_text + ex.a_data);
	bss = ex.a_text + ex.a_data + ex.a_bss;
	if (bss > len) {
		down_write(&current->mm->mmap_sem);
		error = do_brk(start_addr + len, bss - len);
		up_write(&current->mm->mmap_sem);
		retval = error;
		if (error != start_addr + len)
			goto out;
	}
	retval = 0;
out:
	return retval;
}
Example #25
static int load_aout_library(struct file *file)
{
	struct inode * inode;
	unsigned long bss, start_addr, len;
	unsigned long error;
	int retval;
	struct exec ex;

	inode = file->f_dentry->d_inode;

	retval = -ENOEXEC;
	error = kernel_read(file, 0, (char *) &ex, sizeof(ex));
	if (error != sizeof(ex))
		goto out;

	/* We come in here for the regular a.out style of shared libraries */
	if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != QMAGIC) || N_TRSIZE(ex) ||
	    N_DRSIZE(ex) || ((ex.a_entry & 0xfff) && N_MAGIC(ex) == ZMAGIC) ||
	    inode->i_size < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
		goto out;
	}

	if (N_FLAGS(ex))
		goto out;

	/* For  QMAGIC, the starting address is 0x20 into the page.  We mask
	   this off to get the starting address for the page */
#ifndef __arm__
	start_addr =  ex.a_entry & 0xfffff000;
#else
	start_addr = ex.a_entry & 0xffff8000;
#endif

	if ((N_TXTOFF(ex) & ~PAGE_MASK) != 0) {
		static unsigned long error_time;
		loff_t pos = N_TXTOFF(ex);

		if ((jiffies-error_time) > 5*HZ)
		{
			printk(KERN_WARNING 
			       "N_TXTOFF is not page aligned. Please convert library: %s\n",
			       file->f_dentry->d_name.name);
			error_time = jiffies;
		}

		do_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
		
		file->f_op->read(file, (char *)start_addr,
			ex.a_text + ex.a_data, &pos);
		flush_icache_range((unsigned long) start_addr,
				   (unsigned long) start_addr + ex.a_text + ex.a_data);

		retval = 0;
		goto out;
	}
	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file, start_addr, ex.a_text + ex.a_data,
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			N_TXTOFF(ex));
	up_write(&current->mm->mmap_sem);
	retval = error;
	if (error != start_addr)
		goto out;

	len = PAGE_ALIGN(ex.a_text + ex.a_data);
	bss = ex.a_text + ex.a_data + ex.a_bss;
	if (bss > len) {
		error = do_brk(start_addr + len, bss - len);
		retval = error;
		if (error != start_addr + len)
			goto out;
	}
	retval = 0;
out:
	return retval;
}
Example #26
static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
				     struct file * interpreter,
				     unsigned long *interp_load_addr)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	unsigned long error = ~0UL;
	int retval, i, size;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op || !interpreter->f_op->mmap)
		goto out;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;

	if (interp_elf_ex->e_phnum < 1 ||
	    interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;

	/* Now read in all of the header information */

	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(interpreter,interp_elf_ex->e_phoff,(char *)elf_phdata,size);
	error = -EIO;
	if (retval != size) {
		if (retval < 0)
			error = retval;	
		goto out_close;
	}

	eppnt = elf_phdata;
	for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
	  if (eppnt->p_type == PT_LOAD) {
	    int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
	    int elf_prot = 0;
	    unsigned long vaddr = 0;
	    unsigned long k, map_addr;

	    if (eppnt->p_flags & PF_R) elf_prot =  PROT_READ;
	    if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
	    if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
	    vaddr = eppnt->p_vaddr;
	    if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
	    	elf_type |= MAP_FIXED;

	    map_addr = elf_map(interpreter, load_addr + vaddr, eppnt, elf_prot, elf_type);
	    if (BAD_ADDR(map_addr))
	    	goto out_close;

	    if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
		load_addr = map_addr - ELF_PAGESTART(vaddr);
		load_addr_set = 1;
	    }

	    /*
	     * Check to see if the section's size will overflow the
	     * allowed task size. Note that p_filesz must always be
	     * <= p_memsize so it is only necessary to check p_memsz.
	     */
	    k = load_addr + eppnt->p_vaddr;
	    if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
		eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
	        error = -ENOMEM;
		goto out_close;
	    }

	    /*
	     * Find the end of the file mapping for this phdr, and keep
	     * track of the largest address we see for this.
	     */
	    k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
	    if (k > elf_bss)
		elf_bss = k;

	    /*
	     * Do the same thing for the memory mapping - between
	     * elf_bss and last_bss is the bss section.
	     */
	    k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
	    if (k > last_bss)
		last_bss = k;
	  }
	}

	/* Now use mmap to map the library into memory. */

	/*
	 * Now fill out the bss section.  First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the 
	 * last bss page.
	 */
	padzero(elf_bss);
	elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);	/* What we have mapped so far */

	/* Map the last of the bss segment */
	if (last_bss > elf_bss) {
		down_write(&current->mm->mmap_sem);
		error = do_brk(elf_bss, last_bss - elf_bss);
		up_write(&current->mm->mmap_sem);
		if (BAD_ADDR(error))
			goto out_close;
	}

	*interp_load_addr = load_addr;
	/*
	 * XXX: is everything deallocated properly if this happens
	 * to be ~0UL (that is, we succeeded, but the header is broken
	 * and thus the caller will think that we failed)? We'd better
	 * switch to out-of-band error reporting.
	 */
	error = ((unsigned long) interp_elf_ex->e_entry) + load_addr;

out_close:
	kfree(elf_phdata);
out:
	return error;
}
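To make the bss handling above concrete: assuming ELF_MIN_ALIGN is a single 4 KiB page, a segment whose file image ends at elf_bss = 0x0804a123 and whose memory image ends at last_bss = 0x0804d000 is finished in two steps. padzero() zeroes the tail of the partially filled page (0x0804a123 up to 0x0804b000), the rounding expression ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1) then yields 0x0804b000, and do_brk(0x0804b000, 0x2000) maps the remaining two anonymous zero pages so the bss extends to 0x0804d000. (The addresses are illustrative only.)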
Example #27
/* This is much more generalized than the library routine read function,
 * so we keep this separate.  Technically the library read function
 * is only provided so that we can read a.out libraries that have
 * an ELF header.
 */
static unsigned int load_irix_interp(struct elfhdr * interp_elf_ex,
				     struct file * interpreter,
				     unsigned int *interp_load_addr)
{
	struct elf_phdr *elf_phdata  =  NULL;
	struct elf_phdr *eppnt;
	unsigned int len;
	unsigned int load_addr;
	int elf_bss;
	int retval;
	unsigned int last_bss;
	int error;
	int i;
	unsigned int k;

	elf_bss = 0;
	last_bss = 0;
	error = load_addr = 0;

	print_elfhdr(interp_elf_ex);

	/* First of all, some simple consistency checks */
	if ((interp_elf_ex->e_type != ET_EXEC &&
	     interp_elf_ex->e_type != ET_DYN) ||
	     !interpreter->f_op->mmap) {
		printk("IRIX interp has bad e_type %d\n", interp_elf_ex->e_type);
		return 0xffffffff;
	}

	/* Now read in all of the header information */
	if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > PAGE_SIZE) {
	    printk("IRIX interp header bigger than a page (%d)\n",
		   (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum));
	    return 0xffffffff;
	}

	elf_phdata = kmalloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum,
			     GFP_KERNEL);

	if (!elf_phdata) {
		printk("Cannot kmalloc phdata for IRIX interp.\n");
		return 0xffffffff;
	}

	/* If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (interp_elf_ex->e_phentsize != 32) {
		printk("IRIX interp e_phentsize == %d != 32 ",
		       interp_elf_ex->e_phentsize);
		kfree(elf_phdata);
		return 0xffffffff;
	}

	retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
			   (char *) elf_phdata,
			   sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);

	dump_phdrs(elf_phdata, interp_elf_ex->e_phnum);

	eppnt = elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			if (eppnt->p_flags & PF_R)
				elf_prot =  PROT_READ;
			if (eppnt->p_flags & PF_W)
				elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X)
				elf_prot |= PROT_EXEC;
			elf_type |= MAP_FIXED;
			vaddr = eppnt->p_vaddr;

			pr_debug("INTERP do_mmap"
			         "(%p, %08lx, %08lx, %08lx, %08lx, %08lx) ",
			         interpreter, vaddr,
			         (unsigned long)
			         (eppnt->p_filesz + (eppnt->p_vaddr & 0xfff)),
			         (unsigned long)
			         elf_prot, (unsigned long) elf_type,
			         (unsigned long)
			         (eppnt->p_offset & 0xfffff000));

			down_write(&current->mm->mmap_sem);
			error = do_mmap(interpreter, vaddr,
			eppnt->p_filesz + (eppnt->p_vaddr & 0xfff),
			elf_prot, elf_type,
			eppnt->p_offset & 0xfffff000);
			up_write(&current->mm->mmap_sem);

			if (error < 0 && error > -1024) {
				printk("Aieee IRIX interp mmap error=%d\n",
				       error);
				break;  /* Real error */
			}
			pr_debug("error=%08lx ", (unsigned long) error);
			if (!load_addr && interp_elf_ex->e_type == ET_DYN) {
				load_addr = error;
				pr_debug("load_addr = error ");
			}

			/*
			 * Find the end of the file  mapping for this phdr, and
			 * keep track of the largest address we see for this.
			 */
			k = eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/* Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = eppnt->p_memsz + eppnt->p_vaddr;
			if (k > last_bss)
				last_bss = k;
			pr_debug("\n");
		}
	}

	/* Now use mmap to map the library into memory. */
	if (error < 0 && error > -1024) {
		pr_debug("got error %d\n", error);
		kfree(elf_phdata);
		return 0xffffffff;
	}

	/* Now fill out the bss section.  First pad the last page up
	 * to the page boundary, and then perform a mmap to make sure
	 * that there are zero-mapped pages up to and including the
	 * last bss page.
	 */
	pr_debug("padzero(%08lx) ", (unsigned long) (elf_bss));
	padzero(elf_bss);
	len = (elf_bss + 0xfff) & 0xfffff000; /* What we have mapped so far */

	pr_debug("last_bss[%08lx] len[%08lx]\n", (unsigned long) last_bss,
	         (unsigned long) len);

	/* Map the last of the bss segment */
	if (last_bss > len) {
		down_write(&current->mm->mmap_sem);
		do_brk(len, (last_bss - len));
		up_write(&current->mm->mmap_sem);
	}
	kfree(elf_phdata);

	*interp_load_addr = load_addr;
	return ((unsigned int) interp_elf_ex->e_entry);
}
Example #28
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	error = -ENOEXEC;
	retval = kernel_read(file, 0, (char *) &elf_ex, sizeof(elf_ex));
	if (retval != sizeof(elf_ex))
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	   !elf_check_arch(&elf_ex) || !file->f_op || !file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	retval = kernel_read(file, elf_ex.e_phoff, (char *)eppnt, j);
	if (retval != j)
		goto out_free_ph;

	for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD) j++;
	if (j != 1)
		goto out_free_ph;

	while (eppnt->p_type != PT_LOAD) 
		eppnt++;

	/* Now use mmap to map the library into memory. */
	down_write(&current->mm->mmap_sem);
	error = do_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	up_write(&current->mm->mmap_sem);
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	padzero(elf_bss);

	len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1);
	bss = eppnt->p_memsz + eppnt->p_vaddr;
	if (bss > len) {
		down_write(&current->mm->mmap_sem);
		do_brk(len, bss - len);
		up_write(&current->mm->mmap_sem);
	}
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}
Example #29
static uint32_t
sys_brk(uint32_t arg[]) {
    //kprintf("[sys_brk! nr_free_pages()=%d]\n", nr_free_pages());
    //if (nr_free_pages() < 2) return 1;
    uintptr_t *brk_store = (uintptr_t *)arg[0];
    return do_brk(brk_store);
}