Example 1
static int
load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs)
{
    int retval;
    unsigned int size;
    unsigned long som_entry;
    struct som_hdr *som_ex;
    struct som_exec_auxhdr *hpuxhdr;

    /* Get the exec-header */
    som_ex = (struct som_hdr *) bprm->buf;

    retval = check_som_header(som_ex);
    if (retval != 0)
        goto out;

    /* Now read in the auxiliary header information */

    retval = -ENOMEM;
    size = som_ex->aux_header_size;
    if (size > SOM_PAGESIZE)
        goto out;
    hpuxhdr = kmalloc(size, GFP_KERNEL);
    if (!hpuxhdr)
        goto out;

    retval = kernel_read(bprm->file, som_ex->aux_header_location,
                         (char *) hpuxhdr, size);
    if (retval != size) {
        if (retval >= 0)
            retval = -EIO;
        goto out_free;
    }

    /* Flush all traces of the currently running executable */
    retval = flush_old_exec(bprm);
    if (retval)
        goto out_free;

    /* OK, This is the point of no return */
    current->flags &= ~PF_FORKNOEXEC;
    current->personality = PER_HPUX;
    setup_new_exec(bprm);

    /* Set the task size for HP-UX processes such that
     * the gateway page is outside the address space.
     * This can be fixed later, but for now, this is much
     * easier.
     */

    current->thread.task_size = 0xc0000000;

    /* Set map base to allow enough room for hp-ux heap growth */

    current->thread.map_base = 0x80000000;

    retval = map_som_binary(bprm->file, hpuxhdr);
    if (retval < 0)
        goto out_free;

    som_entry = hpuxhdr->exec_entry;
    kfree(hpuxhdr);

    set_binfmt(&som_format);
    install_exec_creds(bprm);
    setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);

    create_som_tables(bprm);

    current->mm->start_stack = bprm->p;

#if 0
    printk("(start_brk) %08lx\n" , (unsigned long) current->mm->start_brk);
    printk("(end_code) %08lx\n" , (unsigned long) current->mm->end_code);
    printk("(start_code) %08lx\n" , (unsigned long) current->mm->start_code);
    printk("(end_data) %08lx\n" , (unsigned long) current->mm->end_data);
    printk("(start_stack) %08lx\n" , (unsigned long) current->mm->start_stack);
    printk("(brk) %08lx\n" , (unsigned long) current->mm->brk);
#endif

    map_hpux_gateway_page(current,current->mm);

    start_thread_som(regs, som_entry, bprm->p);
    return 0;

    /* error cleanup */
out_free:
    kfree(hpuxhdr);
out:
    return retval;
}
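The example registers itself with set_binfmt(&som_format), but the excerpt does not show how som_format is defined or hooked into exec(). As context only, a loader of this vintage (load_binary still takes a struct pt_regs argument) is typically wired up as sketched below; this follows the standard struct linux_binfmt / register_binfmt() API of that era, and anything beyond the names already visible above is an assumption, not code from the example.

#include <linux/module.h>
#include <linux/init.h>
#include <linux/binfmts.h>

/* Sketch: how a handler such as load_som_binary() is offered to exec().
 * Only .load_binary is required; .load_shlib/.core_dump are optional. */
static struct linux_binfmt som_format = {
	.module      = THIS_MODULE,
	.load_binary = load_som_binary,
};

static int __init init_som_binfmt(void)
{
	/* After this, exec() will pass SOM images to load_som_binary(). */
	return register_binfmt(&som_format);
}

static void __exit exit_som_binfmt(void)
{
	unregister_binfmt(&som_format);
}

module_init(init_som_binfmt);
module_exit(exit_som_binfmt);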
Example 2
/*
 * Helper function to process the load operation.
 */
static int
xout_load_object(struct linux_binprm * bpp, struct pt_regs *rp, int executable)
{
	struct xexec			*xexec = (struct xexec *)bpp->buf;
	struct xext			*xext = (struct xext *)(xexec + 1);
	struct xseg			*seglist;
	struct file			*fp = NULL;
	u_long				addr, lPers;
	int				nsegs, ntext, ndata;
	int				pageable = 1, err = 0, i;
#ifdef CONFIG_BINFMT_XOUT_X286
        struct file			*file;
#endif

	lPers = abi_personality((char *)_BX(rp));
	if (lPers == 0) lPers = PER_XENIX;

	if (xexec->x_magic != X_MAGIC) {
		return -ENOEXEC;
	}

	switch (xexec->x_cpu & XC_CPU) {
		case XC_386:
			break;
#if defined(CONFIG_BINFMT_XOUT_X286)
		case XC_8086:
		case XC_286:
		case XC_286V:
		case XC_186:
			if (!Emulx286)
				return -ENOEXEC;
			file = open_exec(Emulx286);
			if (file) {
				fput(bpp->file);
				bpp->file = file;
				kernel_read(bpp->file, 0L, bpp->buf, sizeof(bpp->buf));
			}
			return -ENOEXEC;
#endif
		default:
			dprintk(KERN_DEBUG "xout: unsupported CPU type (%02x)\n",
					xexec->x_cpu);
			return -ENOEXEC;
	}

	/*
	 * We can't handle byte or word swapped headers. Well, we
	 * *could*, but surely they should never happen?
	 */
	if ((xexec->x_cpu & (XC_BSWAP | XC_WSWAP)) != XC_WSWAP) {
		dprintk(KERN_DEBUG "xout: wrong byte or word sex (%02x)\n",
				xexec->x_cpu);
		return -ENOEXEC;
	}

	/* Check it's an executable. */
	if (!(xexec->x_renv & XE_EXEC)) {
		dprintk(KERN_DEBUG "xout: not executable\n");
		return -ENOEXEC;
	}

	/*
	 * There should be an extended header and there should be
	 * some segments. At this stage we don't handle non-segmented
	 * binaries. I'm not sure you can get them under Xenix anyway.
	 */
	if (xexec->x_ext != sizeof(struct xext)) {
		dprintk(KERN_DEBUG "xout: bad extended header\n");
		return -ENOEXEC;
	}

	if (!(xexec->x_renv & XE_SEG) || !xext->xe_segsize) {
		dprintk(KERN_DEBUG "xout: not segmented\n");
		return -ENOEXEC;
	}

	if (!(seglist = kmalloc(xext->xe_segsize, GFP_KERNEL))) {
		printk(KERN_WARNING "xout: allocating segment list failed\n");
		return -ENOMEM;
	}

	err = kernel_read(bpp->file, xext->xe_segpos,
			(char *)seglist, xext->xe_segsize);
	if (err < 0) {
		dprintk(KERN_DEBUG "xout: problem reading segment table\n");
		goto out;
	}

	if (!bpp->file->f_op->mmap)
		pageable = 0;

	nsegs = xext->xe_segsize / sizeof(struct xseg);

	ntext = ndata = 0;
	for (i = 0; i < nsegs; i++) {
		switch (seglist[i].xs_type) {
			case XS_TTEXT:
				if (isnotaligned(seglist+i))
					pageable = 0;
				ntext++;
				break;
			case XS_TDATA:
				if (isnotaligned(seglist+i))
					pageable = 0;
				ndata++;
				break;
		}
	}

	if (!ndata)
		goto out;

	/*
	 * Generate the proper values for the text fields
	 *
	 * THIS IS THE POINT OF NO RETURN. THE NEW PROCESS WILL TRAP OUT SHOULD
	 * SOMETHING FAIL IN THE LOAD SEQUENCE FROM THIS POINT ONWARD.
	 */

	/*
	 *  Flush the executable from memory. At this point the executable is
	 *  committed to being defined or a segmentation violation will occur.
	 */
	if (executable) {
		dprintk(KERN_DEBUG "xout: flushing executable\n");

		flush_old_exec(bpp);

		if ((lPers & 0xFF) == (current->personality & 0xFF))
			set_personality(0);
		set_personality(lPers);
#if defined(CONFIG_ABI_TRACE)
	abi_trace(ABI_TRACE_UNIMPL, "Personality %08X assigned\n",
				(unsigned int)current->personality);
#endif
#ifdef CONFIG_64BIT
		set_thread_flag(TIF_IA32);
		clear_thread_flag(TIF_ABI_PENDING);
#endif
		current->mm->mmap        = NULL;

#ifdef set_mm_counter
#if _KSL > 14
		set_mm_counter(current->mm, file_rss, 0);
#else
		set_mm_counter(current->mm, rss, 0);
#endif
#else
		current->mm->rss = 0;
#endif
 
#if _KSL > 10
		if ((err = setup_arg_pages(bpp, STACK_TOP, EXSTACK_DEFAULT)) < 0) 
#else
		if ((err = setup_arg_pages(bpp, EXSTACK_DEFAULT)) < 0) 
#endif
		{
			send_sig(SIGSEGV, current, 1);
			return (err);
		}

		bpp->p = (u_long)xout_create_tables((char *)bpp->p, bpp,
				(xexec->x_cpu & XC_CPU) == XC_386 ? 1 : 0);

		current->mm->start_code  = 0;
		current->mm->end_code    = xexec->x_text;
		current->mm->end_data    = xexec->x_text + xexec->x_data;
		current->mm->start_brk   =
		current->mm->brk         = xexec->x_text + xexec->x_data + xexec->x_bss;

#if _KSL > 28
		install_exec_creds(bpp);
#else
 		compute_creds(bpp);
#endif
		current->flags &= ~PF_FORKNOEXEC;

#if _KSL < 15
#ifdef CONFIG_64BIT
		__asm__ volatile (
		"movl %0,%%fs; movl %0,%%es; movl %0,%%ds"
		: :"r" (0));
		__asm__ volatile (
		"pushf; cli; swapgs; movl %0,%%gs; mfence; swapgs; popf"
		: :"r" (0));
		write_pda(oldrsp,bpp->p);
		_FLG(rp) = 0x200;
#else
		__asm__ volatile (
		"movl %0,%%fs ; movl %0,%%gs"
		: :"r" (0));
		_DS(rp) = _ES(rp) = __USER_DS;
#endif
		_SS(rp) = __USER_DS;
		_SP(rp) = bpp->p;
		_CS(rp) = __USER_CS;
		_IP(rp) = xexec->x_entry;
		set_fs(USER_DS);
#else
		start_thread(rp, xexec->x_entry, bpp->p);
#endif
#ifdef CONFIG_64BIT
	__asm__ volatile("movl %0,%%es; movl %0,%%ds": :"r" (__USER32_DS));
	_SS(rp) = __USER32_DS;
	_CS(rp) = __USER32_CS;
#endif

		dprintk(KERN_DEBUG "xout: entry point = 0x%x:0x%08lx\n",
				xext->xe_eseg, xexec->x_entry);


	}
	/*
	 * Scan the segments and map them into the process space. If this
	 * executable is pageable (unlikely since Xenix aligns to 1k
	 * boundaries and we want it aligned to 4k boundaries) this is
	 * all we need to do. If it isn't pageable we go round again
	 * afterwards and load the data. We have to do this in two steps
	 * because if segments overlap within a 4K page we'll lose the
	 * first instance when we remap the page. Hope that's clear...
	 */
	for (i = 0; err >= 0 && i < nsegs; i++) {
		struct xseg		*sp = seglist+i;

		if (sp->xs_attr & XS_AMEM) {
			err = xout_amen(fp, sp, pageable, &addr,
				xexec, rp, (!ntext && ndata == 1));
		}

	}

	/*
	 * We better fix start_data because sys_brk looks there to
	 * calculate data size.
	 * Kernel 2.2 did look at end_code so this is reasonable.
	 */
	if (current->mm->start_data == current->mm->start_code)
		current->mm->start_data = current->mm->end_code;

	dprintk(KERN_DEBUG "xout: start code 0x%08lx, end code 0x%08lx,"
	    " start data 0x%08lx, end data 0x%08lx, brk 0x%08lx\n",
	    current->mm->start_code, current->mm->end_code,
	    current->mm->start_data, current->mm->end_data,
	    current->mm->brk);

	if (pageable)
		goto trap;
	if (err < 0)
		goto trap;

	for (i = 0; (err >= 0) && (i < nsegs); i++) {
		struct xseg		*sp = seglist + i;
		u_long			psize;

		if (sp->xs_type == XS_TTEXT || sp->xs_type == XS_TDATA) {
			dprintk(KERN_DEBUG "xout: read to 0x%08lx from 0x%08lx,"
					" length 0x%8lx\n", sp->xs_rbase,
					sp->xs_filpos, sp->xs_psize);

			if (sp->xs_psize < 0)
				continue;

			/*
			 * Do we still get the size ? Yes! [joerg]
			 */
			psize = kernel_read(bpp->file, sp->xs_filpos,
				(char *)((long)sp->xs_rbase), sp->xs_psize);

			if (psize != sp->xs_psize) {
				dprintk(KERN_DEBUG "xout: short read 0x%8lx\n",psize);
				err = -1;
				break;
			}
		}
	}

	/*
	 * Generate any needed trap for this process. If an error occurred then
	 * generate a segmentation violation. If the process is being debugged
	 * then generate the load trap. (Note: If this is a library load then
	 * do not generate the trap here. Pass the error to the caller who
	 * will do it for the process in the outer layer of this procedure call.)
	 */
trap:
	if (executable) {
		if (err < 0) {
			dprintk(KERN_DEBUG "xout: loader forces seg fault "
					"(err = %d)\n", err);
			send_sig(SIGSEGV, current, 0);
		} 
#ifdef CONFIG_PTRACE
		/* --- Red Hat specific handling --- */
#else
		else if (current->ptrace & PT_PTRACED)
			 send_sig(SIGTRAP, current, 0);
#endif
		err = 0;
	}

out:
	kfree(seglist);

	dprintk(KERN_DEBUG "xout: binfmt_xout: result = %d\n", err);

	/*
	 * If we are using the [2]86 emulation overlay we enter this
	 * rather than the real program and give it the information
	 * it needs to start the ball rolling.
	 */
	/*
	 * Xenix 386 programs expect the initial brk value to be in eax
	 * on start up. Hence if we succeeded we need to pass back
	 * the brk value rather than the status. Ultimately the
	 * ret_from_sys_call assembly will place this in eax before
	 * resuming (starting) the process.
	 */
	return (err < 0 ? err : current->mm->brk);
}
Example 3
static int load_macho_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{ 
	unsigned long def_flags = 0;
	void* entry_point = 0;
	int retval = -ENOEXEC;
	int file_size = 0;
	int executable_stack = EXSTACK_DEFAULT;
	size_t macho_header_sz = sizeof(macho_header);
	macho_header* head = ((macho_header*)bprm->buf);
	struct file *linker_file = NULL;
	
	/* have we got enough space? */
	if (!head) {
		retval = -ENOMEM;
		goto out_ret;
	}
	
	retval = ml_checkImage(bprm->file, head);
	if (retval) {
		printk(KERN_WARNING "load_macho_binary: image failed sanity checks, not loading \n");
		goto out_ret;
	}
	
	/*
		XXX: this should be retrieved by ml_checkImage()
	*/
	file_size = ml_getFileSize(bprm->file);
	
	/*
		The file seems to be alright, so set up an environment for the 
		new binary to run in. After this, the old image will no longer be 
		usable. If some of the load commands are broken, this process is doomed.
	*/
	retval = flush_old_exec(bprm);
	if (retval) {
		panic("load_macho_binary: flush_old_exec failed\n");
	}
	else {
		current->flags &= ~PF_FORKNOEXEC;
		current->mm->def_flags = def_flags;
		
		setup_new_exec(bprm);
		
		/* set personality */
		unsigned int personality = current->personality & ~PER_MASK;
		personality |= PER_LINUX;
		
		/*
		 	This flag has to be set for 32-bit architectures (I think).
		*/
		personality |= ADDR_LIMIT_32BIT;
		
		set_personality(personality);

		/* set stuff */
		current->mm->free_area_cache = current->mm->mmap_base;
		current->mm->cached_hole_size = 0;
		//retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP), executable_stack);
					
		if (retval < 0) {
			//send_sig(SIGKILL, current, 0);
			//goto out_ret;
		}
		
		/* stack */
		current->mm->start_stack = bprm->p;
	}
	
	
	/*
		Read the load commands from the file.
	*/
	size_t offset;
	size_t oldoffset;
	uint32_t ncmds;
	uint8_t* addr;

	offset = 0;
	ncmds = head->ncmds;
	addr = kmalloc(head->sizeofcmds, GFP_KERNEL);
	if (!addr) {
		retval = -ENOMEM;
		goto out_ret;
	}
	retval = -EINVAL;
	
	int ret = 0;
	
	/*
		Top of the image data. This is needed to position the heap.
	*/
	int top_data = 0;
	
	/*
		First text segment where the mach header is.
	*/
	void* first_text = 0;
	void* first_text_linker = 0;
	
	/* read in load commands */
	kernel_read(bprm->file, macho_header_sz, addr, head->sizeofcmds);
	
	while (ncmds--) {
		/* LC pointer */
		struct load_command	*lcp = 
		(struct load_command *)(addr + offset);
		
		oldoffset = offset;
		offset += lcp->cmdsize;
		
		if (oldoffset > offset ||
		    lcp->cmdsize < sizeof(struct load_command) ||
		    offset > head->sizeofcmds + macho_header_sz)
		{
			printk(KERN_WARNING "load_macho_binary: malformed binary - lc overflow \n");
			goto lc_ret;
		}
		
		/*  Parse load commands.
		 
			We only need a bare minimum to get the image up and running. Dyld will
			take care of all the other stuff.
		 */
		switch(lcp->cmd) {
			case LC_SEGMENT:
				ret = ml_loadSegment(bprm, file_size, (struct segment_command*)lcp, &top_data, &first_text, 0);
				if (ret != LOAD_SUCCESS) {
					printk(KERN_WARNING "load_macho_binary: segment loading failure \n");
					goto lc_ret;
				}
				break;
			case LC_LOAD_DYLINKER:
				ret = ml_loadDylinker(bprm, file_size, (struct dylinker_command*)lcp, &linker_file);
				if (ret != LOAD_SUCCESS) {
					printk(KERN_WARNING "load_macho_binary: dylinker loading failure \n");
					goto lc_ret;
				}
				else {
					/* done */
				}
				break;
			case LC_UNIXTHREAD:
				ret = ml_loadUnixThread(bprm, file_size, (struct arm_thread_command*)lcp, &entry_point);
				if (ret != LOAD_SUCCESS) {
					printk(KERN_WARNING "load_macho_binary: unix thread loading failure \n");
					goto lc_ret;
				}
				break;
			default: 
				if (_verboseLog)
					printk(KERN_WARNING "load_macho_binary: unsupported lc 0x%p \n", (void*)lcp->cmd);

				break;
		}
	}
	
	/*
		Bootstrap the dynamic linker if needed.
	*/
	if (linker_file) {
		int dylinker_load_addr = top_data;
		
		ml_bootstrapDylinker(linker_file,
							&top_data,
							&first_text_linker,
							&entry_point);
		
		/* slide the entry point */
		entry_point = entry_point + dylinker_load_addr;
			
		if (_verboseLog)				
			printk(KERN_WARNING "load_macho_binary: dylinker's first text segment @ %d, new pc @ %d \n",
					first_text_linker,
					(int)entry_point);
	}
	
	/*
		Now, I don't know what these are used for, but I'm fairly sure
		they're *very* important. So let's set them up. 
		
		See 'linux/mm_types.h':
		unsigned long start_code, end_code, start_data, end_data;
		unsigned long start_brk, brk, start_stack;
	*/	
	current->mm->start_code = 0; /* IMP */
	current->mm->end_code = top_data; /* IMP */
	current->mm->start_data = 0;
	current->mm->end_data = top_data;
		
	if (_verboseLog)
		printk(KERN_WARNING "load_macho_binary: setting up heap ...\n");

	/* Set up an empty heap. This will be grown as more memory is allocated.  */
	int brkret = ml_setBrk(top_data, top_data);

	if (_verboseLog)
		printk(KERN_WARNING "load_macho_binary: setting up misc ...\n");

	/* setup misc stuff */
	set_binfmt(&macho_format);
	install_exec_creds(bprm);

	/*
		Stack (grows down on ARM).
	*/
	uint32_t* stack = bprm->p;
	uint32_t* argv_array;
	uint32_t* argv;
	uint32_t* envp_array;
	uint32_t* envp;
	uint32_t total_argv_size;
	uint32_t total_env_size;

	/* Construct envp array. */
	envp = envp_array = stack = (uint32_t*)stack - ((bprm->envc+1));

	/* Construct argv array. */
	argv = argv_array = stack = (uint32_t*)stack - ((bprm->argc+1));

	if (_verboseLog)
		printk(KERN_WARNING "load_macho_binary: setting up stack @ %p ...\n", (uint32_t*)stack);

	uint32_t argc = bprm->argc;
	uint32_t envc = bprm->envc;
	char* p = bprm->p;

	/* Set up argv pointers */
	current->mm->arg_start = (unsigned long)p;
	while(argc--) {
		char c;

		put_user(p,argv++);
		do {
			get_user(c,p++);
		} while (c);
	}
	put_user(NULL,argv);

	/* Set up envp pointers */
	current->mm->arg_end = current->mm->env_start = (unsigned long) p;
	while(envc--) {
		char c;

		put_user(p,envp++);
		do {
			get_user(c,p++);
		} while (c);
	}
	put_user(NULL,envp);
	current->mm->env_end = (unsigned long) p;

	/*
		The actual stuff passed to the linker goes here.
	*/
	stack = (uint32_t*)stack - (4);

	stack[0] = (uint32_t)first_text; /* mach_header */
	stack[1] = bprm->argc; /* argc */
	stack[2] = argv_array; /* argv */
	stack[3] = (uint32_t)first_text_linker; /* linker's mach_header */
	
	if (_verboseLog)
		printk(KERN_WARNING "load_macho_binary: setting up main thread ...\n");	
	
	/*
		Set up the main thread
	*/
	if (BAD_ADDR(entry_point)) {
		/* entry point is not executable */
		
		printk(KERN_WARNING "load_macho_binary: bad entry point \n");
		force_sig(SIGSEGV, current);
		retval = -EINVAL;
		goto lc_ret;
	}
	
	if (_verboseLog)
		printk(KERN_WARNING "load_macho_binary: setting up registers ...\n");

	/* 
		See 'start_thread' in 'processor.h'
		'start_thread' provides an ELF implementation of this function.
		This is for the Darwin ABI implementation which is used by iPhoneOS binaries.
	*/
	unsigned long initial_pc = (unsigned long)entry_point;	
	
	/* exit supervisor and enter user */
	set_fs(USER_DS);
	memset(regs->uregs, 0, sizeof(regs->uregs));
	regs->ARM_cpsr = USR_MODE;	

	/* not sure */
	if (elf_hwcap & HWCAP_THUMB && initial_pc & 1)
		regs->ARM_cpsr |= PSR_T_BIT;
		
	/* set up control regs */	
	regs->ARM_cpsr |= PSR_ENDSTATE;	
	regs->ARM_pc = initial_pc & ~1;		/* pc */
	regs->ARM_sp = stack;		/* sp */

	/* This is actually ignored, but set it anyway */
	regs->ARM_r2 = stack[2];	/* r2 (envp) */	
	regs->ARM_r1 = stack[1];	/* r1 (argv) */
	regs->ARM_r0 = stack[0];	/* r0 (argc) */	
	
	/* this will work for mmu and nonmmu */
	nommu_start_thread(regs);
	
	wire_weird_pages();	
			
	/*
		Binary is now loaded. Return 0 to signify success.
	*/
	retval = 0;

	if (_verboseLog)
		printk(KERN_WARNING "load_macho_binary: complete, heap starts at %d, brkret %d \n", top_data, brkret);

	/*
	 	Teardown
	*/
	lc_ret:
		kfree(addr);
	out_ret:
		return retval;
}
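For reference, the four words that the loader pushes immediately below the argv/envp pointer arrays (the stack[0]..stack[3] assignments above) can be summarized as a struct. This is only an illustrative sketch derived from those assignments; the struct and field names are hypothetical and do not come from any Darwin or kernel header.

#include <stdint.h>

/* Hypothetical view of the block at the final user-mode stack pointer,
 * mirroring what load_macho_binary() stores in stack[0]..stack[3]. */
struct macho_start_stack {
	uint32_t image_mach_header;  /* stack[0]: first text segment of the main image */
	uint32_t argc;               /* stack[1]: bprm->argc */
	uint32_t argv;               /* stack[2]: address of the argv[] pointer array */
	uint32_t linker_mach_header; /* stack[3]: first text segment of the dynamic linker */
};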
Example 4
static int load_exeso_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
    struct elfhdr *elf_ex;
    struct elf_phdr *elf_phdata = NULL;
    struct mm_struct *mm;
    unsigned long load_addr = 0;
    unsigned long error;
    int retval = 0;
    unsigned long pe_entry, ntdll_load_addr = 0;
    unsigned long start_code, end_code, start_data, end_data;
    unsigned long ntdll_entry;
    int executable_stack = EXSTACK_DEFAULT;
    unsigned long def_flags = 0;
    unsigned long stack_top;
#ifdef NTDLL_SO
    unsigned long	interp_load_addr;
    unsigned long	interp_entry;
#endif
    struct eprocess	*process;
    struct ethread	*thread;
    PRTL_USER_PROCESS_PARAMETERS	ppb;
    OBJECT_ATTRIBUTES	ObjectAttributes;
    INITIAL_TEB	init_teb;

    BOOLEAN is_win32=FALSE;
    struct startup_info *info=NULL;
    struct eprocess	*parent_eprocess=NULL;
    struct ethread	*parent_ethread=NULL;
    struct w32process* child_w32process =NULL;
    struct w32process* parent_w32process =NULL;

    elf_ex = (struct elfhdr *)bprm->buf;
    retval = -ENOEXEC;
    /* First of all, some simple consistency checks */
    if (memcmp(elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
        goto out;
    if (elf_ex->e_type != ET_EXEC && elf_ex->e_type != ET_DYN)
        goto out;
    if (!elf_check_arch(elf_ex))
        goto out;
    if (!bprm->file->f_op||!bprm->file->f_op->mmap)
        goto out;

    if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
        goto out;
    if (elf_ex->e_phnum < 1 ||
            elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
        goto out;

    if(!check_exeso(bprm))
        goto out;

    start_code = ~0UL;
    end_code = 0;
    start_data = 0;
    end_data = 0;

    if(current->parent->ethread)
    {
        is_win32 = TRUE;
        parent_ethread = current->parent->ethread;
        parent_eprocess = parent_ethread->threads_process;
    }

    /* Flush all traces of the currently running executable */
    retval = flush_old_exec(bprm);
    if (retval) {
        goto out;
    }

    /* OK, This is the point of no return */
    mm = current->mm;
    current->flags &= ~PF_FORKNOEXEC;
    mm->def_flags = def_flags;

    current->signal->rlim[RLIMIT_STACK].rlim_cur = WIN32_STACK_LIMIT;
    current->signal->rlim[RLIMIT_STACK].rlim_max = WIN32_STACK_LIMIT;
    current->personality |= ADDR_COMPAT_LAYOUT;
    arch_pick_mmap_layout(mm);

    /* Do this so that we can load the ntdll, if need be.  We will
       change some of these later */
    mm->free_area_cache = mm->mmap_base = WIN32_UNMAPPED_BASE;
    mm->cached_hole_size = 0;
    stack_top = WIN32_STACK_LIMIT + WIN32_LOWEST_ADDR;
    retval = setup_arg_pages(bprm, stack_top, executable_stack);
    if (retval < 0)
        goto out_free_file;

    down_write(&mm->mmap_sem);
    /* reserve first 0x100000 */
    do_mmap_pgoff(NULL, 0, WIN32_LOWEST_ADDR, PROT_NONE, MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, 0);
    /* reserve first 0x7fff0000 - 0x80000000 */
    do_mmap_pgoff(NULL, WIN32_TASK_SIZE - 0x10000, 0x10000,
                  PROT_NONE, MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, 0);
    /* reserve first 0x81000000 - 0xc0000000
     * 0x80000000 - 0x81000000 used for wine SYSTEM_HEAP */
    do_mmap_pgoff(NULL, WIN32_TASK_SIZE + WIN32_SYSTEM_HEAP_SIZE,
                  TASK_SIZE - WIN32_TASK_SIZE - WIN32_SYSTEM_HEAP_SIZE,
                  PROT_NONE, MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, 0);
    up_write(&mm->mmap_sem);

#ifdef NTDLL_SO
    /* search ntdll.dll.so in $PATH, default is /usr/local/lib/wine/ntdll.dll.so */
    if (!*ntdll_name)
        search_ntdll();

    /* map ntdll.dll.so */
    map_system_dll(current, ntdll_name, &ntdll_load_addr, &interp_load_addr);

    pe_entry = get_pe_entry();
    ntdll_entry = get_ntdll_entry();
    interp_entry = get_interp_entry();
#endif

    set_binfmt(&exeso_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
    retval = arch_setup_additional_pages(bprm, executable_stack);
    if (retval < 0) {
        goto out_free_file;
    }
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

    install_exec_creds(bprm);
    current->flags &= ~PF_FORKNOEXEC;

#ifdef NTDLL_SO
    /* copy argv, env, and auxvec to stack, all for interpreter */
    create_elf_tables_aux(bprm,
                          ntdll_load_addr, ntdll_phoff, ntdll_phnum, get_ntdll_start_thunk(),
                          load_addr, elf_ex->e_phoff, elf_ex->e_phnum, 0,
                          interp_load_addr, interp_entry, 0);
#endif

    mm->end_code = end_code;
    mm->start_code = start_code;
    mm->start_data = start_data;
    mm->end_data = end_data;
    mm->start_stack = bprm->p;

    if (current->personality & MMAP_PAGE_ZERO) {
        /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
           and some applications "depend" upon this behavior.
           Since we do not have the power to recompile these, we
           emulate the SVr4 behavior.  Sigh.  */
        down_write(&mm->mmap_sem);
        error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
                        MAP_FIXED | MAP_PRIVATE, 0);
        up_write(&mm->mmap_sem);
    }


    /* create win-related structure */
    INIT_OBJECT_ATTR(&ObjectAttributes, NULL, 0, NULL, NULL);

    /* Create EPROCESS */
    retval = create_object(KernelMode,
                           process_object_type,
                           &ObjectAttributes,
                           KernelMode,
                           NULL,
                           sizeof(struct eprocess),
                           0,
                           0,
                           (PVOID *)&process);
    if (retval != STATUS_SUCCESS) {
        goto out_free_file;
    }

    /* init eprocess */
    eprocess_init(NULL, FALSE, process);
    process->unique_processid = create_cid_handle(process, process_object_type);
    if (!process->unique_processid)
        goto out_free_eproc;

    /* initialize EProcess and KProcess */
    process->section_base_address = (void *)load_addr;

    /* FIXME: PsCreateCidHandle */

    /* Create PEB */
    if ((retval = create_peb(process)))
        goto out_free_process_cid;

    /* Create PPB */
    if(is_win32 == FALSE)
    {
        create_ppb(&ppb, process, bprm, bprm->filename, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
        ((PEB *)process->peb)->ProcessParameters = ppb;
    }
    /* allocate a Win32 thread object */
    retval = create_object(KernelMode,
                           thread_object_type,
                           &ObjectAttributes,
                           KernelMode,
                           NULL,
                           sizeof(struct ethread),
                           0,
                           0,
                           (PVOID *)&thread);
    if (retval) {
        goto out_free_process_cid;
    }

    thread->cid.unique_thread = create_cid_handle(thread, thread_object_type);
    thread->cid.unique_process = process->unique_processid;
    if (!thread->cid.unique_thread)
        goto out_free_ethread;

    /* set the teb */
    init_teb.StackBase = (PVOID)(bprm->p);
    init_teb.StackLimit = (PVOID)WIN32_LOWEST_ADDR + PAGE_SIZE;
    thread->tcb.teb = create_teb(process, (PCLIENT_ID)&thread->cid, &init_teb);
    if (IS_ERR(thread->tcb.teb)) {
        retval = PTR_ERR(thread->tcb.teb);
        goto out_free_thread_cid;
    }

    /* Init KThread */
    ethread_init(thread, process, current);

    sema_init(&thread->exec_semaphore,0);
    if (is_win32 == TRUE) //parent is a windows process
    {
        down(&thread->exec_semaphore);  //wait for the parent

        child_w32process = process->win32process;
        parent_w32process = parent_eprocess->win32process;
        info = child_w32process->startup_info;

        //now parent has finished its work
        if(thread->inherit_all)
        {
            create_handle_table(parent_eprocess, TRUE, process);
            child_w32process = create_w32process(parent_w32process, TRUE, process);
        }
    }

    deref_object(process);
    deref_object(thread);

    set_teb_selector(current, (long)thread->tcb.teb);

    thread->start_address = (void *)pe_entry;	/* FIXME */

    /* save current trap frame */
    thread->tcb.trap_frame = (struct ktrap_frame *)regs;

    /* init apc, to call LdrInitializeThunk */
#if 0
    thread_apc = kmalloc(sizeof(KAPC), GFP_KERNEL);
    if (!thread_apc) {
        retval = -ENOMEM;
        goto out_free_thread_cid;
    }
    apc_init(thread_apc,
             &thread->tcb,
             OriginalApcEnvironment,
             thread_special_apc,
             NULL,
             (PKNORMAL_ROUTINE)ntdll_entry,
             UserMode,
             (void *)(bprm->p + 12));
    insert_queue_apc(thread_apc, (void *)interp_entry, (void *)extra_page, IO_NO_INCREMENT);
#ifndef TIF_APC
#define	TIF_APC	13
#endif
    set_tsk_thread_flag(current, TIF_APC);
#endif

#ifdef ELF_PLAT_INIT
    /*
     * The ABI may specify that certain registers be set up in special
     * ways (on i386 %edx is the address of a DT_FINI function, for
     * example).  In addition, it may also specify (eg, PowerPC64 ELF)
     * that the e_entry field is the address of the function descriptor
     * for the startup routine, rather than the address of the startup
     * routine itself.  This macro performs whatever initialization to
     * the regs structure is required as well as any relocations to the
     * function descriptor entries when executing dynamically linked apps.
     */
    ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

    start_thread(regs, interp_entry, bprm->p);
    if (unlikely(current->ptrace & PT_PTRACED)) {
        if (current->ptrace & PT_TRACE_EXEC)
            ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
        else
            send_sig(SIGTRAP, current, 0);
    }

    retval = 0;

    try_module_get(THIS_MODULE);

    /* return from w32syscall_exit, not syscall_exit */
    ((unsigned long *)regs)[-1] = (unsigned long)w32syscall_exit;
    regs->fs = TEB_SELECTOR;

out:
    if(elf_phdata)
        kfree(elf_phdata);
    return retval;

    /* error cleanup */
out_free_thread_cid:
    delete_cid_handle(thread->cid.unique_thread, thread_object_type);
out_free_ethread:
    deref_object(thread);
out_free_process_cid:
    delete_cid_handle(process->unique_processid, process_object_type);
out_free_eproc:
    deref_object(process);
out_free_file:
    send_sig(SIGKILL, current, 0);
    goto out;
}
Example 5
/*
 * Helper function to process the load operation.
 */
static int
coff_load_object(struct linux_binprm *bprm, struct pt_regs *regs, int binary)
{
	COFF_FILHDR		*coff_hdr = NULL;
	COFF_SCNHDR		*text_sect = NULL, *data_sect = NULL,
				*bss_sect = NULL, *sect_bufr = NULL,
				*sect_ptr = NULL;
	int			text_count = 0, data_count = 0,
				bss_count = 0, lib_count = 0;
	coff_section		text, data, bss;
	u_long			start_addr = 0, p = bprm->p, m_addr, lPers;
	short			flags, aout_size = 0;
	int			pageable = 1, sections = 0, status = 0, i, ce;
	int			coff_exec_fileno;
	mm_segment_t		old_fs;


	lPers = abi_personality((char *)_BX(regs));
	ce = cap_mmap(0);

	coff_hdr = (COFF_FILHDR *)bprm->buf;

	/*
	 * Validate the magic value for the object file.
	 */
	if (COFF_I386BADMAG(*coff_hdr))
		return -ENOEXEC;

	flags = COFF_SHORT(coff_hdr->f_flags);

	/*
	 * The object file should be in 32-bit little-endian format. Do not allow
	 * it to have the 16-bit object file flag set, as Linux cannot run
	 * on the 80286/80186/8086.
	 */
	if ((flags & (COFF_F_AR32WR | COFF_F_AR16WR)) != COFF_F_AR32WR)
		return -ENOEXEC;

	/*
	 * If the file is not executable then reject the execution. This means
	 * that there must not be external references.
	 */
	if ((flags & COFF_F_EXEC) == 0)
		return -ENOEXEC;

	/*
	 * Extract the header information which we need.
	 */
	sections = COFF_SHORT(coff_hdr->f_nscns);	/* Number of sections */
	aout_size = COFF_SHORT(coff_hdr->f_opthdr);	/* Size of opt. header */

	/*
	 * There must be at least one section.
	 */
	if (!sections)
		return -ENOEXEC;

	if (!bprm->file->f_op->mmap)
		pageable = 0;

	if (!(sect_bufr = kmalloc(sections * COFF_SCNHSZ, GFP_KERNEL))) {
		printk(KERN_WARNING "coff: kmalloc failed\n");
		return -ENOMEM;
	}

	status = kernel_read(bprm->file, aout_size + COFF_FILHSZ,
			(char *)sect_bufr, sections * COFF_SCNHSZ);
	if (status < 0) {
		printk(KERN_WARNING "coff: unable to read\n");
		goto out_free_buf;
	}

	status = get_unused_fd();
	if (status < 0) {
		printk(KERN_WARNING "coff: unable to get free fs\n");
		goto out_free_buf;
	}

	get_file(bprm->file);
	fd_install(coff_exec_fileno = status, bprm->file);

	/*
	 *  Loop through the sections and find the various types
	 */
	sect_ptr = sect_bufr;

	for (i = 0; i < sections; i++) {
		long int sect_flags = COFF_LONG(sect_ptr->s_flags);

		switch (sect_flags) {
		case COFF_STYP_TEXT:
			status |= coff_isaligned(sect_ptr);
			text_sect = sect_ptr;
			text_count++;
			break;

		case COFF_STYP_DATA:
			status |= coff_isaligned(sect_ptr);
			data_sect = sect_ptr;
			data_count++;
			break;

		case COFF_STYP_BSS:
			bss_sect = sect_ptr;
			bss_count++;
			break;

		case COFF_STYP_LIB:
			lib_count++;
			break;

		default:
			break;
		}

		sect_ptr = (COFF_SCNHDR *) & ((char *) sect_ptr)[COFF_SCNHSZ];
	}

	/*
	 * If any of the sections weren't properly aligned we aren't
	 * going to be able to demand page this executable. Note that
	 * at this stage the *only* excuse for having status <= 0 is if
	 * the alignment test failed.
	 */
	if (status < 0)
		pageable = 0;

	/*
	 * Ensure that there are the required sections.  There must be one
	 * text section and one each of the data and bss sections for an
	 * executable.  A library may or may not have a data / bss section.
	 */
	if (text_count != 1) {
		status = -ENOEXEC;
		goto out_free_file;
	}
	if (binary && (data_count != 1 || bss_count != 1)) {
		status = -ENOEXEC;
		goto out_free_file;
	}

	/*
	 * If there is no additional header then assume the file starts
	 * at the first byte of the text section.  This may not be the
	 * proper place, so the best solution is to include the optional
	 * header.  A shared library __MUST__ have an optional header to
	 * indicate that it is a shared library.
	 */
	if (aout_size == 0) {
		if (!binary) {
			status = -ENOEXEC;
			goto out_free_file;
		}
		start_addr = COFF_LONG(text_sect->s_vaddr);
	} else if (aout_size < (short) COFF_AOUTSZ) {
		status = -ENOEXEC;
		goto out_free_file;
	} else {
		COFF_AOUTHDR	*aout_hdr;
		short		aout_magic;

		aout_hdr = (COFF_AOUTHDR *) &((char *)coff_hdr)[COFF_FILHSZ];
		aout_magic = COFF_SHORT(aout_hdr->magic);

		/*
		 * Validate the magic number in the a.out header. If it is valid then
		 * update the starting symbol location. Do not accept these file formats
		 * when loading a shared library.
		 */
		switch (aout_magic) {
		case COFF_OMAGIC:
		case COFF_ZMAGIC:
		case COFF_STMAGIC:
			if (!binary) {
				status = -ENOEXEC;
				goto out_free_file;
			}
			start_addr = (u_int)COFF_LONG(aout_hdr->entry);
			break;
		/*
		 * Magic value for a shared library. This is valid only when
		 * loading a shared library.
		 *
		 * (There is no need for a start_addr. It won't be used.)
		 */
		case COFF_SHMAGIC:
			if (!binary)
				break;
			/* FALLTHROUGH */
		default:
			status = -ENOEXEC;
			goto out_free_file;
		}
	}

	/*
	 *  Generate the proper values for the text fields
	 *
	 *  THIS IS THE POINT OF NO RETURN. THE NEW PROCESS WILL TRAP OUT SHOULD
	 *  SOMETHING FAIL IN THE LOAD SEQUENCE FROM THIS POINT ONWARD.
	 */

	text.scnptr = COFF_LONG(text_sect->s_scnptr);
	text.size = COFF_LONG(text_sect->s_size);
	text.vaddr = COFF_LONG(text_sect->s_vaddr);

	/*
	 *  Generate the proper values for the data fields
	 */

	if (data_sect != NULL) {
		data.scnptr = COFF_LONG(data_sect->s_scnptr);
		data.size = COFF_LONG(data_sect->s_size);
		data.vaddr = COFF_LONG(data_sect->s_vaddr);
	} else {
		data.scnptr = 0;
		data.size = 0;
		data.vaddr = 0;
	}

	/*
	 *  Generate the proper values for the bss fields
	 */

	if (bss_sect != NULL) {
		bss.size = COFF_LONG(bss_sect->s_size);
		bss.vaddr = COFF_LONG(bss_sect->s_vaddr);
	} else {
		bss.size = 0;
		bss.vaddr = 0;
	}

	/*
	 * Flush the executable from memory. At this point the executable is
	 * committed to being defined or a segmentation violation will occur.
	 */

	if (binary) {
		COFF_SCNHDR	*sect_ptr2 = sect_bufr;
		u_long		personality = PER_SVR3;
		int		i;

		if ((status = flush_old_exec(bprm)))
			goto out_free_file;

		/*
		 * Look for clues as to the system this binary was compiled
		 * on in the comments section(s).
		 *
		 * Only look at the main binary, not the shared libraries
		 * (or would it be better to prefer shared libraries over
		 * binaries?  Or could they be different???)
	 	 */
		for (i = 0; i < sections; i++) {
			long	sect_flags = COFF_LONG(sect_ptr2->s_flags);

			if (sect_flags == COFF_STYP_INFO &&
			   (status = coff_parse_comments(bprm->file,
						sect_ptr2, &personality)) > 0)
				goto found;

			sect_ptr2 = (COFF_SCNHDR *) &((char *)sect_ptr2)[COFF_SCNHSZ];
		}

		/*
		 * If no .comments section was found there is no way to
		 * figure out the personality. Odds on it is SCO though...
		 */
		personality = PER_SCOSVR3;

found:
		if (lPers)
			personality = lPers;
		if ((personality & 0xFF) == (current->personality & 0xFF))
			set_personality(0);
		set_personality(personality);

#if defined(CONFIG_ABI_TRACE)
	abi_trace(ABI_TRACE_UNIMPL,"Personality %08lX assigned\n",personality);
#endif
#ifdef CONFIG_64BIT
		set_thread_flag(TIF_IA32);
		clear_thread_flag(TIF_ABI_PENDING);
#endif
		current->mm->start_data = 0;
		current->mm->end_data = 0;
		current->mm->end_code = 0;
		current->mm->mmap = NULL;
		current->flags &= ~PF_FORKNOEXEC;
#ifdef set_mm_counter
#if _KSL > 14
		set_mm_counter(current->mm, file_rss, 0);
#else
		set_mm_counter(current->mm, rss, 0);
#endif
#else
		current->mm->rss = 0;
#endif
		/*
		 * Construct the parameter and environment
		 * string table entries.
		 */
#if _KSL > 10
		if ((status = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT)) < 0)
#else
		if ((status = setup_arg_pages(bprm, EXSTACK_DEFAULT)) < 0)
#endif
			goto sigsegv;

		p = (u_long)coff_mktables((char *)bprm->p,
				bprm->argc, bprm->envc);

		current->mm->end_code = text.size +
		    (current->mm->start_code = text.vaddr);
		current->mm->end_data = data.size +
		    (current->mm->start_data = data.vaddr);
		current->mm->brk = bss.size +
		    (current->mm->start_brk = bss.vaddr);

		current->mm->start_stack = p;
#if _KSL > 28
		install_exec_creds(bprm);
#else
 		compute_creds(bprm);
#endif

#if _KSL < 15
#ifdef CONFIG_64BIT
		__asm__ volatile (
		"movl %0,%%fs; movl %0,%%es; movl %0,%%ds"
		: :"r" (0));
		__asm__ volatile (
		"pushf; cli; swapgs; movl %0,%%gs; mfence; swapgs; popf"
		: :"r" (0));
		write_pda(oldrsp,p);
		_FLG(regs) = 0x200;
#else
		__asm__ volatile (
		"movl %0,%%fs ; movl %0,%%gs"
		: :"r" (0));
		_DS(regs) = _ES(regs) = __USER_DS;
#endif
		_SS(regs) = __USER_DS;
		_SP(regs) = p;
		_CS(regs) = __USER_CS;
		_IP(regs) = start_addr;
		set_fs(USER_DS);
#else
		start_thread(regs, start_addr, p);
#endif

#ifdef CONFIG_64BIT
	__asm__ volatile("movl %0,%%es; movl %0,%%ds": :"r" (__USER32_DS));
	_SS(regs) = __USER32_DS;
	_CS(regs) = __USER32_CS;
#endif
	}

	old_fs = get_fs();
	set_fs(get_ds());

	if (!pageable) {
		/*
		 * Read the file from disk...
		 *
		 * XXX: untested.
		 */
		loff_t pos = data.scnptr;
		status = do_brk(text.vaddr, text.size);
		bprm->file->f_op->read(bprm->file,
				(char *)data.vaddr, data.scnptr, &pos);
		status = do_brk(data.vaddr, data.size);
		bprm->file->f_op->read(bprm->file,
				(char *)text.vaddr, text.scnptr, &pos);
		status = 0;
	} else {
		/* map the text pages...*/
		cap_mmap(1);
		m_addr = map_coff(bprm->file, &text, PROT_READ | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
			text.scnptr & PAGE_MASK);
		if(!ce) cap_mmap(2);

		if (m_addr != (text.vaddr & PAGE_MASK)) {
			status = -ENOEXEC;
			set_fs(old_fs);
			goto out_free_file;
		}

		/* map the data pages */
		if (data.size != 0) {
			cap_mmap(1);
			m_addr = map_coff(bprm->file, &data,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
			    data.scnptr & PAGE_MASK);
			if(!ce) cap_mmap(2);

			if (m_addr != (data.vaddr & PAGE_MASK)) {
				status = -ENOEXEC;
				set_fs(old_fs);
				goto out_free_file;
			}
		}

		status = 0;
	}

	/*
	 * Construct the bss data for the process. The bss ranges from the
	 * end of the data (which may not be on a page boundary) to the end
	 * of the bss section. Allocate any necessary pages for the data.
	 */
	if (bss.size != 0) {
		cap_mmap(1);
		down_write(&current->mm->mmap_sem);
		do_mmap(NULL, PAGE_ALIGN(bss.vaddr),
			bss.size + bss.vaddr -
			PAGE_ALIGN(bss.vaddr),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_32BIT, 0);
		up_write(&current->mm->mmap_sem);
		if(!ce) cap_mmap(2);

		if ((status = coff_clear_memory(bss.vaddr, bss.size)) < 0) {
			set_fs(old_fs);
			goto out_free_file;
		}
	}

	set_fs(old_fs);

	if (!binary)
		goto out_free_file;

	/*
	 * Load any shared library for the executable.
	 */
	if (lib_count)
		status = coff_preload_shlibs(bprm, sect_bufr, sections);

	set_binfmt(&coff_format);

	/*
	 * Generate any needed trap for this process. If an error occurred then
	 * generate a segmentation violation. If the process is being debugged
	 * then generate the load trap. (Note: If this is a library load then
	 * do not generate the trap here. Pass the error to the caller who
	 * will do it for the process in the outer layer of this procedure call.)
	 */
	if (status < 0) {
sigsegv:
		printk(KERN_WARNING "coff: trapping process with SEGV\n");
		send_sig(SIGSEGV, current, 0);	/* Generate the error trap  */
	}
#ifdef CONFIG_PTRACE
	/* --- Red Hat specific handling --- */
#else
	else if (current->ptrace & PT_PTRACED)
		 send_sig(SIGTRAP, current, 0);
#endif
	/* We are committed. It can't fail */
	status = 0;

out_free_file:
	SYS(close,coff_exec_fileno);

out_free_buf:
	kfree(sect_bufr);
	return (status);
}
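The COFF loader above never dereferences header fields directly; everything goes through COFF_SHORT()/COFF_LONG(), which live in the ABI support headers and are not part of the excerpt. The helpers below are a hypothetical equivalent, shown purely to illustrate the little-endian byte assembly those macros perform; the names coff_short/coff_long are illustrative, not the real macro definitions.

/* Sketch of little-endian accessors in the spirit of COFF_SHORT/COFF_LONG:
 * COFF header fields are stored as byte arrays on disk, so they are assembled
 * byte by byte instead of being cast and dereferenced, which keeps the loader
 * independent of host alignment and byte order. */
static inline unsigned short coff_short(const unsigned char *p)
{
	return (unsigned short)(p[0] | (p[1] << 8));
}

static inline unsigned long coff_long(const unsigned char *p)
{
	return (unsigned long)p[0] | ((unsigned long)p[1] << 8) |
	       ((unsigned long)p[2] << 16) | ((unsigned long)p[3] << 24);
}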