/* These are the functions used to load ELF style executables and shared * libraries. There is no binary dependent code anywhere else. */ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs) { struct elfhdr elf_ex, interp_elf_ex; struct file *interpreter; struct elf_phdr *elf_phdata, *elf_ihdr, *elf_ephdr; unsigned int load_addr, elf_bss, elf_brk; unsigned int elf_entry, interp_load_addr = 0; unsigned int start_code, end_code, end_data, elf_stack; int retval, has_interp, has_ephdr, size, i; char *elf_interpreter; mm_segment_t old_fs; load_addr = 0; has_interp = has_ephdr = 0; elf_ihdr = elf_ephdr = NULL; elf_ex = *((struct elfhdr *) bprm->buf); retval = -ENOEXEC; if (verify_binary(&elf_ex, bprm)) goto out; /* * Telling -o32 static binaries from Linux and Irix apart from each * other is difficult. There are 2 differences to be noted for static * binaries from the 2 operating systems: * * 1) Irix binaries have their .text section before their .init * section. Linux binaries are just the opposite. * * 2) Irix binaries usually have <= 12 sections and Linux * binaries have > 20. * * We will use Method #2 since Method #1 would require us to read in * the section headers which is way too much overhead. This appears * to work for everything we have ran into so far. If anyone has a * better method to tell the binaries apart, I'm listening. */ if (elf_ex.e_shnum > 20) goto out; print_elfhdr(&elf_ex); /* Now read in all of the header information */ size = elf_ex.e_phentsize * elf_ex.e_phnum; if (size > 65536) goto out; elf_phdata = kmalloc(size, GFP_KERNEL); if (elf_phdata == NULL) { retval = -ENOMEM; goto out; } retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *)elf_phdata, size); if (retval < 0) goto out_free_ph; dump_phdrs(elf_phdata, elf_ex.e_phnum); /* Set some things for later. 
*/ for (i = 0; i < elf_ex.e_phnum; i++) { switch (elf_phdata[i].p_type) { case PT_INTERP: has_interp = 1; elf_ihdr = &elf_phdata[i]; break; case PT_PHDR: has_ephdr = 1; elf_ephdr = &elf_phdata[i]; break; }; } pr_debug("\n"); elf_bss = 0; elf_brk = 0; elf_stack = 0xffffffff; elf_interpreter = NULL; start_code = 0xffffffff; end_code = 0; end_data = 0; /* * If we get a return value, we change the value to be ENOEXEC * so that we can exit gracefully and the main binary format * search loop in 'fs/exec.c' will move onto the next handler * which should be the normal ELF binary handler. */ retval = look_for_irix_interpreter(&elf_interpreter, &interpreter, &interp_elf_ex, elf_phdata, bprm, elf_ex.e_phnum); if (retval) { retval = -ENOEXEC; goto out_free_file; } if (elf_interpreter) { retval = verify_irix_interpreter(&interp_elf_ex); if (retval) goto out_free_interp; } /* OK, we are done with that, now set up the arg stuff, * and then start this sucker up. */ retval = -E2BIG; if (!bprm->sh_bang && !bprm->p) goto out_free_interp; /* Flush all traces of the currently running executable */ retval = flush_old_exec(bprm); if (retval) goto out_free_dentry; /* OK, This is the point of no return */ current->mm->end_data = 0; current->mm->end_code = 0; current->mm->mmap = NULL; current->flags &= ~PF_FORKNOEXEC; elf_entry = (unsigned int) elf_ex.e_entry; /* Do this so that we can load the interpreter, if need be. We will * change some of these later. */ setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT); current->mm->start_stack = bprm->p; /* At this point, we assume that the image should be loaded at * fixed address, not at a variable address. 
*/ old_fs = get_fs(); set_fs(get_ds()); map_executable(bprm->file, elf_phdata, elf_ex.e_phnum, &elf_stack, &load_addr, &start_code, &elf_bss, &end_code, &end_data, &elf_brk); if (elf_interpreter) { retval = map_interpreter(elf_phdata, &interp_elf_ex, interpreter, &interp_load_addr, elf_ex.e_phnum, old_fs, &elf_entry); kfree(elf_interpreter); if (retval) { set_fs(old_fs); printk("Unable to load IRIX ELF interpreter\n"); send_sig(SIGSEGV, current, 0); retval = 0; goto out_free_file; } } set_fs(old_fs); kfree(elf_phdata); set_personality(PER_IRIX32); set_binfmt(&irix_format); compute_creds(bprm); current->flags &= ~PF_FORKNOEXEC; bprm->p = (unsigned long) create_irix_tables((char *)bprm->p, bprm->argc, bprm->envc, (elf_interpreter ? &elf_ex : NULL), load_addr, interp_load_addr, regs, elf_ephdr); current->mm->start_brk = current->mm->brk = elf_brk; current->mm->end_code = end_code; current->mm->start_code = start_code; current->mm->end_data = end_data; current->mm->start_stack = bprm->p; /* Calling set_brk effectively mmaps the pages that we need for the * bss and break sections. */ set_brk(elf_bss, elf_brk); /* * IRIX maps a page at 0x200000 which holds some system * information. Programs depend on this. */ irix_map_prda_page(); padzero(elf_bss); pr_debug("(start_brk) %lx\n" , (long) current->mm->start_brk); pr_debug("(end_code) %lx\n" , (long) current->mm->end_code); pr_debug("(start_code) %lx\n" , (long) current->mm->start_code); pr_debug("(end_data) %lx\n" , (long) current->mm->end_data); pr_debug("(start_stack) %lx\n" , (long) current->mm->start_stack); pr_debug("(brk) %lx\n" , (long) current->mm->brk); #if 0 /* XXX No f*****g way dude... */ /* Why this, you ask??? Well SVr4 maps page 0 as read-only, * and some applications "depend" upon this behavior. * Since we do not have the power to recompile these, we * emulate the SVr4 behavior. Sigh. 
*/ down_write(¤t->mm->mmap_sem); (void) do_mmap(NULL, 0, 4096, PROT_READ | PROT_EXEC, MAP_FIXED | MAP_PRIVATE, 0); up_write(¤t->mm->mmap_sem); #endif start_thread(regs, elf_entry, bprm->p); if (current->ptrace & PT_PTRACED) send_sig(SIGTRAP, current, 0); return 0; out: return retval; out_free_dentry: allow_write_access(interpreter); fput(interpreter); out_free_interp: kfree(elf_interpreter); out_free_file: out_free_ph: kfree(elf_phdata); goto out; }
static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) { struct exec ex; unsigned long error; unsigned long fd_offset; unsigned long rlim; int retval; ex = *((struct exec *) bprm->buf); /* exec-header */ if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != OMAGIC && N_MAGIC(ex) != QMAGIC && N_MAGIC(ex) != NMAGIC) || N_TRSIZE(ex) || N_DRSIZE(ex) || bprm->file->f_dentry->d_inode->i_size < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) { return -ENOEXEC; } fd_offset = N_TXTOFF(ex); /* Check initial limits. This avoids letting people circumvent * size limits imposed on them by creating programs with large * arrays in the data or bss. */ rlim = current->rlim[RLIMIT_DATA].rlim_cur; if (rlim >= RLIM_INFINITY) rlim = ~0; if (ex.a_data + ex.a_bss > rlim) return -ENOMEM; /* Flush all traces of the currently running executable */ retval = flush_old_exec(bprm); if (retval) return retval; /* OK, This is the point of no return */ #if defined(__alpha__) SET_AOUT_PERSONALITY(bprm, ex); #elif defined(__sparc__) set_personality(PER_SUNOS); #if !defined(__sparc_v9__) memcpy(¤t->thread.core_exec, &ex, sizeof(struct exec)); #endif #else set_personality(PER_LINUX); #endif current->mm->end_code = ex.a_text + (current->mm->start_code = N_TXTADDR(ex)); current->mm->end_data = ex.a_data + (current->mm->start_data = N_DATADDR(ex)); current->mm->brk = ex.a_bss + (current->mm->start_brk = N_BSSADDR(ex)); current->mm->rss = 0; current->mm->mmap = NULL; #ifdef CONFIG_ARM_FASS arch_new_mm(current, current->mm); #endif compute_creds(bprm); current->flags &= ~PF_FORKNOEXEC; #ifdef __sparc__ if (N_MAGIC(ex) == NMAGIC) { loff_t pos = fd_offset; /* F**k me plenty... 
*/ /* <AOL></AOL> */ error = do_brk(N_TXTADDR(ex), ex.a_text); bprm->file->f_op->read(bprm->file, (char *) N_TXTADDR(ex), ex.a_text, &pos); error = do_brk(N_DATADDR(ex), ex.a_data); bprm->file->f_op->read(bprm->file, (char *) N_DATADDR(ex), ex.a_data, &pos); goto beyond_if; } #endif if (N_MAGIC(ex) == OMAGIC) { unsigned long text_addr, map_size; loff_t pos; text_addr = N_TXTADDR(ex); #if defined(__alpha__) || defined(__sparc__) pos = fd_offset; map_size = ex.a_text+ex.a_data + PAGE_SIZE - 1; #else pos = 32; map_size = ex.a_text+ex.a_data; #endif error = do_brk(text_addr & PAGE_MASK, map_size); if (error != (text_addr & PAGE_MASK)) { send_sig(SIGKILL, current, 0); return error; } error = bprm->file->f_op->read(bprm->file, (char *)text_addr, ex.a_text+ex.a_data, &pos); if ((signed long)error < 0) { send_sig(SIGKILL, current, 0); return error; } flush_icache_range(text_addr, text_addr+ex.a_text+ex.a_data); } else { static unsigned long error_time, error_time2; if ((ex.a_text & 0xfff || ex.a_data & 0xfff) && (N_MAGIC(ex) != NMAGIC) && (jiffies-error_time2) > 5*HZ) { printk(KERN_NOTICE "executable not page aligned\n"); error_time2 = jiffies; } if ((fd_offset & ~PAGE_MASK) != 0 && (jiffies-error_time) > 5*HZ) { printk(KERN_WARNING "fd_offset is not page aligned. 
Please convert program: %s\n", bprm->file->f_dentry->d_name.name); error_time = jiffies; } if (!bprm->file->f_op->mmap||((fd_offset & ~PAGE_MASK) != 0)) { loff_t pos = fd_offset; do_brk(N_TXTADDR(ex), ex.a_text+ex.a_data); bprm->file->f_op->read(bprm->file,(char *)N_TXTADDR(ex), ex.a_text+ex.a_data, &pos); flush_icache_range((unsigned long) N_TXTADDR(ex), (unsigned long) N_TXTADDR(ex) + ex.a_text+ex.a_data); goto beyond_if; } down_write(¤t->mm->mmap_sem); error = do_mmap(bprm->file, N_TXTADDR(ex), ex.a_text, PROT_READ | PROT_EXEC, MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE, fd_offset); up_write(¤t->mm->mmap_sem); if (error != N_TXTADDR(ex)) { send_sig(SIGKILL, current, 0); return error; } down_write(¤t->mm->mmap_sem); error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE, fd_offset + ex.a_text); up_write(¤t->mm->mmap_sem); if (error != N_DATADDR(ex)) { send_sig(SIGKILL, current, 0); return error; } } beyond_if: set_binfmt(&aout_format); set_brk(current->mm->start_brk, current->mm->brk); retval = setup_arg_pages(bprm); if (retval < 0) { /* Someone check-me: is this error path enough? */ send_sig(SIGKILL, current, 0); return retval; } current->mm->start_stack = (unsigned long) create_aout_tables((char *) bprm->p, bprm); #ifdef __alpha__ regs->gp = ex.a_gpvalue; #endif start_thread(regs, ex.a_entry, current->mm->start_stack); if (current->ptrace & PT_PTRACED) send_sig(SIGTRAP, current, 0); #ifndef __arm__ return 0; #else return regs->ARM_r0; #endif }
static int CVE_2010_0307_linux2_6_27_31_load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) { struct file *interpreter = NULL; /* to shut gcc up */ unsigned long load_addr = 0, load_bias = 0; int load_addr_set = 0; char * elf_interpreter = NULL; unsigned long error; struct elf_phdr *elf_ppnt, *elf_phdata; unsigned long elf_bss, elf_brk; int elf_exec_fileno; int retval, i; unsigned int size; unsigned long elf_entry; unsigned long interp_load_addr = 0; unsigned long start_code, end_code, start_data, end_data; unsigned long reloc_func_desc = 0; int executable_stack = EXSTACK_DEFAULT; unsigned long def_flags = 0; struct { struct elfhdr elf_ex; struct elfhdr interp_elf_ex; } *loc; loc = kmalloc(sizeof(*loc), GFP_KERNEL); if (!loc) { retval = -ENOMEM; goto out_ret; } /* Get the exec-header */ loc->elf_ex = *((struct elfhdr *)bprm->buf); retval = -ENOEXEC; /* First of all, some simple consistency checks */ if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0) goto out; if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN) goto out; if (!elf_check_arch(&loc->elf_ex)) goto out; if (!bprm->file->f_op||!bprm->file->f_op->mmap) goto out; /* Now read in all of the header information */ if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr)) goto out; if (loc->elf_ex.e_phnum < 1 || loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr)) goto out; size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr); retval = -ENOMEM; elf_phdata = kmalloc(size, GFP_KERNEL); if (!elf_phdata) goto out; retval = kernel_read(bprm->file, loc->elf_ex.e_phoff, (char *)elf_phdata, size); if (retval != size) { if (retval >= 0) retval = -EIO; goto out_free_ph; } retval = get_unused_fd(); if (retval < 0) goto out_free_ph; get_file(bprm->file); fd_install(elf_exec_fileno = retval, bprm->file); elf_ppnt = elf_phdata; elf_bss = 0; elf_brk = 0; start_code = ~0UL; end_code = 0; start_data = 0; end_data = 0; for (i = 0; i < loc->elf_ex.e_phnum; i++) { if (elf_ppnt->p_type == PT_INTERP) 
{ /* This is the program interpreter used for * shared libraries - for now assume that this * is an a.out format binary */ retval = -ENOEXEC; if (elf_ppnt->p_filesz > PATH_MAX || elf_ppnt->p_filesz < 2) goto out_free_file; retval = -ENOMEM; elf_interpreter = kmalloc(elf_ppnt->p_filesz, GFP_KERNEL); if (!elf_interpreter) goto out_free_file; retval = kernel_read(bprm->file, elf_ppnt->p_offset, elf_interpreter, elf_ppnt->p_filesz); if (retval != elf_ppnt->p_filesz) { if (retval >= 0) retval = -EIO; goto out_free_interp; } /* make sure path is NULL terminated */ retval = -ENOEXEC; if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0') goto out_free_interp; /* * The early SET_PERSONALITY here is so that the lookup * for the interpreter happens in the namespace of the * to-be-execed image. SET_PERSONALITY can select an * alternate root. * * However, SET_PERSONALITY is NOT allowed to switch * this task into the new images's memory mapping * policy - that is, TASK_SIZE must still evaluate to * that which is appropriate to the execing application. * This is because exit_mmap() needs to have TASK_SIZE * evaluate to the size of the old image. * * So if (say) a 64-bit application is execing a 32-bit * application it is the architecture's responsibility * to defer changing the value of TASK_SIZE until the * switch really is going to happen - do this in * flush_thread(). - akpm */ SET_PERSONALITY(loc->elf_ex, 0); interpreter = open_exec(elf_interpreter); retval = PTR_ERR(interpreter); if (IS_ERR(interpreter)) goto out_free_interp; /* * If the binary is not readable then enforce * mm->dumpable = 0 regardless of the interpreter's * permissions. 
*/ if (file_permission(interpreter, MAY_READ) < 0) bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP; retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE); if (retval != BINPRM_BUF_SIZE) { if (retval >= 0) retval = -EIO; goto out_free_dentry; } /* Get the exec headers */ loc->interp_elf_ex = *((struct elfhdr *)bprm->buf); break; } elf_ppnt++; } elf_ppnt = elf_phdata; for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) if (elf_ppnt->p_type == PT_GNU_STACK) { if (elf_ppnt->p_flags & PF_X) executable_stack = EXSTACK_ENABLE_X; else executable_stack = EXSTACK_DISABLE_X; break; } /* Some simple consistency checks for the interpreter */ if (elf_interpreter) { retval = -ELIBBAD; /* Not an ELF interpreter */ if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0) goto out_free_dentry; /* Verify the interpreter has a valid arch */ if (!elf_check_arch(&loc->interp_elf_ex)) goto out_free_dentry; } else { /* Executables without an interpreter also need a personality */ SET_PERSONALITY(loc->elf_ex, 0); } /* Flush all traces of the currently running executable */ retval = flush_old_exec(bprm); if (retval) goto out_free_dentry; /* OK, This is the point of no return */ current->flags &= ~PF_FORKNOEXEC; current->mm->def_flags = def_flags; /* Do this immediately, since STACK_TOP as used in setup_arg_pages may depend on the personality. */ SET_PERSONALITY(loc->elf_ex, 0); if (elf_read_implies_exec(loc->elf_ex, executable_stack)) current->personality |= READ_IMPLIES_EXEC; if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) current->flags |= PF_RANDOMIZE; arch_pick_mmap_layout(current->mm); /* Do this so that we can load the interpreter, if need be. 
We will change some of these later */ current->mm->free_area_cache = current->mm->mmap_base; current->mm->cached_hole_size = 0; retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP), executable_stack); if (retval < 0) { send_sig(SIGKILL, current, 0); goto out_free_dentry; } current->mm->start_stack = bprm->p; /* Now we do a little grungy work by mmaping the ELF image into the correct location in memory. */ for(i = 0, elf_ppnt = elf_phdata; i < loc->elf_ex.e_phnum; i++, elf_ppnt++) { int elf_prot = 0, elf_flags; unsigned long k, vaddr; if (elf_ppnt->p_type != PT_LOAD) continue; if (unlikely (elf_brk > elf_bss)) { unsigned long nbyte; /* There was a PT_LOAD segment with p_memsz > p_filesz before this one. Map anonymous pages, if needed, and clear the area. */ retval = set_brk (elf_bss + load_bias, elf_brk + load_bias); if (retval) { send_sig(SIGKILL, current, 0); goto out_free_dentry; } nbyte = ELF_PAGEOFFSET(elf_bss); if (nbyte) { nbyte = ELF_MIN_ALIGN - nbyte; if (nbyte > elf_brk - elf_bss) nbyte = elf_brk - elf_bss; if (clear_user((void __user *)elf_bss + load_bias, nbyte)) { /* * This bss-zeroing can fail if the ELF * file specifies odd protections. So * we don't check the return value */ } } } if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ; if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE; if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC; elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE; vaddr = elf_ppnt->p_vaddr; if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) { elf_flags |= MAP_FIXED; } else if (loc->elf_ex.e_type == ET_DYN) { /* Try and get dynamic programs out of the way of the * default mmap base, as well as whatever program they * might try to exec. This is because the brk will * follow the loader, and is not movable. 
*/ #ifdef CONFIG_X86 load_bias = 0; #else load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr); #endif } error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags, 0); if (BAD_ADDR(error)) { send_sig(SIGKILL, current, 0); retval = IS_ERR((void *)error) ? PTR_ERR((void*)error) : -EINVAL; goto out_free_dentry; } if (!load_addr_set) { load_addr_set = 1; load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset); if (loc->elf_ex.e_type == ET_DYN) { load_bias += error - ELF_PAGESTART(load_bias + vaddr); load_addr += load_bias; reloc_func_desc = load_bias; } } k = elf_ppnt->p_vaddr; if (k < start_code) start_code = k; if (start_data < k) start_data = k; /* * Check to see if the section's size will overflow the * allowed task size. Note that p_filesz must always be * <= p_memsz so it is only necessary to check p_memsz. */ if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz || elf_ppnt->p_memsz > TASK_SIZE || TASK_SIZE - elf_ppnt->p_memsz < k) { /* set_brk can never work. Avoid overflows. */ send_sig(SIGKILL, current, 0); retval = -EINVAL; goto out_free_dentry; } k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz; if (k > elf_bss) elf_bss = k; if ((elf_ppnt->p_flags & PF_X) && end_code < k) end_code = k; if (end_data < k) end_data = k; k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz; if (k > elf_brk) elf_brk = k; } loc->elf_ex.e_entry += load_bias; elf_bss += load_bias; elf_brk += load_bias; start_code += load_bias; end_code += load_bias; start_data += load_bias; end_data += load_bias; /* Calling set_brk effectively mmaps the pages that we need * for the bss and break sections. We must do this before * mapping in the interpreter, to make sure it doesn't wind * up getting placed where the bss needs to go. */ retval = set_brk(elf_bss, elf_brk); if (retval) { send_sig(SIGKILL, current, 0); goto out_free_dentry; } if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) { send_sig(SIGSEGV, current, 0); retval = -EFAULT; /* Nobody gets to see this, but.. 
*/ goto out_free_dentry; } if (elf_interpreter) { unsigned long uninitialized_var(interp_map_addr); elf_entry = load_elf_interp(&loc->interp_elf_ex, interpreter, &interp_map_addr, load_bias); if (!IS_ERR((void *)elf_entry)) { /* * load_elf_interp() returns relocation * adjustment */ interp_load_addr = elf_entry; elf_entry += loc->interp_elf_ex.e_entry; } if (BAD_ADDR(elf_entry)) { force_sig(SIGSEGV, current); retval = IS_ERR((void *)elf_entry) ? (int)elf_entry : -EINVAL; goto out_free_dentry; } reloc_func_desc = interp_load_addr; allow_write_access(interpreter); fput(interpreter); kfree(elf_interpreter); } else { elf_entry = loc->elf_ex.e_entry; if (BAD_ADDR(elf_entry)) { force_sig(SIGSEGV, current); retval = -EINVAL; goto out_free_dentry; } } kfree(elf_phdata); sys_close(elf_exec_fileno); set_binfmt(&elf_format); #ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES retval = arch_setup_additional_pages(bprm, executable_stack); if (retval < 0) { send_sig(SIGKILL, current, 0); goto out; } #endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */ compute_creds(bprm); current->flags &= ~PF_FORKNOEXEC; retval = create_elf_tables(bprm, &loc->elf_ex, load_addr, interp_load_addr); if (retval < 0) { send_sig(SIGKILL, current, 0); goto out; } /* N.B. passed_fileno might not be initialized? */ current->mm->end_code = end_code; current->mm->start_code = start_code; current->mm->start_data = start_data; current->mm->end_data = end_data; current->mm->start_stack = bprm->p; #ifdef arch_randomize_brk if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) current->mm->brk = current->mm->start_brk = arch_randomize_brk(current->mm); #endif if (current->personality & MMAP_PAGE_ZERO) { /* Why this, you ask??? Well SVr4 maps page 0 as read-only, and some applications "depend" upon this behavior. Since we do not have the power to recompile these, we emulate the SVr4 behavior. Sigh. 
*/ down_write(¤t->mm->mmap_sem); error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC, MAP_FIXED | MAP_PRIVATE, 0); up_write(¤t->mm->mmap_sem); } #ifdef ELF_PLAT_INIT /* * The ABI may specify that certain registers be set up in special * ways (on i386 %edx is the address of a DT_FINI function, for * example. In addition, it may also specify (eg, PowerPC64 ELF) * that the e_entry field is the address of the function descriptor * for the startup routine, rather than the address of the startup * routine itself. This macro performs whatever initialization to * the regs structure is required as well as any relocations to the * function descriptor entries when executing dynamically links apps. */ ELF_PLAT_INIT(regs, reloc_func_desc); #endif start_thread(regs, elf_entry, bprm->p); retval = 0; out: kfree(loc); out_ret: return retval; /* error cleanup */ out_free_dentry: allow_write_access(interpreter); if (interpreter) fput(interpreter); out_free_interp: kfree(elf_interpreter); out_free_file: sys_close(elf_exec_fileno); out_free_ph: kfree(elf_phdata); goto out; }
/** * \<\<public\>\> Restarts a process from a checkpoint (new binfmt * handler). This consists of: * * - creating a new checkpoint instance * - reading a checkpoint header - might fail, if the magic number * doesn't match. * - reading open files * - reading memory descriptor * - reading memory areas along with pages * - reading process state (registers) * - reading signal handlers * * @param *bprm - binary object that is passed to this method by * execve and contains a pointer to the executable file * @param *regs - references the registers of the process, so that * they can be overlayed from the checkpoint. * @return 0 upon success */ int tcmi_ckptcom_restart(struct linux_binprm *bprm, struct pt_regs *regs) { struct tcmi_ckpt *ckpt; struct pt_regs* original_regs; // int i; u64 beg_time = 0, end_time = 0; beg_time = cpu_clock(smp_processor_id()); memory_sanity_check("Start"); original_regs = kmalloc(sizeof(struct pt_regs), GFP_KERNEL); if ( !original_regs ) return -ENOMEM; if (!tcmi_ckpt_check_magic(bprm->buf)) { goto exit0; } mdbg(INFO3, "Restarting '%s'", bprm->filename); if (!(ckpt = tcmi_ckpt_new(bprm->file))) { mdbg(ERR3, "Failed to instantiate a checkpoint"); goto exit0; } if (tcmi_ckpt_read_hdr(ckpt) < 0) { mdbg(ERR3, "Error reading checkpoint header!"); goto exit1; } memory_sanity_check("Post header"); /* Flush all traces of the currently running executable */ if (flush_old_exec(bprm)) { mdbg(ERR3, "Error flushing the old execution context!"); goto exit0; } memory_sanity_check("Pre-rlimit"); if (tcmi_ckpt_read_rlimit(ckpt, current) < 0) { mdbg(ERR3, "Error reading checkpoint rlimit!"); goto exit1; } memory_sanity_check("Post-rlimit"); // Replaced macro FD_ISSET to function test_bit with equal parameters by Jiri Rakosnik if ( test_bit(0, current->files->fdt->open_fds) ) { mdbg(INFO3, "Closing open fs.. 
this should not happend though.."); sys_close(0); } memory_sanity_check("Pre files"); if (tcmi_ckpt_read_files(ckpt) < 0) { mdbg(ERR3, "Error reading checkpoint files!"); goto exit1; } memory_sanity_check("Post - files"); if (tcmi_ckpt_mm_read(ckpt) < 0) { mdbg(ERR3, "Error reading memory descriptor!"); goto exit1; } memory_sanity_check("Post mm"); if ( !ckpt->hdr.is_npm ) { if (tcmi_ckpt_read_vmas(ckpt) < 0) { mdbg(ERR3, "Error reading VM areas"); goto exit1; } } *original_regs = *regs; if (tcmi_ckpt_regs_read(ckpt, regs) < 0) { mdbg(ERR3, "Error reading processor registers descriptor!"); goto exit1; } if (tcmi_ckpt_tls_read(ckpt, current, regs) < 0) { mdbg(ERR3, "Error reading process tls!"); goto exit1; } if (tcmi_ckpt_fsstruct_read(ckpt, current) < 0) { mdbg(ERR3, "Error reading process fsstruct!"); goto exit1; } if (tcmi_ckpt_sig_read(ckpt) < 0) { mdbg(ERR3, "Error reading signals informations!"); goto exit1; } if ( ckpt->hdr.is_npm ) { struct tcmi_npm_params* params = vmalloc(sizeof(struct tcmi_npm_params)); int exec_result = -EFAULT; mm_segment_t old_fs; *regs = *original_regs; if ( !params ) { mdbg(ERR3, "Cannot allocate memory for npm params!"); goto exit1; } if (tcmi_ckpt_npm_params_read(ckpt, params) < 0) { mdbg(ERR3, "Error reading npm params!"); goto exit1; } tcmi_ckpt_put(ckpt); // TEMPORARY DEBUG! if ( current->mm ) mdbg(INFO2, "MM %p nr_ptes: %lu", current->mm, current->mm->nr_ptes); if ( current->active_mm ) mdbg(INFO2, "ACTIVE %p MM nr_ptes: %lu", current->active_mm, current->active_mm->nr_ptes); // TODO: This is something REALLY NASTY! Some better solution is appreciated if ( current->mm ) { current->mm->nr_ptes = 0; } old_fs = get_fs(); set_fs(KERNEL_DS); // TEMPORARY DEBUG! 
if ( current->mm ) mdbg(INFO2, "MM %p nr_ptes: %lu", current->mm, current->mm->nr_ptes); if ( current->active_mm ) mdbg(INFO2, "ACTIVE %p MM nr_ptes: %lu", current->active_mm, current->active_mm->nr_ptes); // We have to unlock temprarily guar to prevent recursive lock (we are calling recursive exceve). TODO: Some better solution? // Moved cred_guard_mutex to struct signal in new kernel 3.x.x => Fix by Jiri Rakosnik mutex_unlock(¤t->signal->cred_guard_mutex); current->fs->in_exec = 0; // Required to pass through 'check_unsafe_exec' mdbg(INFO3, "Going to execute '%s', Args: %d, Envps %d", params->file_name, params->argsc, params->envpc); //mdbg(INFO3, "Arg[0] '%s', Envp[0] '%s'", params->args[0], params->envp[0]); exec_result = do_execve(params->file_name, (const char __user * const __user *)params->args, (const char __user * const __user *)params->envp, regs); mdbg(INFO3, "NPM internal execution result %d", exec_result); // And now we relock again as the relock of outer execve will be attempted. // Moved cred_guard_mutex to struct signal in new kernel 3.x.x => Fix by Jiri Rakosnik if (mutex_lock_interruptible(¤t->signal->cred_guard_mutex)) { minfo(ERR3, "Failed to relock cred guard!"); } // TODO: How do we release reference to the binmt of the module that was used for this execve.. ? // Do we have to call "set_binfmt(&tcmi_ckptcom_format);" here or can we release it here? set_fs(old_fs); vfree(params); mdbg(INFO3, "NPM after param free"); if ( exec_result ) {// In case of error of internal execution, we have to return ENOEXEC return -EFAULT; } end_time = cpu_clock(smp_processor_id()); mdbg(INFO3, "Checkpoint NPM took '%llu' ms.'", (end_time - beg_time) / 1000000); printk("Checkpoint NPM took '%llu' ms.\n'", (end_time - beg_time) / 1000000); return 0; } else {
/*
 * load_elf_binary - very early (Linux 1.x era) ELF binfmt handler.
 * @bprm: binary parameters (exec header in bprm->buf, inode, args)
 * @regs: registers of the exec()ing task (i386: eip/esp rewritten)
 *
 * Loads an ET_EXEC image for EM_386/EM_486, optionally via an a.out or
 * ELF program interpreter named by a PT_INTERP segment.  Returns 0 on
 * success, negative errno on failure before flush_old_exec(); after
 * that point interpreter failures SIGSEGV the task and return 0.
 *
 * Code is preserved byte-for-byte from the historical source; review
 * notes below flag spots that look wrong but are kept as-is.
 */
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
{
	struct elfhdr elf_ex;
	struct elfhdr interp_elf_ex;
	struct file * file;
	struct exec interp_ex;
	struct inode *interpreter_inode;
	unsigned int load_addr;
	unsigned int interpreter_type = INTERPRETER_NONE;
	int i;
	int old_fs;
	int error;
	struct elf_phdr * elf_ppnt, *elf_phdata;
	int elf_exec_fileno;
	unsigned int elf_bss, k, elf_brk;
	int retval;
	char * elf_interpreter;
	unsigned int elf_entry;
	int status;
	unsigned int start_code, end_code, end_data;
	unsigned int elf_stack;
	char passed_fileno[6];

	status = 0;
	load_addr = 0;
	elf_ex = *((struct elfhdr *) bprm->buf);	/* exec-header */

	/* Magic-number check: 0x7f 'E' 'L' 'F'. */
	if (elf_ex.e_ident[0] != 0x7f ||
	    strncmp(&elf_ex.e_ident[1], "ELF",3) != 0)
		return -ENOEXEC;

	/* First of all, some simple consistency checks */
	if(elf_ex.e_type != ET_EXEC ||
	   (elf_ex.e_machine != EM_386 && elf_ex.e_machine != EM_486) ||
	   (!bprm->inode->i_op || !bprm->inode->i_op->default_file_ops ||
	    !bprm->inode->i_op->default_file_ops->mmap)){
		return -ENOEXEC;
	};

	/* Now read in all of the header information */
	/* NOTE(review): kmalloc result is not checked before read_exec
	 * writes into it — a NULL return would oops. */
	elf_phdata = (struct elf_phdr *) kmalloc(elf_ex.e_phentsize *
						 elf_ex.e_phnum, GFP_KERNEL);

	old_fs = get_fs();
	set_fs(get_ds());	/* read_exec copies into kernel buffers here */
	retval = read_exec(bprm->inode, elf_ex.e_phoff, (char *) elf_phdata,
			   elf_ex.e_phentsize * elf_ex.e_phnum);
	set_fs(old_fs);
	if (retval < 0) {
		kfree (elf_phdata);
		return retval;
	}

	elf_ppnt = elf_phdata;

	elf_bss = 0;
	elf_brk = 0;

	/* Keep an fd open on the executable; an a.out interpreter is told
	 * this fd number via argv (see passed_fileno below). */
	elf_exec_fileno = open_inode(bprm->inode, O_RDONLY);

	if (elf_exec_fileno < 0) {
		kfree (elf_phdata);
		return elf_exec_fileno;
	}

	file = current->files->fd[elf_exec_fileno];

	elf_stack = 0xffffffff;
	elf_interpreter = NULL;
	start_code = 0;
	end_code = 0;
	end_data = 0;

	old_fs = get_fs();
	set_fs(get_ds());

	/* Scan the program headers for a PT_INTERP entry and, if found,
	 * read the interpreter's path and its first 128 header bytes. */
	for(i=0;i < elf_ex.e_phnum; i++){
		if(elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			   shared libraries - for now assume that this
			   is an a.out format binary */
			/* NOTE(review): kmalloc result unchecked, and the
			 * path is not verified to be NUL-terminated. */
			elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
							   GFP_KERNEL);

			retval = read_exec(bprm->inode,elf_ppnt->p_offset,elf_interpreter,
					   elf_ppnt->p_filesz);
#if 0
			printk("Using ELF interpreter %s\n", elf_interpreter);
#endif
			if(retval >= 0)
				retval = namei(elf_interpreter, &interpreter_inode);
			if(retval >= 0)
				retval = read_exec(interpreter_inode,0,bprm->buf,128);

			if(retval >= 0){
				/* Read both header interpretations; which one
				 * applies is decided later from the magics. */
				interp_ex = *((struct exec *) bprm->buf);		/* exec-header */
				interp_elf_ex = *((struct elfhdr *) bprm->buf);	  /* exec-header */
			};
			if(retval < 0) {
				kfree (elf_phdata);
				kfree(elf_interpreter);
				return retval;
			};
		};
		elf_ppnt++;
	};

	set_fs(old_fs);

	/* Some simple consistency checks for the interpreter */
	if(elf_interpreter){
		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
		if(retval < 0) {
			kfree(elf_interpreter);
			kfree(elf_phdata);
			return -ELIBACC;
		};
		/* Now figure out which format our binary is */
		if((N_MAGIC(interp_ex) != OMAGIC) &&
		   (N_MAGIC(interp_ex) != ZMAGIC) &&
		   (N_MAGIC(interp_ex) != QMAGIC))
			interpreter_type = INTERPRETER_ELF;

		if (interp_elf_ex.e_ident[0] != 0x7f ||
		    strncmp(&interp_elf_ex.e_ident[1], "ELF",3) != 0)
			interpreter_type &= ~INTERPRETER_ELF;

		if(!interpreter_type) {
			kfree(elf_interpreter);
			kfree(elf_phdata);
			return -ELIBBAD;
		};
	}

	/* OK, we are done with that, now set up the arg stuff,
	   and then start this sucker up */

	if (!bprm->sh_bang) {
		char * passed_p;

		if(interpreter_type == INTERPRETER_AOUT) {
			/* Pass the executable's fd number to the a.out
			 * interpreter as an extra argv string. */
			sprintf(passed_fileno, "%d", elf_exec_fileno);
			passed_p = passed_fileno;

			if(elf_interpreter) {
				bprm->p = copy_strings(1,&passed_p,bprm->page,bprm->p,2);
				bprm->argc++;
			};
		};
		if (!bprm->p) {
			/* Argument area overflowed. */
			if(elf_interpreter) {
				kfree(elf_interpreter);
			}
			kfree (elf_phdata);
			return -E2BIG;
		}
	}

	/* OK, This is the point of no return */
	flush_old_exec(bprm);

	current->mm->end_data = 0;
	current->mm->end_code = 0;
	current->mm->start_mmap = ELF_START_MMAP;
	current->mm->mmap = NULL;
	elf_entry = (unsigned int) elf_ex.e_entry;

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	current->mm->rss = 0;
	bprm->p += change_ldt(0, bprm->page);
	current->mm->start_stack = bprm->p;

	/* Now we do a little grungy work by mmaping the ELF image into
	   the correct location in memory.  At this point, we assume that
	   the image should be loaded at fixed address, not at a variable
	   address. */

	old_fs = get_fs();
	set_fs(get_ds());

	elf_ppnt = elf_phdata;
	for(i=0;i < elf_ex.e_phnum; i++){

		if(elf_ppnt->p_type == PT_INTERP) {
			/* Set these up so that we are able to load the interpreter */
			/* Now load the interpreter into user address space */
			set_fs(old_fs);

			if(interpreter_type & 1) elf_entry =
				load_aout_interp(&interp_ex, interpreter_inode);

			if(interpreter_type & 2) elf_entry =
				load_elf_interp(&interp_elf_ex, interpreter_inode);

			old_fs = get_fs();
			set_fs(get_ds());

			iput(interpreter_inode);
			kfree(elf_interpreter);

			/* 0xffffffff is the interpreter loaders' error
			 * sentinel; past the point of no return we can only
			 * kill the task (and report exec "success"). */
			if(elf_entry == 0xffffffff) {
				printk("Unable to load interpreter\n");
				kfree(elf_phdata);
				send_sig(SIGSEGV, current, 0);
				return 0;
			};
		};

		if(elf_ppnt->p_type == PT_LOAD) {
			/* Page-align (4 KiB masks hard-coded) and map the
			 * segment from the file. */
			error = do_mmap(file,
					elf_ppnt->p_vaddr & 0xfffff000,
					elf_ppnt->p_filesz + (elf_ppnt->p_vaddr & 0xfff),
					PROT_READ | PROT_WRITE | PROT_EXEC,
					MAP_FIXED | MAP_PRIVATE,
					elf_ppnt->p_offset & 0xfffff000);

#ifdef LOW_ELF_STACK
			if(elf_ppnt->p_vaddr & 0xfffff000 < elf_stack)
				elf_stack = elf_ppnt->p_vaddr & 0xfffff000;
#endif

			if(!load_addr)
				load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
			k = elf_ppnt->p_vaddr;
			/* NOTE(review): this tracks the MAXIMUM p_vaddr as
			 * "start_code"; later kernels track the minimum.
			 * Looks inverted — preserved as-is. */
			if(k > start_code) start_code = k;
			k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
			if(k > elf_bss) elf_bss = k;
			/* NOTE(review): bitwise OR with PROT_WRITE is always
			 * non-zero, so this condition reduces to
			 * end_code < k; presumably `& PF_X` or similar was
			 * intended — preserved as-is. */
			if((elf_ppnt->p_flags | PROT_WRITE) && end_code < k)
				end_code = k;
			if(end_data < k) end_data = k;
			k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
			if(k > elf_brk) elf_brk = k;
		};
		elf_ppnt++;
	};
	set_fs(old_fs);

	kfree(elf_phdata);

	/* The a.out interpreter still needs elf_exec_fileno (passed via
	 * argv above), so only close it in the other cases. */
	if(interpreter_type != INTERPRETER_AOUT) sys_close(elf_exec_fileno);

	/* The following 3 lines need a little bit of work if we are loading
	   an iBCS2 binary.  We should initially load it this way, and if we
	   get a lcall7, then we should look to see if the iBCS2 execution
	   profile is present.  If it is, then switch to that, otherwise
	   bomb. */
	current->personality = PER_LINUX;
	current->lcall7 = no_lcall7;
	current->signal_map = current->signal_invmap = ident_map;

	current->executable = bprm->inode;
	bprm->inode->i_count++;
#ifdef LOW_ELF_STACK
	current->start_stack = p = elf_stack - 4;
#endif
	bprm->p -= MAX_ARG_PAGES*PAGE_SIZE;
	bprm->p = (unsigned long)
		create_elf_tables((char *)bprm->p,
				  bprm->argc,
				  bprm->envc,
				  (interpreter_type == INTERPRETER_ELF ? &elf_ex : NULL),
				  load_addr,
				  (interpreter_type == INTERPRETER_AOUT ? 0 : 1));
	if(interpreter_type == INTERPRETER_AOUT)
		current->mm->arg_start += strlen(passed_fileno) + 1;
	current->mm->start_brk = current->mm->brk = elf_brk;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;
	current->suid = current->euid = bprm->e_uid;
	current->sgid = current->egid = bprm->e_gid;

	/* Calling sys_brk effectively mmaps the pages that we need for the
	   bss and break sections */
	current->mm->brk = (elf_bss + 0xfff) & 0xfffff000;
	sys_brk((elf_brk + 0xfff) & 0xfffff000);

	padzero(elf_bss);

	/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
	   and some applications "depend" upon this behavior.
	   Since we do not have the power to recompile these, we
	   emulate the SVr4 behavior.  Sigh.  */
	error = do_mmap(NULL, 0, 4096, PROT_READ | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE, 0);

	regs->eip = elf_entry;		/* eip, magic happens :-) */
	regs->esp = bprm->p;		/* stack pointer */
	if (current->flags & PF_PTRACED)
		send_sig(SIGTRAP, current, 0);
	return 0;
}
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs) { struct file *interpreter = NULL; /* to shut gcc up */ unsigned long load_addr = 0, load_bias = 0; int load_addr_set = 0; char * elf_interpreter = NULL; unsigned int interpreter_type = INTERPRETER_NONE; unsigned char ibcs2_interpreter = 0; unsigned long error; struct elf_phdr * elf_ppnt, *elf_phdata; unsigned long elf_bss, k, elf_brk; int elf_exec_fileno; int retval, i; unsigned int size; unsigned long elf_entry, interp_load_addr = 0; unsigned long start_code, end_code, start_data, end_data; unsigned long reloc_func_desc = 0; struct elfhdr elf_ex; struct elfhdr interp_elf_ex; struct exec interp_ex; char passed_fileno[6]; struct files_struct *files; /* Get the exec-header */ elf_ex = *((struct elfhdr *) bprm->buf); retval = -ENOEXEC; /* First of all, some simple consistency checks */ if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0) goto out; if (elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) goto out; if (!elf_check_arch(&elf_ex)) goto out; if (!bprm->file->f_op||!bprm->file->f_op->mmap) goto out; /* Now read in all of the header information */ if (elf_ex.e_phentsize != sizeof(struct elf_phdr)) goto out; if (elf_ex.e_phnum < 1 || elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr)) goto out; size = elf_ex.e_phnum * sizeof(struct elf_phdr); retval = -ENOMEM; elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL); if (!elf_phdata) goto out; retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *) elf_phdata, size); if (retval != size) { if (retval >= 0) retval = -EIO; goto out_free_ph; } files = current->files; /* Refcounted so ok */ retval = unshare_files(); if (retval < 0) goto out_free_ph; if (files == current->files) { put_files_struct(files); files = NULL; } /* exec will make our files private anyway, but for the a.out loader stuff we need to do it earlier */ retval = get_unused_fd(); if (retval < 0) goto out_free_fh; get_file(bprm->file); fd_install(elf_exec_fileno = retval, 
bprm->file); elf_ppnt = elf_phdata; elf_bss = 0; elf_brk = 0; start_code = ~0UL; end_code = 0; start_data = 0; end_data = 0; for (i = 0; i < elf_ex.e_phnum; i++) { if (elf_ppnt->p_type == PT_INTERP) { /* This is the program interpreter used for * shared libraries - for now assume that this * is an a.out format binary */ retval = -ENOEXEC; if (elf_ppnt->p_filesz > PATH_MAX || elf_ppnt->p_filesz < 2) goto out_free_file; retval = -ENOMEM; elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz, GFP_KERNEL); if (!elf_interpreter) goto out_free_file; retval = kernel_read(bprm->file, elf_ppnt->p_offset, elf_interpreter, elf_ppnt->p_filesz); if (retval != elf_ppnt->p_filesz) { if (retval >= 0) retval = -EIO; goto out_free_interp; } /* make sure path is NULL terminated */ retval = -ENOEXEC; if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0') goto out_free_interp; /* If the program interpreter is one of these two, * then assume an iBCS2 image. Otherwise assume * a native linux image. */ if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 || strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) ibcs2_interpreter = 1; #if 0 printk("Using ELF interpreter %s\n", elf_interpreter); #endif SET_PERSONALITY(elf_ex, ibcs2_interpreter); interpreter = open_exec(elf_interpreter); retval = PTR_ERR(interpreter); if (IS_ERR(interpreter)) goto out_free_interp; retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE); if (retval != BINPRM_BUF_SIZE) { if (retval >= 0) retval = -EIO; goto out_free_dentry; } /* Get the exec headers */ interp_ex = *((struct exec *) bprm->buf); interp_elf_ex = *((struct elfhdr *) bprm->buf); break; } elf_ppnt++; } /* Some simple consistency checks for the interpreter */ if (elf_interpreter) { interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT; /* Now figure out which format our binary is */ if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) && (N_MAGIC(interp_ex) != QMAGIC)) interpreter_type = INTERPRETER_ELF; if 
(memcmp(interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0) interpreter_type &= ~INTERPRETER_ELF; retval = -ELIBBAD; if (!interpreter_type) goto out_free_dentry; /* Make sure only one type was selected */ if ((interpreter_type & INTERPRETER_ELF) && interpreter_type != INTERPRETER_ELF) { // FIXME - ratelimit this before re-enabling // printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n"); interpreter_type = INTERPRETER_ELF; } /* Verify the interpreter has a valid arch */ if ((interpreter_type == INTERPRETER_ELF) && !elf_check_arch(&interp_elf_ex)) goto out_free_dentry; } else { /* Executables without an interpreter also need a personality */ SET_PERSONALITY(elf_ex, ibcs2_interpreter); } /* OK, we are done with that, now set up the arg stuff, and then start this sucker up */ if (!bprm->sh_bang) { char * passed_p; if (interpreter_type == INTERPRETER_AOUT) { sprintf(passed_fileno, "%d", elf_exec_fileno); passed_p = passed_fileno; if (elf_interpreter) { retval = copy_strings_kernel(1,&passed_p,bprm); if (retval) goto out_free_dentry; bprm->argc++; } } } else { /* Executables without an interpreter also need a personality */ SET_PERSONALITY(elf_ex, ibcs2_interpreter); } /* Flush all traces of the currently running executable */ retval = flush_old_exec(bprm); if (retval) goto out_free_dentry; /* Discard our unneeded old files struct */ if (files) { steal_locks(files); put_files_struct(files); files = NULL; } /* OK, This is the point of no return */ current->mm->start_data = 0; current->mm->end_data = 0; current->mm->end_code = 0; current->mm->mmap = NULL; current->flags &= ~PF_FORKNOEXEC; elf_entry = (unsigned long) elf_ex.e_entry; /* Do this so that we can load the interpreter, if need be. We will change some of these later */ current->mm->rss = 0; retval = setup_arg_pages(bprm); if (retval < 0) { send_sig(SIGKILL, current, 0); return retval; } current->mm->start_stack = bprm->p; /* Now we do a little grungy work by mmaping the ELF image into the correct location in memory. 
At this point, we assume that the image should be loaded at fixed address, not at a variable address. */ for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) { int elf_prot = 0, elf_flags; unsigned long vaddr; if (elf_ppnt->p_type != PT_LOAD) continue; if (unlikely (elf_brk > elf_bss)) { unsigned long nbyte; /* There was a PT_LOAD segment with p_memsz > p_filesz before this one. Map anonymous pages, if needed, and clear the area. */ retval = set_brk (elf_bss + load_bias, elf_brk + load_bias); if (retval) { send_sig(SIGKILL, current, 0); goto out_free_dentry; } nbyte = ELF_PAGEOFFSET(elf_bss); if (nbyte) { nbyte = ELF_MIN_ALIGN - nbyte; if (nbyte > elf_brk - elf_bss) nbyte = elf_brk - elf_bss; clear_user((void *) elf_bss + load_bias, nbyte); } } if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ; if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE; if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC; elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE; vaddr = elf_ppnt->p_vaddr; if (elf_ex.e_type == ET_EXEC || load_addr_set) { elf_flags |= MAP_FIXED; } else if (elf_ex.e_type == ET_DYN) { /* Try and get dynamic programs out of the way of the default mmap base, as well as whatever program they might try to exec. This is because the brk will follow the loader, and is not movable. */ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr); } error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags); if (BAD_ADDR(error)) { send_sig(SIGKILL, current, 0); goto out_free_dentry; } if (!load_addr_set) { load_addr_set = 1; load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset); if (elf_ex.e_type == ET_DYN) { load_bias += error - ELF_PAGESTART(load_bias + vaddr); load_addr += load_bias; reloc_func_desc = load_addr; } } k = elf_ppnt->p_vaddr; if (k < start_code) start_code = k; if (start_data < k) start_data = k; /* * Check to see if the section's size will overflow the * allowed task size. 
Note that p_filesz must always be * <= p_memsz so it is only necessary to check p_memsz. */ if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz || elf_ppnt->p_memsz > TASK_SIZE || TASK_SIZE - elf_ppnt->p_memsz < k) { /* set_brk can never work. Avoid overflows. */ send_sig(SIGKILL, current, 0); goto out_free_dentry; } k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz; if (k > elf_bss) elf_bss = k; if ((elf_ppnt->p_flags & PF_X) && end_code < k) end_code = k; if (end_data < k) end_data = k; k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz; if (k > elf_brk) elf_brk = k; } elf_entry += load_bias; elf_bss += load_bias; elf_brk += load_bias; start_code += load_bias; end_code += load_bias; start_data += load_bias; end_data += load_bias; /* Calling set_brk effectively mmaps the pages that we need * for the bss and break sections. We must do this before * mapping in the interpreter, to make sure it doesn't wind * up getting placed where the bss needs to go. */ retval = set_brk(elf_bss, elf_brk); if (retval) { send_sig(SIGKILL, current, 0); goto out_free_dentry; } padzero(elf_bss); if (elf_interpreter) { if (interpreter_type == INTERPRETER_AOUT) elf_entry = load_aout_interp(&interp_ex, interpreter); else elf_entry = load_elf_interp(&interp_elf_ex, interpreter, &interp_load_addr); if (BAD_ADDR(elf_entry)) { printk(KERN_ERR "Unable to load interpreter %.128s\n", elf_interpreter); force_sig(SIGSEGV, current); retval = -ENOEXEC; /* Nobody gets to see this, but.. */ goto out_free_dentry; } reloc_func_desc = interp_load_addr; allow_write_access(interpreter); fput(interpreter); kfree(elf_interpreter); } kfree(elf_phdata); if (interpreter_type != INTERPRETER_AOUT) sys_close(elf_exec_fileno); set_binfmt(&elf_format); compute_creds(bprm); current->flags &= ~PF_FORKNOEXEC; bprm->p = (unsigned long) create_elf_tables((char *)bprm->p, bprm->argc, bprm->envc, &elf_ex, load_addr, load_bias, interp_load_addr, (interpreter_type == INTERPRETER_AOUT ? 0 : 1)); /* N.B. 
passed_fileno might not be initialized? */ if (interpreter_type == INTERPRETER_AOUT) current->mm->arg_start += strlen(passed_fileno) + 1; current->mm->start_brk = current->mm->brk = elf_brk; current->mm->end_code = end_code; current->mm->start_code = start_code; current->mm->start_data = start_data; current->mm->end_data = end_data; current->mm->start_stack = bprm->p; #if 0 printk("(start_brk) %lx\n" , (long) current->mm->start_brk); printk("(end_code) %lx\n" , (long) current->mm->end_code); printk("(start_code) %lx\n" , (long) current->mm->start_code); printk("(start_data) %lx\n" , (long) current->mm->start_data); printk("(end_data) %lx\n" , (long) current->mm->end_data); printk("(start_stack) %lx\n" , (long) current->mm->start_stack); printk("(brk) %lx\n" , (long) current->mm->brk); #endif if (current->personality & MMAP_PAGE_ZERO) { /* Why this, you ask??? Well SVr4 maps page 0 as read-only, and some applications "depend" upon this behavior. Since we do not have the power to recompile these, we emulate the SVr4 behavior. Sigh. */ /* N.B. Shouldn't the size here be PAGE_SIZE?? */ down_write(¤t->mm->mmap_sem); error = do_mmap(NULL, 0, 4096, PROT_READ | PROT_EXEC, MAP_FIXED | MAP_PRIVATE, 0); up_write(¤t->mm->mmap_sem); } #ifdef ELF_PLAT_INIT /* * The ABI may specify that certain registers be set up in special * ways (on i386 %edx is the address of a DT_FINI function, for * example. In addition, it may also specify (eg, PowerPC64 ELF) * that the e_entry field is the address of the function descriptor * for the startup routine, rather than the address of the startup * routine itself. This macro performs whatever initialization to * the regs structure is required as well as any relocations to the * function descriptor entries when executing dynamically linked apps. 
*/ ELF_PLAT_INIT(regs, reloc_func_desc); #endif start_thread(regs, elf_entry, bprm->p); if (current->ptrace & PT_PTRACED) send_sig(SIGTRAP, current, 0); retval = 0; out: return retval; /* error cleanup */ out_free_dentry: allow_write_access(interpreter); if (interpreter) fput(interpreter); out_free_interp: if (elf_interpreter) kfree(elf_interpreter); out_free_file: sys_close(elf_exec_fileno); out_free_fh: if (files) { put_files_struct(current->files); current->files = files; } out_free_ph: kfree(elf_phdata); goto out; }
static int load_svr3_binary (struct linux_binprm *bprm, struct pt_regs *regs) { struct file *file; int error, retval, i, j, shlibs; int fd[1+SHLIB_MAX]; long entry; unsigned long rlim; unsigned long p = bprm->p; struct filehdr *fh; struct aouthdr *ah; struct scnhdr *sh; char *buf, *libs_buf; /* Main binary + SHLIB_MAX */ struct bin_info bin_info[SHLIB_MAX + 1]; /* Cheking accessable headers by bprm->buf (128 bytes). */ fh = (struct filehdr *) bprm->buf; if (fh->f_magic != MC68MAGIC || fh->f_opthdr < AOUTSZ || fh->f_nscns < 3 || !(fh->f_flags & F_AR32W) || !(fh->f_flags & F_EXEC) ) return -ENOEXEC; ah = (struct aouthdr *) ((char *) bprm->buf + FILHSZ); if (ah->magic == SHMAGIC) return -ELIBEXEC; if ((ah->magic != DMAGIC && ah->magic != ZMAGIC) || !ah->tsize || ah->tsize + ah->dsize + FILHSZ + fh->f_opthdr + SCNHSZ * fh->f_nscns > bprm->inode->i_size || ah->text_start + ah->tsize > ah->data_start ) return -ENOEXEC; if (fh->f_nscns > 24) { printk ("Too many sections in svr3 binary file\n"); return -ENOEXEC; } /* Touch main binary file (which has # 0). */ fd[0] = open_inode (bprm->inode, O_RDONLY); if (fd[0] < 0) return fd[0]; buf = (char *) kmalloc (2*1024, GFP_KERNEL); if (!buf) { sys_close (fd[0]); return -ENOMEM; } libs_buf = buf + 1024; retval = touch_svr3_binary (fd[0], buf, &bin_info[0], 0); if (retval < 0) { sys_close(fd[0]); kfree (buf); return retval; } /* Looking for STYP_LIB section for shared libraries. */ sh = (struct scnhdr *) (buf + FILHSZ + fh->f_opthdr); for (i = 0; i < fh->f_nscns; i++) if (sh[i].s_flags == STYP_LIB) break; if (i == fh->f_nscns) shlibs = 0; else shlibs = sh[i].s_nlib; /* Touch target shared library binary files (## 1--SHLIB_MAX). 
*/ if (shlibs) { void *p; int slib_size = sh[i].s_size; if (shlibs > SHLIB_MAX) { retval = -ELIBMAX; goto error_close; } file = bin_info[0].file; retval = sys_lseek (fd[0], sh[i].s_scnptr, 0); if (retval < 0) goto error_close; if (retval != sh[i].s_scnptr) { retval = -EACCES; goto error_close; } set_fs (KERNEL_DS); retval = file->f_op->read (file->f_inode, file, libs_buf, 1024); set_fs (USER_DS); if (retval < 0) goto error_close; if (retval < slib_size) { retval = -ELIBSCN; goto error_close; } for (p = libs_buf, j = 1; j <= shlibs; j++) { int len; char *name; struct slib *slibh = (struct slib *) p; p += slibh->sl_pathndx * 4; len = (slibh->sl_entsz - slibh->sl_pathndx) * 4; if (len <= 0 || p + len > (void *) libs_buf + slib_size) { retval = -ELIBSCN; goto error_close; } /* Target shared library path name. Must be followed by one or more zeroes. */ name = (char *) p; /* Try to access this library. */ set_fs (KERNEL_DS); fd[j] = sys_open (name, 0, 0); set_fs (USER_DS); if (fd[j] < 0) { retval = fd[j]; goto error_close; } retval = touch_svr3_binary (fd[j],buf,&bin_info[j],SHMAGIC); if (retval < 0) { /* Renumbering for shared library context. */ if (retval == -ENOEXEC) retval = -ELIBBAD; else if (retval == -EACCES) retval = -ELIBACC; goto error_close; } p += len; } } /* if (shlibs) .... */ /* Check initial limits. This avoids letting people circumvent * size limits imposed on them by creating programs with large * arrays in the data or bss. */ rlim = current->rlim[RLIMIT_DATA].rlim_cur; if (rlim >= RLIM_INFINITY) rlim = ~0; if (ah->dsize + ah->bsize > rlim) { /* XXX: but in shlibs too */ retval = -ENOMEM; goto error_close; } kfree (buf); /* OK, this is the point of noreturn. */ entry = ah->entry & ~0x1; /* Avoids possibly hult after `rte' ??? 
*/ flush_old_exec (bprm); current->personality = PER_SVR3; current->mm->end_code = bin_info[0].text_len + (current->mm->start_code = bin_info[0].text_addr); current->mm->end_data = bin_info[0].data_len + (current->mm->start_data = bin_info[0].data_addr); current->mm->brk = bin_info[0].bss_len + (current->mm->start_brk = current->mm->end_data); current->mm->rss = 0; current->mm->mmap = NULL; current->suid = current->euid = current->fsuid = bprm->e_uid; current->sgid = current->egid = current->fsgid = bprm->e_gid; current->flags &= ~PF_FORKNOEXEC; /* mmap all binaries */ for (i = 0; i < 1 + shlibs; i++) { struct bin_info *binf = &bin_info[i]; unsigned int blocksize = binf->file->f_inode->i_sb->s_blocksize; unsigned int start_bss, end_bss; if (binf->text_addr & (PAGE_SIZE - 1) || binf->data_addr & (PAGE_SIZE - 1) || binf->text_offs & (blocksize - 1) || binf->data_offs & (blocksize - 1) || !binf->file->f_op->mmap ) { /* cannot mmap immediatly */ do_mmap (NULL, PAGE_ROUND(binf->text_addr), binf->text_len + (binf->text_addr - PAGE_ROUND(binf->text_addr)), PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED | MAP_PRIVATE, 0); read_exec (binf->file->f_inode, binf->text_offs, (char *) binf->text_addr, binf->text_len, 0); do_mmap (NULL, PAGE_ROUND(binf->data_addr), binf->data_len + (binf->data_addr - PAGE_ROUND(binf->data_addr)), PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED | MAP_PRIVATE, 0); read_exec (binf->file->f_inode, binf->data_offs, (char *) binf->data_addr, binf->data_len, 0); /* there's no nice way of flushing a number of user pages to ram 8*( */ flush_cache_all(); } else { error = do_mmap (binf->file, binf->text_addr, binf->text_len, PROT_READ | PROT_EXEC, MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE, binf->text_offs); if (error != binf->text_addr) goto error_kill_close; error = do_mmap (binf->file, binf->data_addr, binf->data_len, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE, binf->data_offs); if (error != 
binf->data_addr) goto error_kill_close; #ifdef DMAGIC_NODEMAND /* DMAGIC is for pure executable (not demand loading). But let the shared libraries be demand load ??? */ if (i == 0 && ah->magic == DMAGIC) { volatile char c; unsigned long addr; /* Touch all pages in .text and .data segments. */ for (addr = binf->text_addr; addr < binf->text_addr + binf->text_len; addr += PAGE_SIZE ) c = get_fs_byte ((char *) addr); for (addr = binf->data_addr; addr < binf->data_addr + binf->data_len; addr += PAGE_SIZE ) c = get_fs_byte ((char *) addr); } #endif } sys_close (fd[i]); start_bss = PAGE_ALIGN(binf->data_addr + binf->data_len); end_bss = PAGE_ALIGN(binf->data_addr + binf->data_len + binf->bss_len); /* svr3 binaries very hope that .bss section had been initialized by zeroes. Oh... */ if (binf->bss_len != 0) { /* Because there may be skipped heap by alignment. */ int addr = binf->data_addr + binf->data_len; int i = start_bss - addr; /* start_bss is aligned, addr may be no */ while (i & 0x3) { put_fs_byte (0, (char *) addr); addr++; i--; } i >>= 2; while (i--) { put_fs_long (0, (long *) addr); addr += sizeof (long); } } if (end_bss >= start_bss) do_mmap (NULL, start_bss, end_bss - start_bss, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED | MAP_PRIVATE, 0); #ifdef DMAGIC_NODEMAND /* The same reason as above. */ if (i == 0 && ah->magic == DMAGIC) { volatile char c; unsigned long addr; for (addr = start_bss; addr < end_bss; addr += PAGE_SIZE) c = get_fs_byte ((char *) addr); } #endif /* OK, now all is mmapped for binary # i */ } /* for (i = ... ) */
/* * Helper function to process the load operation. */ static int coff_load_object(struct linux_binprm *bprm, struct pt_regs *regs, int binary) { COFF_FILHDR *coff_hdr = NULL; COFF_SCNHDR *text_sect = NULL, *data_sect = NULL, *bss_sect = NULL, *sect_bufr = NULL, *sect_ptr = NULL; int text_count = 0, data_count = 0, bss_count = 0, lib_count = 0; coff_section text, data, bss; u_long start_addr = 0, p = bprm->p; short flags, aout_size = 0; int pageable = 1, sections = 0, status = 0, i; int coff_exec_fileno; mm_segment_t old_fs; coff_hdr = (COFF_FILHDR *)bprm->buf; /* * Validate the magic value for the object file. */ if (COFF_I386BADMAG(*coff_hdr)) return -ENOEXEC; flags = COFF_SHORT(coff_hdr->f_flags); /* * The object file should have 32 BIT little endian format. Do not allow * it to have the 16 bit object file flag set as Linux is not able to run * on the 80286/80186/8086. */ if ((flags & (COFF_F_AR32WR | COFF_F_AR16WR)) != COFF_F_AR32WR) return -ENOEXEC; /* * If the file is not executable then reject the execution. This means * that there must not be external references. */ if ((flags & COFF_F_EXEC) == 0) return -ENOEXEC; /* * Extract the header information which we need. */ sections = COFF_SHORT(coff_hdr->f_nscns); /* Number of sections */ aout_size = COFF_SHORT(coff_hdr->f_opthdr); /* Size of opt. headr */ /* * There must be at least one section. 
*/ if (!sections) return -ENOEXEC; if (!bprm->file->f_op->mmap) pageable = 0; if (!(sect_bufr = kmalloc(sections * COFF_SCNHSZ, GFP_KERNEL))) { printk(KERN_WARNING "coff: kmalloc failed\n"); return -ENOMEM; } status = kernel_read(bprm->file, aout_size + COFF_FILHSZ, (char *)sect_bufr, sections * COFF_SCNHSZ); if (status < 0) { printk(KERN_WARNING "coff: unable to read\n"); goto out_free_buf; } status = get_unused_fd(); if (status < 0) { printk(KERN_WARNING "coff: unable to get free fs\n"); goto out_free_buf; } get_file(bprm->file); fd_install(coff_exec_fileno = status, bprm->file); /* * Loop through the sections and find the various types */ sect_ptr = sect_bufr; for (i = 0; i < sections; i++) { long int sect_flags = COFF_LONG(sect_ptr->s_flags); switch (sect_flags) { case COFF_STYP_TEXT: status |= coff_isaligned(sect_ptr); text_sect = sect_ptr; text_count++; break; case COFF_STYP_DATA: status |= coff_isaligned(sect_ptr); data_sect = sect_ptr; data_count++; break; case COFF_STYP_BSS: bss_sect = sect_ptr; bss_count++; break; case COFF_STYP_LIB: lib_count++; break; default: break; } sect_ptr = (COFF_SCNHDR *) & ((char *) sect_ptr)[COFF_SCNHSZ]; } /* * If any of the sections weren't properly aligned we aren't * going to be able to demand page this executable. Note that * at this stage the *only* excuse for having status <= 0 is if * the alignment test failed. */ if (status < 0) pageable = 0; /* * Ensure that there are the required sections. There must be one * text sections and one each of the data and bss sections for an * executable. A library may or may not have a data / bss section. */ if (text_count != 1) { status = -ENOEXEC; goto out_free_file; } if (binary && (data_count != 1 || bss_count != 1)) { status = -ENOEXEC; goto out_free_file; } /* * If there is no additional header then assume the file starts * at the first byte of the text section. This may not be the * proper place, so the best solution is to include the optional * header. 
A shared library __MUST__ have an optional header to * indicate that it is a shared library. */ if (aout_size == 0) { if (!binary) { status = -ENOEXEC; goto out_free_file; } start_addr = COFF_LONG(text_sect->s_vaddr); } else if (aout_size < (short) COFF_AOUTSZ) { status = -ENOEXEC; goto out_free_file; } else { COFF_AOUTHDR *aout_hdr; short aout_magic; aout_hdr = (COFF_AOUTHDR *) &((char *)coff_hdr)[COFF_FILHSZ]; aout_magic = COFF_SHORT(aout_hdr->magic); /* * Validate the magic number in the a.out header. If it is valid then * update the starting symbol location. Do not accept these file formats * when loading a shared library. */ switch (aout_magic) { case COFF_OMAGIC: case COFF_ZMAGIC: case COFF_STMAGIC: if (!binary) { status = -ENOEXEC; goto out_free_file; } start_addr = (u_int)COFF_LONG(aout_hdr->entry); break; /* * Magic value for a shared library. This is valid only when * loading a shared library. * * (There is no need for a start_addr. It won't be used.) */ case COFF_SHMAGIC: if (!binary) break; /* FALLTHROUGH */ default: status = -ENOEXEC; goto out_free_file; } } /* * Generate the proper values for the text fields * * THIS IS THE POINT OF NO RETURN. THE NEW PROCESS WILL TRAP OUT SHOULD * SOMETHING FAIL IN THE LOAD SEQUENCE FROM THIS POINT ONWARD. */ text.scnptr = COFF_LONG(text_sect->s_scnptr); text.size = COFF_LONG(text_sect->s_size); text.vaddr = COFF_LONG(text_sect->s_vaddr); /* * Generate the proper values for the data fields */ if (data_sect != NULL) { data.scnptr = COFF_LONG(data_sect->s_scnptr); data.size = COFF_LONG(data_sect->s_size); data.vaddr = COFF_LONG(data_sect->s_vaddr); } else { data.scnptr = 0; data.size = 0; data.vaddr = 0; } /* * Generate the proper values for the bss fields */ if (bss_sect != NULL) { bss.size = COFF_LONG(bss_sect->s_size); bss.vaddr = COFF_LONG(bss_sect->s_vaddr); } else { bss.size = 0; bss.vaddr = 0; } /* * Flush the executable from memory. 
At this point the executable is * committed to being defined or a segmentation violation will occur. */ if (binary) { COFF_SCNHDR *sect_ptr2 = sect_bufr; u_long personality = PER_SVR3; int i; if ((status = flush_old_exec(bprm))) goto out_free_file; /* * Look for clues as to the system this binary was compiled * on in the comments section(s). * * Only look at the main binary, not the shared libraries * (or would it be better to prefer shared libraries over * binaries? Or could they be different???) */ for (i = 0; i < sections; i++) { long sect_flags = COFF_LONG(sect_ptr2->s_flags); if (sect_flags == COFF_STYP_INFO && (status = coff_parse_comments(bprm->file, sect_ptr2, &personality)) > 0) goto found; sect_ptr2 = (COFF_SCNHDR *) &((char *)sect_ptr2)[COFF_SCNHSZ]; } /* * If no .comments section was found there is no way to * figure out the personality. Odds on it is SCO though... */ personality = abi_defhandler_coff; found: set_personality(personality); current->mm->start_data = 0; current->mm->end_data = 0; current->mm->end_code = 0; current->mm->mmap = NULL; current->flags &= ~PF_FORKNOEXEC; current->mm->_rss = 0; /* * Construct the parameter and environment * string table entries. */ if ((status = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT)) < 0) goto sigsegv; p = (u_long)coff_mktables((char *)bprm->p, bprm->argc, bprm->envc); current->mm->end_code = text.size + (current->mm->start_code = text.vaddr); current->mm->end_data = data.size + (current->mm->start_data = data.vaddr); current->mm->brk = bss.size + (current->mm->start_brk = bss.vaddr); current->mm->start_stack = p; compute_creds(bprm); start_thread(regs, start_addr, p); } old_fs = get_fs(); set_fs(get_ds()); if (!pageable) { /* * Read the file from disk... * * XXX: untested. 
*/ loff_t pos = data.scnptr; status = do_brk(text.vaddr, text.size); bprm->file->f_op->read(bprm->file, (char *)data.vaddr, data.scnptr, &pos); status = do_brk(data.vaddr, data.size); bprm->file->f_op->read(bprm->file, (char *)text.vaddr, text.scnptr, &pos); status = 0; } else { /* map the text pages...*/ status = map_coff(bprm->file, &text, PROT_READ | PROT_EXEC, MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE, text.scnptr & PAGE_MASK); if (status != (text.vaddr & PAGE_MASK)) { status = -ENOEXEC; goto out_free_file; } /* map the data pages */ if (data.size != 0) { status = map_coff(bprm->file, &data, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE, data.scnptr & PAGE_MASK); if (status != (data.vaddr & PAGE_MASK)) { status = -ENOEXEC; goto out_free_file; } } status = 0; } /* * Construct the bss data for the process. The bss ranges from the * end of the data (which may not be on a page boundary) to the end * of the bss section. Allocate any necessary pages for the data. */ if (bss.size != 0) { down_write(¤t->mm->mmap_sem); do_mmap(NULL, PAGE_ALIGN(bss.vaddr), bss.size + bss.vaddr - PAGE_ALIGN(bss.vaddr), PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED | MAP_PRIVATE, 0); up_write(¤t->mm->mmap_sem); if ((status = coff_clear_memory(bss.vaddr, bss.size)) < 0) goto out_free_file; } set_fs(old_fs); if (!binary) goto out_free_file; /* * Load any shared library for the executable. */ if (lib_count) status = coff_preload_shlibs(bprm, sect_bufr, sections); set_binfmt(&coff_format); /* * Generate any needed trap for this process. If an error occured then * generate a segmentation violation. If the process is being debugged * then generate the load trap. (Note: If this is a library load then * do not generate the trap here. Pass the error to the caller who * will do it for the process in the outer lay of this procedure call.) 
*/ if (status < 0) { sigsegv: printk(KERN_WARNING "coff: trapping process with SEGV\n"); send_sig(SIGSEGV, current, 0); /* Generate the error trap */ } else if (current->ptrace & PT_PTRACED) send_sig(SIGTRAP, current, 0); /* We are committed. It can't fail */ status = 0; out_free_file: sys_close(coff_exec_fileno); out_free_buf: kfree(sect_bufr); return (status); }
/*
 * load_exeso_binary - load a Windows-on-Linux "exe.so" image.
 *
 * The image is an ELF container (validated below, plus check_exeso())
 * carrying a PE executable.  After flush_old_exec() -- the point of no
 * return -- the Win32 address-space layout is reserved with PROT_NONE
 * placeholder mappings, ntdll.dll.so is mapped, and the NT-style kernel
 * objects (EPROCESS, PEB, PPB, ETHREAD, TEB) are created before control
 * is handed to the interpreter entry point via start_thread().
 *
 * @bprm: binary-parameter block describing the new executable
 * @regs: register frame of the current thread, rewritten for the image
 *
 * Returns 0 on success or a negative error/status code.  Failures after
 * the point of no return kill the process with SIGKILL (out_free_file),
 * since the old image is already gone.
 */
static int load_exeso_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	struct elfhdr *elf_ex;
	struct elf_phdr *elf_phdata = NULL;	/* never allocated here; see 'out' */
	struct mm_struct *mm;
	unsigned long load_addr = 0;
	unsigned long error;
	int retval = 0;
	unsigned long pe_entry, ntdll_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long ntdll_entry;
	int executable_stack = EXSTACK_DEFAULT;
	unsigned long def_flags = 0;
	unsigned long stack_top;
#ifdef NTDLL_SO
	unsigned long interp_load_addr;
	unsigned long interp_entry;
#endif
	struct eprocess *process;
	struct ethread *thread;
	PRTL_USER_PROCESS_PARAMETERS ppb;
	OBJECT_ATTRIBUTES ObjectAttributes;
	INITIAL_TEB init_teb;
	BOOLEAN is_win32 = FALSE;
	struct startup_info *info = NULL;
	struct eprocess *parent_eprocess = NULL;
	struct ethread *parent_ethread = NULL;
	struct w32process *child_w32process = NULL;
	struct w32process *parent_w32process = NULL;

	elf_ex = (struct elfhdr *)bprm->buf;
	retval = -ENOEXEC;

	/* First of all, some simple consistency checks */
	if (memcmp(elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
		goto out;
	if (elf_ex->e_type != ET_EXEC && elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(elf_ex))
		goto out;
	/* the image must be mmap()able */
	if (!bprm->file->f_op || !bprm->file->f_op->mmap)
		goto out;
	if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;
	/* bound the program-header table to 64KiB worth of entries */
	if (elf_ex->e_phnum < 1 ||
	    elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	/* reject anything that is not an exe.so image */
	if (!check_exeso(bprm))
		goto out;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	/* a parent with an ethread means we were spawned by a Win32 process */
	if (current->parent->ethread) {
		is_win32 = TRUE;
		parent_ethread = current->parent->ethread;
		parent_eprocess = parent_ethread->threads_process;
	}

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval) {
		goto out;
	}

	/* OK, This is the point of no return */
	mm = current->mm;
	current->flags &= ~PF_FORKNOEXEC;
	mm->def_flags = def_flags;

	/* Win32 processes get a fixed-size stack */
	current->signal->rlim[RLIMIT_STACK].rlim_cur = WIN32_STACK_LIMIT;
	current->signal->rlim[RLIMIT_STACK].rlim_max = WIN32_STACK_LIMIT;
	current->personality |= ADDR_COMPAT_LAYOUT;
	arch_pick_mmap_layout(mm);

	/* Do this so that we can load the ntdll, if need be.  We will
	 * change some of these later. */
	mm->free_area_cache = mm->mmap_base = WIN32_UNMAPPED_BASE;
	mm->cached_hole_size = 0;
	stack_top = WIN32_STACK_LIMIT + WIN32_LOWEST_ADDR;
	retval = setup_arg_pages(bprm, stack_top, executable_stack);
	if (retval < 0)
		goto out_free_file;

	/* carve out the Win32 address-space layout with PROT_NONE
	 * placeholder mappings so nothing else lands there */
	down_write(&mm->mmap_sem);
	/* reserve first 0x100000 */
	do_mmap_pgoff(NULL, 0, WIN32_LOWEST_ADDR, PROT_NONE,
			MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, 0);
	/* reserve first 0x7fff0000 - 0x80000000 */
	do_mmap_pgoff(NULL, WIN32_TASK_SIZE - 0x10000, 0x10000,
			PROT_NONE, MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, 0);
	/* reserve first 0x81000000 - 0xc0000000
	 * 0x80000000 - 0x81000000 used for wine SYSTEM_HEAP */
	do_mmap_pgoff(NULL, WIN32_TASK_SIZE + WIN32_SYSTEM_HEAP_SIZE,
			TASK_SIZE - WIN32_TASK_SIZE - WIN32_SYSTEM_HEAP_SIZE,
			PROT_NONE, MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, 0);
	up_write(&mm->mmap_sem);

#ifdef NTDLL_SO
	/* search ntdll.dll.so in $PATH, default is
	 * /usr/local/lib/wine/ntdll.dll.so */
	if (!*ntdll_name)
		search_ntdll();

	/* map ntdll.dll.so */
	map_system_dll(current, ntdll_name, &ntdll_load_addr, &interp_load_addr);

	pe_entry = get_pe_entry();
	ntdll_entry = get_ntdll_entry();
	interp_entry = get_interp_entry();
#endif
	/* NOTE(review): pe_entry is assigned only under NTDLL_SO yet used
	 * unconditionally below, and interp_entry is even declared under
	 * the same #ifdef - this function appears to require NTDLL_SO to
	 * build; confirm. */
	set_binfmt(&exeso_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, executable_stack);
	if (retval < 0) {
		goto out_free_file;
	}
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	install_exec_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;

#ifdef NTDLL_SO
	/* copy argv, env, and auxvec to stack, all for interpreter */
	create_elf_tables_aux(bprm,
			ntdll_load_addr, ntdll_phoff, ntdll_phnum,
			get_ntdll_start_thunk(),
			load_addr, elf_ex->e_phoff, elf_ex->e_phnum, 0,
			interp_load_addr, interp_entry, 0);
#endif

	/* NOTE(review): start_code is still ~0UL and start/end_data are 0
	 * here - these mm fields look like placeholders; confirm they are
	 * fixed up later (e.g. by the loaded ntdll). */
	mm->end_code = end_code;
	mm->start_code = start_code;
	mm->start_data = start_data;
	mm->end_data = end_data;
	mm->start_stack = bprm->p;

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior.  Sigh.  */
		down_write(&mm->mmap_sem);
		error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&mm->mmap_sem);
	}

	/* create win-related structure */
	INIT_OBJECT_ATTR(&ObjectAttributes, NULL, 0, NULL, NULL);

	/* Create EPROCESS */
	retval = create_object(KernelMode, process_object_type,
			&ObjectAttributes, KernelMode, NULL,
			sizeof(struct eprocess), 0, 0, (PVOID *)&process);
	if (retval != STATUS_SUCCESS) {
		goto out_free_file;
	}

	/* init eprocess */
	eprocess_init(NULL, FALSE, process);

	process->unique_processid = create_cid_handle(process, process_object_type);
	if (!process->unique_processid)
		goto out_free_eproc;

	/* initialize EProcess and KProcess */
	process->section_base_address = (void *)load_addr;

	/* FIXME: PsCreateCidHandle */

	/* Create PEB */
	if ((retval = create_peb(process)))
		goto out_free_process_cid;

	/* Create PPB - only when the parent is NOT a Win32 process;
	 * otherwise the parent supplies the startup information below */
	if (is_win32 == FALSE) {
		create_ppb(&ppb, process, bprm, bprm->filename,
				NULL, NULL, NULL, NULL, NULL, NULL, NULL);
		((PEB *)process->peb)->ProcessParameters = ppb;
	}

	/* allocate a Win32 thread object */
	retval = create_object(KernelMode, thread_object_type,
			&ObjectAttributes, KernelMode, NULL,
			sizeof(struct ethread), 0, 0, (PVOID *)&thread);
	if (retval) {
		goto out_free_process_cid;
	}

	thread->cid.unique_thread = create_cid_handle(thread, thread_object_type);
	thread->cid.unique_process = process->unique_processid;
	if (!thread->cid.unique_thread)
		goto out_free_ethread;

	/* set the teb */
	init_teb.StackBase = (PVOID)(bprm->p);
	init_teb.StackLimit = (PVOID)WIN32_LOWEST_ADDR + PAGE_SIZE;
	thread->tcb.teb = create_teb(process, (PCLIENT_ID)&thread->cid, &init_teb);
	if (IS_ERR(thread->tcb.teb)) {
		retval = PTR_ERR(thread->tcb.teb);
		goto out_free_thread_cid;
	}

	/* Init KThread */
	ethread_init(thread, process, current);

	sema_init(&thread->exec_semaphore, 0);
	if (is_win32 == TRUE) {		/* parent is a windows process */
		down(&thread->exec_semaphore);	/* wait for the parent */
		child_w32process = process->win32process;
		parent_w32process = parent_eprocess->win32process;
		info = child_w32process->startup_info;

		/* now parent has finished its work */
		if (thread->inherit_all) {
			create_handle_table(parent_eprocess, TRUE, process);
			child_w32process = create_w32process(parent_w32process, TRUE, process);
		}
	}

	deref_object(process);
	deref_object(thread);

	set_teb_selector(current, (long)thread->tcb.teb);

	thread->start_address = (void *)pe_entry;	/* FIXME */

	/* save current trap frame */
	thread->tcb.trap_frame = (struct ktrap_frame *)regs;

	/* init apc, to call LdrInitializeThunk */
#if 0
	thread_apc = kmalloc(sizeof(KAPC), GFP_KERNEL);
	if (!thread_apc) {
		retval = -ENOMEM;
		goto out_free_thread_cid;
	}
	apc_init(thread_apc, &thread->tcb, OriginalApcEnvironment,
			thread_special_apc, NULL,
			(PKNORMAL_ROUTINE)ntdll_entry, UserMode,
			(void *)(bprm->p + 12));
	insert_queue_apc(thread_apc, (void *)interp_entry, (void *)extra_page, IO_NO_INCREMENT);
#ifndef TIF_APC
#define TIF_APC 13
#endif
	set_tsk_thread_flag(current, TIF_APC);
#endif

#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example.  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically links apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	/* hand control to the interpreter entry point */
	start_thread(regs, interp_entry, bprm->p);
	if (unlikely(current->ptrace & PT_PTRACED)) {
		if (current->ptrace & PT_TRACE_EXEC)
			ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
		else
			send_sig(SIGTRAP, current, 0);
	}
	retval = 0;

	try_module_get(THIS_MODULE);

	/* return from w32syscall_exit, not syscall_exit */
	((unsigned long *)regs)[-1] = (unsigned long)w32syscall_exit;
	regs->fs = TEB_SELECTOR;

out:
	/* elf_phdata is never allocated in this function, so this is
	 * purely defensive */
	if (elf_phdata)
		kfree(elf_phdata);
	return retval;

	/* error cleanup */
out_free_thread_cid:
	delete_cid_handle(thread->cid.unique_thread, thread_object_type);
out_free_ethread:
	deref_object(thread);
out_free_process_cid:
	delete_cid_handle(process->unique_processid, process_object_type);
out_free_eproc:
	deref_object(process);
out_free_file:
	/* past the point of no return: the old image is gone, so we cannot
	 * simply return an error - kill the process */
	send_sig(SIGKILL, current, 0);
	goto out;
}
/*
 * do_load_elf_binary - load an ELF executable and, when the image names
 * one via PT_INTERP, its program interpreter (either a.out or ELF
 * format) into the current process.
 *
 * Outline:
 *   1. sanity-check the ELF header copied from bprm->buf;
 *   2. read the program-header table and scan for PT_INTERP;
 *   3. open and classify the interpreter (paths libc.so.1/ld.so.1
 *      additionally mark the image as iBCS2);
 *   4. flush_old_exec() - the point of no return;
 *   5. mmap every PT_LOAD segment, tracking code/data/bss/brk extents;
 *   6. load the interpreter, switch exec domain and binfmt, build the
 *      ELF tables on the new stack, set up brk/bss, start_thread().
 *
 * Returns 0 on success or a negative errno.  After the point of no
 * return, an interpreter-load failure sends SIGSEGV and returns 0
 * instead of reporting an error to the caller.
 */
static inline int do_load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
{
	struct file * file;
	struct dentry *interpreter_dentry = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned int interpreter_type = INTERPRETER_NONE;
	unsigned char ibcs2_interpreter = 0;
	mm_segment_t old_fs;
	unsigned long error;
	struct elf_phdr * elf_ppnt, *elf_phdata;
	unsigned long elf_bss, k, elf_brk;
	int elf_exec_fileno;
	int retval, size, i;
	unsigned long elf_entry, interp_load_addr = 0;
	unsigned long start_code, end_code, end_data;
	struct elfhdr elf_ex;
	struct elfhdr interp_elf_ex;
	struct exec interp_ex;
	char passed_fileno[6];

	/* Get the exec-header */
	elf_ex = *((struct elfhdr *) bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (elf_ex.e_ident[0] != 0x7f ||
	    strncmp(&elf_ex.e_ident[1], "ELF", 3) != 0)
		goto out;

	if (elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(elf_ex.e_machine))
		goto out;
#ifdef __mips__
	/* allow only mips1 if exec is MIPSEB elf,
	   because IRIX binaries handled elsewhere.
	 */
	/* borrowed from binutils/include/elf/common.h */
#define EI_DATA		5	/* Data encoding */
#define ELFDATA2MSB	2	/* 2's complement, big endian */
	if ((elf_ex.e_ident[EI_DATA] == ELFDATA2MSB) &&
	    (elf_ex.e_flags & EF_MIPS_ARCH)) {
		retval = -ENOEXEC;
		goto out;
	}
#endif
	/* the loader relies on mmap support from the filesystem */
	if (!bprm->dentry->d_inode->i_op ||
	    !bprm->dentry->d_inode->i_op->default_file_ops ||
	    !bprm->dentry->d_inode->i_op->default_file_ops->mmap)
		goto out;

	/* Now read in all of the header information */
	retval = -ENOMEM;
	size = elf_ex.e_phentsize * elf_ex.e_phnum;
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = read_exec(bprm->dentry, elf_ex.e_phoff,
			   (char *) elf_phdata, size, 1);
	if (retval < 0)
		goto out_free_ph;

	/* keep an open fd on the image: an a.out interpreter is handed
	 * the fd number as an extra argument (see passed_fileno below) */
	retval = open_dentry(bprm->dentry, O_RDONLY);
	if (retval < 0)
		goto out_free_ph;
	elf_exec_fileno = retval;
	file = fget(elf_exec_fileno);

	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;
	start_code = ~0UL;
	end_code = 0;
	end_data = 0;

	/* scan the program headers for a PT_INTERP entry */
	for (i = 0; i < elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			retval = -EINVAL;
			/* only one PT_INTERP segment is legal */
			if (elf_interpreter)
				goto out_free_interp;
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */
			retval = -ENOMEM;
			elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
							   GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_file;
			retval = read_exec(bprm->dentry, elf_ppnt->p_offset,
					   elf_interpreter,
					   elf_ppnt->p_filesz, 1);
			if (retval < 0)
				goto out_free_interp;
			/* If the program interpreter is one of these two,
			 * then assume an iBCS2 image. Otherwise assume
			 * a native linux image.
			 */
			if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
			    strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
				ibcs2_interpreter = 1;
#if 0
			printk("Using ELF interpreter %s\n", elf_interpreter);
#endif
			old_fs = get_fs(); /* This could probably be optimized */
			set_fs(get_ds());
#ifdef __sparc__
			/* look the interpreter up under the SVR4
			 * personality so iBCS2 path handling applies */
			if (ibcs2_interpreter) {
				unsigned long old_pers = current->personality;
				current->personality = PER_SVR4;
				interpreter_dentry = open_namei(elf_interpreter, 0, 0);
				current->personality = old_pers;
			} else
#endif
				interpreter_dentry = open_namei(elf_interpreter, 0, 0);
			set_fs(old_fs);
			retval = PTR_ERR(interpreter_dentry);
			if (IS_ERR(interpreter_dentry))
				goto out_free_interp;
			retval = permission(interpreter_dentry->d_inode, MAY_EXEC);
			if (retval < 0)
				goto out_free_dentry;
			retval = read_exec(interpreter_dentry, 0, bprm->buf, 128, 1);
			if (retval < 0)
				goto out_free_dentry;

			/* Get the exec headers */
			interp_ex = *((struct exec *) bprm->buf);
			interp_elf_ex = *((struct elfhdr *) bprm->buf);
		}
		elf_ppnt++;
	}

	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

		/* Now figure out which format our binary is */
		if ((N_MAGIC(interp_ex) != OMAGIC) &&
		    (N_MAGIC(interp_ex) != ZMAGIC) &&
		    (N_MAGIC(interp_ex) != QMAGIC))
			interpreter_type = INTERPRETER_ELF;

		if (interp_elf_ex.e_ident[0] != 0x7f ||
		    strncmp(&interp_elf_ex.e_ident[1], "ELF", 3) != 0)
			interpreter_type &= ~INTERPRETER_ELF;

		retval = -ELIBBAD;
		if (!interpreter_type)
			goto out_free_dentry;

		/* Make sure only one type was selected */
		if ((interpreter_type & INTERPRETER_ELF) &&
		    interpreter_type != INTERPRETER_ELF) {
			printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
			interpreter_type = INTERPRETER_ELF;
		}
	}

	/* OK, we are done with that, now set up the arg stuff,
	   and then start this sucker up */
	if (!bprm->sh_bang) {
		char * passed_p;

		if (interpreter_type == INTERPRETER_AOUT) {
			/* an a.out interpreter receives the open fd of the
			 * binary as an extra argv string */
			sprintf(passed_fileno, "%d", elf_exec_fileno);
			passed_p = passed_fileno;

			if (elf_interpreter) {
				bprm->p = copy_strings(1,&passed_p,bprm->page,bprm->p,2);
				bprm->argc++;
			}
		}
		retval = -E2BIG;
		if (!bprm->p)
			goto out_free_dentry;
	}

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* OK, This is the point of no return */
	current->mm->end_data = 0;
	current->mm->end_code = 0;
	current->mm->mmap = NULL;
	current->flags &= ~PF_FORKNOEXEC;
	elf_entry = (unsigned long) elf_ex.e_entry;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY(elf_ex, ibcs2_interpreter);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	current->mm->rss = 0;
	bprm->p = setup_arg_pages(bprm->p, bprm);
	current->mm->start_stack = bprm->p;

	/* Try and get dynamic programs out of the way of the default mmap
	   base, as well as whatever program they might try to exec.  This
	   is because the brk will follow the loader, and is not movable.  */
	load_bias = ELF_PAGESTART(elf_ex.e_type==ET_DYN ? ELF_ET_DYN_BASE : 0);

	/* Now we do a little grungy work by mmaping the ELF image into
	   the correct location in memory.  At this point, we assume that
	   the image should be loaded at fixed address, not at a variable
	   address.
	 */
	old_fs = get_fs();
	set_fs(get_ds());

	for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long vaddr;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		/* ET_EXEC segments map exactly where they ask; once the
		 * first ET_DYN segment is placed, the rest are fixed
		 * relative to it */
		if (elf_ex.e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		}

		/* NOTE(review): the do_mmap result is only consumed for
		 * the ET_DYN bias below; a mapping failure is not
		 * detected here. */
		error = do_mmap(file, ELF_PAGESTART(load_bias + vaddr),
				(elf_ppnt->p_filesz +
				 ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
				elf_prot, elf_flags,
				(elf_ppnt->p_offset -
				 ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (elf_ex.e_type == ET_DYN) {
				load_bias += error - ELF_PAGESTART(load_bias + vaddr);
				load_addr += error;
			}
		}

		/* track lowest code address plus the highest file-backed
		 * (bss start), executable (end_code), data (end_data) and
		 * in-memory (brk) extents */
		k = elf_ppnt->p_vaddr;
		if (k < start_code) start_code = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}
	set_fs(old_fs);
	fput(file); /* all done with the file */

	/* rebase everything by the bias chosen above (zero for ET_EXEC) */
	elf_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	end_data += load_bias;

	if (elf_interpreter) {
		if (interpreter_type == INTERPRETER_AOUT)
			elf_entry = load_aout_interp(&interp_ex,
						     interpreter_dentry);
		else
			elf_entry = load_elf_interp(&interp_elf_ex,
						    interpreter_dentry,
						    &interp_load_addr);
		dput(interpreter_dentry);
		kfree(elf_interpreter);

		if (elf_entry == ~0UL) {
			/* past the point of no return: signal the process
			 * rather than returning an error */
			printk(KERN_ERR "Unable to load interpreter\n");
			kfree(elf_phdata);
			send_sig(SIGSEGV, current, 0);
			return 0;
		}
	}

	kfree(elf_phdata);

	/* an a.out interpreter still needs the fd; otherwise close it */
	if (interpreter_type != INTERPRETER_AOUT)
		sys_close(elf_exec_fileno);

	/* switch exec domain and binary format, rebalancing the module
	 * use counts for the old and new owners */
	if (current->exec_domain && current->exec_domain->module)
		__MOD_DEC_USE_COUNT(current->exec_domain->module);
	if (current->binfmt && current->binfmt->module)
		__MOD_DEC_USE_COUNT(current->binfmt->module);
	current->exec_domain = lookup_exec_domain(current->personality);
	current->binfmt = &elf_format;
	if (current->exec_domain && current->exec_domain->module)
		__MOD_INC_USE_COUNT(current->exec_domain->module);
	if (current->binfmt && current->binfmt->module)
		__MOD_INC_USE_COUNT(current->binfmt->module);

#ifndef VM_STACK_FLAGS
	current->executable = dget(bprm->dentry);
#endif
	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	bprm->p = (unsigned long)
		create_elf_tables((char *)bprm->p, bprm->argc, bprm->envc,
				  (interpreter_type == INTERPRETER_ELF ? &elf_ex : NULL),
				  load_addr, load_bias, interp_load_addr,
				  (interpreter_type == INTERPRETER_AOUT ? 0 : 1));
	/* N.B. passed_fileno might not be initialized? */
	if (interpreter_type == INTERPRETER_AOUT)
		current->mm->arg_start += strlen(passed_fileno) + 1;
	current->mm->start_brk = current->mm->brk = elf_brk;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections
	 */
	set_brk(elf_bss, elf_brk);
	/* zero the remainder of the last partially-filled bss page */
	padzero(elf_bss);

#if 0
	printk("(start_brk) %x\n" , current->mm->start_brk);
	printk("(end_code) %x\n" , current->mm->end_code);
	printk("(start_code) %x\n" , current->mm->start_code);
	printk("(end_data) %x\n" , current->mm->end_data);
	printk("(start_stack) %x\n" , current->mm->start_stack);
	printk("(brk) %x\n" , current->mm->brk);
#endif

	if ( current->personality == PER_SVR4 )
	{
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior.  Sigh.  */
		/* N.B. Shouldn't the size here be PAGE_SIZE?? */
		error = do_mmap(NULL, 0, 4096, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
	}

#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example.  This macro performs whatever initialization to
	 * the regs structure is required.
	 */
	ELF_PLAT_INIT(regs);
#endif

	start_thread(regs, elf_entry, bprm->p);
	if (current->flags & PF_PTRACED)
		send_sig(SIGTRAP, current, 0);
	retval = 0;
out:
	return retval;

	/* error cleanup */
out_free_dentry:
	dput(interpreter_dentry);
out_free_interp:
	if (elf_interpreter)
		kfree(elf_interpreter);
out_free_file:
	fput(file);
	sys_close(elf_exec_fileno);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}
/*
 * load_object - load a COFF image into the current process.
 *
 * Used both for the main executable and (recursively, via
 * preload_library) for COFF shared libraries.  From the code below:
 * lib_ok is non-zero for a normal executable - .data and .bss are
 * required, COFF_SHMAGIC is rejected, the old image is flushed, and
 * any .lib sections trigger shared-library preloading; lib_ok is zero
 * when loading a shared library itself - an optional header with
 * COFF_SHMAGIC is required and no process/trap state is touched.
 *
 * Errors are threaded through 'status' (negative errno); the
 * do/while(0) below allows 'break' as a structured early exit from
 * the validation sequence.  Once flush_old_exec() has run, a failure
 * sends SIGSEGV and the function still returns 0 ("We are committed").
 */
static int load_object (struct linux_binprm * bprm, struct pt_regs *regs, int lib_ok)
{
	COFF_FILHDR *coff_hdr = (COFF_FILHDR *) bprm->buf;	/* COFF Header */
	COFF_SCNHDR *sect_bufr;		/* Pointer to section table */
	COFF_SCNHDR *text_sect;		/* Pointer to the text section */
	COFF_SCNHDR *data_sect;		/* Pointer to the data section */
	COFF_SCNHDR *bss_sect;		/* Pointer to the bss section */
	int text_count;			/* Number of text sections */
	int data_count;			/* Number of data sections */
	int bss_count;			/* Number of bss sections */
	int lib_count;			/* Number of lib sections */
	unsigned int start_addr = 0;	/* Starting location for program */
	int status = 0;			/* Result status register */
	int fd = -1;			/* Open file descriptor */
	struct file *fp = NULL;		/* Pointer to the file at "fd" */
	short int sections = 0;		/* Number of sections in the file */
	short int aout_size = 0;	/* Size of the a.out header area */
	short int flags;		/* Flag bits from the COFF header */

#ifdef COFF_DEBUG
	printk ("binfmt_coff entry: %s\n", bprm->filename);
#endif

	/*
	 * Validate the magic value for the object file.
	 */
	do {
		if (COFF_I386BADMAG (*coff_hdr)) {
#ifdef COFF_DEBUG
			printk ("bad filehdr magic\n");
#endif
			status = -ENOEXEC;
			break;
		}

		/*
		 * The object file should have 32 BIT little endian format. Do not allow
		 * it to have the 16 bit object file flag set as Linux is not able to run
		 * on the 80286/80186/8086.
		 */
		flags = COFF_SHORT (coff_hdr->f_flags);
		if ((flags & (COFF_F_AR32WR | COFF_F_AR16WR)) != COFF_F_AR32WR) {
#ifdef COFF_DEBUG
			printk ("invalid f_flags bits\n");
#endif
			status = -ENOEXEC;
			break;
		}

		/*
		 * Extract the header information which we need.
		 */
		sections = COFF_SHORT (coff_hdr->f_nscns);	/* Number of sections */
		aout_size = COFF_SHORT (coff_hdr->f_opthdr);	/* Size of opt. headr */

		/*
		 * If the file is not executable then reject the execution. This means
		 * that there must not be external references.
		 */
		if ((flags & COFF_F_EXEC) == 0) {
#ifdef COFF_DEBUG
			printk ("not executable bit\n");
#endif
			status = -ENOEXEC;
			break;
		}

		/*
		 * There must be at least one section.
		 */
		if (sections == 0) {
#ifdef COFF_DEBUG
			printk ("no sections\n");
#endif
			status = -ENOEXEC;
			break;
		}

		/*
		 * Do some additional consistency checks.
		 * The system requires mapping for this loader. If you try
		 * to use a file system with no mapping, the format is not valid.
		 */
		if (!bprm->inode->i_op ||
		    !bprm->inode->i_op->default_file_ops->mmap) {
#ifdef COFF_DEBUG
			printk ("no mmap in fs\n");
#endif
			status = -ENOEXEC;
		}
	} while (0);

	/*
	 * Allocate a buffer to hold the entire coff section list.
	 */
	if (status >= 0) {
		int nbytes = sections * COFF_SCNHSZ;

		sect_bufr = (COFF_SCNHDR *) kmalloc (nbytes, GFP_KERNEL);
		if (0 == sect_bufr) {
#ifdef COFF_DEBUG
			printk ("kmalloc failed\n");
#endif
			status = -ENOEXEC;
		}
		/*
		 * Read the section list from the disk file.
		 */
		else {
			int old_fs = get_fs ();
			set_fs (get_ds ());	/* Make it point to the proper location */
			status = read_exec (bprm->inode,	/* INODE for file */
					    aout_size + COFF_FILHSZ,	/* Offset in the file */
					    (char *) sect_bufr,	/* Buffer for read */
					    nbytes);	/* Byte count reqd. */
			set_fs (old_fs);	/* Restore the selector */
#ifdef COFF_DEBUG
			if (status < 0)
				printk ("read aout hdr, status = %d\n", status);
#endif
		}
	}
	else
		sect_bufr = NULL;	/* Errors do not have a section buffer */

	/*
	 * Count the number of sections for the required types and store the location
	 * of the last section for the three primary types.
	 */
	text_count = 0;
	data_count = 0;
	bss_count = 0;
	lib_count = 0;

	text_sect = NULL;
	data_sect = NULL;
	bss_sect = NULL;

	/*
	 * Loop through the sections and find the various types
	 */
	if (status >= 0) {
		int nIndex;
		COFF_SCNHDR *sect_ptr = sect_bufr;

		for (nIndex = 0; nIndex < sections; ++nIndex) {
			long int sect_flags = COFF_LONG (sect_ptr->s_flags);

			switch (sect_flags) {
			case COFF_STYP_TEXT:
				text_sect = sect_ptr;
				++text_count;
				status = is_properly_aligned (sect_ptr);
				break;

			case COFF_STYP_DATA:
				data_sect = sect_ptr;
				++data_count;
				status = is_properly_aligned (sect_ptr);
				break;

			case COFF_STYP_BSS:
				bss_sect = sect_ptr;
				++bss_count;
				break;

			case COFF_STYP_LIB:
#ifdef COFF_DEBUG
				printk (".lib section found\n");
#endif
				++lib_count;
				break;

			default:
				break;
			}
			/* advance by the raw header size; the headers are
			 * packed, so plain pointer arithmetic on the struct
			 * type is not used */
			sect_ptr = (COFF_SCNHDR *) & ((char *) sect_ptr)[COFF_SCNHSZ];
		}

		/*
		 * Ensure that there are the required sections. There must be one text
		 * sections and one each of the data and bss sections for an executable.
		 * A library may or may not have a data / bss section.
		 */
		if (text_count != 1) {
			status = -ENOEXEC;
#ifdef COFF_DEBUG
			printk ("no text sections\n");
#endif
		}
		else {
			if (lib_ok) {
				if (data_count != 1 || bss_count != 1) {
					status = -ENOEXEC;
#ifdef COFF_DEBUG
					printk ("no .data nor .bss sections\n");
#endif
				}
			}
		}
	}

	/*
	 * If there is no additional header then assume the file starts at
	 * the first byte of the text section. This may not be the proper place,
	 * so the best solution is to include the optional header. A shared library
	 * __MUST__ have an optional header to indicate that it is a shared library.
	 */
	if (status >= 0) {
		if (aout_size == 0) {
			if (!lib_ok) {
				status = -ENOEXEC;
#ifdef COFF_DEBUG
				printk ("no header in library\n");
#endif
			}
			start_addr = COFF_LONG (text_sect->s_vaddr);
		}
		/*
		 * There is some header. Ensure that it is sufficient.
		 */
		else {
			if (aout_size < COFF_AOUTSZ) {
				status = -ENOEXEC;
#ifdef COFF_DEBUG
				printk ("header too small\n");
#endif
			}
			else {
				COFF_AOUTHDR *aout_hdr =	/* Pointer to a.out header */
					(COFF_AOUTHDR *) & ((char *) coff_hdr)[COFF_FILHSZ];
				short int aout_magic = COFF_SHORT (aout_hdr->magic); /* id */

				/*
				 * Validate the magic number in the a.out header. If it is valid then
				 * update the starting symbol location. Do not accept these file formats
				 * when loading a shared library.
				 */
				switch (aout_magic) {
				case COFF_OMAGIC:
				case COFF_ZMAGIC:
				case COFF_STMAGIC:
					if (!lib_ok) {
						status = -ENOEXEC;
#ifdef COFF_DEBUG
						printk ("wrong a.out header magic\n");
#endif
					}
					start_addr = (unsigned int) COFF_LONG (aout_hdr->entry);
					break;

				/*
				 * Magic value for a shared library. This is valid only when loading a
				 * shared library. (There is no need for a start_addr. It won't be used.)
				 */
				case COFF_SHMAGIC:
					if (lib_ok) {
#ifdef COFF_DEBUG
						printk ("wrong a.out header magic\n");
#endif
						status = -ENOEXEC;
					}
					break;

				default:
#ifdef COFF_DEBUG
					printk ("wrong a.out header magic\n");
#endif
					status = -ENOEXEC;
					break;
				}
			}
		}
	}

	/*
	 * Fetch a file pointer to the executable.
	 */
	if (status >= 0) {
		fd = open_inode (bprm->inode, O_RDONLY);
		if (fd < 0) {
#ifdef COFF_DEBUG
			printk ("can not open inode, result = %d\n", fd);
#endif
			status = fd;
		}
		else
			fp = current->files->fd[fd];
	}
	else
		fd = -1;		/* Invalidate the open file descriptor */

	/*
	 * Generate the proper values for the text fields
	 *
	 * THIS IS THE POINT OF NO RETURN. THE NEW PROCESS WILL TRAP OUT SHOULD
	 * SOMETHING FAIL IN THE LOAD SEQUENCE FROM THIS POINT ONWARD.
	 */
	if (status >= 0) {
		long text_scnptr = COFF_LONG (text_sect->s_scnptr);
		long text_size = COFF_LONG (text_sect->s_size);
		long text_vaddr = COFF_LONG (text_sect->s_vaddr);

		long data_scnptr;
		long data_size;
		long data_vaddr;

		long bss_size;
		long bss_vaddr;

		/*
		 * Generate the proper values for the data fields
		 */
		if (data_sect != NULL) {
			data_scnptr = COFF_LONG (data_sect->s_scnptr);
			data_size = COFF_LONG (data_sect->s_size);
			data_vaddr = COFF_LONG (data_sect->s_vaddr);
		}
		else {
			data_scnptr = 0;
			data_size = 0;
			data_vaddr = 0;
		}

		/*
		 * Generate the proper values for the bss fields
		 */
		if (bss_sect != NULL) {
			bss_size = COFF_LONG (bss_sect->s_size);
			bss_vaddr = COFF_LONG (bss_sect->s_vaddr);
		}
		else {
			bss_size = 0;
			bss_vaddr = 0;
		}

		/*
		 * Flush the executable from memory. At this point the executable is
		 * committed to being defined or a segmentation violation will occur.
		 * (Only done for a main executable, not for a library load.)
		 */
		if (lib_ok) {
#ifdef COFF_DEBUG
			printk ("flushing executable\n");
#endif
			flush_old_exec (bprm);

			/*
			 * Define the initial locations for the various items in the new process
			 */
			current->mm->mmap = NULL;
			current->mm->rss = 0;

			/*
			 * Construct the parameter and environment string table entries.
			 */
			bprm->p += change_ldt (0, bprm->page);
			bprm->p -= MAX_ARG_PAGES*PAGE_SIZE;
			bprm->p = (unsigned long) create_tables ((char *) bprm->p,
								 bprm->argc,
								 bprm->envc,
								 1);

			/*
			 * Do the end processing once the stack has been constructed
			 */
			current->mm->start_code = text_vaddr & PAGE_MASK;
			current->mm->end_code = text_vaddr + text_size;
			current->mm->end_data = data_vaddr + data_size;
			current->mm->start_brk = current->mm->brk = bss_vaddr + bss_size;

			current->suid = current->euid = bprm->e_uid;
			current->sgid = current->egid = bprm->e_gid;
			current->executable = bprm->inode;	/* Store inode for file */
			++bprm->inode->i_count;		/* Count the open inode */
			regs->eip = start_addr;		/* Current EIP register */
			regs->esp = current->mm->start_stack = bprm->p;
		}

		/*
		 * Map the text pages.  do_mmap returns the mapped address on
		 * success, which must equal the requested page-aligned vaddr.
		 */
#ifdef COFF_DEBUG
		printk (".text: vaddr = %d, size = %d, scnptr = %d\n",
			text_vaddr, text_size, text_scnptr);
#endif
		status = do_mmap (fp,
				  text_vaddr & PAGE_MASK,
				  text_size + (text_vaddr & ~PAGE_MASK),
				  PROT_READ | PROT_EXEC,
				  MAP_FIXED | MAP_SHARED,
				  text_scnptr & PAGE_MASK);

		status = (status == (text_vaddr & PAGE_MASK)) ? 0 : -ENOEXEC;

		/*
		 * Map the data pages
		 */
		if (status >= 0 && data_size != 0) {
#ifdef COFF_DEBUG
			printk (".data: vaddr = %d, size = %d, scnptr = %d\n",
				data_vaddr, data_size, data_scnptr);
#endif
			status = do_mmap (fp,
					  data_vaddr & PAGE_MASK,
					  data_size + (data_vaddr & ~PAGE_MASK),
					  PROT_READ | PROT_WRITE | PROT_EXEC,
					  MAP_FIXED | MAP_PRIVATE,
					  data_scnptr & PAGE_MASK);

			status = (status == (data_vaddr & PAGE_MASK)) ? 0 : -ENOEXEC;
		}

		/*
		 * Construct the bss data for the process. The bss ranges from the
		 * end of the data (which may not be on a page boundary) to the end
		 * of the bss section. Allocate any necessary pages for the data.
		 */
		if (status >= 0 && bss_size != 0) {
#ifdef COFF_DEBUG
			printk (".bss: vaddr = %d, size = %d\n",
				bss_vaddr, bss_size);
#endif
			zeromap_page_range (PAGE_ALIGN (bss_vaddr),
					    PAGE_ALIGN (bss_size),
					    PAGE_COPY);

			status = clear_memory (bss_vaddr, bss_size);
		}

		/*
		 * Load any shared library for the executable.
		 */
		if (status >= 0 && lib_ok && lib_count != 0) {
			int nIndex;
			COFF_SCNHDR *sect_ptr = sect_bufr;

			/*
			 * Find the library sections. (There should be at least one. It was counted
			 * earlier.) This will eventually recurse to our code and load the shared
			 * library with our own procedures.
			 */
			for (nIndex = 0; nIndex < sections; ++nIndex) {
				long int sect_flags = COFF_LONG (sect_ptr->s_flags);

				if (sect_flags == COFF_STYP_LIB) {
					status = preload_library (bprm, sect_ptr, fp);
					if (status != 0)
						break;
				}
				sect_ptr = (COFF_SCNHDR *) &((char *) sect_ptr)[COFF_SCNHSZ];
			}
		}

		/*
		 * Generate any needed trap for this process. If an error occurred then
		 * generate a segmentation violation. If the process is being debugged
		 * then generate the load trap. (Note: If this is a library load then
		 * do not generate the trap here. Pass the error to the caller who
		 * will do it for the process in the outer lay of this procedure call.)
		 */
		if (lib_ok) {
			if (status < 0)
				send_sig (SIGSEGV, current, 0);	/* Generate the error trap */
			else {
				if (current->flags & PF_PTRACED)
					send_sig (SIGTRAP, current, 0);
			}
			status = 0;	/* We are committed. It can't fail */
		}
	}

	/*
	 * Do any cleanup processing
	 */
	if (fd >= 0)
		sys_close (fd);		/* Close unused code file */

	if (sect_bufr != NULL)
		kfree (sect_bufr);	/* Release section list buffer */

	/*
	 * Return the completion status.
	 */
#ifdef COFF_DEBUG
	printk ("binfmt_coff: result = %d\n", status);
#endif
	return (status);
}
/* * load an fdpic binary into various bits of memory */ static int load_elf_fdpic_binary(struct linux_binprm *bprm, struct pt_regs *regs) { struct elf_fdpic_params exec_params, interp_params; struct elf_phdr *phdr; unsigned long stack_size; struct file *interpreter = NULL; /* to shut gcc up */ char *interpreter_name = NULL; int executable_stack; int retval, i; memset(&exec_params, 0, sizeof(exec_params)); memset(&interp_params, 0, sizeof(interp_params)); exec_params.hdr = *(struct elfhdr *) bprm->buf; exec_params.flags = ELF_FDPIC_FLAG_PRESENT | ELF_FDPIC_FLAG_EXECUTABLE; /* check that this is a binary we know how to deal with */ retval = -ENOEXEC; if (!is_elf_fdpic(&exec_params.hdr, bprm->file)) goto error; /* read the program header table */ retval = elf_fdpic_fetch_phdrs(&exec_params, bprm->file); if (retval < 0) goto error; /* scan for a program header that specifies an interpreter */ phdr = exec_params.phdrs; for (i = 0; i < exec_params.hdr.e_phnum; i++, phdr++) { switch (phdr->p_type) { case PT_INTERP: retval = -ENOMEM; if (phdr->p_filesz > PATH_MAX) goto error; retval = -ENOENT; if (phdr->p_filesz < 2) goto error; /* read the name of the interpreter into memory */ interpreter_name = (char *) kmalloc(phdr->p_filesz, GFP_KERNEL); if (!interpreter_name) goto error; retval = kernel_read(bprm->file, phdr->p_offset, interpreter_name, phdr->p_filesz); if (retval < 0) goto error; retval = -ENOENT; if (interpreter_name[phdr->p_filesz - 1] != '\0') goto error; kdebug("Using ELF interpreter %s", interpreter_name); /* replace the program with the interpreter */ interpreter = open_exec(interpreter_name); retval = PTR_ERR(interpreter); if (IS_ERR(interpreter)) { interpreter = NULL; goto error; } retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE); if (retval < 0) goto error; interp_params.hdr = *((struct elfhdr *) bprm->buf); break; case PT_LOAD: #ifdef CONFIG_MMU if (exec_params.load_addr == 0) exec_params.load_addr = phdr->p_vaddr; #endif break; } } if 
(elf_check_const_displacement(&exec_params.hdr)) exec_params.flags |= ELF_FDPIC_FLAG_CONSTDISP; /* perform insanity checks on the interpreter */ if (interpreter_name) { retval = -ELIBBAD; if (!is_elf_fdpic(&interp_params.hdr, interpreter)) goto error; interp_params.flags = ELF_FDPIC_FLAG_PRESENT; /* read the interpreter's program header table */ retval = elf_fdpic_fetch_phdrs(&interp_params, interpreter); if (retval < 0) goto error; } stack_size = exec_params.stack_size; if (stack_size < interp_params.stack_size) stack_size = interp_params.stack_size; if (exec_params.flags & ELF_FDPIC_FLAG_EXEC_STACK) executable_stack = EXSTACK_ENABLE_X; else if (exec_params.flags & ELF_FDPIC_FLAG_NOEXEC_STACK) executable_stack = EXSTACK_DISABLE_X; else if (interp_params.flags & ELF_FDPIC_FLAG_EXEC_STACK) executable_stack = EXSTACK_ENABLE_X; else if (interp_params.flags & ELF_FDPIC_FLAG_NOEXEC_STACK) executable_stack = EXSTACK_DISABLE_X; else executable_stack = EXSTACK_DEFAULT; retval = -ENOEXEC; if (stack_size == 0) goto error; if (elf_check_const_displacement(&interp_params.hdr)) interp_params.flags |= ELF_FDPIC_FLAG_CONSTDISP; /* flush all traces of the currently running executable */ retval = flush_old_exec(bprm); if (retval) goto error; /* there's now no turning back... the old userspace image is dead, * defunct, deceased, etc. 
after this point we have to exit via * error_kill */ set_personality(PER_LINUX_FDPIC); set_binfmt(&elf_fdpic_format); current->mm->start_code = 0; current->mm->end_code = 0; current->mm->start_stack = 0; current->mm->start_data = 0; current->mm->end_data = 0; current->mm->context.exec_fdpic_loadmap = 0; current->mm->context.interp_fdpic_loadmap = 0; current->flags &= ~PF_FORKNOEXEC; #ifdef CONFIG_MMU elf_fdpic_arch_lay_out_mm(&exec_params, &interp_params, ¤t->mm->start_stack, ¤t->mm->start_brk); #endif /* do this so that we can load the interpreter, if need be * - we will change some of these later */ set_mm_counter(current->mm, rss, 0); #ifdef CONFIG_MMU retval = setup_arg_pages(bprm, current->mm->start_stack, executable_stack); if (retval < 0) { send_sig(SIGKILL, current, 0); goto error_kill; } #endif /* load the executable and interpreter into memory */ retval = elf_fdpic_map_file(&exec_params, bprm->file, current->mm, "executable"); if (retval < 0) goto error_kill; if (interpreter_name) { retval = elf_fdpic_map_file(&interp_params, interpreter, current->mm, "interpreter"); if (retval < 0) { printk(KERN_ERR "Unable to load interpreter\n"); goto error_kill; } allow_write_access(interpreter); fput(interpreter); interpreter = NULL; } #ifdef CONFIG_MMU if (!current->mm->start_brk) current->mm->start_brk = current->mm->end_data; current->mm->brk = current->mm->start_brk = PAGE_ALIGN(current->mm->start_brk); #else /* create a stack and brk area big enough for everyone * - the brk heap starts at the bottom and works up * - the stack starts at the top and works down */ stack_size = (stack_size + PAGE_SIZE - 1) & PAGE_MASK; if (stack_size < PAGE_SIZE * 2) stack_size = PAGE_SIZE * 2; down_write(¤t->mm->mmap_sem); current->mm->start_brk = do_mmap(NULL, 0, stack_size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANON | MAP_GROWSDOWN, 0); if (IS_ERR((void *) current->mm->start_brk)) { up_write(¤t->mm->mmap_sem); retval = current->mm->start_brk; 
current->mm->start_brk = 0; goto error_kill; } if (do_mremap(current->mm->start_brk, stack_size, ksize((char *) current->mm->start_brk), 0, 0 ) == current->mm->start_brk ) stack_size = ksize((char *) current->mm->start_brk); up_write(¤t->mm->mmap_sem); current->mm->brk = current->mm->start_brk; current->mm->context.end_brk = current->mm->start_brk; current->mm->context.end_brk += (stack_size > PAGE_SIZE) ? (stack_size - PAGE_SIZE) : 0; current->mm->start_stack = current->mm->start_brk + stack_size; #endif compute_creds(bprm); current->flags &= ~PF_FORKNOEXEC; if (create_elf_fdpic_tables(bprm, current->mm, &exec_params, &interp_params) < 0) goto error_kill; kdebug("- start_code %lx", (long) current->mm->start_code); kdebug("- end_code %lx", (long) current->mm->end_code); kdebug("- start_data %lx", (long) current->mm->start_data); kdebug("- end_data %lx", (long) current->mm->end_data); kdebug("- start_brk %lx", (long) current->mm->start_brk); kdebug("- brk %lx", (long) current->mm->brk); kdebug("- start_stack %lx", (long) current->mm->start_stack); #ifdef ELF_FDPIC_PLAT_INIT /* * The ABI may specify that certain registers be set up in special * ways (on i386 %edx is the address of a DT_FINI function, for * example. This macro performs whatever initialization to * the regs structure is required. */ ELF_FDPIC_PLAT_INIT(regs, exec_params.map_addr, interp_params.map_addr, interp_params.dynamic_addr ?: exec_params.dynamic_addr ); #endif /* everything is now ready... 
get the userspace context ready to roll */ start_thread(regs, interp_params.entry_addr ?: exec_params.entry_addr, current->mm->start_stack); if (unlikely(current->ptrace & PT_PTRACED)) { if (current->ptrace & PT_TRACE_EXEC) ptrace_notify ((PTRACE_EVENT_EXEC << 8) | SIGTRAP); else send_sig(SIGTRAP, current, 0); } retval = 0; error: if (interpreter) { allow_write_access(interpreter); fput(interpreter); } if (interpreter_name) kfree(interpreter_name); if (exec_params.phdrs) kfree(exec_params.phdrs); if (exec_params.loadmap) kfree(exec_params.loadmap); if (interp_params.phdrs) kfree(interp_params.phdrs); if (interp_params.loadmap) kfree(interp_params.loadmap); return retval; /* unrecoverable error - kill the process */ error_kill: send_sig(SIGSEGV, current, 0); goto error; } /* end load_elf_fdpic_binary() */
/*
 * load_macho_binary - load a Mach-O executable (ARM, iPhoneOS-style ABI)
 * @bprm: binary parameter block from the exec machinery
 * @regs: register file to prime for the new userspace image
 *
 * Validates the image with ml_checkImage(), commits via flush_old_exec(),
 * walks the load commands (LC_SEGMENT / LC_LOAD_DYLINKER / LC_UNIXTHREAD),
 * optionally bootstraps the dynamic linker, hand-builds the argv/envp stack
 * area, and enters userspace through nommu_start_thread().
 *
 * Returns 0 on success or a negative errno.  Note that once
 * flush_old_exec() has succeeded, the old image is gone; later failures
 * leave the process doomed (BAD_ADDR path raises SIGSEGV).
 */
static int load_macho_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	unsigned long def_flags = 0;
	void* entry_point = 0;
	int retval = -ENOEXEC;
	int file_size = 0;
	int executable_stack = EXSTACK_DEFAULT;  /* never consulted: setup_arg_pages call below is commented out */
	size_t macho_header_sz = sizeof(macho_header);
	macho_header* head = ((macho_header*)bprm->buf);
	struct file *linker_file = NULL;

	/* have we got enough space? (head aliases bprm->buf, so this can
	 * only fail if bprm->buf itself is NULL) */
	if (!head) {
		retval = -ENOMEM;
		goto out_ret;
	}

	/* magic / architecture / sanity checks live in ml_checkImage() */
	retval = ml_checkImage(bprm->file, head);
	if (retval) {
		printk(KERN_WARNING "load_macho_binary: image failed sanity checks, not loading \n");
		goto out_ret;
	}

	/* XXX: this should be retrieved by ml_checkImage() */
	file_size = ml_getFileSize(bprm->file);

	/* The file seems to be alright, so set up an environment for the new
	 * binary to run in.  After this, the old image will no longer be
	 * usable.  If some of the load commands are broken, this process is
	 * doomed. */
	retval = flush_old_exec(bprm);
	if (retval) {
		/* NOTE(review): panicking the whole machine on a per-exec
		 * failure is very aggressive; returning retval would only
		 * kill this process. */
		panic("load_macho_binary: flush_old_exec failed\n");
	} else {
		current->flags &= ~PF_FORKNOEXEC;
		current->mm->def_flags = def_flags;
		setup_new_exec(bprm);

		/* set personality: plain Linux plus the 32-bit address
		 * limit flag */
		unsigned int personality = current->personality & ~PER_MASK;
		personality |= PER_LINUX;
		/* This flag has to be set for 32x architectures (I think). */
		personality |= ADDR_LIMIT_32BIT;
		set_personality(personality);

		/* reset the mmap allocation hints */
		current->mm->free_area_cache = current->mm->mmap_base;
		current->mm->cached_hole_size = 0;

		//retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP), executable_stack);
		/* NOTE(review): since the call above is commented out, this
		 * tests the stale retval from flush_old_exec (known 0 here);
		 * the block is dead code. */
		if (retval < 0) {
			//send_sig(SIGKILL, current, 0);
			//goto out_ret;
		}

		/* stack */
		current->mm->start_stack = bprm->p;
	}

	/* Read the load commands from the file. */
	size_t offset;
	size_t oldoffset;
	uint32_t ncmds;
	uint8_t* addr;

	offset = 0;
	ncmds = head->ncmds;
	/* NOTE(review): kmalloc result is not checked for NULL, and
	 * head->sizeofcmds is attacker-controlled - confirm upper bound is
	 * enforced by ml_checkImage(). */
	addr = kmalloc(head->sizeofcmds, GFP_KERNEL);

	/* default to "malformed" until the walk below succeeds */
	retval = -EINVAL;
	int ret = 0;

	/* Top of the image data. This is needed to position the heap. */
	int top_data = 0;

	/* First text segment where the mach header is. */
	void* first_text = 0;
	void* first_text_linker = 0;

	/* read in load commands
	 * NOTE(review): kernel_read return value is unchecked; a short read
	 * would leave addr partially uninitialized. */
	kernel_read(bprm->file, macho_header_sz, addr, head->sizeofcmds);
	while (ncmds--) {
		/* LC pointer - cursor into the command buffer */
		struct load_command *lcp = (struct load_command *)(addr + offset);
		oldoffset = offset;
		offset += lcp->cmdsize;
		/* reject wrap-around, undersized commands, and overruns of
		 * the command area (bound includes macho_header_sz slack -
		 * presumably intentional; verify) */
		if (oldoffset > offset || lcp->cmdsize < sizeof(struct load_command) || offset > head->sizeofcmds + macho_header_sz) {
			printk(KERN_WARNING "load_macho_binary: malformed binary - lc overflow \n");
			goto lc_ret;
		}
		/* Parse load commands.  We only need a bare minimum to get
		 * the image up and running; dyld will take care of all the
		 * other stuff. */
		switch(lcp->cmd) {
		case LC_SEGMENT:
			ret = ml_loadSegment(bprm, file_size, (struct segment_command*)lcp, &top_data, &first_text, 0);
			if (ret != LOAD_SUCCESS) {
				printk(KERN_WARNING "load_macho_binary: segment loading failure \n");
				goto lc_ret;
			}
			break;
		case LC_LOAD_DYLINKER:
			ret = ml_loadDylinker(bprm, file_size, (struct dylinker_command*)lcp, &linker_file);
			if (ret != LOAD_SUCCESS) {
				printk(KERN_WARNING "load_macho_binary: dylinker loading failure \n");
				goto lc_ret;
			} else {
				/* done */
			}
			break;
		case LC_UNIXTHREAD:
			ret = ml_loadUnixThread(bprm, file_size, (struct arm_thread_command*)lcp, &entry_point);
			if (ret != LOAD_SUCCESS) {
				printk(KERN_WARNING "load_macho_binary: unix thread loading failure \n");
				goto lc_ret;
			}
			break;
		default:
			if (_verboseLog)
				printk(KERN_WARNING "load_macho_binary: unsupported lc 0x%p \n", (void*)lcp->cmd);
			break;
		}
	}

	/* Bootstrap the dynamic linker if needed. */
	if (linker_file) {
		/* linker is loaded at the current top of the image */
		int dylinker_load_addr = top_data;
		ml_bootstrapDylinker(linker_file, &top_data, &first_text_linker, &entry_point);
		/* slide the entry point by the linker's load address */
		entry_point = entry_point + dylinker_load_addr;
		if (_verboseLog)
			printk(KERN_WARNING "load_macho_binary: dylinker's first text segment @ %d, new pc @ %d \n", first_text_linker, (int)entry_point);
	}

	/* Publish the image layout in mm (see 'linux/mm_types.h':
	 * start_code, end_code, start_data, end_data, start_brk, brk,
	 * start_stack).  sys_brk and /proc consumers read these. */
	current->mm->start_code = 0;		/* code assumed to start at 0 */
	current->mm->end_code = top_data;	/* everything below top_data */
	current->mm->start_data = 0;
	current->mm->end_data = top_data;

	if (_verboseLog)
		printk(KERN_WARNING "load_macho_binary: setting up heap ...\n");

	/* Set up an empty heap directly above the image; it will be grown
	 * as more memory is allocated. */
	int brkret = ml_setBrk(top_data, top_data);

	if (_verboseLog)
		printk(KERN_WARNING "load_macho_binary: setting up misc ...\n");

	/* register the binfmt and install credentials */
	set_binfmt(&macho_format);
	install_exec_creds(bprm);

	/* Stack (grows down on ARM).
	 * NOTE(review): bprm->p is an unsigned long assigned straight into a
	 * uint32_t* below; the implicit conversions here and in the
	 * put_user/get_user loops mix user pointers and integers freely -
	 * only valid on 32-bit nommu, confirm. */
	uint32_t* stack = bprm->p;
	uint32_t* argv_array;
	uint32_t* argv;
	uint32_t* envp_array;
	uint32_t* envp;
	uint32_t total_argv_size;	/* NOTE(review): never used */
	uint32_t total_env_size;	/* NOTE(review): never used */

	/* Reserve the envp pointer array (envc entries + NULL terminator). */
	envp = envp_array = stack = (uint32_t*)stack - ((bprm->envc+1));
	/* Reserve the argv pointer array (argc entries + NULL terminator). */
	argv = argv_array = stack = (uint32_t*)stack - ((bprm->argc+1));

	if (_verboseLog)
		printk(KERN_WARNING "load_macho_binary: setting up stack @ %p ...\n", (uint32_t*)stack);

	uint32_t argc = bprm->argc;
	uint32_t envc = bprm->envc;
	char* p = bprm->p;	/* cursor over the packed arg/env strings */

	/* Fill the argv array: one pointer per NUL-terminated string,
	 * scanning forward through the packed strings at bprm->p. */
	current->mm->arg_start = (unsigned long)p;
	while(argc--) {
		char c;
		put_user(p,argv++);
		do {
			get_user(c,p++);
		} while (c);	/* skip to just past the terminating NUL */
	}
	put_user(NULL,argv);	/* NOTE(review): NULL into a u32 slot - works on 32-bit only */

	/* Fill the envp array the same way. */
	current->mm->arg_end = current->mm->env_start = (unsigned long) p;
	while(envc--) {
		char c;
		put_user(p,envp++);
		do {
			get_user(c,p++);
		} while (c);
	}
	put_user(NULL,envp);
	current->mm->env_end = (unsigned long) p;

	/* The actual stuff passed to the linker goes here: a 4-word frame
	 * below the argv/envp arrays.
	 * NOTE(review): written with direct stores rather than put_user -
	 * assumes kernel can dereference user stack here; confirm. */
	stack = (uint32_t*)stack - (4);
	stack[0] = (uint32_t)first_text;	/* mach_header */
	stack[1] = bprm->argc;			/* argc */
	stack[2] = argv_array;			/* argv */
	stack[3] = (uint32_t)first_text_linker;	/* linker's mach_header */

	if (_verboseLog)
		printk(KERN_WARNING "load_macho_binary: setting up main thread ...\n");

	/* Set up the main thread */
	if (BAD_ADDR(entry_point)) {
		/* entry point is not executable */
		printk(KERN_WARNING "load_macho_binary: bad entry point \n");
		force_sig(SIGSEGV, current);
		retval = -EINVAL;
		goto lc_ret;
	}

	if (_verboseLog)
		printk(KERN_WARNING "load_macho_binary: setting up registers ...\n");

	/* See 'start_thread' in 'processor.h'.  'start_thread' provides an
	 * ELF implementation of this function.  This is for the Darwin ABI
	 * implementation which is used by iPhoneOS binaries. */
	unsigned long initial_pc = (unsigned long)entry_point;

	/* exit supervisor and enter user */
	set_fs(USER_DS);
	memset(regs->uregs, 0, sizeof(regs->uregs));
	regs->ARM_cpsr = USR_MODE;
	/* enter Thumb state if the entry point has the Thumb bit set */
	if (elf_hwcap & HWCAP_THUMB && initial_pc & 1)
		regs->ARM_cpsr |= PSR_T_BIT;
	/* set up control regs */
	regs->ARM_cpsr |= PSR_ENDSTATE;
	regs->ARM_pc = initial_pc & ~1;	/* pc, Thumb bit stripped */
	regs->ARM_sp = stack;		/* sp */
	/* This is actually ignored, but set it anyway */
	regs->ARM_r2 = stack[2];	/* r2 (envp) */
	regs->ARM_r1 = stack[1];	/* r1 (argv) */
	regs->ARM_r0 = stack[0];	/* r0 (argc) */
	/* this will work for mmu and nonmmu */
	nommu_start_thread(regs);
	wire_weird_pages();

	/* Binary is now loaded. Return 0 to signify success. */
	retval = 0;

	if (_verboseLog)
		printk(KERN_WARNING "load_macho_binary: complete, heap starts at %d, brkret %d \n", top_data, brkret);

	/* Teardown */
lc_ret:
	kfree(addr);
out_ret:
	return retval;
}
/*
 * do_load_aout_binary - load an a.out (ZMAGIC/OMAGIC/QMAGIC) executable
 * @bprm: binary parameter block from the exec machinery
 * @regs: register file to prime for the new userspace image
 *
 * Validates the exec header, checks RLIMIT_DATA, commits with
 * flush_old_exec(), maps (or reads) text and data according to the magic
 * number, swaps in the a.out binfmt/exec-domain, builds the argument
 * tables, and enters userspace via start_thread().
 *
 * Returns 0 (or, on ARM, the new r0) on success, negative errno on
 * failure before the point of no return; after flush_old_exec() failures
 * are fatal to the process (SIGKILL).
 */
static inline int do_load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
{
	struct exec ex;
	struct file * file;
	int fd;
	unsigned long error;
	unsigned long p = bprm->p;	/* running top-of-arg-area pointer */
	unsigned long fd_offset;	/* file offset of the text image */
	unsigned long rlim;

	ex = *((struct exec *) bprm->buf);		/* exec-header */
	/* reject unknown magics, relocation info, and files too small to
	 * hold the advertised text+data+symbols */
	if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != OMAGIC && N_MAGIC(ex) != QMAGIC) ||
	    N_TRSIZE(ex) || N_DRSIZE(ex) ||
	    bprm->inode->i_size < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) {
		return -ENOEXEC;
	}
	current->personality = PER_LINUX;
	fd_offset = N_TXTOFF(ex);
#if defined (__i386__) || defined (CONFIG_ARM)
	/* ZMAGIC text must begin exactly one block into the file so that it
	 * can be mmapped at a block boundary */
	if (N_MAGIC(ex) == ZMAGIC && fd_offset != BLOCK_SIZE) {
		printk(KERN_NOTICE "N_TXTOFF != BLOCK_SIZE. See a.out.h.\n");
		return -ENOEXEC;
	}

	if (N_MAGIC(ex) == ZMAGIC && ex.a_text &&
	    (fd_offset < bprm->inode->i_sb->s_blocksize)) {
		printk(KERN_NOTICE "N_TXTOFF < BLOCK_SIZE. Please convert binary.\n");
		return -ENOEXEC;
	}
#endif
#if defined(CONFIG_ARM)
	if (N_MACHTYPE(ex) != M_ARM) {
		printk(KERN_NOTICE "Binary != ARM. Please recompile binary.\n");
		return -ENOEXEC;
	}
#endif

	/* Check initial limits. This avoids letting people circumvent
	 * size limits imposed on them by creating programs with large
	 * arrays in the data or bss.
	 */
	rlim = current->rlim[RLIMIT_DATA].rlim_cur;
	if (rlim >= RLIM_INFINITY)
		rlim = ~0;
	if (ex.a_data + ex.a_bss > rlim)
		return -ENOMEM;

	/* Flush all traces of the currently running executable */
	if (flush_old_exec(bprm))
		return -ENOMEM;

	/* OK, This is the point of no return */
	/* each assignment sets start_* and derives end_* in one expression */
	current->mm->end_code = ex.a_text +
		(current->mm->start_code = N_TXTADDR(ex));
	current->mm->end_data = ex.a_data +
		(current->mm->start_data = N_DATADDR(ex));
	current->mm->brk = ex.a_bss +
		(current->mm->start_brk = N_BSSADDR(ex));
	current->mm->rss = 0;
	current->mm->mmap = NULL;
	current->suid = current->euid = current->fsuid = bprm->e_uid;
	current->sgid = current->egid = current->fsgid = bprm->e_gid;
	current->flags &= ~PF_FORKNOEXEC;
	if (N_MAGIC(ex) == OMAGIC) {
		/* OMAGIC: text and data are contiguous in the file; map an
		 * anonymous region and read the image into it.
		 * NOTE(review): do_mmap/read_exec return values are not
		 * checked on this path. */
#if defined(__alpha__) || defined(CONFIG_ARM)
#ifndef CONFIG_ARM
		do_mmap(NULL, N_TXTADDR(ex) & PAGE_MASK,
			ex.a_text+ex.a_data + PAGE_SIZE - 1,
			PROT_READ|PROT_WRITE|PROT_EXEC,
			MAP_FIXED|MAP_PRIVATE, 0);
#else
		do_mmap(NULL, N_TXTADDR(ex), ex.a_text+ex.a_data,
			PROT_READ|PROT_WRITE|PROT_EXEC,
			MAP_FIXED|MAP_PRIVATE, 0);
#endif
		read_exec(bprm->inode, fd_offset, (char *) N_TXTADDR(ex),
			  ex.a_text+ex.a_data, 0);
#else
		do_mmap(NULL, 0, ex.a_text+ex.a_data,
			PROT_READ|PROT_WRITE|PROT_EXEC,
			MAP_FIXED|MAP_PRIVATE, 0);
		read_exec(bprm->inode, 32, (char *) 0, ex.a_text+ex.a_data, 0);
#endif
	} else {
		/* ZMAGIC/QMAGIC: demand-page text and data straight from
		 * the file when possible */
		if (ex.a_text & 0xfff || ex.a_data & 0xfff)
			printk(KERN_NOTICE "executable not page aligned\n");

		fd = open_inode(bprm->inode, O_RDONLY);
		if (fd < 0) {
			send_sig(SIGKILL, current, 0);
			return fd;
		}
		file = current->files->fd[fd];
		if (!file->f_op || !file->f_op->mmap) {
			/* filesystem cannot mmap: fall back to reading the
			 * whole image into an anonymous mapping */
			sys_close(fd);
			do_mmap(NULL, 0, ex.a_text+ex.a_data,
				PROT_READ|PROT_WRITE|PROT_EXEC,
				MAP_FIXED|MAP_PRIVATE, 0);
			read_exec(bprm->inode, fd_offset,
				  (char *) N_TXTADDR(ex), ex.a_text+ex.a_data, 0);
			goto beyond_if;
		}

		error = do_mmap(file, N_TXTADDR(ex), ex.a_text,
			PROT_READ | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
			fd_offset);

		if (error != N_TXTADDR(ex)) {
			sys_close(fd);
			send_sig(SIGKILL, current, 0);
			return error;
		}

		error = do_mmap(file, N_DATADDR(ex), ex.a_data,
				PROT_READ | PROT_WRITE | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
				fd_offset + ex.a_text);
		sys_close(fd);
		if (error != N_DATADDR(ex)) {
			send_sig(SIGKILL, current, 0);
			return error;
		}
	}
beyond_if:
	/* swap the old binfmt/exec-domain reference counts for the a.out
	 * ones */
	if (current->exec_domain && current->exec_domain->use_count)
		(*current->exec_domain->use_count)--;
	if (current->binfmt && current->binfmt->use_count)
		(*current->binfmt->use_count)--;
	current->exec_domain = lookup_exec_domain(current->personality);
	current->binfmt = &aout_format;
	if (current->exec_domain && current->exec_domain->use_count)
		(*current->exec_domain->use_count)++;
	if (current->binfmt && current->binfmt->use_count)
		(*current->binfmt->use_count)++;

	/* map the bss/brk region, then lay down argv/envp tables on the
	 * stack */
	set_brk(current->mm->start_brk, current->mm->brk);

	p = setup_arg_pages(p, bprm);
	p = (unsigned long) create_aout_tables((char *)p, bprm);
	current->mm->start_stack = p;
#ifdef __alpha__
	regs->gp = ex.a_gpvalue;
#endif
	start_thread(regs, ex.a_entry, p);
	if (current->flags & PF_PTRACED)
		send_sig(SIGTRAP, current, 0);
#ifndef CONFIG_ARM
	return 0;
#else
	/* on ARM the syscall return register is handed back directly */
	return regs->ARM_r0;
#endif
}
/*
 * xout_load_object - process a Xenix x.out load operation
 * @bpp: binary parameter block from the exec machinery
 * @rp:  register file to prime for the new image
 * @executable: non-zero when loading the main executable (as opposed to a
 *              library segment); gates the flush/personality/trap logic
 *
 * Validates the x.out header, reads the segment table, and maps all
 * XS_AMEM segments via xout_amen(); non-pageable images get their
 * text/data read in explicitly afterwards.
 *
 * Returns a negative errno on early failure; on success returns
 * current->mm->brk, because Xenix 386 programs expect the initial brk
 * value in %eax on startup (see comment at the bottom).
 */
static int xout_load_object(struct linux_binprm * bpp, struct pt_regs *rp, int executable)
{
	struct xexec *xexec = (struct xexec *)bpp->buf;
	/* extended header immediately follows the base header in the buffer */
	struct xext *xext = (struct xext *)(xexec + 1);
	struct xseg *seglist;
	struct file *fp = NULL;	/* NOTE(review): stays NULL yet is passed to xout_amen() below - confirm xout_amen tolerates/ignores it */
	u_long addr, lPers;
	int nsegs, ntext, ndata;
	int pageable = 1, err = 0, i;
#ifdef CONFIG_BINFMT_XOUT_X286
	struct file *file;
#endif

	/* personality requested via the ABI dispatcher; default to Xenix */
	lPers = abi_personality((char *)_BX(rp));
	if (lPers == 0) lPers = PER_XENIX;

	if (xexec->x_magic != X_MAGIC) {
		return -ENOEXEC;
	}

	switch (xexec->x_cpu & XC_CPU) {
	case XC_386:
		break;
#if defined(CONFIG_BINFMT_XOUT_X286)
	case XC_8086:
	case XC_286:
	case XC_286V:
	case XC_186:
		/* 16-bit binaries are handed to a userspace emulator by
		 * swapping bpp->file; we still return -ENOEXEC so the exec
		 * machinery retries with the emulator image */
		if (!Emulx286)
			return -ENOEXEC;
		file = open_exec(Emulx286);
		if (file) {
			fput(bpp->file);
			bpp->file = file;
			kernel_read(bpp->file, 0L, bpp->buf, sizeof(bpp->buf));
		}
		return -ENOEXEC;
#endif
	default:
		dprintk(KERN_DEBUG "xout: unsupported CPU type (%02x)\n",
			xexec->x_cpu);
		return -ENOEXEC;
	}

	/*
	 * We can't handle byte or word swapped headers. Well, we
	 * *could* but they should never happen surely?
	 */
	if ((xexec->x_cpu & (XC_BSWAP | XC_WSWAP)) != XC_WSWAP) {
		dprintk(KERN_DEBUG "xout: wrong byte or word sex (%02x)\n",
			xexec->x_cpu);
		return -ENOEXEC;
	}

	/* Check it's an executable. */
	if (!(xexec->x_renv & XE_EXEC)) {
		dprintk(KERN_DEBUG "xout: not executable\n");
		return -ENOEXEC;
	}

	/*
	 * There should be an extended header and there should be
	 * some segments. At this stage we don't handle non-segmented
	 * binaries. I'm not sure you can get them under Xenix anyway.
	 */
	if (xexec->x_ext != sizeof(struct xext)) {
		dprintk(KERN_DEBUG "xout: bad extended header\n");
		return -ENOEXEC;
	}
	if (!(xexec->x_renv & XE_SEG) || !xext->xe_segsize) {
		dprintk(KERN_DEBUG "xout: not segmented\n");
		return -ENOEXEC;
	}

	if (!(seglist = kmalloc(xext->xe_segsize, GFP_KERNEL))) {
		printk(KERN_WARNING "xout: allocating segment list failed\n");
		return -ENOMEM;
	}

	err = kernel_read(bpp->file, xext->xe_segpos,
			(char *)seglist, xext->xe_segsize);
	if (err < 0) {
		dprintk(KERN_DEBUG "xout: problem reading segment table\n");
		goto out;
	}

	/* without an mmap op we cannot demand-page the image */
	if (!bpp->file->f_op->mmap)
		pageable = 0;

	nsegs = xext->xe_segsize / sizeof(struct xseg);

	/* count text/data segments; any misaligned segment forces the
	 * whole image to be loaded by explicit reads */
	ntext = ndata = 0;

	for (i = 0; i < nsegs; i++) {
		switch (seglist[i].xs_type) {
			case XS_TTEXT:
				if (isnotaligned(seglist+i))
					pageable = 0;
				ntext++;
				break;
			case XS_TDATA:
				if (isnotaligned(seglist+i))
					pageable = 0;
				ndata++;
				break;
		}
	}

	if (!ndata)
		goto out;

	/*
	 * Generate the proper values for the text fields
	 *
	 * THIS IS THE POINT OF NO RETURN. THE NEW PROCESS WILL TRAP OUT SHOULD
	 * SOMETHING FAIL IN THE LOAD SEQUENCE FROM THIS POINT ONWARD.
	 */

	/*
	 * Flush the executable from memory. At this point the executable is
	 * committed to being defined or a segmentation violation will occur.
	 */
	if (executable) {
		dprintk(KERN_DEBUG "xout: flushing executable\n");

		flush_old_exec(bpp);

		/* NOTE(review): when lPers matches the current personality
		 * it is first reset to 0 and then set again - presumably to
		 * force the exec-domain switch; verify. */
		if ( (lPers & 0xFF) == (current->personality & 0xFF) )
			set_personality(0);
		set_personality(lPers);
#if defined(CONFIG_ABI_TRACE)
		abi_trace(ABI_TRACE_UNIMPL,
			"Personality %08X assigned\n",
			(unsigned int)current->personality);
#endif
#ifdef CONFIG_64BIT
		set_thread_flag(TIF_IA32);
		clear_thread_flag(TIF_ABI_PENDING);
#endif

		current->mm->mmap = NULL;

/* accommodate the rss-counter API differences across kernel versions */
#ifdef set_mm_counter
#if _KSL > 14
		set_mm_counter(current->mm, file_rss, 0);
#else
		set_mm_counter(current->mm, rss, 0);
#endif
#else
		current->mm->rss = 0;
#endif

#if _KSL > 10
		if ((err = setup_arg_pages(bpp, STACK_TOP, EXSTACK_DEFAULT)) < 0)
#else
		if ((err = setup_arg_pages(bpp, EXSTACK_DEFAULT)) < 0)
#endif
		{
			send_sig(SIGSEGV, current, 1);
			return (err);	/* NOTE(review): leaks seglist on this path */
		}

		bpp->p = (u_long)xout_create_tables((char *)bpp->p, bpp,
				(xexec->x_cpu & XC_CPU) == XC_386 ? 1 : 0);

		current->mm->start_code = 0;
		current->mm->end_code = xexec->x_text;
		current->mm->end_data = xexec->x_text + xexec->x_data;
		current->mm->start_brk = current->mm->brk =
			xexec->x_text + xexec->x_data + xexec->x_bss;

#if _KSL > 28
		install_exec_creds(bpp);
#else
		compute_creds(bpp);
#endif

		current->flags &= ~PF_FORKNOEXEC;

/* older kernels have no start_thread() suited to this ABI, so the segment
 * registers and entry state are set up by hand */
#if _KSL < 15
#ifdef CONFIG_64BIT
		__asm__ volatile (
		"movl %0,%%fs; movl %0,%%es; movl %0,%%ds"
		: :"r" (0));
		__asm__ volatile (
		"pushf; cli; swapgs; movl %0,%%gs; mfence; swapgs; popf"
		: :"r" (0));
		write_pda(oldrsp,bpp->p);
		_FLG(rp) = 0x200;
#else
		__asm__ volatile (
		"movl %0,%%fs ; movl %0,%%gs"
		: :"r" (0));
		_DS(rp) = _ES(rp) = __USER_DS;
#endif
		_SS(rp) = __USER_DS;
		_SP(rp) = bpp->p;
		_CS(rp) = __USER_CS;
		_IP(rp) = xexec->x_entry;
		set_fs(USER_DS);
#else
		start_thread(rp, xexec->x_entry, bpp->p);
#endif
#ifdef CONFIG_64BIT
		__asm__ volatile("movl %0,%%es; movl %0,%%ds": :"r" (__USER32_DS));
		_SS(rp) = __USER32_DS;
		_CS(rp) = __USER32_CS;
#endif
		dprintk(KERN_DEBUG "xout: entry point = 0x%x:0x%08lx\n",
			xext->xe_eseg, xexec->x_entry);
	}

	/*
	 * Scan the segments and map them into the process space. If this
	 * executable is pageable (unlikely since Xenix aligns to 1k
	 * boundaries and we want it aligned to 4k boundaries) this is
	 * all we need to do. If it isn't pageable we go round again
	 * afterwards and load the data. We have to do this in two steps
	 * because if segments overlap within a 4K page we'll lose the
	 * first instance when we remap the page. Hope that's clear...
	 */
	for (i = 0; err >= 0 && i < nsegs; i++) {
		struct xseg *sp = seglist+i;

		if (sp->xs_attr & XS_AMEM) {
			err = xout_amen(fp, sp, pageable, &addr,
					xexec, rp, (!ntext && ndata == 1));
		}
	}

	/*
	 * We better fix start_data because sys_brk looks there to
	 * calculate data size.
	 * Kernel 2.2 did look at end_code so this is reasonable.
	 */
	if (current->mm->start_data == current->mm->start_code)
		current->mm->start_data = current->mm->end_code;

	dprintk(KERN_DEBUG "xout: start code 0x%08lx, end code 0x%08lx,"
			" start data 0x%08lx, end data 0x%08lx, brk 0x%08lx\n",
			current->mm->start_code, current->mm->end_code,
			current->mm->start_data, current->mm->end_data,
			current->mm->brk);

	if (pageable)
		goto trap;
	if (err < 0)
		goto trap;

	/* non-pageable image: read each text/data segment's file contents
	 * into the already-mapped memory */
	for (i = 0; (err >= 0) && (i < nsegs); i++) {
		struct xseg *sp = seglist + i;
		u_long psize;

		if (sp->xs_type == XS_TTEXT || sp->xs_type == XS_TDATA) {
			dprintk(KERN_DEBUG "xout: read to 0x%08lx from 0x%08lx,"
					" length 0x%8lx\n", sp->xs_rbase,
					sp->xs_filpos, sp->xs_psize);

			/* NOTE(review): if xs_psize is unsigned (u_long
			 * elsewhere in this function), this comparison is
			 * always false - dead code */
			if (sp->xs_psize < 0)
				continue;

			/*
			 * Do we still get the size ? Yes! [joerg]
			 * NOTE(review): kernel_read returns a signed count;
			 * a negative error compared against the unsigned
			 * expected size still trips the mismatch below.
			 */
			psize = kernel_read(bpp->file, sp->xs_filpos,
				(char *)((long)sp->xs_rbase), sp->xs_psize);

			if (psize != sp->xs_psize) {
				dprintk(KERN_DEBUG "xout: short read 0x%8lx\n",psize);
				err = -1;
				break;
			}
		}
	}

	/*
	 * Generate any needed trap for this process. If an error occured then
	 * generate a segmentation violation. If the process is being debugged
	 * then generate the load trap. (Note: If this is a library load then
	 * do not generate the trap here. Pass the error to the caller who
	 * will do it for the process in the outer lay of this procedure call.)
	 */
trap:
	if (executable) {
		if (err < 0) {
			dprintk(KERN_DEBUG "xout: loader forces seg fault "
					"(err = %d)\n", err);
			send_sig(SIGSEGV, current, 0);
		}
#ifdef CONFIG_PTRACE
	/* --- Red Hat specific handling --- */
#else
		else if (current->ptrace & PT_PTRACED)
			send_sig(SIGTRAP, current, 0);
#endif
		err = 0;	/* the process is committed from here on */
	}

out:
	kfree(seglist);

	dprintk(KERN_DEBUG "xout: binfmt_xout: result = %d\n", err);

	/*
	 * If we are using the [2]86 emulation overlay we enter this
	 * rather than the real program and give it the information
	 * it needs to start the ball rolling.
	 */
	/*
	 * Xenix 386 programs expect the initial brk value to be in eax
	 * on start up. Hence if we succeeded we need to pass back
	 * the brk value rather than the status. Ultimately the
	 * ret_from_sys_call assembly will place this in eax before
	 * resuming (starting) the process.
	 */
	return (err < 0 ? err : current->mm->brk);
}
static int load_elf_binary(struct linux_binprm *bprm) { struct pt_regs regs; int interpreter_fd = -1; unsigned long load_addr = 0, load_bias; int load_addr_set = 0; char * elf_interpreter = NULL; unsigned int interpreter_type = INTERPRETER_NONE; unsigned long error; struct elf_phdr * elf_ppnt, *elf_phdata; unsigned long elf_bss, k, elf_brk; int elf_exec_fileno; int retval, size, i; unsigned long elf_entry, interp_load_addr = 0; unsigned long start_code, end_code, end_data; struct elfhdr elf_ex; struct elfhdr interp_elf_ex; struct exec interp_ex; char passed_fileno[6]; /* Get the exec-header */ elf_ex = *((struct elfhdr *) bprm->buf); my_print("[debug]here to run elf\n"); retval = -ENOEXEC; /* First of all, some simple consistency checks */ if (elf_ex.e_ident[0] != 0x7f || strncmp(&elf_ex.e_ident[1], "ELF", 3) != 0) goto out; //my_print("[ender]1\n"); if (elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) goto out; if (!elf_check_arch(elf_ex.e_machine)) goto out; //my_print("[ender]2\n"); /* Now read in all of the header information */ if (elf_ex.e_phentsize != sizeof(struct elf_phdr) || elf_ex.e_phnum < 1 || elf_ex.e_phnum > 65536 / sizeof(struct elf_phdr)) goto out; // my_print("[ender]3\n"); retval = -ENOMEM; size = elf_ex.e_phentsize * elf_ex.e_phnum; elf_phdata = (struct elf_phdr *) malloc(size); if (!elf_phdata) goto out; retval = read_exec(bprm->fd, elf_ex.e_phoff, (char *) elf_phdata, size, 1); if (retval < 0) goto out_free_ph; //my_print("[ender]4\n"); elf_exec_fileno = dup(bprm->fd); lseek(elf_exec_fileno, 0, SEEK_SET); elf_ppnt = elf_phdata; elf_bss = 0; elf_brk = 0; start_code = ~0UL; end_code = 0; end_data = 0; /* look for interpreter */ for (i = 0; i < elf_ex.e_phnum; i++) { if (elf_ppnt->p_type == PT_INTERP) { retval = -ENOEXEC; if (elf_interpreter || elf_ppnt->p_filesz < 2 || elf_ppnt->p_filesz > PAGE_SIZE) goto out_free_dentry; /* This is the program interpreter used for * shared libraries - for now assume that this * is an a.out format binary */ 
retval = -ENOMEM; elf_interpreter = (char *)malloc(elf_ppnt->p_filesz); if (!elf_interpreter) goto out_free_file; retval = read_exec(bprm->fd, elf_ppnt->p_offset, elf_interpreter, elf_ppnt->p_filesz, 1); if (retval < 0) goto out_free_interp; elf_interpreter[elf_ppnt->p_filesz - 1] = 0; #if 0 /* If the program interpreter is one of these two, * then assume an iBCS2 image. Otherwise assume * a native linux image. */ if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 || strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) ibcs2_interpreter = 1; #endif log_debug(LOG_LINEXEC_EXEC, "Using ELF interpreter: %s", elf_interpreter); if( elf_interpreter[0] == '/'){ char tmp [MAX_PATH]; change_path_to_relative(tmp, elf_interpreter); free(elf_interpreter); //elf_interpreter = (char *)malloc(elf_ppnt->p_filesz); elf_interpreter = (char *)malloc(strlen(tmp)+1); if (!elf_interpreter) goto out_free_file; strcpy(elf_interpreter, tmp); } interpreter_fd = open(elf_interpreter, O_RDONLY); my_print("[debug]open elf_interpreter %s\n", elf_interpreter); if (interpreter_fd < 0) { retval = -errno; goto out_free_interp; } #if 0 retval = permission(interpreter_dentry->d_inode, MAY_EXEC); if (retval < 0) goto out_free_dentry; #endif retval = read_exec(interpreter_fd, 0, bprm->buf, 128, 1); if (retval < 0) goto out_free_dentry; /* Get the exec headers */ interp_ex = *((struct exec *) bprm->buf); interp_elf_ex = *((struct elfhdr *) bprm->buf); } elf_ppnt++; //my_print("[ender]6\n"); } /* Some simple consistency checks for the interpreter */ if (elf_interpreter) { interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT; /* Now figure out which format our binary is */ if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) && (N_MAGIC(interp_ex) != QMAGIC)) interpreter_type = INTERPRETER_ELF; if (interp_elf_ex.e_ident[0] != 0x7f || strncmp(&interp_elf_ex.e_ident[1], "ELF", 3) != 0) interpreter_type &= ~INTERPRETER_ELF; retval = -ELIBBAD; if (!interpreter_type) goto out_free_dentry; /* 
Make sure only one type was selected */ if ((interpreter_type & INTERPRETER_ELF) && interpreter_type != INTERPRETER_ELF) { printf("ELF: Ambiguous type, using ELF\n"); interpreter_type = INTERPRETER_ELF; } } //my_print("[ender]7\n"); /* OK, we are done with that, now set up the arg stuff, and then start this sucker up */ if (!bprm->sh_bang) { char * passed_p; if (interpreter_type == INTERPRETER_AOUT) { sprintf(passed_fileno, "%d", elf_exec_fileno); passed_p = passed_fileno; if (elf_interpreter) { bprm->p = copy_strings(1,&passed_p,bprm->page,bprm->p); bprm->argc++; } } retval = -E2BIG; if (!bprm->p) goto out_free_dentry; } #if 0 /* Flush all traces of the currently running executable */ retval = flush_old_exec(bprm); if (retval) goto out_free_dentry; #endif /* OK, This is the point of no return */ current->end_data = 0; current->end_code = 0; #if 0 current->mm->mmap = NULL; current->flags &= ~PF_FORKNOEXEC; #endif elf_entry = (unsigned long) elf_ex.e_entry; //printf("[ender]8\n"); #if 0 /* Do this immediately, since STACK_TOP as used in setup_arg_pages may depend on the personality. */ SET_PERSONALITY(elf_ex, ibcs2_interpreter); #endif /* Do this so that we can load the interpreter, if need be. We will change some of these later */ // current->mm->rss = 0; bprm->p = setup_arg_pages(bprm->p, bprm); current->start_stack = bprm->p; /* Try and get dynamic programs out of the way of the default mmap base, as well as whatever program they might try to exec. This is because the brk will follow the loader, and is not movable. */ load_bias = ELF_PAGESTART(elf_ex.e_type==ET_DYN ? ELF_ET_DYN_BASE : 0); #ifdef __VERBOSE__ printf("load_bias: %08lX\n", load_bias); #endif /* Now we do a little grungy work by mmaping the ELF image into the correct location in memory. At this point, we assume that the image should be loaded at fixed address, not at a variable address. 
*/ for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) { int elf_prot = 0, elf_flags; unsigned long vaddr; if (elf_ppnt->p_type != PT_LOAD) continue; if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ; if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE; if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC; elf_flags = MAP_PRIVATE; // |MAP_DENYWRITE|MAP_EXECUTABLE; vaddr = elf_ppnt->p_vaddr; if (elf_ex.e_type == ET_EXEC || load_addr_set) { elf_flags |= MAP_FIXED; } //my_print("[ender]9\n"); #ifdef __VERBOSE__ printf("mapping: %08lX\n", ELF_PAGESTART(load_bias + vaddr)); #endif error = do_mmap(bprm->fd, ELF_PAGESTART(load_bias + vaddr), (elf_ppnt->p_filesz + ELF_PAGEOFFSET(elf_ppnt->p_vaddr)), elf_prot, elf_flags, (elf_ppnt->p_offset - ELF_PAGEOFFSET(elf_ppnt->p_vaddr))); #ifdef __VERBOSE__ printf("error: %08lX\n", error); #endif if (!load_addr_set) { load_addr_set = 1; load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset); #ifdef __VERBOSE__ printf("load_addr: %08lX, vaddr: %08lX\n", load_addr, vaddr); #endif if (elf_ex.e_type == ET_DYN) { load_bias += error - ELF_PAGESTART(load_bias + vaddr); load_addr += error; #ifdef __VERBOSE__ printf("new\nload_bias: %08lX, load_addr: %08lX\n", load_bias, load_addr); #endif } } k = elf_ppnt->p_vaddr; if (k < start_code) start_code = k; k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz; if (k > elf_bss) elf_bss = k; if ((elf_ppnt->p_flags & PF_X) && end_code < k) end_code = k; if (end_data < k) end_data = k; k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz; if (k > elf_brk) elf_brk = k; } close(bprm->fd); elf_entry += load_bias; elf_bss += load_bias; elf_brk += load_bias; start_code += load_bias; end_code += load_bias; end_data += load_bias; if (elf_interpreter) { if (interpreter_type == INTERPRETER_AOUT) { elf_entry = load_aout_interp(&interp_ex, interpreter_fd); } else { elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd, &interp_load_addr); } close(interpreter_fd); if (elf_entry == ~0UL) { printf("Unable to load 
interpreter %.128s\n", elf_interpreter); free(elf_interpreter); free(elf_phdata); //send_sig(SIGSEGV, current, 0); exit(1); return 0; } free(elf_interpreter); } free(elf_phdata); if (interpreter_type != INTERPRETER_AOUT) close(elf_exec_fileno); #if 0 #ifndef VM_STACK_FLAGS current->executable = dget(bprm->dentry); #endif #endif bprm->p = (unsigned long)create_elf_tables((char *)bprm->p, bprm->argc, bprm->envc, (interpreter_type == INTERPRETER_ELF ? &elf_ex : NULL), load_addr, load_bias, interp_load_addr, (interpreter_type == INTERPRETER_AOUT ? 0 : 1)); #if 0 /* N.B. passed_fileno might not be initialized? */ if (interpreter_type == INTERPRETER_AOUT) current->arg_start += strlen(passed_fileno) + 1; #endif current->start_brk = current->brk = elf_brk; current->end_code = end_code; current->start_code = start_code; current->end_data = end_data; current->start_stack = bprm->p; /* Calling set_brk effectively mmaps the pages that we need * for the bss and break sections */ set_brk(elf_bss, elf_brk); padzero(elf_bss); log_debug(LOG_LINEXEC_EXEC,"start_brk: %lx" , current->start_brk); log_debug(LOG_LINEXEC_EXEC,"end_code: %lx" , current->end_code); log_debug(LOG_LINEXEC_EXEC,"start_code: %lx" , current->start_code); log_debug(LOG_LINEXEC_EXEC,"end_data: %lx" , current->end_data); log_debug(LOG_LINEXEC_EXEC,"start_stack: %lx" , current->start_stack); log_debug(LOG_LINEXEC_EXEC,"brk: %lx" , current->brk); /* * The ABI may specify that certain registers be set up in special * ways (on i386 %edx is the address of a DT_FINI function, for * example. This macro performs whatever initialization to * the regs structure is required. 
*/ ELF_PLAT_INIT((®s)); regs.eip = elf_entry; regs.esp = bprm->p; #if 0 if (current->flags & PF_PTRACED) send_sig(SIGTRAP, current, 0); #endif #ifndef __DEBUG__ // dumpMemoryMap(); log_verbose(LOG_LINEXEC_EXEC, "[transfering control to Linux executable]"); //getchar(); //printf("[ender]11\n"); ASM_EXEC_JUMP(regs); printf("You should never see this message!\n"); #else printf("execve() finished, but in debug mode. exiting...\n"); #endif retval = 0; out: return retval; /* error cleanup */ out_free_dentry: close(interpreter_fd); out_free_interp: if (elf_interpreter) { free(elf_interpreter); } out_free_file: close(elf_exec_fileno); out_free_ph: free(elf_phdata); goto out; }
static int load_aout32_binary(struct linux_binprm * bprm, struct pt_regs * regs) { struct exec ex; unsigned long error; unsigned long fd_offset; unsigned long rlim; unsigned long orig_thr_flags; int retval; ex = *((struct exec *) bprm->buf); /* exec-header */ if ((N_MAGIC(ex) != ZMAGIC && N_MAGIC(ex) != OMAGIC && N_MAGIC(ex) != QMAGIC && N_MAGIC(ex) != NMAGIC) || N_TRSIZE(ex) || N_DRSIZE(ex) || bprm->file->f_path.dentry->d_inode->i_size < ex.a_text+ex.a_data+N_SYMSIZE(ex)+N_TXTOFF(ex)) { return -ENOEXEC; } fd_offset = N_TXTOFF(ex); /* Check initial limits. This avoids letting people circumvent * size limits imposed on them by creating programs with large * arrays in the data or bss. */ rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur; if (rlim >= RLIM_INFINITY) rlim = ~0; if (ex.a_data + ex.a_bss > rlim) return -ENOMEM; /* Flush all traces of the currently running executable */ retval = flush_old_exec(bprm); if (retval) return retval; /* OK, This is the point of no return */ set_personality(PER_SUNOS); current->mm->end_code = ex.a_text + (current->mm->start_code = N_TXTADDR(ex)); current->mm->end_data = ex.a_data + (current->mm->start_data = N_DATADDR(ex)); current->mm->brk = ex.a_bss + (current->mm->start_brk = N_BSSADDR(ex)); current->mm->free_area_cache = current->mm->mmap_base; current->mm->cached_hole_size = 0; current->mm->mmap = NULL; compute_creds(bprm); current->flags &= ~PF_FORKNOEXEC; if (N_MAGIC(ex) == NMAGIC) { loff_t pos = fd_offset; /* F**k me plenty... 
*/ down_write(¤t->mm->mmap_sem); error = do_brk(N_TXTADDR(ex), ex.a_text); up_write(¤t->mm->mmap_sem); bprm->file->f_op->read(bprm->file, (char __user *)N_TXTADDR(ex), ex.a_text, &pos); down_write(¤t->mm->mmap_sem); error = do_brk(N_DATADDR(ex), ex.a_data); up_write(¤t->mm->mmap_sem); bprm->file->f_op->read(bprm->file, (char __user *)N_DATADDR(ex), ex.a_data, &pos); goto beyond_if; } if (N_MAGIC(ex) == OMAGIC) { loff_t pos = fd_offset; down_write(¤t->mm->mmap_sem); do_brk(N_TXTADDR(ex) & PAGE_MASK, ex.a_text+ex.a_data + PAGE_SIZE - 1); up_write(¤t->mm->mmap_sem); bprm->file->f_op->read(bprm->file, (char __user *)N_TXTADDR(ex), ex.a_text+ex.a_data, &pos); } else { static unsigned long error_time; if ((ex.a_text & 0xfff || ex.a_data & 0xfff) && (N_MAGIC(ex) != NMAGIC) && (jiffies-error_time) > 5*HZ) { printk(KERN_NOTICE "executable not page aligned\n"); error_time = jiffies; } if (!bprm->file->f_op->mmap) { loff_t pos = fd_offset; down_write(¤t->mm->mmap_sem); do_brk(0, ex.a_text+ex.a_data); up_write(¤t->mm->mmap_sem); bprm->file->f_op->read(bprm->file, (char __user *)N_TXTADDR(ex), ex.a_text+ex.a_data, &pos); goto beyond_if; } down_write(¤t->mm->mmap_sem); error = do_mmap(bprm->file, N_TXTADDR(ex), ex.a_text, PROT_READ | PROT_EXEC, MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE, fd_offset); up_write(¤t->mm->mmap_sem); if (error != N_TXTADDR(ex)) { send_sig(SIGKILL, current, 0); return error; } down_write(¤t->mm->mmap_sem); error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE, fd_offset + ex.a_text); up_write(¤t->mm->mmap_sem); if (error != N_DATADDR(ex)) { send_sig(SIGKILL, current, 0); return error; } } beyond_if: set_binfmt(&aout32_format); set_brk(current->mm->start_brk, current->mm->brk); /* Make sure STACK_TOP returns the right thing. 
*/ orig_thr_flags = current_thread_info()->flags; current_thread_info()->flags |= _TIF_32BIT; retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT); if (retval < 0) { current_thread_info()->flags = orig_thr_flags; /* Someone check-me: is this error path enough? */ send_sig(SIGKILL, current, 0); return retval; } current->mm->start_stack = (unsigned long) create_aout32_tables((char __user *)bprm->p, bprm); tsb_context_switch(current->mm); start_thread32(regs, ex.a_entry, current->mm->start_stack); if (current->ptrace & PT_PTRACED) send_sig(SIGTRAP, current, 0); return 0; }
static int load_fn_file(struct linux_binprm * bprm,unsigned long *extra_stack) { unsigned long stack_len; unsigned long stack_start; unsigned long start_code, end_code; unsigned long result; unsigned long rlim; stack_len = USERSPACE_STACK_SIZE; if (extra_stack) { stack_len += *extra_stack; *extra_stack = stack_len; } /* * Check initial limits. This avoids letting people circumvent * size limits imposed on them by creating programs with large * arrays in the data or bss. */ rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur; if (rlim >= RLIM_INFINITY) rlim = ~0; /* Flush all traces of the currently running executable */ result = flush_old_exec(bprm); if (result) return result; /* OK, This is the point of no return */ set_personality(PER_LINUX); /* * there are a couple of cases here, the separate code/data * case, and then the fully copied to RAM case which lumps * it all together. */ down_write(¤t->mm->mmap_sem); stack_start= do_mmap(0, 0,stack_len, PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0); up_write(¤t->mm->mmap_sem); if (!stack_start|| stack_start >= (unsigned long) -4096) { if (!stack_start) stack_start = (unsigned long) -ENOMEM; printk("Unable to allocate RAM for process text/data, errno %d\n", (int)-stack_start); return(stack_start); } /* The main program needs a little extra setup in the task structure */ start_code = (unsigned long)bprm->filename; end_code = start_code + PAGE_SIZE*2; current->mm->start_code = start_code; current->mm->end_code = end_code; current->mm->start_data = 0; current->mm->end_data = 0; /* * set up the brk stuff, uses any slack left in data/bss/stack * allocation. We put the brk after the bss (between the bss * and stack) like other platforms. */ current->mm->start_brk = 0; current->mm->brk = 0; current->mm->context.end_brk = stack_start; set_mm_counter(current->mm, rss, 0); flush_icache_range(start_code, end_code); memset((unsigned char*)stack_start,0,stack_len); return 0; }
static int load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs) { int retval; unsigned int size; unsigned long som_entry; struct som_hdr *som_ex; struct som_exec_auxhdr *hpuxhdr; /* Get the exec-header */ som_ex = (struct som_hdr *) bprm->buf; retval = check_som_header(som_ex); if (retval != 0) goto out; /* Now read in the auxiliary header information */ retval = -ENOMEM; size = som_ex->aux_header_size; if (size > SOM_PAGESIZE) goto out; hpuxhdr = kmalloc(size, GFP_KERNEL); if (!hpuxhdr) goto out; retval = kernel_read(bprm->file, som_ex->aux_header_location, (char *) hpuxhdr, size); if (retval != size) { if (retval >= 0) retval = -EIO; goto out_free; } /* Flush all traces of the currently running executable */ retval = flush_old_exec(bprm); if (retval) goto out_free; /* OK, This is the point of no return */ current->flags &= ~PF_FORKNOEXEC; current->personality = PER_HPUX; setup_new_exec(bprm); /* Set the task size for HP-UX processes such that * the gateway page is outside the address space. * This can be fixed later, but for now, this is much * easier. 
*/ current->thread.task_size = 0xc0000000; /* Set map base to allow enough room for hp-ux heap growth */ current->thread.map_base = 0x80000000; retval = map_som_binary(bprm->file, hpuxhdr); if (retval < 0) goto out_free; som_entry = hpuxhdr->exec_entry; kfree(hpuxhdr); set_binfmt(&som_format); install_exec_creds(bprm); setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT); create_som_tables(bprm); current->mm->start_stack = bprm->p; #if 0 printk("(start_brk) %08lx\n" , (unsigned long) current->mm->start_brk); printk("(end_code) %08lx\n" , (unsigned long) current->mm->end_code); printk("(start_code) %08lx\n" , (unsigned long) current->mm->start_code); printk("(end_data) %08lx\n" , (unsigned long) current->mm->end_data); printk("(start_stack) %08lx\n" , (unsigned long) current->mm->start_stack); printk("(brk) %08lx\n" , (unsigned long) current->mm->brk); #endif map_hpux_gateway_page(current,current->mm); start_thread_som(regs, som_entry, bprm->p); return 0; /* error cleanup */ out_free: kfree(hpuxhdr); out: return retval; }
/* These are the functions used to load ELF style executables and shared * libraries. There is no binary dependent code anywhere else. */ static int load_irix_binary(struct linux_binprm * bprm, struct pt_regs * regs) { struct elfhdr elf_ex, interp_elf_ex; struct file *interpreter; struct elf_phdr *elf_phdata, *elf_ihdr, *elf_ephdr; unsigned int load_addr, elf_bss, elf_brk; unsigned int elf_entry, interp_load_addr = 0; unsigned int start_code, end_code, end_data, elf_stack; int retval, has_interp, has_ephdr, size, i; char *elf_interpreter; mm_segment_t old_fs; load_addr = 0; has_interp = has_ephdr = 0; elf_ihdr = elf_ephdr = 0; elf_ex = *((struct elfhdr *) bprm->buf); retval = -ENOEXEC; if (verify_binary(&elf_ex, bprm)) goto out; #ifdef DEBUG_ELF print_elfhdr(&elf_ex); #endif /* Now read in all of the header information */ size = elf_ex.e_phentsize * elf_ex.e_phnum; if (size > 65536) goto out; elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL); if (elf_phdata == NULL) { retval = -ENOMEM; goto out; } retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *)elf_phdata, size); if (retval < 0) goto out_free_ph; #ifdef DEBUG_ELF dump_phdrs(elf_phdata, elf_ex.e_phnum); #endif /* Set some things for later. */ for(i = 0; i < elf_ex.e_phnum; i++) { switch(elf_phdata[i].p_type) { case PT_INTERP: has_interp = 1; elf_ihdr = &elf_phdata[i]; break; case PT_PHDR: has_ephdr = 1; elf_ephdr = &elf_phdata[i]; break; }; } #ifdef DEBUG_ELF printk("\n"); #endif elf_bss = 0; elf_brk = 0; elf_stack = 0xffffffff; elf_interpreter = NULL; start_code = 0xffffffff; end_code = 0; end_data = 0; retval = look_for_irix_interpreter(&elf_interpreter, &interpreter, &interp_elf_ex, elf_phdata, bprm, elf_ex.e_phnum); if (retval) goto out_free_file; if (elf_interpreter) { retval = verify_irix_interpreter(&interp_elf_ex); if(retval) goto out_free_interp; } /* OK, we are done with that, now set up the arg stuff, * and then start this sucker up. 
*/ retval = -E2BIG; if (!bprm->sh_bang && !bprm->p) goto out_free_interp; /* Flush all traces of the currently running executable */ retval = flush_old_exec(bprm); if (retval) goto out_free_dentry; /* OK, This is the point of no return */ current->mm->end_data = 0; current->mm->end_code = 0; current->mm->mmap = NULL; current->flags &= ~PF_FORKNOEXEC; elf_entry = (unsigned int) elf_ex.e_entry; /* Do this so that we can load the interpreter, if need be. We will * change some of these later. */ current->mm->rss = 0; setup_arg_pages(bprm); current->mm->start_stack = bprm->p; /* At this point, we assume that the image should be loaded at * fixed address, not at a variable address. */ old_fs = get_fs(); set_fs(get_ds()); map_executable(bprm->file, elf_phdata, elf_ex.e_phnum, &elf_stack, &load_addr, &start_code, &elf_bss, &end_code, &end_data, &elf_brk); if(elf_interpreter) { retval = map_interpreter(elf_phdata, &interp_elf_ex, interpreter, &interp_load_addr, elf_ex.e_phnum, old_fs, &elf_entry); kfree(elf_interpreter); if(retval) { set_fs(old_fs); printk("Unable to load IRIX ELF interpreter\n"); send_sig(SIGSEGV, current, 0); retval = 0; goto out_free_file; } } set_fs(old_fs); kfree(elf_phdata); set_personality(PER_IRIX32); set_binfmt(&irix_format); compute_creds(bprm); current->flags &= ~PF_FORKNOEXEC; bprm->p = (unsigned long) create_irix_tables((char *)bprm->p, bprm->argc, bprm->envc, (elf_interpreter ? &elf_ex : NULL), load_addr, interp_load_addr, regs, elf_ephdr); current->mm->start_brk = current->mm->brk = elf_brk; current->mm->end_code = end_code; current->mm->start_code = start_code; current->mm->end_data = end_data; current->mm->start_stack = bprm->p; /* Calling set_brk effectively mmaps the pages that we need for the * bss and break sections. */ set_brk(elf_bss, elf_brk); /* * IRIX maps a page at 0x200000 which holds some system * information. Programs depend on this. 
*/ irix_map_prda_page (); padzero(elf_bss); #ifdef DEBUG_ELF printk("(start_brk) %lx\n" , (long) current->mm->start_brk); printk("(end_code) %lx\n" , (long) current->mm->end_code); printk("(start_code) %lx\n" , (long) current->mm->start_code); printk("(end_data) %lx\n" , (long) current->mm->end_data); printk("(start_stack) %lx\n" , (long) current->mm->start_stack); printk("(brk) %lx\n" , (long) current->mm->brk); #endif #if 0 /* XXX No f*****g way dude... */ /* Why this, you ask??? Well SVr4 maps page 0 as read-only, * and some applications "depend" upon this behavior. * Since we do not have the power to recompile these, we * emulate the SVr4 behavior. Sigh. */ down_write(¤t->mm->mmap_sem); (void) do_mmap(NULL, 0, 4096, PROT_READ | PROT_EXEC, MAP_FIXED | MAP_PRIVATE, 0); up_write(¤t->mm->mmap_sem); #endif start_thread(regs, elf_entry, bprm->p); if (current->ptrace & PT_PTRACED) send_sig(SIGTRAP, current, 0); return 0; out: return retval; out_free_dentry: allow_write_access(interpreter); fput(interpreter); out_free_interp: if (elf_interpreter) kfree(elf_interpreter); out_free_file: out_free_ph: kfree (elf_phdata); goto out; }