Example #1
0
static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs)
{
	struct file *interpreter = NULL; /* to shut gcc up */
 	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned int interpreter_type = INTERPRETER_NONE;
	unsigned char ibcs2_interpreter = 0;
	unsigned long error;
	struct elf_phdr * elf_ppnt, *elf_phdata;
	unsigned long elf_bss, k, elf_brk;
	int elf_exec_fileno;
	int retval, i;
	unsigned int size;
	unsigned long elf_entry, interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc = 0;
	struct elfhdr elf_ex;
	struct elfhdr interp_elf_ex;
  	struct exec interp_ex;
	char passed_fileno[6];
	struct files_struct *files;
	
	/* Get the exec-header */
	elf_ex = *((struct elfhdr *) bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&elf_ex))
		goto out;
	if (!bprm->file->f_op||!bprm->file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	retval = -ENOMEM;
	if (elf_ex.e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	size = elf_ex.e_phnum * sizeof(struct elf_phdr);
	elf_phdata = (struct elf_phdr *) kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(bprm->file, elf_ex.e_phoff, (char *) elf_phdata, size);
	if (retval < 0)
		goto out_free_ph;
		
	files = current->files;		/* Refcounted so ok */
	retval = unshare_files();
	if (retval < 0)
		goto out_free_ph;
	if (files == current->files) {
		put_files_struct(files);
		files = NULL;
	}

	/* exec will make our files private anyway, but for the a.out
	   loader stuff we need to do it earlier */
	   
	retval = get_unused_fd();
	if (retval < 0)
		goto out_free_fh;
	get_file(bprm->file);
	fd_install(elf_exec_fileno = retval, bprm->file);

	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	for (i = 0; i < elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */

			retval = -ENOMEM;
			if (elf_ppnt->p_filesz > PATH_MAX)
				goto out_free_file;
			elf_interpreter = (char *) kmalloc(elf_ppnt->p_filesz,
							   GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_file;

			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
					   elf_interpreter,
					   elf_ppnt->p_filesz);
			if (retval < 0)
				goto out_free_interp;
			/* If the program interpreter is one of these two,
			 * then assume an iBCS2 image. Otherwise assume
			 * a native linux image.
			 */
			if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
			    strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0)
				ibcs2_interpreter = 1;
#if 0
			printk("Using ELF interpreter %s\n", elf_interpreter);
#endif

			SET_PERSONALITY(elf_ex, ibcs2_interpreter);

			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;
			retval = kernel_read(interpreter, 0, bprm->buf, BINPRM_BUF_SIZE);
			if (retval < 0)
				goto out_free_dentry;

			/* Get the exec headers */
			interp_ex = *((struct exec *) bprm->buf);
			interp_elf_ex = *((struct elfhdr *) bprm->buf);
			break;
		}
		elf_ppnt++;
	}

	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

		/* Now figure out which format our binary is */
		if ((N_MAGIC(interp_ex) != OMAGIC) &&
		    (N_MAGIC(interp_ex) != ZMAGIC) &&
		    (N_MAGIC(interp_ex) != QMAGIC))
			interpreter_type = INTERPRETER_ELF;

		if (memcmp(interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			interpreter_type &= ~INTERPRETER_ELF;

		retval = -ELIBBAD;
		if (!interpreter_type)
			goto out_free_dentry;

		/* Make sure only one type was selected */
		if ((interpreter_type & INTERPRETER_ELF) &&
		     interpreter_type != INTERPRETER_ELF) {
	     		// FIXME - ratelimit this before re-enabling
			// printk(KERN_WARNING "ELF: Ambiguous type, using ELF\n");
			interpreter_type = INTERPRETER_ELF;
		}
		/* Verify the interpreter has a valid arch */
		if ((interpreter_type == INTERPRETER_ELF) &&
		    !elf_check_arch(&interp_elf_ex))
			goto out_free_dentry;
	} else {
		/* Executables without an interpreter also need a personality  */
		SET_PERSONALITY(elf_ex, ibcs2_interpreter);
	}

	/* OK, we are done with that, now set up the arg stuff,
	   and then start this sucker up */

	if (!bprm->sh_bang) {
		char * passed_p;

		if (interpreter_type == INTERPRETER_AOUT) {
			sprintf(passed_fileno, "%d", elf_exec_fileno);
			passed_p = passed_fileno;

			if (elf_interpreter) {
				retval = copy_strings_kernel(1, &passed_p, bprm);
				if (retval)
					goto out_free_dentry;
				bprm->argc++;
			}
		}
	}

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* Discard our unneeded old files struct */
	if (files) {
		steal_locks(files);
		put_files_struct(files);
		files = NULL;
	}

	/* OK, This is the point of no return */
	current->mm->start_data = 0;
	current->mm->end_data = 0;
	current->mm->end_code = 0;
	current->mm->mmap = NULL;
	current->flags &= ~PF_FORKNOEXEC;
	elf_entry = (unsigned long) elf_ex.e_entry;

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	current->mm->rss = 0;
	retval = setup_arg_pages(bprm);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		return retval;
	}
	
	current->mm->start_stack = bprm->p;

	/* Now we do a little grungy work by mmaping the ELF image into
	   the correct location in memory.  At this point, we assume that
	   the image should be loaded at a fixed address, not at a variable
	   address. */

	for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long vaddr;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;
	            
			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			set_brk (elf_bss + load_bias, elf_brk + load_bias);
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				clear_user((void *) elf_bss + load_bias, nbyte);
			}
		}

		if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE|MAP_DENYWRITE|MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		if (elf_ex.e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (elf_ex.e_type == ET_DYN) {
			/* Try and get dynamic programs out of the way of the default mmap
			   base, as well as whatever program they might try to exec.  This
			   is because the brk will follow the loader, and is not movable.  */
			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
		}

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
		if (BAD_ADDR(error))
			continue;

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (elf_ex.e_type == ET_DYN) {
				load_bias += error -
				             ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_addr;
			}
		}
		k = elf_ppnt->p_vaddr;
		if (k < start_code) start_code = k;
		if (start_data < k) start_data = k;

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code <  k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}

	elf_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	if (elf_interpreter) {
		if (interpreter_type == INTERPRETER_AOUT)
			elf_entry = load_aout_interp(&interp_ex,
						     interpreter);
		else
			elf_entry = load_elf_interp(&interp_elf_ex,
						    interpreter,
						    &interp_load_addr);
		if (BAD_ADDR(elf_entry)) {
			printk(KERN_ERR "Unable to load interpreter\n");
			send_sig(SIGSEGV, current, 0);
			retval = -ENOEXEC; /* Nobody gets to see this, but.. */
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
		kfree(elf_interpreter);
	}

	kfree(elf_phdata);

	if (interpreter_type != INTERPRETER_AOUT)
		sys_close(elf_exec_fileno);

	set_binfmt(&elf_format);

	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	bprm->p = (unsigned long)
	  create_elf_tables((char *)bprm->p,
			bprm->argc,
			bprm->envc,
			&elf_ex,
			load_addr, load_bias,
			interp_load_addr,
			(interpreter_type == INTERPRETER_AOUT ? 0 : 1));
	/* N.B. passed_fileno might not be initialized? */
	if (interpreter_type == INTERPRETER_AOUT)
		current->mm->arg_start += strlen(passed_fileno) + 1;
	current->mm->start_brk = current->mm->brk = elf_brk;
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections
	 */
	set_brk(elf_bss, elf_brk);

	padzero(elf_bss);

#if 0
	printk("(start_brk) %lx\n" , (long) current->mm->start_brk);
	printk("(end_code) %lx\n" , (long) current->mm->end_code);
	printk("(start_code) %lx\n" , (long) current->mm->start_code);
	printk("(start_data) %lx\n" , (long) current->mm->start_data);
	printk("(end_data) %lx\n" , (long) current->mm->end_data);
	printk("(start_stack) %lx\n" , (long) current->mm->start_stack);
	printk("(brk) %lx\n" , (long) current->mm->brk);
#endif

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior.  Sigh.  */
		/* N.B. Shouldn't the size here be PAGE_SIZE?? */
		down_write(&current->mm->mmap_sem);
		error = do_mmap(NULL, 0, 4096, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);
	}

#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example).  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	start_thread(regs, elf_entry, bprm->p);
	if (current->ptrace & PT_PTRACED)
		send_sig(SIGTRAP, current, 0);
	retval = 0;
out:
	return retval;

	/* error cleanup */
out_free_dentry:
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_interp:
	if (elf_interpreter)
		kfree(elf_interpreter);
out_free_file:
	sys_close(elf_exec_fileno);
out_free_fh:
	if (files) {
		put_files_struct(current->files);
		current->files = files;
	}
out_free_ph:
	kfree(elf_phdata);
	goto out;
}
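
/*
 * A minimal user-space sketch (not kernel code) of the first consistency
 * checks load_elf_binary() applies above -- magic bytes, object type,
 * program-header entry size -- using the standard <elf.h> definitions.
 * Assumes a 64-bit ELF image; the helper name is illustrative.
 */
#include <elf.h>
#include <string.h>

static int elf_header_looks_sane(const unsigned char *buf)
{
	const Elf64_Ehdr *eh = (const Elf64_Ehdr *)buf;

	if (memcmp(eh->e_ident, ELFMAG, SELFMAG) != 0)
		return 0;	/* not an ELF image */
	if (eh->e_type != ET_EXEC && eh->e_type != ET_DYN)
		return 0;	/* neither a fixed executable nor ET_DYN */
	if (eh->e_phentsize != sizeof(Elf64_Phdr))
		return 0;	/* malformed program-header table */
	return 1;
}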
Example #2
0
static ssize_t read_kcore(struct file *file, char *buf, size_t count, loff_t *ppos)
{
	unsigned long long p = *ppos, memsize;
	ssize_t read;
	ssize_t count1;
	char * pnt;
	struct user dump;
#if defined (__i386__) || defined (__mc68000__) || defined(__x86_64__)
#	define FIRST_MAPPED	PAGE_SIZE	/* we don't have page 0 mapped on x86.. */
#else
#	define FIRST_MAPPED	0
#endif

	memset(&dump, 0, sizeof(struct user));
	dump.magic = CMAGIC;
	dump.u_dsize = (virt_to_phys(high_memory) >> PAGE_SHIFT);
#if defined (__i386__) || defined(__x86_64__)
	dump.start_code = PAGE_OFFSET;
#endif
#ifdef __alpha__
	dump.start_data = PAGE_OFFSET;
#endif

	memsize = virt_to_phys(high_memory);
	if (p >= memsize)
		return 0;
	if (count > memsize - p)
		count = memsize - p;
	read = 0;

	if (p < sizeof(struct user) && count > 0) {
		count1 = count;
		if (p + count1 > sizeof(struct user))
			count1 = sizeof(struct user)-p;
		pnt = (char *) &dump + p;
		if (copy_to_user(buf,(void *) pnt, count1))
			return -EFAULT;
		buf += count1;
		p += count1;
		count -= count1;
		read += count1;
	}

	if (count > 0 && p < PAGE_SIZE + FIRST_MAPPED) {
		count1 = PAGE_SIZE + FIRST_MAPPED - p;
		if (count1 > count)
			count1 = count;
		if (clear_user(buf, count1))
			return -EFAULT;
		buf += count1;
		p += count1;
		count -= count1;
		read += count1;
	}
	if (count > 0) {
		if (copy_to_user(buf, (void *) (PAGE_OFFSET+p-PAGE_SIZE), count))
			return -EFAULT;
		read += count;
	}
	*ppos += read;
	return read;
}
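
/*
 * A minimal sketch of the windowing arithmetic used for each region in
 * read_kcore() above: the chunk starts at `count` and is clamped so the
 * copy never runs past the end of the region at `limit`.  Pure
 * arithmetic; the helper name is illustrative, not a kernel API.
 */
static size_t clamp_chunk(unsigned long long p, size_t count,
			  unsigned long long limit)
{
	size_t chunk = count;

	if (p >= limit)
		return 0;		/* window is already past this region */
	if (p + chunk > limit)
		chunk = limit - p;	/* trim to the region boundary */
	return chunk;
}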
static int CVE_2010_0307_linux2_6_27_31_load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	struct file *interpreter = NULL; /* to shut gcc up */
 	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata;
	unsigned long elf_bss, elf_brk;
	int elf_exec_fileno;
	int retval, i;
	unsigned int size;
	unsigned long elf_entry;
	unsigned long interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc = 0;
	int executable_stack = EXSTACK_DEFAULT;
	unsigned long def_flags = 0;
	struct {
		struct elfhdr elf_ex;
		struct elfhdr interp_elf_ex;
	} *loc;

	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}
	
	/* Get the exec-header */
	loc->elf_ex = *((struct elfhdr *)bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&loc->elf_ex))
		goto out;
	if (!bprm->file->f_op||!bprm->file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */
	if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
		goto out;
	if (loc->elf_ex.e_phnum < 1 ||
	 	loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
	retval = -ENOMEM;
	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
			     (char *)elf_phdata, size);
	if (retval != size) {
		if (retval >= 0)
			retval = -EIO;
		goto out_free_ph;
	}

	retval = get_unused_fd();
	if (retval < 0)
		goto out_free_ph;
	get_file(bprm->file);
	fd_install(elf_exec_fileno = retval, bprm->file);

	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	for (i = 0; i < loc->elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */
			retval = -ENOEXEC;
			if (elf_ppnt->p_filesz > PATH_MAX || 
			    elf_ppnt->p_filesz < 2)
				goto out_free_file;

			retval = -ENOMEM;
			elf_interpreter = kmalloc(elf_ppnt->p_filesz,
						  GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_file;

			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
					     elf_interpreter,
					     elf_ppnt->p_filesz);
			if (retval != elf_ppnt->p_filesz) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_interp;
			}
			/* make sure path is NULL terminated */
			retval = -ENOEXEC;
			if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
				goto out_free_interp;

			/*
			 * The early SET_PERSONALITY here is so that the lookup
			 * for the interpreter happens in the namespace of the 
			 * to-be-execed image.  SET_PERSONALITY can select an
			 * alternate root.
			 *
			 * However, SET_PERSONALITY is NOT allowed to switch
			 * this task into the new image's memory mapping
			 * policy - that is, TASK_SIZE must still evaluate to
			 * that which is appropriate to the execing application.
			 * This is because exit_mmap() needs to have TASK_SIZE
			 * evaluate to the size of the old image.
			 *
			 * So if (say) a 64-bit application is execing a 32-bit
			 * application it is the architecture's responsibility
			 * to defer changing the value of TASK_SIZE until the
			 * switch really is going to happen - do this in
			 * flush_thread().	- akpm
			 */
			SET_PERSONALITY(loc->elf_ex, 0);

			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;

			/*
			 * If the binary is not readable then enforce
			 * mm->dumpable = 0 regardless of the interpreter's
			 * permissions.
			 */
			if (file_permission(interpreter, MAY_READ) < 0)
				bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;

			retval = kernel_read(interpreter, 0, bprm->buf,
					     BINPRM_BUF_SIZE);
			if (retval != BINPRM_BUF_SIZE) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_dentry;
			}

			/* Get the exec headers */
			loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
			break;
		}
		elf_ppnt++;
	}

	elf_ppnt = elf_phdata;
	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
		if (elf_ppnt->p_type == PT_GNU_STACK) {
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;
		}

	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		retval = -ELIBBAD;
		/* Not an ELF interpreter */
		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			goto out_free_dentry;
		/* Verify the interpreter has a valid arch */
		if (!elf_check_arch(&loc->interp_elf_ex))
			goto out_free_dentry;
	} else {
		/* Executables without an interpreter also need a personality  */
		SET_PERSONALITY(loc->elf_ex, 0);
	}

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* OK, This is the point of no return */
	current->flags &= ~PF_FORKNOEXEC;
	current->mm->def_flags = def_flags;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY(loc->elf_ex, 0);
	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;
	arch_pick_mmap_layout(current->mm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	current->mm->free_area_cache = current->mm->mmap_base;
	current->mm->cached_hole_size = 0;
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}
	
	current->mm->start_stack = bprm->p;

	/* Now we do a little grungy work by mmaping the ELF image into
	   the correct location in memory. */
	for(i = 0, elf_ppnt = elf_phdata;
	    i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long k, vaddr;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;
	            
			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk (elf_bss + load_bias,
					  elf_brk + load_bias);
			if (retval) {
				send_sig(SIGKILL, current, 0);
				goto out_free_dentry;
			}
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
							load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}
		}

		if (elf_ppnt->p_flags & PF_R)
			elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W)
			elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X)
			elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (loc->elf_ex.e_type == ET_DYN) {
			/* Try and get dynamic programs out of the way of the
			 * default mmap base, as well as whatever program they
			 * might try to exec.  This is because the brk will
			 * follow the loader, and is not movable.  */
#ifdef CONFIG_X86
			load_bias = 0;
#else
			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
#endif
		}

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
				elf_prot, elf_flags, 0);
		if (BAD_ADDR(error)) {
			send_sig(SIGKILL, current, 0);
			retval = IS_ERR((void *)error) ?
				PTR_ERR((void*)error) : -EINVAL;
			goto out_free_dentry;
		}

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (loc->elf_ex.e_type == ET_DYN) {
				load_bias += error -
				             ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if (k < start_code)
			start_code = k;
		if (start_data < k)
			start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			send_sig(SIGKILL, current, 0);
			retval = -EINVAL;
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}

	loc->elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk);
	if (retval) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}
	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
		send_sig(SIGSEGV, current, 0);
		retval = -EFAULT; /* Nobody gets to see this, but.. */
		goto out_free_dentry;
	}

	if (elf_interpreter) {
		unsigned long uninitialized_var(interp_map_addr);

		elf_entry = load_elf_interp(&loc->interp_elf_ex,
					    interpreter,
					    &interp_map_addr,
					    load_bias);
		if (!IS_ERR((void *)elf_entry)) {
			/*
			 * load_elf_interp() returns relocation
			 * adjustment
			 */
			interp_load_addr = elf_entry;
			elf_entry += loc->interp_elf_ex.e_entry;
		}
		if (BAD_ADDR(elf_entry)) {
			force_sig(SIGSEGV, current);
			retval = IS_ERR((void *)elf_entry) ?
					(int)elf_entry : -EINVAL;
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
		kfree(elf_interpreter);
	} else {
		elf_entry = loc->elf_ex.e_entry;
		if (BAD_ADDR(elf_entry)) {
			force_sig(SIGSEGV, current);
			retval = -EINVAL;
			goto out_free_dentry;
		}
	}

	kfree(elf_phdata);

	sys_close(elf_exec_fileno);

	set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out;
	}
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	retval = create_elf_tables(bprm, &loc->elf_ex,
			  load_addr, interp_load_addr);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out;
	}
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

#ifdef arch_randomize_brk
	if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1))
		current->mm->brk = current->mm->start_brk =
			arch_randomize_brk(current->mm);
#endif

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior. Sigh. */
		down_write(&current->mm->mmap_sem);
		error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);
	}

#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example).  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	start_thread(regs, elf_entry, bprm->p);
	retval = 0;
out:
	kfree(loc);
out_ret:
	return retval;

	/* error cleanup */
out_free_dentry:
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_interp:
	kfree(elf_interpreter);
out_free_file:
	sys_close(elf_exec_fileno);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}
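
/*
 * A minimal sketch of the overflow-safe bounds test in the PT_LOAD loop
 * above: `k + p_memsz <= limit` is written as `limit - p_memsz < k` so
 * the sum itself can never wrap.  TASK_LIMIT and the helper name are
 * illustrative stand-ins for TASK_SIZE, not kernel definitions.
 */
#define TASK_LIMIT (1UL << 47)	/* illustrative user address-space limit */

static int segment_fits(unsigned long k, unsigned long p_filesz,
			unsigned long p_memsz)
{
	if (p_filesz > p_memsz)
		return 0;	/* file data larger than the memory image */
	if (p_memsz > TASK_LIMIT)
		return 0;	/* the segment alone exceeds the limit */
	if (TASK_LIMIT - p_memsz < k)
		return 0;	/* k + p_memsz would overrun (or wrap) */
	return 1;
}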
Example #4
0
asmlinkage void do_ptrace(struct pt_regs *regs)
{
	int request = regs->u_regs[UREG_I0];
	pid_t pid = regs->u_regs[UREG_I1];
	unsigned long addr = regs->u_regs[UREG_I2];
	unsigned long data = regs->u_regs[UREG_I3];
	unsigned long addr2 = regs->u_regs[UREG_I4];
	struct task_struct *child;
	int ret;

	if (test_thread_flag(TIF_32BIT)) {
		addr &= 0xffffffffUL;
		data &= 0xffffffffUL;
		addr2 &= 0xffffffffUL;
	}
	lock_kernel();
#ifdef DEBUG_PTRACE
	{
		char *s;

		if ((request >= 0) && (request <= 24))
			s = pt_rq [request];
		else
			s = "unknown";

		if (request == PTRACE_POKEDATA && data == 0x91d02001){
			printk ("do_ptrace: breakpoint pid=%d, addr=%016lx addr2=%016lx\n",
				pid, addr, addr2);
		} else 
			printk("do_ptrace: rq=%s(%d) pid=%d addr=%016lx data=%016lx addr2=%016lx\n",
			       s, request, pid, addr, data, addr2);
	}
#endif
	if (request == PTRACE_TRACEME) {
		int ret;

		/* are we already being traced? */
		if (current->ptrace & PT_PTRACED) {
			pt_error_return(regs, EPERM);
			goto out;
		}
		ret = security_ptrace(current->parent, current);
		if (ret) {
			pt_error_return(regs, -ret);
			goto out;
		}

		/* set the ptrace bit in the process flags. */
		current->ptrace |= PT_PTRACED;
		pt_succ_return(regs, 0);
		goto out;
	}
#ifndef ALLOW_INIT_TRACING
	if (pid == 1) {
		/* Can't dork with init. */
		pt_error_return(regs, EPERM);
		goto out;
	}
#endif
	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);
	read_unlock(&tasklist_lock);

	if (!child) {
		pt_error_return(regs, ESRCH);
		goto out;
	}

	if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH)
	    || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) {
		if (ptrace_attach(child)) {
			pt_error_return(regs, EPERM);
			goto out_tsk;
		}
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0) {
		pt_error_return(regs, -ret);
		goto out_tsk;
	}

	if (!(test_thread_flag(TIF_32BIT))	&&
	    ((request == PTRACE_READDATA64)		||
	     (request == PTRACE_WRITEDATA64)		||
	     (request == PTRACE_READTEXT64)		||
	     (request == PTRACE_WRITETEXT64)		||
	     (request == PTRACE_PEEKTEXT64)		||
	     (request == PTRACE_POKETEXT64)		||
	     (request == PTRACE_PEEKDATA64)		||
	     (request == PTRACE_POKEDATA64))) {
		addr = regs->u_regs[UREG_G2];
		addr2 = regs->u_regs[UREG_G3];
		request -= 30; /* wheee... */
	}

	switch(request) {
	case PTRACE_PEEKTEXT: /* read word at location addr. */ 
	case PTRACE_PEEKDATA: {
		unsigned long tmp64;
		unsigned int tmp32;
		int res, copied;

		res = -EIO;
		if (test_thread_flag(TIF_32BIT)) {
			copied = access_process_vm(child, addr,
						   &tmp32, sizeof(tmp32), 0);
			tmp64 = (unsigned long) tmp32;
			if (copied == sizeof(tmp32))
				res = 0;
		} else {
			copied = access_process_vm(child, addr,
						   &tmp64, sizeof(tmp64), 0);
			if (copied == sizeof(tmp64))
				res = 0;
		}
		if (res < 0)
			pt_error_return(regs, -res);
		else
			pt_os_succ_return(regs, tmp64, (long *) data);
		goto flush_and_out;
	}

	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA: {
		unsigned long tmp64;
		unsigned int tmp32;
		int copied, res = -EIO;

		if (test_thread_flag(TIF_32BIT)) {
			tmp32 = data;
			copied = access_process_vm(child, addr,
						   &tmp32, sizeof(tmp32), 1);
			if (copied == sizeof(tmp32))
				res = 0;
		} else {
			tmp64 = data;
			copied = access_process_vm(child, addr,
						   &tmp64, sizeof(tmp64), 1);
			if (copied == sizeof(tmp64))
				res = 0;
		}
		if (res < 0)
			pt_error_return(regs, -res);
		else
			pt_succ_return(regs, res);
		goto flush_and_out;
	}

	case PTRACE_GETREGS: {
		struct pt_regs32 __user *pregs =
			(struct pt_regs32 __user *) addr;
		struct pt_regs *cregs = child->thread_info->kregs;
		int rval;

		if (__put_user(tstate_to_psr(cregs->tstate), (&pregs->psr)) ||
		    __put_user(cregs->tpc, (&pregs->pc)) ||
		    __put_user(cregs->tnpc, (&pregs->npc)) ||
		    __put_user(cregs->y, (&pregs->y))) {
			pt_error_return(regs, EFAULT);
			goto out_tsk;
		}
		for (rval = 1; rval < 16; rval++)
			if (__put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]))) {
				pt_error_return(regs, EFAULT);
				goto out_tsk;
			}
		pt_succ_return(regs, 0);
#ifdef DEBUG_PTRACE
		printk ("PC=%lx nPC=%lx o7=%lx\n", cregs->tpc, cregs->tnpc, cregs->u_regs [15]);
#endif
		goto out_tsk;
	}

	case PTRACE_GETREGS64: {
		struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
		struct pt_regs *cregs = child->thread_info->kregs;
		unsigned long tpc = cregs->tpc;
		int rval;

		if ((child->thread_info->flags & _TIF_32BIT) != 0)
			tpc &= 0xffffffff;
		if (__put_user(cregs->tstate, (&pregs->tstate)) ||
		    __put_user(tpc, (&pregs->tpc)) ||
		    __put_user(cregs->tnpc, (&pregs->tnpc)) ||
		    __put_user(cregs->y, (&pregs->y))) {
			pt_error_return(regs, EFAULT);
			goto out_tsk;
		}
		for (rval = 1; rval < 16; rval++)
			if (__put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]))) {
				pt_error_return(regs, EFAULT);
				goto out_tsk;
			}
		pt_succ_return(regs, 0);
#ifdef DEBUG_PTRACE
		printk ("PC=%lx nPC=%lx o7=%lx\n", cregs->tpc, cregs->tnpc, cregs->u_regs [15]);
#endif
		goto out_tsk;
	}

	case PTRACE_SETREGS: {
		struct pt_regs32 __user *pregs =
			(struct pt_regs32 __user *) addr;
		struct pt_regs *cregs = child->thread_info->kregs;
		unsigned int psr, pc, npc, y;
		int i;

		/* Must be careful, tracing process can only set certain
		 * bits in the psr.
		 */
		if (__get_user(psr, (&pregs->psr)) ||
		    __get_user(pc, (&pregs->pc)) ||
		    __get_user(npc, (&pregs->npc)) ||
		    __get_user(y, (&pregs->y))) {
			pt_error_return(regs, EFAULT);
			goto out_tsk;
		}
		cregs->tstate &= ~(TSTATE_ICC);
		cregs->tstate |= psr_to_tstate_icc(psr);
		if (!((pc | npc) & 3)) {
			cregs->tpc = pc;
			cregs->tnpc = npc;
		}
		cregs->y = y;
		for (i = 1; i < 16; i++) {
			if (__get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]))) {
				pt_error_return(regs, EFAULT);
				goto out_tsk;
			}
		}
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	case PTRACE_SETREGS64: {
		struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
		struct pt_regs *cregs = child->thread_info->kregs;
		unsigned long tstate, tpc, tnpc, y;
		int i;

		/* Must be careful, tracing process can only set certain
		 * bits in the psr.
		 */
		if (__get_user(tstate, (&pregs->tstate)) ||
		    __get_user(tpc, (&pregs->tpc)) ||
		    __get_user(tnpc, (&pregs->tnpc)) ||
		    __get_user(y, (&pregs->y))) {
			pt_error_return(regs, EFAULT);
			goto out_tsk;
		}
		if ((child->thread_info->flags & _TIF_32BIT) != 0) {
			tpc &= 0xffffffff;
			tnpc &= 0xffffffff;
		}
		tstate &= (TSTATE_ICC | TSTATE_XCC);
		cregs->tstate &= ~(TSTATE_ICC | TSTATE_XCC);
		cregs->tstate |= tstate;
		if (!((tpc | tnpc) & 3)) {
			cregs->tpc = tpc;
			cregs->tnpc = tnpc;
		}
		cregs->y = y;
		for (i = 1; i < 16; i++) {
			if (__get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]))) {
				pt_error_return(regs, EFAULT);
				goto out_tsk;
			}
		}
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	case PTRACE_GETFPREGS: {
		struct fps {
			unsigned int regs[32];
			unsigned int fsr;
			unsigned int flags;
			unsigned int extra;
			unsigned int fpqd;
			struct fq {
				unsigned int insnaddr;
				unsigned int insn;
			} fpq[16];
		};
		struct fps __user *fps = (struct fps __user *) addr;
		unsigned long *fpregs = child->thread_info->fpregs;

		if (copy_to_user(&fps->regs[0], fpregs,
				 (32 * sizeof(unsigned int))) ||
		    __put_user(child->thread_info->xfsr[0], (&fps->fsr)) ||
		    __put_user(0, (&fps->fpqd)) ||
		    __put_user(0, (&fps->flags)) ||
		    __put_user(0, (&fps->extra)) ||
		    clear_user(&fps->fpq[0], 32 * sizeof(unsigned int))) {
			pt_error_return(regs, EFAULT);
			goto out_tsk;
		}
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	case PTRACE_GETFPREGS64: {
		struct fps {
			unsigned int regs[64];
			unsigned long fsr;
		};
		struct fps __user *fps = (struct fps __user *) addr;
		unsigned long *fpregs = child->thread_info->fpregs;

		if (copy_to_user(&fps->regs[0], fpregs,
				 (64 * sizeof(unsigned int))) ||
		    __put_user(child->thread_info->xfsr[0], (&fps->fsr))) {
			pt_error_return(regs, EFAULT);
			goto out_tsk;
		}
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	case PTRACE_SETFPREGS: {
		struct fps {
			unsigned int regs[32];
			unsigned int fsr;
			unsigned int flags;
			unsigned int extra;
			unsigned int fpqd;
			struct fq {
				unsigned int insnaddr;
				unsigned int insn;
			} fpq[16];
		};
		struct fps __user *fps = (struct fps __user *) addr;
		unsigned long *fpregs = child->thread_info->fpregs;
		unsigned fsr;

		if (copy_from_user(fpregs, &fps->regs[0],
				   (32 * sizeof(unsigned int))) ||
		    __get_user(fsr, (&fps->fsr))) {
			pt_error_return(regs, EFAULT);
			goto out_tsk;
		}
		child->thread_info->xfsr[0] &= 0xffffffff00000000UL;
		child->thread_info->xfsr[0] |= fsr;
		if (!(child->thread_info->fpsaved[0] & FPRS_FEF))
			child->thread_info->gsr[0] = 0;
		child->thread_info->fpsaved[0] |= (FPRS_FEF | FPRS_DL);
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	case PTRACE_SETFPREGS64: {
		struct fps {
			unsigned int regs[64];
			unsigned long fsr;
		};
		struct fps __user *fps = (struct fps __user *) addr;
		unsigned long *fpregs = child->thread_info->fpregs;

		if (copy_from_user(fpregs, &fps->regs[0],
				   (64 * sizeof(unsigned int))) ||
		    __get_user(child->thread_info->xfsr[0], (&fps->fsr))) {
			pt_error_return(regs, EFAULT);
			goto out_tsk;
		}
		if (!(child->thread_info->fpsaved[0] & FPRS_FEF))
			child->thread_info->gsr[0] = 0;
		child->thread_info->fpsaved[0] |= (FPRS_FEF | FPRS_DL | FPRS_DU);
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	case PTRACE_READTEXT:
	case PTRACE_READDATA: {
		int res = ptrace_readdata(child, addr,
					  (char __user *)addr2, data);
		if (res == data) {
			pt_succ_return(regs, 0);
			goto flush_and_out;
		}
		if (res >= 0)
			res = -EIO;
		pt_error_return(regs, -res);
		goto flush_and_out;
	}

	case PTRACE_WRITETEXT:
	case PTRACE_WRITEDATA: {
		int res = ptrace_writedata(child, (char __user *) addr2,
					   addr, data);
		if (res == data) {
			pt_succ_return(regs, 0);
			goto flush_and_out;
		}
		if (res >= 0)
			res = -EIO;
		pt_error_return(regs, -res);
		goto flush_and_out;
	}
	case PTRACE_SYSCALL: /* continue and stop at (return from) syscall */
		addr = 1;
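		/* fall through to PTRACE_CONT */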

	case PTRACE_CONT: { /* restart after signal. */
		if (data > _NSIG) {
			pt_error_return(regs, EIO);
			goto out_tsk;
		}
		if (addr != 1) {
			unsigned long pc_mask = ~0UL;

			if ((child->thread_info->flags & _TIF_32BIT) != 0)
				pc_mask = 0xffffffff;

			if (addr & 3) {
				pt_error_return(regs, EINVAL);
				goto out_tsk;
			}
#ifdef DEBUG_PTRACE
			printk ("Original: %016lx %016lx\n",
				child->thread_info->kregs->tpc,
				child->thread_info->kregs->tnpc);
			printk ("Continuing with %016lx %016lx\n", addr, addr+4);
#endif
			child->thread_info->kregs->tpc = (addr & pc_mask);
			child->thread_info->kregs->tnpc = ((addr + 4) & pc_mask);
		}

		if (request == PTRACE_SYSCALL) {
			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		} else {
			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		}

		child->exit_code = data;
#ifdef DEBUG_PTRACE
		printk("CONT: %s [%d]: set exit_code = %x %lx %lx\n", child->comm,
			child->pid, child->exit_code,
			child->thread_info->kregs->tpc,
			child->thread_info->kregs->tnpc);
		       
#endif
		wake_up_process(child);
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

/*
 * make the child exit.  Best I can do is send it a sigkill. 
 * perhaps it should be put in the status that it wants to 
 * exit.
 */
	case PTRACE_KILL: {
		if (child->state == TASK_ZOMBIE) {	/* already dead */
			pt_succ_return(regs, 0);
			goto out_tsk;
		}
		child->exit_code = SIGKILL;
		wake_up_process(child);
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	case PTRACE_SUNDETACH: { /* detach a process that was attached. */
		int error = ptrace_detach(child, data);
		if (error) {
			pt_error_return(regs, EIO);
			goto out_tsk;
		}
		pt_succ_return(regs, 0);
		goto out_tsk;
	}

	/* PTRACE_DUMPCORE unsupported... */

	default: {
		int err = ptrace_request(child, request, addr, data);
		if (err)
			pt_error_return(regs, -err);
		else
			pt_succ_return(regs, 0);
		goto out_tsk;
	}
	}
flush_and_out:
	{
		unsigned long va;

		if (tlb_type == cheetah || tlb_type == cheetah_plus) {
			for (va = 0; va < (1 << 16); va += (1 << 5))
				spitfire_put_dcache_tag(va, 0x0);
			/* No need to mess with I-cache on Cheetah. */
		} else {
			for (va =  0; va < L1DCACHE_SIZE; va += 32)
				spitfire_put_dcache_tag(va, 0x0);
			if (request == PTRACE_PEEKTEXT ||
			    request == PTRACE_POKETEXT ||
			    request == PTRACE_READTEXT ||
			    request == PTRACE_WRITETEXT) {
				for (va =  0; va < (PAGE_SIZE << 1); va += 32)
					spitfire_put_icache_tag(va, 0x0);
				__asm__ __volatile__("flush %g6");
			}
		}
	}
out_tsk:
	if (child)
		put_task_struct(child);
out:
	unlock_kernel();
}
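
/*
 * User-space counterpart of the PEEKDATA handling above, using the
 * portable ptrace(2) interface.  A sketch only: a real tracer must
 * first attach (or be the parent of a PTRACE_TRACEME child) and wait
 * for the tracee to stop before peeking.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <errno.h>
#include <stdio.h>

static long peek_word(pid_t pid, void *addr)
{
	long word;

	errno = 0;
	word = ptrace(PTRACE_PEEKDATA, pid, addr, NULL);
	if (word == -1 && errno != 0)
		perror("PTRACE_PEEKDATA");	/* -1 can be valid data, so test errno */
	return word;
}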
Example #5
0
asmlinkage void sparc64_get_context(struct pt_regs *regs)
{
	struct ucontext __user *ucp = (struct ucontext __user *)
		regs->u_regs[UREG_I0];
	mc_gregset_t __user *grp;
	mcontext_t __user *mcp;
	unsigned long fp, i7;
	unsigned char fenab;
	int err;

	synchronize_user_stack();
	if (get_thread_wsaved() || clear_user(ucp, sizeof(*ucp)))
		goto do_sigsegv;

#if 1
	fenab = 0; /* IMO get_context is like any other system call, thus modifies FPU state -jj */
#else
	fenab = (current_thread_info()->fpsaved[0] & FPRS_FEF);
#endif
		
	mcp = &ucp->uc_mcontext;
	grp = &mcp->mc_gregs;

	/* Skip over the trap instruction, first. */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc   = (regs->tnpc & 0xffffffff);
		regs->tnpc  = (regs->tnpc + 4) & 0xffffffff;
	} else {
		regs->tpc   = regs->tnpc;
		regs->tnpc += 4;
	}
	err = 0;
	if (_NSIG_WORDS == 1)
		err |= __put_user(current->blocked.sig[0],
				  (unsigned long __user *)&ucp->uc_sigmask);
	else
		err |= __copy_to_user(&ucp->uc_sigmask, &current->blocked,
				      sizeof(sigset_t));

	err |= __put_user(regs->tstate, &((*grp)[MC_TSTATE]));
	err |= __put_user(regs->tpc, &((*grp)[MC_PC]));
	err |= __put_user(regs->tnpc, &((*grp)[MC_NPC]));
	err |= __put_user(regs->y, &((*grp)[MC_Y]));
	err |= __put_user(regs->u_regs[UREG_G1], &((*grp)[MC_G1]));
	err |= __put_user(regs->u_regs[UREG_G2], &((*grp)[MC_G2]));
	err |= __put_user(regs->u_regs[UREG_G3], &((*grp)[MC_G3]));
	err |= __put_user(regs->u_regs[UREG_G4], &((*grp)[MC_G4]));
	err |= __put_user(regs->u_regs[UREG_G5], &((*grp)[MC_G5]));
	err |= __put_user(regs->u_regs[UREG_G6], &((*grp)[MC_G6]));
	err |= __put_user(regs->u_regs[UREG_G7], &((*grp)[MC_G7]));
	err |= __put_user(regs->u_regs[UREG_I0], &((*grp)[MC_O0]));
	err |= __put_user(regs->u_regs[UREG_I1], &((*grp)[MC_O1]));
	err |= __put_user(regs->u_regs[UREG_I2], &((*grp)[MC_O2]));
	err |= __put_user(regs->u_regs[UREG_I3], &((*grp)[MC_O3]));
	err |= __put_user(regs->u_regs[UREG_I4], &((*grp)[MC_O4]));
	err |= __put_user(regs->u_regs[UREG_I5], &((*grp)[MC_O5]));
	err |= __put_user(regs->u_regs[UREG_I6], &((*grp)[MC_O6]));
	err |= __put_user(regs->u_regs[UREG_I7], &((*grp)[MC_O7]));

	err |= __get_user(fp,
		 (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
	err |= __get_user(i7,
		 (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));
	err |= __put_user(fp, &(mcp->mc_fp));
	err |= __put_user(i7, &(mcp->mc_i7));

	err |= __put_user(fenab, &(mcp->mc_fpregs.mcfpu_enab));
	if (fenab) {
		unsigned long *fpregs = current_thread_info()->fpregs;
		unsigned long fprs;
		
		fprs = current_thread_info()->fpsaved[0];
		if (fprs & FPRS_DL)
			err |= copy_to_user(&(mcp->mc_fpregs.mcfpu_fregs), fpregs,
					    (sizeof(unsigned int) * 32));
		if (fprs & FPRS_DU)
			err |= copy_to_user(
				((unsigned long __user *)&(mcp->mc_fpregs.mcfpu_fregs)) + 16,
				fpregs + 16,
				(sizeof(unsigned int) * 32));
		err |= __put_user(current_thread_info()->xfsr[0], &(mcp->mc_fpregs.mcfpu_fsr));
		err |= __put_user(current_thread_info()->gsr[0], &(mcp->mc_fpregs.mcfpu_gsr));
		err |= __put_user(fprs, &(mcp->mc_fpregs.mcfpu_fprs));
	}
	if (err)
		goto do_sigsegv;

	return;
do_sigsegv:
	force_sig(SIGSEGV, current);
}
Example #6
0
static unsigned long get_kcore_size(int *num_vma, size_t *elf_buflen)
{
	unsigned long size;
#ifndef NO_MM
	unsigned long try;
	struct vm_struct *m;
#endif

	*num_vma = 0;
	size = ((size_t)high_memory - PAGE_OFFSET + PAGE_SIZE);
#ifdef NO_MM
	/* vmlist is not available in this case */
	*elf_buflen = PAGE_SIZE;
	return size;
#else
	if (!vmlist) {
		*elf_buflen = PAGE_SIZE;
		return (size);
	}

	for (m=vmlist; m; m=m->next) {
		try = (unsigned long)m->addr + m->size;
		if (try > size)
			size = try;
		*num_vma = *num_vma + 1;
	}
	*elf_buflen =	sizeof(struct elfhdr) + 
			(*num_vma + 2)*sizeof(struct elf_phdr) + 
			3 * sizeof(struct memelfnote);
	*elf_buflen = PAGE_ALIGN(*elf_buflen);
	return (size - PAGE_OFFSET + *elf_buflen);
#endif
}


/*****************************************************************************/
/*
 * determine size of ELF note
 */
static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name), 4);
	sz += roundup(en->datasz, 4);

	return sz;
} /* end notesize() */
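
/*
 * A worked example of the sizing above, for a hypothetical note with
 * name "CORE" and an 8-byte descriptor:
 *
 *   sizeof(struct elf_note)     = 12  (three 32-bit words)
 *   roundup(strlen("CORE"), 4)  =  4
 *   roundup(8, 4)               =  8
 *   total                       = 24 bytes
 */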

/*****************************************************************************/
/*
 * store a note in the header buffer
 */
static char *storenote(struct memelfnote *men, char *bufp)
{
	struct elf_note en;

#define DUMP_WRITE(addr,nr) do { memcpy(bufp,addr,nr); bufp += nr; } while(0)

	en.n_namesz = strlen(men->name);
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);

	/* XXX - cast from long long to long to avoid need for libgcc.a */
	bufp = (char*) roundup((unsigned long)bufp,4);
	DUMP_WRITE(men->data, men->datasz);
	bufp = (char*) roundup((unsigned long)bufp,4);

#undef DUMP_WRITE

	return bufp;
} /* end storenote() */

/*
 * store an ELF coredump header in the supplied buffer
 * num_vma is the number of elements in vmlist
 */
static void elf_kcore_store_hdr(char *bufp, int num_vma, int dataoff)
{
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	struct elf_prpsinfo prpsinfo;	/* NT_PRPSINFO */
	struct elf_phdr *nhdr, *phdr;
	struct elfhdr *elf;
	struct memelfnote notes[3];
	off_t offset = 0;
#ifndef NO_MM
	struct vm_struct *m;
#endif

	/* setup ELF header */
	elf = (struct elfhdr *) bufp;
	bufp += sizeof(struct elfhdr);
	offset += sizeof(struct elfhdr);
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS]	= ELF_CLASS;
	elf->e_ident[EI_DATA]	= ELF_DATA;
	elf->e_ident[EI_VERSION]= EV_CURRENT;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
	elf->e_type	= ET_CORE;
	elf->e_machine	= ELF_ARCH;
	elf->e_version	= EV_CURRENT;
	elf->e_entry	= 0;
	elf->e_phoff	= sizeof(struct elfhdr);
	elf->e_shoff	= 0;
	elf->e_flags	= 0;
	elf->e_ehsize	= sizeof(struct elfhdr);
	elf->e_phentsize= sizeof(struct elf_phdr);
	elf->e_phnum	= 2 + num_vma;
	elf->e_shentsize= 0;
	elf->e_shnum	= 0;
	elf->e_shstrndx	= 0;

	/* setup ELF PT_NOTE program header */
	nhdr = (struct elf_phdr *) bufp;
	bufp += sizeof(struct elf_phdr);
	offset += sizeof(struct elf_phdr);
	nhdr->p_type	= PT_NOTE;
	nhdr->p_offset	= 0;
	nhdr->p_vaddr	= 0;
	nhdr->p_paddr	= 0;
	nhdr->p_filesz	= 0;
	nhdr->p_memsz	= 0;
	nhdr->p_flags	= 0;
	nhdr->p_align	= 0;

	/* setup ELF PT_LOAD program header for the 
	 * virtual range 0xc0000000 -> high_memory */
	phdr = (struct elf_phdr *) bufp;
	bufp += sizeof(struct elf_phdr);
	offset += sizeof(struct elf_phdr);
	phdr->p_type	= PT_LOAD;
	phdr->p_flags	= PF_R|PF_W|PF_X;
	phdr->p_offset	= dataoff;
	phdr->p_vaddr	= PAGE_OFFSET;
	phdr->p_paddr	= __pa(PAGE_OFFSET);
	phdr->p_filesz	= phdr->p_memsz = ((unsigned long)high_memory - PAGE_OFFSET);
	phdr->p_align	= PAGE_SIZE;

#ifndef NO_MM
	/* setup ELF PT_LOAD program header for every vmalloc'd area */
	for (m=vmlist; m; m=m->next) {
		if (m->flags & VM_IOREMAP) /* don't dump ioremap'd stuff! (TA) */
			continue;

		phdr = (struct elf_phdr *) bufp;
		bufp += sizeof(struct elf_phdr);
		offset += sizeof(struct elf_phdr);

		phdr->p_type	= PT_LOAD;
		phdr->p_flags	= PF_R|PF_W|PF_X;
		phdr->p_offset	= (size_t)m->addr - PAGE_OFFSET + dataoff;
		phdr->p_vaddr	= (size_t)m->addr;
		phdr->p_paddr	= __pa(m->addr);
		phdr->p_filesz	= phdr->p_memsz	= m->size;
		phdr->p_align	= PAGE_SIZE;
	}
#endif /* NO_MM */

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */
	nhdr->p_offset	= offset;

	/* set up the process status */
	notes[0].name = "CORE";
	notes[0].type = NT_PRSTATUS;
	notes[0].datasz = sizeof(struct elf_prstatus);
	notes[0].data = &prstatus;

	memset(&prstatus, 0, sizeof(struct elf_prstatus));

	nhdr->p_filesz	= notesize(&notes[0]);
	bufp = storenote(&notes[0], bufp);

	/* set up the process info */
	notes[1].name	= "CORE";
	notes[1].type	= NT_PRPSINFO;
	notes[1].datasz	= sizeof(struct elf_prpsinfo);
	notes[1].data	= &prpsinfo;

	memset(&prpsinfo, 0, sizeof(struct elf_prpsinfo));
	prpsinfo.pr_state	= 0;
	prpsinfo.pr_sname	= 'R';
	prpsinfo.pr_zomb	= 0;

	strcpy(prpsinfo.pr_fname, "vmlinux");
	strncpy(prpsinfo.pr_psargs, saved_command_line, ELF_PRARGSZ);

	nhdr->p_filesz	= notesize(&notes[1]);
	bufp = storenote(&notes[1], bufp);

	/* set up the task structure */
	notes[2].name	= "CORE";
	notes[2].type	= NT_TASKSTRUCT;
	notes[2].datasz	= sizeof(struct task_struct);
	notes[2].data	= current;

	nhdr->p_filesz	= notesize(&notes[2]);
	bufp = storenote(&notes[2], bufp);

} /* end elf_kcore_store_hdr() */
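
/*
 * Layout of the header buffer as filled above, in order (sizes depend
 * on num_vma and the note payloads):
 *
 *   struct elfhdr
 *   PT_NOTE program header
 *   PT_LOAD program header for PAGE_OFFSET..high_memory
 *   one PT_LOAD program header per non-ioremap vmalloc area
 *   NT_PRSTATUS, NT_PRPSINFO and NT_TASKSTRUCT notes
 */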

/*****************************************************************************/
/*
 * read from the ELF header and then kernel memory
 */
static ssize_t read_kcore(struct file *file, char *buffer, size_t buflen, loff_t *fpos)
{
	ssize_t acc = 0;
	unsigned long size, tsz;
	size_t elf_buflen;
	int num_vma;
	unsigned long start;

#ifdef NO_MM
	proc_root_kcore->size = size = get_kcore_size(&num_vma, &elf_buflen);
#else
	read_lock(&vmlist_lock);
	proc_root_kcore->size = size = get_kcore_size(&num_vma, &elf_buflen);
	if (buflen == 0 || (unsigned long long)*fpos >= size) {
		read_unlock(&vmlist_lock);
		return 0;
	}
#endif /* NO_MM */

	/* trim buflen to not go beyond EOF */
	if (buflen > size - *fpos)
		buflen = size - *fpos;

	/* construct an ELF core header if we'll need some of it */
	if (*fpos < elf_buflen) {
		char * elf_buf;

		tsz = elf_buflen - *fpos;
		if (buflen < tsz)
			tsz = buflen;
		elf_buf = kmalloc(elf_buflen, GFP_ATOMIC);
		if (!elf_buf) {
			read_unlock(&vmlist_lock);
			return -ENOMEM;
		}
		memset(elf_buf, 0, elf_buflen);
		elf_kcore_store_hdr(elf_buf, num_vma, elf_buflen);
		read_unlock(&vmlist_lock);
		if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
			kfree(elf_buf);
			return -EFAULT;
		}
		kfree(elf_buf);
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	} else
		read_unlock(&vmlist_lock);

	/* where page 0 is not mapped, write zeros into the buffer */
#if defined (__i386__) || defined (__mc68000__) || defined(__x86_64__)
	if (*fpos < PAGE_SIZE + elf_buflen) {
		/* work out how much to clear */
		tsz = PAGE_SIZE + elf_buflen - *fpos;
		if (buflen < tsz)
			tsz = buflen;

		/* write zeros to buffer */
		if (clear_user(buffer, tsz))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return tsz;
	}
#endif
	
	/*
	 * Fill the remainder of the buffer from kernel VM space.
	 * We said in the ELF header that the data which starts
	 * at 'elf_buflen' is virtual address PAGE_OFFSET. --rmk
	 */
	start = PAGE_OFFSET + (*fpos - elf_buflen);
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;
	while (buflen) {
		int err; 
	
		if ((start > PAGE_OFFSET) && (start < (unsigned long)high_memory)) {
			if (kern_addr_valid(start)) {
				err = copy_to_user(buffer, (char *)start, tsz);
			} else {
				err = clear_user(buffer, tsz);
			}
		} else {
#ifndef NO_MM
			char * elf_buf;
			struct vm_struct *m;
			unsigned long curstart = start;
			unsigned long cursize = tsz;

			elf_buf = kmalloc(tsz, GFP_KERNEL);
			if (!elf_buf)
				return -ENOMEM;
			memset(elf_buf, 0, tsz);

			read_lock(&vmlist_lock);
			for (m=vmlist; m && cursize; m=m->next) {
				unsigned long vmstart;
				unsigned long vmsize;
				unsigned long msize = m->size - PAGE_SIZE;

				if (((unsigned long)m->addr + msize) < 
								curstart)
					continue;
				if ((unsigned long)m->addr > (curstart + 
								cursize))
					break;
				vmstart = (curstart < (unsigned long)m->addr ? 
					(unsigned long)m->addr : curstart);
				if (((unsigned long)m->addr + msize) > 
							(curstart + cursize))
					vmsize = curstart + cursize - vmstart;
				else
					vmsize = (unsigned long)m->addr + 
							msize - vmstart;
				curstart = vmstart + vmsize;
				cursize -= vmsize;
				/* don't dump ioremap'd stuff! (TA) */
				if (m->flags & VM_IOREMAP)
					continue;
				memcpy(elf_buf + (vmstart - start),
					(char *)vmstart, vmsize);
			}
			read_unlock(&vmlist_lock);
			err = copy_to_user(buffer, elf_buf, tsz);
			kfree(elf_buf);
#endif /* NO_MM */
		}
		if (err)
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;
		start += tsz;
		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
	}

	return acc;
}
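
/*
 * A minimal sketch of the interval arithmetic in the vmlist walk above:
 * clip the window [curstart, curstart + cursize) against one vmalloc
 * area [astart, astart + asize) and report the overlap.  The helper
 * name and out-parameter are illustrative, not kernel API.
 */
static unsigned long clip_to_area(unsigned long curstart, unsigned long cursize,
				  unsigned long astart, unsigned long asize,
				  unsigned long *ovl_start)
{
	unsigned long wend = curstart + cursize;
	unsigned long aend = astart + asize;
	unsigned long lo = curstart > astart ? curstart : astart;
	unsigned long hi = wend < aend ? wend : aend;

	if (lo >= hi)
		return 0;	/* no overlap with this area */
	*ovl_start = lo;
	return hi - lo;		/* bytes of overlap */
}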
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	unsigned long addr2 = current->thread.kregs->u_regs[UREG_I4];
	void __user *addr2p;
	const struct user_regset_view *view;
	struct pt_regs __user *pregs;
	struct fps __user *fps;
	int ret;

	view = task_user_regset_view(current);
	addr2p = (void __user *) addr2;
	pregs = (struct pt_regs __user *) addr;
	fps = (struct fps __user *) addr;

	switch(request) {
	case PTRACE_GETREGS: {
		ret = copy_regset_to_user(child, view, REGSET_GENERAL,
					  32 * sizeof(u32),
					  4 * sizeof(u32),
					  &pregs->psr);
		if (!ret)
			copy_regset_to_user(child, view, REGSET_GENERAL,
					    1 * sizeof(u32),
					    15 * sizeof(u32),
					    &pregs->u_regs[0]);
		break;
	}

	case PTRACE_SETREGS: {
		ret = copy_regset_from_user(child, view, REGSET_GENERAL,
					    32 * sizeof(u32),
					    4 * sizeof(u32),
					    &pregs->psr);
		if (!ret)
			copy_regset_from_user(child, view, REGSET_GENERAL,
					      1 * sizeof(u32),
					      15 * sizeof(u32),
					      &pregs->u_regs[0]);
		break;
	}

	case PTRACE_GETFPREGS: {
		ret = copy_regset_to_user(child, view, REGSET_FP,
					  0 * sizeof(u32),
					  32 * sizeof(u32),
					  &fps->regs[0]);
		if (!ret)
			ret = copy_regset_to_user(child, view, REGSET_FP,
						  33 * sizeof(u32),
						  1 * sizeof(u32),
						  &fps->fsr);

		if (!ret) {
			if (__put_user(0, &fps->fpqd) ||
			    __put_user(0, &fps->flags) ||
			    __put_user(0, &fps->extra) ||
			    clear_user(fps->fpq, sizeof(fps->fpq)))
				ret = -EFAULT;
		}
		break;
	}

	case PTRACE_SETFPREGS: {
		ret = copy_regset_from_user(child, view, REGSET_FP,
					    0 * sizeof(u32),
					    32 * sizeof(u32),
					    &fps->regs[0]);
		if (!ret)
			ret = copy_regset_from_user(child, view, REGSET_FP,
						    33 * sizeof(u32),
						    1 * sizeof(u32),
						    &fps->fsr);
		break;
	}

	case PTRACE_READTEXT:
	case PTRACE_READDATA:
		ret = ptrace_readdata(child, addr, addr2p, data);

		if (ret == data)
			ret = 0;
		else if (ret >= 0)
			ret = -EIO;
		break;

	case PTRACE_WRITETEXT:
	case PTRACE_WRITEDATA:
		ret = ptrace_writedata(child, addr2p, addr, data);

		if (ret == data)
			ret = 0;
		else if (ret >= 0)
			ret = -EIO;
		break;

	default:
		if (request == PTRACE_SPARC_DETACH)
			request = PTRACE_DETACH;
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
Example #8
0
File: priv.c Project: 59psi/linux
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;
	unsigned long start, end;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!MACHINE_HAS_PFMF)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if the host supports it */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    S390_lowcore.stfl_fac_list & 0x00020000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* No support for conditional-SSKE */
	if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	/* We don't support EDAT2
	case 0x00002000:
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;*/
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}
	while (start < end) {
		unsigned long useraddr;

		useraddr = gmap_translate(start, vcpu->arch.gmap);
		if (IS_ERR((void *)useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			if (set_guest_storage_key(current->mm, useraddr,
					vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
					vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
		vcpu->run->s.regs.gprs[reg2] = end;
	return 0;
}
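
/*
 * A minimal sketch of the frame-end arithmetic in the FSC switch above:
 * round `start` up to the boundary strictly above it, for any
 * power-of-two frame size (shift 12 for a 4K page, 20 for a 1M
 * segment).  The helper name is illustrative.
 */
static unsigned long frame_end(unsigned long start, unsigned int shift)
{
	unsigned long size = 1UL << shift;

	return (start + size) & ~(size - 1);
}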
/*
 * map a binary by direct mmap() of the individual PT_LOAD segments
 */
static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
					     struct file *file,
					     struct mm_struct *mm)
{
	struct elf32_fdpic_loadseg *seg;
	struct elf32_phdr *phdr;
	unsigned long load_addr, delta_vaddr;
	int loop, dvset;

	load_addr = params->load_addr;
	delta_vaddr = 0;
	dvset = 0;

	seg = params->loadmap->segs;

	/* deal with each load segment separately */
	phdr = params->phdrs;
	for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
		unsigned long maddr, disp, excess, excess1;
		int prot = 0, flags;

		if (phdr->p_type != PT_LOAD)
			continue;

		kdebug("[LOAD] va=%lx of=%lx fs=%lx ms=%lx",
		       (unsigned long) phdr->p_vaddr,
		       (unsigned long) phdr->p_offset,
		       (unsigned long) phdr->p_filesz,
		       (unsigned long) phdr->p_memsz);

		/* determine the mapping parameters */
		if (phdr->p_flags & PF_R) prot |= PROT_READ;
		if (phdr->p_flags & PF_W) prot |= PROT_WRITE;
		if (phdr->p_flags & PF_X) prot |= PROT_EXEC;

		flags = MAP_PRIVATE | MAP_DENYWRITE;
		if (params->flags & ELF_FDPIC_FLAG_EXECUTABLE)
			flags |= MAP_EXECUTABLE;

		maddr = 0;

		switch (params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) {
		case ELF_FDPIC_FLAG_INDEPENDENT:
			/* PT_LOADs are independently locatable */
			break;

		case ELF_FDPIC_FLAG_HONOURVADDR:
			/* the specified virtual address must be honoured */
			maddr = phdr->p_vaddr;
			flags |= MAP_FIXED;
			break;

		case ELF_FDPIC_FLAG_CONSTDISP:
			/* constant displacement
			 * - can be mapped anywhere, but must be mapped as a unit
			 */
			if (!dvset) {
				maddr = load_addr;
				delta_vaddr = phdr->p_vaddr;
				dvset = 1;
			} else {
				maddr = load_addr + phdr->p_vaddr - delta_vaddr;
				flags |= MAP_FIXED;
			}
			break;

		case ELF_FDPIC_FLAG_CONTIGUOUS:
			/* contiguity handled later */
			break;

		default:
			BUG();
		}

		maddr &= PAGE_MASK;

		/* create the mapping */
		disp = phdr->p_vaddr & ~PAGE_MASK;
		down_write(&mm->mmap_sem);
		maddr = do_mmap(file, maddr, phdr->p_memsz + disp, prot, flags,
				phdr->p_offset - disp);
		up_write(&mm->mmap_sem);

		kdebug("mmap[%d] <file> sz=%lx pr=%x fl=%x of=%lx --> %08lx",
		       loop, phdr->p_memsz + disp, prot, flags, phdr->p_offset - disp,
		       maddr);

		if (IS_ERR((void *) maddr))
			return (int) maddr;

		if ((params->flags & ELF_FDPIC_FLAG_ARRANGEMENT) == ELF_FDPIC_FLAG_CONTIGUOUS)
			load_addr += PAGE_ALIGN(phdr->p_memsz + disp);

		seg->addr = maddr + disp;
		seg->p_vaddr = phdr->p_vaddr;
		seg->p_memsz = phdr->p_memsz;

		/* map the ELF header address if in this segment */
		if (phdr->p_offset == 0)
			params->elfhdr_addr = seg->addr;

		/* clear the region between the beginning of the mapping and
		 * the beginning of the PT_LOAD */
		if (prot & PROT_WRITE && disp > 0) {
			kdebug("clear[%d] ad=%lx sz=%lx", loop, maddr, disp);
			if (clear_user((void __user *) maddr, disp))
				return -EFAULT;
			maddr += disp;
		}

		/* clear any space allocated but not loaded
		 * - on uClinux we can just clear the lot
		 * - on MMU linux we'll get a SIGBUS beyond the last page
		 *   extant in the file
		 */
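		/* excess  = bytes of BSS (p_memsz beyond p_filesz);
		 * excess1 = bytes from the end of the file image to the end
		 *           of its final page, clearable in place
		 */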
		excess = phdr->p_memsz - phdr->p_filesz;
		excess1 = PAGE_SIZE - ((maddr + phdr->p_filesz) & ~PAGE_MASK);

#ifdef CONFIG_MMU

		if (excess > excess1) {
			unsigned long xaddr = maddr + phdr->p_filesz + excess1;
			unsigned long xmaddr;

			flags |= MAP_FIXED | MAP_ANONYMOUS;
			down_write(&mm->mmap_sem);
			xmaddr = do_mmap(NULL, xaddr, excess - excess1, prot, flags, 0);
			up_write(&mm->mmap_sem);

			kdebug("mmap[%d] <anon>"
			       " ad=%lx sz=%lx pr=%x fl=%x of=0 --> %08lx",
			       loop, xaddr, excess - excess1, prot, flags, xmaddr);

			if (xmaddr != xaddr)
				return -ENOMEM;
		}

		if (prot & PROT_WRITE && excess1 > 0) {
			kdebug("clear[%d] ad=%lx sz=%lx",
			       loop, maddr + phdr->p_filesz, excess1);
			if (clear_user((void __user *) maddr + phdr->p_filesz,
				       excess1))
				return -EFAULT;
		}

#else
		if (excess > 0) {
			kdebug("clear[%d] ad=%lx sz=%lx",
			       loop, maddr + phdr->p_filesz, excess);
			if (clear_user((void __user *) maddr + phdr->p_filesz,
				       excess))
				return -EFAULT;
		}
#endif

		if (mm) {
			if (phdr->p_flags & PF_X) {
				mm->start_code = maddr;
				mm->end_code = maddr + phdr->p_memsz;
			} else if (!mm->start_data) {
				mm->start_data = maddr;
				mm->end_data = maddr + phdr->p_memsz;
			}
		}

		seg++;
	}

	return 0;
} /* end elf_fdpic_map_file_by_direct_mmap() */
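Two details above are easy to miss: disp = p_vaddr & ~PAGE_MASK is the segment's displacement within its page, and the mapping is made at file offset p_offset - disp so the file's page alignment matches the segment's. This is legal because the ELF spec requires p_vaddr and p_offset of a PT_LOAD to be congruent modulo the page size. A short sketch of the arithmetic with made-up values (PAGE_SIZE 0x1000 assumed):

/* PT_LOAD with p_vaddr = 0x10020, p_offset = 0x3020, p_memsz = 0x500 */
unsigned long disp    = 0x10020 & 0xfff;  /* 0x20, offset within the page */
unsigned long map_len = 0x500 + 0x20;     /* p_memsz + disp = 0x520 */
unsigned long map_off = 0x3020 - 0x20;    /* 0x3000, page-aligned file offset */
/* the segment's first byte then sits at maddr + disp */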
static int elf_fdpic_map_file_constdisp_on_uclinux(struct elf_fdpic_params *params,
						   struct file *file,
						   struct mm_struct *mm)
{
	struct elf32_fdpic_loadseg *seg;
	struct elf32_phdr *phdr;
	unsigned long load_addr, base = ULONG_MAX, top = 0, maddr = 0, mflags;
	loff_t fpos;
	int loop, ret;

	load_addr = params->load_addr;
	seg = params->loadmap->segs;

	/* determine the bounds of the contiguous overall allocation we must make */
	phdr = params->phdrs;
	for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
		if (params->phdrs[loop].p_type != PT_LOAD)
			continue;

		if (base > phdr->p_vaddr)
			base = phdr->p_vaddr;
		if (top < phdr->p_vaddr + phdr->p_memsz)
			top = phdr->p_vaddr + phdr->p_memsz;
	}

	/* allocate one big anon block for everything */
	mflags = MAP_PRIVATE;
	if (params->flags & ELF_FDPIC_FLAG_EXECUTABLE)
		mflags |= MAP_EXECUTABLE;

	down_write(&mm->mmap_sem);
	maddr = do_mmap(NULL, load_addr, top - base,
			PROT_READ | PROT_WRITE | PROT_EXEC, mflags, 0);
	up_write(&mm->mmap_sem);
	if (IS_ERR((void *) maddr))
		return (int) maddr;

	if (load_addr != 0)
		load_addr += PAGE_ALIGN(top - base);

	/* and then load the file segments into it */
	phdr = params->phdrs;
	for (loop = 0; loop < params->hdr.e_phnum; loop++, phdr++) {
		if (params->phdrs[loop].p_type != PT_LOAD)
			continue;

		fpos = phdr->p_offset;

		seg->addr = maddr + (phdr->p_vaddr - base);
		seg->p_vaddr = phdr->p_vaddr;
		seg->p_memsz = phdr->p_memsz;

		ret = file->f_op->read(file, (void *) seg->addr, phdr->p_filesz, &fpos);
		if (ret < 0)
			return ret;

		/* map the ELF header address if in this segment */
		if (phdr->p_offset == 0)
			params->elfhdr_addr = seg->addr;

		/* clear any space allocated but not loaded */
		if (phdr->p_filesz < phdr->p_memsz) {
			if (clear_user((void __user *) (seg->addr +
							phdr->p_filesz),
				       phdr->p_memsz - phdr->p_filesz))
				return -EFAULT;
		}

		if (mm) {
			if (phdr->p_flags & PF_X) {
				mm->start_code = seg->addr;
				mm->end_code = seg->addr + phdr->p_memsz;
			} else if (!mm->start_data) {
				mm->start_data = seg->addr;
#ifndef CONFIG_MMU
				mm->end_data = seg->addr + phdr->p_memsz;
#endif
			}

#ifdef CONFIG_MMU
			if (seg->addr + phdr->p_memsz > mm->end_data)
				mm->end_data = seg->addr + phdr->p_memsz;
#endif
		}

		seg++;
	}

	return 0;
} /* end elf_fdpic_map_file_constdisp_on_uclinux() */
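The constant-displacement variant above first measures the union of all PT_LOAD address ranges, makes one anonymous RWX mapping of top - base bytes, then read()s each segment into place, so every segment keeps its original displacement from every other. A worked layout with assumed values:

/* Sketch: two PT_LOADs, text at p_vaddr 0x0000 (p_memsz 0x6000) and
 * data at p_vaddr 0x8000 (p_memsz 0x2000):
 *   base = 0x0000, top = 0xa000 -> one anon mapping of 0xa000 bytes
 *   text lands at maddr + (0x0000 - base) = maddr
 *   data lands at maddr + (0x8000 - base) = maddr + 0x8000
 */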
Example #11
0
static long compat_fimg2d_ioctl32(struct file *file, unsigned int cmd,
							unsigned long arg)
{
	switch (cmd) {
	case COMPAT_FIMG2D_BITBLT_BLIT:
	{
		struct compat_fimg2d_blit __user *data32;
		struct fimg2d_blit __user *data;
		struct mm_struct *mm;
		enum blit_op op;
		enum blit_sync sync;
		enum fimg2d_qos_level qos_lv;
		compat_uint_t seq_no;
		unsigned long stack_cursor = 0;
		int err;

		mm = get_task_mm(current);
		if (!mm) {
			fimg2d_err("no mm for ctx\n");
			return -ENXIO;
		}

		data32 = compat_ptr(arg);
		data = compat_alloc_user_space(sizeof(*data));
		if (!data) {
			fimg2d_err("failed to allocate user compat space\n");
			mmput(mm);
			return -ENOMEM;
		}

		stack_cursor += sizeof(*data);
		if (clear_user(data, sizeof(*data))) {
			fimg2d_err("failed to access userspace\n");
			mmput(mm);
			return -EFAULT;
		}

		err = get_user(op, &data32->op);
		err |= put_user(op, &data->op);
		if (err) {
			fimg2d_err("failed to get compat data\n");
			mmput(mm);
			return err;
		}

		err = compat_get_fimg2d_param(&data->param, &data32->param);
		if (err) {
			fimg2d_err("failed to get compat data\n");
			mmput(mm);
			return err;
		}

		if (data32->src) {
			data->src = compat_alloc_user_space(sizeof(*data->src) +
								stack_cursor);
			if (!data->src) {
				fimg2d_err("failed to allocate user compat space\n");
				mmput(mm);
				return -ENOMEM;
			}

			stack_cursor += sizeof(*data->src);
			err = compat_get_fimg2d_image(data->src, data32->src);
			if (err) {
				fimg2d_err("failed to get compat data\n");
				mmput(mm);
				return err;
			}
		}

		if (data32->msk) {
			data->msk = compat_alloc_user_space(sizeof(*data->msk) +
								stack_cursor);
			if (!data->msk) {
				fimg2d_err("failed to allocate user compat space\n");
				mmput(mm);
				return -ENOMEM;
			}

			stack_cursor += sizeof(*data->msk);
			err = compat_get_fimg2d_image(data->msk, data32->msk);
			if (err) {
				fimg2d_err("failed to get compat data\n");
				mmput(mm);
				return err;
			}
		}

		if (data32->tmp) {
			data->tmp = compat_alloc_user_space(sizeof(*data->tmp) +
								stack_cursor);
			if (!data->tmp) {
				fimg2d_err("failed to allocate user compat space\n");
				mmput(mm);
				return -ENOMEM;
			}

			stack_cursor += sizeof(*data->tmp);
			err = compat_get_fimg2d_image(data->tmp, data32->tmp);
			if (err) {
				fimg2d_err("failed to get compat data\n");
				mmput(mm);
				return err;
			}
		}

		if (data32->dst) {
			data->dst = compat_alloc_user_space(sizeof(*data->dst) +
								stack_cursor);
			if (!data->dst) {
				fimg2d_err("failed to allocate user compat space\n");
				mmput(mm);
				return -ENOMEM;
			}

			stack_cursor += sizeof(*data->dst);
			err = compat_get_fimg2d_image(data->dst, data32->dst);
			if (err) {
				fimg2d_err("failed to get compat data\n");
				mmput(mm);
				return err;
			}
		}

		err = get_user(sync, &data32->sync);
		err |= put_user(sync, &data->sync);
		if (err) {
			fimg2d_err("failed to get compat data\n");
			mmput(mm);
			return err;
		}

		err = get_user(seq_no, &data32->seq_no);
		err |= put_user(seq_no, &data->seq_no);
		if (err) {
			fimg2d_err("failed to get compat data\n");
			mmput(mm);
			return err;
		}

		err = get_user(qos_lv, &data32->qos_lv);
		err |= put_user(qos_lv, &data->qos_lv);
		if (err) {
			fimg2d_err("failed to get compat data\n");
			mmput(mm);
			return err;
		}

		err = file->f_op->unlocked_ioctl(file,
				FIMG2D_BITBLT_BLIT, (unsigned long)data);
		mmput(mm);
		return err;
	}
	case COMPAT_FIMG2D_BITBLT_VERSION:
	{
		struct compat_fimg2d_version __user *data32;
		struct fimg2d_version __user *data;
		compat_uint_t i;
		int err;

		data32 = compat_ptr(arg);
		data = compat_alloc_user_space(sizeof(*data));
		if (!data) {
			fimg2d_err("failed to allocate user compat space\n");
			return -ENOMEM;
		}

		err = get_user(i, &data32->hw);
		err |= put_user(i, &data->hw);
		err |= get_user(i, &data32->sw);
		err |= put_user(i, &data->sw);

		if (err)
			return err;

		return file->f_op->unlocked_ioctl(file,
				FIMG2D_BITBLT_VERSION, (unsigned long)data);
	}
	case FIMG2D_BITBLT_ACTIVATE:
	{
		return file->f_op->unlocked_ioctl(file,
				FIMG2D_BITBLT_ACTIVATE, arg);
	}
	default:
		fimg2d_err("unknown ioctl\n");
		return -EINVAL;
	}
}
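The blit case above shows the classic compat-ioctl shuttle: each field is fetched with get_user() from the 32-bit userspace struct and stored with put_user() into a native-layout struct carved out of the user stack, after which the native unlocked_ioctl is invoked on the rewritten pointer. The stack_cursor bookkeeping matters because compat_alloc_user_space() computes its result from the current user stack pointer and keeps no record of earlier calls, so back-to-back allocations would overlap; padding each request by the bytes already handed out pushes every new region below the previous ones. A minimal sketch of the pattern (struct names reuse the example's, layouts assumed):

/* Non-overlapping allocations from the compat user stack. */
unsigned long cursor = 0;
struct fimg2d_blit __user *blit;
struct fimg2d_image __user *src;

blit = compat_alloc_user_space(sizeof(*blit));
cursor += sizeof(*blit);		/* bytes already handed out */

/* over-ask by 'cursor' so this region ends where 'blit' begins */
src = compat_alloc_user_space(sizeof(*src) + cursor);
cursor += sizeof(*src);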
Example #12
0
unsigned long
__clear_user(void __user *to, unsigned long n)
{
	return clear_user(to, n);
}
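Example #12 is simply one architecture providing __clear_user() as an alias for clear_user(); on most architectures the wrapping runs the other way, with clear_user() doing the access_ok() check and then calling __clear_user(). Both return the number of bytes that could not be zeroed, 0 meaning complete success, which is why the call sites earlier in this document fold a non-zero return into -EFAULT. The idiomatic check:

/* Zero a user range and convert any shortfall to -EFAULT. */
if (clear_user(buf, len))
	return -EFAULT;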