Example #1
0
/*
 * This function reads the *physical* memory.  The file position (*ppos)
 * is interpreted directly as a physical address.
 *
 * Returns the number of bytes copied to 'buf', or -EFAULT if the range
 * is not a valid physical address range or a user-space copy fails.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;	/* physical address to read from */
	ssize_t read;

	/* may shrink 'count' to the valid part of the range */
	if (!valid_phys_addr_range(p, &count))
		return -EFAULT;
	read = 0;
#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE-p;
		if (sz > count) 
			sz = count; 
		if (sz > 0) {
			/* hand back zeroes for the unmapped first page */
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz; 
			p += sz; 
			count -= sz; 
			read += sz; 
		}
	}
#endif
	/* __va(p): translate physical address to kernel virtual address */
	if (copy_to_user(buf, __va(p), count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}
Example #2
0
/*
 * This function reads the *virtual* memory as seen by the kernel.
 *
 * The file position (*ppos) is interpreted as a kernel virtual address.
 * Addresses below high_memory are copied out directly; anything above
 * is treated as vmalloc space and fetched page-by-page through vread()
 * into a bounce page, since vmalloc areas need not be contiguous.
 *
 * Returns the total number of bytes copied, or a negative errno.
 */
static ssize_t read_kmem(struct file *file, char __user *buf, 
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read = 0;	/* bytes served from the direct-mapped region */
	ssize_t virtr = 0;	/* bytes served from vmalloc space via vread() */
	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
		
	if (p < (unsigned long) high_memory) {
		read = count;
		/* clamp to the end of the direct-mapped region */
		if (count > (unsigned long) high_memory - p)
			read = (unsigned long) high_memory - p;

#if defined(__sparc__) || (defined(__mc68000__) && defined(CONFIG_MMU))
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && read > 0) {
			size_t tmp = PAGE_SIZE - p;
			if (tmp > read) tmp = read;
			/* return zeroes instead of faulting on page 0 */
			if (clear_user(buf, tmp))
				return -EFAULT;
			buf += tmp;
			p += tmp;
			read -= tmp;
			count -= tmp;
		}
#endif
		if (copy_to_user(buf, (char *)p, read))
			return -EFAULT;
		p += read;
		buf += read;
		count -= read;
	}

	/* anything left lies above high_memory: go through vread() */
	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			len = vread(kbuf, (char *)p, len);
			if (!len)
				break;	/* no more readable vmalloc memory */
			if (copy_to_user(buf, kbuf, len)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}
 	*ppos = p;
 	return virtr + read;
}
Example #3
0
/*
 * Read from /dev/zero: fill the user buffer with zeroes.
 *
 * For large reads (>= 4 pages) the page-aligned middle is handed to
 * read_zero_pagealigned(), which maps in zero pages via the MMU rather
 * than clearing memory; the unaligned head and tail are handled with
 * clear_user().
 *
 * Returns the number of bytes zeroed, or -EFAULT if nothing at all
 * could be written to the user buffer.
 */
static ssize_t read_zero(struct file * file, char __user * buf, 
			 size_t count, loff_t *ppos)
{
	unsigned long left, unwritten, written = 0;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	left = count;

	/* do we want to be clever? Arbitrary cut-off */
	if (count >= PAGE_SIZE*4) {
		unsigned long partial;

		/* How much left of the page? */
		partial = (PAGE_SIZE-1) & -(unsigned long) buf;
		unwritten = clear_user(buf, partial);
		written = partial - unwritten;
		if (unwritten)
			goto out;
		left -= partial;
		buf += partial;
		/* buf is now page-aligned; zero whole pages via the MMU */
		unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
		written += (left & PAGE_MASK) - unwritten;
		if (unwritten)
			goto out;
		buf += left & PAGE_MASK;
		left &= ~PAGE_MASK;
	}
	/* clear the sub-page remainder */
	unwritten = clear_user(buf, left);
	written += left - unwritten;
out:
	return written ? written : -EFAULT;
}
Example #4
0
/*
 * For fun, we are using the MMU for this.
 *
 * Zero 'size' bytes at the page-aligned user address 'buf'.  Private
 * writable VMAs are handled by unmapping the range and mapping in zero
 * pages (copy-on-write), which is cheaper than actually clearing the
 * memory.  On hitting a shared VMA we fall back to conventional
 * clear_user() zeroing.
 *
 * Assumes 'buf' is page-aligned and 'size' is a multiple of PAGE_SIZE
 * (the caller passes 'left & PAGE_MASK').
 *
 * Returns the number of bytes NOT zeroed (0 on full success).
 */
static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
{
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	unsigned long addr=(unsigned long)buf;

	mm = current->mm;
	/* Oops, this was forgotten before. -ben */
	down_read(&mm->mmap_sem);

	/* For private mappings, just map in zero pages. */
	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
		unsigned long count;

		/* hole in the address space, or VMA not writable: give up */
		if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
			goto out_up;
		if (vma->vm_flags & VM_SHARED)
			break;
		count = vma->vm_end - addr;
		if (count > size)
			count = size;

		zap_page_range(vma, addr, count, NULL);
		/* NOTE(review): the return value of zeromap_page_range() is
		 * ignored — a failure here would leave the range unmapped
		 * while still being counted as written.  Confirm intent. */
        	zeromap_page_range(vma, addr, count, PAGE_COPY);

		size -= count;
		buf += count;
		addr += count;
		if (size == 0)
			goto out_up;
	}

	up_read(&mm->mmap_sem);
	
	/* The shared case is hard. Let's do the conventional zeroing. */ 
	do {
		unsigned long unwritten = clear_user(buf, PAGE_SIZE);
		if (unwritten)
			return size + unwritten - PAGE_SIZE;
		cond_resched();
		buf += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size);

	return size;
out_up:
	up_read(&mm->mmap_sem);
	return size;
}
Example #5
0
		/* How much left of the page? */
		partial = (PAGE_SIZE-1) & -(unsigned long) buf;
		unwritten = clear_user(buf, partial);
		written = partial - unwritten;
		if (unwritten)
			goto out;
		left -= partial;
		buf += partial;
		unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
		written += (left & PAGE_MASK) - unwritten;
		if (unwritten)
			goto out;
		buf += left & PAGE_MASK;
		left &= ~PAGE_MASK;
	}
	unwritten = clear_user(buf, left);
	written += left - unwritten;
out:
	return written ? written : -EFAULT;
}

/*
 * mmap() handler for /dev/zero.
 *
 * Shared mappings are converted into anonymous shmem mappings; private
 * mappings are simply populated with zero pages.
 */
static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
	unsigned long len = vma->vm_end - vma->vm_start;

	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);

	if (zeromap_page_range(vma, vma->vm_start, len, vma->vm_page_prot) != 0)
		return -EAGAIN;

	return 0;
}
#else /* CONFIG_MMU */
/*
 * No-MMU read from /dev/zero: clear the user buffer in bounded chunks,
 * rescheduling between chunks to keep latency down.
 */
static ssize_t read_zero(struct file * file, char * buf, 
			 size_t count, loff_t *ppos)
{
	size_t remaining;

	for (remaining = count; remaining != 0; ) {
		/* cap each pass at 4k, just for latency reasons */
		size_t chunk = (remaining > 4096) ? 4096 : remaining;

		if (clear_user(buf, chunk))
			return -EFAULT;
		buf += chunk;
		remaining -= chunk;
		cond_resched();
	}
	return count;
}
Example #6
0
/*
 * 32-bit compat translation for the DRM_IOCTL_VERSION ioctl.
 *
 * Builds a native drm_version_t in compat_alloc_user_space() memory from
 * the 32-bit structure (widening the compat_uptr_t string pointers to
 * native pointers), forwards the ioctl, then copies the three version
 * ints and the string lengths back into the 32-bit structure.
 *
 * Returns 0 on success, -EFAULT on any user-memory access failure, or
 * the error returned by sys_ioctl().
 */
static int drm32_version(unsigned int fd, unsigned int cmd, unsigned long arg)
{
	drm32_version_t __user *uversion = (drm32_version_t __user *)arg;
	drm_version_t __user *p = compat_alloc_user_space(sizeof(*p));
	compat_uptr_t addr;
	int n;
	int ret;

	/* zero the three leading ints, then copy lengths + widened pointers */
	if (clear_user(p, 3 * sizeof(int)) ||
	    get_user(n, &uversion->name_len) ||
	    put_user(n, &p->name_len) ||
	    get_user(addr, &uversion->name) ||
	    put_user(compat_ptr(addr), &p->name) ||
	    get_user(n, &uversion->date_len) ||
	    put_user(n, &p->date_len) ||
	    get_user(addr, &uversion->date) ||
	    put_user(compat_ptr(addr), &p->date) ||
	    get_user(n, &uversion->desc_len) ||
	    put_user(n, &p->desc_len) ||
	    get_user(addr, &uversion->desc) ||
	    put_user(compat_ptr(addr), &p->desc))
		return -EFAULT;

        ret = sys_ioctl(fd, DRM_IOCTL_VERSION, (unsigned long)p);
	if (ret)
		return ret;

	/* propagate results (version ints and actual string lengths) back */
	if (copy_in_user(uversion, p, 3 * sizeof(int)) ||
	    get_user(n, &p->name_len) ||
	    put_user(n, &uversion->name_len) ||
	    get_user(n, &p->date_len) ||
	    put_user(n, &uversion->date_len) ||
	    get_user(n, &p->desc_len) ||
	    put_user(n, &uversion->desc_len))
		return -EFAULT;

	return 0;
}
/*
 * Read /proc/kcore in the old a.out core layout:
 *   [0, sizeof(struct user))                       synthesized 'struct user'
 *   [sizeof(struct user), PAGE_SIZE+FIRST_MAPPED)  zero fill (page 0 unmapped)
 *   beyond that                                    memory mapped at PAGE_OFFSET
 *
 * Returns the number of bytes copied, 0 at/after end of memory, or
 * -EFAULT on a user-space copy failure.
 */
static ssize_t read_kcore(struct file *file, char *buf, size_t count, loff_t *ppos)
{
	unsigned long long p = *ppos, memsize;
	ssize_t read;
	ssize_t count1;
	char * pnt;
	struct user dump;
#if defined (__i386__) || defined (__mc68000__) || defined(__x86_64__)
#	define FIRST_MAPPED	PAGE_SIZE	/* we don't have page 0 mapped on x86.. */
#else
#	define FIRST_MAPPED	0
#endif

	/* build a minimal a.out 'struct user' header on the stack */
	memset(&dump, 0, sizeof(struct user));
	dump.magic = CMAGIC;
	dump.u_dsize = (virt_to_phys(high_memory) >> PAGE_SHIFT);
#if defined (__i386__) || defined(__x86_64__)
	dump.start_code = PAGE_OFFSET;
#endif
#ifdef __alpha__
	dump.start_data = PAGE_OFFSET;
#endif

	/* clamp the request to the amount of physical memory */
	memsize = virt_to_phys(high_memory);
	if (p >= memsize)
		return 0;
	if (count > memsize - p)
		count = memsize - p;
	read = 0;

	/* 1: serve the portion overlapping the synthesized header */
	if (p < sizeof(struct user) && count > 0) {
		count1 = count;
		if (p + count1 > sizeof(struct user))
			count1 = sizeof(struct user)-p;
		pnt = (char *) &dump + p;
		if (copy_to_user(buf,(void *) pnt, count1))
			return -EFAULT;
		buf += count1;
		p += count1;
		count -= count1;
		read += count1;
	}

	/* 2: zero-fill up to the first mapped page */
	if (count > 0 && p < PAGE_SIZE + FIRST_MAPPED) {
		count1 = PAGE_SIZE + FIRST_MAPPED - p;
		if (count1 > count)
			count1 = count;
		if (clear_user(buf, count1))
			return -EFAULT;
		buf += count1;
		p += count1;
		count -= count1;
		read += count1;
	}
	/* 3: the rest comes from the kernel direct mapping */
	if (count > 0) {
		if (copy_to_user(buf, (void *) (unsigned long) (PAGE_OFFSET+p-PAGE_SIZE), count))
			return -EFAULT;
		read += count;
		p += count;
	}
	*ppos = p;
	return read;
}
/*
 * Compute the apparent size of /proc/kcore and the size of the
 * synthesized ELF header buffer.
 *
 * Out-parameters:
 *   *num_vma    - number of entries on vmlist (0 when NO_MM / empty)
 *   *elf_buflen - page-aligned size of the ELF header area
 *
 * NOTE(review): 'size' starts as an offset (high_memory - PAGE_OFFSET
 * + PAGE_SIZE) but may be raised to a raw vmalloc *address* in the
 * loop, and the final return subtracts PAGE_OFFSET again — the mixed
 * units look suspicious; verify against the callers' expectations.
 */
static unsigned long get_kcore_size(int *num_vma, size_t *elf_buflen)
{
	unsigned long size;
#ifndef NO_MM
	unsigned long try;
	struct vm_struct *m;
#endif

	*num_vma = 0;
	size = ((size_t)high_memory - PAGE_OFFSET + PAGE_SIZE);
#ifdef NO_MM
	/* vmlist is not available then */
	*elf_buflen = PAGE_SIZE;
	return size;
#else
	if (!vmlist) {
		*elf_buflen = PAGE_SIZE;
		return (size);
	}

	/* extend 'size' past the highest vmalloc area and count them */
	for (m=vmlist; m; m=m->next) {
		try = (unsigned long)m->addr + m->size;
		if (try > size)
			size = try;
		*num_vma = *num_vma + 1;
	}
	/* ELF header + PT_NOTE + PT_LOAD per area (+2) + three notes */
	*elf_buflen =	sizeof(struct elfhdr) + 
			(*num_vma + 2)*sizeof(struct elf_phdr) + 
			3 * (sizeof(struct elf_note) + 4) +
			sizeof(struct elf_prstatus) +
			sizeof(struct elf_prpsinfo) +
			sizeof(struct task_struct);
	*elf_buflen = PAGE_ALIGN(*elf_buflen);
	return (size - PAGE_OFFSET + *elf_buflen);
#endif
}


/*****************************************************************************/
/*
 * determine size of ELF note
 */
/*
 * determine size of ELF note: the fixed note header plus the name and
 * the descriptor payload, each padded to a 4-byte boundary.
 */
static int notesize(struct memelfnote *en)
{
	return sizeof(struct elf_note)
		+ roundup(strlen(en->name), 4)
		+ roundup(en->datasz, 4);
} /* end notesize() */

/*****************************************************************************/
/*
 * store a note in the header buffer
 */
/*
 * store a note in the header buffer
 *
 * Serializes 'men' as an ELF note record at 'bufp': the fixed elf_note
 * header, then the name bytes, then the payload at the next 4-byte
 * boundary.  Returns the advanced, 4-byte-aligned buffer pointer.
 *
 * NOTE(review): n_namesz is strlen(name) without the terminating NUL,
 * and alignment padding keeps whatever the caller pre-filled the buffer
 * with (callers memset it to zero) — confirm this matches consumers.
 */
static char *storenote(struct memelfnote *men, char *bufp)
{
	struct elf_note en;

#define DUMP_WRITE(addr,nr) do { memcpy(bufp,addr,nr); bufp += nr; } while(0)

	en.n_namesz = strlen(men->name);
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);

	/* XXX - cast from long long to long to avoid need for libgcc.a */
	bufp = (char*) roundup((unsigned long)bufp,4);
	DUMP_WRITE(men->data, men->datasz);
	bufp = (char*) roundup((unsigned long)bufp,4);

#undef DUMP_WRITE

	return bufp;
} /* end storenote() */

/*
 * store an ELF coredump header in the supplied buffer
 * num_vma is the number of elements in vmlist
 */
/*
 * store an ELF coredump header in the supplied buffer
 * num_vma is the number of elements in vmlist
 *
 * Writes, in order: the ELF header, a PT_NOTE program header, a PT_LOAD
 * header for the direct-mapped kernel range, one PT_LOAD per vmalloc
 * area, and finally the three notes (NT_PRSTATUS, NT_PRPSINFO,
 * NT_TASKSTRUCT).  'dataoff' is the file offset at which the memory
 * contents begin (i.e. the size of this header area).  The caller must
 * have sized 'bufp' via get_kcore_size().
 */
static void elf_kcore_store_hdr(char *bufp, int num_vma, int dataoff)
{
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	struct elf_prpsinfo prpsinfo;	/* NT_PRPSINFO */
	struct elf_phdr *nhdr, *phdr;
	struct elfhdr *elf;
	struct memelfnote notes[3];
	off_t offset = 0;	/* running offset of the next byte in bufp */
#ifndef NO_MM
	struct vm_struct *m;
#endif

	/* setup ELF header */
	elf = (struct elfhdr *) bufp;
	bufp += sizeof(struct elfhdr);
	offset += sizeof(struct elfhdr);
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS]	= ELF_CLASS;
	elf->e_ident[EI_DATA]	= ELF_DATA;
	elf->e_ident[EI_VERSION]= EV_CURRENT;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
	elf->e_type	= ET_CORE;
	elf->e_machine	= ELF_ARCH;
	elf->e_version	= EV_CURRENT;
	elf->e_entry	= 0;
	elf->e_phoff	= sizeof(struct elfhdr);
	elf->e_shoff	= 0;
	elf->e_flags	= 0;
	elf->e_ehsize	= sizeof(struct elfhdr);
	elf->e_phentsize= sizeof(struct elf_phdr);
	elf->e_phnum	= 2 + num_vma;	/* PT_NOTE + main PT_LOAD + vmallocs */
	elf->e_shentsize= 0;
	elf->e_shnum	= 0;
	elf->e_shstrndx	= 0;

	/* setup ELF PT_NOTE program header (sizes filled in below) */
	nhdr = (struct elf_phdr *) bufp;
	bufp += sizeof(struct elf_phdr);
	offset += sizeof(struct elf_phdr);
	nhdr->p_type	= PT_NOTE;
	nhdr->p_offset	= 0;
	nhdr->p_vaddr	= 0;
	nhdr->p_paddr	= 0;
	nhdr->p_filesz	= 0;
	nhdr->p_memsz	= 0;
	nhdr->p_flags	= 0;
	nhdr->p_align	= 0;

	/* setup ELF PT_LOAD program header for the 
	 * virtual range 0xc0000000 -> high_memory */
	phdr = (struct elf_phdr *) bufp;
	bufp += sizeof(struct elf_phdr);
	offset += sizeof(struct elf_phdr);
	phdr->p_type	= PT_LOAD;
	phdr->p_flags	= PF_R|PF_W|PF_X;
	phdr->p_offset	= dataoff;
	phdr->p_vaddr	= PAGE_OFFSET;
	phdr->p_paddr	= __pa(PAGE_OFFSET);
	phdr->p_filesz	= phdr->p_memsz = ((unsigned long)high_memory - PAGE_OFFSET);
	phdr->p_align	= PAGE_SIZE;

#ifndef NO_MM
	/* setup ELF PT_LOAD program header for every vmalloc'd area */
	for (m=vmlist; m; m=m->next) {
		if (m->flags & VM_IOREMAP) /* don't dump ioremap'd stuff! (TA) */
			continue;

		phdr = (struct elf_phdr *) bufp;
		bufp += sizeof(struct elf_phdr);
		offset += sizeof(struct elf_phdr);

		phdr->p_type	= PT_LOAD;
		phdr->p_flags	= PF_R|PF_W|PF_X;
		phdr->p_offset	= (size_t)m->addr - PAGE_OFFSET + dataoff;
		phdr->p_vaddr	= (size_t)m->addr;
		phdr->p_paddr	= __pa(m->addr);
		phdr->p_filesz	= phdr->p_memsz	= m->size;
		phdr->p_align	= PAGE_SIZE;
	}
#endif /* NO_MM */

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 * The note payloads below live on this stack frame; storenote()
	 * memcpy's them into the buffer before we return, so that's safe.
	 */
	nhdr->p_offset	= offset;

	/* set up the process status */
	notes[0].name = "CORE";
	notes[0].type = NT_PRSTATUS;
	notes[0].datasz = sizeof(struct elf_prstatus);
	notes[0].data = &prstatus;

	memset(&prstatus, 0, sizeof(struct elf_prstatus));

	nhdr->p_filesz += notesize(&notes[0]);
	bufp = storenote(&notes[0], bufp);

	/* set up the process info */
	notes[1].name	= "CORE";
	notes[1].type	= NT_PRPSINFO;
	notes[1].datasz	= sizeof(struct elf_prpsinfo);
	notes[1].data	= &prpsinfo;

	memset(&prpsinfo, 0, sizeof(struct elf_prpsinfo));
	prpsinfo.pr_state	= 0;
	prpsinfo.pr_sname	= 'R';
	prpsinfo.pr_zomb	= 0;

	strcpy(prpsinfo.pr_fname, "vmlinux");
	strncpy(prpsinfo.pr_psargs, saved_command_line, ELF_PRARGSZ);

	nhdr->p_filesz += notesize(&notes[1]);
	bufp = storenote(&notes[1], bufp);

	/* set up the task structure */
	notes[2].name	= "CORE";
	notes[2].type	= NT_TASKSTRUCT;
	notes[2].datasz	= sizeof(struct task_struct);
	notes[2].data	= current;

	nhdr->p_filesz += notesize(&notes[2]);
	bufp = storenote(&notes[2], bufp);

} /* end elf_kcore_store_hdr() */

/*****************************************************************************/
/*
 * read from the ELF header and then kernel memory
 *
 * Layout of /proc/kcore as produced here:
 *   [0, elf_buflen)     synthesized ELF core header, built on demand
 *   [elf_buflen, ...)   kernel virtual memory starting at PAGE_OFFSET
 * On i386/m68k/x86_64 the first page past the header reads as zeroes,
 * since physical page 0 is not mapped.
 *
 * Returns the number of bytes copied to 'buffer', 0 at EOF, or a
 * negative errno.
 */
static ssize_t read_kcore(struct file *file, char *buffer, size_t buflen, loff_t *fpos)
{
	ssize_t acc = 0;	/* total bytes delivered so far */
	unsigned long size, tsz;
	size_t elf_buflen;
	int num_vma;
	unsigned long start;

#ifdef NO_MM
	proc_root_kcore->size = size = get_kcore_size(&num_vma, &elf_buflen);
#else
	read_lock(&vmlist_lock);
	proc_root_kcore->size = size = get_kcore_size(&num_vma, &elf_buflen);
	if (buflen == 0 || (unsigned long long)*fpos >= size) {
		read_unlock(&vmlist_lock);
		return 0;
	}
#endif /* NO_MM */

	/* trim buflen to not go beyond EOF */
	if (buflen > size - *fpos)
		buflen = size - *fpos;

	/* construct an ELF core header if we'll need some of it */
	if (*fpos < elf_buflen) {
		char * elf_buf;

		tsz = elf_buflen - *fpos;
		if (buflen < tsz)
			tsz = buflen;
		elf_buf = kmalloc(elf_buflen, GFP_ATOMIC);
		if (!elf_buf) {
#ifndef NO_MM
			read_unlock(&vmlist_lock);
#endif
			return -ENOMEM;
		}
		memset(elf_buf, 0, elf_buflen);
		elf_kcore_store_hdr(elf_buf, num_vma, elf_buflen);
#ifndef NO_MM
		read_unlock(&vmlist_lock);
#endif
		if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
			kfree(elf_buf);
			return -EFAULT;
		}
		kfree(elf_buf);
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	} else {
#ifndef NO_MM
		read_unlock(&vmlist_lock);
#endif
	}

	/* where page 0 not mapped, write zeros into buffer */
#if defined (__i386__) || defined (__mc68000__) || defined(__x86_64__)
	if (*fpos < PAGE_SIZE + elf_buflen) {
		/* work out how much to clear */
		tsz = PAGE_SIZE + elf_buflen - *fpos;
		if (buflen < tsz)
			tsz = buflen;

		/* write zeros to buffer */
		if (clear_user(buffer, tsz))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already.  Return the running
		 * total 'acc', not just this chunk's 'tsz': a read that
		 * also consumed part of the ELF header above would
		 * otherwise be under-reported and data silently lost. */
		if (buflen == 0)
			return acc;
	}
#endif
	
	/*
	 * Fill the remainder of the buffer from kernel VM space.
	 * We said in the ELF header that the data which starts
	 * at 'elf_buflen' is virtual address PAGE_OFFSET. --rmk
	 */
	start = PAGE_OFFSET + (*fpos - elf_buflen);
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;
	while (buflen) {
		int err = 0;

		if ((start > PAGE_OFFSET) && (start < (unsigned long)high_memory)) {
			/* directly-mapped low memory */
			if (kern_addr_valid(start)) {
				err = copy_to_user(buffer, (char *)start, tsz);
			} else {
				err = clear_user(buffer, tsz);
			}
		} else {
#ifndef NO_MM
			/* vmalloc space: assemble the chunk from every
			 * vm_struct overlapping [start, start+tsz), leaving
			 * zeroes in the gaps */
			char * elf_buf;
			struct vm_struct *m;
			unsigned long curstart = start;
			unsigned long cursize = tsz;

			elf_buf = kmalloc(tsz, GFP_KERNEL);
			if (!elf_buf)
				return -ENOMEM;
			memset(elf_buf, 0, tsz);

			read_lock(&vmlist_lock);
			for (m=vmlist; m && cursize; m=m->next) {
				unsigned long vmstart;
				unsigned long vmsize;
				/* exclude the area's trailing guard page */
				unsigned long msize = m->size - PAGE_SIZE;

				if (((unsigned long)m->addr + msize) <
								curstart)
					continue;
				if ((unsigned long)m->addr > (curstart +
								cursize))
					break;
				vmstart = (curstart < (unsigned long)m->addr ?
					(unsigned long)m->addr : curstart);
				if (((unsigned long)m->addr + msize) >
							(curstart + cursize))
					vmsize = curstart + cursize - vmstart;
				else
					vmsize = (unsigned long)m->addr +
							msize - vmstart;
				curstart = vmstart + vmsize;
				cursize -= vmsize;
				/* don't dump ioremap'd stuff! (TA) */
				if (m->flags & VM_IOREMAP)
					continue;
				memcpy(elf_buf + (vmstart - start),
					(char *)vmstart, vmsize);
			}
			read_unlock(&vmlist_lock);
			err = copy_to_user(buffer, elf_buf, tsz);
			kfree(elf_buf);
#endif /* NO_MM */
		}
		if (err)
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;
		start += tsz;
		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
	}

	return acc;
}
/*
 * Load an ELF executable into the current process.
 *
 * Parses the program headers, optionally loads a PT_INTERP interpreter,
 * flushes the old executable image, maps each PT_LOAD segment, sets up
 * the brk/bss, builds the ELF auxiliary tables and finally starts the
 * new thread at the entry point.
 *
 * Returns 0 on success (the old image is gone), or a negative errno.
 * NOTE(review): this is the 2.6.27.31 vulnerable version referenced by
 * CVE-2010-0307 — failure paths after flush_old_exec() (the "point of
 * no return") are the sensitive region; presumably the upstream fix
 * (splitting flush_old_exec/setup_new_exec) applies here.  Verify
 * against mainline before reuse.
 */
static int CVE_2010_0307_linux2_6_27_31_load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	struct file *interpreter = NULL; /* to shut gcc up */
 	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata;
	unsigned long elf_bss, elf_brk;
	int elf_exec_fileno;
	int retval, i;
	unsigned int size;
	unsigned long elf_entry;
	unsigned long interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc = 0;
	int executable_stack = EXSTACK_DEFAULT;
	unsigned long def_flags = 0;
	struct {
		struct elfhdr elf_ex;
		struct elfhdr interp_elf_ex;
	} *loc;

	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}
	
	/* Get the exec-header */
	loc->elf_ex = *((struct elfhdr *)bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&loc->elf_ex))
		goto out;
	if (!bprm->file->f_op||!bprm->file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */
	if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
		goto out;
	/* bound e_phnum so the kmalloc below stays small and can't overflow */
	if (loc->elf_ex.e_phnum < 1 ||
	 	loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;
	size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
	retval = -ENOMEM;
	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
			     (char *)elf_phdata, size);
	if (retval != size) {
		if (retval >= 0)
			retval = -EIO;
		goto out_free_ph;
	}

	retval = get_unused_fd();
	if (retval < 0)
		goto out_free_ph;
	get_file(bprm->file);
	fd_install(elf_exec_fileno = retval, bprm->file);

	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	/* first pass: find and read the PT_INTERP interpreter path, if any */
	for (i = 0; i < loc->elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */
			retval = -ENOEXEC;
			if (elf_ppnt->p_filesz > PATH_MAX || 
			    elf_ppnt->p_filesz < 2)
				goto out_free_file;

			retval = -ENOMEM;
			elf_interpreter = kmalloc(elf_ppnt->p_filesz,
						  GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_file;

			retval = kernel_read(bprm->file, elf_ppnt->p_offset,
					     elf_interpreter,
					     elf_ppnt->p_filesz);
			if (retval != elf_ppnt->p_filesz) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_interp;
			}
			/* make sure path is NULL terminated */
			retval = -ENOEXEC;
			if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
				goto out_free_interp;

			/*
			 * The early SET_PERSONALITY here is so that the lookup
			 * for the interpreter happens in the namespace of the 
			 * to-be-execed image.  SET_PERSONALITY can select an
			 * alternate root.
			 *
			 * However, SET_PERSONALITY is NOT allowed to switch
			 * this task into the new images's memory mapping
			 * policy - that is, TASK_SIZE must still evaluate to
			 * that which is appropriate to the execing application.
			 * This is because exit_mmap() needs to have TASK_SIZE
			 * evaluate to the size of the old image.
			 *
			 * So if (say) a 64-bit application is execing a 32-bit
			 * application it is the architecture's responsibility
			 * to defer changing the value of TASK_SIZE until the
			 * switch really is going to happen - do this in
			 * flush_thread().	- akpm
			 */
			SET_PERSONALITY(loc->elf_ex, 0);

			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;

			/*
			 * If the binary is not readable then enforce
			 * mm->dumpable = 0 regardless of the interpreter's
			 * permissions.
			 */
			if (file_permission(interpreter, MAY_READ) < 0)
				bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;

			retval = kernel_read(interpreter, 0, bprm->buf,
					     BINPRM_BUF_SIZE);
			if (retval != BINPRM_BUF_SIZE) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_dentry;
			}

			/* Get the exec headers */
			loc->interp_elf_ex = *((struct elfhdr *)bprm->buf);
			break;
		}
		elf_ppnt++;
	}

	/* second pass: honour PT_GNU_STACK's executable-stack request */
	elf_ppnt = elf_phdata;
	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
		if (elf_ppnt->p_type == PT_GNU_STACK) {
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;
		}

	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		retval = -ELIBBAD;
		/* Not an ELF interpreter */
		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			goto out_free_dentry;
		/* Verify the interpreter has a valid arch */
		if (!elf_check_arch(&loc->interp_elf_ex))
			goto out_free_dentry;
	} else {
		/* Executables without an interpreter also need a personality  */
		SET_PERSONALITY(loc->elf_ex, 0);
	}

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* OK, This is the point of no return */
	current->flags &= ~PF_FORKNOEXEC;
	current->mm->def_flags = def_flags;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY(loc->elf_ex, 0);
	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;
	arch_pick_mmap_layout(current->mm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	current->mm->free_area_cache = current->mm->mmap_base;
	current->mm->cached_hole_size = 0;
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}
	
	current->mm->start_stack = bprm->p;

	/* Now we do a little grungy work by mmaping the ELF image into
	   the correct location in memory. */
	for(i = 0, elf_ppnt = elf_phdata;
	    i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long k, vaddr;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;
	            
			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk (elf_bss + load_bias,
					  elf_brk + load_bias);
			if (retval) {
				send_sig(SIGKILL, current, 0);
				goto out_free_dentry;
			}
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
							load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}
		}

		if (elf_ppnt->p_flags & PF_R)
			elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W)
			elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X)
			elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (loc->elf_ex.e_type == ET_DYN) {
			/* Try and get dynamic programs out of the way of the
			 * default mmap base, as well as whatever program they
			 * might try to exec.  This is because the brk will
			 * follow the loader, and is not movable.  */
#ifdef CONFIG_X86
			load_bias = 0;
#else
			load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
#endif
		}

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
				elf_prot, elf_flags, 0);
		if (BAD_ADDR(error)) {
			send_sig(SIGKILL, current, 0);
			retval = IS_ERR((void *)error) ?
				PTR_ERR((void*)error) : -EINVAL;
			goto out_free_dentry;
		}

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (loc->elf_ex.e_type == ET_DYN) {
				load_bias += error -
				             ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if (k < start_code)
			start_code = k;
		if (start_data < k)
			start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			send_sig(SIGKILL, current, 0);
			retval = -EINVAL;
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk)
			elf_brk = k;
	}

	/* relocate all the computed addresses by the chosen load bias */
	loc->elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk);
	if (retval) {
		send_sig(SIGKILL, current, 0);
		goto out_free_dentry;
	}
	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
		send_sig(SIGSEGV, current, 0);
		retval = -EFAULT; /* Nobody gets to see this, but.. */
		goto out_free_dentry;
	}

	if (elf_interpreter) {
		unsigned long uninitialized_var(interp_map_addr);

		elf_entry = load_elf_interp(&loc->interp_elf_ex,
					    interpreter,
					    &interp_map_addr,
					    load_bias);
		if (!IS_ERR((void *)elf_entry)) {
			/*
			 * load_elf_interp() returns relocation
			 * adjustment
			 */
			interp_load_addr = elf_entry;
			elf_entry += loc->interp_elf_ex.e_entry;
		}
		if (BAD_ADDR(elf_entry)) {
			force_sig(SIGSEGV, current);
			retval = IS_ERR((void *)elf_entry) ?
					(int)elf_entry : -EINVAL;
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
		kfree(elf_interpreter);
	} else {
		elf_entry = loc->elf_ex.e_entry;
		if (BAD_ADDR(elf_entry)) {
			force_sig(SIGSEGV, current);
			retval = -EINVAL;
			goto out_free_dentry;
		}
	}

	kfree(elf_phdata);

	sys_close(elf_exec_fileno);

	set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, executable_stack);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out;
	}
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	compute_creds(bprm);
	current->flags &= ~PF_FORKNOEXEC;
	retval = create_elf_tables(bprm, &loc->elf_ex,
			  load_addr, interp_load_addr);
	if (retval < 0) {
		send_sig(SIGKILL, current, 0);
		goto out;
	}
	/* N.B. passed_fileno might not be initialized? */
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

#ifdef arch_randomize_brk
	if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1))
		current->mm->brk = current->mm->start_brk =
			arch_randomize_brk(current->mm);
#endif

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior. Sigh. */
		down_write(&current->mm->mmap_sem);
		error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
		up_write(&current->mm->mmap_sem);
	}

#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example.  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically links apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	start_thread(regs, elf_entry, bprm->p);
	retval = 0;
out:
	kfree(loc);
out_ret:
	return retval;

	/* error cleanup */
out_free_dentry:
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_interp:
	kfree(elf_interpreter);
out_free_file:
	sys_close(elf_exec_fileno);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}
/*
 * sparc64_get_context() - implement the getcontext() system call.
 *
 * Copies the caller's current machine context -- trap state, globals,
 * outs, the saved %fp/%i7 from the register window on the user stack,
 * the signal mask and (optionally) FPU state -- into the user-supplied
 * struct ucontext whose address arrives in %i0.  Any fault while
 * accessing user memory, or user register windows still buffered in the
 * kernel, results in SIGSEGV being forced on the caller.
 */
asmlinkage void sparc64_get_context(struct pt_regs *regs)
{
	struct ucontext __user *ucp = (struct ucontext __user *)
		regs->u_regs[UREG_I0];
	mc_gregset_t __user *grp;
	mcontext_t __user *mcp;
	unsigned long fp, i7;
	unsigned char fenab;
	int err;

	/* Flush any user windows spilled into kernel buffers out to the
	 * user stack; bail if some could not be written back.
	 * clear_user() both zeroes *ucp and validates the whole range,
	 * which is what makes the later __put_user() calls (which skip
	 * access_ok) safe for this buffer. */
	synchronize_user_stack();
	if (get_thread_wsaved() || clear_user(ucp, sizeof(*ucp)))
		goto do_sigsegv;

#if 1
	fenab = 0; /* IMO get_context is like any other system call, thus modifies FPU state -jj */
#else
	fenab = (current_thread_info()->fpsaved[0] & FPRS_FEF);
#endif
		
	mcp = &ucp->uc_mcontext;
	grp = &mcp->mc_gregs;

	/* Skip over the trap instruction, first. */
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc   = (regs->tnpc & 0xffffffff);
		regs->tnpc  = (regs->tnpc + 4) & 0xffffffff;
	} else {
		regs->tpc   = regs->tnpc;
		regs->tnpc += 4;
	}
	err = 0;
	/* Record the caller's current signal mask in the context. */
	if (_NSIG_WORDS == 1)
		err |= __put_user(current->blocked.sig[0],
				  (unsigned long __user *)&ucp->uc_sigmask);
	else
		err |= __copy_to_user(&ucp->uc_sigmask, &current->blocked,
				      sizeof(sigset_t));

	/* Trap state, %y, globals %g1..%g7 and the outs.
	 * NOTE(review): the MC_O* slots are filled from the UREG_I* pt_regs
	 * slots -- presumably the trap-time window shift makes these the
	 * caller's %o registers; confirm against the trap entry code. */
	err |= __put_user(regs->tstate, &((*grp)[MC_TSTATE]));
	err |= __put_user(regs->tpc, &((*grp)[MC_PC]));
	err |= __put_user(regs->tnpc, &((*grp)[MC_NPC]));
	err |= __put_user(regs->y, &((*grp)[MC_Y]));
	err |= __put_user(regs->u_regs[UREG_G1], &((*grp)[MC_G1]));
	err |= __put_user(regs->u_regs[UREG_G2], &((*grp)[MC_G2]));
	err |= __put_user(regs->u_regs[UREG_G3], &((*grp)[MC_G3]));
	err |= __put_user(regs->u_regs[UREG_G4], &((*grp)[MC_G4]));
	err |= __put_user(regs->u_regs[UREG_G5], &((*grp)[MC_G5]));
	err |= __put_user(regs->u_regs[UREG_G6], &((*grp)[MC_G6]));
	err |= __put_user(regs->u_regs[UREG_G7], &((*grp)[MC_G7]));
	err |= __put_user(regs->u_regs[UREG_I0], &((*grp)[MC_O0]));
	err |= __put_user(regs->u_regs[UREG_I1], &((*grp)[MC_O1]));
	err |= __put_user(regs->u_regs[UREG_I2], &((*grp)[MC_O2]));
	err |= __put_user(regs->u_regs[UREG_I3], &((*grp)[MC_O3]));
	err |= __put_user(regs->u_regs[UREG_I4], &((*grp)[MC_O4]));
	err |= __put_user(regs->u_regs[UREG_I5], &((*grp)[MC_O5]));
	err |= __put_user(regs->u_regs[UREG_I6], &((*grp)[MC_O6]));
	err |= __put_user(regs->u_regs[UREG_I7], &((*grp)[MC_O7]));

	/* Fetch the caller's saved frame pointer and return address from
	 * the register window spilled on the (biased) user stack.
	 * NOTE(review): this reads user memory NOT covered by the earlier
	 * clear_user() range check -- relies on __get_user fault handling. */
	err |= __get_user(fp,
		 (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
	err |= __get_user(i7,
		 (&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));
	err |= __put_user(fp, &(mcp->mc_fp));
	err |= __put_user(i7, &(mcp->mc_i7));

	/* FPU state: dead code unless the #else branch above is enabled,
	 * since fenab is currently forced to 0. */
	err |= __put_user(fenab, &(mcp->mc_fpregs.mcfpu_enab));
	if (fenab) {
		unsigned long *fpregs = current_thread_info()->fpregs;
		unsigned long fprs;
		
		fprs = current_thread_info()->fpsaved[0];
		if (fprs & FPRS_DL)
			err |= copy_to_user(&(mcp->mc_fpregs.mcfpu_fregs), fpregs,
					    (sizeof(unsigned int) * 32));
		if (fprs & FPRS_DU)
			err |= copy_to_user(
                          ((unsigned long __user *)&(mcp->mc_fpregs.mcfpu_fregs))+16, fpregs+16,
			  (sizeof(unsigned int) * 32));
		err |= __put_user(current_thread_info()->xfsr[0], &(mcp->mc_fpregs.mcfpu_fsr));
		err |= __put_user(current_thread_info()->gsr[0], &(mcp->mc_fpregs.mcfpu_gsr));
		err |= __put_user(fprs, &(mcp->mc_fpregs.mcfpu_fprs));
	}
	if (err)
		goto do_sigsegv;

	return;
do_sigsegv:
	force_sig(SIGSEGV, current);
}
/* (extraction artifact: stray "Example #11" / "0" separator lines commented
 * out so this translation unit remains well-formed) */
/*
 * do_ptrace() - sparc64 syscall-level ptrace entry point.
 *
 * Unlike the generic sys_ptrace(), the arguments arrive in the caller's
 * trap-frame registers (%i0..%i4 slots of pt_regs) and the result is
 * delivered back through the same frame via pt_succ_return() /
 * pt_error_return() rather than through a C return value.  addr2 is an
 * extra, sparc-specific argument used by the READ/WRITE TEXT/DATA
 * requests.
 */
asmlinkage void do_ptrace(struct pt_regs *regs)
{
    int request = regs->u_regs[UREG_I0];
    pid_t pid = regs->u_regs[UREG_I1];
    unsigned long addr = regs->u_regs[UREG_I2];
    unsigned long data = regs->u_regs[UREG_I3];
    unsigned long addr2 = regs->u_regs[UREG_I4];
    struct task_struct *child;
    int ret;

    /* 32-bit tracers only pass meaningful data in the low 32 bits of
     * the 64-bit argument registers. */
    if (test_thread_flag(TIF_32BIT)) {
        addr &= 0xffffffffUL;
        data &= 0xffffffffUL;
        addr2 &= 0xffffffffUL;
    }
    /* Big kernel lock serializes the whole handler; released at "out". */
    lock_kernel();
#ifdef DEBUG_PTRACE
    {
        char *s;

        if ((request >= 0) && (request <= 24))
            s = pt_rq [request];
        else
            s = "unknown";

        if (request == PTRACE_POKEDATA && data == 0x91d02001) {
            printk ("do_ptrace: breakpoint pid=%d, addr=%016lx addr2=%016lx\n",
                    pid, addr, addr2);
        } else
            printk("do_ptrace: rq=%s(%d) pid=%d addr=%016lx data=%016lx addr2=%016lx\n",
                   s, request, pid, addr, data, addr2);
    }
#endif
    /* TRACEME acts on the current task; no child lookup needed. */
    if (request == PTRACE_TRACEME) {
        int ret;    /* NOTE(review): shadows the outer 'ret' -- harmless
                     * here but easy to misread. */

        /* are we already being traced? */
        if (current->ptrace & PT_PTRACED) {
            pt_error_return(regs, EPERM);
            goto out;
        }
        ret = security_ptrace(current->parent, current);
        if (ret) {
            pt_error_return(regs, -ret);
            goto out;
        }

        /* set the ptrace bit in the process flags. */
        current->ptrace |= PT_PTRACED;
        pt_succ_return(regs, 0);
        goto out;
    }
#ifndef ALLOW_INIT_TRACING
    if (pid == 1) {
        /* Can't dork with init. */
        pt_error_return(regs, EPERM);
        goto out;
    }
#endif
    /* Look up the target and pin it; dropped at out_tsk. */
    read_lock(&tasklist_lock);
    child = find_task_by_pid(pid);
    if (child)
        get_task_struct(child);
    read_unlock(&tasklist_lock);

    if (!child) {
        pt_error_return(regs, ESRCH);
        goto out;
    }

    /* SunOS personality uses its own attach request number. */
    if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH)
            || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) {
        if (ptrace_attach(child)) {
            pt_error_return(regs, EPERM);
            goto out_tsk;
        }
        pt_succ_return(regs, 0);
        goto out_tsk;
    }

    ret = ptrace_check_attach(child, request == PTRACE_KILL);
    if (ret < 0) {
        pt_error_return(regs, -ret);
        goto out_tsk;
    }

    /* The explicit 64-bit request variants carry addr/addr2 in %g2/%g3
     * and are numbered 30 above their base requests; remap them onto
     * the common handlers below. */
    if (!(test_thread_flag(TIF_32BIT))	&&
            ((request == PTRACE_READDATA64)		||
             (request == PTRACE_WRITEDATA64)		||
             (request == PTRACE_READTEXT64)		||
             (request == PTRACE_WRITETEXT64)		||
             (request == PTRACE_PEEKTEXT64)		||
             (request == PTRACE_POKETEXT64)		||
             (request == PTRACE_PEEKDATA64)		||
             (request == PTRACE_POKEDATA64))) {
        addr = regs->u_regs[UREG_G2];
        addr2 = regs->u_regs[UREG_G3];
        request -= 30; /* wheee... */
    }

    switch(request) {
    case PTRACE_PEEKTEXT: /* read word at location addr. */
    case PTRACE_PEEKDATA: {
        unsigned long tmp64;
        unsigned int tmp32;
        int res, copied;

        /* Word size follows the tracer's personality: 32-bit tracers
         * peek 32-bit words, zero-extended into tmp64. */
        res = -EIO;
        if (test_thread_flag(TIF_32BIT)) {
            copied = access_process_vm(child, addr,
                                       &tmp32, sizeof(tmp32), 0);
            tmp64 = (unsigned long) tmp32;
            if (copied == sizeof(tmp32))
                res = 0;
        } else {
            copied = access_process_vm(child, addr,
                                       &tmp64, sizeof(tmp64), 0);
            if (copied == sizeof(tmp64))
                res = 0;
        }
        if (res < 0)
            pt_error_return(regs, -res);
        else
            pt_os_succ_return(regs, tmp64, (long *) data);
        goto flush_and_out;
    }

    case PTRACE_POKETEXT: /* write the word at location addr. */
    case PTRACE_POKEDATA: {
        unsigned long tmp64;
        unsigned int tmp32;
        int copied, res = -EIO;

        if (test_thread_flag(TIF_32BIT)) {
            tmp32 = data;
            copied = access_process_vm(child, addr,
                                       &tmp32, sizeof(tmp32), 1);
            if (copied == sizeof(tmp32))
                res = 0;
        } else {
            tmp64 = data;
            copied = access_process_vm(child, addr,
                                       &tmp64, sizeof(tmp64), 1);
            if (copied == sizeof(tmp64))
                res = 0;
        }
        if (res < 0)
            pt_error_return(regs, -res);
        else
            pt_succ_return(regs, res);
        goto flush_and_out;
    }

    /* 32-bit register image: %psr/%pc/%npc/%y plus u_regs[1..15].
     * NOTE(review): 'addr' is used as a user pointer with __put_user and
     * no visible access_ok() check -- verify the callers guarantee it. */
    case PTRACE_GETREGS: {
        struct pt_regs32 *pregs = (struct pt_regs32 *) addr;
        struct pt_regs *cregs = child->thread_info->kregs;
        int rval;

        if (__put_user(tstate_to_psr(cregs->tstate), (&pregs->psr)) ||
                __put_user(cregs->tpc, (&pregs->pc)) ||
                __put_user(cregs->tnpc, (&pregs->npc)) ||
                __put_user(cregs->y, (&pregs->y))) {
            pt_error_return(regs, EFAULT);
            goto out_tsk;
        }
        for (rval = 1; rval < 16; rval++)
            if (__put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]))) {
                pt_error_return(regs, EFAULT);
                goto out_tsk;
            }
        pt_succ_return(regs, 0);
#ifdef DEBUG_PTRACE
        printk ("PC=%lx nPC=%lx o7=%lx\n", cregs->tpc, cregs->tnpc, cregs->u_regs [15]);
#endif
        goto out_tsk;
    }

    case PTRACE_GETREGS64: {
        struct pt_regs *pregs = (struct pt_regs *) addr;
        struct pt_regs *cregs = child->thread_info->kregs;
        unsigned long tpc = cregs->tpc;
        int rval;

        /* Report a 32-bit child's PC truncated to 32 bits. */
        if ((child->thread_info->flags & _TIF_32BIT) != 0)
            tpc &= 0xffffffff;
        if (__put_user(cregs->tstate, (&pregs->tstate)) ||
                __put_user(tpc, (&pregs->tpc)) ||
                __put_user(cregs->tnpc, (&pregs->tnpc)) ||
                __put_user(cregs->y, (&pregs->y))) {
            pt_error_return(regs, EFAULT);
            goto out_tsk;
        }
        for (rval = 1; rval < 16; rval++)
            if (__put_user(cregs->u_regs[rval], (&pregs->u_regs[rval - 1]))) {
                pt_error_return(regs, EFAULT);
                goto out_tsk;
            }
        pt_succ_return(regs, 0);
#ifdef DEBUG_PTRACE
        printk ("PC=%lx nPC=%lx o7=%lx\n", cregs->tpc, cregs->tnpc, cregs->u_regs [15]);
#endif
        goto out_tsk;
    }

    case PTRACE_SETREGS: {
        struct pt_regs32 *pregs = (struct pt_regs32 *) addr;
        struct pt_regs *cregs = child->thread_info->kregs;
        unsigned int psr, pc, npc, y;
        int i;

        /* Must be careful, tracing process can only set certain
         * bits in the psr.
         */
        if (__get_user(psr, (&pregs->psr)) ||
                __get_user(pc, (&pregs->pc)) ||
                __get_user(npc, (&pregs->npc)) ||
                __get_user(y, (&pregs->y))) {
            pt_error_return(regs, EFAULT);
            goto out_tsk;
        }
        /* Only the condition codes are taken from the tracer's psr. */
        cregs->tstate &= ~(TSTATE_ICC);
        cregs->tstate |= psr_to_tstate_icc(psr);
        /* Reject misaligned PC/nPC updates (silently keeps old values). */
        if (!((pc | npc) & 3)) {
            cregs->tpc = pc;
            cregs->tnpc = npc;
        }
        cregs->y = y;
        for (i = 1; i < 16; i++) {
            if (__get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]))) {
                pt_error_return(regs, EFAULT);
                goto out_tsk;
            }
        }
        pt_succ_return(regs, 0);
        goto out_tsk;
    }

    case PTRACE_SETREGS64: {
        struct pt_regs *pregs = (struct pt_regs *) addr;
        struct pt_regs *cregs = child->thread_info->kregs;
        unsigned long tstate, tpc, tnpc, y;
        int i;

        /* Must be careful, tracing process can only set certain
         * bits in the psr.
         */
        if (__get_user(tstate, (&pregs->tstate)) ||
                __get_user(tpc, (&pregs->tpc)) ||
                __get_user(tnpc, (&pregs->tnpc)) ||
                __get_user(y, (&pregs->y))) {
            pt_error_return(regs, EFAULT);
            goto out_tsk;
        }
        if ((child->thread_info->flags & _TIF_32BIT) != 0) {
            tpc &= 0xffffffff;
            tnpc &= 0xffffffff;
        }
        /* Only the integer condition codes may be written. */
        tstate &= (TSTATE_ICC | TSTATE_XCC);
        cregs->tstate &= ~(TSTATE_ICC | TSTATE_XCC);
        cregs->tstate |= tstate;
        if (!((tpc | tnpc) & 3)) {
            cregs->tpc = tpc;
            cregs->tnpc = tnpc;
        }
        cregs->y = y;
        for (i = 1; i < 16; i++) {
            if (__get_user(cregs->u_regs[i], (&pregs->u_regs[i-1]))) {
                pt_error_return(regs, EFAULT);
                goto out_tsk;
            }
        }
        pt_succ_return(regs, 0);
        goto out_tsk;
    }

    /* 32-bit FP image: 32 single regs, fsr, and a zeroed FP queue. */
    case PTRACE_GETFPREGS: {
        struct fps {
            unsigned int regs[32];
            unsigned int fsr;
            unsigned int flags;
            unsigned int extra;
            unsigned int fpqd;
            struct fq {
                unsigned int insnaddr;
                unsigned int insn;
            } fpq[16];
        } *fps = (struct fps *) addr;
        unsigned long *fpregs = child->thread_info->fpregs;

        if (copy_to_user(&fps->regs[0], fpregs,
                         (32 * sizeof(unsigned int))) ||
                __put_user(child->thread_info->xfsr[0], (&fps->fsr)) ||
                __put_user(0, (&fps->fpqd)) ||
                __put_user(0, (&fps->flags)) ||
                __put_user(0, (&fps->extra)) ||
                clear_user(&fps->fpq[0], 32 * sizeof(unsigned int))) {
            pt_error_return(regs, EFAULT);
            goto out_tsk;
        }
        pt_succ_return(regs, 0);
        goto out_tsk;
    }

    case PTRACE_GETFPREGS64: {
        struct fps {
            unsigned int regs[64];
            unsigned long fsr;
        } *fps = (struct fps *) addr;
        unsigned long *fpregs = child->thread_info->fpregs;

        if (copy_to_user(&fps->regs[0], fpregs,
                         (64 * sizeof(unsigned int))) ||
                __put_user(child->thread_info->xfsr[0], (&fps->fsr))) {
            pt_error_return(regs, EFAULT);
            goto out_tsk;
        }
        pt_succ_return(regs, 0);
        goto out_tsk;
    }

    case PTRACE_SETFPREGS: {
        struct fps {
            unsigned int regs[32];
            unsigned int fsr;
            unsigned int flags;
            unsigned int extra;
            unsigned int fpqd;
            struct fq {
                unsigned int insnaddr;
                unsigned int insn;
            } fpq[16];
        } *fps = (struct fps *) addr;
        unsigned long *fpregs = child->thread_info->fpregs;
        unsigned fsr;

        if (copy_from_user(fpregs, &fps->regs[0],
                           (32 * sizeof(unsigned int))) ||
                __get_user(fsr, (&fps->fsr))) {
            pt_error_return(regs, EFAULT);
            goto out_tsk;
        }
        /* Only the low 32 bits of the fsr come from the 32-bit image. */
        child->thread_info->xfsr[0] &= 0xffffffff00000000UL;
        child->thread_info->xfsr[0] |= fsr;
        if (!(child->thread_info->fpsaved[0] & FPRS_FEF))
            child->thread_info->gsr[0] = 0;
        child->thread_info->fpsaved[0] |= (FPRS_FEF | FPRS_DL);
        pt_succ_return(regs, 0);
        goto out_tsk;
    }

    case PTRACE_SETFPREGS64: {
        struct fps {
            unsigned int regs[64];
            unsigned long fsr;
        } *fps = (struct fps *) addr;
        unsigned long *fpregs = child->thread_info->fpregs;

        if (copy_from_user(fpregs, &fps->regs[0],
                           (64 * sizeof(unsigned int))) ||
                __get_user(child->thread_info->xfsr[0], (&fps->fsr))) {
            pt_error_return(regs, EFAULT);
            goto out_tsk;
        }
        if (!(child->thread_info->fpsaved[0] & FPRS_FEF))
            child->thread_info->gsr[0] = 0;
        child->thread_info->fpsaved[0] |= (FPRS_FEF | FPRS_DL | FPRS_DU);
        pt_succ_return(regs, 0);
        goto out_tsk;
    }

    /* Bulk transfers: success only if the full 'data' bytes moved. */
    case PTRACE_READTEXT:
    case PTRACE_READDATA: {
        int res = ptrace_readdata(child, addr,
                                  (void *)addr2, data);
        if (res == data) {
            pt_succ_return(regs, 0);
            goto flush_and_out;
        }
        if (res >= 0)
            res = -EIO;
        pt_error_return(regs, -res);
        goto flush_and_out;
    }

    case PTRACE_WRITETEXT:
    case PTRACE_WRITEDATA: {
        int res = ptrace_writedata(child, (void *) addr2,
                                   addr, data);
        if (res == data) {
            pt_succ_return(regs, 0);
            goto flush_and_out;
        }
        if (res >= 0)
            res = -EIO;
        pt_error_return(regs, -res);
        goto flush_and_out;
    }
    case PTRACE_SYSCALL: /* continue and stop at (return from) syscall */
        addr = 1;
        /* fallthrough -- shares the CONT path; addr==1 means "do not
         * change the PC", request distinguishes the two below. */

    case PTRACE_CONT: { /* restart after signal. */
        if (data > _NSIG) {
            pt_error_return(regs, EIO);
            goto out_tsk;
        }
        if (addr != 1) {
            unsigned long pc_mask = ~0UL;

            if ((child->thread_info->flags & _TIF_32BIT) != 0)
                pc_mask = 0xffffffff;

            if (addr & 3) {
                pt_error_return(regs, EINVAL);
                goto out_tsk;
            }
#ifdef DEBUG_PTRACE
            printk ("Original: %016lx %016lx\n",
                    child->thread_info->kregs->tpc,
                    child->thread_info->kregs->tnpc);
            printk ("Continuing with %016lx %016lx\n", addr, addr+4);
#endif
            child->thread_info->kregs->tpc = (addr & pc_mask);
            child->thread_info->kregs->tnpc = ((addr + 4) & pc_mask);
        }

        if (request == PTRACE_SYSCALL) {
            set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
        } else {
            clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
        }

        child->exit_code = data;
#ifdef DEBUG_PTRACE
        printk("CONT: %s [%d]: set exit_code = %x %lx %lx\n", child->comm,
               child->pid, child->exit_code,
               child->thread_info->kregs->tpc,
               child->thread_info->kregs->tnpc);

#endif
        wake_up_process(child);
        pt_succ_return(regs, 0);
        goto out_tsk;
    }

    /*
     * make the child exit.  Best I can do is send it a sigkill.
     * perhaps it should be put in the status that it wants to
     * exit.
     */
    case PTRACE_KILL: {
        if (child->state == TASK_ZOMBIE) {	/* already dead */
            pt_succ_return(regs, 0);
            goto out_tsk;
        }
        child->exit_code = SIGKILL;
        wake_up_process(child);
        pt_succ_return(regs, 0);
        goto out_tsk;
    }

    case PTRACE_SUNDETACH: { /* detach a process that was attached. */
        int error = ptrace_detach(child, data);
        if (error) {
            pt_error_return(regs, EIO);
            goto out_tsk;
        }
        pt_succ_return(regs, 0);
        goto out_tsk;
    }

    /* PTRACE_DUMPCORE unsupported... */

    default: {
        int err = ptrace_request(child, request, addr, data);
        if (err)
            pt_error_return(regs, -err);
        else
            pt_succ_return(regs, 0);
        goto out_tsk;
    }
    }
flush_and_out:
    /* Invalidate the D-cache (by zeroing its tags) after requests that
     * touched the child's memory, and the I-cache too when text may
     * have changed, so stale lines are not observed. */
    {
        unsigned long va;

        if (tlb_type == cheetah || tlb_type == cheetah_plus) {
            for (va = 0; va < (1 << 16); va += (1 << 5))
                spitfire_put_dcache_tag(va, 0x0);
            /* No need to mess with I-cache on Cheetah. */
        } else {
            for (va =  0; va < L1DCACHE_SIZE; va += 32)
                spitfire_put_dcache_tag(va, 0x0);
            if (request == PTRACE_PEEKTEXT ||
                    request == PTRACE_POKETEXT ||
                    request == PTRACE_READTEXT ||
                    request == PTRACE_WRITETEXT) {
                for (va =  0; va < (PAGE_SIZE << 1); va += 32)
                    spitfire_put_icache_tag(va, 0x0);
                __asm__ __volatile__("flush %g6");
            }
        }
    }
out_tsk:
    /* Drop the reference taken after find_task_by_pid(). */
    if (child)
        put_task_struct(child);
out:
    unlock_kernel();
}
/*
 * arch_ptrace() - handle sparc-specific ptrace requests (32-bit ABI).
 *
 * @child:   traced task, already attached and stopped by the generic code
 * @request: ptrace request code
 * @addr:    request-specific argument; a user pointer to a register image
 *           for the GET/SET REGS/FPREGS requests
 * @data:    request-specific argument; the byte count for the READ/WRITE
 *           TEXT/DATA requests
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): addr2 is pulled from the current (tracer) task's saved
 * %i4 -- presumably how the sparc ptrace ABI passes its extra fourth
 * argument; confirm against the syscall entry path.
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	unsigned long addr2 = current->thread.kregs->u_regs[UREG_I4];
	void __user *addr2p;
	const struct user_regset_view *view;
	struct pt_regs __user *pregs;
	struct fps __user *fps;
	int ret;

	view = task_user_regset_view(current);
	addr2p = (void __user *) addr2;
	pregs = (struct pt_regs __user *) addr;
	fps = (struct fps __user *) addr;

	switch(request) {
	case PTRACE_GETREGS: {
		/* psr/pc/npc/y live after the 32 u_regs in the regset. */
		ret = copy_regset_to_user(child, view, REGSET_GENERAL,
					  32 * sizeof(u32),
					  4 * sizeof(u32),
					  &pregs->psr);
		if (!ret)
			/* Fix: capture the result -- a fault while copying
			 * the 15 u_regs words was previously ignored and
			 * reported as success. */
			ret = copy_regset_to_user(child, view, REGSET_GENERAL,
						  1 * sizeof(u32),
						  15 * sizeof(u32),
						  &pregs->u_regs[0]);
		break;
	}

	case PTRACE_SETREGS: {
		ret = copy_regset_from_user(child, view, REGSET_GENERAL,
					    32 * sizeof(u32),
					    4 * sizeof(u32),
					    &pregs->psr);
		if (!ret)
			/* Fix: same silent-failure problem as GETREGS. */
			ret = copy_regset_from_user(child, view,
						    REGSET_GENERAL,
						    1 * sizeof(u32),
						    15 * sizeof(u32),
						    &pregs->u_regs[0]);
		break;
	}

	case PTRACE_GETFPREGS: {
		ret = copy_regset_to_user(child, view, REGSET_FP,
					  0 * sizeof(u32),
					  32 * sizeof(u32),
					  &fps->regs[0]);
		if (!ret)
			ret = copy_regset_to_user(child, view, REGSET_FP,
						  33 * sizeof(u32),
						  1 * sizeof(u32),
						  &fps->fsr);

		/* The 32-bit image also carries an (unused) FP queue and
		 * status words -- present them as zero. */
		if (!ret) {
			if (__put_user(0, &fps->fpqd) ||
			    __put_user(0, &fps->flags) ||
			    __put_user(0, &fps->extra) ||
			    clear_user(fps->fpq, sizeof(fps->fpq)))
				ret = -EFAULT;
		}
		break;
	}

	case PTRACE_SETFPREGS: {
		ret = copy_regset_from_user(child, view, REGSET_FP,
					    0 * sizeof(u32),
					    32 * sizeof(u32),
					    &fps->regs[0]);
		if (!ret)
			ret = copy_regset_from_user(child, view, REGSET_FP,
						    33 * sizeof(u32),
						    1 * sizeof(u32),
						    &fps->fsr);
		break;
	}

	/* Bulk transfers: success only if the full 'data' bytes moved;
	 * a short transfer is reported as -EIO. */
	case PTRACE_READTEXT:
	case PTRACE_READDATA:
		ret = ptrace_readdata(child, addr, addr2p, data);

		if (ret == data)
			ret = 0;
		else if (ret >= 0)
			ret = -EIO;
		break;

	case PTRACE_WRITETEXT:
	case PTRACE_WRITEDATA:
		ret = ptrace_writedata(child, addr2p, addr, data);

		if (ret == data)
			ret = 0;
		else if (ret >= 0)
			ret = -EIO;
		break;

	default:
		/* Legacy sparc detach number maps onto the generic one. */
		if (request == PTRACE_SPARC_DETACH)
			request = PTRACE_DETACH;
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}