Example #1
static int
pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len,
		      int atomic)
{
	unsigned long copy;

	while (len > 0) {
		while (!iov->iov_len)
			iov++;
		copy = min_t(unsigned long, len, iov->iov_len);

		if (atomic) {
			if (__copy_to_user_inatomic(iov->iov_base, from, copy))
				return -EFAULT;
		} else {
			if (copy_to_user(iov->iov_base, from, copy))
				return -EFAULT;
		}
		from += copy;
		len -= copy;
		iov->iov_base += copy;
		iov->iov_len -= copy;
	}
	return 0;
}
Example #2
static int
pipe_iov_copy_to_user(struct iovec *iov, void *addr, int *offset,
		      size_t *remaining, int atomic)
{
	unsigned long copy;

	while (*remaining > 0) {
		while (!iov->iov_len)
			iov++;
		copy = min_t(unsigned long, *remaining, iov->iov_len);

		if (atomic) {
			if (__copy_to_user_inatomic(iov->iov_base,
						    addr + *offset, copy))
				return -EFAULT;
		} else {
			if (copy_to_user(iov->iov_base,
					 addr + *offset, copy))
				return -EFAULT;
		}
		*offset += copy;
		*remaining -= copy;
		iov->iov_base += copy;
		iov->iov_len -= copy;
	}
	return 0;
}
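
Example #2 is the reworked form of the helper in Example #1: instead of advancing a raw from pointer and a local len, it updates the caller's *offset and *remaining, so that after a failed atomic copy the caller still knows exactly how much data has already been handed to userspace. A minimal caller sketch follows, assuming a pipe_read()-style retry loop; the redo label and the addr, chars, error and atomic variables are illustrative, not taken from the code above.

	size_t remaining = chars;	/* bytes still owed to userspace */
	int offset = 0;			/* bytes already consumed from addr */
	int error;

redo:
	error = pipe_iov_copy_to_user(iov, addr, &offset, &remaining, atomic);
	if (unlikely(error)) {
		/*
		 * offset/remaining were only advanced for chunks that copied
		 * completely, so after an atomic fault the rest can simply be
		 * retried with page faults allowed, without losing track of
		 * how much was already written.
		 */
		if (atomic) {
			atomic = 0;
			goto redo;
		}
		/* a real fault in the non-atomic path: give up */
	}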
Example #3
/**
 * probe_kernel_write(): safely attempt to write to a location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to address @dst from the buffer at @src.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
long probe_kernel_write(void *dst, void *src, size_t size)
{
	long ret;

	ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);

	return ret ? -EFAULT : 0;
}
Example #4
long __probe_kernel_write(void *dst, void *src, size_t size)
{
	long ret;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	pagefault_disable();
	ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
	pagefault_enable();
	set_fs(old_fs);

	return ret ? -EFAULT : 0;
}
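
Examples #3 and #4 wrap __copy_to_user_inatomic() so that a write to a bad kernel address comes back as -EFAULT instead of oopsing; the second variant additionally disables page faults and temporarily raises the address limit with set_fs(KERNEL_DS). A minimal usage sketch, assuming a hypothetical destination pointer called target that may point at unmapped memory (the names here are illustrative only):

	u32 new_val = 0xdeadbeef;
	long err;

	/* poke sizeof(new_val) bytes into *target without risking an oops
	 * if the destination page turns out not to be mapped */
	err = __probe_kernel_write(target, &new_val, sizeof(new_val));
	if (err)
		pr_warn("cannot write to %p\n", target);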
Example #5
static noinline void __init copy_user_test(void)
{
	char *kmem;
	char __user *usermem;
	size_t size = 10;
	int unused;

	kmem = kmalloc(size, GFP_KERNEL);
	if (!kmem)
		return;

	usermem = (char __user *)vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (IS_ERR(usermem)) {
		pr_err("Failed to allocate user memory\n");
		kfree(kmem);
		return;
	}
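
	/*
	 * Each copy below deliberately runs one byte past the 10-byte
	 * kmalloc() buffer, so every user-copy variant exercised here
	 * should trigger an out-of-bounds report when KASAN is enabled.
	 */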

	pr_info("out-of-bounds in copy_from_user()\n");
	unused = copy_from_user(kmem, usermem, size + 1);

	pr_info("out-of-bounds in copy_to_user()\n");
	unused = copy_to_user(usermem, kmem, size + 1);

	pr_info("out-of-bounds in __copy_from_user()\n");
	unused = __copy_from_user(kmem, usermem, size + 1);

	pr_info("out-of-bounds in __copy_to_user()\n");
	unused = __copy_to_user(usermem, kmem, size + 1);

	pr_info("out-of-bounds in __copy_from_user_inatomic()\n");
	unused = __copy_from_user_inatomic(kmem, usermem, size + 1);

	pr_info("out-of-bounds in __copy_to_user_inatomic()\n");
	unused = __copy_to_user_inatomic(usermem, kmem, size + 1);

	pr_info("out-of-bounds in strncpy_from_user()\n");
	unused = strncpy_from_user(kmem, usermem, size + 1);

	vm_munmap((unsigned long)usermem, PAGE_SIZE);
	kfree(kmem);
}
Example #6
static size_t __iovec_copy_to_user_inatomic(char *vaddr,
			const struct iovec *iov, size_t base, size_t bytes)
{
	size_t copied = 0, left = 0;

	while (bytes) {
		char __user *buf = iov->iov_base + base;
		int copy = min(bytes, iov->iov_len - base);

		base = 0;
		left = __copy_to_user_inatomic(buf, vaddr, copy);
		copied += copy;
		bytes -= copy;
		vaddr += copy;
		iov++;

		if (unlikely(left))
			break;
	}
	return copied - left;
}
Example #7
/*
 * Copy as much as we can into the page and return the number of bytes
 * successfully copied.  If a fault is encountered, return only the number
 * of bytes that were copied before the fault.
 */
static size_t ii_iovec_copy_to_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	struct iovec *iov = (struct iovec *)i->data;
	char *kaddr;
	size_t copied;

	BUG_ON(!in_atomic());
	kaddr = kmap_atomic(page);
	if (likely(i->nr_segs == 1)) {
		int left;
		char __user *buf = iov->iov_base + i->iov_offset;
		left = __copy_to_user_inatomic(buf, kaddr + offset, bytes);
		copied = bytes - left;
	} else {
		copied = __iovec_copy_to_user(kaddr + offset, iov,
					      i->iov_offset, bytes, 1);
	}
	kunmap_atomic(kaddr);

	return copied;
}
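
The atomic variant above must run with page faults disabled (it BUG()s when not in atomic context), and a short copy is not treated as an error. A hedged sketch of the usual calling pattern, loosely modelled on a page-cache read actor; the surrounding variables are assumptions, not taken from the code above.

	pagefault_disable();	/* satisfies the BUG_ON(!in_atomic()) check */
	copied = ii_iovec_copy_to_user_atomic(page, i, offset, bytes);
	pagefault_enable();

	/*
	 * copied < bytes means part of the user buffer was not resident:
	 * advance the iterator by what was copied and redo the remainder
	 * with a sleeping copy outside the atomic section.
	 */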
Example #8
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
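
copy_page_to_iter_iovec() is the iovec backend of copy_page_to_iter(): on HIGHMEM configurations it first tries to copy under kmap_atomic() when the user pages look writable without faulting, and for anything left over it falls back to a regular kmap() plus __copy_to_user(), which may sleep. A hedged sketch of a read-side caller, loosely following the generic file-read loop; the variable names are assumptions.

	ret = copy_page_to_iter(page, offset, nr, iter);
	offset += ret;
	written += ret;

	if (ret < nr) {
		/*
		 * Short copy: the user buffer faulted part-way through.
		 * Report what was transferred so far, or -EFAULT if
		 * nothing was copied at all.
		 */
		error = written ? 0 : -EFAULT;
	}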
Example #9
ssize_t GSFS_file_read (struct file *filp, char __user *charp, size_t len, loff_t *off){
	struct inode		*inode=filp->f_mapping->host;
	struct GSFS_inode	*inf=(struct GSFS_inode*)inode->i_private;
	sector_t sec_start,
		 sec_end,
		 sec_len;
	struct page	**res,
			**res2,
			**restemp;
	unsigned int *pn,
		     *pn2,
		     *pntemp;
	int 	i,
		odirect=filp->f_flags&O_SYNC,
		j,
		lock,
		pncount,
		pn2count;
	unsigned long 	pagestartbyte=0,
			pageendbyte,
			bufstart,
			bytes_in_first_buf_page;
	unsigned long	pagelen;		
	size_t  rlen;
	//char	*dest,
	//	*src;
	
	gwf(printk("<0>" "File read with inode :%lu size:%lu offset:%llu , off:%llu, charp:%lx, pid:%u\n",inode->i_ino,len,*off,filp->f_pos,(unsigned long)charp,current->pid));
	if((*off>=inode->i_size)){
		gwf(printk("<0>" "File read ended for *pos<size with inode :%lu size:%lu offset:%llu , off:%llu, charp:%lx, pid:%u\n",inode->i_ino,len,*off,filp->f_pos,(unsigned long)charp,current->pid));
		return 0;
	}
	if(!access_ok(VERIFY_WRITE,charp,len)){
		gwf(printk("<0>" "File read ended for access_nok with inode :%lu size:%lu offset:%llu , off:%llu, charp:%lx, pid:%u\n",inode->i_ino,len,*off,filp->f_pos,(unsigned long)charp,current->pid));
		return -EIO;
	}
	sec_start=(*off)>>Block_Size_Bits;
	if((*off+len)>inode->i_size)
		len=inode->i_size-*off;
	sec_end=(*off+len-1)>>Block_Size_Bits;
	sec_len=sec_end-sec_start+1;
	bytes_in_first_buf_page=((1+~((*off)&((unsigned long)Block_Size-1)))&(Block_Size-1));
	if(!bytes_in_first_buf_page)
		bytes_in_first_buf_page=Block_Size;
	pn=kzalloc(sec_len*sizeof(unsigned int),GFP_KERNEL);
	pn2=kzalloc(sec_len*sizeof(unsigned int),GFP_KERNEL);
	res=kzalloc(sec_len*sizeof(struct page*),GFP_KERNEL);
	res2=kzalloc(sec_len*sizeof(struct page*),GFP_KERNEL);
	for(i=sec_start,j=0;i<=sec_end;i++,j++)
		pn[j]=i;
	
	gwf(printk("<0>" "GSFS_file_read: sec_start:%lu, sec_end:%lu, sec_len:%lu, bytes_in_first_buf_page: %lu\n",
			sec_start,sec_end,sec_len,bytes_in_first_buf_page));
			
	pncount=GSFS_get_data_pages_of_inode(inode, pn, sec_len ,res,odirect);	
	//printk("<0>" "res[%u]=%d \n",j,res[j]);
	rlen=0;
	pn2count=0;
	lock=0;
	do{
		for(j=0;j<pncount;j++){
			//printk("<0>" "res[%u]=%lx \n",j,res[j]);
			
			if(unlikely(!res[j]))
				continue;
			
			if(lock && PageLocked(res[j])){
				//printk("<0>" "Locking for j:%u\n",j);
				wait_on_page_locked(res[j]);
				lock=0;
			}
			else 
				if(PageLocked(res[j])){
					pn2[pn2count]=pn[j];
					res2[pn2count]=res[j];
					pn2count++;
					continue;
				}
				
			//the page is available for writing to buffer
			 
			if(pn[j]==sec_start){
				pagestartbyte=((*off)&(Block_Size-1));
				bufstart=(unsigned long)charp;
			}
			else{
				pagestartbyte=0;
				bufstart=(unsigned long)(charp)+bytes_in_first_buf_page+((pn[j]-sec_start-1)<<Block_Size_Bits);
			}
			if(pn[j]==sec_end)
				pageendbyte=((*off+len-1)&(Block_Size-1));
			else
				pageendbyte=Block_Size-1;
			pagelen=(unsigned long)(pageendbyte-pagestartbyte+1);
			
			if(inf->igflags & igflag_secure){
				struct GSFS_page	*gp=(struct GSFS_page*)page_private(res[j]);
				
				if(unlikely(!gp || !gp->sip) || unlikely(!(gp->sip->spflags & spflag_page_is_ready_for_read)) ){
					//printk("<0>" "page is not ready for inode:%lu, index: %lu\n", inode->i_ino, res[j]->index);
					//if(gp && gp->sip)
					//	printk("<0>" "and flags:%d\n",gp->sip->spflags);
					i=pagelen;	/* nothing copied from this page */
					goto add_cont;
				}
			}
			
			/* __copy_to_user_inatomic() returns the number of bytes left uncopied */
			i=__copy_to_user_inatomic((void*)bufstart,page_address(res[j])+pagestartbyte,pagelen);
add_cont:			
			rlen+=(pagelen-i);
			mark_page_accessed(res[j]);
			/*
			dest=(char*)bufstart;
			src=(char*)pagestartbyte;
			for(i=0;i<pagelen;i++)
				dest[i]=src[i];
			*/
			//printk("<0>" "asdfasd%s",dest);
			//rlen+=i;
			GSFS_put_data_page_of_inode(inode,res[j]);
			//gwf(printk("<0>" "file read for inode:%lu, j:%u pn[j]:%u pagestartbyte:%lx bufstart:%lx pagelen:%lu i:%u sec_start:%lu\n",
			//		inode->i_ino, j, pn[j],(unsigned long)pagestartbyte,(unsigned long)bufstart,pagelen,i,sec_start));
		}
		lock=1;
		pncount=pn2count;
		pn2count=0;
		
		pntemp=pn2;
		pn2=pn;
		pn=pntemp;
		
		restemp=res2;
		res2=res;
		res=restemp;
		
		gwf(printk("<0>" "file read for inode:%lu pncount:%u\n",inode->i_ino,pncount));
	}while(pncount);
	
	kfree(pn);
	kfree(pn2);
	kfree(res);
	kfree(res2);
	
	(*off)+=rlen;
	gwf(printk("<0>" "file read ends rlen=%lu len:%lu\n",rlen,len));
	return rlen;
}