Example #1
static int lfs_copy_from_user(loff_t pos, int npages, int nbytes,
			      struct page **segpages, const char __user *buf)
{
	long page_fault = 0;
	int i, offset;

	for (i = 0, offset = (pos & (PAGE_CACHE_SIZE - 1));
	     i < npages; i++, offset = 0) {
		size_t count = min_t(size_t, PAGE_CACHE_SIZE - offset, nbytes);
		struct page *page = segpages[i];

		fault_in_pages_readable(buf, count);

		kmap(page);
		page_fault = __copy_from_user(page_address(page) + offset,
					      buf, count);
		flush_dcache_page(page);
		kunmap(page);
		buf += count;
		nbytes -= count;

		if (page_fault)
			break;
	}

	return page_fault ? -EFAULT : 0;
}
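Before moving on to the other callers, it is worth recalling what fault_in_pages_readable() itself does: it touches the user buffer so the backing pages are resident before a copy that must not (or would rather not) take a page fault. Older kernels implemented it in include/linux/pagemap.h roughly as below; this is a paraphrase for illustration, not the verbatim source of any particular version, and note that it only probes the first and last byte (later kernels loop over every page in the range):

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	/* Touch the first byte; a fault here is taken and handled normally. */
	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		/* If the range crosses into another page, touch the last byte too. */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __get_user(c, end);
	}
	return ret;
}

The return convention matters for every example that follows: 0 means the range could be faulted in, non-zero means the address is bad.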
Example #2
/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 int write_bytes,
					 struct page **prepared_pages,
					 const char __user *buf)
{
	long page_fault = 0;
	int i;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[i];
		fault_in_pages_readable(buf, count);

		/* Copy data from userspace to the current page */
		kmap(page);
		page_fault = __copy_from_user(page_address(page) + offset,
					      buf, count);
		/* Flush processor's dcache for this page */
		flush_dcache_page(page);
		kunmap(page);
		buf += count;
		write_bytes -= count;

		if (page_fault)
			break;
	}
	return page_fault ? -EFAULT : 0;
}
Example #3
/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (ie. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
static int ii_iovec_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	struct iovec *iov = (struct iovec *)i->data;
	char __user *buf = iov->iov_base + i->iov_offset;
	bytes = min(bytes, iov->iov_len - i->iov_offset);
	return fault_in_pages_readable(buf, bytes);
}
Example #4
/*
 * Fault in the first iovec of the given iov_iter, to a maximum length
 * of bytes. Returns 0 on success, or non-zero if the memory could not be
 * accessed (ie. because it is an invalid address).
 *
 * writev-intensive code may want this to prefault several iovecs -- that
 * would be possible (callers must not rely on the fact that _only_ the
 * first iovec will be faulted with the current implementation).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		char __user *buf = i->iov->iov_base + i->iov_offset;
		bytes = min(bytes, i->iov->iov_len - i->iov_offset);
		return fault_in_pages_readable(buf, bytes);
	}
	return 0;
}
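Both this variant and Example #3 serve the buffered-write fast path. The surrounding loop, modelled on generic_perform_write() from mm/filemap.c, looks roughly like the sketch below (paraphrased and abbreviated; a_ops, fsdata and flags follow the kernel's address_space_operations conventions, and most error handling is elided):

	/* Prefault while we hold no page lock and may still sleep. */
	if (unlikely(iov_iter_fault_in_readable(i, bytes)))
		return -EFAULT;

	status = a_ops->write_begin(file, mapping, pos, bytes, flags,
				    &page, &fsdata);
	if (unlikely(status))
		return status;

	/* The page is now locked, so this copy must not take a page fault;
	 * the prefault above makes the atomic copy very likely to succeed. */
	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
	flush_dcache_page(page);

	status = a_ops->write_end(file, mapping, pos, bytes, copied,
				  page, fsdata);

If the atomic copy comes up short despite the prefault (for example, the page was reclaimed in between), the real loop simply retries, which is why prefaulting is a hint rather than a guarantee.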
Example #5
/*
 * Pre-fault in the user memory, so we can use atomic copies.
 */
static void iov_fault_in_pages_read(struct iovec *iov, unsigned long len)
{
	while (!iov->iov_len)
		iov++;

	while (len > 0) {
		unsigned long this_len;

		this_len = min_t(unsigned long, len, iov->iov_len);
		fault_in_pages_readable(iov->iov_base, this_len);
		len -= this_len;
		iov++;
	}
}
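This helper comes from the old fs/splice.c write path, where the copy itself runs with the destination page mapped atomically. The pairing looks roughly like the hypothetical helper below (a condensed sketch using the one-argument kmap_atomic(); copy_iov_page is an illustrative name, not a kernel function, and it assumes len fits within the first non-empty iovec):

static int copy_iov_page(struct page *page, unsigned int offset,
			 struct iovec *iov, unsigned long len)
{
	char *dst;
	unsigned long left;

	iov_fault_in_pages_read(iov, len);	/* may sleep */

	/* kmap_atomic() disables page faults, so this copy cannot sleep. */
	dst = kmap_atomic(page);
	left = __copy_from_user_inatomic(dst + offset, iov->iov_base, len);
	kunmap_atomic(dst);
	if (!left)
		return 0;

	/* Prefaulting is only a hint; redo the whole copy non-atomically. */
	dst = kmap(page);
	left = copy_from_user(dst + offset, iov->iov_base, len);
	kunmap(page);
	return left ? -EFAULT : 0;
}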
Example #6
/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
				return err;
			0;
		}))
	}
	return 0;
}
Example #7
static void kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 features;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, &features, 1);

	/* Quick self-test to see if the mapping works: fault_in_pages_readable()
	 * returns 0 on success, so a non-zero result means the magic page is
	 * not readable and patching cannot work. */
	if (fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) {
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void*)_stext;
	end = (void*)_etext;

	/*
	 * Being interrupted in the middle of patching would
	 * be bad for SPRG4-7, which KVM can't keep in sync
	 * with emulated accesses because reads don't trap.
	 */
	local_irq_disable();

	for (p = start; p < end; p++) {
		/* Avoid patching the template code */
		if (p >= kvm_template_start && p < kvm_template_end) {
			p = kvm_template_end - 1;
			continue;
		}
		kvm_check_ins(p, features);
	}

	local_irq_enable();

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
			 kvm_patching_worked ? "worked" : "failed");
}
Example #8
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset,
					size_t bytes, struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyin(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyin(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = copyin(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyin(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
Example #9
/* FIXME: Ugliest function of all in LFS, need I say more? */
static ssize_t lfs_file_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	loff_t pos;
	struct page *page;
	ssize_t res, written, bytes;
	struct inode *inode = file->f_dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	struct segment *segp = LFS_SBI(sb)->s_curr;

	//dprintk("lfs_file_write called for %lu at pos %Lu\n", inode->i_ino, *ppos);
	if (file->f_flags & O_DIRECT) {
		dprintk("The file is requesting direct IO\n");
		return -EINVAL;
	}

	/* count is a size_t; cast before the sign check or it is always false */
	if (unlikely((ssize_t) count < 0))
		return -EINVAL;
	if (unlikely(!access_ok(VERIFY_READ, buf, count)))
		return -EFAULT;

	//down(&inode->i_sem);	/* lock the file */
	mutex_lock(&inode->i_mutex); //BrechREiZ: We need this for Kernel 2.6.17
	lfs_lock(sb);
		
	pos = *ppos;
	res = generic_write_checks(file, &pos, &count, 0);
	if (res)
		goto out;
	if (count == 0)
		goto out;

	res = remove_suid(file->f_dentry);
	if (res)
		goto out;
	//inode_update_time(inode, 1);	/* update mtime and ctime */
	file_update_time(file); //BrechREiZ: We need this for Kernel 2.6.17

	written = 0;
	do {
		long offset;
		size_t copied;
		int i, siblock, eiblock, boffset;
		sector_t block;
				
		offset = (segp->offset % BUF_IN_PAGE) * LFS_BSIZE; 
		offset += pos & (LFS_BSIZE - 1); /* within block */
		bytes = PAGE_CACHE_SIZE - offset; /* number of bytes written
						     in this iteration */
		invalidate_old_page(inode, pos);

		if (bytes > count) 
			bytes = count;
		
		//dprintk("1:segp->start=%Lu,segp->offset=%d,segp->end=%Lu,offset=%lu,bytes=%d\n", segp->start, segp->offset, segp->end,offset,bytes);
		
		siblock = pos >> LFS_BSIZE_BITS;
		eiblock = (pos + bytes - 1) >> LFS_BSIZE_BITS;

		//dprintk("writing %d bytes at offset %ld (pos = %Lu)\n", bytes, offset, pos);
		//dprintk("siblock = %d, eiblock = %d\n", siblock, eiblock);
		

		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 */
		fault_in_pages_readable(buf, bytes);
		page = get_seg_page(segp);
		if (!page) {
			res = -ENOMEM;
			break;
		}

		/* fill the page with current inode blocks if any */
		boffset = offset / LFS_BSIZE;
		for (i = siblock; i <= eiblock; ++i, ++boffset) {
			struct buffer_head *bh;
			//dprintk("Asking for block %d\n", i);
			bh = lfs_read_block(inode, i);
			if (!bh) /* new block */
				break;
			//dprintk("boffset = %d\n", boffset);
			memcpy(page_address(page) + LFS_BSIZE * boffset, bh->b_data, LFS_BSIZE);
			brelse(bh);
		}

		copied = __copy_from_user(page_address(page) + offset, buf, bytes);
		flush_dcache_page(page);

		block = segp->start + segp->offset;
		for (i = siblock; i <= eiblock; ++i, ++block)
			segsum_update_finfo(segp, inode->i_ino, i, block);

		block = segp->start + segp->offset;
		segp->offset += (bytes  - 1)/LFS_BSIZE + 1;
		//dprintk("2:segp->start=%Lu,segp->offset=%d,segp->end=%Lu,offset=%lu,bytes=%d\n",
		//segp->start, segp->offset, segp->end,offset,bytes);
		BUG_ON(segp->start + segp->offset > segp->end);
		if (segp->start + segp->offset == segp->end) {
			dprintk("allocating new segment\n");
			/* This also is going to write the previous segment */
			segment_allocate_new(inode->i_sb, segp, segp->start + segp->offset);
			segp = LFS_SBI(sb)->s_curr;
		}

		/* update the inode */
		for (i = siblock; i <= eiblock; ++i, ++block)
			update_inode(inode, i, block);
		//dprintk("start=%Lu,offset=%d,end=%Lu\n", segp->start, segp->offset, segp->end);
		segusetbl_add_livebytes(sb, segp->segnum, bytes);
		
		written += bytes;
		buf += bytes;
		pos += bytes;
		count -= bytes;
	} while (count);

	*ppos = pos;
	if (pos > inode->i_size)
		i_size_write(inode, pos);
	if (written)
		mark_inode_dirty(inode);
	
	lfs_unlock(sb);
	//up(&inode->i_sem);
	mutex_unlock(&inode->i_mutex); //BrechREiZ: and unlocking...
	return written ? written : res;
out:
	lfs_unlock(sb);
	//up(&inode->i_sem);
	mutex_unlock(&inode->i_mutex); //BrechREiZ: and unlocking...
	return res; 
}