Example #1
static int do_readpage(struct ubifs_info *c, struct inode *inode,
		       struct page *page, int last_block_size)
{
	void *addr;
	int err = 0, i;
	unsigned int block, beyond;
	struct ubifs_data_node *dn;
	loff_t i_size = inode->i_size;

	dbg_gen("ino %lu, pg %lu, i_size %lld",
		inode->i_ino, page->index, i_size);

	addr = kmap(page);

	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
	if (block >= beyond) {
		/* Reading beyond inode */
		memset(addr, 0, PAGE_CACHE_SIZE);
		goto out;
	}

	dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS);
	if (!dn)
		return -ENOMEM;

	i = 0;
	while (1) {
		int ret;

		if (block >= beyond) {
			/* Reading beyond inode */
			err = -ENOENT;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		} else {
			/*
			 * Reading last block? Make sure to not write beyond
			 * the requested size in the destination buffer.
			 */
			if (((block + 1) == beyond) || last_block_size) {
				void *buff;
				int dlen;

				/*
				 * We need to buffer the data locally for the
				 * last block, so that the destination area is
				 * not padded out to a multiple of
				 * UBIFS_BLOCK_SIZE.
				 */
				buff = malloc(UBIFS_BLOCK_SIZE);
				if (!buff) {
					printf("%s: Error, malloc fails!\n",
					       __func__);
					err = -ENOMEM;
					break;
				}

				/* Read block-size into temp buffer */
				ret = read_block(inode, buff, block, dn);
				if (ret) {
					err = ret;
					if (err != -ENOENT) {
						free(buff);
						break;
					}
				}

				if (last_block_size)
					dlen = last_block_size;
				else
					dlen = le32_to_cpu(dn->size);

				/* Now copy required size back to dest */
				memcpy(addr, buff, dlen);

				free(buff);
			} else {
				ret = read_block(inode, addr, block, dn);
				if (ret) {
					err = ret;
					if (err != -ENOENT)
						break;
				}
			}
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		block += 1;
		addr += UBIFS_BLOCK_SIZE;
	}
	if (err) {
		if (err == -ENOENT) {
			/* Not found, so it must be a hole */
			dbg_gen("hole");
			goto out_free;
		}
		ubifs_err("cannot read page %lu of inode %lu, error %d",
			  page->index, inode->i_ino, err);
		goto error;
	}

out_free:
	kfree(dn);
out:
	return 0;

error:
	kfree(dn);
	return err;
}
Example #2
/* Write function in ISP memory management */
int hmm_store(void *virt, const void *data, unsigned int bytes)
{
	unsigned int ptr;
	struct hmm_buffer_object *bo;
	unsigned int idx, offset, len;
	char *src, *des;
	int ret;

	ptr = (unsigned int)virt;

	bo = hmm_bo_device_search_in_range(&bo_device, ptr);
	ret = hmm_check_bo(bo, ptr);
	if (ret)
		return ret;

	src = (char *)data;
	while (bytes) {
		idx = (ptr - bo->vm_node->start) >> PAGE_SHIFT;
		offset = (ptr - bo->vm_node->start) - (idx << PAGE_SHIFT);

		if (in_atomic())
			des = (char *)kmap_atomic(bo->pages[idx]);
		else
			des = (char *)kmap(bo->pages[idx]);

		if (!des) {
			v4l2_err(&atomisp_dev,
				    "kmap buffer object page failed: "
				    "pg_idx = %d\n", idx);
			return -EINVAL;
		}

		des += offset;

		if ((bytes + offset) >= PAGE_SIZE) {
			len = PAGE_SIZE - offset;
			bytes -= len;
		} else {
			len = bytes;
			bytes = 0;
		}

		ptr += len;

#ifdef USE_SSSE3
		_ssse3_memcpy(des, src, len);
#else
		memcpy(des, src, len);
#endif
		src += len;

		if (in_atomic())
			/*
			 * Note: kunmap_atomic requires return addr from
			 * kmap_atomic, not the page. See linux/highmem.h
			 */
			kunmap_atomic(des - offset);
		else
			kunmap(bo->pages[idx]);
	}

	return 0;
}
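The in_atomic() branches above rely on the pairing rule spelled out in the comment: kunmap_atomic() takes the address returned by kmap_atomic(), while kunmap() takes the struct page itself. A minimal sketch of that rule using the modern one-argument kmap_atomic() (illustrative only, not part of the ISP driver; the caller is assumed to guarantee off + len <= PAGE_SIZE):

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy len bytes into page pg at offset off via a short-lived mapping. */
static void copy_into_page(struct page *pg, const void *src,
			   unsigned int off, unsigned int len)
{
	char *va = kmap_atomic(pg);	/* kernel virtual address of the page */

	memcpy(va + off, src, len);
	kunmap_atomic(va);		/* pass the mapped address, not the page */
}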
Example #3
/* capfs_readpage()
 *
 * Inside the page structure are "offset", the offset into the file, and
 * the page address, which can be found with "page_address(page)".
 *
 * See fs/nfs/read.c for readpage example.
 */
static int capfs_readpage(struct file *file, struct page *page)
{
	int error = 0;
	struct inode *inode;
	char *buf;
	capfs_off_t offset;
	size_t count = PAGE_SIZE;
	/* update the statistics */
	if(capfs_collect_stats) capfs_vfs_stat.readpage++;   
	PENTRY;

	/* from generic_readpage() */
	get_page(page);
	/* from brw_page() */
	ClearPageUptodate(page);
	ClearPageError(page);

	/* this should help readpage work correctly for big mem machines */
	buf = (char *)kmap(page);

	offset = ((loff_t)page->index) << PAGE_CACHE_SHIFT;
#if 0
	/* THIS IS WHAT I HAD BEFORE */
	offset = pgoff2loff(page->index);
#endif

	inode = file->f_dentry->d_inode;

	/* Added by Alan Rainey 9-10-2003 */
	if(strcmp(file->f_dentry->d_name.name, (char *)(strrchr(CAPFS_I(inode)->name, '/')+1)))
	{
		if ((error = capfs_inode_getattr(file->f_dentry)) < 0) {
			put_page(page);
			PEXIT;
			return error;
		}
	}

	memset(buf, 0, count);

	PDEBUG(D_FILE, "capfs_readpage called for %s (%ld), offset %ld, size %ld\n",
			CAPFS_I(inode)->name, (unsigned long) CAPFS_I(inode)->handle,
			(long) offset, (long) count);

	error = ll_capfs_file_read(CAPFS_I(inode), buf, count, &offset, 1);
	if (error <= 0) 
	{
		SetPageError(page);
	}
	else
	{
		SetPageUptodate(page);
		ClearPageError(page);
	}
	flush_dcache_page(page);
	kunmap(page);
	unlock_page(page);
	put_page(page); 

	PEXIT;
	return error;
}
Example #4
/*
 * Returns a pointer to a buffer containing at least LEN bytes of
 * filesystem starting at byte offset OFFSET into the filesystem.
 */
static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned int len)
{
	struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
	struct page *pages[BLKS_PER_BUF];
	unsigned i, blocknr, buffer;
	unsigned long devsize;
	char *data;

	if (!len)
		return NULL;
	blocknr = offset >> PAGE_CACHE_SHIFT;
	offset &= PAGE_CACHE_SIZE - 1;

	/* Check if an existing buffer already has the data.. */
	for (i = 0; i < READ_BUFFERS; i++) {
		unsigned int blk_offset;

		if (buffer_dev[i] != sb)
			continue;
		if (blocknr < buffer_blocknr[i])
			continue;
		blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_CACHE_SHIFT;
		blk_offset += offset;
		if (blk_offset + len > BUFFER_SIZE)
			continue;
		return read_buffers[i] + blk_offset;
	}

	devsize = mapping->host->i_size >> PAGE_CACHE_SHIFT;

	/* Ok, read in BLKS_PER_BUF pages completely first. */
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = NULL;

		if (blocknr + i < devsize) {
			page = read_mapping_page_async(mapping, blocknr + i,
									NULL);
			/* synchronous error? */
			if (IS_ERR(page))
				page = NULL;
		}
		pages[i] = page;
	}

	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = pages[i];
		if (page) {
			wait_on_page_locked(page);
			if (!PageUptodate(page)) {
				/* asynchronous error */
				page_cache_release(page);
				pages[i] = NULL;
			}
		}
	}

	buffer = next_buffer;
	next_buffer = NEXT_BUFFER(buffer);
	buffer_blocknr[buffer] = blocknr;
	buffer_dev[buffer] = sb;

	data = read_buffers[buffer];
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = pages[i];
		if (page) {
			memcpy(data, kmap(page), PAGE_CACHE_SIZE);
			kunmap(page);
			page_cache_release(page);
		} else
			memset(data, 0, PAGE_CACHE_SIZE);
		data += PAGE_CACHE_SIZE;
	}
	return read_buffers[buffer] + offset;
}
Example #5
/*
 * This does the "real" work of the write. The generic routine has
 * allocated the page, locked it, done all the page alignment stuff
 * calculations etc. Now we should just copy the data from user
 * space and write it back to the real medium..
 *
 * If the writer ends up delaying the write, the writer needs to
 * increment the page use counts until he is done with the page.
 */
static int nfs_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
{
	kmap(page);
	return nfs_flush_incompatible(file, page);
}
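nfs_prepare_write() deliberately leaves the page kmapped; the mapping is dropped in the matching commit_write callback once the copied range has been handed to the NFS write path. A hedged sketch of that counterpart (the exact body varies between kernel versions):

static int nfs_commit_write(struct file *file, struct page *page,
			    unsigned offset, unsigned to)
{
	long status;

	/* Queue the copied range for write-back to the server. */
	status = nfs_updatepage(file, page, offset, to - offset);
	/* Balance the kmap() taken in nfs_prepare_write(). */
	kunmap(page);
	return status;
}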
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyout(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyout(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = copyout(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyout(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
Example #7
Extent*
cchain(uchar *buf, ulong offset, int len, Extent **tail)
{
	int l;
	Page *p;
	KMap *k;
	Extent *e, *start, **t;

	start = 0;
	*tail = 0;
	t = &start;
	while(len) {
		e = extentalloc();
		if(e == 0)
			break;

		p = auxpage();
		if(p == 0) {
			extentfree(e);
			break;
		}
		l = len;
		if(l > BY2PG)
			l = BY2PG;

		e->cache = p;
		e->start = offset;
		e->len = l;

		qlock(&cache);
		e->bid = cache.pgno;
		cache.pgno += BY2PG;
		/* wrap the counter; low bits are unused by pghash but checked by lookpage */
		if((cache.pgno & ~(BY2PG-1)) == 0){
			if(cache.pgno == BY2PG-1){
				print("cache wrapped\n");
				cache.pgno = 0;
			}else
				cache.pgno++;
		}
		qunlock(&cache);

		p->daddr = e->bid;
		k = kmap(p);
		if(waserror()) {		/* buf may be virtual */
			kunmap(k);
			nexterror();
		}
		memmove((void*)VA(k), buf, l);
		poperror();
		kunmap(k);

		cachepage(p, &fscache);
		putpage(p);

		buf += l;
		offset += l;
		len -= l;

		*t = e;
		*tail = e;
		t = &e->next;
	}

	return start;
}
Example #8
/*H:010
 * We need to set up the Switcher at a high virtual address.  Remember the
 * Switcher is a few hundred bytes of assembler code which actually changes the
 * CPU to run the Guest, and then changes back to the Host when a trap or
 * interrupt happens.
 *
 * The Switcher code must be at the same virtual address in the Guest as the
 * Host since it will be running as the switchover occurs.
 *
 * Trying to map memory at a particular address is an unusual thing to do, so
 * it's not a simple one-liner.
 */
static __init int map_switcher(void)
{
	int i, err;

	/*
	 * Map the Switcher in to high memory.
	 *
	 * It turns out that if we choose the address 0xFFC00000 (4MB under the
	 * top virtual address), it makes setting up the page tables really
	 * easy.
	 */

	/* We assume Switcher text fits into a single page. */
	if (end_switcher_text - start_switcher_text > PAGE_SIZE) {
		printk(KERN_ERR "lguest: switcher text too large (%zu)\n",
		       end_switcher_text - start_switcher_text);
		return -EINVAL;
	}

	/*
	 * We allocate an array of struct page pointers.  map_vm_area() wants
	 * this, rather than just an array of pages.
	 */
	lg_switcher_pages = kmalloc(sizeof(lg_switcher_pages[0])
				    * TOTAL_SWITCHER_PAGES,
				    GFP_KERNEL);
	if (!lg_switcher_pages) {
		err = -ENOMEM;
		goto out;
	}

	/*
	 * Now we actually allocate the pages.  The Guest will see these pages,
	 * so we make sure they're zeroed.
	 */
	for (i = 0; i < TOTAL_SWITCHER_PAGES; i++) {
		lg_switcher_pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!lg_switcher_pages[i]) {
			err = -ENOMEM;
			goto free_some_pages;
		}
	}

	/*
	 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
	 * It goes in the first page, which we map in momentarily.
	 */
	memcpy(kmap(lg_switcher_pages[0]), start_switcher_text,
	       end_switcher_text - start_switcher_text);
	kunmap(lg_switcher_pages[0]);

	/*
	 * We place the Switcher underneath the fixmap area, which is the
	 * highest virtual address we can get.  This is important, since we
	 * tell the Guest it can't access this memory, so we want its ceiling
	 * as high as possible.
	 */
	switcher_addr = FIXADDR_START - TOTAL_SWITCHER_PAGES*PAGE_SIZE;

	/*
	 * Now we reserve the "virtual memory area"s we want.  We might
	 * not get them in theory, but in practice it's worked so far.
	 *
	 * We want the switcher text to be read-only and executable, and
	 * the stacks to be read-write and non-executable.
	 */
	switcher_text_vma = __get_vm_area(PAGE_SIZE, VM_ALLOC|VM_NO_GUARD,
					  switcher_addr,
					  switcher_addr + PAGE_SIZE);

	if (!switcher_text_vma) {
		err = -ENOMEM;
		printk("lguest: could not map switcher pages high\n");
		goto free_pages;
	}

	switcher_stacks_vma = __get_vm_area(SWITCHER_STACK_PAGES * PAGE_SIZE,
					    VM_ALLOC|VM_NO_GUARD,
					    switcher_addr + PAGE_SIZE,
					    switcher_addr + TOTAL_SWITCHER_PAGES * PAGE_SIZE);
	if (!switcher_stacks_vma) {
		err = -ENOMEM;
		printk("lguest: could not map switcher pages high\n");
		goto free_text_vma;
	}

	/*
	 * This code actually sets up the pages we've allocated to appear at
	 * switcher_addr.  map_vm_area() takes the vma we allocated above, the
	 * kind of pages we're mapping (kernel text pages and kernel writable
	 * pages respectively), and a pointer to our array of struct pages.
	 */
	err = map_vm_area(switcher_text_vma, PAGE_KERNEL_RX, lg_switcher_pages);
	if (err) {
		printk("lguest: text map_vm_area failed: %i\n", err);
		goto free_vmas;
	}

	err = map_vm_area(switcher_stacks_vma, PAGE_KERNEL,
			  lg_switcher_pages + SWITCHER_TEXT_PAGES);
	if (err) {
		printk("lguest: stacks map_vm_area failed: %i\n", err);
		goto free_vmas;
	}

	/*
	 * Now the Switcher is mapped at the right address, we can't fail!
	 */
	printk(KERN_INFO "lguest: mapped switcher at %p\n",
	       switcher_text_vma->addr);
	/* And we succeeded... */
	return 0;

free_vmas:
	/* Undoes map_vm_area and __get_vm_area */
	vunmap(switcher_stacks_vma->addr);
free_text_vma:
	vunmap(switcher_text_vma->addr);
free_pages:
	i = TOTAL_SWITCHER_PAGES;
free_some_pages:
	for (--i; i >= 0; i--)
		__free_pages(lg_switcher_pages[i], 0);
	kfree(lg_switcher_pages);
out:
	return err;
}
/* For every page of the file: read the page, cut the part of the extent
   pointing to this page, and put the page's data into the tree as tail items */
int extent2tail(struct file * file, struct unix_file_info *uf_info)
{
	int result;
	struct inode *inode;
	struct page *page;
	unsigned long num_pages, i;
	unsigned long start_page;
	reiser4_key from;
	reiser4_key to;
	unsigned count;
	__u64 offset;

	assert("nikita-3362", ea_obtained(uf_info));
	inode = unix_file_info_to_inode(uf_info);
	assert("nikita-3412", !IS_RDONLY(inode));
	assert("vs-1649", uf_info->container != UF_CONTAINER_TAILS);
	assert("", !reiser4_inode_get_flag(inode, REISER4_PART_IN_CONV));

	offset = 0;
	if (reiser4_inode_get_flag(inode, REISER4_PART_MIXED)) {
		/*
		 * file is marked on disk as there was a conversion which did
		 * not complete due to either crash or some error. Find which
		 * offset tail conversion stopped at
		 */
		result = find_start(inode, EXTENT_POINTER_ID, &offset);
		if (result == -ENOENT) {
			/* no extent found, everything is converted */
			uf_info->container = UF_CONTAINER_TAILS;
			complete_conversion(inode);
			return 0;
		} else if (result != 0)
			/* some other error */
			return result;
	}

	reiser4_inode_set_flag(inode, REISER4_PART_IN_CONV);

	/* number of pages in the file */
	num_pages =
	    (inode->i_size - offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	start_page = offset >> PAGE_CACHE_SHIFT;

	inode_file_plugin(inode)->key_by_inode(inode, offset, &from);
	to = from;

	result = 0;
	for (i = 0; i < num_pages; i++) {
		__u64 start_byte;

		result = reserve_extent2tail_iteration(inode);
		if (result != 0)
			break;
		if (i == 0 && offset == 0) {
			reiser4_inode_set_flag(inode, REISER4_PART_MIXED);
			reiser4_update_sd(inode);
		}

		page = read_mapping_page(inode->i_mapping,
					 (unsigned)(i + start_page), NULL);
		if (IS_ERR(page)) {
			result = PTR_ERR(page);
			break;
		}

		wait_on_page_locked(page);

		if (!PageUptodate(page)) {
			page_cache_release(page);
			result = RETERR(-EIO);
			break;
		}

		/* cut part of file we have read */
		start_byte = (__u64) ((i + start_page) << PAGE_CACHE_SHIFT);
		set_key_offset(&from, start_byte);
		set_key_offset(&to, start_byte + PAGE_CACHE_SIZE - 1);
		/*
		 * reiser4_cut_tree_object() returns -E_REPEAT to allow atom
		 * commits during over-long truncates. But
		 * extent->tail conversion should be performed in one
		 * transaction.
		 */
		result = reiser4_cut_tree(reiser4_tree_by_inode(inode), &from,
					  &to, inode, 0);

		if (result) {
			page_cache_release(page);
			break;
		}

		/* put page data into tree via tail_write */
		count = PAGE_CACHE_SIZE;
		if ((i == (num_pages - 1)) &&
		    (inode->i_size & ~PAGE_CACHE_MASK))
			/* the last page may be incomplete */
			count = (inode->i_size & ~PAGE_CACHE_MASK);
		while (count) {
			loff_t pos = start_byte;

			assert("edward-1537",
			       file != NULL && file->f_dentry != NULL);
			assert("edward-1538",
			       file->f_dentry->d_inode == inode);

			result = reiser4_write_tail(file, inode,
						    (char __user *)kmap(page),
						    count, &pos);
			reiser4_free_file_fsdata(file);
			if (result <= 0) {
				warning("", "reiser4_write_tail failed");
				page_cache_release(page);
				reiser4_inode_clr_flag(inode, REISER4_PART_IN_CONV);
				return result;
			}
			count -= result;
		}

		/* release page */
		lock_page(page);
		/* page is already detached from jnode and mapping. */
		assert("vs-1086", page->mapping == NULL);
		assert("nikita-2690",
		       (!PagePrivate(page) && jprivate(page) == 0));
		/* waiting for writeback completion with page lock held is
		 * perfectly valid. */
		wait_on_page_writeback(page);
		reiser4_drop_page(page);
		/* release reference taken by read_cache_page() above */
		page_cache_release(page);

		drop_exclusive_access(uf_info);
		/*
		 * throttle the conversion.
		 * FIXME-EDWARD: Calculate and pass the precise number
		 * of pages that was dirtied
		 */
		reiser4_throttle_write(inode, 1);
		get_exclusive_access(uf_info);
		/*
		 * nobody is allowed to complete conversion but a process which
		 * started it
		 */
		assert("", reiser4_inode_get_flag(inode, REISER4_PART_MIXED));
	}

	reiser4_inode_clr_flag(inode, REISER4_PART_IN_CONV);

	if (i == num_pages) {
		/* file is converted to formatted items */
		assert("vs-1698", reiser4_inode_get_flag(inode,
							 REISER4_PART_MIXED));
		assert("vs-1260",
		       inode_has_no_jnodes(reiser4_inode_data(inode)));

		uf_info->container = UF_CONTAINER_TAILS;
		complete_conversion(inode);
		return 0;
	}
	/*
	 * conversion is not complete. Inode was already marked as
	 * REISER4_PART_MIXED and stat-data were updated at the first
	 * iteration of the loop above.
	 */
	warning("nikita-2282",
		"Partial conversion of %llu: %lu of %lu: %i",
		(unsigned long long)get_inode_oid(inode), i,
		num_pages, result);

	/* this flag should be cleared, otherwise get_exclusive_access_careful()
	   will fall into infinite loop */
	assert("edward-1550", !reiser4_inode_get_flag(inode,
						      REISER4_PART_IN_CONV));
	return result;
}
Example #10
struct dentry *f2fs_get_parent(struct dentry *child)
{
	struct qstr dotdot = {.len = 2, .name = ".."};
	unsigned long ino = f2fs_inode_by_name(child->d_inode, &dotdot);
	if (!ino)
		return ERR_PTR(-ENOENT);
	return d_obtain_alias(f2fs_iget(child->d_inode->i_sb, ino));
}

static int __recover_dot_dentries(struct inode *dir, nid_t pino)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct qstr dot = {.len = 1, .name = "."};
	struct qstr dotdot = {.len = 2, .name = ".."};
	struct f2fs_dir_entry *de;
	struct page *page;
	int err = 0;

	f2fs_lock_op(sbi);

	de = f2fs_find_entry(dir, &dot, &page);
	if (de) {
		f2fs_dentry_kunmap(dir, page);
		f2fs_put_page(page, 0);
	} else {
		err = __f2fs_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR);
		if (err)
			goto out;
	}

	de = f2fs_find_entry(dir, &dotdot, &page);
	if (de) {
		f2fs_dentry_kunmap(dir, page);
		f2fs_put_page(page, 0);
	} else {
		err = __f2fs_add_link(dir, &dotdot, NULL, pino, S_IFDIR);
	}
out:
	if (!err) {
		clear_inode_flag(F2FS_I(dir), FI_INLINE_DOTS);
		mark_inode_dirty(dir);
	}

	f2fs_unlock_op(sbi);
	return err;
}

static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
					struct nameidata *nd)
{
	struct inode *inode = NULL;
	struct f2fs_dir_entry *de;
	struct page *page;
	nid_t ino;
	int err = 0;

	if (dentry->d_name.len > F2FS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	de = f2fs_find_entry(dir, &dentry->d_name, &page);
	if (!de)
		return d_splice_alias(inode, dentry);

	ino = le32_to_cpu(de->ino);
	f2fs_dentry_kunmap(dir, page);
	f2fs_put_page(page, 0);

	inode = f2fs_iget(dir->i_sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	if (f2fs_has_inline_dots(inode)) {
		err = __recover_dot_dentries(inode, dir->i_ino);
		if (err)
			goto err_out;
	}
	return d_splice_alias(inode, dentry);

err_out:
	iget_failed(inode);
	return ERR_PTR(err);
}

static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode = dentry->d_inode;
	struct f2fs_dir_entry *de;
	struct page *page;
	int err = -ENOENT;

	trace_f2fs_unlink_enter(dir, dentry);
	f2fs_balance_fs(sbi);

	de = f2fs_find_entry(dir, &dentry->d_name, &page);
	if (!de)
		goto fail;

	f2fs_lock_op(sbi);
	err = acquire_orphan_inode(sbi);
	if (err) {
		f2fs_unlock_op(sbi);
		f2fs_dentry_kunmap(dir, page);
		f2fs_put_page(page, 0);
		goto fail;
	}
	f2fs_delete_entry(de, page, dir, inode);
	f2fs_unlock_op(sbi);

	/* In order to evict this inode, we set it dirty */
	mark_inode_dirty(inode);

	if (IS_DIRSYNC(dir))
		f2fs_sync_fs(sbi->sb, 1);
fail:
	trace_f2fs_unlink_exit(inode, err);
	return err;
}

static void *f2fs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page;

	page = page_follow_link_light(dentry, nd);
	if (IS_ERR(page))
		return page;

	/* this is broken symlink case */
	if (*nd_get_link(nd) == 0) {
		kunmap(page);
		page_cache_release(page);
		return ERR_PTR(-ENOENT);
	}
	return page;
}

static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
					const char *symname)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode;
	size_t len = strlen(symname);
	size_t p_len;
	char *p_str;
	struct f2fs_str disk_link = FSTR_INIT(NULL, 0);
	struct f2fs_encrypted_symlink_data *sd = NULL;
	int err;

	if (len > dir->i_sb->s_blocksize)
		return -ENAMETOOLONG;

	f2fs_balance_fs(sbi);

	inode = f2fs_new_inode(dir, S_IFLNK | S_IRWXUGO);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	if (f2fs_encrypted_inode(inode))
		inode->i_op = &f2fs_encrypted_symlink_inode_operations;
	else
		inode->i_op = &f2fs_symlink_inode_operations;
	inode->i_mapping->a_ops = &f2fs_dblock_aops;

	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	if (err)
		goto out;
	f2fs_unlock_op(sbi);
	alloc_nid_done(sbi, inode->i_ino);

	if (f2fs_encrypted_inode(dir)) {
		struct qstr istr = QSTR_INIT(symname, len);

		err = f2fs_get_encryption_info(inode);
		if (err)
			goto err_out;

		err = f2fs_fname_crypto_alloc_buffer(inode, len, &disk_link);
		if (err)
			goto err_out;

		err = f2fs_fname_usr_to_disk(inode, &istr, &disk_link);
		if (err < 0)
			goto err_out;

		p_len = encrypted_symlink_data_len(disk_link.len) + 1;

		if (p_len > dir->i_sb->s_blocksize) {
			err = -ENAMETOOLONG;
			goto err_out;
		}

		sd = kzalloc(p_len, GFP_NOFS);
		if (!sd) {
			err = -ENOMEM;
			goto err_out;
		}
		memcpy(sd->encrypted_path, disk_link.name, disk_link.len);
		sd->len = cpu_to_le16(disk_link.len);
		p_str = (char *)sd;
	} else {
		p_len = len + 1;
		p_str = (char *)symname;
	}

	err = page_symlink(inode, p_str, p_len);

err_out:
	d_instantiate(dentry, inode);
	unlock_new_inode(inode);

	/*
	 * Let's flush symlink data in order to avoid broken symlink as much as
	 * possible. Nevertheless, fsyncing is the best way, but there is no
	 * way to get a file descriptor in order to flush that.
	 *
	 * Note that, it needs to do dir->fsync to make this recoverable.
	 * If the symlink path is stored into inline_data, there is no
	 * performance regression.
	 */
	if (!err)
		filemap_write_and_wait_range(inode->i_mapping, 0, p_len - 1);

	if (IS_DIRSYNC(dir))
		f2fs_sync_fs(sbi->sb, 1);

	kfree(sd);
	f2fs_fname_crypto_free_buffer(&disk_link);
	return err;
out:
	handle_failed_inode(inode);
	return err;
}

static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode;
	int err;

	f2fs_balance_fs(sbi);

	inode = f2fs_new_inode(dir, S_IFDIR | mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &f2fs_dir_inode_operations;
	inode->i_fop = &f2fs_dir_operations;
	inode->i_mapping->a_ops = &f2fs_dblock_aops;
	mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);

	set_inode_flag(F2FS_I(inode), FI_INC_LINK);
	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	if (err)
		goto out_fail;
	f2fs_unlock_op(sbi);

	alloc_nid_done(sbi, inode->i_ino);

	d_instantiate(dentry, inode);
	unlock_new_inode(inode);

	if (IS_DIRSYNC(dir))
		f2fs_sync_fs(sbi->sb, 1);
	return 0;

out_fail:
	clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
	handle_failed_inode(inode);
	return err;
}

static int f2fs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	if (f2fs_empty_dir(inode))
		return f2fs_unlink(dir, dentry);
	return -ENOTEMPTY;
}

static int f2fs_mknod(struct inode *dir, struct dentry *dentry,
				umode_t mode, dev_t rdev)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct inode *inode;
	int err = 0;

	if (!new_valid_dev(rdev))
		return -EINVAL;

	f2fs_balance_fs(sbi);

	inode = f2fs_new_inode(dir, mode);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	init_special_inode(inode, inode->i_mode, rdev);
	inode->i_op = &f2fs_special_inode_operations;

	f2fs_lock_op(sbi);
	err = f2fs_add_link(dentry, inode);
	if (err)
		goto out;
	f2fs_unlock_op(sbi);

	alloc_nid_done(sbi, inode->i_ino);

	d_instantiate(dentry, inode);
	unlock_new_inode(inode);

	if (IS_DIRSYNC(dir))
		f2fs_sync_fs(sbi->sb, 1);
	return 0;
out:
	handle_failed_inode(inode);
	return err;
}

static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(old_dir);
	struct inode *old_inode = old_dentry->d_inode;
	struct inode *new_inode = new_dentry->d_inode;
	struct page *old_dir_page;
	struct page *old_page, *new_page;
	struct f2fs_dir_entry *old_dir_entry = NULL;
	struct f2fs_dir_entry *old_entry;
	struct f2fs_dir_entry *new_entry;
	int err = -ENOENT;

	if ((old_dir != new_dir) && f2fs_encrypted_inode(new_dir) &&
		!f2fs_is_child_context_consistent_with_parent(new_dir,
							old_inode)) {
		err = -EPERM;
		goto out;
	}

	f2fs_balance_fs(sbi);

	old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
	if (!old_entry)
		goto out;

	if (S_ISDIR(old_inode->i_mode)) {
		err = -EIO;
		old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page);
		if (!old_dir_entry)
			goto out_old;
	}

	if (new_inode) {

		err = -ENOTEMPTY;
		if (old_dir_entry && !f2fs_empty_dir(new_inode))
			goto out_dir;

		err = -ENOENT;
		new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
						&new_page);
		if (!new_entry)
			goto out_dir;

		f2fs_lock_op(sbi);

		err = acquire_orphan_inode(sbi);
		if (err)
			goto put_out_dir;

		if (update_dent_inode(old_inode, new_inode,
						&new_dentry->d_name)) {
			release_orphan_inode(sbi);
			goto put_out_dir;
		}

		f2fs_set_link(new_dir, new_entry, new_page, old_inode);

		new_inode->i_ctime = CURRENT_TIME;
		down_write(&F2FS_I(new_inode)->i_sem);
		if (old_dir_entry)
			drop_nlink(new_inode);
		drop_nlink(new_inode);
		up_write(&F2FS_I(new_inode)->i_sem);

		mark_inode_dirty(new_inode);

		if (!new_inode->i_nlink)
			add_orphan_inode(sbi, new_inode->i_ino);
		else
			release_orphan_inode(sbi);

		update_inode_page(old_inode);
		update_inode_page(new_inode);
	} else {
		f2fs_lock_op(sbi);

		err = f2fs_add_link(new_dentry, old_inode);
		if (err) {
			f2fs_unlock_op(sbi);
			goto out_dir;
		}

		if (old_dir_entry) {
			inc_nlink(new_dir);
			update_inode_page(new_dir);
		}
	}

	down_write(&F2FS_I(old_inode)->i_sem);
	file_lost_pino(old_inode);
	if (new_inode && file_enc_name(new_inode))
		file_set_enc_name(old_inode);
	up_write(&F2FS_I(old_inode)->i_sem);

	old_inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(old_inode);

	f2fs_delete_entry(old_entry, old_page, old_dir, NULL);

	if (old_dir_entry) {
		if (old_dir != new_dir) {
			f2fs_set_link(old_inode, old_dir_entry,
						old_dir_page, new_dir);
			update_inode_page(old_inode);
		} else {
			f2fs_dentry_kunmap(old_inode, old_dir_page);
			f2fs_put_page(old_dir_page, 0);
		}
		drop_nlink(old_dir);
		mark_inode_dirty(old_dir);
		update_inode_page(old_dir);
	}

	f2fs_unlock_op(sbi);

	if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
		f2fs_sync_fs(sbi->sb, 1);
	return 0;

put_out_dir:
	f2fs_unlock_op(sbi);
	f2fs_dentry_kunmap(new_dir, new_page);
	f2fs_put_page(new_page, 0);
out_dir:
	if (old_dir_entry) {
		f2fs_dentry_kunmap(old_inode, old_dir_page);
		f2fs_put_page(old_dir_page, 0);
	}
out_old:
	f2fs_dentry_kunmap(old_dir, old_page);
	f2fs_put_page(old_page, 0);
out:
	return err;
}

#ifdef CONFIG_F2FS_FS_ENCRYPTION
static void *f2fs_encrypted_follow_link(struct dentry *dentry,
						struct nameidata *nd)
{
	struct page *cpage = NULL;
	char *caddr, *paddr = NULL;
	struct f2fs_str cstr;
	struct f2fs_str pstr = FSTR_INIT(NULL, 0);
	struct inode *inode = dentry->d_inode;
	struct f2fs_encrypted_symlink_data *sd;
	loff_t size = min_t(loff_t, i_size_read(inode), PAGE_SIZE - 1);
	u32 max_size = inode->i_sb->s_blocksize;
	int res;

	res = f2fs_get_encryption_info(inode);
	if (res)
		return ERR_PTR(res);

	cpage = read_mapping_page(inode->i_mapping, 0, NULL);
	if (IS_ERR(cpage))
		return cpage;
	caddr = kmap(cpage);
	caddr[size] = 0;

	/* Symlink is encrypted */
	sd = (struct f2fs_encrypted_symlink_data *)caddr;
	cstr.name = sd->encrypted_path;
	cstr.len = le16_to_cpu(sd->len);

	/* this is broken symlink case */
	if (cstr.name[0] == 0 && cstr.len == 0) {
		res = -ENOENT;
		goto errout;
	}

	if ((cstr.len + sizeof(struct f2fs_encrypted_symlink_data) - 1) >
								max_size) {
		/* Symlink data on the disk is corrupted */
		res = -EIO;
		goto errout;
	}
	res = f2fs_fname_crypto_alloc_buffer(inode, cstr.len, &pstr);
	if (res)
		goto errout;

	res = f2fs_fname_disk_to_usr(inode, NULL, &cstr, &pstr);
	if (res < 0)
		goto errout;

	paddr = pstr.name;

	/* Null-terminate the name */
	paddr[res] = '\0';
	nd_set_link(nd, paddr);

	kunmap(cpage);
	page_cache_release(cpage);
	return NULL;
errout:
	f2fs_fname_crypto_free_buffer(&pstr);
	kunmap(cpage);
	page_cache_release(cpage);
	return ERR_PTR(res);
}

void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
		void *cookie)
{
	char *s = nd_get_link(nd);
	if (!IS_ERR(s))
		kfree(s);
}

const struct inode_operations f2fs_encrypted_symlink_inode_operations = {
	.readlink       = generic_readlink,
	.follow_link    = f2fs_encrypted_follow_link,
	.put_link       = kfree_put_link,
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
};
#endif

const struct inode_operations f2fs_dir_inode_operations = {
	.create		= f2fs_create,
	.lookup		= f2fs_lookup,
	.link		= f2fs_link,
	.unlink		= f2fs_unlink,
	.symlink	= f2fs_symlink,
	.mkdir		= f2fs_mkdir,
	.rmdir		= f2fs_rmdir,
	.mknod		= f2fs_mknod,
	.rename		= f2fs_rename,
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

const struct inode_operations f2fs_symlink_inode_operations = {
	.readlink       = generic_readlink,
	.follow_link    = f2fs_follow_link,
	.put_link       = page_put_link,
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
};

const struct inode_operations f2fs_special_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr        = f2fs_setattr,
	.get_acl	= f2fs_get_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr       = generic_setxattr,
	.getxattr       = generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr    = generic_removexattr,
#endif
};
Example #11
static int udf_adinicb_prepare_write(struct file *file, struct page *page,
				     unsigned offset, unsigned to)
{
	kmap(page);
	return 0;
}
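As in the NFS example earlier, the page is left kmapped so the commit step can copy the written range into the inode's in-ICB data area before unmapping. A hedged sketch of what the matching commit callback roughly looks like (UDF_I_DATA and UDF_I_LENEATTR are assumed from the same kernel era and may differ in other versions):

static int udf_adinicb_commit_write(struct file *file, struct page *page,
				    unsigned offset, unsigned to)
{
	struct inode *inode = page->mapping->host;
	char *kaddr = page_address(page);

	/* Copy the written range into the in-ICB (inline) data area. */
	memcpy(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode) + offset,
	       kaddr + offset, to - offset);
	mark_inode_dirty(inode);
	SetPageUptodate(page);
	kunmap(page);	/* balances the kmap() in prepare_write */
	return 0;
}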
Example #12
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	struct page *page;
	void *old_buf = buf;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);
	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, ret, offset;
		void *maddr;
		unsigned long paddr;
		int xip = 0;
#ifdef CONFIG_CRAMFS_XIP_DEBUGGABLE
		if (xip_enable_debug && !write) {
			vma = find_extend_vma(mm, addr);
			if (vma && (vma->vm_flags & VM_XIP))
				xip = find_xip_untouched_entry(mm, addr, &paddr);
		}
#endif
		if (xip) {
			maddr = ioremap(paddr, PAGE_SIZE);
			if (!maddr) 
				break;
			page = NULL;
		} else {
			ret = get_user_pages(tsk, mm, addr, 1,
					     write, 1, &page, &vma);
			if (ret <= 0)
				break;
			maddr = kmap(page);
		}
		
		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;

		if (write) {
			copy_to_user_page(vma, page, addr,
					  maddr + offset, buf, bytes);
			set_page_dirty_lock(page);
		} else {
			copy_from_user_page(vma, page, addr,
					    buf, maddr + offset, bytes);
		}
		
		if (xip) 
			iounmap(maddr);
		else {
			kunmap(page);
			page_cache_release(page);
		}

		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);
	mmput(mm);
	
	return buf - old_buf;
}
Example #13
static void *mock_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);

	return kmap(mock->pages[page_num]);
}
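A dma-buf kmap hook is normally paired with a kunmap hook that releases the same page. A sketch of the counterpart, assuming the same to_mock() helper and pages[] array as above:

static void mock_dmabuf_kunmap(struct dma_buf *dma_buf,
			       unsigned long page_num, void *addr)
{
	struct mock_dmabuf *mock = to_mock(dma_buf);

	kunmap(mock->pages[page_num]);
}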
Example #14
/* Map a non page aligned fbchain into user space.  This
 * requires creating an iovec and populating it correctly */
static int map_fb_to_user_sg(struct file *filep, struct pme_fbchain *buffers,
				unsigned long *user_addr, size_t *size)
{
	void *data;
	size_t data_size;
	struct iovec *vect;
	int vector_size, ret, list_count, index = 0;
	unsigned long paddr;
	struct vm_area_struct *vma;
	struct pme_fb_vma *mem_node, *iovec_mem_node;
	list_count = pme_fbchain_num(buffers);

	vector_size = sizeof(struct iovec) * list_count;
	iovec_mem_node = fb_vma_create(NULL, fb_phys_mapped, 1, list_count,
			vector_size, 0);
	if (!iovec_mem_node)
		return -ENOMEM;

	/* The space for the iovec is allocated as whole pages and
	 * a kernel mapping needs to be created in case they were
	 * allocated from high mem */
	vect = kmap(iovec_mem_node->iovec_pages);
	/* Create a mem node to keep track of the fbchain.
	 * Otherwise, we won't know when to release the freebuff list */
	mem_node = fb_vma_create(buffers, fb_phys_mapped, 0, 0, 0, 0);
	if (!mem_node) {
		kunmap(iovec_mem_node->iovec_pages);
		fb_vma_free(iovec_mem_node);
		return -ENOMEM;
	}
	/* For each freebuff, map it to user space, storing the
	 * userspace data in the iovec */
	data = pme_fbchain_current(buffers);

	down_write(&current->mm->mmap_sem);

	while (data) {
		data_size = pme_fbchain_current_bufflen(buffers);
		vect[index].iov_base = (void *) do_mmap(filep, 0,
							data_size +
							offset_in_page(data),
							PROT_READ | PROT_WRITE,
							MAP_PRIVATE,
							virt_to_phys(data) &
							PAGE_MASK);
		ret = check_mmap_result(vect[index].iov_base);
		if (ret)
			/* Need to unmap any previous successes */
			goto err;

		vma = find_vma(current->mm,
				(unsigned long) vect[index].iov_base);

		vma->vm_private_data = mem_node;
		atomic_inc(&mem_node->ref_count);

		vect[index].iov_base += offset_in_page(data);
		vect[index].iov_len = data_size;
		++index;
		data = pme_fbchain_next(buffers);
	}

	/* Now map the iovec into user space */
	paddr = page_to_pfn(iovec_mem_node->iovec_pages) << PAGE_SHIFT;
	*user_addr = (unsigned long) do_mmap(filep, 0,
					     vector_size +
					     offset_in_page(paddr),
					     PROT_READ |
					     PROT_WRITE, MAP_PRIVATE,
					     paddr & PAGE_MASK);

	ret = check_mmap_result((void *) *user_addr);
	if (ret)
		goto err;

	vma = find_vma(current->mm, (unsigned long) *user_addr);

	vma->vm_private_data = iovec_mem_node;

	up_write(&current->mm->mmap_sem);
	*user_addr += offset_in_page(paddr);
	*size = list_count;
	kunmap(iovec_mem_node->iovec_pages);
	return PME_MEM_SG;
err:
	while (index--)
		do_munmap(current->mm,
			((unsigned long)vect[index].iov_base) & PAGE_MASK,
			 vect[index].iov_len +
			 offset_in_page(vect[index].iov_base));

	up_write(&current->mm->mmap_sem);
	kunmap(iovec_mem_node->iovec_pages);
	return -EINVAL;
}
Example #15
void *co_os_map(struct co_manager *manager, co_pfn_t pfn)
{
	return kmap(pfn_to_page(pfn));
}
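The mapping returned here has to be released with kunmap() on the same page. A hedged sketch of a hypothetical counterpart (co_os_unmap is an assumed name, not necessarily the real coLinux API):

void co_os_unmap(struct co_manager *manager, void *ptr, co_pfn_t pfn)
{
	kunmap(pfn_to_page(pfn));
}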
Example #16
static int do_readpage(struct ubifs_info *c, struct inode *inode, struct page *page)
{
	void *addr;
	int err = 0, i;
	unsigned int block, beyond;
	struct ubifs_data_node *dn;
	loff_t i_size = inode->i_size;

	dbg_gen("ino %lu, pg %lu, i_size %lld",
		inode->i_ino, page->index, i_size);

	addr = kmap(page);

	block = page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT;
	beyond = (i_size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
	if (block >= beyond) {
		/* Reading beyond inode */
		memset(addr, 0, PAGE_CACHE_SIZE);
		goto out;
	}

	dn = kmalloc(UBIFS_MAX_DATA_NODE_SZ, GFP_NOFS);
	if (!dn) {
		err = -ENOMEM;
		goto error;
	}

	i = 0;
	while (1) {
		int ret;

		if (block >= beyond) {
			/* Reading beyond inode */
			err = -ENOENT;
			memset(addr, 0, UBIFS_BLOCK_SIZE);
		} else {
			ret = read_block(inode, addr, block, dn);
			if (ret) {
				err = ret;
				if (err != -ENOENT)
					break;
			} else if (block + 1 == beyond) {
				int dlen = le32_to_cpu(dn->size);
				int ilen = i_size & (UBIFS_BLOCK_SIZE - 1);

				if (ilen && ilen < dlen)
					memset(addr + ilen, 0, dlen - ilen);
			}
		}
		if (++i >= UBIFS_BLOCKS_PER_PAGE)
			break;
		block += 1;
		addr += UBIFS_BLOCK_SIZE;
	}
	if (err) {
		if (err == -ENOENT) {
			/* Not found, so it must be a hole */
			dbg_gen("hole");
			goto out_free;
		}
		ubifs_err("cannot read page %lu of inode %lu, error %d",
			  page->index, inode->i_ino, err);
		goto error;
	}

out_free:
	kfree(dn);
out:
	kunmap(page);
	return 0;

error:
	kfree(dn);
	kunmap(page);
	return err;
}
Example #17
/*
 * Read a directory, using filldir to fill the dirent memory.
 * smb_proc_readdir does the actual reading from the smb server.
 *
 * The cache code is almost directly taken from ncpfs
 */
static int
smb_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
    struct dentry *dentry = filp->f_path.dentry;
    struct inode *dir = dentry->d_inode;
    struct smb_sb_info *server = server_from_dentry(dentry);
    union  smb_dir_cache *cache = NULL;
    struct smb_cache_control ctl;
    struct page *page = NULL;
    int result;

    ctl.page  = NULL;
    ctl.cache = NULL;

    VERBOSE("reading %s/%s, f_pos=%d\n",
            DENTRY_PATH(dentry),  (int) filp->f_pos);

    result = 0;

    lock_kernel();

    switch ((unsigned int) filp->f_pos) {
    case 0:
        if (filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR) < 0)
            goto out;
        filp->f_pos = 1;
    /* fallthrough */
    case 1:
        if (filldir(dirent, "..", 2, 1, parent_ino(dentry), DT_DIR) < 0)
            goto out;
        filp->f_pos = 2;
    }

    /*
     * Make sure our inode is up-to-date.
     */
    result = smb_revalidate_inode(dentry);
    if (result)
        goto out;


    page = grab_cache_page(&dir->i_data, 0);
    if (!page)
        goto read_really;

    ctl.cache = cache = kmap(page);
    ctl.head  = cache->head;

    if (!PageUptodate(page) || !ctl.head.eof) {
        VERBOSE("%s/%s, page uptodate=%d, eof=%d\n",
                DENTRY_PATH(dentry), PageUptodate(page),ctl.head.eof);
        goto init_cache;
    }

    if (filp->f_pos == 2) {
        if (jiffies - ctl.head.time >= SMB_MAX_AGE(server))
            goto init_cache;

        /*
         * N.B. ncpfs checks mtime of dentry too here, we don't.
         *   1. common smb servers do not update mtime on dir changes
         *   2. it requires an extra smb request
         *      (revalidate has the same timeout as ctl.head.time)
         *
         * Instead smbfs invalidates its own cache on local changes
         * and remote changes are not seen until timeout.
         */
    }

    if (filp->f_pos > ctl.head.end)
        goto finished;

    ctl.fpos = filp->f_pos + (SMB_DIRCACHE_START - 2);
    ctl.ofs  = ctl.fpos / SMB_DIRCACHE_SIZE;
    ctl.idx  = ctl.fpos % SMB_DIRCACHE_SIZE;

    for (;;) {
        if (ctl.ofs != 0) {
            ctl.page = find_lock_page(&dir->i_data, ctl.ofs);
            if (!ctl.page)
                goto invalid_cache;
            ctl.cache = kmap(ctl.page);
            if (!PageUptodate(ctl.page))
                goto invalid_cache;
        }
        while (ctl.idx < SMB_DIRCACHE_SIZE) {
            struct dentry *dent;
            int res;

            dent = smb_dget_fpos(ctl.cache->dentry[ctl.idx],
                                 dentry, filp->f_pos);
            if (!dent)
                goto invalid_cache;

            res = filldir(dirent, dent->d_name.name,
                          dent->d_name.len, filp->f_pos,
                          dent->d_inode->i_ino, DT_UNKNOWN);
            dput(dent);
            if (res)
                goto finished;
            filp->f_pos += 1;
            ctl.idx += 1;
            if (filp->f_pos > ctl.head.end)
                goto finished;
        }
        if (ctl.page) {
            kunmap(ctl.page);
            SetPageUptodate(ctl.page);
            unlock_page(ctl.page);
            page_cache_release(ctl.page);
            ctl.page = NULL;
        }
        ctl.idx  = 0;
        ctl.ofs += 1;
    }
invalid_cache:
    if (ctl.page) {
        kunmap(ctl.page);
        unlock_page(ctl.page);
        page_cache_release(ctl.page);
        ctl.page = NULL;
    }
    ctl.cache = cache;
init_cache:
    smb_invalidate_dircache_entries(dentry);
    ctl.head.time = jiffies;
    ctl.head.eof = 0;
    ctl.fpos = 2;
    ctl.ofs = 0;
    ctl.idx = SMB_DIRCACHE_START;
    ctl.filled = 0;
    ctl.valid  = 1;
read_really:
    result = server->ops->readdir(filp, dirent, filldir, &ctl);
    if (result == -ERESTARTSYS && page)
        ClearPageUptodate(page);
    if (ctl.idx == -1)
        goto invalid_cache;	/* retry */
    ctl.head.end = ctl.fpos - 1;
    ctl.head.eof = ctl.valid;
finished:
    if (page) {
        cache->head = ctl.head;
        kunmap(page);
        if (result != -ERESTARTSYS)
            SetPageUptodate(page);
        unlock_page(page);
        page_cache_release(page);
    }
    if (ctl.page) {
        kunmap(ctl.page);
        SetPageUptodate(ctl.page);
        unlock_page(ctl.page);
        page_cache_release(ctl.page);
    }
out:
    unlock_kernel();
    return result;
}
Example #18
static void kcdfsd_process_request(void){
  struct list_head * tmp;
  struct kcdfsd_req * req;
  struct page * page;
  struct inode * inode;
  unsigned request;
  
  while (!list_empty (&kcdfsd_req_list)){
    /* Grab the next entry from the beginning of the list */
    tmp = kcdfsd_req_list.next;
    req = list_entry (tmp, struct kcdfsd_req, req_list);
    list_del (tmp);
    page = req->page;
    inode = req->dentry->d_inode;
    request = req->request_type;
    if (!PageLocked(page))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12))
      PAGE_BUG(page);
#else
      BUG();
#endif

    switch (request){
      case CDDA_REQUEST:
      case CDDA_RAW_REQUEST:
        {
          cd *this_cd = cdfs_info (inode->i_sb);
          char *p;
          track_info *this_track = &(this_cd->track[inode->i_ino]);
          cdfs_cdda_file_read (inode,
                               p = (char *) kmap (page),
                               1 << PAGE_CACHE_SHIFT,
                               (page->index << PAGE_CACHE_SHIFT) +
                               ((this_track->avi) ? this_track->
                                avi_offset : 0),
                               (request == CDDA_RAW_REQUEST));
          if ((this_track->avi) && (this_track->avi_swab)){
              int k;
              for (k=0; k<(1 << PAGE_CACHE_SHIFT); k+=2){
                  char c;
                  c = p[k];
                  p[k] = p[k + 1];
                  p[k + 1] = c;
                }
            }
        }
        break;
      case CDXA_REQUEST:
        cdfs_copy_from_cdXA(inode->i_sb,
                            inode->i_ino,
                            page->index << PAGE_CACHE_SHIFT,
                            (page->index + 1) << PAGE_CACHE_SHIFT,
                            (char *)kmap(page));
        break;
      case CDDATA_REQUEST:
        cdfs_copy_from_cddata(inode->i_sb,
                              inode->i_ino,
                              page->index << PAGE_CACHE_SHIFT,
                              (page->index + 1) << PAGE_CACHE_SHIFT,
                              (char *)kmap(page));
        break;
      case CDHFS_REQUEST:
        cdfs_copy_from_cdhfs(inode->i_sb,
                             inode->i_ino,
                             page->index << PAGE_CACHE_SHIFT,
                             (page->index + 1) << PAGE_CACHE_SHIFT,
                             (char *)kmap(page));
        break;
    }

    SetPageUptodate (page);
    kunmap (page);
    unlock_page (page);
    kfree (req);
  }
}
Example #19
int
cread(Chan *c, uchar *buf, int len, vlong off)
{
	KMap *k;
	Page *p;
	Mntcache *m;
	Extent *e, **t;
	int o, l, total;
	ulong offset;

	if(off+len > maxcache)
		return 0;

	m = c->mcp;
	if(m == 0)
		return 0;

	qlock(m);
	if(cdev(m, c) == 0) {
		qunlock(m);
		return 0;
	}

	offset = off;
	t = &m->list;
	for(e = *t; e; e = e->next) {
		if(offset >= e->start && offset < e->start+e->len)
			break;
		t = &e->next;
	}

	if(e == 0) {
		qunlock(m);
		return 0;
	}

	total = 0;
	while(len) {
		p = cpage(e);
		if(p == 0) {
			*t = e->next;
			extentfree(e);
			qunlock(m);
			return total;
		}

		o = offset - e->start;
		l = len;
		if(l > e->len-o)
			l = e->len-o;

		k = kmap(p);
		if(waserror()) {
			kunmap(k);
			putpage(p);
			qunlock(m);
			nexterror();
		}

		memmove(buf, (uchar*)VA(k) + o, l);

		poperror();
		kunmap(k);

		putpage(p);

		buf += l;
		len -= l;
		offset += l;
		total += l;
		t = &e->next;
		e = e->next;
		if(e == 0 || e->start != offset)
			break;
	}

	qunlock(m);
	return total;
}
Example #20
/**
 * get_pageset1_load_addresses - generate pbes for conflicting pages
 *
 * We check here that the pagedir and the pages it points to won't collide
 * with the pages we are going to restore the loaded image into later.
 *
 * Returns:
 *	Zero on success, -ENOMEM if we couldn't find enough pages (shouldn't
 *	happen).
 **/
int toi_get_pageset1_load_addresses(void)
{
	int pfn, highallocd = 0, lowallocd = 0;
	int low_needed = pagedir1.size - get_highmem_size(pagedir1);
	int high_needed = get_highmem_size(pagedir1);
	int low_pages_for_highmem = 0;
	gfp_t flags = GFP_ATOMIC | __GFP_NOWARN | __GFP_HIGHMEM;
	struct page *page, *high_pbe_page = NULL, *last_high_pbe_page = NULL,
		    *low_pbe_page, *last_low_pbe_page = NULL;
	struct pbe **last_high_pbe_ptr = &restore_highmem_pblist,
		   *this_high_pbe = NULL;
	int orig_low_pfn, orig_high_pfn;
	int high_pbes_done = 0, low_pbes_done = 0;
	int low_direct = 0, high_direct = 0, result = 0, i;
	int high_page = 1, high_offset = 0, low_page = 1, low_offset = 0;

	memory_bm_set_iterators(pageset1_map, 3);
	memory_bm_position_reset(pageset1_map);

	memory_bm_set_iterators(pageset1_copy_map, 2);
	memory_bm_position_reset(pageset1_copy_map);

	last_low_pbe_ptr = &restore_pblist;

	/* First, allocate pages for the start of our pbe lists. */
	if (high_needed) {
		high_pbe_page = ___toi_get_nonconflicting_page(1);
		if (!high_pbe_page) {
			result = -ENOMEM;
			goto out;
		}
		this_high_pbe = (struct pbe *) kmap(high_pbe_page);
		memset(this_high_pbe, 0, PAGE_SIZE);
	}

	low_pbe_page = ___toi_get_nonconflicting_page(0);
	if (!low_pbe_page) {
		result = -ENOMEM;
		goto out;
	}
	this_low_pbe = (struct pbe *) page_address(low_pbe_page);

	/*
	 * Next, allocate the number of pages we need.
	 */

	i = low_needed + high_needed;

	do {
		int is_high;

		if (i == low_needed)
			flags &= ~__GFP_HIGHMEM;

		page = toi_alloc_page(30, flags);
		BUG_ON(!page);

		SetPagePageset1Copy(page);
		is_high = PageHighMem(page);

		if (PagePageset1(page)) {
			if (is_high)
				high_direct++;
			else
				low_direct++;
		} else {
			if (is_high)
				highallocd++;
			else
				lowallocd++;
		}
	} while (--i);

	high_needed -= high_direct;
	low_needed -= low_direct;

	/*
	 * Do we need to use some lowmem pages for the copies of highmem
	 * pages?
	 */
	if (high_needed > highallocd) {
		low_pages_for_highmem = high_needed - highallocd;
		high_needed -= low_pages_for_highmem;
		low_needed += low_pages_for_highmem;
	}

	/*
	 * Now generate our pbes (which will be used for the atomic restore),
	 * and free unneeded pages.
	 */
	memory_bm_position_reset(pageset1_copy_map);
	for (pfn = memory_bm_next_pfn_index(pageset1_copy_map, 1); pfn != BM_END_OF_MAP;
			pfn = memory_bm_next_pfn_index(pageset1_copy_map, 1)) {
		int is_high;
		page = pfn_to_page(pfn);
		is_high = PageHighMem(page);

		if (PagePageset1(page))
			continue;

		/* Nope. We're going to use this page. Add a pbe. */
		if (is_high || low_pages_for_highmem) {
			struct page *orig_page;
			high_pbes_done++;
			if (!is_high)
				low_pages_for_highmem--;
			do {
				orig_high_pfn = memory_bm_next_pfn_index(pageset1_map, 1);
				BUG_ON(orig_high_pfn == BM_END_OF_MAP);
				orig_page = pfn_to_page(orig_high_pfn);
			} while (!PageHighMem(orig_page) ||
					PagePageset1Copy(orig_page));

			this_high_pbe->orig_address = orig_page;
			this_high_pbe->address = page;
			this_high_pbe->next = NULL;
			toi_message(TOI_PAGEDIR, TOI_VERBOSE, 0, "High pbe %d/%d: %p(%d)=>%p",
					high_page, high_offset, page, orig_high_pfn, orig_page);
			if (last_high_pbe_page != high_pbe_page) {
				*last_high_pbe_ptr =
					(struct pbe *) high_pbe_page;
				if (last_high_pbe_page) {
					kunmap(last_high_pbe_page);
					high_page++;
					high_offset = 0;
				} else
					high_offset++;
				last_high_pbe_page = high_pbe_page;
			} else {
				*last_high_pbe_ptr = this_high_pbe;
				high_offset++;
			}
			last_high_pbe_ptr = &this_high_pbe->next;
			this_high_pbe = get_next_pbe(&high_pbe_page,
					this_high_pbe, 1);
			if (IS_ERR(this_high_pbe)) {
				printk(KERN_INFO
						"This high pbe is an error.\n");
				return -ENOMEM;
			}
		} else {
			struct page *orig_page;
			low_pbes_done++;
			do {
				orig_low_pfn = memory_bm_next_pfn_index(pageset1_map, 2);
				BUG_ON(orig_low_pfn == BM_END_OF_MAP);
				orig_page = pfn_to_page(orig_low_pfn);
			} while (PageHighMem(orig_page) ||
					PagePageset1Copy(orig_page));

			this_low_pbe->orig_address = page_address(orig_page);
			this_low_pbe->address = page_address(page);
			this_low_pbe->next = NULL;
			toi_message(TOI_PAGEDIR, TOI_VERBOSE, 0, "Low pbe %d/%d: %p(%d)=>%p",
					low_page, low_offset, this_low_pbe->orig_address,
					orig_low_pfn, this_low_pbe->address);
			*last_low_pbe_ptr = this_low_pbe;
			last_low_pbe_ptr = &this_low_pbe->next;
			this_low_pbe = get_next_pbe(&low_pbe_page,
					this_low_pbe, 0);
			if (low_pbe_page != last_low_pbe_page) {
				if (last_low_pbe_page) {
					low_page++;
					low_offset = 0;
				}
				last_low_pbe_page = low_pbe_page;
			} else
				low_offset++;
			if (IS_ERR(this_low_pbe)) {
				printk(KERN_INFO "this_low_pbe is an error.\n");
				return -ENOMEM;
			}
		}
	}

	if (high_pbe_page)
		kunmap(high_pbe_page);

	if (last_high_pbe_page != high_pbe_page) {
		if (last_high_pbe_page)
			kunmap(last_high_pbe_page);
		toi__free_page(29, high_pbe_page);
	}

	free_conflicting_pages();

out:
	memory_bm_set_iterators(pageset1_map, 1);
	memory_bm_set_iterators(pageset1_copy_map, 1);
	return result;
}
Example #21
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
			    struct iovec *to, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	trace_skb_copy_datagram_iovec(skb, len);

	
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovec(to, skb->data + offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			int err;
			u8  *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovec(to, vaddr + frag->page_offset +
					     offset - start, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_iovec(frag_iter,
						    offset - start,
						    to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
Example #22
/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int __f2fs_add_link(struct inode *dir, const struct qstr *name,
						struct inode *inode)
{
	unsigned int bit_pos;
	unsigned int level;
	unsigned int current_depth;
	unsigned long bidx, block;
	f2fs_hash_t dentry_hash;
	struct f2fs_dir_entry *de;
	unsigned int nbucket, nblock;
	size_t namelen = name->len;
	struct page *dentry_page = NULL;
	struct f2fs_dentry_block *dentry_blk = NULL;
	int slots = GET_DENTRY_SLOTS(namelen);
	struct page *page;
	int err = 0;
	int i;

	dentry_hash = f2fs_dentry_hash(name->name, name->len);
	level = 0;
	current_depth = F2FS_I(dir)->i_current_depth;
	if (F2FS_I(dir)->chash == dentry_hash) {
		level = F2FS_I(dir)->clevel;
		F2FS_I(dir)->chash = 0;
	}

start:
	if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
		return -ENOSPC;

	/* Increase the depth, if required */
	if (level == current_depth)
		++current_depth;

	nbucket = dir_buckets(level);
	nblock = bucket_blocks(level);

	bidx = dir_block_index(level, (le32_to_cpu(dentry_hash) % nbucket));

	for (block = bidx; block <= (bidx + nblock - 1); block++) {
		dentry_page = get_new_data_page(dir, NULL, block, true);
		if (IS_ERR(dentry_page))
			return PTR_ERR(dentry_page);

		dentry_blk = kmap(dentry_page);
		bit_pos = room_for_filename(dentry_blk, slots);
		if (bit_pos < NR_DENTRY_IN_BLOCK)
			goto add_dentry;

		kunmap(dentry_page);
		f2fs_put_page(dentry_page, 1);
	}

	/* Move to next level to find the empty slot for new dentry */
	++level;
	goto start;
add_dentry:
	wait_on_page_writeback(dentry_page);

	page = init_inode_metadata(inode, dir, name);
	if (IS_ERR(page)) {
		err = PTR_ERR(page);
		goto fail;
	}
	de = &dentry_blk->dentry[bit_pos];
	de->hash_code = dentry_hash;
	de->name_len = cpu_to_le16(namelen);
	memcpy(dentry_blk->filename[bit_pos], name->name, name->len);
	de->ino = cpu_to_le32(inode->i_ino);
	set_de_type(de, inode);
	for (i = 0; i < slots; i++)
		test_and_set_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
	set_page_dirty(dentry_page);

	/* we don't need to mark_inode_dirty now */
	F2FS_I(inode)->i_pino = dir->i_ino;
	update_inode(inode, page);
	f2fs_put_page(page, 1);

	update_parent_metadata(dir, inode, current_depth);
fail:
	clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	kunmap(dentry_page);
	f2fs_put_page(dentry_page, 1);
	return err;
}
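As the comment at the top of this example says, the caller must hold the op rwsem around __f2fs_add_link(). A minimal caller sketch, assuming F2FS_SB() and the f2fs_lock_op()/f2fs_unlock_op() helpers from the same f2fs tree:

/* Hypothetical wrapper; helper names are assumptions from this f2fs era. */
static int example_add_link(struct inode *dir, const struct qstr *name,
			    struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	int err;

	f2fs_lock_op(sbi);		/* grab the rwsem the comment requires */
	err = __f2fs_add_link(dir, name, inode);
	f2fs_unlock_op(sbi);		/* release it on every path */

	return err;
}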
Exemple #23
0
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (!fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */
	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);
done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
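The core of the routine is the try-atomic-then-sleep pattern: prefault the user buffer, copy under kmap_atomic(), and fall back to a sleeping kmap() when the atomic copy faults. A minimal single-chunk sketch of that pattern (the helper name is illustrative; unlike the real code, the sketch simply retries the whole chunk instead of resuming mid-copy):

static size_t copy_chunk_to_page(struct page *page, size_t offset,
				 const char __user *buf, size_t len)
{
	size_t left;
	void *kaddr;

	if (!fault_in_pages_readable(buf, len)) {
		/* fast path: pagefaults are disabled, so no sleeping here */
		kaddr = kmap_atomic(page);
		left = __copy_from_user_inatomic(kaddr + offset, buf, len);
		kunmap_atomic(kaddr);
		if (!left)
			return len;
	}

	/* slow path: sleeping kmap() plus a copy that is allowed to fault */
	kaddr = kmap(page);
	left = __copy_from_user(kaddr + offset, buf, len);
	kunmap(page);
	return len - left;
}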
Exemple #24
0
static int f2fs_readdir(struct file *file, void *dirent, filldir_t filldir)
{
	unsigned long pos = file->f_pos;
	struct inode *inode = file->f_dentry->d_inode;
	unsigned long npages = dir_blocks(inode);
	unsigned char *types = NULL;
	unsigned int bit_pos = 0, start_bit_pos = 0;
	int over = 0;
	struct f2fs_dentry_block *dentry_blk = NULL;
	struct f2fs_dir_entry *de = NULL;
	struct page *dentry_page = NULL;
	unsigned int n = 0;
	unsigned char d_type = DT_UNKNOWN;
	int slots;

	types = f2fs_filetype_table;
	bit_pos = (pos % NR_DENTRY_IN_BLOCK);
	n = (pos / NR_DENTRY_IN_BLOCK);

	for (; n < npages; n++) {
		dentry_page = get_lock_data_page(inode, n);
		if (IS_ERR(dentry_page))
			continue;

		start_bit_pos = bit_pos;
		dentry_blk = kmap(dentry_page);
		while (bit_pos < NR_DENTRY_IN_BLOCK) {
			d_type = DT_UNKNOWN;
			bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
							NR_DENTRY_IN_BLOCK,
							bit_pos);
			if (bit_pos >= NR_DENTRY_IN_BLOCK)
				break;

			de = &dentry_blk->dentry[bit_pos];
			if (types && de->file_type < F2FS_FT_MAX)
				d_type = types[de->file_type];

			over = filldir(dirent,
					dentry_blk->filename[bit_pos],
					le16_to_cpu(de->name_len),
					(n * NR_DENTRY_IN_BLOCK) + bit_pos,
					le32_to_cpu(de->ino), d_type);
			if (over) {
				file->f_pos += bit_pos - start_bit_pos;
				goto success;
			}
			slots = GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
			bit_pos += slots;
		}
		bit_pos = 0;
		file->f_pos = (n + 1) * NR_DENTRY_IN_BLOCK;
		kunmap(dentry_page);
		f2fs_put_page(dentry_page, 1);
		dentry_page = NULL;
	}
success:
	if (dentry_page && !IS_ERR(dentry_page)) {
		kunmap(dentry_page);
		f2fs_put_page(dentry_page, 1);
	}

	return 0;
}
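The directory position is a flat dentry-slot index, which the loop above splits into a block number and a bit position inside that block. A tiny illustrative helper (hypothetical name) for that arithmetic:

/* Map a flat readdir position onto (dentry block, slot within block). */
static inline void f2fs_pos_to_slot(unsigned long pos,
				    unsigned long *blk, unsigned int *bit)
{
	*blk = pos / NR_DENTRY_IN_BLOCK;	/* which dentry block */
	*bit = pos % NR_DENTRY_IN_BLOCK;	/* slot inside that block */
}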
Exemple #25
0
/*
 * create a vfsmount to be automounted
 */
static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
{
	struct afs_super_info *super;
	struct vfsmount *mnt;
	struct page *page = NULL;
	size_t size;
	char *buf, *devname = NULL, *options = NULL;
	int ret;

	_enter("{%s}", mntpt->d_name.name);

	BUG_ON(!mntpt->d_inode);

	ret = -EINVAL;
	size = mntpt->d_inode->i_size;
	if (size > PAGE_SIZE - 1)
		goto error;

	ret = -ENOMEM;
	devname = (char *) get_zeroed_page(GFP_KERNEL);
	if (!devname)
		goto error;

	options = (char *) get_zeroed_page(GFP_KERNEL);
	if (!options)
		goto error;

	/* read the contents of the AFS special symlink */
	page = read_mapping_page(mntpt->d_inode->i_mapping, 0, NULL);
	if (IS_ERR(page)) {
		ret = PTR_ERR(page);
		goto error;
	}

	ret = -EIO;
	if (PageError(page))
		goto error;

	buf = kmap(page);
	memcpy(devname, buf, size);
	kunmap(page);
	page_cache_release(page);
	page = NULL;

	/* work out what options we want */
	super = AFS_FS_S(mntpt->d_sb);
	memcpy(options, "cell=", 5);
	strcpy(options + 5, super->volume->cell->name);
	if (super->volume->type == AFSVL_RWVOL)
		strcat(options, ",rwpath");

	/* try and do the mount */
	_debug("--- attempting mount %s -o %s ---", devname, options);
	mnt = vfs_kern_mount(&afs_fs_type, 0, devname, options);
	_debug("--- mount result %p ---", mnt);

	free_page((unsigned long) devname);
	free_page((unsigned long) options);
	_leave(" = %p", mnt);
	return mnt;

error:
	if (page)
		page_cache_release(page);
	if (devname)
		free_page((unsigned long) devname);
	if (options)
		free_page((unsigned long) options);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}
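The interesting step here is how the symlink body is fetched: read_mapping_page() brings page 0 of the inode's mapping up to date and kmap() exposes it for a plain memcpy(). A minimal sketch of that sequence for these older page-cache APIs (helper name hypothetical):

static int read_first_page(struct inode *inode, char *buf, size_t len)
{
	struct page *page;
	char *kaddr;

	page = read_mapping_page(inode->i_mapping, 0, NULL);
	if (IS_ERR(page))
		return PTR_ERR(page);
	if (PageError(page)) {
		page_cache_release(page);
		return -EIO;
	}

	kaddr = kmap(page);
	memcpy(buf, kaddr, min_t(size_t, len, PAGE_SIZE));
	kunmap(page);
	page_cache_release(page);
	return 0;
}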
Exemple #26
0
/* Readpage expects a locked page, and must unlock it */
static int wrapfs_readpage(struct file *file, struct page *page)
{
    int err = 0;
    int count = 0;
    struct file *lower_file;
    struct inode *inode;
    mm_segment_t old_fs;
    char *page_data = NULL;
    mode_t orig_mode;
    
#ifdef WRAPFS_CRYPTO         
    /* for decryption, use a temp page(cipher_page) to store what we
       vfs_read from lower_file, then decrypt it and store the result
       in upper page
     */
    struct page *cipher_page;
    void *cipher;
    if (MMAP_FLAG == 0) {
	err = -EPERM;
	goto out_page;
    }
    /* init temp page here */
    cipher_page = alloc_page(GFP_KERNEL);
    if (!cipher_page) {
	/* alloc_page() returns NULL on failure, not an ERR_PTR */
	err = -ENOMEM;
	goto out_page;
    }
    cipher = kmap(cipher_page);

#endif
    if (!WRAPFS_F(file)) {
	err = -ENOENT;
	goto out;
    }
    lower_file = wrapfs_lower_file(file);
    /* FIXME: is this assertion right here? */
    BUG_ON(lower_file == NULL);
    
    inode = file->f_path.dentry->d_inode;
    
    page_data = (char *) kmap(page);
    /*
     * Use vfs_read because some lower file systems don't have a
     * readpage method, and some file systems (esp. distributed ones)
     * don't like their pages to be accessed directly.  Using vfs_read
     * may be a little slower, but a lot safer, as the VFS does a lot of
     * the necessary magic for us.
     */
    lower_file->f_pos = page_offset(page);
    old_fs = get_fs();
    set_fs(KERNEL_DS);
    /*
     * generic_file_splice_write may call us on a file not opened for
     * reading, so temporarily allow reading.
     */
    orig_mode = lower_file->f_mode;
    lower_file->f_mode |= FMODE_READ;
#ifdef WRAPFS_CRYPTO
    count = vfs_read(lower_file, cipher, PAGE_CACHE_SIZE,
		   &lower_file->f_pos);
#else
    count = vfs_read(lower_file, page_data, PAGE_CACHE_SIZE,
		     &lower_file->f_pos);
#endif
    lower_file->f_mode = orig_mode;
    
    set_fs(old_fs);
#ifdef WRAPFS_CRYPTO
    /* do decryption here */
    if (count >= 0) 
	err = wrapfs_decrypt(WRAPFS_SB(inode->i_sb)->key,page_data, cipher, count);
    if (err < 0) {
	kunmap(page);	/* drop the mapping taken above before bailing out */
	goto out;
    }
#endif
    if (count < 0) {
	err = count;	/* propagate a vfs_read() failure */
    } else if (count < PAGE_CACHE_SIZE) {
	memset(page_data + count, 0, PAGE_CACHE_SIZE - count);
    }
    
    /* if vfs_read succeeded above, sync up our times */
    wrapfs_copy_attr_times(inode);
    kunmap(page);
    flush_dcache_page(page);
    /*
     * we have to unlock our page, b/c we _might_ have gotten a locked
     * page.  but we no longer have to wakeup on our page here, b/c
     * UnlockPage does it
     */
out:
#ifdef WRAPFS_CRYPTO
    kunmap(cipher_page);
    __free_page(cipher_page);
out_page:
#endif
    if (err == 0)
	SetPageUptodate(page);
    else
	ClearPageUptodate(page);
    
    unlock_page(page);    
    return err;
}
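As the comments explain, the lower file is read with vfs_read() into a kernel buffer, which requires widening the address limit around the call. A minimal sketch of that idiom for these older kernels where set_fs() exists (helper name hypothetical):

static ssize_t read_lower_into_kernel(struct file *lower_file, void *buf,
				      size_t len, loff_t pos)
{
	mm_segment_t old_fs = get_fs();
	ssize_t n;

	set_fs(KERNEL_DS);	/* let vfs_read() accept a kernel pointer */
	n = vfs_read(lower_file, (char __user *)buf, len, &pos);
	set_fs(old_fs);

	return n;
}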
Exemple #27
0
static int ptpfs_file_readpage(struct file *filp, struct page *page)
{
    //printk(KERN_INFO "%s\n",  __FUNCTION__);
    struct inode *inode;
    int ret;
    struct ptp_data_buffer *data=(struct ptp_data_buffer*)filp->private_data;

    inode = page->mapping->host;

    if (!PageLocked(page))
        PAGE_BUG(page);

    ret = -ESTALE;

    /* work out how much to get and from where */
    int offset = page->index << PAGE_CACHE_SHIFT;
    int size   = min((size_t)(inode->i_size - offset),(size_t)PAGE_SIZE);
    char *buffer = kmap(page);
    clear_page(buffer);

    /* read the contents of the file from the server into the page */
    int block = 0;
    while (block < data->num_blocks && offset > data->blocks[block].block_size)
    {
        offset -= data->blocks[block].block_size;
        block++;
    }

    if (block == data->num_blocks)
    {
        kunmap(page);
        ret = -ESTALE;
        goto error;
    }

    int toCopy = min(size,data->blocks[block].block_size-offset);
    memcpy(buffer,&data->blocks[block].block[offset],toCopy);
    size -= toCopy;
    int pos = toCopy;
    block++;
    while (size && block < data->num_blocks)
    {
        toCopy = min(size,data->blocks[block].block_size);
        memcpy(&buffer[pos],data->blocks[block].block,toCopy);
        size -= toCopy;
        pos += toCopy;
        block++;
    }

    if (block == data->num_blocks && size > 0)
    {
        kunmap(page);
        ret = -ESTALE;
        goto error;
    }


    kunmap(page);
    flush_dcache_page(page);
    SetPageUptodate(page);
    unlock_page(page);

    return 0;

    error:
    SetPageError(page);
    unlock_page(page);
    return ret;

} 
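The loop above copies a page's worth of data out of a list of variable-sized blocks. A simplified sketch of that walk, mirroring the struct ptp_data_buffer fields used above (the helper name is hypothetical):

static int copy_from_blocks(const struct ptp_data_buffer *data,
			    size_t offset, char *dst, size_t size)
{
	int block = 0;

	/* skip whole blocks until 'offset' falls inside the current one */
	while (block < data->num_blocks &&
	       offset >= data->blocks[block].block_size) {
		offset -= data->blocks[block].block_size;
		block++;
	}

	/* copy across block boundaries until 'size' bytes are gathered */
	while (size && block < data->num_blocks) {
		size_t n = min(size,
			       (size_t)data->blocks[block].block_size - offset);

		memcpy(dst, &data->blocks[block].block[offset], n);
		dst += n;
		size -= n;
		offset = 0;
		block++;
	}

	return size ? -ESTALE : 0;
}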
Exemple #28
0
/* code is a modification of old unionfs/mmap.c */
static int wrapfs_writepage(struct page *page, struct writeback_control *wbc)
{
    int err = -EIO;
    struct inode *inode;
    struct inode *lower_inode;
    struct page *lower_page;
    
#ifdef WRAPFS_CRYPTO
    void *cipher, *plain;
#endif

    struct address_space *lower_mapping; /* lower inode mapping */
    gfp_t mask;
    
    BUG_ON(!PageUptodate(page));
    inode = page->mapping->host;
    /* if no lower inode, nothing to do */
    if (!inode || !WRAPFS_I(inode) || !WRAPFS_I(inode)->lower_inode) {
	err = 0;
	goto out;
    }
    lower_inode = wrapfs_lower_inode(inode);
    lower_mapping = lower_inode->i_mapping;
    
    /*
     * find lower page (returns a locked page)
     *
     * We turn off __GFP_FS while we look for or create a new lower
     * page.  This prevents a recursion into the file system code, which
     * under memory pressure conditions could lead to a deadlock.  This
     * is similar to how the loop driver behaves (see loop_set_fd in
     * drivers/block/loop.c).  If we can't find the lower page, we
     * redirty our page and return "success" so that the VM will call us
     * again in the (hopefully near) future.
     */
    mask = mapping_gfp_mask(lower_mapping) & ~(__GFP_FS);
    lower_page = find_or_create_page(lower_mapping, page->index, mask);
    if (!lower_page) {
	err = 0;
	set_page_dirty(page);
	goto out;
    }
#ifdef WRAPFS_CRYPTO
    plain = kmap(page);
    cipher = kmap(lower_page);
    if (MMAP_FLAG == 0){
	err = -EPERM;
	goto out_release;
    }
    /* this piece of code maps page and lower_page
       then do the encryption
     */
    err = wrapfs_encrypt(WRAPFS_SB(inode->i_sb)->key,cipher, plain, PAGE_CACHE_SIZE);
    if (err < 0) {
	goto out_release;
    }
#else
    /* copy page data from our upper page to the lower page */
    copy_highpage(lower_page, page); 
#endif
    
    flush_dcache_page(lower_page);
    SetPageUptodate(lower_page);
    set_page_dirty(lower_page);
    
    /*
     * Call lower writepage (expects locked page).  However, if we are
     * called with wbc->for_reclaim, then the VFS/VM just wants to
     * reclaim our page.  Therefore, we don't need to call the lower
     * ->writepage: just copy our data to the lower page (already done
     * above), then mark the lower page dirty and unlock it, and return
     * success.
     */
    if (wbc->for_reclaim) {
	unlock_page(lower_page);
	goto out_release;
    }
    
    BUG_ON(!lower_mapping->a_ops->writepage);
    wait_on_page_writeback(lower_page); /* prevent multiple writers */
    clear_page_dirty_for_io(lower_page); /* emulate VFS behavior */
    err = lower_mapping->a_ops->writepage(lower_page, wbc);
    if (err < 0)
	goto out_release;
    
    /*
     * Lower file systems such as ramfs and tmpfs, may return
     * AOP_WRITEPAGE_ACTIVATE so that the VM won't try to (pointlessly)
     * write the page again for a while.  But those lower file systems
     * also set the page dirty bit back again.  Since we successfully
     * copied our page data to the lower page, then the VM will come
     * back to the lower page (directly) and try to flush it.  So we can
     * save the VM the hassle of coming back to our page and trying to
     * flush too.  Therefore, we don't re-dirty our own page, and we
     * never return AOP_WRITEPAGE_ACTIVATE back to the VM (we consider
     * this a success).
     *
     * We also unlock the lower page if the lower ->writepage returned
     * AOP_WRITEPAGE_ACTIVATE.  (This "anomalous" behaviour may be
     * addressed in future shmem/VM code.)
     */
    if (err == AOP_WRITEPAGE_ACTIVATE) {
	err = 0;
	unlock_page(lower_page);
    }
    
    /* all is well */
    
    /* lower mtimes have changed: update ours */
    wrapfs_copy_attr_times(inode);
    
out_release:
#ifdef WRAPFS_CRYPTO
    kunmap(page);
    kunmap(lower_page);
#endif
    /* b/c find_or_create_page increased refcnt */
    page_cache_release(lower_page);
out:
    /*
     * We unlock our page unconditionally, because we never return
     * AOP_WRITEPAGE_ACTIVATE.
     */
    unlock_page(page);
    return err;
}
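The key detail the long comment describes is the allocation mask: __GFP_FS is cleared before looking up the lower page so that the lookup cannot recurse into the filesystem under memory pressure. The same trick isolated as a sketch (helper name hypothetical):

static struct page *grab_lower_page_nofs(struct address_space *lower_mapping,
					 pgoff_t index)
{
	/* never re-enter the FS from this allocation */
	gfp_t mask = mapping_gfp_mask(lower_mapping) & ~__GFP_FS;

	return find_or_create_page(lower_mapping, index, mask);
}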
Exemple #29
0
static int jffs2_write_end(struct file *filp, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *pg, void *fsdata)
{
	/* Actually commit the write from the page cache page we're looking at.
	 * For now, we write the full page out each time. It sucks, but it's simple
	 */
	struct inode *inode = mapping->host;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_raw_inode *ri;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned end = start + copied;
	unsigned aligned_start = start & ~3;
	int ret = 0;
	uint32_t writtenlen = 0;

	jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
		  __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT,
		  start, end, pg->flags);

	/* We need to avoid deadlock with page_cache_read() in
	   jffs2_garbage_collect_pass(). So the page must be
	   up to date to prevent page_cache_read() from trying
	   to re-lock it. */
	BUG_ON(!PageUptodate(pg));

	if (end == PAGE_CACHE_SIZE) {
		/* When writing out the end of a page, write out the
		   _whole_ page. This helps to reduce the number of
		   nodes in files which have many short writes, like
		   syslog files. */
		aligned_start = 0;
	}

	ri = jffs2_alloc_raw_inode();

	if (!ri) {
		jffs2_dbg(1, "%s(): Allocation of raw inode failed\n",
			  __func__);
		unlock_page(pg);
		page_cache_release(pg);
		return -ENOMEM;
	}

	/* Set the fields that the generic jffs2_write_inode_range() code can't find */
	ri->ino = cpu_to_je32(inode->i_ino);
	ri->mode = cpu_to_jemode(inode->i_mode);
	ri->uid = cpu_to_je16(inode->i_uid);
	ri->gid = cpu_to_je16(inode->i_gid);
	ri->isize = cpu_to_je32((uint32_t)inode->i_size);
	ri->atime = ri->ctime = ri->mtime = cpu_to_je32(get_seconds());

	/* In 2.4, it was already kmapped by generic_file_write(). Doesn't
	   hurt to do it again. The alternative is ifdefs, which are ugly. */
	kmap(pg);

	ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start,
				      (pg->index << PAGE_CACHE_SHIFT) + aligned_start,
				      end - aligned_start, &writtenlen);

	kunmap(pg);

	if (ret) {
		/* There was an error writing. */
		SetPageError(pg);
	}

	/* Adjust writtenlen for the padding we did, so we don't confuse our caller */
	writtenlen -= min(writtenlen, (start - aligned_start));

	if (writtenlen) {
		if (inode->i_size < pos + writtenlen) {
			inode->i_size = pos + writtenlen;
			inode->i_blocks = (inode->i_size + 511) >> 9;

			inode->i_ctime = inode->i_mtime = ITIME(je32_to_cpu(ri->ctime));
		}
	}

	/* Done with the raw inode; report how much actually hit the medium */
	jffs2_free_raw_inode(ri);

	if (start + writtenlen < end) {
		/* Not all bytes were written: mark the page !Uptodate so it
		   gets re-read from the medium on the next access */
		SetPageError(pg);
		ClearPageUptodate(pg);
	}

	unlock_page(pg);
	page_cache_release(pg);
	return writtenlen > 0 ? writtenlen : ret;
}
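The alignment bookkeeping above is easy to miss: jffs2 writes from a 4-byte aligned start, then subtracts the padding so the caller is only credited with the bytes it asked for. A purely illustrative restatement of that arithmetic:

static unsigned int adjust_writtenlen(unsigned int start, unsigned int writtenlen)
{
	unsigned int aligned_start = start & ~3;	/* round down to 4 bytes */

	/* drop the padding bytes between aligned_start and start */
	return writtenlen - min(writtenlen, start - aligned_start);
}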
Exemple #30
0
/*
 *  create the first process
 */
void
userinit(void)
{
	Proc *p;
	Segment *s;
	KMap *k;
	Page *pg;

	/* no processes yet */
	up = nil;

	p = newproc();
	p->pgrp = newpgrp();
	p->egrp = smalloc(sizeof(Egrp));
	p->egrp->ref = 1;
	p->fgrp = dupfgrp(nil);
	p->rgrp = newrgrp();
	p->procmode = 0640;

	kstrdup(&eve, "");
	kstrdup(&p->text, "*init*");
	kstrdup(&p->user, eve);

	/*
	 * Kernel Stack
	 */
	p->sched.pc = PTR2UINT(init0);
	p->sched.sp = PTR2UINT(p->kstack+KSTACK-sizeof(up->s.args)-sizeof(uintptr));
	p->sched.sp = STACKALIGN(p->sched.sp);

	/*
	 * User Stack
	 *
	 * Technically, newpage can't be called here because it
	 * should only be called when in a user context as it may
	 * try to sleep if there are no pages available, but that
	 * shouldn't be the case here.
	 */
	s = newseg(SG_STACK, USTKTOP-USTKSIZE, USTKSIZE/BY2PG);
	p->seg[SSEG] = s;
	pg = newpage(1, 0, USTKTOP-BY2PG);
	segpage(s, pg);
	k = kmap(pg);
	bootargs(VA(k));
	kunmap(k);

	/*
	 * Text
	 */
	s = newseg(SG_TEXT, UTZERO, 1);
	s->flushme++;
	p->seg[TSEG] = s;
	pg = newpage(1, 0, UTZERO);
	memset(pg->cachectl, PG_TXTFLUSH, sizeof(pg->cachectl));
	segpage(s, pg);
	k = kmap(s->map[0]->pages[0]);
	memmove(UINT2PTR(VA(k)), initcode, sizeof initcode);
	kunmap(k);

	ready(p);
}