Example #1
int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
{
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	unsigned char *pg_buf;
	int ret;

	D1(printk(KERN_DEBUG "jffs2_do_readpage_nolock(): ino #%lu, page at offset 0x%lx\n", inode->i_ino, pg->index << PAGE_CACHE_SHIFT));

	if (!PageLocked(pg))
		PAGE_BUG(pg);

	pg_buf = kmap(pg);
	/* FIXME: Can kmap fail? */

	ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE);

	if (ret) {
		ClearPageUptodate(pg);
		SetPageError(pg);
	} else {
		SetPageUptodate(pg);
		ClearPageError(pg);
	}

	flush_dcache_page(pg);
	kunmap(pg);

	D1(printk(KERN_DEBUG "readpage finished\n"));
	return 0;
}
Example #2
static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
{
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	unsigned char *pg_buf;
	int ret;

	jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n",
		  __func__, inode->i_ino, pg->index << PAGE_SHIFT);

	BUG_ON(!PageLocked(pg));

	pg_buf = kmap(pg);
	/* FIXME: Can kmap fail? */

	ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_SHIFT,
				     PAGE_SIZE);

	if (ret) {
		ClearPageUptodate(pg);
		SetPageError(pg);
	} else {
		SetPageUptodate(pg);
		ClearPageError(pg);
	}

	flush_dcache_page(pg);
	kunmap(pg);

	jffs2_dbg(2, "readpage finished\n");
	return ret;
}
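Examples #1 and #2 are two revisions of the same JFFS2 function. Note the difference at the end: the older version always returns 0, even after jffs2_read_inode_range() has failed and the page has been flagged with SetPageError(), while the newer one propagates ret. The flag-update idiom they share recurs in most of the examples below; distilled into a standalone helper it would look roughly like this (the helper name is ours, purely illustrative):

static void readpage_set_result(struct page *pg, int err)
{
	if (err) {
		/* Never expose stale or partial data as valid. */
		ClearPageUptodate(pg);
		SetPageError(pg);
	} else {
		SetPageUptodate(pg);
		ClearPageError(pg);
	}
	/* Keep the data cache coherent with the new page contents. */
	flush_dcache_page(pg);
}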
Example #3
static int affs_symlink_readpage(struct file *file, struct page *page)
{
	struct buffer_head *bh;
	struct inode *inode = page->mapping->host;
	char *link = kmap(page);
	struct slink_front *lf;
	int err;
	int			 i, j;
	char			 c;
	char			 lc;

	pr_debug("AFFS: follow_link(ino=%lu)\n",inode->i_ino);

	err = -EIO;
	bh = affs_bread(inode->i_sb, inode->i_ino);
	if (!bh)
		goto fail;
	i  = 0;
	j  = 0;
	lf = (struct slink_front *)bh->b_data;
	lc = 0;

	if (strchr(lf->symname,':')) {	/* Handle assign or volume name */
		struct affs_sb_info *sbi = AFFS_SB(inode->i_sb);
		char *pf;
		spin_lock(&sbi->symlink_lock);
		pf = sbi->s_prefix ? sbi->s_prefix : "/";
		while (i < 1023 && (c = pf[i]))
			link[i++] = c;
		spin_unlock(&sbi->symlink_lock);
		while (i < 1023 && lf->symname[j] != ':')
			link[i++] = lf->symname[j++];
		if (i < 1023)
			link[i++] = '/';
		j++;
		lc = '/';
	}
	while (i < 1023 && (c = lf->symname[j])) {
		if (c == '/' && lc == '/' && i < 1020) {	/* parent dir */
			link[i++] = '.';
			link[i++] = '.';
		}
		link[i++] = c;
		lc = c;
		j++;
	}
	link[i] = '\0';
	affs_brelse(bh);
	SetPageUptodate(page);
	kunmap(page);
	unlock_page(page);
	return 0;
fail:
	SetPageError(page);
	kunmap(page);
	unlock_page(page);
	return err;
}
Example #4
void end_swap_bio_read(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (!uptodate) {
		SetPageError(page);
		ClearPageUptodate(page);
		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_sector);
		goto out;
	}

	SetPageUptodate(page);

	/*
	 * There is no guarantee that the page is in swap cache - the software
	 * suspend code (at least) uses end_swap_bio_read() against a non-
	 * swapcache page.  So we must check PG_swapcache before proceeding with
	 * this optimization.
	 */
	if (likely(PageSwapCache(page))) {
		/*
		 * The swap subsystem performs lazy swap slot freeing,
		 * expecting that the page will be swapped out again.
		 * So we can avoid an unnecessary write if the page
		 * isn't redirtied.
		 * This is good for real swap storage because we can
		 * reduce unnecessary I/O and enhance wear-leveling
		 * if an SSD is used as the swap device.
		 * But if in-memory swap device (eg zram) is used,
		 * this causes a duplicated copy between uncompressed
		 * data in VM-owned memory and compressed data in
		 * zram-owned memory.  So let's free zram-owned memory
		 * and make the VM-owned decompressed page *dirty*,
		 * so the page should be swapped out somewhere again if
		 * we again wish to reclaim it.
		 */
		struct gendisk *disk = bio->bi_bdev->bd_disk;
		if (disk->fops->swap_slot_free_notify) {
			swp_entry_t entry;
			unsigned long offset;

			entry.val = page_private(page);
			offset = swp_offset(entry);

			SetPageDirty(page);
			disk->fops->swap_slot_free_notify(bio->bi_bdev,
					offset);
		}
	}

out:
	unlock_page(page);
	bio_put(bio);
}
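For a page in the swap cache, page_private() holds the swp_entry_t identifying its swap slot; that is why the handler above can recover the slot offset with swp_offset() and hand it to the driver's swap_slot_free_notify hook. Also note that, unlike the ->readpage() implementations elsewhere in this collection, which are entered with the page already locked by the caller, a bio completion handler must unlock the page and drop the bio reference itself.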
Example #5
static int squashfs_symlink_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct super_block *sb = inode->i_sb;
	struct squashfs_sb_info *msblk = sb->s_fs_info;
	int index = page->index << PAGE_CACHE_SHIFT;
	u64 block = squashfs_i(inode)->start;
	int offset = squashfs_i(inode)->offset;
	int length = min_t(int, i_size_read(inode) - index, PAGE_CACHE_SIZE);
	int bytes, copied;
	void *pageaddr;
	struct squashfs_cache_entry *entry;

	TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
			"%llx, offset %x\n", page->index, block, offset);

	if (index) {
		bytes = squashfs_read_metadata(sb, NULL, &block, &offset,
								index);
		if (bytes < 0) {
			ERROR("Unable to read symlink [%llx:%x]\n",
				squashfs_i(inode)->start,
				squashfs_i(inode)->offset);
			goto error_out;
		}
	}

	for (bytes = 0; bytes < length; offset = 0, bytes += copied) {
		entry = squashfs_cache_get(sb, msblk->block_cache, block, 0);
		if (entry->error) {
			ERROR("Unable to read symlink [%llx:%x]\n",
				squashfs_i(inode)->start,
				squashfs_i(inode)->offset);
			squashfs_cache_put(entry);
			goto error_out;
		}

		pageaddr = kmap_atomic(page);
		copied = squashfs_copy_data(pageaddr + bytes, entry, offset,
								length - bytes);
		if (copied == length - bytes)
			memset(pageaddr + length, 0, PAGE_CACHE_SIZE - length);
		else
			block = entry->next_index;
		kunmap_atomic(pageaddr);
		squashfs_cache_put(entry);
	}

	flush_dcache_page(page);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;

error_out:
	SetPageError(page);
	unlock_page(page);
	return 0;
}
Example #6
static int cramfs_readpage(struct file *file, struct page * page)
{
	struct inode *inode = page->mapping->host;
	u32 maxblock;
	int bytes_filled;
	void *pgdata;

	maxblock = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	bytes_filled = 0;
	pgdata = kmap(page);

	if (page->index < maxblock) {
		struct super_block *sb = inode->i_sb;
		u32 blkptr_offset = OFFSET(inode) + page->index*4;
		u32 start_offset, compr_len;

		start_offset = OFFSET(inode) + maxblock*4;
		mutex_lock(&read_mutex);
		if (page->index)
			start_offset = *(u32 *) cramfs_read(sb, blkptr_offset-4,
				4);
		compr_len = (*(u32 *) cramfs_read(sb, blkptr_offset, 4) -
			start_offset);
		mutex_unlock(&read_mutex);

		if (compr_len == 0)
			; /* hole */
		else if (unlikely(compr_len > (PAGE_CACHE_SIZE << 1))) {
			pr_err("cramfs: bad compressed blocksize %u\n",
				compr_len);
			goto err;
		} else {
			mutex_lock(&read_mutex);
			bytes_filled = cramfs_uncompress_block(pgdata,
				 PAGE_CACHE_SIZE,
				 cramfs_read(sb, start_offset, compr_len),
				 compr_len);
			mutex_unlock(&read_mutex);
			if (unlikely(bytes_filled < 0))
				goto err;
		}
	}

	memset(pgdata + bytes_filled, 0, PAGE_CACHE_SIZE - bytes_filled);
	flush_dcache_page(page);
	kunmap(page);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;

err:
	kunmap(page);
	ClearPageUptodate(page);
	SetPageError(page);
	unlock_page(page);
	return 0;
}
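Note how cramfs represents a hole: compr_len == 0 skips decompression entirely, bytes_filled stays 0, and the trailing memset() zero-fills the whole page, so the read still succeeds.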
Example #7
static int ncp_symlink_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int error, length, len;
	char *link, *rawlink;
	char *buf = kmap(page);

	error = -ENOMEM;
	rawlink = kmalloc(NCP_MAX_SYMLINK_SIZE, GFP_KERNEL);
	if (!rawlink)
		goto fail;

	if (ncp_make_open(inode,O_RDONLY))
		goto failEIO;

	error=ncp_read_kernel(NCP_SERVER(inode),NCP_FINFO(inode)->file_handle,
                         0,NCP_MAX_SYMLINK_SIZE,rawlink,&length);

	ncp_inode_close(inode);
	/* Close file handle if no other users... */
	ncp_make_closed(inode);
	if (error)
		goto failEIO;

	if (NCP_FINFO(inode)->flags & NCPI_KLUDGE_SYMLINK) {
		if (length<NCP_MIN_SYMLINK_SIZE || 
		    ((__le32 *)rawlink)[0]!=NCP_SYMLINK_MAGIC0 ||
		    ((__le32 *)rawlink)[1]!=NCP_SYMLINK_MAGIC1)
		    	goto failEIO;
		link = rawlink + 8;
		length -= 8;
	} else {
		link = rawlink;
	}

	len = NCP_MAX_SYMLINK_SIZE;
	error = ncp_vol2io(NCP_SERVER(inode), buf, &len, link, length, 0);
	kfree(rawlink);
	if (error)
		goto fail;
	SetPageUptodate(page);
	kunmap(page);
	unlock_page(page);
	return 0;

failEIO:
	error = -EIO;
	kfree(rawlink);
fail:
	SetPageError(page);
	kunmap(page);
	unlock_page(page);
	return error;
}
Example #8
int ngffs_sysfile_do_readpage_nolock(struct inode *inode, struct page *pg)
{
	struct ngffs_info *ngsb=NGFFS_INFO(inode->i_sb);
	u_char *pg_buf;
	int i;
	int rv=0;
	__u32 offset;

	i=inode->i_ino-3;
	PK_DBG("sysfile found at %i\n",i);

	PK_DBG("sysfile read\n");
	if (!PageLocked(pg)) {
		/* PLEASECHECK Koen: PAGE_BUG has been removed as of 2.6.12 or so,
		 * no idea what it should be. */
		printk("page BUG for page at %p\n", pg);
		BUG();
	}

	pg_buf = kmap(pg);

	if(i>=NGFFS_SYSFILES) {
		PK_WARN("sysfile id out of range!\n");
		rv=-EIO;
		goto readpage_fail;
	}

	/* Determine offset */
	offset = ngffs_sysfiles[i].ofs;
	if ( ngsb->mtd->size >= 0x200000 ) /* >= 2MB */
	{
		/* factory data stored in upper half of flash */
		if ( offset < 0x4000 ) /* factory data */
		{
			offset += 0x100000; /* 1MB offset */
		}
	}

	printk("[kwwo] reading abs addr 0x%x\n", offset);
	rv=ngffs_absolute_read(ngsb->mtd,offset,(u_char *)page_address(pg),ngffs_sysfiles[i].length);
	if(rv) goto readpage_fail;

	//  if (!strcmp(ngffs_sysfiles[i].name,"id")) memcpy((u_char *)page_address(pg),"AAAAAAAAAAAA",12);

	SetPageUptodate(pg);
	ClearPageError(pg);
	flush_dcache_page(pg);
	kunmap(pg);
	return 0;

readpage_fail:
	ClearPageUptodate(pg);
	SetPageError(pg);
	kunmap(pg);
	return rv;

}
Example #9
static int yaffs_readpage_nolock(struct file *f, struct page *pg)
{

	yaffs_Object *obj;
	unsigned char *pg_buf;
	int ret;

	yaffs_Device *dev;

	T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_readpage at %08x, size %08x\n",
			   (unsigned)(pg->index << PAGE_CACHE_SHIFT),
			   (unsigned)PAGE_CACHE_SIZE));

	obj = yaffs_DentryToObject(f->f_dentry);

	dev = obj->myDev;

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
	BUG_ON(!PageLocked(pg));
#else
	if (!PageLocked(pg))
		PAGE_BUG(pg);
#endif

	pg_buf = kmap(pg);

	yaffs_GrossLock(dev);

	ret =
	    yaffs_ReadDataFromFile(obj, pg_buf, pg->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE);

	yaffs_GrossUnlock(dev);

	if (ret >= 0)
		ret = 0;

	if (ret) {
		ClearPageUptodate(pg);
		SetPageError(pg);
	} else {
		SetPageUptodate(pg);
		ClearPageError(pg);
	}

	flush_dcache_page(pg);
	kunmap(pg);

	T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_readpage done\n"));
	return ret;
}
Example #10
/* completion handler for single page bio-based write.

   mpage_end_io_write() would also do. But it's static.

*/
static void
end_bio_single_page_write(struct bio *bio, int err UNUSED_ARG)
{
	struct page *page;

	page = bio->bi_io_vec[0].bv_page;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
		SetPageError(page);
	end_page_writeback(page);
	bio_put(bio);
}
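This write-side completion handler ends with end_page_writeback(), because pages under writeback are tracked by the writeback bit rather than the page lock; its read-side counterpart in Example #19 below unlocks the page instead. The same read/write split is visible inside mpage_end_io() in Example #12.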
Example #11
static int udf_symlink_filler(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head *bh = NULL;
	unsigned char *symlink;
	int err;
	unsigned char *p = kmap(page);
	struct udf_inode_info *iinfo;
	uint32_t pos;

	/* We don't support symlinks longer than one block */
	if (inode->i_size > inode->i_sb->s_blocksize) {
		err = -ENAMETOOLONG;
		goto out_unmap;
	}

	iinfo = UDF_I(inode);
	pos = udf_block_map(inode, 0);

	down_read(&iinfo->i_data_sem);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
		symlink = iinfo->i_ext.i_data + iinfo->i_lenEAttr;
	} else {
		bh = sb_bread(inode->i_sb, pos);

		if (!bh) {
			err = -EIO;
			goto out_unlock_inode;
		}

		symlink = bh->b_data;
	}

	err = udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p, PAGE_SIZE);
	brelse(bh);
	if (err)
		goto out_unlock_inode;

	up_read(&iinfo->i_data_sem);
	SetPageUptodate(page);
	kunmap(page);
	unlock_page(page);
	return 0;

out_unlock_inode:
	up_read(&iinfo->i_data_sem);
	SetPageError(page);
out_unmap:
	kunmap(page);
	unlock_page(page);
	return err;
}
Example #12
/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (bio_data_dir(bio) == READ) {
			if (uptodate) {
				int enc_status = tenc_decrypt_page(page);
				if (enc_status == TENC_CAN_UNLOCK) {
					/* Decryption code is not interested. Unlock immediately */
					SetPageUptodate(page);
					unlock_page(page);
				}
				else if (enc_status == TENC_DECR_FAIL) {
					ClearPageUptodate(page);
					SetPageError(page);
					unlock_page(page);
				}
			} else {
				ClearPageUptodate(page);
				SetPageError(page);
				unlock_page(page);
			}
		} else { /* bio_data_dir(bio) == WRITE */
			if (!uptodate) {
				SetPageError(page);
				if (page->mapping)
					set_bit(AS_EIO, &page->mapping->flags);
			}
			end_page_writeback(page);
		}
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}
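Note the third, implicit case in the read branch: when tenc_decrypt_page() returns something other than TENC_CAN_UNLOCK or TENC_DECR_FAIL, the page is neither marked up to date nor unlocked, presumably leaving it to the asynchronous decryption code to finish and unlock it later.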
Example #13
static int read_one_page(struct page *page)
{
    void *page_data;
    int ret, max_block;
    ssize_t bytes_read = 0;
    struct inode *inode = page->mapping->host;
    const uint32_t blocksize = PAGE_CACHE_SIZE;  /* inode->i_blksize */
    const uint32_t blockbits = PAGE_CACHE_SHIFT; /* inode->i_blkbits */

    gossip_debug(GOSSIP_INODE_DEBUG, "pvfs2_readpage called with page %p\n",page);
    page_data = pvfs2_kmap(page);

    max_block = ((inode->i_size / blocksize) + 1);

    if (page->index < max_block)
    {
        loff_t blockptr_offset =
            (((loff_t)page->index) << blockbits);
        bytes_read = pvfs2_inode_read(
            inode, page_data, blocksize, &blockptr_offset, 0, inode->i_size);
    }
    /* only zero remaining unread portions of the page data */
    if (bytes_read > 0)
    {
        memset(page_data + bytes_read, 0, blocksize - bytes_read);
    }
    else
    {
        memset(page_data, 0, blocksize);
    }
    /* takes care of potential aliasing */
    flush_dcache_page(page);
    if (bytes_read < 0)
    {
        ret = bytes_read;
        SetPageError(page);
    }
    else
    {
        SetPageUptodate(page);
        if (PageError(page))
        {
            ClearPageError(page);
        }
        ret = 0;
    }
    pvfs2_kunmap(page);
    /* unlock the page after the ->readpage() routine completes */
    unlock_page(page);
    return ret;
}
Example #14
static void afs_file_readpage_read_complete(void *cookie_data,
					    struct page *page,
					    void *data,
					    int error)
{
	_enter("%p,%p,%p,%d", cookie_data, page, data, error);

	if (error)
		SetPageError(page);
	else
		SetPageUptodate(page);
	unlock_page(page);

} /* end afs_file_readpage_read_complete() */
Example #15
/*
 * read a page worth of data from the image
 */
static int romfs_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	loff_t offset, size;
	unsigned long fillsize, pos;
	void *buf;
	char *c_buf;
	unsigned long i;
	int ret;

	printk(KERN_INFO "romfs_readpage\n");

	buf = kmap(page);
	if (!buf) {
		unlock_page(page);
		return -ENOMEM;
	}

	/* 32 bit warning -- but not for us :) */
	offset = page_offset(page);
	size = i_size_read(inode);
	fillsize = 0;
	ret = 0;
	if (offset < size) {
		size -= offset;
		fillsize = size > PAGE_SIZE ? PAGE_SIZE : size;

		pos = ROMFS_I(inode)->i_dataoffset + offset;

		ret = romfs_dev_read(inode->i_sb, pos, buf, fillsize);
		if (ret < 0) {
			SetPageError(page);
			fillsize = 0;
			ret = -EIO;
		}
	}

	if (fillsize < PAGE_SIZE)
		memset(buf + fillsize, 0, PAGE_SIZE - fillsize);
	if (ret == 0)
		SetPageUptodate(page);

	/* zzq's encrypted: rewrite 'a' bytes in the data actually read.
	 * Bounded by fillsize rather than strlen(), since the page buffer
	 * is not guaranteed to be NUL-terminated. */
	c_buf = buf;
	for (i = 0; i < fillsize; i++)
		if (c_buf[i] == 'a')
			c_buf[i] = '*';

	flush_dcache_page(page);
	kunmap(page);
	unlock_page(page);
	return ret;
}
Example #16
/* this is helper for plugin->write_begin() */
int do_prepare_write(struct file *file, struct page *page, unsigned from,
		 unsigned to)
{
	int result;
	file_plugin *fplug;
	struct inode *inode;

	assert("umka-3099", file != NULL);
	assert("umka-3100", page != NULL);
	assert("umka-3095", PageLocked(page));

	if (to - from == PAGE_CACHE_SIZE || PageUptodate(page))
		return 0;

	inode = page->mapping->host;
	fplug = inode_file_plugin(inode);

	if (page->mapping->a_ops->readpage == NULL)
		return RETERR(-EINVAL);

	result = page->mapping->a_ops->readpage(file, page);
	if (result != 0) {
		SetPageError(page);
		ClearPageUptodate(page);
		/* All reiser4 readpage() implementations should return the
		 * page locked in case of error. */
		assert("nikita-3472", PageLocked(page));
	} else {
		/*
		 * ->readpage() either:
		 *
		 *     1. starts IO against @page. @page is locked for IO in
		 *     this case.
		 *
		 *     2. doesn't start IO. @page is unlocked.
		 *
		 * In either case, page should be locked.
		 */
		lock_page(page);
		/*
		 * IO (if any) is completed at this point. Check for IO
		 * errors.
		 */
		if (!PageUptodate(page))
			result = RETERR(-EIO);
	}
	assert("umka-3098", PageLocked(page));
	return result;
}
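The early return is the heart of this helper: a write covering the whole page (to - from == PAGE_CACHE_SIZE), or one against a page that is already up to date, needs no read-in. In every other case the bytes outside [from, to) would otherwise be lost, so ->readpage() is invoked and the page re-checked for errors under the page lock.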
Example #17
int j4fs_readpage_nolock(struct file *f, struct page *page)
{
    /* Lifted from yaffs2 */
    unsigned char *page_buf;
    int ret;
    struct address_space *mapping = page->mapping;
    struct inode *inode;
    j4fs_ctrl ctl;

    J4FS_T(J4FS_TRACE_FS_READ,("%s %d\n",__FUNCTION__,__LINE__));

    BUG_ON(!PageLocked(page));

    if (!mapping) BUG();

    inode = mapping->host;

    if (!inode) BUG();

    page_buf = kmap(page);
    /* FIXME: Can kmap fail? */

    j4fs_GrossLock();

    ctl.buffer=page_buf;
    ctl.count=PAGE_CACHE_SIZE;
    ctl.id=inode->i_ino;
    ctl.index=page->index << PAGE_CACHE_SHIFT;
    ret=fsd_read(&ctl);

    j4fs_GrossUnlock();

    if (ret >= 0)
        ret = 0;

    if (ret) {
        ClearPageUptodate(page);
        SetPageError(page);
    } else {
        SetPageUptodate(page);
        ClearPageError(page);
    }

    flush_dcache_page(page);
    kunmap(page);

    return ret;
}
Example #18
static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
	int i;

	fuse_invalidate_attr(req->pages[0]->mapping->host); /* atime changed */

	for (i = 0; i < req->num_pages; i++) {
		struct page *page = req->pages[i];
		if (!req->out.h.error)
			SetPageUptodate(page);
		else
			SetPageError(page);
		unlock_page(page);
	}
	fuse_put_request(fc, req);
}
Example #19
/* completion handler for single page bio-based read.

   mpage_end_io_read() would also do. But it's static.

*/
static void
end_bio_single_page_read(struct bio *bio, int err UNUSED_ARG)
{
	struct page *page;

	page = bio->bi_io_vec[0].bv_page;

	if (test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		SetPageUptodate(page);
	} else {
		ClearPageUptodate(page);
		SetPageError(page);
	}
	unlock_page(page);
	bio_put(bio);
}
Example #20
static int nfs_symlink_filler(struct inode *inode, struct page *page)
{
	int error;

	error = NFS_PROTO(inode)->readlink(inode, page, 0, PAGE_SIZE);
	if (error < 0)
		goto error;
	SetPageUptodate(page);
	unlock_page(page);
	return 0;

error:
	SetPageError(page);
	unlock_page(page);
	return -EIO;
}
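A filler like nfs_symlink_filler() is not called directly; it is handed to the page cache, which invokes it on a cache miss. A sketch of the calling side, after the pattern of NFS's follow_link path (error handling trimmed, and details vary by kernel version):

	struct page *page;

	/* Look up page 0 of the symlink's mapping, filling it via
	 * nfs_symlink_filler() if it is not already cached. */
	page = read_cache_page(&inode->i_data, 0,
			       (filler_t *)nfs_symlink_filler, inode);
	if (IS_ERR(page))
		return ERR_CAST(page);
	/* On success the page is up to date and holds the link target. */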
Example #21
/*
 * check that a directory page is valid
 */
static inline bool afs_dir_check_page(struct inode *dir, struct page *page)
{
	struct afs_dir_page *dbuf;
	loff_t latter;
	int tmp, qty;

#if 0
	/* check the page count */
	qty = desc.size / sizeof(dbuf->blocks[0]);
	if (qty == 0)
		goto error;

	if (page->index == 0 && qty != ntohs(dbuf->blocks[0].pagehdr.npages)) {
		printk("kAFS: %s(%lu): wrong number of dir blocks %d!=%hu\n",
		       __func__, dir->i_ino, qty,
		       ntohs(dbuf->blocks[0].pagehdr.npages));
		goto error;
	}
#endif

	/* determine how many magic numbers there should be in this page */
	latter = dir->i_size - page_offset(page);
	if (latter >= PAGE_SIZE)
		qty = PAGE_SIZE;
	else
		qty = latter;
	qty /= sizeof(union afs_dir_block);

	/* check them */
	dbuf = page_address(page);
	for (tmp = 0; tmp < qty; tmp++) {
		if (dbuf->blocks[tmp].pagehdr.magic != AFS_DIR_MAGIC) {
			printk("kAFS: %s(%lu): bad magic %d/%d is %04hx\n",
			       __func__, dir->i_ino, tmp, qty,
			       ntohs(dbuf->blocks[tmp].pagehdr.magic));
			goto error;
		}
	}

	SetPageChecked(page);
	return true;

error:
	SetPageError(page);
	return false;
}
Example #22
/**
 * ecryptfs_writepage_complete
 * @page_crypt_req: The encrypt page request that completed
 *
 * Called when the requested page has been encrypted and written to the lower
 * file system.
 */
static void ecryptfs_writepage_complete(
		struct ecryptfs_page_crypt_req *page_crypt_req)
{
	struct page *page = page_crypt_req->page;
	int rc;
	rc = atomic_read(&page_crypt_req->rc);
	if (unlikely(rc)) {
		ecryptfs_printk(KERN_WARNING, "Error encrypting "
				"page (upper index [0x%.16lx])\n", page->index);
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		SetPageUptodate(page);
	}
	end_page_writeback(page);
	ecryptfs_free_page_crypt_req(page_crypt_req);
}
Example #23
static int bdev_readpage(void *_sb, struct page *page)
{
	struct super_block *sb = _sb;
	struct block_device *bdev = logfs_super(sb)->s_bdev;
	int err;

	err = sync_request(page, bdev, READ);
	if (err) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		SetPageUptodate(page);
		ClearPageError(page);
	}
	unlock_page(page);
	return err;
}
Example #24
static void ext4_finish_bio(struct bio *bio)
{
	int i;
	int error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;
		struct buffer_head *bh, *head;
		unsigned bio_start = bvec->bv_offset;
		unsigned bio_end = bio_start + bvec->bv_len;
		unsigned under_io = 0;
		unsigned long flags;

		if (!page)
			continue;

		if (error) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
		}
		bh = head = page_buffers(page);
		/*
		 * We check all buffers in the page under BH_Uptodate_Lock
		 * to avoid races with other end io clearing async_write flags
		 */
		local_irq_save(flags);
		bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
		do {
			if (bh_offset(bh) < bio_start ||
			    bh_offset(bh) + bh->b_size > bio_end) {
				if (buffer_async_write(bh))
					under_io++;
				continue;
			}
			clear_buffer_async_write(bh);
			if (error)
				buffer_io_error(bh);
		} while ((bh = bh->b_this_page) != head);
		bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
		local_irq_restore(flags);
		if (!under_io)
			end_page_writeback(page);
	}
}
Example #25
void end_swap_bio_read(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct page *page = bio->bi_io_vec[0].bv_page;

	if (!uptodate) {
		SetPageError(page);
		ClearPageUptodate(page);
		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
				imajor(bio->bi_bdev->bd_inode),
				iminor(bio->bi_bdev->bd_inode),
				(unsigned long long)bio->bi_sector);
	} else {
		SetPageUptodate(page);
	}
	unlock_page(page);
	bio_put(bio);
}
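This is the same swap read-completion handler as in Example #4, here in a revision without the swap_slot_free_notify branch: a successful read simply marks the page up to date before the common unlock.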
Example #26
/* Now we cache directories properly, by stuffing the dirent
 * data directly in the page cache.
 *
 * Inode invalidation due to refresh etc. takes care of
 * _everything_, no sloppy entry flushing logic, no extraneous
 * copying, network direct to page cache, the way it was meant
 * to be.
 *
 * NOTE: Dirent information verification is always done by the
 *	 page-in of the RPC reply, nowhere else; this simplifies
 *	 things substantially.
 */
static
int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
{
	struct file	*file = desc->file;
	struct inode	*inode = file->f_dentry->d_inode;
	struct rpc_cred	*cred = nfs_file_cred(file);
	unsigned long	timestamp;
	int		error;

	dfprintk(VFS, "NFS: nfs_readdir_filler() reading cookie %Lu into page %lu.\n", (long long)desc->entry->cookie, page->index);

 again:
	timestamp = jiffies;
	error = NFS_PROTO(inode)->readdir(file->f_dentry, cred, desc->entry->cookie, page,
					  NFS_SERVER(inode)->dtsize, desc->plus);
	if (error < 0) {
		/* We requested READDIRPLUS, but the server doesn't grok it */
		if (error == -ENOTSUPP && desc->plus) {
			NFS_SERVER(inode)->caps &= ~NFS_CAP_READDIRPLUS;
			NFS_FLAGS(inode) &= ~NFS_INO_ADVISE_RDPLUS;
			desc->plus = 0;
			goto again;
		}
		goto error;
	}
	SetPageUptodate(page);
	NFS_FLAGS(inode) |= NFS_INO_INVALID_ATIME;
	/* Ensure consistent page alignment of the data.
	 * Note: assumes we have exclusive access to this mapping either
	 *	 through inode->i_sem or some other mechanism.
	 */
	if (page->index == 0) {
		invalidate_inode_pages(inode->i_mapping);
		NFS_I(inode)->readdir_timestamp = timestamp;
	}
	unlock_page(page);
	return 0;
 error:
	SetPageError(page);
	unlock_page(page);
	nfs_zap_caches(inode);
	desc->error = error;
	return -EIO;
}
Example #27
static int read_one_page(struct page *page)
{
	int ret;
	int max_block;
	ssize_t bytes_read = 0;
	struct inode *inode = page->mapping->host;
	const __u32 blocksize = PAGE_SIZE;	/* inode->i_blksize */
	const __u32 blockbits = PAGE_SHIFT;	/* inode->i_blkbits */
	struct iov_iter to;
	struct bio_vec bv = {.bv_page = page, .bv_len = PAGE_SIZE};

	iov_iter_bvec(&to, ITER_BVEC | READ, &bv, 1, PAGE_SIZE);

	gossip_debug(GOSSIP_INODE_DEBUG,
		    "orangefs_readpage called with page %p\n",
		     page);

	max_block = ((inode->i_size / blocksize) + 1);

	if (page->index < max_block) {
		loff_t blockptr_offset = (((loff_t) page->index) << blockbits);

		bytes_read = orangefs_inode_read(inode,
						 &to,
						 &blockptr_offset,
						 inode->i_size);
	}
	/* this will only zero remaining unread portions of the page data */
	iov_iter_zero(~0U, &to);
	/* takes care of potential aliasing */
	flush_dcache_page(page);
	if (bytes_read < 0) {
		ret = bytes_read;
		SetPageError(page);
	} else {
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
		ret = 0;
	}
	/* unlock the page after the ->readpage() routine completes */
	unlock_page(page);
	return ret;
}
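Compare this with Example #13, an older revision of the same function: the explicit memset() of the unread tail is gone. Wrapping the page in a bvec-backed iov_iter means iov_iter_zero(~0U, &to) can zero-fill exactly the part of the page the read did not consume.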
Example #28
int j4fs_commit_write(struct file *f, struct page *pg, unsigned offset, unsigned to)
{
    void *addr, *kva;

    loff_t pos = (((loff_t) pg->index) << PAGE_CACHE_SHIFT) + offset;
    int nBytes = to - offset;
    int nWritten;

    unsigned spos = pos;
    unsigned saddr;

    if(j4fs_panic==1) {
        J4FS_T(J4FS_TRACE_ALWAYS,("%s %d: j4fs panic\n",__FUNCTION__,__LINE__));
        return -ENOSPC;
    }

    if(offset+nBytes > PAGE_CACHE_SIZE) {
        J4FS_T(J4FS_TRACE_ALWAYS,("%s %d: page size overflow(offset,nBytes)=(%d,%d)\n",__FUNCTION__,__LINE__, offset, nBytes));
        j4fs_panic("page size overflow");
        return -ENOSPC;
    }

    kva = kmap(pg);
    addr = kva + offset;

    saddr = (unsigned) addr;

    J4FS_T(J4FS_TRACE_FS, ("j4fs_commit_write: (addr,pos,nBytes)=(0x%x, 0x%x, 0x%x)\n", saddr, spos, nBytes));

    nWritten = j4fs_file_write(f, addr, nBytes, &pos);

    if (nWritten != nBytes) {
        J4FS_T(J4FS_TRACE_ALWAYS, ("j4fs_commit_write: (nWritten,nBytes)=(0x%x 0x%x)\n", nWritten, nBytes));
        SetPageError(pg);
        ClearPageUptodate(pg);
    } else {
        SetPageUptodate(pg);
    }

    kunmap(pg);

    return nWritten == nBytes ? 0 : nWritten;
}
Example #29
int j4fs_write_end(struct file *filp, struct address_space *mapping,
                   loff_t pos, unsigned len, unsigned copied,
                   struct page *pg, void *fsdadata)
{
    int ret = 0;
    void *addr, *kva;
    uint32_t offset_into_page = pos & (PAGE_CACHE_SIZE - 1);

    if(j4fs_panic==1) {
        J4FS_T(J4FS_TRACE_ALWAYS,("%s %d: j4fs panic\n",__FUNCTION__,__LINE__));
        return -ENOSPC;
    }

    if(offset_into_page+copied > PAGE_CACHE_SIZE) {
        J4FS_T(J4FS_TRACE_ALWAYS,("%s %d: page size overflow(offset_into_page,copied)=(%d,%d)\n",__FUNCTION__,__LINE__,offset_into_page, copied));
        j4fs_panic("page size overflow");
        return -ENOSPC;
    }

    kva = kmap(pg);
    addr = kva + offset_into_page;

    J4FS_T(J4FS_TRACE_FS,
           ("j4fs_write_end addr %x pos %x nBytes %d\n",
            (unsigned) addr,
            (int)pos, copied));

    ret = j4fs_file_write(filp, addr, copied, &pos);

    if (ret != copied) {
        J4FS_T(J4FS_TRACE_ALWAYS, ("j4fs_write_end not same size ret %d  copied %d\n", ret, copied));
        SetPageError(pg);
        ClearPageUptodate(pg);
    } else {
        SetPageUptodate(pg);
    }

    kunmap(pg);

    unlock_page(pg);
    page_cache_release(pg);
    return ret;
}
Example #30
/**
 * ecryptfs_readpage_complete
 * @page_crypt_req: The decrypt page request that completed
 *
 * Called when the requested page has been read and decrypted.
 */
static void ecryptfs_readpage_complete(
		struct ecryptfs_page_crypt_req *page_crypt_req)
{
	struct page *page = page_crypt_req->page;
	int rc;
	rc = atomic_read(&page_crypt_req->rc);
	if (unlikely(rc)) {
		ecryptfs_printk(KERN_ERR, "Error decrypting page; "
				"rc = [%d]\n", rc);
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		SetPageUptodate(page);
	}
	ecryptfs_printk(KERN_DEBUG, "Unlocking page with index = [0x%.16lx]\n",
			page->index);
	unlock_page(page);
	ecryptfs_free_page_crypt_req(page_crypt_req);
}