Example #1
static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
{
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	unsigned char *pg_buf;
	int ret;

	jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n",
		  __func__, inode->i_ino, pg->index << PAGE_SHIFT);

	BUG_ON(!PageLocked(pg));

	pg_buf = kmap(pg);
	/* FIXME: Can kmap fail? */
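	/* Answer: kmap() itself cannot fail. For lowmem pages it is
	 * page_address(pg); for highmem pages it may sleep waiting for a
	 * mapping slot, but it always returns a valid address. */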

	ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_SHIFT,
				     PAGE_SIZE);

	if (ret) {
		ClearPageUptodate(pg);
		SetPageError(pg);
	} else {
		SetPageUptodate(pg);
		ClearPageError(pg);
	}

	flush_dcache_page(pg);
	kunmap(pg);

	jffs2_dbg(2, "readpage finished\n");
	return ret;
}
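For context, the _nolock suffix refers to the inode's f->sem lock, which the caller must hold; in the kernel this function is wrapped by the locking ->readpage roughly as follows (a sketch following fs/jffs2/file.c conventions, not part of the excerpt above):

static int jffs2_readpage(struct file *filp, struct page *pg)
{
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(pg->mapping->host);
	int ret;

	mutex_lock(&f->sem);
	ret = jffs2_do_readpage_nolock(pg->mapping->host, pg);
	mutex_unlock(&f->sem);
	unlock_page(pg);
	return ret;
}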
Example #2
/*
 * Populate a page with data for the Linux page cache.  This function is
 * only used to support mmap(2).  There will be an identical copy of the
 * data in the ARC which is kept up to date via .write() and .writepage().
 *
 * Currently this function relies on zpl_read_common() and the O_DIRECT
 * flag to read in a page.  This works but the more correct way is to
 * update zfs_fillpage() to be Linux friendly and use that interface.
 */
static int
zpl_readpage(struct file *filp, struct page *pp)
{
	struct inode *ip;
	struct page *pl[1];
	int error = 0;

	ASSERT(PageLocked(pp));
	ip = pp->mapping->host;
	pl[0] = pp;

	error = -zfs_getpage(ip, pl, 1);
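	/* Note: zfs_getpage() follows the Solaris convention of returning a
	 * positive errno, hence the negation to the negative errno that the
	 * Linux VFS expects. */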

	if (error) {
		SetPageError(pp);
		ClearPageUptodate(pp);
	} else {
		ClearPageError(pp);
		SetPageUptodate(pp);
		flush_dcache_page(pp);
	}

	unlock_page(pp);
	return (error);
}
Example #3
int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
{
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	unsigned char *pg_buf;
	int ret;

	D2(printk(KERN_DEBUG "jffs2_do_readpage_nolock(): ino #%lu, page at offset 0x%lx\n", inode->i_ino, pg->index << PAGE_CACHE_SHIFT));

	if (!PageLocked(pg))
		PAGE_BUG(pg);

	pg_buf = kmap(pg);
	/* FIXME: Can kmap fail? */

	ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE);

	if (ret) {
		ClearPageUptodate(pg);
		SetPageError(pg);
	} else {
		SetPageUptodate(pg);
		ClearPageError(pg);
	}

	flush_dcache_page(pg);
	kunmap(pg);

	D2(printk(KERN_DEBUG "readpage finished\n"));
	return ret;
}
Example #4
static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
{
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	unsigned char *pg_buf;
	int ret;

	BUG_ON(!PageLocked(pg));

	pg_buf = kmap(pg);
	/* FIXME: Can kmap fail? */

	ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE);

	if (ret) {
		ClearPageUptodate(pg);
		SetPageError(pg);
	} else {
		SetPageUptodate(pg);
		ClearPageError(pg);
	}

	flush_dcache_page(pg);
	kunmap(pg);

	return ret;
}
Example #5
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	struct ext4_io_page *io_page;
	struct buffer_head *bh, *head;
	int ret = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	ClearPageError(page);

	io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
	if (!io_page) {
		set_page_dirty(page);
		unlock_page(page);
		return -ENOMEM;
	}
	io_page->p_page = page;
	atomic_set(&io_page->p_count, 1);
	get_page(page);

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {
		block_end = block_start + blocksize;
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
		if (ret) {
			/*
			 * We only get here on ENOMEM.  Not much else
			 * we can do but mark the page as dirty, and
			 * better luck next time.
			 */
			set_page_dirty(page);
			break;
		}
	}
	unlock_page(page);
	/*
	 * If the page was truncated before we could do the writeback,
	 * or we had a memory allocation error while trying to write
	 * the first buffer head, we won't have submitted any pages for
	 * I/O.  In that case we need to make sure we've cleared the
	 * PageWriteback bit from the page to prevent the system from
	 * wedging later on.
	 */
	put_io_page(io_page);
	return ret;
}
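The comment before put_io_page() above only holds if that helper drops the last reference and ends writeback itself. A minimal sketch of what such a helper plausibly looks like, reconstructed from the refcounting in this function rather than copied from ext4:

static void put_io_page(struct ext4_io_page *io_page)
{
	if (atomic_dec_and_test(&io_page->p_count)) {
		end_page_writeback(io_page->p_page);
		put_page(io_page->p_page);
		kmem_cache_free(io_page_cachep, io_page);
	}
}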
Example #6
int ngffs_sysfile_do_readpage_nolock(struct inode *inode, struct page *pg)
{
	struct ngffs_info *ngsb=NGFFS_INFO(inode->i_sb);
	int i;
	int rv=0;
	__u32 offset;
	u_char *pg_buf;

	i=inode->i_ino-3;
	PK_DBG("sysfile found at %i\n",i);

	PK_DBG("sysfile read\n");
	if (!PageLocked(pg)) {
		/* PLEASECHECK Koen: PAGE_BUG has been removed as of 2.6.12 or so,
		 * no idea what it should be. */
		printk("page BUG for page at %p\n", pg);
		BUG();
	}

	pg_buf = kmap(pg);

	if(i>=NGFFS_SYSFILES) {
		PK_WARN("sysfile id out of range!\n");
		goto readpage_fail;
	}

	/* Determine offset */
	offset = ngffs_sysfiles[i].ofs;
	if ( ngsb->mtd->size >= 0x200000 ) /* >= 2MB */
	{
		/* factory data stored in upper half of flash */
		if ( offset < 0x4000 ) /* factory data */
		{
			offset += 0x100000; /* 1MB offset */
		}
	}

	printk("[kwwo] reading abs addr 0x%x\n", offset);
	rv=ngffs_absolute_read(ngsb->mtd,offset,(u_char *)page_address(pg),ngffs_sysfiles[i].length);
	if(rv) goto readpage_fail;

	SetPageUptodate(pg);
	ClearPageError(pg);
	flush_dcache_page(pg);
	kunmap(pg);
	return 0;

readpage_fail:
	ClearPageUptodate(pg);
	SetPageError(pg);
	kunmap(pg);
	return rv;

}
Example #7
static int yaffs_readpage_nolock(struct file *f, struct page *pg)
{
	yaffs_Object *obj;
	unsigned char *pg_buf;
	int ret;

	yaffs_Device *dev;

	T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_readpage at %08x, size %08x\n",
			   (unsigned)(pg->index << PAGE_CACHE_SHIFT),
			   (unsigned)PAGE_CACHE_SIZE));

	obj = yaffs_DentryToObject(f->f_dentry);

	dev = obj->myDev;

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
	BUG_ON(!PageLocked(pg));
#else
	if (!PageLocked(pg))
		PAGE_BUG(pg);
#endif

	pg_buf = kmap(pg);

	yaffs_GrossLock(dev);

	ret =
	    yaffs_ReadDataFromFile(obj, pg_buf, pg->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE);

	yaffs_GrossUnlock(dev);

	if (ret >= 0)
		ret = 0;

	if (ret) {
		ClearPageUptodate(pg);
		SetPageError(pg);
	} else {
		SetPageUptodate(pg);
		ClearPageError(pg);
	}

	flush_dcache_page(pg);
	kunmap(pg);

	T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_readpage done\n"));
	return ret;
}
Example #8
File: inode.c Project: sumitn/pvfs
static int read_one_page(struct page *page)
{
    void *page_data;
    int ret, max_block;
    ssize_t bytes_read = 0;
    struct inode *inode = page->mapping->host;
    const uint32_t blocksize = PAGE_CACHE_SIZE;  /* inode->i_blksize */
    const uint32_t blockbits = PAGE_CACHE_SHIFT; /* inode->i_blkbits */

    gossip_debug(GOSSIP_INODE_DEBUG, "pvfs2_readpage called with page %p\n",page);
    page_data = pvfs2_kmap(page);

    max_block = ((inode->i_size / blocksize) + 1);

    if (page->index < max_block)
    {
        loff_t blockptr_offset =
            (((loff_t)page->index) << blockbits);
        bytes_read = pvfs2_inode_read(
            inode, page_data, blocksize, &blockptr_offset, 0, inode->i_size);
    }
    /* only zero remaining unread portions of the page data */
    if (bytes_read > 0)
    {
        memset(page_data + bytes_read, 0, blocksize - bytes_read);
    }
    else
    {
        memset(page_data, 0, blocksize);
    }
    /* takes care of potential aliasing */
    flush_dcache_page(page);
    if (bytes_read < 0)
    {
        ret = bytes_read;
        SetPageError(page);
    }
    else
    {
        SetPageUptodate(page);
        if (PageError(page))
        {
            ClearPageError(page);
        }
        ret = 0;
    }
    pvfs2_kunmap(page);
    /* unlock the page after the ->readpage() routine completes */
    unlock_page(page);
    return ret;
}
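pvfs2_kmap() and pvfs2_kunmap() are not defined in this excerpt; they are presumably thin portability wrappers over the regular kmap API, i.e. something like:

#define pvfs2_kmap(page)	kmap(page)
#define pvfs2_kunmap(page)	kunmap(page)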
Example #9
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	struct ext4_io_page *io_page;
	struct buffer_head *bh, *head;
	int ret = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
	if (!io_page) {
		set_page_dirty(page);
		unlock_page(page);
		return -ENOMEM;
	}
	io_page->p_page = page;
	atomic_set(&io_page->p_count, 1);
	get_page(page);
	set_page_writeback(page);
	ClearPageError(page);

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {

		block_end = block_start + blocksize;
		if (block_start >= len) {
			zero_user_segment(page, block_start, block_end);
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		clear_buffer_dirty(bh);
		ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
		if (ret) {
			set_page_dirty(page);
			break;
		}
	}
	unlock_page(page);
	put_io_page(io_page);
	return ret;
}
Example #10
int j4fs_readpage_nolock(struct file *f, struct page *page)
{
    /* Lifted from yaffs2 */
    unsigned char *page_buf;
    int ret;
    struct address_space *mapping = page->mapping;
    struct inode *inode;
    j4fs_ctrl ctl;

    J4FS_T(J4FS_TRACE_FS_READ,("%s %d\n",__FUNCTION__,__LINE__));

    BUG_ON(!PageLocked(page));

    if (!mapping) BUG();

    inode = mapping->host;

    if (!inode) BUG();

    page_buf = kmap(page);
    /* FIXME: Can kmap fail? */

    j4fs_GrossLock();

    ctl.buffer=page_buf;
    ctl.count=PAGE_CACHE_SIZE;
    ctl.id=inode->i_ino;
    ctl.index=page->index << PAGE_CACHE_SHIFT;
    ret=fsd_read(&ctl);

    j4fs_GrossUnlock();

    if (ret >= 0)
        ret = 0;

    if (ret) {
        ClearPageUptodate(page);
        SetPageError(page);
    } else {
        SetPageUptodate(page);
        ClearPageError(page);
    }

    flush_dcache_page(page);
    kunmap(page);

    return ret;
}
Example #11
static int bdev_readpage(void *_sb, struct page *page)
{
	struct super_block *sb = _sb;
	struct block_device *bdev = logfs_super(sb)->s_bdev;
	int err;

	err = sync_request(page, bdev, READ);
	if (err) {
		ClearPageUptodate(page);
		SetPageError(page);
	} else {
		SetPageUptodate(page);
		ClearPageError(page);
	}
	unlock_page(page);
	return err;
}
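sync_request() is not shown. Below is a hedged sketch of a synchronous single-page bio request against the old block API this snippet implies (bi_bdev/bi_sector, pre-3.14 kernels); the helper name sync_end_io and the body are assumptions, not logfs code:

static void sync_end_io(struct bio *bio, int err)
{
	complete(bio->bi_private);
}

static int sync_request(struct page *page, struct block_device *bdev, int rw)
{
	struct bio *bio;
	struct completion done;
	int err = -EIO;

	bio = bio_alloc(GFP_NOFS, 1);
	if (!bio)
		return -ENOMEM;

	init_completion(&done);
	bio->bi_bdev = bdev;
	bio->bi_sector = page->index * (PAGE_SIZE >> 9); /* 512-byte sectors */
	bio->bi_private = &done;
	bio->bi_end_io = sync_end_io;

	if (bio_add_page(bio, page, PAGE_SIZE, 0) == PAGE_SIZE) {
		submit_bio(rw, bio);
		wait_for_completion(&done);
		err = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : -EIO;
	}
	bio_put(bio);
	return err;
}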
Example #12
static int read_one_page(struct page *page)
{
	int ret;
	int max_block;
	ssize_t bytes_read = 0;
	struct inode *inode = page->mapping->host;
	const __u32 blocksize = PAGE_SIZE;	/* inode->i_blksize */
	const __u32 blockbits = PAGE_SHIFT;	/* inode->i_blkbits */
	struct iov_iter to;
	struct bio_vec bv = {.bv_page = page, .bv_len = PAGE_SIZE};

	iov_iter_bvec(&to, ITER_BVEC | READ, &bv, 1, PAGE_SIZE);

	gossip_debug(GOSSIP_INODE_DEBUG,
		    "orangefs_readpage called with page %p\n",
		     page);

	max_block = ((inode->i_size / blocksize) + 1);

	if (page->index < max_block) {
		loff_t blockptr_offset = (((loff_t) page->index) << blockbits);

		bytes_read = orangefs_inode_read(inode,
						 &to,
						 &blockptr_offset,
						 inode->i_size);
	}
	/* this will only zero remaining unread portions of the page data */
	iov_iter_zero(~0U, &to);
	/* takes care of potential aliasing */
	flush_dcache_page(page);
	if (bytes_read < 0) {
		ret = bytes_read;
		SetPageError(page);
	} else {
		SetPageUptodate(page);
		if (PageError(page))
			ClearPageError(page);
		ret = 0;
	}
	/* unlock the page after the ->readpage() routine completes */
	unlock_page(page);
	return ret;
}
Example #13
static int
sf_writepage(struct page *page, struct writeback_control *wbc)
{
    struct address_space *mapping = page->mapping;
    struct inode *inode = mapping->host;
    struct sf_glob_info *sf_g = GET_GLOB_INFO(inode->i_sb);
    struct sf_inode_info *sf_i = GET_INODE_INFO(inode);
    struct file *file = sf_i->file;
    struct sf_reg_info *sf_r = file->private_data;
    char *buf;
    uint32_t nwritten = PAGE_SIZE;
    int end_index = inode->i_size >> PAGE_SHIFT;
    loff_t off = ((loff_t) page->index) << PAGE_SHIFT;
    int err;

    TRACE();

    if (page->index >= end_index)
        nwritten = inode->i_size & (PAGE_SIZE-1);

    buf = kmap(page);

    err = sf_reg_write_aux(__func__, sf_g, sf_r, buf, &nwritten, off);
    if (err < 0)
    {
        ClearPageUptodate(page);
        goto out;
    }

    if (off > inode->i_size)
        inode->i_size = off;

    if (PageError(page))
        ClearPageError(page);
    err = 0;

out:
    kunmap(page);

    unlock_page(page);
    return err;
}
Example #14
/* read one page from the block device */
static int blkmtd_readpage(struct blkmtd_dev *dev, struct page *page)
{
    struct bio *bio;
    struct completion event;
    int err = -ENOMEM;

    if(PageUptodate(page)) {
        DEBUG(2, "blkmtd: readpage page %ld is already up to date\n", page->index);
        unlock_page(page);
        return 0;
    }

    ClearPageUptodate(page);
    ClearPageError(page);

    bio = bio_alloc(GFP_KERNEL, 1);
    if(bio) {
        init_completion(&event);
        bio->bi_bdev = dev->blkdev;
        bio->bi_sector = page->index << (PAGE_SHIFT-9);
        bio->bi_private = &event;
        bio->bi_end_io = bi_read_complete;
        if(bio_add_page(bio, page, PAGE_SIZE, 0) == PAGE_SIZE) {
            submit_bio(READ_SYNC, bio);
            wait_for_completion(&event);
            err = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : -EIO;
            bio_put(bio);
        }
    }

    if(err)
        SetPageError(page);
    else
        SetPageUptodate(page);
    flush_dcache_page(page);
    unlock_page(page);
    return err;
}
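The completion callback bi_read_complete() referenced above is not shown; it is presumably just a completion wake-up, along the lines of the following (the exact end_io signature varies across 2.6 kernels):

static void bi_read_complete(struct bio *bio, int error)
{
	complete((struct completion *)bio->bi_private);
}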
Example #15
File: blkmtd.c Project: nhanh0/hah
/* readpage() - reads one page from the block device */                 
static int blkmtd_readpage(struct file *file, struct page *page)
{  
  int err;
  int sectornr, sectors, i;
  struct kiobuf *iobuf;
  mtd_raw_dev_data_t *rawdevice = (mtd_raw_dev_data_t *)file->private_data;
  kdev_t dev;

  if(!rawdevice) {
    printk("blkmtd: readpage: PANIC file->private_data == NULL\n");
    return -EIO;
  }
  dev = to_kdev_t(rawdevice->binding->bd_dev);

  DEBUG(2, "blkmtd: readpage called, dev = `%s' page = %p index = %ld\n",
	bdevname(dev), page, page->index);

  if(Page_Uptodate(page)) {
    DEBUG(1, "blkmtd: readpage page %ld is already up to date\n", page->index);
    UnlockPage(page);
    return 0;
  }

  ClearPageUptodate(page);
  ClearPageError(page);

  /* see if page is in the outgoing write queue */
  spin_lock(&mbd_writeq_lock);
  if(write_queue_cnt) {
    int i = write_queue_tail;
    while(i != write_queue_head) {
      mtdblkdev_write_queue_t *item = &write_queue[i];
      if(page->index >= item->pagenr && page->index < item->pagenr+item->pagecnt) {
	/* yes it is */
	int index = item->pagenr - page->index;
	DEBUG(1, "blkmtd: readpage: found page %ld in outgoing write queue\n",
	      page->index);
	if(item->iserase) {
	  memset(page_address(page), 0xff, PAGE_SIZE);
	} else {
	  memcpy(page_address(page), page_address(item->pages[index]), PAGE_SIZE);
	}
	SetPageUptodate(page);
	flush_dcache_page(page);
	UnlockPage(page);
	spin_unlock(&mbd_writeq_lock);
	return 0;
      }
      i++;
      i %= WRITE_QUEUE_SZ;
    }
  }
  spin_unlock(&mbd_writeq_lock);


  DEBUG(3, "blkmtd: readpage: getting kiovec\n");
  err = alloc_kiovec(1, &iobuf);
  if (err) {
    return err;
  }
  iobuf->offset = 0;
  iobuf->nr_pages = 1;
  iobuf->length = PAGE_SIZE;
  iobuf->locked = 1;
  iobuf->maplist[0] = page;
  sectornr = page->index << (PAGE_SHIFT - rawdevice->sector_bits);
  sectors = 1 << (PAGE_SHIFT - rawdevice->sector_bits);
  DEBUG(3, "blkmtd: readpage: sectornr = %d sectors = %d\n", sectornr, sectors);
  for(i = 0; i < sectors; i++) {
    iobuf->blocks[i] = sectornr++;
  }

  DEBUG(3, "bklmtd: readpage: starting brw_kiovec\n");
  err = brw_kiovec(READ, 1, &iobuf, dev, iobuf->blocks, rawdevice->sector_size);
  DEBUG(3, "blkmtd: readpage: finished, err = %d\n", err);
  iobuf->locked = 0;
  free_kiovec(1, &iobuf);
  if(err != PAGE_SIZE) {
    printk("blkmtd: readpage: error reading page %ld\n", page->index);
    memset(page_address(page), 0, PAGE_SIZE);
    SetPageError(page);
    err = -EIO;
  } else {
    DEBUG(3, "blkmtd: readpage: setting page up to date\n");
    SetPageUptodate(page);
    err = 0;
  }
  flush_dcache_page(page);
  UnlockPage(page);
  DEBUG(2, "blkmtd: readpage: finished, err = %d\n", err);
  return err;
}
Example #16
0
/* Read page, by pass page cache, always read from device. */
static int rawfs_readpage_nolock(struct file *filp, struct page *page)
{
    struct super_block *sb = filp->f_path.dentry->d_sb;
    struct inode *inode = filp->f_path.dentry->d_inode;
    struct rawfs_sb_info *rawfs_sb = RAWFS_SB(sb);
    struct rawfs_inode_info *inode_info = RAWFS_I(inode);
    loff_t pos;
    unsigned int curr_file_pos;
    unsigned int curr_buf_pos = 0;
    int remain_buf_size;
    unsigned size;
    int retval;
    unsigned char *pg_buf;

    /* kmap() cannot fail; for highmem pages it may sleep, but it always
       returns a valid mapping, so no NULL check is needed here. */
    pg_buf = kmap(page);

    size = i_size_read(inode);
    curr_file_pos = pos = page->index << PAGE_CACHE_SHIFT;
    retval = PAGE_CACHE_SIZE;

    RAWFS_PRINT(RAWFS_DBG_FILE, "rawfs_readpage %s @ folder %X, "
        "page_index %ld, pos %lld, len %d, filesize: %d, pg_buf %X\n",
        inode_info->i_name,
        inode_info->i_parent_folder_id,
        page->index,
        pos, retval, size, (unsigned)pg_buf);

    if ((retval + pos) >= size)
        retval = size - pos;

    if (pos < size) {
        {
            int preceding_pages, rear_pages;
            struct rawfs_page *page_buf = NULL;
            int i;

            // Prepare page buffer
            page_buf = kzalloc(rawfs_sb->page_size, GFP_NOFS);

            if (page_buf == NULL)
            {
                retval = -ENOMEM;
                goto out;
            }

            preceding_pages = FLOOR((unsigned)pos, rawfs_sb->page_data_size);
            rear_pages = CEILING((unsigned)pos + retval,
                rawfs_sb->page_data_size);
            remain_buf_size = retval;

            RAWFS_PRINT(RAWFS_DBG_FILE, "rawfs_readpage %s, "
                "preceding_pages %d, rear_pages %d, remain_buf_size %d ",
                inode_info->i_name, preceding_pages, rear_pages,
                remain_buf_size);

            /* Step 1: Copy preceding pages, if starting pos is not 0. */
            for (i=preceding_pages;i<rear_pages;i++)
            {
                // Read page
                rawfs_sb->dev.read_page(sb,
                    inode_info->i_location_block,
                    inode_info->i_location_page+i,
                    page_buf);

                /* Copy requried parts */
                {
                    int start_in_buf;
                    int copy_len;

                    start_in_buf = (curr_file_pos % rawfs_sb->page_data_size);
                    copy_len =  ((start_in_buf + remain_buf_size) >
                                  rawfs_sb->page_data_size) ?
                                (rawfs_sb->page_data_size - start_in_buf) :
                                remain_buf_size;

                    memcpy(pg_buf + curr_buf_pos,
                        &page_buf->i_data[0] + start_in_buf, copy_len);

                    RAWFS_PRINT(RAWFS_DBG_FILE, "rawfs_readpage %s, %d, "
                        "curr_buf_pos %d, remain_buf_size %d start_in_buf %d "
                        "copy_len %d starting pattern %X",
                        inode_info->i_name, i, curr_buf_pos, remain_buf_size,
                        start_in_buf, copy_len,
                        *(unsigned int*)(&page_buf->i_data[0] + start_in_buf));

                    curr_buf_pos    += copy_len;
                    remain_buf_size -= copy_len;
                }
            }

            if (page_buf)
                kfree(page_buf);
        }
    }
    else
        retval = 0;

out:
    if (retval < 0) {
        ClearPageUptodate(page);
        SetPageError(page);
    } else {
        SetPageUptodate(page);
        ClearPageError(page);
    }
    flush_dcache_page(page);
    kunmap(page);
    unlock_page(page);

    return (retval < 0) ? retval : 0;
}
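FLOOR() and CEILING() are not defined in this excerpt; from the way preceding_pages and rear_pages are computed and then used as page indices, they presumably perform round-down and round-up integer division, i.e. something like:

#define FLOOR(x, size)		((x) / (size))
#define CEILING(x, size)	(((x) + (size) - 1) / (size))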
Example #17
File: file.c Project: cilynx/dd-wrt
int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg)
{
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
	struct jffs2_node_frag *frag = f->fraglist;
	__u32 offset = pg->index << PAGE_CACHE_SHIFT;
	__u32 end = offset + PAGE_CACHE_SIZE;
	unsigned char *pg_buf;
	int ret;

	D1(printk(KERN_DEBUG "jffs2_do_readpage_nolock(): ino #%lu, page at offset 0x%x\n", inode->i_ino, offset));

	if (!PageLocked(pg))
		PAGE_BUG(pg);

	while(frag && frag->ofs + frag->size  <= offset) {
		/* Skip frags that end before the region we care about */
		frag = frag->next;
	}

	pg_buf = kmap(pg);

	/* XXX FIXME: Where a single physical node actually shows up in two
	   frags, we read it twice. Don't do that. */
	/* Now we're pointing at the first frag which overlaps our page */
	while(offset < end) {
		D2(printk(KERN_DEBUG "jffs2_readpage: offset %d, end %d\n", offset, end));
		if (!frag || frag->ofs > offset) {
			__u32 holesize = end - offset;
			if (frag) {
				D1(printk(KERN_NOTICE "Eep. Hole in ino %ld fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", inode->i_ino, frag->ofs, offset));
				holesize = min(holesize, frag->ofs - offset);
				D1(jffs2_print_frag_list(f));
			}
			D1(printk(KERN_DEBUG "Filling non-frag hole from %d-%d\n", offset, offset+holesize));
			memset(pg_buf, 0, holesize);
			pg_buf += holesize;
			offset += holesize;
			continue;
		} else if (frag->ofs < offset && (offset & (PAGE_CACHE_SIZE-1)) != 0) {
			D1(printk(KERN_NOTICE "Eep. Overlap in ino #%ld fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n",
				  inode->i_ino, frag->ofs, offset));
			D1(jffs2_print_frag_list(f));
			memset(pg_buf, 0, end - offset);
			ClearPageUptodate(pg);
			SetPageError(pg);
			kunmap(pg);
			return -EIO;
		} else if (!frag->node) {
			__u32 holeend = min(end, frag->ofs + frag->size);
			D1(printk(KERN_DEBUG "Filling frag hole from %d-%d (frag 0x%x 0x%x)\n", offset, holeend, frag->ofs, frag->ofs + frag->size));
			memset(pg_buf, 0, holeend - offset);
			pg_buf += holeend - offset;
			offset = holeend;
			frag = frag->next;
			continue;
		} else {
			__u32 readlen;
			__u32 fragofs; /* offset within the frag to start reading */

			fragofs = offset - frag->ofs;
			readlen = min(frag->size - fragofs, end - offset);
			D1(printk(KERN_DEBUG "Reading %d-%d from node at 0x%x\n", frag->ofs+fragofs, 
				  fragofs+frag->ofs+readlen, frag->node->raw->flash_offset & ~3));
			ret = jffs2_read_dnode(c, frag->node, pg_buf, fragofs + frag->ofs - frag->node->ofs, readlen);
			D2(printk(KERN_DEBUG "node read done\n"));
			if (ret) {
				D1(printk(KERN_DEBUG"jffs2_readpage error %d\n",ret));
				memset(pg_buf, 0, readlen);
				ClearPageUptodate(pg);
				SetPageError(pg);
				kunmap(pg);
				return ret;
			}
		
			pg_buf += readlen;
			offset += readlen;
			frag = frag->next;
			D2(printk(KERN_DEBUG "node read was OK. Looping\n"));
		}
	}
	D2(printk(KERN_DEBUG "readpage finishing\n"));
	SetPageUptodate(pg);
	ClearPageError(pg);

	flush_dcache_page(pg);

	kunmap(pg);
	D1(printk(KERN_DEBUG "readpage finished\n"));
	return 0;
}
Example #18
0
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	struct ext4_io_page *io_page;
	struct buffer_head *bh, *head;
	int ret = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
	if (!io_page) {
		set_page_dirty(page);
		unlock_page(page);
		return -ENOMEM;
	}
	io_page->p_page = page;
	atomic_set(&io_page->p_count, 1);
	get_page(page);
	set_page_writeback(page);
	ClearPageError(page);

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {

		block_end = block_start + blocksize;
		if (block_start >= len) {
			/*
			 * Comments copied from block_write_full_page_endio:
			 *
			 * The page straddles i_size.  It must be zeroed out on
			 * each and every writepage invocation because it may
			 * be mmapped.  "A file is mapped in multiples of the
			 * page size.  For a file that is not a multiple of
			 * the  page size, the remaining memory is zeroed when
			 * mapped, and writes to that region are not written
			 * out to the file."
			 */
			zero_user_segment(page, block_start, block_end);
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		clear_buffer_dirty(bh);
		ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
		if (ret) {
			set_page_dirty(page);
			break;
		}
	}
	unlock_page(page);
	put_io_page(io_page);
	return ret;
}
Example #19
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, block_end, blocksize;
	struct ext4_io_page *io_page;
	struct buffer_head *bh, *head;
	int ret = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
	if (!io_page) {
		set_page_dirty(page);
		unlock_page(page);
		return -ENOMEM;
	}
	io_page->p_page = page;
	atomic_set(&io_page->p_count, 1);
	get_page(page);
	set_page_writeback(page);
	ClearPageError(page);

	/*
	 * Comments copied from block_write_full_page_endio:
	 *
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (len < PAGE_CACHE_SIZE)
		zero_user_segment(page, len, PAGE_CACHE_SIZE);

	for (bh = head = page_buffers(page), block_start = 0;
	     bh != head || !block_start;
	     block_start = block_end, bh = bh->b_this_page) {

		block_end = block_start + blocksize;
		if (block_start >= len) {
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		clear_buffer_dirty(bh);
		ret = io_submit_add_bh(io, io_page, inode, wbc, bh);
		if (ret) {
			/*
			 * We only get here on ENOMEM.  Not much else
			 * we can do but mark the page as dirty, and
			 * better luck next time.
			 */
			set_page_dirty(page);
			break;
		}
	}
	unlock_page(page);
	/*
	 * If the page was truncated before we could do the writeback,
	 * or we had a memory allocation error while trying to write
	 * the first buffer head, we won't have submitted any pages for
	 * I/O.  In that case we need to make sure we've cleared the
	 * PageWriteback bit from the page to prevent the system from
	 * wedging later on.
	 */
	put_io_page(io_page);
	return ret;
}
Example #20
/*
 * Write a page synchronously.
 * Offset is the data offset within the page.
 */
static int
nfs_writepage_sync(struct file *file, struct inode *inode, struct page *page,
		   unsigned int offset, unsigned int count)
{
	struct rpc_cred	*cred = NULL;
	loff_t		base;
	unsigned int	wsize = NFS_SERVER(inode)->wsize;
	int		result, refresh = 0, written = 0, flags;
	u8		*buffer;
	struct nfs_fattr fattr;
	struct nfs_writeverf verf;


	if (file)
		cred = get_rpccred(nfs_file_cred(file));
	if (!cred)
		cred = get_rpccred(NFS_I(inode)->mm_cred);

	dprintk("NFS:      nfs_writepage_sync(%x/%Ld %d@%Ld)\n",
		inode->i_dev, (long long)NFS_FILEID(inode),
		count, (long long)(page_offset(page) + offset));

	buffer = kmap(page) + offset;
	base = page_offset(page) + offset;

	flags = ((IS_SWAPFILE(inode)) ? NFS_RW_SWAP : 0) | NFS_RW_SYNC;

	do {
		if (count < wsize && !IS_SWAPFILE(inode))
			wsize = count;

		result = NFS_PROTO(inode)->write(inode, cred, &fattr, flags,
						 base, wsize, buffer, &verf);
		nfs_write_attributes(inode, &fattr);

		if (result < 0) {
			/* Must mark the page invalid after I/O error */
			ClearPageUptodate(page);
			goto io_error;
		}
		if (result != wsize)
			printk("NFS: short write, wsize=%u, result=%d\n",
			wsize, result);
		refresh = 1;
		buffer  += wsize;
	        base    += wsize;
		written += wsize;
		count   -= wsize;
		/*
		 * If we've extended the file, update the inode
		 * now so we don't invalidate the cache.
		 */
		if (base > inode->i_size)
			inode->i_size = base;
	} while (count);

	if (PageError(page))
		ClearPageError(page);

io_error:
	kunmap(page);
	if (cred)
		put_rpccred(cred);

	return written? written : result;
}
Example #21
/*
 * Set up the argument/result storage required for the RPC call.
 */
static int nfs_write_rpcsetup(struct nfs_page *req,
		struct nfs_write_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset,
		int how)
{
	struct inode *inode = req->wb_context->path.dentry->d_inode;
	int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
	int priority = flush_task_priority(how);
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = req->wb_context->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = NFS_CLIENT(inode),
		.task = &data->task,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = flags,
		.priority = priority,
	};

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	data->req = req;
	data->inode = inode = req->wb_context->path.dentry->d_inode;
	data->cred = msg.rpc_cred;

	data->args.fh     = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages  = data->pagevec;
	data->args.count  = count;
	data->args.context = get_nfs_open_context(req->wb_context);
	data->args.stable  = NFS_UNSTABLE;
	if (how & FLUSH_STABLE) {
		data->args.stable = NFS_DATA_SYNC;
		if (!nfs_need_commit(NFS_I(inode)))
			data->args.stable = NFS_FILE_SYNC;
	}

	data->res.fattr   = &data->fattr;
	data->res.count   = count;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct.  */
	NFS_PROTO(inode)->write_setup(data, &msg);

	dprintk("NFS: %5u initiated write call "
		"(req %s/%lld, %u bytes @ offset %llu)\n",
		data->task.tk_pid,
		inode->i_sb->s_id,
		(long long)NFS_FILEID(inode),
		count,
		(unsigned long long)data->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}

/* If a nfs_flush_* function fails, it should remove reqs from @head and
 * call this on each, which will prepare them to be retried on next
 * writeback using standard nfs.
 */
static void nfs_redirty_request(struct nfs_page *req)
{
	nfs_mark_request_dirty(req);
	nfs_end_page_writeback(req->wb_page);
	nfs_clear_page_tag_locked(req);
}

/*
 * Generate multiple small requests to write out a single
 * contiguous dirty area on one page.
 */
static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_write_data *data;
	size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
	unsigned int offset;
	int requests = 0;
	int ret = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = count;
	do {
		size_t len = min(nbytes, wsize);

		data = nfs_writedata_alloc(1);
		if (!data)
			goto out_bad;
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while (nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
	nbytes = count;
	do {
		int ret2;

		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes < wsize)
			wsize = nbytes;
		ret2 = nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
				   wsize, offset, how);
		if (ret == 0)
			ret = ret2;
		offset += wsize;
		nbytes -= wsize;
	} while (nbytes != 0);

	return ret;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_write_data, pages);
		list_del(&data->pages);
		nfs_writedata_release(data);
	}
	nfs_redirty_request(req);
	return -ENOMEM;
}

/*
 * Create an RPC task for the given write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
{
	struct nfs_page		*req;
	struct page		**pages;
	struct nfs_write_data	*data;

	data = nfs_writedata_alloc(npages);
	if (!data)
		goto out_bad;

	pages = data->pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
	}
	req = nfs_list_entry(data->pages.next);

	/* Set up the argument struct */
	return nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);
 out_bad:
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_redirty_request(req);
	}
	return -ENOMEM;
}

static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
				  struct inode *inode, int ioflags)
{
	size_t wsize = NFS_SERVER(inode)->wsize;

	if (wsize < PAGE_CACHE_SIZE)
		nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags);
	else
		nfs_pageio_init(pgio, inode, nfs_flush_one, wsize, ioflags);
}
Example #22
File: file.c Project: mnv104/capfs
/* capfs_readpage()
 *
 * Inside the page structure are "offset", the offset into the file, and
 * the page address, which can be found with "page_address(page)".
 *
 * See fs/nfs/read.c for readpage example.
 */
static int capfs_readpage(struct file *file, struct page *page)
{
	int error = 0;
	struct inode *inode;
	char *buf;
	capfs_off_t offset;
	size_t count = PAGE_SIZE;
	/* update the statistics */
	if(capfs_collect_stats) capfs_vfs_stat.readpage++;   
	PENTRY;

	/* from generic_readpage() */
	get_page(page);
	/* from brw_page() */
	ClearPageUptodate(page);
	ClearPageError(page);

	/* this should help readpage work correctly for big mem machines */
	buf = (char *)kmap(page);

	offset = ((loff_t)page->index) << PAGE_CACHE_SHIFT;

	inode = file->f_dentry->d_inode;

	/* Added by Alan Rainey 9-10-2003 */
	if(strcmp(file->f_dentry->d_name.name, (char *)(strrchr(CAPFS_I(inode)->name, '/')+1)))
	{
		if ((error = capfs_inode_getattr(file->f_dentry)) < 0) {
			put_page(page);
			PEXIT;
			return error;
		}
	}

	memset(buf, 0, count);

	PDEBUG(D_FILE, "capfs_readpage called for %s (%ld), offset %ld, size %ld\n",
			CAPFS_I(inode)->name, (unsigned long) CAPFS_I(inode)->handle,
	(long) offset, (long) count);

	error = ll_capfs_file_read(CAPFS_I(inode), buf, count, &offset, 1);
	if (error <= 0) 
	{
		SetPageError(page);
	}
	else
	{
		SetPageUptodate(page);
		ClearPageError(page);
	}
	flush_dcache_page(page);
	kunmap(page);
	unlock_page(page);
	put_page(page); 

	PEXIT;
	return error;
}