Ejemplo n.º 1
0
/*
 * Old-style (filldir-based) readdir for logfs.  f_pos is offset by
 * IMPLICIT_NODES so that positions 0..IMPLICIT_NODES-1 cover the implicit
 * "." and ".." entries; each remaining position maps to one dentry block.
 * Returns 0, or a negative errno from the page cache read.
 */
static int __logfs_readdir(struct file *file, void *buf, filldir_t filldir)
{
	struct inode *dir = file->f_dentry->d_inode;
	loff_t pos = file->f_pos - IMPLICIT_NODES;
	struct page *page;
	struct logfs_disk_dentry *dd;
	int full;

	BUG_ON(pos < 0);
	for (;; pos++) {
		if (beyond_eof(dir, pos))
			break;
		if (!logfs_exist_block(dir, pos)) {
			/* deleted dentry: skip ahead to the next block with data */
			pos = dir_seek_data(dir, pos);
			continue;
		}
		page = read_cache_page(dir->i_mapping, pos,
				(filler_t *)logfs_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		dd = kmap(page);
		/* an existing block must hold a dentry with a non-empty name */
		BUG_ON(dd->namelen == 0);

		/* non-zero return means the caller's buffer is full */
		full = filldir(buf, (char *)dd->name, be16_to_cpu(dd->namelen),
				pos, be64_to_cpu(dd->ino), dd->type);
		kunmap(page);
		page_cache_release(page);
		if (full)
			break;
	}

	/* translate back into the externally visible f_pos space */
	file->f_pos = pos + IMPLICIT_NODES;
	return 0;
}
/*
 * gfs2_internal_read - read data from a gfs2 file via the page cache
 * @ip: the inode to read from
 * @ra_state: readahead state (unused here, kept for the caller interface)
 * @buf: destination buffer, at least @size bytes
 * @pos: byte offset to read from; advanced by @size on success
 * @size: number of bytes to read
 *
 * Reads @size bytes starting at *@pos, page by page, into @buf.
 * Returns @size on success or a negative errno from read_cache_page().
 *
 * Fix: the per-page clamp previously tested "offset + size", i.e. the
 * TOTAL request, instead of the REMAINING amount.  For a multi-page read
 * whose final chunk was smaller than a page, amt was forced to a full
 * page and the memcpy overran @buf.  Clamp on "offset + amt" as the
 * current upstream implementation does.
 */
int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
                       char *buf, loff_t *pos, unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos / PAGE_CACHE_SIZE;
	unsigned offset = *pos & (PAGE_CACHE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		/* clamp to what is left in this page, not the total request */
		if (offset + amt > PAGE_CACHE_SIZE)
			amt = PAGE_CACHE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page, KM_USER0);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p, KM_USER0);
		mark_page_accessed(page);
		page_cache_release(page);
		copied += amt;
		index++;
		offset = 0;	/* only the first page starts mid-page */
	} while(copied < size);
	(*pos) += size;
	return size;
}
Ejemplo n.º 3
0
/*
 * Resolve an NFS symlink for path walking.  Revalidates the inode, reads
 * the symlink body through the page cache, and hands the kmapped target
 * string to nd_set_link().  Returns the page (as the cookie later passed
 * to put_link) on success, or NULL after stashing an ERR_PTR in @nd.
 */
static void *nfs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct inode *inode = dentry->d_inode;
	struct page *page;
	/* ERR_PTR(0) == NULL, so err is non-NULL only on a real error */
	void *err = ERR_PTR(nfs_revalidate_inode(NFS_SERVER(inode), inode));
	if (err)
		goto read_failed;
	page = read_cache_page(&inode->i_data, 0,
				(filler_t *)nfs_symlink_filler, inode);
	if (IS_ERR(page)) {
		err = page;
		goto read_failed;
	}
	if (!PageUptodate(page)) {
		err = ERR_PTR(-EIO);
		goto getlink_read_error;
	}
	/* page stays kmapped until nfs_put_link() */
	nd_set_link(nd, kmap(page));
	return page;

getlink_read_error:
	page_cache_release(page);
read_failed:
	/* stash the error pointer for the VFS to pick up */
	nd_set_link(nd, err);
	return NULL;
}
/*
 * Find the given page, and call find_dirent() in order to try to
 * return the next entry.
 */
static inline
int find_dirent_page(nfs_readdir_descriptor_t *desc)
{
	struct inode	*inode = desc->file->f_dentry->d_inode;
	struct page	*page;
	int		status;

	dfprintk(VFS, "NFS: find_dirent_page() searching directory page %ld\n", desc->page_index);

	desc->plus = NFS_USE_READDIRPLUS(inode);
	/* nfs_readdir_filler fills the page from the wire readdir reply */
	page = read_cache_page(&inode->i_data, desc->page_index,
			       (filler_t *)nfs_readdir_filler, desc);
	if (IS_ERR(page)) {
		status = PTR_ERR(page);
		goto out;
	}
	if (!Page_Uptodate(page))
		goto read_error;

	/* NOTE: Someone else may have changed the READDIRPLUS flag */
	/* on success the page stays referenced and kmapped in desc;
	 * dir_page_release() undoes both on failure */
	desc->page = page;
	desc->ptr = kmap(page);
	status = find_dirent(desc, page);
	if (status < 0)
		dir_page_release(desc);
 out:
	dfprintk(VFS, "NFS: find_dirent_page() returns %d\n", status);
	return status;
 read_error:
	page_cache_release(page);
	return -EIO;
}
Ejemplo n.º 5
0
/*
 * get_link for NFS symlinks.  A NULL @dentry means we are in RCU-walk
 * mode: only lockless operations are allowed, so any miss returns
 * -ECHILD to force a ref-walk retry.  In ref-walk mode the symlink body
 * is read (or fetched) through the page cache.  On success the page is
 * handed to @done for release via page_put_link().
 */
static const char *nfs_get_link(struct dentry *dentry,
				struct inode *inode,
				struct delayed_call *done)
{
	struct page *page;
	void *err;

	if (!dentry) {
		/* RCU walk: no blocking, no page reads */
		err = ERR_PTR(nfs_revalidate_mapping_rcu(inode));
		if (err)
			return err;
		page = find_get_page(inode->i_mapping, 0);
		if (!page)
			return ERR_PTR(-ECHILD);
		if (!PageUptodate(page)) {
			put_page(page);
			return ERR_PTR(-ECHILD);
		}
	} else {
		err = ERR_PTR(nfs_revalidate_mapping(inode, inode->i_mapping));
		if (err)
			return err;
		page = read_cache_page(&inode->i_data, 0, nfs_symlink_filler,
				inode);
		if (IS_ERR(page))
			return ERR_CAST(page);
	}
	set_delayed_call(done, page_put_link, page);
	return page_address(page);
}
Ejemplo n.º 6
0
/**
 * vxfs_get_page - read a page into memory.
 * @ip:		inode to read from
 * @n:		page number
 *
 * Description:
 *   vxfs_get_page reads the @n th page of @ip into the pagecache.
 *
 * Returns:
 *   The wanted page on success, else a NULL pointer.
 */
struct page *
vxfs_get_page(struct address_space *mapping, u_long n)
{
	struct page *			pp;

	pp = read_cache_page(mapping, n,
			(filler_t*)mapping->a_ops->readpage, NULL);

	if (!IS_ERR(pp)) {
		wait_on_page(pp);
		/* page stays kmapped for the caller; vxfs_put_page() kunmaps */
		kmap(pp);
		if (!Page_Uptodate(pp))
			goto fail;
		/** if (!PageChecked(pp)) **/
			/** vxfs_check_page(pp); **/
		if (PageError(pp))
			goto fail;
	}

	return (pp);

fail:
	vxfs_put_page(pp);
	return ERR_PTR(-EIO);
}
Ejemplo n.º 7
0
/* read a range of the data via the page cache */
/*
 * Read @len bytes at offset @from of the block device backing this MTD,
 * one page-cache page at a time.  *@retlen is set to the number of bytes
 * actually copied even on error.
 */
static int blkmtd_read(struct mtd_info *mtd, loff_t from, size_t len,
                       size_t *retlen, u_char *buf)
{
    struct blkmtd_dev *dev = mtd->priv;
    int err = 0;
    int offset;
    int pagenr, pages;
    size_t thislen = 0;

    DEBUG(2, "blkmtd: read: dev = `%s' from = %ld len = %d buf = %p\n",
          mtd->name+9, (long int)from, len, buf);

    if(from > mtd->size)
        return -EINVAL;
    if(from + len > mtd->size)
        len = mtd->size - from;

    pagenr = from >> PAGE_SHIFT;
    offset = from - (pagenr << PAGE_SHIFT);

    /* total pages touched, counting the partial first/last pages */
    pages = (offset+len+PAGE_SIZE-1) >> PAGE_SHIFT;
    DEBUG(3, "blkmtd: read: pagenr = %d offset = %d, pages = %d\n",
          pagenr, offset, pages);

    while(pages) {
        struct page *page;
        int cpylen;

        DEBUG(3, "blkmtd: read: looking for page: %d\n", pagenr);
        page = read_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr, (filler_t *)blkmtd_readpage, dev);
        if(IS_ERR(page)) {
            err = -EIO;
            goto readerr;
        }

        /* bytes to take from this page: remaining len, clamped to page end */
        cpylen = (PAGE_SIZE > len) ? len : PAGE_SIZE;
        if(offset+cpylen > PAGE_SIZE)
            cpylen = PAGE_SIZE-offset;

        memcpy(buf + thislen, page_address(page) + offset, cpylen);
        offset = 0;
        len -= cpylen;
        thislen += cpylen;
        pagenr++;
        pages--;
        /* NOTE(review): dirty pages keep their reference here — presumably
         * the writeback path drops it; verify against the write code */
        if(!PageDirty(page))
            page_cache_release(page);
    }

readerr:
    if(retlen)
        *retlen = thislen;
    DEBUG(2, "blkmtd: end read: retlen = %d, err = %d\n", thislen, err);
    return err;
}
Ejemplo n.º 8
0
Archivo: blkmtd.c Proyecto: nhanh0/hah
/* read a range of the data via the page cache */
/*
 * Legacy (2.4-era) blkmtd read: loop through each backing page via
 * readpage() — slow but easy.  *@retlen accumulates the bytes copied.
 */
static int blkmtd_read(struct mtd_info *mtd, loff_t from, size_t len,
	     size_t *retlen, u_char *buf)
{
  mtd_raw_dev_data_t *rawdevice = mtd->priv;
  int err = 0;
  int offset;
  int pagenr, pages;

  *retlen = 0;

  DEBUG(2, "blkmtd: read: dev = `%s' from = %ld len = %d buf = %p\n",
	bdevname(rawdevice->binding->bd_dev), (long int)from, len, buf);

  pagenr = from >> PAGE_SHIFT;
  offset = from - (pagenr << PAGE_SHIFT);

  /* total pages touched, counting the partial first/last pages */
  pages = (offset+len+PAGE_SIZE-1) >> PAGE_SHIFT;
  DEBUG(3, "blkmtd: read: pagenr = %d offset = %d, pages = %d\n", pagenr, offset, pages);

  /* just loop through each page, getting it via readpage() - slow but easy */
  while(pages) {
    struct page *page;
    int cpylen;
    DEBUG(3, "blkmtd: read: looking for page: %d\n", pagenr);
    page = read_cache_page(&rawdevice->as, pagenr, (filler_t *)blkmtd_readpage, rawdevice->file);
    if(IS_ERR(page)) {
      return PTR_ERR(page);
    }
    /* 2.4 read_cache_page() may return a locked, not-yet-read page */
    wait_on_page(page);
    if(!Page_Uptodate(page)) {
      /* error reading page */
      printk("blkmtd: read: page not uptodate\n");
      page_cache_release(page);
      return -EIO;
    }

    /* bytes to take from this page: remaining len, clamped to page end */
    cpylen = (PAGE_SIZE > len) ? len : PAGE_SIZE;
    if(offset+cpylen > PAGE_SIZE)
      cpylen = PAGE_SIZE-offset;

    memcpy(buf + *retlen, page_address(page) + offset, cpylen);
    offset = 0;
    len -= cpylen;
    *retlen += cpylen;
    pagenr++;
    pages--;
    page_cache_release(page);
  }

  DEBUG(2, "blkmtd: end read: retlen = %d, err = %d\n", *retlen, err);
  return err;
}
Ejemplo n.º 9
0
/*
 * check a symbolic link to see whether it actually encodes a mountpoint
 * - sets the AFS_VNODE_MOUNTPOINT flag on the vnode appropriately
 */
int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key)
{
	struct page *page;
	size_t size;
	char *buf;
	int ret;

	_enter("{%x:%u,%u}",
	       vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);

	/* read the contents of the symlink into the pagecache */
	page = read_cache_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0,
			       afs_page_filler, key);
	if (IS_ERR(page)) {
		ret = PTR_ERR(page);
		goto out;
	}

	ret = -EIO;
	if (PageError(page))
		goto out_free;

	buf = kmap(page);

	/* examine the symlink's contents */
	size = vnode->status.size;
	_debug("symlink to %*.*s", (int) size, (int) size, buf);

	/* AFS mountpoint convention: "%cell:volume." or "#cell:volume." */
	if (size > 2 &&
	    (buf[0] == '%' || buf[0] == '#') &&
	    buf[size - 1] == '.'
	    ) {
		_debug("symlink is a mountpoint");
		spin_lock(&vnode->lock);
		set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
		/* let the VFS automount through this inode */
		vnode->vfs_inode.i_flags |= S_AUTOMOUNT;
		spin_unlock(&vnode->lock);
	}

	ret = 0;

	kunmap(page);
out_free:
	page_cache_release(page);
out:
	_leave(" = %d", ret);
	return ret;
}
Ejemplo n.º 10
0
/*
 * Read directory page @n of @dir into the page cache and return it
 * kmapped.  Returns the page on success or ERR_PTR(-EIO); the caller
 * must release it with dir_put_page().
 */
static struct page * dir_get_page(struct inode *dir, unsigned long n)
{
	struct address_space *mapping = dir->i_mapping;
	struct page *page = read_cache_page(mapping, n,
				(filler_t*)mapping->a_ops->readpage, NULL);
	if (!IS_ERR(page)) {
		/* 2.4-era API: page may still be locked mid-read */
		wait_on_page(page);
		kmap(page);
		if (!Page_Uptodate(page))
			goto fail;
	}
	return page;

fail:
	dir_put_page(page);
	return ERR_PTR(-EIO);
}
Ejemplo n.º 11
0
/*
 * get_mapping_page - fetch a page of the logfs device mapping inode
 * @sb: the superblock
 * @index: page index within the mapping
 * @use_filler: non-zero to read the page from the device, zero to just
 *	get (or create) a cache page without reading it
 *
 * Returns the page, an ERR_PTR from read_cache_page(), or NULL when
 * find_or_create_page() fails to allocate.
 *
 * Fix: find_or_create_page() returns NULL on allocation failure; the
 * original unconditionally called unlock_page(page) and would oops on
 * NULL.  Guard the unlock as upstream logfs does.
 */
static struct page *get_mapping_page(struct super_block *sb, pgoff_t index,
		int use_filler)
{
	struct logfs_super *super = logfs_super(sb);
	struct address_space *mapping = super->s_mapping_inode->i_mapping;
	filler_t *filler = super->s_devops->readpage;
	struct page *page;

	/* allocating with __GFP_FS here could recurse into logfs itself */
	BUG_ON(mapping_gfp_mask(mapping) & __GFP_FS);
	if (use_filler)
		page = read_cache_page(mapping, index, filler, sb);
	else {
		page = find_or_create_page(mapping, index, GFP_NOFS);
		if (page)
			unlock_page(page);
	}
	return page;
}
Ejemplo n.º 12
0
/**
 * ecryptfs_get1page
 *
 * Get one page from cache or lower f/s, return error otherwise.
 *
 * Returns unlocked and up-to-date page (if ok), with increased
 * refcnt.
 */
/**
 * ecryptfs_get1page
 * @file: the eCryptfs file whose mapping is read
 * @index: page index to fetch
 *
 * Get one page from cache or lower f/s, return error otherwise.
 *
 * Returns unlocked and up-to-date page (if ok), with increased
 * refcnt.
 */
static struct page *ecryptfs_get1page(struct file *file, int index)
{
	struct page *page;
	struct dentry *dentry;
	struct inode *inode;
	struct address_space *mapping;

	dentry = file->f_path.dentry;
	inode = dentry->d_inode;
	mapping = inode->i_mapping;
	/* the file is passed through as the filler's private data */
	page = read_cache_page(mapping, index,
			       (filler_t *)mapping->a_ops->readpage,
			       (void *)file);
	if (IS_ERR(page))
		goto out;
	wait_on_page_locked(page);
out:
	return page;
}
Ejemplo n.º 13
0
Archivo: dir.c Proyecto: 19Dan01/linux
/* FIXME: readdir currently has it's own dir_walk code.  I don't see a good
 * way to combine the two copies */
static int logfs_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *dir = file_inode(file);
	loff_t pos;
	struct page *page;
	struct logfs_disk_dentry *dd;

	if (ctx->pos < 0)
		return -EINVAL;

	/* positions 0 and 1 are "." and ".." */
	if (!dir_emit_dots(file, ctx))
		return 0;

	pos = ctx->pos - 2;
	BUG_ON(pos < 0);
	for (;; pos++, ctx->pos++) {
		bool full;
		if (beyond_eof(dir, pos))
			break;
		if (!logfs_exist_block(dir, pos)) {
			/* deleted dentry */
			pos = dir_seek_data(dir, pos);
			continue;
		}
		page = read_cache_page(dir->i_mapping, pos,
				(filler_t *)logfs_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		dd = kmap(page);
		/* an existing block must hold a dentry with a non-empty name */
		BUG_ON(dd->namelen == 0);

		/* dir_emit() returns false once the user buffer is full */
		full = !dir_emit(ctx, (char *)dd->name,
				be16_to_cpu(dd->namelen),
				be64_to_cpu(dd->ino), dd->type);
		kunmap(page);
		page_cache_release(page);
		if (full)
			break;
	}
	return 0;
}
Ejemplo n.º 14
0
/*
 * get a page into the pagecache
 */
/*
 * get a page into the pagecache
 * Returns the page kmapped and validated, or ERR_PTR(-EIO); release
 * with afs_dir_put_page().
 */
static struct page *afs_dir_get_page(struct inode *dir, unsigned long index,
				     struct key *key)
{
	struct page *page;
	_enter("{%lu},%lu", dir->i_ino, index);

	page = read_cache_page(dir->i_mapping, index, afs_page_filler, key);
	if (!IS_ERR(page)) {
		kmap(page);
		/* validate dir page contents once; PG_checked caches result */
		if (unlikely(!PageChecked(page))) {
			if (PageError(page) || !afs_dir_check_page(dir, page))
				goto fail;
		}
	}
	return page;

fail:
	afs_dir_put_page(page);
	_leave(" = -EIO");
	return ERR_PTR(-EIO);
}
Ejemplo n.º 15
0
/*
 * Read a page for logfs, falling back to a single statically allocated
 * emergency page (serialized by emergency_mutex) when the page cache
 * cannot supply one.
 *
 * NOTE(review): read_cache_page() returns either a valid page or an
 * ERR_PTR, both non-NULL, so "if (page)" appears to always take the
 * early return and the emergency path looks unreachable — presumably
 * the intended test was !IS_ERR(page); verify against callers before
 * changing behavior.
 */
struct page *emergency_read_begin(struct address_space *mapping, pgoff_t index)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	struct page *page;
	int err;

	page = read_cache_page(mapping, index, filler, NULL);
	if (page)
		return page;

	/* No more pages available, switch to emergency page */
	printk(KERN_INFO"Logfs: Using emergency page\n");
	mutex_lock(&emergency_mutex);
	err = filler(NULL, emergency_page);
	if (err) {
		mutex_unlock(&emergency_mutex);
		printk(KERN_EMERG"Logfs: Error reading emergency page\n");
		return ERR_PTR(err);
	}
	/* caller must hand the page back so emergency_mutex is dropped */
	return emergency_page;
}
Ejemplo n.º 16
0
/*
 * Resolve an NFS symlink: revalidate the mapping, read the symlink body
 * through the page cache, and hand the kmapped target to nd_set_link().
 * Returns the page (the cookie for put_link) on success, or NULL after
 * stashing an ERR_PTR in @nd.
 */
static void *nfs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct inode *inode = dentry->d_inode;
	struct page *page;
	void *err;

	/* ERR_PTR(0) == NULL, so err is non-NULL only on a real error */
	err = ERR_PTR(nfs_revalidate_mapping(inode, inode->i_mapping));
	if (err)
		goto read_failed;
	page = read_cache_page(&inode->i_data, 0,
				(filler_t *)nfs_symlink_filler, inode);
	if (IS_ERR(page)) {
		err = page;
		goto read_failed;
	}
	/* page stays kmapped until the corresponding put_link */
	nd_set_link(nd, kmap(page));
	return page;

read_failed:
	nd_set_link(nd, err);
	return NULL;
}
Ejemplo n.º 17
0
/*
 * Look up the on-disk dentry page for @dentry in directory @dir.
 * logfs resolves hash collisions by probing up to 20 candidate slots
 * (hash_index(hash, round)).  Returns the referenced page holding the
 * matching dentry, NULL when not found, or an ERR_PTR on failure.
 */
static struct page *logfs_get_dd_page(struct inode *dir, struct dentry *dentry)
{
	struct qstr *name = &dentry->d_name;
	struct page *page;
	struct logfs_disk_dentry *dd;
	u32 hash = hash_32(name->name, name->len, 0);
	pgoff_t index;
	int round;

	if (name->len > LOGFS_MAX_NAMELEN)
		return ERR_PTR(-ENAMETOOLONG);

	for (round = 0; round < 20; round++) {
		index = hash_index(hash, round);

		if (beyond_eof(dir, index))
			return NULL;
		if (!logfs_exist_block(dir, index))
			continue;
		page = read_cache_page(dir->i_mapping, index,
				(filler_t *)logfs_readpage, NULL);
		if (IS_ERR(page))
			return page;
		dd = kmap_atomic(page, KM_USER0);
		/* an existing block must hold a dentry with a non-empty name */
		BUG_ON(dd->namelen == 0);

		/* collision: same hash slot, different name — keep probing */
		if (name->len != be16_to_cpu(dd->namelen) ||
				memcmp(name->name, dd->name, name->len)) {
			kunmap_atomic(dd, KM_USER0);
			page_cache_release(page);
			continue;
		}

		kunmap_atomic(dd, KM_USER0);
		return page;
	}
	return NULL;
}
Ejemplo n.º 18
0
/* read a page from a file, pinning it into cache, and return bytes_read */
/* read a page from a file, pinning it into cache, and return bytes_read */
static struct page *read_page(struct file *file, unsigned long index,
                              unsigned long *bytes_read)
{
    struct inode *inode = file->f_mapping->host;
    struct page *page = NULL;
    loff_t isize = i_size_read(inode);
    unsigned long end_index = isize >> PAGE_SHIFT;

    PRINTK("read bitmap file (%dB @ %Lu)\n", (int)PAGE_SIZE,
           (unsigned long long)index << PAGE_SHIFT);

    page = read_cache_page(inode->i_mapping, index,
                           (filler_t *)inode->i_mapping->a_ops->readpage, file);
    if (IS_ERR(page))
        goto out;
    wait_on_page_locked(page);
    if (!PageUptodate(page) || PageError(page)) {
        put_page(page);
        /* return the error through the page pointer itself */
        page = ERR_PTR(-EIO);
        goto out;
    }

    if (index > end_index) /* we have read beyond EOF */
        *bytes_read = 0;
    else if (index == end_index) /* possible short read */
        *bytes_read = isize & ~PAGE_MASK;
    else
        *bytes_read = PAGE_SIZE; /* got a full page */
out:
    if (IS_ERR(page))
        printk(KERN_ALERT "md: bitmap read error: (%dB @ %Lu): %ld\n",
               (int)PAGE_SIZE,
               (unsigned long long)index << PAGE_SHIFT,
               PTR_ERR(page));
    return page;
}
Ejemplo n.º 19
0
/*
 * get a page into the pagecache
 */
/*
 * get a page into the pagecache
 * Returns the page kmapped and validated, or ERR_PTR(-EIO); release
 * with afs_dir_put_page().
 */
static struct page *afs_dir_get_page(struct inode *dir, unsigned long index)
{
	struct page *page;

	_enter("{%lu},%lu",dir->i_ino,index);

	page = read_cache_page(dir->i_mapping,index,
			       (filler_t*)dir->i_mapping->a_ops->readpage,NULL);
	if (!IS_ERR(page)) {
		wait_on_page_locked(page);
		kmap(page);
		if (!PageUptodate(page))
			goto fail;
		/* validate dir contents once; PG_checked caches the result */
		if (!PageChecked(page))
			afs_dir_check_page(dir,page);
		if (PageError(page))
			goto fail;
	}
	return page;

 fail:
	afs_dir_put_page(page);
	return ERR_PTR(-EIO);
} /* end afs_dir_get_page() */
Ejemplo n.º 20
0
Archivo: blkmtd.c Proyecto: nhanh0/hah
/*
 * Write @len bytes at offset @to of the block device backing this MTD.
 * The request is split into three regions: an unaligned head (read,
 * modify), a run of whole pages (grabbed from the page cache), and an
 * unaligned tail (read, modify).  All touched pages are collected in
 * @pages and submitted together via queue_page_write().
 *
 * Fix: the write_err cleanup loop used "while(--pagecnt)", which
 * pre-decrements and therefore never marks/releases pages[0], leaking
 * one page reference on every failed write.  Use "while(pagecnt--)" so
 * every collected page is released.
 */
static int blkmtd_write(struct mtd_info *mtd, loff_t to, size_t len,
	      size_t *retlen, const u_char *buf)
{
  mtd_raw_dev_data_t *rawdevice = mtd->priv;
  int err = 0;
  int offset;
  int pagenr;
  size_t len1 = 0, len2 = 0, len3 = 0;
  struct page **pages;
  int pagecnt = 0;

  *retlen = 0;
  DEBUG(2, "blkmtd: write: dev = `%s' to = %ld len = %d buf = %p\n",
	bdevname(rawdevice->binding->bd_dev), (long int)to, len, buf);

  /* handle readonly and out of range numbers */

  if(rawdevice->readonly) {
    printk("blkmtd: error: trying to write to a readonly device %s\n", device);
    return -EROFS;
  }

  if(to >= rawdevice->totalsize) {
    return -ENOSPC;
  }

  if(to + len > rawdevice->totalsize) {
    len = (rawdevice->totalsize - to);
  }


  pagenr = to >> PAGE_SHIFT;
  offset = to - (pagenr << PAGE_SHIFT);

  /* see if we have to do a partial write at the start */
  if(offset) {
    if((offset + len) > PAGE_SIZE) {
      len1 = PAGE_SIZE - offset;
      len -= len1;
    } else {
      len1 = len;
      len = 0;
    }
  }

  /* calculate the length of the other two regions */
  len3 = len & ~PAGE_MASK;
  len -= len3;
  len2 = len;


  if(len1)
    pagecnt++;
  if(len2)
    pagecnt += len2 >> PAGE_SHIFT;
  if(len3)
    pagecnt++;

  DEBUG(3, "blkmtd: write: len1 = %d len2 = %d len3 = %d pagecnt = %d\n", len1, len2, len3, pagecnt);

  /* get space for list of pages */
  pages = kmalloc(pagecnt * sizeof(struct page *), GFP_KERNEL);
  if(pages == NULL) {
    return -ENOMEM;
  }
  pagecnt = 0;

  if(len1) {
    /* do partial start region: read the page, patch the head in place */
    struct page *page;

    DEBUG(3, "blkmtd: write: doing partial start, page = %d len = %d offset = %d\n", pagenr, len1, offset);
    page = read_cache_page(&rawdevice->as, pagenr, (filler_t *)blkmtd_readpage, rawdevice->file);

    if(IS_ERR(page)) {
      kfree(pages);
      return PTR_ERR(page);
    }
    memcpy(page_address(page)+offset, buf, len1);
    pages[pagecnt++] = page;
    buf += len1;
    *retlen = len1;
    err = 0;
    pagenr++;
  }

  /* Now do the main loop to a page aligned, n page sized output */
  if(len2) {
    int pagesc = len2 >> PAGE_SHIFT;
    DEBUG(3, "blkmtd: write: whole pages start = %d, count = %d\n", pagenr, pagesc);
    while(pagesc) {
      struct page *page;

      /* see if page is in the page cache */
      DEBUG(3, "blkmtd: write: grabbing page %d from page cache\n", pagenr);
      page = grab_cache_page(&rawdevice->as, pagenr);
      DEBUG(3, "blkmtd: write: got page %d from page cache\n", pagenr);
      if(!page) {
	printk("blkmtd: write: cant grab cache page %d\n", pagenr);
	err = -EIO;
	goto write_err;
      }
      memcpy(page_address(page), buf, PAGE_SIZE);
      pages[pagecnt++] = page;
      UnlockPage(page);
      pagenr++;
      pagesc--;
      buf += PAGE_SIZE;
      *retlen += PAGE_SIZE;
    }
  }


  if(len3) {
    /* do the third region: read the page, patch the tail in place */
    struct page *page;
    DEBUG(3, "blkmtd: write: doing partial end, page = %d len = %d\n", pagenr, len3);
    page = read_cache_page(&rawdevice->as, pagenr, (filler_t *)blkmtd_readpage, rawdevice->file);
    if(IS_ERR(page)) {
      err = PTR_ERR(page);
      goto write_err;
    }
    memcpy(page_address(page), buf, len3);
    DEBUG(3, "blkmtd: write: writing out partial end\n");
    pages[pagecnt++] = page;
    *retlen += len3;
    err = 0;
  }
  DEBUG(2, "blkmtd: write: end, retlen = %d, err = %d\n", *retlen, err);
  /* submit it to the write task */
  err = queue_page_write(rawdevice, pages, to >> PAGE_SHIFT, pagecnt, 0);
  if(!err) {
    while(pagecnt--) {
      SetPageUptodate(pages[pagecnt]);
      flush_dcache_page(pages[pagecnt]);
      page_cache_release(pages[pagecnt]);
    }
    kfree(pages);
    return 0;
  }

 write_err:
  /* release every page collected so far, including pages[0] */
  while(pagecnt--) {
    SetPageError(pages[pagecnt]);
    page_cache_release(pages[pagecnt]);
  }
  kfree(pages);
  return err;
}
Ejemplo n.º 21
0
/**
 * write_pages - write block of data to device via the page cache
 * @dev: device to write to
 * @buf: data source or NULL if erase (output is set to 0xff)
 * @to: offset into output device
 * @len: amount to data to write
 * @retlen: amount of data written
 *
 * Grab pages from the page cache and fill them with the source data.
 * Non page aligned start and end result in a readin of the page and
 * part of the page being modified. Pages are added to the bio and then written
 * out.
 */
static int write_pages(struct blkmtd_dev *dev, const u_char *buf, loff_t to,
                       size_t len, size_t *retlen)
{
    int pagenr, offset;
    size_t start_len = 0, end_len;
    int pagecnt = 0;
    int err = 0;
    struct bio *bio = NULL;
    size_t thislen = 0;

    pagenr = to >> PAGE_SHIFT;
    offset = to & ~PAGE_MASK;

    DEBUG(2, "blkmtd: write_pages: buf = %p to = %ld len = %d pagenr = %d offset = %d\n",
          buf, (long)to, len, pagenr, offset);

    /* see if we have to do a partial write at the start */
    if(offset) {
        start_len = ((offset + len) > PAGE_SIZE) ? PAGE_SIZE - offset : len;
        len -= start_len;
    }

    /* calculate the length of the other two regions */
    end_len = len & ~PAGE_MASK;
    len -= end_len;

    if(start_len)
        pagecnt++;

    if(len)
        pagecnt += len >> PAGE_SHIFT;

    if(end_len)
        pagecnt++;

    /* serialize writers; pagecnt is also the bio vector budget */
    down(&dev->wrbuf_mutex);

    DEBUG(3, "blkmtd: write: start_len = %d len = %d end_len = %d pagecnt = %d\n",
          start_len, len, end_len, pagecnt);

    if(start_len) {
        /* do partial start region: read-modify-write the head page */
        struct page *page;

        DEBUG(3, "blkmtd: write: doing partial start, page = %d len = %d offset = %d\n",
              pagenr, start_len, offset);

        BUG_ON(!buf);
        /* NOTE(review): IS_ERR(page) is not checked before lock_page() —
         * a read failure here would oops; verify against the original driver */
        page = read_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr, (filler_t *)blkmtd_readpage, dev);
        lock_page(page);
        if(PageDirty(page)) {
            err("to = %lld start_len = %d len = %d end_len = %d pagenr = %d\n",
                to, start_len, len, end_len, pagenr);
            BUG();
        }
        memcpy(page_address(page)+offset, buf, start_len);
        SetPageDirty(page);
        SetPageUptodate(page);
        buf += start_len;
        thislen = start_len;
        bio = blkmtd_add_page(bio, dev->blkdev, page, pagecnt);
        if(!bio) {
            err = -ENOMEM;
            err("bio_add_page failed\n");
            goto write_err;
        }
        pagecnt--;
        pagenr++;
    }

    /* Now do the main loop to a page aligned, n page sized output */
    if(len) {
        int pagesc = len >> PAGE_SHIFT;
        DEBUG(3, "blkmtd: write: whole pages start = %d, count = %d\n",
              pagenr, pagesc);
        while(pagesc) {
            struct page *page;

            /* see if page is in the page cache */
            DEBUG(3, "blkmtd: write: grabbing page %d from page cache\n", pagenr);
            page = grab_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr);
            if(PageDirty(page)) {
                BUG();
            }
            if(!page) {
                warn("write: cannot grab cache page %d", pagenr);
                err = -ENOMEM;
                goto write_err;
            }
            /* NULL buf means erase: fill with 0xff like flash */
            if(!buf) {
                memset(page_address(page), 0xff, PAGE_SIZE);
            } else {
                memcpy(page_address(page), buf, PAGE_SIZE);
                buf += PAGE_SIZE;
            }
            bio = blkmtd_add_page(bio, dev->blkdev, page, pagecnt);
            if(!bio) {
                err = -ENOMEM;
                err("bio_add_page failed\n");
                goto write_err;
            }
            pagenr++;
            pagecnt--;
            SetPageDirty(page);
            SetPageUptodate(page);
            pagesc--;
            thislen += PAGE_SIZE;
        }
    }

    if(end_len) {
        /* do the third region: read-modify-write the tail page */
        struct page *page;
        DEBUG(3, "blkmtd: write: doing partial end, page = %d len = %d\n",
              pagenr, end_len);
        BUG_ON(!buf);
        /* NOTE(review): IS_ERR(page) unchecked here as well — verify */
        page = read_cache_page(dev->blkdev->bd_inode->i_mapping, pagenr, (filler_t *)blkmtd_readpage, dev);
        lock_page(page);
        if(PageDirty(page)) {
            err("to = %lld start_len = %d len = %d end_len = %d pagenr = %d\n",
                to, start_len, len, end_len, pagenr);
            BUG();
        }
        memcpy(page_address(page), buf, end_len);
        SetPageDirty(page);
        SetPageUptodate(page);
        DEBUG(3, "blkmtd: write: writing out partial end\n");
        thislen += end_len;
        bio = blkmtd_add_page(bio, dev->blkdev, page, pagecnt);
        if(!bio) {
            err = -ENOMEM;
            err("bio_add_page failed\n");
            goto write_err;
        }
        pagenr++;
    }

    DEBUG(3, "blkmtd: write: got %d vectors to write\n", bio->bi_vcnt);
write_err:
    /* whatever made it into the bio still gets written out */
    if(bio)
        blkmtd_write_out(bio);

    DEBUG(2, "blkmtd: write: end, retlen = %d, err = %d\n", *retlen, err);
    up(&dev->wrbuf_mutex);

    if(retlen)
        *retlen = thislen;
    return err;
}
Ejemplo n.º 22
0
/*
 * Returns a pointer to a buffer containing at least LEN bytes of
 * filesystem starting at byte offset OFFSET into the filesystem.
 */
/*
 * Returns a pointer to a buffer containing at least LEN bytes of
 * filesystem starting at byte offset OFFSET into the filesystem.
 */
static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned int len)
{
	struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
	struct page *pages[BLKS_PER_BUF];
	unsigned i, blocknr, buffer, unread;
	unsigned long devsize;
	int major, minor;

	char *data;

	if (!len)
		return NULL;
	blocknr = offset >> PAGE_CACHE_SHIFT;
	offset &= PAGE_CACHE_SIZE - 1;

	/* Check if an existing buffer already has the data.. */
	for (i = 0; i < READ_BUFFERS; i++) {
		unsigned int blk_offset;

		if (buffer_dev[i] != sb)
			continue;
		if (blocknr < buffer_blocknr[i])
			continue;
		blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_CACHE_SHIFT;
		blk_offset += offset;
		if (blk_offset + len > BUFFER_SIZE)
			continue;
		return read_buffers[i] + blk_offset;
	}

	/* device size in pages, preferring the blk_size table when present */
	devsize = mapping->host->i_size >> PAGE_CACHE_SHIFT;
	major = MAJOR(sb->s_dev);
	minor = MINOR(sb->s_dev);

	if (blk_size[major])
		devsize = blk_size[major][minor] >> 2;

	/* Ok, read in BLKS_PER_BUF pages completely first. */
	unread = 0;
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = NULL;

		if (blocknr + i < devsize) {
			page = read_cache_page(mapping, blocknr + i,
				(filler_t *)mapping->a_ops->readpage,
				NULL);
			/* synchronous error? */
			if (IS_ERR(page))
				page = NULL;
		}
		pages[i] = page;
	}

	/* second pass: wait for the reads to finish, drop failed pages */
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = pages[i];
		if (page) {
			wait_on_page(page);
			if (!Page_Uptodate(page)) {
				/* asynchronous error */
				page_cache_release(page);
				pages[i] = NULL;
			}
		}
	}

	/* round-robin over the READ_BUFFERS copy buffers */
	buffer = next_buffer;
	next_buffer = NEXT_BUFFER(buffer);
	buffer_blocknr[buffer] = blocknr;
	buffer_dev[buffer] = sb;

	data = read_buffers[buffer];
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = pages[i];
		if (page) {
			memcpy(data, kmap(page), PAGE_CACHE_SIZE);
			kunmap(page);
			page_cache_release(page);
		} else
			/* failed/absent pages read back as zeroes */
			memset(data, 0, PAGE_CACHE_SIZE);
		data += PAGE_CACHE_SIZE;
	}
	return read_buffers[buffer] + offset;
}
Ejemplo n.º 23
0
/*
 * Resolve a umsdos pseudo-hardlink: the link file's contents are a path
 * relative to the filesystem root; walk that path component by component
 * and return the target dentry (or an ERR_PTR).  Consumes the reference
 * on @hlink in all cases.
 */
struct dentry *umsdos_solve_hlink (struct dentry *hlink)
{
	/* root is our root for resolving pseudo-hardlink */
	struct dentry *base = hlink->d_sb->s_root;
	struct dentry *dentry_dst;
	char *path, *pt;
	int len;
	struct address_space *mapping = hlink->d_inode->i_mapping;
	struct page *page;

	page=read_cache_page(mapping,0,(filler_t *)mapping->a_ops->readpage,NULL);
	/* an ERR_PTR page doubles as the error return value */
	dentry_dst=(struct dentry *)page;
	if (IS_ERR(page))
		goto out;
	wait_on_page(page);
	if (!Page_Uptodate(page))
		goto async_fail;

	dentry_dst = ERR_PTR(-ENOMEM);
	path = (char *) kmalloc (PATH_MAX, GFP_KERNEL);
	if (path == NULL)
		goto out_release;
	memcpy(path, kmap(page), hlink->d_inode->i_size);
	kunmap(page);
	page_cache_release(page);

	len = hlink->d_inode->i_size;

	/* start at root dentry */
	dentry_dst = dget(base);
	path[len] = '\0';

	pt = path;
	if (*path == '/')
		pt++; /* skip leading '/' */

	if (base->d_inode == pseudo_root)
		pt += (UMSDOS_PSDROOT_LEN + 1);

	while (1) {
		struct dentry *dir = dentry_dst, *demd;
		char *start = pt;
		int real;

		/* carve out the next '/'-separated component in place */
		while (*pt != '\0' && *pt != '/') pt++;
		len = (int) (pt - start);
		if (*pt == '/') *pt++ = '\0';

		/* "real" = no EMD file in this dir, i.e. plain msdos lookup */
		real = 1;
		demd = umsdos_get_emd_dentry(dir);
		if (!IS_ERR(demd)) {
			if (demd->d_inode)
				real = 0;
			dput(demd);
		}

#ifdef UMSDOS_DEBUG_VERBOSE
printk ("umsdos_solve_hlink: dir %s/%s, name=%s, real=%d\n",
dir->d_parent->d_name.name, dir->d_name.name, start, real);
#endif
		dentry_dst = umsdos_lookup_dentry(dir, start, len, real);
/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
		if (real)
			d_drop(dir);
		dput (dir);
		if (IS_ERR(dentry_dst))
			break;
		/* not found? stop search ... */
		if (!dentry_dst->d_inode) {
			break;
		}
		if (*pt == '\0')	/* we're finished! */
			break;
	} /* end while */

	if (!IS_ERR(dentry_dst)) {
		struct inode *inode = dentry_dst->d_inode;
		if (inode) {
			inode->u.umsdos_i.i_is_hlink = 1;
#ifdef UMSDOS_DEBUG_VERBOSE
printk ("umsdos_solve_hlink: resolved link %s/%s, ino=%ld\n",
dentry_dst->d_parent->d_name.name, dentry_dst->d_name.name, inode->i_ino);
#endif
		} else {
#ifdef UMSDOS_DEBUG_VERBOSE
printk ("umsdos_solve_hlink: resolved link %s/%s negative!\n",
dentry_dst->d_parent->d_name.name, dentry_dst->d_name.name);
#endif
		}
	} else
		printk(KERN_WARNING
			"umsdos_solve_hlink: err=%ld\n", PTR_ERR(dentry_dst));
	kfree (path);

out:
	dput(hlink);	/* original hlink no longer needed */
	return dentry_dst;

async_fail:
	dentry_dst = ERR_PTR(-EIO);
out_release:
	page_cache_release(page);
	goto out;
}