/*
 * Find the given page, and call find_dirent() in order to try to
 * return the next entry.
 *
 * Returns 0 (or a positive find_dirent() result) on success and a
 * negative errno on failure.  On success, desc->page is pinned and
 * desc->ptr points at its kmap()ed contents; find_dirent() failure
 * drops both via dir_page_release().
 */
static inline
int find_dirent_page(nfs_readdir_descriptor_t *desc)
{
	struct inode	*inode = desc->file->f_dentry->d_inode;
	struct page	*page;
	int		status;

	dfprintk(VFS, "NFS: find_dirent_page() searching directory page %ld\n", desc->page_index);

	/* Record whether READDIRPLUS is in use before filling the page;
	 * nfs_readdir_filler reads desc->plus. */
	desc->plus = NFS_USE_READDIRPLUS(inode);
	page = read_cache_page(&inode->i_data, desc->page_index,
			       (filler_t *)nfs_readdir_filler, desc);
	if (IS_ERR(page)) {
		status = PTR_ERR(page);
		goto out;
	}
	/* Filler ran but the page never became uptodate: treat as I/O error.
	 * Note the page has not been kmap()ed yet on this path. */
	if (!Page_Uptodate(page))
		goto read_error;

	/* NOTE: Someone else may have changed the READDIRPLUS flag */
	desc->page = page;
	desc->ptr = kmap(page);
	status = find_dirent(desc, page);
	if (status < 0)
		dir_page_release(desc);	/* kunmap + release on failure */
 out:
	dfprintk(VFS, "NFS: find_dirent_page() returns %d\n", status);
	return status;
 read_error:
	page_cache_release(page);
	return -EIO;
}
/**
 * vxfs_get_page - read a page into memory.
 * @mapping: address space to read from
 * @n: page number
 *
 * Description:
 * vxfs_get_page reads the @n th page of @mapping into the pagecache
 * and returns it kmap()ed.
 *
 * Returns:
 * The wanted page on success, else an ERR_PTR value.
 */
struct page *
vxfs_get_page(struct address_space *mapping, u_long n)
{
	struct page *page;

	page = read_cache_page(mapping, n,
			(filler_t *)mapping->a_ops->readpage, NULL);
	if (IS_ERR(page))
		return page;

	wait_on_page(page);
	kmap(page);
	if (!Page_Uptodate(page))
		goto fail;
	/** if (!PageChecked(page)) **/
	/**	vxfs_check_page(page); **/
	if (PageError(page))
		goto fail;

	return page;

fail:
	vxfs_put_page(page);
	return ERR_PTR(-EIO);
}
/*
 * Prepare a page for a partial write: if the page is not already
 * uptodate and the write does not cover the whole page, read the
 * existing contents in first so the untouched bytes survive.
 *
 * Returns 0, or the error from yaffs_readpage_nolock().
 */
static int yaffs_prepare_write(struct file *f, struct page *pg,
			       unsigned offset, unsigned to)
{
	/* Fix: trace message previously misspelled "yaffs_prepair_write". */
	T(YAFFS_TRACE_OS, (KERN_DEBUG "yaffs_prepare_write\n"));

	/* Whole-page writes (offset==0 && to==PAGE_CACHE_SIZE) need no
	 * read-modify-write; everything else on a stale page does. */
	if (!Page_Uptodate(pg) && (offset || to < PAGE_CACHE_SIZE))
		return yaffs_readpage_nolock(f, pg);

	return 0;
}
/*
 * Prepare a ramdisk page for writing.  A page that has never been
 * filled is zeroed and marked uptodate; the page is always marked
 * dirty so the data is retained.  Always returns 0.
 */
static int ramdisk_prepare_write(struct file *file, struct page *page, unsigned offset, unsigned to)
{
	if (Page_Uptodate(page)) {
		SetPageDirty(page);
		return 0;
	}

	/* Fresh page: present zeroed contents before the write lands. */
	memset(page_address(page), 0, PAGE_CACHE_SIZE);
	flush_dcache_page(page);
	SetPageUptodate(page);
	SetPageDirty(page);
	return 0;
}
/*
 * Copyright (C) 2000 Linus Torvalds.
 *               2000 Transmeta Corp.
 * aops copied from ramfs.
 */
/*
 * "Read" a ramdisk page: there is no backing store, so a page that is
 * not yet uptodate is simply zero-filled.  Always returns 0 with the
 * page unlocked.
 */
static int ramdisk_readpage(struct file *file, struct page * page)
{
	if (!Page_Uptodate(page)) {
		void *kaddr = kmap(page);

		memset(kaddr, 0, PAGE_CACHE_SIZE);
		kunmap(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
	}
	UnlockPage(page);
	return 0;
}
/* read a range of the data via the page cache */
static int blkmtd_read(struct mtd_info *mtd, loff_t from, size_t len,
		       size_t *retlen, u_char *buf)
{
	mtd_raw_dev_data_t *rawdevice = mtd->priv;
	int err = 0;
	int offset;
	int pagenr, pages;

	/* *retlen is advanced as each page's worth is copied, so callers
	 * see the partial count even on an early error return. */
	*retlen = 0;
	DEBUG(2, "blkmtd: read: dev = `%s' from = %ld len = %d buf = %p\n",
	      bdevname(rawdevice->binding->bd_dev), (long int)from, len, buf);

	/* Split [from, from+len) into first page number, offset within
	 * that page, and the total number of pages touched. */
	pagenr = from >> PAGE_SHIFT;
	offset = from - (pagenr << PAGE_SHIFT);
	pages = (offset+len+PAGE_SIZE-1) >> PAGE_SHIFT;
	DEBUG(3, "blkmtd: read: pagenr = %d offset = %d, pages = %d\n",
	      pagenr, offset, pages);

	/* just loop through each page, getting it via readpage() - slow but easy */
	while (pages) {
		struct page *page;
		int cpylen;

		DEBUG(3, "blkmtd: read: looking for page: %d\n", pagenr);
		/* Fetch from the page cache; blkmtd_readpage() is run as the
		 * filler when the page is not already cached. */
		page = read_cache_page(&rawdevice->as, pagenr,
				       (filler_t *)blkmtd_readpage, rawdevice->file);
		if (IS_ERR(page)) {
			return PTR_ERR(page);
		}
		wait_on_page(page);
		if (!Page_Uptodate(page)) {
			/* error reading page */
			printk("blkmtd: read: page not uptodate\n");
			page_cache_release(page);
			return -EIO;
		}
		/* Clip the copy to the remaining length, then to the end of
		 * the page when the first page is entered mid-page. */
		cpylen = (PAGE_SIZE > len) ? len : PAGE_SIZE;
		if (offset+cpylen > PAGE_SIZE)
			cpylen = PAGE_SIZE-offset;
		memcpy(buf + *retlen, page_address(page) + offset, cpylen);
		offset = 0;	/* only the first page starts at a non-zero offset */
		len -= cpylen;
		*retlen += cpylen;
		pagenr++;
		pages--;
		page_cache_release(page);
	}

	DEBUG(2, "blkmtd: end read: retlen = %d, err = %d\n", *retlen, err);
	return err;
}
static struct page * dir_get_page(struct inode *dir, unsigned long n) { struct address_space *mapping = dir->i_mapping; struct page *page = read_cache_page(mapping, n, (filler_t*)mapping->a_ops->readpage, NULL); if (!IS_ERR(page)) { wait_on_page(page); kmap(page); if (!Page_Uptodate(page)) goto fail; } return page; fail: dir_put_page(page); return ERR_PTR(-EIO); }
/*
 * Prepare a page for a partial write.  Refuses with -ENOSPC when the
 * filesystem is in the panic state or when the write range exceeds a
 * page; otherwise reads the page in first if a partial write would
 * land on stale contents.
 */
int j4fs_prepare_write(struct file *f, struct page *pg, unsigned offset, unsigned to)
{
	if (j4fs_panic == 1) {
		J4FS_T(J4FS_TRACE_ALWAYS, ("%s %d: j4fs panic\n", __FUNCTION__, __LINE__));
		return -ENOSPC;
	}

	J4FS_T(J4FS_TRACE_FS, ("\nj4fs_prepare_write\n"));

	if (to > PAGE_CACHE_SIZE) {
		J4FS_T(J4FS_TRACE_ALWAYS, ("%s %d: page size overflow(offset,to)=(%d,%d)\n", __FUNCTION__, __LINE__, offset, to));
		j4fs_panic("page size overflow");
		return -ENOSPC;
	}

	/* Uptodate pages and full-page writes need no read-modify-write. */
	if (Page_Uptodate(pg))
		return 0;
	if (!offset && to >= PAGE_CACHE_SIZE)
		return 0;

	return j4fs_readpage_nolock(f, pg);
}
/*
 * write_begin hook: locate/lock the page covering @pos, reserve
 * filesystem space for a full page, and read the page in when the
 * write is partial.  On success the locked page is returned through
 * @pagep; on failure it is unlocked and released here.
 */
int j4fs_write_begin(struct file *filp, struct address_space *mapping,
		     loff_t pos, unsigned len, unsigned flags,
		     struct page **pagep, void **fsdata)
{
	struct page *pg = NULL;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	uint32_t offset = pos & (PAGE_CACHE_SIZE - 1);
	uint32_t to = offset + len;
	int ret = 0;
	int space_held = 0;

	/* Refuse all writes once the filesystem has paniced. */
	if(j4fs_panic==1) {
		J4FS_T(J4FS_TRACE_ALWAYS,("%s %d: j4fs panic\n",__FUNCTION__,__LINE__));
		return -ENOSPC;
	}

	J4FS_T(J4FS_TRACE_FS, ("start j4fs_write_begin\n"));

	/* A single write_begin must not span past the end of the page. */
	if(to>PAGE_CACHE_SIZE) {
		J4FS_T(J4FS_TRACE_ALWAYS,("%s %d: page size overflow(pos,index,offset,len,to)=(%d,%d,%d,%d,%d)\n",__FUNCTION__,__LINE__,pos,index,offset,len,to));
		j4fs_panic("page size overflow");
		return -ENOSPC;
	}

	/* Get a page */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
	pg = grab_cache_page_write_begin(mapping, index, flags);
#else
	pg = __grab_cache_page(mapping, index);
#endif

	*pagep = pg;
	if (!pg) {
		ret = -ENOMEM;
		goto out;
	}

	/* Get fs space */
	space_held = j4fs_hold_space(PAGE_CACHE_SIZE);

	if (!space_held) {
		ret = -ENOSPC;
		goto out;
	}

	/* Update page if required */
	/* Partial write onto a non-uptodate page: read it in first. */
	if (!Page_Uptodate(pg) && (offset || to < PAGE_CACHE_SIZE))
		ret = j4fs_readpage_nolock(filp, pg);

	if (ret)
		goto out;

	/* Happy path return */
	J4FS_T(J4FS_TRACE_FS, ("end j4fs_write_begin - ok\n"));
	return 0;

out:
	/* NOTE(review): on this error path the space obtained from
	 * j4fs_hold_space() is not visibly released; presumably it is
	 * reclaimed elsewhere (e.g. write_end) — verify against callers. */
	J4FS_T(J4FS_TRACE_FS, ("end j4fs_write_begin fail returning %d\n", ret));
	if (pg) {
		unlock_page(pg);
		page_cache_release(pg);
	}
	return ret;
}
/*
 * write_begin-era hook for JFFS2 (2.4 kernel).  If the target page lies
 * beyond the current EOF, a zero-compressed "hole" node is written to
 * flash covering old-EOF..page-start and i_size is extended; then the
 * page is read in unless the caller will overwrite it entirely.
 */
int jffs2_prepare_write (struct file *filp, struct page *pg, unsigned start, unsigned end)
{
	struct inode *inode = pg->mapping->host;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	__u32 pageofs = pg->index << PAGE_CACHE_SHIFT;	/* byte offset of this page */
	int ret = 0;

	D1(printk(KERN_DEBUG "jffs2_prepare_write() nrpages %ld\n", inode->i_mapping->nrpages));

	if (pageofs > inode->i_size) {
		/* Make new hole frag from old EOF to new page */
		struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
		struct jffs2_raw_inode ri;
		struct jffs2_full_dnode *fn;
		__u32 phys_ofs, alloc_len;

		D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", (unsigned int)inode->i_size, pageofs));

		/* Reserve flash space first; f->sem is taken only afterwards. */
		ret = jffs2_reserve_space(c, sizeof(ri), &phys_ofs, &alloc_len, ALLOC_NORMAL);
		if (ret)
			return ret;
		down(&f->sem);
		memset(&ri, 0, sizeof(ri));
		ri.magic = JFFS2_MAGIC_BITMASK;
		ri.nodetype = JFFS2_NODETYPE_INODE;
		ri.totlen = sizeof(ri);
		ri.hdr_crc = crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4);
		ri.ino = f->inocache->ino;
		ri.version = ++f->highest_version;
		ri.mode = inode->i_mode;
		ri.uid = inode->i_uid;
		ri.gid = inode->i_gid;
		ri.isize = max((__u32)inode->i_size, pageofs);
		ri.atime = ri.ctime = ri.mtime = CURRENT_TIME;
		/* The hole spans from the old EOF to the start of this page,
		 * stored with no data payload (JFFS2_COMPR_ZERO). */
		ri.offset = inode->i_size;
		ri.dsize = pageofs - inode->i_size;
		ri.csize = 0;
		ri.compr = JFFS2_COMPR_ZERO;
		ri.node_crc = crc32(0, &ri, sizeof(ri)-8);
		ri.data_crc = 0;

		fn = jffs2_write_dnode(inode, &ri, NULL, 0, phys_ofs, NULL);
		jffs2_complete_reservation(c);
		if (IS_ERR(fn)) {
			ret = PTR_ERR(fn);
			up(&f->sem);
			return ret;
		}
		ret = jffs2_add_full_dnode_to_inode(c, f, fn);
		if (f->metadata) {
			/* The new dnode supersedes any metadata-only node. */
			jffs2_mark_node_obsolete(c, f->metadata->raw);
			jffs2_free_full_dnode(f->metadata);
			f->metadata = NULL;
		}
		if (ret) {
			D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in prepare_write, returned %d\n", ret));
			/* Undo: the freshly written node becomes garbage. */
			jffs2_mark_node_obsolete(c, fn->raw);
			jffs2_free_full_dnode(fn);
			up(&f->sem);
			return ret;
		}
		inode->i_size = pageofs;
		up(&f->sem);
	}

	/* Read in the page if it wasn't already present, unless it's a whole page */
	if (!Page_Uptodate(pg) && (start || end < PAGE_CACHE_SIZE)) {
		down(&f->sem);
		ret = jffs2_do_readpage_nolock(inode, pg);
		up(&f->sem);
	}
	D1(printk(KERN_DEBUG "end prepare_write(). pg->flags %lx\n", pg->flags));
	return ret;
}
/* path points to first direct item of the file regarless of how many of
   them are there */
/*
 * Convert a file's tail from direct items to an indirect item pointing
 * at the unformatted node @unbh.  The indirect pointer is inserted (or
 * pasted) first, then the direct items are deleted one by one, copying
 * their bytes into the new node as needed.  Returns 0 or a negative
 * errno / reiserfs error code.
 */
int direct2indirect (struct reiserfs_transaction_handle *th, struct inode * inode,
		     struct path * path, struct buffer_head * unbh,
		     loff_t tail_offset)
{
    struct super_block * sb = inode->i_sb;
    struct buffer_head *up_to_date_bh ;
    struct item_head * p_le_ih = PATH_PITEM_HEAD (path);
    unsigned long total_tail = 0 ;
    struct cpu_key end_key;  /* Key to search for the last byte of the
				converted item. */
    struct item_head ind_ih; /* new indirect item to be inserted or
				key of unfm pointer to be pasted */
    int n_blk_size, n_retval;	/* returned value for reiserfs_insert_item and clones */
    struct unfm_nodeinfo unfm_ptr;  /* Handle on an unformatted node
				       that will be inserted in the tree. */

    sb->u.reiserfs_sb.s_direct2indirect ++;	/* conversion statistics */

    n_blk_size = sb->s_blocksize;

    /* and key to search for append or insert pointer to the new
       unformatted node. */
    copy_item_head (&ind_ih, p_le_ih);
    set_le_ih_k_offset (&ind_ih, tail_offset);
    set_le_ih_k_type (&ind_ih, TYPE_INDIRECT);

    /* Set the key to search for the place for new unfm pointer */
    make_cpu_key (&end_key, inode, tail_offset, TYPE_INDIRECT, 4);

    // FIXME: we could avoid this
    if ( search_for_position_by_key (sb, &end_key, path) == POSITION_FOUND ) {
	reiserfs_warning ("PAP-14030: direct2indirect: "
			"pasted or inserted byte exists in the tree %K. "
			"Use fsck to repair.\n", &end_key);
	pathrelse(path);
	return -EIO;
    }

    p_le_ih = PATH_PITEM_HEAD (path);

    unfm_ptr.unfm_nodenum = cpu_to_le32 (unbh->b_blocknr);
    unfm_ptr.unfm_freespace = 0; // ???

    if ( is_statdata_le_ih (p_le_ih) )  {
	/* Insert new indirect item. */
	set_ih_free_space (&ind_ih, 0); /* delete at nearest future */
	put_ih_item_len( &ind_ih, UNFM_P_SIZE );
	PATH_LAST_POSITION (path)++;
	n_retval = reiserfs_insert_item (th, path, &end_key, &ind_ih,
					 (char *)&unfm_ptr);
    } else {
	/* Paste into last indirect item of an object. */
	n_retval = reiserfs_paste_into_item(th, path, &end_key,
					    (char *)&unfm_ptr, UNFM_P_SIZE);
    }
    if ( n_retval ) {
	return n_retval;
    }

    // note: from here there are two keys which have matching first
    // three key components. They only differ by the fourth one.

    /* Set the key to search for the direct items of the file */
    make_cpu_key (&end_key, inode, max_reiserfs_offset (inode), TYPE_DIRECT, 4);

    /* Move bytes from the direct items to the new unformatted node
       and delete them. */
    while (1)  {
	int tail_size;

	/* end_key.k_offset is set so, that we will always have found
	   last item of the file */
	if ( search_for_position_by_key (sb, &end_key, path) == POSITION_FOUND )
	    reiserfs_panic (sb, "PAP-14050: direct2indirect: "
			    "direct item (%K) not found", &end_key);
	p_le_ih = PATH_PITEM_HEAD (path);
	RFALSE( !is_direct_le_ih (p_le_ih),
		"vs-14055: direct item expected(%K), found %h",
		&end_key, p_le_ih);
	/* Bytes of tail held in this direct item. */
	tail_size = (le_ih_k_offset (p_le_ih) & (n_blk_size - 1))
	    + ih_item_len(p_le_ih) - 1;

	/* we only send the unbh pointer if the buffer is not up to date.
	** this avoids overwriting good data from writepage() with old data
	** from the disk or buffer cache
	** Special case: unbh->b_page will be NULL if we are coming through
	** DIRECT_IO handler here.
	*/
	if ( !unbh->b_page || buffer_uptodate(unbh) || Page_Uptodate(unbh->b_page)) {
	    up_to_date_bh = NULL ;
	} else {
	    up_to_date_bh = unbh ;
	}
	n_retval = reiserfs_delete_item (th, path, &end_key, inode,
					 up_to_date_bh) ;

	total_tail += n_retval ;
	if (tail_size == n_retval)
	    // done: file does not have direct items anymore
	    break;

    }
    /* if we've copied bytes from disk into the page, we need to zero
    ** out the unused part of the block (it was not up to date before)
    ** the page is still kmapped (by whoever called reiserfs_get_block)
    */
    if (up_to_date_bh) {
	unsigned pgoff = (tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1);
	memset(page_address(unbh->b_page) + pgoff, 0, n_blk_size - total_tail) ;
    }
    inode->u.reiserfs_i.i_first_direct_byte = U32_MAX;
    reiserfs_update_tail_transaction(inode);
    return 0;
}
//int jffs2_prepare_write (struct file *filp, struct page *pg, unsigned start, unsigned end)
/*
 * eCos port of jffs2_prepare_write: same shape as the Linux version but
 * takes the inode directly and uses the je16/je32 endianness wrappers.
 * Writes a zero-compressed hole node when the page lies beyond EOF,
 * then reads the page in unless the caller will overwrite it entirely.
 */
int jffs2_prepare_write (struct inode *d_inode, struct page *pg, unsigned start, unsigned end)
{
	struct inode *inode = d_inode;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	uint32_t pageofs = pg->index << PAGE_CACHE_SHIFT;	/* byte offset of this page */
	int ret = 0;

	D1(printk(KERN_DEBUG "jffs2_prepare_write()\n"));

	if (pageofs > inode->i_size) {
		/* Make new hole frag from old EOF to new page */
		struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
		struct jffs2_raw_inode ri;
		struct jffs2_full_dnode *fn;
		uint32_t phys_ofs, alloc_len;

		D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", (unsigned int)inode->i_size, pageofs));

		/* Reserve flash space before taking f->sem. */
		ret = jffs2_reserve_space(c, sizeof(ri), &phys_ofs, &alloc_len, ALLOC_NORMAL);
		if (ret)
			return ret;
		down(&f->sem);
		memset(&ri, 0, sizeof(ri));
		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
		ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
		ri.totlen = cpu_to_je32(sizeof(ri));
		ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));
		ri.ino = cpu_to_je32(f->inocache->ino);
		ri.version = cpu_to_je32(++f->highest_version);
		ri.mode = cpu_to_jemode(inode->i_mode);
		ri.uid = cpu_to_je16(inode->i_uid);
		ri.gid = cpu_to_je16(inode->i_gid);
		ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs));
		ri.atime = ri.ctime = ri.mtime = cpu_to_je32(cyg_timestamp());
		/* Hole spans old EOF .. page start with no data payload. */
		ri.offset = cpu_to_je32(inode->i_size);
		ri.dsize = cpu_to_je32(pageofs - inode->i_size);
		ri.csize = cpu_to_je32(0);
		ri.compr = JFFS2_COMPR_ZERO;
		ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
		ri.data_crc = cpu_to_je32(0);

		fn = jffs2_write_dnode(c, f, &ri, NULL, 0, phys_ofs, NULL);
		jffs2_complete_reservation(c);
		if (IS_ERR(fn)) {
			ret = PTR_ERR(fn);
			up(&f->sem);
			return ret;
		}
		ret = jffs2_add_full_dnode_to_inode(c, f, fn);
		if (f->metadata) {
			/* The new dnode supersedes any metadata-only node. */
			jffs2_mark_node_obsolete(c, f->metadata->raw);
			jffs2_free_full_dnode(f->metadata);
			f->metadata = NULL;
		}
		if (ret) {
			D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in prepare_write, returned %d\n", ret));
			/* Undo: mark the freshly written node obsolete. */
			jffs2_mark_node_obsolete(c, fn->raw);
			jffs2_free_full_dnode(fn);
			up(&f->sem);
			return ret;
		}
		inode->i_size = pageofs;
		up(&f->sem);
	}

	/* Read in the page if it wasn't already present, unless it's a whole page */
	// eCos has no concept of uptodate and by default always reads pages afresh
	if (!Page_Uptodate(pg) && (start || end < PAGE_CACHE_SIZE)) {
		down(&f->sem);
		ret = jffs2_do_readpage_nolock(inode, pg);
		up(&f->sem);
	}
	D1(printk(KERN_DEBUG "end prepare_write()\n"));
	return ret;
}
/* * Returns a pointer to a buffer containing at least LEN bytes of * filesystem starting at byte offset OFFSET into the filesystem. */ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned int len) { struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping; struct page *pages[BLKS_PER_BUF]; unsigned i, blocknr, buffer, unread; unsigned long devsize; int major, minor; char *data; if (!len) return NULL; blocknr = offset >> PAGE_CACHE_SHIFT; offset &= PAGE_CACHE_SIZE - 1; /* Check if an existing buffer already has the data.. */ for (i = 0; i < READ_BUFFERS; i++) { unsigned int blk_offset; if (buffer_dev[i] != sb) continue; if (blocknr < buffer_blocknr[i]) continue; blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_CACHE_SHIFT; blk_offset += offset; if (blk_offset + len > BUFFER_SIZE) continue; return read_buffers[i] + blk_offset; } devsize = mapping->host->i_size >> PAGE_CACHE_SHIFT; major = MAJOR(sb->s_dev); minor = MINOR(sb->s_dev); if (blk_size[major]) devsize = blk_size[major][minor] >> 2; /* Ok, read in BLKS_PER_BUF pages completely first. */ unread = 0; for (i = 0; i < BLKS_PER_BUF; i++) { struct page *page = NULL; if (blocknr + i < devsize) { page = read_cache_page(mapping, blocknr + i, (filler_t *)mapping->a_ops->readpage, NULL); /* synchronous error? */ if (IS_ERR(page)) page = NULL; } pages[i] = page; } for (i = 0; i < BLKS_PER_BUF; i++) { struct page *page = pages[i]; if (page) { wait_on_page(page); if (!Page_Uptodate(page)) { /* asynchronous error */ page_cache_release(page); pages[i] = NULL; } } } buffer = next_buffer; next_buffer = NEXT_BUFFER(buffer); buffer_blocknr[buffer] = blocknr; buffer_dev[buffer] = sb; data = read_buffers[buffer]; for (i = 0; i < BLKS_PER_BUF; i++) { struct page *page = pages[i]; if (page) { memcpy(data, kmap(page), PAGE_CACHE_SIZE); kunmap(page); page_cache_release(page); } else memset(data, 0, PAGE_CACHE_SIZE); data += PAGE_CACHE_SIZE; } return read_buffers[buffer] + offset; }
static int rd_blkdev_pagecache_IO(int rw, struct buffer_head * sbh, int minor) { struct address_space * mapping; unsigned long index; int offset, size, err; err = -EIO; err = 0; mapping = rd_bdev[minor]->bd_inode->i_mapping; index = sbh->b_rsector >> (PAGE_CACHE_SHIFT - 9); offset = (sbh->b_rsector << 9) & ~PAGE_CACHE_MASK; size = sbh->b_size; do { int count; struct page ** hash; struct page * page; char * src, * dst; int unlock = 0; count = PAGE_CACHE_SIZE - offset; if (count > size) count = size; size -= count; hash = page_hash(mapping, index); page = __find_get_page(mapping, index, hash); if (!page) { page = grab_cache_page(mapping, index); err = -ENOMEM; if (!page) goto out; err = 0; if (!Page_Uptodate(page)) { memset(kmap(page), 0, PAGE_CACHE_SIZE); kunmap(page); SetPageUptodate(page); } unlock = 1; } index++; if (rw == READ) { src = kmap(page); src += offset; dst = bh_kmap(sbh); } else { dst = kmap(page); dst += offset; src = bh_kmap(sbh); } offset = 0; memcpy(dst, src, count); kunmap(page); bh_kunmap(sbh); if (rw == READ) { flush_dcache_page(page); } else { SetPageDirty(page); } if (unlock) UnlockPage(page); __free_page(page); } while (size); out: return err; }
/* readpage() - reads one page from the block device */ static int blkmtd_readpage(struct file *file, struct page *page) { int err; int sectornr, sectors, i; struct kiobuf *iobuf; mtd_raw_dev_data_t *rawdevice = (mtd_raw_dev_data_t *)file->private_data; kdev_t dev; if(!rawdevice) { printk("blkmtd: readpage: PANIC file->private_data == NULL\n"); return -EIO; } dev = to_kdev_t(rawdevice->binding->bd_dev); DEBUG(2, "blkmtd: readpage called, dev = `%s' page = %p index = %ld\n", bdevname(dev), page, page->index); if(Page_Uptodate(page)) { DEBUG(1, "blkmtd: readpage page %ld is already upto date\n", page->index); UnlockPage(page); return 0; } ClearPageUptodate(page); ClearPageError(page); /* see if page is in the outgoing write queue */ spin_lock(&mbd_writeq_lock); if(write_queue_cnt) { int i = write_queue_tail; while(i != write_queue_head) { mtdblkdev_write_queue_t *item = &write_queue[i]; if(page->index >= item->pagenr && page->index < item->pagenr+item->pagecnt) { /* yes it is */ int index = item->pagenr - page->index; DEBUG(1, "blkmtd: readpage: found page %ld in outgoing write queue\n", page->index); if(item->iserase) { memset(page_address(page), 0xff, PAGE_SIZE); } else { memcpy(page_address(page), page_address(item->pages[index]), PAGE_SIZE); } SetPageUptodate(page); flush_dcache_page(page); UnlockPage(page); spin_unlock(&mbd_writeq_lock); return 0; } i++; i %= WRITE_QUEUE_SZ; } } spin_unlock(&mbd_writeq_lock); DEBUG(3, "blkmtd: readpage: getting kiovec\n"); err = alloc_kiovec(1, &iobuf); if (err) { return err; } iobuf->offset = 0; iobuf->nr_pages = 1; iobuf->length = PAGE_SIZE; iobuf->locked = 1; iobuf->maplist[0] = page; sectornr = page->index << (PAGE_SHIFT - rawdevice->sector_bits); sectors = 1 << (PAGE_SHIFT - rawdevice->sector_bits); DEBUG(3, "blkmtd: readpage: sectornr = %d sectors = %d\n", sectornr, sectors); for(i = 0; i < sectors; i++) { iobuf->blocks[i] = sectornr++; } DEBUG(3, "bklmtd: readpage: starting brw_kiovec\n"); err = brw_kiovec(READ, 1, 
&iobuf, dev, iobuf->blocks, rawdevice->sector_size); DEBUG(3, "blkmtd: readpage: finished, err = %d\n", err); iobuf->locked = 0; free_kiovec(1, &iobuf); if(err != PAGE_SIZE) { printk("blkmtd: readpage: error reading page %ld\n", page->index); memset(page_address(page), 0, PAGE_SIZE); SetPageError(page); err = -EIO; } else { DEBUG(3, "blkmtd: readpage: setting page upto date\n"); SetPageUptodate(page); err = 0; } flush_dcache_page(page); UnlockPage(page); DEBUG(2, "blkmtd: readpage: finished, err = %d\n", err); return 0; }
/*
 * Resolve a umsdos pseudo-hardlink: the file's contents are a path
 * string which is walked, component by component, from the filesystem
 * root to find the real dentry.  Consumes the reference on @hlink and
 * returns the resolved dentry (possibly negative) or an ERR_PTR.
 */
struct dentry *umsdos_solve_hlink (struct dentry *hlink)
{
	/* root is our root for resolving pseudo-hardlink */
	struct dentry *base = hlink->d_sb->s_root;
	struct dentry *dentry_dst;
	char *path, *pt;
	int len;
	struct address_space *mapping = hlink->d_inode->i_mapping;
	struct page *page;

	page=read_cache_page(mapping,0,(filler_t *)mapping->a_ops->readpage,NULL);
	/* Propagate a read_cache_page() error pointer through dentry_dst. */
	dentry_dst=(struct dentry *)page;

	if (IS_ERR(page))
		goto out;
	wait_on_page(page);
	if (!Page_Uptodate(page))
		goto async_fail;

	dentry_dst = ERR_PTR(-ENOMEM);
	path = (char *) kmalloc (PATH_MAX, GFP_KERNEL);
	if (path == NULL)
		goto out_release;
	/* NOTE(review): copies i_size bytes into a PATH_MAX buffer with no
	 * visible bound check here — presumably i_size <= PATH_MAX is
	 * guaranteed by the writer of the hlink file; verify. */
	memcpy(path, kmap(page), hlink->d_inode->i_size);
	kunmap(page);
	page_cache_release(page);

	len = hlink->d_inode->i_size;

	/* start at root dentry */
	dentry_dst = dget(base);
	path[len] = '\0';

	pt = path;
	if (*path == '/')
		pt++; /* skip leading '/' */

	if (base->d_inode == pseudo_root)
		pt += (UMSDOS_PSDROOT_LEN + 1);

	/* Walk the path one component at a time, dropping the reference on
	 * each intermediate directory as we descend. */
	while (1) {
		struct dentry *dir = dentry_dst, *demd;
		char *start = pt;
		int real;

		while (*pt != '\0' && *pt != '/') pt++;
		len = (int) (pt - start);
		if (*pt == '/') *pt++ = '\0';

		/* A directory with an EMD file present is a umsdos dir
		 * ("real" stays 0); otherwise look up as a plain msdos name. */
		real = 1;
		demd = umsdos_get_emd_dentry(dir);
		if (!IS_ERR(demd)) {
			if (demd->d_inode)
				real = 0;
			dput(demd);
		}

#ifdef UMSDOS_DEBUG_VERBOSE
printk ("umsdos_solve_hlink: dir %s/%s, name=%s, real=%d\n",
dir->d_parent->d_name.name, dir->d_name.name, start, real);
#endif
		dentry_dst = umsdos_lookup_dentry(dir, start, len, real);
/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
		if (real)
			d_drop(dir);
		dput (dir);
		if (IS_ERR(dentry_dst))
			break;
		/* not found? stop search ... */
		if (!dentry_dst->d_inode) {
			break;
		}
		if (*pt == '\0')	/* we're finished! */
			break;
	} /* end while */

	if (!IS_ERR(dentry_dst)) {
		struct inode *inode = dentry_dst->d_inode;
		if (inode) {
			inode->u.umsdos_i.i_is_hlink = 1;
#ifdef UMSDOS_DEBUG_VERBOSE
printk ("umsdos_solve_hlink: resolved link %s/%s, ino=%ld\n",
dentry_dst->d_parent->d_name.name, dentry_dst->d_name.name, inode->i_ino);
#endif
		} else {
#ifdef UMSDOS_DEBUG_VERBOSE
printk ("umsdos_solve_hlink: resolved link %s/%s negative!\n",
dentry_dst->d_parent->d_name.name, dentry_dst->d_name.name);
#endif
		}
	} else
		printk(KERN_WARNING
			"umsdos_solve_hlink: err=%ld\n", PTR_ERR(dentry_dst));
	kfree (path);

out:
	dput(hlink);	/* original hlink no longer needed */
	return dentry_dst;

async_fail:
	dentry_dst = ERR_PTR(-EIO);
out_release:
	page_cache_release(page);
	goto out;
}