/* added: begin address space operations definitions */ static int wrapfs_writepage(struct page *page, struct writeback_control *wbc){ int err = -EIO; struct inode *inode; struct inode *lower_inode; struct page *lower_page; struct address_space *lower_mapping; /* lower inode mapping */ gfp_t mask; BUG_ON(!PageUptodate(page)); inode = page->mapping->host; if (!inode || !WRAPFS_I(inode)){ err = 0; goto out; } lower_inode = wrapfs_lower_inode(inode); lower_mapping = lower_inode->i_mapping; mask = mapping_gfp_mask(lower_mapping) & ~(__GFP_FS); lower_page = find_or_create_page(lower_mapping, page->index, mask); if (!lower_page) { err = 0; set_page_dirty(page); goto out; } copy_highpage(lower_page, page); flush_dcache_page(lower_page); SetPageUptodate(lower_page); set_page_dirty(lower_page); if (wbc->for_reclaim) { unlock_page(lower_page); goto out_release; } BUG_ON(!lower_mapping->a_ops->writepage); wait_on_page_writeback(lower_page); /* prevent multiple writers */ clear_page_dirty_for_io(lower_page); /* emulate VFS behavior */ err = lower_mapping->a_ops->writepage(lower_page, wbc); if (err < 0) goto out_release; if (err == AOP_WRITEPAGE_ACTIVATE) { err = 0; unlock_page(lower_page); } fsstack_copy_attr_times(inode, lower_inode); out_release: page_cache_release(lower_page); out: unlock_page(page); return err; }
static int wrapfs_open(struct inode *inode, struct file *file) { int err = 0; struct dentry *dentry = file->f_path.dentry; struct dentry *parent = NULL; int size; /* don't open unhashed/deleted files */ if (d_unhashed(dentry)) { err = -ENOENT; goto out_err; } file->private_data = kzalloc(sizeof(struct wrapfs_file_info), GFP_KERNEL); if (!WRAPFS_F(file)) { err = -ENOMEM; goto out_err; } atomic_set(&WRAPFS_F(file)->generation, atomic_read(&WRAPFS_I(inode)->generation)); size = sizeof(struct file *) * 2; WRAPFS_F(file)->lower_files = kzalloc(size, GFP_KERNEL); if (unlikely(!WRAPFS_F(file)->lower_files)) { err = -ENOMEM; goto out_err; } parent = dget_parent(dentry); if (S_ISDIR(inode->i_mode)) err = __open_dir(inode, file, parent); else err = __open_file(inode, file, parent); /* if(!err) fsstack_copy_attr_all(inode, wrapfs_lower_inode(inode)); */ out_err: dput(parent); return err; }
static int wrapfs_writepage(struct page *page, struct writeback_control *wbc) { int err = -EIO; struct inode *inode; struct inode *lower_inode; struct page *lower_page; struct address_space *lower_mapping; /* lower inode mapping */ gfp_t mask; /*printk(KERN_ALERT "in writepage() \n");*/ BUG_ON(!PageUptodate(page)); inode = page->mapping->host; /* if no lower inode, nothing to do */ if (!inode || !WRAPFS_I(inode) || WRAPFS_I(inode)->lower_inode) { err = 0; goto out; } lower_inode = wrapfs_lower_inode(inode); lower_mapping = lower_inode->i_mapping; /* * find lower page (returns a locked page) * * We turn off __GFP_FS while we look for or create a new lower * page. This prevents a recursion into the file system code, which * under memory pressure conditions could lead to a deadlock. This * is similar to how the loop driver behaves (see loop_set_fd in * drivers/block/loop.c). If we can't find the lower page, we * redirty our page and return "success" so that the VM will call us * again in the (hopefully near) future. */ mask = mapping_gfp_mask(lower_mapping) & ~(__GFP_FS); lower_page = find_or_create_page(lower_mapping, page->index, mask); if (!lower_page) { err = 0; set_page_dirty(page); goto out; } /* copy page data from our upper page to the lower page */ copy_highpage(lower_page, page); flush_dcache_page(lower_page); SetPageUptodate(lower_page); set_page_dirty(lower_page); /* * Call lower writepage (expects locked page). However, if we are * called with wbc->for_reclaim, then the VFS/VM just wants to * reclaim our page. Therefore, we don't need to call the lower * ->writepage: just copy our data to the lower page (already done * above), then mark the lower page dirty and unlock it, and return * success. 
*/ if (wbc->for_reclaim) { unlock_page(lower_page); goto out_release; } BUG_ON(!lower_mapping->a_ops->writepage); wait_on_page_writeback(lower_page); /* prevent multiple writers */ clear_page_dirty_for_io(lower_page); /* emulate VFS behavior */ err = lower_mapping->a_ops->writepage(lower_page, wbc); if (err < 0) goto out_release; /* * Lower file systems such as ramfs and tmpfs, may return * AOP_WRITEPAGE_ACTIVATE so that the VM won't try to (pointlessly) * write the page again for a while. But those lower file systems * also set the page dirty bit back again. Since we successfully * copied our page data to the lower page, then the VM will come * back to the lower page (directly) and try to flush it. So we can * save the VM the hassle of coming back to our page and trying to * flush too. Therefore, we don't re-dirty our own page, and we * never return AOP_WRITEPAGE_ACTIVATE back to the VM (we consider * this a success). * * We also unlock the lower page if the lower ->writepage returned * AOP_WRITEPAGE_ACTIVATE. (This "anomalous" behaviour may be * addressed in future shmem/VM code.) */ if (err == AOP_WRITEPAGE_ACTIVATE) { err = 0; unlock_page(lower_page); } /* all is well */ /* lower mtimes have changed: update ours */ /* fsstack_copy_inode_size(dentry->d_inode, lower_file->f_path.dentry->d_inode); fsstack_copy_attr_times(dentry->d_inode, lower_file->f_path.dentry->d_inode); */ out_release: /* b/c find_or_create_page increased refcnt */ page_cache_release(lower_page); out: /* * We unlock our page unconditionally, because we never return * AOP_WRITEPAGE_ACTIVATE. */ unlock_page(page); return err; }
/*
 * wrapfs_iget - find or create an upper inode wrapping @lower_inode
 * @sb:          wrapfs superblock
 * @lower_inode: lower inode to wrap (caller's reference; consumed on the
 *               iget5_locked failure path via iput)
 *
 * Looks the inode up in the icache by lower inode number; on a miss,
 * initializes a new upper inode (ops tables, zeroed times, special-file
 * handling) and copies attributes/size from the lower inode.
 * Returns the upper inode or an ERR_PTR on failure.
 */
struct inode *wrapfs_iget(struct super_block *sb, struct inode *lower_inode)
{
	struct wrapfs_inode_info *info;
	struct inode *inode; /* the new inode to return */
	int err;

	inode = iget5_locked(sb,	     /* our superblock */
			     /*
			      * hashval: we use inode number, but we can
			      * also use "(unsigned long)lower_inode"
			      * instead.
			      */
			     lower_inode->i_ino, /* hashval */
			     wrapfs_inode_test,	/* inode comparison function */
			     wrapfs_inode_set, /* inode init function */
			     lower_inode); /* data passed to test+set fxns */
	if (!inode) {
		err = -EACCES;
		/* allocation failed: drop the caller's lower reference */
		iput(lower_inode);
		return ERR_PTR(err);
	}
	/* if found a cached inode, then just return it */
	if (!(inode->i_state & I_NEW))
		return inode;

	/* initialize new inode */
	info = WRAPFS_I(inode);

	inode->i_ino = lower_inode->i_ino;
	if (!igrab(lower_inode)) {
		/*
		 * NOTE(review): the lower inode is being freed.  We return
		 * -ESTALE here while the new upper inode is still locked
		 * and hashed (I_NEW); the conventional cleanup would be
		 * iget_failed(inode) — confirm against the icache rules
		 * for this kernel version before changing.
		 */
		err = -ESTALE;
		return ERR_PTR(err);
	}
	wrapfs_set_lower_inode(inode, lower_inode);

	inode->i_version++;

	/* use different set of inode ops for symlinks & directories */
	if (S_ISDIR(lower_inode->i_mode))
		inode->i_op = &wrapfs_dir_iops;
	else if (S_ISLNK(lower_inode->i_mode))
		inode->i_op = &wrapfs_symlink_iops;
	else
		inode->i_op = &wrapfs_main_iops;

	/* use different set of file ops for directories */
	if (S_ISDIR(lower_inode->i_mode))
		inode->i_fop = &wrapfs_dir_fops;
	else
		inode->i_fop = &wrapfs_main_fops;

	inode->i_mapping->a_ops = &wrapfs_aops;

	/* zero all times; they are refreshed from the lower inode below */
	inode->i_atime.tv_sec = 0;
	inode->i_atime.tv_nsec = 0;
	inode->i_mtime.tv_sec = 0;
	inode->i_mtime.tv_nsec = 0;
	inode->i_ctime.tv_sec = 0;
	inode->i_ctime.tv_nsec = 0;

	/* properly initialize special inodes */
	if (S_ISBLK(lower_inode->i_mode) || S_ISCHR(lower_inode->i_mode) ||
	    S_ISFIFO(lower_inode->i_mode) || S_ISSOCK(lower_inode->i_mode))
		init_special_inode(inode, lower_inode->i_mode,
				   lower_inode->i_rdev);

	/* all well, copy inode attributes */
	fsstack_copy_attr_all(inode, lower_inode);
	fsstack_copy_inode_size(inode, lower_inode);

	unlock_new_inode(inode);
	return inode;
}
static void wrapfs_destroy_inode(struct inode *inode) { kmem_cache_free(wrapfs_inode_cachep, WRAPFS_I(inode)); }
static int wrapfs_writepage(struct page *page, struct writeback_control *wbc) { int err = -EIO; struct inode *inode; struct inode *lower_inode; struct page *lower_page; struct address_space *lower_mapping; /* lower inode mapping */ gfp_t mask; char *lower_page_data = NULL; /*#ifdef WRAPFS_CRYPTO char *enc_buf = NULL; #endif*/ wrapfs_debug_aops( WRAPFS_SB(page->mapping->host->i_sb)->wrapfs_debug_a_ops, ""); wrapfs_debug(""); BUG_ON(!PageUptodate(page)); wrapfs_debug(""); inode = page->mapping->host; /* if no lower inode, nothing to do */ if (!inode || !WRAPFS_I(inode) || WRAPFS_I(inode)->lower_inode) { err = 0; goto out; } lower_inode = wrapfs_lower_inode(inode); lower_mapping = lower_inode->i_mapping; /* * find lower page (returns a locked page) * * We turn off __GFP_FS while we look for or create a new lower * page. This prevents a recursion into the file system code, which * under memory pressure conditions could lead to a deadlock. This * is similar to how the loop driver behaves (see loop_set_fd in * drivers/block/loop.c). If we can't find the lower page, we * redirty our page and return "success" so that the VM will call us * again in the (hopefully near) future. 
*/ mask = mapping_gfp_mask(lower_mapping) & ~(__GFP_FS); lower_page = find_or_create_page(lower_mapping, page->index, mask); if (!lower_page) { err = 0; set_page_dirty(page); goto out; } lower_page_data = (char *)kmap(lower_page); /* copy page data from our upper page to the lower page */ copy_highpage(lower_page, page); flush_dcache_page(lower_page); SetPageUptodate(lower_page); set_page_dirty(lower_page); /*#ifdef WRAPFS_CRYPTO enc_buf = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); if (enc_buf == NULL) { wrapfs_debug("No memory!!"); err = -ENOMEM; goto out_release; } err = my_encrypt(lower_page_data, PAGE_CACHE_SIZE, enc_buf, PAGE_CACHE_SIZE, WRAPFS_SB(inode->i_sb)->key, WRAPFS_CRYPTO_KEY_LEN); if (err < 0) { wrapfs_debug("encrypt error!!"); kfree(enc_buf); err = -EINVAL; goto out_release; } memcpy(lower_page_data, enc_buf, PAGE_CACHE_SIZE); kfree(enc_buf); #endif*/ /* * Call lower writepage (expects locked page). However, if we are * called with wbc->for_reclaim, then the VFS/VM just wants to * reclaim our page. Therefore, we don't need to call the lower * ->writepage: just copy our data to the lower page (already done * above), then mark the lower page dirty and unlock it, and return * success. */ /*if (wbc->for_reclaim) { unlock_page(lower_page); goto out_release; }*/ BUG_ON(!lower_mapping->a_ops->writepage); wait_on_page_writeback(lower_page); /* prevent multiple writers */ clear_page_dirty_for_io(lower_page); /* emulate VFS behavior */ err = lower_mapping->a_ops->writepage(lower_page, wbc); if (err < 0) goto out_release; /* * Lower file systems such as ramfs and tmpfs, may return * AOP_WRITEPAGE_ACTIVATE so that the VM won't try to (pointlessly) * write the page again for a while. But those lower file systems * also set the page dirty bit back again. Since we successfully * copied our page data to the lower page, then the VM will come * back to the lower page (directly) and try to flush it. 
So we can * save the VM the hassle of coming back to our page and trying to * flush too. Therefore, we don't re-dirty our own page, and we * never return AOP_WRITEPAGE_ACTIVATE back to the VM (we consider * this a success). * * We also unlock the lower page if the lower ->writepage returned * AOP_WRITEPAGE_ACTIVATE. (This "anomalous" behaviour may be * addressed in future shmem/VM code.) */ if (err == AOP_WRITEPAGE_ACTIVATE) { err = 0; unlock_page(lower_page); } out_release: kunmap(lower_page); /* b/c find_or_create_page increased refcnt */ page_cache_release(lower_page); out: /* * We unlock our page unconditionally, because we never return * AOP_WRITEPAGE_ACTIVATE. */ unlock_page(page); wrapfs_debug_aops(WRAPFS_SB(inode->i_sb)->wrapfs_debug_a_ops, "err : %d", err); return err; }