/*
 * logfs_write_end - ->write_end for logfs address_space operations
 *
 * Completes a write started by write_begin: updates i_size if the write
 * extended the file, marks the page uptodate/dirty (or writes it out
 * immediately when no dirty-page reserve is available), then unlocks and
 * releases the page.
 *
 * Returns a negative errno on write-out failure, otherwise the number of
 * bytes actually accepted (0 tells the caller to retry the whole page).
 */
static int logfs_write_end(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned copied, struct page *page,
		void *fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = page->index;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned end = start + copied;
	int ret = 0;

	BUG_ON(PAGE_CACHE_SIZE != inode->i_sb->s_blocksize);
	BUG_ON(page->index > I3_BLOCKS);

	if (copied < len) {
		/*
		 * Short write of a non-initialized page.  Just tell userspace
		 * to retry the entire page.
		 */
		if (!PageUptodate(page)) {
			copied = 0;
			goto out;
		}
	}
	if (copied == 0)
		goto out;

	/*
	 * Cast index to loff_t *before* shifting: pgoff_t is unsigned long,
	 * so on 32-bit systems the unwidened shift would overflow for file
	 * offsets >= 4GiB.
	 */
	/* FIXME: do we need to update inode? */
	if (i_size_read(inode) < ((loff_t)index << PAGE_CACHE_SHIFT) + end) {
		i_size_write(inode, ((loff_t)index << PAGE_CACHE_SHIFT) + end);
		mark_inode_dirty_sync(inode);
	}

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		/* No reserve available means we must write synchronously. */
		if (!get_page_reserve(inode, page))
			__set_page_dirty_nobuffers(page);
		else
			ret = logfs_write_buf(inode, page, WF_LOCK);
	}
out:
	unlock_page(page);
	page_cache_release(page);
	return ret ? ret : copied;
}
static int logfs_write_dir(struct inode *dir, struct dentry *dentry, struct inode *inode) { struct page *page; struct logfs_disk_dentry *dd; u32 hash = hash_32(dentry->d_name.name, dentry->d_name.len, 0); pgoff_t index; int round, err; for (round = 0; round < 20; round++) { index = hash_index(hash, round); if (logfs_exist_block(dir, index)) continue; page = find_or_create_page(dir->i_mapping, index, GFP_KERNEL); if (!page) return -ENOMEM; dd = kmap_atomic(page, KM_USER0); memset(dd, 0, sizeof(*dd)); dd->ino = cpu_to_be64(inode->i_ino); dd->type = logfs_type(inode); logfs_set_name(dd, &dentry->d_name); kunmap_atomic(dd, KM_USER0); err = logfs_write_buf(dir, page, WF_LOCK); unlock_page(page); page_cache_release(page); if (!err) grow_dir(dir, index); return err; } /* FIXME: Is there a better return value? In most cases neither * the filesystem nor the directory are full. But we have had * too many collisions for this particular hash and no fallback. */ return -ENOSPC; }