/* * This does the "real" work of the write. We must allocate and lock the * page to be sent back to the generic routine, which then copies the * data from user space. * * If the writer ends up delaying the write, the writer needs to * increment the page use counts until he is done with the page. */ static int nfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { int ret; pgoff_t index; struct page *page; index = pos >> PAGE_CACHE_SHIFT; dfprintk(PAGECACHE, "NFS: write_begin(%s/%s(%ld), %u@%lld)\n", file->f_path.dentry->d_parent->d_name.name, file->f_path.dentry->d_name.name, mapping->host->i_ino, len, (long long) pos); page = __grab_cache_page(mapping, index); if (!page) return -ENOMEM; *pagep = page; ret = nfs_flush_incompatible(file, page); if (ret) { unlock_page(page); page_cache_release(page); } return ret; }
/* * This does the "real" work of the write. We must allocate and lock the * page to be sent back to the generic routine, which then copies the * data from user space. * * If the writer ends up delaying the write, the writer needs to * increment the page use counts until he is done with the page. */ static int nfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { int ret; pgoff_t index; struct page *page; index = pos >> PAGE_CACHE_SHIFT; page = __grab_cache_page(mapping, index); if (!page) return -ENOMEM; *pagep = page; ret = nfs_flush_incompatible(file, page); if (ret) { unlock_page(page); page_cache_release(page); } return ret; }
int j4fs_write_begin(struct file *filp, struct address_space *mapping,
		     loff_t pos, unsigned len, unsigned flags,
		     struct page **pagep, void **fsdata)
{
	struct page *pg = NULL;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	uint32_t offset = pos & (PAGE_CACHE_SIZE - 1);
	uint32_t to = offset + len;
	int ret = 0;
	int space_held = 0;

	if (j4fs_panic == 1) {
		J4FS_T(J4FS_TRACE_ALWAYS,
		       ("%s %d: j4fs panic\n", __FUNCTION__, __LINE__));
		return -ENOSPC;
	}

	J4FS_T(J4FS_TRACE_FS, ("start j4fs_write_begin\n"));

	if (to > PAGE_CACHE_SIZE) {
		/* Format specifiers fixed: pos is loff_t, index is pgoff_t */
		J4FS_T(J4FS_TRACE_ALWAYS,
		       ("%s %d: page size overflow(pos,index,offset,len,to)=(%lld,%lu,%u,%u,%u)\n",
			__FUNCTION__, __LINE__, (long long)pos,
			(unsigned long)index, offset, len, to));
		j4fs_panic("page size overflow");
		return -ENOSPC;
	}

	/* Get a page */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
	pg = grab_cache_page_write_begin(mapping, index, flags);
#else
	pg = __grab_cache_page(mapping, index);
#endif
	*pagep = pg;
	if (!pg) {
		ret = -ENOMEM;
		goto out;
	}

	/* Get fs space */
	space_held = j4fs_hold_space(PAGE_CACHE_SIZE);
	if (!space_held) {
		ret = -ENOSPC;
		goto out;
	}

	/* Update page if required */
	if (!Page_Uptodate(pg) && (offset || to < PAGE_CACHE_SIZE))
		ret = j4fs_readpage_nolock(filp, pg);
	if (ret)
		goto out;

	/* Happy path return */
	J4FS_T(J4FS_TRACE_FS, ("end j4fs_write_begin - ok\n"));
	return 0;

out:
	J4FS_T(J4FS_TRACE_FS,
	       ("end j4fs_write_begin fail returning %d\n", ret));
	if (pg) {
		unlock_page(pg);
		page_cache_release(pg);
	}
	return ret;
}
/* * This does the "real" work of the write. The generic routine has * allocated the page, locked it, done all the page alignment stuff * calculations etc. Now we should just copy the data from user * space and write it back to the real medium.. * * If the writer ends up delaying the write, the writer needs to * increment the page use counts until he is done with the page. */ static int smb_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { pgoff_t index = pos >> PAGE_CACHE_SHIFT; *pagep = __grab_cache_page(mapping, index); if (!*pagep) return -ENOMEM; return 0; } static int smb_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { int status; unsigned offset = pos & (PAGE_CACHE_SIZE - 1); lock_kernel(); status = smb_updatepage(file, page, offset, copied); unlock_kernel();
static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct page *pg;
	struct inode *inode = mapping->host;
	struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	uint32_t pageofs = index << PAGE_CACHE_SHIFT;
	int ret = 0;

	pg = __grab_cache_page(mapping, index);
	if (!pg)
		return -ENOMEM;
	*pagep = pg;

	D1(printk(KERN_DEBUG "jffs2_write_begin()\n"));

	if (pageofs > inode->i_size) {
		/* Make new hole frag from old EOF to new page */
		struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
		struct jffs2_raw_inode ri;
		struct jffs2_full_dnode *fn;
		uint32_t alloc_len;

		D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
			  (unsigned int)inode->i_size, pageofs));

		ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
					  ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
		if (ret)
			goto out_page;

		down(&f->sem);
		memset(&ri, 0, sizeof(ri));

		ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
		ri.nodetype = cpu_to_je16(JFFS2_NODETYPE_INODE);
		ri.totlen = cpu_to_je32(sizeof(ri));
		ri.hdr_crc = cpu_to_je32(crc32(0, &ri, sizeof(struct jffs2_unknown_node)-4));

		ri.ino = cpu_to_je32(f->inocache->ino);
		ri.version = cpu_to_je32(++f->highest_version);
		ri.mode = cpu_to_jemode(inode->i_mode);
		ri.uid = cpu_to_je16(inode->i_uid);
		ri.gid = cpu_to_je16(inode->i_gid);
		ri.isize = cpu_to_je32(max((uint32_t)inode->i_size, pageofs));
		ri.atime = ri.ctime = ri.mtime = cpu_to_je32(get_seconds());
		ri.offset = cpu_to_je32(inode->i_size);
		ri.dsize = cpu_to_je32(pageofs - inode->i_size);
		ri.csize = cpu_to_je32(0);
		ri.compr = JFFS2_COMPR_ZERO;
		ri.node_crc = cpu_to_je32(crc32(0, &ri, sizeof(ri)-8));
		ri.data_crc = cpu_to_je32(0);

		fn = jffs2_write_dnode(c, f, &ri, NULL, 0, ALLOC_NORMAL);

		if (IS_ERR(fn)) {
			ret = PTR_ERR(fn);
			jffs2_complete_reservation(c);
			up(&f->sem);
			goto out_page;
		}
		ret = jffs2_add_full_dnode_to_inode(c, f, fn);
		if (f->metadata) {
			jffs2_mark_node_obsolete(c, f->metadata->raw);
			jffs2_free_full_dnode(f->metadata);
			f->metadata = NULL;
		}
		if (ret) {
			D1(printk(KERN_DEBUG "Eep. add_full_dnode_to_inode() failed in write_begin, returned %d\n", ret));
			jffs2_mark_node_obsolete(c, fn->raw);
			jffs2_free_full_dnode(fn);
			jffs2_complete_reservation(c);
			up(&f->sem);
			goto out_page;
		}
		jffs2_complete_reservation(c);
		inode->i_size = pageofs;
		up(&f->sem);
	}

	/*
	 * Read in the page if it wasn't already present. Cannot optimize away
	 * the whole page write case until jffs2_write_end can handle the
	 * case of a short-copy.
	 */
	if (!PageUptodate(pg)) {
		down(&f->sem);
		ret = jffs2_do_readpage_nolock(inode, pg);
		up(&f->sem);
		if (ret)
			goto out_page;
	}

	D1(printk(KERN_DEBUG "end write_begin(). pg->flags %lx\n", pg->flags));
	return ret;

out_page:
	unlock_page(pg);
	page_cache_release(pg);
	return ret;
}
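For concreteness, with 4096-byte pages: a write beginning at pos 5000 into a file whose i_size is 1000 gives index 1 and pageofs 4096. Since pageofs > i_size, jffs2_write_begin first logs a JFFS2_COMPR_ZERO hole node covering offset 1000 with dsize 3096 (pageofs - i_size) and bumps i_size to 4096, so the file has no unmapped gap below the page about to be written.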
static int gfs2_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	unsigned int data_blocks, ind_blocks, rblocks;
	int alloc_required;
	int error = 0;
	struct gfs2_alloc *al;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
	unsigned to = from + len;
	struct page *page;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME, &ip->i_gh);
	error = gfs2_glock_nq_atime(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;

	gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
	error = gfs2_write_alloc_required(ip, pos, len, &alloc_required);
	if (error)
		goto out_unlock;

	if (alloc_required) {
		al = gfs2_alloc_get(ip);

		error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
		if (error)
			goto out_alloc_put;

		error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
		if (error)
			goto out_qunlock;

		al->al_requested = data_blocks + ind_blocks;
		error = gfs2_inplace_reserve(ip);
		if (error)
			goto out_qunlock;
	}

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;

	error = gfs2_trans_begin(sdp, rblocks,
				 PAGE_CACHE_SIZE / sdp->sd_sb.sb_bsize);
	if (error)
		goto out_trans_fail;

	error = -ENOMEM;
	page = __grab_cache_page(mapping, index);
	*pagep = page;
	if (unlikely(!page))
		goto out_endtrans;

	if (gfs2_is_stuffed(ip)) {
		error = 0;
		if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
			error = gfs2_unstuff_dinode(ip, page);
			if (error == 0)
				goto prepare_write;
		} else if (!PageUptodate(page)) {
			error = stuffed_readpage(ip, page);
		}
		goto out;
	}

prepare_write:
	error = block_prepare_write(page, from, to, gfs2_block_map);
out:
	if (error == 0)
		return 0;

	page_cache_release(page);
	if (pos + len > ip->i_inode.i_size)
		vmtruncate(&ip->i_inode, ip->i_inode.i_size);
out_endtrans:
	gfs2_trans_end(sdp);
out_trans_fail:
	if (alloc_required) {
		gfs2_inplace_release(ip);
out_qunlock:
		gfs2_quota_unlock(ip);
out_alloc_put:
		gfs2_alloc_put(ip);
	}
out_unlock:
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}
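Despite the filesystem-specific reservation, journaling, and quota steps, every function above honours the same write_begin contract: on success the page comes back locked and referenced through *pagep, and on any failure it must be unlocked and released before the error is returned. The following is a minimal sketch of that shared skeleton for a hypothetical filesystem; myfs_write_begin() and myfs_prepare_write() are illustrative placeholders, not symbols from any of the filesystems shown.

/* Minimal write_begin skeleton, assuming the 2.6.24-era API used above. */
static int myfs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	struct page *page;
	int ret;

	/* Returns the page locked, with an elevated reference count. */
	page = __grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	/* Filesystem-specific preparation (hypothetical helper). */
	ret = myfs_prepare_write(file, page, pos, len);
	if (ret) {
		/* On failure, the page must not stay locked or referenced. */
		unlock_page(page);
		page_cache_release(page);
	}
	return ret;
}

Setting *pagep before the error check mirrors the NFS and j4fs versions above; the generic loop only consults *pagep when write_begin returns 0, so the ordering is harmless as long as the failure path unlocks and releases the page.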