/*
 * xfs_iozero
 *
 * xfs_iozero clears the specified range of buffer supplied,
 * and marks all the affected blocks as valid and modified.  If
 * an affected block is not allocated, it will be allocated.  If
 * an affected block is not completely overwritten, and is not
 * valid before the operation, it will be read from disk before
 * being partially zeroed.
 *
 * Returns 0 on success, or a nonzero error code on failure.
 * NOTE(review): the final return negates @status — presumably because
 * XFS historically uses positive errno values internally while the
 * generic aops return negative errnos; confirm against callers.
 */
STATIC int
xfs_iozero(
	struct inode		*ip,	/* inode */
	loff_t			pos,	/* offset in file */
	size_t			count)	/* size of data to zero */
{
	unsigned		bytes;
	struct page		*page;
	struct address_space	*mapping;
	int			status;

	mapping = ip->i_mapping;

	/*
	 * Walk the range one page at a time: grab (and lock) the page,
	 * let the aops prepare it (allocating / reading blocks as needed),
	 * zero the covered bytes, then commit the write.
	 */
	do {
		unsigned long index, offset;

		/* Split pos into a page index and a byte offset in that page. */
		offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
		index = pos >> PAGE_CACHE_SHIFT;

		/* Clamp this iteration to the end of the page or the range. */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		/* grab_cache_page() returns the page locked, or NULL on OOM. */
		status = -ENOMEM;
		page = grab_cache_page(mapping, index);
		if (!page)
			break;

		status = mapping->a_ops->prepare_write(NULL, page, offset,
							offset + bytes);
		if (status)
			goto unlock;

		/* Zero the prepared span and flush any highmem kmap alias. */
		memclear_highpage_flush(page, offset, bytes);

		status = mapping->a_ops->commit_write(NULL, page, offset,
							offset + bytes);

		/* Only advance through the range if the commit succeeded. */
		if (!status) {
			pos += bytes;
			count -= bytes;
		}

unlock:
		/* Always drop the page lock and reference, success or not. */
		unlock_page(page);
		page_cache_release(page);
		if (status)
			break;
	} while (count);

	return (-status);
}
/*
 * Zero the tail of a partially-truncated page.
 *
 * Clears every byte of @page from @partial to the end of the page
 * (flushing any highmem mapping), then — if the filesystem has private
 * data attached to the page — asks it to invalidate its buffers for
 * the truncated region via do_invalidatepage().
 */
static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	unsigned tail = PAGE_CACHE_SIZE - partial;

	memclear_highpage_flush(page, partial, tail);
	if (PagePrivate(page))
		do_invalidatepage(page, partial);
}