static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	loff_t pos = start, max = start, bh_max = start;
	bool hole = false, need_wmb = false;
	struct block_device *bdev = NULL;
	int rw = iov_iter_rw(iter), rc;
	long map_len = 0;
	struct blk_dax_ctl dax = {
		.addr = (void __pmem *) ERR_PTR(-EIO),
	};

	if (rw == READ)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			unsigned blkbits = inode->i_blkbits;
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				rc = get_block(inode, block, bh, rw == WRITE);
				if (rc)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
				bdev = bh->b_bdev;
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = rw == READ && !buffer_written(bh);
			if (hole) {
				size = bh->b_size - first;
			} else {
				dax_unmap_atomic(bdev, &dax);
				dax.sector = to_sector(bh, inode);
				dax.size = bh->b_size;
				map_len = dax_map_atomic(bdev, &dax);
				if (map_len < 0) {
					rc = map_len;
					break;
				}
				if (buffer_unwritten(bh) || buffer_new(bh)) {
					dax_new_buf(dax.addr, map_len, first,
							pos, end);
					need_wmb = true;
				}
				dax.addr += first;
				size = map_len - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(dax.addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *) dax.addr,
					max - pos, iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			rc = -EFAULT;
			break;
		}

		pos += len;
		if (!IS_ERR(dax.addr))
			dax.addr += len;
	}

	if (need_wmb)
		wmb_pmem();
	dax_unmap_atomic(bdev, &dax);

	return (pos == start) ? rc : pos - start;
}
static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	loff_t pos = start, max = start, bh_max = start;
	bool hole = false, need_wmb = false;
	struct block_device *bdev = NULL;
	int rw = iov_iter_rw(iter), rc;
	long map_len = 0;
	struct blk_dax_ctl dax = {
		.addr = (void __pmem *) ERR_PTR(-EIO),
	};
	unsigned blkbits = inode->i_blkbits;
	sector_t file_blks = (i_size_read(inode) + (1 << blkbits) - 1)
								>> blkbits;

	if (rw == READ)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				rc = get_block(inode, block, bh, rw == WRITE);
				if (rc)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
				bdev = bh->b_bdev;
				/*
				 * We allow uninitialized buffers for writes
				 * beyond EOF as those cannot race with faults
				 */
				WARN_ON_ONCE(
					(buffer_new(bh) && block < file_blks) ||
					(rw == WRITE && buffer_unwritten(bh)));
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = rw == READ && !buffer_written(bh);
			if (hole) {
				size = bh->b_size - first;
			} else {
				dax_unmap_atomic(bdev, &dax);
				dax.sector = to_sector(bh, inode);
				dax.size = bh->b_size;
				map_len = dax_map_atomic(bdev, &dax);
				if (map_len < 0) {
					rc = map_len;
					break;
				}
				dax.addr += first;
				size = map_len - first;
			}
			/*
			 * pos + size is one past the last offset for IO,
			 * so pos + size can overflow loff_t at extreme offsets.
			 * Cast to u64 to catch this and get the true minimum.
			 */
			max = min_t(u64, pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(dax.addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *) dax.addr,
					max - pos, iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			rc = -EFAULT;
			break;
		}

		pos += len;
		if (!IS_ERR(dax.addr))
			dax.addr += len;
	}

	if (need_wmb)
		wmb_pmem();
	dax_unmap_atomic(bdev, &dax);

	return (pos == start) ? rc : pos - start;
}
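For context, dax_io() is a static helper: filesystems reach it through a wrapper that zeroes a scratch buffer_head, points it at the superblock's block device, and derives the [start, end) byte range from the iov_iter before delegating to the loop above. The sketch below is a minimal illustration of that calling pattern, loosely modeled on the dax_do_io() entry point of this kernel era; the inode locking, inode_dio_begin()/inode_dio_end() accounting, and end_io completion handling that the real wrapper performs are deliberately omitted, and the helper name here is hypothetical.

/*
 * Minimal sketch of how dax_io() is typically driven (assumption: modeled
 * on the dax_do_io() wrapper of this era, with locking and completion
 * handling stripped out).  The scratch buffer_head carries mapping state
 * between get_block() calls inside dax_io().
 */
static ssize_t dax_io_caller_sketch(struct inode *inode,
				    struct iov_iter *iter, loff_t pos,
				    get_block_t get_block)
{
	struct buffer_head bh;
	loff_t end = pos + iov_iter_count(iter);

	/* Zeroed scratch bh; dax_io() fills b_size/b_state per extent. */
	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;

	return dax_io(inode, iter, pos, end, get_block, &bh);
}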