static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
{
	blk_status_t rc = 0;
	bool do_acct;
	unsigned long start;
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct pmem_device *pmem = q->queuedata;
	struct nd_region *nd_region = to_region(pmem);

	/* An explicit preflush drains pending writes to the persistent media. */
	if (bio->bi_opf & REQ_PREFLUSH)
		nvdimm_flush(nd_region);

	do_acct = nd_iostat_start(bio, &start);
	/* Walk each segment of the bio and copy it to/from persistent memory. */
	bio_for_each_segment(bvec, bio, iter) {
		rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
				bvec.bv_offset, bio_op(bio), iter.bi_sector);
		if (rc) {
			bio->bi_status = rc;
			break;
		}
	}
	if (do_acct)
		nd_iostat_end(bio, start);

	/* REQ_FUA requires the data to be durable before completion is signalled. */
	if (bio->bi_opf & REQ_FUA)
		nvdimm_flush(nd_region);

	bio_endio(bio);
	return BLK_QC_T_NONE;
}
static int nsio_rw_bytes(struct nd_namespace_common *ndns,
		resource_size_t offset, void *buf, size_t size, int rw)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);

	if (unlikely(offset + size > nsio->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	if (rw == READ) {
		/*
		 * Round the request out to whole 512-byte sectors before
		 * consulting the badblocks list, which tracks full sectors.
		 */
		unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);

		if (unlikely(is_bad_pmem(&nsio->bb, offset / 512, sz_align)))
			return -EIO;
		return memcpy_from_pmem(buf, nsio->addr + offset, size);
	} else {
		/* Writes use the pmem-aware memcpy, then flush for durability. */
		memcpy_to_pmem(nsio->addr + offset, buf, size);
		nvdimm_flush(to_nd_region(ndns->dev.parent));
	}

	return 0;
}