/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data, struct sg_iovec *iov,
			int iov_count, unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (!iov[i].iov_len)
			return -EINVAL;

		/*
		 * Keep going so we check length of all segments
		 */
		if (uaddr & queue_dma_alignment(q))
			unaligned = 1;
	}

	if (unaligned || (q->dma_pad_mask & len) || map_data)
		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
					gfp_mask);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path
		 */
		bio_get(bio);
		bio_endio(bio, 0);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = NULL;
	return 0;
}
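/*
 * Hedged usage sketch (not part of the kernel source above): how a
 * REQ_TYPE_BLOCK_PC style caller would typically pair the map_data/sg_iovec
 * variant of blk_rq_map_user_iov() with blk_execute_rq() and
 * blk_rq_unmap_user(), as the kerneldoc above requires. The helper name
 * submit_user_iov() and its parameters are illustrative; exact request
 * allocation and execution signatures differ between kernel versions.
 */
static int submit_user_iov(struct request_queue *q, struct gendisk *disk,
			   struct sg_iovec *iov, int iov_count,
			   unsigned int len, int write)
{
	struct request *rq;
	struct bio *bio;
	int ret, err;

	rq = blk_get_request(q, write ? WRITE : READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	ret = blk_rq_map_user_iov(q, rq, NULL, iov, iov_count, len, GFP_KERNEL);
	if (ret)
		goto out_put;

	/* remember the original bio: it must be handed back for unmapping */
	bio = rq->bio;

	blk_execute_rq(q, disk, rq, 0);

	/* unmap while still in process context, as documented above */
	err = blk_rq_unmap_user(bio);
	if (!ret)
		ret = err;
out_put:
	blk_put_request(rq);
	return ret;
}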
/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct sg_iovec *iov, int iov_count, unsigned int len)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (uaddr & queue_dma_alignment(q)) {
			unaligned = 1;
			break;
		}
		if (!iov[i].iov_len)
			return -EINVAL;
	}

	if (unaligned || (q->dma_pad_mask & len))
		bio = bio_copy_user_iov(q, iov, iov_count, read);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		bio_endio(bio, 0);
		bio_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}

	return 0;
}
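/*
 * Hedged caller-side sketch (not part of the source above): in the
 * iov_iter era the public blk_rq_map_user_iov() takes a struct iov_iter,
 * which SG_IO style callers typically build with import_iovec() from a
 * user-space iovec array, later unmapping with blk_rq_unmap_user() on the
 * original bio. The helper name map_user_iovec() is illustrative; exact
 * signatures vary across kernel versions.
 */
static int map_user_iovec(struct request *rq, const struct iovec __user *uvec,
			  int nr_segs)
{
	struct iovec *iov = NULL;
	struct iov_iter i;
	int ret;

	ret = import_iovec(rq_data_dir(rq), uvec, nr_segs, 0, &iov, &i);
	if (ret < 0)
		return ret;

	ret = blk_rq_map_user_iov(rq->q, rq, NULL, &i, GFP_KERNEL);

	/* import_iovec() may have allocated a large iovec; free it either way */
	kfree(iov);
	return ret;
}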