Example No. 1
enum mmc_issue_type mmc_issue_type(struct mmc_queue *mq, struct request *req)
{
	struct mmc_host *host = mq->card->host;

	if (mq->use_cqe)
		return mmc_cqe_issue_type(host, req);

	if (req_op(req) == REQ_OP_READ || req_op(req) == REQ_OP_WRITE)
		return MMC_ISSUE_ASYNC;

	return MMC_ISSUE_SYNC;
}
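
A caller would branch on the returned issue type; the dispatch helper below is a minimal, hypothetical sketch (example_dispatch is illustrative, not from the source):

/* Hypothetical consumer: pick a submission strategy per issue type. */
static void example_dispatch(struct mmc_queue *mq, struct request *req)
{
	switch (mmc_issue_type(mq, req)) {
	case MMC_ISSUE_SYNC:
		/* issue the command and wait for completion */
		break;
	case MMC_ISSUE_DCMD:
		/* CQE direct command, completed via the CQE interrupt */
		break;
	case MMC_ISSUE_ASYNC:
		/* queue it; completion comes from the host's done callback */
		break;
	default:
		break;
	}
}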
Example No. 2
static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
	int r = DM_ENDIO_DONE;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(error == BLK_STS_TARGET)) {
		if (req_op(clone) == REQ_OP_WRITE_SAME &&
		    !clone->q->limits.max_write_same_sectors)
			disable_write_same(tio->md);
		if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
		    !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}

	switch (r) {
	case DM_ENDIO_DONE:
		/* The target wants to complete the I/O */
		dm_end_request(clone, error);
		break;
	case DM_ENDIO_INCOMPLETE:
		/* The target will handle the I/O */
		return;
	case DM_ENDIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio, false);
		break;
	case DM_ENDIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	default:
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}
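
The switch above covers the DM_ENDIO_* codes a target's rq_end_io hook may return. As a sketch, a hypothetical hook (example_rq_end_io is illustrative, not an in-tree target) could ask for a delayed requeue on transient transport errors:

/* Hypothetical rq_end_io hook: retry transport errors after a delay. */
static int example_rq_end_io(struct dm_target *ti, struct request *clone,
			     blk_status_t error, union map_info *map_context)
{
	if (error == BLK_STS_TRANSPORT)
		return DM_ENDIO_DELAY_REQUEUE;

	return DM_ENDIO_DONE;
}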
Example No. 3
/*
 * q->request_fn for old request-based dm.
 * Called with the queue lock held.
 */
static void dm_old_request_fn(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_target *ti = md->immutable_target;
	struct request *rq;
	struct dm_rq_target_io *tio;
	sector_t pos = 0;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		if (unlikely(!map)) {
			dm_put_live_table(md, srcu_idx);
			return;
		}
		ti = dm_table_find_target(map, pos);
		dm_put_live_table(md, srcu_idx);
	}

	/*
	 * For suspend, check blk_queue_stopped() and increment
	 * ->pending within a single queue_lock not to increment the
	 * number of in-flight I/Os after the queue is stopped in
	 * dm_suspend().
	 */
	while (!blk_queue_stopped(q)) {
		rq = blk_peek_request(q);
		if (!rq)
			return;

		/* always use block 0 to find the target for flushes for now */
		pos = 0;
		if (req_op(rq) != REQ_OP_FLUSH)
			pos = blk_rq_pos(rq);

		if ((dm_old_request_peeked_before_merge_deadline(md) &&
		     md_in_flight(md) && rq->bio && !bio_multiple_segments(rq->bio) &&
		     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
		    (ti->type->busy && ti->type->busy(ti))) {
			blk_delay_queue(q, 10);
			return;
		}

		dm_start_request(md, rq);

		tio = tio_from_request(rq);
		init_tio(tio, rq, md);
		/* Establish tio->ti before queuing work (map_tio_request) */
		tio->ti = ti;
		kthread_queue_work(&md->kworker, &tio->work);
		BUG_ON(!irqs_disabled());
	}
}
Example No. 4
/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
	struct mmc_queue *mq = q->queuedata;

	/*
	 * We only like normal block requests and discards.
	 */
	if (req->cmd_type != REQ_TYPE_FS && req_op(req) != REQ_OP_DISCARD &&
	    req_op(req) != REQ_OP_SECURE_ERASE) {
		blk_dump_rq_flags(req, "MMC bad request");
		return BLKPREP_KILL;
	}

	if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
		return BLKPREP_KILL;

	req->rq_flags |= RQF_DONTPREP;

	return BLKPREP_OK;
}
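
A prep function only takes effect once it is registered on the queue; in the legacy (pre-blk-mq) request path this is presumably wired up during queue setup with blk_queue_prep_rq(), roughly:

/* Registration sketch, assuming mq->queue was created with the legacy
 * blk_init_queue() path. */
blk_queue_prep_rq(mq->queue, mmc_prep_request);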
Example No. 5
static inline void virtblk_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

	switch (req_op(req)) {
	case REQ_OP_SCSI_IN:
	case REQ_OP_SCSI_OUT:
		virtblk_scsi_request_done(req);
		break;
	}

	blk_mq_end_request(req, virtblk_result(vbr));
}
Example No. 6
static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host,
					      struct request *req)
{
	switch (req_op(req)) {
	case REQ_OP_DRV_IN:
	case REQ_OP_DRV_OUT:
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		return MMC_ISSUE_SYNC;
	case REQ_OP_FLUSH:
		return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC;
	default:
		return MMC_ISSUE_ASYNC;
	}
}
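
mmc_cqe_can_dcmd() is not shown in the excerpt; a plausible definition, assuming the host advertises direct-command support through the MMC_CAP2_CQE_DCMD capability flag, is:

/* Sketch: DCMD is only usable if the host advertises the capability. */
static inline bool mmc_cqe_can_dcmd(struct mmc_host *host)
{
	return host->caps2 & MMC_CAP2_CQE_DCMD;
}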
Example No. 7
static inline void virtblk_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	int error = virtblk_result(vbr);

	switch (req_op(req)) {
	case REQ_OP_SCSI_IN:
	case REQ_OP_SCSI_OUT:
		virtblk_scsi_request_done(req);
		break;
	case REQ_OP_DRV_IN:
		req->errors = (error != 0);
		break;
	}

	blk_mq_end_request(req, error);
}
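
virtblk_result() is likewise not shown; in this pre-blk_status_t version it plausibly maps the virtio status byte to an errno along these lines (VIRTIO_BLK_S_* are the status codes from the virtio-blk spec):

/* Sketch: translate the device status byte into a kernel error code. */
static inline int virtblk_result(struct virtblk_req *vbr)
{
	switch (vbr->status) {
	case VIRTIO_BLK_S_OK:
		return 0;
	case VIRTIO_BLK_S_UNSUPP:
		return -ENOTTY;
	default:
		return -EIO;
	}
}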
Example No. 8
/*
 * Return true if a request is a write request that needs zone write locking.
 */
bool blk_req_needs_zone_write_lock(struct request *rq)
{
	if (!rq->q->seq_zones_wlock)
		return false;

	if (blk_rq_is_passthrough(rq))
		return false;

	switch (req_op(rq)) {
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE_SAME:
	case REQ_OP_WRITE:
		return blk_rq_zone_is_seq(rq);
	default:
		return false;
	}
}
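
Callers pair this predicate with the per-zone write lock helpers; a hypothetical issue path (example_issue_rq is illustrative) might look like:

/* Hypothetical dispatch path: serialize writes to a sequential zone. */
static void example_issue_rq(struct request *rq)
{
	if (blk_req_needs_zone_write_lock(rq))
		blk_req_zone_write_lock(rq);

	/*
	 * ... hand rq to the driver; the zone is released on completion
	 * with blk_req_zone_write_unlock() ...
	 */
}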
Example No. 9
static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	unsigned int num;
	int qid = hctx->queue_num;
	int err;
	bool notify = false;
	u32 type;

	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

	switch (req_op(req)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		type = 0;
		break;
	case REQ_OP_FLUSH:
		type = VIRTIO_BLK_T_FLUSH;
		break;
	case REQ_OP_SCSI_IN:
	case REQ_OP_SCSI_OUT:
		type = VIRTIO_BLK_T_SCSI_CMD;
		break;
	case REQ_OP_DRV_IN:
		type = VIRTIO_BLK_T_GET_ID;
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_MQ_RQ_QUEUE_ERROR;
	}

	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
	vbr->out_hdr.sector = type ?
		0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
	vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

	blk_mq_start_request(req);

	num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
	if (num) {
		if (rq_data_dir(req) == WRITE)
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
		else
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
	}

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	if (req_op(req) == REQ_OP_SCSI_IN || req_op(req) == REQ_OP_SCSI_OUT)
		err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	else
		err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	if (err) {
		virtqueue_kick(vblk->vqs[qid].vq);
		blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		/* Out of mem doesn't actually happen, since we fall back
		 * to direct descriptors */
		if (err == -ENOMEM || err == -ENOSPC)
			return BLK_MQ_RQ_QUEUE_BUSY;
		return BLK_MQ_RQ_QUEUE_ERROR;
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_MQ_RQ_QUEUE_OK;
}
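
For reference, the vbr->out_hdr filled in above follows the virtio-blk request header layout (struct virtio_blk_outhdr from include/uapi/linux/virtio_blk.h):

struct virtio_blk_outhdr {
	__virtio32 type;	/* VIRTIO_BLK_T_* request type */
	__virtio32 ioprio;	/* I/O priority */
	__virtio64 sector;	/* sector (512-byte offset), 0 for non-data */
};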
Example No. 10
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
	struct request *req = blk_mq_rq_from_pdu(cmd);
	int result, flags;
	struct nbd_request request;
	unsigned long size = blk_rq_bytes(req);
	struct bio *bio;
	u32 type;
	u32 tag = blk_mq_unique_tag(req);

	if (req_op(req) == REQ_OP_DISCARD)
		type = NBD_CMD_TRIM;
	else if (req_op(req) == REQ_OP_FLUSH)
		type = NBD_CMD_FLUSH;
	else if (rq_data_dir(req) == WRITE)
		type = NBD_CMD_WRITE;
	else
		type = NBD_CMD_READ;

	memset(&request, 0, sizeof(request));
	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(type);
	if (type != NBD_CMD_FLUSH) {
		request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
		request.len = htonl(size);
	}
	memcpy(request.handle, &tag, sizeof(tag));

	dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
		cmd, nbdcmd_to_ascii(type),
		(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
	result = sock_xmit(nbd, index, 1, &request, sizeof(request),
			(type == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		dev_err_ratelimited(disk_to_dev(nbd->disk),
			"Send control failed (result %d)\n", result);
		return -EIO;
	}

	if (type != NBD_CMD_WRITE)
		return 0;

	flags = 0;
	bio = req->bio;
	while (bio) {
		struct bio *next = bio->bi_next;
		struct bvec_iter iter;
		struct bio_vec bvec;

		bio_for_each_segment(bvec, bio, iter) {
			bool is_last = !next && bio_iter_last(bvec, iter);

			if (is_last)
				flags = MSG_MORE;
			dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
				cmd, bvec.bv_len);
			result = sock_send_bvec(nbd, index, &bvec, flags);
			if (result <= 0) {
				dev_err(disk_to_dev(nbd->disk),
					"Send data failed (result %d)\n",
					result);
				return -EIO;
			}
			/*
			 * The completion might already have come in,
			 * so break for the last one instead of letting
			 * the iterator do it. This prevents use-after-free
			 * of the bio.
			 */
			if (is_last)
				break;
		}
		bio = next;
	}

	return 0;
}
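
The header assembled above follows the NBD wire protocol; the request layout (struct nbd_request from include/uapi/linux/nbd.h, all fields big-endian) is:

struct nbd_request {
	__be32 magic;		/* NBD_REQUEST_MAGIC */
	__be32 type;		/* NBD_CMD_* */
	char handle[8];		/* echoed back in the reply */
	__be64 from;		/* byte offset */
	__be32 len;		/* payload length */
} __attribute__((packed));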
Example No. 11
/* always call with the tx_lock held */
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd)
{
    struct request *req = blk_mq_rq_from_pdu(cmd);
    int result, flags;
    struct nbd_request request;
    unsigned long size = blk_rq_bytes(req);
    u32 type;

    if (req->cmd_type == REQ_TYPE_DRV_PRIV)
        type = NBD_CMD_DISC;
    else if (req_op(req) == REQ_OP_DISCARD)
        type = NBD_CMD_TRIM;
    else if (req_op(req) == REQ_OP_FLUSH)
        type = NBD_CMD_FLUSH;
    else if (rq_data_dir(req) == WRITE)
        type = NBD_CMD_WRITE;
    else
        type = NBD_CMD_READ;

    memset(&request, 0, sizeof(request));
    request.magic = htonl(NBD_REQUEST_MAGIC);
    request.type = htonl(type);
    if (type != NBD_CMD_FLUSH && type != NBD_CMD_DISC) {
        request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
        request.len = htonl(size);
    }
    memcpy(request.handle, &req->tag, sizeof(req->tag));

    dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
            cmd, nbdcmd_to_ascii(type),
            (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
    result = sock_xmit(nbd, 1, &request, sizeof(request),
                       (type == NBD_CMD_WRITE) ? MSG_MORE : 0);
    if (result <= 0) {
        dev_err(disk_to_dev(nbd->disk),
                "Send control failed (result %d)\n", result);
        return -EIO;
    }

    if (type == NBD_CMD_WRITE) {
        struct req_iterator iter;
        struct bio_vec bvec;
        /*
         * we are really probing at internals to determine
         * whether to set MSG_MORE or not...
         */
        rq_for_each_segment(bvec, req, iter) {
            flags = 0;
            if (!rq_iter_last(bvec, iter))
                flags = MSG_MORE;
            dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
                    cmd, bvec.bv_len);
            result = sock_send_bvec(nbd, &bvec, flags);
            if (result <= 0) {
                dev_err(disk_to_dev(nbd->disk),
                        "Send data failed (result %d)\n",
                        result);
                return -EIO;
            }
        }
    }

    return 0;
}