Example #1
/**
 * blk_execute_rq - insert a request into queue for execution
 * @q:		queue to insert the request in
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:    insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 */
int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
		   struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	char sense[SCSI_SENSE_BUFFERSIZE];
	int err = 0;
	unsigned long hang_check;

	/*
	 * we need an extra reference to the request, so we can look at
	 * it after io completion
	 */
	rq->ref_count++;

	if (!rq->sense) {
		memset(sense, 0, sizeof(sense));
		rq->sense = sense;
		rq->sense_len = 0;
	}

	rq->end_io_data = &wait;
	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);

	/* Prevent hang_check timer from firing at us during very long I/O */
	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_timeout(&wait, hang_check * (HZ/2)));
	else
		wait_for_completion(&wait);

	if (rq->errors)
		err = -EIO;

	return err;
}
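For comparison, here is a minimal sketch of a synchronous caller built on the same pre-blk-mq passthrough API these examples use (blk_get_request(), blk_rq_set_block_pc(), the req->cmd CDB buffer). my_send_inquiry() and its parameters are hypothetical helpers for illustration, not kernel code.

/* Hypothetical caller: issue INQUIRY synchronously via blk_execute_rq(). */
static int my_send_inquiry(struct scsi_device *sdev, void *buf, unsigned len)
{
	struct request *req;
	int err;

	req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
	if (IS_ERR(req))
		return PTR_ERR(req);

	blk_rq_set_block_pc(req);		/* mark as SCSI passthrough */

	err = blk_rq_map_kern(req->q, req, buf, len, GFP_KERNEL);
	if (err)
		goto out;

	req->cmd[0] = INQUIRY;			/* build the CDB */
	req->cmd[4] = len;			/* allocation length, assumes len <= 255 */
	req->cmd_len = COMMAND_SIZE(INQUIRY);
	req->timeout = 10 * HZ;
	req->retries = 3;

	/* Queues the request and sleeps until blk_end_sync_rq() completes it. */
	err = blk_execute_rq(req->q, NULL, req, 0);
out:
	blk_put_request(req);
	return err;
}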
Example #2
/*
 * submit_stpg - Issue a SET TARGET GROUP STATES command
 *
 * Currently we only set the current target port group state
 * to 'active/optimized' and let the array firmware figure out
 * the states of the remaining groups.
 */
static unsigned submit_stpg(struct alua_dh_data *h)
{
	struct request *rq;
	int stpg_len = 8;
	struct scsi_device *sdev = h->sdev;

	/* Prepare the data buffer */
	memset(h->buff, 0, stpg_len);
	h->buff[4] = TPGS_STATE_OPTIMIZED & 0x0f;
	put_unaligned_be16(h->group_id, &h->buff[6]);

	rq = get_alua_req(sdev, h->buff, stpg_len, WRITE);
	if (!rq)
		return SCSI_DH_RES_TEMP_UNAVAIL;

	/* Prepare the command. */
	rq->cmd[0] = MAINTENANCE_OUT;
	rq->cmd[1] = MO_SET_TARGET_PGS;
	put_unaligned_be32(stpg_len, &rq->cmd[6]);
	rq->cmd_len = COMMAND_SIZE(MAINTENANCE_OUT);

	rq->sense = h->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;
	rq->end_io_data = h;

	blk_execute_rq_nowait(rq->q, NULL, rq, 1, stpg_endio);
	return SCSI_DH_OK;
}
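The other half of this pattern is the callback handed to blk_execute_rq_nowait(). The real stpg_endio() also decodes sense data and handles retries; the sketch below keeps only the essential shape, assuming the callback_fn/callback_data members this driver uses for asynchronous pg_init (the rq_end_io_fn prototype of this era is void (*)(struct request *, int)).

/* Simplified sketch of the completion side; not the driver's full code. */
static void stpg_endio(struct request *req, int error)
{
	struct alua_dh_data *h = req->end_io_data;	/* set in submit_stpg() */
	unsigned err = SCSI_DH_OK;

	if (error || req->errors)
		err = SCSI_DH_IO;

	/*
	 * Report the result back to dm-multipath, then drop the request
	 * that get_alua_req() allocated.
	 */
	if (h->callback_fn) {
		h->callback_fn(h->callback_data, err);
		h->callback_fn = h->callback_data = NULL;
	}
	blk_put_request(req);
}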
Example #3
/**
 * blk_execute_rq - insert a request into queue for execution
 * @q:		queue to insert the request in
 * @bd_disk:	matching gendisk
 * @rq:		request to insert
 * @at_head:    insert request at head or tail of queue
 *
 * Description:
 *    Insert a fully prepared request at the back of the I/O scheduler queue
 *    for execution and wait for completion.
 */
int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
		   struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	char sense[SCSI_SENSE_BUFFERSIZE];
	int err = 0;

	/*
	 * we need an extra reference to the request, so we can look at
	 * it after io completion
	 */
	rq->ref_count++;

	if (!rq->sense) {
		memset(sense, 0, sizeof(sense));
		rq->sense = sense;
		rq->sense_len = 0;
	}

	rq->end_io_data = &wait;
	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
	wait_for_completion(&wait);

	if (rq->errors)
		err = -EIO;

	return err;
}
Example #4
static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct request *rq;
	struct bio *bio = rqd->bio;

	rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	rq->__sector = bio->bi_iter.bi_sector;
	rq->ioprio = bio_prio(bio);

	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, null_lnvm_end_io);

	return 0;
}
Example #5
int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
		   struct request *rq, int at_head)
{
	DECLARE_COMPLETION_ONSTACK(wait);
	char sense[SCSI_SENSE_BUFFERSIZE];
	int err = 0;
	unsigned long hang_check;

	rq->ref_count++;

	if (!rq->sense) {
		memset(sense, 0, sizeof(sense));
		rq->sense = sense;
		rq->sense_len = 0;
	}

	rq->end_io_data = &wait;
	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);

	hang_check = sysctl_hung_task_timeout_secs;
	if (hang_check)
		while (!wait_for_completion_timeout(&wait, hang_check * (HZ/2)));
	else
		wait_for_completion(&wait);

	if (rq->errors)
		err = -EIO;

	return err;
}
Example #6
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct nvme_ns *ns = q->queuedata;
	struct request *rq;
	struct bio *bio = rqd->bio;
	struct nvme_nvm_command *cmd;

	rq = blk_mq_alloc_request(q, bio_data_dir(bio), 0);
	if (IS_ERR(rq))
		return -ENOMEM;

	cmd = kzalloc(sizeof(struct nvme_nvm_command) +
				sizeof(struct nvme_nvm_completion), GFP_KERNEL);
	if (!cmd) {
		blk_mq_free_request(rq);
		return -ENOMEM;
	}

	rq->cmd_type = REQ_TYPE_DRV_PRIV;
	rq->ioprio = bio_prio(bio);

	if (bio_has_data(bio))
		rq->nr_phys_segments = bio_phys_segments(q, bio);

	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;

	nvme_nvm_rqtocmd(rq, rqd, ns, cmd);

	rq->cmd = (unsigned char *)cmd;
	rq->cmd_len = sizeof(struct nvme_nvm_command);
	rq->special = cmd + 1;

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

	return 0;
}
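Everything allocated at submission time must be released in the completion handler. A sketch of what the matching nvme_nvm_end_io() does, assuming the nvm_end_io() helper from the lightnvm core of this era:

/* Sketch of the completion handler paired with the submit path above. */
static void nvme_nvm_end_io(struct request *rq, int error)
{
	struct nvm_rq *rqd = rq->end_io_data;

	nvm_end_io(rqd, error);		/* complete the lightnvm request */

	kfree(rq->cmd);			/* the nvme_nvm_command kzalloc'd at submit */
	blk_mq_free_request(rq);
}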
Example #7
/*
 * hp_sw_pg_init - HP path activation implementation.
 * @hwh: hardware handler specific data
 * @bypassed: unused; is the path group bypassed? (see dm-mpath.c)
 * @path: path to send the initialization command on
 *
 * Send an HP-specific path activation command on 'path'.
 * Do not try to optimize in any way, just send the activation command.
 * More than one path activation command may be sent to the same controller.
 * This seems to work fine for basic failover support.
 *
 * Possible optimizations
 * 1. Detect an in-progress activation request and avoid submitting another one
 * 2. Model the controller and only send a single activation request at a time
 * 3. Determine the state of a path before sending an activation request
 *
 * Context: kmpathd (see process_queued_ios() in dm-mpath.c)
 */
static void hp_sw_pg_init(struct hw_handler *hwh, unsigned bypassed,
			  struct path *path)
{
	struct request *req;
	struct hp_sw_context *h;

	path->hwhcontext = hwh->context;
	h = hwh->context;

	req = hp_sw_get_request(path);
	if (!req) {
		DMERR("%s path activation command - allocation fail",
		      path->dev->name);
		goto retry;
	}

	blk_execute_rq_nowait(req->q, NULL, req, 1, hp_sw_end_io);
	return;

retry:
	dm_pg_init_complete(path, MP_RETRY);
}
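hp_sw_get_request() is not shown in this example. A plausible sketch of such a helper, assuming the REQ_TYPE_BLOCK_PC passthrough API of this kernel generation; the CDB and timeout mirror the newer hp_sw_start_stop() in example #9, and this is a reconstruction, not the driver's actual code.

/* Hypothetical reconstruction of the request-building helper. */
static struct request *hp_sw_get_request(struct path *path)
{
	struct request_queue *q = bdev_get_queue(path->dev->bdev);
	struct hp_sw_context *h = path->hwhcontext;
	struct request *req;

	req = blk_get_request(q, WRITE, GFP_NOIO);	/* NULL on failure in this era */
	if (!req)
		return NULL;

	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->timeout = HP_SW_TIMEOUT;
	req->cmd_len = COMMAND_SIZE(START_STOP);
	req->cmd[0] = START_STOP;
	req->cmd[4] = 1;			/* start spin cycle */
	req->sense = h->sense;
	memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
	req->sense_len = 0;
	req->end_io_data = path;		/* retrieved again in hp_sw_end_io() */

	return req;
}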
Example #8
static int nvme_nvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
{
	struct request_queue *q = dev->q;
	struct nvme_nvm_command *cmd;
	struct request *rq;

	cmd = kzalloc(sizeof(struct nvme_nvm_command), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	rq = nvme_nvm_alloc_request(q, rqd, cmd);
	if (IS_ERR(rq)) {
		kfree(cmd);
		return PTR_ERR(rq);
	}

	rq->end_io_data = rqd;

	blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);

	return 0;
}
Example #9
/*
 * hp_sw_start_stop - Send START STOP UNIT command
 * @sdev: sdev the command should be sent to
 *
 * Sending START STOP UNIT activates the SP.
 */
static int hp_sw_start_stop(struct hp_sw_dh_data *h)
{
	struct request *req;

	req = blk_get_request(h->sdev->request_queue, WRITE, GFP_ATOMIC);
	if (IS_ERR(req))
		return SCSI_DH_RES_TEMP_UNAVAIL;

	blk_rq_set_block_pc(req);
	req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
			  REQ_FAILFAST_DRIVER;
	req->cmd_len = COMMAND_SIZE(START_STOP);
	req->cmd[0] = START_STOP;
	req->cmd[4] = 1;	/* Start spin cycle */
	req->timeout = HP_SW_TIMEOUT;
	req->sense = h->sense;
	memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
	req->sense_len = 0;
	req->end_io_data = h;

	blk_execute_rq_nowait(req->q, NULL, req, 1, start_stop_endio);
	return SCSI_DH_OK;
}
Example #10
static int osst_execute(struct osst_request *SRpnt, const unsigned char *cmd,
			int cmd_len, int data_direction, void *buffer, unsigned bufflen,
			int use_sg, int timeout, int retries)
{
	struct request *req;
	struct page **pages = NULL;
	struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;

	int err = 0;
	int write = (data_direction == DMA_TO_DEVICE);

	req = blk_get_request(SRpnt->stp->device->request_queue, write, GFP_KERNEL);
	if (!req)
		return DRIVER_ERROR << 24;

	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= REQ_QUIET;

	SRpnt->bio = NULL;

	if (use_sg) {
		struct scatterlist *sg, *sgl = (struct scatterlist *)buffer;
		int i;

		pages = kzalloc(use_sg * sizeof(struct page *), GFP_KERNEL);
		if (!pages)
			goto free_req;

		for_each_sg(sgl, sg, use_sg, i)
			pages[i] = sg_page(sg);

		mdata->null_mapped = 1;

		mdata->page_order = get_order(sgl[0].length);
		mdata->nr_entries =
			DIV_ROUND_UP(bufflen, PAGE_SIZE << mdata->page_order);
		mdata->offset = 0;

		err = blk_rq_map_user(req->q, req, mdata, NULL, bufflen, GFP_KERNEL);
		if (err) {
			kfree(pages);
			goto free_req;
		}
		SRpnt->bio = req->bio;
		mdata->pages = pages;

	} else if (bufflen) {
		err = blk_rq_map_kern(req->q, req, buffer, bufflen, GFP_KERNEL);
		if (err)
			goto free_req;
	}

	req->cmd_len = cmd_len;
	memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = SRpnt->sense;
	req->sense_len = 0;
	req->timeout = timeout;
	req->retries = retries;
	req->end_io_data = SRpnt;

	blk_execute_rq_nowait(req->q, NULL, req, 1, osst_end_async);
	return 0;
free_req:
	blk_put_request(req);
	return DRIVER_ERROR << 24;
}
Example #11
static int idescsi_queue (struct scsi_cmnd *cmd,
		void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	idescsi_scsi_t *scsi = scsihost_to_idescsi(host);
	ide_drive_t *drive = scsi->drive;
	struct request *rq = NULL;
	struct ide_atapi_pc *pc = NULL;
	int write = cmd->sc_data_direction == DMA_TO_DEVICE;

	if (!drive) {
		scmd_printk (KERN_ERR, cmd, "drive not present\n");
		goto abort;
	}
	scsi = drive_to_idescsi(drive);
	pc = kmalloc(sizeof(struct ide_atapi_pc), GFP_ATOMIC);
	rq = blk_get_request(drive->queue, write, GFP_ATOMIC);
	if (rq == NULL || pc == NULL) {
		printk (KERN_ERR "ide-scsi: %s: out of memory\n", drive->name);
		goto abort;
	}

	memset (pc->c, 0, 12);
	pc->flags = 0;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		pc->flags |= PC_FLAG_WRITING;
	pc->rq = rq;
	memcpy (pc->c, cmd->cmnd, cmd->cmd_len);
	pc->buf = NULL;
	pc->sg = scsi_sglist(cmd);
	pc->sg_cnt = scsi_sg_count(cmd);
	pc->b_count = 0;
	pc->req_xfer = pc->buf_size = scsi_bufflen(cmd);
	pc->scsi_cmd = cmd;
	pc->done = done;
	pc->timeout = jiffies + cmd->timeout_per_command;

	if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) {
		printk ("ide-scsi: %s: que %lu, cmd = ", drive->name, cmd->serial_number);
		ide_scsi_hex_dump(cmd->cmnd, cmd->cmd_len);
		if (memcmp(pc->c, cmd->cmnd, cmd->cmd_len)) {
			printk ("ide-scsi: %s: que %lu, tsl = ", drive->name, cmd->serial_number);
			ide_scsi_hex_dump(pc->c, 12);
		}
	}

	rq->special = (char *) pc;
	rq->cmd_type = REQ_TYPE_SPECIAL;
	spin_unlock_irq(host->host_lock);
	rq->ref_count++;
	memcpy(rq->cmd, pc->c, 12);
	blk_execute_rq_nowait(drive->queue, scsi->disk, rq, 0, NULL);
	spin_lock_irq(host->host_lock);
	return 0;
abort:
	kfree (pc);
	if (rq)
		blk_put_request(rq);
	cmd->result = DID_ERROR << 16;
	done(cmd);
	return 0;
}