Example #1
/*
 * hp_sw_end_io - Completion handler for HP path activation.
 * @req: path activation request
 * @error: scsi-ml error
 *
 *  Check sense data, free request structure, and notify dm that
 *  pg initialization has completed.
 *
 * Context: scsi-ml softirq
 *
 */
static void hp_sw_end_io(struct request *req, int error)
{
    struct dm_path *path = req->end_io_data;
    unsigned err_flags = 0;

    if (!error) {
        DMDEBUG("%s path activation command - success",
            path->dev->name);
        goto out;
    }

    if (hp_sw_error_is_retryable(req)) {
        DMDEBUG("%s path activation command - retry",
            path->dev->name);
        err_flags = MP_RETRY;
        goto out;
    }

    DMWARN("%s path activation fail - error=0x%x",
           path->dev->name, error);
    err_flags = MP_FAIL_PATH;

out:
    req->end_io_data = NULL;
    __blk_put_request(req->q, req);
    dm_pg_init_complete(path, err_flags);
}
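For context, a minimal sketch of how a completion handler like hp_sw_end_io is typically wired up with the legacy block API: the caller allocates a passthrough request, stores the dm_path in end_io_data, and submits it asynchronously so the handler runs on completion. The helper name and the exact command setup below are illustrative assumptions, not code taken from the driver.

static int hp_sw_submit_activation(struct scsi_device *sdev,
				   struct dm_path *path)
{
	struct request *req;

	req = blk_get_request(sdev->request_queue, WRITE, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->cmd_type = REQ_TYPE_BLOCK_PC;	/* SCSI passthrough command */
	req->cmd[0] = START_STOP;		/* START STOP UNIT */
	req->cmd[4] = 1;			/* start the unit */
	req->cmd_len = COMMAND_SIZE(req->cmd[0]);
	req->end_io_data = path;		/* handed back to hp_sw_end_io */

	/* completion runs hp_sw_end_io in scsi-ml softirq context */
	blk_execute_rq_nowait(req->q, NULL, req, 1, hp_sw_end_io);
	return 0;
}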
static void long_seq_test_free_end_io_fn(struct request *rq, int err)
{
	struct test_request *test_rq;
	struct test_data *ptd = test_get_test_data();

	if (!rq) {
		test_pr_err("%s: error: NULL request", __func__);
		return;
	}

	test_rq = (struct test_request *)rq->elv.priv[0];

	BUG_ON(!test_rq);

	spin_lock_irq(&ptd->lock);
	ptd->dispatched_count--;
	list_del_init(&test_rq->queuelist);
	__blk_put_request(ptd->req_q, test_rq->rq);
	spin_unlock_irq(&ptd->lock);

	if (err)
		test_pr_err("%s: request %d completed, err=%d",
		       __func__, test_rq->req_id, err);

	kfree(test_rq->bios_buffer);
	kfree(test_rq);
	utd->completed_req_count++;

	check_test_completion();

}
static void blk_end_sync_rq(struct request *rq, int error)
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = NULL;
	__blk_put_request(rq->q, rq);

	complete(waiting);
}
Example #4
/**
 * blk_end_sync_rq - executes a completion event on a request
 * @rq: request to complete
 * @error: end I/O status of the request
 */
static void blk_end_sync_rq(struct request *rq, int error)
{
	struct completion *waiting = rq->end_io_data;

	rq->end_io_data = NULL;
	__blk_put_request(rq->q, rq);

	/*
	 * complete last, if this is a stack request the process (and thus
	 * the rq pointer) could be invalid right after this complete()
	 */
	complete(waiting);
}
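blk_end_sync_rq is the end_io half of a synchronous-execution pattern: the submitter parks a completion in end_io_data, fires the request off with blk_execute_rq_nowait(), and sleeps until the handler completes it. A rough sketch of that pattern follows, assuming the legacy (pre-blk-mq) request API; the wrapper name is illustrative.

static int execute_rq_and_wait(struct request_queue *q, struct request *rq)
{
	DECLARE_COMPLETION_ONSTACK(wait);

	/*
	 * Take an extra reference so rq is still valid for the errors
	 * check below; blk_end_sync_rq drops one reference via
	 * __blk_put_request().
	 */
	rq->ref_count++;
	rq->end_io_data = &wait;

	blk_execute_rq_nowait(q, NULL, rq, 0, blk_end_sync_rq);
	wait_for_completion(&wait);

	/* the caller still owns one reference and must blk_put_request(rq) */
	return rq->errors ? -EIO : 0;
}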
/*
 * Function:	scsi_host_put_command()
 *
 * Purpose:	Free a scsi command block
 *
 * Arguments:	shost	- scsi host
 * 		cmd	- command block to free
 *
 * Returns:	Nothing.
 *
 * Notes:	The command must not belong to any lists.
 */
void scsi_host_put_command(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct request_queue *q = shost->uspace_req_q;
	struct request *rq = cmd->request;
	struct scsi_tgt_cmd *tcmd = rq->end_io_data;
	unsigned long flags;

	kmem_cache_free(scsi_tgt_cmd_cache, tcmd);

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_put_request(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);

	__scsi_put_command(shost, cmd, &shost->shost_gendev);
}
Example #6
/*
 * stpg_endio - Completion handler for SET TARGET GROUP STATES
 * @req: the completed SET TARGET GROUP STATES request
 * @error: scsi-ml error code for the request
 *
 * Evaluate the response to a SET TARGET GROUP STATES command.
 * We only have to check here whether the command should be resubmitted;
 * any other error is treated as a failure.
 */
static void stpg_endio(struct request *req, int error)
{
	struct alua_dh_data *h = req->end_io_data;
	struct scsi_sense_hdr sense_hdr;
	unsigned err = SCSI_DH_OK;

	if (host_byte(req->errors) != DID_OK ||
	    msg_byte(req->errors) != COMMAND_COMPLETE) {
		err = SCSI_DH_IO;
		goto done;
	}

	if (req->sense_len > 0) {
		err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
					   &sense_hdr);
		if (!err) {
			err = SCSI_DH_IO;
			goto done;
		}
		err = alua_check_sense(h->sdev, &sense_hdr);
		if (err == ADD_TO_MLQUEUE) {
			err = SCSI_DH_RETRY;
			goto done;
		}
		sdev_printk(KERN_INFO, h->sdev,
			    "%s: stpg sense code: %02x/%02x/%02x\n",
			    ALUA_DH_NAME, sense_hdr.sense_key,
			    sense_hdr.asc, sense_hdr.ascq);
		err = SCSI_DH_IO;
	} else if (error)
		err = SCSI_DH_IO;

	if (err == SCSI_DH_OK) {
		h->state = TPGS_STATE_OPTIMIZED;
		sdev_printk(KERN_INFO, h->sdev,
			    "%s: port group %02x switched to state %c\n",
			    ALUA_DH_NAME, h->group_id,
			    print_alua_state(h->state));
	}
done:
	req->end_io_data = NULL;
	__blk_put_request(req->q, req);
	if (h->callback_fn) {
		h->callback_fn(h->callback_data, err);
		h->callback_fn = h->callback_data = NULL;
	}
	return;
}
Example #7
/*
 * stpg_endio - Evaluate SET TARGET GROUP STATES
 * @req: the completed SET TARGET GROUP STATES request
 * @error: scsi-ml error code for the request
 *
 * Evaluate a SET TARGET GROUP STATES command response.
 */
static void stpg_endio(struct request *req, int error)
{
	struct alua_dh_data *h = req->end_io_data;
	struct scsi_sense_hdr sense_hdr;
	unsigned err = SCSI_DH_OK;

	if (host_byte(req->errors) != DID_OK ||
	    msg_byte(req->errors) != COMMAND_COMPLETE) {
		err = SCSI_DH_IO;
		goto done;
	}

	if (scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE,
				 &sense_hdr)) {
		if (sense_hdr.sense_key == NOT_READY &&
		    sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) {
			/* ALUA state transition already in progress */
			err = SCSI_DH_OK;
			goto done;
		}
		if (sense_hdr.sense_key == UNIT_ATTENTION) {
			err = SCSI_DH_RETRY;
			goto done;
		}
		sdev_printk(KERN_INFO, h->sdev, "%s: stpg failed\n",
			    ALUA_DH_NAME);
		scsi_print_sense_hdr(h->sdev, ALUA_DH_NAME, &sense_hdr);
		err = SCSI_DH_IO;
	} else if (error)
		err = SCSI_DH_IO;

	if (err == SCSI_DH_OK) {
		h->state = TPGS_STATE_OPTIMIZED;
		sdev_printk(KERN_INFO, h->sdev,
			    "%s: port group %02x switched to state %c\n",
			    ALUA_DH_NAME, h->group_id,
			    print_alua_state(h->state));
	}
done:
	req->end_io_data = NULL;
	__blk_put_request(req->q, req);
	if (h->callback_fn) {
		h->callback_fn(h->callback_data, err);
		h->callback_fn = h->callback_data = NULL;
	}
	return;
}
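For orientation, here is a sketch of the submit side that hands stpg_endio its request, loosely modeled on the ALUA handler's submit path. The function name is an assumption, and the parameter-data buffer (the target port group descriptor), timeout, and retry setup are omitted for brevity.

static unsigned submit_stpg_sketch(struct alua_dh_data *h)
{
	struct scsi_device *sdev = h->sdev;
	struct request *rq;

	rq = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
	if (!rq)
		return SCSI_DH_RES_TEMP_UNAVAIL;

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd[0] = MAINTENANCE_OUT;		/* SET TARGET PORT GROUPS */
	rq->cmd[1] = MO_SET_TARGET_PGS;
	rq->cmd_len = COMMAND_SIZE(MAINTENANCE_OUT);

	rq->sense = h->sense;			/* parsed by stpg_endio */
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;
	rq->end_io_data = h;

	/* stpg_endio runs on completion and releases the request */
	blk_execute_rq_nowait(rq->q, NULL, rq, 1, stpg_endio);
	return SCSI_DH_OK;
}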
Example #8
/* Wakeup from interrupt */
static void osst_end_async(struct request *req, int update)
{
	struct osst_request *SRpnt = req->end_io_data;
	struct osst_tape *STp = SRpnt->stp;
	struct rq_map_data *mdata = &SRpnt->stp->buffer->map_data;

	STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors;
#if DEBUG
	STp->write_pending = 0;
#endif
	if (SRpnt->waiting)
		complete(SRpnt->waiting);

	if (SRpnt->bio) {
		kfree(mdata->pages);
		blk_rq_unmap_user(SRpnt->bio);
	}

	__blk_put_request(req->q, req);
}
static void long_test_free_end_io_fn(struct request *rq, int err)
{
	struct test_request *test_rq;
	struct test_iosched *test_iosched;
	struct ufs_test_data *utd;

	if (!rq) {
		pr_err("%s: error: NULL request", __func__);
		return;
	}

	test_iosched = rq->q->elevator->elevator_data;
	utd = test_iosched->blk_dev_test_data;
	test_rq = (struct test_request *)rq->elv.priv[0];

	BUG_ON(!test_rq);

	spin_lock_irq(&test_iosched->lock);
	test_iosched->dispatched_count--;
	list_del_init(&test_rq->queuelist);
	__blk_put_request(test_iosched->req_q, test_rq->rq);
	spin_unlock_irq(&test_iosched->lock);

	if (utd->test_stage == UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE2 &&
			rq_data_dir(rq) == READ &&
			compare_buffer_to_pattern(test_rq)) {
		/* if the pattern does not match */
		pr_err("%s: read pattern not as expected", __func__);
		utd->test_stage = UFS_TEST_ERROR;
		check_test_completion(test_iosched);
		return;
	}

	if (err)
		pr_err("%s: request %d completed, err=%d", __func__,
			test_rq->req_id, err);

	test_iosched_free_test_req_data_buffer(test_rq);
	kfree(test_rq);
	utd->completed_req_count++;

	check_test_completion(test_iosched);
}
Example #10
static void start_stop_endio(struct request *req, int error)
{
	struct hp_sw_dh_data *h = req->end_io_data;
	unsigned err = SCSI_DH_OK;

	if (error || host_byte(req->errors) != DID_OK ||
			msg_byte(req->errors) != COMMAND_COMPLETE) {
		sdev_printk(KERN_WARNING, h->sdev,
			    "%s: sending start_stop_unit failed with %x\n",
			    HP_SW_NAME, req->errors);
		err = SCSI_DH_IO;
		goto done;
	}

	if (req->sense_len > 0) {
		err = start_done(h->sdev, h->sense);
		if (err == SCSI_DH_RETRY) {
			err = SCSI_DH_IO;
			if (--h->retry_cnt) {
				blk_put_request(req);
				err = hp_sw_start_stop(h);
				if (err == SCSI_DH_OK)
					return;
				/*
				 * The request was already released above;
				 * skip the final put and just notify.
				 */
				goto notify;
			}
		}
	}
done:
	req->end_io_data = NULL;
	__blk_put_request(req->q, req);
notify:
	if (h->callback_fn) {
		h->callback_fn(h->callback_data, err);
		h->callback_fn = h->callback_data = NULL;
	}
}
static void scenario_free_end_io_fn(struct request *rq, int err)
{
	struct test_request *test_rq;
	struct test_data *ptd = test_get_test_data();

	BUG_ON(!rq);
	test_rq = (struct test_request *)rq->elv.priv[0];
	BUG_ON(!test_rq);

	spin_lock_irq(&ptd->lock);
	ptd->dispatched_count--;
	list_del_init(&test_rq->queuelist);
	__blk_put_request(ptd->req_q, test_rq->rq);
	spin_unlock_irq(&ptd->lock);

	if (err)
		pr_err("%s: request %d completed, err=%d", __func__,
			test_rq->req_id, err);

	kfree(test_rq->bios_buffer);
	kfree(test_rq);

	check_test_completion();
}
Example #12
/*
 * Called with the clone's queue lock held (in the case of .request_fn)
 */
static void end_clone_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	if (!clone->q->mq_ops) {
		/*
		 * For just cleaning up the information of the queue in which
		 * the clone was dispatched.
		 * The clone is *NOT* freed actually here because it is alloced
		 * from dm own mempool (REQ_ALLOCED isn't set).
		 */
		__blk_put_request(clone->q, clone);
	}

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the clone's queue lock.  Otherwise, deadlock could occur because:
	 *     - another request may be submitted by the upper level driver
	 *       of the stacking during the completion
	 *     - the submission which requires queue lock may be done
	 *       against this clone's queue
	 */
	dm_complete_request(tio->orig, error);
}
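The clone gets this handler during clone setup: device-mapper points the clone's end_io at end_clone_request and stashes the tio in end_io_data. A minimal sketch of that binding follows (the helper name is illustrative; the real setup also prepares the clone from the original request):

static void bind_clone(struct request *clone, struct dm_rq_target_io *tio)
{
	clone->end_io = end_clone_request;	/* called when the clone completes */
	clone->end_io_data = tio;		/* recovered by end_clone_request() */
}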
Example #13
/*
 * Get block request for REQ_BLOCK_PC command issued to path.  Currently
 * limited to MODE_SELECT (trespass) and INQUIRY (VPD page 0xC0) commands.
 *
 * Uses data and sense buffers in hardware handler context structure and
 * assumes serial servicing of commands, both issuance and completion.
 */
static struct request *get_req(struct scsi_device *sdev, int cmd)
{
	struct clariion_dh_data *csdev = get_clariion_data(sdev);
	struct request *rq;
	unsigned char *page22;
	int len = 0;

	rq = blk_get_request(sdev->request_queue,
			(cmd == MODE_SELECT) ? WRITE : READ, GFP_ATOMIC);
	if (!rq) {
		sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed\n");
		return NULL;
	}

	memset(&rq->cmd, 0, BLK_MAX_CDB);
	rq->cmd[0] = cmd;
	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);

	switch (cmd) {
	case MODE_SELECT:
		if (csdev->short_trespass) {
			page22 = csdev->hr ? short_trespass_hr : short_trespass;
			len = sizeof(short_trespass);
		} else {
			page22 = csdev->hr ? long_trespass_hr : long_trespass;
			len = sizeof(long_trespass);
		}
		/*
		 * Can't DMA from kernel BSS -- must copy selected trespass
		 * command mode page contents to context buffer which is
		 * allocated by kmalloc.
		 */
		BUG_ON((len > CLARIION_BUFFER_SIZE));
		memcpy(csdev->buffer, page22, len);
		rq->cmd_flags |= REQ_RW;
		rq->cmd[1] = 0x10;
		break;
	case INQUIRY:
		rq->cmd[1] = 0x1;
		rq->cmd[2] = 0xC0;
		len = CLARIION_BUFFER_SIZE;
		memset(csdev->buffer, 0, CLARIION_BUFFER_SIZE);
		break;
	default:
		BUG_ON(1);
		break;
	}

	rq->cmd[4] = len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd_flags |= REQ_FAILFAST;
	rq->timeout = CLARIION_TIMEOUT;
	rq->retries = CLARIION_RETRIES;

	rq->sense = csdev->sense;
	memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
	rq->sense_len = 0;

	if (blk_rq_map_kern(sdev->request_queue, rq, csdev->buffer,
							len, GFP_ATOMIC)) {
		__blk_put_request(rq->q, rq);
		return NULL;
	}

	return rq;
}
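A request returned by get_req() is typically executed synchronously and then released. A hedged usage sketch follows; the wrapper name is an assumption, while blk_execute_rq() and blk_put_request() are the standard legacy API calls.

static int send_cmd_sketch(struct scsi_device *sdev, int cmd)
{
	struct request *rq = get_req(sdev, cmd);
	int err;

	if (!rq)
		return -ENOMEM;

	/* blocks until the passthrough command completes or times out */
	err = blk_execute_rq(sdev->request_queue, NULL, rq, 1);

	blk_put_request(rq);
	return err;
}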
/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			  struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return 0;

	if (req->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}
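attempt_merge() itself is reached through thin wrappers that look up the neighbouring request via the elevator; the back-merge case looks roughly like this (mirroring blk-merge.c's attempt_back_merge()):

static int attempt_back_merge_sketch(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}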