static int pvscsi_abort(struct scsi_cmnd *cmd)
{
	struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
	struct pvscsi_ctx *ctx;
	unsigned long flags;

	scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
		    adapter->host->host_no, cmd);

	spin_lock_irqsave(&adapter->hw_lock, flags);

	/*
	 * Poll the completion ring first - we might be trying to abort
	 * a command that is waiting to be dispatched in the completion ring.
	 */
	pvscsi_process_completion_ring(adapter);

	/*
	 * If there is no context for the command, it either already succeeded
	 * or else was never properly issued.  Not our problem.
	 */
	ctx = pvscsi_find_context(adapter, cmd);
	if (!ctx) {
		scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd);
		goto out;
	}

	pvscsi_abort_cmd(adapter, ctx);

	pvscsi_process_completion_ring(adapter);

out:
	spin_unlock_irqrestore(&adapter->hw_lock, flags);
	return SUCCESS;
}
void
scsi_cmd_print_sense_hdr(struct scsi_cmnd *scmd, const char *desc,
			  struct scsi_sense_hdr *sshdr)
{
	scmd_printk(KERN_INFO, scmd, "%s: ", desc);
	scsi_show_sense_hdr(sshdr);
	scmd_printk(KERN_INFO, scmd, "%s: ", desc);
	scsi_show_extd_sense(sshdr->asc, sshdr->ascq);
}
/* Normalize and print sense buffer in SCSI command */
void scsi_print_sense(char *name, struct scsi_cmnd *cmd)
{
	struct scsi_sense_hdr sshdr;

	scmd_printk(KERN_INFO, cmd, "");
	scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
				 &sshdr);
	scsi_show_sense_hdr(&sshdr);
	scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
				 &sshdr);
	scmd_printk(KERN_INFO, cmd, "");
	scsi_show_extd_sense(sshdr.asc, sshdr.ascq);
}
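/*
 * For illustration only: a minimal, self-contained sketch of the kind of
 * decoding a helper like scsi_decode_sense_buffer() performs.  Per SPC,
 * fixed-format sense data (response codes 0x70/0x71) keeps the sense key in
 * byte 2 and the ASC/ASCQ pair in bytes 12/13, while descriptor-format sense
 * (0x72/0x73) moves them to bytes 1, 2 and 3.  The demo_ names are ours;
 * this is not the kernel implementation.
 */
struct demo_sense_hdr {
	unsigned char response_code;
	unsigned char sense_key;
	unsigned char asc;
	unsigned char ascq;
};

static int demo_decode_sense(const unsigned char *buf, int len,
			     struct demo_sense_hdr *hdr)
{
	if (len < 1)
		return 0;
	hdr->response_code = buf[0] & 0x7f;
	if (hdr->response_code >= 0x72) {	/* descriptor format */
		hdr->sense_key = len > 1 ? buf[1] & 0x0f : 0;
		hdr->asc  = len > 2 ? buf[2] : 0;
		hdr->ascq = len > 3 ? buf[3] : 0;
	} else {				/* fixed format */
		hdr->sense_key = len > 2 ? buf[2] & 0x0f : 0;
		hdr->asc  = len > 12 ? buf[12] : 0;
		hdr->ascq = len > 13 ? buf[13] : 0;
	}
	return 1;
}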
static inline int macscsi_pwrite(struct NCR5380_hostdata *hostdata,
                                 unsigned char *src, int len)
{
	unsigned char *s = src;
	unsigned char *d = hostdata->pdma_io + (OUTPUT_DATA_REG << 4);
	int n = len;
	int transferred;

	while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
	                              BASR_DRQ | BASR_PHASE_MATCH,
	                              BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
		CP_MEM_TO_IO(s, d, n);

		transferred = s - src - n;
		hostdata->pdma_residual = len - transferred;

		/* Target changed phase early? */
		if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
		                           BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
			scmd_printk(KERN_ERR, hostdata->connected,
			            "%s: !REQ and !ACK\n", __func__);
		if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
			return 0;

		/* No bus error. */
		if (n == 0) {
			if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG,
			                          TCR_LAST_BYTE_SENT,
			                          TCR_LAST_BYTE_SENT, HZ / 64) < 0)
				scmd_printk(KERN_ERR, hostdata->connected,
				            "%s: Last Byte Sent timeout\n", __func__);
			return 0;
		}

		dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
		         "%s: bus error (%d/%d)\n", __func__, transferred, len);
		NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
		s = src + transferred;
		n = len - transferred;
	}

	scmd_printk(KERN_ERR, hostdata->connected,
	            "%s: phase mismatch or !DRQ\n", __func__);
	NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);

	return -1;
}
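/*
 * The retry arithmetic in macscsi_pwrite() above (and macscsi_pread() later
 * in this section) is easier to follow in isolation.  This is a hedged
 * userspace sketch, not driver code: flaky_copy() is a hypothetical stand-in
 * for CP_MEM_TO_IO, assumed to advance the cursor and decrement the count so
 * that (cursor - base) - count equals the bytes that actually landed before
 * a fault.  The real driver additionally re-checks DRQ and phase match on
 * every iteration and gives up on a mismatch rather than spinning forever.
 */
void flaky_copy(unsigned char **s, unsigned char *io_reg, int *n);

static int demo_pdma_write(unsigned char *src, unsigned char *io_reg, int len)
{
	unsigned char *s = src;
	int n = len;
	int transferred = 0;

	while (len - transferred > 0) {
		flaky_copy(&s, io_reg, &n);
		transferred = (s - src) - n;	/* bytes known good so far */
		if (n == 0)			/* whole buffer made it out */
			return 0;
		s = src + transferred;		/* resume after last good byte */
		n = len - transferred;
	}
	return 0;
}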
static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0;
	int rc;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}
	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;
	}
	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
			scsi_bufflen(sc), tcm_loop_sam_attr(sc),
			sc->sc_data_direction, 0,
			scsi_sglist(sc), scsi_sg_count(sc),
			sgl_bidi, sgl_bidi_count);
	if (rc < 0) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	return;

out_done:
	sc->scsi_done(sc);
	return;
}
static int pvscsi_host_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct pvscsi_adapter *adapter = shost_priv(host);
	unsigned long flags;
	bool use_msg;

	scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n");

	spin_lock_irqsave(&adapter->hw_lock, flags);

	use_msg = adapter->use_msg;

	if (use_msg) {
		adapter->use_msg = 0;
		spin_unlock_irqrestore(&adapter->hw_lock, flags);

		/*
		 * Now that we know that the ISR won't add more work on the
		 * workqueue we can safely flush any outstanding work.
		 */
		flush_workqueue(adapter->workqueue);
		spin_lock_irqsave(&adapter->hw_lock, flags);
	}

	/*
	 * We're going to tear down the entire ring structure and set it back
	 * up, so stall new requests until all completions are flushed and
	 * the rings are back in place.
	 */

	pvscsi_process_request_ring(adapter);

	ll_adapter_reset(adapter);

	/*
	 * Now process any completions.  Note we do this AFTER adapter reset,
	 * which is strange, but stops races where completions get posted
	 * between processing the ring and issuing the reset.  The backend will
	 * not touch the ring memory after reset, so the immediately pre-reset
	 * completion ring state is still valid.
	 */
	pvscsi_process_completion_ring(adapter);

	pvscsi_reset_all(adapter);
	adapter->use_msg = use_msg;
	pvscsi_setup_all_rings(adapter);
	pvscsi_unmask_intr(adapter);

	spin_unlock_irqrestore(&adapter->hw_lock, flags);

	return SUCCESS;
}
static inline int macscsi_pread(struct NCR5380_hostdata *hostdata,
                                unsigned char *dst, int len)
{
	unsigned char *s = hostdata->pdma_io + (INPUT_DATA_REG << 4);
	unsigned char *d = dst;
	int n = len;
	int transferred;

	while (!NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG,
	                              BASR_DRQ | BASR_PHASE_MATCH,
	                              BASR_DRQ | BASR_PHASE_MATCH, HZ / 64)) {
		CP_IO_TO_MEM(s, d, n);

		transferred = d - dst - n;
		hostdata->pdma_residual = len - transferred;

		/* No bus error. */
		if (n == 0)
			return 0;

		/* Target changed phase early? */
		if (NCR5380_poll_politely2(hostdata, STATUS_REG, SR_REQ, SR_REQ,
		                           BUS_AND_STATUS_REG, BASR_ACK, BASR_ACK, HZ / 64) < 0)
			scmd_printk(KERN_ERR, hostdata->connected,
			            "%s: !REQ and !ACK\n", __func__);
		if (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH))
			return 0;

		dsprintk(NDEBUG_PSEUDO_DMA, hostdata->host,
		         "%s: bus error (%d/%d)\n", __func__, transferred, len);
		NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
		d = dst + transferred;
		n = len - transferred;
	}

	scmd_printk(KERN_ERR, hostdata->connected,
	            "%s: phase mismatch or !DRQ\n", __func__);
	NCR5380_dprint(NDEBUG_PSEUDO_DMA, hostdata->host);
	return -1;
}
void scsi_print_command(struct scsi_cmnd *cmd)
{
	int k;

	scmd_printk(KERN_INFO, cmd, "CDB: ");
	print_opcode_name(cmd->cmnd, cmd->cmd_len);

	/* print out all bytes in cdb */
	printk(":");
	for (k = 0; k < cmd->cmd_len; ++k)
		printk(" %02x", cmd->cmnd[k]);
	printk("\n");
}
void scsi_print_command(struct scsi_cmnd *cmd)
{
	int k;

	if (cmd->cmnd == NULL)
		return;

	scmd_printk(KERN_INFO, cmd, "CDB: ");
	print_opcode_name(cmd->cmnd, cmd->cmd_len);

	/* print out all bytes in cdb */
	printk(":");
	for (k = 0; k < cmd->cmd_len; ++k)
		printk(" %02x", cmd->cmnd[k]);
	printk("\n");
}
static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
{
	unsigned i;

	for (i = 0; i < adapter->req_depth; i++) {
		struct pvscsi_ctx *ctx = &adapter->cmd_map[i];
		struct scsi_cmnd *cmd = ctx->cmd;
		if (cmd) {
			scmd_printk(KERN_ERR, cmd,
				    "Forced reset on cmd %p\n", cmd);
			pvscsi_unmap_buffers(adapter, ctx);
			pvscsi_release_context(adapter, ctx);
			cmd->result = (DID_RESET << 16);
			cmd->scsi_done(cmd);
		}
	}
}
static int pvscsi_device_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct pvscsi_adapter *adapter = shost_priv(host);
	unsigned long flags;

	scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n",
		    host->host_no, cmd->device->id);

	/*
	 * We don't want to queue new requests for this device after flushing
	 * all pending requests to emulation, since new requests could then
	 * sneak in during this device reset phase, so take the lock now.
	 */
	spin_lock_irqsave(&adapter->hw_lock, flags);

	pvscsi_process_request_ring(adapter);
	ll_device_reset(adapter, cmd->device->id);
	pvscsi_process_completion_ring(adapter);

	spin_unlock_irqrestore(&adapter->hw_lock, flags);

	return SUCCESS;
}
static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
			     struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd)
{
	struct PVSCSIRingsState *s;
	struct PVSCSIRingReqDesc *e;
	struct scsi_device *sdev;
	u32 req_entries;

	s = adapter->rings_state;
	sdev = cmd->device;
	req_entries = s->reqNumEntriesLog2;

	/*
	 * If this condition holds, we might have room on the request ring, but
	 * we might not have room on the completion ring for the response.
	 * However, we have already ruled out this possibility - we would not
	 * have successfully allocated a context if it were true, since we only
	 * have one context per request entry.  Check for it anyway, since it
	 * would be a serious bug.
	 */
	if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) {
		scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: "
			    "ring full: reqProdIdx=%d cmpConsIdx=%d\n",
			    s->reqProdIdx, s->cmpConsIdx);
		return -1;
	}

	e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries));

	e->bus    = sdev->channel;
	e->target = sdev->id;
	memset(e->lun, 0, sizeof(e->lun));
	e->lun[1] = sdev->lun;

	if (cmd->sense_buffer) {
		ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer,
					      SCSI_SENSE_BUFFERSIZE,
					      PCI_DMA_FROMDEVICE);
		e->senseAddr = ctx->sensePA;
		e->senseLen = SCSI_SENSE_BUFFERSIZE;
	} else {
		e->senseLen  = 0;
		e->senseAddr = 0;
	}
	e->cdbLen   = cmd->cmd_len;
	e->vcpuHint = smp_processor_id();
	memcpy(e->cdb, cmd->cmnd, e->cdbLen);

	e->tag = SIMPLE_QUEUE_TAG;
	if (sdev->tagged_supported &&
	    (cmd->tag == HEAD_OF_QUEUE_TAG ||
	     cmd->tag == ORDERED_QUEUE_TAG))
		e->tag = cmd->tag;

	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
	else if (cmd->sc_data_direction == DMA_TO_DEVICE)
		e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
	else if (cmd->sc_data_direction == DMA_NONE)
		e->flags = PVSCSI_FLAG_CMD_DIR_NONE;
	else
		e->flags = 0;

	pvscsi_map_buffers(adapter, ctx, cmd, e);

	e->context = pvscsi_map_context(adapter, ctx);

	barrier();

	s->reqProdIdx++;

	return 0;
}
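/*
 * A self-contained sketch of the power-of-two ring arithmetic used above.
 * reqNumEntriesLog2 stores log2 of the ring size, so the ring has
 * 1 << log2_entries slots.  Producer and consumer indices are free-running
 * 32-bit counters: their difference is the fill level (correct even across
 * wrap, thanks to unsigned arithmetic), and a slot is addressed by masking
 * with size - 1, which is what the driver's MASK() macro computes.  The
 * demo_ names are ours.
 */
#include <stdint.h>

#define DEMO_MASK(log2_entries)	((1u << (log2_entries)) - 1)

static int demo_ring_full(uint32_t prod, uint32_t cons, unsigned log2_entries)
{
	return prod - cons >= (1u << log2_entries);
}

static uint32_t demo_ring_slot(uint32_t idx, unsigned log2_entries)
{
	return idx & DEMO_MASK(log2_entries);
}

/*
 * e.g. with log2_entries = 3 (8 slots), prod = 0xfffffffe and
 * cons = 0xfffffffc give a fill level of 2, and prod lands in slot 6.
 */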
static int pvscsi_abort(struct scsi_cmnd *cmd)
{
	struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
	struct pvscsi_ctx *ctx;
	unsigned long flags;
	int result = SUCCESS;
	DECLARE_COMPLETION_ONSTACK(abort_cmp);

	scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
		    adapter->host->host_no, cmd);

	spin_lock_irqsave(&adapter->hw_lock, flags);

	/*
	 * Poll the completion ring first - we might be trying to abort
	 * a command that is waiting to be dispatched in the completion ring.
	 */
	pvscsi_process_completion_ring(adapter);

	/*
	 * If there is no context for the command, it either already succeeded
	 * or else was never properly issued.  Not our problem.
	 */
	ctx = pvscsi_find_context(adapter, cmd);
	if (!ctx) {
		scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd);
		goto out;
	}

	/*
	 * Mark that the command has been requested to be aborted and issue
	 * the abort.
	 */
	ctx->abort_cmp = &abort_cmp;

	pvscsi_abort_cmd(adapter, ctx);
	spin_unlock_irqrestore(&adapter->hw_lock, flags);
	/* Wait for 2 secs for the completion. */
	wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000));
	spin_lock_irqsave(&adapter->hw_lock, flags);

	if (!completion_done(&abort_cmp)) {
		/*
		 * Failed to abort the command, unmark the fact that it
		 * was requested to be aborted.
		 */
		ctx->abort_cmp = NULL;
		result = FAILED;
		scmd_printk(KERN_DEBUG, cmd,
			    "Failed to get completion for aborted cmd %p\n",
			    cmd);
		goto out;
	}

	/*
	 * Successfully aborted the command.
	 */
	cmd->result = (DID_ABORT << 16);
	cmd->scsi_done(cmd);

out:
	spin_unlock_irqrestore(&adapter->hw_lock, flags);
	return result;
}
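/*
 * The handshake above only works if the completion path signals abort_cmp.
 * A hedged sketch of the counterpart logic in pvscsi_complete_request() --
 * the field and function names follow this driver, but treat this as an
 * illustration of the pattern rather than the exact upstream hunk: when a
 * context that was marked for abort completes, the handler wakes the
 * waiting pvscsi_abort() and swallows the normal completion, since the
 * abort handler will post DID_ABORT itself.
 *
 *	if (ctx->abort_cmp) {
 *		complete(ctx->abort_cmp);
 *		return;
 *	}
 */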
static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0;
	int ret;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}

	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}

	transport_init_se_cmd(se_cmd, tl_tpg->tl_se_tpg.se_tpg_tfo,
			tl_nexus->se_sess,
			scsi_bufflen(sc), sc->sc_data_direction,
			tcm_loop_sam_attr(sc), &tl_cmd->tl_sense_buf[0]);

	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;
	}

	if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) {
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}

	/*
	 * Because some userspace code via scsi-generic does not memset its
	 * associated read buffers, go ahead and do that here for type
	 * SCF_SCSI_CONTROL_SG_IO_CDB.  Also note that this is currently
	 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
	 * by target core in target_setup_cmd_from_cdb() ->
	 * transport_generic_cmd_sequencer().
	 */
	if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
	    se_cmd->data_direction == DMA_FROM_DEVICE) {
		struct scatterlist *sg = scsi_sglist(sc);
		unsigned char *buf = kmap(sg_page(sg)) + sg->offset;

		if (buf != NULL) {
			memset(buf, 0, sg->length);
			kunmap(sg_page(sg));
		}
	}

	ret = target_setup_cmd_from_cdb(se_cmd, sc->cmnd);
	if (ret == -ENOMEM) {
		transport_send_check_condition_and_sense(se_cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
		transport_generic_free_cmd(se_cmd, 0);
		return;
	} else if (ret < 0) {
		if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
			tcm_loop_queue_status(se_cmd);
		else
			transport_send_check_condition_and_sense(se_cmd,
					se_cmd->scsi_sense_reason, 0);
		transport_generic_free_cmd(se_cmd, 0);
		return;
	}

	ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
			scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
	if (ret) {
		transport_send_check_condition_and_sense(se_cmd,
					se_cmd->scsi_sense_reason, 0);
		transport_generic_free_cmd(se_cmd, 0);
		return;
	}
	transport_handle_cdb_direct(se_cmd);
	return;

out_done:
	sc->scsi_done(sc);
	return;
}
void scsi_print_result(struct scsi_cmnd *cmd)
{
	scmd_printk(KERN_INFO, cmd, "");
	scsi_show_result(cmd->result);
}
static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
				    const struct PVSCSIRingCmpDesc *e)
{
	struct pvscsi_ctx *ctx;
	struct scsi_cmnd *cmd;
	u32 btstat = e->hostStatus;
	u32 sdstat = e->scsiStatus;

	ctx = pvscsi_get_context(adapter, e->context);
	cmd = ctx->cmd;
	pvscsi_unmap_buffers(adapter, ctx);
	pvscsi_release_context(adapter, ctx);
	cmd->result = 0;

	if (sdstat != SAM_STAT_GOOD &&
	    (btstat == BTSTAT_SUCCESS ||
	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
		cmd->result = (DID_OK << 16) | sdstat;
		if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
			cmd->result |= (DRIVER_SENSE << 24);
	} else
		switch (btstat) {
		case BTSTAT_SUCCESS:
		case BTSTAT_LINKED_COMMAND_COMPLETED:
		case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
			/* If everything went fine, let's move on. */
			cmd->result = (DID_OK << 16);
			break;

		case BTSTAT_DATARUN:
		case BTSTAT_DATA_UNDERRUN:
			/* Report residual data in underruns */
			scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
			cmd->result = (DID_ERROR << 16);
			break;

		case BTSTAT_SELTIMEO:
			/* Our emulation returns this for non-connected devs */
			cmd->result = (DID_BAD_TARGET << 16);
			break;

		case BTSTAT_LUNMISMATCH:
		case BTSTAT_TAGREJECT:
		case BTSTAT_BADMSG:
			cmd->result = (DRIVER_INVALID << 24);
			/* fall through */

		case BTSTAT_HAHARDWARE:
		case BTSTAT_INVPHASE:
		case BTSTAT_HATIMEOUT:
		case BTSTAT_NORESPONSE:
		case BTSTAT_DISCONNECT:
		case BTSTAT_HASOFTWARE:
		case BTSTAT_BUSFREE:
		case BTSTAT_SENSFAILED:
			cmd->result |= (DID_ERROR << 16);
			break;

		case BTSTAT_SENTRST:
		case BTSTAT_RECVRST:
		case BTSTAT_BUSRESET:
			cmd->result = (DID_RESET << 16);
			break;

		case BTSTAT_ABORTQUEUE:
			cmd->result = (DID_ABORT << 16);
			break;

		case BTSTAT_SCSIPARITY:
			cmd->result = (DID_PARITY << 16);
			break;

		default:
			cmd->result = (DID_ERROR << 16);
			scmd_printk(KERN_DEBUG, cmd,
				    "Unknown completion status: 0x%x\n",
				    btstat);
	}

	dev_dbg(&cmd->device->sdev_gendev,
		"cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n",
		cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat);

	cmd->scsi_done(cmd);
}
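/*
 * For reference, the cmd->result values built above pack four byte-wide
 * fields, following the long-standing SCSI midlayer convention: SAM status
 * in byte 0, message byte in byte 1, host byte (the DID_* codes) in byte 2
 * and driver byte in byte 3.  A self-contained sketch with our own demo_
 * names (the kernel has its own accessor macros for this):
 */
#include <stdint.h>

#define DEMO_HOST_BYTE(result)		(((result) >> 16) & 0xff)
#define DEMO_DRIVER_BYTE(result)	(((result) >> 24) & 0xff)

static uint32_t demo_pack_result(uint8_t driver, uint8_t host,
				 uint8_t msg, uint8_t status)
{
	return (uint32_t)driver << 24 | (uint32_t)host << 16 |
	       (uint32_t)msg << 8 | status;
}

/*
 * e.g. the CHECK CONDITION case above, (DID_OK << 16) | sdstat with
 * DRIVER_SENSE ored into byte 3, is equivalent to
 * demo_pack_result(DRIVER_SENSE, DID_OK, 0, SAM_STAT_CHECK_CONDITION).
 */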
static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0, transfer_length;
	int rc;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}
	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;
	}

	transfer_length = scsi_transfer_length(sc);
	if (!scsi_prot_sg_count(sc) &&
	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
		se_cmd->prot_pto = true;
		/*
		 * loopback transport doesn't support
		 * WRITE_GENERATE, READ_STRIP protection
		 * information operations, go ahead unprotected.
		 */
		transfer_length = scsi_bufflen(sc);
	}

	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
			transfer_length, tcm_loop_sam_attr(sc),
			sc->sc_data_direction, 0,
			scsi_sglist(sc), scsi_sg_count(sc),
			sgl_bidi, sgl_bidi_count,
			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
	if (rc < 0) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	return;

out_done:
	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	sc->scsi_done(sc);
	return;
}
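/*
 * Why transfer_length can differ from scsi_bufflen(): when T10 protection
 * information travels with the data, each protection interval (typically
 * one 512-byte logical block) carries an extra 8-byte PI tuple, and
 * scsi_transfer_length() accounts for that.  A hedged, self-contained
 * version of the arithmetic -- the demo_ name is ours, and the real helper
 * also consults the command's protection flags:
 */
static unsigned int demo_transfer_length(unsigned int bufflen,
					 unsigned int prot_interval,
					 int transfer_pi)
{
	unsigned int xfer_len = bufflen;

	if (transfer_pi)
		xfer_len += (bufflen / prot_interval) * 8;

	return xfer_len;
}

/* e.g. a 4096-byte read over 512-byte intervals moves 4096 + 8 * 8 = 4160. */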
/*
 * Allocate a tcm_loop cmd descriptor from target_core_mod code
 *
 * Can be called from interrupt context in tcm_loop_queuecommand() below
 */
static struct se_cmd *tcm_loop_allocate_core_cmd(
	struct tcm_loop_hba *tl_hba,
	struct se_portal_group *se_tpg,
	struct scsi_cmnd *sc)
{
	struct se_cmd *se_cmd;
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus = tl_hba->tl_nexus;
	struct tcm_loop_cmd *tl_cmd;
	int sam_task_attr;

	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		return NULL;
	}
	se_sess = tl_nexus->se_sess;

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
	if (!tl_cmd) {
		printk(KERN_ERR "Unable to allocate struct tcm_loop_cmd\n");
		set_host_byte(sc, DID_ERROR);
		return NULL;
	}
	se_cmd = &tl_cmd->tl_se_cmd;
	/*
	 * Save the pointer to struct scsi_cmnd *sc
	 */
	tl_cmd->sc = sc;
	/*
	 * Locate the SAM Task Attr from struct scsi_cmnd *
	 */
	if (sc->device->tagged_supported) {
		switch (sc->tag) {
		case HEAD_OF_QUEUE_TAG:
			sam_task_attr = MSG_HEAD_TAG;
			break;
		case ORDERED_QUEUE_TAG:
			sam_task_attr = MSG_ORDERED_TAG;
			break;
		default:
			sam_task_attr = MSG_SIMPLE_TAG;
			break;
		}
	} else
		sam_task_attr = MSG_SIMPLE_TAG;

	/*
	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
			&tl_cmd->tl_sense_buf[0]);

	/*
	 * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
	 */
	if (scsi_bidi_cmnd(sc))
		T_TASK(se_cmd)->t_tasks_bidi = 1;
	/*
	 * Locate the struct se_lun pointer and attach it to struct se_cmd
	 */
	if (transport_get_lun_for_cmd(se_cmd, NULL, tl_cmd->sc->device->lun) < 0) {
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
		set_host_byte(sc, DID_NO_CONNECT);
		return NULL;
	}
	/*
	 * Because some userspace code via scsi-generic does not memset its
	 * associated read buffers, go ahead and do that here for type
	 * SCF_SCSI_CONTROL_SG_IO_CDB.  Also note that this is currently
	 * guaranteed to be a single SGL for SCF_SCSI_CONTROL_SG_IO_CDB
	 * by target core in transport_generic_allocate_tasks() ->
	 * transport_generic_cmd_sequencer().
	 */
	if (se_cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB &&
	    se_cmd->data_direction == DMA_FROM_DEVICE) {
		struct scatterlist *sg = scsi_sglist(sc);
		unsigned char *buf = kmap(sg_page(sg)) + sg->offset;

		if (buf != NULL) {
			memset(buf, 0, sg->length);
			kunmap(sg_page(sg));
		}
	}

	transport_device_setup_cmd(se_cmd);
	return se_cmd;
}
/*
 * Allocate a tcm_loop cmd descriptor from target_core_mod code
 *
 * Can be called from interrupt context in tcm_loop_queuecommand() below
 */
static struct se_cmd *tcm_loop_allocate_core_cmd(
	struct tcm_loop_hba *tl_hba,
	struct se_portal_group *se_tpg,
	struct scsi_cmnd *sc)
{
	struct se_cmd *se_cmd;
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus = tl_hba->tl_nexus;
	struct tcm_loop_cmd *tl_cmd;
	int sam_task_attr;

	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		return NULL;
	}
	se_sess = tl_nexus->se_sess;

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
	if (!tl_cmd) {
		pr_err("Unable to allocate struct tcm_loop_cmd\n");
		set_host_byte(sc, DID_ERROR);
		return NULL;
	}
	se_cmd = &tl_cmd->tl_se_cmd;
	/*
	 * Save the pointer to struct scsi_cmnd *sc
	 */
	tl_cmd->sc = sc;
	/*
	 * Locate the SAM Task Attr from struct scsi_cmnd *
	 */
	if (sc->device->tagged_supported) {
		switch (sc->tag) {
		case HEAD_OF_QUEUE_TAG:
			sam_task_attr = MSG_HEAD_TAG;
			break;
		case ORDERED_QUEUE_TAG:
			sam_task_attr = MSG_ORDERED_TAG;
			break;
		default:
			sam_task_attr = MSG_SIMPLE_TAG;
			break;
		}
	} else
		sam_task_attr = MSG_SIMPLE_TAG;

	/*
	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
			&tl_cmd->tl_sense_buf[0]);

	if (scsi_bidi_cmnd(sc))
		se_cmd->se_cmd_flags |= SCF_BIDI;

	/*
	 * Locate the struct se_lun pointer and attach it to struct se_cmd
	 */
	if (transport_lookup_cmd_lun(se_cmd, tl_cmd->sc->device->lun) < 0) {
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
		set_host_byte(sc, DID_NO_CONNECT);
		return NULL;
	}

	return se_cmd;
}
static int idescsi_queue(struct scsi_cmnd *cmd,
		void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	idescsi_scsi_t *scsi = scsihost_to_idescsi(host);
	ide_drive_t *drive = scsi->drive;
	struct request *rq = NULL;
	struct ide_atapi_pc *pc = NULL;
	int write = cmd->sc_data_direction == DMA_TO_DEVICE;

	if (!drive) {
		scmd_printk(KERN_ERR, cmd, "drive not present\n");
		goto abort;
	}
	scsi = drive_to_idescsi(drive);
	pc = kmalloc(sizeof(struct ide_atapi_pc), GFP_ATOMIC);
	rq = blk_get_request(drive->queue, write, GFP_ATOMIC);
	if (rq == NULL || pc == NULL) {
		printk(KERN_ERR "ide-scsi: %s: out of memory\n", drive->name);
		goto abort;
	}

	memset(pc->c, 0, 12);
	pc->flags = 0;
	if (cmd->sc_data_direction == DMA_TO_DEVICE)
		pc->flags |= PC_FLAG_WRITING;
	pc->rq = rq;
	memcpy(pc->c, cmd->cmnd, cmd->cmd_len);
	pc->buf = NULL;
	pc->sg = scsi_sglist(cmd);
	pc->sg_cnt = scsi_sg_count(cmd);
	pc->b_count = 0;
	pc->req_xfer = pc->buf_size = scsi_bufflen(cmd);
	pc->scsi_cmd = cmd;
	pc->done = done;
	pc->timeout = jiffies + cmd->timeout_per_command;

	if (test_bit(IDESCSI_LOG_CMD, &scsi->log)) {
		printk("ide-scsi: %s: que %lu, cmd = ", drive->name,
		       cmd->serial_number);
		ide_scsi_hex_dump(cmd->cmnd, cmd->cmd_len);
		if (memcmp(pc->c, cmd->cmnd, cmd->cmd_len)) {
			printk("ide-scsi: %s: que %lu, tsl = ", drive->name,
			       cmd->serial_number);
			ide_scsi_hex_dump(pc->c, 12);
		}
	}

	rq->special = (char *) pc;
	rq->cmd_type = REQ_TYPE_SPECIAL;
	spin_unlock_irq(host->host_lock);
	rq->ref_count++;
	memcpy(rq->cmd, pc->c, 12);
	blk_execute_rq_nowait(drive->queue, scsi->disk, rq, 0, NULL);
	spin_lock_irq(host->host_lock);
	return 0;
abort:
	kfree(pc);
	if (rq)
		blk_put_request(rq);
	cmd->result = DID_ERROR << 16;
	done(cmd);
	return 0;
}