static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
					struct domain_device *dev,
					gfp_t gfp_flags)
{
	struct sas_task *task = sas_alloc_task(gfp_flags);
	struct scsi_lun lun;

	if (!task)
		return NULL;

	task->uldd_task = cmd;
	ASSIGN_SAS_TASK(cmd, task);

	task->dev = dev;
	task->task_proto = task->dev->tproto; /* BUG_ON(!SSP) */

	task->ssp_task.retry_count = 1;
	int_to_scsilun(cmd->device->lun, &lun);
	memcpy(task->ssp_task.LUN, &lun.scsi_lun, 8);
	task->ssp_task.task_attr = TASK_ATTR_SIMPLE;
	memcpy(task->ssp_task.cdb, cmd->cmnd, 16);

	task->scatter = scsi_sglist(cmd);
	task->num_scatter = scsi_sg_count(cmd);
	task->total_xfer_len = scsi_bufflen(cmd);
	task->data_dir = cmd->sc_data_direction;
	task->task_done = sas_scsi_task_done;

	return task;
}
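/*
 * For reference: int_to_scsilun(), used by sas_create_task() above (and by
 * every driver in this section), converts Linux's integer LUN into the
 * 8-byte, big-endian SCSI LUN format defined by SAM. A minimal standalone
 * sketch of its behavior, assuming the 32-bit LUN of this kernel era; the
 * struct and function names carry a _sketch suffix because the canonical
 * versions live in the SCSI midlayer:
 */
struct scsi_lun_sketch {
	unsigned char scsi_lun[8];
};

static void int_to_scsilun_sketch(unsigned int lun,
				  struct scsi_lun_sketch *scsilun)
{
	int i;

	memset(scsilun->scsi_lun, 0, sizeof(scsilun->scsi_lun));

	/* Two bytes per addressing level, MSB first within each level,
	 * so LUN 5 becomes 00 05 00 00 00 00 00 00. */
	for (i = 0; i < sizeof(lun); i += 2) {
		scsilun->scsi_lun[i] = (lun >> 8) & 0xFF;
		scsilun->scsi_lun[i + 1] = lun & 0xFF;
		lun >>= 16;
	}
}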
/**
 * qla4xxx_send_marker_iocb - issues marker iocb to HBA
 * @ha: Pointer to host adapter structure.
 * @ddb_entry: Pointer to device database entry
 * @lun: SCSI LUN
 * @mrkr_mod: marker identifier
 *
 * This routine issues a marker IOCB.
 **/
int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
			     struct ddb_entry *ddb_entry, int lun,
			     uint16_t mrkr_mod)
{
	struct qla4_marker_entry *marker_entry;
	unsigned long flags = 0;
	uint8_t status = QLA_SUCCESS;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Get pointer to the queue entry for the marker */
	if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) !=
	    QLA_SUCCESS) {
		status = QLA_ERROR;
		goto exit_send_marker;
	}

	/* Put the marker in the request queue */
	marker_entry->hdr.entryType = ET_MARKER;
	marker_entry->hdr.entryCount = 1;
	marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	marker_entry->modifier = cpu_to_le16(mrkr_mod);
	int_to_scsilun(lun, &marker_entry->lun);
	wmb();

	/* Tell ISP it's got a new I/O request */
	ha->isp_ops->queue_iocb(ha);

exit_send_marker:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return status;
}
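/*
 * The routine above follows the standard ring-submission recipe: take the
 * hardware lock, claim a free queue entry, fill it in with cpu_to_le16()
 * conversions for the little-endian firmware, then issue wmb() so the
 * descriptor stores are visible before the doorbell (queue_iocb) tells the
 * ISP to fetch the entry. A generic sketch of that ordering requirement;
 * the types and names here are illustrative, not driver API:
 */
struct ring_slot_sketch {
	unsigned short entry_type;
	unsigned short payload;
};

static void ring_submit_sketch(struct ring_slot_sketch *slot,
			       unsigned short payload,
			       void (*ring_doorbell)(void))
{
	slot->entry_type = 1;	/* build the descriptor in place... */
	slot->payload = payload;
	wmb();			/* ...make it visible to the device... */
	ring_doorbell();	/* ...and only then notify the hardware */
}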
/**
 * qla4xxx_send_command_to_isp - issues command to HBA
 * @ha: pointer to host adapter structure.
 * @srb: pointer to SCSI Request Block to be sent to ISP
 *
 * This routine is called by qla4xxx_queuecommand to build an ISP
 * command and pass it to the ISP for execution.
 **/
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	struct ddb_entry *ddb_entry;
	struct command_t3_entry *cmd_entry;
	struct scatterlist *sg = NULL;
	uint16_t tot_dsds;
	uint16_t req_cnt;
	unsigned long flags;
	uint16_t cnt;
	uint32_t index;
	char tag[2];

	/* Get real lun and adapter */
	ddb_entry = srb->ddb;

	/* Send marker(s) if needed. */
	if (ha->marker_needed == 1) {
		/* MM_LUN_RESET is assumed here as the marker modifier, to
		 * match the four-argument signature above. */
		if (qla4xxx_send_marker_iocb(ha, ddb_entry,
					     cmd->device->lun,
					     MM_LUN_RESET) != QLA_SUCCESS)
			return QLA_ERROR;

		ha->marker_needed = 0;
	}
	tot_dsds = 0;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	index = (uint32_t)cmd->request->tag;

	/* Calculate the number of request entries needed. */
	if (cmd->use_sg) {
		sg = (struct scatterlist *)cmd->request_buffer;
		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
				      cmd->sc_data_direction);
		if (tot_dsds == 0)
			goto queuing_error;
	} else if (cmd->request_bufflen) {
		dma_addr_t req_dma;

		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
					 cmd->request_bufflen,
					 cmd->sc_data_direction);
		if (dma_mapping_error(req_dma))
			goto queuing_error;

		srb->dma_handle = req_dma;
		tot_dsds = 1;
	}
	req_cnt = qla4xxx_calc_request_entries(tot_dsds);

	if (ha->req_q_count < (req_cnt + 2)) {
		cnt = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);
		if (ha->request_in < cnt)
			ha->req_q_count = cnt - ha->request_in;
		else
			ha->req_q_count = REQUEST_QUEUE_DEPTH -
			    (ha->request_in - cnt);
	}

	if (ha->req_q_count < (req_cnt + 2))
		goto queuing_error;

	/* total iocbs active */
	if ((ha->iocb_cnt + req_cnt) >= REQUEST_QUEUE_DEPTH)
		goto queuing_error;

	/* Build command packet */
	cmd_entry = (struct command_t3_entry *) ha->request_ptr;
	memset(cmd_entry, 0, sizeof(struct command_t3_entry));
	cmd_entry->hdr.entryType = ET_COMMAND;
	cmd_entry->handle = cpu_to_le32(index);
	cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	cmd_entry->connection_id = cpu_to_le16(ddb_entry->connection_id);

	int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
	cmd_entry->cmdSeqNum = cpu_to_le32(ddb_entry->CmdSn);
	cmd_entry->ttlByteCnt = cpu_to_le32(cmd->request_bufflen);
	memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
	cmd_entry->hdr.entryCount = req_cnt;

	/* Set data transfer direction control flags
	 * NOTE: Look at data_direction bits only if there is data to be
	 *	 transferred, as the data direction bit is sometimes filled
	 *	 in when there is no data to be transferred */
	cmd_entry->control_flags = CF_NO_DATA;
	if (cmd->request_bufflen) {
		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			cmd_entry->control_flags = CF_WRITE;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			cmd_entry->control_flags = CF_READ;

		ha->bytes_xfered += cmd->request_bufflen;
		if (ha->bytes_xfered & ~0xFFFFF) {
			ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
			ha->bytes_xfered &= 0xFFFFF;
		}
	}

	/*
	 * The remainder of the submission path (tagged-queuing flags using
	 * tag[], the IOCB segment build, the request-ring advance and the
	 * doorbell write) is not part of this excerpt.
	 */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	/* Assumed unwind path, mirroring the qla24xx queuing_error handler
	 * shown below: undo whichever DMA mapping was set up above. */
	if (cmd->use_sg && tot_dsds) {
		sg = (struct scatterlist *) cmd->request_buffer;
		pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
			     cmd->sc_data_direction);
	} else if (tot_dsds)
		pci_unmap_single(ha->pdev, srb->dma_handle,
				 cmd->request_bufflen,
				 cmd->sc_data_direction);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_ERROR;
}
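/*
 * The free-space check above re-derives req_q_count from the firmware's
 * shadow copy of the consumer index. This is the standard circular-buffer
 * free-slot calculation; a self-contained sketch with illustrative names:
 */
static unsigned int ring_free_slots_sketch(unsigned int producer,
					   unsigned int consumer,
					   unsigned int depth)
{
	if (producer < consumer)
		return consumer - producer;	/* producer has wrapped */
	return depth - (producer - consumer);	/* linear case */
}

/*
 * Note that producer == consumer reads as "depth slots free": an empty
 * ring is indistinguishable from a full one. That ambiguity is presumably
 * why the drivers in this section demand req_cnt + 2 slots of headroom
 * rather than ever filling the ring completely.
 */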
/**
 * qla24xx_start_scsi() - Send a SCSI command to the ISP
 * @sp: command to send to the ISP
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
qla24xx_start_scsi(srb_t *sp)
{
	int		ret;
	unsigned long	flags;
	scsi_qla_host_t	*ha;
	struct scsi_cmnd *cmd;
	uint32_t	*clr_ptr;
	uint32_t	index;
	uint32_t	handle;
	struct cmd_type_7 *cmd_pkt;
	struct scatterlist *sg;
	uint16_t	cnt;
	uint16_t	req_cnt;
	uint16_t	tot_dsds;
	struct device_reg_24xx __iomem *reg;

	/* Setup device pointers. */
	ret = 0;
	ha = sp->ha;
	reg = &ha->iobase->isp24;
	cmd = sp->cmd;
	/* So we know we haven't pci_map'ed anything yet */
	tot_dsds = 0;

	/* Send marker if required */
	if (ha->marker_needed != 0) {
		if (qla2x00_marker(ha, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS)
			return QLA_FUNCTION_FAILED;
		ha->marker_needed = 0;
	}

	/* Acquire ring specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Check for room in outstanding command list. */
	handle = ha->current_outstanding_cmd;
	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;
		if (ha->outstanding_cmds[handle] == 0)
			break;
	}
	if (index == MAX_OUTSTANDING_COMMANDS)
		goto queuing_error;

	/* Map the sg table so we have an accurate count of sg entries needed */
	if (cmd->use_sg) {
		sg = (struct scatterlist *) cmd->request_buffer;
		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
				      cmd->sc_data_direction);
		if (tot_dsds == 0)
			goto queuing_error;
	} else if (cmd->request_bufflen) {
		dma_addr_t req_dma;

		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
					 cmd->request_bufflen,
					 cmd->sc_data_direction);
		if (dma_mapping_error(req_dma))
			goto queuing_error;

		sp->dma_handle = req_dma;
		tot_dsds = 1;
	}

	req_cnt = qla24xx_calc_iocbs(tot_dsds);
	if (ha->req_q_cnt < (req_cnt + 2)) {
		cnt = (uint16_t)RD_REG_DWORD_RELAXED(&reg->req_q_out);
		if (ha->req_ring_index < cnt)
			ha->req_q_cnt = cnt - ha->req_ring_index;
		else
			ha->req_q_cnt = ha->request_q_length -
				(ha->req_ring_index - cnt);
	}
	if (ha->req_q_cnt < (req_cnt + 2))
		goto queuing_error;

	/* Build command packet. */
	ha->current_outstanding_cmd = handle;
	ha->outstanding_cmds[handle] = sp;
	sp->ha = ha;
	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
	ha->req_q_cnt -= req_cnt;

	cmd_pkt = (struct cmd_type_7 *)ha->request_ring_ptr;
	cmd_pkt->handle = handle;

	/* Zero out remaining portion of packet. */
	/*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
	clr_ptr = (uint32_t *)cmd_pkt + 2;
	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);

	/* Set NPORT-ID and LUN number */
	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;

	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));

	/* Load SCSI command packet. */
	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));

	cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);

	/* Build IOCB segments */
	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);

	/* Set total data segment count. */
	cmd_pkt->entry_count = (uint8_t)req_cnt;
	wmb();

	/* Adjust ring index. */
	ha->req_ring_index++;
	if (ha->req_ring_index == ha->request_q_length) {
		ha->req_ring_index = 0;
		ha->request_ring_ptr = ha->request_ring;
	} else
		ha->request_ring_ptr++;

	sp->flags |= SRB_DMA_VALID;
	sp->state = SRB_ACTIVE_STATE;

	/* Set chip new ring index. */
	WRT_REG_DWORD(&reg->req_q_in, ha->req_ring_index);
	RD_REG_DWORD_RELAXED(&reg->req_q_in);	/* PCI Posting. */

	/* Manage unprocessed RIO/ZIO commands in response queue. */
	if (ha->flags.process_response_queue &&
	    ha->response_ring_ptr->signature != RESPONSE_PROCESSED)
		qla24xx_process_response_queue(ha);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	if (cmd->use_sg && tot_dsds) {
		sg = (struct scatterlist *) cmd->request_buffer;
		pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
			     cmd->sc_data_direction);
	} else if (tot_dsds) {
		pci_unmap_single(ha->pdev, sp->dma_handle,
				 cmd->request_bufflen,
				 cmd->sc_data_direction);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_FUNCTION_FAILED;
}
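/*
 * The outstanding-command scan in qla24xx_start_scsi() is a round-robin
 * search that starts just past the last handle issued and deliberately
 * skips slot 0, since a zero handle could not be told apart from "no
 * command" when completions are looked up. A self-contained sketch with
 * illustrative names (MAX_CMDS_SKETCH stands in for
 * MAX_OUTSTANDING_COMMANDS):
 */
#define MAX_CMDS_SKETCH 1024

static uint32_t find_free_handle_sketch(void *cmds[MAX_CMDS_SKETCH],
					uint32_t last_handle)
{
	uint32_t handle = last_handle;
	uint32_t index;

	for (index = 1; index < MAX_CMDS_SKETCH; index++) {
		handle++;
		if (handle == MAX_CMDS_SKETCH)
			handle = 1;		/* wrap past reserved slot 0 */
		if (cmds[handle] == NULL)
			return handle;		/* free slot found */
	}
	return 0;				/* handles exhausted */
}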