/*
 * fnic_slave_alloc() - SCSI midlayer slave_alloc callback.
 * @sdev: SCSI device being attached.
 *
 * Refuses the device (-ENXIO) when no FC remote port backs the target or
 * the remote port is not ready per the FC transport; otherwise applies the
 * driver-wide queue depth and accepts the device.
 */
static int fnic_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *remote_port = starget_to_rport(scsi_target(sdev));

	/* No rport at all means the target is not reachable. */
	if (!remote_port)
		return -ENXIO;

	/* Transport-level readiness check (blocked/deleted rport etc.). */
	if (fc_remote_port_chkready(remote_port))
		return -ENXIO;

	scsi_change_queue_depth(sdev, fnic_max_qdepth);
	return 0;
}
/*
 * fnic_slave_alloc() - SCSI midlayer slave_alloc callback.
 * @sdev: SCSI device being attached.
 *
 * Marks the device as supporting tagged queuing, rejects it (-ENXIO) when
 * its FC remote port is absent or not ready, and otherwise activates tagged
 * command queuing at the driver default depth.
 */
static int fnic_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *remote_port = starget_to_rport(scsi_target(sdev));

	sdev->tagged_supported = 1;

	/* Bail out when the target has no usable remote port. */
	if (!remote_port)
		return -ENXIO;
	if (fc_remote_port_chkready(remote_port))
		return -ENXIO;

	scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH);
	return 0;
}
/*
 * zfcp_scsi_queuecommand() - SCSI midlayer queuecommand handler.
 * @shost: Scsi_Host the command arrived on (unused directly; context only).
 * @scpnt: SCSI command to send.
 *
 * Returns 0 when the command was either sent or completed immediately with
 * an error result, or a SCSI_MLQUEUE_* busy code asking the midlayer to
 * requeue. Immediate failures are reported through scsi_done()/the zfcp
 * fail helpers rather than by a nonzero return.
 */
static int zfcp_scsi_queuecommand(struct Scsi_Host *shost,
				  struct scsi_cmnd *scpnt)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
	struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device));
	int status, scsi_result, ret;

	/* reset the status for this request */
	scpnt->result = 0;
	scpnt->host_scribble = NULL;

	/* Let the FC transport veto the send (rport blocked/deleted). */
	scsi_result = fc_remote_port_chkready(rport);
	if (unlikely(scsi_result)) {
		scpnt->result = scsi_result;
		zfcp_dbf_scsi_fail_send(scpnt);
		scpnt->scsi_done(scpnt);
		return 0;
	}

	status = atomic_read(&zfcp_sdev->status);
	if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) &&
	    !(atomic_read(&zfcp_sdev->port->status) &
	      ZFCP_STATUS_COMMON_ERP_FAILED)) {
		/* only LUN access denied, but port is good
		 * not covered by FC transport, have to fail here */
		zfcp_scsi_command_fail(scpnt, DID_ERROR);
		return 0;
	}

	if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
		/* This could be either
		 * open LUN pending: this is temporary, will result in
		 * open LUN or ERP_FAILED, so retry command
		 * call to rport_delete pending: mimic retry from
		 * fc_remote_port_chkready until rport is BLOCKED
		 */
		zfcp_scsi_command_fail(scpnt, DID_IMM_RETRY);
		return 0;
	}

	/* Hand the command to the FSF layer; map its errors to MLQUEUE codes. */
	ret = zfcp_fsf_fcp_cmnd(scpnt);
	if (unlikely(ret == -EBUSY))
		return SCSI_MLQUEUE_DEVICE_BUSY;
	else if (unlikely(ret < 0))
		return SCSI_MLQUEUE_HOST_BUSY;
	return ret;
}
static int fnic_slave_alloc(struct scsi_device *sdev) { struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); struct fc_lport *lp = shost_priv(sdev->host); struct fnic *fnic = lport_priv(lp); sdev->tagged_supported = 1; if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; if (sdev->tagged_supported) scsi_activate_tcq(sdev, fnic_max_qdepth); rport->dev_loss_tmo = fnic->config.port_down_timeout / 1000; return 0; }
static int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt) { struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device)); int status, scsi_result, ret; scpnt->result = 0; scpnt->host_scribble = NULL; scsi_result = fc_remote_port_chkready(rport); if (unlikely(scsi_result)) { scpnt->result = scsi_result; zfcp_dbf_scsi_fail_send(scpnt); scpnt->scsi_done(scpnt); return 0; } status = atomic_read(&zfcp_sdev->status); if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) && !(atomic_read(&zfcp_sdev->port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)) { zfcp_scsi_command_fail(scpnt, DID_ERROR); return 0; } if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) { zfcp_scsi_command_fail(scpnt, DID_IMM_RETRY); return 0; } ret = zfcp_fsf_fcp_cmnd(scpnt); if (unlikely(ret == -EBUSY)) return SCSI_MLQUEUE_DEVICE_BUSY; else if (unlikely(ret < 0)) return SCSI_MLQUEUE_HOST_BUSY; return ret; }
/*
 * bnx2fc_initiate_els() - build and queue an ELS request to the firmware.
 * @tgt:        target rport session the ELS is addressed to.
 * @op:         ELS opcode (must lie in [ELS_LS_RJT, ELS_AUTH_ELS]).
 * @data:       ELS payload to copy into the middle-path request buffer.
 * @data_len:   payload length in bytes.
 * @cb_func:    completion callback invoked when the ELS finishes.
 * @cb_arg:     callback argument; ->io_req is linked to the new command.
 * @timer_msec: optional timeout armed on the request (0 = no timer).
 *
 * Returns 0 on successful submission, negative errno otherwise. On the
 * failure paths that already own a command, the reference is dropped under
 * tgt_lock via kref_put(); the caller keeps ownership of @cb_arg.
 */
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
			       void *data, u32 data_len,
			       void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
			       struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct fc_rport *rport = tgt->rport;
	struct fc_lport *lport = port->lport;
	struct bnx2fc_cmd *els_req;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	int rc = 0;
	int task_idx, index;
	u32 did, sid;
	u16 xid;

	/* Gate on transport, link and session state before allocating. */
	rc = fc_remote_port_chkready(rport);
	if (rc) {
		printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) ||
	    (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags))) {
		printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}

	els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS);
	if (!els_req) {
		rc = -ENOMEM;
		goto els_err;
	}

	/* Wire up the command and its completion callback. */
	els_req->sc_cmd = NULL;
	els_req->port = port;
	els_req->tgt = tgt;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	els_req->cb_arg = cb_arg;

	mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
	rc = bnx2fc_init_mp_req(els_req);
	if (rc == FAILED) {
		printk(KERN_ERR PFX "ELS MP request init failed\n");
		/* Drop our command reference under the session lock. */
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -ENOMEM;
		goto els_err;
	} else {
		/* rc SUCCESS */
		rc = 0;
	}

	/* Set the data_xfer_len to the size of ELS payload */
	mp_req->req_len = data_len;
	els_req->data_xfer_len = mp_req->req_len;

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		/* Unsupported opcode: detach callbacks and release the cmd. */
		printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -EINVAL;
	}

	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = tgt->rport->port_id;
	sid = tgt->sid;

	/* SRR is an FC-4 (FCP) link service; everything else is plain ELS. */
	if (op == ELS_SRR)
		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid,
				 FC_TYPE_FCP, FC_FC_FIRST_SEQ |
				 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
	else
		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
				 FC_TYPE_ELS, FC_FC_FIRST_SEQ |
				 FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(els_req, task);

	spin_lock_bh(&tgt->tgt_lock);

	/* Re-check session state under the lock: it may have been torn
	 * down since the unlocked check above. */
	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "initiate_els.. session not ready\n");
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EINVAL;
	}

	if (timer_msec)
		bnx2fc_cmd_timer_set(els_req, timer_msec);
	bnx2fc_add_2_sq(tgt, xid);

	els_req->on_active_queue = 1;
	list_add_tail(&els_req->link, &tgt->els_queue);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);

els_err:
	return rc;
}
/* It's assumed that the lock is held when calling this function. */
/*
 * qedf_initiate_els() - build and queue an ELS request to the firmware.
 * @fcport:     rport session the ELS is addressed to.
 * @op:         ELS opcode (must lie in [ELS_LS_RJT, ELS_AUTH_ELS]).
 * @data:       ELS payload to copy into the middle-path request buffer.
 * @data_len:   payload length in bytes.
 * @cb_func:    completion callback invoked when the ELS finishes.
 * @cb_arg:     callback argument; ->io_req/->op are filled in here.
 * @timer_msec: optional timeout armed on the request (0 = no timer).
 *
 * Returns 0 on successful submission, negative errno otherwise. Command
 * allocation is retried for up to ~10 seconds before giving up.
 *
 * Fix: the allocation-retry delay used mdelay(20 * USEC_PER_MSEC), i.e. a
 * 20,000 ms busy-wait per retry instead of the intended ~20 ms; the bogus
 * unit multiplication is dropped (mdelay() already takes milliseconds).
 * mdelay() rather than msleep() because the caller may hold a lock.
 */
static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
	void *data, uint32_t data_len,
	void (*cb_func)(struct qedf_els_cb_arg *cb_arg),
	struct qedf_els_cb_arg *cb_arg, uint32_t timer_msec)
{
	struct qedf_ctx *qedf = fcport->qedf;
	struct fc_lport *lport = qedf->lport;
	struct qedf_ioreq *els_req;
	struct qedf_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct e4_fcoe_task_context *task;
	int rc = 0;
	uint32_t did, sid;
	uint16_t xid;
	uint32_t start_time = jiffies / HZ;
	uint32_t current_time;
	struct fcoe_wqe *sqe;
	unsigned long flags;
	u16 sqe_idx;

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Sending ELS\n");

	/* Gate on transport, link and session state before allocating. */
	rc = fc_remote_port_chkready(fcport->rport);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: rport not ready\n", op);
		rc = -EAGAIN;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: link is not ready\n",
			  op);
		rc = -EAGAIN;
		goto els_err;
	}
	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
		QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}

retry_els:
	els_req = qedf_alloc_cmd(fcport, QEDF_ELS);
	if (!els_req) {
		/* Retry allocation for up to ~10 seconds of wall time. */
		current_time = jiffies / HZ;
		if ((current_time - start_time) > 10) {
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
				   "els: Failed els 0x%x\n", op);
			rc = -ENOMEM;
			goto els_err;
		}
		/* ~20 ms between retries; busy-wait since a lock may be
		 * held (was mdelay(20 * USEC_PER_MSEC) == 20 seconds). */
		mdelay(20);
		goto retry_els;
	}

	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "initiate_els els_req = "
		   "0x%p cb_arg = %p xid = %x\n", els_req, cb_arg,
		   els_req->xid);

	/* Wire up the command and its completion callback. */
	els_req->sc_cmd = NULL;
	els_req->cmd_type = QEDF_ELS;
	els_req->fcport = fcport;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	cb_arg->op = op;
	els_req->cb_arg = cb_arg;
	els_req->data_xfer_len = data_len;

	/* Record which cpu this request is associated with */
	els_req->cpu = smp_processor_id();

	mp_req = (struct qedf_mp_req *)&(els_req->mp_req);
	rc = qedf_init_mp_req(els_req);
	if (rc) {
		QEDF_ERR(&(qedf->dbg_ctx), "ELS MP request init failed\n");
		kref_put(&els_req->refcount, qedf_release_cmd);
		goto els_err;
	} else {
		rc = 0;
	}

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		/* Unsupported opcode: detach callbacks and drop the cmd. */
		QEDF_ERR(&(qedf->dbg_ctx), "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, qedf_release_cmd);
		rc = -EINVAL;
	}

	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = fcport->rdata->ids.port_id;
	sid = fcport->sid;

	__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
			   FC_TYPE_ELS, FC_FC_FIRST_SEQ |
			   FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;

	/* Queue entry setup and doorbell ring are serialized per rport. */
	spin_lock_irqsave(&fcport->rport_lock, flags);

	sqe_idx = qedf_get_sqe_idx(fcport);
	sqe = &fcport->sq[sqe_idx];
	memset(sqe, 0, sizeof(struct fcoe_wqe));

	/* Initialize task context for this IO request */
	task = qedf_get_task_mem(&qedf->tasks, xid);
	qedf_init_mp_task(els_req, task, sqe);

	/* Put timer on original I/O request */
	if (timer_msec)
		qedf_cmd_timer_set(qedf, els_req, timer_msec);

	/* Ring doorbell */
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Ringing doorbell for ELS "
		   "req\n");
	qedf_ring_doorbell(fcport);
	spin_unlock_irqrestore(&fcport->rport_lock, flags);
els_err:
	return rc;
}