/* Mark the LUN as usable again and trace the transition. */
static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);

	/* trace only an actual 0 -> 1 transition of the UNBLOCKED flag */
	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status))
		zfcp_dbf_rec_run("erlubl1", &zfcp_sdev->erp_action);
	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
}
/* SCSI host template slave_destroy handler: undo zfcp_scsi_slave_alloc().
 *
 * Fix: the midlayer calls slave_destroy even when a previous slave_alloc
 * returned early without setting zfcp_sdev->port; dereferencing the NULL
 * port here would oops. Guard against that, matching the guarded variant
 * of this handler elsewhere in this file.
 */
static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);

	/* if previous slave_alloc returned early, there is nothing to do */
	if (!zfcp_sdev->port)
		return;

	zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
	/* drop the port reference held for this sdev */
	put_device(&zfcp_sdev->port->dev);
}
/* Unlink @erp_action from the adapter's ERP bookkeeping and clear the
 * ERP_INUSE flag of the object (LUN, port, or adapter) it was acting on.
 * NOTE(review): presumably runs under adapter->erp_lock - confirm at callers.
 */
static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
{
	struct zfcp_adapter *adapter = erp_action->adapter;
	struct zfcp_scsi_dev *zfcp_sdev;

	adapter->erp_total_count--;
	/* actions queued under memory pressure are counted separately */
	if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
		adapter->erp_low_mem_count--;
		erp_action->status &= ~ZFCP_STATUS_ERP_LOWMEM;
	}

	list_del(&erp_action->list);
	zfcp_dbf_rec_run("eractd1", erp_action);

	switch (erp_action->action) {
	case ZFCP_ERP_ACTION_REOPEN_LUN:
		zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
				  &zfcp_sdev->status);
		break;
	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
	case ZFCP_ERP_ACTION_REOPEN_PORT:
		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
				  &erp_action->port->status);
		break;
	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
				  &erp_action->adapter->status);
		break;
	}
}
/* Post-process the result of a LUN recovery attempt: reset or bump the
 * retry counter, give up (mark ERP_FAILED) after more than ZFCP_MAX_ERPS
 * failed attempts, and block the LUN once it is marked ERP_FAILED.
 * Returns the (possibly overridden) recovery result code.
 */
static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);

	switch (result) {
	case ZFCP_ERP_SUCCEEDED :
		atomic_set(&zfcp_sdev->erp_counter, 0);
		zfcp_erp_lun_unblock(sdev);
		break;
	case ZFCP_ERP_FAILED :
		atomic_inc(&zfcp_sdev->erp_counter);
		if (atomic_read(&zfcp_sdev->erp_counter) > ZFCP_MAX_ERPS) {
			dev_err(&zfcp_sdev->port->adapter->ccw_device->dev,
				"ERP failed for LUN 0x%016Lx on "
				"port 0x%016Lx\n",
				(unsigned long long)zfcp_scsi_dev_lun(sdev),
				(unsigned long long)zfcp_sdev->port->wwpn);
			zfcp_erp_set_lun_status(sdev,
						ZFCP_STATUS_COMMON_ERP_FAILED);
		}
		break;
	}

	/* a LUN that has finally failed stays blocked and exits recovery */
	if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
		zfcp_erp_lun_block(sdev, 0);
		result = ZFCP_ERP_EXIT;
	}
	return result;
}
static void zfcp_erp_lun_strategy_clearstati(struct scsi_device *sdev) { struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); atomic_andnot(ZFCP_STATUS_COMMON_ACCESS_DENIED, &zfcp_sdev->status); }
/* Dismiss the LUN's recovery action, if one is currently in use. */
static void zfcp_erp_action_dismiss_lun(struct scsi_device *sdev)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	int status = atomic_read(&zfcp_sdev->status);

	if (status & ZFCP_STATUS_COMMON_ERP_INUSE)
		zfcp_erp_action_dismiss(&zfcp_sdev->erp_action);
}
/**
 * zfcp_dbf_rec_run_lvl - trace event related to running recovery
 * @level: trace level to be used for event
 * @tag: identifier for event
 * @erp: erp_action running
 */
void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp)
{
	struct zfcp_dbf *dbf = erp->adapter->dbf;
	struct zfcp_dbf_rec *rec = &dbf->rec_buf;
	unsigned long flags;

	/* skip all work when this trace level is filtered out */
	if (!debug_level_enabled(dbf->rec, level))
		return;

	/* rec_buf is shared; rec_lock serializes its use */
	spin_lock_irqsave(&dbf->rec_lock, flags);
	memset(rec, 0, sizeof(*rec));

	rec->id = ZFCP_DBF_REC_RUN;
	memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
	zfcp_dbf_set_common(rec, erp->adapter, erp->port, erp->sdev);

	rec->u.run.fsf_req_id = erp->fsf_req_id;
	rec->u.run.rec_status = erp->status;
	rec->u.run.rec_step = erp->step;
	rec->u.run.rec_action = erp->action;

	/* record the retry counter of the most specific object acted on */
	if (erp->sdev)
		rec->u.run.rec_count =
			atomic_read(&sdev_to_zfcp(erp->sdev)->erp_counter);
	else if (erp->port)
		rec->u.run.rec_count = atomic_read(&erp->port->erp_counter);
	else
		rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter);

	debug_event(dbf->rec, level, rec, sizeof(*rec));
	spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
/* Prepare the erp_action embedded in the object that is to be recovered.
 * Takes a reference on that object (scsi_device / port / adapter) - except
 * for a LUN action with ZFCP_STATUS_ERP_NO_REF set - and marks it ERP_INUSE.
 * Returns NULL if the reference could not be taken or @need is unknown.
 */
static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
						  struct zfcp_adapter *adapter,
						  struct zfcp_port *port,
						  struct scsi_device *sdev)
{
	struct zfcp_erp_action *erp_action;
	struct zfcp_scsi_dev *zfcp_sdev;

	switch (need) {
	case ZFCP_ERP_ACTION_REOPEN_LUN:
		zfcp_sdev = sdev_to_zfcp(sdev);
		/* NO_REF: caller already holds/manages the sdev reference */
		if (!(act_status & ZFCP_STATUS_ERP_NO_REF))
			if (scsi_device_get(sdev))
				return NULL;
		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
				&zfcp_sdev->status);
		erp_action = &zfcp_sdev->erp_action;
		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
		erp_action->port = port;
		erp_action->sdev = sdev;
		if (!(atomic_read(&zfcp_sdev->status) &
		      ZFCP_STATUS_COMMON_RUNNING))
			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
		break;

	case ZFCP_ERP_ACTION_REOPEN_PORT:
	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
		if (!get_device(&port->dev))
			return NULL;
		/* cancel any pending lower-severity actions first */
		zfcp_erp_action_dismiss_port(port);
		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
		erp_action = &port->erp_action;
		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
		erp_action->port = port;
		if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
		break;

	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
		kref_get(&adapter->ref);
		/* cancel any pending lower-severity actions first */
		zfcp_erp_action_dismiss_adapter(adapter);
		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status);
		erp_action = &adapter->erp_action;
		memset(erp_action, 0, sizeof(struct zfcp_erp_action));
		if (!(atomic_read(&adapter->status) &
		      ZFCP_STATUS_COMMON_RUNNING))
			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
		break;

	default:
		return NULL;
	}

	erp_action->adapter = adapter;
	erp_action->action = need;
	erp_action->status = act_status;

	return erp_action;
}
/* Reset per-LUN condition flags (access denied, shared, read-only)
 * before retrying the LUN. */
static void zfcp_erp_lun_strategy_clearstati(struct scsi_device *sdev)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);

	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
			  ZFCP_STATUS_LUN_SHARED | ZFCP_STATUS_LUN_READONLY,
			  &zfcp_sdev->status);
}
static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear, char *id) { struct scsi_device *sdev; shost_for_each_device(sdev, port->adapter->scsi_host) if (sdev_to_zfcp(sdev)->port == port) _zfcp_erp_lun_reopen(sdev, clear, id, 0); }
static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) { struct scsi_device *sdev; if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) zfcp_erp_action_dismiss(&port->erp_action); else shost_for_each_device(sdev, port->adapter->scsi_host) if (sdev_to_zfcp(sdev)->port == port) zfcp_erp_action_dismiss_lun(sdev); }
/* Trigger a LUN reopen, serialized via the adapter's erp_lock. */
void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	unsigned long flags;

	write_lock_irqsave(&adapter->erp_lock, flags);
	_zfcp_erp_lun_reopen(sdev, clear, id, 0);
	write_unlock_irqrestore(&adapter->erp_lock, flags);
}
/* SCSI host template slave_destroy handler: undo zfcp_scsi_slave_alloc(). */
static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);

	/* if previous slave_alloc returned early, there is nothing to do */
	if (!zfcp_sdev->port)
		return;

	zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
	/* drop the port reference held for this sdev */
	put_device(&zfcp_sdev->port->dev);
}
/**
 * zfcp_erp_try_rport_unblock - unblock rport if no more/new recovery
 * @port: zfcp_port whose fc_rport we should try to unblock
 *
 * Holds adapter->erp_lock and nests shost->host_lock inside it while
 * scanning the port's LUNs; keep that lock order when changing this code.
 */
static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
{
	unsigned long flags;
	struct zfcp_adapter *adapter = port->adapter;
	int port_status;
	struct Scsi_Host *shost = adapter->scsi_host;
	struct scsi_device *sdev;

	write_lock_irqsave(&adapter->erp_lock, flags);
	port_status = atomic_read(&port->status);
	if ((port_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 ||
	    (port_status & (ZFCP_STATUS_COMMON_ERP_INUSE |
			    ZFCP_STATUS_COMMON_ERP_FAILED)) != 0) {
		/* new ERP of severity >= port triggered elsewhere meanwhile or
		 * local link down (adapter erp_failed but not clear unblock)
		 */
		zfcp_dbf_rec_run_lvl(4, "ertru_p", &port->erp_action);
		write_unlock_irqrestore(&adapter->erp_lock, flags);
		return;
	}
	spin_lock(shost->host_lock);
	__shost_for_each_device(sdev, shost) {
		struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
		int lun_status;

		if (zsdev->port != port)
			continue;
		/* LUN under port of interest */
		lun_status = atomic_read(&zsdev->status);
		if ((lun_status & ZFCP_STATUS_COMMON_ERP_FAILED) != 0)
			continue; /* unblock rport despite failed LUNs */
		/* LUN recovery not given up yet [maybe follow-up pending] */
		if ((lun_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 ||
		    (lun_status & ZFCP_STATUS_COMMON_ERP_INUSE) != 0) {
			/* LUN blocked:
			 * not yet unblocked [LUN recovery pending]
			 * or meanwhile blocked [new LUN recovery triggered]
			 */
			zfcp_dbf_rec_run_lvl(4, "ertru_l", &zsdev->erp_action);
			spin_unlock(shost->host_lock);
			write_unlock_irqrestore(&adapter->erp_lock, flags);
			return;
		}
	}
	/* now port has no child or all children have completed recovery,
	 * and no ERP of severity >= port was meanwhile triggered elsewhere
	 */
	zfcp_scsi_schedule_rport_register(port);
	spin_unlock(shost->host_lock);
	write_unlock_irqrestore(&adapter->erp_lock, flags);
}
void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id) { unsigned long flags; struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); struct zfcp_port *port = zfcp_sdev->port; struct zfcp_adapter *adapter = port->adapter; int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED; write_lock_irqsave(&adapter->erp_lock, flags); _zfcp_erp_lun_reopen(sdev, clear, id, ZFCP_STATUS_ERP_NO_REF); write_unlock_irqrestore(&adapter->erp_lock, flags); zfcp_erp_wait(adapter); }
/* SCSI EH host reset: reopen the whole adapter and wait for recovery. */
static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	int ret;

	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
	zfcp_erp_wait(adapter);
	ret = fc_block_scsi_eh(scpnt);

	return ret ? ret : SUCCESS;
}
/* Decide which recovery action is actually required for @want, escalating
 * from LUN to port to adapter reopen when a parent object is not usable.
 * Returns 0 when no action is needed (already in recovery) or possible
 * (parent failed). The switch intentionally falls through from the most
 * specific case to the broader ones.
 */
static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
				 struct zfcp_port *port,
				 struct scsi_device *sdev)
{
	int need = want;
	int l_status, p_status, a_status;
	struct zfcp_scsi_dev *zfcp_sdev;

	switch (want) {
	case ZFCP_ERP_ACTION_REOPEN_LUN:
		zfcp_sdev = sdev_to_zfcp(sdev);
		l_status = atomic_read(&zfcp_sdev->status);
		if (l_status & ZFCP_STATUS_COMMON_ERP_INUSE)
			return 0;
		p_status = atomic_read(&port->status);
		if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) ||
		      p_status & ZFCP_STATUS_COMMON_ERP_FAILED)
			return 0;
		if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED))
			need = ZFCP_ERP_ACTION_REOPEN_PORT;
		/* fall through */
	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
		p_status = atomic_read(&port->status);
		if (!(p_status & ZFCP_STATUS_COMMON_OPEN))
			need = ZFCP_ERP_ACTION_REOPEN_PORT;
		/* fall through */
	case ZFCP_ERP_ACTION_REOPEN_PORT:
		p_status = atomic_read(&port->status);
		if (p_status & ZFCP_STATUS_COMMON_ERP_INUSE)
			return 0;
		a_status = atomic_read(&adapter->status);
		if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) ||
		      a_status & ZFCP_STATUS_COMMON_ERP_FAILED)
			return 0;
		if (p_status & ZFCP_STATUS_COMMON_NOESC)
			return need; /* never escalate beyond this port */
		if (!(a_status & ZFCP_STATUS_COMMON_UNBLOCKED))
			need = ZFCP_ERP_ACTION_REOPEN_ADAPTER;
		/* fall through */
	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
		a_status = atomic_read(&adapter->status);
		if (a_status & ZFCP_STATUS_COMMON_ERP_INUSE)
			return 0;
		if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) &&
		    !(a_status & ZFCP_STATUS_COMMON_OPEN))
			return 0;
	}

	return need;
}
/* Block the LUN and, unless it is already marked ERP_FAILED, queue a
 * REOPEN_LUN recovery action for it. */
static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
				 u32 act_status)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);

	zfcp_erp_lun_block(sdev, clear);

	if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
		return;

	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN,
				zfcp_sdev->port->adapter, zfcp_sdev->port,
				sdev, id, act_status);
}
/* SCSI host template queuecommand handler: validate rport and LUN state,
 * then hand the command to the FSF layer. Returns 0 (command accepted or
 * completed with an error result) or an SCSI_MLQUEUE_* busy code. */
static int zfcp_scsi_queuecommand(struct Scsi_Host *shost,
				  struct scsi_cmnd *scpnt)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
	struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device));
	int status, scsi_result, ret;

	/* reset the status for this request */
	scpnt->result = 0;
	scpnt->host_scribble = NULL;

	scsi_result = fc_remote_port_chkready(rport);
	if (unlikely(scsi_result)) {
		scpnt->result = scsi_result;
		zfcp_dbf_scsi_fail_send(scpnt);
		scpnt->scsi_done(scpnt);
		return 0;
	}

	status = atomic_read(&zfcp_sdev->status);
	if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) &&
		     !(atomic_read(&zfcp_sdev->port->status) &
		       ZFCP_STATUS_COMMON_ERP_FAILED)) {
		/* only LUN access denied, but port is good
		 * not covered by FC transport, have to fail here */
		zfcp_scsi_command_fail(scpnt, DID_ERROR);
		return 0;
	}

	if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
		/* This could be either
		 * open LUN pending: this is temporary, will result in
		 *	open LUN or ERP_FAILED, so retry command
		 * call to rport_delete pending: mimic retry from
		 *	fc_remote_port_chkready until rport is BLOCKED
		 */
		zfcp_scsi_command_fail(scpnt, DID_IMM_RETRY);
		return 0;
	}

	ret = zfcp_fsf_fcp_cmnd(scpnt);
	if (unlikely(ret == -EBUSY))
		return SCSI_MLQUEUE_DEVICE_BUSY;
	else if (unlikely(ret < 0))
		return SCSI_MLQUEUE_HOST_BUSY;

	return ret;
}
static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec, struct zfcp_adapter *adapter, struct zfcp_port *port, struct scsi_device *sdev) { rec->adapter_status = atomic_read(&adapter->status); if (port) { rec->port_status = atomic_read(&port->status); rec->wwpn = port->wwpn; rec->d_id = port->d_id; } if (sdev) { rec->lun_status = atomic_read(&sdev_to_zfcp(sdev)->status); rec->lun = zfcp_scsi_dev_lun(sdev); } }
/* sysfs "failed" attribute of a unit: "1" if the LUN is in ERP_FAILED
 * state or has no attached scsi_device, "0" otherwise. */
static ssize_t zfcp_sysfs_unit_failed_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
	struct scsi_device *sdev;
	unsigned int failed = 1;

	sdev = zfcp_unit_sdev(unit);
	if (sdev) {
		if (!(atomic_read(&sdev_to_zfcp(sdev)->status) &
		      ZFCP_STATUS_COMMON_ERP_FAILED))
			failed = 0;
		scsi_device_put(sdev);
	}

	return sprintf(buf, "%d\n", failed);
}
/* SCSI host template slave_alloc handler: bind the new scsi_device to its
 * zfcp port, initialize per-LUN state and kick off LUN recovery.
 * Returns 0 on success or -ENXIO when no matching port/unit exists. */
static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *) sdev->host->hostdata[0];
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_port *port;
	struct zfcp_unit *unit;
	int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE;

	zfcp_sdev->erp_action.adapter = adapter;
	zfcp_sdev->erp_action.sdev = sdev;

	port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
	if (!port)
		return -ENXIO;

	zfcp_sdev->erp_action.port = port;

	/* only an existence check - the unit reference is dropped at once */
	unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
	if (unit)
		put_device(&unit->dev);

	if (!unit && !(allow_lun_scan && npiv)) {
		/* drop the port reference before failing */
		put_device(&port->dev);
		return -ENXIO;
	}

	/* keep the port reference; released in slave_destroy */
	zfcp_sdev->port = port;
	/* seed latency minima so the first sample always undercuts them */
	zfcp_sdev->latencies.write.channel.min = 0xFFFFFFFF;
	zfcp_sdev->latencies.write.fabric.min = 0xFFFFFFFF;
	zfcp_sdev->latencies.read.channel.min = 0xFFFFFFFF;
	zfcp_sdev->latencies.read.fabric.min = 0xFFFFFFFF;
	zfcp_sdev->latencies.cmd.channel.min = 0xFFFFFFFF;
	zfcp_sdev->latencies.cmd.fabric.min = 0xFFFFFFFF;
	spin_lock_init(&zfcp_sdev->latencies.lock);

	zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
	zfcp_erp_lun_reopen(sdev, 0, "scsla_1");
	zfcp_erp_wait(port->adapter);

	return 0;
}
/* If the status of the recovered object changed while the action ran,
 * re-trigger its recovery (marking it ERP_FAILED first) and exit the
 * current action; otherwise pass @ret through unchanged. */
static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
{
	int action = act->action;
	struct zfcp_adapter *adapter = act->adapter;
	struct zfcp_port *port = act->port;
	struct scsi_device *sdev = act->sdev;
	struct zfcp_scsi_dev *zfcp_sdev;
	u32 erp_status = act->status;

	switch (action) {
	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
		if (zfcp_erp_strat_change_det(&adapter->status, erp_status)) {
			_zfcp_erp_adapter_reopen(adapter,
						 ZFCP_STATUS_COMMON_ERP_FAILED,
						 "ersscg1");
			return ZFCP_ERP_EXIT;
		}
		break;

	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
	case ZFCP_ERP_ACTION_REOPEN_PORT:
		if (zfcp_erp_strat_change_det(&port->status, erp_status)) {
			_zfcp_erp_port_reopen(port,
					      ZFCP_STATUS_COMMON_ERP_FAILED,
					      "ersscg2");
			return ZFCP_ERP_EXIT;
		}
		break;

	case ZFCP_ERP_ACTION_REOPEN_LUN:
		zfcp_sdev = sdev_to_zfcp(sdev);
		if (zfcp_erp_strat_change_det(&zfcp_sdev->status, erp_status)) {
			_zfcp_erp_lun_reopen(sdev,
					     ZFCP_STATUS_COMMON_ERP_FAILED,
					     "ersscg3", 0);
			return ZFCP_ERP_EXIT;
		}
		break;
	}
	return ret;
}
/* SCSI host template queuecommand handler: validate rport and LUN state,
 * then hand the command to the FSF layer. Returns 0 (command accepted or
 * completed with an error result) or an SCSI_MLQUEUE_* busy code. */
static int zfcp_scsi_queuecommand(struct Scsi_Host *shost,
				  struct scsi_cmnd *scpnt)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
	struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device));
	int status, scsi_result, ret;

	/* reset the status for this request */
	scpnt->result = 0;
	scpnt->host_scribble = NULL;

	scsi_result = fc_remote_port_chkready(rport);
	if (unlikely(scsi_result)) {
		scpnt->result = scsi_result;
		zfcp_dbf_scsi_fail_send(scpnt);
		scpnt->scsi_done(scpnt);
		return 0;
	}

	status = atomic_read(&zfcp_sdev->status);
	if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) &&
		     !(atomic_read(&zfcp_sdev->port->status) &
		       ZFCP_STATUS_COMMON_ERP_FAILED)) {
		/* LUN failed but the port is still good: the FC transport
		 * does not cover this, so fail the command here */
		zfcp_scsi_command_fail(scpnt, DID_ERROR);
		return 0;
	}

	if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
		/* LUN not (yet) unblocked - ask the midlayer to retry */
		zfcp_scsi_command_fail(scpnt, DID_IMM_RETRY);
		return 0;
	}

	ret = zfcp_fsf_fcp_cmnd(scpnt);
	if (unlikely(ret == -EBUSY))
		return SCSI_MLQUEUE_DEVICE_BUSY;
	else if (unlikely(ret < 0))
		return SCSI_MLQUEUE_HOST_BUSY;

	return ret;
}
/* forward declaration - defined later in this file */
static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags);

/* Send a task management function (target or LUN reset) synchronously,
 * retrying up to three times while recovery is in progress.
 * Returns SUCCESS, FAILED, or the fc_block_scsi_eh() result.
 *
 * Fix: after a successful TMF, retire the FSF requests that the TMF
 * aborted via zfcp_scsi_forget_cmnds(), consistent with the sibling
 * implementation of this function elsewhere in this file; without this,
 * aborted commands stay tracked in the request list.
 */
static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	struct zfcp_fsf_req *fsf_req = NULL;
	int retval = SUCCESS, ret;
	int retry = 3;

	while (retry--) {
		fsf_req = zfcp_fsf_fcp_task_mgmt(scpnt, tm_flags);
		if (fsf_req)
			break;

		zfcp_erp_wait(adapter);
		ret = fc_block_scsi_eh(scpnt);
		if (ret)
			return ret;

		if (!(atomic_read(&adapter->status) &
		      ZFCP_STATUS_COMMON_RUNNING)) {
			zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
			return SUCCESS;
		}
	}
	if (!fsf_req)
		return FAILED;

	wait_for_completion(&fsf_req->completion);

	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
		zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
		retval = FAILED;
	} else {
		zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
		zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
	}

	zfcp_fsf_req_free(fsf_req);
	return retval;
}
/* State machine step for LUN recovery: close the LUN first if it is open,
 * then (unless CLOSE_ONLY) reopen it. Returns a ZFCP_ERP_* result code.
 * The first case intentionally falls through when the LUN is not open. */
static int zfcp_erp_lun_strategy(struct zfcp_erp_action *erp_action)
{
	struct scsi_device *sdev = erp_action->sdev;
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);

	switch (erp_action->step) {
	case ZFCP_ERP_STEP_UNINITIALIZED:
		zfcp_erp_lun_strategy_clearstati(sdev);
		if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
			return zfcp_erp_lun_strategy_close(erp_action);
		/* fall through */
	case ZFCP_ERP_STEP_LUN_CLOSING:
		if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
			return ZFCP_ERP_FAILED;
		if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
			return ZFCP_ERP_EXIT;
		return zfcp_erp_lun_strategy_open(erp_action);

	case ZFCP_ERP_STEP_LUN_OPENING:
		if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
			return ZFCP_ERP_SUCCEEDED;
	}
	return ZFCP_ERP_FAILED;
}
/* Retire all tracked FSF requests that were aborted by a successful TMF
 * (target reset: everything on the port; LUN reset: only that LUN). */
static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags)
{
	struct zfcp_adapter *adapter = zsdev->port->adapter;
	struct zfcp_scsi_req_filter filter = {
		.tmf_scope = FCP_TMF_TGT_RESET,
		.port_handle = zsdev->port->handle,
	};
	unsigned long flags;

	if (tm_flags == FCP_TMF_LUN_RESET) {
		filter.tmf_scope = FCP_TMF_LUN_RESET;
		filter.lun_handle = zsdev->lun_handle;
	}

	/*
	 * abort_lock secures against other processings - in the abort-function
	 * and normal cmnd-handler - of (struct zfcp_fsf_req *)->data
	 */
	write_lock_irqsave(&adapter->abort_lock, flags);
	zfcp_reqlist_apply_for_all(adapter->req_list, zfcp_scsi_forget_cmnd,
				   &filter);
	write_unlock_irqrestore(&adapter->abort_lock, flags);
}

/**
 * zfcp_scsi_task_mgmt_function() - Send a task management function (sync).
 * @sdev: Pointer to SCSI device to send the task management command to.
 * @tm_flags: Task management flags,
 *	      here we only handle %FCP_TMF_TGT_RESET or %FCP_TMF_LUN_RESET.
 *
 * Retries up to three times while recovery is in progress; returns
 * SUCCESS, FAILED, or the fc_block_rport() result.
 */
static int zfcp_scsi_task_mgmt_function(struct scsi_device *sdev, u8 tm_flags)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct zfcp_fsf_req *fsf_req = NULL;
	int retval = SUCCESS, ret;
	int retry = 3;

	while (retry--) {
		fsf_req = zfcp_fsf_fcp_task_mgmt(sdev, tm_flags);
		if (fsf_req)
			break;

		zfcp_dbf_scsi_devreset("wait", sdev, tm_flags, NULL);
		zfcp_erp_wait(adapter);
		ret = fc_block_rport(rport);
		if (ret) {
			zfcp_dbf_scsi_devreset("fiof", sdev, tm_flags, NULL);
			return ret;
		}

		if (!(atomic_read(&adapter->status) &
		      ZFCP_STATUS_COMMON_RUNNING)) {
			zfcp_dbf_scsi_devreset("nres", sdev, tm_flags, NULL);
			return SUCCESS;
		}
	}
	if (!fsf_req) {
		zfcp_dbf_scsi_devreset("reqf", sdev, tm_flags, NULL);
		return FAILED;
	}

	wait_for_completion(&fsf_req->completion);

	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
		zfcp_dbf_scsi_devreset("fail", sdev, tm_flags, fsf_req);
		retval = FAILED;
	} else {
		zfcp_dbf_scsi_devreset("okay", sdev, tm_flags, fsf_req);
		/* retire the commands this successful TMF aborted */
		zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
	}

	zfcp_fsf_req_free(fsf_req);
	return retval;
}

static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
{
	struct scsi_device *sdev = scpnt->device;

	return zfcp_scsi_task_mgmt_function(sdev, FCP_TMF_LUN_RESET);
}

/* SCSI EH target reset: find any sdev of the target and reset via it. */
static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
{
	struct scsi_target *starget = scsi_target(scpnt->device);
	struct fc_rport *rport = starget_to_rport(starget);
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct scsi_device *sdev = NULL, *tmp_sdev;
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *)shost->hostdata[0];
	int ret;

	shost_for_each_device(tmp_sdev, shost) {
		if (tmp_sdev->id == starget->id) {
			sdev = tmp_sdev;
			break;
		}
	}
	if (!sdev) {
		ret = FAILED;
		zfcp_dbf_scsi_eh("tr_nosd", adapter, starget->id, ret);
		return ret;
	}

	ret = zfcp_scsi_task_mgmt_function(sdev, FCP_TMF_TGT_RESET);

	/* release reference from above shost_for_each_device */
	if (sdev)
		scsi_device_put(tmp_sdev);

	return ret;
}

/* SCSI EH host reset: reopen the whole adapter and wait for recovery. */
static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	int ret = SUCCESS, fc_ret;

	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
	zfcp_erp_wait(adapter);
	fc_ret = fc_block_scsi_eh(scpnt);
	if (fc_ret)
		ret = fc_ret;

	zfcp_dbf_scsi_eh("schrh_r", adapter, ~0, ret);
	return ret;
}

/**
 * zfcp_scsi_sysfs_host_reset() - Support scsi_host sysfs attribute host_reset.
 * @shost: Pointer to Scsi_Host to perform action on.
 * @reset_type: We support %SCSI_ADAPTER_RESET but not %SCSI_FIRMWARE_RESET.
 *
 * Return: 0 on %SCSI_ADAPTER_RESET, -%EOPNOTSUPP otherwise.
 *
 * This is similar to zfcp_sysfs_adapter_failed_store().
 */
static int zfcp_scsi_sysfs_host_reset(struct Scsi_Host *shost, int reset_type)
{
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *)shost->hostdata[0];
	int ret = 0;

	if (reset_type != SCSI_ADAPTER_RESET) {
		ret = -EOPNOTSUPP;
		zfcp_dbf_scsi_eh("scshr_n", adapter, ~0, ret);
		return ret;
	}

	zfcp_erp_adapter_reset_sync(adapter, "scshr_y");
	return ret;
}

struct scsi_transport_template *zfcp_scsi_transport_template;

static struct scsi_host_template zfcp_scsi_host_template = {
	.module			 = THIS_MODULE,
	.name			 = "zfcp",
	.queuecommand		 = zfcp_scsi_queuecommand,
	.eh_timed_out		 = fc_eh_timed_out,
	.eh_abort_handler	 = zfcp_scsi_eh_abort_handler,
	.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
	.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
	.eh_host_reset_handler	 = zfcp_scsi_eh_host_reset_handler,
	.slave_alloc		 = zfcp_scsi_slave_alloc,
	.slave_configure	 = zfcp_scsi_slave_configure,
	.slave_destroy		 = zfcp_scsi_slave_destroy,
	.change_queue_depth	 = scsi_change_queue_depth,
	.host_reset		 = zfcp_scsi_sysfs_host_reset,
	.proc_name		 = "zfcp",
	.can_queue		 = 4096,
	.this_id		 = -1,
	.sg_tablesize		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1) *
				     ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2),
				   /* GCD, adjusted later */
	.max_sectors		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1) *
				     ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
				   /* GCD, adjusted later */
	/* report size limit per scatter-gather segment */
	.max_segment_size	 = ZFCP_QDIO_SBALE_LEN,
	.dma_boundary		 = ZFCP_QDIO_SBALE_LEN - 1,
	.shost_attrs		 = zfcp_sysfs_shost_attrs,
	.sdev_attrs		 = zfcp_sysfs_sdev_attrs,
	.track_queue_depth	 = 1,
	.supported_mode		 = MODE_INITIATOR,
};

/**
 * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
 * @adapter: The zfcp adapter to register with the SCSI midlayer
 */
int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
{
	struct ccw_dev_id dev_id;

	/* already registered - nothing to do */
	if (adapter->scsi_host)
		return 0;

	ccw_device_get_id(adapter->ccw_device, &dev_id);
	/* register adapter as SCSI host with mid layer of SCSI stack */
	adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
					     sizeof (struct zfcp_adapter *));
	if (!adapter->scsi_host) {
		dev_err(&adapter->ccw_device->dev,
			"Registering the FCP device with the "
			"SCSI stack failed\n");
		return -EIO;
	}

	/* tell the SCSI stack some characteristics of this adapter */
	adapter->scsi_host->max_id = 511;
	adapter->scsi_host->max_lun = 0xFFFFFFFF;
	adapter->scsi_host->max_channel = 0;
	adapter->scsi_host->unique_id = dev_id.devno;
	adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
	adapter->scsi_host->transportt = zfcp_scsi_transport_template;

	adapter->scsi_host->hostdata[0] = (unsigned long) adapter;

	if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
		scsi_host_put(adapter->scsi_host);
		return -EIO;
	}

	return 0;
}

/**
 * zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer
 * @adapter: The zfcp adapter to unregister.
 */
void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
{
	struct Scsi_Host *shost;
	struct zfcp_port *port;

	shost = adapter->scsi_host;
	if (!shost)
		return;

	/* detach all ports from their fc_rports before tearing down */
	read_lock_irq(&adapter->port_list_lock);
	list_for_each_entry(port, &adapter->port_list, list)
		port->rport = NULL;
	read_unlock_irq(&adapter->port_list_lock);

	fc_remove_host(shost);
	scsi_remove_host(shost);
	scsi_host_put(shost);
	adapter->scsi_host = NULL;
}

/* Lazily allocate and zero the adapter's fc_host statistics buffer. */
static struct fc_host_statistics*
zfcp_scsi_init_fc_host_stats(struct zfcp_adapter *adapter)
{
	struct fc_host_statistics *fc_stats;

	if (!adapter->fc_stats) {
		fc_stats = kmalloc(sizeof(*fc_stats), GFP_KERNEL);
		if (!fc_stats)
			return NULL;
		adapter->fc_stats = fc_stats; /* freed in adapter_release */
	}
	memset(adapter->fc_stats, 0, sizeof(*adapter->fc_stats));
	return adapter->fc_stats;
}

/* Fill @fc_stats with the delta between current (@data) and baseline
 * (@old) port statistics. */
static void zfcp_scsi_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
					   struct fsf_qtcb_bottom_port *data,
					   struct fsf_qtcb_bottom_port *old)
{
	fc_stats->seconds_since_last_reset =
		data->seconds_since_last_reset - old->seconds_since_last_reset;
	fc_stats->tx_frames = data->tx_frames - old->tx_frames;
	fc_stats->tx_words = data->tx_words - old->tx_words;
	fc_stats->rx_frames = data->rx_frames - old->rx_frames;
	fc_stats->rx_words = data->rx_words - old->rx_words;
	fc_stats->lip_count = data->lip - old->lip;
	fc_stats->nos_count = data->nos - old->nos;
	fc_stats->error_frames = data->error_frames - old->error_frames;
	fc_stats->dumped_frames = data->dumped_frames - old->dumped_frames;
	fc_stats->link_failure_count = data->link_failure - old->link_failure;
	fc_stats->loss_of_sync_count = data->loss_of_sync - old->loss_of_sync;
	fc_stats->loss_of_signal_count =
		data->loss_of_signal - old->loss_of_signal;
	fc_stats->prim_seq_protocol_err_count =
		data->psp_error_counts - old->psp_error_counts;
	fc_stats->invalid_tx_word_count =
		data->invalid_tx_words - old->invalid_tx_words;
	fc_stats->invalid_crc_count = data->invalid_crcs - old->invalid_crcs;
	fc_stats->fcp_input_requests =
		data->input_requests - old->input_requests;
	fc_stats->fcp_output_requests =
		data->output_requests - old->output_requests;
	fc_stats->fcp_control_requests =
		data->control_requests - old->control_requests;
	fc_stats->fcp_input_megabytes = data->input_mb - old->input_mb;
	fc_stats->fcp_output_megabytes = data->output_mb - old->output_mb;
}

/* Fill @fc_stats with the absolute port statistics from @data. */
static void zfcp_scsi_set_fc_host_stats(struct fc_host_statistics *fc_stats,
					struct fsf_qtcb_bottom_port *data)
{
	fc_stats->seconds_since_last_reset = data->seconds_since_last_reset;
	fc_stats->tx_frames = data->tx_frames;
	fc_stats->tx_words = data->tx_words;
	fc_stats->rx_frames = data->rx_frames;
	fc_stats->rx_words = data->rx_words;
	fc_stats->lip_count = data->lip;
	fc_stats->nos_count = data->nos;
	fc_stats->error_frames = data->error_frames;
	fc_stats->dumped_frames = data->dumped_frames;
	fc_stats->link_failure_count = data->link_failure;
	fc_stats->loss_of_sync_count = data->loss_of_sync;
	fc_stats->loss_of_signal_count = data->loss_of_signal;
	fc_stats->prim_seq_protocol_err_count = data->psp_error_counts;
	fc_stats->invalid_tx_word_count = data->invalid_tx_words;
	fc_stats->invalid_crc_count = data->invalid_crcs;
	fc_stats->fcp_input_requests = data->input_requests;
	fc_stats->fcp_output_requests = data->output_requests;
	fc_stats->fcp_control_requests = data->control_requests;
	fc_stats->fcp_input_megabytes = data->input_mb;
	fc_stats->fcp_output_megabytes = data->output_mb;
}
/*
 * zfcp_scsi_forget_cmnds() - Dismiss commands covered by a completed TMF.
 * @zsdev: zfcp device the task management function was issued for.
 * @tm_flags: FCP_TMF_LUN_RESET or (otherwise) target-reset scope.
 *
 * Builds a request filter from the port handle (and, for a LUN reset, the
 * LUN handle as well) and applies zfcp_scsi_forget_cmnd to every pending
 * request in the adapter's request list.  zfcp_scsi_forget_cmnd is defined
 * elsewhere in this file; presumably it drops the SCSI command association
 * of matching requests — confirm against its definition.
 */
static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags)
{
	struct zfcp_adapter *adapter = zsdev->port->adapter;
	struct zfcp_scsi_req_filter filter = {
		.tmf_scope = FCP_TMF_TGT_RESET,
		.port_handle = zsdev->port->handle,
	};
	unsigned long flags;

	/* narrow the scope from whole target to a single LUN if requested */
	if (tm_flags == FCP_TMF_LUN_RESET) {
		filter.tmf_scope = FCP_TMF_LUN_RESET;
		filter.lun_handle = zsdev->lun_handle;
	}

	/*
	 * abort_lock secures against other processings - in the abort-function
	 * and normal cmnd-handler - of (struct zfcp_fsf_req *)->data
	 */
	write_lock_irqsave(&adapter->abort_lock, flags);
	zfcp_reqlist_apply_for_all(adapter->req_list, zfcp_scsi_forget_cmnd,
				   &filter);
	write_unlock_irqrestore(&adapter->abort_lock, flags);
}

/*
 * zfcp_task_mgmt_function() - Issue an FCP task management function and wait.
 * @scpnt: SCSI command identifying the target device.
 * @tm_flags: TMF to send (FCP_TMF_LUN_RESET or FCP_TMF_TGT_RESET).
 *
 * Tries up to three times to allocate/send the TMF request; between attempts
 * it waits for error recovery to finish and honors fc_block_scsi_eh().  If
 * the adapter loses ZFCP_STATUS_COMMON_RUNNING meanwhile, the reset is
 * considered moot and SUCCESS is returned (dbf tag "nres").
 *
 * Return: SUCCESS, FAILED, or the non-zero result of fc_block_scsi_eh()
 * (e.g. FAST_IO_FAIL), as expected by the SCSI eh callbacks below.
 */
static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	struct zfcp_fsf_req *fsf_req = NULL;
	int retval = SUCCESS, ret;
	int retry = 3;

	while (retry--) {
		fsf_req = zfcp_fsf_fcp_task_mgmt(scpnt, tm_flags);
		if (fsf_req)
			break;

		/* request could not be sent yet; let ERP settle and retry */
		zfcp_erp_wait(adapter);
		ret = fc_block_scsi_eh(scpnt);
		if (ret)
			return ret;

		if (!(atomic_read(&adapter->status) &
		      ZFCP_STATUS_COMMON_RUNNING)) {
			zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
			return SUCCESS;
		}
	}
	if (!fsf_req)
		return FAILED;

	wait_for_completion(&fsf_req->completion);

	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
		zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
		retval = FAILED;
	} else {
		zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
		/* TMF succeeded: dismiss commands it implicitly aborted */
		zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
	}

	zfcp_fsf_req_free(fsf_req);
	return retval;
}

/* SCSI eh callback: LUN reset via FCP task management function */
static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
{
	return zfcp_task_mgmt_function(scpnt, FCP_TMF_LUN_RESET);
}

/* SCSI eh callback: target reset via FCP task management function */
static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
{
	return zfcp_task_mgmt_function(scpnt, FCP_TMF_TGT_RESET);
}

/* SCSI eh callback: recover the whole adapter (continues on next line) */
static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
	struct zfcp_scsi_dev *zfcp_sdev =
sdev_to_zfcp(scpnt->device);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	int ret;

	/* reopen the adapter and wait until recovery has run its course */
	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
	zfcp_erp_wait(adapter);
	ret = fc_block_scsi_eh(scpnt);
	if (ret)
		return ret;
	return SUCCESS;
}

/* FC transport template shared by all zfcp SCSI hosts; set up elsewhere */
struct scsi_transport_template *zfcp_scsi_transport_template;

/* SCSI host template describing zfcp's capabilities to the midlayer */
static struct scsi_host_template zfcp_scsi_host_template = {
	.module			 = THIS_MODULE,
	.name			 = "zfcp",
	.queuecommand		 = zfcp_scsi_queuecommand,
	.eh_abort_handler	 = zfcp_scsi_eh_abort_handler,
	.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
	.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
	.eh_host_reset_handler	 = zfcp_scsi_eh_host_reset_handler,
	.slave_alloc		 = zfcp_scsi_slave_alloc,
	.slave_configure	 = zfcp_scsi_slave_configure,
	.slave_destroy		 = zfcp_scsi_slave_destroy,
	.change_queue_depth	 = scsi_change_queue_depth,
	.proc_name		 = "zfcp",
	.can_queue		 = 4096,
	.this_id		 = -1,
	.sg_tablesize		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
				     * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2),
				   /* GCD, adjusted later */
	.max_sectors		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
				     * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
				   /* GCD, adjusted later */
	.dma_boundary		 = ZFCP_QDIO_SBALE_LEN - 1,
	.use_clustering		 = 1,
	.shost_attrs		 = zfcp_sysfs_shost_attrs,
	.sdev_attrs		 = zfcp_sysfs_sdev_attrs,
	.track_queue_depth	 = 1,
};

/**
 * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
 * @adapter: The zfcp adapter to register with the SCSI midlayer
 *
 * Idempotent: returns 0 immediately if a SCSI host already exists.
 * Return: 0 on success, -EIO if host allocation or registration fails.
 */
int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
{
	struct ccw_dev_id dev_id;

	if (adapter->scsi_host)
		return 0;

	ccw_device_get_id(adapter->ccw_device, &dev_id);
	/* register adapter as SCSI host with mid layer of SCSI stack */
	adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
					     sizeof (struct zfcp_adapter *));
	if (!adapter->scsi_host) {
		dev_err(&adapter->ccw_device->dev,
			"Registering the FCP device with the "
			"SCSI stack failed\n");
		return -EIO;
	}

	/* tell the SCSI stack some characteristics of this adapter */
adapter->scsi_host->max_id = 511;
	adapter->scsi_host->max_lun = 0xFFFFFFFF;
	adapter->scsi_host->max_channel = 0;
	adapter->scsi_host->unique_id = dev_id.devno;
	adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
	adapter->scsi_host->transportt = zfcp_scsi_transport_template;

	/* stash the adapter pointer in hostdata for retrieval in callbacks */
	adapter->scsi_host->hostdata[0] = (unsigned long) adapter;

	if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
		/* drop the reference taken by scsi_host_alloc() above */
		scsi_host_put(adapter->scsi_host);
		return -EIO;
	}

	return 0;
}

/**
 * zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer
 * @adapter: The zfcp adapter to unregister.
 *
 * No-op if no SCSI host is registered.  Clears every port's rport pointer
 * before tearing down the FC and SCSI host objects.
 */
void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
{
	struct Scsi_Host *shost;
	struct zfcp_port *port;

	shost = adapter->scsi_host;
	if (!shost)
		return;

	read_lock_irq(&adapter->port_list_lock);
	list_for_each_entry(port, &adapter->port_list, list)
		port->rport = NULL;
	read_unlock_irq(&adapter->port_list_lock);

	fc_remove_host(shost);
	scsi_remove_host(shost);
	scsi_host_put(shost);
	adapter->scsi_host = NULL;
}

/*
 * zfcp_init_fc_host_stats() - Get the adapter's zeroed fc_host stats buffer.
 * @adapter: Adapter whose statistics buffer is requested.
 *
 * Allocates the buffer lazily on first use and zeroes it on every call.
 * Return: pointer to the (re)initialized buffer, or NULL on allocation
 * failure.
 */
static struct fc_host_statistics*
zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
{
	struct fc_host_statistics *fc_stats;

	if (!adapter->fc_stats) {
		fc_stats = kmalloc(sizeof(*fc_stats), GFP_KERNEL);
		if (!fc_stats)
			return NULL;
		adapter->fc_stats = fc_stats; /* freed in adapter_release */
	}
	memset(adapter->fc_stats, 0, sizeof(*adapter->fc_stats));
	return adapter->fc_stats;
}

/*
 * zfcp_adjust_fc_host_stats() - Fill fc_host stats relative to a baseline.
 * @fc_stats: fc_host_statistics structure to fill.
 * @data: current FSF exchange-port-data counters.
 * @old: baseline counters; each reported value is data minus old.
 * (continues on next line)
 */
static void zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
				      struct fsf_qtcb_bottom_port *data,
				      struct fsf_qtcb_bottom_port *old)
{
	fc_stats->seconds_since_last_reset =
		data->seconds_since_last_reset - old->seconds_since_last_reset;
	fc_stats->tx_frames = data->tx_frames - old->tx_frames;
	fc_stats->tx_words = data->tx_words - old->tx_words;
	fc_stats->rx_frames = data->rx_frames - old->rx_frames;
	fc_stats->rx_words = data->rx_words - old->rx_words;
	fc_stats->lip_count = data->lip - old->lip;
	fc_stats->nos_count = data->nos - old->nos;
	fc_stats->error_frames =
data->error_frames - old->error_frames;
	fc_stats->dumped_frames = data->dumped_frames - old->dumped_frames;
	fc_stats->link_failure_count = data->link_failure - old->link_failure;
	fc_stats->loss_of_sync_count = data->loss_of_sync - old->loss_of_sync;
	fc_stats->loss_of_signal_count =
		data->loss_of_signal - old->loss_of_signal;
	fc_stats->prim_seq_protocol_err_count =
		data->psp_error_counts - old->psp_error_counts;
	fc_stats->invalid_tx_word_count =
		data->invalid_tx_words - old->invalid_tx_words;
	fc_stats->invalid_crc_count = data->invalid_crcs - old->invalid_crcs;
	fc_stats->fcp_input_requests =
		data->input_requests - old->input_requests;
	fc_stats->fcp_output_requests =
		data->output_requests - old->output_requests;
	fc_stats->fcp_control_requests =
		data->control_requests - old->control_requests;
	fc_stats->fcp_input_megabytes = data->input_mb - old->input_mb;
	fc_stats->fcp_output_megabytes = data->output_mb - old->output_mb;
}

/*
 * zfcp_set_fc_host_stats() - Copy raw HBA counters into fc_host stats.
 * @fc_stats: fc_host_statistics structure to fill.
 * @data: FSF exchange-port-data counters, copied verbatim (no baseline).
 */
static void zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats,
				   struct fsf_qtcb_bottom_port *data)
{
	fc_stats->seconds_since_last_reset = data->seconds_since_last_reset;
	fc_stats->tx_frames = data->tx_frames;
	fc_stats->tx_words = data->tx_words;
	fc_stats->rx_frames = data->rx_frames;
	fc_stats->rx_words = data->rx_words;
	fc_stats->lip_count = data->lip;
	fc_stats->nos_count = data->nos;
	fc_stats->error_frames = data->error_frames;
	fc_stats->dumped_frames = data->dumped_frames;
	fc_stats->link_failure_count = data->link_failure;
	fc_stats->loss_of_sync_count = data->loss_of_sync;
	fc_stats->loss_of_signal_count = data->loss_of_signal;
	fc_stats->prim_seq_protocol_err_count = data->psp_error_counts;
	fc_stats->invalid_tx_word_count = data->invalid_tx_words;
	fc_stats->invalid_crc_count = data->invalid_crcs;
	fc_stats->fcp_input_requests = data->input_requests;
	fc_stats->fcp_output_requests = data->output_requests;
	fc_stats->fcp_control_requests = data->control_requests;
	fc_stats->fcp_input_megabytes = data->input_mb;
	fc_stats->fcp_output_megabytes = data->output_mb;
}