/*
 * Enable or disable a virtual port (fc_vport disable callback).
 *
 * @fc_vport: transport-layer vport whose dd_data holds our bfad_vport_s
 * @disable:  true to stop the vport, false to (re)start it
 *
 * Returns 0 on success, or VPCERR_BAD_WWN if no FCS vport matches the
 * port WWN of @fc_vport.
 */
static int
bfad_im_vport_disable(struct fc_vport *fc_vport, bool disable)
{
	struct bfad_vport_s *vport;
	struct bfad_s *bfad;
	struct bfa_fcs_vport_s *fcs_vport;
	wwn_t pwwn;
	unsigned long flags;

	vport = (struct bfad_vport_s *)fc_vport->dd_data;
	bfad = vport->drv_port.bfad;
	pwwn = wwn_to_u64((u8 *) &fc_vport->port_name);

	/* Only the lookup is performed under bfad_lock. */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs, 0, pwwn);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (fcs_vport == NULL)
		return VPCERR_BAD_WWN;

	/*
	 * NOTE(review): bfa_fcs_vport_stop()/start() are called outside
	 * bfad_lock here, while bfad_vport_create() starts the vport under
	 * the lock — confirm whether that asymmetry is intentional.
	 */
	if (disable) {
		bfa_fcs_vport_stop(fcs_vport);
		fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
	} else {
		bfa_fcs_vport_start(fcs_vport);
		fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
	}

	return 0;
}
/*
 * Create a vport under a vf.
 *
 * @bfad:     driver instance the new vport belongs to
 * @vf_id:    virtual fabric id the vport is created under
 * @port_cfg: port configuration (roles, WWNs, ...) for the new vport
 *
 * Allocates the driver-side vport object, registers it with the FCS
 * layer, optionally allocates a SCSI host for FCP initiator-mode
 * vports, and starts the vport.  On any failure after FCS registration
 * the vport is deleted again, waiting (via an on-stack completion) for
 * the asynchronous FCS delete to finish before freeing the memory.
 *
 * Returns BFA_STATUS_OK on success, or a bfa_status_t error code.
 */
bfa_status_t
bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
		  struct bfa_port_cfg_s *port_cfg)
{
	struct bfad_vport_s *vport;
	/* bfa_status_t, not int: matches the return type and every value
	 * assigned below (avoids silent enum<->int conversions). */
	bfa_status_t rc = BFA_STATUS_OK;
	unsigned long flags;
	struct completion fcomp;

	vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
	if (!vport) {
		rc = BFA_STATUS_ENOMEM;
		goto ext;
	}

	vport->drv_port.bfad = bfad;
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id,
				  port_cfg, vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (rc != BFA_STATUS_OK)
		goto ext_free_vport;

	/* FCP initiator-mode vports also need a SCSI host. */
	if (port_cfg->roles & BFA_PORT_ROLE_FCP_IM) {
		rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port);
		if (rc != BFA_STATUS_OK)
			goto ext_free_fcs_vport;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcs_vport_start(&vport->fcs_vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return BFA_STATUS_OK;

ext_free_fcs_vport:
	/*
	 * FCS vport delete completes asynchronously; comp_del is signalled
	 * by the delete callback, so the on-stack completion is safe —
	 * we wait for it before this frame goes away.
	 */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	vport->comp_del = &fcomp;
	init_completion(vport->comp_del);
	bfa_fcs_vport_delete(&vport->fcs_vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(vport->comp_del);
ext_free_vport:
	kfree(vport);
ext:
	return rc;
}