Example 1
/**
 * qla2x00_marker() - Send a marker IOCB to the firmware.
 * @ha: HA context
 * @loop_id: loop ID
 * @lun: LUN
 * @type: marker modifier
 *
 * Can be called from both normal and interrupt context.
 *
 * Returns non-zero if a failure occurred, else zero.
 */
int
__qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
                 uint8_t type)
{
    mrk_entry_t *mrk;
    struct mrk_entry_24xx *mrk24;

    mrk24 = NULL;
    mrk = (mrk_entry_t *)qla2x00_req_pkt(ha);
    if (mrk == NULL) {
        DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
                        __func__, ha->host_no));

        return (QLA_FUNCTION_FAILED);
    }

    mrk->entry_type = MARKER_TYPE;
    mrk->modifier = type;
    if (type != MK_SYNC_ALL) {
        if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
            mrk24 = (struct mrk_entry_24xx *) mrk;
            mrk24->nport_handle = cpu_to_le16(loop_id);
            mrk24->lun[1] = LSB(lun);
            mrk24->lun[2] = MSB(lun);
        } else {
            SET_TARGET_ID(ha, mrk->target, loop_id);
            mrk->lun = cpu_to_le16(lun);
        }
    }
    wmb();

    qla2x00_isp_cmd(ha);

    return (QLA_SUCCESS);
}
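The leading underscores mark __qla2x00_marker() as the unlocked variant: it assumes the caller already holds ha->hardware_lock (qla2x00_req_pkt() requires it; see Example 7). A minimal sketch of the conventional locked wrapper, mirroring the shape of the driver's own qla2x00_marker():

int
qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
    uint8_t type)
{
    int ret;
    unsigned long flags;

    /* Serialize ring access against the interrupt handler. */
    spin_lock_irqsave(&ha->hardware_lock, flags);
    ret = __qla2x00_marker(ha, loop_id, lun, type);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return (ret);
}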
Example 2
/**
 * qla2x00_isp_cmd() - Modify the request ring pointer.
 * @ha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 */
void
qla2x00_isp_cmd(scsi_qla_host_t *ha)
{
    device_reg_t __iomem *reg = ha->iobase;

    DEBUG5(printk("%s(): IOCB data:\n", __func__));
    DEBUG5(qla2x00_dump_buffer(
               (uint8_t *)ha->request_ring_ptr, REQUEST_ENTRY_SIZE));

    /* Adjust ring index. */
    ha->req_ring_index++;
    if (ha->req_ring_index == ha->request_q_length) {
        ha->req_ring_index = 0;
        ha->request_ring_ptr = ha->request_ring;
    } else
        ha->request_ring_ptr++;

    /* Set chip new ring index. */
    if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
        WRT_REG_DWORD(&reg->isp24.req_q_in, ha->req_ring_index);
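        /* Read back to flush the posted PCI write to the chip. */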
        RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
    } else {
        WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), ha->req_ring_index);
        RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
    }
}
Example 3
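/**
 * qla2x00_update_ms_fdmi_iocb() - Update the request byte counts of a
 * prepared FDMI MS/CT IOCB.
 * @ha: HA context
 * @req_size: new request size, in bytes
 *
 * Returns a pointer to the MS IOCB.
 */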
static inline ms_iocb_entry_t *
qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size)
{
	ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
	struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;

	if (IS_QLA24XX(ha) || IS_QLA25XX(ha)) {
		ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
		ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
	} else {
		ms_pkt->req_bytecount = cpu_to_le32(req_size);
		ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
	}

	return ms_pkt;
}
Example 4
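/**
 * qla2x00_chk_ms_status() - Check the completion status of an MS IOCB and
 * its CT response.
 * @ha: HA context
 * @ms_pkt: completed MS IOCB
 * @ct_rsp: CT response buffer
 * @routine: name of the calling routine, used in the debug messages
 *
 * Returns QLA_SUCCESS if the IOCB completed without error and the CT
 * request was accepted, else QLA_FUNCTION_FAILED.
 */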
static int
qla2x00_chk_ms_status(scsi_qla_host_t *ha, ms_iocb_entry_t *ms_pkt,
    struct ct_sns_rsp *ct_rsp, const char *routine)
{
	int rval;
	uint16_t comp_status;

	rval = QLA_FUNCTION_FAILED;
	if (ms_pkt->entry_status != 0) {
		DEBUG2_3(printk("scsi(%ld): %s failed, error status (%x).\n",
		    ha->host_no, routine, ms_pkt->entry_status));
	} else {
		if (IS_QLA24XX(ha) || IS_QLA25XX(ha))
			comp_status =
			    ((struct ct_entry_24xx *)ms_pkt)->comp_status;
		else
			comp_status = le16_to_cpu(ms_pkt->status);
		switch (comp_status) {
		case CS_COMPLETE:
		case CS_DATA_UNDERRUN:
		case CS_DATA_OVERRUN:		/* Overrun? */
			if (ct_rsp->header.response !=
			    __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
				DEBUG2_3(printk("scsi(%ld): %s failed, "
				    "rejected request:\n", ha->host_no,
				    routine));
				DEBUG2_3(qla2x00_dump_buffer(
				    (uint8_t *)&ct_rsp->header,
				    sizeof(struct ct_rsp_hdr)));
			} else
				rval = QLA_SUCCESS;
			break;
		default:
			DEBUG2_3(printk("scsi(%ld): %s failed, completion "
			    "status (%x).\n", ha->host_no, routine,
			    comp_status));
			break;
		}
	}
	return rval;
}
Example 5
/*
 * qla2x00_initialize_adapter
 *      Initialize board.
 *
 * Input:
 *      ha = adapter block pointer.
 *
 * Returns:
 *      0 = success
 */
int
qla2x00_initialize_adapter(scsi_qla_host_t *vha)
{
	int	rval;
	struct qla_hw_data *ha = vha->hw;
	struct req_que *req = ha->req_q_map[0];

	/* Clear adapter flags. */
	vha->flags.online = 0;
	ha->flags.chip_reset_done = 0;
	vha->flags.reset_active = 0;
	ha->flags.pci_channel_io_perm_failure = 0;
	ha->flags.eeh_busy = 0;
	ha->flags.thermal_supported = 1;
	atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
	atomic_set(&vha->loop_state, LOOP_DOWN);
	vha->device_flags = DFLG_NO_CABLE;
	vha->dpc_flags = 0;
	vha->flags.management_server_logged_in = 0;
	vha->marker_needed = 0;
	ha->isp_abort_cnt = 0;
	ha->beacon_blink_led = 0;

	set_bit(0, ha->req_qid_map);
	set_bit(0, ha->rsp_qid_map);

	ql_dbg(ql_dbg_init, vha, 0x0040,
	    "Configuring PCI space...\n");
	rval = ha->isp_ops->pci_config(vha);
	if (rval) {
		ql_log(ql_log_warn, vha, 0x0044,
		    "Unable to configure PCI space.\n");
		return (rval);
	}

	ha->isp_ops->reset_chip(vha);

	rval = qla2xxx_get_flash_info(vha);
	if (rval) {
		ql_log(ql_log_fatal, vha, 0x004f,
		    "Unable to validate FLASH data.\n");
		return (rval);
	}

	ha->isp_ops->get_flash_version(vha, req->ring);
	ql_dbg(ql_dbg_init, vha, 0x0061,
	    "Configure NVRAM parameters...\n");

	ha->isp_ops->nvram_config(vha);

	if (ha->flags.disable_serdes) {
		/* Mask HBA via NVRAM settings? */
		ql_log(ql_log_info, vha, 0x0077,
		    "Masking HBA WWPN "
		    "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
		    vha->port_name[0], vha->port_name[1],
		    vha->port_name[2], vha->port_name[3],
		    vha->port_name[4], vha->port_name[5],
		    vha->port_name[6], vha->port_name[7]);
		return QLA_FUNCTION_FAILED;
	}

	ql_dbg(ql_dbg_init, vha, 0x0078,
	    "Verifying loaded RISC code...\n");

	if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
		rval = ha->isp_ops->chip_diag(vha);
		if (rval)
			return (rval);
		rval = qla2x00_setup_chip(vha);
		if (rval)
			return (rval);
	}

	if (IS_QLA84XX(ha)) {
		ha->cs84xx = qla84xx_get_chip(vha);
		if (!ha->cs84xx) {
			ql_log(ql_log_warn, vha, 0x00d0,
			    "Unable to configure ISP84XX.\n");
			return QLA_FUNCTION_FAILED;
		}
	}

	if (qla_ini_mode_enabled(vha))
		rval = qla2x00_init_rings(vha);

	ha->flags.chip_reset_done = 1;

	if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
		/* Issue verify 84xx FW IOCB to complete 84xx initialization */
		rval = qla84xx_init_chip(vha);
		if (rval != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x00d4,
			    "Unable to initialize ISP84XX.\n");
			qla84xx_put_chip(vha);
		}
	}

	if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
		qla24xx_read_fcp_prio_cfg(vha);

	return (rval);
}
Example 6
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
{
	struct Scsi_Host *host = bsg_job->shost;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	bsg_job->reply->reply_payload_rcv_len = 0;

	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
		test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
		test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		ret = -EBUSY;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only the set-config operation is allowed when no config memory is allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
				~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_job->reply->result = DID_OK;
		} else {
			ret = -EINVAL;
			bsg_job->reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_job->reply->result = DID_OK;
			} else {
				ret = -EINVAL;
				bsg_job->reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_job->reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_job->reply->result = DID_OK;
		bsg_job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_job->reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				qla_printk(KERN_WARNING, ha,
					"Unable to allocate memory "
					"for fcp prio config data (%x).\n",
					FCP_PRIO_CFG_SIZE);
				bsg_job->reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
		    FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */
		if (!qla24xx_fcp_prio_cfg_valid(
			(struct qla_fcp_prio_cfg *)
			ha->fcp_prio_cfg, 1)) {
			bsg_job->reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer was invalid, fcp_prio_cfg
			 * is of no use.
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_job->reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	bsg_job->job_done(bsg_job);
	return ret;
}
Example 7
/**
 * qla2x00_req_pkt() - Retrieve a request packet from the request ring.
 * @ha: HA context
 *
 * Note: The caller must hold the hardware lock before calling this routine.
 *
 * Returns NULL if function failed, else, a pointer to the request packet.
 */
static request_t *
qla2x00_req_pkt(scsi_qla_host_t *ha)
{
    device_reg_t __iomem *reg = ha->iobase;
    request_t	*pkt = NULL;
    uint16_t	cnt;
    uint32_t	*dword_ptr;
    uint32_t	timer;
    uint16_t	req_cnt = 1;

    /* Wait 1 second for slot. */
    for (timer = HZ; timer; timer--) {
        if ((req_cnt + 2) >= ha->req_q_cnt) {
            /* Calculate number of free request entries. */
            if (IS_QLA24XX(ha) || IS_QLA25XX(ha))
                cnt = (uint16_t)RD_REG_DWORD(
                          &reg->isp24.req_q_out);
            else
                cnt = qla2x00_debounce_register(
                          ISP_REQ_Q_OUT(ha, &reg->isp));
            if (ha->req_ring_index < cnt)
                ha->req_q_cnt = cnt - ha->req_ring_index;
            else
                ha->req_q_cnt = ha->request_q_length -
                                (ha->req_ring_index - cnt);
        }
        /* If room for request in request ring. */
        if ((req_cnt + 2) < ha->req_q_cnt) {
            ha->req_q_cnt--;
            pkt = ha->request_ring_ptr;

            /* Zero out packet. */
            dword_ptr = (uint32_t *)pkt;
            for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
                *dword_ptr++ = 0;

            /* Set system defined field. */
            pkt->sys_define = (uint8_t)ha->req_ring_index;

            /* Set entry count. */
            pkt->entry_count = 1;

            break;
        }

        /* Release ring specific lock */
        spin_unlock(&ha->hardware_lock);

        udelay(2);   /* 2 us */

        /* Check for pending interrupts. */
        /* During init we issue marker directly */
        if (!ha->marker_needed)
            qla2x00_poll(ha);

        spin_lock_irq(&ha->hardware_lock);
    }
    if (!pkt) {
        DEBUG2_3(printk("%s(): **** FAILED ****\n", __func__));
    }

    return (pkt);
}
Example 8
/**
 * qla2x00_fdmi_rpa() - Register Port Attributes (RPA) with the FDMI server.
 * @vha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
{
	int rval, alen;
	uint32_t size, max_frame_size;
	struct qla_hw_data *ha = vha->hw;
	ms_iocb_entry_t *ms_pkt;
	struct ct_sns_req *ct_req;
	struct ct_sns_rsp *ct_rsp;
	uint8_t *entries;
	struct ct_fdmi_port_attr *eiter;
	struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;

	/* Issue RPA */
	/* Prepare common MS IOCB */
	/*   Request size adjusted after CT preparation */
	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);

	/* Prepare CT request */
	ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RPA_CMD,
	    RPA_RSP_SIZE);
	ct_rsp = &ha->ct_sns->p.rsp;

	/* Prepare FDMI command arguments -- attribute block, attributes. */
	memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
	size = WWN_SIZE + 4;

	/* Attributes */
	ct_req->req.rpa.attrs.count =
	    __constant_cpu_to_be32(FDMI_PORT_ATTR_COUNT - 1);
	entries = ct_req->req.rpa.port_name;

	/* FC4 types. */
	eiter = (struct ct_fdmi_port_attr *) (entries + size);
	eiter->type = __constant_cpu_to_be16(FDMI_PORT_FC4_TYPES);
	eiter->len = __constant_cpu_to_be16(4 + 32);
	eiter->a.fc4_types[2] = 0x01;
	size += 4 + 32;

	ql_dbg(ql_dbg_disc, vha, 0x2039,
	    "FC4_TYPES=%02x %02x.\n",
	    eiter->a.fc4_types[2],
	    eiter->a.fc4_types[1]);

	/* Supported speed. */
	eiter = (struct ct_fdmi_port_attr *) (entries + size);
	eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
	eiter->len = __constant_cpu_to_be16(4 + 4);
	if (IS_CNA_CAPABLE(ha))
		eiter->a.sup_speed = __constant_cpu_to_be32(
		    FDMI_PORT_SPEED_10GB);
	else if (IS_QLA25XX(ha))
		eiter->a.sup_speed = __constant_cpu_to_be32(
		    FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
		    FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_8GB);
	else if (IS_QLA24XX_TYPE(ha))
		eiter->a.sup_speed = __constant_cpu_to_be32(
		    FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
		    FDMI_PORT_SPEED_4GB);
	else if (IS_QLA23XX(ha))
		eiter->a.sup_speed = __constant_cpu_to_be32(
		    FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB);
	else
		eiter->a.sup_speed = __constant_cpu_to_be32(
		    FDMI_PORT_SPEED_1GB);
	size += 4 + 4;

	ql_dbg(ql_dbg_disc, vha, 0x203a,
	    "Supported_Speed=%x.\n", eiter->a.sup_speed);

	
	eiter = (struct ct_fdmi_port_attr *) (entries + size);
	eiter->type = __constant_cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
	eiter->len = __constant_cpu_to_be16(4 + 4);
	switch (ha->link_data_rate) {
	case PORT_SPEED_1GB:
		eiter->a.cur_speed =
		    __constant_cpu_to_be32(FDMI_PORT_SPEED_1GB);
		break;
	case PORT_SPEED_2GB:
		eiter->a.cur_speed =
		    __constant_cpu_to_be32(FDMI_PORT_SPEED_2GB);
		break;
	case PORT_SPEED_4GB:
		eiter->a.cur_speed =
		    __constant_cpu_to_be32(FDMI_PORT_SPEED_4GB);
		break;
	case PORT_SPEED_8GB:
		eiter->a.cur_speed =
		    __constant_cpu_to_be32(FDMI_PORT_SPEED_8GB);
		break;
	case PORT_SPEED_10GB:
		eiter->a.cur_speed =
		    __constant_cpu_to_be32(FDMI_PORT_SPEED_10GB);
		break;
	case PORT_SPEED_16GB:
		eiter->a.cur_speed =
		    __constant_cpu_to_be32(FDMI_PORT_SPEED_16GB);
		break;
	default:
		eiter->a.cur_speed =
		    __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
		break;
	}
	size += 4 + 4;

	ql_dbg(ql_dbg_disc, vha, 0x203b,
	    "Current_Speed=%x.\n", eiter->a.cur_speed);

	
	eiter = (struct ct_fdmi_port_attr *) (entries + size);
	eiter->type = __constant_cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
	eiter->len = __constant_cpu_to_be16(4 + 4);
	max_frame_size = IS_FWI2_CAPABLE(ha) ?
	    le16_to_cpu(icb24->frame_payload_size):
	    le16_to_cpu(ha->init_cb->frame_payload_size);
	eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
	size += 4 + 4;

	ql_dbg(ql_dbg_disc, vha, 0x203c,
	    "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);

	
	eiter = (struct ct_fdmi_port_attr *) (entries + size);
	eiter->type = __constant_cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
	strcpy(eiter->a.os_dev_name, QLA2XXX_DRIVER_NAME);
	alen = strlen(eiter->a.os_dev_name);
	alen += (alen & 3) ? (4 - (alen & 3)) : 4;
	eiter->len = cpu_to_be16(4 + alen);
	size += 4 + alen;

	ql_dbg(ql_dbg_disc, vha, 0x204b,
	    "OS_Device_Name=%s.\n", eiter->a.os_dev_name);

	
	if (strlen(fc_host_system_hostname(vha->host))) {
		ct_req->req.rpa.attrs.count =
		    __constant_cpu_to_be32(FDMI_PORT_ATTR_COUNT);
		eiter = (struct ct_fdmi_port_attr *) (entries + size);
		eiter->type = __constant_cpu_to_be16(FDMI_PORT_HOST_NAME);
		snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
		    "%s", fc_host_system_hostname(vha->host));
		alen = strlen(eiter->a.host_name);
		alen += (alen & 3) ? (4 - (alen & 3)) : 4;
		eiter->len = cpu_to_be16(4 + alen);
		size += 4 + alen;

		ql_dbg(ql_dbg_disc, vha, 0x203d,
		    "HostName=%s.\n", eiter->a.host_name);
	}

	/* Update MS request size. */
	qla2x00_update_ms_fdmi_iocb(vha, size + 16);

	ql_dbg(ql_dbg_disc, vha, 0x203e,
	    "RPA portname= %02x%02x%02x%02x%02X%02x%02x%02x size=%d.\n",
	    ct_req->req.rpa.port_name[0], ct_req->req.rpa.port_name[1],
	    ct_req->req.rpa.port_name[2], ct_req->req.rpa.port_name[3],
	    ct_req->req.rpa.port_name[4], ct_req->req.rpa.port_name[5],
	    ct_req->req.rpa.port_name[6], ct_req->req.rpa.port_name[7],
	    size);
	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
	    entries, size);

	/* Execute MS IOCB */
	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
	    sizeof(ms_iocb_entry_t));
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		ql_dbg(ql_dbg_disc, vha, 0x2040,
		    "RPA issue IOCB failed (%d).\n", rval);
	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
	    QLA_SUCCESS) {
		rval = QLA_FUNCTION_FAILED;
	} else {
		ql_dbg(ql_dbg_disc, vha, 0x2041,
		    "RPA exiting nornally.\n");
	}

	return rval;
}
Example 9
/**
 * qla2x00_fdmi_rpa() - Register Port Attributes (RPA) with the FDMI server.
 * @ha: HA context
 *
 * Returns 0 on success.
 */
static int
qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
{
	int rval, alen;
	uint32_t size, max_frame_size;

	ms_iocb_entry_t *ms_pkt;
	struct ct_sns_req *ct_req;
	struct ct_sns_rsp *ct_rsp;
	uint8_t *entries;
	struct ct_fdmi_port_attr *eiter;
	struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;

	/* Issue RPA */
	/* Prepare common MS IOCB */
	/*   Request size adjusted after CT preparation */
	ms_pkt = ha->isp_ops.prep_ms_fdmi_iocb(ha, 0, RPA_RSP_SIZE);

	/* Prepare CT request */
	ct_req = qla2x00_prep_ct_fdmi_req(&ha->ct_sns->p.req, RPA_CMD,
	    RPA_RSP_SIZE);
	ct_rsp = &ha->ct_sns->p.rsp;

	/* Prepare FDMI command arguments -- attribute block, attributes. */
	memcpy(ct_req->req.rpa.port_name, ha->port_name, WWN_SIZE);
	size = WWN_SIZE + 4;

	/* Attributes */
	ct_req->req.rpa.attrs.count =
	    __constant_cpu_to_be32(FDMI_PORT_ATTR_COUNT);
	entries = ct_req->req.rpa.port_name;

	/* FC4 types. */
	eiter = (struct ct_fdmi_port_attr *) (entries + size);
	eiter->type = __constant_cpu_to_be16(FDMI_PORT_FC4_TYPES);
	eiter->len = __constant_cpu_to_be16(4 + 32);
	eiter->a.fc4_types[2] = 0x01;
	size += 4 + 32;

	DEBUG13(printk("%s(%ld): FC4_TYPES=%02x %02x.\n", __func__, ha->host_no,
	    eiter->a.fc4_types[2], eiter->a.fc4_types[1]));

	/* Supported speed. */
	eiter = (struct ct_fdmi_port_attr *) (entries + size);
	eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
	eiter->len = __constant_cpu_to_be16(4 + 4);
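	/* The raw values below are FDMI port-speed bit masks; the newer
	 * variant in Example 8 spells them as FDMI_PORT_SPEED_* constants.
	 */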
	if (IS_QLA25XX(ha))
		eiter->a.sup_speed = __constant_cpu_to_be32(4);
	else if (IS_QLA24XX(ha))
		eiter->a.sup_speed = __constant_cpu_to_be32(8);
	else if (IS_QLA23XX(ha))
		eiter->a.sup_speed = __constant_cpu_to_be32(2);
	else
		eiter->a.sup_speed = __constant_cpu_to_be32(1);
	size += 4 + 4;

	DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, ha->host_no,
	    eiter->a.sup_speed));

	/* Current speed. */
	eiter = (struct ct_fdmi_port_attr *) (entries + size);
	eiter->type = __constant_cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
	eiter->len = __constant_cpu_to_be16(4 + 4);
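	/* Map the firmware link_data_rate code to an FDMI speed bit mask. */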
	switch (ha->link_data_rate) {
	case 0:
		eiter->a.cur_speed = __constant_cpu_to_be32(1);
		break;
	case 1:
		eiter->a.cur_speed = __constant_cpu_to_be32(2);
		break;
	case 3:
		eiter->a.cur_speed = __constant_cpu_to_be32(8);
		break;
	case 4:
		eiter->a.cur_speed = __constant_cpu_to_be32(4);
		break;
	}
	size += 4 + 4;

	DEBUG13(printk("%s(%ld): CURRENT_SPEED=%x.\n", __func__, ha->host_no,
	    eiter->a.cur_speed));

	/* Max frame size. */
	eiter = (struct ct_fdmi_port_attr *) (entries + size);
	eiter->type = __constant_cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
	eiter->len = __constant_cpu_to_be16(4 + 4);
	max_frame_size = IS_QLA24XX(ha) || IS_QLA25XX(ha) ?
		(uint32_t) icb24->frame_payload_size:
		(uint32_t) ha->init_cb->frame_payload_size;
	eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
	size += 4 + 4;

	DEBUG13(printk("%s(%ld): MAX_FRAME_SIZE=%x.\n", __func__, ha->host_no,
	    eiter->a.max_frame_size));

	/* OS device name. */
	eiter = (struct ct_fdmi_port_attr *) (entries + size);
	eiter->type = __constant_cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
	sprintf(eiter->a.os_dev_name, "/proc/scsi/qla2xxx/%ld", ha->host_no);
	alen = strlen(eiter->a.os_dev_name);
	alen += (alen & 3) ? (4 - (alen & 3)) : 4;
	eiter->len = cpu_to_be16(4 + alen);
	size += 4 + alen;

	DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, ha->host_no,
	    eiter->a.os_dev_name));

	/* Update MS request size. */
	qla2x00_update_ms_fdmi_iocb(ha, size + 16);

	DEBUG13(printk("%s(%ld): RPA portname="
	    "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
	    ha->host_no, ct_req->req.rpa.port_name[0],
	    ct_req->req.rpa.port_name[1], ct_req->req.rpa.port_name[2],
	    ct_req->req.rpa.port_name[3], ct_req->req.rpa.port_name[4],
	    ct_req->req.rpa.port_name[5], ct_req->req.rpa.port_name[6],
	    ct_req->req.rpa.port_name[7], size));
	DEBUG13(qla2x00_dump_buffer(entries, size));

	/* Execute MS IOCB */
	rval = qla2x00_issue_iocb(ha, ha->ms_iocb, ha->ms_iocb_dma,
	    sizeof(ms_iocb_entry_t));
	if (rval != QLA_SUCCESS) {
		/*EMPTY*/
		DEBUG2_3(printk("scsi(%ld): RPA issue IOCB failed (%d).\n",
		    ha->host_no, rval));
	} else if (qla2x00_chk_ms_status(ha, ms_pkt, ct_rsp, "RPA") !=
	    QLA_SUCCESS) {
		rval = QLA_FUNCTION_FAILED;
	} else {
		DEBUG2(printk("scsi(%ld): RPA exiting normally.\n",
		    ha->host_no));
	}

	return rval;
}