Example #1
void fnic_log_q_error(struct fnic *fnic)
{
	unsigned int i;
	u32 error_status;

	for (i = 0; i < fnic->raw_wq_count; i++) {
		error_status = ioread32(&fnic->wq[i].ctrl->error_status);
		if (error_status)
			shost_printk(KERN_ERR, fnic->lport->host,
				     "WQ[%d] error_status"
				     " %d\n", i, error_status);
	}

	for (i = 0; i < fnic->rq_count; i++) {
		error_status = ioread32(&fnic->rq[i].ctrl->error_status);
		if (error_status)
			shost_printk(KERN_ERR, fnic->lport->host,
				     "RQ[%d] error_status"
				     " %d\n", i, error_status);
	}

	for (i = 0; i < fnic->wq_copy_count; i++) {
		error_status = ioread32(&fnic->wq_copy[i].ctrl->error_status);
		if (error_status)
			shost_printk(KERN_ERR, fnic->lport->host,
				     "CWQ[%d] error_status"
				     " %d\n", i, error_status);
	}
}
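
All of the examples on this page share one logging call: shost_printk(level, shost, fmt, ...) works like printk(), but prefixes the message with the name of the SCSI host's device so output from different hosts can be told apart. A minimal sketch of the pattern, assuming a driver already holds a valid struct Scsi_Host pointer (the function name and the fields it logs are illustrative, not taken from any example here):

#include <scsi/scsi_host.h>

static void example_log_queue_limits(struct Scsi_Host *shost)
{
	/* level, host, then an ordinary printf-style format string */
	shost_printk(KERN_INFO, shost, "can_queue %d cmd_per_lun %d\n",
		     shost->can_queue, (int)shost->cmd_per_lun);
}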
Example #2
static int
beiscsi_set_static_ip(struct Scsi_Host *shost,
		struct iscsi_iface_param_info *iface_param,
		void *data, uint32_t dt_len)
{
	struct beiscsi_hba *phba = iscsi_host_priv(shost);
	struct iscsi_iface_param_info *iface_ip = NULL;
	struct iscsi_iface_param_info *iface_subnet = NULL;
	struct nlattr *nla;
	int ret;

	switch (iface_param->param) {
	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
		nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_ADDR);
		if (nla)
			iface_ip = nla_data(nla);

		nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_SUBNET);
		if (nla)
			iface_subnet = nla_data(nla);
		break;
	case ISCSI_NET_PARAM_IPV4_ADDR:
		iface_ip = iface_param;
		nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_SUBNET);
		if (nla)
			iface_subnet = nla_data(nla);
		break;
	case ISCSI_NET_PARAM_IPV4_SUBNET:
		iface_subnet = iface_param;
		nla = nla_find(data, dt_len, ISCSI_NET_PARAM_IPV4_ADDR);
		if (nla)
			iface_ip = nla_data(nla);
		break;
	default:
		shost_printk(KERN_ERR, shost, "Unsupported param %d\n",
			     iface_param->param);
	}

	if (!iface_ip || !iface_subnet) {
		shost_printk(KERN_ERR, shost, "IP and Subnet Mask required\n");
		return -EINVAL;
	}

	ret = mgmt_set_ip(phba, iface_ip, iface_subnet,
			ISCSI_BOOTPROTO_STATIC);

	return ret;
}
Example #3
File: fnic_main.c Project: 7799/linux
static int fnic_notify_set(struct fnic *fnic)
{
	int err;

	switch (vnic_dev_get_intr_mode(fnic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(fnic->vdev, FNIC_INTX_NOTIFY);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		err = vnic_dev_notify_set(fnic->vdev, -1);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(fnic->vdev, FNIC_MSIX_ERR_NOTIFY);
		break;
	default:
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Interrupt mode should be set up"
			     " before devcmd notify set %d\n",
			     vnic_dev_get_intr_mode(fnic->vdev));
		err = -1;
		break;
	}

	return err;
}
Example #4
static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
				    struct vscsiif_response *ring_rsp)
{
	uint16_t id = ring_rsp->rqid;
	unsigned long flags;
	struct vscsifrnt_shadow *shadow = info->shadow[id];
	int kick;

	spin_lock_irqsave(&info->shadow_lock, flags);
	shadow->wait_reset = 1;
	switch (shadow->rslt_reset) {
	case RSLT_RESET_WAITING:
		shadow->rslt_reset = ring_rsp->rslt;
		break;
	case RSLT_RESET_ERR:
		kick = _scsifront_put_rqid(info, id);
		spin_unlock_irqrestore(&info->shadow_lock, flags);
		kfree(shadow);
		if (kick)
			scsifront_wake_up(info);
		return;
	default:
		shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
			     "bad reset state %d, possibly leaking %u\n",
			     shadow->rslt_reset, id);
		break;
	}
	spin_unlock_irqrestore(&info->shadow_lock, flags);

	wake_up(&shadow->wq_reset);
}
Example #5
int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short chute)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
	struct iscsi_cleanup_req *req = embedded_payload(wrb);
	int status = 0;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));

	req->chute = chute;
	req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba));
	req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba));

	status = be_mcc_notify_wait(phba);
	if (status)
		shost_printk(KERN_WARNING, phba->shost,
			     " mgmt_epfw_cleanup , FAILED\n");
	spin_unlock(&ctrl->mbox_lock);
	return status;
}
Example #6
static int
beiscsi_set_ipv6(struct Scsi_Host *shost,
		struct iscsi_iface_param_info *iface_param,
		void *data, uint32_t dt_len)
{
	struct beiscsi_hba *phba = iscsi_host_priv(shost);
	int ret = 0;

	switch (iface_param->param) {
	case ISCSI_NET_PARAM_IFACE_ENABLE:
		if (iface_param->value[0] == ISCSI_IFACE_ENABLE)
			ret = beiscsi_create_ipv6_iface(phba);
		else {
			iscsi_destroy_iface(phba->ipv6_iface);
			ret = 0;
		}
		break;
	case ISCSI_NET_PARAM_IPV6_ADDR:
		ret = mgmt_set_ip(phba, iface_param, NULL,
				  ISCSI_BOOTPROTO_STATIC);
		break;
	default:
		shost_printk(KERN_ERR, shost, "Param %d not supported\n",
			     iface_param->param);
	}

	return ret;
}
Example #7
int be2iscsi_iface_set_param(struct Scsi_Host *shost,
		void *data, uint32_t dt_len)
{
	struct iscsi_iface_param_info *iface_param = NULL;
	struct nlattr *attrib;
	uint32_t rm_len = dt_len;
	int ret = 0;

	nla_for_each_attr(attrib, data, dt_len, rm_len) {
		iface_param = nla_data(attrib);

		if (iface_param->param_type != ISCSI_NET_PARAM)
			continue;

		/*
		 * BE2ISCSI only supports 1 interface
		 */
		if (iface_param->iface_num) {
			shost_printk(KERN_ERR, shost, "Invalid iface_num %d."
				     "Only iface_num 0 is supported.\n",
				     iface_param->iface_num);
			return -EINVAL;
		}

		switch (iface_param->iface_type) {
		case ISCSI_IFACE_TYPE_IPV4:
			ret = beiscsi_set_ipv4(shost, iface_param,
					       data, dt_len);
			break;
		case ISCSI_IFACE_TYPE_IPV6:
			ret = beiscsi_set_ipv6(shost, iface_param,
					       data, dt_len);
			break;
		default:
			shost_printk(KERN_ERR, shost,
				     "Invalid iface type :%d passed\n",
				     iface_param->iface_type);
			break;
		}

		if (ret)
			return ret;
	}

	return ret;
}
Example #8
static int
beiscsi_set_ipv4(struct Scsi_Host *shost,
		struct iscsi_iface_param_info *iface_param,
		void *data, uint32_t dt_len)
{
	struct beiscsi_hba *phba = iscsi_host_priv(shost);
	int ret = 0;

	/* Check the param */
	switch (iface_param->param) {
	case ISCSI_NET_PARAM_IPV4_GW:
		ret = mgmt_set_gateway(phba, iface_param);
		break;
	case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
		if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP)
			ret = mgmt_set_ip(phba, iface_param,
					NULL, ISCSI_BOOTPROTO_DHCP);
		else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC)
			ret = beiscsi_set_static_ip(shost, iface_param,
						    data, dt_len);
		else
			shost_printk(KERN_ERR, shost, "Invalid BOOTPROTO: %d\n",
					iface_param->value[0]);
		break;
	case ISCSI_NET_PARAM_IFACE_ENABLE:
		if (iface_param->value[0] == ISCSI_IFACE_ENABLE)
			ret = beiscsi_create_ipv4_iface(phba);
		else
			iscsi_destroy_iface(phba->ipv4_iface);
		break;
	case ISCSI_NET_PARAM_IPV4_SUBNET:
	case ISCSI_NET_PARAM_IPV4_ADDR:
		ret = beiscsi_set_static_ip(shost, iface_param,
					    data, dt_len);
		break;
	default:
		shost_printk(KERN_ERR, shost, "Param %d not supported\n",
			     iface_param->param);
	}

	return ret;
}
Example #9
/**
 * beiscsi_session_create - creates a new iscsi session
 * @cmds_max: max commands supported
 * @qdepth: max queue depth supported
 * @initial_cmdsn: initial iscsi CMDSN
 */
struct iscsi_cls_session *beiscsi_session_create(struct iscsi_endpoint *ep,
						 u16 cmds_max,
						 u16 qdepth,
						 u32 initial_cmdsn)
{
	struct Scsi_Host *shost;
	struct beiscsi_endpoint *beiscsi_ep;
	struct iscsi_cls_session *cls_session;
	struct beiscsi_hba *phba;
	struct iscsi_session *sess;
	struct beiscsi_session *beiscsi_sess;
	struct beiscsi_io_task *io_task;

	SE_DEBUG(DBG_LVL_8, "In beiscsi_session_create\n");

	if (!ep) {
		SE_DEBUG(DBG_LVL_1, "beiscsi_session_create: invalid ep\n");
		return NULL;
	}
	beiscsi_ep = ep->dd_data;
	phba = beiscsi_ep->phba;
	shost = phba->shost;
	if (cmds_max > beiscsi_ep->phba->params.wrbs_per_cxn) {
		shost_printk(KERN_ERR, shost, "Cannot handle %d cmds."
			     "Max cmds per session supported is %d. Using %d. "
			     "\n", cmds_max,
			      beiscsi_ep->phba->params.wrbs_per_cxn,
			      beiscsi_ep->phba->params.wrbs_per_cxn);
		cmds_max = beiscsi_ep->phba->params.wrbs_per_cxn;
	}

	cls_session = iscsi_session_setup(&beiscsi_iscsi_transport,
					  shost, cmds_max,
					  sizeof(*beiscsi_sess),
					  sizeof(*io_task),
					  initial_cmdsn, ISCSI_MAX_TARGET);
	if (!cls_session)
		return NULL;
	sess = cls_session->dd_data;
	beiscsi_sess = sess->dd_data;
	beiscsi_sess->bhs_pool = pci_pool_create("beiscsi_bhs_pool",
						   phba->pcidev,
						   sizeof(struct be_cmd_bhs),
						   64, 0);
	if (!beiscsi_sess->bhs_pool)
		goto destroy_sess;

	return cls_session;
destroy_sess:
	iscsi_session_teardown(cls_session);
	return NULL;
}
Example #10
static int beiscsi_create_ipv6_iface(struct beiscsi_hba *phba)
{
	if (phba->ipv6_iface)
		return 0;

	phba->ipv6_iface = iscsi_create_iface(phba->shost,
					      &beiscsi_iscsi_transport,
					      ISCSI_IFACE_TYPE_IPV6,
					      0, 0);
	if (!phba->ipv6_iface) {
		shost_printk(KERN_ERR, phba->shost, "Could not "
			     "create default IPv6 address.\n");
		return -ENODEV;
	}

	return 0;
}
Example #11
int mgmt_get_fw_config(struct be_ctrl_info *ctrl,
				struct beiscsi_hba *phba)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_fw_cfg *req = embedded_payload(wrb);
	int status = 0;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_fw_cfg *pfw_cfg;
		pfw_cfg = req;
		phba->fw_config.phys_port = pfw_cfg->phys_port;
		phba->fw_config.iscsi_icd_start =
					pfw_cfg->ulp[0].icd_base;
		phba->fw_config.iscsi_icd_count =
					pfw_cfg->ulp[0].icd_count;
		phba->fw_config.iscsi_cid_start =
					pfw_cfg->ulp[0].sq_base;
		phba->fw_config.iscsi_cid_count =
					pfw_cfg->ulp[0].sq_count;
		if (phba->fw_config.iscsi_cid_count > (BE2_MAX_SESSIONS / 2)) {
			SE_DEBUG(DBG_LVL_8,
				"FW reported MAX CXNS as %d\t"
				"Max Supported = %d.\n",
				phba->fw_config.iscsi_cid_count,
				BE2_MAX_SESSIONS);
			phba->fw_config.iscsi_cid_count = BE2_MAX_SESSIONS / 2;
		}
	} else {
		shost_printk(KERN_WARNING, phba->shost,
			     "Failed in mgmt_get_fw_config\n");
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
Example #12
/**
 * srp_reconnect_work() - reconnect and schedule a new attempt if necessary
 * @work: Work structure used for scheduling this operation.
 */
static void srp_reconnect_work(struct work_struct *work)
{
	struct srp_rport *rport = container_of(to_delayed_work(work),
					struct srp_rport, reconnect_work);
	struct Scsi_Host *shost = rport_to_shost(rport);
	int delay, res;

	res = srp_reconnect_rport(rport);
	if (res != 0) {
		shost_printk(KERN_ERR, shost,
			     "reconnect attempt %d failed (%d)\n",
			     ++rport->failed_reconnects, res);
		delay = rport->reconnect_delay *
			min(100, max(1, rport->failed_reconnects - 10));
		if (delay > 0)
			queue_delayed_work(system_long_wq,
					   &rport->reconnect_work, delay * HZ);
	}
}
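
The delay computed above is a capped linear backoff: max(1, failed_reconnects - 10) pins the multiplier to 1 for the first eleven failed attempts, after which it grows by one per failure until min(100, ...) caps it at 100. A standalone sketch of just that arithmetic (plain userspace C, not kernel code; the 10-second base delay is a made-up example value):

#include <stdio.h>

static int backoff_multiplier(int failed_reconnects)
{
	int m = failed_reconnects - 10;

	if (m < 1)
		m = 1;		/* max(1, ...) */
	if (m > 100)
		m = 100;	/* min(100, ...) */
	return m;
}

int main(void)
{
	const int reconnect_delay = 10;	/* seconds, hypothetical */
	int failed;

	for (failed = 1; failed <= 120; failed += 17)
		printf("failure %3d -> retry in %4d s\n",
		       failed, reconnect_delay * backoff_multiplier(failed));
	return 0;
}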
Example #13
static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id)
{
	struct vscsifrnt_shadow *s = info->shadow[id];
	int i;

	if (s->sc->sc_data_direction == DMA_NONE)
		return;

	for (i = 0; i < s->nr_grants; i++) {
		if (unlikely(gnttab_query_foreign_access(s->gref[i]) != 0)) {
			shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
				     "grant still in use by backend\n");
			BUG();
		}
		gnttab_end_foreign_access(s->gref[i], 0, 0UL);
	}

	kfree(s->sg);
}
Example #14
void BNX2FC_HBA_DBG(const struct fc_lport *lport, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (likely(!(bnx2fc_debug_level & LOG_HBA)))
		return;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (lport && lport->host)
		shost_printk(KERN_INFO, lport->host, PFX "%pV", &vaf);
	else
		pr_info("NULL %pV", &vaf);

	va_end(args);
}
Example #15
int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do)
{
	unsigned int tot_rq_work_done = 0, cur_work_done;
	unsigned int i;
	int err;

	for (i = 0; i < fnic->rq_count; i++) {
		cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do,
						fnic_rq_cmpl_handler_cont,
						NULL);
		if (cur_work_done) {
			err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
			if (err)
				shost_printk(KERN_ERR, fnic->lport->host,
					     "fnic_alloc_rq_frame cant alloc"
					     " frame\n");
		}
		tot_rq_work_done += cur_work_done;
	}

	return tot_rq_work_done;
}
Example #16
void BNX2FC_TGT_DBG(const struct bnx2fc_rport *tgt, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (likely(!(bnx2fc_debug_level & LOG_TGT)))
		return;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (tgt && tgt->port && tgt->port->lport && tgt->port->lport->host &&
	    tgt->rport)
		shost_printk(KERN_INFO, tgt->port->lport->host,
			     PFX "port:%x %pV",
			     tgt->rport->port_id, &vaf);
	else
		pr_info("NULL %pV", &vaf);

	va_end(args);
}
Example #17
void BNX2FC_IO_DBG(const struct bnx2fc_cmd *io_req, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (likely(!(bnx2fc_debug_level & LOG_IO)))
		return;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (io_req && io_req->port && io_req->port->lport &&
	    io_req->port->lport->host)
		shost_printk(KERN_INFO, io_req->port->lport->host,
			     PFX "xid:0x%x %pV",
			     io_req->xid, &vaf);
	else
		pr_info("NULL %pV", &vaf);

	va_end(args);
}
Example #18
unsigned char mgmt_get_fw_config(struct be_ctrl_info *ctrl,
				struct beiscsi_hba *phba)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_fw_cfg *req = embedded_payload(wrb);
	int status = 0;

	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			   OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_fw_cfg *pfw_cfg;
		pfw_cfg = req;
		phba->fw_config.phys_port = pfw_cfg->phys_port;
		phba->fw_config.iscsi_icd_start =
					pfw_cfg->ulp[0].icd_base;
		phba->fw_config.iscsi_icd_count =
					pfw_cfg->ulp[0].icd_count;
		phba->fw_config.iscsi_cid_start =
					pfw_cfg->ulp[0].sq_base;
		phba->fw_config.iscsi_cid_count =
					pfw_cfg->ulp[0].sq_count;
	} else {
		shost_printk(KERN_WARNING, phba->shost,
			     "Failed in mgmt_get_fw_config \n");
	}

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
Example #19
static int generic_NCR5380_init_one(struct scsi_host_template *tpnt,
			struct device *pdev, int base, int irq, int board)
{
	bool is_pmio = base <= 0xffff;
	int ret;
	int flags = 0;
	unsigned int *ports = NULL;
	u8 *magic = NULL;
	int i;
	int port_idx = -1;
	unsigned long region_size;
	struct Scsi_Host *instance;
	struct NCR5380_hostdata *hostdata;
	u8 __iomem *iomem;

	switch (board) {
	case BOARD_NCR5380:
		flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP;
		break;
	case BOARD_NCR53C400A:
		ports = ncr_53c400a_ports;
		magic = ncr_53c400a_magic;
		break;
	case BOARD_HP_C2502:
		ports = ncr_53c400a_ports;
		magic = hp_c2502_magic;
		break;
	case BOARD_DTC3181E:
		ports = dtc_3181e_ports;
		magic = ncr_53c400a_magic;
		break;
	}

	if (is_pmio && ports && magic) {
		/* wakeup sequence for the NCR53C400A and DTC3181E */

		/* Disable the adapter and look for a free io port */
		magic_configure(-1, 0, magic);

		region_size = 16;
		if (base)
			for (i = 0; ports[i]; i++) {
				if (base == ports[i]) {	/* index found */
					if (!request_region(ports[i],
							    region_size,
							    "ncr53c80"))
						return -EBUSY;
					break;
				}
			}
		else
			for (i = 0; ports[i]; i++) {
				if (!request_region(ports[i], region_size,
						    "ncr53c80"))
					continue;
				if (inb(ports[i]) == 0xff)
					break;
				release_region(ports[i], region_size);
			}
		if (ports[i]) {
			/* At this point we have our region reserved */
			magic_configure(i, 0, magic); /* no IRQ yet */
			base = ports[i];
			outb(0xc0, base + 9);
			if (inb(base + 9) != 0x80) {
				ret = -ENODEV;
				goto out_release;
			}
			port_idx = i;
		} else
			return -EINVAL;
	} else if (is_pmio) {
		/* NCR5380 - no configuration, just grab */
		region_size = 8;
		if (!base || !request_region(base, region_size, "ncr5380"))
			return -EBUSY;
	} else {	/* MMIO */
		region_size = NCR53C400_region_size;
		if (!request_mem_region(base, region_size, "ncr5380"))
			return -EBUSY;
	}

	if (is_pmio)
		iomem = ioport_map(base, region_size);
	else
		iomem = ioremap(base, region_size);

	if (!iomem) {
		ret = -ENOMEM;
		goto out_release;
	}

	instance = scsi_host_alloc(tpnt, sizeof(struct NCR5380_hostdata));
	if (instance == NULL) {
		ret = -ENOMEM;
		goto out_unmap;
	}
	hostdata = shost_priv(instance);

	hostdata->io = iomem;
	hostdata->region_size = region_size;

	if (is_pmio) {
		hostdata->io_port = base;
		hostdata->io_width = 1; /* 8-bit PDMA by default */
		hostdata->offset = 0;

		/*
		 * On NCR53C400 boards, NCR5380 registers are mapped 8 past
		 * the base address.
		 */
		switch (board) {
		case BOARD_NCR53C400:
			hostdata->io_port += 8;
			hostdata->c400_ctl_status = 0;
			hostdata->c400_blk_cnt = 1;
			hostdata->c400_host_buf = 4;
			break;
		case BOARD_DTC3181E:
			hostdata->io_width = 2;	/* 16-bit PDMA */
			/* fall through */
		case BOARD_NCR53C400A:
		case BOARD_HP_C2502:
			hostdata->c400_ctl_status = 9;
			hostdata->c400_blk_cnt = 10;
			hostdata->c400_host_buf = 8;
			break;
		}
	} else {
		hostdata->base = base;
		hostdata->offset = NCR53C400_mem_base;
		switch (board) {
		case BOARD_NCR53C400:
			hostdata->c400_ctl_status = 0x100;
			hostdata->c400_blk_cnt = 0x101;
			hostdata->c400_host_buf = 0x104;
			break;
		case BOARD_DTC3181E:
		case BOARD_NCR53C400A:
		case BOARD_HP_C2502:
			pr_err(DRV_MODULE_NAME ": unknown register offsets\n");
			ret = -EINVAL;
			goto out_unregister;
		}
	}

	/* Check for vacant slot */
	NCR5380_write(MODE_REG, 0);
	if (NCR5380_read(MODE_REG) != 0) {
		ret = -ENODEV;
		goto out_unregister;
	}

	ret = NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP);
	if (ret)
		goto out_unregister;

	switch (board) {
	case BOARD_NCR53C400:
	case BOARD_DTC3181E:
	case BOARD_NCR53C400A:
	case BOARD_HP_C2502:
		NCR5380_write(hostdata->c400_ctl_status, CSR_BASE);
	}

	NCR5380_maybe_reset_bus(instance);

	/* Compatibility with documented NCR5380 kernel parameters */
	if (irq == 255 || irq == 0)
		irq = NO_IRQ;
	else if (irq == -1)
		irq = IRQ_AUTO;

	if (board == BOARD_HP_C2502) {
		int *irq_table = hp_c2502_irqs;
		int board_irq = -1;

		switch (irq) {
		case NO_IRQ:
			board_irq = 0;
			break;
		case IRQ_AUTO:
			board_irq = legacy_find_free_irq(irq_table);
			break;
		default:
			while (*irq_table != -1)
				if (*irq_table++ == irq)
					board_irq = irq;
		}

		if (board_irq <= 0) {
			board_irq = 0;
			irq = NO_IRQ;
		}

		magic_configure(port_idx, board_irq, magic);
	}

	if (irq == IRQ_AUTO) {
		instance->irq = g_NCR5380_probe_irq(instance);
		if (instance->irq == NO_IRQ)
			shost_printk(KERN_INFO, instance, "no irq detected\n");
	} else {
		instance->irq = irq;
		if (instance->irq == NO_IRQ)
			shost_printk(KERN_INFO, instance, "no irq provided\n");
	}

	if (instance->irq != NO_IRQ) {
		if (request_irq(instance->irq, generic_NCR5380_intr,
				0, "NCR5380", instance)) {
			shost_printk(KERN_INFO, instance,
			             "irq %d denied\n", instance->irq);
			instance->irq = NO_IRQ;
		} else {
			shost_printk(KERN_INFO, instance,
			             "irq %d acquired\n", instance->irq);
		}
	}

	ret = scsi_add_host(instance, pdev);
	if (ret)
		goto out_free_irq;
	scsi_scan_host(instance);
	dev_set_drvdata(pdev, instance);
	return 0;

out_free_irq:
	if (instance->irq != NO_IRQ)
		free_irq(instance->irq, instance);
	NCR5380_exit(instance);
out_unregister:
	scsi_host_put(instance);
out_unmap:
	iounmap(iomem);
out_release:
	if (is_pmio)
		release_region(base, region_size);
	else
		release_mem_region(base, region_size);
	return ret;
}
Example #20
static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
    struct aha1542_hostdata *aha1542 = shost_priv(sh);
    u8 direction;
    u8 target = cmd->device->id;
    u8 lun = cmd->device->lun;
    unsigned long flags;
    int bufflen = scsi_bufflen(cmd);
    int mbo, sg_count;
    struct mailbox *mb = aha1542->mb;
    struct ccb *ccb = aha1542->ccb;
    struct chain *cptr;

    if (*cmd->cmnd == REQUEST_SENSE) {
        /* Don't do the command - we have the sense data already */
        cmd->result = 0;
        cmd->scsi_done(cmd);
        return 0;
    }
#ifdef DEBUG
    {
        int i = -1;
        if (*cmd->cmnd == READ_10 || *cmd->cmnd == WRITE_10)
            i = xscsi2int(cmd->cmnd + 2);
        else if (*cmd->cmnd == READ_6 || *cmd->cmnd == WRITE_6)
            i = scsi2int(cmd->cmnd + 2);
        shost_printk(KERN_DEBUG, sh, "aha1542_queuecommand: dev %d cmd %02x pos %d len %d",
                     target, *cmd->cmnd, i, bufflen);
        print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len);
    }
#endif
    if (bufflen) {	/* allocate memory before taking host_lock */
        sg_count = scsi_sg_count(cmd);
        cptr = kmalloc(sizeof(*cptr) * sg_count, GFP_KERNEL | GFP_DMA);
        if (!cptr)
            return SCSI_MLQUEUE_HOST_BUSY;
    } else {
        sg_count = 0;
        cptr = NULL;
    }

    /* Use the outgoing mailboxes in a round-robin fashion, because this
       is how the host adapter will scan for them */

    spin_lock_irqsave(sh->host_lock, flags);
    mbo = aha1542->aha1542_last_mbo_used + 1;
    if (mbo >= AHA1542_MAILBOXES)
        mbo = 0;

    do {
        if (mb[mbo].status == 0 && aha1542->int_cmds[mbo] == NULL)
            break;
        mbo++;
        if (mbo >= AHA1542_MAILBOXES)
            mbo = 0;
    } while (mbo != aha1542->aha1542_last_mbo_used);

    if (mb[mbo].status || aha1542->int_cmds[mbo])
        panic("Unable to find empty mailbox for aha1542.\n");

    aha1542->int_cmds[mbo] = cmd;	/* This will effectively prevent someone else from
					   screwing with this cdb. */

    aha1542->aha1542_last_mbo_used = mbo;

#ifdef DEBUG
    shost_printk(KERN_DEBUG, sh, "Sending command (%d %p)...", mbo, cmd->scsi_done);
#endif

    any2scsi(mb[mbo].ccbptr, isa_virt_to_bus(&ccb[mbo]));	/* This gets trashed for some reason */

    memset(&ccb[mbo], 0, sizeof(struct ccb));

    ccb[mbo].cdblen = cmd->cmd_len;

    direction = 0;
    if (*cmd->cmnd == READ_10 || *cmd->cmnd == READ_6)
        direction = 8;
    else if (*cmd->cmnd == WRITE_10 || *cmd->cmnd == WRITE_6)
        direction = 16;

    memcpy(ccb[mbo].cdb, cmd->cmnd, ccb[mbo].cdblen);

    if (bufflen) {
        struct scatterlist *sg;
        int i;

        ccb[mbo].op = 2;	/* SCSI Initiator Command  w/scatter-gather */
        cmd->host_scribble = (void *)cptr;
        scsi_for_each_sg(cmd, sg, sg_count, i) {
            any2scsi(cptr[i].dataptr, isa_page_to_bus(sg_page(sg))
                     + sg->offset);
            any2scsi(cptr[i].datalen, sg->length);
        };
        any2scsi(ccb[mbo].datalen, sg_count * sizeof(struct chain));
        any2scsi(ccb[mbo].dataptr, isa_virt_to_bus(cptr));
#ifdef DEBUG
        shost_printk(KERN_DEBUG, sh, "cptr %p: ", cptr);
        print_hex_dump_bytes("cptr: ", DUMP_PREFIX_NONE, cptr, 18);
#endif
    } else {
Example #21
static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
{
    struct Scsi_Host *sh = dev_id;
    struct aha1542_hostdata *aha1542 = shost_priv(sh);
    void (*my_done)(struct scsi_cmnd *) = NULL;
    int errstatus, mbi, mbo, mbistatus;
    int number_serviced;
    unsigned long flags;
    struct scsi_cmnd *tmp_cmd;
    int flag;
    struct mailbox *mb = aha1542->mb;
    struct ccb *ccb = aha1542->ccb;

#ifdef DEBUG
    {
        flag = inb(INTRFLAGS(sh->io_port));
        shost_printk(KERN_DEBUG, sh, "aha1542_intr_handle: ");
        if (!(flag & ANYINTR))
            printk("no interrupt?");
        if (flag & MBIF)
            printk("MBIF ");
        if (flag & MBOA)
            printk("MBOF ");
        if (flag & HACC)
            printk("HACC ");
        if (flag & SCRD)
            printk("SCRD ");
        printk("status %02x\n", inb(STATUS(sh->io_port)));
    };
#endif
    number_serviced = 0;

    spin_lock_irqsave(sh->host_lock, flags);
    while (1) {
        flag = inb(INTRFLAGS(sh->io_port));

        /* Check for unusual interrupts.  If any of these happen, we should
           probably do something special, but for now just printing a message
           is sufficient.  A SCSI reset detected is something that we really
           need to deal with in some way. */
        if (flag & ~MBIF) {
            if (flag & MBOA)
                printk("MBOF ");
            if (flag & HACC)
                printk("HACC ");
            if (flag & SCRD)
                printk("SCRD ");
        }
        aha1542_intr_reset(sh->io_port);

        mbi = aha1542->aha1542_last_mbi_used + 1;
        if (mbi >= 2 * AHA1542_MAILBOXES)
            mbi = AHA1542_MAILBOXES;

        do {
            if (mb[mbi].status != 0)
                break;
            mbi++;
            if (mbi >= 2 * AHA1542_MAILBOXES)
                mbi = AHA1542_MAILBOXES;
        } while (mbi != aha1542->aha1542_last_mbi_used);

        if (mb[mbi].status == 0) {
            spin_unlock_irqrestore(sh->host_lock, flags);
            /* Hmm, no mail.  Must have read it the last time around */
            if (!number_serviced)
                shost_printk(KERN_WARNING, sh, "interrupt received, but no mail.\n");
            return IRQ_HANDLED;
        };

        mbo = (scsi2int(mb[mbi].ccbptr) - (isa_virt_to_bus(&ccb[0]))) / sizeof(struct ccb);
        mbistatus = mb[mbi].status;
        mb[mbi].status = 0;
        aha1542->aha1542_last_mbi_used = mbi;

#ifdef DEBUG
        if (ccb[mbo].tarstat | ccb[mbo].hastat)
            shost_printk(KERN_DEBUG, sh, "aha1542_command: returning %x (status %d)\n",
                         ccb[mbo].tarstat + ((int) ccb[mbo].hastat << 16), mb[mbi].status);
#endif

        if (mbistatus == 3)
            continue;	/* Aborted command not found */

#ifdef DEBUG
        shost_printk(KERN_DEBUG, sh, "...done %d %d\n", mbo, mbi);
#endif

        tmp_cmd = aha1542->int_cmds[mbo];

        if (!tmp_cmd || !tmp_cmd->scsi_done) {
            spin_unlock_irqrestore(sh->host_lock, flags);
            shost_printk(KERN_WARNING, sh, "Unexpected interrupt\n");
            shost_printk(KERN_WARNING, sh, "tarstat=%x, hastat=%x idlun=%x ccb#=%d\n", ccb[mbo].tarstat,
                         ccb[mbo].hastat, ccb[mbo].idlun, mbo);
            return IRQ_HANDLED;
        }
        my_done = tmp_cmd->scsi_done;
        kfree(tmp_cmd->host_scribble);
        tmp_cmd->host_scribble = NULL;
        /* Fetch the sense data, and tuck it away, in the required slot.  The
           Adaptec automatically fetches it, and there is no guarantee that
           we will still have it in the cdb when we come back */
        if (ccb[mbo].tarstat == 2)
            memcpy(tmp_cmd->sense_buffer, &ccb[mbo].cdb[ccb[mbo].cdblen],
                   SCSI_SENSE_BUFFERSIZE);


        /* is there mail :-) */

        /* more error checking left out here */
        if (mbistatus != 1)
            /* This is surely wrong, but I don't know what's right */
            errstatus = makecode(ccb[mbo].hastat, ccb[mbo].tarstat);
        else
            errstatus = 0;

#ifdef DEBUG
        if (errstatus)
            shost_printk(KERN_DEBUG, sh, "(aha1542 error:%x %x %x) ", errstatus,
                         ccb[mbo].hastat, ccb[mbo].tarstat);
        if (ccb[mbo].tarstat == 2)
            print_hex_dump_bytes("sense: ", DUMP_PREFIX_NONE, &ccb[mbo].cdb[ccb[mbo].cdblen], 12);
        if (errstatus)
            printk("aha1542_intr_handle: returning %6x\n", errstatus);
#endif
        tmp_cmd->result = errstatus;
        aha1542->int_cmds[mbo] = NULL;	/* This effectively frees up the mailbox slot, as
						   far as queuecommand is concerned */
        my_done(tmp_cmd);
        number_serviced++;
    };
}
Example #22
int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
			 struct request *rsp)
{
	u8 *req_data = NULL, *resp_data = NULL, *buf;
	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
	int error = -EINVAL;

	/* eight is the minimum size for request and response frames */
	if (blk_rq_bytes(req) < 8 || blk_rq_bytes(rsp) < 8)
		goto out;

	if (bio_offset(req->bio) + blk_rq_bytes(req) > PAGE_SIZE ||
	    bio_offset(rsp->bio) + blk_rq_bytes(rsp) > PAGE_SIZE) {
		shost_printk(KERN_ERR, shost,
			"SMP request/response frame crosses page boundary");
		goto out;
	}

	req_data = kzalloc(blk_rq_bytes(req), GFP_KERNEL);

	/* make sure frame can always be built ... we copy
	 * back only the requested length */
	resp_data = kzalloc(max(blk_rq_bytes(rsp), 128U), GFP_KERNEL);

	if (!req_data || !resp_data) {
		error = -ENOMEM;
		goto out;
	}

	local_irq_disable();
	buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio);
	memcpy(req_data, buf, blk_rq_bytes(req));
	kunmap_atomic(buf - bio_offset(req->bio), KM_USER0);
	local_irq_enable();

	if (req_data[0] != SMP_REQUEST)
		goto out;

	/* always succeeds ... even if we can't process the request
	 * the result is in the response frame */
	error = 0;

	/* set up default don't know response */
	resp_data[0] = SMP_RESPONSE;
	resp_data[1] = req_data[1];
	resp_data[2] = SMP_RESP_FUNC_UNK;

	switch (req_data[1]) {
	case SMP_REPORT_GENERAL:
		req->resid_len -= 8;
		rsp->resid_len -= 32;
		resp_data[2] = SMP_RESP_FUNC_ACC;
		resp_data[9] = sas_ha->num_phys;
		break;

	case SMP_REPORT_MANUF_INFO:
		req->resid_len -= 8;
		rsp->resid_len -= 64;
		resp_data[2] = SMP_RESP_FUNC_ACC;
		memcpy(resp_data + 12, shost->hostt->name,
		       SAS_EXPANDER_VENDOR_ID_LEN);
		memcpy(resp_data + 20, "libsas virt phy",
		       SAS_EXPANDER_PRODUCT_ID_LEN);
		break;

	case SMP_READ_GPIO_REG:
		/* FIXME: need GPIO support in the transport class */
		break;

	case SMP_DISCOVER:
		req->resid_len -= 16;
		if ((int)req->resid_len < 0) {
			req->resid_len = 0;
			error = -EINVAL;
			goto out;
		}
		rsp->resid_len -= 56;
		sas_host_smp_discover(sas_ha, resp_data, req_data[9]);
		break;

	case SMP_REPORT_PHY_ERR_LOG:
		/* FIXME: could implement this with additional
		 * libsas callbacks providing the HW supports it */
		break;

	case SMP_REPORT_PHY_SATA:
		req->resid_len -= 16;
		if ((int)req->resid_len < 0) {
			req->resid_len = 0;
			error = -EINVAL;
			goto out;
		}
		rsp->resid_len -= 60;
		sas_report_phy_sata(sas_ha, resp_data, req_data[9]);
		break;

	case SMP_REPORT_ROUTE_INFO:
		/* Can't implement; hosts have no routes */
		break;

	case SMP_WRITE_GPIO_REG:
		/* FIXME: need GPIO support in the transport class */
		break;

	case SMP_CONF_ROUTE_INFO:
		/* Can't implement; hosts have no routes */
		break;

	case SMP_PHY_CONTROL:
		req->resid_len -= 44;
		if ((int)req->resid_len < 0) {
			req->resid_len = 0;
			error = -EINVAL;
			goto out;
		}
		rsp->resid_len -= 8;
		sas_phy_control(sas_ha, req_data[9], req_data[10],
				req_data[32] >> 4, req_data[33] >> 4,
				resp_data);
		break;

	case SMP_PHY_TEST_FUNCTION:
		/* FIXME: should this be implemented? */
		break;

	default:
		/* probably a 2.0 function */
		break;
	}

	local_irq_disable();
	buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio);
	memcpy(buf, resp_data, blk_rq_bytes(rsp));
	flush_kernel_dcache_page(bio_page(rsp->bio));
	kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0);
	local_irq_enable();

 out:
	kfree(req_data);
	kfree(resp_data);
	return error;
}
Example #23
int fnic_alloc_vnic_resources(struct fnic *fnic)
{
	enum vnic_dev_intr_mode intr_mode;
	unsigned int mask_on_assertion;
	unsigned int interrupt_offset;
	unsigned int error_interrupt_enable;
	unsigned int error_interrupt_offset;
	unsigned int i, cq_index;
	unsigned int wq_copy_cq_desc_count;
	int err;

	intr_mode = vnic_dev_get_intr_mode(fnic->vdev);

	shost_printk(KERN_INFO, fnic->lport->host, "vNIC interrupt mode: %s\n",
		     intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
		     intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
		     intr_mode == VNIC_DEV_INTR_MODE_MSIX ?
		     "MSI-X" : "unknown");

	shost_printk(KERN_INFO, fnic->lport->host, "vNIC resources avail: "
		     "wq %d cp_wq %d raw_wq %d rq %d cq %d intr %d\n",
		     fnic->wq_count, fnic->wq_copy_count, fnic->raw_wq_count,
		     fnic->rq_count, fnic->cq_count, fnic->intr_count);

	/* Allocate Raw WQ used for FCS frames */
	for (i = 0; i < fnic->raw_wq_count; i++) {
		err = vnic_wq_alloc(fnic->vdev, &fnic->wq[i], i,
			fnic->config.wq_enet_desc_count,
			sizeof(struct wq_enet_desc));
		if (err)
			goto err_out_cleanup;
	}

	/* Allocate Copy WQs used for SCSI IOs */
	for (i = 0; i < fnic->wq_copy_count; i++) {
		err = vnic_wq_copy_alloc(fnic->vdev, &fnic->wq_copy[i],
			(fnic->raw_wq_count + i),
			fnic->config.wq_copy_desc_count,
			sizeof(struct fcpio_host_req));
		if (err)
			goto err_out_cleanup;
	}

	/* RQ for receiving FCS frames */
	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_alloc(fnic->vdev, &fnic->rq[i], i,
			fnic->config.rq_desc_count,
			sizeof(struct rq_enet_desc));
		if (err)
			goto err_out_cleanup;
	}

	/* CQ for each RQ */
	for (i = 0; i < fnic->rq_count; i++) {
		cq_index = i;
		err = vnic_cq_alloc(fnic->vdev,
			&fnic->cq[cq_index], cq_index,
			fnic->config.rq_desc_count,
			sizeof(struct cq_enet_rq_desc));
		if (err)
			goto err_out_cleanup;
	}

	/* CQ for each WQ */
	for (i = 0; i < fnic->raw_wq_count; i++) {
		cq_index = fnic->rq_count + i;
		err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index], cq_index,
			fnic->config.wq_enet_desc_count,
			sizeof(struct cq_enet_wq_desc));
		if (err)
			goto err_out_cleanup;
	}

	/* CQ for each COPY WQ */
	wq_copy_cq_desc_count = (fnic->config.wq_copy_desc_count * 3);
	for (i = 0; i < fnic->wq_copy_count; i++) {
		cq_index = fnic->raw_wq_count + fnic->rq_count + i;
		err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index],
			cq_index,
			wq_copy_cq_desc_count,
			sizeof(struct fcpio_fw_req));
		if (err)
			goto err_out_cleanup;
	}

	for (i = 0; i < fnic->intr_count; i++) {
		err = vnic_intr_alloc(fnic->vdev, &fnic->intr[i], i);
		if (err)
			goto err_out_cleanup;
	}

	fnic->legacy_pba = vnic_dev_get_res(fnic->vdev,
				RES_TYPE_INTR_PBA_LEGACY, 0);

	if (!fnic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to hook legacy pba resource\n");
		err = -ENODEV;
		goto err_out_cleanup;
	}

	/*
	 * Init RQ/WQ resources.
	 *
	 * RQ[0 to n-1] point to CQ[0 to n-1]
	 * WQ[0 to m-1] point to CQ[n to n+m-1]
	 * WQ_COPY[0 to k-1] points to CQ[n+m to n+m+k-1]
	 *
	 * Note for copy wq we always initialize with cq_index = 0
	 *
	 * Error interrupt is not enabled for MSI.
	 */
	switch (intr_mode) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSIX:
		error_interrupt_enable = 1;
		error_interrupt_offset = fnic->err_intr_offset;
		break;
	default:
		error_interrupt_enable = 0;
		error_interrupt_offset = 0;
		break;
	}

	for (i = 0; i < fnic->rq_count; i++) {
		cq_index = i;
		vnic_rq_init(&fnic->rq[i],
			     cq_index,
			     error_interrupt_enable,
			     error_interrupt_offset);
	}

	for (i = 0; i < fnic->raw_wq_count; i++) {
		cq_index = i + fnic->rq_count;
		vnic_wq_init(&fnic->wq[i],
			     cq_index,
			     error_interrupt_enable,
			     error_interrupt_offset);
	}

	for (i = 0; i < fnic->wq_copy_count; i++) {
		vnic_wq_copy_init(&fnic->wq_copy[i],
				  0 /* cq_index 0 - always */,
				  error_interrupt_enable,
				  error_interrupt_offset);
	}

	for (i = 0; i < fnic->cq_count; i++) {

		switch (intr_mode) {
		case VNIC_DEV_INTR_MODE_MSIX:
			interrupt_offset = i;
			break;
		default:
			interrupt_offset = 0;
			break;
		}

		vnic_cq_init(&fnic->cq[i],
			0 /* flow_control_enable */,
			1 /* color_enable */,
			0 /* cq_head */,
			0 /* cq_tail */,
			1 /* cq_tail_color */,
			1 /* interrupt_enable */,
			1 /* cq_entry_enable */,
			0 /* cq_message_enable */,
			interrupt_offset,
			0 /* cq_message_addr */);
	}

	/*
	 * Init INTR resources
	 *
	 * mask_on_assertion is not used for INTx due to the level-
	 * triggered nature of INTx
	 */
	switch (intr_mode) {
	case VNIC_DEV_INTR_MODE_MSI:
	case VNIC_DEV_INTR_MODE_MSIX:
		mask_on_assertion = 1;
		break;
	default:
		mask_on_assertion = 0;
		break;
	}

	for (i = 0; i < fnic->intr_count; i++) {
		vnic_intr_init(&fnic->intr[i],
			fnic->config.intr_timer,
			fnic->config.intr_timer_type,
			mask_on_assertion);
	}

	/* init the stats memory by making the first call here */
	err = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vnic_dev_stats_dump failed - x%x\n", err);
		goto err_out_cleanup;
	}

	/* Clear LIF stats */
	vnic_dev_stats_clear(fnic->vdev);

	return 0;

err_out_cleanup:
	fnic_free_vnic_resources(fnic);

	return err;
}
Example #24
static int __devinit fnic_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct Scsi_Host *host;
	struct fc_lport *lp;
	struct fnic *fnic;
	mempool_t *pool;
	int err;
	int i;
	unsigned long flags;

	/*
	 * Allocate SCSI Host and set up association between host,
	 * local port, and fnic
	 */
	host = scsi_host_alloc(&fnic_host_template,
			       sizeof(struct fc_lport) + sizeof(struct fnic));
	if (!host) {
		printk(KERN_ERR PFX "Unable to alloc SCSI host\n");
		err = -ENOMEM;
		goto err_out;
	}
	lp = shost_priv(host);
	lp->host = host;
	fnic = lport_priv(lp);
	fnic->lport = lp;

	snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
		 host->host_no);

	host->transportt = fnic_fc_transport;

	err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unable to alloc shared tag map\n");
		goto err_out_free_hba;
	}

	/* Setup PCI resources */
	pci_set_drvdata(pdev, fnic);

	fnic->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI device, aborting.\n");
		goto err_out_free_hba;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI resources, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device.  Try 40-bit first, and
	 * fail to 32-bit.
	 */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "No usable DMA configuration "
				     "aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "Unable to obtain 32-bit DMA "
				     "for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "Unable to obtain 40-bit DMA "
				     "for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	}

	/* Map vNIC resources from BAR0 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "BAR0 not memory-map'able, aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
	fnic->bar0.bus_addr = pci_resource_start(pdev, 0);
	fnic->bar0.len = pci_resource_len(pdev, 0);

	if (!fnic->bar0.vaddr) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot memory-map BAR0 res hdr, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
	if (!fnic->vdev) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC registration failed, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
			    vnic_dev_open_done, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev open failed, aborting.\n");
		goto err_out_vnic_unregister;
	}

	err = vnic_dev_init(fnic->vdev, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev init failed, aborting.\n");
		goto err_out_dev_close;
	}

	err = vnic_dev_mac_addr(fnic->vdev, fnic->mac_addr);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC get MAC addr failed \n");
		goto err_out_dev_close;
	}

	/* Get vNIC configuration */
	err = fnic_get_vnic_config(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Get vNIC configuration failed, "
			     "aborting.\n");
		goto err_out_dev_close;
	}
	host->max_lun = fnic->config.luns_per_tgt;
	host->max_id = FNIC_MAX_FCP_TARGET;

	fnic_get_res_counts(fnic);

	err = fnic_set_intr_mode(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to set intr mode, "
			     "aborting.\n");
		goto err_out_dev_close;
	}

	err = fnic_request_intr(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unable to request irq.\n");
		goto err_out_clear_intr;
	}

	err = fnic_alloc_vnic_resources(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc vNIC resources, "
			     "aborting.\n");
		goto err_out_free_intr;
	}

	/* initialize all fnic locks */
	spin_lock_init(&fnic->fnic_lock);

	for (i = 0; i < FNIC_WQ_MAX; i++)
		spin_lock_init(&fnic->wq_lock[i]);

	for (i = 0; i < FNIC_WQ_COPY_MAX; i++) {
		spin_lock_init(&fnic->wq_copy_lock[i]);
		fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
		fnic->fw_ack_recd[i] = 0;
		fnic->fw_ack_index[i] = -1;
	}

	for (i = 0; i < FNIC_IO_LOCKS; i++)
		spin_lock_init(&fnic->io_req_lock[i]);

	fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
	if (!fnic->io_req_pool)
		goto err_out_free_resources;

	pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
			      fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
	if (!pool)
		goto err_out_free_ioreq_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;

	pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
			      fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
	if (!pool)
		goto err_out_free_dflt_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;

	/* setup vlan config, hw inserts vlan header */
	fnic->vlan_hw_insert = 1;
	fnic->vlan_id = 0;

	fnic->flogi_oxid = FC_XID_UNKNOWN;
	fnic->flogi = NULL;
	fnic->flogi_resp = NULL;
	fnic->state = FNIC_IN_FC_MODE;

	/* Enable hardware stripping of vlan header on ingress */
	fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);

	/* Setup notification buffer area */
	err = fnic_notify_set(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_max_pool;
	}

	/* Setup notify timer when using MSI interrupts */
	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
		setup_timer(&fnic->notify_timer,
			    fnic_notify_timer, (unsigned long)fnic);

	/* allocate RQ buffers and post them to RQ*/
	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "fnic_alloc_rq_frame can't alloc "
				     "frame\n");
			goto err_out_free_rq_buf;
		}
	}

	/*
	 * Initialization done with PCI system, hardware, firmware.
	 * Add host to SCSI
	 */
	err = scsi_add_host(lp->host, &pdev->dev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic: scsi_add_host failed...exiting\n");
		goto err_out_free_rq_buf;
	}

	/* Start local port initialization */

	lp->link_up = 0;
	lp->tt = fnic_transport_template;

	lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
				    FCPIO_HOST_EXCH_RANGE_START,
				    FCPIO_HOST_EXCH_RANGE_END);
	if (!lp->emp) {
		err = -ENOMEM;
		goto err_out_remove_scsi_host;
	}

	lp->max_retry_count = fnic->config.flogi_retries;
	lp->max_rport_retry_count = fnic->config.plogi_retries;
	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
			      FCP_SPPF_CONF_COMPL);
	if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
		lp->service_params |= FCP_SPPF_RETRY;

	lp->boot_time = jiffies;
	lp->e_d_tov = fnic->config.ed_tov;
	lp->r_a_tov = fnic->config.ra_tov;
	lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
	fc_set_wwnn(lp, fnic->config.node_wwn);
	fc_set_wwpn(lp, fnic->config.port_wwn);

	fc_exch_init(lp);
	fc_lport_init(lp);
	fc_elsct_init(lp);
	fc_rport_init(lp);
	fc_disc_init(lp);

	fc_lport_config(lp);

	if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
		       sizeof(struct fc_frame_header))) {
		err = -EINVAL;
		goto err_out_free_exch_mgr;
	}
	fc_host_maxframe_size(lp->host) = lp->mfs;

	sprintf(fc_host_symbolic_name(lp->host),
		DRV_NAME " v" DRV_VERSION " over %s", fnic->name);

	spin_lock_irqsave(&fnic_list_lock, flags);
	list_add_tail(&fnic->list, &fnic_list);
	spin_unlock_irqrestore(&fnic_list_lock, flags);

	INIT_WORK(&fnic->link_work, fnic_handle_link);
	INIT_WORK(&fnic->frame_work, fnic_handle_frame);
	skb_queue_head_init(&fnic->frame_queue);

	/* Enable all queues */
	for (i = 0; i < fnic->raw_wq_count; i++)
		vnic_wq_enable(&fnic->wq[i]);
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_enable(&fnic->rq[i]);
	for (i = 0; i < fnic->wq_copy_count; i++)
		vnic_wq_copy_enable(&fnic->wq_copy[i]);

	fc_fabric_login(lp);

	vnic_dev_enable(fnic->vdev);
	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_unmask(&fnic->intr[i]);

	fnic_notify_timer_start(fnic);

	return 0;

err_out_free_exch_mgr:
	fc_exch_mgr_free(lp->emp);
err_out_remove_scsi_host:
	fc_remove_host(fnic->lport->host);
	scsi_remove_host(fnic->lport->host);
err_out_free_rq_buf:
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
	vnic_dev_notify_unset(fnic->vdev);
err_out_free_max_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
err_out_free_dflt_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
err_out_free_ioreq_pool:
	mempool_destroy(fnic->io_req_pool);
err_out_free_resources:
	fnic_free_vnic_resources(fnic);
err_out_free_intr:
	fnic_free_intr(fnic);
err_out_clear_intr:
	fnic_clear_intr_mode(fnic);
err_out_dev_close:
	vnic_dev_close(fnic->vdev);
err_out_vnic_unregister:
	vnic_dev_unregister(fnic->vdev);
err_out_iounmap:
	fnic_iounmap(fnic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_hba:
	scsi_host_put(lp->host);
err_out:
	return err;
}
Example #25
File: aha1542.c Project: avagin/linux
static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
{
	struct aha1542_cmd *acmd = scsi_cmd_priv(cmd);
	struct aha1542_hostdata *aha1542 = shost_priv(sh);
	u8 direction;
	u8 target = cmd->device->id;
	u8 lun = cmd->device->lun;
	unsigned long flags;
	int bufflen = scsi_bufflen(cmd);
	int mbo, sg_count;
	struct mailbox *mb = aha1542->mb;
	struct ccb *ccb = aha1542->ccb;

	if (*cmd->cmnd == REQUEST_SENSE) {
		/* Don't do the command - we have the sense data already */
		cmd->result = 0;
		cmd->scsi_done(cmd);
		return 0;
	}
#ifdef DEBUG
	{
		int i = -1;
		if (*cmd->cmnd == READ_10 || *cmd->cmnd == WRITE_10)
			i = xscsi2int(cmd->cmnd + 2);
		else if (*cmd->cmnd == READ_6 || *cmd->cmnd == WRITE_6)
			i = scsi2int(cmd->cmnd + 2);
		shost_printk(KERN_DEBUG, sh, "aha1542_queuecommand: dev %d cmd %02x pos %d len %d",
						target, *cmd->cmnd, i, bufflen);
		print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len);
	}
#endif
	sg_count = scsi_dma_map(cmd);
	if (sg_count) {
		size_t len = sg_count * sizeof(struct chain);

		acmd->chain = kmalloc(len, GFP_DMA);
		if (!acmd->chain)
			goto out_unmap;
		acmd->chain_handle = dma_map_single(sh->dma_dev, acmd->chain,
				len, DMA_TO_DEVICE);
		if (dma_mapping_error(sh->dma_dev, acmd->chain_handle))
			goto out_free_chain;
	}

	/* Use the outgoing mailboxes in a round-robin fashion, because this
	   is how the host adapter will scan for them */

	spin_lock_irqsave(sh->host_lock, flags);
	mbo = aha1542->aha1542_last_mbo_used + 1;
	if (mbo >= AHA1542_MAILBOXES)
		mbo = 0;

	do {
		if (mb[mbo].status == 0 && aha1542->int_cmds[mbo] == NULL)
			break;
		mbo++;
		if (mbo >= AHA1542_MAILBOXES)
			mbo = 0;
	} while (mbo != aha1542->aha1542_last_mbo_used);

	if (mb[mbo].status || aha1542->int_cmds[mbo])
		panic("Unable to find empty mailbox for aha1542.\n");

	aha1542->int_cmds[mbo] = cmd;	/* This will effectively prevent someone else from
					   screwing with this cdb. */

	aha1542->aha1542_last_mbo_used = mbo;

#ifdef DEBUG
	shost_printk(KERN_DEBUG, sh, "Sending command (%d %p)...", mbo, cmd->scsi_done);
#endif

	/* This gets trashed for some reason */
	any2scsi(mb[mbo].ccbptr, aha1542->ccb_handle + mbo * sizeof(*ccb));

	memset(&ccb[mbo], 0, sizeof(struct ccb));

	ccb[mbo].cdblen = cmd->cmd_len;

	direction = 0;
	if (*cmd->cmnd == READ_10 || *cmd->cmnd == READ_6)
		direction = 8;
	else if (*cmd->cmnd == WRITE_10 || *cmd->cmnd == WRITE_6)
		direction = 16;

	memcpy(ccb[mbo].cdb, cmd->cmnd, ccb[mbo].cdblen);

	if (bufflen) {
		struct scatterlist *sg;
		int i;

		ccb[mbo].op = 2;	/* SCSI Initiator Command  w/scatter-gather */
		scsi_for_each_sg(cmd, sg, sg_count, i) {
			any2scsi(acmd->chain[i].dataptr, sg_dma_address(sg));
			any2scsi(acmd->chain[i].datalen, sg_dma_len(sg));
		};
		any2scsi(ccb[mbo].datalen, sg_count * sizeof(struct chain));
		any2scsi(ccb[mbo].dataptr, acmd->chain_handle);
#ifdef DEBUG
		shost_printk(KERN_DEBUG, sh, "cptr %p: ", acmd->chain);
		print_hex_dump_bytes("cptr: ", DUMP_PREFIX_NONE, acmd->chain, 18);
#endif
	} else {
Example #26
static int map_data_for_request(struct vscsifrnt_info *info,
				struct scsi_cmnd *sc,
				struct vscsiif_request *ring_req,
				struct vscsifrnt_shadow *shadow)
{
	grant_ref_t gref_head;
	struct page *page;
	int err, ref, ref_cnt = 0;
	int grant_ro = (sc->sc_data_direction == DMA_TO_DEVICE);
	unsigned int i, off, len, bytes;
	unsigned int data_len = scsi_bufflen(sc);
	unsigned int data_grants = 0, seg_grants = 0;
	struct scatterlist *sg;
	unsigned long mfn;
	struct scsiif_request_segment *seg;

	ring_req->nr_segments = 0;
	if (sc->sc_data_direction == DMA_NONE || !data_len)
		return 0;

	scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i)
		data_grants += PFN_UP(sg->offset + sg->length);

	if (data_grants > VSCSIIF_SG_TABLESIZE) {
		if (data_grants > info->host->sg_tablesize) {
			shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
			     "Unable to map request_buffer for command!\n");
			return -E2BIG;
		}
		seg_grants = vscsiif_grants_sg(data_grants);
		shadow->sg = kcalloc(data_grants,
			sizeof(struct scsiif_request_segment), GFP_ATOMIC);
		if (!shadow->sg)
			return -ENOMEM;
	}
	seg = shadow->sg ? : ring_req->seg;

	err = gnttab_alloc_grant_references(seg_grants + data_grants,
					    &gref_head);
	if (err) {
		kfree(shadow->sg);
		shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
			     "gnttab_alloc_grant_references() error\n");
		return -ENOMEM;
	}

	if (seg_grants) {
		page = virt_to_page(seg);
		off = (unsigned long)seg & ~PAGE_MASK;
		len = sizeof(struct scsiif_request_segment) * data_grants;
		while (len > 0) {
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);

			ref = gnttab_claim_grant_reference(&gref_head);
			BUG_ON(ref == -ENOSPC);

			mfn = pfn_to_mfn(page_to_pfn(page));
			gnttab_grant_foreign_access_ref(ref,
				info->dev->otherend_id, mfn, 1);
			shadow->gref[ref_cnt] = ref;
			ring_req->seg[ref_cnt].gref   = ref;
			ring_req->seg[ref_cnt].offset = (uint16_t)off;
			ring_req->seg[ref_cnt].length = (uint16_t)bytes;

			page++;
			len -= bytes;
			off = 0;
			ref_cnt++;
		}
		BUG_ON(seg_grants < ref_cnt);
		seg_grants = ref_cnt;
	}

	scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i) {
		page = sg_page(sg);
		off = sg->offset;
		len = sg->length;

		while (len > 0 && data_len > 0) {
			/*
			 * sg sends a scatterlist that is larger than
			 * the data_len it wants transferred for certain
			 * IO sizes.
			 */
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
			bytes = min(bytes, data_len);

			ref = gnttab_claim_grant_reference(&gref_head);
			BUG_ON(ref == -ENOSPC);

			mfn = pfn_to_mfn(page_to_pfn(page));
			gnttab_grant_foreign_access_ref(ref,
				info->dev->otherend_id, mfn, grant_ro);

			shadow->gref[ref_cnt] = ref;
			seg->gref   = ref;
			seg->offset = (uint16_t)off;
			seg->length = (uint16_t)bytes;

			page++;
			seg++;
			len -= bytes;
			data_len -= bytes;
			off = 0;
			ref_cnt++;
		}
	}
Example #27
File: fnic_res.c Project: 020gzh/linux
int fnic_alloc_vnic_resources(struct fnic *fnic)
{
	enum vnic_dev_intr_mode intr_mode;
	unsigned int mask_on_assertion;
	unsigned int interrupt_offset;
	unsigned int error_interrupt_enable;
	unsigned int error_interrupt_offset;
	unsigned int i, cq_index;
	unsigned int wq_copy_cq_desc_count;
	int err;

	intr_mode = vnic_dev_get_intr_mode(fnic->vdev);

	shost_printk(KERN_INFO, fnic->lport->host, "vNIC interrupt mode: %s\n",
		     intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
		     intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
		     intr_mode == VNIC_DEV_INTR_MODE_MSIX ?
		     "MSI-X" : "unknown");

	shost_printk(KERN_INFO, fnic->lport->host, "vNIC resources avail: "
		     "wq %d cp_wq %d raw_wq %d rq %d cq %d intr %d\n",
		     fnic->wq_count, fnic->wq_copy_count, fnic->raw_wq_count,
		     fnic->rq_count, fnic->cq_count, fnic->intr_count);

	/* Allocate Raw WQ used for FCS frames */
	for (i = 0; i < fnic->raw_wq_count; i++) {
		err = vnic_wq_alloc(fnic->vdev, &fnic->wq[i], i,
			fnic->config.wq_enet_desc_count,
			sizeof(struct wq_enet_desc));
		if (err)
			goto err_out_cleanup;
	}

	/* Allocate Copy WQs used for SCSI IOs */
	for (i = 0; i < fnic->wq_copy_count; i++) {
		err = vnic_wq_copy_alloc(fnic->vdev, &fnic->wq_copy[i],
			(fnic->raw_wq_count + i),
			fnic->config.wq_copy_desc_count,
			sizeof(struct fcpio_host_req));
		if (err)
			goto err_out_cleanup;
	}

	/* RQ for receiving FCS frames */
	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_alloc(fnic->vdev, &fnic->rq[i], i,
			fnic->config.rq_desc_count,
			sizeof(struct rq_enet_desc));
		if (err)
			goto err_out_cleanup;
	}

	/* CQ for each RQ */
	for (i = 0; i < fnic->rq_count; i++) {
		cq_index = i;
		err = vnic_cq_alloc(fnic->vdev,
			&fnic->cq[cq_index], cq_index,
			fnic->config.rq_desc_count,
			sizeof(struct cq_enet_rq_desc));
		if (err)
			goto err_out_cleanup;
	}

	/* CQ for each WQ */
	for (i = 0; i < fnic->raw_wq_count; i++) {
		cq_index = fnic->rq_count + i;
		err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index], cq_index,
			fnic->config.wq_enet_desc_count,
			sizeof(struct cq_enet_wq_desc));
		if (err)
			goto err_out_cleanup;
	}

	/* CQ for each COPY WQ */
	wq_copy_cq_desc_count = (fnic->config.wq_copy_desc_count * 3);
	for (i = 0; i < fnic->wq_copy_count; i++) {
		cq_index = fnic->raw_wq_count + fnic->rq_count + i;
		err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index],
			cq_index,
			wq_copy_cq_desc_count,
			sizeof(struct fcpio_fw_req));
		if (err)
			goto err_out_cleanup;
	}

	for (i = 0; i < fnic->intr_count; i++) {
		err = vnic_intr_alloc(fnic->vdev, &fnic->intr[i], i);
		if (err)
			goto err_out_cleanup;
	}

	fnic->legacy_pba = vnic_dev_get_res(fnic->vdev,
				RES_TYPE_INTR_PBA_LEGACY, 0);

	if (!fnic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to hook legacy pba resource\n");
		err = -ENODEV;
		goto err_out_cleanup;
	}

	/*
	 * Init RQ/WQ resources.
	 *
	 * RQ[0 to n-1] point to CQ[0 to n-1]
	 * WQ[0 to m-1] point to CQ[n to n+m-1]
	 * WQ_COPY[0 to k-1] points to CQ[n+m to n+m+k-1]
	 *
	 * Note for copy wq we always initialize with cq_index = 0
	 *
	 * Error interrupt is not enabled for MSI.
	 */

	switch (intr_mode) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSIX:
		error_interrupt_enable = 1;
		error_interrupt_offset = fnic->err_intr_offset;
		break;
	default:
		error_interrupt_enable = 0;
		error_interrupt_offset = 0;
		break;
	}

	for (i = 0; i < fnic->rq_count; i++) {
		cq_index = i;
		vnic_rq_init(&fnic->rq[i],
			     cq_index,
			     error_interrupt_enable,
			     error_interrupt_offset);
	}

	for (i = 0; i < fnic->raw_wq_count; i++) {
		cq_index = i + fnic->rq_count;
		vnic_wq_init(&fnic->wq[i],
			     cq_index,
			     error_interrupt_enable,
			     error_interrupt_offset);
	}

	for (i = 0; i < fnic->wq_copy_count; i++) {
		vnic_wq_copy_init(&fnic->wq_copy[i],
				  0 /* cq_index 0 - always */,
				  error_interrupt_enable,
				  error_interrupt_offset);
	}

	for (i = 0; i < fnic->cq_count; i++) {

		switch (intr_mode) {
		case VNIC_DEV_INTR_MODE_MSIX:
			interrupt_offset = i;
			break;
		default:
			interrupt_offset = 0;
			break;
		}

		vnic_cq_init(&fnic->cq[i],
			0 /* flow_control_enable */,
			1 /* color_enable */,
			0 /* cq_head */,
			0 /* cq_tail */,
			1 /* cq_tail_color */,
			1 /* interrupt_enable */,
			1 /* cq_entry_enable */,
			0 /* cq_message_enable */,
			interrupt_offset,
			0 /* cq_message_addr */);
	}

	/*
	 * Init INTR resources
	 *
	 * mask_on_assertion is not used for INTx due to the level-
	 * triggered nature of INTx
	 */

	switch (intr_mode) {
	case VNIC_DEV_INTR_MODE_MSI:
	case VNIC_DEV_INTR_MODE_MSIX:
		mask_on_assertion = 1;
		break;
	default:
		mask_on_assertion = 0;
		break;
	}

	for (i = 0; i < fnic->intr_count; i++) {
		vnic_intr_init(&fnic->intr[i],
			fnic->config.intr_timer,
			fnic->config.intr_timer_type,
			mask_on_assertion);
	}

	/* init the stats memory by making the first call here */
	err = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vnic_dev_stats_dump failed - x%x\n", err);
		goto err_out_cleanup;
	}

	/* Clear LIF stats */
	vnic_dev_stats_clear(fnic->vdev);

	return 0;

err_out_cleanup:
	fnic_free_vnic_resources(fnic);

	return err;
}
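The CQ index arithmetic above is easy to misread, so here is a standalone restatement of the layout described in the init-path comment: RQ completions land on CQ[0..n-1], raw WQ completions on CQ[n..n+m-1], and copy WQ completions on CQ[n+m..n+m+k-1]. The helper below is purely illustrative; the enum and function names are hypothetical and not part of the driver.

/*
 * Illustrative sketch only: recomputes the cq_index assignments made in
 * fnic_alloc_vnic_resources(). q_type and fnic_demo_cq_index are
 * hypothetical names.
 */
#include <stdio.h>

enum q_type { Q_RQ, Q_RAW_WQ, Q_COPY_WQ };

static unsigned int fnic_demo_cq_index(enum q_type type, unsigned int i,
				       unsigned int rq_count,
				       unsigned int raw_wq_count)
{
	switch (type) {
	case Q_RQ:
		return i;				/* CQ[0 .. n-1] */
	case Q_RAW_WQ:
		return rq_count + i;			/* CQ[n .. n+m-1] */
	case Q_COPY_WQ:
		return rq_count + raw_wq_count + i;	/* CQ[n+m .. n+m+k-1] */
	}
	return 0;
}

int main(void)
{
	/* With n = 1 RQ, m = 1 raw WQ, k = 1 copy WQ: copy WQ 0 -> CQ 2 */
	printf("copy WQ 0 completes on CQ %u\n",
	       fnic_demo_cq_index(Q_COPY_WQ, 0, 1, 1));
	return 0;
}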
Example #28
0
File: fnic_res.c Project: 020gzh/linux
int fnic_get_vnic_config(struct fnic *fnic)
{
	struct vnic_fc_config *c = &fnic->config;
	int err;

#define GET_CONFIG(m) \
	do { \
		err = vnic_dev_spec(fnic->vdev, \
				    offsetof(struct vnic_fc_config, m), \
				    sizeof(c->m), &c->m); \
		if (err) { \
			shost_printk(KERN_ERR, fnic->lport->host, \
				     "Error getting %s, %d\n", #m, \
				     err); \
			return err; \
		} \
	} while (0)
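	/*
	 * GET_CONFIG(m) reads a single field of struct vnic_fc_config from
	 * the device via vnic_dev_spec(), locating the field with
	 * offsetof(), and returns from this function on any read error.
	 */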

	GET_CONFIG(node_wwn);
	GET_CONFIG(port_wwn);
	GET_CONFIG(wq_enet_desc_count);
	GET_CONFIG(wq_copy_desc_count);
	GET_CONFIG(rq_desc_count);
	GET_CONFIG(maxdatafieldsize);
	GET_CONFIG(ed_tov);
	GET_CONFIG(ra_tov);
	GET_CONFIG(intr_timer);
	GET_CONFIG(intr_timer_type);
	GET_CONFIG(flags);
	GET_CONFIG(flogi_retries);
	GET_CONFIG(flogi_timeout);
	GET_CONFIG(plogi_retries);
	GET_CONFIG(plogi_timeout);
	GET_CONFIG(io_throttle_count);
	GET_CONFIG(link_down_timeout);
	GET_CONFIG(port_down_timeout);
	GET_CONFIG(port_down_io_retries);
	GET_CONFIG(luns_per_tgt);

	c->wq_enet_desc_count =
		min_t(u32, VNIC_FNIC_WQ_DESCS_MAX,
		      max_t(u32, VNIC_FNIC_WQ_DESCS_MIN,
			    c->wq_enet_desc_count));
	c->wq_enet_desc_count = ALIGN(c->wq_enet_desc_count, 16);

	c->wq_copy_desc_count =
		min_t(u32, VNIC_FNIC_WQ_COPY_DESCS_MAX,
		      max_t(u32, VNIC_FNIC_WQ_COPY_DESCS_MIN,
			    c->wq_copy_desc_count));
	c->wq_copy_desc_count = ALIGN(c->wq_copy_desc_count, 16);

	c->rq_desc_count =
		min_t(u32, VNIC_FNIC_RQ_DESCS_MAX,
		      max_t(u32, VNIC_FNIC_RQ_DESCS_MIN,
			    c->rq_desc_count));
	c->rq_desc_count = ALIGN(c->rq_desc_count, 16);

	c->maxdatafieldsize =
		min_t(u16, VNIC_FNIC_MAXDATAFIELDSIZE_MAX,
		      max_t(u16, VNIC_FNIC_MAXDATAFIELDSIZE_MIN,
			    c->maxdatafieldsize));
	c->ed_tov =
		min_t(u32, VNIC_FNIC_EDTOV_MAX,
		      max_t(u32, VNIC_FNIC_EDTOV_MIN,
			    c->ed_tov));

	c->ra_tov =
		min_t(u32, VNIC_FNIC_RATOV_MAX,
		      max_t(u32, VNIC_FNIC_RATOV_MIN,
			    c->ra_tov));

	c->flogi_retries =
		min_t(u32, VNIC_FNIC_FLOGI_RETRIES_MAX, c->flogi_retries);

	c->flogi_timeout =
		min_t(u32, VNIC_FNIC_FLOGI_TIMEOUT_MAX,
		      max_t(u32, VNIC_FNIC_FLOGI_TIMEOUT_MIN,
			    c->flogi_timeout));

	c->plogi_retries =
		min_t(u32, VNIC_FNIC_PLOGI_RETRIES_MAX, c->plogi_retries);

	c->plogi_timeout =
		min_t(u32, VNIC_FNIC_PLOGI_TIMEOUT_MAX,
		      max_t(u32, VNIC_FNIC_PLOGI_TIMEOUT_MIN,
			    c->plogi_timeout));

	c->io_throttle_count =
		min_t(u32, VNIC_FNIC_IO_THROTTLE_COUNT_MAX,
		      max_t(u32, VNIC_FNIC_IO_THROTTLE_COUNT_MIN,
			    c->io_throttle_count));

	c->link_down_timeout =
		min_t(u32, VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX,
		      c->link_down_timeout);

	c->port_down_timeout =
		min_t(u32, VNIC_FNIC_PORT_DOWN_TIMEOUT_MAX,
		      c->port_down_timeout);

	c->port_down_io_retries =
		min_t(u32, VNIC_FNIC_PORT_DOWN_IO_RETRIES_MAX,
		      c->port_down_io_retries);

	c->luns_per_tgt =
		min_t(u32, VNIC_FNIC_LUNS_PER_TARGET_MAX,
		      max_t(u32, VNIC_FNIC_LUNS_PER_TARGET_MIN,
			    c->luns_per_tgt));

	c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
	/* intr_timer_type is used as read from the firmware */

	shost_printk(KERN_INFO, fnic->lport->host,
		     "vNIC MAC addr %pM "
		     "wq/wq_copy/rq %d/%d/%d\n",
		     fnic->ctlr.ctl_src_addr,
		     c->wq_enet_desc_count, c->wq_copy_desc_count,
		     c->rq_desc_count);
	shost_printk(KERN_INFO, fnic->lport->host,
		     "vNIC node wwn %llx port wwn %llx\n",
		     c->node_wwn, c->port_wwn);
	shost_printk(KERN_INFO, fnic->lport->host,
		     "vNIC ed_tov %d ra_tov %d\n",
		     c->ed_tov, c->ra_tov);
	shost_printk(KERN_INFO, fnic->lport->host,
		     "vNIC mtu %d intr timer %d\n",
		     c->maxdatafieldsize, c->intr_timer);
	shost_printk(KERN_INFO, fnic->lport->host,
		     "vNIC flags 0x%x luns per tgt %d\n",
		     c->flags, c->luns_per_tgt);
	shost_printk(KERN_INFO, fnic->lport->host,
		     "vNIC flogi_retries %d flogi timeout %d\n",
		     c->flogi_retries, c->flogi_timeout);
	shost_printk(KERN_INFO, fnic->lport->host,
		     "vNIC plogi retries %d plogi timeout %d\n",
		     c->plogi_retries, c->plogi_timeout);
	shost_printk(KERN_INFO, fnic->lport->host,
		     "vNIC io throttle count %d link dn timeout %d\n",
		     c->io_throttle_count, c->link_down_timeout);
	shost_printk(KERN_INFO, fnic->lport->host,
		     "vNIC port dn io retries %d port dn timeout %d\n",
		     c->port_down_io_retries, c->port_down_timeout);

	return 0;
}
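Each min_t()/max_t() pair in the function above is the standard clamp idiom. Where clamp_t() from <linux/kernel.h> is available, the same bound can be written in one call; a minimal sketch of the equivalence, not a change the driver makes:

/* Sketch only: clamp_t(type, val, lo, hi) expands to
 * min_t(type, max_t(type, val, lo), hi), i.e. the pattern above. */
c->rq_desc_count = clamp_t(u32, c->rq_desc_count,
			   VNIC_FNIC_RQ_DESCS_MIN,
			   VNIC_FNIC_RQ_DESCS_MAX);
c->rq_desc_count = ALIGN(c->rq_desc_count, 16);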
Example #29
0
int fnic_request_intr(struct fnic *fnic)
{
	int err = 0;
	int i;

	switch (vnic_dev_get_intr_mode(fnic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:
		err = request_irq(fnic->pdev->irq, &fnic_isr_legacy,
				  IRQF_SHARED, DRV_NAME, fnic);
		break;

	case VNIC_DEV_INTR_MODE_MSI:
		err = request_irq(fnic->pdev->irq, &fnic_isr_msi,
				  0, fnic->name, fnic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		sprintf(fnic->msix[FNIC_MSIX_RQ].devname,
			"%.11s-fcs-rq", fnic->name);
		fnic->msix[FNIC_MSIX_RQ].isr = fnic_isr_msix_rq;
		fnic->msix[FNIC_MSIX_RQ].devid = fnic;

		sprintf(fnic->msix[FNIC_MSIX_WQ].devname,
			"%.11s-fcs-wq", fnic->name);
		fnic->msix[FNIC_MSIX_WQ].isr = fnic_isr_msix_wq;
		fnic->msix[FNIC_MSIX_WQ].devid = fnic;

		sprintf(fnic->msix[FNIC_MSIX_WQ_COPY].devname,
			"%.11s-scsi-wq", fnic->name);
		fnic->msix[FNIC_MSIX_WQ_COPY].isr = fnic_isr_msix_wq_copy;
		fnic->msix[FNIC_MSIX_WQ_COPY].devid = fnic;

		sprintf(fnic->msix[FNIC_MSIX_ERR_NOTIFY].devname,
			"%.11s-err-notify", fnic->name);
		fnic->msix[FNIC_MSIX_ERR_NOTIFY].isr =
			fnic_isr_msix_err_notify;
		fnic->msix[FNIC_MSIX_ERR_NOTIFY].devid = fnic;

		for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) {
			err = request_irq(fnic->msix_entry[i].vector,
					  fnic->msix[i].isr, 0,
					  fnic->msix[i].devname,
					  fnic->msix[i].devid);
			if (err) {
				shost_printk(KERN_ERR, fnic->lport->host,
					     "MSIX: request_irq"
					     " failed %d\n", err);
				fnic_free_intr(fnic);
				break;
			}
			fnic->msix[i].requested = 1;
		}
		break;

	default:
		break;
	}

	return err;
}
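One sharp edge in the MSI-X branch above: sprintf() with "%.11s" truncates only the interface name, so it is the fixed size of the devname buffer that must absorb the longest suffix ("-err-notify"). A bounded alternative, assuming devname is the fixed-size array used above, might look like:

/* Sketch only: snprintf() bounds the whole string against the
 * destination buffer, so no suffix can overrun devname. */
snprintf(fnic->msix[FNIC_MSIX_ERR_NOTIFY].devname,
	 sizeof(fnic->msix[FNIC_MSIX_ERR_NOTIFY].devname),
	 "%.11s-err-notify", fnic->name);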
Example #30
0
File: fnic_main.c Project: 7799/linux
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct Scsi_Host *host;
	struct fc_lport *lp;
	struct fnic *fnic;
	mempool_t *pool;
	int err;
	int i;
	unsigned long flags;

	/*
	 * Allocate SCSI Host and set up association between host,
	 * local port, and fnic
	 */
	lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic));
	if (!lp) {
		printk(KERN_ERR PFX "Unable to alloc libfc local port\n");
		err = -ENOMEM;
		goto err_out;
	}
	host = lp->host;
	fnic = lport_priv(lp);
	fnic->lport = lp;
	fnic->ctlr.lp = lp;

	snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
		 host->host_no);

	host->transportt = fnic_fc_transport;

	err = fnic_stats_debugfs_init(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
				"Failed to initialize debugfs for stats\n");
		fnic_stats_debugfs_remove(fnic);
	}

	/* Setup PCI resources */
	pci_set_drvdata(pdev, fnic);

	fnic->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI device, aborting.\n");
		goto err_out_free_hba;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI resources, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query the PCI controller for the device's DMA addressing
	 * limitation.  Try a 64-bit mask first, and fall back to
	 * 32-bit on failure.
	 */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "No usable DMA configuration "
				     "aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "Unable to obtain 32-bit DMA "
				     "for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "Unable to obtain 64-bit DMA "
				     "for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	}

	/* Map vNIC resources from BAR0 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "BAR0 not memory-map'able, aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
	fnic->bar0.bus_addr = pci_resource_start(pdev, 0);
	fnic->bar0.len = pci_resource_len(pdev, 0);

	if (!fnic->bar0.vaddr) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot memory-map BAR0 res hdr, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
	if (!fnic->vdev) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC registration failed, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
			    vnic_dev_open_done, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev open failed, aborting.\n");
		goto err_out_vnic_unregister;
	}

	err = vnic_dev_init(fnic->vdev, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev init failed, aborting.\n");
		goto err_out_dev_close;
	}

	err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC get MAC addr failed \n");
		goto err_out_dev_close;
	}
	/* set data_src for point-to-point mode and to keep it non-zero */
	memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN);

	/* Get vNIC configuration */
	err = fnic_get_vnic_config(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Get vNIC configuration failed, "
			     "aborting.\n");
		goto err_out_dev_close;
	}

	/* Configure maximum outstanding IO requests */
	if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) {
		host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
					max_t(u32, FNIC_MIN_IO_REQ,
					fnic->config.io_throttle_count));
	}
	fnic->fnic_max_tag_id = host->can_queue;

	err = scsi_init_shared_tag_map(host, fnic->fnic_max_tag_id);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			  "Unable to alloc shared tag map\n");
		goto err_out_dev_close;
	}

	host->max_lun = fnic->config.luns_per_tgt;
	host->max_id = FNIC_MAX_FCP_TARGET;
	host->max_cmd_len = FCOE_MAX_CMD_LEN;

	fnic_get_res_counts(fnic);

	err = fnic_set_intr_mode(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to set intr mode, "
			     "aborting.\n");
		goto err_out_dev_close;
	}

	err = fnic_alloc_vnic_resources(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc vNIC resources, "
			     "aborting.\n");
		goto err_out_clear_intr;
	}


	/* initialize all fnic locks */
	spin_lock_init(&fnic->fnic_lock);

	for (i = 0; i < FNIC_WQ_MAX; i++)
		spin_lock_init(&fnic->wq_lock[i]);

	for (i = 0; i < FNIC_WQ_COPY_MAX; i++) {
		spin_lock_init(&fnic->wq_copy_lock[i]);
		fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
		fnic->fw_ack_recd[i] = 0;
		fnic->fw_ack_index[i] = -1;
	}

	for (i = 0; i < FNIC_IO_LOCKS; i++)
		spin_lock_init(&fnic->io_req_lock[i]);

	fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
	if (!fnic->io_req_pool)
		goto err_out_free_resources;

	pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
	if (!pool)
		goto err_out_free_ioreq_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;

	pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
	if (!pool)
		goto err_out_free_dflt_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;

	/* setup vlan config, hw inserts vlan header */
	fnic->vlan_hw_insert = 1;
	fnic->vlan_id = 0;

	/* Initialize the FIP fcoe_ctrl struct */
	fnic->ctlr.send = fnic_eth_send;
	fnic->ctlr.update_mac = fnic_update_mac;
	fnic->ctlr.get_src_addr = fnic_get_mac;
	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
		shost_printk(KERN_INFO, fnic->lport->host,
			     "firmware supports FIP\n");
		/* enable directed and multicast */
		vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
		vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
		vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
		fnic->set_vlan = fnic_set_vlan;
		fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
		setup_timer(&fnic->fip_timer, fnic_fip_notify_timer,
							(unsigned long)fnic);
		spin_lock_init(&fnic->vlans_lock);
		INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
		INIT_WORK(&fnic->event_work, fnic_handle_event);
		skb_queue_head_init(&fnic->fip_frame_queue);
		INIT_LIST_HEAD(&fnic->evlist);
		INIT_LIST_HEAD(&fnic->vlans);
	} else {
		shost_printk(KERN_INFO, fnic->lport->host,
			     "firmware uses non-FIP mode\n");
		fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP);
	}
	fnic->state = FNIC_IN_FC_MODE;

	atomic_set(&fnic->in_flight, 0);
	fnic->state_flags = FNIC_FLAGS_NONE;

	/* Enable hardware stripping of vlan header on ingress */
	fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);

	/* Setup notification buffer area */
	err = fnic_notify_set(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_max_pool;
	}

	/* Setup notify timer when using MSI interrupts */
	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
		setup_timer(&fnic->notify_timer,
			    fnic_notify_timer, (unsigned long)fnic);

	/* Allocate RQ buffers and post them to the RQs */
	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "fnic_alloc_rq_frame can't alloc "
				     "frame\n");
			goto err_out_free_rq_buf;
		}
	}

	/*
	 * Initialization done with PCI system, hardware, firmware.
	 * Add host to SCSI
	 */
	err = scsi_add_host(lp->host, &pdev->dev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic: scsi_add_host failed...exiting\n");
		goto err_out_free_rq_buf;
	}

	/* Start local port initialization */

	lp->link_up = 0;

	lp->max_retry_count = fnic->config.flogi_retries;
	lp->max_rport_retry_count = fnic->config.plogi_retries;
	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
			      FCP_SPPF_CONF_COMPL);
	if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
		lp->service_params |= FCP_SPPF_RETRY;

	lp->boot_time = jiffies;
	lp->e_d_tov = fnic->config.ed_tov;
	lp->r_a_tov = fnic->config.ra_tov;
	lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
	fc_set_wwnn(lp, fnic->config.node_wwn);
	fc_set_wwpn(lp, fnic->config.port_wwn);

	fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0);

	if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START,
			       FCPIO_HOST_EXCH_RANGE_END, NULL)) {
		err = -ENOMEM;
		goto err_out_remove_scsi_host;
	}

	fc_lport_init_stats(lp);
	fnic->stats_reset_time = jiffies;

	fc_lport_config(lp);

	if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
		       sizeof(struct fc_frame_header))) {
		err = -EINVAL;
		goto err_out_free_exch_mgr;
	}
	fc_host_maxframe_size(lp->host) = lp->mfs;
	fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000;

	sprintf(fc_host_symbolic_name(lp->host),
		DRV_NAME " v" DRV_VERSION " over %s", fnic->name);

	spin_lock_irqsave(&fnic_list_lock, flags);
	list_add_tail(&fnic->list, &fnic_list);
	spin_unlock_irqrestore(&fnic_list_lock, flags);

	INIT_WORK(&fnic->link_work, fnic_handle_link);
	INIT_WORK(&fnic->frame_work, fnic_handle_frame);
	skb_queue_head_init(&fnic->frame_queue);
	skb_queue_head_init(&fnic->tx_queue);

	/* Enable all queues */
	for (i = 0; i < fnic->raw_wq_count; i++)
		vnic_wq_enable(&fnic->wq[i]);
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_enable(&fnic->rq[i]);
	for (i = 0; i < fnic->wq_copy_count; i++)
		vnic_wq_copy_enable(&fnic->wq_copy[i]);

	fc_fabric_login(lp);

	vnic_dev_enable(fnic->vdev);

	err = fnic_request_intr(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unable to request irq.\n");
		goto err_out_free_exch_mgr;
	}

	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_unmask(&fnic->intr[i]);

	fnic_notify_timer_start(fnic);

	return 0;

err_out_free_exch_mgr:
	fc_exch_mgr_free(lp);
err_out_remove_scsi_host:
	fc_remove_host(lp->host);
	scsi_remove_host(lp->host);
err_out_free_rq_buf:
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
	vnic_dev_notify_unset(fnic->vdev);
err_out_free_max_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
err_out_free_dflt_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
err_out_free_ioreq_pool:
	mempool_destroy(fnic->io_req_pool);
err_out_free_resources:
	fnic_free_vnic_resources(fnic);
err_out_clear_intr:
	fnic_clear_intr_mode(fnic);
err_out_dev_close:
	vnic_dev_close(fnic->vdev);
err_out_vnic_unregister:
	vnic_dev_unregister(fnic->vdev);
err_out_iounmap:
	fnic_iounmap(fnic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_hba:
	fnic_stats_debugfs_remove(fnic);
	scsi_host_put(lp->host);
err_out:
	return err;
}
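fnic_probe() unwinds errors with the usual kernel goto ladder: each failure jumps to a label that releases everything acquired so far, in reverse acquisition order, so there is exactly one exit path for each amount of held state. A minimal, self-contained sketch of the pattern (step_a/step_b/undo_a are hypothetical stand-ins, not driver functions):

/* Illustrative sketch only: the goto-unwind idiom used by fnic_probe(). */
static int step_a(void) { return 0; }	/* acquire first resource */
static int step_b(void) { return -1; }	/* acquire second resource (fails) */
static void undo_a(void) { }		/* release first resource */

static int demo_probe(void)
{
	int err;

	err = step_a();
	if (err)
		goto err_out;		/* nothing acquired yet */

	err = step_b();
	if (err)
		goto err_undo_a;	/* must undo step_a on the way out */

	return 0;

err_undo_a:
	undo_a();
err_out:
	return err;
}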