Example #1
void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
	struct fc_frame *fp = buf->os_buf;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);

	dev_kfree_skb(fp_skb(fp));
	buf->os_buf = NULL;
}
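A usage note (inferred from Example #6 below, where this callback is registered): fnic_free_wq_buf() is the per-buffer cleanup hook passed to vnic_wq_clean() when a raw work queue is drained:

	vnic_wq_clean(&fnic->wq[i], fnic_free_wq_buf);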
Example #2
static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
					struct cq_desc *cq_desc,
					struct vnic_wq_buf *buf, void *opaque)
{
	struct sk_buff *skb = buf->os_buf;
	struct fc_frame *fp = (struct fc_frame *)skb;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);

	pci_unmap_single(fnic->pdev, buf->dma_addr,
			 buf->len, PCI_DMA_TODEVICE);
	dev_kfree_skb_irq(fp_skb(fp));
	buf->os_buf = NULL;
}
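Note that, unlike fnic_free_wq_buf() in Example #1, this callback runs from a send-completion handler in interrupt context, which is why it uses dev_kfree_skb_irq() rather than dev_kfree_skb().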
Example #3
struct fc_frame *fc_frame_alloc_fill(struct fc_lport *lp, size_t payload_len)
{
	struct fc_frame *fp;
	size_t fill;

	fill = payload_len % 4;
	if (fill != 0)
		fill = 4 - fill;
	fp = _fc_frame_alloc(payload_len + fill);
	if (fp) {
		memset((char *) fr_hdr(fp) + payload_len, 0, fill);
		/* trim is OK, we just allocated it so there are no fragments */
		skb_trim(fp_skb(fp),
			 payload_len + sizeof(struct fc_frame_header));
	}
	return fp;
}
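The fill arithmetic rounds payload_len up to the next multiple of four, since frames are sent in whole 32-bit words. A compact equivalent (a sketch, not the driver's code) using the kernel's ALIGN() helper:

	size_t fill = ALIGN(payload_len, 4) - payload_len;	/* e.g. 13 -> 3, 12 -> 0 */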
Example #4
/*
 * Deliver read data back to initiator.
 * XXX TBD handle resource problems later.
 */
int ft_queue_data_in(struct se_cmd *se_cmd)
{
	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
	struct fc_frame *fp = NULL;
	struct fc_exch *ep;
	struct fc_lport *lport;
	struct scatterlist *sg = NULL;
	size_t remaining;
	u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
	u32 mem_off = 0;
	u32 fh_off = 0;
	u32 frame_off = 0;
	size_t frame_len = 0;
	size_t mem_len = 0;
	size_t tlen;
	size_t off_in_page;
	struct page *page = NULL;
	int use_sg;
	int error;
	void *page_addr;
	void *from;
	void *to = NULL;

	if (cmd->aborted)
		return 0;

	if (se_cmd->scsi_status == SAM_STAT_TASK_SET_FULL)
		goto queue_status;

	ep = fc_seq_exch(cmd->seq);
	lport = ep->lp;
	cmd->seq = lport->tt.seq_start_next(cmd->seq);

	remaining = se_cmd->data_length;

	/*
	 * Setup to use first mem list entry, unless no data.
	 */
	BUG_ON(remaining && !se_cmd->t_data_sg);
	if (remaining) {
		sg = se_cmd->t_data_sg;
		mem_len = sg->length;
		mem_off = sg->offset;
		page = sg_page(sg);
	}

	/* no scatter/gather in skb for odd word length due to fc_seq_send() */
	use_sg = !(remaining % 4);

	while (remaining) {
		struct fc_seq *seq = cmd->seq;

		if (!seq) {
			pr_debug("%s: Command aborted, xid 0x%x\n",
				 __func__, ep->xid);
			break;
		}
		if (!mem_len) {
			sg = sg_next(sg);
			mem_len = min((size_t)sg->length, remaining);
			mem_off = sg->offset;
			page = sg_page(sg);
		}
		if (!frame_len) {
			/*
			 * If the lport has the Large Send Offload (LSO)
			 * capability, allow 'frame_len' to be as large as
			 * 'lso_max' when the indicated transfer length is
			 * >= lport->lso_max.
			 */
			frame_len = (lport->seq_offload) ? lport->lso_max :
							  cmd->sess->max_frame;
			frame_len = min(frame_len, remaining);
			fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
			if (!fp)
				return -ENOMEM;
			to = fc_frame_payload_get(fp, 0);
			fh_off = frame_off;
			frame_off += frame_len;
			/*
			 * Set the frame's max payload, which the base
			 * driver uses to tell the HW the maximum frame
			 * size, so that the HW can fragment appropriately
			 * based on the "gso_max_size" of the underlying
			 * netdev.
			 */
			fr_max_payload(fp) = cmd->sess->max_frame;
		}
		tlen = min(mem_len, frame_len);

		if (use_sg) {
			off_in_page = mem_off;
			BUG_ON(!page);
			get_page(page);
			skb_fill_page_desc(fp_skb(fp),
					   skb_shinfo(fp_skb(fp))->nr_frags,
					   page, off_in_page, tlen);
			fr_len(fp) += tlen;
			fp_skb(fp)->data_len += tlen;
			fp_skb(fp)->truesize +=
					PAGE_SIZE << compound_order(page);
		} else {
			BUG_ON(!page);
			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
			page_addr = from;
			from += mem_off & ~PAGE_MASK;
			tlen = min(tlen, (size_t)(PAGE_SIZE -
						(mem_off & ~PAGE_MASK)));
			memcpy(to, from, tlen);
			kunmap_atomic(page_addr);
			to += tlen;
		}

		mem_off += tlen;
		mem_len -= tlen;
		frame_len -= tlen;
		remaining -= tlen;

		if (frame_len &&
		    (skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN))
			continue;
		if (!remaining)
			f_ctl |= FC_FC_END_SEQ;
		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
			       FC_TYPE_FCP, f_ctl, fh_off);
		error = lport->tt.seq_send(lport, seq, fp);
		if (error) {
			pr_info_ratelimited("%s: Failed to send frame %p, "
						"xid <0x%x>, remaining %zu, "
						"lso_max <0x%x>\n",
						__func__, fp, ep->xid,
						remaining, lport->lso_max);
			/*
			 * Go ahead and set TASK_SET_FULL status, ignoring
			 * the rest of the DataIN, and immediately attempt
			 * to send the response via ft_queue_status() in
			 * order to notify the initiator that it should
			 * reduce its per-LUN queue_depth.
			 */
			se_cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
			break;
		}
	}
queue_status:
	return ft_queue_status(se_cmd);
}
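To make the frame sizing concrete, consider a hypothetical transfer (numbers illustrative only, assuming the scatterlist fits within FC_FRAME_SG_LEN fragments per frame): with remaining = 96 KiB, seq_offload set and lso_max = 64 KiB, the loop emits one 64 KiB LSO frame followed by one 32 KiB frame; with seq_offload clear and max_frame = 2048, it emits 48 frames of 2 KiB each. In both cases FC_FC_END_SEQ is set only on the frame that exhausts 'remaining'.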
Example #5
/*
 * Send read data back to initiator.
 */
int ft_send_read_data(struct scst_cmd *cmd)
{
	struct ft_cmd *fcmd;
	struct fc_frame *fp = NULL;
	struct fc_exch *ep;
	struct fc_lport *lport;
	size_t remaining;
	u32 fh_off = 0;
	u32 frame_off;
	size_t frame_len = 0;
	size_t mem_len;
	u32 mem_off;
	size_t tlen;
	struct page *page;
	int use_sg;
	int error;
	void *to = NULL;
	u8 *from = NULL;
	int loop_limit = 10000;

	fcmd = scst_cmd_get_tgt_priv(cmd);
	ep = fc_seq_exch(fcmd->seq);
	lport = ep->lp;

	frame_off = fcmd->read_data_len;
	tlen = scst_cmd_get_resp_data_len(cmd);
	FT_IO_DBG("oid %x oxid %x resp_len %zd frame_off %u\n",
		  ep->oid, ep->oxid, tlen, frame_off);
	if (tlen <= frame_off)
		return SCST_TGT_RES_SUCCESS;
	remaining = tlen - frame_off;
	if (remaining > UINT_MAX)
		FT_ERR("oid %x oxid %x resp_len %zd frame_off %u\n",
		       ep->oid, ep->oxid, tlen, frame_off);

	mem_len = scst_get_buf_first(cmd, &from);
	mem_off = 0;
	if (!mem_len) {
		FT_IO_DBG("mem_len 0\n");
		return SCST_TGT_RES_SUCCESS;
	}
	FT_IO_DBG("sid %x oxid %x mem_len %zd frame_off %u remaining %zd\n",
		 ep->sid, ep->oxid, mem_len, frame_off, remaining);

	/*
	 * If we've already transferred some of the data, skip through
	 * the buffer over the data already sent and continue with the
	 * same sequence.  Otherwise, get a new sequence for the data.
	 */
	if (frame_off) {
		tlen = frame_off;
		while (mem_len <= tlen) {
			tlen -= mem_len;
			scst_put_buf(cmd, from);
			mem_len = scst_get_buf_next(cmd, &from);
			if (!mem_len)
				return SCST_TGT_RES_SUCCESS;
		}
		mem_len -= tlen;
		mem_off = tlen;
	} else
		fcmd->seq = lport->tt.seq_start_next(fcmd->seq);

	/* no scatter/gather in skb for odd word length due to fc_seq_send() */
	use_sg = !(remaining % 4) && lport->sg_supp;

	while (remaining) {
		if (!loop_limit) {
			FT_ERR("hit loop limit.  remaining %zx mem_len %zx "
			       "frame_len %zx tlen %zx\n",
			       remaining, mem_len, frame_len, tlen);
			break;
		}
		loop_limit--;
		if (!mem_len) {
			scst_put_buf(cmd, from);
			mem_len = scst_get_buf_next(cmd, &from);
			mem_off = 0;
			if (!mem_len) {
				FT_ERR("mem_len 0 from get_buf_next\n");
				break;
			}
		}
		if (!frame_len) {
			frame_len = fcmd->max_lso_payload;
			frame_len = min(frame_len, remaining);
			fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
			if (!fp) {
				FT_IO_DBG("frame_alloc failed. "
					  "use_sg %d frame_len %zd\n",
					  use_sg, frame_len);
				break;
			}
			fr_max_payload(fp) = fcmd->max_payload;
			to = fc_frame_payload_get(fp, 0);
			fh_off = frame_off;
		}
		tlen = min(mem_len, frame_len);
		BUG_ON(!tlen);
		BUG_ON(tlen > remaining);
		BUG_ON(tlen > mem_len);
		BUG_ON(tlen > frame_len);

		if (use_sg) {
			page = virt_to_page(from + mem_off);
			get_page(page);
			tlen = min_t(size_t, tlen,
				     PAGE_SIZE - (mem_off & ~PAGE_MASK));
			skb_fill_page_desc(fp_skb(fp),
					   skb_shinfo(fp_skb(fp))->nr_frags,
					   page, offset_in_page(from + mem_off),
					   tlen);
			fr_len(fp) += tlen;
			fp_skb(fp)->data_len += tlen;
			fp_skb(fp)->truesize +=
					PAGE_SIZE << compound_order(page);
			frame_len -= tlen;
			if (skb_shinfo(fp_skb(fp))->nr_frags >= FC_FRAME_SG_LEN)
				frame_len = 0;
		} else {
			memcpy(to, from + mem_off, tlen);
			to += tlen;
			frame_len -= tlen;
		}

		mem_len -= tlen;
		mem_off += tlen;
		remaining -= tlen;
		frame_off += tlen;

		if (frame_len)
			continue;
		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
			       FC_TYPE_FCP,
			       remaining ? (FC_FC_EX_CTX | FC_FC_REL_OFF) :
			       (FC_FC_EX_CTX | FC_FC_REL_OFF | FC_FC_END_SEQ),
			       fh_off);
		error = lport->tt.seq_send(lport, fcmd->seq, fp);
		if (error) {
			WARN_ON(1);
			/* XXX For now, initiator will retry */
		} else
			fcmd->read_data_len = frame_off;
	}
	if (mem_len)
		scst_put_buf(cmd, from);
	if (remaining) {
		FT_IO_DBG("remaining read data %zd\n", remaining);
		return SCST_TGT_RES_QUEUE_FULL;
	}
	return SCST_TGT_RES_SUCCESS;
}
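The frame_off skip-through near the top is what makes this function restartable: if an earlier call returned SCST_TGT_RES_QUEUE_FULL after sending part of the data, fcmd->read_data_len records how far it got, and the next call walks the SCST buffer segments past that offset and resumes within the same sequence.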
Example #6
static int fnic_cleanup(struct fnic *fnic)
{
	unsigned int i;
	int err;
	unsigned long flags;
	struct fc_frame *flogi = NULL;
	struct fc_frame *flogi_resp = NULL;

	vnic_dev_disable(fnic->vdev);
	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_mask(&fnic->intr[i]);

	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_disable(&fnic->rq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < fnic->raw_wq_count; i++) {
		err = vnic_wq_disable(&fnic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < fnic->wq_copy_count; i++) {
		err = vnic_wq_copy_disable(&fnic->wq_copy[i]);
		if (err)
			return err;
	}

	/* Clean up completed IOs and FCS frames */
	fnic_wq_copy_cmpl_handler(fnic, -1);
	fnic_wq_cmpl_handler(fnic, -1);
	fnic_rq_cmpl_handler(fnic, -1);

	/* Clean up the IOs and FCS frames that have not completed */
	for (i = 0; i < fnic->raw_wq_count; i++)
		vnic_wq_clean(&fnic->wq[i], fnic_free_wq_buf);
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
	for (i = 0; i < fnic->wq_copy_count; i++)
		vnic_wq_copy_clean(&fnic->wq_copy[i],
				   fnic_wq_copy_cleanup_handler);

	for (i = 0; i < fnic->cq_count; i++)
		vnic_cq_clean(&fnic->cq[i]);
	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_clean(&fnic->intr[i]);

	/*
	 * Remove cached flogi and flogi resp frames, if any.
	 * These frames are not in any queue, and therefore queue
	 * cleanup does not clean them. So clean them explicitly.
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	flogi = fnic->flogi;
	fnic->flogi = NULL;
	flogi_resp = fnic->flogi_resp;
	fnic->flogi_resp = NULL;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (flogi)
		dev_kfree_skb(fp_skb(flogi));

	if (flogi_resp)
		dev_kfree_skb(fp_skb(flogi_resp));

	mempool_destroy(fnic->io_req_pool);
	for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
		mempool_destroy(fnic->io_sgl_pool[i]);

	return 0;
}
Example #7
/*
 * fnic_send
 * Routine to send a raw frame
 */
int fnic_send(struct fc_lport *lp, struct fc_frame *fp)
{
	struct fnic *fnic = lport_priv(lp);
	struct fc_frame_header *fh;
	int ret = 0;
	enum fnic_state old_state;
	unsigned long flags;
	struct fc_frame *old_flogi = NULL;
	struct fc_frame *old_flogi_resp = NULL;

	if (fnic->in_remove) {
		dev_kfree_skb(fp_skb(fp));
		ret = -1;
		goto fnic_send_end;
	}

	fh = fc_frame_header_get(fp);
	/* if not a Flogi frame, send it out; this is the common case */
	if (!is_flogi_frame(fh))
		return fnic_send_frame(fnic, fp);

	/* Flogi frame, now enter the state machine */

	spin_lock_irqsave(&fnic->fnic_lock, flags);
again:
	/* Get any old cached frames, free them after dropping lock */
	old_flogi = fnic->flogi;
	fnic->flogi = NULL;
	old_flogi_resp = fnic->flogi_resp;
	fnic->flogi_resp = NULL;

	fnic->flogi_oxid = FC_XID_UNKNOWN;

	old_state = fnic->state;
	switch (old_state) {
	case FNIC_IN_FC_MODE:
	case FNIC_IN_ETH_TRANS_FC_MODE:
	default:
		fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
		vnic_dev_del_addr(fnic->vdev, fnic->data_src_addr);
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		if (old_flogi) {
			dev_kfree_skb(fp_skb(old_flogi));
			old_flogi = NULL;
		}
		if (old_flogi_resp) {
			dev_kfree_skb(fp_skb(old_flogi_resp));
			old_flogi_resp = NULL;
		}

		ret = fnic_fw_reset_handler(fnic);

		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE)
			goto again;
		if (ret) {
			fnic->state = old_state;
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb(fp_skb(fp));
			goto fnic_send_end;
		}
		old_flogi = fnic->flogi;
		fnic->flogi = fp;
		fnic->flogi_oxid = ntohs(fh->fh_ox_id);
		old_flogi_resp = fnic->flogi_resp;
		fnic->flogi_resp = NULL;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		break;

	case FNIC_IN_FC_TRANS_ETH_MODE:
		/*
		 * A reset is pending with the firmware. Store the flogi
		 * and its oxid. The transition out of this state happens
		 * only when the firmware completes the reset, either with
		 * success or failure. On success, transition to
		 * FNIC_IN_ETH_MODE; on failure, transition back to
		 * FNIC_IN_FC_MODE.
		 */
		fnic->flogi = fp;
		fnic->flogi_oxid = ntohs(fh->fh_ox_id);
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		break;

	case FNIC_IN_ETH_MODE:
		/*
		 * The fw/hw is already in eth mode. Store the oxid,
		 * and send the flogi frame out. The transition out of this
		 * state happens only when we receive a flogi response from
		 * the network and its oxid matches the cached oxid from
		 * when the flogi frame was sent out. If they match, we
		 * issue a flogi_reg request and transition to state
		 * FNIC_IN_ETH_TRANS_FC_MODE.
		 */
		fnic->flogi_oxid = ntohs(fh->fh_ox_id);
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		ret = fnic_send_frame(fnic, fp);
		break;
	}

fnic_send_end:
	if (old_flogi)
		dev_kfree_skb(fp_skb(old_flogi));
	if (old_flogi_resp)
		dev_kfree_skb(fp_skb(old_flogi_resp));
	return ret;
}
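For reference, the state transitions driven by this function, reconstructed from the switch cases and their comments:

	/*
	 * FNIC_IN_FC_MODE, FNIC_IN_ETH_TRANS_FC_MODE (and default)
	 *	-> FNIC_IN_FC_TRANS_ETH_MODE  (cache Flogi, issue fw reset)
	 * FNIC_IN_FC_TRANS_ETH_MODE
	 *	-> unchanged  (reset already pending; just cache the Flogi)
	 * FNIC_IN_ETH_MODE
	 *	-> unchanged  (cache the oxid, send the Flogi out)
	 */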
Example #8
int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
{
	struct vnic_wq *wq = &fnic->wq[0];
	struct sk_buff *skb;
	dma_addr_t pa;
	struct ethhdr *eth_hdr;
	struct vlan_ethhdr *vlan_hdr;
	struct fcoe_hdr *fcoe_hdr;
	struct fc_frame_header *fh;
	u32 tot_len, eth_hdr_len;
	int ret = 0;
	unsigned long flags;

	fh = fc_frame_header_get(fp);
	skb = fp_skb(fp);

	if (!fnic->vlan_hw_insert) {
		eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr);
		vlan_hdr = (struct vlan_ethhdr *)skb_push(skb, eth_hdr_len);
		eth_hdr = (struct ethhdr *)vlan_hdr;
		vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
		vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE);
		vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
		fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1);
	} else {
		eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr);
		eth_hdr = (struct ethhdr *)skb_push(skb, eth_hdr_len);
		eth_hdr->h_proto = htons(ETH_P_FCOE);
		fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1);
	}

	if (is_flogi_frame(fh)) {
		fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
		memcpy(eth_hdr->h_source, fnic->mac_addr, ETH_ALEN);
	} else {
		if (fnic->fcoui_mode)
			fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id);
		else
			memcpy(eth_hdr->h_dest, fnic->dest_addr, ETH_ALEN);
		memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN);
	}

	tot_len = skb->len;
	BUG_ON(tot_len % 4);

	memset(fcoe_hdr, 0, sizeof(*fcoe_hdr));
	fcoe_hdr->fcoe_sof = fr_sof(fp);
	if (FC_FCOE_VER)
		FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER);

	pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);

	spin_lock_irqsave(&fnic->wq_lock[0], flags);

	if (!vnic_wq_desc_avail(wq)) {
		pci_unmap_single(fnic->pdev, pa,
				 tot_len, PCI_DMA_TODEVICE);
		ret = -1;
		goto fnic_send_frame_end;
	}

	fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
			   fnic->vlan_hw_insert, fnic->vlan_id, 1, 1, 1);
fnic_send_frame_end:
	spin_unlock_irqrestore(&fnic->wq_lock[0], flags);

	if (ret)
		dev_kfree_skb_any(fp_skb(fp));

	return ret;
}
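The skb_push() above prepends one of two encapsulations; the resulting layouts, with sizes taken from the standard kernel struct definitions, are:

	/*
	 * !vlan_hw_insert: [vlan_ethhdr 18B][fcoe_hdr 14B][FC frame ...]
	 *  vlan_hw_insert: [ethhdr 14B][fcoe_hdr 14B][FC frame ...]
	 *		    (the 802.1Q tag is inserted by the hardware)
	 */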
Example #9
static inline int fnic_handle_flogi_resp(struct fnic *fnic,
					 struct fc_frame *fp)
{
	u8 mac[ETH_ALEN] = FC_FCOE_FLOGI_MAC;
	struct ethhdr *eth_hdr;
	struct fc_frame_header *fh;
	int ret = 0;
	unsigned long flags;
	struct fc_frame *old_flogi_resp = NULL;

	fh = (struct fc_frame_header *)fr_hdr(fp);

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->state == FNIC_IN_ETH_MODE) {

		/*
		 * Check if oxid matches on taking the lock. A new Flogi
		 * issued by libFC might have changed the fnic cached oxid
		 */
		if (fnic->flogi_oxid != ntohs(fh->fh_ox_id)) {
			FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
				     "Flogi response oxid does not match "
				     "cached oxid, dropping frame\n");
			ret = -1;
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			dev_kfree_skb_irq(fp_skb(fp));
			goto handle_flogi_resp_end;
		}

		/* Drop older cached flogi response frame, cache this frame */
		old_flogi_resp = fnic->flogi_resp;
		fnic->flogi_resp = fp;
		fnic->flogi_oxid = FC_XID_UNKNOWN;

		/*
		 * This frame is part of flogi; get the src mac addr from
		 * it. If the src mac is fcoui based, mark the address
		 * mode flag to use the fcoui base for the dst mac addr;
		 * otherwise store the fcoe gateway addr.
		 */
		eth_hdr = (struct ethhdr *)skb_mac_header(fp_skb(fp));
		memcpy(mac, eth_hdr->h_source, ETH_ALEN);

		if (ntoh24(mac) == FC_FCOE_OUI)
			fnic->fcoui_mode = 1;
		else {
			fnic->fcoui_mode = 0;
			memcpy(fnic->dest_addr, mac, ETH_ALEN);
		}

		/*
		 * Except for the Flogi frame, all outbound frames from us
		 * have the Eth Src address as FC_FCOE_OUI | our_sid. The
		 * Flogi frame uses the vnic MAC address as the Eth Src
		 * address.
		 */
		fc_fcoe_set_mac(fnic->data_src_addr, fh->fh_d_id);

		/* We get our s_id from the d_id of the flogi resp frame */
		fnic->s_id = ntoh24(fh->fh_d_id);

		/* Change state to reflect transition from Eth to FC mode */
		fnic->state = FNIC_IN_ETH_TRANS_FC_MODE;

	} else {
		FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
			     "Unexpected fnic state %s while"
			     " processing flogi resp\n",
			     fnic_state_to_str(fnic->state));
		ret = -1;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		dev_kfree_skb_irq(fp_skb(fp));
		goto handle_flogi_resp_end;
	}

	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	/* Drop older cached frame */
	if (old_flogi_resp)
		dev_kfree_skb_irq(fp_skb(old_flogi_resp));

	/*
	 * Send the flogi reg request to firmware; this will put the
	 * fnic in FC mode.
	 */
	ret = fnic_flogi_reg_handler(fnic);

	if (ret < 0) {
		int free_fp = 1;
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		/*
		 * Free the frame if some other thread is not
		 * pointing to it.
		 */
		if (fnic->flogi_resp != fp)
			free_fp = 0;
		else
			fnic->flogi_resp = NULL;

		if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE)
			fnic->state = FNIC_IN_ETH_MODE;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		if (free_fp)
			dev_kfree_skb_irq(fp_skb(fp));
	}

 handle_flogi_resp_end:
	return ret;
}
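The fcoui-mode decision compares the top three bytes of the source MAC against the FCoE OUI (FC_FCOE_OUI, 0x0efc00, from include/scsi/fc/fc_fcoe.h). A minimal sketch of what the ntoh24() check above computes:

	u32 oui = (mac[0] << 16) | (mac[1] << 8) | mac[2];	/* ntoh24(mac) */
	fnic->fcoui_mode = (oui == FC_FCOE_OUI);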