Example #1
0
static int fnic_cleanup(struct fnic *fnic)
{
	unsigned int i;
	int err;

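	/* Disable the device, mask interrupts and stop the RQs, WQs and copy WQs */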
	vnic_dev_disable(fnic->vdev);
	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_mask(&fnic->intr[i]);

	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_disable(&fnic->rq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < fnic->raw_wq_count; i++) {
		err = vnic_wq_disable(&fnic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < fnic->wq_copy_count; i++) {
		err = vnic_wq_copy_disable(&fnic->wq_copy[i]);
		if (err)
			return err;
	}

	/* Clean up completed IOs and FCS frames */
	fnic_wq_copy_cmpl_handler(fnic, -1);
	fnic_wq_cmpl_handler(fnic, -1);
	fnic_rq_cmpl_handler(fnic, -1);

	/* Clean up the IOs and FCS frames that have not completed */
	for (i = 0; i < fnic->raw_wq_count; i++)
		vnic_wq_clean(&fnic->wq[i], fnic_free_wq_buf);
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
	for (i = 0; i < fnic->wq_copy_count; i++)
		vnic_wq_copy_clean(&fnic->wq_copy[i],
				   fnic_wq_copy_cleanup_handler);

	for (i = 0; i < fnic->cq_count; i++)
		vnic_cq_clean(&fnic->cq[i]);
	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_clean(&fnic->intr[i]);

	mempool_destroy(fnic->io_req_pool);
	for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
		mempool_destroy(fnic->io_sgl_pool[i]);

	return 0;
}
Example #2
0
static int enic_stop(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

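	/* Stop the notify timer, disable the device and quiesce NAPI and the TX queue */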
	del_timer_sync(&enic->notify_timer);

	spin_lock(&enic->devcmd_lock);
	vnic_dev_disable(enic->vdev);
	spin_unlock(&enic->devcmd_lock);
	napi_disable(&enic->napi);
	netif_stop_queue(netdev);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_mask(&enic->intr[i]);

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

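	/* Unset the device notify area and free the interrupt resources */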
	spin_lock(&enic->devcmd_lock);
	vnic_dev_notify_unset(enic->vdev);
	spin_unlock(&enic->devcmd_lock);
	enic_free_intr(enic);

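	/* Drain outstanding RQ and WQ completions before cleaning the queues */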
	(void)vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		-1, enic_rq_service_drop, NULL);
	(void)vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
		-1, enic_wq_service, NULL);

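	/* Free buffers still posted on the queues and reset the CQs and interrupts */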
	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_clean(&enic->intr[i]);

	return 0;
}
Example #3
0
int enic_disable(struct enic *enic)
{
	unsigned int i;
	int err;

	vnic_intr_mask(&enic->intr);
	(void)vnic_intr_masked(&enic->intr); /* flush write */

	vnic_dev_disable(enic->vdev);

	enic_clsf_destroy(enic);

	if (!enic_is_sriov_vf(enic))
		vnic_dev_del_addr(enic->vdev, enic->mac_addr);

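	/* Disable the work and receive queues; fail if the hardware does not quiesce */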
	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_disable(&enic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_disable(&enic->rq[i]);
		if (err)
			return err;
	}

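	/* Flag the device as being reset and unset the notify area */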
	vnic_dev_set_reset_flag(enic->vdev, 1);
	vnic_dev_notify_unset(enic->vdev);

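	/* Free buffers still posted on the queues and reset the CQs and the interrupt */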
	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);

	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
	for (i = 0; i < enic->cq_count; i++)
		vnic_cq_clean(&enic->cq[i]);
	vnic_intr_clean(&enic->intr);

	return 0;
}
Example #4
0
static int fnic_cleanup(struct fnic *fnic)
{
	unsigned int i;
	int err;
	unsigned long flags;
	struct fc_frame *flogi = NULL;
	struct fc_frame *flogi_resp = NULL;

	vnic_dev_disable(fnic->vdev);
	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_mask(&fnic->intr[i]);

	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_disable(&fnic->rq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < fnic->raw_wq_count; i++) {
		err = vnic_wq_disable(&fnic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < fnic->wq_copy_count; i++) {
		err = vnic_wq_copy_disable(&fnic->wq_copy[i]);
		if (err)
			return err;
	}

	/* Clean up completed IOs and FCS frames */
	fnic_wq_copy_cmpl_handler(fnic, -1);
	fnic_wq_cmpl_handler(fnic, -1);
	fnic_rq_cmpl_handler(fnic, -1);

	/* Clean up the IOs and FCS frames that have not completed */
	for (i = 0; i < fnic->raw_wq_count; i++)
		vnic_wq_clean(&fnic->wq[i], fnic_free_wq_buf);
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
	for (i = 0; i < fnic->wq_copy_count; i++)
		vnic_wq_copy_clean(&fnic->wq_copy[i],
				   fnic_wq_copy_cleanup_handler);

	for (i = 0; i < fnic->cq_count; i++)
		vnic_cq_clean(&fnic->cq[i]);
	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_clean(&fnic->intr[i]);

	/*
	 * Remove cached flogi and flogi response frames, if any.
	 * These frames are not on any queue, so the queue cleanup
	 * above does not free them; free them explicitly here.
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	flogi = fnic->flogi;
	fnic->flogi = NULL;
	flogi_resp = fnic->flogi_resp;
	fnic->flogi_resp = NULL;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (flogi)
		dev_kfree_skb(fp_skb(flogi));

	if (flogi_resp)
		dev_kfree_skb(fp_skb(flogi_resp));

	mempool_destroy(fnic->io_req_pool);
	for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
		mempool_destroy(fnic->io_sgl_pool[i]);

	return 0;
}
Example #5
0
static int fnic_cleanup(struct fnic *fnic)
{
	unsigned int i;
	int err;
	unsigned long flags;
	struct fc_frame *flogi = NULL;
	struct fc_frame *flogi_resp = NULL;

	vnic_dev_disable(fnic->vdev);
	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_mask(&fnic->intr[i]);

	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_disable(&fnic->rq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < fnic->raw_wq_count; i++) {
		err = vnic_wq_disable(&fnic->wq[i]);
		if (err)
			return err;
	}
	for (i = 0; i < fnic->wq_copy_count; i++) {
		err = vnic_wq_copy_disable(&fnic->wq_copy[i]);
		if (err)
			return err;
	}

	/* Clean up completed IOs and FCS frames */
	fnic_wq_copy_cmpl_handler(fnic, -1);
	fnic_wq_cmpl_handler(fnic, -1);
	fnic_rq_cmpl_handler(fnic, -1);

	/* Clean up the IOs and FCS frames that have not completed */
	for (i = 0; i < fnic->raw_wq_count; i++)
		vnic_wq_clean(&fnic->wq[i], fnic_free_wq_buf);
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
	for (i = 0; i < fnic->wq_copy_count; i++)
		vnic_wq_copy_clean(&fnic->wq_copy[i],
				   fnic_wq_copy_cleanup_handler);

	for (i = 0; i < fnic->cq_count; i++)
		vnic_cq_clean(&fnic->cq[i]);
	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_clean(&fnic->intr[i]);

	/*
	 * Remove cached flogi and flogi response frames, if any.
	 * These frames are not on any queue, so the queue cleanup
	 * above does not free them; free them explicitly here.
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	flogi = fnic->flogi;
	fnic->flogi = NULL;
	flogi_resp = fnic->flogi_resp;
	fnic->flogi_resp = NULL;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (flogi)
		dev_kfree_skb(fp_skb(flogi));

	if (flogi_resp)
		dev_kfree_skb(fp_skb(flogi_resp));

	mempool_destroy(fnic->io_req_pool);
	for (i = 0; i < FNIC_SGL_NUM_CACHES; i++)
		mempool_destroy(fnic->io_sgl_pool[i]);

	return 0;
}