Example #1
/**
 * IO is not started (unallocated).
 */
static void
bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_START:
		if (!bfa_itnim_is_online(ioim->itnim)) {
			if (!bfa_itnim_hold_io(ioim->itnim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->fcpim->ioim_comp_q);
				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
						__bfa_cb_ioim_pathtov, ioim);
			} else {
				list_del(&ioim->qe);
				list_add_tail(&ioim->qe,
					&ioim->itnim->pending_q);
			}
			break;
		}

		if (ioim->nsges > BFI_SGE_INLINE) {
			if (!bfa_ioim_sge_setup(ioim)) {
				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
				return;
			}
		}

		if (!bfa_ioim_send_ioreq(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
			break;
		}

		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
		break;

	case BFA_IOIM_SM_IOTOV:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
				__bfa_cb_ioim_pathtov, ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		/**
		 * An IO on the pending queue can receive abort requests.
		 * Complete such abort requests immediately.
		 */
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
				ioim);
		break;

	default:
		bfa_assert(0);
	}
}
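The bfa_ioim_sm_* handlers in these examples share a function-pointer state-machine pattern: the object's current state is the handler function itself, bfa_sm_set_state() swaps that pointer, and bfa_sm_send_event() dispatches an event through it. The macro definitions are not part of these examples, so the sketch below is a minimal, self-contained model of the pattern using hypothetical sm_set_state/sm_send_event stand-ins, not the driver's actual macros.

#include <stdio.h>

/* Reduced model of the pattern: the state IS the handler. */
struct io_ctx;
typedef void (*io_state_fn)(struct io_ctx *io, int event);

struct io_ctx {
	io_state_fn sm;		/* current state == current handler */
};

#define sm_set_state(_io, _st)	((_io)->sm = (_st))
#define sm_send_event(_io, _ev)	((_io)->sm((_io), (_ev)))

enum { EV_START, EV_COMP };

static void st_active(struct io_ctx *io, int event);

static void st_uninit(struct io_ctx *io, int event)
{
	if (event == EV_START)
		sm_set_state(io, st_active);	/* transition on start */
}

static void st_active(struct io_ctx *io, int event)
{
	if (event == EV_COMP)
		sm_set_state(io, st_uninit);	/* back to uninit on completion */
}

int main(void)
{
	struct io_ctx io = { .sm = st_uninit };

	sm_send_event(&io, EV_START);	/* st_uninit handles it, moves to st_active */
	sm_send_event(&io, EV_COMP);	/* st_active handles it, moves back */
	printf("back to uninit: %s\n", io.sm == st_uninit ? "yes" : "no");
	return 0;
}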
Example #2
/**
 * Allocate an IOIM resource for an initiator-mode I/O request.
 */
struct bfa_ioim_s *
bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
		struct bfa_itnim_s *itnim, u16 nsges)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfa_ioim_s *ioim;

	/**
	 * allocate an IOIM resource
	 */
	bfa_q_deq(&fcpim->ioim_free_q, &ioim);
	if (!ioim) {
		bfa_fcpim_stats(fcpim, no_iotags);
		return NULL;
	}

	ioim->dio = dio;
	ioim->itnim = itnim;
	ioim->nsges = nsges;
	ioim->nsgpgs = 0;

	bfa_stats(fcpim, total_ios);
	bfa_stats(itnim, ios);
	fcpim->ios_active++;

	list_add_tail(&ioim->qe, &itnim->io_q);
	bfa_trc_fp(ioim->bfa, ioim->iotag);

	return ioim;
}
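Read together with Example #6, the allocation path suggests the caller shape: pop an ioim off the free queue, fail the request when the tag pool is exhausted, then fire BFA_IOIM_SM_START. The queue_io() wrapper below is a hypothetical sketch of that pairing, not driver code; it reuses only names that appear in these examples (BFA_STATUS_ENOMEM is borrowed from Example #11).

/*
 * Hypothetical caller: allocate an IOIM and kick its state machine.
 * A sketch of how Examples #2 and #6 fit together.
 */
static bfa_status_t
queue_io(struct bfa_s *bfa, struct bfad_ioim_s *dio,
	 struct bfa_itnim_s *itnim, u16 nsges)
{
	struct bfa_ioim_s *ioim;

	ioim = bfa_ioim_alloc(bfa, dio, itnim, nsges);
	if (!ioim)
		return BFA_STATUS_ENOMEM;	/* free queue was empty */

	bfa_ioim_start(ioim);	/* sends BFA_IOIM_SM_START to the uninit state */
	return BFA_STATUS_OK;
}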
Example #3
/**
 * Line-based interrupt handler.
 */
irqreturn_t
bfad_intx(int irq, void *dev_id)
{
	struct bfad_s         *bfad = dev_id;
	struct list_head         doneq;
	unsigned long   flags;
	bfa_boolean_t rc;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	rc = bfa_intx(&bfad->bfa);
	if (!rc) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return IRQ_NONE;
	}

	bfa_comp_deq(&bfad->bfa, &doneq);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (!list_empty(&doneq)) {
		bfa_comp_process(&bfad->bfa, &doneq);

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_comp_free(&bfad->bfa, &doneq);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		bfa_trc_fp(bfad, irq);
	}

	return IRQ_HANDLED;
}
Example #4
void
bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_wqe_s *wqe;

	bfa_trc_fp(bfa, nsgpg);

	mod->free_sgpgs += nsgpg;
	bfa_assert(mod->free_sgpgs <= mod->num_sgpgs);

	bfa_q_enq_q(&mod->sgpg_q, sgpg_q);

	if (list_empty(&mod->sgpg_wait_q))
		return;

	/**
	 * satisfy as many waiting requests as possible
	 */
	do {
		wqe = bfa_q_first(&mod->sgpg_wait_q);
		if (mod->free_sgpgs < wqe->nsgpg)
			nsgpg = mod->free_sgpgs;
		else
			nsgpg = wqe->nsgpg;
		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
		wqe->nsgpg -= nsgpg;
		if (wqe->nsgpg == 0) {
			list_del(&wqe->qe);
			wqe->cbfn(wqe->cbarg);
		}
	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
}
Example #5
void
bfa_msix_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32 pi, ci;
	struct list_head *waitq;

	bfa_trc_fp(bfa, qid);

	qid &= (BFI_IOC_MAX_CQS - 1);

	bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);

	bfa_trc_fp(bfa, ci);
	bfa_trc_fp(bfa, pi);

	if (bfa->rme_process) {
		while (ci != pi) {
			m = bfa_rspq_elem(bfa, qid, ci);
			bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);

			bfa_isrs[m->mhdr.msg_class](bfa, m);

			CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
		}
	}

	/**
	 * update CI
	 */
	bfa_rspq_ci(bfa, qid) = pi;
	bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi);
	mmiowb();

	/**
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}
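The ci/pi loop assumes the response queue depth is a power of two, so CQ_INCR can wrap the consumer index with a mask instead of a modulo. CQ_INCR's definition does not appear in these examples; the standalone model below assumes that masking behavior.

#include <stdio.h>

/* Assumed wrap behavior for CQ_INCR; the real macro is not shown above. */
#define RING_INCR(_idx, _depth)	((_idx) = ((_idx) + 1) & ((_depth) - 1))

int main(void)
{
	unsigned int ci = 6, pi = 2;	/* consumer trails producer across the wrap */
	const unsigned int depth = 8;	/* must be a power of two */
	unsigned int handled = 0;

	while (ci != pi) {		/* same loop shape as bfa_msix_rspq() */
		/* process the element at index ci here */
		handled++;
		RING_INCR(ci, depth);
	}
	printf("handled %u elements, ci == %u\n", handled, ci);	/* 4, 2 */
	return 0;
}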
Example #6
void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);

	/**
	 * Obtain the queue on which this request has to be issued
	 */
	ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
			bfa_cb_ioim_get_reqq(ioim->dio) :
			bfa_itnim_get_reqq(ioim);

	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
}
Example #7
/**
 * IO bfa callback is pending.
 */
static void
bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_HCB:
		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
		bfa_ioim_free(ioim);
		bfa_cb_ioim_resfree(ioim->bfa->bfad);
		break;

	case BFA_IOIM_SM_CLEANUP:
		bfa_ioim_notify_cleanup(ioim);
		break;

	case BFA_IOIM_SM_HWFAIL:
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
Example #8
void
bfa_msix_rspq(struct bfa_s *bfa, int rsp_qid)
{
	struct bfi_msg_s      *m;
	u32        pi, ci;

	bfa_trc_fp(bfa, rsp_qid);

	rsp_qid &= (BFI_IOC_MAX_CQS - 1);

	bfa->iocfc.hwif.hw_rspq_ack(bfa, rsp_qid);

	ci = bfa_rspq_ci(bfa, rsp_qid);
	pi = bfa_rspq_pi(bfa, rsp_qid);

	bfa_trc_fp(bfa, ci);
	bfa_trc_fp(bfa, pi);

	if (bfa->rme_process) {
		while (ci != pi) {
			m = bfa_rspq_elem(bfa, rsp_qid, ci);
			bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);

			bfa_isrs[m->mhdr.msg_class](bfa, m);

			CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
		}
	}

	/**
	 * update CI
	 */
	bfa_rspq_ci(bfa, rsp_qid) = pi;
	bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[rsp_qid], pi);
	bfa_os_mmiowb();
}
Example #9
void
bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
	struct bfa_ioim_s *ioim;
	u16        iotag;

	iotag = bfa_os_ntohs(rsp->io_tag);

	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
	bfa_assert(ioim->iotag == iotag);

	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
}
Example #10
void
bfa_ioim_free(struct bfa_ioim_s *ioim)
{
	struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;

	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));

	bfa_assert_fp(list_empty(&ioim->sgpg_q)
		   || (ioim->nsges > BFI_SGE_INLINE));

	if (ioim->nsgpgs > 0)
		bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);

	bfa_stats(ioim->itnim, io_comps);
	fcpim->ios_active--;

	list_del(&ioim->qe);
	list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
}
Example #11
bfa_status_t
bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_s *hsgpg;
	int             i;

	bfa_trc_fp(bfa, nsgpgs);

	if (mod->free_sgpgs < nsgpgs)
		return BFA_STATUS_ENOMEM;

	for (i = 0; i < nsgpgs; i++) {
		bfa_q_deq(&mod->sgpg_q, &hsgpg);
		bfa_assert(hsgpg);
		list_add_tail(&hsgpg->qe, sgpg_q);
	}

	mod->free_sgpgs -= nsgpgs;
	return BFA_STATUS_OK;
}
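Examples #4 and #11 together form a simple credit scheme: bfa_sgpg_malloc() either hands out all of the requested pages or fails with BFA_STATUS_ENOMEM, and bfa_sgpg_mfree() returns the credit and drains the wait queue. A hypothetical borrower, using only the calls shown in these examples plus the standard kernel list initializer INIT_LIST_HEAD, might look like this:

/*
 * Hypothetical usage sketch, not driver code: borrow two SG pages,
 * fill them, and return them. Returning pages via bfa_sgpg_mfree()
 * also wakes any waiters queued on sgpg_wait_q (Example #4).
 */
static bfa_status_t
with_two_sgpgs(struct bfa_s *bfa)
{
	struct list_head my_q;

	INIT_LIST_HEAD(&my_q);
	if (bfa_sgpg_malloc(bfa, &my_q, 2) != BFA_STATUS_OK)
		return BFA_STATUS_ENOMEM;	/* pool exhausted; a real caller could queue a wait entry */

	/* ... build scatter-gather entries in the pages on my_q ... */

	bfa_sgpg_mfree(bfa, &my_q, 2);	/* give the credit back; wakes waiters */
	return BFA_STATUS_OK;
}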
Example #12
/**
 * IO is active.
 */
static void
bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_trc_fp(ioim->bfa, event);

	switch (event) {
	case BFA_IOIM_SM_COMP_GOOD:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
			      __bfa_cb_ioim_good_comp, ioim);
		break;

	case BFA_IOIM_SM_COMP:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_DONE:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
			      ioim);
		break;

	case BFA_IOIM_SM_ABORT:
		ioim->iosp->abort_explicit = BFA_TRUE;
		ioim->io_cbfn = __bfa_cb_ioim_abort;

		if (bfa_ioim_send_abort(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
		} else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_CLEANUP:
		ioim->iosp->abort_explicit = BFA_FALSE;
		ioim->io_cbfn = __bfa_cb_ioim_failed;

		if (bfa_ioim_send_abort(ioim)) {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
		} else {
			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
			bfa_reqq_wait(ioim->bfa, ioim->reqq,
					&ioim->iosp->reqq_wait);
		}
		break;

	case BFA_IOIM_SM_HWFAIL:
		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
		list_del(&ioim->qe);
		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
			      ioim);
		break;

	default:
		bfa_sm_fault(ioim->bfa, event);
	}
}
Example #13
void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
	bfa_trc_fp(ioim->bfa, ioim->iotag);
	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
}