/*
 * Notify sub-modules of hardware failure.
 */
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa->rme_process = BFA_FALSE;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
			     bfa_iocfc_init_cb, bfa);
}
/*
 * IOC enable request is complete.
 */
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
	struct bfa_s	*bfa = bfa_arg;

	if (status != BFA_STATUS_OK) {
		bfa_isr_disable(bfa);
		if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
			bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
				     bfa_iocfc_init_cb, bfa);
		return;
	}

	bfa_iocfc_send_cfg(bfa);
}
/*
 * IOC disable request is complete.
 */
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
		bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe,
			     bfa_iocfc_stop_cb, bfa);
	else {
		WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
		bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe,
			     bfa_iocfc_disable_cb, bfa);
	}
}
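/*
 * Illustrative only: the callbacks above share one convention. The pending
 * operation is recorded in iocfc.action, and once the hardware side has been
 * quiesced the handler queues the completion callback that matches it. A
 * minimal sketch of that dispatch pattern follows; my_ctx, my_action,
 * my_queue_cb() and the *_done() handlers are hypothetical stand-ins, not
 * BFA APIs.
 */

#include <linux/bug.h>
#include <linux/printk.h>

/* Hypothetical stand-ins for the BFA action codes and context. */
enum my_action { MY_ACT_NONE, MY_ACT_INIT, MY_ACT_STOP, MY_ACT_DISABLE };

struct my_ctx {
	enum my_action	action;		/* operation currently in flight */
};

typedef void (*my_cb_t)(struct my_ctx *ctx);

static void my_stop_done(struct my_ctx *ctx)
{
	pr_info("stop complete\n");
}

static void my_disable_done(struct my_ctx *ctx)
{
	pr_info("disable complete\n");
}

/* Stand-in for bfa_cb_queue(); in this sketch the callback runs inline. */
static void my_queue_cb(struct my_ctx *ctx, my_cb_t cb)
{
	cb(ctx);
}

/*
 * Same shape as bfa_iocfc_disable_cbfn() above: quiesce the hardware side
 * first, then notify whichever caller recorded its pending action.
 */
static void my_disable_cbfn(struct my_ctx *ctx)
{
	/* ISR and sub-module shutdown would go here. */

	if (ctx->action == MY_ACT_STOP)
		my_queue_cb(ctx, my_stop_done);
	else {
		WARN_ON(ctx->action != MY_ACT_DISABLE);
		my_queue_cb(ctx, my_disable_done);
	}
}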
/*
 * Driver uninitialization: stop the BFA, wait for the stop completion,
 * then tear down the timer, interrupts and HAL memory.
 */
void
bfad_drv_uninit(struct bfad_s *bfad)
{
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfa_stop(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);

	del_timer_sync(&bfad->hal_tmo);
	bfa_isr_disable(&bfad->bfa);
	bfa_detach(&bfad->bfa);
	bfad_remove_intr(bfad);
	bfad_hal_mem_release(bfad);

	bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE;
}
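/*
 * Illustrative only: bfad_drv_uninit() above uses the standard Linux
 * completion handshake. It arms bfad->comp under the driver lock, kicks off
 * the asynchronous stop, and blocks in wait_for_completion() until the stop
 * path's completion callback signals it. A minimal sketch of that handshake
 * follows; my_dev, my_async_stop() and my_stop_done_cb() are hypothetical
 * stand-ins for the BFA-specific pieces.
 */

#include <linux/completion.h>
#include <linux/spinlock.h>

/* Hypothetical device context; mirrors the bfad_s fields used above. */
struct my_dev {
	spinlock_t		lock;
	struct completion	comp;
};

/* Completion callback: the asynchronous stop path invokes this when done. */
static void my_stop_done_cb(void *arg)
{
	struct my_dev *dev = arg;

	complete(&dev->comp);		/* wake the thread waiting in uninit */
}

/*
 * Stand-in for the asynchronous stop (bfa_stop() in the driver); here it
 * finishes immediately so the sketch stays self-contained.
 */
static void my_async_stop(struct my_dev *dev, void (*done)(void *), void *arg)
{
	done(arg);
}

/* Synchronous teardown: arm the completion, start the stop, then wait. */
static void my_drv_uninit(struct my_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	init_completion(&dev->comp);
	my_async_stop(dev, my_stop_done_cb, dev);
	spin_unlock_irqrestore(&dev->lock, flags);

	wait_for_completion(&dev->comp);
}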