/*
 * emlxs_event_queue_destroy() - Tear down the HBA-wide event queue.
 *
 * Wakes any threads sleeping on the queue's condition variable, destroys
 * every event still queued, and then releases the lock/cv resources.
 *
 * hba: adapter whose event queue (EVENTQ) is being destroyed.
 */
extern void
emlxs_event_queue_destroy(emlxs_hba_t *hba)
{
	emlxs_port_t *vport;
	emlxs_event_queue_t *eventq = &EVENTQ;
	uint32_t vpi;
	uint32_t need_wakeup = 0;

	mutex_enter(&eventq->lock);

	/*
	 * Clear all event masks first; any thread sleeping on the cv will
	 * see an empty mask when it wakes and give up its wait.
	 */
	if (hba->event_mask) {
		hba->event_mask = 0;
		hba->event_timer = 0;
		need_wakeup = 1;
	}

	for (vpi = 0; vpi < MAX_VPORTS; vpi++) {
		vport = &VPORT(vpi);

		if (vport->sd_event_mask) {
			vport->sd_event_mask = 0;
			need_wakeup = 1;
		}
	}

	/*
	 * If anything was waiting, broadcast a wakeup and briefly drop the
	 * lock so the woken threads get a chance to run and exit their waits.
	 */
	if (need_wakeup) {
		cv_broadcast(&eventq->lock_cv);

		mutex_exit(&eventq->lock);
		DELAYMS(10);
		mutex_enter(&eventq->lock);
	}

	/* Drain and destroy whatever events remain on the queue */
	while (eventq->first) {
		emlxs_event_destroy(hba, eventq->first);
	}

	mutex_exit(&eventq->lock);

	/* Release the queue's synchronization resources */
	mutex_destroy(&eventq->lock);
	cv_destroy(&eventq->lock_cv);

	/* Zero the queue structure itself */
	bzero(eventq, sizeof (emlxs_event_queue_t));

	return;

} /* emlxs_event_queue_destroy() */
extern int32_t emlxs_ip_handle_unsol_req(emlxs_port_t *port, CHANNEL *cp, IOCBQ *iocbq, MATCHMAP *mp, uint32_t size) { emlxs_hba_t *hba = HBA; fc_unsol_buf_t *ubp; IOCB *cmd; NETHDR *nd; NODELIST *ndlp; uint8_t *mac; emlxs_ub_priv_t *ub_priv; uint32_t sid; uint32_t i; uint32_t IpDropped = 1; uint32_t IpBcastReceived = 0; uint32_t IpSeqReceived = 0; cmd = &iocbq->iocb; ubp = NULL; for (i = 0; i < MAX_VPORTS; i++) { port = &VPORT(i); if (!(port->flag & EMLXS_PORT_BOUND) || !(port->flag & EMLXS_PORT_IP_UP)) { continue; } ubp = (fc_unsol_buf_t *)emlxs_ub_get(port, size, FC_TYPE_IS8802_SNAP, 0); if (!ubp) { /* Theoretically we should never get here. */ /* There should be one DMA buffer for every ub */ /* buffer. If we are out of ub buffers */ /* then some how this matching has been corrupted */ EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_ip_dropped_msg, "Buffer not found. paddr=%lx", PADDR(cmd->un.cont64[0].addrHigh, cmd->un.cont64[0].addrLow)); continue; } bcopy(mp->virt, ubp->ub_buffer, size); ub_priv = ubp->ub_fca_private; nd = (NETHDR *)ubp->ub_buffer; mac = nd->fc_srcname.IEEE; ndlp = emlxs_node_find_mac(port, mac); if (ndlp) { sid = ndlp->nlp_DID; if ((ndlp->nlp_Xri == 0) && !(ndlp->nlp_flag[hba->channel_ip] & NLP_RPI_XRI)) { (void) emlxs_create_xri(port, cp, ndlp); } } /* * If no node is found, then check if this is a * broadcast frame */ else if (cmd->un.xrseq.w5.hcsw.Fctl & BC) { sid = cmd->un.ulpWord[4] & 0x00ffffff; } else { /* We have to drop this frame because we do not have */ /* the S_ID of the request */ EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_ip_dropped_msg, "Node not found. 
mac=%02x%02x%02x%02x%02x%02x", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); (void) emlxs_ub_release((opaque_t)port, 1, &ubp->ub_token); continue; } if (cmd->un.xrseq.w5.hcsw.Fctl & BC) { IpBcastReceived++; } else { IpSeqReceived++; } /* * Setup frame header */ ubp->ub_frame.r_ctl = cmd->un.xrseq.w5.hcsw.Rctl; ubp->ub_frame.type = cmd->un.xrseq.w5.hcsw.Type; ubp->ub_frame.s_id = sid; ubp->ub_frame.ox_id = ub_priv->token; ubp->ub_frame.rx_id = cmd->ULPCONTEXT; ubp->ub_class = FC_TRAN_CLASS3; emlxs_ub_callback(port, ubp); IpDropped = 0; } port = &PPORT; out: if (IpDropped) { HBASTATS.IpDropped++; } if (IpBcastReceived) { HBASTATS.IpBcastReceived++; } if (IpSeqReceived) { HBASTATS.IpSeqReceived++; } return (0); } /* emlxs_ip_handle_unsol_req() */
/*
 * emlxs_timer_checks() - Periodic watchdog sweep for the adapter.
 *
 * Runs the SLI-level, event, heartbeat, loopback, packet, linkup, per-port,
 * and channel timeout checks in sequence. SLI4 adapters currently only get
 * the linkup check.
 *
 * hba: adapter to sweep.
 */
extern void
emlxs_timer_checks(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint8_t chan_flag[MAX_CHANNEL];
	uint32_t vpi;
	uint32_t rval;

	/* Nothing to check while the adapter is still initializing */
	if (hba->state < FC_LINK_DOWN) {
		return;
	}

	/* DEBUG - re-examine this path for SLI4 later */
	if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
		/* Check for linkup timeout */
		emlxs_timer_check_linkup(hba);
		return;
	}

	bzero((void *)chan_flag, sizeof (chan_flag));

	/* Check SLI level timeouts */
	EMLXS_SLI_TIMER(hba);

	/* Check event queue */
	emlxs_timer_check_events(hba);

	/* Check heartbeat timer */
	emlxs_timer_check_heartbeat(hba);

#ifdef IDLE_TIMER
	emlxs_pm_idle_timer(hba);
#endif /* IDLE_TIMER */

	/* Check for loopback timeouts */
	emlxs_timer_check_loopback(hba);

	/* Check for packet timeouts; a nonzero return means a link or */
	/* adapter reset is in progress, so stop the sweep here */
	rval = emlxs_timer_check_pkts(hba, chan_flag);

	if (rval) {
		return;
	}

	/* Check for linkup timeout */
	emlxs_timer_check_linkup(hba);

	/* Run the per-port checks on every bound virtual port */
	for (vpi = 0; vpi < MAX_VPORTS; vpi++) {
		port = &VPORT(vpi);

		if (!(port->flag & EMLXS_PORT_BOUND)) {
			continue;
		}

		/* Check for node gate timeouts */
		emlxs_timer_check_nodes(port, chan_flag);

		/* Check for tape discovery timeout */
		emlxs_timer_check_discovery(port);

		/* Check for UB timeouts */
		emlxs_timer_check_ub(port);

#ifdef DHCHAP_SUPPORT
		/* Check for DHCHAP authentication timeouts */
		emlxs_timer_check_dhchap(port);
#endif /* DHCHAP_SUPPORT */
	}

	/* Check for IO channel service timeouts */
	/* Always do this last */
	emlxs_timer_check_channels(hba, chan_flag);

	return;

} /* emlxs_timer_checks() */