/*
 * Process a create_xri command completion.
 */
extern int32_t
emlxs_handle_create_xri(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
{
    emlxs_port_t *port = &PPORT;
    IOCB *cmd;
    NODELIST *ndlp;
    fc_packet_t *pkt;
    emlxs_buf_t *sbp;

    cmd = &iocbq->iocb;
    sbp = (emlxs_buf_t *)iocbq->sbp;

    if (!sbp) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_ip_completion_msg,
            "create_xri: cmd=0x%x iotag=0x%x status=0x%x w4=0x%x",
            cmd->ULPCOMMAND, cmd->ULPIOTAG, cmd->ULPSTATUS,
            cmd->un.ulpWord[4]);

        return (EIO);
    }

    /* check for first xmit completion in sequence */
    ndlp = (NODELIST *)sbp->node;

    if (cmd->ULPSTATUS) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_bad_ip_completion_msg,
            "create_xri: cmd=0x%x iotag=0x%x status=0x%x w4=0x%x",
            cmd->ULPCOMMAND, cmd->ULPIOTAG, cmd->ULPSTATUS,
            cmd->un.ulpWord[4]);

        mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
        ndlp->nlp_flag[cp->channelno] &= ~NLP_RPI_XRI;
        mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

        return (EIO);
    }

    mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
    ndlp->nlp_Xri = cmd->ULPCONTEXT;
    ndlp->nlp_flag[cp->channelno] &= ~NLP_RPI_XRI;
    mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

    EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ip_detail_msg,
        "create_xri completed: DID=0x%x Xri=0x%x iotag=0x%x",
        ndlp->nlp_DID, ndlp->nlp_Xri, cmd->ULPIOTAG);

    pkt = sbp->pkt;
    emlxs_pkt_free(pkt);

    return (0);

} /* emlxs_handle_create_xri() */
extern void
emlxs_node_add(emlxs_port_t *port, NODELIST *ndlp)
{
    emlxs_hba_t *hba = HBA;
    NODELIST *np;
    uint8_t *wwn;
    uint32_t hash;
    RPIobj_t *rp;

    rw_enter(&port->node_rwlock, RW_WRITER);
    hash = EMLXS_DID_HASH(ndlp->nlp_DID);
    np = port->node_table[hash];

    /*
     * Insert node pointer to the head
     */
    port->node_table[hash] = ndlp;
    if (!np) {
        ndlp->nlp_list_next = NULL;
    } else {
        ndlp->nlp_list_next = np;
    }
    port->node_count++;

    wwn = (uint8_t *)&ndlp->nlp_portname;
    EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_msg,
        "node=%p did=%06x rpi=%x wwpn=%02x%02x%02x%02x%02x%02x%02x%02x "
        "count=%d", ndlp, ndlp->nlp_DID, ndlp->nlp_Rpi, wwn[0], wwn[1],
        wwn[2], wwn[3], wwn[4], wwn[5], wwn[6], wwn[7], port->node_count);

    /* Add Node/RPI binding */
    if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
        rp = emlxs_sli4_find_rpi(hba, ndlp->nlp_Rpi);

        if (rp) {
            rp->node = ndlp;
            ndlp->RPIp = rp;
        } else {
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_create_msg,
                "Unable to find RPI! did=%x rpi=%x",
                ndlp->nlp_DID, ndlp->nlp_Rpi);
        }
    }

    rw_exit(&port->node_rwlock);

    return;

} /* emlxs_node_add() */
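/*
 * Layout note: the node table is a fixed array of EMLXS_NUM_HASH_QUES
 * singly-linked chains keyed by EMLXS_DID_HASH(did), and
 * emlxs_node_add() above always inserts at the head of the chain.
 * With two hypothetical DIDs hashing to the same bucket, the bucket
 * looks like:
 *
 *	node_table[hash] --> newest ndlp --> older ndlp --> NULL
 *
 * Lookups therefore cost O(chain length); emlxs_node_find_did()
 * performs the matching reader-side walk under node_rwlock.
 */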
/* ARGSUSED */
static void
emlxs_pkt_thread(emlxs_hba_t *hba, void *arg1, void *arg2)
{
    emlxs_port_t *port;
    fc_packet_t *pkt = (fc_packet_t *)arg1;
    int32_t rval;
    emlxs_buf_t *sbp;

    sbp = PKT2PRIV(pkt);
    port = sbp->port;

    /* Send the pkt now */
    rval = emlxs_pkt_send(pkt, 1);

    if (rval != FC_SUCCESS) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_trans_msg,
            "Deferred emlxs_pkt_send failed: status=%x pkt=%p", rval,
            pkt);

        if (pkt->pkt_comp) {
            emlxs_set_pkt_state(sbp, IOSTAT_LOCAL_REJECT, 0, 1);

            ((CHANNEL *)sbp->channel)->ulpCmplCmd++;
            (*pkt->pkt_comp) (pkt);
        } else {
            emlxs_pkt_free(pkt);
        }
    }

    return;

} /* emlxs_pkt_thread() */
/* EMLXS_FCTAB_LOCK must be held to call this */
static uint32_t
emlxs_pkt_chip_timeout(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abortq,
    uint8_t *flag)
{
    emlxs_hba_t *hba = HBA;
    CHANNEL *cp = (CHANNEL *)sbp->channel;
    IOCBQ *iocbq = NULL;
    fc_packet_t *pkt;
    uint32_t rc = 0;

    mutex_enter(&sbp->mtx);

    /* Warning: Some FCT sbp's don't have fc_packet objects */
    pkt = PRIV2PKT(sbp);

    switch (sbp->abort_attempts) {
    case 0:

        /* Create the abort IOCB */
        if (hba->state >= FC_LINK_UP) {
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
                "chipQ:1:Aborting. sbp=%p iotag=%x tmo=%d flags=%x",
                sbp, sbp->iotag, (pkt) ? pkt->pkt_timeout : 0,
                sbp->pkt_flags);

            iocbq = emlxs_create_abort_xri_cn(port, sbp->node,
                sbp->iotag, cp, sbp->class, ABORT_TYPE_ABTS);

            /* The adapter will make 2 attempts to send ABTS */
            /* with 2*ratov timeout each time */
            sbp->ticks = hba->timer_tics + (4 * hba->fc_ratov) + 10;
        } else {
extern NODELIST *
emlxs_node_find_rpi(emlxs_port_t *port, uint32_t rpi)
{
    NODELIST *nlp;
    uint32_t i;

    rw_enter(&port->node_rwlock, RW_READER);
    for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
        nlp = port->node_table[i];
        while (nlp != NULL) {
            if (nlp->nlp_Rpi == rpi) {
                rw_exit(&port->node_rwlock);
                return (nlp);
            }
            nlp = (NODELIST *)nlp->nlp_list_next;
        }
    }
    rw_exit(&port->node_rwlock);

    EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_not_found_msg, "find: rpi=%x",
        rpi);

    /* no match found */
    return ((NODELIST *)0);

} /* emlxs_node_find_rpi() */
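/*
 * Note: the node table is keyed by DID, so RPI lookups in
 * emlxs_node_find_rpi() above cannot use the hash and must walk every
 * bucket linearly under the reader lock. On SLI4 adapters, the
 * Node/RPI binding established in emlxs_node_add() (rp->node and
 * ndlp->RPIp) offers a direct reverse mapping when an RPIobj_t is
 * already in hand.
 */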
/* Called by emlxs_timer_check_nodes() */
extern void
emlxs_node_timeout(emlxs_port_t *port, NODELIST *ndlp, uint32_t channelno)
{
    emlxs_hba_t *hba = HBA;

    /* If node needs servicing, then add it to the channel queues */
    mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

    /* Return if node destroyed */
    if (!ndlp || !ndlp->nlp_active) {
        mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
        return;
    }

    /* Open the node if not offline */
    if (!(ndlp->nlp_flag[channelno] & NLP_OFFLINE)) {
        mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_timeout_msg,
            "node=%p did=%06x channel=%d Opening.", ndlp,
            ndlp->nlp_DID, channelno);

        emlxs_node_open(port, ndlp, channelno);
        return;
    }

    /* OFFLINE TIMEOUT OCCURRED! */

    /* Clear the timer */
    ndlp->nlp_tics[channelno] = 0;

    mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

    EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_timeout_msg,
        "node=%p did=%06x channel=%d Offline timeout. Flushing.", ndlp,
        ndlp->nlp_DID, channelno);

    /* Flush tx queue for this channel */
    (void) emlxs_tx_node_flush(port, ndlp, &hba->chan[channelno], 0, 0);

    /* Flush chip queue for this channel */
    (void) emlxs_chipq_node_flush(port, &hba->chan[channelno], ndlp, 0);

    return;

} /* emlxs_node_timeout() */
/* Event queue lock must be held */
static void
emlxs_event_destroy(emlxs_hba_t *hba, emlxs_event_entry_t *entry)
{
    emlxs_event_queue_t *eventq = &EVENTQ;
    emlxs_port_t *port;
    uint32_t missed = 0;

    port = (emlxs_port_t *)entry->port;

    eventq->count--;
    if (eventq->count == 0) {
        eventq->first = NULL;
        eventq->last = NULL;
    } else {
        if (entry->prev) {
            entry->prev->next = entry->next;
        }
        if (entry->next) {
            entry->next->prev = entry->prev;
        }
        if (eventq->first == entry) {
            eventq->first = entry->next;
        }
        if (eventq->last == entry) {
            eventq->last = entry->prev;
        }
    }

    entry->prev = NULL;
    entry->next = NULL;

    if ((entry->evt->mask == EVT_LINK) ||
        (entry->evt->mask == EVT_RSCN)) {
        if (!(entry->flag & EMLXS_DFC_EVENT_DONE)) {
            hba->hba_event.missed++;
            missed = 1;
        }
    }

    EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_dequeued_msg,
        "%s[%d]: flag=%x missed=%d cnt=%d", entry->evt->label,
        entry->id, entry->flag, missed, eventq->count);

    /* Call notification handler */
    entry->evt->destroy(entry);

    /* Free context buffer */
    if (entry->bp && entry->size) {
        kmem_free(entry->bp, entry->size);
    }

    /* Free entry buffer */
    kmem_free(entry, sizeof (emlxs_event_entry_t));

    return;

} /* emlxs_event_destroy() */
static void
emlxs_timer_check_channels(emlxs_hba_t *hba, uint8_t *flag)
{
    emlxs_port_t *port = &PPORT;
    emlxs_config_t *cfg = &CFG;
    int32_t channelno;
    CHANNEL *cp;
    uint32_t logit;

    if (!cfg[CFG_TIMEOUT_ENABLE].current) {
        return;
    }

    for (channelno = 0; channelno < hba->chan_count; channelno++) {
        cp = &hba->chan[channelno];

        logit = 0;

        /* Check for channel timeout now */
        mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
        if (cp->timeout && (hba->timer_tics >= cp->timeout)) {
            /* Check if there is work to do on channel and */
            /* the link is still up */
            if (cp->nodeq.q_first) {
                flag[channelno] = 1;
                cp->timeout = hba->timer_tics + 10;

                if (hba->state >= FC_LINK_UP) {
                    logit = 1;
                }
            } else {
                cp->timeout = 0;
            }
        }
        mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

        if (logit) {
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_chan_watchdog_msg,
                "IO Channel %d cnt=%d,%d", channelno,
                hba->channel_tx_count, hba->io_count);
        }

        /*
         * If IO channel flag is set, request iocb servicing
         * here to send any iocb's that may still be queued
         */
        if (flag[channelno]) {
            EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, 0);
        }
    }

    return;

} /* emlxs_timer_check_channels() */
static void
emlxs_timer_check_heartbeat(emlxs_hba_t *hba)
{
    emlxs_port_t *port = &PPORT;
    MAILBOXQ *mbq;
    emlxs_config_t *cfg = &CFG;
    int rc;

    if (!cfg[CFG_HEARTBEAT_ENABLE].current) {
        return;
    }

    if (hba->timer_tics < hba->heartbeat_timer) {
        return;
    }

    hba->heartbeat_timer = hba->timer_tics + 5;

    /* Return if adapter interrupts have occurred */
    if (hba->heartbeat_flag) {
        hba->heartbeat_flag = 0;
        return;
    }
    /* No adapter interrupts have occurred for 5 seconds now */

    /* Return if mailbox is busy */
    /* This means the mailbox timer routine is watching for problems */
    if (hba->mbox_timer) {
        return;
    }

    /* Return if heartbeat is still outstanding */
    if (hba->heartbeat_active) {
        return;
    }

    if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1)) == 0) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
            "Unable to allocate heartbeat mailbox.");
        return;
    }

    emlxs_mb_heartbeat(hba, mbq);
    hba->heartbeat_active = 1;

    rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, MBX_NOWAIT, 0);
    if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
        (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
    }

    return;

} /* emlxs_timer_check_heartbeat() */
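/*
 * Heartbeat timing sketch: the check above runs at most once every
 * 5 timer tics (seconds). Any adapter interrupt in the interval sets
 * hba->heartbeat_flag, which is taken as proof of life and simply
 * cleared. Only a quiet adapter, with no mailbox already under watch
 * (hba->mbox_timer) and no heartbeat still outstanding
 * (hba->heartbeat_active), triggers a MBX_NOWAIT heartbeat mailbox;
 * heartbeat_active is presumably cleared again by that mailbox's
 * completion path.
 */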
void
emlxs_thread_spawn(emlxs_hba_t *hba, void (*func) (), void *arg1, void *arg2)
{
    emlxs_port_t *port = &PPORT;
    emlxs_thread_t *ethread;

    /* Create a thread */
    ethread = (emlxs_thread_t *)kmem_alloc(sizeof (emlxs_thread_t),
        KM_NOSLEEP);

    if (ethread == NULL) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
            "Unable to allocate thread object.");

        return;
    }

    bzero(ethread, sizeof (emlxs_thread_t));
    ethread->hba = hba;
    ethread->flags = EMLXS_THREAD_INITD | EMLXS_THREAD_RUN_ONCE;
    ethread->func = func;
    ethread->arg1 = arg1;
    ethread->arg2 = arg2;

    /* Queue the thread on the spawn thread list */
    mutex_enter(&hba->spawn_lock);

    /* Don't spawn the thread if the spawn list is closed */
    if (hba->spawn_open == 0) {
        mutex_exit(&hba->spawn_lock);

        /* destroy the thread */
        kmem_free(ethread, sizeof (emlxs_thread_t));
        return;
    }

    if (hba->spawn_thread_head == NULL) {
        hba->spawn_thread_head = ethread;
    } else {
        hba->spawn_thread_tail->next = ethread;
        ethread->prev = hba->spawn_thread_tail;
    }

    hba->spawn_thread_tail = ethread;
    mutex_exit(&hba->spawn_lock);

    (void) thread_create(NULL, 0, &emlxs_thread, (char *)ethread, 0, &p0,
        TS_RUN, v.v_maxsyspri - 2);

} /* emlxs_thread_spawn() */
extern NODELIST *
emlxs_node_find_mac(emlxs_port_t *port, uint8_t *mac)
{
    NODELIST *nlp;
    uint32_t i;

    rw_enter(&port->node_rwlock, RW_READER);
    for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
        nlp = port->node_table[i];
        while (nlp != NULL) {
            /*
             * If portname matches mac address,
             * return NODELIST entry
             */
            if (nlp->nlp_portname.IEEE[0] == mac[0]) {
                if ((nlp->nlp_DID != BCAST_DID) &&
                    ((nlp->nlp_DID & FABRIC_DID_MASK) ==
                    FABRIC_DID_MASK)) {
                    nlp = (NODELIST *)nlp->nlp_list_next;
                    continue;
                }

                if ((nlp->nlp_portname.IEEE[1] == mac[1]) &&
                    (nlp->nlp_portname.IEEE[2] == mac[2]) &&
                    (nlp->nlp_portname.IEEE[3] == mac[3]) &&
                    (nlp->nlp_portname.IEEE[4] == mac[4]) &&
                    (nlp->nlp_portname.IEEE[5] == mac[5])) {
                    rw_exit(&port->node_rwlock);
                    return (nlp);
                }
            }

            nlp = (NODELIST *)nlp->nlp_list_next;
        }
    }
    rw_exit(&port->node_rwlock);

    EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_not_found_msg,
        "find: MAC=%02x%02x%02x%02x%02x%02x", mac[0], mac[1], mac[2],
        mac[3], mac[4], mac[5]);

    return (NULL);

} /* emlxs_node_find_mac() */
extern NODELIST *
emlxs_node_find_index(emlxs_port_t *port, uint32_t index,
    uint32_t nports_only)
{
    NODELIST *nlp;
    uint32_t i;
    uint32_t count;

    rw_enter(&port->node_rwlock, RW_READER);

    if (index > port->node_count - 1) {
        rw_exit(&port->node_rwlock);
        return (NULL);
    }

    count = 0;
    for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
        nlp = port->node_table[i];
        while (nlp != NULL) {
            /* Skip fabric ports if requested */
            if (nports_only &&
                (nlp->nlp_DID & 0xFFF000) == 0xFFF000) {
                nlp = (NODELIST *)nlp->nlp_list_next;
                continue;
            }

            if (count == index) {
                rw_exit(&port->node_rwlock);
                return (nlp);
            }

            nlp = (NODELIST *)nlp->nlp_list_next;
            count++;
        }
    }
    rw_exit(&port->node_rwlock);

    EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_not_found_msg, "find: index=%d",
        index);

    /* no match found */
    return ((NODELIST *)0);

} /* emlxs_node_find_index() */
extern NODELIST *
emlxs_node_find_wwpn(emlxs_port_t *port, uint8_t *wwpn)
{
    NODELIST *nlp;
    uint32_t i;
    uint32_t j;
    uint8_t *bptr1;
    uint8_t *bptr2;

    rw_enter(&port->node_rwlock, RW_READER);
    for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
        nlp = port->node_table[i];
        while (nlp != NULL) {
            bptr1 = (uint8_t *)&nlp->nlp_portname;
            bptr1 += 7;
            bptr2 = (uint8_t *)wwpn;
            bptr2 += 7;

            for (j = 0; j < 8; j++) {
                if (*bptr1-- != *bptr2--) {
                    break;
                }
            }

            if (j == 8) {
                rw_exit(&port->node_rwlock);
                return (nlp);
            }

            nlp = (NODELIST *)nlp->nlp_list_next;
        }
    }
    rw_exit(&port->node_rwlock);

    EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_not_found_msg,
        "find: wwpn=%02x%02x%02x%02x%02x%02x%02x%02x", wwpn[0], wwpn[1],
        wwpn[2], wwpn[3], wwpn[4], wwpn[5], wwpn[6], wwpn[7]);

    /* no match found */
    return ((NODELIST *)0);

} /* emlxs_node_find_wwpn() */
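/*
 * Note: the WWPN comparison above appears to run back-to-front on
 * purpose (both pointers start at byte 7 and walk down), likely
 * because port names sharing a vendor OUI differ mostly in their
 * low-order bytes, so mismatches fail after the first compare.
 */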
static void
emlxs_timer_check_loopback(emlxs_hba_t *hba)
{
    emlxs_port_t *port = &PPORT;
    emlxs_config_t *cfg = &CFG;
    int32_t reset = 0;

    if (!cfg[CFG_TIMEOUT_ENABLE].current) {
        return;
    }

    /* Check the loopback timer for expiration */
    mutex_enter(&EMLXS_PORT_LOCK);

    if (!hba->loopback_tics || (hba->timer_tics < hba->loopback_tics)) {
        mutex_exit(&EMLXS_PORT_LOCK);
        return;
    }

    hba->loopback_tics = 0;

    if (hba->flag & FC_LOOPBACK_MODE) {
        reset = 1;
    }

    mutex_exit(&EMLXS_PORT_LOCK);

    if (reset) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_dfc_debug_msg,
            "LOOPBACK_MODE: Expired. Resetting...");
        (void) emlxs_reset(port, FC_FCA_LINK_RESET);
    }

    return;

} /* emlxs_timer_check_loopback() */
extern NODELIST *
emlxs_node_find_did(emlxs_port_t *port, uint32_t did)
{
    emlxs_hba_t *hba = HBA;
    NODELIST *nlp;
    uint32_t hash;

    /* Check for invalid node ids */
    if ((did == 0) && (!(hba->flag & FC_LOOPBACK_MODE))) {
        return ((NODELIST *)0);
    }

    if (did & 0xff000000) {
        return ((NODELIST *)0);
    }

    /* Check for bcast node */
    if (did == BCAST_DID) {
        /* Use the base node here */
        return (&port->node_base);
    }
#ifdef MENLO_SUPPORT
    /* Check for menlo node */
    if (did == EMLXS_MENLO_DID) {
        /* Use the base node here */
        return (&port->node_base);
    }
#endif /* MENLO_SUPPORT */

    /* Check for host node */
    if (did == port->did && !(hba->flag & FC_LOOPBACK_MODE)) {
        /* Use the base node here */
        return (&port->node_base);
    }

    /*
     * Convert well known fabric addresses to the FABRIC_DID,
     * since we don't login to some of them
     */
    if (did == SCR_DID) {
        did = FABRIC_DID;
    }

    rw_enter(&port->node_rwlock, RW_READER);
    hash = EMLXS_DID_HASH(did);
    nlp = port->node_table[hash];
    while (nlp != NULL) {
        /* Check for obvious match */
        if (nlp->nlp_DID == did) {
            rw_exit(&port->node_rwlock);
            return (nlp);
        }

        /* Check for detailed match */
        else if (emlxs_node_match_did(port, nlp, did)) {
            rw_exit(&port->node_rwlock);
            return (nlp);
        }

        nlp = (NODELIST *)nlp->nlp_list_next;
    }
    rw_exit(&port->node_rwlock);

    EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_not_found_msg, "find: did=%x",
        did);

    /* no match found */
    return ((NODELIST *)0);

} /* emlxs_node_find_did() */
/*
 * A timeout of -1 selects the configured offline timeout (if any);
 * any other nonzero timeout (in seconds) is applied directly.
 */
extern void
emlxs_node_close(emlxs_port_t *port, NODELIST *ndlp, uint32_t channelno,
    int32_t timeout)
{
    emlxs_hba_t *hba = HBA;
    emlxs_config_t *cfg = &CFG;
    CHANNEL *cp;
    NODELIST *prev;
    uint32_t offline = 0;

    /* If node is on a channel service queue, then remove it */
    mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

    /* Return if node destroyed */
    if (!ndlp || !ndlp->nlp_active) {
        mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
        return;
    }

    /* Check offline support */
    if (timeout == -1) {
        if (cfg[CFG_OFFLINE_TIMEOUT].current) {
            timeout = cfg[CFG_OFFLINE_TIMEOUT].current;
            offline = 1;
        } else {
            timeout = 0;
        }
    }

    if (channelno == hba->channel_ip) {
        /* Clear IP XRI */
        ndlp->nlp_Xri = 0;
    }

    /* Check if node is already closed */
    if (ndlp->nlp_flag[channelno] & NLP_CLOSED) {
        if (ndlp->nlp_flag[channelno] & NLP_OFFLINE) {
            mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
            return;
        }

        if (offline) {
            ndlp->nlp_tics[channelno] = hba->timer_tics + timeout;
            ndlp->nlp_flag[channelno] |= NLP_OFFLINE;
            mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_closed_msg,
                "node=%p did=%06x channel=%d. offline=%d set.",
                ndlp, ndlp->nlp_DID, channelno, timeout);

        } else if (timeout) {
            ndlp->nlp_tics[channelno] = hba->timer_tics + timeout;
            mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_closed_msg,
                "node=%p did=%06x channel=%d. timeout=%d set.",
                ndlp, ndlp->nlp_DID, channelno, timeout);
        } else {
            mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
        }

        return;
    }

    /* Set the node closed */
    ndlp->nlp_flag[channelno] |= NLP_CLOSED;

    if (offline) {
        ndlp->nlp_tics[channelno] = hba->timer_tics + timeout;
        ndlp->nlp_flag[channelno] |= NLP_OFFLINE;
    } else if (timeout) {
        ndlp->nlp_tics[channelno] = hba->timer_tics + timeout;
    }

    /*
     * ndlp->nlp_next[] and cp->nodeq list have to be updated
     * simultaneously
     */
    if (ndlp->nlp_next[channelno]) {
        /* Remove node from channel queue */
        cp = &hba->chan[channelno];

        /* If this is the only node on list */
        if (cp->nodeq.q_first == (void *)ndlp &&
            cp->nodeq.q_last == (void *)ndlp) {
            cp->nodeq.q_last = NULL;
            cp->nodeq.q_first = NULL;
            cp->nodeq.q_cnt = 0;
        } else if (cp->nodeq.q_first == (void *)ndlp) {
            cp->nodeq.q_first = ndlp->nlp_next[channelno];
            ((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
                cp->nodeq.q_first;
            cp->nodeq.q_cnt--;
        } else {
            /* This is a little more difficult */
            /* Find the previous node in circular channel queue */
            prev = ndlp;
            while (prev->nlp_next[channelno] != ndlp) {
                prev = prev->nlp_next[channelno];
            }
            prev->nlp_next[channelno] = ndlp->nlp_next[channelno];

            if (cp->nodeq.q_last == (void *)ndlp) {
                cp->nodeq.q_last = (void *)prev;
            }
            cp->nodeq.q_cnt--;
        }

        /* Clear node */
        ndlp->nlp_next[channelno] = NULL;
    }

    mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

    return;

} /* emlxs_node_close() */
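/*
 * Queue-shape note: cp->nodeq is a circular singly-linked list
 * threaded through nlp_next[channelno], which gives the removal in
 * emlxs_node_close() above its three cases: sole node (empty the
 * queue), head node (advance q_first and re-aim the tail's next
 * pointer at the new head), or interior/tail node (walk around the
 * ring to find the predecessor). emlxs_node_open() below performs
 * the mirror-image insertion, giving the base node head priority.
 */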
extern void
emlxs_node_open(emlxs_port_t *port, NODELIST *ndlp, uint32_t channelno)
{
    emlxs_hba_t *hba = HBA;
    CHANNEL *cp;
    uint32_t found;
    NODELIST *nlp;
    MAILBOXQ *mbox;
    uint32_t i;
    int rc;

    /* If node needs servicing, then add it to the channel queues */
    mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

    /* Return if node destroyed */
    if (!ndlp || !ndlp->nlp_active) {
        mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
        return;
    }

    /* Return if node already open */
    if (!(ndlp->nlp_flag[channelno] & NLP_CLOSED)) {
        mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
        return;
    }

    /* Set the node open (not closed) */
    ndlp->nlp_flag[channelno] &= ~(NLP_CLOSED | NLP_OFFLINE);

    /* Clear the timer */
    ndlp->nlp_tics[channelno] = 0;

    /*
     * If the ptx or the tx queue needs servicing and
     * the node is not already on the channel queue
     */
    if ((ndlp->nlp_ptx[channelno].q_first ||
        ndlp->nlp_tx[channelno].q_first) && !ndlp->nlp_next[channelno]) {
        cp = &hba->chan[channelno];

        /* If so, then add it to the channel queue */
        if (cp->nodeq.q_first) {
            ((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
                (uint8_t *)ndlp;
            ndlp->nlp_next[channelno] = cp->nodeq.q_first;

            /* If this is not the base node then */
            /* add it to the tail */
            if (!ndlp->nlp_base) {
                cp->nodeq.q_last = (uint8_t *)ndlp;
            } else {
                /* Otherwise, add it to the head */
                /* The command node always gets priority */
                cp->nodeq.q_first = (uint8_t *)ndlp;
            }

            cp->nodeq.q_cnt++;
        } else {
            cp->nodeq.q_first = (uint8_t *)ndlp;
            cp->nodeq.q_last = (uint8_t *)ndlp;
            ndlp->nlp_next[channelno] = ndlp;
            cp->nodeq.q_cnt = 1;
        }
    }

    mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

    EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_opened_msg,
        "node=%p did=%06x channel=%d", ndlp, ndlp->nlp_DID, channelno);

    /* If link attention needs to be cleared */
    if ((hba->state == FC_LINK_UP) && (channelno == hba->channel_fcp)) {
        if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
            /* Revisit this code path: for SLI4, channel */
            /* fcp == els */
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
                "ADD CODE to RESUME RPIs node=%p did=%06x chan=%d",
                ndlp, ndlp->nlp_DID, channelno);
            goto done;
        }

        /* Scan to see if any FCP2 devices are still closed */
        found = 0;
        rw_enter(&port->node_rwlock, RW_READER);
        for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
            nlp = port->node_table[i];
            while (nlp != NULL) {
                if ((nlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
                    (nlp->nlp_flag[hba->channel_fcp] &
                    NLP_CLOSED)) {
                    found = 1;
                    break;
                }
                nlp = nlp->nlp_list_next;
            }

            if (found) {
                break;
            }
        }
        rw_exit(&port->node_rwlock);

        if (!found) {
            /* Clear link attention */
            if ((mbox = (MAILBOXQ *)emlxs_mem_get(hba,
                MEM_MBOX, 1))) {
                mutex_enter(&EMLXS_PORT_LOCK);

                /*
                 * If state is not FC_LINK_UP, then either the
                 * link has gone down or a FC_CLEAR_LA has
                 * already been issued
                 */
                if (hba->state != FC_LINK_UP) {
                    mutex_exit(&EMLXS_PORT_LOCK);
                    (void) emlxs_mem_put(hba, MEM_MBOX,
                        (uint8_t *)mbox);
                    goto done;
                }

                EMLXS_STATE_CHANGE_LOCKED(hba, FC_CLEAR_LA);
                hba->discovery_timer = 0;
                mutex_exit(&EMLXS_PORT_LOCK);

                emlxs_mb_clear_la(hba, mbox);

                rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbox,
                    MBX_NOWAIT, 0);
                if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                    (void) emlxs_mem_put(hba, MEM_MBOX,
                        (uint8_t *)mbox);
                }
            } else {
                /* Close the node and try again */
                /* in a few seconds */
                emlxs_node_close(port, ndlp, channelno, 5);
                return;
            }
        }
    }

done:

    /* Wake any sleeping threads */
    mutex_enter(&EMLXS_PKT_LOCK);
    cv_broadcast(&EMLXS_PKT_CV);
    mutex_exit(&EMLXS_PKT_LOCK);

    return;

} /* emlxs_node_open() */
extern int32_t
emlxs_ip_handle_unsol_req(emlxs_port_t *port, CHANNEL *cp, IOCBQ *iocbq,
    MATCHMAP *mp, uint32_t size)
{
    emlxs_hba_t *hba = HBA;
    fc_unsol_buf_t *ubp;
    IOCB *cmd;
    NETHDR *nd;
    NODELIST *ndlp;
    uint8_t *mac;
    emlxs_ub_priv_t *ub_priv;
    uint32_t sid;
    uint32_t i;
    uint32_t IpDropped = 1;
    uint32_t IpBcastReceived = 0;
    uint32_t IpSeqReceived = 0;

    cmd = &iocbq->iocb;
    ubp = NULL;

    for (i = 0; i < MAX_VPORTS; i++) {
        port = &VPORT(i);

        if (!(port->flag & EMLXS_PORT_BOUND) ||
            !(port->flag & EMLXS_PORT_IP_UP)) {
            continue;
        }

        ubp = (fc_unsol_buf_t *)emlxs_ub_get(port, size,
            FC_TYPE_IS8802_SNAP, 0);

        if (!ubp) {
            /* Theoretically we should never get here. */
            /* There should be one DMA buffer for every ub */
            /* buffer. If we are out of ub buffers */
            /* then somehow this matching has been corrupted */
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_ip_dropped_msg,
                "Buffer not found. paddr=%lx",
                PADDR(cmd->un.cont64[0].addrHigh,
                cmd->un.cont64[0].addrLow));
            continue;
        }

        bcopy(mp->virt, ubp->ub_buffer, size);
        ub_priv = ubp->ub_fca_private;

        nd = (NETHDR *)ubp->ub_buffer;
        mac = nd->fc_srcname.IEEE;
        ndlp = emlxs_node_find_mac(port, mac);

        if (ndlp) {
            sid = ndlp->nlp_DID;

            if ((ndlp->nlp_Xri == 0) &&
                !(ndlp->nlp_flag[hba->channel_ip] & NLP_RPI_XRI)) {
                (void) emlxs_create_xri(port, cp, ndlp);
            }
        }

        /*
         * If no node is found, then check if this is a
         * broadcast frame
         */
        else if (cmd->un.xrseq.w5.hcsw.Fctl & BC) {
            sid = cmd->un.ulpWord[4] & 0x00ffffff;
        } else {
            /* We have to drop this frame because we do not have */
            /* the S_ID of the request */
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_ip_dropped_msg,
                "Node not found. mac=%02x%02x%02x%02x%02x%02x",
                mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);

            (void) emlxs_ub_release((opaque_t)port, 1,
                &ubp->ub_token);

            continue;
        }

        if (cmd->un.xrseq.w5.hcsw.Fctl & BC) {
            IpBcastReceived++;
        } else {
            IpSeqReceived++;
        }

        /*
         * Setup frame header
         */
        ubp->ub_frame.r_ctl = cmd->un.xrseq.w5.hcsw.Rctl;
        ubp->ub_frame.type = cmd->un.xrseq.w5.hcsw.Type;
        ubp->ub_frame.s_id = sid;
        ubp->ub_frame.ox_id = ub_priv->token;
        ubp->ub_frame.rx_id = cmd->ULPCONTEXT;
        ubp->ub_class = FC_TRAN_CLASS3;

        emlxs_ub_callback(port, ubp);

        IpDropped = 0;
    }

    port = &PPORT;

out:

    if (IpDropped) {
        HBASTATS.IpDropped++;
    }

    if (IpBcastReceived) {
        HBASTATS.IpBcastReceived++;
    }

    if (IpSeqReceived) {
        HBASTATS.IpSeqReceived++;
    }

    return (0);

} /* emlxs_ip_handle_unsol_req() */
extern void
emlxs_event(emlxs_port_t *port, emlxs_event_t *evt, void *bp, uint32_t size)
{
    emlxs_hba_t *hba = HBA;
    emlxs_event_queue_t *eventq = &EVENTQ;
    emlxs_event_entry_t *entry;
    uint32_t i;
    uint32_t mask;

    if (emlxs_event_check(port, evt) == 0) {
        goto failed;
    }

    /* Create event entry */
    if (!(entry = (emlxs_event_entry_t *)kmem_alloc(
        sizeof (emlxs_event_entry_t), KM_NOSLEEP))) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_event_debug_msg,
            "%s: Unable to allocate event entry.", evt->label);

        goto failed;
    }

    /* Initialize */
    bzero(entry, sizeof (emlxs_event_entry_t));

    entry->evt = evt;
    entry->port = (void *)port;
    entry->bp = bp;
    entry->size = size;

    mutex_enter(&eventq->lock);

    /* Set the event timer */
    entry->timestamp = hba->timer_tics;
    if (evt->timeout) {
        entry->timer = entry->timestamp + evt->timeout;
    }

    /* Set the event id */
    entry->id = eventq->next_id++;

    /* Set last event table */
    mask = evt->mask;
    for (i = 0; i < 32; i++) {
        if (mask & 0x01) {
            eventq->last_id[i] = entry->id;
        }
        mask >>= 1;
    }

    /* Put event on bottom of queue */
    entry->next = NULL;
    if (eventq->count == 0) {
        entry->prev = NULL;
        eventq->first = entry;
        eventq->last = entry;
    } else {
        entry->prev = eventq->last;
        entry->prev->next = entry;
        eventq->last = entry;
    }
    eventq->count++;

    if ((entry->evt->mask == EVT_LINK) ||
        (entry->evt->mask == EVT_RSCN)) {
        hba->hba_event.new++;
    }

    mutex_exit(&eventq->lock);

    return;

failed:

    /*
     * Event was not queued: free the caller's context buffer, which
     * would otherwise have been freed later by emlxs_event_destroy().
     */
    if (bp && size) {
        kmem_free(bp, size);
    }

    return;

} /* emlxs_event() */
static uint32_t
emlxs_timer_check_pkts(emlxs_hba_t *hba, uint8_t *flag)
{
    emlxs_port_t *port = &PPORT;
    emlxs_config_t *cfg = &CFG;
    Q tmo;
    int32_t channelno;
    CHANNEL *cp;
    NODELIST *nlp;
    IOCBQ *prev;
    IOCBQ *next;
    IOCB *iocb;
    IOCBQ *iocbq;
    emlxs_buf_t *sbp;
    fc_packet_t *pkt;
    Q abort;
    uint32_t iotag;
    uint32_t rc;

    if (!cfg[CFG_TIMEOUT_ENABLE].current) {
        return (0);
    }

    if (hba->pkt_timer > hba->timer_tics) {
        return (0);
    }

    hba->pkt_timer = hba->timer_tics + EMLXS_PKT_PERIOD;

    bzero((void *)&tmo, sizeof (Q));

    /*
     * We must hold the locks here because we never know when an iocb
     * will be removed out from under us
     */
    mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

    for (channelno = 0; channelno < hba->chan_count; channelno++) {
        cp = &hba->chan[channelno];

        /* Scan the tx queues for each active node on the channel */

        /* Get the first node */
        nlp = (NODELIST *)cp->nodeq.q_first;

        while (nlp) {
            /* Scan the node's priority tx queue */
            prev = NULL;
            iocbq = (IOCBQ *)nlp->nlp_ptx[channelno].q_first;

            while (iocbq) {
                next = (IOCBQ *)iocbq->next;
                iocb = &iocbq->iocb;
                sbp = (emlxs_buf_t *)iocbq->sbp;

                /* Check if iocb has timed out */
                if (sbp && hba->timer_tics >= sbp->ticks) {
                    /* iocb timed out, now deque it */
                    if (next == NULL) {
                        nlp->nlp_ptx[channelno].q_last =
                            (uint8_t *)prev;
                    }

                    if (prev == NULL) {
                        nlp->nlp_ptx[channelno].q_first =
                            (uint8_t *)next;
                    } else {
                        prev->next = next;
                    }

                    iocbq->next = NULL;
                    nlp->nlp_ptx[channelno].q_cnt--;

                    /* Add this iocb to our local */
                    /* timeout queue */

                    /*
                     * This way we don't hold the TX_CHANNEL
                     * lock too long
                     */
                    if (tmo.q_first) {
                        ((IOCBQ *)tmo.q_last)->next = iocbq;
                        tmo.q_last = (uint8_t *)iocbq;
                        tmo.q_cnt++;
                    } else {
                        tmo.q_first = (uint8_t *)iocbq;
                        tmo.q_last = (uint8_t *)iocbq;
                        tmo.q_cnt = 1;
                    }
                    iocbq->next = NULL;

                } else {
                    prev = iocbq;
                }

                iocbq = next;

            }    /* while (iocbq) */

            /* Scan the node's tx queue */
            prev = NULL;
            iocbq = (IOCBQ *)nlp->nlp_tx[channelno].q_first;

            while (iocbq) {
                next = (IOCBQ *)iocbq->next;
                iocb = &iocbq->iocb;
                sbp = (emlxs_buf_t *)iocbq->sbp;

                /* Check if iocb has timed out */
                if (sbp && hba->timer_tics >= sbp->ticks) {
                    /* iocb timed out, now deque it */
                    if (next == NULL) {
                        nlp->nlp_tx[channelno].q_last =
                            (uint8_t *)prev;
                    }

                    if (prev == NULL) {
                        nlp->nlp_tx[channelno].q_first =
                            (uint8_t *)next;
                    } else {
                        prev->next = next;
                    }

                    iocbq->next = NULL;
                    nlp->nlp_tx[channelno].q_cnt--;

                    /* Add this iocb to our local */
                    /* timeout queue */

                    /*
                     * This way we don't hold the TX_CHANNEL
                     * lock too long
                     */
                    if (tmo.q_first) {
                        ((IOCBQ *)tmo.q_last)->next = iocbq;
                        tmo.q_last = (uint8_t *)iocbq;
                        tmo.q_cnt++;
                    } else {
                        tmo.q_first = (uint8_t *)iocbq;
                        tmo.q_last = (uint8_t *)iocbq;
                        tmo.q_cnt = 1;
                    }
                    iocbq->next = NULL;

                } else {
                    prev = iocbq;
                }

                iocbq = next;

            }    /* while (iocbq) */

            if (nlp == (NODELIST *)cp->nodeq.q_last) {
                nlp = NULL;
            } else {
                nlp = nlp->nlp_next[channelno];
            }

        }    /* while (nlp) */

    }    /* end of for */

    /* Now cleanup the iocb's */
    iocbq = (IOCBQ *)tmo.q_first;
    while (iocbq) {
        /* Free the IoTag and the bmp */
        iocb = &iocbq->iocb;
        channelno = ((CHANNEL *)iocbq->channel)->channelno;
        sbp = iocbq->sbp;
        if (sbp && (sbp != STALE_PACKET)) {
            if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
                hba->fc_table[sbp->iotag] = NULL;
                emlxs_sli4_free_xri(hba, sbp, sbp->xp);
            } else {
                (void) emlxs_unregister_pkt(
                    (CHANNEL *)iocbq->channel,
                    iocb->ULPIOTAG, 0);
            }

            mutex_enter(&sbp->mtx);
            sbp->pkt_flags |= PACKET_IN_TIMEOUT;
            mutex_exit(&sbp->mtx);
        }

        iocbq = (IOCBQ *)iocbq->next;

    }    /* end of while */

    mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

    /* Now complete the transmit timeouts outside the locks */
    iocbq = (IOCBQ *)tmo.q_first;
    while (iocbq) {
        /* Save the next iocbq for now */
        next = (IOCBQ *)iocbq->next;

        /* Unlink this iocbq */
        iocbq->next = NULL;

        /* Get the pkt */
        sbp = (emlxs_buf_t *)iocbq->sbp;

        if (sbp) {
            /* Warning: Some FCT sbp's don't have */
            /* fc_packet objects */
            pkt = PRIV2PKT(sbp);

            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_timeout_msg,
                "TXQ abort: sbp=%p iotag=%x tmo=%d", sbp,
                sbp->iotag, (pkt) ? pkt->pkt_timeout : 0);

            if (hba->state >= FC_LINK_UP) {
                emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
                    IOERR_ABORT_TIMEOUT, 1);
            } else {
                emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
                    IOERR_LINK_DOWN, 1);
            }
        }

        iocbq = next;

    }    /* end of while */

    /* Now check the chip */
    bzero((void *)&abort, sizeof (Q));

    /* Check the HBA for outstanding IOs */
    rc = 0;
    mutex_enter(&EMLXS_FCTAB_LOCK);
    for (iotag = 1; iotag < hba->max_iotag; iotag++) {
        sbp = hba->fc_table[iotag];
        if (sbp && (sbp != STALE_PACKET) &&
            (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
            !(sbp->pkt_flags & (PACKET_IN_FLUSH |
            PACKET_XRI_CLOSED)) &&
            (hba->timer_tics >= sbp->ticks)) {
            rc = emlxs_pkt_chip_timeout(sbp->iocbq.port,
                sbp, &abort, flag);

            if (rc) {
                break;
            }
        }
    }
    mutex_exit(&EMLXS_FCTAB_LOCK);

    /* Now put the iocb's on the tx queue */
    iocbq = (IOCBQ *)abort.q_first;
    while (iocbq) {
        /* Save the next iocbq for now */
        next = (IOCBQ *)iocbq->next;

        /* Unlink this iocbq */
        iocbq->next = NULL;

        /* Send this iocbq */
        emlxs_tx_put(iocbq, 1);

        iocbq = next;
    }

    /* Now trigger IO channel service to send these abort iocbq */
    for (channelno = 0; channelno < hba->chan_count; channelno++) {
        if (!flag[channelno]) {
            continue;
        }
        cp = &hba->chan[channelno];

        EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, 0);
    }

    if (rc == 1) {
        /* Spawn a thread to reset the link */
        emlxs_thread_spawn(hba, emlxs_reset_link_thread, NULL, NULL);
    } else if (rc == 2) {
        /* Spawn a thread to reset the adapter */
        emlxs_thread_spawn(hba, emlxs_restart_thread, NULL, NULL);
    }

    return (rc);

} /* emlxs_timer_check_pkts() */
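/*
 * Flow summary for emlxs_timer_check_pkts(): timed-out iocbs are
 * first unlinked onto a local queue (tmo) while EMLXS_TX_CHANNEL_LOCK
 * is held, and only completed after the lock is dropped, so
 * completion callbacks never run under the channel lock. The return
 * code comes from emlxs_pkt_chip_timeout(): 1 escalates to a link
 * reset thread, 2 to a full adapter restart thread.
 */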
extern int32_t
emlxs_ip_handle_event(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
{
    emlxs_port_t *port = &PPORT;
    IOCB *cmd;
    emlxs_buf_t *sbp;
    NODELIST *ndlp;

    cmd = &iocbq->iocb;

    HBASTATS.IpEvent++;

    sbp = (emlxs_buf_t *)iocbq->sbp;

    if (!sbp) {
        HBASTATS.IpStray++;

        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_ip_completion_msg,
            "cmd=0x%x iotag=0x%x status=0x%x perr=0x%x",
            (uint32_t)cmd->ULPCOMMAND, (uint32_t)cmd->ULPIOTAG,
            cmd->ULPSTATUS, cmd->un.ulpWord[4]);

        return (EIO);
    }

    if (cp->channelno != hba->channel_ip) {
        HBASTATS.IpStray++;

        return (0);
    }

    port = sbp->iocbq.port;

    switch (cmd->ULPCOMMAND) {
    /*
     * Error: Abnormal BCAST command completion (Local error)
     */
    case CMD_XMIT_BCAST_CN:
    case CMD_XMIT_BCAST64_CN:

        HBASTATS.IpBcastCompleted++;
        HBASTATS.IpBcastError++;

        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ip_detail_msg,
            "XMIT BCAST completion error cmd=0x%x status=0x%x "
            "[%08x,%08x]", cmd->ULPCOMMAND, cmd->ULPSTATUS,
            cmd->un.ulpWord[4], cmd->un.ulpWord[5]);

        emlxs_pkt_complete(sbp, cmd->ULPSTATUS,
            cmd->un.grsp.perr.statLocalError, 1);

        break;

    /*
     * Error: Abnormal XMIT SEQUENCE command completion
     * (Local error)
     */
    case CMD_XMIT_SEQUENCE_CR:
    case CMD_XMIT_SEQUENCE64_CR:

        HBASTATS.IpSeqCompleted++;
        HBASTATS.IpSeqError++;

        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ip_detail_msg,
            "XMIT SEQUENCE CR completion error: cmd=%x status=0x%x "
            "[%08x,%08x]", cmd->ULPCOMMAND, cmd->ULPSTATUS,
            cmd->un.ulpWord[4], cmd->un.ulpWord[5]);

        emlxs_pkt_complete(sbp, cmd->ULPSTATUS,
            cmd->un.grsp.perr.statLocalError, 1);

        break;

    /*
     * Normal BCAST completion
     */
    case CMD_XMIT_BCAST_CX:
    case CMD_XMIT_BCAST64_CX:

        HBASTATS.IpBcastCompleted++;
        HBASTATS.IpBcastGood++;

        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ip_detail_msg,
            "XMIT BCAST CX completion: cmd=%x status=0x%x "
            "[%08x,%08x]", cmd->ULPCOMMAND, cmd->ULPSTATUS,
            cmd->un.ulpWord[4], cmd->un.ulpWord[5]);

        emlxs_pkt_complete(sbp, cmd->ULPSTATUS,
            cmd->un.grsp.perr.statLocalError, 1);

        break;

    /*
     * Normal XMIT SEQUENCE completion
     */
    case CMD_XMIT_SEQUENCE_CX:
    case CMD_XMIT_SEQUENCE64_CX:

        HBASTATS.IpSeqCompleted++;

        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ip_detail_msg,
            "XMIT SEQUENCE CX completion: cmd=%x status=0x%x "
            "[%08x,%08x]", cmd->ULPCOMMAND, cmd->ULPSTATUS,
            cmd->un.ulpWord[4], cmd->un.ulpWord[5]);

        if (cmd->ULPSTATUS) {
            HBASTATS.IpSeqError++;

            if ((cmd->ULPSTATUS == IOSTAT_LOCAL_REJECT) &&
                ((cmd->un.ulpWord[4] & 0xff) == IOERR_NO_XRI)) {
                ndlp = (NODELIST *)sbp->node;

                if ((cmd->ULPCONTEXT == ndlp->nlp_Xri) &&
                    !(ndlp->nlp_flag[hba->channel_ip] &
                    NLP_RPI_XRI)) {
                    ndlp->nlp_Xri = 0;
                    (void) emlxs_create_xri(port, cp, ndlp);
                }
            }
        } else {
            HBASTATS.IpSeqGood++;
        }

        emlxs_pkt_complete(sbp, cmd->ULPSTATUS,
            cmd->un.grsp.perr.statLocalError, 1);

        break;

    default:

        HBASTATS.IpStray++;

        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_invalid_ip_msg,
            "Invalid iocb: cmd=0x%x", cmd->ULPCOMMAND);

        break;

    }    /* switch(cmd->ULPCOMMAND) */

    return (0);

} /* emlxs_ip_handle_event() */
extern int32_t
emlxs_ip_handle_rcv_seq_list(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
{
    emlxs_port_t *port = &PPORT;
    IOCB *cmd;
    uint64_t bdeAddr;
    MATCHMAP *mp = NULL;
    HBQE_t *hbqE;
    uint32_t hbq_id;
    uint32_t hbqe_tag;
    RING *rp;

    /*
     * No action required for now.
     */
    cmd = &iocbq->iocb;
    rp = &hba->sli.sli3.ring[cp->channelno];

    HBASTATS.IpRcvEvent++;

    EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ip_detail_msg,
        "Receive sequence list: cmd=0x%x iotag=0x%x status=0x%x "
        "w4=0x%x channelno=0x%x", cmd->ULPCOMMAND, cmd->ULPIOTAG,
        cmd->ULPSTATUS, cmd->un.ulpWord[4], cp->channelno);

    /* Extract the HBQ tags up front so hbq_id is valid at "out", */
    /* even when the iocb completed with an error status */
    hbqE = (HBQE_t *)&iocbq->iocb;
    hbq_id = hbqE->unt.ext.HBQ_tag;
    hbqe_tag = hbqE->unt.ext.HBQE_tag;

    if (cmd->ULPSTATUS) {
        goto out;
    }

    if (hba->flag & FC_HBQ_ENABLED) {
        HBQ_INIT_t *hbq;

        hbq = &hba->sli.sli3.hbq_table[hbq_id];

        HBASTATS.IpUbPosted--;

        if (hbqe_tag >= hbq->HBQ_numEntries) {
            mp = NULL;
        } else {
            mp = hba->sli.sli3.hbq_table
                [hbq_id].HBQ_PostBufs[hbqe_tag];
        }
    } else {
        /* Check for valid buffer */
        if (!(cmd->un.cont64[0].tus.f.bdeFlags & BUFF_TYPE_INVALID)) {
            bdeAddr = PADDR(cmd->un.cont64[0].addrHigh,
                cmd->un.cont64[0].addrLow);
            mp = emlxs_mem_get_vaddr(hba, rp, bdeAddr);
        }
    }

out:

    if (hba->flag & FC_HBQ_ENABLED) {
        emlxs_update_HBQ_index(hba, hbq_id);
    } else {
        if (mp) {
            (void) emlxs_mem_put(hba, MEM_IPBUF, (uint8_t *)mp);
        }
        (void) emlxs_post_buffer(hba, rp, 1);
    }

    HBASTATS.IpDropped++;

    return (0);

} /* emlxs_ip_handle_rcv_seq_list() */
static void
emlxs_timer_check_discovery(emlxs_port_t *port)
{
    emlxs_hba_t *hba = HBA;
    emlxs_config_t *cfg = &CFG;
    int32_t send_clear_la;
    uint32_t found;
    uint32_t i;
    NODELIST *nlp;
    MAILBOXQ *mbox;
    int rc;

    if (!cfg[CFG_TIMEOUT_ENABLE].current) {
        return;
    }

    /* Check the discovery timer for expiration */
    send_clear_la = 0;
    mutex_enter(&EMLXS_PORT_LOCK);
    while (hba->discovery_timer &&
        (hba->timer_tics >= hba->discovery_timer) &&
        (hba->state == FC_LINK_UP)) {
        send_clear_la = 1;

        /* Perform a flush on fcp2 nodes that are still closed */
        found = 0;
        rw_enter(&port->node_rwlock, RW_READER);
        for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
            nlp = port->node_table[i];
            while (nlp != NULL) {
                if ((nlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
                    (nlp->nlp_flag[hba->channel_fcp] &
                    NLP_CLOSED)) {
                    found = 1;
                    break;
                }
                nlp = nlp->nlp_list_next;
            }

            if (found) {
                break;
            }
        }
        rw_exit(&port->node_rwlock);

        if (!found) {
            break;
        }

        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_missing_msg,
            "FCP2 device (did=%06x) missing. Flushing...",
            nlp->nlp_DID);

        mutex_exit(&EMLXS_PORT_LOCK);

        (void) emlxs_mb_unreg_did(port, nlp->nlp_DID, NULL, NULL, NULL);

        mutex_enter(&EMLXS_PORT_LOCK);
    }
    mutex_exit(&EMLXS_PORT_LOCK);

    /* Try to send clear link attention, if needed */
    if ((send_clear_la == 1) &&
        (mbox = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1))) {
        mutex_enter(&EMLXS_PORT_LOCK);

        /*
         * If state is not FC_LINK_UP, then either the link has gone
         * down or a FC_CLEAR_LA has already been issued
         */
        if (hba->state != FC_LINK_UP) {
            mutex_exit(&EMLXS_PORT_LOCK);
            (void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbox);
        } else {
            /* Change state and clear discovery timer */
            EMLXS_STATE_CHANGE_LOCKED(hba, FC_CLEAR_LA);

            hba->discovery_timer = 0;

            mutex_exit(&EMLXS_PORT_LOCK);

            /* Prepare and send the CLEAR_LA command */
            emlxs_mb_clear_la(hba, mbox);

            rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbox, MBX_NOWAIT, 0);
            if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
                (void) emlxs_mem_put(hba, MEM_MBOX,
                    (uint8_t *)mbox);
            }
        }
    }

    return;

} /* emlxs_timer_check_discovery() */
extern void
emlxs_node_destroy_all(emlxs_port_t *port)
{
    emlxs_hba_t *hba = HBA;
    NODELIST *next;
    NODELIST *ndlp;
    RPIobj_t *rp;
    uint8_t *wwn;
    uint32_t i;

    /* Flush and free the nodes */
    rw_enter(&port->node_rwlock, RW_WRITER);
    for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
        ndlp = port->node_table[i];
        port->node_table[i] = 0;
        while (ndlp != NULL) {
            next = ndlp->nlp_list_next;
            ndlp->nlp_list_next = NULL;
            ndlp->nlp_list_prev = NULL;
            ndlp->nlp_active = 0;

            if (port->node_count) {
                port->node_count--;
            }

            wwn = (uint8_t *)&ndlp->nlp_portname;
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_destroy_msg,
                "did=%06x rpi=%x "
                "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x count=%d",
                ndlp->nlp_DID, ndlp->nlp_Rpi, wwn[0], wwn[1],
                wwn[2], wwn[3], wwn[4], wwn[5], wwn[6], wwn[7],
                port->node_count);

            (void) emlxs_tx_node_flush(port, ndlp, 0, 0, 0);

            /* Break Node/RPI binding */
            if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
                rp = EMLXS_NODE_TO_RPI(hba, ndlp);
                ndlp->RPIp = NULL;

                if (rp) {
                    rp->node = NULL;
                    (void) emlxs_sli4_free_rpi(hba, rp);
                }
            }

            (void) emlxs_mem_put(hba, MEM_NLP, (uint8_t *)ndlp);

            ndlp = next;
        }
    }
    port->node_count = 0;
    rw_exit(&port->node_rwlock);

    /* Clean the base node */
    mutex_enter(&EMLXS_PORT_LOCK);
    port->node_base.nlp_list_next = NULL;
    port->node_base.nlp_list_prev = NULL;
    port->node_base.nlp_active = 1;
    mutex_exit(&EMLXS_PORT_LOCK);

    /* Flush the base node */
    (void) emlxs_tx_node_flush(port, &port->node_base, 0, 1, 0);
    (void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);

    return;

} /* emlxs_node_destroy_all() */
static void
emlxs_timer_check_ub(emlxs_port_t *port)
{
    emlxs_hba_t *hba = HBA;
    emlxs_unsol_buf_t *ulistp;
    fc_unsol_buf_t *ubp;
    emlxs_ub_priv_t *ub_priv;
    uint32_t i;

    if (port->ub_timer > hba->timer_tics) {
        return;
    }

    port->ub_timer = hba->timer_tics + EMLXS_UB_PERIOD;

    /* Check the unsolicited buffers */
    mutex_enter(&EMLXS_UB_LOCK);

    ulistp = port->ub_pool;
    while (ulistp) {
        /* Check buffers in this pool */
        for (i = 0; i < ulistp->pool_nentries; i++) {
            ubp = (fc_unsol_buf_t *)&ulistp->fc_ubufs[i];
            ub_priv = ubp->ub_fca_private;

            if (!(ub_priv->flags & EMLXS_UB_IN_USE)) {
                continue;
            }

            /* If buffer has timed out, print message and */
            /* increase timeout */
            if ((ub_priv->time + ub_priv->timeout) <=
                hba->timer_tics) {
                ub_priv->flags |= EMLXS_UB_TIMEOUT;

                EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sfs_debug_msg,
                    "Stale UB buffer detected (%d mins): "
                    "buffer=%p (%x,%x,%x,%x)",
                    (ub_priv->timeout / 60), ubp,
                    ubp->ub_frame.type, ubp->ub_frame.s_id,
                    ubp->ub_frame.ox_id, ubp->ub_frame.rx_id);

                /* Increase timeout period */

                /* If timeout was 5 mins or less, */
                /* increase it to 10 mins */
                if (ub_priv->timeout <= (5 * 60)) {
                    ub_priv->timeout = (10 * 60);
                }
                /* If timeout was 10 mins or less, */
                /* increase it to 30 mins */
                else if (ub_priv->timeout <= (10 * 60)) {
                    ub_priv->timeout = (30 * 60);
                }
                /* Otherwise double it. */
                else {
                    ub_priv->timeout *= 2;
                }
            }
        }

        ulistp = ulistp->pool_next;
    }

    mutex_exit(&EMLXS_UB_LOCK);

    return;

} /* emlxs_timer_check_ub() */
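/*
 * Back-off schedule applied by emlxs_timer_check_ub() above:
 *
 *	current timeout		next timeout
 *	<= 5 min		10 min
 *	<= 10 min		30 min
 *	> 10 min		doubled
 *
 * Note the buffer is not reclaimed here; it is only flagged
 * EMLXS_UB_TIMEOUT and logged, leaving release to the buffer's owner.
 */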
extern void
emlxs_node_rm(emlxs_port_t *port, NODELIST *ndlp)
{
    emlxs_hba_t *hba = HBA;
    NODELIST *np;
    NODELIST *prevp;
    RPIobj_t *rp;
    uint8_t *wwn;
    uint32_t hash;

    rw_enter(&port->node_rwlock, RW_WRITER);
    hash = EMLXS_DID_HASH(ndlp->nlp_DID);
    np = port->node_table[hash];
    prevp = NULL;
    while (np != NULL) {
        if (np->nlp_DID == ndlp->nlp_DID) {
            if (prevp == NULL) {
                port->node_table[hash] = np->nlp_list_next;
            } else {
                prevp->nlp_list_next = np->nlp_list_next;
            }

            if (port->node_count) {
                port->node_count--;
            }

            wwn = (uint8_t *)&ndlp->nlp_portname;
            EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_node_destroy_msg,
                "did=%06x rpi=%x "
                "wwpn=%02x%02x%02x%02x%02x%02x%02x%02x count=%d",
                ndlp->nlp_DID, ndlp->nlp_Rpi, wwn[0], wwn[1],
                wwn[2], wwn[3], wwn[4], wwn[5], wwn[6], wwn[7],
                port->node_count);

            (void) emlxs_tx_node_flush(port, ndlp, 0, 1, 0);

            ndlp->nlp_active = 0;

            /* Break Node/RPI binding */
            if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
                rp = EMLXS_NODE_TO_RPI(hba, ndlp);
                ndlp->RPIp = NULL;

                if (rp) {
                    rp->node = NULL;
                    (void) emlxs_sli4_free_rpi(hba, rp);
                }
            }

            (void) emlxs_mem_put(hba, MEM_NLP, (uint8_t *)ndlp);

            break;
        }
        prevp = np;
        np = np->nlp_list_next;
    }
    rw_exit(&port->node_rwlock);

    return;

} /* emlxs_node_rm() */
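/*
 * XRI lifecycle note: NLP_RPI_XRI serves as a "create_xri pending"
 * latch for a channel. emlxs_create_xri() below sets it before
 * issuing CMD_CREATE_XRI_CR; emlxs_handle_create_xri() clears it,
 * caching the returned exchange ID in ndlp->nlp_Xri on success; and
 * emlxs_node_close() zeroes nlp_Xri when the IP channel closes, so
 * the next unsolicited sequence forces a fresh exchange.
 */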
/*
 * Issue an iocb command to create an exchange with the remote Nport
 * specified by the NODELIST entry.
 */
extern int32_t
emlxs_create_xri(emlxs_port_t *port, CHANNEL *cp, NODELIST *ndlp)
{
    emlxs_hba_t *hba = HBA;
    IOCB *icmd;
    IOCBQ *iocbq;
    fc_packet_t *pkt;
    emlxs_buf_t *sbp;
    uint16_t iotag;

    /* Check if an XRI has already been requested */
    mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
    if (ndlp->nlp_Xri != 0 ||
        (ndlp->nlp_flag[cp->channelno] & NLP_RPI_XRI)) {
        mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
        return (0);
    }
    ndlp->nlp_flag[cp->channelno] |= NLP_RPI_XRI;
    mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

    if (!(pkt = emlxs_pkt_alloc(port, 0, 0, 0, KM_NOSLEEP))) {
        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ip_detail_msg,
            "create_xri failed: Unable to allocate pkt. did=0x%x",
            ndlp->nlp_DID);

        goto fail;
    }

    sbp = (emlxs_buf_t *)pkt->pkt_fca_private;
    iocbq = &sbp->iocbq;

    /* Clear the PACKET_ULP_OWNED flag */
    sbp->pkt_flags &= ~PACKET_ULP_OWNED;

    /* Get the iotag by registering the packet */
    iotag = emlxs_register_pkt(cp, sbp);

    if (!iotag) {
        /*
         * No more command slots available, retry later
         */
        emlxs_pkt_free(pkt);

        EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ip_detail_msg,
            "create_xri failed: Unable to allocate IOTAG. did=0x%x",
            ndlp->nlp_DID);

        goto fail;
    }

    icmd = &iocbq->iocb;
    icmd->ULPIOTAG = iotag;
    icmd->ULPCONTEXT = ndlp->nlp_Rpi;
    icmd->ULPLE = 1;
    icmd->ULPCOMMAND = CMD_CREATE_XRI_CR;
    icmd->ULPOWNER = OWN_CHIP;

    /* Initialize iocbq */
    iocbq->port = (void *)port;
    iocbq->node = (void *)ndlp;
    iocbq->channel = (void *)cp;

    mutex_enter(&sbp->mtx);
    sbp->node = (void *)ndlp;
    sbp->channel = cp;
    mutex_exit(&sbp->mtx);

    EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_ip_detail_msg,
        "create_xri sent: DID=0x%x Xri=0x%x iotag=0x%x", ndlp->nlp_DID,
        ndlp->nlp_Xri, iotag);

    EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);

    return (0);

fail:

    /* Clear the XRI flag */
    mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
    ndlp->nlp_flag[cp->channelno] &= ~NLP_RPI_XRI;
    mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

    return (1);

} /* emlxs_create_xri() */