/*
 * Return an SMT MBuf to the free list once its use count drops to zero.
 * Panics (HWM_E0003) when called with a NULL MBuf pointer.
 */
void smt_free_mbuf(struct s_smc *smc, SMbuf *mb)
{
	if (!mb) {
		SMT_PANIC(smc,HWM_E0003,HWM_E0003_MSG) ;
		return ;
	}

	mb->sm_use_count-- ;
	DB_GEN("free_mbuf: sm_use_count = %d",mb->sm_use_count,0,3) ;

	/*
	 * A non-zero use count means the MBuf is still queued somewhere
	 * else and must not be linked into the free MBuf queue yet.
	 */
	if (mb->sm_use_count)
		return ;

	DB_GEN("free SMbuf: mb = %x",(void *)mb,0,3) ;
#ifndef	COMMON_MB_POOL
	mb->sm_next = smc->os.hwm.mbuf_pool.mb_free ;
	smc->os.hwm.mbuf_pool.mb_free = mb ;
#else
	mb->sm_next = mb_free ;
	mb_free = mb ;
#endif
}
/*
 * Repair the transmit and receive descriptor rings after the hardware
 * has been stopped, reprogram the BMU descriptor-address registers with
 * the repaired ring positions and restart any queue that still holds
 * pending descriptors.  Must only be called in hw_state == STOPPED;
 * otherwise it panics (HWM_E0013) and performs no repair.
 */
void mac_drv_repair_descr(struct s_smc *smc)
{
	u_long	phys ;

	if (smc->hw.hw_state != STOPPED) {
		SK_BREAK() ;
		SMT_PANIC(smc,HWM_E0013,HWM_E0013_MSG) ;
		return ;
	}

	/*
	 * repair tx queues: don't start
	 */
	phys = repair_txd_ring(smc,smc->hw.fp.tx[QUEUE_A0]) ;
	outpd(ADDR(B5_XA_DA),phys) ;
	if (smc->hw.fp.tx_q[QUEUE_A0].tx_used) {
		/* restart async tx only if descriptors are outstanding */
		outpd(ADDR(B0_XA_CSR),CSR_START) ;
	}
	phys = repair_txd_ring(smc,smc->hw.fp.tx[QUEUE_S]) ;
	outpd(ADDR(B5_XS_DA),phys) ;
	if (smc->hw.fp.tx_q[QUEUE_S].tx_used) {
		/* restart sync tx only if descriptors are outstanding */
		outpd(ADDR(B0_XS_CSR),CSR_START) ;
	}

	/*
	 * repair rx queues
	 */
	phys = repair_rxd_ring(smc,smc->hw.fp.rx[QUEUE_R1]) ;
	outpd(ADDR(B4_R1_DA),phys) ;
	outpd(ADDR(B0_R1_CSR),CSR_START) ;
}
/*
 * Interrupt handler for the FORMAC+ status register set 1 (ST1U/ST1L).
 * Panics on parity errors and tx buffer underruns, restarts the
 * transmitter after tx aborts or queue locks (re-reading the status
 * registers until no further abort condition is latched), and kicks the
 * LLC transmit path when a transmission completed or was restarted.
 *
 * stu, stl: latched contents of FM_ST1U / FM_ST1L.
 */
void mac1_irq(struct s_smc *smc, u_short stu, u_short stl)
{
	int	restart_tx = 0 ;
again:
	/*
	 * parity error: note encoding error is not possible in tag mode
	 */
	if (stl & (FM_SPCEPDS  |	/* parity err. syn.q.*/
		   FM_SPCEPDA0 |	/* parity err. a.q.0 */
		   FM_SPCEPDA1)) {	/* parity err. a.q.1 */
		SMT_PANIC(smc,SMT_E0134, SMT_E0134_MSG) ;
	}
	/*
	 * buffer underrun: can only occur if a tx threshold is specified
	 */
	if (stl & (FM_STBURS  |		/* tx buffer underrun syn.q.*/
		   FM_STBURA0 |		/* tx buffer underrun a.q.0 */
		   FM_STBURA1)) {	/* tx buffer underrun a.q.1 */
		SMT_PANIC(smc,SMT_E0133, SMT_E0133_MSG) ;
	}

	if ( (stu & (FM_SXMTABT |	/* transmit abort */
		     FM_STXABRS |	/* syn. tx abort */
		     FM_STXABRA0)) ||	/* asyn. tx abort */
	     (stl & (FM_SQLCKS |	/* lock for syn. q. */
		     FM_SQLCKA0)) ) {	/* lock for asyn. q. */
		formac_tx_restart(smc) ;	/* init tx */
		restart_tx = 1 ;
		/*
		 * Re-read the status registers: restarting the FORMAC may
		 * have latched new events.  Mask out the end-of-frame bits
		 * that are handled below, and loop while anything remains.
		 */
		stu = inpw(FM_A(FM_ST1U)) ;
		stl = inpw(FM_A(FM_ST1L)) ;
		stu &= ~ (FM_STECFRMA0 | FM_STEFRMA0 | FM_STEFRMS) ;
		if (stu || stl)
			goto again ;
	}

	if (stu & (FM_STEFRMA0 |	/* end of asyn tx */
		    FM_STEFRMS)) {	/* end of sync tx */
		restart_tx = 1 ;
	}

	if (restart_tx)
		llc_restart_tx(smc) ;
}
/*
 * Release every receive buffer still queued on rx queue 1: clear the
 * OWN bit of each RxD, complete the pending DMA for every fragment and
 * hand the buffers back to the OS layer via mac_drv_clear_rxd().
 * Must only be called in hw_state == STOPPED; otherwise it panics
 * (HWM_E0012) and leaves the queue untouched.
 */
void mac_drv_clear_rx_queue(struct s_smc *smc)
{
	struct s_smt_fp_rxd volatile *r ;
	struct s_smt_fp_rxd volatile *next_rxd ;
	struct s_smt_rx_queue *queue ;
	int	frag_count ;	/* number of RxDs of the current buffer */
	int	i ;

	if (smc->hw.hw_state != STOPPED) {
		SK_BREAK() ;
		SMT_PANIC(smc,HWM_E0012,HWM_E0012_MSG) ;
		return ;
	}

	queue = smc->hw.fp.rx[QUEUE_R1] ;
	DB_RX("clear_rx_queue",0,0,5) ;

	/*
	 * dma_complete and mac_drv_clear_rxd for all RxDs / receive buffers
	 */
	r = queue->rx_curr_get ;
	while (queue->rx_used) {
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
		DB_RX("switch OWN bit of RxD 0x%x ",r,0,5) ;
		r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
		frag_count = 1 ;
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
		r = r->rxd_next ;
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
		/*
		 * A buffer may span several RxDs; all fragments up to the
		 * next descriptor carrying the start-of-buffer (ST_BUF)
		 * flag belong to the current buffer.
		 */
		while (r != queue->rx_curr_put &&
			!(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
			DB_RX("Check STF bit in %x",(void *)r,0,5) ;
			r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
			r = r->rxd_next ;
			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
			frag_count++ ;
		}
		DB_RX("STF bit found",0,0,5) ;
		next_rxd = r ;
		/* complete the DMA transfer of every fragment */
		for (r=queue->rx_curr_get,i=frag_count; i ; r=r->rxd_next,i--){
			DB_RX("dma_complete for RxD %x",(void *)r,0,5) ;
			dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
		}
		DB_RX("mac_drv_clear_rxd: RxD %x frag_count %d ",
			(void *)queue->rx_curr_get,frag_count,5) ;
		mac_drv_clear_rxd(smc,queue->rx_curr_get,frag_count) ;

		queue->rx_curr_get = next_rxd ;
		queue->rx_used -= frag_count ;
		queue->rx_free += frag_count ;
	}
}
int mac_drv_init(struct s_smc *smc) { if (sizeof(struct s_smt_fp_rxd) % 16) { SMT_PANIC(smc,HWM_E0001,HWM_E0001_MSG) ; } if (sizeof(struct s_smt_fp_txd) % 16) { SMT_PANIC(smc,HWM_E0002,HWM_E0002_MSG) ; } /* * get the required memory for the RxDs and TxDs */ if (!(smc->os.hwm.descr_p = (union s_fp_descr volatile *) mac_drv_get_desc_mem(smc,(u_int) (RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd)))) { return(1) ; /* no space the hwm modul can't work */ } /* * get the memory for the SMT MBufs */ #ifndef MB_OUTSIDE_SMC smc->os.hwm.mbuf_pool.mb_start=(SMbuf *)(&smc->os.hwm.mbuf_pool.mb[0]) ; #else #ifndef COMMON_MB_POOL if (!(smc->os.hwm.mbuf_pool.mb_start = (SMbuf *) mac_drv_get_space(smc, MAX_MBUF*sizeof(SMbuf)))) { return(1) ; /* no space the hwm modul can't work */ } #else if (!mb_start) { if (!(mb_start = (SMbuf *) mac_drv_get_space(smc, MAX_MBUF*sizeof(SMbuf)))) { return(1) ; /* no space the hwm modul can't work */ } } #endif #endif return (0) ; }
/*
 * Prepare the hardware module for the transmission of one frame:
 * select the transmit queue, pre-compute the descriptor control word,
 * and classify the frame (LAN transmit, local SMT delivery, or both)
 * from its frame control byte.
 *
 * Returns the updated frame_status with the LAN_TX / LOC_TX /
 * RING_DOWN / OUT_OF_TXD bits set as appropriate.
 */
int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len,
		int frame_status)
{
	NDD_TRACE("THiB",fc,frag_count,frame_len) ;

	smc->os.hwm.tx_p = smc->hw.fp.tx[frame_status & QUEUE_A0] ;
	smc->os.hwm.tx_len = frame_len ;
	smc->os.hwm.tx_descr = TX_DESCRIPTOR | (((u_long)(frame_len-1)&3)<<27) ;
	DB_TX("hwm_tx_init: fc = %x, len = %d",fc,frame_len,3) ;

	/* classify the frame by its frame control byte */
	if ((fc & ~(FC_SYNC_BIT|FC_LLC_PRIOR)) == FC_ASYNC_LLC)
		frame_status |= LAN_TX ;
	else if (fc == FC_SMT_INFO || fc == FC_SMT_NSA)
		frame_status |= LAN_TX ;
	else if (fc == FC_SMT_LOC)
		frame_status |= LOC_TX ;
	else if (fc == FC_SMT_LAN_LOC)
		frame_status |= LAN_TX | LOC_TX ;
	else
		SMT_PANIC(smc,HWM_E0010,HWM_E0010_MSG) ;

	/* without an operational ring the frame cannot reach the LAN */
	if (!smc->hw.mac_ring_is_up) {
		frame_status &= ~LAN_TX ;
		frame_status |= RING_DOWN ;
		DB_TX("Ring is down: terminate LAN_TX",0,0,2) ;
	}

	if (frag_count > smc->os.hwm.tx_p->tx_free) {
#ifndef	NDIS_OS2
		/* try to reclaim completed TxDs before giving up */
		mac_drv_clear_txd(smc) ;
		if (frag_count > smc->os.hwm.tx_p->tx_free) {
			DB_TX("Out of TxDs, terminate LAN_TX",0,0,2) ;
			frame_status &= ~LAN_TX ;
			frame_status |= OUT_OF_TXD ;
		}
#else
		DB_TX("Out of TxDs, terminate LAN_TX",0,0,2) ;
		frame_status &= ~LAN_TX ;
		frame_status |= OUT_OF_TXD ;
#endif
	}

	DB_TX("frame_status = %x",frame_status,0,3) ;
	NDD_TRACE("THiE",frame_status,smc->os.hwm.tx_p->tx_free,0) ;
	return(frame_status) ;
}
static void prop_actions(struct s_smc *smc) { int port_in = 0 ; int port_out = 0 ; RS_SET(smc,RS_EVENT) ; switch (smc->s.sas) { case SMT_SAS : port_in = port_out = pcm_get_s_port(smc) ; break ; case SMT_DAS : port_in = cfm_get_mac_input(smc) ; port_out = cfm_get_mac_output(smc) ; break ; case SMT_NAC : SMT_PANIC(smc,SMT_E0108, SMT_E0108_MSG) ; return ; } DB_ECM("ECM : prop_actions - trace_prop %d\n", smc->e.trace_prop,0) ; DB_ECM("ECM : prop_actions - in %d out %d\n", port_in,port_out) ; if (smc->e.trace_prop & ENTITY_BIT(ENTITY_MAC)) { DB_ECM("ECM : initiate TRACE on PHY %c\n",'A'+port_in-PA,0) ; queue_event(smc,EVENT_PCM+port_in,PC_TRACE) ; } else if ((smc->e.trace_prop & ENTITY_BIT(ENTITY_PHY(PA))) && port_out != PA) { DB_ECM("ECM : propagate TRACE on PHY B\n",0,0) ; queue_event(smc,EVENT_PCMB,PC_TRACE) ; } else if ((smc->e.trace_prop & ENTITY_BIT(ENTITY_PHY(PB))) && port_out != PB) { DB_ECM("ECM : propagate TRACE on PHY A\n",0,0) ; queue_event(smc,EVENT_PCMA,PC_TRACE) ; } else { DB_ECM("ECM : TRACE terminated\n",0,0) ; smc->e.path_test = PT_PENDING ; } smc->e.trace_prop = 0 ; }
/*
 * Process all completed receive buffers on rx queue 1: walk the RxD
 * ring until a descriptor still owned by the BMU is found, reassemble
 * multi-fragment buffers, validate the frame status word, and deliver
 * LLC frames to the OS (mac_drv_rx_complete) and SMT frames to the SMT
 * layer (smt_received_pack).  Faulty frames are requeued via
 * mac_drv_requeue_rxd.
 */
void process_receive(struct s_smc *smc)
{
	int i ;
	int n ;
	int frag_count ;	/* number of RxDs of the curr rx buf */
	int used_frags ;	/* number of RxDs of the curr frame */
	struct s_smt_rx_queue *queue ;	/* points to the queue ctl struct */
	struct s_smt_fp_rxd volatile *r ;	/* rxd pointer */
	struct s_smt_fp_rxd volatile *rxd ;	/* first rxd of rx frame */
	u_long rbctrl ;		/* receive buffer control word */
	u_long rfsw ;		/* receive frame status word */
	u_short rx_used ;
	u_char far *virt ;
	char far *data ;
	SMbuf *mb ;
	u_char fc ;		/* Frame control */
	int len ;		/* Frame length */

	smc->os.hwm.detec_count = 0 ;
	queue = smc->hw.fp.rx[QUEUE_R1] ;
	NDD_TRACE("RHxB",0,0,0) ;
	for ( ; ; ) {
		r = queue->rx_curr_get ;
		rx_used = queue->rx_used ;
		frag_count = 0 ;

#ifdef	USE_BREAK_ISR
		if (smc->os.hwm.leave_isr) {
			goto rx_end ;
		}
#endif
#ifdef	NDIS_OS2
		if (offDepth) {
			smc->os.hwm.rx_break = 1 ;
			goto rx_end ;
		}
		smc->os.hwm.rx_break = 0 ;
#endif
#ifdef	ODI2
		if (smc->os.hwm.rx_break) {
			goto rx_end ;
		}
#endif
		n = 0 ;
		do {
			DB_RX("Check RxD %x for OWN and EOF",(void *)r,0,5) ;
			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
			rbctrl = le32_to_cpu(CR_READ(r->rxd_rbctrl));

			if (rbctrl & BMU_OWN) {
				NDD_TRACE("RHxE",r,rfsw,rbctrl) ;
				DB_RX("End of RxDs",0,0,4) ;
				goto rx_end ;
			}
			/*
			 * out of RxD detection
			 */
			if (!rx_used) {
				SK_BREAK() ;
				SMT_PANIC(smc,HWM_E0009,HWM_E0009_MSG) ;
				/* Either we don't have an RxD or all
				 * RxDs are filled.  Therefore it's allowed
				 * to set the STOPPED flag */
				smc->hw.hw_state = STOPPED ;
				mac_drv_clear_rx_queue(smc) ;
				smc->hw.hw_state = STARTED ;
				mac_drv_fill_rxd(smc) ;
				smc->os.hwm.detec_count = 0 ;
				goto rx_end ;
			}
			rfsw = le32_to_cpu(r->rxd_rfsw) ;
			if ((rbctrl & BMU_STF) != ((rbctrl & BMU_ST_BUF) <<5)) {
				/*
				 * The BMU_STF bit is deleted, 1 frame is
				 * placed into more than 1 rx buffer
				 *
				 * skip frame by setting the rx len to 0
				 *
				 * if fragment count == 0
				 *	The missing STF bit belongs to the
				 *	current frame, search for the
				 *	EOF bit to complete the frame
				 * else
				 *	the fragment belongs to the next frame,
				 *	exit the loop and process the frame
				 */
				SK_BREAK() ;
				rfsw = 0 ;
				if (frag_count) {
					break ;
				}
			}
			n += rbctrl & 0xffff ;
			r = r->rxd_next ;
			frag_count++ ;
			rx_used-- ;
		} while (!(rbctrl & BMU_EOF)) ;
		used_frags = frag_count ;
		DB_RX("EOF set in RxD, used_frags = %d ",used_frags,0,5) ;

		/* may be next 2 DRV_BUF_FLUSH() can be skipped, because */
		/* BMU_ST_BUF will not be changed by the ASIC */
		DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
		while (rx_used && !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
			DB_RX("Check STF bit in %x",(void *)r,0,5) ;
			r = r->rxd_next ;
			DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
			frag_count++ ;
			rx_used-- ;
		}
		DB_RX("STF bit found",0,0,5) ;

		/*
		 * The received frame is finished for the process receive
		 */
		rxd = queue->rx_curr_get ;
		queue->rx_curr_get = r ;
		queue->rx_free += frag_count ;
		queue->rx_used = rx_used ;

		/*
		 * ASIC Errata no. 7 (STF - Bit Bug)
		 */
		rxd->rxd_rbctrl &= cpu_to_le32(~BMU_STF) ;

		/* complete the DMA transfer of every fragment */
		for (r=rxd, i=frag_count ; i ; r=r->rxd_next, i--){
			DB_RX("dma_complete for RxD %x",(void *)r,0,5) ;
			dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR);
		}

		smc->hw.fp.err_stats.err_valid++ ;
		smc->mib.m[MAC0].fddiMACCopied_Ct++ ;

		/* the length of the data including the FC */
		len = (rfsw & RD_LENGTH) - 4 ;

		DB_RX("frame length = %d",len,0,4) ;
		/*
		 * check the frame_length and all error flags
		 */
		if (rfsw & (RX_MSRABT|RX_FS_E|RX_FS_CRC|RX_FS_IMPL)){
			if (rfsw & RD_S_MSRABT) {
				DB_RX("Frame aborted by the FORMAC",0,0,2) ;
				smc->hw.fp.err_stats.err_abort++ ;
			}
			/*
			 * check frame status
			 */
			if (rfsw & RD_S_SEAC2) {
				DB_RX("E-Indicator set",0,0,2) ;
				smc->hw.fp.err_stats.err_e_indicator++ ;
			}
			if (rfsw & RD_S_SFRMERR) {
				DB_RX("CRC error",0,0,2) ;
				smc->hw.fp.err_stats.err_crc++ ;
			}
			if (rfsw & RX_FS_IMPL) {
				DB_RX("Implementer frame",0,0,2) ;
				smc->hw.fp.err_stats.err_imp_frame++ ;
			}
			goto abort_frame ;
		}
		if (len > FDDI_RAW_MTU-4) {
			DB_RX("Frame too long error",0,0,2) ;
			smc->hw.fp.err_stats.err_too_long++ ;
			goto abort_frame ;
		}
		/*
		 * SUPERNET 3 Bug: FORMAC delivers status words
		 * of aborded frames to the BMU
		 */
		if (len <= 4) {
			DB_RX("Frame length = 0",0,0,2) ;
			goto abort_frame ;
		}

		if (len != (n-4)) {
			DB_RX("BMU: rx len differs: [%d:%d]",len,n,4);
			smc->os.hwm.rx_len_error++ ;
			goto abort_frame ;
		}

		/*
		 * Check SA == MA
		 */
		virt = (u_char far *) rxd->rxd_virt ;
		DB_RX("FC = %x",*virt,0,2) ;
		if (virt[12] == MA[5] &&
		    virt[11] == MA[4] &&
		    virt[10] == MA[3] &&
		    virt[9] == MA[2] &&
		    virt[8] == MA[1] &&
		    (virt[7] & ~GROUP_ADDR_BIT) == MA[0]) {
			goto abort_frame ;
		}

		/*
		 * test if LLC frame
		 */
		if (rfsw & RX_FS_LLC) {
			/*
			 * if pass_llc_promisc is disable
			 *	if DA != Multicast or Broadcast or DA!=MA
			 *		abort the frame
			 */
			if (!smc->os.hwm.pass_llc_promisc) {
				if(!(virt[1] & GROUP_ADDR_BIT)) {
					if (virt[6] != MA[5] ||
					    virt[5] != MA[4] ||
					    virt[4] != MA[3] ||
					    virt[3] != MA[2] ||
					    virt[2] != MA[1] ||
					    virt[1] != MA[0]) {
						DB_RX("DA != MA and not multi- or broadcast",0,0,2) ;
						goto abort_frame ;
					}
				}
			}

			/*
			 * LLC frame received
			 */
			DB_RX("LLC - receive",0,0,4) ;
			mac_drv_rx_complete(smc,rxd,frag_count,len) ;
		}
		else {
			if (!(mb = smt_get_mbuf(smc))) {
				smc->hw.fp.err_stats.err_no_buf++ ;
				DB_RX("No SMbuf; receive terminated",0,0,4) ;
				goto abort_frame ;
			}
			data = smtod(mb,char *) - 1 ;

			/*
			 * copy the frame into a SMT_MBuf
			 */
#ifdef USE_OS_CPY
			hwm_cpy_rxd2mb(rxd,data,len) ;
#else
			for (r=rxd, i=used_frags ; i ; r=r->rxd_next, i--){
				n = le32_to_cpu(r->rxd_rbctrl) & RD_LENGTH ;
				DB_RX("cp SMT frame to mb: len = %d",n,0,6) ;
				memcpy(data,r->rxd_virt,n) ;
				data += n ;
			}
			data = smtod(mb,char *) - 1 ;
#endif
			fc = *(char *)mb->sm_data = *data ;
			mb->sm_len = len - 1 ;		/* len - fc */
			data++ ;

			/*
			 * SMT frame received
			 */
			switch(fc) {
			case FC_SMT_INFO :
				smc->hw.fp.err_stats.err_smt_frame++ ;
				DB_RX("SMT frame received ",0,0,5) ;

				if (smc->os.hwm.pass_SMT) {
					DB_RX("pass SMT frame ",0,0,5) ;
					mac_drv_rx_complete(smc, rxd,
						frag_count,len) ;
				}
				else {
					DB_RX("requeue RxD",0,0,5) ;
					mac_drv_requeue_rxd(smc,rxd,frag_count);
				}

				smt_received_pack(smc,mb,(int)(rfsw>>25)) ;
				break ;
			case FC_SMT_NSA :
				smc->hw.fp.err_stats.err_smt_frame++ ;
				DB_RX("SMT frame received ",0,0,5) ;

				/* if pass_NSA set pass the NSA frame or */
				/* pass_SMT set and the A-Indicator */
				/* is not set, pass the NSA frame */
				if (smc->os.hwm.pass_NSA ||
					(smc->os.hwm.pass_SMT &&
					!(rfsw & A_INDIC))) {
					DB_RX("pass SMT frame ",0,0,5) ;
					mac_drv_rx_complete(smc, rxd,
						frag_count,len) ;
				}
				else {
					DB_RX("requeue RxD",0,0,5) ;
					mac_drv_requeue_rxd(smc,rxd,frag_count);
				}

				smt_received_pack(smc,mb,(int)(rfsw>>25)) ;
				break ;
			case FC_BEACON :
				if (smc->os.hwm.pass_DB) {
					DB_RX("pass DB frame ",0,0,5) ;
					mac_drv_rx_complete(smc, rxd,
						frag_count,len) ;
				}
				else {
					DB_RX("requeue RxD",0,0,5) ;
					mac_drv_requeue_rxd(smc,rxd,frag_count);
				}
				smt_free_mbuf(smc,mb) ;
				break ;
			default :
				/*
				 * unknown FC abord the frame
				 */
				DB_RX("unknown FC error",0,0,2) ;
				smt_free_mbuf(smc,mb) ;
				DB_RX("requeue RxD",0,0,5) ;
				mac_drv_requeue_rxd(smc,rxd,frag_count) ;
				if ((fc & 0xf0) == FC_MAC)
					smc->hw.fp.err_stats.err_mac_frame++ ;
				else
					smc->hw.fp.err_stats.err_imp_frame++ ;
				break ;
			}
		}

		DB_RX("next RxD is %x ",queue->rx_curr_get,0,3) ;
		NDD_TRACE("RHx1",queue->rx_curr_get,0,0) ;

		continue ;
/*--------------------------------------------------------------------*/
abort_frame:
		DB_RX("requeue RxD",0,0,5) ;
		mac_drv_requeue_rxd(smc,rxd,frag_count) ;

		DB_RX("next RxD is %x ",queue->rx_curr_get,0,3) ;
		NDD_TRACE("RHx2",queue->rx_curr_get,0,0) ;
	}
rx_end:
#ifdef	ALL_RX_COMPLETE
	mac_drv_all_receives_complete(smc) ;
#endif
	return ;	/* lint bug: needs return detect end of function */
}
/*
 * Top-level interrupt service routine of the driver.  Polls the ISR
 * source register and dispatches to the PLC, FORMAC+, timer, token
 * monitor and BMU (fast rx/tx) handlers until no interrupt source
 * remains, then delivers queued LLC frames and dispatches pending SMT
 * events.  Several OS-specific variants (NDIS_OS2, ODI2, USE_BREAK_ISR)
 * allow the ISR to be left early and resumed later.
 */
void fddi_isr(struct s_smc *smc)
{
	u_long		is ;		/* ISR source */
	u_short		stu, stl ;
	SMbuf		*mb ;
#ifdef	USE_BREAK_ISR
	int	force_irq ;
#endif

#ifdef	ODI2
	if (smc->os.hwm.rx_break) {
		mac_drv_fill_rxd(smc) ;
		if (smc->hw.fp.rx_q[QUEUE_R1].rx_used > 0) {
			smc->os.hwm.rx_break = 0 ;
			process_receive(smc) ;
		}
		else {
			smc->os.hwm.detec_count = 0 ;
			smt_force_irq(smc) ;
		}
	}
#endif
	smc->os.hwm.isr_flag = TRUE ;

#ifdef	USE_BREAK_ISR
	force_irq = TRUE ;
	if (smc->os.hwm.leave_isr) {
		smc->os.hwm.leave_isr = FALSE ;
		process_receive(smc) ;
	}
#endif

	while ((is = GET_ISR() & ISR_MASK)) {
		NDD_TRACE("CH0B",is,0,0) ;
		DB_GEN("ISA = 0x%x",is,0,7) ;

		if (is & IMASK_SLOW) {
			NDD_TRACE("CH1b",is,0,0) ;
			if (is & IS_PLINT1) {	/* PLC1 */
				plc1_irq(smc) ;
			}
			if (is & IS_PLINT2) {	/* PLC2 */
				plc2_irq(smc) ;
			}
			if (is & IS_MINTR1) {	/* FORMAC+ STU1(U/L) */
				stu = inpw(FM_A(FM_ST1U)) ;
				stl = inpw(FM_A(FM_ST1L)) ;
				DB_GEN("Slow transmit complete",0,0,6) ;
				mac1_irq(smc,stu,stl) ;
			}
			if (is & IS_MINTR2) {	/* FORMAC+ STU2(U/L) */
				stu= inpw(FM_A(FM_ST2U)) ;
				stl= inpw(FM_A(FM_ST2L)) ;
				DB_GEN("Slow receive complete",0,0,6) ;
				DB_GEN("stl = %x : stu = %x",stl,stu,7) ;
				mac2_irq(smc,stu,stl) ;
			}
			if (is & IS_MINTR3) {	/* FORMAC+ STU3(U/L) */
				stu= inpw(FM_A(FM_ST3U)) ;
				stl= inpw(FM_A(FM_ST3L)) ;
				DB_GEN("FORMAC Mode Register 3",0,0,6) ;
				mac3_irq(smc,stu,stl) ;
			}
			if (is & IS_TIMINT) {	/* Timer 82C54-2 */
				timer_irq(smc) ;
#ifdef	NDIS_OS2
				force_irq_pending = 0 ;
#endif
				/*
				 * out of RxD detection
				 */
				if (++smc->os.hwm.detec_count > 4) {
					/*
					 * check out of RxD condition
					 */
					 process_receive(smc) ;
				}
			}
			if (is & IS_TOKEN) {	/* Restricted Token Monitor */
				rtm_irq(smc) ;
			}
			if (is & IS_R1_P) {	/* Parity error rx queue 1 */
				/* clear IRQ */
				outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_P) ;
				SMT_PANIC(smc,HWM_E0004,HWM_E0004_MSG) ;
			}
			if (is & IS_R1_C) {	/* Encoding error rx queue 1 */
				/* clear IRQ */
				outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_C) ;
				SMT_PANIC(smc,HWM_E0005,HWM_E0005_MSG) ;
			}
			if (is & IS_XA_C) {	/* Encoding error async tx q */
				/* clear IRQ */
				outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_C) ;
				SMT_PANIC(smc,HWM_E0006,HWM_E0006_MSG) ;
			}
			if (is & IS_XS_C) {	/* Encoding error sync tx q */
				/* clear IRQ */
				outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_C) ;
				SMT_PANIC(smc,HWM_E0007,HWM_E0007_MSG) ;
			}
		}

		/*
		 * Fast Tx complete Async/Sync Queue (BMU service)
		 */
		if (is & (IS_XS_F|IS_XA_F)) {
			DB_GEN("Fast tx complete queue",0,0,6) ;
			/*
			 * clear IRQ, Note: no IRQ is lost, because
			 *	we always service both queues
			 */
			outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_F) ;
			outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_F) ;
			mac_drv_clear_txd(smc) ;
			llc_restart_tx(smc) ;
		}

		/*
		 * Fast Rx Complete (BMU service)
		 */
		if (is & IS_R1_F) {
			DB_GEN("Fast receive complete",0,0,6) ;
			/* clear IRQ */
#ifndef USE_BREAK_ISR
			outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ;
			process_receive(smc) ;
#else
			process_receive(smc) ;
			if (smc->os.hwm.leave_isr) {
				force_irq = FALSE ;
			} else {
				outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ;
				process_receive(smc) ;
			}
#endif
		}

#ifndef	NDIS_OS2
		while ((mb = get_llc_rx(smc))) {
			smt_to_llc(smc,mb) ;
		}
#else
		if (offDepth)
			post_proc() ;

		while (!offDepth && (mb = get_llc_rx(smc))) {
			smt_to_llc(smc,mb) ;
		}

		if (!offDepth && smc->os.hwm.rx_break) {
			process_receive(smc) ;
		}
#endif
		if (smc->q.ev_get != smc->q.ev_put) {
			NDD_TRACE("CH2a",0,0,0) ;
			ev_dispatcher(smc) ;
		}
#ifdef	NDIS_OS2
		post_proc() ;
		if (offDepth) {		/* leave fddi_isr because */
			break ;		/* indications not allowed */
		}
#endif
#ifdef	USE_BREAK_ISR
		if (smc->os.hwm.leave_isr) {
			break ;		/* leave fddi_isr */
		}
#endif

		/* NOTE: when the isr is left, no rx is pending */
	}	/* end of interrupt source polling loop */

#ifdef	USE_BREAK_ISR
	if (smc->os.hwm.leave_isr && force_irq) {
		smt_force_irq(smc) ;
	}
#endif
	smc->os.hwm.isr_flag = FALSE ;
	NDD_TRACE("CH0E",0,0,0) ;
}
/*
 * Initialize the Status Report Frame (SRF) event/condition table:
 * populate smc->evcs from the static evc_inits template, then wire each
 * entry's condition-state or multiple-occurrence pointer to the
 * corresponding MIB variable.  Table layout: entries 0-3 are MAC/SMT
 * conditions, 4-5 are MAC events, followed by four NUMPHYS-sized
 * groups of per-port entries (LerFlag, EB_Condition, Multiple_U,
 * Multiple_P) — hence the "offset + k*NUMPHYS" indexing below.
 */
void smt_init_evc(struct s_smc *smc)
{
	struct s_srf_evc	*evc ;
	const struct evc_init	*init ;
	int			i ;
	int			index ;
	int			offset ;
	/* shared dummy target so unused pointers are never NULL */
	static u_char		fail_safe = FALSE ;

	memset((char *)smc->evcs,0,sizeof(smc->evcs)) ;

	evc = smc->evcs ;
	init = evc_inits ;

	/* expand the template: each init entry describes init->n EVCs */
	for (i = 0 ; (unsigned) i < MAX_INIT_EVC ; i++) {
		for (index = 0 ; index < init->n ; index++) {
			evc->evc_code = init->code ;
			evc->evc_para = init->para ;
			evc->evc_index = init->index + index ;
#ifndef	DEBUG
			evc->evc_multiple = &fail_safe ;
			evc->evc_cond_state = &fail_safe ;
#endif
			evc++ ;
		}
		init++ ;
	}

	if ((unsigned) (evc - smc->evcs) > MAX_EVCS) {
		SMT_PANIC(smc,SMT_E0127, SMT_E0127_MSG) ;
	}

	/*
	 * conditions
	 */
	smc->evcs[0].evc_cond_state = &smc->mib.fddiSMTPeerWrapFlag ;
	smc->evcs[1].evc_cond_state =
		&smc->mib.m[MAC0].fddiMACDuplicateAddressCond ;
	smc->evcs[2].evc_cond_state =
		&smc->mib.m[MAC0].fddiMACFrameErrorFlag ;
	smc->evcs[3].evc_cond_state =
		&smc->mib.m[MAC0].fddiMACNotCopiedFlag ;

	/*
	 * events
	 */
	smc->evcs[4].evc_multiple = &smc->mib.m[MAC0].fddiMACMultiple_N ;
	smc->evcs[5].evc_multiple = &smc->mib.m[MAC0].fddiMACMultiple_P ;

	offset = 6 ;
	for (i = 0 ; i < NUMPHYS ; i++) {
		/*
		 * conditions
		 */
		smc->evcs[offset + 0*NUMPHYS].evc_cond_state =
			&smc->mib.p[i].fddiPORTLerFlag ;
		smc->evcs[offset + 1*NUMPHYS].evc_cond_state =
			&smc->mib.p[i].fddiPORTEB_Condition ;

		/*
		 * events
		 */
		smc->evcs[offset + 2*NUMPHYS].evc_multiple =
			&smc->mib.p[i].fddiPORTMultiple_U ;
		smc->evcs[offset + 3*NUMPHYS].evc_multiple =
			&smc->mib.p[i].fddiPORTMultiple_P ;
		offset++ ;
	}
#ifdef	DEBUG
	/* sanity check: every EVC must have its pointer wired up */
	for (i = 0, evc = smc->evcs ; (unsigned) i < MAX_EVCS ; i++, evc++) {
		if (SMT_IS_CONDITION(evc->evc_code)) {
			if (!evc->evc_cond_state) {
				SMT_PANIC(smc,SMT_E0128, SMT_E0128_MSG) ;
			}
			evc->evc_multiple = &fail_safe ;
		}
		else {
			if (!evc->evc_multiple) {
				SMT_PANIC(smc,SMT_E0129, SMT_E0129_MSG) ;
			}
			evc->evc_cond_state = &fail_safe ;
		}
	}
#endif
	smc->srf.TSR = smt_get_time() ;
	smc->srf.sr_state = SR0_WAIT ;
}
/*
 * Process one event of the Entity Coordination Management (ECM) state
 * machine.  'cmd' is the EC_* event applied to the current state held
 * in smc->mib.fddiSMTECMState.  The /*ECxx*/ tags name the SMT state
 * transitions.
 * NOTE(review): this appears to duplicate the commented ecm_fsm
 * definition later in this file — confirm only one copy is compiled.
 */
static void ecm_fsm(struct s_smc *smc, int cmd)
{
	int ls_a ;	/* current line state PHY A */
	int ls_b ;	/* current line state PHY B */
	int p ;		/* port index */

	smc->mib.fddiSMTBypassPresent = sm_pm_bypass_present(smc) ;

	if (cmd == EC_CONNECT)
		smc->mib.fddiSMTRemoteDisconnectFlag = FALSE ;

	/* notify AIX when a remotely issued disconnect is executed */
	if (cmd == EC_DISCONNECT &&
		smc->mib.fddiSMTRemoteDisconnectFlag == TRUE)
		AIX_EVENT (smc, (u_long) CIO_HARD_FAIL, (u_long)
			FDDI_REMOTE_DISCONNECT, smt_get_event_word(smc),
			smt_get_error_word(smc) );

	if (cmd == EC_CONNECT) {
		smc->e.DisconnectFlag = FALSE ;
	}
	else if (cmd == EC_DISCONNECT) {
		smc->e.DisconnectFlag = TRUE ;
	}

	switch(smc->mib.fddiSMTECMState) {
	case ACTIONS(EC0_OUT) :
		/* no path test is performed here */
		smc->e.path_test = PT_PASSED ;
		smc->e.ecm_line_state = FALSE ;
		stop_ecm_timer(smc) ;
		ACTIONS_DONE() ;
		break ;
	case EC0_OUT:
		/*EC01*/
		if (cmd == EC_CONNECT && !smc->mib.fddiSMTBypassPresent
			&& smc->e.path_test==PT_PASSED) {
			GO_STATE(EC1_IN) ;
			break ;
		}
		/*EC05*/
		else if (cmd == EC_CONNECT && (smc->e.path_test==PT_PASSED) &&
			smc->mib.fddiSMTBypassPresent &&
			(smc->s.sas == SMT_DAS)) {
			GO_STATE(EC5_INSERT) ;
			break ;
		}
		break;
	case ACTIONS(EC1_IN) :
		stop_ecm_timer(smc) ;
		smc->e.trace_prop = 0 ;
		sm_ma_control(smc,MA_TREQ) ;
		/* start PCM on every port with hardware present */
		for (p = 0 ; p < NUMPHYS ; p++)
			if (smc->mib.p[p].fddiPORTHardwarePresent)
				queue_event(smc,EVENT_PCMA+p,PC_START) ;
		ACTIONS_DONE() ;
		break ;
	case EC1_IN:
		/*EC12*/
		if (cmd == EC_TRACE_PROP) {
			prop_actions(smc) ;
			GO_STATE(EC2_TRACE) ;
			break ;
		}
		/*EC13*/
		else if (cmd == EC_DISCONNECT) {
			GO_STATE(EC3_LEAVE) ;
			break ;
		}
		break;
	case ACTIONS(EC2_TRACE) :
		start_ecm_timer(smc,MIB2US(smc->mib.fddiSMTTrace_MaxExpiration),
			EC_TIMEOUT_TMAX) ;
		ACTIONS_DONE() ;
		break ;
	case EC2_TRACE :
		/*EC22*/
		if (cmd == EC_TRACE_PROP) {
			prop_actions(smc) ;
			GO_STATE(EC2_TRACE) ;
			break ;
		}
		/*EC23a*/
		else if (cmd == EC_DISCONNECT) {
			smc->e.path_test = PT_EXITING ;
			GO_STATE(EC3_LEAVE) ;
			break ;
		}
		/*EC23b*/
		else if (smc->e.path_test == PT_PENDING) {
			GO_STATE(EC3_LEAVE) ;
			break ;
		}
		/*EC23c*/
		else if (cmd == EC_TIMEOUT_TMAX) {
			/* Trace_Max expired: report and leave via path test */
			AIX_EVENT(smc, (u_long) FDDI_RING_STATUS,
				(u_long) FDDI_SMT_ERROR, (u_long)
				FDDI_TRACE_MAX, smt_get_error_word(smc));
			smc->e.path_test = PT_PENDING ;
			GO_STATE(EC3_LEAVE) ;
			break ;
		}
		break ;
	case ACTIONS(EC3_LEAVE) :
		start_ecm_timer(smc,smc->s.ecm_td_min,EC_TIMEOUT_TD) ;
		for (p = 0 ; p < NUMPHYS ; p++)
			queue_event(smc,EVENT_PCMA+p,PC_STOP) ;
		ACTIONS_DONE() ;
		break ;
	case EC3_LEAVE:
		/*EC30*/
		if (cmd == EC_TIMEOUT_TD && !smc->mib.fddiSMTBypassPresent &&
			(smc->e.path_test != PT_PENDING)) {
			GO_STATE(EC0_OUT) ;
			break ;
		}
		/*EC34*/
		else if (cmd == EC_TIMEOUT_TD &&
			(smc->e.path_test == PT_PENDING)) {
			GO_STATE(EC4_PATH_TEST) ;
			break ;
		}
		/*EC31*/
		else if (cmd == EC_CONNECT && smc->e.path_test == PT_PASSED) {
			GO_STATE(EC1_IN) ;
			break ;
		}
		/*EC33*/
		else if (cmd == EC_DISCONNECT &&
			smc->e.path_test == PT_PENDING) {
			smc->e.path_test = PT_EXITING ;
			/* stay in state; left via the TD timeout */
		}
		/*EC37*/
		else if (cmd == EC_TIMEOUT_TD &&
			smc->mib.fddiSMTBypassPresent &&
			smc->e.path_test != PT_PENDING) {
			GO_STATE(EC7_DEINSERT) ;
			break ;
		}
		break ;
	case ACTIONS(EC4_PATH_TEST) :
		stop_ecm_timer(smc) ;
		smc->e.path_test = PT_TESTING ;
		start_ecm_timer(smc,smc->s.ecm_test_done,EC_TEST_DONE) ;
		ACTIONS_DONE() ;
		break ;
	case EC4_PATH_TEST :
		if (cmd == EC_TEST_DONE)
			smc->e.path_test = PT_PASSED ;

		if (smc->e.path_test == PT_FAILED)
			RS_SET(smc,RS_PATHTEST) ;

		/*EC40a*/
		if (smc->e.path_test == PT_FAILED &&
			!smc->mib.fddiSMTBypassPresent) {
			GO_STATE(EC0_OUT) ;
			break ;
		}
		/*EC40b*/
		else if (cmd == EC_DISCONNECT &&
			!smc->mib.fddiSMTBypassPresent) {
			GO_STATE(EC0_OUT) ;
			break ;
		}
		/*EC41*/
		else if (smc->e.path_test == PT_PASSED) {
			GO_STATE(EC1_IN) ;
			break ;
		}
		/*EC47a*/
		else if (smc->e.path_test == PT_FAILED &&
			smc->mib.fddiSMTBypassPresent) {
			GO_STATE(EC7_DEINSERT) ;
			break ;
		}
		/*EC47b*/
		else if (cmd == EC_DISCONNECT &&
			smc->mib.fddiSMTBypassPresent) {
			GO_STATE(EC7_DEINSERT) ;
			break ;
		}
		break ;
	case ACTIONS(EC5_INSERT) :
		sm_pm_bypass_req(smc,BP_INSERT);
		start_ecm_timer(smc,smc->s.ecm_in_max,EC_TIMEOUT_INMAX) ;
		ACTIONS_DONE() ;
		break ;
	case EC5_INSERT :
		/*EC56*/
		if (cmd == EC_TIMEOUT_INMAX) {
			GO_STATE(EC6_CHECK) ;
			break ;
		}
		/*EC57*/
		else if (cmd == EC_DISCONNECT) {
			GO_STATE(EC7_DEINSERT) ;
			break ;
		}
		break ;
	case ACTIONS(EC6_CHECK) :
		/* poll the line states until both bypass switches flipped */
		start_ecm_timer(smc,smc->s.ecm_check_poll,0) ;
		smc->e.ecm_line_state = TRUE ;
		(void) sm_pm_ls_latch(smc,PA,1) ;
		(void) sm_pm_ls_latch(smc,PB,1) ;
		ACTIONS_DONE() ;
		break ;
	case EC6_CHECK :
		ls_a = sm_pm_get_ls(smc,PA) ;
		ls_b = sm_pm_get_ls(smc,PB) ;
		/*EC61*/
		if (((ls_a == PC_QLS) || (ls_a == PC_HLS)) &&
		    ((ls_b == PC_QLS) || (ls_b == PC_HLS)) ) {
			smc->e.sb_flag = FALSE ;
			smc->e.ecm_line_state = FALSE ;
			GO_STATE(EC1_IN) ;
			break ;
		}
		/*EC66*/
		else if (!smc->e.sb_flag &&
			 (((ls_a == PC_ILS) && (ls_b == PC_QLS)) ||
			  ((ls_a == PC_QLS) && (ls_b == PC_ILS)))){
			smc->e.sb_flag = TRUE ;
			DB_ECMN(1,"ECM : EC6_CHECK - stuck bypass\n",0,0) ;
			AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long)
				FDDI_SMT_ERROR, (u_long) FDDI_BYPASS_STUCK,
				smt_get_error_word(smc));
		}
		/*EC67*/
		else if (cmd == EC_DISCONNECT) {
			smc->e.ecm_line_state = FALSE ;
			GO_STATE(EC7_DEINSERT) ;
			break ;
		}
		else {
			/* restart poll */
			start_ecm_timer(smc,smc->s.ecm_check_poll,0) ;
		}
		break ;
	case ACTIONS(EC7_DEINSERT) :
		sm_pm_bypass_req(smc,BP_DEINSERT);
		start_ecm_timer(smc,smc->s.ecm_i_max,EC_TIMEOUT_IMAX) ;
		ACTIONS_DONE() ;
		break ;
	case EC7_DEINSERT:
		/*EC70*/
		if (cmd == EC_TIMEOUT_IMAX) {
			GO_STATE(EC0_OUT) ;
			break ;
		}
		/*EC75*/
		else if (cmd == EC_CONNECT && smc->e.path_test == PT_PASSED) {
			GO_STATE(EC5_INSERT) ;
			break ;
		}
		break;
	default:
		SMT_PANIC(smc,SMT_E0107, SMT_E0107_MSG) ;
		break;
	}
}
/*
 * process ECM event
 *
 * Apply one EC_* event ('cmd') to the Entity Coordination Management
 * state machine whose current state is held in
 * smc->mib.fddiSMTECMState.  The /*ECxx*/ tags name the SMT state
 * transitions.
 */
static void ecm_fsm(struct s_smc *smc, int cmd)
{
	int ls_a ;	/* current line state PHY A */
	int ls_b ;	/* current line state PHY B */
	int p ;		/* ports */

	smc->mib.fddiSMTBypassPresent = sm_pm_bypass_present(smc) ;

	if (cmd == EC_CONNECT)
		smc->mib.fddiSMTRemoteDisconnectFlag = FALSE ;

	/* For AIX event notification: */
	/* Is a disconnect command remotely issued ? */
	if (cmd == EC_DISCONNECT &&
		smc->mib.fddiSMTRemoteDisconnectFlag == TRUE)
		AIX_EVENT (smc, (u_long) CIO_HARD_FAIL, (u_long)
			FDDI_REMOTE_DISCONNECT, smt_get_event_word(smc),
			smt_get_error_word(smc) );

	/*jd 05-Aug-1999 Bug #10419 "Port Disconnect fails at Dup MAc Cond."*/
	if (cmd == EC_CONNECT) {
		smc->e.DisconnectFlag = FALSE ;
	}
	else if (cmd == EC_DISCONNECT) {
		smc->e.DisconnectFlag = TRUE ;
	}

	switch(smc->mib.fddiSMTECMState) {
	case ACTIONS(EC0_OUT) :
		/*
		 * We do not perform a path test
		 */
		smc->e.path_test = PT_PASSED ;
		smc->e.ecm_line_state = FALSE ;
		stop_ecm_timer(smc) ;
		ACTIONS_DONE() ;
		break ;
	case EC0_OUT:
		/*EC01*/
		if (cmd == EC_CONNECT && !smc->mib.fddiSMTBypassPresent
			&& smc->e.path_test==PT_PASSED) {
			GO_STATE(EC1_IN) ;
			break ;
		}
		/*EC05*/
		else if (cmd == EC_CONNECT && (smc->e.path_test==PT_PASSED) &&
			smc->mib.fddiSMTBypassPresent &&
			(smc->s.sas == SMT_DAS)) {
			GO_STATE(EC5_INSERT) ;
			break ;
		}
		break;
	case ACTIONS(EC1_IN) :
		stop_ecm_timer(smc) ;
		smc->e.trace_prop = 0 ;
		sm_ma_control(smc,MA_TREQ) ;
		/* start PCM on every port with hardware present */
		for (p = 0 ; p < NUMPHYS ; p++)
			if (smc->mib.p[p].fddiPORTHardwarePresent)
				queue_event(smc,EVENT_PCMA+p,PC_START) ;
		ACTIONS_DONE() ;
		break ;
	case EC1_IN:
		/*EC12*/
		if (cmd == EC_TRACE_PROP) {
			prop_actions(smc) ;
			GO_STATE(EC2_TRACE) ;
			break ;
		}
		/*EC13*/
		else if (cmd == EC_DISCONNECT) {
			GO_STATE(EC3_LEAVE) ;
			break ;
		}
		break;
	case ACTIONS(EC2_TRACE) :
		start_ecm_timer(smc,MIB2US(smc->mib.fddiSMTTrace_MaxExpiration),
			EC_TIMEOUT_TMAX) ;
		ACTIONS_DONE() ;
		break ;
	case EC2_TRACE :
		/*EC22*/
		if (cmd == EC_TRACE_PROP) {
			prop_actions(smc) ;
			GO_STATE(EC2_TRACE) ;
			break ;
		}
		/*EC23a*/
		else if (cmd == EC_DISCONNECT) {
			smc->e.path_test = PT_EXITING ;
			GO_STATE(EC3_LEAVE) ;
			break ;
		}
		/*EC23b*/
		else if (smc->e.path_test == PT_PENDING) {
			GO_STATE(EC3_LEAVE) ;
			break ;
		}
		/*EC23c*/
		else if (cmd == EC_TIMEOUT_TMAX) {
			/* Trace_Max is expired */
			/* -> send AIX_EVENT */
			AIX_EVENT(smc, (u_long) FDDI_RING_STATUS,
				(u_long) FDDI_SMT_ERROR, (u_long)
				FDDI_TRACE_MAX, smt_get_error_word(smc));
			smc->e.path_test = PT_PENDING ;
			GO_STATE(EC3_LEAVE) ;
			break ;
		}
		break ;
	case ACTIONS(EC3_LEAVE) :
		start_ecm_timer(smc,smc->s.ecm_td_min,EC_TIMEOUT_TD) ;
		for (p = 0 ; p < NUMPHYS ; p++)
			queue_event(smc,EVENT_PCMA+p,PC_STOP) ;
		ACTIONS_DONE() ;
		break ;
	case EC3_LEAVE:
		/*EC30*/
		if (cmd == EC_TIMEOUT_TD && !smc->mib.fddiSMTBypassPresent &&
			(smc->e.path_test != PT_PENDING)) {
			GO_STATE(EC0_OUT) ;
			break ;
		}
		/*EC34*/
		else if (cmd == EC_TIMEOUT_TD &&
			(smc->e.path_test == PT_PENDING)) {
			GO_STATE(EC4_PATH_TEST) ;
			break ;
		}
		/*EC31*/
		else if (cmd == EC_CONNECT && smc->e.path_test == PT_PASSED) {
			GO_STATE(EC1_IN) ;
			break ;
		}
		/*EC33*/
		else if (cmd == EC_DISCONNECT &&
			smc->e.path_test == PT_PENDING) {
			smc->e.path_test = PT_EXITING ;
			/*
			 * stay in state - state will be left via timeout
			 */
		}
		/*EC37*/
		else if (cmd == EC_TIMEOUT_TD &&
			smc->mib.fddiSMTBypassPresent &&
			smc->e.path_test != PT_PENDING) {
			GO_STATE(EC7_DEINSERT) ;
			break ;
		}
		break ;
	case ACTIONS(EC4_PATH_TEST) :
		stop_ecm_timer(smc) ;
		smc->e.path_test = PT_TESTING ;
		start_ecm_timer(smc,smc->s.ecm_test_done,EC_TEST_DONE) ;
		/* now perform path test ... just a simulation */
		ACTIONS_DONE() ;
		break ;
	case EC4_PATH_TEST :
		/* path test done delay */
		if (cmd == EC_TEST_DONE)
			smc->e.path_test = PT_PASSED ;

		if (smc->e.path_test == PT_FAILED)
			RS_SET(smc,RS_PATHTEST) ;

		/*EC40a*/
		if (smc->e.path_test == PT_FAILED &&
			!smc->mib.fddiSMTBypassPresent) {
			GO_STATE(EC0_OUT) ;
			break ;
		}
		/*EC40b*/
		else if (cmd == EC_DISCONNECT &&
			!smc->mib.fddiSMTBypassPresent) {
			GO_STATE(EC0_OUT) ;
			break ;
		}
		/*EC41*/
		else if (smc->e.path_test == PT_PASSED) {
			GO_STATE(EC1_IN) ;
			break ;
		}
		/*EC47a*/
		else if (smc->e.path_test == PT_FAILED &&
			smc->mib.fddiSMTBypassPresent) {
			GO_STATE(EC7_DEINSERT) ;
			break ;
		}
		/*EC47b*/
		else if (cmd == EC_DISCONNECT &&
			smc->mib.fddiSMTBypassPresent) {
			GO_STATE(EC7_DEINSERT) ;
			break ;
		}
		break ;
	case ACTIONS(EC5_INSERT) :
		sm_pm_bypass_req(smc,BP_INSERT);
		start_ecm_timer(smc,smc->s.ecm_in_max,EC_TIMEOUT_INMAX) ;
		ACTIONS_DONE() ;
		break ;
	case EC5_INSERT :
		/*EC56*/
		if (cmd == EC_TIMEOUT_INMAX) {
			GO_STATE(EC6_CHECK) ;
			break ;
		}
		/*EC57*/
		else if (cmd == EC_DISCONNECT) {
			GO_STATE(EC7_DEINSERT) ;
			break ;
		}
		break ;
	case ACTIONS(EC6_CHECK) :
		/*
		 * in EC6_CHECK, we *POLL* the line state !
		 * check whether both bypass switches have switched.
		 */
		start_ecm_timer(smc,smc->s.ecm_check_poll,0) ;
		smc->e.ecm_line_state = TRUE ;	/* flag to pcm: report Q/HLS */
		(void) sm_pm_ls_latch(smc,PA,1) ; /* enable line state latch */
		(void) sm_pm_ls_latch(smc,PB,1) ; /* enable line state latch */
		ACTIONS_DONE() ;
		break ;
	case EC6_CHECK :
		ls_a = sm_pm_get_ls(smc,PA) ;
		ls_b = sm_pm_get_ls(smc,PB) ;
		/*EC61*/
		if (((ls_a == PC_QLS) || (ls_a == PC_HLS)) &&
		    ((ls_b == PC_QLS) || (ls_b == PC_HLS)) ) {
			smc->e.sb_flag = FALSE ;
			smc->e.ecm_line_state = FALSE ;
			GO_STATE(EC1_IN) ;
			break ;
		}
		/*EC66*/
		else if (!smc->e.sb_flag &&
			 (((ls_a == PC_ILS) && (ls_b == PC_QLS)) ||
			  ((ls_a == PC_QLS) && (ls_b == PC_ILS)))){
			smc->e.sb_flag = TRUE ;
			DB_ECMN(1,"ECM : EC6_CHECK - stuck bypass\n",0,0) ;
			AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long)
				FDDI_SMT_ERROR, (u_long) FDDI_BYPASS_STUCK,
				smt_get_error_word(smc));
		}
		/*EC67*/
		else if (cmd == EC_DISCONNECT) {
			smc->e.ecm_line_state = FALSE ;
			GO_STATE(EC7_DEINSERT) ;
			break ;
		}
		else {
			/*
			 * restart poll
			 */
			start_ecm_timer(smc,smc->s.ecm_check_poll,0) ;
		}
		break ;
	case ACTIONS(EC7_DEINSERT) :
		sm_pm_bypass_req(smc,BP_DEINSERT);
		start_ecm_timer(smc,smc->s.ecm_i_max,EC_TIMEOUT_IMAX) ;
		ACTIONS_DONE() ;
		break ;
	case EC7_DEINSERT:
		/*EC70*/
		if (cmd == EC_TIMEOUT_IMAX) {
			GO_STATE(EC0_OUT) ;
			break ;
		}
		/*EC75*/
		else if (cmd == EC_CONNECT && smc->e.path_test == PT_PASSED) {
			GO_STATE(EC5_INSERT) ;
			break ;
		}
		break;
	default:
		SMT_PANIC(smc,SMT_E0107, SMT_E0107_MSG) ;
		break;
	}
}
/*
 * PCM state machine
 *
 * Drive the Physical Connection Management state machine for one PHY.
 * The current state lives in mib->fddiPORTPCMState; the driver uses the
 * common two-phase SMT dispatch where ACTIONS(state) tags the
 * entry-action phase and ACTIONS_DONE()/GO_STATE() switch phases
 * (NOTE(review): inferred from the ACTIONS()/GO_STATE() usage pattern
 * here — confirm against the macro definitions).
 *
 * smc - station context
 * phy - PHY this event belongs to
 * cmd - PC_* event code to process
 */
static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd)
{
	int	i ;
	int	np = phy->np ;		/* PHY index */
	struct s_plc	*plc ;
	struct fddi_mib_p	*mib ;
#ifndef	MOT_ELM
	u_short	plc_rev ;		/* Revision of the plc */
#endif	/* nMOT_ELM */

	plc = &phy->plc ;
	mib = phy->mib ;

	/*
	 * general transitions independent of state
	 */
	switch (cmd) {
	case PC_STOP :
		/*PC00-PC80*/
		if (mib->fddiPORTPCMState != PC9_MAINT) {
			GO_STATE(PC0_OFF) ;
			AIX_EVENT(smc, (u_long) FDDI_RING_STATUS,
				(u_long) FDDI_PORT_EVENT, (u_long) FDDI_PORT_STOP,
				smt_get_port_event_word(smc));
		}
		return ;
	case PC_START :
		/*PC01-PC81*/
		if (mib->fddiPORTPCMState != PC9_MAINT)
			GO_STATE(PC1_BREAK) ;
		return ;
	case PC_DISABLE :
		/* PC09-PC99 */
		GO_STATE(PC9_MAINT) ;
		AIX_EVENT(smc, (u_long) FDDI_RING_STATUS,
			(u_long) FDDI_PORT_EVENT,
			(u_long) FDDI_PORT_DISABLED,
			smt_get_port_event_word(smc));
		return ;
	case PC_TIMEOUT_LCT :
		/* if long or extended LCT */
		stop_pcm_timer0(smc,phy) ;
		CLEAR(PLC(np,PL_CNTRL_B),PL_LONG) ;
		/* end of LCT is indicate by PCM_CODE (initiate PCM event) */
		return ;
	}

	/*
	 * state-dependent transitions
	 */
	switch(mib->fddiPORTPCMState) {
	case ACTIONS(PC0_OFF) :
		/* entry actions: quiesce the PLC, drop CFM flags, stop LEM */
		stop_pcm_timer0(smc,phy) ;
		outpw(PLC(np,PL_CNTRL_A),0) ;
		CLEAR(PLC(np,PL_CNTRL_B),PL_PC_JOIN) ;
		CLEAR(PLC(np,PL_CNTRL_B),PL_LONG) ;
		sm_ph_lem_stop(smc,np) ;	/* disable LEM */
		phy->cf_loop = FALSE ;
		phy->cf_join = FALSE ;
		queue_event(smc,EVENT_CFM,CF_JOIN+np) ;	/* tell CFM flags changed */
		plc_go_state(smc,np,PL_PCM_STOP) ;
		mib->fddiPORTConnectState = PCM_DISABLED ;
		ACTIONS_DONE() ;
		break ;
	case PC0_OFF:
		/*PC09*/
		if (cmd == PC_MAINT) {
			GO_STATE(PC9_MAINT) ;
			break ;
		}
		break ;
	case ACTIONS(PC1_BREAK) :
		/* Stop the LCT timer if we came from Signal state */
		stop_pcm_timer0(smc,phy) ;
		ACTIONS_DONE() ;
		plc_go_state(smc,np,0) ;
		CLEAR(PLC(np,PL_CNTRL_B),PL_PC_JOIN) ;
		CLEAR(PLC(np,PL_CNTRL_B),PL_LONG) ;
		sm_ph_lem_stop(smc,np) ;	/* disable LEM */
		/*
		 * if vector is already loaded, go to OFF to clear PCM_SIGNAL
		 */
#if	0
		if (inpw(PLC(np,PL_STATUS_B)) & PL_PCM_SIGNAL) {
			plc_go_state(smc,np,PL_PCM_STOP) ;	/* TB_MIN ? */
		}
#endif
		/*
		 * Go to OFF state in any case.
		 */
		plc_go_state(smc,np,PL_PCM_STOP) ;

		if (mib->fddiPORTPC_Withhold == PC_WH_NONE)
			mib->fddiPORTConnectState = PCM_CONNECTING ;

		phy->cf_loop = FALSE ;
		phy->cf_join = FALSE ;
		queue_event(smc,EVENT_CFM,CF_JOIN+np) ;
		phy->ls_flag = FALSE ;
		phy->pc_mode = PM_NONE ;	/* needed by CFM */
		phy->bitn = 0 ;			/* bit signaling start bit */
		/* preload transmit codes for signalling bits 0..2 */
		for (i = 0 ; i < 3 ; i++)
			pc_tcode_actions(smc,i,phy) ;

		/* Set the non-active interrupt mask register */
		outpw(PLC(np,PL_INTR_MASK),plc_imsk_na) ;

		/*
		 * If the LCT was stopped. There might be a
		 * PCM_CODE interrupt event present.
		 * This must be cleared.
		 */
		(void)inpw(PLC(np,PL_INTR_EVENT)) ;
#ifndef	MOT_ELM
		/* Get the plc revision for revision dependent code */
		plc_rev = inpw(PLC(np,PL_STATUS_A)) & PLC_REV_MASK ;

		if (plc_rev != PLC_REV_SN3)
#endif	/* MOT_ELM */
		{
			/*
			 * No supernet III PLC, so set Xmit verctor and
			 * length BEFORE starting the state machine.
			 */
			if (plc_send_bits(smc,phy,3)) {
				return ;
			}
		}

		/*
		 * Now give the Start command.
		 * - The start command shall be done before setting the bits
		 *   to be signaled. (In PLC-S description and PLCS in SN3.
		 * - The start command shall be issued AFTER setting the
		 *   XMIT vector and the XMIT length register.
		 *
		 * We do it exactly according this specs for the old PLC and
		 * the new PLCS inside the SN3.
		 * For the usual PLCS we try it the way it is done for the
		 * old PLC and set the XMIT registers again, if the PLC is
		 * not in SIGNAL state. This is done according to an PLCS
		 * errata workaround.
		 */
		plc_go_state(smc,np,PL_PCM_START) ;

		/*
		 * workaround for PLC-S eng. sample errata
		 */
#ifdef	MOT_ELM
		if (!(inpw(PLC(np,PL_STATUS_B)) & PL_PCM_SIGNAL))
#else	/* nMOT_ELM */
		if (((inpw(PLC(np,PL_STATUS_A)) & PLC_REV_MASK) !=
			PLC_REVISION_A) &&
			!(inpw(PLC(np,PL_STATUS_B)) & PL_PCM_SIGNAL))
#endif	/* nMOT_ELM */
		{
			/*
			 * Set register again (PLCS errata) or the first time
			 * (new SN3 PLCS).
			 */
			(void) plc_send_bits(smc,phy,3) ;
		}
		/*
		 * end of workaround
		 */

		GO_STATE(PC5_SIGNAL) ;
		plc->p_state = PS_BIT3 ;
		plc->p_bits = 3 ;
		plc->p_start = 0 ;
		break ;
	case PC1_BREAK :
		break ;
	case ACTIONS(PC2_TRACE) :
		plc_go_state(smc,np,PL_PCM_TRACE) ;
		ACTIONS_DONE() ;
		break ;
	case PC2_TRACE :
		break ;

	case PC3_CONNECT :	/* these states are done by hardware */
	case PC4_NEXT :
		break ;

	case ACTIONS(PC5_SIGNAL) :
		ACTIONS_DONE() ;
		/* fall thru: process the signalling sub-state immediately */
	case PC5_SIGNAL :
		if ((cmd != PC_SIGNAL) && (cmd != PC_TIMEOUT_LCT))
			break ;
		/*
		 * bit-signalling sub-state machine: exchange the PCM
		 * signalling bits in groups (0-2, 3, 4-6, 7, 8-9)
		 */
		switch (plc->p_state) {
		case PS_BIT3 :
			for (i = 0 ; i <= 2 ; i++)
				pc_rcode_actions(smc,i,phy) ;
			pc_tcode_actions(smc,3,phy) ;
			plc->p_state = PS_BIT4 ;
			plc->p_bits = 1 ;
			plc->p_start = 3 ;
			phy->bitn = 3 ;
			if (plc_send_bits(smc,phy,1)) {
				return ;
			}
			break ;
		case PS_BIT4 :
			pc_rcode_actions(smc,3,phy) ;
			for (i = 4 ; i <= 6 ; i++)
				pc_tcode_actions(smc,i,phy) ;
			plc->p_state = PS_BIT7 ;
			plc->p_bits = 3 ;
			plc->p_start = 4 ;
			phy->bitn = 4 ;
			if (plc_send_bits(smc,phy,3)) {
				return ;
			}
			break ;
		case PS_BIT7 :
			for (i = 3 ; i <= 6 ; i++)
				pc_rcode_actions(smc,i,phy) ;
			plc->p_state = PS_LCT ;
			plc->p_bits = 0 ;
			plc->p_start = 7 ;
			phy->bitn = 7 ;
			sm_ph_lem_start(smc,np,(int)smc->s.lct_short) ; /* enable LEM */
			/* start LCT */
			i = inpw(PLC(np,PL_CNTRL_B)) & ~PL_PC_LOOP ;
			outpw(PLC(np,PL_CNTRL_B),i) ;	/* must be cleared */
			outpw(PLC(np,PL_CNTRL_B),i | PL_RLBP) ;
			break ;
		case PS_LCT :
			/* check for local LCT failure */
			pc_tcode_actions(smc,7,phy) ;
			/*
			 * set tval[7]
			 */
			plc->p_state = PS_BIT8 ;
			plc->p_bits = 1 ;
			plc->p_start = 7 ;
			phy->bitn = 7 ;
			if (plc_send_bits(smc,phy,1)) {
				return ;
			}
			break ;
		case PS_BIT8 :
			/* check for remote LCT failure */
			pc_rcode_actions(smc,7,phy) ;
			if (phy->t_val[7] || phy->r_val[7]) {
				/* LCT failed on either side: back to BREAK */
				plc_go_state(smc,np,PL_PCM_STOP) ;
				GO_STATE(PC1_BREAK) ;
				break ;
			}
			for (i = 8 ; i <= 9 ; i++)
				pc_tcode_actions(smc,i,phy) ;
			plc->p_state = PS_JOIN ;
			plc->p_bits = 2 ;
			plc->p_start = 8 ;
			phy->bitn = 8 ;
			if (plc_send_bits(smc,phy,2)) {
				return ;
			}
			break ;
		case PS_JOIN :
			for (i = 8 ; i <= 9 ; i++)
				pc_rcode_actions(smc,i,phy) ;
			plc->p_state = PS_ACTIVE ;
			GO_STATE(PC6_JOIN) ;
			break ;
		}
		break ;

	case ACTIONS(PC6_JOIN) :
		/*
		 * prevent mux error when going from WRAP_A to WRAP_B
		 */
		if (smc->s.sas == SMT_DAS && np == PB &&
			(smc->y[PA].pc_mode == PM_TREE ||
			 smc->y[PB].pc_mode == PM_TREE)) {
			SETMASK(PLC(np,PL_CNTRL_A),
				PL_SC_REM_LOOP,PL_SC_REM_LOOP) ;
			SETMASK(PLC(np,PL_CNTRL_B),
				PL_CONFIG_CNTRL,PL_CONFIG_CNTRL) ;
		}
		/*
		 * NOTE(review): PL_PC_JOIN is deliberately written twice
		 * here in the original source — presumably a PLC register
		 * write quirk; kept as-is, confirm before changing.
		 */
		SETMASK(PLC(np,PL_CNTRL_B),PL_PC_JOIN,PL_PC_JOIN) ;
		SETMASK(PLC(np,PL_CNTRL_B),PL_PC_JOIN,PL_PC_JOIN) ;
		ACTIONS_DONE() ;
		cmd = 0 ;
		/* fall thru */
	case PC6_JOIN :
		switch (plc->p_state) {
		case PS_ACTIVE:
			/*PC88b*/
			if (!phy->cf_join) {
				phy->cf_join = TRUE ;
				queue_event(smc,EVENT_CFM,CF_JOIN+np) ;
			}
			if (cmd == PC_JOIN)
				GO_STATE(PC8_ACTIVE) ;
			/*PC82*/
			if (cmd == PC_TRACE) {
				GO_STATE(PC2_TRACE) ;
				break ;
			}
			break ;
		}
		break ;

	case PC7_VERIFY :
		break ;

	case ACTIONS(PC8_ACTIVE) :
		/*
		 * start LEM for SMT
		 */
		sm_ph_lem_start(smc,(int)phy->np,LCT_LEM_MAX) ;

		phy->tr_flag = FALSE ;
		mib->fddiPORTConnectState = PCM_ACTIVE ;

		/* Set the active interrupt mask register */
		outpw(PLC(np,PL_INTR_MASK),plc_imsk_act) ;

		ACTIONS_DONE() ;
		break ;
	case PC8_ACTIVE :
		/*PC81 is done by PL_TNE_EXPIRED irq */
		/*PC82*/
		if (cmd == PC_TRACE) {
			GO_STATE(PC2_TRACE) ;
			break ;
		}
		/*PC88c: is done by TRACE_PROP irq */
		break ;

	case ACTIONS(PC9_MAINT) :
		/* entry actions: isolate the port and force the maint line state */
		stop_pcm_timer0(smc,phy) ;
		CLEAR(PLC(np,PL_CNTRL_B),PL_PC_JOIN) ;
		CLEAR(PLC(np,PL_CNTRL_B),PL_LONG) ;
		CLEAR(PLC(np,PL_INTR_MASK),PL_LE_CTR) ;	/* disable LEM int. */
		sm_ph_lem_stop(smc,np) ;	/* disable LEM */
		phy->cf_loop = FALSE ;
		phy->cf_join = FALSE ;
		queue_event(smc,EVENT_CFM,CF_JOIN+np) ;
		plc_go_state(smc,np,PL_PCM_STOP) ;
		mib->fddiPORTConnectState = PCM_DISABLED ;
		SETMASK(PLC(np,PL_CNTRL_B),PL_MAINT,PL_MAINT) ;
		sm_ph_linestate(smc,np,(int) MIB2LS(mib->fddiPORTMaint_LS)) ;
		outpw(PLC(np,PL_CNTRL_A),PL_SC_BYPASS) ;
		ACTIONS_DONE() ;
		break ;
	case PC9_MAINT :
		DB_PCMN(1,"PCM %c : MAINT\n",phy->phy_name,0) ;
		/*PC90*/
		if (cmd == PC_ENABLE) {
			GO_STATE(PC0_OFF) ;
			break ;
		}
		break ;

	default:
		SMT_PANIC(smc,SMT_E0118, SMT_E0118_MSG) ;
		break ;
	}
}
/*
 * process RMT event
 *
 * Ring Management state machine for MAC0.  The current state lives in
 * smc->mib.m[MAC0].fddiMACRMTState; transitions are labelled with the
 * SMT standard's RMxx transition numbers.  The order of the if/else-if
 * transition tests within each state is significant and must not be
 * rearranged.
 *
 * smc - station context
 * cmd - RM_* event code to process
 */
static void rmt_fsm(struct s_smc *smc, int cmd)
{
	/*
	 * RM00-RM70 : from all states
	 */
	if (!smc->r.rm_join && !smc->r.rm_loop &&
		smc->mib.m[MAC0].fddiMACRMTState != ACTIONS(RM0_ISOLATED) &&
		smc->mib.m[MAC0].fddiMACRMTState != RM0_ISOLATED) {
		RS_SET(smc,RS_NORINGOP) ;
		rmt_indication(smc,0) ;
		GO_STATE(RM0_ISOLATED) ;
		return ;
	}
	switch(smc->mib.m[MAC0].fddiMACRMTState) {
	case ACTIONS(RM0_ISOLATED) :
		stop_rmt_timer0(smc) ;
		stop_rmt_timer1(smc) ;
		stop_rmt_timer2(smc) ;

		/*
		 * Disable MAC.
		 */
		sm_ma_control(smc,MA_OFFLINE) ;
		smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = FALSE ;
		smc->r.loop_avail = FALSE ;
		smc->r.sm_ma_avail = FALSE ;
		smc->r.no_flag = TRUE ;
		DB_RMTN(1,"RMT : ISOLATED\n",0,0) ;
		ACTIONS_DONE() ;
		break ;
	case RM0_ISOLATED :
		/*RM01*/
		if (smc->r.rm_join || smc->r.rm_loop) {
			/*
			 * According to the standard the MAC must be reset
			 * here. The FORMAC will be initialized and Claim
			 * and Beacon Frames will be uploaded to the MAC.
			 * So any change of Treq will take effect NOW.
			 */
			sm_ma_control(smc,MA_RESET) ;
			GO_STATE(RM1_NON_OP) ;
			break ;
		}
		break ;
	case ACTIONS(RM1_NON_OP) :
		start_rmt_timer0(smc,smc->s.rmt_t_non_op,RM_TIMEOUT_NON_OP) ;
		stop_rmt_timer1(smc) ;
		stop_rmt_timer2(smc) ;
		sm_ma_control(smc,MA_BEACON) ;
		DB_RMTN(1,"RMT : RING DOWN\n",0,0) ;
		RS_SET(smc,RS_NORINGOP) ;
		smc->r.sm_ma_avail = FALSE ;
		rmt_indication(smc,0) ;
		ACTIONS_DONE() ;
		break ;
	case RM1_NON_OP :
		/*RM12*/
		if (cmd == RM_RING_OP) {
			RS_SET(smc,RS_RINGOPCHANGE) ;
			GO_STATE(RM2_RING_OP) ;
			break ;
		}
		/*RM13*/
		else if (cmd == RM_TIMEOUT_NON_OP) {
			smc->r.bn_flag = FALSE ;
			smc->r.no_flag = TRUE ;
			GO_STATE(RM3_DETECT) ;
			break ;
		}
		break ;
	case ACTIONS(RM2_RING_OP) :
		/* ring became operational: publish availability flags */
		stop_rmt_timer0(smc) ;
		stop_rmt_timer1(smc) ;
		stop_rmt_timer2(smc) ;
		smc->r.no_flag = FALSE ;
		if (smc->r.rm_loop)
			smc->r.loop_avail = TRUE ;
		if (smc->r.rm_join) {
			smc->r.sm_ma_avail = TRUE ;
			if (smc->mib.m[MAC0].fddiMACMA_UnitdataEnable)
				smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable
					= TRUE ;
			else
				smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable
					= FALSE ;
		}
		DB_RMTN(1,"RMT : RING UP\n",0,0) ;
		RS_CLEAR(smc,RS_NORINGOP) ;
		RS_SET(smc,RS_RINGOPCHANGE) ;
		rmt_indication(smc,1) ;
		smt_stat_counter(smc,0) ;
		ACTIONS_DONE() ;
		break ;
	case RM2_RING_OP :
		/*RM21*/
		if (cmd == RM_RING_NON_OP) {
			smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = FALSE ;
			smc->r.loop_avail = FALSE ;
			RS_SET(smc,RS_RINGOPCHANGE) ;
			GO_STATE(RM1_NON_OP) ;
			break ;
		}
		/*RM22a*/
		else if (cmd == RM_ENABLE_FLAG) {
			if (smc->mib.m[MAC0].fddiMACMA_UnitdataEnable)
				smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable
					= TRUE ;
			else
				smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable
					= FALSE ;
		}
		/*RM25*/
		else if (smc->r.dup_addr_test == DA_FAILED) {
			smc->mib.m[MAC0].fddiMACMA_UnitdataAvailable = FALSE ;
			smc->r.loop_avail = FALSE ;
			smc->r.da_flag = TRUE ;
			GO_STATE(RM5_RING_OP_DUP) ;
			break ;
		}
		break ;
	case ACTIONS(RM3_DETECT) :
		start_rmt_timer0(smc,smc->s.mac_d_max*2,RM_TIMEOUT_D_MAX) ;
		start_rmt_timer1(smc,smc->s.rmt_t_stuck,RM_TIMEOUT_T_STUCK) ;
		start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL) ;
		sm_mac_check_beacon_claim(smc) ;
		DB_RMTN(1,"RMT : RM3_DETECT\n",0,0) ;
		ACTIONS_DONE() ;
		break ;
	case RM3_DETECT :
		if (cmd == RM_TIMEOUT_POLL) {
			/* periodic poll: re-arm and re-check beacon/claim */
			start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL);
			sm_mac_check_beacon_claim(smc) ;
			break ;
		}
		if (cmd == RM_TIMEOUT_D_MAX) {
			smc->r.timer0_exp = TRUE ;
		}
		/*
		 *jd(22-Feb-1999)
		 * We need a time ">= 2*mac_d_max" since we had finished
		 * Claim or Beacon state. So we will restart timer0 at
		 * every state change.
		 */
		if (cmd == RM_TX_STATE_CHANGE) {
			start_rmt_timer0(smc,
					 smc->s.mac_d_max*2,
					 RM_TIMEOUT_D_MAX) ;
		}
		/*RM32*/
		if (cmd == RM_RING_OP) {
			GO_STATE(RM2_RING_OP) ;
			break ;
		}
		/*RM33a*/
		else if ((cmd == RM_MY_BEACON || cmd == RM_OTHER_BEACON)
			&& smc->r.bn_flag) {
			smc->r.bn_flag = FALSE ;
		}
		/*RM33b*/
		else if (cmd == RM_TRT_EXP && !smc->r.bn_flag) {
			int	tx ;
			/*
			 * set bn_flag only if in state T4 or T5:
			 * only if we're the beaconer should we start the
			 * trace !
			 */
			if ((tx = sm_mac_get_tx_state(smc)) == 4 || tx == 5) {
				DB_RMTN(2,"RMT : DETECT && TRT_EXPIRED && T4/T5\n",0,0);
				smc->r.bn_flag = TRUE ;
				/*
				 * If one of the upstream stations beaconed
				 * and the link to the upstream neighbor is
				 * lost we need to restart the stuck timer to
				 * check the "stuck beacon" condition.
				 */
				start_rmt_timer1(smc,smc->s.rmt_t_stuck,
					RM_TIMEOUT_T_STUCK) ;
			}
			/*
			 * We do NOT need to clear smc->r.bn_flag in case of
			 * not being in state T4 or T5, because the flag
			 * must be cleared in order to get in this condition.
			 */
			DB_RMTN(2,
				"RMT : sm_mac_get_tx_state() = %d (bn_flag = %d)\n",
				tx,smc->r.bn_flag) ;
		}
		/*RM34a*/
		else if (cmd == RM_MY_CLAIM && smc->r.timer0_exp) {
			rmt_new_dup_actions(smc) ;
			GO_STATE(RM4_NON_OP_DUP) ;
			break ;
		}
		/*RM34b*/
		else if (cmd == RM_MY_BEACON && smc->r.timer0_exp) {
			rmt_new_dup_actions(smc) ;
			GO_STATE(RM4_NON_OP_DUP) ;
			break ;
		}
		/*RM34c*/
		else if (cmd == RM_VALID_CLAIM) {
			rmt_new_dup_actions(smc) ;
			GO_STATE(RM4_NON_OP_DUP) ;
			break ;
		}
		/*RM36*/
		else if (cmd == RM_TIMEOUT_T_STUCK &&
			smc->r.rm_join && smc->r.bn_flag) {
			GO_STATE(RM6_DIRECTED) ;
			break ;
		}
		break ;
	case ACTIONS(RM4_NON_OP_DUP) :
		start_rmt_timer0(smc,smc->s.rmt_t_announce,RM_TIMEOUT_ANNOUNCE);
		start_rmt_timer1(smc,smc->s.rmt_t_stuck,RM_TIMEOUT_T_STUCK) ;
		start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL) ;
		sm_mac_check_beacon_claim(smc) ;
		DB_RMTN(1,"RMT : RM4_NON_OP_DUP\n",0,0) ;
		ACTIONS_DONE() ;
		break ;
	case RM4_NON_OP_DUP :
		if (cmd == RM_TIMEOUT_POLL) {
			/* periodic poll: re-arm and re-check beacon/claim */
			start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL);
			sm_mac_check_beacon_claim(smc) ;
			break ;
		}
		/*RM41*/
		if (!smc->r.da_flag) {
			GO_STATE(RM1_NON_OP) ;
			break ;
		}
		/*RM44a*/
		else if ((cmd == RM_MY_BEACON || cmd == RM_OTHER_BEACON)
			&& smc->r.bn_flag) {
			smc->r.bn_flag = FALSE ;
		}
		/*RM44b*/
		else if (cmd == RM_TRT_EXP && !smc->r.bn_flag) {
			int	tx ;
			/*
			 * set bn_flag only if in state T4 or T5:
			 * only if we're the beaconer should we start the
			 * trace !
			 */
			if ((tx = sm_mac_get_tx_state(smc)) == 4 || tx == 5) {
				DB_RMTN(2,"RMT : NOPDUP && TRT_EXPIRED && T4/T5\n",0,0);
				smc->r.bn_flag = TRUE ;
				/*
				 * If one of the upstream stations beaconed
				 * and the link to the upstream neighbor is
				 * lost we need to restart the stuck timer to
				 * check the "stuck beacon" condition.
				 */
				start_rmt_timer1(smc,smc->s.rmt_t_stuck,
					RM_TIMEOUT_T_STUCK) ;
			}
			/*
			 * We do NOT need to clear smc->r.bn_flag in case of
			 * not being in state T4 or T5, because the flag
			 * must be cleared in order to get in this condition.
			 */
			DB_RMTN(2,
				"RMT : sm_mac_get_tx_state() = %d (bn_flag = %d)\n",
				tx,smc->r.bn_flag) ;
		}
		/*RM44c*/
		else if (cmd == RM_TIMEOUT_ANNOUNCE && !smc->r.bn_flag) {
			rmt_dup_actions(smc) ;
		}
		/*RM45*/
		else if (cmd == RM_RING_OP) {
			smc->r.no_flag = FALSE ;
			GO_STATE(RM5_RING_OP_DUP) ;
			break ;
		}
		/*RM46*/
		else if (cmd == RM_TIMEOUT_T_STUCK &&
			smc->r.rm_join && smc->r.bn_flag) {
			GO_STATE(RM6_DIRECTED) ;
			break ;
		}
		break ;
	case ACTIONS(RM5_RING_OP_DUP) :
		stop_rmt_timer0(smc) ;
		stop_rmt_timer1(smc) ;
		stop_rmt_timer2(smc) ;
		DB_RMTN(1,"RMT : RM5_RING_OP_DUP\n",0,0) ;
		ACTIONS_DONE() ;
		break;
	case RM5_RING_OP_DUP :
		/*RM52*/
		if (smc->r.dup_addr_test == DA_PASSED) {
			smc->r.da_flag = FALSE ;
			GO_STATE(RM2_RING_OP) ;
			break ;
		}
		/*RM54*/
		else if (cmd == RM_RING_NON_OP) {
			smc->r.jm_flag = FALSE ;
			smc->r.bn_flag = FALSE ;
			GO_STATE(RM4_NON_OP_DUP) ;
			break ;
		}
		break ;
	case ACTIONS(RM6_DIRECTED) :
		start_rmt_timer0(smc,smc->s.rmt_t_direct,RM_TIMEOUT_T_DIRECT) ;
		stop_rmt_timer1(smc) ;
		start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL) ;
		sm_ma_control(smc,MA_DIRECTED) ;
		RS_SET(smc,RS_BEACON) ;
		DB_RMTN(1,"RMT : RM6_DIRECTED\n",0,0) ;
		ACTIONS_DONE() ;
		break ;
	case RM6_DIRECTED :
		/*RM63*/
		if (cmd == RM_TIMEOUT_POLL) {
			start_rmt_timer2(smc,smc->s.rmt_t_poll,RM_TIMEOUT_POLL);
			sm_mac_check_beacon_claim(smc) ;
#ifndef	SUPERNET_3
			/*
			 * Because of problems with the Supernet II chip set
			 * sending of Directed Beacon will stop after 165ms
			 * therefore restart_trt_for_dbcn(smc) will be called
			 * to prevent this.
			 */
			restart_trt_for_dbcn(smc) ;
#endif	/*SUPERNET_3*/
			break ;
		}
		if ((cmd == RM_MY_BEACON || cmd == RM_OTHER_BEACON) &&
			!smc->r.da_flag) {
			smc->r.bn_flag = FALSE ;
			GO_STATE(RM3_DETECT) ;
			break ;
		}
		/*RM64*/
		else if ((cmd == RM_MY_BEACON || cmd == RM_OTHER_BEACON) &&
			smc->r.da_flag) {
			smc->r.bn_flag = FALSE ;
			GO_STATE(RM4_NON_OP_DUP) ;
			break ;
		}
		/*RM67*/
		else if (cmd == RM_TIMEOUT_T_DIRECT) {
			GO_STATE(RM7_TRACE) ;
			break ;
		}
		break ;
	case ACTIONS(RM7_TRACE) :
		/* propagate the trace to ECM */
		stop_rmt_timer0(smc) ;
		stop_rmt_timer1(smc) ;
		stop_rmt_timer2(smc) ;
		smc->e.trace_prop |= ENTITY_BIT(ENTITY_MAC) ;
		queue_event(smc,EVENT_ECM,EC_TRACE_PROP) ;
		DB_RMTN(1,"RMT : RM7_TRACE\n",0,0) ;
		ACTIONS_DONE() ;
		break ;
	case RM7_TRACE :
		break ;
	default:
		SMT_PANIC(smc,SMT_E0122, SMT_E0122_MSG) ;
		break;
	}
}
/*ARGSUSED1*/
/*
 * CFM state machine
 *
 * Configuration Management: decide how the PHYs (PA/PB or the single
 * SAS port PS) are connected to the MAC, configure the PHY mux
 * accordingly, update the path/placement MIB variables, and signal RMT
 * with RM_JOIN/RM_LOOP.  State lives in smc->mib.fddiSMTCF_State;
 * transitions carry the SMT standard's SCxx labels.  The order of the
 * transition tests is significant.
 *
 * smc - station context
 * cmd - event code (unused here — transitions are driven by the
 *       cf_join/cf_loop/wc_flag/cem_pst flags; hence ARGSUSED1)
 */
static void cfm_fsm(struct s_smc *smc, int cmd)
{
	switch(smc->mib.fddiSMTCF_State) {
	case ACTIONS(SC0_ISOLATED) :
		smc->mib.p[PA].fddiPORTCurrentPath = MIB_PATH_ISOLATED ;
		smc->mib.p[PB].fddiPORTCurrentPath = MIB_PATH_ISOLATED ;
		smc->mib.p[PA].fddiPORTMACPlacement = 0 ;
		smc->mib.p[PB].fddiPORTMACPlacement = 0 ;
		smc->mib.fddiSMTStationStatus = MIB_SMT_STASTA_SEPA ;
		config_mux(smc,MUX_ISOLATE) ;	/* configure PHY Mux */
		smc->r.rm_loop = FALSE ;
		smc->r.rm_join = FALSE ;
		queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
		/* Don't do the WC-Flag changing here */
		ACTIONS_DONE() ;
		DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
		break;
	case SC0_ISOLATED :
		/*SC07*/
		/*SAS port can be PA or PB ! */
		if (smc->s.sas && (smc->y[PA].cf_join || smc->y[PA].cf_loop ||
				smc->y[PB].cf_join || smc->y[PB].cf_loop)) {
			GO_STATE(SC11_C_WRAP_S) ;
			break ;
		}
		/*SC01*/
		if ((smc->y[PA].cem_pst == CEM_PST_UP && smc->y[PA].cf_join &&
		     !smc->y[PA].wc_flag) || smc->y[PA].cf_loop) {
			GO_STATE(SC9_C_WRAP_A) ;
			break ;
		}
		/*SC02*/
		if ((smc->y[PB].cem_pst == CEM_PST_UP && smc->y[PB].cf_join &&
		     !smc->y[PB].wc_flag) || smc->y[PB].cf_loop) {
			GO_STATE(SC10_C_WRAP_B) ;
			break ;
		}
		break ;
	case ACTIONS(SC9_C_WRAP_A) :
		smc->mib.p[PA].fddiPORTCurrentPath = MIB_PATH_CONCATENATED ;
		smc->mib.p[PB].fddiPORTCurrentPath = MIB_PATH_ISOLATED ;
		smc->mib.p[PA].fddiPORTMACPlacement = INDEX_MAC ;
		smc->mib.p[PB].fddiPORTMACPlacement = 0 ;
		smc->mib.fddiSMTStationStatus = MIB_SMT_STASTA_CON ;
		config_mux(smc,MUX_WRAPA) ;	/* configure PHY mux */
		if (smc->y[PA].cf_loop) {
			smc->r.rm_join = FALSE ;
			smc->r.rm_loop = TRUE ;
			queue_event(smc,EVENT_RMT,RM_LOOP) ;/* signal RMT */
		}
		if (smc->y[PA].cf_join) {
			smc->r.rm_loop = FALSE ;
			smc->r.rm_join = TRUE ;
			queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
		}
		ACTIONS_DONE() ;
		DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
		break ;
	case SC9_C_WRAP_A :
		/*SC10*/
		if ( (smc->y[PA].wc_flag || !smc->y[PA].cf_join) &&
		     !smc->y[PA].cf_loop ) {
			GO_STATE(SC0_ISOLATED) ;
			break ;
		}
		/*SC12*/
		else if ( (smc->y[PB].cf_loop && smc->y[PA].cf_join &&
			   smc->y[PA].cem_pst == CEM_PST_UP) ||
			((smc->y[PB].cf_loop ||
			 (smc->y[PB].cf_join &&
			  smc->y[PB].cem_pst == CEM_PST_UP)) &&
			 (smc->y[PA].pc_mode == PM_TREE ||
			  smc->y[PB].pc_mode == PM_TREE))) {
			smc->y[PA].scrub = TRUE ;
			GO_STATE(SC10_C_WRAP_B) ;
			break ;
		}
		/*SC14*/
		else if (!smc->s.attach_s &&
			  smc->y[PA].cf_join &&
			  smc->y[PA].cem_pst == CEM_PST_UP &&
			  smc->y[PA].pc_mode == PM_PEER &&
			  smc->y[PB].cf_join &&
			  smc->y[PB].cem_pst == CEM_PST_UP &&
			  smc->y[PB].pc_mode == PM_PEER) {
			smc->y[PA].scrub = TRUE ;
			smc->y[PB].scrub = TRUE ;
			GO_STATE(SC4_THRU_A) ;
			break ;
		}
		/*SC15*/
		else if ( smc->s.attach_s &&
			  smc->y[PA].cf_join &&
			  smc->y[PA].cem_pst == CEM_PST_UP &&
			  smc->y[PA].pc_mode == PM_PEER &&
			  smc->y[PB].cf_join &&
			  smc->y[PB].cem_pst == CEM_PST_UP &&
			  smc->y[PB].pc_mode == PM_PEER) {
			smc->y[PA].scrub = TRUE ;
			smc->y[PB].scrub = TRUE ;
			GO_STATE(SC5_THRU_B) ;
			break ;
		}
		break ;
	case ACTIONS(SC10_C_WRAP_B) :
		smc->mib.p[PA].fddiPORTCurrentPath = MIB_PATH_ISOLATED ;
		smc->mib.p[PB].fddiPORTCurrentPath = MIB_PATH_CONCATENATED ;
		smc->mib.p[PA].fddiPORTMACPlacement = 0 ;
		smc->mib.p[PB].fddiPORTMACPlacement = INDEX_MAC ;
		smc->mib.fddiSMTStationStatus = MIB_SMT_STASTA_CON ;
		config_mux(smc,MUX_WRAPB) ;	/* configure PHY mux */
		if (smc->y[PB].cf_loop) {
			smc->r.rm_join = FALSE ;
			smc->r.rm_loop = TRUE ;
			queue_event(smc,EVENT_RMT,RM_LOOP) ;/* signal RMT */
		}
		if (smc->y[PB].cf_join) {
			smc->r.rm_loop = FALSE ;
			smc->r.rm_join = TRUE ;
			queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
		}
		ACTIONS_DONE() ;
		DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
		break ;
	case SC10_C_WRAP_B :
		/*SC20*/
		if ( !smc->y[PB].cf_join && !smc->y[PB].cf_loop ) {
			GO_STATE(SC0_ISOLATED) ;
			break ;
		}
		/*SC21*/
		else if ( smc->y[PA].cf_loop && smc->y[PA].pc_mode == PM_PEER &&
			  smc->y[PB].cf_join && smc->y[PB].pc_mode == PM_PEER) {
			smc->y[PB].scrub = TRUE ;
			GO_STATE(SC9_C_WRAP_A) ;
			break ;
		}
		/*SC24*/
		else if (!smc->s.attach_s &&
			 smc->y[PA].cf_join && smc->y[PA].pc_mode == PM_PEER &&
			 smc->y[PB].cf_join && smc->y[PB].pc_mode == PM_PEER) {
			smc->y[PA].scrub = TRUE ;
			smc->y[PB].scrub = TRUE ;
			GO_STATE(SC4_THRU_A) ;
			break ;
		}
		/*SC25*/
		else if ( smc->s.attach_s &&
			 smc->y[PA].cf_join && smc->y[PA].pc_mode == PM_PEER &&
			 smc->y[PB].cf_join && smc->y[PB].pc_mode == PM_PEER) {
			smc->y[PA].scrub = TRUE ;
			smc->y[PB].scrub = TRUE ;
			GO_STATE(SC5_THRU_B) ;
			break ;
		}
		break ;
	case ACTIONS(SC4_THRU_A) :
		smc->mib.p[PA].fddiPORTCurrentPath = MIB_PATH_THRU ;
		smc->mib.p[PB].fddiPORTCurrentPath = MIB_PATH_THRU ;
		smc->mib.p[PA].fddiPORTMACPlacement = 0 ;
		smc->mib.p[PB].fddiPORTMACPlacement = INDEX_MAC ;
		smc->mib.fddiSMTStationStatus = MIB_SMT_STASTA_THRU ;
		config_mux(smc,MUX_THRUA) ;	/* configure PHY mux */
		smc->r.rm_loop = FALSE ;
		smc->r.rm_join = TRUE ;
		queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
		ACTIONS_DONE() ;
		DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
		break ;
	case SC4_THRU_A :
		/*SC41*/
		if (smc->y[PB].wc_flag || !smc->y[PB].cf_join) {
			smc->y[PA].scrub = TRUE ;
			GO_STATE(SC9_C_WRAP_A) ;
			break ;
		}
		/*SC42*/
		else if (!smc->y[PA].cf_join || smc->y[PA].wc_flag) {
			smc->y[PB].scrub = TRUE ;
			GO_STATE(SC10_C_WRAP_B) ;
			break ;
		}
		/*SC45*/
		else if (smc->s.attach_s) {
			smc->y[PB].scrub = TRUE ;
			GO_STATE(SC5_THRU_B) ;
			break ;
		}
		break ;
	case ACTIONS(SC5_THRU_B) :
		smc->mib.p[PA].fddiPORTCurrentPath = MIB_PATH_THRU ;
		smc->mib.p[PB].fddiPORTCurrentPath = MIB_PATH_THRU ;
		smc->mib.p[PA].fddiPORTMACPlacement = INDEX_MAC ;
		smc->mib.p[PB].fddiPORTMACPlacement = 0 ;
		smc->mib.fddiSMTStationStatus = MIB_SMT_STASTA_THRU ;
		config_mux(smc,MUX_THRUB) ;	/* configure PHY mux */
		smc->r.rm_loop = FALSE ;
		smc->r.rm_join = TRUE ;
		queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
		ACTIONS_DONE() ;
		DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
		break ;
	case SC5_THRU_B :
		/*SC51*/
		if (!smc->y[PB].cf_join || smc->y[PB].wc_flag) {
			smc->y[PA].scrub = TRUE ;
			GO_STATE(SC9_C_WRAP_A) ;
			break ;
		}
		/*SC52*/
		else if (!smc->y[PA].cf_join || smc->y[PA].wc_flag) {
			smc->y[PB].scrub = TRUE ;
			GO_STATE(SC10_C_WRAP_B) ;
			break ;
		}
		/*SC54*/
		else if (!smc->s.attach_s) {
			smc->y[PA].scrub = TRUE ;
			GO_STATE(SC4_THRU_A) ;
			break ;
		}
		break ;
	case ACTIONS(SC11_C_WRAP_S) :
		smc->mib.p[PS].fddiPORTCurrentPath = MIB_PATH_CONCATENATED ;
		smc->mib.p[PS].fddiPORTMACPlacement = INDEX_MAC ;
		smc->mib.fddiSMTStationStatus = MIB_SMT_STASTA_CON ;
		config_mux(smc,MUX_WRAPS) ;	/* configure PHY mux */
		if (smc->y[PA].cf_loop || smc->y[PB].cf_loop) {
			smc->r.rm_join = FALSE ;
			smc->r.rm_loop = TRUE ;
			queue_event(smc,EVENT_RMT,RM_LOOP) ;/* signal RMT */
		}
		if (smc->y[PA].cf_join || smc->y[PB].cf_join) {
			smc->r.rm_loop = FALSE ;
			smc->r.rm_join = TRUE ;
			queue_event(smc,EVENT_RMT,RM_JOIN) ;/* signal RMT */
		}
		ACTIONS_DONE() ;
		DB_CFMN(1,"CFM : %s\n",cfm_states[smc->mib.fddiSMTCF_State],0) ;
		break ;
	case SC11_C_WRAP_S :
		/*SC70*/
		if ( !smc->y[PA].cf_join && !smc->y[PA].cf_loop &&
		     !smc->y[PB].cf_join && !smc->y[PB].cf_loop) {
			GO_STATE(SC0_ISOLATED) ;
			break ;
		}
		break ;
	default:
		SMT_PANIC(smc,SMT_E0106, SMT_E0106_MSG) ;
		break;
	}
}