/*
 * Stop the adapter and free any mbufs allocated to the RX and TX buffers.
 *
 * Disables the TX/RX engines, masks and drains interrupts, releases all
 * pending transmit mbufs, and clears IFF_RUNNING.  Must be called with the
 * interface serializer held.
 */
static void
sln_stop(struct sln_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	ASSERT_SERIALIZED(ifp->if_serializer);

	/* Cancel the watchdog and the periodic state callout. */
	ifp->if_timer = 0;
	callout_stop(&sc->sln_state);

	/* disable Tx/Rx */
	sc->txcfg &= ~SL_TXCFG_EN;
	sc->rxcfg &= ~SL_RXCFG_EN;
	SLN_WRITE_4(sc, SL_TX_CONFIG, sc->txcfg);
	SLN_WRITE_4(sc, SL_RX_CONFIG, sc->rxcfg);

	/* Clear interrupt: mask everything, then read to ack pending status. */
	SLN_WRITE_4(sc, SL_INT_MASK, 0);
	SLN_READ_4(sc, SL_INT_STATUS);

	/* Free the TX list buffers and clear the matching descriptor slots. */
	for (i = 0; i < SL_TXD_CNT; i++) {
		if (sc->sln_bufdata.sln_tx_buf[i] != NULL) {
			m_freem(sc->sln_bufdata.sln_tx_buf[i]);
			sc->sln_bufdata.sln_tx_buf[i] = NULL;
			SLN_WRITE_4(sc, SL_TSAD0 + i * 4, 0);
		}
	}

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}
/*
 * Initialize the sbsh interface: reset software queue/descriptor state and
 * kick off DSL link activation on the cx28975 modem chip.
 *
 * No-op if the interface is already running or the firmware was never
 * loaded (state == NOT_LOADED).  IFF_RUNNING is only set once the
 * activation command has been accepted by the chip.
 */
static void
sbsh_init(void *xsc)
{
	struct sbsh_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	u_int8_t t;

	if ((ifp->if_flags & IFF_RUNNING) || sc->state == NOT_LOADED) {
		return;
	}

	/* Reset statistics and all software ring indices. */
	bzero(&sc->in_stats, sizeof(struct sbni16_stats));
	sc->head_xq = sc->tail_xq = sc->head_rq = sc->tail_rq = 0;
	sc->head_tdesc = sc->head_rdesc = 0;

	/* Enable only the external (modem) interrupt for now. */
	sc->regs->IMR = EXT;
	t = 2;
	issue_cx28975_cmd(sc, _DSL_CLEAR_ERROR_CTRS, &t, 1);
	if (issue_cx28975_cmd(sc, _DSL_ACTIVATION, &t, 1) == 0) {
		/* Activation accepted; link training proceeds asynchronously. */
		sc->state = ACTIVATION;

		ifp->if_flags |= IFF_RUNNING;
		ifq_clr_oactive(&ifp->if_snd);
	}
}
/*
 * Bring the octeon ethernet interface up.
 *
 * On the first call (sc_init_flag == 0) the whole device is configured and
 * the PKO/IPD units enabled; on subsequent calls only the GMX port is
 * re-enabled.  Always reprograms media and RX filter, restarts the
 * periodic ticks, and marks the interface running.  Returns 0.
 */
int
octeon_eth_init(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	/* XXX don't disable commonly used parts!!! XXX */
	if (sc->sc_init_flag == 0) {
		/* Cancel any pending I/O. */
		octeon_eth_stop(ifp, 0);

		/* Initialize the device */
		octeon_eth_configure(sc);

		cn30xxpko_enable(sc->sc_pko);
		cn30xxipd_enable(sc->sc_ipd);

		sc->sc_init_flag = 1;
	} else {
		/* Fast path: device already configured, just re-enable port. */
		cn30xxgmx_port_enable(sc->sc_gmx_port, 1);
	}
	octeon_eth_mediachange(ifp);

	cn30xxgmx_set_filter(sc->sc_gmx_port);

	/* Restart the 1-second housekeeping timers. */
	timeout_add_sec(&sc->sc_tick_misc_ch, 1);
	timeout_add_sec(&sc->sc_tick_free_ch, 1);

	SET(ifp->if_flags, IFF_RUNNING);
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}
/*
 * Stop the vnet interface and tear down its LDC channel.
 *
 * Order matters: mark the interface down, disable and drain the TX/RX
 * interrupts, unconfigure the hypervisor LDC queues, reset the channel
 * state, then release soft descriptors, the descriptor ring, and the LDC
 * map table.
 */
void
vnet_stop(struct ifnet *ifp)
{
	struct vnet_softc *sc = ifp->if_softc;
	struct ldc_conn *lc = &sc->sc_lc;

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	cbus_intr_setenabled(sc->sc_bustag, sc->sc_tx_ino, INTR_DISABLED);
	cbus_intr_setenabled(sc->sc_bustag, sc->sc_rx_ino, INTR_DISABLED);

	/* Wait for any in-flight interrupt handlers to finish. */
	intr_barrier(sc->sc_tx_ih);
	intr_barrier(sc->sc_rx_ih);

	/* Unconfigure the hypervisor LDC queues (size 0 tears them down). */
	hv_ldc_tx_qconf(lc->lc_id, 0, 0);
	hv_ldc_rx_qconf(lc->lc_id, 0, 0);
	lc->lc_tx_seqid = 0;
	lc->lc_state = 0;
	lc->lc_tx_state = lc->lc_rx_state = LDC_CHANNEL_DOWN;
	vnet_ldc_reset(lc);

	free(sc->sc_vsd, M_DEVBUF, VNET_NUM_SOFT_DESC * sizeof(*sc->sc_vsd));

	vnet_dring_free(sc->sc_dmatag, sc->sc_vd);

	hv_ldc_set_map_table(lc->lc_id, 0, 0);
	ldc_map_free(sc->sc_dmatag, sc->sc_lm);
}
/*
 * End of sending: USB transmit-completion callback.
 *
 * Updates output statistics, clears the stall condition on the TX
 * endpoint if needed, and schedules another start if more packets are
 * queued.  Returns early without touching the interface when the device
 * is detaching or the transfer was cancelled/not started.
 */
static void
lgue_txeof(usbd_xfer_handle xfer, usbd_private_handle priv,
    usbd_status status)
{
	struct ifnet *ifp;
	struct lgue_softc *sc;
	usbd_status err;

	sc = priv;
	if (sc->lgue_dying)
		return;

	ifp = &sc->lgue_arpcom.ac_if;

	if (status != USBD_NORMAL_COMPLETION) {
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED)
			return;
		/* A stalled endpoint must be cleared before further TX. */
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall(sc->lgue_ep[LGUE_ENDPT_TX]);
		return;
	}

	usbd_get_xfer_status(sc->lgue_tx_xfer, NULL, NULL, NULL, &err);
	if (err)
		IFNET_STAT_INC(ifp, oerrors, 1);
	else
		IFNET_STAT_INC(ifp, opackets, 1);

	/* More queued packets: schedule the start routine. */
	if (!STAILQ_EMPTY(&sc->lgue_tx_queue)) {
		if_devstart_sched(ifp);
	}

	ifp->if_timer = 0;
	ifq_clr_oactive(&ifp->if_snd);
}
/*
 * Stop the an(4) adapter.
 *
 * Drops to INIT 802.11 state, disables the MAC and its interrupts,
 * releases the NIC-memory TX buffers, and clears IFF_RUNNING.  If
 * 'disable' is set, also powers the hardware down via sc_disable.
 * Runs at splnet.
 */
void
an_stop(struct ifnet *ifp, int disable)
{
	struct an_softc *sc = ifp->if_softc;
	int i, s;

	if (!sc->sc_enabled)
		return;

	DPRINTF(("an_stop: disable %d\n", disable));

	s = splnet();
	ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1);

	/* Only poke the firmware if the hardware is still present. */
	if (!sc->sc_invalid) {
		an_cmd(sc, AN_CMD_FORCE_SYNCLOSS, 0);
		CSR_WRITE_2(sc, AN_INT_EN, 0);
		an_cmd(sc, AN_CMD_DISABLE, 0);

		/* Return the NIC-memory TX buffers to the firmware. */
		for (i = 0; i < AN_TX_RING_CNT; i++)
			an_cmd(sc, AN_CMD_DEALLOC_MEM, sc->sc_txd[i].d_fid);
	}

	sc->sc_tx_timer = 0;
	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	if (disable) {
		if (sc->sc_disable)
			(*sc->sc_disable)(sc);
		sc->sc_enabled = 0;
	}
	splx(s);
}
/*
 * Drain the interface send queue into the transmit ring.
 *
 * Dequeues packets while both the software transmit queue (XQLEN ring)
 * and the hardware descriptor ring (LTDR, 128 entries) have room,
 * handing each non-empty packet to encap_frame().  Afterwards marks the
 * queue active iff the hardware still has descriptors in flight
 * (CTDR != LTDR).
 */
static void
start_xmit_frames(struct sbsh_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mbuf *m;

	/*
	 * Check if we have any free descriptor(s) and free space in
	 * our transmit queue.
	 */
	while (sc->tail_xq != ((sc->head_xq - 1) & (XQLEN - 1)) &&
	    sc->regs->LTDR != ((sc->head_tdesc - 1) & 0x7f)) {
		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;
		if (m->m_pkthdr.len) {
			BPF_MTAP(ifp, m);
			encap_frame(sc, m);
		} else
			m_freem(m);	/* drop zero-length packets */
	}

	if (sc->regs->CTDR != sc->regs->LTDR)
		ifq_set_oactive(&ifp->if_snd);
	else
		ifq_clr_oactive(&ifp->if_snd);
}
/*
 * Bring the virtual ethernet interface into the running state and allow
 * output.  There is no hardware to program; this only flips the ifnet
 * flags, protected by a critical section.
 */
static void
ng_eiface_init(void *xsc)
{
	priv_p priv = xsc;
	struct ifnet *const ifp = priv->ifp;

	crit_enter();
	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	crit_exit();
}
/*
 * Watchdog handler: the transmitter appears wedged, so log it, reprogram
 * the device, mark the interface running again, and restart output.
 */
void
octeon_eth_watchdog(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	printf("%s: device timeout\n", sc->sc_dev.dv_xname);

	/* Full reconfigure is the recovery strategy for a TX hang. */
	octeon_eth_configure(sc);

	SET(ifp->if_flags, IFF_RUNNING);
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	octeon_eth_start(ifp);
}
void eginit(register struct eg_softc *sc) { bus_space_tag_t bst = sc->sc_bst; bus_space_handle_t bsh = sc->sc_bsh; register struct ifnet *ifp = &sc->sc_arpcom.ac_if; /* soft reset the board */ bus_space_write_1(bst, bsh, EG_CONTROL, EG_CTL_FLSH); delay(100); bus_space_write_1(bst, bsh, EG_CONTROL, EG_CTL_ATTN); delay(100); bus_space_write_1(bst, bsh, EG_CONTROL, 0); delay(200); sc->eg_pcb[0] = EG_CMD_CONFIG82586; /* Configure 82586 */ sc->eg_pcb[1] = 2; sc->eg_pcb[2] = 3; /* receive broadcast & multicast */ sc->eg_pcb[3] = 0; if (egwritePCB(sc) != 0) DPRINTF(("write error3\n")); if (egreadPCB(sc) != 0) { DPRINTF(("read error3\n")); egprintpcb(sc); } else if (sc->eg_pcb[2] != 0 || sc->eg_pcb[3] != 0) printf("%s: configure card command failed\n", sc->sc_dev.dv_xname); if (sc->eg_inbuf == 0) sc->eg_inbuf = malloc(EG_BUFLEN, M_TEMP, M_NOWAIT); sc->eg_incount = 0; if (sc->eg_outbuf == 0) sc->eg_outbuf = malloc(EG_BUFLEN, M_TEMP, M_NOWAIT); bus_space_write_1(bst, bsh, EG_CONTROL, EG_CTL_CMDE); sc->eg_incount = 0; egrecv(sc); /* Interface is now `running', with no output active. */ ifp->if_flags |= IFF_RUNNING; ifq_clr_oactive(&ifp->if_snd); /* Attempt to start output, if any. */ egstart(ifp); }
/*
 * Handle a VIO RDX (Ready-to-eXchange-data) control message.
 *
 * INFO from the peer is acknowledged and recorded; an ACK of our own RDX
 * is recorded (or triggers an LDC reset if we never sent one).  Once both
 * RDX directions are complete the link is declared up, multicast filters
 * are programmed, and transmission is started.
 */
void
vnet_rx_vio_rdx(struct vnet_softc *sc, struct vio_msg_tag *tag)
{
	struct ifnet *ifp = &sc->sc_ac.ac_if;

	switch(tag->stype) {
	case VIO_SUBTYPE_INFO:
		DPRINTF(("CTRL/INFO/RDX\n"));

		/* Echo the message back as an ACK with our session id. */
		tag->stype = VIO_SUBTYPE_ACK;
		tag->sid = sc->sc_local_sid;
		vnet_sendmsg(sc, tag, sizeof(*tag));
		sc->sc_vio_state |= VIO_RCV_RDX;
		break;

	case VIO_SUBTYPE_ACK:
		DPRINTF(("CTRL/ACK/RDX\n"));
		/* An ACK for an RDX we never sent means protocol confusion. */
		if (!ISSET(sc->sc_vio_state, VIO_SND_RDX)) {
			ldc_reset(&sc->sc_lc);
			break;
		}
		sc->sc_vio_state |= VIO_ACK_RDX;
		break;

	default:
		DPRINTF(("CTRL/0x%02x/RDX (VIO)\n", tag->stype));
		break;
	}

	if (ISSET(sc->sc_vio_state, VIO_RCV_RDX) &&
	    ISSET(sc->sc_vio_state, VIO_ACK_RDX)) {
		/* Link is up! */
		vnet_link_state(sc);

		/* Configure multicast now that we can. */
		vnet_setmulti(sc, 1);

		KERNEL_LOCK();
		ifq_clr_oactive(&ifp->if_snd);
		vnet_start(ifp);
		KERNEL_UNLOCK();
	}
}
/*
 * Transmit watchdog: if the card is still present, clear the output-
 * active state, restart the transmit path, and poll the interrupt
 * handler to pick up anything the hardware has pending.
 */
static void
ep_if_watchdog(struct ifnet *ifp)
{
	struct ep_softc *sc = ifp->if_softc;

	if (sc->gone)
		return;

	ifq_clr_oactive(&ifp->if_snd);
	if_devstart(ifp);
	ep_intr(sc);
}
/*
 * Stop the octeon ethernet interface: cancel the tick timeouts, take the
 * PHY and GMX port down, clear IFF_RUNNING, and wait for the POW receive
 * interrupt handler to drain.  The 'disable' argument is unused here.
 * Returns 0.
 */
int
octeon_eth_stop(struct ifnet *ifp, int disable)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	timeout_del(&sc->sc_tick_misc_ch);
	timeout_del(&sc->sc_tick_free_ch);

	mii_down(&sc->sc_mii);

	cn30xxgmx_port_enable(sc->sc_gmx_port, 0);

	/* Mark the interface as down and cancel the watchdog timer. */
	CLR(ifp->if_flags, IFF_RUNNING);
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_timer = 0;

	/* Ensure no receive interrupt is still executing. */
	intr_barrier(octeon_eth_pow_recv_ih);

	return 0;
}
/*
 * Transmit-complete interrupt handler.
 *
 * Reads the FID of the completed frame, acknowledges the TX event,
 * updates error/packet counters, and marks the matching TX ring slot
 * free.  The common case is that the completed FID is the one at
 * sc_txcur; otherwise the ring is scanned and a mismatch is reported
 * when IFF_DEBUG is set.
 */
void
an_txeof(struct an_softc *sc, u_int16_t status)
{
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int cur, id;

	sc->sc_tx_timer = 0;
	ifq_clr_oactive(&ifp->if_snd);

	id = CSR_READ_2(sc, AN_TX_CMP_FID);
	CSR_WRITE_2(sc, AN_EVENT_ACK, status & (AN_EV_TX | AN_EV_TX_EXC));

	if (status & AN_EV_TX_EXC)
		ifp->if_oerrors++;
	else
		ifp->if_opackets++;

	cur = sc->sc_txcur;
	if (sc->sc_txd[cur].d_fid == id) {
		/* Expected in-order completion: free the slot and advance. */
		sc->sc_txd[cur].d_inuse = 0;
		DPRINTF2(("an_txeof: sent %x/%d\n", id, cur));
		AN_INC(cur, AN_TX_RING_CNT);
		sc->sc_txcur = cur;
	} else {
		/* Out-of-order completion: find the slot owning this FID. */
		for (cur = 0; cur < AN_TX_RING_CNT; cur++) {
			if (id == sc->sc_txd[cur].d_fid) {
				sc->sc_txd[cur].d_inuse = 0;
				break;
			}
		}
		if (ifp->if_flags & IFF_DEBUG)
			printf("%s: tx mismatch: "
			    "expected %x(%d), actual %x(%d)\n",
			    sc->sc_dev.dv_xname,
			    sc->sc_txd[sc->sc_txcur].d_fid,
			    sc->sc_txcur, id, cur);
	}
}
/*
 * Start transfer from internal queue.
 *
 * Pops the head of the driver's software TX queue, copies the mbuf into
 * the pre-allocated USB bulk buffer, and submits the transfer.  On
 * submission failure the packet is dropped, the device is stopped, and
 * EIO is returned.  On success the mbuf is freed immediately (the data
 * now lives in lgue_tx_buf), the queue is marked output-active, and the
 * watchdog is armed.  Returns 0 if the queue was empty or the transfer
 * was submitted.
 */
static int
lgue_start_transfer(struct lgue_softc *sc) {
	usbd_status err;
	struct lgue_queue_entry *entry;
	struct ifnet *ifp;

	if (STAILQ_EMPTY(&sc->lgue_tx_queue))
		return(0);

	ifp = &sc->lgue_arpcom.ac_if;
	entry = STAILQ_FIRST(&sc->lgue_tx_queue);
	STAILQ_REMOVE_HEAD(&sc->lgue_tx_queue, entry_next);

	/* Flatten the mbuf chain into the contiguous USB buffer. */
	m_copydata(entry->entry_mbuf, 0, entry->entry_mbuf->m_pkthdr.len,
	    sc->lgue_tx_buf);

	/* Transmit */
	usbd_setup_xfer(sc->lgue_tx_xfer, sc->lgue_ep[LGUE_ENDPT_TX], sc,
	    sc->lgue_tx_buf, entry->entry_mbuf->m_pkthdr.len,
	    USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, lgue_txeof);
	err = usbd_transfer(sc->lgue_tx_xfer);
	if (err != USBD_IN_PROGRESS) {
		/* Submission failed: drop the packet and shut down. */
		m_freem(entry->entry_mbuf);
		kfree(entry, M_USBDEV);
		lgue_stop(sc);
		ifq_clr_oactive(&ifp->if_snd);
		return(EIO);
	}

	/* Data was copied out above, so the mbuf can be released now. */
	m_freem(entry->entry_mbuf);
	kfree(entry, M_USBDEV);

	sc->lgue_tx_cnt++;
	ifq_set_oactive(&ifp->if_snd);
	ifp->if_timer = 5;	/* arm the TX watchdog (seconds) */
	return(0);
}
/*
 * Init: allocate USB transfers/buffers, open the bulk RX/TX and
 * interrupt pipes, and start reception.
 *
 * Idempotent while running (returns immediately if IFF_RUNNING).
 * NOTE(review): the lgue_*_buf kmalloc()s are unconditional; if a prior
 * init attempt failed partway through (after allocating a buffer), a
 * retry appears to leak the earlier allocation -- confirm against
 * lgue_stop()'s free/NULL discipline.
 */
static void
lgue_init(void *xsc)
{
	struct lgue_softc *sc;
	struct ifnet *ifp;
	usbd_status err;

	sc = xsc;
	ifp = &sc->lgue_arpcom.ac_if;

	if (ifp->if_flags & IFF_RUNNING)
		return;

	/* Create RX and TX bufs */
	if (sc->lgue_tx_xfer == NULL) {
		sc->lgue_tx_xfer = usbd_alloc_xfer(sc->lgue_udev);
		if (sc->lgue_tx_xfer == NULL) {
			if_printf(ifp, "tx buffer allocate failed\n");
			return;
		}
	}
	sc->lgue_tx_buf = kmalloc(LGUE_BUFSZ, M_USBDEV, M_WAITOK);

	if (sc->lgue_rx_xfer == NULL) {
		sc->lgue_rx_xfer = usbd_alloc_xfer(sc->lgue_udev);
		if (sc->lgue_rx_xfer == NULL) {
			if_printf(ifp, "rx buffer allocate failed\n");
			return;
		}
	}
	sc->lgue_rx_buf = kmalloc(LGUE_BUFSZ, M_USBDEV, M_WAITOK);

	/* Create INTR buf */
	if (sc->lgue_intr_xfer == NULL) {
		sc->lgue_intr_xfer = usbd_alloc_xfer(sc->lgue_udev);
		if (sc->lgue_intr_xfer == NULL) {
			if_printf(ifp, "intr buffer allocate failed\n");
			return;
		}
	}
	sc->lgue_intr_buf = kmalloc(LGUE_BUFSZ, M_USBDEV, M_WAITOK);

	/* Open RX and TX pipes. */
	err = usbd_open_pipe(sc->lgue_data_iface, sc->lgue_ed[LGUE_ENDPT_RX],
	    USBD_EXCLUSIVE_USE, &sc->lgue_ep[LGUE_ENDPT_RX]);
	if (err) {
		if_printf(ifp, "open RX pipe failed: %s\n", usbd_errstr(err));
		return;
	}
	err = usbd_open_pipe(sc->lgue_data_iface, sc->lgue_ed[LGUE_ENDPT_TX],
	    USBD_EXCLUSIVE_USE, &sc->lgue_ep[LGUE_ENDPT_TX]);
	if (err) {
		if_printf(ifp, "open TX pipe failed: %s\n", usbd_errstr(err));
		return;
	}
	/* Open INTR pipe. */
	err = usbd_open_pipe(sc->lgue_ctl_iface, sc->lgue_ed[LGUE_ENDPT_INTR],
	    USBD_EXCLUSIVE_USE, &sc->lgue_ep[LGUE_ENDPT_INTR]);
	if (err) {
		if_printf(ifp, "open INTR pipe failed: %s\n",
		    usbd_errstr(err));
		return;
	}

	/* Create internal queue */
	STAILQ_INIT(&sc->lgue_tx_queue);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	sc->lgue_dying = 0;

	/* Prime reception and the status-interrupt pipe. */
	lgue_rxstart(ifp);
	lgue_intrstart(ifp);
}
/*
 * Stop: abort and close all pipes, free all transfer buffers and xfer
 * handles, drain the software TX queue, and clear IFF_RUNNING.
 *
 * Sets lgue_dying first so completion callbacks triggered by the pipe
 * aborts return without touching freed state.  Idempotent: returns
 * immediately if already dying.
 */
static void
lgue_stop(struct lgue_softc *sc)
{
	struct ifnet *ifp;
	usbd_status err;
	struct lgue_queue_entry *entry;

	if (sc->lgue_dying)
		return;

	sc->lgue_dying = 1;

	ifp = &sc->lgue_arpcom.ac_if;

	/* Stop transfers */
	if (sc->lgue_ep[LGUE_ENDPT_TX] != NULL) {
		err = usbd_abort_pipe(sc->lgue_ep[LGUE_ENDPT_TX]);
		if (err) {
			if_printf(ifp, "abort tx pipe failed:%s\n",
			    usbd_errstr(err));
		}
		err = usbd_close_pipe(sc->lgue_ep[LGUE_ENDPT_TX]);
		if (err) {
			if_printf(ifp, "close tx pipe failed:%s\n",
			    usbd_errstr(err));
		}
	}

	if (sc->lgue_ep[LGUE_ENDPT_RX] != NULL) {
		err = usbd_abort_pipe(sc->lgue_ep[LGUE_ENDPT_RX]);
		if (err) {
			if_printf(ifp, "abort rx pipe failed:%s\n",
			    usbd_errstr(err));
		}
		err = usbd_close_pipe(sc->lgue_ep[LGUE_ENDPT_RX]);
		if (err) {
			if_printf(ifp, "close rx pipe failed:%s\n",
			    usbd_errstr(err));
		}
	}

	if (sc->lgue_ep[LGUE_ENDPT_INTR] != NULL) {
		err = usbd_abort_pipe(sc->lgue_ep[LGUE_ENDPT_INTR]);
		if (err) {
			if_printf(ifp, "abort intr pipe failed:%s\n",
			    usbd_errstr(err));
		}
		err = usbd_close_pipe(sc->lgue_ep[LGUE_ENDPT_INTR]);
		if (err) {
			if_printf(ifp, "close intr pipe failed:%s\n",
			    usbd_errstr(err));
		}
	}

	/* Free tx buffers */
	if (sc->lgue_tx_buf != NULL) {
		kfree(sc->lgue_tx_buf, M_USBDEV);
		sc->lgue_tx_buf = NULL;
	}
	if (sc->lgue_tx_xfer != NULL) {
		usbd_free_xfer(sc->lgue_tx_xfer);
		sc->lgue_tx_xfer = NULL;
	}
	/* Free rx buffers */
	if (sc->lgue_rx_buf != NULL) {
		kfree(sc->lgue_rx_buf, M_USBDEV);
		sc->lgue_rx_buf = NULL;
	}
	if (sc->lgue_rx_xfer != NULL) {
		usbd_free_xfer(sc->lgue_rx_xfer);
		sc->lgue_rx_xfer = NULL;
	}
	/* Free intr buffer */
	if (sc->lgue_intr_buf != NULL) {
		kfree(sc->lgue_intr_buf, M_USBDEV);
		sc->lgue_intr_buf = NULL;
	}
	if (sc->lgue_intr_xfer != NULL) {
		usbd_free_xfer(sc->lgue_intr_xfer);
		sc->lgue_intr_xfer = NULL;
	}

	/* Clear internal queue */
	while (!STAILQ_EMPTY(&sc->lgue_tx_queue)) {
		entry = STAILQ_FIRST(&sc->lgue_tx_queue);
		STAILQ_REMOVE_HEAD(&sc->lgue_tx_queue, entry_next);
		m_freem(entry->entry_mbuf);
		kfree(entry, M_USBDEV);
	}

	ifp->if_flags &= ~IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
}
/*
 * Reset and initialize the chip.
 *
 * Performs the SMC91Cxx bring-up sequence: soft reset, transmitter
 * control setup with auto-release, squelch-level tweak, MMU reset,
 * multicast programming, and interrupt unmasking; finally marks the
 * interface running and kicks the transmit path.  Register accesses are
 * banked -- every SMC_SELECT_BANK() call gates which registers the
 * subsequent in/out operations touch.
 */
void
sninit(void *xsc)
{
	struct sn_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int flags;
	int mask;

	/*
	 * This resets the registers mostly to defaults, but doesn't affect
	 * EEPROM.  After the reset cycle, we pause briefly for the chip to
	 * be happy.
	 */
	SMC_SELECT_BANK(0);
	outw(BASE + RECV_CONTROL_REG_W, RCR_SOFTRESET);
	SMC_DELAY();
	outw(BASE + RECV_CONTROL_REG_W, 0x0000);
	SMC_DELAY();
	SMC_DELAY();

	outw(BASE + TXMIT_CONTROL_REG_W, 0x0000);

	/*
	 * Set the control register to automatically release successfully
	 * transmitted packets (making the best use out of our limited
	 * memory) and to enable the EPH interrupt on certain TX errors.
	 */
	SMC_SELECT_BANK(1);
	outw(BASE + CONTROL_REG_W, (CTR_AUTO_RELEASE | CTR_TE_ENABLE |
	    CTR_CR_ENABLE | CTR_LE_ENABLE));

	/* Set squelch level to 240mV (default 480mV) */
	flags = inw(BASE + CONFIG_REG_W);
	flags |= CR_SET_SQLCH;
	outw(BASE + CONFIG_REG_W, flags);

	/*
	 * Reset the MMU and wait for it to be un-busy.
	 */
	SMC_SELECT_BANK(2);
	outw(BASE + MMU_CMD_REG_W, MMUCR_RESET);
	while (inw(BASE + MMU_CMD_REG_W) & MMUCR_BUSY)	/* NOTHING */
		;

	/*
	 * Disable all interrupts
	 */
	outb(BASE + INTR_MASK_REG_B, 0x00);

	sn_setmcast(sc);

	/*
	 * Set the transmitter control.  We want it enabled.
	 */
	flags = TCR_ENABLE;

#ifndef SW_PAD
	/*
	 * I (GB) have been unlucky getting this to work.
	 */
	flags |= TCR_PAD_ENABLE;
#endif	/* SW_PAD */

	outw(BASE + TXMIT_CONTROL_REG_W, flags);

	/*
	 * Now, enable interrupts
	 */
	SMC_SELECT_BANK(2);

	mask = IM_EPH_INT | IM_RX_OVRN_INT | IM_RCV_INT | IM_TX_INT;

	outb(BASE + INTR_MASK_REG_B, mask);
	sc->intr_mask = mask;
	sc->pages_wanted = -1;	/* no on-card allocation outstanding */

	/*
	 * Mark the interface running but not active.
	 */
	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	/*
	 * Attempt to push out any waiting packets.
	 */
	if_devstart(ifp);
}
/*
 * Resume a packet transmit operation after a memory allocation
 * has completed.
 *
 * This is basically a hacked up copy of snstart() which handles
 * a completed memory allocation the same way snstart() does.
 * It then passes control to snstart to handle any other queued
 * packets.
 */
static void
snresume(struct ifnet *ifp)
{
	struct sn_softc *sc = ifp->if_softc;
	u_int len;
	struct mbuf *m;
	struct mbuf *top;
	int pad;
	int mask;
	u_short length;
	u_short numPages;
	u_short pages_wanted;
	u_char packet_no;

	/* Nothing to resume if no allocation was outstanding. */
	if (sc->pages_wanted < 0)
		return;

	pages_wanted = sc->pages_wanted;
	sc->pages_wanted = -1;

	/*
	 * Sneak a peek at the next packet
	 */
	m = ifq_dequeue(&ifp->if_snd);
	if (m == NULL) {
		kprintf("%s: snresume() with nothing to send\n",
		    ifp->if_xname);
		return;
	}

	/*
	 * Compute the frame length and set pad to give an overall even
	 * number of bytes.  Below we assume that the packet length is even.
	 */
	for (len = 0, top = m; m; m = m->m_next)
		len += m->m_len;
	pad = (len & 1);

	/*
	 * We drop packets that are too large.  Perhaps we should truncate
	 * them instead?
	 */
	if (len + pad > ETHER_MAX_LEN - ETHER_CRC_LEN) {
		kprintf("%s: large packet discarded (B)\n", ifp->if_xname);
		IFNET_STAT_INC(ifp, oerrors, 1);
		m_freem(top);
		return;
	}

#ifdef SW_PAD
	/*
	 * If HW padding is not turned on, then pad to ETHER_MIN_LEN.
	 */
	if (len < ETHER_MIN_LEN - ETHER_CRC_LEN)
		pad = ETHER_MIN_LEN - ETHER_CRC_LEN - len;
#endif	/* SW_PAD */

	length = pad + len;

	/*
	 * The MMU wants the number of pages to be the number of 256 byte
	 * 'pages', minus 1 (A packet can't ever have 0 pages.  We also
	 * include space for the status word, byte count and control bytes in
	 * the allocation request.
	 */
	numPages = (length + 6) >> 8;

	SMC_SELECT_BANK(2);

	/*
	 * The memory allocation completed.  Check the results.  If it failed,
	 * we simply set a watchdog timer and hope for the best.
	 */
	packet_no = inb(BASE + ALLOC_RESULT_REG_B);
	if (packet_no & ARR_FAILED) {
		kprintf("%s: Memory allocation failed.  Weird.\n",
		    ifp->if_xname);
		ifp->if_timer = 1;
		/* Put the packet back and retry from snstart() later. */
		ifq_prepend(&ifp->if_snd, top);
		goto try_start;
	}
	/*
	 * We have a packet number, so tell the card to use it.
	 */
	outb(BASE + PACKET_NUM_REG_B, packet_no);

	/*
	 * Now, numPages should match the pages_wanted recorded when the
	 * memory allocation was initiated.
	 */
	if (pages_wanted != numPages) {
		kprintf("%s: memory allocation wrong size.  Weird.\n",
		    ifp->if_xname);
		/*
		 * If the allocation was the wrong size we simply release the
		 * memory once it is granted.  Wait for the MMU to be un-busy.
		 */
		while (inw(BASE + MMU_CMD_REG_W) & MMUCR_BUSY)	/* NOTHING */
			;
		outw(BASE + MMU_CMD_REG_W, MMUCR_FREEPKT);
		ifq_prepend(&ifp->if_snd, top);
		return;
	}

	/*
	 * Point to the beginning of the packet
	 */
	outw(BASE + POINTER_REG_W, PTR_AUTOINC | 0x0000);

	/*
	 * Send the packet length (+6 for status, length and control byte)
	 * and the status word (set to zeros)
	 */
	outw(BASE + DATA_REG_W, 0);
	outb(BASE + DATA_REG_B, (length + 6) & 0xFF);
	outb(BASE + DATA_REG_B, (length + 6) >> 8);

	/*
	 * Push out the data to the card.
	 */
	for (m = top; m != NULL; m = m->m_next) {
		/*
		 * Push out words.
		 */
		outsw(BASE + DATA_REG_W, mtod(m, caddr_t), m->m_len / 2);

		/*
		 * Push out remaining byte.
		 */
		if (m->m_len & 1)
			outb(BASE + DATA_REG_B,
			    *(mtod(m, caddr_t) + m->m_len - 1));
	}

	/*
	 * Push out padding.
	 */
	while (pad > 1) {
		outw(BASE + DATA_REG_W, 0);
		pad -= 2;
	}
	if (pad)
		outb(BASE + DATA_REG_B, 0);

	/*
	 * Push out control byte and unused packet byte.  The control byte is
	 * 0 meaning the packet is even lengthed and no special CRC handling
	 * is desired.
	 */
	outw(BASE + DATA_REG_W, 0);

	/*
	 * Enable the interrupts and let the chipset deal with it.  Also set
	 * a watchdog in case we miss the interrupt.
	 */
	mask = inb(BASE + INTR_MASK_REG_B) | (IM_TX_INT | IM_TX_EMPTY_INT);
	outb(BASE + INTR_MASK_REG_B, mask);
	sc->intr_mask = mask;

	outw(BASE + MMU_CMD_REG_W, MMUCR_ENQUEUE);

	BPF_MTAP(ifp, top);

	IFNET_STAT_INC(ifp, opackets, 1);
	m_freem(top);

try_start:
	/*
	 * Now pass control to snstart() to queue any additional packets
	 */
	ifq_clr_oactive(&ifp->if_snd);
	if_devstart(ifp);

	/*
	 * We've sent something, so we're active.  Set a watchdog in case the
	 * TX_EMPTY interrupt is lost.
	 */
	ifq_set_oactive(&ifp->if_snd);
	ifp->if_timer = 1;
}
/*
 * SMC91Cxx interrupt service routine.
 *
 * Reads and masks the interrupt status, then dispatches each pending
 * condition: RX overrun, packet received, on-card allocation complete,
 * TX error completion, and TX-empty statistics update.
 *
 * NOTE(review): this fragment is truncated in the source being edited --
 * the 'out:' label targeted by the goto below and the function epilogue
 * (interrupt re-enable / closing brace) are not visible here; the code
 * tokens are preserved exactly as found.
 */
void
sn_intr(void *arg)
{
	int status, interrupts;
	struct sn_softc *sc = (struct sn_softc *) arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	/*
	 * Chip state registers
	 */
	u_char mask;
	u_char packet_no;
	u_short tx_status;
	u_short card_stats;

	/*
	 * Clear the watchdog.
	 */
	ifp->if_timer = 0;

	SMC_SELECT_BANK(2);

	/*
	 * Obtain the current interrupt mask and clear the hardware mask
	 * while servicing interrupts.
	 */
	mask = inb(BASE + INTR_MASK_REG_B);
	outb(BASE + INTR_MASK_REG_B, 0x00);

	/*
	 * Get the set of interrupts which occurred and eliminate any which
	 * are masked.
	 */
	interrupts = inb(BASE + INTR_STAT_REG_B);
	status = interrupts & mask;

	/*
	 * Now, process each of the interrupt types.
	 */

	/*
	 * Receive Overrun.
	 */
	if (status & IM_RX_OVRN_INT) {
		/*
		 * Acknowlege Interrupt
		 */
		SMC_SELECT_BANK(2);
		outb(BASE + INTR_ACK_REG_B, IM_RX_OVRN_INT);

		IFNET_STAT_INC(&sc->arpcom.ac_if, ierrors, 1);
	}

	/*
	 * Got a packet.
	 */
	if (status & IM_RCV_INT) {
#if 1
		int packet_number;

		SMC_SELECT_BANK(2);
		packet_number = inw(BASE + FIFO_PORTS_REG_W);

		if (packet_number & FIFO_REMPTY) {
			/*
			 * we got called , but nothing was on the FIFO
			 */
			kprintf("sn: Receive interrupt with nothing on FIFO\n");
			goto out;
		}
#endif
		snread(ifp);
	}

	/*
	 * An on-card memory allocation came through.
	 */
	if (status & IM_ALLOC_INT) {
		/*
		 * Disable this interrupt.
		 */
		mask &= ~IM_ALLOC_INT;
		ifq_clr_oactive(&sc->arpcom.ac_if.if_snd);
		snresume(&sc->arpcom.ac_if);
	}

	/*
	 * TX Completion.  Handle a transmit error message.  This will only be
	 * called when there is an error, because of the AUTO_RELEASE mode.
	 */
	if (status & IM_TX_INT) {
		/*
		 * Acknowlege Interrupt
		 */
		SMC_SELECT_BANK(2);
		outb(BASE + INTR_ACK_REG_B, IM_TX_INT);

		packet_no = inw(BASE + FIFO_PORTS_REG_W);
		packet_no &= FIFO_TX_MASK;

		/*
		 * select this as the packet to read from
		 */
		outb(BASE + PACKET_NUM_REG_B, packet_no);

		/*
		 * Position the pointer to the first word from this packet
		 */
		outw(BASE + POINTER_REG_W, PTR_AUTOINC | PTR_READ | 0x0000);

		/*
		 * Fetch the TX status word.  The value found here will be a
		 * copy of the EPH_STATUS_REG_W at the time the transmit
		 * failed.
		 */
		tx_status = inw(BASE + DATA_REG_W);

		if (tx_status & EPHSR_TX_SUC) {
			device_printf(sc->dev,
			    "Successful packet caused interrupt\n");
		} else {
			IFNET_STAT_INC(&sc->arpcom.ac_if, oerrors, 1);
		}

		if (tx_status & EPHSR_LATCOL)
			IFNET_STAT_INC(&sc->arpcom.ac_if, collisions, 1);

		/*
		 * Some of these errors will have disabled transmit.
		 * Re-enable transmit now.
		 */
		SMC_SELECT_BANK(0);

#ifdef SW_PAD
		outw(BASE + TXMIT_CONTROL_REG_W, TCR_ENABLE);
#else
		outw(BASE + TXMIT_CONTROL_REG_W, TCR_ENABLE | TCR_PAD_ENABLE);
#endif	/* SW_PAD */

		/*
		 * kill the failed packet.  Wait for the MMU to be un-busy.
		 */
		SMC_SELECT_BANK(2);
		while (inw(BASE + MMU_CMD_REG_W) & MMUCR_BUSY)	/* NOTHING */
			;
		outw(BASE + MMU_CMD_REG_W, MMUCR_FREEPKT);

		/*
		 * Attempt to queue more transmits.
		 */
		ifq_clr_oactive(&sc->arpcom.ac_if.if_snd);
		if_devstart(&sc->arpcom.ac_if);
	}

	/*
	 * Transmit underrun.  We use this opportunity to update transmit
	 * statistics from the card.
	 */
	if (status & IM_TX_EMPTY_INT) {
		/*
		 * Acknowlege Interrupt
		 */
		SMC_SELECT_BANK(2);
		outb(BASE + INTR_ACK_REG_B, IM_TX_EMPTY_INT);

		/*
		 * Disable this interrupt.
		 */
		mask &= ~IM_TX_EMPTY_INT;

		SMC_SELECT_BANK(0);
		card_stats = inw(BASE + COUNTER_REG_W);

		/*
		 * Single collisions
		 */
		IFNET_STAT_INC(&sc->arpcom.ac_if, collisions,
		    card_stats & ECR_COLN_MASK);

		/*
		 * Multiple collisions
		 */
		IFNET_STAT_INC(&sc->arpcom.ac_if, collisions,
		    (card_stats & ECR_MCOLN_MASK) >> 4);

		SMC_SELECT_BANK(2);

		/*
		 * Attempt to enqueue some more stuff.
		 */
		ifq_clr_oactive(&sc->arpcom.ac_if.if_snd);
		if_devstart(&sc->arpcom.ac_if);
	}
/*
 * Process an ioctl for the virtual interface.
 *
 * Handles address assignment (deferred to ether_ioctl), up/down flag
 * transitions, MTU changes within the NG_EIFACE bounds, and stubs out
 * multicast and PHY requests.  Runs inside a critical section.
 */
static int
ng_eiface_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data,
    struct ucred *cr)
{
	struct ifreq *const ifr = (struct ifreq *) data;
	int error = 0;

#ifdef DEBUG
	ng_eiface_print_ioctl(ifp, cmd, data);
#endif
	crit_enter();
	switch (cmd) {
	/* These two are mostly handled at a higher layer */
	case SIOCSIFADDR:
		error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCGIFADDR:
		break;

	/* Set flags */
	case SIOCSIFFLAGS:
		/*
		 * If the interface is marked up and stopped, then start it.
		 * If it is marked down and running, then stop it.
		 */
		if (ifr->ifr_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING)) {
				ifq_clr_oactive(&ifp->if_snd);
				ifp->if_flags |= IFF_RUNNING;
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				ifp->if_flags &= ~IFF_RUNNING;
				ifq_clr_oactive(&ifp->if_snd);
			}
		}
		break;

	/* Set the interface MTU */
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > NG_EIFACE_MTU_MAX ||
		    ifr->ifr_mtu < NG_EIFACE_MTU_MIN)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;

	/* Stuff that's not supported */
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = 0;	/* accepted but ignored: no real hardware */
		break;
	case SIOCSIFPHYS:
		error = EOPNOTSUPP;
		break;

	default:
		error = EINVAL;
		break;
	}
	crit_exit();
	return (error);
}
/*
 * eg(4) interrupt handler.
 *
 * Loops while the adapter has a response PCB ready (EG_STAT_ACRF),
 * reading and dispatching it: received packets are copied word-by-word
 * from the data port into eg_inbuf and passed up, transmit completions
 * update counters and restart output, and statistics responses are
 * printed under DPRINTF.  Returns nonzero if any interrupt was serviced.
 */
int
egintr(void *arg)
{
	struct eg_softc *sc = arg;
	bus_space_tag_t bst = sc->sc_bst;
	bus_space_handle_t bsh = sc->sc_bsh;
	int ret = 0;
	int i, len;
	u_short *ptr;

	while (bus_space_read_1(bst, bsh, EG_STATUS) & EG_STAT_ACRF) {
		ret = 1;
		egreadPCB(sc);
		switch (sc->eg_pcb[0]) {
		case EG_RSP_RECVPACKET:
			/* Payload length is a little-endian 16-bit field. */
			len = sc->eg_pcb[6] | (sc->eg_pcb[7] << 8);

			/* Set direction bit : Adapter -> host */
			bus_space_write_1(bst, bsh, EG_CONTROL,
			    bus_space_read_1(bst, bsh, EG_CONTROL) |
			    EG_CTL_DIR);

			for (ptr = (u_short *)sc->eg_inbuf; len > 0;
			    len -= 2) {
				/* Busy-wait for the host-ready bit. */
				for (i = 10000; i != 0; i--) {
					if (bus_space_read_1(bst, bsh,
					    EG_STATUS) & EG_STAT_HRDY)
						break;
					delay(10);
				}
				if (i == 0) {
					printf("%s: receive failed\n",
					    sc->sc_dev.dv_xname);
					break;
				}
				*ptr++ = bus_space_read_2(bst, bsh, EG_DATA);
			}

			if (len <= 0) {
				/* Entire frame copied; pass it up and re-arm. */
				len = sc->eg_pcb[8] | (sc->eg_pcb[9] << 8);
				egread(sc, sc->eg_inbuf, len);

				sc->eg_incount--;
				egrecv(sc);
			}
			break;

		case EG_RSP_SENDPACKET:
			if (sc->eg_pcb[6] || sc->eg_pcb[7]) {
				DPRINTF(("packet dropped\n"));
				sc->sc_arpcom.ac_if.if_oerrors++;
			} else
				sc->sc_arpcom.ac_if.if_opackets++;
			/* Low nibble of byte 8 carries the collision count. */
			sc->sc_arpcom.ac_if.if_collisions +=
			    sc->eg_pcb[8] & 0xf;
			ifq_clr_oactive(&sc->sc_arpcom.ac_if.if_snd);
			egstart(&sc->sc_arpcom.ac_if);
			break;

		case EG_RSP_GETSTATS:
			DPRINTF(("Card Statistics\n"));
			bcopy(&sc->eg_pcb[2], &i, sizeof(i));
			DPRINTF(("Receive Packets %d\n", i));
			bcopy(&sc->eg_pcb[6], &i, sizeof(i));
			DPRINTF(("Transmit Packets %d\n", i));
			DPRINTF(("CRC errors %d\n",
			    *(short *)&sc->eg_pcb[10]));
			DPRINTF(("alignment errors %d\n",
			    *(short *)&sc->eg_pcb[12]));
			DPRINTF(("no resources errors %d\n",
			    *(short *)&sc->eg_pcb[14]));
			DPRINTF(("overrun errors %d\n",
			    *(short *)&sc->eg_pcb[16]));
			break;

		default:
			DPRINTF(("egintr: Unknown response %x??\n",
			    sc->eg_pcb[0]));
			egprintpcb(sc);
			break;
		}
	}

	return (ret);
}
/*
 * ep(4) interrupt handler.
 *
 * Masks interrupts, loops acknowledging and dispatching each pending
 * source (RX complete/early, TX available, card failure, TX complete),
 * re-scans once for anything that arrived during processing, and then
 * re-enables the interrupt mask.  A card failure resets the interface
 * via ep_if_init() and returns immediately.
 */
void
ep_intr(void *arg)
{
	struct ep_softc *sc = arg;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int status;

	/*
	 * quick fix: Try to detect an interrupt when the card goes away.
	 */
	if (sc->gone || inw(BASE + EP_STATUS) == 0xffff) {
		return;
	}

	outw(BASE + EP_COMMAND, SET_INTR_MASK);	/* disable all Ints */

rescan:
	while ((status = inw(BASE + EP_STATUS)) & S_5_INTS) {
		/* first acknowledge all interrupt sources */
		outw(BASE + EP_COMMAND, ACK_INTR | (status & S_MASK));

		if (status & (S_RX_COMPLETE | S_RX_EARLY))
			epread(sc);
		if (status & S_TX_AVAIL) {
			/* we need ACK */
			ifp->if_timer = 0;
			ifq_clr_oactive(&ifp->if_snd);
			GO_WINDOW(1);
			inw(BASE + EP_W1_FREE_TX);
			if_devstart(ifp);
		}
		if (status & S_CARD_FAILURE) {
			ifp->if_timer = 0;
#ifdef EP_LOCAL_STATS
			kprintf("\n");
			if_printf(ifp, "\n\tStatus: %x\n", status);
			GO_WINDOW(4);
			kprintf("\tFIFO Diagnostic: %x\n",
			    inw(BASE + EP_W4_FIFO_DIAG));
			kprintf("\tStat: %x\n", sc->stat);
			kprintf("\tIpackets=%d, Opackets=%d\n",
			    ifp->if_ipackets, ifp->if_opackets);
			kprintf("\tNOF=%d, NOMB=%d, RXOF=%d, RXOL=%d, TXU=%d\n",
			    sc->rx_no_first, sc->rx_no_mbuf, sc->rx_overrunf,
			    sc->rx_overrunl, sc->tx_underrun);
#else
#ifdef DIAGNOSTIC
			if_printf(ifp, "Status: %x (input buffer overflow)\n",
			    status);
#else
			IFNET_STAT_INC(ifp, ierrors, 1);
#endif
#endif
			/* Card wedged: full reinit is the recovery path. */
			ep_if_init(sc);
			return;
		}
		if (status & S_TX_COMPLETE) {
			ifp->if_timer = 0;
			/* we need ACK. we do it at the end */
			/*
			 * We need to read TX_STATUS until we get a 0 status
			 * in order to turn off the interrupt flag.
			 */
			while ((status = inb(BASE + EP_W1_TX_STATUS)) &
			    TXS_COMPLETE) {
				/* Successful completion: nothing to do. */
				if (status & TXS_SUCCES_INTR_REQ);
				else if (status &
				    (TXS_UNDERRUN | TXS_JABBER |
				    TXS_MAX_COLLISION)) {
					outw(BASE + EP_COMMAND, TX_RESET);
					if (status & TXS_UNDERRUN) {
#ifdef EP_LOCAL_STATS
						sc->tx_underrun++;
#endif
					} else {
						if (status & TXS_JABBER);
						else	/* TXS_MAX_COLLISION -
							 * we shouldn't get
							 * here */
							IFNET_STAT_INC(ifp,
							    collisions, 1);
					}
					IFNET_STAT_INC(ifp, oerrors, 1);
					outw(BASE + EP_COMMAND, TX_ENABLE);
					/*
					 * To have a tx_avail_int but giving
					 * the chance to the Reception
					 */
					if (!ifq_is_empty(&ifp->if_snd))
						outw(BASE + EP_COMMAND,
						    SET_TX_AVAIL_THRESH | 8);
				}
				/* pops up the next status */
				outb(BASE + EP_W1_TX_STATUS, 0x0);
			}	/* while */
			ifq_clr_oactive(&ifp->if_snd);
			GO_WINDOW(1);
			inw(BASE + EP_W1_FREE_TX);
			if_devstart(ifp);
		}	/* end TX_COMPLETE */
	}

	outw(BASE + EP_COMMAND, C_INTR_LATCH);	/* ACK int Latch */

	if ((status = inw(BASE + EP_STATUS)) & S_5_INTS)
		goto rescan;

	/* re-enable Ints */
	outw(BASE + EP_COMMAND, SET_INTR_MASK | S_5_INTS);
}
/*
 * Handle a VIO DRING_DATA message.
 *
 * INFO: the peer has filled descriptors -- copy each ready descriptor in
 * from the peer's ring via hypervisor LDC copies, pull the packet data
 * into a fresh mbuf, mark the descriptor done, and finally ACK (or NACK
 * if nothing was consumed).  ACK: the peer has consumed our descriptors
 * -- reclaim completed TX slots, free their buffers, and restart output
 * if ring space opened up.
 */
void
vnet_rx_vio_dring_data(struct vnet_softc *sc, struct vio_msg_tag *tag)
{
	struct vio_dring_msg *dm = (struct vio_dring_msg *)tag;
	struct ldc_conn *lc = &sc->sc_lc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct mbuf *m;
	paddr_t pa;
	psize_t nbytes;
	int err;

	switch(tag->stype) {
	case VIO_SUBTYPE_INFO:
	{
		struct vnet_desc desc;
		uint64_t cookie;
		paddr_t desc_pa;
		int idx, ack_end_idx = -1;
		struct mbuf_list ml = MBUF_LIST_INITIALIZER();

		idx = dm->start_idx;
		for (;;) {
			/* Copy the peer's descriptor into our local 'desc'. */
			cookie = sc->sc_peer_dring_cookie.addr;
			cookie += idx * sc->sc_peer_desc_size;
			nbytes = sc->sc_peer_desc_size;
			pmap_extract(pmap_kernel(), (vaddr_t)&desc, &desc_pa);
			err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN, cookie,
			    desc_pa, nbytes, &nbytes);
			if (err != H_EOK) {
				printf("hv_ldc_copy_in %d\n", err);
				break;
			}

			if (desc.hdr.dstate != VIO_DESC_READY)
				break;

			if (desc.nbytes > (ETHER_MAX_LEN - ETHER_CRC_LEN)) {
				ifp->if_ierrors++;
				goto skip;
			}

			m = MCLGETI(NULL, M_DONTWAIT, NULL, desc.nbytes);
			if (!m)
				break;
			m->m_len = m->m_pkthdr.len = desc.nbytes;

			/* Copy rounded-up payload in, then fix alignment. */
			nbytes = roundup(desc.nbytes + VNET_ETHER_ALIGN, 8);

			pmap_extract(pmap_kernel(), (vaddr_t)m->m_data, &pa);
			err = hv_ldc_copy(lc->lc_id, LDC_COPY_IN,
			    desc.cookie[0].addr, pa, nbytes, &nbytes);
			if (err != H_EOK) {
				m_freem(m);
				goto skip;
			}
			m->m_data += VNET_ETHER_ALIGN;

			ml_enqueue(&ml, m);

		skip:
			/* Hand the descriptor back to the peer as DONE. */
			desc.hdr.dstate = VIO_DESC_DONE;
			nbytes = sc->sc_peer_desc_size;
			err = hv_ldc_copy(lc->lc_id, LDC_COPY_OUT, cookie,
			    desc_pa, nbytes, &nbytes);
			if (err != H_EOK)
				printf("hv_ldc_copy_out %d\n", err);

			ack_end_idx = idx;
			if (++idx == sc->sc_peer_dring_nentries)
				idx = 0;
		}

		if_input(ifp, &ml);

		/* NACK if we consumed nothing, otherwise ACK up to end_idx. */
		if (ack_end_idx == -1) {
			dm->tag.stype = VIO_SUBTYPE_NACK;
		} else {
			dm->tag.stype = VIO_SUBTYPE_ACK;
			dm->end_idx = ack_end_idx;
		}
		dm->tag.sid = sc->sc_local_sid;
		dm->proc_state = VIO_DP_STOPPED;
		vnet_sendmsg(sc, dm, sizeof(*dm));
		break;
	}

	case VIO_SUBTYPE_ACK:
	{
		struct ldc_map *map = sc->sc_lm;
		u_int cons, count;

		sc->sc_peer_state = dm->proc_state;

		/* Reclaim every TX descriptor the peer has completed. */
		cons = sc->sc_tx_cons & (sc->sc_vd->vd_nentries - 1);
		while (sc->sc_vd->vd_desc[cons].hdr.dstate == VIO_DESC_DONE) {
			map->lm_slot[sc->sc_vsd[cons].vsd_map_idx].entry = 0;
			atomic_dec_int(&map->lm_count);

			pool_put(&sc->sc_pool, sc->sc_vsd[cons].vsd_buf);
			sc->sc_vsd[cons].vsd_buf = NULL;

			ifp->if_opackets++;

			sc->sc_vd->vd_desc[cons].hdr.dstate = VIO_DESC_FREE;
			sc->sc_tx_cons++;
			cons = sc->sc_tx_cons & (sc->sc_vd->vd_nentries - 1);
		}

		/* Re-notify the peer if it stopped with work outstanding. */
		count = sc->sc_tx_prod - sc->sc_tx_cons;
		if (count > 0 && sc->sc_peer_state != VIO_DP_ACTIVE)
			vnet_send_dring_data(sc, cons);

		KERNEL_LOCK();
		if (count < (sc->sc_vd->vd_nentries - 1))
			ifq_clr_oactive(&ifp->if_snd);
		if (count == 0)
			ifp->if_timer = 0;

		vnet_start(ifp);
		KERNEL_UNLOCK();
		break;
	}

	case VIO_SUBTYPE_NACK:
		DPRINTF(("DATA/NACK/DRING_DATA\n"));
		sc->sc_peer_state = VIO_DP_STOPPED;
		break;

	default:
		DPRINTF(("DATA/0x%02x/DRING_DATA\n", tag->stype));
		break;
	}
}
int an_init(struct ifnet *ifp) { struct an_softc *sc = ifp->if_softc; struct ieee80211com *ic = &sc->sc_ic; int i, error, fid; DPRINTF(("an_init: enabled %d\n", sc->sc_enabled)); if (!sc->sc_enabled) { if (sc->sc_enable) (*sc->sc_enable)(sc); an_wait(sc); sc->sc_enabled = 1; } else { an_stop(ifp, 0); if ((error = an_reset(sc)) != 0) { printf("%s: failed to reset\n", ifp->if_xname); an_stop(ifp, 1); return error; } } CSR_WRITE_2(sc, AN_SW0, AN_MAGIC); /* Allocate the TX buffers */ for (i = 0; i < AN_TX_RING_CNT; i++) { if ((error = an_alloc_nicmem(sc, AN_TX_MAX_LEN, &fid)) != 0) { printf("%s: failed to allocate nic memory\n", ifp->if_xname); an_stop(ifp, 1); return error; } DPRINTF2(("an_init: txbuf %d allocated %x\n", i, fid)); sc->sc_txd[i].d_fid = fid; sc->sc_txd[i].d_inuse = 0; } sc->sc_txcur = sc->sc_txnext = 0; IEEE80211_ADDR_COPY(sc->sc_config.an_macaddr, ic->ic_myaddr); an_swap16((u_int16_t *)&sc->sc_config.an_macaddr, 3); sc->sc_config.an_scanmode = AN_SCANMODE_ACTIVE; sc->sc_config.an_authtype = AN_AUTHTYPE_OPEN; /*XXX*/ if (ic->ic_flags & IEEE80211_F_WEPON) { sc->sc_config.an_authtype |= AN_AUTHTYPE_PRIVACY_IN_USE; } sc->sc_config.an_listen_interval = ic->ic_lintval; sc->sc_config.an_beacon_period = ic->ic_lintval; if (ic->ic_flags & IEEE80211_F_PMGTON) sc->sc_config.an_psave_mode = AN_PSAVE_PSP; else sc->sc_config.an_psave_mode = AN_PSAVE_CAM; sc->sc_config.an_ds_channel = ieee80211_chan2ieee(ic, ic->ic_ibss_chan); switch (ic->ic_opmode) { case IEEE80211_M_STA: sc->sc_config.an_opmode = AN_OPMODE_INFRASTRUCTURE_STATION; sc->sc_config.an_rxmode = AN_RXMODE_BC_MC_ADDR; break; #ifndef IEEE80211_STA_ONLY case IEEE80211_M_IBSS: sc->sc_config.an_opmode = AN_OPMODE_IBSS_ADHOC; sc->sc_config.an_rxmode = AN_RXMODE_BC_MC_ADDR; break; #endif case IEEE80211_M_MONITOR: sc->sc_config.an_opmode = AN_OPMODE_INFRASTRUCTURE_STATION; sc->sc_config.an_rxmode = AN_RXMODE_80211_MONITOR_ANYBSS; sc->sc_config.an_authtype = AN_AUTHTYPE_NONE; if (ic->ic_flags & IEEE80211_F_WEPON) 
sc->sc_config.an_authtype |= AN_AUTHTYPE_PRIVACY_IN_USE | AN_AUTHTYPE_ALLOW_UNENCRYPTED; break; default: printf("%s: bad opmode %d\n", ifp->if_xname, ic->ic_opmode); an_stop(ifp, 1); return EIO; } sc->sc_config.an_rxmode |= AN_RXMODE_NO_8023_HEADER; /* Set the ssid list */ memset(&sc->sc_buf, 0, sizeof(sc->sc_buf.sc_ssidlist)); sc->sc_buf.sc_ssidlist.an_entry[0].an_ssid_len = ic->ic_des_esslen; if (ic->ic_des_esslen) memcpy(sc->sc_buf.sc_ssidlist.an_entry[0].an_ssid, ic->ic_des_essid, ic->ic_des_esslen); an_swap16((u_int16_t *)&sc->sc_buf.sc_ssidlist.an_entry[0].an_ssid, 16); if ((error = an_write_rid(sc, AN_RID_SSIDLIST, &sc->sc_buf, sizeof(sc->sc_buf.sc_ssidlist)))) { printf("%s: failed to write ssid list\n", ifp->if_xname); an_stop(ifp, 1); return error; } /* Set the AP list */ memset(&sc->sc_buf, 0, sizeof(sc->sc_buf.sc_aplist)); (void)an_write_rid(sc, AN_RID_APLIST, &sc->sc_buf, sizeof(sc->sc_buf.sc_aplist)); /* Set the encapsulation */ for (i = 0; i < AN_ENCAP_NENTS; i++) { sc->sc_buf.sc_encap.an_entry[i].an_ethertype = 0; sc->sc_buf.sc_encap.an_entry[i].an_action = AN_RXENCAP_RFC1024 | AN_TXENCAP_RFC1024; } (void)an_write_rid(sc, AN_RID_ENCAP, &sc->sc_buf, sizeof(sc->sc_buf.sc_encap)); /* Set the WEP Keys */ if (ic->ic_flags & IEEE80211_F_WEPON) an_write_wepkey(sc, AN_RID_WEP_VOLATILE, sc->sc_wepkeys, sc->sc_tx_key); /* Set the configuration */ if ((error = an_write_rid(sc, AN_RID_GENCONFIG, &sc->sc_config, sizeof(sc->sc_config)))) { printf("%s: failed to write config\n", ifp->if_xname); an_stop(ifp, 1); return error; } /* Enable the MAC */ if (an_cmd(sc, AN_CMD_ENABLE, 0)) { printf("%s: failed to enable MAC\n", sc->sc_dev.dv_xname); an_stop(ifp, 1); return ENXIO; } if (ifp->if_flags & IFF_PROMISC) an_cmd(sc, AN_CMD_SET_MODE, 0xffff); ifp->if_flags |= IFF_RUNNING; ifq_clr_oactive(&ifp->if_snd); ic->ic_state = IEEE80211_S_INIT; if (ic->ic_opmode == IEEE80211_M_MONITOR) ieee80211_new_state(ic, IEEE80211_S_RUN, -1); /* enable interrupts */ CSR_WRITE_2(sc, 
AN_INT_EN, AN_INTRS); return 0; }
void egstart(struct ifnet *ifp) { struct eg_softc *sc = ifp->if_softc; bus_space_tag_t bst = sc->sc_bst; bus_space_handle_t bsh = sc->sc_bsh; struct mbuf *m0, *m; caddr_t buffer; int len; u_short *ptr; u_int i; /* Don't transmit if interface is busy or not running */ if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd)) return; loop: /* Dequeue the next datagram. */ IFQ_DEQUEUE(&ifp->if_snd, m0); if (m0 == NULL) return; ifq_set_oactive(&ifp->if_snd); /* We need to use m->m_pkthdr.len, so require the header */ if ((m0->m_flags & M_PKTHDR) == 0) panic("egstart: no header mbuf"); len = max(m0->m_pkthdr.len, ETHER_MIN_LEN); #if NBPFILTER > 0 if (ifp->if_bpf) bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT); #endif sc->eg_pcb[0] = EG_CMD_SENDPACKET; sc->eg_pcb[1] = 0x06; sc->eg_pcb[2] = 0; /* address not used, we send zero */ sc->eg_pcb[3] = 0; sc->eg_pcb[4] = 0; sc->eg_pcb[5] = 0; sc->eg_pcb[6] = len; /* length of packet */ sc->eg_pcb[7] = len >> 8; if (egwritePCB(sc) != 0) { DPRINTF(("egwritePCB in egstart failed\n")); ifp->if_oerrors++; ifq_clr_oactive(&ifp->if_snd); m_freem(m0); goto loop; } buffer = sc->eg_outbuf; for (m = m0; m != 0; m = m->m_next) { bcopy(mtod(m, caddr_t), buffer, m->m_len); buffer += m->m_len; } if (len > m0->m_pkthdr.len) bzero(buffer, len - m0->m_pkthdr.len); /* set direction bit: host -> adapter */ bus_space_write_1(bst, bsh, EG_CONTROL, bus_space_read_1(bst, bsh, EG_CONTROL) & ~EG_CTL_DIR); for (ptr = (u_short *)sc->eg_outbuf; len > 0; len -= 2) { bus_space_write_2(bst, bsh, EG_DATA, *ptr++); for (i = 10000; i != 0; i--) { if (bus_space_read_1(bst, bsh, EG_STATUS) & EG_STAT_HRDY) break; delay(10); } if (i == 0) { printf("%s: start failed\n", sc->sc_dev.dv_xname); break; } } m_freem(m0); }
/*
 * The order in here seems important. Otherwise we may not receive
 * interrupts. ?!
 */
static void
ep_if_init(void *xsc)
{
	struct ep_softc *sc = xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int i;

	/* Card was detached underneath us; nothing to program. */
	if (sc->gone)
		return;

	crit_enter();
	/* Wait for any in-flight command before switching windows. */
	while (inw(BASE + EP_STATUS) & S_COMMAND_IN_PROGRESS);
	GO_WINDOW(0);
	outw(BASE + EP_COMMAND, STOP_TRANSCEIVER);
	GO_WINDOW(4);
	outw(BASE + EP_W4_MEDIA_TYPE, DISABLE_UTP);
	GO_WINDOW(0);

	/* Disable the card */
	outw(BASE + EP_W0_CONFIG_CTRL, 0);

	/* Enable the card */
	outw(BASE + EP_W0_CONFIG_CTRL, ENABLE_DRQ_IRQ);

	GO_WINDOW(2);

	/* Reload the ether_addr. */
	for (i = 0; i < 6; i++)
		outb(BASE + EP_W2_ADDR_0 + i, sc->arpcom.ac_enaddr[i]);

	outw(BASE + EP_COMMAND, RX_RESET);
	outw(BASE + EP_COMMAND, TX_RESET);
	while (inw(BASE + EP_STATUS) & S_COMMAND_IN_PROGRESS);

	/* Window 1 is operating window */
	GO_WINDOW(1);
	/* Drain the TX status stack (31 entries). */
	for (i = 0; i < 31; i++)
		inb(BASE + EP_W1_TX_STATUS);	/* get rid of stray intr's */

	outw(BASE + EP_COMMAND, ACK_INTR | 0xff);

	/* Unmask the interrupt sources we service. */
	outw(BASE + EP_COMMAND, SET_RD_0_MASK | S_5_INTS);
	outw(BASE + EP_COMMAND, SET_INTR_MASK | S_5_INTS);

	/* Program the receive filter; FIL_ALL only for promiscuous mode. */
	if (ifp->if_flags & IFF_PROMISC)
		outw(BASE + EP_COMMAND, SET_RX_FILTER | FIL_INDIVIDUAL |
		    FIL_GROUP | FIL_BRDCST | FIL_ALL);
	else
		outw(BASE + EP_COMMAND, SET_RX_FILTER | FIL_INDIVIDUAL |
		    FIL_GROUP | FIL_BRDCST);

	/* Non-MII cards need the media set up by the driver. */
	if (!sc->epb.mii_trans) {
		ep_ifmedia_upd(ifp);
	}

	outw(BASE + EP_COMMAND, RX_ENABLE);
	outw(BASE + EP_COMMAND, TX_ENABLE);
	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);	/* just in case */

#ifdef EP_LOCAL_STATS
	sc->rx_no_first = sc->rx_no_mbuf = sc->rx_overrunf =
	    sc->rx_overrunl = sc->tx_underrun = 0;
#endif
	EP_FSET(sc, F_RX_FIRST);
	/* Discard any partially-assembled receive chain. */
	if (sc->top) {
		m_freem(sc->top);
		sc->top = sc->mcur = 0;
	}

	outw(BASE + EP_COMMAND, SET_RX_EARLY_THRESH | RX_INIT_EARLY_THRESH);
	outw(BASE + EP_COMMAND, SET_TX_START_THRESH | 16);

	/*
	 * Store up a bunch of mbuf's for use later. (MAX_MBS). First we free up
	 * any that we had in case we're being called from intr or somewhere
	 * else.
	 *
	 * NOTE(review): the comment above looks stale — no mbuf pre-allocation
	 * happens here any more; confirm against repository history.
	 */
	GO_WINDOW(1);
	if_devstart(ifp);
	crit_exit();
}