/*
 * Drain the interface send queue into the controller's transmit ring.
 * Loops while both the software transmit queue (XQLEN ring, head/tail
 * indices) and the hardware descriptor ring (LTDR register, 0x7f mask)
 * have room.  Zero-length packets are dropped; real frames are tapped
 * to BPF and handed to encap_frame().  On exit the queue is marked
 * output-active iff the hardware still has descriptors in flight
 * (CTDR != LTDR).
 */
static void
start_xmit_frames(struct sbsh_softc *sc)
{
	struct ifnet	*ifp = &sc->arpcom.ac_if;
	struct mbuf	*m;

	/*
	 * Check if we have any free descriptor(s) and free space in
	 * our transmit queue.
	 */
	while (sc->tail_xq != ((sc->head_xq - 1) & (XQLEN - 1)) &&
	    sc->regs->LTDR != ((sc->head_tdesc - 1) & 0x7f)) {
		m = ifq_dequeue(&ifp->if_snd);
		if (m == NULL)
			break;

		if (m->m_pkthdr.len) {
			BPF_MTAP(ifp, m);
			encap_frame(sc, m);
		} else
			m_freem(m);
	}

	/* Still-pending descriptors mean output is active. */
	if (sc->regs->CTDR != sc->regs->LTDR)
		ifq_set_oactive(&ifp->if_snd);
	else
		ifq_clr_oactive(&ifp->if_snd);
}
/*
 * Start transfer from internal queue
 *
 * Pops the head entry of the driver's TX queue, flattens its mbuf chain
 * into the contiguous transmit buffer and submits a bulk-out USB
 * transfer.  Returns 0 on success or if the queue is empty; EIO if the
 * transfer could not be started (in which case the device is stopped).
 */
static int
lgue_start_transfer(struct lgue_softc *sc) {
	usbd_status err;
	struct lgue_queue_entry *entry;
	struct ifnet *ifp;

	if (STAILQ_EMPTY(&sc->lgue_tx_queue))
		return(0);

	ifp = &sc->lgue_arpcom.ac_if;
	entry = STAILQ_FIRST(&sc->lgue_tx_queue);
	STAILQ_REMOVE_HEAD(&sc->lgue_tx_queue, entry_next);

	/* Copy the whole chain into the preallocated transfer buffer. */
	m_copydata(entry->entry_mbuf, 0, entry->entry_mbuf->m_pkthdr.len,
	    sc->lgue_tx_buf);

	/* Transmit */
	usbd_setup_xfer(sc->lgue_tx_xfer, sc->lgue_ep[LGUE_ENDPT_TX], sc,
	    sc->lgue_tx_buf, entry->entry_mbuf->m_pkthdr.len,
	    USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, lgue_txeof);
	err = usbd_transfer(sc->lgue_tx_xfer);
	if (err != USBD_IN_PROGRESS) {
		/* Submit failed: reclaim the entry and shut the device down. */
		m_freem(entry->entry_mbuf);
		kfree(entry, M_USBDEV);
		lgue_stop(sc);
		ifq_clr_oactive(&ifp->if_snd);
		return(EIO);
	}

	/* Data already copied out above, so the mbuf can be released now. */
	m_freem(entry->entry_mbuf);
	kfree(entry, M_USBDEV);

	sc->lgue_tx_cnt++;
	ifq_set_oactive(&ifp->if_snd);
	/* Arm the watchdog in case the completion callback never fires. */
	ifp->if_timer = 5;
	return(0);
}
void an_start(struct ifnet *ifp) { struct an_softc *sc = (struct an_softc *)ifp->if_softc; struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_node *ni; struct ieee80211_frame *wh; struct an_txframe frmhdr; struct mbuf *m; u_int16_t len; int cur, fid; if (!sc->sc_enabled || sc->sc_invalid) { DPRINTF(("an_start: noop: enabled %d invalid %d\n", sc->sc_enabled, sc->sc_invalid)); return; } memset(&frmhdr, 0, sizeof(frmhdr)); cur = sc->sc_txnext; for (;;) { if (ic->ic_state != IEEE80211_S_RUN) { DPRINTF(("an_start: not running %d\n", ic->ic_state)); break; } m = ifq_deq_begin(&ifp->if_snd); if (m == NULL) { DPRINTF2(("an_start: no pending mbuf\n")); break; } if (sc->sc_txd[cur].d_inuse) { ifq_deq_rollback(&ifp->if_snd, m); DPRINTF2(("an_start: %x/%d busy\n", sc->sc_txd[cur].d_fid, cur)); ifq_set_oactive(&ifp->if_snd); break; } ifq_deq_commit(&ifp->if_snd, m); ifp->if_opackets++; #if NBPFILTER > 0 if (ifp->if_bpf) bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT); #endif if ((m = ieee80211_encap(ifp, m, &ni)) == NULL) { ifp->if_oerrors++; continue; } if (ni != NULL) ieee80211_release_node(ic, ni); #if NBPFILTER > 0 if (ic->ic_rawbpf) bpf_mtap(ic->ic_rawbpf, m, BPF_DIRECTION_OUT); #endif wh = mtod(m, struct ieee80211_frame *); if (ic->ic_flags & IEEE80211_F_WEPON) wh->i_fc[1] |= IEEE80211_FC1_WEP; m_copydata(m, 0, sizeof(struct ieee80211_frame), (caddr_t)&frmhdr.an_whdr); an_swap16((u_int16_t *)&frmhdr.an_whdr, sizeof(struct ieee80211_frame)/2); /* insert payload length in front of llc/snap */ len = htons(m->m_pkthdr.len - sizeof(struct ieee80211_frame)); m_adj(m, sizeof(struct ieee80211_frame) - sizeof(len)); if (mtod(m, u_long) & 0x01) memcpy(mtod(m, caddr_t), &len, sizeof(len)); else *mtod(m, u_int16_t *) = len; /* * XXX Aironet firmware apparently convert the packet * with longer than 1500 bytes in length into LLC/SNAP. 
* If we have 1500 bytes in ethernet payload, it is * 1508 bytes including LLC/SNAP and will be inserted * additional LLC/SNAP header with 1501-1508 in its * ethertype !! * So we skip LLC/SNAP header and force firmware to * convert it to LLC/SNAP again. */ m_adj(m, sizeof(struct llc)); frmhdr.an_tx_ctl = AN_TXCTL_80211; frmhdr.an_tx_payload_len = m->m_pkthdr.len; frmhdr.an_gaplen = AN_TXGAP_802_11; if (ic->ic_fixed_rate != -1) frmhdr.an_tx_rate = ic->ic_sup_rates[IEEE80211_MODE_11B].rs_rates[ ic->ic_fixed_rate] & IEEE80211_RATE_VAL; else frmhdr.an_tx_rate = 0; if (sizeof(frmhdr) + AN_TXGAP_802_11 + sizeof(len) + m->m_pkthdr.len > AN_TX_MAX_LEN) { ifp->if_oerrors++; m_freem(m); continue; } #if NBPFILTER > 0 if (sc->sc_drvbpf) { struct mbuf mb; struct an_tx_radiotap_header *tap = &sc->sc_txtap; tap->at_rate = ic->ic_bss->ni_rates.rs_rates[ic->ic_bss->ni_txrate]; tap->at_chan_freq = ic->ic_bss->ni_chan->ic_freq; tap->at_chan_flags = ic->ic_bss->ni_chan->ic_flags; mb.m_data = (caddr_t)tap; mb.m_len = sizeof(sc->sc_txtapu); mb.m_next = m; mb.m_nextpkt = NULL; mb.m_type = 0; mb.m_flags = 0; bpf_mtap(sc->sc_drvbpf, m, BPF_DIRECTION_OUT); } #endif fid = sc->sc_txd[cur].d_fid; if (an_write_bap(sc, fid, 0, &frmhdr, sizeof(frmhdr)) != 0) { ifp->if_oerrors++; m_freem(m); continue; } /* dummy write to avoid seek. */ an_write_bap(sc, fid, -1, &frmhdr, AN_TXGAP_802_11); an_mwrite_bap(sc, fid, -1, m, m->m_pkthdr.len); m_freem(m); DPRINTF2(("an_start: send %d byte via %x/%d\n", ntohs(len) + sizeof(struct ieee80211_frame), fid, cur)); sc->sc_txd[cur].d_inuse = 1; if (an_cmd(sc, AN_CMD_TX, fid)) { printf("%s: xmit failed\n", ifp->if_xname); sc->sc_txd[cur].d_inuse = 0; continue; } sc->sc_tx_timer = 5; ifp->if_timer = 1; AN_INC(cur, AN_TX_RING_CNT); sc->sc_txnext = cur; } }
/*
 * Start output on the 3Com EtherLink Plus (eg).  Dequeues one frame,
 * issues a SENDPACKET command via the parameter control block, gathers
 * the mbuf chain into a contiguous buffer and PIO-copies it to the
 * adapter a word at a time, polling the HRDY status bit per word.
 */
void
egstart(struct ifnet *ifp)
{
	struct eg_softc *sc = ifp->if_softc;
	bus_space_tag_t bst = sc->sc_bst;
	bus_space_handle_t bsh = sc->sc_bsh;
	struct mbuf *m0, *m;
	caddr_t buffer;
	int len;
	u_short *ptr;
	u_int i;

	/* Don't transmit if interface is busy or not running */
	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

loop:
	/* Dequeue the next datagram. */
	IFQ_DEQUEUE(&ifp->if_snd, m0);
	if (m0 == NULL)
		return;

	ifq_set_oactive(&ifp->if_snd);

	/* We need to use m->m_pkthdr.len, so require the header */
	if ((m0->m_flags & M_PKTHDR) == 0)
		panic("egstart: no header mbuf");
	/* Short frames are zero-padded to the ethernet minimum below. */
	len = max(m0->m_pkthdr.len, ETHER_MIN_LEN);

#if NBPFILTER > 0
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m0, BPF_DIRECTION_OUT);
#endif

	/* Build the SENDPACKET parameter control block. */
	sc->eg_pcb[0] = EG_CMD_SENDPACKET;
	sc->eg_pcb[1] = 0x06;
	sc->eg_pcb[2] = 0;	/* address not used, we send zero */
	sc->eg_pcb[3] = 0;
	sc->eg_pcb[4] = 0;
	sc->eg_pcb[5] = 0;
	sc->eg_pcb[6] = len;	/* length of packet */
	sc->eg_pcb[7] = len >> 8;
	if (egwritePCB(sc) != 0) {
		DPRINTF(("egwritePCB in egstart failed\n"));
		ifp->if_oerrors++;
		ifq_clr_oactive(&ifp->if_snd);
		m_freem(m0);
		goto loop;
	}

	/* Gather the mbuf chain into the contiguous output buffer. */
	buffer = sc->eg_outbuf;
	for (m = m0; m != 0; m = m->m_next) {
		bcopy(mtod(m, caddr_t), buffer, m->m_len);
		buffer += m->m_len;
	}
	/* Pad short frames with zeros up to the minimum length. */
	if (len > m0->m_pkthdr.len)
		bzero(buffer, len - m0->m_pkthdr.len);

	/* set direction bit: host -> adapter */
	bus_space_write_1(bst, bsh, EG_CONTROL,
	    bus_space_read_1(bst, bsh, EG_CONTROL) & ~EG_CTL_DIR);

	/* PIO the buffer out, waiting for HRDY before each next word. */
	for (ptr = (u_short *)sc->eg_outbuf; len > 0; len -= 2) {
		bus_space_write_2(bst, bsh, EG_DATA, *ptr++);
		for (i = 10000; i != 0; i--) {
			if (bus_space_read_1(bst, bsh, EG_STATUS) &
			    EG_STAT_HRDY)
				break;
			delay(10);
		}
		if (i == 0) {
			printf("%s: start failed\n", sc->sc_dev.dv_xname);
			break;
		}
	}

	m_freem(m0);
}
/*
 * Transmit in VIO_DESC_MODE: one DESC_DATA message per packet.
 * For each dequeued frame: copy it into a pool buffer, map the buffer
 * into a free LDC map-table slot, record the slot in the soft
 * descriptor, then send a vnet_desc_msg describing the single cookie.
 * Stalls (sets oactive) when the descriptor ring or map table is full,
 * or when no pool buffer is available.
 */
void
vnet_start_desc(struct ifnet *ifp)
{
	struct vnet_softc *sc = ifp->if_softc;
	struct ldc_map *map = sc->sc_lm;
	struct vnet_desc_msg dm;
	struct mbuf *m;
	paddr_t pa;
	caddr_t buf;
	u_int prod, count;

	for (;;) {
		count = sc->sc_tx_prod - sc->sc_tx_cons;
		if (count >= (sc->sc_vd->vd_nentries - 1) ||
		    map->lm_count >= map->lm_nentries) {
			/* Ring or map table full: stall until TX completes. */
			ifq_set_oactive(&ifp->if_snd);
			return;
		}

		buf = pool_get(&sc->sc_pool, PR_NOWAIT|PR_ZERO);
		if (buf == NULL) {
			ifq_set_oactive(&ifp->if_snd);
			return;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			pool_put(&sc->sc_pool, buf);
			return;
		}

		m_copydata(m, 0, m->m_pkthdr.len, buf);

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		pmap_extract(pmap_kernel(), (vaddr_t)buf, &pa);
		KASSERT((pa & ~PAGE_MASK) == (pa & LDC_MTE_RA_MASK));
		/* Find the next free LDC map table entry. */
		while (map->lm_slot[map->lm_next].entry != 0) {
			map->lm_next++;
			map->lm_next &= (map->lm_nentries - 1);
		}
		map->lm_slot[map->lm_next].entry = (pa & LDC_MTE_RA_MASK);
		map->lm_slot[map->lm_next].entry |= LDC_MTE_CPR;
		atomic_inc_int(&map->lm_count);

		prod = sc->sc_tx_prod & (sc->sc_vd->vd_nentries - 1);
		sc->sc_vsd[prod].vsd_map_idx = map->lm_next;
		sc->sc_vsd[prod].vsd_buf = buf;

		bzero(&dm, sizeof(dm));
		dm.tag.type = VIO_TYPE_DATA;
		dm.tag.stype = VIO_SUBTYPE_INFO;
		dm.tag.stype_env = VIO_DESC_DATA;
		dm.tag.sid = sc->sc_local_sid;
		dm.seq_no = sc->sc_seq_no++;
		dm.desc_handle = sc->sc_tx_prod;
		/* 60 = minimum ethernet frame length without FCS. */
		dm.nbytes = max(m->m_pkthdr.len, 60);
		dm.ncookies = 1;
		dm.cookie[0].addr =
		    map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK);
		dm.cookie[0].size = 2048;
		vnet_sendmsg(sc, &dm, sizeof(dm));

		sc->sc_tx_prod++;
		sc->sc_tx_prod &= (sc->sc_vd->vd_nentries - 1);

		m_freem(m);
	}
}
/*
 * Start output on a virtual network (vnet) interface.
 * Requires an established VIO session (RDX seen and acked) and room in
 * the LDC transmit queue.  In dring mode, fills free descriptors with
 * copies of outgoing frames, maps their buffers into the LDC map table,
 * and finally notifies the peer with a DRING_DATA message if it is not
 * already actively processing the ring.  Delegates to vnet_start_desc()
 * in VIO_DESC_MODE.
 */
void
vnet_start(struct ifnet *ifp)
{
	struct vnet_softc *sc = ifp->if_softc;
	struct ldc_conn *lc = &sc->sc_lc;
	struct ldc_map *map = sc->sc_lm;
	struct mbuf *m;
	paddr_t pa;
	caddr_t buf;
	uint64_t tx_head, tx_tail, tx_state;
	u_int start, prod, count;
	int err;

	if (!(ifp->if_flags & IFF_RUNNING) || ifq_is_oactive(&ifp->if_snd))
		return;

	if (IFQ_IS_EMPTY(&ifp->if_snd))
		return;

	/*
	 * We cannot transmit packets until a VIO connection has been
	 * established.
	 */
	if (!ISSET(sc->sc_vio_state, VIO_RCV_RDX) ||
	    !ISSET(sc->sc_vio_state, VIO_ACK_RDX))
		return;

	/*
	 * Make sure there is room in the LDC transmit queue to send a
	 * DRING_DATA message.
	 */
	err = hv_ldc_tx_get_state(lc->lc_id, &tx_head, &tx_tail, &tx_state);
	if (err != H_EOK)
		return;
	tx_tail += sizeof(struct ldc_pkt);
	tx_tail &= ((lc->lc_txq->lq_nentries * sizeof(struct ldc_pkt)) - 1);
	if (tx_tail == tx_head) {
		ifq_set_oactive(&ifp->if_snd);
		return;
	}

	if (sc->sc_xfer_mode == VIO_DESC_MODE) {
		vnet_start_desc(ifp);
		return;
	}

	start = prod = sc->sc_tx_prod & (sc->sc_vd->vd_nentries - 1);
	while (sc->sc_vd->vd_desc[prod].hdr.dstate == VIO_DESC_FREE) {
		count = sc->sc_tx_prod - sc->sc_tx_cons;
		if (count >= (sc->sc_vd->vd_nentries - 1) ||
		    map->lm_count >= map->lm_nentries) {
			/* Ring or map table full: stall. */
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		buf = pool_get(&sc->sc_pool, PR_NOWAIT|PR_ZERO);
		if (buf == NULL) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			pool_put(&sc->sc_pool, buf);
			break;
		}

		m_copydata(m, 0, m->m_pkthdr.len, buf + VNET_ETHER_ALIGN);

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif

		pmap_extract(pmap_kernel(), (vaddr_t)buf, &pa);
		KASSERT((pa & ~PAGE_MASK) == (pa & LDC_MTE_RA_MASK));
		/* Find the next free LDC map table entry. */
		while (map->lm_slot[map->lm_next].entry != 0) {
			map->lm_next++;
			map->lm_next &= (map->lm_nentries - 1);
		}
		map->lm_slot[map->lm_next].entry = (pa & LDC_MTE_RA_MASK);
		map->lm_slot[map->lm_next].entry |= LDC_MTE_CPR;
		atomic_inc_int(&map->lm_count);

		/* 60 = minimum ethernet frame length without FCS. */
		sc->sc_vd->vd_desc[prod].nbytes = max(m->m_pkthdr.len, 60);
		sc->sc_vd->vd_desc[prod].ncookies = 1;
		sc->sc_vd->vd_desc[prod].cookie[0].addr =
		    map->lm_next << PAGE_SHIFT | (pa & PAGE_MASK);
		sc->sc_vd->vd_desc[prod].cookie[0].size = 2048;
		/* Publish the cookie before flipping the descriptor state. */
		membar_producer();
		sc->sc_vd->vd_desc[prod].hdr.dstate = VIO_DESC_READY;

		sc->sc_vsd[prod].vsd_map_idx = map->lm_next;
		sc->sc_vsd[prod].vsd_buf = buf;

		sc->sc_tx_prod++;
		prod = sc->sc_tx_prod & (sc->sc_vd->vd_nentries - 1);

		m_freem(m);
	}

	membar_producer();

	/* Only kick the peer if it is not already walking the ring. */
	if (start != prod && sc->sc_peer_state != VIO_DP_ACTIVE) {
		vnet_send_dring_data(sc, start);
		ifp->if_timer = 5;
	}
}
/* Resume a packet transmit operation after a memory allocation
 * has completed.
 *
 * This is basically a hacked up copy of snstart() which handles
 * a completed memory allocation the same way snstart() does.
 * It then passes control to snstart to handle any other queued
 * packets.
 */
static void
snresume(struct ifnet *ifp)
{
	struct sn_softc *sc = ifp->if_softc;
	u_int len;
	struct mbuf *m;
	struct mbuf *top;
	int pad;
	int mask;
	u_short length;
	u_short numPages;
	u_short pages_wanted;
	u_char packet_no;

	/* No allocation was pending: nothing to resume. */
	if (sc->pages_wanted < 0)
		return;

	pages_wanted = sc->pages_wanted;
	sc->pages_wanted = -1;

	/*
	 * Sneak a peek at the next packet
	 */
	m = ifq_dequeue(&ifp->if_snd);
	if (m == NULL) {
		kprintf("%s: snresume() with nothing to send\n",
		    ifp->if_xname);
		return;
	}

	/*
	 * Compute the frame length and set pad to give an overall even
	 * number of bytes. Below we assume that the packet length is even.
	 */
	for (len = 0, top = m; m; m = m->m_next)
		len += m->m_len;
	pad = (len & 1);

	/*
	 * We drop packets that are too large. Perhaps we should truncate
	 * them instead?
	 */
	if (len + pad > ETHER_MAX_LEN - ETHER_CRC_LEN) {
		kprintf("%s: large packet discarded (B)\n", ifp->if_xname);
		IFNET_STAT_INC(ifp, oerrors, 1);
		m_freem(top);
		return;
	}

#ifdef SW_PAD
	/*
	 * If HW padding is not turned on, then pad to ETHER_MIN_LEN.
	 */
	if (len < ETHER_MIN_LEN - ETHER_CRC_LEN)
		pad = ETHER_MIN_LEN - ETHER_CRC_LEN - len;
#endif	/* SW_PAD */

	length = pad + len;

	/*
	 * The MMU wants the number of pages to be the number of 256 byte
	 * 'pages', minus 1 (A packet can't ever have 0 pages. We also
	 * include space for the status word, byte count and control bytes in
	 * the allocation request.
	 */
	numPages = (length + 6) >> 8;

	SMC_SELECT_BANK(2);

	/*
	 * The memory allocation completed. Check the results. If it failed,
	 * we simply set a watchdog timer and hope for the best.
	 */
	packet_no = inb(BASE + ALLOC_RESULT_REG_B);
	if (packet_no & ARR_FAILED) {
		kprintf("%s: Memory allocation failed. Weird.\n",
		    ifp->if_xname);
		ifp->if_timer = 1;
		ifq_prepend(&ifp->if_snd, top);
		goto try_start;
	}

	/*
	 * We have a packet number, so tell the card to use it.
	 */
	outb(BASE + PACKET_NUM_REG_B, packet_no);

	/*
	 * Now, numPages should match the pages_wanted recorded when the
	 * memory allocation was initiated.
	 */
	if (pages_wanted != numPages) {
		kprintf("%s: memory allocation wrong size. Weird.\n",
		    ifp->if_xname);
		/*
		 * If the allocation was the wrong size we simply release the
		 * memory once it is granted. Wait for the MMU to be un-busy.
		 */
		while (inw(BASE + MMU_CMD_REG_W) & MMUCR_BUSY)
			/* NOTHING */ ;
		outw(BASE + MMU_CMD_REG_W, MMUCR_FREEPKT);
		ifq_prepend(&ifp->if_snd, top);
		return;
	}

	/*
	 * Point to the beginning of the packet
	 */
	outw(BASE + POINTER_REG_W, PTR_AUTOINC | 0x0000);

	/*
	 * Send the packet length (+6 for status, length and control byte)
	 * and the status word (set to zeros)
	 */
	outw(BASE + DATA_REG_W, 0);
	outb(BASE + DATA_REG_B, (length + 6) & 0xFF);
	outb(BASE + DATA_REG_B, (length + 6) >> 8);

	/*
	 * Push out the data to the card.
	 */
	for (m = top; m != NULL; m = m->m_next) {
		/*
		 * Push out words.
		 */
		outsw(BASE + DATA_REG_W, mtod(m, caddr_t), m->m_len / 2);

		/*
		 * Push out remaining byte.
		 */
		if (m->m_len & 1)
			outb(BASE + DATA_REG_B,
			    *(mtod(m, caddr_t) + m->m_len - 1));
	}

	/*
	 * Push out padding.
	 */
	while (pad > 1) {
		outw(BASE + DATA_REG_W, 0);
		pad -= 2;
	}
	if (pad)
		outb(BASE + DATA_REG_B, 0);

	/*
	 * Push out control byte and unused packet byte The control byte is 0
	 * meaning the packet is even lengthed and no special CRC handling is
	 * desired.
	 */
	outw(BASE + DATA_REG_W, 0);

	/*
	 * Enable the interrupts and let the chipset deal with it Also set a
	 * watchdog in case we miss the interrupt.
	 */
	mask = inb(BASE + INTR_MASK_REG_B) | (IM_TX_INT | IM_TX_EMPTY_INT);
	outb(BASE + INTR_MASK_REG_B, mask);
	sc->intr_mask = mask;

	outw(BASE + MMU_CMD_REG_W, MMUCR_ENQUEUE);

	BPF_MTAP(ifp, top);

	IFNET_STAT_INC(ifp, opackets, 1);
	m_freem(top);

try_start:
	/*
	 * Now pass control to snstart() to queue any additional packets
	 */
	ifq_clr_oactive(&ifp->if_snd);
	if_devstart(ifp);

	/*
	 * We've sent something, so we're active. Set a watchdog in case the
	 * TX_EMPTY interrupt is lost.
	 */
	ifq_set_oactive(&ifp->if_snd);
	ifp->if_timer = 1;
}
/*
 * Start output on an SMC91xx (sn) interface.  Dequeues one frame,
 * requests a card memory allocation for it, and on success PIO-copies
 * the frame into the allocated packet buffer and enqueues it for
 * transmission.  If the allocation does not complete promptly, the
 * frame is pushed back, the allocation-complete interrupt is enabled
 * and snresume() finishes the job later.
 */
void
snstart(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct sn_softc *sc = ifp->if_softc;
	u_int len;
	struct mbuf *m;
	struct mbuf *top;
	int pad;
	int mask;
	u_short length;
	u_short numPages;
	u_char packet_no;
	int time_out;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);

	if ((ifp->if_flags & IFF_RUNNING) == 0 ||
	    ifq_is_oactive(&ifp->if_snd))
		return;
	if (sc->pages_wanted != -1) {
		/* XXX should never happen */
		kprintf("%s: snstart() while memory allocation pending\n",
		    ifp->if_xname);
		ifq_set_oactive(&ifp->if_snd);
		return;
	}
startagain:
	/*
	 * Sneak a peek at the next packet
	 */
	m = ifq_dequeue(&ifp->if_snd);
	if (m == NULL)
		return;

	/*
	 * Compute the frame length and set pad to give an overall even
	 * number of bytes. Below we assume that the packet length is even.
	 */
	for (len = 0, top = m; m; m = m->m_next)
		len += m->m_len;
	pad = (len & 1);

	/*
	 * We drop packets that are too large. Perhaps we should truncate
	 * them instead?
	 */
	if (len + pad > ETHER_MAX_LEN - ETHER_CRC_LEN) {
		kprintf("%s: large packet discarded (A)\n", ifp->if_xname);
		IFNET_STAT_INC(&sc->arpcom.ac_if, oerrors, 1);
		m_freem(top);
		goto readcheck;
	}

#ifdef SW_PAD
	/*
	 * If HW padding is not turned on, then pad to ETHER_MIN_LEN.
	 */
	if (len < ETHER_MIN_LEN - ETHER_CRC_LEN)
		pad = ETHER_MIN_LEN - ETHER_CRC_LEN - len;
#endif	/* SW_PAD */

	length = pad + len;

	/*
	 * The MMU wants the number of pages to be the number of 256 byte
	 * 'pages', minus 1 (A packet can't ever have 0 pages. We also
	 * include space for the status word, byte count and control bytes in
	 * the allocation request.
	 */
	numPages = (length + 6) >> 8;

	/*
	 * Now, try to allocate the memory
	 */
	SMC_SELECT_BANK(2);
	outw(BASE + MMU_CMD_REG_W, MMUCR_ALLOC | numPages);

	/*
	 * Wait a short amount of time to see if the allocation request
	 * completes. Otherwise, I enable the interrupt and wait for
	 * completion asyncronously.
	 */
	time_out = MEMORY_WAIT_TIME;
	do {
		if (inb(BASE + INTR_STAT_REG_B) & IM_ALLOC_INT)
			break;
	} while (--time_out);

	if (!time_out) {
		/*
		 * No memory now. Oh well, wait until the chip finds memory
		 * later. Remember how many pages we were asking for and
		 * enable the allocation completion interrupt. Also set a
		 * watchdog in case we miss the interrupt. We mark the
		 * interface active since there is no point in attempting an
		 * snstart() until after the memory is available.
		 */
		mask = inb(BASE + INTR_MASK_REG_B) | IM_ALLOC_INT;
		outb(BASE + INTR_MASK_REG_B, mask);
		sc->intr_mask = mask;

		ifp->if_timer = 1;
		ifq_set_oactive(&ifp->if_snd);
		sc->pages_wanted = numPages;
		ifq_prepend(&ifp->if_snd, top);
		return;
	}

	/*
	 * The memory allocation completed. Check the results.
	 */
	packet_no = inb(BASE + ALLOC_RESULT_REG_B);
	if (packet_no & ARR_FAILED) {
		kprintf("%s: Memory allocation failed\n", ifp->if_xname);
		ifq_prepend(&ifp->if_snd, top);
		goto startagain;
	}

	/*
	 * We have a packet number, so tell the card to use it.
	 */
	outb(BASE + PACKET_NUM_REG_B, packet_no);

	/*
	 * Point to the beginning of the packet
	 */
	outw(BASE + POINTER_REG_W, PTR_AUTOINC | 0x0000);

	/*
	 * Send the packet length (+6 for status, length and control byte)
	 * and the status word (set to zeros)
	 */
	outw(BASE + DATA_REG_W, 0);
	outb(BASE + DATA_REG_B, (length + 6) & 0xFF);
	outb(BASE + DATA_REG_B, (length + 6) >> 8);

	/*
	 * Push out the data to the card.
	 */
	for (m = top; m != NULL; m = m->m_next) {
		/*
		 * Push out words.
		 */
		outsw(BASE + DATA_REG_W, mtod(m, caddr_t), m->m_len / 2);

		/*
		 * Push out remaining byte.
		 */
		if (m->m_len & 1)
			outb(BASE + DATA_REG_B,
			    *(mtod(m, caddr_t) + m->m_len - 1));
	}

	/*
	 * Push out padding.
	 */
	while (pad > 1) {
		outw(BASE + DATA_REG_W, 0);
		pad -= 2;
	}
	if (pad)
		outb(BASE + DATA_REG_B, 0);

	/*
	 * Push out control byte and unused packet byte The control byte is 0
	 * meaning the packet is even lengthed and no special CRC handling is
	 * desired.
	 */
	outw(BASE + DATA_REG_W, 0);

	/*
	 * Enable the interrupts and let the chipset deal with it Also set a
	 * watchdog in case we miss the interrupt.
	 */
	mask = inb(BASE + INTR_MASK_REG_B) | (IM_TX_INT | IM_TX_EMPTY_INT);
	outb(BASE + INTR_MASK_REG_B, mask);
	sc->intr_mask = mask;

	outw(BASE + MMU_CMD_REG_W, MMUCR_ENQUEUE);

	ifq_set_oactive(&ifp->if_snd);
	ifp->if_timer = 1;

	BPF_MTAP(ifp, top);

	IFNET_STAT_INC(ifp, opackets, 1);
	m_freem(top);

readcheck:
	/*
	 * Is another packet coming in? We don't want to overflow the tiny
	 * RX FIFO. If nothing has arrived then attempt to queue another
	 * transmit packet.
	 */
	if (inw(BASE + FIFO_PORTS_REG_W) & FIFO_REMPTY)
		goto startagain;
}
/*
 * Start output on a 3c509 (ep) interface.  Dequeues frames and
 * PIO-copies them into the card's TX FIFO (32-bit or 16-bit access
 * depending on F_ACCESS_32_BITS), padding to a dword boundary via the
 * padmap table.  Stalls (sets oactive, re-prepends the frame) when the
 * FIFO lacks room, and defers further output while RX data is pending
 * to avoid overflowing the small RX FIFO.
 */
static void
ep_if_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct ep_softc *sc = ifp->if_softc;
	u_int len;
	struct mbuf *m;
	struct mbuf *top;
	int pad;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);

	if (sc->gone) {
		ifq_purge(&ifp->if_snd);
		return;
	}

	/* Spin until the card finishes any in-progress command. */
	while (inw(BASE + EP_STATUS) & S_COMMAND_IN_PROGRESS)
		;
	if (ifq_is_oactive(&ifp->if_snd)) {
		return;
	}
	crit_enter();

startagain:
	/* Sneak a peek at the next packet */
	m = ifq_dequeue(&ifp->if_snd, NULL);
	if (m == NULL) {
		crit_exit();
		return;
	}
	for (len = 0, top = m; m; m = m->m_next)
		len += m->m_len;
	m = top;

	/* Pad the frame out to a dword boundary. */
	pad = padmap[len & 3];

	/*
	 * The 3c509 automatically pads short packets to minimum ethernet
	 * length, but we drop packets that are too large. Perhaps we should
	 * truncate them instead?
	 */
	if (len + pad > ETHER_MAX_LEN) {
		/* packet is obviously too large: toss it */
		IFNET_STAT_INC(ifp, oerrors, 1);
		m_freem(m);
		goto readcheck;
	}

	/* +4 covers the preamble (length word + unused word). */
	if (inw(BASE + EP_W1_FREE_TX) < len + pad + 4) {
		/* no room in FIFO */
		outw(BASE + EP_COMMAND, SET_TX_AVAIL_THRESH | (len + pad + 4));
		/* make sure */
		if (inw(BASE + EP_W1_FREE_TX) < len + pad + 4) {
			ifq_set_oactive(&ifp->if_snd);
			ifq_prepend(&ifp->if_snd, top);
			crit_exit();
			return;
		}
	} else {
		outw(BASE + EP_COMMAND,
		    SET_TX_AVAIL_THRESH | EP_THRESH_DISABLE);
	}

	outw(BASE + EP_W1_TX_PIO_WR_1, len);
	outw(BASE + EP_W1_TX_PIO_WR_1, 0x0);	/* Second dword meaningless */

	if (EP_FTST(sc, F_ACCESS_32_BITS)) {
		/* 32-bit PIO: dwords, then the trailing 1-3 bytes. */
		for (top = m; m != NULL; m = m->m_next) {
			outsl(BASE + EP_W1_TX_PIO_WR_1, mtod(m, caddr_t),
			    m->m_len / 4);
			if (m->m_len & 3)
				outsb(BASE + EP_W1_TX_PIO_WR_1,
				    mtod(m, caddr_t) + (m->m_len & (~3)),
				    m->m_len & 3);
		}
	} else {
		/* 16-bit PIO: words, then the trailing odd byte. */
		for (top = m; m != NULL; m = m->m_next) {
			outsw(BASE + EP_W1_TX_PIO_WR_1, mtod(m, caddr_t),
			    m->m_len / 2);
			if (m->m_len & 1)
				outb(BASE + EP_W1_TX_PIO_WR_1,
				    *(mtod(m, caddr_t) + m->m_len - 1));
		}
	}

	while (pad--)
		outb(BASE + EP_W1_TX_PIO_WR_1, 0);	/* Padding */

	BPF_MTAP(ifp, top);

	ifp->if_timer = 2;
	IFNET_STAT_INC(ifp, opackets, 1);
	m_freem(top);

	/*
	 * Is another packet coming in? We don't want to overflow the tiny
	 * RX fifo.
	 */
readcheck:
	if (inw(BASE + EP_W1_RX_STATUS) & RX_BYTES_MASK) {
		/*
		 * we check if we have packets left, in that case we prepare
		 * to come back later
		 */
		if (!ifq_is_empty(&ifp->if_snd))
			outw(BASE + EP_COMMAND, SET_TX_AVAIL_THRESH | 8);
		crit_exit();
		return;
	}
	goto startagain;
}