/* * Start transfer */ static void lgue_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) { struct lgue_softc *sc; struct mbuf *m_head; ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq); sc = ifp->if_softc; if (sc->lgue_dying) return; if (ifq_is_oactive(&ifp->if_snd)) { return; } /* To internal queue */ while ((m_head = ifq_dequeue(&ifp->if_snd, NULL)) != NULL) { if (lgue_encap(sc, m_head)) { m_freem(m_head); break; } /* Filter */ BPF_MTAP(ifp, m_head); } lgue_start_transfer(sc); }
/* * Start transfer */ static void lgue_start(struct ifnet *ifp) { struct lgue_softc *sc; struct mbuf *m_head; sc = ifp->if_softc; if (sc->lgue_dying) return; if (ifp->if_flags & IFF_OACTIVE) { return; } /* To internal queue */ while ((m_head = ifq_dequeue(&ifp->if_snd, NULL)) != NULL) { if (lgue_encap(sc, m_head)) { m_freem(m_head); break; } /* Filter */ BPF_MTAP(ifp, m_head); } lgue_start_transfer(sc); }
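/*
 * Both lgue_start() variants above share one skeleton: return early while
 * the interface is marked output-active (the legacy IFF_OACTIVE flag in
 * the older copy, ifq_is_oactive() in the ALTQ-subqueue copy), then
 * dequeue, encapsulate and BPF-tap until the send queue drains.  A minimal
 * userland sketch of that loop; pkt/dequeue/encap/tap are hypothetical
 * stand-ins for the mbuf and driver APIs, not the real ones.
 */
#include <stdio.h>
#include <stdlib.h>

struct pkt { struct pkt *next; int len; };

static struct pkt *sendq;

static struct pkt *
dequeue(void)
{
	struct pkt *p = sendq;

	if (p != NULL)
		sendq = p->next;
	return (p);
}

/* Returns 0 on success, like lgue_encap(); nonzero means drop. */
static int
encap(struct pkt *p)
{
	return (p->len <= 0);
}

static void
tap(struct pkt *p)
{
	printf("tap %d bytes\n", p->len);
}

static void
start(void)
{
	struct pkt *p;

	while ((p = dequeue()) != NULL) {
		if (encap(p)) {		/* encap failure: drop and stop */
			free(p);
			break;
		}
		tap(p);			/* copy to any BPF-style listener */
		free(p);
	}
}

int
main(void)
{
	struct pkt *p = malloc(sizeof(*p));

	p->next = NULL;
	p->len = 64;
	sendq = p;
	start();
	return (0);
}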
static void start_xmit_frames(struct sbsh_softc *sc) { struct ifnet *ifp = &sc->arpcom.ac_if; struct mbuf *m; /* * Check if we have any free descriptor(s) and free space in * our transmit queue. */ while (sc->tail_xq != ((sc->head_xq - 1) & (XQLEN - 1)) && sc->regs->LTDR != ((sc->head_tdesc - 1) & 0x7f)) { m = ifq_dequeue(&ifp->if_snd); if (m == NULL) break; if (m->m_pkthdr.len) { BPF_MTAP(ifp, m); encap_frame(sc, m); } else m_freem(m); } if (sc->regs->CTDR != sc->regs->LTDR) ifq_set_oactive(&ifp->if_snd); else ifq_clr_oactive(&ifp->if_snd); }
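/*
 * start_xmit_frames() treats both the software queue and the hardware
 * descriptor ring as circular buffers: "tail != ((head - 1) & (SIZE - 1))"
 * says the tail has not yet caught up to the slot just behind the head,
 * i.e. there is still room.  A self-contained check of that arithmetic;
 * XQLEN is assumed to be a power of two, which the masking requires.
 */
#include <assert.h>
#include <stdio.h>

#define XQLEN 64

/* Room is available until tail sits one slot behind head. */
static int
ring_has_room(unsigned int head, unsigned int tail)
{
	return (tail != ((head - 1) & (XQLEN - 1)));
}

int
main(void)
{
	assert(ring_has_room(0, 5));	/* plenty of room */
	assert(!ring_has_room(6, 5));	/* tail == head - 1: full */
	assert(!ring_has_room(0, 63));	/* wraparound case: full, too */
	printf("ring checks pass\n");
	return (0);
}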
/* Async. stream output */ static void fwe_as_output(struct fwe_softc *fwe, struct ifnet *ifp) { struct mbuf *m; struct fw_xfer *xfer; struct fw_xferq *xferq; struct fw_pkt *fp; int i = 0; xfer = NULL; xferq = fwe->fd.fc->atq; while ((xferq->queued < xferq->maxq - 1) && (ifp->if_snd.ifq_head != NULL)) { FWE_LOCK(fwe); xfer = STAILQ_FIRST(&fwe->xferlist); if (xfer == NULL) { #if 0 printf("if_fwe: lack of xfer\n"); #endif FWE_UNLOCK(fwe); break; } STAILQ_REMOVE_HEAD(&fwe->xferlist, link); FWE_UNLOCK(fwe); IF_DEQUEUE(&ifp->if_snd, m); if (m == NULL) { FWE_LOCK(fwe); STAILQ_INSERT_HEAD(&fwe->xferlist, xfer, link); FWE_UNLOCK(fwe); break; } BPF_MTAP(ifp, m); /* keep ip packet alignment for alpha */ M_PREPEND(m, ETHER_ALIGN, M_NOWAIT); fp = &xfer->send.hdr; *(uint32_t *)&xfer->send.hdr = *(int32_t *)&fwe->pkt_hdr; fp->mode.stream.len = m->m_pkthdr.len; xfer->mbuf = m; xfer->send.pay_len = m->m_pkthdr.len; if (fw_asyreq(fwe->fd.fc, -1, xfer) != 0) { /* error */ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); /* XXX set error code */ fwe_output_callback(xfer); } else { if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); i++; } } #if 0 if (i > 1) printf("%d queued\n", i); #endif if (i > 0) xferq->start(fwe->fd.fc); }
/* Async. stream output */ static void fwe_as_output(struct fwe_softc *fwe, struct ifnet *ifp) { struct mbuf *m; struct fw_xfer *xfer; struct fw_xferq *xferq; struct fw_pkt *fp; int i = 0; xfer = NULL; xferq = fwe->fd.fc->atq; while (xferq->queued < xferq->maxq) { IF_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; xfer = fw_xfer_alloc(); if (xfer == NULL) { return; } #if __FreeBSD_version >= 500000 BPF_MTAP(ifp, m); #else if (ifp->if_bpf != NULL) bpf_mtap(ifp, m); #endif xfer->send.off = 0; xfer->spd = 2; xfer->fc = fwe->fd.fc; xfer->retry_req = fw_asybusy; xfer->sc = (caddr_t)fwe; xfer->act.hand = fwe_output_callback; /* keep ip packet alignment for alpha */ M_PREPEND(m, ALIGN_PAD, M_DONTWAIT); fp = (struct fw_pkt *)&xfer->dst; /* XXX */ xfer->dst = *((int32_t *)&fwe->pkt_hdr); fp->mode.stream.len = htons(m->m_pkthdr.len); xfer->send.buf = (caddr_t) fp; xfer->mbuf = m; xfer->send.len = m->m_pkthdr.len + HDR_LEN; i++; if (fw_asyreq(xfer->fc, -1, xfer) != 0) { /* error */ ifp->if_oerrors ++; /* XXX set error code */ fwe_output_callback(xfer); } else { ifp->if_opackets ++; } } #if 0 if (i > 1) printf("%d queued\n", i); #endif if (xfer != NULL) xferq->start(xfer->fc); }
static void ffec_txstart_locked(struct ffec_softc *sc) { struct ifnet *ifp; struct mbuf *m; int enqueued; FFEC_ASSERT_LOCKED(sc); if (!sc->link_is_up) return; ifp = sc->ifp; if (ifp->if_drv_flags & IFF_DRV_OACTIVE) return; enqueued = 0; for (;;) { if (sc->txcount == (TX_DESC_COUNT-1)) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; if (ffec_setup_txbuf(sc, sc->tx_idx_head, &m) != 0) { IFQ_DRV_PREPEND(&ifp->if_snd, m); break; } BPF_MTAP(ifp, m); sc->tx_idx_head = next_txidx(sc, sc->tx_idx_head); ++enqueued; } if (enqueued != 0) { bus_dmamap_sync(sc->txdesc_tag, sc->txdesc_map, BUS_DMASYNC_PREWRITE); WR4(sc, FEC_TDAR_REG, FEC_TDAR_TDAR); bus_dmamap_sync(sc->txdesc_tag, sc->txdesc_map, BUS_DMASYNC_POSTWRITE); sc->tx_watchdog_count = WATCHDOG_TIMEOUT_SECS; } }
Static void cdce_start(struct ifnet *ifp) { struct cdce_softc *sc; struct mbuf *m_head = NULL; sc = ifp->if_softc; CDCE_LOCK(sc); if (sc->cdce_dying || ifp->if_flags & IFF_OACTIVE || !(ifp->if_flags & IFF_RUNNING)) { CDCE_UNLOCK(sc); return; } IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) { CDCE_UNLOCK(sc); return; } if (cdce_encap(sc, m_head, 0)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; CDCE_UNLOCK(sc); return; } BPF_MTAP(ifp, m_head); ifp->if_flags |= IFF_OACTIVE; CDCE_UNLOCK(sc); return; }
/* * icoutput() */ static int icoutput(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst, struct rtentry *rt) { device_t icdev = devclass_get_device(ic_devclass, ifp->if_unit); device_t parent = device_get_parent(icdev); struct ic_softc *sc = (struct ic_softc *)device_get_softc(icdev); int s, len, sent; struct mbuf *mm; u_char *cp; u_int hdr = dst->sa_family; ifp->if_flags |= IFF_RUNNING; s = splhigh(); /* already sending? */ if (sc->ic_sending) { ifp->if_oerrors++; goto error; } /* insert header */ bcopy((char *)&hdr, sc->ic_obuf, ICHDRLEN); cp = sc->ic_obuf + ICHDRLEN; len = 0; mm = m; do { if (len + mm->m_len > sc->ic_if.if_mtu) { /* packet too large */ ifp->if_oerrors++; goto error; } bcopy(mtod(mm, char *), cp, mm->m_len); cp += mm->m_len; len += mm->m_len; } while ((mm = mm->m_next)); if (ifp->if_bpf) { struct mbuf m0, *n = m; /* * We need to prepend the address family as * a four byte field. Cons up a dummy header * to pacify bpf. This is safe because bpf * will only read from the mbuf (i.e., it won't * try to free it or keep a pointer to it). */ m0.m_next = m; m0.m_len = sizeof(u_int); m0.m_data = (char *)&hdr; n = &m0; BPF_MTAP(ifp, n); } sc->ic_sending = 1; m_freem(m); splx(s); /* send the packet */ if (iicbus_block_write(parent, sc->ic_addr, sc->ic_obuf, len + ICHDRLEN, &sent)) ifp->if_oerrors++; else { ifp->if_opackets++; ifp->if_obytes += len; } sc->ic_sending = 0; return (0); error: m_freem(m); splx(s); return (0); }
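/*
 * icoutput() taps the packet with a four-byte address-family prefix by
 * chaining a stack-allocated mbuf header in front of the real chain; no
 * copy is made because BPF only reads.  The same trick, sketched with a
 * simplified stand-in for struct mbuf and the tap:
 */
#include <stdio.h>

struct mseg { struct mseg *m_next; int m_len; char *m_data; };

static void
seg_tap(struct mseg *m)
{
	for (; m != NULL; m = m->m_next)
		printf("segment of %d bytes\n", m->m_len);
}

static void
tap_with_af(struct mseg *pkt, unsigned int af)
{
	struct mseg m0;		/* on the stack: safe, the tap only reads */

	m0.m_next = pkt;
	m0.m_len = sizeof(af);
	m0.m_data = (char *)&af;
	seg_tap(&m0);
}

int
main(void)
{
	struct mseg payload = { NULL, 100, NULL };

	tap_with_af(&payload, 2);	/* 2 == AF_INET on BSD */
	return (0);
}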
static void smc_task_tx(void *context, int pending) { struct ifnet *ifp; struct smc_softc *sc; struct mbuf *m, *m0; u_int packet, len; int last_len; uint8_t *data; (void)pending; ifp = (struct ifnet *)context; sc = ifp->if_softc; SMC_LOCK(sc); if (sc->smc_pending == NULL) { SMC_UNLOCK(sc); goto next_packet; } m = m0 = sc->smc_pending; sc->smc_pending = NULL; smc_select_bank(sc, 2); /* * Check the allocation result. */ packet = smc_read_1(sc, ARR); /* * If the allocation failed, requeue the packet and retry. */ if (packet & ARR_FAILED) { IFQ_DRV_PREPEND(&ifp->if_snd, m); ++ifp->if_oerrors; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; smc_start_locked(ifp); SMC_UNLOCK(sc); return; } /* * Tell the device to write to our packet number. */ smc_write_1(sc, PNR, packet); smc_write_2(sc, PTR, 0 | PTR_AUTO_INCR); /* * Tell the device how long the packet is (including control data). */ len = m_length(m, 0); len += PKT_CTRL_DATA_LEN; smc_write_2(sc, DATA0, 0); smc_write_2(sc, DATA0, len); /* * Push the data out to the device. */ data = NULL; last_len = 0; for (; m != NULL; m = m->m_next) { data = mtod(m, uint8_t *); smc_write_multi_2(sc, DATA0, (uint16_t *)data, m->m_len / 2); last_len = m->m_len; } /* * Push out the control byte and the odd byte if needed. */ if ((len & 1) != 0 && data != NULL) smc_write_2(sc, DATA0, (CTRL_ODD << 8) | data[last_len - 1]); else smc_write_2(sc, DATA0, 0); /* * Unmask the TX empty interrupt. */ sc->smc_mask |= TX_EMPTY_INT; if ((ifp->if_capenable & IFCAP_POLLING) == 0) smc_write_1(sc, MSK, sc->smc_mask); /* * Enqueue the packet. */ smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_ENQUEUE); callout_reset(&sc->smc_watchdog, hz * 2, smc_watchdog, sc); /* * Finish up. */ ifp->if_opackets++; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; SMC_UNLOCK(sc); BPF_MTAP(ifp, m0); m_freem(m0); next_packet: /* * See if there's anything else to do. */ smc_start(ifp); }
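/*
 * smc_task_tx() streams the frame to the chip as 16-bit words, so an
 * odd-length frame leaves one dangling byte; that byte rides in the low
 * half of the final control word next to the CTRL_ODD flag.  A sketch of
 * that last word; the CTRL_ODD value here is an assumption for
 * illustration, not taken from the driver's register header.
 */
#include <stdint.h>
#include <stdio.h>

#define CTRL_ODD 0x20	/* assumed flag value */

static uint16_t
tx_control_word(const uint8_t *data, unsigned int len)
{
	if (len & 1)
		return ((CTRL_ODD << 8) | data[len - 1]);
	return (0);	/* even length: plain zero control word */
}

int
main(void)
{
	uint8_t pkt[] = { 0xaa, 0xbb, 0xcc };

	printf("control word: 0x%04x\n", tx_control_word(pkt, 3));
	return (0);
}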
static void usie_if_tx_callback(struct usb_xfer *xfer, usb_error_t error) { struct usie_softc *sc = usbd_xfer_softc(xfer); struct usb_page_cache *pc; struct ifnet *ifp = sc->sc_ifp; struct mbuf *m; uint16_t size; switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTFN(11, "transfer complete\n"); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_opackets++; /* fall through */ case USB_ST_SETUP: tr_setup: if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; if (m->m_pkthdr.len > (MCLBYTES - ETHER_HDR_LEN + ETHER_CRC_LEN - sizeof(sc->sc_txd))) { DPRINTF("packet len is too big: %d\n", m->m_pkthdr.len); break; } pc = usbd_xfer_get_frame(xfer, 0); sc->sc_txd.hip.len = htobe16(m->m_pkthdr.len + ETHER_HDR_LEN + ETHER_CRC_LEN); size = sizeof(sc->sc_txd); usbd_copy_in(pc, 0, &sc->sc_txd, size); usbd_m_copy_in(pc, size, m, 0, m->m_pkthdr.len); usbd_xfer_set_frame_len(xfer, 0, m->m_pkthdr.len + size + ETHER_CRC_LEN); BPF_MTAP(ifp, m); m_freem(m); usbd_transfer_submit(xfer); break; default: /* Error */ DPRINTF("USB transfer error, %s\n", usbd_errstr(error)); ifp->if_oerrors++; if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); ifp->if_ierrors++; goto tr_setup; } break; } }
/* * ARCnet output routine. * Encapsulate a packet of type family for the local net. * Assumes that ifp is actually pointer to arccom structure. */ int arc_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst, struct route *ro) { struct arc_header *ah; int error; u_int8_t atype, adst; int loop_copy = 0; int isphds; #if defined(INET) || defined(INET6) struct llentry *lle; #endif if (!((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING))) return(ENETDOWN); /* m, m1 aren't initialized yet */ error = 0; switch (dst->sa_family) { #ifdef INET case AF_INET: /* * For now, use the simple IP addr -> ARCnet addr mapping */ if (m->m_flags & (M_BCAST|M_MCAST)) adst = arcbroadcastaddr; /* ARCnet broadcast address */ else if (ifp->if_flags & IFF_NOARP) adst = ntohl(SIN(dst)->sin_addr.s_addr) & 0xFF; else { error = arpresolve(ifp, ro ? ro->ro_rt : NULL, m, dst, &adst, &lle); if (error) return (error == EWOULDBLOCK ? 0 : error); } atype = (ifp->if_flags & IFF_LINK0) ? ARCTYPE_IP_OLD : ARCTYPE_IP; break; case AF_ARP: { struct arphdr *ah; ah = mtod(m, struct arphdr *); ah->ar_hrd = htons(ARPHRD_ARCNET); loop_copy = -1; /* if this is for us, don't do it */ switch(ntohs(ah->ar_op)) { case ARPOP_REVREQUEST: case ARPOP_REVREPLY: atype = ARCTYPE_REVARP; break; case ARPOP_REQUEST: case ARPOP_REPLY: default: atype = ARCTYPE_ARP; break; } if (m->m_flags & M_BCAST) bcopy(ifp->if_broadcastaddr, &adst, ARC_ADDR_LEN); else bcopy(ar_tha(ah), &adst, ARC_ADDR_LEN); } break; #endif #ifdef INET6 case AF_INET6: error = nd6_storelladdr(ifp, m, dst, (u_char *)&adst, &lle); if (error) return (error); atype = ARCTYPE_INET6; break; #endif #ifdef IPX case AF_IPX: adst = SIPX(dst)->sipx_addr.x_host.c_host[5]; atype = ARCTYPE_IPX; if (adst == 0xff) adst = arcbroadcastaddr; break; #endif case AF_UNSPEC: loop_copy = -1; ah = (struct arc_header *)dst->sa_data; adst = ah->arc_dhost; atype = ah->arc_type; if (atype == ARCTYPE_ARP) { atype = (ifp->if_flags & IFF_LINK0) ? ARCTYPE_ARP_OLD: ARCTYPE_ARP; #ifdef ARCNET_ALLOW_BROKEN_ARP /* * XXX It's not clear per RFC826 if this is needed, but * "assigned numbers" say this is wrong. * However, e.g., AmiTCP 3.0Beta used it... we make this * switchable for emergency cases. Not perfect, but... */ if (ifp->if_flags & IFF_LINK2) mtod(m, struct arphdr *)->ar_pro = atype - 1; #endif } break; default: if_printf(ifp, "can't handle af%d\n", dst->sa_family); senderr(EAFNOSUPPORT); } isphds = arc_isphds(atype); M_PREPEND(m, isphds ? ARC_HDRNEWLEN : ARC_HDRLEN, M_DONTWAIT); if (m == 0) senderr(ENOBUFS); ah = mtod(m, struct arc_header *); ah->arc_type = atype; ah->arc_dhost = adst; ah->arc_shost = ARC_LLADDR(ifp); if (isphds) { ah->arc_flag = 0; ah->arc_seqid = 0; } if ((ifp->if_flags & IFF_SIMPLEX) && (loop_copy != -1)) { if ((m->m_flags & M_BCAST) || (loop_copy > 0)) { struct mbuf *n = m_copy(m, 0, (int)M_COPYALL); (void) if_simloop(ifp, n, dst->sa_family, ARC_HDRLEN); } else if (ah->arc_dhost == ah->arc_shost) { (void) if_simloop(ifp, m, dst->sa_family, ARC_HDRLEN); return (0); /* XXX */ } } BPF_MTAP(ifp, m); error = ifp->if_transmit(ifp, m); return (error); bad: if (m) m_freem(m); return (error); }
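/*
 * The loopback logic near the end of arc_output() is easy to misread: on
 * a simplex interface a broadcast must be looped back as a copy (the wire
 * won't echo it to us), while a frame addressed to ourselves is looped
 * back directly and never transmitted.  loop_copy == -1 (ARP, AF_UNSPEC)
 * suppresses looping entirely.  A small decision-table sketch:
 */
#include <stdio.h>

/* Returns 0 = no loopback, 1 = loop a copy, 2 = loop the original. */
static int
simplex_loop_action(int loop_copy, int is_bcast,
    unsigned char dhost, unsigned char shost)
{
	if (loop_copy == -1)
		return (0);		/* caller said: never loop */
	if (is_bcast || loop_copy > 0)
		return (1);		/* broadcast: us too, via a copy */
	if (dhost == shost)
		return (2);		/* addressed only to ourselves */
	return (0);
}

int
main(void)
{
	printf("bcast: %d\n", simplex_loop_action(0, 1, 0xff, 0x01)); /* 1 */
	printf("self:  %d\n", simplex_loop_action(0, 0, 0x01, 0x01)); /* 2 */
	printf("other: %d\n", simplex_loop_action(0, 0, 0x02, 0x01)); /* 0 */
	return (0);
}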
void snstart(struct ifnet *ifp, struct ifaltq_subque *ifsq) { struct sn_softc *sc = ifp->if_softc; u_int len; struct mbuf *m; struct mbuf *top; int pad; int mask; u_short length; u_short numPages; u_char packet_no; int time_out; ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq); if ((ifp->if_flags & IFF_RUNNING) == 0 || ifq_is_oactive(&ifp->if_snd)) return; if (sc->pages_wanted != -1) { /* XXX should never happen */ kprintf("%s: snstart() while memory allocation pending\n", ifp->if_xname); ifq_set_oactive(&ifp->if_snd); return; } startagain: /* * Sneak a peek at the next packet */ m = ifq_dequeue(&ifp->if_snd); if (m == NULL) return; /* * Compute the frame length and set pad to give an overall even * number of bytes. Below we assume that the packet length is even. */ for (len = 0, top = m; m; m = m->m_next) len += m->m_len; pad = (len & 1); /* * We drop packets that are too large. Perhaps we should truncate * them instead? */ if (len + pad > ETHER_MAX_LEN - ETHER_CRC_LEN) { kprintf("%s: large packet discarded (A)\n", ifp->if_xname); IFNET_STAT_INC(&sc->arpcom.ac_if, oerrors, 1); m_freem(top); goto readcheck; } #ifdef SW_PAD /* * If HW padding is not turned on, then pad to ETHER_MIN_LEN. */ if (len < ETHER_MIN_LEN - ETHER_CRC_LEN) pad = ETHER_MIN_LEN - ETHER_CRC_LEN - len; #endif /* SW_PAD */ length = pad + len; /* * The MMU wants the number of pages to be the number of 256 byte * 'pages', minus 1 (a packet can't ever have 0 pages). We also * include space for the status word, byte count and control bytes in * the allocation request. */ numPages = (length + 6) >> 8; /* * Now, try to allocate the memory */ SMC_SELECT_BANK(2); outw(BASE + MMU_CMD_REG_W, MMUCR_ALLOC | numPages); /* * Wait a short amount of time to see if the allocation request * completes. Otherwise, enable the interrupt and wait for * completion asynchronously. */ time_out = MEMORY_WAIT_TIME; do { if (inb(BASE + INTR_STAT_REG_B) & IM_ALLOC_INT) break; } while (--time_out); if (!time_out) { /* * No memory now. Oh well, wait until the chip finds memory * later. Remember how many pages we were asking for and * enable the allocation completion interrupt. Also set a * watchdog in case we miss the interrupt. We mark the * interface active since there is no point in attempting an * snstart() until after the memory is available. */ mask = inb(BASE + INTR_MASK_REG_B) | IM_ALLOC_INT; outb(BASE + INTR_MASK_REG_B, mask); sc->intr_mask = mask; ifp->if_timer = 1; ifq_set_oactive(&ifp->if_snd); sc->pages_wanted = numPages; ifq_prepend(&ifp->if_snd, top); return; } /* * The memory allocation completed. Check the results. */ packet_no = inb(BASE + ALLOC_RESULT_REG_B); if (packet_no & ARR_FAILED) { kprintf("%s: Memory allocation failed\n", ifp->if_xname); ifq_prepend(&ifp->if_snd, top); goto startagain; } /* * We have a packet number, so tell the card to use it. */ outb(BASE + PACKET_NUM_REG_B, packet_no); /* * Point to the beginning of the packet */ outw(BASE + POINTER_REG_W, PTR_AUTOINC | 0x0000); /* * Send the packet length (+6 for status, length and control byte) * and the status word (set to zeros) */ outw(BASE + DATA_REG_W, 0); outb(BASE + DATA_REG_B, (length + 6) & 0xFF); outb(BASE + DATA_REG_B, (length + 6) >> 8); /* * Push out the data to the card. */ for (m = top; m != NULL; m = m->m_next) { /* * Push out words. */ outsw(BASE + DATA_REG_W, mtod(m, caddr_t), m->m_len / 2); /* * Push out remaining byte. */ if (m->m_len & 1) outb(BASE + DATA_REG_B, *(mtod(m, caddr_t) + m->m_len - 1)); } /* * Push out padding.
*/ while (pad > 1) { outw(BASE + DATA_REG_W, 0); pad -= 2; } if (pad) outb(BASE + DATA_REG_B, 0); /* * Push out the control byte and the unused packet byte. The control byte is 0, * meaning the packet has an even length and no special CRC handling is * desired. */ outw(BASE + DATA_REG_W, 0); /* * Enable the interrupts and let the chipset deal with it. Also set a * watchdog in case we miss the interrupt. */ mask = inb(BASE + INTR_MASK_REG_B) | (IM_TX_INT | IM_TX_EMPTY_INT); outb(BASE + INTR_MASK_REG_B, mask); sc->intr_mask = mask; outw(BASE + MMU_CMD_REG_W, MMUCR_ENQUEUE); ifq_set_oactive(&ifp->if_snd); ifp->if_timer = 1; BPF_MTAP(ifp, top); IFNET_STAT_INC(ifp, opackets, 1); m_freem(top); readcheck: /* * Is another packet coming in? We don't want to overflow the tiny * RX FIFO. If nothing has arrived then attempt to queue another * transmit packet. */ if (inw(BASE + FIFO_PORTS_REG_W) & FIFO_REMPTY) goto startagain; }
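/*
 * The page arithmetic in snstart() is worth a worked example: the MMU
 * takes "number of 256-byte pages minus one", and the +6 reserves the
 * status word, byte count and control bytes that travel with the frame.
 */
#include <stdio.h>

static unsigned int
mmu_pages(unsigned int length)
{
	return ((length + 6) >> 8);
}

int
main(void)
{
	printf("60   -> %u\n", mmu_pages(60));	 /* one page, argument 0 */
	printf("1514 -> %u\n", mmu_pages(1514)); /* six pages, argument 5 */
	return (0);
}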
void hatm_rx(struct hatm_softc *sc, u_int cid, u_int flags, struct mbuf *m0, u_int len) { struct hevcc *vcc; struct atm_pseudohdr aph; struct mbuf *m, *m1; u_int vpi, vci; u_char *ptr; DBG(sc, RX, ("cid=%#x flags=%#x len=%u mbuf=%p", cid, flags, len, m0)); vcc = sc->vccs[cid]; if (vcc == NULL) goto drop; if (flags & HE_REGM_RBRQ_CON_CLOSED) { if (vcc->vflags & HE_VCC_RX_CLOSING) { vcc->vflags &= ~HE_VCC_RX_CLOSING; if (vcc->param.flags & ATMIO_FLAG_ASYNC) { if (!(vcc->vflags & HE_VCC_OPEN)) hatm_vcc_closed(sc, cid); } else cv_signal(&sc->vcc_cv); } goto drop; } if (!(vcc->vflags & HE_VCC_RX_OPEN)) goto drop; if (flags & HE_REGM_RBRQ_HBUF_ERROR) { sc->istats.hbuf_error++; if (vcc->chain != NULL) { m_freem(vcc->chain); vcc->chain = vcc->last = NULL; } goto drop; } if (m0 == NULL) { sc->istats.no_rcv_mbuf++; return; } if ((m0->m_len = len) == 0) { sc->istats.empty_hbuf++; m_free(m0); } else if (vcc->chain == NULL) { sc->istats.rx_seg++; vcc->chain = vcc->last = m0; vcc->last->m_next = NULL; vcc->chain->m_pkthdr.len = m0->m_len; vcc->chain->m_pkthdr.rcvif = sc->ifp; } else { sc->istats.rx_seg++; vcc->last->m_next = m0; vcc->last = m0; vcc->last->m_next = NULL; vcc->chain->m_pkthdr.len += m0->m_len; } if (!(flags & HE_REGM_RBRQ_END_PDU)) return; if (flags & HE_REGM_RBRQ_CRC_ERROR) { if (vcc->chain) m_freem(vcc->chain); vcc->chain = vcc->last = NULL; sc->istats.crc_error++; sc->ifp->if_ierrors++; return; } if (flags & HE_REGM_RBRQ_LEN_ERROR) { if (vcc->chain) m_freem(vcc->chain); vcc->chain = vcc->last = NULL; sc->istats.len_error++; sc->ifp->if_ierrors++; return; } #ifdef HATM_DEBUG if (sc->debug & DBG_DUMP) { struct mbuf *tmp; for (tmp = vcc->chain; tmp != NULL; tmp = tmp->m_next) { printf("mbuf %p: len=%u\n", tmp, tmp->m_len); for (ptr = mtod(tmp, u_char *); ptr < mtod(tmp, u_char *) + tmp->m_len; ptr++) printf("%02x ", *ptr); printf("\n"); } } #endif if (vcc->param.aal == ATMIO_AAL_5) { /* * Need to remove padding and the trailer. The trailer * may be split across buffers according to 2.10.1.2. * Assume that mbuf sizes are even (buffer sizes and cell * payload sizes are) and that there are no empty mbufs.
*/ m = vcc->last; if (m->m_len == 2) { /* Ah, oh, only part of CRC */ if (m == vcc->chain) { /* ups */ sc->istats.short_aal5++; m_freem(vcc->chain); vcc->chain = vcc->last = NULL; return; } for (m1 = vcc->chain; m1->m_next != m; m1 = m1->m_next) ; ptr = (u_char *)m1->m_data + m1->m_len - 4; } else if (m->m_len == 4) { /* Ah, oh, only CRC */ if (m == vcc->chain) { /* ups */ sc->istats.short_aal5++; m_freem(vcc->chain); vcc->chain = vcc->last = NULL; return; } for (m1 = vcc->chain; m1->m_next != m; m1 = m1->m_next) ; ptr = (u_char *)m1->m_data + m1->m_len - 2; } else if (m->m_len >= 6) { ptr = (u_char *)m->m_data + m->m_len - 6; } else panic("hatm_rx: bad mbuf len %d", m->m_len); len = (ptr[0] << 8) + ptr[1]; if (len > (u_int)vcc->chain->m_pkthdr.len - 4) { sc->istats.badlen_aal5++; m_freem(vcc->chain); vcc->chain = vcc->last = NULL; return; } m_adj(vcc->chain, -(vcc->chain->m_pkthdr.len - len)); } m = vcc->chain; vcc->chain = vcc->last = NULL; #ifdef ENABLE_BPF if (!(vcc->param.flags & ATMIO_FLAG_NG) && (vcc->param.aal == ATMIO_AAL_5) && (vcc->param.flags & ATM_PH_LLCSNAP)) BPF_MTAP(sc->ifp, m); #endif vpi = HE_VPI(cid); vci = HE_VCI(cid); ATM_PH_FLAGS(&aph) = vcc->param.flags & 0xff; ATM_PH_VPI(&aph) = vpi; ATM_PH_SETVCI(&aph, vci); sc->ifp->if_ipackets++; /* this is in if_atmsubr.c */ /* sc->ifp->if_ibytes += len; */ vcc->ibytes += len; vcc->ipackets++; #if 0 { struct mbuf *tmp; for (tmp = m; tmp != NULL; tmp = tmp->m_next) { printf("mbuf %p: len=%u\n", tmp, tmp->m_len); for (ptr = mtod(tmp, u_char *); ptr < mtod(tmp, u_char *) + tmp->m_len; ptr++) printf("%02x ", *ptr); printf("\n"); } } #endif atm_input(sc->ifp, &aph, m, vcc->rxhand); return; drop: if (m0 != NULL) m_free(m0); }
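/*
 * The AAL5 trailer handling in hatm_rx() deserves a flat-buffer sketch:
 * the PDU ends with a 2-byte length field followed by a 4-byte CRC, and
 * the hardware may split those six bytes between the last two buffers.
 * Depending on how much of the trailer landed in the last buffer, the
 * length field is found either there or at the end of the previous one.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int
aal5_pdu_len(const uint8_t *prev, unsigned int prev_len,
    const uint8_t *last, unsigned int last_len)
{
	const uint8_t *p;

	if (last_len == 2)		/* only part of the CRC here */
		p = prev + prev_len - 4;
	else if (last_len == 4)		/* only the CRC here */
		p = prev + prev_len - 2;
	else				/* >= 6: whole trailer is here */
		p = last + last_len - 6;
	return (((unsigned int)p[0] << 8) | p[1]);
}

int
main(void)
{
	/* len = 0x0040; the 4-byte CRC is split 2/2 across the buffers */
	uint8_t prev[] = { 0x00, 0x40, 0xde, 0xad };
	uint8_t last[] = { 0xbe, 0xef };

	printf("PDU length: %u\n",
	    aal5_pdu_len(prev, sizeof(prev), last, sizeof(last)));
	return (0);
}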
static void ep_if_start(struct ifnet *ifp, struct ifaltq_subque *ifsq) { struct ep_softc *sc = ifp->if_softc; u_int len; struct mbuf *m; struct mbuf *top; int pad; ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq); if (sc->gone) { ifq_purge(&ifp->if_snd); return; } while (inw(BASE + EP_STATUS) & S_COMMAND_IN_PROGRESS); if (ifq_is_oactive(&ifp->if_snd)) { return; } crit_enter(); startagain: /* Sneak a peek at the next packet */ m = ifq_dequeue(&ifp->if_snd, NULL); if (m == NULL) { crit_exit(); return; } for (len = 0, top = m; m; m = m->m_next) len += m->m_len; m = top; pad = padmap[len & 3]; /* * The 3c509 automatically pads short packets to minimum ethernet length, * but we drop packets that are too large. Perhaps we should truncate * them instead? */ if (len + pad > ETHER_MAX_LEN) { /* packet is obviously too large: toss it */ IFNET_STAT_INC(ifp, oerrors, 1); m_freem(m); goto readcheck; } if (inw(BASE + EP_W1_FREE_TX) < len + pad + 4) { /* no room in FIFO */ outw(BASE + EP_COMMAND, SET_TX_AVAIL_THRESH | (len + pad + 4)); /* make sure */ if (inw(BASE + EP_W1_FREE_TX) < len + pad + 4) { ifq_set_oactive(&ifp->if_snd); ifq_prepend(&ifp->if_snd, top); crit_exit(); return; } } else { outw(BASE + EP_COMMAND, SET_TX_AVAIL_THRESH | EP_THRESH_DISABLE); } outw(BASE + EP_W1_TX_PIO_WR_1, len); outw(BASE + EP_W1_TX_PIO_WR_1, 0x0); /* Second dword meaningless */ if (EP_FTST(sc, F_ACCESS_32_BITS)) { for (top = m; m != NULL; m = m->m_next) { outsl(BASE + EP_W1_TX_PIO_WR_1, mtod(m, caddr_t), m->m_len / 4); if (m->m_len & 3) outsb(BASE + EP_W1_TX_PIO_WR_1, mtod(m, caddr_t) + (m->m_len & (~3)), m->m_len & 3); } } else { for (top = m; m != NULL; m = m->m_next) { outsw(BASE + EP_W1_TX_PIO_WR_1, mtod(m, caddr_t), m->m_len / 2); if (m->m_len & 1) outb(BASE + EP_W1_TX_PIO_WR_1, *(mtod(m, caddr_t) + m->m_len - 1)); } } while (pad--) outb(BASE + EP_W1_TX_PIO_WR_1, 0); /* Padding */ BPF_MTAP(ifp, top); ifp->if_timer = 2; IFNET_STAT_INC(ifp, opackets, 1); m_freem(top); /* * Is another packet coming in? We don't want to overflow the tiny RX * fifo. */ readcheck: if (inw(BASE + EP_W1_RX_STATUS) & RX_BYTES_MASK) { /* * we check if we have packets left, in that case we prepare to come * back later */ if (!ifq_is_empty(&ifp->if_snd)) outw(BASE + EP_COMMAND, SET_TX_AVAIL_THRESH | 8); crit_exit(); return; } goto startagain; }
static uint8_t cdce_ncm_fill_tx_frames(struct usb_xfer *xfer, uint8_t index) { struct cdce_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = uether_getifp(&sc->sc_ue); struct usb_page_cache *pc = usbd_xfer_get_frame(xfer, index); struct mbuf *m; uint32_t rem; uint32_t offset; uint32_t last_offset; uint16_t n; uint8_t retval; usbd_xfer_set_frame_offset(xfer, index * CDCE_NCM_TX_MAXLEN, index); offset = sizeof(sc->sc_ncm.hdr) + sizeof(sc->sc_ncm.dpt) + sizeof(sc->sc_ncm.dp); /* Store last valid offset before alignment */ last_offset = offset; /* Align offset */ offset = CDCE_NCM_ALIGN(sc->sc_ncm.tx_remainder, offset, sc->sc_ncm.tx_modulus); /* Zero pad */ cdce_ncm_tx_zero(pc, last_offset, offset); /* buffer full */ retval = 2; for (n = 0; n != sc->sc_ncm.tx_nframe; n++) { /* check if end of transmit buffer is reached */ if (offset >= sc->sc_ncm.tx_max) break; /* compute maximum buffer size */ rem = sc->sc_ncm.tx_max - offset; IFQ_DRV_DEQUEUE(&(ifp->if_snd), m); if (m == NULL) { /* buffer not full */ retval = 1; break; } if (m->m_pkthdr.len > rem) { if (n == 0) { /* The frame won't fit in our buffer */ DPRINTFN(1, "Frame too big to be transmitted!\n"); m_freem(m); ifp->if_oerrors++; n--; continue; } /* Wait till next buffer becomes ready */ IFQ_DRV_PREPEND(&(ifp->if_snd), m); break; } usbd_m_copy_in(pc, offset, m, 0, m->m_pkthdr.len); USETW(sc->sc_ncm.dp[n].wFrameLength, m->m_pkthdr.len); USETW(sc->sc_ncm.dp[n].wFrameIndex, offset); /* Update offset */ offset += m->m_pkthdr.len; /* Store last valid offset before alignment */ last_offset = offset; /* Align offset */ offset = CDCE_NCM_ALIGN(sc->sc_ncm.tx_remainder, offset, sc->sc_ncm.tx_modulus); /* Zero pad */ cdce_ncm_tx_zero(pc, last_offset, offset); /* * If there's a BPF listener, bounce a copy * of this frame to him: */ BPF_MTAP(ifp, m); /* Free mbuf */ m_freem(m); /* Pre-increment interface counter */ ifp->if_opackets++; } if (n == 0) return (0); rem = (sizeof(sc->sc_ncm.dpt) + (4 * n) + 4); USETW(sc->sc_ncm.dpt.wLength, rem); /* zero the rest of the data pointer entries */ for (; n != CDCE_NCM_SUBFRAMES_MAX; n++) { USETW(sc->sc_ncm.dp[n].wFrameLength, 0); USETW(sc->sc_ncm.dp[n].wFrameIndex, 0); } offset = last_offset; /* Align offset */ offset = CDCE_NCM_ALIGN(0, offset, CDCE_NCM_TX_MINLEN); /* Optimise, save bandwidth and force short termination */ if (offset >= sc->sc_ncm.tx_max) offset = sc->sc_ncm.tx_max; else offset ++; /* Zero pad */ cdce_ncm_tx_zero(pc, last_offset, offset); /* set frame length */ usbd_xfer_set_frame_len(xfer, index, offset); /* Fill out 16-bit header */ sc->sc_ncm.hdr.dwSignature[0] = 'N'; sc->sc_ncm.hdr.dwSignature[1] = 'C'; sc->sc_ncm.hdr.dwSignature[2] = 'M'; sc->sc_ncm.hdr.dwSignature[3] = 'H'; USETW(sc->sc_ncm.hdr.wHeaderLength, sizeof(sc->sc_ncm.hdr)); USETW(sc->sc_ncm.hdr.wBlockLength, offset); USETW(sc->sc_ncm.hdr.wSequence, sc->sc_ncm.tx_seq); USETW(sc->sc_ncm.hdr.wDptIndex, sizeof(sc->sc_ncm.hdr)); sc->sc_ncm.tx_seq++; /* Fill out 16-bit frame table header */ sc->sc_ncm.dpt.dwSignature[0] = 'N'; sc->sc_ncm.dpt.dwSignature[1] = 'C'; sc->sc_ncm.dpt.dwSignature[2] = 'M'; sc->sc_ncm.dpt.dwSignature[3] = '0'; USETW(sc->sc_ncm.dpt.wNextNdpIndex, 0); /* reserved */ usbd_copy_in(pc, 0, &(sc->sc_ncm.hdr), sizeof(sc->sc_ncm.hdr)); usbd_copy_in(pc, sizeof(sc->sc_ncm.hdr), &(sc->sc_ncm.dpt), sizeof(sc->sc_ncm.dpt)); usbd_copy_in(pc, sizeof(sc->sc_ncm.hdr) + sizeof(sc->sc_ncm.dpt), &(sc->sc_ncm.dp), sizeof(sc->sc_ncm.dp)); return (retval); }
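/*
 * The repeated "align offset" step above rounds the payload offset up so
 * that (offset % tx_modulus) == tx_remainder, as NCM requires.  A sketch
 * of the arithmetic CDCE_NCM_ALIGN() presumably performs (the macro body
 * is an assumption here); the modulus must be a power of two.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
ncm_align(uint32_t remainder, uint32_t offset, uint32_t modulus)
{
	return ((((offset - remainder) + (modulus - 1)) &
	    ~(modulus - 1)) + remainder);
}

int
main(void)
{
	printf("%u\n", ncm_align(0, 13, 4));	/* 16: next multiple of 4 */
	printf("%u\n", ncm_align(2, 13, 8));	/* 18: 18 % 8 == 2 */
	return (0);
}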
static void ipheth_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error) { struct ipheth_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = uether_getifp(&sc->sc_ue); struct usb_page_cache *pc; struct mbuf *m; uint8_t x; int actlen; int aframes; usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL); DPRINTFN(1, "\n"); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTFN(11, "transfer complete: %u bytes in %u frames\n", actlen, aframes); ifp->if_opackets++; /* free all previous TX buffers */ ipheth_free_queue(sc->sc_tx_buf, IPHETH_TX_FRAMES_MAX); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: for (x = 0; x != IPHETH_TX_FRAMES_MAX; x++) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; usbd_xfer_set_frame_offset(xfer, x * IPHETH_BUF_SIZE, x); pc = usbd_xfer_get_frame(xfer, x); sc->sc_tx_buf[x] = m; if (m->m_pkthdr.len > IPHETH_BUF_SIZE) m->m_pkthdr.len = IPHETH_BUF_SIZE; usbd_m_copy_in(pc, 0, m, 0, m->m_pkthdr.len); usbd_xfer_set_frame_len(xfer, x, IPHETH_BUF_SIZE); if (IPHETH_BUF_SIZE != m->m_pkthdr.len) { usbd_frame_zero(pc, m->m_pkthdr.len, IPHETH_BUF_SIZE - m->m_pkthdr.len); } /* * If there's a BPF listener, bounce a copy of * this frame to him: */ BPF_MTAP(ifp, m); } if (x != 0) { usbd_xfer_set_frames(xfer, x); usbd_transfer_submit(xfer); } break; default: /* Error */ DPRINTFN(11, "transfer error, %s\n", usbd_errstr(error)); /* free all previous TX buffers */ ipheth_free_queue(sc->sc_tx_buf, IPHETH_TX_FRAMES_MAX); /* count output errors */ ifp->if_oerrors++; if (error != USB_ERR_CANCELLED) { /* try to clear stall first */ usbd_xfer_set_stall(xfer); goto tr_setup; } break; } }
static void vtbe_txstart_locked(struct vtbe_softc *sc) { struct virtio_net_hdr_mrg_rxbuf *vnh; struct iovec iov[DESC_COUNT]; struct vqueue_info *vq; struct iovec *riov; struct ifnet *ifp; struct mbuf *m; struct uio uio; int enqueued; int iolen; int error; int *addr; int reg; int len; int n; VTBE_ASSERT_LOCKED(sc); /* RX queue */ vq = &sc->vs_queues[0]; if (!vq_has_descs(vq)) { return; } ifp = sc->ifp; if (ifp->if_drv_flags & IFF_DRV_OACTIVE) { return; } enqueued = 0; if (!vq_ring_ready(vq)) return; vq->vq_save_used = be16toh(vq->vq_used->idx); for (;;) { if (!vq_has_descs(vq)) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) { break; } n = vq_getchain(sc->beri_mem_offset, vq, iov, DESC_COUNT, NULL); KASSERT(n >= 1 && n <= DESC_COUNT, ("wrong descriptors num %d", n)); addr = iov[0].iov_base; len = iov[0].iov_len; vnh = iov[0].iov_base; memset(vnh, 0, sc->hdrsize); vnh->num_buffers = htobe16(1); iov[0].iov_len -= sc->hdrsize; iov[0].iov_base = (void *)((uintptr_t)iov[0].iov_base + sc->hdrsize); riov = &iov[0]; uio.uio_resid = iov[0].iov_len; uio.uio_iov = riov; uio.uio_segflg = UIO_SYSSPACE; uio.uio_iovcnt = 1; uio.uio_offset = 0; uio.uio_rw = UIO_READ; error = m_mbuftouio(&uio, m, 0); if (error) panic("m_mbuftouio failed\n"); iolen = (len - iov[0].iov_len - sc->hdrsize); vq_relchain(vq, iov, 0, iolen + sc->hdrsize); paddr_unmap((void *)addr, len); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); BPF_MTAP(ifp, m); m_freem(m); ++enqueued; } if (enqueued != 0) { reg = htobe32(VIRTIO_MMIO_INT_VRING); WRITE4(sc, VIRTIO_MMIO_INTERRUPT_STATUS, reg); PIO_SET(sc->pio_send, Q_INTR, 1); } }
Static void url_rxeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status) { struct url_chain *c = priv; struct url_softc *sc = c->url_sc; struct ifnet *ifp = GET_IFP(sc); struct mbuf *m; u_int32_t total_len; url_rxhdr_t rxhdr; int s; DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev),__func__)); if (sc->sc_dying) return; if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) return; sc->sc_rx_errs++; if (usbd_ratecheck(&sc->sc_rx_notice)) { printf("%s: %u usb errors on rx: %s\n", USBDEVNAME(sc->sc_dev), sc->sc_rx_errs, usbd_errstr(status)); sc->sc_rx_errs = 0; } if (status == USBD_STALLED) { sc->sc_refcnt++; usbd_clear_endpoint_stall(sc->sc_pipe_rx); if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); } goto done; } usbd_get_xfer_status(xfer, NULL, NULL, &total_len, NULL); memcpy(mtod(c->url_mbuf, char *), c->url_buf, total_len); if (total_len <= ETHER_CRC_LEN) { ifp->if_ierrors++; goto done; } memcpy(&rxhdr, c->url_buf + total_len - ETHER_CRC_LEN, sizeof(rxhdr)); DPRINTF(("%s: RX Status: %dbytes%s%s%s%s packets\n", USBDEVNAME(sc->sc_dev), UGETW(rxhdr) & URL_RXHDR_BYTEC_MASK, UGETW(rxhdr) & URL_RXHDR_VALID_MASK ? ", Valid" : "", UGETW(rxhdr) & URL_RXHDR_RUNTPKT_MASK ? ", Runt" : "", UGETW(rxhdr) & URL_RXHDR_PHYPKT_MASK ? ", Physical match" : "", UGETW(rxhdr) & URL_RXHDR_MCASTPKT_MASK ? ", Multicast" : "")); if ((UGETW(rxhdr) & URL_RXHDR_VALID_MASK) == 0) { ifp->if_ierrors++; goto done; } ifp->if_ipackets++; total_len -= ETHER_CRC_LEN; m = c->url_mbuf; m->m_pkthdr.len = m->m_len = total_len; m->m_pkthdr.rcvif = ifp; s = splnet(); if (url_newbuf(sc, c, NULL) == ENOBUFS) { ifp->if_ierrors++; goto done1; } #if NBPFILTER > 0 if (ifp->if_bpf) BPF_MTAP(ifp, m); #endif DPRINTF(("%s: %s: deliver %d\n", USBDEVNAME(sc->sc_dev), __func__, m->m_len)); IF_INPUT(ifp, m); done1: splx(s); done: /* Setup new transfer */ usbd_setup_xfer(xfer, sc->sc_pipe_rx, c, c->url_buf, URL_BUFSZ, USBD_SHORT_XFER_OK | USBD_NO_COPY, USBD_NO_TIMEOUT, url_rxeof); sc->sc_refcnt++; usbd_transfer(xfer); if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); DPRINTF(("%s: %s: start rx\n", USBDEVNAME(sc->sc_dev), __func__)); }
/* * We enter here when we have a rule with O_LOG. * XXX this function alone takes about 2Kbytes of code! */ void ipfw_log(struct ip_fw *f, u_int hlen, struct ip_fw_args *args, struct mbuf *m, struct ifnet *oif, u_short offset, uint32_t tablearg, struct ip *ip) { char *action; int limit_reached = 0; char action2[40], proto[128], fragment[32]; if (V_fw_verbose == 0) { #ifndef WITHOUT_BPF struct m_hdr mh; if (log_if == NULL || log_if->if_bpf == NULL) return; /* BPF treats the "mbuf" as read-only */ mh.mh_next = m; mh.mh_len = ETHER_HDR_LEN; if (args->eh) { /* layer2, use orig hdr */ mh.mh_data = (char *)args->eh; } else { /* add fake header. Later we will store * more info in the header */ mh.mh_data = "DDDDDDSSSSSS\x08\x00"; } BPF_MTAP(log_if, (struct mbuf *)&mh); #endif /* !WITHOUT_BPF */ return; } /* the old 'log' function */ fragment[0] = '\0'; proto[0] = '\0'; if (f == NULL) { /* bogus pkt */ if (V_verbose_limit != 0 && V_norule_counter >= V_verbose_limit) return; V_norule_counter++; if (V_norule_counter == V_verbose_limit) limit_reached = V_verbose_limit; action = "Refuse"; } else { /* O_LOG is the first action, find the real one */ ipfw_insn *cmd = ACTION_PTR(f); ipfw_insn_log *l = (ipfw_insn_log *)cmd; if (l->max_log != 0 && l->log_left == 0) return; l->log_left--; if (l->log_left == 0) limit_reached = l->max_log; cmd += F_LEN(cmd); /* point to first action */ if (cmd->opcode == O_ALTQ) { ipfw_insn_altq *altq = (ipfw_insn_altq *)cmd; snprintf(SNPARGS(action2, 0), "Altq %d", altq->qid); cmd += F_LEN(cmd); } if (cmd->opcode == O_PROB) cmd += F_LEN(cmd); if (cmd->opcode == O_TAG) cmd += F_LEN(cmd); action = action2; switch (cmd->opcode) { case O_DENY: action = "Deny"; break; case O_REJECT: if (cmd->arg1==ICMP_REJECT_RST) action = "Reset"; else if (cmd->arg1==ICMP_UNREACH_HOST) action = "Reject"; else snprintf(SNPARGS(action2, 0), "Unreach %d", cmd->arg1); break; case O_UNREACH6: if (cmd->arg1==ICMP6_UNREACH_RST) action = "Reset"; else snprintf(SNPARGS(action2, 0), "Unreach %d", cmd->arg1); break; case O_ACCEPT: action = "Accept"; break; case O_COUNT: action = "Count"; break; case O_DIVERT: snprintf(SNPARGS(action2, 0), "Divert %d", cmd->arg1); break; case O_TEE: snprintf(SNPARGS(action2, 0), "Tee %d", cmd->arg1); break; case O_SETFIB: snprintf(SNPARGS(action2, 0), "SetFib %d", cmd->arg1); break; case O_SKIPTO: snprintf(SNPARGS(action2, 0), "SkipTo %d", cmd->arg1); break; case O_PIPE: snprintf(SNPARGS(action2, 0), "Pipe %d", cmd->arg1); break; case O_QUEUE: snprintf(SNPARGS(action2, 0), "Queue %d", cmd->arg1); break; case O_FORWARD_IP: { ipfw_insn_sa *sa = (ipfw_insn_sa *)cmd; int len; struct in_addr dummyaddr; if (sa->sa.sin_addr.s_addr == INADDR_ANY) dummyaddr.s_addr = htonl(tablearg); else dummyaddr.s_addr = sa->sa.sin_addr.s_addr; len = snprintf(SNPARGS(action2, 0), "Forward to %s", inet_ntoa(dummyaddr)); if (sa->sa.sin_port) snprintf(SNPARGS(action2, len), ":%d", sa->sa.sin_port); } break; case O_NETGRAPH: snprintf(SNPARGS(action2, 0), "Netgraph %d", cmd->arg1); break; case O_NGTEE: snprintf(SNPARGS(action2, 0), "Ngtee %d", cmd->arg1); break; case O_NAT: action = "Nat"; break; case O_REASS: action = "Reass"; break; default: action = "UNKNOWN"; break; } } if (hlen == 0) { /* non-ip */ snprintf(SNPARGS(proto, 0), "MAC"); } else { int len; #ifdef INET6 char src[INET6_ADDRSTRLEN + 2], dst[INET6_ADDRSTRLEN + 2]; #else char src[INET_ADDRSTRLEN], dst[INET_ADDRSTRLEN]; #endif struct icmphdr *icmp; struct tcphdr *tcp; struct udphdr *udp; #ifdef INET6 struct ip6_hdr *ip6 = NULL; struct 
icmp6_hdr *icmp6; #endif src[0] = '\0'; dst[0] = '\0'; #ifdef INET6 if (IS_IP6_FLOW_ID(&(args->f_id))) { char ip6buf[INET6_ADDRSTRLEN]; snprintf(src, sizeof(src), "[%s]", ip6_sprintf(ip6buf, &args->f_id.src_ip6)); snprintf(dst, sizeof(dst), "[%s]", ip6_sprintf(ip6buf, &args->f_id.dst_ip6)); ip6 = (struct ip6_hdr *)ip; tcp = (struct tcphdr *)(((char *)ip) + hlen); udp = (struct udphdr *)(((char *)ip) + hlen); } else #endif { tcp = L3HDR(struct tcphdr, ip); udp = L3HDR(struct udphdr, ip); inet_ntoa_r(ip->ip_src, src); inet_ntoa_r(ip->ip_dst, dst); } switch (args->f_id.proto) { case IPPROTO_TCP: len = snprintf(SNPARGS(proto, 0), "TCP %s", src); if (offset == 0) snprintf(SNPARGS(proto, len), ":%d %s:%d", ntohs(tcp->th_sport), dst, ntohs(tcp->th_dport)); else snprintf(SNPARGS(proto, len), " %s", dst); break; case IPPROTO_UDP: len = snprintf(SNPARGS(proto, 0), "UDP %s", src); if (offset == 0) snprintf(SNPARGS(proto, len), ":%d %s:%d", ntohs(udp->uh_sport), dst, ntohs(udp->uh_dport)); else snprintf(SNPARGS(proto, len), " %s", dst); break; case IPPROTO_ICMP: icmp = L3HDR(struct icmphdr, ip); if (offset == 0) len = snprintf(SNPARGS(proto, 0), "ICMP:%u.%u ", icmp->icmp_type, icmp->icmp_code); else len = snprintf(SNPARGS(proto, 0), "ICMP "); len += snprintf(SNPARGS(proto, len), "%s", src); snprintf(SNPARGS(proto, len), " %s", dst); break; #ifdef INET6 case IPPROTO_ICMPV6: icmp6 = (struct icmp6_hdr *)(((char *)ip) + hlen); if (offset == 0) len = snprintf(SNPARGS(proto, 0), "ICMPv6:%u.%u ", icmp6->icmp6_type, icmp6->icmp6_code); else len = snprintf(SNPARGS(proto, 0), "ICMPv6 "); len += snprintf(SNPARGS(proto, len), "%s", src); snprintf(SNPARGS(proto, len), " %s", dst); break; #endif default: len = snprintf(SNPARGS(proto, 0), "P:%d %s", args->f_id.proto, src); snprintf(SNPARGS(proto, len), " %s", dst); break; } #ifdef INET6 if (IS_IP6_FLOW_ID(&(args->f_id))) { if (offset & (IP6F_OFF_MASK | IP6F_MORE_FRAG)) snprintf(SNPARGS(fragment, 0), " (frag %08x:%d@%d%s)", args->f_id.extra, ntohs(ip6->ip6_plen) - hlen, ntohs(offset & IP6F_OFF_MASK) << 3, (offset & IP6F_MORE_FRAG) ? "+" : ""); } else #endif { int ipoff, iplen; ipoff = ntohs(ip->ip_off); iplen = ntohs(ip->ip_len); if (ipoff & (IP_MF | IP_OFFMASK)) snprintf(SNPARGS(fragment, 0), " (frag %d:%d@%d%s)", ntohs(ip->ip_id), iplen - (ip->ip_hl << 2), offset << 3, (ipoff & IP_MF) ? "+" : ""); } } #ifdef __FreeBSD__ if (oif || m->m_pkthdr.rcvif) log(LOG_SECURITY | LOG_INFO, "ipfw: %d %s %s %s via %s%s\n", f ? f->rulenum : -1, action, proto, oif ? "out" : "in", oif ? oif->if_xname : m->m_pkthdr.rcvif->if_xname, fragment); else #endif log(LOG_SECURITY | LOG_INFO, "ipfw: %d %s %s [no if info]%s\n", f ? f->rulenum : -1, action, proto, fragment); if (limit_reached) log(LOG_SECURITY | LOG_NOTICE, "ipfw: limit %d reached on entry %d\n", limit_reached, f ? f->rulenum : -1); }
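/*
 * The fast path at the top of ipfw_log() is the interesting bit: with
 * verbose logging off, the packet goes to a BPF tap on the ipfw log
 * interface, and when no layer-2 header exists a fake 14-byte Ethernet
 * header ("DDDDDD" destination, "SSSSSS" source, type 0x0800) is chained
 * in front on the stack.  A sketch with simplified stand-in types:
 */
#include <stddef.h>
#include <stdio.h>

struct seg { struct seg *next; int len; const char *data; };

static const char fake_ether[] = "DDDDDDSSSSSS\x08\x00";

static void
print_tap(struct seg *s)
{
	for (; s != NULL; s = s->next)
		printf("segment: %d bytes\n", s->len);
}

static void
log_tap(struct seg *pkt, const char *l2hdr, void (*tap)(struct seg *))
{
	struct seg mh;		/* stack header; the tap only reads it */

	mh.next = pkt;
	mh.len = 14;		/* ETHER_HDR_LEN */
	mh.data = (l2hdr != NULL) ? l2hdr : fake_ether;
	tap(&mh);
}

int
main(void)
{
	struct seg payload = { NULL, 60, NULL };

	log_tap(&payload, NULL, print_tap);	/* no l2 header: fake one */
	return (0);
}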
static int gre_input2(struct mbuf *m, int hlen, u_char proto) { struct greip *gip = mtod(m, struct greip *); int s; struct ifqueue *ifq; struct gre_softc *sc; u_short flags; if ((sc = gre_lookup(m, proto)) == NULL) { /* No matching tunnel or tunnel is down. */ return (0); } sc->sc_if.if_ipackets++; sc->sc_if.if_ibytes += m->m_pkthdr.len; switch (proto) { case IPPROTO_GRE: hlen += sizeof(struct gre_h); /* process GRE flags as packet can be of variable len */ flags = ntohs(gip->gi_flags); /* Checksum & Offset are present */ if ((flags & GRE_CP) | (flags & GRE_RP)) hlen += 4; /* We don't support routing fields (variable length) */ if (flags & GRE_RP) return (0); if (flags & GRE_KP) hlen += 4; if (flags & GRE_SP) hlen += 4; switch (ntohs(gip->gi_ptype)) { /* ethertypes */ case ETHERTYPE_IP: /* shouldn't need a schednetisr(), as */ case WCCP_PROTOCOL_TYPE: /* we are in ip_input */ ifq = &ipintrq; break; #ifdef NS case ETHERTYPE_NS: ifq = &nsintrq; schednetisr(NETISR_NS); break; #endif #ifdef NETATALK case ETHERTYPE_ATALK: ifq = &atintrq1; schednetisr(NETISR_ATALK); break; #endif case ETHERTYPE_IPV6: /* FALLTHROUGH */ default: /* others not yet supported */ return (0); } break; default: /* others not yet supported */ return (0); } m->m_data += hlen; m->m_len -= hlen; m->m_pkthdr.len -= hlen; if (sc->sc_if.if_bpf) { struct mbuf m0; u_int32_t af = AF_INET; m0.m_next = m; m0.m_len = 4; m0.m_data = (char *)&af; BPF_MTAP(&(sc->sc_if), &m0); } m->m_pkthdr.rcvif = &sc->sc_if; s = splnet(); /* possible */ if (_IF_QFULL(ifq)) { _IF_DROP(ifq); m_freem(m); } else { IF_ENQUEUE(ifq, m); } splx(s); return (1); /* packet is done, no further processing needed */ }
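/*
 * gre_input2() grows the header length from the GRE flag word before it
 * can strip the encapsulation: checksum and routing-offset share one
 * 4-byte word, key and sequence number add one word each, and a routing
 * field (variable length) simply isn't supported.  The flag values below
 * are the standard RFC 1701 bits.
 */
#include <stdint.h>
#include <stdio.h>

#define GRE_CP 0x8000	/* checksum present */
#define GRE_RP 0x4000	/* routing present */
#define GRE_KP 0x2000	/* key present */
#define GRE_SP 0x1000	/* sequence number present */

/* Returns the adjusted header length, or -1 if unsupported. */
static int
gre_hdrlen(uint16_t flags, int hlen)
{
	if (flags & (GRE_CP | GRE_RP))
		hlen += 4;
	if (flags & GRE_RP)
		return (-1);
	if (flags & GRE_KP)
		hlen += 4;
	if (flags & GRE_SP)
		hlen += 4;
	return (hlen);
}

int
main(void)
{
	printf("%d\n", gre_hdrlen(GRE_CP | GRE_KP, 24));  /* 32 */
	printf("%d\n", gre_hdrlen(GRE_RP, 24));           /* -1 */
	return (0);
}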
/* * admsw_start: [ifnet interface function] * * Start packet transmission on the interface. */ static void admsw_start(struct ifnet *ifp) { struct admsw_softc *sc = ifp->if_softc; struct mbuf *m0, *m; struct admsw_descsoft *ds; struct admsw_desc *desc; bus_dmamap_t dmamap; struct ether_header *eh; int error, nexttx, len, i; static int vlan = 0; /* * Loop through the send queues, setting up transmit descriptors * until we drain the queues, or use up all available transmit * descriptors. */ for (;;) { vlan++; if (vlan == SW_DEVS) vlan = 0; i = vlan; for (;;) { ifp = sc->sc_ifnet[i]; if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) == IFF_DRV_RUNNING) { /* Grab a packet off the queue. */ IF_DEQUEUE(&ifp->if_snd, m0); if (m0 != NULL) break; } i++; if (i == SW_DEVS) i = 0; if (i == vlan) return; } vlan = i; m = NULL; /* Get a spare descriptor. */ if (sc->sc_txfree == 0) { /* No more slots left; notify upper layer. */ ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } nexttx = sc->sc_txnext; desc = &sc->sc_txldescs[nexttx]; ds = &sc->sc_txlsoft[nexttx]; dmamap = ds->ds_dmamap; /* * Load the DMA map. If this fails, the packet either * didn't fit in the allotted number of segments, or we * were short on resources. In this case, we'll copy * and try again. */ if (m0->m_pkthdr.len < ETHER_MIN_LEN || bus_dmamap_load_mbuf(sc->sc_bufs_dmat, dmamap, m0, admsw_mbuf_map_addr, ds, BUS_DMA_NOWAIT) != 0) { MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { device_printf(sc->sc_dev, "unable to allocate Tx mbuf\n"); break; } if (m0->m_pkthdr.len > MHLEN) { MCLGET(m, M_NOWAIT); if ((m->m_flags & M_EXT) == 0) { device_printf(sc->sc_dev, "unable to allocate Tx cluster\n"); m_freem(m); break; } } m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags; m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *)); m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len; if (m->m_pkthdr.len < ETHER_MIN_LEN) { if (M_TRAILINGSPACE(m) < ETHER_MIN_LEN - m->m_pkthdr.len) panic("admsw_start: M_TRAILINGSPACE\n"); memset(mtod(m, uint8_t *) + m->m_pkthdr.len, 0, ETHER_MIN_LEN - ETHER_CRC_LEN - m->m_pkthdr.len); m->m_pkthdr.len = m->m_len = ETHER_MIN_LEN; } error = bus_dmamap_load_mbuf(sc->sc_bufs_dmat, dmamap, m, admsw_mbuf_map_addr, ds, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "unable to load Tx buffer, error = %d\n", error); break; } } if (m != NULL) { m_freem(m0); m0 = m; } /* * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. */ /* Sync the DMA map. */ bus_dmamap_sync(sc->sc_bufs_dmat, dmamap, BUS_DMASYNC_PREWRITE); if (ds->ds_nsegs != 1 && ds->ds_nsegs != 2) panic("admsw_start: nsegs == %d\n", ds->ds_nsegs); desc->data = ds->ds_addr[0]; desc->len = len = ds->ds_len[0]; if (ds->ds_nsegs > 1) { len += ds->ds_len[1]; desc->cntl = ds->ds_addr[1] | ADM5120_DMA_BUF2ENABLE; } else desc->cntl = 0; desc->status = (len << ADM5120_DMA_LENSHIFT) | (1 << vlan); eh = mtod(m0, struct ether_header *); if (ntohs(eh->ether_type) == ETHERTYPE_IP && m0->m_pkthdr.csum_flags & CSUM_IP) desc->status |= ADM5120_DMA_CSUM; if (nexttx == ADMSW_NTXLDESC - 1) desc->data |= ADM5120_DMA_RINGEND; desc->data |= ADM5120_DMA_OWN; /* Sync the descriptor. */ ADMSW_CDTXLSYNC(sc, nexttx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); REG_WRITE(SEND_TRIG_REG, 1); /* printf("send slot %d\n",nexttx); */ /* * Store a pointer to the packet so we can free it later. */ ds->ds_mbuf = m0; /* Advance the Tx pointer. */ sc->sc_txfree--; sc->sc_txnext = ADMSW_NEXTTXL(nexttx); /* Pass the packet to any BPF listeners.
*/ BPF_MTAP(ifp, m0); /* Set a watchdog timer in case the chip flakes out. */ sc->sc_timer = 5; }
static void cdce_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error) { struct cdce_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = uether_getifp(&sc->sc_ue); struct mbuf *m; struct mbuf *mt; uint32_t crc; uint8_t x; int actlen, aframes; usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL); DPRINTFN(1, "\n"); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTFN(11, "transfer complete: %u bytes in %u frames\n", actlen, aframes); ifp->if_opackets++; /* free all previous TX buffers */ cdce_free_queue(sc->sc_tx_buf, CDCE_FRAMES_MAX); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: for (x = 0; x != CDCE_FRAMES_MAX; x++) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; if (sc->sc_flags & CDCE_FLAG_ZAURUS) { /* * Zaurus wants a 32-bit CRC appended * to every frame */ crc = cdce_m_crc32(m, 0, m->m_pkthdr.len); crc = htole32(crc); if (!m_append(m, 4, (void *)&crc)) { m_freem(m); ifp->if_oerrors++; continue; } } if (m->m_len != m->m_pkthdr.len) { mt = m_defrag(m, M_DONTWAIT); if (mt == NULL) { m_freem(m); ifp->if_oerrors++; continue; } m = mt; } if (m->m_pkthdr.len > MCLBYTES) { m->m_pkthdr.len = MCLBYTES; } sc->sc_tx_buf[x] = m; usbd_xfer_set_frame_data(xfer, x, m->m_data, m->m_len); /* * If there's a BPF listener, bounce a copy of * this frame to him: */ BPF_MTAP(ifp, m); } if (x != 0) { usbd_xfer_set_frames(xfer, x); usbd_transfer_submit(xfer); } break; default: /* Error */ DPRINTFN(11, "transfer error, %s\n", usbd_errstr(error)); /* free all previous TX buffers */ cdce_free_queue(sc->sc_tx_buf, CDCE_FRAMES_MAX); /* count output errors */ ifp->if_oerrors++; if (error != USB_ERR_CANCELLED) { /* try to clear stall first */ usbd_xfer_set_stall(xfer); goto tr_setup; } break; } }
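/*
 * The Zaurus quirk above appends a little-endian CRC32 of the frame
 * before transmission.  A bitwise CRC32 over a flat buffer using the
 * IEEE 802.3 polynomial; the driver's cdce_m_crc32() walks an mbuf chain
 * instead, and is assumed to compute this same standard CRC.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t
crc32_le(const uint8_t *buf, size_t len)
{
	uint32_t crc = 0xffffffff;
	size_t i;
	int b;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (b = 0; b < 8; b++)
			crc = (crc >> 1) ^ (0xedb88320 & -(crc & 1));
	}
	return (~crc);
}

int
main(void)
{
	const uint8_t msg[] = "123456789";

	/* 0xcbf43926 is the standard CRC-32 check value */
	printf("0x%08x\n", crc32_le(msg, 9));
	return (0);
}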
/* Resume a packet transmit operation after a memory allocation * has completed. * * This is basically a hacked up copy of snstart() which handles * a completed memory allocation the same way snstart() does. * It then passes control to snstart to handle any other queued * packets. */ static void snresume(struct ifnet *ifp) { struct sn_softc *sc = ifp->if_softc; u_int len; struct mbuf *m; struct mbuf *top; int pad; int mask; u_short length; u_short numPages; u_short pages_wanted; u_char packet_no; if (sc->pages_wanted < 0) return; pages_wanted = sc->pages_wanted; sc->pages_wanted = -1; /* * Sneak a peek at the next packet */ m = ifq_dequeue(&ifp->if_snd); if (m == NULL) { kprintf("%s: snresume() with nothing to send\n", ifp->if_xname); return; } /* * Compute the frame length and set pad to give an overall even * number of bytes. Below we assume that the packet length is even. */ for (len = 0, top = m; m; m = m->m_next) len += m->m_len; pad = (len & 1); /* * We drop packets that are too large. Perhaps we should truncate * them instead? */ if (len + pad > ETHER_MAX_LEN - ETHER_CRC_LEN) { kprintf("%s: large packet discarded (B)\n", ifp->if_xname); IFNET_STAT_INC(ifp, oerrors, 1); m_freem(top); return; } #ifdef SW_PAD /* * If HW padding is not turned on, then pad to ETHER_MIN_LEN. */ if (len < ETHER_MIN_LEN - ETHER_CRC_LEN) pad = ETHER_MIN_LEN - ETHER_CRC_LEN - len; #endif /* SW_PAD */ length = pad + len; /* * The MMU wants the number of pages to be the number of 256 byte * 'pages', minus 1 (a packet can't ever have 0 pages). We also * include space for the status word, byte count and control bytes in * the allocation request. */ numPages = (length + 6) >> 8; SMC_SELECT_BANK(2); /* * The memory allocation completed. Check the results. If it failed, * we simply set a watchdog timer and hope for the best. */ packet_no = inb(BASE + ALLOC_RESULT_REG_B); if (packet_no & ARR_FAILED) { kprintf("%s: Memory allocation failed. Weird.\n", ifp->if_xname); ifp->if_timer = 1; ifq_prepend(&ifp->if_snd, top); goto try_start; } /* * We have a packet number, so tell the card to use it. */ outb(BASE + PACKET_NUM_REG_B, packet_no); /* * Now, numPages should match the pages_wanted recorded when the * memory allocation was initiated. */ if (pages_wanted != numPages) { kprintf("%s: memory allocation wrong size. Weird.\n", ifp->if_xname); /* * If the allocation was the wrong size we simply release the * memory once it is granted. Wait for the MMU to be un-busy. */ while (inw(BASE + MMU_CMD_REG_W) & MMUCR_BUSY) /* NOTHING */ ; outw(BASE + MMU_CMD_REG_W, MMUCR_FREEPKT); ifq_prepend(&ifp->if_snd, top); return; } /* * Point to the beginning of the packet */ outw(BASE + POINTER_REG_W, PTR_AUTOINC | 0x0000); /* * Send the packet length (+6 for status, length and control byte) * and the status word (set to zeros) */ outw(BASE + DATA_REG_W, 0); outb(BASE + DATA_REG_B, (length + 6) & 0xFF); outb(BASE + DATA_REG_B, (length + 6) >> 8); /* * Push out the data to the card. */ for (m = top; m != NULL; m = m->m_next) { /* * Push out words. */ outsw(BASE + DATA_REG_W, mtod(m, caddr_t), m->m_len / 2); /* * Push out remaining byte. */ if (m->m_len & 1) outb(BASE + DATA_REG_B, *(mtod(m, caddr_t) + m->m_len - 1)); } /* * Push out padding. */ while (pad > 1) { outw(BASE + DATA_REG_W, 0); pad -= 2; } if (pad) outb(BASE + DATA_REG_B, 0); /* * Push out the control byte and the unused packet byte. The control byte is 0, * meaning the packet has an even length and no special CRC handling is * desired.
*/ outw(BASE + DATA_REG_W, 0); /* * Enable the interrupts and let the chipset deal with it. Also set a * watchdog in case we miss the interrupt. */ mask = inb(BASE + INTR_MASK_REG_B) | (IM_TX_INT | IM_TX_EMPTY_INT); outb(BASE + INTR_MASK_REG_B, mask); sc->intr_mask = mask; outw(BASE + MMU_CMD_REG_W, MMUCR_ENQUEUE); BPF_MTAP(ifp, top); IFNET_STAT_INC(ifp, opackets, 1); m_freem(top); try_start: /* * Now pass control to snstart() to queue any additional packets */ ifq_clr_oactive(&ifp->if_snd); if_devstart(ifp); /* * We've sent something, so we're active. Set a watchdog in case the * TX_EMPTY interrupt is lost. */ ifq_set_oactive(&ifp->if_snd); ifp->if_timer = 1; }
static void usie_if_rx_callback(struct usb_xfer *xfer, usb_error_t error) { struct usie_softc *sc = usbd_xfer_softc(xfer); struct ifnet *ifp = sc->sc_ifp; struct mbuf *m0; struct mbuf *m = NULL; struct usie_desc *rxd; uint32_t actlen; uint16_t err; uint16_t pkt; uint16_t ipl; uint16_t len; uint16_t diff; uint8_t pad; uint8_t ipv; usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTFN(15, "rx done, actlen=%u\n", actlen); if (actlen < sizeof(struct usie_hip)) { DPRINTF("data too short %u\n", actlen); goto tr_setup; } m = sc->sc_rxm; sc->sc_rxm = NULL; /* fall through */ case USB_ST_SETUP: tr_setup: if (sc->sc_rxm == NULL) { sc->sc_rxm = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE /* could be bigger than MCLBYTES */ ); } if (sc->sc_rxm == NULL) { DPRINTF("could not allocate Rx mbuf\n"); ifp->if_ierrors++; usbd_xfer_set_stall(xfer); usbd_xfer_set_frames(xfer, 0); } else { /* * Directly loading a mbuf cluster into DMA to * save some data copying. This works because * there is only one cluster. */ usbd_xfer_set_frame_data(xfer, 0, mtod(sc->sc_rxm, caddr_t), MIN(MJUMPAGESIZE, USIE_RXSZ_MAX)); usbd_xfer_set_frames(xfer, 1); } usbd_transfer_submit(xfer); break; default: /* Error */ DPRINTF("USB transfer error, %s\n", usbd_errstr(error)); if (error != USB_ERR_CANCELLED) { /* try to clear stall first */ usbd_xfer_set_stall(xfer); ifp->if_ierrors++; goto tr_setup; } if (sc->sc_rxm != NULL) { m_freem(sc->sc_rxm); sc->sc_rxm = NULL; } break; } if (m == NULL) return; mtx_unlock(&sc->sc_mtx); m->m_pkthdr.len = m->m_len = actlen; err = pkt = 0; /* HW can aggregate multiple frames in a single USB xfer */ for (;;) { rxd = mtod(m, struct usie_desc *); len = be16toh(rxd->hip.len) & USIE_HIP_IP_LEN_MASK; pad = (rxd->hip.id & USIE_HIP_PAD) ? 1 : 0; ipl = (len - pad - ETHER_HDR_LEN); if (ipl >= len) { DPRINTF("Corrupt frame\n"); m_freem(m); break; } diff = sizeof(struct usie_desc) + ipl + pad; if (((rxd->hip.id & USIE_HIP_MASK) != USIE_HIP_IP) || (be16toh(rxd->desc_type) & USIE_TYPE_MASK) != USIE_IP_RX) { DPRINTF("received wrong type of packet\n"); m->m_data += diff; m->m_pkthdr.len = (m->m_len -= diff); err++; if (m->m_pkthdr.len > 0) continue; m_freem(m); break; } switch (be16toh(rxd->ethhdr.ether_type)) { case ETHERTYPE_IP: ipv = NETISR_IP; break; #ifdef INET6 case ETHERTYPE_IPV6: ipv = NETISR_IPV6; break; #endif default: DPRINTF("unsupported ether type\n"); err++; break; } /* the last packet */ if (m->m_pkthdr.len <= diff) { m->m_data += (sizeof(struct usie_desc) + pad); m->m_pkthdr.len = m->m_len = ipl; m->m_pkthdr.rcvif = ifp; BPF_MTAP(sc->sc_ifp, m); netisr_dispatch(ipv, m); break; } /* copy aggregated frames to another mbuf */ m0 = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (__predict_false(m0 == NULL)) { DPRINTF("could not allocate mbuf\n"); err++; m_freem(m); break; } m_copydata(m, sizeof(struct usie_desc) + pad, ipl, mtod(m0, caddr_t)); m0->m_pkthdr.rcvif = ifp; m0->m_pkthdr.len = m0->m_len = ipl; BPF_MTAP(sc->sc_ifp, m0); netisr_dispatch(ipv, m0); m->m_data += diff; m->m_pkthdr.len = (m->m_len -= diff); } mtx_lock(&sc->sc_mtx); ifp->if_ierrors += err; ifp->if_ipackets += pkt; }
/* * Queue a packet. Start transmission if not active. * Packet is placed in Information field of PPP frame. * Called at splnet as the if->if_output handler. * Called at splnet from pppwrite(). */ static int pppoutput_serialized(struct ifnet *ifp, struct ifaltq_subque *ifsq, struct mbuf *m0, struct sockaddr *dst, struct rtentry *rtp) { struct ppp_softc *sc = &ppp_softc[ifp->if_dunit]; int protocol, address, control; u_char *cp; int error; #ifdef INET struct ip *ip; #endif struct ifqueue *ifq; enum NPmode mode; int len; struct mbuf *m; struct altq_pktattr pktattr; if (sc->sc_devp == NULL || (ifp->if_flags & IFF_RUNNING) == 0 || ((ifp->if_flags & IFF_UP) == 0 && dst->sa_family != AF_UNSPEC)) { error = ENETDOWN; /* sort of */ goto bad; } ifq_classify(&ifp->if_snd, m0, dst->sa_family, &pktattr); /* * Compute PPP header. */ m0->m_flags &= ~M_HIGHPRI; switch (dst->sa_family) { #ifdef INET case AF_INET: address = PPP_ALLSTATIONS; control = PPP_UI; protocol = PPP_IP; mode = sc->sc_npmode[NP_IP]; /* * If this packet has the "low delay" bit set in the IP header, * put it on the fastq instead. */ ip = mtod(m0, struct ip *); if (ip->ip_tos & IPTOS_LOWDELAY) m0->m_flags |= M_HIGHPRI; break; #endif #ifdef IPX case AF_IPX: /* * This is pretty bogus.. We don't have an ipxcp module in pppd * yet to configure the link parameters. Sigh. I guess a * manual ifconfig would do.... -Peter */ address = PPP_ALLSTATIONS; control = PPP_UI; protocol = PPP_IPX; mode = NPMODE_PASS; break; #endif case AF_UNSPEC: address = PPP_ADDRESS(dst->sa_data); control = PPP_CONTROL(dst->sa_data); protocol = PPP_PROTOCOL(dst->sa_data); mode = NPMODE_PASS; break; default: kprintf("%s: af%d not supported\n", ifp->if_xname, dst->sa_family); error = EAFNOSUPPORT; goto bad; } /* * Drop this packet, or return an error, if necessary. */ if (mode == NPMODE_ERROR) { error = ENETDOWN; goto bad; } if (mode == NPMODE_DROP) { error = 0; goto bad; } /* * Add PPP header. If no space in first mbuf, allocate another. * (This assumes M_LEADINGSPACE is always 0 for a cluster mbuf.) */ if (M_LEADINGSPACE(m0) < PPP_HDRLEN) { m0 = m_prepend(m0, PPP_HDRLEN, MB_DONTWAIT); if (m0 == NULL) { error = ENOBUFS; goto bad; } m0->m_len = 0; } else m0->m_data -= PPP_HDRLEN; cp = mtod(m0, u_char *); *cp++ = address; *cp++ = control; *cp++ = protocol >> 8; *cp++ = protocol & 0xff; m0->m_len += PPP_HDRLEN; len = 0; for (m = m0; m != NULL; m = m->m_next) len += m->m_len; if (sc->sc_flags & SC_LOG_OUTPKT) { kprintf("%s output: ", ifp->if_xname); pppdumpm(m0); } if ((protocol & 0x8000) == 0) { #ifdef PPP_FILTER /* * Apply the pass and active filters to the packet, * but only if it is a data packet. */ *mtod(m0, u_char *) = 1; /* indicates outbound */ if (sc->sc_pass_filt.bf_insns != NULL && bpf_filter(sc->sc_pass_filt.bf_insns, (u_char *) m0, len, 0) == 0) { error = 0; /* drop this packet */ goto bad; } /* * Update the time we sent the most recent packet. */ if (sc->sc_active_filt.bf_insns == NULL || bpf_filter(sc->sc_active_filt.bf_insns, (u_char *) m0, len, 0)) sc->sc_last_sent = time_uptime; *mtod(m0, u_char *) = address; #else /* * Update the time we sent the most recent data packet. */ sc->sc_last_sent = time_uptime; #endif /* PPP_FILTER */ } BPF_MTAP(ifp, m0); /* * Put the packet on the appropriate queue.
*/ crit_enter(); if (mode == NPMODE_QUEUE) { /* XXX we should limit the number of packets on this queue */ *sc->sc_npqtail = m0; m0->m_nextpkt = NULL; sc->sc_npqtail = &m0->m_nextpkt; } else { /* fastq and if_snd are emptied at spl[soft]net now */ if ((m0->m_flags & M_HIGHPRI) && !ifq_is_enabled(&sc->sc_if.if_snd)) { ifq = &sc->sc_fastq; if (IF_QFULL(ifq) && dst->sa_family != AF_UNSPEC) { IF_DROP(ifq); m_freem(m0); error = ENOBUFS; } else { IF_ENQUEUE(ifq, m0); error = 0; } } else { ASSERT_ALTQ_SQ_SERIALIZED_HW(ifsq); error = ifsq_enqueue(ifsq, m0, &pktattr); } if (error) { crit_exit(); IFNET_STAT_INC(&sc->sc_if, oerrors, 1); sc->sc_stats.ppp_oerrors++; return (error); } (*sc->sc_start)(sc); } getmicrotime(&ifp->if_lastchange); IFNET_STAT_INC(ifp, opackets, 1); IFNET_STAT_INC(ifp, obytes, len); crit_exit(); return (0); bad: m_freem(m0); return (error); }
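/*
 * The PPP framing built in pppoutput_serialized() is just four bytes:
 * address, control, and the 16-bit protocol in network byte order.  The
 * constants are the standard PPP values (0xff all-stations, 0x03 UI,
 * 0x0021 for IP).
 */
#include <stdint.h>
#include <stdio.h>

#define PPP_ALLSTATIONS	0xff
#define PPP_UI		0x03
#define PPP_IP		0x0021

static void
ppp_fill_hdr(uint8_t *cp, int address, int control, int protocol)
{
	*cp++ = (uint8_t)address;
	*cp++ = (uint8_t)control;
	*cp++ = (uint8_t)(protocol >> 8);
	*cp = (uint8_t)(protocol & 0xff);
}

int
main(void)
{
	uint8_t hdr[4];

	ppp_fill_hdr(hdr, PPP_ALLSTATIONS, PPP_UI, PPP_IP);
	printf("%02x %02x %02x %02x\n",
	    hdr[0], hdr[1], hdr[2], hdr[3]);	/* ff 03 00 21 */
	return (0);
}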
static void ed_start_locked(struct ifnet *ifp) { struct ed_softc *sc = ifp->if_softc; struct mbuf *m0, *m; bus_size_t buffer; int len; ED_ASSERT_LOCKED(sc); outloop: /* * First, see if there are buffered packets and an idle transmitter - * should never happen at this point. */ if (sc->txb_inuse && (sc->xmit_busy == 0)) { printf("ed: packets buffered, but transmitter idle\n"); ed_xmit(sc); } /* * See if there is room to put another packet in the buffer. */ if (sc->txb_inuse == sc->txb_cnt) { /* * No room. Indicate this to the outside world and exit. */ ifp->if_drv_flags |= IFF_DRV_OACTIVE; return; } IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == 0) { /* * We are using the !OACTIVE flag to indicate to the outside * world that we can accept an additional packet rather than * that the transmitter is _actually_ active. Indeed, the * transmitter may be active, but if we haven't filled all the * buffers with data then we still want to accept more. */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; return; } /* * Copy the mbuf chain into the transmit buffer */ m0 = m; /* txb_new points to next open buffer slot */ buffer = sc->mem_start + (sc->txb_new * ED_TXBUF_SIZE * ED_PAGE_SIZE); len = sc->sc_write_mbufs(sc, m, buffer); if (len == 0) { m_freem(m0); goto outloop; } sc->txb_len[sc->txb_new] = max(len, (ETHER_MIN_LEN-ETHER_CRC_LEN)); sc->txb_inuse++; /* * Point to next buffer slot and wrap if necessary. */ sc->txb_new++; if (sc->txb_new == sc->txb_cnt) sc->txb_new = 0; if (sc->xmit_busy == 0) ed_xmit(sc); /* * Tap off here if there is a bpf listener. */ BPF_MTAP(ifp, m0); m_freem(m0); /* * Loop back to the top to possibly buffer more packets */ goto outloop; }