static int
nicvf_if_transmit(struct ifnet *ifp, struct mbuf *mbuf)
{
	struct nicvf *nic = if_getsoftc(ifp);
	struct queue_set *qs = nic->qs;
	struct snd_queue *sq;
	struct mbuf *mtmp;
	int qidx;
	int err = 0;

	if (__predict_false(qs == NULL)) {
		panic("%s: missing queue set for %s", __func__,
		    device_get_nameunit(nic->dev));
	}

	/* Select queue */
	if (M_HASHTYPE_GET(mbuf) != M_HASHTYPE_NONE)
		qidx = mbuf->m_pkthdr.flowid % qs->sq_cnt;
	else
		qidx = curcpu % qs->sq_cnt;

	sq = &qs->sq[qidx];

	/* Checksum offload rewrites headers; duplicate read-only chains. */
	if (mbuf->m_next != NULL &&
	    (mbuf->m_pkthdr.csum_flags &
	    (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP)) != 0) {
		if (M_WRITABLE(mbuf) == 0) {
			mtmp = m_dup(mbuf, M_NOWAIT);
			m_freem(mbuf);
			if (mtmp == NULL)
				return (ENOBUFS);
			mbuf = mtmp;
		}
	}

	/*
	 * Try to enqueue the packet to the ring buffer.
	 * If the driver is not active, the link is down, or the enqueue
	 * operation failed, return with the appropriate error code.
	 */
	err = drbr_enqueue(ifp, sq->br, mbuf);
	if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING) || !nic->link_up || (err != 0))
		return (err);

	if (NICVF_TX_TRYLOCK(sq) != 0) {
		err = nicvf_xmit_locked(sq);
		NICVF_TX_UNLOCK(sq);
		return (err);
	} else
		taskqueue_enqueue(sq->snd_taskq, &sq->snd_task);

	return (0);
}
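/*
 * For context: nicvf_xmit_locked() above is expected to drain sq->br with
 * the TX lock held.  A minimal sketch of such a drbr drain loop follows;
 * nicvf_tx_mbuf_locked() is a hypothetical helper standing in for the
 * hardware enqueue step, which may consume or replace the mbuf chain.
 */
static int
nicvf_xmit_locked_sketch(struct snd_queue *sq)
{
	struct nicvf *nic = sq->nic;
	struct ifnet *ifp = nic->ifp;
	struct mbuf *next;
	int err = 0;

	/* Caller holds the TX lock. */
	while ((next = drbr_peek(ifp, sq->br)) != NULL) {
		/* Send a copy of the frame to BPF listeners. */
		ETHER_BPF_MTAP(ifp, next);

		err = nicvf_tx_mbuf_locked(sq, &next);	/* illustrative */
		if (err != 0) {
			/*
			 * On failure the helper either freed the mbuf
			 * (drop it from the ring) or left it intact
			 * (put it back for a later retry).
			 */
			if (next == NULL)
				drbr_advance(ifp, sq->br);
			else
				drbr_putback(ifp, sq->br, next);
			break;
		}
		drbr_advance(ifp, sq->br);
	}
	return (err);
}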
static void
awg_start_locked(struct awg_softc *sc)
{
	struct mbuf *m;
	uint32_t val;
	if_t ifp;
	int cnt, nsegs;

	AWG_ASSERT_LOCKED(sc);

	if (!sc->link)
		return;

	ifp = sc->ifp;

	if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	for (cnt = 0; ; cnt++) {
		/* Stop before the descriptor ring can overflow. */
		if (sc->tx.queued >= TX_DESC_COUNT - TX_MAX_SEGS) {
			if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
			break;
		}

		m = if_dequeue(ifp);
		if (m == NULL)
			break;

		/* DMA mapping failed; return the packet to the queue. */
		nsegs = awg_setup_txbuf(sc, sc->tx.cur, &m);
		if (nsegs == 0) {
			if_sendq_prepend(ifp, m);
			break;
		}
		if_bpfmtap(ifp, m);
		sc->tx.cur = TX_SKIP(sc->tx.cur, nsegs);
	}

	if (cnt != 0) {
		bus_dmamap_sync(sc->tx.desc_tag, sc->tx.desc_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Start and run TX DMA */
		val = RD4(sc, EMAC_TX_CTL_1);
		WR4(sc, EMAC_TX_CTL_1, val | TX_DMA_START);
	}
}
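/*
 * The locked helper above is normally reached through a thin if_start
 * wrapper; a minimal sketch, assuming AWG_LOCK/AWG_UNLOCK are the macros
 * behind the AWG_ASSERT_LOCKED() used above:
 */
static void
awg_start(if_t ifp)
{
	struct awg_softc *sc;

	sc = if_getsoftc(ifp);

	AWG_LOCK(sc);
	awg_start_locked(sc);
	AWG_UNLOCK(sc);
}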
static void
awg_tick(void *softc)
{
	struct awg_softc *sc;
	struct mii_data *mii;
	if_t ifp;
	int link;

	sc = softc;
	ifp = sc->ifp;
	mii = device_get_softc(sc->miibus);

	AWG_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
		return;

	/* Kick TX if the link just came up during mii_tick(). */
	link = sc->link;
	mii_tick(mii);
	if (sc->link && !link)
		awg_start_locked(sc);

	callout_reset(&sc->stat_ch, hz, awg_tick, sc);
}
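/*
 * awg_tick() runs from a callout, yet asserts the driver lock; that only
 * holds if the callout was tied to the driver mutex at attach time.  A
 * minimal sketch of that setup, assuming the mutex field is sc->mtx (the
 * name is an assumption; it is whatever AWG_LOCK() operates on):
 */
static void
awg_arm_tick_sketch(struct awg_softc *sc)
{
	/* Tie the callout to the driver mutex so awg_tick() runs locked. */
	callout_init_mtx(&sc->stat_ch, &sc->mtx, 0);
	callout_reset(&sc->stat_ch, hz, awg_tick, sc);
}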
static void
awg_update_link_locked(struct awg_softc *sc)
{
	struct mii_data *mii;
	uint32_t val;

	AWG_ASSERT_LOCKED(sc);

	if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0)
		return;
	mii = device_get_softc(sc->miibus);

	if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
	    (IFM_ACTIVE | IFM_AVALID)) {
		switch (IFM_SUBTYPE(mii->mii_media_active)) {
		case IFM_1000_T:
		case IFM_1000_SX:
		case IFM_100_TX:
		case IFM_10_T:
			sc->link = 1;
			break;
		default:
			sc->link = 0;
			break;
		}
	} else
		sc->link = 0;

	if (sc->link == 0)
		return;

	val = RD4(sc, EMAC_BASIC_CTL_0);
	val &= ~(BASIC_CTL_SPEED | BASIC_CTL_DUPLEX);

	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX)
		val |= BASIC_CTL_SPEED_1000 << BASIC_CTL_SPEED_SHIFT;
	else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX)
		val |= BASIC_CTL_SPEED_100 << BASIC_CTL_SPEED_SHIFT;
	else
		val |= BASIC_CTL_SPEED_10 << BASIC_CTL_SPEED_SHIFT;

	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val |= BASIC_CTL_DUPLEX;

	WR4(sc, EMAC_BASIC_CTL_0, val);

	val = RD4(sc, EMAC_RX_CTL_0);
	val &= ~RX_FLOW_CTL_EN;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0)
		val |= RX_FLOW_CTL_EN;
	WR4(sc, EMAC_RX_CTL_0, val);

	val = RD4(sc, EMAC_TX_FLOW_CTL);
	val &= ~(PAUSE_TIME | TX_FLOW_CTL_EN);
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0)
		val |= TX_FLOW_CTL_EN;
	if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0)
		val |= awg_pause_time << PAUSE_TIME_SHIFT;
	WR4(sc, EMAC_TX_FLOW_CTL, val);
}
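/*
 * awg_update_link_locked() is typically invoked from the miibus status
 * change callback.  The mii_tick()/mii_mediachg() paths that trigger it
 * already hold the driver lock (see awg_tick() above), so a sketch of the
 * callback needs no locking of its own; this wiring is an assumption, not
 * shown in the listing:
 */
static void
awg_miibus_statchg(device_t dev)
{
	struct awg_softc *sc;

	sc = device_get_softc(dev);

	awg_update_link_locked(sc);
}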
static void
nicvf_if_init_locked(struct nicvf *nic)
{
	struct queue_set *qs = nic->qs;
	struct ifnet *ifp;
	int qidx;
	int err;
	caddr_t if_addr;

	NICVF_CORE_LOCK_ASSERT(nic);
	ifp = nic->ifp;

	if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0)
		nicvf_stop_locked(nic);

	err = nicvf_enable_misc_interrupt(nic);
	if (err != 0) {
		if_printf(ifp, "Could not reenable Mbox interrupt\n");
		return;
	}

	/* Get the latest MAC address */
	if_addr = if_getlladdr(ifp);
	/* Update MAC address if changed */
	if (memcmp(nic->hwaddr, if_addr, ETHER_ADDR_LEN) != 0) {
		memcpy(nic->hwaddr, if_addr, ETHER_ADDR_LEN);
		nicvf_hw_set_mac_addr(nic, if_addr);
	}

	/* Initialize the queues */
	err = nicvf_init_resources(nic);
	if (err != 0)
		goto error;

	/* Make sure queue initialization is written */
	wmb();

	nicvf_reg_write(nic, NIC_VF_INT, ~0UL);
	/* Enable Qset err interrupt */
	nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);

	/* Enable completion queue interrupt */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);

	/* Enable RBDR threshold interrupt */
	for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
		nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);

	nic->drv_stats.txq_stop = 0;
	nic->drv_stats.txq_wake = 0;

	/* Activate network interface */
	if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);

	/* Schedule callout to update stats */
	callout_reset(&nic->stats_callout, hz, nicvf_tick_stats, nic);

	return;

error:
	/* Something went very wrong. Disable this ifnet for good */
	if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING);
}
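/*
 * The locked init above is reached through the driver's if_init handler,
 * which nicvf_if_ioctl() below calls as nicvf_if_init().  A minimal
 * sketch of that wrapper, assuming the NICVF_CORE_LOCK macros asserted
 * above:
 */
static void
nicvf_if_init(void *if_softc)
{
	struct nicvf *nic = if_softc;

	NICVF_CORE_LOCK(nic);
	nicvf_if_init_locked(nic);
	NICVF_CORE_UNLOCK(nic);
}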
static int
nicvf_if_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct nicvf *nic;
	struct rcv_queue *rq;
	struct ifreq *ifr;
	uint32_t flags;
	int mask, err;
	int rq_idx;
#if defined(INET) || defined(INET6)
	struct ifaddr *ifa;
	boolean_t avoid_reset = FALSE;
#endif

	nic = if_getsoftc(ifp);
	ifr = (struct ifreq *)data;
#if defined(INET) || defined(INET6)
	ifa = (struct ifaddr *)data;
#endif
	err = 0;

	switch (cmd) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			avoid_reset = TRUE;
#endif
#ifdef INET6
		if (ifa->ifa_addr->sa_family == AF_INET6)
			avoid_reset = TRUE;
#endif
#if defined(INET) || defined(INET6)
		/* Avoid reinitialization unless it's necessary */
		if (avoid_reset) {
			if_setflagbits(ifp, IFF_UP, 0);
			if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
				nicvf_if_init(nic);
#ifdef INET
			if (!(if_getflags(ifp) & IFF_NOARP))
				arp_ifinit(ifp, ifa);
#endif
			return (0);
		}
#endif
		err = ether_ioctl(ifp, cmd, data);
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu < NIC_HW_MIN_FRS ||
		    ifr->ifr_mtu > NIC_HW_MAX_FRS) {
			err = EINVAL;
		} else {
			NICVF_CORE_LOCK(nic);
			err = nicvf_update_hw_max_frs(nic, ifr->ifr_mtu);
			if (err == 0)
				if_setmtu(ifp, ifr->ifr_mtu);
			NICVF_CORE_UNLOCK(nic);
		}
		break;

	case SIOCSIFFLAGS:
		NICVF_CORE_LOCK(nic);
		if (if_getflags(ifp) & IFF_UP) {
			if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
				flags = if_getflags(ifp) ^ nic->if_flags;
				if ((nic->if_flags & if_getflags(ifp)) &
				    IFF_PROMISC) {
					/* Change promiscuous mode */
#if 0 /* ARM64TODO */
					nicvf_set_promiscous(nic);
#endif
				}

				if ((nic->if_flags ^ if_getflags(ifp)) &
				    IFF_ALLMULTI) {
					/* Change multicast settings */
#if 0 /* ARM64TODO */
					nicvf_set_multicast(nic);
#endif
				}
			} else {
				nicvf_if_init_locked(nic);
			}
		} else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
			nicvf_stop_locked(nic);

		nic->if_flags = if_getflags(ifp);
		NICVF_CORE_UNLOCK(nic);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
#if 0
			NICVF_CORE_LOCK(nic);
			/* ARM64TODO */
			nicvf_set_multicast(nic);
			NICVF_CORE_UNLOCK(nic);
#endif
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		err = ifmedia_ioctl(ifp, ifr, &nic->if_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
		if (mask & IFCAP_VLAN_MTU) {
			/* No work to do except acknowledge the change. */
			if_togglecapenable(ifp, IFCAP_VLAN_MTU);
		}

		if (mask & IFCAP_TXCSUM)
			if_togglecapenable(ifp, IFCAP_TXCSUM);
		if (mask & IFCAP_RXCSUM)
			if_togglecapenable(ifp, IFCAP_RXCSUM);
		if ((mask & IFCAP_TSO4) && nic->hw_tso)
			if_togglecapenable(ifp, IFCAP_TSO4);
		if (mask & IFCAP_LRO) {
			/*
			 * Lock the driver for a moment to avoid
			 * mismatch in per-queue settings.
			 */
			NICVF_CORE_LOCK(nic);
			if_togglecapenable(ifp, IFCAP_LRO);
			if ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0) {
				/*
				 * Now toggle LRO for subsequent packets.
				 * Atomicity of this change is not necessary
				 * as we don't need a precise toggle of this
				 * feature for all threads processing the
				 * completion queue.
				 */
				for (rq_idx = 0;
				    rq_idx < nic->qs->rq_cnt; rq_idx++) {
					rq = &nic->qs->rq[rq_idx];
					rq->lro_enabled = !rq->lro_enabled;
				}
			}
			NICVF_CORE_UNLOCK(nic);
		}
		break;

	default:
		err = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (err);
}
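/*
 * How these handlers plug into the ifnet is not shown in the listing; a
 * minimal attach-time sketch using the standard drv-API accessors (the
 * function is hypothetical, the handler names are the ones defined
 * above):
 */
static void
nicvf_setup_ifnet_sketch(struct nicvf *nic)
{
	struct ifnet *ifp;

	ifp = if_alloc(IFT_ETHER);
	if_setsoftc(ifp, nic);
	if_setinitfn(ifp, nicvf_if_init);
	if_setioctlfn(ifp, nicvf_if_ioctl);
	if_settransmitfn(ifp, nicvf_if_transmit);
	nic->ifp = ifp;
}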