/*
 * rtwn_free_rx_list: release all DMA resources owned by the RX ring.
 *
 * Tears down the descriptor ring first (unload map, unmap and free the
 * descriptor memory, destroy the map), then frees any mbufs still attached
 * to the per-slot RX buffers and destroys their DMA maps.  Runs at splnet()
 * so the interrupt path cannot touch the ring while it is dismantled.
 */
void
rtwn_free_rx_list(struct rtwn_pci_softc *sc)
{
	struct rtwn_rx_ring *rx_ring = &sc->rx_ring;
	struct rtwn_rx_data *rx_data;
	int i, s;

	s = splnet();

	if (rx_ring->map) {
		if (rx_ring->desc) {
			/* Unload before unmap/free: required bus_dma order. */
			bus_dmamap_unload(sc->sc_dmat, rx_ring->map);
			bus_dmamem_unmap(sc->sc_dmat, (caddr_t)rx_ring->desc,
			    sizeof (struct r92c_rx_desc_pci) *
			    RTWN_RX_LIST_COUNT);
			bus_dmamem_free(sc->sc_dmat, &rx_ring->seg,
			    rx_ring->nsegs);
			rx_ring->desc = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, rx_ring->map);
		rx_ring->map = NULL;
	}

	/* Free any receive mbufs still hanging off the RX slots. */
	for (i = 0; i < RTWN_RX_LIST_COUNT; i++) {
		rx_data = &rx_ring->rx_data[i];

		if (rx_data->m != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rx_data->map);
			m_freem(rx_data->m);
			rx_data->m = NULL;
		}
		bus_dmamap_destroy(sc->sc_dmat, rx_data->map);
		rx_data->map = NULL;
	}

	splx(s);
}
/*
 * wi_usb_txeof: USB transfer-completion callback for the TX pipe.
 *
 * On normal completion this simply drops the spl and returns; on error it
 * logs the failure and, for a stalled endpoint, schedules an asynchronous
 * clear-stall (bracketed by the driver refcount so a concurrent detach can
 * be woken).  Cancellation and not-started statuses are silently ignored
 * since they occur during shutdown.
 */
void
wi_usb_txeof(usbd_xfer_handle xfer, usbd_private_handle priv,
    usbd_status status)
{
	struct wi_usb_chain *c = priv;
	struct wi_usb_softc *sc = c->wi_usb_sc;
	int s;

	if (sc->wi_usb_dying)
		return;

	s = splnet();

	DPRINTFN(10,("%s: %s: enter status=%d\n", sc->wi_usb_dev.dv_xname,
	    __func__, status));

	if (status != USBD_NORMAL_COMPLETION) {
		/* Normal teardown paths: nothing to report. */
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) {
			splx(s);
			return;
		}
		printf("%s: usb error on tx: %s\n", sc->wi_usb_dev.dv_xname,
		    usbd_errstr(status));
		if (status == USBD_STALLED) {
			/* Refcount guards against detach during the async call. */
			sc->wi_usb_refcnt++;
			usbd_clear_endpoint_stall_async(
			    sc->wi_usb_ep[WI_USB_ENDPT_TX]);
			if (--sc->wi_usb_refcnt < 0)
				usb_detach_wakeup(&sc->wi_usb_dev);
		}
		splx(s);
		return;
	}

	splx(s);
}
/*
 * ppp_clone_create: if_clone backend — create and attach a new ppp(4)
 * interface for the given unit.
 *
 * Allocates a zeroed softc, fills in the ifnet callbacks and parameters,
 * initializes the input/fast/raw queues, attaches the interface (and its
 * bpf tap), and finally links the softc onto the global list at splnet().
 * Returns 0 on success or ENOMEM if the softc allocation fails.
 */
int
ppp_clone_create(struct if_clone *ifc, int unit)
{
	struct ppp_softc *sc;
	int s;

	sc = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (!sc)
		return (ENOMEM);

	sc->sc_unit = unit;
	snprintf(sc->sc_if.if_xname, sizeof sc->sc_if.if_xname, "%s%d",
	    ifc->ifc_name, unit);
	sc->sc_if.if_softc = sc;
	sc->sc_if.if_mtu = PPP_MTU;
	sc->sc_if.if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	sc->sc_if.if_type = IFT_PPP;
	sc->sc_if.if_hdrlen = PPP_HDRLEN;
	sc->sc_if.if_ioctl = pppsioctl;
	sc->sc_if.if_output = pppoutput;
	sc->sc_if.if_start = ppp_ifstart;
	/* Queues must exist before if_attach makes the interface visible. */
	IFQ_SET_MAXLEN(&sc->sc_if.if_snd, IFQ_MAXLEN);
	mq_init(&sc->sc_inq, IFQ_MAXLEN, IPL_NET);
	IFQ_SET_MAXLEN(&sc->sc_fastq, IFQ_MAXLEN);
	ppp_pkt_list_init(&sc->sc_rawq, IFQ_MAXLEN);
	IFQ_SET_READY(&sc->sc_if.if_snd);
	if_attach(&sc->sc_if);
	if_alloc_sadl(&sc->sc_if);
#if NBPFILTER > 0
	bpfattach(&sc->sc_bpf, &sc->sc_if, DLT_PPP, PPP_HDRLEN);
#endif
	s = splnet();
	LIST_INSERT_HEAD(&ppp_softc_list, sc, sc_list);
	splx(s);

	return (0);
}
/* ARGSUSED */
/*
 * loioctl: ioctl handler for the loopback interface.
 *
 * SIOCSIFADDR marks the interface running and announces it (RTM_IFINFO)
 * at splnet(), then installs the loopback route-request hook on the new
 * address.  Multicast add/delete always succeed; SIOCSIFMTU sets the MTU
 * unchecked.  Returns 0 on success or ENOTTY for unknown commands.
 */
int
loioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifaddr *ifa;
	struct ifreq *ifr;
	int s, error = 0;

	switch (cmd) {

	case SIOCSIFADDR:
		s = splnet();
		ifp->if_flags |= IFF_RUNNING;
		if_up(ifp);		/* send up RTM_IFINFO */
		splx(s);
		ifa = (struct ifaddr *)data;
		/* Compare against NULL, not the integer literal 0. */
		if (ifa != NULL)
			ifa->ifa_rtrequest = lortrequest;
		/*
		 * Everything else is done at a higher level.
		 */
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Loopback accepts all multicast memberships. */
		break;

	case SIOCSIFMTU:
		ifr = (struct ifreq *)data;
		ifp->if_mtu = ifr->ifr_mtu;
		break;

	default:
		error = ENOTTY;
	}
	return (error);
}
/*---------------------------------------------------------------------------*
 *	i4bputqueue_hipri - put message into front of queue to userland
 *
 *	If the read device is not open the mbuf is dropped.  When the queue
 *	is full the oldest entry is evicted to make room, then the message is
 *	prepended (high priority) at splnet().  Finally any sleeping reader
 *	or select()/poll() waiter is woken.
 *---------------------------------------------------------------------------*/
void
i4bputqueue_hipri(struct mbuf *m)
{
	int x;

	if(!openflag)
	{
		/* Nobody listening: discard. */
		i4b_Dfreembuf(m);
		return;
	}

	x = splnet();

	if(IF_QFULL(&i4b_rdqueue))
	{
		/* Queue full: drop the oldest queued message. */
		struct mbuf *m1;
		IF_DEQUEUE(&i4b_rdqueue, m1);
		i4b_Dfreembuf(m1);
		NDBGL4(L4_ERR, "ERROR, queue full, removing entry!");
	}

	IF_PREPEND(&i4b_rdqueue, m);

	splx(x);

	if(readflag)
	{
		readflag = 0;
		wakeup((void *) &i4b_rdqueue);
	}

	if(selflag)
	{
		selflag = 0;
		selnotify(&select_rd_info, 0, 0);
	}
}
/*
 * mplsintr: MPLS software-interrupt handler.
 *
 * Drains the MPLS input queue, dequeueing each packet at splnet() and
 * feeding it to mpls_input().  Panics if a packet arrives without a
 * packet header or receive interface, since both are required downstream.
 */
void
mplsintr(void)
{
	struct mbuf *m;
	int s;

	while (!IF_IS_EMPTY(&mplsintrq)) {
		/* Raise spl only around the dequeue itself. */
		s = splnet();
		IF_DEQUEUE(&mplsintrq, m);
		splx(s);
		if (!m)
			return;

		if (((m->m_flags & M_PKTHDR) == 0) ||
		    (m->m_pkthdr.rcvif == 0))
			panic("mplsintr(): no pkthdr or rcvif");

#ifdef MBUFTRACE
		m_claimm(m, &mpls_owner);
#endif
		mpls_input(m->m_pkthdr.rcvif, m);
	}
}
/*
 * Support for poll() system call
 *
 * bpfpoll: report readability of a bpf descriptor.  Only POLLIN/POLLRDNORM
 * are supported (bpf is read-only from poll's perspective).  A descriptor
 * is readable when the hold buffer has data, or — in immediate mode — when
 * the store buffer does.  Otherwise the caller is recorded for selwakeup
 * and, if a read timeout is configured, the wait start time is noted.
 */
int
bpfpoll(dev_t dev, int events, struct proc *p)
{
	struct bpf_d *d;
	int s, revents;

	revents = events & (POLLIN | POLLRDNORM);
	if (revents == 0)
		return (0);		/* only support reading */

	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	d = bpfilter_lookup(minor(dev));
	/*
	 * XXX The USB stack manages it to trigger some race condition
	 * which causes bpfilter_lookup to return NULL when a USB device
	 * gets detached while it is up and has an open bpf handler (e.g.
	 * dhclient).  We still should recheck if we can fix the root
	 * cause of this issue.
	 */
	if (d == NULL)
		return (POLLERR);
	s = splnet();
	if (d->bd_hlen == 0 && (!d->bd_immediate || d->bd_slen == 0)) {
		revents = 0;		/* no data waiting */
		/*
		 * if there's a timeout, mark the time we started waiting.
		 */
		if (d->bd_rtout != -1 && d->bd_rdStart == 0)
			d->bd_rdStart = ticks;
		selrecord(p, &d->bd_sel);
	}
	splx(s);
	return (revents);
}
/*
 * url_ioctl: ioctl entry point for the url(4) USB Ethernet interface.
 *
 * Rejects all requests with EIO once the device is detaching, otherwise
 * hands the request to ether_ioctl() at splnet().  An ENETRESET result
 * (multicast filter change) is absorbed by reprogramming the hardware
 * filter when the interface is running.
 */
Static int
url_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct url_softc *sc = ifp->if_softc;
	int error = 0;
	int s;

	DPRINTF(("%s: %s: enter\n", device_xname(sc->sc_dev), __func__));

	if (sc->sc_dying)
		return (EIO);

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		error = 0;
		/* Filter change only matters while the chip is running. */
		if (ifp->if_flags & IFF_RUNNING)
			url_setmulti(sc);
	}

	splx(s);

	return (error);
}
STATUS ifIndexToIfName ( unsigned short ifIndex,/* Interface index */ char * ifName /* Where the name is to be stored */ ) { FAST struct ifnet * ifp; int s; if (ifName == NULL) return (ERROR); s = splnet (); if ((ifp = ifIndexToIfp (ifIndex)) == NULL) { splx (s); return (ERROR); } sprintf (ifName, "%s%d", ifp->if_name, ifp->if_unit); splx (s); return (OK); }
/*
 * Add a new protocol domain to the list of supported domains
 * Note: you can't unload it again because a socket may be using it.
 * XXX can't fail at this time.
 */
static void
net_init_domain(struct domain *dp)
{
	register struct protosw *pr;
	int s;

	s = splnet();
	/* Domain-level initializer runs before per-protocol ones. */
	if (dp->dom_init)
		(*dp->dom_init)();
	for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++){
		if (pr->pr_usrreqs == 0)
			panic("domaininit: %ssw[%d] has no usrreqs!",
			    dp->dom_name,
			    (int)(pr - dp->dom_protosw));
		if (pr->pr_init)
			(*pr->pr_init)();
	}
	/*
	 * update global information about maximums
	 */
	max_hdr = max_linkhdr + max_protohdr;
	max_datalen = MHLEN - max_hdr;
	splx(s);
}
/*
 * nsmb_dev_open: open entry point for the nsmb control device.
 *
 * Rejects a second open of the same minor with EBUSY.  On first open,
 * allocates a softc, registers it in the device table, zeroes it, and
 * marks it open at splnet() with the connection level reset to -1.
 */
int
nsmb_dev_open(dev_t dev, int oflags, int devtype, struct proc *p)
{
	struct smb_dev *sdp;
	int s;

	sdp = SMB_GETDEV(dev);
	if (sdp && (sdp->sd_flags & NSMBFL_OPEN))
		return EBUSY;
	if (sdp == NULL) {
		sdp = malloc(sizeof(*sdp), M_SMBDATA, M_WAITOK);
		smb_devtbl[minor(dev)] = (void*)sdp;
	}
#ifndef __NetBSD__
	/*
	 * XXX: this is just crazy - make a device for an already passed
	 * device...  someone should take care of it.
	 * NOTE(review): this branch references `cred`, which is not a
	 * parameter of this function — presumably dead/unbuilt code on
	 * this platform; confirm before enabling.
	 */
	if ((dev->si_flags & SI_NAMED) == 0)
		make_dev(&nsmb_cdevsw, minor(dev), cred->cr_uid,
		    cred->cr_gid, 0700, NSMB_NAME"%d", dev2unit(dev));
#endif /* !__NetBSD__ */
	/* Wipe any stale state from a previous open before marking open. */
	bzero(sdp, sizeof(*sdp));
/*
	STAILQ_INIT(&sdp->sd_rqlist);
	STAILQ_INIT(&sdp->sd_rplist);
	bzero(&sdp->sd_pollinfo, sizeof(struct selinfo));
*/
	s = splnet();
	sdp->sd_level = -1;
	sdp->sd_flags |= NSMBFL_OPEN;
	splx(s);
	return 0;
}
/*---------------------------------------------------------------------------*
 *	open trace device
 *
 *	Fails with ENXIO for an out-of-range unit, and with EBUSY if the
 *	unit is already open or is currently claimed by analyze mode.
 *	Otherwise the unit is marked open at splnet().
 *---------------------------------------------------------------------------*/
PDEVSTATIC int
isdntrcopen(dev_t dev, int flag, int fmt, struct proc *p)
{
	int s;
	int unit = minor(dev);

	if (unit >= NISDNTRC)
		return (ENXIO);

	if (device_state[unit] & ST_ISOPEN)
		return (EBUSY);

	/* Units reserved by analyze mode may not be opened directly. */
	if (analyzemode &&
	    (unit == outunit || unit == rxunit || unit == txunit))
		return (EBUSY);

	s = splnet();
	device_state[unit] = ST_ISOPEN;
	splx(s);

	return (0);
}
/*
 * sq_ioctl: ioctl entry point for the sq(4) interface.
 *
 * Delegates to ether_ioctl() at splnet().  When that reports ENETRESET
 * (multicast list changed), the interface is re-initialized if running so
 * the hardware filter picks up the new list; otherwise the change is a
 * no-op until the next init.
 */
int
sq_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int error = 0;
	int s;

	SQ_TRACE(SQ_IOCTL, (struct sq_softc *)ifp->if_softc, 0, 0);

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET)
		error = (ifp->if_flags & IFF_RUNNING) ? sq_init(ifp) : 0;

	splx(s);
	return error;
}
/*
 * del_entry: remove the first firewall rule matching `number` from the
 * chain.  The wildcard number (u_short)-1 never matches anything.  The
 * unlink happens at splnet(); the memory is released after dropping the
 * spl.  Returns 0 on success, EINVAL if no rule matched.
 */
static int
del_entry(struct ip_fw_head *chainptr, u_short number)
{
	struct ip_fw_chain *fcp;
	int s;

	s = splnet();

	if (number != (u_short)-1) {
		for (fcp = chainptr->lh_first; fcp != NULL;
		    fcp = fcp->chain.le_next) {
			if (fcp->rule->fw_number != number)
				continue;
			LIST_REMOVE(fcp, chain);
			splx(s);
			free(fcp->rule, M_IPFW);
			free(fcp, M_IPFW);
			return 0;
		}
	}

	splx(s);
	return (EINVAL);
}
/*
 * octeon_eth_tick_free
 *
 * => garbage collect send gather buffer / mbuf
 * => called at softclock
 *
 * Flushes the pending send queue (prefetch/fetch/flush sequence) at
 * splnet(), then re-arms itself with a period that shrinks as the
 * external-callback count grows, clamped to a minimum of 10 ticks.
 */
void
octeon_eth_tick_free(void *arg)
{
	struct octeon_eth_softc *sc = arg;
	int timo;
	int s;

	s = splnet();
	/* XXX */
	if (ml_len(&sc->sc_sendq) > 0) {
		octeon_eth_send_queue_flush_prefetch(sc);
		octeon_eth_send_queue_flush_fetch(sc);
		octeon_eth_send_queue_flush(sc);
	}
	/* XXX */

	/* XXX ??? */
	/* Back off less aggressively when callbacks are piling up. */
	timo = hz - (100 * sc->sc_ext_callback_cnt);
	if (timo < 10)
		timo = 10;
	timeout_add_msec(&sc->sc_tick_free_ch, 1000 * timo / hz);
	/* XXX */
	splx(s);
}
/*
 * natmintr: NATM software-interrupt handler.
 *
 * Drains the NATM input queue.  For each packet, the receiving PCB is
 * recovered from the overloaded rcvif field.  Packets for draining or
 * freed PCBs are discarded (freeing the PCB itself once its in-flight
 * count reaches zero); otherwise the packet is appended to the socket
 * receive buffer if there is room (raw PCBs get a small grace quota),
 * or dropped.
 */
void
natmintr()
{
	int s;
	struct mbuf *m;
	struct socket *so;
	struct natmpcb *npcb;

next:
	s = splnet();
	IF_DEQUEUE(&natmintrq, m);
	splx(s);
	if (m == NULL)
		return;

#ifdef DIAGNOSTIC
	if ((m->m_flags & M_PKTHDR) == 0)
		panic("natmintr no HDR");
#endif

	npcb = (struct natmpcb *) m->m_pkthdr.rcvif; /* XXX: overloaded */
	so = npcb->npcb_socket;

	s = splnet();			/* could have atm devs @ different levels */
	npcb->npcb_inq--;
	splx(s);

	if (npcb->npcb_flags & NPCB_DRAIN) {
		m_freem(m);
		/* Last in-flight packet: the PCB can now be reclaimed. */
		if (npcb->npcb_inq == 0)
			free(npcb, M_PCB);	/* done! */
		goto next;
	}

	if (npcb->npcb_flags & NPCB_FREE) {
		m_freem(m);		/* drop */
		goto next;
	}

#ifdef NEED_TO_RESTORE_IFP
	m->m_pkthdr.rcvif = npcb->npcb_ifp;
#else
#ifdef DIAGNOSTIC
	m->m_pkthdr.rcvif = NULL;	/* null it out to be safe */
#endif
#endif

	if (sbspace(&so->so_rcv) > m->m_pkthdr.len ||
	    ((npcb->npcb_flags & NPCB_RAW) != 0 &&
	    so->so_rcv.sb_cc < NPCB_RAWCC)) {
#ifdef NATM_STAT
		natm_sookcnt++;
		natm_sookbytes += m->m_pkthdr.len;
#endif
		sbappendrecord(&so->so_rcv, m);
		sorwakeup(so);
	} else {
#ifdef NATM_STAT
		natm_sodropcnt++;
		natm_sodropbytes += m->m_pkthdr.len;
#endif
		m_freem(m);
	}

	goto next;
}
/*
 * add_entry: insert a new firewall rule into the chain, keeping the list
 * ordered by rule number.
 *
 * A copy of the caller's rule is made and its counters reset.  Rule
 * number 0 requests auto-numbering: highest existing number plus 100
 * (capped below the sentinel).  Rule number (u_short)-1 is reserved and
 * rejected.  List manipulation happens at splnet().
 *
 * NOTE(review): the final insertion loop only inserts when it finds a
 * rule with a larger number — presumably a default rule numbered
 * (u_short)-1 always terminates the chain; confirm, otherwise a rule
 * numbered above all others would be silently lost (leaking fwc/ftmp).
 */
static int
add_entry(struct ip_fw_head *chainptr, struct ip_fw *frwl)
{
	struct ip_fw *ftmp = 0;
	struct ip_fw_chain *fwc = 0, *fcp, *fcpl = 0;
	u_short nbr = 0;
	int s;

	fwc = malloc(sizeof *fwc, M_IPFW, M_DONTWAIT);
	ftmp = malloc(sizeof *ftmp, M_IPFW, M_DONTWAIT);
	if (!fwc || !ftmp) {
		dprintf(("%s malloc said no\n", err_prefix));
		if (fwc)
			free(fwc, M_IPFW);
		if (ftmp)
			free(ftmp, M_IPFW);
		return (ENOSPC);
	}

	/* Copy the rule and reset its accounting state. */
	bcopy(frwl, ftmp, sizeof(struct ip_fw));
	ftmp->fw_in_if.fu_via_if.name[FW_IFNLEN - 1] = '\0';
	ftmp->fw_pcnt = 0L;
	ftmp->fw_bcnt = 0L;
	fwc->rule = ftmp;

	s = splnet();

	if (!chainptr->lh_first) {
		LIST_INSERT_HEAD(chainptr, fwc, chain);
		splx(s);
		return(0);
	} else if (ftmp->fw_number == (u_short)-1) {
		/* Reserved sentinel number: reject. */
		if (fwc)
			free(fwc, M_IPFW);
		if (ftmp)
			free(ftmp, M_IPFW);
		splx(s);
		dprintf(("%s bad rule number\n", err_prefix));
		return (EINVAL);
	}

	/* If entry number is 0, find highest numbered rule and add 100 */
	if (ftmp->fw_number == 0) {
		for (fcp = chainptr->lh_first; fcp;
		    fcp = fcp->chain.le_next) {
			if (fcp->rule->fw_number != (u_short)-1)
				nbr = fcp->rule->fw_number;
			else
				break;
		}
		if (nbr < (u_short)-1 - 100)
			nbr += 100;
		ftmp->fw_number = nbr;
	}

	/* Got a valid number; now insert it, keeping the list ordered */
	for (fcp = chainptr->lh_first; fcp; fcp = fcp->chain.le_next) {
		if (fcp->rule->fw_number > ftmp->fw_number) {
			if (fcpl) {
				LIST_INSERT_AFTER(fcpl, fwc, chain);
			} else {
				LIST_INSERT_HEAD(chainptr, fwc, chain);
			}
			break;
		} else {
			fcpl = fcp;
		}
	}

	splx(s);
	return (0);
}
int s; if (m) { if (m->m_len != sizeof(struct ip_fw)) return(EINVAL); frwl = mtod(m, struct ip_fw *); } else frwl = NULL; /* * It's possible to insert multiple chain entries with the * same number, so we don't stop after finding the first * match if zeroing a specific entry. */ s = splnet(); for (fcp = ip_fw_chain.lh_first; fcp; fcp = fcp->chain.le_next) if (!frwl || frwl->fw_number == fcp->rule->fw_number) { fcp->rule->fw_bcnt = fcp->rule->fw_pcnt = 0; fcp->rule->timestamp = 0; } splx(s); if (fw_verbose) { if (frwl) printf("ipfw: Entry %d cleared.\n", frwl->fw_number); else printf("ipfw: Accounting cleared.\n"); } return(0);
/*
 * scope6_set: install a new set of IPv6 scope zone IDs for an interface.
 *
 * Each non-zero entry in idlist that differs from the current value is
 * validated before being stored: an interface-local zone must equal the
 * interface index, and a link-local zone must name an existing interface.
 * Runs at splsoftnet()/splnet() depending on platform.  Returns 0 on
 * success or EINVAL on a bad zone ID (partial updates may remain).
 */
int
scope6_set(struct ifnet *ifp, struct scope6_id *idlist)
{
	int i, s;
	int error = 0;
	struct scope6_id *sid = SID(ifp);

	if (sid == NULL)	/* paranoid? */
		return (EINVAL);

	/*
	 * XXX: We need more consistency checks of the relationship among
	 * scopes (e.g. an organization should be larger than a site).
	 */

	/*
	 * TODO(XXX): after setting, we should reflect the changes to
	 * interface addresses, routing table entries, PCB entries...
	 */

#if defined(__NetBSD__) || defined(__OpenBSD__)
	s = splsoftnet();
#else
	s = splnet();
#endif

	for (i = 0; i < 16; i++) {
		if (idlist->s6id_list[i] &&
		    idlist->s6id_list[i] != sid->s6id_list[i]) {
			/*
			 * An interface zone ID must be the corresponding
			 * interface index by definition.
			 */
			if (i == IPV6_ADDR_SCOPE_INTFACELOCAL &&
			    idlist->s6id_list[i] != ifp->if_index) {
				splx(s);
				return (EINVAL);
			}

			if (i == IPV6_ADDR_SCOPE_LINKLOCAL) {
				if (idlist->s6id_list[i] >= if_indexlim ||
#ifdef __FreeBSD__
				    ifnet_byindex(idlist->s6id_list[i]) == NULL
#else
				    ifindex2ifnet[idlist->s6id_list[i]] == NULL
#endif
				    ) {
					/*
					 * XXX: theoretically, there should be
					 * no relationship between link IDs and
					 * interface IDs, but we check the
					 * consistency for safety in later use.
					 */
					splx(s);
					return (EINVAL);
				}
			}

			/*
			 * XXX: we must need lots of work in this case,
			 * but we simply set the new value in this initial
			 * implementation.
			 */
			sid->s6id_list[i] = idlist->s6id_list[i];
		}
	}
	splx(s);

	return (error);
}
/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 *
 * cas_init: bring the Cassini Ethernet channel up.  The whole sequence
 * runs at splnet() and follows the numbered steps from the hardware
 * manual: reset, memory setup, MAC register programming, descriptor ring
 * base addresses, interrupt masks, TX/RX DMA enable, media selection,
 * and finally the RX kick.  Always returns 0.
 */
int
cas_init(struct ifnet *ifp)
{
	struct cas_softc *sc = (struct cas_softc *)ifp->if_softc;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	int s;
	u_int max_frame_size;
	u_int32_t v;

	s = splnet();

	DPRINTF(sc, ("%s: cas_init: calling stop\n", sc->sc_dev.dv_xname));
	/*
	 * Initialization sequence. The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2. Reset the Ethernet Channel */
	cas_stop(ifp, 0);
	cas_reset(sc);
	DPRINTF(sc, ("%s: cas_init: restarting\n", sc->sc_dev.dv_xname));

	/* Re-initialize the MIF */
	cas_mifinit(sc);

	/* step 3. Setup data structures in host memory */
	cas_meminit(sc);

	/* step 4. TX MAC registers & counters */
	cas_init_regs(sc);
	max_frame_size = ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN;
	v = (max_frame_size) | (0x2000 << 16) /* Burst size */;
	bus_space_write_4(t, h, CAS_MAC_MAC_MAX_FRAME, v);

	/* step 5. RX MAC registers & counters */
	cas_setladrf(sc);

	/* step 6 & 7. Program Descriptor Ring Base Addresses */
	/* Rings must be 8K-aligned (low 13 address bits clear). */
	KASSERT((CAS_CDTXADDR(sc, 0) & 0x1fff) == 0);
	bus_space_write_4(t, h, CAS_TX_RING_PTR_HI,
	    (((uint64_t)CAS_CDTXADDR(sc,0)) >> 32));
	bus_space_write_4(t, h, CAS_TX_RING_PTR_LO, CAS_CDTXADDR(sc, 0));

	KASSERT((CAS_CDRXADDR(sc, 0) & 0x1fff) == 0);
	bus_space_write_4(t, h, CAS_RX_DRING_PTR_HI,
	    (((uint64_t)CAS_CDRXADDR(sc,0)) >> 32));
	bus_space_write_4(t, h, CAS_RX_DRING_PTR_LO, CAS_CDRXADDR(sc, 0));

	KASSERT((CAS_CDRXCADDR(sc, 0) & 0x1fff) == 0);
	bus_space_write_4(t, h, CAS_RX_CRING_PTR_HI,
	    (((uint64_t)CAS_CDRXCADDR(sc,0)) >> 32));
	bus_space_write_4(t, h, CAS_RX_CRING_PTR_LO, CAS_CDRXCADDR(sc, 0));

	if (CAS_PLUS(sc)) {
		/* Cassini+ has a second RX descriptor ring. */
		KASSERT((CAS_CDRXADDR2(sc, 0) & 0x1fff) == 0);
		bus_space_write_4(t, h, CAS_RX_DRING_PTR_HI2,
		    (((uint64_t)CAS_CDRXADDR2(sc,0)) >> 32));
		bus_space_write_4(t, h, CAS_RX_DRING_PTR_LO2,
		    CAS_CDRXADDR2(sc, 0));
	}

	/* step 8. Global Configuration & Interrupt Mask */
	bus_space_write_4(t, h, CAS_INTMASK,
	    ~(CAS_INTR_TX_INTME|CAS_INTR_TX_EMPTY|
	    CAS_INTR_TX_TAG_ERR|
	    CAS_INTR_RX_DONE|CAS_INTR_RX_NOBUF|
	    CAS_INTR_RX_TAG_ERR|
	    CAS_INTR_RX_COMP_FULL|CAS_INTR_PCS|
	    CAS_INTR_MAC_CONTROL|CAS_INTR_MIF|
	    CAS_INTR_BERR));
	bus_space_write_4(t, h, CAS_MAC_RX_MASK,
	    CAS_MAC_RX_DONE|CAS_MAC_RX_FRAME_CNT);
	bus_space_write_4(t, h, CAS_MAC_TX_MASK, CAS_MAC_TX_XMIT_DONE);
	bus_space_write_4(t, h, CAS_MAC_CONTROL_MASK, 0); /* XXXX */

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = cas_ringsize(CAS_NTXDESC /*XXX*/) << 10;
	bus_space_write_4(t, h, CAS_TX_CONFIG,
	    v|CAS_TX_CONFIG_TXDMA_EN|(1<<24)|(1<<29));
	bus_space_write_4(t, h, CAS_TX_KICK, 0);

	/* step 10. ERX Configuration */

	/* Encode Receive Descriptor ring size */
	v = cas_ringsize(CAS_NRXDESC) << CAS_RX_CONFIG_RXDRNG_SZ_SHIFT;
	if (CAS_PLUS(sc))
		v |= cas_ringsize(32) << CAS_RX_CONFIG_RXDRNG2_SZ_SHIFT;

	/* Encode Receive Completion ring size */
	v |= cas_cringsize(CAS_NRXCOMP) << CAS_RX_CONFIG_RXCRNG_SZ_SHIFT;

	/* Enable DMA */
	bus_space_write_4(t, h, CAS_RX_CONFIG,
	    v|(2<<CAS_RX_CONFIG_FBOFF_SHFT)|CAS_RX_CONFIG_RXDMA_EN);

	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	bus_space_write_4(t, h, CAS_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
	bus_space_write_4(t, h, CAS_RX_BLANKING, (6<<12)|6);

	/* step 11. Configure Media */
	mii_mediachg(&sc->sc_mii);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);
	v |= CAS_MAC_RX_ENABLE | CAS_MAC_RX_STRIP_CRC;
	bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, v);

	/* step 14. Issue Transmit Pending command */

	/* step 15. Give the receiver a swift kick */
	bus_space_write_4(t, h, CAS_RX_KICK, CAS_NRXDESC-4);
	if (CAS_PLUS(sc))
		bus_space_write_4(t, h, CAS_RX_KICK2, 4);

	/* Start the one second timer. */
	timeout_add(&sc->sc_tick_ch, hz);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	splx(s);

	return (0);
}
/*
 * Process an ioctl request.
 *
 * cas_ioctl: handles address assignment (bringing the chip up and doing
 * ARP init if needed), flag changes (restarting, stopping, or just
 * refreshing the RX filter when only ALLMULTI/PROMISC toggled), MTU
 * changes, multicast membership updates, and media ioctls.  Runs at
 * splnet(); unknown commands return ENOTTY.
 */
int
cas_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct cas_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data)) > 0) {
		splx(s);
		return (error);
	}

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			cas_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * Only the RX filter flags changed: reprogram the
			 * filter instead of a full re-init.
			 */
			if ((ifp->if_flags & IFF_RUNNING) &&
			    ((ifp->if_flags ^ sc->sc_if_flags) &
			    (IFF_ALLMULTI | IFF_PROMISC)) != 0)
				cas_setladrf(sc);
			else {
				if ((ifp->if_flags & IFF_RUNNING) == 0)
					cas_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				cas_stop(ifp, 1);
		}
		sc->sc_if_flags = ifp->if_flags;
#ifdef CAS_DEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ETHERMTU || ifr->ifr_mtu < ETHERMIN) {
			error = EINVAL;
		} else if (ifp->if_mtu != ifr->ifr_mtu) {
			ifp->if_mtu = ifr->ifr_mtu;
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				cas_setladrf(sc);
			error = 0;
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	default:
		error = ENOTTY;
		break;
	}

	splx(s);
	return (error);
}
/*
 * XXX Liang: timeout for write is not supported yet.
 *
 * libcfs_sock_write: write exactly `nob` bytes to a kernel socket.
 * Retries partial/interrupted sends (ERESTART/EINTR/EWOULDBLOCK after
 * partial progress) until all bytes are written.  Returns 0 on success
 * or a negative errno.
 */
int
libcfs_sock_write (struct socket *sock, void *buffer, int nob, int timeout)
{
	int            rc;
	CFS_DECL_NET_DATA;

	while (nob > 0) {
		struct iovec  iov = {
			.iov_base = buffer,
			.iov_len  = nob
		};
		struct  uio suio = {
			.uio_iov	= &iov,
			.uio_iovcnt	= 1,
			.uio_offset	= 0,
			.uio_resid	= nob,
			.uio_segflg	= UIO_SYSSPACE,
			.uio_rw		= UIO_WRITE,
			.uio_procp	= NULL
		};

		CFS_NET_IN;
		rc = sosend(sock, NULL, &suio, (struct mbuf *)0,
		    (struct mbuf *)0, 0);
		CFS_NET_EX;

		if (rc != 0) {
			/* Partial progress + retryable error: keep going. */
			if ( suio.uio_resid != nob && ( rc == ERESTART ||
			    rc == EINTR || rc == EWOULDBLOCK))
				rc = 0;
			if ( rc != 0 )
				return -rc;
			rc = nob - suio.uio_resid;
			buffer = ((char *)buffer) + rc;
			nob = suio.uio_resid;
			continue;
		}
		break;
	}
	return (0);
}

/*
 * XXX Liang: timeout for read is not supported yet.
 *
 * libcfs_sock_read: read exactly `nob` bytes from a kernel socket, with
 * the same retry policy as libcfs_sock_write.  Returns 0 on success or
 * a negative errno.
 */
int
libcfs_sock_read (struct socket *sock, void *buffer, int nob, int timeout)
{
	int            rc;
	CFS_DECL_NET_DATA;

	while (nob > 0) {
		struct iovec  iov = {
			.iov_base = buffer,
			.iov_len  = nob
		};
		struct uio  ruio = {
			.uio_iov	= &iov,
			.uio_iovcnt	= 1,
			.uio_offset	= 0,
			.uio_resid	= nob,
			.uio_segflg	= UIO_SYSSPACE,
			.uio_rw		= UIO_READ,
			.uio_procp	= NULL
		};

		CFS_NET_IN;
		rc = soreceive(sock, (struct sockaddr **)0, &ruio,
		    (struct mbuf **)0, (struct mbuf **)0, (int *)0);
		CFS_NET_EX;

		if (rc != 0) {
			/* Partial progress + retryable error: keep going. */
			if ( ruio.uio_resid != nob && ( rc == ERESTART ||
			    rc == EINTR || rc == EWOULDBLOCK))
				rc = 0;
			if (rc != 0)
				return -rc;
			rc = nob - ruio.uio_resid;
			buffer = ((char *)buffer) + rc;
			nob = ruio.uio_resid;
			continue;
		}
		break;
	}
	return (0);
}

/*
 * libcfs_sock_setbuf: set the socket send/receive buffer sizes (a zero
 * size leaves that direction untouched; the send size is capped at
 * KSOCK_MAX_BUF).  Returns 0 or a negative errno.
 */
int
libcfs_sock_setbuf (struct socket *sock, int txbufsize, int rxbufsize)
{
	struct sockopt  sopt;
	int             rc = 0;
	int             option;
	CFS_DECL_NET_DATA;

	bzero(&sopt, sizeof sopt);
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_val = &option;
	sopt.sopt_valsize = sizeof(option);

	if (txbufsize != 0) {
		option = txbufsize;
		if (option > KSOCK_MAX_BUF)
			option = KSOCK_MAX_BUF;
		sopt.sopt_name = SO_SNDBUF;
		CFS_NET_IN;
		rc = sosetopt(sock, &sopt);
		CFS_NET_EX;
		if (rc != 0) {
			CERROR ("Can't set send buffer %d: %d\n",
				option, rc);
			return -rc;
		}
	}

	if (rxbufsize != 0) {
		option = rxbufsize;
		sopt.sopt_name = SO_RCVBUF;
		CFS_NET_IN;
		rc = sosetopt(sock, &sopt);
		CFS_NET_EX;
		if (rc != 0) {
			CERROR ("Can't set receive buffer %d: %d\n",
				option, rc);
			return -rc;
		}
	}
	return 0;
}

/*
 * libcfs_sock_getaddr: fetch the peer (remote != 0) or local address of
 * a socket and return the IPv4 address/port in host byte order via the
 * optional `ip`/`port` out-parameters.  Returns 0 or a negative errno.
 */
int
libcfs_sock_getaddr (struct socket *sock, int remote, __u32 *ip, int *port)
{
	struct sockaddr_in *sin;
	struct sockaddr    *sa = NULL;
	int                rc;
	CFS_DECL_NET_DATA;

	if (remote != 0) {
		CFS_NET_IN;
		rc = sock->so_proto->pr_usrreqs->pru_peeraddr(sock, &sa);
		CFS_NET_EX;
		if (rc != 0) {
			if (sa) FREE(sa, M_SONAME);
			CERROR ("Error %d getting sock peer IP\n", rc);
			return -rc;
		}
	} else {
		CFS_NET_IN;
		rc = sock->so_proto->pr_usrreqs->pru_sockaddr(sock, &sa);
		CFS_NET_EX;
		if (rc != 0) {
			if (sa) FREE(sa, M_SONAME);
			CERROR ("Error %d getting sock local IP\n", rc);
			return -rc;
		}
	}
	if (sa != NULL) {
		sin = (struct sockaddr_in *)sa;
		if (ip != NULL)
			*ip = ntohl (sin->sin_addr.s_addr);
		if (port != NULL)
			*port = ntohs (sin->sin_port);
		if (sa)
			FREE(sa, M_SONAME);
	}
	return 0;
}

/*
 * libcfs_sock_getbuf: query the socket send/receive buffer sizes into
 * the optional out-parameters.  Returns 0 or a negative errno.
 */
int
libcfs_sock_getbuf (struct socket *sock, int *txbufsize, int *rxbufsize)
{
	struct sockopt  sopt;
	int             rc;
	CFS_DECL_NET_DATA;

	bzero(&sopt, sizeof sopt);
	sopt.sopt_dir = SOPT_GET;
	sopt.sopt_level = SOL_SOCKET;

	if (txbufsize != NULL) {
		sopt.sopt_val = txbufsize;
		sopt.sopt_valsize = sizeof(*txbufsize);
		sopt.sopt_name = SO_SNDBUF;
		CFS_NET_IN;
		rc = sogetopt(sock, &sopt);
		CFS_NET_EX;
		if (rc != 0) {
			CERROR ("Can't get send buffer size: %d\n", rc);
			return -rc;
		}
	}

	if (rxbufsize != NULL) {
		sopt.sopt_val = rxbufsize;
		sopt.sopt_valsize = sizeof(*rxbufsize);
		sopt.sopt_name = SO_RCVBUF;
		CFS_NET_IN;
		rc = sogetopt(sock, &sopt);
		CFS_NET_EX;
		if (rc != 0) {
			CERROR ("Can't get receive buffer size: %d\n", rc);
			return -rc;
		}
	}
	return 0;
}

/*
 * libcfs_sock_connect: create a socket bound to local_ip/local_port and
 * connect it to peer_ip/peer_port, sleeping at splnet() until the
 * connection completes or fails.  On failure the socket is shut down
 * and closed.  Returns 0 or a negative errno.
 */
int
libcfs_sock_connect (struct socket **sockp, int *fatal,
    __u32 local_ip, int local_port, __u32 peer_ip, int peer_port)
{
	struct sockaddr_in  srvaddr;
	struct socket      *so;
	int                 s;
	int                 rc;
	CFS_DECL_FUNNEL_DATA;

	rc = libcfs_sock_create(sockp, fatal, local_ip, local_port);
	if (rc != 0)
		return rc;
	so = *sockp;
	bzero(&srvaddr, sizeof(srvaddr));
	srvaddr.sin_len = sizeof(struct sockaddr_in);
	srvaddr.sin_family = AF_INET;
	srvaddr.sin_port = htons (peer_port);
	srvaddr.sin_addr.s_addr = htonl (peer_ip);

	CFS_NET_IN;
	rc = soconnect(so, (struct sockaddr *)&srvaddr);
	if (rc != 0) {
		CFS_NET_EX;
		if (rc != EADDRNOTAVAIL && rc != EADDRINUSE)
			CDEBUG(D_NETERROR,
			    "Error %d connecting %u.%u.%u.%u/%d -> %u.%u.%u.%u/%d\n", rc,
			    HIPQUAD(local_ip), local_port,
			    HIPQUAD(peer_ip), peer_port);
		goto out;
	}

	/* Wait for the asynchronous connect to resolve. */
	s = splnet();
	while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
		CDEBUG(D_NET, "ksocknal sleep for waiting auto_connect.\n");
		(void) tsleep((caddr_t)&so->so_timeo, PSOCK,
		    "ksocknal_conn", hz);
	}
	if ((rc = so->so_error) != 0) {
		so->so_error = 0;
		splx(s);
		CFS_NET_EX;
		CDEBUG(D_NETERROR,
		    "Error %d connecting %u.%u.%u.%u/%d -> %u.%u.%u.%u/%d\n", rc,
		    HIPQUAD(local_ip), local_port,
		    HIPQUAD(peer_ip), peer_port);
		goto out;
	}
	LASSERT(so->so_state & SS_ISCONNECTED);
	splx(s);
	CFS_NET_EX;
	if (sockp)
		*sockp = so;
	return (0);
out:
	CFS_NET_IN;
	soshutdown(so, 2);
	soclose(so);
	CFS_NET_EX;
	return (-rc);
}

/*
 * libcfs_sock_release: shut down the send side of a kernel socket.
 * NOTE(review): the socket is not soclose()d here — presumably the
 * caller (or a peer-driven teardown) closes it; confirm ownership.
 */
void
libcfs_sock_release (struct socket *sock)
{
	CFS_DECL_FUNNEL_DATA;
	CFS_NET_IN;
	soshutdown(sock, 0);
	CFS_NET_EX;
}
/*
 * libcfs_sock_accept: accept one connection from a listening kernel
 * socket, blocking (unless the socket is non-blocking) until a completed
 * connection is available.  The accepted socket is detached from the
 * completion queue at splnet() before soaccept() is called.  Returns 0
 * with *newsockp set, or a negative errno.
 */
int
libcfs_sock_accept (struct socket **newsockp, struct socket *sock)
{
	struct socket *so;
	struct sockaddr *sa;
	int error, s;
	CFS_DECL_FUNNEL_DATA;

	CFS_NET_IN;
	s = splnet();
	if ((sock->so_options & SO_ACCEPTCONN) == 0) {
		splx(s);
		CFS_NET_EX;
		return (-EINVAL);
	}

	/* Non-blocking socket with an empty completion queue. */
	if ((sock->so_state & SS_NBIO) &&
	    sock->so_comp.tqh_first == NULL) {
		splx(s);
		CFS_NET_EX;
		return (-EWOULDBLOCK);
	}
	error = 0;
	while (TAILQ_EMPTY(&sock->so_comp) && sock->so_error == 0) {
		if (sock->so_state & SS_CANTRCVMORE) {
			sock->so_error = ECONNABORTED;
			break;
		}
		error = tsleep((caddr_t)&sock->so_timeo, PSOCK | PCATCH,
		    "accept", 0);
		if (error) {
			splx(s);
			CFS_NET_EX;
			return (-error);
		}
	}
	if (sock->so_error) {
		error = sock->so_error;
		sock->so_error = 0;
		splx(s);
		CFS_NET_EX;
		return (-error);
	}

	/*
	 * At this point we know that there is at least one connection
	 * ready to be accepted. Remove it from the queue prior to
	 * allocating the file descriptor for it since falloc() may
	 * block allowing another process to accept the connection
	 * instead.
	 */
	so = TAILQ_FIRST(&sock->so_comp);
	TAILQ_REMOVE(&sock->so_comp, so, so_list);
	sock->so_qlen--;

	so->so_state &= ~SS_COMP;
	so->so_head = NULL;
	sa = 0;
	(void) soaccept(so, &sa);

	*newsockp = so;
	/*
	 * NOTE(review): sa is freed unconditionally here — presumably
	 * soaccept() always allocates it on this platform; confirm it
	 * cannot still be NULL.
	 */
	FREE(sa, M_SONAME);
	splx(s);
	CFS_NET_EX;
	return (-error);
}
/*
 * octeon_eth_ioctl: ioctl entry point for the Octeon Ethernet interface.
 *
 * Handles address assignment (bringing the interface up), flag changes
 * (init/stop, or ENETRESET to reprogram the filter), and media ioctls —
 * where flow-control flags are normalized before being stored on the GMX
 * port.  Everything else goes to ether_ioctl().  Any ENETRESET outcome is
 * absorbed by reprogramming the GMX filter, and the transmit path is
 * kicked before returning.  Runs at splnet().
 */
int
octeon_eth_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct octeon_eth_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			octeon_eth_init(ifp);
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING)
				error = ENETRESET;
			else
				octeon_eth_init(ifp);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				octeon_eth_stop(ifp, 0);
		}
		break;

	case SIOCSIFMEDIA:
		/* Flow control requires full-duplex mode. */
		if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO ||
		    (ifr->ifr_media & IFM_FDX) == 0) {
			ifr->ifr_media &= ~IFM_ETH_FMASK;
		}
		if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) {
			/* Generic FLOW expands to both pause directions. */
			if ((ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) {
				ifr->ifr_media |=
				    IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE;
			}
			sc->sc_gmx_port->sc_port_flowflags =
			    ifr->ifr_media & IFM_ETH_FMASK;
		}
		/* FALLTHROUGH */
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ISSET(ifp->if_flags, IFF_RUNNING))
			cn30xxgmx_set_filter(sc->sc_gmx_port);
		error = 0;
	}

	octeon_eth_start(ifp);

	splx(s);
	return (error);
}
/*
 * priq_class_create: create (or re-configure) a PRIQ class at priority
 * `pri` with queue limit `qlimit` and handle `qid`.
 *
 * If a class already exists at that priority it is purged (at splnet())
 * and its RED/RIO state destroyed, then reused; otherwise a new zeroed
 * class and queue are allocated.  RED/RIO state is then set up according
 * to `flags` when compiled in.  Returns the class or NULL on failure.
 *
 * NOTE(review): the parameter `qlimit` shares its name with the
 * `qlimit()` accessor macro used on the left-hand side below —
 * presumably intentional ALTQ convention, but confusing; confirm the
 * macro expansion still does what is expected here.
 */
static struct priq_class *
priq_class_create(struct priq_if *pif, int pri, int qlimit, int flags, int qid)
{
	struct priq_class *cl;
	int s;

#ifndef ALTQ_RED
	if (flags & PRCF_RED) {
#ifdef ALTQ_DEBUG
		printf("priq_class_create: RED not configured for PRIQ!\n");
#endif
		return (NULL);
	}
#endif

	if ((cl = pif->pif_classes[pri]) != NULL) {
		/* modify the class instead of creating a new one */
		s = splnet();
		if (!qempty(cl->cl_q))
			priq_purgeq(cl);
		splx(s);
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	} else {
		cl = malloc(sizeof(struct priq_class), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl == NULL)
			return (NULL);

		cl->cl_q = malloc(sizeof(class_queue_t), M_DEVBUF,
		    M_WAITOK|M_ZERO);
		if (cl->cl_q == NULL)
			goto err_ret;
	}

	pif->pif_classes[pri] = cl;
	if (flags & PRCF_DEFAULTCLASS)
		pif->pif_default = cl;
	if (qlimit == 0)
		qlimit = 50;	/* use default */
	qlimit(cl->cl_q) = qlimit;
	qtype(cl->cl_q) = Q_DROPTAIL;
	qlen(cl->cl_q) = 0;
	cl->cl_flags = flags;
	cl->cl_pri = pri;
	if (pri > pif->pif_maxpri)
		pif->pif_maxpri = pri;
	cl->cl_pif = pif;
	cl->cl_handle = qid;

#ifdef ALTQ_RED
	if (flags & (PRCF_RED|PRCF_RIO)) {
		int red_flags, red_pkttime;

		red_flags = 0;
		if (flags & PRCF_ECN)
			red_flags |= REDF_ECN;
#ifdef ALTQ_RIO
		if (flags & PRCF_CLEARDSCP)
			red_flags |= RIOF_CLEARDSCP;
#endif
		/* Estimate per-packet transmission time from bandwidth. */
		if (pif->pif_bandwidth < 8)
			red_pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			red_pkttime = (int64_t)pif->pif_ifq->altq_ifp->if_mtu
			    * 1000 * 1000 * 1000 /
			    (pif->pif_bandwidth / 8);
#ifdef ALTQ_RIO
		if (flags & PRCF_RIO) {
			cl->cl_red = (red_t *)rio_alloc(0, NULL,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RIO;
		} else
#endif
		if (flags & PRCF_RED) {
			cl->cl_red = red_alloc(0, 0,
			    qlimit(cl->cl_q) * 10/100,
			    qlimit(cl->cl_q) * 30/100,
			    red_flags, red_pkttime);
			if (cl->cl_red != NULL)
				qtype(cl->cl_q) = Q_RED;
		}
	}
#endif /* ALTQ_RED */

	return (cl);

err_ret:
	if (cl->cl_red != NULL) {
#ifdef ALTQ_RIO
		if (q_is_rio(cl->cl_q))
			rio_destroy((rio_t *)cl->cl_red);
#endif
#ifdef ALTQ_RED
		if (q_is_red(cl->cl_q))
			red_destroy(cl->cl_red);
#endif
	}
	if (cl->cl_q != NULL)
		free(cl->cl_q, M_DEVBUF);
	free(cl, M_DEVBUF);
	return (NULL);
}
/*
 * lostart: if_start handler used when ALTQ is enabled on loopback.
 *
 * Drains the interface send queue; each packet carries its address
 * family in a prepended uint32_t, which selects either a pktqueue
 * (IP/IPv6) or a legacy ifqueue + netisr (IPX/AppleTalk).  Enqueue
 * failures drop the packet and stop processing; successes update the
 * input counters, since loopback "receives" what it sends.
 */
static void
lostart(struct ifnet *ifp)
{
	for (;;) {
		pktqueue_t *pktq = NULL;
		struct ifqueue *ifq = NULL;
		struct mbuf *m;
		size_t pktlen;
		uint32_t af;
		int s, isr = 0;

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			return;

		/* Recover the address family stashed by looutput(). */
		af = *(mtod(m, uint32_t *));
		m_adj(m, sizeof(uint32_t));

		switch (af) {
#ifdef INET
		case AF_INET:
			pktq = ip_pktq;
			break;
#endif
#ifdef INET6
		case AF_INET6:
			m->m_flags |= M_LOOP;
			pktq = ip6_pktq;
			break;
#endif
#ifdef IPX
		case AF_IPX:
			ifq = &ipxintrq;
			isr = NETISR_IPX;
			break;
#endif
#ifdef NETATALK
		case AF_APPLETALK:
			ifq = &atintrq2;
			isr = NETISR_ATALK;
			break;
#endif
		default:
			printf("%s: can't handle af%d\n", ifp->if_xname, af);
			m_freem(m);
			return;
		}
		pktlen = m->m_pkthdr.len;

		s = splnet();
		if (__predict_true(pktq)) {
			if (__predict_false(pktq_enqueue(pktq, m, 0))) {
				m_freem(m);
				splx(s);
				return;
			}
			ifp->if_ipackets++;
			ifp->if_ibytes += pktlen;
			splx(s);
			continue;
		}
		if (IF_QFULL(ifq)) {
			IF_DROP(ifq);
			splx(s);
			m_freem(m);
			return;
		}
		IF_ENQUEUE(ifq, m);
		schednetisr(isr);
		ifp->if_ipackets++;
		ifp->if_ibytes += pktlen;
		splx(s);
	}
}
/*
 * looutput: output routine for the loopback interface.
 *
 * Loops a packet straight back into the input path.  With ALTQ enabled
 * and lostart as the start routine, the packet is instead queued on
 * if_snd with its address family prepended.  Otherwise the family
 * selects either a pktqueue (IP/IPv6 — with any deferred checksums
 * finalized here, since the packet never hits real hardware) or a
 * legacy ifqueue + netisr (MPLS via route tag, IPX, AppleTalk).
 * Returns 0, ENOBUFS on queue overflow, EAFNOSUPPORT for unknown
 * families, or the route's reject/blackhole error.
 */
int
looutput(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    struct rtentry *rt)
{
	pktqueue_t *pktq = NULL;
	struct ifqueue *ifq = NULL;
	int s, isr = -1;
	int csum_flags;
	size_t pktlen;

	MCLAIM(m, ifp->if_mowner);
	KASSERT(KERNEL_LOCKED_P());

	if ((m->m_flags & M_PKTHDR) == 0)
		panic("looutput: no header mbuf");
	if (ifp->if_flags & IFF_LOOPBACK)
		bpf_mtap_af(ifp, dst->sa_family, m);
	m->m_pkthdr.rcvif = ifp;

	if (rt && rt->rt_flags & (RTF_REJECT|RTF_BLACKHOLE)) {
		m_freem(m);
		return (rt->rt_flags & RTF_BLACKHOLE ? 0 :
			rt->rt_flags & RTF_HOST ? EHOSTUNREACH : ENETUNREACH);
	}

	pktlen = m->m_pkthdr.len;
	ifp->if_opackets++;
	ifp->if_obytes += pktlen;

#ifdef ALTQ
	/*
	 * ALTQ on the loopback interface is just for debugging.  It's
	 * used only for loopback interfaces, not for a simplex interface.
	 */
	if ((ALTQ_IS_ENABLED(&ifp->if_snd) || TBR_IS_ENABLED(&ifp->if_snd)) &&
	    ifp->if_start == lostart) {
		struct altq_pktattr pktattr;
		int error;

		/*
		 * If the queueing discipline needs packet classification,
		 * do it before prepending the link headers.
		 */
		IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family, &pktattr);

		M_PREPEND(m, sizeof(uint32_t), M_DONTWAIT);
		if (m == NULL)
			return (ENOBUFS);
		*(mtod(m, uint32_t *)) = dst->sa_family;

		s = splnet();
		IFQ_ENQUEUE(&ifp->if_snd, m, &pktattr, error);
		(*ifp->if_start)(ifp);
		splx(s);
		return (error);
	}
#endif /* ALTQ */

	m_tag_delete_nonpersistent(m);

#ifdef MPLS
	if (rt != NULL && rt_gettag(rt) != NULL &&
	    rt_gettag(rt)->sa_family == AF_MPLS &&
	    (m->m_flags & (M_MCAST | M_BCAST)) == 0) {
		union mpls_shim msh;
		msh.s_addr = MPLS_GETSADDR(rt);
		if (msh.shim.label != MPLS_LABEL_IMPLNULL) {
			ifq = &mplsintrq;
			isr = NETISR_MPLS;
		}
	}
	if (isr != NETISR_MPLS)
#endif
	switch (dst->sa_family) {

#ifdef INET
	case AF_INET:
		/* Finalize any checksums deferred to "hardware". */
		csum_flags = m->m_pkthdr.csum_flags;
		KASSERT((csum_flags & ~(M_CSUM_IPv4|M_CSUM_UDPv4)) == 0);
		if (csum_flags != 0 && IN_LOOPBACK_NEED_CHECKSUM(csum_flags)) {
			ip_undefer_csum(m, 0, csum_flags);
		}
		m->m_pkthdr.csum_flags = 0;
		pktq = ip_pktq;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		csum_flags = m->m_pkthdr.csum_flags;
		KASSERT((csum_flags & ~M_CSUM_UDPv6) == 0);
		if (csum_flags != 0 &&
		    IN6_LOOPBACK_NEED_CHECKSUM(csum_flags)) {
			ip6_undefer_csum(m, 0, csum_flags);
		}
		m->m_pkthdr.csum_flags = 0;
		m->m_flags |= M_LOOP;
		pktq = ip6_pktq;
		break;
#endif
#ifdef IPX
	case AF_IPX:
		ifq = &ipxintrq;
		isr = NETISR_IPX;
		break;
#endif
#ifdef NETATALK
	case AF_APPLETALK:
		ifq = &atintrq2;
		isr = NETISR_ATALK;
		break;
#endif
	default:
		printf("%s: can't handle af%d\n", ifp->if_xname,
		    dst->sa_family);
		m_freem(m);
		return (EAFNOSUPPORT);
	}

	s = splnet();
	if (__predict_true(pktq)) {
		int error = 0;

		if (__predict_true(pktq_enqueue(pktq, m, 0))) {
			ifp->if_ipackets++;
			ifp->if_ibytes += pktlen;
		} else {
			m_freem(m);
			error = ENOBUFS;
		}
		splx(s);
		return error;
	}
	if (IF_QFULL(ifq)) {
		IF_DROP(ifq);
		m_freem(m);
		splx(s);
		return (ENOBUFS);
	}
	IF_ENQUEUE(ifq, m);
	schednetisr(isr);
	ifp->if_ipackets++;
	ifp->if_ibytes += m->m_pkthdr.len;
	splx(s);
	return (0);
}
/*
 * natm_usrreq:
 *
 *	Handle a PRU_* user request on a native-ATM (NATM) socket.
 *	The meaning of 'm', 'nam' and 'control' depends on the request;
 *	note the historical usrreq calling convention for PRU_CONTROL,
 *	where 'm' carries the ioctl command and 'nam' its argument.
 *
 *	=> The whole routine runs at splsoftnet(); driver ioctls are
 *	   additionally bracketed by splnet().
 *	=> Returns 0 on success or an errno.
 */
int
natm_usrreq(struct socket *so, int req, struct mbuf *m, struct mbuf *nam,
    struct mbuf *control, struct proc *p)
{
	int error = 0, s, s2;
	struct natmpcb *npcb;
	struct sockaddr_natm *snatm;
	struct atm_pseudoioctl api;
	struct atm_pseudohdr *aph;
	struct atm_rawioctl ario;
	struct ifnet *ifp;
	int proto = so->so_proto->pr_protocol;

	s = splsoftnet();

	npcb = (struct natmpcb *) so->so_pcb;

	/* Every request except ATTACH needs an existing PCB. */
	if (npcb == NULL && req != PRU_ATTACH) {
		error = EINVAL;
		goto done;
	}

	switch (req) {
	case PRU_ATTACH:		/* attach protocol to up */
		if (npcb) {
			error = EISCONN;
			break;
		}
		/*
		 * Reserve socket buffer space (AAL5 vs. AAL0 sizes)
		 * unless it was already reserved.
		 */
		if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
			if (proto == PROTO_NATMAAL5)
				error = soreserve(so, natm5_sendspace,
				    natm5_recvspace);
			else
				error = soreserve(so, natm0_sendspace,
				    natm0_recvspace);
			if (error)
				break;
		}
		so->so_pcb = (caddr_t) (npcb = npcb_alloc(M_WAITOK));
		npcb->npcb_socket = so;
		break;

	case PRU_DETACH:		/* detach protocol from up */
		/*
		 * we turn on 'drain' *before* we sofree.
		 */
		npcb_free(npcb, NPCB_DESTROY);	/* drain */
		so->so_pcb = NULL;
		sofree(so);
		break;

	case PRU_CONNECT:		/* establish connection to peer */
		/*
		 * validate nam and npcb
		 */
		if (nam->m_len != sizeof(*snatm)) {
			error = EINVAL;
			break;
		}
		snatm = mtod(nam, struct sockaddr_natm *);
		if (snatm->snatm_len != sizeof(*snatm) ||
		    (npcb->npcb_flags & NPCB_FREE) == 0) {
			error = EINVAL;
			break;
		}
		if (snatm->snatm_family != AF_NATM) {
			error = EAFNOSUPPORT;
			break;
		}

		/* XXX ensure null termination since ifunit() uses strcmp */
		snatm->snatm_if[IFNAMSIZ-1] = '\0';

		/*
		 * convert interface string to ifp, validate.
		 */
		ifp = ifunit(snatm->snatm_if);
		if (ifp == NULL || (ifp->if_flags & IFF_RUNNING) == 0) {
			error = ENXIO;
			break;
		}
		/* Only real ATM interfaces are acceptable. */
		if (ifp->if_output != atm_output) {
			error = EAFNOSUPPORT;
			break;
		}

		/*
		 * register us with the NATM PCB layer
		 */
		if (npcb_add(npcb, ifp, snatm->snatm_vci,
		    snatm->snatm_vpi) != npcb) {
			error = EADDRINUSE;
			break;
		}

		/*
		 * enable rx: tell the driver to deliver this VPI/VCI
		 * to our PCB (SIOCATMENA).  On failure the PCB binding
		 * made above is undone.
		 */
		ATM_PH_FLAGS(&api.aph) = (proto == PROTO_NATMAAL5) ?
		    ATM_PH_AAL5 : 0;
		ATM_PH_VPI(&api.aph) = npcb->npcb_vpi;
		ATM_PH_SETVCI(&api.aph, npcb->npcb_vci);
		api.rxhand = npcb;
		s2 = splnet();		/* driver ioctls run at splnet */
		if (ifp->if_ioctl == NULL ||
		    ifp->if_ioctl(ifp, SIOCATMENA, (caddr_t) &api) != 0) {
			splx(s2);
			npcb_free(npcb, NPCB_REMOVE);
			error = EIO;
			break;
		}
		splx(s2);

		soisconnected(so);
		break;

	case PRU_DISCONNECT:		/* disconnect from peer */
		if ((npcb->npcb_flags & NPCB_CONNECTED) == 0) {
			printf("natm: disconnected check\n");
			error = EIO;
			break;
		}
		ifp = npcb->npcb_ifp;

		/*
		 * disable rx (best effort -- the driver return value is
		 * deliberately ignored here)
		 */
		ATM_PH_FLAGS(&api.aph) = ATM_PH_AAL5;
		ATM_PH_VPI(&api.aph) = npcb->npcb_vpi;
		ATM_PH_SETVCI(&api.aph, npcb->npcb_vci);
		api.rxhand = npcb;
		s2 = splnet();
		if (ifp->if_ioctl != NULL)
			ifp->if_ioctl(ifp, SIOCATMDIS, (caddr_t) &api);
		splx(s2);

		npcb_free(npcb, NPCB_REMOVE);
		soisdisconnected(so);
		break;

	case PRU_SHUTDOWN:		/* won't send any more data */
		socantsendmore(so);
		break;

	case PRU_SEND:			/* send this data */
		/* NATM takes no control data; reject and free both mbufs. */
		if (control && control->m_len) {
			m_freem(control);
			m_freem(m);
			error = EINVAL;
			break;
		}

		/*
		 * send the data.  we must put an atm_pseudohdr on first
		 * so the driver knows which VPI/VCI (and AAL) to use.
		 */
		M_PREPEND(m, sizeof(*aph), M_WAITOK);
		aph = mtod(m, struct atm_pseudohdr *);
		ATM_PH_VPI(aph) = npcb->npcb_vpi;
		ATM_PH_SETVCI(aph, npcb->npcb_vci);
		ATM_PH_FLAGS(aph) = (proto == PROTO_NATMAAL5) ?
		    ATM_PH_AAL5 : 0;

		error = atm_output(npcb->npcb_ifp, m, NULL, NULL);
		break;

	case PRU_SENSE:			/* return status into m */
		/* return zero? */
		break;

	case PRU_PEERADDR:		/* fetch peer's address */
		/* Fill 'nam' with a sockaddr_natm describing the VC. */
		snatm = mtod(nam, struct sockaddr_natm *);
		bzero(snatm, sizeof(*snatm));
		nam->m_len = snatm->snatm_len = sizeof(*snatm);
		snatm->snatm_family = AF_NATM;
#if defined(__NetBSD__) || defined(__OpenBSD__)
		bcopy(npcb->npcb_ifp->if_xname, snatm->snatm_if,
		    sizeof(snatm->snatm_if));
#elif defined(__FreeBSD__)
		sprintf(snatm->snatm_if, "%s%d", npcb->npcb_ifp->if_name,
		    npcb->npcb_ifp->if_unit);
#endif
		snatm->snatm_vci = npcb->npcb_vci;
		snatm->snatm_vpi = npcb->npcb_vpi;
		break;

	case PRU_CONTROL:		/* control operations on protocol */
		/*
		 * raw atm ioctl.  comes in as a SIOCRAWATM.  we convert
		 * it to SIOCXRAWATM and pass it to the driver.  Note the
		 * usrreq convention: 'm' is the command, 'nam' the arg.
		 */
		if ((u_long)m == SIOCRAWATM) {
			if (npcb->npcb_ifp == NULL) {
				error = ENOTCONN;
				break;
			}
			ario.npcb = npcb;
			ario.rawvalue = *((int *)nam);
			error = npcb->npcb_ifp->if_ioctl(npcb->npcb_ifp,
			    SIOCXRAWATM, (caddr_t) &ario);
			/* Track raw mode in the PCB on success. */
			if (!error) {
				if (ario.rawvalue)
					npcb->npcb_flags |= NPCB_RAW;
				else
					npcb->npcb_flags &= ~(NPCB_RAW);
			}
			break;
		}

		error = EOPNOTSUPP;
		break;

	/* All of the following are unsupported on NATM sockets. */
	case PRU_BIND:			/* bind socket to address */
	case PRU_LISTEN:		/* listen for connection */
	case PRU_ACCEPT:		/* accept connection from peer */
	case PRU_CONNECT2:		/* connect two sockets */
	case PRU_ABORT:			/* abort (fast DISCONNECT, DETACH) */
					/* (only happens if LISTEN socket) */
	case PRU_RCVD:			/* have taken data; more room now */
	case PRU_FASTTIMO:		/* 200ms timeout */
	case PRU_SLOWTIMO:		/* 500ms timeout */
	case PRU_RCVOOB:		/* retrieve out of band data */
	case PRU_SENDOOB:		/* send out of band data */
	case PRU_PROTORCV:		/* receive from below */
	case PRU_PROTOSEND:		/* send to below */
	case PRU_SOCKADDR:		/* fetch socket's address */
#ifdef DIAGNOSTIC
		printf("natm: PRU #%d unsupported\n", req);
#endif
		error = EOPNOTSUPP;
		break;

	default:
		panic("natm usrreq");
	}

done:
	splx(s);
	return(error);
}
void kue_init(void *xsc) { struct kue_softc *sc = xsc; struct ifnet *ifp = GET_IFP(sc); int s; u_char *eaddr; DPRINTFN(5,("%s: %s: enter\n", sc->kue_dev.dv_xname,__func__)); if (ifp->if_flags & IFF_RUNNING) return; s = splnet(); eaddr = sc->arpcom.ac_enaddr; /* Set MAC address */ kue_ctl(sc, KUE_CTL_WRITE, KUE_CMD_SET_MAC, 0, eaddr, ETHER_ADDR_LEN); sc->kue_rxfilt = KUE_RXFILT_UNICAST | KUE_RXFILT_BROADCAST; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) sc->kue_rxfilt |= KUE_RXFILT_PROMISC; kue_setword(sc, KUE_CMD_SET_PKT_FILTER, sc->kue_rxfilt); /* I'm not sure how to tune these. */ #if 0 /* * Leave this one alone for now; setting it * wrong causes lockups on some machines/controllers. */ kue_setword(sc, KUE_CMD_SET_SOFS, 1); #endif kue_setword(sc, KUE_CMD_SET_URB_SIZE, 64); /* Init TX ring. */ if (kue_tx_list_init(sc) == ENOBUFS) { printf("%s: tx list init failed\n", sc->kue_dev.dv_xname); splx(s); return; } /* Init RX ring. */ if (kue_rx_list_init(sc) == ENOBUFS) { printf("%s: rx list init failed\n", sc->kue_dev.dv_xname); splx(s); return; } /* Load the multicast filter. */ kue_setmulti(sc); if (sc->kue_ep[KUE_ENDPT_RX] == NULL) { if (kue_open_pipes(sc)) { splx(s); return; } } ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; splx(s); }
/*
 * iavc_load:
 *
 *	Reset the controller and download 'len' bytes of firmware from
 *	'cp'.  After the download, poll the card for an acknowledgement
 *	byte and, for DMA-capable (PCI) cards, start the DMA engine and
 *	arm the first receive transfer.  Finishes by sending the CAPI
 *	init message.
 *
 *	=> Returns 0 on success or EIO on any download/ACK failure.
 */
int
iavc_load(capi_softc_t *capi_sc, int len, u_int8_t *cp)
{
	iavc_softc_t *sc = (iavc_softc_t*) capi_sc->ctx;
	u_int8_t val;

	aprint_debug_dev(&sc->sc_dev, "reset card ....\n");

	/* Pick the reset method matching the attachment type. */
	if (sc->sc_dma)
		iavc_b1dma_reset(sc);	/* PCI cards */
	else if (sc->sc_t1)
		iavc_t1_reset(sc);	/* ISA attachment T1 */
	else
		iavc_b1_reset(sc);	/* ISA attachment B1 */

	DELAY(1000);

	aprint_debug_dev(&sc->sc_dev, "start loading %d bytes firmware\n",
	    len);

	/* Push the image one byte at a time; stop early if the card balks. */
	while (len && b1io_save_put_byte(sc, *cp++) == 0)
		len--;

	/* 'len' is the residue: non-zero means the card stopped taking data. */
	if (len) {
		aprint_error_dev(&sc->sc_dev,
		    "loading failed, can't write to card, len = %d\n", len);
		return (EIO);
	}

	aprint_debug_dev(&sc->sc_dev, "firmware loaded, wait for ACK\n");

	/* Ask the card to answer; B1 ISA uses the plain POLL command. */
	if(sc->sc_capi.card_type == CARD_TYPEC_AVM_B1_ISA)
		iavc_put_byte(sc, SEND_POLL);
	else
		iavc_put_byte(sc, SEND_POLLACK);

	/* Poll up to ~100ms (1000 x 100us) for a response byte. */
	for (len = 0; len < 1000 && !iavc_rx_full(sc); len++)
		DELAY(100);

	if (!iavc_rx_full(sc)) {
		aprint_error_dev(&sc->sc_dev, "loading failed, no ack\n");
		return (EIO);
	}

	val = iavc_get_byte(sc);

	/* DMA cards must answer RECEIVE_POLLDWORD, others RECEIVE_POLL. */
	if ((sc->sc_dma && val != RECEIVE_POLLDWORD) ||
	    (!sc->sc_dma && val != RECEIVE_POLL)) {
		aprint_error_dev(&sc->sc_dev,
		    "loading failed, bad ack = %02x\n", val);
		return (EIO);
	}

	aprint_debug_dev(&sc->sc_dev, "got ACK = 0x%02x\n", val);

	/* Start the DMA engine (splnet-protected register sequence). */
	if (sc->sc_dma) {
		int s;

		s = splnet();

		sc->sc_csr = AVM_FLAG;
		AMCC_WRITE(sc, AMCC_INTCSR, sc->sc_csr);
		AMCC_WRITE(sc, AMCC_MCSR, (EN_A2P_TRANSFERS|EN_P2A_TRANSFERS|
		    A2P_HI_PRIORITY|P2A_HI_PRIORITY|
		    RESET_A2P_FLAGS|RESET_P2A_FLAGS));

		iavc_write_port(sc, 0x07, 0x30);	/* XXX magic numbers from */
		iavc_write_port(sc, 0x10, 0xf0);	/* XXX the linux driver */

		/* Arm a 4-byte receive DMA into the rx buffer. */
		bus_dmamap_sync(sc->dmat, sc->rx_map, 0,
		    sc->rx_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		sc->sc_recv1 = 0;
		AMCC_WRITE(sc, AMCC_RXPTR, sc->rx_map->dm_segs[0].ds_addr);
		AMCC_WRITE(sc, AMCC_RXLEN, 4);
		/* Enable rx/tx terminal-count interrupts. */
		sc->sc_csr |= EN_RX_TC_INT|EN_TX_TC_INT;
		AMCC_WRITE(sc, AMCC_INTCSR, sc->sc_csr);

		splx(s);
	}

#ifdef notyet
	/* good happy place */
	if(sc->sc_capi.card_type == CARD_TYPEC_AVM_B1_ISA)
		b1isa_setup_irq(sc);
#endif

	iavc_send_init(sc);

	return 0;
}