static int rfcomm_send(struct socket *so, struct mbuf *m, struct sockaddr *nam, struct mbuf *control, struct lwp *l) { struct rfcomm_dlc *pcb = so->so_pcb; int err = 0; struct mbuf *m0; KASSERT(solocked(so)); KASSERT(m != NULL); if (control) /* no use for that */ m_freem(control); if (pcb == NULL) { err = EINVAL; goto release; } m0 = m_copypacket(m, M_DONTWAIT); if (m0 == NULL) { err = ENOMEM; goto release; } sbappendstream(&so->so_snd, m); return rfcomm_send_pcb(pcb, m0); release: m_freem(m); return err; }
/*
 * Deliver an inbound multicast/broadcast UDP datagram to one candidate
 * PCB during the caller's scan over all matching sockets.
 *
 * arg->inp  is the pcb currently under examination,
 * arg->last is the previous match (NULL on the first hit).
 *
 * Return values steer the caller's loop:
 *	ERESTART    - inp is not a member of the destination group;
 *		      caller should continue with the next pcb.
 *	EJUSTRETURN - inp matched but does not share its port
 *		      (neither SO_REUSEPORT nor SO_REUSEADDR set);
 *		      caller should stop scanning.
 *	0           - keep scanning for further matches.
 *
 * Delivery runs one pcb "behind" the scan: when a new match is found,
 * a COPY of the datagram is appended to the previous match.  The final
 * match presumably receives the original mbuf in the caller -- verify
 * against the call site.
 */
static int
udp_mcast_input(struct udp_mcast_arg *arg)
{
	struct inpcb *inp = arg->inp;
	struct inpcb *last = arg->last;
	struct ip *ip = arg->ip;
	struct mbuf *m = arg->m;

	if (check_multicast_membership(ip, inp, m) < 0)
		return ERESTART; /* caller continue */

	if (last != NULL) {
		struct mbuf *n;

		/*
		 * NOTE: the two #ifdef blocks below each end in a bare
		 * "else" that binds to the m_copypacket "if" following
		 * them -- the structure is deliberate; do not reindent
		 * or brace without checking every ifdef combination.
		 */
#ifdef IPSEC
		/* check AH/ESP integrity. */
		if (ipsec4_in_reject_so(m, last->inp_socket))
			ipsecstat.in_polvio++; /* do not inject data to pcb */
		else
#endif /*IPSEC*/
#ifdef FAST_IPSEC
		/* check AH/ESP integrity. */
		if (ipsec4_in_reject(m, last))
			;
		else
#endif /*FAST_IPSEC*/
		/* hand the previous match a shared-cluster copy */
		if ((n = m_copypacket(m, M_NOWAIT)) != NULL)
			udp_append(last, ip, n,
			    arg->iphlen + sizeof(struct udphdr),
			    arg->udp_in);
	}
	arg->last = last = inp;

	/*
	 * Don't look for additional matches if this one does
	 * not have either the SO_REUSEPORT or SO_REUSEADDR
	 * socket options set.  This heuristic avoids searching
	 * through all pcbs in the common case of a non-shared
	 * port.  It * assumes that an application will never
	 * clear these options after setting them.
	 */
	if (!(last->inp_socket->so_options & (SO_REUSEPORT | SO_REUSEADDR)))
		return EJUSTRETURN; /* caller stop */

	return 0;
}
/*---------------------------------------------------------------------------*
 *	L2 -> L1: PH-DATA-REQUEST (D-Channel)
 *
 *	NOTE: We may get called here from ihfc_hdlc_Dread or isac_hdlc_Dread
 *	via the upper layers.
 *
 *	unit     - controller index into ihfc_softc[]
 *	m        - frame to transmit (may be NULL; treated as failure)
 *	freeflag - MBUF_DONTFREE means the caller keeps ownership of m,
 *	           so we must queue a copy instead of the original
 *
 *	Returns 1 if the frame was queued, 0 otherwise.
 *---------------------------------------------------------------------------*/
static int
ihfc_ph_data_req(int unit, struct mbuf *m, int freeflag)
{
	ihfc_sc_t *sc = &ihfc_softc[unit];
	u_char chan = 0;	/* D-channel queue/channel selector */
	HFC_VAR;		/* driver lock/state macro - declares locals */

	if (!m)
		return 0;

	HFC_BEG;		/* enter driver critical section */

	/* presumably state 3 == activated; kick activation if not. TODO confirm */
	if(S_PHSTATE != 3)
	{
		NDBGL1(L1_PRIM, "L1 was not running: "
		       "ihfc_ph_activate_req(unit = %d)!", unit);
		ihfc_ph_activate_req(unit);
	}

	/* "Allow" I-frames (-hp) */
	if (freeflag == MBUF_DONTFREE)
		m = m_copypacket(m, M_DONTWAIT);
		/* on copy failure m is NULL and we fall into the error
		 * branch below; the caller's original is untouched */

	if (!_IF_QFULL(&S_IFQUEUE) && m)
	{
		IF_ENQUEUE(&S_IFQUEUE, m);
		ihfc_B_start(unit, chan); /* (recycling) */
	}
	else
	{
		NDBGL1(L1_ERROR, "No frame out (unit = %d)", unit);
		if (m)
			i4b_Dfreembuf(m);
		HFC_END;
		return 0;
	}

	/* defer interrupt-context work via status bit if inside the ISR */
	if (S_INTR_ACTIVE)
		S_INT_S1 |= 0x04;

	HFC_END;		/* leave driver critical section */
	return 1;
}
/* * User Request. * up is socket * m is either * optional mbuf chain containing message * ioctl command (PRU_CONTROL) * nam is either * optional mbuf chain containing an address * ioctl data (PRU_CONTROL) * optionally protocol number (PRU_ATTACH) * message flags (PRU_RCVD) * ctl is either * optional mbuf chain containing socket options * optional interface pointer (PRU_CONTROL, PRU_PURGEIF) * l is pointer to process requesting action (if any) * * we are responsible for disposing of m and ctl if * they are mbuf chains */ int rfcomm_usrreq(struct socket *up, int req, struct mbuf *m, struct mbuf *nam, struct mbuf *ctl, struct lwp *l) { struct rfcomm_dlc *pcb = up->so_pcb; struct sockaddr_bt *sa; struct mbuf *m0; int err = 0; DPRINTFN(2, "%s\n", prurequests[req]); switch (req) { case PRU_CONTROL: return EPASSTHROUGH; case PRU_PURGEIF: return EOPNOTSUPP; case PRU_ATTACH: if (up->so_lock == NULL) { mutex_obj_hold(bt_lock); up->so_lock = bt_lock; solock(up); } KASSERT(solocked(up)); if (pcb != NULL) return EINVAL; /* * Since we have nothing to add, we attach the DLC * structure directly to our PCB pointer. 
*/ err = soreserve(up, rfcomm_sendspace, rfcomm_recvspace); if (err) return err; err = rfcomm_attach((struct rfcomm_dlc **)&up->so_pcb, &rfcomm_proto, up); if (err) return err; err = rfcomm_rcvd(up->so_pcb, sbspace(&up->so_rcv)); if (err) { rfcomm_detach((struct rfcomm_dlc **)&up->so_pcb); return err; } return 0; } if (pcb == NULL) { err = EINVAL; goto release; } switch(req) { case PRU_DISCONNECT: soisdisconnecting(up); return rfcomm_disconnect(pcb, up->so_linger); case PRU_ABORT: rfcomm_disconnect(pcb, 0); soisdisconnected(up); /* fall through to */ case PRU_DETACH: return rfcomm_detach((struct rfcomm_dlc **)&up->so_pcb); case PRU_BIND: KASSERT(nam != NULL); sa = mtod(nam, struct sockaddr_bt *); if (sa->bt_len != sizeof(struct sockaddr_bt)) return EINVAL; if (sa->bt_family != AF_BLUETOOTH) return EAFNOSUPPORT; return rfcomm_bind(pcb, sa); case PRU_CONNECT: KASSERT(nam != NULL); sa = mtod(nam, struct sockaddr_bt *); if (sa->bt_len != sizeof(struct sockaddr_bt)) return EINVAL; if (sa->bt_family != AF_BLUETOOTH) return EAFNOSUPPORT; soisconnecting(up); return rfcomm_connect(pcb, sa); case PRU_PEERADDR: KASSERT(nam != NULL); sa = mtod(nam, struct sockaddr_bt *); nam->m_len = sizeof(struct sockaddr_bt); return rfcomm_peeraddr(pcb, sa); case PRU_SOCKADDR: KASSERT(nam != NULL); sa = mtod(nam, struct sockaddr_bt *); nam->m_len = sizeof(struct sockaddr_bt); return rfcomm_sockaddr(pcb, sa); case PRU_SHUTDOWN: socantsendmore(up); break; case PRU_SEND: KASSERT(m != NULL); if (ctl) /* no use for that */ m_freem(ctl); m0 = m_copypacket(m, M_DONTWAIT); if (m0 == NULL) return ENOMEM; sbappendstream(&up->so_snd, m); return rfcomm_send(pcb, m0); case PRU_SENSE: return 0; /* (no release) */ case PRU_RCVD: return rfcomm_rcvd(pcb, sbspace(&up->so_rcv)); case PRU_RCVOOB: return EOPNOTSUPP; /* (no release) */ case PRU_LISTEN: return rfcomm_listen(pcb); case PRU_ACCEPT: KASSERT(nam != NULL); sa = mtod(nam, struct sockaddr_bt *); nam->m_len = sizeof(struct sockaddr_bt); return 
rfcomm_peeraddr(pcb, sa); case PRU_CONNECT2: case PRU_SENDOOB: case PRU_FASTTIMO: case PRU_SLOWTIMO: case PRU_PROTORCV: case PRU_PROTOSEND: err = EOPNOTSUPP; break; default: UNKNOWN(req); err = EOPNOTSUPP; break; } release: if (m) m_freem(m); if (ctl) m_freem(ctl); return err; }
/*
 * User Request.
 * up is socket
 * m is either
 *	optional mbuf chain containing message
 *	ioctl command (PRU_CONTROL)
 * nam is either
 *	optional mbuf chain containing an address
 *	ioctl data (PRU_CONTROL)
 * ctl is optional mbuf chain containing socket options
 * l is pointer to process requesting action (if any)
 *
 * we are responsible for disposing of m and ctl if
 * they are mbuf chains
 */
static int
sco_usrreq(struct socket *up, int req, struct mbuf *m,
    struct mbuf *nam, struct mbuf *ctl, struct lwp *l)
{
	struct sco_pcb *pcb = (struct sco_pcb *)up->so_pcb;
	struct sockaddr_bt *sa;
	struct mbuf *m0;
	int err = 0;

	DPRINTFN(2, "%s\n", prurequests[req]);

	/* attach/detach are handled elsewhere in this protocol */
	KASSERT(req != PRU_ATTACH);
	KASSERT(req != PRU_DETACH);

	/* requests that never touch the pcb or the mbufs */
	switch(req) {
	case PRU_CONTROL:
		return EOPNOTSUPP;

	case PRU_PURGEIF:
		return EOPNOTSUPP;
	}

	/* anything after here *requires* a pcb */
	if (pcb == NULL) {
		err = EINVAL;
		goto release;
	}

	switch(req) {
	case PRU_DISCONNECT:
		soisdisconnecting(up);
		return sco_disconnect(pcb, up->so_linger);

	case PRU_ABORT:
		sco_disconnect(pcb, 0);
		soisdisconnected(up);
		sco_detach(up);
		return 0;

	case PRU_BIND:
		KASSERT(nam != NULL);
		sa = mtod(nam, struct sockaddr_bt *);

		if (sa->bt_len != sizeof(struct sockaddr_bt))
			return EINVAL;

		if (sa->bt_family != AF_BLUETOOTH)
			return EAFNOSUPPORT;

		return sco_bind(pcb, sa);

	case PRU_CONNECT:
		KASSERT(nam != NULL);
		sa = mtod(nam, struct sockaddr_bt *);

		if (sa->bt_len != sizeof(struct sockaddr_bt))
			return EINVAL;

		if (sa->bt_family != AF_BLUETOOTH)
			return EAFNOSUPPORT;

		soisconnecting(up);
		return sco_connect(pcb, sa);

	case PRU_PEERADDR:
		KASSERT(nam != NULL);
		sa = mtod(nam, struct sockaddr_bt *);
		nam->m_len = sizeof(struct sockaddr_bt);
		return sco_peeraddr(pcb, sa);

	case PRU_SOCKADDR:
		KASSERT(nam != NULL);
		sa = mtod(nam, struct sockaddr_bt *);
		nam->m_len = sizeof(struct sockaddr_bt);
		return sco_sockaddr(pcb, sa);

	case PRU_SHUTDOWN:
		socantsendmore(up);
		break;

	case PRU_SEND:
		KASSERT(m != NULL);

		/* empty or oversized packets: drop via release */
		if (m->m_pkthdr.len == 0)
			break;

		if (m->m_pkthdr.len > pcb->sp_mtu) {
			err = EMSGSIZE;
			break;
		}

		/* copy shares clusters by reference; cheap */
		m0 = m_copypacket(m, M_DONTWAIT);
		if (m0 == NULL) {
			err = ENOMEM;
			break;	/* release frees both m and ctl */
		}

		if (ctl) /* no use for that */
			m_freem(ctl);

		/* original stays on so_snd; copy goes down the stack */
		sbappendrecord(&up->so_snd, m);
		return sco_send(pcb, m0);

	case PRU_SENSE:
		return 0;		/* (no sense - Doh!) */

	case PRU_RCVD:
	case PRU_RCVOOB:
		return EOPNOTSUPP;	/* (no release) */

	case PRU_LISTEN:
		return sco_listen(pcb);

	case PRU_ACCEPT:
		KASSERT(nam != NULL);
		sa = mtod(nam, struct sockaddr_bt *);
		nam->m_len = sizeof(struct sockaddr_bt);
		return sco_peeraddr(pcb, sa);

	case PRU_CONNECT2:
	case PRU_SENDOOB:
	case PRU_FASTTIMO:
	case PRU_SLOWTIMO:
	case PRU_PROTORCV:
	case PRU_PROTOSEND:
		err = EOPNOTSUPP;
		break;

	default:
		UNKNOWN(req);
		err = EOPNOTSUPP;
		break;
	}

release:
	if (m) m_freem(m);
	if (ctl) m_freem(ctl);
	return err;
}
/*
 * Transmit a request and wait for a matching reply, re-sending with an
 * increasing delay until TOTAL_TIMEOUT seconds have elapsed.
 *
 * so      - socket to send/receive on (receive timeout is assumed to be
 *           one second -- see the comment at the receive loop)
 * nam     - destination address
 * sndproc - optional callback to refresh 'snd' before each (re)send
 * snd     - request packet; caller keeps ownership, we transmit copies
 * rcvproc - reply filter; nonzero return means "not ours, keep waiting"
 * rcv     - if non-NULL, *rcv receives the reply chain (caller frees)
 * from_p  - if non-NULL, *from_p receives the sender address (caller frees)
 *
 * Returns 0 on success, ETIMEDOUT on overall timeout, or a socket error.
 */
int
nfs_boot_sendrecv(struct socket *so, struct mbuf *nam,
		  int (*sndproc)(struct mbuf *, void *, int),
		  struct mbuf *snd,
		  int (*rcvproc)(struct mbuf *, void *),
		  struct mbuf **rcv, struct mbuf **from_p,
		  void *context, struct lwp *lwp)
{
	int error, rcvflg, timo, secs, waited;
	struct mbuf *m, *from;
	struct uio uio;

	/* Free at end if not null. */
	from = NULL;

	/*
	 * Send it, repeatedly, until a reply is received,
	 * but delay each re-send by an increasing amount.
	 * If the delay hits the maximum, start complaining.
	 */
	waited = timo = 0;
send_again:
	waited += timo;
	if (waited >= TOTAL_TIMEOUT)
		return (ETIMEDOUT);

	/* Determine new timeout. */
	if (timo < MAX_RESEND_DELAY)
		timo++;
	else
		printf("nfs_boot: timeout...\n");

	if (sndproc) {
		error = (*sndproc)(snd, context, waited);
		if (error)
			goto out;
	}

	/* Send request (or re-send). */
	/* M_WAIT should not fail; the NULL check is defensive. */
	m = m_copypacket(snd, M_WAIT);
	if (m == NULL) {
		error = ENOBUFS;
		goto out;
	}
	/* so_send consumes 'm' whether or not it fails. */
	error = (*so->so_send)(so, nam, NULL, m, NULL, 0, lwp);
	if (error) {
		printf("nfs_boot: sosend: %d\n", error);
		goto out;
	}
	m = NULL;

	/*
	 * Wait for up to timo seconds for a reply.
	 * The socket receive timeout was set to 1 second.
	 */
	secs = timo;
	for (;;) {
		/* discard leftovers from a previous filtered reply */
		if (from) {
			m_freem(from);
			from = NULL;
		}
		if (m) {
			m_freem(m);
			m = NULL;
		}
		/*
		 * NOTE(review): only uio_resid is initialized here;
		 * presumably so_receive with an mbuf-pointer sink only
		 * consults uio_resid -- confirm against soreceive().
		 */
		uio.uio_resid = 1 << 16; /* ??? */
		rcvflg = 0;
		error = (*so->so_receive)(so, &from, &uio, &m, NULL, &rcvflg);
		if (error == EWOULDBLOCK) {
			/* one-second receive timeout ticked; maybe re-send */
			if (--secs <= 0)
				goto send_again;
			continue;
		}
		if (error)
			goto out;
#ifdef DIAGNOSTIC
		if (!m || !(m->m_flags & M_PKTHDR)
		    || (1 << 16) - uio.uio_resid != m->m_pkthdr.len)
			panic("nfs_boot_sendrecv: return size");
#endif

		/* nonzero from the filter: not our reply, keep waiting */
		if ((*rcvproc)(m, context))
			continue;

		/* hand results (or free them) per the out-parameters */
		if (rcv)
			*rcv = m;
		else
			m_freem(m);
		if (from_p) {
			*from_p = from;
			from = NULL;
		}
		break;
	}
out:
	if (from) m_freem(from);
	return (error);
}
/*
 * Start a transmit of one or more packets
 *
 * Drains the interface send queue, building a netvsc_packet (page
 * buffer list + RNDIS reservations) for each mbuf chain and handing it
 * to hv_rf_on_send().  On persistent send failure the chain is pushed
 * back onto if_snd and OACTIVE is set.
 *
 * NOTE: the function continues past the end of this excerpt.
 */
static int
hn_start_locked(struct ifnet *ifp)
{
	hn_softc_t *sc = ifp->if_softc;
	struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev);
	uint8_t *buf;
	netvsc_packet *packet;
	struct mbuf *m_head, *m;
	struct mbuf *mc_head = NULL;
	int i;
	int num_frags;
	int len;
	int xlen;
	int rppi_size;
	int retries = 0;
	int ret = 0;

	while (!IFQ_DRV_IS_EMPTY(&sc->hn_ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&sc->hn_ifp->if_snd, m_head);
		if (m_head == NULL) {
			break;
		}

		len = 0;
		num_frags = 0;
		xlen = 0;

		/* Walk the mbuf list computing total length and num frags */
		for (m = m_head; m != NULL; m = m->m_next) {
			if (m->m_len != 0) {
				num_frags++;
				len += m->m_len;
			}
		}

		/*
		 * Reserve the number of pages requested.  Currently,
		 * one page is reserved for the message in the RNDIS
		 * filter packet
		 */
		num_frags += HV_RF_NUM_TX_RESERVED_PAGE_BUFS;

		/* If exceeds # page_buffers in netvsc_packet */
		if (num_frags > NETVSC_PACKET_MAXPAGE) {
			/*
			 * NOTE(review): 'm' is always NULL here (the walk
			 * loop above ran to completion), so this m_freem()
			 * is a no-op and the dequeued 'm_head' chain is
			 * leaked.  Presumably this should be
			 * m_freem(m_head) -- confirm and fix.
			 */
			m_freem(m);

			return (EINVAL);
		}

		rppi_size = 0;
		if (m_head->m_flags & M_VLANTAG) {
			rppi_size = sizeof(rndis_per_packet_info) +
			    sizeof(ndis_8021q_info);
		}

		/*
		 * Allocate a buffer with space for a netvsc packet plus a
		 * number of reserved areas.  First comes a (currently 16
		 * bytes, currently unused) reserved data area.  Second is
		 * the netvsc_packet, which includes (currently 4) page
		 * buffers.  Third (optional) is a rndis_per_packet_info
		 * struct, but only if a VLAN tag should be inserted into the
		 * Ethernet frame by the Hyper-V infrastructure.  Fourth is
		 * an area reserved for an rndis_filter_packet struct.
		 * Changed malloc to M_NOWAIT to avoid sleep under spin lock.
		 * No longer reserving extra space for page buffers, as they
		 * are already part of the netvsc_packet.
		 */
		buf = malloc(HV_NV_PACKET_OFFSET_IN_BUF +
		    sizeof(netvsc_packet) + rppi_size +
		    sizeof(rndis_filter_packet),
		    M_DEVBUF, M_ZERO | M_NOWAIT);
		if (buf == NULL) {
			/*
			 * NOTE(review): same defect as above -- 'm' is
			 * NULL here, so m_head leaks on allocation
			 * failure.  Should presumably be m_freem(m_head).
			 */
			m_freem(m);

			return (ENOMEM);
		}

		packet = (netvsc_packet *)(buf + HV_NV_PACKET_OFFSET_IN_BUF);
		*(vm_offset_t *)buf = HV_NV_SC_PTR_OFFSET_IN_BUF;

		/*
		 * extension points to the area reserved for the
		 * rndis_filter_packet, which is placed just after
		 * the netvsc_packet (and rppi struct, if present;
		 * length is updated later).
		 */
		packet->extension = packet + 1;

		/* Set up the rndis header */
		packet->page_buf_count = num_frags;

		/* Initialize it from the mbuf */
		packet->tot_data_buf_len = len;

		/*
		 * If the Hyper-V infrastructure needs to embed a VLAN tag,
		 * initialize netvsc_packet and rppi struct values as needed.
		 */
		if (rppi_size) {
			/* Lower layers need the VLAN TCI */
			packet->vlan_tci = m_head->m_pkthdr.ether_vtag;
		}

		/*
		 * Fill the page buffers with mbuf info starting at index
		 * HV_RF_NUM_TX_RESERVED_PAGE_BUFS.
		 */
		i = HV_RF_NUM_TX_RESERVED_PAGE_BUFS;
		for (m = m_head; m != NULL; m = m->m_next) {
			if (m->m_len) {
				vm_offset_t paddr =
				    vtophys(mtod(m, vm_offset_t));
				packet->page_buffers[i].pfn =
				    paddr >> PAGE_SHIFT;
				packet->page_buffers[i].offset =
				    paddr & (PAGE_SIZE - 1);
				packet->page_buffers[i].length = m->m_len;
				i++;
			}
		}

		/*
		 * If bpf, copy the mbuf chain.  This is less expensive than
		 * it appears; the mbuf clusters are not copied, only their
		 * reference counts are incremented.
		 * Needed to avoid a race condition where the completion
		 * callback is invoked, freeing the mbuf chain, before the
		 * bpf_mtap code has a chance to run.
		 */
		if (ifp->if_bpf) {
			mc_head = m_copypacket(m_head, M_DONTWAIT);
		}
retry_send:
		/* Set the completion routine */
		packet->compl.send.on_send_completion = netvsc_xmit_completion;
		packet->compl.send.send_completion_context = packet;
		packet->compl.send.send_completion_tid = (uint64_t)m_head;

		/* Removed critical_enter(), does not appear necessary */
		ret = hv_rf_on_send(device_ctx, packet);

		if (ret == 0) {
			ifp->if_opackets++;
			/* if bpf && mc_head, call bpf_mtap code */
			if (mc_head) {
				ETHER_BPF_MTAP(ifp, mc_head);
			}
		} else {
			retries++;
			if (retries < 4) {
				goto retry_send;
			}

			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;

			/*
			 * Null the mbuf pointer so the completion function
			 * does not free the mbuf chain.  We just pushed the
			 * mbuf chain back on the if_snd queue.
			 */
			packet->compl.send.send_completion_tid = 0;

			/*
			 * Release the resources since we will not get any
			 * send completion
			 */
			netvsc_xmit_completion(packet);
		}

		/* if bpf && mc_head, free the mbuf chain copy */
		/*
		 * NOTE(review): mc_head is freed here but never reset to
		 * NULL, and it is only reassigned when if_bpf is set.  If
		 * if_bpf becomes false on a later iteration, the stale
		 * pointer would be passed to ETHER_BPF_MTAP and then
		 * double-freed.  Consider mc_head = NULL after m_freem(),
		 * or declaring mc_head inside the loop -- confirm.
		 */
		if (mc_head) {
			m_freem(mc_head);
		}
	}