/*
 * Start output on the pflog interface.
 */
void
pflogstart(struct ifnet *ifp)
{
	struct mbuf *m;
#ifndef __FreeBSD__
	int s;
#endif

	for (;;) {
#ifdef __FreeBSD__
		IF_LOCK(&ifp->if_snd);
		_IF_DROP(&ifp->if_snd);
		_IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			IF_UNLOCK(&ifp->if_snd);
			return;
		} else
			m_freem(m);
		IF_UNLOCK(&ifp->if_snd);
#else
		s = splimp();
		IF_DROP(&ifp->if_snd);
		IF_DEQUEUE(&ifp->if_snd, m);
		splx(s);

		if (m == NULL)
			return;
		else
			m_freem(m);
#endif
	}
}
static void
bt3c_forward(node_p node, hook_p hook, void *arg1, int arg2)
{
	bt3c_softc_p sc = (bt3c_softc_p) NG_NODE_PRIVATE(node);
	struct mbuf *m = NULL;
	int error;

	if (sc == NULL)
		return;

	if (sc->hook != NULL && NG_HOOK_IS_VALID(sc->hook)) {
		for (;;) {
			IF_DEQUEUE(&sc->inq, m);
			if (m == NULL)
				break;

			NG_SEND_DATA_ONLY(error, sc->hook, m);
			if (error != 0)
				NG_BT3C_STAT_IERROR(sc->stat);
		}
	} else {
		IF_LOCK(&sc->inq);
		for (;;) {
			_IF_DEQUEUE(&sc->inq, m);
			if (m == NULL)
				break;

			NG_BT3C_STAT_IERROR(sc->stat);
			NG_FREE_M(m);
		}
		IF_UNLOCK(&sc->inq);
	}
} /* bt3c_forward */
void
ieee80211_flush_ifq(struct ifqueue *ifq, struct ieee80211vap *vap)
{
	struct ieee80211_node *ni;
	struct mbuf *m;
	struct mbuf **mprev;

	IF_LOCK(ifq);
	mprev = &ifq->ifq_head;
	while ((m = *mprev) != NULL) {
		ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
		if (ni != NULL && ni->ni_vap == vap) {
			*mprev = m->m_nextpkt;	/* remove from list */
			ifq->ifq_len--;

			m_freem(m);
			ieee80211_free_node(ni);	/* reclaim ref */
		} else
			mprev = &m->m_nextpkt;
	}
	/* recalculate tail ptr */
	m = ifq->ifq_head;
	for (; m != NULL && m->m_nextpkt != NULL; m = m->m_nextpkt)
		;
	ifq->ifq_tail = m;
	IF_UNLOCK(ifq);
}
/*---------------------------------------------------------------------------*
 *	read from trace device
 *---------------------------------------------------------------------------*/
static int
i4btrcread(dev_t dev, struct uio *uio, int ioflag)
{
	struct mbuf *m;
	int x;
	int error = 0;
	int unit = minor(dev);

	if(!(device_state[unit] & ST_ISOPEN))
		return(EIO);

	x = SPLI4B();

	IF_LOCK(&trace_queue[unit]);

	while(IF_QEMPTY(&trace_queue[unit]) &&
	    (device_state[unit] & ST_ISOPEN))
	{
		device_state[unit] |= ST_WAITDATA;

		if((error = msleep((caddr_t) &trace_queue[unit],
		    &trace_queue[unit].ifq_mtx,
		    TTIPRI | PCATCH, "bitrc", 0)) != 0)
		{
			device_state[unit] &= ~ST_WAITDATA;
			IF_UNLOCK(&trace_queue[unit]);
			splx(x);
			return(error);
		}
	}

	_IF_DEQUEUE(&trace_queue[unit], m);
	IF_UNLOCK(&trace_queue[unit]);

	if(m && m->m_len)
		error = uiomove(m->m_data, m->m_len, uio);
	else
		error = EIO;

	if(m)
		i4b_Bfreembuf(m);

	splx(x);
	return(error);
}
/*
 * Accept data from the hook and queue it for output.
 */
Static int
ng_udbp_rcvdata(hook_p hook, item_p item)
{
	const udbp_p sc = NG_NODE_PRIVATE(NG_HOOK_NODE(hook));
	int error;
	struct ifqueue *xmitq_p;
	int s;
	struct mbuf *m;
	meta_p meta;

	NGI_GET_M(item, m);
	NGI_GET_META(item, meta);
	NG_FREE_ITEM(item);

	/*
	 * Now queue the data for when it can be sent
	 */
	if (meta && meta->priority > 0) {
		xmitq_p = (&sc->xmitq_hipri);
	} else {
		xmitq_p = (&sc->xmitq);
	}
	s = splusb();
	IF_LOCK(xmitq_p);
	if (_IF_QFULL(xmitq_p)) {
		_IF_DROP(xmitq_p);
		IF_UNLOCK(xmitq_p);
		splx(s);
		error = ENOBUFS;
		goto bad;
	}
	_IF_ENQUEUE(xmitq_p, m);
	IF_UNLOCK(xmitq_p);
	if (!(sc->flags & OUT_BUSY))
		udbp_setup_out_transfer(sc);
	splx(s);
	return (0);

bad:	/*
	 * It was an error case.
	 * check if we need to free the mbuf, and then return the error
	 */
	NG_FREE_M(m);
	NG_FREE_META(meta);
	return (error);
}
/**
 * Periodic timer tick for slow management operations
 *
 * @param arg	Unused (callout argument)
 */
static void
cvm_do_timer(void *arg)
{
	static int port;
	static int updated;

	if (port < CVMX_PIP_NUM_INPUT_PORTS) {
		if (cvm_oct_device[port]) {
			int queues_per_port;
			int qos;
			cvm_oct_private_t *priv =
			    (cvm_oct_private_t *)cvm_oct_device[port]->if_softc;

			cvm_oct_common_poll(priv->ifp);
			if (priv->need_link_update) {
				updated++;
				taskqueue_enqueue(cvm_oct_link_taskq,
				    &priv->link_task);
			}

			queues_per_port = cvmx_pko_get_num_queues(port);
			/* Drain any pending packets in the free list */
			for (qos = 0; qos < queues_per_port; qos++) {
				if (_IF_QLEN(&priv->tx_free_queue[qos]) > 0) {
					IF_LOCK(&priv->tx_free_queue[qos]);
					while (_IF_QLEN(&priv->tx_free_queue[qos]) >
					    cvmx_fau_fetch_and_add32(priv->fau + qos * 4, 0)) {
						struct mbuf *m;

						_IF_DEQUEUE(&priv->tx_free_queue[qos], m);
						m_freem(m);
					}
					IF_UNLOCK(&priv->tx_free_queue[qos]);

					/*
					 * XXX locking!
					 */
					priv->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
				}
			}
		}
		port++;
		/*
		 * Poll the next port in a 50th of a second.  This spreads
		 * the polling of ports out a little bit.
		 */
		callout_reset(&cvm_oct_poll_timer, hz / 50, cvm_do_timer, NULL);
	} else {
		port = 0;
		/*
		 * If any updates were made in this run, continue iterating at
		 * 1/50th of a second, so that if a link has merely gone down
		 * temporarily (e.g. because of interface reinitialization) it
		 * will not be forced to stay down for an entire second.
		 */
		if (updated > 0) {
			updated = 0;
			callout_reset(&cvm_oct_poll_timer, hz / 50,
			    cvm_do_timer, NULL);
		} else {
			/*
			 * All ports have been polled.  Start the next
			 * iteration through the ports in one second.
			 */
			callout_reset(&cvm_oct_poll_timer, hz,
			    cvm_do_timer, NULL);
		}
	}
}
/*
 * Start output on the pflog interface.
 */
static void
pflogstart(struct ifnet *ifp)
{
	struct mbuf *m;

	for (;;) {
		IF_LOCK(&ifp->if_snd);
		_IF_DROP(&ifp->if_snd);
		_IF_DEQUEUE(&ifp->if_snd, m);
		IF_UNLOCK(&ifp->if_snd);

		if (m == NULL)
			return;
		else
			m_freem(m);
	}
}
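/*
 * The snippets in this collection mix two flavors of the ifqueue API:
 * IF_DEQUEUE()/IF_DROP() take and release the queue mutex internally,
 * while the underscored _IF_DEQUEUE()/_IF_DROP() variants assume the
 * caller already holds it between IF_LOCK() and IF_UNLOCK().  The
 * sketch below shows the same drain loop written both ways.  It is
 * illustrative only: drain_ifq_unlocked() and drain_ifq_locked() are
 * hypothetical names, not functions from any driver quoted here, and
 * the usual <sys/mbuf.h> and <net/if_var.h> definitions are assumed.
 */
static void
drain_ifq_unlocked(struct ifqueue *q)
{
	struct mbuf *m;

	for (;;) {
		IF_DEQUEUE(q, m);	/* locks, dequeues, unlocks */
		if (m == NULL)
			return;
		m_freem(m);
	}
}

static void
drain_ifq_locked(struct ifqueue *q)
{
	struct mbuf *m;

	IF_LOCK(q);
	for (;;) {
		_IF_DEQUEUE(q, m);	/* caller holds the queue mutex */
		if (m == NULL)
			break;
		m_freem(m);
	}
	IF_UNLOCK(q);
}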
static int
ng_bt3c_rcvdata(hook_p hook, item_p item)
{
	bt3c_softc_p sc = (bt3c_softc_p)NG_NODE_PRIVATE(NG_HOOK_NODE(hook));
	struct mbuf *m = NULL;
	int error = 0;

	if (sc == NULL) {
		error = EHOSTDOWN;
		goto out;
	}

	if (hook != sc->hook) {
		error = EINVAL;
		goto out;
	}

	NGI_GET_M(item, m);

	IF_LOCK(&sc->outq);
	if (_IF_QFULL(&sc->outq)) {
		NG_BT3C_ERR(sc->dev,
		    "Outgoing queue is full. Dropping mbuf, len=%d\n",
		    m->m_pkthdr.len);

		_IF_DROP(&sc->outq);
		NG_BT3C_STAT_OERROR(sc->stat);

		NG_FREE_M(m);
	} else
		_IF_ENQUEUE(&sc->outq, m);
	IF_UNLOCK(&sc->outq);

	error = ng_send_fn(sc->node, NULL, bt3c_send, NULL, 0 /* new send */);
out:
	NG_FREE_ITEM(item);

	return (error);
} /* ng_bt3c_rcvdata */
/*---------------------------------------------------------------------------*
 *	get_trace_data_from_l1()
 *	------------------------
 *	is called from layer 1, adds timestamp to trace data and puts
 *	it into a queue, from which it can be read from the i4btrc
 *	device. The unit number in the trace header selects the minor
 *	device's queue the data is put into.
 *---------------------------------------------------------------------------*/
int
get_trace_data_from_l1(i4b_trace_hdr_t *hdr, int len, char *buf)
{
	struct mbuf *m;
	int x;
	int unit;
	int trunc = 0;
	int totlen = len + sizeof(i4b_trace_hdr_t);

	/*
	 * for telephony (or better non-HDLC HSCX mode) we get
	 * (MCLBYTES + sizeof(i4b_trace_hdr_t)) length packets
	 * to put into the queue to userland. because of this
	 * we detect this situation, strip the length to MCLBYTES
	 * max size, and inform the userland program of this fact
	 * by putting the number of truncated bytes into hdr->trunc.
	 */
	if(totlen > MCLBYTES)
	{
		trunc = 1;
		hdr->trunc = totlen - MCLBYTES;
		totlen = MCLBYTES;
	}
	else
	{
		hdr->trunc = 0;
	}

	/* set length of trace record */

	hdr->length = totlen;

	/* check valid unit no */

	if((unit = hdr->unit) > NI4BTRC)
	{
		printf("i4b_trace: get_trace_data_from_l1 - unit > NI4BTRC!\n");
		return(0);
	}

	/* get mbuf */

	if(!(m = i4b_Bgetmbuf(totlen)))
	{
		printf("i4b_trace: get_trace_data_from_l1 - i4b_Bgetmbuf() failed!\n");
		return(0);
	}

	/* check if we are in analyzemode */

	if(analyzemode && (unit == rxunit || unit == txunit))
	{
		if(unit == rxunit)
			hdr->dir = FROM_NT;
		else
			hdr->dir = FROM_TE;

		unit = outunit;
	}

	IF_LOCK(&trace_queue[unit]);

	if(_IF_QFULL(&trace_queue[unit]))
	{
		struct mbuf *m1;

		x = SPLI4B();
		_IF_DEQUEUE(&trace_queue[unit], m1);
		splx(x);

		i4b_Bfreembuf(m1);
	}

	/* copy trace header */
	memcpy(m->m_data, hdr, sizeof(i4b_trace_hdr_t));

	/* copy trace data */
	if(trunc)
		memcpy(&m->m_data[sizeof(i4b_trace_hdr_t)], buf,
		    totlen - sizeof(i4b_trace_hdr_t));
	else
		memcpy(&m->m_data[sizeof(i4b_trace_hdr_t)], buf, len);

	x = SPLI4B();

	_IF_ENQUEUE(&trace_queue[unit], m);
	IF_UNLOCK(&trace_queue[unit]);

	if(device_state[unit] & ST_WAITDATA)
	{
		device_state[unit] &= ~ST_WAITDATA;
		wakeup((caddr_t) &trace_queue[unit]);
	}

	splx(x);

	return(1);
}
static void
bt3c_receive(bt3c_softc_p sc)
{
	u_int16_t i, count, c;

	/* Receive data from the card */
	bt3c_read(sc, 0x7006, count);

	NG_BT3C_INFO(sc->dev, "The card has %d characters\n", count);

	bt3c_set_address(sc, 0x7480);

	for (i = 0; i < count; i++) {
		/* Allocate new mbuf if needed */
		if (sc->m == NULL) {
			sc->state = NG_BT3C_W4_PKT_IND;
			sc->want = 1;

			MGETHDR(sc->m, M_NOWAIT, MT_DATA);
			if (sc->m == NULL) {
				NG_BT3C_ERR(sc->dev, "Could not get mbuf\n");
				NG_BT3C_STAT_IERROR(sc->stat);

				break; /* XXX loss of sync */
			}

			if (!(MCLGET(sc->m, M_NOWAIT))) {
				NG_FREE_M(sc->m);

				NG_BT3C_ERR(sc->dev, "Could not get cluster\n");
				NG_BT3C_STAT_IERROR(sc->stat);

				break; /* XXX loss of sync */
			}

			sc->m->m_len = sc->m->m_pkthdr.len = 0;
		}

		/* Read and append character to mbuf */
		bt3c_read_data(sc, c);
		if (sc->m->m_pkthdr.len >= MCLBYTES) {
			NG_BT3C_ERR(sc->dev, "Oversized frame\n");

			NG_FREE_M(sc->m);
			sc->state = NG_BT3C_W4_PKT_IND;
			sc->want = 1;

			break; /* XXX loss of sync */
		}

		mtod(sc->m, u_int8_t *)[sc->m->m_len++] = (u_int8_t) c;
		sc->m->m_pkthdr.len++;

		NG_BT3C_INFO(sc->dev, "Got char %#x, want=%d, got=%d\n",
		    c, sc->want, sc->m->m_pkthdr.len);

		if (sc->m->m_pkthdr.len < sc->want)
			continue; /* wait for more */

		switch (sc->state) {
		/* Got packet indicator */
		case NG_BT3C_W4_PKT_IND:
			NG_BT3C_INFO(sc->dev, "Got packet indicator %#x\n",
			    *mtod(sc->m, u_int8_t *));

			sc->state = NG_BT3C_W4_PKT_HDR;

			/*
			 * Since the packet indicator is included in the
			 * packet header, just set sc->want to
			 * sizeof(packet header).
			 */
			switch (*mtod(sc->m, u_int8_t *)) {
			case NG_HCI_ACL_DATA_PKT:
				sc->want = sizeof(ng_hci_acldata_pkt_t);
				break;

			case NG_HCI_SCO_DATA_PKT:
				sc->want = sizeof(ng_hci_scodata_pkt_t);
				break;

			case NG_HCI_EVENT_PKT:
				sc->want = sizeof(ng_hci_event_pkt_t);
				break;

			default:
				NG_BT3C_ERR(sc->dev,
				    "Ignoring unknown packet type=%#x\n",
				    *mtod(sc->m, u_int8_t *));

				NG_BT3C_STAT_IERROR(sc->stat);

				NG_FREE_M(sc->m);
				sc->state = NG_BT3C_W4_PKT_IND;
				sc->want = 1;
				break;
			}
			break;

		/* Got packet header */
		case NG_BT3C_W4_PKT_HDR:
			sc->state = NG_BT3C_W4_PKT_DATA;

			switch (*mtod(sc->m, u_int8_t *)) {
			case NG_HCI_ACL_DATA_PKT:
				c = le16toh(mtod(sc->m,
				    ng_hci_acldata_pkt_t *)->length);
				break;

			case NG_HCI_SCO_DATA_PKT:
				c = mtod(sc->m, ng_hci_scodata_pkt_t *)->length;
				break;

			case NG_HCI_EVENT_PKT:
				c = mtod(sc->m, ng_hci_event_pkt_t *)->length;
				break;

			default:
				KASSERT(0, ("Invalid packet type=%#x\n",
				    *mtod(sc->m, u_int8_t *)));
				break;
			}

			NG_BT3C_INFO(sc->dev,
			    "Got packet header, packet type=%#x, got so far %d, payload size=%d\n",
			    *mtod(sc->m, u_int8_t *), sc->m->m_pkthdr.len, c);

			if (c > 0) {
				sc->want += c;
				break;
			}

			/* else FALLTHROUGH and deliver frame */
			/* XXX is this true? should we deliver empty frame? */

		/* Got packet data */
		case NG_BT3C_W4_PKT_DATA:
			NG_BT3C_INFO(sc->dev,
			    "Got full packet, packet type=%#x, packet size=%d\n",
			    *mtod(sc->m, u_int8_t *), sc->m->m_pkthdr.len);

			NG_BT3C_STAT_BYTES_RECV(sc->stat, sc->m->m_pkthdr.len);
			NG_BT3C_STAT_PCKTS_RECV(sc->stat);

			IF_LOCK(&sc->inq);
			if (_IF_QFULL(&sc->inq)) {
				NG_BT3C_ERR(sc->dev,
				    "Incoming queue is full. Dropping mbuf, len=%d\n",
				    sc->m->m_pkthdr.len);

				NG_BT3C_STAT_IERROR(sc->stat);
				NG_FREE_M(sc->m);
			} else {
				_IF_ENQUEUE(&sc->inq, sc->m);
				sc->m = NULL;
			}
			IF_UNLOCK(&sc->inq);

			sc->state = NG_BT3C_W4_PKT_IND;
			sc->want = 1;
			break;

		default:
			KASSERT(0, ("Invalid node state=%d", sc->state));
			break;
		}
	}

	bt3c_write(sc, 0x7006, 0x0000);
} /* bt3c_receive */
/**
 * Packet transmit
 *
 * @param m	Packet to send
 * @param ifp	Interface to send the packet on
 * @return 0 on success; 1 if the packet was dropped
 */
int
cvm_oct_xmit(struct mbuf *m, struct ifnet *ifp)
{
	cvmx_pko_command_word0_t pko_command;
	cvmx_buf_ptr_t hw_buffer;
	int dropped;
	int qos;
	cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
	int32_t in_use;
	int32_t buffers_to_free;
	cvmx_wqe_t *work;

	/*
	 * Prefetch the private data structure.  It is larger than one
	 * cache line.
	 */
	CVMX_PREFETCH(priv, 0);

	/* Start off assuming no drop */
	dropped = 0;

	/*
	 * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
	 * completely remove "qos" in the event neither interface
	 * supports multiple queues per port.
	 */
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		qos = GET_MBUF_QOS(m);
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else
		qos = 0;

	/*
	 * The CN3XXX series of parts has an errata (GMX-401) which
	 * causes the GMX block to hang if a collision occurs towards
	 * the end of a <68 byte packet.  As a workaround for this, we
	 * pad packets to be 68 bytes whenever we are in half duplex
	 * mode.  We don't handle the case of having a small packet but
	 * no room to add the padding.  The kernel should always give
	 * us at least a cache line.
	 */
	if (__predict_false(m->m_pkthdr.len < 64) &&
	    OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		cvmx_gmxx_prtx_cfg_t gmx_prt_cfg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		if (interface < 2) {
			/* We only need to pad packets in half duplex mode */
			gmx_prt_cfg.u64 =
			    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
			if (gmx_prt_cfg.s.duplex == 0) {
				static uint8_t pad[64];

				if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad))
					printf("%s: unable to pad small packet.",
					    __func__);
			}
		}
	}

#ifdef OCTEON_VENDOR_RADISYS
	/*
	 * The RSYS4GBE will hang if asked to transmit a packet less
	 * than 60 bytes.
	 */
	if (__predict_false(m->m_pkthdr.len < 60) &&
	    cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE) {
		static uint8_t pad[60];

		if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad))
			printf("%s: unable to pad small packet.", __func__);
	}
#endif

	/*
	 * If the packet is not fragmented.
	 */
	if (m->m_pkthdr.len == m->m_len) {
		/* Build the PKO buffer pointer */
		hw_buffer.u64 = 0;
		hw_buffer.s.addr = cvmx_ptr_to_phys(m->m_data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = m->m_len;

		/* Build the PKO command */
		pko_command.u64 = 0;
		pko_command.s.segs = 1;
		pko_command.s.dontfree = 1; /* Do not put this buffer into the FPA. */

		work = NULL;
	} else {
		struct mbuf *n;
		unsigned segs;
		uint64_t *gp;

		/*
		 * The packet is fragmented, we need to send a list of
		 * segments in memory we borrow from the WQE pool.
		 */
		work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
		if (work == NULL) {
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			return 1;
		}

		segs = 0;
		gp = (uint64_t *)work;
		for (n = m; n != NULL; n = n->m_next) {
			if (segs == CVMX_FPA_WQE_POOL_SIZE / sizeof (uint64_t))
				panic("%s: too many segments in packet; call m_collapse().",
				    __func__);

			/* Build the PKO buffer pointer */
			hw_buffer.u64 = 0;
			hw_buffer.s.i = 1; /* Do not put this buffer into the FPA. */
			hw_buffer.s.addr = cvmx_ptr_to_phys(n->m_data);
			hw_buffer.s.pool = 0;
			hw_buffer.s.size = n->m_len;

			*gp++ = hw_buffer.u64;
			segs++;
		}

		/* Build the PKO buffer gather list pointer */
		hw_buffer.u64 = 0;
		hw_buffer.s.addr = cvmx_ptr_to_phys(work);
		hw_buffer.s.pool = CVMX_FPA_WQE_POOL;
		hw_buffer.s.size = segs;

		/* Build the PKO command */
		pko_command.u64 = 0;
		pko_command.s.segs = segs;
		pko_command.s.gather = 1;
		pko_command.s.dontfree = 0; /* Put the WQE above back into the FPA. */
	}

	/* Finish building the PKO command */
	pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
	pko_command.s.reg0 = priv->fau + qos * 4;
	pko_command.s.total_bytes = m->m_pkthdr.len;
	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
	pko_command.s.subone0 = 1;

	/* Check if we can use the hardware checksumming */
	if ((m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) != 0) {
		/* Use hardware checksum calc */
		pko_command.s.ipoffp1 = ETHER_HDR_LEN + 1;
	}

	/*
	 * XXX
	 * Could use a different free queue (and different FAU address) per
	 * core instead of per QoS, to reduce contention here.
	 */
	IF_LOCK(&priv->tx_free_queue[qos]);
	/* Get the number of mbufs in use by the hardware */
	in_use = cvmx_fau_fetch_and_add32(priv->fau + qos * 4, 1);
	buffers_to_free =
	    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
	    CVMX_PKO_LOCK_CMD_QUEUE);

	/* Drop this packet if we have too many already queued to the HW */
	if (_IF_QFULL(&priv->tx_free_queue[qos])) {
		dropped = 1;
	}
	/* Send the packet to the output queue */
	else if (__predict_false(cvmx_pko_send_packet_finish(priv->port,
	    priv->queue + qos, pko_command, hw_buffer,
	    CVMX_PKO_LOCK_CMD_QUEUE))) {
		DEBUGPRINT("%s: Failed to send the packet\n", if_name(ifp));
		dropped = 1;
	}

	if (__predict_false(dropped)) {
		m_freem(m);
		cvmx_fau_atomic_add32(priv->fau + qos * 4, -1);
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	} else {
		/* Put this packet on the queue to be freed later */
		_IF_ENQUEUE(&priv->tx_free_queue[qos], m);

		/* Pass it to any BPF listeners. */
		ETHER_BPF_MTAP(ifp, m);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
	}

	/* Free mbufs not in use by the hardware */
	if (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) {
		while (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) {
			_IF_DEQUEUE(&priv->tx_free_queue[qos], m);
			m_freem(m);
		}
	}
	IF_UNLOCK(&priv->tx_free_queue[qos]);

	return dropped;
}
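/*
 * The producer-side counterpart of the pattern above appears in
 * ng_udbp_rcvdata() and ng_bt3c_rcvdata(): lock the queue, drop the
 * packet if the queue is already full, otherwise enqueue it.  Below is
 * a condensed sketch of that idiom; enqueue_or_drop() is a hypothetical
 * name, and the standard ifqueue macros and <sys/errno.h> are assumed.
 */
static int
enqueue_or_drop(struct ifqueue *q, struct mbuf *m)
{
	IF_LOCK(q);
	if (_IF_QFULL(q)) {
		_IF_DROP(q);		/* account the drop in ifq_drops */
		IF_UNLOCK(q);
		m_freem(m);
		return (ENOBUFS);
	}
	_IF_ENQUEUE(q, m);
	IF_UNLOCK(q);
	return (0);
}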