int
iavc_send(capi_softc_t *capi_sc, struct mbuf *m)
{
	iavc_softc_t *sc = (iavc_softc_t *)capi_sc->ctx;

	if (sc->sc_state != IAVC_UP) {
		printf("iavc%d: attempt to send before device up\n",
		    sc->sc_unit);
		if (m->m_next)
			i4b_Bfreembuf(m->m_next);
		i4b_Dfreembuf(m);
		return (ENXIO);
	}

	if (_IF_QFULL(&sc->sc_txq)) {
#if defined (__FreeBSD__) && __FreeBSD__ > 4
		_IF_DROP(&sc->sc_txq);
#else
		IF_DROP(&sc->sc_txq);
#endif
		printf("iavc%d: tx overflow, message dropped\n", sc->sc_unit);
		if (m->m_next)
			i4b_Bfreembuf(m->m_next);
		i4b_Dfreembuf(m);
	} else {
		_IF_ENQUEUE(&sc->sc_txq, m);
		iavc_start_tx(sc);
	}

	return 0;
}
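/*
 * Illustrative sketch (not part of any driver above): the recurring pattern
 * here -- test _IF_QFULL(), drop and free on overflow, otherwise _IF_ENQUEUE()
 * and kick the transmitter -- modelled as a small bounded FIFO in plain
 * userland C.  All names below (pktq, pktq_send, PKTQ_MAXLEN) are invented
 * for this example and do not exist in the i4b or netgraph sources.
 */
#include <stdio.h>
#include <stdlib.h>

#define PKTQ_MAXLEN	8		/* analogue of ifq_maxlen */

struct pktq {
	void	*slot[PKTQ_MAXLEN];	/* queued packet buffers */
	int	 head, len;
	int	 drops;			/* analogue of _IF_DROP() accounting */
};

static int
pktq_full(struct pktq *q)
{
	return (q->len >= PKTQ_MAXLEN);
}

/* Enqueue a buffer, or free it and count a drop when the queue is full. */
static int
pktq_send(struct pktq *q, void *buf)
{
	if (pktq_full(q)) {
		q->drops++;		/* _IF_DROP() analogue */
		free(buf);		/* i4b_Dfreembuf()/m_freem() analogue */
		return (-1);		/* ENOBUFS-style failure */
	}
	q->slot[(q->head + q->len++) % PKTQ_MAXLEN] = buf;
	/* the real drivers would now start the transmitter, e.g. iavc_start_tx() */
	return (0);
}

int
main(void)
{
	struct pktq q = { .head = 0, .len = 0, .drops = 0 };
	int i;

	for (i = 0; i < 10; i++)	/* two more packets than the queue holds */
		pktq_send(&q, malloc(64));
	printf("queued %d, dropped %d\n", q.len, q.drops);

	while (q.len > 0)		/* drain and free what was queued */
		free(q.slot[(q.head + --q.len) % PKTQ_MAXLEN]);
	return (0);
}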
/*
 * Accept data from the hook and queue it for output.
 */
Static int
ng_udbp_rcvdata(hook_p hook, item_p item)
{
	const udbp_p sc = NG_NODE_PRIVATE(NG_HOOK_NODE(hook));
	int error;
	struct ifqueue *xmitq_p;
	int s;
	struct mbuf *m;
	meta_p meta;

	NGI_GET_M(item, m);
	NGI_GET_META(item, meta);
	NG_FREE_ITEM(item);

	/*
	 * Now queue the data for when it can be sent
	 */
	if (meta && meta->priority > 0) {
		xmitq_p = (&sc->xmitq_hipri);
	} else {
		xmitq_p = (&sc->xmitq);
	}

	s = splusb();
	IF_LOCK(xmitq_p);
	if (_IF_QFULL(xmitq_p)) {
		_IF_DROP(xmitq_p);
		IF_UNLOCK(xmitq_p);
		splx(s);
		error = ENOBUFS;
		goto bad;
	}
	_IF_ENQUEUE(xmitq_p, m);
	IF_UNLOCK(xmitq_p);
	if (!(sc->flags & OUT_BUSY))
		udbp_setup_out_transfer(sc);
	splx(s);
	return (0);

bad:	/*
	 * It was an error case.
	 * check if we need to free the mbuf, and then return the error
	 */
	NG_FREE_M(m);
	NG_FREE_META(meta);
	return (error);
}
/*---------------------------------------------------------------------------*
 *	L2 -> L1: PH-DATA-REQUEST (D-Channel)
 *
 *	NOTE: We may get called here from ihfc_hdlc_Dread or isac_hdlc_Dread
 *	via the upper layers.
 *---------------------------------------------------------------------------*/
static int
ihfc_ph_data_req(int unit, struct mbuf *m, int freeflag)
{
	ihfc_sc_t *sc = &ihfc_softc[unit];
	u_char chan = 0;
	HFC_VAR;

	if (!m)
		return 0;

	HFC_BEG;

	if (S_PHSTATE != 3) {
		NDBGL1(L1_PRIM, "L1 was not running: "
		    "ihfc_ph_activate_req(unit = %d)!", unit);
		ihfc_ph_activate_req(unit);
	}

	/* "Allow" I-frames (-hp) */
	if (freeflag == MBUF_DONTFREE)
		m = m_copypacket(m, M_DONTWAIT);

	if (!_IF_QFULL(&S_IFQUEUE) && m) {
		IF_ENQUEUE(&S_IFQUEUE, m);
		ihfc_B_start(unit, chan);	/* (recycling) */
	} else {
		NDBGL1(L1_ERROR, "No frame out (unit = %d)", unit);
		if (m)
			i4b_Dfreembuf(m);
		HFC_END;
		return 0;
	}

	if (S_INTR_ACTIVE)
		S_INT_S1 |= 0x04;

	HFC_END;
	return 1;
}
/*---------------------------------------------------------------------------*
 *	routine INVOKE RETRANSMISSION (Q.921 03/93 page 84)
 *---------------------------------------------------------------------------*/
void
i4b_invoke_retransmission(l2_softc_t *l2sc, int nr)
{
	CRIT_VAR;

	CRIT_BEG;

	NDBGL2(L2_ERROR, "nr = %d", nr);

	while (l2sc->vs != nr) {
		NDBGL2(L2_ERROR, "nr(%d) != vs(%d)", nr, l2sc->vs);

		M128DEC(l2sc->vs);

		/* XXXXXXXXXXXXXXXXX */
		if ((l2sc->ua_num != UA_EMPTY) && (l2sc->vs == l2sc->ua_num)) {
			if (_IF_QFULL(&l2sc->i_queue)) {
				NDBGL2(L2_ERROR, "ERROR, I-queue full!");
			} else {
				IF_ENQUEUE(&l2sc->i_queue, l2sc->ua_frame);
				l2sc->ua_num = UA_EMPTY;
			}
		} else {
			NDBGL2(L2_ERROR, "ERROR, l2sc->vs = %d, l2sc->ua_num = %d",
			    l2sc->vs, l2sc->ua_num);
		}
		/* XXXXXXXXXXXXXXXXX */

		i4b_i_frame_queued_up(l2sc);
	}

	CRIT_END;
}
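/*
 * Illustrative sketch (not from the i4b sources): the loop above steps the
 * send state variable V(S) backwards modulo 128 (M128DEC) until it equals the
 * received N(R); that is how Q.921 picks the outstanding I-frames to
 * retransmit.  The macro and variable names below are invented for this
 * example.
 */
#include <stdio.h>

#define M128DEC_EX(v)	((v) = (((v) - 1) & 0x7f))	/* decrement mod 128 */

int
main(void)
{
	int vs = 2;	/* next sequence number to send, V(S) */
	int nr = 125;	/* peer acknowledged everything below N(R) = 125 */

	/* The outstanding frames 125..127, 0, 1 get requeued, newest first. */
	while (vs != nr) {
		M128DEC_EX(vs);
		printf("retransmit I-frame %d\n", vs);
	}
	return (0);
}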
static int
ng_bt3c_rcvdata(hook_p hook, item_p item)
{
	bt3c_softc_p	 sc = (bt3c_softc_p)NG_NODE_PRIVATE(NG_HOOK_NODE(hook));
	struct mbuf	*m = NULL;
	int		 error = 0;

	if (sc == NULL) {
		error = EHOSTDOWN;
		goto out;
	}

	if (hook != sc->hook) {
		error = EINVAL;
		goto out;
	}

	NGI_GET_M(item, m);

	IF_LOCK(&sc->outq);
	if (_IF_QFULL(&sc->outq)) {
		NG_BT3C_ERR(sc->dev,
		    "Outgoing queue is full. Dropping mbuf, len=%d\n",
		    m->m_pkthdr.len);

		_IF_DROP(&sc->outq);
		NG_BT3C_STAT_OERROR(sc->stat);

		NG_FREE_M(m);
	} else
		_IF_ENQUEUE(&sc->outq, m);
	IF_UNLOCK(&sc->outq);

	error = ng_send_fn(sc->node, NULL, bt3c_send, NULL, 0 /* new send */);
out:
	NG_FREE_ITEM(item);

	return (error);
} /* ng_bt3c_rcvdata */
/*---------------------------------------------------------------------------*
 *	get_trace_data_from_l1()
 *	------------------------
 *	is called from layer 1, adds timestamp to trace data and puts
 *	it into a queue, from which it can be read from the i4btrc
 *	device. The unit number in the trace header selects the minor
 *	device's queue the data is put into.
 *---------------------------------------------------------------------------*/
int
get_trace_data_from_l1(i4b_trace_hdr_t *hdr, int len, char *buf)
{
	struct mbuf *m;
	int x;
	int unit;
	int trunc = 0;
	int totlen = len + sizeof(i4b_trace_hdr_t);

	/*
	 * for telephony (or better: non-HDLC HSCX mode) we get
	 * (MCLBYTE + sizeof(i4b_trace_hdr_t)) length packets
	 * to put into the queue to userland. because of this
	 * we detect this situation, strip the length down to
	 * MCLBYTES max size, and inform the userland program
	 * of this fact by putting the number of truncated bytes
	 * into hdr->trunc.
	 */
	if (totlen > MCLBYTES) {
		trunc = 1;
		hdr->trunc = totlen - MCLBYTES;
		totlen = MCLBYTES;
	} else {
		hdr->trunc = 0;
	}

	/* set length of trace record */
	hdr->length = totlen;

	/* check valid unit no */
	if ((unit = hdr->unit) > NI4BTRC) {
		printf("i4b_trace: get_trace_data_from_l1 - unit > NI4BTRC!\n");
		return (0);
	}

	/* get mbuf */
	if (!(m = i4b_Bgetmbuf(totlen))) {
		printf("i4b_trace: get_trace_data_from_l1 - i4b_getmbuf() failed!\n");
		return (0);
	}

	/* check if we are in analyzemode */
	if (analyzemode && (unit == rxunit || unit == txunit)) {
		if (unit == rxunit)
			hdr->dir = FROM_NT;
		else
			hdr->dir = FROM_TE;
		unit = outunit;
	}

	IF_LOCK(&trace_queue[unit]);

	if (_IF_QFULL(&trace_queue[unit])) {
		struct mbuf *m1;

		x = SPLI4B();
		_IF_DEQUEUE(&trace_queue[unit], m1);
		splx(x);

		i4b_Bfreembuf(m1);
	}

	/* copy trace header */
	memcpy(m->m_data, hdr, sizeof(i4b_trace_hdr_t));

	/* copy trace data */
	if (trunc)
		memcpy(&m->m_data[sizeof(i4b_trace_hdr_t)], buf,
		    totlen - sizeof(i4b_trace_hdr_t));
	else
		memcpy(&m->m_data[sizeof(i4b_trace_hdr_t)], buf, len);

	x = SPLI4B();

	_IF_ENQUEUE(&trace_queue[unit], m);
	IF_UNLOCK(&trace_queue[unit]);

	if (device_state[unit] & ST_WAITDATA) {
		device_state[unit] &= ~ST_WAITDATA;
		wakeup((caddr_t)&trace_queue[unit]);
	}

	splx(x);

	return (1);
}
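/*
 * Illustrative sketch (not from i4b_trace.c): the truncation arithmetic used
 * above, in plain C.  A trace record is header + payload; anything beyond the
 * cluster size is clipped and the number of clipped bytes reported back in
 * "trunc".  MCLBYTES_EX and HDR_SIZE_EX are stand-ins invented for this
 * example; the real values come from <sys/param.h> and sizeof(i4b_trace_hdr_t).
 */
#include <stdio.h>

#define MCLBYTES_EX	2048	/* stand-in for MCLBYTES */
#define HDR_SIZE_EX	32	/* stand-in for sizeof(i4b_trace_hdr_t) */

/* Return the total record length to queue; *truncp gets the clipped bytes. */
static int
trace_record_len(int payload_len, int *truncp)
{
	int totlen = payload_len + HDR_SIZE_EX;

	if (totlen > MCLBYTES_EX) {
		*truncp = totlen - MCLBYTES_EX;
		totlen = MCLBYTES_EX;
	} else
		*truncp = 0;
	return (totlen);
}

int
main(void)
{
	int trunc, totlen;

	totlen = trace_record_len(2048, &trunc);	/* cluster-sized payload */
	printf("record %d bytes, %d truncated\n", totlen, trunc);	/* 2048, 32 */
	return (0);
}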
static void
bt3c_receive(bt3c_softc_p sc)
{
	u_int16_t	i, count, c;

	/* Receive data from the card */
	bt3c_read(sc, 0x7006, count);

	NG_BT3C_INFO(sc->dev, "The card has %d characters\n", count);

	bt3c_set_address(sc, 0x7480);

	for (i = 0; i < count; i++) {
		/* Allocate new mbuf if needed */
		if (sc->m == NULL) {
			sc->state = NG_BT3C_W4_PKT_IND;
			sc->want = 1;

			MGETHDR(sc->m, M_NOWAIT, MT_DATA);
			if (sc->m == NULL) {
				NG_BT3C_ERR(sc->dev, "Could not get mbuf\n");
				NG_BT3C_STAT_IERROR(sc->stat);

				break; /* XXX loss of sync */
			}

			if (!(MCLGET(sc->m, M_NOWAIT))) {
				NG_FREE_M(sc->m);

				NG_BT3C_ERR(sc->dev, "Could not get cluster\n");
				NG_BT3C_STAT_IERROR(sc->stat);

				break; /* XXX loss of sync */
			}

			sc->m->m_len = sc->m->m_pkthdr.len = 0;
		}

		/* Read and append character to mbuf */
		bt3c_read_data(sc, c);
		if (sc->m->m_pkthdr.len >= MCLBYTES) {
			NG_BT3C_ERR(sc->dev, "Oversized frame\n");

			NG_FREE_M(sc->m);
			sc->state = NG_BT3C_W4_PKT_IND;
			sc->want = 1;

			break; /* XXX loss of sync */
		}

		mtod(sc->m, u_int8_t *)[sc->m->m_len ++] = (u_int8_t) c;
		sc->m->m_pkthdr.len ++;

		NG_BT3C_INFO(sc->dev, "Got char %#x, want=%d, got=%d\n",
		    c, sc->want, sc->m->m_pkthdr.len);

		if (sc->m->m_pkthdr.len < sc->want)
			continue; /* wait for more */

		switch (sc->state) {
		/* Got packet indicator */
		case NG_BT3C_W4_PKT_IND:
			NG_BT3C_INFO(sc->dev, "Got packet indicator %#x\n",
			    *mtod(sc->m, u_int8_t *));

			sc->state = NG_BT3C_W4_PKT_HDR;

			/*
			 * Since the packet indicator is included in the packet
			 * header, just set sc->want to sizeof(packet header).
			 */
			switch (*mtod(sc->m, u_int8_t *)) {
			case NG_HCI_ACL_DATA_PKT:
				sc->want = sizeof(ng_hci_acldata_pkt_t);
				break;

			case NG_HCI_SCO_DATA_PKT:
				sc->want = sizeof(ng_hci_scodata_pkt_t);
				break;

			case NG_HCI_EVENT_PKT:
				sc->want = sizeof(ng_hci_event_pkt_t);
				break;

			default:
				NG_BT3C_ERR(sc->dev,
				    "Ignoring unknown packet type=%#x\n",
				    *mtod(sc->m, u_int8_t *));

				NG_BT3C_STAT_IERROR(sc->stat);

				NG_FREE_M(sc->m);

				sc->state = NG_BT3C_W4_PKT_IND;
				sc->want = 1;
				break;
			}
			break;

		/* Got packet header */
		case NG_BT3C_W4_PKT_HDR:
			sc->state = NG_BT3C_W4_PKT_DATA;

			switch (*mtod(sc->m, u_int8_t *)) {
			case NG_HCI_ACL_DATA_PKT:
				c = le16toh(mtod(sc->m,
				    ng_hci_acldata_pkt_t *)->length);
				break;

			case NG_HCI_SCO_DATA_PKT:
				c = mtod(sc->m, ng_hci_scodata_pkt_t *)->length;
				break;

			case NG_HCI_EVENT_PKT:
				c = mtod(sc->m, ng_hci_event_pkt_t *)->length;
				break;

			default:
				KASSERT(0, ("Invalid packet type=%#x\n",
				    *mtod(sc->m, u_int8_t *)));
				break;
			}

			NG_BT3C_INFO(sc->dev,
			    "Got packet header, packet type=%#x, "
			    "got so far %d, payload size=%d\n",
			    *mtod(sc->m, u_int8_t *), sc->m->m_pkthdr.len, c);

			if (c > 0) {
				sc->want += c;
				break;
			}

			/* else FALLTHROUGH and deliver frame */
			/* XXX is this true? should we deliver empty frame? */

		/* Got packet data */
		case NG_BT3C_W4_PKT_DATA:
			NG_BT3C_INFO(sc->dev,
			    "Got full packet, packet type=%#x, packet size=%d\n",
			    *mtod(sc->m, u_int8_t *), sc->m->m_pkthdr.len);

			NG_BT3C_STAT_BYTES_RECV(sc->stat, sc->m->m_pkthdr.len);
			NG_BT3C_STAT_PCKTS_RECV(sc->stat);

			IF_LOCK(&sc->inq);
			if (_IF_QFULL(&sc->inq)) {
				NG_BT3C_ERR(sc->dev,
				    "Incoming queue is full. "
				    "Dropping mbuf, len=%d\n",
				    sc->m->m_pkthdr.len);

				NG_BT3C_STAT_IERROR(sc->stat);
				NG_FREE_M(sc->m);
			} else {
				_IF_ENQUEUE(&sc->inq, sc->m);
				sc->m = NULL;
			}
			IF_UNLOCK(&sc->inq);

			sc->state = NG_BT3C_W4_PKT_IND;
			sc->want = 1;
			break;

		default:
			KASSERT(0, ("Invalid node state=%d", sc->state));
			break;
		}
	}

	bt3c_write(sc, 0x7006, 0x0000);
} /* bt3c_receive */
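/*
 * Illustrative sketch (not the netgraph code): bt3c_receive() above is a
 * byte-at-a-time reassembly machine -- read one byte to learn the packet
 * type, then enough bytes for that type's header, then the payload length
 * found in the header.  The "want/got" discipline is modelled below against
 * an invented packet layout (1-byte type, 1-byte length, payload); it is NOT
 * the real HCI framing, and all names are made up for this example.
 */
#include <stdio.h>

enum state { W4_IND, W4_HDR, W4_DATA };

struct reasm {
	enum state	 state;
	size_t		 want;		/* bytes needed before the next step */
	size_t		 got;
	unsigned char	 buf[256];
};

/* Feed one byte; returns 1 when a complete packet sits in r->buf. */
static int
reasm_feed(struct reasm *r, unsigned char c)
{
	r->buf[r->got++] = c;
	if (r->got < r->want)
		return (0);			/* wait for more */

	switch (r->state) {
	case W4_IND:				/* got the type byte */
		r->state = W4_HDR;
		r->want = 2;			/* type + length byte */
		return (0);
	case W4_HDR:				/* got the header, read length */
		r->state = W4_DATA;
		r->want += r->buf[1];		/* payload bytes still to come */
		return (r->buf[1] == 0);	/* empty payload: already done */
	case W4_DATA:
		return (1);			/* full packet assembled */
	}
	return (0);
}

int
main(void)
{
	static const unsigned char wire[] = { 0x02, 0x03, 'a', 'b', 'c' };
	struct reasm r = { .state = W4_IND, .want = 1, .got = 0 };
	size_t i;

	for (i = 0; i < sizeof(wire); i++)
		if (reasm_feed(&r, wire[i]))
			printf("packet type %#x, %zu bytes total\n",
			    r.buf[0], r.got);
	return (0);
}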
static int
gre_input2(struct mbuf *m, int hlen, u_char proto)
{
	struct greip *gip = mtod(m, struct greip *);
	int s;
	struct ifqueue *ifq;
	struct gre_softc *sc;
	u_short flags;

	if ((sc = gre_lookup(m, proto)) == NULL) {
		/* No matching tunnel or tunnel is down. */
		return (0);
	}

	sc->sc_if.if_ipackets++;
	sc->sc_if.if_ibytes += m->m_pkthdr.len;

	switch (proto) {
	case IPPROTO_GRE:
		hlen += sizeof(struct gre_h);

		/* process GRE flags as packet can be of variable len */
		flags = ntohs(gip->gi_flags);

		/* Checksum & Offset are present */
		if ((flags & GRE_CP) | (flags & GRE_RP))
			hlen += 4;
		/* We don't support routing fields (variable length) */
		if (flags & GRE_RP)
			return (0);
		if (flags & GRE_KP)
			hlen += 4;
		if (flags & GRE_SP)
			hlen += 4;

		switch (ntohs(gip->gi_ptype)) { /* ethertypes */
		case ETHERTYPE_IP:	/* shouldn't need a schednetisr(), as */
		case WCCP_PROTOCOL_TYPE: /* we are in ip_input */
			ifq = &ipintrq;
			break;
#ifdef NS
		case ETHERTYPE_NS:
			ifq = &nsintrq;
			schednetisr(NETISR_NS);
			break;
#endif
#ifdef NETATALK
		case ETHERTYPE_ATALK:
			ifq = &atintrq1;
			schednetisr(NETISR_ATALK);
			break;
#endif
		case ETHERTYPE_IPV6:
			/* FALLTHROUGH */
		default:	/* others not yet supported */
			return (0);
		}
		break;
	default:
		/* others not yet supported */
		return (0);
	}

	m->m_data += hlen;
	m->m_len -= hlen;
	m->m_pkthdr.len -= hlen;

	if (sc->sc_if.if_bpf) {
		struct mbuf m0;
		u_int32_t af = AF_INET;

		m0.m_next = m;
		m0.m_len = 4;
		m0.m_data = (char *)&af;

		BPF_MTAP(&(sc->sc_if), &m0);
	}

	m->m_pkthdr.rcvif = &sc->sc_if;

	s = splnet();		/* possible */
	if (_IF_QFULL(ifq)) {
		_IF_DROP(ifq);
		m_freem(m);
	} else {
		IF_ENQUEUE(ifq, m);
	}
	splx(s);

	return (1);	/* packet is done, no further processing needed */
}
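/*
 * Illustrative sketch (not from ip_gre.c): the header-length computation in
 * gre_input2() above.  Each optional field announced in the GRE flag word
 * adds 4 bytes to the header; routing (GRE_RP) is not supported and causes
 * the packet to be rejected.  The flag values below mirror RFC 1701 bit
 * positions but are local stand-ins, not the kernel's definitions.
 */
#include <stdio.h>

#define GRE_CP_EX	0x8000		/* checksum present */
#define GRE_RP_EX	0x4000		/* routing present (unsupported) */
#define GRE_KP_EX	0x2000		/* key present */
#define GRE_SP_EX	0x1000		/* sequence number present */

#define GRE_BASE_HDR_EX	4		/* flags + protocol type */

/* Return the GRE header length for "flags", or -1 if routing is present. */
static int
gre_hdr_len(unsigned short flags)
{
	int hlen = GRE_BASE_HDR_EX;

	if ((flags & GRE_CP_EX) || (flags & GRE_RP_EX))
		hlen += 4;		/* checksum + offset word */
	if (flags & GRE_RP_EX)
		return (-1);		/* variable-length routing: give up */
	if (flags & GRE_KP_EX)
		hlen += 4;		/* key */
	if (flags & GRE_SP_EX)
		hlen += 4;		/* sequence number */
	return (hlen);
}

int
main(void)
{
	printf("key+seq: %d bytes\n", gre_hdr_len(GRE_KP_EX | GRE_SP_EX)); /* 12 */
	printf("routing: %d\n", gre_hdr_len(GRE_RP_EX));		   /* -1 */
	return (0);
}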
/**
 * Packet transmit
 *
 * @param m	Packet to send
 * @param ifp	Device info structure
 * @return Zero if the packet was queued for transmit, nonzero if it was dropped
 */
int
cvm_oct_xmit(struct mbuf *m, struct ifnet *ifp)
{
	cvmx_pko_command_word0_t pko_command;
	cvmx_buf_ptr_t hw_buffer;
	int dropped;
	int qos;
	cvm_oct_private_t *priv = (cvm_oct_private_t *)ifp->if_softc;
	int32_t in_use;
	int32_t buffers_to_free;
	cvmx_wqe_t *work;

	/* Prefetch the private data structure.
	   It is larger than one cache line */
	CVMX_PREFETCH(priv, 0);

	/* Start off assuming no drop */
	dropped = 0;

	/* The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to completely
	   remove "qos" in the event neither interface supports
	   multiple queues per port */
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		qos = GET_MBUF_QOS(m);
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else
		qos = 0;

	/* The CN3XXX series of parts has an erratum (GMX-401) which causes the
	   GMX block to hang if a collision occurs towards the end of a
	   <68 byte packet. As a workaround for this, we pad packets to be
	   68 bytes whenever we are in half duplex mode. We don't handle the
	   case of having a small packet but no room to add the padding.
	   The kernel should always give us at least a cache line */
	if (__predict_false(m->m_pkthdr.len < 64) &&
	    OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		cvmx_gmxx_prtx_cfg_t gmx_prt_cfg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		if (interface < 2) {
			/* We only need to pad packet in half duplex mode */
			gmx_prt_cfg.u64 =
			    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
			if (gmx_prt_cfg.s.duplex == 0) {
				static uint8_t pad[64];

				if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad))
					printf("%s: unable to pad small packet.", __func__);
			}
		}
	}

#ifdef OCTEON_VENDOR_RADISYS
	/*
	 * The RSYS4GBE will hang if asked to transmit a packet less than 60 bytes.
	 */
	if (__predict_false(m->m_pkthdr.len < 60) &&
	    cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CUST_RADISYS_RSYS4GBE) {
		static uint8_t pad[60];

		if (!m_append(m, sizeof pad - m->m_pkthdr.len, pad))
			printf("%s: unable to pad small packet.", __func__);
	}
#endif

	/*
	 * If the packet is not fragmented.
	 */
	if (m->m_pkthdr.len == m->m_len) {
		/* Build the PKO buffer pointer */
		hw_buffer.u64 = 0;
		hw_buffer.s.addr = cvmx_ptr_to_phys(m->m_data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = m->m_len;

		/* Build the PKO command */
		pko_command.u64 = 0;
		pko_command.s.segs = 1;
		pko_command.s.dontfree = 1; /* Do not put this buffer into the FPA. */

		work = NULL;
	} else {
		struct mbuf *n;
		unsigned segs;
		uint64_t *gp;

		/*
		 * The packet is fragmented, we need to send a list of segments
		 * in memory we borrow from the WQE pool.
		 */
		work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
		if (work == NULL) {
			m_freem(m);
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			return 1;
		}

		segs = 0;
		gp = (uint64_t *)work;
		for (n = m; n != NULL; n = n->m_next) {
			if (segs == CVMX_FPA_WQE_POOL_SIZE / sizeof (uint64_t))
				panic("%s: too many segments in packet; call m_collapse().",
				    __func__);

			/* Build the PKO buffer pointer */
			hw_buffer.u64 = 0;
			hw_buffer.s.i = 1; /* Do not put this buffer into the FPA. */
			hw_buffer.s.addr = cvmx_ptr_to_phys(n->m_data);
			hw_buffer.s.pool = 0;
			hw_buffer.s.size = n->m_len;

			*gp++ = hw_buffer.u64;
			segs++;
		}

		/* Build the PKO buffer gather list pointer */
		hw_buffer.u64 = 0;
		hw_buffer.s.addr = cvmx_ptr_to_phys(work);
		hw_buffer.s.pool = CVMX_FPA_WQE_POOL;
		hw_buffer.s.size = segs;

		/* Build the PKO command */
		pko_command.u64 = 0;
		pko_command.s.segs = segs;
		pko_command.s.gather = 1;
		pko_command.s.dontfree = 0; /* Put the WQE above back into the FPA. */
	}

	/* Finish building the PKO command */
	pko_command.s.n2 = 1; /* Don't pollute L2 with the outgoing packet */
	pko_command.s.reg0 = priv->fau+qos*4;
	pko_command.s.total_bytes = m->m_pkthdr.len;
	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
	pko_command.s.subone0 = 1;

	/* Check if we can use the hardware checksumming */
	if ((m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) != 0) {
		/* Use hardware checksum calc */
		pko_command.s.ipoffp1 = ETHER_HDR_LEN + 1;
	}

	/*
	 * XXX
	 * Could use a different free queue (and different FAU address) per
	 * core instead of per QoS, to reduce contention here.
	 */
	IF_LOCK(&priv->tx_free_queue[qos]);

	/* Get the number of mbufs in use by the hardware */
	in_use = cvmx_fau_fetch_and_add32(priv->fau+qos*4, 1);
	buffers_to_free = cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);

	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
	    CVMX_PKO_LOCK_CMD_QUEUE);

	/* Drop this packet if we have too many already queued to the HW */
	if (_IF_QFULL(&priv->tx_free_queue[qos])) {
		dropped = 1;
	}
	/* Send the packet to the output queue */
	else if (__predict_false(cvmx_pko_send_packet_finish(priv->port,
	    priv->queue + qos, pko_command, hw_buffer, CVMX_PKO_LOCK_CMD_QUEUE))) {
		DEBUGPRINT("%s: Failed to send the packet\n", if_name(ifp));
		dropped = 1;
	}

	if (__predict_false(dropped)) {
		m_freem(m);
		cvmx_fau_atomic_add32(priv->fau+qos*4, -1);
		if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	} else {
		/* Put this packet on the queue to be freed later */
		_IF_ENQUEUE(&priv->tx_free_queue[qos], m);

		/* Pass it to any BPF listeners. */
		ETHER_BPF_MTAP(ifp, m);

		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
	}

	/* Free mbufs not in use by the hardware */
	if (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) {
		while (_IF_QLEN(&priv->tx_free_queue[qos]) > in_use) {
			_IF_DEQUEUE(&priv->tx_free_queue[qos], m);
			m_freem(m);
		}
	}

	IF_UNLOCK(&priv->tx_free_queue[qos]);

	return dropped;
}
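/*
 * Illustrative sketch (not the Octeon driver): the deferred-free bookkeeping
 * at the end of cvm_oct_xmit() above.  Transmitted buffers are parked on a
 * free queue and only released once the hardware "in use" counter (the FAU
 * fetch-and-add in the real code) shows fewer packets outstanding than are
 * parked.  Every name below is invented for this example.
 */
#include <stdio.h>
#include <stdlib.h>

#define TXQ_MAX	16

struct txq {
	void	*parked[TXQ_MAX];	/* buffers still owned by "hardware" */
	int	 len;
	int	 in_use;		/* analogue of the FAU counter */
};

/* Queue a transmitted buffer; the hardware still references it. */
static void
txq_park(struct txq *q, void *buf)
{
	q->parked[q->len++] = buf;
	q->in_use++;			/* fetch-and-add(+1) analogue */
}

/* Hardware finished n packets: drop the counter, then free the excess. */
static void
txq_complete(struct txq *q, int n)
{
	q->in_use -= n;
	while (q->len > q->in_use)	/* same test as _IF_QLEN() > in_use */
		free(q->parked[--q->len]);
}

int
main(void)
{
	struct txq q = { .len = 0, .in_use = 0 };
	int i;

	for (i = 0; i < 4; i++)
		txq_park(&q, malloc(64));
	txq_complete(&q, 3);		/* 3 done: 3 freed, 1 still parked */
	printf("still parked: %d\n", q.len);
	txq_complete(&q, 1);		/* last one done */
	return (0);
}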
/*---------------------------------------------------------------------------*
 *	Data source switch for Read channels - 1, 3 and 5 (B and D-Channel)
 *---------------------------------------------------------------------------*/
void
ihfc_putmbuf(ihfc_sc_t *sc, u_char chan, struct mbuf *m)
{
	i4b_trace_hdr_t hdr;

	if (chan < 2) {
		if (S_TRACE & TRACE_D_RX) {
			hdr.count = ++S_DTRACECOUNT;
			hdr.dir   = FROM_NT;
			hdr.type  = TRC_CH_D;
			hdr.unit  = S_I4BUNIT;

			MICROTIME(hdr.time);

			i4b_l1_trace_ind(&hdr, m->m_len, m->m_data);
		}

		if (!S_ENABLED) {
			i4b_Dfreembuf(m);
			return;
		}

		m->m_pkthdr.len = m->m_len;

		i4b_l1_ph_data_ind(S_I4BUNIT, m);
	} else {
		if (S_TRACE & TRACE_B_RX) {
			hdr.count = ++S_BTRACECOUNT;
			hdr.dir   = FROM_NT;
			hdr.type  = (chan < 4) ? TRC_CH_B1 : TRC_CH_B2;
			hdr.unit  = S_I4BUNIT;

			MICROTIME(hdr.time);

			i4b_l1_trace_ind(&hdr, m->m_len, m->m_data);
		}

		if (!S_ENABLED) {
			i4b_Bfreembuf(m);
			return;
		}

		if (S_PROT == BPROT_NONE) {
			if (!i4b_l1_bchan_tel_silence(m->m_data, m->m_len)) {
				S_BDRVLINK->bch_activity(S_BDRVLINK->unit, ACT_RX);
			}

			if (!_IF_QFULL(&S_IFQUEUE)) {
				S_BYTES += m->m_len;

				IF_ENQUEUE(&S_IFQUEUE, m);

				S_BDRVLINK->bch_rx_data_ready(S_BDRVLINK->unit);
			}
			return;
		}

		if (S_PROT == BPROT_RHDLC) {
			S_MBUFDUMMY = m;
			S_BYTES += m->m_pkthdr.len = m->m_len;

			S_BDRVLINK->bch_rx_data_ready(S_BDRVLINK->unit);

			S_MBUFDUMMY = NULL;
			return;
		}

		NDBGL1(L1_ERROR, "Unknown protocol: %d", S_PROT);
	}
}