/* hfi1_vnic_napi - napi receive polling callback function */
static int hfi1_vnic_napi(struct napi_struct *napi, int budget)
{
	struct hfi1_vnic_rx_queue *rxq = container_of(napi,
					      struct hfi1_vnic_rx_queue, napi);
	struct hfi1_vnic_vport_info *vinfo = rxq->vinfo;
	int work_done = 0;

	v_dbg("napi %d budget %d\n", rxq->idx, budget);
	hfi1_vnic_handle_rx(rxq, &work_done, budget);

	v_dbg("napi %d work_done %d\n", rxq->idx, work_done);
	if (work_done < budget)
		/*
		 * Report the actual number of packets processed instead of
		 * plain napi_complete() (== napi_complete_done(napi, 0)) so
		 * the NAPI core can use it for busy-polling and
		 * gro_flush_timeout decisions.
		 */
		napi_complete_done(napi, work_done);

	return work_done;
}
/* opa_netdev_start_xmit - transmit function */ static netdev_tx_t opa_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev); v_dbg("xmit: queue %d skb len %d\n", skb->queue_mapping, skb->len); /* pad to ensure mininum ethernet packet length */ if (unlikely(skb->len < ETH_ZLEN)) { if (skb_padto(skb, ETH_ZLEN)) return NETDEV_TX_OK; skb_put(skb, ETH_ZLEN - skb->len); } opa_vnic_encap_skb(adapter, skb); return adapter->rn_ops->ndo_start_xmit(skb, netdev); }
/*
 * hfi1_vnic_bypass_rcv - receive a bypass (vnic) packet from the hardware
 * @packet: the received hfi1 packet; ebuf holds the payload, tlen its length
 *
 * Looks up the destination vport by the VESW id carried in the packet,
 * copies the payload into a fresh skb, queues it on the per-context rx
 * queue and kicks NAPI to drain it. Runs in the device receive path;
 * packets are dropped (with counters bumped) on any error.
 */
void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
{
	struct hfi1_devdata *dd = packet->rcd->dd;
	struct hfi1_vnic_vport_info *vinfo = NULL;
	struct hfi1_vnic_rx_queue *rxq;
	struct sk_buff *skb;
	int l4_type, vesw_id = -1;
	u8 q_idx;

	/* Only L4-type "ethernet" packets carry a VESW id we can route on. */
	l4_type = HFI1_GET_L4_TYPE(packet->ebuf);
	if (likely(l4_type == OPA_VNIC_L4_ETHR)) {
		vesw_id = HFI1_VNIC_GET_VESWID(packet->ebuf);
		vinfo = idr_find(&dd->vnic.vesw_idr, vesw_id);

		/*
		 * In case of invalid vesw id, count the error on
		 * the first available vport.
		 */
		if (unlikely(!vinfo)) {
			struct hfi1_vnic_vport_info *vinfo_tmp;
			int id_tmp = 0;

			vinfo_tmp = idr_get_next(&dd->vnic.vesw_idr, &id_tmp);
			if (vinfo_tmp) {
				/* counters are shared across vports */
				spin_lock(&vport_cntr_lock);
				vinfo_tmp->stats[0].netstats.rx_nohandler++;
				spin_unlock(&vport_cntr_lock);
			}
		}
	}

	/* No matching vport (or not an ethernet L4 type): warn and drop. */
	if (unlikely(!vinfo)) {
		dd_dev_warn(dd, "vnic rcv err: l4 %d vesw id %d ctx %d\n",
			    l4_type, vesw_id, packet->rcd->ctxt);
		return;
	}

	/* Each receive context maps to a fixed rx queue of the vport. */
	q_idx = packet->rcd->vnic_q_idx;
	rxq = &vinfo->rxq[q_idx];
	if (unlikely(!netif_oper_up(vinfo->netdev))) {
		/* Interface is down: drop and flush anything still queued. */
		vinfo->stats[q_idx].rx_drop_state++;
		skb_queue_purge(&rxq->skbq);
		return;
	}

	/* Bound the backlog; beyond the limit the packet is dropped. */
	if (unlikely(skb_queue_len(&rxq->skbq) > HFI1_VNIC_RCV_Q_SIZE)) {
		vinfo->stats[q_idx].netstats.rx_fifo_errors++;
		return;
	}

	skb = netdev_alloc_skb(vinfo->netdev, packet->tlen);
	if (unlikely(!skb)) {
		/* allocation failure counted as a fifo error as well */
		vinfo->stats[q_idx].netstats.rx_fifo_errors++;
		return;
	}

	/* Copy the whole receive buffer, then extend skb->len to match. */
	memcpy(skb->data, packet->ebuf, packet->tlen);
	skb_put(skb, packet->tlen);
	skb_queue_tail(&rxq->skbq, skb);

	/* Schedule NAPI only if it is not already running/scheduled. */
	if (napi_schedule_prep(&rxq->napi)) {
		v_dbg("napi %d scheduling\n", q_idx);
		__napi_schedule(&rxq->napi);
	}
}
/*
 * hfi1_netdev_start_xmit - hfi1 vnic transmit handler
 * @skb:    the outgoing packet (metadata header prepended by the caller)
 * @netdev: the vnic net device
 *
 * Strips the opa_vnic metadata, builds the bypass PBC, and hands the
 * packet to the device DMA send routine. Returns NETDEV_TX_OK (packet
 * consumed, even on drop) or NETDEV_TX_BUSY (queue full, stack will
 * requeue the skb).
 */
static netdev_tx_t hfi1_netdev_start_xmit(struct sk_buff *skb,
					  struct net_device *netdev)
{
	struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
	u8 pad_len, q_idx = skb->queue_mapping;
	struct hfi1_devdata *dd = vinfo->dd;
	struct opa_vnic_skb_mdata *mdata;
	u32 pkt_len, total_len;
	int err = -EINVAL;
	u64 pbc;

	v_dbg("xmit: queue %d skb len %d\n", q_idx, skb->len);
	if (unlikely(!netif_oper_up(netdev))) {
		/* Interface not operational: count and drop. */
		vinfo->stats[q_idx].tx_drop_state++;
		goto tx_finish;
	}

	/* take out meta data */
	mdata = (struct opa_vnic_skb_mdata *)skb->data;
	skb_pull(skb, sizeof(*mdata));
	if (unlikely(mdata->flags & OPA_VNIC_SKB_MDATA_ENCAP_ERR)) {
		/* Encapsulation layer flagged an unroutable (zero) DLID. */
		vinfo->stats[q_idx].tx_dlid_zero++;
		goto tx_finish;
	}

	/* add tail padding (for 8 bytes size alignment) and icrc */
	pad_len = -(skb->len + OPA_VNIC_ICRC_TAIL_LEN) & 0x7;
	pad_len += OPA_VNIC_ICRC_TAIL_LEN;

	/*
	 * pkt_len is how much data we have to write, includes header and data.
	 * total_len is length of the packet in Dwords plus the PBC should not
	 * include the CRC.
	 */
	pkt_len = (skb->len + pad_len) >> 2;
	total_len = pkt_len + 2; /* PBC + packet */

	pbc = create_bypass_pbc(mdata->vl, total_len);

	/*
	 * Hold an extra reference so the skb stays valid for the counter
	 * update / requeue below even after the DMA send path releases it.
	 */
	skb_get(skb);
	v_dbg("pbc 0x%016llX len %d pad_len %d\n", pbc, skb->len, pad_len);
	err = dd->process_vnic_dma_send(dd, q_idx, vinfo, skb, pbc, pad_len);
	if (unlikely(err)) {
		/* -EBUSY is handled separately below (requeue, not drop). */
		if (err == -ENOMEM)
			vinfo->stats[q_idx].netstats.tx_fifo_errors++;
		else if (err != -EBUSY)
			vinfo->stats[q_idx].netstats.tx_carrier_errors++;
	}

	/* remove the header before updating tx counters */
	skb_pull(skb, OPA_VNIC_HDR_LEN);

	if (unlikely(err == -EBUSY)) {
		/*
		 * Send queue is full: stop the tx queue if needed, drop our
		 * extra reference and let the stack requeue the skb.
		 */
		hfi1_vnic_maybe_stop_tx(vinfo, q_idx);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_BUSY;
	}

tx_finish:
	/* update tx counters */
	hfi1_vnic_update_tx_counters(vinfo, q_idx, skb, err);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}