static inline void
oce_set_rx_oflags(mblk_t *mp, struct oce_nic_rx_cqe *cqe)
{
	int csum_flags = 0;

	/* set flags */
	if (cqe->u0.s.ip_cksum_pass) {
		csum_flags |= HCK_IPV4_HDRCKSUM_OK;
	}

	if (cqe->u0.s.l4_cksum_pass) {
		csum_flags |= (HCK_FULLCKSUM | HCK_FULLCKSUM_OK);
	}

	if (csum_flags) {
		(void) mac_hcksum_set(mp, 0, 0, 0, 0, csum_flags);
	}
}
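/*
 * Illustrative sketch only (not part of the oce driver): one way an upper
 * layer could read back the checksum flags that oce_set_rx_oflags() attached
 * to the mblk, using the illumos mac_hcksum_get() MAC framework call.  The
 * helper name oce_rx_cksum_verified() is hypothetical, and the NULL arguments
 * assume mac_hcksum_get() skips fields it is not asked for, as the current
 * illumos implementation does.
 */
static inline boolean_t
oce_rx_cksum_verified(mblk_t *mp)
{
	uint32_t flags = 0;

	/* Only the flags word is of interest here. */
	mac_hcksum_get(mp, NULL, NULL, NULL, NULL, &flags);

	/* True only if both the IPv4 header and L4 checksums were verified. */
	return ((flags & HCK_IPV4_HDRCKSUM_OK) != 0 &&
	    (flags & HCK_FULLCKSUM_OK) != 0);
}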
static mblk_t *
bge_receive_packet(bge_t *bgep, bge_rbd_t *hw_rbd_p, recv_ring_t *rrp)
{
	bge_rbd_t hw_rbd;
	buff_ring_t *brp;
	sw_rbd_t *srbdp;
	uchar_t *dp;
	mblk_t *mp;
	uint_t len;
	uint_t minsize;
	uint_t maxsize;
	uint32_t pflags;

	mp = NULL;
	hw_rbd = *hw_rbd_p;

	switch (hw_rbd.flags & (RBD_FLAG_MINI_RING|RBD_FLAG_JUMBO_RING)) {
	case RBD_FLAG_MINI_RING|RBD_FLAG_JUMBO_RING:
	default:
		/* error, this shouldn't happen */
		BGE_PKTDUMP((bgep, &hw_rbd, NULL, "bad ring flags!"));
		goto error;

	case RBD_FLAG_JUMBO_RING:
		brp = &bgep->buff[BGE_JUMBO_BUFF_RING];
		break;

#if (BGE_BUFF_RINGS_USED > 2)
	case RBD_FLAG_MINI_RING:
		brp = &bgep->buff[BGE_MINI_BUFF_RING];
		break;
#endif	/* BGE_BUFF_RINGS_USED > 2 */

	case 0:
		brp = &bgep->buff[BGE_STD_BUFF_RING];
		break;
	}

	if (hw_rbd.index >= brp->desc.nslots) {
		/* error, this shouldn't happen */
		BGE_PKTDUMP((bgep, &hw_rbd, NULL, "bad ring index!"));
		goto error;
	}

	srbdp = &brp->sw_rbds[hw_rbd.index];
	if (hw_rbd.opaque != srbdp->pbuf.token) {
		/* bogus, drop the packet */
		BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "bad ring token"));
		goto refill;
	}

	if ((hw_rbd.flags & RBD_FLAG_PACKET_END) == 0) {
		/* bogus, drop the packet */
		BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "unterminated packet"));
		goto refill;
	}

	if (hw_rbd.flags & RBD_FLAG_FRAME_HAS_ERROR) {
		/* bogus, drop the packet */
		BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "errored packet"));
		goto refill;
	}

	len = hw_rbd.len;

#ifdef BGE_IPMI_ASF
	/*
	 * When IPMI/ASF is enabled, VLAN tag must be stripped.
	 */
	if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG))
		maxsize = bgep->chipid.ethmax_size + ETHERFCSL;
	else
#endif
		/*
		 * H/W will not strip the VLAN tag from incoming packet
		 * now, as RECEIVE_MODE_KEEP_VLAN_TAG bit is set in
		 * RECEIVE_MAC_MODE_REG register.
		 */
		maxsize = bgep->chipid.ethmax_size + VLAN_TAGSZ + ETHERFCSL;
	if (len > maxsize) {
		/* bogus, drop the packet */
		BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "oversize packet"));
		goto refill;
	}

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG))
		minsize = ETHERMIN + ETHERFCSL - VLAN_TAGSZ;
	else
#endif
		minsize = ETHERMIN + ETHERFCSL;
	if (len < minsize) {
		/* bogus, drop the packet */
		BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "undersize packet"));
		goto refill;
	}

	/*
	 * Packet looks good; get a buffer to copy it into.
	 * We want to leave some space at the front of the allocated
	 * buffer in case any upstream modules want to prepend some
	 * sort of header.  This also has the side-effect of making
	 * the packet *contents* 4-byte aligned, as required by NCA!
	 */
#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG)) {
		mp = allocb(BGE_HEADROOM + len + VLAN_TAGSZ, 0);
	} else {
#endif
		mp = allocb(BGE_HEADROOM + len, 0);
#ifdef BGE_IPMI_ASF
	}
#endif
	if (mp == NULL) {
		/* Nothing to do but drop the packet */
		goto refill;
	}

	/*
	 * Sync the data and copy it to the STREAMS buffer.
	 */
	DMA_SYNC(srbdp->pbuf, DDI_DMA_SYNC_FORKERNEL);
	if (bge_check_dma_handle(bgep, srbdp->pbuf.dma_hdl) != DDI_FM_OK) {
		bgep->bge_dma_error = B_TRUE;
		bgep->bge_chip_state = BGE_CHIP_ERROR;
		return (NULL);
	}

#ifdef BGE_IPMI_ASF
	if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG)) {
		/*
		 * As VLAN tag has been stripped from incoming packet in ASF
		 * scenario, we insert it into this packet again.
		 */
		struct ether_vlan_header *ehp;

		mp->b_rptr = dp = mp->b_rptr + BGE_HEADROOM - VLAN_TAGSZ;
		bcopy(DMA_VPTR(srbdp->pbuf), dp, 2 * ETHERADDRL);
		ehp = (void *)dp;
		ehp->ether_tpid = ntohs(ETHERTYPE_VLAN);
		ehp->ether_tci = ntohs(hw_rbd.vlan_tci);
		bcopy(((uchar_t *)(DMA_VPTR(srbdp->pbuf))) + 2 * ETHERADDRL,
		    dp + 2 * ETHERADDRL + VLAN_TAGSZ,
		    len - 2 * ETHERADDRL);
	} else {
#endif
		mp->b_rptr = dp = mp->b_rptr + BGE_HEADROOM;
		bcopy(DMA_VPTR(srbdp->pbuf), dp, len);
#ifdef BGE_IPMI_ASF
	}

	if (bgep->asf_enabled && (hw_rbd.flags & RBD_FLAG_VLAN_TAG)) {
		mp->b_wptr = dp + len + VLAN_TAGSZ - ETHERFCSL;
	} else
#endif
		mp->b_wptr = dp + len - ETHERFCSL;

	/*
	 * Special check for one specific type of data corruption;
	 * in a good packet, the first 8 bytes are *very* unlikely
	 * to be the same as the second 8 bytes ... but we let the
	 * packet through just in case.
	 */
	if (bcmp(dp, dp+8, 8) == 0)
		BGE_PKTDUMP((bgep, &hw_rbd, srbdp, "stuttered packet?"));

	pflags = 0;
	if (hw_rbd.flags & RBD_FLAG_TCP_UDP_CHECKSUM)
		pflags |= HCK_FULLCKSUM;
	if (hw_rbd.flags & RBD_FLAG_IP_CHECKSUM)
		pflags |= HCK_IPV4_HDRCKSUM_OK;
	if (pflags != 0)
		mac_hcksum_set(mp, 0, 0, 0, hw_rbd.tcp_udp_cksum, pflags);

	/* Update per-ring rx statistics */
	rrp->rx_pkts++;
	rrp->rx_bytes += len;

refill:
	/*
	 * Replace the buffer in the ring it came from ...
	 */
	bge_refill(bgep, brp, srbdp);
	return (mp);

error:
	/*
	 * We come here if the integrity of the ring descriptors
	 * (rather than merely packet data) appears corrupted.
	 * The factotum will attempt to reset-and-recover.
	 */
	bgep->bge_chip_state = BGE_CHIP_ERROR;
	bge_fm_ereport(bgep, DDI_FM_DEVICE_INVAL_STATE);
	return (NULL);
}
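/*
 * Illustrative sketch only (not part of the bge driver): the byte layout the
 * ASF path above produces when it re-inserts a stripped VLAN tag.  The helper
 * name insert_vlan_tag() is hypothetical; struct ether_vlan_header,
 * ETHERADDRL, VLAN_TAGSZ and ETHERTYPE_VLAN are the standard
 * <sys/ethernet.h>/<sys/vlan.h> definitions.  dst must have room for
 * len + VLAN_TAGSZ bytes.
 */
static void
insert_vlan_tag(uchar_t *dst, const uchar_t *src, size_t len, uint16_t tci)
{
	struct ether_vlan_header *evhp = (void *)dst;

	/* Destination and source MAC addresses are copied unchanged. */
	bcopy(src, dst, 2 * ETHERADDRL);

	/* The 4-byte 802.1Q tag sits immediately after the two addresses. */
	evhp->ether_tpid = htons(ETHERTYPE_VLAN);
	evhp->ether_tci = htons(tci);

	/* Everything after the addresses shifts up by VLAN_TAGSZ bytes. */
	bcopy(src + 2 * ETHERADDRL, dst + 2 * ETHERADDRL + VLAN_TAGSZ,
	    len - 2 * ETHERADDRL);
}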