Real operator() (const F& f) const {
    QL_ASSERT(w_!=0, "Null weights");
    QL_ASSERT(x_!=0, "Null abscissas");

    Size startIdx;
    Real val;

    const Size isOrderOdd = order_ & 1;

    if (isOrderOdd) {
        QL_ASSERT((n_>0), "assume at least 1 point in quadrature");
        val = w_[0]*f(x_[0]);
        startIdx = 1;
    } else {
        val = 0.0;
        startIdx = 0;
    }

    for (Size i=startIdx; i<n_; ++i) {
        val += w_[i]*f( x_[i]);
        val += w_[i]*f(-x_[i]);
    }
    return val;
}
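/*
 * A minimal usage sketch for the symmetric tabulated rule whose call operator
 * appears above: it pairs each stored positive abscissa with its mirror image
 * and counts the centre point once for odd orders. The class name, header
 * path, and 7-point order below are assumptions (QuantLib's
 * TabulatedGaussLegendre is the likely owner, given the QL_ASSERT/Real/Size
 * identifiers), not something stated in this file.
 */
#include <ql/math/integrals/gaussianquadratures.hpp>
#include <iostream>

int main() {
    using namespace QuantLib;
    // Integrate f(x) = x^2 over [-1, 1]; the exact value is 2/3.
    // An odd order (7) exercises the centre-point branch of operator() above.
    TabulatedGaussLegendre quad(7);
    Real value = quad([](Real x) { return x * x; });
    std::cout << value << std::endl;   // expect roughly 0.666667
    return 0;
}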
Real GaussJacobiPolynomial::beta(Size i) const {
    Real num = 4.0*i*(i+alpha_)*(i+beta_)*(i+alpha_+beta_);
    Real denom = (2.0*i+alpha_+beta_)*(2.0*i+alpha_+beta_)
        * ((2.0*i+alpha_+beta_)*(2.0*i+alpha_+beta_)-1);

    if (!denom) {
        if (num != 0.0) {
            QL_FAIL("can't compute b_k for jacobi integration\n");
        } else {
            // l'Hospital
            num   = 4.0*i*(i+beta_)* (2.0*i+2*alpha_+beta_);
            denom = 2.0*(2.0*i+alpha_+beta_);
            denom *= denom-1;
            QL_ASSERT(denom, "can't compute b_k for jacobi integration\n");
        }
    }

    return num / denom;
}
Real GaussJacobiPolynomial::alpha(Size i) const {
    Real num = beta_*beta_ - alpha_*alpha_;
    Real denom = (2.0*i+alpha_+beta_)*(2.0*i+alpha_+beta_+2);

    if (!denom) {
        if (num != 0.0) {
            QL_FAIL("can't compute a_k for jacobi integration\n");
        } else {
            // l'Hospital
            num   = 2*beta_;
            denom = 2*(2.0*i+alpha_+beta_+1);
            QL_ASSERT(denom, "can't compute a_k for jacobi integration\n");
        }
    }

    return num / denom;
}
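/*
 * For reference, alpha(i) and beta(i) above transcribe the standard
 * three-term recurrence coefficients of the (monic) Jacobi polynomials,
 *     p_{k+1}(x) = (x - a_k) p_k(x) - b_k p_{k-1}(x),
 * which in LaTeX notation read (the monic normalisation is inferred from the
 * code, not stated in it):
 *
 *     a_k = \frac{\beta^2 - \alpha^2}{(2k+\alpha+\beta)(2k+\alpha+\beta+2)},
 *     \qquad
 *     b_k = \frac{4k\,(k+\alpha)(k+\beta)(k+\alpha+\beta)}
 *                {(2k+\alpha+\beta)^2\left[(2k+\alpha+\beta)^2 - 1\right]}.
 *
 * The l'Hospital branches resolve the 0/0 cases in which numerator and
 * denominator vanish together, e.g. k = 0 with alpha + beta = 0.
 */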
/*
 * Name: qla_rx_intr
 * Function: Handles normal ethernet frames received
 */
static void
qla_rx_intr(qla_host_t *ha, qla_sgl_rcv_t *sgc, uint32_t sds_idx)
{
    qla_rx_buf_t *rxb;
    struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL;
    struct ifnet *ifp = ha->ifp;
    qla_sds_t *sdsp;
    struct ether_vlan_header *eh;
    uint32_t i, rem_len = 0;
    uint32_t r_idx = 0;
    qla_rx_ring_t *rx_ring;

    if (ha->hw.num_rds_rings > 1)
        r_idx = sds_idx;

    ha->hw.rds[r_idx].count++;

    sdsp = &ha->hw.sds[sds_idx];
    rx_ring = &ha->rx_ring[r_idx];

    for (i = 0; i < sgc->num_handles; i++) {
        rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];

        QL_ASSERT(ha, (rxb != NULL),
            ("%s: [sds_idx]=[%d] rxb != NULL\n", __func__, sds_idx));

        if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_RX_RXB_INVAL)) {
            /* log the error */
            device_printf(ha->pci_dev,
                "%s invalid rxb[%d, %d, 0x%04x]\n",
                __func__, sds_idx, i, sgc->handle[i]);
            qla_rcv_error(ha);
            return;
        }

        mp = rxb->m_head;
        if (i == 0)
            mpf = mp;

        QL_ASSERT(ha, (mp != NULL),
            ("%s: [sds_idx]=[%d] mp != NULL\n", __func__, sds_idx));

        bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);

        rxb->m_head = NULL;
        rxb->next = sdsp->rxb_free;
        sdsp->rxb_free = rxb;
        sdsp->rx_free++;

        if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_RX_MP_NULL)) {
            /* log the error */
            device_printf(ha->pci_dev,
                "%s mp == NULL [%d, %d, 0x%04x]\n",
                __func__, sds_idx, i, sgc->handle[i]);
            qla_rcv_error(ha);
            return;
        }

        if (i == 0) {
            mpl = mpf = mp;
            mp->m_flags |= M_PKTHDR;
            mp->m_pkthdr.len = sgc->pkt_length;
            mp->m_pkthdr.rcvif = ifp;
            rem_len = mp->m_pkthdr.len;
        } else {
            mp->m_flags &= ~M_PKTHDR;
            mpl->m_next = mp;
            mpl = mp;
            rem_len = rem_len - mp->m_len;
        }
    }

    mpl->m_len = rem_len;

    eh = mtod(mpf, struct ether_vlan_header *);

    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
        uint32_t *data = (uint32_t *)eh;

        mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
        mpf->m_flags |= M_VLANTAG;

        *(data + 3) = *(data + 2);
        *(data + 2) = *(data + 1);
        *(data + 1) = *data;

        m_adj(mpf, ETHER_VLAN_ENCAP_LEN);
    }

    if (sgc->chksum_status == Q8_STAT_DESC_STATUS_CHKSUM_OK) {
        mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
            CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
        mpf->m_pkthdr.csum_data = 0xFFFF;
    } else {
        mpf->m_pkthdr.csum_flags = 0;
    }

    if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

    mpf->m_pkthdr.flowid = sgc->rss_hash;
    M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE);

    (*ifp->if_input)(ifp, mpf);

    if (sdsp->rx_free > ha->std_replenish)
        qla_replenish_normal_rx(ha, sdsp, r_idx);

    return;
}
/*
 * Name: qla_rcv_isr
 * Function: Main Interrupt Service Routine
 */
static uint32_t
qla_rcv_isr(qla_host_t *ha, uint32_t sds_idx, uint32_t count)
{
    device_t dev;
    qla_hw_t *hw;
    uint32_t comp_idx, c_idx = 0, desc_count = 0, opcode;
    volatile q80_stat_desc_t *sdesc, *sdesc0 = NULL;
    uint32_t ret = 0;
    qla_sgl_comp_t sgc;
    uint16_t nhandles;
    uint32_t sds_replenish_threshold = 0;

    dev = ha->pci_dev;
    hw = &ha->hw;

    hw->sds[sds_idx].rcv_active = 1;
    if (ha->flags.stop_rcv) {
        hw->sds[sds_idx].rcv_active = 0;
        return 0;
    }

    QL_DPRINT2(ha, (dev, "%s: [%d]enter\n", __func__, sds_idx));

    /*
     * receive interrupts
     */
    comp_idx = hw->sds[sds_idx].sdsr_next;

    while (count-- && !ha->flags.stop_rcv) {
        sdesc = (q80_stat_desc_t *)
            &hw->sds[sds_idx].sds_ring_base[comp_idx];

        opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));

        if (!opcode)
            break;

        hw->sds[sds_idx].intr_count++;

        switch (opcode) {
        case Q8_STAT_DESC_OPCODE_RCV_PKT:

            desc_count = 1;

            bzero(&sgc, sizeof(qla_sgl_comp_t));

            sgc.rcv.pkt_length =
                Q8_STAT_DESC_TOTAL_LENGTH((sdesc->data[0]));
            sgc.rcv.num_handles = 1;
            sgc.rcv.handle[0] = Q8_STAT_DESC_HANDLE((sdesc->data[0]));
            sgc.rcv.chksum_status =
                Q8_STAT_DESC_STATUS((sdesc->data[1]));

            sgc.rcv.rss_hash =
                Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

            if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
                sgc.rcv.vlan_tag =
                    Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
            }
            qla_rx_intr(ha, &sgc.rcv, sds_idx);
            break;

        case Q8_STAT_DESC_OPCODE_SGL_RCV:

            desc_count =
                Q8_STAT_DESC_COUNT_SGL_RCV((sdesc->data[1]));

            if (desc_count > 1) {
                c_idx = (comp_idx + desc_count - 1) &
                    (NUM_STATUS_DESCRIPTORS - 1);
                sdesc0 = (q80_stat_desc_t *)
                    &hw->sds[sds_idx].sds_ring_base[c_idx];

                if (Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
                        Q8_STAT_DESC_OPCODE_CONT) {
                    desc_count = 0;
                    break;
                }
            }

            bzero(&sgc, sizeof(qla_sgl_comp_t));

            sgc.rcv.pkt_length =
                Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV((sdesc->data[0]));
            sgc.rcv.chksum_status =
                Q8_STAT_DESC_STATUS((sdesc->data[1]));

            sgc.rcv.rss_hash =
                Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

            if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
                sgc.rcv.vlan_tag =
                    Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
            }

            QL_ASSERT(ha, (desc_count <= 2),
                ("%s: [sds_idx, data0, data1]="
                "%d, %p, %p]\n", __func__, sds_idx,
                (void *)sdesc->data[0],
                (void *)sdesc->data[1]));

            sgc.rcv.num_handles = 1;
            sgc.rcv.handle[0] = Q8_STAT_DESC_HANDLE((sdesc->data[0]));

            if (qla_rcv_cont_sds(ha, sds_idx, comp_idx, desc_count,
                &sgc.rcv.handle[1], &nhandles)) {
                device_printf(dev,
                    "%s: [sds_idx, dcount, data0, data1]="
                    "[%d, %d, 0x%llx, 0x%llx]\n",
                    __func__, sds_idx, desc_count,
                    (long long unsigned int)sdesc->data[0],
                    (long long unsigned int)sdesc->data[1]);
                desc_count = 0;
                break;
            }

            sgc.rcv.num_handles += nhandles;

            qla_rx_intr(ha, &sgc.rcv, sds_idx);
            break;

        case Q8_STAT_DESC_OPCODE_SGL_LRO:

            desc_count =
                Q8_STAT_DESC_COUNT_SGL_LRO((sdesc->data[1]));

            if (desc_count > 1) {
                c_idx = (comp_idx + desc_count - 1) &
                    (NUM_STATUS_DESCRIPTORS - 1);
                sdesc0 = (q80_stat_desc_t *)
                    &hw->sds[sds_idx].sds_ring_base[c_idx];

                if (Q8_STAT_DESC_OPCODE((sdesc0->data[1])) !=
                        Q8_STAT_DESC_OPCODE_CONT) {
                    desc_count = 0;
                    break;
                }
            }

            bzero(&sgc, sizeof(qla_sgl_comp_t));

            sgc.lro.payload_length =
                Q8_STAT_DESC_TOTAL_LENGTH_SGL_RCV((sdesc->data[0]));

            sgc.lro.rss_hash =
                Q8_STAT_DESC_RSS_HASH((sdesc->data[0]));

            sgc.lro.num_handles = 1;
            sgc.lro.handle[0] = Q8_STAT_DESC_HANDLE((sdesc->data[0]));

            if (Q8_SGL_LRO_STAT_TS((sdesc->data[1])))
                sgc.lro.flags |= Q8_LRO_COMP_TS;

            if (Q8_SGL_LRO_STAT_PUSH_BIT((sdesc->data[1])))
                sgc.lro.flags |= Q8_LRO_COMP_PUSH_BIT;

            sgc.lro.l2_offset =
                Q8_SGL_LRO_STAT_L2_OFFSET((sdesc->data[1]));
            sgc.lro.l4_offset =
                Q8_SGL_LRO_STAT_L4_OFFSET((sdesc->data[1]));

            if (Q8_STAT_DESC_VLAN((sdesc->data[1]))) {
                sgc.lro.vlan_tag =
                    Q8_STAT_DESC_VLAN_ID((sdesc->data[1]));
            }

            QL_ASSERT(ha, (desc_count <= 7),
                ("%s: [sds_idx, data0, data1]="
                "[%d, 0x%llx, 0x%llx]\n",
                __func__, sds_idx,
                (long long unsigned int)sdesc->data[0],
                (long long unsigned int)sdesc->data[1]));

            if (qla_rcv_cont_sds(ha, sds_idx, comp_idx, desc_count,
                &sgc.lro.handle[1], &nhandles)) {
                device_printf(dev,
                    "%s: [sds_idx, data0, data1]="
                    "[%d, 0x%llx, 0x%llx]\n",
                    __func__, sds_idx,
                    (long long unsigned int)sdesc->data[0],
                    (long long unsigned int)sdesc->data[1]);
                desc_count = 0;
                break;
            }

            sgc.lro.num_handles += nhandles;

            if (qla_lro_intr(ha, &sgc.lro, sds_idx)) {
                device_printf(dev,
                    "%s: [sds_idx, data0, data1]="
                    "[%d, 0x%llx, 0x%llx]\n",
                    __func__, sds_idx,
                    (long long unsigned int)sdesc->data[0],
                    (long long unsigned int)sdesc->data[1]);
                device_printf(dev,
                    "%s: [comp_idx, c_idx, dcount, nhndls]="
                    "[%d, %d, %d, %d]\n",
                    __func__, comp_idx, c_idx, desc_count,
                    sgc.lro.num_handles);
                if (desc_count > 1) {
                    device_printf(dev,
                        "%s: [sds_idx, data0, data1]="
                        "[%d, 0x%llx, 0x%llx]\n",
                        __func__, sds_idx,
                        (long long unsigned int)sdesc0->data[0],
                        (long long unsigned int)sdesc0->data[1]);
                }
            }

            break;

        default:
            device_printf(dev, "%s: default 0x%llx!\n", __func__,
                (long long unsigned int)sdesc->data[0]);
            break;
        }

        if (desc_count == 0)
            break;

        sds_replenish_threshold += desc_count;

        while (desc_count--) {
            sdesc->data[0] = 0ULL;
            sdesc->data[1] = 0ULL;

            comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS - 1);
            sdesc = (q80_stat_desc_t *)
                &hw->sds[sds_idx].sds_ring_base[comp_idx];
        }

        if (sds_replenish_threshold > ha->hw.sds_cidx_thres) {
            sds_replenish_threshold = 0;
            if (hw->sds[sds_idx].sdsr_next != comp_idx) {
                QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
            }
            hw->sds[sds_idx].sdsr_next = comp_idx;
        }
    }

    if (ha->flags.stop_rcv)
        goto qla_rcv_isr_exit;

    if (hw->sds[sds_idx].sdsr_next != comp_idx) {
        QL_UPDATE_SDS_CONSUMER_INDEX(ha, sds_idx, comp_idx);
    }
    hw->sds[sds_idx].sdsr_next = comp_idx;

    sdesc = (q80_stat_desc_t *)&hw->sds[sds_idx].sds_ring_base[comp_idx];
    opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));

    if (opcode)
        ret = -1;

qla_rcv_isr_exit:
    hw->sds[sds_idx].rcv_active = 0;

    return (ret);
}
static int
qla_rcv_cont_sds(qla_host_t *ha, uint32_t sds_idx, uint32_t comp_idx,
    uint32_t dcount, uint16_t *handle, uint16_t *nhandles)
{
    uint32_t i;
    uint16_t num_handles;
    q80_stat_desc_t *sdesc;
    uint32_t opcode;

    *nhandles = 0;
    dcount--;

    for (i = 0; i < dcount; i++) {
        comp_idx = (comp_idx + 1) & (NUM_STATUS_DESCRIPTORS - 1);
        sdesc = (q80_stat_desc_t *)
            &ha->hw.sds[sds_idx].sds_ring_base[comp_idx];

        opcode = Q8_STAT_DESC_OPCODE((sdesc->data[1]));

        if (!opcode) {
            device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
                __func__, (void *)sdesc->data[0],
                (void *)sdesc->data[1]);
            return -1;
        }

        num_handles = Q8_SGL_STAT_DESC_NUM_HANDLES((sdesc->data[1]));
        if (!num_handles) {
            device_printf(ha->pci_dev, "%s: opcode=0 %p %p\n",
                __func__, (void *)sdesc->data[0],
                (void *)sdesc->data[1]);
            return -1;
        }

        if (QL_ERR_INJECT(ha, INJCT_NUM_HNDLE_INVALID))
            num_handles = -1;

        switch (num_handles) {
        case 1:
            *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
            break;

        case 2:
            *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
            *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
            break;

        case 3:
            *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
            *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
            *handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
            break;

        case 4:
            *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
            *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
            *handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
            *handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
            break;

        case 5:
            *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
            *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
            *handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
            *handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
            *handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
            break;

        case 6:
            *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
            *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
            *handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
            *handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
            *handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
            *handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
            break;

        case 7:
            *handle++ = Q8_SGL_STAT_DESC_HANDLE1((sdesc->data[0]));
            *handle++ = Q8_SGL_STAT_DESC_HANDLE2((sdesc->data[0]));
            *handle++ = Q8_SGL_STAT_DESC_HANDLE3((sdesc->data[0]));
            *handle++ = Q8_SGL_STAT_DESC_HANDLE4((sdesc->data[0]));
            *handle++ = Q8_SGL_STAT_DESC_HANDLE5((sdesc->data[1]));
            *handle++ = Q8_SGL_STAT_DESC_HANDLE6((sdesc->data[1]));
            *handle++ = Q8_SGL_STAT_DESC_HANDLE7((sdesc->data[1]));
            break;

        default:
            device_printf(ha->pci_dev,
                "%s: invalid num handles %p %p\n", __func__,
                (void *)sdesc->data[0], (void *)sdesc->data[1]);

            QL_ASSERT(ha, (0),
                ("%s: %s [nh, sds, d0, d1]=[%d, %d, %p, %p]\n",
                __func__, "invalid num handles", sds_idx, num_handles,
                (void *)sdesc->data[0], (void *)sdesc->data[1]));

            qla_rcv_error(ha);
            return 0;
        }
        *nhandles = *nhandles + num_handles;
    }
    return 0;
}
/*
 * Name: qla_lro_intr
 * Function: Handles LRO (large receive offload) frames received
 */
static int
qla_lro_intr(qla_host_t *ha, qla_sgl_lro_t *sgc, uint32_t sds_idx)
{
    qla_rx_buf_t *rxb;
    struct mbuf *mp = NULL, *mpf = NULL, *mpl = NULL;
    struct ifnet *ifp = ha->ifp;
    qla_sds_t *sdsp;
    struct ether_vlan_header *eh;
    uint32_t i, rem_len = 0, pkt_length, iplen;
    struct tcphdr *th;
    struct ip *ip = NULL;
    struct ip6_hdr *ip6 = NULL;
    uint16_t etype;
    uint32_t r_idx = 0;
    qla_rx_ring_t *rx_ring;

    if (ha->hw.num_rds_rings > 1)
        r_idx = sds_idx;

    ha->hw.rds[r_idx].count++;

    rx_ring = &ha->rx_ring[r_idx];

    ha->lro_pkt_count++;

    sdsp = &ha->hw.sds[sds_idx];

    pkt_length = sgc->payload_length + sgc->l4_offset;

    if (sgc->flags & Q8_LRO_COMP_TS) {
        pkt_length += QLA_TCP_HDR_SIZE + QLA_TCP_TS_OPTION_SIZE;
    } else {
        pkt_length += QLA_TCP_HDR_SIZE;
    }
    ha->lro_bytes += pkt_length;

    for (i = 0; i < sgc->num_handles; i++) {
        rxb = &rx_ring->rx_buf[sgc->handle[i] & 0x7FFF];

        QL_ASSERT(ha, (rxb != NULL),
            ("%s: [sds_idx]=[%d] rxb != NULL\n", __func__, sds_idx));

        if ((rxb == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_RXB_INVAL)) {
            /* log the error */
            device_printf(ha->pci_dev,
                "%s invalid rxb[%d, %d, 0x%04x]\n",
                __func__, sds_idx, i, sgc->handle[i]);
            qla_rcv_error(ha);
            return (0);
        }

        mp = rxb->m_head;
        if (i == 0)
            mpf = mp;

        QL_ASSERT(ha, (mp != NULL),
            ("%s: [sds_idx]=[%d] mp != NULL\n", __func__, sds_idx));

        bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);

        rxb->m_head = NULL;
        rxb->next = sdsp->rxb_free;
        sdsp->rxb_free = rxb;
        sdsp->rx_free++;

        if ((mp == NULL) || QL_ERR_INJECT(ha, INJCT_LRO_MP_NULL)) {
            /* log the error */
            device_printf(ha->pci_dev,
                "%s mp == NULL [%d, %d, 0x%04x]\n",
                __func__, sds_idx, i, sgc->handle[i]);
            qla_rcv_error(ha);
            return (0);
        }

        if (i == 0) {
            mpl = mpf = mp;
            mp->m_flags |= M_PKTHDR;
            mp->m_pkthdr.len = pkt_length;
            mp->m_pkthdr.rcvif = ifp;
            rem_len = mp->m_pkthdr.len;
        } else {
            mp->m_flags &= ~M_PKTHDR;
            mpl->m_next = mp;
            mpl = mp;
            rem_len = rem_len - mp->m_len;
        }
    }

    mpl->m_len = rem_len;

    th = (struct tcphdr *)(mpf->m_data + sgc->l4_offset);

    if (sgc->flags & Q8_LRO_COMP_PUSH_BIT)
        th->th_flags |= TH_PUSH;

    m_adj(mpf, sgc->l2_offset);

    eh = mtod(mpf, struct ether_vlan_header *);

    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
        uint32_t *data = (uint32_t *)eh;

        mpf->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
        mpf->m_flags |= M_VLANTAG;

        *(data + 3) = *(data + 2);
        *(data + 2) = *(data + 1);
        *(data + 1) = *data;

        m_adj(mpf, ETHER_VLAN_ENCAP_LEN);

        etype = ntohs(eh->evl_proto);
    } else {
        etype = ntohs(eh->evl_encap_proto);
    }

    if (etype == ETHERTYPE_IP) {
        ip = (struct ip *)(mpf->m_data + ETHER_HDR_LEN);

        iplen = (ip->ip_hl << 2) + (th->th_off << 2) +
            sgc->payload_length;

        ip->ip_len = htons(iplen);

        ha->ipv4_lro++;
    } else if (etype == ETHERTYPE_IPV6) {
        ip6 = (struct ip6_hdr *)(mpf->m_data + ETHER_HDR_LEN);

        iplen = (th->th_off << 2) + sgc->payload_length;

        ip6->ip6_plen = htons(iplen);

        ha->ipv6_lro++;
    } else {
        m_freem(mpf);

        if (sdsp->rx_free > ha->std_replenish)
            qla_replenish_normal_rx(ha, sdsp, r_idx);
        return 0;
    }

    mpf->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
        CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
    mpf->m_pkthdr.csum_data = 0xFFFF;

    mpf->m_pkthdr.flowid = sgc->rss_hash;
    M_HASHTYPE_SET(mpf, M_HASHTYPE_OPAQUE);

    if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

    (*ifp->if_input)(ifp, mpf);

    if (sdsp->rx_free > ha->std_replenish)
        qla_replenish_normal_rx(ha, sdsp, r_idx);

    return (0);
}
/*
 * Name: qla_rx_intr
 * Function: Handles normal ethernet frames received
 */
static void
qla_rx_intr(qla_host_t *ha, uint64_t data, uint32_t sds_idx,
    struct lro_ctrl *lro)
{
    uint32_t idx, length, status, ring;
    qla_rx_buf_t *rxb;
    struct mbuf *mp;
    struct ifnet *ifp = ha->ifp;
    qla_sds_t *sdsp;
    struct ether_vlan_header *eh;

    sdsp = &ha->hw.sds[sds_idx];

    ring = (uint32_t)Q8_STAT_DESC_TYPE(data);
    idx = (uint32_t)Q8_STAT_DESC_HANDLE(data);
    length = (uint32_t)Q8_STAT_DESC_TOTAL_LENGTH(data);
    status = (uint32_t)Q8_STAT_DESC_STATUS(data);

    if (ring == 0) {
        if ((idx >= NUM_RX_DESCRIPTORS) || (length > MCLBYTES)) {
            device_printf(ha->pci_dev, "%s: ring[%d] index[0x%08x]"
                " len[0x%08x] invalid\n",
                __func__, ring, idx, length);
            return;
        }
    } else {
        if ((idx >= NUM_RX_JUMBO_DESCRIPTORS) || (length > MJUM9BYTES)) {
            device_printf(ha->pci_dev, "%s: ring[%d] index[0x%08x]"
                " len[0x%08x] invalid\n",
                __func__, ring, idx, length);
            return;
        }
    }

    if (ring == 0)
        rxb = &ha->rx_buf[idx];
    else
        rxb = &ha->rx_jbuf[idx];

    QL_ASSERT((rxb != NULL),
        ("%s: [r, i, sds_idx]=[%d, 0x%x, %d] rxb != NULL\n",
        __func__, ring, idx, sds_idx));

    mp = rxb->m_head;

    QL_ASSERT((mp != NULL),
        ("%s: [r,i,rxb, sds_idx]=[%d, 0x%x, %p, %d] mp != NULL\n",
        __func__, ring, idx, rxb, sds_idx));

    bus_dmamap_sync(ha->rx_tag, rxb->map, BUS_DMASYNC_POSTREAD);

    if (ring == 0) {
        rxb->m_head = NULL;
        rxb->next = sdsp->rxb_free;
        sdsp->rxb_free = rxb;
        sdsp->rx_free++;
    } else {
        rxb->m_head = NULL;
        rxb->next = sdsp->rxjb_free;
        sdsp->rxjb_free = rxb;
        sdsp->rxj_free++;
    }

    mp->m_len = length;
    mp->m_pkthdr.len = length;
    mp->m_pkthdr.rcvif = ifp;

    eh = mtod(mp, struct ether_vlan_header *);

    if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
        uint32_t *data = (uint32_t *)eh;

        mp->m_pkthdr.ether_vtag = ntohs(eh->evl_tag);
        mp->m_flags |= M_VLANTAG;

        *(data + 3) = *(data + 2);
        *(data + 2) = *(data + 1);
        *(data + 1) = *data;

        m_adj(mp, ETHER_VLAN_ENCAP_LEN);
    }

    if (status == Q8_STAT_DESC_STATUS_CHKSUM_OK) {
        mp->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);
    } else {
        mp->m_pkthdr.csum_flags = 0;
    }

    if (lro->lro_cnt && (tcp_lro_rx(lro, mp, 0) == 0)) {
        /* LRO packet has been successfully queued */
    } else {
        (*ifp->if_input)(ifp, mp);
    }

    if (sdsp->rx_free > std_replenish)
        qla_replenish_normal_rx(ha, sdsp);

    if (sdsp->rxj_free > jumbo_replenish)
        qla_replenish_jumbo_rx(ha, sdsp);

    return;
}