/*
 * Get an unused rxbuf from either the pool or from memory.
 * The returned rxbuf has an mblk associated with it.
 */
static vmxnet3s_rxbuf_t *
vmxnet3s_get_rxbuf(vmxnet3s_softc_t *dp, boolean_t cansleep)
{
	vmxnet3s_rxbuf_t *rxbuf;
	vmxnet3s_rxpool_t *rxpool = &dp->rxpool;

	mutex_enter(&dp->rxpoollock);
	if (rxpool->listhead) {
		rxbuf = rxpool->listhead;
		rxpool->listhead = rxbuf->next;
		mutex_exit(&dp->rxpoollock);
	} else {
		mutex_exit(&dp->rxpoollock);
		if ((rxbuf = vmxnet3s_alloc_rxbuf(dp, cansleep)) == NULL)
			goto done;
	}
	ASSERT(rxbuf);

	rxbuf->mblk = desballoc((uchar_t *)rxbuf->dma.buf,
	    rxbuf->dma.buflen, BPRI_MED, &rxbuf->freecb);
	if (!rxbuf->mblk) {
		vmxnet3s_put_rxbuf(rxbuf);
		rxbuf = NULL;
	}

done:
	return (rxbuf);
}
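/*
 * A minimal sketch (not the driver's actual code) of the put side that
 * vmxnet3s_get_rxbuf() relies on: return an rxbuf to the pool, or release
 * its DMA memory once the pool is full. The pool-limit fields (nbufs,
 * bufslimit) and the vmxnet3s_free_rxbuf() helper are assumptions made
 * for illustration.
 */
static void
vmxnet3s_put_rxbuf_sketch(vmxnet3s_softc_t *dp, vmxnet3s_rxbuf_t *rxbuf)
{
	vmxnet3s_rxpool_t *rxpool = &dp->rxpool;

	mutex_enter(&dp->rxpoollock);
	if (rxpool->nbufs < rxpool->bufslimit) {
		/* cache the buffer for the next vmxnet3s_get_rxbuf() */
		rxbuf->next = rxpool->listhead;
		rxpool->listhead = rxbuf;
		rxpool->nbufs++;
		mutex_exit(&dp->rxpoollock);
	} else {
		mutex_exit(&dp->rxpoollock);
		vmxnet3s_free_rxbuf(dp, rxbuf);	/* assumed helper */
	}
}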
/*
 * Function to free an mblk data buffer back to the RQ pool
 *
 * arg - pointer to the receive buffer descriptor
 *
 * return none
 */
void
oce_rx_pool_free(char *arg)
{
	oce_rq_bdesc_t *rqbd;
	struct oce_rq *rq;

	/* During destroy, arg will be NULL */
	if (arg == NULL) {
		return;
	}

	/* retrieve the pointers from arg */
	rqbd = (oce_rq_bdesc_t *)(void *)arg;
	rq = rqbd->rq;

	/* reconstruct the mblk so the buffer can be loaned out again */
	rqbd->mp = desballoc((uchar_t *)rqbd->rqb->base,
	    rqbd->rqb->size, 0, &rqbd->fr_rtn);
	if (rqbd->mp) {
		rqbd->mp->b_rptr =
		    (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
	}
	oce_rqb_free(rq, rqbd);
	(void) atomic_add_32(&rq->pending, -1);
} /* rx_pool_free */
/*
 * RQ buffer constructor function
 *
 * rqbd - pointer to rq buffer descriptor
 * rq - pointer to RQ structure
 * size - size of the buffer
 * flags - KM_SLEEP or KM_NOSLEEP
 *
 * return DDI_SUCCESS => success, DDI_FAILURE otherwise
 */
static int
oce_rqb_ctor(oce_rq_bdesc_t *rqbd, struct oce_rq *rq, size_t size, int flags)
{
	struct oce_dev *dev;
	oce_dma_buf_t *dbuf;

	dev = rq->parent;

	dbuf = oce_alloc_dma_buffer(dev, size, &oce_rx_buf_attr, flags);
	if (dbuf == NULL) {
		return (DDI_FAILURE);
	}

	/* Set the callback function parameters */
	rqbd->fr_rtn.free_func = (void (*)())oce_rx_pool_free;
	rqbd->fr_rtn.free_arg = (caddr_t)(void *)rqbd;

	rqbd->mp = desballoc((uchar_t *)dbuf->base, dbuf->size, 0,
	    &rqbd->fr_rtn);
	if (rqbd->mp == NULL) {
		oce_free_dma_buffer(dev, dbuf);
		return (DDI_FAILURE);
	}
	rqbd->rqb = dbuf;
	rqbd->rq = rq;
	rqbd->frag_addr.dw.addr_lo = ADDR_LO(dbuf->addr + OCE_RQE_BUF_HEADROOM);
	rqbd->frag_addr.dw.addr_hi = ADDR_HI(dbuf->addr + OCE_RQE_BUF_HEADROOM);
	rqbd->mp->b_rptr = (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;

	return (DDI_SUCCESS);
} /* oce_rqb_ctor */
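/*
 * A hedged sketch of the matching destructor: oce_rqb_ctor() attaches both
 * an mblk and a DMA buffer to the descriptor, so teardown must release
 * both. Detaching db_frtnp first keeps freemsg() from invoking
 * oce_rx_pool_free() and re-queueing a buffer that is being destroyed.
 * This follows the constructor above but is an illustration, not the
 * driver's verbatim code.
 */
static void
oce_rqb_dtor_sketch(oce_rq_bdesc_t *rqbd)
{
	if (rqbd->mp != NULL) {
		/* disarm the free routine, then free the mblk for real */
		rqbd->mp->b_datap->db_frtnp = NULL;
		freemsg(rqbd->mp);
		rqbd->mp = NULL;
	}
	if (rqbd->rqb != NULL) {
		oce_free_dma_buffer(rqbd->rq->parent, rqbd->rqb);
		rqbd->rqb = NULL;
	}
}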
/*
 * igb_alloc_rcb_lists - Memory allocation for the receive control blocks
 * of one ring.
 */
static int
igb_alloc_rcb_lists(igb_rx_data_t *rx_data)
{
	int i;
	int ret;
	rx_control_block_t *rcb;
	igb_t *igb = rx_data->rx_ring->igb;
	dma_buffer_t *rx_buf;
	uint32_t rcb_count;

	/*
	 * Allocate memory for the rx control blocks for work list and
	 * free list.
	 */
	rcb_count = rx_data->ring_size + rx_data->free_list_size;
	rcb = rx_data->rcb_area;

	for (i = 0; i < rcb_count; i++, rcb++) {
		ASSERT(rcb != NULL);

		if (i < rx_data->ring_size) {
			/* Attach the rx control block to the work list */
			rx_data->work_list[i] = rcb;
		} else {
			/* Attach the rx control block to the free list */
			rx_data->free_list[i - rx_data->ring_size] = rcb;
		}

		rx_buf = &rcb->rx_buf;
		ret = igb_alloc_dma_buffer(igb, rx_buf, igb->rx_buf_size);

		if (ret != IGB_SUCCESS) {
			igb_log(igb, IGB_LOG_ERROR,
			    "Allocate rx dma buffer failed");
			goto alloc_rcb_lists_fail;
		}

		rx_buf->size -= IPHDR_ALIGN_ROOM;
		rx_buf->address += IPHDR_ALIGN_ROOM;
		rx_buf->dma_address += IPHDR_ALIGN_ROOM;

		rcb->ref_cnt = 1;
		rcb->rx_data = (igb_rx_data_t *)rx_data;
		rcb->free_rtn.free_func = igb_rx_recycle;
		rcb->free_rtn.free_arg = (char *)rcb;

		rcb->mp = desballoc((unsigned char *)
		    rx_buf->address, rx_buf->size, 0, &rcb->free_rtn);
	}

	return (IGB_SUCCESS);

alloc_rcb_lists_fail:
	igb_free_rcb_lists(rx_data);

	return (IGB_FAILURE);
}
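/*
 * A simplified sketch of the recycle path wired up above through
 * rcb->free_rtn.free_func = igb_rx_recycle. The real routine also handles
 * teardown and reference counting; this only shows the core pattern shared
 * with oce_rx_pool_free() and e1000g_rxfree_func() below: rebuild the mblk
 * with desballoc() and put the control block back on the free list. The
 * igb_put_free_rcb() helper is an assumption made for illustration.
 */
static void
igb_rx_recycle_sketch(caddr_t arg)
{
	rx_control_block_t *rcb = (rx_control_block_t *)(uintptr_t)arg;
	igb_rx_data_t *rx_data = rcb->rx_data;

	/* reconstruct the mblk consumed by the upper layer's freemsg() */
	rcb->mp = desballoc((unsigned char *)rcb->rx_buf.address,
	    rcb->rx_buf.size, 0, &rcb->free_rtn);

	/* return the control block for reuse (hypothetical helper) */
	igb_put_free_rcb(rx_data, rcb);
}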
/*
 * function to process a single packet
 *
 * dev - software handle to the device
 * rq - pointer to the RQ to charge
 * cqe - pointer to Completion Q entry
 *
 * return mblk pointer => success, NULL => error
 */
static inline mblk_t *
oce_rx(struct oce_dev *dev, struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
	mblk_t *mp;
	int pkt_len;
	int32_t frag_cnt = 0;
	mblk_t **mblk_tail;
	mblk_t *mblk_head;
	int frag_size;
	oce_rq_bdesc_t *rqbd;
	uint16_t cur_index;
	oce_ring_buffer_t *ring;
	int i;

	frag_cnt = cqe->u0.s.num_fragments & 0x7;
	mblk_head = NULL;
	mblk_tail = &mblk_head;

	ring = rq->ring;
	cur_index = ring->cidx;

	/* Get the relevant Queue pointers */
	pkt_len = cqe->u0.s.pkt_size;
	for (i = 0; i < frag_cnt; i++) {
		rqbd = rq->shadow_ring[cur_index];
		if (rqbd->mp == NULL) {
			rqbd->mp = desballoc((uchar_t *)rqbd->rqb->base,
			    rqbd->rqb->size, 0, &rqbd->fr_rtn);
			if (rqbd->mp == NULL) {
				return (NULL);
			}
			rqbd->mp->b_rptr =
			    (uchar_t *)rqbd->rqb->base + OCE_RQE_BUF_HEADROOM;
		}
		mp = rqbd->mp;
		frag_size = (pkt_len > rq->cfg.frag_size) ?
		    rq->cfg.frag_size : pkt_len;
		mp->b_wptr = mp->b_rptr + frag_size;
		pkt_len -= frag_size;
		mp->b_next = mp->b_cont = NULL;

		/* Chain the message mblks */
		*mblk_tail = mp;
		mblk_tail = &mp->b_cont;
		(void) DBUF_SYNC(rqbd->rqb, DDI_DMA_SYNC_FORCPU);
		cur_index = GET_Q_NEXT(cur_index, 1, ring->num_items);
	}

	if (mblk_head == NULL) {
		oce_log(dev, CE_WARN, MOD_RX, "%s", "oce_rx:no frags?");
		return (NULL);
	}

	/* replace the buffer with new ones */
	(void) oce_rq_charge(rq, frag_cnt, B_FALSE);
	atomic_add_32(&rq->pending, frag_cnt);

	return (mblk_head);
} /* oce_rx */
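/*
 * A hedged sketch of the repost step invoked at the end of oce_rx():
 * oce_rq_charge() hands nbufs fresh buffers back to the hardware receive
 * ring. The oce_rqb_alloc() pool helper and the ring accessors here are
 * simplified approximations of the driver's, not its verbatim code.
 */
static int
oce_rq_charge_sketch(struct oce_rq *rq, uint32_t nbufs)
{
	oce_rq_bdesc_t *rqbd;
	struct oce_nic_rqe *rqe;
	uint32_t cnt;

	for (cnt = 0; cnt < nbufs; cnt++) {
		rqbd = oce_rqb_alloc(rq);	/* assumed pool helper */
		if (rqbd == NULL)
			break;
		/* point the hardware RQE at the fragment's DMA address */
		rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
		rqe->u0.s.frag_pa_lo = rqbd->frag_addr.dw.addr_lo;
		rqe->u0.s.frag_pa_hi = rqbd->frag_addr.dw.addr_hi;
		/* remember the descriptor so oce_rx() can find it again */
		rq->shadow_ring[rq->ring->pidx] = rqbd;
		RING_PUT(rq->ring, 1);
	}
	return (cnt);
}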
/*
 * Create a pool of mblks from which future vio_allocb() requests
 * will be serviced.
 *
 * NOTE: num_mblks has to be non-zero and a power of 2
 *
 * Returns 0 on success or EINVAL if num_mblks is zero or not
 * a power of 2.
 */
int
vio_create_mblks(uint64_t num_mblks, size_t mblk_size, vio_mblk_pool_t **poolp)
{
	vio_mblk_pool_t *vmplp;
	vio_mblk_t *vmp;
	uint8_t *datap;
	int i;

	if (!(num_mblks) || (!ISP2(num_mblks))) {
		*poolp = 0;
		return (EINVAL);
	}

	vmplp = kmem_zalloc(sizeof (*vmplp), KM_SLEEP);
	vmplp->quelen = num_mblks;
	vmplp->quemask = num_mblks - 1; /* expects quelen is power-of-2 */
	vmplp->mblk_size = mblk_size;

	mutex_init(&vmplp->hlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(DDI_INTR_SOFTPRI_DEFAULT));
	mutex_init(&vmplp->tlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(DDI_INTR_SOFTPRI_DEFAULT));

	vmplp->basep = kmem_zalloc(num_mblks * sizeof (vio_mblk_t), KM_SLEEP);
	vmplp->datap = kmem_zalloc(num_mblks * mblk_size, KM_SLEEP);
	vmplp->nextp = NULL;

	/* create a queue of pointers to free vio_mblk_t's */
	vmplp->quep = kmem_zalloc(vmplp->quelen *
	    sizeof (vio_mblk_t *), KM_SLEEP);
	vmplp->head = 0;
	vmplp->tail = 0;

	for (i = 0, datap = vmplp->datap; i < num_mblks; i++) {
		vmp = &(vmplp->basep[i]);
		vmp->vmplp = vmplp;
		vmp->datap = datap;
		vmp->reclaim.free_func = vio_freeb;
		vmp->reclaim.free_arg = (caddr_t)vmp;
		vmp->mp = desballoc(vmp->datap, mblk_size, BPRI_MED,
		    &vmp->reclaim);

		/*
		 * If desballoc() fails, the entry is simply left off the
		 * free stack and will never be handed out by vio_allocb().
		 */
		if (vmp->mp == NULL)
			continue;

		/* put this vmp on the free stack */
		vmplp->quep[vmplp->tail] = vmp;
		vmplp->tail = (vmplp->tail + 1) & vmplp->quemask;

		datap += mblk_size;
	}

	*poolp = vmplp;
	return (0);
}
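/*
 * A hedged sketch of the consumer side, vio_allocb(): pop the next free
 * vio_mblk_t off the queue built above and hand back its mblk. This is an
 * approximation of that routine's core logic, not verbatim source.
 */
mblk_t *
vio_allocb_sketch(vio_mblk_pool_t *vmplp)
{
	vio_mblk_t *vmp = NULL;
	mblk_t *mp = NULL;

	mutex_enter(&vmplp->hlock);
	if (vmplp->head != vmplp->tail) {
		/* queue non-empty: take the entry at head */
		vmp = vmplp->quep[vmplp->head];
		vmplp->head = (vmplp->head + 1) & vmplp->quemask;
		mp = vmp->mp;
	}
	mutex_exit(&vmplp->hlock);

	return (mp);
}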
/*
 * Return an mblk to the free pool. Invoked when the upper IP
 * layers do freemsg() etc. on the mblk they were passed.
 */
void
vio_freeb(void *arg)
{
	vio_mblk_t *vmp = (vio_mblk_t *)arg;
	vio_mblk_pool_t *vmplp = vmp->vmplp;

	/* reconstruct the mblk before returning the entry to the pool */
	vmp->mp = desballoc(vmp->datap, vmplp->mblk_size,
	    BPRI_MED, &vmp->reclaim);

	mutex_enter(&vmplp->tlock);
	vmplp->quep[vmplp->tail] = vmp;
	vmplp->tail = (vmplp->tail + 1) & vmplp->quemask;
	mutex_exit(&vmplp->tlock);
}
/*
 * Create a Multidata message block.
 */
multidata_t *
mmd_alloc(mblk_t *hdr_mp, mblk_t **mmd_mp, int kmflags)
{
	uchar_t *buf;
	multidata_t *mmd;
	uint_t mmd_mplen;
	struct mmd_buf_info *buf_info;

	ASSERT(hdr_mp != NULL);
	ASSERT(mmd_mp != NULL);

	/*
	 * Caller should never pass in a chain of mblks since we
	 * only care about the first one, hence the assertions.
	 */
	ASSERT(hdr_mp->b_cont == NULL);

	if ((buf = kmem_cache_alloc(mmd_cache, kmflags)) == NULL)
		return (NULL);

	buf_info = (struct mmd_buf_info *)buf;
	buf_info->frp.free_arg = (caddr_t)buf;

	mmd = (multidata_t *)(buf_info + 1);
	mmd_mplen = sizeof (*mmd);

	if ((*mmd_mp = desballoc((uchar_t *)mmd, mmd_mplen,
	    BPRI_HI, &(buf_info->frp))) == NULL) {
		kmem_cache_free(mmd_cache, buf);
		return (NULL);
	}

	DB_TYPE(*mmd_mp) = M_MULTIDATA;
	(*mmd_mp)->b_wptr += mmd_mplen;
	mmd->mmd_dp = (*mmd_mp)->b_datap;
	mmd->mmd_hbuf = hdr_mp;

	return (mmd);
}
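/*
 * A minimal sketch of the free routine reached through buf_info->frp:
 * when the upper layer frees the Multidata mblk, the backing buffer goes
 * straight back to mmd_cache. The function name is an assumption; in the
 * real source, frp.free_func is initialized by the cache constructor
 * rather than here.
 */
static void
mmd_esballoc_free_sketch(caddr_t buf)
{
	/* buf is the frp.free_arg recorded in mmd_alloc() */
	kmem_cache_free(mmd_cache, buf);
}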
/*
 * e1000g_rxfree_func - the call-back function to reclaim an rx buffer
 *
 * This function is called when an mp is freed by the user through a
 * freeb call (only for mps constructed through a desballoc call).
 * It returns the freed buffer to the freelist.
 */
void
e1000g_rxfree_func(p_rx_sw_packet_t packet)
{
	e1000g_rx_data_t *rx_data;
	private_devi_list_t *devi_node;
	struct e1000g *Adapter;
	uint32_t ring_cnt;
	uint32_t ref_cnt;
	unsigned char *address;

	if (packet->ref_cnt == 0) {
		/*
		 * This case only happens when rx buffers are being freed
		 * in e1000g_stop() and freemsg() is called.
		 */
		return;
	}

	rx_data = (e1000g_rx_data_t *)(uintptr_t)packet->rx_data;

	if (packet->mp == NULL) {
		/*
		 * Allocate an mblk that binds to the data buffer
		 */
		address = (unsigned char *)packet->rx_buf->address;
		if (address != NULL) {
			packet->mp = desballoc((unsigned char *)
			    address, packet->rx_buf->size,
			    BPRI_MED, &packet->free_rtn);
		}
	}

	/*
	 * Enqueue the recycled packets in a recycle queue. When freelist
	 * dries up, move the entire chain of packets from recycle queue
	 * to freelist. This helps in avoiding per packet mutex contention
	 * around freelist.
	 */
	mutex_enter(&rx_data->recycle_lock);
	QUEUE_PUSH_TAIL(&rx_data->recycle_list, &packet->Link);
	rx_data->recycle_freepkt++;
	mutex_exit(&rx_data->recycle_lock);

	ref_cnt = atomic_dec_32_nv(&packet->ref_cnt);
	if (ref_cnt == 0) {
		mutex_enter(&e1000g_rx_detach_lock);
		e1000g_free_rx_sw_packet(packet, B_FALSE);

		atomic_dec_32(&rx_data->pending_count);
		atomic_dec_32(&e1000g_mblks_pending);

		if ((rx_data->pending_count == 0) &&
		    (rx_data->flag & E1000G_RX_STOPPED)) {
			devi_node = rx_data->priv_devi_node;

			if (devi_node != NULL) {
				ring_cnt = atomic_dec_32_nv(
				    &devi_node->pending_rx_count);
				if ((ring_cnt == 0) &&
				    (devi_node->flag &
				    E1000G_PRIV_DEVI_DETACH)) {
					e1000g_free_priv_devi_node(
					    devi_node);
				}
			} else {
				Adapter = rx_data->rx_ring->adapter;
				atomic_dec_32(
				    &Adapter->pending_rx_count);
			}

			e1000g_free_rx_pending_buffers(rx_data);
			e1000g_free_rx_data(rx_data);
		}
		mutex_exit(&e1000g_rx_detach_lock);
	}
}
/*
 * e1000g_receive - main receive routine
 *
 * This routine will process packets received in an interrupt
 */
mblk_t *
e1000g_receive(e1000g_rx_ring_t *rx_ring, mblk_t **tail, uint_t sz)
{
	struct e1000_hw *hw;
	mblk_t *nmp;
	mblk_t *ret_mp;
	mblk_t *ret_nmp;
	struct e1000_rx_desc *current_desc;
	struct e1000_rx_desc *last_desc;
	p_rx_sw_packet_t packet;
	p_rx_sw_packet_t newpkt;
	uint16_t length;
	uint32_t pkt_count;
	uint32_t desc_count;
	boolean_t accept_frame;
	boolean_t end_of_packet;
	boolean_t need_copy;
	struct e1000g *Adapter;
	dma_buffer_t *rx_buf;
	uint16_t cksumflags;
	uint_t chain_sz = 0;
	e1000g_rx_data_t *rx_data;
	uint32_t max_size;
	uint32_t min_size;

	ret_mp = NULL;
	ret_nmp = NULL;
	pkt_count = 0;
	desc_count = 0;
	cksumflags = 0;

	Adapter = rx_ring->adapter;
	rx_data = rx_ring->rx_data;
	hw = &Adapter->shared;

	/* Sync the Rx descriptor DMA buffers */
	(void) ddi_dma_sync(rx_data->rbd_dma_handle,
	    0, 0, DDI_DMA_SYNC_FORKERNEL);

	if (e1000g_check_dma_handle(rx_data->rbd_dma_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
		Adapter->e1000g_state |= E1000G_ERROR;
		return (NULL);
	}

	current_desc = rx_data->rbd_next;
	if (!(current_desc->status & E1000_RXD_STAT_DD)) {
		/*
		 * don't send anything up. just clear the RFD
		 */
		E1000G_DEBUG_STAT(rx_ring->stat_none);
		return (NULL);
	}

	max_size = Adapter->max_frame_size - ETHERFCSL - VLAN_TAGSZ;
	min_size = ETHERMIN;

	/*
	 * Loop through the receive descriptors starting at the last known
	 * descriptor owned by the hardware that begins a packet.
	 */
	while ((current_desc->status & E1000_RXD_STAT_DD) &&
	    (pkt_count < Adapter->rx_limit_onintr) &&
	    ((sz == E1000G_CHAIN_NO_LIMIT) || (chain_sz <= sz))) {

		desc_count++;

		/*
		 * Now this can happen in Jumbo frame situation.
		 */
		if (current_desc->status & E1000_RXD_STAT_EOP) {
			/* packet has EOP set */
			end_of_packet = B_TRUE;
		} else {
			/*
			 * If this received buffer does not have the
			 * End-Of-Packet bit set, the received packet
			 * will consume multiple buffers. We won't send this
			 * packet upstack till we get all the related buffers.
			 */
			end_of_packet = B_FALSE;
		}

		/*
		 * Get a pointer to the actual receive buffer.
		 * The mp->b_rptr is mapped to the current descriptor's
		 * buffer address.
		 */
		packet =
		    (p_rx_sw_packet_t)QUEUE_GET_HEAD(&rx_data->recv_list);
		ASSERT(packet != NULL);

		rx_buf = packet->rx_buf;

		length = current_desc->length;

#ifdef __sparc
		if (packet->dma_type == USE_DVMA)
			dvma_sync(rx_buf->dma_handle, 0,
			    DDI_DMA_SYNC_FORKERNEL);
		else
			(void) ddi_dma_sync(rx_buf->dma_handle,
			    E1000G_IPALIGNROOM, length,
			    DDI_DMA_SYNC_FORKERNEL);
#else
		(void) ddi_dma_sync(rx_buf->dma_handle,
		    E1000G_IPALIGNROOM, length,
		    DDI_DMA_SYNC_FORKERNEL);
#endif

		if (e1000g_check_dma_handle(
		    rx_buf->dma_handle) != DDI_FM_OK) {
			ddi_fm_service_impact(Adapter->dip,
			    DDI_SERVICE_DEGRADED);
			Adapter->e1000g_state |= E1000G_ERROR;
			goto rx_drop;
		}

		accept_frame = (current_desc->errors == 0) ||
		    ((current_desc->errors &
		    (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) != 0);

		if (hw->mac.type == e1000_82543) {
			unsigned char last_byte;

			last_byte =
			    *((unsigned char *)rx_buf->address + length - 1);

			if (TBI_ACCEPT(hw,
			    current_desc->status, current_desc->errors,
			    current_desc->length, last_byte,
			    Adapter->min_frame_size,
			    Adapter->max_frame_size)) {

				e1000_tbi_adjust_stats(Adapter,
				    length, hw->mac.addr);

				length--;
				accept_frame = B_TRUE;
			} else if (e1000_tbi_sbp_enabled_82543(hw) &&
			    (current_desc->errors == E1000_RXD_ERR_CE)) {
				accept_frame = B_TRUE;
			}
		}

		/*
		 * Indicate the packet to the NOS if it was good.
		 * Normally, hardware will discard bad packets for us.
		 * Check for the packet to be a valid Ethernet packet.
		 */
		if (!accept_frame) {
			/*
			 * Error in incoming packet: either the packet is not
			 * an Ethernet-size packet, or the packet has an
			 * error. In either case, the packet will simply be
			 * discarded.
			 */
			E1000G_DEBUGLOG_0(Adapter, E1000G_INFO_LEVEL,
			    "Process Receive Interrupts: Error in Packet\n");

			E1000G_STAT(rx_ring->stat_error);
			/*
			 * Returning here as we are done here. There is
			 * no point in waiting for the while loop to elapse
			 * and the things which were done. More efficient
			 * and less error prone...
			 */
			goto rx_drop;
		}

		/*
		 * If the Ethernet CRC is not stripped by the hardware,
		 * we need to strip it before sending it up to the stack.
		 */
		if (end_of_packet && !Adapter->strip_crc) {
			if (length > ETHERFCSL) {
				length -= ETHERFCSL;
			} else {
				/*
				 * If the fragment is smaller than the CRC,
				 * drop this fragment, do the processing of
				 * the end of the packet.
				 */
				ASSERT(rx_data->rx_mblk_tail != NULL);
				rx_data->rx_mblk_tail->b_wptr -=
				    ETHERFCSL - length;
				rx_data->rx_mblk_len -=
				    ETHERFCSL - length;

				QUEUE_POP_HEAD(&rx_data->recv_list);

				goto rx_end_of_packet;
			}
		}

		need_copy = B_TRUE;

		if (length <= Adapter->rx_bcopy_thresh)
			goto rx_copy;

		/*
		 * Get the pre-constructed mblk that was associated
		 * to the receive data buffer.
		 */
		if (packet->mp == NULL) {
			packet->mp = desballoc((unsigned char *)
			    rx_buf->address, length,
			    BPRI_MED, &packet->free_rtn);
		}

		if (packet->mp != NULL) {
			/*
			 * We have two sets of buffer pools: one associated
			 * with the Rx descriptors and the other a freelist
			 * buffer pool. Each time we get a good packet, try
			 * to get a buffer from the freelist pool using
			 * e1000g_get_buf. If we get a free buffer, replace
			 * the descriptor buffer address with the free buffer
			 * we just got, and pass the pre-constructed mblk
			 * upstack. (note no copying)
			 *
			 * If we failed to get a free buffer, then try to
			 * allocate a new buffer(mp) and copy the recv buffer
			 * content to our newly allocated buffer(mp). Don't
			 * disturb the descriptor buffer address.
			 * (note copying)
			 */
			newpkt = e1000g_get_buf(rx_data);

			if (newpkt != NULL) {
				/*
				 * Get the mblk associated to the data,
				 * and strip it off the sw packet.
				 */
				nmp = packet->mp;
				packet->mp = NULL;
				atomic_inc_32(&packet->ref_cnt);

				/*
				 * Now replace the old buffer with the new
				 * one we got from the free list.
				 * Both the RxSwPacket as well as the
				 * Receive Buffer Descriptor will now
				 * point to this new packet.
				 */
				packet = newpkt;
				current_desc->buffer_addr =
				    newpkt->rx_buf->dma_address;

				need_copy = B_FALSE;
			} else {
				/* EMPTY */
				E1000G_DEBUG_STAT(rx_ring->stat_no_freepkt);
			}
		}

rx_copy:
		if (need_copy) {
			/*
			 * No buffers available on free list,
			 * bcopy the data from the buffer and
			 * keep the original buffer. Don't want to
			 * do this.. Yack but no other way
			 */
			if ((nmp = allocb(length + E1000G_IPALIGNROOM,
			    BPRI_MED)) == NULL) {
				/*
				 * The system has no buffers available
				 * to send up the incoming packet, hence
				 * the packet will have to be processed
				 * when there're more buffers available.
				 */
				E1000G_STAT(rx_ring->stat_allocb_fail);
				goto rx_drop;
			}
			nmp->b_rptr += E1000G_IPALIGNROOM;
			nmp->b_wptr += E1000G_IPALIGNROOM;
			/*
			 * The free list did not have any buffers
			 * available, so, the received packet will
			 * have to be copied into a mp and the original
			 * buffer will have to be retained for future
			 * packet reception.
			 */
			bcopy(rx_buf->address, nmp->b_wptr, length);
		}

		/*
		 * The rx_sw_packet MUST be popped off the
		 * RxSwPacketList before either a putnext or freemsg
		 * is done on the mp that has now been created by the
		 * desballoc. If not, it is possible that the free
		 * routine will get called from the interrupt context
		 * and try to put this packet on the free list
		 */
		(p_rx_sw_packet_t)QUEUE_POP_HEAD(&rx_data->recv_list);

		ASSERT(nmp != NULL);
		nmp->b_wptr += length;

		if (rx_data->rx_mblk == NULL) {
			/*
			 * TCP/UDP checksum offload and
			 * IP checksum offload
			 */
			if (!(current_desc->status & E1000_RXD_STAT_IXSM)) {
				/*
				 * Check TCP/UDP checksum
				 */
				if ((current_desc->status &
				    E1000_RXD_STAT_TCPCS) &&
				    !(current_desc->errors &
				    E1000_RXD_ERR_TCPE))
					cksumflags |= HCK_FULLCKSUM |
					    HCK_FULLCKSUM_OK;
				/*
				 * Check IP Checksum
				 */
				if ((current_desc->status &
				    E1000_RXD_STAT_IPCS) &&
				    !(current_desc->errors &
				    E1000_RXD_ERR_IPE))
					cksumflags |= HCK_IPV4_HDRCKSUM;
			}
		}

		/*
		 * We need to maintain our packet chain in the global
		 * Adapter structure, for the Rx processing can end
		 * with a fragment that has no EOP set.
		 */
		if (rx_data->rx_mblk == NULL) {
			/* Get the head of the message chain */
			rx_data->rx_mblk = nmp;
			rx_data->rx_mblk_tail = nmp;
			rx_data->rx_mblk_len = length;
		} else {	/* Not the first packet */
			/* Continue adding buffers */
			rx_data->rx_mblk_tail->b_cont = nmp;
			rx_data->rx_mblk_tail = nmp;
			rx_data->rx_mblk_len += length;
		}
		ASSERT(rx_data->rx_mblk != NULL);
		ASSERT(rx_data->rx_mblk_tail != NULL);
		ASSERT(rx_data->rx_mblk_tail->b_cont == NULL);

		/*
		 * Now this MP is ready to travel upwards but some more
		 * fragments are coming.
		 * We will send the packet upwards as soon as we get EOP
		 * set on the packet.
		 */
		if (!end_of_packet) {
			/*
			 * continue to get the next descriptor,
			 * Tail would be advanced at the end
			 */
			goto rx_next_desc;
		}

rx_end_of_packet:
		if (E1000G_IS_VLAN_PACKET(rx_data->rx_mblk->b_rptr))
			max_size = Adapter->max_frame_size - ETHERFCSL;

		if ((rx_data->rx_mblk_len > max_size) ||
		    (rx_data->rx_mblk_len < min_size)) {
			E1000G_STAT(rx_ring->stat_size_error);
			goto rx_drop;
		}

		/*
		 * Found packet with EOP
		 * Process the last fragment.
		 */
		if (cksumflags != 0) {
			(void) hcksum_assoc(rx_data->rx_mblk,
			    NULL, NULL, 0, 0, 0, 0, cksumflags, 0);
			cksumflags = 0;
		}

		/*
		 * Count packets that span multi-descriptors
		 */
		E1000G_DEBUG_STAT_COND(rx_ring->stat_multi_desc,
		    (rx_data->rx_mblk->b_cont != NULL));

		/*
		 * Append to list to send upstream
		 */
		if (ret_mp == NULL) {
			ret_mp = ret_nmp = rx_data->rx_mblk;
		} else {
			ret_nmp->b_next = rx_data->rx_mblk;
			ret_nmp = rx_data->rx_mblk;
		}
		ret_nmp->b_next = NULL;
		*tail = ret_nmp;
		chain_sz += length;

		rx_data->rx_mblk = NULL;
		rx_data->rx_mblk_tail = NULL;
		rx_data->rx_mblk_len = 0;

		pkt_count++;

rx_next_desc:
		/*
		 * Zero out the receive descriptors status
		 */
		current_desc->status = 0;

		if (current_desc == rx_data->rbd_last)
			rx_data->rbd_next = rx_data->rbd_first;
		else
			rx_data->rbd_next++;

		last_desc = current_desc;
		current_desc = rx_data->rbd_next;

		/*
		 * Put the buffer that we just indicated back
		 * at the end of our list
		 */
		QUEUE_PUSH_TAIL(&rx_data->recv_list, &packet->Link);
	}	/* while loop */

	/* Sync the Rx descriptor DMA buffers */
	(void) ddi_dma_sync(rx_data->rbd_dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	/*
	 * Advance the E1000's Receive Queue #0 "Tail Pointer".
	 */
	E1000_WRITE_REG(hw, E1000_RDT(0),
	    (uint32_t)(last_desc - rx_data->rbd_first));

	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
		Adapter->e1000g_state |= E1000G_ERROR;
	}

	Adapter->rx_pkt_cnt = pkt_count;

	return (ret_mp);

rx_drop:
	/*
	 * Zero out the receive descriptors status
	 */
	current_desc->status = 0;

	/* Sync the Rx descriptor DMA buffers */
	(void) ddi_dma_sync(rx_data->rbd_dma_handle,
	    0, 0, DDI_DMA_SYNC_FORDEV);

	if (current_desc == rx_data->rbd_last)
		rx_data->rbd_next = rx_data->rbd_first;
	else
		rx_data->rbd_next++;

	last_desc = current_desc;

	(p_rx_sw_packet_t)QUEUE_POP_HEAD(&rx_data->recv_list);

	QUEUE_PUSH_TAIL(&rx_data->recv_list, &packet->Link);
	/*
	 * Reclaim all old buffers already allocated during
	 * Jumbo receives.....for incomplete reception
	 */
	if (rx_data->rx_mblk != NULL) {
		freemsg(rx_data->rx_mblk);
		rx_data->rx_mblk = NULL;
		rx_data->rx_mblk_tail = NULL;
		rx_data->rx_mblk_len = 0;
	}
	/*
	 * Advance the E1000's Receive Queue #0 "Tail Pointer".
	 */
	E1000_WRITE_REG(hw, E1000_RDT(0),
	    (uint32_t)(last_desc - rx_data->rbd_first));

	if (e1000g_check_acc_handle(Adapter->osdep.reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(Adapter->dip, DDI_SERVICE_DEGRADED);
		Adapter->e1000g_state |= E1000G_ERROR;
	}

	return (ret_mp);
}
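/*
 * A hedged sketch of e1000g_get_buf(), referenced from the receive path
 * above: pop an rx_sw_packet from the freelist, refilling the freelist in
 * one batch from the recycle list built by e1000g_rxfree_func() when it
 * runs dry. This approximates the core logic, not the verbatim source.
 */
static p_rx_sw_packet_t
e1000g_get_buf_sketch(e1000g_rx_data_t *rx_data)
{
	p_rx_sw_packet_t packet;

	mutex_enter(&rx_data->freelist_lock);
	packet = (p_rx_sw_packet_t)QUEUE_POP_HEAD(&rx_data->free_list);
	if (packet == NULL) {
		/* move the entire recycled chain over in one batch */
		mutex_enter(&rx_data->recycle_lock);
		QUEUE_SWITCH(&rx_data->free_list, &rx_data->recycle_list);
		rx_data->avail_freepkt += rx_data->recycle_freepkt;
		rx_data->recycle_freepkt = 0;
		mutex_exit(&rx_data->recycle_lock);
		packet = (p_rx_sw_packet_t)
		    QUEUE_POP_HEAD(&rx_data->free_list);
	}
	if (packet != NULL)
		rx_data->avail_freepkt--;
	mutex_exit(&rx_data->freelist_lock);

	return (packet);
}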
/* ARGSUSED */
int
UM_Receive_Packet(char *plkahead, unsigned short length, Adapter_Struc *pAd,
    int status, Data_Buff_Structure **pDataBuff)
{
	mblk_t *mp;
	smcg_t *smcg = (smcg_t *)pAd->sm_private;
	struct smcg_rx_buffer_desc *bdesc;

	ASSERT(mutex_owned(&smcg->rbuf_lock));

	/*
	 * Look for a free data buffer to replace the one we are about
	 * to pass upstream
	 */
	mutex_enter(&smcg->rlist_lock);
	if (((bdesc = smcg->rx_freelist) != NULL) && !smcg->detaching_flag) {
		smcg->rx_freelist = bdesc->next;
		smcg->rx_bufs_outstanding++;
		mp = desballoc((unsigned char *)
		    smcg->bdesc[smcg->rx_ring_index]->buf, (size_t)length,
		    BPRI_MED, (frtn_t *)smcg->bdesc[smcg->rx_ring_index]);
		if (mp == NULL) {
			bdesc->next = smcg->rx_freelist;
			smcg->rx_freelist = bdesc;
			smcg->rx_bufs_outstanding--;
			smcg->norcvbuf++;	/* Update Statistics */
			mutex_exit(&smcg->rlist_lock);
			goto rcv_done;	/* No resources */
		}
		mutex_exit(&smcg->rlist_lock);
		smcg->bdesc[smcg->rx_ring_index] = bdesc;
	} else {
		mutex_exit(&smcg->rlist_lock);
		/* freelist empty, leave buffer intact, and copy out data */
		mp = allocb(length, BPRI_MED);
		if (mp == NULL) {
			smcg->norcvbuf++;	/* Update Statistics */
			goto rcv_done;	/* No resources, drop the packet */
		}
		bcopy(smcg->bdesc[smcg->rx_ring_index]->buf, mp->b_wptr,
		    length);
	}

	mp->b_wptr += length;

	if (length < ETHERMIN)
		smcg->short_count++;	/* Update statistics */

	/*
	 * Queue received msgblks to be sent up to GLD without holding
	 * any mutexes
	 */
	/* Make sure that the last one points to NULL */
	ASSERT(mp->b_next == 0);
	if (!smcg->rq_first) {	/* Add first entry */
		smcg->rq_first = mp;
		smcg->rq_last = mp;
	} else {
		smcg->rq_last->b_next = mp;	/* Link mp's in the queue */
		smcg->rq_last = mp;	/* Move last pointer */
	}

rcv_done:
	smcg->smc_dbuf.fragment_list[0].fragment_ptr = (unsigned char *)
	    smcg->bdesc[smcg->rx_ring_index]->physaddr;
	*pDataBuff = &(smcg->smc_dbuf);
	smcg->rx_ring_index = (smcg->rx_ring_index + 1) % pAd->num_of_rx_buffs;

	return (SUCCESS);
}
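/*
 * All of the drivers above follow the same loaner-buffer pattern. A
 * driver-neutral sketch of that pattern, with illustrative names
 * (my_buf_t, my_buf_recycle, my_buf_setup): wrap a long-lived buffer in
 * an mblk with desballoc(); when the stack eventually calls freemsg(),
 * STREAMS invokes the registered free routine instead of freeing the
 * memory, and the driver rebuilds the mblk and recycles the buffer.
 */
typedef struct my_buf {
	frtn_t		free_rtn;	/* must stay valid while loaned */
	uchar_t		*base;		/* buffer backing the mblk */
	size_t		size;
	mblk_t		*mp;
	struct my_buf	*next;		/* free-list linkage */
} my_buf_t;

static void
my_buf_recycle(caddr_t arg)
{
	my_buf_t *bp = (my_buf_t *)(uintptr_t)arg;

	/* rebuild the mblk so the buffer can be loaned out again */
	bp->mp = desballoc(bp->base, bp->size, BPRI_MED, &bp->free_rtn);

	/* ... return bp to the driver's free list under its lock ... */
}

static int
my_buf_setup(my_buf_t *bp, uchar_t *base, size_t size)
{
	bp->base = base;
	bp->size = size;
	bp->free_rtn.free_func = my_buf_recycle;
	bp->free_rtn.free_arg = (caddr_t)bp;

	/* a NULL return means the buffer is unusable until retried */
	bp->mp = desballoc(base, size, BPRI_MED, &bp->free_rtn);
	return (bp->mp == NULL ? DDI_FAILURE : DDI_SUCCESS);
}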