/* internal pkt allocation, no BCM_RPC_TP_ENCAP_LEN */
/*
 * Allocate a transport buffer of 'len' bytes for the RPC transport layer.
 * 'send' selects the TX free list on NDIS builds; on Linux builds it is
 * unused by the allocation itself.  Returns NULL on allocation failure
 * (NDIS path) — on the Linux path an allocation failure logs and asserts.
 *
 * Side effects (Linux path): bumps bufalloc/buf_cnt_inuse under RPC_TP_LOCK
 * and may assert RX flow control via dbus_flowctrl_rx() when the in-use
 * count crosses RPCRX_WM_HI.
 */
static rpc_buf_t *
bcm_rpc_tp_pktget(rpc_tp_info_t * rpcb, int len, bool send)
{
	rpc_buf_t* b;

#if defined(NDIS)
	struct lbuf *lb;

	/* Shared lbufs are fixed-size; refuse requests that cannot fit. */
	if (len > LBDATASZ)
		return (NULL);

	/* Pull from the direction-appropriate shared free list. */
	if (send)
		lb = shared_lb_get(rpcb->sh, &rpcb->sh->txfree);
	else
		lb = shared_lb_get(rpcb->sh, &rpcb->sh->rxfree);

	if (lb != NULL)
		lb->len = len;

	b = (rpc_buf_t*)lb;
#else
	struct sk_buff *skb;

#if defined(CTFPOOL)
	/* CTF builds draw from the osl packet pool. */
	skb = PKTGET(rpcb->osh, len, FALSE);
#else
	/* Plain Linux: allocate an skb and expose the full 'len' as data. */
	if ((skb = dev_alloc_skb(len))) {
		skb_put(skb, len);
		skb->priority = 0;
	}
#endif /* defined(CTFPOOL) */

	b = (rpc_buf_t*)skb;

	if (b != NULL) {
#ifdef CTFMAP
		/* Clear the ctf buf flag to allow full dma map */
		PKTCLRCTF(rpcb->osh, skb);
		CTFMAPPTR(rpcb->osh, skb) = NULL;
#endif /* CTFMAP */
		RPC_TP_LOCK(rpcb);
		rpcb->bufalloc++;
		/* Crossing the high watermark: throttle the bus RX path once
		 * (rxflowctrl latches TRUE until released elsewhere).
		 */
		if (!rpcb->rxflowctrl && (rpcb->buf_cnt_inuse >= RPCRX_WM_HI)) {
			rpcb->rxflowctrl = TRUE;
			RPC_TP_ERR(("%s, rxflowctrl change to %d\n", __FUNCTION__, rpcb->rxflowctrl));
			dbus_flowctrl_rx(rpcb->bus, TRUE);
		}
		rpcb->buf_cnt_inuse++;
		/* Track the high-water mark of concurrently held buffers. */
		if (rpcb->buf_cnt_inuse > (int)rpcb->buf_cnt_max)
			rpcb->buf_cnt_max = rpcb->buf_cnt_inuse;
		RPC_TP_UNLOCK(rpcb);
	} else {
		printf("%s: buf alloc failed buf_cnt_inuse %d rxflowctrl:%d\n",
			__FUNCTION__, rpcb->buf_cnt_inuse, rpcb->rxflowctrl);
		ASSERT(0);
	}
#endif /* NDIS */
	return b;
}
/* Free the driver packet. Free the tag if present */
/*
 * Release a driver packet (and every packet chained behind it via
 * skb->next).  'send' set means this was a TX packet: the registered
 * tx_fn callback, if any, is notified before the chain is freed.
 * Decrements osh->pktalloced once per skb released.
 */
void BCMFASTPATH
osl_pktfree(osl_t *osh, void *p, bool send)
{
	struct sk_buff *skb, *nskb;

	skb = (struct sk_buff*) p;

	/* Notify the TX-complete hook before the memory goes away. */
	if (send && osh->pub.tx_fn)
		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);

	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);

	/* perversion: we use skb->next to chain multi-skb packets */
	while (skb) {
		nskb = skb->next;
		skb->next = NULL;

#ifdef CTFMAP
		/* Clear the map ptr before freeing */
		PKTCLRCTF(osh, skb);
		CTFMAPPTR(osh, skb) = NULL;
#endif /* CTFMAP */

#ifdef CTFPOOL
		/* Pool-origin skbs with a single reference go back to the
		 * fast pool instead of the kernel allocator.
		 */
		if ((PKTISFAST(osh, skb)) && (atomic_read(&skb->users) == 1))
			osl_pktfastfree(osh, skb);
		else {
#else /* CTFPOOL */
		{
#endif /* CTFPOOL */
			if (skb->destructor)
				/* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
				 * destructor exists
				 */
				dev_kfree_skb_any(skb);
			else
				/* can free immediately (even in_irq()) if destructor
				 * does not exist
				 */
				dev_kfree_skb(skb);
		}
		/* One fewer outstanding packet charged to this osh. */
		atomic_dec(&osh->pktalloced);
		skb = nskb;
	}
}

#ifdef DHD_USE_STATIC_BUF
/*
 * Allocate a packet from the preallocated static skb pool: 4K slots for
 * requests up to PAGE_SIZE, 8K slots for larger ones.  Requests above
 * PAGE_SIZE*2, or an exhausted pool, fall back to dynamic osl_pktget().
 * The pool-wide semaphore serializes updates to the pkt_use[] slot flags.
 *
 * NOTE(review): bcm_static_skb is dereferenced without a NULL check —
 * presumably guaranteed initialized before first call; verify against the
 * init path.  Direct assignment to skb->tail assumes a kernel where tail
 * is a plain pointer (no NET_SKBUFF_DATA_USES_OFFSET) — TODO confirm.
 */
void*
osl_pktget_static(osl_t *osh, uint len)
{
	int i = 0;
	struct sk_buff *skb;

	/* Larger than the biggest static slot: go straight to the heap. */
	if (len > (PAGE_SIZE*2)) {
		printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
		return osl_pktget(osh, len);
	}

	down(&bcm_static_skb->osl_pkt_sem);

	if (len <= PAGE_SIZE) {
		/* Scan the 4K slots (indices 0..STATIC_PKT_MAX_NUM-1). */
		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
			if (bcm_static_skb->pkt_use[i] == 0)
				break;
		}

		if (i != STATIC_PKT_MAX_NUM) {
			/* Claim the slot, then drop the lock before touching
			 * the skb — the flag alone protects the slot now.
			 */
			bcm_static_skb->pkt_use[i] = 1;
			up(&bcm_static_skb->osl_pkt_sem);

			skb = bcm_static_skb->skb_4k[i];
			skb->tail = skb->data + len;
			skb->len = len;

			return skb;
		}
	}

	/* 8K slots use the second half of pkt_use[] (offset by
	 * STATIC_PKT_MAX_NUM); also reached when all 4K slots are busy.
	 */
	for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
		if (bcm_static_skb->pkt_use[i+STATIC_PKT_MAX_NUM] == 0)
			break;
	}

	if (i != STATIC_PKT_MAX_NUM) {
		bcm_static_skb->pkt_use[i+STATIC_PKT_MAX_NUM] = 1;
		up(&bcm_static_skb->osl_pkt_sem);
		skb = bcm_static_skb->skb_8k[i];
		skb->tail = skb->data + len;
		skb->len = len;

		return skb;
	}

	/* Pool exhausted — release the lock and fall back to dynamic alloc. */
	up(&bcm_static_skb->osl_pkt_sem);
	printk("%s: all static pkt in use!\n", __FUNCTION__);
	return osl_pktget(osh, len);
}