void BCMFASTPATH osl_pktfree(osl_t *osh, void *p, bool send)
{
	struct sk_buff *skb, *nskb;
	unsigned long flags;

	skb = (struct sk_buff *) p;

	if (send && osh->pub.tx_fn)
		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);

	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);

	while (skb) {
		nskb = skb->next;
		skb->next = NULL;

#ifdef CTFPOOL
		if ((PKTISFAST(osh, skb)) && (atomic_read(&skb->users) == 1))
			osl_pktfastfree(osh, skb);
		else {
#else
		{
#endif
			if (skb->destructor)
				dev_kfree_skb_any(skb);
			else
				dev_kfree_skb(skb);
		}

		spin_lock_irqsave(&osh->pktalloc_lock, flags);
		osh->pub.pktalloced--;
		spin_unlock_irqrestore(&osh->pktalloc_lock, flags);

		skb = nskb;
	}
}

#ifdef CONFIG_DHD_USE_STATIC_BUF
#ifdef CUSTOMER_HW_SAMSUNG
void *
osl_pktget_static(osl_t *osh, uint len)
{
	int i = 0;
	struct sk_buff *skb;

	if (len > DHD_SKB_4PAGE_BUFSIZE) {
		printk("osl_pktget_static: Do we really need this big skb??"
			" len=%d\n", len);
		return osl_pktget(osh, len);
	}
/* Free the driver packet. Free the tag if present */
void BCMFASTPATH osl_pktfree(osl_t *osh, void *p, bool send)
{
	struct sk_buff *skb, *nskb;

	if (osh == NULL) {
		printk("%s: osh == NULL \n", __FUNCTION__);
		return;
	}

	skb = (struct sk_buff *) p;

	if (send && osh->pub.tx_fn)
		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);

	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);

	/* perversion: we use skb->next to chain multi-skb packets */
	while (skb) {
		nskb = skb->next;
		skb->next = NULL;

#ifdef CTFPOOL
		if (PKTISFAST(osh, skb)) {
			if (atomic_read(&skb->users) == 1)
				smp_rmb();
			else if (!atomic_dec_and_test(&skb->users))
				goto next_skb;
			osl_pktfastfree(osh, skb);
		} else
#endif
		{
			if (skb->destructor)
				/* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
				 * destructor exists
				 */
				dev_kfree_skb_any(skb);
			else
				/* can free immediately (even in_irq()) if destructor
				 * does not exist
				 */
				dev_kfree_skb(skb);
		}

#ifdef CTFPOOL
next_skb:
#endif
		atomic_dec(&osh->pktalloced);
		skb = nskb;
	}
}
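/*
 * osl_pktfastfree(), called from the CTFPOOL branches above, is not part of
 * this excerpt.  The sketch below only illustrates the assumed recycling
 * idea -- reset the skb and push it back onto a driver-private free list
 * instead of handing it to the kernel allocator.  The pool structure and its
 * field names are assumptions for illustration, not the real Broadcom
 * implementation.
 */
struct ctfpool_sketch {
	struct sk_buff *head;		/* free list, chained through skb->next */
	spinlock_t lock;
	unsigned int fast_frees;
};

static void osl_pktfastfree_sketch(struct ctfpool_sketch *pool, struct sk_buff *skb)
{
	unsigned long flags;

	/* scrub state a recycled skb must not carry into its next use */
	skb->dev = NULL;
	skb->destructor = NULL;
	atomic_set(&skb->users, 1);

	/* push onto the pool's singly linked free list */
	spin_lock_irqsave(&pool->lock, flags);
	skb->next = pool->head;
	pool->head = skb;
	pool->fast_frees++;
	spin_unlock_irqrestore(&pool->lock, flags);
}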
void BCMFASTPATH osl_pktfree(osl_t *osh, void *p, bool send)
{
	struct sk_buff *skb, *nskb;

	skb = (struct sk_buff *) p;

	if (send && osh->pub.tx_fn)
		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);

	while (skb) {
		nskb = skb->next;
		skb->next = NULL;

#ifdef CTFPOOL
		if (PKTISFAST(osh, skb))
			osl_pktfastfree(osh, skb);
		else {
#else
		{
#endif
			if (skb->destructor)
				dev_kfree_skb_any(skb);
			else
				dev_kfree_skb(skb);
		}

		osh->pub.pktalloced--;
		skb = nskb;
	}
}

#ifdef DHD_USE_STATIC_BUF
void *
osl_pktget_static(osl_t *osh, uint len)
{
	int i;
	struct sk_buff *skb;

	if (len > (PAGE_SIZE * 2)) {
		printf("%s: attempt to allocate huge packet (0x%x)\n",
			__FUNCTION__, len);
		return osl_pktget(osh, len);
	}
void BCMFASTPATH osl_pktfree(osl_t *osh, void *p, bool send)
{
	struct sk_buff *skb, *nskb;

	if (osh == NULL) {
		printk("%s: osh == NULL \n", __FUNCTION__);
		return;
	}

	skb = (struct sk_buff *) p;

	if (send && osh->pub.tx_fn)
		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);

	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);

	while (skb) {
		nskb = skb->next;
		skb->next = NULL;

#ifdef CTFPOOL
		if (PKTISFAST(osh, skb)) {
			if (atomic_read(&skb->users) == 1)
				smp_rmb();
			else if (!atomic_dec_and_test(&skb->users))
				goto next_skb;
			osl_pktfastfree(osh, skb);
		} else
#endif
		{
			if (skb->destructor)
				dev_kfree_skb_any(skb);
			else
				dev_kfree_skb(skb);
		}

#ifdef CTFPOOL
next_skb:
#endif
		atomic_dec(&osh->pktalloced);
		skb = nskb;
	}
}
/* Free the driver packet. Free the tag if present */
void BCMFASTPATH osl_pktfree(osl_t *osh, void *p, bool send)
{
	struct sk_buff *skb, *nskb;

	skb = (struct sk_buff *) p;

	if (send && osh->pub.tx_fn)
		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);

	/* perversion: we use skb->next to chain multi-skb packets */
	while (skb) {
		nskb = skb->next;
		skb->next = NULL;

#ifdef CTFPOOL
		if (PKTISFAST(osh, skb))
			osl_pktfastfree(osh, skb);
		else {
#else /* CTFPOOL */
		{
#endif /* CTFPOOL */
			if (skb->destructor)
				/* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
				 * destructor exists
				 */
				dev_kfree_skb_any(skb);
			else
				/* can free immediately (even in_irq()) if destructor
				 * does not exist
				 */
				dev_kfree_skb(skb);
		}

		osh->pub.pktalloced--;
		skb = nskb;
	}
}

#ifdef DHD_USE_STATIC_BUF
void *
osl_pktget_static(osl_t *osh, uint len)
{
	int i = 0;
	struct sk_buff *skb;

	if (len > (PAGE_SIZE * 2)) {
		printk("Do we really need this big skb??\n");
		return osl_pktget(osh, len);
	}

	down(&bcm_static_skb->osl_pkt_sem);

	if (len <= PAGE_SIZE) {
		for (i = 0; i < MAX_STATIC_PKT_NUM; i++) {
			if (bcm_static_skb->pkt_use[i] == 0)
				break;
		}

		if (i != MAX_STATIC_PKT_NUM) {
			bcm_static_skb->pkt_use[i] = 1;
			up(&bcm_static_skb->osl_pkt_sem);

			skb = bcm_static_skb->skb_4k[i];
			skb->tail = skb->data + len;
			skb->len = len;

			return skb;
		}
	}

	for (i = 0; i < MAX_STATIC_PKT_NUM; i++) {
		if (bcm_static_skb->pkt_use[i + MAX_STATIC_PKT_NUM] == 0)
			break;
	}

	if (i != MAX_STATIC_PKT_NUM) {
		bcm_static_skb->pkt_use[i + MAX_STATIC_PKT_NUM] = 1;
		up(&bcm_static_skb->osl_pkt_sem);

		skb = bcm_static_skb->skb_8k[i];
		skb->tail = skb->data + len;
		skb->len = len;

		return skb;
	}

	up(&bcm_static_skb->osl_pkt_sem);
	printk("all static pkt in use!\n");
	return osl_pktget(osh, len);
}
void BCMFASTPATH osl_pktfree(osl_t *osh, void *p, bool send)
{
	struct sk_buff *skb, *nskb;

	skb = (struct sk_buff *) p;

	if (send && osh->pub.tx_fn)
		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);

	while (skb) {
		nskb = skb->next;
		skb->next = NULL;

#ifdef CTFPOOL
		if (PKTISFAST(osh, skb))
			osl_pktfastfree(osh, skb);
		else {
#else
		{
#endif
			if (skb->destructor)
				dev_kfree_skb_any(skb);
			else
				dev_kfree_skb(skb);
		}

		osh->pub.pktalloced--;
		skb = nskb;
	}
}

#ifdef DHD_USE_STATIC_BUF
void *
osl_pktget_static(osl_t *osh, uint len)
{
	int i;
	struct sk_buff *skb;

	/* static buf is 32k, then must be set (PAGE_SIZE * 8 = 32k) */
	if (len > (PAGE_SIZE * 8)) {
		printk("%s: attempt to allocate huge packet (0x%x)\n",
			__FUNCTION__, len);
		return osl_pktget(osh, len);
	}

	down(&bcm_static_skb->osl_pkt_sem);

	if (len <= PAGE_SIZE) {
		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
			if (bcm_static_skb->pkt_use[i] == 0)
				break;
		}

		if (i != STATIC_PKT_MAX_NUM) {
			bcm_static_skb->pkt_use[i] = 1;
			skb = bcm_static_skb->skb_4k[i];
			skb->tail = skb->data + len;
			skb->len = len;
			up(&bcm_static_skb->osl_pkt_sem);
			return skb;
		}
	}

	for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
		if (bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] == 0)
			break;
	}

	if (i != STATIC_PKT_MAX_NUM) {
		bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 1;
		skb = bcm_static_skb->skb_32k[i];
		skb->tail = skb->data + len;
		skb->len = len;
		up(&bcm_static_skb->osl_pkt_sem);
		return skb;
	}

	up(&bcm_static_skb->osl_pkt_sem);
	printk("%s: all static pkt in use!\n", __FUNCTION__);
	return osl_pktget(osh, len);
}
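/*
 * A hedged usage sketch: how a caller might draw a buffer from the static
 * pool above and later release it.  dhd_rx_frame_sketch(), the 2048-byte
 * length and the osl_pktfree_static() counterpart are illustrative
 * assumptions, not APIs confirmed by this excerpt; a hypothetical sketch of
 * the free path appears at the end of this section.
 */
static void dhd_rx_frame_sketch(osl_t *osh)
{
	/* falls back to osl_pktget() when the static pool is exhausted */
	void *pkt = osl_pktget_static(osh, 2048);

	if (pkt == NULL)
		return;

	/* ... fill the buffer and hand the frame up the stack ... */

	/* assumed counterpart to osl_pktget_static() */
	osl_pktfree_static(osh, pkt, FALSE);
}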
void BCMFASTPATH osl_pktfree(osl_t *osh, void *p, bool send)
{
	struct sk_buff *skb, *nskb;

	skb = (struct sk_buff *) p;

	if (send && osh->pub.tx_fn)
		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);

	while (skb) {
		nskb = skb->next;
		skb->next = NULL;

#ifdef CTFPOOL
		if (PKTISFAST(osh, skb))
			osl_pktfastfree(osh, skb);
		else {
#else
		{
#endif
			if (skb->destructor)
				dev_kfree_skb_any(skb);
			else
				dev_kfree_skb(skb);
		}

		osh->pub.pktalloced--;
		skb = nskb;
	}
}

#ifdef DHD_USE_STATIC_BUF
void *
osl_pktget_static(osl_t *osh, uint len)
{
	int i = 0;
	struct sk_buff *skb;

	if (len > (PAGE_SIZE * 2)) {
		printk("Do we really need this big skb??\n");
		return osl_pktget(osh, len);
	}

	down(&bcm_static_skb->osl_pkt_sem);

	if (len <= PAGE_SIZE) {
		for (i = 0; i < MAX_STATIC_PKT_NUM; i++) {
			if (bcm_static_skb->pkt_use[i] == 0)
				break;
		}

		if (i != MAX_STATIC_PKT_NUM) {
			bcm_static_skb->pkt_use[i] = 1;
			up(&bcm_static_skb->osl_pkt_sem);

			skb = bcm_static_skb->skb_4k[i];
			skb->tail = skb->data + len;
			skb->len = len;

			return skb;
		}
	}

	for (i = 0; i < MAX_STATIC_PKT_NUM; i++) {
		if (bcm_static_skb->pkt_use[i + MAX_STATIC_PKT_NUM] == 0)
			break;
	}

	if (i != MAX_STATIC_PKT_NUM) {
		bcm_static_skb->pkt_use[i + MAX_STATIC_PKT_NUM] = 1;
		up(&bcm_static_skb->osl_pkt_sem);

		skb = bcm_static_skb->skb_8k[i];
		skb->tail = skb->data + len;
		skb->len = len;

		return skb;
	}

	up(&bcm_static_skb->osl_pkt_sem);
	printk("all static pkt in use!\n");
	return osl_pktget(osh, len);
}
void BCMFASTPATH osl_pktfree(osl_t *osh, void *p, bool send)
{
	struct sk_buff *skb, *nskb;
	unsigned long flags;

	skb = (struct sk_buff *) p;

	if (send && osh->pub.tx_fn)
		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);

	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);

	while (skb) {
		nskb = skb->next;
		skb->next = NULL;

#ifdef CTFPOOL
		if ((PKTISFAST(osh, skb)) && (atomic_read(&skb->users) == 1))
			osl_pktfastfree(osh, skb);
		else {
#else
		{
#endif
			if (skb->destructor)
				dev_kfree_skb_any(skb);
			else
				dev_kfree_skb(skb);
		}

		spin_lock_irqsave(&osh->pktalloc_lock, flags);
		osh->pub.pktalloced--;
		spin_unlock_irqrestore(&osh->pktalloc_lock, flags);

		skb = nskb;
	}
}

#ifdef CONFIG_DHD_USE_STATIC_BUF
void *
osl_pktget_static(osl_t *osh, uint len)
{
	int i = 0;
	struct sk_buff *skb;

	if (len > DHD_SKB_MAX_BUFSIZE) {
		printk("osl_pktget_static: Do we really need this big skb??"
			" len=%d\n", len);
		return osl_pktget(osh, len);
	}

	down(&bcm_static_skb->osl_pkt_sem);

	if (len <= DHD_SKB_1PAGE_BUFSIZE) {
		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
			if (bcm_static_skb->pkt_use[i] == 0)
				break;
		}

		if (i != STATIC_PKT_MAX_NUM) {
			bcm_static_skb->pkt_use[i] = 1;
			skb = bcm_static_skb->skb_4k[i];
			skb->tail = skb->data + len;
			skb->len = len;
			up(&bcm_static_skb->osl_pkt_sem);
			return skb;
		}
	}

	if (len <= DHD_SKB_2PAGE_BUFSIZE) {
		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
			if (bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] == 0)
				break;
		}

		if (i != STATIC_PKT_MAX_NUM) {
			bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 1;
			skb = bcm_static_skb->skb_8k[i];
			skb->tail = skb->data + len;
			skb->len = len;
			up(&bcm_static_skb->osl_pkt_sem);
			return skb;
		}
	}

#if defined(ENHANCED_STATIC_BUF)
	if (bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM * 2] == 0) {
		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM * 2] = 1;
		skb = bcm_static_skb->skb_16k;
		skb->tail = skb->data + len;
		skb->len = len;
		up(&bcm_static_skb->osl_pkt_sem);
		return skb;
	}
#endif

	up(&bcm_static_skb->osl_pkt_sem);
	printk("osl_pktget_static: all static pkt in use!\n");
	return osl_pktget(osh, len);
}
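/*
 * Every static-buffer variant in this section indexes into a preallocated
 * bcm_static_skb container whose definition lies outside the excerpt.  The
 * sketch below is only the layout implied by those accesses: the struct
 * name, field order, array depth and use of unsigned char for pkt_use[] are
 * assumptions, not the actual header.
 */
#define STATIC_PKT_MAX_NUM_SKETCH	8	/* assumed pool depth */

struct bcm_static_pkt_sketch {
	struct sk_buff *skb_4k[STATIC_PKT_MAX_NUM_SKETCH];	/* <= one page */
	struct sk_buff *skb_8k[STATIC_PKT_MAX_NUM_SKETCH];	/* <= two pages */
#ifdef ENHANCED_STATIC_BUF
	struct sk_buff *skb_16k;				/* single large buffer */
#endif
	struct semaphore osl_pkt_sem;				/* serializes pkt_use[] updates */
	unsigned char pkt_use[STATIC_PKT_MAX_NUM_SKETCH * 2 + 1];	/* 0 = free, 1 = in use */
};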
void BCMFASTPATH osl_pktfree(osl_t *osh, void *p, bool send)
{
	struct sk_buff *skb, *nskb;
	unsigned long flags;

	skb = (struct sk_buff *) p;

	if (send && osh->pub.tx_fn)
		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);

	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);

	while (skb) {
		nskb = skb->next;
		skb->next = NULL;

#ifdef CTFPOOL
		if ((PKTISFAST(osh, skb)) && (atomic_read(&skb->users) == 1))
			osl_pktfastfree(osh, skb);
		else {
#else
		{
#endif
			if (skb->destructor)
				dev_kfree_skb_any(skb);
			else
				dev_kfree_skb(skb);
		}

		spin_lock_irqsave(&osh->pktalloc_lock, flags);
		osh->pub.pktalloced--;
		spin_unlock_irqrestore(&osh->pktalloc_lock, flags);

		skb = nskb;
	}
}

#ifdef CONFIG_DHD_USE_STATIC_BUF
void *
osl_pktget_static(osl_t *osh, uint len)
{
	int i = 0;
	struct sk_buff *skb;

#ifdef CONFIG_COMMON_PATCH
	if (len > (PAGE_SIZE * 3)) {
		printk("%s: attempt to allocate huge packet (0x%x)\n",
			__FUNCTION__, len);
		return osl_pktget(osh, len);
	}
#else
	if (len > (PAGE_SIZE * 2)) {
		printk("%s: attempt to allocate huge packet (0x%x)\n",
			__FUNCTION__, len);
		return osl_pktget(osh, len);
	}
#endif

	down(&bcm_static_skb->osl_pkt_sem);

	if (len <= PAGE_SIZE) {
		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
			if (bcm_static_skb->pkt_use[i] == 0)
				break;
		}

		if (i != STATIC_PKT_MAX_NUM) {
			bcm_static_skb->pkt_use[i] = 1;
			up(&bcm_static_skb->osl_pkt_sem);

			skb = bcm_static_skb->skb_4k[i];
			skb->tail = skb->data + len;
			skb->len = len;

			return skb;
		}
	}

	for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
		if (bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] == 0)
			break;
	}

	if (i != STATIC_PKT_MAX_NUM) {
		bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 1;
		up(&bcm_static_skb->osl_pkt_sem);

#ifdef CONFIG_COMMON_PATCH
		skb = bcm_static_skb->skb_12k[i];
#else
		skb = bcm_static_skb->skb_8k[i];
#endif
		skb->tail = skb->data + len;
		skb->len = len;

		return skb;
	}

	up(&bcm_static_skb->osl_pkt_sem);
	printk("%s: all static pkt in use!\n", __FUNCTION__);
	return osl_pktget(osh, len);
}
void BCMFASTPATH osl_pktfree(osl_t *osh, void *p, bool send)
{
	struct sk_buff *skb, *nskb;

	if (osh == NULL) {
		printk("%s: osh == NULL \n", __FUNCTION__);
		return;
	}

	skb = (struct sk_buff *) p;

	if (send && osh->pub.tx_fn)
		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);

	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);

	while (skb) {
		nskb = skb->next;
		skb->next = NULL;

#ifdef CTFPOOL
		if ((PKTISFAST(osh, skb)) && (atomic_read(&skb->users) == 1))
			osl_pktfastfree(osh, skb);
		else {
#else
		{
#endif
			if (skb->destructor)
				dev_kfree_skb_any(skb);
			else
				dev_kfree_skb(skb);
		}

		atomic_dec(&osh->pktalloced);
		skb = nskb;
	}
}

#ifdef CONFIG_DHD_USE_STATIC_BUF
void *
osl_pktget_static(osl_t *osh, uint len)
{
	int i = 0;
	struct sk_buff *skb;

	if (len > DHD_SKB_MAX_BUFSIZE) {
		printk("%s: attempt to allocate huge packet (0x%x)\n",
			__FUNCTION__, len);
		return osl_pktget(osh, len);
	}

	down(&bcm_static_skb->osl_pkt_sem);

	if (len <= DHD_SKB_1PAGE_BUFSIZE) {
		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
			if (bcm_static_skb->pkt_use[i] == 0)
				break;
		}

		if (i != STATIC_PKT_MAX_NUM) {
			bcm_static_skb->pkt_use[i] = 1;
			skb = bcm_static_skb->skb_4k[i];
			skb->tail = skb->data + len;
			skb->len = len;
			up(&bcm_static_skb->osl_pkt_sem);
			return skb;
		}
	}

	if (len <= DHD_SKB_2PAGE_BUFSIZE) {
		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
			if (bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] == 0)
				break;
		}

		if (i != STATIC_PKT_MAX_NUM) {
			bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 1;
			skb = bcm_static_skb->skb_8k[i];
			skb->tail = skb->data + len;
			skb->len = len;
			up(&bcm_static_skb->osl_pkt_sem);
			return skb;
		}
	}

#if defined(ENHANCED_STATIC_BUF)
	if (bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM * 2] == 0) {
		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM * 2] = 1;
		skb = bcm_static_skb->skb_16k;
		skb->tail = skb->data + len;
		skb->len = len;
		up(&bcm_static_skb->osl_pkt_sem);
		return skb;
	}
#endif

	up(&bcm_static_skb->osl_pkt_sem);
	printk("%s: all static pkt in use!\n", __FUNCTION__);
	return osl_pktget(osh, len);
}
/* Free the driver packet. Free the tag if present */
void BCMFASTPATH osl_pktfree(osl_t *osh, void *p, bool send)
{
	struct sk_buff *skb, *nskb;

	skb = (struct sk_buff *) p;

	if (send && osh->pub.tx_fn)
		osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);

	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);

	/* perversion: we use skb->next to chain multi-skb packets */
	while (skb) {
		nskb = skb->next;
		skb->next = NULL;

#ifdef CTFMAP
		/* Clear the map ptr before freeing */
		PKTCLRCTF(osh, skb);
		CTFMAPPTR(osh, skb) = NULL;
#endif /* CTFMAP */

#ifdef CTFPOOL
		if ((PKTISFAST(osh, skb)) && (atomic_read(&skb->users) == 1))
			osl_pktfastfree(osh, skb);
		else {
#else /* CTFPOOL */
		{
#endif /* CTFPOOL */
			if (skb->destructor)
				/* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
				 * destructor exists
				 */
				dev_kfree_skb_any(skb);
			else
				/* can free immediately (even in_irq()) if destructor
				 * does not exist
				 */
				dev_kfree_skb(skb);
		}

		atomic_dec(&osh->pktalloced);
		skb = nskb;
	}
}

#ifdef DHD_USE_STATIC_BUF
void *
osl_pktget_static(osl_t *osh, uint len)
{
	int i = 0;
	struct sk_buff *skb;

	if (len > (PAGE_SIZE * 2)) {
		printk("%s: attempt to allocate huge packet (0x%x)\n",
			__FUNCTION__, len);
		return osl_pktget(osh, len);
	}

	down(&bcm_static_skb->osl_pkt_sem);

	if (len <= PAGE_SIZE) {
		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
			if (bcm_static_skb->pkt_use[i] == 0)
				break;
		}

		if (i != STATIC_PKT_MAX_NUM) {
			bcm_static_skb->pkt_use[i] = 1;
			up(&bcm_static_skb->osl_pkt_sem);

			skb = bcm_static_skb->skb_4k[i];
			skb->tail = skb->data + len;
			skb->len = len;

			return skb;
		}
	}

	for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
		if (bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] == 0)
			break;
	}

	if (i != STATIC_PKT_MAX_NUM) {
		bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 1;
		up(&bcm_static_skb->osl_pkt_sem);

		skb = bcm_static_skb->skb_8k[i];
		skb->tail = skb->data + len;
		skb->len = len;

		return skb;
	}

	up(&bcm_static_skb->osl_pkt_sem);
	printk("%s: all static pkt in use!\n", __FUNCTION__);
	return osl_pktget(osh, len);
}
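/*
 * None of the snippets above include the release path for buffers handed
 * out by osl_pktget_static().  The sketch below illustrates the obvious
 * counterpart -- find which static slot the skb came from, clear its
 * pkt_use[] flag, and fall back to the regular osl_pktfree() for anything
 * not drawn from the static pool.  It is a hypothetical reconstruction for
 * illustration, not the shipped osl_pktfree_static().
 */
void osl_pktfree_static_sketch(osl_t *osh, void *p, bool send)
{
	int i;

	down(&bcm_static_skb->osl_pkt_sem);

	/* one-page slots */
	for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
		if (p == (void *)bcm_static_skb->skb_4k[i]) {
			bcm_static_skb->pkt_use[i] = 0;
			up(&bcm_static_skb->osl_pkt_sem);
			return;
		}
	}

	/* two-page slots */
	for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
		if (p == (void *)bcm_static_skb->skb_8k[i]) {
			bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 0;
			up(&bcm_static_skb->osl_pkt_sem);
			return;
		}
	}

	up(&bcm_static_skb->osl_pkt_sem);

	/* not a static buffer: free it through the regular path */
	osl_pktfree(osh, p, send);
}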