static inline struct sk_buff *
osl_pktfastget(osl_t *osh, uint len)
{
    struct sk_buff *skb;
#ifdef CTFPOOL_SPINLOCK
    unsigned long flags;
#endif /* CTFPOOL_SPINLOCK */

    /* Try to do fast allocate. Return null if ctfpool is not in use
     * or if there are no items in the ctfpool.
     */
    if (osh->ctfpool == NULL)
        return NULL;

    CTFPOOL_LOCK(osh->ctfpool, flags);
    if (osh->ctfpool->head == NULL) {
        ASSERT(osh->ctfpool->curr_obj == 0);
        osh->ctfpool->slow_allocs++;
        CTFPOOL_UNLOCK(osh->ctfpool, flags);
        return NULL;
    }

    if (len > osh->ctfpool->obj_size) {
        CTFPOOL_UNLOCK(osh->ctfpool, flags);
        return NULL;
    }

    ASSERT(len <= osh->ctfpool->obj_size);

    /* Get an object from ctfpool */
    skb = (struct sk_buff *)osh->ctfpool->head;
    osh->ctfpool->head = (void *)skb->next;

    osh->ctfpool->fast_allocs++;
    osh->ctfpool->curr_obj--;
    ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
    CTFPOOL_UNLOCK(osh->ctfpool, flags);

    /* Init skb struct */
    skb->next = skb->prev = NULL;
#if defined(__ARM_ARCH_7A__)
    skb->data = skb->head + NET_SKB_PAD;
    skb->tail = skb->head + NET_SKB_PAD;
#else
    skb->data = skb->head + 16;
    skb->tail = skb->head + 16;
#endif /* __ARM_ARCH_7A__ */
    skb->len = 0;
    skb->cloned = 0;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
    skb->list = NULL;
#endif
    atomic_set(&skb->users, 1);

    PKTSETCLINK(skb, NULL);
    PKTCCLRATTR(skb);
    PKTFAST(osh, skb) &= ~(CTFBUF | SKIPCT | CHAINED);

    return skb;
}
static inline struct sk_buff *
osl_pktfastget(osl_t *osh, uint len)
{
    struct sk_buff *skb;
#ifdef CTFPOOL_SPINLOCK
    unsigned long flags;
#endif

    if (osh->ctfpool == NULL)
        return NULL;

    CTFPOOL_LOCK(osh->ctfpool, flags);
    if (osh->ctfpool->head == NULL) {
        ASSERT(osh->ctfpool->curr_obj == 0);
        osh->ctfpool->slow_allocs++;
        CTFPOOL_UNLOCK(osh->ctfpool, flags);
        return NULL;
    }

    ASSERT(len <= osh->ctfpool->obj_size);

    skb = (struct sk_buff *)osh->ctfpool->head;
    osh->ctfpool->head = (void *)skb->next;

    osh->ctfpool->fast_allocs++;
    osh->ctfpool->curr_obj--;
    ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
    CTFPOOL_UNLOCK(osh->ctfpool, flags);

    skb->next = skb->prev = NULL;
#if defined(__ARM_ARCH_7A__)
    skb->data = skb->head + NET_SKB_PAD;
    skb->tail = skb->head + NET_SKB_PAD;
#else
    skb->data = skb->head + 16;
    skb->tail = skb->head + 16;
#endif
    skb->len = 0;
    skb->cloned = 0;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
    skb->list = NULL;
#endif
    atomic_set(&skb->users, 1);

    PKTSETCLINK(skb, NULL);
    PKTCCLRATTR(skb);

    return skb;
}
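Both variants above cover only the pool fast path. Below is a hedged sketch of how a caller such as osl_pktget() would typically combine it with the normal kernel allocator; the fallback helper osl_alloc_skb() and the exact bookkeeping are assumptions for illustration, not the vendor code.

/* Sketch only: try the CTF pool first and fall back to the regular skb
 * allocator when the pool is empty or compiled out. osl_alloc_skb() is an
 * assumed thin wrapper around __dev_alloc_skb(len, GFP_ATOMIC).
 */
void *
osl_pktget(osl_t *osh, uint len)
{
    struct sk_buff *skb = NULL;

#ifdef CTFPOOL
    /* Fast path: pop a preallocated object from the pool */
    skb = osl_pktfastget(osh, len);
#endif /* CTFPOOL */

    /* Slow path: fall back to the regular kernel allocator */
    if (skb == NULL)
        skb = osl_alloc_skb(len);

    if (skb != NULL) {
        skb_put(skb, len);              /* expose the requested length */
        skb->priority = 0;
        atomic_inc(&osh->pktalloced);   /* track outstanding packets */
    }

    return (void *)skb;
}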
/* Clone a packet.
 * The pkttag contents are NOT cloned.
 */
void *
osl_pktdup(osl_t *osh, void *skb)
{
    void *p;

    ASSERT(!PKTISCHAINED(skb));

    /* clear the CTFBUF flag if set and map the rest of the buffer
     * before cloning.
     */
    PKTCTFMAP(osh, skb);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
    if ((p = pskb_copy((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
#else
    if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
#endif
        return NULL;

#ifdef CTFPOOL
    if (PKTISFAST(osh, skb)) {
        ctfpool_t *ctfpool;

        /* if the buffer allocated from ctfpool is cloned then
         * we can't be sure when it will be freed. since there
         * is a chance that we will be losing a buffer
         * from our pool, we increment the refill count for the
         * object to be alloced later.
         */
        ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
        ASSERT(ctfpool != NULL);
        PKTCLRFAST(osh, p);
        PKTCLRFAST(osh, skb);
        ctfpool->refills++;
    }
#endif /* CTFPOOL */

    /* Clear PKTC context */
    PKTSETCLINK(p, NULL);
    PKTCCLRFLAGS(p);
    PKTCSETCNT(p, 1);
    PKTCSETLEN(p, PKTLEN(osh, skb));

    /* skb_clone copies skb->cb.. we don't want that */
    if (osh->pub.pkttag)
        OSL_PKTTAG_CLEAR(p);

    /* Increment the packet counter */
    atomic_inc(&osh->pktalloced);

    return (p);
}
void *
osl_pktdup(osl_t *osh, void *skb)
{
    void *p;

    ASSERT(!PKTISCHAINED(skb));

    PKTCTFMAP(osh, skb);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36)
    if ((p = pskb_copy((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
#else
    if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
#endif
        return NULL;

#ifdef CTFPOOL
    if (PKTISFAST(osh, skb)) {
        ctfpool_t *ctfpool;

        ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
        ASSERT(ctfpool != NULL);
        PKTCLRFAST(osh, p);
        PKTCLRFAST(osh, skb);
        ctfpool->refills++;
    }
#endif

    PKTSETCLINK(p, NULL);
    PKTCCLRFLAGS(p);
    PKTCSETCNT(p, 1);
    PKTCSETLEN(p, PKTLEN(osh, skb));

    if (osh->pub.pkttag)
        OSL_PKTTAG_CLEAR(p);

    atomic_inc(&osh->pktalloced);

    return (p);
}
void *
osl_pktdup(osl_t *osh, void *skb)
{
    void *p;

    ASSERT(!PKTISCHAINED(skb));

    PKTCTFMAP(osh, skb);

    if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
        return NULL;

#ifdef CTFPOOL
    if (PKTISFAST(osh, skb)) {
        ctfpool_t *ctfpool;

        ctfpool = (ctfpool_t *)CTFPOOLPTR(osh, skb);
        ASSERT(ctfpool != NULL);
        PKTCLRFAST(osh, p);
        PKTCLRFAST(osh, skb);
        ctfpool->refills++;
    }
#endif

    PKTSETCLINK(p, NULL);
    PKTCCLRATTR(p);

    if (osh->pub.pkttag)
        OSL_PKTTAG_CLEAR(p);

    atomic_inc(&osh->pktalloced);

    return (p);
}
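In every osl_pktdup() variant, cloning a CTF-pool buffer bumps ctfpool->refills, because the pool can no longer tell when that buffer will actually be freed back to it. Below is a minimal sketch of the replenish step implied by that counter; the helper name osl_ctfpool_add() and the lock-free handling of refills are assumptions for illustration, not the vendor code.

/* Sketch only: allocate one fresh pool object per buffer lost to a clone.
 * osl_ctfpool_add() is assumed to allocate a new skb, push it onto
 * ctfpool->head and bump curr_obj under CTFPOOL_LOCK.
 */
static void
osl_ctfpool_replenish(osl_t *osh)
{
    ctfpool_t *ctfpool = osh->ctfpool;

    if (ctfpool == NULL)
        return;

    while (ctfpool->refills > 0) {
        if (osl_ctfpool_add(osh) == NULL)
            break;          /* out of memory; retry on a later pass */
        ctfpool->refills--;
    }
}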