/*
 * osl_pktfastget - fast-path packet buffer allocation from the CTF pool.
 *
 * Tries to satisfy an allocation of @len bytes from the pre-allocated
 * ctfpool free list instead of going to the kernel allocator.
 *
 * @osh: OSL handle; osh->ctfpool may be NULL if the pool is not in use.
 * @len: requested buffer length in bytes.
 *
 * Returns a reinitialized sk_buff on success, or NULL when the pool is
 * unavailable, empty, or its fixed object size is too small for @len
 * (callers fall back to the slow allocation path on NULL).
 */
static inline struct sk_buff *
osl_pktfastget(osl_t *osh, uint len)
{
	struct sk_buff *skb;
#ifdef CTFPOOL_SPINLOCK
	/* flags is only consumed by CTFPOOL_LOCK/UNLOCK when they expand to
	 * spin_lock_irqsave/restore; otherwise it is unused.
	 */
	unsigned long flags;
#endif /* CTFPOOL_SPINLOCK */

	/* Try to do fast allocate. Return null if ctfpool is not in use
	 * or if there are no items in the ctfpool.
	 */
	if (osh->ctfpool == NULL)
		return NULL;

	CTFPOOL_LOCK(osh->ctfpool, flags);
	if (osh->ctfpool->head == NULL) {
		ASSERT(osh->ctfpool->curr_obj == 0);
		/* Track misses so pool sizing can be tuned. */
		osh->ctfpool->slow_allocs++;
		CTFPOOL_UNLOCK(osh->ctfpool, flags);
		return NULL;
	}

	/* Pool objects are fixed-size; a larger request must take the slow
	 * path.  This is a real runtime check, unlike the ASSERT below which
	 * compiles out in release builds.
	 */
	if (len > osh->ctfpool->obj_size) {
		CTFPOOL_UNLOCK(osh->ctfpool, flags);
		return NULL;
	}

	ASSERT(len <= osh->ctfpool->obj_size);

	/* Get an object from ctfpool */
	skb = (struct sk_buff *)osh->ctfpool->head;
	osh->ctfpool->head = (void *)skb->next;

	osh->ctfpool->fast_allocs++;
	osh->ctfpool->curr_obj--;
	ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head);
	CTFPOOL_UNLOCK(osh->ctfpool, flags);

	/* Init skb struct: reset the fields a recycled buffer may have
	 * dirtied so it looks freshly allocated to the caller.
	 */
	skb->next = skb->prev = NULL;
#if defined(__ARM_ARCH_7A__)
	/* Reserve the kernel's standard headroom on ARMv7 builds. */
	skb->data = skb->head + NET_SKB_PAD;
	skb->tail = skb->head + NET_SKB_PAD;
#else
	/* Legacy fixed 16-byte headroom on other targets. */
	skb->data = skb->head + 16;
	skb->tail = skb->head + 16;
#endif /* __ARM_ARCH_7A__ */
	skb->len = 0;
	skb->cloned = 0;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
	skb->list = NULL;
#endif
	atomic_set(&skb->users, 1);

	/* Clear chaining state and the CTF fastpath flags carried over from
	 * the buffer's previous use.
	 */
	PKTSETCLINK(skb, NULL);
	PKTCCLRATTR(skb);
	PKTFAST(osh, skb) &= ~(CTFBUF | SKIPCT | CHAINED);

	return skb;
}
static inline struct sk_buff * osl_pktfastget(osl_t *osh, uint len) { struct sk_buff *skb; #ifdef CTFPOOL_SPINLOCK unsigned long flags; #endif if (osh->ctfpool == NULL) return NULL; CTFPOOL_LOCK(osh->ctfpool, flags); if (osh->ctfpool->head == NULL) { ASSERT(osh->ctfpool->curr_obj == 0); osh->ctfpool->slow_allocs++; CTFPOOL_UNLOCK(osh->ctfpool, flags); return NULL; } ASSERT(len <= osh->ctfpool->obj_size); skb = (struct sk_buff *)osh->ctfpool->head; osh->ctfpool->head = (void *)skb->next; osh->ctfpool->fast_allocs++; osh->ctfpool->curr_obj--; ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head); CTFPOOL_UNLOCK(osh->ctfpool, flags); skb->next = skb->prev = NULL; #if defined(__ARM_ARCH_7A__) skb->data = skb->head + NET_SKB_PAD; skb->tail = skb->head + NET_SKB_PAD; #else skb->data = skb->head + 16; skb->tail = skb->head + 16; #endif skb->len = 0; skb->cloned = 0; #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14) skb->list = NULL; #endif atomic_set(&skb->users, 1); PKTSETCLINK(skb, NULL); PKTCCLRATTR(skb); return skb; }
static inline struct sk_buff * osl_pktfastget(osl_t *osh, uint len) { struct sk_buff *skb; /* Try to do fast allocate. Return null if ctfpool is not in use * or if there are no items in the ctfpool. */ if (osh->ctfpool == NULL) return NULL; spin_lock_bh(&osh->ctfpool->lock); if (osh->ctfpool->head == NULL) { ASSERT(osh->ctfpool->curr_obj == 0); osh->ctfpool->slow_allocs++; spin_unlock_bh(&osh->ctfpool->lock); return NULL; } ASSERT(len <= osh->ctfpool->obj_size); /* Get an object from ctfpool */ skb = (struct sk_buff *)osh->ctfpool->head; osh->ctfpool->head = (void *)skb->next; osh->ctfpool->fast_allocs++; osh->ctfpool->curr_obj--; ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head); spin_unlock_bh(&osh->ctfpool->lock); /* Init skb struct */ skb->next = skb->prev = NULL; skb->data = skb->head + 16; skb->tail = skb->head + 16; skb->len = 0; skb->cloned = 0; #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14) skb->list = NULL; #endif atomic_set(&skb->users, 1); return skb; }
static inline struct sk_buff * osl_pktfastget(osl_t *osh, uint len) { struct sk_buff *skb; if (osh->ctfpool == NULL) return NULL; spin_lock_bh(&osh->ctfpool->lock); if (osh->ctfpool->head == NULL) { ASSERT(osh->ctfpool->curr_obj == 0); osh->ctfpool->slow_allocs++; spin_unlock_bh(&osh->ctfpool->lock); return NULL; } ASSERT(len <= osh->ctfpool->obj_size); skb = (struct sk_buff *)osh->ctfpool->head; osh->ctfpool->head = (void *)skb->next; osh->ctfpool->fast_allocs++; osh->ctfpool->curr_obj--; ASSERT(CTFPOOLHEAD(osh, skb) == (struct sock *)osh->ctfpool->head); spin_unlock_bh(&osh->ctfpool->lock); skb->next = skb->prev = NULL; skb->data = skb->head + 16; skb->tail = skb->head + 16; skb->len = 0; skb->cloned = 0; #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14) skb->list = NULL; #endif atomic_set(&skb->users, 1); return skb; }