/*
 * Grow the data area at the front of the buffer by 'len' bytes and
 * return a pointer to the new start of data.  Panics if the push
 * would run off the front of the allocated buffer.
 */
unsigned char *skb_push(struct sk_buff *skb, int len)
{
	unsigned char *data;

	IS_SKB(skb);
	data = skb->data - len;
	skb->data = data;
	skb->len += len;
	IS_SKB(skb);
	if (data < skb->head)
		panic("skpush:under: %p:%d", __builtin_return_address(0),len);
	return data;
}
/*
 * Duplicate an sk_buff, copying both the header fields and the whole
 * data area (head..end) into a freshly allocated buffer.  The copy is
 * unowned (sk == NULL, free == 1) and off every list.  Returns NULL
 * on allocation failure.
 */
struct sk_buff *skb_copy(struct sk_buff *skb, int priority)
{
	struct sk_buff *n;
	unsigned long offset;

	/*
	 *	Allocate the copy buffer
	 */

	IS_SKB(skb);

	n=alloc_skb(skb->truesize-sizeof(struct sk_buff),priority);
	if(n==NULL)
		return NULL;

	/*
	 *	Shift between the two data areas in bytes
	 */

	offset=n->head-skb->head;

	/* Set the data pointer */
	skb_reserve(n,skb->data-skb->head);
	/* Set the tail pointer and length */
	skb_put(n,skb->len);
	/* Copy the bytes -- the entire head..end region, not just 'len',
	   so headroom/tailroom contents are preserved too. */
	memcpy(n->head,skb->head,skb->end-skb->head);
	n->link3=NULL;
	n->list=NULL;
	n->sk=NULL;
	n->when=skb->when;
	n->dev=skb->dev;
	/* Pointers into the old data area must be rebased by 'offset'. */
	n->h.raw=skb->h.raw+offset;
	n->mac.raw=skb->mac.raw+offset;
	n->ip_hdr=(struct iphdr *)(((char *)skb->ip_hdr)+offset);
	n->saddr=skb->saddr;
	n->daddr=skb->daddr;
	n->raddr=skb->raddr;
	n->seq=skb->seq;
	n->end_seq=skb->end_seq;
	n->ack_seq=skb->ack_seq;
	n->acked=skb->acked;
	memcpy(n->proto_priv, skb->proto_priv, sizeof(skb->proto_priv));
	n->used=skb->used;
	n->free=1;	/* copy is always freeable */
	n->arp=skb->arp;
	n->tries=0;
	n->lock=0;
	n->users=0;
	n->pkt_type=skb->pkt_type;
	n->stamp=skb->stamp;

	IS_SKB(n);
	return n;
}
/*
 * Reserve 'len' bytes of headroom in an (empty) buffer by advancing
 * both the data and tail pointers.  Panics if the adjustment runs
 * past either end of the allocated area.
 */
void skb_reserve(struct sk_buff *skb, int len)
{
	unsigned char *new_data, *new_tail;

	IS_SKB(skb);
	new_data = skb->data + len;
	new_tail = skb->tail + len;
	skb->data = new_data;
	skb->tail = new_tail;
	if (new_tail > skb->end)
		panic("sk_res: over");
	if (new_data < skb->head)
		panic("sk_res: under");
	IS_SKB(skb);
}
/*
 * Extend the used data area by 'len' bytes at the tail and return a
 * pointer to the first newly available byte.  Panics if the buffer
 * would overflow past skb->end.
 */
unsigned char *skb_put(struct sk_buff *skb, int len)
{
	unsigned char *start = skb->tail;

	IS_SKB(skb);
	skb->tail = start + len;
	skb->len += len;
	IS_SKB(skb);
	if (skb->tail > skb->end)
		panic("skput:over: %p:%d", __builtin_return_address(0),len);
	return start;
}
/*
 * Remove and return the first buffer on a queue, or NULL when the
 * queue is empty.  Runs with interrupts disabled so it is safe
 * against interrupt-context producers.
 */
struct sk_buff *skb_dequeue(struct sk_buff_head *list_)
{
	unsigned long flags;
	struct sk_buff *result;
	struct sk_buff *list = (struct sk_buff *)list_;	/* head doubles as a list node */

	save_flags(flags);
	cli();

	IS_SKB_HEAD(list);

	result = list->next;
	if (result == list) {
		/* Empty queue: the head points back at itself. */
		restore_flags(flags);
		return NULL;
	}

	result->next->prev = list;
	list->next = result->next;

	result->next = NULL;
	result->prev = NULL;
	list_->qlen--;
	result->list = NULL;

	restore_flags(flags);

	IS_SKB(result);
	return result;
}
/*
 * Make a "clone" of an sk_buff: a new header that shares the data
 * area of the original.  The data owner's reference count is taken
 * on the real owner (skb->data_skb when 'skb' is itself a clone).
 * The clone starts unlisted, unowned and freeable.  Returns NULL on
 * allocation failure.
 */
struct sk_buff *skb_clone(struct sk_buff *skb, int priority)
{
	struct sk_buff *n;

	IS_SKB(skb);
	n = kmalloc(sizeof(*n), priority);
	if (!n)
		return NULL;
	memcpy(n, skb, sizeof(*n));
	n->count = 1;	/* fresh header, single reference */
	if (skb->data_skb)
		skb = skb->data_skb;	/* chase to the real data owner */
	atomic_inc(&skb->count);
	atomic_inc(&net_allocs);
	atomic_inc(&net_skbcount);
	n->data_skb = skb;
	/* Detach the clone from any list/socket state it inherited. */
	n->next = n->prev = n->link3 = NULL;
	n->list = NULL;
	n->sk = NULL;
	n->free = 1;
	n->tries = 0;
	n->lock = 0;
	n->users = 0;
	return n;
}
/*
 * Shrink the data area by removing 'len' bytes from the front.
 * Returns the new data pointer, or 0 if the buffer holds fewer than
 * 'len' bytes (in which case nothing is changed).
 */
unsigned char * skb_pull(struct sk_buff *skb, int len)
{
	IS_SKB(skb);
	if (skb->len < len)
		return 0;
	skb->len -= len;
	skb->data += len;
	return skb->data;
}
/*
 * Truncate the buffer to 'len' bytes of data.  A no-op when the
 * buffer is already that size or smaller.
 */
void skb_trim(struct sk_buff *skb, int len)
{
	IS_SKB(skb);
	if (skb->len <= len)
		return;
	skb->len = len;
	skb->tail = skb->data + len;
}
/*
 * Free a read (receive) buffer charged to 'sk' and credit its size
 * back to the socket's receive allocation.
 */
void sock_rfree(struct sock *sk, struct sk_buff *skb)
{
	/* truesize must be sampled before the skb memory is freed. */
	int s=skb->truesize;
#if CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	kfree_skbmem(skb);
	if (sk)
	{
		atomic_sub(s, &sk->rmem_alloc);
	}
}
/*
 *	Insert a packet before another one in a list.
 *
 *	Fix: record the owning list on 'newsk' and bump its qlen, as
 *	skb_append() and __skb_insert() already do.  Without this the
 *	inserted buffer carried list == NULL and the queue's qlen went
 *	stale, so a later skb_dequeue() (which does qlen--) drifted the
 *	count negative.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
{
	unsigned long flags;

	IS_SKB(old);
	IS_SKB(newsk);

	if(!old->next || !old->prev)
		printk("insert before unlisted item!\n");
	if(newsk->next || newsk->prev)
		printk("inserted item is already on a list.\n");

	save_flags(flags);
	cli();
	newsk->next = old;
	newsk->prev = old->prev;
	old->prev = newsk;
	newsk->prev->next = newsk;
	/* Keep queue accounting consistent with skb_append(); guard
	   against 'old' being linked but not on an accounted queue. */
	newsk->list = old->list;
	if (newsk->list)
		newsk->list->qlen++;
	restore_flags(flags);
}
/*
 * Free an sk_buff ('rw' selects read vs write accounting) and credit
 * the memory back to its owning socket, if any.  Locked buffers are
 * only marked (free = 3) and are released when unlocked.
 *
 * NOTE(review): this file also contains an older kfree_skb() variant
 * using skb->mem_len; the two are duplicate definitions from
 * different kernel versions -- only one can actually be linked.
 * This variant calls the one-argument kfree_skbmem(), while the
 * kfree_skbmem() defined in this file takes (skb, size); confirm
 * which version of each is in the build.
 */
void kfree_skb(struct sk_buff *skb, int rw)
{
	if (skb == NULL)
	{
		printk(KERN_CRIT "kfree_skb: skb = NULL (from %p)\n",
			__builtin_return_address(0));
		return;
	}
#if CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	if (skb->lock)
	{
		skb->free = 3;    /* Free when unlocked */
		net_free_locked++;
		return;
	}
	if (skb->free == 2)
		printk(KERN_WARNING "Warning: kfree_skb passed an skb that nobody set the free flag on! (from %p)\n",
			__builtin_return_address(0));
	if (skb->list)
		printk(KERN_WARNING "Warning: kfree_skb passed an skb still on a list (from %p).\n",
			__builtin_return_address(0));

	if(skb->destructor)
		skb->destructor(skb);
	if (skb->sk)
	{
		struct sock * sk = skb->sk;
		if(sk->prot!=NULL)
		{
			if (rw)
				sock_rfree(sk, skb);
			else
				sock_wfree(sk, skb);
		}
		else
		{
			/* Non-protocol socket: adjust the memory accounting
			   by hand and free the buffer directly. */
			if (rw)
				atomic_sub(skb->truesize, &sk->rmem_alloc);
			else
			{
				if(!sk->dead)
					sk->write_space(sk);
				atomic_sub(skb->truesize, &sk->wmem_alloc);
			}
			kfree_skbmem(skb);
		}
	}
	else
		kfree_skbmem(skb);
}
/*
 * Older variant of kfree_skb() (pre-atomic accounting): releases an
 * sk_buff and returns skb->mem_len bytes of memory quota to its
 * owning socket via the protocol's rfree/wfree hooks.
 *
 * NOTE(review): duplicate symbol -- a newer kfree_skb() also appears
 * in this file; the two come from different kernel versions and
 * cannot both be linked.
 */
void kfree_skb(struct sk_buff *skb, int rw)
{
	if (skb == NULL)
	{
		printk("kfree_skb: skb = NULL (from %p)\n",
			__builtin_return_address(0));
		return;
	}
#ifdef CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	if (skb->lock)
	{
		skb->free = 3;    /* Free when unlocked */
		net_free_locked++;
		return;
	}
	if (skb->free == 2)
		printk("Warning: kfree_skb passed an skb that nobody set the free flag on! (from %p)\n",
			__builtin_return_address(0));
	if (skb->next)
		printk("Warning: kfree_skb passed an skb still on a list (from %p).\n",
			__builtin_return_address(0));
	if (skb->sk)
	{
		if(skb->sk->prot!=NULL)
		{
			if (rw)
				skb->sk->prot->rfree(skb->sk, skb, skb->mem_len);
			else
				skb->sk->prot->wfree(skb->sk, skb, skb->mem_len);
		}
		else
		{
			unsigned long flags;
			/* Non INET - default wmalloc/rmalloc handler */
			save_flags(flags);
			cli();
			if (rw)
				skb->sk->rmem_alloc-=skb->mem_len;
			else
				skb->sk->wmem_alloc-=skb->mem_len;
			restore_flags(flags);
			if(!skb->sk->dead)
				skb->sk->write_space(skb->sk);
			kfree_skbmem(skb,skb->mem_len);
		}
	}
	else
		kfree_skbmem(skb, skb->mem_len);
}
/*
 * Low-level insert of 'newsk' between 'prev' and 'next' on 'list'.
 * The caller must provide mutual exclusion (interrupts off); the
 * sanity printk()s warn but do not abort the splice.
 */
void __skb_insert(struct sk_buff *newsk,
	struct sk_buff * prev, struct sk_buff *next,
	struct sk_buff_head * list)
{
	IS_SKB(prev);
	IS_SKB(newsk);
	IS_SKB(next);

	if(!prev->next || !prev->prev)
		printk("insert after unlisted item!\n");
	if(!next->next || !next->prev)
		printk("insert before unlisted item!\n");
	if(newsk->next || newsk->prev)
		printk("inserted item is already on a list.\n");

	newsk->next = next;
	newsk->prev = prev;
	next->prev = newsk;
	prev->next = newsk;
	newsk->list = list;
	list->qlen++;
}
/*
 * Free a write (transmit) buffer charged to 'sk': release the skb
 * memory, wake any writer waiting for space, and credit truesize
 * back to the socket's send allocation.
 */
void sock_wfree(struct sock *sk, struct sk_buff *skb)
{
	/* truesize must be sampled before the skb memory is freed. */
	int s=skb->truesize;
#if CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	kfree_skbmem(skb);
	if (sk)
	{
		/* In case it might be waiting for more memory. */
		sk->write_space(sk);
		atomic_sub(s, &sk->wmem_alloc);
	}
}
/*
 * Release the memory behind an sk_buff.  'size' must match the skb's
 * truesize.  With CONFIG_SKB_CHECK the debug magic is validated and
 * flipped to SK_FREED_SKB so double frees are caught.
 */
void kfree_skbmem(struct sk_buff *skb,unsigned size)
{
	unsigned long flags;
#ifdef CONFIG_SLAVE_BALANCING
	save_flags(flags);
	cli();
	/* Undo the per-device queue accounting for balanced slaves. */
	if(skb->in_dev_queue && skb->dev!=NULL)
		skb->dev->pkt_queue--;
	restore_flags(flags);
#endif
#ifdef CONFIG_SKB_CHECK
	IS_SKB(skb);
	if(size!=skb->truesize)
		printk("kfree_skbmem: size mismatch.\n");

	if(skb->magic_debug_cookie == SK_GOOD_SKB)
	{
		save_flags(flags);
		cli();
		IS_SKB(skb);
		skb->magic_debug_cookie = SK_FREED_SKB;	/* mark to catch double free */
		kfree_s((void *)skb,size);
		net_skbcount--;
		net_memory -= size;
		restore_flags(flags);
	}
	else
		printk("kfree_skbmem: bad magic cookie\n");
#else
	save_flags(flags);
	cli();
	kfree_s((void *)skb,size);
	net_skbcount--;
	net_memory -= size;
	restore_flags(flags);
#endif
}
/*
 *	Place a packet after a given packet in a list.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
	unsigned long flags;

	IS_SKB(old);
	IS_SKB(newsk);

	if(!old->next || !old->prev)
		printk("append before unlisted item!\n");
	if(newsk->next || newsk->prev)
		printk("append item is already on a list.\n");

	save_flags(flags);
	cli();
	newsk->prev = old;
	newsk->next = old->next;
	newsk->next->prev = newsk;
	old->next = newsk;
	/* NOTE(review): assumes 'old' is on an accounted queue; if
	   old->list is NULL the qlen++ below dereferences NULL --
	   confirm callers only append to queued buffers. */
	newsk->list = old->list;
	newsk->list->qlen++;

	restore_flags(flags);
}
/*
 * Hand a freshly received packet to the network bottom half: stamp
 * it, shed it if the backlog is overloaded, otherwise queue it on
 * the backlog and schedule NET_BH.  Called from driver receive
 * (interrupt) paths.
 */
void netif_rx(struct sk_buff *skb)
{
	static int dropping = 0;	/* sticky overload flag, shared by all callers */

	/*
	 *	Any received buffers are un-owned and should be discarded
	 *	when freed. These will be updated later as the frames get
	 *	owners.
	 */

	skb->sk = NULL;
	skb->free = 1;
	if(skb->stamp.tv_sec==0)
		skb->stamp = xtime;	/* timestamp only if the driver didn't */

	/*
	 *	Check that we aren't overdoing things.
	 */

	if (!backlog_size)
		dropping = 0;	/* backlog drained: accept again */
	else if (backlog_size > 300)
		dropping = 1;	/* overloaded: shed until fully drained */

	if (dropping)
	{
		kfree_skb(skb, FREE_READ);
		return;
	}

	/*
	 *	Add it to the "backlog" queue.
	 */
#if CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	skb_queue_tail(&backlog,skb);
	backlog_size++;

	/*
	 *	If any packet arrived, mark it for processing after the
	 *	hardware interrupt returns.
	 */

	mark_bh(NET_BH);
	return;
}
/*
 * Tear down an IP reassembly queue entry: stop its timer, unlink it
 * from the global 'ipqueue' list, free every fragment (data and
 * descriptor), then the saved IP header and the descriptor itself.
 * Interrupts are held off around the list surgery and frees.
 */
static void ip_free(struct ipq *qp)
{
	struct ipfrag *fp;
	struct ipfrag *xp;

	/*
	 * Stop the timer for this entry.
	 */

	del_timer(&qp->timer);

	/* Remove this entry from the "incomplete datagrams" queue. */
	cli();
	if (qp->prev == NULL)
	{
		/* Entry was at the head of the queue. */
		ipqueue = qp->next;
		if (ipqueue != NULL)
			ipqueue->prev = NULL;
	}
	else
	{
		qp->prev->next = qp->next;
		if (qp->next != NULL)
			qp->next->prev = qp->prev;
	}

	/* Release all fragment data. */

	fp = qp->fragments;
	while (fp != NULL)
	{
		xp = fp->next;	/* save the link before 'fp' is freed */
		IS_SKB(fp->skb);
		frag_kfree_skb(fp->skb,FREE_READ);
		frag_kfree_s(fp, sizeof(struct ipfrag));
		fp = xp;
	}

	/* Release the IP header. */
	frag_kfree_s(qp->iph, 64 + 8);

	/* Finally, release the queue descriptor itself. */
	frag_kfree_s(qp, sizeof(struct ipq));
	sti();
}
/*
 * Queue an sk_buff at the tail of 'list_'.  Caller must hold off
 * interrupts; skb_queue_tail() is the locking wrapper.
 */
void __skb_queue_tail(struct sk_buff_head *list_, struct sk_buff *newsk)
{
	struct sk_buff *list = (struct sk_buff *)list_;	/* head doubles as a node */

	if (newsk->next || newsk->prev)
		printk("Suspicious queue tail: sk_buff on list!\n");
	IS_SKB(newsk);
	IS_SKB_HEAD(list);

	/* Splice in just before the head, i.e. at the tail. */
	newsk->next = list;
	newsk->prev = list->prev;

	newsk->next->prev = newsk;
	newsk->prev->next = newsk;
	newsk->list = list_;
	list_->qlen++;
}
/*
 * Detach 'skb' from the queue recorded in skb->list (a no-op when it
 * is not queued).  Caller provides mutual exclusion.
 */
void __skb_unlink(struct sk_buff *skb)
{
	IS_SKB(skb);

	if(skb->list)
	{
		skb->list->qlen--;
		skb->next->prev = skb->prev;
		skb->prev->next = skb->next;
		skb->next = NULL;
		skb->prev = NULL;
		skb->list = NULL;
	}
#ifdef PARANOID_BUGHUNT_MODE	/* This is legal but we sometimes want to watch it */
	else
		printk("skb_unlink: not a linked element\n");
#endif
}
/*
 * Flush the queue of packets that were waiting on ARP resolution of
 * 'entry': rebuild each frame's hardware header with the now-known
 * address and hand it to the device.  Must only be called once the
 * entry is complete (ATF_COM set); 'hw_dest' is unused here.
 */
static void arp_send_q(struct arp_table *entry, unsigned char *hw_dest)
{
	struct sk_buff *skb;

	unsigned long flags;

	/*
	 *	Empty the entire queue, building its data up ready to send
	 */

	if(!(entry->flags&ATF_COM))
	{
		printk("arp_send_q: incomplete entry for %s\n",
				in_ntoa(entry->ip));
		return;
	}

	save_flags(flags);

	cli();
	while((skb = skb_dequeue(&entry->skb)) != NULL)
	{
		IS_SKB(skb);
		skb_device_lock(skb);
		/* Re-enable interrupts while the frame is transmitted;
		   skb_dequeue() does its own cli() on the next pass. */
		restore_flags(flags);
		if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb))
		{
			skb->arp = 1;
			if(skb->sk==NULL)
				dev_queue_xmit(skb, skb->dev, 0);
			else
				dev_queue_xmit(skb,skb->dev,skb->sk->priority);
		}
		else
		{
			/* This routine is only ever called when 'entry' is
			   complete. Thus this can't fail. */
			printk("arp_send_q: The impossible occurred. Please notify Alan.\n");
			printk("arp_send_q: active entity %s\n",in_ntoa(entry->ip));
			printk("arp_send_q: failed to find %s\n",in_ntoa(skb->raddr));
		}
	}
	restore_flags(flags);
}
/* * Insert an sk_buff at the end of a list. */ void skb_queue_tail(struct sk_buff_head *list_, struct sk_buff *newsk) { unsigned long flags; struct sk_buff *list = (struct sk_buff *)list_; save_flags(flags); cli(); if (newsk->next || newsk->prev) printk("Suspicious queue tail: sk_buff on list!\n"); IS_SKB(newsk); IS_SKB_HEAD(list); newsk->next = list; newsk->prev = list->prev; newsk->next->prev = newsk; newsk->prev->next = newsk; restore_flags(flags); }
/*
 *	Remove an sk_buff from its list. Works even without knowing the list it
 *	is sitting on, which can be handy at times. It also means that THE LIST
 *	MUST EXIST when you unlink. Thus a list must have its contents unlinked
 *	_FIRST_.
 *
 *	NOTE(review): unlike __skb_unlink(), this variant does not touch
 *	skb->list or the queue's qlen -- confirm which queue flavor its
 *	callers rely on.
 */
void skb_unlink(struct sk_buff *skb)
{
	unsigned long flags;

	save_flags(flags);
	cli();

	IS_SKB(skb);

	if(skb->prev && skb->next)
	{
		skb->next->prev = skb->prev;
		skb->prev->next = skb->next;
		skb->next = NULL;
		skb->prev = NULL;
	}
#ifdef PARANOID_BUGHUNT_MODE	/* This is legal but we sometimes want to watch it */
	else
		printk("skb_unlink: not a linked element\n");
#endif
	restore_flags(flags);
}
/*
 * Caller-locked dequeue of the first buffer on 'list_'; returns NULL
 * when the queue is empty.  skb_dequeue() is the locking wrapper.
 */
struct sk_buff *__skb_dequeue(struct sk_buff_head *list_)
{
	struct sk_buff *result;
	struct sk_buff *list = (struct sk_buff *)list_;	/* head doubles as a node */

	IS_SKB_HEAD(list);

	result = list->next;
	if (result == list) {
		/* Empty queue: the head points back at itself. */
		return NULL;
	}

	result->next->prev = list;
	list->next = result->next;

	result->next = NULL;
	result->prev = NULL;
	list_->qlen--;
	result->list = NULL;

	IS_SKB(result);
	return result;
}
/*
 * Queue a frame for transmission on 'dev' at priority 'pri' and kick
 * the driver.  A negative 'pri' flags a retransmit attempt: such a
 * frame bypasses the queue on the way out and goes back on the queue
 * FRONT if the driver rejects it.  Clones of outgoing frames are fed
 * to any ETH_P_ALL taps (sniffers) before transmission.
 */
void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
{
	unsigned long flags;
	int nitcount;
	struct packet_type *ptype;
	int where = 0;		/* used to say if the packet should go	*/
				/* at the front or the back of the	*/
				/* queue - front is a retransmit try	*/

	if (dev == NULL)
	{
		printk("dev.c: dev_queue_xmit: dev = NULL\n");
		return;
	}

	if(pri>=0 && !skb_device_locked(skb))
		skb_device_lock(skb);	/* Shove a lock on the frame */
#ifdef CONFIG_SLAVE_BALANCING
	/* Divert to the slave device when it is up and less loaded. */
	save_flags(flags);
	cli();
	if(dev->slave!=NULL && dev->slave->pkt_queue < dev->pkt_queue &&
				(dev->slave->flags & IFF_UP))
		dev=dev->slave;
	restore_flags(flags);
#endif
#ifdef CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	skb->dev = dev;

	/*
	 *	This just eliminates some race conditions, but not all...
	 */

	if (skb->next != NULL)
	{
		/*
		 *	Make sure we haven't missed an interrupt.
		 */
		printk("dev_queue_xmit: worked around a missed interrupt\n");
		start_bh_atomic();
		dev->hard_start_xmit(NULL, dev);
		end_bh_atomic();
		return;
	}

	/*
	 *	Negative priority is used to flag a frame that is being pulled from the
	 *	queue front as a retransmit attempt. It therefore goes back on the queue
	 *	start on a failure.
	 */

	if (pri < 0)
	{
		pri = -pri-1;
		where = 1;
	}

	if (pri >= DEV_NUMBUFFS)
	{
		printk("bad priority in dev_queue_xmit.\n");
		pri = 1;
	}

	/*
	 *	If the address has not been resolved. Call the device header rebuilder.
	 *	This can cover all protocols and technically not just ARP either.
	 */

	if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb))
	{
		/* Header rebuild pending (e.g. awaiting ARP); the frame
		   was taken over by the resolver. */
		return;
	}

	save_flags(flags);
	cli();
	if (!where)
	{
#ifdef CONFIG_SLAVE_BALANCING
		skb->in_dev_queue=1;
#endif
		/* Round-trip through the queue so frames go out in
		   queue order: enqueue this one, send the oldest. */
		skb_queue_tail(dev->buffs + pri,skb);
		skb_device_unlock(skb);		/* Buffer is on the device queue and can be freed safely */
		skb = skb_dequeue(dev->buffs + pri);
		skb_device_lock(skb);		/* New buffer needs locking down */
#ifdef CONFIG_SLAVE_BALANCING
		skb->in_dev_queue=0;
#endif
	}
	restore_flags(flags);

	/* copy outgoing packets to any sniffer packet handlers */
	if(!where)
	{
		for (nitcount= dev_nit, ptype = ptype_base; nitcount > 0 && ptype != NULL; ptype = ptype->next)
		{
			/* Never send packets back to the socket
			 * they originated from - MvS ([email protected])
			 */
			if (ptype->type == htons(ETH_P_ALL) &&
			   (ptype->dev == dev || !ptype->dev) &&
			   ((struct sock *)ptype->data != skb->sk))
			{
				struct sk_buff *skb2;
				if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
					break;
				/*
				 *	The protocol knows this has (for other paths) been taken off
				 *	and adds it back.
				 */
				skb2->len-=skb->dev->hard_header_len;
				ptype->func(skb2, skb->dev, ptype);
				nitcount--;
			}
		}
	}
	start_bh_atomic();
	if (dev->hard_start_xmit(skb, dev) == 0)
	{
		end_bh_atomic();
		/*
		 *	Packet is now solely the responsibility of the driver
		 */
		return;
	}
	end_bh_atomic();

	/*
	 *	Transmission failed, put skb back into a list. Once on the list it's safe and
	 *	no longer device locked (it can be freed safely from the device queue)
	 */
	cli();
#ifdef CONFIG_SLAVE_BALANCING
	skb->in_dev_queue=1;
	dev->pkt_queue++;
#endif
	skb_device_unlock(skb);
	skb_queue_head(dev->buffs + pri,skb);
	restore_flags(flags);
}
/*
 * Release everything attached to a sock: its timers, every queued
 * buffer (partial, write, receive, retransmit, backlog), any
 * half-open child or paired sockets, and finally the sock structure
 * itself -- unless buffer memory is still charged to it, in which
 * case destruction is deferred via a TIME_DESTROY timer.
 */
void destroy_sock(struct sock *sk)
{
	struct sk_buff *skb;

	sk->inuse = 1;			/* just to be safe: lock the sock structure */

	/* Only dead == 1 means the sock is actually awaiting release. */
	/* In case it's sleeping somewhere. */
	if (!sk->dead)
		sk->write_space(sk);	/* wake processes waiting on write space (SO_NOSPACE) */

	remove_sock(sk);		/* detach from the protocol's sock lists */

	/* Now we can no longer get new packets. */
	delete_timer(sk);		/* remove the general-purpose timer */
	/* Nor send them */
	del_timer(&sk->retransmit_timer);	/* remove the retransmit timer */

	/* Free the partially built write buffer (sock::partial). */
	while ((skb = tcp_dequeue_partial(sk)) != NULL)
	{
		IS_SKB(skb);
		kfree_skb(skb, FREE_WRITE);
	}

	/* Cleanup up the write buffer. */
	while((skb = skb_dequeue(&sk->write_queue)) != NULL) {
		IS_SKB(skb);
		kfree_skb(skb, FREE_WRITE);
	}

	/*
	 *	Don't discard received data until the user side kills its
	 *	half of the socket.
	 */

	if (sk->dead)
	{
		while((skb=skb_dequeue(&sk->receive_queue))!=NULL)
		{
			/*
			 * This will take care of closing sockets that were
			 * listening and didn't accept everything: shut down
			 * child connections whose setup never completed
			 * (they may be in ESTABLISHED or SYN_RECV state).
			 */
			if (skb->sk != NULL && skb->sk != sk)
			{
				IS_SKB(skb);
				skb->sk->dead = 1;
				skb->sk->prot->close(skb->sk, 0);
			}
			IS_SKB(skb);
			kfree_skb(skb, FREE_READ);
		}
	}

	/* Now we need to clean up the send head (the retransmit queue). */
	cli();
	for(skb = sk->send_head; skb != NULL; )
	{
		struct sk_buff *skb2;

		/*
		 * We need to remove skb from the transmit queue,
		 * or maybe the arp queue.
		 */
		if (skb->next && skb->prev)
		{
/*			printk("destroy_sock: unlinked skb\n");*/
			IS_SKB(skb);
			skb_unlink(skb);
		}
		skb->dev = NULL;
		skb2 = skb->link3;	/* link3 chains the TCP retransmit queue */
		kfree_skb(skb, FREE_WRITE);
		skb = skb2;
	}
	sk->send_head = NULL;
	sti();

	/* And now the backlog. */
	while((skb=skb_dequeue(&sk->back_log))!=NULL)
	{
		/* this should never happen. */
/*		printk("cleaning back_log\n");*/
		kfree_skb(skb, FREE_READ);
	}

	/* Now if it has a half accepted/ closed socket. */
	if (sk->pair)
	{
		sk->pair->dead = 1;
		sk->pair->prot->close(sk->pair, 0);
		sk->pair = NULL;
	}

	/*
	 * Now if everything is gone we can free the socket
	 * structure, otherwise we need to keep it around until
	 * everything is gone.
	 */

	if (sk->dead && sk->rmem_alloc == 0 && sk->wmem_alloc == 0)
	{
		kfree_s((void *)sk,sizeof(*sk));
	}
	else
	{
		/* this should never happen. */
		/* actually it can if an ack has just been sent. */
		sk->destroy = 1;
		sk->ack_backlog = 0;
		sk->inuse = 0;
		reset_timer(sk, TIME_DESTROY, SOCK_DESTROY_TIME);
	}
}
/*
 * Receive interrupt handler for the A channel (DMA-driven).  On end
 * of frame, validates CRC/length, copies the frame (KISS-prefixed
 * with a 0 control byte) into a fresh sk_buff and passes it to
 * netif_rx().  Bad frames just reset the DMA buffer.
 */
static void a_rxint(struct device *dev, struct pi_local *lp)
{
	unsigned long flags;
	int cmd;
	int bytecount;
	char rse;
	struct sk_buff *skb;
	int sksize, pkt_len;
	struct mbuf *cur_buf;
	unsigned char *cfix;

	save_flags(flags);
	cli();			/* disable interrupts */
	cmd = lp->base + CTL;

	rse = rdscc(lp->cardbase, cmd, R1);	/* Get special condition bits from R1 */
	if (rse & Rx_OVR)
		lp->rstate = RXERROR;	/* receiver overrun: poison this frame */

	if (rse & END_FR) {
		/* If end of frame */
		/* figure length of frame from 8237 */
		clear_dma_ff(lp->dmachan);
		bytecount = lp->bufsiz - get_dma_residue(lp->dmachan);

		/* Reject on CRC error, earlier receive error, or runt. */
		if ((rse & CRC_ERR) || (lp->rstate > ACTIVE) || (bytecount < 10)) {
			if ((bytecount >= 10) && (rse & CRC_ERR)) {
				lp->stats.rx_crc_errors++;
			}
			if (lp->rstate == RXERROR) {
				lp->stats.rx_errors++;
				lp->stats.rx_over_errors++;
			}
			/* Reset buffer pointers */
			lp->rstate = ACTIVE;
			setup_rx_dma(lp);
		} else {
			/* Here we have a valid frame */
			/* Toss 2 crc bytes , add one for KISS */
			pkt_len = lp->rcvbuf->cnt = bytecount - 2 + 1;

			/* Get buffer for next frame */
			cur_buf = lp->rcvbuf;
			switchbuffers(lp);
			setup_rx_dma(lp);

			/* Malloc up new buffer. */
			sksize = pkt_len;
			skb = dev_alloc_skb(sksize);
			if (skb == NULL) {
				printk(KERN_ERR "PI: %s: Memory squeeze, dropping packet.\n", dev->name);
				lp->stats.rx_dropped++;
				restore_flags(flags);
				return;
			}
			skb->dev = dev;

			/* KISS kludge - prefix with a 0 byte */
			cfix=skb_put(skb,pkt_len);
			*cfix++=0;
			/* 'skb->data' points to the start of sk_buff data area. */
			memcpy(cfix, (char *) cur_buf->data, pkt_len - 1);
			skb->protocol=htons(ETH_P_AX25);
			skb->mac.raw=skb->data;
			IS_SKB(skb);
			netif_rx(skb);
			lp->stats.rx_packets++;
		}		/* end good frame */
	}			/* end EOF check */
	wrtscc(lp->cardbase, lp->base + CTL, R0, ERR_RES);	/* error reset */
	restore_flags(flags);
}
static void b_rxint(struct device *dev, struct pi_local *lp) { unsigned long flags; int cmd; char rse; struct sk_buff *skb; int sksize; int pkt_len; unsigned char *cfix; save_flags(flags); cli(); /* disable interrupts */ cmd = CTL + lp->base; rse = rdscc(lp->cardbase, cmd, R1); /* get status byte from R1 */ if ((rdscc(lp->cardbase, cmd, R0)) & Rx_CH_AV) { /* there is a char to be stored * read special condition bits before reading the data char */ if (rse & Rx_OVR) { /* Rx overrun - toss buffer */ /* reset buffer pointers */ lp->rcp = lp->rcvbuf->data; lp->rcvbuf->cnt = 0; lp->rstate = RXERROR; /* set error flag */ lp->stats.rx_errors++; lp->stats.rx_over_errors++; } else if (lp->rcvbuf->cnt >= lp->bufsiz) { /* Too large -- toss buffer */ /* reset buffer pointers */ lp->rcp = lp->rcvbuf->data; lp->rcvbuf->cnt = 0; lp->rstate = TOOBIG;/* when set, chars are not stored */ } /* ok, we can store the received character now */ if (lp->rstate == ACTIVE) { /* If no errors... */ *lp->rcp++ = rdscc(lp->cardbase, cmd, R8); /* char to rcv buff */ lp->rcvbuf->cnt++; /* bump count */ } else { /* got to empty FIFO */ (void) rdscc(lp->cardbase, cmd, R8); wrtscc(lp->cardbase, cmd, R0, ERR_RES); /* reset err latch */ lp->rstate = ACTIVE; } } if (rse & END_FR) { /* END OF FRAME -- Make sure Rx was active */ if (lp->rcvbuf->cnt > 0) { if ((rse & CRC_ERR) || (lp->rstate > ACTIVE) || (lp->rcvbuf->cnt < 10)) { if ((lp->rcvbuf->cnt >= 10) && (rse & CRC_ERR)) { lp->stats.rx_crc_errors++; } lp->rcp = lp->rcvbuf->data; lp->rcvbuf->cnt = 0; } else { /* Here we have a valid frame */ pkt_len = lp->rcvbuf->cnt -= 2; /* Toss 2 crc bytes */ pkt_len += 1; /* Make room for KISS control byte */ /* Malloc up new buffer. 
*/ sksize = pkt_len; skb = dev_alloc_skb(sksize); if (skb == NULL) { printk(KERN_ERR "PI: %s: Memory squeeze, dropping packet.\n", dev->name); lp->stats.rx_dropped++; restore_flags(flags); return; } skb->dev = dev; /* KISS kludge - prefix with a 0 byte */ cfix=skb_put(skb,pkt_len); *cfix++=0; /* 'skb->data' points to the start of sk_buff data area. */ memcpy(cfix, lp->rcvbuf->data, pkt_len - 1); skb->protocol=ntohs(ETH_P_AX25); skb->mac.raw=skb->data; IS_SKB(skb); netif_rx(skb); lp->stats.rx_packets++; /* packet queued - initialize buffer for next frame */ lp->rcp = lp->rcvbuf->data; lp->rcvbuf->cnt = 0; } /* end good frame queued */ } /* end check for active receive upon EOF */ lp->rstate = ACTIVE; /* and clear error status */ } /* end EOF check */ restore_flags(flags); }
/* Bytes of free space before skb->data (room for skb_push). */
int skb_headroom(struct sk_buff *skb)
{
	int room;

	IS_SKB(skb);
	room = skb->data - skb->head;
	return room;
}
/* Bytes of free space after skb->tail (room for skb_put). */
int skb_tailroom(struct sk_buff *skb)
{
	int room;

	IS_SKB(skb);
	room = skb->end - skb->tail;
	return room;
}