/* * The higher levels take care of making this non-reentrant (it's * called with bh's disabled). */ static int loopback_xmit(struct sk_buff *skb, struct device *dev) { struct enet_statistics *stats = (struct enet_statistics *)dev->priv; int unlock=1; if (skb == NULL || dev == NULL) return(0); /* * Optimise so buffers with skb->free=1 are not copied but * instead are lobbed from tx queue to rx queue */ if(skb->free==0) { struct sk_buff *skb2=skb; skb=skb_clone(skb, GFP_ATOMIC); /* Clone the buffer */ dev_kfree_skb(skb2, FREE_WRITE); if(skb==NULL) return 0; unlock=0; } else if(skb->sk) { /* * Packet sent but looped back around. Cease to charge * the socket for the frame. */ atomic_sub(skb->truesize, &skb->sk->wmem_alloc); skb->sk->write_space(skb->sk); } skb->protocol=eth_type_trans(skb,dev); skb->dev=dev; #ifndef LOOPBACK_MUST_CHECKSUM skb->ip_summed = CHECKSUM_UNNECESSARY; #endif netif_rx(skb); if(unlock) skb_device_unlock(skb); stats->rx_packets++; stats->tx_packets++; return(0); }
/*
 *	Queue a buffer for transmission on 'dev' at priority 'pri'.
 *
 *	A negative 'pri' flags a retransmission being pulled off the queue
 *	front; it is mapped back to (-pri - 1) and, on driver failure, the
 *	frame is re-queued at the HEAD instead of the tail.
 *
 *	Locking: the skb is device-locked while it is the driver's problem
 *	and unlocked while it sits on a device queue (where it may be freed
 *	safely).  Interrupt-flag save/cli/restore windows below are
 *	order-sensitive — do not reorder statements around them.
 */
void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
{
	unsigned long flags;
	int nitcount;
	struct packet_type *ptype;
	int where = 0;		/* used to say if the packet should go	*/
				/* at the front or the back of the	*/
				/* queue - front is a retransmit try	*/

	if (dev == NULL)
	{
		printk("dev.c: dev_queue_xmit: dev = NULL\n");
		return;
	}

	/* Take the device lock unless this is a retransmit (pri < 0),
	 * which was locked when it was first queued. */
	if(pri>=0 && !skb_device_locked(skb))
		skb_device_lock(skb);	/* Shove a lock on the frame */
#ifdef CONFIG_SLAVE_BALANCING
	/* Divert to a less-loaded slave device if one is up. */
	save_flags(flags);
	cli();
	if(dev->slave!=NULL && dev->slave->pkt_queue < dev->pkt_queue &&
				(dev->slave->flags & IFF_UP))
		dev=dev->slave;
	restore_flags(flags);
#endif
#ifdef CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	skb->dev = dev;

	/*
	 *	This just eliminates some race conditions, but not all...
	 */

	/* A non-NULL skb->next means the buffer is still on some list;
	 * kick the driver with a NULL skb rather than queue it twice. */
	if (skb->next != NULL)
	{
		/*
		 *	Make sure we haven't missed an interrupt.
		 */
		printk("dev_queue_xmit: worked around a missed interrupt\n");
		start_bh_atomic();
		dev->hard_start_xmit(NULL, dev);
		end_bh_atomic();
		return;
	}

	/*
	 *	Negative priority is used to flag a frame that is being pulled from the
	 *	queue front as a retransmit attempt. It therefore goes back on the queue
	 *	start on a failure.
	 */

	if (pri < 0)
	{
		pri = -pri-1;
		where = 1;
	}

	if (pri >= DEV_NUMBUFFS)
	{
		printk("bad priority in dev_queue_xmit.\n");
		pri = 1;
	}

	/*
	 *	If the address has not been resolved. Call the device header rebuilder.
	 *	This can cover all protocols and technically not just ARP either.
	 */

	/* NOTE(review): a nonzero return from rebuild_header drops the frame
	 * here while it is still device-locked — presumably the resolver has
	 * taken ownership; verify against the rebuild_header contract. */
	if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
		return;
	}

	save_flags(flags);
	cli();
	if (!where)
	{
#ifdef CONFIG_SLAVE_BALANCING
		skb->in_dev_queue=1;
#endif
		/* FIFO rotate: append this frame, then transmit whatever is
		 * at the head of the priority queue (may be this same skb). */
		skb_queue_tail(dev->buffs + pri,skb);
		skb_device_unlock(skb);		/* Buffer is on the device queue and can be freed safely */
		skb = skb_dequeue(dev->buffs + pri);
		skb_device_lock(skb);		/* New buffer needs locking down */
#ifdef CONFIG_SLAVE_BALANCING
		skb->in_dev_queue=0;
#endif
	}
	restore_flags(flags);

	/* copy outgoing packets to any sniffer packet handlers */
	if(!where)
	{
		/* nitcount bounds the number of taps fed; it only decrements
		 * on a successful clone+deliver, not per list entry. */
		for (nitcount= dev_nit, ptype = ptype_base; nitcount > 0 && ptype != NULL; ptype = ptype->next)
		{
			/* Never send packets back to the socket
			 * they originated from - MvS ([email protected])
			 */
			if (ptype->type == htons(ETH_P_ALL) &&
			   (ptype->dev == dev || !ptype->dev) &&
			   ((struct sock *)ptype->data != skb->sk))
			{
				struct sk_buff *skb2;
				if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
					break;
				/*
				 *	The protocol knows this has (for other paths) been taken off
				 *	and adds it back.
				 */
				skb2->len-=skb->dev->hard_header_len;
				ptype->func(skb2, skb->dev, ptype);
				nitcount--;
			}
		}
	}

	/* Hand the (locked) frame to the driver with bottom halves held off. */
	start_bh_atomic();
	if (dev->hard_start_xmit(skb, dev) == 0)
	{
		end_bh_atomic();
		/*
		 *	Packet is now solely the responsibility of the driver
		 */
		return;
	}
	end_bh_atomic();

	/*
	 *	Transmission failed, put skb back into a list. Once on the list it's safe and
	 *	no longer device locked (it can be freed safely from the device queue)
	 */
	/* 'flags' (saved above with interrupts enabled) re-enables them. */
	cli();
#ifdef CONFIG_SLAVE_BALANCING
	skb->in_dev_queue=1;
	dev->pkt_queue++;
#endif
	skb_device_unlock(skb);
	skb_queue_head(dev->buffs + pri,skb);
	restore_flags(flags);
}
/*
 *	All outgoing AX.25 I frames pass via this routine. Therefore this is
 *	where the fragmentation of frames takes place.
 *
 *	Frames longer than the configured PACLEN are split into fragments,
 *	each queued on ax25->write_queue; the original skb is then freed.
 *	Frames that fit are queued whole.  Finally, if the link is in a
 *	connected state and not a DAMA slave, transmission is kicked off.
 */
void ax25_output(ax25_cb *ax25, struct sk_buff *skb)
{
	struct sk_buff *skbn;		/* current fragment being built */
	unsigned char *p;
	int frontlen, mtu, len, fragno, ka9qfrag, first = 1;
	long flags;

	/*
	 * dl1bke 960301: We use the new PACLEN parameter as MTU of the AX.25 layer.
	 *                This will (hopefully) allow user programs to write() data
	 *                w/o having to think of the maximal amount of data we can
	 *		  send with one call. It's called PACLEN to (1) avoid confusion
	 *                with (IP) MTU and (2) TAPR calls this PACLEN, too ;-)
	 */

	mtu = ax25->paclen;

	/* skb->len includes the 1-byte PID, hence the "- 1". */
	if ((skb->len - 1) > mtu) {
		if (*skb->data == AX25_P_TEXT) {
			/* Plain text: strip the PID and send raw chunks,
			 * no KA9Q segmentation header. */
			skb_pull(skb, 1); /* skip PID */
			ka9qfrag = 0;
		} else {
			mtu -= 2;	/* Allow for fragment control info */
			ka9qfrag = 1;
		}

		/* fragno counts DOWN to 0 on the last fragment. */
		fragno = skb->len / mtu;
		if (skb->len % mtu == 0) fragno--;

		frontlen = skb_headroom(skb);	/* Address space + CTRL */

		while (skb->len > 0) {
			/* Interrupts are disabled across the allocation and
			 * the wmem_alloc charge so they stay consistent. */
			save_flags(flags);
			cli();
			/*
			 * do _not_ use sock_alloc_send_skb, our socket may have
			 * sk->shutdown set...
			 */
			if ((skbn = alloc_skb(mtu + 2 + frontlen, GFP_ATOMIC)) == NULL) {
				restore_flags(flags);
				printk(KERN_DEBUG "ax25_output: alloc_skb returned NULL\n");
				if (skb_device_locked(skb))
					skb_device_unlock(skb);
				/* NOTE(review): fragments already queued stay on
				 * write_queue and the partially-consumed skb is
				 * neither freed nor re-queued here — looks like a
				 * deliberate best-effort bail-out; confirm. */
				return;
			}

			/* Charge the fragment to the originating socket. */
			skbn->sk = skb->sk;

			if (skbn->sk)
				atomic_add(skbn->truesize, &skbn->sk->wmem_alloc);

			restore_flags(flags);

			skbn->free = 1;
			skbn->arp  = 1;

			len = (mtu > skb->len) ? skb->len : mtu;

			if (ka9qfrag == 1) {
				/* KA9Q style: 2-byte header = PID + fragment
				 * counter (SEG_FIRST flagged on fragment one). */
				skb_reserve(skbn, frontlen + 2);

				memcpy(skb_put(skbn, len), skb->data, len);
				p = skb_push(skbn, 2);

				*p++ = AX25_P_SEGMENT;

				*p = fragno--;
				if (first) {
					*p |= SEG_FIRST;
					first = 0;
				}
			} else {
				/* Text: just restore the 1-byte PID. */
				skb_reserve(skbn, frontlen + 1);
				memcpy(skb_put(skbn, len), skb->data, len);
				p = skb_push(skbn, 1);
				*p = AX25_P_TEXT;
			}

			/* Consume the copied bytes from the source buffer. */
			skb_pull(skb, len);
			skb_queue_tail(&ax25->write_queue, skbn); /* Throw it on the queue */
		}

		/* All data copied out; release the original buffer. */
		skb->free = 1;
		kfree_skb(skb, FREE_WRITE);
	} else {
		skb_queue_tail(&ax25->write_queue, skb);	  /* Throw it on the queue */
	}

	if (ax25->state == AX25_STATE_3 || ax25->state == AX25_STATE_4) {
		if (!ax25->dama_slave)		/* bke 960114: we aren't allowed to transmit */
			ax25_kick(ax25);	/* in DAMA mode unless we received a Poll */
	}
}
/*
 *	Queue a buffer for transmission on 'dev' at priority 'pri' — newer
 *	variant of dev_queue_xmit with alias, bridge and tx_queue_len
 *	handling.  A negative 'pri' marks a retransmission: the frame skips
 *	the queue rotation and, on failure, returns to the queue HEAD.
 *
 *	Locking: the skb is device-locked while the driver owns it and
 *	unlocked while on a device queue.  The save_flags/cli/restore_flags
 *	windows are order-sensitive — do not reorder around them.
 */
static void do_dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
{
	unsigned long flags;
	struct sk_buff_head *list;
	int retransmission = 0;	/* used to say if the packet should go	*/
				/* at the front or the back of the	*/
				/* queue - front is a retransmit try	*/

	/* Retransmits (pri < 0) were locked when first queued. */
	if(pri>=0 && !skb_device_locked(skb))
		skb_device_lock(skb);	/* Shove a lock on the frame */
/* NOTE(review): '#if' here vs '#ifdef CONFIG_SKB_CHECK' elsewhere in this
 * file — works because the config system defines the macro as 1, but the
 * inconsistency is worth unifying. */
#if CONFIG_SKB_CHECK
	IS_SKB(skb);
#endif
	skb->dev = dev;

	/*
	 *	Negative priority is used to flag a frame that is being pulled from the
	 *	queue front as a retransmit attempt. It therefore goes back on the queue
	 *	start on a failure.
	 */

	if (pri < 0)
	{
		pri = -pri-1;
		retransmission = 1;
	}

#ifdef CONFIG_NET_DEBUG
	if (pri >= DEV_NUMBUFFS)
	{
		printk(KERN_WARNING "bad priority in dev_queue_xmit.\n");
		pri = 1;
	}
#endif

	/*
	 *	If the address has not been resolved. Call the device header rebuilder.
	 *	This can cover all protocols and technically not just ARP either.
	 */

	if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
		return;
	}

	/*
	 *
	 * 	If dev is an alias, switch to its main device.
	 *	"arp" resolution has been made with alias device, so
	 *	arp entries refer to alias, not main.
	 *
	 */

#ifdef CONFIG_NET_ALIAS
	if (net_alias_is(dev))
	  	skb->dev = dev = net_alias_dev_tx(dev);
#endif

	/*
	 *	If we are bridging and this is directly generated output
	 *	pass the frame via the bridge.
	 */

#ifdef CONFIG_BRIDGE
	if(skb->pkt_bridged!=IS_BRIDGED && br_stats.flags & BR_UP)
	{
		if(br_tx_frame(skb))
			return;
	}
#endif

	list = dev->buffs + pri;

	save_flags(flags);
	/* if this isn't a retransmission, use the first packet instead... */
	if (!retransmission) {
		if (skb_queue_len(list)) {
			/* avoid overrunning the device queue.. */
			if (skb_queue_len(list) > dev->tx_queue_len) {
				/* NOTE(review): frees the skb while it is still
				 * device-locked — presumably dev_kfree_skb
				 * tolerates that for FREE_WRITE; confirm. */
				dev_kfree_skb(skb, FREE_WRITE);
				return;
			}
		}

		/* copy outgoing packets to any sniffer packet handlers */
		if (dev_nit) {
			struct packet_type *ptype;
			skb->stamp=xtime;	/* timestamp for the taps */
			for (ptype = ptype_all; ptype!=NULL; ptype = ptype->next)
			{
				/* Never send packets back to the socket
				 * they originated from - MvS ([email protected])
				 */
				if ((ptype->dev == dev || !ptype->dev) &&
				   ((struct sock *)ptype->data != skb->sk))
				{
					struct sk_buff *skb2;
					if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
						break;
					/* FIXME?: Wrong when the hard_header_len
					 * is an upper bound. Is this even
					 * used anywhere?
					 */
					skb2->h.raw = skb2->data + dev->hard_header_len;
					/* On soft header devices we
					 * yank the header before mac.raw
					 * back off. This is set by
					 * dev->hard_header().
					 */
					if (dev->flags&IFF_SOFTHEADERS)
						skb_pull(skb2,skb2->mac.raw-skb2->data);
					skb2->mac.raw = skb2->data;
					ptype->func(skb2, skb->dev, ptype);
				}
			}
		}

		if (skb_queue_len(list)) {
			/* FIFO rotate: append this frame and transmit the one
			 * at the head instead, preserving queue order. */
			cli();
			skb_device_unlock(skb);		/* Buffer is on the device queue and can be freed safely */
			__skb_queue_tail(list, skb);
			skb = __skb_dequeue(list);
			skb_device_lock(skb);		/* New buffer needs locking down */
			restore_flags(flags);
		}
	}

	if (dev->hard_start_xmit(skb, dev) == 0) {
		/*
		 *	Packet is now solely the responsibility of the driver
		 */
		return;
	}

	/*
	 *	Transmission failed, put skb back into a list. Once on the list it's safe and
	 *	no longer device locked (it can be freed safely from the device queue)
	 */
	/* 'flags' (saved above with interrupts enabled) re-enables them. */
	cli();
	skb_device_unlock(skb);
	__skb_queue_head(list,skb);
	restore_flags(flags);
}