Example #1
File: dev.c Project: 0xffea/gnumach
void dev_tint(struct device *dev)
{
	int i;
	unsigned long flags;
	struct sk_buff_head * head;
	
	/*
	 * aliases do not transmit (for now :) )
	 */

#ifdef CONFIG_NET_ALIAS
	if (net_alias_is(dev)) return;
#endif
	head = dev->buffs;
	save_flags(flags);
	cli();

	/*
	 *	Work the queues in priority order
	 */	 
	for(i = 0;i < DEV_NUMBUFFS; i++,head++)
	{

		while (!skb_queue_empty(head)) {
			struct sk_buff *skb;

			skb = head->next;
			__skb_unlink(skb, head);
			/*
			 *	Stop anyone freeing the buffer while we retransmit it
			 */
			skb_device_lock(skb);
			restore_flags(flags);
			/*
			 *	Feed them to the output stage and if it fails
			 *	indicate they re-queue at the front.
			 */
			do_dev_queue_xmit(skb,dev,-i - 1);
			/*
			 *	If we can take no more then stop here.
			 */
			if (dev->tbusy)
				return;
			cli();
		}
	}
	restore_flags(flags);
}
Example #2
static void arp_send_q(struct arp_table *entry, unsigned char *hw_dest)
{
	struct sk_buff *skb;

	unsigned long flags;

	/*
	 *	Empty the entire queue, building its data up ready to send
	 */
	
	if(!(entry->flags&ATF_COM))
	{
		printk("arp_send_q: incomplete entry for %s\n",
				in_ntoa(entry->ip));
		return;
	}

	save_flags(flags);
	
	cli();
	while((skb = skb_dequeue(&entry->skb)) != NULL)
	{
		IS_SKB(skb);
		skb_device_lock(skb);
		restore_flags(flags);
		if(!skb->dev->rebuild_header(skb->data,skb->dev,skb->raddr,skb))
		{
			skb->arp  = 1;
			if(skb->sk==NULL)
				dev_queue_xmit(skb, skb->dev, 0);
			else
				dev_queue_xmit(skb,skb->dev,skb->sk->priority);
		}
		else
		{
			/* This routine is only ever called when 'entry' is
			   complete. Thus this can't fail. */
			printk("arp_send_q: The impossible occurred. Please notify Alan.\n");
			printk("arp_send_q: active entity %s\n",in_ntoa(entry->ip));
			printk("arp_send_q: failed to find %s\n",in_ntoa(skb->raddr));
		}
	}
	restore_flags(flags);
}
Example #3
static void arp_release_entry(struct arp_table *entry)
{
	struct sk_buff *skb;
	unsigned long flags;

	save_flags(flags);
	cli();
	/* Release the list of `skb' pointers. */
	while ((skb = skb_dequeue(&entry->skb)) != NULL)
	{
		skb_device_lock(skb);
		restore_flags(flags);
		dev_kfree_skb(skb, FREE_WRITE);
	}
	restore_flags(flags);
	del_timer(&entry->timer);
	kfree_s(entry, sizeof(struct arp_table));
	return;
}
Example #4
void dev_tint(struct device *dev)
{
	int i;
	struct sk_buff *skb;
	unsigned long flags;
	
	save_flags(flags);	
	/*
	 *	Work the queues in priority order
	 */
	 
	for(i = 0;i < DEV_NUMBUFFS; i++) 
	{
		/*
		 *	Pull packets from the queue
		 */
		 

		cli();
		while((skb=skb_dequeue(&dev->buffs[i]))!=NULL)
		{
			/*
			 *	Stop anyone freeing the buffer while we retransmit it
			 */
			skb_device_lock(skb);
			restore_flags(flags);
			/*
			 *	Feed them to the output stage and if it fails
			 *	indicate they re-queue at the front.
			 */
			dev_queue_xmit(skb,dev,-i - 1);
			/*
			 *	If we can take no more then stop here.
			 */
			if (dev->tbusy)
				return;
			cli();
		}
	}
	restore_flags(flags);
}
Example #5
void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
{
	unsigned long flags;
	int nitcount;
	struct packet_type *ptype;
	int where = 0;		/* used to say if the packet should go	*/
				/* at the front or the back of the	*/
				/* queue - front is a retransmit try	*/

	if (dev == NULL) 
	{
		printk("dev.c: dev_queue_xmit: dev = NULL\n");
		return;
	}
	
	if(pri>=0 && !skb_device_locked(skb))
		skb_device_lock(skb);	/* Shove a lock on the frame */
#ifdef CONFIG_SLAVE_BALANCING
	save_flags(flags);
	cli();
	if(dev->slave!=NULL && dev->slave->pkt_queue < dev->pkt_queue &&
				(dev->slave->flags & IFF_UP))
		dev=dev->slave;
	restore_flags(flags);
#endif		
#ifdef CONFIG_SKB_CHECK 
	IS_SKB(skb);
#endif    
	skb->dev = dev;

	/*
	 *	This just eliminates some race conditions, but not all... 
	 */

	if (skb->next != NULL) 
	{
		/*
		 *	Make sure we haven't missed an interrupt. 
		 */
		printk("dev_queue_xmit: worked around a missed interrupt\n");
		start_bh_atomic();
		dev->hard_start_xmit(NULL, dev);
		end_bh_atomic();
		return;
  	}

	/*
	 *	Negative priority is used to flag a frame that is being pulled from the
	 *	queue front as a retransmit attempt. It therefore goes back on the queue
	 *	start on a failure.
	 */
	 
  	if (pri < 0) 
  	{
		pri = -pri-1;
		where = 1;
  	}

	if (pri >= DEV_NUMBUFFS) 
	{
		printk("bad priority in dev_queue_xmit.\n");
		pri = 1;
	}

	/*
	 *	If the address has not been resolved. Call the device header rebuilder.
	 *	This can cover all protocols and technically not just ARP either.
	 */
	 
	if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
		return;
	}

	save_flags(flags);
	cli();	
	if (!where) {
#ifdef CONFIG_SLAVE_BALANCING	
		skb->in_dev_queue=1;
#endif		
		skb_queue_tail(dev->buffs + pri,skb);
		skb_device_unlock(skb);		/* Buffer is on the device queue and can be freed safely */
		skb = skb_dequeue(dev->buffs + pri);
		skb_device_lock(skb);		/* New buffer needs locking down */
#ifdef CONFIG_SLAVE_BALANCING		
		skb->in_dev_queue=0;
#endif		
	}
	restore_flags(flags);

	/* copy outgoing packets to any sniffer packet handlers */
	if(!where)
	{
		for (nitcount= dev_nit, ptype = ptype_base; nitcount > 0 && ptype != NULL; ptype = ptype->next) 
		{
			/* Never send packets back to the socket
			 * they originated from - MvS ([email protected])
			 */
			if (ptype->type == htons(ETH_P_ALL) &&
			   (ptype->dev == dev || !ptype->dev) &&
			   ((struct sock *)ptype->data != skb->sk))
			{
				struct sk_buff *skb2;
				if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
					break;
				/*
				 *	The protocol knows this has (for other paths) been taken off
				 *	and adds it back.
				 */
				skb2->len-=skb->dev->hard_header_len;
				ptype->func(skb2, skb->dev, ptype);
				nitcount--;
			}
		}
	}
	start_bh_atomic();
	if (dev->hard_start_xmit(skb, dev) == 0) {
		end_bh_atomic();
		/*
		 *	Packet is now solely the responsibility of the driver
		 */
		return;
	}
	end_bh_atomic();

	/*
	 *	Transmission failed, put skb back into a list. Once on the list it's safe and
	 *	no longer device locked (it can be freed safely from the device queue)
	 */
	cli();
#ifdef CONFIG_SLAVE_BALANCING
	skb->in_dev_queue=1;
	dev->pkt_queue++;
#endif		
	skb_device_unlock(skb);
	skb_queue_head(dev->buffs + pri,skb);
	restore_flags(flags);
}
Example #6
File: dev.c Project: 0xffea/gnumach
static void do_dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
{
	unsigned long flags;
	struct sk_buff_head *list;
	int retransmission = 0;	/* used to say if the packet should go	*/
				/* at the front or the back of the	*/
				/* queue - front is a retransmit try	*/

	if(pri>=0 && !skb_device_locked(skb))
		skb_device_lock(skb);	/* Shove a lock on the frame */
#if CONFIG_SKB_CHECK 
	IS_SKB(skb);
#endif    
	skb->dev = dev;

	/*
	 *	Negative priority is used to flag a frame that is being pulled from the
	 *	queue front as a retransmit attempt. It therefore goes back on the queue
	 *	start on a failure.
	 */
	 
  	if (pri < 0) 
  	{
		pri = -pri-1;
		retransmission = 1;
  	}

#ifdef CONFIG_NET_DEBUG
	if (pri >= DEV_NUMBUFFS) 
	{
		printk(KERN_WARNING "bad priority in dev_queue_xmit.\n");
		pri = 1;
	}
#endif

	/*
	 *	If the address has not been resolved. Call the device header rebuilder.
	 *	This can cover all protocols and technically not just ARP either.
	 */
	 
	if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
		return;
	}

	/*
	 *
	 * 	If dev is an alias, switch to its main device.
	 *	"arp" resolution has been made with alias device, so
	 *	arp entries refer to alias, not main.
	 *
	 */

#ifdef CONFIG_NET_ALIAS
	if (net_alias_is(dev))
	  	skb->dev = dev = net_alias_dev_tx(dev);
#endif

	/*
	 *	If we are bridging and this is directly generated output
	 *	pass the frame via the bridge.
	 */

#ifdef CONFIG_BRIDGE
	if(skb->pkt_bridged!=IS_BRIDGED && br_stats.flags & BR_UP)
	{
		if(br_tx_frame(skb))
			return;
	}
#endif

	list = dev->buffs + pri;

	save_flags(flags);
	/* if this isn't a retransmission, use the first packet instead... */
	if (!retransmission) {
		if (skb_queue_len(list)) {
			/* avoid overrunning the device queue.. */
			if (skb_queue_len(list) > dev->tx_queue_len) {
				dev_kfree_skb(skb, FREE_WRITE);
				return;
			}
		}

		/* copy outgoing packets to any sniffer packet handlers */
		if (dev_nit) {
			struct packet_type *ptype;
			skb->stamp=xtime;
			for (ptype = ptype_all; ptype!=NULL; ptype = ptype->next) 
			{
				/* Never send packets back to the socket
				 * they originated from - MvS ([email protected])
				 */
				if ((ptype->dev == dev || !ptype->dev) &&
				   ((struct sock *)ptype->data != skb->sk))
				{
					struct sk_buff *skb2;
					if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
						break;
					/* FIXME?: Wrong when the hard_header_len
					 * is an upper bound. Is this even
					 * used anywhere?
					 */
					skb2->h.raw = skb2->data + dev->hard_header_len;
					/* On soft header devices we
					 * yank the header before mac.raw
					 * back off. This is set by
					 * dev->hard_header().
					 */
					if (dev->flags&IFF_SOFTHEADERS)
						skb_pull(skb2,skb2->mac.raw-skb2->data);
					skb2->mac.raw = skb2->data;
					ptype->func(skb2, skb->dev, ptype);
				}
			}
		}

		if (skb_queue_len(list)) {
			cli();
			skb_device_unlock(skb);		/* Buffer is on the device queue and can be freed safely */
			__skb_queue_tail(list, skb);
			skb = __skb_dequeue(list);
			skb_device_lock(skb);		/* New buffer needs locking down */
			restore_flags(flags);
		}
	}
	if (dev->hard_start_xmit(skb, dev) == 0) {
		/*
		 *	Packet is now solely the responsibility of the driver
		 */
		return;
	}

	/*
	 *	Transmission failed, put skb back into a list. Once on the list it's safe and
	 *	no longer device locked (it can be freed safely from the device queue)
	 */
	cli();
	skb_device_unlock(skb);
	__skb_queue_head(list,skb);
	restore_flags(flags);
}