Example #1
void dev_activate(struct device *dev)
{
	/* No queueing discipline is attached to device;
	   create default one i.e. pfifo_fast for devices,
	   which need queueing and noqueue_qdisc for
	   virtual interfaces
	 */

	if (dev->qdisc_sleeping == &noop_qdisc) {
		if (dev->tx_queue_len) {
			struct Qdisc *qdisc;
			qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops);
			if (qdisc == NULL) {
				printk(KERN_INFO "%s: activation failed\n", dev->name);
				return;
			}
			dev->qdisc_sleeping = qdisc;
		} else
			dev->qdisc_sleeping = &noqueue_qdisc;
	}

	start_bh_atomic();
	if ((dev->qdisc = dev->qdisc_sleeping) != &noqueue_qdisc) {
		dev->qdisc->tx_timeo = 5*HZ;
		dev->qdisc->tx_last = jiffies - dev->qdisc->tx_timeo;
		if (!del_timer(&dev_watchdog))
			dev_watchdog.expires = jiffies + 5*HZ;
		add_timer(&dev_watchdog);
	}
	end_bh_atomic();
}
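
Every example in this collection brackets its update of shared networking state with start_bh_atomic()/end_bh_atomic(), which these kernels use to keep bottom halves from running while the structure is momentarily inconsistent. The fragment below is only a sketch of that common shape, not kernel code: my_node, my_list and my_list_add are hypothetical names, and the two bh calls are stubbed as no-op macros so the snippet stands on its own.

/* Sketch of the bh-atomic bracket shared by the examples in this collection.
 * Names are hypothetical; the macros stand in for the real kernel calls. */
#define start_bh_atomic()	do { } while (0)	/* stand-in: holds off bottom halves */
#define end_bh_atomic()		do { } while (0)	/* stand-in: lets them run again */

struct my_node {
	struct my_node	*next;
	int		value;
};

static struct my_node *my_list;		/* also touched from a bottom half */

static void my_list_add(struct my_node *n)
{
	start_bh_atomic();		/* keep the update atomic w.r.t. bottom halves */
	n->next = my_list;		/* short, non-sleeping critical section */
	my_list = n;
	end_bh_atomic();
}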
Example #2
static int inet6_addr_del(int ifindex, struct in6_addr *pfx, int plen)
{
	struct inet6_ifaddr *ifp;
	struct inet6_dev *idev;
	struct device *dev;
	
	if ((dev = dev_get_by_index(ifindex)) == NULL)
		return -ENODEV;

	if ((idev = ipv6_get_idev(dev)) == NULL)
		return -ENXIO;

	start_bh_atomic();
	for (ifp = idev->addr_list; ifp; ifp=ifp->if_next) {
		if (ifp->prefix_len == plen &&
		    (!memcmp(pfx, &ifp->addr, sizeof(struct in6_addr)))) {
			ipv6_del_addr(ifp);
			end_bh_atomic();

			/* If the last address is deleted administratively,
			   disable IPv6 on this interface.
			 */
			if (idev->addr_list == NULL)
				addrconf_ifdown(idev->dev, 1);
			return 0;
		}
	}
	end_bh_atomic();
	return -EADDRNOTAVAIL;
}
Example #3
void qdisc_destroy(struct Qdisc *qdisc)
{
	struct Qdisc_ops *ops = qdisc->ops;

	if (!atomic_dec_and_test(&qdisc->refcnt))
		return;

#ifdef CONFIG_NET_SCHED
	if (qdisc->dev) {
		struct Qdisc *q, **qp;
		for (qp = &qdisc->dev->qdisc_list; (q=*qp) != NULL; qp = &q->next)
			if (q == qdisc) {
				*qp = q->next;
				q->next = NULL;
				break;
			}
	}
#ifdef CONFIG_NET_ESTIMATOR
	qdisc_kill_estimator(&qdisc->stats);
#endif
#endif
	start_bh_atomic();
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);
	end_bh_atomic();
	if (!(qdisc->flags&TCQ_F_BUILTIN))
		kfree(qdisc);
}
Example #4
static void hard_idle(void)
{
	while (!need_resched) {
		if (hlt_works_ok && !hlt_counter) {
#ifdef CONFIG_APM
			/* If the APM BIOS is not enabled, or there
			   is an error calling the idle routine, we
			   should hlt if possible.  We need to check
			   need_resched again because an interrupt
			   may have occurred in apm_do_idle(). */
			start_bh_atomic();
			if (!apm_do_idle() && !need_resched)
				__asm__("hlt");
			end_bh_atomic();
#else
			__asm__("hlt");
#endif
		}
		if (need_resched)
			break;
		schedule();
	}
#ifdef CONFIG_APM
	apm_do_busy();
#endif
}
Example #5
void qdisc_reset(struct Qdisc *qdisc)
{
	struct Qdisc_ops *ops = qdisc->ops;
	start_bh_atomic();
	if (ops->reset)
		ops->reset(qdisc);
	end_bh_atomic();
}
Example #6
static void ip6_rule_del(struct ip6_fw_rule *rl)
{
	struct ip6_fw_rule *next, *prev;

	start_bh_atomic();
	ip6_fw_rule_cnt--;
	next = rl->next;
	prev = rl->prev;
	next->prev = prev;
	prev->next = next;
	end_bh_atomic();
}
Example #7
NORET_TYPE void do_exit(long code)
{
	struct task_struct *tsk = current;

	if (in_interrupt())
		printk("Aiee, killing interrupt handler\n");
	if (!tsk->pid)
		panic("Attempted to kill the idle task!");
	tsk->flags |= PF_EXITING;
	start_bh_atomic();
	del_timer(&tsk->real_timer);
	end_bh_atomic();

	lock_kernel();
fake_volatile:
#ifdef CONFIG_BSD_PROCESS_ACCT
	acct_process(code);
#endif
	sem_exit();
	__exit_mm(tsk);
#if CONFIG_AP1000
	exit_msc(tsk);
#endif
	__exit_files(tsk);
	__exit_fs(tsk);
	__exit_sighand(tsk);
	exit_thread();
	tsk->state = TASK_ZOMBIE;
	tsk->exit_code = code;
	exit_notify();
#ifdef DEBUG_PROC_TREE
	audit_ptree();
#endif
	if (tsk->exec_domain && tsk->exec_domain->module)
		__MOD_DEC_USE_COUNT(tsk->exec_domain->module);
	if (tsk->binfmt && tsk->binfmt->module)
		__MOD_DEC_USE_COUNT(tsk->binfmt->module);
	schedule();
/*
 * In order to get rid of the "volatile function does return" message
 * I did this little loop that confuses gcc to think do_exit really
 * is volatile. In fact it's schedule() that is volatile in some
 * circumstances: when current->state = ZOMBIE, schedule() never
 * returns.
 *
 * In fact the natural way to do all this is to have the label and the
 * goto right after each other, but I put the fake_volatile label at
 * the start of the function just in case something /really/ bad
 * happens, and the schedule returns. This way we can try again. I'm
 * not paranoid: it's just that everybody is out to get me.
 */
	goto fake_volatile;
}
Example #8
void dev_mc_discard(struct device *dev)
{
	start_bh_atomic();
	while (dev->mc_list!=NULL) {
		struct dev_mc_list *tmp=dev->mc_list;
		dev->mc_list=tmp->next;
		if (tmp->dmi_users > tmp->dmi_gusers)
			printk("dev_mc_discard: multicast leakage! dmi_users=%d\n", tmp->dmi_users);
		kfree_s(tmp,sizeof(*tmp));
	}
	dev->mc_count=0;
	end_bh_atomic();
}
Example #9
/*
 * Dequeue a new connection.
 */
static inline void
svc_sock_accepted(struct svc_sock *svsk)
{
	start_bh_atomic();
	svsk->sk_busy = 0;
	svsk->sk_conn--;
	if (svsk->sk_conn || svsk->sk_data || svsk->sk_close) {
		dprintk("svc: socket %p re-enqueued after accept\n",
			svsk->sk_sk);
		svc_sock_enqueue(svsk);
	}
	end_bh_atomic();
}
Example #10
void dev_shutdown(struct device *dev)
{
	struct Qdisc *qdisc;

	start_bh_atomic();
	qdisc = dev->qdisc_sleeping;
	dev->qdisc = &noop_qdisc;
	dev->qdisc_sleeping = &noop_qdisc;
	qdisc_destroy(qdisc);
	BUG_TRAP(dev->qdisc_list == NULL);
	dev->qdisc_list = NULL;
	end_bh_atomic();
}
Example #11
static void ip6_rule_add(struct ip6_fw_rule *rl)
{
	struct ip6_fw_rule *next;

	start_bh_atomic();
	ip6_fw_rule_cnt++;
	next = &ip6_fw_rule_list;
	rl->next = next;
	rl->prev = next->prev;
	rl->prev->next = rl;
	next->prev = rl;
	end_bh_atomic();
}
Example #12
static void igmp_group_added(struct ip_mc_list *im)
{
	if (im->loaded == 0) {
		im->loaded = 1;
		ip_mc_filter_add(im->interface, im->multiaddr);
	}

#ifdef CONFIG_IP_MULTICAST
	if (im->multiaddr == IGMP_ALL_HOSTS)
		return;

	start_bh_atomic();
	igmp_start_timer(im, IGMP_Initial_Report_Delay);
	end_bh_atomic();
#endif
}
Example #13
void fib6_clean_tree(struct fib6_node *root,
		     int (*func)(struct rt6_info *, void *arg),
		     int prune, void *arg)
{
	struct fib6_cleaner_t c;

	c.w.root = root;
	c.w.func = fib6_clean_node;
	c.w.prune = prune;
	c.func = func;
	c.arg = arg;

	start_bh_atomic();
	fib6_walk(&c.w);
	end_bh_atomic();
}
Example #14
/*
 * Having read count bytes from a socket, check whether it
 * needs to be re-enqueued.
 */
static inline void
svc_sock_received(struct svc_sock *svsk, int count)
{
	start_bh_atomic();
	if ((svsk->sk_data -= count) < 0) {
		printk(KERN_NOTICE "svc: sk_data negative!\n");
		svsk->sk_data = 0;
	}
	svsk->sk_rqstp = NULL; /* XXX */
	svsk->sk_busy = 0;
	if (svsk->sk_conn || svsk->sk_data || svsk->sk_close) {
		dprintk("svc: socket %p re-enqueued after receive\n",
						svsk->sk_sk);
		svc_sock_enqueue(svsk);
	}
	end_bh_atomic();
}
Example #15
/*
 * Dequeue the first socket.
 */
static inline struct svc_sock *
svc_sock_dequeue(struct svc_serv *serv)
{
	struct svc_sock	*svsk;

	start_bh_atomic();
	if ((svsk = serv->sv_sockets) != NULL)
		rpc_remove_list(&serv->sv_sockets, svsk);
	end_bh_atomic();

	if (svsk) {
		dprintk("svc: socket %p dequeued, inuse=%d\n",
			svsk->sk_sk, svsk->sk_inuse);
		svsk->sk_qued = 0;
	}

	return svsk;
}
Example #16
void __release_sock(struct sock *sk)
{
#ifdef CONFIG_INET
	if (!sk->prot || !sk->prot->rcv)
		return;
		
	/* See if we have any packets built up. */
	start_bh_atomic();
	while (!skb_queue_empty(&sk->back_log)) {
		struct sk_buff * skb = sk->back_log.next;
		__skb_unlink(skb, &sk->back_log);
		sk->prot->rcv(skb, skb->dev, (struct options*)skb->proto_priv,
			      skb->saddr, skb->len, skb->daddr, 1,
			      /* Only used for/by raw sockets. */
			      (struct inet_protocol *)sk->pair); 
	}
	end_bh_atomic();
#endif  
}
Example #17
static int dev_mc_read_proc(char *buffer, char **start, off_t offset,
			    int length, int *eof, void *data)
{
	off_t pos=0, begin=0;
	struct dev_mc_list *m;
	int len=0;
	struct device *dev;

	start_bh_atomic();

	for (dev = dev_base; dev; dev = dev->next) {
		for (m = dev->mc_list; m; m = m->next) {
			int i;

			len += sprintf(buffer+len,"%-4d %-15s %-5d %-5d ", dev->ifindex, dev->name,
				       m->dmi_users, m->dmi_gusers);

			for (i=0; i<m->dmi_addrlen; i++)
				len += sprintf(buffer+len, "%02x", m->dmi_addr[i]);

			len+=sprintf(buffer+len, "\n");

			pos=begin+len;
			if (pos < offset) {
				len=0;
				begin=pos;
			}
			if (pos > offset+length)
				goto done;
		}
	}
	*eof = 1;

done:
	end_bh_atomic();
	*start=buffer+(offset-begin);
	len-=(offset-begin);
	if(len>length)
		len=length;
	if(len<0)
		len=0;
	return len;
}
Example #18
static void igmp_group_dropped(struct ip_mc_list *im)
{
	if (im->loaded) {
		im->loaded = 0;
		ip_mc_filter_del(im->interface, im->multiaddr);
	}

#ifdef CONFIG_IP_MULTICAST
	if (im->multiaddr == IGMP_ALL_HOSTS)
		return;

	start_bh_atomic();
	igmp_stop_timer(im);
	end_bh_atomic();

	if (im->reporter && !IGMP_V1_SEEN(im->interface))
		igmp_send_report(im->interface->dev, im->multiaddr, IGMP_HOST_LEAVE_MESSAGE);
#endif
}
Example #19
static void ipmr_cache_resolve(struct mfc_cache *cache)
{
	struct sk_buff *skb;

	start_bh_atomic();

	/*
	 *	Kill the queue entry timer.
	 */

	del_timer(&cache->mfc_timer);

	if (cache->mfc_flags&MFC_QUEUED) {
		cache->mfc_flags&=~MFC_QUEUED;
		cache_resolve_queue_len--;
	}

	end_bh_atomic();

	/*
	 *	Play the pending entries through our router
	 */
	while((skb=skb_dequeue(&cache->mfc_unresolved))) {
#ifdef CONFIG_RTNETLINK
		if (skb->nh.iph->version == 0) {
			int err;
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (ipmr_fill_mroute(skb, cache, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = skb->tail - (u8*)nlh;
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				((struct nlmsgerr*)NLMSG_DATA(nlh))->error = -EMSGSIZE;
			}
			err = netlink_unicast(rtnl, skb, NETLINK_CB(skb).pid, MSG_DONTWAIT);
		} else
#endif
			ip_mr_forward(skb, cache, 0);
	}
}
Example #20
int dev_mc_add(struct device *dev, void *addr, int alen, int glbl)
{
	int err = 0;
	struct dev_mc_list *dmi, *dmi1;

	dmi1 = (struct dev_mc_list *)kmalloc(sizeof(*dmi), gfp_any());

	start_bh_atomic();
	for(dmi=dev->mc_list; dmi!=NULL; dmi=dmi->next) {
		if (memcmp(dmi->dmi_addr,addr,dmi->dmi_addrlen)==0 && dmi->dmi_addrlen==alen) {
			if (glbl) {
				int old_glbl = dmi->dmi_gusers;
				dmi->dmi_gusers = 1;
				if (old_glbl)
					goto done;
			}
			dmi->dmi_users++;
			goto done;
		}
	}

	if ((dmi=dmi1)==NULL) {
		end_bh_atomic();
		return -ENOMEM;
	}
	memcpy(dmi->dmi_addr, addr, alen);
	dmi->dmi_addrlen=alen;
	dmi->next=dev->mc_list;
	dmi->dmi_users=1;
	dmi->dmi_gusers=glbl ? 1 : 0;
	dev->mc_list=dmi;
	dev->mc_count++;
	end_bh_atomic();
	dev_mc_upload(dev);
	return 0;

done:
	end_bh_atomic();
	if (dmi1)
		kfree(dmi1);
	return err;
}
Example #21
void dev_mc_upload(struct device *dev)
{
	/* Don't do anything till we up the interface
	   [dev_open will call this function so the list will
	    stay sane] */
	    
	if(!(dev->flags&IFF_UP))
		return;

	/*
	 *	Devices with no set multicast don't get set 
	 */

	if(dev->set_multicast_list==NULL)
		return;

	start_bh_atomic();
	dev->set_multicast_list(dev);
	end_bh_atomic();
}
Example #22
int dev_mc_delete(struct device *dev, void *addr, int alen, int glbl)
{
	int err = 0;
	struct dev_mc_list *dmi, **dmip;

	start_bh_atomic();
	for (dmip=&dev->mc_list; (dmi=*dmip)!=NULL; dmip=&dmi->next) {
		/*
		 *	Find the entry we want to delete. The device could
		 *	have variable length entries so check these too.
		 */
		if (memcmp(dmi->dmi_addr,addr,dmi->dmi_addrlen)==0 && alen==dmi->dmi_addrlen) {
			if (glbl) {
				int old_glbl = dmi->dmi_gusers;
				dmi->dmi_gusers = 0;
				if (old_glbl == 0)
					break;
			}
			if(--dmi->dmi_users)
				goto done;

			/*
			 *	Last user. So delete the entry.
			 */
			*dmip = dmi->next;
			dev->mc_count--;
			kfree_s(dmi,sizeof(*dmi));
			/*
			 *	We have altered the list, so the card
			 *	loaded filter is now wrong. Fix it
			 */
			end_bh_atomic();
			dev_mc_upload(dev);
			return 0;
		}
	}
	err = -ENOENT;
done:
	end_bh_atomic();
	return err;
}
Example #23
static void ipmr_update_threshoulds(struct mfc_cache *cache, unsigned char *ttls)
{
	int vifi;

	start_bh_atomic();

	cache->mfc_minvif = MAXVIFS;
	cache->mfc_maxvif = 0;
	memset(cache->mfc_ttls, 255, MAXVIFS);

	for (vifi=0; vifi<maxvif; vifi++) {
		if (vifc_map&(1<<vifi) && ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_ttls[vifi] = ttls[vifi];
			if (cache->mfc_minvif > vifi)
				cache->mfc_minvif = vifi;
			if (cache->mfc_maxvif <= vifi)
				cache->mfc_maxvif = vifi + 1;
		}
	}
	end_bh_atomic();
}
Example #24
__initfunc(int net_profile_init(void))
{
	int i;

#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *ent;

	ent = create_proc_entry("net/profile", 0, 0);
	ent->read_proc = profile_read_proc;
#endif

	register_netdevice(&whitehole_dev);

	printk("Evaluating net profiler cost ...");
#if CPU == 586 || CPU == 686
	if (!(boot_cpu_data.x86_capability & X86_FEATURE_TSC)) {
		printk(KERN_ERR "Sorry, your CPU does not support TSC. Net profiler disabled.\n");
		return -1;
	}
#endif
	start_bh_atomic();
#ifdef __alpha__
	alpha_tick(0);
#endif
	for (i=0; i<1024; i++) {
		NET_PROFILE_ENTER(total);
		NET_PROFILE_LEAVE(total);
	}
	if (net_prof_total.accumulator.tv_sec) {
		printk(" too high!\n");
	} else {
		net_profile_adjust.tv_usec = net_prof_total.accumulator.tv_usec>>10;
		printk("%ld units\n", net_profile_adjust.tv_usec);
	}
	net_prof_total.hits = 0;
	net_profile_stamp(&net_prof_total.entered);
	end_bh_atomic();
	return 0;
}
Example #25
static void igmp6_join_group(struct ifmcaddr6 *ma)
{
	unsigned long delay;
	int addr_type;

	addr_type = ipv6_addr_type(&ma->mca_addr);

	if ((addr_type & (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_LOOPBACK)))
		return;

	igmp6_send(&ma->mca_addr, ma->dev, ICMPV6_MGM_REPORT);

	delay = net_random() % IGMP6_UNSOLICITED_IVAL;
	start_bh_atomic();
	if (del_timer(&ma->mca_timer))
		delay = ma->mca_timer.expires - jiffies;

	ma->mca_timer.expires = jiffies + delay;

	add_timer(&ma->mca_timer);
	ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
	end_bh_atomic();
}
Example #26
/* 
 *	Delete existing entry
 */
static int ip_masq_user_del(struct ip_masq_user *ums)
{
	struct ip_masq *ms=NULL;

	if (masq_proto_num (ums->protocol) == -1) {
		return EPROTONOSUPPORT;
	}
	start_bh_atomic();
	if (ums->mport && ums->maddr) {
		ms = ip_masq_in_get(ums->protocol, 
				ums->daddr, ums->dport, 
				ums->maddr, ums->mport);
		end_bh_atomic();
	} else if (ums->sport && ums->saddr) {
		ms = ip_masq_out_get(ums->protocol,
				ums->saddr, ums->sport,
				ums->daddr, ums->dport);
		end_bh_atomic();
	} else
		return EINVAL;	
	
	if (ms == NULL) {
		return ESRCH;
	}

	/*
	 *	got (locked) entry, setup almost tiny timeout :) and  
	 *	give away
	 *
	 *	FIXME: should use something better than S_CLOSE
	 */
	ms->timeout = IP_MASQ_S_CLOSE;

	masq_user_k2u(ms, ums);
	ip_masq_put(ms);
	return 0;
}
Example #27
void dev_deactivate(struct device *dev)
{
	struct Qdisc *qdisc;

	start_bh_atomic();

	qdisc = xchg(&dev->qdisc, &noop_qdisc);

	qdisc_reset(qdisc);

	if (qdisc->h.forw) {
		struct Qdisc_head **hp, *h;

		for (hp = &qdisc_head.forw; (h = *hp) != &qdisc_head; hp = &h->forw) {
			if (h == &qdisc->h) {
				*hp = h->forw;
				h->forw = NULL;
				break;
			}
		}
	}

	end_bh_atomic();
}
Example #28
static struct ip_masq * ip_masq_user_locked_get (struct ip_masq_user *ums, int *err)
{
	struct ip_masq *ms=NULL;
	if (masq_proto_num (ums->protocol) == -1) {
		*err = EPROTONOSUPPORT;
	}

	start_bh_atomic();
	if (ums->mport && ums->maddr) {
		ms = ip_masq_in_get(ums->protocol, 
				ums->daddr, ums->dport, 
				ums->maddr, ums->mport);
		end_bh_atomic();
	} else if (ums->sport && ums->saddr) {
		ms = ip_masq_out_get(ums->protocol,
				ums->saddr, ums->sport,
				ums->daddr, ums->dport);
		end_bh_atomic();
	} else
		*err = EINVAL;	
	
	if (ms == NULL) *err = ESRCH;
	return ms;
}
Example #29
static int
ipip_tunnel_ioctl (struct device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip_tunnel_parm p;
	struct ip_tunnel *t;

	MOD_INC_USE_COUNT;

	switch (cmd) {
	case SIOCGETTUNNEL:
		t = NULL;
		if (dev == &ipip_fb_tunnel_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			t = ipip_tunnel_locate(&p, 0);
		}
		if (t == NULL)
			t = (struct ip_tunnel*)dev->priv;
		memcpy(&p, &t->parms, sizeof(p));
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;

	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			goto done;

		err = -EINVAL;
		if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
		    p.iph.ihl != 5 || (p.iph.frag_off&__constant_htons(~IP_DF)))
			goto done;
		if (p.iph.ttl)
			p.iph.frag_off |= __constant_htons(IP_DF);

		t = ipip_tunnel_locate(&p, cmd == SIOCADDTUNNEL);

		if (dev != &ipip_fb_tunnel_dev && cmd == SIOCCHGTUNNEL &&
		    t != &ipip_fb_tunnel) {
			if (t != NULL) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else {
				if (((dev->flags&IFF_POINTOPOINT) && !p.iph.daddr) ||
				    (!(dev->flags&IFF_POINTOPOINT) && p.iph.daddr)) {
					err = -EINVAL;
					break;
				}
				t = (struct ip_tunnel*)dev->priv;
				start_bh_atomic();
				ipip_tunnel_unlink(t);
				t->parms.iph.saddr = p.iph.saddr;
				t->parms.iph.daddr = p.iph.daddr;
				memcpy(dev->dev_addr, &p.iph.saddr, 4);
				memcpy(dev->broadcast, &p.iph.daddr, 4);
				ipip_tunnel_link(t);
				end_bh_atomic();
				netdev_state_change(dev);
			}
		}

		if (t) {
			err = 0;
			if (cmd == SIOCCHGTUNNEL) {
				t->parms.iph.ttl = p.iph.ttl;
				t->parms.iph.tos = p.iph.tos;
				t->parms.iph.frag_off = p.iph.frag_off;
			}
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
				err = -EFAULT;
		} else
			err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
		break;

	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!capable(CAP_NET_ADMIN))
			goto done;

		if (dev == &ipip_fb_tunnel_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				goto done;
			err = -ENOENT;
			if ((t = ipip_tunnel_locate(&p, 0)) == NULL)
				goto done;
			err = -EPERM;
			if (t == &ipip_fb_tunnel)
				goto done;
		}
		err = unregister_netdevice(dev);
		break;

	default:
		err = -EINVAL;
	}

done:
	MOD_DEC_USE_COUNT;
	return err;
}
Example #30
void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
{
	unsigned long flags;
	int nitcount;
	struct packet_type *ptype;
	int where = 0;		/* used to say if the packet should go	*/
				/* at the front or the back of the	*/
				/* queue - front is a retransmit try	*/

	if (dev == NULL) 
	{
		printk("dev.c: dev_queue_xmit: dev = NULL\n");
		return;
	}
	
	if(pri>=0 && !skb_device_locked(skb))
		skb_device_lock(skb);	/* Shove a lock on the frame */
#ifdef CONFIG_SLAVE_BALANCING
	save_flags(flags);
	cli();
	if(dev->slave!=NULL && dev->slave->pkt_queue < dev->pkt_queue &&
				(dev->slave->flags & IFF_UP))
		dev=dev->slave;
	restore_flags(flags);
#endif		
#ifdef CONFIG_SKB_CHECK 
	IS_SKB(skb);
#endif    
	skb->dev = dev;

	/*
	 *	This just eliminates some race conditions, but not all... 
	 */

	if (skb->next != NULL) 
	{
		/*
		 *	Make sure we haven't missed an interrupt. 
		 */
		printk("dev_queue_xmit: worked around a missed interrupt\n");
		start_bh_atomic();
		dev->hard_start_xmit(NULL, dev);
		end_bh_atomic();
		return;
	}

	/*
	 *	Negative priority is used to flag a frame that is being pulled from the
	 *	queue front as a retransmit attempt. It therefore goes back on the queue
	 *	start on a failure.
	 */
	 
	if (pri < 0)
	{
		pri = -pri-1;
		where = 1;
	}

	if (pri >= DEV_NUMBUFFS) 
	{
		printk("bad priority in dev_queue_xmit.\n");
		pri = 1;
	}

	/*
	 *	If the address has not been resolved. Call the device header rebuilder.
	 *	This can cover all protocols and technically not just ARP either.
	 */
	 
	if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
		return;
	}

	save_flags(flags);
	cli();	
	if (!where) {
#ifdef CONFIG_SLAVE_BALANCING	
		skb->in_dev_queue=1;
#endif		
		skb_queue_tail(dev->buffs + pri,skb);
		skb_device_unlock(skb);		/* Buffer is on the device queue and can be freed safely */
		skb = skb_dequeue(dev->buffs + pri);
		skb_device_lock(skb);		/* New buffer needs locking down */
#ifdef CONFIG_SLAVE_BALANCING		
		skb->in_dev_queue=0;
#endif		
	}
	restore_flags(flags);

	/* copy outgoing packets to any sniffer packet handlers */
	if(!where)
	{
		for (nitcount= dev_nit, ptype = ptype_base; nitcount > 0 && ptype != NULL; ptype = ptype->next) 
		{
			/* Never send packets back to the socket
			 * they originated from - MvS ([email protected])
			 */
			if (ptype->type == htons(ETH_P_ALL) &&
			   (ptype->dev == dev || !ptype->dev) &&
			   ((struct sock *)ptype->data != skb->sk))
			{
				struct sk_buff *skb2;
				if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
					break;
				/*
				 *	The protocol knows this has (for other paths) been taken off
				 *	and adds it back.
				 */
				skb2->len-=skb->dev->hard_header_len;
				ptype->func(skb2, skb->dev, ptype);
				nitcount--;
			}
		}
	}
	start_bh_atomic();
	if (dev->hard_start_xmit(skb, dev) == 0) {
		end_bh_atomic();
		/*
		 *	Packet is now solely the responsibility of the driver
		 */
		return;
	}
	end_bh_atomic();

	/*
	 *	Transmission failed, put skb back into a list. Once on the list it's safe and
	 *	no longer device locked (it can be freed safely from the device queue)
	 */
	cli();
#ifdef CONFIG_SLAVE_BALANCING
	skb->in_dev_queue=1;
	dev->pkt_queue++;
#endif		
	skb_device_unlock(skb);
	skb_queue_head(dev->buffs + pri,skb);
	restore_flags(flags);
}
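
The comment block in dev_queue_xmit() about negative priorities implies a small encoding contract: a caller retrying a frame passes a negative value, which the function decodes with pri = -pri-1 and treats as a front-of-queue retransmit. The helper below only illustrates that arithmetic; it is a hypothetical name, not a kernel API.

/* Hypothetical helper: encode a retransmit of a frame originally queued at
 * non-negative priority 'pri', matching the decode "pri = -pri-1" in
 * dev_queue_xmit() above.  E.g. priority 1 becomes -2, which the function
 * turns back into 1 and flags with where = 1 (requeue at the head). */
static int retransmit_pri(int pri)
{
	return -pri - 1;
}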