Example #1
static struct sk_buff *__xip_start_skb(struct sock *sk, struct xip_dst *xdst,
	const struct xia_addr *src, int src_n, const struct xia_addr *dest,
	int dest_n, u8 dest_last_node, int transhdrlen, int noblock)
{
	struct net_device *dev = xdst->dst.dev;
	struct sk_buff *skb;
	u32 mtu, alloclen;
	int hh_len, xh_len, rc;

	if (!dev) {
		net_warn_ratelimited("XIP %s: there is a bug somewhere, tried to send a datagram, but dst.dev is NULL\n",
				     __func__);
		return ERR_PTR(-ENODEV);
	}

	mtu = dst_mtu(&xdst->dst);
	if (mtu < XIP_MIN_MTU) {
		net_warn_ratelimited("XIP %s: cannot send datagram out because mtu (= %u) of dev %s is less than minimum MTU (= %u)\n",
				     __func__, mtu, dev->name, XIP_MIN_MTU);
		return ERR_PTR(-EMSGSIZE);
	}

	hh_len = LL_RESERVED_SPACE(dev);
	alloclen = hh_len + mtu;
	skb = sock_alloc_send_skb(sk, alloclen, noblock, &rc);
	if (unlikely(!skb))
		return ERR_PTR(rc);

	/* Fill in the control structures. */

	/* Reserve space for the link layer header */
	skb_reserve(skb, hh_len);

	/* Fill XIP header. */
	skb_reset_network_header(skb);
	xh_len = xip_hdr_size(dest_n, src_n);
	skb_put(skb, xh_len);
	xip_fill_in_hdr(skb, xdst, src->s_row, src_n,
			dest->s_row, dest_n, dest_last_node);

	skb_set_transport_header(skb, xh_len);
	skb_put(skb, transhdrlen);

	/* XXX Do we need to set skb_shinfo(skb)->tx_flags? */

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	xdst_hold(xdst);
	skb_dst_set(skb, &xdst->dst);
	return skb;
}
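__xip_start_skb() reports failure through the kernel's ERR_PTR() convention rather than returning NULL, so each error path carries a specific errno back to the caller. A minimal caller sketch (the surrounding send path is not shown in this example):

	skb = __xip_start_skb(sk, xdst, src, src_n, dest, dest_n,
			      dest_last_node, transhdrlen, noblock);
	if (IS_ERR(skb))
		return PTR_ERR(skb);	/* -ENODEV, -EMSGSIZE, or the sock_alloc_send_skb() error */
	/* ... append the payload and hand the skb to the output path ... */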
Example #2
static int netdev_send(struct vport *vport, struct sk_buff *skb)
{
	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
	int mtu = netdev_vport->dev->mtu;
	int len;

	if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
		net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
				     ovs_dp_name(vport->dp),
				     packet_length(skb), mtu);
		goto error;
	}

	if (unlikely(skb_warn_if_lro(skb)))
		goto error;

	skb->dev = netdev_vport->dev;
	len = skb->len;
	dev_queue_xmit(skb);

	return len;

error:
	kfree_skb(skb);
	ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
	return 0;
}
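The over-MTU test compares the payload length rather than skb->len. In OVS of this era, packet_length() subtracts the link-layer header before the comparison; roughly (a sketch, not necessarily the exact tree this example was taken from):

static int packet_length(const struct sk_buff *skb)
{
	int length = skb->len - ETH_HLEN;	/* strip the Ethernet header */

	if (skb->protocol == htons(ETH_P_8021Q))
		length -= VLAN_HLEN;		/* and one VLAN tag, if present */

	return length;
}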
Example #3
static inline int handle_dev_cpu_collision(struct sk_buff *skb,
					   struct netdev_queue *dev_queue,
					   struct Qdisc *q)
{
	int ret;

	if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
		/*
		 * Same CPU holding the lock. It may be a transient
		 * configuration error, when hard_start_xmit() recurses. We
		 * detect it by checking xmit owner and drop the packet when
		 * deadloop is detected. Return OK to try the next skb.
		 */
		kfree_skb(skb);
		net_warn_ratelimited("Dead loop on netdevice %s, fix it urgently!\n",
				     dev_queue->dev->name);
		ret = qdisc_qlen(q);
	} else {
		/*
		 * Another cpu is holding lock, requeue & delay xmits for
		 * some time.
		 */
		__this_cpu_inc(softnet_data.cpu_collision);
		ret = dev_requeue_skb(skb, q);
	}

	return ret;
}
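Both branches resolve to "let the qdisc retry later": the dead-loop case drops the skb but returns qdisc_qlen(q) so the caller keeps draining, while the collision case requeues. In kernels of this vintage, dev_requeue_skb() looked roughly like the sketch below:

static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
{
	q->gso_skb = skb;	/* stash the skb for the next dequeue attempt */
	q->qstats.requeues++;
	q->q.qlen++;		/* it is still accounted to the queue */
	__netif_schedule(q);	/* rerun the qdisc from the TX softirq */

	return 0;
}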
Example #4
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (gred_wred_mode(t)) {
				if (!sch->qstats.backlog)
					red_start_of_idle_period(&t->wred_set);
			} else {
				if (!q->backlog)
					red_start_of_idle_period(&q->vars);
			}
		}

		return skb;
	}

	return NULL;
}
static unsigned int gred_drop(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_tail(sch);
	if (skb) {
		unsigned int len = qdisc_pkt_len(skb);
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x while dropping, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= len;
			q->stats.other++;

			if (gred_wred_mode(t)) {
				if (!sch->qstats.backlog)
					red_start_of_idle_period(&t->wred_set);
			} else {
				if (!q->backlog)
					red_start_of_idle_period(&q->vars);
			}
		}

		qdisc_drop(skb, sch);
		return len;
	}

	return 0;
}
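Both functions locate the virtual queue by masking the low bits of skb->tc_index. A sketch of the helper as defined in sch_gred.c (MAX_DPs is 16 there):

#define GRED_VQ_MASK	(MAX_DPs - 1)

static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}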
Example #6
int sirdev_receive(struct sir_dev *dev, const unsigned char *cp, size_t count) 
{
	if (!dev || !dev->netdev) {
		net_warn_ratelimited("%s(), not ready yet!\n", __func__);
		return -1;
	}

	if (!dev->irlap) {
		net_warn_ratelimited("%s - too early: %p / %zd!\n",
				     __func__, cp, count);
		return -1;
	}

	if (cp == NULL) {
		/* error already at lower level receive
		 * just update stats and set media busy
		 */
		irda_device_set_media_busy(dev->netdev, TRUE);
		dev->netdev->stats.rx_dropped++;
		pr_debug("%s; rx-drop: %zd\n", __func__, count);
		return 0;
	}

	/* Read the characters into the buffer */
	if (likely(atomic_read(&dev->enable_rx))) {
		while (count--)
			/* Unwrap and destuff one byte */
			async_unwrap_char(dev->netdev, &dev->netdev->stats,
					  &dev->rx_buff, *cp++);
	} else {
		while (count--) {
			/* rx not enabled: save the raw bytes and never
			 * trigger any netif_rx. The received bytes are flushed
			 * later when we re-enable rx but might be read meanwhile
			 * by the dongle driver.
			 */
			dev->rx_buff.data[dev->rx_buff.len++] = *cp++;

			/* What should we do when the buffer is full? */
			if (unlikely(dev->rx_buff.len == dev->rx_buff.truesize))
				dev->rx_buff.len = 0;
		}
	}

	return 0;
}
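async_unwrap_char() undoes IrDA SIR wrapping: frames are delimited by BOF (0xc0) and EOF (0xc1), and a payload byte that collides with a control byte is sent as CE (0x7d) followed by the byte XORed with 0x20. A self-contained sketch of the destuffing state machine (FCS checking and error statistics, which the kernel wrapper also handles, are omitted):

#include <stdint.h>
#include <stddef.h>

#define SIR_BOF	0xc0	/* beginning-of-frame delimiter */
#define SIR_EOF	0xc1	/* end-of-frame delimiter */
#define SIR_CE	0x7d	/* control escape */

struct sir_unwrap {
	uint8_t buf[2048];
	size_t len;
	int in_frame;
	int escaped;
};

/* Feed one raw byte; returns the frame length when an EOF closes a frame, else 0. */
static size_t sir_unwrap_byte(struct sir_unwrap *s, uint8_t c)
{
	switch (c) {
	case SIR_BOF:
		s->in_frame = 1;
		s->escaped = 0;
		s->len = 0;
		return 0;
	case SIR_EOF: {
		size_t n = s->in_frame ? s->len : 0;

		s->in_frame = 0;
		return n;
	}
	case SIR_CE:
		s->escaped = 1;
		return 0;
	default:
		if (!s->in_frame || s->len >= sizeof(s->buf))
			return 0;	/* outside a frame, or overflow: discard */
		s->buf[s->len++] = s->escaped ? (c ^ 0x20) : c;
		s->escaped = 0;
		return 0;
	}
}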
Example #7
/*
 * Function iriap_init (void)
 *
 *    Initializes the IrIAP layer, called by the module initialization code
 *    in irmod.c
 */
int __init iriap_init(void)
{
	struct ias_object *obj;
	struct iriap_cb *server;
	__u8 oct_seq[6];
	__u16 hints;

	/* Allocate master array */
	iriap = hashbin_new(HB_LOCK);
	if (!iriap)
		return -ENOMEM;

	/* Object repository - defined in irias_object.c */
	irias_objects = hashbin_new(HB_LOCK);
	if (!irias_objects) {
		net_warn_ratelimited("%s: Can't allocate irias_objects hashbin!\n",
				     __func__);
		hashbin_delete(iriap, NULL);
		return -ENOMEM;
	}

	lockdep_set_class_and_name(&irias_objects->hb_spinlock, &irias_objects_key,
				   "irias_objects");

	/*
	 *  Register some default services for IrLMP
	 */
	hints  = irlmp_service_to_hint(S_COMPUTER);
	service_handle = irlmp_register_service(hints);

	/* Register the Device object with LM-IAS */
	obj = irias_new_object("Device", IAS_DEVICE_ID);
	irias_add_string_attrib(obj, "DeviceName", "Linux", IAS_KERNEL_ATTR);

	oct_seq[0] = 0x01;  /* Version 1 */
	oct_seq[1] = 0x00;  /* IAS support bits */
	oct_seq[2] = 0x00;  /* LM-MUX support bits */
#ifdef CONFIG_IRDA_ULTRA
	oct_seq[2] |= 0x04; /* Connectionless Data support */
#endif
	irias_add_octseq_attrib(obj, "IrLMPSupport", oct_seq, 3,
				IAS_KERNEL_ATTR);
	irias_insert_object(obj);

	/*
	 *  Register server support with IrLMP so we can accept incoming
	 *  connections
	 */
	server = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL);
	if (!server) {
		pr_debug("%s(), unable to open server\n", __func__);
		return -1;
	}
	iriap_register_lsap(server, LSAP_IAS, IAS_SERVER);

	return 0;
}
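Note that the iriap_open() failure path returns without tearing down the two hashbins allocated above. A goto-unwind version of the tail would look roughly like this sketch (the already-registered service and Device object would also need unwinding in a complete version):

	server = iriap_open(LSAP_IAS, IAS_SERVER, NULL, NULL);
	if (!server) {
		pr_debug("%s(), unable to open server\n", __func__);
		err = -ENOMEM;
		goto out_objects;
	}
	iriap_register_lsap(server, LSAP_IAS, IAS_SERVER);

	return 0;

out_objects:
	hashbin_delete(irias_objects, NULL);
	hashbin_delete(iriap, NULL);
	return err;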
Example #8
/*
 * Transmit possibly several skbs, and handle the return status as
 * required. Owning the running seqcount bit guarantees that
 * only one CPU can execute this function.
 *
 * Returns to the caller:
 *				false  - hardware queue frozen; back off
 *				true   - feel free to send more pkts
 */
bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		     struct net_device *dev, struct netdev_queue *txq,
		     spinlock_t *root_lock, bool validate)
{
	int ret = NETDEV_TX_BUSY;
	bool again = false;

	/* And release qdisc */
	if (root_lock)
		spin_unlock(root_lock);

	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
	if (validate)
		skb = validate_xmit_skb_list(skb, dev, &again);

#ifdef CONFIG_XFRM_OFFLOAD
	if (unlikely(again)) {
		if (root_lock)
			spin_lock(root_lock);

		dev_requeue_skb(skb, q);
		return false;
	}
#endif

	if (likely(skb)) {
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (!netif_xmit_frozen_or_stopped(txq))
			skb = dev_hard_start_xmit(skb, dev, txq, &ret);

		HARD_TX_UNLOCK(dev, txq);
	} else {
		if (root_lock)
			spin_lock(root_lock);
		return true;
	}

	if (root_lock)
		spin_lock(root_lock);

	if (!dev_xmit_complete(ret)) {
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY))
			net_warn_ratelimited("BUG %s code %d qlen %d\n",
					     dev->name, ret, q->q.qlen);

		dev_requeue_skb(skb, q);
		return false;
	}

	if (ret && netif_xmit_frozen_or_stopped(txq))
		return false;

	return true;
}
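With the bool convention the caller no longer inspects the queue length; it just loops while the function says more packets may be sent. A hypothetical caller shape (next_packet() is an invented stand-in for the qdisc dequeue step):

	for (;;) {
		struct sk_buff *skb = next_packet(q);	/* hypothetical dequeue helper */

		if (!skb)
			break;
		if (!sch_direct_xmit(skb, q, dev, txq, root_lock, true))
			break;	/* queue frozen or skb requeued: back off */
	}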
Example #9
int __init irda_device_init( void)
{
	dongles = hashbin_new(HB_NOLOCK);
	if (dongles == NULL) {
		net_warn_ratelimited("IrDA: Can't allocate dongles hashbin!\n");
		return -ENOMEM;
	}
	spin_lock_init(&dongles->hb_spinlock);

	tasks = hashbin_new(HB_LOCK);
	if (tasks == NULL) {
		net_warn_ratelimited("IrDA: Can't allocate tasks hashbin!\n");
		hashbin_delete(dongles, NULL);
		return -ENOMEM;
	}

	/* We no longer initialise the driver ourselves here, we let
	 * the system do it for us... - Jean II */

	return 0;
}
Example #10
/*
 * Transmit one skb, and handle the return status as required. Holding the
 * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this
 * function.
 *
 * Returns to the caller:
 *				0  - queue is empty or throttled.
 *				>0 - queue is not empty.
 */
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		    struct net_device *dev, struct netdev_queue *txq,
		    spinlock_t *root_lock)
{
	int ret = NETDEV_TX_BUSY;

	/* And release qdisc */
	spin_unlock(root_lock);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		ret = dev_hard_start_xmit(skb, dev, txq);

	HARD_TX_UNLOCK(dev, txq);

#ifdef CONFIG_MTK_NET_LOGGING
	if (ret != NETDEV_TX_OK) {
		/* Only log at selected queue lengths (4 while the queue is
		 * short, 64 mod 128 once it has grown) to bound the log rate.
		 */
		if (qdisc_qlen(q) < 16) {
			if (qdisc_qlen(q) % 16 == 4)
				printk(KERN_INFO "[mtk_net][sched]dev_hard_start_xmit ret = %d(%s), txq state = %lu\n",
				       ret, dev->name, txq->state);
		} else {
			if (qdisc_qlen(q) % 128 == 64)
				printk(KERN_INFO "[mtk_net][sched]warning: dev_hard_start_xmit ret = %d(%s), txq state = %lu\n",
				       ret, dev->name, txq->state);
		}
	}
#endif

	spin_lock(root_lock);

	if (dev_xmit_complete(ret)) {
		/* Driver sent out skb successfully or skb was consumed */
		ret = qdisc_qlen(q);
	} else if (ret == NETDEV_TX_LOCKED) {
		/* Driver try lock failed */
		ret = handle_dev_cpu_collision(skb, txq, q);
	} else {
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY))
			net_warn_ratelimited("BUG %s code %d qlen %d\n",
					     dev->name, ret, q->q.qlen);

		ret = dev_requeue_skb(skb, q);
	}

	if (ret && netif_xmit_frozen_or_stopped(txq))
		ret = 0;

	return ret;
}
Example #11
/*
 * Function ias_new_object (name, id)
 *
 *    Create a new IAS object
 *
 */
struct ias_object *irias_new_object( char *name, int id)
{
	struct ias_object *obj;

	obj = kzalloc(sizeof(struct ias_object), GFP_ATOMIC);
	if (obj == NULL) {
		net_warn_ratelimited("%s(), Unable to allocate object!\n",
				     __func__);
		return NULL;
	}

	obj->magic = IAS_OBJECT_MAGIC;
	obj->name = kstrndup(name, IAS_MAX_CLASSNAME, GFP_ATOMIC);
	if (!obj->name) {
		net_warn_ratelimited("%s(), Unable to allocate name!\n",
				     __func__);
		kfree(obj);
		return NULL;
	}
	obj->id = id;

	/* Locking notes : the attrib spinlock has lower precedence
	 * than the objects spinlock. Never grab the objects spinlock
	 * while holding any attrib spinlock (risk of deadlock). Jean II */
	obj->attribs = hashbin_new(HB_LOCK);

	if (obj->attribs == NULL) {
		net_warn_ratelimited("%s(), Unable to allocate attribs!\n",
				     __func__);
		kfree(obj->name);
		kfree(obj);
		return NULL;
	}

	return obj;
}
Example #12
static int lowpan_frag_rx_handlers_result(struct sk_buff *skb,
					  lowpan_rx_result res)
{
	switch (res) {
	case RX_QUEUED:
		return NET_RX_SUCCESS;
	case RX_CONTINUE:
		/* nobody cared about this packet */
		net_warn_ratelimited("%s: received unknown dispatch\n",
				     __func__);

		/* fall-through */
	default:
		/* all other failures */
		return NET_RX_DROP;
	}
}
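The /* fall-through */ comment is load-bearing: it tells reviewers and -Wimplicit-fallthrough that the missing break is deliberate. Current kernels would spell the same thing with the fallthrough pseudo-keyword; the switch body would then read:

	case RX_CONTINUE:
		/* nobody cared about this packet */
		net_warn_ratelimited("%s: received unknown dispatch\n",
				     __func__);
		fallthrough;	/* expands to __attribute__((__fallthrough__)) where supported */
	default:
		/* all other failures */
		return NET_RX_DROP;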
Example #13
void ovs_vport_send(struct vport *vport, struct sk_buff *skb)
{
	int mtu = vport->dev->mtu;

	if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
		net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
				     vport->dev->name,
				     packet_length(skb), mtu);
		vport->dev->stats.tx_errors++;
		goto drop;
	}

	skb->dev = vport->dev;
	vport->ops->send(skb);
	return;

drop:
	kfree_skb(skb);
}
Example #14
int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type)
{
	struct lowpan_frag_queue *fq;
	struct net *net = dev_net(skb->dev);
	struct lowpan_802154_cb *cb = lowpan_802154_cb(skb);
	struct ieee802154_hdr hdr;
	int err;

	if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
		goto err;

	err = lowpan_get_cb(skb, frag_type, cb);
	if (err < 0)
		goto err;

	if (frag_type == LOWPAN_DISPATCH_FRAG1) {
		err = lowpan_invoke_frag_rx_handlers(skb);
		if (err == NET_RX_DROP)
			goto err;
	}

	if (cb->d_size > IPV6_MIN_MTU) {
		net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n");
		goto err;
	}

	fq = fq_find(net, cb, &hdr.source, &hdr.dest);
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);
		ret = lowpan_frag_queue(fq, skb, frag_type);
		spin_unlock(&fq->q.lock);

		inet_frag_put(&fq->q, &lowpan_frags);
		return ret;
	}

err:
	kfree_skb(skb);
	return -1;
}
Example #15
static unsigned int
ip6t_mangle_out(struct sk_buff *skb, const struct net_device *out)
{
	unsigned int ret;
	struct in6_addr saddr, daddr;
	u_int8_t hop_limit;
	u_int32_t flowlabel, mark;
	int err;
#if 0
	/* root is playing with raw sockets. */
	if (skb->len < sizeof(struct iphdr) ||
	    ip_hdrlen(skb) < sizeof(struct iphdr)) {
		net_warn_ratelimited("ip6t_hook: happy cracking\n");
		return NF_ACCEPT;
	}
#endif

	/* save source/dest address, mark, hoplimit, flowlabel and priority */
	memcpy(&saddr, &ipv6_hdr(skb)->saddr, sizeof(saddr));
	memcpy(&daddr, &ipv6_hdr(skb)->daddr, sizeof(daddr));
	mark = skb->mark;
	hop_limit = ipv6_hdr(skb)->hop_limit;

	/* flowlabel and prio (includes version, which shouldn't change either) */
	flowlabel = *((u_int32_t *)ipv6_hdr(skb));

	ret = ip6t_do_table(skb, NF_INET_LOCAL_OUT, NULL, out,
			    dev_net(out)->ipv6.ip6table_mangle);

	if (ret != NF_DROP && ret != NF_STOLEN &&
	    (!ipv6_addr_equal(&ipv6_hdr(skb)->saddr, &saddr) ||
	     !ipv6_addr_equal(&ipv6_hdr(skb)->daddr, &daddr) ||
	     skb->mark != mark ||
	     ipv6_hdr(skb)->hop_limit != hop_limit ||
	     flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) {
		err = ip6_route_me_harder(skb);
		if (err < 0)
			ret = NF_DROP_ERR(err);
	}

	return ret;
}
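The single-word flowlabel snapshot works because the first 32 bits of the IPv6 header pack the version, traffic class, and flow label together, so one compare catches a change to any of them. Decomposed, as a sketch:

	/* First word of the IPv6 header, after byte swapping:
	 *   bits 31..28  version (always 6)
	 *   bits 27..20  traffic class
	 *   bits 19..0   flow label
	 */
	u32 word    = ntohl(*(__be32 *)ipv6_hdr(skb));
	u8  version = word >> 28;
	u8  tclass  = (word >> 20) & 0xff;
	u32 flowlbl = word & 0xfffff;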
Example #16
/*
 * Transmit possibly several skbs, and handle the return status as
 * required. Holding the __QDISC___STATE_RUNNING bit guarantees that
 * only one CPU can execute this function.
 *
 * Returns to the caller:
 *				0  - queue is empty or throttled.
 *				>0 - queue is not empty.
 */
int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
		    struct net_device *dev, struct netdev_queue *txq,
		    spinlock_t *root_lock, bool validate)
{
	int ret = NETDEV_TX_BUSY;

	/* And release qdisc */
	spin_unlock(root_lock);

	/* Note that we validate skb (GSO, checksum, ...) outside of locks */
	if (validate)
		skb = validate_xmit_skb_list(skb, dev);

	if (skb) {
		HARD_TX_LOCK(dev, txq, smp_processor_id());
		if (!netif_xmit_frozen_or_stopped(txq))
			skb = dev_hard_start_xmit(skb, dev, txq, &ret);

		HARD_TX_UNLOCK(dev, txq);
	}
	spin_lock(root_lock);

	if (dev_xmit_complete(ret)) {
		/* Driver sent out skb successfully or skb was consumed */
		ret = qdisc_qlen(q);
	} else if (ret == NETDEV_TX_LOCKED) {
		/* Driver try lock failed */
		ret = handle_dev_cpu_collision(skb, txq, q);
	} else {
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY))
			net_warn_ratelimited("BUG %s code %d qlen %d\n",
					     dev->name, ret, q->q.qlen);

		ret = dev_requeue_skb(skb, q);
	}

	if (ret && netif_xmit_frozen_or_stopped(txq))
		ret = 0;

	return ret;
}
Example #17
void ovs_vport_send(struct vport *vport, struct sk_buff *skb, u8 mac_proto)
{
	int mtu = vport->dev->mtu;

	switch (vport->dev->type) {
	case ARPHRD_NONE:
		if (mac_proto == MAC_PROTO_ETHERNET) {
			skb_reset_network_header(skb);
			skb_reset_mac_len(skb);
			skb->protocol = htons(ETH_P_TEB);
		} else if (mac_proto != MAC_PROTO_NONE) {
			WARN_ON_ONCE(1);
			goto drop;
		}
		break;
	case ARPHRD_ETHER:
		if (mac_proto != MAC_PROTO_ETHERNET)
			goto drop;
		break;
	default:
		goto drop;
	}

	if (unlikely(packet_length(skb, vport->dev) > mtu &&
		     !skb_is_gso(skb))) {
		net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
				     vport->dev->name,
				     packet_length(skb, vport->dev), mtu);
		vport->dev->stats.tx_errors++;
		goto drop;
	}

	skb->dev = vport->dev;
	vport->ops->send(skb);
	return;

drop:
	kfree_skb(skb);
}
Example #18
static int netdev_send(struct vport *vport, struct sk_buff *skb)
{
	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
	int mtu = netdev_vport->dev->mtu;
	int len;

	if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
		net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
				     netdev_vport->dev->name,
				     packet_length(skb), mtu);
		goto drop;
	}

	skb->dev = netdev_vport->dev;
	len = skb->len;
	dev_queue_xmit(skb);

	return len;

drop:
	kfree_skb(skb);
	return 0;
}
Example #19
int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
{
	struct lowpan_frag_queue *fq;
	struct net *net = dev_net(skb->dev);
	struct lowpan_frag_info *frag_info = lowpan_cb(skb);
	struct ieee802154_addr source, dest;
	int err;

	source = mac_cb(skb)->source;
	dest = mac_cb(skb)->dest;

	err = lowpan_get_frag_info(skb, frag_type, frag_info);
	if (err < 0)
		goto err;

	if (frag_info->d_size > IPV6_MIN_MTU) {
		net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n");
		goto err;
	}

	fq = fq_find(net, frag_info, &source, &dest);
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);
		ret = lowpan_frag_queue(fq, skb, frag_type);
		spin_unlock(&fq->q.lock);

		inet_frag_put(&fq->q, &lowpan_frags);
		return ret;
	}

err:
	kfree_skb(skb);
	return -1;
}
Example #20
static int netdev_send(struct vport *vport, struct sk_buff *skb)
{
	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
	int mtu = netdev_vport->dev->mtu;
	int len;

	if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
		net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
				     netdev_vport->dev->name,
				     packet_length(skb), mtu);
		goto error;
	}

	skb->dev = netdev_vport->dev;
	forward_ip_summed(skb, true);

	if (vlan_tx_tag_present(skb) && !dev_supports_vlan_tx(skb->dev)) {
		int features;

		features = netif_skb_features(skb);

		if (!vlan_tso)
			features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				      NETIF_F_UFO | NETIF_F_FSO);

		if (netif_needs_gso(skb, features)) {
			struct sk_buff *nskb;

			nskb = skb_gso_segment(skb, features);
			if (!nskb) {
				if (unlikely(skb_cloned(skb) &&
				    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
					kfree_skb(skb);
					return 0;
				}

				skb_shinfo(skb)->gso_type &= ~SKB_GSO_DODGY;
				goto tag;
			}

			if (IS_ERR(nskb)) {
				kfree_skb(skb);
				return 0;
			}
			consume_skb(skb);
			skb = nskb;

			len = 0;
			do {
				nskb = skb->next;
				skb->next = NULL;

				skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
				if (likely(skb)) {
					len += skb->len;
					vlan_set_tci(skb, 0);
					dev_queue_xmit(skb);
				}

				skb = nskb;
			} while (skb);

			return len;
		}

tag:
		skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
		if (unlikely(!skb))
			return 0;
		vlan_set_tci(skb, 0);
	}

	len = skb->len;
	dev_queue_xmit(skb);

	return len;

error:
	kfree_skb(skb);
	ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
	return 0;
}
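When the device lacks hardware VLAN insertion, __vlan_put_tag() pushes the 802.1Q tag into the frame itself, after which vlan_set_tci(skb, 0) clears the out-of-band tag so it is not applied twice. A self-contained sketch of what software insertion does to the header (out must have room for len + 4 bytes):

#include <string.h>
#include <stddef.h>

/* Sketch: insert an 802.1Q tag between the source MAC and the EtherType. */
static void vlan_insert_sketch(const unsigned char *frame, size_t len,
			       unsigned short tci, unsigned char *out)
{
	memcpy(out, frame, 12);			/* destination + source MAC */
	out[12] = 0x81;				/* TPID: ETH_P_8021Q ... */
	out[13] = 0x00;
	out[14] = tci >> 8;			/* ... then the 16-bit TCI */
	out[15] = tci & 0xff;
	memcpy(out + 16, frame + 12, len - 12);	/* original EtherType + payload */
}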
Example #21
static int help(struct sk_buff *skb, unsigned int protoff,
		struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
	unsigned int dataoff;
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct tcphdr _tcph;
	const char *data_limit;
	char *data, *ib_ptr;
	int dir = CTINFO2DIR(ctinfo);
	struct nf_conntrack_expect *exp;
	struct nf_conntrack_tuple *tuple;
	__be32 dcc_ip;
	u_int16_t dcc_port;
	__be16 port;
	int i, ret = NF_ACCEPT;
	char *addr_beg_p, *addr_end_p;
	typeof(nf_nat_irc_hook) nf_nat_irc;

	/* If packet is coming from IRC server */
	if (dir == IP_CT_DIR_REPLY)
		return NF_ACCEPT;

	/* Until there's been traffic both ways, don't look in packets. */
	if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
		return NF_ACCEPT;

	/* Not a full tcp header? */
	th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
	if (th == NULL)
		return NF_ACCEPT;

	/* No data? */
	dataoff = protoff + th->doff*4;
	if (dataoff >= skb->len)
		return NF_ACCEPT;

	spin_lock_bh(&irc_buffer_lock);
	ib_ptr = skb_header_pointer(skb, dataoff, skb->len - dataoff,
				    irc_buffer);
	BUG_ON(ib_ptr == NULL);

	data = ib_ptr;
	data_limit = ib_ptr + skb->len - dataoff;

	/* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24
	 * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */
	while (data < data_limit - (19 + MINMATCHLEN)) {
		if (memcmp(data, "\1DCC ", 5)) {
			data++;
			continue;
		}
		data += 5;
		/* we have at least (19+MINMATCHLEN)-5 bytes valid data left */

		iph = ip_hdr(skb);
		pr_debug("DCC found in master %pI4:%u %pI4:%u\n",
			 &iph->saddr, ntohs(th->source),
			 &iph->daddr, ntohs(th->dest));

		for (i = 0; i < ARRAY_SIZE(dccprotos); i++) {
			if (memcmp(data, dccprotos[i], strlen(dccprotos[i]))) {
				/* no match */
				continue;
			}
			data += strlen(dccprotos[i]);
			pr_debug("DCC %s detected\n", dccprotos[i]);

			/* we have at least
			 * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid
			 * data left (== 14/13 bytes) */
			if (parse_dcc(data, data_limit, &dcc_ip,
				       &dcc_port, &addr_beg_p, &addr_end_p)) {
				pr_debug("unable to parse dcc command\n");
				continue;
			}

			pr_debug("DCC bound ip/port: %pI4:%u\n",
				 &dcc_ip, dcc_port);

			/* dcc_ip can be the internal OR external (NAT'ed) IP */
			tuple = &ct->tuplehash[dir].tuple;
			if (tuple->src.u3.ip != dcc_ip &&
			    tuple->dst.u3.ip != dcc_ip) {
				net_warn_ratelimited("Forged DCC command from %pI4: %pI4:%u\n",
						     &tuple->src.u3.ip,
						     &dcc_ip, dcc_port);
				continue;
			}

			exp = nf_ct_expect_alloc(ct);
			if (exp == NULL) {
				nf_ct_helper_log(skb, ct,
						 "cannot alloc expectation");
				ret = NF_DROP;
				goto out;
			}
			tuple = &ct->tuplehash[!dir].tuple;
			port = htons(dcc_port);
			nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
					  tuple->src.l3num,
					  NULL, &tuple->dst.u3,
					  IPPROTO_TCP, NULL, &port);

			nf_nat_irc = rcu_dereference(nf_nat_irc_hook);
			if (nf_nat_irc && ct->status & IPS_NAT_MASK)
				ret = nf_nat_irc(skb, ctinfo, protoff,
						 addr_beg_p - ib_ptr,
						 addr_end_p - addr_beg_p,
						 exp);
			else if (nf_ct_expect_related(exp) != 0) {
				nf_ct_helper_log(skb, ct,
						 "cannot add expectation");
				ret = NF_DROP;
			}
			nf_ct_expect_put(exp);
			goto out;
		}
	}
 out:
	spin_unlock_bh(&irc_buffer_lock);
	return ret;
}
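parse_dcc() (not shown here) pulls the address and port out of the "t AAAAAAAA P" tail, where the IP is written as a single unsigned decimal number. A hypothetical stand-alone parser with the same contract, for illustration only:

#include <stdlib.h>
#include <string.h>

/* Hypothetical: parse "<arg> <ip> <port>" as in a DCC SEND payload.
 * Returns 0 on success, -1 on malformed input.
 */
static int parse_dcc_sketch(const char *data, unsigned int *ip,
			    unsigned short *port)
{
	char *end;
	unsigned long v;

	data = strchr(data, ' ');		/* skip the argument before the address */
	if (!data)
		return -1;
	v = strtoul(data + 1, &end, 10);	/* the IP as one decimal number */
	if (end == data + 1 || *end != ' ')
		return -1;
	*ip = (unsigned int)v;
	v = strtoul(end + 1, &end, 10);		/* then the port */
	if (v == 0 || v > 65535)
		return -1;
	*port = (unsigned short)v;
	return 0;
}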
Example #22
/*
 * Function ircomm_tty_change_speed (driver)
 *
 *    Change speed of the driver. If the remote device is a DCE, then this
 *    should make it change the speed of its serial port
 */
static void ircomm_tty_change_speed(struct ircomm_tty_cb *self,
		struct tty_struct *tty)
{
	unsigned int cflag, cval;
	int baud;

	if (!self->ircomm)
		return;

	cflag = tty->termios.c_cflag;

	/*  byte size and parity */
	switch (cflag & CSIZE) {
	case CS5: cval = IRCOMM_WSIZE_5; break;
	case CS6: cval = IRCOMM_WSIZE_6; break;
	case CS7: cval = IRCOMM_WSIZE_7; break;
	case CS8: cval = IRCOMM_WSIZE_8; break;
	default:  cval = IRCOMM_WSIZE_5; break;
	}
	if (cflag & CSTOPB)
		cval |= IRCOMM_2_STOP_BIT;

	if (cflag & PARENB)
		cval |= IRCOMM_PARITY_ENABLE;
	if (!(cflag & PARODD))
		cval |= IRCOMM_PARITY_EVEN;

	/* Determine divisor based on baud rate */
	baud = tty_get_baud_rate(tty);
	if (!baud)
		baud = 9600;	/* B0 transition handled in rs_set_termios */

	self->settings.data_rate = baud;
	ircomm_param_request(self, IRCOMM_DATA_RATE, FALSE);

	/* CTS flow control flag and modem status interrupts */
	if (cflag & CRTSCTS) {
		self->port.flags |= ASYNC_CTS_FLOW;
		self->settings.flow_control |= IRCOMM_RTS_CTS_IN;
		/* This got me. Bummer. Jean II */
		if (self->service_type == IRCOMM_3_WIRE_RAW)
			net_warn_ratelimited("%s(), enabling RTS/CTS on link that doesn't support it (3-wire-raw)\n",
					     __func__);
	} else {
		self->port.flags &= ~ASYNC_CTS_FLOW;
		self->settings.flow_control &= ~IRCOMM_RTS_CTS_IN;
	}
	if (cflag & CLOCAL)
		self->port.flags &= ~ASYNC_CHECK_CD;
	else
		self->port.flags |= ASYNC_CHECK_CD;
#if 0
	/*
	 * Set up parity check flag
	 */

	if (I_INPCK(self->tty))
		driver->read_status_mask |= LSR_FE | LSR_PE;
	if (I_BRKINT(driver->tty) || I_PARMRK(driver->tty))
		driver->read_status_mask |= LSR_BI;

	/*
	 * Characters to ignore
	 */
	driver->ignore_status_mask = 0;
	if (I_IGNPAR(driver->tty))
		driver->ignore_status_mask |= LSR_PE | LSR_FE;

	if (I_IGNBRK(self->tty)) {
		self->ignore_status_mask |= LSR_BI;
		/*
		 * If we're ignore parity and break indicators, ignore
		 * overruns too. (For real raw support).
		 */
		if (I_IGNPAR(self->tty))
			self->ignore_status_mask |= LSR_OE;
	}
#endif
	self->settings.data_format = cval;

	ircomm_param_request(self, IRCOMM_DATA_FORMAT, FALSE);
	ircomm_param_request(self, IRCOMM_FLOW_CONTROL, TRUE);
}
Example #23
static void leftover_dongle(void *arg)
{
	struct dongle_reg *reg = arg;
	net_warn_ratelimited("IrDA: Dongle type %x not unregistered\n",
			     reg->type);
}
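This is a hashbin destructor callback: it fires once for every entry still present at teardown, naming any dongle driver that never unregistered itself. Presumably it is wired up at module cleanup roughly as:

	hashbin_delete(dongles, leftover_dongle);	/* runs the callback on each leftover entry */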
Example #24
/*
 * Function ali_ircc_open (int i, chipio_t *inf)
 *
 *    Open driver instance
 *
 */
static int ali_ircc_open(int i, chipio_t *info)
{
	struct net_device *dev;
	struct ali_ircc_cb *self;
	int dongle_id;
	int err;
			
	if (i >= ARRAY_SIZE(dev_self)) {
		net_err_ratelimited("%s(), maximum number of supported chips reached!\n",
				    __func__);
		return -ENOMEM;
	}
	
	/* Set FIR FIFO and DMA Threshold */
	if (ali_ircc_setup(info) == -1)
		return -1;
		
	dev = alloc_irdadev(sizeof(*self));
	if (dev == NULL) {
		net_err_ratelimited("%s(), can't allocate memory for control block!\n",
				    __func__);
		return -ENOMEM;
	}

	self = netdev_priv(dev);
	self->netdev = dev;
	spin_lock_init(&self->lock);
   
	/* Need to store self somewhere */
	dev_self[i] = self;
	self->index = i;

	/* Initialize IO */
	self->io.cfg_base  = info->cfg_base;	/* In ali_ircc_probe_53 assign		*/
	self->io.fir_base  = info->fir_base;	/* info->sir_base = info->fir_base	*/
	self->io.sir_base  = info->sir_base;	/* ALi SIR and FIR use the same address */
	self->io.irq       = info->irq;
	self->io.fir_ext   = CHIP_IO_EXTENT;
	self->io.dma       = info->dma;
	self->io.fifo_size = 16;		/* SIR: 16, FIR: 32 Benjamin 2000/11/1 */
	
	/* Reserve the ioports that we need */
	if (!request_region(self->io.fir_base, self->io.fir_ext,
			    ALI_IRCC_DRIVER_NAME)) {
		net_warn_ratelimited("%s(), can't get iobase of 0x%03x\n",
				     __func__, self->io.fir_base);
		err = -ENODEV;
		goto err_out1;
	}

	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&self->qos);
	
	/* The only value we must override is the baud rate */
	self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
		IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8); // benjamin 2000/11/8 05:27PM
			
	self->qos.min_turn_time.bits = qos_mtt_bits;
			
	irda_qos_bits_to_value(&self->qos);
	
	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
	self->rx_buff.truesize = 14384; 
	self->tx_buff.truesize = 14384;

	/* Allocate memory if needed */
	self->rx_buff.head =
		dma_zalloc_coherent(NULL, self->rx_buff.truesize,
				    &self->rx_buff_dma, GFP_KERNEL);
	if (self->rx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out2;
	}
	
	self->tx_buff.head =
		dma_zalloc_coherent(NULL, self->tx_buff.truesize,
				    &self->tx_buff_dma, GFP_KERNEL);
	if (self->tx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out3;
	}

	self->rx_buff.in_frame = FALSE;
	self->rx_buff.state = OUTSIDE_FRAME;
	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;
	
	/* Reset Tx queue info */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;

	/* Override the network functions we need to use */
	dev->netdev_ops = &ali_ircc_sir_ops;

	err = register_netdev(dev);
	if (err) {
		net_err_ratelimited("%s(), register_netdev() failed!\n",
				    __func__);
		goto err_out4;
	}
	net_info_ratelimited("IrDA: Registered device %s\n", dev->name);

	/* Check dongle id */
	dongle_id = ali_ircc_read_dongle_id(i, info);
	net_info_ratelimited("%s(), %s, Found dongle: %s\n",
			     __func__, ALI_IRCC_DRIVER_NAME,
			     dongle_types[dongle_id]);
		
	self->io.dongle_id = dongle_id;

	return 0;

 err_out4:
	dma_free_coherent(NULL, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
 err_out3:
	dma_free_coherent(NULL, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
 err_out2:
	release_region(self->io.fir_base, self->io.fir_ext);
 err_out1:
	dev_self[i] = NULL;
	free_netdev(dev);
	return err;
}
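The dma_zalloc_coherent(NULL, ...) calls rely on the old convention that a NULL device meant "no IOMMU, default 32-bit mask"; later kernels removed that and require a real struct device. A sketch of the modern form, assuming a hypothetical pdev behind this chip instance:

	/* Assumption: pdev is the platform/PCI device this ali_ircc instance sits on. */
	self->rx_buff.head = dma_alloc_coherent(&pdev->dev, self->rx_buff.truesize,
						&self->rx_buff_dma, GFP_KERNEL);
	if (!self->rx_buff.head) {
		err = -ENOMEM;
		goto err_out2;
	}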