Example #1
static unsigned int
ipt_error(struct sk_buff *skb, const struct xt_action_param *par)
{
	net_info_ratelimited("error: `%s'\n", (const char *)par->targinfo);

	return NF_DROP;
}
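All of the snippets on this page use the same rate-limited logging helper. Roughly speaking, net_info_ratelimited() is a thin wrapper around pr_info() gated by net_ratelimit(); its definition in include/linux/net.h looks along these lines (a sketch, not copied verbatim from any particular kernel version):

#define net_ratelimited_function(function, ...)		\
do {							\
	if (net_ratelimit())				\
		function(__VA_ARGS__);			\
} while (0)

#define net_info_ratelimited(fmt, ...)			\
	net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)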
Example #2
static void dn_log_martian(struct sk_buff *skb, const char *msg)
{
	if (decnet_log_martians) {
		char *devname = skb->dev ? skb->dev->name : "???";
		struct dn_skb_cb *cb = DN_SKB_CB(skb);
		net_info_ratelimited("DECnet: Martian packet (%s) dev=%s src=0x%04hx dst=0x%04hx srcport=0x%04hx dstport=0x%04hx\n",
				     msg, devname,
				     le16_to_cpu(cb->src),
				     le16_to_cpu(cb->dst),
				     le16_to_cpu(cb->src_port),
				     le16_to_cpu(cb->dst_port));
	}
}
Example #3
static inline bool ip_rcv_options(struct sk_buff *skb)
{
	struct ip_options *opt;
	const struct iphdr *iph;
	struct net_device *dev = skb->dev;

	/* It looks as overkill, because not all
	   IP options require packet mangling.
	   But it is the easiest for now, especially taking
	   into account that combination of IP options
	   and running sniffer is extremely rare condition.
					      --ANK (980813)
	*/
	if (skb_cow(skb, skb_headroom(skb))) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	iph = ip_hdr(skb);
	opt = &(IPCB(skb)->opt);
	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);

	if (ip_options_compile(dev_net(dev), opt, skb)) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
		goto drop;
	}

	if (unlikely(opt->srr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		if (in_dev) {
			if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
				if (IN_DEV_LOG_MARTIANS(in_dev))
					net_info_ratelimited("source route option %pI4 -> %pI4\n",
							     &iph->saddr,
							     &iph->daddr);
				goto drop;
			}
		}

		if (ip_options_rcv_srr(skb))
			goto drop;
	}

	return false;
drop:
	return true;
}
Example #4
static unsigned int
reject_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct ip6t_reject_info *reject = par->targinfo;
	struct net *net = dev_net((par->in != NULL) ? par->in : par->out);

	pr_debug("%s: medium point\n", __func__);
	switch (reject->with) {
	case IP6T_ICMP6_NO_ROUTE:
		send_unreach(net, skb, ICMPV6_NOROUTE, par->hooknum);
		break;
	case IP6T_ICMP6_ADM_PROHIBITED:
		send_unreach(net, skb, ICMPV6_ADM_PROHIBITED, par->hooknum);
		break;
	case IP6T_ICMP6_NOT_NEIGHBOUR:
		send_unreach(net, skb, ICMPV6_NOT_NEIGHBOUR, par->hooknum);
		break;
	case IP6T_ICMP6_ADDR_UNREACH:
		send_unreach(net, skb, ICMPV6_ADDR_UNREACH, par->hooknum);
		break;
	case IP6T_ICMP6_PORT_UNREACH:
		send_unreach(net, skb, ICMPV6_PORT_UNREACH, par->hooknum);
		break;
	case IP6T_ICMP6_ECHOREPLY:
		/* Do nothing */
		break;
	case IP6T_TCP_RESET:
		send_reset(net, skb);
		break;
	default:
		net_info_ratelimited("case %u not handled yet\n", reject->with);
		break;
	}

	return NF_DROP;
}
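For context, a target routine such as reject_tg6() only runs once it has been registered with the xtables core. A minimal registration sketch follows; the field values and init/exit function names are illustrative assumptions, not copied from the real ip6t_REJECT module (which also sets .hooks, .checkentry and related fields):

static struct xt_target reject_tg6_reg __read_mostly = {
	.name		= "REJECT",
	.family		= NFPROTO_IPV6,
	.target		= reject_tg6,	/* the target routine shown above */
	.targetsize	= sizeof(struct ip6t_reject_info),
	.table		= "filter",
	.me		= THIS_MODULE,
};

static int __init reject_tg6_init(void)
{
	return xt_register_target(&reject_tg6_reg);
}

static void __exit reject_tg6_exit(void)
{
	xt_unregister_target(&reject_tg6_reg);
}

module_init(reject_tg6_init);
module_exit(reject_tg6_exit);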
Example #5
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, bool log_ecn_error)
{
	struct pcpu_tstats *tstats;
	const struct iphdr *iph = ip_hdr(skb);
	int err;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(iph->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
		tunnel->dev->stats.multicast++;
		skb->pkt_type = PACKET_BROADCAST;
	}
#endif

	if ((!(tpi->flags&TUNNEL_CSUM) &&  (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
	     ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	if (tunnel->parms.i_flags&TUNNEL_SEQ) {
		if (!(tpi->flags&TUNNEL_SEQ) ||
		    (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	err = IP_ECN_decapsulate(iph, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					&iph->saddr, iph->tos);
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	if (tunnel->net != dev_net(tunnel->dev))
		skb_scrub_packet(skb);

	if (tunnel->dev->type == ARPHRD_ETHER) {
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb->dev = tunnel->dev;
	}
	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
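The log_ecn_error flag that gates the rate-limited message above is supplied by the caller; tunnel drivers commonly expose it as a module parameter. A typical pattern (an assumption about the calling driver, not part of ip_tunnel_rcv() itself) looks like this:

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");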
Example #6
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;
	int sum_truesize;
	u8 ecn;

	ipq_kill(qp);

	ecn = ip_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}
	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		if (!fp->next)
			qp->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		consume_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG_CB(head)->offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(&qp->q, clone->truesize);
	}

	skb_push(head, head->data - skb_network_header(head));

	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);

		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	sub_frag_mem_limit(&qp->q, sum_truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;
	IPCB(head)->frag_max_size = qp->q.max_size;

	iph = ip_hdr(head);
	/* max_size != 0 implies at least one fragment had IP_DF set */
	iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0;
	iph->tot_len = htons(len);
	iph->tos |= ecn;

	ip_send_check(iph);

	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	return 0;

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR pr_fmt("queue_glue: no memory for gluing queue %p\n"),
		       qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
out_fail:
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	return err;
}
Example #7
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;
	u8 ecn;

	ipq_kill(qp);

	ecn = ip_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}
	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		if (!fp->next)
			qp->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		consume_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	WARN_ON(!head);
	WARN_ON(FRAG_CB(head)->offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(qp->q.net, clone->truesize);
	}

	skb_shinfo(head)->frag_list = head->next;
	skb_push(head, head->data - skb_network_header(head));

	for (fp=head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
	sub_frag_mem_limit(qp->q.net, head->truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;
	IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);

	iph = ip_hdr(head);
	iph->tot_len = htons(len);
	iph->tos |= ecn;

	/* When we set IP_DF on a refragmented skb we must also force a
	 * call to ip_fragment to avoid forwarding a DF-skb of size s while
	 * original sender only sent fragments of size f (where f < s).
	 *
	 * We only set DF/IPSKB_FRAG_PMTU if such DF fragment was the largest
	 * frag seen to avoid sending tiny DF-fragments in case skb was built
	 * from one very small df-fragment and one large non-df frag.
	 */
	if (qp->max_df_size == qp->q.max_size) {
		IPCB(head)->flags |= IPSKB_FRAG_PMTU;
		iph->frag_off = htons(IP_DF);
	} else {
		iph->frag_off = 0;
	}

	ip_send_check(iph);

	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	return 0;

out_nomem:
	net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
out_fail:
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	return err;
}
Example #8
/*
 * Function ali_ircc_open (int i, chipio_t *inf)
 *
 *    Open driver instance
 *
 */
static int ali_ircc_open(int i, chipio_t *info)
{
	struct net_device *dev;
	struct ali_ircc_cb *self;
	int dongle_id;
	int err;
			
	if (i >= ARRAY_SIZE(dev_self)) {
		net_err_ratelimited("%s(), maximum number of supported chips reached!\n",
				    __func__);
		return -ENOMEM;
	}
	
	/* Set FIR FIFO and DMA Threshold */
	if ((ali_ircc_setup(info)) == -1)
		return -1;
		
	dev = alloc_irdadev(sizeof(*self));
	if (dev == NULL) {
		net_err_ratelimited("%s(), can't allocate memory for control block!\n",
				    __func__);
		return -ENOMEM;
	}

	self = netdev_priv(dev);
	self->netdev = dev;
	spin_lock_init(&self->lock);
   
	/* Need to store self somewhere */
	dev_self[i] = self;
	self->index = i;

	/* Initialize IO */
	self->io.cfg_base  = info->cfg_base;	/* In ali_ircc_probe_53 assign 		*/
	self->io.fir_base  = info->fir_base;	/* info->sir_base = info->fir_base 	*/
	self->io.sir_base  = info->sir_base; 	/* ALi SIR and FIR use the same address */
        self->io.irq       = info->irq;
        self->io.fir_ext   = CHIP_IO_EXTENT;
        self->io.dma       = info->dma;
        self->io.fifo_size = 16;		/* SIR: 16, FIR: 32 Benjamin 2000/11/1 */
	
	/* Reserve the ioports that we need */
	if (!request_region(self->io.fir_base, self->io.fir_ext,
			    ALI_IRCC_DRIVER_NAME)) {
		net_warn_ratelimited("%s(), can't get iobase of 0x%03x\n",
				     __func__, self->io.fir_base);
		err = -ENODEV;
		goto err_out1;
	}

	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&self->qos);
	
	/* The only value we must override it the baudrate */
	self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
		IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8); // benjamin 2000/11/8 05:27PM
			
	self->qos.min_turn_time.bits = qos_mtt_bits;
			
	irda_qos_bits_to_value(&self->qos);
	
	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
	self->rx_buff.truesize = 14384; 
	self->tx_buff.truesize = 14384;

	/* Allocate memory if needed */
	self->rx_buff.head =
		dma_zalloc_coherent(NULL, self->rx_buff.truesize,
				    &self->rx_buff_dma, GFP_KERNEL);
	if (self->rx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out2;
	}
	
	self->tx_buff.head =
		dma_zalloc_coherent(NULL, self->tx_buff.truesize,
				    &self->tx_buff_dma, GFP_KERNEL);
	if (self->tx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out3;
	}

	self->rx_buff.in_frame = FALSE;
	self->rx_buff.state = OUTSIDE_FRAME;
	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;
	
	/* Reset Tx queue info */
	self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
	self->tx_fifo.tail = self->tx_buff.head;

	/* Override the network functions we need to use */
	dev->netdev_ops = &ali_ircc_sir_ops;

	err = register_netdev(dev);
	if (err) {
		net_err_ratelimited("%s(), register_netdev() failed!\n",
				    __func__);
		goto err_out4;
	}
	net_info_ratelimited("IrDA: Registered device %s\n", dev->name);

	/* Check dongle id */
	dongle_id = ali_ircc_read_dongle_id(i, info);
	net_info_ratelimited("%s(), %s, Found dongle: %s\n",
			     __func__, ALI_IRCC_DRIVER_NAME,
			     dongle_types[dongle_id]);
		
	self->io.dongle_id = dongle_id;

	
	return 0;

 err_out4:
	dma_free_coherent(NULL, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
 err_out3:
	dma_free_coherent(NULL, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
 err_out2:
	release_region(self->io.fir_base, self->io.fir_ext);
 err_out1:
	dev_self[i] = NULL;
	free_netdev(dev);
	return err;
}
Example #9
int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
{
	int uninitialized_var(err);
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_flowlabel_req freq;
	struct ipv6_fl_socklist *sfl1 = NULL;
	struct ipv6_fl_socklist *sfl;
	struct ipv6_fl_socklist __rcu **sflp;
	struct ip6_flowlabel *fl, *fl1 = NULL;


	if (optlen < sizeof(freq))
		return -EINVAL;

	if (copy_from_user(&freq, optval, sizeof(freq)))
		return -EFAULT;

	switch (freq.flr_action) {
	case IPV6_FL_A_PUT:
		if (freq.flr_flags & IPV6_FL_F_REFLECT) {
			if (sk->sk_protocol != IPPROTO_TCP)
				return -ENOPROTOOPT;
			if (!np->repflow)
				return -ESRCH;
			np->flow_label = 0;
			np->repflow = 0;
			return 0;
		}
		spin_lock_bh(&ip6_sk_fl_lock);
		for (sflp = &np->ipv6_fl_list;
		     (sfl = rcu_dereference(*sflp)) != NULL;
		     sflp = &sfl->next) {
			if (sfl->fl->label == freq.flr_label) {
				if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
					np->flow_label &= ~IPV6_FLOWLABEL_MASK;
				*sflp = rcu_dereference(sfl->next);
				spin_unlock_bh(&ip6_sk_fl_lock);
				fl_release(sfl->fl);
				kfree_rcu(sfl, rcu);
				return 0;
			}
		}
		spin_unlock_bh(&ip6_sk_fl_lock);
		return -ESRCH;

	case IPV6_FL_A_RENEW:
		rcu_read_lock_bh();
		for_each_sk_fl_rcu(np, sfl) {
			if (sfl->fl->label == freq.flr_label) {
				err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
				rcu_read_unlock_bh();
				return err;
			}
		}
		rcu_read_unlock_bh();

		if (freq.flr_share == IPV6_FL_S_NONE &&
		    ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			fl = fl_lookup(net, freq.flr_label);
			if (fl) {
				err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
				fl_release(fl);
				return err;
			}
		}
		return -ESRCH;

	case IPV6_FL_A_GET:
		if (freq.flr_flags & IPV6_FL_F_REFLECT) {
			struct net *net = sock_net(sk);
			if (net->ipv6.sysctl.flowlabel_consistency) {
				net_info_ratelimited("Can not set IPV6_FL_F_REFLECT if flowlabel_consistency sysctl is enable\n");
				return -EPERM;
			}

			if (sk->sk_protocol != IPPROTO_TCP)
				return -ENOPROTOOPT;

			np->repflow = 1;
			return 0;
		}

		if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
			return -EINVAL;

		fl = fl_create(net, sk, &freq, optval, optlen, &err);
		if (!fl)
			return err;
		sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);

		if (freq.flr_label) {
			err = -EEXIST;
			rcu_read_lock_bh();
			for_each_sk_fl_rcu(np, sfl) {
				if (sfl->fl->label == freq.flr_label) {
					if (freq.flr_flags&IPV6_FL_F_EXCL) {
						rcu_read_unlock_bh();
						goto done;
					}
					fl1 = sfl->fl;
					atomic_inc(&fl1->users);
					break;
				}
			}
			rcu_read_unlock_bh();

			if (!fl1)
				fl1 = fl_lookup(net, freq.flr_label);
			if (fl1) {
recheck:
				err = -EEXIST;
				if (freq.flr_flags&IPV6_FL_F_EXCL)
					goto release;
				err = -EPERM;
				if (fl1->share == IPV6_FL_S_EXCL ||
				    fl1->share != fl->share ||
				    ((fl1->share == IPV6_FL_S_PROCESS) &&
				     (fl1->owner.pid == fl->owner.pid)) ||
				    ((fl1->share == IPV6_FL_S_USER) &&
				     uid_eq(fl1->owner.uid, fl->owner.uid)))
					goto release;

				err = -ENOMEM;
				if (!sfl1)
					goto release;
				if (fl->linger > fl1->linger)
					fl1->linger = fl->linger;
				if ((long)(fl->expires - fl1->expires) > 0)
					fl1->expires = fl->expires;
				fl_link(np, sfl1, fl1);
				fl_free(fl);
				return 0;

release:
				fl_release(fl1);
				goto done;
			}
		}
		err = -ENOENT;
		if (!(freq.flr_flags&IPV6_FL_F_CREATE))
			goto done;

		err = -ENOMEM;
		if (!sfl1)
			goto done;

		err = mem_check(sk);
		if (err != 0)
			goto done;

		fl1 = fl_intern(net, fl, freq.flr_label);
		if (fl1)
			goto recheck;

		if (!freq.flr_label) {
			if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
					 &fl->label, sizeof(fl->label))) {
				/* Intentionally ignore fault. */
			}
		}

		fl_link(np, sfl1, fl);
		return 0;

	default:
		return -EINVAL;
	}

done:
	fl_free(fl);
	kfree(sfl1);
	return err;
}
Example #10
static int w83977af_probe(int iobase, int irq, int dma)
{
  	int version;
	int i;
  	
 	for (i=0; i < 2; i++) {
#ifdef CONFIG_USE_W977_PNP
 		/* Enter PnP configuration mode */
		w977_efm_enter(efbase[i]);
  
 		w977_select_device(W977_DEVICE_IR, efbase[i]);
  
 		/* Configure PnP port, IRQ, and DMA channel */
 		w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
 		w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);
  
 		w977_write_reg(0x70, irq, efbase[i]);
#ifdef CONFIG_ARCH_NETWINDER
		/* Netwinder uses 1 higher than Linux */
 		w977_write_reg(0x74, dma+1, efbase[i]);
#else
 		w977_write_reg(0x74, dma, efbase[i]);   
#endif /* CONFIG_ARCH_NETWINDER */
 		w977_write_reg(0x75, 0x04, efbase[i]);  /* Disable Tx DMA */
  	
 		/* Set append hardware CRC, enable IR bank selection */	
 		w977_write_reg(0xf0, APEDCRC|ENBNKSEL, efbase[i]);
  
 		/* Activate device */
 		w977_write_reg(0x30, 0x01, efbase[i]);
  
 		w977_efm_exit(efbase[i]);
#endif /* CONFIG_USE_W977_PNP */
  		/* Disable Advanced mode */
  		switch_bank(iobase, SET2);
  		outb(iobase+2, 0x00);  
 
 		/* Turn on UART (global) interrupts */
 		switch_bank(iobase, SET0);
  		outb(HCR_EN_IRQ, iobase+HCR);
  	
  		/* Switch to advanced mode */
  		switch_bank(iobase, SET2);
  		outb(inb(iobase+ADCR1) | ADCR1_ADV_SL, iobase+ADCR1);
  
  		/* Set default IR-mode */
  		switch_bank(iobase, SET0);
  		outb(HCR_SIR, iobase+HCR);
  
  		/* Read the Advanced IR ID */
  		switch_bank(iobase, SET3);
  		version = inb(iobase+AUID);
  	
  		/* Should be 0x1? */
  		if (0x10 == (version & 0xf0)) {
 			efio = efbase[i];
 
 			/* Set FIFO size to 32 */
 			switch_bank(iobase, SET2);
 			outb(ADCR2_RXFS32|ADCR2_TXFS32, iobase+ADCR2);	
 	
 			/* Set FIFO threshold to TX17, RX16 */
 			switch_bank(iobase, SET0);	
 			outb(UFR_RXTL|UFR_TXTL|UFR_TXF_RST|UFR_RXF_RST|
			     UFR_EN_FIFO,iobase+UFR);
 
 			/* Receiver frame length */
 			switch_bank(iobase, SET4);
			outb(2048 & 0xff, iobase+6);
			outb((2048 >> 8) & 0x1f, iobase+7);

			/* 
			 * Init HP HSDL-1100 transceiver. 
			 * 
			 * Set IRX_MSL since we have 2 * receive paths IRRX, 
			 * and IRRXH. Clear IRSL0D since we want IRSL0 * to 
			 * be a input pin used for IRRXH 
			 *
			 *   IRRX  pin 37 connected to receiver 
			 *   IRTX  pin 38 connected to transmitter
			 *   FIRRX pin 39 connected to receiver      (IRSL0) 
			 *   CIRRX pin 40 connected to pin 37
			 */
			switch_bank(iobase, SET7);
			outb(0x40, iobase+7);
			
			net_info_ratelimited("W83977AF (IR) driver loaded. Version: 0x%02x\n",
					     version);
			
			return 0;
		} else {
			/* Wrong chip version; try the next extended function register address */
			pr_debug("%s(), Wrong chip version", __func__);
		}
	}
	return -1;
}
Example #11
/*
 * Function w83977af_open (iobase, irq)
 *
 *    Open driver instance
 *
 */
static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
			 unsigned int dma)
{
	struct net_device *dev;
        struct w83977af_ir *self;
	int err;

	/* Lock the port that we need */
	if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
		pr_debug("%s(), can't get iobase of 0x%03x\n",
			 __func__ , iobase);
		return -ENODEV;
	}

	if (w83977af_probe(iobase, irq, dma) == -1) {
		err = -1;
		goto err_out;
	}
	/*
	 *  Allocate new instance of the driver
	 */
	dev = alloc_irdadev(sizeof(struct w83977af_ir));
	if (dev == NULL) {
		printk( KERN_ERR "IrDA: Can't allocate memory for "
			"IrDA control block!\n");
		err = -ENOMEM;
		goto err_out;
	}

	self = netdev_priv(dev);
	spin_lock_init(&self->lock);
   

	/* Initialize IO */
	self->io.fir_base   = iobase;
        self->io.irq       = irq;
        self->io.fir_ext   = CHIP_IO_EXTENT;
        self->io.dma       = dma;
        self->io.fifo_size = 32;

	/* Initialize QoS for this device */
	irda_init_max_qos_capabilies(&self->qos);
	
	/* The only value we must override it the baudrate */

	/* FIXME: The HP HDLS-1100 does not support 1152000! */
	self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
		IR_115200|IR_576000|IR_1152000|(IR_4000000 << 8);

	/* The HP HDLS-1100 needs 1 ms according to the specs */
	self->qos.min_turn_time.bits = qos_mtt_bits;
	irda_qos_bits_to_value(&self->qos);
	
	/* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
	self->rx_buff.truesize = 14384; 
	self->tx_buff.truesize = 4000;
	
	/* Allocate memory if needed */
	self->rx_buff.head =
		dma_zalloc_coherent(NULL, self->rx_buff.truesize,
				    &self->rx_buff_dma, GFP_KERNEL);
	if (self->rx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out1;
	}

	self->tx_buff.head =
		dma_zalloc_coherent(NULL, self->tx_buff.truesize,
				    &self->tx_buff_dma, GFP_KERNEL);
	if (self->tx_buff.head == NULL) {
		err = -ENOMEM;
		goto err_out2;
	}

	self->rx_buff.in_frame = FALSE;
	self->rx_buff.state = OUTSIDE_FRAME;
	self->tx_buff.data = self->tx_buff.head;
	self->rx_buff.data = self->rx_buff.head;
	self->netdev = dev;

	dev->netdev_ops	= &w83977_netdev_ops;

	err = register_netdev(dev);
	if (err) {
		net_err_ratelimited("%s(), register_netdevice() failed!\n",
				    __func__);
		goto err_out3;
	}
	net_info_ratelimited("IrDA: Registered device %s\n", dev->name);

	/* Need to store self somewhere */
	dev_self[i] = self;
	
	return 0;
err_out3:
	dma_free_coherent(NULL, self->tx_buff.truesize,
			  self->tx_buff.head, self->tx_buff_dma);
err_out2:	
	dma_free_coherent(NULL, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
err_out1:
	free_netdev(dev);
err_out:
	release_region(iobase, CHIP_IO_EXTENT);
	return err;
}
Example #12
/* Build a new IP datagram from all its fragments. */
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
			 struct sk_buff *prev_tail, struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments);
	struct sk_buff **nextp; /* To build frag_list. */
	struct rb_node *rbn;
	int len;
	int ihlen;
	int err;
	u8 ecn;

	ipq_kill(qp);

	ecn = ip_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}
	/* Make the one we just received the head. */
	if (head != skb) {
		fp = skb_clone(skb, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;
		FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
		if (RB_EMPTY_NODE(&skb->rbnode))
			FRAG_CB(prev_tail)->next_frag = fp;
		else
			rb_replace_node(&skb->rbnode, &fp->rbnode,
					&qp->q.rb_fragments);
		if (qp->q.fragments_tail == skb)
			qp->q.fragments_tail = fp;
		skb_morph(skb, head);
		FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
		rb_replace_node(&head->rbnode, &skb->rbnode,
				&qp->q.rb_fragments);
		consume_skb(head);
		head = skb;
	}

	WARN_ON(head->ip_defrag_offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_nomem;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->truesize += clone->truesize;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(qp->q.net, clone->truesize);
		skb_shinfo(head)->frag_list = clone;
		nextp = &clone->next;
	} else {
		nextp = &skb_shinfo(head)->frag_list;
	}

	skb_push(head, head->data - skb_network_header(head));

	/* Traverse the tree in order, to build frag_list. */
	fp = FRAG_CB(head)->next_frag;
	rbn = rb_next(&head->rbnode);
	rb_erase(&head->rbnode, &qp->q.rb_fragments);
	while (rbn || fp) {
		/* fp points to the next sk_buff in the current run;
		 * rbn points to the next run.
		 */
		/* Go through the current run. */
		while (fp) {
			*nextp = fp;
			nextp = &fp->next;
			fp->prev = NULL;
			memset(&fp->rbnode, 0, sizeof(fp->rbnode));
			head->data_len += fp->len;
			head->len += fp->len;
			if (head->ip_summed != fp->ip_summed)
				head->ip_summed = CHECKSUM_NONE;
			else if (head->ip_summed == CHECKSUM_COMPLETE)
				head->csum = csum_add(head->csum, fp->csum);
			head->truesize += fp->truesize;
			fp = FRAG_CB(fp)->next_frag;
		}
		/* Move to the next run. */
		if (rbn) {
			struct rb_node *rbnext = rb_next(rbn);

			fp = rb_to_skb(rbn);
			rb_erase(rbn, &qp->q.rb_fragments);
			rbn = rbnext;
		}
	}
	sub_frag_mem_limit(qp->q.net, head->truesize);

	*nextp = NULL;
	head->next = NULL;
	head->prev = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;
	IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);

	iph = ip_hdr(head);
	iph->tot_len = htons(len);
	iph->tos |= ecn;

	/* When we set IP_DF on a refragmented skb we must also force a
	 * call to ip_fragment to avoid forwarding a DF-skb of size s while
	 * original sender only sent fragments of size f (where f < s).
	 *
	 * We only set DF/IPSKB_FRAG_PMTU if such DF fragment was the largest
	 * frag seen to avoid sending tiny DF-fragments in case skb was built
	 * from one very small df-fragment and one large non-df frag.
	 */
	if (qp->max_df_size == qp->q.max_size) {
		IPCB(head)->flags |= IPSKB_FRAG_PMTU;
		iph->frag_off = htons(IP_DF);
	} else {
		iph->frag_off = 0;
	}

	ip_send_check(iph);

	__IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	qp->q.rb_fragments = RB_ROOT;
	qp->q.fragments_tail = NULL;
	qp->q.last_run_head = NULL;
	return 0;

out_nomem:
	net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr);
out_fail:
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	return err;
}