Example no. 1
/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	newskb->mac.raw = newskb->data;
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);
	netif_rx(newskb);
	return 0;
}
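Every example in this section asserts invariants with BUG_TRAP(). For reference, here is a sketch of how older kernels defined it (roughly the pre-2.6.26 definition from include/net/sock.h, before the macro was replaced by WARN_ON): a failed check only logs a warning, it never oopses.

#define BUG_TRAP(x) do { \
	if (unlikely(!(x))) { \
		/* warn, but keep running: this is a diagnostic, not a BUG() */ \
		printk(KERN_ERR "KERNEL: assertion (%s) failed at %s (%d)\n", \
		       #x, __FILE__, __LINE__); \
	} \
} while (0)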
Example no. 2
static void netlink_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->receive_queue);

	if (!sk->dead) {
		printk("Freeing alive netlink socket %p\n", sk);
		return;
	}
	BUG_TRAP(atomic_read(&sk->rmem_alloc)==0);
	BUG_TRAP(atomic_read(&sk->wmem_alloc)==0);
	BUG_TRAP(sk->protinfo.af_netlink->cb==NULL);

	kfree(sk->protinfo.af_netlink);

	atomic_dec(&netlink_sock_nr);
#ifdef NETLINK_REFCNT_DEBUG
	printk(KERN_DEBUG "NETLINK %p released, %d are still alive\n", sk, atomic_read(&netlink_sock_nr));
#endif
}
Example no. 3
void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
{
	BUG_TRAP(ifp->if_next==NULL);
	BUG_TRAP(ifp->lst_next==NULL);
#ifdef NET_REFCNT_DEBUG
	printk(KERN_DEBUG "inet6_ifa_finish_destroy\n");
#endif

	in6_dev_put(ifp->idev);

	if (del_timer(&ifp->timer))
		printk("Timer is still running, when freeing ifa=%p\n", ifp);

	if (!ifp->dead) {
		printk("Freeing alive inet6 address %p\n", ifp);
		return;
	}
	inet6_ifa_count--;
	kfree(ifp);
}
Example no. 4
/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, skb_network_offset(newskb));
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);

	netif_rx(newskb);
	return 0;
}
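Examples no. 1 and no. 4 are the same netfilter loopback helper written against the old and new sk_buff header APIs. As a rough correspondence (assuming the accessor helpers from <linux/skbuff.h> introduced around 2.6.22; the function name below is illustrative, not from either source):

/* Sketch: the same preparation step with the newer helpers, annotated with
 * the older field manipulation it replaces. */
static inline void loopback_prepare(struct sk_buff *skb)
{
	skb_reset_mac_header(skb);                 /* old: skb->mac.raw = skb->data;                  */
	__skb_pull(skb, skb_network_offset(skb));  /* old: __skb_pull(skb, skb->nh.raw - skb->data);  */
}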
Example no. 5
static void pminisock_evict(void *context, struct cached_pminisock *cmsk) {
	// printk("evict0\n");
	struct sock *sk = (struct sock *)context;
	if(cmsk->pmsk == NULL) {
		BUG();
	} else {
		BUG_TRAP(cmsk->pmsk->refCnt >= 1);
		pmsk_release(sk, cmsk->pmsk);
	}
	// printk("evict1\n");
	cmsk->pmsk = NULL;
}
Example no. 6
void dev_shutdown(struct net_device *dev)
{
	struct Qdisc *qdisc;

	write_lock(&qdisc_tree_lock);
	spin_lock_bh(&dev->queue_lock);
	qdisc = dev->qdisc_sleeping;
	dev->qdisc = &noop_qdisc;
	dev->qdisc_sleeping = &noop_qdisc;
	qdisc_destroy(qdisc);
#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
        if ((qdisc = dev->qdisc_ingress) != NULL) {
		dev->qdisc_ingress = NULL;
		qdisc_destroy(qdisc);
        }
#endif
	BUG_TRAP(dev->qdisc_list == NULL);
	BUG_TRAP(!timer_pending(&dev->watchdog_timer));
	spin_unlock_bh(&dev->queue_lock);
	write_unlock(&qdisc_tree_lock);
}
Example no. 7
static inline int parse_and_save_headers(struct HeaderContext *dst, 
			struct HeaderContext *src, struct sk_buff *skb,
			int updateFlags) {
	int my_ll_len, my_ihl;
	if(updateFlags & INCLUDE_LL) {
		src->rawBuf = src->ll_rawh = skb->mac.raw;
		src->ll_len = my_ll_len = (char*)skb->nh.raw - (char*)src->ll_rawh;

		dst->ll_rawh = dst->rawBuf;
		memcpy(dst->ll_rawh, src->ll_rawh, my_ll_len);
		dst->ll_len = my_ll_len;
		BUG_TRAP(my_ll_len == 14);

		dst->iph = (struct iphdr*)(dst->ll_rawh + my_ll_len);
	} else {
		src->rawBuf = (char*)skb->nh.iph;
		src->ll_rawh = NULL;
		src->ll_len = my_ll_len = -1;

		dst->ll_rawh = NULL;
		dst->ll_len = my_ll_len;

		dst->iph = (struct iphdr*)dst->rawBuf;
	}

	src->iph = skb->nh.iph;
	src->ihl = my_ihl = src->iph->ihl*4;

	BUG_TRAP(my_ihl == 20);
	memcpy(dst->iph, src->iph, my_ihl);
	dst->ihl = my_ihl;
#if 0
	printk("dumping iph: \n");
	hexdump(dst->iph, my_ihl);
#endif

	PULL_OR_FAIL(skb, my_ihl, "IP header too long in parse_header_and_save\n", -1);

	return 0;
}
Example no. 8
void sk_stream_kill_queues(struct sock *sk)
{
	/* First the read buffer. */
	__skb_queue_purge(&sk->sk_receive_queue);

	/* Next, the error queue. */
	__skb_queue_purge(&sk->sk_error_queue);

	/* Next, the write queue. */
	BUG_TRAP(skb_queue_empty(&sk->sk_write_queue));

	/* Account for returned memory. */
	sk_stream_mem_reclaim(sk);

	BUG_TRAP(!sk->sk_wmem_queued);
	BUG_TRAP(!sk->sk_forward_alloc);

	/* It is _impossible_ for the backlog to contain anything
	 * when we get here.  All user references to this socket
	 * have gone away, only the net layer can touch it.
	 */
}
Example no. 9
void dev_shutdown(struct device *dev)
{
	struct Qdisc *qdisc;

	start_bh_atomic();
	qdisc = dev->qdisc_sleeping;
	dev->qdisc = &noop_qdisc;
	dev->qdisc_sleeping = &noop_qdisc;
	qdisc_destroy(qdisc);
	BUG_TRAP(dev->qdisc_list == NULL);
	dev->qdisc_list = NULL;
	end_bh_atomic();
}
Example no. 10
/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	newskb->mac.raw = newskb->data;
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);

#ifdef CONFIG_NETFILTER_DEBUG
	nf_debug_ip_loopback_xmit(newskb);
#endif
	netif_rx(newskb);
	return 0;
}
Example no. 11
struct pminisock *pminisock_lookup(struct sock *sk, __u32 seqno, 
				   struct iphdr *iph, struct tcphdr *th) {
	struct ParsedPMinisock_cell *cell;
	struct pminisock_key key;

	ParsedPMinisock_cache.evictContext = sk;

#ifdef SOCKET_KEY
	key.sk = sk;
#endif
	key.seq = seqno;

	// printk("looking up seqno = %u\n", seqno);
	if(ParsedPMinisock_find_helper(&key, &cell)) {
#if 0
		if(CACHE_COMPARISON && (WireContinuation_inOutComp(cont, &cell->elem) != 0)) {
			printk("mismatch between input and continuation cache\n");;
			byte_diff(cont->hmac_start, cell->elem.copy_start, 
				  WIRECONT_MAC_LEN);
		}
		if(cont->tokenCounterBase != cell->elem.tokenCounterBase) {
			printk("Continuation (%d) hit, but %lld != %lld\n", cell->key, 
			       cont->tokenCounterBase, cell->elem.tokenCounterBase);
			return 0;
		}
#endif
		// Invariant: cache implies not in any event queue
		struct pminisock *pmsk = cell->elem.pmsk;
		BUG_TRAP(pmsk->refCnt == 1);

		if(!(  // seqno match is already verified
		     pmsk->daddr == iph->saddr &&
		     pmsk->dest == th->source)) {
			printk("flow validation failed\n");
			return NULL;
		}
		if(trickles_ratelimit()) {
			printk("Pminisock lookup does not perform validation yet\n");
		}

		// to enforce above invariant, we delete the cell from the cache
		ParsedPMinisock_deleteCell(cell);

		return pmsk;
	} else {
		// printk("Could not find\n");
	}
	return NULL;
}
Example no. 12
int dccp_disconnect(struct sock *sk, int flags)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = 0;
	const int old_state = sk->sk_state;

	if (old_state != DCCP_CLOSED)
		dccp_set_state(sk, DCCP_CLOSED);

	/*
	 * This corresponds to the ABORT function of RFC793, sec. 3.8
	 * TCP uses a RST segment, DCCP a Reset packet with Code 2, "Aborted".
	 */
	if (old_state == DCCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (dccp_need_reset(old_state)) {
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		sk->sk_err = ECONNRESET;
	} else if (old_state == DCCP_REQUESTING)
		sk->sk_err = ECONNRESET;

	dccp_clear_xmit_timers(sk);
	__skb_queue_purge(&sk->sk_receive_queue);
	if (sk->sk_send_head != NULL) {
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	inet->dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);

	icsk->icsk_backoff = 0;
	inet_csk_delack_init(sk);
	__sk_dst_reset(sk);

	BUG_TRAP(!inet->num || icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}
Example no. 13
// Invariant: When a minisocket is inserted into the cache, it MUST NOT be on any event list
int pminisock_insert(struct sock *sk, struct pminisock *pmsk) {
	BUG_TRAP(pmsk->refCnt >= 1);
	struct pminisock_key key;
#ifdef SOCKET_KEY
	key.sk = sk;
#endif
	key.seq = pmsk->seq;
	// printk("Inserting %p with key %u\n", msk ,key);
	struct ParsedPMinisock_cell *cell = ParsedPMinisock_insert_helper(&key);
	if(cell != NULL) {
		pmsk_hold(pmsk);
		cell->elem.pmsk = pmsk;
		return 1;
	} else {
		return 0;
	}
}
Example no. 14
void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
{
	unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
	struct tcf_common **p1p;

	for (p1p = &hinfo->htab[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
		if (*p1p == p) {
			write_lock_bh(hinfo->lock);
			*p1p = p->tcfc_next;
			write_unlock_bh(hinfo->lock);
			gen_kill_estimator(&p->tcfc_bstats,
					   &p->tcfc_rate_est);
			kfree(p);
			return;
		}
	}
	BUG_TRAP(0);
}
Example no. 15
static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
{
    struct in_device *in_dev = __in_dev_get_rtnl(dev);

    ASSERT_RTNL();

    if (!in_dev) {
        inet_free_ifa(ifa);
        return -ENOBUFS;
    }
    ipv4_devconf_setall(in_dev);
    if (ifa->ifa_dev != in_dev) {
        BUG_TRAP(!ifa->ifa_dev);
        in_dev_hold(in_dev);
        ifa->ifa_dev = in_dev;
    }
    if (ipv4_is_loopback(ifa->ifa_local))
        ifa->ifa_scope = RT_SCOPE_HOST;
    return inet_insert_ifa(ifa);
}
Example no. 16
int dccp_disconnect(struct sock *sk, int flags)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_sock *inet = inet_sk(sk);
	int err = 0;
	const int old_state = sk->sk_state;

	if (old_state != DCCP_CLOSED)
		dccp_set_state(sk, DCCP_CLOSED);

	/* ABORT function of RFC793 */
	if (old_state == DCCP_LISTEN) {
		inet_csk_listen_stop(sk);
	/* FIXME: do the active reset thing */
	} else if (old_state == DCCP_REQUESTING)
		sk->sk_err = ECONNRESET;

	dccp_clear_xmit_timers(sk);
	__skb_queue_purge(&sk->sk_receive_queue);
	if (sk->sk_send_head != NULL) {
		__kfree_skb(sk->sk_send_head);
		sk->sk_send_head = NULL;
	}

	inet->dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);

	icsk->icsk_backoff = 0;
	inet_csk_delack_init(sk);
	__sk_dst_reset(sk);

	BUG_TRAP(!inet->num || icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}
Example no. 17
static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
{
    struct in_device *in_dev = __in_dev_get(dev);

    ASSERT_RTNL();

    if (!in_dev) {
        in_dev = inetdev_init(dev);
        if (!in_dev) {
            inet_free_ifa(ifa);
            return -ENOBUFS;
        }
    }
    if (ifa->ifa_dev != in_dev) {
        BUG_TRAP(!ifa->ifa_dev);
        in_dev_hold(in_dev);
        ifa->ifa_dev = in_dev;
    }
    if (LOOPBACK(ifa->ifa_local))
        ifa->ifa_scope = RT_SCOPE_HOST;
    return inet_insert_ifa(ifa);
}
Example no. 18
static void tcf_police_destroy(struct tcf_police *p)
{
	unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK);
	struct tcf_common **p1p;

	for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
		if (*p1p == &p->common) {
			write_lock_bh(&police_lock);
			*p1p = p->tcf_next;
			write_unlock_bh(&police_lock);
			gen_kill_estimator(&p->tcf_bstats,
					   &p->tcf_rate_est);
			if (p->tcfp_R_tab)
				qdisc_put_rtab(p->tcfp_R_tab);
			if (p->tcfp_P_tab)
				qdisc_put_rtab(p->tcfp_P_tab);
			kfree(p);
			return;
		}
	}
	BUG_TRAP(0);
}
Example no. 19
void tcf_police_destroy(struct tcf_police *p)
{
	unsigned h = tcf_police_hash(p->index);
	struct tcf_police **p1p;
	
	for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->next) {
		if (*p1p == p) {
			write_lock_bh(&police_lock);
			*p1p = p->next;
			write_unlock_bh(&police_lock);
#ifdef CONFIG_NET_ESTIMATOR
			qdisc_kill_estimator(&p->stats);
#endif
			if (p->R_tab)
				qdisc_put_rtab(p->R_tab);
			if (p->P_tab)
				qdisc_put_rtab(p->P_tab);
			kfree(p);
			return;
		}
	}
	BUG_TRAP(0);
}
Example no. 20
/*
 * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
 * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
 * any circumstances.
 */
void dccp_send_close(struct sock *sk, const int active)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	const unsigned int prio = active ? GFP_KERNEL : GFP_ATOMIC;

	skb = alloc_skb(sk->sk_prot->max_header, prio);
	if (skb == NULL)
		return;

	/* Reserve space for headers and prepare control bits. */
	skb_reserve(skb, sk->sk_prot->max_header);
	skb->csum = 0;
	DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ?
					DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;

	skb_set_owner_w(skb, sk);
	if (active) {
		BUG_TRAP(sk->sk_send_head == NULL);
		sk->sk_send_head = skb;
		dccp_transmit_skb(sk, skb_clone(skb, prio));
	} else
		dccp_transmit_skb(sk, skb);
}
Example no. 21
static int fib6_clean_node(struct fib6_walker_t *w)
{
	int res;
	struct rt6_info *rt;
	struct fib6_cleaner_t *c = (struct fib6_cleaner_t*)w;

	for (rt = w->leaf; rt; rt = rt->u.next) {
		res = c->func(rt, c->arg);
		if (res < 0) {
			w->leaf = rt;
			res = fib6_del(rt);
			if (res) {
#if RT6_DEBUG >= 2
				printk(KERN_DEBUG "fib6_clean_node: del failed: rt=%p@%p err=%d\n", rt, rt->rt6i_node, res);
#endif
				continue;
			}
			return 0;
		}
		BUG_TRAP(res==0);
	}
	w->leaf = rt;
	return 0;
}
Example no. 22
int
skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int elt = 0;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg[elt].page = virt_to_page(skb->data + offset);
		sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg[elt].length = copy;
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg[elt].page = frag->page;
			sg[elt].offset = frag->page_offset+offset-start;
			sg[elt].length = copy;
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
				if ((len -= copy) == 0)
					return elt;
				offset += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}
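A hedged usage sketch of skb_to_sgvec(): the caller name and the digest consumer are illustrative assumptions (loosely modeled on how IPsec code of that era fed an skb into the old crypto_digest_* API), and the scatterlist is sized for the linear area plus page frags only, i.e. it assumes no frag_list.

static void digest_skb(struct crypto_tfm *tfm, struct sk_buff *skb)
{
	/* One entry for the linear data plus one per page fragment;
	 * assumes skb_shinfo(skb)->frag_list is empty. */
	struct scatterlist sg[MAX_SKB_FRAGS + 1];
	int nelts;

	nelts = skb_to_sgvec(skb, sg, 0, skb->len);
	crypto_digest_update(tfm, sg, nelts);	/* old (pre-2.6.19) digest API */
}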
Example no. 23
void skb_icv_walk(const struct sk_buff *skb, struct crypto_tfm *tfm,
		  int offset, int len, icv_update_fn_t icv_update)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct scatterlist sg;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		
		sg.page = virt_to_page(skb->data + offset);
		sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg.length = copy;
		
		icv_update(tfm, &sg, 1);
		
		if ((len -= copy) == 0)
			return;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			
			sg.page = frag->page;
			sg.offset = frag->page_offset + offset-start;
			sg.length = copy;
			
			icv_update(tfm, &sg, 1);

			if (!(len -= copy))
				return;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				skb_icv_walk(list, tfm, offset-start, copy, icv_update);
				if ((len -= copy) == 0)
					return;
				offset += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
}
Example no. 24
static inline void skb_copy_and_csum_datagram_kvec_dst(const struct sk_buff *skb, int offset, struct kvec_dst *dst, int len, unsigned int *csump)
{
	int i, copy;
	int start = skb->len - skb->data_len;
	int pos = 0;

	/* Copy header. */
	if ((copy = start-offset) > 0) {
		if (copy > len)
			copy = len;
		*csump = csum_and_copy_to_dst(dst, skb->data+offset, copy, *csump);
		if ((len -= copy) == 0)
			return;
		offset += copy;
		pos = copy;
	}

	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset+len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end-offset) > 0) {
			unsigned int csum2;
			u8  *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap_atomic(page, KM_USER1);
			csum2 = csum_and_copy_to_dst(dst,
				vaddr + frag->page_offset + offset-start,
				copy, 0);
			kunmap_atomic(vaddr, KM_USER1);
			*csump = csum_block_add(*csump, csum2, pos);
			if (!(len -= copy))
				return;
			offset += copy;
			pos += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list;

		for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
			int end;

			BUG_TRAP(start <= offset+len);

			end = start + list->len;
			if ((copy = end-offset) > 0) {
				unsigned int csum2 = 0;
				if (copy > len)
					copy = len;
				skb_copy_and_csum_datagram_kvec_dst(list, offset-start, dst, copy, &csum2);
				*csump = csum_block_add(*csump, csum2, pos);
				if ((len -= copy) == 0)
					return;
				offset += copy;
				pos += copy;
			}
			start = end;
		}
	}
}
Example no. 25
static inline void skb_copy_datagram_kvec_dst(const struct sk_buff *skb,
		int offset, struct kvec_dst *dst, int len)
{
	int i, copy;
	int start = skb->len - skb->data_len;

	/* Copy header. */
	if ((copy = start-offset) > 0) {
		if (copy > len)
			copy = len;
		memcpy_to_kvec_dst(dst, skb->data + offset, copy);
		if ((len -= copy) == 0)
			return;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset+len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end-offset) > 0) {
			u8  *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap_atomic(page, KM_USER1);
			memcpy_to_kvec_dst(dst, vaddr + frag->page_offset +
					     offset-start, copy);
			kunmap_atomic(vaddr, KM_USER1);
			if (!(len -= copy))
				return;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list;

		for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
			int end;

			BUG_TRAP(start <= offset+len);

			end = start + list->len;
			if ((copy = end-offset) > 0) {
				if (copy > len)
					copy = len;
				skb_copy_datagram_kvec_dst(list, offset-start, dst, copy);
				if ((len -= copy) == 0)
					return;
				offset += copy;
			}
			start = end;
		}
	}
}
Example no. 26
int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int *csump)
{
	int i, copy;
	int start = skb->len - skb->data_len;
	int pos = 0;

	/* Copy header. */
	if ((copy = start-offset) > 0) {
		int err = 0;
		if (copy > len)
			copy = len;
		*csump = csum_and_copy_to_user(skb->data+offset, to, copy, *csump, &err);
		if (err)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset+len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end-offset) > 0) {
			unsigned int csum2;
			int err = 0;
			u8  *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			csum2 = csum_and_copy_to_user(vaddr + frag->page_offset +
						      offset-start, to, copy, 0, &err);
			kunmap(page);
			if (err)
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if (!(len -= copy))
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list;

		for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
			int end;

			BUG_TRAP(start <= offset+len);

			end = start + list->len;
			if ((copy = end-offset) > 0) {
				unsigned int csum2 = 0;
				if (copy > len)
					copy = len;
				if (skb_copy_and_csum_datagram(list, offset-start, to, copy, &csum2))
					goto fault;
				*csump = csum_block_add(*csump, csum2, pos);
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
				to += copy;
				pos += copy;
			}
			start = end;
		}
	}
	if (len == 0)
		return 0;

fault:
	return -EFAULT;
}
Example no. 27
/*
 *	Copy a datagram to an iovec.
 *	Note: the iovec is modified during the copy.
 */
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset, struct iovec *to,
			    int len)
{
	int i, copy;
	int start = skb->len - skb->data_len;

	/* Copy header. */
	if ((copy = start-offset) > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovec(to, skb->data + offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i=0; i<skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset+len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end-offset) > 0) {
			int err;
			u8  *vaddr;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			struct page *page = frag->page;

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovec(to, vaddr + frag->page_offset +
					     offset-start, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list;

		for (list = skb_shinfo(skb)->frag_list; list; list=list->next) {
			int end;

			BUG_TRAP(start <= offset+len);

			end = start + list->len;
			if ((copy = end-offset) > 0) {
				if (copy > len)
					copy = len;
				if (skb_copy_datagram_iovec(list, offset-start, to, copy))
					goto fault;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
			}
			start = end;
		}
	}
	if (len == 0)
		return 0;

fault:
	return -EFAULT;
}
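For context, a hedged sketch of how a datagram protocol's recvmsg path typically calls this helper (loosely modeled on the old udp_recvmsg(); the wrapper name and the 8-byte UDP header skip are assumptions for illustration):

static int copy_udp_payload(struct sk_buff *skb, struct msghdr *msg, int copied)
{
	/* Skip the UDP header and copy 'copied' bytes of payload into the
	 * user-supplied iovec; a non-zero return means the user buffer faulted. */
	return skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
				       msg->msg_iov, copied);
}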
Example no. 28
void dccp_close(struct sock *sk, long timeout)
{
	struct sk_buff *skb;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	/*
	 * We need to flush the recv. buffs.  We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	/* FIXME: check for unread data */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		__kfree_skb(skb);
	}

	if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (dccp_close_state(sk)) {
		dccp_send_close(sk, 1);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);
	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	BUG_TRAP(!sock_owned_by_user(sk));

	sock_hold(sk);
	sock_orphan(sk);

	/*
	 * The last release_sock may have processed the CLOSE or RESET
	 * packet moving sock to CLOSED state, if not we have to fire
	 * the CLOSE/CLOSEREQ retransmission timer, see "8.3. Termination"
	 * in draft-ietf-dccp-spec-11. -acme
	 */
	if (sk->sk_state == DCCP_CLOSING) {
		/* FIXME: should start at 2 * RTT */
		/* Timer for repeating the CLOSE/CLOSEREQ until an answer. */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  inet_csk(sk)->icsk_rto,
					  DCCP_RTO_MAX);
#if 0
		/* Yeah, we should use sk->sk_prot->orphan_count, etc */
		dccp_set_state(sk, DCCP_CLOSED);
#endif
	}

	atomic_inc(sk->sk_prot->orphan_count);
	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}
Example no. 29
void dccp_close(struct sock *sk, long timeout)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	u32 data_was_unread = 0;
	int state;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	sk_stop_timer(sk, &dp->dccps_xmit_timer);

	/*
	 * We need to flush the recv. buffs.  We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		data_was_unread += skb->len;
		__kfree_skb(skb);
	}

	if (data_was_unread) {
		/* Unread data was tossed, send an appropriate Reset Code */
		DCCP_WARN("DCCP: ABORT -- %u bytes unread\n", data_was_unread);
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		dccp_set_state(sk, DCCP_CLOSED);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (sk->sk_state != DCCP_CLOSED) {
		dccp_terminate_connection(sk);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);
	atomic_inc(sk->sk_prot->orphan_count);

	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);
	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	BUG_TRAP(!sock_owned_by_user(sk));

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
		goto out;

	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}
Example no. 30
static void l2tp_net_recv_core(int fd, struct sockaddr_in const *from, struct msghdr *msg, int recv_len, struct in_pktinfo *ipi)
{
	int result = -EBADMSG;
	struct l2tp_peer *peer = NULL;
	struct l2tp_tunnel *tunnel = NULL;
	char *peer_host_name = NULL;
	struct l2tp_packet *pkt = NULL;
	int frag;
	uint16_t ver;
	int is_data;
	int has_seq;
	void *payload;
	uint16_t msg_type;

	if (recv_len < L2TP_MIN_FRAME_LEN) {
		l2tp_stats.short_frames++;
		goto out;
	}

	/* Allocate a pkt to hold info about this packet. 
	 * Once we've done this, there's no need to use the struct msghdr.
	 */
	pkt = l2tp_pkt_alloc(msg->msg_iovlen);
	if (pkt == NULL) {
		result = -ENOMEM;
		l2tp_stats.no_control_frame_resources++;
		goto out;
	}
	for (frag = 0; frag < msg->msg_iovlen; frag++) {
		pkt->buf[frag].data = msg->msg_iov[frag].iov_base;
		pkt->buf[frag].data_len = MIN(recv_len, msg->msg_iov[frag].iov_len);
	}
	pkt->total_len = recv_len;

	/* Parse the L2TP packet header */
	l2tp_net_parse_header(pkt, &ver, &is_data, &has_seq, &payload);
	if (ver != 2) {
		l2tp_stats.wrong_version_frames++;
		goto out;
	}

	/* If this is a data frame, return now. This should be handled by
	 * the kernel's L2TP code.
	 */
	if (is_data) {
		l2tp_stats.unexpected_data_frames++;
		goto out;
	}

#ifdef L2TP_TEST
	if (l2tp_test_is_fake_rx_drop(pkt->tunnel_id, pkt->session_id)) {
		L2TP_DEBUG(L2TP_DATA, "%s: fake rx drop: tid=%hu/%hu, len=%d", __FUNCTION__,
			   pkt->tunnel_id, pkt->session_id, recv_len);
		goto out;
	}
#endif /* L2TP_TEST */

	L2TP_DEBUG(L2TP_PROTOCOL, "%s: received len %d tunl %hu ses %hu, from fd %d", __FUNCTION__,
		   recv_len, pkt->tunnel_id, pkt->session_id, fd);
	if (pkt->avp_len > 0) {
		/* Don't count ZLBs as received frames */
		l2tp_stats.total_rcvd_control_frames++;
	}
	if ((pkt->avp_offset + pkt->avp_len) > recv_len) {
		l2tp_stats.bad_rcvd_frames++;
		goto out;
	}

	/* If tunnel_id non-zero, find tunnel context by id and if not found, discard the frame.
	 * If tunnel_id is zero, pre-parse the L2TP packet looking for a HOSTNAME attribute
	 * which is mandatory for all messages when tunnel_id is zero. Use the name there
	 * to locate the peer profile, then create the tunnel context.
	 */
	if (pkt->tunnel_id != 0) {
		/* Simple case - tunnel_id non-zero. If we can't find a tunnel context, bail */
		tunnel = l2tp_tunnel_find_by_id(pkt->tunnel_id);
		if (tunnel == NULL) {
			l2tp_stats.no_matching_tunnel_id_discards++;
			goto out;
		}
		peer = l2tp_tunnel_get_peer(tunnel);
	} else {
		/* Complicated case - tunnel_id zero. Since we support incoming L2TP tunnel
		 * setup requests, we must create internal contexts in order to handle the
		 * request. However, we should only do this for SCCRQ messages...
		 */
		struct l2tp_peer_profile *peer_profile;

		result = l2tp_avp_preparse(payload, pkt->avp_len, &peer_host_name, &msg_type);
		if (result < 0) {
			if (result != -ENOMEM) {
				if (from != NULL) {
					L2TP_DEBUG(L2TP_PROTOCOL, "%s: dropping non-SCCRQ from %x/%hu on fd %d", __FUNCTION__, 
						   ntohl(from->sin_addr.s_addr), ntohs(from->sin_port), fd);
				} else {
					L2TP_DEBUG(L2TP_PROTOCOL, "%s: dropping non-SCCRQ on fd %d", __FUNCTION__, fd);
				}
				l2tp_stats.no_matching_tunnel_id_discards++;
			}
			goto out;
		}

		/* Find a peer profile. Since this tunnel is being created by remote request, an explicit
		 * peer profile name cannot be specified by the remote peer. So we use the HOST_NAME AVP
		 * to select it. If a peer profile with that name does not exist, try to find a profile
		 * that matches the source IP address. Otherwise, we use the default profile.
		 */
		peer_profile = l2tp_peer_profile_find(peer_host_name);
		if (peer_profile == NULL) {
			peer_profile = l2tp_peer_profile_find_by_addr(from->sin_addr);
			if (peer_profile == NULL) {
				peer_profile = l2tp_peer_profile_find(L2TP_API_PEER_PROFILE_DEFAULT_PROFILE_NAME);
			}
		}
		L2TP_DEBUG(L2TP_PROTOCOL, "%s: using peer profile %s for %s (%x/%hu) on fd %d", __FUNCTION__, 
			   peer_profile->profile_name, peer_host_name, ntohl(from->sin_addr.s_addr), ntohs(from->sin_port), fd);

		/* Register a new peer context and record his addr */
		if (ipi != NULL) {
			peer = l2tp_peer_find(&ipi->ipi_addr, &from->sin_addr);
			if (peer == NULL) {
				peer = l2tp_peer_alloc(ipi->ipi_addr, from->sin_addr);
				if (peer == NULL) {
					result = -ENOMEM;
					l2tp_stats.no_control_frame_resources++;
					goto out;
				}
			}
			l2tp_peer_inc_use_count(peer);
		}

		if (l2tp_opt_trace_flags & L2TP_PROTOCOL) {
			l2tp_log(LOG_DEBUG, "PROTO: Creating new tunnel context with profile '%s' for %s (%x/%hu)",
				 peer_profile->default_tunnel_profile_name, peer_host_name, ntohl(from->sin_addr.s_addr), ntohs(from->sin_port));
		}

		tunnel = l2tp_tunnel_alloc(0, peer_profile->default_tunnel_profile_name, peer_profile->profile_name, 0, &result);
		if (tunnel == NULL) {
			if (result == -ENOMEM) {
				l2tp_stats.no_control_frame_resources++;
			}
			goto out_unlink_peer;
		}
		l2tp_tunnel_link(tunnel);
		result = l2tp_tunnel_xprt_create(peer, tunnel, from);
		if (result < 0) {
			if (result == -ENOMEM) {
				l2tp_stats.no_control_frame_resources++;
			}
			goto out_unlink_tunnel;
		}

		/* Give plugins visibility of tunnel created */
		if (l2tp_tunnel_created_hook != NULL) {
			result = (*l2tp_tunnel_created_hook)(l2tp_tunnel_peer_id(tunnel));
			if (result < 0) {
				goto out_unlink_tunnel;
			}
		}
	}

	BUG_TRAP(tunnel != NULL);

	if (!l2tp_tunnel_is_fd_connected(tunnel)) {
		if (from == NULL) {
			from = l2tp_tunnel_get_peer_addr(tunnel);
		}
		result = l2tp_net_connect_socket(l2tp_tunnel_get_fd(tunnel), from, peer, tunnel);
		if (result < 0) {
			l2tp_stats.socket_errors++;
			goto out_unlink_tunnel;
		}
	}

	l2tp_tunnel_inc_use_count(tunnel);
	result = l2tp_xprt_recv(l2tp_tunnel_get_xprt(tunnel), pkt);
	l2tp_tunnel_dec_use_count(tunnel);

out:
	/* l2tp_xprt_recv() consumes msg only if it returns 0 */
	if (result < 0) {
		if (pkt == NULL) {
			for (frag = 0; frag < msg->msg_iovlen; frag++) {
				if (msg->msg_iov[frag].iov_base != NULL) {
					free(msg->msg_iov[frag].iov_base);
				}
			}
		} else {
			l2tp_pkt_free(pkt);
		}
	}

	/* This might have been allocated by l2tp_avp_preparse() */
	if (peer_host_name != NULL) {
		free(peer_host_name);
	}

	return;

out_unlink_tunnel:
	l2tp_tunnel_dec_use_count(tunnel);
out_unlink_peer:
	l2tp_peer_dec_use_count(peer);

	l2tp_stats.tunnel_setup_failures++;
	
	goto out;
}