Example No. 1
int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk->sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
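
For context, a minimal caller sketch (hypothetical, not taken from the source): sk_receive_skb() expects the socket reference to be held by the caller, since it drops it with sock_put() on every return path, and the nested argument selects bh_lock_sock_nested() for callers that may already hold another socket's lock.

/* Hypothetical caller sketch: my_proto_lookup() is an assumed helper that
 * returns a socket with a reference already held (as __llc_lookup() or
 * x25_find_socket() do in the examples below). */
static int my_proto_rcv(struct sk_buff *skb)
{
	struct sock *sk = my_proto_lookup(skb);

	if (!sk) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* nested = 1: the caller may already hold another socket lock, so
	 * the nested lockdep class is used for bh_lock_sock_nested(). */
	return sk_receive_skb(sk, skb, 1);
}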
Example No. 2
static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
{
	struct sock *sk = (struct sock *)tport->usr_handle;
	u32 res;

	/*
	 * Process message if socket is unlocked; otherwise add to backlog queue
	 *
	 * This code is based on sk_receive_skb(), but must be distinct from it
	 * since a TIPC-specific filter/reject mechanism is utilized
	 */

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		res = filter_rcv(sk, buf);
	} else {
		if (sk_add_backlog(sk, buf))
			res = TIPC_ERR_OVERLOAD;
		else
			res = TIPC_OK;
	}
	bh_unlock_sock(sk);

	return res;
}
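
A hedged sketch of how a caller might act on the result (the helper name tipc_port_deliver is hypothetical; the real TIPC port layer is not reproduced here): any result other than TIPC_OK, such as the TIPC_ERR_OVERLOAD produced when the backlog is full, would typically be bounced back to the sender.

/* Hypothetical caller sketch; tipc_port_deliver() is an assumed name. */
static void tipc_port_deliver(struct tipc_port *tport, struct sk_buff *buf)
{
	u32 res = dispatch(tport, buf);

	if (res != TIPC_OK)
		tipc_reject_msg(buf, res);	/* e.g. TIPC_ERR_OVERLOAD */
}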
Example No. 3
void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
{
	struct llc_addr saddr, daddr;
	struct sock *sk;

	llc_pdu_decode_sa(skb, saddr.mac);
	llc_pdu_decode_ssap(skb, &saddr.lsap);
	llc_pdu_decode_da(skb, daddr.mac);
	llc_pdu_decode_dsap(skb, &daddr.lsap);

	sk = __llc_lookup(sap, &saddr, &daddr);
	if (!sk)
		goto drop;

	bh_lock_sock(sk);
	/*
	 * This has to be done here and not at the upper layer ->accept
	 * method because of the way the PROCOM state machine works:
	 * it needs to set several state variables (see, for instance,
	 * llc_adm_actions_2 in net/llc/llc_c_st.c) and send a packet to
	 * the originator of the new connection, and this state has to be
	 * in the newly created struct sock private area. -acme
	 */
	if (unlikely(sk->sk_state == TCP_LISTEN)) {
		struct sock *newsk = llc_create_incoming_sock(sk, skb->dev,
							      &saddr, &daddr);
		if (!newsk)
			goto drop_unlock;
		skb_set_owner_r(skb, newsk);
	} else {
		/*
		 * Can't be skb_set_owner_r; that will be done later in the
		 * llc_conn_state_process function, when skb_queue_rcv_skb is
		 * used to send it to the upper layers. This is another trick
		 * required to cope with how the PROCOM state machine
		 * works. -acme
		 */
		skb->sk = sk;
	}
	if (!sock_owned_by_user(sk))
		llc_conn_rcv(sk, skb);
	else {
		llc_set_backlog_type(skb, LLC_PACKET);
		if (sk_add_backlog(sk, skb))
			goto drop_unlock;
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return;
drop:
	kfree_skb(skb);
	return;
drop_unlock:
	kfree_skb(skb);
	goto out;
}
Example No. 4
static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
{
	struct sock *sk;
	unsigned short frametype;
	unsigned int lci;

	frametype = skb->data[2];
	lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);

	/*
	 *	An LCI of zero is always for us, and it's always a link control
	 *	frame.
	 */
	if (lci == 0) {
		x25_link_control(skb, nb, frametype);
		return 0;
	}

	/*
	 *	Find an existing socket.
	 */
	if ((sk = x25_find_socket(lci, nb)) != NULL) {
		int queued = 1;

		skb->h.raw = skb->data;
		bh_lock_sock(sk);
		if (!sock_owned_by_user(sk)) {
			queued = x25_process_rx_frame(sk, skb);
		} else {
			sk_add_backlog(sk, skb);
		}
		bh_unlock_sock(sk);
		return queued;
	}

	/*
	 *	Is it a Call Request? If so, process it.
	 */
	if (frametype == X25_CALL_REQUEST)
		return x25_rx_call_request(skb, nb, lci);

	/*
	 *	It's not a Call Request, nor is it a control frame.
	 *	Let the caller throw it away.
	 */
/*
	x25_transmit_clear_request(nb, lci, 0x0D);
*/

	if (frametype != X25_CLEAR_CONFIRMATION)
		printk(KERN_DEBUG "x25_receive_data(): unknown frame type %2x\n",frametype);

	return 0;
}
Example No. 5
static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
{
	struct sock *sk;
	unsigned short frametype;
	unsigned int lci;

	if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
		return 0;

	frametype = skb->data[2];
	lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);

	if (lci == 0) {
		x25_link_control(skb, nb, frametype);
		return 0;
	}

	if ((sk = x25_find_socket(lci, nb)) != NULL) {
		int queued = 1;

		skb_reset_transport_header(skb);
		bh_lock_sock(sk);
		if (!sock_owned_by_user(sk)) {
			queued = x25_process_rx_frame(sk, skb);
		} else {
			queued = !sk_add_backlog(sk, skb);
		}
		bh_unlock_sock(sk);
		sock_put(sk);
		return queued;
	}

	if (frametype == X25_CALL_REQUEST)
		return x25_rx_call_request(skb, nb, lci);

	if (x25_forward_data(lci, nb, skb)) {
		if (frametype == X25_CLEAR_CONFIRMATION) {
			x25_clear_forward_by_lci(lci);
		}
		kfree_skb(skb);
		return 1;
	}

	if (frametype != X25_CLEAR_CONFIRMATION)
		printk(KERN_DEBUG "x25_receive_data(): unknown frame type %2x\n",frametype);

	return 0;
}
Example No. 6
/**
 *	llc_process_tmr_ev - timer backend
 *	@sk: active connection
 *	@skb: event that occurred
 *
 *	This function is called from timer callback functions. While the
 *	connection is busy (i.e. while a data frame is being sent), the timer
 *	expiration event must be queued; otherwise the event can be sent to the
 *	connection state machine directly. Queued events are processed by
 *	llc_backlog_rcv() after the data frame has been sent.
 */
static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb)
{
	if (llc_sk(sk)->state == LLC_CONN_OUT_OF_SVC) {
		printk(KERN_WARNING "%s: timer called on closed connection\n",
		       __func__);
		kfree_skb(skb);
	} else {
		if (!sock_owned_by_user(sk))
			llc_conn_state_process(sk, skb);
		else {
			llc_set_backlog_type(skb, LLC_EVENT);
			sk_add_backlog(sk, skb);
		}
	}
}
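
The kernel-doc above mentions llc_backlog_rcv(): events queued with sk_add_backlog() are not lost, they are replayed through the socket's backlog handler once the lock owner calls release_sock(). A simplified sketch of that drain loop, modelled on __release_sock() in net/core/sock.c (details and field names vary between kernel versions):

/* Simplified sketch of the backlog drain done on release_sock(); not the
 * literal kernel code. */
static void backlog_drain_sketch(struct sock *sk)
{
	struct sk_buff *skb = sk->sk_backlog.head;

	sk->sk_backlog.head = sk->sk_backlog.tail = NULL;

	while (skb) {
		struct sk_buff *next = skb->next;

		skb->next = NULL;
		/* For LLC sockets this ends up in llc_backlog_rcv(), which
		 * dispatches on the type set by llc_set_backlog_type()
		 * (LLC_PACKET vs. LLC_EVENT). */
		sk->sk_backlog_rcv(sk, skb);
		skb = next;
	}
}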
Example No. 7
static int lapd_dispatch_mph_primitive(struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct lapd_device *dev = to_lapd_dev(skb->dev);
	int queued = FALSE;

	read_lock_bh(&lapd_hash_lock);

	sk_for_each(sk, node, lapd_get_hash(dev)) {
		struct lapd_sock *lapd_sock = to_lapd_sock(sk);

		if (lapd_sock->dev == dev &&
		    sk->sk_state == LAPD_SK_STATE_MGMT) {

			struct sk_buff *new_skb;

			if (!queued) {
				new_skb = skb;
				queued = TRUE;
			} else {
				new_skb = skb_clone(skb, GFP_ATOMIC);
			}

			new_skb->sk = sk;

			lapd_bh_lock_sock(lapd_sock);

			if (!sock_owned_by_user(&lapd_sock->sk)) {
				queued = lapd_mgmt_queue_primitive(lapd_sock,
									skb);
			} else {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
				sk_add_backlog(&lapd_sock->sk, skb);
#else
				__sk_add_backlog(&lapd_sock->sk, skb);
#endif
				queued = TRUE;
			}

			lapd_bh_unlock_sock(lapd_sock);
		}
	}
	read_unlock_bh(&lapd_hash_lock);

	return queued;
}
Example No. 8
void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
{
	struct llc_addr saddr, daddr;
	struct sock *sk;

	llc_pdu_decode_sa(skb, saddr.mac);
	llc_pdu_decode_ssap(skb, &saddr.lsap);
	llc_pdu_decode_da(skb, daddr.mac);
	llc_pdu_decode_dsap(skb, &daddr.lsap);

	sk = __llc_lookup(sap, &saddr, &daddr);
	if (!sk)
		goto drop;

	bh_lock_sock(sk);
	if (unlikely(sk->sk_state == TCP_LISTEN)) {
		struct sock *newsk = llc_create_incoming_sock(sk, skb->dev,
							      &saddr, &daddr);
		if (!newsk)
			goto drop_unlock;
		skb_set_owner_r(skb, newsk);
	} else {
		skb->sk = sk;
	}
	if (!sock_owned_by_user(sk))
		llc_conn_rcv(sk, skb);
	else {
		dprintk("%s: adding to backlog...\n", __func__);
		llc_set_backlog_type(skb, LLC_PACKET);
		if (sk_add_backlog(sk, skb))
			goto drop_unlock;
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return;
drop:
	kfree_skb(skb);
	return;
drop_unlock:
	kfree_skb(skb);
	goto out;
}
Example No. 9
int sk_receive_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb, 0))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		rc = sk->sk_backlog_rcv(sk, skb);
	else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
Example No. 10
static int lapd_pass_frame_to_socket(
	struct lapd_sock *lapd_sock,
	struct sk_buff *skb)
{
	int queued;

	/* Ensure serialization within a socket */
	lapd_bh_lock_sock(lapd_sock);

	if (!sock_owned_by_user(&lapd_sock->sk)) {
		queued = lapd_dlc_recv(lapd_sock, skb);
	} else {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) 
		sk_add_backlog(&lapd_sock->sk, skb);
#else
		__sk_add_backlog(&lapd_sock->sk, skb);
#endif
		queued = 1;
	}

	lapd_bh_unlock_sock(lapd_sock);

	return queued;
}
Example No. 11
static int dn_nsp_rx_packet(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct sock *sk = NULL;
	unsigned char *ptr = (unsigned char *)skb->data;
	unsigned short reason = NSP_REASON_NL;

	skb->h.raw    = skb->data;
	cb->nsp_flags = *ptr++;

	if (decnet_debug_level & 2)
		printk(KERN_DEBUG "dn_nsp_rx: Message type 0x%02x\n", (int)cb->nsp_flags);

	if (skb->len < 2) 
		goto free_out;

	if (cb->nsp_flags & 0x83) 
		goto free_out;

	/*
	 * Returned packets...
	 * Swap src & dst and look up in the normal way.
	 */
	if (cb->rt_flags & DN_RT_F_RTS) {
		unsigned short tmp = cb->dst_port;
		cb->dst_port = cb->src_port;
		cb->src_port = tmp;
		tmp = cb->dst;
		cb->dst = cb->src;
		cb->src = tmp;
		sk = dn_find_by_skb(skb);
		goto got_it;
	}

	/*
	 * Filter out conninits and useless packet types
	 */
	if ((cb->nsp_flags & 0x0c) == 0x08) {
		switch(cb->nsp_flags & 0x70) {
			case 0x00: /* NOP */
			case 0x70: /* Reserved */
			case 0x50: /* Reserved, Phase II node init */
				goto free_out;
			case 0x10:
			case 0x60:
				sk = dn_find_listener(skb, &reason);
				goto got_it;
		}
	}

	if (skb->len < 3)
		goto free_out;

	/*
	 * Grab the destination address.
	 */
	cb->dst_port = *(unsigned short *)ptr;
	cb->src_port = 0;
	ptr += 2;

	/*
	 * If not a connack, grab the source address too.
	 */
	if (skb->len >= 5) {
		cb->src_port = *(unsigned short *)ptr;
		ptr += 2;
		skb_pull(skb, 5);
	}

	/*
	 * Find the socket to which this skb is destined.
	 */
	sk = dn_find_by_skb(skb);
got_it:
	if (sk != NULL) {
		struct dn_scp *scp = DN_SK(sk);
		int ret;

		/* Reset backoff */
		scp->nsp_rxtshift = 0;

		bh_lock_sock(sk);
		ret = NET_RX_SUCCESS;
		if (decnet_debug_level & 8)
			printk(KERN_DEBUG "NSP: 0x%02x 0x%02x 0x%04x 0x%04x %d\n",
				(int)cb->rt_flags, (int)cb->nsp_flags, 
				(int)cb->src_port, (int)cb->dst_port, 
				(int)sk->lock.users);
		if (sk->lock.users == 0)
			ret = dn_nsp_backlog_rcv(sk, skb);
		else
			sk_add_backlog(sk, skb);
		bh_unlock_sock(sk);
		sock_put(sk);

		return ret;
	}

	return dn_nsp_no_socket(skb, reason);

free_out:
	kfree_skb(skb);
	return NET_RX_DROP;
}
Example No. 12
int ax8netfilter_udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	int rc;
	int is_udplite = IS_UDPLITE(sk);

	/*
	 *	Charge it to the socket, dropping if the queue is full.
	 */
	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto drop;
	nf_reset(skb);

	if (up->encap_type) {
		/*
		 * This is an encapsulation socket so pass the skb to
		 * the socket's udp_encap_rcv() hook. Otherwise, just
		 * fall through and pass this up the UDP socket.
		 * up->encap_rcv() returns the following value:
		 * =0 if skb was successfully passed to the encap
		 *    handler or was discarded by it.
		 * >0 if skb should be passed on to UDP.
		 * <0 if skb should be resubmitted as proto -N
		 */

		/* if we're overly short, let UDP handle it */
		if (skb->len > sizeof(struct udphdr) &&
		    up->encap_rcv != NULL) {
			int ret;

			ret = (*up->encap_rcv)(sk, skb);
			if (ret <= 0) {
				UDP_INC_STATS_BH(sock_net(sk),
						 UDP_MIB_INDATAGRAMS,
						 is_udplite);
				return -ret;
			}
		}

		/* FALLTHROUGH -- it's a UDP Packet */
	}

	/*
	 * 	UDP-Lite specific tests, ignored on UDP sockets
	 */
	if ((is_udplite & UDPLITE_RECV_CC)  &&  UDP_SKB_CB(skb)->partial_cov) {

		/*
		 * MIB statistics other than incrementing the error count are
		 * disabled for the following two types of errors: these depend
		 * on the application settings, not on the functioning of the
		 * protocol stack as such.
		 *
		 * RFC 3828 here recommends (sec 3.3): "There should also be a
		 * way ... to ... at least let the receiving application block
		 * delivery of packets with coverage values less than a value
		 * provided by the application."
		 */
		if (up->pcrlen == 0) {          /* full coverage was set  */
			LIMIT_NETDEBUG(KERN_WARNING "UDPLITE: partial coverage "
				"%d while full coverage %d requested\n",
				UDP_SKB_CB(skb)->cscov, skb->len);
			goto drop;
		}
		/* The next case involves violating the min. coverage requested
		 * by the receiver. This is subtle: if receiver wants x and x is
		 * greater than the buffersize/MTU then receiver will complain
		 * that it wants x while sender emits packets of smaller size y.
		 * Therefore the above ...()->partial_cov statement is essential.
		 */
		if (UDP_SKB_CB(skb)->cscov  <  up->pcrlen) {
			LIMIT_NETDEBUG(KERN_WARNING
				"UDPLITE: coverage %d too small, need min %d\n",
				UDP_SKB_CB(skb)->cscov, up->pcrlen);
			goto drop;
		}
	}

	if (sk->sk_filter) {
		if (udp_lib_checksum_complete(skb))
			goto drop;
	}

	rc = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		rc = ax8netfilter___udp_queue_rcv_skb(sk, skb);
	else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);

	return rc;

drop:
	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
	kfree_skb(skb);
	return -1;
}
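
For completeness, the skbs queued above with sk_add_backlog() are eventually fed to the same routine: in mainline UDP the queueing function is also installed as the socket's backlog handler via struct proto. A sketch of the assumed wiring for this patched variant (the real ax8netfilter proto table is not shown in this example, so the initializer below is illustrative only):

/* Sketch only: assumed wiring of the backlog handler. Mainline udp_prot
 * sets .backlog_rcv to __udp_queue_rcv_skb(), so skbs queued with
 * sk_add_backlog() reach the same path when the lock owner releases
 * the socket. */
struct proto ax8netfilter_udp_prot_sketch = {
	.name		= "UDP",
	.backlog_rcv	= ax8netfilter___udp_queue_rcv_skb,
	/* ... remaining members elided ... */
};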