Example #1
0
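/*
 * Strip the variable-length DCCP header, append the skb to the socket's
 * receive queue, charge its memory to the socket with skb_set_owner_r(),
 * and wake up any reader via sk_data_ready().
 */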
static void dccp_enqueue_skb(struct sock *sk, struct sk_buff *skb)
{
	__skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	skb_set_owner_r(skb, sk);
	sk->sk_data_ready(sk, 0);
}
/**
 * filter_rcv - validate incoming message
 * @sk: socket
 * @buf: message
 *
 * Enqueues message on receive queue if acceptable; optionally handles
 * disconnect indication for a connected socket.
 *
 * Called with socket lock already taken; port lock may also be taken.
 *
 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
 */
static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
{
	struct socket *sock = sk->sk_socket;
	struct tipc_msg *msg = buf_msg(buf);
	unsigned int limit = rcvbuf_limit(sk, buf);
	u32 res = TIPC_OK;

	/* Reject message if it is wrong sort of message for socket */
	if (msg_type(msg) > TIPC_DIRECT_MSG)
		return TIPC_ERR_NO_PORT;

	if (sock->state == SS_READY) {
		if (msg_connected(msg))
			return TIPC_ERR_NO_PORT;
	} else {
		res = filter_connect(tipc_sk(sk), &buf);
		if (res != TIPC_OK || buf == NULL)
			return res;
	}

	/* Reject message if there isn't room to queue it */
	if (sk_rmem_alloc_get(sk) + buf->truesize >= limit)
		return TIPC_ERR_OVERLOAD;

	/* Enqueue message */
	TIPC_SKB_CB(buf)->handle = 0;
	__skb_queue_tail(&sk->sk_receive_queue, buf);
	skb_set_owner_r(buf, sk);

	sk->sk_data_ready(sk, 0);
	return TIPC_OK;
}
Example #3
0
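/*
 * Generic receive-queue helper: reject the skb with -ENOMEM when the
 * socket's receive buffer is full, run the socket filter, then charge the
 * skb to the socket with skb_set_owner_r(), queue it on sk_receive_queue
 * and wake the reader with the skb length cached before queueing.
 */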
int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err = 0;
	int skb_len;

	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf) {
		err = -ENOMEM;
		goto out;
	}

	err = sk_filter(sk, skb);
	if (err)
		goto out;

	skb->dev = NULL;
	skb_set_owner_r(skb, sk);

	/* Cache the SKB length before we tack it onto the receive
	 * queue.  Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;

	skb_queue_tail(&sk->sk_receive_queue, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
out:
	return err;
}
Example #4
0
/*
 * Copy of sock_queue_rcv_skb (from sock.h) without
 * bh_lock_sock() (it's already held when this is called) which
 * also allows data and other data to be queued to a socket.
 */
static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue)
{
	int err;

	/* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned int)sk->sk_rcvbuf) {
		err = -ENOMEM;
		goto out;
	}

	err = sk_filter(sk, skb);
	if (err)
		goto out;

	skb_set_owner_r(skb, sk);
	skb_queue_tail(queue, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
out:
	return err;
}
Example #5
0
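/*
 * Replay the receive queue of a checkpointed socket: read each saved skb
 * from the checkpoint image, charge it to the socket with skb_set_owner_r()
 * and append it to sk_receive_queue; skbs of any other queue type are
 * logged and dropped.
 */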
static int generic_restore_queues(struct sock *sk, struct cpt_sock_image *si,
                                  loff_t pos, struct cpt_context *ctx)
{
    loff_t endpos;

    pos = pos + si->cpt_hdrlen;
    endpos = pos + si->cpt_next;
    while (pos < endpos) {
        struct sk_buff *skb;
        __u32 type;

        skb = rst_skb(sk, &pos, NULL, &type, ctx);
        if (IS_ERR(skb)) {
            if (PTR_ERR(skb) == -EINVAL) {
                int err;

                err = rst_sock_attr(&pos, sk, ctx);
                if (err)
                    return err;
            }
            return PTR_ERR(skb);
        }

        if (type == CPT_SKB_RQ) {
            skb_set_owner_r(skb, sk);
            skb_queue_tail(&sk->sk_receive_queue, skb);
        } else {
            wprintk_ctx("strange socket queue type %u\n", type);
            kfree_skb(skb);
        }
    }
    return 0;
}
Example #6
0
/*
 * Copy of sock_queue_rcv_skb (from sock.h) without
 * bh_lock_sock() (it's already held when this is called) which
 * also allows data and other data to be queued to a socket.
 */
static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue)
{
	int err;

        /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
           number of warnings when compiling with -W --ANK
         */
        if (atomic_read(&sk->rmem_alloc) + skb->truesize >= (unsigned)sk->rcvbuf) {
        	err = -ENOMEM;
        	goto out;
        }

	err = sk_filter(sk, skb, 0);
	if (err)
		goto out;

        skb_set_owner_r(skb, sk);
        skb_queue_tail(queue, skb);

	/* This code only runs from BH or BH protected context.
	 * Therefore the plain read_lock is ok here. -DaveM
	 */
	read_lock(&sk->callback_lock);
        if (!sk->dead) {
		struct socket *sock = sk->socket;
		wake_up_interruptible(sk->sleep);
		if (sock && sock->fasync_list &&
		    !test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
			__kill_fasync(sock->fasync_list, sig, 
				    (sig == SIGURG) ? POLL_PRI : POLL_IN);
	}
	read_unlock(&sk->callback_lock);
out:
        return err;
}
Example #7
0
static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue)
{
	int err;
	int skb_len;

	
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf) {
		err = -ENOMEM;
		goto out;
	}

	err = sk_filter(sk, skb);
	if (err)
		goto out;

	skb_len = skb->len;
	skb_set_owner_r(skb, sk);
	skb_queue_tail(queue, skb);

	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
out:
	return err;
}
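
/*
 * Queue a received X.25 data frame. Partial fragments are parked on the
 * socket's fragment queue; when the final fragment arrives the pieces are
 * reassembled into one skb, which is charged to the socket with
 * skb_set_owner_r(), queued on sk_receive_queue and announced through
 * sk_data_ready().
 */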
static int x25_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
{
	struct sk_buff *skbo, *skbn = skb;
	struct x25_sock *x25 = x25_sk(sk);

	if (more) {
		x25->fraglen += skb->len;
		skb_queue_tail(&x25->fragment_queue, skb);
		skb_set_owner_r(skb, sk);
		return 0;
	}

	if (!more && x25->fraglen > 0) {	/* End of fragment */
		int len = x25->fraglen + skb->len;

		if ((skbn = alloc_skb(len, GFP_ATOMIC)) == NULL){
			kfree_skb(skb);
			return 1;
		}

		skb_queue_tail(&x25->fragment_queue, skb);

		skbn->h.raw = skbn->data;

		skbo = skb_dequeue(&x25->fragment_queue);
		memcpy(skb_put(skbn, skbo->len), skbo->data, skbo->len);
		kfree_skb(skbo);

		while ((skbo =
			skb_dequeue(&x25->fragment_queue)) != NULL) {
			skb_pull(skbo, (x25->neighbour->extended) ?
					X25_EXT_MIN_LEN : X25_STD_MIN_LEN);
			memcpy(skb_put(skbn, skbo->len), skbo->data, skbo->len);
			kfree_skb(skbo);
		}

		x25->fraglen = 0;
	}

	skb_set_owner_r(skbn, sk);
	skb_queue_tail(&sk->sk_receive_queue, skbn);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skbn->len);

	return 0;
}
Example #9
0
void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
{
	struct llc_addr saddr, daddr;
	struct sock *sk;

	llc_pdu_decode_sa(skb, saddr.mac);
	llc_pdu_decode_ssap(skb, &saddr.lsap);
	llc_pdu_decode_da(skb, daddr.mac);
	llc_pdu_decode_dsap(skb, &daddr.lsap);

	sk = __llc_lookup(sap, &saddr, &daddr);
	if (!sk)
		goto drop;

	bh_lock_sock(sk);
	/*
	 * This has to be done here and not at the upper layer ->accept
	 * method because of the way the PROCOM state machine works:
	 * it needs to set several state variables (see, for instance,
	 * llc_adm_actions_2 in net/llc/llc_c_st.c) and send a packet to
	 * the originator of the new connection, and this state has to be
	 * in the newly created struct sock private area. -acme
	 */
	if (unlikely(sk->sk_state == TCP_LISTEN)) {
		struct sock *newsk = llc_create_incoming_sock(sk, skb->dev,
							      &saddr, &daddr);
		if (!newsk)
			goto drop_unlock;
		skb_set_owner_r(skb, newsk);
	} else {
		/*
		 * Can't be skb_set_owner_r, this will be done at the
		 * llc_conn_state_process function, later on, when we will use
		 * skb_queue_rcv_skb to send it to upper layers, this is
		 * another trick required to cope with how the PROCOM state
		 * machine works. -acme
		 */
		skb->sk = sk;
	}
	if (!sock_owned_by_user(sk))
		llc_conn_rcv(sk, skb);
	else {
		llc_set_backlog_type(skb, LLC_PACKET);
		if (sk_add_backlog(sk, skb))
			goto drop_unlock;
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return;
drop:
	kfree_skb(skb);
	return;
drop_unlock:
	kfree_skb(skb);
	goto out;
}
Example #10
0
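/*
 * End of incoming data on a DCCP socket: mark the receive side shut down,
 * flag the socket as done, strip the DCCP header and queue the skb on the
 * receive queue (charged to the socket) before waking the reader.
 */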
static void dccp_fin(struct sock *sk, struct sk_buff *skb)
{
	sk->sk_shutdown |= RCV_SHUTDOWN;
	sock_set_flag(sk, SOCK_DONE);
	__skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	skb_set_owner_r(skb, sk);
	sk->sk_data_ready(sk, 0);
}
Example #11
0
/*
 * Allocate a skb from the socket's receive buffer.
 */ 
struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, int priority)
{
	if (force || atomic_read(&sk->rmem_alloc) < sk->rcvbuf) {
		struct sk_buff *skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_r(skb, sk);
			return skb;
		}
	}
	return NULL;
}
Example #12
0
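/*
 * Common body of the LLC connection timer callbacks: allocate an empty
 * event skb, charge it to the socket with skb_set_owner_r(), tag it with
 * the timer type and run the connection state machine on it, all under the
 * socket lock.
 */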
static void llc_conn_tmr_common_cb(unsigned long timeout_data, u8 type)
{
	struct sock *sk = (struct sock *)timeout_data;
	struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);

	bh_lock_sock(sk);
	if (skb) {
		struct llc_conn_state_ev *ev = llc_conn_ev(skb);

		skb_set_owner_r(skb, sk);
		ev->type = type;
		llc_process_tmr_ev(sk, skb);
	}
	bh_unlock_sock(sk);
}
Example #13
0
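/*
 * Charge a management primitive to the LAPD socket's receive buffer, queue
 * it on the receive queue and notify the reader unless the socket is dead.
 * Always reports success (TRUE).
 */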
static int lapd_mgmt_queue_primitive(
	struct lapd_sock *lapd_sock,
	struct sk_buff *skb)
{
	int skb_len = skb->len;

	skb_set_owner_r(skb, &lapd_sock->sk);

	skb_queue_tail(&lapd_sock->sk.sk_receive_queue, skb);

	if (!sock_flag(&lapd_sock->sk, SOCK_DEAD))
		lapd_sock->sk.sk_data_ready(&lapd_sock->sk, skb_len);

	return TRUE;
}
Example #14
0
/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; all error
 * checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
		long timeo, struct sock *ssk)
{
	struct netlink_opt *nlk;

	nlk = nlk_sk(sk);

#ifdef NL_EMULATE_DEV
	if (nlk->handler)
		return 0;
#endif
	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
	    test_bit(0, &nlk->state)) {
		DECLARE_WAITQUEUE(wait, current);
		if (!timeo) {
			if (!ssk || nlk_sk(ssk)->pid == 0)
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&nlk->wait, &wait);

		if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
		     test_bit(0, &nlk->state)) &&
		    !sock_flag(sk, SOCK_DEAD))
			timeo = schedule_timeout(timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nlk->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(timeo);
		}
		return 1;
	}
	skb_orphan(skb);
	skb_set_owner_r(skb, sk);
	return 0;
}
Example #15
0
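/*
 * Deliver one broadcast skb to a netlink socket: if the receive buffer has
 * room and the socket is not congested, orphan the skb, charge it to this
 * socket with skb_set_owner_r(), queue it and wake the reader; returns 0 on
 * success and -1 if the socket cannot take the message.
 */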
static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
#ifdef NL_EMULATE_DEV
	if (sk->protinfo.af_netlink->handler) {
		skb_orphan(skb);
		sk->protinfo.af_netlink->handler(sk->protocol, skb);
		return 0;
	} else
#endif
	if (atomic_read(&sk->rmem_alloc) <= sk->rcvbuf &&
	    !test_bit(0, &sk->protinfo.af_netlink->state)) {
                skb_orphan(skb);
		skb_set_owner_r(skb, sk);
		skb_queue_tail(&sk->receive_queue, skb);
		sk->data_ready(sk, skb->len);
		return 0;
	}
	return -1;
}
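
/*
 * Same connection demultiplexer as in the earlier llc_conn_handler example,
 * with its explanatory comments stripped: look up the socket for the
 * incoming PDU, hand a listening socket a newly created child that owns the
 * skb, and either process the frame at once or park it on the backlog.
 */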
void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
{
	struct llc_addr saddr, daddr;
	struct sock *sk;

	llc_pdu_decode_sa(skb, saddr.mac);
	llc_pdu_decode_ssap(skb, &saddr.lsap);
	llc_pdu_decode_da(skb, daddr.mac);
	llc_pdu_decode_dsap(skb, &daddr.lsap);

	sk = __llc_lookup(sap, &saddr, &daddr);
	if (!sk)
		goto drop;

	bh_lock_sock(sk);
	if (unlikely(sk->sk_state == TCP_LISTEN)) {
		struct sock *newsk = llc_create_incoming_sock(sk, skb->dev,
							      &saddr, &daddr);
		if (!newsk)
			goto drop_unlock;
		skb_set_owner_r(skb, newsk);
	} else {
		skb->sk = sk;
	}
	if (!sock_owned_by_user(sk))
		llc_conn_rcv(sk, skb);
	else {
		dprintk("%s: adding to backlog...\n", __func__);
		llc_set_backlog_type(skb, LLC_PACKET);
		if (sk_add_backlog(sk, skb))
			goto drop_unlock;
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return;
drop:
	kfree_skb(skb);
	return;
drop_unlock:
	kfree_skb(skb);
	goto out;
}
Example #17
0
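/*
 * Variant of the broadcast delivery helper above: on success the return
 * value reports whether the receive buffer is now over its limit (1) or
 * not (0), while -1 still means the skb could not be queued.
 */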
static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
{
	struct netlink_opt *nlk = nlk_sk(sk);
#ifdef NL_EMULATE_DEV
	if (nlk->handler) {
		skb_orphan(skb);
		nlk->handler(sk->sk_protocol, skb);
		return 0;
	} else
#endif
	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
	    !test_bit(0, &nlk->state)) {
                skb_orphan(skb);
		skb_set_owner_r(skb, sk);
		skb_queue_tail(&sk->sk_receive_queue, skb);
		sk->sk_data_ready(sk, skb->len);
		return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
	}
	return -1;
}
Example #18
0
/* If an incoming SYN or SYNACK frame contains a payload and/or FIN,
 * queue this additional data / FIN.
 */
void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (TCP_SKB_CB(skb)->end_seq == tp->rcv_nxt)
		return;

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	skb_dst_drop(skb);
	/* segs_in has been initialized to 1 in tcp_create_openreq_child().
	 * Hence, reset segs_in to 0 before calling tcp_segs_in()
	 * to avoid double counting.  Also, tcp_segs_in() expects
	 * skb->len to include the tcp_hdrlen.  Hence, it should
	 * be called before __skb_pull().
	 */
	tp->segs_in = 0;
	tcp_segs_in(tp, skb);
	__skb_pull(skb, tcp_hdrlen(skb));
	sk_forced_mem_schedule(sk, skb->truesize);
	skb_set_owner_r(skb, sk);

	TCP_SKB_CB(skb)->seq++;
	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_SYN;

	tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	tp->syn_data_acked = 1;

	/* u64_stats_update_begin(&tp->syncp) not needed here,
	 * as we certainly are not changing upper 32bit value (0)
	 */
	tp->bytes_received = skb->len;

	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
		tcp_fin(sk);
}
Example #19
0
/* Queue an skb to an actively connected sock.
 * Socket lock must be held. */
static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *hdr = pnp_hdr(skb);
	int err = NET_RX_SUCCESS;

	switch (hdr->message_id) {
	case PNS_PIPE_ALIGNED_DATA:
		__skb_pull(skb, 1);
		/* fall through */
	case PNS_PIPE_DATA:
		__skb_pull(skb, 3); /* Pipe data header */
		if (!pn_flow_safe(pn->rx_fc)) {
			err = sock_queue_rcv_skb(sk, skb);
			if (!err)
				return NET_RX_SUCCESS;
			err = NET_RX_DROP;
			break;
		}

		if (pn->rx_credits == 0) {
			atomic_inc(&sk->sk_drops);
			err = NET_RX_DROP;
			break;
		}
		pn->rx_credits--;
		skb->dev = NULL;
		skb_set_owner_r(skb, sk);
		err = skb->len;
		skb_queue_tail(&sk->sk_receive_queue, skb);
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_data_ready(sk, err);
		return NET_RX_SUCCESS;

	case PNS_PEP_CONNECT_RESP:
		if (sk->sk_state != TCP_SYN_SENT)
			break;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);
		if (pep_connresp_rcv(sk, skb)) {
			sk->sk_state = TCP_CLOSE_WAIT;
			break;
		}
		if (pn->init_enable == PN_PIPE_DISABLE)
			sk->sk_state = TCP_SYN_RECV;
		else {
			sk->sk_state = TCP_ESTABLISHED;
			pipe_start_flow_control(sk);
		}
		break;

	case PNS_PEP_ENABLE_RESP:
		if (sk->sk_state != TCP_SYN_SENT)
			break;

		if (pep_enableresp_rcv(sk, skb)) {
			sk->sk_state = TCP_CLOSE_WAIT;
			break;
		}

		sk->sk_state = TCP_ESTABLISHED;
		pipe_start_flow_control(sk);
		break;

	case PNS_PEP_DISCONNECT_RESP:
		/* sock should already be dead, nothing to do */
		break;

	case PNS_PEP_STATUS_IND:
		pipe_rcv_status(sk, skb);
		break;
	}
	kfree_skb(skb);
	return err;
}
/*
 * State machine for state 3, Connected State.
 * The handling of the timer(s) is in file x25_timer.c
 * Handling of state 0 and connection release is in af_x25.c.
 */
static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
{
	int queued = 0;
	int modulus;
	struct x25_sock *x25 = x25_sk(sk);

	modulus = (x25->neighbour->extended) ? X25_EMODULUS : X25_SMODULUS;

	switch (frametype) {

		case X25_RESET_REQUEST:
			x25_write_internal(sk, X25_RESET_CONFIRMATION);
			x25_stop_timer(sk);
			x25->condition = 0x00;
			x25->vs        = 0;
			x25->vr        = 0;
			x25->va        = 0;
			x25->vl        = 0;
			x25_requeue_frames(sk);
			break;

		case X25_CLEAR_REQUEST:
			x25_write_internal(sk, X25_CLEAR_CONFIRMATION);
			x25_disconnect(sk, 0, skb->data[3], skb->data[4]);
			break;

		case X25_RR:
		case X25_RNR:
			if (!x25_validate_nr(sk, nr)) {
				x25_clear_queues(sk);
				x25_write_internal(sk, X25_RESET_REQUEST);
				x25_start_t22timer(sk);
				x25->condition = 0x00;
				x25->vs        = 0;
				x25->vr        = 0;
				x25->va        = 0;
				x25->vl        = 0;
				x25->state     = X25_STATE_4;
			} else {
				x25_frames_acked(sk, nr);
				if (frametype == X25_RNR) {
					x25->condition |= X25_COND_PEER_RX_BUSY;
				} else {
					x25->condition &= ~X25_COND_PEER_RX_BUSY;
				}
			}
			break;

		case X25_DATA:	/* XXX */
			x25->condition &= ~X25_COND_PEER_RX_BUSY;
			if ((ns != x25->vr) || !x25_validate_nr(sk, nr)) {
				x25_clear_queues(sk);
				x25_write_internal(sk, X25_RESET_REQUEST);
				x25_start_t22timer(sk);
				x25->condition = 0x00;
				x25->vs        = 0;
				x25->vr        = 0;
				x25->va        = 0;
				x25->vl        = 0;
				x25->state     = X25_STATE_4;
				break;
			}
			x25_frames_acked(sk, nr);
			if (ns == x25->vr) {
				if (x25_queue_rx_frame(sk, skb, m) == 0) {
					x25->vr = (x25->vr + 1) % modulus;
					queued = 1;
				} else {
					/* Should never happen */
					x25_clear_queues(sk);
					x25_write_internal(sk, X25_RESET_REQUEST);
					x25_start_t22timer(sk);
					x25->condition = 0x00;
					x25->vs        = 0;
					x25->vr        = 0;
					x25->va        = 0;
					x25->vl        = 0;
					x25->state     = X25_STATE_4;
					break;
				}
				if (atomic_read(&sk->sk_rmem_alloc) >
				    (sk->sk_rcvbuf / 2))
					x25->condition |= X25_COND_OWN_RX_BUSY;
			}
			/*
			 *	If the window is full Ack it immediately, else
			 *	start the holdback timer.
			 */
			if (((x25->vl + x25->facilities.winsize_in) % modulus) == x25->vr) {
				x25->condition &= ~X25_COND_ACK_PENDING;
				x25_stop_timer(sk);
				x25_enquiry_response(sk);
			} else {
				x25->condition |= X25_COND_ACK_PENDING;
				x25_start_t2timer(sk);
			}
			break;

		case X25_INTERRUPT_CONFIRMATION:
			x25->intflag = 0;
			break;

		case X25_INTERRUPT:
			if (sock_flag(sk, SOCK_URGINLINE))
				queued = !sock_queue_rcv_skb(sk, skb);
			else {
				skb_set_owner_r(skb, sk);
				skb_queue_tail(&x25->interrupt_in_queue, skb);
				queued = 1;
			}
			sk_send_sigurg(sk);
			x25_write_internal(sk, X25_INTERRUPT_CONFIRMATION);
			break;

		default:
			printk(KERN_WARNING "x25: unknown %02X in state 3\n", frametype);
			break;
	}

	return queued;
}
Example #21
0
static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
	ax25_address *dev_addr, struct packet_type *ptype)
{
	ax25_address src, dest, *next_digi = NULL;
	int type = 0, mine = 0, dama;
	struct sock *make, *sk;
	ax25_digi dp, reverse_dp;
	ax25_cb *ax25;
	ax25_dev *ax25_dev;

	/*
	 *	Process the AX.25/LAPB frame.
	 */

	skb_reset_transport_header(skb);

	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) {
		kfree_skb(skb);
		return 0;
	}

	/*
	 *	Parse the address header.
	 */

	if (ax25_addr_parse(skb->data, skb->len, &src, &dest, &dp, &type, &dama) == NULL) {
		kfree_skb(skb);
		return 0;
	}

	/*
	 *	Ours perhaps ?
	 */
	if (dp.lastrepeat + 1 < dp.ndigi)		/* Not yet digipeated completely */
		next_digi = &dp.calls[dp.lastrepeat + 1];

	/*
	 *	Pull off the AX.25 headers, leaving the CTRL/PID bytes
	 */
	skb_pull(skb, ax25_addr_size(&dp));

	/* For our port addresses ? */
	if (ax25cmp(&dest, dev_addr) == 0 && dp.lastrepeat + 1 == dp.ndigi)
		mine = 1;

	/* Also match on any registered callsign from L3/4 */
	if (!mine && ax25_listen_mine(&dest, dev) && dp.lastrepeat + 1 == dp.ndigi)
		mine = 1;

	/* UI frame - bypass LAPB processing */
	if ((*skb->data & ~0x10) == AX25_UI && dp.lastrepeat + 1 == dp.ndigi) {
		skb_set_transport_header(skb, 2); /* skip control and pid */

		ax25_send_to_raw(&dest, skb, skb->data[1]);

		if (!mine && ax25cmp(&dest, (ax25_address *)dev->broadcast) != 0) {
			kfree_skb(skb);
			return 0;
		}

		/* Now we are pointing at the pid byte */
		switch (skb->data[1]) {
		case AX25_P_IP:
			skb_pull(skb,2);		/* drop PID/CTRL */
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->dev      = dev;
			skb->pkt_type = PACKET_HOST;
			skb->protocol = htons(ETH_P_IP);
			netif_rx(skb);
			break;

		case AX25_P_ARP:
			skb_pull(skb,2);
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->dev      = dev;
			skb->pkt_type = PACKET_HOST;
			skb->protocol = htons(ETH_P_ARP);
			netif_rx(skb);
			break;
		case AX25_P_TEXT:
			/* Now find a suitable dgram socket */
			sk = ax25_get_socket(&dest, &src, SOCK_DGRAM);
			if (sk != NULL) {
				bh_lock_sock(sk);
				if (atomic_read(&sk->sk_rmem_alloc) >=
				    sk->sk_rcvbuf) {
					kfree_skb(skb);
				} else {
					/*
					 *	Remove the control and PID.
					 */
					skb_pull(skb, 2);
					if (sock_queue_rcv_skb(sk, skb) != 0)
						kfree_skb(skb);
				}
				bh_unlock_sock(sk);
				sock_put(sk);
			} else {
				kfree_skb(skb);
			}
			break;

		default:
			kfree_skb(skb);	/* Will scan SOCK_AX25 RAW sockets */
			break;
		}

		return 0;
	}

	/*
	 *	Is connected mode supported on this device ?
	 *	If not, should we DM the incoming frame (except DMs) or
	 *	silently ignore it? For now we stay quiet.
	 */
	if (ax25_dev->values[AX25_VALUES_CONMODE] == 0) {
		kfree_skb(skb);
		return 0;
	}

	/* LAPB */

	/* AX.25 state 1-4 */

	ax25_digi_invert(&dp, &reverse_dp);

	if ((ax25 = ax25_find_cb(&dest, &src, &reverse_dp, dev)) != NULL) {
		/*
		 *	Process the frame. If it is queued up internally it
		 *	returns one, otherwise we free it immediately. This
		 *	routine itself wakes the user context layers so we do
		 *	no further work
		 */
		if (ax25_process_rx_frame(ax25, skb, type, dama) == 0)
			kfree_skb(skb);

		ax25_cb_put(ax25);
		return 0;
	}

	/* AX.25 state 0 (disconnected) */

	/* a) received not a SABM(E) */

	if ((*skb->data & ~AX25_PF) != AX25_SABM &&
	    (*skb->data & ~AX25_PF) != AX25_SABME) {
		/*
		 *	Never reply to a DM. Also ignore any connects for
		 *	addresses that are not our interfaces and not a socket.
		 */
		if ((*skb->data & ~AX25_PF) != AX25_DM && mine)
			ax25_return_dm(dev, &src, &dest, &dp);

		kfree_skb(skb);
		return 0;
	}

	/* b) received SABM(E) */

	if (dp.lastrepeat + 1 == dp.ndigi)
		sk = ax25_find_listener(&dest, 0, dev, SOCK_SEQPACKET);
	else
		sk = ax25_find_listener(next_digi, 1, dev, SOCK_SEQPACKET);

	if (sk != NULL) {
		bh_lock_sock(sk);
		if (sk_acceptq_is_full(sk) ||
		    (make = ax25_make_new(sk, ax25_dev)) == NULL) {
			if (mine)
				ax25_return_dm(dev, &src, &dest, &dp);
			kfree_skb(skb);
			bh_unlock_sock(sk);
			sock_put(sk);

			return 0;
		}

		ax25 = ax25_sk(make);
		skb_set_owner_r(skb, make);
		skb_queue_head(&sk->sk_receive_queue, skb);

		make->sk_state = TCP_ESTABLISHED;

		sk->sk_ack_backlog++;
		bh_unlock_sock(sk);
	} else {
		if (!mine) {
			kfree_skb(skb);
			return 0;
		}

		if ((ax25 = ax25_create_cb()) == NULL) {
			ax25_return_dm(dev, &src, &dest, &dp);
			kfree_skb(skb);
			return 0;
		}

		ax25_fillin_cb(ax25, ax25_dev);
	}

	ax25->source_addr = dest;
	ax25->dest_addr   = src;

	/*
	 *	Sort out any digipeated paths.
	 */
	if (dp.ndigi && !ax25->digipeat &&
	    (ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
		kfree_skb(skb);
		ax25_destroy_socket(ax25);
		if (sk)
			sock_put(sk);
		return 0;
	}

	if (dp.ndigi == 0) {
		kfree(ax25->digipeat);
		ax25->digipeat = NULL;
	} else {
		/* Reverse the source SABM's path */
		memcpy(ax25->digipeat, &reverse_dp, sizeof(ax25_digi));
	}

	if ((*skb->data & ~AX25_PF) == AX25_SABME) {
		ax25->modulus = AX25_EMODULUS;
		ax25->window  = ax25_dev->values[AX25_VALUES_EWINDOW];
	} else {
		ax25->modulus = AX25_MODULUS;
		ax25->window  = ax25_dev->values[AX25_VALUES_WINDOW];
	}

	ax25_send_control(ax25, AX25_UA, AX25_POLLON, AX25_RESPONSE);

#ifdef CONFIG_AX25_DAMA_SLAVE
	if (dama && ax25->ax25_dev->values[AX25_VALUES_PROTOCOL] == AX25_PROTO_DAMA_SLAVE)
		ax25_dama_on(ax25);
#endif

	ax25->state = AX25_STATE_3;

	ax25_cb_add(ax25);

	ax25_start_heartbeat(ax25);
	ax25_start_t3timer(ax25);
	ax25_start_idletimer(ax25);

	if (sk) {
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_data_ready(sk, skb->len);
		sock_put(sk);
	} else
		kfree_skb(skb);

	return 0;
}
Example #22
0
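/*
 * AF_PACKET receive hook for the mmap'ed ring (PACKET_RX_RING): the frame is
 * copied into the next free ring slot; when it does not fit and copy_thresh
 * allows it, a reference or clone of the skb is charged to the socket with
 * skb_set_owner_r() and queued on sk_receive_queue alongside the truncated
 * ring copy.
 */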
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,  struct packet_type *pt)
{
	struct sock *sk;
	struct packet_sock *po;
	struct sockaddr_ll *sll;
	struct tpacket_hdr *h;
	u8 * skb_head = skb->data;
	int skb_len = skb->len;
	unsigned snaplen;
	unsigned long status = TP_STATUS_LOSING|TP_STATUS_USER;
	unsigned short macoff, netoff;
	struct sk_buff *copy_skb = NULL;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (dev->hard_header) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb->mac.raw);
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb->nh.raw - skb->data);
			if (skb->ip_summed == CHECKSUM_HW)
				status |= TP_STATUS_CSUMNOTREADY;
		}
	}

	snaplen = skb->len;

	if (sk->sk_filter) {
		unsigned res = run_filter(skb, sk, snaplen);
		if (res == 0)
			goto drop_n_restore;
		if (snaplen > res)
			snaplen = res;
	}

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(TPACKET_HDRLEN) + 16;
	} else {
		unsigned maclen = skb->nh.raw - skb->data;
		netoff = TPACKET_ALIGN(TPACKET_HDRLEN + (maclen < 16 ? 16 : maclen));
		macoff = netoff - maclen;
	}

	if (macoff + snaplen > po->frame_size) {
		if (po->copy_thresh &&
		    atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
		    (unsigned)sk->sk_rcvbuf) {
			if (skb_shared(skb)) {
				copy_skb = skb_clone(skb, GFP_ATOMIC);
			} else {
				copy_skb = skb_get(skb);
				skb_head = skb->data;
			}
			if (copy_skb)
				skb_set_owner_r(copy_skb, sk);
		}
		snaplen = po->frame_size - macoff;
		if ((int)snaplen < 0)
			snaplen = 0;
	}
	if (snaplen > skb->len-skb->data_len)
		snaplen = skb->len-skb->data_len;

	spin_lock(&sk->sk_receive_queue.lock);
	h = (struct tpacket_hdr *)packet_lookup_frame(po, po->head);
	
	if (h->tp_status)
		goto ring_is_full;
	po->head = po->head != po->frame_max ? po->head+1 : 0;
	po->stats.tp_packets++;
	if (copy_skb) {
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	}
	if (!po->stats.tp_drops)
		status &= ~TP_STATUS_LOSING;
	spin_unlock(&sk->sk_receive_queue.lock);

	memcpy((u8*)h + macoff, skb->data, snaplen);

	h->tp_len = skb->len;
	h->tp_snaplen = snaplen;
	h->tp_mac = macoff;
	h->tp_net = netoff;
	if (skb->stamp.tv_sec == 0) { 
		do_gettimeofday(&skb->stamp);
		sock_enable_timestamp(sk);
	}
	h->tp_sec = skb->stamp.tv_sec;
	h->tp_usec = skb->stamp.tv_usec;

	sll = (struct sockaddr_ll*)((u8*)h + TPACKET_ALIGN(sizeof(*h)));
	sll->sll_halen = 0;
	if (dev->hard_header_parse)
		sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	sll->sll_ifindex = dev->ifindex;

	h->tp_status = status;
	mb();

	{
		struct page *p_start, *p_end;
		u8 *h_end = (u8 *)h + macoff + snaplen - 1;

		p_start = virt_to_page(h);
		p_end = virt_to_page(h_end);
		while (p_start <= p_end) {
			flush_dcache_page(p_start);
			p_start++;
		}
	}

	sk->sk_data_ready(sk, 0);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
        kfree_skb(skb);
	return 0;

ring_is_full:
	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

	sk->sk_data_ready(sk, 0);
	if (copy_skb)
		kfree_skb(copy_skb);
	goto drop_n_restore;
}
Example #23
0
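/*
 * Create the DCCPv6 child socket for an accepted connection request
 * (including the IPv4-mapped case); IPv6 packet options saved from the
 * handshake are cloned and charged to the new socket with skb_set_owner_r().
 */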
static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req,
					      struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
	struct inet_sock *newinet;
	struct dccp6_sock *newdp6;
	struct sock *newsk;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */
		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst);
		if (newsk == NULL)
			return NULL;

		newdp6 = (struct dccp6_sock *)newsk;
		newinet = inet_sk(newsk);
		newinet->pinet6 = &newdp6->inet6;
		newnp = inet6_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		ipv6_addr_set_v4mapped(newinet->inet_daddr, &newsk->sk_v6_daddr);

		ipv6_addr_set_v4mapped(newinet->inet_saddr, &newnp->saddr);

		newsk->sk_v6_rcv_saddr = newnp->saddr;

		inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
		newsk->sk_backlog_rcv = dccp_v4_do_rcv;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, dccp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}


	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (dst == NULL) {
		struct in6_addr *final_p, final;
		struct flowi6 fl6;

		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = IPPROTO_DCCP;
		fl6.daddr = ireq->ir_v6_rmt_addr;
		final_p = fl6_update_dst(&fl6, np->opt, &final);
		fl6.saddr = ireq->ir_v6_loc_addr;
		fl6.flowi6_oif = sk->sk_bound_dev_if;
		fl6.fl6_dport = ireq->ir_rmt_port;
		fl6.fl6_sport = htons(ireq->ir_num);
		security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

		dst = ip6_dst_lookup_flow(sk, &fl6, final_p, false);
		if (IS_ERR(dst))
			goto out;
	}

	newsk = dccp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, dccp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	__ip6_dst_store(newsk, dst, NULL, NULL);
	newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
						      NETIF_F_TSO);
	newdp6 = (struct dccp6_sock *)newsk;
	newinet = inet_sk(newsk);
	newinet->pinet6 = &newdp6->inet6;
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr	= ireq->ir_v6_rmt_addr;
	newnp->saddr		= ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr	= ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if	= ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	/* Clone pktoptions received with SYN */
	newnp->pktoptions = NULL;
	if (ireq->pktopts != NULL) {
		newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/*
	 * Clone native IPv6 options from listening socket (if any)
	 *
	 * Yes, keeping a reference count would be much more clever, but we do
	 * one more thing here: reattach optmem to newsk.
	 */
	if (np->opt != NULL)
		newnp->opt = ipv6_dup_options(newsk, np->opt);

	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (newnp->opt != NULL)
		inet_csk(newsk)->icsk_ext_hdr_len = (newnp->opt->opt_nflen +
						     newnp->opt->opt_flen);

	dccp_sync_mss(newsk, dst_mtu(dst));

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		dccp_done(newsk);
		goto out;
	}
	__inet6_hash(newsk, NULL);

	return newsk;

out_overflow:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
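
/*
 * Restore a checkpointed socket's queues: each saved skb is re-created from
 * the image and put back on the matching receive, out-of-order or write
 * queue; receive-side skbs are charged to the socket with skb_set_owner_r(),
 * while write-queue skbs get their own accounting.
 */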
static int restore_queues(struct sock *sk, struct cpt_sock_image *si,
			  loff_t pos, struct cpt_context *ctx)
{
	loff_t endpos;

	endpos = pos + si->cpt_next;
	pos = pos + si->cpt_hdrlen;
	while (pos < endpos) {
		struct sk_buff *skb;
		__u32 type;
		int err;

		err = rst_sock_attr(&pos, sk, ctx);
		if (!err)
			continue;
		if (err < 0)
			return err;

		skb = rst_skb(sk, &pos, NULL, &type, ctx);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		if (sk->sk_type == SOCK_STREAM) {
			if (type == CPT_SKB_RQ) {
				skb_set_owner_r(skb, sk);
				ub_tcprcvbuf_charge_forced(sk, skb);
				skb_queue_tail(&sk->sk_receive_queue, skb);
			} else if (type == CPT_SKB_OFOQ) {
				struct tcp_sock *tp = tcp_sk(sk);
				skb_set_owner_r(skb, sk);
				ub_tcprcvbuf_charge_forced(sk, skb);
				skb_queue_tail(&tp->out_of_order_queue, skb);
			} else if (type == CPT_SKB_WQ) {
				sk->sk_wmem_queued += skb->truesize;
				sk->sk_forward_alloc -= skb->truesize;
				ub_tcpsndbuf_charge_forced(sk, skb);
				skb_queue_tail(&sk->sk_write_queue, skb);
			} else {
				wprintk_ctx("strange stream queue type %u\n", type);
				kfree_skb(skb);
			}
		} else {
			if (type == CPT_SKB_RQ) {
				skb_set_owner_r(skb, sk);
				skb_queue_tail(&sk->sk_receive_queue, skb);
			} else if (type == CPT_SKB_WQ) {
				struct inet_sock *inet = inet_sk(sk);
				if (inet->cork.fragsize) {
					skb_set_owner_w(skb, sk);
					skb_queue_tail(&sk->sk_write_queue, skb);
				} else {
					eprintk_ctx("cork skb is dropped\n");
					kfree_skb(skb);
				}
			} else {
				wprintk_ctx("strange dgram queue type %u\n", type);
				kfree_skb(skb);
			}
		}
	}
	return 0;
}
Example #25
0
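/*
 * Phonet PEP: handle one pipe message on the socket. Control messages update
 * the pipe state; data (and queued control requests) that passes flow
 * control is charged to the socket with skb_set_owner_r() and queued at the
 * "queue:" label before the reader is woken.
 */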
static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *hdr = pnp_hdr(skb);
	struct sk_buff_head *queue;
	int err = 0;

	BUG_ON(sk->sk_state == TCP_CLOSE_WAIT);

	switch (hdr->message_id) {
	case PNS_PEP_CONNECT_REQ:
		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE);
		break;

	case PNS_PEP_DISCONNECT_REQ:
		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
		sk->sk_state = TCP_CLOSE_WAIT;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_state_change(sk);
		break;

	case PNS_PEP_ENABLE_REQ:
		/* Wait for PNS_PIPE_(ENABLED|REDIRECTED)_IND */
		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
		break;

	case PNS_PEP_RESET_REQ:
		switch (hdr->state_after_reset) {
		case PN_PIPE_DISABLE:
			pn->init_enable = 0;
			break;
		case PN_PIPE_ENABLE:
			pn->init_enable = 1;
			break;
		default: /* not allowed to send an error here!? */
			err = -EINVAL;
			goto out;
		}
		/* fall through */
	case PNS_PEP_DISABLE_REQ:
		atomic_set(&pn->tx_credits, 0);
		pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
		break;

	case PNS_PEP_CTRL_REQ:
		if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
			atomic_inc(&sk->sk_drops);
			break;
		}
		__skb_pull(skb, 4);
		queue = &pn->ctrlreq_queue;
		goto queue;

	case PNS_PIPE_ALIGNED_DATA:
		__skb_pull(skb, 1);
		/* fall through */
	case PNS_PIPE_DATA:
		__skb_pull(skb, 3); /* Pipe data header */
		if (!pn_flow_safe(pn->rx_fc)) {
			err = sock_queue_rcv_skb(sk, skb);
			if (!err)
				return 0;
			break;
		}

		if (pn->rx_credits == 0) {
			atomic_inc(&sk->sk_drops);
			err = -ENOBUFS;
			break;
		}
		pn->rx_credits--;
		queue = &sk->sk_receive_queue;
		goto queue;

	case PNS_PEP_STATUS_IND:
		pipe_rcv_status(sk, skb);
		break;

	case PNS_PIPE_REDIRECTED_IND:
		err = pipe_rcv_created(sk, skb);
		break;

	case PNS_PIPE_CREATED_IND:
		err = pipe_rcv_created(sk, skb);
		if (err)
			break;
		/* fall through */
	case PNS_PIPE_RESET_IND:
		if (!pn->init_enable)
			break;
		/* fall through */
	case PNS_PIPE_ENABLED_IND:
		if (!pn_flow_safe(pn->tx_fc)) {
			atomic_set(&pn->tx_credits, 1);
			sk->sk_write_space(sk);
		}
		if (sk->sk_state == TCP_ESTABLISHED)
			break; /* Nothing to do */
		sk->sk_state = TCP_ESTABLISHED;
		pipe_grant_credits(sk);
		break;

	case PNS_PIPE_DISABLED_IND:
		sk->sk_state = TCP_SYN_RECV;
		pn->rx_credits = 0;
		break;

	default:
		LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP message: %u\n",
				hdr->message_id);
		err = -EINVAL;
	}
out:
	kfree_skb(skb);
	return err;

queue:
	skb->dev = NULL;
	skb_set_owner_r(skb, sk);
	err = skb->len;
	skb_queue_tail(queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, err);
	return 0;
}
Example #26
0
static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
					      struct sk_buff *skb,
					      struct request_sock *req,
					      struct dst_entry *dst,
					      struct request_sock *req_unhash,
					      bool *own_req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct inet_sock *newinet;
	struct dccp6_sock *newdp6;
	struct sock *newsk;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 *	v6 mapped
		 */
		newsk = dccp_v4_request_recv_sock(sk, skb, req, dst,
						  req_unhash, own_req);
		if (newsk == NULL)
			return NULL;

		newdp6 = (struct dccp6_sock *)newsk;
		newinet = inet_sk(newsk);
		newinet->pinet6 = &newdp6->inet6;
		newnp = inet6_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;
		newsk->sk_backlog_rcv = dccp_v4_do_rcv;
		newnp->pktoptions  = NULL;
		newnp->opt	   = NULL;
		newnp->mcast_oif   = inet6_iif(skb);
		newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, dccp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is a tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}


	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		struct flowi6 fl6;

		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_DCCP);
		if (!dst)
			goto out;
	}

	newsk = dccp_create_openreq_child(sk, req, skb);
	if (newsk == NULL)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, dccp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	ip6_dst_store(newsk, dst, NULL, NULL);
	newsk->sk_route_caps = dst->dev->features & ~(NETIF_F_IP_CSUM |
						      NETIF_F_TSO);
	newdp6 = (struct dccp6_sock *)newsk;
	newinet = inet_sk(newsk);
	newinet->pinet6 = &newdp6->inet6;
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr	= ireq->ir_v6_rmt_addr;
	newnp->saddr		= ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr	= ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if	= ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt	  = NULL;
	newnp->mcast_oif  = inet6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;

	/*
	 * Clone native IPv6 options from listening socket (if any)
	 *
	 * Yes, keeping a reference count would be much more clever, but we do
	 * one more thing here: reattach optmem to newsk.
	 */
	opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	dccp_sync_mss(newsk, dst_mtu(dst));

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		dccp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	/* Clone pktoptions received with SYN, if we own the req */
	if (*own_req && ireq->pktopts) {
		newnp->pktoptions = skb_clone(ireq->pktopts, GFP_ATOMIC);
		consume_skb(ireq->pktopts);
		ireq->pktopts = NULL;
		if (newnp->pktoptions)
			skb_set_owner_r(newnp->pktoptions, newsk);
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
	return NULL;
}
/**
 * accept - wait for connection request
 * @sock: listening socket
 * @newsock: new socket that is to be connected
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */
static int accept(struct socket *sock, struct socket *new_sock, int flags)
{
	struct sock *new_sk, *sk = sock->sk;
	struct sk_buff *buf;
	struct tipc_sock *new_tsock;
	struct tipc_port *new_tport;
	struct tipc_msg *msg;
	u32 new_ref;

	int res;

	lock_sock(sk);

	if (sock->state != SS_LISTENING) {
		res = -EINVAL;
		goto exit;
	}

	while (skb_queue_empty(&sk->sk_receive_queue)) {
		if (flags & O_NONBLOCK) {
			res = -EWOULDBLOCK;
			goto exit;
		}
		release_sock(sk);
		res = wait_event_interruptible(*sk_sleep(sk),
				(!skb_queue_empty(&sk->sk_receive_queue)));
		lock_sock(sk);
		if (res)
			goto exit;
	}

	buf = skb_peek(&sk->sk_receive_queue);

	res = tipc_create(sock_net(sock->sk), new_sock, 0, 0);
	if (res)
		goto exit;

	new_sk = new_sock->sk;
	new_tsock = tipc_sk(new_sk);
	new_tport = new_tsock->p;
	new_ref = new_tport->ref;
	msg = buf_msg(buf);

	/* we lock on new_sk; but lockdep sees the lock on sk */
	lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);

	/*
	 * Reject any stray messages received by new socket
	 * before the socket lock was taken (very, very unlikely)
	 */
	reject_rx_queue(new_sk);

	/* Connect new socket to its peer */
	new_tsock->peer_name.ref = msg_origport(msg);
	new_tsock->peer_name.node = msg_orignode(msg);
	tipc_connect(new_ref, &new_tsock->peer_name);
	new_sock->state = SS_CONNECTED;

	tipc_set_portimportance(new_ref, msg_importance(msg));
	if (msg_named(msg)) {
		new_tport->conn_type = msg_nametype(msg);
		new_tport->conn_instance = msg_nameinst(msg);
	}

	/*
	 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
	 * Respond to 'SYN+' by queuing it on new socket.
	 */
	if (!msg_data_sz(msg)) {
		struct msghdr m = {NULL,};

		advance_rx_queue(sk);
		send_packet(NULL, new_sock, &m, 0);
	} else {
		__skb_dequeue(&sk->sk_receive_queue);
		__skb_queue_head(&new_sk->sk_receive_queue, buf);
		skb_set_owner_r(buf, new_sk);
	}
	release_sock(new_sk);

exit:
	release_sock(sk);
	return res;
}
Example #28
0
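/*
 * Create the child socket for a TCP Fast Open request directly from the SYN:
 * the child goes straight onto the accept queue, and any payload carried in
 * the SYN is cloned (or the skb refcount bumped), charged to the child with
 * skb_set_owner_r() and queued on its receive queue.
 */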
static bool tcp_fastopen_create_child(struct sock *sk,
                                      struct sk_buff *skb,
                                      struct dst_entry *dst,
                                      struct request_sock *req)
{
    struct tcp_sock *tp;
    struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
    struct sock *child;
    u32 end_seq;

    req->num_retrans = 0;
    req->num_timeout = 0;
    req->sk = NULL;

    child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
    if (!child)
        return false;

    spin_lock(&queue->fastopenq->lock);
    queue->fastopenq->qlen++;
    spin_unlock(&queue->fastopenq->lock);

    /* Initialize the child socket. Have to fix some values to take
     * into account the child is a Fast Open socket and is created
     * only out of the bits carried in the SYN packet.
     */
    tp = tcp_sk(child);

    tp->fastopen_rsk = req;
    tcp_rsk(req)->tfo_listener = true;

    /* RFC1323: The window in SYN & SYN/ACK segments is never
     * scaled. So correct it appropriately.
     */
    tp->snd_wnd = ntohs(tcp_hdr(skb)->window);

    /* Activate the retrans timer so that SYNACK can be retransmitted.
     * The request socket is not added to the SYN table of the parent
     * because it's been added to the accept queue directly.
     */
    inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
                              TCP_TIMEOUT_INIT, TCP_RTO_MAX);

    atomic_set(&req->rsk_refcnt, 1);
    /* Add the child socket directly into the accept queue */
    inet_csk_reqsk_queue_add(sk, req, child);

    /* Now finish processing the fastopen child socket. */
    inet_csk(child)->icsk_af_ops->rebuild_header(child);
    tcp_init_congestion_control(child);
    tcp_mtup_init(child);
    tcp_init_metrics(child);
    tcp_init_buffer_space(child);

    /* Queue the data carried in the SYN packet. We need to first
     * bump skb's refcnt because the caller will attempt to free it.
     * Note that IPv6 might also have used skb_get() trick
     * in tcp_v6_conn_request() to keep this SYN around (treq->pktopts)
     * So we need to eventually get a clone of the packet,
     * before inserting it in sk_receive_queue.
     *
     * XXX (TFO) - we honor a zero-payload TFO request for now,
     * (any reason not to?) but no need to queue the skb since
     * there is no data. How about SYN+FIN?
     */
    end_seq = TCP_SKB_CB(skb)->end_seq;
    if (end_seq != TCP_SKB_CB(skb)->seq + 1) {
        struct sk_buff *skb2;

        if (unlikely(skb_shared(skb)))
            skb2 = skb_clone(skb, GFP_ATOMIC);
        else
            skb2 = skb_get(skb);

        if (likely(skb2)) {
            skb_dst_drop(skb2);
            __skb_pull(skb2, tcp_hdrlen(skb));
            skb_set_owner_r(skb2, child);
            __skb_queue_tail(&child->sk_receive_queue, skb2);
            tp->syn_data_acked = 1;

            /* u64_stats_update_begin(&tp->syncp) not needed here,
             * as we certainly are not changing upper 32bit value (0)
             */
            tp->bytes_received = end_seq - TCP_SKB_CB(skb)->seq - 1;
        } else {
            end_seq = TCP_SKB_CB(skb)->seq + 1;
        }
    }
    tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = end_seq;
    sk->sk_data_ready(sk);
    bh_unlock_sock(child);
    sock_put(child);
    WARN_ON(!req->sk);
    return true;
}
Example #29
0
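/*
 * AF_PACKET receive hook for the non-mmap path: run the socket filter, clone
 * the skb if it is shared, fill in the sockaddr_ll in skb->cb, trim to the
 * snap length, then charge the skb to the socket with skb_set_owner_r() and
 * queue it on sk_receive_queue.
 */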
static int packet_rcv(struct sk_buff *skb, struct net_device *dev,  struct packet_type *pt)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 * skb_head = skb->data;
	int skb_len = skb->len;
	unsigned snaplen;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	skb->dev = dev;

	if (dev->hard_header) {
		/* The device has an explicit notion of ll header,
		   exported to higher levels.

		   Otherwise, the device hides details of its frame
		   structure, so that the corresponding packet head is
		   never delivered to the user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb->mac.raw);
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb->nh.raw - skb->data);
		}
	}

	snaplen = skb->len;

	if (sk->sk_filter) {
		unsigned res = run_filter(skb, sk, snaplen);
		if (res == 0)
			goto drop_n_restore;
		if (snaplen > res)
			snaplen = res;
	}

	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		kfree_skb(skb);
		skb = nskb;
	}

	sll = (struct sockaddr_ll*)skb->cb;
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	sll->sll_ifindex = dev->ifindex;
	sll->sll_halen = 0;

	if (dev->hard_header_parse)
		sll->sll_halen = dev->hard_header_parse(skb, sll->sll_addr);

	if (pskb_trim(skb, snaplen))
		goto drop_n_acct;

	skb_set_owner_r(skb, sk);
	skb->dev = NULL;
	dst_release(skb->dst);
	skb->dst = NULL;

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_packets++;
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk, skb->len);
	return 0;

drop_n_acct:
	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;
}
Example #30
0
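/*
 * Unicast a netlink message: look up the destination socket by pid, sleep
 * (or fail with -EAGAIN when non-blocking) while its receive buffer is full,
 * then orphan the skb, charge it to the receiver with skb_set_owner_r(),
 * queue it and wake the reader, returning the delivered length.
 */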
int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
{
	struct sock *sk;
	int len = skb->len;
	int protocol = ssk->protocol;
	long timeo;
        DECLARE_WAITQUEUE(wait, current);

	timeo = sock_sndtimeo(ssk, nonblock);

retry:
	sk = netlink_lookup(protocol, pid);
	if (sk == NULL)
		goto no_dst;

#ifdef NL_EMULATE_DEV
	if (sk->protinfo.af_netlink->handler) {
		skb_orphan(skb);
		len = sk->protinfo.af_netlink->handler(protocol, skb);
		sock_put(sk);
		return len;
	}
#endif

	if (atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
	    test_bit(0, &sk->protinfo.af_netlink->state)) {
		if (!timeo) {
			if (ssk->protinfo.af_netlink->pid == 0)
				netlink_overrun(sk);
			sock_put(sk);
			kfree_skb(skb);
			return -EAGAIN;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&sk->protinfo.af_netlink->wait, &wait);

		if ((atomic_read(&sk->rmem_alloc) > sk->rcvbuf ||
		    test_bit(0, &sk->protinfo.af_netlink->state)) &&
		    !sk->dead)
			timeo = schedule_timeout(timeo);

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&sk->protinfo.af_netlink->wait, &wait);
		sock_put(sk);

		if (signal_pending(current)) {
			kfree_skb(skb);
			return sock_intr_errno(timeo);
		}
		goto retry;
	}

	skb_orphan(skb);
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->receive_queue, skb);
	sk->data_ready(sk, len);
	sock_put(sk);
	return len;

no_dst:
	kfree_skb(skb);
	return -ECONNREFUSED;
}