Example No. 1
static void svc_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;

	WARN_ON_ONCE(sock_owned_by_user(sk));
	if (sock_owned_by_user(sk))
		return;

	switch (sk->sk_family) {
	case AF_INET:
		sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
					      &svc_slock_key[0],
					      "sk_xprt.xpt_lock-AF_INET-NFSD",
					      &svc_key[0]);
		break;

	case AF_INET6:
		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
					      &svc_slock_key[1],
					      "sk_xprt.xpt_lock-AF_INET6-NFSD",
					      &svc_key[1]);
		break;

	default:
		BUG();
	}
}
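
A minimal sketch of the lockdep keys the function above references: in the surrounding file they would typically be file-scope statics, one slot per address family (declarations shown here for illustration only).

/* One lock_class_key per address family, indexed as [0]=AF_INET, [1]=AF_INET6 above. */
static struct lock_class_key svc_slock_key[2];	/* class for sk->sk_lock.slock */
static struct lock_class_key svc_key[2];	/* class for sk_xprt.xpt_lock   */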
Example No. 2
static void dn_slow_timer(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;
	struct dn_scp *scp = DN_SK(sk);

	bh_lock_sock(sk);

	if (sock_owned_by_user(sk)) {
		sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10);
		goto out;
	}

	if (scp->persist && scp->persist_fxn) {
		if (scp->persist <= SLOW_INTERVAL) {
			scp->persist = 0;

			if (scp->persist_fxn(sk))
				goto out;
		} else {
			scp->persist -= SLOW_INTERVAL;
		}
	}

	if (scp->keepalive && scp->keepalive_fxn && (scp->state == DN_RUN)) {
		if ((jiffies - scp->stamp) >= scp->keepalive)
			scp->keepalive_fxn(sk);
	}

	sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
Example No. 3
static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
{
	struct sock *sk = (struct sock *)tport->usr_handle;
	u32 res;

	/*
	 * Process message if socket is unlocked; otherwise add to backlog queue
	 *
	 * This code is based on sk_receive_skb(), but must be distinct from it
	 * since a TIPC-specific filter/reject mechanism is utilized
	 */

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		res = filter_rcv(sk, buf);
	} else {
		if (sk_add_backlog(sk, buf))
			res = TIPC_ERR_OVERLOAD;
		else
			res = TIPC_OK;
	}
	bh_unlock_sock(sk);

	return res;
}
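
A hedged sketch of the other half of this pattern: skbs parked on the backlog above are replayed through sk->sk_backlog_rcv once the owner calls release_sock(), so the backlog handler simply reruns the same TIPC filter path (tipc_reject_msg() is assumed here as the rejection helper).

static int backlog_rcv(struct sock *sk, struct sk_buff *buf)
{
	u32 res = filter_rcv(sk, buf);

	if (res)
		tipc_reject_msg(buf, res);	/* bounce messages the filter refused */
	return 0;
}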
Example No. 4
int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb))
		goto discard_and_relse;

	skb->dev = NULL;

	if (nested)
		bh_lock_sock_nested(sk);
	else
		bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		/*
		 * trylock + unlock semantics:
		 */
		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);

		rc = sk->sk_backlog_rcv(sk, skb);

		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
	} else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
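
A hedged sketch of a typical caller: a protocol's softirq receive path looks the socket up (proto_lookup_sock() is an assumed helper that returns with a reference held) and hands the frame to sk_receive_skb(), which either processes it immediately or parks it on the backlog; the reference is released by the sock_put() inside sk_receive_skb().

static int proto_rcv(struct sk_buff *skb)
{
	struct sock *sk = proto_lookup_sock(skb);	/* assumed lookup helper */

	if (sk == NULL) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}
	return sk_receive_skb(sk, skb, 0);
}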
Example No. 5
File: bpf.c Project: pfq/PFQ
struct sk_filter *
pfq_alloc_sk_filter(struct sock_fprog *fprog)
{
	struct sock sk;
	int rv;

	sock_init_data(NULL, &sk);
	sk.sk_filter = NULL;
	atomic_set(&sk.sk_omem_alloc, 0);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
	sock_reset_flag(&sk, SOCK_FILTER_LOCKED);
#endif
	pr_devel("[PFQ] BPF: new fprog (len %d)\n", fprog->len);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,8) && LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0))
	if ((rv = __sk_attach_filter(fprog, &sk, sock_owned_by_user(&sk))))
#else
	if ((rv = sk_attach_filter(fprog, &sk)))
#endif
	{
		pr_devel("[PFQ] BPF: sk_attach_filter error: (%d)!\n", rv);
		return NULL;
	}

	return sk.sk_filter;
}
Example No. 6
/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;
	unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (fprog->filter == NULL)
		return -EINVAL;

	fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;
	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		sock_kfree_s(sk, fp, fsize+sizeof(*fp));
		return -EFAULT;
	}

	atomic_set(&fp->refcnt, 1);
	fp->len = fprog->len;

	err = sk_chk_filter(fp->insns, fp->len);
	if (err) {
		sk_filter_uncharge(sk, fp);
		return err;
	}

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);
	return 0;
}
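
The usual way this path is reached is setsockopt(SO_ATTACH_FILTER) with a classic-BPF program. A minimal userspace sketch, where the single instruction accepts every packet by returning the maximum snap length:

#include <linux/filter.h>
#include <sys/socket.h>

static int attach_accept_all(int fd)
{
	struct sock_filter insns[] = {
		BPF_STMT(BPF_RET | BPF_K, 0xFFFFFFFF),	/* return "whole packet" */
	};
	struct sock_fprog fprog = {
		.len	= sizeof(insns) / sizeof(insns[0]),
		.filter	= insns,
	};

	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));
}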
Example No. 7
/* ---- L2CAP timers ---- */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sock_owned_by_user(sk)) {
		/* sk is owned by user. Try again later */
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		sock_put(sk);
		return;
	}

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
				l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	sock_put(sk);
}
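
A minimal sketch of what the re-arm helper used above amounts to: sk_reset_timer() takes a socket reference when it arms a previously idle sk->sk_timer, which is what the sock_put() at the end of the handler releases.

static void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}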
Example No. 8
static void dn_slow_timer(unsigned long arg)
{
	struct sock *sk = (struct sock *)arg;
	struct dn_scp *scp = DN_SK(sk);

	sock_hold(sk);
	bh_lock_sock(sk);

	if (sock_owned_by_user(sk)) {
		sk->sk_timer.expires = jiffies + HZ / 10;
		add_timer(&sk->sk_timer);
		goto out;
	}

	/*
	 * The persist timer is the standard slow timer used for retransmits
	 * in both connection establishment and disconnection as well as
	 * in the RUN state. The different states are catered for by changing
	 * the function pointer in the socket. Setting the timer to a value
	 * of zero turns it off. We allow the persist_fxn to turn the
	 * timer off in a permanent way by returning non-zero, so that
	 * timer based routines may remove sockets. This is why we have a
	 * sock_hold()/sock_put() around the timer to prevent the socket
	 * going away in the middle.
	 */
	if (scp->persist && scp->persist_fxn) {
		if (scp->persist <= SLOW_INTERVAL) {
			scp->persist = 0;

			if (scp->persist_fxn(sk))
				goto out;
		} else {
			scp->persist -= SLOW_INTERVAL;
		}
	}

	/*
	 * Check for keepalive timeout. After the other timer 'cos if
	 * the previous timer caused a retransmit, we don't need to
	 * do this. scp->stamp is the last time that we sent a packet.
	 * The keepalive function sends a link service packet to the
	 * other end. If it remains unacknowledged, the standard
	 * socket timers will eventually shut the socket down. Each
	 * time we do this, scp->stamp will be updated, thus
	 * we won't try and send another until scp->keepalive has passed
	 * since the last successful transmission.
	 */
	if (scp->keepalive && scp->keepalive_fxn && (scp->state == DN_RUN)) {
		if ((jiffies - scp->stamp) >= scp->keepalive)
			scp->keepalive_fxn(sk);
	}

	sk->sk_timer.expires = jiffies + SLOW_INTERVAL;

	add_timer(&sk->sk_timer);
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
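
A hedged sketch of a persist_fxn that honours the contract described in the comment above: return 0 to keep the slow timer running, non-zero to stop it permanently (the function name and the teardown check are illustrative).

static int example_persist_fxn(struct sock *sk)
{
	if (sock_flag(sk, SOCK_DEAD))
		return 1;	/* socket is going away: switch the timer off for good */

	/* otherwise retransmit / advance the handshake here and keep ticking */
	return 0;
}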
Example No. 9
void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
{
	struct llc_addr saddr, daddr;
	struct sock *sk;

	llc_pdu_decode_sa(skb, saddr.mac);
	llc_pdu_decode_ssap(skb, &saddr.lsap);
	llc_pdu_decode_da(skb, daddr.mac);
	llc_pdu_decode_dsap(skb, &daddr.lsap);

	sk = __llc_lookup(sap, &saddr, &daddr);
	if (!sk)
		goto drop;

	bh_lock_sock(sk);
	/*
	 * This has to be done here and not at the upper layer ->accept
	 * method because of the way the PROCOM state machine works:
	 * it needs to set several state variables (see, for instance,
	 * llc_adm_actions_2 in net/llc/llc_c_st.c) and send a packet to
	 * the originator of the new connection, and this state has to be
	 * in the newly created struct sock private area. -acme
	 */
	if (unlikely(sk->sk_state == TCP_LISTEN)) {
		struct sock *newsk = llc_create_incoming_sock(sk, skb->dev,
							      &saddr, &daddr);
		if (!newsk)
			goto drop_unlock;
		skb_set_owner_r(skb, newsk);
	} else {
		/*
		 * Can't be skb_set_owner_r, this will be done at the
		 * llc_conn_state_process function, later on, when we will use
		 * skb_queue_rcv_skb to send it to upper layers, this is
		 * another trick required to cope with how the PROCOM state
		 * machine works. -acme
		 */
		skb->sk = sk;
	}
	if (!sock_owned_by_user(sk))
		llc_conn_rcv(sk, skb);
	else {
		llc_set_backlog_type(skb, LLC_PACKET);
		if (sk_add_backlog(sk, skb))
			goto drop_unlock;
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return;
drop:
	kfree_skb(skb);
	return;
drop_unlock:
	kfree_skb(skb);
	goto out;
}
Example No. 10
static void sdp_poll_tx_timeout(unsigned long data)
{
	struct sdp_sock *ssk = (struct sdp_sock *)data;
	struct sock *sk = sk_ssk(ssk);
	u32 inflight, wc_processed;

	sdp_prf1(sk_ssk(ssk), NULL, "TX timeout: inflight=%d, head=%d tail=%d",
		(u32) tx_ring_posted(ssk),
		ring_head(ssk->tx_ring), ring_tail(ssk->tx_ring));

	/* Only process if the socket is not in use */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		sdp_prf(sk_ssk(ssk), NULL, "TX comp: socket is busy");

		if (sdp_tx_handler_select(ssk) && sk->sk_state != TCP_CLOSE &&
				likely(ssk->qp_active)) {
			sdp_prf1(sk, NULL, "schedule a timer");
			mod_timer(&ssk->tx_ring.timer, jiffies + SDP_TX_POLL_TIMEOUT);
		}

		SDPSTATS_COUNTER_INC(tx_poll_busy);
		goto out;
	}

	if (unlikely(!ssk->qp || sk->sk_state == TCP_CLOSE)) {
		SDPSTATS_COUNTER_INC(tx_poll_no_op);
		goto out;
	}

	wc_processed = sdp_process_tx_cq(ssk);
	if (!wc_processed)
		SDPSTATS_COUNTER_INC(tx_poll_miss);
	else {
		sdp_post_sends(ssk, GFP_ATOMIC);
		SDPSTATS_COUNTER_INC(tx_poll_hit);
	}

	inflight = (u32) tx_ring_posted(ssk);
	sdp_prf1(sk_ssk(ssk), NULL, "finished tx processing. inflight = %d",
			tx_ring_posted(ssk));

	/* If there are still packets in flight and the timer has not already
	 * been scheduled by the Tx routine then schedule it here to guarantee
	 * completion processing of these packets */
	if (inflight && likely(ssk->qp_active))
		mod_timer(&ssk->tx_ring.timer, jiffies + SDP_TX_POLL_TIMEOUT);

out:
	if (ssk->tx_ring.rdma_inflight && ssk->tx_ring.rdma_inflight->busy) {
		sdp_prf1(sk, NULL, "RDMA is inflight - arming irq");
		sdp_arm_tx_cq(sk);
	}

	bh_unlock_sock(sk);
}
Example No. 11
/*
 * Set sock sync properties.
 */
int set_tcp_sock_sync_prop(struct bst_set_sock_sync_prop *set_prop)
{
	int err = 0;
	struct sock *sk;
	struct bastet_sock *bsk;
	struct bst_sock_comm_prop *guide = &set_prop->guide;

	sk = get_sock_by_comm_prop(guide);
	if (NULL == sk) {
		BASTET_LOGE("can not find sock by lport: %d, lIp: %pI4, rport: %d, rIp: %pI4",
					guide->local_port, &guide->local_ip,
					guide->remote_port, &guide->remote_ip);
		return -ENOENT;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		BASTET_LOGE("sk: %p not expected time wait sock", sk);
		inet_twsk_put(inet_twsk(sk));
		return -EPERM;
	}

	bsk = sk->bastet;
	if (NULL == bsk) {
		BASTET_LOGE("sk: %p not expected bastet null", sk);
		err = -EPERM;
		goto out_put;
	}

	BASTET_LOGI("sk: %p", sk);

	spin_lock_bh(&sk->sk_lock.slock);

	if (NULL != bsk->sync_p) {
		BASTET_LOGE("sk: %p has a pending sock set", sk);
		err = -EPERM;
		goto out_unlock;
	}

	cancel_sock_bastet_timer(sk);

	if (sock_owned_by_user(sk)) {
		err = setup_sock_sync_set_timer(sk, &set_prop->sync_prop);
		goto out_unlock;
	}

	sock_set_internal(sk, &set_prop->sync_prop);

out_unlock:
	spin_unlock_bh(&sk->sk_lock.slock);

	adjust_traffic_flow_by_sock(sk, set_prop->sync_prop.tx, set_prop->sync_prop.rx);

out_put:
	sock_put(sk);
	return err;
}
Example No. 12
/*
 * Close sock, when modem bastet fails this sock.
 */
int set_tcp_sock_closed(struct bst_sock_comm_prop *guide)
{
	int err = 0;
	struct sock *sk;
	struct bastet_sock *bsk;

	sk = get_sock_by_comm_prop(guide);
	if (NULL == sk) {
		BASTET_LOGE("can not find sock by lport: %d, lIp: %pI4, rport: %d, rIp: %pI4",
					guide->local_port, &guide->local_ip,
					guide->remote_port, &guide->remote_ip);
		return -ENOENT;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		BASTET_LOGE("sk: %p not expected time wait sock", sk);
		inet_twsk_put(inet_twsk(sk));
		return -EPERM;
	}

	bsk = sk->bastet;
	if (NULL == bsk) {
		BASTET_LOGE("sk: %p not expected bastet null", sk);
		err = -EPERM;
		goto out_put;
	}

	BASTET_LOGI("sk: %p", sk);

	spin_lock_bh(&sk->sk_lock.slock);

	if (BST_SOCK_INVALID != bsk->bastet_sock_state
		&& BST_SOCK_UPDATING != bsk->bastet_sock_state) {
		BASTET_LOGE("sk: %p sync_current_state: %d not expected", sk, bsk->bastet_sock_state);
		goto out_unlock;
	}

	cancel_sock_bastet_timer(sk);

	bsk->bastet_sock_state = BST_SOCK_NOT_USED;

	if (sock_owned_by_user(sk)) {
		setup_sock_sync_close_timer(sk);
		goto out_unlock;
	}

	set_sock_close_internal(sk);

out_unlock:
	spin_unlock_bh(&sk->sk_lock.slock);

out_put:
	sock_put(sk);
	return err;
}
Example No. 13
static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
{
	struct sock *sk;
	unsigned short frametype;
	unsigned int lci;

	frametype = skb->data[2];
	lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);

	/*
	 *	LCI of zero is always for us, and it's always a link control
	 *	frame.
	 */
	if (lci == 0) {
		x25_link_control(skb, nb, frametype);
		return 0;
	}

	/*
	 *	Find an existing socket.
	 */
	if ((sk = x25_find_socket(lci, nb)) != NULL) {
		int queued = 1;

		skb->h.raw = skb->data;
		bh_lock_sock(sk);
		if (!sock_owned_by_user(sk)) {
			queued = x25_process_rx_frame(sk, skb);
		} else {
			sk_add_backlog(sk, skb);
		}
		bh_unlock_sock(sk);
		return queued;
	}

	/*
	 *	Is it a Call Request? If so, process it.
	 */
	if (frametype == X25_CALL_REQUEST)
		return x25_rx_call_request(skb, nb, lci);

	/*
	 *	It's not a Call Request, nor is it a control frame.
	 *	Let the caller throw it away.
	 */
/*
	x25_transmit_clear_request(nb, lci, 0x0D);
*/

	if (frametype != X25_CLEAR_CONFIRMATION)
		printk(KERN_DEBUG "x25_receive_data(): unknown frame type %2x\n",frametype);

	return 0;
}
Example No. 14
static void dccp_write_xmit_timer(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct dccp_sock *dp = dccp_sk(sk);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies+1);
	else
		dccp_write_xmit(sk, 0);
	bh_unlock_sock(sk);
	sock_put(sk);
}
Example No. 15
static void x25_timer_expiry(unsigned long param)
{
	struct sock *sk = (struct sock *)param;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) { /* can currently only occur in state 3 */
		if (x25_sk(sk)->state == X25_STATE_3)
			x25_start_t2timer(sk);
	} else
		x25_do_timer_expiry(sk);
	bh_unlock_sock(sk);
}
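
A hedged sketch of the re-arm helper referenced above: T2 is the per-socket X.25 acknowledgement timer, so restarting it pushes the socket's private timer out by the configured interval (the real helper also rebinds the expiry callback; the name below is illustrative).

static void x25_restart_t2timer(struct sock *sk)
{
	struct x25_sock *x25 = x25_sk(sk);

	mod_timer(&x25->timer, jiffies + x25->t2);
}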
Example No. 16
int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	filter = rcu_dereference_protected(sk->sk_filter,
					   sock_owned_by_user(sk));
	if (filter) {
		rcu_assign_pointer(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}
	return ret;
}
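
As with attaching, this path is normally reached from setsockopt(). A userspace sketch, assuming SO_DETACH_FILTER is visible through the system socket headers (the option value is ignored):

#include <sys/socket.h>

static int detach_filter(int fd)
{
	int dummy = 0;

	return setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, &dummy, sizeof(dummy));
}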
Example No. 17
static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
{
	struct sock *sk;
	unsigned short frametype;
	unsigned int lci;

	if (!pskb_may_pull(skb, X25_STD_MIN_LEN))
		return 0;

	frametype = skb->data[2];
	lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF);

	if (lci == 0) {
		x25_link_control(skb, nb, frametype);
		return 0;
	}

	if ((sk = x25_find_socket(lci, nb)) != NULL) {
		int queued = 1;

		skb_reset_transport_header(skb);
		bh_lock_sock(sk);
		if (!sock_owned_by_user(sk)) {
			queued = x25_process_rx_frame(sk, skb);
		} else {
			queued = !sk_add_backlog(sk, skb);
		}
		bh_unlock_sock(sk);
		sock_put(sk);
		return queued;
	}

	if (frametype == X25_CALL_REQUEST)
		return x25_rx_call_request(skb, nb, lci);


	if (x25_forward_data(lci, nb, skb)) {
		if (frametype == X25_CLEAR_CONFIRMATION) {
			x25_clear_forward_by_lci(lci);
		}
		kfree_skb(skb);
		return 1;
	}


	if (frametype != X25_CLEAR_CONFIRMATION)
		printk(KERN_DEBUG "x25_receive_data(): unknown frame type %2x\n",frametype);

	return 0;
}
Example No. 18
static void ccid2_hc_tx_rto_expire(unsigned long data)
{
	struct sock *sk = (struct sock *)data;
	struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
	long s;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		sk_reset_timer(sk, &hctx->ccid2hctx_rtotimer,
			       jiffies + HZ / 5);
		goto out;
	}

	ccid2_pr_debug("RTO_EXPIRE\n");

	ccid2_hc_tx_check_sanity(hctx);

	/* back-off timer */
	hctx->ccid2hctx_rto <<= 1;

	s = hctx->ccid2hctx_rto / HZ;
	if (s > 60)
		hctx->ccid2hctx_rto = 60 * HZ;

	ccid2_start_rto_timer(sk);

	/* adjust pipe, cwnd etc */
	hctx->ccid2hctx_pipe = 0;
	hctx->ccid2hctx_ssthresh = hctx->ccid2hctx_cwnd >> 1;
	if (hctx->ccid2hctx_ssthresh < 2)
		hctx->ccid2hctx_ssthresh = 2;
	ccid2_change_cwnd(sk, 1);

	/* clear state about stuff we sent */
	hctx->ccid2hctx_seqt	= hctx->ccid2hctx_seqh;
	hctx->ccid2hctx_ssacks	= 0;
	hctx->ccid2hctx_acks	= 0;
	hctx->ccid2hctx_sent	= 0;

	/* clear ack ratio state. */
	hctx->ccid2hctx_arsent	 = 0;
	hctx->ccid2hctx_ackloss  = 0;
	hctx->ccid2hctx_rpseq	 = 0;
	hctx->ccid2hctx_rpdupack = -1;
	ccid2_change_l_ack_ratio(sk, 1);
	ccid2_hc_tx_check_sanity(hctx);
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
Example No. 19
/**
 *	llc_process_tmr_ev - timer backend
 *	@sk: active connection
 *	@skb: occurred event
 *
 *	This function is called from timer callback functions. When connection
 *	is busy (during sending a data frame) timer expiration event must be
 *	queued. Otherwise this event can be sent to connection state machine.
 *	Queued events will process by llc_backlog_rcv function after sending
 *	data frame.
 */
static void llc_process_tmr_ev(struct sock *sk, struct sk_buff *skb)
{
	if (llc_sk(sk)->state == LLC_CONN_OUT_OF_SVC) {
		printk(KERN_WARNING "%s: timer called on closed connection\n",
		       __func__);
		kfree_skb(skb);
	} else {
		if (!sock_owned_by_user(sk))
			llc_conn_state_process(sk, skb);
		else {
			llc_set_backlog_type(skb, LLC_EVENT);
			sk_add_backlog(sk, skb);
		}
	}
}
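
A hedged sketch of the backlog side of this contract: when the owner releases the socket, each queued skb comes back through sk->sk_backlog_rcv, which can tell real frames from deferred timer events by the tag set with llc_set_backlog_type() (the handler name and structure here are illustrative).

static int example_llc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	if (llc_backlog_type(skb) == LLC_PACKET)
		return llc_conn_rcv(sk, skb);

	/* LLC_EVENT: a timer expiry that was queued above; feed the state machine */
	llc_conn_state_process(sk, skb);
	return 0;
}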
Example No. 20
static int lapd_dispatch_mph_primitive(struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct lapd_device *dev = to_lapd_dev(skb->dev);
	int queued = FALSE;

	read_lock_bh(&lapd_hash_lock);

	sk_for_each(sk, node, lapd_get_hash(dev)) {
		struct lapd_sock *lapd_sock = to_lapd_sock(sk);

		if (lapd_sock->dev == dev &&
		    sk->sk_state == LAPD_SK_STATE_MGMT) {

			struct sk_buff *new_skb;

			if (!queued) {
				new_skb = skb;
				queued = TRUE;
			} else {
				new_skb = skb_clone(skb, GFP_ATOMIC);
			}

			new_skb->sk = sk;

			lapd_bh_lock_sock(lapd_sock);

			if (!sock_owned_by_user(&lapd_sock->sk)) {
				queued = lapd_mgmt_queue_primitive(lapd_sock,
									skb);
			} else {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
				sk_add_backlog(&lapd_sock->sk, skb);
#else
			  	__sk_add_backlog(&lapd_sock->sk, skb);
#endif
				queued = TRUE;
			}

			lapd_bh_unlock_sock(lapd_sock);
		}
	}
	read_unlock_bh(&lapd_hash_lock);

	return queued;
}
Example No. 21
/*
 * Bastet sock timeout, include all bastet time events.
 */
static void bastet_sock_bastet_timeout(unsigned long data)
{
	int event;
	struct sock *sk = (struct sock *)data;
	struct bastet_sock *bsk = sk->bastet;

	BASTET_LOGI("sk: %p time event: %d", sk, bsk->bastet_timer_event);

	bh_lock_sock(sk);

	/* Include in lock */
	event = bsk->bastet_timer_event;

	if (sock_owned_by_user(sk)) {
		/* Try again later */
		if (BST_TMR_DELAY_SOCK_SYNC == event) {
			bastet_wakelock_acquire_timeout(BST_SKIP_SOCK_OWNER_TIME + BST_WAKELOCK_TIMEOUT);
		}
		sk_reset_timer(sk, &bsk->bastet_timer, jiffies + BST_SKIP_SOCK_OWNER_TIME);
		goto out_unlock;
	}

	switch(event){
	case BST_TMR_REQ_SOCK_SYNC:
		request_sock_bastet_timeout(sk);
		break;
	case BST_TMR_SET_SOCK_SYNC:
		set_sock_bastet_timeout(sk);
		break;
	case BST_TMR_DELAY_SOCK_SYNC:
		delay_sock_bastet_timeout(sk);
		break;
	case BST_TMR_CLOSE_SOCK:
		close_sock_bastet_timeout(sk);
		break;
	default:
		BASTET_LOGE("sk: %p invalid time event: %d", sk, event);
		break;
	}

	sk_mem_reclaim(sk);
out_unlock:
	bh_unlock_sock(sk);
	sock_put(sk);
}
Example No. 22
void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
{
	struct llc_addr saddr, daddr;
	struct sock *sk;

	llc_pdu_decode_sa(skb, saddr.mac);
	llc_pdu_decode_ssap(skb, &saddr.lsap);
	llc_pdu_decode_da(skb, daddr.mac);
	llc_pdu_decode_dsap(skb, &daddr.lsap);

	sk = __llc_lookup(sap, &saddr, &daddr);
	if (!sk)
		goto drop;

	bh_lock_sock(sk);
	if (unlikely(sk->sk_state == TCP_LISTEN)) {
		struct sock *newsk = llc_create_incoming_sock(sk, skb->dev,
							      &saddr, &daddr);
		if (!newsk)
			goto drop_unlock;
		skb_set_owner_r(skb, newsk);
	} else {
		skb->sk = sk;
	}
	if (!sock_owned_by_user(sk))
		llc_conn_rcv(sk, skb);
	else {
		dprintk("%s: adding to backlog...\n", __func__);
		llc_set_backlog_type(skb, LLC_PACKET);
		if (sk_add_backlog(sk, skb))
			goto drop_unlock;
	}
out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return;
drop:
	kfree_skb(skb);
	return;
drop_unlock:
	kfree_skb(skb);
	goto out;
}
Example No. 23
File: bpf.c Project: pfq/PFQ
void
pfq_free_sk_filter(struct sk_filter *filter)
{
	struct sock sk;
	int rv;

	sock_init_data(NULL, &sk);
	sk.sk_filter = NULL;
	atomic_set(&sk.sk_omem_alloc, 0);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
	sock_reset_flag(&sk, SOCK_FILTER_LOCKED);
#endif
	sk.sk_filter = filter;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,8) && LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0))
	if ((rv = __sk_detach_filter(&sk, sock_owned_by_user(&sk))))
#else
	if ((rv = sk_detach_filter(&sk)))
#endif
		pr_devel("[PFQ] BPF: sk_detach_filter error: (%d)!\n", rv);
}
Example No. 24
int sk_receive_skb(struct sock *sk, struct sk_buff *skb)
{
	int rc = NET_RX_SUCCESS;

	if (sk_filter(sk, skb, 0))
		goto discard_and_relse;

	skb->dev = NULL;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		rc = sk->sk_backlog_rcv(sk, skb);
	else
		sk_add_backlog(sk, skb);
	bh_unlock_sock(sk);
out:
	sock_put(sk);
	return rc;
discard_and_relse:
	kfree_skb(skb);
	goto out;
}
Example No. 25
static void x25_heartbeat_expiry(unsigned long param)
{
	struct sock *sk = (struct sock *)param;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) /* can currently only occur in state 3 */
		goto restart_heartbeat;

	switch (x25_sk(sk)->state) {

		case X25_STATE_0:
			/*
			 * Magic here: If we listen() and a new link dies
			 * before it is accepted() it isn't 'dead' so doesn't
			 * get removed.
			 */
			if (sock_flag(sk, SOCK_DESTROY) ||
			    (sk->sk_state == TCP_LISTEN &&
			     sock_flag(sk, SOCK_DEAD))) {
				x25_destroy_socket(sk);
				goto unlock;
			}
			break;

		case X25_STATE_3:
			/*
			 * Check for the state of the receive buffer.
			 */
			x25_check_rbuf(sk);
			break;
	}
restart_heartbeat:
	x25_start_heartbeat(sk);
unlock:
	bh_unlock_sock(sk);
}
Example No. 26
void sdp_nagle_timeout(unsigned long data)
{
	struct sdp_sock *ssk = (struct sdp_sock *)data;
	struct sock *sk = sk_ssk(ssk);

	SDPSTATS_COUNTER_INC(nagle_timer);
	sdp_dbg_data(sk, "last_unacked = %u\n", ssk->nagle_last_unacked);

	if (!ssk->nagle_last_unacked)
		goto out2;

	/* Only process if the socket is not in use */
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		sdp_dbg_data(sk, "socket is busy - will try later\n");
		goto out;
	}

	if (sk->sk_state == TCP_CLOSE) {
		bh_unlock_sock(sk);
		return;
	}

	ssk->nagle_last_unacked = 0;
	sdp_post_sends(ssk, GFP_ATOMIC);

	if (sdp_sk_sleep(sk) && waitqueue_active(sdp_sk_sleep(sk)))
		sk_stream_write_space(sk);
out:
	bh_unlock_sock(sk);
out2:
	if (sk->sk_send_head && ssk->qp_active) {
		/* If has pending sends - rearm */
		mod_timer(&ssk->nagle_timer, jiffies + SDP_NAGLE_TIMEOUT);
	}
}
Example No. 27
static int lapd_pass_frame_to_socket(
	struct lapd_sock *lapd_sock,
	struct sk_buff *skb)
{
	int queued;

	/* Ensure serialization within a socket */
	lapd_bh_lock_sock(lapd_sock);

	if (!sock_owned_by_user(&lapd_sock->sk)) {
		queued = lapd_dlc_recv(lapd_sock, skb);
	} else {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) 
		sk_add_backlog(&lapd_sock->sk, skb);
#else
		__sk_add_backlog(&lapd_sock->sk, skb);
#endif
		queued = 1;
	}

	lapd_bh_unlock_sock(lapd_sock);

	return queued;
}
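
A short note on the version check above: on newer kernels sk_add_backlog() can fail once the backlog budget is exhausted, while __sk_add_backlog() queues unconditionally, which is what the #if is coping with. A hedged sketch of a small wrapper (illustrative, not part of the driver) that keeps call sites tidy:

static inline void lapd_add_backlog(struct sock *sk, struct sk_buff *skb)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
	sk_add_backlog(sk, skb);
#else
	__sk_add_backlog(sk, skb);
#endif
}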
Example No. 28
File: ipv4.c Project: panyfx/ath
/*
 * This routine is called by the ICMP module when it gets some sort of error
 * condition. If err < 0 then the socket should be closed and the error
 * returned to the user. If err > 0 it's just the icmp type << 8 | icmp code.
 * After adjustment header points to the first 8 bytes of the tcp header. We
 * need to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When someone else
 * accesses the socket the ICMP is just dropped and for some paths there is no
 * check at all. A more general error queue to queue errors for later handling
 * is probably better.
 */
static void dccp_v4_err(struct sk_buff *skb, u32 info)
{
	const struct iphdr *iph = (struct iphdr *)skb->data;
	const u8 offset = iph->ihl << 2;
	const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset);
	struct dccp_sock *dp;
	struct inet_sock *inet;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct sock *sk;
	__u64 seq;
	int err;
	struct net *net = dev_net(skb->dev);

	if (skb->len < offset + sizeof(*dh) ||
	    skb->len < offset + __dccp_basic_hdr_len(dh)) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	sk = __inet_lookup_established(net, &dccp_hashinfo,
				       iph->daddr, dh->dccph_dport,
				       iph->saddr, ntohs(dh->dccph_sport),
				       inet_iif(skb));
	if (!sk) {
		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == DCCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = dccp_hdr_seq(dh);
	if (sk->sk_state == DCCP_NEW_SYN_RECV)
		return dccp_req_err(sk, seq);

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 */
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == DCCP_CLOSED)
		goto out;

	dp = dccp_sk(sk);
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
	    !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		dccp_do_redirect(skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			if (!sock_owned_by_user(sk))
				dccp_do_pmtu_discovery(sk, iph, info);
			goto out;
		}

		err = icmp_err_convert[code].errno;
		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case DCCP_REQUESTING:
	case DCCP_RESPOND:
		if (!sock_owned_by_user(sk)) {
			DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
			sk->sk_err = err;

			sk->sk_error_report(sk);

			dccp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even this two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else /* Only an error on timeout */
		sk->sk_err_soft = err;
out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
Example No. 29
File: ipv4.c Project: panyfx/ath
int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	const struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct dccp_sock *dp = dccp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	dp->dccps_role = DCCP_ROLE_CLIENT;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;

	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt != NULL && inet_opt->opt.srr) {
		if (daddr == 0)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_DCCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (inet_opt == NULL || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (inet->inet_saddr == 0)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);
	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	/*
	 * Socket identity is still unknown (sport may be zero).
	 * However we set state to DCCP_REQUESTING and not releasing socket
	 * lock select source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	dccp_set_state(sk, DCCP_REQUESTING);
	err = inet_hash_connect(&dccp_death_row, sk);
	if (err != 0)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk_setup_caps(sk, &rt->dst);

	dp->dccps_iss = secure_dccp_sequence_number(inet->inet_saddr,
						    inet->inet_daddr,
						    inet->inet_sport,
						    inet->inet_dport);
	inet->inet_id = dp->dccps_iss ^ jiffies;

	err = dccp_connect(sk);
	rt = NULL;
	if (err != 0)
		goto failure;
out:
	return err;
failure:
	/*
	 * This unhashes the socket and releases the local port, if necessary.
	 */
	dccp_set_state(sk, DCCP_CLOSED);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	goto out;
}
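
A hedged sketch of why sock_owned_by_user(sk) is a valid RCU-protection condition for inet_opt above: ->connect() is reached from process context with the socket lock already held by the AF_INET glue, roughly as follows (condensed and illustrative):

static int connect_path_sketch(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	int err;

	lock_sock(sk);		/* marks the socket as owned by user context */
	err = sk->sk_prot->connect(sk, uaddr, addr_len);	/* -> dccp_v4_connect() */
	release_sock(sk);
	return err;
}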
Example No. 30
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr*)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct ipv6_pinfo *np;
	struct sock *sk;
	int err;
	struct tcp_sock *tp;
	__u32 seq;
	struct net *net = dev_net(skb->dev);

	sk = inet6_lookup(net, &tcp_hashinfo, &hdr->daddr,
			th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

	if (sk == NULL) {
		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
				   ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	seq = ntohl(th->seq);
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, tp->snd_una, tp->snd_nxt)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == ICMPV6_PKT_TOOBIG) {
		struct dst_entry *dst;

		if (sock_owned_by_user(sk))
			goto out;
		if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
			goto out;

		
		dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst == NULL) {
			struct inet_sock *inet = inet_sk(sk);
			struct flowi6 fl6;

			memset(&fl6, 0, sizeof(fl6));
			fl6.flowi6_proto = IPPROTO_TCP;
			fl6.daddr = np->daddr;
			fl6.saddr = np->saddr;
			fl6.flowi6_oif = sk->sk_bound_dev_if;
			fl6.flowi6_mark = sk->sk_mark;
			fl6.fl6_dport = inet->inet_dport;
			fl6.fl6_sport = inet->inet_sport;
			fl6.flowi6_uid = sock_i_uid(sk);
			security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

			dst = ip6_dst_lookup_flow(sk, &fl6, NULL, false);
			if (IS_ERR(dst)) {
				sk->sk_err_soft = -PTR_ERR(dst);
				goto out;
			}

		} else
			dst_hold(dst);

		if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
			tcp_sync_mss(sk, dst_mtu(dst));
			tcp_simple_retransmit(sk);
		} 
		dst_release(dst);
		goto out;
	}

	icmpv6_err_convert(type, code, &err);

	
	switch (sk->sk_state) {
		struct request_sock *req, **prev;
	case TCP_LISTEN:
		if (sock_owned_by_user(sk))
			goto out;

		req = inet6_csk_search_req(sk, &prev, th->dest, &hdr->daddr,
					   &hdr->saddr, inet6_iif(skb));
		if (!req)
			goto out;

		WARN_ON(req->sk != NULL);

		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
			goto out;
		}

		inet_csk_reqsk_queue_drop(sk, req, prev);
		goto out;

	case TCP_SYN_SENT:
	case TCP_SYN_RECV:  
		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);		

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}