Example #1
0
static void __vsock_release(struct sock *sk)
{
	if (sk) {
		struct sk_buff *skb;
		struct sock *pending;
		struct vsock_sock *vsk;

		vsk = vsock_sk(sk);
		pending = NULL;	/* Compiler warning. */

		transport->release(vsk);

		lock_sock(sk);
		sock_orphan(sk);
		sk->sk_shutdown = SHUTDOWN_MASK;

		while ((skb = skb_dequeue(&sk->sk_receive_queue)))
			kfree_skb(skb);

		/* Clean up any sockets that never were accepted. */
		while ((pending = vsock_dequeue_accept(sk)) != NULL) {
			__vsock_release(pending);
			sock_put(pending);
		}

		release_sock(sk);
		sock_put(sk);
	}
}
Example #2
0
static int l2cap_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct sock *srv_sk = NULL;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	/* If this is an ATT Client socket, find the matching Server */
	if (l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA && !l2cap_pi(sk)->incoming)
		srv_sk = l2cap_find_sock_by_fixed_cid_and_dir(L2CAP_CID_LE_DATA,
					&bt_sk(sk)->src, &bt_sk(sk)->dst, 1);

	/* If server socket found, request tear down */
	BT_DBG("client:%p server:%p", sk, srv_sk);
	if (srv_sk)
		l2cap_sock_set_timer(srv_sk, 1);

	err = l2cap_sock_shutdown(sock, 2);

	sock_orphan(sk);
	l2cap_sock_kill(sk);
	return err;
}
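Most of the release functions collected in these examples share one skeleton: unlink the socket so no new packets arrive, orphan it to detach it from its struct socket, purge its queues, and drop the final reference. A minimal sketch of that shared pattern; proto_unlink() and the function name are illustrative placeholders, not a real kernel API:

static int example_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	proto_unlink(sk);	/* placeholder: stop new packets from arriving */

	sock_orphan(sk);	/* detach sk from its struct socket */
	sock->sk = NULL;

	skb_queue_purge(&sk->sk_receive_queue);	/* drop any queued data */

	sock_put(sk);		/* drop the final reference */
	return 0;
}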
Example #3
0
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	bluez_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (!hci_sock_lookup(hdev))
			hdev->flags &= ~HCI_SOCK;

		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->receive_queue);
	skb_queue_purge(&sk->write_queue);

	sock_put(sk);

	MOD_DEC_USE_COUNT;

	return 0;
}
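Note that this hci_sock_release() reads hci_pi(sk)->hdev before the !sk guard runs, so a NULL sk would be dereferenced. An illustrative reordering of the same body that defers the lookup:

static int hci_sock_release_guarded(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;	/* deferred until after the !sk guard */

	bluez_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (!hci_sock_lookup(hdev))
			hdev->flags &= ~HCI_SOCK;
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->receive_queue);
	skb_queue_purge(&sk->write_queue);

	sock_put(sk);

	MOD_DEC_USE_COUNT;
	return 0;
}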
Example #4
0
static int l2cap_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct sock *sk2 = NULL;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	/* If this is an ATT socket, find its matching server/client */
	if (l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA)
		sk2 = l2cap_find_sock_by_fixed_cid_and_dir(L2CAP_CID_LE_DATA,
					&bt_sk(sk)->src, &bt_sk(sk)->dst,
					l2cap_pi(sk)->incoming ? 0 : 1);

	/* If matching socket found, request tear down */
	BT_DBG("sock:%p companion:%p", sk, sk2);
	if (sk2)
		l2cap_sock_set_timer(sk2, 1);

	err = l2cap_sock_shutdown(sock, 2);

	sock_orphan(sk);
	l2cap_sock_kill(sk);
	return err;
}
Example #5
0
static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	netlink_remove(sk);

	spin_lock(&sk->protinfo.af_netlink->cb_lock);
	if (sk->protinfo.af_netlink->cb) {
		sk->protinfo.af_netlink->cb->done(sk->protinfo.af_netlink->cb);
		netlink_destroy_callback(sk->protinfo.af_netlink->cb);
		sk->protinfo.af_netlink->cb = NULL;
		__sock_put(sk);
	}
	spin_unlock(&sk->protinfo.af_netlink->cb_lock);

	/* OK. Socket is unlinked, and, therefore,
	   no new packets will arrive */

	sock_orphan(sk);
	sock->sk = NULL;
	wake_up_interruptible_all(&sk->protinfo.af_netlink->wait);

	skb_queue_purge(&sk->write_queue);

	sock_put(sk);
	return 0;
}
Example #6
0
static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	bluez_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->receive_queue);
	skb_queue_purge(&sk->write_queue);

	sock_put(sk);

	MOD_DEC_USE_COUNT;
	return 0;
}
Example #7
0
static int packet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct sock **skp;

	if (!sk)
		return 0;

	write_lock_bh(&packet_sklist_lock);
	for (skp = &packet_sklist; *skp; skp = &(*skp)->next) {
		if (*skp == sk) {
			*skp = sk->next;
			__sock_put(sk);
			break;
		}
	}
	write_unlock_bh(&packet_sklist_lock);

	/*
	 *	Unhook packet receive handler.
	 */

	if (sk->protinfo.af_packet->running) {
		/*
		 *	Remove the protocol hook
		 */
		dev_remove_pack(&sk->protinfo.af_packet->prot_hook);
		sk->protinfo.af_packet->running = 0;
		__sock_put(sk);
	}

#ifdef CONFIG_PACKET_MULTICAST
	packet_flush_mclist(sk);
#endif

#ifdef CONFIG_PACKET_MMAP
	if (sk->protinfo.af_packet->pg_vec) {
		struct tpacket_req req;
		memset(&req, 0, sizeof(req));
		packet_set_ring(sk, &req, 1);
	}
#endif

	/*
	 *	Now the socket is dead. No more input will appear.
	 */

	sock_orphan(sk);
	sock->sk = NULL;

	/* Purge queues */

	skb_queue_purge(&sk->receive_queue);

	sock_put(sk);
	return 0;
}
Example #8
0
static int rawsock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	nfc_dbg("sock=%p", sock);

	sock_orphan(sk);
	sock_put(sk);

	return 0;
}
Example #9
0
static int packet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po;

	if (!sk)
		return 0;

	po = pkt_sk(sk);

	write_lock_bh(&packet_sklist_lock);
	sk_del_node_init(sk);
	write_unlock_bh(&packet_sklist_lock);

	/*
	 *	Unhook packet receive handler.
	 */

	if (po->running) {
		/*
		 *	Remove the protocol hook
		 */
		dev_remove_pack(&po->prot_hook);
		po->running = 0;
		po->num = 0;
		__sock_put(sk);
	}

#ifdef CONFIG_PACKET_MULTICAST
	packet_flush_mclist(sk);
#endif

#ifdef CONFIG_PACKET_MMAP
	if (po->pg_vec) {
		struct tpacket_req req;
		memset(&req, 0, sizeof(req));
		packet_set_ring(sk, &req, 1);
	}
#endif

	/*
	 *	Now the socket is dead. No more input will appear.
	 */

	sock_orphan(sk);
	sock->sk = NULL;

	/* Purge queues */

	skb_queue_purge(&sk->sk_receive_queue);

	sock_put(sk);
	return 0;
}
Example #10
0
static int rawsock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	pr_debug("sock=%p\n", sock);

	sock_orphan(sk);
	sock_put(sk);

	return 0;
}
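Unlike nearly every other function in this set, neither rawsock_release() variant checks sk for NULL before use. A guarded variant, shown only as an illustrative sketch, would be:

static int rawsock_release_guarded(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)	/* guard absent from the variants above */
		return 0;

	sock_orphan(sk);
	sock_put(sk);

	return 0;
}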
Example #11
0
/*
 * release an RxRPC socket
 */
static int rxrpc_release_sock(struct sock *sk)
{
	struct rxrpc_sock *rx = rxrpc_sk(sk);
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));

	_enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));

	/* declare the socket closed for business */
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;

	/* We want to kill off all connections from a service socket
	 * as fast as possible because we can't share these; client
	 * sockets, on the other hand, can share an endpoint.
	 */
	switch (sk->sk_state) {
	case RXRPC_SERVER_BOUND:
	case RXRPC_SERVER_BOUND2:
	case RXRPC_SERVER_LISTENING:
	case RXRPC_SERVER_LISTEN_DISABLED:
		rx->local->service_closed = true;
		break;
	}

	spin_lock_bh(&sk->sk_receive_queue.lock);
	sk->sk_state = RXRPC_CLOSE;
	spin_unlock_bh(&sk->sk_receive_queue.lock);

	if (rx->local && rcu_access_pointer(rx->local->service) == rx) {
		write_lock(&rx->local->services_lock);
		rcu_assign_pointer(rx->local->service, NULL);
		write_unlock(&rx->local->services_lock);
	}

	/* try to flush out this socket */
	rxrpc_discard_prealloc(rx);
	rxrpc_release_calls_on_socket(rx);
	flush_workqueue(rxrpc_workqueue);
	rxrpc_purge_queue(&sk->sk_receive_queue);
	rxrpc_queue_work(&rxnet->service_conn_reaper);
	rxrpc_queue_work(&rxnet->client_conn_reaper);

	rxrpc_put_local(rx->local);
	rx->local = NULL;
	key_put(rx->key);
	rx->key = NULL;
	key_put(rx->securities);
	rx->securities = NULL;
	sock_put(sk);

	_leave(" = 0");
	return 0;
}
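rxrpc_release_sock() takes a struct sock rather than a struct socket, so the ->release handler registered with the socket layer is a thin wrapper. A sketch of its likely shape (simplified, tracing omitted):

static int rxrpc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	sock->sk = NULL;	/* detach before tearing the sock down */

	return rxrpc_release_sock(sk);
}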
Example #12
0
static int bnep_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	sock_orphan(sk);
	sock_put(sk);
	return 0;
}
Example #13
0
static int l2cap_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	err = l2cap_sock_shutdown(sock, 2);

	sock_orphan(sk);
	l2cap_sock_kill(sk);
	return err;
}
Example #14
0
static int cmtp_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	bt_sock_unlink(&cmtp_sk_list, sk);

	sock_orphan(sk);
	sock_put(sk);

	return 0;
}
Example #15
0
static int
MksckRelease(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (sk) {
		lock_sock(sk);
		sock_orphan(sk);
		release_sock(sk);
		sock_put(sk);
	}

	sock->sk = NULL;
	sock->state = SS_FREE;

	return 0;
}
Example #16
0
static int raw_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct raw_sock *ro;

	if (!sk)
		return 0;

	ro = raw_sk(sk);

	unregister_netdevice_notifier(&ro->notifier);

	lock_sock(sk);

	/* remove current filters & unregister */
	if (ro->bound) {
		if (ro->ifindex) {
			struct net_device *dev;

			dev = dev_get_by_index(&init_net, ro->ifindex);
			if (dev) {
				raw_disable_allfilters(dev, sk);
				dev_put(dev);
			}
		} else
			raw_disable_allfilters(NULL, sk);
	}

	if (ro->count > 1)
		kfree(ro->filter);

	ro->ifindex = 0;
	ro->bound   = 0;
	ro->count   = 0;
	free_percpu(ro->uniq);

	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return 0;
}
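From user space, raw_release() above is what a close() on a bound CAN_RAW socket eventually drives. A minimal hypothetical round trip (error handling kept terse):

#include <linux/can.h>
#include <linux/can/raw.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int can_raw_roundtrip(const char *ifname)
{
	struct sockaddr_can addr;
	struct ifreq ifr;
	int fd = socket(PF_CAN, SOCK_RAW, CAN_RAW);

	if (fd < 0)
		return -1;

	/* resolve the interface name to an ifindex */
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	if (ioctl(fd, SIOCGIFINDEX, &ifr) < 0) {
		close(fd);
		return -1;
	}

	memset(&addr, 0, sizeof(addr));
	addr.can_family = AF_CAN;
	addr.can_ifindex = ifr.ifr_ifindex;
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}

	/* close() drives raw_release(): filters removed, sock orphaned */
	return close(fd);
}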
Example #17
0
static int sco_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	sco_sock_close(sk);
	if (sk->linger) {
		lock_sock(sk);
		err = bluez_sock_wait_state(sk, BT_CLOSED, sk->lingertime);
		release_sock(sk);
	}

	sock_orphan(sk);
	sco_sock_kill(sk);
	return err;
}
Example #18
0
static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	netlink_remove(sk);

	spin_lock(&sk->protinfo.af_netlink->cb_lock);
	if (sk->protinfo.af_netlink->cb) {
		sk->protinfo.af_netlink->cb->done(sk->protinfo.af_netlink->cb);
		netlink_destroy_callback(sk->protinfo.af_netlink->cb);
		sk->protinfo.af_netlink->cb = NULL;
		__sock_put(sk);
	}
	spin_unlock(&sk->protinfo.af_netlink->cb_lock);

	/* OK. Socket is unlinked, and, therefore,
	   no new packets will arrive */

	sock_orphan(sk);
	sock->sk = NULL;
	wake_up_interruptible_all(&sk->protinfo.af_netlink->wait);

	skb_queue_purge(&sk->write_queue);

	if (sk->protinfo.af_netlink->pid && !sk->protinfo.af_netlink->groups) {
		struct netlink_notify n = { protocol:sk->protocol,
		                            pid:sk->protinfo.af_netlink->pid };
		notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
	}	
	
	sock_put(sk);
	return 0;
}
Example #19
0
static int mptp_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct mptp_sock *ssk = mptp_sk(sk);

	if (unlikely(!sk))
		return 0;

	mptp_unhash(ssk->src);

	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

	synchronize_net();

	sock_orphan(sk);
	sock->sk = NULL;

	skb_queue_purge(&sk->sk_receive_queue);

	log_debug("mptp_release sock=%p\n", sk);
	sock_put(sk);

	return 0;
}
Example #20
0
static int open_socket(cpt_object_t *obj, struct cpt_sock_image *si,
                       struct cpt_context *ctx)
{
    int err;
    struct socket *sock;
    struct socket *sock2 = NULL;
    struct file *file;
    cpt_object_t *fobj;
    cpt_object_t *pobj = NULL;

    err = sock_create(si->cpt_family, si->cpt_type, si->cpt_protocol,
                      &sock);
    if (err)
        return err;

    if (si->cpt_socketpair) {
        err = sock_create(si->cpt_family, si->cpt_type,
                          si->cpt_protocol, &sock2);
        if (err)
            goto err_out;

        err = sock->ops->socketpair(sock, sock2);
        if (err < 0)
            goto err_out;

        /* Socketpair with a peer outside our environment.
         * So, we create real half-open pipe and do not worry
         * about dead end anymore. */
        if (si->cpt_peer == -1) {
            sock_release(sock2);
            sock2 = NULL;
        }
    }

    cpt_obj_setobj(obj, sock->sk, ctx);

    if (si->cpt_file != CPT_NULL) {
        file = sock_mapfile(sock);
        err = PTR_ERR(file);
        if (IS_ERR(file))
            goto err_out;

        err = -ENOMEM;

        obj->o_parent = file;

        if ((fobj = cpt_object_add(CPT_OBJ_FILE, file, ctx)) == NULL)
            goto err_out;
        cpt_obj_setpos(fobj, si->cpt_file, ctx);
        cpt_obj_setindex(fobj, si->cpt_index, ctx);
    }

    if (sock2) {
        struct file *file2;

        pobj = lookup_cpt_obj_byindex(CPT_OBJ_SOCKET, si->cpt_peer, ctx);
        if (!pobj) BUG();
        if (pobj->o_obj) BUG();
        cpt_obj_setobj(pobj, sock2->sk, ctx);

        if (pobj->o_ppos != CPT_NULL) {
            file2 = sock_mapfile(sock2);
            err = PTR_ERR(file2);
            if (IS_ERR(file2))
                goto err_out;

            err = -ENOMEM;
            if ((fobj = cpt_object_add(CPT_OBJ_FILE, file2, ctx)) == NULL)
                goto err_out;
            cpt_obj_setpos(fobj, pobj->o_ppos, ctx);
            cpt_obj_setindex(fobj, si->cpt_peer, ctx);

            pobj->o_parent = file2;
        }
    }

    setup_sock_common(sock->sk, si, obj->o_pos, ctx);
    if (sock->sk->sk_family == AF_INET || sock->sk->sk_family == AF_INET6) {
        int saved_reuse = sock->sk->sk_reuse;

        inet_sk(sock->sk)->freebind = 1;
        sock->sk->sk_reuse = 2;
        if (si->cpt_laddrlen) {
            err = sock->ops->bind(sock, (struct sockaddr *)&si->cpt_laddr, si->cpt_laddrlen);
            if (err) {
                dprintk_ctx("binding failed: %d, do not worry\n", err);
            }
        }
        sock->sk->sk_reuse = saved_reuse;
        rst_socket_in(si, obj->o_pos, sock->sk, ctx);
    } else if (sock->sk->sk_family == AF_NETLINK) {
        struct sockaddr_nl *nl = (struct sockaddr_nl *)&si->cpt_laddr;
        if (nl->nl_pid) {
            err = sock->ops->bind(sock, (struct sockaddr *)&si->cpt_laddr, si->cpt_laddrlen);
            if (err) {
                eprintk_ctx("AF_NETLINK binding failed: %d\n", err);
            }
        }
        if (si->cpt_raddrlen && nl->nl_pid) {
            err = sock->ops->connect(sock, (struct sockaddr *)&si->cpt_raddr, si->cpt_raddrlen, O_NONBLOCK);
            if (err) {
                eprintk_ctx("oops, AF_NETLINK connect failed: %d\n", err);
            }
        }
        generic_restore_queues(sock->sk, si, obj->o_pos, ctx);
    } else if (sock->sk->sk_family == PF_PACKET) {
        struct sockaddr_ll *ll = (struct sockaddr_ll *)&si->cpt_laddr;
        if (ll->sll_protocol || ll->sll_ifindex) {
            int alen = si->cpt_laddrlen;
            if (alen < sizeof(struct sockaddr_ll))
                alen = sizeof(struct sockaddr_ll);
            err = sock->ops->bind(sock, (struct sockaddr *)&si->cpt_laddr, alen);
            if (err) {
                eprintk_ctx("AF_PACKET binding failed: %d\n", err);
            }
        }
        generic_restore_queues(sock->sk, si, obj->o_pos, ctx);
    }
    fixup_unix_address(sock, si, ctx);

    if (sock2) {
        err = rst_get_object(CPT_OBJ_SOCKET, pobj->o_pos, si, ctx);
        if (err)
            return err;
        setup_sock_common(sock2->sk, si, pobj->o_pos, ctx);
        fixup_unix_address(sock2, si, ctx);
    }

    if ((sock->sk->sk_family == AF_INET || sock->sk->sk_family == AF_INET6)
            && (int)si->cpt_parent != -1) {
        cpt_object_t *lobj = lookup_cpt_obj_byindex(CPT_OBJ_SOCKET, si->cpt_parent, ctx);
        if (lobj && cpt_attach_accept(lobj->o_obj, sock->sk, ctx) == 0)
            sock->sk = NULL;
    }


    if (si->cpt_file == CPT_NULL && sock->sk &&
            sock->sk->sk_family == AF_INET) {
        struct sock *sk = sock->sk;

        if (sk) {
            sock->sk = NULL;

            local_bh_disable();
            bh_lock_sock(sk);
            if (sock_owned_by_user(sk))
                eprintk_ctx("oops, sock is locked by user\n");

            sock_hold(sk);
            sock_orphan(sk);
            ub_inc_orphan_count(sk);
            bh_unlock_sock(sk);
            local_bh_enable();
            sock_put(sk);
            dprintk_ctx("orphaning socket %p\n", sk);
        }
    }

    if (si->cpt_file == CPT_NULL && sock->sk == NULL)
        sock_release(sock);

    return 0;

err_out:
    if (sock2)
        sock_release(sock2);
    sock_release(sock);
    return err;
}
Example #21
0
/**
 * This is main body of the socket close function in Sync Sockets.
 *
 * inet_release() can sleep (as well as tcp_close()), so we make our own
 * non-sleepable socket closing.
 *
 * This function must be used only for data sockets.
 * Use standard sock_release() for listening sockets.
 *
 * In most cases it is called in softirq context and from ksoftirqd which
 * processes data from the socket (RSS and RPS distribute packets that way).
 *
 * Note: it used to be called in process context as well, at the time when
 * Tempesta starts or stops. That's not the case right now, but it may change.
 *
 * TODO In some cases we need to close the socket aggressively w/o FIN_WAIT_2 state,
 * e.g. by sending RST. So we need to add second parameter to the function
 * which says how to close the socket.
 * One of the examples is rcl_req_limit() (it should reset connections).
 * See tcp_sk(sk)->linger2 processing in standard tcp_close().
 *
 * Called with locked socket.
 */
static void
ss_do_close(struct sock *sk)
{
	struct sk_buff *skb;
	int data_was_unread = 0;
	int state;

	if (unlikely(!sk))
		return;
	SS_DBG("Close socket %p (%s): cpu=%d account=%d refcnt=%d\n",
	       sk, ss_statename[sk->sk_state], smp_processor_id(),
	       sk_has_account(sk), atomic_read(&sk->sk_refcnt));
	assert_spin_locked(&sk->sk_lock.slock);
	ss_sock_cpu_check(sk);
	BUG_ON(sk->sk_state == TCP_LISTEN);
	/* We must return immediately, so LINGER option is meaningless. */
	WARN_ON(sock_flag(sk, SOCK_LINGER));
	/* We don't support virtual containers, so TCP_REPAIR is prohibited. */
	WARN_ON(tcp_sk(sk)->repair);
	/* The socket must have atomic allocation mask. */
	WARN_ON(!(sk->sk_allocation & GFP_ATOMIC));

	/* The below is mostly copy-paste from tcp_close(). */
	sk->sk_shutdown = SHUTDOWN_MASK;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
			  tcp_hdr(skb)->fin;
		data_was_unread += len;
		SS_DBG("free rcv skb %p\n", skb);
		__kfree_skb(skb);
	}

	sk_mem_reclaim(sk);

	if (sk->sk_state == TCP_CLOSE)
		goto adjudge_to_death;

	if (data_was_unread) {
		NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
		tcp_set_state(sk, TCP_CLOSE);
		tcp_send_active_reset(sk, sk->sk_allocation);
	}
	else if (tcp_close_state(sk)) {
		/* The code below is taken from tcp_send_fin(). */
		struct tcp_sock *tp = tcp_sk(sk);
		int mss_now = tcp_current_mss(sk);

		skb = tcp_write_queue_tail(sk);

		if (tcp_send_head(sk) != NULL) {
			/* Send FIN with data if we have any. */
			TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
			TCP_SKB_CB(skb)->end_seq++;
			tp->write_seq++;
		}
		else {
			/* No data to send in the socket, allocate new skb. */
			skb = alloc_skb_fclone(MAX_TCP_HEADER,
					       sk->sk_allocation);
			if (!skb) {
				SS_WARN("can't send FIN due to bad alloc");
			} else {
				skb_reserve(skb, MAX_TCP_HEADER);
				tcp_init_nondata_skb(skb, tp->write_seq,
						     TCPHDR_ACK | TCPHDR_FIN);
				tcp_queue_skb(sk, skb);
			}
		}
		__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
	}

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	/*
	 * SS sockets are processed in softirq only,
	 * so backlog queue should be empty.
	 */
	WARN_ON(sk->sk_backlog.tail);

	percpu_counter_inc(sk->sk_prot->orphan_count);

	if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
		return;

	if (sk->sk_state == TCP_FIN_WAIT2) {
		const int tmo = tcp_fin_time(sk);
		if (tmo > TCP_TIMEWAIT_LEN) {
			inet_csk_reset_keepalive_timer(sk,
						tmo - TCP_TIMEWAIT_LEN);
		} else {
			tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
			return;
		}
	}
	if (sk->sk_state != TCP_CLOSE) {
		sk_mem_reclaim(sk);
		if (tcp_check_oom(sk, 0)) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPABORTONMEMORY);
		}
	}
	if (sk->sk_state == TCP_CLOSE) {
		struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
		if (req != NULL)
			reqsk_fastopen_remove(sk, req, false);
		inet_csk_destroy_sock(sk);
	}
}
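Per the header comment, ss_do_close() must be entered with the socket spinlock held and is normally reached from softirq context. A purely hypothetical caller sketch under those assumptions (ss_close_sketch() is not part of the original code):

static void
ss_close_sketch(struct sock *sk)
{
	/* Assumes softirq context; bh_lock_sock() satisfies the
	 * assert_spin_locked() check inside ss_do_close(). */
	bh_lock_sock(sk);
	ss_do_close(sk);
	bh_unlock_sock(sk);
	sock_put(sk);	/* drop the reference the caller held */
}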
Example #22
0
void dccp_close(struct sock *sk, long timeout)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	u32 data_was_unread = 0;
	int state;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	sk_stop_timer(sk, &dp->dccps_xmit_timer);

	/*
	 * We need to flush the recv. buffs.  We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		data_was_unread += skb->len;
		__kfree_skb(skb);
	}

	if (data_was_unread) {
		/* Unread data was tossed, send an appropriate Reset Code */
		DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread);
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		dccp_set_state(sk, DCCP_CLOSED);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (sk->sk_state != DCCP_CLOSED) {
		/*
		 * Normal connection termination. May need to wait if there are
		 * still packets in the TX queue that are delayed by the CCID.
		 */
		dccp_flush_write_queue(sk, &timeout);
		dccp_terminate_connection(sk);
	}

	/*
	 * Flush write queue. This may be necessary in several cases:
	 * - we have been closed by the peer but still have application data;
	 * - abortive termination (unread data or zero linger time),
	 * - normal termination but queue could not be flushed within time limit
	 */
	__skb_queue_purge(&sk->sk_write_queue);

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);
	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	WARN_ON(sock_owned_by_user(sk));

	percpu_counter_inc(sk->sk_prot->orphan_count);

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
		goto out;

	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}
Example #23
0
void dccp_close(struct sock *sk, long timeout)
{
	struct sk_buff *skb;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	/*
	 * We need to flush the recv. buffs.  We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	/* FIXME: check for unread data */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		__kfree_skb(skb);
	}

	if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (dccp_close_state(sk)) {
		dccp_send_close(sk, 1);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);
	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	BUG_TRAP(!sock_owned_by_user(sk));

	sock_hold(sk);
	sock_orphan(sk);

	/*
	 * The last release_sock may have processed the CLOSE or RESET
	 * packet moving sock to CLOSED state, if not we have to fire
	 * the CLOSE/CLOSEREQ retransmission timer, see "8.3. Termination"
	 * in draft-ietf-dccp-spec-11. -acme
	 */
	if (sk->sk_state == DCCP_CLOSING) {
		/* FIXME: should start at 2 * RTT */
		/* Timer for repeating the CLOSE/CLOSEREQ until an answer. */
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
					  inet_csk(sk)->icsk_rto,
					  DCCP_RTO_MAX);
#if 0
		/* Yeah, we should use sk->sk_prot->orphan_count, etc */
		dccp_set_state(sk, DCCP_CLOSED);
#endif
	}

	atomic_inc(sk->sk_prot->orphan_count);
	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}
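Both dccp_close() variants above are installed as the protocol's ->close handler, so they are entered from the generic socket layer with a timeout derived from SO_LINGER. A simplified sketch of that call path, modeled loosely on inet_release() (details such as multicast cleanup elided):

static int inet_release_sketch(struct socket *sock)
{
	struct sock *sk = sock->sk;
	long timeout = 0;

	if (!sk)
		return 0;

	/* SO_LINGER grants the close a grace period, unless we are exiting */
	if (sock_flag(sk, SOCK_LINGER) && !(current->flags & PF_EXITING))
		timeout = sk->sk_lingertime;

	sock->sk = NULL;
	sk->sk_prot->close(sk, timeout);	/* e.g. dccp_close() above */
	return 0;
}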
Example #24
0
static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	nlk = nlk_sk(sk);

	spin_lock(&nlk->cb_lock);
	if (nlk->cb) {
		nlk->cb->done(nlk->cb);
		netlink_destroy_callback(nlk->cb);
		nlk->cb = NULL;
		__sock_put(sk);
	}
	spin_unlock(&nlk->cb_lock);

	/* OK. Socket is unlinked, and, therefore,
	   no new packets will arrive */

	sock_orphan(sk);
	sock->sk = NULL;
	wake_up_interruptible_all(&nlk->wait);

	skb_queue_purge(&sk->sk_write_queue);

	if (nlk->pid && !nlk->groups) {
		struct netlink_notify n = {
						.protocol = sk->sk_protocol,
						.pid = nlk->pid,
					  };
		notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n);
	}	
	
	sock_put(sk);
	return 0;
}

static int netlink_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	struct sock *osk;
	struct hlist_node *node;
	s32 pid = current->pid;
	int err;
	static s32 rover = -4097;

retry:
	cond_resched();
	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(osk, node, head) {
		if (nlk_sk(osk)->pid == pid) {
			/* Bind collision, search negative pid values. */
			pid = rover--;
			if (rover > -4097)
				rover = -4097;
			netlink_table_ungrab();
			goto retry;
		}
	}
	netlink_table_ungrab();

	err = netlink_insert(sk, pid);
	if (err == -EADDRINUSE)
		goto retry;
	nlk_sk(sk)->groups = 0;
	return 0;
}

static inline int netlink_capable(struct socket *sock, unsigned int flag) 
{ 
	return (nl_nonroot[sock->sk->sk_protocol] & flag) ||
	       capable(CAP_NET_ADMIN);
} 

static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
	int err;
	
	if (nladdr->nl_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to listen multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_RECV))
		return -EPERM;

	if (nlk->pid) {
		if (nladdr->nl_pid != nlk->pid)
			return -EINVAL;
	} else {
		err = nladdr->nl_pid ?
			netlink_insert(sk, nladdr->nl_pid) :
			netlink_autobind(sock);
		if (err)
			return err;
	}

	if (!nladdr->nl_groups && !nlk->groups)
		return 0;

	netlink_table_grab();
	if (nlk->groups && !nladdr->nl_groups)
		__sk_del_bind_node(sk);
	else if (!nlk->groups && nladdr->nl_groups)
		sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
	nlk->groups = nladdr->nl_groups;
	netlink_table_ungrab();

	return 0;
}

static int netlink_connect(struct socket *sock, struct sockaddr *addr,
			   int alen, int flags)
{
	int err = 0;
	struct sock *sk = sock->sk;
	struct netlink_opt *nlk = nlk_sk(sk);
	struct sockaddr_nl *nladdr=(struct sockaddr_nl*)addr;

	if (addr->sa_family == AF_UNSPEC) {
		sk->sk_state	= NETLINK_UNCONNECTED;
		nlk->dst_pid	= 0;
		nlk->dst_groups = 0;
		return 0;
	}
	if (addr->sa_family != AF_NETLINK)
		return -EINVAL;

	/* Only superuser is allowed to send multicasts */
	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
		return -EPERM;

	if (!nlk->pid)
		err = netlink_autobind(sock);

	if (err == 0) {
		sk->sk_state	= NETLINK_CONNECTED;
		nlk->dst_pid 	= nladdr->nl_pid;
		nlk->dst_groups = nladdr->nl_groups;
	}

	return err;
}
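For context, netlink_bind() above services an ordinary bind() on an AF_NETLINK socket; leaving nl_pid at 0 is what routes into netlink_autobind(). A minimal user-space sketch (NETLINK_ROUTE chosen arbitrarily):

#include <linux/netlink.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int netlink_bind_example(void)
{
	struct sockaddr_nl addr;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.nl_family = AF_NETLINK;
	addr.nl_pid = 0;	/* 0: kernel picks an id via netlink_autobind() */
	addr.nl_groups = 0;	/* multicast listening would need privilege */

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}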
Example #25
0
void dccp_close(struct sock *sk, long timeout)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	u32 data_was_unread = 0;
	int state;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	sk_stop_timer(sk, &dp->dccps_xmit_timer);

	
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		data_was_unread += skb->len;
		__kfree_skb(skb);
	}

	if (data_was_unread) {
		
		DCCP_WARN("DCCP: ABORT -- %u bytes unread\n", data_was_unread);
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		dccp_set_state(sk, DCCP_CLOSED);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		
		sk->sk_prot->disconnect(sk, 0);
	} else if (sk->sk_state != DCCP_CLOSED) {
		dccp_terminate_connection(sk);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);

	
	release_sock(sk);
	
	local_bh_disable();
	bh_lock_sock(sk);
	WARN_ON(sock_owned_by_user(sk));

	percpu_counter_inc(sk->sk_prot->orphan_count);

	
	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
		goto out;

	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}
Example #26
0
void dccp_close(struct sock *sk, long timeout)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct sk_buff *skb;
	u32 data_was_unread = 0;
	int state;

	lock_sock(sk);

	sk->sk_shutdown = SHUTDOWN_MASK;

	if (sk->sk_state == DCCP_LISTEN) {
		dccp_set_state(sk, DCCP_CLOSED);

		/* Special case. */
		inet_csk_listen_stop(sk);

		goto adjudge_to_death;
	}

	sk_stop_timer(sk, &dp->dccps_xmit_timer);

	/*
	 * We need to flush the recv. buffs.  We do this only on the
	 * descriptor close, not protocol-sourced closes, because the
	 * reader process may not have drained the data yet!
	 */
	while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		data_was_unread += skb->len;
		__kfree_skb(skb);
	}

	if (data_was_unread) {
		/* Unread data was tossed, send an appropriate Reset Code */
		DCCP_WARN("DCCP: ABORT -- %u bytes unread\n", data_was_unread);
		dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
		dccp_set_state(sk, DCCP_CLOSED);
	} else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
		/* Check zero linger _after_ checking for unread data. */
		sk->sk_prot->disconnect(sk, 0);
	} else if (sk->sk_state != DCCP_CLOSED) {
		dccp_terminate_connection(sk);
	}

	sk_stream_wait_close(sk, timeout);

adjudge_to_death:
	state = sk->sk_state;
	sock_hold(sk);
	sock_orphan(sk);
	atomic_inc(sk->sk_prot->orphan_count);

	/*
	 * It is the last release_sock in its life. It will remove backlog.
	 */
	release_sock(sk);
	/*
	 * Now socket is owned by kernel and we acquire BH lock
	 * to finish close. No need to check for user refs.
	 */
	local_bh_disable();
	bh_lock_sock(sk);
	BUG_TRAP(!sock_owned_by_user(sk));

	/* Have we already been destroyed by a softirq or backlog? */
	if (state != DCCP_CLOSED && sk->sk_state == DCCP_CLOSED)
		goto out;

	if (sk->sk_state == DCCP_CLOSED)
		inet_csk_destroy_sock(sk);

	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}