Example #1
0
/* Insert an ATM socket into the VCC hash table, bucketed by the low
 * bits of its VCI.  The bucket index is also cached in sk_hashent.
 * No locking is done here; the caller must serialize access.
 */
void __vcc_insert_socket(struct sock *sk)
{
	struct atm_vcc *vcc = atm_sk(sk);
	int bucket = vcc->vci & (VCC_HTABLE_SIZE - 1);

	sk->sk_hashent = bucket;
	sk_add_node(sk, &vcc_hash[bucket]);
}
/* Add a Phonet socket to the bucket selected by its source object,
 * under the global Phonet socket-list spinlock.
 */
void pn_sock_hash(struct sock *sk)
{
	struct hlist_head *bucket;

	bucket = pn_hash_list(pn_sk(sk)->sobject);

	spin_lock_bh(&pnsocks.lock);
	sk_add_node(sk, bucket);
	spin_unlock_bh(&pnsocks.lock);
}
Example #3
0
/* Hash a raw IPv4 socket by its protocol number and bump the
 * protocol use count, under the raw-table writer lock.
 */
static void raw_v4_hash(struct sock *sk)
{
	unsigned int slot = inet_sk(sk)->num & (RAWV4_HTABLE_SIZE - 1);

	write_lock_bh(&raw_v4_lock);
	sk_add_node(sk, &raw_v4_htable[slot]);
	sock_prot_inc_use(sk->sk_prot);
	write_unlock_bh(&raw_v4_lock);
}
Example #4
0
/* Insert a raw socket into hash table @h, bucketed by its protocol
 * number, and increment the protocol use count.  Serialized by
 * the table's writer lock.
 */
void raw_hash_sk(struct sock *sk, struct raw_hashinfo *h)
{
	unsigned int slot = inet_sk(sk)->num & (RAW_HTABLE_SIZE - 1);

	write_lock_bh(&h->lock);
	sk_add_node(sk, &h->ht[slot]);
	sock_prot_inc_use(sk->sk_prot);
	write_unlock_bh(&h->lock);
}
Example #5
0
/* Insert a raw socket into its protocol's hash table (looked up via
 * sk_prot) and bump the per-namespace in-use counter, under the
 * table's writer lock.
 */
void raw_hash_sk(struct sock *sk)
{
	struct raw_hashinfo *h = sk->sk_prot->h.raw_hash;
	unsigned int slot = inet_sk(sk)->inet_num & (RAW_HTABLE_SIZE - 1);

	write_lock_bh(&h->lock);
	sk_add_node(sk, &h->ht[slot]);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock_bh(&h->lock);
}
Example #6
0
/*
 * Bind @sk into the per-protocol netlink PID hash under @pid.
 * Returns 0 on success, -EADDRINUSE if @pid is already taken,
 * -EBUSY if @sk is already bound, or -ENOMEM if the table is full.
 * The whole operation is serialized by netlink_table_grab().
 */
static int netlink_insert(struct sock *sk, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	int err = -EADDRINUSE;
	struct sock *osk;
	struct hlist_node *node;
	int len;

	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	len = 0;
	/* Walk the bucket: stop if the pid is already taken, counting
	 * the chain length so we can decide whether to rehash. */
	sk_for_each(osk, node, head) {
		if (nlk_sk(osk)->pid == pid)
			break;
		len++;
	}
	if (node)	/* non-NULL iff the walk broke on a duplicate pid */
		goto err;

	err = -EBUSY;
	if (nlk_sk(sk)->pid)	/* socket already has a pid bound */
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
		goto err;

	/* Long chain: try to grow/rehash the table; the target bucket
	 * may have moved, so recompute it. */
	if (len && nl_pid_hash_dilute(hash, len))
		head = nl_pid_hashfn(hash, pid);
	hash->entries++;
	nlk_sk(sk)->pid = pid;
	sk_add_node(sk, head);
	err = 0;

err:	/* common exit for both the success and failure paths */
	netlink_table_ungrab();
	return err;
}
Example #7
0
/*
 * Accept one pending pipe connection on listening socket @sk.
 * Waits for a connection request (unless O_NONBLOCK), answers it,
 * and moves the new socket from the accept queue to pn->hlist.
 * Returns the accepted socket or NULL, with the status in *errp.
 */
static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
{
	struct pep_sock *pn = pep_sk(sk);
	struct sock *newsk = NULL;
	struct sk_buff *oskb;
	int err;

	lock_sock(sk);
	err = pep_wait_connreq(sk, flags & O_NONBLOCK);
	if (err)
		goto out;

	newsk = __sk_head(&pn->ackq);

	/* Answer the connection request carried by the queued skb; on
	 * failure put it back so a later accept() can retry. */
	oskb = skb_dequeue(&newsk->sk_receive_queue);
	err = pep_accept_conn(newsk, oskb);
	if (err) {
		skb_queue_head(&newsk->sk_receive_queue, oskb);
		newsk = NULL;
		goto out;
	}
	kfree_skb(oskb);

	/* The accepted socket keeps a reference on its listener. */
	sock_hold(sk);
	pep_sk(newsk)->listener = sk;

	/* Move newsk from the accept queue to the list of live pipes;
	 * the hold/put pair keeps it alive across the unhash/rehash. */
	sock_hold(newsk);
	sk_del_node_init(newsk);
	sk_acceptq_removed(sk);
	sk_add_node(newsk, &pn->hlist);
	__sock_put(newsk);

out:
	release_sock(sk);
	*errp = err;
	return newsk;
}
Example #8
0
/*
 * Handle an incoming pipe connection request on listening socket @sk:
 * validate the header and options, allocate a new to-be-accepted
 * socket, park a clone of the request skb on it, and queue it on the
 * ack queue until userspace calls accept().  Returns 0 or a negative
 * errno.  Runs in atomic context (GFP_ATOMIC allocations).
 */
static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *newsk;
	struct pep_sock *newpn, *pn = pep_sk(sk);
	struct pnpipehdr *hdr;
	struct sockaddr_pn dst;
	u16 peer_type;
	u8 pipe_handle, enabled, n_sb;
	u8 aligned = 0;

	if (!pskb_pull(skb, sizeof(*hdr) + 4))
		return -EINVAL;

	hdr = pnp_hdr(skb);
	pipe_handle = hdr->pipe_handle;
	/* Pipe state requested once the handshake completes. */
	switch (hdr->state_after_connect) {
	case PN_PIPE_DISABLE:
		enabled = 0;
		break;
	case PN_PIPE_ENABLE:
		enabled = 1;
		break;
	default:
		pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM);
		return -EINVAL;
	}
	peer_type = hdr->other_pep_type << 8;

	if (unlikely(sk->sk_state != TCP_LISTEN) || sk_acceptq_is_full(sk)) {
		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE);
		return -ENOBUFS;
	}

	/* Parse sub-blocks (options) */
	n_sb = hdr->data[4];
	while (n_sb > 0) {
		u8 type, buf[1], len = sizeof(buf);
		const u8 *data = pep_get_sb(skb, &type, &len, buf);

		if (data == NULL)
			return -EINVAL;
		switch (type) {
		case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE:
			if (len < 1)
				return -EINVAL;
			/* The option carries the low byte of the peer type. */
			peer_type = (peer_type & 0xff00) | data[0];
			break;
		case PN_PIPE_SB_ALIGNED_DATA:
			/* NOTE(review): unlike the case above there is no
			 * "len < 1" check before reading data[0] — confirm
			 * pep_get_sb() guarantees at least one byte here. */
			aligned = data[0] != 0;
			break;
		}
		n_sb--;
	}

	/* Keep a private copy of the request for the new socket. */
	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	/* Create a new to-be-accepted sock */
	newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_ATOMIC, sk->sk_prot);
	if (!newsk) {
		kfree_skb(skb);
		return -ENOMEM;
	}
	sock_init_data(NULL, newsk);
	newsk->sk_state = TCP_SYN_RECV;
	newsk->sk_backlog_rcv = pipe_do_rcv;
	newsk->sk_protocol = sk->sk_protocol;
	newsk->sk_destruct = pipe_destruct;

	/* Initialize the pipe state from the parsed request. */
	newpn = pep_sk(newsk);
	pn_skb_get_dst_sockaddr(skb, &dst);
	newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
	newpn->pn_sk.resource = pn->pn_sk.resource;
	skb_queue_head_init(&newpn->ctrlreq_queue);
	newpn->pipe_handle = pipe_handle;
	atomic_set(&newpn->tx_credits, 0);
	newpn->peer_type = peer_type;
	newpn->rx_credits = 0;
	newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
	newpn->init_enable = enabled;
	newpn->aligned = aligned;

	/* Park the request on the new socket and wake the listener. */
	BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue));
	skb_queue_head(&newsk->sk_receive_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, 0);

	/* The new socket waits on the ack queue until accept(). */
	sk_acceptq_added(sk);
	sk_add_node(newsk, &pn->ackq);
	return 0;
}
Example #9
0
/*
 * Accept one pipe connection request on listening socket @sk:
 * receive the request datagram, validate it, reject duplicates,
 * allocate and initialize the new pipe socket, answer the request,
 * and hash the new socket on pn->hlist.  Returns the accepted socket
 * or NULL, with the status in *errp.
 *
 * Fix: @dst is now filled in (via pn_skb_get_dst_sockaddr) BEFORE the
 * duplicate-handle lookup.  Previously pep_find_pipe() was called with
 * @dst still uninitialized, i.e. the duplicate check compared against
 * stack garbage (undefined behavior).
 */
static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
{
	struct pep_sock *pn = pep_sk(sk), *newpn;
	struct sock *newsk = NULL;
	struct sk_buff *skb;
	struct pnpipehdr *hdr;
	struct sockaddr_pn dst, src;
	int err;
	u16 peer_type;
	u8 pipe_handle, enabled, n_sb;
	u8 aligned = 0;

	skb = skb_recv_datagram(sk, 0, flags & O_NONBLOCK, errp);
	if (!skb)
		return NULL;

	lock_sock(sk);
	if (sk->sk_state != TCP_LISTEN) {
		err = -EINVAL;
		goto drop;
	}
	sk_acceptq_removed(sk);

	err = -EPROTO;
	if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
		goto drop;

	hdr = pnp_hdr(skb);
	pipe_handle = hdr->pipe_handle;
	/* Pipe state requested once the handshake completes. */
	switch (hdr->state_after_connect) {
	case PN_PIPE_DISABLE:
		enabled = 0;
		break;
	case PN_PIPE_ENABLE:
		enabled = 1;
		break;
	default:
		pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM,
				GFP_KERNEL);
		goto drop;
	}
	peer_type = hdr->other_pep_type << 8;

	/* Parse sub-blocks (options) */
	n_sb = hdr->data[4];
	while (n_sb > 0) {
		u8 type, buf[1], len = sizeof(buf);
		const u8 *data = pep_get_sb(skb, &type, &len, buf);

		if (data == NULL)
			goto drop;
		switch (type) {
		case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE:
			if (len < 1)
				goto drop;
			/* The option carries the low byte of the peer type. */
			peer_type = (peer_type & 0xff00) | data[0];
			break;
		case PN_PIPE_SB_ALIGNED_DATA:
			aligned = data[0] != 0;
			break;
		}
		n_sb--;
	}

	/* Check for duplicate pipe handle.  @dst must be initialized
	 * from the skb before pep_find_pipe() may read it. */
	pn_skb_get_dst_sockaddr(skb, &dst);
	newsk = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
	if (unlikely(newsk)) {
		__sock_put(newsk);
		newsk = NULL;
		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_KERNEL);
		goto drop;
	}

	/* Create a new to-be-accepted sock */
	newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot);
	if (!newsk) {
		pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
		err = -ENOBUFS;
		goto drop;
	}

	sock_init_data(NULL, newsk);
	newsk->sk_state = TCP_SYN_RECV;
	newsk->sk_backlog_rcv = pipe_do_rcv;
	newsk->sk_protocol = sk->sk_protocol;
	newsk->sk_destruct = pipe_destruct;

	/* Initialize the pipe state from the parsed request. */
	newpn = pep_sk(newsk);
	pn_skb_get_src_sockaddr(skb, &src);
	newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
	newpn->pn_sk.dobject = pn_sockaddr_get_object(&src);
	newpn->pn_sk.resource = pn_sockaddr_get_resource(&dst);
	sock_hold(sk);	/* the pipe socket pins its listener */
	newpn->listener = sk;
	skb_queue_head_init(&newpn->ctrlreq_queue);
	newpn->pipe_handle = pipe_handle;
	atomic_set(&newpn->tx_credits, 0);
	newpn->ifindex = 0;
	newpn->peer_type = peer_type;
	newpn->rx_credits = 0;
	newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
	newpn->init_enable = enabled;
	newpn->aligned = aligned;

	err = pep_accept_conn(newsk, skb);
	if (err) {
		sock_put(newsk);
		newsk = NULL;
		goto drop;
	}
	sk_add_node(newsk, &pn->hlist);
drop:
	release_sock(sk);
	kfree_skb(skb);
	*errp = err;
	return newsk;
}
Example #10
0
/*
 * Create a PF_PACKET socket of type SOCK_DGRAM, SOCK_RAW or (when
 * CONFIG_SOCK_PACKET is set) legacy SOCK_PACKET.  Requires
 * CAP_NET_RAW.  If @protocol is non-zero the receive hook is
 * registered immediately; otherwise binding happens later.
 * Returns 0 or a negative errno.
 */
static int packet_create(struct socket *sock, int protocol)
{
	struct sock *sk;
	struct packet_sock *po;
	int err;

	if (!capable(CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW
#ifdef CONFIG_SOCK_PACKET
	    && sock->type != SOCK_PACKET
#endif
	    )
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;

	err = -ENOBUFS;
	sk = sk_alloc(PF_PACKET, GFP_KERNEL, &packet_proto, 1);
	if (sk == NULL)
		goto out;

	/* Legacy SOCK_PACKET sockets get their own ops table. */
	sock->ops = &packet_ops;
#ifdef CONFIG_SOCK_PACKET
	if (sock->type == SOCK_PACKET)
		sock->ops = &packet_ops_spkt;
#endif
	sock_init_data(sock, sk);

	po = pkt_sk(sk);
	sk->sk_family = PF_PACKET;
	po->num = protocol;

	sk->sk_destruct = packet_sock_destruct;
	atomic_inc(&packet_socks_nr);	/* decremented in the destructor */

	/*
	 *	Attach a protocol block
	 */

	spin_lock_init(&po->bind_lock);
	po->prot_hook.func = packet_rcv;
#ifdef CONFIG_SOCK_PACKET
	if (sock->type == SOCK_PACKET)
		po->prot_hook.func = packet_rcv_spkt;
#endif
	po->prot_hook.af_packet_priv = sk;

	if (protocol) {
		/* Bind right away; the hook holds a socket reference. */
		po->prot_hook.type = protocol;
		dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}

	/* Publish the socket on the global packet socket list. */
	write_lock_bh(&packet_sklist_lock);
	sk_add_node(sk, &packet_sklist);
	write_unlock_bh(&packet_sklist_lock);
	return(0);
out:
	return err;
}
Example #11
0
/*
 * NT-mode demultiplexer: deliver @skb to the LAPD socket on the
 * receiving device that matches its SAPI/TEI.  If only a listening
 * socket matches, create a child socket for the (TEI, SAPI) pair,
 * deliver the frame to it, and notify the listener.  Returns non-zero
 * if the frame was queued to a socket.
 */
static int lapd_pass_frame_to_socket_nt(
	struct sk_buff *skb)
{
	struct lapd_sock *listening_lapd_sock = NULL;
	struct sock *sk = NULL;
	struct hlist_node *node;
	struct lapd_data_hdr *hdr = (struct lapd_data_hdr *)skb->data;
	struct lapd_device *dev = to_lapd_dev(skb->dev);
	int queued = 0;

	write_lock_bh(&lapd_hash_lock);
	sk_for_each(sk, node, lapd_get_hash(dev)) {
		struct lapd_sock *lapd_sock = to_lapd_sock(sk);

		if (lapd_sock->dev == dev) {

			/* Remember the listener in case no established
			 * socket matches this frame. */
			if (sk->sk_state == LAPD_SK_STATE_LISTEN) {
				listening_lapd_sock = lapd_sock;
				continue;
			}

			if (lapd_sock->sapi == hdr->addr.sapi &&
			    lapd_sock->tei == hdr->addr.tei) {

				skb->sk = sk;

				/* Drop the hash lock before delivering. */
				write_unlock_bh(&lapd_hash_lock);

				queued = lapd_pass_frame_to_socket(
						lapd_sock, skb);

				goto frame_handled;
			}
		}
	}

	if (listening_lapd_sock) {
		/* A socket has not been found */
		/* NOTE(review): this hdr shadows the outer one; both point
		 * at skb->data, so it is redundant but harmless. */
		struct lapd_data_hdr *hdr = (struct lapd_data_hdr *)skb->data;
		struct lapd_sock *new_lapd_sock;

		if (hdr->addr.sapi != LAPD_SAPI_Q931 &&
		    hdr->addr.sapi != LAPD_SAPI_X25) {
			lapd_msg(KERN_WARNING,
				"SAPI %d not supported\n",
				hdr->addr.sapi);
		}

		new_lapd_sock = lapd_new_sock(listening_lapd_sock,
					hdr->addr.tei, hdr->addr.sapi);

		if (!new_lapd_sock) {
			write_unlock_bh(&lapd_hash_lock);
			return FALSE;
		}

		/* Register the child socket, then release the lock. */
		sk_add_node(&new_lapd_sock->sk, lapd_get_hash(dev));
		write_unlock_bh(&lapd_hash_lock);

		skb->sk = &new_lapd_sock->sk;

		{
		struct lapd_new_dlc *new_dlc;
		new_dlc = kmalloc(sizeof(struct lapd_new_dlc), GFP_ATOMIC);
		if (!new_dlc)
			/* NOTE(review): returning here leaves the new socket
			 * hashed and appears to drop @skb without freeing or
			 * queuing it — confirm skb ownership on this path. */
			return FALSE;

		new_dlc->lapd_sock = new_lapd_sock;

		hlist_add_head(&new_dlc->node, &listening_lapd_sock->new_dlcs);

		queued = lapd_pass_frame_to_socket(new_lapd_sock, skb);

		/* Wake the listener so it can pick up the new DLC. */
		if (!sock_flag(&listening_lapd_sock->sk, SOCK_DEAD))
			listening_lapd_sock->sk.sk_data_ready(
				&listening_lapd_sock->sk, skb->len);
		}
	} else {
		/* NOTE(review): the socketless handler is invoked while
		 * lapd_hash_lock is still held — confirm this is intended. */
		lapd_handle_socketless_frame(skb);

		write_unlock_bh(&lapd_hash_lock);
	}

frame_handled:

	return queued;
}