static int bnep_sock_create(struct socket *sock, int protocol)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sk = sk_alloc(PF_BLUETOOTH, GFP_KERNEL, &bnep_proto, 1);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock->ops = &bnep_sock_ops;
	sock->state = SS_UNCONNECTED;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;
	sk->sk_state = BT_OPEN;

	return 0;
}

/**
 * tipc_create - create a TIPC socket
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 *
 * This routine creates and attaches a 'struct sock' to the 'struct socket',
 * then creates and attaches a TIPC port to the 'struct sock' part.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_create(struct socket *sock, int protocol)
{
	struct tipc_sock *tsock;
	struct tipc_port *port;
	struct sock *sk;
	u32 ref;

	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	ref = tipc_createport_raw(NULL, &dispatch, &wakeupdispatch,
				  TIPC_LOW_IMPORTANCE);
	if (unlikely(!ref))
		return -ENOMEM;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &stream_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &packet_ops;
		break;
	case SOCK_DGRAM:
		tipc_set_portunreliable(ref, 1);
		/* fall through */
	case SOCK_RDM:
		tipc_set_portunreturnable(ref, 1);
		sock->ops = &msg_ops;
		sock->state = SS_READY;
		break;
	default:
		tipc_deleteport(ref);
		return -EPROTOTYPE;
	}

	sk = sk_alloc(AF_TIPC, GFP_KERNEL, &tipc_proto, 1);
	if (!sk) {
		tipc_deleteport(ref);
		return -ENOMEM;
	}

	sock_init_data(sock, sk);
	init_waitqueue_head(sk->sk_sleep);
	sk->sk_rcvtimeo = 8 * HZ;	/* default connect timeout = 8s */

	tsock = tipc_sk(sk);
	port = tipc_get_port(ref);

	tsock->p = port;
	port->usr_handle = tsock;

	init_MUTEX(&tsock->sem);

	dbg("sock_create: %p\n", tsock);

	atomic_inc(&tipc_user_count);

	return 0;
}

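/*
 * Illustrative sketch, not part of the snippet above: how a ->create
 * handler such as tipc_create() gets invoked. The address family
 * registers a struct net_proto_family at init time, and __sock_create()
 * dispatches socket(2) calls to it by family. Layout follows the
 * 2.6.x-era API used by the snippet; details vary across versions.
 */
static struct net_proto_family tipc_family_ops = {
	.owner	= THIS_MODULE,
	.family	= AF_TIPC,
	.create	= tipc_create
};

static int __init tipc_socket_init_example(void)
{
	/* after this, socket(AF_TIPC, ...) reaches tipc_create() */
	return sock_register(&tipc_family_ops);
}
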
static int rawsock_create(struct net *net, struct socket *sock,
			  const struct nfc_protocol *nfc_proto)
{
	struct sock *sk;

	nfc_dbg("sock=%p", sock);

	if (sock->type != SOCK_SEQPACKET)
		return -ESOCKTNOSUPPORT;

	sock->ops = &rawsock_ops;

	sk = sk_alloc(net, PF_NFC, GFP_KERNEL, nfc_proto->proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);
	sk->sk_protocol = nfc_proto->id;
	sk->sk_destruct = rawsock_destruct;
	sock->state = SS_UNCONNECTED;

	INIT_WORK(&nfc_rawsock(sk)->tx_work, rawsock_tx_work);
	nfc_rawsock(sk)->tx_work_scheduled = false;

	return 0;
}

static int bnep_sock_create(struct net *net, struct socket *sock, int protocol,
			    int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock->ops = &bnep_sock_ops;
	sock->state = SS_UNCONNECTED;

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&bnep_sk_list, sk);
	return 0;
}

static int pn_socket_create(struct net *net, struct socket *sock, int protocol)
{
	struct sock *sk;
	struct pn_sock *pn;
	struct phonet_protocol *pnp;
	int err;

#if !defined(CONFIG_SAMSUNG_PHONE_SVNET) && !defined(CONFIG_SAMSUNG_PHONE_SVNET_MODULE)
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
#endif

	if (protocol == 0) {
		/* Default protocol selection */
		switch (sock->type) {
		case SOCK_DGRAM:
			protocol = PN_PROTO_PHONET;
			break;
		case SOCK_SEQPACKET:
			protocol = PN_PROTO_PIPE;
			break;
		default:
			return -EPROTONOSUPPORT;
		}
	}

	pnp = phonet_proto_get(protocol);
	if (pnp == NULL &&
	    request_module("net-pf-%d-proto-%d", PF_PHONET, protocol) == 0)
		pnp = phonet_proto_get(protocol);
	if (pnp == NULL)
		return -EPROTONOSUPPORT;
	if (sock->type != pnp->sock_type) {
		err = -EPROTONOSUPPORT;
		goto out;
	}

	sk = sk_alloc(net, PF_PHONET, GFP_KERNEL, pnp->prot);
	if (sk == NULL) {
		err = -ENOMEM;
		goto out;
	}

	sock_init_data(sock, sk);
	sock->state = SS_UNCONNECTED;
	sock->ops = pnp->ops;
	sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
	sk->sk_protocol = protocol;
	pn = pn_sk(sk);
	pn->sobject = 0;
	pn->resource = 0;
	sk->sk_prot->init(sk);
	err = 0;

out:
	phonet_proto_put(pnp);
	return err;
}

static int MksckCreate(struct net *net, struct socket *sock,
		       int protocol, int kern)
{
	struct sock *sk;
	uid_t currentUid = current_euid();

	if (!(currentUid == 0 || currentUid == Mvpkm_vmwareUid)) {
		pr_warn("MksckCreate: rejected from process %s "
			"tgid=%d, pid=%d euid:%d.\n",
			current->comm,
			task_tgid_vnr(current),
			task_pid_vnr(current),
			currentUid);
		return -EPERM;
	}

	if (!sock)
		return -EINVAL;

	if (protocol)
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_DGRAM:
		sock->ops = &mksckDgramOps;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, mksckFamilyOps.family, GFP_KERNEL, &mksckProto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);
	sk->sk_type = SOCK_DGRAM;
	sk->sk_destruct = MksckSkDestruct;
	sk->sk_backlog_rcv = MksckBacklogRcv;
	sk->sk_protinfo = NULL;

	sock_reset_flag(sk, SOCK_DONE);

	return 0;
}

/*
 * Create a socket. Initialise the socket, blank the addresses,
 * set the state.
 */
static int ieee802154_create(struct net *net, struct socket *sock,
			     int protocol, int kern)
{
	struct sock *sk;
	int rc;
	struct proto *proto;
	const struct proto_ops *ops;

	if (net != &init_net)
		return -EAFNOSUPPORT;

	switch (sock->type) {
	case SOCK_RAW:
		proto = &ieee802154_raw_prot;
		ops = &ieee802154_raw_ops;
		break;
	case SOCK_DGRAM:
		proto = &ieee802154_dgram_prot;
		ops = &ieee802154_dgram_ops;
		break;
	default:
		rc = -ESOCKTNOSUPPORT;
		goto out;
	}

	rc = -ENOMEM;
	sk = sk_alloc(net, PF_IEEE802154, GFP_KERNEL, proto);
	if (!sk)
		goto out;
	rc = 0;

	sock->ops = ops;

	sock_init_data(sock, sk);
	/* FIXME: sk->sk_destruct */
	sk->sk_family = PF_IEEE802154;

	/* Checksums on by default */
	sock_set_flag(sk, SOCK_ZAPPED);

	if (sk->sk_prot->hash)
		sk->sk_prot->hash(sk);

	if (sk->sk_prot->init) {
		rc = sk->sk_prot->init(sk);
		if (rc)
			sk_common_release(sk);
	}
out:
	return rc;
}

static int unix_create(struct socket *sock, int protocol)
{
	unix_socket *sk;

	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sk = (unix_socket *)sk_alloc(GFP_KERNEL);
	if (sk == NULL)
		return -ENOMEM;

	switch (sock->type) {
	case SOCK_STREAM:
		break;
	/*
	 * Believe it or not BSD has AF_UNIX, SOCK_RAW though
	 * nothing uses it.
	 */
	case SOCK_RAW:
		sock->type = SOCK_DGRAM;
		/* fall through */
	case SOCK_DGRAM:
		break;
	default:
		sk_free(sk);
		return -ESOCKTNOSUPPORT;
	}

	sk->type = sock->type;
	init_timer(&sk->timer);
	skb_queue_head_init(&sk->write_queue);
	skb_queue_head_init(&sk->receive_queue);
	skb_queue_head_init(&sk->back_log);
	sk->protinfo.af_unix.family = AF_UNIX;
	sk->protinfo.af_unix.inode = NULL;
	sk->protinfo.af_unix.locks = 1;		/* Us */
	sk->protinfo.af_unix.readsem = MUTEX;	/* single task reading lock */
	sk->rcvbuf = SK_RMEM_MAX;
	sk->sndbuf = SK_WMEM_MAX;
	sk->allocation = GFP_KERNEL;
	sk->state = TCP_CLOSE;
	sk->priority = SOPRI_NORMAL;
	sk->state_change = def_callback1;
	sk->data_ready = def_callback2;
	sk->write_space = def_callback3;
	sk->error_report = def_callback1;
	sk->mtu = 4096;
	sk->socket = sock;
	sock->data = (void *)sk;
	sk->sleep = sock->wait;
	unix_insert_socket(sk);
	return 0;
}

/*
 * create an RxRPC socket
 */
static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
			int kern)
{
	struct rxrpc_net *rxnet;
	struct rxrpc_sock *rx;
	struct sock *sk;

	_enter("%p,%d", sock, protocol);

	/* we support transport protocol UDP/UDP6 only */
	if (protocol != PF_INET &&
	    !(IS_ENABLED(CONFIG_AF_RXRPC_IPV6) && protocol == PF_INET6))
		return -EPROTONOSUPPORT;

	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	sock->ops = &rxrpc_rpc_ops;
	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_RXRPC, GFP_KERNEL, &rxrpc_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);
	sock_set_flag(sk, SOCK_RCU_FREE);
	sk->sk_state = RXRPC_UNBOUND;
	sk->sk_write_space = rxrpc_write_space;
	sk->sk_max_ack_backlog = 0;
	sk->sk_destruct = rxrpc_sock_destructor;

	rx = rxrpc_sk(sk);
	rx->family = protocol;
	rx->calls = RB_ROOT;

	spin_lock_init(&rx->incoming_lock);
	INIT_LIST_HEAD(&rx->sock_calls);
	INIT_LIST_HEAD(&rx->to_be_accepted);
	INIT_LIST_HEAD(&rx->recvmsg_q);
	rwlock_init(&rx->recvmsg_lock);
	rwlock_init(&rx->call_lock);
	memset(&rx->srx, 0, sizeof(rx->srx));

	rxnet = rxrpc_net(sock_net(&rx->sk));
	timer_reduce(&rxnet->peer_keepalive_timer, jiffies + 1);

	_leave(" = 0 [%p]", rx);
	return 0;
}

/**
 * llc_sk_alloc - Allocates LLC sock
 * @net: network namespace
 * @family: upper layer protocol family
 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 * @prot: struct proto associated with this new sock instance
 *
 * Allocates a LLC sock and initializes it. Returns the new LLC sock
 * or %NULL if there's no memory available for one
 */
struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
			  struct proto *prot)
{
	struct sock *sk = sk_alloc(net, family, priority, prot);

	if (!sk)
		goto out;
	llc_sk_init(sk);
	sock_init_data(NULL, sk);
#ifdef LLC_REFCNT_DEBUG
	atomic_inc(&llc_sock_nr);
	printk(KERN_DEBUG "LLC socket %p created in %s, now we have %d alive\n",
	       sk, __func__, atomic_read(&llc_sock_nr));
#endif
out:
	return sk;
}

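/*
 * Illustrative sketch of a caller, modeled on llc_ui_create() from
 * net/llc/af_llc.c of the same era; treat the helper name and the
 * follow-up init step as assumptions rather than verbatim source.
 */
static struct sock *llc_sock_alloc_example(struct net *net,
					   struct socket *sock)
{
	struct sock *sk = llc_sk_alloc(net, PF_LLC, GFP_KERNEL, &llc_proto);

	if (!sk)
		return NULL;
	/* the real caller then wires sk into sock,
	 * e.g. via llc_ui_sk_init(sock, sk) */
	return sk;
}
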
/*
 * sys_socket -> sock_create -> __sock_create -> this
 */
static int raw_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	int err;

	/* init sock struct */
	err = -ENOBUFS;
	sk = sk_alloc(net, PF_RAW, GFP_KERNEL, &raw_proto);
	if (sk == NULL)
		goto out;

	sock->ops = &raw_ops;
	sock_init_data(sock, sk);

	sk->sk_family = PF_RAW;

	err = 0;
out:
	return err;
}

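/*
 * Illustrative sketch, assumed in-kernel usage: the dispatch chain named
 * in the comment above can also be exercised without a syscall via
 * sock_create_kern(), whose pre-4.2 signature matches this era.
 */
static int raw_open_example(struct socket **res)
{
	/* sock_create_kern -> __sock_create -> raw_create */
	return sock_create_kern(PF_RAW, SOCK_RAW, 0, res);
}
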
static struct sock *nr_alloc_sock(void)
{
	nr_cb *nr;
	struct sock *sk = sk_alloc(PF_NETROM, GFP_ATOMIC, 1, NULL);

	if (!sk)
		goto out;

	nr = sk->sk_protinfo = kmalloc(sizeof(*nr), GFP_ATOMIC);
	if (!nr)
		goto frees;

	memset(nr, 0x00, sizeof(*nr));
	nr->sk = sk;
out:
	return sk;
frees:
	sk_free(sk);
	sk = NULL;
	goto out;
}

static struct sock *nr_alloc_sock(void)
{
	struct sock *sk;
	nr_cb *nr;

	if ((sk = sk_alloc(PF_NETROM, GFP_ATOMIC, 1)) == NULL)
		return NULL;

	if ((nr = kmalloc(sizeof(*nr), GFP_ATOMIC)) == NULL) {
		sk_free(sk);
		return NULL;
	}

	MOD_INC_USE_COUNT;

	memset(nr, 0x00, sizeof(*nr));

	sk->protinfo.nr = nr;
	nr->sk = sk;

	return sk;
}

int vcc_create(struct socket *sock, int protocol, int family)
{
	struct sock *sk;
	struct atm_vcc *vcc;

	sock->sk = NULL;
	if (sock->type == SOCK_STREAM)
		return -EINVAL;
	sk = sk_alloc(family, GFP_KERNEL, 1, NULL);
	if (!sk)
		return -ENOMEM;
	sock_init_data(sock, sk);
	sk_set_owner(sk, THIS_MODULE);
	sk->sk_state_change = vcc_def_wakeup;
	sk->sk_write_space = vcc_write_space;

	vcc = sk->sk_protinfo = kmalloc(sizeof(*vcc), GFP_KERNEL);
	if (!vcc) {
		sk_free(sk);
		return -ENOMEM;
	}

	memset(vcc, 0, sizeof(*vcc));
	vcc->sk = sk;
	vcc->dev = NULL;
	memset(&vcc->local, 0, sizeof(struct sockaddr_atmsvc));
	memset(&vcc->remote, 0, sizeof(struct sockaddr_atmsvc));
	vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */
	atomic_set(&vcc->sk->sk_wmem_alloc, 0);
	atomic_set(&vcc->sk->sk_rmem_alloc, 0);
	vcc->push = NULL;
	vcc->pop = NULL;
	vcc->push_oam = NULL;
	vcc->vpi = vcc->vci = 0; /* no VCI/VPI yet */
	vcc->atm_options = vcc->aal_options = 0;
	sk->sk_destruct = vcc_sock_destruct;
	sock->sk = sk;
	return 0;
}

static struct sock *rose_alloc_sock(void)
{
	struct sock *sk;
	rose_cb *rose;

	if ((sk = sk_alloc(PF_ROSE, GFP_ATOMIC, 1)) == NULL)
		return NULL;

	if ((rose = kmalloc(sizeof(*rose), GFP_ATOMIC)) == NULL) {
		sk_free(sk);
		return NULL;
	}

	MOD_INC_USE_COUNT;

	memset(rose, 0x00, sizeof(*rose));

	sk->protinfo.rose = rose;
	rose->sk = sk;

	return sk;
}

struct sock *alloc_atm_vcc_sk(int family)
{
	struct sock *sk;
	struct atm_vcc *vcc;

	sk = sk_alloc(family, GFP_KERNEL, 1);
	if (!sk)
		return NULL;
	vcc = sk->protinfo.af_atm = kmalloc(sizeof(*vcc), GFP_KERNEL);
	if (!vcc) {
		sk_free(sk);
		return NULL;
	}
	sock_init_data(NULL, sk);
	sk->destruct = atm_free_sock;
	memset(vcc, 0, sizeof(*vcc));
	vcc->sk = sk;
	if (nodev_vccs)
		nodev_vccs->prev = vcc;
	vcc->prev = NULL;
	vcc->next = nodev_vccs;
	nodev_vccs = vcc;
	return sk;
}

static int econet_create(struct socket *sock, int protocol)
{
	struct sock *sk;
	int err;

	/* Econet only provides datagram services. */
	if (sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;
	MOD_INC_USE_COUNT;

	err = -ENOBUFS;
	sk = sk_alloc(PF_ECONET, GFP_KERNEL, 1);
	if (sk == NULL)
		goto out;

	sk->reuse = 1;
	sock->ops = &econet_ops;
	sock_init_data(sock, sk);

	sk->protinfo.af_econet = kmalloc(sizeof(struct econet_opt), GFP_KERNEL);
	if (sk->protinfo.af_econet == NULL)
		goto out_free;
	memset(sk->protinfo.af_econet, 0, sizeof(struct econet_opt));
	sk->zapped = 0;
	sk->family = PF_ECONET;
	sk->num = protocol;

	sklist_insert_socket(&econet_sklist, sk);
	return 0;

out_free:
	sk_free(sk);
out:
	MOD_DEC_USE_COUNT;
	return err;
}

static int netlink_create(struct socket *sock, int protocol)
{
	struct sock *sk;
	struct netlink_opt *nlk;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	sock->ops = &netlink_ops;

	sk = sk_alloc(PF_NETLINK, GFP_KERNEL, 1, NULL);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);
	sk_set_owner(sk, THIS_MODULE);

	nlk = sk->sk_protinfo = kmalloc(sizeof(*nlk), GFP_KERNEL);
	if (!nlk) {
		sk_free(sk);
		return -ENOMEM;
	}
	memset(nlk, 0, sizeof(*nlk));

	spin_lock_init(&nlk->cb_lock);
	init_waitqueue_head(&nlk->wait);
	sk->sk_destruct = netlink_sock_destruct;

	sk->sk_protocol = protocol;
	return 0;
}

static int bnep_sock_create(struct socket *sock, int protocol)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &bnep_sock_ops;

	if (!(sk = sk_alloc(PF_BLUETOOTH, GFP_KERNEL, 1)))
		return -ENOMEM;

	MOD_INC_USE_COUNT;

	sock->state = SS_UNCONNECTED;
	sock_init_data(sock, sk);

	sk->destruct = NULL;
	sk->protocol = protocol;

	return 0;
}

static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, int prio)
{
	struct sock *sk;

	if (!(sk = sk_alloc(PF_BLUETOOTH, prio, 1)))
		return NULL;

	bluez_sock_init(sock, sk);

	sk->zapped = 0;

	sk->destruct = l2cap_sock_destruct;
	sk->sndtimeo = L2CAP_CONN_TIMEOUT;

	sk->protocol = proto;
	sk->state = BT_OPEN;

	l2cap_sock_init_timer(sk);

	bluez_sock_link(&l2cap_sk_list, sk);

	MOD_INC_USE_COUNT;
	return sk;
}

struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock,
			      int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}

struct sock *sk_clone(struct sock *sk, const gfp_t priority)
{
	struct sock *newsk = sk_alloc(sk->sk_family, priority, sk->sk_prot, 0);

	if (newsk != NULL) {
		struct sk_filter *filter;

		memcpy(newsk, sk, sk->sk_prot->obj_size);

		/* SANITY */
		sk_node_init(&newsk->sk_node);
		sock_lock_init(newsk);
		bh_lock_sock(newsk);

		atomic_set(&newsk->sk_rmem_alloc, 0);
		atomic_set(&newsk->sk_wmem_alloc, 0);
		atomic_set(&newsk->sk_omem_alloc, 0);
		skb_queue_head_init(&newsk->sk_receive_queue);
		skb_queue_head_init(&newsk->sk_write_queue);

		rwlock_init(&newsk->sk_dst_lock);
		rwlock_init(&newsk->sk_callback_lock);

		newsk->sk_dst_cache = NULL;
		newsk->sk_wmem_queued = 0;
		newsk->sk_forward_alloc = 0;
		newsk->sk_send_head = NULL;
		newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
		newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;

		sock_reset_flag(newsk, SOCK_DONE);
		skb_queue_head_init(&newsk->sk_error_queue);

		filter = newsk->sk_filter;
		if (filter != NULL)
			sk_filter_charge(newsk, filter);

		if (sk->sk_create_child)
			sk->sk_create_child(sk, newsk);

		if (unlikely(xfrm_sk_clone_policy(newsk))) {
			/* It is still raw copy of parent, so invalidate
			 * destructor and make plain sk_free() */
			newsk->sk_destruct = NULL;
			sk_free(newsk);
			newsk = NULL;
			goto out;
		}

		newsk->sk_err = 0;
		newsk->sk_priority = 0;
		atomic_set(&newsk->sk_refcnt, 2);

		/*
		 * Increment the counter in the same struct proto as the master
		 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
		 * is the same as sk->sk_prot->socks, as this field was copied
		 * with memcpy).
		 *
		 * This _changes_ the previous behaviour, where
		 * tcp_create_openreq_child always was incrementing the
		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
		 * to be taken into account in all callers. -acme
		 */
		sk_refcnt_debug_inc(newsk);
		newsk->sk_socket = NULL;
		newsk->sk_sleep = NULL;

		if (newsk->sk_prot->sockets_allocated)
			atomic_inc(newsk->sk_prot->sockets_allocated);
	}
out:
	return newsk;
}

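/*
 * Illustrative sketch, assumed usage: sk_clone() is the generic back end
 * for deriving a child sock from a listener; in kernels of this vintage
 * inet_csk_clone()/tcp_create_openreq_child() wrap it roughly like this.
 * The helper below is hypothetical and only shows the calling convention.
 */
static struct sock *accept_child_example(struct sock *listener)
{
	struct sock *child = sk_clone(listener, GFP_ATOMIC);

	if (child != NULL)
		child->sk_state = TCP_SYN_RECV;	/* child starts mid-handshake */
	return child;
}
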
int clip6_set_vcc_netif(struct socket *sock, int number)
{
	struct clip6_vcc *clip6_vcc;
	struct sock *sk = NULL;
	struct net_device *dev;
	struct atm_vcc *vcc = ATM_SD(sock);

	DPRINTK("clip6_set_vcc_netif 0x%08x\n", (unsigned int)vcc);

	if (vcc->push != clip6_push) {
		printk(KERN_WARNING "clip6_set_vcc_netif: non-CLIP VCC\n");
		return -EBADF;
	}

	/* allocate a scapegoat sk and vcc */
	if (sock->type == SOCK_STREAM)
		return -EINVAL;
	sk = sk_alloc(sock->sk->family, GFP_KERNEL, 1);
	if (!sk)
		return -ENOMEM;
	sock_init_data(NULL, sk);
	vcc = sk->protinfo.af_atm = kmalloc(sizeof(*vcc), GFP_KERNEL);
	if (!vcc) {
		sk_free(sk);
		return -ENOMEM;
	}
	memset(vcc, 0, sizeof(*vcc));
	vcc->sk = sk;

	clip6_vcc = CLIP6_VCC(vcc);

	for (dev = clip6_devs; dev; dev = PRIV(dev)->next) {
		if (PRIV(dev)->number == number) {
			PRIV(dev)->vccs = clip6_vcc;
			clip6_vcc->dev = dev;
			if (vcc->dev) {
				/* copy MAC address */
				/* TODO: This will cause address duplication
				   in case of loopback. To avoid this,
				   dev_addr should include the number of the
				   interface, or such. */
				dev->addr_len = ESI_LEN;
				memcpy(dev->dev_addr, vcc->dev->esi,
				       dev->addr_len);
			}

			/* detach vcc from a socket */
			sk->rcvbuf = vcc->sk->rcvbuf;
			sk->sndbuf = vcc->sk->sndbuf;
			PRIV(dev)->vcc = vcc;
			PRIV(dev)->vcc->sk = sk;
			*(&ATM_SD(sock)) = sk->protinfo.af_atm;
			sk->protinfo.af_atm->sk = sock->sk;
			/* TODO: initialize lists, vcc->prev, next,
			   nodev_vccs */
			return 0;
		}
	}
	return -ENODEV;
}

struct sock *__vsock_create(struct net *net,
			    struct socket *sock,
			    struct sock *parent,
			    gfp_t priority,
			    unsigned short type,
			    int kern)
{
	struct sock *sk;
	struct vsock_sock *psk;
	struct vsock_sock *vsk;

	sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto, kern);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);

	/* sk->sk_type is normally set in sock_init_data, but only if sock is
	 * non-NULL. We make sure that our sockets always have a type by
	 * setting it here if needed.
	 */
	if (!sock)
		sk->sk_type = type;

	vsk = vsock_sk(sk);
	vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	sk->sk_destruct = vsock_sk_destruct;
	sk->sk_backlog_rcv = vsock_queue_rcv_skb;
	sock_reset_flag(sk, SOCK_DONE);

	INIT_LIST_HEAD(&vsk->bound_table);
	INIT_LIST_HEAD(&vsk->connected_table);
	vsk->listener = NULL;
	INIT_LIST_HEAD(&vsk->pending_links);
	INIT_LIST_HEAD(&vsk->accept_queue);
	vsk->rejected = false;
	vsk->sent_request = false;
	vsk->ignore_connecting_rst = false;
	vsk->peer_shutdown = 0;

	psk = parent ? vsock_sk(parent) : NULL;
	if (parent) {
		vsk->trusted = psk->trusted;
		vsk->owner = get_cred(psk->owner);
		vsk->connect_timeout = psk->connect_timeout;
	} else {
		vsk->trusted = capable(CAP_NET_ADMIN);
		vsk->owner = get_current_cred();
		vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
	}

	if (transport->init(vsk, psk) < 0) {
		sk_free(sk);
		return NULL;
	}

	if (sock)
		vsock_insert_unbound(vsk);

	return sk;
}

static int pep_connreq_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *newsk;
	struct pep_sock *newpn, *pn = pep_sk(sk);
	struct pnpipehdr *hdr;
	struct sockaddr_pn dst;
	u16 peer_type;
	u8 pipe_handle, enabled, n_sb;
	u8 aligned = 0;

	if (!pskb_pull(skb, sizeof(*hdr) + 4))
		return -EINVAL;

	hdr = pnp_hdr(skb);
	pipe_handle = hdr->pipe_handle;
	switch (hdr->state_after_connect) {
	case PN_PIPE_DISABLE:
		enabled = 0;
		break;
	case PN_PIPE_ENABLE:
		enabled = 1;
		break;
	default:
		pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM);
		return -EINVAL;
	}
	peer_type = hdr->other_pep_type << 8;

	if (unlikely(sk->sk_state != TCP_LISTEN) || sk_acceptq_is_full(sk)) {
		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE);
		return -ENOBUFS;
	}

	/* Parse sub-blocks (options) */
	n_sb = hdr->data[4];
	while (n_sb > 0) {
		u8 type, buf[1], len = sizeof(buf);
		const u8 *data = pep_get_sb(skb, &type, &len, buf);

		if (data == NULL)
			return -EINVAL;
		switch (type) {
		case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE:
			if (len < 1)
				return -EINVAL;
			peer_type = (peer_type & 0xff00) | data[0];
			break;
		case PN_PIPE_SB_ALIGNED_DATA:
			aligned = data[0] != 0;
			break;
		}
		n_sb--;
	}

	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	/* Create a new to-be-accepted sock */
	newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_ATOMIC, sk->sk_prot);
	if (!newsk) {
		kfree_skb(skb);
		return -ENOMEM;
	}
	sock_init_data(NULL, newsk);
	newsk->sk_state = TCP_SYN_RECV;
	newsk->sk_backlog_rcv = pipe_do_rcv;
	newsk->sk_protocol = sk->sk_protocol;
	newsk->sk_destruct = pipe_destruct;

	newpn = pep_sk(newsk);
	pn_skb_get_dst_sockaddr(skb, &dst);
	newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
	newpn->pn_sk.resource = pn->pn_sk.resource;
	skb_queue_head_init(&newpn->ctrlreq_queue);
	newpn->pipe_handle = pipe_handle;
	atomic_set(&newpn->tx_credits, 0);
	newpn->peer_type = peer_type;
	newpn->rx_credits = 0;
	newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
	newpn->init_enable = enabled;
	newpn->aligned = aligned;

	BUG_ON(!skb_queue_empty(&newsk->sk_receive_queue));
	skb_queue_head(&newsk->sk_receive_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, 0);

	sk_acceptq_added(sk);
	sk_add_node(newsk, &pn->ackq);
	return 0;
}

static int kni_vhost_backend_init(struct kni_dev *kni)
{
	struct kni_vhost_queue *q;
	struct net *net = current->nsproxy->net_ns;
	int err, i, sockfd;
	struct rte_kni_fifo *fifo;
	struct sk_buff *elem;

	if (kni->vhost_queue != NULL)
		return -1;

	if (!(q = (struct kni_vhost_queue *)sk_alloc(
		      net, AF_UNSPEC, GFP_KERNEL, &kni_raw_proto)))
		return -ENOMEM;

	err = sock_create_lite(AF_UNSPEC, SOCK_RAW, IPPROTO_RAW, &q->sock);
	if (err)
		goto free_sk;

	sockfd = kni_sock_map_fd(q->sock);
	if (sockfd < 0) {
		err = sockfd;
		goto free_sock;
	}

	/* cache init */
	q->cache = (struct sk_buff *)
		kzalloc(RTE_KNI_VHOST_MAX_CACHE_SIZE * sizeof(struct sk_buff),
			GFP_KERNEL);
	if (!q->cache) {
		err = -ENOMEM;
		goto free_fd;
	}

	fifo = (struct rte_kni_fifo *)
		kzalloc(RTE_KNI_VHOST_MAX_CACHE_SIZE * sizeof(void *)
			+ sizeof(struct rte_kni_fifo), GFP_KERNEL);
	if (!fifo) {
		err = -ENOMEM;
		goto free_cache;
	}

	kni_fifo_init(fifo, RTE_KNI_VHOST_MAX_CACHE_SIZE);

	for (i = 0; i < RTE_KNI_VHOST_MAX_CACHE_SIZE; i++) {
		elem = &q->cache[i];
		kni_fifo_put(fifo, (void **)&elem, 1);
	}
	q->fifo = fifo;

	/* store sockfd in vhost_queue */
	q->sockfd = sockfd;

	/* init socket */
	q->sock->type = SOCK_RAW;
	q->sock->state = SS_CONNECTED;
	q->sock->ops = &kni_socket_ops;
	sock_init_data(q->sock, &q->sk);

	/* init sock data */
	q->sk.sk_write_space = kni_sk_write_space;
	q->sk.sk_destruct = kni_sk_destruct;
	q->flags = IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
#ifdef RTE_KNI_VHOST_VNET_HDR_EN
	q->flags |= IFF_VNET_HDR;
#endif

	/* bind kni_dev with vhost_queue */
	q->kni = kni;
	kni->vhost_queue = q;

	wmb();

	kni->vq_status = BE_START;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
	KNI_DBG("backend init sockfd=%d, sock->wq=0x%16llx,"
		"sk->sk_wq=0x%16llx",
		q->sockfd, (uint64_t)q->sock->wq,
		(uint64_t)q->sk.sk_wq);
#else
	KNI_DBG("backend init sockfd=%d, sock->wait at 0x%16llx,"
		"sk->sk_sleep=0x%16llx",
		q->sockfd, (uint64_t)&q->sock->wait,
		(uint64_t)q->sk.sk_sleep);
#endif

	return 0;

free_cache:
	kfree(q->cache);
	q->cache = NULL;

free_fd:
	put_unused_fd(sockfd);

free_sock:
	q->kni = NULL;
	kni->vhost_queue = NULL;
	kni->vq_status |= BE_FINISH;
	/* clear ops before the release frees the struct socket;
	 * ops has not been pointed at kni_socket_ops on this path */
	q->sock->ops = NULL;
	sock_release(q->sock);
	q->sock = NULL;

free_sk:
	sk_free((struct sock *)q);

	return err;
}

static int packet_create(struct socket *sock, int protocol)
{
	struct sock *sk;
	struct packet_sock *po;
	int err;

	if (!capable(CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW
#ifdef CONFIG_SOCK_PACKET
	    && sock->type != SOCK_PACKET
#endif
	    )
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;

	err = -ENOBUFS;
	sk = sk_alloc(PF_PACKET, GFP_KERNEL, &packet_proto, 1);
	if (sk == NULL)
		goto out;

	sock->ops = &packet_ops;
#ifdef CONFIG_SOCK_PACKET
	if (sock->type == SOCK_PACKET)
		sock->ops = &packet_ops_spkt;
#endif
	sock_init_data(sock, sk);

	po = pkt_sk(sk);
	sk->sk_family = PF_PACKET;
	po->num = protocol;

	sk->sk_destruct = packet_sock_destruct;
	atomic_inc(&packet_socks_nr);

	/*
	 *	Attach a protocol block
	 */

	spin_lock_init(&po->bind_lock);
	po->prot_hook.func = packet_rcv;
#ifdef CONFIG_SOCK_PACKET
	if (sock->type == SOCK_PACKET)
		po->prot_hook.func = packet_rcv_spkt;
#endif
	po->prot_hook.af_packet_priv = sk;

	if (protocol) {
		po->prot_hook.type = protocol;
		dev_add_pack(&po->prot_hook);
		sock_hold(sk);
		po->running = 1;
	}

	write_lock_bh(&packet_sklist_lock);
	sk_add_node(sk, &packet_sklist);
	write_unlock_bh(&packet_sklist_lock);
	return 0;
out:
	return err;
}

static int inet6_create(struct socket *sock, int protocol)
{
	struct sock *sk;
	struct proto *prot;

	sk = sk_alloc(PF_INET6, GFP_KERNEL, 1);
	if (sk == NULL)
		goto do_oom;

	if (sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET) {
		if (protocol && protocol != IPPROTO_TCP)
			goto free_and_noproto;
		protocol = IPPROTO_TCP;
		prot = &tcpv6_prot;
		sock->ops = &inet6_stream_ops;
	} else if (sock->type == SOCK_DGRAM) {
		if (protocol && protocol != IPPROTO_UDP)
			goto free_and_noproto;
		protocol = IPPROTO_UDP;
		sk->no_check = UDP_CSUM_DEFAULT;
		prot = &udpv6_prot;
		sock->ops = &inet6_dgram_ops;
	} else if (sock->type == SOCK_RAW) {
		if (!capable(CAP_NET_RAW))
			goto free_and_badperm;
		if (!protocol)
			goto free_and_noproto;
		prot = &rawv6_prot;
		sock->ops = &inet6_dgram_ops;
		sk->reuse = 1;
		sk->num = protocol;
	} else {
		goto free_and_badtype;
	}

	sock_init_data(sock, sk);

	sk->destruct = inet6_sock_destruct;
	sk->zapped = 0;
	sk->family = PF_INET6;
	sk->protocol = protocol;

	sk->prot = prot;
	sk->backlog_rcv = prot->backlog_rcv;

	sk->net_pinfo.af_inet6.hop_limit = -1;
	sk->net_pinfo.af_inet6.mcast_hops = -1;
	sk->net_pinfo.af_inet6.mc_loop = 1;
	sk->net_pinfo.af_inet6.pmtudisc = IPV6_PMTUDISC_WANT;

	/* Init the ipv4 part of the socket since we can have sockets
	 * using v6 API for ipv4.
	 */
	sk->protinfo.af_inet.ttl = 64;
	sk->protinfo.af_inet.mc_loop = 1;
	sk->protinfo.af_inet.mc_ttl = 1;
	sk->protinfo.af_inet.mc_index = 0;
	sk->protinfo.af_inet.mc_list = NULL;

	if (ipv4_config.no_pmtu_disc)
		sk->protinfo.af_inet.pmtudisc = IP_PMTUDISC_DONT;
	else
		sk->protinfo.af_inet.pmtudisc = IP_PMTUDISC_WANT;

#ifdef INET_REFCNT_DEBUG
	atomic_inc(&inet6_sock_nr);
	atomic_inc(&inet_sock_nr);
#endif
	MOD_INC_USE_COUNT;

	if (sk->type == SOCK_RAW && protocol == IPPROTO_RAW)
		sk->protinfo.af_inet.hdrincl = 1;

	if (sk->num) {
		/* It assumes that any protocol which allows
		 * the user to assign a number at socket
		 * creation time automatically shares.
		 */
		sk->sport = ntohs(sk->num);
		sk->prot->hash(sk);
	}

	if (sk->prot->init) {
		int err = sk->prot->init(sk);
		if (err != 0) {
			MOD_DEC_USE_COUNT;
			inet_sock_release(sk);
			return err;
		}
	}
	return 0;

free_and_badtype:
	sk_free(sk);
	return -ESOCKTNOSUPPORT;
free_and_badperm:
	sk_free(sk);
	return -EPERM;
free_and_noproto:
	sk_free(sk);
	return -EPROTONOSUPPORT;
do_oom:
	return -ENOBUFS;
}

static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
{
	struct pep_sock *pn = pep_sk(sk), *newpn;
	struct sock *newsk = NULL;
	struct sk_buff *skb;
	struct pnpipehdr *hdr;
	struct sockaddr_pn dst, src;
	int err;
	u16 peer_type;
	u8 pipe_handle, enabled, n_sb;
	u8 aligned = 0;

	skb = skb_recv_datagram(sk, 0, flags & O_NONBLOCK, errp);
	if (!skb)
		return NULL;

	lock_sock(sk);
	if (sk->sk_state != TCP_LISTEN) {
		err = -EINVAL;
		goto drop;
	}
	sk_acceptq_removed(sk);

	err = -EPROTO;
	if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
		goto drop;

	hdr = pnp_hdr(skb);
	pipe_handle = hdr->pipe_handle;
	switch (hdr->state_after_connect) {
	case PN_PIPE_DISABLE:
		enabled = 0;
		break;
	case PN_PIPE_ENABLE:
		enabled = 1;
		break;
	default:
		pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM,
				GFP_KERNEL);
		goto drop;
	}
	peer_type = hdr->other_pep_type << 8;

	/* Parse sub-blocks (options) */
	n_sb = hdr->data[4];
	while (n_sb > 0) {
		u8 type, buf[1], len = sizeof(buf);
		const u8 *data = pep_get_sb(skb, &type, &len, buf);

		if (data == NULL)
			goto drop;
		switch (type) {
		case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE:
			if (len < 1)
				goto drop;
			peer_type = (peer_type & 0xff00) | data[0];
			break;
		case PN_PIPE_SB_ALIGNED_DATA:
			aligned = data[0] != 0;
			break;
		}
		n_sb--;
	}

	/* Check for duplicate pipe handle */
	newsk = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
	if (unlikely(newsk)) {
		__sock_put(newsk);
		newsk = NULL;
		pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_KERNEL);
		goto drop;
	}

	/* Create a new to-be-accepted sock */
	newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot);
	if (!newsk) {
		pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
		err = -ENOBUFS;
		goto drop;
	}

	sock_init_data(NULL, newsk);
	newsk->sk_state = TCP_SYN_RECV;
	newsk->sk_backlog_rcv = pipe_do_rcv;
	newsk->sk_protocol = sk->sk_protocol;
	newsk->sk_destruct = pipe_destruct;

	newpn = pep_sk(newsk);
	pn_skb_get_dst_sockaddr(skb, &dst);
	pn_skb_get_src_sockaddr(skb, &src);
	newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
	newpn->pn_sk.dobject = pn_sockaddr_get_object(&src);
	newpn->pn_sk.resource = pn_sockaddr_get_resource(&dst);
	sock_hold(sk);
	newpn->listener = sk;
	skb_queue_head_init(&newpn->ctrlreq_queue);
	newpn->pipe_handle = pipe_handle;
	atomic_set(&newpn->tx_credits, 0);
	newpn->ifindex = 0;
	newpn->peer_type = peer_type;
	newpn->rx_credits = 0;
	newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
	newpn->init_enable = enabled;
	newpn->aligned = aligned;

	err = pep_accept_conn(newsk, skb);
	if (err) {
		sock_put(newsk);
		newsk = NULL;
		goto drop;
	}
	sk_add_node(newsk, &pn->hlist);
drop:
	release_sock(sk);
	kfree_skb(skb);
	*errp = err;
	return newsk;
}

static int tipc_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	const struct proto_ops *ops;
	socket_state state;
	struct sock *sk;
	struct tipc_port *tp_ptr;

	/* Validate arguments */
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_STREAM:
		ops = &stream_ops;
		state = SS_UNCONNECTED;
		break;
	case SOCK_SEQPACKET:
		ops = &packet_ops;
		state = SS_UNCONNECTED;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		ops = &msg_ops;
		state = SS_READY;
		break;
	default:
		return -EPROTOTYPE;
	}

	/* Allocate socket's protocol area */
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
	if (sk == NULL)
		return -ENOMEM;

	/* Allocate TIPC port for socket to use */
	tp_ptr = tipc_createport_raw(sk, &dispatch, &wakeupdispatch,
				     TIPC_LOW_IMPORTANCE);
	if (unlikely(!tp_ptr)) {
		sk_free(sk);
		return -ENOMEM;
	}

	/* Finish initializing socket data structures */
	sock->ops = ops;
	sock->state = state;

	sock_init_data(sock, sk);
	sk->sk_backlog_rcv = backlog_rcv;
	tipc_sk(sk)->p = tp_ptr;
	tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT;

	spin_unlock_bh(tp_ptr->lock);

	if (sock->state == SS_READY) {
		tipc_set_portunreturnable(tp_ptr->ref, 1);
		if (sock->type == SOCK_DGRAM)
			tipc_set_portunreliable(tp_ptr->ref, 1);
	}
	return 0;
}