static int hidp_sock_create(struct socket *sock, int protocol) { struct sock *sk; BT_DBG("sock %p", sock); if (sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; sk = sk_alloc(PF_BLUETOOTH, GFP_KERNEL, &hidp_proto, 1); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sock->ops = &hidp_sock_ops; sock->state = SS_UNCONNECTED; sock_reset_flag(sk, SOCK_ZAPPED); sk->sk_protocol = protocol; sk->sk_state = BT_OPEN; return 0; }
/*
 * Build a classic-BPF sk_filter from a user-supplied fprog without a real
 * backing socket: a throwaway on-stack struct sock is initialised just far
 * enough for the kernel's attach path to validate and install the program,
 * and the resulting sk_filter is handed back to the caller.
 *
 * Returns the attached sk_filter on success, NULL on failure.
 * NOTE(review): the caller appears to take ownership of the returned
 * filter — confirm it is eventually released with the matching
 * sk_filter release helper for this kernel version.
 */
struct sk_filter * pfq_alloc_sk_filter(struct sock_fprog *fprog)
{
	struct sock sk;		/* dummy socket; lives only on this stack frame */
	int rv;

	sock_init_data(NULL, &sk);
	sk.sk_filter = NULL;
	atomic_set(&sk.sk_omem_alloc, 0);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
	/* >= 3.9: per-socket filters may be locked; ensure ours is not. */
	sock_reset_flag(&sk, SOCK_FILTER_LOCKED);
#endif

	pr_devel("[PFQ] BPF: new fprog (len %d)\n", fprog->len);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,8) && LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0))
	/* 4.4.8 .. 4.6: attach API was renamed and takes a "locked" flag. */
	if ((rv = __sk_attach_filter(fprog, &sk, sock_owned_by_user(&sk))))
#else
	if ((rv = sk_attach_filter(fprog, &sk)))
#endif
	{
		pr_devel("[PFQ] BPF: sk_attach_filter error: (%d)!\n", rv);
		return NULL;
	}

	return sk.sk_filter;
}
static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; BT_DBG("sock %p", sock); if (sock->type != SOCK_RAW) return -ESOCKTNOSUPPORT; sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sock->ops = &cmtp_sock_ops; sock->state = SS_UNCONNECTED; sock_reset_flag(sk, SOCK_ZAPPED); sk->sk_protocol = protocol; sk->sk_state = BT_OPEN; return 0; }
int vcc_create(struct socket *sock, int protocol, int family) { struct sock *sk; struct atm_vcc *vcc; sock->sk = NULL; if (sock->type == SOCK_STREAM) return -EINVAL; sk = sk_alloc(family, GFP_KERNEL, &vcc_proto, 1); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sk->sk_state_change = vcc_def_wakeup; sk->sk_write_space = vcc_write_space; vcc = atm_sk(sk); vcc->dev = NULL; memset(&vcc->local,0,sizeof(struct sockaddr_atmsvc)); memset(&vcc->remote,0,sizeof(struct sockaddr_atmsvc)); vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */ atomic_set(&sk->sk_wmem_alloc, 0); atomic_set(&sk->sk_rmem_alloc, 0); vcc->push = NULL; vcc->pop = NULL; vcc->push_oam = NULL; vcc->vpi = vcc->vci = 0; /* no VCI/VPI yet */ vcc->atm_options = vcc->aal_options = 0; sk->sk_destruct = vcc_sock_destruct; return 0; }
/** * tipc_create - create a TIPC socket * @sock: pre-allocated socket structure * @protocol: protocol indicator (must be 0) * * This routine creates and attaches a 'struct sock' to the 'struct socket', * then create and attaches a TIPC port to the 'struct sock' part. * * Returns 0 on success, errno otherwise */ static int tipc_create(struct socket *sock, int protocol) { struct tipc_sock *tsock; struct tipc_port *port; struct sock *sk; u32 ref; if (unlikely(protocol != 0)) return -EPROTONOSUPPORT; ref = tipc_createport_raw(NULL, &dispatch, &wakeupdispatch, TIPC_LOW_IMPORTANCE); if (unlikely(!ref)) return -ENOMEM; sock->state = SS_UNCONNECTED; switch (sock->type) { case SOCK_STREAM: sock->ops = &stream_ops; break; case SOCK_SEQPACKET: sock->ops = &packet_ops; break; case SOCK_DGRAM: tipc_set_portunreliable(ref, 1); /* fall through */ case SOCK_RDM: tipc_set_portunreturnable(ref, 1); sock->ops = &msg_ops; sock->state = SS_READY; break; default: tipc_deleteport(ref); return -EPROTOTYPE; } sk = sk_alloc(AF_TIPC, GFP_KERNEL, &tipc_proto, 1); if (!sk) { tipc_deleteport(ref); return -ENOMEM; } sock_init_data(sock, sk); init_waitqueue_head(sk->sk_sleep); sk->sk_rcvtimeo = 8 * HZ; /* default connect timeout = 8s */ tsock = tipc_sk(sk); port = tipc_get_port(ref); tsock->p = port; port->usr_handle = tsock; init_MUTEX(&tsock->sem); dbg("sock_create: %x\n",tsock); atomic_inc(&tipc_user_count); return 0; }
/*
 * Create a netlink socket (SOCK_RAW or SOCK_DGRAM only) for the given
 * protocol slot.  Allocates the per-socket netlink_opt state; if that
 * allocation fails the sock is freed again before sk->destruct has
 * been installed, so no destructor runs on the half-built socket.
 *
 * Returns 0 on success, -ESOCKTNOSUPPORT / -EPROTONOSUPPORT for bad
 * arguments, -ENOMEM on allocation failure.
 */
static int netlink_create(struct socket *sock, int protocol)
{
	struct sock *sk;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	/* protocol selects a netlink family table; must be in range. */
	if (protocol<0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	sock->ops = &netlink_ops;

	sk = sk_alloc(PF_NETLINK, GFP_KERNEL, 1);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock,sk);

	sk->protinfo.af_netlink = kmalloc(sizeof(struct netlink_opt), GFP_KERNEL);
	if (sk->protinfo.af_netlink == NULL) {
		/* destruct not yet set, plain sk_free is safe here */
		sk_free(sk);
		return -ENOMEM;
	}
	memset(sk->protinfo.af_netlink, 0, sizeof(struct netlink_opt));

	spin_lock_init(&sk->protinfo.af_netlink->cb_lock);
	init_waitqueue_head(&sk->protinfo.af_netlink->wait);

	/* Only now is the destructor armed: protinfo is fully built. */
	sk->destruct = netlink_sock_destruct;
	atomic_inc(&netlink_sock_nr);

	sk->protocol=protocol;
	return 0;
}
static int rawsock_create(struct net *net, struct socket *sock, const struct nfc_protocol *nfc_proto) { struct sock *sk; nfc_dbg("sock=%p", sock); if (sock->type != SOCK_SEQPACKET) return -ESOCKTNOSUPPORT; sock->ops = &rawsock_ops; sk = sk_alloc(net, PF_NFC, GFP_KERNEL, nfc_proto->proto); if (!sk) return -ENOMEM; sock_init_data(sock, sk); sk->sk_protocol = nfc_proto->id; sk->sk_destruct = rawsock_destruct; sock->state = SS_UNCONNECTED; INIT_WORK(&nfc_rawsock(sk)->tx_work, rawsock_tx_work); nfc_rawsock(sk)->tx_work_scheduled = false; return 0; }
static int mpls_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; if (net != &init_net) return -EAFNOSUPPORT; sock->state = SS_UNCONNECTED; sock->ops = &mpls_sk_ops; sk = sk_alloc(net, PF_INET, GFP_KERNEL, &mpls_proto); if (!sk) return -1; sock_init_data(sock, sk); sk->sk_destruct = mpls_sock_destruct; sk->sk_family = PF_MPLS; sk->sk_protocol = 0; sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; sock_reset_flag(sk, SOCK_ZAPPED); return 0; }
/*
 * Create a raw HCI socket (2.4-era BlueZ).
 *
 * Only SOCK_RAW is supported.  The new sock is zero-initialised,
 * linked into the global HCI socket list, and the module refcount is
 * bumped so the module cannot unload while the socket exists
 * (released again in the corresponding destructor/release path —
 * NOTE(review): not visible in this chunk, confirm).
 *
 * Returns 0 on success, -ESOCKTNOSUPPORT or -ENOMEM on failure.
 */
static int hci_sock_create(struct socket *sock, int protocol)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	if (!(sk = sk_alloc(PF_BLUETOOTH, GFP_KERNEL, 1)))
		return -ENOMEM;

	sock->state = SS_UNCONNECTED;
	sock_init_data(sock, sk);

	/* Clear the HCI-specific per-socket area (2.4 protinfo union). */
	memset(&sk->protinfo, 0, sizeof(struct hci_pinfo));
	sk->destruct = NULL;
	sk->protocol = protocol;
	sk->state = BT_OPEN;

	bluez_sock_link(&hci_sk_list, sk);

	MOD_INC_USE_COUNT;
	return 0;
}