/* Release the reference on the referenced socket. */
void
clean_socketport (void *arg)
{
  struct sock_user *const user = arg;

  __mutex_lock (&global_lock);
  sock_release (user->sock);
  __mutex_unlock (&global_lock);
}
/*
 * open an RxRPC socket and bind it to be a server for callback notifications
 * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT
 */
int afs_open_socket(struct afs_net *net)
{
    struct sockaddr_rxrpc srx;
    struct socket *socket;
    unsigned int min_level;
    int ret;

    _enter("");

    ret = sock_create_kern(net->net, AF_RXRPC, SOCK_DGRAM, PF_INET6, &socket);
    if (ret < 0)
        goto error_1;

    socket->sk->sk_allocation = GFP_NOFS;

    /* bind the callback manager's address to make this a server socket */
    memset(&srx, 0, sizeof(srx));
    srx.srx_family                  = AF_RXRPC;
    srx.srx_service                 = CM_SERVICE;
    srx.transport_type              = SOCK_DGRAM;
    srx.transport_len               = sizeof(srx.transport.sin6);
    srx.transport.sin6.sin6_family  = AF_INET6;
    srx.transport.sin6.sin6_port    = htons(AFS_CM_PORT);

    min_level = RXRPC_SECURITY_ENCRYPT;
    ret = kernel_setsockopt(socket, SOL_RXRPC, RXRPC_MIN_SECURITY_LEVEL,
                            (void *)&min_level, sizeof(min_level));
    if (ret < 0)
        goto error_2;

    ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
    if (ret == -EADDRINUSE) {
        srx.transport.sin6.sin6_port = 0;
        ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
    }
    if (ret < 0)
        goto error_2;

    rxrpc_kernel_new_call_notification(socket, afs_rx_new_call,
                                       afs_rx_discard_new_call);

    ret = kernel_listen(socket, INT_MAX);
    if (ret < 0)
        goto error_2;

    net->socket = socket;
    afs_charge_preallocation(&net->charge_preallocation_work);
    _leave(" = 0");
    return 0;

error_2:
    sock_release(socket);
error_1:
    _leave(" = %d", ret);
    return ret;
}
u8 rtl_btcoex_create_kernel_socket(struct rtl_priv *rtlpriv, u8 is_invite)
{
    s8 kernel_socket_err;
    struct bt_coex_info *pcoex_info = &rtlpriv->coex_info;

    BTC_PRINT(BTC_MSG_SOCKET, SOCKET_CRITICAL, "%s CONNECT_PORT %d\n",
              __func__, CONNECT_PORT);

    if (NULL == pcoex_info) {
        BTC_PRINT(BTC_MSG_SOCKET, SOCKET_CRITICAL, "coex_info: NULL\n");
        return _FAIL;
    }

    kernel_socket_err = sock_create(PF_INET, SOCK_DGRAM, 0,
                                    &pcoex_info->udpsock);
    BTC_PRINT(BTC_MSG_SOCKET, SOCKET_CRITICAL, "binding socket, err = %d\n",
              kernel_socket_err);
    if (kernel_socket_err < 0) {
        BTC_PRINT(BTC_MSG_SOCKET, SOCKET_CRITICAL,
                  "Error during creation of socket error: %d\n",
                  kernel_socket_err);
        return _FAIL;
    }

    memset(&(pcoex_info->sin), 0, sizeof(pcoex_info->sin));
    pcoex_info->sin.sin_family = AF_INET;
    pcoex_info->sin.sin_port = htons(CONNECT_PORT);
    pcoex_info->sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

    memset(&(pcoex_info->bt_addr), 0, sizeof(pcoex_info->bt_addr));
    pcoex_info->bt_addr.sin_family = AF_INET;
    pcoex_info->bt_addr.sin_port = htons(CONNECT_PORT_BT);
    pcoex_info->bt_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

    pcoex_info->sk_store = NULL;

    kernel_socket_err = pcoex_info->udpsock->ops->bind(pcoex_info->udpsock,
                                (struct sockaddr *)&pcoex_info->sin,
                                sizeof(pcoex_info->sin));
    if (kernel_socket_err == 0) {
        BTC_PRINT(BTC_MSG_SOCKET, SOCKET_CRITICAL, "binding socket success\n");
        pcoex_info->udpsock->sk->sk_data_ready = rtl_btcoex_recvmsg_int;
        pcoex_info->sock_open |= KERNEL_SOCKET_OK;
        pcoex_info->BT_attend = false;
        BTC_PRINT(BTC_MSG_SOCKET, SOCKET_CRITICAL, "WIFI sending attend_req\n");
        rtl_btcoex_sendmsgbysocket(rtlpriv, attend_req,
                                   sizeof(attend_req), true);
        return _SUCCESS;
    } else {
        pcoex_info->BT_attend = false;
        sock_release(pcoex_info->udpsock);
        BTC_PRINT(BTC_MSG_SOCKET, SOCKET_CRITICAL,
                  "Error binding socket: %d\n", kernel_socket_err);
        return _FAIL;
    }
}
static int restore_unix_rqueue(struct sock *sk, struct cpt_sock_image *si,
                               loff_t pos, struct cpt_context *ctx)
{
    loff_t endpos;

    pos = pos + si->cpt_hdrlen;
    endpos = pos + si->cpt_next;

    while (pos < endpos) {
        struct sk_buff *skb;
        struct sock *owner_sk;
        __u32 owner;

        skb = rst_skb(sk, &pos, &owner, NULL, ctx);
        if (IS_ERR(skb)) {
            if (PTR_ERR(skb) == -EINVAL) {
                int err;

                err = rst_sock_attr(&pos, sk, ctx);
                if (err)
                    return err;
            }
            return PTR_ERR(skb);
        }

        owner_sk = unix_peer(sk);
        if (owner != -1) {
            cpt_object_t *pobj;

            pobj = lookup_cpt_obj_byindex(CPT_OBJ_SOCKET, owner, ctx);
            if (pobj == NULL) {
                eprintk_ctx("orphan af_unix skb?\n");
                kfree_skb(skb);
                continue;
            }
            owner_sk = pobj->o_obj;
        }

        if (owner_sk == NULL) {
            dprintk_ctx("orphan af_unix skb 2?\n");
            kfree_skb(skb);
            continue;
        }

        skb_set_owner_w(skb, owner_sk);
        if (UNIXCB(skb).fp)
            skb->destructor = unix_destruct_fds;
        skb_queue_tail(&sk->sk_receive_queue, skb);

        if (sk->sk_state == TCP_LISTEN) {
            struct socket *sock = skb->sk->sk_socket;

            if (sock == NULL)
                BUG();
            if (sock->file)
                BUG();
            skb->sk->sk_socket = NULL;
            skb->sk->sk_sleep = NULL;
            sock->sk = NULL;
            sock_release(sock);
        }
    }
    return 0;
}
static int rds_tcp_accept_one(struct socket *sock)
{
    struct socket *new_sock = NULL;
    struct rds_connection *conn;
    int ret;
    struct inet_sock *inet;

    ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
                           sock->sk->sk_protocol, &new_sock);
    if (ret)
        goto out;

    new_sock->type = sock->type;
    new_sock->ops = sock->ops;
    ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
    if (ret < 0)
        goto out;

    rds_tcp_tune(new_sock);

    inet = inet_sk(new_sock->sk);
    rdsdebug("accepted tcp %pI4:%u -> %pI4:%u\n",
             &inet->inet_saddr, ntohs(inet->inet_sport),
             &inet->inet_daddr, ntohs(inet->inet_dport));

    conn = rds_conn_create(inet->inet_saddr, inet->inet_daddr,
                           &rds_tcp_transport, GFP_KERNEL);
    if (IS_ERR(conn)) {
        ret = PTR_ERR(conn);
        goto out;
    }

    /*
     * see the comment above rds_queue_delayed_reconnect()
     */
    if (!rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING)) {
        if (rds_conn_state(conn) == RDS_CONN_UP)
            rds_tcp_stats_inc(s_tcp_listen_closed_stale);
        else
            rds_tcp_stats_inc(s_tcp_connect_raced);
        rds_conn_drop(conn);
        ret = 0;
        goto out;
    }

    rds_tcp_set_callbacks(new_sock, conn);
    rds_connect_complete(conn);
    new_sock = NULL;
    ret = 0;

out:
    if (new_sock)
        sock_release(new_sock);
    return ret;
}
static void __exit netlink_exit(void)
{
    if (nl_sk != NULL)
        sock_release(nl_sk->sk_socket);

    printk("my netlink: self module exited\n");
}
static void __exit hello_exit(void)
{
    /*
     * sock_release(struct socket *)
     */
    if (sp != NULL)
        sock_release(sp->sk_socket);

    printk(KERN_ALERT "exit!\n");
}
void nlswitch_exit(void)
{
    /*
     * sock_release(struct socket *)
     */
    if (sp != NULL)
        sock_release(sp->sk_socket);

    printk(KERN_ALERT "[i-keylog] nlswitch exit\n");
}
/* Clear the module */
static void __exit ip6_analysisi_exit(void)
{
    /* release the netlink socket */
    if (nl_sk != NULL) {
        sock_release(nl_sk->sk_socket);
    }

    /* unregister the netfilter hook */
    nf_unregister_hook(&nf_in_analysis);

    PRINT("IPV6 packets receive and analysis module exit.\n");
}
int rdma_exit(rdma_ctx_t ctx)
{
    CHECK_MSG_RET(ctx->sock != 0, "Error releasing socket", -1);

    sock_release(ctx->sock);

    memset(ctx, 0, sizeof(struct rdma_ctx));
    kfree(ctx);
    return 0;
}
int ks_netlink_modinit(void)
{
    int err;

    skb_queue_head_init(&ks_backlog);

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
    ks_netlink_rcv_wq = create_singlethread_workqueue("ksnl");
    if (!ks_netlink_rcv_wq) {
        err = -ENOMEM;
        goto err_create_workqueue;
    }
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)
    ksnl = netlink_kernel_create(NETLINK_KSTREAMER, ks_netlink_rcv);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
    ksnl = netlink_kernel_create(NETLINK_KSTREAMER, 0, ks_netlink_rcv,
                                 THIS_MODULE);
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
    ksnl = netlink_kernel_create(NETLINK_KSTREAMER, 0, ks_netlink_rcv,
                                 NULL, THIS_MODULE);
#else
    ksnl = netlink_kernel_create(&init_net, NETLINK_KSTREAMER, 0,
                                 ks_netlink_rcv, NULL, THIS_MODULE);
#endif
    if (!ksnl) {
        err = -ENOMEM;
        goto err_netlink_kernel_create;
    }

    netlink_set_nonroot(NETLINK_KSTREAMER, NL_NONROOT_RECV);

    ks_netlink_state.mcast_seqnum = 0xBEEF;

    init_rwsem(&ks_netlink_state.topology_lock);

    init_timer(&ks_netlink_state.lock_timer);
    ks_netlink_state.lock_timer.function = ks_lock_timeout;
    ks_netlink_state.lock_timer.data = (unsigned long)&ks_netlink_state;

    init_waitqueue_head(&ks_netlink_state.lock_sleep);

    skb_queue_head_init(&ks_netlink_state.mcast_queue);

    return 0;

    /* error unwind, in reverse order of setup */
    sock_release(ksnl->sk_socket);
err_netlink_kernel_create:
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
    destroy_workqueue(ks_netlink_rcv_wq);
err_create_workqueue:
#endif
    return err;
}
/*----------------------------------------------------------------
 * p80211indicate_shutdown
 *
 * Called during the p80211 unload to get rid of our netlink
 * interface.
 *
 * Arguments:
 *	none
 *
 * Returns:
 *	nothing
 *
 * Call context:
 *	Any
 *----------------------------------------------------------------*/
void p80211indicate_shutdown(void)
{
    struct sock *nl;

    DBFENTER;

    nl = nl_indicate;
    nl_indicate = NULL;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71))
    if (nl != NULL && nl->socket != NULL)
        sock_release(nl->socket);
#else
    if (nl != NULL && nl->sk_socket != NULL)
        sock_release(nl->sk_socket);
#endif

    DBFEXIT;
    return;
}
static int inet6_close(struct sock *sk)
{
    assert(sk);
    assert(sk->p_ops != NULL);

    if (sk->p_ops->close == NULL) {
        sock_release(sk);
        return 0;
    }

    return sk->p_ops->close(sk);
}
void netlink_detach(int unit)
{
    struct socket *sock;

    write_lock_bh(&nl_emu_lock);
    sock = netlink_kernel[unit];
    netlink_kernel[unit] = NULL;
    write_unlock_bh(&nl_emu_lock);

    sock_release(sock);
}
int sock_create_udpserver(struct socket **sock, const char *server_ipaddr,
                          const int server_port)
{
    if (-1 == sock_create_udp(sock))
        return -1;

    if (-1 == sock_make_udpserver(*sock, server_ipaddr, server_port)) {
        sock_release(*sock);
        return -1;
    }

    return 0;
}
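/* A minimal usage sketch for the wrapper above: it already releases the
 * socket itself when the server setup step fails, so the caller only has
 * to release a socket it actually received. The module name, address
 * "192.168.0.1", and port 5000 are illustrative assumptions, not taken
 * from the original source. */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/net.h>

static struct socket *udp_srv_sock;

static int __init udp_srv_init(void)
{
    /* on failure the helper has already released any socket it created */
    if (sock_create_udpserver(&udp_srv_sock, "192.168.0.1", 5000))
        return -EIO;
    return 0;
}

static void __exit udp_srv_exit(void)
{
    if (udp_srv_sock)
        sock_release(udp_srv_sock);
}

module_init(udp_srv_init);
module_exit(udp_srv_exit);
MODULE_LICENSE("GPL");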
static int run_network(void *data)
{
    struct msghdr msg;
    struct iovec iov;
    mm_segment_t oldfs;
    char buffer[0x200];	/* = "Hello"; */
    int cc;
    struct socket *csock = data;
    struct nm_packet_rp *reply;

    printk(KERN_INFO "NetMalloc: creating client thread\n");

    while (network_is_running) {
        memset(&msg, 0, sizeof(msg));
        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;
        msg.msg_flags = MSG_DONTWAIT;
        msg.msg_iov->iov_len = sizeof(buffer);
        msg.msg_iov->iov_base = buffer;

        oldfs = get_fs();
        set_fs(KERNEL_DS);
        cc = sock_recvmsg(csock, &msg, sizeof(buffer), MSG_DONTWAIT);
        set_fs(oldfs);

        if (!cc)
            break;
        else if (cc == -EWOULDBLOCK)
            schedule_timeout_interruptible(125);
        else if (cc > 0) {
            printk(KERN_INFO "%d bytes received\n", cc);

            reply = handle_packet((struct nm_packet_rq *) buffer, cc);
            if (reply) {
                cc = sizeof(struct nm_packet_rp) + reply->data_len;

                memset(&msg, 0, sizeof(msg));
                msg.msg_iov = &iov;
                msg.msg_iovlen = 1;
                msg.msg_flags = MSG_DONTWAIT;
                msg.msg_iov->iov_len = cc;
                msg.msg_iov->iov_base = reply;

                oldfs = get_fs();
                set_fs(KERNEL_DS);
                cc = sock_sendmsg(csock, &msg, cc);
                set_fs(oldfs);

                printk(KERN_INFO "%d bytes sent\n", cc);
                kfree(reply);
            }
        }
    }

    sock_release(csock);
    printk(KERN_INFO "NetMalloc: closing client thread\n");
    return 0;
}
static void __exit netchar_exit(void)
{
    device_destroy(nc_class, nc_dev_t);
    cdev_del(nc_cdev);
    class_destroy(nc_class);
    unregister_chrdev_region(nc_dev_t, 1);

    nc_socket->ops->shutdown(nc_socket, 0);
    sock_release(nc_socket);

    _PKI("exit");
}
int spectral_destroy_netlink(struct ath_softc *sc)
{
    struct ath_spectral *spectral = sc->sc_spectral;

    spectral->spectral_sock = NULL;
    if (atomic_dec_and_test(&spectral_nl_users)) {
        sock_release(spectral_nl_sock->sk_socket);
        spectral_nl_sock = NULL;
    }
    return 0;
}
static void mnlk_fini(void)
{
    if (nl_sk) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
        netlink_kernel_release(nl_sk);
#else
        sock_release(nl_sk->sk_socket);
#endif
    }
}
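/* Several of the teardown paths above (netlink_exit, hello_exit,
 * ip6_analysisi_exit, channel_exit) release the kernel-side netlink socket
 * via nl_sk->sk_socket, which was the idiom on older kernels; mnlk_fini
 * shows the modern counterpart, netlink_kernel_release(). Below is a minimal
 * sketch of the matching create/release pair on a recent kernel. The unit
 * NETLINK_USERSOCK and the nl_input() callback are illustrative assumptions,
 * not taken from any of the modules above. */
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>
#include <net/sock.h>

static struct sock *nl_demo_sk;

/* hypothetical receive callback; a real module would parse the nlmsghdr */
static void nl_input(struct sk_buff *skb)
{
    printk(KERN_INFO "netlink message received, len=%u\n", skb->len);
}

static int __init nl_demo_init(void)
{
    struct netlink_kernel_cfg cfg = {
        .input = nl_input,
    };

    nl_demo_sk = netlink_kernel_create(&init_net, NETLINK_USERSOCK, &cfg);
    if (!nl_demo_sk)
        return -ENOMEM;
    return 0;
}

static void __exit nl_demo_exit(void)
{
    /* pairs with netlink_kernel_create(); no direct sock_release() needed */
    if (nl_demo_sk)
        netlink_kernel_release(nl_demo_sk);
}

module_init(nl_demo_init);
module_exit(nl_demo_exit);
MODULE_LICENSE("GPL");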
/**
 * xs_udp_connect_worker - set up a UDP socket
 * @args: RPC transport to connect
 *
 * Invoked by a work queue tasklet.
 */
static void xs_udp_connect_worker(void *args)
{
    struct rpc_xprt *xprt = (struct rpc_xprt *) args;
    struct socket *sock = xprt->sock;
    int err, status = -EIO;

    if (xprt->shutdown || xprt->addr.sin_port == 0)
        goto out;

    dprintk("RPC: xs_udp_connect_worker for xprt %p\n", xprt);

    /* Start by resetting any existing state */
    xs_close(xprt);

    if ((err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock)) < 0) {
        dprintk("RPC: can't create UDP transport socket (%d).\n", -err);
        goto out;
    }
    xs_reclassify_socket(sock);

    if (xs_bind(xprt, sock)) {
        sock_release(sock);
        goto out;
    }

    if (!xprt->inet) {
        struct sock *sk = sock->sk;

        write_lock_bh(&sk->sk_callback_lock);

        sk->sk_user_data = xprt;
        xprt->old_data_ready = sk->sk_data_ready;
        xprt->old_state_change = sk->sk_state_change;
        xprt->old_write_space = sk->sk_write_space;
        sk->sk_data_ready = xs_udp_data_ready;
        sk->sk_write_space = xs_udp_write_space;
        sk->sk_no_check = UDP_CSUM_NORCV;
        sk->sk_allocation = GFP_ATOMIC;

        xprt_set_connected(xprt);

        /* Reset to new socket */
        xprt->sock = sock;
        xprt->inet = sk;

        write_unlock_bh(&sk->sk_callback_lock);
    }
    xs_udp_do_set_buffer_size(xprt);
    status = 0;
out:
    xprt_wake_pending_tasks(xprt, status);
    xprt_clear_connecting(xprt);
}
void __exit turbotap_module_end(void)
{
    unsigned int i;

    for_each_turbo_sock(i) {
        if (turbotap_interfaces.turbotap_sf[i]->turbotap_sock)
            sock_release(turbotap_interfaces.turbotap_sf[i]->turbotap_sock);
    }

    misc_deregister(&turbotap_tun_miscdev);
    printk(KERN_INFO "turbotap module exit\n");
}
struct sock *sock_create(int family, int type, int protocol)
{
    int ret;
    struct sock *new_sk;
    const struct net_family *nfamily;
    const struct net_family_type *nftype;
    const struct net_sock *nsock;

    nfamily = net_family_lookup(family);
    if (nfamily == NULL) {
        return err_ptr(EAFNOSUPPORT);
    }

    nftype = net_family_type_lookup(nfamily, type);
    if (nftype == NULL) {
        return err_ptr(EPROTOTYPE);
    }

    nsock = net_sock_lookup(family, type, protocol);
    if (nsock == NULL) {
        return err_ptr(EPROTONOSUPPORT);
    }

    new_sk = sock_alloc(nftype->ops, nsock->ops);
    if (new_sk == NULL) {
        return err_ptr(ENOMEM);
    }

    sock_init(new_sk, family, type, nsock->protocol, nftype->ops,
              nsock->ops,
              nfamily->out_ops != NULL ? *nfamily->out_ops : NULL);

    assert(new_sk->f_ops != NULL);
    ret = new_sk->f_ops->init(new_sk);
    if (ret != 0) {
        sock_release(new_sk);
        return err_ptr(-ret);
    }

    assert(new_sk->p_ops != NULL);
    if (new_sk->p_ops->init != NULL) {
        ret = new_sk->p_ops->init(new_sk);
        if (ret != 0) {
            sock_close(new_sk);
            return err_ptr(-ret);
        }
    }

    sock_hash(new_sk);

    return new_sk;
}
void StopListening(void)
{
    struct socket *sock;

    EnterFunction("StopListening");

    if (MainSocket == NULL)
        return;

    sock = MainSocket;
    MainSocket = NULL;
    sock_release(sock);

    LeaveFunction("StopListening");
}
void tfw_close_listen_sockets(void)
{
    down_read(&tfw_cfg.mtx);

    TFW_LOG("Close %u listening sockets\n", listen_socks_n);

    while (listen_socks_n)
        sock_release(protos[--listen_socks_n].listener);

    kfree(protos);

    up_read(&tfw_cfg.mtx);
}
/**---------------------------------------------------------------------------
 * Close an existing upcall socket.
 *
 * @param[in] info Pointer to the upcall information structure.
 *---------------------------------------------------------------------------*/
static void
vqec_dp_ipcserver_close_upcall_sock (vqec_dp_upcall_info_t *upcall_info)
{
    if (upcall_info->sock) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
        upcall_info->sock->ops->shutdown(upcall_info->sock, 0);
#else
        kernel_sock_shutdown(upcall_info->sock, SHUT_RDWR);
#endif
        sock_release(upcall_info->sock);
        upcall_info->sock = NULL;
    }
}
void channel_exit(void)
{
    printk("channel_exit\n");

    nf_unregister_hook(&nfho);

    if (nl_sk != NULL) {
        sock_release(nl_sk->sk_socket);
    }

    printk("netlink remove ok\n");
}
void sockfd_release(int sockfd)
{
  /* Get the socket structure for this sockfd */

  FAR struct socket *psock = sockfd_socket(sockfd);

  /* Release the socket if the descriptor was valid */

  if (psock)
    {
      sock_release(psock);
    }
}
int sock_close(struct sock *sk)
{
    if (sk == NULL) {
        return -EINVAL;
    }

    assert(sk->f_ops != NULL);
    if (sk->f_ops->close == NULL) {
        sock_release(sk);
        return 0;
    }

    return sk->f_ops->close(sk);
}
/*
 * close the RxRPC socket AFS was using
 */
void afs_close_socket(void)
{
    _enter("");

    sock_release(afs_socket);

    _debug("dework");
    destroy_workqueue(afs_async_calls);

    ASSERTCMP(atomic_read(&afs_outstanding_skbs), ==, 0);
    ASSERTCMP(atomic_read(&afs_outstanding_calls), ==, 0);
    _leave("");
}
/* TODO: note that sock_release() itself invokes sk->ops->release(), so the
 * explicit call below makes the protocol release run twice. */
int kclose(ksocket_t socket)
{
    struct socket *sk;
    int ret;

    sk = (struct socket *)socket;
    ret = sk->ops->release(sk);

    if (sk)
        sock_release(sk);

    return ret;
}
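/* A minimal alternative sketch for the close path above, not the ksocket
 * library's actual implementation: since sock_release() already calls the
 * protocol's release op and frees the struct socket, the explicit
 * sk->ops->release() call can be dropped. Assumes ksocket_t is a
 * struct socket pointer, as in kclose() above. */
#include <linux/errno.h>
#include <linux/net.h>

int kclose_simple(ksocket_t socket)
{
    struct socket *sk = (struct socket *)socket;

    if (sk == NULL)
        return -EINVAL;

    /* releases the protocol state and frees the socket in one step */
    sock_release(sk);
    return 0;
}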