/* start_listening */
void moon_listen(void)
{
	struct sockaddr_in sin;
	struct socket *sock = NULL;
	struct sock *sk;
	struct tcp_sock *tp;
	struct inet_connection_sock *icsk;
	moonraker_socket_t *listen;
	moonraker_proto_t *proto;
	u16 port;
	u32 addr;
	int err = 0;

	/* For test purposes: build a hard-coded listener description. */
	listen = kmalloc(sizeof(moonraker_socket_t), GFP_KERNEL);
	if (!listen)
		return;
	listen->proto = kmalloc(sizeof(moonraker_proto_t), GFP_KERNEL);
	if (!listen->proto)
		goto free_listen;
	listen->proto->name = kmalloc(sizeof("http"), GFP_KERNEL);
	if (!listen->proto->name)
		goto free_proto;
	listen->ip = 2130706433;	/* 127.0.0.1 */
	listen->port = 80;
	listen->proto->defer_accept = 0;
	listen->keepalive_timeout = 0;
	listen->ack_pingpong = 1;
	listen->max_backlog = 2048;
	listen->defer_accept = 1;
	strcpy(listen->proto->name, "http");
	/* end of test setup */

	proto = listen->proto;
	port = listen->port;
	addr = listen->ip;

	err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err) {
		printk(KERN_ERR "Moonraker: error %d creating socket.\n", err);
		goto error;
	}

	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(addr);
	sin.sin_port = htons(port);

	sk = sock->sk;
	icsk = inet_csk(sk);
	sk->sk_reuse = 1;
	sock_set_flag(sk, SOCK_URGINLINE);

	err = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
	if (err) {
		printk(KERN_ERR "Moonraker: error %d binding socket; most "
		       "likely another process is (or recently was) using "
		       "%d.%d.%d.%d:%d.\n", err, HIPQUAD(addr), port);
		goto error;
	}

	tp = tcp_sk(sk);
	printk("listen sk accept_queue: %d.\n",
	       !reqsk_queue_empty(&icsk->icsk_accept_queue));

	icsk->icsk_ack.pingpong = listen->ack_pingpong;
	sock_reset_flag(sk, SOCK_LINGER);
	sk->sk_lingertime = 0;
	tp->linger2 = listen->keepalive_timeout * HZ;

	if (proto->defer_accept && !listen->keepalive_timeout &&
	    listen->defer_accept)
		icsk->icsk_accept_queue.rskq_defer_accept = 1;

	err = sock->ops->listen(sock, listen->max_backlog);
	if (err) {
		printk(KERN_ERR "Moonraker: error %d listening on socket.\n",
		       err);
		goto error;
	}

	printk(KERN_NOTICE "Moonraker: thread %d listens on %s://%d.%d.%d.%d:%d.\n",
	       1, proto->name, HIPQUAD(addr), port);
	/* return sock; */
	return;

error:
	if (sock)
		sock_release(sock);
	kfree(listen->proto->name);
free_proto:
	kfree(listen->proto);
free_listen:
	kfree(listen);
}
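/*
 * For context: a minimal sketch of how this test listener might be
 * driven from module init/exit hooks.  Everything below (the module
 * name and the init/exit glue) is illustrative scaffolding, not part
 * of the original source.
 */
#include <linux/module.h>
#include <linux/init.h>

static int __init moonraker_test_init(void)
{
	moon_listen();		/* bind and listen on 127.0.0.1:80 */
	return 0;
}

static void __exit moonraker_test_exit(void)
{
	/* The test listener keeps its socket for the module lifetime;
	 * a real driver would track and release it here. */
}

module_init(moonraker_test_init);
module_exit(moonraker_test_exit);
MODULE_LICENSE("GPL");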
/*
 * Initialize the base fields of the endpoint structure.
 */
static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
						struct sock *sk,
						gfp_t gfp)
{
	struct sctp_hmac_algo_param *auth_hmacs = NULL;
	struct sctp_chunks_param *auth_chunks = NULL;
	struct sctp_shared_key *null_key;
	int err;

	ep->digest = kzalloc(SCTP_SIGNATURE_SIZE, gfp);
	if (!ep->digest)
		return NULL;

	if (sctp_auth_enable) {
		/* Allocate space for HMACS and CHUNKS authentication
		 * variables.  These are arrays that we encode directly
		 * into parameters to make the rest of the operations
		 * easier.
		 */
		auth_hmacs = kzalloc(sizeof(sctp_hmac_algo_param_t) +
				     sizeof(__u16) * SCTP_AUTH_NUM_HMACS, gfp);
		if (!auth_hmacs)
			goto nomem;

		auth_chunks = kzalloc(sizeof(sctp_chunks_param_t) +
				      SCTP_NUM_CHUNK_TYPES, gfp);
		if (!auth_chunks)
			goto nomem;

		/* Initialize the HMACS parameter.
		 * SCTP-AUTH: Section 3.3
		 * Every endpoint supporting SCTP chunk authentication MUST
		 * support the HMAC based on the SHA-1 algorithm.
		 */
		auth_hmacs->param_hdr.type = SCTP_PARAM_HMAC_ALGO;
		auth_hmacs->param_hdr.length =
				htons(sizeof(sctp_paramhdr_t) + 2);
		auth_hmacs->hmac_ids[0] = htons(SCTP_AUTH_HMAC_ID_SHA1);

		/* Initialize the CHUNKS parameter. */
		auth_chunks->param_hdr.type = SCTP_PARAM_CHUNKS;
		auth_chunks->param_hdr.length = htons(sizeof(sctp_paramhdr_t));

		/* If the Add-IP functionality is enabled, we must
		 * authenticate ASCONF and ASCONF-ACK chunks.
		 */
		if (sctp_addip_enable) {
			auth_chunks->chunks[0] = SCTP_CID_ASCONF;
			auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK;
			auth_chunks->param_hdr.length =
					htons(sizeof(sctp_paramhdr_t) + 2);
		}
	}

	/* Initialize the base structure. */
	/* What type of endpoint are we? */
	ep->base.type = SCTP_EP_TYPE_SOCKET;

	/* Initialize the basic object fields. */
	atomic_set(&ep->base.refcnt, 1);
	ep->base.dead = 0;
	ep->base.malloced = 1;

	/* Create an input queue. */
	sctp_inq_init(&ep->base.inqueue);

	/* Set its top-half handler. */
	sctp_inq_set_th_handler(&ep->base.inqueue, sctp_endpoint_bh_rcv);

	/* Initialize the bind addr area. */
	sctp_bind_addr_init(&ep->base.bind_addr, 0);

	/* Remember who we are attached to. */
	ep->base.sk = sk;
	sock_hold(ep->base.sk);

	/* Create the lists of associations. */
	INIT_LIST_HEAD(&ep->asocs);

	/* Use SCTP-specific send buffer space queues. */
	ep->sndbuf_policy = sctp_sndbuf_policy;
	sk->sk_data_ready = sctp_data_ready;
	sk->sk_write_space = sctp_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	/* Get the receive buffer policy for this endpoint. */
	ep->rcvbuf_policy = sctp_rcvbuf_policy;

	/* Initialize the secret key used with the cookie. */
	get_random_bytes(&ep->secret_key[0], SCTP_SECRET_SIZE);
	ep->last_key = ep->current_key = 0;
	ep->key_changed_at = jiffies;

	/* SCTP-AUTH extensions: add the null key to the endpoint
	 * shared keys list.
	 */
	INIT_LIST_HEAD(&ep->endpoint_shared_keys);
	null_key = sctp_auth_shkey_create(0, GFP_KERNEL);
	if (!null_key)
		goto nomem;
	list_add(&null_key->key_list, &ep->endpoint_shared_keys);

	/* Allocate and initialize transforms arrays for supported HMACs. */
	err = sctp_auth_init_hmacs(ep, gfp);
	if (err)
		goto nomem_hmacs;

	/* Set the hmacs and chunks pointers. */
	ep->auth_hmacs_list = auth_hmacs;
	ep->auth_chunk_list = auth_chunks;

	return ep;

nomem_hmacs:
	sctp_auth_destroy_keys(&ep->endpoint_shared_keys);
nomem:
	/* Free all allocations. */
	kfree(auth_hmacs);
	kfree(auth_chunks);
	kfree(ep->digest);
	return NULL;
}
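/*
 * For context: initializers of this shape are consumed by an
 * allocate-then-init wrapper.  The sketch below is modeled on the
 * kernel's sctp_endpoint_new() but is illustrative, not copied from
 * this source; the debug-object accounting the real wrapper performs
 * is omitted.
 */
struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, gfp_t gfp)
{
	struct sctp_endpoint *ep;

	/* Build a local endpoint. */
	ep = kzalloc(sizeof(*ep), gfp);
	if (!ep)
		return NULL;

	/* sctp_endpoint_init() releases its own partial allocations on
	 * failure, so only the container is left to free here. */
	if (!sctp_endpoint_init(ep, sk, gfp)) {
		kfree(ep);
		return NULL;
	}
	return ep;
}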
int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int optlen)
{
	struct sock *sk = sock->sk;
#ifdef CONFIG_NET_SK_FILTER
	struct sk_filter *filter;
#endif
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 * Options without arguments
	 */

#ifdef SO_DONTLINGER		/* Compatibility item... */
	if (optname == SO_DONTLINGER) {
		lock_sock(sk);
		sock_reset_flag(sk, SOCK_LINGER);
		release_sock(sk);
		return 0;
	}
#endif

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val ? 1 : 0;

	lock_sock(sk);

	switch (optname) {
	case SO_DEBUG:
		if (val && !capable(CAP_NET_ADMIN))
			ret = -EACCES;
		else if (valbool)
			sock_set_flag(sk, SOCK_DBG);
		else
			sock_reset_flag(sk, SOCK_DBG);
		break;

	case SO_REUSEADDR:
		sk->sk_reuse = valbool;
		break;

	case SO_TYPE:
	case SO_ERROR:
		ret = -ENOPROTOOPT;
		break;

	case SO_DONTROUTE:
		if (valbool)
			sock_set_flag(sk, SOCK_LOCALROUTE);
		else
			sock_reset_flag(sk, SOCK_LOCALROUTE);
		break;

	case SO_BROADCAST:
		sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
		break;

	case SO_SNDBUF:
		/* Don't return an error here; BSD doesn't, and if you
		 * think about it this is right.  Otherwise apps have to
		 * play 'guess the biggest size' games.  RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		if (val > sysctl_wmem_max)
			val = sysctl_wmem_max;
set_sndbuf:
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		if ((val * 2) < SOCK_MIN_SNDBUF)
			sk->sk_sndbuf = SOCK_MIN_SNDBUF;
		else
			sk->sk_sndbuf = val * 2;

		/* Wake up sending tasks if we upped the value. */
		sk->sk_write_space(sk);
		break;

	case SO_SNDBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_sndbuf;

	case SO_RCVBUF:
		/* Don't return an error here; BSD doesn't, and if you
		 * think about it this is right.  Otherwise apps have to
		 * play 'guess the biggest size' games.  RCVBUF/SNDBUF
		 * are treated in BSD as hints.
		 */
		if (val > sysctl_rmem_max)
			val = sysctl_rmem_max;
set_rcvbuf:
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
		/* FIXME: is this lower bound the right one? */
		if ((val * 2) < SOCK_MIN_RCVBUF)
			sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
		else
			sk->sk_rcvbuf = val * 2;
		break;

	case SO_RCVBUFFORCE:
		if (!capable(CAP_NET_ADMIN)) {
			ret = -EPERM;
			break;
		}
		goto set_rcvbuf;

	case SO_KEEPALIVE:
#ifdef CONFIG_INET
		if (sk->sk_protocol == IPPROTO_TCP)
			tcp_set_keepalive(sk, valbool);
#endif
		sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
		break;

	case SO_OOBINLINE:
		sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
		break;

	case SO_NO_CHECK:
		sk->sk_no_check = valbool;
		break;

	case SO_PRIORITY:
		if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
			sk->sk_priority = val;
		else
			ret = -EPERM;
		break;

	case SO_LINGER:
		if (optlen < sizeof(ling)) {
			ret = -EINVAL;	/* 1003.1g */
			break;
		}
		if (copy_from_user(&ling, optval, sizeof(ling))) {
			ret = -EFAULT;
			break;
		}
		if (!ling.l_onoff)
			sock_reset_flag(sk, SOCK_LINGER);
		else {
#if (BITS_PER_LONG == 32)
			if ((unsigned int)ling.l_linger >=
			    MAX_SCHEDULE_TIMEOUT / HZ)
				sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
			else
#endif
				sk->sk_lingertime =
					(unsigned int)ling.l_linger * HZ;
			sock_set_flag(sk, SOCK_LINGER);
		}
		break;

	case SO_BSDCOMPAT:
		sock_warn_obsolete_bsdism("setsockopt");
		break;

	case SO_PASSCRED:
		if (valbool)
			set_bit(SOCK_PASSCRED, &sock->flags);
		else
			clear_bit(SOCK_PASSCRED, &sock->flags);
		break;

	case SO_TIMESTAMP:
		if (valbool) {
			sock_set_flag(sk, SOCK_RCVTSTAMP);
			sock_enable_timestamp(sk);
		} else
			sock_reset_flag(sk, SOCK_RCVTSTAMP);
		break;

	case SO_RCVLOWAT:
		if (val < 0)
			val = INT_MAX;
		sk->sk_rcvlowat = val ? : 1;
		break;

	case SO_RCVTIMEO:
		ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
		break;

	case SO_SNDTIMEO:
		ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
		break;

#ifdef CONFIG_NETDEVICES
	case SO_BINDTODEVICE:
	{
		char devname[IFNAMSIZ];

		/* Sorry... */
		if (!capable(CAP_NET_RAW)) {
			ret = -EPERM;
			break;
		}

		/* Bind this socket to a particular device like "eth0",
		 * as specified in the passed interface name.  If the
		 * name is "" or the option length is zero the socket
		 * is not bound.
		 */
		if (!valbool) {
			sk->sk_bound_dev_if = 0;
		} else {
			if (optlen > IFNAMSIZ)
				optlen = IFNAMSIZ;
			if (copy_from_user(devname, optval, optlen)) {
				ret = -EFAULT;
				break;
			}

			/* Remove any cached route for this socket. */
			sk_dst_reset(sk);

			if (devname[0] == '\0') {
				sk->sk_bound_dev_if = 0;
			} else {
				struct net_device *dev =
					dev_get_by_name(devname);
				if (!dev) {
					ret = -ENODEV;
					break;
				}
				sk->sk_bound_dev_if = dev->ifindex;
				dev_put(dev);
			}
		}
		break;
	}
#endif

#ifdef CONFIG_NET_SK_FILTER
	case SO_ATTACH_FILTER:
		ret = -EINVAL;
		if (optlen == sizeof(struct sock_fprog)) {
			struct sock_fprog fprog;

			ret = -EFAULT;
			if (copy_from_user(&fprog, optval, sizeof(fprog)))
				break;

			ret = sk_attach_filter(&fprog, sk);
		}
		break;

	case SO_DETACH_FILTER:
		spin_lock_bh(&sk->sk_lock.slock);
		filter = sk->sk_filter;
		if (filter) {
			sk->sk_filter = NULL;
			spin_unlock_bh(&sk->sk_lock.slock);
			sk_filter_release(sk, filter);
			break;
		}
		spin_unlock_bh(&sk->sk_lock.slock);
		ret = -ENONET;
		break;
#endif

	/* We implement SO_SNDLOWAT etc. as not settable (1003.1g 5.3). */
	default:
		ret = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return ret;
}
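/*
 * For context: a small user-space sketch (a separate program, not
 * kernel code) exercising two of the options handled in the switch
 * above.  Note that the SO_RCVBUF branch doubles the requested size,
 * so getsockopt() reports twice the value passed in (subject to the
 * sysctl_rmem_max clamp).
 */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct linger ling = { .l_onoff = 1, .l_linger = 5 };
	int rcvbuf = 65536, actual;
	socklen_t len = sizeof(actual);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* SO_LINGER: block close() for up to 5 seconds while unsent
	 * data drains (the SOCK_LINGER branch above). */
	if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &ling, sizeof(ling)) < 0)
		perror("SO_LINGER");

	/* SO_RCVBUF: a hint, clamped and doubled by the kernel. */
	if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, sizeof(rcvbuf)) < 0)
		perror("SO_RCVBUF");

	if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &actual, &len) == 0)
		printf("asked for %d, kernel set %d\n", rcvbuf, actual);

	close(fd);
	return 0;
}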