/*
 * Create a TCP (AF_INET, SOCK_STREAM) socket configured with keepalive
 * and SO_REUSEADDR, optionally switched to nonblocking mode.
 *
 * Inputs:  <nonblocking> when non-zero, the socket is put in nonblocking mode
 * Returns: -1 on failure (the socket is closed), otherwise the new socket
 */
static int tcp_open_socket (int nonblocking)
{
	int sock_fd;
	int enable = 1;

	sock_fd = (int) socket (AF_INET, SOCK_STREAM, 0);
	if (sock_fd < 0)
		return -1;

	/* Nonblocking mode is applied first, only when requested. */
	if (nonblocking && tcp_set_nonblocking (sock_fd))
		goto fail;

	if (tcp_set_keepalive (sock_fd, 1))
		goto fail;

	if (setsockopt (sock_fd, SOL_SOCKET, SO_REUSEADDR,
			(void *) &enable, sizeof (enable)) != 0)
		goto fail;

	return sock_fd;

fail:
	closesocket (sock_fd);
	return -1;
}
/*
 * accept a new tcp client connection
 * Inputs:  <sock> the server listen socket
 * Outputs: <port> the remote port of this client (host byte order)
 *          <ip>   the ip address of the client (host byte order)
 * Returns: -1 on failure, otherwise the socket of the client
 *
 * The accepted socket has keepalive enabled; tcp_set_keepalive failure is
 * deliberately ignored (best-effort), matching the original behavior.
 */
int tcp_accept_client(int sock, int *port, unsigned int *ip, int nonblocking)
{
	unsigned int sinsize;
	struct sockaddr_in sin;
	int fd;

	sinsize = sizeof (sin);
	fd = (int) accept (sock, (struct sockaddr *) &sin, &sinsize);
	/* BUG FIX: accept() may legally return descriptor 0; only negative
	 * values signal failure. The old test (fd > 0) mis-reported fd==0 as
	 * an error and leaked that descriptor. */
	if(fd >= 0) {
		*port = ntohs(sin.sin_port);
		*ip = ntohl (sin.sin_addr.s_addr);

		tcp_set_keepalive(fd, 1);

		if(nonblocking && tcp_set_nonblocking (fd)) {
			closesocket (fd);
			fd = -1;
		}
	}

	return fd;
}
/*
 * sock_setsockopt - handle the SOL_SOCKET level of setsockopt(2).
 * @sock:    userland socket
 * @level:   socket level (unused here; caller dispatched on SOL_SOCKET)
 * @optname: the SO_* option being set
 * @optval:  user-space pointer to the option value
 * @optlen:  length of the option value in bytes
 *
 * Returns 0 on success or a negative errno. The socket lock is held
 * across the option switch; the SO_DONTLINGER compatibility path takes
 * and drops the lock itself and returns early.
 */
int sock_setsockopt(struct socket *sock, int level, int optname,
		    char __user *optval, int optlen)
{
	struct sock *sk=sock->sk;
	struct sk_filter *filter;
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;

	/*
	 *	Options without arguments
	 */

#ifdef SO_DONTLINGER		/* Compatibility item... */
	if (optname == SO_DONTLINGER) {
		lock_sock(sk);
		sock_reset_flag(sk, SOCK_LINGER);
		release_sock(sk);
		return 0;
	}
#endif

	/* every remaining option carries at least an int argument */
	if(optlen<sizeof(int))
		return(-EINVAL);

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	valbool = val?1:0;

	lock_sock(sk);

	switch(optname)
	{
		case SO_DEBUG:
			/* enabling debug requires CAP_NET_ADMIN; disabling does not */
			if(val && !capable(CAP_NET_ADMIN))
			{
				ret = -EACCES;
			}
			else if (valbool)
				sock_set_flag(sk, SOCK_DBG);
			else
				sock_reset_flag(sk, SOCK_DBG);
			break;
		case SO_REUSEADDR:
			sk->sk_reuse = valbool;
			break;
		case SO_TYPE:
		case SO_ERROR:
			/* read-only options: not settable */
			ret = -ENOPROTOOPT;
			break;
		case SO_DONTROUTE:
			if (valbool)
				sock_set_flag(sk, SOCK_LOCALROUTE);
			else
				sock_reset_flag(sk, SOCK_LOCALROUTE);
			break;
		case SO_BROADCAST:
			sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
			break;
		case SO_SNDBUF:
			/* Don't error on this BSD doesn't and if you think
			   about it this is right. Otherwise apps have to
			   play 'guess the biggest size' games. RCVBUF/SNDBUF
			   are treated in BSD as hints */

			if (val > sysctl_wmem_max)
				val = sysctl_wmem_max;
set_sndbuf:
			sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
			/* doubled to account for sk_buff overhead; clamped below
			   SOCK_MIN_SNDBUF */
			if ((val * 2) < SOCK_MIN_SNDBUF)
				sk->sk_sndbuf = SOCK_MIN_SNDBUF;
			else
				sk->sk_sndbuf = val * 2;

			/*
			 *	Wake up sending tasks if we
			 *	upped the value.
			 */
			sk->sk_write_space(sk);
			break;

		case SO_SNDBUFFORCE:
			/* privileged variant: skips the sysctl_wmem_max clamp */
			if (!capable(CAP_NET_ADMIN)) {
				ret = -EPERM;
				break;
			}
			goto set_sndbuf;

		case SO_RCVBUF:
			/* Don't error on this BSD doesn't and if you think
			   about it this is right. Otherwise apps have to
			   play 'guess the biggest size' games. RCVBUF/SNDBUF
			   are treated in BSD as hints */

			if (val > sysctl_rmem_max)
				val = sysctl_rmem_max;
set_rcvbuf:
			sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
			/*
			 * We double it on the way in to account for
			 * "struct sk_buff" etc. overhead.   Applications
			 * assume that the SO_RCVBUF setting they make will
			 * allow that much actual data to be received on that
			 * socket.
			 *
			 * Applications are unaware that "struct sk_buff" and
			 * other overheads allocate from the receive buffer
			 * during socket buffer allocation.
			 *
			 * And after considering the possible alternatives,
			 * returning the value we actually used in getsockopt
			 * is the most desirable behavior.
			 */
			if ((val * 2) < SOCK_MIN_RCVBUF)
				sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
			else
				sk->sk_rcvbuf = val * 2;
			break;

		case SO_RCVBUFFORCE:
			/* privileged variant: skips the sysctl_rmem_max clamp */
			if (!capable(CAP_NET_ADMIN)) {
				ret = -EPERM;
				break;
			}
			goto set_rcvbuf;

		case SO_KEEPALIVE:
#ifdef CONFIG_INET
			if (sk->sk_protocol == IPPROTO_TCP)
				tcp_set_keepalive(sk, valbool);
#endif
			sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
			break;

		case SO_OOBINLINE:
			sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
			break;

		case SO_NO_CHECK:
			sk->sk_no_check = valbool;
			break;

		case SO_PRIORITY:
			/* priorities 0..6 unprivileged; higher needs CAP_NET_ADMIN */
			if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
				sk->sk_priority = val;
			else
				ret = -EPERM;
			break;

		case SO_LINGER:
			if(optlen<sizeof(ling)) {
				ret = -EINVAL;	/* 1003.1g */
				break;
			}
			if (copy_from_user(&ling,optval,sizeof(ling))) {
				ret = -EFAULT;
				break;
			}
			if (!ling.l_onoff)
				sock_reset_flag(sk, SOCK_LINGER);
			else {
#if (BITS_PER_LONG == 32)
				/* avoid jiffies overflow of l_linger*HZ on 32-bit */
				if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
					sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
				else
#endif
					sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
				sock_set_flag(sk, SOCK_LINGER);
			}
			break;

		case SO_BSDCOMPAT:
			/* obsolete: warn and accept silently */
			sock_warn_obsolete_bsdism("setsockopt");
			break;

		case SO_PASSCRED:
			if (valbool)
				set_bit(SOCK_PASSCRED, &sock->flags);
			else
				clear_bit(SOCK_PASSCRED, &sock->flags);
			break;

		case SO_TIMESTAMP:
			if (valbool) {
				sock_set_flag(sk, SOCK_RCVTSTAMP);
				sock_enable_timestamp(sk);
			} else
				sock_reset_flag(sk, SOCK_RCVTSTAMP);
			break;

		case SO_RCVLOWAT:
			if (val < 0)
				val = INT_MAX;
			/* GNU ?: extension - zero maps to the minimum of 1 */
			sk->sk_rcvlowat = val ? : 1;
			break;

		case SO_RCVTIMEO:
			ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
			break;

		case SO_SNDTIMEO:
			ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
			break;

#ifdef CONFIG_NETDEVICES
		case SO_BINDTODEVICE:
		{
			char devname[IFNAMSIZ];

			/* Sorry... */
			if (!capable(CAP_NET_RAW)) {
				ret = -EPERM;
				break;
			}

			/* Bind this socket to a particular device like "eth0",
			 * as specified in the passed interface name. If the
			 * name is "" or the option length is zero the socket
			 * is not bound.
			 */

			if (!valbool) {
				sk->sk_bound_dev_if = 0;
			} else {
				/* clamp + memset guarantee NUL termination of devname */
				if (optlen > IFNAMSIZ - 1)
					optlen = IFNAMSIZ - 1;
				memset(devname, 0, sizeof(devname));
				if (copy_from_user(devname, optval, optlen)) {
					ret = -EFAULT;
					break;
				}

				/* Remove any cached route for this socket. */
				sk_dst_reset(sk);

				if (devname[0] == '\0') {
					sk->sk_bound_dev_if = 0;
				} else {
					struct net_device *dev = dev_get_by_name(devname);
					if (!dev) {
						ret = -ENODEV;
						break;
					}
					sk->sk_bound_dev_if = dev->ifindex;
					dev_put(dev);
				}
			}
			break;
		}
#endif

		case SO_ATTACH_FILTER:
			ret = -EINVAL;
			if (optlen == sizeof(struct sock_fprog)) {
				struct sock_fprog fprog;

				ret = -EFAULT;
				if (copy_from_user(&fprog, optval, sizeof(fprog)))
					break;

				ret = sk_attach_filter(&fprog, sk);
			}
			break;

		case SO_DETACH_FILTER:
			/* detach under the slock so concurrent filter users are safe */
			spin_lock_bh(&sk->sk_lock.slock);
			filter = sk->sk_filter;
			if (filter) {
				sk->sk_filter = NULL;
				spin_unlock_bh(&sk->sk_lock.slock);
				sk_filter_release(sk, filter);
				break;
			}
			spin_unlock_bh(&sk->sk_lock.slock);
			ret = -ENONET;
			break;

		/* We implement the SO_SNDLOWAT etc to not be settable (1003.1g 5.3) */
		default:
			ret = -ENOPROTOOPT;
			break;
	}
	release_sock(sk);
	return ret;
}
/**
 * @fn        THRET accept_thread(void *arg)
 * @brief     The accepting thread for TCP connection.
 *            Loops until term_main_evt is signalled: accepts new clients
 *            (up to BCAP_CLIENT_MAX), configures them, and registers them
 *            as children; on exit tears down every child thread.
 * @param[in] arg The argument of accepting thread: CONN_BCAP_SERVER.
 */
static THRET THTYPE accept_thread(void *arg)
{
#if !defined(THRET)
	THRET ret = (THRET)NULL;
#endif
	int client;
	HRESULT hr;
	volatile struct CONN_BCAP_SERVER *child;
	struct CONN_BCAP_SERVER *bcap_param = (struct CONN_BCAP_SERVER *) arg;
	MUTEX mutex;

	/* Initializes mutex */
	/* NOTE(review): the mutex lives on this thread's stack and is
	 * published via relation_mutex BEFORE initialize_mutex runs -
	 * presumably no other thread touches it until children exist;
	 * confirm against change_relation. */
	bcap_param->relation_mutex = &mutex;
	hr = initialize_mutex(&mutex);
	if(FAILED(hr)) goto exit_proc;

	while(1) {
		/* 300 appears to be a wait timeout; a SUCCEEDED wait means the
		 * termination event fired, a timeout means keep accepting. */
		hr = wait_event(&bcap_param->term_main_evt, 300);
		if(SUCCEEDED(hr)) {
			break;
		}

		if(bcap_param->num_child < BCAP_CLIENT_MAX) {
			hr = tcp_accept(bcap_param->device.sock, &client);
			if(SUCCEEDED(hr)) {
				/* Sets no delay option */
				tcp_set_nodelay(client, 1);

				/* Sets keep alive option */
				tcp_set_keepalive(client, KEEPALIVE_ENABLE, KEEPALIVE_IDLE, KEEPALIVE_INTERVAL, KEEPALIVE_COUNT);

				/* Adds child */
				change_relation(bcap_param, ADD_CHILD, &client);
			}
		}

		/* Deletes child (reaps any children that have terminated) */
		change_relation(bcap_param, DELETE_CHILD, NULL);
	}

exit_proc:
	/* Ends all children thread: signal each child's termination event and
	 * join it; node1 is re-read each pass since the list shrinks as
	 * children are removed. */
	child = bcap_param->node1;
	while(child != NULL) {
		set_event((EVENT *) &child->term_main_evt);
		exit_thread(child->main_thread);
		child = bcap_param->node1;
	}

	/* Deletes child */
	change_relation(bcap_param, DELETE_CHILD, NULL);

	/* Releases mutex (must outlive all children, which share it via
	 * relation_mutex) */
	release_mutex(&mutex);

#if !defined(THRET)
	return ret;
#endif
}
int sock_setsockopt(struct socket *sock, int level, int optname, char *optval, int optlen) { struct sock *sk=sock->sk; #ifdef CONFIG_FILTER struct sk_filter *filter; #endif int val; int valbool; struct linger ling; int ret = 0; /* * Options without arguments */ #ifdef SO_DONTLINGER /* Compatibility item... */ switch(optname) { case SO_DONTLINGER: sk->linger=0; return 0; } #endif if(optlen<sizeof(int)) return(-EINVAL); if (get_user(val, (int *)optval)) return -EFAULT; valbool = val?1:0; lock_sock(sk); switch(optname) { case SO_DEBUG: if(val && !capable(CAP_NET_ADMIN)) { ret = -EACCES; } else sk->debug=valbool; break; case SO_REUSEADDR: sk->reuse = valbool; break; case SO_TYPE: case SO_ERROR: ret = -ENOPROTOOPT; break; case SO_DONTROUTE: sk->localroute=valbool; break; case SO_BROADCAST: sk->broadcast=valbool; break; case SO_SNDBUF: /* Don't error on this BSD doesn't and if you think about it this is right. Otherwise apps have to play 'guess the biggest size' games. RCVBUF/SNDBUF are treated in BSD as hints */ if (val > sysctl_wmem_max) val = sysctl_wmem_max; sk->userlocks |= SOCK_SNDBUF_LOCK; if ((val * 2) < SOCK_MIN_SNDBUF) sk->sndbuf = SOCK_MIN_SNDBUF; else sk->sndbuf = (val * 2); /* * Wake up sending tasks if we * upped the value. */ sk->write_space(sk); break; case SO_RCVBUF: /* Don't error on this BSD doesn't and if you think about it this is right. Otherwise apps have to play 'guess the biggest size' games. RCVBUF/SNDBUF are treated in BSD as hints */ if (val > sysctl_rmem_max) val = sysctl_rmem_max; sk->userlocks |= SOCK_RCVBUF_LOCK; /* FIXME: is this lower bound the right one? 
*/ if ((val * 2) < SOCK_MIN_RCVBUF) sk->rcvbuf = SOCK_MIN_RCVBUF; else sk->rcvbuf = (val * 2); break; case SO_KEEPALIVE: #ifdef CONFIG_INET if (sk->protocol == IPPROTO_TCP) { tcp_set_keepalive(sk, valbool); } #endif sk->keepopen = valbool; break; case SO_OOBINLINE: sk->urginline = valbool; break; case SO_NO_CHECK: sk->no_check = valbool; break; case SO_PRIORITY: if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN)) sk->priority = val; else ret = -EPERM; break; case SO_LINGER: if(optlen<sizeof(ling)) { ret = -EINVAL; /* 1003.1g */ break; } if (copy_from_user(&ling,optval,sizeof(ling))) { ret = -EFAULT; break; } if(ling.l_onoff==0) { sk->linger=0; } else { #if (BITS_PER_LONG == 32) if (ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ) sk->lingertime=MAX_SCHEDULE_TIMEOUT; else #endif sk->lingertime=ling.l_linger*HZ; sk->linger=1; } break; case SO_BSDCOMPAT: sk->bsdism = valbool; break; case SO_PASSCRED: sock->passcred = valbool; break; case SO_TIMESTAMP: sk->rcvtstamp = valbool; break; case SO_RCVLOWAT: if (val < 0) val = INT_MAX; sk->rcvlowat = val ? : 1; break; case SO_RCVTIMEO: ret = sock_set_timeout(&sk->rcvtimeo, optval, optlen); break; case SO_SNDTIMEO: ret = sock_set_timeout(&sk->sndtimeo, optval, optlen); break; #ifdef CONFIG_NETDEVICES case SO_BINDTODEVICE: { char devname[IFNAMSIZ]; /* Sorry... */ if (!capable(CAP_NET_RAW)) { ret = -EPERM; break; } /* Bind this socket to a particular device like "eth0", * as specified in the passed interface name. If the * name is "" or the option length is zero the socket * is not bound. */ if (!valbool) { sk->bound_dev_if = 0; } else { if (optlen > IFNAMSIZ) optlen = IFNAMSIZ; if (copy_from_user(devname, optval, optlen)) { ret = -EFAULT; break; } /* Remove any cached route for this socket. 
*/ sk_dst_reset(sk); if (devname[0] == '\0') { sk->bound_dev_if = 0; } else { struct net_device *dev = dev_get_by_name(devname); if (!dev) { ret = -ENODEV; break; } sk->bound_dev_if = dev->ifindex; dev_put(dev); } } break; } #endif #ifdef CONFIG_FILTER case SO_ATTACH_FILTER: ret = -EINVAL; if (optlen == sizeof(struct sock_fprog)) { struct sock_fprog fprog; ret = -EFAULT; if (copy_from_user(&fprog, optval, sizeof(fprog))) break; ret = sk_attach_filter(&fprog, sk); } break; case SO_DETACH_FILTER: spin_lock_bh(&sk->lock.slock); filter = sk->filter; if (filter) { sk->filter = NULL; spin_unlock_bh(&sk->lock.slock); sk_filter_release(sk, filter); break; } spin_unlock_bh(&sk->lock.slock); ret = -ENONET; break; #endif /* We implement the SO_SNDLOWAT etc to not be settable (1003.1g 5.3) */ default: ret = -ENOPROTOOPT; break; } release_sock(sk); return ret; }