static int udp6_accept_dst(const struct sock *sk,
		const struct sk_buff *skb) {
	assert(sk != NULL);

	/* Match an inbound IPv6 datagram against this socket: the socket must
	 * be AF_INET6, the packet's source address must match the socket's
	 * peer address (or the socket accepts any), its source port must match
	 * the peer port (or no peer port is set), and the socket must not be
	 * bound to a different device. */
	return (sk->opt.so_domain == AF_INET6)
			&& ip6_tester_src_or_any(sk, skb)
			&& ((sock_inet_get_dst_port(sk) == udp_hdr(skb)->source)
				|| (sock_inet_get_dst_port(sk) == 0))
			&& ((sk->opt.so_bindtodevice == skb->dev)
				|| (sk->opt.so_bindtodevice == NULL));
}
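/*
 * A minimal usage sketch for udp6_accept_dst(): the receive path walks the
 * candidate UDP sockets and hands the datagram to the first one whose
 * matcher accepts it. The iterator udp6_sock_foreach() and the delivery
 * helper sock_rcv_sketch() are hypothetical names used for illustration
 * only; they are not part of this file.
 */
#if 0 /* illustration only */
static int udp6_rcv_demux(struct sk_buff *skb) {
	struct sock *sk;

	udp6_sock_foreach(sk) {
		if (udp6_accept_dst(sk, skb)) {
			/* queue the datagram onto the matched socket */
			return sock_rcv_sketch(sk, skb);
		}
	}

	/* no matching socket: caller may answer with ICMPv6 port unreachable */
	return -ENOENT;
}
#endif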
static int tcp_write(struct tcp_sock *tcp_sk, void *buff, size_t len) {
	void *pb;
	struct sk_buff *skb;
	int ret;

	pb = buff;
	while (len != 0) {
		/* Try to send the whole message, passing no more than 64 KB
		 * to the underlying IP layer per segment. */
		size_t bytes = min(len, IP_MAX_PACKET_LEN - MAX_HEADER_SIZE);

		skb = NULL; /* allocate a new packet */
		ret = alloc_prep_skb(tcp_sk, 0, &bytes, &skb);
		if (ret != 0) {
			break;
		}

		debug_print(3, "tcp_write: sending len %d\n", (int)bytes);

		tcp_build(skb->h.th,
				sock_inet_get_dst_port(to_sock(tcp_sk)),
				sock_inet_get_src_port(to_sock(tcp_sk)),
				TCP_MIN_HEADER_SIZE, tcp_sk->self.wind.value);

		memcpy(skb->h.th + 1, pb, bytes);
		pb += bytes;
		len -= bytes;

		/* Fill TCP header: PSH only on the last segment of the message */
		skb->h.th->psh = (len == 0);
		tcp_set_ack_field(skb->h.th, tcp_sk->rem.seq);
		send_seq_from_sock(tcp_sk, skb);
	}

	return pb - buff;
}
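/*
 * Caller-side sketch of the segmentation done by tcp_write() above: any
 * message longer than IP_MAX_PACKET_LEN - MAX_HEADER_SIZE is cut into
 * several segments, only the last of which carries PSH, and a return value
 * smaller than the requested length means skb allocation failed mid-message.
 * The wrapper name tcp_write_example() is hypothetical, the socket is
 * assumed to be already connected, and the buffer size is arbitrary.
 */
#if 0 /* illustration only */
static void tcp_write_example(struct tcp_sock *tcp_sk) {
	/* roughly 1.5 segments of payload: tcp_write() sends two segments,
	 * setting PSH only on the second one */
	static char msg[3 * (IP_MAX_PACKET_LEN - MAX_HEADER_SIZE) / 2];
	int sent;

	sent = tcp_write(tcp_sk, msg, sizeof msg);
	if (sent < (int)sizeof msg) {
		/* partial write: retry the unsent tail or report an error */
	}
}
#endif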
/************************ Socket functions ****************************/
static int tcp_init(struct sock *sk) {
	static const struct tcp_wind self_wind_default = {
		.value = TCP_WINDOW_VALUE_DEFAULT,
		.factor = TCP_WINDOW_FACTOR_DEFAULT,
		.size = TCP_WINDOW_VALUE_DEFAULT << TCP_WINDOW_FACTOR_DEFAULT
	};
	struct tcp_sock *tcp_sk;

	tcp_sk = to_tcp_sock(sk);
	assert(tcp_sk != NULL);

	debug_print(3, "tcp_init: sk %p\n", to_sock(tcp_sk));

	tcp_sk->p_sk = tcp_sk->p_sk; /* already initialized */
	tcp_sk->state = TCP_CLOSED;
	tcp_sk->self.seq = tcp_sk->last_ack;
	memcpy(&tcp_sk->self.wind, &self_wind_default,
			sizeof tcp_sk->self.wind);
	tcp_sk->rem.wind.factor = 0;
	tcp_sk->parent = NULL;
	INIT_LIST_HEAD(&tcp_sk->conn_lnk);
	/* INIT_LIST_HEAD(&tcp_sk->conn_ready); */
	INIT_LIST_HEAD(&tcp_sk->conn_wait);
	INIT_LIST_HEAD(&tcp_sk->conn_free);
	tcp_sk->free_wait_queue_len = tcp_sk->free_wait_queue_max = 0;
	tcp_sk->lock = 0;
	/* timerclear(&sock.tcp_sk->syn_time); */
	timerclear(&tcp_sk->ack_time);
	timerclear(&tcp_sk->rcv_time);
	tcp_sk->dup_ack = 0;
	tcp_sk->rexmit_mode = 0;

	return 0;
}

static int tcp_close(struct sock *sk) {
	struct sk_buff *skb;
	struct tcphdr *tcph;
	struct tcp_sock *tcp_sk;

	tcp_sk = to_tcp_sock(sk);
	assert(tcp_sk != NULL);

	debug_print(3, "tcp_close: sk %p\n", to_sock(tcp_sk));

	tcp_sock_lock(tcp_sk, TCP_SYNC_STATE);
	{
		assert(tcp_sk->state < TCP_MAX_STATE);
		switch (tcp_sk->state) {
		default:
			tcp_sock_unlock(tcp_sk, TCP_SYNC_STATE);
			return -EBADF;
		case TCP_CLOSED:
			tcp_sock_unlock(tcp_sk, TCP_SYNC_STATE);
			tcp_sock_release(tcp_sk);
			return 0;
		case TCP_LISTEN:
		case TCP_SYN_SENT:
		case TCP_SYN_RECV_PRE:
			tcp_sock_set_state(tcp_sk, TCP_CLOSED);
			tcp_sock_unlock(tcp_sk, TCP_SYNC_STATE);
			tcp_sock_release(tcp_sk);
			return 0;
		case TCP_SYN_RECV:
		case TCP_ESTABIL:
		case TCP_CLOSEWAIT:
			skb = NULL; /* allocate a new packet */
			if (0 != alloc_prep_skb(tcp_sk, 0, NULL, &skb)) {
				break; /* error: cannot allocate the FIN packet; give up */
			}
			tcp_sock_set_state(tcp_sk,
					tcp_sk->state == TCP_CLOSEWAIT ?
						TCP_LASTACK : TCP_FINWAIT_1);
			tcph = tcp_hdr(skb);
			tcp_build(tcph,
					sock_inet_get_dst_port(to_sock(tcp_sk)),
					sock_inet_get_src_port(to_sock(tcp_sk)),
					TCP_MIN_HEADER_SIZE, tcp_sk->self.wind.value);
			tcph->fin = 1;
			tcp_set_ack_field(tcph, tcp_sk->rem.seq);
			send_seq_from_sock(tcp_sk, skb);
			break;
		}
	}
	tcp_sock_unlock(tcp_sk, TCP_SYNC_STATE);

	return 0;
}

static int tcp_connect(struct sock *sk, const struct sockaddr *addr,
		socklen_t addr_len, int flags) {
	struct sk_buff *skb;
	struct tcphdr *tcph;
	struct tcp_sock *tcp_sk;
	int ret;
	static const __u8 magic_opts[] = {
		TCP_OPT_KIND_MSS, 0x04,   /* Maximum segment size: */
		0x40, 0x0C,               /* 16396 bytes */
		TCP_OPT_KIND_NOP,         /* No-Operation */
		TCP_OPT_KIND_WS, 0x03,    /* Window scale: */
		TCP_WINDOW_FACTOR_DEFAULT /* 7 (multiply by 128) */
	};

	(void)addr;
	(void)addr_len;
	(void)flags;

	tcp_sk = to_tcp_sock(sk);
	assert(tcp_sk != NULL);

	debug_print(3, "tcp_connect: sk %p\n", to_sock(tcp_sk));

	tcp_sock_lock(tcp_sk, TCP_SYNC_STATE);
	{
		assert(tcp_sk->state < TCP_MAX_STATE);
		switch (tcp_sk->state) {
		default:
			ret = -EISCONN;
			break;
		case TCP_CLOSED:
			/* make an skb carrying the options */
			skb = NULL; /* allocate a new packet */
			ret = alloc_prep_skb(tcp_sk, sizeof magic_opts, NULL, &skb);
			if (ret != 0) {
				break;
			}
			tcp_sock_set_state(tcp_sk, TCP_SYN_SENT);
			tcph = tcp_hdr(skb);
			tcp_build(tcph,
					sock_inet_get_dst_port(to_sock(tcp_sk)),
					sock_inet_get_src_port(to_sock(tcp_sk)),
					TCP_MIN_HEADER_SIZE + sizeof magic_opts,
					tcp_sk->self.wind.value);
			tcph->syn = 1;
			memcpy(&tcph->options, &magic_opts[0], sizeof magic_opts);
			send_seq_from_sock(tcp_sk, skb);

			/* FIXME: hack; use the common socket lock/unlock mechanism */
			sched_lock();
			{
				tcp_sock_unlock(tcp_sk, TCP_SYNC_STATE);
				ret = sock_wait(sk, POLLOUT | POLLERR,
						MODOPS_CONNECT_TIMEOUT);
				tcp_sock_lock(tcp_sk, TCP_SYNC_STATE);
			}
			sched_unlock();

			if (ret == -EAGAIN) {
				ret = -EINPROGRESS;
				break;
			}
			if (tcp_sock_get_status(tcp_sk) == TCP_ST_NOTEXIST) {
				ret = -ECONNRESET;
				break;
			}
			if (tcp_sock_get_status(tcp_sk) == TCP_ST_SYNC) {
				ret = 0;
			}
			break;
		}
	}
	tcp_sock_unlock(tcp_sk, TCP_SYNC_STATE);

	return ret;
}

static int tcp_sock_alloc_missing_backlog(struct tcp_sock *tcp_sk) {
	int to_alloc;

	assert(tcp_sk != NULL);

	tcp_sock_lock(tcp_sk, TCP_SYNC_CONN_QUEUE);
	{
		to_alloc = tcp_sk->free_wait_queue_max
				- tcp_sk->free_wait_queue_len;
		/* to_alloc may be negative: this happens when the backlog was
		 * set and later reduced. */
		while (to_alloc > 0) {
			struct sock *newsk;
			struct tcp_sock *tcp_newsk;

			newsk = sock_create(to_sock(tcp_sk)->opt.so_domain,
					SOCK_STREAM, IPPROTO_TCP);
			if (err(newsk) != 0) {
				break;
			}
			to_alloc--;

			tcp_newsk = to_tcp_sock(newsk);
			tcp_newsk->parent = tcp_sk;
			list_add_tail(&tcp_newsk->conn_lnk, &tcp_sk->conn_free);
			tcp_sk->free_wait_queue_len++;
		}
	}
	tcp_sock_unlock(tcp_sk, TCP_SYNC_CONN_QUEUE);

	return tcp_sk->free_wait_queue_len;
}
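/*
 * A sketch of how tcp_sock_alloc_missing_backlog() is meant to be driven,
 * e.g. from a listen() handler: the backlog becomes the target size of the
 * free-connection pool, and the helper tops the pool up to it. The wrapper
 * name tcp_listen_sketch() is hypothetical; only free_wait_queue_max, the
 * helper call, and TCP_LISTEN reflect this file.
 */
#if 0 /* illustration only */
static int tcp_listen_sketch(struct tcp_sock *tcp_sk, int backlog) {
	tcp_sk->free_wait_queue_max = backlog; /* target pool size */

	/* pre-allocate child sockets onto conn_free; the helper returns the
	 * pool length actually reached, which may fall short of backlog */
	if ((tcp_sock_alloc_missing_backlog(tcp_sk) == 0) && (backlog > 0)) {
		return -ENOMEM; /* could not allocate even one child socket */
	}

	tcp_sock_set_state(tcp_sk, TCP_LISTEN);
	return 0;
}
#endif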
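/*
 * Caller-side sketch of tcp_connect() above: the call sends a SYN carrying
 * the MSS and window-scale options from magic_opts, then blocks in
 * sock_wait() for up to MODOPS_CONNECT_TIMEOUT. The wrapper name
 * tcp_connect_example() is hypothetical; tcp_connect() itself ignores its
 * addr arguments, so the remote address is assumed to have been stored by a
 * generic socket layer beforehand.
 */
#if 0 /* illustration only */
static void tcp_connect_example(struct sock *sk,
		const struct sockaddr *addr, socklen_t addr_len) {
	int ret;

	ret = tcp_connect(sk, addr, addr_len, 0);
	switch (ret) {
	case 0:            /* handshake completed (status TCP_ST_SYNC) */
		break;
	case -EINPROGRESS: /* timed out while the SYN is still in flight */
		break;
	case -ECONNRESET:  /* peer reset the connection (TCP_ST_NOTEXIST) */
		break;
	case -EISCONN:     /* socket was not in TCP_CLOSED */
		break;
	default:           /* e.g. skb allocation failure */
		break;
	}
}
#endif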