/*
 *	Wait for a DCCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 *
 *	Returns the poll event mask (POLLIN/POLLOUT/POLLERR/POLLHUP/...) for
 *	the socket, after registering @wait so the caller is woken on change.
 */
unsigned int dccp_poll(struct file *file, struct socket *sock,
		       poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;

	/* Register on the socket's wait queue before sampling state. */
	sock_poll_wait(file, sk_sleep(sk), wait);
	if (sk->sk_state == DCCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	   by poll logic and correct handling of state changes
	   made by another threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	/* Both directions shut down, or fully closed: report hangup. */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
		mask |= POLLHUP;
	/* Receive side shut down: reads won't block (they return EOF). */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? (i.e. past the REQUESTING/RESPOND handshake states) */
	if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
		/* Any queued receive data makes the socket readable. */
		if (atomic_read(&sk->sk_rmem_alloc) > 0)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_is_writeable(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_is_writeable(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}
	}
	return mask;
}
/** * sk_stream_write_space - stream socket write_space callback. * @sk: socket * * FIXME: write proper description */ void sk_stream_write_space(struct sock *sk) { struct socket *sock = sk->sk_socket; struct socket_wq *wq; if (sk_stream_is_writeable(sk) && sock) { clear_bit(SOCK_NOSPACE, &sock->flags); rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (wq_has_sleeper(wq)) wake_up_interruptible_poll(&wq->wait, POLLOUT | POLLWRNORM | POLLWRBAND); if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT); rcu_read_unlock(); } }
/* * This callback function is invoked when data canbe transmitted on socket. * It inserts the socket into a list of writable sockets * which is processed in periodic function app_glue_periodic * Paramters: a pointer to struct sock * Returns: void * */ void app_glue_sock_write_space(struct sock *sk) { if(!sk->sk_socket) { return; } if((sk->sk_state != TCP_ESTABLISHED)&&(sk->sk_socket->type == SOCK_STREAM)) { return; } if (sk_stream_is_writeable(sk)) { clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); if(sk->sk_socket->write_queue_present) { return; } app_glue_sock_writable_called++; sock_hold(sk); sk->sk_socket->write_queue_present = 1; TAILQ_INSERT_TAIL(&write_ready_socket_list_head,sk->sk_socket,write_queue_entry); write_sockets_queue_len++; } }
/*
 * Estimate the amount of data that can currently be sent on a socket.
 * The limit is the smaller of the stream's available send space and the
 * free-buffer budget across the fclone cache, the header cache and the
 * data buffer pool, keeping a reserve of two entries in each cache.
 * Parameters: a pointer to a socket structure (struct socket *)
 * Returns: number of bytes the application can send (0 if none)
 */
int app_glue_calc_size_of_data_to_send(void *sock)
{
	struct sock *sk = ((struct socket *)sock)->sk;
	int fclone_free, header_free, data_free, lowest;

	if (!sk_stream_is_writeable(sk))
		return 0;

	fclone_free = kmem_cache_get_free(get_fclone_cache());
	header_free = kmem_cache_get_free(get_header_cache());
	data_free = get_buffer_count();

	/* Hold back a reserve of two entries from each kmem cache. */
	if (fclone_free > 2)
		fclone_free -= 2;
	if (header_free > 2)
		header_free -= 2;

	lowest = min(fclone_free, header_free);
	lowest = min(lowest, data_free);
	if (lowest <= 0)
		return 0;

	/* << 10 scales buffers to bytes; presumably 1 KiB per buffer —
	 * TODO confirm against the buffer pool's configured size. */
	return min(lowest << 10, sk_stream_wspace(sk));
}