static void
death_queue_kill_all(struct death_queue_t *dq)
{
    while (!death_queue_empty(dq)) {
        lwan_connection_t *conn = death_queue_idx_to_node(dq, dq->head.next);
        destroy_coro(dq, conn);
    }
}
static lwan_connection_t *
grab_and_watch_client(int epoll_fd, int pipe_fd, lwan_connection_t *conns)
{
    int fd;

    /* The acceptor hands new clients over by writing their fd to the pipe. */
    if (UNLIKELY(read(pipe_fd, &fd, sizeof(int)) != sizeof(int)))
        return NULL;

    struct epoll_event event = {
        .events = events_by_write_flag[1],
        .data.ptr = &conns[fd]
    };
    if (UNLIKELY(epoll_ctl(epoll_fd, EPOLL_CTL_ADD, fd, &event) < 0))
        return NULL;

    return &conns[fd];
}

static void *
thread_io_loop(void *data)
{
    lwan_thread_t *t = data;
    const int epoll_fd = t->epoll_fd;
    const int read_pipe_fd = t->pipe_fd[0];
    const int max_events = min((int)t->lwan->thread.max_fd, 1024);
    const lwan_t *lwan = t->lwan;
    lwan_connection_t *conns = lwan->conns;
    struct epoll_event *events;
    coro_switcher_t switcher;
    struct death_queue_t dq;
    int n_fds;

    lwan_status_debug("Starting IO loop on thread #%d",
        (unsigned short)(ptrdiff_t)(t - t->lwan->thread.threads) + 1);

    events = calloc((size_t)max_events, sizeof(*events));
    if (UNLIKELY(!events))
        lwan_status_critical("Could not allocate memory for events");

    death_queue_init(&dq, lwan);

    for (;;) {
        switch (n_fds = epoll_wait(epoll_fd, events, max_events,
                                   death_queue_epoll_timeout(&dq))) {
        case -1:
            switch (errno) {
            case EBADF:
            case EINVAL:
                goto epoll_fd_closed;
            }
            continue;
        case 0: /* timeout: shutdown waiting sockets */
            death_queue_kill_waiting(&dq);
            break;
        default: /* activity on some of this poller's file descriptors */
            update_date_cache(t);

            for (struct epoll_event *ep_event = events; n_fds--; ep_event++) {
                lwan_connection_t *conn;

                if (!ep_event->data.ptr) {
                    /* The accept pipe is registered with a NULL data pointer:
                     * pick up the new client and spawn a coroutine for it. */
                    conn = grab_and_watch_client(epoll_fd, read_pipe_fd, conns);
                    if (UNLIKELY(!conn))
                        continue;
                    spawn_coro(conn, &switcher, &dq);
                } else {
                    conn = ep_event->data.ptr;

                    if (UNLIKELY(ep_event->events & (EPOLLRDHUP | EPOLLHUP))) {
                        destroy_coro(&dq, conn);
                        continue;
                    }

                    resume_coro_if_needed(&dq, conn, epoll_fd);
                }

                death_queue_move_to_last(&dq, conn);
            }
        }
    }

epoll_fd_closed:
    death_queue_kill_all(&dq);
    free(events);

    return NULL;
}
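/*
 * Illustration only (not part of the original file): a minimal sketch of the
 * producer side of the hand-off pipe that grab_and_watch_client() reads from.
 * It assumes the acceptor holds the write end in t->pipe_fd[1] and that a
 * plain int fd is the wire format, matching the read() above; the name
 * hand_off_client_to_thread() is hypothetical.
 */
static bool
hand_off_client_to_thread(lwan_thread_t *t, int client_fd)
{
    /* Writing the fd wakes the worker's epoll loop through the pipe's read
     * end, which is registered with a NULL data.ptr so thread_io_loop() can
     * tell a hand-off apart from client traffic. */
    return write(t->pipe_fd[1], &client_fd, sizeof(int)) == sizeof(int);
}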
static int
process_request_coro(coro_t *coro)
{
    strbuf_t *strbuf = coro_malloc_full(coro, sizeof(*strbuf), strbuf_free);
    lwan_connection_t *conn = coro_get_data(coro);
    lwan_t *lwan = conn->thread->lwan;
    int fd = lwan_connection_get_fd(conn);
    char request_buffer[DEFAULT_BUFFER_SIZE];
    lwan_value_t buffer = { .value = request_buffer, .len = 0 };
    char *next_request = NULL;

    strbuf_init(strbuf);

    while (true) {
        lwan_request_t request = {
            .conn = conn,
            .fd = fd,
            .response = { .buffer = strbuf },
        };

        assert(conn->flags & CONN_IS_ALIVE);

        /* lwan_process_request() returns a pointer to the next pipelined
         * request, or NULL if there is none left in the buffer. */
        next_request = lwan_process_request(lwan, &request, &buffer, next_request);
        if (!next_request)
            break;

        coro_yield(coro, CONN_CORO_MAY_RESUME);

        if (UNLIKELY(!strbuf_reset_length(strbuf)))
            return CONN_CORO_ABORT;
    }

    return CONN_CORO_FINISHED;
}

static ALWAYS_INLINE void
resume_coro_if_needed(struct death_queue_t *dq, lwan_connection_t *conn,
    int epoll_fd)
{
    assert(conn->coro);

    if (!(conn->flags & CONN_SHOULD_RESUME_CORO))
        return;

    lwan_connection_coro_yield_t yield_result = coro_resume(conn->coro);
    /* CONN_CORO_ABORT is -1, but comparing with 0 is cheaper */
    if (yield_result < CONN_CORO_MAY_RESUME) {
        destroy_coro(dq, conn);
        return;
    }

    bool write_events;
    if (conn->flags & CONN_MUST_READ) {
        write_events = true;
    } else {
        bool should_resume_coro = (yield_result == CONN_CORO_MAY_RESUME);

        if (should_resume_coro)
            conn->flags |= CONN_SHOULD_RESUME_CORO;
        else
            conn->flags &= ~CONN_SHOULD_RESUME_CORO;

        write_events = (conn->flags & CONN_WRITE_EVENTS);
        if (should_resume_coro == write_events)
            return;
    }

    struct epoll_event event = {
        .events = events_by_write_flag[write_events],
        .data.ptr = conn
    };

    int fd = lwan_connection_get_fd(conn);
    if (UNLIKELY(epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, &event) < 0))
        lwan_status_perror("epoll_ctl");

    conn->flags ^= CONN_WRITE_EVENTS;
}

static void
death_queue_kill_waiting(struct death_queue_t *dq)
{
    dq->time++;

    while (!death_queue_empty(dq)) {
        lwan_connection_t *conn = death_queue_idx_to_node(dq, dq->head.next);

        if (conn->time_to_die > dq->time)
            return;

        destroy_coro(dq, conn);
    }

    /* Death queue exhausted: reset epoch */
    dq->time = 0;
}
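/*
 * Illustration only (not part of the original file): the EBADF/EINVAL cases
 * in thread_io_loop() suggest the loop is meant to be stopped by closing the
 * thread's epoll fd from outside.  A hypothetical shutdown helper under that
 * assumption:
 */
static void
request_thread_shutdown(lwan_thread_t *t)
{
    /* Once epoll_wait() fails with EBADF, thread_io_loop() jumps to
     * epoll_fd_closed, kills the remaining connections in its death queue,
     * and frees its event buffer before returning. */
    close(t->epoll_fd);
}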
void
destroy_thread(Worker *thr)
{
    destroy_coro(thr);
    free(thr);
}