/*
 * Tear down every connection still in the death queue, regardless of how
 * much time it has left before expiring.
 */
static void death_queue_kill_all(struct death_queue_t *dq)
{
    while (!death_queue_empty(dq)) {
        lwan_connection_t *conn = death_queue_idx_to_node(dq, dq->head.next);

        destroy_coro(dq, conn);
    }
}
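/*
 * Sketch (assumed usage, not from the original source): death_queue_kill_all()
 * is the kind of helper a worker thread would call right after leaving its
 * event loop, so that no parked connection outlives the thread.  The function
 * name below is illustrative only.
 */
static void io_loop_teardown_sketch(struct death_queue_t *dq)
{
    /* The epoll loop has exited and nothing will resume these coroutines,
     * so destroy whatever is still queued */
    death_queue_kill_all(dq);
}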
/*
 * With an empty death queue there is nothing to time out, so epoll_wait()
 * may block indefinitely (-1); otherwise wake up every second so that
 * expired keep-alive connections can be reaped.
 */
static ALWAYS_INLINE int death_queue_epoll_timeout(struct death_queue_t *dq)
{
    return death_queue_empty(dq) ? -1 : 1000;
}
/*
 * Coroutine body for a client connection: process requests on this
 * connection until it should be closed, yielding back to the event loop
 * between keep-alive/pipelined requests.
 */
static int process_request_coro(coro_t *coro)
{
    strbuf_t *strbuf = coro_malloc_full(coro, sizeof(*strbuf), strbuf_free);
    lwan_connection_t *conn = coro_get_data(coro);
    lwan_t *lwan = conn->thread->lwan;
    int fd = lwan_connection_get_fd(conn);
    char request_buffer[DEFAULT_BUFFER_SIZE];
    lwan_value_t buffer = { .value = request_buffer, .len = 0 };
    char *next_request = NULL;

    strbuf_init(strbuf);

    while (true) {
        lwan_request_t request = {
            .conn = conn,
            .fd = fd,
            .response = { .buffer = strbuf },
        };

        assert(conn->flags & CONN_IS_ALIVE);

        next_request = lwan_process_request(lwan, &request, &buffer, next_request);
        if (!next_request)
            break;

        /* Another request is pending on this connection: let the event
         * loop run and pick it up when the coroutine is resumed */
        coro_yield(coro, CONN_CORO_MAY_RESUME);

        if (UNLIKELY(!strbuf_reset_length(strbuf)))
            return CONN_CORO_ABORT;
    }

    return CONN_CORO_FINISHED;
}

/*
 * Resume the connection coroutine and, based on what it yielded, decide
 * whether the fd should be watched for read or write events, updating the
 * epoll interest set only when it actually changes.
 */
static ALWAYS_INLINE void resume_coro_if_needed(struct death_queue_t *dq,
    lwan_connection_t *conn, int epoll_fd)
{
    assert(conn->coro);

    if (!(conn->flags & CONN_SHOULD_RESUME_CORO))
        return;

    lwan_connection_coro_yield_t yield_result = coro_resume(conn->coro);
    /* CONN_CORO_ABORT is -1, but comparing with 0 is cheaper */
    if (yield_result < CONN_CORO_MAY_RESUME) {
        destroy_coro(dq, conn);
        return;
    }

    bool write_events;
    if (conn->flags & CONN_MUST_READ) {
        write_events = true;
    } else {
        bool should_resume_coro = (yield_result == CONN_CORO_MAY_RESUME);

        if (should_resume_coro)
            conn->flags |= CONN_SHOULD_RESUME_CORO;
        else
            conn->flags &= ~CONN_SHOULD_RESUME_CORO;

        write_events = (conn->flags & CONN_WRITE_EVENTS);
        if (should_resume_coro == write_events)
            return;
    }

    struct epoll_event event = {
        .events = events_by_write_flag[write_events],
        .data.ptr = conn
    };
    int fd = lwan_connection_get_fd(conn);

    if (UNLIKELY(epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, &event) < 0))
        lwan_status_perror("epoll_ctl");

    conn->flags ^= CONN_WRITE_EVENTS;
}

/*
 * Advance the death-queue clock by one tick and destroy the connections at
 * the head of the queue whose deadline has passed; entries closer to the
 * head expire first, so stop at the first one still within its deadline.
 */
static void death_queue_kill_waiting(struct death_queue_t *dq)
{
    dq->time++;

    while (!death_queue_empty(dq)) {
        lwan_connection_t *conn = death_queue_idx_to_node(dq, dq->head.next);

        if (conn->time_to_die > dq->time)
            return;

        destroy_coro(dq, conn);
    }

    /* Death queue exhausted: reset epoch */
    dq->time = 0;
}
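/*
 * Sketch (assumed wiring, not from the original source): one way the helpers
 * above could fit together in a worker thread's epoll loop.  The function
 * name, the events[] capacity, and the direct call into
 * resume_coro_if_needed() for every ready descriptor are illustrative
 * assumptions; accept handling, death-queue insertion, and error-event
 * handling are omitted.  Assumes the usual <sys/epoll.h> and <errno.h>
 * includes plus the declarations above.
 */
static void io_loop_sketch(struct death_queue_t *dq, int epoll_fd)
{
    struct epoll_event events[1024];

    for (;;) {
        int n_events = epoll_wait(epoll_fd, events,
                                  (int)(sizeof(events) / sizeof(events[0])),
                                  death_queue_epoll_timeout(dq));

        if (UNLIKELY(n_events < 0)) {
            if (errno == EINTR)
                continue;
            break;
        }

        if (n_events == 0) {
            /* Timed out: reap keep-alive connections whose time is up */
            death_queue_kill_waiting(dq);
            continue;
        }

        for (int i = 0; i < n_events; i++) {
            lwan_connection_t *conn = events[i].data.ptr;

            /* Resuming may flip the fd between read and write interest,
             * or destroy the coroutine altogether */
            resume_coro_if_needed(dq, conn, epoll_fd);
        }
    }
}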