Example #1
static void death_queue_remove(struct death_queue_t *dq,
    lwan_connection_t *node)
{
    lwan_connection_t *prev = death_queue_idx_to_node(dq, node->prev);
    lwan_connection_t *next = death_queue_idx_to_node(dq, node->next);
    /* Unlink the node by pointing its neighbors at each other. */
    next->prev = node->prev;
    prev->next = node->next;
}
Example #2
static void death_queue_remove(struct death_queue *dq,
                               struct lwan_connection *node)
{
    struct lwan_connection *prev = death_queue_idx_to_node(dq, node->prev);
    struct lwan_connection *next = death_queue_idx_to_node(dq, node->next);

    next->prev = node->prev;
    prev->next = node->next;

    /* Reset the removed node's links so it no longer references its old
     * neighbors. */
    node->next = node->prev = -1;
}
Example #3
static void death_queue_remove(struct death_queue_t *dq,
    lwan_connection_t *node)
{
    lwan_connection_t *prev = death_queue_idx_to_node(dq, node->prev);
    lwan_connection_t *next = death_queue_idx_to_node(dq, node->next);
    next->prev = node->prev;
    prev->next = node->next;

    /* FIXME: This shouldn't be required; there may be a bug somewhere
     * that manifests if lots of chunked encoding requests are performed. */
    node->next = node->prev = -1;
}
Example #4
static void death_queue_insert(struct death_queue_t *dq,
    lwan_connection_t *new_node)
{
    /* Append at the tail: the new node becomes head.prev, and the previous
     * tail (or the head itself, if the queue was empty) points forward to it. */
    new_node->next = -1;
    new_node->prev = dq->head.prev;
    lwan_connection_t *prev = death_queue_idx_to_node(dq, dq->head.prev);
    dq->head.prev = prev->next = death_queue_node_to_idx(dq, new_node);
}
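The snippets above all go through death_queue_idx_to_node and death_queue_node_to_idx to translate between connection pointers and the small integer indices stored in prev/next, with -1 apparently standing in for the queue's sentinel head. Those helpers and the surrounding types are not part of the examples; the following is a minimal sketch of how they could look, assuming the queue keeps its connections in a flat array and uses a sentinel head node. Every field name here is inferred from the usage above, not taken from the project.

/* Sketch only: these layouts and field names are assumptions inferred from
 * how the examples use them, not the project's actual definitions. */
typedef struct lwan_connection {
    int prev, next;  /* indices into the connection array; -1 = sentinel head */
    /* ... other fields (coro, flags, time_to_die, ...) elided ... */
} lwan_connection_t;

struct death_queue_t {
    lwan_connection_t *conns;  /* assumed: base of the per-thread connection array */
    lwan_connection_t head;    /* sentinel node addressed by index -1 */
    /* ... other fields (time, ...) elided ... */
};

static inline lwan_connection_t *
death_queue_idx_to_node(struct death_queue_t *dq, int idx)
{
    /* A negative index refers to the sentinel head node. */
    return (idx < 0) ? &dq->head : &dq->conns[idx];
}

static inline int
death_queue_node_to_idx(struct death_queue_t *dq, lwan_connection_t *node)
{
    /* The sentinel maps back to -1; real nodes map to their array slot. */
    return (node == &dq->head) ? -1 : (int)(node - dq->conns);
}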
Example #5
static void
death_queue_kill_all(struct death_queue_t *dq)
{
    /* Destroy the connection at the front of the queue until it is empty. */
    while (!death_queue_empty(dq)) {
        lwan_connection_t *conn = death_queue_idx_to_node(dq, dq->head.next);
        destroy_coro(dq, conn);
    }
}
Example #6
void death_queue_kill_all(struct death_queue *dq)
{
    while (!death_queue_empty(dq)) {
        struct lwan_connection *conn =
            death_queue_idx_to_node(dq, dq->head.next);
        death_queue_kill(dq, conn);
    }
}
Example #7
void death_queue_kill_waiting(struct death_queue *dq)
{
    /* Advance the queue's epoch; every queued connection ages by one tick. */
    dq->time++;

    while (!death_queue_empty(dq)) {
        struct lwan_connection *conn =
            death_queue_idx_to_node(dq, dq->head.next);

        /* The queue is kept in expiry order, so stop at the first connection
         * that has not timed out yet. */
        if (conn->time_to_die > dq->time)
            return;

        death_queue_kill(dq, conn);
    }

    /* Death queue exhausted: reset epoch */
    dq->time = 0;
}
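This epoch logic only works if something else arms conn->time_to_die relative to dq->time whenever a connection enters the queue or handles another request, so that the list stays ordered by expiry and the early return above is safe. That code is not shown in the examples; below is a hypothetical helper sketching the idea. The name death_queue_refresh and the keep_alive_timeout field are assumptions for illustration only, and the helper assumes the connection is already linked into the queue.

/* Hypothetical helper, for illustration only: give a connection a fresh
 * deadline relative to the queue's current epoch and move it to the tail,
 * keeping the queue ordered by expiry time. Assumes the connection is
 * already in the queue and that dq has a keep_alive_timeout field; neither
 * is shown in the examples. */
static void death_queue_refresh(struct death_queue *dq,
                                struct lwan_connection *conn)
{
    conn->time_to_die = dq->time + dq->keep_alive_timeout;

    death_queue_remove(dq, conn);
    death_queue_insert(dq, conn);
}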
Example #8
static ALWAYS_INLINE void
resume_coro_if_needed(struct death_queue_t *dq, lwan_connection_t *conn,
    int epoll_fd)
{
    assert(conn->coro);

    if (!(conn->flags & CONN_SHOULD_RESUME_CORO))
        return;

    lwan_connection_coro_yield_t yield_result = coro_resume(conn->coro);
    /* CONN_CORO_ABORT is -1, but comparing with 0 is cheaper */
    if (yield_result < CONN_CORO_MAY_RESUME) {
        destroy_coro(dq, conn);
        return;
    }

    /* Decide whether epoll should watch this connection for write readiness. */
    bool write_events;
    if (conn->flags & CONN_MUST_READ) {
        write_events = true;
    } else {
        bool should_resume_coro = (yield_result == CONN_CORO_MAY_RESUME);

        if (should_resume_coro)
            conn->flags |= CONN_SHOULD_RESUME_CORO;
        else
            conn->flags &= ~CONN_SHOULD_RESUME_CORO;

        write_events = (conn->flags & CONN_WRITE_EVENTS);
        if (should_resume_coro == write_events)
            return;
    }

    struct epoll_event event = {
        .events = events_by_write_flag[write_events],
        .data.ptr = conn
    };

    int fd = lwan_connection_get_fd(dq->lwan, conn);
    if (UNLIKELY(epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, &event) < 0))
        lwan_status_perror("epoll_ctl");

    conn->flags ^= CONN_WRITE_EVENTS;
}

static void
death_queue_kill_waiting(struct death_queue_t *dq)
{
    dq->time++;

    while (!death_queue_empty(dq)) {
        lwan_connection_t *conn = death_queue_idx_to_node(dq, dq->head.next);

        if (conn->time_to_die > dq->time)
            return;

        destroy_coro(dq, conn);
    }

    /* Death queue exhausted: reset epoch */
    dq->time = 0;
}
Example #9
static int
process_request_coro(coro_t *coro)
{
    strbuf_t *strbuf = coro_malloc_full(coro, sizeof(*strbuf), strbuf_free);
    lwan_connection_t *conn = coro_get_data(coro);
    lwan_t *lwan = conn->thread->lwan;
    int fd = lwan_connection_get_fd(conn);
    char request_buffer[DEFAULT_BUFFER_SIZE];
    lwan_value_t buffer = {
        .value = request_buffer,
        .len = 0
    };
    char *next_request = NULL;

    strbuf_init(strbuf);

    /* Keep serving requests on this connection until lwan_process_request
     * signals that there is no follow-up request to handle. */
    while (true) {
        lwan_request_t request = {
            .conn = conn,
            .fd = fd,
            .response = {
                .buffer = strbuf
            },
        };

        assert(conn->flags & CONN_IS_ALIVE);

        next_request = lwan_process_request(lwan, &request, &buffer, next_request);
        if (!next_request)
            break;

        /* Hand control back to the I/O loop; the coroutine is resumed when
         * the connection is ready for the next request. */
        coro_yield(coro, CONN_CORO_MAY_RESUME);

        /* Reuse the response buffer for the next request. */
        if (UNLIKELY(!strbuf_reset_length(strbuf)))
            return CONN_CORO_ABORT;
    }

    return CONN_CORO_FINISHED;
}

static ALWAYS_INLINE void
resume_coro_if_needed(struct death_queue_t *dq, lwan_connection_t *conn,
    int epoll_fd)
{
    assert(conn->coro);

    if (!(conn->flags & CONN_SHOULD_RESUME_CORO))
        return;

    lwan_connection_coro_yield_t yield_result = coro_resume(conn->coro);
    /* CONN_CORO_ABORT is -1, but comparing with 0 is cheaper */
    if (yield_result < CONN_CORO_MAY_RESUME) {
        destroy_coro(dq, conn);
        return;
    }

    bool write_events;
    if (conn->flags & CONN_MUST_READ) {
        write_events = true;
    } else {
        bool should_resume_coro = (yield_result == CONN_CORO_MAY_RESUME);

        if (should_resume_coro)
            conn->flags |= CONN_SHOULD_RESUME_CORO;
        else
            conn->flags &= ~CONN_SHOULD_RESUME_CORO;

        write_events = (conn->flags & CONN_WRITE_EVENTS);
        if (should_resume_coro == write_events)
            return;
    }

    struct epoll_event event = {
        .events = events_by_write_flag[write_events],
        .data.ptr = conn
    };

    int fd = lwan_connection_get_fd(conn);
    if (UNLIKELY(epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, &event) < 0))
        lwan_status_perror("epoll_ctl");

    conn->flags ^= CONN_WRITE_EVENTS;
}

static void
death_queue_kill_waiting(struct death_queue_t *dq)
{
    dq->time++;

    while (!death_queue_empty(dq)) {
        lwan_connection_t *conn = death_queue_idx_to_node(dq, dq->head.next);

        if (conn->time_to_die > dq->time)
            return;

        destroy_coro(dq, conn);
    }

    /* Death queue exhausted: reset epoch */
    dq->time = 0;
}
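Taken together, the examples suggest the shape of the per-thread I/O loop that drives them: wait on epoll, resume the coroutine behind each ready connection, and fall back to reaping idle connections from the death queue when the wait times out. The sketch below shows only that structure; the function name, event array size, and timeout value are invented for illustration and should not be read as the project's actual event loop.

#include <errno.h>
#include <sys/epoll.h>

/* Rough sketch of how the functions above could be driven from a per-thread
 * event loop. Names and the one-second timeout are assumptions. */
static void thread_io_loop(struct death_queue_t *dq, int epoll_fd)
{
    struct epoll_event events[1024];

    for (;;) {
        /* Wake up at least once per second so idle connections can still be
         * reaped even when no I/O is happening. */
        int n = epoll_wait(epoll_fd, events, 1024, 1000 /* ms */);

        if (n < 0) {
            if (errno == EINTR)
                continue;
            break;
        }

        if (n == 0) {
            /* Timeout with no events: advance the epoch and kill connections
             * whose time_to_die has passed. */
            death_queue_kill_waiting(dq);
            continue;
        }

        for (int i = 0; i < n; i++) {
            lwan_connection_t *conn = events[i].data.ptr;
            resume_coro_if_needed(dq, conn, epoll_fd);
        }
    }
}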