static rlim_t setup_open_file_count_limits(void)
{
    struct rlimit r;

    if (getrlimit(RLIMIT_NOFILE, &r) < 0) {
        lwan_status_perror("Could not obtain maximum number of file "
                           "descriptors. Assuming %d", OPEN_MAX);
        return OPEN_MAX;
    }

    if (r.rlim_max != r.rlim_cur) {
        const rlim_t current = r.rlim_cur;

        if (r.rlim_max == RLIM_INFINITY) {
            r.rlim_cur = OPEN_MAX;
        } else if (r.rlim_cur < r.rlim_max) {
            r.rlim_cur = r.rlim_max;
        } else {
            /* Shouldn't happen, so just return the current value. */
            goto out;
        }

        if (setrlimit(RLIMIT_NOFILE, &r) < 0) {
            /* Cast to uint64_t so the arguments match %PRIu64 even where
             * rlim_t is not a 64-bit type. */
            lwan_status_perror("Could not raise maximum number of file "
                               "descriptors to %" PRIu64 ". Leaving at "
                               "%" PRIu64, (uint64_t)r.rlim_max,
                               (uint64_t)current);
            r.rlim_cur = current;
        }
    }

out:
    return r.rlim_cur;
}
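/*
 * A minimal sketch of how the returned limit might be consumed, assuming the
 * conns array and thread.max_fd fields referenced by thread_io_loop() further
 * down; the helper name allocate_connections() is hypothetical.
 */
static void allocate_connections(lwan_t *l, size_t max_open_files)
{
    l->conns = calloc(max_open_files, sizeof(lwan_connection_t));
    if (!l->conns)
        lwan_status_critical("Could not allocate connection array");

    l->thread.max_fd = (unsigned)max_open_files;
}

/* ...called during initialization, e.g.:
 *     allocate_connections(l, (size_t)setup_open_file_count_limits());
 */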
void lwan_format_rfc_time(time_t t, char buffer[30])
{
    struct tm tm;

    if (UNLIKELY(!gmtime_r(&t, &tm))) {
        lwan_status_perror("gmtime_r");
        return;
    }

    if (UNLIKELY(!strftime(buffer, 30, "%a, %d %b %Y %H:%M:%S GMT", &tm)))
        lwan_status_perror("strftime");
}
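/*
 * Usage sketch: format the current wall-clock time in the HTTP date format
 * produced above.  Nothing beyond the function above and standard C headers
 * is assumed; print_current_date() is an illustrative name.
 */
#include <stdio.h>
#include <time.h>

static void print_current_date(void)
{
    char date[30];

    lwan_format_rfc_time(time(NULL), date);
    printf("Date: %s\r\n", date);
}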
static ALWAYS_INLINE enum herd_accept
accept_one(struct lwan *l, uint64_t *cores)
{
    int fd = accept4((int)main_socket, NULL, NULL,
                     SOCK_NONBLOCK | SOCK_CLOEXEC);

    if (LIKELY(fd >= 0)) {
        *cores |= UINT64_C(1) << schedule_client(l, fd);
        return HERD_MORE;
    }

    switch (errno) {
    case EAGAIN:
        return HERD_GONE;

    case EBADF:
    case ECONNABORTED:
    case EINVAL:
        if (main_socket < 0) {
            lwan_status_info("Signal 2 (Interrupt) received");
        } else {
            lwan_status_info("Main socket closed for unknown reasons");
        }
        return HERD_SHUTDOWN;

    default:
        lwan_status_perror("accept");
        return HERD_MORE;
    }
}
void lwan_main_loop(lwan_t *l)
{
    assert(main_socket == -1);
    main_socket = l->main_socket;

    if (signal(SIGINT, sigint_handler) == SIG_ERR)
        lwan_status_critical("Could not set signal handler");

    lwan_status_info("Ready to serve");

    for (;;) {
        int client_fd = accept4((int)main_socket, NULL, NULL,
                                SOCK_NONBLOCK | SOCK_CLOEXEC);

        if (UNLIKELY(client_fd < 0)) {
            switch (errno) {
            case EBADF:
            case ECONNABORTED:
                if (main_socket < 0) {
                    lwan_status_info("Signal 2 (Interrupt) received");
                } else {
                    lwan_status_info("Main socket closed for unknown reasons");
                }
                return;
            }

            lwan_status_perror("accept");
        } else {
            schedule_client(l, client_fd);
        }
    }
}
void lwan_main_loop(lwan_t *l)
{
    assert(main_socket == -1);
    main_socket = l->main_socket;

    if (signal(SIGINT, sigint_handler) == SIG_ERR)
        lwan_status_critical("Could not set signal handler");

    lwan_status_info("Ready to serve");

    for (;;) {
        int client_fd = accept4(main_socket, NULL, NULL,
                                SOCK_NONBLOCK | SOCK_CLOEXEC);

        if (UNLIKELY(client_fd < 0)) {
            if (errno != EBADF) {
                lwan_status_perror("accept");
                continue;
            }

            if (main_socket < 0) {
                lwan_status_info("Signal 2 (Interrupt) received");
            } else {
                lwan_status_info("Main socket closed for unknown reasons");
            }

            break;
        }

        schedule_client(l, client_fd);
    }
}
const char *get_config_path(char *path_buf)
{
    char buffer[PATH_MAX];
    char *path = NULL;
    int ret;

#if defined(__linux__)
    ssize_t path_len;

    path_len = readlink("/proc/self/exe", buffer, PATH_MAX);
    if (path_len < 0 || path_len >= PATH_MAX) {
        lwan_status_perror("readlink");
        goto out;
    }
    buffer[path_len] = '\0';
#elif defined(__FreeBSD__)
    size_t path_len = PATH_MAX;
    int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1 };

    ret = sysctl(mib, N_ELEMENTS(mib), buffer, &path_len, NULL, 0);
    if (ret < 0) {
        lwan_status_perror("sysctl");
        goto out;
    }
#else
    goto out;
#endif

    path = strrchr(buffer, '/');
    if (!path)
        goto out;

    ret = snprintf(path_buf, PATH_MAX, "%s.conf", path + 1);
    if (ret < 0 || ret >= PATH_MAX)
        goto out;

    return path_buf;

out:
    return "lwan.conf";
}
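/*
 * Usage sketch: derive the configuration file name from the executable name
 * and hand it straight to config_open() from the next listing.  The wrapper
 * name open_default_config() is illustrative only.
 */
static struct config *open_default_config(void)
{
    char path_buf[PATH_MAX];

    return config_open(get_config_path(path_buf));
}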
struct config *config_open(const char *path)
{
    struct config *config;
    struct stat st;
    void *data;
    int fd;

    fd = open(path, O_RDONLY | O_CLOEXEC);
    if (fd < 0) {
        lwan_status_perror("Could not open configuration file: %s", path);
        return NULL;
    }

    if (fstat(fd, &st) < 0) {
        close(fd);
        return NULL;
    }

    data = mmap(NULL, (size_t)st.st_size, PROT_READ, MAP_SHARED, fd, 0);
    close(fd);
    if (data == MAP_FAILED)
        return NULL;

    config = malloc(sizeof(*config));
    if (!config) {
        munmap(data, (size_t)st.st_size);
        return NULL;
    }

    config->parser = (struct parser) {
        .state = parse_config,
        .lexer = {
            .state = lex_config,
            .pos = data,
            .start = data,
            .end = (char *)data + st.st_size,
            .cur_line = 1,
        },
    };

    config->mapped.addr = data;
    config->mapped.sz = (size_t)st.st_size;
    config->error_message = NULL;

    strbuf_init(&config->parser.strbuf);

    return config;
}
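/*
 * Sketch of the matching teardown, not necessarily the project's actual
 * config_close(): it only inverts what config_open() above sets up, and it
 * assumes strbuf_free() on an embedded strbuf releases just its internal
 * buffer.
 */
void config_close(struct config *config)
{
    if (!config)
        return;

    free(config->error_message);
    strbuf_free(&config->parser.strbuf);
    munmap(config->mapped.addr, config->mapped.sz);
    free(config);
}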
static lwan_url_map_t *add_url_map(lwan_trie_t *t, const char *prefix,
                                   const lwan_url_map_t *map)
{
    lwan_url_map_t *copy = malloc(sizeof(*copy));

    if (!copy) {
        lwan_status_perror("Could not copy URL map");
        ASSERT_NOT_REACHED_RETURN(NULL);
    }

    memcpy(copy, map, sizeof(*copy));

    copy->prefix = strdup(prefix ? prefix : copy->prefix);
    copy->prefix_len = strlen(copy->prefix);
    lwan_trie_add(t, copy->prefix, copy);

    return copy;
}
static void create_thread(lwan_t *l, lwan_thread_t *thread)
{
    pthread_attr_t attr;

    memset(thread, 0, sizeof(*thread));
    thread->lwan = l;

    if ((thread->epoll_fd = epoll_create1(EPOLL_CLOEXEC)) < 0)
        lwan_status_critical_perror("epoll_create");

    if (pthread_attr_init(&attr))
        lwan_status_critical_perror("pthread_attr_init");

    if (pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM))
        lwan_status_critical_perror("pthread_attr_setscope");

    if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE))
        lwan_status_critical_perror("pthread_attr_setdetachstate");

    if (pipe2(thread->pipe_fd, O_NONBLOCK | O_CLOEXEC) < 0)
        lwan_status_critical_perror("pipe");

    struct epoll_event event = { .events = EPOLLIN, .data.ptr = NULL };
    if (epoll_ctl(thread->epoll_fd, EPOLL_CTL_ADD, thread->pipe_fd[0], &event) < 0)
        lwan_status_critical_perror("epoll_ctl");

    if (pthread_create(&thread->self, &attr, thread_io_loop, thread))
        lwan_status_critical_perror("pthread_create");

    if (pthread_attr_destroy(&attr))
        lwan_status_critical_perror("pthread_attr_destroy");
}

void lwan_thread_add_client(lwan_thread_t *t, int fd)
{
    t->lwan->conns[fd].flags = 0;
    t->lwan->conns[fd].thread = t;

    if (UNLIKELY(write(t->pipe_fd[1], &fd, sizeof(int)) < 0))
        lwan_status_perror("write");
}
void lwan_main_loop(lwan_t *l)
{
    if (setjmp(cleanup_jmp_buf))
        return;

    signal(SIGINT, _signal_handler);

    lwan_status_info("Ready to serve");

    for (;;) {
        int child_fd = accept4(l->main_socket, NULL, NULL, SOCK_NONBLOCK);
        if (UNLIKELY(child_fd < 0)) {
            lwan_status_perror("accept");
            continue;
        }

        _push_request_fd(l, child_fd);
    }
}
static bool get_user_uid_gid(const char *user, uid_t *uid, gid_t *gid)
{
    struct passwd pwd = { };
    struct passwd *result;
    char *buf;
    long pw_size_max = sysconf(_SC_GETPW_R_SIZE_MAX);
    int r;

    if (!user || !*user) {
        lwan_status_error("Username should be provided");
        return false;
    }

    if (pw_size_max < 0)
        pw_size_max = 16384;

    buf = malloc((size_t)pw_size_max);
    if (!buf) {
        lwan_status_error("Could not allocate buffer for passwd struct");
        return false;
    }

    r = getpwnam_r(user, &pwd, buf, (size_t)pw_size_max, &result);
    *uid = pwd.pw_uid;
    *gid = pwd.pw_gid;
    free(buf);

    if (result)
        return true;

    if (!r) {
        lwan_status_error("Username not found: %s", user);
    } else {
        errno = r;
        lwan_status_perror("Could not obtain uid/gid for user %s", user);
    }

    return false;
}
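/*
 * Usage sketch: drop privileges to the configured user with the uid/gid
 * looked up above.  Only POSIX setgid()/setuid() are used, and the gid must
 * be set before the uid; the helper name switch_to_user() is illustrative,
 * not necessarily the project's own.
 */
static bool switch_to_user(const char *user)
{
    uid_t uid;
    gid_t gid;

    if (!get_user_uid_gid(user, &uid, &gid))
        return false;

    if (setgid(gid) < 0 || setuid(uid) < 0) {
        lwan_status_perror("Could not drop privileges to user %s", user);
        return false;
    }

    return true;
}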
static bool wait_herd(void)
{
    struct pollfd fds = { .fd = (int)main_socket, .events = POLLIN };

    return poll(&fds, 1, -1) == 1;
}

enum herd_accept { HERD_MORE = 0, HERD_GONE = -1, HERD_SHUTDOWN = 1 };

static enum herd_accept accept_one(struct lwan *l, uint64_t *cores)
{
    int fd = accept4((int)main_socket, NULL, NULL,
                     SOCK_NONBLOCK | SOCK_CLOEXEC);

    if (LIKELY(fd >= 0)) {
        *cores |= 1ULL << (unsigned)schedule_client(l, fd);
        return HERD_MORE;
    }

    switch (errno) {
    case EAGAIN:
        return HERD_GONE;

    case EBADF:
    case ECONNABORTED:
    case EINVAL:
        if (main_socket < 0) {
            lwan_status_info("Signal 2 (Interrupt) received");
        } else {
            lwan_status_info("Main socket closed for unknown reasons");
        }
        return HERD_SHUTDOWN;
    }

    lwan_status_perror("accept");
    return HERD_MORE;
}
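/*
 * Sketch of how wait_herd() and accept_one() might be driven, assuming a
 * hypothetical notify_cores() helper that wakes the worker threads whose bit
 * is set in the mask; the driver name and return convention are illustrative,
 * not the project's exact code.
 */
static bool accept_herd(struct lwan *l)
{
    uint64_t cores = 0;
    enum herd_accept ha;

    if (!wait_herd())
        return false;

    /* Drain the accept backlog until EAGAIN (HERD_GONE) or shutdown. */
    do {
        ha = accept_one(l, &cores);
    } while (ha == HERD_MORE);

    if (cores)
        notify_cores(l, cores);   /* hypothetical: wake the flagged workers */

    return ha != HERD_SHUTDOWN;
}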
static ALWAYS_INLINE void resume_coro_if_needed(struct death_queue_t *dq,
                                                lwan_connection_t *conn,
                                                int epoll_fd)
{
    assert(conn->coro);

    if (!(conn->flags & CONN_SHOULD_RESUME_CORO))
        return;

    lwan_connection_coro_yield_t yield_result = coro_resume(conn->coro);
    /* CONN_CORO_ABORT is -1, but comparing with 0 is cheaper */
    if (yield_result < CONN_CORO_MAY_RESUME) {
        destroy_coro(dq, conn);
        return;
    }

    bool write_events;
    if (conn->flags & CONN_MUST_READ) {
        write_events = true;
    } else {
        bool should_resume_coro = (yield_result == CONN_CORO_MAY_RESUME);

        if (should_resume_coro)
            conn->flags |= CONN_SHOULD_RESUME_CORO;
        else
            conn->flags &= ~CONN_SHOULD_RESUME_CORO;

        write_events = (conn->flags & CONN_WRITE_EVENTS);
        if (should_resume_coro == write_events)
            return;
    }

    struct epoll_event event = {
        .events = events_by_write_flag[write_events],
        .data.ptr = conn
    };
    int fd = lwan_connection_get_fd(dq->lwan, conn);

    if (UNLIKELY(epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, &event) < 0))
        lwan_status_perror("epoll_ctl");

    conn->flags ^= CONN_WRITE_EVENTS;
}

static void death_queue_kill_waiting(struct death_queue_t *dq)
{
    dq->time++;

    while (!death_queue_empty(dq)) {
        lwan_connection_t *conn = death_queue_idx_to_node(dq, dq->head.next);

        if (conn->time_to_die > dq->time)
            return;

        destroy_coro(dq, conn);
    }

    /* Death queue exhausted: reset epoch */
    dq->time = 0;
}
static lwan_connection_t *grab_and_watch_client(int epoll_fd, int pipe_fd,
                                                lwan_connection_t *conns)
{
    int fd;

    if (UNLIKELY(read(pipe_fd, &fd, sizeof(int)) != sizeof(int))) {
        lwan_status_perror("read");
        return NULL;
    }

    struct epoll_event event = {
        .events = events_by_write_flag[1],
        .data.ptr = &conns[fd]
    };
    if (UNLIKELY(epoll_ctl(epoll_fd, EPOLL_CTL_ADD, fd, &event) < 0))
        lwan_status_critical_perror("epoll_ctl");

    return &conns[fd];
}

static void *thread_io_loop(void *data)
{
    lwan_thread_t *t = data;
    const int epoll_fd = t->epoll_fd;
    const int read_pipe_fd = t->pipe_fd[0];
    const int max_events = min((int)t->lwan->thread.max_fd, 1024);
    lwan_connection_t *conns = t->lwan->conns;
    struct epoll_event *events;
    coro_switcher_t switcher;
    struct death_queue_t dq;
    int n_fds;

    lwan_status_debug("Starting IO loop on thread #%d",
                      (unsigned short)(ptrdiff_t)(t - t->lwan->thread.threads) + 1);

    events = calloc((size_t)max_events, sizeof(*events));
    if (UNLIKELY(!events))
        lwan_status_critical("Could not allocate memory for events");

    death_queue_init(&dq, conns, t->lwan->config.keep_alive_timeout);

    for (;;) {
        switch (n_fds = epoll_wait(epoll_fd, events, max_events,
                                   death_queue_epoll_timeout(&dq))) {
        case -1:
            switch (errno) {
            case EBADF:
            case EINVAL:
                goto epoll_fd_closed;
            }
            continue;
        case 0: /* timeout: shutdown waiting sockets */
            death_queue_kill_waiting(&dq);
            break;
        default: /* activity in some of this poller's file descriptors */
            update_date_cache(t);

            for (struct epoll_event *ep_event = events; n_fds--; ep_event++) {
                lwan_connection_t *conn;

                if (!ep_event->data.ptr) {
                    conn = grab_and_watch_client(epoll_fd, read_pipe_fd, conns);
                    if (UNLIKELY(!conn))
                        continue;
                    spawn_or_reset_coro_if_needed(conn, &switcher, &dq);
                } else {
                    conn = ep_event->data.ptr;
                    if (UNLIKELY(ep_event->events & (EPOLLRDHUP | EPOLLHUP))) {
                        destroy_coro(&dq, conn);
                        continue;
                    }
                    spawn_or_reset_coro_if_needed(conn, &switcher, &dq);
                    resume_coro_if_needed(&dq, conn, epoll_fd);
                }

                death_queue_move_to_last(&dq, conn);
            }
        }
    }

epoll_fd_closed:
    free(events);

    return NULL;
}
static int process_request_coro(coro_t *coro)
{
    strbuf_t *strbuf = coro_malloc_full(coro, sizeof(*strbuf), strbuf_free);
    lwan_connection_t *conn = coro_get_data(coro);
    lwan_t *lwan = conn->thread->lwan;
    int fd = lwan_connection_get_fd(conn);
    char request_buffer[DEFAULT_BUFFER_SIZE];
    lwan_value_t buffer = { .value = request_buffer, .len = 0 };
    char *next_request = NULL;

    strbuf_init(strbuf);

    while (true) {
        lwan_request_t request = {
            .conn = conn,
            .fd = fd,
            .response = { .buffer = strbuf },
        };

        assert(conn->flags & CONN_IS_ALIVE);

        next_request = lwan_process_request(lwan, &request, &buffer, next_request);
        if (!next_request)
            break;

        coro_yield(coro, CONN_CORO_MAY_RESUME);

        if (UNLIKELY(!strbuf_reset_length(strbuf)))
            return CONN_CORO_ABORT;
    }

    return CONN_CORO_FINISHED;
}

static ALWAYS_INLINE void resume_coro_if_needed(struct death_queue_t *dq,
                                                lwan_connection_t *conn,
                                                int epoll_fd)
{
    assert(conn->coro);

    if (!(conn->flags & CONN_SHOULD_RESUME_CORO))
        return;

    lwan_connection_coro_yield_t yield_result = coro_resume(conn->coro);
    /* CONN_CORO_ABORT is -1, but comparing with 0 is cheaper */
    if (yield_result < CONN_CORO_MAY_RESUME) {
        destroy_coro(dq, conn);
        return;
    }

    bool write_events;
    if (conn->flags & CONN_MUST_READ) {
        write_events = true;
    } else {
        bool should_resume_coro = (yield_result == CONN_CORO_MAY_RESUME);

        if (should_resume_coro)
            conn->flags |= CONN_SHOULD_RESUME_CORO;
        else
            conn->flags &= ~CONN_SHOULD_RESUME_CORO;

        write_events = (conn->flags & CONN_WRITE_EVENTS);
        if (should_resume_coro == write_events)
            return;
    }

    struct epoll_event event = {
        .events = events_by_write_flag[write_events],
        .data.ptr = conn
    };
    int fd = lwan_connection_get_fd(conn);

    if (UNLIKELY(epoll_ctl(epoll_fd, EPOLL_CTL_MOD, fd, &event) < 0))
        lwan_status_perror("epoll_ctl");

    conn->flags ^= CONN_WRITE_EVENTS;
}

static void death_queue_kill_waiting(struct death_queue_t *dq)
{
    dq->time++;

    while (!death_queue_empty(dq)) {
        lwan_connection_t *conn = death_queue_idx_to_node(dq, dq->head.next);

        if (conn->time_to_die > dq->time)
            return;

        destroy_coro(dq, conn);
    }

    /* Death queue exhausted: reset epoch */
    dq->time = 0;
}
struct lwan_fd_watch *lwan_watch_fd(struct lwan *l,
                                    int fd,
                                    uint32_t events,
                                    coro_function_t coro_fn,
                                    void *data)
{
    struct lwan_fd_watch *watch;

    watch = malloc(sizeof(*watch));
    if (!watch)
        return NULL;

    watch->coro = coro_new(&l->switcher, coro_fn, data);
    if (!watch->coro)
        goto out;

    struct epoll_event ev = { .events = events, .data.ptr = watch->coro };
    if (epoll_ctl(l->epfd, EPOLL_CTL_ADD, fd, &ev) < 0) {
        coro_free(watch->coro);
        goto out;
    }

    watch->fd = fd;
    return watch;

out:
    free(watch);
    return NULL;
}

void lwan_unwatch_fd(struct lwan *l, struct lwan_fd_watch *w)
{
    if (l->main_socket != w->fd) {
        if (epoll_ctl(l->epfd, EPOLL_CTL_DEL, w->fd, NULL) < 0)
            lwan_status_perror("Could not unwatch fd %d", w->fd);
    }

    coro_free(w->coro);
    free(w);
}

void lwan_main_loop(struct lwan *l)
{
    struct epoll_event evs[16];
    struct lwan_fd_watch *watch;

    assert(main_socket == -1);
    main_socket = l->main_socket;

    if (signal(SIGINT, sigint_handler) == SIG_ERR)
        lwan_status_critical("Could not set signal handler");

    watch = lwan_watch_fd(l, l->main_socket, EPOLLIN | EPOLLHUP | EPOLLRDHUP,
                          accept_connection_coro, l);
    if (!watch)
        lwan_status_critical("Could not watch main socket");

    lwan_status_info("Ready to serve");

    while (true) {
        int n_evs = epoll_wait(l->epfd, evs, N_ELEMENTS(evs), -1);

        if (UNLIKELY(n_evs < 0)) {
            if (main_socket < 0)
                break;
            if (errno == EINTR || errno == EAGAIN)
                continue;
            break;
        }

        for (int i = 0; i < n_evs; i++) {
            if (!coro_resume_value(evs[i].data.ptr, (int)evs[i].events))
                break;
        }
    }

    lwan_unwatch_fd(l, watch);
}

#ifdef CLOCK_MONOTONIC_COARSE
__attribute__((constructor)) static void detect_fastest_monotonic_clock(void)
{
    struct timespec ts;

    if (!clock_gettime(CLOCK_MONOTONIC_COARSE, &ts))
        monotonic_clock_id = CLOCK_MONOTONIC_COARSE;
}
#endif

void lwan_set_thread_name(const char *name)
{
    char thread_name[16];
    char process_name[PATH_MAX];
    char *tmp;
    int ret;

    if (proc_pidpath(getpid(), process_name, sizeof(process_name)) < 0)
        return;

    tmp = strrchr(process_name, '/');
    if (!tmp)
        return;

    ret = snprintf(thread_name, sizeof(thread_name), "%s %s", tmp + 1, name);
    if (ret < 0)
        return;

    pthread_set_name_np(pthread_self(), thread_name);
}