static liHandlerResult expire(liVRequest *vr, gpointer param, gpointer *context) {
	struct tm tm;
	time_t expire_date;
	guint len;
	gint max_age;
	GString *date_str = vr->wrk->tmp_str;
	expire_rule *rule = param;
	guint num = rule->num;
	time_t now = (time_t)li_cur_ts(vr->wrk);

	UNUSED(context);

	if (rule->base == EXPIRE_ACCESS) {
		expire_date = now + num;
		max_age = num;
	} else {
		/* modification */
		struct stat st;
		gint err;

		if (!vr->physical.path->len)
			return LI_HANDLER_GO_ON;

		switch (li_stat_cache_get(vr, vr->physical.path, &st, &err, NULL)) {
		case LI_HANDLER_GO_ON: break;
		case LI_HANDLER_WAIT_FOR_EVENT: return LI_HANDLER_WAIT_FOR_EVENT;
		default: return LI_HANDLER_GO_ON;
		}

		expire_date = st.st_mtime + num;
		if (expire_date < now)
			expire_date = now;

		max_age = expire_date - now;
	}

	/* format date */
	g_string_set_size(date_str, 255);

	if (!gmtime_r(&expire_date, &tm)) {
		VR_ERROR(vr, "gmtime_r(%"G_GUINT64_FORMAT") failed: %s", (guint64)expire_date, g_strerror(errno));
		return LI_HANDLER_GO_ON;
	}

	len = strftime(date_str->str, date_str->allocated_len, "%a, %d %b %Y %H:%M:%S GMT", &tm);
	if (len == 0)
		return LI_HANDLER_GO_ON;

	g_string_set_size(date_str, len);

	/* finally set the headers */
	li_http_header_overwrite(vr->response.headers, CONST_STR_LEN("Expires"), GSTR_LEN(date_str));
	g_string_truncate(date_str, 0);
	g_string_append_len(date_str, CONST_STR_LEN("max-age="));
	li_string_append_int(date_str, max_age);
	li_http_header_append(vr->response.headers, CONST_STR_LEN("Cache-Control"), GSTR_LEN(date_str));

	return LI_HANDLER_GO_ON;
}
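For illustration (values are hypothetical, not from the source): with an EXPIRE_ACCESS rule where num is 600 and a request served at Thu, 01 Jan 2015 00:00:00 GMT, the handler above would produce roughly these response headers:

	Expires: Thu, 01 Jan 2015 00:10:00 GMT
	Cache-Control: max-age=600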
static AuthFileData* auth_file_get_data(liWorker *wrk, AuthFile *f) {
	li_tstamp now = li_cur_ts(wrk);
	AuthFileData *data = NULL;

	g_mutex_lock(f->lock);
	if (f->ttl != 0 && now >= f->next_check) {
		struct stat st;
		f->next_check = now + f->ttl;

		if (-1 != stat(f->path->str, &st) && st.st_mtime >= f->last_stat - 1) {
			g_mutex_unlock(f->lock);

			/* update without lock held */
			data = auth_file_load(wrk->srv, f);

			g_mutex_lock(f->lock);
			if (NULL != data) {
				auth_file_data_release(f->data);
				f->data = data;
			}
		}
		f->last_stat = now;
	}

	data = f->data;
	if (NULL != data) g_atomic_int_inc(&data->refcount);
	g_mutex_unlock(f->lock);

	return data;
}
void li_connection_update_io_timeout(liConnection *con) {
	liWorker *wrk = con->wrk;

	/* re-queue the io timeout entry only if it is already queued and its
	 * timestamp is more than a second old, so the queue isn't reordered
	 * on every single I/O event */
	if (con->io_timeout_elem.queued && (con->io_timeout_elem.ts + 1.0) < li_cur_ts(wrk)) {
		li_waitqueue_push(&wrk->io_timeout_queue, &con->io_timeout_elem);
	}
}
void li_throttle_set(liWorker *wrk, liThrottleState *state, guint rate, guint burst) {
	state->single_rate = rate;
	state->single_burst = burst;
	state->single_magazine = burst;
	state->single_last_rearm = msec_timestamp(li_cur_ts(wrk));
}
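A minimal, hypothetical call site for the setter above (the wrk/state variables and the 64 KiB/s rate with 128 KiB burst are illustrative assumptions, not taken from the source):

	/* cap this state at roughly 64 KiB/s, allowing bursts of up to 128 KiB;
	 * single_magazine starts full, so the first transfers are not delayed */
	li_throttle_set(wrk, state, 64 * 1024, 128 * 1024);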
static liHandlerResult lua_handle(liVRequest *vr, gpointer param, gpointer *context) {
	lua_config *conf = (lua_config*) param;
	lua_worker_config *wc;
	gboolean timeout = FALSE;
	liHandlerResult res;
	UNUSED(context);

	wc = &conf->worker_config[vr->wrk->ndx];

	if (wc->act)
		timeout = (conf->ttl > 0 && wc->ts_loaded + conf->ttl < li_cur_ts(vr->wrk));

	if (!wc->act || timeout) {
		int err;
		struct stat st;
		time_t last_load;

		res = li_stat_cache_get(vr, conf->filename, &st, &err, NULL);
		switch (res) {
		case LI_HANDLER_ERROR:
			VR_ERROR(vr, "lua.handler: couldn't stat file '%s': %s", conf->filename->str, g_strerror(err));
			return LI_HANDLER_ERROR;
		case LI_HANDLER_WAIT_FOR_EVENT:
			return LI_HANDLER_WAIT_FOR_EVENT;
		default:
			break;
		}

		last_load = wc->ts_loaded;
		wc->ts_loaded = li_cur_ts(vr->wrk);
		if (timeout && st.st_mtime <= last_load) {
			goto loaded;
		}

		li_action_release(vr->wrk->srv, wc->act);
		wc->act = NULL;
		if (!li_config_lua_load(&vr->wrk->LL, vr->wrk->srv, vr->wrk, conf->filename->str, &wc->act, FALSE, conf->args) || !wc->act) {
			VR_ERROR(vr, "lua.handler: couldn't load '%s'", conf->filename->str);
			return LI_HANDLER_ERROR;
		}
	}

loaded:
	li_action_enter(vr, wc->act);

	return LI_HANDLER_GO_ON;
}
static AuthFile* auth_file_new(liWorker *wrk, const GString *path, gboolean has_realm, gint ttl) {
	AuthFile* f = g_slice_new0(AuthFile);

	f->path = g_string_new_len(GSTR_LEN(path));
	f->has_realm = has_realm;
	f->ttl = ttl;
	f->next_check = li_cur_ts(wrk) + ttl;
	f->lock = g_mutex_new();

	if (NULL == (f->data = auth_file_load(wrk->srv, f))) {
		auth_file_free(f);
		return NULL;
	}

	return f;
}
void li_connection_start(liConnection *con, liSocketAddress remote_addr, int s, liServerSocket *srv_sock) {
	LI_FORCE_ASSERT(NULL == con->con_sock.data);

	con->srv_sock = srv_sock;
	con->state = LI_CON_STATE_REQUEST_START;
	con->mainvr->ts_started = con->ts_started = li_cur_ts(con->wrk);

	con->info.remote_addr = remote_addr;
	li_sockaddr_to_string(remote_addr, con->info.remote_addr_str, FALSE);

	con->info.local_addr = li_sockaddr_dup(srv_sock->local_addr);
	li_sockaddr_to_string(con->info.local_addr, con->info.local_addr_str, FALSE);

	con->info.aborted = FALSE;

	li_stream_init(&con->in, &con->wrk->loop, _connection_http_in_cb);
	li_stream_init(&con->out, &con->wrk->loop, _connection_http_out_cb);
	con->info.req = &con->in;
	con->info.resp = &con->out;

	li_connection_update_io_wait(con);

	if (srv_sock->new_cb) {
		if (!srv_sock->new_cb(con, s)) {
			li_connection_error(con);
			return;
		}
	} else {
		simple_tcp_new(con, s);
	}

	LI_FORCE_ASSERT(NULL != con->con_sock.raw_in || NULL != con->con_sock.raw_out);

	li_chunkqueue_use_limit(con->con_sock.raw_in->out, LI_CONNECTION_DEFAULT_CHUNKQUEUE_LIMIT);
	li_chunkqueue_use_limit(con->con_sock.raw_out->out, LI_CONNECTION_DEFAULT_CHUNKQUEUE_LIMIT);
	li_stream_connect(&con->out, con->con_sock.raw_out);
	li_stream_connect(con->con_sock.raw_in, &con->in);

	li_chunk_parser_init(&con->req_parser_ctx.chunk_ctx, con->con_sock.raw_in->out);
}
static void li_connection_reset_keep_alive(liConnection *con) {
	liVRequest *vr = con->mainvr;

	if (NULL == con->con_sock.raw_in || NULL == con->con_sock.raw_out || con->in.source != con->con_sock.raw_in) {
		li_connection_reset(con);
		return;
	}

	/* only start keep alive watcher if there isn't more input data already */
	if (con->con_sock.raw_in->out->length == 0) {
		li_event_stop(&con->keep_alive_data.watcher);
		{
			con->keep_alive_data.max_idle = CORE_OPTION(LI_CORE_OPTION_MAX_KEEP_ALIVE_IDLE).number;
			if (con->keep_alive_data.max_idle == 0) {
				con->state = LI_CON_STATE_CLOSE;
				con_iostream_shutdown(con);
				li_connection_reset(con);
				return;
			}

			con->keep_alive_data.timeout = li_cur_ts(con->wrk) + con->keep_alive_data.max_idle;

			if (con->keep_alive_data.max_idle == con->srv->keep_alive_queue_timeout) {
				/* queue is sorted by con->keep_alive_data.timeout */
				gboolean need_start = (0 == con->wrk->keep_alive_queue.length);
				con->keep_alive_data.timeout = li_cur_ts(con->wrk) + con->srv->keep_alive_queue_timeout;
				g_queue_push_tail(&con->wrk->keep_alive_queue, con);
				con->keep_alive_data.link = g_queue_peek_tail_link(&con->wrk->keep_alive_queue);
				if (need_start)
					li_worker_check_keepalive(con->wrk);
			} else {
				li_event_timer_once(&con->keep_alive_data.watcher, con->keep_alive_data.max_idle);
			}
		}
	} else {
		li_stream_again_later(&con->in);
	}

	con->state = LI_CON_STATE_KEEP_ALIVE;
	con->response_headers_sent = FALSE;
	con->expect_100_cont = FALSE;
	con->out_has_all_data = FALSE;

	con->info.keep_alive = TRUE;

	li_connection_update_io_wait(con);

	li_vrequest_reset(con->mainvr, TRUE);
	li_http_request_parser_reset(&con->req_parser_ctx);

	li_stream_disconnect(&con->out);
	li_stream_disconnect_dest(&con->in);
	con->out.out->is_closed = FALSE;

	memset(&con->in_chunked_decode_state, 0, sizeof(con->in_chunked_decode_state));

	/* restore chunkqueue limits */
	li_chunkqueue_use_limit(con->con_sock.raw_in->out, LI_CONNECTION_DEFAULT_CHUNKQUEUE_LIMIT);
	li_chunkqueue_use_limit(con->con_sock.raw_out->out, LI_CONNECTION_DEFAULT_CHUNKQUEUE_LIMIT);

	/* reset stats */
	con->info.stats.bytes_in = G_GUINT64_CONSTANT(0);
	con->info.stats.bytes_in_5s = G_GUINT64_CONSTANT(0);
	con->info.stats.bytes_in_5s_diff = G_GUINT64_CONSTANT(0);
	con->info.stats.bytes_out = G_GUINT64_CONSTANT(0);
	con->info.stats.bytes_out_5s = G_GUINT64_CONSTANT(0);
	con->info.stats.bytes_out_5s_diff = G_GUINT64_CONSTANT(0);
	con->info.stats.last_avg = 0;
}
static liHandlerResult mod_limit_action_handle(liVRequest *vr, gpointer param, gpointer *context) {
	gboolean limit_reached = FALSE;
	mod_limit_context *ctx = (mod_limit_context*) param;
	GPtrArray *arr = g_ptr_array_index(vr->plugin_ctx, ctx->plugin->id);
	gint cons;
	mod_limit_req_ip_data *rid;
	liSocketAddress *remote_addr = &vr->coninfo->remote_addr;
	gpointer addr;
	guint32 bits;

	UNUSED(context);

	if (li_vrequest_is_handled(vr)) {
		VR_DEBUG(vr, "%s", "mod_limit: already have a content handler - ignoring limits. Put limit.* before content handlers such as 'static', 'fastcgi' or 'proxy'");
		return LI_HANDLER_GO_ON;
	}

	/* IPv4 or IPv6? */
	switch (remote_addr->addr->plain.sa_family) {
	case AF_INET:
		addr = &remote_addr->addr->ipv4.sin_addr.s_addr;
		bits = 32;
		break;
	case AF_INET6:
		addr = &remote_addr->addr->ipv6.sin6_addr.s6_addr;
		bits = 128;
		break;
	default:
		if (ctx->type == ML_TYPE_CON_IP || ctx->type == ML_TYPE_REQ_IP) {
			VR_DEBUG(vr, "%s", "mod_limit only supports ipv4 or ipv6 clients");
			return LI_HANDLER_ERROR;
		}
		addr = NULL;
		bits = 0;
	}

	if (!arr) {
		/* request is not in any context yet, create new array */
		arr = g_ptr_array_sized_new(2);
		g_ptr_array_index(vr->plugin_ctx, ctx->plugin->id) = arr;
	}

	switch (ctx->type) {
	case ML_TYPE_CON:
#ifdef GLIB_VERSION_2_30
		/* since 2.30 g_atomic_int_add does the same as g_atomic_int_exchange_and_add,
		 * before it didn't return the old value. this fixes the deprecation warning. */
		if (g_atomic_int_add(&ctx->pool.con, 1) > ctx->limit) {
			g_atomic_int_add(&ctx->pool.con, -1);
			limit_reached = TRUE;
			VR_DEBUG(vr, "limit.con: limit reached (%d active connections)", ctx->limit);
		}
#else
		if (g_atomic_int_exchange_and_add(&ctx->pool.con, 1) > ctx->limit) {
			g_atomic_int_add(&ctx->pool.con, -1);
			limit_reached = TRUE;
			VR_DEBUG(vr, "limit.con: limit reached (%d active connections)", ctx->limit);
		}
#endif
		break;
	case ML_TYPE_CON_IP:
		g_mutex_lock(ctx->mutex);
		cons = GPOINTER_TO_INT(li_radixtree_lookup_exact(ctx->pool.con_ip, addr, bits));
		if (cons < ctx->limit) {
			li_radixtree_insert(ctx->pool.con_ip, addr, bits, GINT_TO_POINTER(cons+1));
		} else {
			limit_reached = TRUE;
			VR_DEBUG(vr, "limit.con_ip: limit reached (%d active connections)", ctx->limit);
		}
		g_mutex_unlock(ctx->mutex);
		break;
	case ML_TYPE_REQ:
		g_mutex_lock(ctx->mutex);
		if (li_cur_ts(vr->wrk) - ctx->pool.req.ts > 1.0) {
			/* reset pool */
			ctx->pool.req.ts = li_cur_ts(vr->wrk);
			ctx->pool.req.num = 1;
		} else {
			ctx->pool.req.num++;
			if (ctx->pool.req.num > ctx->limit) {
				limit_reached = TRUE;
				VR_DEBUG(vr, "limit.req: limit reached (%d req/s)", ctx->limit);
			}
		}
		g_mutex_unlock(ctx->mutex);
		break;
	case ML_TYPE_REQ_IP:
		g_mutex_lock(ctx->mutex);
		rid = li_radixtree_lookup_exact(ctx->pool.req_ip, addr, bits);
		if (!rid) {
			/* IP not known */
			rid = g_slice_new0(mod_limit_req_ip_data);
			rid->requests = 1;
			rid->ip = li_sockaddr_dup(*remote_addr);
			rid->ctx = ctx;
			rid->timeout_elem.data = rid;
			li_radixtree_insert(ctx->pool.req_ip, addr, bits, rid);
			li_waitqueue_push(&(((mod_limit_data*)ctx->plugin->data)->timeout_queues[vr->wrk->ndx]), &rid->timeout_elem);
		} else if (rid->requests < ctx->limit) {
			rid->requests++;
		} else {
			limit_reached = TRUE;
			VR_DEBUG(vr, "limit.req_ip: limit reached (%d req/s)", ctx->limit);
		}
		g_mutex_unlock(ctx->mutex);
		break;
	}

	if (limit_reached) {
		/* limit reached, we either execute the defined action or return a 503 error page */
		if (ctx->action_limit_reached) {
			/* execute action */
			li_action_enter(vr, ctx->action_limit_reached);
		} else {
			/* return 503 error page */
			if (!li_vrequest_handle_direct(vr)) {
				return LI_HANDLER_ERROR;
			}

			vr->response.http_status = 503;
		}
	} else {
		g_ptr_array_add(arr, ctx);
		g_atomic_int_inc(&ctx->refcount);
	}

	return LI_HANDLER_GO_ON;
}
guint li_throttle_query(liWorker *wrk, liThrottleState *state, guint interested, liThrottleNotifyCB notify_callback, gpointer data) {
	guint now = msec_timestamp(li_cur_ts(wrk));
	gint fill, pool_fill;
	guint i, len;

	if (NULL == state) return interested;

	state->notify_callback = NULL;
	state->wqueue_elem.data = NULL;

	throttle_debug("li_throttle_query[%u]: interested %i, magazine %i\n", now, interested, state->magazine);

	if (interested > THROTTLE_MAX_STEP) interested = THROTTLE_MAX_STEP;

	if ((gint) interested <= state->magazine + THROTTLE_OVERLOAD) return interested;

	/* also try to balance negative magazine */
	fill = interested - state->magazine;
	if (state->single_rate != 0) {
		if (now - state->single_last_rearm >= LI_THROTTLE_GRANULARITY) {
			guint single_fill = (((guint64) state->single_rate) * (now - state->single_last_rearm)) / 1000u;
			state->single_last_rearm = now;
			if (state->single_burst - state->single_magazine < single_fill) {
				state->single_magazine = state->single_burst;
			} else {
				state->single_magazine += single_fill;
			}
		}
		if (fill > state->single_magazine) fill = state->single_magazine;
		throttle_debug("single_magazine: %i\n", state->single_magazine);
	}

	/* pool_fill <= fill in the loop */
	pool_fill = fill;
	for (i = 0, len = state->pools->len; i < len; ++i) {
		liThrottlePoolState *pstate = g_ptr_array_index(state->pools, i);
		liThrottlePool *pool = pstate->pool;
		liThrottlePoolWorkerState *pwstate = &pool->workers[wrk->ndx];
		if (fill > pstate->magazine) {
			throttle_register(pwstate, pstate);
			throttle_pool_rearm(wrk, pool, now);
			if (fill > pstate->magazine) {
				throttle_register(pwstate, pstate);
				if (pool_fill > pstate->magazine) {
					pool_fill = pstate->magazine;
				}
			}
		}
		throttle_debug("pool %i magazine: %i\n", i, pstate->magazine);
	}

	throttle_debug("query refill: %i\n", pool_fill);

	if (pool_fill > 0) {
		if (state->single_rate != 0) {
			state->single_magazine -= pool_fill;
		}
		for (i = 0, len = state->pools->len; i < len; ++i) {
			liThrottlePoolState *pstate = g_ptr_array_index(state->pools, i);
			pstate->magazine -= pool_fill;
		}
		state->magazine += pool_fill;
	}

	if (state->magazine + THROTTLE_OVERLOAD <= 0) {
		throttle_debug("query queueing\n");
		state->wqueue_elem.data = data;
		state->notify_callback = notify_callback;
		state->interested = interested;
		if (!state->wqueue_elem.queued) {
			li_waitqueue_push(&wrk->throttle_queue, &state->wqueue_elem);
		}
		return 0;
	}

	throttle_debug("query success: %i\n", state->magazine + THROTTLE_OVERLOAD);

	if ((gint) interested <= state->magazine + THROTTLE_OVERLOAD) return interested;
	return state->magazine + THROTTLE_OVERLOAD;
}
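As a worked example of the per-state rearm step above (the numbers are illustrative only): with single_rate = 100000 bytes/s and 250 ms elapsed since single_last_rearm, single_fill = 100000 * 250 / 1000 = 25000 bytes is added to single_magazine, capped so the magazine never exceeds single_burst.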