static void mainvr_connection_upgrade(liVRequest *vr, liStream *backend_drain, liStream *backend_source) {
	liConnection* con = li_connection_from_vrequest(vr);
	LI_FORCE_ASSERT(NULL != con);

	if (con->response_headers_sent || NULL != con->out.source) {
		li_connection_error(con);
		return;
	}

	if (CORE_OPTION(LI_CORE_OPTION_DEBUG_REQUEST_HANDLING).boolean) {
		VR_DEBUG(vr, "%s", "connection upgrade: write response headers");
	}

	con->response_headers_sent = TRUE;
	con->info.keep_alive = FALSE;
	li_response_send_headers(vr, con->out.out, NULL, TRUE);
	con->state = LI_CON_STATE_UPGRADED;
	vr->response.transfer_encoding = 0;

	li_connection_update_io_wait(con);

	li_stream_disconnect_dest(&con->in);
	con->in.out->is_closed = FALSE;

	li_stream_connect(&con->in, backend_drain);
	li_stream_connect(backend_source, &con->out);

	li_vrequest_reset(con->mainvr, TRUE);

	if (NULL != con->in.source) {
		li_chunkqueue_steal_all(con->out.out, backend_drain->out);
	}
	con->info.out_queue_length = con->out.out->length;

	li_stream_notify(&con->out);
	li_stream_notify(&con->in);
}
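/* Completion callback handed to li_memcached_get() in mc_handle_lookup() further below.
 * It runs once the memcached request has finished: on LI_MEMCACHED_OK it steals the item
 * buffer into the pending memcache_request, logs misses and errors otherwise, and finally
 * re-schedules the waiting vrequest via li_vrequest_joblist_append(). If the vrequest is
 * already gone (req->vr == NULL) it only frees the request. */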
static void memcache_callback(liMemcachedRequest *request, liMemcachedResult result, liMemcachedItem *item, GError **err) {
	memcache_request *req = request->cb_data;
	liVRequest *vr = req->vr;

	/* request done */
	req->req = NULL;

	if (!vr) {
		g_slice_free(memcache_request, req);
		return;
	}

	switch (result) {
	case LI_MEMCACHED_OK: /* STORED, VALUE, DELETED */
		/* steal buffer */
		req->buffer = item->data;
		item->data = NULL;
		if (CORE_OPTION(LI_CORE_OPTION_DEBUG_REQUEST_HANDLING).boolean) {
			VR_DEBUG(vr, "memcached.lookup: key '%s' found, flags = %u", item->key->str, (guint) item->flags);
		}
		break;
	case LI_MEMCACHED_NOT_FOUND:
		/* ok, nothing to do - we just didn't find an entry */
		if (CORE_OPTION(LI_CORE_OPTION_DEBUG_REQUEST_HANDLING).boolean) {
			VR_DEBUG(vr, "%s", "memcached.lookup: key not found");
		}
		break;
	case LI_MEMCACHED_NOT_STORED:
	case LI_MEMCACHED_EXISTS:
		VR_ERROR(vr, "memcached error: %s", "unexpected result");
		/* TODO (not possible for lookup) */
		break;
	case LI_MEMCACHED_RESULT_ERROR:
		if (err && *err) {
			if (LI_MEMCACHED_DISABLED != (*err)->code) {
				VR_ERROR(vr, "memcached error: %s", (*err)->message);
			}
		} else {
			VR_ERROR(vr, "memcached error: %s", "Unknown error");
		}
		break;
	}

	li_vrequest_joblist_append(vr);
}
static liHandlerResult auth_handle_deny(liVRequest *vr, gpointer param, gpointer *context) {
	liPlugin *p = param;
	UNUSED(context);

	if (!li_vrequest_handle_direct(vr)) {
		if (_OPTION(vr, p, 0).boolean || CORE_OPTION(LI_CORE_OPTION_DEBUG_REQUEST_HANDLING).boolean) {
			VR_DEBUG(vr, "skipping auth.deny as request is already handled with current status %i", vr->response.http_status);
		}
		return LI_HANDLER_GO_ON;
	}

	vr->response.http_status = 403;

	return LI_HANDLER_GO_ON;
}
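/* Connection teardown helpers: li_connection_error() force-closes the socket after an
 * unrecoverable error, while li_connection_request_done() runs at the end of a response
 * and either re-arms keep-alive (server still running/warming up and keep_alive allowed)
 * or shuts the connection down. Both are no-ops if the connection is already closing or dead. */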
void li_connection_error(liConnection *con) {
	liVRequest *vr = con->mainvr;

	if (LI_CON_STATE_CLOSE == con->state || LI_CON_STATE_DEAD == con->state) return;

	if (CORE_OPTION(LI_CORE_OPTION_DEBUG_REQUEST_HANDLING).boolean) {
		VR_DEBUG(vr, "%s", "connection closed (error)");
	}

	con->state = LI_CON_STATE_CLOSE;
	con_iostream_close(con);
	li_plugins_handle_close(con);
	li_connection_reset(con);
}
void li_connection_request_done(liConnection *con) {
	liVRequest *vr = con->mainvr;
	liServerState s;

	if (LI_CON_STATE_CLOSE == con->state || LI_CON_STATE_DEAD == con->state) return;

	if (CORE_OPTION(LI_CORE_OPTION_DEBUG_REQUEST_HANDLING).boolean) {
		VR_DEBUG(con->mainvr, "response end (keep_alive = %i)", con->info.keep_alive);
	}

	li_plugins_handle_close(con);

	s = g_atomic_int_get(&con->srv->dest_state);
	if (con->info.keep_alive && (LI_SERVER_RUNNING == s || LI_SERVER_WARMUP == s) && NULL != con->con_sock.data) {
		li_connection_reset_keep_alive(con);
	} else {
		con->state = LI_CON_STATE_CLOSE;
		con_iostream_shutdown(con);
		li_connection_reset(con);
	}
}
static void li_connection_reset_keep_alive(liConnection *con) {
	liVRequest *vr = con->mainvr;

	if (NULL == con->con_sock.raw_in || NULL == con->con_sock.raw_out || con->in.source != con->con_sock.raw_in) {
		li_connection_reset(con);
		return;
	}

	/* only start keep alive watcher if there isn't more input data already */
	if (con->con_sock.raw_in->out->length == 0) {
		li_event_stop(&con->keep_alive_data.watcher);

		con->keep_alive_data.max_idle = CORE_OPTION(LI_CORE_OPTION_MAX_KEEP_ALIVE_IDLE).number;
		if (con->keep_alive_data.max_idle == 0) {
			con->state = LI_CON_STATE_CLOSE;
			con_iostream_shutdown(con);
			li_connection_reset(con);
			return;
		}

		con->keep_alive_data.timeout = li_cur_ts(con->wrk) + con->keep_alive_data.max_idle;

		if (con->keep_alive_data.max_idle == con->srv->keep_alive_queue_timeout) {
			/* queue is sorted by con->keep_alive_data.timeout */
			gboolean need_start = (0 == con->wrk->keep_alive_queue.length);
			con->keep_alive_data.timeout = li_cur_ts(con->wrk) + con->srv->keep_alive_queue_timeout;
			g_queue_push_tail(&con->wrk->keep_alive_queue, con);
			con->keep_alive_data.link = g_queue_peek_tail_link(&con->wrk->keep_alive_queue);
			if (need_start)
				li_worker_check_keepalive(con->wrk);
		} else {
			li_event_timer_once(&con->keep_alive_data.watcher, con->keep_alive_data.max_idle);
		}
	} else {
		li_stream_again_later(&con->in);
	}

	con->state = LI_CON_STATE_KEEP_ALIVE;
	con->response_headers_sent = FALSE;
	con->expect_100_cont = FALSE;
	con->out_has_all_data = FALSE;
	con->info.keep_alive = TRUE;

	li_connection_update_io_wait(con);

	li_vrequest_reset(con->mainvr, TRUE);
	li_http_request_parser_reset(&con->req_parser_ctx);

	li_stream_disconnect(&con->out);
	li_stream_disconnect_dest(&con->in);
	con->out.out->is_closed = FALSE;

	memset(&con->in_chunked_decode_state, 0, sizeof(con->in_chunked_decode_state));

	/* restore chunkqueue limits */
	li_chunkqueue_use_limit(con->con_sock.raw_in->out, LI_CONNECTION_DEFAULT_CHUNKQUEUE_LIMIT);
	li_chunkqueue_use_limit(con->con_sock.raw_out->out, LI_CONNECTION_DEFAULT_CHUNKQUEUE_LIMIT);

	/* reset stats */
	con->info.stats.bytes_in = G_GUINT64_CONSTANT(0);
	con->info.stats.bytes_in_5s = G_GUINT64_CONSTANT(0);
	con->info.stats.bytes_in_5s_diff = G_GUINT64_CONSTANT(0);
	con->info.stats.bytes_out = G_GUINT64_CONSTANT(0);
	con->info.stats.bytes_out_5s = G_GUINT64_CONSTANT(0);
	con->info.stats.bytes_out_5s_diff = G_GUINT64_CONSTANT(0);
	con->info.stats.last_avg = 0;
}
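/* Stream callback feeding the mainvr response into the raw outgoing chunkqueue: it sends
 * the response headers once, then either chunk-encodes or steals the body from the source
 * stream. Note that raw_out->is_closed is deliberately kept FALSE so the TCP/TLS side stays
 * usable for keep-alive; "all data forwarded" is tracked via con->out_has_all_data instead. */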
/* http response header/data -> tcp/ssl */
static void _connection_http_out_cb(liStream *stream, liStreamEvent event) {
	liConnection *con = LI_CONTAINER_OF(stream, liConnection, out);
	liChunkQueue *raw_out = stream->out, *out;
	liVRequest *vr = con->mainvr;

	switch (event) {
	case LI_STREAM_NEW_DATA:
		/* handle below */
		break;
	case LI_STREAM_CONNECTED_SOURCE:
		/* also handle data immediately */
		break;
	case LI_STREAM_DISCONNECTED_SOURCE:
		if (!con->out_has_all_data) li_connection_error(con);
		return;
	case LI_STREAM_DISCONNECTED_DEST:
		if (!raw_out->is_closed || 0 != raw_out->length || NULL == con->con_sock.raw_out) {
			li_connection_error(con);
		} else {
			connection_close(con);
		}
		return;
	case LI_STREAM_DESTROY:
		con->info.resp = NULL;
		li_job_later(&con->wrk->loop.jobqueue, &con->job_reset);
		return;
	default:
		return;
	}

	out = (NULL != stream->source) ? stream->source->out : NULL;

	/* keep raw_out->is_closed = FALSE for keep-alive requests; instead set con->out_has_all_data = TRUE */

	if (LI_CON_STATE_HANDLE_MAINVR <= con->state) {
		if (NULL == stream->source) {
			if (LI_CON_STATE_HANDLE_MAINVR == con->state) {
				/* wait for vrequest to connect the stream as signal that the headers are ready */
				return;
			}
		}

		if (!con->response_headers_sent) {
			if (CORE_OPTION(LI_CORE_OPTION_DEBUG_REQUEST_HANDLING).boolean) {
				VR_DEBUG(vr, "%s", "write response headers");
			}
			con->response_headers_sent = TRUE;
			if (0 == CORE_OPTION(LI_CORE_OPTION_MAX_KEEP_ALIVE_IDLE).number) {
				con->info.keep_alive = FALSE;
			}
			li_response_send_headers(vr, raw_out, out, FALSE);
		}

		if (!con->out_has_all_data && !raw_out->is_closed && NULL != out) {
			if (vr->response.transfer_encoding & LI_HTTP_TRANSFER_ENCODING_CHUNKED) {
				li_filter_chunked_encode(vr, raw_out, out);
			} else {
				li_chunkqueue_steal_all(raw_out, out);
			}
		}
		if (raw_out->is_closed || NULL == out || out->is_closed) {
			con->out_has_all_data = TRUE;
			raw_out->is_closed = FALSE;
		}

		if (con->out_has_all_data) {
			if (con->state < LI_CON_STATE_WRITE) {
				con->state = LI_CON_STATE_WRITE;
				li_connection_update_io_wait(con);
			}
			if (NULL != out) {
				out = NULL;
				li_stream_disconnect(stream);
			}
		}

		con->info.out_queue_length = raw_out->length;
	}

	li_stream_notify(stream);
}
/* tcp/ssl -> http "parser" */
static void _connection_http_in_cb(liStream *stream, liStreamEvent event) {
	liConnection *con = LI_CONTAINER_OF(stream, liConnection, in);
	liChunkQueue *raw_in, *in;
	liVRequest *vr = con->mainvr;

	switch (event) {
	case LI_STREAM_NEW_DATA:
		/* handle below */
		break;
	case LI_STREAM_DISCONNECTED_SOURCE:
		connection_close(con);
		return;
	case LI_STREAM_DESTROY:
		con->info.req = NULL;
		li_job_later(&con->wrk->loop.jobqueue, &con->job_reset);
		return;
	default:
		return;
	}

	if (NULL == stream->source) return;

	/* raw_in never gets closed normally - if we receive EOF from the client it means it cancelled the request */
	raw_in = stream->source->out;
	if (raw_in->is_closed) {
		connection_close(con);
		return;
	}

	/* always close "in" after request body end. reopen it on keep-alive */
	in = con->in.out;

	if (0 == raw_in->length) return; /* no (new) data */

	if (LI_CON_STATE_UPGRADED == con->state) {
		li_chunkqueue_steal_all(in, raw_in);
		li_stream_notify(stream);
		return;
	}

	if (con->state == LI_CON_STATE_KEEP_ALIVE) {
		/* stop keep alive timeout watchers */
		if (con->keep_alive_data.link) {
			g_queue_delete_link(&con->wrk->keep_alive_queue, con->keep_alive_data.link);
			con->keep_alive_data.link = NULL;
		}
		con->keep_alive_data.timeout = 0;
		li_event_stop(&con->keep_alive_data.watcher);

		con->keep_alive_requests++;
		/* disable keep alive if limit is reached */
		if (con->keep_alive_requests == CORE_OPTION(LI_CORE_OPTION_MAX_KEEP_ALIVE_REQUESTS).number)
			con->info.keep_alive = FALSE;

		/* reopen stream for request body */
		li_chunkqueue_reset(in);

		/* reset stuff from keep-alive and record timestamp */
		li_vrequest_start(con->mainvr);

		con->state = LI_CON_STATE_READ_REQUEST_HEADER;

		/* put back in io timeout queue */
		li_connection_update_io_wait(con);
	} else if (con->state == LI_CON_STATE_REQUEST_START) {
		con->state = LI_CON_STATE_READ_REQUEST_HEADER;
		li_connection_update_io_wait(con);
	}

	if (con->state == LI_CON_STATE_READ_REQUEST_HEADER) {
		liHandlerResult res;

		if (CORE_OPTION(LI_CORE_OPTION_DEBUG_REQUEST_HANDLING).boolean) {
			VR_DEBUG(vr, "%s", "reading request header");
		}

		res = li_http_request_parse(vr, &con->req_parser_ctx);

		/* max uri length 8 kilobytes */
		/* TODO: check this and similar in request_parse and response_parse */
		if (vr->request.uri.raw->len > 8*1024) {
			VR_INFO(vr,
				"request uri too large. limit: 8kb, received: %s",
				li_counter_format(vr->request.uri.raw->len, COUNTER_BYTES, vr->wrk->tmp_str)->str
			);

			con->info.keep_alive = FALSE;
			vr->response.http_status = 414; /* Request-URI Too Large */
			con->state = LI_CON_STATE_WRITE;
			li_connection_update_io_wait(con);
			li_stream_again(&con->out);
			return;
		}

		switch (res) {
		case LI_HANDLER_GO_ON:
			break; /* go on */
		case LI_HANDLER_WAIT_FOR_EVENT:
			return;
		case LI_HANDLER_ERROR:
		case LI_HANDLER_COMEBACK: /* unexpected */
			/* unparsable header */
			if (CORE_OPTION(LI_CORE_OPTION_DEBUG_REQUEST_HANDLING).boolean) {
				VR_DEBUG(vr, "%s", "parsing header failed");
			}

			con->wrk->stats.requests++;
			con->info.keep_alive = FALSE;

			/* set status 400 if not already set to e.g. 413 */
			if (vr->response.http_status == 0) vr->response.http_status = 400;
			con->state = LI_CON_STATE_WRITE;
			li_connection_update_io_wait(con);
			li_stream_again(&con->out);
			return;
		}

		con->wrk->stats.requests++;

		/* headers ready */
		if (CORE_OPTION(LI_CORE_OPTION_DEBUG_REQUEST_HANDLING).boolean) {
			VR_DEBUG(vr, "%s", "validating request header");
		}
		if (!li_request_validate_header(con)) {
			/* set status 400 if not already set */
			if (vr->response.http_status == 0) vr->response.http_status = 400;
			con->state = LI_CON_STATE_WRITE;
			con->info.keep_alive = FALSE;
			li_connection_update_io_wait(con);
			li_stream_again(&con->out);
			return;
		}

		/* When does a client ask for 100 Continue? probably not while trying to ddos us
		 * as post content probably goes to a dynamic backend anyway, we don't
		 * care about the rare cases we could determine that we don't want a request at all
		 * before sending it to a backend - so just send the stupid header
		 */
		if (con->expect_100_cont) {
			if (CORE_OPTION(LI_CORE_OPTION_DEBUG_REQUEST_HANDLING).boolean) {
				VR_DEBUG(vr, "%s", "send 100 Continue");
			}
			li_chunkqueue_append_mem(con->out.out, CONST_STR_LEN("HTTP/1.1 100 Continue\r\n\r\n"));
			con->expect_100_cont = FALSE;

			li_stream_notify(&con->out);
		}

		con->state = LI_CON_STATE_HANDLE_MAINVR;
		li_connection_update_io_wait(con);
		li_action_enter(vr, con->srv->mainaction);

		li_vrequest_handle_request_headers(vr);
	}

	if (con->state != LI_CON_STATE_READ_REQUEST_HEADER && !in->is_closed) {
		goffset newbytes = 0;

		if (-1 == vr->request.content_length) {
			if (!in->is_closed) {
				if (!li_filter_chunked_decode(vr, in, raw_in, &con->in_chunked_decode_state)) {
					if (CORE_OPTION(LI_CORE_OPTION_DEBUG_REQUEST_HANDLING).boolean) {
						VR_DEBUG(vr, "%s", "failed decoding chunked request body");
					}
					li_connection_error(con);
					return;
				}
				if (in->is_closed) vr->request.content_length = in->bytes_in;
				newbytes = 1; /* always notify */
			}
		} else {
			if (in->bytes_in < vr->request.content_length) {
				newbytes = li_chunkqueue_steal_len(in, raw_in, vr->request.content_length - in->bytes_in);
			}
			if (in->bytes_in == vr->request.content_length) {
				in->is_closed = TRUE;
			}
		}

		if (newbytes > 0 || in->is_closed) {
			li_stream_notify(&con->in);
		}
	}
}
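/* Output filter installed by memcached.store: it copies the response body into mf->buf while
 * passing it through to f->out unchanged. Once the input is closed, the collected buffer is
 * stored via li_memcached_set(); if the body grows beyond ctx->maxsize (or the filter context
 * is already gone) it gives up on caching and falls back to plain forwarding. */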
static liHandlerResult memcache_store_filter(liVRequest *vr, liFilter *f) {
	memcache_filter *mf = (memcache_filter*) f->param;

	if (NULL == f->in) {
		memcache_store_filter_free(vr, f);
		/* didn't handle f->in->is_closed? abort forwarding */
		if (!f->out->is_closed) li_stream_reset(&f->stream);
		return LI_HANDLER_GO_ON;
	}

	if (NULL == mf) goto forward;

	if (f->in->is_closed && 0 == f->in->length && f->out->is_closed) {
		/* nothing to do anymore */
		return LI_HANDLER_GO_ON;
	}

	/* check if size still fits into buffer */
	if ((gssize) (f->in->length + mf->buf->used) > (gssize) mf->ctx->maxsize) {
		/* response too big, switch to "forward" mode */
		memcache_store_filter_free(vr, f);
		goto forward;
	}

	while (0 < f->in->length) {
		char *data;
		off_t len;
		liChunkIter ci;
		liHandlerResult res;
		GError *err = NULL;

		ci = li_chunkqueue_iter(f->in);

		if (LI_HANDLER_GO_ON != (res = li_chunkiter_read(ci, 0, 16*1024, &data, &len, &err))) {
			if (NULL != err) {
				VR_ERROR(vr, "Couldn't read data from chunkqueue: %s", err->message);
				g_error_free(err);
			}
			return res;
		}

		if ((gssize) (len + mf->buf->used) > (gssize) mf->ctx->maxsize) {
			/* response too big, switch to "forward" mode */
			memcache_store_filter_free(vr, f);
			goto forward;
		}

		memcpy(mf->buf->addr + mf->buf->used, data, len);
		mf->buf->used += len;

		if (!f->out->is_closed) {
			li_chunkqueue_steal_len(f->out, f->in, len);
		} else {
			li_chunkqueue_skip(f->in, len);
		}
	}

	if (f->in->is_closed) {
		/* finally: store response in memcached */
		liMemcachedCon *con;
		GError *err = NULL;
		liMemcachedRequest *req;
		memcached_ctx *ctx = mf->ctx;

		assert(0 == f->in->length);

		f->out->is_closed = TRUE;

		con = mc_ctx_prepare(ctx, vr->wrk);
		mc_ctx_build_key(vr->wrk->tmp_str, ctx, vr);

		if (NULL != vr && CORE_OPTION(LI_CORE_OPTION_DEBUG_REQUEST_HANDLING).boolean) {
			VR_DEBUG(vr, "memcached.store: storing response for key '%s'", vr->wrk->tmp_str->str);
		}

		req = li_memcached_set(con, vr->wrk->tmp_str, ctx->flags, ctx->ttl, mf->buf, NULL, NULL, &err);
		memcache_store_filter_free(vr, f);

		if (NULL == req) {
			if (NULL != err) {
				if (NULL != vr && LI_MEMCACHED_DISABLED != err->code) {
					VR_ERROR(vr, "memcached.store: set failed: %s", err->message);
				}
				g_clear_error(&err);
			} else if (NULL != vr) {
				VR_ERROR(vr, "memcached.store: set failed: %s", "Unknown error");
			}
		}
	}

	return LI_HANDLER_GO_ON;

forward:
	if (f->out->is_closed) {
		li_chunkqueue_skip_all(f->in);
		li_stream_disconnect(&f->stream);
	} else {
		li_chunkqueue_steal_all(f->out, f->in);
		if (f->in->is_closed) f->out->is_closed = f->in->is_closed;
	}
	return LI_HANDLER_GO_ON;
}
static liHandlerResult mc_handle_lookup(liVRequest *vr, gpointer param, gpointer *context) {
	memcached_ctx *ctx = param;
	memcache_request *req = *context;

	if (req) {
		static const GString default_mime_str = { CONST_STR_LEN("application/octet-stream"), 0 };

		liBuffer *buf = req->buffer;
		const GString *mime_str;

		if (NULL != req->req) return LI_HANDLER_WAIT_FOR_EVENT; /* not done yet */

		g_slice_free(memcache_request, req);
		*context = NULL;

		if (NULL == buf) {
			/* miss */
			if (ctx->act_miss) li_action_enter(vr, ctx->act_miss);
			return LI_HANDLER_GO_ON;
		}

		if (!li_vrequest_handle_direct(vr)) {
			if (CORE_OPTION(LI_CORE_OPTION_DEBUG_REQUEST_HANDLING).boolean) {
				VR_DEBUG(vr, "%s", "memcached.lookup: request already handled");
			}
			li_buffer_release(buf);
			return LI_HANDLER_GO_ON;
		}

		if (CORE_OPTION(LI_CORE_OPTION_DEBUG_REQUEST_HANDLING).boolean) {
			VR_DEBUG(vr, "%s", "memcached.lookup: key found, handling request");
		}

		li_chunkqueue_append_buffer(vr->direct_out, buf);

		vr->response.http_status = 200;

		mime_str = li_mimetype_get(vr, vr->request.uri.path);
		if (!mime_str) mime_str = &default_mime_str;
		li_http_header_overwrite(vr->response.headers, CONST_STR_LEN("Content-Type"), GSTR_LEN(mime_str));

		/* hit */
		if (ctx->act_found) li_action_enter(vr, ctx->act_found);
		return LI_HANDLER_GO_ON;
	} else {
		liMemcachedCon *con;
		GError *err = NULL;

		if (li_vrequest_is_handled(vr)) {
			if (CORE_OPTION(LI_CORE_OPTION_DEBUG_REQUEST_HANDLING).boolean) {
				VR_DEBUG(vr, "%s", "memcached.lookup: request already handled");
			}
			return LI_HANDLER_GO_ON;
		}

		con = mc_ctx_prepare(ctx, vr->wrk);
		mc_ctx_build_key(vr->wrk->tmp_str, ctx, vr);

		if (CORE_OPTION(LI_CORE_OPTION_DEBUG_REQUEST_HANDLING).boolean) {
			VR_DEBUG(vr, "memcached.lookup: looking up key '%s'", vr->wrk->tmp_str->str);
		}

		req = g_slice_new0(memcache_request);
		req->req = li_memcached_get(con, vr->wrk->tmp_str, memcache_callback, req, &err);

		if (NULL == req->req) {
			if (NULL != err) {
				if (LI_MEMCACHED_DISABLED != err->code) {
					VR_ERROR(vr, "memcached.lookup: get failed: %s", err->message);
				}
				g_clear_error(&err);
			} else {
				VR_ERROR(vr, "memcached.lookup: get failed: %s", "Unknown error");
			}
			g_slice_free(memcache_request, req);

			/* miss */
			if (ctx->act_miss) li_action_enter(vr, ctx->act_miss);
			return LI_HANDLER_GO_ON;
		}

		req->vr = vr;

		*context = req;
		return LI_HANDLER_WAIT_FOR_EVENT;
	}
}
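/* A minimal sketch (not part of the original sources; the handler name is hypothetical) of
 * the "direct response" pattern used above by auth_handle_deny() and mc_handle_lookup():
 * claim the vrequest with li_vrequest_handle_direct(), set a status, and optionally append
 * a body to vr->direct_out. Assumes the usual lighttpd2 module headers are included. */
static liHandlerResult example_handle_direct(liVRequest *vr, gpointer param, gpointer *context) {
	UNUSED(param);
	UNUSED(context);

	/* someone else already produced a response; leave it alone */
	if (!li_vrequest_handle_direct(vr)) return LI_HANDLER_GO_ON;

	vr->response.http_status = 200;
	li_chunkqueue_append_mem(vr->direct_out, CONST_STR_LEN("hello from example_handle_direct\n"));

	return LI_HANDLER_GO_ON;
}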
static liHandlerResult stat_cache_get(liVRequest *vr, GString *path, struct stat *st, int *err, int *fd, gboolean async) {
	liStatCache *sc;
	liStatCacheEntry *sce;
	guint i;

	/* force blocking call if we are not in a vrequest context or stat cache is disabled */
	if (!vr || !(sc = vr->wrk->stat_cache) || !CORE_OPTION(LI_CORE_OPTION_ASYNC_STAT).boolean) async = FALSE;

	if (async) {
		sce = g_hash_table_lookup(sc->entries, path);

		if (sce) {
			/* cache hit, check state */
			if (g_atomic_int_get(&sce->state) == STAT_CACHE_ENTRY_WAITING) {
				/* already waiting for it? */
				for (i = 0; i < vr->stat_cache_entries->len; i++) {
					if (g_ptr_array_index(vr->stat_cache_entries, i) == sce) {
						return LI_HANDLER_WAIT_FOR_EVENT;
					}
				}
				li_stat_cache_entry_acquire(vr, sce); /* assign sce to vr */
				return LI_HANDLER_WAIT_FOR_EVENT;
			}

			sc->hits++;
		} else {
			/* cache miss, allocate new entry */
			sce = stat_cache_entry_new(sc, path);
			sce->type = STAT_CACHE_ENTRY_SINGLE;
			li_stat_cache_entry_acquire(vr, sce); /* assign sce to vr */
			/* uses initial reference of sce */
			li_waitqueue_push(&sc->delete_queue, &sce->queue_elem);
			g_hash_table_insert(sc->entries, sce->data.path, sce);
			sce->refcount++;
			li_tasklet_push(vr->wrk->tasklets, stat_cache_run, stat_cache_finished, sce);
			sc->misses++;
			return LI_HANDLER_WAIT_FOR_EVENT;
		}
	}

	if (fd) {
		/* open + fstat */
		while (-1 == (*fd = open(path->str, O_RDONLY))) {
			if (errno == EINTR) continue;

			*err = errno;
			return LI_HANDLER_ERROR;
		}
		if (-1 == fstat(*fd, st)) {
			*err = errno;
			close(*fd);
			*fd = -1;
			return LI_HANDLER_ERROR;
		}
	} else {
		/* stat */
		if (-1 == stat(path->str, st)) {
			*err = errno;
			return LI_HANDLER_ERROR;
		}
	}

	return LI_HANDLER_GO_ON;
}
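/* A minimal usage sketch (not part of the original sources; the handler name is hypothetical)
 * for the async stat cache: li_stat_cache_get() either answers right away or returns
 * LI_HANDLER_WAIT_FOR_EVENT, in which case the handler simply returns and is re-entered once
 * the worker tasklet has finished the stat - the same pattern cache_etag_handle() below
 * relies on. Passing NULL for fd requests a plain stat() without opening the file. */
static liHandlerResult example_stat_handler(liVRequest *vr, gpointer param, gpointer *context) {
	struct stat st;
	int err;
	liHandlerResult res;
	UNUSED(param);
	UNUSED(context);

	res = li_stat_cache_get(vr, vr->request.uri.path, &st, &err, NULL);
	if (LI_HANDLER_WAIT_FOR_EVENT == res) return res; /* called again when the stat is done */
	if (LI_HANDLER_ERROR == res) {
		VR_ERROR(vr, "stat failed: %s", g_strerror(err));
		return LI_HANDLER_GO_ON;
	}

	if (CORE_OPTION(LI_CORE_OPTION_DEBUG_REQUEST_HANDLING).boolean) {
		VR_DEBUG(vr, "'%s' has size %" G_GINT64_FORMAT, vr->request.uri.path->str, (gint64) st.st_size);
	}
	return LI_HANDLER_GO_ON;
}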
static liHandlerResult auth_basic(liVRequest *vr, gpointer param, gpointer *context) {
	liHttpHeader *hdr;
	gboolean auth_ok = FALSE;
	AuthBasicData *bdata = param;
	gboolean debug = _OPTION(vr, bdata->p, 0).boolean;
	UNUSED(context);

	if (li_vrequest_is_handled(vr)) {
		if (debug || CORE_OPTION(LI_CORE_OPTION_DEBUG_REQUEST_HANDLING).boolean) {
			VR_DEBUG(vr, "skipping auth.basic as request is already handled with current status %i", vr->response.http_status);
		}
		return LI_HANDLER_GO_ON;
	}

	/* check for Authorization header */
	hdr = li_http_header_lookup(vr->request.headers, CONST_STR_LEN("Authorization"));

	if (!hdr || !g_str_has_prefix(LI_HEADER_VALUE(hdr), "Basic ")) {
		if (debug) {
			VR_DEBUG(vr, "requesting authorization from client for realm \"%s\"", bdata->realm->str);
		}
	} else {
		gchar *decoded, *username = NULL, *password;
		size_t len;
		/* auth_info contains username:password encoded in base64 */
		if (NULL != (decoded = (gchar*)g_base64_decode(LI_HEADER_VALUE(hdr) + sizeof("Basic ") - 1, &len))) {
			/* bogus data? */
			if (NULL != (password = strchr(decoded, ':'))) {
				*password = '\0';
				password++;
				username = decoded;
			} else {
				g_free(decoded);
			}
		}

		if (!username) {
			if (debug) {
				VR_DEBUG(vr, "couldn't parse authorization info from client for realm \"%s\"", bdata->realm->str);
			}
		} else {
			GString user = li_const_gstring(username, password - username - 1);
			GString pass = li_const_gstring(password, len - (password - username));
			if (bdata->backend(vr, &user, &pass, bdata, debug)) {
				auth_ok = TRUE;

				li_environment_set(&vr->env, CONST_STR_LEN("REMOTE_USER"), username, password - username - 1);
				li_environment_set(&vr->env, CONST_STR_LEN("AUTH_TYPE"), CONST_STR_LEN("Basic"));
			} else {
				if (debug) {
					VR_DEBUG(vr, "wrong authorization info from client on realm \"%s\" (user: \"%s\")", bdata->realm->str, username);
				}
			}
			g_free(decoded);
		}
	}

	g_string_truncate(vr->wrk->tmp_str, 0);
	g_string_append_len(vr->wrk->tmp_str, CONST_STR_LEN("Basic realm=\""));
	g_string_append_len(vr->wrk->tmp_str, GSTR_LEN(bdata->realm));
	g_string_append_c(vr->wrk->tmp_str, '"');
	/* generate header always */

	if (!auth_ok) {
		li_http_header_overwrite(vr->response.headers, CONST_STR_LEN("WWW-Authenticate"), GSTR_LEN(vr->wrk->tmp_str));

		/* we already checked for handled */
		if (!li_vrequest_handle_direct(vr))
			return LI_HANDLER_ERROR;

		vr->response.http_status = 401;
		return LI_HANDLER_GO_ON;
	} else {
		/* lets hope browser just ignore the header if status is not 401
		 * but this way it is easier to use a later "auth.deny;"
		 */
		li_http_header_overwrite(vr->response.headers, CONST_STR_LEN("WWW-Authenticate"), GSTR_LEN(vr->wrk->tmp_str));
	}

	if (debug) {
		VR_DEBUG(vr, "client authorization successful for realm \"%s\"", bdata->realm->str);
	}

	return LI_HANDLER_GO_ON;
}
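/* A minimal sketch (not part of the original sources; the name, the parameter types and the
 * hard-coded credentials are purely illustrative, derived only from the call site
 * bdata->backend(vr, &user, &pass, bdata, debug) above) of a basic-auth backend: it receives
 * the decoded username and password as GStrings and returns TRUE if the pair is acceptable.
 * A real backend would consult a htpasswd-style file instead of fixed strings. */
static gboolean example_auth_backend(liVRequest *vr, GString *username, GString *password, AuthBasicData *bdata, gboolean debug) {
	GString expected_user = li_const_gstring(CONST_STR_LEN("alice"));
	GString expected_pass = li_const_gstring(CONST_STR_LEN("secret"));
	UNUSED(vr);
	UNUSED(bdata);
	UNUSED(debug);

	/* g_string_equal compares length + content, so it also works for non NUL-terminated data */
	return g_string_equal(username, &expected_user) && g_string_equal(password, &expected_pass);
}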
static liHandlerResult cache_etag_handle(liVRequest *vr, gpointer param, gpointer *context) {
	cache_etag_context *ctx = (cache_etag_context*) param;
	cache_etag_file *cfile = (cache_etag_file*) *context;
	GList *etag_entry;
	liHttpHeader *etag;
	struct stat st;
	GString *tmp_str = vr->wrk->tmp_str;
	liHandlerResult res;
	int err, fd;

	if (!cfile) {
		if (vr->request.http_method != LI_HTTP_METHOD_GET) return LI_HANDLER_GO_ON;

		LI_VREQUEST_WAIT_FOR_RESPONSE_HEADERS(vr);

		if (vr->response.http_status != 200) return LI_HANDLER_GO_ON;

		/* Don't cache static files if filter list is empty */
		if (NULL == vr->filters_out_first && vr->backend_source->out->is_closed
			&& 0 == vr->backend_source->out->mem_usage) return LI_HANDLER_GO_ON;

		etag_entry = li_http_header_find_first(vr->response.headers, CONST_STR_LEN("etag"));
		if (!etag_entry) return LI_HANDLER_GO_ON; /* no etag -> no caching */
		if (li_http_header_find_next(etag_entry, CONST_STR_LEN("etag"))) {
			VR_ERROR(vr, "%s", "duplicate etag header in response, will not cache it");
			return LI_HANDLER_GO_ON;
		}
		etag = (liHttpHeader*) etag_entry->data;

		cfile = cache_etag_file_create(createFileName(vr, ctx->path, etag));
		*context = cfile;
	}

	res = li_stat_cache_get(vr, cfile->filename, &st, &err, &fd);
	if (res == LI_HANDLER_WAIT_FOR_EVENT)
		return res;

	if (res == LI_HANDLER_GO_ON) {
		liFilter *f;

		if (!S_ISREG(st.st_mode)) {
			VR_ERROR(vr, "Unexpected file type for cache file '%s' (mode %o)", cfile->filename->str, (unsigned int) st.st_mode);
			close(fd);
			return LI_HANDLER_GO_ON; /* no caching */
		}

		cfile->hit_fd = fd;
#ifdef FD_CLOEXEC
		fcntl(cfile->hit_fd, F_SETFD, FD_CLOEXEC);
#endif

		if (CORE_OPTION(LI_CORE_OPTION_DEBUG_REQUEST_HANDLING).boolean) {
			VR_DEBUG(vr, "cache hit for '%s'", vr->request.uri.path->str);
		}

		cfile->hit_length = st.st_size;
		g_string_truncate(tmp_str, 0);
		li_string_append_int(tmp_str, st.st_size);
		li_http_header_overwrite(vr->response.headers, CONST_STR_LEN("Content-Length"), GSTR_LEN(tmp_str));

		f = li_vrequest_add_filter_out(vr, cache_etag_filter_hit, NULL, NULL, NULL);
		if (NULL != f) {
			li_chunkqueue_append_file_fd(f->out, NULL, 0, cfile->hit_length, cfile->hit_fd);
			f->out->is_closed = TRUE;
			cfile->hit_fd = -1;
		}

		cache_etag_file_free(cfile);
		*context = NULL;

		return LI_HANDLER_GO_ON;
	}

	if (CORE_OPTION(LI_CORE_OPTION_DEBUG_REQUEST_HANDLING).boolean) {
		VR_DEBUG(vr, "cache miss for '%s'", vr->request.uri.path->str);
	}

	if (!cache_etag_file_start(vr, cfile)) {
		cache_etag_file_free(cfile);
		*context = NULL;
		return LI_HANDLER_GO_ON; /* no caching */
	}

	li_vrequest_add_filter_out(vr, cache_etag_filter_miss, cache_etag_filter_free, NULL, cfile);
	*context = NULL;

	return LI_HANDLER_GO_ON;
}