// Lua API: uwsgi.cache_set(key, value[, expires])
// Stores a key/value pair in the default uWSGI cache. Always pushes nil
// (one return value) back to the Lua caller.
static int uwsgi_api_cache_set(lua_State *L) {

	int args = lua_gettop(L);
	const char *key;
	const char *value;
	uint64_t expires = 0;
	size_t vallen;

	// no cache configured: nothing to do
	if (!uwsgi.caches)
		goto error;

	if (args > 1) {
		key = lua_tolstring(L, 1, NULL);
		value = lua_tolstring(L, 2, &vallen);
		// fix: lua_tolstring() returns NULL for non-string (and
		// non-number) arguments; the original passed NULL straight
		// to strlen()/uwsgi_cache_set() (undefined behavior)
		if (!key || !value)
			goto error;
		if (args > 2) {
			expires = lua_tonumber(L, 3);
		}
		uwsgi_wlock(uwsgi.caches->lock);
		uwsgi_cache_set((char *) key, strlen(key), (char *) value, (uint16_t) vallen, expires, 0);
		uwsgi_rwunlock(uwsgi.caches->lock);
	}

error:
	lua_pushnil(L);
	return 1;
}
// Background sweeper thread: periodically scans the (legacy, single)
// cache and deletes every item whose expiration timestamp has passed.
// Runs forever; the return statement is never reached.
void *cache_sweeper_loop(void *noarg) {

	int i;
	// block all signals in this thread
	sigset_t smask;
	sigfillset(&smask);
	pthread_sigmask(SIG_BLOCK, &smask, NULL);

	// default scan interval: 3 seconds
	if (!uwsgi.cache_expire_freq)
		uwsgi.cache_expire_freq = 3;

	// remove expired cache items TODO use rb_tree timeouts
	for (;;) {
		sleep(uwsgi.cache_expire_freq);
		uint64_t freed_items = 0;
		// skip the first slot
		for (i = 1; i < (int) uwsgi.cache_max_items; i++) {
			// the write lock is taken and released per item, so a full
			// scan of a large cache does not starve the workers
			uwsgi_wlock(uwsgi.cache_lock);
			if (uwsgi.cache_items[i].expires) {
				if (uwsgi.cache_items[i].expires < (uint64_t) uwsgi.current_time) {
					// NULL key + index: delete by slot number
					uwsgi_cache_del(NULL, 0, i);
					freed_items++;
				}
			}
			uwsgi_rwunlock(uwsgi.cache_lock);
		}
		if (uwsgi.cache_report_freed_items && freed_items > 0) {
			uwsgi_log("freed %llu cache items\n", (unsigned long long) freed_items);
		}
	};
	return NULL;
}
// Push every uWSGI metric to a Datadog statsd (dogstatsd) endpoint.
// uspi->arg has the form "addr:port[,prefix]"; the comma (if present) is
// temporarily NUL-ed while the address is parsed and restored afterwards,
// since uspi->arg is owned by the configuration and reused on every push.
// json/json_len are unused: this pusher serializes metrics itself.
static void stats_pusher_dogstatsd(struct uwsgi_stats_pusher_instance *uspi, time_t now, char *json, size_t json_len) {

	// lazy one-time setup of the destination node
	if (!uspi->configured) {
		struct dogstatsd_node *sn = uwsgi_calloc(sizeof(struct dogstatsd_node));
		char *comma = strchr(uspi->arg, ',');
		if (comma) {
			// optional metric prefix after the comma
			sn->prefix = comma+1;
			sn->prefix_len = strlen(sn->prefix);
			*comma = 0;
		}
		else {
			sn->prefix = "uwsgi";
			sn->prefix_len = 5;
		}

		char *colon = strchr(uspi->arg, ':');
		if (!colon) {
			uwsgi_log("invalid dd address %s\n", uspi->arg);
			// restore the comma before bailing out
			if (comma) *comma = ',';
			free(sn);
			return;
		}

		sn->addr_len = socket_to_in_addr(uspi->arg, colon, 0, &sn->addr.sa_in);

		sn->fd = socket(AF_INET, SOCK_DGRAM, 0);
		if (sn->fd < 0) {
			uwsgi_error("stats_pusher_dogstatsd()/socket()");
			if (comma) *comma = ',';
			free(sn);
			return;
		}
		// non-blocking: dropping a stats packet beats stalling a worker
		uwsgi_socket_nb(sn->fd);
		if (comma) *comma = ',';
		uspi->data = sn;
		uspi->configured = 1;
	}

	// we use the same buffer for all of the packets
	struct uwsgi_buffer *ub = uwsgi_buffer_new(uwsgi.page_size);
	struct uwsgi_metric *um = uwsgi.metrics;
	while(um) {
		// read-lock only while the metric value is read and sent
		uwsgi_rlock(uwsgi.metrics_lock);
		// ignore return value
		if (um->type == UWSGI_METRIC_GAUGE) {
			dogstatsd_send_metric(ub, uspi, um->name, um->name_len, *um->value, "|g");
		}
		else {
			dogstatsd_send_metric(ub, uspi, um->name, um->name_len, *um->value, "|c");
		}
		uwsgi_rwunlock(uwsgi.metrics_lock);
		// some metrics are reset to their initial value after each push
		if (um->reset_after_push){
			uwsgi_wlock(uwsgi.metrics_lock);
			*um->value = um->initial_value;
			uwsgi_rwunlock(uwsgi.metrics_lock);
		}
		um = um->next;
	}
	uwsgi_buffer_destroy(ub);
}
// SSL_CTX session-cache "remove" callback: drop the session (keyed by
// its session id) from the shared uWSGI cache; log a failure only when
// ssl_verbose is enabled.
void uwsgi_ssl_session_remove_cb(SSL_CTX *ctx, SSL_SESSION *sess) {
	int del_failed;

	uwsgi_wlock(uwsgi.ssl_sessions_cache->lock);
	del_failed = uwsgi_cache_del2(uwsgi.ssl_sessions_cache, (char *) sess->session_id, sess->session_id_length, 0, 0);
	if (del_failed && uwsgi.ssl_verbose) {
		uwsgi_log("[uwsgi-ssl] error removing cache item\n");
	}
	uwsgi_rwunlock(uwsgi.ssl_sessions_cache->lock);
}
// Return the value of a custom SNMP metric and report its type via
// *oid_t (SNMP_NULL when the slot is unset, value 0 in that case).
static uint64_t get_uwsgi_custom_snmp_value(uint64_t val, uint8_t * oid_t) {

	// OIDs are 1-based, the shared array is 0-based
	val--;

	uwsgi_wlock(uwsgi.snmp_lock);
	if (uwsgi.shared->snmp_value[val].type) {
		*oid_t = uwsgi.shared->snmp_value[val].type;
		// fix: read the shared value while still holding the lock;
		// the original unlocked first and then read the slot, racing
		// with concurrent writers
		uint64_t ret = uwsgi.shared->snmp_value[val].val;
		uwsgi_rwunlock(uwsgi.snmp_lock);
		return ret;
	}
	uwsgi_rwunlock(uwsgi.snmp_lock);

	*oid_t = SNMP_NULL;
	return 0;
}
static void stats_pusher_socket(struct uwsgi_stats_pusher_instance *uspi, time_t now, char *json, size_t json_len) { if (!uspi->configured) { struct socket_node *sn = uwsgi_calloc(sizeof(struct socket_node)); char *comma = strchr(uspi->arg, ','); if (comma) { sn->prefix = comma+1; sn->prefix_len = strlen(sn->prefix); *comma = 0; } else { sn->prefix = "uwsgi"; sn->prefix_len = 5; } sn->fd = uwsgi_socket_from_addr(&sn->addr, &sn->addr_len, uspi->arg, SOCK_DGRAM); if (sn->fd < -1) { if (comma) *comma = ','; free(sn); return; } uwsgi_socket_nb(sn->fd); if (comma) *comma = ','; uspi->data = sn; uspi->configured = 1; } // we use the same buffer for all of the packets struct uwsgi_buffer *ub = uwsgi_buffer_new(uwsgi.page_size); struct uwsgi_metric *um = uwsgi.metrics; while(um) { uwsgi_rlock(uwsgi.metrics_lock); socket_send_metric(ub, uspi, um); uwsgi_rwunlock(uwsgi.metrics_lock); if (um->reset_after_push){ uwsgi_wlock(uwsgi.metrics_lock); *um->value = um->initial_value; uwsgi_rwunlock(uwsgi.metrics_lock); } um = um->next; } uwsgi_buffer_destroy(ub); }
// SSL_CTX session-cache "new session" callback: serialize the session to
// DER (i2d_SSL_SESSION) and store it in the shared uWSGI cache, keyed by
// the session id. Always returns 0, i.e. no extra reference is kept on
// the SSL_SESSION by this callback.
int uwsgi_ssl_session_new_cb(SSL *ssl, SSL_SESSION *sess) {
	char session_blob[4096];
	// first i2d call with a NULL buffer only computes the encoded size
	int len = i2d_SSL_SESSION(sess, NULL);
	// sessions larger than the stack blob are simply not cached
	if (len > 4096) {
		if (uwsgi.ssl_verbose) {
			uwsgi_log("[uwsgi-ssl] unable to store session of size %d\n", len);
		}
		return 0;
	}

	unsigned char *p = (unsigned char *) session_blob;
	// second i2d call writes the DER blob (and advances p)
	i2d_SSL_SESSION(sess, &p);

	// ok let's write the value to the cache
	uwsgi_wlock(uwsgi.ssl_sessions_cache->lock);
	if (uwsgi_cache_set2(uwsgi.ssl_sessions_cache, (char *) sess->session_id, sess->session_id_length, session_blob, len, uwsgi.ssl_sessions_timeout, 0)) {
		if (uwsgi.ssl_verbose) {
			uwsgi_log("[uwsgi-ssl] unable to store session of size %d in the cache\n", len);
		}
	}
	uwsgi_rwunlock(uwsgi.ssl_sessions_cache->lock);
	return 0;
}
// "busyness" cheaper algorithm. Every uwsgi.cheaper_overload seconds it
// computes, per active worker, the percentage of the interval spent in
// running_time ("busyness") and decides what to do:
//   returns > 0  -> number of cheaped workers to respawn
//   returns -1   -> cheap (stop) one running worker
//   returns  0   -> no change
// On Linux it also watches the listen backlog and can spawn emergency
// workers when it grows. State is kept in uwsgi_cheaper_busyness_global.
int cheaper_busyness_algo(int can_spawn) {

	int i;
	// we use microseconds
	uint64_t t = uwsgi.cheaper_overload*1000000;

	int active_workers = 0;
	uint64_t total_busyness = 0;
	uint64_t avg_busyness = 0;

	// count active workers and accumulate per-worker "was busy" samples
	// (sampled every call, consumed once per check interval)
	for (i = 0; i < uwsgi.numproc; i++) {
		if (uwsgi.workers[i+1].cheaped == 0 && uwsgi.workers[i+1].pid > 0) {
			active_workers++;
			uwsgi_cheaper_busyness_global.was_busy[i] += uwsgi_worker_is_busy(i+1);
		} else {
			uwsgi_cheaper_busyness_global.was_busy[i] = 0;
		}
	}

#ifdef __linux__
	int backlog = uwsgi.shared->backlog;
#endif

	uint64_t now = uwsgi_micros();
	// run the busyness evaluation only once per check interval
	if (now - uwsgi_cheaper_busyness_global.tcheck >= t) {
		uwsgi_cheaper_busyness_global.tcheck = now;
		for (i = 0; i < uwsgi.numproc; i++) {
			if (uwsgi.workers[i+1].cheaped == 0 && uwsgi.workers[i+1].pid > 0) {
				// busyness = share of the interval spent running requests
				uint64_t percent = (( (uwsgi.workers[i+1].running_time-uwsgi_cheaper_busyness_global.last_values[i])*100)/t);
				if (percent > 100) {
					percent = 100;
				}
				else if (uwsgi.workers[i+1].running_time-uwsgi_cheaper_busyness_global.last_values[i] == 0 && percent == 0 && uwsgi_cheaper_busyness_global.was_busy[i] > 0) {
					// running_time did not change but workers were busy
					// this means that workers had response times > busyness check interval
					if (uwsgi_cheaper_busyness_global.verbose) {
						uwsgi_log("[busyness] worker %d was busy %d time(s) in last cycle but no request was completed during this time, marking as 100%% busy\n", i+1, uwsgi_cheaper_busyness_global.was_busy[i]);
					}
					percent = 100;
				}
				uwsgi_cheaper_busyness_global.was_busy[i] = 0;
				total_busyness += percent;
				if (uwsgi_cheaper_busyness_global.verbose && active_workers > 1)
					uwsgi_log("[busyness] worker nr %d %llus average busyness is at %llu%%\n", i+1, uwsgi.cheaper_overload, percent);
				if (uwsgi.has_metrics) {
					// update metrics
					uwsgi_wlock(uwsgi.metrics_lock);
					uwsgi_cheaper_busyness_global.current_busyness[i] = percent;
					uwsgi_rwunlock(uwsgi.metrics_lock);
				}
			}
			// remember running_time for the next interval's delta
			uwsgi_cheaper_busyness_global.last_values[i] = uwsgi.workers[i+1].running_time;
		}

		avg_busyness = (active_workers ? total_busyness / active_workers : 0);

		if (uwsgi.has_metrics) {
			uwsgi_wlock(uwsgi.metrics_lock);
			uwsgi_cheaper_busyness_global.total_avg_busyness = avg_busyness;
			uwsgi_rwunlock(uwsgi.metrics_lock);
		}

		if (uwsgi_cheaper_busyness_global.verbose)
			uwsgi_log("[busyness] %ds average busyness of %d worker(s) is at %d%%\n", (int) uwsgi.cheaper_overload, (int) active_workers, (int) avg_busyness);

		// --- overloaded: try to respawn cheaped workers ---
		if (avg_busyness > uwsgi_cheaper_busyness_global.busyness_max) {
			// we need to reset this to 0 since this is not idle cycle
			uwsgi_cheaper_busyness_global.tolerance_counter = 0;

			// count how many cheaped workers we may bring back (up to cheaper_step)
			int decheaped = 0;
			if (can_spawn) {
				for (i = 1; i <= uwsgi.numproc; i++) {
					if (uwsgi.workers[i].cheaped == 1 && uwsgi.workers[i].pid == 0) {
						decheaped++;
						if (decheaped >= uwsgi.cheaper_step) break;
					}
				}
			}

			if (decheaped > 0) {
				// store information that we just spawned new workers
				uwsgi_cheaper_busyness_global.last_action = 1;

				// calculate number of seconds since last worker was cheaped
				if ((now - uwsgi_cheaper_busyness_global.last_cheaped)/uwsgi.cheaper_overload/1000000 <= uwsgi_cheaper_busyness_global.cheap_multi) {
					// worker was cheaped and then spawned back in less than current multiplier*cheaper_overload seconds
					// we will increase the multiplier so that next time worker will need to wait longer before being cheaped
					uwsgi_cheaper_busyness_global.cheap_multi += uwsgi_cheaper_busyness_global.penalty;
					uwsgi_log("[busyness] worker(s) respawned to fast, increasing cheaper multiplier to %llu (+%llu)\n", uwsgi_cheaper_busyness_global.cheap_multi, uwsgi_cheaper_busyness_global.penalty);
				} else {
					decrease_multi();
				}

				set_next_cheap_time();

				uwsgi_log("[busyness] %llus average busyness is at %llu%%, will spawn %d new worker(s)\n", uwsgi.cheaper_overload, avg_busyness, decheaped);
			} else {
				uwsgi_log("[busyness] %llus average busyness is at %llu%% but we already started maximum number of workers available with current limits (%d)\n", uwsgi.cheaper_overload, avg_busyness, active_workers);
			}

			// return the maximum number of workers to spawn
			return decheaped;
#ifdef __linux__
		} else if (can_spawn && backlog > uwsgi_cheaper_busyness_global.backlog_alert && active_workers < uwsgi.numproc) {
			// busyness is fine but the backlog is over the alert level
			return spawn_emergency_worker(backlog);
#endif
		// --- idle: consider cheaping a worker ---
		} else if (avg_busyness < uwsgi_cheaper_busyness_global.busyness_min) {
			// with only 1 worker running there is no point in doing all that magic
			if (active_workers == 1) return 0;

			// we need to reset this to 0 since this is not idle cycle
			uwsgi_cheaper_busyness_global.tolerance_counter = 0;

			if (active_workers > uwsgi.cheaper_count) {
				// cheap a worker if too much are running
				if (now >= uwsgi_cheaper_busyness_global.next_cheap) {
					// lower cheaper multiplier if this is subsequent cheap
					if (uwsgi_cheaper_busyness_global.last_action == 2) decrease_multi();
					set_next_cheap_time();
					uwsgi_log("[busyness] %llus average busyness is at %llu%%, cheap one of %d running workers\n", uwsgi.cheaper_overload, avg_busyness, (int) active_workers);
					// store timestamp
					uwsgi_cheaper_busyness_global.last_cheaped = uwsgi_micros();
					// store information that last action performed was cheaping worker
					uwsgi_cheaper_busyness_global.last_action = 2;
					if (uwsgi_cheaper_busyness_global.emergency_workers > 0) uwsgi_cheaper_busyness_global.emergency_workers--;
					return -1;
				} else if (uwsgi_cheaper_busyness_global.verbose)
					uwsgi_log("[busyness] need to wait %llu more second(s) to cheap worker\n", (uwsgi_cheaper_busyness_global.next_cheap - now)/1000000);
			}
		// --- in range: min <= busyness <= max ---
		} else {
			// with only 1 worker running there is no point in doing all that magic
			if (active_workers == 1) return 0;

			if (uwsgi_cheaper_busyness_global.emergency_workers > 0)
				// we had emergency workers running and we went down to the busyness
				// level that is high enough to slow down cheaping workers at extra speed
				uwsgi_cheaper_busyness_global.emergency_workers--;

			// we have min <= busyness <= max we need to check what happened before
			uwsgi_cheaper_busyness_global.tolerance_counter++;
			if (uwsgi_cheaper_busyness_global.tolerance_counter >= 3) {
				// we had three or more cycles when min <= busyness <= max, lets reset the cheaper timer
				// this is to prevent workers from being cheaped if we had idle cycles for almost all
				// time needed to cheap them, than a lot min<busy<max when we do not reset timer
				// and then another idle cycle than would trigger cheaping
				if (uwsgi_cheaper_busyness_global.verbose)
					uwsgi_log("[busyness] %llus average busyness is at %llu%%, %llu non-idle cycle(s), resetting cheaper timer\n", uwsgi.cheaper_overload, avg_busyness, uwsgi_cheaper_busyness_global.tolerance_counter);
				set_next_cheap_time();
			} else {
				// we had < 3 idle cycles in a row so we won't reset idle timer yet since this might be just short load spike
				// but we need to add cheaper-overload seconds to the cheaper timer so this cycle isn't counted as idle
				if (uwsgi_cheaper_busyness_global.verbose)
					uwsgi_log("[busyness] %llus average busyness is at %llu%%, %llu non-idle cycle(s), adjusting cheaper timer\n", uwsgi.cheaper_overload, avg_busyness, uwsgi_cheaper_busyness_global.tolerance_counter);
				uwsgi_cheaper_busyness_global.next_cheap += uwsgi.cheaper_overload*1000000;
			}
		}
	}

#ifdef __linux__
	// backlog handling runs every call, not only on check-interval ticks
	else if (can_spawn && backlog > uwsgi_cheaper_busyness_global.backlog_alert && active_workers < uwsgi.numproc) {
		// we check for backlog overload every cycle
		return spawn_emergency_worker(backlog);
	} else if (backlog > 0) {
		if (uwsgi_cheaper_busyness_global.backlog_is_nonzero) {
			// backlog was > 0 last time, check timestamp and spawn workers if needed
			if (can_spawn && (now - uwsgi_cheaper_busyness_global.backlog_nonzero_since)/1000000 >= uwsgi_cheaper_busyness_global.backlog_nonzero_alert) {
				uwsgi_log("[busyness] backlog was non-zero for %llu second(s), spawning new worker(s)\n", (now - uwsgi_cheaper_busyness_global.backlog_nonzero_since)/1000000);
				uwsgi_cheaper_busyness_global.backlog_nonzero_since = now;
				return spawn_emergency_worker(backlog);
			}
		} else {
			// this is first > 0 pass, setup timer
			if (uwsgi_cheaper_busyness_global.verbose)
				uwsgi_log("[busyness] backlog is starting to fill (%d)\n", backlog);
			uwsgi_cheaper_busyness_global.backlog_is_nonzero = 1;
			uwsgi_cheaper_busyness_global.backlog_nonzero_since = now;
		}
	} else if (uwsgi_cheaper_busyness_global.backlog_is_nonzero) {
		if (uwsgi_cheaper_busyness_global.verbose)
			uwsgi_log("[busyness] backlog is now empty\n");
		uwsgi_cheaper_busyness_global.backlog_is_nonzero = 0;
	}
#endif

	return 0;
}
// this function does not use the magic api internally to avoid too much copy static void manage_magic_context(struct wsgi_request *wsgi_req, struct uwsgi_cache_magic_context *ucmc) { struct uwsgi_buffer *ub = NULL; struct uwsgi_cache *uc = uwsgi.caches; if (ucmc->cache_len > 0) { uc = uwsgi_cache_by_namelen(ucmc->cache, ucmc->cache_len); if (!uc) return; } if (!uc) return; // cache get if (!uwsgi_strncmp(ucmc->cmd, ucmc->cmd_len, "get", 3)) { uint64_t vallen = 0; uint64_t expires = 0; uwsgi_rlock(uc->lock); char *value = uwsgi_cache_get3(uc, ucmc->key, ucmc->key_len, &vallen, &expires); if (!value) { uwsgi_rwunlock(uc->lock); return; } // we are still locked !!! ub = uwsgi_buffer_new(uwsgi.page_size); ub->pos = 4; if (uwsgi_buffer_append_keyval(ub, "status", 6, "ok", 2)) goto error; if (uwsgi_buffer_append_keynum(ub, "size", 4, vallen)) goto error; if (expires) { if (uwsgi_buffer_append_keynum(ub, "expires", 7, expires)) goto error; } if (uwsgi_buffer_set_uh(ub, 111, 17)) goto error; if (uwsgi_buffer_append(ub, value, vallen)) goto error; // unlock !!! uwsgi_rwunlock(uc->lock); uwsgi_response_write_body_do(wsgi_req, ub->buf, ub->pos); uwsgi_buffer_destroy(ub); return; } // cache exists if (!uwsgi_strncmp(ucmc->cmd, ucmc->cmd_len, "exists", 6)) { uwsgi_rlock(uc->lock); if (!uwsgi_cache_exists2(uc, ucmc->key, ucmc->key_len)) { uwsgi_rwunlock(uc->lock); return; } // we are still locked !!! ub = uwsgi_buffer_new(uwsgi.page_size); ub->pos = 4; if (uwsgi_buffer_append_keyval(ub, "status", 6, "ok", 2)) goto error; if (uwsgi_buffer_set_uh(ub, 111, 17)) goto error; // unlock !!! uwsgi_rwunlock(uc->lock); uwsgi_response_write_body_do(wsgi_req, ub->buf, ub->pos); uwsgi_buffer_destroy(ub); return; } // cache del if (!uwsgi_strncmp(ucmc->cmd, ucmc->cmd_len, "del", 3)) { uwsgi_wlock(uc->lock); if (uwsgi_cache_del2(uc, ucmc->key, ucmc->key_len, 0, 0)) { uwsgi_rwunlock(uc->lock); return; } // we are still locked !!! 
ub = uwsgi_buffer_new(uwsgi.page_size); ub->pos = 4; if (uwsgi_buffer_append_keyval(ub, "status", 6, "ok", 2)) goto error; if (uwsgi_buffer_set_uh(ub, 111, 17)) goto error; // unlock !!! uwsgi_rwunlock(uc->lock); uwsgi_response_write_body_do(wsgi_req, ub->buf, ub->pos); uwsgi_buffer_destroy(ub); return; } // cache clear if (!uwsgi_strncmp(ucmc->cmd, ucmc->cmd_len, "clear", 5)) { uint64_t i; uwsgi_wlock(uc->lock); for (i = 1; i < uwsgi.caches->max_items; i++) { if (uwsgi_cache_del2(uc, NULL, 0, i, 0)) { uwsgi_rwunlock(uc->lock); return; } } // we are still locked !!! ub = uwsgi_buffer_new(uwsgi.page_size); ub->pos = 4; if (uwsgi_buffer_append_keyval(ub, "status", 6, "ok", 2)) goto error; if (uwsgi_buffer_set_uh(ub, 111, 17)) goto error; // unlock !!! uwsgi_rwunlock(uc->lock); uwsgi_response_write_body_do(wsgi_req, ub->buf, ub->pos); uwsgi_buffer_destroy(ub); return; } // cache set if (!uwsgi_strncmp(ucmc->cmd, ucmc->cmd_len, "set", 3) || !uwsgi_strncmp(ucmc->cmd, ucmc->cmd_len, "update", 6)) { if (ucmc->size == 0 || ucmc->size > uc->max_item_size) return; wsgi_req->post_cl = ucmc->size; // read the value ssize_t rlen = 0; char *value = uwsgi_request_body_read(wsgi_req, ucmc->size, &rlen); if (rlen != (ssize_t) ucmc->size) return; // ok let's lock uwsgi_wlock(uc->lock); if (uwsgi_cache_set2(uc, ucmc->key, ucmc->key_len, value, ucmc->size, ucmc->expires, ucmc->cmd_len > 3 ? UWSGI_CACHE_FLAG_UPDATE : 0)) { uwsgi_rwunlock(uc->lock); return; } // we are still locked !!! ub = uwsgi_buffer_new(uwsgi.page_size); ub->pos = 4; if (uwsgi_buffer_append_keyval(ub, "status", 6, "ok", 2)) goto error; if (uwsgi_buffer_set_uh(ub, 111, 17)) goto error; // unlock !!! uwsgi_rwunlock(uc->lock); uwsgi_response_write_body_do(wsgi_req, ub->buf, ub->pos); uwsgi_buffer_destroy(ub); return; } return; error: uwsgi_rwunlock(uc->lock); uwsgi_buffer_destroy(ub); }
static int uwsgi_cache_request(struct wsgi_request *wsgi_req) { uint64_t vallen = 0; char *value; char *argv[3]; uint16_t argvs[3]; uint8_t argc = 0; // used for modifier2 17 struct uwsgi_cache_magic_context ucmc; struct uwsgi_cache *uc = NULL; switch(wsgi_req->uh->modifier2) { case 0: // get if (wsgi_req->uh->_pktsize > 0) { value = uwsgi_cache_magic_get(wsgi_req->buffer, wsgi_req->uh->_pktsize, &vallen, NULL, NULL); if (value) { wsgi_req->uh->_pktsize = vallen; if (uwsgi_response_write_body_do(wsgi_req, (char *)&wsgi_req->uh, 4)) { free(value) ; return -1;} uwsgi_response_write_body_do(wsgi_req, value, vallen); free(value); } } break; case 1: // set if (wsgi_req->uh->_pktsize > 0) { // max 3 items argc = 3; if (!uwsgi_parse_array(wsgi_req->buffer, wsgi_req->uh->_pktsize, argv, argvs, &argc)) { if (argc > 1) { uwsgi_cache_magic_set(argv[0], argvs[0], argv[1], argvs[1], 0, 0, NULL); } } } break; case 2: // del if (wsgi_req->uh->_pktsize > 0) { uwsgi_cache_magic_del(wsgi_req->buffer, wsgi_req->uh->_pktsize, NULL); } break; case 3: case 4: // dict if (wsgi_req->uh->_pktsize > 0) { uwsgi_hooked_parse(wsgi_req->buffer, wsgi_req->uh->_pktsize, cache_simple_command, (void *) wsgi_req); } break; case 5: // get (uwsgi + stream) if (wsgi_req->uh->_pktsize > 0) { value = uwsgi_cache_magic_get(wsgi_req->buffer, wsgi_req->uh->_pktsize, &vallen, NULL, NULL); if (value) { wsgi_req->uh->_pktsize = 0; wsgi_req->uh->modifier2 = 1; if (uwsgi_response_write_body_do(wsgi_req, (char *)&wsgi_req->uh, 4)) { free(value) ;return -1;} uwsgi_response_write_body_do(wsgi_req, value, vallen); free(value); } else { wsgi_req->uh->_pktsize = 0; wsgi_req->uh->modifier2 = 0; uwsgi_response_write_body_do(wsgi_req, (char *)&wsgi_req->uh, 4); free(value); } } break; case 6: // dump uc = uwsgi.caches; if (wsgi_req->uh->_pktsize > 0) { uc = uwsgi_cache_by_namelen(wsgi_req->buffer, wsgi_req->uh->_pktsize); } if (!uc) break; uwsgi_wlock(uc->lock); struct uwsgi_buffer *cache_dump = 
uwsgi_buffer_new(uwsgi.page_size + uc->filesize); cache_dump->pos = 4; if (uwsgi_buffer_append_keynum(cache_dump, "items", 5, uc->max_items)) { uwsgi_buffer_destroy(cache_dump); break; } if (uwsgi_buffer_append_keynum(cache_dump, "blocksize", 9, uc->blocksize)) { uwsgi_buffer_destroy(cache_dump); break; } if (uwsgi_buffer_set_uh(cache_dump, 111, 7)) { uwsgi_buffer_destroy(cache_dump); break; } if (uwsgi_buffer_append(cache_dump, (char *)uc->items, uc->filesize)) { uwsgi_buffer_destroy(cache_dump); break; } uwsgi_rwunlock(uc->lock); uwsgi_response_write_body_do(wsgi_req, cache_dump->buf, cache_dump->pos); uwsgi_buffer_destroy(cache_dump); break; case 17: if (wsgi_req->uh->_pktsize == 0) break; memset(&ucmc, 0, sizeof(struct uwsgi_cache_magic_context)); if (uwsgi_hooked_parse(wsgi_req->buffer, wsgi_req->uh->_pktsize, uwsgi_cache_magic_context_hook, &ucmc)) { break; } manage_magic_context(wsgi_req, &ucmc); break; default: break; } return UWSGI_OK; }
int uwsgi_file_serve(struct wsgi_request *wsgi_req, char *document_root, uint16_t document_root_len, char *path_info, uint16_t path_info_len, int is_a_file) { struct stat st; char real_filename[PATH_MAX + 1]; size_t real_filename_len = 0; char *filename = NULL; size_t filename_len = 0; struct uwsgi_string_list *index = NULL; if (!is_a_file) { filename = uwsgi_concat3n(document_root, document_root_len, "/", 1, path_info, path_info_len); filename_len = document_root_len + 1 + path_info_len; } else { filename = uwsgi_concat2n(document_root, document_root_len, "", 0); filename_len = document_root_len; } #ifdef UWSGI_DEBUG uwsgi_log("[uwsgi-fileserve] checking for %s\n", filename); #endif if (uwsgi.static_cache_paths) { uwsgi_rlock(uwsgi.static_cache_paths->lock); uint64_t item_len; char *item = uwsgi_cache_get2(uwsgi.static_cache_paths, filename, filename_len, &item_len); if (item && item_len > 0 && item_len <= PATH_MAX) { memcpy(real_filename, item, item_len); real_filename_len = item_len; uwsgi_rwunlock(uwsgi.static_cache_paths->lock); goto found; } uwsgi_rwunlock(uwsgi.static_cache_paths->lock); } if (!realpath(filename, real_filename)) { #ifdef UWSGI_DEBUG uwsgi_log("[uwsgi-fileserve] unable to get realpath() of the static file\n"); #endif free(filename); return -1; } real_filename_len = strlen(real_filename); if (uwsgi.static_cache_paths) { uwsgi_wlock(uwsgi.static_cache_paths->lock); uwsgi_cache_set2(uwsgi.static_cache_paths, filename, filename_len, real_filename, real_filename_len, uwsgi.use_static_cache_paths, UWSGI_CACHE_FLAG_UPDATE); uwsgi_rwunlock(uwsgi.static_cache_paths->lock); } found: free(filename); if (uwsgi_starts_with(real_filename, real_filename_len, document_root, document_root_len)) { struct uwsgi_string_list *safe = uwsgi.static_safe; while(safe) { if (!uwsgi_starts_with(real_filename, real_filename_len, safe->value, safe->len)) { goto safe; } safe = safe->next; } uwsgi_log("[uwsgi-fileserve] security error: %s is not under %.*s or a safe 
path\n", real_filename, document_root_len, document_root); return -1; } safe: if (!uwsgi_static_stat(wsgi_req, real_filename, &real_filename_len, &st, &index)) { if (index) { // if we are here the PATH_INFO need to be changed if (uwsgi_req_append_path_info_with_index(wsgi_req, index->value, index->len)) { return -1; } } // skip methods other than GET and HEAD if (uwsgi_strncmp(wsgi_req->method, wsgi_req->method_len, "GET", 3) && uwsgi_strncmp(wsgi_req->method, wsgi_req->method_len, "HEAD", 4)) { return -1; } // check for skippable ext struct uwsgi_string_list *sse = uwsgi.static_skip_ext; while (sse) { if (real_filename_len >= sse->len) { if (!uwsgi_strncmp(real_filename + (real_filename_len - sse->len), sse->len, sse->value, sse->len)) { return -1; } } sse = sse->next; } #ifdef UWSGI_ROUTING // before sending the file, we need to check if some rule applies if (!wsgi_req->is_routing && uwsgi_apply_routes_do(wsgi_req, NULL, 0) == UWSGI_ROUTE_BREAK) { return 0; } wsgi_req->routes_applied = 1; #endif return uwsgi_real_file_serve(wsgi_req, real_filename, real_filename_len, &st); } return -1; }