/*
 * Finish a multi-query batch: all queries must have produced a result
 * (asserted). Either hand off to the update phase, or grab a JSON
 * generator from the per-context pool and serialize the collected rows;
 * if no generator is available, answer with a 500.
 */
static void complete_multiple_query(multiple_query_ctx_t *query_ctx)
{
	assert(query_ctx->num_result == query_ctx->num_query);

	if (query_ctx->do_update) {
		do_updates(query_ctx);
		return;
	}

	// From here on the context owns cleanup of this batch.
	query_ctx->cleanup = true;
	query_ctx->gen = get_json_generator(&query_ctx->ctx->json_generator,
	                                    &query_ctx->ctx->json_generator_num);

	if (query_ctx->gen)
		serialize_items(query_ctx->res,
		                query_ctx->num_result,
		                &query_ctx->gen,
		                query_ctx->req);
	else
		send_error(INTERNAL_SERVER_ERROR, REQ_ERROR, query_ctx->req);
}
/*
 * Finish a multi-query batch (thread-context variant): all queries must
 * have produced a result (asserted). Either hand off to the update phase,
 * or recover the owning thread_context_t from the request's h2o context
 * and pull a JSON generator from its pool to serialize the collected
 * rows; if no generator is available, answer with a 500.
 */
static void complete_multiple_query(multiple_query_ctx_t *query_ctx)
{
	assert(query_ctx->num_result == query_ctx->num_query);

	if (query_ctx->do_update) {
		do_updates(query_ctx);
		return;
	}

	// The h2o context is embedded in thread_context_t; walk back to the owner.
	thread_context_t * const ctx =
		H2O_STRUCT_FROM_MEMBER(thread_context_t,
		                       event_loop.h2o_ctx,
		                       query_ctx->req->conn->ctx);

	query_ctx->gen = get_json_generator(&ctx->json_generator,
	                                    &ctx->json_generator_num);

	if (query_ctx->gen)
		serialize_items(query_ctx->res,
		                query_ctx->num_result,
		                &query_ctx->gen,
		                query_ctx->req);
	else
		send_error(INTERNAL_SERVER_ERROR, REQ_ERROR, query_ctx->req);
}
// handle the update file event static void do_handle_update_download(struct http_get_struct *get) { static int mkdir_res= -1; // flag as not tried int sts; // try to make sure the directory is there if(mkdir_res < 0){ mkdir_res = mkdir_config("tmp"); } if(get != NULL){ // did we finish properly? if(get->status == 0){ // release the memory if(get->fp){ fclose(get->fp); } free(get); // yes, lets start using the new file sts = move_file_to_updates("tmp/temp000.dat", files_lst, doing_custom); // trigger processing this file if(!sts){ do_updates(); } else { LOG_ERROR("Unable to finish %s processing (%d)", files_lst, errno); } // and go back to normal processing return; } // wait for the just completed thread so we free it's resources assert(get->thread_index<MAX_THREADS); SDL_WaitThread(thread_list[get->thread_index], NULL); thread_list[get->thread_index] = NULL; //no, we need to free the memory and try again if(get->fp){ fclose(get->fp); } free(get); } // we need to download the update file if we get here if(update_attempt_count++ < 3){ char filename[1024]; FILE *fp; // select a server if(num_update_servers > 1){ int num; srand( (unsigned)time( NULL ) ); num= rand()%num_update_servers; if(!strcmp(update_server, update_servers[num])){ // oops, the same server twice in a row, try to avoid num= rand()%num_update_servers; if(!strcmp(update_server, update_servers[num])){ // oops, the same server twice in a row, try to avoid num= rand()%num_update_servers; if(!strcmp(update_server, update_servers[num])){ // oops, the same server twice in a row, try to avoid num= rand()%num_update_servers; } } } safe_strncpy(update_server, update_servers[num], sizeof(update_server)); update_server[127]= '\0'; LOG_DEBUG("downloading from mirror %d of %d %s", num+1, num_update_servers, update_server); } else { safe_strncpy(update_server, update_servers[0], sizeof(update_server)); } ++temp_counter; fp = open_file_config("tmp/temp000.dat", "wb+"); if(fp == NULL){ LOG_ERROR("%s: %s 
\"tmp/temp000.dat\": %s\n", reg_error_str, cant_open_file, strerror(errno)); } else { if(is_this_files_lst) //files.lst { safe_snprintf(filename, sizeof(filename), "http://%s/updates%d%d%d/%s", update_server, VER_MAJOR, VER_MINOR, VER_RELEASE, files_lst); } else { //custom_files.lst safe_snprintf(filename, sizeof(filename), "http://%s/updates/%s", update_server, files_lst); } LOG_DEBUG("* server %s filename %s", update_server, filename); http_threaded_get_file(update_server, filename, fp, NULL, EVENT_UPDATES_DOWNLOADED); } // and keep running until we get a response return; } // total failure, error and clear the busy flag LOG_DEBUG("Failed to download (%s) 3 times. Giving up.", files_lst); update_busy= 0; }