/*  Drive the client's curl multi handle to completion, then assert that the
    single finished transfer produced HTTP code `status` and body `content`.
    Test helper: failures abort via assert().  Frees self->data and cleans up
    each finished easy handle. */
void zeb_curl_client_verify_response (zeb_curl_client_t *self, int status, char *content)
{
    do {
        struct timeval timeout;
        int rc;         /* select () return code */
        CURLMcode mc;   /* curl_multi_fdset () return code */
        fd_set fdread;
        fd_set fdwrite;
        fd_set fdexcep;
        int maxfd = -1;
        long curl_timeo = -1;

        FD_ZERO (&fdread);
        FD_ZERO (&fdwrite);
        FD_ZERO (&fdexcep);

        /*  Default wait of 1s; curl_multi_timeout() below may shorten it.  */
        timeout.tv_sec = 1;
        timeout.tv_usec = 0;

        curl_multi_timeout (self->multi_handle, &curl_timeo);
        if(curl_timeo >= 0) {
            timeout.tv_sec = curl_timeo / 1000;
            if(timeout.tv_sec > 1)
                timeout.tv_sec = 1;     /* cap the wait at one second */
            else
                timeout.tv_usec = (curl_timeo % 1000) * 1000;
        }

        /*  Get file descriptors from the transfers  */
        mc = curl_multi_fdset (self->multi_handle, &fdread, &fdwrite, &fdexcep, &maxfd);
        if (mc != CURLM_OK) {
            fprintf (stderr, "curl_multi_fdset () failed, code %d.\n", mc);
            break;
        }

        /*  On success maxfd is guaranteed to be >= -1.  maxfd == -1 means
            curl has no fds ready yet, so sleep 100ms (the minimum suggested
            in the curl_multi_fdset() docs) instead of selecting on them.  */
        if (maxfd == -1) {
            /*  Portable sleep for platforms other than Windows.  */
            struct timeval wait = { 0, 100 * 1000 };    /* 100ms */
            rc = select (0, NULL, NULL, NULL, &wait);
        }
        else {
            /*  NOTE: on some platforms select() modifies 'timeout';
                it is re-initialized at the top of every iteration.  */
            rc = select (maxfd+1, &fdread, &fdwrite, &fdexcep, &timeout);
        }

        switch (rc) {
        case -1:
            /*  select error — loop condition decides whether to retry  */
            break;
        case 0:     /* timeout: fall through and let curl drive retries/timeouts */
        default:
            /*  socket activity (or timeout): let libcurl make progress  */
            curl_multi_perform (self->multi_handle, &self->still_running);
            break;
        }
    } while (self->still_running);

    int dontcare;       /* msgs-remaining count from info_read; unused */
    CURLMsg *msg = NULL;
    /*  Drain the completion queue and verify each finished transfer.  */
    while ((msg = curl_multi_info_read (self->multi_handle, &dontcare))) {
        if (msg->msg == CURLMSG_DONE) {
            CURL *curl = msg->easy_handle;
            long actual_status;
            curl_easy_getinfo (curl, CURLINFO_RESPONSE_CODE, &actual_status);
            assert (actual_status == status);
            /*  self->data was accumulated by the write callback elsewhere  */
            assert (streq (self->data, content));
            zstr_free (&self->data);
            /*  remove before cleanup, per libcurl's documented order  */
            curl_multi_remove_handle (self->multi_handle, curl);
            curl_easy_cleanup (curl);
        }
    }
}
bool HTTP::Tick() { // curl_multi_perform() returns as soon as the reads/writes are done. // This function does not require that there actually is any data available for reading or that data can be written, it can be called just in case. int running_transfers = 0; int loopcheck = 0; CURLMcode result = CURLM_CALL_MULTI_PERFORM; while (result == CURLM_CALL_MULTI_PERFORM) { result = curl_multi_perform(multihandle, &running_transfers); loopcheck++; assert(loopcheck < 1000 && "infinite loop in HTTP::Tick()"); } CURLMsg * msg = NULL; do { int msgs_in_queue = 0; msg = curl_multi_info_read(multihandle, &msgs_in_queue); if (msg && msg->msg == CURLMSG_DONE) { // Handle completion. CURL * easyhandle = msg->easy_handle; // Get the url. std::map <CURL*, REQUEST>::iterator u = easyhandles.find(easyhandle); assert(u != easyhandles.end() && "corruption in easyhandles map"); std::string url = u->second.url; if (msg->data.result == CURLE_OK) { // Completion. requests[url].state = HTTPINFO::COMPLETE; curl_easy_getinfo(easyhandle, CURLINFO_SPEED_DOWNLOAD, &requests[url].speed); } else { // Failure. requests[url].state = HTTPINFO::FAILED; requests[url].error = "unknown"; } // Cleanup. curl_easy_cleanup(easyhandle); fclose(u->second.file); easyhandles.erase(easyhandle); } // Update status. for (std::map <CURL*, REQUEST>::iterator i = easyhandles.begin(); i != easyhandles.end(); i++) { CURL * easyhandle = i->first; std::map <CURL*, REQUEST>::iterator u = easyhandles.find(easyhandle); assert(u != easyhandles.end() && "corruption in requestUrls map"); std::string url = u->second.url; curl_easy_getinfo(easyhandle, CURLINFO_SPEED_DOWNLOAD, &requests[url].speed); requests[url].state = requests[url].downloaded > 0 ? HTTPINFO::DOWNLOADING : HTTPINFO::CONNECTING; } } while (msg); downloading = (running_transfers > 0); return downloading; }
/// Pump the multi handle and dispatch completion callbacks.
/// For every finished transfer: the easy handle is detached, its Transfer
/// object is removed from m_transfers, and every registered callback is
/// invoked with the success flag.  Exceptions thrown by callbacks are
/// caught and logged so one bad callback cannot break the loop.
void Downloader::update()
{
  // read data from the network
  CURLMcode ret;
  int running_handles;
  while((ret = curl_multi_perform(m_multi_handle, &running_handles)) == CURLM_CALL_MULTI_PERFORM)
  {
    log_debug << "updating" << std::endl;
  }

  // check if any downloads got finished
  int msgs_in_queue;
  CURLMsg* msg;
  while ((msg = curl_multi_info_read(m_multi_handle, &msgs_in_queue)))
  {
    switch(msg->msg)
    {
      case CURLMSG_DONE:
        {
          log_info << "Download completed with " << msg->data.result << std::endl;
          curl_multi_remove_handle(m_multi_handle, msg->easy_handle);

          // Locate the Transfer that owns this easy handle.
          auto it = std::find_if(m_transfers.begin(), m_transfers.end(),
                                 [&msg](const std::unique_ptr<Transfer>& rhs) {
                                   return rhs->get_curl_handle() == msg->easy_handle;
                                 });
          assert(it != m_transfers.end());
          TransferStatusPtr status = (*it)->get_status();
          status->error_msg = (*it)->get_error_buffer();
          // Destroys the Transfer (and with it the easy handle) now;
          // only `status` survives for the callbacks below.
          m_transfers.erase(it);

          if (msg->data.result == CURLE_OK)
          {
            bool success = true;
            for(auto& callback : status->callbacks)
            {
              try
              {
                callback(success);
              }
              catch(const std::exception& err)
              {
                // A throwing callback demotes the remaining callbacks
                // to failure and records the message.
                success = false;
                log_warning << "Exception in Downloader: " << err.what() << std::endl;
                status->error_msg = err.what();
              }
            }
          }
          else
          {
            log_warning << "Error: " << curl_easy_strerror(msg->data.result) << std::endl;
            for(auto& callback : status->callbacks)
            {
              try
              {
                callback(false);
              }
              catch(const std::exception& err)
              {
                // Failure callbacks must not throw; swallow and log.
                log_warning << "Illegal exception in Downloader: " << err.what() << std::endl;
              }
            }
          }
        }
        break;

      default:
        log_warning << "unhandled cURL message: " << msg->msg << std::endl;
        break;
    }
  }
}
// Responsible to read the tileData from the service // takes a vector of tileCoordinates to be read from the service. bool MapzenVectorTileJson::LoadTile(std::vector<glm::ivec3> _tileCoords) { std::vector<std::unique_ptr<std::string>> urls; if(_tileCoords.size() == 0) { logMsg("No tiles to fetch."); } //construct tileID and url for every tileCoord for(auto& tileCoord : _tileCoords) { urls.push_back(constructURL(tileCoord)); } CURLM *multiHandle; CURLMsg *handleMsg; // file descriptors to be used with curl_multi_fdset and select() fd_set fdRead; fd_set fdWrite; fd_set fdExcep; int fdMax; struct timeval timeout; int rc; //return value for select() call CURLMcode cres; int queuedHandles, numHandles = urls.size(); int prevHandle; int fetchTry = 0; //Counter to check for curl/select timeOuts.. maxed by static count MAX_FETCH_TRY int fdsetTimeoutCount = 0; // out will store the stringStream contents from libCurl std::stringstream *out[urls.size()]; curl_global_init(CURL_GLOBAL_DEFAULT); multiHandle = curl_multi_init(); int count = 0; // initialize curl simple interface for every url for(auto& url : urls) { out[count] = new std::stringstream; curlInit(multiHandle, *url.get(), out[count]); count++; } //do curl stuff if(multiHandle) { //start fetching cres = curl_multi_perform(multiHandle, &numHandles); if(cres != CURLM_OK) { logMsg("curl_multi_perform failed %d\n", cres); for(auto i = 0; i < urls.size(); i++) { delete out[i]; } urls.clear(); return false; } //if numHandles is 0, then multi_perform has no easy handles to perform fetching if(!numHandles) { logMsg("Number of easy handles returned by curl_multi_perform is 0, should not be."); for(auto i = 0; i < urls.size(); i++) { delete out[i]; } urls.clear(); return true; } //Start fetching info until no easy handle left to fetch data do { //set all file descriptors to 0 FD_ZERO(&fdRead); FD_ZERO(&fdWrite); FD_ZERO(&fdExcep); //timeout specification for select() call //select() will unblock either when a fd is ready or 
tileout is reached timeout.tv_sec = 1; //enough time for fd to be ready reading data... could be optimized. timeout.tv_usec = 0; //get file descriptors from the transfer cres = curl_multi_fdset(multiHandle, &fdRead, &fdWrite, &fdExcep, &fdMax); if(cres != CURLM_OK) { logMsg("curl_multi_fdset failed: %d\n", cres); for(auto i = 0; i < urls.size(); i++) { delete out[i]; } urls.clear(); return false; } //wait and repeat until curl has something to report to the kernel wrt file descriptors // TODO: if no internet, then this gets stuck... put a timeout here. while(fdMax < 0 && fdsetTimeoutCount < 20) { //TODO: Get a better heuristic on the sleep milliseconds //sleeps for 100 msec and calls perform and fdset to see if multi perform has started its job std::this_thread::sleep_for(std::chrono::milliseconds(100)); cres = curl_multi_perform(multiHandle, &numHandles); prevHandle = numHandles; curl_multi_fdset(multiHandle, &fdRead, &fdWrite, &fdExcep, &fdMax); std::cout<<"Here\n"; //TODO: Remove this. Its here to test how many times this loop runs till //multi_perform starts doing stuff fdsetTimeoutCount++; } if(fdMax < 0) { logMsg("fdMax set timeout: fdmax still not set by curl_multi_fdset. Internet connection??"); for(auto i = 0; i < urls.size(); i++) { delete out[i]; } urls.clear(); return false; } //select blocks the thread until the fd set by curl is ready with data. rc = select(fdMax+1, &fdRead, &fdWrite, &fdExcep, &timeout); // helper variables to convert extracted data to Json on the spot instead of waiting for all urls to be // fetched and then converting the extracted data to json char *url; char *tmpOutData; //to read the CURLINFO_PRIVATE data which is type casted to char* from stringstream* std::string tmpJsonData; int length; std::shared_ptr<Json::Value> jsonVal(new Json::Value); Json::Reader jsonReader; // see what select returned switch(rc) { case -1: //select call ERRORed break; case 0: std::cout<<"Here timeout\n"; //TODO: Remove this. 
Its here to test how many times select times out. // So far never with 1 sec of timeout. //select call Timed out. No fd ready to read anything. fetchTry++; if(fetchTry == MAX_FETCH_TRY) { curl_multi_cleanup(multiHandle); curl_global_cleanup(); for(auto i = 0; i < urls.size(); i++) { delete out[i]; } urls.clear(); return false; } break; default: // sleep for 5 msec to give enough time for curl to read data for any of the file descriptors. std::this_thread::sleep_for(std::chrono::milliseconds(5)); std::cout<<"Possible Change\n"; //TODO: Remove this. Its here to test how many times fd is ready and // will result in a complete data read //Perform again to see what happened with individual easy handles curl_multi_perform(multiHandle,&numHandles); // if easy handle status changed some urls are done. if(prevHandle != numHandles) { std::cout<<"Change happened\n";//TODO: Remove this. Only here for testing prevHandle = numHandles; handleMsg = curl_multi_info_read(multiHandle, &queuedHandles); // for every url done fill the jsonValue for(auto qHandItr = 0; qHandItr <= queuedHandles; qHandItr++) { if(handleMsg->msg == CURLMSG_DONE) { //get the url from the easyHandle curl_easy_getinfo(handleMsg->easy_handle, CURLINFO_EFFECTIVE_URL , &url); //get the tmpOutData which is holding the extracted info from the url curl_easy_getinfo(handleMsg->easy_handle, CURLINFO_PRIVATE , &tmpOutData); // typecast back from char* to std::stringstream tmpJsonData = ((std::stringstream *)tmpOutData)->str(); length = tmpJsonData.size(); jsonReader.parse(tmpJsonData.c_str(), tmpJsonData.c_str() + length, *(jsonVal.get())); // no way to get what ID this url was for so have to extract ID from url m_JsonRoots[extractIDFromUrl(std::string(url))] = jsonVal; logMsg("R: %d - %s <%s>\n", handleMsg->data.result, curl_easy_strerror(handleMsg->data.result), url); curl_multi_remove_handle(multiHandle, handleMsg->easy_handle); curl_easy_cleanup(handleMsg->easy_handle); } } } break; } }while(numHandles); 
curl_multi_cleanup(multiHandle); curl_global_cleanup(); } for(auto i = 0; i < urls.size(); i++) { delete out[i]; } urls.clear(); return true; }
// Thin pass-through to curl_multi_info_read() for this multi handle.
// Returns the next completion message (or NULL) and writes the number of
// messages still queued into *msgs_in_queue.
CURLMsg* LLCurl::Multi::info_read(S32* msgs_in_queue)
{
    return curl_multi_info_read(mCurlMultiHandle, msgs_in_queue);
}
/* Test driver: performs a single transfer over the multi interface with an
 * SSL_CTX callback installed (sslctxfun / sslctxparm), guarding both the
 * outer loop and the perform loop with wall-clock hang timeouts.  Returns
 * the easy handle's result code, a small positive step marker on internal
 * failure, or a TEST_ERR_* code. */
int test(char *URL)
{
  CURLM* multi;
  sslctxparm p;
  CURLMcode res;
  int running;
  char done = FALSE;
  int i = 0;                 /* return value / failure step marker */
  CURLMsg *msg;
  struct timeval ml_start;   /* main-loop start, for hang detection */
  struct timeval mp_start;   /* perform-loop start, for hang detection */
  char ml_timedout = FALSE;
  char mp_timedout = FALSE;

  if(libtest_arg2) {
    portnum = atoi(libtest_arg2);
  }

  if (curl_global_init(CURL_GLOBAL_ALL) != CURLE_OK) {
    fprintf(stderr, "curl_global_init() failed\n");
    return TEST_ERR_MAJOR_BAD;
  }

  if ((p.curl = curl_easy_init()) == NULL) {
    fprintf(stderr, "curl_easy_init() failed\n");
    curl_global_cleanup();
    return TEST_ERR_MAJOR_BAD;
  }

  p.accessinfoURL = (unsigned char *) strdup(URL);
  p.accesstype = OBJ_obj2nid(OBJ_txt2obj("AD_DVCS",0));

  curl_easy_setopt(p.curl, CURLOPT_URL, p.accessinfoURL);
  curl_easy_setopt(p.curl, CURLOPT_SSL_CTX_FUNCTION, sslctxfun);
  curl_easy_setopt(p.curl, CURLOPT_SSL_CTX_DATA, &p);
  curl_easy_setopt(p.curl, CURLOPT_SSL_VERIFYPEER, FALSE);
  /* NOTE(review): VERIFYHOST=1 is not a meaningful setting in libcurl
   * (only 0 and 2 are); presumably intentional for this test — confirm. */
  curl_easy_setopt(p.curl, CURLOPT_SSL_VERIFYHOST, 1);

  if ((multi = curl_multi_init()) == NULL) {
    fprintf(stderr, "curl_multi_init() failed\n");
    curl_easy_cleanup(p.curl);
    curl_global_cleanup();
    return TEST_ERR_MAJOR_BAD;
  }

  if ((res = curl_multi_add_handle(multi, p.curl)) != CURLM_OK) {
    fprintf(stderr, "curl_multi_add_handle() failed, "
            "with code %d\n", res);
    curl_multi_cleanup(multi);
    curl_easy_cleanup(p.curl);
    curl_global_cleanup();
    return TEST_ERR_MAJOR_BAD;
  }

  fprintf(stderr, "Going to perform %s\n", (char *)p.accessinfoURL);

  ml_timedout = FALSE;
  ml_start = tutil_tvnow();

  while (!done) {
    fd_set rd, wr, exc;
    int max_fd;
    struct timeval interval;

    interval.tv_sec = 1;
    interval.tv_usec = 0;

    /* abort if the overall test has been running too long */
    if (tutil_tvdiff(tutil_tvnow(), ml_start) > MAIN_LOOP_HANG_TIMEOUT) {
      ml_timedout = TRUE;
      break;
    }
    mp_timedout = FALSE;
    mp_start = tutil_tvnow();

    /* drive the transfer; res is reset to CALL_MULTI_PERFORM at loop end */
    while (res == CURLM_CALL_MULTI_PERFORM) {
      res = curl_multi_perform(multi, &running);
      if (tutil_tvdiff(tutil_tvnow(), mp_start) > MULTI_PERFORM_HANG_TIMEOUT) {
        mp_timedout = TRUE;
        break;
      }
      fprintf(stderr, "running=%d res=%d\n",running,res);
      if (running <= 0) {
        done = TRUE;
        break;
      }
    }
    if (mp_timedout || done)
      break;

    if (res != CURLM_OK) {
      fprintf(stderr, "not okay???\n");
      i = 80;     /* failure marker: perform error */
      break;
    }

    FD_ZERO(&rd);
    FD_ZERO(&wr);
    FD_ZERO(&exc);
    max_fd = 0;

    if (curl_multi_fdset(multi, &rd, &wr, &exc, &max_fd) != CURLM_OK) {
      fprintf(stderr, "unexpected failured of fdset.\n");
      i = 89;     /* failure marker: fdset error */
      break;
    }

    if (select_test(max_fd+1, &rd, &wr, &exc, &interval) == -1) {
      fprintf(stderr, "bad select??\n");
      i = 95;     /* failure marker: select error */
      break;
    }

    res = CURLM_CALL_MULTI_PERFORM;
  }

  if (ml_timedout || mp_timedout) {
    if (ml_timedout) fprintf(stderr, "ml_timedout\n");
    if (mp_timedout) fprintf(stderr, "mp_timedout\n");
    fprintf(stderr, "ABORTING TEST, since it seems "
            "that it would have run forever.\n");
    i = TEST_ERR_RUNS_FOREVER;
  }
  else {
    msg = curl_multi_info_read(multi, &running);
    /* this should now contain a result code from the easy handle, get it */
    if(msg)
      i = msg->data.result;
    fprintf(stderr, "all done\n");
  }

  curl_multi_remove_handle(multi, p.curl);
  curl_easy_cleanup(p.curl);
  curl_multi_cleanup(multi);
  curl_global_cleanup();

  free(p.accessinfoURL);

  return i;
}
/***************************************************************************
 * This function is still only for testing purposes. It makes a great way
 * to run the full test suite on the multi interface instead of the easy one.
 ***************************************************************************
 *
 * The *new* curl_easy_perform() is the external interface that performs a
 * transfer previously setup.
 *
 * Wrapper-function that: creates a multi handle, adds the easy handle to it,
 * runs curl_multi_perform() until the transfer is done, then detaches the
 * easy handle, destroys the multi handle and returns the easy handle's return
 * code. This will make everything internally use and assume multi interface.
 */
CURLcode curl_easy_perform(CURL *easy)
{
  CURLM *multi;
  CURLMcode mcode;
  CURLcode code = CURLE_OK;
  int still_running;
  struct timeval timeout;
  int rc;                     /* Curl_select() return code */
  CURLMsg *msg;
  fd_set fdread;
  fd_set fdwrite;
  fd_set fdexcep;
  int maxfd;

  if(!easy)
    return CURLE_BAD_FUNCTION_ARGUMENT;

  multi = curl_multi_init();
  if(!multi)
    return CURLE_OUT_OF_MEMORY;

  mcode = curl_multi_add_handle(multi, easy);
  if(mcode) {
    curl_multi_cleanup(multi);
    /* map the multi error onto the closest easy-interface code */
    if(mcode == CURLM_OUT_OF_MEMORY)
      return CURLE_OUT_OF_MEMORY;
    else
      return CURLE_FAILED_INIT;
  }

  /* we start some action by calling perform right away */
  do {
    while(CURLM_CALL_MULTI_PERFORM == curl_multi_perform(multi, &still_running));

    if(!still_running)
      break;

    FD_ZERO(&fdread);
    FD_ZERO(&fdwrite);
    FD_ZERO(&fdexcep);

    /* timeout once per second */
    timeout.tv_sec = 1;
    timeout.tv_usec = 0;

    /* Old deprecated style: get file descriptors from the transfers */
    curl_multi_fdset(multi, &fdread, &fdwrite, &fdexcep, &maxfd);
    rc = Curl_select(maxfd+1, &fdread, &fdwrite, &fdexcep, &timeout);

    /* The way is to extract the sockets and wait for them without using
       select. This whole alternative version should probably rather use the
       curl_multi_socket() approach. */

    if(rc == -1)
      /* select error */
      break;

    /* timeout or data to send/receive => loop! */
  } while(still_running);

  /* second arg (msgs remaining) is reused scratch; value is discarded */
  msg = curl_multi_info_read(multi, &rc);
  if(msg)
    code = msg->data.result;

  mcode = curl_multi_remove_handle(multi, easy);
  /* what to do if it fails? */

  mcode = curl_multi_cleanup(multi);
  /* what to do if it fails? */

  return code;
}
static CURLcode wait_or_timeout(struct Curl_multi *multi, struct events *ev) { bool done = FALSE; CURLMcode mcode; CURLcode rc = CURLE_OK; while(!done) { CURLMsg *msg; struct socketmonitor *m; struct pollfd *f; struct pollfd fds[4]; int numfds=0; int pollrc; int i; struct timeval before; struct timeval after; /* populate the fds[] array */ for(m = ev->list, f=&fds[0]; m; m = m->next) { f->fd = m->socket.fd; f->events = m->socket.events; f->revents = 0; /* fprintf(stderr, "poll() %d check socket %d\n", numfds, f->fd); */ f++; numfds++; } /* get the time stamp to use to figure out how long poll takes */ before = curlx_tvnow(); /* wait for activity or timeout */ pollrc = Curl_poll(fds, numfds, (int)ev->ms); after = curlx_tvnow(); ev->msbump = FALSE; /* reset here */ if(0 == pollrc) { /* timeout! */ ev->ms = 0; /* fprintf(stderr, "call curl_multi_socket_action( TIMEOUT )\n"); */ mcode = curl_multi_socket_action(multi, CURL_SOCKET_TIMEOUT, 0, &ev->running_handles); } else if(pollrc > 0) { /* loop over the monitored sockets to see which ones had activity */ for(i = 0; i< numfds; i++) { if(fds[i].revents) { /* socket activity, tell libcurl */ int act = poll2cselect(fds[i].revents); /* convert */ infof(multi->easyp, "call curl_multi_socket_action( socket %d )\n", fds[i].fd); mcode = curl_multi_socket_action(multi, fds[i].fd, act, &ev->running_handles); } } if(!ev->msbump) /* If nothing updated the timeout, we decrease it by the spent time. * If it was updated, it has the new timeout time stored already. */ ev->ms += curlx_tvdiff(after, before); } if(mcode) return CURLE_URL_MALFORMAT; /* TODO: return a proper error! */ /* we don't really care about the "msgs_in_queue" value returned in the second argument */ msg = curl_multi_info_read(multi, &pollrc); if(msg) { rc = msg->data.result; done = TRUE; } } return rc; }
/* Pipelining test driver: reads URLs + start offsets from log/urls.txt,
 * adds each handle to the multi stack at its scheduled time, and pumps
 * the multi interface until every handle is added and finished. */
int test(char *URL)
{
  int res = 0;
  CURLM *m = NULL;
  CURLMsg *msg; /* for picking up messages with the transfer status */
  int msgs_left; /* how many messages are left */
  int running;
  int handlenum = 0;            /* next handle index to add */
  struct timeval last_handle_add;

  if(parse_url_file("log/urls.txt") <= 0)
    goto test_cleanup;

  start_test_timing();

  curl_global_init(CURL_GLOBAL_ALL);

  multi_init(m);

  create_handles();

  multi_setopt(m, CURLMOPT_PIPELINING, 1L);
  multi_setopt(m, CURLMOPT_MAX_HOST_CONNECTIONS, 2L);
  multi_setopt(m, CURLMOPT_MAX_PIPELINE_LENGTH, 3L);
  multi_setopt(m, CURLMOPT_CONTENT_LENGTH_PENALTY_SIZE, 15000L);
  multi_setopt(m, CURLMOPT_CHUNK_LENGTH_PENALTY_SIZE, 10000L);
  multi_setopt(m, CURLMOPT_PIPELINING_SITE_BL, site_blacklist);
  multi_setopt(m, CURLMOPT_PIPELINING_SERVER_BL, server_blacklist);

  last_handle_add = tutil_tvnow();

  for(;;) {
    struct timeval interval;
    struct timeval now;
    long int msnow, mslast;
    fd_set rd, wr, exc;
    int maxfd = -99;
    long timeout;

    interval.tv_sec = 1;
    interval.tv_usec = 0;

    /* add the next handle once its scheduled start time (urltime) passed */
    if(handlenum < num_handles) {
      now = tutil_tvnow();
      msnow = now.tv_sec * 1000 + now.tv_usec / 1000;
      mslast = last_handle_add.tv_sec * 1000 + last_handle_add.tv_usec / 1000;
      if(msnow - mslast >= urltime[handlenum] && handlenum < num_handles) {
        fprintf(stdout, "Adding handle %d\n", handlenum);
        setup_handle(URL, m, handlenum);
        last_handle_add = now;
        handlenum++;
      }
    }

    curl_multi_perform(m, &running);

    abort_on_test_timeout();

    /* See how the transfers went */
    while ((msg = curl_multi_info_read(m, &msgs_left))) {
      if (msg->msg == CURLMSG_DONE) {
        int i, found = 0;

        /* Find out which handle this message is about */
        for (i = 0; i < num_handles; i++) {
          found = (msg->easy_handle == handles[i]);
          if(found)
            break;
        }

        printf("Handle %d Completed with status %d\n", i, msg->data.result);
        curl_multi_remove_handle(m, handles[i]);
      }
    }

    if(handlenum == num_handles && !running) {
      break; /* done */
    }

    FD_ZERO(&rd);
    FD_ZERO(&wr);
    FD_ZERO(&exc);

    curl_multi_fdset(m, &rd, &wr, &exc, &maxfd);

    /* At this point, maxfd is guaranteed to be greater or equal than -1. */

    curl_multi_timeout(m, &timeout);

    if(timeout < 0)
      timeout = 1;

    interval.tv_sec = timeout / 1000;
    interval.tv_usec = (timeout % 1000) * 1000;

    /* NOTE(review): the two lines below discard the curl_multi_timeout()
     * derived interval computed just above (dead stores) and force a fixed
     * 1ms poll — presumably deliberate to keep the handle-add schedule
     * responsive in this test; confirm before "fixing". */
    interval.tv_sec = 0;
    interval.tv_usec = 1000;

    select_test(maxfd+1, &rd, &wr, &exc, &interval);

    abort_on_test_timeout();
  }

test_cleanup:

  remove_handles();

  /* undocumented cleanup sequence - type UB */

  curl_multi_cleanup(m);
  curl_global_cleanup();

  free_urls();
  return res;
}
/* Fetch up to CNT URLs with at most MAX concurrent transfers on one multi
 * handle, printing per-transfer timing/size stats and a total page size.
 * New transfers are started from the completion loop as slots free up. */
int main(void)
{
  CURLM *cm;
  CURLMsg *msg;
  long L;                       /* timeout from curl_multi_timeout(), ms */
  unsigned int C=0;             /* index of the next URL to start */
  int M, Q, U = -1;             /* maxfd, msgs-in-queue, running handles */
  fd_set R, W, E;
  struct timeval T;
  int res;
  double bytes=0;
  long header_bytes=0;
  double transfer_time=0;
  unsigned long page_size=0;
  struct timeval start, stop;

  gettimeofday(&start, NULL);

  curl_global_init(CURL_GLOBAL_ALL);

  cm = curl_multi_init();

  /* we can optionally limit the total amount of connections this multi
     handle uses */
  curl_multi_setopt(cm, CURLMOPT_MAXCONNECTS, (long)MAX);

  for(C = 0; C < MAX; ++C) {
    init(cm, C);
  }

  while(U) {
    curl_multi_perform(cm, &U);

    if(U) {
      FD_ZERO(&R);
      FD_ZERO(&W);
      FD_ZERO(&E);

      if(curl_multi_fdset(cm, &R, &W, &E, &M)) {
        fprintf(stderr, "E: curl_multi_fdset\n");
        return EXIT_FAILURE;
      }

      if(curl_multi_timeout(cm, &L)) {
        fprintf(stderr, "E: curl_multi_timeout\n");
        return EXIT_FAILURE;
      }
      if(L == -1)
        L = 100;

      if(M == -1) {
        /* no fds to watch yet: just sleep for the suggested timeout */
#ifdef WIN32
        Sleep(L);
#else
        /* NOTE(review): sleep() takes whole seconds, so L < 1000 sleeps 0
         * and this loop can busy-spin; a select()-based sub-second sleep
         * would be safer — confirm before changing. */
        sleep((unsigned int)L / 1000);
#endif
      }
      else {
        T.tv_sec = L/1000;
        T.tv_usec = (L%1000)*1000;

        if(0 > select(M+1, &R, &W, &E, &T)) {
          fprintf(stderr, "E: select(%i,,,,%li): %i: %s\n",
                  M+1, L, errno, strerror(errno));
          return EXIT_FAILURE;
        }
      }
    }

    /* harvest finished transfers and start queued ones */
    while((msg = curl_multi_info_read(cm, &Q))) {
      if(msg->msg == CURLMSG_DONE) {
        char *url;
        CURL *e = msg->easy_handle;
        /* CURLINFO_PRIVATE was set to the url string by init() */
        curl_easy_getinfo(msg->easy_handle, CURLINFO_PRIVATE, &url);
        fprintf(stderr, "R: %d - %s <%s>\n",
                msg->data.result, curl_easy_strerror(msg->data.result), url);
        if((res = curl_easy_getinfo(msg->easy_handle, CURLINFO_SIZE_DOWNLOAD, &bytes)) != CURLE_OK ||
           (res = curl_easy_getinfo(msg->easy_handle, CURLINFO_HEADER_SIZE, &header_bytes)) != CURLE_OK ||
           (res = curl_easy_getinfo(msg->easy_handle, CURLINFO_TOTAL_TIME, &transfer_time)) != CURLE_OK) {
          fprintf(stderr, "cURL error: %s\n", curl_easy_strerror(res));
        }
        gettimeofday(&stop, NULL);
        page_size+=(long)bytes+header_bytes;
        /* wall-clock ms since program start (not per-transfer) */
        double time=((stop.tv_sec - start.tv_sec) * 1000 + (stop.tv_usec - start.tv_usec) / 1000);
        /* NOTE(review): %lu with a signed long argument — harmless on common
         * ABIs but technically mismatched; confirm. */
        printf("[%f]Time: %f Size: %lu\n", time,transfer_time, (long)bytes+header_bytes);
        curl_multi_remove_handle(cm, e);
        curl_easy_cleanup(e);
      }
      else {
        fprintf(stderr, "E: CURLMsg (%d)\n", msg->msg);
      }
      if(C < CNT) {
        init(cm, C++);
        U++; /* just to prevent it from remaining at 0 if there are more
                URLs to get */
      }
    }
  }

  curl_multi_cleanup(cm);
  curl_global_cleanup();
  printf("Page size: %ld\n",page_size);
  return EXIT_SUCCESS;
}
/* FIXME dont run every time, only if dlqueue is full!!! */ int curlFetch(struct url *ptr, int dlnum) { CURLM *cm; CURLMsg *msg; long L=100; unsigned int C=0; int M, Q, U = -1; fd_set R, W, E; struct timeval T; CURLMcode ret; qboolean got404=false; if (!cls.downloadServer) return 0; curl_global_init(CURL_GLOBAL_ALL); cm = curl_multi_init(); curl_easy_setopt(cm, CURLMOPT_MAXCONNECTS, (long)MAX); for (C = 0; C < dlnum; ++C) init(cm, C); while (U) { ret = curl_multi_perform(cm, &U); if (U) { FD_ZERO(&R); FD_ZERO(&W); FD_ZERO(&E); if (ret != CURLM_OK) { Com_Printf ("curl_multi_perform error, Aborting HTTP downloads.\n"); return 1; } if (curl_multi_fdset(cm, &R, &W, &E, &M)) { Com_Printf("E: curl_multi_fdset\n"); return 1; } if (L == -1) L = 100; if (M == -1) { /* obviously we need to sleep a short while so we DO NOT RUN OUT OF FDs! */ #ifdef WIN32 Sleep(L); #else sleep(L / 1000); #endif } else { T.tv_sec = L/1000; T.tv_usec = (L%1000)*1000; //T.tv_sec = 5; //T.tv_usec = 0; if (0 > select(M+1, &R, &W, &E, &T)) { Com_Printf("E: select(%i,,,,%li): %i: %s\n", M+1, L, errno, strerror(errno)); return 1; } } } while ((msg = curl_multi_info_read(cm, &Q))) { // allow user to use console CL_SendCommand (); if (msg->msg == CURLMSG_DONE) { long responseCode; extern struct MemoryStruct *memPtr; char *url; double recvsize; double totaltime; char *localfile; char *fullurl; int remainingFiles; static int finishcnt; CURL *e = msg->easy_handle; curl_easy_getinfo(msg->easy_handle, CURLINFO_RESPONSE_CODE, &responseCode); curl_easy_getinfo(msg->easy_handle, CURLINFO_PRIVATE, &url); curl_easy_getinfo(msg->easy_handle, CURLINFO_SIZE_DOWNLOAD, &recvsize); curl_easy_getinfo(msg->easy_handle, CURLINFO_TOTAL_TIME, &totaltime); curl_easy_getinfo(msg->easy_handle, CURLINFO_PRIVATE, &localfile); curl_easy_getinfo(msg->easy_handle, CURLINFO_EFFECTIVE_URL, &fullurl); remainingFiles=0; if (responseCode == 404) { //Com_Printf("[HTTP] %s [404 Not Found] [x remaining files]\n", //localfile, 
recvsize/1000,recvsize/(1000*totaltime),remainingFiles); // dont show 404 error in some cases, prettier console... #if HTTP404ERROR Com_Printf("[HTTP] %s [404 Not Found]\n", localfile, recvsize/1000,recvsize/(1000*totaltime),remainingFiles); got404=true; cls.downloadnow=false; // dont download over http again... #endif } else if (responseCode == 200) { /* Com_Printf("[HTTP] %s [%.f kB, %.0f kB/sec] [x remaining files]\n", localfile, recvsize/1000,recvsize/(1000*totaltime),remainingFiles); */ Com_Printf("[HTTP] %s [%.f kB, %.0f kB/sec]\n", localfile, recvsize/1000,recvsize/(1000*totaltime),remainingFiles); binaryWrite(localfile, memPtr->memory, memPtr->size); } finishcnt++; if (memPtr && memPtr->memory) free(memPtr->memory); curl_multi_remove_handle(cm, e); curl_easy_cleanup(e); } else { Com_Printf("E: CURLMsg (%d)\n", msg->msg); return 1; } if (C < dlnum) { init(cm, C++); U++; } } } curl_multi_cleanup(cm); curl_global_cleanup(); return got404 ? 1:0; }
/* * Download a file over HTTP/2, take care of server push. */ int main(void) { CURL *easy; CURLM *multi_handle; int still_running; /* keep number of running handles */ int transfers = 1; /* we start with one */ struct CURLMsg *m; /* init a multi stack */ multi_handle = curl_multi_init(); easy = curl_easy_init(); /* set options */ setup(easy); /* add the easy transfer */ curl_multi_add_handle(multi_handle, easy); curl_multi_setopt(multi_handle, CURLMOPT_PIPELINING, CURLPIPE_MULTIPLEX); curl_multi_setopt(multi_handle, CURLMOPT_PUSHFUNCTION, server_push_callback); curl_multi_setopt(multi_handle, CURLMOPT_PUSHDATA, &transfers); /* we start some action by calling perform right away */ curl_multi_perform(multi_handle, &still_running); do { struct timeval timeout; int rc; /* select() return code */ CURLMcode mc; /* curl_multi_fdset() return code */ fd_set fdread; fd_set fdwrite; fd_set fdexcep; int maxfd = -1; long curl_timeo = -1; FD_ZERO(&fdread); FD_ZERO(&fdwrite); FD_ZERO(&fdexcep); /* set a suitable timeout to play around with */ timeout.tv_sec = 1; timeout.tv_usec = 0; curl_multi_timeout(multi_handle, &curl_timeo); if(curl_timeo >= 0) { timeout.tv_sec = curl_timeo / 1000; if(timeout.tv_sec > 1) timeout.tv_sec = 1; else timeout.tv_usec = (curl_timeo % 1000) * 1000; } /* get file descriptors from the transfers */ mc = curl_multi_fdset(multi_handle, &fdread, &fdwrite, &fdexcep, &maxfd); if(mc != CURLM_OK) { fprintf(stderr, "curl_multi_fdset() failed, code %d.\n", mc); break; } /* On success the value of maxfd is guaranteed to be >= -1. We call select(maxfd + 1, ...); specially in case of (maxfd == -1) there are no fds ready yet so we call select(0, ...) --or Sleep() on Windows-- to sleep 100ms, which is the minimum suggested value in the curl_multi_fdset() doc. */ if(maxfd == -1) { #ifdef _WIN32 Sleep(100); rc = 0; #else /* Portable sleep for platforms other than Windows. 
*/ struct timeval wait = { 0, 100 * 1000 }; /* 100ms */ rc = select(0, NULL, NULL, NULL, &wait); #endif } else { /* Note that on some platforms 'timeout' may be modified by select(). If you need access to the original value save a copy beforehand. */ rc = select(maxfd + 1, &fdread, &fdwrite, &fdexcep, &timeout); } switch(rc) { case -1: /* select error */ break; case 0: default: /* timeout or readable/writable sockets */ curl_multi_perform(multi_handle, &still_running); break; } /* * A little caution when doing server push is that libcurl itself has * created and added one or more easy handles but we need to clean them up * when we are done. */ do { int msgq = 0;; m = curl_multi_info_read(multi_handle, &msgq); if(m && (m->msg == CURLMSG_DONE)) { CURL *e = m->easy_handle; transfers--; curl_multi_remove_handle(multi_handle, e); curl_easy_cleanup(e); } } while(m); } while(transfers); /* as long as we have transfers going */ curl_multi_cleanup(multi_handle); return 0; }
/* Worker-thread main loop for the web (HTTP) subsystem.
 * Owns the curl multi handle for its whole lifetime: pops queued
 * tr_web_task items (under web->taskLock), waits on curl's fds with a
 * bounded select(), pumps curl_multi_perform(), and hands finished tasks
 * back to the event thread via tr_runInEventThread().  Exits when
 * close_mode requests it (immediately, or once idle). */
static void tr_webThreadFunc( void * vsession )
{
    int unused;                 /* scratch for out-params we ignore */
    CURLM * multi;
    struct tr_web * web;
    int taskCount = 0;          /* tasks currently inside the multi handle */
    tr_session * session = vsession;

    /* try to enable ssl for https support; but if that fails,
     * try a plain vanilla init */
    if( curl_global_init( CURL_GLOBAL_SSL ) )
        curl_global_init( 0 );

    web = tr_new0( struct tr_web, 1 );
    web->close_mode = ~0;       /* sentinel: no close requested yet */
    web->taskLock = tr_lockNew( );
    web->tasks = NULL;
    multi = curl_multi_init( );
    session->web = web;

    for( ;; )
    {
        long msec;
        CURLMsg * msg;
        CURLMcode mcode;
        struct tr_web_task * task;

        if( web->close_mode == TR_WEB_CLOSE_NOW )
            break;
        if( ( web->close_mode == TR_WEB_CLOSE_WHEN_IDLE ) && !taskCount )
            break;

        /* add tasks from the queue */
        tr_lockLock( web->taskLock );
        while(( task = tr_list_pop_front( &web->tasks )))
        {
            curl_multi_add_handle( multi, createEasy( session, task ));
            ++taskCount;
        }
        tr_lockUnlock( web->taskLock );

        /* maybe wait a little while before calling curl_multi_perform() */
        msec = 0;
        curl_multi_timeout( multi, &msec );
        if( msec < 0 )
            msec = THREADFUNC_MAX_SLEEP_MSEC;
        if( msec > 0 )
        {
            int usec;
            int max_fd;
            struct timeval t;
            fd_set r_fd_set, w_fd_set, c_fd_set;

            max_fd = 0;
            FD_ZERO( &r_fd_set );
            FD_ZERO( &w_fd_set );
            FD_ZERO( &c_fd_set );
            curl_multi_fdset( multi, &r_fd_set, &w_fd_set, &c_fd_set, &max_fd );

            /* cap the sleep so newly queued tasks are picked up promptly */
            if( msec > THREADFUNC_MAX_SLEEP_MSEC )
                msec = THREADFUNC_MAX_SLEEP_MSEC;

            usec = msec * 1000;
            t.tv_sec = usec / 1000000;
            t.tv_usec = usec % 1000000;
            tr_select( max_fd+1, &r_fd_set, &w_fd_set, &c_fd_set, &t );
        }

        /* call curl_multi_perform() */
        do {
            mcode = curl_multi_perform( multi, &unused );
        } while( mcode == CURLM_CALL_MULTI_PERFORM );

        /* pump completed tasks from the multi */
        while(( msg = curl_multi_info_read( multi, &unused )))
        {
            if(( msg->msg == CURLMSG_DONE ) && ( msg->easy_handle != NULL ))
            {
                struct tr_web_task * task;
                CURL * e = msg->easy_handle;
                /* CURLINFO_PRIVATE holds the owning tr_web_task */
                curl_easy_getinfo( e, CURLINFO_PRIVATE, (void*)&task );
                curl_easy_getinfo( e, CURLINFO_RESPONSE_CODE, &task->code );
                curl_multi_remove_handle( multi, e );
                curl_easy_cleanup( e );
                /* task ownership passes to the event thread here */
                tr_runInEventThread( task->session, task_finish_func, task );
                --taskCount;
            }
        }
    }

    /* cleanup */
    curl_multi_cleanup( multi );
    tr_lockFree( web->taskLock );
    tr_free( web );
    session->web = NULL;
}
void runWebCallBacks(JSContext *cx){ if(!curlHandle) return; int p; curl_multi_perform(curlHandle, &p); CURLMsg *msg = NULL; while( msg = curl_multi_info_read(curlHandle,&p)){ if( msg->msg == CURLMSG_DONE ) { if( msg->data.result == CURLE_OK){ JSObject *ob = NULL; ob = JS_NewObject(cx,&webRespClass,NULL,NULL); if(!ob){ fprint(stderr,"Error creating http response javascript object\n"); exit(EXIT_FAILURE); } JS_SetPrivate(cx,ob,msg); static JSFunctionSpec responseFuncSpec[3]= { JS_FS("toString", webRspData,0,0), JS_FS("getImage", webRspGetImg,0,0), //JS_FS("hasHeaderLine", webRspHL,0,0), JS_FS_END }; if(!JS_DefineFunctions(cx, ob, responseFuncSpec)) fprint(stderr,"Unable to create http response object\n"); //Create an instance of webResponseObject, with private data set. jsval result; jsval oj = OBJECT_TO_JSVAL(ob); rqPrivate *rq = NULL; curl_easy_getinfo(msg->easy_handle,CURLINFO_PRIVATE,(char**)&rq); if(!rq){ fprint(stderr,"Error retrieving data from the handle"); exit( EXIT_FAILURE); } JS_CallFunctionValue(cx, NULL,rq->success, 1, &oj , &result); } else { /* Just return error string -> Cleanup now */ char *errCode = (char*)curl_easy_strerror(msg->data.result); if(!errCode) errCode = "FAILED WITH NO REASON GIVEN"; JSString *string; string = JS_NewStringCopyZ(cx, errCode); jsval result, errString = STRING_TO_JSVAL(string); rqPrivate *rq = NULL; curl_easy_getinfo(msg->easy_handle,CURLINFO_PRIVATE,(char**)&rq); if(!rq){ fprint(stderr,"Error retrieving data from the handle"); exit( EXIT_FAILURE); } JS_CallFunctionValue(cx, NULL,rq->failure, 1, &errString, &result); curl_multi_remove_handle(curlHandle,msg->easy_handle); if(rq->data) free(rq->data); free(rq); curl_easy_cleanup(msg->easy_handle); } } } }
/**
 * start_async_http_req - performs an HTTP request, stores results in pvars
 * - TCP connect phase is synchronous, due to libcurl limitations
 * - TCP read phase is asynchronous, thanks to the libcurl multi interface
 *
 * @msg: sip message struct
 * @method: HTTP verb
 * @url: HTTP URL to be queried
 * @req_body: Body of the request (NULL if not needed)
 * @req_ctype: Value for the "Content-Type: " header of the request (same as ^)
 * @out_handle: CURL easy handle used to perform the transfer
 * @body: reply body; gradually reallocated as data arrives
 * @ctype: will eventually hold the last "Content-Type" header of the reply
 *
 * Returns: a read fd to poll on (async path), ASYNC_SYNC if the transfer
 * completed during connect, or ASYNC_NO_IO on any setup/connect failure.
 */
int start_async_http_req(struct sip_msg *msg, enum rest_client_method method,
                         char *url, char *req_body, char *req_ctype,
                         CURL **out_handle, str *body, str *ctype)
{
	CURL *handle;
	CURLcode rc;
	CURLMcode mrc;
	fd_set rset, wset, eset;
	int max_fd, fd, i;
	long busy_wait, timeout;
	long retry_time, check_time = 5; /* 5ms looping time */
	int msgs_in_queue;
	CURLMsg *cmsg;

	/* each async transfer consumes one fd slot; refuse past the select() limit */
	if (transfers == FD_SETSIZE) {
		LM_ERR("too many ongoing tranfers: %d\n", FD_SETSIZE);
		clean_header_list;
		return ASYNC_NO_IO;
	}

	handle = curl_easy_init();
	if (!handle) {
		LM_ERR("Init curl handle failed!\n");
		clean_header_list;
		return ASYNC_NO_IO;
	}

	/* w_curl_easy_setopt is a wrapper macro; presumably it bails on error */
	w_curl_easy_setopt(handle, CURLOPT_URL, url);

	switch (method) {
	case REST_CLIENT_POST:
		w_curl_easy_setopt(handle, CURLOPT_POST, 1);
		w_curl_easy_setopt(handle, CURLOPT_POSTFIELDS, req_body);
		if (req_ctype) {
			sprintf(print_buff, "Content-Type: %s", req_ctype);
			header_list = curl_slist_append(header_list, print_buff);
			w_curl_easy_setopt(handle, CURLOPT_HTTPHEADER, header_list);
		}
		break;
	case REST_CLIENT_GET:
		break;
	default:
		LM_ERR("Unsupported rest_client_method: %d, defaulting to GET\n", method);
	}

	if (header_list)
		w_curl_easy_setopt(handle, CURLOPT_HTTPHEADER, header_list);

	w_curl_easy_setopt(handle, CURLOPT_CONNECTTIMEOUT, connection_timeout);
	w_curl_easy_setopt(handle, CURLOPT_TIMEOUT, curl_timeout);
	w_curl_easy_setopt(handle, CURLOPT_VERBOSE, 1);
	w_curl_easy_setopt(handle, CURLOPT_FAILONERROR, 1);
	w_curl_easy_setopt(handle, CURLOPT_STDERR, stdout);

	w_curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, write_func);
	w_curl_easy_setopt(handle, CURLOPT_WRITEDATA, body);

	if (ctype) {
		w_curl_easy_setopt(handle, CURLOPT_HEADERFUNCTION, header_func);
		w_curl_easy_setopt(handle, CURLOPT_HEADERDATA, ctype);
	}

	if (ssl_capath)
		w_curl_easy_setopt(handle, CURLOPT_CAPATH, ssl_capath);

	if (!ssl_verifypeer)
		w_curl_easy_setopt(handle, CURLOPT_SSL_VERIFYPEER, 0L);

	if (!ssl_verifyhost)
		w_curl_easy_setopt(handle, CURLOPT_SSL_VERIFYHOST, 0L);

	curl_multi_add_handle(multi_handle, handle);

	timeout = connection_timeout_ms;
	/* obtain a read fd in "connection_timeout" seconds at worst */
	for (timeout = connection_timeout_ms; timeout > 0; timeout -= busy_wait) {
		mrc = curl_multi_perform(multi_handle, &running_handles);
		if (mrc != CURLM_OK) {
			LM_ERR("curl_multi_perform: %s\n", curl_multi_strerror(mrc));
			goto error;
		}

		mrc = curl_multi_timeout(multi_handle, &retry_time);
		if (mrc != CURLM_OK) {
			LM_ERR("curl_multi_timeout: %s\n", curl_multi_strerror(mrc));
			goto error;
		}

		/* -1 means libcurl has no suggested timeout; back off a fixed amount */
		if (retry_time == -1) {
			LM_INFO("curl_multi_timeout() returned -1, pausing %ldms...\n",
			        sleep_on_bad_timeout);
			busy_wait = sleep_on_bad_timeout;
			usleep(1000UL * busy_wait);
			continue;
		}

		busy_wait = retry_time < timeout ? retry_time : timeout;

		/**
		 * libcurl is currently stuck in internal operations (connect)
		 * we have to wait a bit until we receive a read fd
		 */
		for (i = 0; i < busy_wait; i += check_time) {
			/* transfer may have already been completed!! */
			while ((cmsg = curl_multi_info_read(multi_handle, &msgs_in_queue))) {
				if (cmsg->easy_handle == handle && cmsg->msg == CURLMSG_DONE) {
					LM_DBG("done, no need for async!\n");
					clean_header_list;
					*out_handle = handle;
					return ASYNC_SYNC;
				}
			}

			/* NOTE(review): only rset is re-zeroed here; wset/eset are passed
			 * to curl_multi_fdset uncleared but never inspected afterwards —
			 * confirm this is deliberate */
			FD_ZERO(&rset);
			mrc = curl_multi_fdset(multi_handle, &rset, &wset, &eset, &max_fd);
			if (mrc != CURLM_OK) {
				LM_ERR("curl_multi_fdset: %s\n", curl_multi_strerror(mrc));
				goto error;
			}

			if (max_fd != -1) {
				for (fd = 0; fd <= max_fd; fd++) {
					if (FD_ISSET(fd, &rset)) {
						LM_DBG(" >>>>>>>>>> fd %d ISSET(read)\n", fd);
						/* a read fd we haven't seen before: the async phase
						 * can take over from here */
						if (is_new_transfer(fd)) {
							LM_DBG("add fd to read list: %d\n", fd);
							add_transfer(fd);
							goto success;
						}
					}
				}
			}

			usleep(1000UL * check_time);
		}
	}

	LM_ERR("timeout while connecting to '%s' (%ld sec)\n", url, connection_timeout);
	goto error;

success:
	clean_header_list;
	*out_handle = handle;
	return fd; /* caller polls this fd; completion handled elsewhere */

error:
	mrc = curl_multi_remove_handle(multi_handle, handle);
	if (mrc != CURLM_OK)
		LM_ERR("curl_multi_remove_handle: %s\n", curl_multi_strerror(mrc));

cleanup:
	clean_header_list;
	curl_easy_cleanup(handle);
	return ASYNC_NO_IO;
}
int test(char *URL) { CURL *easy = NULL; CURLM *multi = NULL; int res = 0; int running; int msgs_left; int phase; CURLMsg *msg; start_test_timing(); res_global_init(CURL_GLOBAL_ALL); if(res) { return res; } easy_init(easy); multi_init(multi); for (phase = CONNECT_ONLY_PHASE; phase < LAST_PHASE; ++phase) { /* go verbose */ easy_setopt(easy, CURLOPT_VERBOSE, 1L); /* specify target */ easy_setopt(easy, CURLOPT_URL, URL); /* enable 'CONNECT_ONLY' option when in connect phase */ if (phase == CONNECT_ONLY_PHASE) easy_setopt(easy, CURLOPT_CONNECT_ONLY, 1L); /* enable 'NOBODY' option to send 'QUIT' command in quit phase */ if (phase == QUIT_PHASE) { easy_setopt(easy, CURLOPT_CONNECT_ONLY, 0L); easy_setopt(easy, CURLOPT_NOBODY, 1L); easy_setopt(easy, CURLOPT_FORBID_REUSE, 1L); } multi_add_handle(multi, easy); for(;;) { struct timeval interval; fd_set fdread; fd_set fdwrite; fd_set fdexcep; long timeout = -99; int maxfd = -99; multi_perform(multi, &running); abort_on_test_timeout(); if(!running) break; /* done */ FD_ZERO(&fdread); FD_ZERO(&fdwrite); FD_ZERO(&fdexcep); multi_fdset(multi, &fdread, &fdwrite, &fdexcep, &maxfd); /* At this point, maxfd is guaranteed to be greater or equal than -1. */ multi_timeout(multi, &timeout); /* At this point, timeout is guaranteed to be greater or equal than -1. */ if(timeout != -1L) { int itimeout = (timeout > (long)INT_MAX) ? INT_MAX : (int)timeout; interval.tv_sec = itimeout/1000; interval.tv_usec = (itimeout%1000)*1000; } else { interval.tv_sec = TEST_HANG_TIMEOUT/1000+1; interval.tv_usec = 0; } select_test(maxfd+1, &fdread, &fdwrite, &fdexcep, &interval); abort_on_test_timeout(); } msg = curl_multi_info_read(multi, &msgs_left); if(msg) res = msg->data.result; multi_remove_handle(multi, easy); } test_cleanup: /* undocumented cleanup sequence - type UA */ curl_multi_cleanup(multi); curl_easy_cleanup(easy); curl_global_cleanup(); return res; }
static int loop(CURLM *cm, const char* url, const char* userpwd, struct curl_slist *headers) { CURLMsg *msg; CURLMcode code; long L; int M, Q, U = -1; fd_set R, W, E; struct timeval T; if(init(cm, url, userpwd, headers)) return 1; /* failure */ while (U) { do { code = curl_multi_perform(cm, &U); } while (code == CURLM_CALL_MULTI_PERFORM); if (U) { FD_ZERO(&R); FD_ZERO(&W); FD_ZERO(&E); if (curl_multi_fdset(cm, &R, &W, &E, &M)) { fprintf(stderr, "E: curl_multi_fdset\n"); return 1; /* failure */ } /* In a real-world program you OF COURSE check the return that maxfd is bigger than -1 so that the call to select() below makes sense! */ if (curl_multi_timeout(cm, &L)) { fprintf(stderr, "E: curl_multi_timeout\n"); return 1; /* failure */ } if(L != -1) { T.tv_sec = L/1000; T.tv_usec = (L%1000)*1000; } else { T.tv_sec = 5; T.tv_usec = 0; } if (0 > select(M+1, &R, &W, &E, &T)) { fprintf(stderr, "E: select\n"); return 1; /* failure */ } } while ((msg = curl_multi_info_read(cm, &Q))) { if (msg->msg == CURLMSG_DONE) { CURL *e = msg->easy_handle; fprintf(stderr, "R: %d - %s\n", (int)msg->data.result, curl_easy_strerror(msg->data.result)); curl_multi_remove_handle(cm, e); curl_easy_cleanup(e); } else { fprintf(stderr, "E: CURLMsg (%d)\n", (int)msg->msg); } } } return 0; /* success */ }
void* thread(void*) { while (true) { _messages.wait(); int still_running = -1; while (still_running) { ThreadDispatcher::peekMessage tmsg; if (_messages.peek(tmsg, true)) { if (tmsg.msgid == 1) return 0; curl_multi_add_handle(multi_handle, (CURL*)tmsg.arg1); } int prev = still_running; curl_multi_perform(multi_handle, &still_running); if (still_running) { struct timeval timeout; fd_set fdread; fd_set fdwrite; fd_set fdexcep; int maxfd; FD_ZERO(&fdread); FD_ZERO(&fdwrite); FD_ZERO(&fdexcep); /* set a suitable timeout to play around with */ timeout.tv_sec = 1; timeout.tv_usec = 0; /* get file descriptors from the transfers */ curl_multi_fdset(multi_handle, &fdread, &fdwrite, &fdexcep, &maxfd); if (maxfd == -1) { sleep(100); } else { int rc = select(maxfd + 1, &fdread, &fdwrite, &fdexcep, &timeout); } } if (still_running != prev) { CURLMsg* msg = 0; int num; while ((msg = curl_multi_info_read(multi_handle, &num))) { if (msg->msg == CURLMSG_DONE) { #ifdef OX_HAS_CPP11 //msg broken in VS2010 curl_multi_remove_handle(multi_handle, msg->easy_handle); core::getMainThreadDispatcher().postCallback(ID_DONE, msg->easy_handle, (void*)msg->data.result, mainThreadFunc, 0); #endif } } } } } return 0; }
S3Status S3_runonce_request_context(S3RequestContext *requestContext, int *requestsRemainingReturn) { CURLMcode status; do { status = curl_multi_perform(requestContext->curlm, requestsRemainingReturn); switch (status) { case CURLM_OK: case CURLM_CALL_MULTI_PERFORM: break; case CURLM_OUT_OF_MEMORY: return S3StatusOutOfMemory; default: return S3StatusInternalError; } CURLMsg *msg; int junk; while ((msg = curl_multi_info_read(requestContext->curlm, &junk))) { if (msg->msg != CURLMSG_DONE) { return S3StatusInternalError; } Request *request; if (curl_easy_getinfo(msg->easy_handle, CURLINFO_PRIVATE, (char **) (char *) &request) != CURLE_OK) { return S3StatusInternalError; } // Remove the request from the list of requests if (request->prev == request->next) { // It was the only one on the list requestContext->requests = 0; } else { // It doesn't matter what the order of them are, so just in // case request was at the head of the list, put the one after // request to the head of the list requestContext->requests = request->next; request->prev->next = request->next; request->next->prev = request->prev; } if ((msg->data.result != CURLE_OK) && (request->status == S3StatusOK)) { request->status = request_curl_code_to_status (msg->data.result); } if (curl_multi_remove_handle(requestContext->curlm, msg->easy_handle) != CURLM_OK) { return S3StatusInternalError; } // Finish the request, ensuring that all callbacks have been made, // and also releases the request request_finish(request); // Now, since a callback was made, there may be new requests // queued up to be performed immediately, so do so status = CURLM_CALL_MULTI_PERFORM; } } while (status == CURLM_CALL_MULTI_PERFORM); return S3StatusOK; }
static int testExternalPut () { struct MHD_Daemon *d; CURL *c; struct CBC cbc; CURLM *multi; CURLMcode mret; fd_set rs; fd_set ws; fd_set es; int max; int running; struct CURLMsg *msg; time_t start; struct timeval tv; unsigned int pos = 0; int done_flag = 0; char buf[2048]; cbc.buf = buf; cbc.size = 2048; cbc.pos = 0; multi = NULL; d = MHD_start_daemon (MHD_USE_DEBUG, 1082, NULL, NULL, &ahc_echo, &done_flag, MHD_OPTION_CONNECTION_MEMORY_LIMIT, (size_t) (PUT_SIZE * 4), MHD_OPTION_END); if (d == NULL) return 256; c = curl_easy_init (); curl_easy_setopt (c, CURLOPT_URL, "http://127.0.0.1:1082/hello_world"); curl_easy_setopt (c, CURLOPT_WRITEFUNCTION, ©Buffer); curl_easy_setopt (c, CURLOPT_WRITEDATA, &cbc); curl_easy_setopt (c, CURLOPT_READFUNCTION, &putBuffer); curl_easy_setopt (c, CURLOPT_READDATA, &pos); curl_easy_setopt (c, CURLOPT_UPLOAD, 1L); curl_easy_setopt (c, CURLOPT_INFILESIZE, (long) PUT_SIZE); curl_easy_setopt (c, CURLOPT_FAILONERROR, 1); curl_easy_setopt (c, CURLOPT_TIMEOUT, 150L); if (oneone) curl_easy_setopt (c, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_1); else curl_easy_setopt (c, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_0); curl_easy_setopt (c, CURLOPT_CONNECTTIMEOUT, 150L); // NOTE: use of CONNECTTIMEOUT without also // setting NOSIGNAL results in really weird // crashes on my system! 
curl_easy_setopt (c, CURLOPT_NOSIGNAL, 1); multi = curl_multi_init (); if (multi == NULL) { curl_easy_cleanup (c); MHD_stop_daemon (d); return 512; } mret = curl_multi_add_handle (multi, c); if (mret != CURLM_OK) { curl_multi_cleanup (multi); curl_easy_cleanup (c); MHD_stop_daemon (d); return 1024; } start = time (NULL); while ((time (NULL) - start < 5) && (multi != NULL)) { max = 0; FD_ZERO (&rs); FD_ZERO (&ws); FD_ZERO (&es); curl_multi_perform (multi, &running); mret = curl_multi_fdset (multi, &rs, &ws, &es, &max); if (mret != CURLM_OK) { curl_multi_remove_handle (multi, c); curl_multi_cleanup (multi); curl_easy_cleanup (c); MHD_stop_daemon (d); return 2048; } if (MHD_YES != MHD_get_fdset (d, &rs, &ws, &es, &max)) { curl_multi_remove_handle (multi, c); curl_multi_cleanup (multi); curl_easy_cleanup (c); MHD_stop_daemon (d); return 4096; } tv.tv_sec = 0; tv.tv_usec = 1000; select (max + 1, &rs, &ws, &es, &tv); curl_multi_perform (multi, &running); if (running == 0) { msg = curl_multi_info_read (multi, &running); if (msg == NULL) break; if (msg->msg == CURLMSG_DONE) { if (msg->data.result != CURLE_OK) printf ("%s failed at %s:%d: `%s'\n", "curl_multi_perform", __FILE__, __LINE__, curl_easy_strerror (msg->data.result)); curl_multi_remove_handle (multi, c); curl_multi_cleanup (multi); curl_easy_cleanup (c); c = NULL; multi = NULL; } } MHD_run (d); } if (multi != NULL) { curl_multi_remove_handle (multi, c); curl_easy_cleanup (c); curl_multi_cleanup (multi); } MHD_stop_daemon (d); if (cbc.pos != strlen ("/hello_world")) { fprintf (stderr, "Got invalid response `%.*s'\n", (int)cbc.pos, cbc.buf); return 8192; } if (0 != strncmp ("/hello_world", cbc.buf, strlen ("/hello_world"))) return 16384; return 0; }
/*
 * Source code in here hugely as reported in bug report 651464 by
 * Christopher R. Palmer.
 *
 * Use multi interface to get document over proxy with bad port number.
 * This caused the interface to "hang" in libcurl 7.10.2.
 */
/* Expected outcome: the transfer fails fast, running drops to 0, and a
 * single CURLMsg with no messages left behind it yields ret == 100.
 * Two watchdogs (MAIN_LOOP_HANG_TIMEOUT / MULTI_PERFORM_HANG_TIMEOUT)
 * catch the historical hang and return TEST_ERR_RUNS_FOREVER instead. */
int test(char *URL)
{
  CURL *c;
  int ret=0;
  CURLM *m;
  fd_set rd, wr, exc;
  CURLMcode res;
  char done = FALSE;
  int running;
  int max_fd;
  int rc;
  struct timeval ml_start;
  struct timeval mp_start;
  char ml_timedout = FALSE;
  char mp_timedout = FALSE;

  if (curl_global_init(CURL_GLOBAL_ALL) != CURLE_OK) {
    fprintf(stderr, "curl_global_init() failed\n");
    return TEST_ERR_MAJOR_BAD;
  }

  if ((c = curl_easy_init()) == NULL) {
    fprintf(stderr, "curl_easy_init() failed\n");
    curl_global_cleanup();
    return TEST_ERR_MAJOR_BAD;
  }

  /* the point here being that there must not run anything on the given
     proxy port */
  curl_easy_setopt(c, CURLOPT_PROXY, arg2);
  curl_easy_setopt(c, CURLOPT_URL, URL);
  curl_easy_setopt(c, CURLOPT_VERBOSE, 1);

  if ((m = curl_multi_init()) == NULL) {
    fprintf(stderr, "curl_multi_init() failed\n");
    curl_easy_cleanup(c);
    curl_global_cleanup();
    return TEST_ERR_MAJOR_BAD;
  }

  if ((res = curl_multi_add_handle(m, c)) != CURLM_OK) {
    fprintf(stderr, "curl_multi_add_handle() failed, "
            "with code %d\n", res);
    curl_multi_cleanup(m);
    curl_easy_cleanup(c);
    curl_global_cleanup();
    return TEST_ERR_MAJOR_BAD;
  }

  ml_timedout = FALSE;
  ml_start = curlx_tvnow();

  while (!done) {
    struct timeval interval;

    interval.tv_sec = 1;
    interval.tv_usec = 0;

    /* outer watchdog: whole main loop must not exceed the hang timeout */
    if (curlx_tvdiff(curlx_tvnow(), ml_start) >
        MAIN_LOOP_HANG_TIMEOUT) {
      ml_timedout = TRUE;
      break;
    }
    mp_timedout = FALSE;
    mp_start = curlx_tvnow();

    fprintf(stderr, "curl_multi_perform()\n");
    res = CURLM_CALL_MULTI_PERFORM;
    /* inner watchdog: perform-spin must not exceed its own timeout */
    while (res == CURLM_CALL_MULTI_PERFORM) {
      res = curl_multi_perform(m, &running);
      if (curlx_tvdiff(curlx_tvnow(), mp_start) >
          MULTI_PERFORM_HANG_TIMEOUT) {
        mp_timedout = TRUE;
        break;
      }
    }
    if (mp_timedout)
      break;

    if(!running) {
      /* This is where this code is expected to reach */
      int numleft;
      CURLMsg *msg = curl_multi_info_read(m, &numleft);
      fprintf(stderr, "Expected: not running\n");
      if(msg && !numleft)
        ret = 100; /* this is where we should be */
      else
        ret = 99; /* not correct */
      break;
    }
    fprintf(stderr, "running == %d, res == %d\n", running, res);

    if (res != CURLM_OK) {
      ret = 2;
      break;
    }

    FD_ZERO(&rd);
    FD_ZERO(&wr);
    FD_ZERO(&exc);
    max_fd = 0;

    fprintf(stderr, "curl_multi_fdset()\n");
    if (curl_multi_fdset(m, &rd, &wr, &exc, &max_fd) != CURLM_OK) {
      fprintf(stderr, "unexpected failured of fdset.\n");
      ret = 3;
      break;
    }
    rc = select_test(max_fd+1, &rd, &wr, &exc, &interval);
    fprintf(stderr, "select returned %d\n", rc);
  }

  if (ml_timedout || mp_timedout) {
    if (ml_timedout) fprintf(stderr, "ml_timedout\n");
    if (mp_timedout) fprintf(stderr, "mp_timedout\n");
    fprintf(stderr, "ABORTING TEST, since it seems "
            "that it would have run forever.\n");
    ret = TEST_ERR_RUNS_FOREVER;
  }

  curl_multi_remove_handle(m, c);
  curl_easy_cleanup(c);
  curl_multi_cleanup(m);
  curl_global_cleanup();

  return ret;
}
static int testExternalGet () { struct MHD_Daemon *d; CURL *c; char buf[2048]; struct CBC cbc; CURLM *multi; CURLMcode mret; fd_set rs; fd_set ws; fd_set es; int max; int running; time_t start; struct timeval tv; int i; multi = NULL; cbc.buf = buf; cbc.size = 2048; cbc.pos = 0; d = MHD_start_daemon (MHD_NO_FLAG /* | MHD_USE_DEBUG */ , 11080, NULL, NULL, &ahc_echo, "GET", MHD_OPTION_END); if (d == NULL) return 256; multi = curl_multi_init (); if (multi == NULL) { MHD_stop_daemon (d); return 512; } zzuf_socat_start (); for (i = 0; i < LOOP_COUNT; i++) { fprintf (stderr, "."); c = curl_easy_init (); curl_easy_setopt (c, CURLOPT_URL, "http://localhost:11081/hello_world"); curl_easy_setopt (c, CURLOPT_WRITEFUNCTION, ©Buffer); curl_easy_setopt (c, CURLOPT_WRITEDATA, &cbc); curl_easy_setopt (c, CURLOPT_FAILONERROR, 1); curl_easy_setopt (c, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_1_1); curl_easy_setopt (c, CURLOPT_TIMEOUT_MS, CURL_TIMEOUT); curl_easy_setopt (c, CURLOPT_CONNECTTIMEOUT_MS, CURL_TIMEOUT); // NOTE: use of CONNECTTIMEOUT without also // setting NOSIGNAL results in really weird // crashes on my system! 
curl_easy_setopt (c, CURLOPT_NOSIGNAL, 1); mret = curl_multi_add_handle (multi, c); if (mret != CURLM_OK) { curl_multi_cleanup (multi); curl_easy_cleanup (c); zzuf_socat_stop (); MHD_stop_daemon (d); return 1024; } start = time (NULL); while ((time (NULL) - start < 5) && (c != NULL)) { max = 0; FD_ZERO (&rs); FD_ZERO (&ws); FD_ZERO (&es); curl_multi_perform (multi, &running); mret = curl_multi_fdset (multi, &rs, &ws, &es, &max); if (mret != CURLM_OK) { curl_multi_remove_handle (multi, c); curl_multi_cleanup (multi); curl_easy_cleanup (c); zzuf_socat_stop (); MHD_stop_daemon (d); return 2048; } if (MHD_YES != MHD_get_fdset (d, &rs, &ws, &es, &max)) { curl_multi_remove_handle (multi, c); curl_multi_cleanup (multi); curl_easy_cleanup (c); zzuf_socat_stop (); MHD_stop_daemon (d); return 4096; } tv.tv_sec = 0; tv.tv_usec = 1000; select (max + 1, &rs, &ws, &es, &tv); curl_multi_perform (multi, &running); if (running == 0) { curl_multi_info_read (multi, &running); curl_multi_remove_handle (multi, c); curl_easy_cleanup (c); c = NULL; } MHD_run (d); } if (c != NULL) { curl_multi_remove_handle (multi, c); curl_easy_cleanup (c); } } fprintf (stderr, "\n"); curl_multi_cleanup (multi); zzuf_socat_stop (); MHD_stop_daemon (d); return 0; }
/**
 * Worker-thread entry point that owns this session's libcurl multi handle.
 *
 * Loop per iteration: (1) honor close requests, (2) drain queued tasks into
 * the multi handle under taskLock, (3) wait in tr_select() on curl's fds
 * (capped at THREADFUNC_MAX_SLEEP_MSEC, tightened to 100ms during session
 * shutdown), (4) run curl_multi_perform(), (5) reap CURLMSG_DONE messages,
 * record connect/timeout telemetry on each task, and hand it to the event
 * thread. Any tasks still queued after the loop are discarded.
 *
 * @param vsession  a tr_session* (passed as void* by the thread API)
 */
static void
tr_webThreadFunc( void * vsession )
{
    CURLM * multi;
    struct tr_web * web;
    int taskCount = 0;
    struct tr_web_task * task;
    tr_session * session = vsession;

    /* try to enable ssl for https support; but if that fails,
     * try a plain vanilla init */
    if( curl_global_init( CURL_GLOBAL_SSL ) )
        curl_global_init( 0 );

    web = tr_new0( struct tr_web, 1 );
    web->close_mode = ~0; /* sentinel: no close requested yet */
    web->taskLock = tr_lockNew( );
    web->tasks = NULL;
    web->curl_verbose = getenv( "TR_CURL_VERBOSE" ) != NULL;
    web->cookie_filename = tr_buildPath( session->configDir, "cookies.txt", NULL );

    multi = curl_multi_init( );
    session->web = web; /* publish; other threads enqueue via web->tasks */

    for( ;; )
    {
        long msec;
        int unused;
        CURLMsg * msg;
        CURLMcode mcode;

        if( web->close_mode == TR_WEB_CLOSE_NOW )
            break;
        if( ( web->close_mode == TR_WEB_CLOSE_WHEN_IDLE ) && ( web->tasks == NULL ) )
            break;

        /* add tasks from the queue */
        tr_lockLock( web->taskLock );
        while(( task = tr_list_pop_front( &web->tasks )))
        {
            dbgmsg( "adding task to curl: [%s]", task->url );
            curl_multi_add_handle( multi, createEasy( session, web, task ));
            /*fprintf( stderr, "adding a task.. taskCount is now %d\n", taskCount );*/
            ++taskCount;
        }
        tr_lockUnlock( web->taskLock );

        /* maybe wait a little while before calling curl_multi_perform() */
        msec = 0;
        curl_multi_timeout( multi, &msec );
        if( msec < 0 ) /* -1 means "no timeout set"; fall back to our cap */
            msec = THREADFUNC_MAX_SLEEP_MSEC;
        if( session->isClosed )
            msec = 100; /* on shutdown, call perform() more frequently */
        if( msec > 0 )
        {
            int usec;
            int max_fd;
            struct timeval t;
            fd_set r_fd_set, w_fd_set, c_fd_set;

            max_fd = 0;
            FD_ZERO( &r_fd_set );
            FD_ZERO( &w_fd_set );
            FD_ZERO( &c_fd_set );
            curl_multi_fdset( multi, &r_fd_set, &w_fd_set, &c_fd_set, &max_fd );
            if( msec > THREADFUNC_MAX_SLEEP_MSEC )
                msec = THREADFUNC_MAX_SLEEP_MSEC;
            usec = msec * 1000;
            t.tv_sec =  usec / 1000000;
            t.tv_usec = usec % 1000000;
            tr_select( max_fd+1, &r_fd_set, &w_fd_set, &c_fd_set, &t );
        }

        /* call curl_multi_perform() */
        do
        {
            mcode = curl_multi_perform( multi, &unused );
        }
        while( mcode == CURLM_CALL_MULTI_PERFORM );

        /* pump completed tasks from the multi */
        while(( msg = curl_multi_info_read( multi, &unused )))
        {
            if(( msg->msg == CURLMSG_DONE ) && ( msg->easy_handle != NULL ))
            {
                double total_time;
                struct tr_web_task * task;
                long req_bytes_sent;
                CURL * e = msg->easy_handle;
                /* CURLINFO_PRIVATE was set in createEasy() to the task ptr */
                curl_easy_getinfo( e, CURLINFO_PRIVATE, (void*)&task );
                curl_easy_getinfo( e, CURLINFO_RESPONSE_CODE, &task->code );
                curl_easy_getinfo( e, CURLINFO_REQUEST_SIZE, &req_bytes_sent );
                curl_easy_getinfo( e, CURLINFO_TOTAL_TIME, &total_time );
                /* telemetry: any response code or bytes sent proves a connect;
                 * no code + elapsed >= budget is treated as a timeout */
                task->did_connect = task->code>0 || req_bytes_sent>0;
                task->did_timeout = !task->code && ( total_time >= task->timeout_secs );
                curl_multi_remove_handle( multi, e );
                curl_easy_cleanup( e );
                /*fprintf( stderr, "removing a completed task.. taskCount is now %d (response code: %d, response len: %d)\n", taskCount, (int)task->code, (int)evbuffer_get_length(task->response) );*/
                tr_runInEventThread( task->session, task_finish_func, task );
                --taskCount;
            }
        }

#if 0
{
tr_list * l;
for( l=web->tasks; l!=NULL; l=l->next )
    fprintf( stderr, "still pending: %s\n", ((struct tr_web_task*)l->data)->url );
}
fprintf( stderr, "loop is ending... web is closing\n" );
#endif
    }

    /* Discard any remaining tasks.
     * This is rare, but can happen on shutdown with unresponsive trackers. */
    while(( task = tr_list_pop_front( &web->tasks )))
    {
        dbgmsg( "Discarding task \"%s\"", task->url );
        task_free( task );
    }

    /* cleanup */
    curl_multi_cleanup( multi );
    tr_lockFree( web->taskLock );
    tr_free( web->cookie_filename );
    tr_free( web );
    session->web = NULL;
}
/**
 * @brief A download finished, find out what it was, whether there were any errors and
 * if so, how severe. If none, rename file and other such stuff.
 *
 * Reaps every pending CURLMsg from the global multi handle. For each one it
 * locates the owning dlhandle_t by easy-handle pointer, updates the counters
 * and UI state, then dispatches on the curl result: 404s delete the temp
 * file (and abort HTTP downloading entirely on oversized 404 bodies),
 * connect/resolve failures disable HTTP via CL_CancelHTTPDownloads(true),
 * other errors just drop the file; on success the temp file is renamed into
 * the game dir (with special pk3 handling: filesystem restart + queue
 * re-verify) and stats are printed. Finally, batch-level abort flags are
 * resolved and the next download is requested if appropriate.
 */
static void CL_FinishHTTPDownload (void)
{
	int messagesInQueue, i;
	CURLcode result;
	CURL *curl;
	long responseCode;
	double timeTaken, fileSize;
	char tempName[MAX_OSPATH];
	bool isFile;

	do {
		CURLMsg *msg = curl_multi_info_read(multi, &messagesInQueue);
		dlhandle_t *dl = NULL;

		if (!msg) {
			Com_Printf("CL_FinishHTTPDownload: Odd, no message for us...\n");
			return;
		}

		if (msg->msg != CURLMSG_DONE) {
			Com_Printf("CL_FinishHTTPDownload: Got some weird message...\n");
			continue;
		}

		curl = msg->easy_handle;

		/* curl doesn't provide reverse-lookup of the void * ptr, so search for it */
		for (i = 0; i < 4; i++) {
			if (cls.HTTPHandles[i].curl == curl) {
				dl = &cls.HTTPHandles[i];
				break;
			}
		}

		if (!dl)
			Com_Error(ERR_DROP, "CL_FinishHTTPDownload: Handle not found");

		/* we mark everything as done even if it errored to prevent multiple attempts. */
		dl->queueEntry->state = DLQ_STATE_DONE;

		/* filelist processing is done on read */
		if (dl->file)
			isFile = true;
		else
			isFile = false;

		if (isFile) {
			fclose(dl->file);
			dl->file = NULL;
		}

		/* might be aborted */
		if (pendingCount)
			pendingCount--;
		handleCount--;
		/* Com_Printf("finished dl: hc = %d\n", handleCount); */
		cls.downloadName[0] = 0;
		cls.downloadPosition = 0;

		result = msg->data.result;

		switch (result) {
		/* for some reason curl returns CURLE_OK for a 404... */
		case CURLE_HTTP_RETURNED_ERROR:
		case CURLE_OK:
			curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &responseCode);
			if (responseCode == 404) {
				const char *extension = Com_GetExtension(dl->queueEntry->ufoPath);
				if (extension != NULL && Q_streq(extension, "pk3"))
					downloadingPK3 = false;

				if (isFile)
					FS_RemoveFile(dl->filePath);
				Com_Printf("HTTP(%s): 404 File Not Found [%d remaining files]\n", dl->queueEntry->ufoPath, pendingCount);
				curl_easy_getinfo(curl, CURLINFO_SIZE_DOWNLOAD, &fileSize);
				/* a large 404 body suggests a captive portal / broken server:
				 * give up on HTTP downloading via the fatal path below */
				if (fileSize > 512) {
					/* ick */
					isFile = false;
					result = CURLE_FILESIZE_EXCEEDED;
					Com_Printf("Oversized 404 body received (%d bytes), aborting HTTP downloading.\n", (int)fileSize);
				} else {
					curl_multi_remove_handle(multi, dl->curl);
					continue;
				}
			} else if (responseCode == 200) {
				if (!isFile && !abortDownloads)
					CL_ParseFileList(dl); /* non-file transfers carry a filelist */
				break;
			}

			/* every other code is treated as fatal, fallthrough here */

		/* fatal error, disable http */
		case CURLE_COULDNT_RESOLVE_HOST:
		case CURLE_COULDNT_CONNECT:
		case CURLE_COULDNT_RESOLVE_PROXY:
			if (isFile)
				FS_RemoveFile(dl->filePath);
			Com_Printf("Fatal HTTP error: %s\n", curl_easy_strerror(result));
			curl_multi_remove_handle(multi, dl->curl);
			if (abortDownloads)
				continue;
			CL_CancelHTTPDownloads(true);
			continue;
		default:
			/* NOTE(review): assumes ufoPath is at least 4 chars; confirm
			 * queue entries can never be shorter */
			i = strlen(dl->queueEntry->ufoPath);
			if (Q_streq(dl->queueEntry->ufoPath + i - 4, ".pk3"))
				downloadingPK3 = false;
			if (isFile)
				FS_RemoveFile(dl->filePath);
			Com_Printf("HTTP download failed: %s\n", curl_easy_strerror(result));
			curl_multi_remove_handle(multi, dl->curl);
			continue;
		}

		if (isFile) {
			/* rename the temp file */
			Com_sprintf(tempName, sizeof(tempName), "%s/%s", FS_Gamedir(), dl->queueEntry->ufoPath);

			if (!FS_RenameFile(dl->filePath, tempName, false))
				Com_Printf("Failed to rename %s for some odd reason...", dl->filePath);

			/* a pk3 file is very special... */
			i = strlen(tempName);
			if (Q_streq(tempName + i - 4, ".pk3")) {
				FS_RestartFilesystem(NULL);
				CL_ReVerifyHTTPQueue();
				downloadingPK3 = false;
			}
		}

		/* show some stats */
		curl_easy_getinfo(curl, CURLINFO_TOTAL_TIME, &timeTaken);
		curl_easy_getinfo(curl, CURLINFO_SIZE_DOWNLOAD, &fileSize);

		/** @todo
		 * technically i shouldn't need to do this as curl will auto reuse the
		 * existing handle when you change the URL. however, the handleCount goes
		 * all weird when reusing a download slot in this way. if you can figure
		 * out why, please let me know. */
		curl_multi_remove_handle(multi, dl->curl);

		Com_Printf("HTTP(%s): %.f bytes, %.2fkB/sec [%d remaining files]\n", dl->queueEntry->ufoPath, fileSize, (fileSize / 1024.0) / timeTaken, pendingCount);
	} while (messagesInQueue > 0);

	/* batch complete: resolve a pending soft/hard abort */
	if (handleCount == 0) {
		if (abortDownloads == HTTPDL_ABORT_SOFT)
			abortDownloads = HTTPDL_ABORT_NONE;
		else if (abortDownloads == HTTPDL_ABORT_HARD)
			cls.downloadServer[0] = 0;
	}

	/* done current batch, see if we have more to dl - maybe a .bsp needs downloaded */
	if (cls.state == ca_connected && !CL_PendingHTTPDownloads())
		CL_RequestNextDownload();
}
/**
 * Verify MHD_quiesce_daemon(): run one successful GET against the daemon
 * with an external curl-multi loop, quiesce the daemon (stop accepting new
 * connections), then confirm a second identical GET fails.
 *
 * On W32 curl's fd max (int) and MHD's (MHD_socket) differ, hence the
 * separate maxposixs; on POSIX the two are aliased via the #define.
 *
 * @return 0 on success, a distinct error code otherwise
 */
static int
testExternalGet ()
{
  struct MHD_Daemon *d;
  CURL *c;
  char buf[2048];
  struct CBC cbc;
  CURLM *multi;
  CURLMcode mret;
  fd_set rs;
  fd_set ws;
  fd_set es;
  MHD_socket maxsock;
#ifdef MHD_WINSOCK_SOCKETS
  int maxposixs; /* Max socket number unused on W32 */
#else  /* MHD_POSIX_SOCKETS */
#define maxposixs maxsock
#endif /* MHD_POSIX_SOCKETS */
  int running;
  struct CURLMsg *msg;
  time_t start;
  struct timeval tv;
  int i;
  MHD_socket fd;

  multi = NULL;
  cbc.buf = buf;
  cbc.size = 2048;
  cbc.pos = 0;
  d = MHD_start_daemon (MHD_USE_DEBUG,
                        11080, NULL, NULL, &ahc_echo, "GET", MHD_OPTION_END);
  if (d == NULL)
    return 256;
  c = setupCURL(&cbc);
  multi = curl_multi_init ();
  if (multi == NULL)
    {
      curl_easy_cleanup (c);
      MHD_stop_daemon (d);
      return 512;
    }
  mret = curl_multi_add_handle (multi, c);
  if (mret != CURLM_OK)
    {
      curl_multi_cleanup (multi);
      curl_easy_cleanup (c);
      MHD_stop_daemon (d);
      return 1024;
    }
  /* iteration 0: request must succeed; iteration 1 (post-quiesce): must fail */
  for (i = 0; i < 2; i++)
    {
      start = time (NULL);
      while ((time (NULL) - start < 5) && (multi != NULL))
        {
          maxsock = MHD_INVALID_SOCKET;
          maxposixs = -1;
          FD_ZERO (&rs);
          FD_ZERO (&ws);
          FD_ZERO (&es);
          curl_multi_perform (multi, &running);
          mret = curl_multi_fdset (multi, &rs, &ws, &es, &maxposixs);
          if (mret != CURLM_OK)
            {
              curl_multi_remove_handle (multi, c);
              curl_multi_cleanup (multi);
              curl_easy_cleanup (c);
              MHD_stop_daemon (d);
              return 2048;
            }
          if (MHD_YES != MHD_get_fdset (d, &rs, &ws, &es, &maxsock))
            {
              curl_multi_remove_handle (multi, c);
              curl_multi_cleanup (multi);
              curl_easy_cleanup (c);
              MHD_stop_daemon (d);
              return 4096;
            }
          tv.tv_sec = 0;
          tv.tv_usec = 1000;
          select (maxposixs + 1, &rs, &ws, &es, &tv);
          curl_multi_perform (multi, &running);
          if (running == 0)
            {
              msg = curl_multi_info_read (multi, &running);
              if (msg == NULL)
                break;
              if (msg->msg == CURLMSG_DONE)
                {
                  /* pass 0 must succeed, pass 1 must NOT */
                  if (i == 0 && msg->data.result != CURLE_OK)
                    printf ("%s failed at %s:%d: `%s'\n",
                            "curl_multi_perform", __FILE__, __LINE__,
                            curl_easy_strerror (msg->data.result));
                  else if (i == 1 && msg->data.result == CURLE_OK)
                    printf ("%s should have failed at %s:%d\n",
                            "curl_multi_perform", __FILE__, __LINE__);
                  curl_multi_remove_handle (multi, c);
                  curl_multi_cleanup (multi);
                  curl_easy_cleanup (c);
                  c = NULL;
                  multi = NULL;
                }
            }
          MHD_run (d);
        }
      if (i == 0)
        {
          /* quiesce the daemon on the 1st iteration, so the 2nd should fail */
          fd = MHD_quiesce_daemon(d);
          if (MHD_INVALID_SOCKET == fd)
            {
              fprintf (stderr,
                       "MHD_quiesce_daemon failed.\n");
              curl_multi_remove_handle (multi, c);
              curl_multi_cleanup (multi);
              curl_easy_cleanup (c);
              MHD_stop_daemon (d);
              return 2;
            }
          /* fresh easy + multi for the post-quiesce attempt */
          c = setupCURL (&cbc);
          multi = curl_multi_init ();
          mret = curl_multi_add_handle (multi, c);
          if (mret != CURLM_OK)
            {
              curl_multi_remove_handle (multi, c);
              curl_multi_cleanup (multi);
              curl_easy_cleanup (c);
              MHD_stop_daemon (d);
              return 32768;
            }
        }
    }
  if (multi != NULL)
    {
      curl_multi_remove_handle (multi, c);
      curl_easy_cleanup (c);
      curl_multi_cleanup (multi);
    }
  MHD_stop_daemon (d);
  /* after quiesce, the caller owns the listen socket: close it ourselves */
  MHD_socket_close_ (fd);
  if (cbc.pos != strlen ("/hello_world"))
    return 8192;
  if (0 != strncmp ("/hello_world", cbc.buf, strlen ("/hello_world")))
    return 16384;
  return 0;
}
int test( char *URL ) { CURLM* multi; sslctxparm p; int i = 0; CURLMsg *msg; if ( arg2 ) { portnum = atoi( arg2 ); } curl_global_init( CURL_GLOBAL_ALL ); p.curl = curl_easy_init(); p.accessinfoURL = (unsigned char *) strdup( URL ); p.accesstype = OBJ_obj2nid( OBJ_txt2obj( "AD_DVCS",0 ) ) ; curl_easy_setopt( p.curl, CURLOPT_URL, p.accessinfoURL ); curl_easy_setopt( p.curl, CURLOPT_SSL_CTX_FUNCTION, sslctxfun ) ; curl_easy_setopt( p.curl, CURLOPT_SSL_CTX_DATA, &p ); curl_easy_setopt( p.curl, CURLOPT_SSL_VERIFYPEER, FALSE ); curl_easy_setopt( p.curl, CURLOPT_SSL_VERIFYHOST, 1 ); fprintf( stderr, "Going to perform %s\n", (char *)p.accessinfoURL ); { CURLMcode res; int running; char done = FALSE; multi = curl_multi_init(); res = curl_multi_add_handle( multi, p.curl ); while ( !done ) { fd_set rd, wr, exc; int max_fd; struct timeval interval; interval.tv_sec = 1; interval.tv_usec = 0; while ( res == CURLM_CALL_MULTI_PERFORM ) { res = curl_multi_perform( multi, &running ); fprintf( stderr, "running=%d res=%d\n",running,res ); if ( running <= 0 ) { done = TRUE; break; } } if ( done ) { break; } if ( res != CURLM_OK ) { fprintf( stderr, "not okay???\n" ); i = 80; break; } FD_ZERO( &rd ); FD_ZERO( &wr ); FD_ZERO( &exc ); max_fd = 0; if ( curl_multi_fdset( multi, &rd, &wr, &exc, &max_fd ) != CURLM_OK ) { fprintf( stderr, "unexpected failured of fdset.\n" ); i = 89; break; } if ( select( max_fd + 1, &rd, &wr, &exc, &interval ) == -1 ) { fprintf( stderr, "bad select??\n" ); i = 95; break; } res = CURLM_CALL_MULTI_PERFORM; } msg = curl_multi_info_read( multi, &running ); /* this should now contain a result code from the easy handle, get it */ if ( msg ) { i = msg->data.result; } } fprintf( stderr, "all done\n" ); curl_multi_remove_handle( multi, p.curl ); curl_easy_cleanup( p.curl ); curl_multi_cleanup( multi ); curl_global_cleanup(); free( p.accessinfoURL ); return i; }
/* Drain completed transfers from the watcher multi handle.
 * For each finished easy handle: on failure, retry against the next
 * cluster address (round-robin) while attempts remain, otherwise
 * report a cluster-wide failure to the watcher's callback; on success
 * (or final failure), invoke the callback and, unless the watcher is
 * one-shot, reset its parser state and re-add the handle to keep
 * watching.  Returns the number of handles re-added to mcurl. */
static int cetcd_reap_watchers(cetcd_client *cli, CURLM *mcurl) {
    int added, ignore;
    CURLMsg *msg;
    CURL *curl;
    cetcd_string url;
    cetcd_watcher *watcher;
    cetcd_response *resp;
    added = 0;
    while ((msg = curl_multi_info_read(mcurl, &ignore)) != NULL) {
        if (msg->msg == CURLMSG_DONE) {
            curl = msg->easy_handle;
            /* the watcher was stashed on the handle via CURLOPT_PRIVATE */
            curl_easy_getinfo(curl, CURLINFO_PRIVATE, &watcher);
            resp = watcher->parser->resp;
            if (msg->data.result != CURLE_OK) {
                /* try the next server in round-robin fashion */
                /* FIXME There is a race condition if multiple watchers failed */
                if (watcher->attempts) {
                    cli->picked = (cli->picked+1)%(cetcd_array_size(cli->addresses));
                    url = cetcd_watcher_build_url(cli, watcher);
                    /* CURLOPT_URL copies the string, so the sds can be freed right away */
                    curl_easy_setopt(watcher->curl, CURLOPT_URL, url);
                    sdsfree(url);
                    /* remove+add restarts the transfer on the multi handle */
                    curl_multi_remove_handle(mcurl, curl);
                    curl_multi_add_handle(mcurl, curl);
                    /*++added;
                     *watcher->attempts --;
                     */
                    continue;
                } else {
                    /* out of attempts: synthesize an error response */
                    resp->err = calloc(1, sizeof(cetcd_error));
                    resp->err->ecode = error_cluster_failed;
                    resp->err->message = sdsnew("cetcd_reap_watchers: all cluster servers failed.");
                }
            }
            if (watcher->callback) {
                watcher->callback(watcher->userdata, resp);
                if (resp->err) {
                    /* permanent failure: tear the watcher down and stop reaping */
                    curl_multi_remove_handle(mcurl, curl);
                    cetcd_watcher_release(watcher);
                    break;
                }
                cetcd_response_release(resp);
                /* suppress a double free by cetcd_watcher_release */
                watcher->parser->resp = NULL;
            }
            if (!watcher->once) {
                /* recurring watcher: reset parser state for the next wait */
                sdsclear(watcher->parser->buf);
                watcher->parser->st = 0;
                watcher->parser->resp = calloc(1, sizeof(cetcd_response));
                if (watcher->index) {
                    /* advance past the index we just observed */
                    watcher->index ++;
                    url = cetcd_watcher_build_url(cli, watcher);
                    curl_easy_setopt(watcher->curl, CURLOPT_URL, url);
                    sdsfree(url);
                }
                curl_multi_remove_handle(mcurl, curl);
                curl_multi_add_handle(mcurl, curl);
                ++added;
                continue;
            }
            /* one-shot watcher: done for good */
            curl_multi_remove_handle(mcurl, curl);
            cetcd_watcher_release(watcher);
        }
    }
    return added;
}
bool LLXMLRPCTransaction::Impl::process() { switch(mStatus) { case LLXMLRPCTransaction::StatusComplete: case LLXMLRPCTransaction::StatusCURLError: case LLXMLRPCTransaction::StatusXMLRPCError: case LLXMLRPCTransaction::StatusOtherError: { return true; } case LLXMLRPCTransaction::StatusNotStarted: { setStatus(LLXMLRPCTransaction::StatusStarted); break; } default: { // continue onward } } const F32 MAX_PROCESSING_TIME = 0.05f; LLTimer timer; int count; while (CURLM_CALL_MULTI_PERFORM == curl_multi_perform(mCurlMulti, &count)) { if (timer.getElapsedTimeF32() >= MAX_PROCESSING_TIME) { return false; } } while(CURLMsg* curl_msg = curl_multi_info_read(mCurlMulti, &count)) { if (CURLMSG_DONE == curl_msg->msg) { if (curl_msg->data.result != CURLE_OK) { setCurlStatus(curl_msg->data.result); llwarns << "LLXMLRPCTransaction CURL error " << mCurlCode << ": " << mCurlErrorBuffer << llendl; llwarns << "LLXMLRPCTransaction request URI: " << mURI << llendl; return true; } setStatus(LLXMLRPCTransaction::StatusComplete); mResponse = XMLRPC_REQUEST_FromXML( mResponseText.data(), mResponseText.size(), NULL); bool hasError = false; bool hasFault = false; int faultCode = 0; std::string faultString; LLXMLRPCValue error(XMLRPC_RequestGetError(mResponse)); if (error.isValid()) { hasError = true; faultCode = error["faultCode"].asInt(); faultString = error["faultString"].asString(); } else if (XMLRPC_ResponseIsFault(mResponse)) { hasFault = true; faultCode = XMLRPC_GetResponseFaultCode(mResponse); faultString = XMLRPC_GetResponseFaultString(mResponse); } if (hasError || hasFault) { setStatus(LLXMLRPCTransaction::StatusXMLRPCError); llwarns << "LLXMLRPCTransaction XMLRPC " << (hasError ? "error " : "fault ") << faultCode << ": " << faultString << llendl; llwarns << "LLXMLRPCTransaction request URI: " << mURI << llendl; } return true; } } return false; }
/* Harvest completed transfers from the server's curl multi handle.
 * For each finished easy handle: optionally log the request, retry it
 * (with a 30s back-off on the server name used) when there was no HTTP
 * response and retries remain, otherwise deliver the body to the
 * request's callback and free all per-request resources.
 * NOTE(review): assumes it is called with `requests` unlocked — it takes
 * MOLOCH_LOCK(requests) internally; confirm against callers. */
LOCAL void moloch_http_curlm_check_multi_info(MolochHttpServer_t *server)
{
    char                 *eff_url;
    CURLMsg              *msg;
    int                   msgs_left;
    MolochHttpRequest_t  *request;
    CURL                 *easy;

    while ((msg = curl_multi_info_read(server->multi, &msgs_left))) {
        if (msg->msg == CURLMSG_DONE) {
            easy = msg->easy_handle;
            /* the request was stashed on the handle via CURLOPT_PRIVATE */
            curl_easy_getinfo(easy, CURLINFO_PRIVATE, (void*)&request);
            curl_easy_getinfo(easy, CURLINFO_EFFECTIVE_URL, &eff_url);

            long responseCode;
            curl_easy_getinfo(easy, CURLINFO_RESPONSE_CODE, &responseCode);

            /* log all requests if configured, or any non-2xx when error
             * printing is on */
            if (config.logESRequests || (server->printErrors && responseCode/100 != 2)) {
                double totalTime;
                double connectTime;
                double uploadSize;
                double downloadSize;

                curl_easy_getinfo(easy, CURLINFO_TOTAL_TIME, &totalTime);
                curl_easy_getinfo(easy, CURLINFO_CONNECT_TIME, &connectTime);
                curl_easy_getinfo(easy, CURLINFO_SIZE_UPLOAD, &uploadSize);
                curl_easy_getinfo(easy, CURLINFO_SIZE_DOWNLOAD, &downloadSize);

                LOG("%d/%d ASYNC %ld %s %.0lf/%.0lf %.0lfms %.0lfms",
                   request->server->outstanding,
                   request->server->connections,
                   responseCode,
                   request->url,
                   uploadSize,
                   downloadSize,
                   connectTime*1000,
                   totalTime*1000);
            }

#ifdef MOLOCH_HTTP_DEBUG
            LOG("HTTPDEBUG DECR %p %d %s", request, server->outstanding, request->url);
#endif

            /* responseCode 0 means no HTTP response at all (connection
             * failure) — retry on another pass if we still have retries */
            if (responseCode == 0 && request->retries < server->maxRetries) {
                curl_multi_remove_handle(server->multi, easy);

                request->retries++;

                struct timeval now;
                gettimeofday(&now, NULL);
                MOLOCH_LOCK(requests);
                /* back off this server name for 30 seconds */
                server->snames[request->namePos].allowedAtSeconds = now.tv_sec + 30;
                server->outstanding--;
                moloch_http_add_request(server, request, TRUE);
                MOLOCH_UNLOCK(requests);
            } else {
                if (server->printErrors && responseCode/100 != 2) {
                    LOG("Response length=%d :>\n%.*s", request->used, MIN(request->used, 4000), request->dataIn);
                }

                if (request->func) {
                    /* NUL-terminate the body for the callback; assumes the
                     * buffer has room for one extra byte — TODO confirm at
                     * the allocation site */
                    if (request->dataIn)
                        request->dataIn[request->used] = 0;
                    request->func(responseCode, request->dataIn, request->used, request->uw);
                }
                if (request->dataIn) {
                    free(request->dataIn);
                    request->dataIn = 0;
                }
                if (request->dataOut) {
                    MOLOCH_SIZE_FREE(buffer, request->dataOut);
                }
                if (request->headerList) {
                    curl_slist_free_all(request->headerList);
                }
                MOLOCH_TYPE_FREE(MolochHttpRequest_t, request);

                curl_multi_remove_handle(server->multi, easy);
                curl_easy_cleanup(easy);
                MOLOCH_LOCK(requests);
                server->outstanding--;
                MOLOCH_UNLOCK(requests);
            }
        }
    }
}
/*
 * Source code in here hugely as reported in bug report 651464 by
 * Christopher R. Palmer.
 *
 * Use multi interface to get document over proxy with bad port number.
 * This caused the interface to "hang" in libcurl 7.10.2.
 */
/* Expects the transfer to fail fast: success is running dropping to 0
 * with a pending completion message in the multi queue.
 * NOTE: the easy_*/multi_* helpers are test-harness macros that jump to
 * test_cleanup on failure, so every path below reaches the cleanup. */
int test(char *URL)
{
  CURL *c = NULL;
  int res = 0;
  CURLM *m = NULL;
  fd_set rd, wr, exc;
  int running;

  start_test_timing();

  global_init(CURL_GLOBAL_ALL);

  easy_init(c);

  /* The point here is that there must not be anything running on the given
     proxy port */
  if (libtest_arg2)
    easy_setopt(c, CURLOPT_PROXY, libtest_arg2);
  easy_setopt(c, CURLOPT_URL, URL);
  easy_setopt(c, CURLOPT_VERBOSE, 1L);

  multi_init(m);

  multi_add_handle(m, c);

  for(;;) {
    struct timeval interval;
    int maxfd = -99;

    interval.tv_sec = 1;
    interval.tv_usec = 0;

    fprintf(stderr, "curl_multi_perform()\n");

    multi_perform(m, &running);

    abort_on_test_timeout();

    if(!running) {
      /* This is where this code is expected to reach */
      int numleft;
      CURLMsg *msg = curl_multi_info_read(m, &numleft);
      fprintf(stderr, "Expected: not running\n");
      /* exactly one completion message should be queued, and no more */
      if(msg && !numleft)
        res = TEST_ERR_SUCCESS; /* this is where we should be */
      else
        res = TEST_ERR_FAILURE; /* not correct */
      break; /* done */
    }
    fprintf(stderr, "running == %d\n", running);

    FD_ZERO(&rd);
    FD_ZERO(&wr);
    FD_ZERO(&exc);

    fprintf(stderr, "curl_multi_fdset()\n");

    multi_fdset(m, &rd, &wr, &exc, &maxfd);

    /* At this point, maxfd is guaranteed to be greater or equal than -1. */

    select_test(maxfd+1, &rd, &wr, &exc, &interval);

    abort_on_test_timeout();
  }

test_cleanup:

  /* proper cleanup sequence - type PA */

  curl_multi_remove_handle(m, c);
  curl_multi_cleanup(m);
  curl_easy_cleanup(c);
  curl_global_cleanup();

  return res;
}