// There is currently the possibility of a race condition if a chunk
// upload timed out. It's possible that a second upload succeeds and
// has the chunk marked as "complete", and that the first request then makes
// its way through the queue and marks the chunk as pending again.
// Since we are just about to close the file, we check whether any
// chunks are marked as pending, and if so, we retry them.
void check_for_complete_chunks(vector<File> &files) {
  for (int currCheckNum = 0; currCheckNum < NUM_CHUNK_CHECKS; ++currCheckNum) {
    map<string, JSON> fileDescriptions;
    while (!chunksFinished.empty()) {
      Chunk *c = chunksFinished.consume();
      // Cache file descriptions so we only have to fetch them once per file,
      // not once per chunk.
      if (fileDescriptions.find(c->fileID) == fileDescriptions.end())
        fileDescriptions[c->fileID] = fileDescribe(c->fileID);

      if (!is_chunk_complete(c, fileDescriptions[c->fileID])) {
        // After the chunk was uploaded, it was cleared, removing the data
        // from the buffer. We need to reload it if we're going to upload it again.
        chunksToRead.produce(c);
      }
    }

    // All of the chunks were marked as complete, so exit; it should now be
    // reasonably safe to close the file.
    if (chunksToRead.size() == 0)
      return;

    // Set the totalChunks variable to the number of chunks we're going
    // to retry now plus the number of chunks in the failed queue. The monitor
    // thread will be busy until the size of chunksFinished + chunksFailed
    // equals totalChunks.
    DXLOG(logINFO) << "Retrying " << chunksToRead.size() << " chunks that did not complete.";
    totalChunks = chunksToRead.size() + chunksFailed.size();

    // Read, compress, and upload the chunks which weren't marked as complete.
    createWorkerThreads(files);

    boost::thread monitorThread(monitor);
    monitorThread.join();

    interruptWorkerThreads();
    joinWorkerThreads();
  }

  // We have tried to upload incomplete chunks NUM_CHUNK_CHECKS times!
  // Check whether any chunks are still incomplete, and if so, print a warning.
  map<string, JSON> fileDescriptions;
  while (!chunksFinished.empty()) {
    Chunk *c = chunksFinished.consume();
    // Cache file descriptions so we only have to fetch them once per file,
    // not once per chunk.
    if (fileDescriptions.find(c->fileID) == fileDescriptions.end())
      fileDescriptions[c->fileID] = fileDescribe(c->fileID);

    if (!is_chunk_complete(c, fileDescriptions[c->fileID])) {
      cerr << "Chunk " << c->index << " of file " << c->fileID
           << " did not complete. This file will not be accessible. Please try to upload this file again." << endl;
    }
  }
}
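// For reference, a minimal sketch of what is_chunk_complete could look like,
// assuming the cached describe payload exposes a "parts" object keyed by
// 1-based part index with a per-part "state" field. Those field names and the
// indexing are assumptions about the API response, not confirmed here.
bool is_chunk_complete_sketch(Chunk *c, JSON &fileDescription) {
  // Chunk::index is assumed to be 0-based, while part numbers start at 1.
  string partIndex = boost::lexical_cast<string>(c->index + 1);
  return fileDescription["parts"].has(partIndex) &&
         fileDescription["parts"][partIndex]["state"].get<string>() == "complete";
}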
/* Test to see if the queue is sorted within reasonable limits. */
bool is_sorted(BlockingQueue<timeout_t>& bq) {
  LogContext& log = LogManager::instance().getLogContext("Test", "Scheduler");
  timeout_t last = 0;
  while (!bq.empty()) {
    timeout_t cur = bq.front();
    bq.pop();
    if (cur < last && (last - cur) > 5 MILLIS) { /* we give a 5 ms grace period */
      log.printfln(ERROR, "%ld is less than %ld", cur, last);
      return false;
    }
    last = cur;
  }
  return true;
}
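// The "5 MILLIS" grace period above only parses if MILLIS is a unit macro
// that multiplies a raw count. A minimal sketch, assuming timeout_t counts
// microseconds (the base unit is an assumption):
#define MILLIS * 1000L   /* "5 MILLIS" expands to "5 * 1000L", i.e. 5000 us */
// With this expansion, the check reads (last - cur) > 5 * 1000L: entries may
// be out of order by at most 5 ms before the queue is considered unsorted.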
int testBlockingQueue(void) {
  BlockingQueue<int> testQueue;

  testQueue.push(1);
  testQueue.push(2);
  testQueue.push(3);

  int val = testQueue.pop();
  printf("First value is %d\n", val);
  val = testQueue.pop();
  printf("Second value is %d\n", val);
  val = testQueue.pop();
  printf("Third value is %d\n", val);

  bool isEmpty = testQueue.empty();
  if (isEmpty) {
    printf("Queue is empty.\n");
  } else {
    printf("Queue is not empty.\n");
  }
  return 0;
}
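// For context, a minimal BlockingQueue sketch with the push/pop/empty
// interface the test exercises, built on Boost primitives to match the rest
// of this codebase. The production class (which elsewhere also offers
// produce/consume/setCapacity) almost certainly differs; this is only an
// illustrative assumption.
#include <queue>
#include <boost/thread/mutex.hpp>
#include <boost/thread/condition_variable.hpp>

template <typename T>
class BlockingQueueSketch {
public:
  void push(const T &item) {
    boost::mutex::scoped_lock lock(mutex_);
    queue_.push(item);
    notEmpty_.notify_one(); // wake one consumer blocked in pop()
  }

  T pop() {
    boost::mutex::scoped_lock lock(mutex_);
    while (queue_.empty())
      notEmpty_.wait(lock); // block until a producer pushes
    T item = queue_.front();
    queue_.pop();
    return item;
  }

  bool empty() {
    boost::mutex::scoped_lock lock(mutex_);
    return queue_.empty();
  }

private:
  std::queue<T> queue_;
  boost::mutex mutex_;
  boost::condition_variable notEmpty_;
};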
int main(int argc, char * argv[]) {
  try {
    // Note: Verbose mode logging is enabled (if requested) by opt.parse().
    opt.parse(argc, argv);
  } catch (exception &e) {
    cerr << "Error processing arguments: " << e.what() << endl;
    opt.printHelp(argv[0]);
    return 1;
  }

  if (opt.env()) {
    opt.setApiserverDxConfig(); // needed for 'ua --env' to report project name
    printEnvironmentInfo();
    return 0;
  }

  if (opt.version()) {
    cout << "Upload Agent Version: " << UAVERSION;
#if OLD_KERNEL_SUPPORT
    cout << " (old-kernel-support)";
#endif
    cout << endl
         << "git version: " << DXTOOLKIT_GITVERSION << endl
         << "libboost version: " << (BOOST_VERSION / 100000) << "."
         << ((BOOST_VERSION / 100) % 1000) << "." << (BOOST_VERSION % 100) << endl
         << "libcurl version: " << LIBCURL_VERSION_MAJOR << "."
         << LIBCURL_VERSION_MINOR << "." << LIBCURL_VERSION_PATCH << endl;
    return 0;
  } else if (opt.help() || opt.files.empty()) {
    opt.printHelp(argv[0]);
    return (opt.help()) ? 0 : 1;
  }

  setUserAgentString(); // also sets dx::config::USER_AGENT_STRING()
  DXLOG(logINFO) << "DNAnexus Upload Agent " << UAVERSION << " (git version: " << DXTOOLKIT_GITVERSION << ")";
  DXLOG(logINFO) << "Upload agent's User Agent string: '" << userAgentString << "'";
  DXLOG(logINFO) << "dxcpp's User Agent string: '" << dx::config::USER_AGENT_STRING() << "'";
  DXLOG(logINFO) << opt;

  try {
    opt.setApiserverDxConfig();
    opt.validate();

    /*
     * Check for updates, and terminate execution if necessary. This also
     * has the side effect of verifying that we can connect to the API
     * server, and that the authentication token is valid.
     */
    try {
      checkForUpdates();
    } catch (runtime_error &e) {
      cerr << endl << e.what() << endl;
      return 3;
    }
    if (!opt.doNotResume) {
      disallowDuplicateFiles(opt.files, opt.projects);
    }
  } catch (exception &e) {
    cerr << endl << "ERROR: " << e.what() << endl;
    return 1;
  }

  const bool anyImportAppToBeCalled = (opt.reads || opt.pairedReads || opt.mappings || opt.variants);

  chunksToCompress.setCapacity(opt.compressThreads);
  chunksToUpload.setCapacity(opt.uploadThreads);

  int exitCode = 0;
  try {
    curlInit(); // for curl requests to be made by upload chunk request

    NUMTRIES_g = opt.tries;

    vector<File> files;

    for (unsigned int i = 0; i < opt.files.size(); ++i) {
      DXLOG(logINFO) << "Getting MIME type for local file " << opt.files[i] << "...";
      string mimeType = getMimeType(opt.files[i]);
      DXLOG(logINFO) << "MIME type for local file " << opt.files[i] << " is '" << mimeType << "'.";
      bool toCompress;
      if (!opt.doNotCompress) {
        // (a sketch of one possible isCompressed appears after main())
        bool is_compressed = isCompressed(mimeType);
        toCompress = !is_compressed;
        if (is_compressed)
          DXLOG(logINFO) << "File " << opt.files[i] << " is already compressed, so won't try to compress it any further.";
        else
          DXLOG(logINFO) << "File " << opt.files[i] << " is not compressed, will compress it before uploading.";
      } else {
        toCompress = false;
      }
      if (toCompress) {
        mimeType = "application/x-gzip";
      }
      files.push_back(File(opt.files[i], opt.projects[i], opt.folders[i], opt.names[i],
                           toCompress, !opt.doNotResume, mimeType, opt.chunkSize, i));
      totalChunks += files[i].createChunks(chunksToRead, opt.tries);
      cerr << endl;
    }

    if (opt.waitOnClose) {
      for (unsigned int i = 0; i < files.size(); ++i) {
        files[i].waitOnClose = true;
      }
    }

    // Create folders all at once (instead of one by one, above, where we
    // initialize the File objects).
    createFolders(opt.projects, opt.folders);

    // Take this point as the starting time for program operation
    // (to calculate average transfer speed).
    startTime = std::time(0);

    DXLOG(logINFO) << "Created " << totalChunks << " chunks.";

    createWorkerThreads(files);

    DXLOG(logINFO) << "Creating monitor thread...";
    boost::thread monitorThread(monitor);

    boost::thread uploadProgressThread;
    if (opt.progress) {
      DXLOG(logINFO) << "Creating Upload Progress thread...";
      uploadProgressThread = boost::thread(uploadProgress, boost::ref(files));
    }

    DXLOG(logINFO) << "Joining monitor thread...";
    monitorThread.join();
    DXLOG(logINFO) << "Monitor thread finished.";

    if (opt.progress) {
      DXLOG(logINFO) << "Joining Upload Progress thread...";
      keepShowingUploadProgress = false;
      uploadProgressThread.interrupt();
      uploadProgressThread.join();
      DXLOG(logINFO) << "Upload Progress thread finished.";
    }

    interruptWorkerThreads();
    joinWorkerThreads();

    while (!chunksFailed.empty()) {
      Chunk *c = chunksFailed.consume();
      c->log("Chunk failed", logERROR);
      markFileAsFailed(files, c->fileID);
    }
    if (opt.verbose) {
      cerr << endl;
    }

    for (unsigned int i = 0; i < files.size(); ++i) {
      if (files[i].failed) {
        cerr << "File \"" << files[i].localFile << "\" could not be uploaded." << endl;
      } else {
        cerr << "File \"" << files[i].localFile << "\" was uploaded successfully. Closing..." << endl;
        if (files[i].isRemoteFileOpen) {
          files[i].close();
        }
      }
      // Re-checked here because close() above may mark a file as failed.
      if (files[i].failed)
        files[i].fileID = "failed";
    }

    DXLOG(logINFO) << "Waiting for files to be closed...";
    boost::thread waitOnCloseThread(waitOnClose, boost::ref(files));
    DXLOG(logINFO) << "Joining wait-on-close thread...";
    waitOnCloseThread.join();
    DXLOG(logINFO) << "Wait-on-close thread finished.";

    if (anyImportAppToBeCalled) {
      runImportApps(opt, files);
    }

    for (unsigned i = 0; i < files.size(); ++i) {
      cout << files[i].fileID;
      if (files[i].fileID == "failed")
        exitCode = 1;
      if (anyImportAppToBeCalled) {
        if (files[i].jobID == "failed")
          exitCode = 1;
        cout << "\t" << files[i].jobID;
      }
      cout << endl;
    }
    curlCleanup();

    DXLOG(logINFO) << "Exiting.";
  } catch (bad_alloc &e) {
    boost::call_once(bad_alloc_once, boost::bind(&handle_bad_alloc, e));
  } catch (exception &e) {
    curlCleanup();
    cerr << endl << "ERROR: " << e.what() << endl;
    return 1;
  }

  return exitCode;
}
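// The compression decision in main() hinges on isCompressed(mimeType). A
// minimal sketch of such a predicate, assuming it matches a short list of
// well-known compressed MIME types (the exact list the real function uses is
// an assumption):
bool isCompressed_sketch(const string &mimeType) {
  return mimeType == "application/x-gzip" ||
         mimeType == "application/x-bzip2" ||
         mimeType == "application/zip" ||
         mimeType == "application/x-xz";
}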
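// The comment in check_for_complete_chunks states that the monitor thread is
// busy until chunksFinished + chunksFailed equals totalChunks. A minimal
// sketch of a monitor consistent with that contract; the lock name and the
// 100 ms polling interval are assumptions:
static boost::mutex monitorLock_sketch;

void monitor_sketch() {
  while (true) {
    boost::this_thread::sleep(boost::posix_time::milliseconds(100));
    boost::mutex::scoped_lock lock(monitorLock_sketch);
    if (chunksFinished.size() + chunksFailed.size() == totalChunks)
      return; // every chunk has either finished or failed; workers can stop
  }
}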
/* Returns true if there is at least one item waiting in the work queue. */
bool workPresent(void) {
  return !workQueue.empty();
}