/*
 * Read the HTTP response body from dfp and copy it to output_fd.
 * Handles two framing modes using global state in G:
 *  - G.got_clen set: exactly G.content_len bytes are expected;
 *  - G.chunked set:  "chunked" transfer encoding — a hex chunk length
 *    line precedes each chunk, a zero-length chunk terminates the body.
 * Dies on stream read error. Progress meter is driven unless quiet.
 */
static void NOINLINE retrieve_file_data(FILE *dfp, int output_fd)
{
	char buf[512];

	if (!(option_mask32 & WGET_OPT_QUIET))
		progress_meter(-1); /* -1: start/init the meter */

	if (G.chunked)
		goto get_clen; /* chunked: must read the first chunk length first */

	/* Loops only if chunked */
	while (1) {
		/* Inner loop: copy one body (or one chunk) to output_fd */
		while (1) {
			int n;
			unsigned rdsz;

			rdsz = sizeof(buf);
			if (G.got_clen) {
				/* Don't read past the end of the known-length body/chunk */
				if (G.content_len < (off_t)sizeof(buf)) {
					if ((int)G.content_len <= 0)
						break; /* body/chunk fully consumed */
					rdsz = (unsigned)G.content_len;
				}
			}
			n = safe_fread(buf, rdsz, dfp);
			if (n <= 0) {
				if (ferror(dfp)) {
					/* perror will not work: ferror doesn't set errno */
					bb_error_msg_and_die(bb_msg_read_error);
				}
				break; /* EOF, not an error */
			}
			xwrite(output_fd, buf, n);
#if ENABLE_FEATURE_WGET_STATUSBAR
			G.transferred += n;
#endif
			if (G.got_clen)
				G.content_len -= n;
		}

		if (!G.chunked)
			break; /* non-chunked body: single pass, done */

		/* Chunked: skip the CRLF that terminates the chunk data */
		safe_fgets(buf, sizeof(buf), dfp); /* This is a newline */
 get_clen:
		/* Read the next chunk-length line (hex) */
		safe_fgets(buf, sizeof(buf), dfp);
		G.content_len = STRTOOFF(buf, NULL, 16);
		/* FIXME: error check? */
		if (G.content_len == 0)
			break; /* all done! (zero-length terminating chunk) */
		G.got_clen = 1;
	}

	if (!(option_mask32 & WGET_OPT_QUIET))
		progress_meter(0); /* 0: finish/draw final meter state */
}
int main(int argc, char *argv[]) { char *whisker_file_name, *bar_file_name, *prefix; size_t prefix_len; FILE *fp; Image *bg=0, *image=0; int i,depth; char * movie; /* Process Arguments */ Process_Arguments(argc,argv,Spec,0); { char* paramfile = "default.parameters"; if(Load_Params_File("default.parameters")) { warning( "Could not load parameters from file: %s\n" "Writing %s\n" "\tTrying again\n",paramfile,paramfile); Print_Params_File(paramfile); if(Load_Params_File("default.parameters")) error("\tStill could not load parameters.\n"); } } prefix = Get_String_Arg("prefix"); prefix_len = strlen(prefix); { char *dot = strrchr(prefix,'.'); // Remove any file extension from the prefix if(dot) *dot = 0; } whisker_file_name = (char*) Guarded_Malloc( (prefix_len+32)*sizeof(char), "whisker file name"); bar_file_name = (char*) Guarded_Malloc( (prefix_len+32)*sizeof(char), "bar file name"); memset(whisker_file_name, 0, (prefix_len+32)*sizeof(char) ); memset(bar_file_name , 0, (prefix_len+32)*sizeof(char) ); sprintf( whisker_file_name, "%s.whiskers", prefix ); sprintf( bar_file_name, "%s.bar", prefix ); progress("Loading...\n"); fflush(stdout); movie = Get_String_Arg("movie"); TRY(image = load(movie,0,&depth),ErrorOpen); progress("Done.\n"); // No background subtraction (init to blank) { bg = Make_Image( image->kind, image->width, image->height ); memset(bg->array, 0, bg->width * bg->height ); } Free_Image( image ); #if 0 /* * Bar tracking */ if( !Is_Arg_Matched("--no-bar") ) { double x,y; BarFile *bfile = Bar_File_Open( bar_file_name, "w" ); progress( "Finding bar positions\n" ); for( i=0; i<depth; i++ ) { progress_meter(i, 0, depth-1, 79, "Finding post: [%5d/%5d]",i,depth); image = load(movie,i,NULL); invert_uint8( image ); Compute_Bar_Location( image, &x, // Output: x position &y, // Output: y position 15, // Neighbor distance 15, // minimum contour length 0, // minimum intensity of interest 255, // maximum intentity of interest 10.0, // minimum radius of interest 30.0 );// 
maximum radius of interest Bar_File_Append_Bar( bfile, Bar_Static_Cast(i,x,y) ); Free_Image(image); } Bar_File_Close( bfile ); } #endif /* * Trace whisker segments */ //if( !Is_Arg_Matched("--no-whisk") ) { int nTotalSegs = 0; Whisker_Seg *wv; int wv_n; WhiskerFile wfile = Whisker_File_Open(whisker_file_name,"whiskbin1","w"); if( !wfile ) { fprintf(stderr, "Warning: couldn't open %s for writing.", whisker_file_name); } else { //int step = (int) pow(10,round(log10(depth/100))); for( i=0; i<depth; i++ ) //for( i=450; i<460; i++ ) //for( i=0; i<depth; i+= step ) { int k; TRY(image=load(movie,i,NULL),ErrorRead); progress_meter(i, 0, depth, 79, "Finding segments: [%5d/%5d]",i,depth-1); wv = find_segments(i, image, bg, &wv_n); // Thrashing heap k = Remove_Overlapping_Whiskers_One_Frame( wv, wv_n, image->width, image->height, 2.0, // scale down by this 2.0, // distance threshold 0.5 ); // significant overlap fraction Whisker_File_Append_Segments(wfile, wv, k); Free_Whisker_Seg_Vec( wv, wv_n ); Free_Image(image); } printf("\n"); Whisker_File_Close(wfile); } } load(movie,-1,NULL); // Close (and free) if(bg) Free_Image( bg ); return 0; ErrorRead: load(movie,-1,NULL); // Close (and free) if(bg) Free_Image( bg ); error("Could not read frame %d from %s"ENDL,i,movie); return 1; ErrorOpen: error("Could not open %s"ENDL,movie); return 2; }
/* Finish the tftp progress meter, but only if it was ever started. */
static void tftp_progress_done(void)
{
	if (!G.pmt.inited)
		return; /* meter never initialized - nothing to finalize */
	progress_meter(0);
}
/* Start the tftp transfer progress meter (-1 = start, mirroring the
 * 0 = finish call in tftp_progress_done). */
static void tftp_progress_init(void)
{
	progress_meter(-1);
}
/*
 * Read the HTTP response body from dfp and copy it to G.output_fd,
 * driving the progress meter and (optionally) a download timeout.
 * Framing, via global state in G:
 *  - G.got_clen set: exactly G.content_len bytes are expected;
 *  - G.chunked set:  "chunked" transfer encoding — a hex length line
 *    precedes each chunk, a zero-length chunk terminates the body.
 * With STATUSBAR/TIMEOUT enabled the socket is switched to nonblocking
 * mode and polled so the meter can "bump" while stalled — see the
 * EAGAIN/clearerr comments below for the resulting subtleties.
 */
static void NOINLINE retrieve_file_data(FILE *dfp)
{
#if ENABLE_FEATURE_WGET_STATUSBAR || ENABLE_FEATURE_WGET_TIMEOUT
# if ENABLE_FEATURE_WGET_TIMEOUT
	unsigned second_cnt;
# endif
	struct pollfd polldata;

	polldata.fd = fileno(dfp);
	polldata.events = POLLIN | POLLPRI;
#endif
	progress_meter(PROGRESS_START);

	if (G.chunked)
		goto get_clen; /* must read the first chunk length before any data */

	/* Loops only if chunked */
	while (1) {
#if ENABLE_FEATURE_WGET_STATUSBAR || ENABLE_FEATURE_WGET_TIMEOUT
		/* Must use nonblocking I/O, otherwise fread will loop
		 * and *block* until it reads full buffer,
		 * which messes up progress bar and/or timeout logic.
		 * Because of nonblocking I/O, we need to dance
		 * very carefully around EAGAIN. See explanation at
		 * clearerr() call.
		 */
		ndelay_on(polldata.fd);
#endif
		/* Inner loop: copy one body (or one chunk) to G.output_fd */
		while (1) {
			int n;
			unsigned rdsz;

			rdsz = sizeof(G.wget_buf);
			if (G.got_clen) {
				/* Don't read past the end of the known-length body/chunk */
				if (G.content_len < (off_t)sizeof(G.wget_buf)) {
					if ((int)G.content_len <= 0)
						break; /* body/chunk fully consumed */
					rdsz = (unsigned)G.content_len;
				}
			}
#if ENABLE_FEATURE_WGET_STATUSBAR || ENABLE_FEATURE_WGET_TIMEOUT
# if ENABLE_FEATURE_WGET_TIMEOUT
			second_cnt = G.timeout_seconds;
# endif
			/* Wait (in 1-second slices) until data arrives or we time out */
			while (1) {
				if (safe_poll(&polldata, 1, 1000) != 0)
					break; /* error, EOF, or data is available */
# if ENABLE_FEATURE_WGET_TIMEOUT
				if (second_cnt != 0 && --second_cnt == 0) {
					progress_meter(PROGRESS_END);
					bb_error_msg_and_die("download timed out");
				}
# endif
				/* Needed for "stalled" indicator */
				progress_meter(PROGRESS_BUMP);
			}

			/* fread internally uses read loop, which in our case
			 * is usually exited when we get EAGAIN.
			 * In this case, libc sets error marker on the stream.
			 * Need to clear it before next fread to avoid possible
			 * rare false positive ferror below. Rare because usually
			 * fread gets more than zero bytes, and we don't fall
			 * into if (n <= 0) ...
			 */
			clearerr(dfp);
			errno = 0;
#endif
			n = fread(G.wget_buf, 1, rdsz, dfp);
			/* man fread:
			 * If error occurs, or EOF is reached, the return value
			 * is a short item count (or zero).
			 * fread does not distinguish between EOF and error.
			 */
			if (n <= 0) {
#if ENABLE_FEATURE_WGET_STATUSBAR || ENABLE_FEATURE_WGET_TIMEOUT
				if (errno == EAGAIN) /* poll lied, there is no data? */
					continue; /* yes */
#endif
				if (ferror(dfp))
					bb_perror_msg_and_die(bb_msg_read_error);
				break; /* EOF, not error */
			}

			xwrite(G.output_fd, G.wget_buf, n);
#if ENABLE_FEATURE_WGET_STATUSBAR
			G.transferred += n;
			progress_meter(PROGRESS_BUMP);
#endif
			if (G.got_clen) {
				G.content_len -= n;
				if (G.content_len == 0)
					break;
			}
		}
#if ENABLE_FEATURE_WGET_STATUSBAR || ENABLE_FEATURE_WGET_TIMEOUT
		clearerr(dfp);
		ndelay_off(polldata.fd); /* else fgets can get very unhappy */
#endif
		if (!G.chunked)
			break; /* non-chunked body: single pass, done */

		fgets_and_trim(dfp); /* Eat empty line */
 get_clen:
		/* Read the next chunk-length line (hex) into G.wget_buf */
		fgets_and_trim(dfp);
		G.content_len = STRTOOFF(G.wget_buf, NULL, 16);
		/* FIXME: error check? */
		if (G.content_len == 0)
			break; /* all done! (zero-length terminating chunk) */
		G.got_clen = 1;
	}

	/* If -c failed, we restart from the beginning,
	 * but we do not truncate file then, we do it only now, at the end.
	 * This lets user to ^C if his 99% complete 10 GB file download
	 * failed to restart *without* losing the almost complete file.
	 */
	{
		off_t pos = lseek(G.output_fd, 0, SEEK_CUR);
		if (pos != (off_t)-1)
			ftruncate(G.output_fd, pos);
	}

	/* Draw full bar and free its resources */
	G.chunked = 0;  /* makes it show 100% even for chunked download */
	G.got_clen = 1; /* makes it show 100% even for download of (formerly) unknown size */
	progress_meter(PROGRESS_END);
}
/*
 * Read the HTTP response body from dfp and copy it to output_fd,
 * driving the progress meter and (optionally) a download timeout.
 * Framing, via global state in G:
 *  - G.got_clen set: exactly G.content_len bytes are expected;
 *  - G.chunked set:  "chunked" transfer encoding — a hex length line
 *    precedes each chunk, a zero-length chunk terminates the body.
 *
 * Fix: the timeout path used bb_perror_msg_and_die(), but safe_poll()
 * returning 0 (timeout) does not set errno, so the perror suffix was
 * meaningless (typically ": Success") — same reasoning as the existing
 * "perror will not work" comment on the ferror path below.
 */
static void NOINLINE retrieve_file_data(FILE *dfp, int output_fd)
{
	char buf[512];
#if ENABLE_FEATURE_WGET_STATUSBAR || ENABLE_FEATURE_WGET_TIMEOUT
# if ENABLE_FEATURE_WGET_TIMEOUT
	unsigned second_cnt;
# endif
	struct pollfd polldata;

	polldata.fd = fileno(dfp);
	polldata.events = POLLIN | POLLPRI;
	ndelay_on(polldata.fd);
#endif
	progress_meter(PROGRESS_START);

	if (G.chunked)
		goto get_clen; /* must read the first chunk length before any data */

	/* Loops only if chunked */
	while (1) {
		/* Inner loop: copy one body (or one chunk) to output_fd */
		while (1) {
			int n;
			unsigned rdsz;

			rdsz = sizeof(buf);
			if (G.got_clen) {
				/* Don't read past the end of the known-length body/chunk */
				if (G.content_len < (off_t)sizeof(buf)) {
					if ((int)G.content_len <= 0)
						break; /* body/chunk fully consumed */
					rdsz = (unsigned)G.content_len;
				}
			}
#if ENABLE_FEATURE_WGET_STATUSBAR || ENABLE_FEATURE_WGET_TIMEOUT
# if ENABLE_FEATURE_WGET_TIMEOUT
			second_cnt = G.timeout_seconds;
# endif
			/* Wait (in 1-second slices) until data arrives or we time out */
			while (1) {
				if (safe_poll(&polldata, 1, 1000) != 0)
					break; /* error, EOF, or data is available */
# if ENABLE_FEATURE_WGET_TIMEOUT
				if (second_cnt != 0 && --second_cnt == 0) {
					progress_meter(PROGRESS_END);
					/* poll timeout doesn't set errno: plain message, not perror */
					bb_error_msg_and_die("download timed out");
				}
# endif
				/* Needed for "stalled" indicator */
				progress_meter(PROGRESS_BUMP);
			}
#endif
			n = safe_fread(buf, rdsz, dfp);
			if (n <= 0) {
				if (ferror(dfp)) {
					/* perror will not work: ferror doesn't set errno */
					bb_error_msg_and_die(bb_msg_read_error);
				}
				break; /* EOF, not error */
			}
			xwrite(output_fd, buf, n);
#if ENABLE_FEATURE_WGET_STATUSBAR
			G.transferred += n;
			progress_meter(PROGRESS_BUMP);
#endif
			if (G.got_clen)
				G.content_len -= n;
		}

		if (!G.chunked)
			break; /* non-chunked body: single pass, done */

		safe_fgets(buf, sizeof(buf), dfp); /* This is a newline */
 get_clen:
		/* Read the next chunk-length line (hex) */
		safe_fgets(buf, sizeof(buf), dfp);
		G.content_len = STRTOOFF(buf, NULL, 16);
		/* FIXME: error check? */
		if (G.content_len == 0)
			break; /* all done! (zero-length terminating chunk) */
		G.got_clen = 1;
	}

	progress_meter(PROGRESS_END);
}
int main(int argc, char **argv) { try { std::string progname = argv[0]; // Process commandline options int argn; bool help = false; std::string outdir; std::string configfilename; std::string delta; khGetopt options; options.flagOpt("help", help); options.flagOpt("?", help); options.opt("output", outdir); options.opt("config", configfilename, &khGetopt::FileExists); options.opt("delta", delta, &khGetopt::DirExists); if (!options.processAll(argc, argv, argn)) usage(progname); if (help) usage(progname); if (argn != argc) usage(progname); // Validate commandline options if (!outdir.size()) { usage(progname, "No output specified"); } if (!configfilename.size()) { usage(progname, "No --config specified"); } if (delta.size()) { notify(NFY_FATAL, "--delta not supported yet."); } geFilePool file_pool; geindexgen::UnifiedConfig config; if (!config.Load(configfilename)) { usage(progname, "Unable to load %s", configfilename.c_str()); } // Print the input file sizes for diagnostic log file info. // Here we want to take in the sizes of the indexes from the config file. 
std::vector<std::string> input_files; input_files.push_back(configfilename); for (std::vector<geindexgen::UnifiedConfig::Entry>::const_iterator entry = config.indexes_.begin(); entry != config.indexes_.end(); ++entry) { input_files.push_back(entry->indexdir_); } khPrintFileSizes("Input File Sizes", input_files); // create the writer geindex::UnifiedWriter writer(file_pool, outdir, geindex::UnifiedWriter::FullIndexMode, kUnifiedType); geindex::UnifiedWriter::ReadBuffer tmp_read_buf; // pre-populate this for delta index operations // leave empty for new indexes std::map<std::string, uint32> unified_filemap; // progress meter - will need to be modified a bit when // incremental updates are implemented (TODO: mikegoss) khProgressMeter progress_meter(0, "entries"); // will be filled in with the mapping from old file numbers to new // file numbers std::vector<std::vector<uint32> > translated_filenums; translated_filenums.resize(config.indexes_.size()); std::map<std::string, uint32> dated_imagery_channels_map; typedef geindex::AdaptingTraverserBase<geindex::UnifiedBucket>::MergeEntry MergeEntryType; typedef Merge<MergeEntryType> MergeType; MergeType merger("UnifiedIndex Merger"); uint source_id = 0; for (std::vector<geindexgen::UnifiedConfig::Entry>::const_iterator entry = config.indexes_.begin(); entry != config.indexes_.end(); ++entry, ++source_id) { std::string entry_type = entry->type_; geindex::TypedEntry::TypeEnum type; FromString(entry->type_, type); if (type == geindex::TypedEntry::Imagery || type == geindex::TypedEntry::DatedImagery) { // For DatedImagery, keep track of the date to channel map. 
dated_imagery_channels_map[entry->date_string_] = entry->channel_id_; typedef geindex::Traverser<geindex::BlendBucket> BlendTraverser; khTransferGuard<BlendTraverser> traverser = TransferOwnership(new BlendTraverser( "ImageryTraverser", file_pool, entry->indexdir_)); PopulateFilenumTranslations(writer, unified_filemap, translated_filenums[source_id], progress_meter, traverser->GetIndexBundleReader()); merger.AddSource( TransferOwnership( new geindex::UnifiedAdaptingTraverser<BlendTraverser>( "ImageryAdaptingTraverser", geindex::TypedEntry::Imagery, traverser, entry->channel_id_, entry->version_))); } else if (type == geindex::TypedEntry::Terrain) { #if 1 if (!PacketFile::IsPacketFile(entry->indexdir_)) { throw khException( kh::tr("INTERNAL ERROR: Terrain path must be a packetfile: %1") .arg(entry->indexdir_)); } // register packetfile PopulatePacketfileFilenumTranslations(writer, unified_filemap, translated_filenums[source_id], progress_meter, entry->indexdir_); // make and adapting traverser merger.AddSource( TransferOwnership( new geindex::UnifiedPacketFileAdaptingTraverser( file_pool, "TerrainPacketAdaptingTraverser", geindex::TypedEntry::Terrain, entry->version_, entry->channel_id_, entry->indexdir_))); #else typedef geindex::Traverser<geindex::CombinedTmeshBucket> CombinedTmeshTraverser; khTransferGuard<CombinedTmeshTraverser> traverser = TransferOwnership(new CombinedTmeshTraverser( "TerrainTraverser", file_pool, entry->indexdir_)); PopulateFilenumTranslations(writer, unified_filemap, translated_filenums[source_id], progress_meter, traverser->GetIndexBundleReader()); merger.AddSource( TransferOwnership( new geindex::UnifiedAdaptingTraverser<CombinedTmeshTraverser>( "TerrainAdaptingTraverser", geindex::TypedEntry::Terrain, traverser, entry->channel_id_, entry->version_))); #endif } else if (type == geindex::TypedEntry::VectorGE) { typedef geindex::Traverser<geindex::VectorBucket> VectorTraverser; khTransferGuard<VectorTraverser> traverser = 
TransferOwnership(new VectorTraverser( "VectorTraverser", file_pool, entry->indexdir_)); PopulateFilenumTranslations(writer, unified_filemap, translated_filenums[source_id], progress_meter, traverser->GetIndexBundleReader()); merger.AddSource( TransferOwnership( new geindex::UnifiedAdaptingTraverser<VectorTraverser>( "VectorGEAdaptingTraverser", geindex::TypedEntry::VectorGE, traverser, 0 /* unused override channel id */, 0 /* unused override version */))); } else if (type == geindex::TypedEntry::VectorMaps) { if (!PacketFile::IsPacketFile(entry->indexdir_)) { throw khException( kh::tr("INTERNAL ERROR: VectorMaps " "path must be a packetfile: %1") .arg(entry->indexdir_)); } // register packetfile PopulatePacketfileFilenumTranslations(writer, unified_filemap, translated_filenums[source_id], progress_meter, entry->indexdir_); // make and adapting traverser that uses the packetfile // number from above merger.AddSource( TransferOwnership( new geindex::UnifiedPacketFileAdaptingTraverser( file_pool, "VectorMapsAdaptingTraverser", geindex::TypedEntry::VectorMaps, entry->version_, entry->channel_id_, entry->indexdir_))); } else if (type == geindex::TypedEntry::VectorMapsRaster) { if (!PacketFile::IsPacketFile(entry->indexdir_)) { throw khException( kh::tr("INTERNAL ERROR: VectorMapsRaster " "path must be a packetfile: %1") .arg(entry->indexdir_)); } // register packetfile PopulatePacketfileFilenumTranslations(writer, unified_filemap, translated_filenums[source_id], progress_meter, entry->indexdir_); // make and adapting traverser that uses the packetfile // number from above merger.AddSource( TransferOwnership( new geindex::UnifiedPacketFileAdaptingTraverser( file_pool, "VectorMapsRasterAdaptingTraverser", geindex::TypedEntry::VectorMapsRaster, entry->version_, entry->channel_id_, entry->indexdir_))); } else if (type == geindex::TypedEntry::QTPacket || type == geindex::TypedEntry::QTPacket2) { if (!PacketFile::IsPacketFile(entry->indexdir_)) { throw khException( 
kh::tr("INTERNAL ERROR: QTPacket path must be a packetfile: %1") .arg(entry->indexdir_)); } // register packetfile PopulatePacketfileFilenumTranslations(writer, unified_filemap, translated_filenums[source_id], progress_meter, entry->indexdir_); // make and adapting traverser that uses the packetfile // number from above merger.AddSource( TransferOwnership( new geindex::UnifiedPacketFileAdaptingTraverser( file_pool, "QTPacketAdaptingTraverser", type, entry->version_, 0 /* channel */, entry->indexdir_))); } else if (type == geindex::TypedEntry::Unified) { typedef geindex::Traverser<geindex::UnifiedBucket> UnifiedTraverser; khTransferGuard<UnifiedTraverser> traverser = TransferOwnership(new UnifiedTraverser( "VectorMapsTraverser", file_pool, entry->indexdir_)); PopulateFilenumTranslations(writer, unified_filemap, translated_filenums[source_id], progress_meter, traverser->GetIndexBundleReader()); merger.AddSource(traverser); } else { throw khException( kh::tr("INTERNAL ERROR: Unknown input type: %1") .arg(entry->type_)); } } // perform the merge merger.Start(); do { const MergeEntryType &slot = merger.Current(); uint source_id = merger.CurrentSourceId(); for (uint i = 0; i < slot.size(); ++i) { geindex::TypedEntry entry = slot[i]; entry.dataAddress.fileNum = translated_filenums[source_id][entry.dataAddress.fileNum]; writer.Put(slot.qt_path(), entry, tmp_read_buf); } progress_meter.incrementDone(slot.size()); } while (merger.Advance()); merger.Close(); writer.Close(); // Write the dated_imagery_channels.txt // only if we have more than 1 dated imagery layer. 
if (dated_imagery_channels_map.size() > 1) { std::string dated_channels_file_name = outdir + "/" + kDatedImageryChannelsFileName; notify(NFY_DEBUG, "Writing Dated Imagery channels to:\n %s", dated_channels_file_name.c_str()); FILE* fp = fopen(dated_channels_file_name.c_str(), "w"); std::map<std::string, uint32>::const_iterator iter = dated_imagery_channels_map.begin(); for (; iter != dated_imagery_channels_map.end(); ++iter) { fprintf(fp, "%s %d\n", iter->first.c_str(), iter->second); } fclose(fp); } // On successful completion, print out the output file sizes. std::vector<std::string> output_files; output_files.push_back(outdir); khPrintFileSizes("Output File Sizes", output_files); } catch (const khAbortedException &e) { notify(NFY_FATAL, "Unable to proceed: See previous warnings"); } catch (const std::exception &e) { notify(NFY_FATAL, "%s", e.what()); } catch (...) { notify(NFY_FATAL, "Unknown error"); } return 0; }