/*----------------------------------------------------------------------
|   Test1
+---------------------------------------------------------------------*/
static bool
Test1(PLT_TaskManager* task_manager, const char* url, NPT_Size& size)
{
    NPT_LOG_INFO("########### TEST 1 ######################");

    // Download the URL straight into an in-memory buffer and report the
    // number of bytes received through `size`.
    NPT_MemoryStreamReference mem_buffer(new NPT_MemoryStream());
    NPT_OutputStreamReference sink(mem_buffer);
    PLT_Downloader downloader(task_manager, url, sink);
    downloader.Start();

    // Poll the downloader until it settles on success or failure.
    for (;;) {
        switch (downloader.GetState()) {
            case PLT_DOWNLOADER_ERROR:
                return false;

            case PLT_DOWNLOADER_SUCCESS:
                size = mem_buffer->GetDataSize();
                return true;

            default:
                // Still in progress: wait 10 ms before polling again.
                NPT_System::Sleep(NPT_TimeInterval(0, 10000));
                break;
        }
    }
}
/** \brief Create a Zip archive from the specified FileCollection. * * This function is expected to be used with a DirectoryCollection * that you created to save the collection in an archive. * * \param[in,out] os The output stream where the Zip archive is saed. * \param[in] collection The collection to save in this output stream. * \param[in] zip_comment The global comment of the Zip archive. */ void ZipFile::saveCollectionToArchive(std::ostream & os, FileCollection & collection, std::string const & zip_comment) { try { ZipOutputStream output_stream(os); output_stream.setComment(zip_comment); FileEntry::vector_t entries(collection.entries()); for(auto it(entries.begin()); it != entries.end(); ++it) { output_stream.putNextEntry(*it); // get an InputStream if available (i.e. directories do not have an input stream) if(!(*it)->isDirectory()) { FileCollection::stream_pointer_t is(collection.getInputStream((*it)->getName())); if(is) { output_stream << is->rdbuf(); } } } // clean up mantually so we can get any exception // (so we avoid having exceptions gobbled by the destructor) output_stream.closeEntry(); output_stream.finish(); output_stream.close(); } catch(...) { os.setstate(std::ios::failbit); throw; } }
/*---------------------------------------------------------------------- | Test3 +---------------------------------------------------------------------*/ static bool Test3(PLT_TaskManager* task_manager, NPT_HttpUrl url, PLT_RingBufferStreamReference& ringbuffer_stream, NPT_Size& size) { NPT_LOG_INFO("########### TEST 3 ######################"); /* reset output param first */ size = 0; NPT_MemoryStreamReference memory_stream(new NPT_MemoryStream()); NPT_OutputStreamReference output_stream(memory_stream); PLT_Downloader downloader(task_manager, url, output_stream); downloader.Start(); /* asynchronously write onto ring buffer stream */ task_manager->StartTask(new RingBufferWriterTask(ringbuffer_stream)); /* start pulling data */ while (1) { switch(downloader.GetState()) { case PLT_DOWNLOADER_SUCCESS: size = memory_stream->GetDataSize(); return true; case PLT_DOWNLOADER_ERROR: return false; default: NPT_System::Sleep(NPT_TimeInterval(.1f)); break; } }; return false; }
// Dump the job's status line and output blob to "<target_dir>/<job_id>.out".
// The first line records job_status, ret_code and (when present) error_msg;
// the remainder of the file is the raw job output streamed from NetCache.
// Returns the path of the file that was written.
string CNetScheduleJobSerializer::SaveJobOutput(
        CNetScheduleAPI::EJobStatus job_status,
        const string& target_dir,
        CNetCacheAPI& nc_api)
{
    string out_path = CDirEntry::ConcatPath(target_dir, m_Job.job_id + ".out");

    CNcbiOfstream os(out_path.c_str(), CNcbiOfstream::binary);

    // Header line: status, return code and an optional quoted error message.
    os << "job_status=" << CNetScheduleAPI::StatusToString(job_status) <<
            " ret_code=" << m_Job.ret_code;
    if (!m_Job.error_msg.empty())
        os << " error_msg=\"" << NStr::PrintableString(m_Job.error_msg) << '"';
    os << NcbiEndl;

    // Body: copy the job output (inline string or NetCache blob) verbatim.
    CStringOrBlobStorageReader reader(m_Job.output, nc_api);
    CRStream blob_istream(&reader);
    NcbiStreamCopy(os, blob_istream);

    return out_path;
}
// Flush all buffered text: first the xmlTextStream layer's own buffer,
// then the underlying C stdio stream, and finally the attached log file
// (when one exists) so all three sinks are in sync on disk.
// NOTE(review): not async-signal-safe — see the wished-for assert below.
void flush() {
  // once we can determine whether we are in a signal handler, we
  // should add the following assert here:
  // assert(xxxxxx, "can not flush buffer inside signal handler");
  xmlTextStream::flush();
  fflush(output_stream());
  if (has_log_file()) _log_file->flush();
}
// Prompt the user for a .dat file and export the currently displayed data:
// either the oscilloscope trace (time/voltage pairs) or the FFT spectrum
// (frequency/voltage pairs), as tab-separated text with a header line.
void App::OnSave(wxCommandEvent& event)
{
    wxFileDialog saveFileDialog(this->GetTopWindow(), _("save in dat file"), "", "",_("dat file (*.dat)|*.dat"), wxFD_SAVE|wxFD_OVERWRITE_PROMPT);
    if (saveFileDialog.ShowModal() == wxID_CANCEL)
        return; // the user changed idea...

    // save the current contents in the file;
    wxString path = saveFileDialog.GetPath();
    // Ensure the chosen name carries the .dat extension.
    if(!path.EndsWith(".dat"))
        path.append(".dat");

    // open file
    wxFileOutputStream output_stream(path);
    if (!output_stream.IsOk())
    {
        wxLogError(_("Save data as '%s' failed."), saveFileDialog.GetPath());
        return;
    }

    // stop measurement
    if(mMeasure)
        mMeasure=false;

    // save oscilloscope or fft data depending on which page is active
    switch(mPage)
    {
    case PAGE_OSZI:
    {
        // 11-byte header includes the trailing newline.
        output_stream.Write("t[ms]\tU[v]\n",11);
        double * x = mOsziPage->Scale();
        double * y = mOsziPage->Data();
        for(unsigned int i =0;i<mDB.mValues.Elements();i++)
        {
            wxString Line;
            Line.Printf(wxT("%f\t%f\n"),x[i],y[i]);
            // NOTE(review): Line.data() with Line.size() assumes a narrow
            // (ANSI) wxWidgets build; on a Unicode build data() yields
            // wchar_t* and this would write the wrong bytes — confirm.
            output_stream.Write( Line.data(),Line.size());
        }
        break;
    }
    case PAGE_FFT:
    {
        output_stream.Write("f[Hz]\tU[v]\n",11);
        // NOTE(review): variable-length array is a GCC extension, not
        // standard C++; consider std::vector if portability matters.
        double out[mDB.mValues.Elements()/2+1];
        FFTPanel::compute(mDB.mValues.Elements(),mDB.mValues.GetData(),out);
        // Bin width in Hz (mEffSampleTime presumably in ms — verify).
        double deltaf = 1000.0/(mDB.mEffSampleTime *mDB.mValues.Elements());
        for(unsigned int k = 0; k <= mDB.mValues.Elements()/2; ++k)
        {
            wxString Line;
            Line.Printf(wxT("%f\t%f\n"),deltaf*(double)k,out[k]);
            output_stream.Write( Line.data(),Line.size());
        }
        break;
    }
    }
}
/// Save the model parameters to a CSV file named "bw.csv" under outputDir,
/// one value of W per line (num_features * num_fields *
/// parameters.num_factors * 2 entries in total).
/// NOTE(review): outputDir is used as a plain string prefix, so it must end
/// with a path separator for the file to land inside the directory —
/// confirm with callers.
void FFM::saveModel(std::string outputDir) const
{
    // c_str() already yields a const char*; the previous C-style (char*)
    // cast discarded constness for no reason and has been removed.
    std::ofstream output_stream((outputDir + "bw.csv").c_str(), std::ios::out);

    // Hoist the loop bound: it is loop-invariant, and recomputing the
    // product on every iteration obscured the intent.
    const int total = num_features * num_fields * parameters.num_factors * 2;
    for (int i = 0; i < total; i++)
    {
        // '\n' instead of std::endl: no per-line flush; close() below
        // flushes once at the end.
        output_stream << W[i] << '\n';
    }
    output_stream.close();
}
// Write the editor's current plain-text contents to file_name and, on a
// successful open, make that file the document's associated path.
// A failed open leaves everything untouched (silent no-op, as before).
void PolicyEditor::saveToFile(QString file_name){
    QFile out(file_name);
    if (out.open(QIODevice::WriteOnly)) {
        QTextStream writer(&out);
        writer << toPlainText();
        out.close();
        setFile(file_name);
    }
}
void DrawWindow::on_pushButton_generateProgram_clicked() { QString output_filename = QFileDialog::getSaveFileName( this, "Choose location...", "Auton.c", "RobotC programs (*.c)"); QProgressDialog* write_progress = new QProgressDialog( "Writing program...", QString(), 0, 15, this); write_progress->setMinimumDuration(500); write_progress->setFixedSize(280, 75); write_progress->setWindowTitle("Generating program..."); write_progress->exec(); QFile output_program(output_filename); output_program.open(QIODevice::ReadWrite | QIODevice::Text); QTextStream output_stream(&output_program); write_progress->setValue(1); output_stream << SetupWindow::read_file("code/controller_config.txt"); write_progress->setValue(2); output_stream << "\n"; output_stream << "#include \"JoystickDriver.c\"\n\n"; write_progress->setValue(3); output_stream << SetupWindow::read_file("code/additional_includes.txt"); write_progress->setValue(4); output_stream << "\n"; output_stream << canned_declares; output_stream << "\n"; write_progress->setValue(5); output_stream << SetupWindow::read_file("code/misc_declare.txt"); write_progress->setValue(6); output_stream << "\ntask main()\n{\n"; write_progress->setValue(7); output_stream << SetupWindow::read_file("code/misc_init.txt"); write_progress->setValue(8); output_stream << "\n\twaitForStart();\n\n"; write_progress->setValue(9); output_stream << list_history->getCalls(); output_stream << "}\n\n"; write_progress->setValue(10); output_stream << SetupWindow::read_file("code/definition_move.txt"); output_stream << "\n"; write_progress->setValue(11); output_stream << SetupWindow::read_file("code/definition_turn.txt"); write_progress->setValue(12); output_stream << "\n"; output_stream << canned_definitions; output_stream << "\n"; write_progress->setValue(13); output_stream << SetupWindow::read_file("code/misc__define.txt"); output_stream << "\n"; write_progress->setValue(14); output_stream.flush(); write_progress->setValue(15); }
// ****************************************************************************************** // Copy a template from location <template_path> to location <output_path> and replace package name // ****************************************************************************************** bool ConfigurationFilesWidget::copyTemplate( const std::string& template_path, const std::string& output_path ) { // Check if template strings have been loaded yet if( template_strings_.empty() ) { loadTemplateStrings(); } // Error check file if( ! fs::is_regular_file( template_path ) ) { ROS_ERROR_STREAM( "Unable to find template file " << template_path ); return false; } // Load file std::ifstream template_stream( template_path.c_str() ); if( !template_stream.good() ) // File not found { ROS_ERROR_STREAM( "Unable to load file " << template_path ); return false; } // Load the file to a string using an efficient memory allocation technique std::string template_string; template_stream.seekg(0, std::ios::end); template_string.reserve(template_stream.tellg()); template_stream.seekg(0, std::ios::beg); template_string.assign( (std::istreambuf_iterator<char>(template_stream)), std::istreambuf_iterator<char>() ); template_stream.close(); // Replace keywords in string ------------------------------------------------------------ for(int i = 0; i < template_strings_.size(); ++i) { boost::replace_all(template_string, template_strings_[i].first, template_strings_[i].second); } // Save string to new location ----------------------------------------------------------- std::ofstream output_stream( output_path.c_str(), std::ios_base::trunc ); if( !output_stream.good() ) { ROS_ERROR_STREAM( "Unable to open file for writing " << output_path ); return false; } output_stream << template_string.c_str(); output_stream.close(); return true; // file created successfully }
// Destructor: persist the in-memory configuration to the physical ini file
// (when the target can be opened for writing) and release the owned
// ini object.
znModel::~znModel()
{
    wxLogDebug(wxT("<<< znModel::~znModel() >>>"));

    // Save all options into a physical ini file.
    wxFileOutputStream ini_out(m_ini_file_name);
    if (ini_out.IsOk())
        m_ini_file->Save(ini_out);

    delete m_ini_file;
}
/* Parse yara output and return a vector containing the matched rules as string */
vector<string> YaraHeuristic::parseYaraOutput(string output){
    vector<string> rules;
    istringstream lines(output);
    string line;
    // Each yara output line begins with the rule name; keep that first token.
    while(getline(lines, line)){
        try{
            string rule_name = Helper::split(line,' ').at(0);
            MYINFO("Adding matched Yara rule %s",rule_name.c_str());
            rules.push_back(rule_name);
        }
        catch (const std::out_of_range&){
            // Line produced no tokens (e.g. empty) — skip it.
        }
    }
    return rules;
}
void carl::print_stats_to_file(std::vector<double>& vec_data, const std::string filename) { std::ofstream output_stream(filename,std::ofstream::app); libMesh::StatisticsVector<double> statistics_vec(vec_data.size(),0); for(unsigned int iii = 0; iii < vec_data.size(); ++iii) { statistics_vec[iii] = vec_data[iii]; } output_stream << statistics_vec.minimum() << " " << statistics_vec.maximum() << " " << statistics_vec.mean() << " " << statistics_vec.median() << " " << statistics_vec.stddev() << std::endl; output_stream.close(); };
// // Visit_Event // void Stub_Generator:: Visit_Event (const CHAOS::Event & event) { std::string name (event.name ()); std::string fq_name (CUTS_BE_CPP::fq_type (event)); this->header_ << this->export_macro_ << " bool operator <<= (" << name << " &, const ::CUTS_OSPL" << fq_name << " & );" << this->export_macro_ << " bool operator >>= (const " << name << " &, ::CUTS_OSPL" << fq_name << " & );" << std::endl; std::vector <CHAOS::Member> members = event.Member_children (); this->source_ << "bool operator <<= (" << name << " & corba, const ::CUTS_OSPL" << fq_name << " & dds)" << "{"; Input_Stream_Generator input_stream (this->source_, false); for (auto member : members) member.Accept (input_stream); this->source_ << "return true;" << "}" << "bool operator >>= (const " << name << " & corba, ::CUTS_OSPL" << fq_name << " & dds)" << "{"; Output_Stream_Generator output_stream (this->source_, false); for (auto member : members) member.Accept (output_stream); this->source_ << "return true;" << "}"; this->events_.insert (event); }
void create_phat_filtration( const std::string& input_filename, bool dualize, int64_t upper_dim, const std::string& output_filename ) { Complex complex; complex.load_binary( input_filename, upper_dim ); dipha::data_structures::distributed_vector< int64_t > filtration_to_cell_map; dipha::algorithms::get_filtration_to_cell_map( complex, dualize, filtration_to_cell_map ); dipha::data_structures::distributed_vector< int64_t > cell_to_filtration_map; dipha::algorithms::get_cell_to_filtration_map( complex.get_num_cells( ), filtration_to_cell_map, cell_to_filtration_map ); const int64_t nr_columns = complex.get_num_cells( ); std::vector< std::vector< int64_t > > matrix( nr_columns ); std::vector< int64_t > dims( nr_columns, -1 ); for( int64_t cur_dim = 0; cur_dim <= complex.get_max_dim(); cur_dim++ ) { dipha::data_structures::flat_column_stack unreduced_columns; dipha::algorithms::generate_unreduced_columns( complex, filtration_to_cell_map, cell_to_filtration_map, cur_dim, dualize, unreduced_columns ); dipha::data_structures::heap_column col; while( !unreduced_columns.empty( ) ) { int64_t index; unreduced_columns.pop( index, col ); std::sort( col.begin( ), col.end( ) ); matrix[ index ] = col; dims[ index ] = dualize ? complex.get_max_dim( ) - cur_dim : cur_dim; } } std::ofstream output_stream( output_filename.c_str( ), std::ios_base::binary | std::ios_base::out ); output_stream.write( (char*)&nr_columns, sizeof( int64_t ) ); for( int64_t cur_col = 0; cur_col < nr_columns; cur_col++ ) { int64_t cur_dim = dims[ cur_col ]; output_stream.write( (char*)&cur_dim, sizeof( int64_t ) ); int64_t cur_nr_rows = matrix[ cur_col ].size( ); output_stream.write( (char*)&cur_nr_rows, sizeof( int64_t ) ); for( int64_t cur_row_idx = 0; cur_row_idx < cur_nr_rows; cur_row_idx++ ) { int64_t cur_row = matrix[ cur_col ][ cur_row_idx ]; output_stream.write( (char*)&cur_row, sizeof( int64_t ) ); } } output_stream.close( ); }
/*----------------------------------------------------------------------
|   Test2
+---------------------------------------------------------------------*/
// Download `url` through a ring-buffer stream, consuming the body with
// ReadBody() while the download is still in flight. On exit, `size` holds
// the number of body bytes read. Returns true only when the body was
// consumed to a clean EOS.
static bool Test2(PLT_TaskManager* task_manager, const char* url, NPT_Size& size)
{
    NPT_LOG_INFO("########### TEST 2 ######################");

    /* reset output param first */
    size = 0;

    // One ring buffer serves as both the downloader's sink and our source.
    PLT_RingBufferStreamReference ringbuffer_stream(new PLT_RingBufferStream());
    NPT_OutputStreamReference output_stream(ringbuffer_stream);
    NPT_InputStreamReference input_stream(ringbuffer_stream);
    PLT_Downloader downloader(task_manager, url, output_stream);
    downloader.Start();

    while (1) {
        switch(downloader.GetState()) {
            case PLT_DOWNLOADER_SUCCESS:
                // Download complete: mark EOS, then fall through to drain
                // whatever is still buffered. The final ReadBody() is
                // expected to fail with NPT_ERROR_EOS, which maps to true.
                ringbuffer_stream->SetEos();
                /* fallthrough */
            case PLT_DOWNLOADER_DOWNLOADING: {
                NPT_Size bytes_read;
                NPT_Result res = ReadBody(downloader, input_stream, bytes_read);
                if (NPT_FAILED(res)) {
                    return (res==NPT_ERROR_EOS)?true:false;
                }
                size += bytes_read;
            }
            break;
            case PLT_DOWNLOADER_ERROR:
                return false;
            default:
                // Not started/downloading yet: wait 10 ms and re-poll.
                NPT_System::Sleep(NPT_TimeInterval(0, 10000));
                break;
        }
    };

    return false;
}
/// Serialize the (single) root child entity to the XML file pFileName.
/// Returns false when there is not exactly one root child; otherwise
/// returns whatever the child's DoSaveXml() reports.
bool CSerialEntityXML::SaveToXml(const char *pFileName)
{
    if (this->m_Children.size() != 1)
        return false;

    std::ofstream output_stream(pFileName);

    // Scratch buffer handed down to DoSaveXml. A stack array replaces the
    // previous new[] / try / delete[] dance: no leak on any exception path
    // and no manual cleanup, while exceptions still propagate unchanged.
    char buffer[BUFSIZ];

    output_stream<<"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n";
    return this->m_Children[0]->DoSaveXml(output_stream, buffer);
}
BEGIN_NCBI_SCOPE

// Dump the job's submission attributes and input blob to
// "<target_dir>/<job_id>.in". The first line lists the non-empty
// attributes (affinity, group, exclusive flag) separated by single spaces;
// the remainder of the file is the raw job input streamed from NetCache.
// Returns the path of the file that was written.
string CNetScheduleJobSerializer::SaveJobInput(const string& target_dir,
        CNetCacheAPI& nc_api)
{
    string in_path = CDirEntry::ConcatPath(target_dir, m_Job.job_id + ".in");

    CNcbiOfstream os(in_path.c_str(), CNcbiOfstream::binary);

    // Tracks whether an attribute has already been written, so each
    // subsequent one is preceded by exactly one space.
    bool wrote_attr = false;

    if (!m_Job.affinity.empty()) {
        os << "affinity=\"" << NStr::PrintableString(m_Job.affinity) << '"';
        wrote_attr = true;
    }

    if (!m_Job.group.empty()) {
        if (wrote_attr)
            os << ' ';
        os << "group=\"" << NStr::PrintableString(m_Job.group) << '"';
        wrote_attr = true;
    }

    if ((m_Job.mask & CNetScheduleAPI::eExclusiveJob) != 0) {
        if (wrote_attr)
            os << ' ';
        os << "exclusive";
    }
    os << NcbiEndl;

    // Body: copy the job input (inline string or NetCache blob) verbatim.
    CStringOrBlobStorageReader reader(m_Job.input, nc_api);
    CRStream blob_istream(&reader);
    NcbiStreamCopy(os, blob_istream);

    return in_path;
}
// Write a start_beacon_<major>_<minor>.sh shell script that brings up hci0
// and starts advertising an iBeacon frame built from uuid/major/minor and
// the given signal strength, then marks the script readable/executable.
// The directory component is <uuid>/<major> or <uuid>/<minor> depending on
// which field this generator iterates over (update_minor).
void generator_thread::create_script(QString uuid,QString major, QString minor, int strength){
    QString file_name;
    if(update_minor)
        file_name = uuid+"/"+major+"/start_beacon_"+major+"_"+minor+".sh";
    else
        file_name = uuid+"/"+minor+"/start_beacon_"+major+"_"+minor+".sh";
    //qDebug()<<file_name;

    QFile file(file_name);
    if(file.open(QIODevice::ReadWrite)){
        QTextStream output_stream(&file);
        QString command = "#!/bin/bash \nhciconfig hci0 up\nhciconfig hci0 leadv 3 \n";
        command.append(create_command_string(uuid));
        command.append(parse_int(major.toInt()));
        command.append(parse_int(minor.toInt()));
        command.append(QString::number(strength,16));
        command.append(" 00");
        //qDebug()<<command;
        output_stream<<command;
        // Make sure the bytes hit the file before we touch its metadata.
        output_stream.flush();
        // Bug fix: previously setPermissions ran unconditionally, even when
        // open() failed and no script existed; now it applies only to the
        // file we actually wrote.
        // NOTE(review): ExeOwner is not granted — the owner cannot run the
        // script directly (only via `bash script.sh`); confirm intent.
        file.setPermissions(QFile::ExeGroup | QFile::ReadGroup |
                            QFile::ReadOwner | QFile::ReadOther |
                            QFile::WriteOwner | QFile::ExeUser);
    }
}
TEST(ExtensionSetTest, PackedSerializationToStream) {
  // Serialize as TestPackedExtensions and parse as TestPackedTypes to insure
  // wire compatibility of extensions.
  //
  // An array output stream that buffers a single byte at a time keeps the
  // message off the fast path, so every byte flows through the
  // CodedOutputStream as intended.
  unittest::TestPackedExtensions source;
  unittest::TestPackedTypes destination;
  TestUtil::SetPackedExtensions(&source);

  const int byte_size = source.ByteSize();
  string wire;
  wire.resize(byte_size);

  {
    io::ArrayOutputStream array_stream(string_as_array(&wire), byte_size, 1);
    io::CodedOutputStream output_stream(&array_stream);
    source.SerializeWithCachedSizes(&output_stream);
    ASSERT_FALSE(output_stream.HadError());
  }

  EXPECT_TRUE(destination.ParseFromString(wire));
  TestUtil::ExpectPackedFieldsSet(destination);
}
/*---------------------------------------------------------------------- | Test3 +---------------------------------------------------------------------*/ static bool Test3(PLT_TaskManager* task_manager, const char* url, PLT_RingBufferStreamReference& ringbuffer_stream, NPT_Size& size) { NPT_LOG_INFO("########### TEST 3 ######################"); /* reset output param first */ size = 0; NPT_MemoryStreamReference memory_stream(new NPT_MemoryStream()); NPT_OutputStreamReference output_stream(memory_stream); PLT_Downloader downloader(task_manager, url, output_stream); downloader.Start(); /* asynchronously write onto ring buffer stream */ char buffer[32768]; ringbuffer_stream->WriteFully(buffer, 32768); /* mark as done */ ringbuffer_stream->SetEos(); while (1) { switch(downloader.GetState()) { case PLT_DOWNLOADER_SUCCESS: size = memory_stream->GetDataSize(); return true; case PLT_DOWNLOADER_ERROR: return false; default: NPT_System::Sleep(NPT_TimeInterval(0, 10000)); break; } }; return false; }
// End-to-end tour of the Azure Storage blob API: create a public container,
// upload block blobs (from a file stream and from text), list them,
// download one to a stream and one as text, exercise append blobs,
// request cancellation and millisecond-level timeouts, then clean up.
void blobs_getting_started_sample()
{
    try
    {
        // Initialize storage account
        azure::storage::cloud_storage_account storage_account = azure::storage::cloud_storage_account::parse(storage_connection_string);

        // Create a blob container
        azure::storage::cloud_blob_client blob_client = storage_account.create_cloud_blob_client();
        azure::storage::cloud_blob_container container = blob_client.get_container_reference(_XPLATSTR("my-sample-container"));

        // Return value is true if the container did not exist and was successfully created.
        container.create_if_not_exists();

        // Make the blob container publicly accessible
        azure::storage::blob_container_permissions permissions;
        permissions.set_public_access(azure::storage::blob_container_public_access_type::blob);
        container.upload_permissions(permissions);

        // Upload a blob from a file
        concurrency::streams::istream input_stream = concurrency::streams::file_stream<uint8_t>::open_istream(_XPLATSTR("DataFile.txt")).get();
        azure::storage::cloud_block_blob blob1 = container.get_block_blob_reference(_XPLATSTR("my-blob-1"));
        blob1.upload_from_stream(input_stream);
        input_stream.close().wait();

        // Upload some blobs from text
        azure::storage::cloud_block_blob blob2 = container.get_block_blob_reference(_XPLATSTR("my-blob-2"));
        blob2.upload_text(_XPLATSTR("more text"));
        azure::storage::cloud_block_blob blob3 = container.get_block_blob_reference(_XPLATSTR("my-directory/my-sub-directory/my-blob-3"));
        blob3.upload_text(_XPLATSTR("other text"));

        // List blobs in the blob container, one page ("segment") at a time;
        // an empty continuation token means the listing is complete.
        azure::storage::continuation_token token;
        do
        {
            azure::storage::list_blob_item_segment result = container.list_blobs_segmented(token);
            for (auto& item : result.results())
            {
                if (item.is_blob())
                {
                    ucout << _XPLATSTR("Blob: ") << item.as_blob().uri().primary_uri().to_string() << std::endl;
                }
                else
                {
                    ucout << _XPLATSTR("Directory: ") << item.as_directory().uri().primary_uri().to_string() << std::endl;
                }
            }
            token = result.continuation_token();
        } while (!token.empty());

        // Download a blob to a stream
        concurrency::streams::container_buffer<std::vector<uint8_t>> buffer;
        concurrency::streams::ostream output_stream(buffer);
        azure::storage::cloud_block_blob binary_blob = container.get_block_blob_reference(_XPLATSTR("my-blob-1"));
        binary_blob.download_to_stream(output_stream);
        ucout << _XPLATSTR("Stream: ") << to_string(buffer.collection()) << std::endl;

        // Download a blob as text
        azure::storage::cloud_block_blob text_blob = container.get_block_blob_reference(_XPLATSTR("my-blob-2"));
        utility::string_t text = text_blob.download_text();
        ucout << _XPLATSTR("Text: ") << text << std::endl;

        // Delete the blobs
        blob1.delete_blob();
        blob2.delete_blob();
        blob3.delete_blob();

        // Create an append blob
        azure::storage::cloud_append_blob append_blob = container.get_append_blob_reference(_XPLATSTR("my-append-1"));
        append_blob.properties().set_content_type(_XPLATSTR("text/plain; charset=utf-8"));
        append_blob.create_or_replace();

        // Append two blocks
        concurrency::streams::istream append_input_stream1 = concurrency::streams::bytestream::open_istream(utility::conversions::to_utf8string(_XPLATSTR("some text.")));
        concurrency::streams::istream append_input_stream2 = concurrency::streams::bytestream::open_istream(utility::conversions::to_utf8string(_XPLATSTR("more text.")));
        append_blob.append_block(append_input_stream1, utility::string_t());
        append_blob.append_block(append_input_stream2, utility::string_t());
        append_input_stream1.close().wait();
        append_input_stream2.close().wait();

        // Download append blob as text
        utility::string_t append_text = append_blob.download_text();
        ucout << _XPLATSTR("Append Text: ") << append_text << std::endl;

        // Cancellation token
        pplx::cancellation_token_source source; // This is used to cancel the request.
        auto download_text_task = append_blob.download_text_async(azure::storage::access_condition(), azure::storage::blob_request_options(), azure::storage::operation_context(), source.get_token());
        source.cancel();// This call will cancel download_text_task
        try
        {
            auto downloaded_text = download_text_task.get();
            // Reaching here means the cancellation did not take effect.
            ucout << _XPLATSTR("Text downloaded successfully unexpectedly, text is: ") << downloaded_text << std::endl;
        }
        catch (const azure::storage::storage_exception& e)
        {
            ucout << _XPLATSTR("Operation should be cancelled, the error message is: ") << e.what() << std::endl;
        }

        // Millisecond level timeout: a 1 ms execution budget is expected to
        // expire before the download completes.
        azure::storage::blob_request_options options;
        options.set_maximum_execution_time(std::chrono::milliseconds(1));
        try
        {
            download_text_task = append_blob.download_text_async(azure::storage::access_condition(), options, azure::storage::operation_context());
            auto downloaded_text = download_text_task.get();
            ucout << _XPLATSTR("Text downloaded successfully unexpectedly, text is: ") << downloaded_text << std::endl;
        }
        catch (const azure::storage::storage_exception& e)
        {
            ucout << _XPLATSTR("Operation should be timed-out, the error message is: ") << e.what() << std::endl;
        }

        // Delete the blob
        append_blob.delete_blob();

        // Delete the blob container
        // Return value is true if the container did exist and was successfully deleted.
        container.delete_container_if_exists();
    }
    catch (const azure::storage::storage_exception& e)
    {
        ucout << _XPLATSTR("Error: ") << e.what() << std::endl;

        azure::storage::request_result result = e.result();
        azure::storage::storage_extended_error extended_error = result.extended_error();
        if (!extended_error.message().empty())
        {
            ucout << extended_error.message() << std::endl;
        }
    }
    catch (const std::exception& e)
    {
        ucout << _XPLATSTR("Error: ") << e.what() << std::endl;
    }
}
// Drive a real-time frame-processing pipeline: read frames from the video
// at `video_path`, wrap each (frame, previous-frame) pair in a Workable
// and push it both to a pool of `num_worker_threads` detached worker
// threads (via work_stream) and to a single output thread (via
// output_stream), pacing the read loop to the source frame rate.
// `command_function` is the per-frame operation carried by each Workable.
void run(cv::String video_path, size_t num_worker_threads, cv::Mat (*const command_function)(cv::Mat&, cv::Mat&))
{
    VideoCapture capture(video_path);
    if (!capture.isOpened())
    {
        log("Error: could not open video capture");
        exit(EXIT_FAILURE);
    }
    double fps = capture.get(CV_CAP_PROP_FPS);

    WorkStream work_stream(fps);
    WorkStream output_stream(fps);

    size_t i;
    // NOTE(review): variable-length array is a compiler extension, not
    // standard C++.
    pthread_t worker_threads[num_worker_threads];
    for (i = 0; i < num_worker_threads; i++)
    {
        if (pthread_create(&worker_threads[i], NULL, &worker::work, &work_stream))
        {
            log("Error: could not create worker threads");
            exit(EXIT_FAILURE);
        }
        // Detached: workers are shut down via the commands::exit sentinels
        // pushed below, not by joining.
        pthread_detach(worker_threads[i]);
    }
    pthread_t output_thread;
    if (pthread_create(&output_thread, NULL, &worker::output, &output_stream))
    {
        log("Error: could not create output thread");
        exit(EXIT_FAILURE);
    }

    timespec tspec;
    Mat frame, prev;
    Ptr<Workable> workable_ptr;
    for (;;)
    {
        // Timestamp before the read so the sleep below targets a fixed
        // per-frame period regardless of how long the read took.
        clock_gettime(CLOCK_MONOTONIC, &tspec);
        if (!capture.read(frame) || frame.empty())
            break;
        // The same Workable goes to both streams: workers process it,
        // the output thread emits results in arrival order.
        workable_ptr = makePtr<Workable>(command_function, frame, prev);
        work_stream.push(workable_ptr);
        output_stream.push(workable_ptr);
        swap(frame, prev);
        // Advance the deadline by one frame period (1e9/fps ns) and sleep
        // until that absolute time; retry if interrupted by a signal.
        tspec.tv_nsec += 1e9/fps;
        if (tspec.tv_nsec >= 1e9)
        {
            tspec.tv_nsec -= 1e9;
            tspec.tv_sec++;
        }
        while(clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &tspec, NULL) == EINTR);
    }

    // Poison-pill shutdown: one exit command per worker, plus one for the
    // output thread, which is the only thread we join.
    for (i = 0; i < num_worker_threads; i++)
    {
        workable_ptr = makePtr<Workable>(&commands::exit, frame, prev);
        work_stream.push(workable_ptr);
    }
    workable_ptr = makePtr<Workable>(&commands::exit, frame, prev);
    output_stream.push(workable_ptr);
    pthread_join(output_thread, NULL);
}
// Waypoint-following controller: reads target waypoints from stdin, joins
// them against a (possibly non-blocking) feedback stream given as the first
// unnamed argument, and for each feedback sample emits the input record,
// the feedback record and the computed wayline heading / cross-track /
// heading errors. Supports "fixed" and "dynamic" waypoint-advance modes,
// a proximity threshold, optional past-endpoint detection and an optional
// output rate limit (--frequency).
int main( int ac, char** av )
{
    try
    {
        comma::command_line_options options( ac, av, usage );
        comma::csv::options input_csv( options, field_names< snark::control::target_t >() );
        const char delimiter = input_csv.delimiter;
        comma::csv::input_stream< snark::control::target_t > input_stream( std::cin, input_csv, snark::control::target_t( options.exists( "--heading-is-absolute" ) ) );

        // Output carries the error/wayline fields appended after the echoed
        // input and feedback records.
        comma::csv::options output_csv( options );
        output_csv.full_xpath = true;
        output_csv.fields = "wayline/heading,error/cross_track,error/heading";
        if( input_csv.binary() ) { output_csv.format( format< snark::control::control_data_t >( output_csv.fields, true ) ); }
        comma::csv::output_stream< snark::control::control_data_t > output_stream( std::cout, output_csv );

        // Introspection options: print the requested format/fields and exit.
        if( options.exists( "--format" ) ) { std::cout << format< snark::control::target_t >() << std::endl; return 0; }
        if( options.exists( "--output-format" ) ) { std::cout << format< snark::control::control_data_t >( output_csv.fields, true ) << std::endl; return 0; }
        if( options.exists( "--output-fields" ) ) { std::cout << output_csv.fields << std::endl; return 0; }

        double proximity = options.value< double >( "--proximity", default_proximity );
        if( proximity <= 0 ) { std::cerr << name << ": expected positive proximity, got " << proximity << std::endl; return 1; }
        control_mode_t mode = mode_from_string( options.value< std::string >( "--mode", default_mode ) );
        bool use_past_endpoint = options.exists( "--past-endpoint" );

        // Optional output rate limiting: suppress outputs until
        // next_output_time.
        bool use_delay = options.exists( "--frequency,-f" );
        boost::posix_time::microseconds delay( 0 );
        boost::posix_time::ptime next_output_time( boost::posix_time::microsec_clock::universal_time() );
        if( use_delay )
        {
            double frequency = options.value< double >( "--frequency,-f" );
            if( frequency <= 0 ) { std::cerr << name << ": expected positive frequency, got " << frequency << std::endl; return 1; }
            delay = boost::posix_time::microseconds( static_cast< long >( 1000000 / frequency ) );
        }
        bool verbose = options.exists( "--verbose,-v" );

        // The first unnamed argument names the feedback stream.
        std::vector< std::string > unnamed = options.unnamed( "--help,-h,--verbose,-v,--format,--output-format,--output-fields,--past-endpoint,--heading-is-absolute", "-.*,--.*" );
        if( unnamed.empty() ) { std::cerr << name << ": feedback stream is not given" << std::endl; return 1; }
        comma::csv::options feedback_csv = comma::name_value::parser( "filename", ';', '=', false ).get< comma::csv::options >( unnamed[0] );
        // Input and feedback must agree on binary vs ascii so the records
        // can be echoed side by side.
        if( input_csv.binary() && !feedback_csv.binary() ) { std::cerr << name << ": cannot join binary input stream with ascii feedback stream" << std::endl; return 1; }
        if( !input_csv.binary() && feedback_csv.binary() ) { std::cerr << name << ": cannot join ascii input stream with binary feedback stream" << std::endl; return 1; }
        if( feedback_csv.fields.empty() ) { feedback_csv.fields = field_names< snark::control::feedback_t >(); }
        comma::io::istream feedback_in( feedback_csv.filename, feedback_csv.binary() ? comma::io::mode::binary : comma::io::mode::ascii, comma::io::mode::non_blocking );
        comma::csv::input_stream< snark::control::feedback_t > feedback_stream( *feedback_in, feedback_csv );
        comma::io::select select;
        select.read().add( feedback_in );

        // `from` is the start of the current wayline; empty until the first
        // feedback sample (or after a dynamic-mode reset).
        boost::optional< snark::control::vector_t > from;
        boost::scoped_ptr< snark::control::wayline_t > wayline;
        comma::signal_flag is_shutdown;

        // Outer loop: one iteration per target waypoint.
        while( !is_shutdown && ( input_stream.ready() || ( std::cin.good() && !std::cin.eof() ) ) )
        {
            reached_t reached;
            const snark::control::target_t* target = input_stream.read();
            if( !target ) { break; }
            snark::control::vector_t to = target->position;
            if( verbose ) { std::cerr << name << ": received target waypoint " << snark::control::serialise( to ) << std::endl; }
            // Skip waypoints already within the proximity radius.
            if( from && snark::control::distance( *from, to ) < proximity ) { continue; }
            if( from ) { wayline.reset( new snark::control::wayline_t( *from, to, verbose ) ); }

            // Inner loop: follow this wayline on each feedback sample until
            // the waypoint is reached (or, in dynamic mode, preempted by a
            // newly arrived target).
            while( !is_shutdown && std::cout.good() )
            {
                if( input_stream.ready() )
                {
                    if( mode == fixed ) {} // keep following the current wayline
                    else if( mode == dynamic ) { from = boost::none; break; }
                    else { std::cerr << name << ": control mode '" << mode_to_string( mode ) << "' is not implemented" << std::endl; return 1; }
                }
                select.wait( boost::posix_time::microseconds( 10000 ) );
                if( !is_shutdown && ( feedback_stream.ready() || select.read().ready( feedback_in ) ) )
                {
                    const snark::control::feedback_t* feedback = feedback_stream.read();
                    if( !feedback ) { std::cerr << name << ": feedback stream error occurred prior to reaching waypoint " << snark::control::serialise( to ) << std::endl; return 1; }
                    // Rate limit: drop this sample if the next output slot
                    // has not arrived yet.
                    if( use_delay && boost::posix_time::microsec_clock::universal_time() < next_output_time ) { continue; }
                    // First feedback seen: anchor the wayline at our
                    // current position.
                    if( !from )
                    {
                        from = feedback->position;
                        wayline.reset( new snark::control::wayline_t( *from, to, verbose ) );
                    }
                    if( snark::control::distance( feedback->position, to ) < proximity ) { reached = reached_t( "proximity" ); }
                    if( use_past_endpoint && wayline->is_past_endpoint( feedback->position ) ) { reached = reached_t( "past endpoint" ); }
                    if( reached )
                    {
                        if( verbose ) { std::cerr << name << ": waypoint " << snark::control::serialise( to ) << " is reached (" << reached.reason << ")" << std::endl; }
                        // fixed mode: next wayline starts at this waypoint;
                        // dynamic mode: restart from wherever we are then.
                        if( mode == fixed ) { from = to; }
                        else if( mode == dynamic ) { from = boost::none; }
                        else { std::cerr << name << ": control mode '" << mode_to_string( mode ) << "' is not implemented" << std::endl; return 1; }
                        break;
                    }
                    snark::control::error_t error;
                    error.cross_track = wayline->cross_track_error( feedback->position );
                    if( target->is_absolute ) { error.heading = snark::control::wrap_angle( target->heading_offset - feedback->yaw ); }
                    else { error.heading = wayline->heading_error( feedback->yaw, target->heading_offset ); }
                    // Echo the input record, then the feedback record, then
                    // append the computed control data.
                    if( input_csv.binary() ) { std::cout.write( input_stream.binary().last(), input_csv.format().size() ); }
                    else { std::cout << comma::join( input_stream.ascii().last(), delimiter ) << delimiter; }
                    if( feedback_csv.binary() ) { std::cout.write( feedback_stream.binary().last(), feedback_csv.format().size() ); }
                    else { std::cout << comma::join( feedback_stream.ascii().last(), delimiter ) << delimiter; }
                    output_stream.write( snark::control::control_data_t( *wayline, error ) );
                    if( use_delay ) { next_output_time = boost::posix_time::microsec_clock::universal_time() + delay; }
                }
            }
        }
        return 0;
    }
    catch( std::exception& ex ) { std::cerr << name << ": " << ex.what() << std::endl; }
    catch( ... ) { std::cerr << name << ": unknown exception" << std::endl; }
    return 1;
}
void blobs_getting_started_sample() { try { // Initialize storage account azure::storage::cloud_storage_account storage_account = azure::storage::cloud_storage_account::parse(storage_connection_string); // Create a blob container azure::storage::cloud_blob_client blob_client = storage_account.create_cloud_blob_client(); azure::storage::cloud_blob_container container = blob_client.get_container_reference(U("my-sample-container")); // Return value is true if the container did not exist and was successfully created. container.create_if_not_exists(); // Make the blob container publicly accessible azure::storage::blob_container_permissions permissions; permissions.set_public_access(azure::storage::blob_container_public_access_type::blob); container.upload_permissions(permissions); // Upload a blob from a file concurrency::streams::istream input_stream = concurrency::streams::file_stream<uint8_t>::open_istream(U("DataFile.txt")).get(); azure::storage::cloud_block_blob blob1 = container.get_block_blob_reference(U("my-blob-1")); blob1.upload_from_stream(input_stream); input_stream.close().wait(); // Upload some blobs from text azure::storage::cloud_block_blob blob2 = container.get_block_blob_reference(U("my-blob-2")); blob2.upload_text(U("more text")); azure::storage::cloud_block_blob blob3 = container.get_block_blob_reference(U("my-directory/my-sub-directory/my-blob-3")); blob3.upload_text(U("other text")); // List blobs in the blob container azure::storage::continuation_token token; do { azure::storage::blob_result_segment result = container.list_blobs_segmented(token); std::vector<azure::storage::cloud_blob> blobs = result.blobs(); for (std::vector<azure::storage::cloud_blob>::const_iterator it = blobs.cbegin(); it != blobs.cend(); ++it) { ucout << U("Blob: ") << it->uri().primary_uri().to_string() << std::endl; } std::vector<azure::storage::cloud_blob_directory> directories = result.directories(); for (std::vector<azure::storage::cloud_blob_directory>::const_iterator 
it = directories.cbegin(); it != directories.cend(); ++it) { ucout << U("Directory: ") << it->uri().primary_uri().to_string() << std::endl; } token = result.continuation_token(); } while (!token.empty()); // Download a blob to a stream concurrency::streams::container_buffer<std::vector<uint8_t>> buffer; concurrency::streams::ostream output_stream(buffer); azure::storage::cloud_block_blob binary_blob = container.get_block_blob_reference(U("my-blob-1")); binary_blob.download_to_stream(output_stream); ucout << U("Stream: ") << to_string(buffer.collection()) << std::endl; // Download a blob as text azure::storage::cloud_block_blob text_blob = container.get_block_blob_reference(U("my-blob-2")); utility::string_t text = text_blob.download_text(); ucout << U("Text: ") << text << std::endl; // Delete the blobs blob1.delete_blob(); blob2.delete_blob(); blob3.delete_blob(); // Delete the blob container // Return value is true if the container did exist and was successfully deleted. container.delete_container_if_exists(); } catch (const azure::storage::storage_exception& e) { ucout << U("Error: ") << e.what() << std::endl; azure::storage::request_result result = e.result(); azure::storage::storage_extended_error extended_error = result.extended_error(); if (!extended_error.message().empty()) { ucout << extended_error.message() << std::endl; } } catch (const std::exception& e) { ucout << U("Error: ") << e.what() << std::endl; } }
/*----------------------------------------------------------------------
|   PLT_SsdpSearchTask::DoRun
+---------------------------------------------------------------------*/
// Task body: sends the SSDP M-SEARCH request twice over a UDP datagram
// stream, then loops collecting responses until the resend interval
// (m_Timeout) elapses; the outer loop repeats the whole cycle while
// m_Repeat is set and the task has not been asked to abort.
void PLT_SsdpSearchTask::DoRun()
{
    NPT_HttpResponse*      response = NULL;
    PLT_HttpClient         client;
    // Resolution timeout passed to ResolveName below.
    // NOTE(review): presumably milliseconds (NPT_Timeout) — confirm units.
    NPT_Timeout            timeout = 30;
    NPT_HttpRequestContext context;

    do {
        // get the address of the server
        NPT_IpAddress server_address;
        NPT_CHECK_LABEL_SEVERE(server_address.ResolveName(
            m_Request->GetUrl().GetHost(),
            timeout), done);
        NPT_SocketAddress address(server_address, m_Request->GetUrl().GetPort());

        // send 2 requests in a row (UDP is lossy, so the duplicate
        // improves the odds that at least one M-SEARCH is received)
        NPT_OutputStreamReference output_stream(
            new PLT_OutputDatagramStream(m_Socket,
                                         4096,
                                         &address));
        NPT_CHECK_LABEL_SEVERE(client.SendRequest(
            output_stream,
            *m_Request), done);
        NPT_CHECK_LABEL_SEVERE(client.SendRequest(
            output_stream,
            *m_Request), done);
        // release the datagram stream reference as soon as sending is done
        output_stream = NULL;

        // keep track of when we sent the request
        NPT_TimeStamp last_send;
        NPT_System::GetCurrentTimeStamp(last_send);

        while (!IsAborting(0)) {
            // read response: a fresh datagram input stream per iteration
            PLT_InputDatagramStreamReference input_stream(
                new PLT_InputDatagramStream(m_Socket));
            NPT_InputStreamReference stream = input_stream;
            NPT_Result res = client.WaitForResponse(stream,
                                                    *m_Request,
                                                    context,
                                                    response);
            // callback to process response
            if (NPT_SUCCEEDED(res)) {
                // get source info so the context reflects who answered
                NPT_SocketInfo info;
                input_stream->GetInfo(info);
                context.SetLocalAddress(info.local_address);
                context.SetRemoteAddress(info.remote_address);

                // process response, then free it — we own `response` here
                ProcessResponse(NPT_SUCCESS, m_Request, context, response);
                delete response;
                response = NULL;
            } else if (res != NPT_ERROR_TIMEOUT) {
                // timeouts are expected while polling; anything else is worth logging
                // NOTE(review): assumes WaitForResponse does not allocate
                // `response` on failure, else it would leak here — confirm.
                NPT_LOG_WARNING_1("PLT_SsdpSearchTask got an error (%d) waiting for response", res);
            }

            input_stream = NULL;

            // check if it's time to resend request
            // NOTE(review): the cast binds to m_Timeout, so this adds
            // (m_Timeout/1000) to the timestamp — presumably converting
            // milliseconds to seconds for NPT_TimeStamp arithmetic; confirm.
            NPT_TimeStamp now;
            NPT_System::GetCurrentTimeStamp(now);
            if (now >= last_send + (long)m_Timeout/1000)
                break;
        }
    } while (!IsAborting(0) && m_Repeat);

done:
    return;
}