// Convert an embedded ODF object into its OOXML (docx) representation.
// object_type_ selects the payload: 1 = chart, 2 = embedded text document,
// 3 = math formula. Unknown types (or missing payloads) are skipped.
void chart_build::docx_convert(oox::docx_conversion_context & Context)
{
    if (object_type_ == 1)
    {
        // Chart: open a chart part, serialize this chart into it, and mark
        // it cache-only (data values embedded rather than externally linked).
        Context.start_chart(L"");
        oox::oox_chart_context & chart_context = Context.current_chart();
        oox_convert(chart_context);
        chart_context.set_cache_only(true);
        Context.end_chart();
    }
    else if (object_type_ == 2 && office_text_)
    {
        // Embedded text document: delegate conversion to the text content.
        office_text_->docx_convert(Context);
    }
    else if (object_type_ == 3 && office_math_)
    {
        // Math formula: its markup is collected into the drawing frame's
        // text stream, so temporarily redirect the context's output stream
        // to a local buffer, remembering the previous stream manager.
        oox::docx_conversion_context::StreamsManPtr prev = Context.get_stream_man();

        std::wstringstream temp_stream(Context.get_drawing_context().get_text_stream_frame());
        Context.set_stream_man( boost::shared_ptr<oox::streams_man>( new oox::streams_man(temp_stream) ));

        // Save and reset run/paragraph state so the nested conversion starts
        // from a clean paragraph/run nesting level.
        bool runState = Context.get_run_state();
        Context.set_run_state(false);

        bool pState = Context.get_paragraph_state();
        Context.set_paragraph_state(false);

        // Formulas use the conventional math font; the size comes from this
        // object's base font height (in points).
        style_text_properties textProperty;
        textProperty.content().style_font_name_ = L"Cambria Math";
        textProperty.content().fo_font_size_ = odf_types::length(baseFontHeight_, odf_types::length::pt);

        Context.push_text_properties(&textProperty);
        office_math_->docx_convert(Context);
        Context.pop_text_properties();

        // Publish the buffered markup back into the drawing frame, then
        // restore the previous stream manager and run/paragraph state.
        Context.get_drawing_context().get_text_stream_frame() = temp_stream.str();

        Context.set_stream_man (prev);
        Context.set_run_state (runState);
        Context.set_paragraph_state (pState);
    }
}
bool InputReader::read_input(char* filename) { std::ifstream f(filename); if(!f.is_open()) { std::cerr << "fatal error: could not open input file " << filename << ". aborting " << std::endl; return false; } // if // read the whole file into a string std::string input_string; std::getline(f, input_string, (char)EOF); f.close(); // create and assign the string stream std::istringstream temp_stream(input_string.c_str()); input_stream_.str(temp_stream.str()); return true; } // InputReader::init()
// Convert an ODF annotation into an OOXML comment, registering its body,
// author, and date with the document's comments context.
void office_annotation::docx_convert(oox::docx_conversion_context & Context)
{
    std::wstring date;
    std::wstring author;

    // Pull the optional creation date and author, escaping characters that
    // are not valid in XML text.
    if (dc_date_)
    {
        date = xml::utils::replace_text_to_xml(dynamic_cast<dc_date * >(dc_date_.get())->content_);
    }
    if (dc_creator_)
    {
        author = xml::utils::replace_text_to_xml(dynamic_cast<dc_creator * >(dc_creator_.get())->content_);
    }
    ////////////////////////////////////////
    Context.start_comment();

    // The comment body must be serialized into its own buffer, so swap the
    // context's output stream for a local one while converting the content,
    // remembering the previous stream manager for restoration below.
    oox::StreamsManPtr prev = Context.get_stream_man();

    std::wstringstream temp_stream(Context.get_drawing_context().get_text_stream_frame());
    Context.set_stream_man( boost::shared_ptr<oox::streams_man>( new oox::streams_man(temp_stream) ));

    // Save and reset run/paragraph state so the nested conversion starts
    // from a clean paragraph/run nesting level.
    bool runState = Context.get_run_state();
    Context.set_run_state(false);

    bool pState = Context.get_paragraph_state();
    Context.set_paragraph_state(false);

    // Convert every child element of the annotation into the buffer.
    for (size_t i = 0; i < content_.size(); i++)
    {
        content_[i]->docx_convert(Context);
    }

    Context.set_run_state(runState);
    Context.set_paragraph_state(pState);

    // Register the buffered body with the comments part (content, author, date).
    Context.get_comments_context().start_comment(temp_stream.str(), author, date);

    // Hyperlinks collected while converting the body belong to the comments
    // part's relationships, not the main document's.
    Context.dump_hyperlinks(Context.get_comments_context().get_rels(), oox::hyperlinks::comment_place);

    Context.set_stream_man(prev);
    Context.end_comment();
}
// Externally sort a stream of protobuf messages: read stream_in in parallel
// chunks, sort each chunk into its own temp file, then k-way merge the temp
// files (in layers if there are more than max_fan_in of them) into
// stream_out. If index_to is non-null, group statistics are recorded into it
// as the merged output is emitted.
void StreamSorter<Message>::stream_sort(istream& stream_in, ostream& stream_out, StreamIndex<Message>* index_to) {

    // We want to work out the file size, if we can.
    size_t file_size = 0;
    {
        // Save our position
        auto here = stream_in.tellg();
        // Go to the end
        stream_in.seekg(0, stream_in.end);
        // Get its position
        auto there = stream_in.tellg();
        // Go back to where we were
        stream_in.seekg(here);

        if (stream_in.good()) {
            // We can seek in this stream. So how far until the end?
            file_size = there - here;
        } else {
            // It's entirely possible that none of that worked. So clear the
            // error flags and leave the size at 0.
            stream_in.clear();
        }
    }

    // Don't give an actual 0 to the progress code or it will NaN
    create_progress("break into sorted chunks", file_size == 0 ? 1 : file_size);

    // Eventually we put sorted chunks of data in temp files and put their names here
    vector<string> outstanding_temp_files;

    // This tracks the number of messages in each file, by file name
    unordered_map<string, size_t> messages_per_file;
    // This tracks the total messages observed on input
    size_t total_messages_read = 0;

    // This cursor will read in the input file.
    cursor_t input_cursor(stream_in);

    // Phase 1: each OpenMP thread repeatedly grabs a buffer's worth of
    // messages (under a critical section on the shared cursor), sorts it,
    // and writes it to its own temp file.
    #pragma omp parallel shared(stream_in, input_cursor, outstanding_temp_files, messages_per_file, total_messages_read)
    {
        while(true) {
            vector<Message> thread_buffer;

            #pragma omp critical (input_cursor)
            {
                // Each thread fights for the file and the winner takes some data
                size_t buffered_message_bytes = 0;
                while (input_cursor.has_next() && buffered_message_bytes < max_buf_size) {
                    // Until we run out of input messages or space, buffer
                    // each, recording its size.
                    thread_buffer.emplace_back(std::move(input_cursor.take()));
                    buffered_message_bytes += thread_buffer.back().ByteSizeLong();
                }

                // Update the progress bar
                update_progress(stream_in.tellg());
            }

            if (thread_buffer.empty()) {
                // No data was found; the input is exhausted, so this thread
                // is done.
                break;
            }

            // Do a sort of the data we grabbed
            this->sort(thread_buffer);

            // Save it to a temp file.
            string temp_name = temp_file::create();
            ofstream temp_stream(temp_name);
            // OK to save as one massive group here.
            // TODO: This write could also be in a thread.
            stream::write_buffered(temp_stream, thread_buffer, 0);

            #pragma omp critical (outstanding_temp_files)
            {
                // Remember the temp file name
                outstanding_temp_files.push_back(temp_name);
                // Remember the messages in the file, for progress purposes
                messages_per_file[temp_name] = thread_buffer.size();
                // Remember how many messages we found in the total
                total_messages_read += thread_buffer.size();
            }
        }
    }

    // Now the reader threads have taken care of the input, and all the data
    // is in temp files.
    destroy_progress();

    // Phase 2: if there are too many temp files to merge at once, merge
    // subsets of them layer by layer until the fan-in is manageable.
    while (outstanding_temp_files.size() > max_fan_in) {
        // We can't merge them all at once, so merge subsets of them.
        outstanding_temp_files = streaming_merge(outstanding_temp_files, &messages_per_file);
    }

    // Now we can merge (and maybe index) the final layer of the tree.

    // Open up cursors into all the files.
    list<ifstream> temp_ifstreams;
    list<cursor_t> temp_cursors;
    open_all(outstanding_temp_files, temp_ifstreams, temp_cursors);

    // Maintain our own group buffer at a higher scope than the emitter.
    vector<Message> group_buffer;

    {
        // Make an output emitter
        emitter_t emitter(stream_out);

        if (index_to != nullptr) {
            // Hook the emitter so each emitted message is buffered, and each
            // finished group is recorded in the index with its virtual
            // offset range.
            emitter.on_message([&index_to,&group_buffer](const Message& m) {
                // Copy every message that is emitted.
                // TODO: Just compute indexing stats instead.
                group_buffer.push_back(m);
            });

            emitter.on_group([&index_to,&group_buffer](int64_t start_vo, int64_t past_end_vo) {
                // On every group, tell the index to record the group stats,
                // and clear the buffer.
                index_to->add_group(group_buffer, start_vo, past_end_vo);
                group_buffer.clear();
            });
        }

        // Merge the cursors into the emitter
        streaming_merge(temp_cursors, emitter, total_messages_read);
    }

    // Clean up: drop the cursors/streams before removing the files they read.
    temp_cursors.clear();
    temp_ifstreams.clear();
    for (auto& filename : outstanding_temp_files) {
        temp_file::remove(filename);
    }
}
// Parse a JSON document from a string.
//
// @param value_string  the JSON text to parse (read-only; the previous
//                      non-const reference wrongly implied mutation)
// @return the parsed Json::Value
//
// NOTE(review): Json::Value's stream extraction reports malformed input via
// the stream/exception mechanism of the JsonCpp build in use — callers that
// feed untrusted text should be prepared for a parse failure.
static Json::Value parse_json_value(const std::string& value_string)
{
    // An istringstream suffices since we only read from the buffer.
    std::istringstream temp_stream(value_string);
    Json::Value parsed_value;
    temp_stream >> parsed_value;
    return parsed_value;
}