/// Re-merges the blocks accumulated so far for an in-memory ORDER BY into
/// fewer (larger, sorted) blocks, to lower memory consumption. Updates the
/// member counters and disables future remerges if the saving was too small.
void MergeSortingBlockInputStream::remerge()
{
    LOG_DEBUG(log, "Re-merging intermediate ORDER BY data (" << blocks.size() << " blocks with " << sum_rows_in_blocks << " rows) to save memory consumption");

    /// NOTE Maybe concat all blocks and partial sort will be faster than merge?
    MergeSortingBlocksBlockInputStream merger(blocks, description, max_merged_block_size, limit);

    /// Drain the merger stream, collecting the merged blocks and their new totals.
    Blocks new_blocks;
    size_t new_sum_rows_in_blocks = 0;
    size_t new_sum_bytes_in_blocks = 0;

    merger.readPrefix();
    while (Block block = merger.read())
    {
        new_sum_rows_in_blocks += block.rows();
        new_sum_bytes_in_blocks += block.allocatedBytes();
        new_blocks.emplace_back(std::move(block));
    }
    merger.readSuffix();

    LOG_DEBUG(log, "Memory usage is lowered from " << formatReadableSizeWithBinarySuffix(sum_bytes_in_blocks) << " to " << formatReadableSizeWithBinarySuffix(new_sum_bytes_in_blocks));

    /// If the memory consumption was not lowered enough - we will not perform remerge anymore. 2 is a guess.
    if (new_sum_bytes_in_blocks * 2 > sum_bytes_in_blocks)
        remerge_is_useful = false;

    /// Swap in the re-merged blocks and refresh the running totals.
    blocks = std::move(new_blocks);
    sum_rows_in_blocks = new_sum_rows_in_blocks;
    sum_bytes_in_blocks = new_sum_bytes_in_blocks;
}
TEST_F(MergeTests, simple_merge_test_valid_rows)
{
    auto mainTable = io::Loader::shortcuts::load("test/merge1_main.tbl");
    auto deltaTable = io::Loader::shortcuts::load("test/merge1_delta.tbl");
    auto referenceTable = io::Loader::shortcuts::load("test/merge1_result.tbl");

    std::vector<hyrise::storage::c_atable_ptr_t> inputTables;
    inputTables.push_back(mainTable);
    inputTables.push_back(deltaTable);

    TableMerger merger(new DefaultMergeStrategy(), new SequentialHeapMerger());

    // Case 1: every row is marked invalid, so the merged table must be empty.
    std::vector<bool> noneValid(mainTable->size() + deltaTable->size(), false);
    auto result = merger.merge(inputTables, true, noneValid);
    ASSERT_EQ(0u, result[0]->size());

    // Case 2: exactly two rows are valid; expect two rows and two dictionary
    // entries per column in the merged output.
    std::vector<bool> someValid(mainTable->size() + deltaTable->size(), false);
    someValid[3] = true;
    someValid[someValid.size() - 1] = true;
    result = merger.merge(inputTables, true, someValid);
    ASSERT_EQ(2u, result[0]->size());
    ASSERT_EQ(2u, result[0]->dictionaryAt(0)->size());
    ASSERT_EQ(2u, result[0]->dictionaryAt(1)->size());
    ASSERT_EQ(2u, result[0]->dictionaryAt(2)->size());
}
TEST_F(TableMergerTest, SimpleMerge)
{
    // Base package com.app.a references resources living in com.app.b.
    std::unique_ptr<ResourceTable> baseTable = test::ResourceTableBuilder()
            .setPackageId(u"com.app.a", 0x7f)
            .addReference(u"@com.app.a:id/foo", u"@com.app.a:id/bar")
            .addReference(u"@com.app.a:id/bar", u"@com.app.b:id/foo")
            .addValue(u"@com.app.a:styleable/view",
                      test::StyleableBuilder().addItem(u"@com.app.b:id/foo").build())
            .build();

    // Library package com.app.b uses the same package id, so merging it
    // requires name mangling.
    std::unique_ptr<ResourceTable> libTable = test::ResourceTableBuilder()
            .setPackageId(u"com.app.b", 0x7f)
            .addSimple(u"@com.app.b:id/foo")
            .build();

    ResourceTable finalTable;
    TableMerger merger(mContext.get(), &finalTable, TableMergerOptions{});
    io::FileCollection collection;

    ASSERT_TRUE(merger.merge({}, baseTable.get()));
    ASSERT_TRUE(merger.mergeAndMangle({}, u"com.app.b", libTable.get(), &collection));

    EXPECT_TRUE(merger.getMergedPackages().count(u"com.app.b") != 0);

    // Entries from com.app.a should not be mangled.
    AAPT_EXPECT_TRUE(finalTable.findResource(test::parseNameOrDie(u"@com.app.a:id/foo")));
    AAPT_EXPECT_TRUE(finalTable.findResource(test::parseNameOrDie(u"@com.app.a:id/bar")));
    AAPT_EXPECT_TRUE(finalTable.findResource(test::parseNameOrDie(u"@com.app.a:styleable/view")));

    // The unmangled name should not be present.
    AAPT_EXPECT_FALSE(finalTable.findResource(test::parseNameOrDie(u"@com.app.b:id/foo")));

    // Look for the mangled name.
    AAPT_EXPECT_TRUE(finalTable.findResource(test::parseNameOrDie(u"@com.app.a:id/com.app.b$foo")));
}
/// Rebuilds this view's model from QML source text (e.g. pasted clipboard
/// content). The text is prefixed with the current model's import statements,
/// parsed via a throwaway model + RewriterView, and — only if parsing yielded
/// no errors and a valid root node — merged into this view, replacing the
/// current root node. Invalid QML is silently ignored.
void DesignDocumentView::fromText(const QString &text)
{
    // Temporary model that will host the parsed text.
    QScopedPointer<Model> inputModel(Model::create("QtQuick.Rectangle", 1, 0, model()));
    inputModel->setFileUrl(model()->fileUrl());
    QPlainTextEdit textEdit;

    // Reproduce the current model's imports so the pasted snippet resolves
    // the same types as the document it is pasted into.
    QString imports;
    foreach (const Import &import, model()->imports())
        imports += QStringLiteral("import ") + import.toString(true) + QLatin1Char(';') + QLatin1Char('\n');

    textEdit.setPlainText(imports + text);
    NotIndentingTextEditModifier modifier(&textEdit);

    QScopedPointer<RewriterView> rewriterView(new RewriterView(RewriterView::Amend, nullptr));
    // Semantic errors are tolerated; only syntactic validity matters here.
    rewriterView->setCheckSemanticErrors(false);
    rewriterView->setTextModifier(&modifier);
    inputModel->setRewriterView(rewriterView.data());

    rewriterView->restoreAuxiliaryData();

    if (rewriterView->errors().isEmpty() && rewriterView->rootModelNode().isValid()) {
        ModelMerger merger(this);
        try {
            merger.replaceModel(rewriterView->rootModelNode());
        } catch(Exception &/*e*/) {
            /* e.showException(); Do not show any error if the clipboard contains invalid QML */
        }
    }
}
/**
 * ByteRangeIndex::parindexread_merge: used by MPI open parallel
 * index read (both read_and_merge and split_and_merge cases) to take
 * a set of "procs" index streams in index_streams in memory and
 * merge them into one single index stream (the result is saved in
 * the "index_stream" pointer... this is all in memory, no threads or
 * I/O used.
 *
 * @param path loaded into Index object (XXX: is it used?)
 * @param index_streams array of input stream buffers to merge
 * @param index_sizes array of sizes of the buffers
 * @param procs the number of streams we are merging
 * @param index_stream buffer with resulting stream placed here
 * @return the number of bytes in the output stream
 */
int ByteRangeIndex::parindexread_merge(const char * /* path */, char *index_streams, int *index_sizes, int procs, void **index_stream)
{
    int count;
    size_t size;
    ByteRangeIndex merger(NULL); /* temporary obj use for collection */

    // Merge all of the indices that were passed in
    for(count=0; count<procs; count++) {
        char *istream;

        // The per-proc streams are packed back to back in index_streams;
        // advance past the previous proc's stream to reach the current one.
        if(count>0) {
            int index_inc=index_sizes[count-1];
            mlog(INT_DCOMMON, "Incrementing the index by %d",index_inc);
            index_streams+=index_inc;
        }

        // Deserialize the current stream into a temporary index, then fold
        // its records and chunk map into the accumulating "merger" index.
        ByteRangeIndex tmp(NULL);
        istream=index_streams;
        /*
         * XXXCDC: old code, ignores return value
         */
        tmp.global_from_stream(istream);
        merger.merge_idx(merger.idx, merger.chunk_map, &merger.eof_tracker, &merger.backing_bytes, tmp.idx, tmp.chunk_map);
    }

    // Convert temporary merger Index object into a stream and return that
    merger.global_to_stream(index_stream, &size); /* XXX: ignored retval */
    mlog(INT_DCOMMON, "Inside parindexread merge stream size %lu", (unsigned long)size);

    // Returns the byte count of the serialized output stream.
    return (int)size;
}
/// Test driver: generates random-length pre-sorted runs, feeds them to a
/// runs_creator, merges the runs with runs_merger, and verifies via checksums
/// that the merged output is sorted and contains exactly the pushed elements.
int main()
{
#if STXXL_PARALLEL_MULTIWAY_MERGE
    LOG1 << "STXXL_PARALLEL_MULTIWAY_MERGE";
#endif
    // special parameter type
    using InputType = stxxl::stream::from_sorted_sequences<value_type>;
    using CreateRunsAlg = stxxl::stream::runs_creator<InputType, Cmp, 4096, foxxll::random_cyclic>;
    using SortedRunsType = CreateRunsAlg::sorted_runs_type;

    unsigned input_size = (10 * megabyte / sizeof(value_type));

    Cmp c;
    CreateRunsAlg SortedRuns(c, 10 * megabyte);
    value_type checksum_before(0);

    std::mt19937_64 randgen;
    std::uniform_int_distribution<unsigned> distr_value;
    // Consume the input budget in randomly sized chunks; each chunk becomes
    // one locally sorted run pushed into the runs creator.
    for (unsigned cnt = input_size; cnt > 0; )
    {
        std::uniform_int_distribution<unsigned> distr_size(1, cnt);
        unsigned run_size = distr_size(randgen); // random run length
        cnt -= run_size;
        LOG1 << "current run size: " << run_size;

        std::vector<unsigned> tmp(run_size); // create temp storage for current run
        // fill with random numbers
        std::generate(tmp.begin(), tmp.end(), std::bind(distr_value, std::ref(randgen)) _STXXL_FORCE_SEQUENTIAL);
        std::sort(tmp.begin(), tmp.end(), c); // sort
        for (unsigned j = 0; j < run_size; ++j)
        {
            checksum_before += tmp[j];
            SortedRuns.push(tmp[j]); // push sorted values to the current run
        }
        SortedRuns.finish(); // finish current run
    }

    SortedRunsType Runs = SortedRuns.result(); // get sorted_runs data structure
    die_unless(check_sorted_runs(Runs, Cmp()));

    // merge the runs
    stxxl::stream::runs_merger<SortedRunsType, Cmp> merger(Runs, Cmp(), 10 * megabyte);
    stxxl::vector<value_type, 4, stxxl::lru_pager<8> > array;
    LOG1 << input_size << " " << Runs->elements;
    LOG1 << "checksum before: " << checksum_before;
    value_type checksum_after(0);
    // Pull every element out of the merger, accumulating a second checksum
    // and materializing the merged sequence for the sortedness check.
    for (unsigned i = 0; i < input_size; ++i)
    {
        checksum_after += *merger;
        array.push_back(*merger);
        ++merger;
    }
    LOG1 << "checksum after: " << checksum_after;
    die_unless(stxxl::is_sorted(array.cbegin(), array.cend(), Cmp()));
    die_unless(checksum_before == checksum_after);
    die_unless(merger.empty());
    return 0;
}
/// Attempts to combine runs of small extrusion moves starting at path_idx
/// into single wider lines, emitting the compensated moves directly.
/// Advances path_idx past the converted paths and returns true if any
/// conversion happened; returns false (path_idx untouched) otherwise.
bool MergeInfillLines::mergeInfillLines(double speed, unsigned int& path_idx)
{ //Check for lots of small moves and combine them into one large line
    Point prev_middle;
    Point last_middle;
    int64_t line_width;

    // NOTE(review): a fresh MergeInfillLines is built from this object's own
    // members to perform the convertibility checks.
    MergeInfillLines merger(gcode, paths, travelConfig, nozzle_size);
    if (merger.isConvertible(path_idx, prev_middle, last_middle, line_width, false))
    {
        // path_idx + 3 is the index of the second extrusion move to be converted in combination with the first
        {
            GCodePath& last_path = paths[path_idx + 3];
            // Travel to the middle of the first segment, then extrude a single
            // compensated line covering both converted moves.
            gcode.writeMove(prev_middle, travelConfig.getSpeed(), 0);
            writeCompensatedMove(last_middle, speed, last_path, line_width);
        }

        // Keep converting subsequent pairs for as long as they qualify.
        path_idx += 2;
        for (; merger.isConvertible(path_idx, prev_middle, last_middle, line_width, true); path_idx += 2)
        {
            GCodePath& last_path = paths[path_idx + 3];
            writeCompensatedMove(last_middle, speed, last_path, line_width);
        }
        path_idx = path_idx + 1; // means that the next path considered is the travel path after the converted extrusion path corresponding to the updated path_idx
        return true;
    }
    return false;
};
TEST_F(TableMergerTest, MergeFileReferences)
{
    // Both packages reference the same file path but share package id 0x7f,
    // so com.app.b's reference must be mangled on merge.
    std::unique_ptr<ResourceTable> appTable = test::ResourceTableBuilder()
            .setPackageId(u"com.app.a", 0x7f)
            .addFileReference(u"@com.app.a:xml/file", u"res/xml/file.xml")
            .build();
    std::unique_ptr<ResourceTable> libTable = test::ResourceTableBuilder()
            .setPackageId(u"com.app.b", 0x7f)
            .addFileReference(u"@com.app.b:xml/file", u"res/xml/file.xml")
            .build();

    ResourceTable finalTable;
    TableMerger merger(mContext.get(), &finalTable, TableMergerOptions{});
    io::FileCollection collection;
    collection.insertFile("res/xml/file.xml");

    ASSERT_TRUE(merger.merge({}, appTable.get()));
    ASSERT_TRUE(merger.mergeAndMangle({}, u"com.app.b", libTable.get(), &collection));

    // The reference owned by com.app.a keeps its original path...
    FileReference* ref = test::getValue<FileReference>(&finalTable, u"@com.app.a:xml/file");
    ASSERT_NE(ref, nullptr);
    EXPECT_EQ(std::u16string(u"res/xml/file.xml"), *ref->path);

    // ...while the merged com.app.b reference gets a mangled name and path.
    ref = test::getValue<FileReference>(&finalTable, u"@com.app.a:xml/com.app.b$file");
    ASSERT_NE(ref, nullptr);
    EXPECT_EQ(std::u16string(u"res/xml/com.app.b$file.xml"), *ref->path);
}
// Serilize Constructor MessageMergeProcess() : Message( PROTOCOL_VERSION , 171 , 0 ) { task_id( "" ); uri_list( ); merger( "" ); }
/// Merges num_maps hash maps into the first one. Maps 1..n-1 are drained
/// round-robin, one element from each per pass, with `merger` combining a
/// source value into the destination slot. The ThreadPool is unused.
static void NO_INLINE execute(Map ** source_maps, size_t num_maps, Map *& result_map, Merger && merger, ThreadPool &)
{
    // The first map accumulates everything.
    result_map = source_maps[0];

    std::vector<typename Map::iterator> cursors(num_maps);
    for (size_t idx = 1; idx < num_maps; ++idx)
        cursors[idx] = source_maps[idx]->begin();

    // Interleave: take one element from each non-exhausted map per round,
    // until every source map has been fully consumed.
    bool any_left;
    do
    {
        any_left = false;
        for (size_t idx = 1; idx < num_maps; ++idx)
        {
            if (cursors[idx] != source_maps[idx]->end())
            {
                any_left = true;
                merger((*result_map)[cursors[idx]->first], cursors[idx]->second);
                ++cursors[idx];
            }
        }
    } while (any_left);
}
TEST_F(MergeTests, merge_with_different_layout_2)
{
    auto mainTable = io::Loader::shortcuts::load("test/merge1_main.tbl");
    auto deltaTable = io::Loader::shortcuts::load("test/merge1_delta.tbl"); //, io::Loader::params().set_modifiable(true));
    auto expected = io::Loader::shortcuts::load("test/merge1_result.tbl");
    hyrise::storage::atable_ptr_t dest = io::Loader::shortcuts::load("test/merge1_newlayout_2.tbl", io::Loader::params().setModifiableMutableVerticalTable(true));

    // Input and destination use different vertical partitioning.
    ASSERT_EQ(3u, mainTable->partitionCount());
    ASSERT_EQ(2u, dest->partitionCount());

    std::vector<hyrise::storage::c_atable_ptr_t> inputs;
    inputs.push_back(mainTable);
    inputs.push_back(deltaTable);

    TableMerger merger(new DefaultMergeStrategy(), new SequentialHeapMerger());
    const auto& merged = merger.mergeToTable(dest, inputs);

    // Content and per-column dictionary sizes must match the reference table.
    ASSERT_TRUE(merged[0]->contentEquals(expected));
    ASSERT_TRUE(merged[0]->dictionaryAt(0)->size() == 7);
    ASSERT_TRUE(merged[0]->dictionaryAt(1)->size() == 7);
    ASSERT_TRUE(merged[0]->dictionaryAt(2)->size() == 8);

    // The merge happened in place into the destination and kept its layout.
    ASSERT_EQ(2u, merged[0]->partitionCount());
    ASSERT_EQ(dest, merged[0]);
}
/// Serializes the current model to QML text. The root node is merged into a
/// throwaway model backed by a RewriterView, then the root item's text is
/// extracted (without imports) and the raw auxiliary data appended.
QString DesignDocumentView::toText() const
{
    // Temporary model that mirrors the current one for text extraction.
    QScopedPointer<Model> outputModel(Model::create("QtQuick.Rectangle", 1, 0, model()));
    outputModel->setFileUrl(model()->fileUrl());
    QPlainTextEdit textEdit;

    // Rebuild the document's import statements (file imports quoted,
    // module imports with url + version).
    QString imports;
    foreach (const Import &import, model()->imports()) {
        if (import.isFileImport())
            imports += QStringLiteral("import ") + QStringLiteral("\"") + import.file() + QStringLiteral("\"")+ QStringLiteral(";\n");
        else
            imports += QStringLiteral("import ") + import.url() + QStringLiteral(" ") + import.version() + QStringLiteral(";\n");
    }

    // Seed the text document with the imports and a placeholder root item.
    textEdit.setPlainText(imports + QStringLiteral("Item {\n}\n"));
    NotIndentingTextEditModifier modifier(&textEdit);

    QScopedPointer<RewriterView> rewriterView(new RewriterView(RewriterView::Amend, nullptr));
    rewriterView->setCheckSemanticErrors(false);
    rewriterView->setTextModifier(&modifier);
    outputModel->setRewriterView(rewriterView.data());

    // Copy this view's root node into the output model.
    ModelMerger merger(rewriterView.data());
    merger.replaceModel(rootModelNode());

    ModelNode rewriterNode(rewriterView->rootModelNode());

    rewriterView->writeAuxiliaryData();
    return rewriterView->extractText({rewriterNode}).value(rewriterNode) + rewriterView->getRawAuxiliaryData();
    //get the text of the root item without imports
}
/// Finds the records of (mote, reboot) that have a matching time-sync pair,
/// refills the matching_* collections with their headers/lines, and computes
/// the per-record time-sync data.
void RecordList::search_for_matching(int mote, int reboot)
{
    // Reset previous results before repopulating.
    matching_header->clear();
    matching_records->clear();
    matching_timesync_data->clear();

    // Collect the IDs of all records that participate in a time-sync pair.
    sdc::TimeSyncMerger merger(mote, reboot);
    typedef std::set<sdc::RecordID> Set;
    const Set& matching = merger.recordID_of_pairs();

    int size = 0;
    for (Set::const_iterator i=matching.begin(); i!=matching.end(); ++i) {
        copy_matching_header(*i);
        copy_matching_line(*i);
        ++size;
    }

    matching_timesync_data->resize(size);
    sdc::RecordID rec_id(mote, reboot);
    TimeSyncData* data = matching_timesync_data->data();
    // NOTE(review): TimeSyncCalc appears to fill the `data` buffer as a
    // side effect of construction — confirm against its definition.
    sdc::TimeSyncCalc fill_timesync_data(merger, rec_id, data, size);

    Q_ASSERT(matching_records->size()==matching_timesync_data->size());
    qDebug() << "Set size: " << matching.size();
    dump_matching_data();
}
/// Recursive merge sort over array[left_pos..right_pos], using tmp_array as
/// scratch space for the merge step.
void merger_sort(ItemType array[], ItemType tmp_array[], int left_pos, int right_pos)
{
    // Ranges of fewer than two elements are already sorted.
    if (left_pos >= right_pos)
        return;

    const int center = (left_pos + right_pos) / 2;

    // Sort each half independently, then merge them back together.
    merger_sort(array, tmp_array, left_pos, center);
    merger_sort(array, tmp_array, center + 1, right_pos);
    merger(array, tmp_array, left_pos, center + 1, right_pos);
}
void GateVisitor::mergeAssigns() { GateMergeAssignsGraphVisitor merger(&m_graph); for (V3GraphVertex* itp = m_graph.verticesBeginp(); itp; itp=itp->verticesNextp()) { if (GateVarVertex* vvertexp = dynamic_cast<GateVarVertex*>(itp)) { merger.mergeAssignsTree(vvertexp); } } m_statAssignMerged += merger.numMergedAssigns(); }
int main()
{
    // Exercise merger() on each of the six fixed test cases, printing the
    // outcome of each comparison against the expected result.
    int idx = 0;
    while (idx < 6) {
        if (merger(cases[idx].n1, cases[idx].n2, cases[idx].r) == cases[idx].out)
            printf("PASS\n");
        else
            printf("FAIL\n");
        ++idx;
    }
    getchar(); // keep the console window open until a key is pressed
    return 0;
}
/**
 * Recursively sorts the sub-array [p, r] of the input array, then merges
 * the two sorted halves back together.
 * @param array - array with data
 * @param p - start of subArray
 * @param r - end of subArray
 */
void sorter(int *array, const int p, const int r)
{
    const int mid = (p + r) / 2;

    // Recurse into each half only when it holds more than one element.
    if (p < mid)
        sorter(array, p, mid);
    if (mid + 1 < r)
        sorter(array, mid + 1, r);

    // Combine the two (now sorted) halves into one sorted run.
    merger(array, p, r);
}
/// Merge-thread main loop: performs the merge this thread was started with,
/// then keeps asking the writer for further pending merges until none remain.
/// Exceptions are routed to the scheduler (or deferred), and the thread always
/// deregisters itself from the scheduler's thread list before exiting.
void MergeThread::run()
{
    // First time through the while loop we do the merge that we were started with
    OneMergePtr merge(this->startMerge);
    ConcurrentMergeSchedulerPtr merger(_merger);
    // Holds an exception to be rethrown after cleanup (manual "finally").
    LuceneException finally;
    try
    {
        merger->message(L"  merge thread: start");
        IndexWriterPtr writer(_writer);
        while (true)
        {
            setRunningMerge(merge);
            merger->doMerge(merge);

            // Subsequent times through the loop we do any new merge that writer says is necessary
            merge = writer->getNextMerge();
            if (merge)
            {
                writer->mergeInit(merge);
                merger->message(L"  merge thread: do another merge " + merge->segString(merger->dir));
            }
            else
                break;
        }
        merger->message(L"  merge thread: done");
    }
    catch (MergeAbortedException&)
    {
        // Ignore the exception if it was due to abort
    }
    catch (LuceneException& e)
    {
        if (!merger->suppressExceptions)
        {
            // suppressExceptions is normally only set during testing.
            merger->anyExceptions = true;
            merger->handleMergeException(e);
        }
        else
            finally = e;
    }

    // Cleanup: wake any waiters and remove this thread from the scheduler.
    {
        SyncLock syncLock(merger);
        merger->notifyAll();
        bool removed = merger->mergeThreads.remove(shared_from_this());
        BOOST_ASSERT(removed);
    }
    // Rethrow a deferred (suppressed-path) exception, if any.
    finally.throwException();
}
bool merge(int n = 2, int limit = 0) { TFileMerger merger(kFALSE,kFALSE); // hadd style merger.OutputFile(TString::Format("merged%d.root",n)); if (limit > 0) { merger.SetMaxOpenedFiles(limit); } for(UInt_t i = 0; i < (UInt_t)n; ++i ) { if (! merger.AddFile(TString::Format("input%d.root",i))) { return false; } } return merger.Merge(); }
/// Merges num_maps hash maps into the first one, draining maps 1..n-1
/// sequentially (each fully consumed before the next). `merger` combines a
/// source value into the destination slot. The ThreadPool is unused.
static void NO_INLINE execute(Map ** source_maps, size_t num_maps, Map *& result_map, Merger && merger, ThreadPool &)
{
    Map & destination = *source_maps[0];

    // Fold each remaining map, element by element, into the destination.
    for (size_t map_idx = 1; map_idx < num_maps; ++map_idx)
        for (auto & elem : *source_maps[map_idx])
            merger(destination[elem.first], elem.second);

    result_map = source_maps[0];
}
TEST_F(TableMergerTest, MergeFileOverlay)
{
    ResourceTable finalTable;
    TableMergerOptions options;
    options.autoAddOverlay = false;
    TableMerger merger(mContext.get(), &finalTable, options);

    // One file resource @xml/foo, provided first by fileA and then
    // overlaid by fileB; both operations must succeed.
    ResourceFile descriptor;
    descriptor.name = test::parseNameOrDie(u"@xml/foo");
    test::TestFile baseFile("path/to/fileA.xml.flat");
    test::TestFile overlayFile("path/to/fileB.xml.flat");

    ASSERT_TRUE(merger.mergeFile(descriptor, &baseFile));
    ASSERT_TRUE(merger.mergeFileOverlay(descriptor, &overlayFile));
}
void Aggregate::worker() try { //fancyprint("STARTER",NOTIFY); starter(); //fancyprint("SPLITTER",NOTIFY); splitter(); //fancyprint("JOIN ALL",NOTIFY); joinChunks(); //fancyprint("MERGER",NOTIFY); merger(); } catch ( ex::Error e ) { m_failed = true; //fancyprint(e.what(),ERROR); std::cout << e.what() << std::endl; }
/// Prunes the given clusters into _clusters in four stages:
///  1. split clusters larger than _split_size,
///  2. merge clusters with identical centers (entropy-guided),
///  3. drop clusters smaller than _cutoff.
void ClusterPruner::prune(const std::list<std::shared_ptr<Cluster> > &clusters)
{
    _clusters.clear();

    // Collect all cluster IDs already in use so newly split clusters get
    // fresh IDs.
    // Calculate the mean size of clusters.
    //MeanSequentialEstimator mean_estimator;
    std::set<int> taken_ids;
    for (const auto& c : clusters) {
        // mean_estimator.Add(c->size());
        taken_ids.insert(c->ClusterID());
    }
    // Only split clusters whose size is above the mean cutoff.
    //size_t mean_cutoff = static_cast<int>(mean_estimator.GetMean());
    std::cout << "Split threshold for post prunning is " << _split_size << std::endl;

    // Stage 1: keep small clusters as-is; split oversized ones. An entropy
    // value (of base-pair frequencies) is recorded per surviving cluster.
    std::shared_ptr<IDGenerator> id_pool(new IDGenerator(taken_ids));
    std::list<std::vector<double>> entropies;
    ClusterSplitter splitter(_recalibrator, id_pool);
    for (const auto& c : clusters) {
        if (c->size() <= _split_size) {
            _clusters.push_back(c);
            entropies.push_back(Entropy(c->bpFrequency()));
        } else {
            vector<shared_ptr<Cluster>> splited_clusters = splitter.split(c);
            for (const auto& s : splited_clusters) {
                // split() may yield null entries; skip them.
                if (s.get()) {
                    _clusters.push_back(s);
                    entropies.push_back(Entropy(s->bpFrequency()));
                }
            }
        }
    }

    // Stage 2: merge cluster who has the same centers.
    MergeByCenters merger(_recalibrator);
    merger.merge(_clusters, entropies);
    _clusters = merger.clusters();

    // Stage 3: filter out cluster
    // 3.Remove those clusters whose size is below the cutoff.
    list<shared_ptr<Cluster>> filtered_clusters;
    for (const auto& c : _clusters) {
        if (c->size() >= this->_cutoff) {
            filtered_clusters.push_back(c);
        }
    }
    //_clusters = std::move(filtered_clusters);
    std::swap(_clusters, filtered_clusters);
}
// Constructors must take custom types to ensure promotion. explicit SymbolicRegisterState(const SymbolicValuePtr &proto, const RegisterDictionary *rd): RegisterStateGeneric(proto, rd) { STRACE << "SymbolicRegisterState::SymbolicRegisterState(proto, rd)" << LEND; merger(CERTMerger::instance()); // For some reason the DF register still needs to be initialized or our code doesn't work // properly. This might be a bug in ROSE, or something subtle in our code. For now, // simply initializing the DF register to a variable (which is what should happen by // default without this code) somehow fixes the problem. Semantics2::DispatcherX86Ptr dispatcher = RoseDispatcherX86::instance(); dispatcher->set_register_dictionary(regdict); std::vector<RegisterDescriptor> regs; regs.push_back(*(regdict->lookup("df"))); initialize_nonoverlapping(dispatcher->get_usual_registers(), false); }
TEST_F(MergeTests, simple_logarithmic_merger_test)
{
    auto store = io::Loader::shortcuts::loadMainDelta("test/merge1_main.tbl", "test/merge1_delta.tbl");

    std::vector<hyrise::storage::c_atable_ptr_t> inputs;
    inputs.push_back(store->getMainTable());
    inputs.push_back(store->getDeltaTable());

    TableMerger merger(new DefaultMergeStrategy(), new SequentialHeapMerger());
    const auto& merged = merger.merge(inputs);

    // Merged content and per-column dictionary sizes must match the
    // reference table.
    const auto& expected = io::Loader::shortcuts::load("test/merge1_result.tbl");
    ASSERT_TRUE(merged[0]->contentEquals(expected));
    ASSERT_TRUE(merged[0]->dictionaryAt(0)->size() == 7);
    ASSERT_TRUE(merged[0]->dictionaryAt(1)->size() == 7);
    ASSERT_TRUE(merged[0]->dictionaryAt(2)->size() == 8);
}
// Constructs a Module for the given ELF file: brings the DWARF merge
// database up to date (prompting the user when it conflicts with module
// files), loads the enabled/disabled component lists, and initializes the
// debug-information handle.
Module::Module( const char *name, WCValSList<String> & enabled, WCValSList<String> & disabled )
//------------------------------------------------------------
{
    WCPtrOrderedVector<ComponentFile> components;
    FileInfo finf( name );
    int i;
    MsgRetType ret;

    _dataFile = new ElfFile( name, false );

    // Bring the merged DWARF database up to date if needed.
    DwarfFileMerger merger( name, enabled, disabled );
    if( !merger.upToDate() ) {
        if( !finf.exists() ) {
            // No file on disk yet — merge unconditionally.
            merger.doMerge();
        } else {
            if( enabled.entries() != 0 ) {
                // Database exists but is stale; ask the user before remerging.
                ret = WMessageDialog::messagef( topWindow, MsgQuestion, MsgYesNo, "Source Browser", "Database %s is not consistent with module files.\n" "Merge the database now?", name );
                if( ret == MsgRetYes ) {
                    merger.doMerge();
                }
            }
        }
    }

    _dataFile->initSections();

    // Copy the enabled component file names out of the data file.
    _dataFile->getEnabledComponents( &components );
    for( i = 0; i < components.entries(); i += 1 ) {
        _enabledFiles.add( new WFileName( components[i]->name ) );
    }

    // Reuse the vector for the disabled components.
    components.clear();
    _dataFile->getDisabledComponents( &components );
    for( i = 0; i < components.entries(); i += 1 ) {
        _disabledFiles.add( new WFileName( components[i]->name ) );
    }

    checkSourceTime();

    // Initialize the DWARF debug-info handle and make it current.
    _dbgInfo = DRDbgInit( this, _dataFile->getDRSizes(), false );
    DRSetDebug( _dbgInfo );
}
TEST_F(TableMergerTest, MergeAddResourceFromOverlay)
{
    // The base table declares @bool/foo only as an undefined symbol; the
    // overlay supplies the actual value, which must be accepted.
    std::unique_ptr<ResourceTable> base = test::ResourceTableBuilder()
            .setPackageId(u"", 0x7f)
            .setSymbolState(u"@bool/foo", {}, SymbolState::kUndefined)
            .build();
    std::unique_ptr<ResourceTable> overlay = test::ResourceTableBuilder()
            .setPackageId(u"", 0x7f)
            .addValue(u"@bool/foo", ResourceUtils::tryParseBool(u"true"))
            .build();

    ResourceTable finalTable;
    TableMerger merger(mContext.get(), &finalTable, TableMergerOptions{});

    ASSERT_TRUE(merger.merge({}, base.get()));
    ASSERT_TRUE(merger.mergeOverlay({}, overlay.get()));
}
/// Test driver: pushes random unsorted values into a runs_creator (use_push
/// mode), merges the produced runs with runs_merger, and verifies via
/// checksums that the merged output is sorted and element-complete.
int main()
{
#if STXXL_PARALLEL_MULTIWAY_MERGE
    STXXL_MSG("STXXL_PARALLEL_MULTIWAY_MERGE");
#endif
    // special parameter type
    typedef stxxl::stream::use_push<value_type> InputType;
    typedef stxxl::stream::runs_creator<InputType, Cmp, 4096, stxxl::RC> CreateRunsAlg;
    typedef CreateRunsAlg::sorted_runs_type SortedRunsType;

    unsigned input_size = (50 * megabyte / sizeof(value_type));

    Cmp c;
    CreateRunsAlg SortedRuns(c, 1 * megabyte / 64);
    value_type checksum_before(0);

    // Feed random elements into the sorter, accumulating a checksum of the
    // input for later comparison.
    stxxl::random_number32 rnd;
    for (unsigned cnt = input_size; cnt > 0; --cnt)
    {
        const value_type element = rnd();
        checksum_before += element;
        SortedRuns.push(element); // push into the sorter
    }

    SortedRunsType& Runs = SortedRuns.result(); // get sorted_runs data structure
    assert(stxxl::stream::check_sorted_runs(Runs, Cmp()));

    // merge the runs
    stxxl::stream::runs_merger<SortedRunsType, Cmp> merger(Runs, Cmp(), 1 * megabyte);
    stxxl::vector<value_type, 4, stxxl::lru_pager<8>, block_size, STXXL_DEFAULT_ALLOC_STRATEGY> array;
    STXXL_MSG(input_size << " " << Runs->elements);
    STXXL_MSG("checksum before: " << checksum_before);
    value_type checksum_after(0);
    // Pull every element from the merger, accumulating a second checksum and
    // materializing the merged sequence for the sortedness check.
    for (unsigned i = 0; i < input_size; ++i)
    {
        checksum_after += *merger;
        array.push_back(*merger);
        ++merger;
    }
    STXXL_MSG("checksum after: " << checksum_after);
    assert(stxxl::is_sorted(array.begin(), array.end(), Cmp()));
    assert(checksum_before == checksum_after);
    assert(merger.empty());
    return 0;
}
/**
 * calls sorter for subArrays of current subArray of input array and merge sorted subArrays
 * (task-parallel variant: halves above lowLimit are sorted in OpenMP tasks)
 * @param array - array with data
 * @param p - start of subArray
 * @param r - end of subArray
 * @param lowLimit - sub-array length below which no tasks are spawned
 */
void sorter(int *array, const int p, const int r, const int lowLimit)
{
    int q = (p+r)/2;
    if ((r - p) < lowLimit) {
        // Small range: the 3-argument call resolves to the sequential
        // sorter overload, avoiding task overhead.
        sorter (array, p, r);
    } else {
        // Sort each half in its own task; firstprivate snapshots the bounds
        // so each task sees stable values.
        if(p < q) {
#pragma omp task firstprivate(array, lowLimit, p, q)
            sorter(array, p, q);
        }
        if(q+1 < r) {
#pragma omp task firstprivate(array, lowLimit, r, q)
            sorter(array, q+1, r);
        }
        // Both halves must be fully sorted before merging them.
#pragma omp taskwait
        merger(array, p ,r);
    }
}
TEST_F(TableMergerTest, FailToMergeNewResourceWithoutAutoAddOverlay)
{
    // The overlay introduces @bool/foo, which the base table never declared.
    std::unique_ptr<ResourceTable> base = test::ResourceTableBuilder()
            .setPackageId(u"", 0x7f)
            .build();
    std::unique_ptr<ResourceTable> overlay = test::ResourceTableBuilder()
            .setPackageId(u"", 0x7f)
            .addValue(u"@bool/foo", ResourceUtils::tryParseBool(u"true"))
            .build();

    ResourceTable finalTable;
    TableMergerOptions options;
    options.autoAddOverlay = false;
    TableMerger merger(mContext.get(), &finalTable, options);

    // With autoAddOverlay disabled, merging an overlay that adds a brand-new
    // resource must be rejected.
    ASSERT_TRUE(merger.merge({}, base.get()));
    ASSERT_FALSE(merger.mergeOverlay({}, overlay.get()));
}