int main(int argc, char **argv)
{
    t_info_array board;
    t_pos_square result;
    char *file;
    char *line_file;
    char *map;

    board.nb_line = 0;
    board.nb_col = 0;
    board.array = NULL;
    if (argc == 1) {
        my_putstr("Usage: ./bsq [FILE]\n");
        return (1);  /* no file argument: nothing to read, bail out instead of crashing below */
    }
    file = fd(argv);
    line_file = line(file);
    board.nb_line = my_getnbr(line_file);
    free(line_file);
    map = maps(file);
    board.nb_col = my_col_nbr(map);
    board.array = get_array(map, board.nb_line, board.nb_col, 0);
    algo(&board, &result, 0, 0);
    modif_map(&board, &result);
    my_putchar_et(&board);
    free(file);
    return (0);
}
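/*
** A hypothetical input sketch for the solver above (the classic bsq layout is
** assumed, since the format is not shown in this snippet): the first line
** holds the row count, '.' is a free cell, 'o' is an obstacle, and modif_map
** redraws the largest obstacle-free square with 'x':
**   3
**   .o.
**   ...
**   ...
*/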
// Create a maps file that is extremely large. TEST(MapsTest, large_file) { TemporaryFile tf; ASSERT_TRUE(tf.fd != -1); std::string file_data; uint64_t start = 0x700000; for (size_t i = 0; i < 5000; i++) { file_data += android::base::StringPrintf("%" PRIx64 "-%" PRIx64 " r-xp 1000 00:0 0 /fake%zu.so\n", start + i * 4096, start + (i + 1) * 4096, i); } ASSERT_TRUE(android::base::WriteStringToFile(file_data, tf.path, 0660, getuid(), getgid())); FileMaps maps(tf.path); ASSERT_TRUE(maps.Parse()); ASSERT_EQ(5000U, maps.Total()); for (size_t i = 0; i < 5000; i++) { MapInfo* info = maps.Get(i); ASSERT_EQ(start + i * 4096, info->start) << "Failed at map " + std::to_string(i); ASSERT_EQ(start + (i + 1) * 4096, info->end) << "Failed at map " + std::to_string(i); std::string name = "/fake" + std::to_string(i) + ".so"; ASSERT_EQ(name, info->name) << "Failed at map " + std::to_string(i); } }
TEST(MapsTest, file_no_map_name) { TemporaryFile tf; ASSERT_TRUE(tf.fd != -1); ASSERT_TRUE( android::base::WriteStringToFile("7b29b000-7b29e000 r-xp a0000000 00:00 0\n" "7b2b0000-7b2e0000 r-xp b0000000 00:00 0 /fake2.so\n" "7b2e0000-7b2f0000 r-xp c0000000 00:00 0 \n", tf.path, 0660, getuid(), getgid())); FileMaps maps(tf.path); ASSERT_TRUE(maps.Parse()); ASSERT_EQ(3U, maps.Total()); auto it = maps.begin(); ASSERT_EQ(0x7b29b000U, it->start); ASSERT_EQ(0x7b29e000U, it->end); ASSERT_EQ(0xa0000000U, it->offset); ASSERT_EQ(PROT_READ | PROT_EXEC, it->flags); ASSERT_EQ("", it->name); ++it; ASSERT_EQ(0x7b2b0000U, it->start); ASSERT_EQ(0x7b2e0000U, it->end); ASSERT_EQ(0xb0000000U, it->offset); ASSERT_EQ(PROT_READ | PROT_EXEC, it->flags); ASSERT_EQ("/fake2.so", it->name); ++it; ASSERT_EQ(0x7b2e0000U, it->start); ASSERT_EQ(0x7b2f0000U, it->end); ASSERT_EQ(0xc0000000U, it->offset); ASSERT_EQ(PROT_READ | PROT_EXEC, it->flags); ASSERT_EQ("", it->name); ++it; ASSERT_EQ(it, maps.end()); }
TEST(MapsTest, parse_name) { BufferMaps maps( "7b29b000-7b29e000 rw-p 00000000 00:00 0\n" "7b29e000-7b29f000 rw-p 00000000 00:00 0 /system/lib/fake.so\n" "7b29f000-7b2a0000 rw-p 00000000 00:00 0"); ASSERT_TRUE(maps.Parse()); ASSERT_EQ(3U, maps.Total()); auto it = maps.begin(); ASSERT_EQ("", it->name); ASSERT_EQ(0x7b29b000U, it->start); ASSERT_EQ(0x7b29e000U, it->end); ASSERT_EQ(0U, it->offset); ASSERT_EQ(PROT_READ | PROT_WRITE, it->flags); ++it; ASSERT_EQ("/system/lib/fake.so", it->name); ASSERT_EQ(0x7b29e000U, it->start); ASSERT_EQ(0x7b29f000U, it->end); ASSERT_EQ(0U, it->offset); ASSERT_EQ(PROT_READ | PROT_WRITE, it->flags); ++it; ASSERT_EQ("", it->name); ASSERT_EQ(0x7b29f000U, it->start); ASSERT_EQ(0x7b2a0000U, it->end); ASSERT_EQ(0U, it->offset); ASSERT_EQ(PROT_READ | PROT_WRITE, it->flags); ++it; ASSERT_EQ(it, maps.end()); }
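// For reference, the anatomy of a /proc/<pid>/maps line as these tests emit it
// (start-end, permissions, offset, device, inode, then an optional pathname):
//   7b29e000-7b29f000 rw-p 00000000 00:00 0   /system/lib/fake.so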
void init_maps()
{
    if (g_stack_max != 0) {
        return;
    }
    maps();
}
TEST(MapsTest, find) { BufferMaps maps( "1000-2000 r--p 00000010 00:00 0 /system/lib/fake1.so\n" "3000-4000 -w-p 00000020 00:00 0 /system/lib/fake2.so\n" "6000-8000 --xp 00000030 00:00 0 /system/lib/fake3.so\n" "a000-b000 rw-p 00000040 00:00 0 /system/lib/fake4.so\n" "e000-f000 rwxp 00000050 00:00 0 /system/lib/fake5.so\n"); ASSERT_TRUE(maps.Parse()); ASSERT_EQ(5U, maps.Total()); ASSERT_TRUE(maps.Find(0x500) == nullptr); ASSERT_TRUE(maps.Find(0x2000) == nullptr); ASSERT_TRUE(maps.Find(0x5010) == nullptr); ASSERT_TRUE(maps.Find(0x9a00) == nullptr); ASSERT_TRUE(maps.Find(0xf000) == nullptr); ASSERT_TRUE(maps.Find(0xf010) == nullptr); MapInfo* info = maps.Find(0x1000); ASSERT_TRUE(info != nullptr); ASSERT_EQ(0x1000U, info->start); ASSERT_EQ(0x2000U, info->end); ASSERT_EQ(0x10U, info->offset); ASSERT_EQ(PROT_READ, info->flags); ASSERT_EQ("/system/lib/fake1.so", info->name); info = maps.Find(0x3020); ASSERT_TRUE(info != nullptr); ASSERT_EQ(0x3000U, info->start); ASSERT_EQ(0x4000U, info->end); ASSERT_EQ(0x20U, info->offset); ASSERT_EQ(PROT_WRITE, info->flags); ASSERT_EQ("/system/lib/fake2.so", info->name); info = maps.Find(0x6020); ASSERT_TRUE(info != nullptr); ASSERT_EQ(0x6000U, info->start); ASSERT_EQ(0x8000U, info->end); ASSERT_EQ(0x30U, info->offset); ASSERT_EQ(PROT_EXEC, info->flags); ASSERT_EQ("/system/lib/fake3.so", info->name); info = maps.Find(0xafff); ASSERT_TRUE(info != nullptr); ASSERT_EQ(0xa000U, info->start); ASSERT_EQ(0xb000U, info->end); ASSERT_EQ(0x40U, info->offset); ASSERT_EQ(PROT_READ | PROT_WRITE, info->flags); ASSERT_EQ("/system/lib/fake4.so", info->name); info = maps.Find(0xe500); ASSERT_TRUE(info != nullptr); ASSERT_EQ(0xe000U, info->start); ASSERT_EQ(0xf000U, info->end); ASSERT_EQ(0x50U, info->offset); ASSERT_EQ(PROT_READ | PROT_WRITE | PROT_EXEC, info->flags); ASSERT_EQ("/system/lib/fake5.so", info->name); }
static void VerifyLine(std::string line, MapInfo* info) { BufferMaps maps(line.c_str()); if (info == nullptr) { ASSERT_FALSE(maps.Parse()) << "Failed on: " + line; } else { ASSERT_TRUE(maps.Parse()) << "Failed on: " + line; MapInfo* element = maps.Get(0); ASSERT_TRUE(element != nullptr) << "Failed on: " + line; *info = *element; } }
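// A hypothetical example of how VerifyLine is meant to be driven (this test
// body is illustrative, not from the original suite): pass a MapInfo* to
// assert that the line parses, or nullptr to assert that parsing fails.
TEST(MapsTest, verify_line_usage) {
  MapInfo info;
  VerifyLine("1000-2000 r-xp 00000000 00:00 0\n", &info);
  ASSERT_EQ(0x1000U, info.start);
  ASSERT_EQ(0x2000U, info.end);
  VerifyLine("1000-2000\n", nullptr);  // truncated line: expected to fail
}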
TEST(MapsTest, file_should_fail) { TemporaryFile tf; ASSERT_TRUE(tf.fd != -1); ASSERT_TRUE(android::base::WriteStringToFile( "7ffff7dda000-7ffff7dfd7ffff7ff3000-7ffff7ff4000 ---p 0000f000 fc:02 44171565\n", tf.path, 0660, getuid(), getgid())); FileMaps maps(tf.path); ASSERT_FALSE(maps.Parse()); }
void MapDatabase::LoadMap(const wxInt32 index, Map &map) { wxASSERT(sOffsets.size() > (wxUint32) index); wxFileInputStream x(FilePath::Data(DataFileMaps)); wxDataInputStream maps(x); //seek to this position in the game file x.SeekI(sOffsets[index]); map.load(maps); }
void YOGLoginScreen::runLobby() { Glob2TabScreen screen(true); YOGClientLobbyScreen lobby(&screen, client); YOGClientOptionsScreen options(&screen, client); YOGClientMapDownloadScreen maps(&screen, client); int rc = screen.execute(globalContainer->gfx, 40); if(rc == YOGClientLobbyScreen::ConnectionLost) endExecute(ConnectionLost); else if(rc == -1) endExecute(-1); else endExecute(LoggedIn); }
void HlpFlightPlannerApp::setLayerSet( bool updateExtent ) { QList<QgsMapCanvasLayer> layers; layers.append( QgsMapCanvasLayer( mFlightlineLayer, true ) ); layers.append( QgsMapCanvasLayer( mWaypointLayer, true ) ); layers.append( QgsMapCanvasLayer( mProfileLayer, true ) ); // reverse order of legend QListIterator<QgsMapCanvasLayer> maps( HlpMapRegistry::instance()->layers() ); maps.toBack(); while ( maps.hasPrevious() ) layers.append( maps.previous() ); mMapCanvas->setLayerSet( layers ); if ( updateExtent ) mMapCanvas->zoomToFullExtent(); }
void MapLoader::load(unsigned int width, const QString &theme, unsigned int height, float opacity) { // find the maps available QValueList<uint> sizes; QStringList files = maps(theme); for (uint i=0; i<files.count(); ++i) { QString f = files[i]; int pos = f.findRev("/"); if (pos >= 0) f = f.mid(pos+1); pos = f.findRev("."); if (pos >= 0) f = f.left(pos); sizes.append(f.toInt()); } qHeapSort(sizes); // find the closest (bigger) size uint size=0; for (uint i=0; i<sizes.count(); ++i) if (sizes[i] >= width) { size = sizes[i]; break; } QImage image; if (size == 0) { image = QImage(locate("data", "kworldclock/maps/depths/800.jpg")); size = 800; } else image = QImage(locate("data", QString("kworldclock/maps/%1/%2.jpg").arg(theme).arg(size))); if (height == 0) height = width/2; if ((image.width() != (int)width) || (image.height() != (int)height)) image = image.smoothScale(width, height); // convert to light map _light.convertFromImage(image); // calculate dark map _dark.convertFromImage(KImageEffect::blend(Qt::black, image, opacity)); }
cv::Mat HogIntegralImageComputer::create_hog_maps(const cv::Mat &mags, const cv::Mat &qangles) { cv::Mat maps(mags.rows, mags.cols, CV_32FC(9), cv::Scalar(0.0f)); for (int i = 0; i < mags.rows; i++) { for (int j = 0; j < mags.cols; j++) { cv::Vec2b ang = qangles.at<cv::Vec2b>(i,j); cv::Vec2f mag = mags.at<cv::Vec2f>(i,j); cv::VecHogf v(0.0f); v[ang[0]] += mag[0]; v[ang[1]] += mag[1]; v[CC_HOG_CHANS] = mag[0] + mag[1]; // gradient magnitude... maps.at<cv::VecHogf>(i,j) = v; } } return maps; }
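// A hypothetical call sketch for create_hog_maps (kept as comments because the
// surrounding class is not shown; the types follow the accesses above: mags is
// CV_32FC2 with the two per-pixel magnitudes, qangles is CV_8UC2 with the two
// quantized bin indices, and default construction of the computer is assumed):
//   cv::Mat mags(4, 4, CV_32FC2, cv::Scalar(0.5f, 0.5f));
//   cv::Mat qangles(4, 4, CV_8UC2, cv::Scalar(0, 1));
//   HogIntegralImageComputer hog;
//   cv::Mat maps = hog.create_hog_maps(mags, qangles);  // 9-channel histogram map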
bool isIsomorphic(string s, string t) {
    int len = s.size();
    // Character-to-character mappings in both directions; -1 means "unmapped".
    vector<int> maps(256, -1);
    vector<int> mapt(256, -1);
    for (int i = 0; i < len; ++i) {
        // Cast through unsigned char so bytes >= 0x80 cannot index negatively.
        int ids = static_cast<unsigned char>(s[i]);
        int idt = static_cast<unsigned char>(t[i]);
        if ((maps[ids] == -1 || maps[ids] == idt) &&
            (mapt[idt] == -1 || mapt[idt] == ids)) {
            maps[ids] = idt;
            mapt[idt] = ids;
        } else {
            return false;
        }
    }
    return true;
}
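// A minimal, hypothetical driver for isIsomorphic (not from the original
// source): two strings are isomorphic when the character mapping is consistent
// in both directions, so "paper"/"title" passes and "ab"/"aa" fails.
#include <cassert>
int main() {
    assert(isIsomorphic("egg", "add"));
    assert(isIsomorphic("paper", "title"));
    assert(!isIsomorphic("ab", "aa"));  // 'a' cannot map to both 'a' and 'b'
    return 0;
}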
TEST(MapsTest, parse_permissions) { BufferMaps maps( "1000-2000 ---s 00000000 00:00 0\n" "2000-3000 r--s 00000000 00:00 0\n" "3000-4000 -w-s 00000000 00:00 0\n" "4000-5000 --xp 00000000 00:00 0\n" "5000-6000 rwxp 00000000 00:00 0\n"); ASSERT_TRUE(maps.Parse()); ASSERT_EQ(5U, maps.Total()); auto it = maps.begin(); ASSERT_EQ(PROT_NONE, it->flags); ASSERT_EQ(0x1000U, it->start); ASSERT_EQ(0x2000U, it->end); ASSERT_EQ(0U, it->offset); ASSERT_EQ("", it->name); ++it; ASSERT_EQ(PROT_READ, it->flags); ASSERT_EQ(0x2000U, it->start); ASSERT_EQ(0x3000U, it->end); ASSERT_EQ(0U, it->offset); ASSERT_EQ("", it->name); ++it; ASSERT_EQ(PROT_WRITE, it->flags); ASSERT_EQ(0x3000U, it->start); ASSERT_EQ(0x4000U, it->end); ASSERT_EQ(0U, it->offset); ASSERT_EQ("", it->name); ++it; ASSERT_EQ(PROT_EXEC, it->flags); ASSERT_EQ(0x4000U, it->start); ASSERT_EQ(0x5000U, it->end); ASSERT_EQ(0U, it->offset); ASSERT_EQ("", it->name); ++it; ASSERT_EQ(PROT_READ | PROT_WRITE | PROT_EXEC, it->flags); ASSERT_EQ(0x5000U, it->start); ASSERT_EQ(0x6000U, it->end); ASSERT_EQ(0U, it->offset); ASSERT_EQ("", it->name); ++it; ASSERT_EQ(it, maps.end()); }
TMXTiledMap* MapLayer::initMapWithFile(const char *name)
{
    TMXTiledMap *tileMap = nullptr;  // stays null if the Maps path is missing
    std::string path;
    std::string maps("Maps");
    auto iterFind = std::find(searchPaths.begin(), searchPaths.end(), maps);
    if (iterFind == searchPaths.end())
        CCAssert(iterFind != searchPaths.end(), "\"Maps\" not found in searchPaths");
    else {
        path = maps + '/' + name;
        tileMap = TMXTiledMap::create(path);
        CCAssert(tileMap != nullptr, "tileMap == NULL");
        tileMap->setPosition(Vec2(0, 0));
        this->addChild(tileMap);
    }
    global->tileMap = tileMap;
    return tileMap;
}
void stats(std::valarray<float> map, float &mean, float &sigma, float &kurt, float &skew)
{
    int n = map.size();
    mean = map.sum() / float(n);
    // Central moments 2..4 of the sample.
    std::valarray<float> maps2(n), maps3(n), maps4(n);
    for (int i = 0; i < n; i++) {
        maps2[i] = gsl_pow_2(map[i] - mean);
        maps3[i] = gsl_pow_3(map[i] - mean);
        maps4[i] = gsl_pow_4(map[i] - mean);
    }
    sigma = sqrt(maps2.sum() / (float(n) - 1.));
    double mu3 = maps3.sum() / (float(n) - 1.);
    double mu4 = maps4.sum() / (float(n) - 1.);
    kurt = mu4 / gsl_pow_4(sigma) - 3;  // excess kurtosis
    skew = mu3 / gsl_pow_3(sigma);
}
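// A small, hypothetical driver for stats() (sample values are illustrative,
// not from the original source); it assumes stats() is visible in scope.
#include <valarray>
#include <iostream>
int main()
{
    std::valarray<float> sample = {1.f, 2.f, 3.f, 4.f, 5.f};
    float mean, sigma, kurt, skew;
    stats(sample, mean, sigma, kurt, skew);
    std::cout << "mean=" << mean << " sigma=" << sigma
              << " kurt=" << kurt << " skew=" << skew << std::endl;
    return 0;
}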
void interpret(char* str)
{
    vector<string> strs;
    string line(str);
    boost::split(strs, line, boost::is_any_of(" "));
    if (strs.empty())
        return;
    if (strs[0] == "open" && strs.size() > 1) {
        if (boost::starts_with(strs[1], "http://") || boost::starts_with(strs[1], "www") ||
            boost::ends_with(strs[1], ".com"))
            urlopen(strs[1].c_str());
        else
            fileopen((char*)strs[1].c_str());
    } else if (strs[0] == "search" && strs.size() > 1) {
        // Re-join everything after the keyword into a single search term.
        string term(strs[1]);
        for (size_t i = 2; i < strs.size(); i++) {
            term += string(" ");
            term += string(strs[i]);
        }
        websearch(term.c_str());
    } else if (strs[0] == "drive") {
        // Expected shape: "drive from <origin> to <destination>".
        std::string s = string(str);
        std::string delimiter = " from ";
        std::string from, to;
        size_t pos = s.find(delimiter);
        std::string token = s.substr(0, pos);
        std::cout << token << std::endl;
        s.erase(0, pos + delimiter.length());
        delimiter = " to ";
        while ((pos = s.find(delimiter)) != std::string::npos) {
            from = s.substr(0, pos);
            s.erase(0, pos + delimiter.length());
        }
        to = s;
        maps(from.c_str(), to.c_str());
    }
}
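// Example invocations of interpret() (hypothetical inputs, not from the source):
//   interpret("open http://example.com");     // dispatches to urlopen()
//   interpret("search modern c++ idioms");    // re-joined into one websearch() term
//   interpret("drive from Home to Airport");  // parsed into maps("Home", "Airport")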
TEST(MapsTest, device) {
  BufferMaps maps(
      "a000-e000 rw-p 00000000 00:00 0 /dev/\n"
      "f000-f100 rw-p 00000000 00:00 0 /dev/does_not_exist\n"
      "f100-f200 rw-p 00000000 00:00 0 /dev/ashmem/does_not_exist\n"
      "f200-f300 rw-p 00000000 00:00 0 /devsomething/does_not_exist\n");
  ASSERT_TRUE(maps.Parse());
  ASSERT_EQ(4U, maps.Total());
  // 0x8000 is the device-map flag (MAPS_FLAGS_DEVICE_MAP in libunwindstack);
  // it should be set for /dev/ entries except those under /dev/ashmem/.
  auto it = maps.begin();
  ASSERT_TRUE(it->flags & 0x8000);
  ASSERT_EQ("/dev/", it->name);
  ++it;
  ASSERT_TRUE(it->flags & 0x8000);
  ASSERT_EQ("/dev/does_not_exist", it->name);
  ++it;
  ASSERT_FALSE(it->flags & 0x8000);
  ASSERT_EQ("/dev/ashmem/does_not_exist", it->name);
  ++it;
  ASSERT_FALSE(it->flags & 0x8000);
  ASSERT_EQ("/devsomething/does_not_exist", it->name);
}
TEST(MapsTest, parse_offset) { BufferMaps maps( "a000-e000 rw-p 00000000 00:00 0 /system/lib/fake.so\n" "e000-f000 rw-p 00a12345 00:00 0 /system/lib/fake.so\n"); ASSERT_TRUE(maps.Parse()); ASSERT_EQ(2U, maps.Total()); auto it = maps.begin(); ASSERT_EQ(0U, it->offset); ASSERT_EQ(0xa000U, it->start); ASSERT_EQ(0xe000U, it->end); ASSERT_EQ(PROT_READ | PROT_WRITE, it->flags); ASSERT_EQ("/system/lib/fake.so", it->name); ++it; ASSERT_EQ(0xa12345U, it->offset); ASSERT_EQ(0xe000U, it->start); ASSERT_EQ(0xf000U, it->end); ASSERT_EQ(PROT_READ | PROT_WRITE, it->flags); ASSERT_EQ("/system/lib/fake.so", it->name); ++it; ASSERT_EQ(maps.end(), it); }
bool extractPythonLibrary() { #if defined(HAVE_DLADDR) // Add the library into the path in case it has a .zip file appended Dl_info info; memset(&info, 0, sizeof(info)); int res = dladdr((void *)&extractPythonLibrary, &info); if (!res) { qWarning() << "Could not determine library path"; return false; } QString fname = QString::fromUtf8(info.dli_fname); qDebug() << "Got library name: " << fname; // On Android, dladdr() returns only the basename of the file, so we go // hunt for the full path in /proc/self/maps, where the shared library is // mapped (TODO: We could parse the address range and compare that, too) if (!fname.startsWith("/")) { QFile mapsf("/proc/self/maps"); if (mapsf.exists()) { mapsf.open(QIODevice::ReadOnly); QTextStream maps(&mapsf); QString line; while (!(line = maps.readLine()).isNull()) { QString filename = line.split(' ', QString::SkipEmptyParts).last(); if (filename.endsWith("/" + fname)) { fname = filename; qDebug() << "Resolved full path:" << fname; break; } } } } prependPythonPath(fname); #endif return true; }
void CObjectThread::run()
{
    CTimer cleanup(10 * SECOND);
    CTimer maps(100);
    while (!this->shutting_down) {
        if (cleanup.Passed()) {
            // Take the write lock once for the whole sweep; upgrading a held
            // read lock to a write lock on the same QReadWriteLock deadlocks.
            this->rwl->lockForWrite();
            CObjectManager::MapType::iterator itr = this->mang->container.begin();
            while (itr != this->mang->container.end()) {
                CObjectManager::MapType::iterator current = itr++;
                CObject * object = current->second;
                if (object && object->type < OBJECT_EMPTY) {
                    switch (object->type) {
                        case VOID_PLAYER: delete (CPlayer*)object; break;
                        case VOID_UNIT:   delete (CUnit*)object;   break;
                        case VOID_BOT:    delete (CBot*)object;    break;
                        case VOID_EMPTY:  break;
                    }
                    this->mang->container.erase(current);
                }
            }
            this->rwl->unlock();
        }
        if (maps.Passed()) {
            for (uint32 i = 0; i < MAX_MAPS; ++i)
                this->map[i]->guids.clear();
            this->rwl->lockForRead();
            for (CObjectManager::MapType::iterator itr = this->mang->container.begin();
                 itr != this->mang->container.end(); ++itr) {
                CObject * object = itr->second;
                if (object && object->map >= 0 && object->map < MAX_MAPS &&
                    object->type > OBJECT_EMPTY)
                    this->map[object->map]->guids.push_back(object->guid);
            }
            this->rwl->unlock();
            init_genrand(GetTicks());
            for (uint32 i = 0; i < MAX_MAPS; ++i) {
                CWorldMap * pmap = this->map[i];
                for (uint32 j = 0; j < pmap->guids.size(); ++j) {  // j: do not shadow i
                    if (pmap->guids.at(j).lo != 0)
                        continue;
                    bool assigned = false;
                    for (uint32 count = 0; count < RETRIES && !assigned; ++count) {
                        GUID_LOW lo = uint32(genrand_int32()) % MAX_GUID;
                        bool found = false;
                        for (uint32 k = 0; k < pmap->guids.size(); ++k) {
                            if (pmap->guids.at(k).lo == lo) { found = true; break; }
                        }
                        if (!found) {
                            this->mang->FindHigh(pmap->guids.at(j).hi)->guid.lo = lo;
                            pmap->guids.at(j).lo = lo;
                            assigned = true;
                        }
                    }
                    assert(assigned);  // ran out of retries without a free low GUID
                }
                pmap->UpdateMap();
            }
        }
        this->msleep(DEFSLEEP);  // sleep inside the loop so the thread does not busy-spin
    }
}
int main(int argc, char* argv[]) {
    int opt;

    auto pss_sort = [](const Vma& a, const Vma& b) {
        uint64_t pss_a = a.usage.pss;
        uint64_t pss_b = b.usage.pss;
        return pss_a > pss_b;
    };

    auto uss_sort = [](const Vma& a, const Vma& b) {
        uint64_t uss_a = a.usage.uss;
        uint64_t uss_b = b.usage.uss;
        return uss_a > uss_b;
    };

    std::function<bool(const Vma& a, const Vma& b)> sort_func = nullptr;

    while ((opt = getopt(argc, argv, "himpuWw")) != -1) {
        switch (opt) {
            case 'h':
                hide_zeroes = true;
                break;
            case 'i':
                // TODO: libmeminfo doesn't support the flag to choose
                // between idle page tracking vs clear_refs. So for now,
                // this flag is unused and the library defaults to using
                // /proc/<pid>/clear_refs for finding the working set.
                use_pageidle = true;
                break;
            case 'm':
                // this is the default
                break;
            case 'p':
                sort_func = pss_sort;
                break;
            case 'u':
                sort_func = uss_sort;
                break;
            case 'W':
                reset_wss = true;
                break;
            case 'w':
                show_wss = true;
                break;
            case '?':
                usage(EXIT_SUCCESS);
            default:
                usage(EXIT_FAILURE);
        }
    }

    if (optind != (argc - 1)) {
        fprintf(stderr, "Need exactly one pid at the end\n");
        usage(EXIT_FAILURE);
    }

    pid_t pid = atoi(argv[optind]);
    if (pid == 0) {
        std::cerr << "Invalid process id" << std::endl;
        exit(EXIT_FAILURE);
    }

    if (reset_wss) {
        if (!ProcMemInfo::ResetWorkingSet(pid)) {
            std::cerr << "Failed to reset working set of pid : " << pid << std::endl;
            exit(EXIT_FAILURE);
        }
        return 0;
    }

    ProcMemInfo proc(pid, show_wss);
    const MemUsage& proc_stats = proc.Usage();
    std::vector<Vma> maps(proc.Maps());
    if (sort_func != nullptr) {
        std::sort(maps.begin(), maps.end(), sort_func);
    }
    return show(proc_stats, maps);
}
int main(int argc, char ** argv)
{
    size_t n = atoi(argv[1]);
    size_t num_threads = atoi(argv[2]);
    size_t method = argc <= 3 ? 0 : atoi(argv[3]);

    std::cerr << std::fixed << std::setprecision(2);

    ThreadPool pool(num_threads);
    Source data(n);

    {
        Stopwatch watch;
        DB::ReadBufferFromFileDescriptor in1(STDIN_FILENO);
        DB::CompressedReadBuffer in2(in1);
        in2.readStrict(reinterpret_cast<char*>(&data[0]), sizeof(data[0]) * n);
        watch.stop();
        std::cerr << std::fixed << std::setprecision(2)
            << "Vector. Size: " << n << ", elapsed: " << watch.elapsedSeconds()
            << " (" << n / watch.elapsedSeconds() << " elem/sec.)" << std::endl << std::endl;
    }

    if (!method || method == 1)
    {
        /** Variant 1.
          * Each thread aggregates independently into its own hash table.
          * The tables are then merged together.
          */
        std::vector<Map> maps(num_threads);

        Stopwatch watch;
        for (size_t i = 0; i < num_threads; ++i)
            pool.schedule(std::bind(aggregate1, std::ref(maps[i]),
                data.begin() + (data.size() * i) / num_threads,
                data.begin() + (data.size() * (i + 1)) / num_threads));
        pool.wait();
        watch.stop();
        double time_aggregated = watch.elapsedSeconds();
        std::cerr << "Aggregated in " << time_aggregated << " (" << n / time_aggregated << " elem/sec.)" << std::endl;

        size_t size_before_merge = 0;
        std::cerr << "Sizes: ";
        for (size_t i = 0; i < num_threads; ++i)
        {
            std::cerr << (i == 0 ? "" : ", ") << maps[i].size();
            size_before_merge += maps[i].size();
        }
        std::cerr << std::endl;

        watch.restart();
        for (size_t i = 1; i < num_threads; ++i)
            for (auto it = maps[i].begin(); it != maps[i].end(); ++it)
                maps[0][it->first] += it->second;
        watch.stop();
        double time_merged = watch.elapsedSeconds();
        std::cerr << "Merged in " << time_merged << " (" << size_before_merge / time_merged << " elem/sec.)" << std::endl;

        double time_total = time_aggregated + time_merged;
        std::cerr << "Total in " << time_total << " (" << n / time_total << " elem/sec.)" << std::endl;
        std::cerr << "Size: " << maps[0].size() << std::endl << std::endl;
    }

    if (!method || method == 12)
    {
        /** Same as variant 1, but optimized for runs of consecutive identical values. */
        std::vector<Map> maps(num_threads);

        Stopwatch watch;
        for (size_t i = 0; i < num_threads; ++i)
            pool.schedule(std::bind(aggregate12, std::ref(maps[i]),
                data.begin() + (data.size() * i) / num_threads,
                data.begin() + (data.size() * (i + 1)) / num_threads));
        pool.wait();
        watch.stop();
        double time_aggregated = watch.elapsedSeconds();
        std::cerr << "Aggregated in " << time_aggregated << " (" << n / time_aggregated << " elem/sec.)" << std::endl;

        size_t size_before_merge = 0;
        std::cerr << "Sizes: ";
        for (size_t i = 0; i < num_threads; ++i)
        {
            std::cerr << (i == 0 ? "" : ", ") << maps[i].size();
            size_before_merge += maps[i].size();
        }
        std::cerr << std::endl;

        watch.restart();
        for (size_t i = 1; i < num_threads; ++i)
            for (auto it = maps[i].begin(); it != maps[i].end(); ++it)
                maps[0][it->first] += it->second;
        watch.stop();
        double time_merged = watch.elapsedSeconds();
        std::cerr << "Merged in " << time_merged << " (" << size_before_merge / time_merged << " elem/sec.)" << std::endl;

        double time_total = time_aggregated + time_merged;
        std::cerr << "Total in " << time_total << " (" << n / time_total << " elem/sec.)" << std::endl;
        std::cerr << "Size: " << maps[0].size() << std::endl << std::endl;
    }

    if (!method || method == 11)
    {
        /** Variant 11.
          * Same as variant 1, but the loop order in the merge is changed,
          * which could potentially give better cache locality.
          * In practice, there is no difference.
          */
        std::vector<Map> maps(num_threads);

        Stopwatch watch;
        for (size_t i = 0; i < num_threads; ++i)
            pool.schedule(std::bind(aggregate1, std::ref(maps[i]),
                data.begin() + (data.size() * i) / num_threads,
                data.begin() + (data.size() * (i + 1)) / num_threads));
        pool.wait();
        watch.stop();
        double time_aggregated = watch.elapsedSeconds();
        std::cerr << "Aggregated in " << time_aggregated << " (" << n / time_aggregated << " elem/sec.)" << std::endl;

        size_t size_before_merge = 0;
        std::cerr << "Sizes: ";
        for (size_t i = 0; i < num_threads; ++i)
        {
            std::cerr << (i == 0 ? "" : ", ") << maps[i].size();
            size_before_merge += maps[i].size();
        }
        std::cerr << std::endl;

        watch.restart();
        std::vector<Map::iterator> iterators(num_threads);
        for (size_t i = 1; i < num_threads; ++i)
            iterators[i] = maps[i].begin();
        while (true)
        {
            bool finish = true;
            for (size_t i = 1; i < num_threads; ++i)
            {
                if (iterators[i] == maps[i].end())
                    continue;
                finish = false;
                maps[0][iterators[i]->first] += iterators[i]->second;
                ++iterators[i];
            }
            if (finish)
                break;
        }
        watch.stop();
        double time_merged = watch.elapsedSeconds();
        std::cerr << "Merged in " << time_merged << " (" << size_before_merge / time_merged << " elem/sec.)" << std::endl;

        double time_total = time_aggregated + time_merged;
        std::cerr << "Total in " << time_total << " (" << n / time_total << " elem/sec.)" << std::endl;
        std::cerr << "Size: " << maps[0].size() << std::endl << std::endl;
    }

    if (!method || method == 2)
    {
        /** Variant 2.
          * Each thread aggregates independently into its own two-level hash table.
          * The tables are then merged together, parallelizing over the first-level buckets.
          * With large hash tables (10 million elements and more) and a large number of
          * threads (8-32), the merge is the bottleneck, and the performance advantage
          * reaches 4x.
          */
        std::vector<MapTwoLevel> maps(num_threads);

        Stopwatch watch;
        for (size_t i = 0; i < num_threads; ++i)
            pool.schedule(std::bind(aggregate2, std::ref(maps[i]),
                data.begin() + (data.size() * i) / num_threads,
                data.begin() + (data.size() * (i + 1)) / num_threads));
        pool.wait();
        watch.stop();
        double time_aggregated = watch.elapsedSeconds();
        std::cerr << "Aggregated in " << time_aggregated << " (" << n / time_aggregated << " elem/sec.)" << std::endl;

        size_t size_before_merge = 0;
        std::cerr << "Sizes: ";
        for (size_t i = 0; i < num_threads; ++i)
        {
            std::cerr << (i == 0 ? "" : ", ") << maps[i].size();
            size_before_merge += maps[i].size();
        }
        std::cerr << std::endl;

        watch.restart();
        for (size_t i = 0; i < MapTwoLevel::NUM_BUCKETS; ++i)
            pool.schedule(std::bind(merge2, &maps[0], num_threads, i));
        pool.wait();
        watch.stop();
        double time_merged = watch.elapsedSeconds();
        std::cerr << "Merged in " << time_merged << " (" << size_before_merge / time_merged << " elem/sec.)" << std::endl;

        double time_total = time_aggregated + time_merged;
        std::cerr << "Total in " << time_total << " (" << n / time_total << " elem/sec.)" << std::endl;
        std::cerr << "Size: " << maps[0].size() << std::endl << std::endl;
    }

    if (!method || method == 22)
    {
        std::vector<MapTwoLevel> maps(num_threads);

        Stopwatch watch;
        for (size_t i = 0; i < num_threads; ++i)
            pool.schedule(std::bind(aggregate22, std::ref(maps[i]),
                data.begin() + (data.size() * i) / num_threads,
                data.begin() + (data.size() * (i + 1)) / num_threads));
        pool.wait();
        watch.stop();
        double time_aggregated = watch.elapsedSeconds();
        std::cerr << "Aggregated in " << time_aggregated << " (" << n / time_aggregated << " elem/sec.)" << std::endl;

        size_t size_before_merge = 0;
        std::cerr << "Sizes: ";
        for (size_t i = 0; i < num_threads; ++i)
        {
            std::cerr << (i == 0 ? "" : ", ") << maps[i].size();
            size_before_merge += maps[i].size();
        }
        std::cerr << std::endl;

        watch.restart();
        for (size_t i = 0; i < MapTwoLevel::NUM_BUCKETS; ++i)
            pool.schedule(std::bind(merge2, &maps[0], num_threads, i));
        pool.wait();
        watch.stop();
        double time_merged = watch.elapsedSeconds();
        std::cerr << "Merged in " << time_merged << " (" << size_before_merge / time_merged << " elem/sec.)" << std::endl;

        double time_total = time_aggregated + time_merged;
        std::cerr << "Total in " << time_total << " (" << n / time_total << " elem/sec.)" << std::endl;
        std::cerr << "Size: " << maps[0].size() << std::endl << std::endl;
    }

    if (!method || method == 3)
    {
        /** Variant 3.
          * Each thread aggregates independently into its own hash table until it
          * grows sufficiently large. If the local hash table is large and a key is
          * not in it, the key is inserted into a single global hash table protected
          * by a mutex; if the mutex could not be acquired, the key goes into the
          * local table instead. All local hash tables are then merged into the
          * global one. This method is bad - there is a lot of contention.
          */
        std::vector<Map> local_maps(num_threads);
        Map global_map;
        Mutex mutex;

        Stopwatch watch;
        for (size_t i = 0; i < num_threads; ++i)
            pool.schedule(std::bind(aggregate3, std::ref(local_maps[i]), std::ref(global_map), std::ref(mutex),
                data.begin() + (data.size() * i) / num_threads,
                data.begin() + (data.size() * (i + 1)) / num_threads));
        pool.wait();
        watch.stop();
        double time_aggregated = watch.elapsedSeconds();
        std::cerr << "Aggregated in " << time_aggregated << " (" << n / time_aggregated << " elem/sec.)" << std::endl;

        size_t size_before_merge = 0;
        std::cerr << "Sizes (local): ";
        for (size_t i = 0; i < num_threads; ++i)
        {
            std::cerr << (i == 0 ? "" : ", ") << local_maps[i].size();
            size_before_merge += local_maps[i].size();
        }
        std::cerr << std::endl;
        std::cerr << "Size (global): " << global_map.size() << std::endl;
        size_before_merge += global_map.size();

        watch.restart();
        for (size_t i = 0; i < num_threads; ++i)
            for (auto it = local_maps[i].begin(); it != local_maps[i].end(); ++it)
                global_map[it->first] += it->second;
        pool.wait();
        watch.stop();
        double time_merged = watch.elapsedSeconds();
        std::cerr << "Merged in " << time_merged << " (" << size_before_merge / time_merged << " elem/sec.)" << std::endl;

        double time_total = time_aggregated + time_merged;
        std::cerr << "Total in " << time_total << " (" << n / time_total << " elem/sec.)" << std::endl;
        std::cerr << "Size: " << global_map.size() << std::endl << std::endl;
    }

    if (!method || method == 33)
    {
        /** Variant 33.
          * Each thread aggregates independently into its own hash table until it
          * grows sufficiently large; the data is then flushed into a global hash
          * table protected by a mutex, and aggregation continues.
          */
        std::vector<Map> local_maps(num_threads);
        Map global_map;
        Mutex mutex;

        Stopwatch watch;
        for (size_t i = 0; i < num_threads; ++i)
            pool.schedule(std::bind(aggregate33, std::ref(local_maps[i]), std::ref(global_map), std::ref(mutex),
                data.begin() + (data.size() * i) / num_threads,
                data.begin() + (data.size() * (i + 1)) / num_threads));
        pool.wait();
        watch.stop();
        double time_aggregated = watch.elapsedSeconds();
        std::cerr << "Aggregated in " << time_aggregated << " (" << n / time_aggregated << " elem/sec.)" << std::endl;

        size_t size_before_merge = 0;
        std::cerr << "Sizes (local): ";
        for (size_t i = 0; i < num_threads; ++i)
        {
            std::cerr << (i == 0 ? "" : ", ") << local_maps[i].size();
            size_before_merge += local_maps[i].size();
        }
        std::cerr << std::endl;
        std::cerr << "Size (global): " << global_map.size() << std::endl;
        size_before_merge += global_map.size();

        watch.restart();
        for (size_t i = 0; i < num_threads; ++i)
            for (auto it = local_maps[i].begin(); it != local_maps[i].end(); ++it)
                global_map[it->first] += it->second;
        pool.wait();
        watch.stop();
        double time_merged = watch.elapsedSeconds();
        std::cerr << "Merged in " << time_merged << " (" << size_before_merge / time_merged << " elem/sec.)" << std::endl;

        double time_total = time_aggregated + time_merged;
        std::cerr << "Total in " << time_total << " (" << n / time_total << " elem/sec.)" << std::endl;
        std::cerr << "Size: " << global_map.size() << std::endl << std::endl;
    }

    if (!method || method == 4)
    {
        /** Variant 4.
          * Each thread aggregates independently into its own hash table until it
          * grows sufficiently large. If the local hash table is large and a key is
          * not in it, the key is inserted into one of 256 global hash tables, each
          * protected by its own mutex. All local hash tables are then merged into
          * the global one. This method is not that bad with a large number of
          * threads, but worse than the second one.
          */
        std::vector<Map> local_maps(num_threads);
        MapTwoLevel global_map;
        std::vector<Mutex> mutexes(MapTwoLevel::NUM_BUCKETS);

        Stopwatch watch;
        for (size_t i = 0; i < num_threads; ++i)
            pool.schedule(std::bind(aggregate4, std::ref(local_maps[i]), std::ref(global_map), &mutexes[0],
                data.begin() + (data.size() * i) / num_threads,
                data.begin() + (data.size() * (i + 1)) / num_threads));
        pool.wait();
        watch.stop();
        double time_aggregated = watch.elapsedSeconds();
        std::cerr << "Aggregated in " << time_aggregated << " (" << n / time_aggregated << " elem/sec.)" << std::endl;

        size_t size_before_merge = 0;
        std::cerr << "Sizes (local): ";
        for (size_t i = 0; i < num_threads; ++i)
        {
            std::cerr << (i == 0 ? "" : ", ") << local_maps[i].size();
            size_before_merge += local_maps[i].size();
        }
        std::cerr << std::endl;

        size_t sum_size = global_map.size();
        std::cerr << "Size (global): " << sum_size << std::endl;
        size_before_merge += sum_size;

        watch.restart();
        for (size_t i = 0; i < num_threads; ++i)
            for (auto it = local_maps[i].begin(); it != local_maps[i].end(); ++it)
                global_map[it->first] += it->second;
        pool.wait();
        watch.stop();
        double time_merged = watch.elapsedSeconds();
        std::cerr << "Merged in " << time_merged << " (" << size_before_merge / time_merged << " elem/sec.)" << std::endl;

        double time_total = time_aggregated + time_merged;
        std::cerr << "Total in " << time_total << " (" << n / time_total << " elem/sec.)" << std::endl;
        std::cerr << "Size: " << global_map.size() << std::endl << std::endl;
    }

    /*if (!method || method == 5)
    {
    */
        /** Variant 5.
          * Each thread aggregates independently into its own hash table until it
          * grows sufficiently large. If the local hash table is large and a key is
          * not in it, the key is inserted into a single global hash table that holds
          * a small latch in every cell; if the latch could not be acquired, the key
          * goes into the local table instead. All local hash tables are then merged
          * into the global one.
          */
    /*
        Map local_maps[num_threads];
        MapSmallLocks global_map;

        Stopwatch watch;
        for (size_t i = 0; i < num_threads; ++i)
            pool.schedule(std::bind(aggregate5, std::ref(local_maps[i]), std::ref(global_map),
                data.begin() + (data.size() * i) / num_threads,
                data.begin() + (data.size() * (i + 1)) / num_threads));
        pool.wait();
        watch.stop();
        double time_aggregated = watch.elapsedSeconds();
        std::cerr << "Aggregated in " << time_aggregated << " (" << n / time_aggregated << " elem/sec.)" << std::endl;

        size_t size_before_merge = 0;
        std::cerr << "Sizes (local): ";
        for (size_t i = 0; i < num_threads; ++i)
        {
            std::cerr << (i == 0 ? "" : ", ") << local_maps[i].size();
            size_before_merge += local_maps[i].size();
        }
        std::cerr << std::endl;
        std::cerr << "Size (global): " << global_map.size() << std::endl;
        size_before_merge += global_map.size();

        watch.restart();
        for (size_t i = 0; i < num_threads; ++i)
            for (auto it = local_maps[i].begin(); it != local_maps[i].end(); ++it)
                global_map.insert(std::make_pair(it->first, 0)).first->second += it->second;
        pool.wait();
        watch.stop();
        double time_merged = watch.elapsedSeconds();
        std::cerr << "Merged in " << time_merged << " (" << size_before_merge / time_merged << " elem/sec.)" << std::endl;

        double time_total = time_aggregated + time_merged;
        std::cerr << "Total in " << time_total << " (" << n / time_total << " elem/sec.)" << std::endl;
        std::cerr << "Size: " << global_map.size() << std::endl << std::endl;
    }*/

    /*if (!method || method == 6)
    {
    *//** Variant 6.
      * Each thread aggregates independently into its own hash table.
      * The tables are then "merged" by traversing them in the same key order.
      * A rather slow variant.
      */
    /*
        std::vector<Map> maps(num_threads);

        Stopwatch watch;
        for (size_t i = 0; i < num_threads; ++i)
            pool.schedule(std::bind(aggregate1, std::ref(maps[i]),
                data.begin() + (data.size() * i) / num_threads,
                data.begin() + (data.size() * (i + 1)) / num_threads));
        pool.wait();
        watch.stop();
        double time_aggregated = watch.elapsedSeconds();
        std::cerr << "Aggregated in " << time_aggregated << " (" << n / time_aggregated << " elem/sec.)" << std::endl;

        size_t size_before_merge = 0;
        std::cerr << "Sizes: ";
        for (size_t i = 0; i < num_threads; ++i)
        {
            std::cerr << (i == 0 ? "" : ", ") << maps[i].size();
            size_before_merge += maps[i].size();
        }
        std::cerr << std::endl;

        watch.restart();
        using Maps = std::vector<Map *>;
        Maps maps_to_merge(num_threads);
        for (size_t i = 0; i < num_threads; ++i)
            maps_to_merge[i] = &maps[i];
        size_t size = 0;
        for (size_t i = 0; i < 100; ++i)
            processMergedHashTables(maps_to_merge,
                [] (Map::value_type & dst, const Map::value_type & src) { dst.second += src.second; },
                [&] (const Map::value_type & dst) { ++size; });
        watch.stop();
        double time_merged = watch.elapsedSeconds();
        std::cerr << "Merged in " << time_merged << " (" << size_before_merge / time_merged << " elem/sec.)" << std::endl;

        double time_total = time_aggregated + time_merged;
        std::cerr << "Total in " << time_total << " (" << n / time_total << " elem/sec.)" << std::endl;
        std::cerr << "Size: " << size << std::endl << std::endl;
    }*/

    return 0;
}
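// A self-contained miniature of the merge pattern benchmarked above (variant 1),
// using std::unordered_map in place of the benchmark's Map type (an assumption):
// every thread-local table is folded into maps[0].
#include <cstdint>
#include <iostream>
#include <unordered_map>
#include <vector>

int main()
{
    std::vector<std::unordered_map<uint64_t, uint64_t>> maps(2);
    maps[0][1] = 10;  // thread 0 saw key 1 ten times
    maps[1][1] = 5;   // thread 1 saw key 1 five times
    maps[1][2] = 7;   // ...and key 2 seven times
    for (size_t i = 1; i < maps.size(); ++i)
        for (const auto & kv : maps[i])
            maps[0][kv.first] += kv.second;
    std::cout << maps[0][1] << " " << maps[0][2] << std::endl;  // prints "15 7"
    return 0;
}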
//--------------|--------------------------------------------- int main(int argc, char **argv) { int ret = 0; int opt; int timed = 0; char cmd[10240]; while (0 < (opt = getopt(argc, argv, "?c:dmp:s:tv-"))) { switch (opt) { case '?': help(); break; case 'c': hxcrash = atoi(optarg); break; case 'd': ++hxdebug; break; case 'm': mmode |= HX_MMAP; break; case 's': mmode |= HX_FSYNC; break; case 't': timed++; break; case 'v': ++verbose; break; } } argc -= optind; argv += optind; setvbuf(stdout, NULL, _IOLBF, 0); setvbuf(stderr, NULL, _IOLBF, 0); errno = 0; //setvbuf sets errno !? int size = 0; HXMODE mode; HXFILE *hp = NULL; FILE *fp = NULL; if (!*argv) die("See 'chx help' for usage"); if (*argv[0] == '?' || !strcmp(argv[0], "help")) help(); double tstart = tick(); if (hxdebug) hxtime = tstart; if (!strcmp(argv[0], "build")) { hp = do_hxopen("build", argv[1], HX_UPDATE); fp = do_fopen("build", argv[2], "r"); int memsize = argc > 3 ? atoi(argv[3]) : 1; int inpsize = argc > 4 ? atoi(argv[4]) : 0; memsize <<= 20; is_hxret("build", hxbuild(hp, fp, memsize, inpsize)); } else if (!strcmp(argv[0], "check")) { char *udata = argc > 3 ? argv[3] : NULL; if (argc < 2) die("%s: requires filename [pgsize [udata]]", argv[0]); if (argc > 2 && !sscanf(argv[2], "%d", &size)) die("%s: invalid pgsize", argv[2]); hp = do_hxopen("check", argv[1], HX_CHECK); mode = hxfix(hp, NULL, size, udata, udata ? strlen(udata) : 0); if (verbose || mode != HX_UPDATE) printf("%s %s\n", hxmode(mode), errno ? strerror(errno) : ""); ret = mode != HX_UPDATE; } else if (!strcmp(argv[0], "create")) { if (argc < 3 || !sscanf(argv[2], "%d", &size)) die("create: requires filename, pgsize, type"); char const *type = argv[3] ? argv[3] : ""; is_hxret("create", hxcreate(argv[1], 0644, size, type, strlen(type))); hp = do_hxopen("create", argv[1], HX_RECOVER); } else if (!strcmp(argv[0], "del")) { hp = do_hxopen("del", argv[1], HX_UPDATE); fp = do_fopen("del", argv[2], "r"); del(hp, fp); } else if (!strcmp(argv[0], "dump")) { hp = do_hxopen("dump", argv[1], HX_READ); dump(hp, stdout); } else if (!strcmp(argv[0], "fix") || !strcmp(argv[0], "repair")) { char *udata = argc > 3 ? argv[3] : NULL; if (argc < 2) die("%s: requires filename [pgsize [udata]]", argv[0]); if (argv[2] && !sscanf(argv[2], "%d", &size)) die("%s: invalid pgsize", argv[2]); hp = do_hxopen("repair", argv[1], HX_UPDATE); fp = tmpfile(); mode = hxfix(hp, fp, size, udata, udata ? strlen(udata) : 0); if (verbose || mode != HX_UPDATE) printf("%s %s\n", hxmode(mode), errno ? 
strerror(errno) : "");
        ret = mode != HX_UPDATE;
    } else if (!strcmp(argv[0], "hdrs")) {
        hp = do_hxopen("hdrs", argv[1], HX_READ);
        fp = do_fopen("hdrs", argv[2], "w");
        hdrs(hp);
    } else if (!strcmp(argv[0], "info")) {
        hp = do_hxopen("info", argv[1], HX_READ);
        info(hp);
    } else if (!strcmp(argv[0], "load")) {
        hp = do_hxopen("load", argv[1], HX_UPDATE);
        fp = do_fopen("load", argv[2], "r");
        do_load(hp, fp);
    } else if (!strcmp(argv[0], "lock")) {
        hp = do_hxopen("lock", argv[1], HX_READ);
        do_lock(hp);
    } else if (!strcmp(argv[0], "maps")) {
        hp = do_hxopen("maps", argv[1], HX_READ);
        is_hxret("maps", maps(hp));
    } else if (!strcmp(argv[0], "pack")) {
        hp = do_hxopen("pack", argv[1], HX_UPDATE);
        is_hxret("pack", hxpack(hp));
    } else if (!strcmp(argv[0], "save")) {
        hp = do_hxopen("save", argv[1], HX_READ);
        fp = do_fopen("save", argv[2], "w");
        do_save(hp, fp);
    } else if (!strcmp(argv[0], "shape")) {
        double density;
        if (argc != 3 || !sscanf(argv[2], "%lf", &density))
            die("%s: requires density arg (0 to 1.0)", argv[2]);
        hp = do_hxopen("shape", argv[1], HX_UPDATE);
        is_hxret("shape", hxshape(hp, density));
    } else if (!strcmp(argv[0], "stat")) {
        hp = do_hxopen("stat", argv[1], HX_READ);
        stats(hp);
    } else if (!strcmp(argv[0], "types")) {
        // For each dir in LD_LIBRARY_PATH (and "" -> ".") then /lib then /usr/lib,
        // find all files of the form "<dir>/hx_<rectype>.so"
        // and build a file of unique rectypes.
        // Then call hxlib which returns an exact path and a bitmask of DIFF/LOAD/TEST
        // "type diff-load-test path"
        // If the path matches, print the entry.
        types(getenv("LD_LIBRARY_PATH"));
        types("lib:/usr/lib");
    } else {
        die("%s: unknown command. See 'chx help'", argv[0]);  // argv[0], not the never-filled cmd[]
    }
    if (fp) fclose(fp);
    if (hp) hxclose(hp);
    if (timed) fprintf(stderr, "# chx %s: %.3f secs\n", *argv, tick() - tstart);
    return ret;
}
TEST(MapsTest, file_buffer_cross) { constexpr size_t kBufferSize = 2048; TemporaryFile tf; ASSERT_TRUE(tf.fd != -1); // Compute how many to add in the first buffer. size_t entry_len = CreateEntry(0).size(); size_t index; std::string file_data; for (index = 0; index < kBufferSize / entry_len; index++) { file_data += CreateEntry(index); } // Add a long name to make sure that the first buffer does not contain a // complete line. // Remove the last newline. size_t extra = 0; size_t leftover = kBufferSize % entry_len; size_t overlap1_index = 0; std::string overlap1_name; if (leftover == 0) { // Exact match, add a long name to cross over the value. overlap1_name = "/fake/name/is/long/on/purpose"; file_data.erase(file_data.size() - 1); file_data += ' ' + overlap1_name + '\n'; extra = entry_len + overlap1_name.size() + 1; overlap1_index = index; } // Compute how many need to go in to hit the buffer boundary exactly. size_t bytes_left_in_buffer = kBufferSize - extra; size_t entries_to_add = bytes_left_in_buffer / entry_len + index; for (; index < entries_to_add; index++) { file_data += CreateEntry(index); } // Now figure out how many bytes to add to get exactly to the buffer boundary. leftover = bytes_left_in_buffer % entry_len; std::string overlap2_name; size_t overlap2_index = 0; if (leftover != 0) { file_data.erase(file_data.size() - 1); file_data += ' '; overlap2_name = std::string(leftover - 1, 'x'); file_data += overlap2_name + '\n'; overlap2_index = index - 1; } // Now add a few entries on the next page. for (size_t start = index; index < start + 10; index++) { file_data += CreateEntry(index); } ASSERT_TRUE(android::base::WriteStringToFile(file_data, tf.path, 0660, getuid(), getgid())); FileMaps maps(tf.path); ASSERT_TRUE(maps.Parse()); EXPECT_EQ(index, maps.Total()); // Verify all of the maps. for (size_t i = 0; i < index; i++) { MapInfo* info = maps.Get(i); ASSERT_TRUE(info != nullptr) << "Failed verifying index " + std::to_string(i); EXPECT_EQ(i * 4096, info->start) << "Failed verifying index " + std::to_string(i); EXPECT_EQ((i + 1) * 4096, info->end) << "Failed verifying index " + std::to_string(i); EXPECT_EQ(0U, info->offset) << "Failed verifying index " + std::to_string(i); if (overlap1_index != 0 && i == overlap1_index) { EXPECT_EQ(overlap1_name, info->name) << "Failed verifying overlap1 name " + std::to_string(i); } else if (overlap2_index != 0 && i == overlap2_index) { EXPECT_EQ(overlap2_name, info->name) << "Failed verifying overlap2 name " + std::to_string(i); } else { EXPECT_EQ("", info->name) << "Failed verifying index " + std::to_string(i); } } }
void windy::app::on_assets_delete_pressed(nana::menu::item_proxy& ip) { if (this->_instance_creator->items().size() > 0) { switch (this->_instance_creator->category()) { case layer::kind::graphicable: { // references from graphicables and nuke { for (auto item : this->_instance_creator->items()) { for (auto instance : item->instances()) { this->_environment->project()->remove(instance); } auto sprite = std::dynamic_pointer_cast<content::sprite>(item); this->_environment->project()->remove_sprite(sprite); } } } break; case layer::kind::groupable: { for (auto item : this->_instance_creator->items()) { // references from compositions and nuke if single-animation remains { auto compositions = this->_environment->project()->assets()->collection()-> get(layer::kind::compositable); std::vector <std::shared_ptr<content::composition> > compositions_nuked; for (auto composition_item : compositions) { auto composition = std::dynamic_pointer_cast<content::composition> (composition_item); auto& instances = composition->instances(); for (auto instance : instances) { auto compositable = std::dynamic_pointer_cast <content::compositable>(instance); std::vector<std::shared_ptr<content::groupable> > groupables_nuked; for (auto child : compositable->children()) { auto groupable = std::dynamic_pointer_cast <content::groupable>(child); if (groupable->asset_uuid() == item->uuid()) { groupables_nuked.push_back(groupable); this->_environment->project()->assets()-> collection()->remove(groupable); } } for (auto nuked : groupables_nuked) { auto& children = compositable->children(); auto it = std::find(children.begin(), children.end(), nuked); children.erase(it); } } composition->unmap(item); if (composition->maps().empty()) { compositions_nuked.push_back(composition); } } for (auto composition : compositions_nuked) { for (auto instance : composition->instances()) { this->_environment->project()->remove(instance); } this->_environment->project()->remove_composition(composition); } } // nuke timeline triggers { for (auto instance : item->instances()) { auto groupable = std::dynamic_pointer_cast<content::groupable> (instance); for (auto trigger : groupable->triggers()) { this->_environment->timeline()->remove_trigger(trigger); } } } // references from groupables and nuke { auto instances = item->instances(); // copy by reference to store the instances for (auto instance : instances) { this->_environment->project()->remove(instance); } auto group = std::dynamic_pointer_cast<content::group>(item); this->_environment->project()->remove_group(group); } } } break; case layer::kind::compositable: { // references from compositables and nuke { for (auto item : this->_instance_creator->items()) { for (auto instance : item->instances()) { this->_environment->project()->remove(instance); } auto composition = std::dynamic_pointer_cast<content::composition>(item); this->_environment->project()->remove_composition(composition); } } } break; } // refresh { this->_environment->project()->assets()->refresh(); // this->_selection_targets.clear(); this->_environment->edition()->refresh(this->_environment->project()->game()->container()); this->_environment->properties()->clear(); this->_environment->trigger_recalculation(); } } else { nana::msgbox error(this->handle(), L"Error"); error << L"Please select one or more assets first."; error.show(); } }
int main (int argc, char *argv[]) { if ((argc < 5) || (argc > 6)) usage (argv[0]); std::string quad_list_prefix = argv[1]; std::string alm_dir = argv[2]; size_t Nstart, Nend; if (! Npoint_Functions::from_string (argv[3], Nstart)) { std::cerr << "Could not parse Nstart\n"; usage (argv[0]); } if (! Npoint_Functions::from_string (argv[4], Nend)) { std::cerr << "Could not parse Nend\n"; usage (argv[0]); } bool have_mask = false; Healpix_Map<double> mask; if (argc == 6) { read_Healpix_map_from_fits (argv[5], mask); have_mask = true; } // Figure out how many bins there are by trying to open files. std::vector<std::string> quad_list_files = Npoint_Functions::get_range_file_list(quad_list_prefix, 0, 400); if (quad_list_files.size() == 0) { std::cerr << "No quad list files found!\n"; usage (argv[0]); } int Lmax; std::vector<Healpix_Map<double> > maps (Nend-Nstart); // Make maps { Npoint_Functions::Quadrilateral_List_File<int> qlf; qlf.initialize (quad_list_files[0]); if (have_mask) { if (static_cast<size_t>(mask.Nside()) != qlf.Nside()) { std::cerr << "Mask and quadrilateral lists do not have" << " the same Nside: " << mask.Nside() << " != " << qlf.Nside() << std::endl; std::exit(1); } if (mask.Scheme() != qlf.Scheme()) mask.swap_scheme(); } Lmax = std::min(200UL, 4*qlf.Nside()+1); //#pragma omp parallel shared(qlf, maps) { Alm<xcomplex<double> > alm (Lmax, Lmax); //#pragma omp for schedule(static) for (size_t k=0; k < maps.size(); ++k) { read_Alm_from_fits (dirtree::filename(alm_dir, "alm_T_", ".fits", k+Nstart), alm, Lmax, Lmax); maps[k].SetNside (qlf.Nside(), RING); alm2map (alm, maps[k]); if (maps[k].Scheme() != qlf.Scheme()) maps[k].swap_scheme(); } } } std::vector<double> bin_list(quad_list_files.size()); /* We will generate this by bin for each map so make the bin number the * first index. */ std::vector<std::vector<double> > Corr(quad_list_files.size()); #pragma omp parallel shared(Corr, bin_list, quad_list_files, maps, mask) { Npoint_Functions::Quadrilateral_List_File<int> qlf; #pragma omp for schedule(dynamic,2) for (size_t k=0; k < quad_list_files.size(); ++k) { if (! qlf.initialize (quad_list_files[k])) { std::cerr << "Error initializing quadrilateral list from " << quad_list_files[k] << std::endl; std::exit(1); } bin_list[k] = qlf.bin_value(); if (have_mask) { Npoint_Functions::calculate_masked_fourpoint_function_list (maps, mask, qlf, Corr[k]); } else { Npoint_Functions::calculate_fourpoint_function_list (maps, qlf, Corr[k]); } } } std::cout << "# LCDM four point function from " << quad_list_prefix << std::endl; std::cout << "# First line is bin values, rest are the four point function.\n"; for (size_t k=0; k < bin_list.size(); ++k) { std::cout << bin_list[k] << " "; } std::cout << std::endl; for (size_t j=0; j < maps.size(); ++j) { for (size_t k=0; k < bin_list.size(); ++k) { std::cout << Corr[k][j] << " "; } std::cout << std::endl; } return 0; }
// Determine UNIX processes by reading "/proc". Default to ps if // it does not exist ProcDataList processList(const ProcDataList& previous) { const QDir procDir(QLatin1String("/proc/")); if (!procDir.exists()) return unixProcessListPS(previous); ProcDataList rc; const QStringList procIds = procDir.entryList(); if (procIds.isEmpty()) return rc; foreach (const QString &procId, procIds) { if (!isUnixProcessId(procId)) continue; QString filename = QLatin1String("/proc/"); filename += procId; filename += QLatin1String("/stat"); QFile file(filename); if (!file.open(QIODevice::ReadOnly)) continue; // process may have exited const QStringList data = QString::fromLocal8Bit(file.readAll()).split(' '); ProcData proc; proc.ppid = procId; proc.name = data.at(1); if (proc.name.startsWith(QLatin1Char('(')) && proc.name.endsWith(QLatin1Char(')'))) { proc.name.truncate(proc.name.size() - 1); proc.name.remove(0, 1); } proc.state = data.at(2); // PPID is element 3 proc.user = QFileInfo(file).owner(); file.close(); QFile cmdFile(QLatin1String("/proc/") + procId + QLatin1String("/cmdline")); if(cmdFile.open(QFile::ReadOnly)) { QByteArray cmd = cmdFile.readAll(); cmd.replace('\0', ' '); if ( !cmd.isEmpty() ) proc.name = QString::fromLocal8Bit(cmd); } cmdFile.close(); QFile maps(QLatin1String("/proc/") + procId + QLatin1String("/maps")); if (!maps.open(QIODevice::ReadOnly)) continue; // process may have exited proc.type = ProcData::NoQtApp; forever { const QByteArray line = maps.readLine(); if (line.isEmpty()) { break; } if (line.contains(QByteArray("/libQtCore.so"))) { proc.type = ProcData::QtApp; break; } } rc.push_back(proc); } return rc; }
int main()
{
    pixel();
    maps();
    return 0;
}