// Benchmarks tracker Z localization over the full LUT z-range and compares the
// measured per-plane standard deviation against the Cramer-Rao lower bound
// (CRLB) computed from the Fisher information matrix. Results are written as a
// 7-column CSV: true z, bias x, std x, bias z, std z, crlb x, crlb z.
//
// name        - prefix used for all output files (jpg/txt)
// lutfile     - radial Z lookup-table image to test against
// extraFlags  - extra LT_* localization flags OR-ed into the mode
// clean_lut   - nonzero: run BenchmarkLUT::CleanupLUT on the LUT first
// weightMode  - radial weighting scheme applied to the ZLUT comparison
// biasMap     - true: noise-free, one sample per plane (bias mapping mode)
// biasCorrect - true: compute and dump a Z bias-correction table first
void TestZRange(const char *name, const char *lutfile, int extraFlags, int clean_lut, RWeightMode weightMode=RWNone, bool biasMap=false, bool biasCorrect=false)
{
	ImageData lut = ReadLUTFile(lutfile);
	vector3f delta(0.001f,0.001f, 0.001f); // step sizes for the numerical Fisher-matrix derivatives

	// Keep a JPEG copy of the LUT for visual inspection (skip if input is already a jpg).
	if(PathSeperator(lutfile).extension != "jpg"){
		WriteJPEGFile(SPrintf("%s-lut.jpg",name).c_str(), lut);
	}
	if (clean_lut) {
		BenchmarkLUT::CleanupLUT(lut);
		// Writes "<lutfile minus extension>_bmlut.jpg" -- assumes a 3-char extension plus dot.
		WriteJPEGFile( std::string(lutfile).substr(0, strlen(lutfile)-4).append("_bmlut.jpg").c_str(), lut );
	}

	QTrkComputedConfig settings;
	settings.qi_iterations = 2;
	settings.zlut_minradius = 1;
	settings.qi_minradius = 1;
	settings.width = settings.height = 100;
	settings.Update();

	float maxVal=10000; // peak intensity used for Poisson noise and the Fisher matrix
	std::vector<float> stdv; // NOTE(review): unused -- kept to leave behavior byte-identical
	dbgprintf("High-res LUT range...\n");

	SampleFisherMatrix fm( maxVal);

	QueuedCPUTracker trk(settings);
	ImageData rescaledLUT;
	ResampleLUT(&trk, &lut, lut.h, &rescaledLUT);

	if (biasCorrect) {
		CImageData result;
		trk.ComputeZBiasCorrection(lut.h*10, &result, 4, true);
		WriteImageAsCSV(SPrintf("%s-biasc.txt", name).c_str(), result.data, result.w, result.h);
	}

	// Translate the requested weight mode into localization flags / radial weights.
	int f = 0;
	if (weightMode == RWDerivative)
		f |= LT_LocalizeZWeighted;
	else if(weightMode == RWRadial) {
		// Linearly increasing radial weight profile.
		std::vector<float> w(settings.zlut_radialsteps);
		for (int i=0;i<settings.zlut_radialsteps;i++)
			w[i]= settings.zlut_minradius + i/(float)settings.zlut_radialsteps*settings.zlut_maxradius;
		trk.SetRadialWeights(&w[0]);
	}
	else if (weightMode == RWStetson)
		trk.SetRadialWeights( ComputeRadialBinWindow(settings.zlut_radialsteps) );

	trk.SetLocalizationMode(LT_QI|LT_LocalizeZ|LT_NormalizeProfile|extraFlags|f);

	// Much smaller run in debug builds; bias mapping takes a single noise-free
	// sample per plane but uses more planes.
	uint nstep= InDebugMode ? 20 : 1000;
	uint smpPerStep = InDebugMode ? 2 : 200;
	if (biasMap) {
		smpPerStep=1;
		nstep=InDebugMode? 200 : 2000;
	}

	std::vector<vector3f> truepos, positions,crlb;
	std::vector<float> stdevz; // NOTE(review): unused -- kept to leave behavior byte-identical

	for (uint i=0;i<nstep;i++)
	{
		// Walk z through [1, h-1] so samples stay away from the LUT borders.
		float z = 1 + i / (float)nstep * (rescaledLUT.h-2);
		vector3f pos = vector3f(settings.width/2, settings.height/2, z);
		truepos.push_back(pos);

		// CRLB = sqrt(diag(Fisher^-1)) at this position.
		Matrix3X3 invFisherLUT = fm.Compute(pos, delta, rescaledLUT, settings.width, settings.height, settings.zlut_minradius, settings.zlut_maxradius).Inverse();
		crlb.push_back(sqrt(invFisherLUT.diag()));

		ImageData img=ImageData::alloc(settings.width,settings.height);

		for (uint j=0;j<smpPerStep; j++) {
			vector3f rndvec(rand_uniform<float>(), rand_uniform<float>(), rand_uniform<float>());
			if (biasMap) rndvec=vector3f(); // bias map: sample exactly on the z grid
			vector3f rndpos = pos + vector3f(1,1,0.1) * (rndvec-0.5f); // 0.1 plane is still a lot larger than the 0.02 typical accuracy
			GenerateImageFromLUT(&img, &rescaledLUT, settings.zlut_minradius, settings.zlut_maxradius, rndpos, true);
			img.normalize();
			if (!biasMap) ApplyPoissonNoise(img, maxVal);
			// The job frame id indexes this sample into resultpos after tracking.
			LocalizationJob job(positions.size(), 0, 0, 0);
			trk.ScheduleImageData(&img, &job);
			positions.push_back(rndpos);
			if(j==0 && InDebugMode) {
				WriteJPEGFile(SPrintf("%s-sampleimg.jpg",name).c_str(), img);
			}
		}
		dbgprintf("[%d] z=%f Min std deviation: X=%f, Y=%f, Z=%f.\n", i, z, crlb[i].x,crlb[i].y,crlb[i].z);
		img.free();
	}

	WaitForFinish(&trk, positions.size());

	// Collect results, restoring scheduling order via the job frame id.
	std::vector<vector3f> trkmean(nstep), trkstd(nstep);
	std::vector<vector3f> resultpos(nstep*smpPerStep);
	for (uint i=0;i<positions.size();i++) {
		LocalizationResult lr;
		trk.FetchResults(&lr, 1);
		resultpos[lr.job.frame]=lr.pos;
	}

	// Per-plane mean error (bias) and sample standard deviation.
	for (uint i=0;i<nstep;i++) {
		for (uint j=0;j<smpPerStep;j ++) {
			vector3f err=resultpos[i*smpPerStep+j]-positions[i*smpPerStep+j];
			trkmean[i]+=err;
		}
		trkmean[i]/=smpPerStep;
		vector3f variance;
		for (uint j=0;j<smpPerStep;j ++) {
			vector3f r = resultpos[i*smpPerStep+j];
			vector3f t = positions[i*smpPerStep+j];;
			vector3f err=r-t;
			err -= trkmean[i];
			variance += err*err;
			if (InDebugMode) {
				dbgprintf("Result: x=%f,y=%f,z=%f. True: x=%f,y=%f,z=%f\n", r.x,r.y,r.z,t.x,t.y,t.z);
			}
		}
		// Bias mapping has only one sample per plane, so no stddev is defined.
		if (biasMap) trkstd[i]=vector3f();
		else trkstd[i] = sqrt(variance / (smpPerStep-1));
	}

	vector3f mean_std;
	std::vector<float> output;
	for(uint i=0;i<nstep;i++) {
		dbgprintf("trkstd[%d]:%f crlb=%f bias=%f true=%f\n", i, trkstd[i].z, crlb[i].z, trkmean[i].z, truepos[i].z);
		// CSV row: true z, bias x, std x, bias z, std z, crlb x, crlb z.
		output.push_back(truepos[i].z);
		output.push_back(trkmean[i].x);
		output.push_back(trkstd[i].x);
		output.push_back(trkmean[i].z);
		output.push_back(trkstd[i].z);
		output.push_back(crlb[i].x);
		output.push_back(crlb[i].z);
		mean_std += trkstd[i];
	}
	dbgprintf("mean z err: %f\n", (mean_std/nstep).z);
	WriteImageAsCSV( SPrintf("%s_%d_flags%d_cl%d.txt",name, weightMode, extraFlags,clean_lut).c_str(), &output[0], 7, output.size()/7);

	lut.free();
	rescaledLUT.free();
}
// Reports whether the decorated job has completed, by asking it directly.
bool IdDecorator::isFinished() const
{
	Q_ASSERT(d1); // a decorator must always wrap a job
	const bool finished = job()->isFinished();
	return finished;
}
// Forwards an abort request to the decorated job.
void IdDecorator::requestAbort()
{
	Q_ASSERT(d1); // a decorator must always wrap a job
	auto *delegate = job();
	delegate->requestAbort();
}
void createProcess(const wstring& fileToOpen, const wstring& params, const wstring& standardInput, pwstring pstandardOutput, pwstring pstandardError) { STARTUPINFO startupInfo = {0}; PROCESS_INFORMATION processInfo = {0}; startupInfo.cb = sizeof(startupInfo); HANDLE hWriteStdinFinished = NULL; HANDLE hReadStdoutFinished = NULL; HANDLE hReadStderrFinished = NULL; if (standardInput.size()) { hWriteStdinFinished = ::CreateEvent(NULL, TRUE, FALSE, NULL); byps_ptr<WriteStringToPipe_Job> job(new WriteStringToPipe_Job(standardInput, hWriteStdinFinished)); startupInfo.hStdInput = job->start(jsfs->tpool); startupInfo.dwFlags |= STARTF_USESTDHANDLES; } if (pstandardOutput) { hReadStdoutFinished = ::CreateEvent(NULL, TRUE, FALSE, NULL); byps_ptr<ReadStringFromPipe_Job> job(new ReadStringFromPipe_Job(pstandardOutput, hReadStdoutFinished)); startupInfo.hStdOutput = job->start(jsfs->tpool); startupInfo.dwFlags |= STARTF_USESTDHANDLES; } if (pstandardError) { hReadStderrFinished = ::CreateEvent(NULL, TRUE, FALSE, NULL); byps_ptr<ReadStringFromPipe_Job> job(new ReadStringFromPipe_Job(pstandardError, hReadStderrFinished)); startupInfo.hStdError = job->start(jsfs->tpool); startupInfo.dwFlags |= STARTF_USESTDHANDLES; } DWORD cmdlen = fileToOpen.length() + 1 + params.length() + 1; LPWSTR cmd = new WCHAR[cmdlen]; wcscpy_s(cmd, cmdlen, fileToOpen.c_str()); wcscat_s(cmd, cmdlen, L" "); wcscat_s(cmd, cmdlen, params.c_str()); BOOL succ = ::CreateProcess(NULL, cmd, NULL, NULL, TRUE, CREATE_NO_WINDOW, NULL, NULL, &startupInfo, &processInfo); DWORD err = ::GetLastError(); delete[] cmd; if (startupInfo.hStdInput) ::CloseHandle(startupInfo.hStdInput); if (startupInfo.hStdOutput) ::CloseHandle(startupInfo.hStdOutput); if (startupInfo.hStdError) ::CloseHandle(startupInfo.hStdError); if (!succ) { throw CFileSystemServiceImpl::createException(L"Failed to execute " + fileToOpen, err); } HANDLE waitHandles[4] = { 0 }; DWORD nbOfHandles = 0; waitHandles[nbOfHandles++] = processInfo.hProcess; if 
(hReadStdoutFinished) waitHandles[nbOfHandles++] = hReadStdoutFinished; if (hReadStderrFinished) waitHandles[nbOfHandles++] = hReadStderrFinished; if (hWriteStdinFinished) waitHandles[nbOfHandles++] = hWriteStdinFinished; DWORD wait = ::WaitForMultipleObjects(nbOfHandles, waitHandles, TRUE, INFINITE); err = ::GetLastError(); ::CloseHandle(processInfo.hProcess); ::CloseHandle(processInfo.hThread); if (hWriteStdinFinished) ::CloseHandle(hWriteStdinFinished); if (hReadStdoutFinished) ::CloseHandle(hReadStdoutFinished); if (hReadStderrFinished) ::CloseHandle(hReadStderrFinished); if (wait == WAIT_TIMEOUT) { throw BException(EX_TIMEOUT, L"Timeout while waiting for " + args->at(0)); } else if (wait == WAIT_FAILED) { throw CFileSystemServiceImpl::createException(L"Error while waiting for " + args->at(0), err); } }
// Finds track(s) whose properties match g_searchStr, using the supplied
// predicate `job` to test each track.
// _dir: 0 = select every matching track starting from the first track,
//       +1/-1 = move the selection to the next/previous matching track
//       relative to the current selection edge.
// Returns true when the track selection was changed (wrapped in an undo block).
bool FindWnd::FindTrack(int _dir, bool (*job)(MediaTrack*,const char*))
{
	bool update = false, found = false;
	if (g_searchStr && *g_searchStr)
	{
		int startTrIdx = -1;
		bool clearCurrentSelection = false;
		if (_dir)
		{
			// Directional search: start one step past the selection edge
			// (first selected track when searching forward, last when backward).
			if (int selTracksCount = SNM_CountSelectedTracks(NULL, true))
			{
				if (MediaTrack* startTr = SNM_GetSelectedTrack(NULL, _dir > 0 ? 0 : selTracksCount-1, true))
				{
					int id = CSurf_TrackToID(startTr, false);
					if ((_dir > 0 && id < CountTracks(NULL)) || (_dir < 0 && id >0))
					{
						startTrIdx = id + _dir;
						clearCurrentSelection = true;
					}
				}
			}
			else
				// No selection yet: start from one end of the track list.
				startTrIdx = (_dir > 0 ? 0 : CountTracks(NULL));
		}
		else
		{
			// Non-directional: scan everything and replace the selection.
			startTrIdx = 0;
			clearCurrentSelection = true;
		}

		if (clearCurrentSelection)
		{
			Undo_BeginBlock2(NULL);
			Main_OnCommand(40297,0); // unselect all tracks
			update = true;
		}

		if (startTrIdx >= 0)
		{
			// Walk the track list in the requested direction; select each match.
			// Directional mode stops at the first match.
			for (int i = startTrIdx; i <= CountTracks(NULL) && i>=0; i += (!_dir ? 1 : _dir))
			{
				MediaTrack* tr = CSurf_TrackFromID(i, false);
				if (tr && job(tr, g_searchStr))
				{
					// Open the undo block lazily if no selection clearing happened.
					if (!update)
						Undo_BeginBlock2(NULL);
					update = found = true;
					GetSetMediaTrackInfo(tr, "I_SELECTED", &g_i1);
					if (_dir)
						break;
				}
			}
		}
		UpdateNotFoundMsg(found);
		if (found)
			ScrollSelTrack(true, true);
	}
	if (update)
		Undo_EndBlock2(NULL, __LOCALIZE("Find: change track selection","sws_undo"), UNDO_STATE_ALL);
	return update;
}
// Shows the "Palette from Sprite" dialog and, on OK, quantizes the sprite's
// colors into either a brand-new palette, the whole current palette, or only
// the currently selected palette entries. The quantization runs as a
// background SpriteJob; the resulting palette is committed through an undoable
// SetPalette command if it differs from the current one.
void ColorQuantizationCommand::onExecute(Context* context)
{
  try {
    app::gen::PaletteFromSprite window;
    PalettePicks entries;
    Sprite* sprite;
    frame_t frame;
    Palette* curPalette;

    // Read the active site and pre-fill the dialog while holding the reader
    // lock; the lock is released before the modal dialog opens.
    {
      ContextReader reader(context);
      Site site = context->activeSite();
      sprite = site.sprite();
      frame = site.frame();
      curPalette = sprite->palette(frame);

      window.newPalette()->setSelected(true);
      window.alphaChannel()->setSelected(
        App::instance()->preferences().quantization.withAlpha());
      window.ncolors()->setText("256");

      ColorBar::instance()->getPaletteView()->getSelectedEntries(entries);
      if (entries.picks() > 1) {
        window.currentRange()->setTextf(
          "%s, %d color(s)",
          window.currentRange()->text().c_str(),
          entries.picks());
      }
      else
        // Fewer than two entries selected: range mode makes no sense.
        window.currentRange()->setEnabled(false);

      window.currentPalette()->setTextf(
        "%s, %d color(s)",
        window.currentPalette()->text().c_str(),
        curPalette->size());
    }

    window.openWindowInForeground();
    if (window.closer() != window.ok())
      return;

    // Persist the alpha-channel choice as the new preference default.
    bool withAlpha = window.alphaChannel()->isSelected();
    App::instance()->preferences().quantization.withAlpha(withAlpha);

    // Decide which palette entries the quantization result will fill.
    bool createPal = false;
    if (window.newPalette()->isSelected()) {
      int n = window.ncolors()->textInt();
      n = MAX(1, n);
      entries = PalettePicks(n);
      entries.all();
      createPal = true;
    }
    else if (window.currentPalette()->isSelected()) {
      entries.all();
    }
    if (entries.picks() == 0)
      return;

    Palette tmpPalette(frame, entries.picks());

    ContextReader reader(context);
    SpriteJob job(reader, "Color Quantization");
    const bool newBlend = Preferences::instance().experimental.newBlend();
    // The lambda runs on the job's worker thread; tmpPalette and job outlive
    // it because waitJob() below blocks until it completes.
    job.startJobWithCallback(
      [sprite, withAlpha, &tmpPalette, &job, newBlend]{
        render::create_palette_from_sprite(
          sprite, 0, sprite->lastFrame(),
          withAlpha, &tmpPalette,
          &job,
          newBlend);    // SpriteJob is a render::TaskDelegate
      });
    job.waitJob();
    if (job.isCanceled())
      return;

    // Start from either an empty new palette (sized below) or a copy of the
    // current palette with only the picked entries overwritten.
    std::unique_ptr<Palette> newPalette(
      new Palette(createPal ? tmpPalette: *get_current_palette()));

    if (createPal) {
      entries = PalettePicks(newPalette->size());
      entries.all();
    }

    // Copy quantized colors into the picked slots (i = dest, j = source).
    int i = 0, j = 0;
    for (bool state : entries) {
      if (state)
        newPalette->setEntry(i, tmpPalette.getEntry(j++));
      ++i;
    }

    if (*curPalette != *newPalette)
      job.tx()(new cmd::SetPalette(sprite, frame, newPalette.get()));

    set_current_palette(newPalette.get(), false);
    ui::Manager::getDefault()->invalidate();
  }
  catch (const base::Exception& e) {
    Console::showException(e);
  }
}
void AlertsBatch::deleteAlert( int alertId ) { Job job( &AlertsBatch::deleteAlertJob ); job.addArg( alertId ); m_queue.addJob( job ); }
// Point-light pass: this base implementation ignores the light parameters and
// simply reuses the generic render job for the given job index.
Renderjob* Renderable::pointlightJob(const ion_uint32 lightnr,const Light& rLight,const ion_uint32 jobnr)
{
	Renderjob* genericJob = job(jobnr);
	return genericJob;
}
// Renders the configured map settings at the expected image's size, saves the
// result (plus a world file) to the temp dir, and compares it against the
// expected image. Returns false immediately if no expected image was set;
// otherwise returns compareImages()' verdict with up to theMismatchCount
// pixels allowed to differ.
bool QgsRenderChecker::runTest( QString theTestName, unsigned int theMismatchCount )
{
  if ( mExpectedImageFile.isEmpty() )
  {
    qDebug( "QgsRenderChecker::runTest failed - Expected Image File not set." );
    mReport = "<table>" "<tr><td>Test Result:</td><td>Expected Result:</td></tr>\n" "<tr><td>Nothing rendered</td>\n<td>Failed because Expected " "Image File not set.</td></tr></table>\n";
    return false;
  }
  //
  // Load the expected result pixmap
  //
  QImage myExpectedImage( mExpectedImageFile );
  // Total pixel count of the expected image; the comparison budget.
  mMatchTarget = myExpectedImage.width() * myExpectedImage.height();
  //
  // Now render our layers onto a pixmap
  //
  mMapSettings.setBackgroundColor( qRgb( 152, 219, 249 ) );
  mMapSettings.setFlag( QgsMapSettings::Antialiasing );
  // Render at exactly the expected image's size so pixels line up 1:1.
  mMapSettings.setOutputSize( QSize( myExpectedImage.width(), myExpectedImage.height() ) );

  QTime myTime;
  myTime.start();

  // Synchronous render: start the sequential job and block until done.
  QgsMapRendererSequentialJob job( mMapSettings );
  job.start();
  job.waitForFinished();

  mElapsedTime = myTime.elapsed();

  QImage myImage = job.renderedImage();

  //
  // Save the pixmap to disk so the user can make a
  // visual assessment if needed
  //
  mRenderedImageFile = QDir::tempPath() + QDir::separator() + theTestName + "_result.png";

  // Carry over the expected image's DPI so the saved PNG metadata matches.
  myImage.setDotsPerMeterX( myExpectedImage.dotsPerMeterX() );
  myImage.setDotsPerMeterY( myExpectedImage.dotsPerMeterY() );
  myImage.save( mRenderedImageFile, "PNG", 100 );

  //create a world file to go with the image...
  QFile wldFile( QDir::tempPath() + QDir::separator() + theTestName + "_result.wld" );
  if ( wldFile.open( QIODevice::WriteOnly ) )
  {
    QgsRectangle r = mMapSettings.extent();

    QTextStream stream( &wldFile );
    // ESRI world file: x-scale, rotations, negative y-scale, center of
    // the top-left pixel.
    stream << QString( "%1\r\n0 \r\n0 \r\n%2\r\n%3\r\n%4\r\n" )
    .arg( qgsDoubleToString( mMapSettings.mapUnitsPerPixel() ) )
    .arg( qgsDoubleToString( -mMapSettings.mapUnitsPerPixel() ) )
    .arg( qgsDoubleToString( r.xMinimum() + mMapSettings.mapUnitsPerPixel() / 2.0 ) )
    .arg( qgsDoubleToString( r.yMaximum() - mMapSettings.mapUnitsPerPixel() / 2.0 ) );
  }

  return compareImages( theTestName, theMismatchCount );
}
// Generates a random map using this generator's configured parameters,
// retrying up to 10 times on failure. `labels` (if non-null and labels are
// enabled) receives the generated village/landmark labels; `randomseed`
// overrides the RNG seed when set. Throws mapgen_exception if every attempt
// fails.
std::string default_map_generator::generate_map(std::map<map_location,std::string>* labels, boost::optional<boost::uint32_t> randomseed)
{
	boost::uint32_t seed;
	if(const boost::uint32_t* pseed = randomseed.get_ptr()) {
		seed = *pseed;
	} else {
		seed = seed_rng::next_seed();
	}

	// Suppress labels?
	if ( !show_labels_ )
		labels = NULL;

	// the random generator thinks odd widths are nasty, so make them even
	if (is_odd(width_))
		++width_;

	// Scale iterations with map area relative to the default map size.
	size_t iterations = (iterations_*width_*height_)/(default_width_*default_height_);

	size_t island_size = 0;
	size_t island_off_center = 0;
	size_t max_lakes = max_lakes_;

	if(island_size_ >= max_coastal) {
		// islands look good with much fewer iterations than normal, and fewer lakes
		iterations /= 10;
		max_lakes /= 9;

		// the radius of the island should be up to half the width of the map
		const size_t island_radius = 50 + ((max_island - island_size_)*50)/(max_island - max_coastal);
		island_size = (island_radius*(width_/2))/100;
	} else if(island_size_ > 0) {
		DBG_NG << "coastal...\n";

		// the radius of the island should be up to twice the width of the map
		const size_t island_radius = 40 + ((max_coastal - island_size_)*40)/max_coastal;
		island_size = (island_radius*width_*2)/100;
		island_off_center = std::min<size_t>(width_,height_);
		DBG_NG << "calculated coastal params...\n";
	}

	// A map generator can fail so try a few times to get a map before aborting.
	std::string map;
	// Keep a copy of labels as it can be written to by the map generator func
	std::map<map_location,std::string> labels_copy;
	std::map<map_location,std::string> * labels_ptr = labels ? &labels_copy : NULL;
	std::string error_message;
	// initialize the job outside the loop so that we really get a different result every time we run the loop.
	default_map_generator_job job(seed);
	int tries = 10;
	do {
		if (labels) {
			// Reset the labels.
			labels_copy = *labels;
		}
		try{
			map = job.default_generate_map(width_, height_, island_size, island_off_center, iterations, hill_size_, max_lakes, (nvillages_ * width_ * height_) / 1000, castle_size_, nplayers_, link_castles_, labels_ptr, cfg_);
			error_message = "";
		}
		catch (mapgen_exception& exc){
			error_message = exc.message;
		}
		--tries;
	} while (tries && map.empty());
	// Only publish the labels from the successful attempt.
	if (labels) {
		labels->swap(labels_copy);
	}

	if (error_message != "")
		throw mapgen_exception(error_message);

	return map;
}
// Runs two SIAL programs back to back on the same MPI ranks and verifies that
// a distributed array saved as "persistent" by program 1 is visible to
// program 2 (checked by reading blocks of local array "lb" and comparing
// against the expected fill sequences).
TEST(SimpleMPI,persistent_distributed_array_mpi) {
	sip::GlobalState::reset_program_count();
	sip::SIPMPIAttr &sip_mpi_attr = sip::SIPMPIAttr::get_instance();
	int my_rank = sip_mpi_attr.global_rank();

	std::cout << "****************************************\n";
	sip::DataManager::scope_count=0;
	//create setup_file
	std::string job("persistent_distributed_array_mpi");
	std::cout << "JOBNAME = " << job << std::endl;
	double x = 3.456;
	int norb = 2;
	int segs[] = {2,3};

	// Rank 0 writes the setup (.dat) file listing both SIAL programs.
	// NOTE(review): `attr` here is a file-scope object, unlike the local
	// sip_mpi_attr used everywhere else -- confirm they are the same instance.
	if (attr.global_rank() == 0) {
		init_setup(job.c_str());
		set_scalar("x",x);
		set_constant("norb",norb);
		std::string tmp = job + "1.siox";
		const char* nm= tmp.c_str();
		add_sial_program(nm);
		std::string tmp1 = job + "2.siox";
		const char* nm1= tmp1.c_str();
		add_sial_program(nm1);
		set_aoindex_info(2,segs);
		finalize_setup();
	}
	sip::SIPMPIUtils::check_err(MPI_Barrier(MPI_COMM_WORLD));

	setup::BinaryInputFile setup_file(job + ".dat");
	setup::SetupReader setup_reader(setup_file);
	std::cout << "SETUP READER DATA:\n" << setup_reader<< std::endl;

	//get siox name from setup, load and print the sip tables
	std::string prog_name = setup_reader.sial_prog_list_.at(0);
	std::string siox_dir(dir_name);
	setup::BinaryInputFile siox_file(siox_dir + prog_name);
	sip::SipTables sipTables(setup_reader, siox_file);
	if (!sip_mpi_attr.is_server()) {
		std::cout << "SIP TABLES" << '\n' << sipTables << std::endl;
	}

	if (sip_mpi_attr.global_rank()==0) {
		std::cout << "\n\n\n\n>>>>>>>>>>>>starting SIAL PROGRAM "<< job << std::endl;
	}

	//create worker and server
	sip::DataDistribution data_distribution(sipTables, sip_mpi_attr);
	sip::GlobalState::set_program_name(prog_name);
	sip::GlobalState::increment_program();
	// Persistent array managers survive across both programs.
	sip::WorkerPersistentArrayManager wpam;
	sip::ServerPersistentArrayManager spam;

	std::cout << "rank " << my_rank << " reached first barrier" << std::endl << std::flush;
	MPI_Barrier(MPI_COMM_WORLD);
	std::cout << "rank " << my_rank << " passed first barrier" << std::endl << std::flush;

	// --- Program 1: servers serve blocks, workers interpret the program and
	// mark arrays persistent at the end.
	if (sip_mpi_attr.is_server()) {
		sip::SIPServer server(sipTables, data_distribution, sip_mpi_attr, &spam);
		std::cout << "at first barrier in prog 1 at server" << std::endl << std::flush;
		MPI_Barrier(MPI_COMM_WORLD);
		std::cout<<"passed first barrier at server, starting server" << std::endl;
		server.run();
		spam.save_marked_arrays(&server);
		std::cout << "Server state after termination" << server << std::endl;
	} else {
		sip::SialxTimer sialxTimer(sipTables.max_timer_slots());
		sip::Interpreter runner(sipTables, sialxTimer, &wpam);
		std::cout << "at first barrier in prog 1 at worker" << std::endl << std::flush;
		MPI_Barrier(MPI_COMM_WORLD);
		std::cout << "after first barrier; starting worker for "<< job << std::endl;
		runner.interpret();
		wpam.save_marked_arrays(&runner);
		std::cout << "\n end of prog1 at worker"<< std::endl;
	}

	std::cout << std::flush;
	if (sip_mpi_attr.global_rank()==0) {
		std::cout << "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@" << std::endl << std::flush;
		std::cout << "SETUP READER DATA FOR SECOND PROGRAM:\n" << setup_reader<< std::endl;
	}

	// --- Program 2 setup.
	std::string prog_name2 = setup_reader.sial_prog_list_.at(1);
	setup::BinaryInputFile siox_file2(siox_dir + prog_name2);
	sip::SipTables sipTables2(setup_reader, siox_file2);

	if (sip_mpi_attr.global_rank()==0) {
		std::cout << "SIP TABLES FOR " << prog_name2 << '\n' << sipTables2 << std::endl;
	}

	sip::DataDistribution data_distribution2(sipTables2, sip_mpi_attr);
	// Fix: record the *second* program's name; the original passed prog_name
	// (program 1) again, so GlobalState misreported the running program.
	sip::GlobalState::set_program_name(prog_name2);
	sip::GlobalState::increment_program();

	std::cout << "rank " << my_rank << " reached second barrier in test" << std::endl << std::flush;
	MPI_Barrier(MPI_COMM_WORLD);
	std::cout << "rank " << my_rank << " passed second barrier in test" << std::endl << std::flush;

	// --- Program 2: same split; workers verify the persisted blocks.
	if (sip_mpi_attr.is_server()) {
		sip::SIPServer server(sipTables2, data_distribution2, sip_mpi_attr, &spam);
		std::cout << "barrier in prog 2 at server" << std::endl << std::flush;
		MPI_Barrier(MPI_COMM_WORLD);
		std::cout<< "rank " << my_rank << "starting server for prog 2" << std::endl;
		server.run();
		std::cout<< "rank " << my_rank << "Server state after termination of prog2" << server << std::endl;
	} else {
		sip::SialxTimer sialxTimer2(sipTables2.max_timer_slots());
		sip::Interpreter runner(sipTables2, sialxTimer2, &wpam);
		std::cout << "rank " << my_rank << "barrier in prog 2 at worker" << std::endl << std::flush;
		MPI_Barrier(MPI_COMM_WORLD);
		std::cout << "rank " << my_rank << "starting worker for prog2"<< job << std::endl;
		runner.interpret();
		std::cout << "\nSIAL PROGRAM 2 TERMINATED"<< std::endl;

		// Test contents of blocks of distributed array "b"
		// Get the data for local array block "b"
		int b_slot = runner.array_slot(std::string("lb"));

		// Test b(1,1)
		sip::index_selector_t b_indices_1;
		b_indices_1[0] = 1; b_indices_1[1] = 1;
		for (int i = 2; i < MAX_RANK; i++) b_indices_1[i] = sip::unused_index_value;
		sip::BlockId b_bid_1(b_slot, b_indices_1);
		std::cout << b_bid_1 << std::endl;
		sip::Block::BlockPtr b_bptr_1 = runner.get_block_for_reading(b_bid_1);
		sip::Block::dataPtr b_data_1 = b_bptr_1->get_data();
		std::cout << " Comparing block " << b_bid_1 << std::endl;
		// b(1,1) was filled with 1,2,3,... row-major over a segs[0] x segs[0] block.
		double fill_seq_1_1 = 1.0;
		for (int i=0; i<segs[0]; i++) {
			for (int j=0; j<segs[0]; j++) {
				ASSERT_DOUBLE_EQ(fill_seq_1_1, b_data_1[i*segs[0] + j]);
				fill_seq_1_1++;
			}
		}

		// Test b(2, 2)
		sip::index_selector_t b_indices_2;
		b_indices_2[0] = 2; b_indices_2[1] = 2;
		for (int i = 2; i < MAX_RANK; i++) b_indices_2[i] = sip::unused_index_value;
		sip::BlockId b_bid_2(b_slot, b_indices_2);
		std::cout << b_bid_2 << std::endl;
		sip::Block::BlockPtr b_bptr_2 = runner.get_block_for_reading(b_bid_2);
		sip::Block::dataPtr b_data_2 = b_bptr_2->get_data();
		std::cout << " Comparing block " << b_bid_2 << std::endl;
		// b(2,2) fill sequence starts at 4 over a segs[1] x segs[1] block.
		double fill_seq_2_2 = 4.0;
		for (int i=0; i<segs[1]; i++) {
			for (int j=0; j<segs[1]; j++) {
				ASSERT_DOUBLE_EQ(fill_seq_2_2, b_data_2[i*segs[1] + j]);
				fill_seq_2_2++;
			}
		}

		// Test b(2,1)
		sip::index_selector_t b_indices_3;
		b_indices_3[0] = 2; b_indices_3[1] = 1;
		for (int i = 2; i < MAX_RANK; i++) b_indices_3[i] = sip::unused_index_value;
		sip::BlockId b_bid_3(b_slot, b_indices_3);
		std::cout << b_bid_3 << std::endl;
		sip::Block::BlockPtr b_bptr_3 = runner.get_block_for_reading(b_bid_3);
		sip::Block::dataPtr b_data_3 = b_bptr_3->get_data();
		std::cout << " Comparing block " << b_bid_3 << std::endl;
		// b(2,1) fill sequence starts at 3 over a segs[1] x segs[0] block.
		double fill_seq_2_1 = 3.0;
		for (int i=0; i<segs[1]; i++) {
			for (int j=0; j<segs[0]; j++) {
				ASSERT_DOUBLE_EQ(fill_seq_2_1, b_data_3[i*segs[0] + j]);
				fill_seq_2_1++;
			}
		}
	}

	std::cout << "rank " << my_rank << " reached third barrier in test" << std::endl << std::flush;
	MPI_Barrier(MPI_COMM_WORLD);
	std::cout << "rank " << my_rank << " passed third barrier in test" << std::endl << std::flush;
}
// Verifies that SocketRouter dispatches every message type received on ALPHA
// to the registered callback: each onRcv* signal forwards to BETA, so a
// message sent into alphaSocket must come back out of betaSocket unchanged.
TEST(Routing, callbacks)
{
  zmq::context_t context(1);
  // External endpoints the test drives directly.
  zmq::socket_t alphaSocket(context, ZMQ_PAIR);
  zmq::socket_t betaSocket(context, ZMQ_PAIR);
  // Router-owned ends; ownership transfers to the router via add_socket.
  std::unique_ptr<zmq::socket_t> alphaSocketRouter(new zmq::socket_t(context, ZMQ_PAIR));
  std::unique_ptr<zmq::socket_t> betaSocketRouter(new zmq::socket_t(context, ZMQ_PAIR));
  alphaSocket.bind("tcp://*:5555");
  betaSocket.bind("tcp://*:5556");
  alphaSocketRouter->connect("tcp://localhost:5555");
  betaSocketRouter->connect("tcp://localhost:5556");

  SocketRouter router;
  router.add_socket(SocketID::ALPHA, alphaSocketRouter);
  router.add_socket(SocketID::BETA, betaSocketRouter);

  // One message of each routable type.
  Message hello(HELLO);
  Message heartbeat(HEARTBEAT);
  Message problemspec(PROBLEMSPEC);
  Message jobrequest(JOBREQUEST);
  Message job(JOB);
  Message jobswap(JOBSWAP);
  Message alldone(ALLDONE);
  Message goodbye(GOODBYE);

  // Single forwarding callback shared by all message-type signals.
  auto fwdToBeta = [&] (const Message&m) { router.send(SocketID::BETA, m);};

  // Bind functionality to the router
  router(SocketID::ALPHA).onRcvHELLO.connect(fwdToBeta);
  router(SocketID::ALPHA).onRcvHEARTBEAT.connect(fwdToBeta);
  router(SocketID::ALPHA).onRcvPROBLEMSPEC.connect(fwdToBeta);
  router(SocketID::ALPHA).onRcvJOBREQUEST.connect(fwdToBeta);
  router(SocketID::ALPHA).onRcvJOB.connect(fwdToBeta);
  router(SocketID::ALPHA).onRcvJOBSWAP.connect(fwdToBeta);
  router(SocketID::ALPHA).onRcvALLDONE.connect(fwdToBeta);
  router(SocketID::ALPHA).onRcvGOODBYE.connect(fwdToBeta);

  router.start(10);//ms per poll

  // Push each message through ALPHA and synchronously collect it from BETA.
  send(alphaSocket, hello);
  Message rhello = receive(betaSocket);
  send(alphaSocket, heartbeat);
  Message rheartbeat = receive(betaSocket);
  send(alphaSocket, problemspec);
  Message rproblemspec = receive(betaSocket);
  send(alphaSocket, jobrequest);
  Message rjobrequest = receive(betaSocket);
  send(alphaSocket, job);
  Message rjob = receive(betaSocket);
  send(alphaSocket, jobswap);
  Message rjobswap = receive(betaSocket);
  send(alphaSocket, alldone);
  Message ralldone = receive(betaSocket);
  send(alphaSocket, goodbye);
  Message rgoodbye = receive(betaSocket);

  router.stop();

  // Every message must arrive unmodified.
  ASSERT_EQ(hello, rhello);
  ASSERT_EQ(heartbeat, rheartbeat);
  ASSERT_EQ(problemspec, rproblemspec);
  ASSERT_EQ(jobrequest, rjobrequest);
  ASSERT_EQ(job, rjob);
  ASSERT_EQ(jobswap, rjobswap);
  ASSERT_EQ(alldone, ralldone);
  ASSERT_EQ(goodbye, rgoodbye);
}
/**
 * \brief Executes the given request callback on its own detached thread.
 *
 * A heap-allocated job object keeps the callback alive for the lifetime of
 * the worker thread; the thread is detached, so completion is not awaited.
 *
 * \param callback The work to run; must not be empty.
 */
void schedule(boost::function<void()> callback)
{
	boost::shared_ptr<ThreadPerRequestStrategyJob> request(new ThreadPerRequestStrategyJob(callback));
	boost::thread worker(boost::bind(&ThreadPerRequestStrategyJob::run, request));
	worker.detach();
}
// Slot invoked when a SearchEngineFetchJob finishes updating a search engine.
// On success, refreshes the matching row in the search-engine view (title,
// tooltip, icon); on failure, shows a warning. Either way the row's
// "updating" flag is cleared and the job is removed from m_updateJobs.
void PreferencesSearchPageWidget::handleSearchEngineUpdate(bool isSuccess)
{
	SearchEngineFetchJob *job(qobject_cast<SearchEngineFetchJob*>(sender()));

	if (!job)
	{
		return;
	}

	SearchEnginesManager::SearchEngineDefinition searchEngine(job->getSearchEngine());
	// On failure the fetched definition may be invalid; fall back to looking
	// the identifier up by the job itself.
	const QString identifier(searchEngine.isValid() ? searchEngine.identifier : m_updateJobs.key(job));

	if (!identifier.isEmpty())
	{
		// Find the view row carrying this engine's identifier and refresh it.
		for (int i = 0; i < m_ui->searchViewWidget->getRowCount(); ++i)
		{
			const QModelIndex index(m_ui->searchViewWidget->getIndex(i, 0));

			if (index.data(IdentifierRole).toString() == identifier)
			{
				if (isSuccess)
				{
					m_ui->searchViewWidget->setData(index, searchEngine.title, Qt::DisplayRole);
					m_ui->searchViewWidget->setData(index, searchEngine.title, Qt::ToolTipRole);

					if (searchEngine.icon.isNull())
					{
						// No icon delivered: show a transparent placeholder.
						m_ui->searchViewWidget->setData(index, QColor(Qt::transparent), Qt::DecorationRole);
					}
					else
					{
						m_ui->searchViewWidget->setData(index, searchEngine.icon, Qt::DecorationRole);
					}
				}

				m_ui->searchViewWidget->setData(index, false, IsUpdatingRole);

				break;
			}
		}

		m_updateJobs.remove(identifier);

		// Last pending update finished: tear down the spinner animation.
		// NOTE(review): m_updateAnimation is dereferenced without a null check
		// here -- confirm it is always created before jobs are queued.
		if (m_updateJobs.isEmpty())
		{
			m_updateAnimation->deleteLater();
			m_updateAnimation = nullptr;
		}
	}

	if (!isSuccess)
	{
		QMessageBox::warning(this, tr("Error"), tr("Failed to update search engine."), QMessageBox::Close);

		return;
	}

	// Preserve the user's custom keyword across the update.
	if (m_searchEngines.contains(identifier))
	{
		searchEngine.keyword = m_searchEngines[identifier].second.keyword;

		m_searchEngines[identifier] = {true, searchEngine};
	}
}
// Stress/test client for NetSchedule: parses command-line arguments, connects
// to the given service/queue, then batch-submits `jobs` jobs with random
// affinity tokens and reports submission throughput.
// (Only the argument parsing and batch-submission phase is visible in this
// chunk; the function continues beyond it.)
int CTestNetScheduleClient::Run(void)
{
    CArgs args = GetArgs();
    const string& service = args["service"].AsString();
    const string& queue_name = args["queue"].AsString();

    // Number of jobs to submit (must be > 0).
    unsigned jcount = 1000;
    if (args["jobs"]) {
        jcount = args["jobs"].AsInteger();
        if (jcount == 0) {
            fputs("The 'jobs' parameter cannot be zero.\n", stderr);
            return 1;
        }
    }

    // Number of distinct affinity tokens to draw from.
    unsigned naff = 17;
    if (args["naff"])
        naff = args["naff"].AsInteger();
    s_SeedTokens(naff);

    unsigned input_length = 0;
    if (args["ilen"])
        input_length = args["ilen"].AsInteger();

    unsigned maximum_runtime = 60;
    if (args["maxruntime"])
        maximum_runtime = args["maxruntime"].AsInteger();

    CNetScheduleAPI::EJobStatus status;
    CNetScheduleAPI cl(service, "client_test", queue_name);

    // Generous communication timeout for large batches.
    STimeout comm_timeout;
    comm_timeout.sec  = 1200;
    comm_timeout.usec = 0;
    cl.GetService().GetServerPool().SetCommunicationTimeout(comm_timeout);
    CNetScheduleSubmitter submitter = cl.GetSubmitter();

    // Job input: explicit string, generated payload of ilen bytes, or default.
    string input;
    if (args["input"]) {
        input = args["input"].AsString();
    } else if (args["ilen"]) {
        input = s_GenInput(input_length);
    } else {
        input = "Hello " + queue_name;
    }

    // Disabled SubmitAndWait smoke test, kept for manual enabling.
    if (0) {{
        NcbiCout << "SubmitAndWait..." << NcbiEndl;
        unsigned wait_time = 30;
        CNetScheduleJob j1(input);
        status = submitter.SubmitJobAndWait(j1, wait_time);
        if (status == CNetScheduleAPI::eDone) {
            NcbiCout << j1.job_id << " done." << NcbiEndl;
        } else {
            NcbiCout << j1.job_id << " is not done in " << wait_time << " seconds." << NcbiEndl;
        }
        NcbiCout << "SubmitAndWait...done." << NcbiEndl;
    }}

    set<string> submitted_job_ids;

    CStopWatch sw(CStopWatch::eStart);

    {{
        NcbiCout << "Batch submit " << jcount << " jobs..." << NcbiEndl;

        // Submit in batches of up to 1000; the last batch may be smaller.
        unsigned batch_size = 1000;
        for (unsigned i = 0; i < jcount; i += batch_size) {
            vector<CNetScheduleJob> jobs;
            if (jcount - i < batch_size)
                batch_size = jcount - i;
            for (unsigned j = 0; j < batch_size; ++j) {
                CNetScheduleJob job(input);
                job.affinity = s_GetRandomToken();
                jobs.push_back(job);
            }
            submitter.SubmitJobBatch(jobs);
            // Record the server-assigned ids for later status checks.
            ITERATE(vector<CNetScheduleJob>, job, jobs) {
                submitted_job_ids.insert(job->job_id);
            }
            NcbiCout << "." << flush;
        }

        _ASSERT(submitted_job_ids.size() == jcount);

        NcbiCout << NcbiEndl << "Done." << NcbiEndl;
        double elapsed = sw.Elapsed();

        NcbiCout.setf(IOS_BASE::fixed, IOS_BASE::floatfield);
        NcbiCout << "Avg time: " << (elapsed / jcount) << " sec, " << (jcount/elapsed) << " jobs/sec" << NcbiEndl;
    }}
bool MyMoneyXmlContentHandler::endElement(const QString& /* namespaceURI */, const QString& /* localName */, const QString& qName) { bool rc = true; QString s = qName.toLower(); if (m_level) { m_currNode = m_currNode.parentNode().toElement(); m_level--; if (!m_level) { try { if (s == "transaction") { MyMoneyTransaction t0(m_baseNode); if (!t0.id().isEmpty()) { MyMoneyTransaction t1(m_reader->d->nextTransactionID(), t0); m_reader->d->tList[t1.uniqueSortKey()] = t1; } m_reader->signalProgress(++m_elementCount, 0); } else if (s == "account") { MyMoneyAccount a(m_baseNode); if (!a.id().isEmpty()) m_reader->d->aList[a.id()] = a; m_reader->signalProgress(++m_elementCount, 0); } else if (s == "payee") { MyMoneyPayee p(m_baseNode); if (!p.id().isEmpty()) m_reader->d->pList[p.id()] = p; } else if (s == "tag") { MyMoneyTag ta(m_baseNode); if (!ta.id().isEmpty()) m_reader->d->taList[ta.id()] = ta; } else if (s == "currency") { MyMoneySecurity s(m_baseNode); if (!s.id().isEmpty()) m_reader->d->secList[s.id()] = s; m_reader->signalProgress(++m_elementCount, 0); } else if (s == "security") { MyMoneySecurity s(m_baseNode); if (!s.id().isEmpty()) m_reader->d->secList[s.id()] = s; m_reader->signalProgress(++m_elementCount, 0); } else if (s == "keyvaluepairs") { MyMoneyKeyValueContainer kvp(m_baseNode); m_reader->m_storage->setPairs(kvp.pairs()); } else if (s == "institution") { MyMoneyInstitution i(m_baseNode); if (!i.id().isEmpty()) m_reader->d->iList[i.id()] = i; } else if (s == "report") { MyMoneyReport r(m_baseNode); if (!r.id().isEmpty()) m_reader->d->rList[r.id()] = r; m_reader->signalProgress(++m_elementCount, 0); } else if (s == "budget") { MyMoneyBudget b(m_baseNode); if (!b.id().isEmpty()) m_reader->d->bList[b.id()] = b; } else if (s == "fileinfo") { rc = m_reader->readFileInformation(m_baseNode); m_reader->signalProgress(-1, -1); } else if (s == "user") { rc = m_reader->readUserInformation(m_baseNode); m_reader->signalProgress(-1, -1); } else if (s == "scheduled_tx") { 
MyMoneySchedule s(m_baseNode); if (!s.id().isEmpty()) m_reader->d->sList[s.id()] = s; } else if (s == "price") { MyMoneyPrice p(m_reader->d->m_fromSecurity, m_reader->d->m_toSecurity, m_baseNode); m_reader->d->prList[MyMoneySecurityPair(m_reader->d->m_fromSecurity, m_reader->d->m_toSecurity)][p.date()] = p; m_reader->signalProgress(++m_elementCount, 0); } else if (s == "onlinejob") { onlineJob job(m_baseNode); if (!job.id().isEmpty()) m_reader->d->onlineJobList[job.id()] = job; } else { m_errMsg = i18n("Unknown XML tag %1 found in line %2", qName, m_loc->lineNumber()); kWarning() << m_errMsg; rc = false; } } catch (const MyMoneyException &e) { m_errMsg = i18n("Exception while creating a %1 element: %2", s, e.what()); kWarning() << m_errMsg; rc = false; } m_doc = QDomDocument(); } } else { if (s == "institutions") { // last institution read, now dump them into the engine m_reader->m_storage->loadInstitutions(m_reader->d->iList); m_reader->d->iList.clear(); } else if (s == "accounts") { // last account read, now dump them into the engine m_reader->m_storage->loadAccounts(m_reader->d->aList); m_reader->d->aList.clear(); m_reader->signalProgress(-1, -1); } else if (s == "payees") { // last payee read, now dump them into the engine m_reader->m_storage->loadPayees(m_reader->d->pList); m_reader->d->pList.clear(); } else if (s == "tags") { // last tag read, now dump them into the engine m_reader->m_storage->loadTags(m_reader->d->taList); m_reader->d->taList.clear(); } else if (s == "transactions") { // last transaction read, now dump them into the engine m_reader->m_storage->loadTransactions(m_reader->d->tList); m_reader->d->tList.clear(); m_reader->signalProgress(-1, -1); } else if (s == "schedules") { // last schedule read, now dump them into the engine m_reader->m_storage->loadSchedules(m_reader->d->sList); m_reader->d->sList.clear(); } else if (s == "securities") { // last security read, now dump them into the engine 
m_reader->m_storage->loadSecurities(m_reader->d->secList); m_reader->d->secList.clear(); m_reader->signalProgress(-1, -1); } else if (s == "currencies") { // last currency read, now dump them into the engine m_reader->m_storage->loadCurrencies(m_reader->d->secList); m_reader->d->secList.clear(); m_reader->signalProgress(-1, -1); } else if (s == "reports") { // last report read, now dump them into the engine m_reader->m_storage->loadReports(m_reader->d->rList); m_reader->d->rList.clear(); m_reader->signalProgress(-1, -1); } else if (s == "budgets") { // last budget read, now dump them into the engine m_reader->m_storage->loadBudgets(m_reader->d->bList); m_reader->d->bList.clear(); } else if (s == "prices") { // last price read, now dump them into the engine m_reader->m_storage->loadPrices(m_reader->d->prList); m_reader->d->bList.clear(); m_reader->signalProgress(-1, -1); } else if (s == "onlinejobs") { m_reader->m_storage->loadOnlineJobs(m_reader->d->onlineJobList); m_reader->d->onlineJobList.clear(); } } return rc; }
// Attempts one merge step over the current hull set (mChulls): computes the
// combined volume of every untested hull pair (in parallel via the job swarm),
// then merges the single pair whose union has the smallest volume, provided we
// are still above the target hull count or below the small-cluster threshold.
//
// @param jobSwarmContext  worker context used to run CombineVolumeJob tasks.
// @return true when a pair was actually merged (callers typically loop until
//         this returns false).
bool combineHulls(JOB_SWARM_STANDALONE::JobSwarmContext *jobSwarmContext)
{
    bool combine = false;
    // each new convex hull is given a unique guid.
    // A hash map is used to make sure that no hulls are tested twice.
    CHullVector output;
    HaU32 count = (HaU32)mChulls.size();

    // Early out to save walking all the hulls. Hulls are combined based on
    // a target number or on a number of generated hulls.
    bool mergeTargetMet = (HaU32)mChulls.size() <= mMergeNumHulls;
    if (mergeTargetMet && (mSmallClusterThreshold == 0.0f))
        return false;

    hacd::vector< CombineVolumeJob > jobs;

    // First, see if there are any pairs of hulls who's combined volume we have not yet calculated.
    // If there are, then we add them to the jobs list
    {
        for (HaU32 i=0; i<count; i++)
        {
            CHull *cr = mChulls[i];
            for (HaU32 j=i+1; j<count; j++)
            {
                CHull *match = mChulls[j];

                // Pair key is order-independent: smaller guid goes into the high
                // half-word. NOTE(review): this assumes guids fit in 16 bits —
                // larger guids would alias pairs; confirm guid range upstream.
                HaU32 hashIndex;
                if ( match->mGuid < cr->mGuid )
                {
                    hashIndex = (match->mGuid << 16) | cr->mGuid;
                }
                else
                {
                    hashIndex = (cr->mGuid << 16 ) | match->mGuid;
                }

                HaF32 *v = mHasBeenTested->find(hashIndex);
                if ( v == NULL )
                {
                    CombineVolumeJob job(cr,match,hashIndex);
                    jobs.push_back(job);
                    (*mHasBeenTested)[hashIndex] = 0.0f; // assign it to some value so we don't try to create more than one job for it.
                }
            }
        }
    }

    // ok..we have posted all of the jobs, let's let's solve them in parallel
    for (hacd::HaU32 i=0; i<jobs.size(); i++)
    {
        jobs[i].startJob(jobSwarmContext);
    }

    // solve all of them in parallel...
    // NOTE(review): gCombineCount appears to be a shared outstanding-job
    // counter decremented as CombineVolumeJobs finish — verify its update is
    // synchronized with this spin loop.
    while ( gCombineCount != 0 )
    {
        jobSwarmContext->processSwarmJobs(); // solve merged hulls in parallel
    }

    // once we have the answers, now put the results into the hash table.
    for (hacd::HaU32 i=0; i<jobs.size(); i++)
    {
        CombineVolumeJob &job = jobs[i];
        (*mHasBeenTested)[job.mHashIndex] = job.mCombinedVolume;
    }

    HaF32 bestVolume = 1e9;
    CHull *mergeA = NULL;
    CHull *mergeB = NULL;

    // now find the two hulls which merged produce the smallest combined volume.
    {
        for (HaU32 i=0; i<count; i++)
        {
            CHull *cr = mChulls[i];
            for (HaU32 j=i+1; j<count; j++)
            {
                CHull *match = mChulls[j];

                // Recompute the same order-independent pair key as above.
                HaU32 hashIndex;
                if ( match->mGuid < cr->mGuid )
                {
                    hashIndex = (match->mGuid << 16) | cr->mGuid;
                }
                else
                {
                    hashIndex = (cr->mGuid << 16 ) | match->mGuid;
                }

                // Every pair must have been computed by now (either this call
                // or a previous one); 0 is the "job created but no result"
                // placeholder and is skipped.
                HaF32 *v = mHasBeenTested->find(hashIndex);
                HACD_ASSERT(v);
                if ( v && *v != 0 && *v < bestVolume )
                {
                    bestVolume = *v;
                    mergeA = cr;
                    mergeB = match;
                }
            }
        }
    }

    // If we found a merge pair, and we are below the merge threshold or we haven't reduced to the target
    // do the merge.
    bool thresholdBelow = ((bestVolume / mTotalVolume) * 100.0f) < mSmallClusterThreshold;
    if ( mergeA && (thresholdBelow || !mergeTargetMet))
    {
        CHull *merge = doMerge(mergeA,mergeB);

        HaF32 volumeA = mergeA->mVolume;
        HaF32 volumeB = mergeB->mVolume;

        if ( merge )
        {
            combine = true;
            output.push_back(merge);
            // Rebuild the hull list: merged hull first, then all survivors.
            for (CHullVector::iterator j=mChulls.begin(); j!=mChulls.end(); ++j)
            {
                CHull *h = (*j);
                if ( h !=mergeA && h != mergeB )
                {
                    output.push_back(h);
                }
            }
            delete mergeA;
            delete mergeB;
            // Remove the old volumes and add the new one.
            mTotalVolume -= (volumeA + volumeB);
            mTotalVolume += merge->mVolume;
        }
        mChulls = output;
    }

    return combine;
}
// Note that, when parsing file names and arguments, paths are interpreted // to be relative to the applications working directory. void parser::parse_dag (void) { dag_ = boost::shared_ptr <dag> (new dag (scheduler_file_)); std::cout << "parsing " << dag_file_ << std::endl; try { ticpp::Document doc (dag_file_); doc.LoadFile (); // get the top adag element ticpp::Element * adag = doc.FirstChildElement ("adag"); // list nodes ticpp::Iterator <ticpp::Element> job ("job"); // we parse jobs twice. On the first run, we add all nodes. On the // second run, we add all edges (connected nodes are known now). for ( job = job.begin (adag); job != job.end (); job++ ) { node_description nd; std::string s_id = job->GetAttribute ("id"); std::string s_name = job->GetAttribute ("name"); nd.set_attribute ("Executable", s_name); // get args ticpp::Element * args = job->FirstChildElement ("argument"); if ( args ) { // iterate over args, if we have them ticpp::Iterator <ticpp::Node> arg; std::vector <std::string> s_args; for ( arg = arg.begin (args); arg != arg.end (); arg++ ) { if ( arg->Type () == TiXmlNode::ELEMENT ) { ticpp::Element * elem = arg->ToElement (); std::string s_file = elem->GetAttribute ("file"); s_args.push_back (s_file); } else if ( arg->Type () == TiXmlNode::TEXT ) { std::stringstream ss; ss << *arg; std::string tmp = ss.str (); if ( tmp.size () ) { std::vector <std::string> s_tmp = split (tmp); for ( unsigned int j = 0; j < s_tmp.size (); j++ ) { if ( s_tmp [j] == "." ) { s_args.push_back (s_tmp[j]); } else { s_args.push_back (s_tmp[j]); } } } } } nd.set_vector_attribute ("Arguments", s_args); } boost::shared_ptr <node> n = dag_->create_node (nd, s_id); dag_->add_node (s_id, n); } // second run: we have input and output files specified for each jobs. 
// Find pairs, and add as edges std::vector <std::pair <std::string, std::string> > inputs; std::vector <std::pair <std::string, std::string> > outputs; for ( job = job.begin (adag); job != job.end (); job++ ) { std::string s_id = job->GetAttribute ("id"); std::string s_name = job->GetAttribute ("name"); ticpp::Iterator <ticpp::Element> uses ("uses"); for ( uses = uses.begin (job.Get ()); uses != uses.end (); uses++ ) { std::string s_file = uses->GetAttribute ("file"); std::string s_link = uses->GetAttribute ("link"); std::string s_transfer = uses->GetAttribute ("transfer"); if ( s_link == "input" ) { inputs.push_back (std::pair <std::string, std::string> (s_file, s_id)); } else if ( s_link == "output" ) { outputs.push_back (std::pair <std::string, std::string> (s_file, s_id)); } else { std::cerr << "cannot handle link type " << s_link << std::endl; } } } // iterate over inputs, and find outputs which produce them. inputs not // produced by some outputting node are assumed to be staged in from // a data src (INPUT). Also, data which are produced but not consumed // by another node are to be staged to an data sink (OUTPUT). In both // cases, we simply add edges with empty src/tgt nodes, and leave it to // the scheduler to interprete that correctly. // first, iterate over inputs, and add edges for those inputs which are // produced by another node, and also for those which need to be staged // in. 
for ( unsigned int i = 0; i < inputs.size (); i++ ) { std::string file = inputs[i].first; std::string i_node = inputs[i].second; std::string o_node = ""; // std::cout << " --- checking inputs: " << file << std::endl; // for each input file, find output node for ( unsigned int j = 0; j < outputs.size (); j++ ) { if ( outputs[j].first == file ) { o_node = outputs[j].second; // stop loop j = inputs.size (); } } if ( o_node == "" ) { // std::cout << " --- needs to be staged to node " << i_node << std::endl; saga::url loc (file); loc.set_scheme ("any"); boost::shared_ptr <edge> e = dag_->create_edge (loc); // std::cout << " e 1 " << file << " : " << "INPUT->" << o_node << std::endl; dag_->add_edge (e, "INPUT", i_node); } else if ( o_node != i_node ) { saga::url loc (file); loc.set_scheme ("any"); boost::shared_ptr <edge> e = dag_->create_edge (loc); // std::cout << " e 3: " << file << " : " << o_node << "->" << i_node << std::endl; dag_->add_edge (e, o_node, i_node); } } // inputs have been iterated above - now iterate over outputs, and look // for remaining ones which do not have a partner. for ( unsigned int k = 0; k < outputs.size (); k++ ) { std::string file = outputs[k].first; std::string i_node = ""; std::string o_node = outputs[k].second; // for each output node, find the input node for ( unsigned int l = 0; l < inputs.size (); l++ ) { if ( inputs[l].first == file ) { i_node = inputs[l].second; // stop loop l = inputs.size (); } } if ( i_node == "" ) { // will stage data out to data sink saga::url loc (file); loc.set_scheme ("any"); boost::shared_ptr <edge> e = dag_->create_edge (loc); // std::cout << " e 1 " << file << " : " << o_node << "-> OUTPUT " << std::endl; dag_->add_edge (e, o_node, "OUTPUT"); } } } catch ( const ticpp::Exception & e ) { std::cout << e.what () << std::endl; } std::cout << "parsing " << dag_file_ << " done" << std::endl; }
// Entry point of the AbcExport Maya command.  Parses one or more -jobArg
// flag strings into export jobs, validates the output file and root nodes,
// builds Alembic time samplings from the requested frame ranges, then steps
// the Maya timeline over the union of all requested frames, letting every
// job write its samples.  Returns MS::kSuccess on completion, MS::kFailure
// on any argument/validation error or user interrupt.
MStatus AbcExport::doIt(const MArgList & args)
{
try
{
    MStatus status;

    util::clearIsAnimatedCache();

    // remember the current time so we can restore it after the export loop
    MTime oldCurTime = MAnimControl::currentTime();

    MArgParser argData(syntax(), args, &status);

    if (argData.isFlagSet("help"))
    {
        MGlobal::displayInfo(util::getHelpText());
        return MS::kSuccess;
    }

    bool verbose = argData.isFlagSet("verbose");

    // If skipFrame is true, when going through the playback range of the
    // scene, as much frames are skipped when possible. This could cause
    // a problem for, time dependent solutions like
    // particle system / hair simulation
    bool skipFrame = true;
    if (argData.isFlagSet("dontSkipUnwrittenFrames"))
        skipFrame = false;

    double startEvaluationTime = DBL_MAX;
    if (argData.isFlagSet("preRollStartFrame"))
    {
        double startAt = 0.0;
        argData.getFlagArgument("preRollStartFrame", 0, startAt);
        startEvaluationTime = startAt;
    }

    unsigned int jobSize = argData.numberOfFlagUses("jobArg");

    if (jobSize == 0)
        return status;

    // the frame range we will be iterating over for all jobs,
    // includes frames which are not skipped and the startAt offset
    std::set<double> allFrameRange;

    // this will eventually hold only the animated jobs.
    // its a list because we will be removing jobs from it
    std::list < AbcWriteJobPtr > jobList;

    for (unsigned int jobIndex = 0; jobIndex < jobSize; jobIndex++)
    {
        JobArgs jobArgs;
        MArgList jobArgList;
        argData.getFlagArgumentList("jobArg", jobIndex, jobArgList);
        MString jobArgsStr = jobArgList.asString(0);
        MStringArray jobArgsArray;

        {
            // parse the job arguments
            // e.g. -perFrameCallbackMel "print \"something\"" will be splitted to
            //    [0] -perFrameCallbackMel
            //    [1] print "something"
            //
            // Simple three-state tokenizer: whitespace splits arguments
            // unless we are inside a single- or double-quoted string, where
            // backslash escapes are honored.
            enum State
            {
                kArgument,            // parsing an argument (not quoted)
                kDoubleQuotedString,  // parsing a double quoted string
                kSingleQuotedString,  // parsing a single quoted string
            };

            State state = kArgument;
            MString stringBuffer;
            for (unsigned int charIdx = 0; charIdx < jobArgsStr.numChars(); charIdx++)
            {
                MString ch = jobArgsStr.substringW(charIdx, charIdx);

                switch (state)
                {
                case kArgument:
                    if (ch == " ")
                    {
                        // space terminates the current argument
                        if (stringBuffer.length() > 0)
                        {
                            jobArgsArray.append(stringBuffer);
                            stringBuffer.clear();
                        }
                        // goto another argument
                        state = kArgument;
                    }
                    else if (ch == "\"")
                    {
                        if (stringBuffer.length() > 0)
                        {
                            // double quote is part of the argument
                            stringBuffer += ch;
                        }
                        else
                        {
                            // goto double quoted string
                            state = kDoubleQuotedString;
                        }
                    }
                    else if (ch == "'")
                    {
                        if (stringBuffer.length() > 0)
                        {
                            // single quote is part of the argument
                            stringBuffer += ch;
                        }
                        else
                        {
                            // goto single quoted string
                            state = kSingleQuotedString;
                        }
                    }
                    else
                    {
                        stringBuffer += ch;
                    }
                    break;

                case kDoubleQuotedString:
                    // double quote terminates the current string
                    if (ch == "\"")
                    {
                        jobArgsArray.append(stringBuffer);
                        stringBuffer.clear();
                        state = kArgument;
                    }
                    else if (ch == "\\")
                    {
                        // escaped character; a trailing lone backslash stays literal
                        MString nextCh = (++charIdx < jobArgsStr.numChars())
                            ? jobArgsStr.substringW(charIdx, charIdx) : "\\";
                        if (nextCh == "n")          stringBuffer += "\n";
                        else if (nextCh == "t")     stringBuffer += "\t";
                        else if (nextCh == "r")     stringBuffer += "\r";
                        else if (nextCh == "\\")    stringBuffer += "\\";
                        else if (nextCh == "'")     stringBuffer += "'";
                        else if (nextCh == "\"")    stringBuffer += "\"";
                        else                        stringBuffer += nextCh;
                    }
                    else
                    {
                        stringBuffer += ch;
                    }
                    break;

                case kSingleQuotedString:
                    // single quote terminates the current string
                    if (ch == "'")
                    {
                        jobArgsArray.append(stringBuffer);
                        stringBuffer.clear();
                        state = kArgument;
                    }
                    else if (ch == "\\")
                    {
                        // escaped character; a trailing lone backslash stays literal
                        MString nextCh = (++charIdx < jobArgsStr.numChars())
                            ? jobArgsStr.substringW(charIdx, charIdx) : "\\";
                        if (nextCh == "n")          stringBuffer += "\n";
                        else if (nextCh == "t")     stringBuffer += "\t";
                        else if (nextCh == "r")     stringBuffer += "\r";
                        else if (nextCh == "\\")    stringBuffer += "\\";
                        else if (nextCh == "'")     stringBuffer += "'";
                        else if (nextCh == "\"")    stringBuffer += "\"";
                        else                        stringBuffer += nextCh;
                    }
                    else
                    {
                        stringBuffer += ch;
                    }
                    break;
                }
            }

            // the rest of the argument
            if (stringBuffer.length() > 0)
            {
                jobArgsArray.append(stringBuffer);
            }
        }

        // the frame range within this job; defaults to a single range at the
        // current time with stride 1
        std::vector< FrameRangeArgs > frameRanges(1);
        frameRanges.back().startTime = oldCurTime.value();
        frameRanges.back().endTime = oldCurTime.value();
        frameRanges.back().strideTime = 1.0;

        bool hasRange = false;
        bool hasRoot = false;
        bool sampleGeo  = true; // whether or not to subsample geometry
        std::string fileName;
        bool asOgawa = true;

        // walk the tokenized job arguments; flags taking values consume the
        // following token(s) via ++i
        unsigned int numJobArgs = jobArgsArray.length();
        for (unsigned int i = 0; i < numJobArgs; ++i)
        {
            MString arg = jobArgsArray[i];
            arg.toLowerCase();

            if (arg == "-f" || arg == "-file")
            {
                if (i+1 >= numJobArgs)
                {
                    MGlobal::displayError("File incorrectly specified.");
                    return MS::kFailure;
                }
                fileName = jobArgsArray[++i].asChar();
            }
            else if (arg == "-fr" || arg == "-framerange")
            {
                if (i+2 >= numJobArgs || !jobArgsArray[i+1].isDouble() ||
                    !jobArgsArray[i+2].isDouble())
                {
                    MGlobal::displayError("Frame Range incorrectly specified.");
                    return MS::kFailure;
                }

                // this is not the first -frameRange argument, we are going
                // to add one more frame range to the frame range array.
                if (hasRange)
                {
                    frameRanges.push_back(FrameRangeArgs());
                }

                hasRange = true;
                frameRanges.back().startTime = jobArgsArray[++i].asDouble();
                frameRanges.back().endTime = jobArgsArray[++i].asDouble();

                // make sure start frame is smaller or equal to endTime
                if (frameRanges.back().startTime > frameRanges.back().endTime)
                {
                    std::swap(frameRanges.back().startTime,
                        frameRanges.back().endTime);
                }
            }
            else if (arg == "-frs" || arg == "-framerelativesample")
            {
                if (i+1 >= numJobArgs || !jobArgsArray[i+1].isDouble())
                {
                    MGlobal::displayError(
                        "Frame Relative Sample incorrectly specified.");
                    return MS::kFailure;
                }
                frameRanges.back().shutterSamples.insert(
                    jobArgsArray[++i].asDouble());
            }
            else if (arg == "-nn" || arg == "-nonormals")
            {
                jobArgs.noNormals = true;
            }
            else if (arg == "-pr" || arg == "-preroll")
            {
                frameRanges.back().preRoll = true;
            }
            else if (arg == "-ro" || arg == "-renderableonly")
            {
                jobArgs.excludeInvisible = true;
            }
            else if (arg == "-s" || arg == "-step")
            {
                if (i+1 >= numJobArgs || !jobArgsArray[i+1].isDouble())
                {
                    MGlobal::displayError("Step incorrectly specified.");
                    return MS::kFailure;
                }
                frameRanges.back().strideTime = jobArgsArray[++i].asDouble();
            }
            else if (arg == "-sl" || arg == "-selection")
            {
                jobArgs.useSelectionList = true;
            }
            else if (arg == "-sn" || arg == "-stripnamespaces")
            {
                if (i+1 >= numJobArgs || !jobArgsArray[i+1].isUnsigned())
                {
                    // the strip all namespaces case
                    // so we pick a very LARGE number
                    jobArgs.stripNamespace = 0xffffffff;
                }
                else
                {
                    jobArgs.stripNamespace = jobArgsArray[++i].asUnsigned();
                }
            }
            else if (arg == "-uv" || arg == "-uvwrite")
            {
                jobArgs.writeUVs = true;
            }
            else if (arg == "-wcs" || arg == "-writecolorsets")
            {
                jobArgs.writeColorSets = true;
            }
            else if (arg == "-wfs" || arg == "-writefacesets")
            {
                jobArgs.writeFaceSets = true;
            }
            else if (arg == "-wfg" || arg == "-wholeframegeo")
            {
                sampleGeo = false;
            }
            else if (arg == "-ws" || arg == "-worldspace")
            {
                jobArgs.worldSpace = true;
            }
            else if (arg == "-wv" || arg == "-writevisibility")
            {
                jobArgs.writeVisibility = true;
            }
            else if (arg == "-wc" || arg == "-writecreases")
            {
                jobArgs.writeCreases = true;
            }
            else if (arg == "-mfc" || arg == "-melperframecallback")
            {
                if (i+1 >= numJobArgs)
                {
                    MGlobal::displayError(
                        "melPerFrameCallback incorrectly specified.");
                    return MS::kFailure;
                }
                jobArgs.melPerFrameCallback = jobArgsArray[++i].asChar();
            }
            else if (arg == "-pfc" || arg == "-pythonperframecallback")
            {
                if (i+1 >= numJobArgs)
                {
                    MGlobal::displayError(
                        "pythonPerFrameCallback incorrectly specified.");
                    return MS::kFailure;
                }
                jobArgs.pythonPerFrameCallback = jobArgsArray[++i].asChar();
            }
            else if (arg == "-mpc" || arg == "-melpostjobcallback")
            {
                if (i+1 >= numJobArgs)
                {
                    MGlobal::displayError(
                        "melPostJobCallback incorrectly specified.");
                    return MS::kFailure;
                }
                jobArgs.melPostCallback = jobArgsArray[++i].asChar();
            }
            else if (arg == "-ppc" || arg == "-pythonpostjobcallback")
            {
                if (i+1 >= numJobArgs)
                {
                    MGlobal::displayError(
                        "pythonPostJobCallback incorrectly specified.");
                    return MS::kFailure;
                }
                jobArgs.pythonPostCallback = jobArgsArray[++i].asChar();
            }
            // geomArbParams - attribute filtering stuff
            else if (arg == "-atp" || arg == "-attrprefix")
            {
                if (i+1 >= numJobArgs)
                {
                    MGlobal::displayError(
                        "attrPrefix incorrectly specified.");
                    return MS::kFailure;
                }
                jobArgs.prefixFilters.push_back(jobArgsArray[++i].asChar());
            }
            else if (arg == "-a" || arg == "-attr")
            {
                if (i+1 >= numJobArgs)
                {
                    MGlobal::displayError(
                        "attr incorrectly specified.");
                    return MS::kFailure;
                }
                jobArgs.attribs.insert(jobArgsArray[++i].asChar());
            }
            // userProperties - attribute filtering stuff
            else if (arg == "-uatp" || arg == "-userattrprefix")
            {
                if (i+1 >= numJobArgs)
                {
                    MGlobal::displayError(
                        "userAttrPrefix incorrectly specified.");
                    return MS::kFailure;
                }
                jobArgs.userPrefixFilters.push_back(jobArgsArray[++i].asChar());
            }
            else if (arg == "-u" || arg == "-userattr")
            {
                if (i+1 >= numJobArgs)
                {
                    MGlobal::displayError(
                        "userAttr incorrectly specified.");
                    return MS::kFailure;
                }
                jobArgs.userAttribs.insert(jobArgsArray[++i].asChar());
            }
            else if (arg == "-rt" || arg == "-root")
            {
                if (i+1 >= numJobArgs)
                {
                    MGlobal::displayError(
                        "root incorrectly specified.");
                    return MS::kFailure;
                }
                hasRoot = true;
                MString root = jobArgsArray[++i];
                MSelectionList sel;
                if (sel.add(root) != MS::kSuccess)
                {
                    // unresolvable root is a warning, not an error
                    MString warn = root;
                    warn += " could not be select, skipping.";
                    MGlobal::displayWarning(warn);
                    continue;
                }

                unsigned int numRoots = sel.length();
                for (unsigned int j = 0; j < numRoots; ++j)
                {
                    MDagPath path;
                    if (sel.getDagPath(j, path) != MS::kSuccess)
                    {
                        MString warn = path.fullPathName();
                        warn += " (part of ";
                        warn += root;
                        warn += " ) not a DAG Node, skipping.";
                        MGlobal::displayWarning(warn);
                        continue;
                    }
                    jobArgs.dagPaths.insert(path);
                }
            }
            else if (arg == "-ef" || arg == "-eulerfilter")
            {
                jobArgs.filterEulerRotations = true;
            }
            else if (arg == "-df" || arg == "-dataformat")
            {
                if (i+1 >= numJobArgs)
                {
                    MGlobal::displayError(
                        "dataFormat incorrectly specified.");
                    return MS::kFailure;
                }
                MString dataFormat = jobArgsArray[++i];
                dataFormat.toLowerCase();
                if (dataFormat == "hdf")
                {
                    asOgawa = false;
                }
                else if (dataFormat == "ogawa")
                {
                    asOgawa = true;
                }
            }
            else
            {
                MString warn = "Ignoring unsupported flag: ";
                warn += jobArgsArray[i];
                MGlobal::displayWarning(warn);
            }
        } // for i

        if (fileName == "")
        {
            MString error = "-file not specified.";
            MGlobal::displayError(error);
            return MS::kFailure;
        }

        // Resolve the output file against the workspace's "alembicCache"
        // file rule (creating the rule and cache folder if missing), then
        // validate that the path exists, that no AlembicNode in the scene is
        // reading the same file, and that the file is writable.
        {
            MString fileRule, expandName;
            MString alembicFileRule = "alembicCache";
            MString alembicFilePath = "cache/alembic";

            MString queryFileRuleCmd;
            queryFileRuleCmd.format("workspace -q -fre \"^1s\"",
                alembicFileRule);
            MString queryFolderCmd;
            queryFolderCmd.format("workspace -en `workspace -q -fre \"^1s\"`",
                alembicFileRule);

            // query the file rule for alembic cache
            MGlobal::executeCommand(queryFileRuleCmd, fileRule);
            if (fileRule.length() > 0)
            {
                // we have alembic file rule, query the folder
                MGlobal::executeCommand(queryFolderCmd, expandName);
            }
            else
            {
                // alembic file rule does not exist, create it
                MString addFileRuleCmd;
                addFileRuleCmd.format("workspace -fr \"^1s\" \"^2s\"",
                    alembicFileRule, alembicFilePath);
                MGlobal::executeCommand(addFileRuleCmd);

                // save the workspace. maya may discard file rules on exit
                MGlobal::executeCommand("workspace -s");

                // query the folder
                MGlobal::executeCommand(queryFolderCmd, expandName);
            }

            // resolve the expanded file rule
            if (expandName.length() == 0)
            {
                expandName = alembicFilePath;
            }

            // get the path to the alembic file rule
            MFileObject directory;
            directory.setRawFullName(expandName);
            MString directoryName = directory.resolvedFullName();

            // make sure the cache folder exists
            if (!directory.exists())
            {
                // create the cache folder
                MString createFolderCmd;
                createFolderCmd.format("sysFile -md \"^1s\"",
                    directoryName);
                MGlobal::executeCommand(createFolderCmd);
            }

            // resolve the relative path
            MFileObject absoluteFile;
            absoluteFile.setRawFullName(fileName.c_str());
#if MAYA_API_VERSION < 201300
            if (absoluteFile.resolvedFullName() !=
                absoluteFile.expandedFullName())
            {
#else
            if (!MFileObject::isAbsolutePath(fileName.c_str())) {
#endif
                // this is a relative path
                MString absoluteFileName = directoryName + "/" +
                    fileName.c_str();
                absoluteFile.setRawFullName(absoluteFileName);
                fileName = absoluteFile.resolvedFullName().asChar();
            }
            else
            {
                fileName = absoluteFile.resolvedFullName().asChar();
            }

            // check the path must exist before writing
            MFileObject absoluteFilePath;
            absoluteFilePath.setRawFullName(absoluteFile.path());
            if (!absoluteFilePath.exists())
            {
                MString error;
                error.format("Path ^1s does not exist!",
                    absoluteFilePath.resolvedFullName());
                MGlobal::displayError(error);
                return MS::kFailure;
            }

            // check the file is used by any AlembicNode in the scene
            MItDependencyNodes dgIter(MFn::kPluginDependNode);
            for (; !dgIter.isDone(); dgIter.next())
            {
                MFnDependencyNode alembicNode(dgIter.thisNode());
                if (alembicNode.typeName() != "AlembicNode")
                {
                    continue;
                }

                MPlug abcFilePlug = alembicNode.findPlug("abc_File");
                if (abcFilePlug.isNull())
                {
                    continue;
                }

                MFileObject alembicFile;
                alembicFile.setRawFullName(abcFilePlug.asString());
                if (!alembicFile.exists())
                {
                    continue;
                }

                if (alembicFile.resolvedFullName() ==
                    absoluteFile.resolvedFullName())
                {
                    MString error = "Can't export to an Alembic file which is in use.";
                    MGlobal::displayError(error);
                    return MS::kFailure;
                }
            }

            // probe writability by opening (and truncating) the target file
            std::ofstream ofs(fileName.c_str());
            if (!ofs.is_open())
            {
                MString error = MString("Can't write to file: ") + fileName.c_str();
                MGlobal::displayError(error);
                return MS::kFailure;
            }
            ofs.close();
        }

        // if -frameRelativeSample argument is not specified for a frame range,
        // we are assuming a -frameRelativeSample 0.0
        for (std::vector<FrameRangeArgs>::iterator range =
            frameRanges.begin(); range != frameRanges.end(); ++range)
        {
            if (range->shutterSamples.empty())
                range->shutterSamples.insert(0.0);
        }

        if (jobArgs.prefixFilters.empty())
        {
            jobArgs.prefixFilters.push_back("ABC_");
        }

        // the list of frame ranges for sampling
        std::vector<FrameRangeArgs> sampleRanges;
        std::vector<FrameRangeArgs> preRollRanges;
        for (std::vector<FrameRangeArgs>::const_iterator range =
            frameRanges.begin(); range != frameRanges.end(); ++range)
        {
            if (range->preRoll)
                preRollRanges.push_back(*range);
            else
                sampleRanges.push_back(*range);
        }

        // the list of frames written into the abc file
        std::set<double> geoSamples;
        std::set<double> transSamples;
        for (std::vector<FrameRangeArgs>::const_iterator range =
            sampleRanges.begin(); range != sampleRanges.end(); ++range)
        {
            for (double frame = range->startTime;
                frame <= range->endTime; frame += range->strideTime)
            {
                for (std::set<double>::const_iterator shutter =
                    range->shutterSamples.begin();
                    shutter != range->shutterSamples.end(); ++shutter)
                {
                    double curFrame = *shutter + frame;

                    if (!sampleGeo)
                    {
                        // whole-frame geo: round to the nearest integer frame
                        double intFrame = (double)(int)(
                            curFrame >= 0 ? curFrame + .5 : curFrame - .5);

                        // only insert samples that are close to being an integer
                        if (fabs(curFrame - intFrame) < 1e-4)
                        {
                            geoSamples.insert(curFrame);
                        }
                    }
                    else
                    {
                        geoSamples.insert(curFrame);
                    }

                    transSamples.insert(curFrame);
                }
            }

            if (geoSamples.empty())
            {
                geoSamples.insert(range->startTime);
            }

            if (transSamples.empty())
            {
                transSamples.insert(range->startTime);
            }
        }

        bool isAcyclic = false;
        if (sampleRanges.empty())
        {
            // no frame ranges or all frame ranges are pre-roll ranges
            hasRange = false;
            geoSamples.insert(frameRanges.back().startTime);
            transSamples.insert(frameRanges.back().startTime);
        }
        else
        {
            // check if the time range is even (cyclic)
            // otherwise, we will use acyclic

            // sub frames pattern
            std::vector<double> pattern(
                sampleRanges.begin()->shutterSamples.begin(),
                sampleRanges.begin()->shutterSamples.end());
            std::transform(pattern.begin(), pattern.end(), pattern.begin(),
                std::bind2nd(std::plus<double>(),
                    sampleRanges.begin()->startTime));

            // check the frames against the pattern
            std::vector<double> timeSamples(
                transSamples.begin(), transSamples.end());
            for (size_t i = 0; i < timeSamples.size(); i++)
            {
                // next pattern: shift the expected sub-frame pattern by one
                // stride each time we wrap around it
                if (i % pattern.size() == 0 && i / pattern.size() > 0)
                {
                    std::transform(pattern.begin(), pattern.end(),
                        pattern.begin(), std::bind2nd(std::plus<double>(),
                            sampleRanges.begin()->strideTime));
                }

                // pattern mismatch, we use acyclic time sampling type
                if (timeSamples[i] != pattern[i % pattern.size()])
                {
                    isAcyclic = true;
                    break;
                }
            }
        }

        // the list of frames to pre-roll
        std::set<double> preRollSamples;
        for (std::vector<FrameRangeArgs>::const_iterator range =
            preRollRanges.begin(); range != preRollRanges.end(); ++range)
        {
            for (double frame = range->startTime;
                frame <= range->endTime; frame += range->strideTime)
            {
                for (std::set<double>::const_iterator shutter =
                    range->shutterSamples.begin();
                    shutter != range->shutterSamples.end(); ++shutter)
                {
                    double curFrame = *shutter + frame;
                    preRollSamples.insert(curFrame);
                }
            }

            if (preRollSamples.empty())
            {
                preRollSamples.insert(range->startTime);
            }
        }

        if (jobArgs.dagPaths.size() > 1)
        {
            // check for validity of the DagPath relationships complexity : n^2
            // (a root may not be an ancestor/descendant of another root)
            util::ShapeSet::const_iterator m, n;
            util::ShapeSet::const_iterator end = jobArgs.dagPaths.end();
            for (m = jobArgs.dagPaths.begin(); m != end; )
            {
                MDagPath path1 = *m;
                m++;
                for (n = m; n != end; n++)
                {
                    MDagPath path2 = *n;
                    if (util::isAncestorDescendentRelationship(path1,path2))
                    {
                        MString errorMsg = path1.fullPathName();
                        errorMsg += " and ";
                        errorMsg += path2.fullPathName();
                        errorMsg += " have an ancestor relationship.";
                        MGlobal::displayError(errorMsg);
                        return MS::kFailure;
                    }
                }  // for n
            }  // for m
        }
        // no root is specified, and we aren't using a selection
        // so we'll try to translate the whole Maya scene by using all
        // children of the world as roots.
        else if (!hasRoot && !jobArgs.useSelectionList)
        {
            MSelectionList sel;
#if MAYA_API_VERSION >= 201100
            sel.add("|*", true);
#else
            // older versions of Maya will not be able to find top level nodes
            // within namespaces
            sel.add("|*");
#endif
            unsigned int numRoots = sel.length();
            for (unsigned int i = 0; i < numRoots; ++i)
            {
                MDagPath path;
                sel.getDagPath(i, path);
                jobArgs.dagPaths.insert(path);
            }
        }
        else if (hasRoot && jobArgs.dagPaths.empty())
        {
            MString errorMsg = "No valid root nodes were specified.";
            MGlobal::displayError(errorMsg);
            return MS::kFailure;
        }
        else if (jobArgs.useSelectionList)
        {
            MSelectionList activeList;
            MGlobal::getActiveSelectionList(activeList);
            if (activeList.length() == 0)
            {
                MString errorMsg =
                    "-selection specified but nothing is actively selected.";
                MGlobal::displayError(errorMsg);
                return MS::kFailure;
            }
        }

        // Build the Alembic time samplings (frame numbers are converted to
        // seconds via util::spf()).
        AbcA::TimeSamplingPtr transTime, geoTime;

        if (hasRange)
        {
            if (isAcyclic)
            {
                // acyclic, uneven time sampling
                // e.g. [0.8, 1, 1.2], [2.8, 3, 3.2], .. not continuous
                //      [0.8, 1, 1.2], [1.7, 2, 2.3], .. shutter different
                std::vector<double> samples(
                    transSamples.begin(), transSamples.end());
                std::transform(samples.begin(), samples.end(),
                    samples.begin(), std::bind2nd(std::multiplies<double>(),
                        util::spf()));
                transTime.reset(new AbcA::TimeSampling(AbcA::TimeSamplingType(
                    AbcA::TimeSamplingType::kAcyclic), samples));
            }
            else
            {
                // cyclic, even time sampling between time periods
                // e.g. [0.8, 1, 1.2], [1.8, 2, 2.2], ...
                std::vector<double> samples;
                double startTime = sampleRanges[0].startTime;
                double strideTime = sampleRanges[0].strideTime;
                for (std::set<double>::const_iterator shutter =
                    sampleRanges[0].shutterSamples.begin();
                    shutter != sampleRanges[0].shutterSamples.end();
                    ++shutter)
                {
                    samples.push_back((startTime + *shutter) * util::spf());
                }

                if (samples.size() > 1)
                {
                    Alembic::Util::uint32_t numSamples =
                        static_cast<Alembic::Util::uint32_t>(samples.size());
                    transTime.reset(
                        new AbcA::TimeSampling(AbcA::TimeSamplingType(
                            numSamples, strideTime * util::spf()), samples));
                }
                // uniform sampling
                else
                {
                    transTime.reset(new AbcA::TimeSampling(
                        strideTime * util::spf(), samples[0]));
                }
            }
        }
        else
        {
            // time ranges are not specified
            transTime.reset(new AbcA::TimeSampling());
        }

        if (sampleGeo || !hasRange)
        {
            geoTime = transTime;
        }
        else
        {
            // sampling geo on whole frames
            if (isAcyclic)
            {
                // acyclic, uneven time sampling
                std::vector<double> samples(
                    geoSamples.begin(), geoSamples.end());
                // one more sample for setup()
                if (*transSamples.begin() != *geoSamples.begin())
                    samples.insert(samples.begin(), *transSamples.begin());
                std::transform(samples.begin(), samples.end(),
                    samples.begin(), std::bind2nd(std::multiplies<double>(),
                        util::spf()));
                geoTime.reset(new AbcA::TimeSampling(AbcA::TimeSamplingType(
                    AbcA::TimeSamplingType::kAcyclic), samples));
            }
            else
            {
                // whole-frame geo never samples finer than one frame apart
                double geoStride = sampleRanges[0].strideTime;
                if (geoStride < 1.0)
                    geoStride = 1.0;

                double geoStart = *geoSamples.begin() * util::spf();
                geoTime.reset(new AbcA::TimeSampling(
                    geoStride * util::spf(), geoStart));
            }
        }

        AbcWriteJobPtr job(new AbcWriteJob(fileName.c_str(), asOgawa,
            transSamples, transTime, geoSamples, geoTime, jobArgs));

        jobList.push_front(job);

        // make sure we add additional whole frames, if we arent skipping
        // the inbetween ones
        if (!skipFrame && !allFrameRange.empty())
        {
            double localMin = *(transSamples.begin());
            std::set<double>::iterator last = transSamples.end();
            last--;
            double localMax = *last;

            double globalMin = *(allFrameRange.begin());
            last = allFrameRange.end();
            last--;
            double globalMax = *last;

            // if the min of our current frame range is beyond
            // what we know about, pad a few more frames
            if (localMin > globalMax)
            {
                for (double f = globalMax; f < localMin; f++)
                {
                    allFrameRange.insert(f);
                }
            }

            // if the max of our current frame range is beyond
            // what we know about, pad a few more frames
            if (localMax < globalMin)
            {
                for (double f = localMax; f < globalMin; f++)
                {
                    allFrameRange.insert(f);
                }
            }
        }

        // right now we just copy over the translation samples since
        // they are guaranteed to contain all the geometry samples
        allFrameRange.insert(transSamples.begin(), transSamples.end());

        // copy over the pre-roll samples
        allFrameRange.insert(preRollSamples.begin(), preRollSamples.end());
    }

    // add extra evaluation run up, if necessary
    if (startEvaluationTime != DBL_MAX && !allFrameRange.empty())
    {
        double firstFrame = *allFrameRange.begin();
        for (double f = startEvaluationTime; f < firstFrame; ++f)
        {
            allFrameRange.insert(f);
        }
    }

    std::set<double>::iterator it = allFrameRange.begin();
    std::set<double>::iterator itEnd = allFrameRange.end();

    MComputation computation;
    computation.beginComputation();

    // loop through every frame in the list, if a job has that frame in it's
    // list of transform or shape frames, then it will write out data and
    // call the perFrameCallback, if that frame is also the last one it has
    // to work on then it will also call the postCallback.
    // If it doesn't have this frame, then it does nothing
    for (; it != itEnd; it++)
    {
        if (verbose)
        {
            double frame = *it;
            MString info;
            info = frame;
            MGlobal::displayInfo(info);
        }

        MGlobal::viewFrame(*it);
        std::list< AbcWriteJobPtr >::iterator j = jobList.begin();
        std::list< AbcWriteJobPtr >::iterator jend = jobList.end();
        while (j != jend)
        {
            if (computation.isInterruptRequested())
                return MS::kFailure;

            bool lastFrame = (*j)->eval(*it);

            if (lastFrame)
            {
                // job finished: drop it from the list of animated jobs
                j = jobList.erase(j);
            }
            else
                j++;
        }
    }
    computation.endComputation();

    // set the time back
    MGlobal::viewFrame(oldCurTime);

    return MS::kSuccess;
}
catch (Alembic::Util::Exception & e)
{
    MString theError("Alembic Exception encountered: ");
    theError += e.what();
    MGlobal::displayError(theError);
    return MS::kFailure;
}
catch (std::exception & e)
{
    MString theError("std::exception encountered: ");
    theError += e.what();
    MGlobal::displayError(theError);
    return MS::kFailure;
}
}
/**
 * Generates a cave scenario.
 *
 * All of the actual work happens inside the cave_map_generator_job
 * constructor, which renders the finished scenario into its res_ member;
 * here we simply run one such job and hand its result back.
 */
config cave_map_generator::create_scenario(boost::optional<boost::uint32_t> randomseed)
{
	const cave_map_generator_job generation(*this, randomseed);
	return generation.res_;
}
int main(int argc, char **argv) { std::cout << "MapReduce Wordcount Application"; if (argc < 2) { std::cerr << "Usage: wordcount directory [num_map_tasks]\n"; return 1; } mapreduce::specification spec; spec.input_directory = argv[1]; std::cout << "\n" << std::max(1,(int)boost::thread::hardware_concurrency()) << " CPU cores"; std::cout << "\n" << typeid(wordcount::job).name() << "\n"; mapreduce::results result; wordcount::job::datasource_type datasource(spec); try { if (argc > 2) spec.map_tasks = atoi(argv[2]); if (argc > 3) spec.reduce_tasks = atoi(argv[3]); else spec.reduce_tasks = std::max(1U,boost::thread::hardware_concurrency()); std::cout << "\nRunning Parallel WordCount MapReduce..."; wordcount::job job(datasource, spec); job.run<mapreduce::schedule_policy::cpu_parallel<wordcount::job> >(result); std::cout << "\nMapReduce Finished."; std::cout << std::endl << "\nMapReduce statistics:"; std::cout << "\n MapReduce job runtime : " << result.job_runtime << " seconds, of which..."; std::cout << "\n Map phase runtime : " << result.map_runtime << " seconds"; std::cout << "\n Reduce phase runtime : " << result.reduce_runtime << " seconds"; std::cout << "\n\n Map:"; std::cout << "\n Total Map keys : " << result.counters.map_keys_executed; std::cout << "\n Map keys processed : " << result.counters.map_keys_completed; std::cout << "\n Map key processing errors : " << result.counters.map_key_errors; std::cout << "\n Number of Map Tasks run (in parallel) : " << result.counters.actual_map_tasks; std::cout << "\n Fastest Map key processed in : " << *std::min_element(result.map_times.begin(), result.map_times.end()) << " seconds"; std::cout << "\n Slowest Map key processed in : " << *std::max_element(result.map_times.begin(), result.map_times.end()) << " seconds"; std::cout << "\n Average time to process Map keys : " << std::accumulate(result.map_times.begin(), result.map_times.end(), boost::posix_time::time_duration()) / result.map_times.size() << " seconds"; std::cout << 
"\n\n Reduce:"; std::cout << "\n Total Reduce keys : " << result.counters.reduce_keys_executed; std::cout << "\n Reduce keys processed : " << result.counters.reduce_keys_completed; std::cout << "\n Reduce key processing errors : " << result.counters.reduce_key_errors; std::cout << "\n Number of Reduce Tasks run (in parallel): " << result.counters.actual_reduce_tasks; std::cout << "\n Number of Result Files : " << result.counters.num_result_files; if (result.reduce_times.size() > 0) { std::cout << "\n Fastest Reduce key processed in : " << *std::min_element(result.reduce_times.begin(), result.reduce_times.end()) << " seconds"; std::cout << "\n Slowest Reduce key processed in : " << *std::max_element(result.reduce_times.begin(), result.reduce_times.end()) << " seconds"; std::cout << "\n Average time to process Reduce keys : " << std::accumulate(result.reduce_times.begin(), result.reduce_times.end(), boost::posix_time::time_duration()) / result.map_times.size() << " seconds"; } wordcount::job::const_result_iterator it = job.begin_results(); wordcount::job::const_result_iterator ite = job.end_results(); if (it != ite) { typedef std::list<wordcount::job::keyvalue_t> frequencies_t; frequencies_t frequencies; frequencies.push_back(*it); frequencies_t::reverse_iterator it_smallest = frequencies.rbegin(); for (++it; it!=ite; ++it) { if (frequencies.size() < 10) // show top 10 { frequencies.push_back(*it); if (it->second < it_smallest->second) it_smallest = frequencies.rbegin(); } else if (it->second > it_smallest->second) { *it_smallest = *it; it_smallest = std::min_element(frequencies.rbegin(), frequencies.rend(), mapreduce::detail::less_2nd<wordcount::job::keyvalue_t>); } } frequencies.sort(mapreduce::detail::greater_2nd<wordcount::job::keyvalue_t>); std::cout << "\n\nMapReduce results:"; for (frequencies_t::const_iterator freq=frequencies.begin(); freq!=frequencies.end(); ++freq) printf("\n%.*s\t%d", freq->first.second, freq->first.first, freq->second); } } catch 
(std::exception &e) { std::cout << std::endl << "Error: " << e.what(); } return 0; }
void RunThread(const Body& body, const Arg& arg) { NativeParallelForTask<Arg,Body> job(arg, body); job.start(); job.wait_to_finish(); }
/**
 * BES MigrateActivity operation.
 *
 * Takes the EPR of an activity running on another A-REX plus a JSDL job
 * description (and, optionally, delegated credentials) and creates a new
 * local job that is marked as a migration of the old one.
 *
 * @param config   Per-user A-REX/grid-manager configuration.
 * @param in       SOAP request body (MigrateActivity element).
 * @param out      SOAP response body; on error it is destroyed and the
 *                 parent is populated with a SOAP fault instead.
 * @param clientid Identifier of the requesting client, stored with the job.
 * @return Default-constructed (error) MCC_Status on failure,
 *         Arc::STATUS_OK on success.
 */
Arc::MCC_Status ARexService::MigrateActivity(ARexGMConfig& config,Arc::XMLNode in,Arc::XMLNode out,const std::string& clientid) {
  /*
  MigrateActivity
    ActivityIdentifier (wsa:EndpointReferenceType)
    ActivityDocument
      jsdl:JobDefinition
    ForceMigration

  MigrateActivityResponse
    ActivityIdentifier (wsa:EndpointReferenceType)
    ActivityDocument
      jsdl:JobDefinition

  NotAuthorizedFault
  NotAcceptingNewActivitiesFault
  UnsupportedFeatureFault
  InvalidRequestMessageFault
  */
  {
    // Log the raw incoming request at VERBOSE level for debugging.
    std::string s;
    in.GetXML(s);
    logger_.msg(Arc::VERBOSE, "MigrateActivity: request = \n%s", s);
  };
  // The activity to migrate is identified by a WS-Addressing EPR.
  Arc::WSAEndpointReference id(in["ActivityIdentifier"]);
  if(!(Arc::XMLNode)id) {
    // Wrong request
    logger_.msg(Arc::ERROR, "MigrateActivitys: no ActivityIdentifier found");
    Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Can't find ActivityIdentifier element in request");
    InvalidRequestMessageFault(fault,"jsdl:ActivityIdentifier","Element is missing");
    out.Destroy();
    return Arc::MCC_Status();
  };
  // Reconstruct the old job's id as "<address>/<a-rex:JobID>".
  std::string migrateid = Arc::WSAEndpointReference(id).Address() + "/" + (std::string)Arc::WSAEndpointReference(id).ReferenceParameters()["a-rex:JobID"];
  if(migrateid.empty()) {
    // EPR is wrongly formated or not an A-REX EPR
    logger_.msg(Arc::ERROR, "MigrateActivity: EPR contains no JobID");
    Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Can't find JobID element in ActivityIdentifier");
    InvalidRequestMessageFault(fault,"a-rex:JobID","Element is missing");
    out.Destroy();
    return Arc::MCC_Status();
  };

  // HPC Basic Profile 1.0 comply (these fault handlings are defined in the KnowARC standards
  // conformance roadmap 2nd release)

  // End of the HPC BP 1.0 fault handling part

  // If the client delegated credentials, accept and store them; the
  // resulting delegation id is attached to the new job below.
  std::string delegation;
  Arc::XMLNode delegated_token = in["arcdeleg:DelegatedToken"];
  if(delegated_token) {
    // Client wants to delegate credentials
    if(!delegation_stores_.DelegatedToken(config.GmConfig().DelegationDir(),delegated_token,config.GridName(),delegation)) {
      // Failed to accept delegation (report as bad request)
      logger_.msg(Arc::ERROR, "MigrateActivity: Failed to accept delegation");
      Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Failed to accept delegation");
      InvalidRequestMessageFault(fault,"arcdeleg:DelegatedToken","This token does not exist");
      out.Destroy();
      return Arc::MCC_Status();
    };
  };

  // A job description must be supplied by the client; fetching it from the
  // old cluster (code below) is currently disabled.
  if( !(in["ActivityDocument"]["JobDefinition"])) {
    /*
    // First try to get job desc from old cluster
    logger_.msg(Arc::VERBOSE, "MigrateActivity: no job description found try to get it from old cluster");
    Arc::MCCConfig cfg;
    // TODO:
    //if (!proxyPath.empty()) cfg.AddProxy(delegation);
    //if (!certificatePath.empty()) //cfg.AddCertificate(certificatePath);
    //if (!keyPath.empty()) //cfg.AddPrivateKey(keyPath);
    //if (!caCertificatesDir.empty()) //cfg.AddCADir(caCertificatesDir);
    Arc::URL url(migrateid);
    Arc::PathIterator pi(url.Path(), true);
    url.ChangePath(*pi);
    Arc::AREXClient ac(url, cfg);
    Arc::NS ns;
    ns["a-rex"] = "http://www.nordugrid.org/schemas/a-rex";
    ns["bes-factory"] = "http://schemas.ggf.org/bes/2006/08/bes-factory";
    ns["wsa"] = "http://www.w3.org/2005/08/addressing";
    ns["jsdl"] = "http://schemas.ggf.org/jsdl/2005/11/jsdl";
    ns["jsdl-posix"] = "http://schemas.ggf.org/jsdl/2005/11/jsdl-posix";
    ns["jsdl-arc"] = "http://www.nordugrid.org/ws/schemas/jsdl-arc";
    ns["jsdl-hpcpa"] = "http://schemas.ggf.org/jsdl/2006/07/jsdl-hpcpa";
    Arc::XMLNode id(ns, "ActivityIdentifier");
    id.NewChild("wsa:Address") = url.str();
    id.NewChild("wsa:ReferenceParameters").NewChild("a-rex:JobID") = pi.Rest();
    std::string idstr;
    id.GetXML(idstr);
    std::string desc_str;
    if (ac.getdesc(idstr,desc_str)){
      Arc::JobDescription desc;
      desc.setSource(desc_str);
      if (desc.isValid()) {
        logger_.msg(Arc::INFO,"Valid job description obtained");
        if ( !( in["ActivityDocument"] ) ) in.NewChild("bes-factory:ActivityDocument");
        Arc::XMLNode XMLdesc;
        desc.getXML(XMLdesc);
        in["ActivityDocument"].NewChild(XMLdesc);
      } else {
        // Wrongly formatted job description
        logger_.msg(Arc::ERROR, "MigrateActivity: job description could not be fetch from old cluster");
        Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Can't find JobDefinition element in request");
        InvalidRequestMessageFault(fault,"jsdl:JobDefinition","Element is missing");
        out.Destroy();
        return Arc::MCC_Status();
      }
    }
    */
    //else {
      // Not able to get job description
      logger_.msg(Arc::ERROR, "MigrateActivity: no job description found");
      //logger_.msg(Arc::ERROR, "MigrateActivity: job description could not be fetch from old cluster");
      Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Can't find JobDefinition element in request");
      InvalidRequestMessageFault(fault,"jsdl:JobDefinition","Element is missing");
      out.Destroy();
      return Arc::MCC_Status();
    //}
  };

  Arc::XMLNode jsdl = in["ActivityDocument"]["JobDefinition"];

  Arc::NS ns;
  // Creating migration XMLNode: records the old job's id and whether the
  // migration must proceed even if the old job cannot be contacted
  // (ForceMigration defaults to "true" when the client omits it).
  Arc::XMLNode migration(ns, "Migration");
  migration.NewChild("ActivityIdentifier") = migrateid;
  if( (bool)in["ForceMigration"]){
    migration.NewChild("ForceMigration") = (std::string)in["ForceMigration"];
  } else {
    migration.NewChild("ForceMigration") = "true";
  }

  std::string migrationStr;
  migration.GetDoc(migrationStr, true);
  logger_.msg(Arc::INFO, "Migration XML sent to AREXJob: %s", migrationStr);

  // Create the new local job; on failure map the job-creation error onto
  // the corresponding BES fault.
  JobIDGeneratorARC idgenerator(config.Endpoint());
  ARexJob job(jsdl,config,delegation,clientid,logger_,idgenerator,migration);
  if(!job) {
    ARexJobFailure failure_type = job;
    std::string failure = job.Failure();
    switch(failure_type) {
      case ARexJobDescriptionUnsupportedError: {
        Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Unsupported feature in job description");
        UnsupportedFeatureFault(fault,failure);
      }; break;
      case ARexJobDescriptionMissingError: {
        Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Missing needed element in job description");
        UnsupportedFeatureFault(fault,failure);
      }; break;
      case ARexJobDescriptionLogicalError: {
        // Failure string is "<element> <reason>"; split on first space.
        std::string element;
        std::string::size_type pos = failure.find(' ');
        if(pos != std::string::npos) {
          element=failure.substr(0,pos);
          failure=failure.substr(pos+1);
        };
        Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,"Logical error in job description");
        InvalidRequestMessageFault(fault,element,failure);
      }; break;
      default: {
        logger_.msg(Arc::ERROR, "MigrateActivity: Failed to migrate new job: %s",failure);
        // Failed to migrate new job (no corresponding BES fault defined - using generic SOAP error)
        logger_.msg(Arc::ERROR, "MigrateActivity: Failed to migrate new job");
        Arc::SOAPFault fault(out.Parent(),Arc::SOAPFault::Sender,("Failed to migrate new activity: "+failure).c_str());
        GenericFault(fault);
      }; break;
    };
    out.Destroy();
    return Arc::MCC_Status();
  };

  // Make SOAP response
  Arc::WSAEndpointReference identifier(out.NewChild("bes-factory:ActivityIdentifier"));
  // Make job's ID
  identifier.Address(config.Endpoint()); // address of service
  identifier.ReferenceParameters().NewChild("a-rex:JobID")=job.ID();
  identifier.ReferenceParameters().NewChild("a-rex:JobSessionDir")=config.Endpoint()+"/"+job.ID();
  // Echo the submitted activity document back to the client.
  out.NewChild(in["ActivityDocument"]);
  logger_.msg(Arc::VERBOSE, "MigrateActivity finished successfully");
  {
    std::string s;
    out.GetXML(s);
    logger_.msg(Arc::VERBOSE, "MigrateActivity: response = \n%s", s);
  };
  /* Needs to kill old job */
  return Arc::MCC_Status(Arc::STATUS_OK);
}
bool NOAADownloader::DownloadMETAR(const char *code, METAR &metar, JobRunner &runner) { #ifndef NDEBUG assert(strlen(code) == 4); for (unsigned i = 0; i < 4; i++) assert(code[i] >= 'A' && code[i] <= 'Z'); #endif // Build file url char url[256] = "ftp://tgftp.nws.noaa.gov/data/observations/metar/stations/"; strcat(url, code); strcat(url, ".TXT"); PathName path(url); // Open download session Net::Session session; if (session.Error()) return false; // Request the file char buffer[4096]; Net::DownloadToBufferJob job(session, path, buffer, sizeof(buffer) - 1); if (!runner.Run(job) || job.GetLength() < 0) return false; buffer[job.GetLength()] = 0; /* * Example: * * 2011/07/01 10:20 * EDDL 011020Z 31004KT 270V340 9999 SCT032TCU SCT050 17/09 Q1022 TEMPO SHRA */ // Parse date and time of last update const char *p = ParseDateTime(buffer, metar.last_update); if (p == buffer) return false; // Skip characters until line feed or string end while (*p != '\n' && *p != 0) p++; if (*p == 0) return false; // p is now at the first character after the line feed p++; if (*p == 0) return false; // Read rest of the response into the content string metar.content.clear(); AppendToContentString(p, metar.content); // Trim the content string TrimRight(metar.content.buffer()); return true; }
/**
 * KAuth helper action: apply the given keyboard model/layout/variant to the
 * system.
 *
 * Prefers the keyboardctl tool when installed; otherwise removes stale
 * keyboardctl configuration files. Then tries the localed D-Bus interface,
 * falling back to SetKeyboardLayoutJob when localed is unavailable.
 *
 * @param args map with "model", "layout" and "variant" strings
 * @return SuccessReply, or HelperErrorReply if any step failed
 */
ActionReply KeyboardAuthHelper::save( const QVariantMap& args )
{
    QString model = args["model"].toString();
    QString layout = args["layout"].toString();
    QString variant = args["variant"].toString();
    bool error = false;

    bool isKeyboardctlInstalled = QFile::exists( "/usr/bin/keyboardctl" );
    if ( isKeyboardctlInstalled )
    {
        // Run keyboardctl synchronously; stack allocation releases the
        // process object as soon as it is done (the original heap object,
        // parented to `this`, lived until the helper was destroyed).
        QProcess keyboardctl;
        keyboardctl.start( "/usr/bin/keyboardctl", QStringList() << "--set-layout" << model << layout << variant );
        keyboardctl.waitForStarted();
        keyboardctl.waitForFinished();
        // Bug fix: exitStatus() only distinguishes crash from normal exit;
        // a normal exit with a non-zero code is also a failure.
        if ( keyboardctl.exitStatus() != QProcess::NormalExit || keyboardctl.exitCode() != 0 )
            error = true;
    }
    else
    {
        // remove leftover keyboardctl files
        const QString keyboardctlXorgConf( "/etc/X11/xorg.conf.d/20-keyboard.conf" );
        const QString keyboardctlConf( "/etc/keyboard.conf" );
        if ( QFile::exists( keyboardctlXorgConf ) )
            QFile::remove( keyboardctlXorgConf );
        if ( QFile::exists( keyboardctlConf ) )
            QFile::remove( keyboardctlConf );
    }

    // localed d-bus interface to set X11 keyboard layout
    QDBusInterface dbusInterface( "org.freedesktop.locale1",
                                  "/org/freedesktop/locale1",
                                  "org.freedesktop.locale1",
                                  QDBusConnection::systemBus() );
    QVariant optionsVariant = dbusInterface.property( "X11Options" );
    if ( optionsVariant.isValid() )
    {
        QString options = optionsVariant.toString();
        /* SetX11Keyboard -> ssssbb
         * string -> layout
         * string -> model
         * string -> variant
         * string -> options
         * boolean -> convert (set vconsole keyboard too)
         * boolean -> arg_ask_password
         */
        QDBusMessage reply;
        reply = dbusInterface.call( "SetX11Keyboard", layout, model, variant, options, true, true );
        if ( reply.type() == QDBusMessage::ErrorMessage )
            error = true;
    }
    else
    {
        // localed unavailable: fall back to the local job.
        SetKeyboardLayoutJob job( model, layout, variant );
        if ( job.exec() == false )
            error = true;
    }

    if ( error )
        return ActionReply::HelperErrorReply();
    return ActionReply::SuccessReply();
}
bool NOAADownloader::DownloadTAF(const char *code, TAF &taf, JobRunner &runner) { #ifndef NDEBUG assert(strlen(code) == 4); for (unsigned i = 0; i < 4; i++) assert(code[i] >= 'A' && code[i] <= 'Z'); #endif // Build file url char url[256] = "ftp://tgftp.nws.noaa.gov/data/forecasts/taf/stations/"; strcat(url, code); strcat(url, ".TXT"); PathName path(url); // Open download session Net::Session session; if (session.Error()) return false; // Request the file char buffer[4096]; Net::DownloadToBufferJob job(session, path, buffer, sizeof(buffer) - 1); if (!runner.Run(job) || job.GetLength() < 0) return false; buffer[job.GetLength()] = 0; /* * Example: * * 2011/07/01 12:27 * TAF EDDL 011100Z 0112/0218 32010KT 9999 SCT040 * TEMPO 0112/0119 4000 SHRA SCT030CB PROB30 * TEMPO 0112/0118 32015G30KT TSRA * BECMG 0118/0121 32005KT PROB30 * TEMPO 0202/0207 BKN012 * BECMG 0210/0213 31010KT */ // Parse date and time of last update const char *p = ParseDateTime(buffer, taf.last_update); if (p == buffer) return false; // Skip characters until line feed or string end while (*p != '\n' && *p != 0) p++; if (*p == 0) return false; // p is now at the first character after the line feed p++; if (*p == 0) return false; // Read rest of the response into the content string taf.content.clear(); AppendToContentString(p, taf.content); // Trim the content string TrimRight(taf.content.buffer()); return true; }
/// Forward the locked dequeue notification to the decorated job.
void IdDecorator::aboutToBeDequeued_locked(QueueAPI *api)
{
    Q_ASSERT(d1);
    auto wrapped = job();
    wrapped->aboutToBeDequeued_locked(api);
}
/**
 * Run the fixed-timestep stochastic dynein stepping simulation.
 *
 * The motor alternates between a one-bound state (NEARBOUND/FARBOUND), a
 * both-bound state (BOTHBOUND) and UNBOUND. Each timestep dt the code draws
 * random numbers against per-state binding/unbinding rates to decide state
 * transitions; otherwise it advances the current dynamics object by Euler
 * steps, re-rolling any step whose velocities come out NaN (up to
 * max_attempts re-rolls). The caller-supplied `job` callback is invoked
 * once per accepted step (and once with NULL on reaching UNBOUND).
 *
 * @param runtime       simulated seconds to run; 0 means run indefinitely
 * @param rand_seed     seed for the Mersenne-Twister RNG
 * @param init_state    starting state (BOTHBOUND or a one-bound state)
 * @param init_position initial coordinates; layout depends on init_state
 *                      (see per-argument comments on the constructors below)
 * @param job           per-iteration callback: receives the active dynamics
 *                      object (or NULL when unbound), the current state,
 *                      the opaque job_msg/job_data, and the iteration count
 * @param job_msg       opaque pointer passed through to `job`
 * @param job_data      opaque data passed through to `job`
 */
void simulate(double runtime, double rand_seed, State init_state, double* init_position,
	      void (*job)(void* dyn, State s, void *job_msg, data_union* job_data, long long iteration),
	      void *job_msg, data_union* job_data) {
  if (FP_EXCEPTION_FATAL) {
    feenableexcept(FE_ALL_EXCEPT); // NaN generation kills program
    signal(SIGFPE, FPE_signal_handler);
  }

  MTRand* rand = new MTRand(rand_seed);

  // Exactly one of dyn_ob / dyn_bb is live at any time; the other is NULL.
  Dynein_onebound *dyn_ob;
  Dynein_bothbound *dyn_bb;

  if (init_state == BOTHBOUND) {
    dyn_ob = NULL;
    dyn_bb = new Dynein_bothbound(
			      init_position[0],      // nma_init
			      init_position[1],      // fma_init
			      init_position[2],      // nbx_init
			      init_position[3],      // nby_init
			      init_position[4],      // L
			      NULL,                  // internal forces
			      NULL,                  // brownian forces
			      NULL,                  // equilibrium angles
			      rand);                 // MTRand
  } else {
    dyn_ob = new Dynein_onebound(
			     init_position[0],    // bba_init
			     init_position[1],    // bma_init
			     init_position[2],    // uma_init
			     init_position[3],    // uba_init
			     init_position[4],    // bbx_init
			     init_position[5],    // bby_init
			     init_state,          // Initial state
			     NULL,                // Optional custom internal forces
			     NULL,                // Optional custom brownian forces
			     NULL,                // Optional custom equilibrium angles
			     rand);
    dyn_bb = NULL;
  }

  double t = 0;              // simulated time, advanced by dt per step
  long long iter = 0;        // accepted-step counter passed to the callback
  State current_state = init_state;

  // double rebinding_immune_until = 0; // to prevent immediate rebinding in BB->OB transitions

  bool run_indefinite;
  if (runtime == 0) {
    run_indefinite = true;
    printf("Running indefinitely.\n");
  } else {
    run_indefinite = false;
    printf("Running for %g s\n", runtime);
  }

  // Running average used only for the rate-debugging printout below.
  double near_unbinding_prob_printing_average = 0;
  int unbinding_prob_printing_n = 0;

  while( t < runtime or run_indefinite) {
    // ---- one-bound phase: loop here until unbinding or rebinding ----
    if (current_state == NEARBOUND or current_state == FARBOUND)
      while (t < runtime or run_indefinite) { // loop as long as it is onebound
	if (am_debugging_time) printf("\n==== t = %8g/%8g ====\n", t, runtime);
	double unbinding_prob = dyn_ob->get_unbinding_rate()*dt;
	double binding_prob = dyn_ob->get_binding_rate()*dt;
	if (am_debugging_rates and binding_prob != 0 and rand->rand() < 1e-3) {
	  printf("binding probability: %g, uby %g at time %g s\n", binding_prob, dyn_ob->get_uby(), t);
	}
	if (rand->rand() < unbinding_prob) { // unbind, switch to unbound
	  delete dyn_ob;
	  dyn_ob = NULL;
	  current_state = UNBOUND;
	  break;
	}
	else if (rand->rand() < binding_prob) { // switch to bothbound
	  dyn_bb = new Dynein_bothbound(dyn_ob, rand);
	  if (am_debugging_state_transitions) printf("Transitioning from onebound to bothbound\n");
	  if (am_debugging_state_transitions) printf("just bound b/c binding probability was: %g, boltzmann factor: %g\n", binding_prob, exp(-(dyn_bb->get_PE()-dyn_ob->get_PE())/kb/T));
	  delete dyn_ob;
	  dyn_ob = NULL;
	  current_state = BOTHBOUND;
	  job(dyn_bb, current_state, job_msg, job_data, iter);
	  t += dt;
	  iter++;
	  break;
	}
	else { // move like normal
	  job(dyn_ob, current_state, job_msg, job_data, iter);
	  t += dt;
	  iter++;

	  // Save the coordinates so a NaN-producing Euler step can be
	  // rolled back and re-rolled with fresh Brownian forces.
	  double old_bba = dyn_ob->get_bba() ;
	  double old_bma = dyn_ob->get_bma() ;
	  double old_uma = dyn_ob->get_uma() ;
	  double old_uba = dyn_ob->get_uba() ;

	  bool accept_step = false;
	  int attempts = 0;
	  const long long max_attempts = 1e6;
	  while(!accept_step){
	    if (attempts > max_attempts) {
	      printf("Over %lld attempts needed to avoid a NaN state in onebound at time %g, something must be wrong. Exiting.\n", max_attempts, t);
	      fprintf(stderr, "Over %lld attempts needed to avoid a NaN state at time %g, something must be wrong. Exiting.\n", max_attempts, t);
	      if (am_only_writing_on_crash) on_crash_write_movie_buffer();
	      exit(1);
	    }
	    // if (attempts > 0 and attempts % 1000 == 0) printf("Taking %g rerolls to avoid a NaN velocity at time %g\n", (double) attempts, t);

	    if(attempts > 0){
	      // Restore the pre-step coordinates before retrying.
	      dyn_ob->set_bba(old_bba);
	      dyn_ob->set_bma(old_bma);
	      dyn_ob->set_uma(old_uma);
	      dyn_ob->set_uba(old_uba);
	      dyn_ob->update_velocities();
	    }

	    // Explicit Euler step on all four angles.
	    double temp_bba = dyn_ob->get_bba() + dyn_ob->get_d_bba() * dt;
	    double temp_bma = dyn_ob->get_bma() + dyn_ob->get_d_bma() * dt;
	    double temp_uma = dyn_ob->get_uma() + dyn_ob->get_d_uma() * dt;
	    double temp_uba = dyn_ob->get_uba() + dyn_ob->get_d_uba() * dt;

	    dyn_ob->set_bba(temp_bba);
	    dyn_ob->set_bma(temp_bma);
	    dyn_ob->set_uma(temp_uma);
	    dyn_ob->set_uba(temp_uba);

	    // update_velocities() reports whether the new state is usable.
	    accept_step = dyn_ob->update_velocities();
	    attempts++;
	    total_attempts += 1;
	  }
	  // if (attempts > 1) {
	  //   printf("NaN avoiding code: (onebound) At time t=%g, took %d attempts to timestep without NaNs\n", t, attempts);
	  // }
	}
      }
    // ---- both-bound phase: loop here until one head unbinds ----
    if (current_state == BOTHBOUND) {
      while (t < runtime or run_indefinite) { // loop as long as it is bothbound
	if (am_debugging_time) printf("\n==== t = %8g/%8g ====\n", t, runtime);
	double near_unbinding_prob = dyn_bb->get_near_unbinding_rate()*dt;
	double far_unbinding_prob = dyn_bb->get_far_unbinding_rate()*dt;
	// A single roll is compared against both heads' probabilities;
	// re-roll zero so a zero rate can never fire.
	double roll = rand->rand();
	while (roll == 0) roll = rand->rand();
	if (am_debugging_rates and roll < 1e-8) {
	  near_unbinding_prob_printing_average = (near_unbinding_prob_printing_average*unbinding_prob_printing_n + near_unbinding_prob);
	  near_unbinding_prob_printing_average /= (unbinding_prob_printing_n + 1);
	  unbinding_prob_printing_n++;
	  printf("BB near unbinding probability: %g\n", near_unbinding_prob_printing_average);
	}
	bool unbind_near = roll < near_unbinding_prob;
	bool unbind_far = roll < far_unbinding_prob;
	if (unbind_near && unbind_far) {
	  // Both heads fired on the same roll: alternate which one wins.
	  if (debug_stepping) printf("both MTBDs want to fall off!\n");
	  if (iter % 2 == 0) unbind_far = false;
	  else unbind_near = false;
	}
	if (unbind_near and not frozen_in_bothbound) { // switch to farbound
	  dyn_ob = new Dynein_onebound(dyn_bb, rand, FARBOUND);
	  if (am_debugging_state_transitions) printf("Transitioning from bothbound to farbound\n");
	  if (am_debugging_state_transitions) printf("just unbound b/c unbinding probability was: %g, roll was: %g, boltzmann factor: %g\n", near_unbinding_prob, roll, exp(-(dyn_ob->get_PE()-dyn_bb->get_PE())/kb/T));
	  delete dyn_bb;
	  dyn_bb = NULL;
	  current_state = FARBOUND;
	  job(dyn_ob, current_state, job_msg, job_data, iter);
	  t += dt;
	  iter++;
	  // rebinding_immune_until = t + REBINDING_IMMUNITY_TIME;
	  break;
	}
	else if (unbind_far and not frozen_in_bothbound) { // switch to nearbound
	  dyn_ob = new Dynein_onebound(dyn_bb, rand, NEARBOUND);
	  if (am_debugging_state_transitions) printf("Transitioning from bothbound to nearbound\n");
	  if (am_debugging_state_transitions) printf("just unbound b/c unbinding probability was: %g, roll as: %g, boltzmann factor: %g\n", far_unbinding_prob, roll, exp(-(dyn_ob->get_PE()-dyn_bb->get_PE())/kb/T));
	  delete dyn_bb;
	  dyn_bb = NULL;
	  current_state = NEARBOUND;
	  job(dyn_ob, current_state, job_msg, job_data, iter);
	  t += dt;
	  iter++;
	  // rebinding_immune_until = t + REBINDING_IMMUNITY_TIME;
	  break;
	}
	else { // move like normal
	  job(dyn_bb, BOTHBOUND, job_msg, job_data, iter);
	  t += dt;
	  iter++;

	  // double temp_nma = dyn_bb->get_nma() + dyn_bb->get_d_nma()*dt;
	  // double temp_fma = dyn_bb->get_fma() + dyn_bb->get_d_fma()*dt;

	  // dyn_bb->set_nma(temp_nma);
	  // dyn_bb->set_fma(temp_fma);

	  // dyn_bb->update_velocities();

	  // Same save/step/retry scheme as in the one-bound branch, but
	  // with only the two motor angles.
	  double old_nma = dyn_bb->get_nma();
	  double old_fma = dyn_bb->get_fma();

	  bool accept_step = false;
	  int attempts = 0;
	  const long long max_attempts = 1e6;
	  while(!accept_step){
	    if (attempts > max_attempts) {
	      printf("Over %lld attempts needed to avoid a NaN state in bothbound at time %g, something must be wrong. Exiting.\n", max_attempts, t);
	      fprintf(stderr, "Over %lld attempts needed to avoid a NaN state at time %g, something must be wrong. Exiting.\n", max_attempts, t);
	      if (am_only_writing_on_crash) on_crash_write_movie_buffer();
	      exit(1);
	    }
	    // if (attempts > 0 and attempts % 100 == 0) printf("Taking %g rerolls to avoid a NaN velocity at time %g\n", (double) attempts, t);

	    if(attempts > 0){
	      dyn_bb->set_nma(old_nma);
	      dyn_bb->set_fma(old_fma);
	      dyn_bb->update_velocities();
	    }

	    double temp_nma = dyn_bb->get_nma() + dyn_bb->get_d_nma() * dt;
	    double temp_fma = dyn_bb->get_fma() + dyn_bb->get_d_fma() * dt;

	    dyn_bb->set_nma(temp_nma);
	    dyn_bb->set_fma(temp_fma);

	    accept_step = dyn_bb->update_velocities();
	    attempts++;
	    total_attempts += 1;
	  }
	  // if (attempts > 1) {
	  //   printf("NaN avoiding code: (bothbound) At time t=%g, took %d attempts to timestep without NaNs\n", t, attempts);
	  // }
	}
      }
    }
    // ---- unbound: report once and terminate the simulation ----
    if (current_state == UNBOUND) {
      job(NULL, UNBOUND, job_msg, job_data, iter);
      goto end_simulation;
    }
  }

  printf("Simulation exited successfully.\n");
  printf("Executed %f NaN retries, or %g retries per step.\n", total_attempts, total_attempts / t * dt);

 end_simulation:
  delete rand;
  // Only one of the two dynamics objects can still be live here
  // (deleting NULL is a no-op).
  if (dyn_bb == NULL) delete dyn_ob;
  else delete dyn_bb;
}
/// A decorator reports success exactly when the job it wraps succeeded.
bool IdDecorator::success() const
{
    Q_ASSERT(d1);
    const bool ok = job()->success();
    return ok;
}
/** * Main program execution */ int main (int argc, char *argv[]) { TOKENIZER *tokenizer; char string[1024] = ""; char *tok; int br; int most_recent_job = 0; ProcessMap *jobs = new_map(); //Set up signal handling signal(SIGINT, SIG_IGN); signal(SIGTSTP, SIG_IGN); signal(SIGTTOU, SIG_IGN); signal(SIGTTIN, SIG_IGN); signal(SIGTERM, SIG_IGN); string[1023] = '\0'; /* ensure that string is always null-terminated */ printf("\nEnter a command or type ctrl-d to end session.\n" ); write(1, "\nmy-sh$ ", 8); //Input loop while ((br = read( STDIN_FILENO, string, 1023 )) > 0) { if (br <= 1) { write(1, "my-sh$ ", 8); continue; } string[br-1] = '\0'; tokenizer = init_tokenizer(string); //Create linked list of tokens LinkedList *input_command = new_list(256); while( (tok = get_next_token( tokenizer )) != NULL ) { push_back(input_command, tok); free(tok); } free_tokenizer(tokenizer); int executed = 0; int error = 0; //Checks for fg or bg if (get_length(input_command) == 1) { char *only_token = pop_back(input_command); if (compare_strings(only_token, "fg")) { if (move_to_foreground(jobs, &most_recent_job) == -1) error = 1; executed = 1; } else if (compare_strings(only_token, "bg")) { if (move_to_background(jobs, &most_recent_job) == -1) error = 1; executed = 1; } else { push_back(input_command, only_token); } free(only_token); } //Process input for pipes or background if an error has already been detected, go to the next command if (!executed && !error) { //Sees if a background ampersand is detected bool is_background = determine_background(input_command); LinkedList *full_command = copy_list(input_command); if (is_background) { printf("Running: "); print_list(input_command); } //Test for pipes bool is_pipe = false; LinkedList *first_command_list = new_list(50); LinkedList *second_command_list = new_list(50); int valid_pipe = find_piping(input_command, &is_pipe, first_command_list, second_command_list); //Command blocks are created from the command lists CommandBlock *first_command 
= new_command_block(first_command_list); CommandBlock *second_command = new_command_block(second_command_list); //Runs a function to check that there are no invalid redirections in the case of a piping if (is_pipe) { valid_pipe = valid_pipe && check_pipe(first_command, second_command); } //Notifies user of any incorrect pipe commands if (!is_pipe && !first_command->valid) { printf("Invalid command structure\n"); } else if (is_pipe && (!first_command->valid || !second_command->valid || !valid_pipe) ) { printf("Invalid command structure\n"); } //If it is a pipe and all necessary conditions are valid, then the piping occurs if (is_pipe && first_command->valid && second_command->valid && valid_pipe) { if (pipe_job (first_command, second_command, is_background, full_command, jobs, &most_recent_job) == -1) error = 1; } // No piping else if (!is_pipe && first_command->valid) { if (job (first_command, is_background, full_command, jobs, &most_recent_job) == -1) error = 1; } destroy_list(first_command_list); destroy_list(second_command_list); destroy_block(first_command); destroy_block(second_command); destroy_list(full_command); } destroy_list(input_command); monitor_background_jobs (jobs, &most_recent_job); if (error) perror("ERROR "); write(1, "my-sh$ ", 8); } destroy_map(jobs); printf( "\nSession ended\n" ); return 0; }