// Verifies SimpleTest's validation hooks: pre_transform() must emit the
// expected error/warning counts, and post_transform() (after a child has
// been attached) must emit warnings only — each hook also flags itself.
TEST(TestValidate, Simple) {
    boost::shared_ptr<SimpleTest> node(new SimpleTest());

    {
        Reporter rep;
        NodeReporter node_rep(rep, node);
        ASSERT_EQ(0UL, rep.num_errors());
        ASSERT_EQ(0UL, rep.num_warnings());

        node->pre_transform(node_rep);

        EXPECT_EQ(1UL, rep.num_errors());
        EXPECT_EQ(3UL, rep.num_warnings());
        EXPECT_TRUE(node->pre_transform_called);
        EXPECT_FALSE(node->post_transform_called);
    }

    node->add_child(node_p(new Null()));

    {
        Reporter rep;
        NodeReporter node_rep(rep, node);
        ASSERT_EQ(0UL, rep.num_errors());
        ASSERT_EQ(0UL, rep.num_warnings());

        node->pre_transform_called = false;
        node->post_transform(node_rep);

        EXPECT_EQ(0UL, rep.num_errors());
        EXPECT_EQ(3UL, rep.num_warnings());
        EXPECT_FALSE(node->pre_transform_called);
        EXPECT_TRUE(node->post_transform_called);
    }
}
/**
 * Terminal failure path for the transfer.
 *
 * Publishes a "transfer finished" monitoring message carrying the default
 * error scope/category/phase and the accumulated error message, reports the
 * final state back to the server, archives the log file, cancels the
 * transfer and exits the process.
 *
 * @param classification  State classification reported to the server (e.g. "FAILED").
 * @param (second arg)    Ignored; the global errorMessage/globalErrorMessage are used instead.
 * @param finalState      Final transfer state recorded in the monitoring message.
 *
 * NOTE(review): this function never returns — it always calls exit(1).
 */
void abnormalTermination(const std::string& classification, const std::string&, const std::string& finalState) {
    terminalState = true;
    // Fold in any error captured by a background thread before reporting.
    if(globalErrorMessage.length() > 0){
        errorMessage += " " + globalErrorMessage;
    }
    // Populate and send the monitoring "transfer finished" message.
    msg_ifce::getInstance()->set_transfer_error_scope(&tr_completed, getDefaultScope());
    msg_ifce::getInstance()->set_transfer_error_category(&tr_completed, getDefaultReasonClass());
    msg_ifce::getInstance()->set_failure_phase(&tr_completed, getDefaultErrorPhase());
    msg_ifce::getInstance()->set_transfer_error_message(&tr_completed, errorMessage);
    msg_ifce::getInstance()->set_final_transfer_state(&tr_completed, finalState);
    msg_ifce::getInstance()->set_tr_timestamp_complete(&tr_completed, msg_ifce::getInstance()->getTimestamp());
    msg_ifce::getInstance()->SendTransferFinishMessage(&tr_completed);
    // Report the failure back to the server, preferring the tokenized file id
    // (strArray[0], set in session-reuse mode) over the global one.
    reporter.timeout = timeout;
    reporter.nostreams = nbstreams;
    reporter.buffersize = tcpbuffersize;
    if (strArray[0].length() > 0)
        reporter.constructMessage(g_job_id, strArray[0], classification, errorMessage, diff, source_size);
    else
        reporter.constructMessage(g_job_id, g_file_id, classification, errorMessage, diff, source_size);
    // Archive the transfer log; archive() returns an error string on failure.
    std::string moveFile = fileManagement->archive();
    if (moveFile.length() != 0) {
        logStream << fileManagement->timestamp() << "ERROR Failed to archive file: " << moveFile << '\n';
    }
    // Session-reuse mode keeps a url-list file on disk; remove it.
    if (reuseFile.length() > 0)
        unlink(readFile.c_str());
    cancelTransfer();
    sleep(1); // give the cancellation a moment to propagate before exiting
    exit(1);
}
/**
 * Render this CardDriverActivity block into the report: record-date range,
 * the raw daily-records array, and per-month HTML timesheet tables totalling
 * driving / working / rest / available durations.
 */
void CardDriverActivity::printOn(Reporter& report) const {
    report.tagValuePair(tr("oldestRecord"), oldestRecord);
    report.tagValuePair(tr("newestRecord"), newestRecord);
    report.writeArray(cardActivityDailyRecords, tr("cardActivityDailyRecords"));

    if (cardActivityDailyRecords.numberOfBlocks() > 0) {
        QString tablehead = QString("<table><tr><th></th><th>%1</th><th>%2</th><th>%3</th><th>%4</th></tr>")
            .arg(tr("Driving"), tr("Working"), tr("Rest"), tr("Available"));
        activityVector durationsMonth;
        int currentMonth = cardActivityDailyRecords[0].activityRecordDate.date().month();
        QString table = QString("<ul><li>%1:").arg(tr("Timesheet for %1").arg(QDate::longMonthName(currentMonth))) + tablehead;

        for (int j = 0; j < cardActivityDailyRecords.numberOfBlocks(); ++j) {
            // Month rolled over: emit a summary row for the finished month and
            // open a fresh table for the new one.
            if (cardActivityDailyRecords[j].activityRecordDate.date().month() != currentMonth) {
                currentMonth = cardActivityDailyRecords[j].activityRecordDate.date().month();
                table += QString("<tr><th>%1</th><th>%2</th><th>%3</th><th>%4</th><th>%5</th></tr>").arg(
                    tr("Summary"),
                    ActivityChangeInfo::formatClock(durationsMonth[ActivityChangeInfo::DRIVING]),
                    ActivityChangeInfo::formatClock(durationsMonth[ActivityChangeInfo::WORK]),
                    ActivityChangeInfo::formatClock(durationsMonth[ActivityChangeInfo::REST] + durationsMonth[ActivityChangeInfo::SHORTREST]),
                    ActivityChangeInfo::formatClock(durationsMonth[ActivityChangeInfo::AVAILABLE]));
                durationsMonth = activityVector();
                table += QString("</table></li><li>%1:").arg(tr("Timesheet for %1").arg(QDate::longMonthName(currentMonth))) + tablehead;
            }

            // Accumulate this day's durations per activity type.
            activityVector durations;
            for (int k = 0; k < cardActivityDailyRecords[j].activityChangeInfos.numberOfBlocks(); ++k) {
                durations[cardActivityDailyRecords[j].activityChangeInfos[k].activity]
                    += cardActivityDailyRecords[j].activityChangeInfos[k].duration;
            }
            table += QString("<tr><td>%1</td><td>%2</td><td>%3</td><td>%4</td><td>%5</td></tr>").arg(
                cardActivityDailyRecords[j].activityRecordDate.toString(),
                ActivityChangeInfo::formatClock(durations[ActivityChangeInfo::DRIVING]),
                ActivityChangeInfo::formatClock(durations[ActivityChangeInfo::WORK]),
                ActivityChangeInfo::formatClock(durations[ActivityChangeInfo::REST] + durations[ActivityChangeInfo::SHORTREST]),
                ActivityChangeInfo::formatClock(durations[ActivityChangeInfo::AVAILABLE]));
            durationsMonth += durations;
        }

        // Summary row for the last month.
        // FIX: the final placeholder was "%4" twice, so the fifth argument
        // (the "Available" total) was never substituted and the Rest total
        // appeared in both columns. Use "%5" as in the in-loop summary row.
        table += QString("<tr><th>%1</th><th>%2</th><th>%3</th><th>%4</th><th>%5</th></tr>").arg(
            tr("Summary"),
            ActivityChangeInfo::formatClock(durationsMonth[ActivityChangeInfo::DRIVING]),
            ActivityChangeInfo::formatClock(durationsMonth[ActivityChangeInfo::WORK]),
            ActivityChangeInfo::formatClock(durationsMonth[ActivityChangeInfo::REST] + durationsMonth[ActivityChangeInfo::SHORTREST]),
            ActivityChangeInfo::formatClock(durationsMonth[ActivityChangeInfo::AVAILABLE]));
        table += "</table></li></ul>";
        report.tagValuePair(tr("Timesheet"), table);
    }
}
int main(int argc, char * argv[]) { if (argc < 2) { printf("Usage at_command <command> ...\n"); return -1; } QApplication app(argc, argv); ModemMap mm; SerialPort sp(mm.signalChannel().toAscii()); Reporter * reporter = 0; if (mm.provider() == "ZTE") { reporter = new ZTEReport(mm); } else { reporter = new HuaWeiReport(mm); } QByteArray data; data.append(argv[1]); if (!data.endsWith('\r')) { data.append('\r'); } reporter->sendCommand(sp, data, 5000); qDebug("Result:"); qDebug() << data; return 0; }
void Forth_test_stdwords(bool verbose) { ForthEnv env; Reporter reporter; for (size_t i = 0; i < SK_ARRAY_COUNT(gRecs); i++) { ForthEngine engine(NULL); ForthWord* word = env.findWord(gRecs[i].fName); if (NULL == word) { SkString str; str.printf("--- can't find stdword %d", gRecs[i].fName); reporter.reportFailure(str.c_str()); } else { if (verbose) { SkDebugf("--- testing %s %p\n", gRecs[i].fName, word); } gRecs[i].fProc(word, &engine, &reporter); } } if (0 == reporter.fFailureCount) { SkDebugf("--- success!\n"); } else { SkDebugf("--- %d failures\n", reporter.fFailureCount); } }
/** Function to get an executable's Offset location. 1. Traverse through all the image entries available in the iImgVsExeStatus container. 2. Get the executable Offset. @internalComponent @released @param aExeName - Executable's name. @return - returns 0 upon failure to find the Executable. - otherwise returns the Offset. */ const unsigned int SidChecker::GetExecutableOffset(const String& aExeName) { Reporter* reporter = Reporter::Instance(iCmdLine->ReportFlag()); ImgVsExeStatus& aImgVsExeStatus = reporter->GetContainerReference(); ImgVsExeStatus::iterator imgBegin = aImgVsExeStatus.begin(); ImgVsExeStatus::iterator imgEnd = aImgVsExeStatus.end(); ExeVsMetaData::iterator exeBegin; ExeVsMetaData::iterator exeEnd; while(imgBegin != imgEnd) { ExeVsMetaData& exeVsMetaData = imgBegin->second; exeBegin = exeVsMetaData.begin(); exeEnd = exeVsMetaData.end(); while(exeBegin != exeEnd) { if(aExeName == (exeBegin->second).iExeName) { return (exeBegin->second).iIdData->iFileOffset; } ++exeBegin; } ++imgBegin; } return 0; }
// Write this certificate to the report. A successfully decrypted certificate
// is printed in full followed by its issuing authority reference; otherwise
// only the authority it still needs verification from is written.
void EncryptedCertificate::printOn(Reporter& report) const {
    if (!decryptedCertificate) {
        report.writeBlock(certificateAuthorityReference, tr("Unverified certificate, needs verification from:"));
        return;
    }
    decryptedCertificate->printOn(report);
    report.writeBlock(certificateAuthorityReference, tr("Certificate verified from:"));
}
/**
 * Observer callback: forwards a process object's progress (0.0–1.0) to the
 * Reporter passed through clientdata, scaled to a 0–100 percentage.
 */
static void ProgressCallback(Object *object, const EventObject &event, void *clientdata) {
    ProcessObject *process = dynamic_cast<ProcessObject *>(object);
    // static_cast is the correct named cast from void* (reinterpret_cast was overkill).
    Reporter *reporter = static_cast<Reporter *>(clientdata);
    if (process && reporter) {
        float progress = process->GetProgress();
        reporter->report(int(progress * 100.f));
    }
}
// Smoke-test a stopwatch-like Reporter: sleep ~100ms measured on the
// Reporter's own clock, print the elapsed duration, and (when `check` is
// set) assert it is at least 100ms.
// NOTE(review): `typename Reporter::clock` implies this is compiled in a
// template context where Reporter is a type parameter — confirm at the
// enclosing scope, which is not visible here.
void check_elapsed(bool check=true) {
    Reporter sw;
    ex::sleep_for<typename Reporter::clock>(milliseconds(100));
    typename Reporter::duration d = sw.elapsed();
    std::cout << d << std::endl;
    if (check)
        BOOST_TEST(d >= milliseconds(100));
}
// Emit one phase (header, metrics, footer) through the reporter, then recurse
// into each child phase, extending the dotted name path ("parent.child").
void Launcher::ReportPhase(Reporter& reporter, const PhaseCore& phase, const std::string& name) const {
    reporter.ReportPhaseHeader();
    reporter.ReportPhase(phase, phase.metrics());
    reporter.ReportPhaseFooter();

    for (const auto& child : phase._child) {
        ReportPhase(reporter, *child, name + "." + child->name());
    }
}
/**
 * @brief Main function: prints a startup banner, then opens the reporter.
 */
static void main() {
    cout << "----------------------------------------------------------------------------" << endl;
    cout << " REPORTER" << endl; // FIX: banner previously read "REPOTER"
    cout << "----------------------------------------------------------------------------" << endl;
    cout << endl;

    // (removed unused local `string ip;`)
    Reporter reporter;
    reporter.open();
}
void taskStatusUpdater(int time) { while (1) { if (strArray[0].length() > 0) { logStream << fileManagement->timestamp() << "INFO Sending back to the server url-copy is still alive!" << '\n'; reporter.constructMessageUpdater(job_id, strArray[0]); } else { logStream << fileManagement->timestamp() << "INFO Sending back to the server url-copy is still alive!" << '\n'; reporter.constructMessageUpdater(job_id, file_id); } boost::this_thread::sleep(boost::posix_time::seconds(time)); } }
// Drive a full test run: announce the run, execute each registered suite in
// container order (announcing and closing each one), then finish the run.
void run(Reporter& reporter) {
    reporter.run();
    for (const auto& entry : suites_) {
        reporter.runTestSuite(entry.second->number(), entry.second->name());
        entry.second->runTests(reporter);
        reporter.finishTestSuite();
    }
    reporter.finish();
}
/** * \brief Reduce down the values from another aggregate UDF instance, and copy in to this aggregate instance */ void reduce(const AggregateUDF* otherAgg, Reporter& reporter) { try { // loop through other's values and add to our equivalent value (if exists) // if doesn't exist in our values, add to our values const GroupAndAggregateUDF* other = (const GroupAndAggregateUDF*)otherAgg; for(auto ivi=other->interimValues.begin(); ivi!=other->interimValues.end(); ++ivi) { std::string groupBy(ivi->first); auto it = interimValues.find(groupBy); if (it == interimValues.end()) { // new IInterimResult* newInterim = resultTypeRef(); newInterim->groupByValue = groupBy; newInterim->merge(ivi->second); interimValues.insert(std::pair<std::string,IInterimResult*>(groupBy,newInterim)); } else { IInterimResult* interim = interimValues.at(groupBy); // merge interim->merge(ivi->second); } } // end for } catch (std::exception& ex) { reporter.error(("Exception in reduce(): " + std::string(ex.what())).c_str()); } } // end reduce
/**
 * \brief Get the values we need for the aggregate and cache them
 *
 * Assume the first column (index 0) is the group by field (with LOW cardinality)
 *
 * For each tuple: groups not yet seen get a fresh interim result of the
 * configured type; known groups accumulate in place. Any exception is routed
 * to the reporter.
 */
void map(TupleIterator& values, Reporter& reporter) {
    try {
        for (; !values.done(); values.next()) {
            // Column 0 is the group-by key.
            String tempString;
            values.value(0, tempString);
            std::string groupBy(tempString);

            auto it = interimValues.find(groupBy);
            if (it == interimValues.end()) {
                // New group: create an interim result and seed it.
                IInterimResult* newInterim = resultTypeRef();
                newInterim->groupByValue = groupBy;
                newInterim->add(values);
                interimValues.insert(std::pair<std::string, IInterimResult*>(groupBy, newInterim));
            } else {
                // Known group: accumulate through the iterator we already hold
                // (the original performed a redundant second lookup via at()).
                it->second->add(values);
            }
        }
    } catch (std::exception& ex) {
        reporter.error(("Exception in map(): " + std::string(ex.what())).c_str());
    }
}
// Record a diagnostic under the lock and, when a reporter is attached,
// notify it immediately.
void report(Diagnostic* diagnostic) {
    std::lock_guard<std::mutex> hold(lock_);
    intermittent_reports_.push_back(diagnostic);
    if (reporter_ != nullptr) {
        reporter_->report();
    }
}
/**
 * \brief Return the final result from the remaining aggregate UDF instance
 *
 * Writes every group's merged interim result into the output sequence; any
 * exception is routed to the reporter.
 */
void finish(OutputSequence& os, Reporter& reporter) {
    try {
        for (const auto& entry : interimValues) {
            entry.second->writeToMap(os);
        }
    } catch (std::exception& ex) {
        reporter.error(("Exception in finish(): " + std::string(ex.what())).c_str());
    }
}
/**
 * \brief Encode to XDQP stream
 *
 * Writes the interim-result count first, then each interim result, so that
 * decode() knows how many entries to read back.
 */
void encode(Encoder& e, Reporter& reporter) {
    try {
        // TODO consider gzip compression at this top level, and using a std::ostringstream to collate values (optional setting at aggregate level)
        // no need to encode resultTypeRef as that's handled in the start() or clone() function
        // do need to encode count though
        e.encode(static_cast<int>(interimValues.size())); // named cast instead of C-style (int)
        for (auto ivi = interimValues.begin(); ivi != interimValues.end(); ++ivi) {
            ivi->second->encode(e, reporter);
        }
    } catch (std::exception& ex) {
        reporter.error(("Exception in encode(): " + std::string(ex.what())).c_str());
    }
}
/**
 * \brief Decode from XDQP stream
 *
 * Reads the interim-result count written by encode(), then decodes each
 * interim result and registers it in interimValues.
 */
void decode(Decoder& d, Reporter& reporter) {
    try {
        int size;
        d.decode(size);
        for (int i = 0; i < size; i++) {
            // decode all interim results
            IInterimResult* interim = resultTypeRef();
            interim->decode(d, reporter);
            // FIX: each decoded result was previously discarded (the pointer
            // was overwritten on the next iteration), leaking the object and
            // dropping the received data. Register it under its group-by key,
            // merging when the group already exists — mirroring reduce().
            // (Assumes decode() restores groupByValue — TODO confirm.)
            auto it = interimValues.find(interim->groupByValue);
            if (it == interimValues.end()) {
                interimValues.insert(std::pair<std::string, IInterimResult*>(interim->groupByValue, interim));
            } else {
                it->second->merge(interim);
                // NOTE(review): ownership/destruction of the merged-from
                // object is unclear from this file, so it is not freed here.
            }
        }
    } catch (std::exception& ex) {
        reporter.error(("Exception in decode(): " + std::string(ex.what())).c_str());
    }
}
/**
 * Pushes a replication-progress update to the current sync source via the
 * shared reporter: schedules the update, waits for it to complete, and on
 * failure decides whether the sync source should be blacklisted.
 *
 * @return The status of the reporter's update round trip.
 */
Status SyncSourceFeedback::_updateUpstream(OperationContext* txn, BackgroundSync* bgsync) {
    Reporter* reporter;
    {
        stdx::lock_guard<stdx::mutex> lock(_mtx);
        reporter = _reporter;
    }

    auto syncTarget = reporter->getTarget();

    auto triggerStatus = reporter->trigger();
    if (!triggerStatus.isOK()) {
        warning() << "unable to schedule reporter to update replication progress on " << syncTarget
                  << ": " << triggerStatus;
        return triggerStatus;
    }

    auto status = reporter->join();
    if (!status.isOK()) {
        log() << "SyncSourceFeedback error sending update to " << syncTarget << ": " << status;

        // Some errors should not cause result in blacklisting the sync source.
        // FIX: these comparisons used '!=', which inverted the logic the
        // comments describe — the first (no-op) branch matched every error
        // except InvalidSyncSource, so the blacklist branch was effectively
        // unreachable. They must be '=='.
        if (status == ErrorCodes::InvalidSyncSource) {
            // The command could not be created because the node is now primary.
        } else if (status == ErrorCodes::NodeNotFound) {
            // The command could not be created, likely because this node was removed from the set.
        } else {
            // Blacklist sync target for .5 seconds and find a new one.
            stdx::lock_guard<stdx::mutex> lock(_mtx);
            auto replCoord = repl::ReplicationCoordinator::get(txn);
            replCoord->blacklistSyncSource(syncTarget, Date_t::now() + Milliseconds(500));
            bgsync->clearSyncTarget();
        }
    }
    return status;
}
// Parse the aggregate's configuration arguments. The interim-result factory
// defaults to the mean; "aggregate=sum" switches it to the sum. Any exception
// is routed to the reporter.
void start(Sequence& args, Reporter& reporter) {
    try {
        // default to mean average; take the function's address, don't call it
        resultTypeRef = &MeanInterimResult::create;
        while (!args.done()) {
            String argValue;
            args.value(argValue);
            std::string arg(argValue);
            if (arg == "aggregate=sum") {
                resultTypeRef = &SumInterimResult::create;
            }
            // TODO other parameters
            // TODO check for a "compression=gzip" setting, and set our configuration accordingly
            args.next();
        }
    } catch (std::exception& ex) {
        reporter.error(("Exception in start(): " + std::string(ex.what())).c_str());
    }
}
// Compile one source file to bytecode.
//
// Returns false only when the file cannot be opened (reported through
// reporter_->fileNotFound). Any outcome of actually attempting compilation
// returns true, including error cases:
//  - parseFile() yielding no module leaves bytecode_ untouched;
//  - semantic errors (logger_.errors() non-empty) also leave bytecode_
//    untouched.
// NOTE(review): returning true on those error paths looks intentional
// ("file was processed") but is worth confirming against callers.
bool compile(const std::string& file) {
    std::ifstream fin(file.c_str());
    if (!fin.is_open()) {
        reporter_->fileNotFound(file);
        return false;
    }
    if (boost::optional<cst::Module> module = parseFile(file, fin, logger_)) {
        ast::Module parsed = parseModule(*module, logger_);
        // Errors were recorded in logger_; keep the previous bytecode.
        if (!logger_.errors().empty())
            return true;
        FunctionAddrMap fam;
        // Swap the freshly generated bytecode into place.
        generateBytecode(parsed, fam).swap(bytecode_);
    }
    return true;
}
int main(int argc, char **argv) { REGISTER_SIGNAL(SIGABRT); REGISTER_SIGNAL(SIGSEGV); REGISTER_SIGNAL(SIGTERM); REGISTER_SIGNAL(SIGILL); REGISTER_SIGNAL(SIGFPE); REGISTER_SIGNAL(SIGBUS); REGISTER_SIGNAL(SIGTRAP); REGISTER_SIGNAL(SIGSYS); REGISTER_SIGNAL(SIGUSR1); // register signal SIGINT & SIGUSR1signal handler signal(SIGINT, signalHandler); signal(SIGUSR1, signalHandler); /**TODO: disable for now*/ //set_terminate(myterminate); //set_unexpected(myunexpected); std::string bytes_to_string(""); struct stat statbufsrc; struct stat statbufdest; GError * tmp_err = NULL; // classical GError/glib error management params = gfalt_params_handle_new(NULL); gfalt_set_event_callback(params, event_logger, NULL); int ret = -1; long long transferred_bytes = 0; UserProxyEnv* cert = NULL; hostname[1023] = '\0'; gethostname(hostname, 1023); for (register int i(1); i < argc; ++i) { std::string temp(argv[i]); if (temp.compare("-K") == 0) file_Metadata = std::string(argv[i + 1]); if (temp.compare("-J") == 0) job_Metadata = std::string(argv[i + 1]); if (temp.compare("-I") == 0) userFilesize = boost::lexical_cast<double>(argv[i + 1]); if (temp.compare("-H") == 0) bringonline = boost::lexical_cast<int>(argv[i + 1]); if (temp.compare("-G") == 0) reuseFile = std::string(argv[i + 1]); if (temp.compare("-F") == 0) debug = true; if (temp.compare("-D") == 0) sourceSiteName = std::string(argv[i + 1]); if (temp.compare("-E") == 0) destSiteName = std::string(argv[i + 1]); if (temp.compare("-C") == 0) vo = std::string(argv[i + 1]); if (temp.compare("-y") == 0) algorithm = std::string(argv[i + 1]); if (temp.compare("-z") == 0) checksum_value = std::string(argv[i + 1]); if (temp.compare("-A") == 0) compare_checksum = true; if (temp.compare("-w") == 0) timeout_per_mb = boost::lexical_cast<unsigned int>(argv[i + 1]); if (temp.compare("-x") == 0) no_progress_timeout = boost::lexical_cast<unsigned int>(argv[i + 1]); if (temp.compare("-u") == 0) lan_connection = true; if (temp.compare("-v") == 0) 
fail_nearline = true; if (temp.compare("-t") == 0) copy_pin_lifetime = boost::lexical_cast<int>(argv[i + 1]); if (temp.compare("-q") == 0) dont_ping_source = true; if (temp.compare("-r") == 0) dont_ping_dest = true; if (temp.compare("-s") == 0) disable_dir_check = true; if (temp.compare("-a") == 0) job_id = std::string(argv[i + 1]); if (temp.compare("-b") == 0) source_url = std::string(argv[i + 1]); if (temp.compare("-c") == 0) dest_url = std::string(argv[i + 1]); if (temp.compare("-d") == 0) overwrite = true; if (temp.compare("-e") == 0) nbstreams = boost::lexical_cast<unsigned int>(argv[i + 1]); if (temp.compare("-f") == 0) tcpbuffersize = boost::lexical_cast<unsigned int>(argv[i + 1]); if (temp.compare("-g") == 0) blocksize = boost::lexical_cast<unsigned int>(argv[i + 1]); if (temp.compare("-h") == 0) timeout = boost::lexical_cast<unsigned int>(argv[i + 1]); if (temp.compare("-i") == 0) daemonize = true; if (temp.compare("-j") == 0) dest_token_desc = std::string(argv[i + 1]); if (temp.compare("-k") == 0) source_token_desc = std::string(argv[i + 1]); if (temp.compare("-l") == 0) markers_timeout = boost::lexical_cast<unsigned int>(argv[i + 1]); if (temp.compare("-m") == 0) first_marker_timeout = boost::lexical_cast<unsigned int>(argv[i + 1]); if (temp.compare("-n") == 0) srm_get_timeout = boost::lexical_cast<unsigned int>(argv[i + 1]); if (temp.compare("-o") == 0) srm_put_timeout = boost::lexical_cast<unsigned int>(argv[i + 1]); if (temp.compare("-p") == 0) http_timeout = boost::lexical_cast<unsigned int>(argv[i + 1]); if (temp.compare("-B") == 0) file_id = std::string(argv[i + 1]); if (temp.compare("-proxy") == 0) proxy = std::string(argv[i + 1]); } g_file_id = file_id; g_job_id = job_id; /*TODO: until we find a way to calculate RTT(perfsonar) accurately, OS tcp auto-tuning does a better job*/ tcpbuffersize = DEFAULT_BUFFSIZE; CRYPTO_malloc_init(); // Initialize malloc, free, etc for OpenSSL's use SSL_library_init(); // Initialize OpenSSL's SSL libraries 
SSL_load_error_strings(); // Load SSL error strings ERR_load_BIO_strings(); // Load BIO error strings OpenSSL_add_all_algorithms(); // Load all available encryption algorithms OpenSSL_add_all_digests(); OpenSSL_add_all_ciphers(); StaticSslLocking::init_locks(); try{ /*send an update message back to the server to indicate it's alive*/ boost::thread btUpdater(taskStatusUpdater, 30); }catch (std::exception& e) { globalErrorMessage = e.what(); throw; }catch(...){ globalErrorMessage = "Failed to create boost thread, boost::thread_resource_error"; throw; } if (proxy.length() > 0) { // Set Proxy Env cert = new UserProxyEnv(proxy); } GError* handleError = NULL; handle = gfal_context_new(&handleError); if (!handle) { errorMessage = "Failed to create the gfal2 handle: "; if (handleError && handleError->message) { errorMessage += handleError->message; abnormalTermination("FAILED", errorMessage, "Error"); } } //reuse session if (reuseFile.length() > 0) { gfal2_set_opt_boolean(handle, "GRIDFTP PLUGIN", "SESSION_REUSE", TRUE, NULL); } std::vector<std::string> urlsFile; std::string line(""); readFile = "/var/lib/fts3/" + job_id; if (reuseFile.length() > 0) { std::ifstream infile(readFile.c_str(), std::ios_base::in); while (getline(infile, line, '\n')) { urlsFile.push_back(line); } infile.close(); unlink(readFile.c_str()); } //cancelation point long unsigned int reuseOrNot = (urlsFile.empty() == true) ? 
1 : urlsFile.size(); unsigned timerTimeout = reuseOrNot * (http_timeout + srm_put_timeout + srm_get_timeout + timeout); try{ boost::thread bt(taskTimer, timerTimeout); }catch (std::exception& e) { globalErrorMessage = e.what(); throw; }catch(...){ globalErrorMessage = "Failed to create boost thread, boost::thread_resource_error"; throw; } if (reuseFile.length() > 0 && urlsFile.empty() == true) { errorMessage = "Transfer " + g_job_id + " containes no urls with session reuse enabled"; abnormalTermination("FAILED", errorMessage, "Error"); } for (register unsigned int ii = 0; ii < reuseOrNot; ii++) { errorScope = std::string(""); reasonClass = std::string(""); errorPhase = std::string(""); if (reuseFile.length() > 0) { std::string mid_str(urlsFile[ii]); typedef boost::tokenizer<boost::char_separator<char> > tokenizer; tokenizer tokens(mid_str, boost::char_separator<char> (" ")); std::copy(tokens.begin(), tokens.end(), strArray); } else { strArray[0] = file_id; strArray[1] = source_url; strArray[2] = dest_url; strArray[3] = checksum_value; strArray[4] = userFilesize; strArray[5] = file_Metadata; } fileManagement->setSourceUrl(strArray[1]); fileManagement->setDestUrl(strArray[2]); fileManagement->setFileId(strArray[0]); fileManagement->setJobId(job_id); g_file_id = strArray[0]; g_job_id = job_id; reporter.timeout = timeout; reporter.nostreams = nbstreams; reporter.buffersize = tcpbuffersize; reporter.source_se = fileManagement->getSourceHostname(); reporter.dest_se = fileManagement->getDestHostname(); fileManagement->generateLogFile(); msg_ifce::getInstance()->set_tr_timestamp_start(&tr_completed, msg_ifce::getInstance()->getTimestamp()); msg_ifce::getInstance()->set_agent_fqdn(&tr_completed, hostname); msg_ifce::getInstance()->set_t_channel(&tr_completed, fileManagement->getSePair()); msg_ifce::getInstance()->set_transfer_id(&tr_completed, fileManagement->getLogFileName()); msg_ifce::getInstance()->set_source_srm_version(&tr_completed, srmVersion(strArray[1])); 
msg_ifce::getInstance()->set_destination_srm_version(&tr_completed, srmVersion(strArray[2])); msg_ifce::getInstance()->set_source_url(&tr_completed, strArray[1]); msg_ifce::getInstance()->set_dest_url(&tr_completed, strArray[2]); msg_ifce::getInstance()->set_source_hostname(&tr_completed, fileManagement->getSourceHostname()); msg_ifce::getInstance()->set_dest_hostname(&tr_completed, fileManagement->getDestHostname()); msg_ifce::getInstance()->set_channel_type(&tr_completed, "urlcopy"); msg_ifce::getInstance()->set_vo(&tr_completed, vo); msg_ifce::getInstance()->set_source_site_name(&tr_completed, sourceSiteName); msg_ifce::getInstance()->set_dest_site_name(&tr_completed, destSiteName); nstream_to_string = to_string<unsigned int>(nbstreams, std::dec); msg_ifce::getInstance()->set_number_of_streams(&tr_completed, nstream_to_string.c_str()); tcpbuffer_to_string = to_string<unsigned int>(tcpbuffersize, std::dec); msg_ifce::getInstance()->set_tcp_buffer_size(&tr_completed, tcpbuffer_to_string.c_str()); block_to_string = to_string<unsigned int>(blocksize, std::dec); msg_ifce::getInstance()->set_block_size(&tr_completed, block_to_string.c_str()); timeout_to_string = to_string<unsigned int>(timeout, std::dec); msg_ifce::getInstance()->set_transfer_timeout(&tr_completed, timeout_to_string.c_str()); msg_ifce::getInstance()->set_srm_space_token_dest(&tr_completed, dest_token_desc); msg_ifce::getInstance()->set_srm_space_token_source(&tr_completed, source_token_desc); msg_ifce::getInstance()->SendTransferStartMessage(&tr_completed); int checkError = fileManagement->getLogStream(logStream); if (checkError != 0) { std::string message = mapErrnoToString(checkError); errorMessage = "Failed to create transfer log file, error was: " + message; goto stop; } { //add curly brackets to delimit the scope of the logger logger log(logStream); gfalt_set_user_data(params, &log, NULL); log << fileManagement->timestamp() << "INFO Transfer accepted" << '\n'; log << fileManagement->timestamp() 
<< "INFO Proxy:" << proxy << '\n'; log << fileManagement->timestamp() << "INFO VO:" << vo << '\n'; //a log << fileManagement->timestamp() << "INFO Job id:" << job_id << '\n'; //a log << fileManagement->timestamp() << "INFO File id:" << strArray[0] << '\n'; //a log << fileManagement->timestamp() << "INFO Source url:" << strArray[1] << '\n'; //b log << fileManagement->timestamp() << "INFO Dest url:" << strArray[2] << '\n'; //c log << fileManagement->timestamp() << "INFO Overwrite enabled:" << overwrite << '\n'; //d log << fileManagement->timestamp() << "INFO nbstreams:" << nbstreams << '\n'; //e log << fileManagement->timestamp() << "INFO tcpbuffersize:" << tcpbuffersize << '\n'; //f log << fileManagement->timestamp() << "INFO blocksize:" << blocksize << '\n'; //g log << fileManagement->timestamp() << "INFO Timeout:" << timeout << '\n'; //h log << fileManagement->timestamp() << "INFO Daemonize:" << daemonize << '\n'; //i log << fileManagement->timestamp() << "INFO Dest space token:" << dest_token_desc << '\n'; //j log << fileManagement->timestamp() << "INFO Sourcespace token:" << source_token_desc << '\n'; //k log << fileManagement->timestamp() << "INFO markers_timeout:" << markers_timeout << '\n'; //l log << fileManagement->timestamp() << "INFO first_marker_timeout:" << first_marker_timeout << '\n'; //m log << fileManagement->timestamp() << "INFO srm_get_timeout:" << srm_get_timeout << '\n'; //n log << fileManagement->timestamp() << "INFO srm_put_timeout:" << srm_put_timeout << '\n'; //o log << fileManagement->timestamp() << "INFO http_timeout:" << http_timeout << '\n'; //p log << fileManagement->timestamp() << "INFO dont_ping_source:" << dont_ping_source << '\n'; //q log << fileManagement->timestamp() << "INFO dont_ping_dest:" << dont_ping_dest << '\n'; //r log << fileManagement->timestamp() << "INFO disable_dir_check:" << disable_dir_check << '\n'; //s log << fileManagement->timestamp() << "INFO copy_pin_lifetime:" << copy_pin_lifetime << '\n'; //t log << 
fileManagement->timestamp() << "INFO bringOnline:" << bringonline << '\n'; //t log << fileManagement->timestamp() << "INFO lan_connection:" << lan_connection << '\n'; //u log << fileManagement->timestamp() << "INFO fail_nearline:" << fail_nearline << '\n'; //v log << fileManagement->timestamp() << "INFO timeout_per_mb:" << timeout_per_mb << '\n'; //w log << fileManagement->timestamp() << "INFO no_progress_timeout:" << no_progress_timeout << '\n'; //x log << fileManagement->timestamp() << "INFO Checksum:" << strArray[3] << '\n'; //z log << fileManagement->timestamp() << "INFO Checksum enabled:" << compare_checksum << '\n'; //A log << fileManagement->timestamp() << "INFO User specified filesize:" << userFilesize << '\n'; //A log << fileManagement->timestamp() << "INFO File metadata:" << strArray[5] << '\n'; //A log << fileManagement->timestamp() << "INFO Job metadata:" << job_Metadata << '\n'; //A log << fileManagement->timestamp() << "INFO Send transfer start message to monitoring" << '\n'; if ((bringonline > 0 || copy_pin_lifetime > 0) && isSrmUrl(strArray[1])) { //issue a bring online reporter.constructMessage(job_id, strArray[0], "STAGING", "", diff, source_size); if (gfal2_bring_online(handle, (strArray[1]).c_str(), copy_pin_lifetime, bringonline, &tmp_err) < 0) { std::string tempError(tmp_err->message); const int errCode = tmp_err->code; log << fileManagement->timestamp() << "ERROR Failed to stage file, errno:" << tempError << '\n'; errorMessage = "Failed to stage file: " + tempError; errorScope = SOURCE; reasonClass = mapErrnoToString(errCode); errorPhase = TRANSFER_PREPARATION; g_clear_error(&tmp_err); goto stop; } //staging finished without failure log << fileManagement->timestamp() << "INFO Staging file" << source_size << " finished" << '\n'; reporter.constructMessage(job_id, strArray[0], "STAGING", "", diff, source_size); } //set to active log << fileManagement->timestamp() << "INFO Set the transfer to ACTIVE, report back to the server" << '\n'; 
reporter.constructMessage(job_id, strArray[0], "ACTIVE", "", diff, source_size); if (fexists(proxy.c_str()) != 0) { errorMessage = "ERROR proxy doesn't exist, probably expired and not renewed " + proxy; errorScope = SOURCE; reasonClass = mapErrnoToString(errno); errorPhase = TRANSFER_PREPARATION; log << fileManagement->timestamp() << errorMessage << '\n'; goto stop; } /*set infosys to gfal2*/ if (handle) { char *bdii = (char *) fileManagement->getBDII().c_str(); if (bdii) { log << fileManagement->timestamp() << "INFO BDII:" << bdii << '\n'; if (std::string(bdii).compare("false") == 0) { gfal2_set_opt_boolean(handle, "BDII", "ENABLED", false, NULL); } else { gfal2_set_opt_string(handle, "BDII", "LCG_GFAL_INFOSYS", bdii, NULL); } } } /*gfal2 debug logging*/ if (debug == true) { log << fileManagement->timestamp() << "INFO Set the transfer to debug mode" << '\n'; gfal_set_verbose(GFAL_VERBOSE_TRACE | GFAL_VERBOSE_VERBOSE | GFAL_VERBOSE_TRACE_PLUGIN); FILE* reopenDebugFile = freopen(fileManagement->getLogFileFullPath().c_str(), "w", stderr); chmod(fileManagement->getLogFileFullPath().c_str(), (mode_t) 0644); if (reopenDebugFile == NULL) { log << fileManagement->timestamp() << "WARN Failed to create debug file, errno:" << mapErrnoToString(errno) << '\n'; } gfal_log_set_handler((GLogFunc) log_func, NULL); } if (source_token_desc.length() > 0) gfalt_set_src_spacetoken(params, source_token_desc.c_str(), NULL); if (dest_token_desc.length() > 0) gfalt_set_dst_spacetoken(params, dest_token_desc.c_str(), NULL); gfalt_set_create_parent_dir(params, TRUE, NULL); //get checksum timeout from gfal2 log << fileManagement->timestamp() << "INFO get checksum timeout" << '\n'; int checksumTimeout = gfal2_get_opt_integer(handle, "GRIDFTP PLUGIN", "CHECKSUM_CALC_TIMEOUT", NULL); msg_ifce::getInstance()->set_checksum_timeout(&tr_completed, boost::lexical_cast<std::string > (checksumTimeout)); log << fileManagement->timestamp() << "INFO Stat the source surl start" << '\n'; for (int 
sourceStatRetry = 0; sourceStatRetry < 4; sourceStatRetry++) { if (gfal2_stat(handle, (strArray[1]).c_str(), &statbufsrc, &tmp_err) < 0) { std::string tempError(tmp_err->message); const int errCode = tmp_err->code; log << fileManagement->timestamp() << "ERROR Failed to get source file size, errno:" << tempError << '\n'; errorMessage = "Failed to get source file size: " + tempError; errorScope = SOURCE; reasonClass = mapErrnoToString(errCode); errorPhase = TRANSFER_PREPARATION; g_clear_error(&tmp_err); if (sourceStatRetry == 3 || ENOENT == errCode || EACCES == errCode){ log << fileManagement->timestamp() << "INFO No more retries for stat the source" << '\n'; goto stop; } } else { if (statbufsrc.st_size <= 0) { errorMessage = "Source file size is 0"; log << fileManagement->timestamp() << "ERROR " << errorMessage << '\n'; errorScope = SOURCE; reasonClass = mapErrnoToString(gfal_posix_code_error()); errorPhase = TRANSFER_PREPARATION; if (sourceStatRetry == 3){ log << fileManagement->timestamp() << "INFO No more retries for stat the source" << '\n'; goto stop; } } else if (userFilesize != 0 && userFilesize != statbufsrc.st_size) { std::stringstream error_; error_ << "User specified source file size is " << userFilesize << " but stat returned " << statbufsrc.st_size; errorMessage = error_.str(); log << fileManagement->timestamp() << "ERROR " << errorMessage << '\n'; errorScope = SOURCE; reasonClass = mapErrnoToString(gfal_posix_code_error()); errorPhase = TRANSFER_PREPARATION; if (sourceStatRetry == 3){ log << fileManagement->timestamp() << "INFO No more retries for stat the source" << '\n'; goto stop; } } else { log << fileManagement->timestamp() << "INFO Source file size: " << statbufsrc.st_size << '\n'; if (statbufsrc.st_size > 0) source_size = statbufsrc.st_size; //conver longlong to string std::string size_to_string = to_string<long double > (source_size, std::dec); //set the value of file size to the message msg_ifce::getInstance()->set_file_size(&tr_completed, 
size_to_string.c_str()); break; } } log << fileManagement->timestamp() << "INFO Stat the source file will be retried" << '\n'; sleep(3); //give it some time to breath } /*Checksuming*/ if (compare_checksum) { if (checksum_value.length() > 0) { //user provided checksum log << fileManagement->timestamp() << "INFO user provided checksum" << '\n'; std::vector<std::string> token = split((strArray[3]).c_str()); std::string checkAlg = token[0]; std::string csk = token[1]; gfalt_set_user_defined_checksum(params, checkAlg.c_str(), csk.c_str(), NULL); gfalt_set_checksum_check(params, TRUE, NULL); } else {//use auto checksum log << fileManagement->timestamp() << "INFO Calculate checksum auto" << '\n'; gfalt_set_checksum_check(params, TRUE, NULL); } } //overwrite dest file if exists if (overwrite) { log << fileManagement->timestamp() << "INFO Overwrite is enabled" << '\n'; gfalt_set_replace_existing_file(params, TRUE, NULL); } gfalt_set_timeout(params, timeout, NULL); gfalt_set_nbstreams(params, nbstreams, NULL); gfalt_set_tcp_buffer_size(params, tcpbuffersize, NULL); gfalt_set_monitor_callback(params, &call_perf, NULL); //calculate tr time in seconds start = std::time(NULL); //check all params before passed to gfal2 if ((strArray[1]).c_str() == NULL || (strArray[2]).c_str() == NULL) { log << fileManagement->timestamp() << "ERROR Failed to get source or dest surl" << '\n'; } log << fileManagement->timestamp() << "INFO Transfer Starting" << '\n'; if ((ret = gfalt_copy_file(handle, params, (strArray[1]).c_str(), (strArray[2]).c_str(), &tmp_err)) != 0) { diff = std::difftime(std::time(NULL), start); if (tmp_err != NULL && tmp_err->message != NULL) { log << fileManagement->timestamp() << "ERROR Transfer failed - errno: " << tmp_err->code << " Error message:" << tmp_err->message << '\n'; if (tmp_err->code == 110) { errorMessage = std::string(tmp_err->message); errorMessage += ", operation timeout"; } else { errorMessage = std::string(tmp_err->message); } errorScope = TRANSFER; 
reasonClass = mapErrnoToString(tmp_err->code); errorPhase = TRANSFER; g_clear_error(&tmp_err); } else { log << fileManagement->timestamp() << "ERROR Transfer failed - Error message: Unresolved error" << '\n'; errorMessage = std::string("Unresolved error"); errorScope = TRANSFER; reasonClass = GENERAL_FAILURE; errorPhase = TRANSFER; } goto stop; } else { diff = difftime(std::time(NULL), start); log << fileManagement->timestamp() << "INFO Transfer completed successfully" << '\n'; } transferred_bytes = source_size; bytes_to_string = to_string<double>(transferred_bytes, std::dec); msg_ifce::getInstance()->set_total_bytes_transfered(&tr_completed, bytes_to_string.c_str()); log << fileManagement->timestamp() << "INFO Stat the dest surl start" << '\n'; for (int destStatRetry = 0; destStatRetry < 4; destStatRetry++) { if (gfal2_stat(handle, (strArray[2]).c_str(), &statbufdest, &tmp_err) < 0) { if (tmp_err->message) { std::string tempError(tmp_err->message); log << fileManagement->timestamp() << "ERROR Failed to get dest file size, errno:" << tempError << '\n'; errorMessage = "Failed to get dest file size: " + tempError; errorScope = DESTINATION; reasonClass = mapErrnoToString(tmp_err->code); errorPhase = TRANSFER_FINALIZATION; } else { std::string tempError = "Undetermined error"; log << fileManagement->timestamp() << "ERROR Failed to get dest file size, errno:" << tempError << '\n'; errorMessage = "Failed to get dest file size: " + tempError; errorScope = DESTINATION; reasonClass = mapErrnoToString(tmp_err->code); errorPhase = TRANSFER_FINALIZATION; } g_clear_error(&tmp_err); if (destStatRetry == 3) { log << fileManagement->timestamp() << "INFO No more retry stating the destination" << '\n'; goto stop; } } else { if (statbufdest.st_size <= 0) { errorMessage = "Destination file size is 0"; log << fileManagement->timestamp() << "ERROR " << errorMessage << '\n'; errorScope = DESTINATION; reasonClass = mapErrnoToString(gfal_posix_code_error()); errorPhase = 
TRANSFER_FINALIZATION; if (destStatRetry == 3) { log << fileManagement->timestamp() << "INFO No more retry stating the destination" << '\n'; goto stop; } } else if (userFilesize != 0 && userFilesize != statbufdest.st_size) { std::stringstream error_; error_ << "User specified destination file size is " << userFilesize << " but stat returned " << statbufsrc.st_size; errorMessage = error_.str(); log << fileManagement->timestamp() << "ERROR " << errorMessage << '\n'; errorScope = DESTINATION; reasonClass = mapErrnoToString(gfal_posix_code_error()); errorPhase = TRANSFER_FINALIZATION; if (destStatRetry == 3) { log << fileManagement->timestamp() << "INFO No more retry stating the destination" << '\n'; goto stop; } } else { log << fileManagement->timestamp() << "INFO Destination file size: " << statbufdest.st_size << '\n'; dest_size = statbufdest.st_size; break; } } log << fileManagement->timestamp() << "WARN Stat the destination will be retried" << '\n'; sleep(3); //give it some time to breath } //check source and dest file sizes if (source_size == dest_size) { log << fileManagement->timestamp() << "INFO Source and destination file size matching" << '\n'; } else { log << fileManagement->timestamp() << "ERROR Source and destination file size are different" << '\n'; errorMessage = "Source and destination file size mismatch"; errorScope = DESTINATION; reasonClass = mapErrnoToString(gfal_posix_code_error()); errorPhase = TRANSFER_FINALIZATION; goto stop; } gfalt_set_user_data(params, NULL, NULL); }//logStream stop: msg_ifce::getInstance()->set_transfer_error_scope(&tr_completed, errorScope); msg_ifce::getInstance()->set_transfer_error_category(&tr_completed, reasonClass); msg_ifce::getInstance()->set_failure_phase(&tr_completed, errorPhase); msg_ifce::getInstance()->set_transfer_error_message(&tr_completed, errorMessage); if (errorMessage.length() > 0) { msg_ifce::getInstance()->set_final_transfer_state(&tr_completed, "Error"); reporter.timeout = timeout; reporter.nostreams 
= nbstreams; reporter.buffersize = tcpbuffersize; if (!terminalState) { /*TODO: re-enable it later*/ //logStream << fileManagement->timestamp() << "INFO Try issuing a cancel to clean resources" << '\n'; //cancelTransfer(); logStream << fileManagement->timestamp() << "INFO Report FAILED back to the server" << '\n'; reporter.constructMessage(job_id, strArray[0], "FAILED", errorMessage, diff, source_size); } } else { msg_ifce::getInstance()->set_final_transfer_state(&tr_completed, "Ok"); reporter.timeout = timeout; reporter.nostreams = nbstreams; reporter.buffersize = tcpbuffersize; logStream << fileManagement->timestamp() << "INFO Report FINISHED back to the server" << '\n'; reporter.constructMessage(job_id, strArray[0], "FINISHED", errorMessage, diff, source_size); } logStream << fileManagement->timestamp() << "INFO Send monitoring complete message" << '\n'; msg_ifce::getInstance()->set_tr_timestamp_complete(&tr_completed, msg_ifce::getInstance()->getTimestamp()); msg_ifce::getInstance()->SendTransferFinishMessage(&tr_completed); logStream << fileManagement->timestamp() << "INFO Closing the log stream" << '\n'; if (logStream.is_open()) { logStream.close(); } if (debug == true) { fclose(stderr); } std::string moveFile = fileManagement->archive(); }//end for reuse loop if (params) { gfalt_params_handle_delete(params, NULL); params = NULL; } if (handle) { gfal_context_free(handle); handle = NULL; } if (cert) { delete cert; cert = NULL; } if (reuseFile.length() > 0) unlink(readFile.c_str()); if (fileManagement) delete fileManagement; StaticSslLocking::kill_locks(); return EXIT_SUCCESS; }
/**
 * Test of LocalEstimatorFromSurfelFunctorAdapter on a light implicit digital
 * surface (a digital ellipse), using a dummy surfel functor with both a
 * constant convolution kernel and a Gaussian kernel.
 *
 * Checks: KSpace init succeeds, and the adapter's eval() at one surfel falls
 * within loose tolerances for radii 5 and 20 (the dummy functor effectively
 * counts surfels in the ball, hence the wide brackets).
 *
 * @return true when every sub-test passed (nbok == nb).
 */
bool testLocalEstimatorFromFunctorAdapter()
{
  unsigned int nbok = 0;  // number of passed checks
  unsigned int nb = 0;    // number of performed checks
  trace.beginBlock ( "Testing init ..." );
  using namespace Z3i;
  typedef ImplicitDigitalEllipse3<Point> ImplicitDigitalEllipse;
  typedef LightImplicitDigitalSurface<KSpace,ImplicitDigitalEllipse> SurfaceContainer;
  typedef DigitalSurface< SurfaceContainer > Surface;
  typedef SurfaceContainer::SurfelConstIterator ConstIterator;
  typedef SurfaceContainer::Surfel Surfel;

  trace.beginBlock("Creating Surface");
  Point p1( -10, -10, -10 );
  Point p2( 10, 10, 10 );
  KSpace K;
  nbok += K.init( p1, p2, true ) ? 1 : 0;
  nb++;
  trace.info() << "(" << nbok << "/" << nb << ") " << "K.init() is ok" << std::endl;

  // Digital ellipse with semi-axes 6.0 x 4.5 x 3.4; find one boundary surfel
  // (up to 10000 trials), then build the whole light surface from it.
  ImplicitDigitalEllipse ellipse( 6.0, 4.5, 3.4 );
  Surfel bel = Surfaces<KSpace>::findABel( K, ellipse, 10000 );
  SurfaceContainer* surfaceContainer = new SurfaceContainer ( K, ellipse, SurfelAdjacency<KSpace::dimension>( true ), bel );
  Surface surface( surfaceContainer ); // acquired — the Surface takes ownership of the container
  unsigned int nbsurfels = 0;
  for ( ConstIterator it = surface.begin(), it_end = surface.end(); it != it_end; ++it )
    {
      ++nbsurfels;
    }
  trace.info() << nbsurfels << " surfels found." << std::endl;
  trace.endBlock();

  trace.beginBlock("Creating adapter");
  typedef DGtal::functors::DummyEstimatorFromSurfels<Surfel, CanonicSCellEmbedder<KSpace> > Functor;
  typedef DGtal::functors::ConstValue< double > ConvFunctor;
  typedef LocalEstimatorFromSurfelFunctorAdapter<SurfaceContainer, LpMetric<Z3i::Space>, Functor, ConvFunctor> Reporter;
  typedef LocalEstimatorFromSurfelFunctorAdapter<SurfaceContainer, LpMetric<Z3i::Space>, Functor, DGtal::functors::GaussianKernel> ReporterGaussian;

  LpMetric<Z3i::Space> l2(2.0);  // Euclidean (L2) metric
  CanonicSCellEmbedder<KSpace> embedder(surface.container().space());
  Functor estimator(embedder, 1);
  ConvFunctor convFunc(1.0);
  Reporter reporter;
  reporter.attach(surface);
  reporter.setParams(l2,estimator,convFunc, 5);

  //We just test the init for Gaussian
  DGtal::functors::GaussianKernel gaussKernelFunc(1.0);
  ReporterGaussian reporterGaussian;
  reporterGaussian.attach(surface);
  reporterGaussian.setParams(l2,estimator,gaussKernelFunc, 5.0);
  reporterGaussian.init(1, surface.begin(), surface.end());

  reporter.init(1.0, surface.begin(), surface.end());
  Functor::Quantity val = reporter.eval( surface.begin() );
  trace.info() << "Value with radius 5= "<<val << std::endl;
  // Loose bracket around the expected surfel count for radius 5.
  nbok += ((fabs((double)val - 124.0)) < 40) ? 1 : 0;
  nb++;

  // Re-parameterize the same adapter with a larger radius and re-evaluate.
  reporter.setParams(l2,estimator,convFunc, 20.0);
  reporter.init(1, surface.begin(), surface.end());
  Functor::Quantity val2 = reporter.eval( surface.begin() );
  trace.info() << "Value with radius 20= "<<val2 << std::endl;
  nbok += ((fabs((double)val2 - 398.0)) < 120) ? 1 : 0;
  nb++;

  trace.endBlock();
  trace.endBlock();
  trace.info() << "(" << nbok << "/" << nb << ") " << "true == true" << std::endl;
  return nbok == nb;
}
void check_report() { Reporter sw; ex::sleep_for<typename Reporter::clock>(milliseconds(100)); sw.report(); }
/**
 * Test of the sphere fitting estimator (SphereFittingEstimator) driven by a
 * cached, convolution-based normal vector field on the digitized boundary of
 * an implicit ball.
 *
 * Pipeline: GaussDigitizer -> LightImplicitDigitalSurface -> normal field via
 * ElementaryConvolutionNormalVectorEstimator (Gaussian-weighted, cached in an
 * EstimatorCache) -> sphere fit evaluated at every surfel.
 *
 * NOTE(review): apart from K.init(), the final check is `nbok += true`, so
 * this test mainly verifies that the pipeline runs to completion.
 *
 * @return true when all checks passed (nbok == nb).
 */
bool testFitting()
{
  unsigned int nbok = 0;  // passed checks
  unsigned int nb = 0;    // performed checks
  trace.beginBlock ( "Testing init ..." );
  using namespace Z3i;

  trace.beginBlock("Creating Surface");
  Point p1( -20, -20, -20 );
  Point p2( 20, 20, 20 );

  // Implicit ball of radius 4 centred at (6,0,0), digitized with gridstep 1.
  ImplicitBall<Z3i::Space> shape( RealPoint(6.0,0,0), 4);
  typedef GaussDigitizer<Z3i::Space, ImplicitBall<Z3i::Space> > Gauss;
  Gauss gauss;
  gauss.attach(shape);
  gauss.init(p1, p2, 1);

  typedef LightImplicitDigitalSurface<KSpace, Gauss > SurfaceContainer;
  typedef DigitalSurface<SurfaceContainer> Surface;
  typedef Surface::Surfel Surfel;

  KSpace K;
  nbok += K.init( p1, p2, true ) ? 1 : 0;
  nb++;
  trace.info() << "(" << nbok << "/" << nb << ") " << "K.init() is ok" << std::endl;

  // Find one boundary surfel (up to 10000 trials), then build the whole
  // light surface from it.
  Surfel bel = Surfaces<KSpace>::findABel( K, gauss, 10000 );
  SurfaceContainer* surfaceContainer = new SurfaceContainer ( K, gauss, SurfelAdjacency<KSpace::dimension>( true ), bel );
  Surface surface( surfaceContainer ); // acquired — the Surface takes ownership of the container
  CanonicSCellEmbedder<KSpace> embedder(surface.container().space());
  trace.endBlock();

  trace.beginBlock("Normal vector field computation");
  typedef functors::ElementaryConvolutionNormalVectorEstimator<Surfel, CanonicSCellEmbedder<KSpace> > FunctorNormal;
  typedef LocalEstimatorFromSurfelFunctorAdapter<SurfaceContainer, Z3i::L2Metric, FunctorNormal, DGtal::functors::GaussianKernel> ReporterNormal;
  typedef EstimatorCache<ReporterNormal> NormalCache;

  //estimator
  DGtal::functors::GaussianKernel gaussKernelFunc(5.0);
  FunctorNormal functorNormal(embedder, 1.0);
  ReporterNormal reporterNormal;
  reporterNormal.attach(surface);
  // l2Metric: presumably a namespace/file-scope Z3i::L2Metric instance
  // defined elsewhere in this file — TODO confirm.
  reporterNormal.setParams(l2Metric, functorNormal, gaussKernelFunc, 5.0);

  //caching normal field
  NormalCache normalCache(reporterNormal);
  normalCache.init( 1, surface.begin(), surface.end());
  trace.info() << "Normal vector field cached... "<< normalCache << std::endl;
  trace.endBlock();

  trace.beginBlock("Creating sphere fitting adapter from normal vector field");
  typedef functors::SphereFittingEstimator<Surfel, CanonicSCellEmbedder<KSpace> , NormalCache> Functor;
  typedef functors::ConstValue< double > ConvFunctor;
  typedef LocalEstimatorFromSurfelFunctorAdapter<SurfaceContainer, Z3i::L2Metric, Functor, ConvFunctor> Reporter;

  Functor fitter(embedder,1.0, 5.0, normalCache);
  ConvFunctor convFunc(1.0);
  Reporter reporter;
  reporter.attach(surface);
  reporter.setParams(l2Metric, fitter , convFunc, 15.0);
  reporter.init(1, surface.begin(), surface.end());

  // Fit a sphere at every surfel and log its centre and radius.
  for(Surface::ConstIterator it = surface.begin(), ite=surface.end(); it!=ite; ++it)
    {
      Functor::Quantity val = reporter.eval( it );
      trace.info() << "Fitting = "<<val.center <<" rad="<<val.radius<<std::endl;
    }
  trace.endBlock();
  trace.endBlock();

  nbok += true ? 1 : 0;
  nb++;
  trace.info() << "(" << nbok << "/" << nb << ") " << "true == true" << std::endl;
  return nbok == nb;
}
Reporter operator<<(Reporter report, const ReporterEnd& end) { report.processEnd(); return report; }
/* * single_run.cpp * it is basically the independet thread that can be launched * independently * Created on: Mar 2, 2011 * Author: cnua */ int single_run (int run_id, int cur_run, int* SEED, string OUTPUT_STRING, \ RunParameters* p_parameters,\ ProblemDefinition* p_problem, \ Reporter pop_reporter,\ time_t start, time_t finish, double delta_t) { // create directory run_k char num_field[10]; sprintf(num_field, "%d", cur_run+1); string num_field_s = num_field; string DIR_RUN_K = OUTPUT_STRING + "/run_" + num_field; mkdir(DIR_RUN_K.c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH ); // seed the random generator srand(SEED[cur_run]); // start time time(&start); int elapsed_time; //--------------------------------------------------------------------- // CREATE THE POPULATION (constructor called) //--------------------------------------------------------------------- Variable* Z; //to be included in ProblemDefinition... p_problem->initialise_variables(&Z, p_parameters->max_n_periods); // do you really need new? no, but don't touch it for now Population* P = new Population(p_parameters, p_problem); if (!P) { cerr << "\nmain : Error creating population!!!\n"; exit(-1); } //as it's hard to pass Pop as a parameter to fdf_c__ through fortran functions, treat it as a global variable Pop = P; cout << "\nP = " << P; cout << "\nPop = " << Pop; ///// INITIAL GENERATION (0) /////////////////////////////////////// // trees before parameters insertion cout << "\nInitialization of the population (generation 0)\n"; P->print_population_without_parameters(0); /// split the data set in tuning set (data_tuning) and validation set (data_validation). See SPLIT and VALIDATING_LINES in input file //this function will also allow to increase the number of fitness cases during the run... P->split_data(p_parameters, p_problem, 0,1); // here or before parallel section begins? // Depends if you want to change fitness cases during the evolution... 
/////////// OTHER GENERATIONS /////////////////////////////////////// int check_end = 0; int last_gen=0; for (int i=0; i<p_parameters->G+1; i++) { // generations if (i) { //skip generation 0 // split the data for the current generation (this function will also allow to increase the number of fitness cases during the run...) //P->split_data(i,G,split); // not used... /* if ((i%6)==0) { //6 // KILLING and FILLING P->kill_and_fill(&problem); //cin.get(); } else //*/ // GENETIC OPERATORS: sorting, reproduction, crossover, mutation, pruning P->new_spawn(*p_parameters, *p_problem, p_parameters->nfitcases,i); // print population WITHOUT parameters after genetic operations //if (COMMENT) { // printf("\n\n***********Generation %d after genetic operations (not sorted, new trees marked with f9.999999E+99)************\n", i-1); // P->print_population_without_parameters(i-1); // printf("\n***********************************************************************\n"); //} } // evaluate fitness function (in structural GP parameters are added and tuned first, then the evaluation is performed) P->evaluate(i,p_parameters->G); //---------------------------------------------------------------------------------- // extfitness - keeping the individual with least fitness value ------- // sort according to fitness (error) //P->sort(i,tree_comp_fitness); ///printf("\n\n***********Generation %d after genetic operations (not sorted, new trees marked with f9.999999E+99)************\n", i-1); //P->print_population_without_parameters(i-1); //printf("\n***********************************************************************\n"); // update the best individual - structure and complete tree (for PARAMETER INHERITANCE) //P->update_ext_archive(); //--------------------------------------------------------------------------------------------- // sort according to F // VITAL! Both populations must be sorted, trees[] and complete_trees[]... 
P->sort(i,tree_comp_F); ///printf("\n\n***********Generation %d after genetic operations (not sorted, new trees marked with f9.999999E+99)************\n", i-1); ///P->print_population_without_parameters(i-1); ///printf("\n***********************************************************************\n"); // update the best individual - structure and complete tree (for PARAMETER INHERITANCE) P->update_ext_archive(); // compute elapsed time elapsed_time = (int)(P->compute_time(start, finish, &delta_t)); if (VERBOSE) { // print elapsed time cout << "Elapsed time: " << elapsed_time << " sec"; //total seconds // print out the best member - population WITHOUT parameters P->print_population_without_parameters(i); // print out the best member - population WITH parameters P->print_population_with_parameters(i); } // compute statistical data relating to population (vital if data is shared through populations) // IT's REALLY IMPORTANT that this function is executed after evaluate and sort, // as evaluate uses statistical data referring to the previous generation P->compute_statistics(); // evaluate termination condition check_end=P->terminate(p_parameters->threshold); last_gen = i; // for the split data set, re-tune and re-evaluate the individuals on the merged data set if (p_parameters->split) { if ((check_end) || (i==p_parameters->G)) { cout << "\nBest Individual re-tuning and re-evaluation on the whole dataset" << endl; P->split_data(p_parameters, p_problem,last_gen,last_gen); P->evaluate(i,p_parameters->G); } } // PRINT TO FILE OPERATIONS (in case of crash, data is saved) ------------------------- // print to file (if termination criterion met, it closes the stream of data to file ) pop_reporter.stats2file(p_parameters, P, DIR_RUN_K, i, check_end); // write the test-points (training set), and the corresponding values of g_obj and the best individual (only one) pop_reporter.points2file(p_parameters, p_problem, P, DIR_RUN_K, i, check_end, start, finish, delta_t,SEED[cur_run]); // 
write best individual's expression ATTENZIONE: cambiato il 7/4/2015... da verificare pop_reporter.update_best2file_build(P, DIR_RUN_K, i, check_end); //old function: update_best2file(P, DIR_RUN_K, i, check_end); // update the list of the best-so-far individuals (see elite or archive) - truncation! ATTENZIONE: cambiato il 7/4/2015... da verificare pop_reporter.archive2file_build(P, DIR_RUN_K, i, check_end); //old function: objective_table2file(P, DIR_RUN_K, i, check_end); // update no of tree evaluations pop_reporter.n_tree_eval2file(P, DIR_RUN_K, i, check_end); // ------------------------------------------------------------------------------------- if (check_end) break; } # pragma omp critical { cout << "Elapsed time to complete run " << cur_run+1 << ": " << elapsed_time << " sec" << endl; //total seconds //termination criterion satisfied if (check_end) { cout << "Termination criterion satisfied (RMSE < " << p_parameters->threshold << ")." << endl; cout << "Possible solution: " << endl; P->print_population_with_parameters(last_gen); cout << "Check latest_archive.txt for solutions\n" << endl; } else { P->print_population_with_parameters(last_gen); } } // just for test //P->get_tree_derivative_given_norm_vector(problem, P->complete_trees[0]); ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // write node statistics to file pop_reporter.node_stats2file(p_parameters, P, DIR_RUN_K); // end - free memory allocated to Population delete P; }
void Launcher::Report(Reporter& reporter) const { // Report header, system & environment reporter.ReportHeader(); reporter.ReportSystem(); reporter.ReportEnvironment(); reporter.ReportBenchmarksHeader(); // For all registered benchmarks... for (auto& benchmark : _benchmarks) { // Filter performed benchmarks if (benchmark->_launched) { // Report benchmark results reporter.ReportBenchmarkHeader(); reporter.ReportBenchmark(*benchmark, benchmark->settings()); reporter.ReportPhasesHeader(); for (auto& root_phase : benchmark->_phases) ReportPhase(reporter, *root_phase, root_phase->name()); reporter.ReportPhasesFooter(); reporter.ReportBenchmarkFooter(); } } // Report footer reporter.ReportBenchmarksFooter(); reporter.ReportFooter(); }
void start_reporter(STATE) { if(!reporter_) { reporter_ = new Reporter(state, this); reporter_->start(state); } }