Example #1
void Load::loadMultipleFiles() {
  // allFilenames contains "rows" of filenames. If a row has more than one file
  // in it, then that row is to be summed across each file in the row.
  const std::vector<std::vector<std::string>> allFilenames =
      getProperty("Filename");
  std::string outputWsName = getProperty("OutputWorkspace");

  std::vector<std::string> wsNames(allFilenames.size());
  std::transform(allFilenames.begin(), allFilenames.end(), wsNames.begin(),
                 generateWsNameFromFileNames);

  auto wsName = wsNames.cbegin();
  assert(allFilenames.size() == wsNames.size());

  std::vector<API::Workspace_sptr> loadedWsList;
  loadedWsList.reserve(allFilenames.size());

  Workspace_sptr tempWs;

  // Cycle through the filenames and wsNames.
  for (auto filenames = allFilenames.cbegin(); filenames != allFilenames.cend();
       ++filenames, ++wsName) {
    auto filename = filenames->cbegin();
    Workspace_sptr sumWS = loadFileToWs(*filename, *wsName);

    ++filename;
    for (; filename != filenames->cend(); ++filename) {
      tempWs = loadFileToWs(*filename, "__@loadsum_temp@");
      sumWS = plusWs(sumWS, tempWs);
    }

    API::WorkspaceGroup_sptr group =
        boost::dynamic_pointer_cast<WorkspaceGroup>(sumWS);
    if (group) {
      std::vector<std::string> childWsNames = group->getNames();
      auto childWsName = childWsNames.begin();
      size_t count = 1;
      for (; childWsName != childWsNames.end(); ++childWsName, ++count) {
        Workspace_sptr childWs = group->getItem(*childWsName);
        const std::string childName =
            group->getName() + "_" + std::to_string(count);
        API::AnalysisDataService::Instance().addOrReplace(childName, childWs);
        // childWs->setName(group->getName() + "_" +
        // boost::lexical_cast<std::string>(count));
      }
    }
    // Add the sum to the list of loaded workspace names.
    loadedWsList.push_back(sumWS);
  }

  // If we only have one loaded ws, set it as the output.
  if (loadedWsList.size() == 1) {
    setProperty("OutputWorkspace", loadedWsList[0]);
    AnalysisDataService::Instance().rename(loadedWsList[0]->getName(),
                                           outputWsName);
  }
  // Else we have multiple loaded workspaces - group them and set the group as
  // output.
  else {
    API::WorkspaceGroup_sptr group = groupWsList(loadedWsList);
    setProperty("OutputWorkspace", group);

    std::vector<std::string> childWsNames = group->getNames();
    size_t count = 1;
    for (auto &childWsName : childWsNames) {
      if (childWsName == outputWsName) {
        Mantid::API::Workspace_sptr child = group->getItem(childWsName);
        // child->setName(child->getName() + "_" +
        // boost::lexical_cast<std::string>(count));
        const std::string childName =
            child->getName() + "_" + std::to_string(count);
        API::AnalysisDataService::Instance().addOrReplace(childName, child);
        count++;
      }
    }

    childWsNames = group->getNames();
    count = 1;
    for (auto &childWsName : childWsNames) {
      Workspace_sptr childWs = group->getItem(childWsName);
      std::string outWsPropName = "OutputWorkspace_" + std::to_string(count);
      ++count;
      declareProperty(Kernel::make_unique<WorkspaceProperty<Workspace>>(
          outWsPropName, childWsName, Direction::Output));
      setProperty(outWsPropName, childWs);
    }
  }

  // Clean up.
  if (tempWs) {
    Algorithm_sptr alg =
        AlgorithmManager::Instance().createUnmanaged("DeleteWorkspace");
    alg->initialize();
    alg->setChild(true);
    alg->setProperty("Workspace", tempWs);
    alg->execute();
  }
}
Example #2
std::unique_ptr<MeshLib::Mesh> appendLinesAlongPolylines(
    const MeshLib::Mesh& mesh, const GeoLib::PolylineVec& ply_vec)
{
    // copy existing nodes and elements
    std::vector<MeshLib::Node*> vec_new_nodes = MeshLib::copyNodeVector(mesh.getNodes());
    std::vector<MeshLib::Element*> vec_new_eles = MeshLib::copyElementVector(mesh.getElements(), vec_new_nodes);

    std::vector<int> new_mat_ids;
    {
        if (mesh.getProperties().existsPropertyVector<int>("MaterialIDs")) {
            auto ids =
                mesh.getProperties().getPropertyVector<int>("MaterialIDs");
            new_mat_ids.reserve(ids->size());
            std::copy(ids->cbegin(), ids->cend(),
                      std::back_inserter(new_mat_ids));
        }
    }
    int max_matID(0);
    if (!new_mat_ids.empty())
        max_matID = *(std::max_element(new_mat_ids.cbegin(), new_mat_ids.cend()));

    const std::size_t n_ply (ply_vec.size());
    // for each polyline
    for (std::size_t k(0); k < n_ply; k++)
    {
        const GeoLib::Polyline* ply = (*ply_vec.getVector())[k];

        // search nodes on the polyline
        MeshGeoToolsLib::MeshNodesAlongPolyline mshNodesAlongPoly(
            mesh, *ply, mesh.getMinEdgeLength() * 0.5,
            MeshGeoToolsLib::SearchAllNodes::Yes);
        auto &vec_nodes_on_ply = mshNodesAlongPoly.getNodeIDs();
        if (vec_nodes_on_ply.empty()) {
            std::string ply_name;
            ply_vec.getNameOfElementByID(k, ply_name);
            INFO("No nodes found on polyline %s", ply_name.c_str());
            continue;
        }

        // add line elements
        for (std::size_t i=0; i<vec_nodes_on_ply.size()-1; i++) {
            std::array<MeshLib::Node*, 2> element_nodes;
            element_nodes[0] = vec_new_nodes[vec_nodes_on_ply[i]];
            element_nodes[1] = vec_new_nodes[vec_nodes_on_ply[i+1]];
            vec_new_eles.push_back(
                new MeshLib::Line(element_nodes, vec_new_eles.size()));
            new_mat_ids.push_back(max_matID+k+1);
        }
    }

    // generate a mesh
    const std::string name = mesh.getName() + "_with_lines";
    auto new_mesh =
        std::make_unique<MeshLib::Mesh>(name, vec_new_nodes, vec_new_eles);
    auto opt_mat_pv = new_mesh->getProperties().createNewPropertyVector<int>(
        "MaterialIDs", MeshLib::MeshItemType::Cell);
    if (opt_mat_pv) {
        auto & mat_pv = *opt_mat_pv;
        mat_pv.reserve(new_mat_ids.size());
        std::copy(new_mat_ids.cbegin(), new_mat_ids.cend(),
            std::back_inserter(mat_pv));
    }
    return new_mesh;
}
Example #3
estring::const_iterator estring::find(char_t c) const {
	return find(c, cbegin());
}
Example #4
 /// Get the list of tags for this object.
 const TagList& tags() const {
     return osmium::detail::subitem_of_type<const TagList>(cbegin(), cend());
 }
Example #5
 const_iterator cend() const { return cbegin(); }
Example #6
cell_vector::const_reverse_iterator cell_vector::crend() const
{
    return const_reverse_iterator(cbegin());
}
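The crend() above builds the reverse end iterator with the standard std::reverse_iterator adapter: the reverse end is simply cbegin() wrapped in the adapter (and, symmetrically, crbegin() would wrap cend()). A minimal sketch of that pattern, using a hypothetical int_span type rather than the original cell_vector:

#include <cstddef>
#include <iterator>

// Hypothetical read-only view: the reverse iterators are std::reverse_iterator
// wrappers around the forward const_iterators, so crend() wraps cbegin() and
// crbegin() wraps cend().
struct int_span {
    const int* data;
    std::size_t size;

    using const_iterator = const int*;
    using const_reverse_iterator = std::reverse_iterator<const_iterator>;

    const_iterator cbegin() const { return data; }
    const_iterator cend() const { return data + size; }
    const_reverse_iterator crbegin() const { return const_reverse_iterator(cend()); }
    const_reverse_iterator crend() const { return const_reverse_iterator(cbegin()); }
};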
Example #7
/**
 * This verifies that
 *   1) the load is evenly balanced across servers.
 *   2) the act of adding a server to a pool will never result in a server
 *      handling keyspace that it previously handled but no longer does.
 *      If this occurs, then stale data may be returned.
 */
TEST(ch3, verify_correctness) {
  uint32_t i, j;
  uint32_t maximum_pool_size = furc_maximum_pool_size();
  char key[MAX_KEY_LENGTH + 1];
  std::vector<uint64_t> pools[NUM_POOLS];
  uint32_t sizes[NUM_POOLS];
  size_t num_pools;
  auto weights = std::make_unique<std::array<double, 1U << 23U>>();
  weights->fill(1.0);

  srand(time(nullptr));

  for (num_pools = 0; /* see end of loop */; ++num_pools) {
    if (num_pools == 0) {
      sizes[num_pools] = 1;
    } else if (num_pools == NUM_POOLS - 1) {
      sizes[num_pools] = maximum_pool_size;
    } else if (num_pools % 2 == 1) { // grow pool size geometrically
      sizes[num_pools] = sizes[num_pools - 1] * drand_in_range(1.5, 2.5);
    } else { // grow pool size arithmetically
      sizes[num_pools] = sizes[num_pools - 1] + rand_in_range(1, 11);
    }

    /* Make sure we don't exceed the maximum pool size. */
    if (sizes[num_pools] > maximum_pool_size) {
      sizes[num_pools] = maximum_pool_size;
    }

    pools[num_pools] = std::vector<uint64_t>(sizes[num_pools]);

    if (sizes[num_pools] == maximum_pool_size)
      break;
  }

  for (i = 0; i < NUM_SAMPLES; ++i) {
    size_t previous_num = -1;
    int len;

    make_random_key(key, MAX_KEY_LENGTH);
    len = strlen(key);

    // hash the same key in each pool, in increasing pool size order
    for (j = 0; j < num_pools; ++j) {
      size_t num = furc_hash(key, len, sizes[j]);
      EXPECT_LT(num, sizes[j]);

      // Verify that the weighted furc yields identical result with weights at 1
      assert(sizes[j] <= weights->size());
      folly::Range<const double*> weightRange(
          weights->cbegin(), weights->cbegin() + sizes[j]);
      size_t weighted = facebook::mcrouter::weightedFurcHash(
          folly::StringPiece(key, len), weightRange);
      EXPECT_EQ(num, weighted);

      ++pools[j][num];

      // make sure that this key either hashes the same server,
      // or hashes to a new server
      if (previous_num != num && j > 0) {
        EXPECT_GE(num, sizes[j - 1]);
      }

      previous_num = num;
    }
  }

  for (i = 0; i < num_pools; ++i) {
    /* Verify that load is evenly distributed. This isn't easy to do
       generally without significantly increasing the runtime by choosing
       a huge NUM_SAMPLES, so just check pools up to 1000 in size. */

    uint32_t pool_size = sizes[i];
    if (pool_size > 1000)
      break;
    double expected_mean = ((double)NUM_SAMPLES) / pool_size;

    double max_diff = 0;
    double sum = 0;
    for (j = 0; j < pool_size; j++) {
      double diff = std::abs(pools[i][j] - expected_mean);
      if (diff > max_diff)
        max_diff = diff;
      sum += pools[i][j];
    }
    double mean = sum / pool_size;
    // expect the sample mean to be within 5% of expected mean
    EXPECT_NEAR(mean, expected_mean, expected_mean * 0.05);

    // expect the maximum deviation from mean to be within 15%
    EXPECT_NEAR(max_diff, 0, mean * 0.15);

    sum = 0;
    for (j = 0; j < pool_size; j++) {
      double diff = pools[i][j] - mean;
      sum += diff * diff;
    }
    double stddev = sqrt(sum / pool_size);
    // expect the standard deviation to be < 5%
    EXPECT_NEAR(stddev, 0, mean * 0.05);
  }
}
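The key property asserted inside the inner loop above is monotonicity: when the pool grows, a key either stays on its previous server or moves to one of the newly added servers, never to a different pre-existing one. Stated as a stand-alone predicate (a generic sketch; furc_hash itself is not reimplemented here):

#include <cstdint>

// Consistency property from the test above: after growing the pool beyond
// old_size servers, a key must either keep its old server index or land on one
// of the newly added servers (index >= old_size).
bool placement_is_consistent(uint32_t old_server, uint32_t new_server, uint32_t old_size) {
  return new_server == old_server || new_server >= old_size;
}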
Example #8
/*!
 * \brief Returns the full path to a working copy of the test file with the specified \a relativeTestFilePath.
 *
 * The specified \a mode controls whether a working copy is actually created or whether just the path is returned. If only the
 * path is returned, the \a relativeTestFilePath is ignored.
 *
 * In contrast to workingCopyPath(), this method allows adjusting the relative path of the working copy within the working copy
 * directory via \a relativeWorkingCopyPath.
 *
 * \remarks
 * - The test file specified via \a relativeTestFilePath is located using testFilePath().
 * - The name of the working copy file specified via \a relativeWorkingCopyPath will be adjusted if it already exists in the file
 *   system and can not be truncated.
 */
string TestApplication::workingCopyPathAs(
    const std::string &relativeTestFilePath, const std::string &relativeWorkingCopyPath, WorkingCopyMode mode) const
{
    // ensure working directory is present
    if (!dirExists(m_workingDir) && !makeDir(m_workingDir)) {
        cerr << Phrases::Error << "Unable to create working copy for \"" << relativeTestFilePath << "\": can't create working directory \""
             << m_workingDir << "\"." << Phrases::EndFlush;
        return string();
    }

    // ensure subdirectory exists
    const auto parts = splitString<vector<string>>(relativeWorkingCopyPath, "/", EmptyPartsTreat::Omit);
    if (!parts.empty()) {
        // create subdirectory level by level
        string currentLevel;
        currentLevel.reserve(m_workingDir.size() + relativeWorkingCopyPath.size() + 1);
        currentLevel.assign(m_workingDir);
        for (auto i = parts.cbegin(), end = parts.end() - 1; i != end; ++i) {
            if (currentLevel.back() != '/') {
                currentLevel += '/';
            }
            currentLevel += *i;

            // continue if subdirectory level already exists or we can successfully create the directory
            if (dirExists(currentLevel) || makeDir(currentLevel)) {
                continue;
            }
            // fail otherwise
            cerr << Phrases::Error << "Unable to create working copy for \"" << relativeWorkingCopyPath << "\": can't create directory \""
                 << currentLevel << "\" (inside working directory)." << Phrases::EndFlush;
            return string();
        }
    }

    // just return the path if we don't want to actually create a copy
    if (mode == WorkingCopyMode::NoCopy) {
        return m_workingDir + relativeWorkingCopyPath;
    }

    // copy the file
    const auto origFilePath(testFilePath(relativeTestFilePath));
    auto workingCopyPath(m_workingDir + relativeWorkingCopyPath);
    size_t workingCopyPathAttempt = 0;
    NativeFileStream origFile, workingCopy;
    origFile.open(origFilePath, ios_base::in | ios_base::binary);
    if (origFile.fail()) {
        cerr << Phrases::Error << "Unable to create working copy for \"" << relativeTestFilePath
             << "\": an IO error occurred when opening original file \"" << origFilePath << "\"." << Phrases::EndFlush;
        cerr << "error: " << strerror(errno) << endl;
        return string();
    }
    workingCopy.open(workingCopyPath, ios_base::out | ios_base::binary | ios_base::trunc);
    while (workingCopy.fail() && fileSystemItemExists(workingCopyPath)) {
        // adjust the working copy path if the target file already exists and can not be truncated
        workingCopyPath = argsToString(m_workingDir, relativeWorkingCopyPath, '.', ++workingCopyPathAttempt);
        workingCopy.clear();
        workingCopy.open(workingCopyPath, ios_base::out | ios_base::binary | ios_base::trunc);
    }
    if (workingCopy.fail()) {
        cerr << Phrases::Error << "Unable to create working copy for \"" << relativeTestFilePath
             << "\": an IO error occurred when opening target file \"" << workingCopyPath << "\"." << Phrases::EndFlush;
        cerr << "error: " << strerror(errno) << endl;
        return string();
    }
    workingCopy << origFile.rdbuf();
    if (!origFile.fail() && !workingCopy.fail()) {
        return workingCopyPath;
    }

    cerr << Phrases::Error << "Unable to create working copy for \"" << relativeTestFilePath << "\": ";
    if (origFile.fail()) {
        cerr << "an IO error occurred when reading original file \"" << origFilePath << "\"";
        return string();
    }
    if (workingCopy.fail()) {
        if (origFile.fail()) {
            cerr << " and ";
        }
        cerr << " an IO error occurred when writing to target file \"" << workingCopyPath << "\".";
    }
    cerr << "error: " << strerror(errno) << endl;
    return string();
}
Example #9
Calamares::JobResult CommandList::run()
{
    QLatin1Literal rootMagic( "@@ROOT@@" );
    QLatin1Literal userMagic( "@@USER@@" );

    System::RunLocation location = m_doChroot ? System::RunLocation::RunInTarget : System::RunLocation::RunInHost;

    /* Figure out the replacement for @@ROOT@@ */
    QString root = QStringLiteral( "/" );
    Calamares::GlobalStorage* gs = Calamares::JobQueue::instance()->globalStorage();

    bool needsRootSubstitution = findInCommands( *this, rootMagic );
    if ( needsRootSubstitution && ( location == System::RunLocation::RunInHost ) )
    {
        if ( !gs || !gs->contains( "rootMountPoint" ) )
        {
            cError() << "No rootMountPoint defined.";
            return Calamares::JobResult::error( QCoreApplication::translate( "CommandList", "Could not run command." ),
                                                QCoreApplication::translate( "CommandList", "The command runs in the host environment and needs to know the root path, but no rootMountPoint is defined." ) );
        }
        root = gs->value( "rootMountPoint" ).toString();
    }

    bool needsUserSubstitution = findInCommands( *this, userMagic );
    if ( needsUserSubstitution && ( !gs || !gs->contains( "username" ) ) )
    {
        cError() << "No username defined.";
        return Calamares::JobResult::error(
            QCoreApplication::translate( "CommandList", "Could not run command." ),
            QCoreApplication::translate( "CommandList", "The command needs to know the user's name, but no username is defined." ) );
    }
    QString user = gs->value( "username" ).toString();  // may be blank if unset

    for ( CommandList::const_iterator i = cbegin(); i != cend(); ++i )
    {
        QString processed_cmd = i->command();
        processed_cmd.replace( rootMagic, root ).replace( userMagic, user );
        bool suppress_result = false;
        if ( processed_cmd.startsWith( '-' ) )
        {
            suppress_result = true;
            processed_cmd.remove( 0, 1 );  // Drop the -
        }

        QStringList shell_cmd { "/bin/sh", "-c" };
        shell_cmd << processed_cmd;

        int timeout = i->timeout() >= 0 ? i->timeout() : m_timeout;
        ProcessResult r = System::runCommand(
                              location, shell_cmd, QString(), QString(), timeout );

        if ( r.getExitCode() != 0 )
        {
            if ( suppress_result )
                cDebug() << "Error code" << r.getExitCode() << "ignored by CommandList configuration.";
            else
                return r.explainProcess( processed_cmd, timeout );
        }
    }

    return Calamares::JobResult::ok();
}
Example #10
Hypothesis::operator std::vector<std::vector<int>>() {
    std::vector<std::vector<int>> vec;
    for (auto s = cbegin(); s != cend(); ++s)
        vec.push_back(std::vector<int>(**s));
    return vec;
}
int main(int argc, const char* argv[]) {
    try {
        // Parse command line arguments.
        TCLAP::CmdLine cmd("Depth RF trainer", ' ', "0.3");
        TCLAP::ValueArg<std::string> image_list_file_arg("f", "image-list-file", "File containing the names of image files", true, "", "string", cmd);
        TCLAP::ValueArg<int> num_of_classes_arg("n", "num-of-classes", "Number of classes in the data", true, 1, "int", cmd);
        TCLAP::SwitchArg print_confusion_matrix_switch("m", "conf-matrix", "Print confusion matrix", cmd, true);
        TCLAP::ValueArg<int> background_label_arg("l", "background-label", "Lower bound of background labels to be ignored", false, -1, "int", cmd);
        TCLAP::ValueArg<std::string> json_forest_file_arg("j", "json-forest-file", "JSON file where the trained forest should be saved", false, "forest.json", "string");
        TCLAP::ValueArg<std::string> binary_forest_file_arg("b", "binary-forest-file", "Binary file where the trained forest should be saved", false, "forest.bin", "string");
        TCLAP::ValueArg<std::string> config_file_arg("c", "config", "YAML file with training parameters", false, "", "string", cmd);
#if AIT_MULTI_THREADING
        TCLAP::ValueArg<int> num_of_threads_arg("t", "threads", "Number of threads to use", false, -1, "int", cmd);
#endif
        cmd.xorAdd(json_forest_file_arg, binary_forest_file_arg);
        cmd.parse(argc, argv);
        
        const int num_of_classes = num_of_classes_arg.getValue();
        bool print_confusion_matrix = print_confusion_matrix_switch.getValue();
        const std::string image_list_file = image_list_file_arg.getValue();

        // Initialize training and weak-learner parameters to defaults or load from file
        ForestTrainerT::ParametersT training_parameters;
        WeakLearnerT::ParametersT weak_learner_parameters;
        if (config_file_arg.isSet()) {
            ait::log_info(false) << "Reading config file " << config_file_arg.getValue() << "... " << std::flush;
            std::ifstream ifile_config(config_file_arg.getValue());
            cereal::JSONInputArchive iarchive(ifile_config);
            iarchive(cereal::make_nvp("training_parameters", training_parameters));
            iarchive(cereal::make_nvp("weak_learner_parameters", weak_learner_parameters));
            ait::log_info(false) << " Done." << std::endl;
        }
#if AIT_MULTI_THREADING
        if (num_of_threads_arg.isSet()) {
            training_parameters.num_of_threads = num_of_threads_arg.getValue();
        }
#endif

        // Read image file list
        ait::log_info(false) << "Reading image list ... " << std::flush;
        std::vector<std::tuple<std::string, std::string>> image_list;
        std::ifstream ifile(image_list_file);
        if (!ifile.good()) {
            throw std::runtime_error("Unable to open image list file");
        }
        ait::CSVReader<std::string> csv_reader(ifile);
        for (auto it = csv_reader.begin(); it != csv_reader.end(); ++it) {
            if (it->size() != 2) {
                cmd.getOutput()->usage(cmd);
                ait::log_error() << "Image list file should contain two columns with the data and label filenames.";
                exit(-1);
            }
            const std::string& data_filename = (*it)[0];
            const std::string& label_filename = (*it)[1];
            
            boost::filesystem::path data_path = boost::filesystem::path(data_filename);
            boost::filesystem::path label_path = boost::filesystem::path(label_filename);
            if (!data_path.is_absolute()) {
                data_path = boost::filesystem::path(image_list_file).parent_path();
                data_path /= data_filename;
            }
            if (!label_path.is_absolute()) {
                label_path = boost::filesystem::path(image_list_file).parent_path();
                label_path /= label_filename;
            }
            
            image_list.push_back(std::make_tuple(data_path.string(), label_path.string()));
        }
        ait::log_info(false) << " Done." << std::endl;
        
        // TODO: Ensure that label images do not contain values > num_of_classes except for background pixels. Other approach: Test samples directly below.
        
        // Set lower bound for background pixel labels
        ait::label_type background_label;
        if (background_label_arg.isSet()) {
            background_label = background_label_arg.getValue();
        } else {
            background_label = num_of_classes;
        }
        weak_learner_parameters.background_label = background_label;

        // Create weak learner and trainer.
        StatisticsT::Factory statistics_factory(num_of_classes);
        WeakLearnerT iwl(weak_learner_parameters, statistics_factory);
        ForestTrainerT trainer(iwl, training_parameters);
        SampleProviderT sample_provider(image_list, weak_learner_parameters);
        BaggingWrapperT bagging_wrapper(trainer, sample_provider);

#ifdef AIT_TESTING
        RandomEngineT rnd_engine(11);
#else
        std::random_device rnd_device;
        ait::log_info() << "rnd(): " << rnd_device();
        RandomEngineT rnd_engine(rnd_device());
#endif

        // Train a forest and time it.
        auto start_time = std::chrono::high_resolution_clock::now();
        // TODO
        //		ForestTrainerT::ForestT forest = bagging_wrapper.train_forest(rnd_engine);
        // TODO: Testing all samples for comparison with depth_trainer
        sample_provider.clear_samples();
        for (int i = 0; i < image_list.size(); ++i) {
            sample_provider.load_samples_from_image(i, rnd_engine);
        }
        SampleIteratorT samples_start = sample_provider.get_samples_begin();
        SampleIteratorT samples_end = sample_provider.get_samples_end();
        ait::log_info() << "Starting training ...";
        ForestTrainerT::ForestT forest = trainer.train_forest(samples_start, samples_end, rnd_engine);
        auto stop_time = std::chrono::high_resolution_clock::now();
        auto duration = stop_time - start_time;
        auto period = std::chrono::high_resolution_clock::period();
        double elapsed_seconds = duration.count() * period.num / static_cast<double>(period.den);
        ait::log_info() << "Done.";
        ait::log_info() << "Running time: " << elapsed_seconds;
        
        // Optionally: Serialize forest to JSON file.
        if (json_forest_file_arg.isSet()) {
            {
                ait::log_info(false) << "Writing json forest file " << json_forest_file_arg.getValue() << "... " << std::flush;
                std::ofstream ofile(json_forest_file_arg.getValue());
                cereal::JSONOutputArchive oarchive(ofile);
                oarchive(cereal::make_nvp("forest", forest));
                ait::log_info(false) << " Done." << std::endl;
            }
        // Optionally: Serialize forest to binary file.
        } else if (binary_forest_file_arg.isSet()) {
            {
                ait::log_info(false) << "Writing binary forest file " << binary_forest_file_arg.getValue() << "... " << std::flush;
                std::ofstream ofile(binary_forest_file_arg.getValue(), std::ios_base::binary);
                cereal::BinaryOutputArchive oarchive(ofile);
                oarchive(cereal::make_nvp("forest", forest));
                ait::log_info(false) << " Done." << std::endl;
            }
        } else {
            throw("This should never happen. Either a JSON or a binary forest file have to be specified!");
        }

        // Optionally: Compute some stats and print them.
        if (print_confusion_matrix) {
            ait::log_info(false) << "Creating samples for testing ... " << std::flush;
            sample_provider.clear_samples();
            for (int i = 0; i < image_list.size(); ++i) {
                sample_provider.load_samples_from_image(i, rnd_engine);
            }
            SampleIteratorT samples_start = sample_provider.get_samples_begin();
            SampleIteratorT samples_end = sample_provider.get_samples_end();
            ait::log_info(false) << " Done." << std::endl;
            
            std::vector<ait::size_type> sample_counts(num_of_classes, 0);
            for (auto sample_it = samples_start; sample_it != samples_end; sample_it++) {
                ++sample_counts[sample_it->get_label()];
            }
            auto logger = ait::log_info(true);
            logger << "Sample counts>> ";
            for (int c = 0; c < num_of_classes; ++c) {
                if (c > 0) {
                    logger << ", ";
                }
                logger << "class " << c << ": " << sample_counts[c];
            }
            logger.close();
            // For each tree extract leaf node indices for each sample.
            std::vector<std::vector<ait::size_type>> forest_leaf_indices = forest.evaluate(samples_start, samples_end);
            
            // Compute number of prediction matches based on a majority vote among the forest.
            int match = 0;
            int no_match = 0;
            for (auto tree_it = forest.cbegin(); tree_it != forest.cend(); ++tree_it) {
                for (auto sample_it = samples_start; sample_it != samples_end; sample_it++) {
                    const auto &node_it = tree_it->cbegin() + (forest_leaf_indices[tree_it - forest.cbegin()][sample_it - samples_start]);
                    const auto &statistics = node_it->get_statistics();
                    auto max_it = std::max_element(statistics.get_histogram().cbegin(), statistics.get_histogram().cend());
                    auto label = max_it - statistics.get_histogram().cbegin();
                    if (label == sample_it->get_label()) {
                        match++;
                    } else {
                        no_match++;
                    }
                }
            }
            ait::log_info() << "Match: " << match << ", no match: " << no_match;
            
            // Compute confusion matrix.
            auto forest_utils = ait::make_forest_utils(forest);
            auto confusion_matrix = forest_utils.compute_confusion_matrix(samples_start, samples_end);
            ait::log_info() << "Confusion matrix:" << std::endl << confusion_matrix;
            auto norm_confusion_matrix = ait::EvaluationUtils::normalize_confusion_matrix(confusion_matrix);
            ait::log_info() << "Normalized confusion matrix:" << std::endl << norm_confusion_matrix;
            ait::log_info() << "Diagonal of normalized confusion matrix:" << std::endl << norm_confusion_matrix.diagonal();
            
            // Computing per-frame confusion matrix
            ait::log_info() << "Computing per-frame confusion matrix.";
            using ConfusionMatrixType = typename decltype(forest_utils)::MatrixType;
            ConfusionMatrixType per_frame_confusion_matrix(num_of_classes, num_of_classes);
            per_frame_confusion_matrix.setZero();
            WeakLearnerT::ParametersT full_parameters(weak_learner_parameters);
            // Modify parameters to retrieve all pixels per sample
            full_parameters.samples_per_image_fraction = 1.0;
            SampleProviderT full_sample_provider(image_list, full_parameters);
            for (int i = 0; i < image_list.size(); ++i) {
                full_sample_provider.clear_samples();
                full_sample_provider.load_samples_from_image(i, rnd_engine);
                samples_start = full_sample_provider.get_samples_begin();
                samples_end = full_sample_provider.get_samples_end();
                forest_utils.update_confusion_matrix(per_frame_confusion_matrix, samples_start, samples_end);
            }
            ait::log_info() << "Per-frame confusion matrix:" << std::endl << per_frame_confusion_matrix;
            ConfusionMatrixType per_frame_norm_confusion_matrix = ait::EvaluationUtils::normalize_confusion_matrix(per_frame_confusion_matrix);
            ait::log_info() << "Normalized per-frame confusion matrix:" << std::endl << per_frame_norm_confusion_matrix;
            ait::log_info() << "Diagonal of normalized per-frame confusion matrix:" << std::endl << per_frame_norm_confusion_matrix.diagonal();
            ait::log_info() << "Mean of diagonal of normalized per-frame confusion matrix:" << std::endl << per_frame_norm_confusion_matrix.diagonal().mean();
        }

    } catch (const std::runtime_error& error) {
        std::cerr << "Runtime exception occured" << std::endl;
        std::cerr << error.what() << std::endl;
    }
    
    return 0;
}
hooke_jeeves_algorithm<T, N>::hooke_jeeves_algorithm() noexcept
    : optimiser<T, N>(),
      initial_stepsize(T(1.0)),
      stepsize_decrease(T(2.0)) {
  this->optimisation_function = [this](const mant::problem<T, N>& problem, const std::vector<std::array<T, N>>& initial_parameters) {
    assert(stepsize_decrease > T(1.0));

    auto&& start_time  = std::chrono::steady_clock::now();
    optimise_result<T, N> result;

    for (const auto& parameter : initial_parameters) {
      const auto objective_value = problem.objective_function(parameter);
      ++result.evaluations;
      result.duration = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::steady_clock::now() - start_time);
	  
      if (objective_value <= result.objective_value) {
        result.parameter = parameter;
        result.objective_value = objective_value;

        if (result.objective_value <= this->acceptable_objective_value) {
          return result;
        }
      }

      if (result.evaluations >= this->maximal_evaluations) {
        return result;
      } else if (result.duration >= this->maximal_duration) {
        return result;
      }
    }

    T stepsize = initial_stepsize;

    while (result.duration < this->maximal_duration && result.evaluations < this->maximal_evaluations && result.objective_value > this->acceptable_objective_value) {
      bool is_improving = false;

      for (std::size_t n = 0; n < this->active_dimensions.size(); ++n) {
        auto parameter = result.parameter;
        parameter.at(n) += stepsize;

        std::transform(
          parameter.cbegin(), parameter.cend(),
          parameter.begin(),
          [](const auto element) {
            if (element < T(0.0)) {
              return T(0.0);
            } else if(element > T(1.0)) {
              return T(1.0);
            }

            return element;
          });

        auto objective_value = problem.objective_function(parameter);
        ++result.evaluations;
        result.duration = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::steady_clock::now() - start_time);

        if (objective_value < result.objective_value) {
          result.parameter = parameter;
          result.objective_value = objective_value;

          if (result.objective_value <= this->acceptable_objective_value) {
            return result;
          }

          is_improving = true;
        }

        if (result.evaluations >= this->maximal_evaluations) {
          return result;
        } else if (result.duration >= this->maximal_duration) {
          return result;
        }

        parameter.at(n) -= T(2.0) * stepsize;

        std::transform(
          parameter.cbegin(), parameter.cend(),
          parameter.begin(),
          [](const auto element) {
            return std::fmin(std::fmax(element, T(0.0)), T(1.0));
          });

        objective_value = problem.objective_function(parameter);
        ++result.evaluations;
        result.duration = std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::steady_clock::now() - start_time);

        if (objective_value < result.objective_value) {
          result.parameter = parameter;
          result.objective_value = objective_value;
          is_improving = true;
        }
      }

      if (!is_improving) {
        stepsize /= stepsize_decrease;
      }
    }

    return result;
  };
}
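The two std::transform calls above clamp every coordinate back into the unit hypercube after a step; the second uses the more compact std::fmin/std::fmax form. The same clamping step in isolation (clamp_to_unit_cube is a name invented for this sketch):

#include <algorithm>
#include <array>
#include <cmath>
#include <cstddef>

// Clamp every coordinate of a parameter vector into [0, 1], mirroring the
// box-constraint handling in the Hooke-Jeeves snippet above.
template <typename T, std::size_t N>
void clamp_to_unit_cube(std::array<T, N>& parameter) {
  std::transform(parameter.cbegin(), parameter.cend(), parameter.begin(),
                 [](const T element) {
                   return std::fmin(std::fmax(element, T(0.0)), T(1.0));
                 });
}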
Example #13
	const_reference operator[](size_type pos) const noexcept
	{ return *(cbegin() + pos); }
Example #14
	const_reverse_iterator rcend() const noexcept
	{ return const_reverse_iterator(cbegin()); }
Example #15
 IDX operator[](size_t i) const { return cbegin()[i]; }
Example #16
 const WayNodeList& nodes() const {
     return osmium::detail::subitem_of_type<const WayNodeList>(cbegin(), cend());
 }
Example #17
cell_vector::const_iterator cell_vector::begin() const
{
    return cbegin();
}
Example #18
std::string String::str() const {
  return std::string(cbegin(), cend());
}
Example #19
 // Returns a random access (contiguous) iterator pointing to the end of
 // the compilation output.  It is valid for the lifetime of this object.
 // If there is no compilation result, then returns nullptr.
 const_iterator cend() const {
   if (!compilation_result_) return nullptr;
   return cbegin() +
          shaderc_result_get_length(compilation_result_) /
              sizeof(OutputElementType);
 }
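The cend() above is derived from cbegin() by pointer arithmetic: the raw byte length reported by shaderc_result_get_length is divided by the element size. A generic sketch of the same idea (typed_view is a hypothetical type, not part of the shaderc API):

#include <cstddef>

// Hypothetical view over a raw byte buffer interpreted as OutputElementType:
// the end iterator is the begin pointer advanced by (byte length / element size).
template <typename OutputElementType>
struct typed_view {
  const void* bytes = nullptr;
  std::size_t byte_length = 0;

  using const_iterator = const OutputElementType*;

  const_iterator cbegin() const {
    return static_cast<const OutputElementType*>(bytes);
  }
  const_iterator cend() const {
    if (bytes == nullptr) return nullptr;
    return cbegin() + byte_length / sizeof(OutputElementType);
  }
};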
Example #20
	void visit(const Structure* structure) {
		Assert(isTheoryOpen());

		printTab();
		output() << "Data: " << '\n';
		indent();

		auto voc = structure->vocabulary();
		for (auto it = voc->firstSort(); it != voc->lastSort(); ++it) {
			auto s = it->second;
			if (not s->builtin()) {
				printTab();
				auto name = s->name();
				name = capitalize(name);
				output() << name << " = ";
				auto st = structure->inter(s);
				visit(st);
				output() << '\n';
			}
		}
		for (auto it = voc->firstPred(); it != voc->lastPred(); ++it) {
			auto sp = it->second->nonbuiltins();
			for (auto jt = sp.cbegin(); jt != sp.cend(); ++jt) {
				auto p = *jt;
				if (p->arity() == 1 && p->sorts()[0]->pred() == p) { // If it is in fact a sort, ignore it
					continue;
				}
				auto pi = structure->inter(p);
				if (pi->ct()->size() == 0 && pi->cf()->size() == 0) {
					continue;
				}
				if (not pi->approxTwoValued()) {
					output() << "Partial: " << '\n'; //TEMPORARY GO TO PARTIAL BLOCK
				}
				printTab();
				auto name = p->nameNoArity();
				name = capitalize(name);
				output() << name << " = ";
				visit(pi->ct());
				if (not pi->approxTwoValued()) {
					visit(pi->cf());
					output() << '\n';
					output() << "Data: "; //RETURN TO DATA BLOCK
				}
				output() << '\n';
			}
		}
		for (auto it = voc->firstFunc(); it != voc->lastFunc(); ++it) {
			auto sf = it->second->nonbuiltins();
			for (auto jt = sf.cbegin(); jt != sf.cend(); ++jt) {
				auto f = *jt;
				auto fi = structure->inter(f);
				if (fi->approxTwoValued()) {
					printTab();
					auto name = f->nameNoArity();
					name = capitalize(name);
					output() << name << " = ";
					auto ft = fi->funcTable();
					visit(ft);
				} else {
					auto pi = fi->graphInter();
					auto ct = pi->ct();
					auto cf = pi->cf();
					if (ct->approxEmpty() && cf->approxEmpty()) {
						continue;
					}
					output() << "Partial: " << '\n'; //TEMPORARY GO TO PARTIAL BLOCK
					printTab();
					auto name = f->nameNoArity();
					name = capitalize(name);
					output() << name << " = ";
					printAsFunc(ct);
					printAsFunc(cf);
					output() << '\n';
					output() << "Data: "; //RETURN TO DATA BLOCK
				}
				output() << '\n';
			}
		}
		unindent();
		output() << '\n';
	}
Example #21
double terrama2::services::analysis::core::grid::zonal::history::prec::operatorImpl(terrama2::services::analysis::core::StatisticOperation statisticOperation,
    const std::string& dataSeriesName,
    const std::string& dateDiscardBefore,
    const std::string& dateDiscardAfter,
    const size_t band,
    terrama2::services::analysis::core::Buffer buffer)
{
  OperatorCache cache;
  terrama2::services::analysis::core::python::readInfoFromDict(cache);
  // After the operator lock is released it's not allowed to return any value because it doesn't have the interpreter lock.
  // In case an exception is thrown, we need to set this boolean; once the interpreter lock is acquired again we should return NAN.
  bool exceptionOccurred = false;

  auto& contextManager = ContextManager::getInstance();
  auto analysis = cache.analysisPtr;

  try
  {
    terrama2::core::verify::analysisMonitoredObject(analysis);
  }
  catch (const terrama2::core::VerifyException&)
  {
    contextManager.addError(cache.analysisHashCode, QObject::tr("Use of invalid operator for analysis %1.").arg(analysis->id).toStdString());
    return std::nan("");
  }

  terrama2::services::analysis::core::MonitoredObjectContextPtr context;
  try
  {
    context = ContextManager::getInstance().getMonitoredObjectContext(cache.analysisHashCode);
  }
  catch(const terrama2::Exception& e)
  {
    TERRAMA2_LOG_ERROR() << boost::get_error_info<terrama2::ErrorDescription>(e)->toStdString();
    return std::nan("");
  }


  try
  {
    // In case an error has already occurred, there is nothing to do.
    if(context->hasError())
      return std::nan("");

    auto valuesMap = accum::getAccumulatedMap(dataSeriesName, dateDiscardBefore, dateDiscardAfter, band, buffer, context, cache);

    if(exceptionOccurred)
      return std::nan("");

    if(valuesMap.empty() && statisticOperation != StatisticOperation::COUNT)
    {
      return std::nan("");
    }

    std::vector<double> values;
    values.reserve(valuesMap.size());
    std::transform(valuesMap.cbegin(), valuesMap.cend(), back_inserter(values), [](std::pair<std::pair<int, int>, std::pair<double, int> > value){ return value.second.first/value.second.second; });

    terrama2::services::analysis::core::calculateStatistics(values, cache);
    return terrama2::services::analysis::core::getOperationResult(cache, statisticOperation);
  }
  catch(const terrama2::Exception& e)
  {
    context->addLogMessage(BaseContext::MessageType::ERROR_MESSAGE, boost::get_error_info<terrama2::ErrorDescription>(e)->toStdString());
    return std::nan("");
  }
  catch(const std::exception& e)
  {
    context->addLogMessage(BaseContext::MessageType::ERROR_MESSAGE, e.what());
    return std::nan("");
  }
  catch(...)
  {
    QString errMsg = QObject::tr("An unknown exception occurred.");
    context->addLogMessage(BaseContext::MessageType::ERROR_MESSAGE, errMsg.toStdString());
    return std::nan("");
  }
}
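The std::transform call above flattens an accumulation map whose values are (sum, count) pairs into a vector of per-cell averages. The same pattern in isolation (the function name averages and the exact map layout are illustrative, not the terrama2 types):

#include <algorithm>
#include <iterator>
#include <map>
#include <utility>
#include <vector>

// Each map value holds a (sum, count) pair keyed by a grid coordinate; the
// result is a flat vector of averages.
std::vector<double> averages(
    const std::map<std::pair<int, int>, std::pair<double, int>>& accumulated)
{
  std::vector<double> values;
  values.reserve(accumulated.size());
  std::transform(accumulated.cbegin(), accumulated.cend(), std::back_inserter(values),
                 [](const auto& entry) {
                   return entry.second.first / entry.second.second;
                 });
  return values;
}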
Example #22
 inline
 cjson_wrapper::const_iterator cjson_wrapper::begin() const
 {
     return cbegin();
 }
Example #23
 const_iterator begin() const {
     return cbegin();
 }
Example #24
 void run_check() {
   const auto opened_after = open_fds();
   ASSERT_EQ(opened_before.size(), opened_after.size()) << "Number of file descriptors changed";
   EXPECT_TRUE(std::equal(opened_before.cbegin(), opened_before.cend(), opened_after.cbegin()))
     << "Set of opened file descriptors changed";
 }
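The check above snapshots the set of open file descriptors before and after the test and compares the snapshots with std::equal. A stripped-down version of that comparison (the std::set<int> snapshot type and the function name are assumptions; open_fds() itself is not shown):

#include <algorithm>
#include <set>

// True when the two snapshots contain exactly the same descriptors.
// std::equal assumes equally long ranges, hence the explicit size check.
bool descriptors_unchanged(const std::set<int>& before, const std::set<int>& after) {
  return before.size() == after.size() &&
         std::equal(before.cbegin(), before.cend(), after.cbegin());
}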
Example #25
		const_iterator begin() const { return cbegin(); }
Example #26
void http_net::run() {
	// timeout handling
	if((start_time + request_timeout * 1000) < SDL_GetTicks()) {
		if(status_code == HTTP_STATUS::NONE) {
			status_code = HTTP_STATUS::TIMEOUT;
		}
		log_error("timeout for %s%s request!", server_name, server_url);
		receive_cb(this, status_code, server_name, "timeout");
		this->set_thread_should_finish();
	}
	
	// if there is no data to handle, return
	if(use_ssl) {
		if(ssl_protocol.is_running() && !ssl_protocol.is_received_data()) return;
	}
	else {
		if(plain_protocol.is_running() && !plain_protocol.is_received_data()) return;
	}
	
	// get and process new data (first concat everything, then split after \r\n)
	auto received_data = (use_ssl ? ssl_protocol.get_and_clear_received_data() : plain_protocol.get_and_clear_received_data());
	string received_data_str = "";
	for(const auto& recv_elem : received_data) {
		received_data_str += string(recv_elem.data(), recv_elem.size());
	}
	vector<string> lines { core::tokenize(received_data_str, "\r\n") };
	
	// if the received data ends on "\r\n", tokenize will create an additional unwanted empty line -> remove it
	// also: ending data on "\r\n" will also signal the next received data not to append the first line to the last received line (see below)
	const size_t recv_lev = received_data_str.size();
	const bool cur_crlf = (recv_lev >= 2 && received_data_str[recv_lev - 2] == '\r' && received_data_str[recv_lev - 1] == '\n');
	if(cur_crlf) {
		lines.pop_back();
	}
	
	// insert new lines into receive store
	auto lines_begin = begin(lines);
	if(!receive_store.empty() && !prev_crlf) {
		// if the previous received data didn't end on "\r\n", add the first line to the last line/element of the receive store
		receive_store.back() += lines[0];
		lines_begin++; // insert from the next line onwards
	}
	receive_store.insert(end(receive_store), lines_begin, end(lines));
	prev_crlf = cur_crlf;
	
	// first, try to get the header
	if(!header_read) {
		header_length = 0;
		for(auto line_iter = cbegin(receive_store), end_iter = cend(receive_store); line_iter != end_iter; line_iter++) {
			header_length += line_iter->size() + 2; // +2 == CRLF
			// check for empty line
			if(line_iter->size() == 0) {
				header_read = true;
				check_header(line_iter);
				
				// remove header from receive store
				line_iter++;
				receive_store.erase(begin(receive_store), line_iter);
				break;
			}
		}
	}
	
	// if header has been found (previously or just now), try to find the message end
	if(header_read) {
		bool packet_complete = false;
		const auto received_length = (use_ssl ? ssl_protocol.get_received_length() : plain_protocol.get_received_length());
		if(packet_type == http_net::PACKET_TYPE::NORMAL && (received_length - header_length) == content_length) {
			packet_complete = true;
			for(const auto& line : receive_store) {
				page_data += line;
				page_data += "\r\n";
			}
			
			// reset received data counter
			if(use_ssl) ssl_protocol.subtract_received_length(content_length);
			else plain_protocol.subtract_received_length(content_length);
			// TODO: reset for chunked packets as well
		}
		else if(packet_type == http_net::PACKET_TYPE::CHUNKED) {
			// note: this iterates over the receive store twice, once to check if all data was received and sizes are correct and
			// a second time to write the chunk data to page_data
			for(auto line_iter = cbegin(receive_store), line_end = cend(receive_store); line_iter != line_end; line_iter++) {
				// get chunk length
				size_t chunk_len = (size_t)strtoull(line_iter->c_str(), nullptr, 16);
				if(chunk_len == 0 && line_iter->size() > 0) {
					if(packet_complete) break; // second run is complete, break
					packet_complete = true;
					
					// packet complete, start again, add data to page_data this time
					line_iter = cbegin(receive_store);
					chunk_len = (size_t)strtoull(line_iter->c_str(), nullptr, 16);
				}
				
				size_t chunk_received_len = 0;
				while(++line_iter != cend(receive_store)) {
					// append chunk data
					if(packet_complete) page_data += *line_iter + "\r\n";
					chunk_received_len += line_iter->size();
					
					// check if complete chunk was received
					if(chunk_len == chunk_received_len) break;
					chunk_received_len += 2; // newline / data \r\n (not part of the protocol)
				}
				
				if(line_iter == cend(receive_store)) break;
			}
		}
		
		if(packet_complete) {
			receive_cb(this, status_code, server_name, page_data);
			
			// we're done here, clear and finish
			this->set_thread_should_finish();
		}
	}
}
Example #27
reaver::assembler::ast reaver::assembler::parser::parse(const std::vector<reaver::assembler::line> & lines) const
{
    ast ret;

    lexer lex{};

    for (const auto & x : lines)
    {
        try
        {
            auto t = reaver::lexer::tokenize(*x, lex.desc);

            auto begin = t.cbegin();

            auto label_match = reaver::parser::parse(label_definition, begin, t.cend(), skip);

            if (label_match)
            {
                ret.add_label(*label_match);

                if (begin == t.cend())
                {
                    continue;
                }
            }

            else
            {
                begin = t.cbegin();
            }

            {
                auto b = begin;
                auto data_match = reaver::parser::parse(data, b, t.cend(), skip);

                if (data_match)
                {
                    if (b != t.cend())
                    {
                        throw "garbage at the end of a line.";
                    }

                    ret.add_data(*data_match);

                    continue;
                }
            }

            {
                auto b = begin;
                auto bits_match = reaver::parser::parse(bits_directive, b, t.cend(), skip);

                if (bits_match)
                {
                    if (b != t.cend())
                    {
                        throw "garbage at the end of a line.";
                    }

                    ret.set_bitness(*bits_match);

                    continue;
                }
            }

            {
                auto b = begin;
                auto extern_match = reaver::parser::parse(extern_directive, b, t.cend(), skip);

                if (extern_match)
                {
                    if (b != t.cend())
                    {
                        throw "garbage at the end of a line.";
                    }

                    ret.add_extern(*extern_match);

                    continue;
                }
            }

            {
                auto b = begin;
                auto global_match = reaver::parser::parse(global_directive, b, t.cend(), skip);

                if (global_match)
                {
                    if (b != t.cend())
                    {
                        throw "garbage at the end of a line.";
                    }

                    ret.add_global(*global_match);

                    continue;
                }
            }

            {
                auto b = begin;
                auto section_match = reaver::parser::parse(section_directive, b, t.cend(), skip);

                if (section_match)
                {
                    if (b != t.cend())
                    {
                        throw "garbage at the end of a line.";
                    }

                    ret.start_section(*section_match);

                    continue;
                }
            }

            {
                auto b = begin;
                auto instruction_match = reaver::parser::parse(assembly_instruction, b, t.cend(), skip);

                if (instruction_match)
                {
                    if (b != t.cend())
                    {
                        if (b->as<std::string>() == ",")
                        {
                            throw "invalid operand.";
                        }

                        throw "garbage at the end of a line.";
                    }

                    ret.add_instruction(*instruction_match);

                    continue;
                }

                throw "invalid line.";
            }
        }

        catch (const char * e)
        {
            throw exception{ error, x.chain() } << e;
        }

        catch (const reaver::parser::expectation_failure & e)
        {
            throw exception{ error, x.chain() } << "unexpected token: " << e.iter->as<std::string>();
        }
    }

    return ret;
}
Example #28
Intersection::Base::iterator Intersection::findClosestTurn(double angle)
{
    // use the const operator to avoid code duplication
    return begin() +
           std::distance(cbegin(), static_cast<const Intersection *>(this)->findClosestTurn(angle));
}
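The non-const findClosestTurn() above avoids duplicating the search logic: it calls the const overload through a static_cast of this and converts the returned const_iterator back into a mutable iterator via the offset from cbegin(). A self-contained sketch of that idiom (the bucket class and find_first_even() are invented for illustration):

#include <algorithm>
#include <iterator>
#include <vector>

class bucket {
public:
    using iterator = std::vector<int>::iterator;
    using const_iterator = std::vector<int>::const_iterator;

    // The search is implemented once, in the const overload.
    const_iterator find_first_even() const {
        return std::find_if(values_.cbegin(), values_.cend(),
                            [](int v) { return v % 2 == 0; });
    }

    // The non-const overload reuses it and translates the const_iterator back
    // into a mutable iterator using the offset from cbegin().
    iterator find_first_even() {
        return values_.begin() +
               std::distance(values_.cbegin(),
                             static_cast<const bucket*>(this)->find_first_even());
    }

private:
    std::vector<int> values_{1, 3, 4, 7};
};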
Example #29
estring::const_iterator estring::data() const {
	return cbegin();
}
Example #30
worksheet::const_iterator worksheet::begin() const
{
    return cbegin();
}