Code Example #1
/// the out integral channels will be resized to the required dimensions
void get_integral_channels(const integral_channels_t &in,
                           const point_t &modelWindowSize, const point_t &dataOffset, const int resizing_factor,
                           integral_channels_t &out)
{
    get_integral_channels(in,
                          dataOffset.x(), dataOffset.y(),
                          modelWindowSize.x(), modelWindowSize.y(),
                          resizing_factor,
                          out);
    return;
}
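For reference, the wrapper above simply forwards to an overload of get_integral_channels that takes explicit coordinates. A plausible declaration for that overload, inferred from the call site (an assumption, not the project's actual header):

// Hypothetical declaration inferred from the call above; the real signature in doppia may differ.
void get_integral_channels(const integral_channels_t &in,
                           const int offset_x, const int offset_y,
                           const int width, const int height,
                           const int resizing_factor,
                           integral_channels_t &out);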
Code Example #2
    bool Polygon::within_n(const point_t &p, double d2) const
    {
        if (_in_mbr(p)) {
            for (size_t i = 0; i < outer_.size(); ++i)
                if (outer_[i].contains(p))
                    return outer_[i].within_n(p, d2);

            for (size_t i = 0; i < inner_.size(); ++i)
                if (inner_[i].contains(p))
                    return inner_[i].within_n(p, d2);

            return true;
        } else {
            if (p.y() >= mbr_[0]) return _within_n(p, d2, 0);
            if (p.x() <= mbr_[1]) return _within_n(p, d2, 1);
            if (p.y() <= mbr_[2]) return _within_n(p, d2, 2);
            if (p.x() >= mbr_[3]) return _within_n(p, d2, 3);
        }

        // not reachable
        return true;
    }
Code Example #3
void TrainingData::addPositiveSamples(const std::vector<std::string> &filenamesPositives,
                                      const point_t &modelWindowSize, const point_t &dataOffset)
{
    const size_t
            initialNumberOfTrainingSamples = getNumExamples(), // number of images already added to the training set
            finalNumberOfTrainingSamples = initialNumberOfTrainingSamples + filenamesPositives.size();
    if(finalNumberOfTrainingSamples > getMaxNumExamples())
    {
        throw std::runtime_error("TrainingData::addPositiveSamples is trying to add more data than initially specified");
    }


    printf("\nCollecting %zi positive samples\n", filenamesPositives.size());
    boost::progress_display progress_indicator(filenamesPositives.size());


    meta_datum_t  metaDatum;
    integral_channels_t sampleIntegralChannels;

    // integralChannelsComputer is already multithreaded, so there is no benefit in parallelizing this for loop
    for (size_t filenameIndex = 0; filenameIndex < filenamesPositives.size(); filenameIndex +=1)
    {
        gil::rgb8_image_t image;
        gil::rgb8c_view_t image_view = doppia::open_image(filenamesPositives[filenameIndex].c_str(), image);

        _integralChannelsComputer.set_image(image_view);
        _integralChannelsComputer.compute();

        get_integral_channels(_integralChannelsComputer.get_integral_channels(),
                              modelWindowSize, dataOffset, _integralChannelsComputer.get_shrinking_factor(),// shrinking factor = 4
                              sampleIntegralChannels);

        metaDatum.filename = filenamesPositives[filenameIndex];
        metaDatum.imageClass = 1;//classes[k];
        metaDatum.x = dataOffset.x();
        metaDatum.y = dataOffset.y();

        setDatum(initialNumberOfTrainingSamples + filenameIndex,
                 metaDatum, sampleIntegralChannels);

        ++progress_indicator;
    } // end of "for each filename"

    return;
}
Code Example #4
File: ModelIO.cpp  Project: HaoLiuHust/doppia
void ModelIO::initWrite(const std::string datasetName,
                        const DetectorModel::DetectorTypes type,
                        const std::string detectorName,
                        const point_t modelWindow,
                        const rectangle_t objectWindow)
{

    doppia_protobuf::Point2d *model_window = _model.mutable_model_window_size();
    model_window->set_x(modelWindow.x());
    model_window->set_y(modelWindow.y());

    doppia_protobuf::Box *b = _model.mutable_object_window();
    b->mutable_min_corner()->set_x(objectWindow.min_corner().x());
    b->mutable_min_corner()->set_y(objectWindow.min_corner().y());
    b->mutable_max_corner()->set_x(objectWindow.max_corner().x());
    b->mutable_max_corner()->set_y(objectWindow.max_corner().y());

    _model.set_training_dataset_name(datasetName.c_str());
    _model.set_detector_type(type);
    _model.set_detector_name(detectorName);

    return;
}
Code Example #5
bool operator ==(const point_t& a, const point_t& b)
{
	return QuasiEqual(a.x(), b.x(), margin) && QuasiEqual(a.y(), b.y(), margin) && QuasiEqual(a.z(), b.z(), margin);
}
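QuasiEqual and margin are not shown in this snippet; presumably QuasiEqual is an absolute-tolerance comparison and margin a small constant. A minimal sketch under that assumption:

#include <cmath>

// Hypothetical helper: true when a and b differ by less than margin.
// The actual project may use a different (e.g. relative) tolerance.
inline bool QuasiEqual(double a, double b, double margin)
{
    return std::fabs(a - b) < margin;
}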
Code Example #6
void TrainingData::addNegativeSamples(const std::vector<std::string> &filenamesBackground,
                                      const point_t &modelWindowSize, const point_t &dataOffset,
                                      const size_t numNegativeSamplesToAdd)
{

    int feature_extraction_time = 0, image_loading_time = 0, tmp_time;

    const size_t
            initialNumberOfTrainingSamples = get_num_examples(),
            finalNumberOfTrainingSamples = initialNumberOfTrainingSamples + numNegativeSamplesToAdd;
    if(finalNumberOfTrainingSamples > getMaxNumExamples())
    {
        throw std::runtime_error("TrainingData::addNegativeSamples is trying to add more data than initially specified");
    }

    printf("\nCollecting %zi random negative samples\n", numNegativeSamplesToAdd);
    doppia::progress_display_with_eta progress_indicator(numNegativeSamplesToAdd);

    meta_datum_t  metaDatum;
    integral_channels_t sampleIntegralChannels;

#if defined(DEBUG)
    srand(1);
#else
    srand(time(NULL));
#endif

    const int samplesPerImage = std::max<int>(1, numNegativeSamplesToAdd / filenamesBackground.size());

    // FIXME no idea what the +1 does
    const int
            minWidth = (modelWindowSize.x()+1 + 2*dataOffset.x()),
            minHeight = (modelWindowSize.y()+1 + 2*dataOffset.y());

    const float maxSkippedFraction = 0.25;

    size_t numNegativesSamplesAdded = 0, numSkippedImages = 0, filenameIndex = 0;

    // integralChannelsComputer is already multithreaded, so there is no benefit in parallelizing this for loop
    while (numNegativesSamplesAdded < numNegativeSamplesToAdd)
    {
        if (filenameIndex >= filenamesBackground.size())
        {
            // force to loop until we have reached the desired number of samples
            filenameIndex = 0;
        }
        const string &background_image_path = filenamesBackground[filenameIndex];
        filenameIndex +=1;

        gil::rgb8c_view_t imageView;
        gil::rgb8_image_t image;
        //tmp_time = (int)round(omp_get_wtime());
        imageView = doppia::open_image(background_image_path.c_str(), image);
        //image_loading_time += (int)round(omp_get_wtime()) - tmp_time;

        if ((imageView.width() < minWidth) or (imageView.height() < minHeight))
        {
            // if input image is too small, we skip it
            //printf("Skipping negative sample %s, because it is too small\n", filename.c_str());
            numSkippedImages += 1;

            const float skippedFraction = static_cast<float>(numSkippedImages) / filenamesBackground.size();
            if (skippedFraction > maxSkippedFraction)
            {
                printf("Skipped %zi images (out of %zi, %.3f%%) because they where too small (or too big to process)\n",
                       numSkippedImages, filenamesBackground.size(), skippedFraction*100);

                throw std::runtime_error("Too many negatives images where skipped. Dataset needs to be fixed");
            }
            continue;
        }

        const int
                maxRandomX = (imageView.width() - modelWindowSize.x()+1 - 2*dataOffset.x()),
                maxRandomY = (imageView.height() - modelWindowSize.y()+1 - 2*dataOffset.y());

        try
        {
            // FIXME hardcoded values
            const size_t
                    expected_channels_size = imageView.size()*10,
                    max_texture_size = 134217728; // 2**27 for CUDA capability 2.x
            if(expected_channels_size > max_texture_size)
            {
                throw std::invalid_argument("The image is monstruously big!");
            }

            const boost::filesystem::path file_path = background_image_path;
#if BOOST_VERSION <= 104400
            const std::string filename = file_path.filename();
#else
            const std::string filename = file_path.filename().string();
#endif
            tmp_time = (int)round(omp_get_wtime());
            _integralChannelsComputer->set_image(imageView, filename);
            image_loading_time += (int)round(omp_get_wtime()) - tmp_time;

            tmp_time = (int)round(omp_get_wtime());
            _integralChannelsComputer->compute();
            feature_extraction_time += (int)round(omp_get_wtime()) - tmp_time;

        }
        catch(std::exception &e)
        {
            printf("Computing integral channels of image %s \033[1;31mfailed\033[0m (size %zix%zi). Skipping it. Error was:\n%s\n",
                   background_image_path.c_str(),
                   imageView.width(), imageView.height(),
                   e.what());
            numSkippedImages += 1;
            continue; // we skip this image
        }
        catch(...)
        {
            printf("Computing integral channels of %s \033[1;31mfailed\033[0m (size %zix%zi). Skipping it. Received unknown error.\n",
                   background_image_path.c_str(),
                   imageView.width(), imageView.height());
            numSkippedImages += 1;
            continue; // we skip this image
        }

        metaDatum.filename = background_image_path;
        metaDatum.imageClass = _backgroundClassLabel;

        size_t numSamplesForImage = std::min<size_t>(samplesPerImage,
                                                     (numNegativeSamplesToAdd - numNegativesSamplesAdded));
        numSamplesForImage = 1; // overrides the value computed above: only one random crop is taken per image
        for (size_t randomSampleIndex = 0; randomSampleIndex < numSamplesForImage; randomSampleIndex += 1)
        {
            //const point_t::coordinate_t
            size_t
                    x = dataOffset.x() + rand() % maxRandomX,
                    y = dataOffset.y() + rand() % maxRandomY;
            //printf("random x,y == %i, %i\n", x,y);
            const point_t randomOffset(x,y);
            metaDatum.x = randomOffset.x(); metaDatum.y = randomOffset.y();
            //tmp_time = (int)round(omp_get_wtime());
            get_integral_channels(_integralChannelsComputer->get_integral_channels(),
                                  modelWindowSize, randomOffset, doppia::IntegralChannelsForPedestrians::get_shrinking_factor(),
                                  sampleIntegralChannels);
            //image_loading_time += (int)round(omp_get_wtime()) - tmp_time;
            setDatum(initialNumberOfTrainingSamples + numNegativesSamplesAdded,
                     metaDatum, sampleIntegralChannels);

            numNegativesSamplesAdded += 1;
            ++progress_indicator;
        }

    } // end of "for each background image"



    if (numSkippedImages > 0)
    {
        const float skippedFraction = static_cast<float>(numSkippedImages) / filenamesBackground.size();
        printf("Skipped %zi images (out of %zi, %.3f%%) because they where too small (or too big to process)\n",
               numSkippedImages, filenamesBackground.size(), skippedFraction*100);
    }
    printf("Time elapsed while loading negative images: %02d:%02d:%02d\n",
           image_loading_time/3600, (image_loading_time%3600)/60, image_loading_time%60);
    printf("Time elapsed while extracting features from negative images: %02d:%02d:%02d\n",
           feature_extraction_time/3600, (feature_extraction_time%3600)/60, feature_extraction_time%60);

    return;
}
Code Example #7
void TrainingData::addHardNegativeSamples(const std::vector<std::string> &filenamesHardNegatives,
                                          const point_t &modelWindowSize, const point_t &dataOffset)
{
    int feature_extraction_time = 0, image_loading_time = 0, tmp_time;

    const size_t
            initialNumberOfTrainingSamples = get_num_examples(),
            finalNumberOfTrainingSamples = initialNumberOfTrainingSamples + filenamesHardNegatives.size();
    if(finalNumberOfTrainingSamples > getMaxNumExamples())
    {
        throw std::runtime_error("TrainingData::addHardNegativeSamples "
                                 "is trying to add more data than initially specified");
    }


    printf("\nCollecting %zi hard negative samples\n", filenamesHardNegatives.size());
    doppia::progress_display_with_eta progress_indicator(filenamesHardNegatives.size());


    meta_datum_t  metaDatum;
    integral_channels_t sampleIntegralChannels;

    // integralChannelsComputer is already multithreaded, so there is no benefit in parallelizing this for loop
    for (size_t filenameIndex = 0; filenameIndex < filenamesHardNegatives.size(); filenameIndex +=1)
    {
        tmp_time = (int)round(omp_get_wtime());
        gil::rgb8_image_t image;
        gil::rgb8c_view_t image_view = doppia::open_image(filenamesHardNegatives[filenameIndex].c_str(), image);

        const boost::filesystem::path file_path = filenamesHardNegatives[filenameIndex];
#if BOOST_VERSION <= 104400
        const std::string filename = file_path.filename();
#else
        const std::string filename = file_path.filename().string();
#endif
        _integralChannelsComputer->set_image(image_view, filename);
        image_loading_time += (int)round(omp_get_wtime()) - tmp_time;
        tmp_time = (int)round(omp_get_wtime());
        _integralChannelsComputer->compute();
        feature_extraction_time += (int)round(omp_get_wtime()) - tmp_time;

        get_integral_channels(_integralChannelsComputer->get_integral_channels(),
                              modelWindowSize, dataOffset, doppia::IntegralChannelsForPedestrians::get_shrinking_factor(),
                              sampleIntegralChannels);

        metaDatum.filename = filenamesHardNegatives[filenameIndex];
        metaDatum.imageClass = _backgroundClassLabel;
        metaDatum.x = dataOffset.x();
        metaDatum.y = dataOffset.y();

        setDatum(initialNumberOfTrainingSamples + filenameIndex, metaDatum, sampleIntegralChannels);

        ++progress_indicator;
    } // end of "for each filename"
    printf("Time elapsed while loading images for hard negatives extraction: %02d:%02d:%02d\n",
           image_loading_time/3600, (image_loading_time%3600)/60, image_loading_time%60);
    printf("Time elapsed while extracting features from hard negatives: %02d:%02d:%02d\n",
           feature_extraction_time/3600, (feature_extraction_time%3600)/60, feature_extraction_time%60);

    return;
}
Code Example #8
void TrainingData::addNegativeSamples(const std::vector<std::string> &filenamesBackground,
                                      const point_t &modelWindowSize, const point_t &dataOffset,
                                      const size_t numNegativeSamplesToAdd)
{

    const size_t
            initialNumberOfTrainingSamples = getNumExamples(),
            finalNumberOfTrainingSamples = initialNumberOfTrainingSamples + numNegativeSamplesToAdd;
    if(finalNumberOfTrainingSamples > getMaxNumExamples())
    {
        throw std::runtime_error("TrainingData::addNegativeSamples is trying to add more data than initially specified");
    }

    printf("\nCollecting %zi random negative samples\n", numNegativeSamplesToAdd);
    boost::progress_display progress_indicator(numNegativeSamplesToAdd);

    meta_datum_t  metaDatum;
    integral_channels_t sampleIntegralChannels;

#if defined(DEBUG)
    srand(1);
#else
    srand(time(NULL));
#endif

    const int samplesPerImage = std::max<int>(1, numNegativeSamplesToAdd / filenamesBackground.size());

    // FIXME no idea what the +1 does
    const int
            minWidth = (modelWindowSize.x()+1 + 2*dataOffset.x()),
            minHeight = (modelWindowSize.y()+1 + 2*dataOffset.y());

    const float maxSkippedFraction = 0.25;

    size_t numNegativesSamplesAdded = 0, numSkippedImages = 0, filenameIndex = 0;

    // integralChannelsComputer is already multithreaded, so there is no benefit in parallelizing this for loop
    while (numNegativesSamplesAdded < numNegativeSamplesToAdd)
    {
        if (filenameIndex >= filenamesBackground.size())
        {
            // force to loop until we have reached the desired number of samples
            filenameIndex = 0;
        }

        const string &filename = filenamesBackground[filenameIndex];
        filenameIndex +=1;

        gil::rgb8c_view_t imageView;
        gil::rgb8_image_t image;
        imageView = doppia::open_image(filename.c_str(), image);

        if ((imageView.width() < minWidth) or (imageView.height() < minHeight))
        {
            // if input image is too small, we skip it
            //printf("Skipping negative sample %s, because it is too small\n", filename.c_str());
            numSkippedImages += 1;

            const float skippedFraction = static_cast<float>(numSkippedImages) / filenamesBackground.size();
            if (skippedFraction > maxSkippedFraction)
            {
                printf("Skipped %i images (out of %zi, %.3f%%) because they where too small\n",
                       numSkippedImages, filenamesBackground.size(), skippedFraction*100);

                throw std::runtime_error("Too many negatives images where skipped. Dataset needs to be fixed");
            }
            continue;
        }

        const int
                maxRandomX = (imageView.width() - modelWindowSize.x()+1 - 2*dataOffset.x()),
                maxRandomY = (imageView.height() - modelWindowSize.y()+1 - 2*dataOffset.y());

        _integralChannelsComputer.set_image(imageView);
        _integralChannelsComputer.compute();

        metaDatum.filename = filename;
        metaDatum.imageClass = _backgroundClassLabel;

        size_t numSamplesForImage = std::min<size_t>(samplesPerImage,
                                                     (numNegativeSamplesToAdd - numNegativesSamplesAdded));
        numSamplesForImage = 1; // overrides the value computed above: only one random crop is taken per image
        for (size_t randomSampleIndex = 0; randomSampleIndex < numSamplesForImage; randomSampleIndex += 1)
        {
            //const point_t::coordinate_t
            size_t
                    x = dataOffset.x() + rand() % maxRandomX,
                    y = dataOffset.y() + rand() % maxRandomY;
            //printf("random x,y == %i, %i\n", x,y);
            const point_t randomOffset(x,y);
            metaDatum.x = randomOffset.x(); metaDatum.y = randomOffset.y();
            get_integral_channels(_integralChannelsComputer.get_integral_channels(),
                                  modelWindowSize, randomOffset, _integralChannelsComputer.get_shrinking_factor(),
                                  sampleIntegralChannels);

            setDatum(initialNumberOfTrainingSamples + numNegativesSamplesAdded,
                     metaDatum, sampleIntegralChannels);

            numNegativesSamplesAdded += 1;
            ++progress_indicator;
        }

    } // end of "for each background image"



    if (numSkippedImages > 0)
    {
        const float skippedFraction = static_cast<float>(numSkippedImages) / filenamesBackground.size();
        printf("Skipped %zi images (out of %zi, %.3f%%) because they where too small\n",
               numSkippedImages, filenamesBackground.size(), skippedFraction*100);
    }


    return;
}
Code Example #9
File: point.hpp  Project: LNSEAB/mmaccel
	inline bool operator<( point_t<Origin> const& lhs, point_t<Origin> const& rhs ) noexcept
	{
		return lhs.x() * lhs.x() + lhs.y() * lhs.y() < rhs.x() * rhs.x() + rhs.y() * rhs.y();
	}
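This ordering compares squared distances from the origin, so points closer to the origin sort first without computing a square root. A hedged usage sketch, assuming point_t<origin_type::screen> can be constructed from (x, y) coordinates:

#include <algorithm>
#include <vector>

void sort_by_distance_example()
{
	// Hypothetical construction; the real point_t constructor may differ.
	std::vector<point_t<origin_type::screen>> pts = { {3, 4}, {1, 1}, {0, 5} };
	std::sort(pts.begin(), pts.end()); // uses the operator< defined above
}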
Code Example #10
File: point.hpp  Project: LNSEAB/mmaccel
	inline bool operator==( point_t<origin_type::client> const& lhs, point_t<origin_type::client> const& rhs ) noexcept
	{
		return lhs.window_handle() == rhs.window_handle() && lhs.x() == rhs.x() && lhs.y() == rhs.y();
	}
Code Example #11
File: point.hpp  Project: LNSEAB/mmaccel
	inline bool operator==( point_t<origin_type::screen> const& lhs, point_t<origin_type::screen> const& rhs ) noexcept
	{
		return lhs.x() == rhs.x() && lhs.y() == rhs.y();
	}