Example No. 1
void Mixture::convolve(const HOGPyramid & pyramid, vector<HOGPyramid::Matrix> & scores,
                       vector<Indices> & argmaxes,
                       vector<vector<vector<Model::Positions> > > * positions) const
{
    if(empty() || pyramid.empty()) {
        scores.clear();
        argmaxes.clear();

        if(positions)
            positions->clear();

        return;
    }

    const int nbModels = models_.size();
    const int nbLevels = pyramid.levels().size();
    // Convolve with all the models
    vector<vector<HOGPyramid::Matrix> > tmp(nbModels);
    convolve(pyramid, tmp, positions);

    // In case of error
    if(tmp.empty()) {
        scores.clear();
        argmaxes.clear();

        if(positions)
            positions->clear();

        return;
    }

    // Resize the scores and argmaxes
    scores.resize(nbLevels);
    argmaxes.resize(nbLevels);
    int i;
    #pragma omp parallel for private(i)
    for(i = 0; i < nbLevels; ++i) {
        scores[i].resize(pyramid.levels()[i].rows() - maxSize().first + 1,
                         pyramid.levels()[i].cols() - maxSize().second + 1);
        argmaxes[i].resize(scores[i].rows(), scores[i].cols());

        for(int y = 0; y < scores[i].rows(); ++y) {
            for(int x = 0; x < scores[i].cols(); ++x) {
                int argmax = 0;

                for(int j = 1; j < nbModels; ++j)
                    if(tmp[j][i](y, x) > tmp[argmax][i](y, x))
                        argmax = j;

                scores[i](y, x) = tmp[argmax][i](y, x);
                argmaxes[i](y, x) = argmax;
            }
        }
    }
}
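// A minimal sketch (not from the original source) of how the per-level output of
// Example No. 1 could be consumed: scan each level for the best-scoring root location
// and report which model produced it. The header names and the FFLD namespace are
// assumptions matching the library layout used above.
#include "HOGPyramid.h"
#include "Mixture.h"

#include <cstdio>
#include <limits>
#include <vector>

using namespace FFLD;
using namespace std;

void printBestPerLevel(const vector<HOGPyramid::Matrix> & scores,
                       const vector<Mixture::Indices> & argmaxes)
{
    for (size_t i = 0; i < scores.size(); ++i) {
        if (!scores[i].rows() || !scores[i].cols())
            continue;

        HOGPyramid::Scalar best = -numeric_limits<HOGPyramid::Scalar>::infinity();
        int bestX = 0;
        int bestY = 0;

        for (int y = 0; y < scores[i].rows(); ++y) {
            for (int x = 0; x < scores[i].cols(); ++x) {
                if (scores[i](y, x) > best) {
                    best = scores[i](y, x);
                    bestX = x;
                    bestY = y;
                }
            }
        }

        // argmaxes[i](y, x) holds the index of the model that scored highest at (y, x)
        printf("level %d: best score %f from model %d at (x=%d, y=%d)\n",
               static_cast<int>(i), static_cast<double>(best),
               argmaxes[i](bestY, bestX), bestX, bestY);
    }
}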
Example No. 2
void printHogSizes(HOGPyramid pyramid){
    int nlevels = pyramid.levels().size();

    for(int level = 0; level < nlevels; level++){ 
        //const float* raw_hog = pyramid.levels()[level].data()->data();
        int width = pyramid.levels()[level].cols();
        int height = pyramid.levels()[level].rows();
        int depth = pyramid.NbFeatures;
        printf("level %d: width=%d, height=%d, depth=%d \n", level, width, height, depth);
    }
}
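// A minimal usage sketch (not from the original source) for the helper above. It
// assumes FFLD's JPEGImage class and the HOGPyramid(image, padx, pady, interval)
// constructor; the file name, padding and interval values are only illustrative.
#include "HOGPyramid.h"
#include "JPEGImage.h"

using namespace FFLD;

void printHogSizes(HOGPyramid pyramid); // defined in the example above

int main()
{
    const JPEGImage image("image.jpg"); // illustrative input file

    if (image.empty())
        return -1;

    // 1 cell of padding on each side, 5 levels per octave (illustrative values)
    const HOGPyramid pyramid(image, 1, 1, 5);

    printHogSizes(pyramid);

    return 0;
}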
Example No. 3
// Writes each HOG level as a CSV file with one descriptor per row:
// nRows = width*height, nCols = depth (NbFeatures)
void writePyraToCsv(HOGPyramid pyramid){
    int nlevels = pyramid.levels().size();
    for(int level = 0; level < nlevels; level++){
        //printf("writing to CSV: level %d \n", level);
        const float* raw_hog = pyramid.levels()[level].data()->data();
        int width = pyramid.levels()[level].cols();
        int height = pyramid.levels()[level].rows();
        int depth = pyramid.NbFeatures;
        ostringstream fname;
        fname << "../piggyHOG_results/level" << level << ".csv"; //TODO: get orig img name into the CSV name.
    
        int nCols = depth; //one descriptor per row
        int nRows = width*height;

        //TODO: also write (depth, width, height) -- in some order -- to the top of the CSV file.
        writeCsv_2dFloat(raw_hog, nRows, nCols, fname.str());
    }
}
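// writeCsv_2dFloat is called above but not shown in this example. A sketch (not from
// the original source) of what such a helper could look like, inferred from the call
// site writeCsv_2dFloat(raw_hog, nRows, nCols, fname.str()): the raw buffer is treated
// as a row-major nRows x nCols array and written one comma-separated row per line.
#include <fstream>
#include <string>

void writeCsv_2dFloat(const float* data, int nRows, int nCols, const std::string & fname)
{
    std::ofstream out(fname.c_str());

    for (int r = 0; r < nRows; ++r) {
        for (int c = 0; c < nCols; ++c) {
            out << data[r * nCols + c];

            if (c + 1 < nCols)
                out << ',';
        }

        out << '\n';
    }
}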
Example No. 4
Patchwork::Patchwork(const HOGPyramid & pyramid) : padx_(pyramid.padx()), pady_(pyramid.pady()),
	interval_(pyramid.interval())
{
	// Remove the padding from the bottom/right sides since convolutions with Fourier wrap around
	const int nbLevels = pyramid.levels().size();
	
	rectangles_.resize(nbLevels);
	
	for (int i = 0; i < nbLevels; ++i) {
		rectangles_[i].first.setWidth(pyramid.levels()[i].cols() - padx_);
		rectangles_[i].first.setHeight(pyramid.levels()[i].rows() - pady_);
	}
	
	// Build the patchwork planes
	const int nbPlanes = BLF(rectangles_);
	
	// Constructs an empty patchwork in case of error
	if (nbPlanes <= 0)
		return;
	
	planes_.resize(nbPlanes);
	
	for (int i = 0; i < nbPlanes; ++i) {
		planes_[i] = Plane::Constant(MaxRows_, HalfCols_, Cell::Zero());
		
		Map<HOGPyramid::Level, Aligned>
			plane(reinterpret_cast<HOGPyramid::Cell *>(planes_[i].data()), MaxRows_, HalfCols_ * 2);
		
		// Set the last feature to 1
		for (int y = 0; y < MaxRows_; ++y)
			for (int x = 0; x < MaxCols_; ++x)
				plane(y, x)(HOGPyramid::NbFeatures - 1) = 1.0f;
	}
	
	// Recopy the pyramid levels into the planes
	for (int i = 0; i < nbLevels; ++i) {
		Map<HOGPyramid::Level, Aligned>
			plane(reinterpret_cast<HOGPyramid::Cell *>(planes_[rectangles_[i].second].data()),
				  MaxRows_, HalfCols_ * 2);
		
		plane.block(rectangles_[i].first.y(), rectangles_[i].first.x(),
					rectangles_[i].first.height(), rectangles_[i].first.width()) =
			pyramid.levels()[i].topLeftCorner(rectangles_[i].first.height(),
											  rectangles_[i].first.width());
	}
	
	// Transform the planes
	int i;
#pragma omp parallel for private(i)
	for (i = 0; i < nbPlanes; ++i)
#ifndef FFLD_HOGPYRAMID_DOUBLE
		fftwf_execute_dft_r2c(Forwards_, reinterpret_cast<float *>(planes_[i].data()->data()),
							  reinterpret_cast<fftwf_complex *>(planes_[i].data()->data()));
#else
		fftw_execute_dft_r2c(Forwards_, reinterpret_cast<double *>(planes_[i].data()->data()),
							 reinterpret_cast<fftw_complex *>(planes_[i].data()->data()));
#endif
}
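// The constructor above relies on class-wide state (MaxRows_, HalfCols_ and the FFTW
// plan Forwards_) that must be set up beforehand. A minimal usage sketch (not from the
// original source), assuming FFLD's static Patchwork::Init(maxRows, maxCols); the way
// the maximum level size is computed here is only illustrative.
#include "HOGPyramid.h"
#include "Patchwork.h"

#include <algorithm>

using namespace FFLD;
using namespace std;

bool buildPatchwork(const HOGPyramid & pyramid)
{
	int maxRows = 0;
	int maxCols = 0;
	
	for (size_t i = 0; i < pyramid.levels().size(); ++i) {
		maxRows = max(maxRows, static_cast<int>(pyramid.levels()[i].rows()) - pyramid.pady());
		maxCols = max(maxCols, static_cast<int>(pyramid.levels()[i].cols()) - pyramid.padx());
	}
	
	// Init sizes the planes and creates the FFTW plans; it must succeed (and only
	// needs to run once) before any Patchwork is constructed
	if (!Patchwork::Init(maxRows, maxCols))
		return false;
	
	const Patchwork patchwork(pyramid);
	
	return true;
}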
Example No. 5
void CFFLD::detect(const Mixture & mixture, int width, int height, const HOGPyramid & pyramid,
				   double threshold, double overlap, const string image, ostream & out,
				   const string & images, vector<Detection> & detections,
				   vector<DetectionResult> & vResult)
{
	// Compute the scores
	vector<HOGPyramid::Matrix> scores;
	vector<Mixture::Indices> argmaxes;
	vector<vector<vector<Model::Positions> > > positions;
	
	if (!images.empty())
	{
		mixture.convolve(pyramid, scores, argmaxes, &positions);
	}
	else
	{
		mixture.convolve(pyramid, scores, argmaxes);
	}
	

	// Cache the size of the models
	vector<pair<int, int> > sizes(mixture.models().size());
	
	for (int i = 0; i < sizes.size(); ++i)
	{
		sizes[i] = mixture.models()[i].rootSize();
	}
	
	// For each scale
	for (int i = pyramid.interval(); i < scores.size(); ++i) 
	{
		// Scale = 8 / 2^(1 - i / interval)
		const double scale = pow(2.0, static_cast<double>(i) / pyramid.interval() + 2.0);
		
		const int rows = scores[i].rows();
		const int cols = scores[i].cols();
		
		for (int y = 0; y < rows; ++y) 
		{
			for (int x = 0; x < cols; ++x) 
			{
				const float score = scores[i](y, x);
				
				if (score > threshold) 
				{
					if (((y == 0) || (x == 0) || (score > scores[i](y - 1, x - 1))) &&
						((y == 0) || (score > scores[i](y - 1, x))) &&
						((y == 0) || (x == cols - 1) || (score > scores[i](y - 1, x + 1))) &&
						((x == 0) || (score > scores[i](y, x - 1))) &&
						((x == cols - 1) || (score > scores[i](y, x + 1))) &&
						((y == rows - 1) || (x == 0) || (score > scores[i](y + 1, x - 1))) &&
						((y == rows - 1) || (score > scores[i](y + 1, x))) &&
						((y == rows - 1) || (x == cols - 1) || (score > scores[i](y + 1, x + 1)))) 
					{
						FFLD::Rectangle bndbox((x - pyramid.padx()) * scale + 0.5,
											   (y - pyramid.pady()) * scale + 0.5,
											   sizes[argmaxes[i](y, x)].second * scale + 0.5,
											   sizes[argmaxes[i](y, x)].first * scale + 0.5);
						
						// Truncate the object
						bndbox.setX(max(bndbox.x(), 0));
						bndbox.setY(max(bndbox.y(), 0));
						bndbox.setWidth(min(bndbox.width(), width - bndbox.x()));
						bndbox.setHeight(min(bndbox.height(), height - bndbox.y()));
						
						if (!bndbox.empty())
						{
							detections.push_back(Detection(score, i, x, y, bndbox));
						}
					}
				}
			}
		}
	}
	
	// Non maxima suppression
	sort(detections.begin(), detections.end());
	
	for (int i = 1; i < detections.size(); ++i)
		detections.resize(remove_if(detections.begin() + i, detections.end(),
									Intersector(detections[i - 1], overlap, true)) -
						  detections.begin());
	
	// Print the detection
	const size_t lastDot = image.find_last_of('.');
	
	string id = image.substr(0, lastDot);
	
	const size_t lastSlash = id.find_last_of("/\\");
	
	if (lastSlash != string::npos)
		id = id.substr(lastSlash + 1);
	
	if (out) 
	{
#pragma omp critical
		for (int i = 0; i < detections.size(); ++i)
		{
			out << id << ' ' << detections[i].score << ' ' << (detections[i].left() + 1) << ' '
				<< (detections[i].top() + 1) << ' ' << (detections[i].right() + 1) << ' '
				<< (detections[i].bottom() + 1) << endl;
		}
	}

	// Output the results as OpenCV rectangles
	for (int j = 0; j < detections.size(); ++j)
	{
		DetectionResult result;
		
		// The position of the root one octave below
		const int argmax = argmaxes[detections[j].l](detections[j].y, detections[j].x);
		const int x2 = detections[j].x * 2 - pyramid.padx();
		const int y2 = detections[j].y * 2 - pyramid.pady();
		const int l = detections[j].l - pyramid.interval();
		
		const double scale = pow(2.0, static_cast<double>(l) / pyramid.interval() + 2.0);
		
		// Part positions are only available if they were requested from the mixture above
		if (!positions.empty())
		{
			for (int k = 0; k < positions[argmax].size(); ++k)
			{
				const FFLD::Rectangle bndbox((positions[argmax][k][l](y2, x2)(0) -
					pyramid.padx()) * scale + 0.5,
					(positions[argmax][k][l](y2, x2)(1) - pyramid.pady()) * scale + 0.5,
					mixture.models()[argmax].partSize().second * scale + 0.5,
					mixture.models()[argmax].partSize().first * scale + 0.5);
				Rect rtPart(bndbox.x_, bndbox.y_, bndbox.width_, bndbox.height_);
				result.vParts.push_back(rtPart);
			}
		}
		
		Rect rtRoot(detections[j].x_, detections[j].y_, detections[j].width_, detections[j].height_);
		result.rtRoot = rtRoot;
		
		vResult.push_back(result);
	}
}
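// Example No. 5 relies on Detection and DetectionResult types that are not shown here.
// A sketch (not from the original source) of what they plausibly look like, inferred
// from the member accesses above: score/l/x/y plus the bounding box, sorting by
// decreasing score for the non-maxima suppression, and OpenCV rectangles for the
// output. The direct use of x_/y_/width_/height_ assumes this variant of
// FFLD::Rectangle exposes those fields publicly.
#include "HOGPyramid.h"
#include "Rectangle.h"

#include <opencv2/core/core.hpp>

#include <vector>

struct Detection : public FFLD::Rectangle
{
	FFLD::HOGPyramid::Scalar score;
	int l; // pyramid level of the detection
	int x; // column in the level's score map
	int y; // row in the level's score map
	
	Detection(FFLD::HOGPyramid::Scalar score, int l, int x, int y, FFLD::Rectangle bndbox) :
	FFLD::Rectangle(bndbox), score(score), l(l), x(x), y(y)
	{
	}
	
	// Sort by decreasing score so the non-maxima suppression keeps the best detections
	bool operator<(const Detection & detection) const
	{
		return score > detection.score;
	}
};

struct DetectionResult
{
	cv::Rect rtRoot;              // root filter bounding box
	std::vector<cv::Rect> vParts; // part filter bounding boxes
};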
Example No. 6
void Mixture::convolve(const HOGPyramid & pyramid,
                       vector<vector<HOGPyramid::Matrix> > & scores,
                       vector<vector<vector<Model::Positions> > > * positions) const
{
    if(empty() || pyramid.empty()) {
        scores.clear();

        if(positions)
            positions->clear();

        return;
    }

    const int nbModels = models_.size();
    scores.resize(nbModels);

    if(positions)
        positions->resize(nbModels);

    // Transform the filters if needed (only one thread performs the caching)
    #pragma omp critical
    if(filterCache_.empty())
        cacheFilters();

    // Spin until the cache is ready, in case another thread is still filling it
    while(!cached_);

    // Create a patchwork
    const Patchwork patchwork(pyramid);
    // Convolve the patchwork with the filters
    vector<vector<HOGPyramid::Matrix> > convolutions(filterCache_.size());
    patchwork.convolve(filterCache_, convolutions);

    // In case of error
    if(convolutions.empty()) {
        scores.clear();

        if(positions)
            positions->clear();

        return;
    }

    // Save the offsets of each model in the filter list
    vector<int> offsets(nbModels);

    for(int i = 0, j = 0; i < nbModels; ++i) {
        offsets[i] = j;
        j += models_[i].parts_.size();
    }

    // For each model, gather its root/part convolutions and combine them into scores
    int i;
    #pragma omp parallel for private(i)
    for(i = 0; i < nbModels; ++i) {
        vector<vector<HOGPyramid::Matrix> > tmp(models_[i].parts_.size());

        for(size_t j = 0; j < tmp.size(); ++j)
            tmp[j].swap(convolutions[offsets[i] + j]);

        models_[i].convolve(pyramid, tmp, scores[i], positions ? & (*positions)[i] : 0);
    }
}