Example #1
/** Extract 3D point pairs from two registered images; returns the fraction of
    pixels whose reference point is valid but whose model point is missing */
float matToVec(const cv::Mat_<cv::Vec3f> &src_ref, const cv::Mat_<cv::Vec3f> &src_mod, std::vector<cv::Vec3f>& pts_ref, std::vector<cv::Vec3f>& pts_mod)
{
  pts_ref.clear();
  pts_mod.clear();
  int px_missing = 0;

  cv::MatConstIterator_<cv::Vec3f> it_ref = src_ref.begin();
  cv::MatConstIterator_<cv::Vec3f> it_mod = src_mod.begin();
  for (; it_ref != src_ref.end(); ++it_ref, ++it_mod)
  {
    if (!cv::checkRange(*it_ref))
      continue;

    pts_ref.push_back(*it_ref);
    if (cv::checkRange(*it_mod))
    {
      pts_mod.push_back(*it_mod);
    }
    else
    {
      pts_mod.push_back(cv::Vec3f(0.0f, 0.0f, 0.0f));
      ++px_missing;
    }
  }

  float ratio = 0.0f;
  if ((src_ref.cols > 0) && (src_ref.rows > 0))
    ratio = float(px_missing) / float(src_ref.cols * src_ref.rows);
  return ratio;
}
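A minimal usage sketch (image sizes and values are illustrative, not from the original project): two registered Vec3f maps are converted to point lists, with one model pixel deliberately invalidated.

#include <cstdio>
#include <limits>
#include <vector>
#include <opencv2/core/core.hpp>

int main()
{
    // two registered 3D point maps; one model pixel is marked invalid with a NaN
    cv::Mat_<cv::Vec3f> ref(480, 640, cv::Vec3f(0.0f, 0.0f, 1.0f));
    cv::Mat_<cv::Vec3f> mod(480, 640, cv::Vec3f(0.0f, 0.0f, 1.0f));
    mod(0, 0) = cv::Vec3f(std::numeric_limits<float>::quiet_NaN(), 0.0f, 0.0f);

    std::vector<cv::Vec3f> pts_ref, pts_mod;
    float missing = matToVec(ref, mod, pts_ref, pts_mod);
    std::printf("%zu reference points, missing ratio %.6f\n", pts_ref.size(), missing);
    return 0;
}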
Example #2
void EmotionDetector::output_HOG_frame(std::ofstream* hog_file, bool good_frame, const cv::Mat_<double>& hog_descriptor, int num_rows, int num_cols)
{

	// Using FHOGs, hence 31 channels
	int num_channels = 31;

	hog_file->write((char*)(&num_cols), 4);
	hog_file->write((char*)(&num_rows), 4);
	hog_file->write((char*)(&num_channels), 4);

	// Not the best way to store a bool, but it makes the file much easier to read back
	float good_frame_float;
	if (good_frame)
		good_frame_float = 1;
	else
		good_frame_float = -1;

	hog_file->write((char*)(&good_frame_float), 4);

	cv::MatConstIterator_<double> descriptor_it = hog_descriptor.begin();

	// Iteration order must match the packing order used in Extract_FHOG_descriptor
	for (int y = 0; y < num_cols; ++y)
	{
		for (int x = 0; x < num_rows; ++x)
		{
			for (int o = 0; o < num_channels; ++o)
			{

				float hog_data = (float)(*descriptor_it++);
				hog_file->write((char*)&hog_data, 4);
			}
		}
	}
}
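A usage sketch, assuming an EmotionDetector instance and a descriptor packed as rows * cols * 31 doubles in one row vector (the grid size and file name here are hypothetical).

#include <fstream>
#include <opencv2/core/core.hpp>

void write_one_frame(EmotionDetector& detector)
{
    int num_rows = 12, num_cols = 12;   // hypothetical 12x12 cell grid
    cv::Mat_<double> descriptor(1, num_rows * num_cols * 31, 0.0);

    std::ofstream hog_file("frame.hog", std::ios::out | std::ios::binary);
    detector.output_HOG_frame(&hog_file, true, descriptor, num_rows, num_cols);
}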
Example #3
 inline void
 cvToCloud(const cv::Mat_<cv::Point3f>& points3d, pcl::PointCloud<PointT>& cloud, const cv::Mat& mask = cv::Mat())
 {
   cloud.clear();
   // note: pcl's push_back() below re-marks the cloud as unorganized
   // (width = size, height = 1), so these dimensions are overwritten
   // as soon as any point is added
   cloud.width = points3d.size().width;
   cloud.height = points3d.size().height;
   cv::Mat_<cv::Point3f>::const_iterator point_it = points3d.begin(), point_end = points3d.end();
   const bool has_mask = !mask.empty();
   cv::Mat_<uchar>::const_iterator mask_it;
   if (has_mask)
     mask_it = mask.begin<uchar>();
   for (; point_it != point_end; ++point_it)
   {
     if (has_mask)
     {
       const bool masked_out = (*mask_it == 0);
       ++mask_it;
       if (masked_out)
         continue;
     }
     cv::Point3f p = *point_it;
     if (p.x != p.x || p.y != p.y || p.z != p.z) // throw out points with any NaN coordinate
       continue;
     PointT cp;
     cp.x = p.x;
     cp.y = p.y;
     cp.z = p.z;
     cloud.push_back(cp);
   }
 }
Example #4
  /**
   * \brief Convert an OpenCV collection of points to a pcl::PointCloud; the input
   * matrix should contain NaNs for invalid points.
   * @param points3d OpenCV matrix of nx1 3-channel points
   * @param cloud output cloud
   * @param rgb colour image used to colour the points (expected in BGR order)
   * @param mask optional mask; if non-empty it must have one entry per point
   * @param brg channel-order flag (unused in this snippet)
   */
  inline void
  cvToCloudXYZRGB(const cv::Mat_<cv::Point3f>& points3d, pcl::PointCloud<pcl::PointXYZRGB>& cloud, const cv::Mat& rgb,
                  const cv::Mat& mask, bool brg = true)
  {
    cloud.clear();
    cv::Mat_<cv::Point3f>::const_iterator point_it = points3d.begin(), point_end = points3d.end();
    cv::Mat_<cv::Vec3b>::const_iterator rgb_it = rgb.begin<cv::Vec3b>();
    cv::Mat_<uchar>::const_iterator mask_it;
    if(!mask.empty())
      mask_it = mask.begin<uchar>();
    for (; point_it != point_end; ++point_it, ++rgb_it)
    {
      if(!mask.empty())
      {
        // read the current mask entry, then advance the iterator
        const bool masked_out = (*mask_it == 0);
        ++mask_it;
        if (masked_out)
          continue;
      }

      cv::Point3f p = *point_it;
      if (p.x != p.x || p.y != p.y || p.z != p.z) // throw out points with any NaN coordinate
        continue;
      pcl::PointXYZRGB cp;
      cp.x = p.x;
      cp.y = p.y;
      cp.z = p.z;
      cp.r = (*rgb_it)[2]; //expecting in BGR format.
      cp.g = (*rgb_it)[1];
      cp.b = (*rgb_it)[0];
      cloud.push_back(cp);
    }
  }
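A usage sketch under the assumptions above: an nx1 point matrix with NaNs marking invalid entries, a BGR image, and a mask with one entry per point (all sizes and values are placeholders).

#include <opencv2/core/core.hpp>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

void build_coloured_cloud()
{
    const int rows = 480, cols = 640;
    cv::Mat_<cv::Point3f> points3d(rows * cols, 1, cv::Point3f(0.0f, 0.0f, 1.0f));
    cv::Mat rgb(rows, cols, CV_8UC3, cv::Scalar(0, 0, 255));   // BGR: pure red
    cv::Mat mask(rows, cols, CV_8UC1, cv::Scalar(255));        // non-zero = keep the point

    pcl::PointCloud<pcl::PointXYZRGB> cloud;
    cvToCloudXYZRGB(points3d, cloud, rgb, mask);
}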
Example #5
	// Create a row vector Felzenszwalb HOG descriptor from a given image
	void Extract_FHOG_descriptor(cv::Mat_<double>& descriptor, const cv::Mat& image, int& num_rows, int& num_cols, int cell_size)
	{
		
		dlib::array2d<dlib::matrix<float,31,1> > hog;
		if(image.channels() == 1)
		{
			dlib::cv_image<uchar> dlib_warped_img(image);
			dlib::extract_fhog_features(dlib_warped_img, hog, cell_size);
		}
		else
		{
			dlib::cv_image<dlib::bgr_pixel> dlib_warped_img(image);
			dlib::extract_fhog_features(dlib_warped_img, hog, cell_size);
		}

		// Convert to a usable format
		num_cols = hog.nc();
		num_rows = hog.nr();

		descriptor = cv::Mat_<double>(1, num_cols * num_rows * 31);
		cv::MatIterator_<double> descriptor_it = descriptor.begin();
		// note: the y/x loop bounds are swapped relative to their names; the
		// indexing is only in-bounds when the HOG grid is square (num_rows == num_cols)
		for(int y = 0; y < num_cols; ++y)
		{
			for(int x = 0; x < num_rows; ++x)
			{
				for(unsigned int o = 0; o < 31; ++o)
				{
					*descriptor_it++ = (double)hog[y][x](o);
				}
			}
		}
	}
Example #6
int crslic_segmentation::operator()(const cv::Mat& image, cv::Mat_<int>& labels)
{
	float directCliqueCost = 0.3;
	unsigned int const iterations = 3;
	double const diagonalCliqueCost = directCliqueCost / sqrt(2);

	bool isColorImage = (image.channels() == 3);
	std::vector<FeatureType> features;
	if (isColorImage)
		features.push_back(Color);
	else
		features.push_back(Grayvalue);

	features.push_back(Compactness);

	ContourRelaxation<int> crslic_obj(features);
	cv::Mat labels_temp = createBlockInitialization<int>(image.size(), settings.superpixel_size, settings.superpixel_size);

	crslic_obj.setCompactnessData(settings.superpixel_compactness);

	if (isColorImage)
	{
		cv::Mat imageYCrCb;
		cv::cvtColor(image, imageYCrCb, CV_BGR2YCrCb);
		std::vector<cv::Mat> imageYCrCbChannels;
		cv::split(imageYCrCb, imageYCrCbChannels);

		crslic_obj.setColorData(imageYCrCbChannels[0], imageYCrCbChannels[1], imageYCrCbChannels[2]);
	}
	else
		crslic_obj.setGrayvalueData(image.clone());

	crslic_obj.relax(labels_temp, directCliqueCost, diagonalCliqueCost, iterations, labels);
	return 1+*(std::max_element(labels.begin(), labels.end()));
}
Example #7
// matrix version
void multi_img::setPixel(unsigned int row, unsigned int col,
						 const cv::Mat_<Value>& values)
{
	assert((int)row < height && (int)col < width);
	assert(values.rows*values.cols == (int)size());
	Pixel &p = pixels[row*width + col];
	p.assign(values.begin(), values.end());

	for (size_t i = 0; i < size(); ++i)
		bands[i](row, col) = p[i];

	dirty(row, col) = 0;
}
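A usage sketch, assuming an already-constructed multi_img instance and that Value is the project's public band value type; the column vector must supply one value per band.

void set_one_pixel(multi_img& img)
{
    // overwrite pixel (row 10, col 20) with 0.5 in every band
    cv::Mat_<multi_img::Value> values((int)img.size(), 1);
    values.setTo(0.5);
    img.setPixel(10, 20, values);
}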
Example #8
File: PDM.cpp Project: ezzaouia/OpenFace
//===========================================================================
// Clamping the parameter values to be within 3 standard deviations
void PDM::Clamp(cv::Mat_<float>& local_params, cv::Vec6d& params_global, const FaceModelParameters& parameters)
{
	double n_sigmas = 3;
	cv::MatConstIterator_<double> e_it  = this->eigen_values.begin();
	cv::MatIterator_<float> p_it =  local_params.begin();

	double v;

	// go over all parameters
	for(; p_it != local_params.end(); ++p_it, ++e_it)
	{
		// Work out the maximum value
		v = n_sigmas*sqrt(*e_it);

		// if the value is too extreme, clamp it
		if(fabs(*p_it) > v)
		{
			// Dealing with positive and negative cases
			if(*p_it > 0.0)
			{
				*p_it=v;
			}
			else
			{
				*p_it=-v;
			}
		}
	}
	
	// do not let the pose get out of hand
	if(parameters.limit_pose)
	{
		if(params_global[1] > M_PI / 2)
			params_global[1] = M_PI/2;
		if(params_global[1] < -M_PI / 2)
			params_global[1] = -M_PI/2;
		if(params_global[2] > M_PI / 2)
			params_global[2] = M_PI/2;
		if(params_global[2] < -M_PI / 2)
			params_global[2] = -M_PI/2;
		if(params_global[3] > M_PI / 2)
			params_global[3] = M_PI/2;
		if(params_global[3] < -M_PI / 2)
			params_global[3] = -M_PI/2;
	}
	

}
Example #9
void OvershootClusterer::drawTo(cv::Mat_<cv::Vec3b> &out) const
{
    out.setTo(0);
    out.setTo(Scalar(255,255,255), img > 0);

    Mat_<Vec3b>::iterator itOut = out.begin(), itOutEnd = out.end();
    Mat_<unsigned char>::const_iterator it = smallestOvershootVisit.begin(),
                                        itEnd = smallestOvershootVisit.end();
    for (; itOut != itOutEnd && it != itEnd; ++it, ++itOut)
    {
        if ((*it) < CLUSTERER_OVERSHOOTS)
        {
            unsigned char nonRed = (*it)*(255/CLUSTERER_OVERSHOOTS);
            *itOut = Vec3b(nonRed,nonRed,255);
        }
    }
}
Example #10
    void
    cvToCloudXYZ(const cv::Mat_<float>& points3d, pcl::PointCloud<PointT>& cloud)
    {
      // assumes points3d stores consecutive x,y,z floats for each point and the
      // cloud is pre-sized so that width * height matches the point count
      const int width = cloud.width;
      const int height = cloud.height;
      cv::Mat_<float>::const_iterator begin = points3d.begin();

      for (int v = 0; v < height; ++v)
      {
        for (int u = 0; u < width; ++u)
        {
          PointT& p = cloud(u, v);
          p.x = *(begin++);
          p.y = *(begin++);
          p.z = *(begin++);
        }
      }

    }
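A usage sketch, assuming PointT is pcl::PointXYZ here: the cloud must be sized up front (the function only overwrites existing points), and the float matrix is read as consecutive x,y,z triples.

#include <opencv2/core/core.hpp>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

void fill_cloud()
{
    pcl::PointCloud<pcl::PointXYZ> cloud;
    cloud.resize(640 * 480);
    cloud.width = 640;      // organized layout so cloud(u, v) indexes correctly
    cloud.height = 480;

    cv::Mat_<float> points3d((int)cloud.size(), 3, 0.0f);   // one x,y,z row per point
    cvToCloudXYZ(points3d, cloud);
}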
Example #11
	void Visualise_FHOG(const cv::Mat_<double>& descriptor, int num_rows, int num_cols, cv::Mat& visualisation)
	{

		// First convert to dlib format
		dlib::array2d<dlib::matrix<float,31,1> > hog(num_rows, num_cols);
		
		cv::MatConstIterator_<double> descriptor_it = descriptor.begin();
		// same caveat as in Extract_FHOG_descriptor: the swapped y/x bounds assume
		// a square HOG grid (num_rows == num_cols)
		for(int y = 0; y < num_cols; ++y)
		{
			for(int x = 0; x < num_rows; ++x)
			{
				for(unsigned int o = 0; o < 31; ++o)
				{
					hog[y][x](o) = *descriptor_it++;
				}
			}
		}

		// Draw the FHOG to OpenCV format
		auto fhog_vis = dlib::draw_fhog(hog);
		visualisation = dlib::toMat(fhog_vis).clone();
	}
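A round-trip sketch combining Example #5 with this function: extract FHOG from a square grayscale patch, then render it for inspection (the cell size of 8 and the window name are arbitrary choices).

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

void show_fhog(const cv::Mat& aligned_face)   // e.g. a square 112x112 CV_8UC1 patch
{
    cv::Mat_<double> descriptor;
    int num_rows = 0, num_cols = 0;
    Extract_FHOG_descriptor(descriptor, aligned_face, num_rows, num_cols, 8);

    cv::Mat visualisation;
    Visualise_FHOG(descriptor, num_rows, num_cols, visualisation);
    cv::imshow("FHOG", visualisation);
    cv::waitKey(0);
}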
Example #12
float singleeyefitter::cvx::histKmeans(const cv::Mat_<float>& hist, int bin_min, int bin_max, int K, float init_centres[], cv::Mat_<uchar>& labels, cv::TermCriteria termCriteria)
{
    using namespace math;

    CV_Assert( (hist.rows == 1 || hist.cols == 1) && K > 0 );

    labels = cv::Mat_<uchar>::zeros(hist.size());
    int nbins = (int)hist.total();
    float binWidth = float(bin_max - bin_min)/nbins;
    float binStart = bin_min + binWidth/2;

    cv::Mat_<float> centres(K, 1, init_centres, 4);  // wraps the caller's array (step = sizeof(float))

    int iters = 0;
    bool finalRun = false;
    while (true)
    {
        ++iters;
        cv::Mat_<float> old_centres = centres.clone();

        int i_bin;
        cv::Mat_<float>::const_iterator i_hist;
        cv::Mat_<uchar>::iterator i_labels;
        cv::Mat_<float>::iterator i_centres;
        uchar label;

        float sumDist = 0;
        int movedCount = 0;

        // Step 1. Assign each element a label
        for (i_bin = 0, i_labels = labels.begin(), i_hist = hist.begin();
             i_bin < nbins;
             ++i_bin, ++i_labels, ++i_hist)
        {
            float bin_val = binStart + i_bin*binWidth;
            float minDist = sq(bin_val - centres(*i_labels));
            int curLabel = *i_labels;

            for (label = 0; label < K; ++label)
            {
                float dist = sq(bin_val - centres(label));
                if (dist < minDist)
                {
                    minDist = dist;
                    *i_labels = label;
                }
            }

            if (*i_labels != curLabel)
                movedCount++;

            sumDist += (*i_hist) * std::sqrt(minDist);
        }

        if (finalRun)
            return sumDist;

        // Step 2. Recalculate centres (reset them first, then accumulate weighted sums)
        centres.setTo(0.0f);
        cv::Mat_<float> counts(K, 1, 0.0f);
        for (i_bin = 0, i_labels = labels.begin(), i_hist = hist.begin();
             i_bin < nbins;
             ++i_bin, ++i_labels, ++i_hist)
        {
            float bin_val = binStart + i_bin*binWidth;

            centres(*i_labels) += (*i_hist) * bin_val;
            counts(*i_labels) += *i_hist;
        }
        for (label = 0; label < K; ++label)
        {
            if (counts(label) == 0)
                return std::numeric_limits<float>::infinity();

            centres(label) /= counts(label);
        }

        // Step 3. Detect termination criteria
        if (movedCount == 0)
            finalRun = true;
        else if ((termCriteria.type & cv::TermCriteria::COUNT) && iters >= termCriteria.maxCount)
            finalRun = true;
        else if (termCriteria.type & cv::TermCriteria::EPS)
        {
            float max_movement = 0;
            for (label = 0; label < K; ++label)
            {
                max_movement = std::max(max_movement, sq(centres(label) - old_centres(label)));
            }
            if (sqrt(max_movement) < termCriteria.epsilon)
                finalRun = true;
        }
    }
    return std::numeric_limits<float>::infinity();
}
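A usage sketch: cluster a 256-bin grayscale histogram into two modes. The initial centres and termination settings are guesses for illustration, not values from the original project.

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

void cluster_grey_histogram(const cv::Mat& grey)   // CV_8UC1 input image
{
    cv::Mat_<float> hist;
    int channels[] = { 0 };
    int histSize[] = { 256 };
    float range[] = { 0.0f, 256.0f };
    const float* ranges[] = { range };
    cv::calcHist(&grey, 1, channels, cv::Mat(), hist, 1, histSize, ranges);

    float init_centres[] = { 25.0f, 200.0f };
    cv::Mat_<uchar> labels;
    float within = singleeyefitter::cvx::histKmeans(
        hist, 0, 256, 2, init_centres, labels,
        cv::TermCriteria(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 50, 0.5));
    (void)within;   // labels(i) now holds the cluster (0 or 1) of histogram bin i
}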
Example #13
TagPoseMap estimate(TagCornerMap const& tags, cv::Vec<RealT, 4> const& camDeltaR, cv::Vec<RealT, 3> const& camDeltaX)
{
    TagPoseMap objects;

    //Pass the latest camera movement difference for prediction (if 3D filtering is enabled)
    mEstimatePose3D.setCamDelta(camDeltaR, camDeltaX);

    //Predict pose for all known tags with camera movement (if 3D filtering is enabled)
    mEstimatePose3D(objects);

    //Correct pose prediction with new observations
    std::map<
        const std::string,     //name of the object
        std::pair<
            std::vector<cv::Point3_<RealT> >,      //points in object
            std::vector<cv::Point2f> > >   //points in frame
    objectToPointMapping;

    auto configurationIt = mId2Configuration.begin();
    auto configurationEnd = mId2Configuration.end();
    for (const auto &tag : tags) {
        int tagId = tag.first;
        const cv::Mat_<cv::Point2f> corners(tag.second);

        while (configurationIt != configurationEnd
               && configurationIt->first < tagId)
            ++configurationIt;

        if (configurationIt != configurationEnd) {
            if (configurationIt->first == tagId) {
                const auto &configuration = configurationIt->second;
                if (configuration.second.mKeep) {
                    mEstimatePose3D(cv::format("tag_%d", tagId),
                                    configuration.second.mLocalcorners,
                                    corners,
                                    objects);
                }
                auto & pointMapping = objectToPointMapping[configuration.first];
                pointMapping.first.insert(
                    pointMapping.first.end(),
                    configuration.second.mCorners.begin(),
                    configuration.second.mCorners.end());
                pointMapping.second.insert(
                    pointMapping.second.end(),
                    corners.begin(),
                    corners.end());
            } else if (!mOmitOtherTags) {
                mEstimatePose3D(cv::format("tag_%d", tagId),
                                mDefaultTagCorners,
                                corners,
                                objects);
            }

        } else if (!mOmitOtherTags) {
            mEstimatePose3D(cv::format("tag_%d", tagId),
                            mDefaultTagCorners,
                            corners,
                            objects);
        }
    }

    for (auto& objectToPoints : objectToPointMapping) {
        mEstimatePose3D(objectToPoints.first,
                        objectToPoints.second.first,
                        cv::Mat_<cv::Point2f>(objectToPoints.second.second),
                        objects);
    }

    return objects;
}
Example #14
std::map<std::string, cv::Matx44d> estimate(const std::map<int, Quad> &tags) {

    std::map<std::string, cv::Matx44d> objects;

    std::map<
        const std::string,     //name of the object
        std::pair<
            std::vector<cv::Point3f>,      //points in object
            std::vector<cv::Point2f> > >   //points in frame
    objectToPointMapping;


    auto configurationIt = mId2Configuration.begin();
    auto configurationEnd = mId2Configuration.end();
    for (const auto &tag : tags) {
        int tagId = tag.first;
        const cv::Mat_<cv::Point2f> corners(tag.second);

        while (configurationIt != configurationEnd
               && configurationIt->first < tagId)
            ++configurationIt;

        if (configurationIt != configurationEnd) {
            if (configurationIt->first == tagId) {
                const auto &configuration = configurationIt->second;
                if (configuration.second.mKeep) {
                    computeTransformation(cv::format("tag_%d", tagId),
                                          configuration.second.mLocalcorners,
                                          corners,
                                          objects);
                }
                auto & pointMapping = objectToPointMapping[configuration.first];
                pointMapping.first.insert(
                    pointMapping.first.end(),
                    configuration.second.mCorners.begin(),
                    configuration.second.mCorners.end());
                pointMapping.second.insert(
                    pointMapping.second.end(),
                    corners.begin(),
                    corners.end());
            } else if (!mOmitOtherTags) {
                computeTransformation(cv::format("tag_%d", tagId),
                                      mDefaultTagCorners,
                                      corners,
                                      objects);
            }

        } else if (!mOmitOtherTags) {
            computeTransformation(cv::format("tag_%d", tagId),
                                  mDefaultTagCorners,
                                  corners,
                                  objects);
        }
    }

    for (auto& objectToPoints : objectToPointMapping) {
        computeTransformation(objectToPoints.first,
                              objectToPoints.second.first,
                              cv::Mat_<cv::Point2f>(objectToPoints.second.second),
                              objects);
    }

    return mFilter(objects);
}
Example #15
//===========================================================================
void CCNF_neuron::Response(cv::Mat_<float> &im, cv::Mat_<double> &im_dft, cv::Mat &integral_img, cv::Mat &integral_img_sq, cv::Mat_<float> &resp)
{

	int h = im.rows - weights.rows + 1;
	int w = im.cols - weights.cols + 1;
	
	// the patch area on which we will calculate responses
	cv::Mat_<float> I;

	if(neuron_type == 3)
	{
		// Perform normalisation across whole patch (ignoring the invalid values indicated by values <= 0)

		cv::Scalar mean;
		cv::Scalar std;
		
		// ignore missing values
		cv::Mat_<uchar> mask = im > 0;
		cv::meanStdDev(im, mean, std, mask);

		// if all values are the same, don't divide by 0
		if(std[0] != 0)
		{
			I = (im - mean[0]) / std[0];
		}
		else
		{
			I = (im - mean[0]);
		}

		I.setTo(0, mask == 0);
	}
	else
	{
		if(neuron_type == 0)
		{
			I = im;
		}
		else
		{
			printf("ERROR(%s,%d): Unsupported patch type %d!\n", __FILE__,__LINE__,neuron_type);
			abort();
		}
	}
  
	if(resp.empty())
	{		
		resp.create(h, w);
	}

	// The response from neuron before activation
	if(neuron_type == 3)
	{
		// In case of depth we use per area, rather than per patch normalisation
		matchTemplate_m(I, im_dft, integral_img, integral_img_sq, weights, weights_dfts, resp, CV_TM_CCOEFF); // the linear multiplication, efficient calc of response
	}
	else
	{
		matchTemplate_m(I, im_dft, integral_img, integral_img_sq, weights, weights_dfts, resp, CV_TM_CCOEFF_NORMED); // the linear multiplication, efficient calc of response
	}

	cv::MatIterator_<float> p = resp.begin();

	cv::MatIterator_<float> q1 = resp.begin(); // response for each pixel
	cv::MatIterator_<float> q2 = resp.end();

	// the logistic function (sigmoid) applied to the response
	while(q1 != q2)
	{
		*p++ = (2 * alpha) * 1.0 /(1.0 + exp( -(*q1++ * norm_weights + bias )));
	}

}
Example #16
void SVR_patch_expert::ResponseDepth(const cv::Mat_<float>& area_of_interest, cv::Mat_<double> &response)
{

	// How big the response map will be
	int response_height = area_of_interest.rows - weights.rows + 1;
	int response_width = area_of_interest.cols - weights.cols + 1;
	
	// the patch area on which we will calculate responses
	cv::Mat_<float> normalised_area_of_interest;
  
	if(response.rows != response_height || response.cols != response_width)
	{
		response.create(response_height, response_width);
	}

	if(type == 0)
	{
		// Perform normalisation across whole patch
		cv::Scalar mean;
		cv::Scalar std;
		
		// ignore missing values
		cv::Mat_<uchar> mask = area_of_interest > 0;
		cv::meanStdDev(area_of_interest, mean, std, mask);

		// if all values are the same, don't divide by 0
		if(std[0] == 0)
		{
			std[0] = 1;
		}

		normalised_area_of_interest = (area_of_interest - mean[0]) / std[0];

		// Set the invalid pixels to 0
		normalised_area_of_interest.setTo(0, mask == 0);
	}
	else
	{
		printf("ERROR(%s,%d): Unsupported patch type %d!\n", __FILE__,__LINE__,type);
		abort();
	}
  
	cv::Mat_<float> svr_response;
		
	// The empty matrix as we don't pass precomputed dft's of image
	cv::Mat_<double> empty_matrix_0(0,0,0.0);
	cv::Mat_<float> empty_matrix_1(0,0,0.0);
	cv::Mat_<float> empty_matrix_2(0,0,0.0);

	// Efficient calc of patch expert response across the area of interest
	matchTemplate_m(normalised_area_of_interest, empty_matrix_0, empty_matrix_1, empty_matrix_2, weights, weights_dfts, svr_response, CV_TM_CCOEFF); 

	response.create(svr_response.size());
	cv::MatIterator_<double> p = response.begin();

	cv::MatIterator_<float> q1 = svr_response.begin(); // response for each pixel
	cv::MatIterator_<float> q2 = svr_response.end();

	while(q1 != q2)
	{
		// the SVR response passed through a logistic regressor
		*p++ = 1.0/(1.0 + exp( -(*q1++ * scaling + bias )));
	}	
}
Example #17
File: PDM.cpp Project: ezzaouia/OpenFace
//===========================================================================
// Calculate the PDM's Jacobian over all parameters (rigid and non-rigid), the additional input W represents trust for each of the landmarks and is part of Non-Uniform RLMS
void PDM::ComputeJacobian(const cv::Mat_<float>& params_local, const cv::Vec6d& params_global, cv::Mat_<float> &Jacobian, const cv::Mat_<float> W, cv::Mat_<float> &Jacob_t_w)
{ 
	
	// number of vertices
	int n = this->NumberOfPoints();
		
	// number of non-rigid parameters
	int m = this->NumberOfModes();

	Jacobian.create(n * 2, 6 + m);
	
	float X,Y,Z;
	
	float s = (float) params_global[0];
  	
	cv::Mat_<double> shape_3D_d;
	cv::Mat_<double> p_local_d;
	params_local.convertTo(p_local_d, CV_64F);
	this->CalcShape3D(shape_3D_d, p_local_d);
	
	cv::Mat_<float> shape_3D;
	shape_3D_d.convertTo(shape_3D, CV_32F);

	cv::Vec3d euler(params_global[1], params_global[2], params_global[3]);
	cv::Matx33d currRot = Euler2RotationMatrix(euler);
	
	float r11 = (float) currRot(0,0);
	float r12 = (float) currRot(0,1);
	float r13 = (float) currRot(0,2);
	float r21 = (float) currRot(1,0);
	float r22 = (float) currRot(1,1);
	float r23 = (float) currRot(1,2);
	float r31 = (float) currRot(2,0);
	float r32 = (float) currRot(2,1);
	float r33 = (float) currRot(2,2);

	cv::MatIterator_<float> Jx =  Jacobian.begin();
	cv::MatIterator_<float> Jy =  Jx + n * (6 + m);
	cv::MatConstIterator_<double> Vx =  this->princ_comp.begin();
	cv::MatConstIterator_<double> Vy =  Vx + n*m;
	cv::MatConstIterator_<double> Vz =  Vy + n*m;

	for(int i = 0; i < n; i++)
	{
    
		X = shape_3D.at<float>(i,0);
		Y = shape_3D.at<float>(i+n,0);
		Z = shape_3D.at<float>(i+n*2,0);    
    
		// The rigid jacobian from the axis angle rotation matrix approximation using small angle assumption (R * R')
		// where R' = [1, -wz, wy
		//             wz, 1, -wx
		//             -wy, wx, 1]
		// And this is derived using the small angle assumption on the axis angle rotation matrix parametrisation

		// scaling term
		*Jx++ = (X  * r11 + Y * r12 + Z * r13);
		*Jy++ = (X  * r21 + Y * r22 + Z * r23);
		
		// rotation terms
		*Jx++ = (s * (Y * r13 - Z * r12) );
		*Jy++ = (s * (Y * r23 - Z * r22) );
		*Jx++ = (-s * (X * r13 - Z * r11));
		*Jy++ = (-s * (X * r23 - Z * r21));
		*Jx++ = (s * (X * r12 - Y * r11) );
		*Jy++ = (s * (X * r22 - Y * r21) );
		
		// translation terms
		*Jx++ = 1.0f;
		*Jy++ = 0.0f;
		*Jx++ = 0.0f;
		*Jy++ = 1.0f;

		for(int j = 0; j < m; j++,++Vx,++Vy,++Vz)
		{
			// How much the change of the non-rigid parameters (when object is rotated) affect 2D motion
			*Jx++ = (float) ( s*(r11*(*Vx) + r12*(*Vy) + r13*(*Vz)) );
			*Jy++ = (float) ( s*(r21*(*Vx) + r22*(*Vy) + r23*(*Vz)) );
		}
	}	

	// Adding the weights here
	cv::Mat Jacob_w = Jacobian.clone();
	
	if(cv::trace(W)[0] != W.rows) 
	{
		Jx =  Jacobian.begin();
		Jy =  Jx + n*(6+m);

		cv::MatIterator_<float> Jx_w =  Jacob_w.begin<float>();
		cv::MatIterator_<float> Jy_w =  Jx_w + n*(6+m);

		// Iterate over all Jacobian values and multiply them by the weight in diagonal of W
		for(int i = 0; i < n; i++)
		{
			float w_x = W.at<float>(i, i);
			float w_y = W.at<float>(i+n, i+n);

			for(int j = 0; j < Jacobian.cols; ++j)
			{
				*Jx_w++ = *Jx++ * w_x;
				*Jy_w++ = *Jy++ * w_y;
			}
		}
	}
	Jacob_t_w = Jacob_w.t();

}
Example #18
File: PDM.cpp Project: ezzaouia/OpenFace
//===========================================================================
// Calculate the PDM's Jacobian over rigid parameters (rotation, translation and scaling), the additional input W represents trust for each of the landmarks and is part of Non-Uniform RLMS 
void PDM::ComputeRigidJacobian(const cv::Mat_<float>& p_local, const cv::Vec6d& params_global, cv::Mat_<float> &Jacob, const cv::Mat_<float> W, cv::Mat_<float> &Jacob_t_w)
{
  	
	// number of verts
	int n = this->NumberOfPoints();
  
	Jacob.create(n * 2, 6);

	float X,Y,Z;

	float s = (float)params_global[0];
  	
	cv::Mat_<double> shape_3D_d;
	cv::Mat_<double> p_local_d;
	p_local.convertTo(p_local_d, CV_64F);
	this->CalcShape3D(shape_3D_d, p_local_d);
	
	cv::Mat_<float> shape_3D;
	shape_3D_d.convertTo(shape_3D, CV_32F);

	 // Get the rotation matrix
	cv::Vec3d euler(params_global[1], params_global[2], params_global[3]);
	cv::Matx33d currRot = Euler2RotationMatrix(euler);
	
	float r11 = (float) currRot(0,0);
	float r12 = (float) currRot(0,1);
	float r13 = (float) currRot(0,2);
	float r21 = (float) currRot(1,0);
	float r22 = (float) currRot(1,1);
	float r23 = (float) currRot(1,2);
	float r31 = (float) currRot(2,0);
	float r32 = (float) currRot(2,1);
	float r33 = (float) currRot(2,2);

	cv::MatIterator_<float> Jx = Jacob.begin();
	cv::MatIterator_<float> Jy = Jx + n * 6;

	for(int i = 0; i < n; i++)
	{
    
		X = shape_3D.at<float>(i,0);
		Y = shape_3D.at<float>(i+n,0);
		Z = shape_3D.at<float>(i+n*2,0);    
		
		// The rigid jacobian from the axis angle rotation matrix approximation using small angle assumption (R * R')
		// where R' = [1, -wz, wy
		//             wz, 1, -wx
		//             -wy, wx, 1]
		// And this is derived using the small angle assumption on the axis angle rotation matrix parametrisation

		// scaling term
		*Jx++ =  (X  * r11 + Y * r12 + Z * r13);
		*Jy++ =  (X  * r21 + Y * r22 + Z * r23);
		
		// rotation terms
		*Jx++ = (s * (Y * r13 - Z * r12) );
		*Jy++ = (s * (Y * r23 - Z * r22) );
		*Jx++ = (-s * (X * r13 - Z * r11));
		*Jy++ = (-s * (X * r23 - Z * r21));
		*Jx++ = (s * (X * r12 - Y * r11) );
		*Jy++ = (s * (X * r22 - Y * r21) );
		
		// translation terms
		*Jx++ = 1.0f;
		*Jy++ = 0.0f;
		*Jx++ = 0.0f;
		*Jy++ = 1.0f;

	}

	cv::Mat Jacob_w = cv::Mat::zeros(Jacob.rows, Jacob.cols, Jacob.type());
	
	Jx =  Jacob.begin();
	Jy =  Jx + n*6;

	cv::MatIterator_<float> Jx_w =  Jacob_w.begin<float>();
	cv::MatIterator_<float> Jy_w =  Jx_w + n*6;

	// Iterate over all Jacobian values and multiply them by the weight in diagonal of W
	for(int i = 0; i < n; i++)
	{
		float w_x = W.at<float>(i, i);
		float w_y = W.at<float>(i+n, i+n);

		for(int j = 0; j < Jacob.cols; ++j)
		{
			*Jx_w++ = *Jx++ * w_x;
			*Jy_w++ = *Jy++ * w_y;
		}		
	}

	Jacob_t_w = Jacob_w.t();
}
Example #19
//===========================================================================
void SVR_patch_expert::Response(const cv::Mat_<float>& area_of_interest, cv::Mat_<float>& response)
{

	int response_height = area_of_interest.rows - weights.rows + 1;
	int response_width = area_of_interest.cols - weights.cols + 1;
	
	// the patch area on which we will calculate responses
	cv::Mat_<float> normalised_area_of_interest;
  
	if(response.rows != response_height || response.cols != response_width)
	{
		response.create(response_height, response_width);
	}

	// If type is raw just normalise mean and standard deviation
	if(type == 0)
	{
		// Perform normalisation across whole patch
		cv::Scalar mean;
		cv::Scalar std;

		cv::meanStdDev(area_of_interest, mean, std);
		// Avoid division by zero
		if(std[0] == 0)
		{
			std[0] = 1;
		}
		normalised_area_of_interest = (area_of_interest - mean[0]) / std[0];
	}
	// If type is gradient, perform the image gradient computation
	else if(type == 1)
	{
		Grad(area_of_interest, normalised_area_of_interest);
	}
  	else
	{
		printf("ERROR(%s,%d): Unsupported patch type %d!\n", __FILE__,__LINE__, type);
		abort();
	}
	
	cv::Mat_<float> svr_response;

	// The empty matrix as we don't pass precomputed dft's of image
	cv::Mat_<double> empty_matrix_0(0,0,0.0);
	cv::Mat_<float> empty_matrix_1(0,0,0.0);
	cv::Mat_<float> empty_matrix_2(0,0,0.0);

	// Efficient calc of patch expert SVR response across the area of interest
	matchTemplate_m(normalised_area_of_interest, empty_matrix_0, empty_matrix_1, empty_matrix_2, weights, weights_dfts, svr_response, CV_TM_CCOEFF_NORMED); 
	
	response.create(svr_response.size());
	cv::MatIterator_<float> p = response.begin();

	cv::MatIterator_<float> q1 = svr_response.begin(); // response for each pixel
	cv::MatIterator_<float> q2 = svr_response.end();

	while(q1 != q2)
	{
		// the SVR response passed through a logistic regressor
		*p++ = 1.0/(1.0 + exp( -(*q1++ * scaling + bias )));
	}

}
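For reference, the per-pixel loop above is a standard logistic transform; a vectorized equivalent (a sketch, with scaling and bias being the expert's learned scalars) would be:

	cv::Mat_<float> lin = svr_response * scaling + bias;   // w*x + b per pixel
	cv::Mat_<float> tmp;
	cv::exp(-lin, tmp);                                    // exp(-(w*x + b))
	cv::Mat_<float> denom = 1.0f + tmp;
	response = 1.0f / denom;                               // 1 / (1 + exp(-(w*x + b)))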