void GradientOrientationFilter::computeOrientationAndMagnitudeForFloat(const Mat& image, Mat& filtered) const {
	for (int row = 0; row < image.rows; ++row) {
		const Vec2f* gradients = image.ptr<Vec2f>(row); // gradient for x and y
		Vec2f* gradientOrientations = filtered.ptr<Vec2f>(row); // orientation and magnitude
		for (int col = 0; col < image.cols; ++col) {
			const Vec2f& gradient = gradients[col];
			gradientOrientations[col][0] = computeOrientation(gradient[0], gradient[1]);
			gradientOrientations[col][1] = computeMagnitude(gradient[0], gradient[1]);
		}
	}
}
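// Note: the helper functions called above are not part of this listing. A
// minimal, hypothetical sketch of what they might look like, assuming the
// orientation is the gradient angle and the magnitude its Euclidean length
// (not necessarily the original implementation):
#include <cmath>

inline float computeOrientation(float gradientX, float gradientY) {
	return std::atan2(gradientY, gradientX); // gradient direction in radians
}

inline float computeMagnitude(float gradientX, float gradientY) {
	return std::hypot(gradientX, gradientY); // gradient strength
}

inline float computeSquaredMagnitude(float gradientX, float gradientY) {
	return gradientX * gradientX + gradientY * gradientY; // cheaper when only comparing magnitudes
}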
void ScreenOrientationController::updateOrientation()
{
    ASSERT(m_orientation);
    ASSERT(frame());

    FrameView* view = frame()->view();
    WebScreenOrientationType orientationType = screenOrientationType(view);
    if (orientationType == WebScreenOrientationUndefined) {
        // The embedder could not provide us with an orientation; deduce it ourselves.
        orientationType = computeOrientation(view);
    }
    ASSERT(orientationType != WebScreenOrientationUndefined);

    m_orientation->setType(orientationType);
    m_orientation->setAngle(screenOrientationAngle(view));
}
void ScreenOrientationController::updateOrientation()
{
    ASSERT(m_orientation);
    ASSERT(frame());
    ASSERT(frame()->host());

    ChromeClient& chromeClient = frame()->host()->chromeClient();
    WebScreenOrientationType orientationType = effectiveType(chromeClient);
    if (orientationType == WebScreenOrientationUndefined) {
        // The embedder could not provide us with an orientation; deduce it ourselves.
        orientationType = computeOrientation(chromeClient.screenInfo().rect, effectiveAngle(chromeClient));
    }
    ASSERT(orientationType != WebScreenOrientationUndefined);

    m_orientation->setType(orientationType);
    m_orientation->setAngle(effectiveAngle(chromeClient));
}
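// The computeOrientation fallback used above is not shown in this listing. A
// hedged sketch (not the actual Chromium logic) of how an orientation type
// could be deduced from the screen rect alone; computeOrientationFromRect is a
// hypothetical name:
static WebScreenOrientationType computeOrientationFromRect(const IntRect& screenRect)
{
    // Assumption: a taller-than-wide screen defaults to portrait-primary and
    // anything else to landscape-primary; distinguishing the secondary variants
    // would additionally require the rotation angle.
    return screenRect.height() >= screenRect.width()
        ? WebScreenOrientationPortraitPrimary
        : WebScreenOrientationLandscapePrimary;
}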
void Stroker::generateArc(const Vector& center, const Vector& start,
                          const Vector& end, Winding winding,
                          DynamicArray<float>* points)
{
    Vector realStart = start;
    Vector realEnd = end;

    // Map the endpoints back into the path's local space before measuring angles.
    inversePathTransform_.transform(realStart);
    inversePathTransform_.transform(realEnd);

    float startAngle = computeOrientation(realStart);
    float diff = computeAngle(realStart, realEnd);
    float radius = size_ / 2;

    // Flatness-based angular step: chosen so each chord deviates from the true
    // arc by at most about 0.125 units. ra reduces to fabs(radius), written as
    // the average of the x and y radii, which are identical here.
    double ra = (std::fabs(radius) + std::fabs(radius)) / 2;
    double da = acos(ra / (ra + 0.125f)) * 2;

    vplUint numSteps = static_cast<vplUint>(diff / da);

    float angle = 0.0f;

    Vector point;

    for (vplUint i = 0; i < numSteps; i++)
    {
        // Step along the arc; the winding decides the direction of travel.
        if (winding == cClockwise)
            angle = startAngle + i / float(numSteps) * diff;
        else
            angle = startAngle - i / float(numSteps) * diff;

        Vector offset(cos(angle), sin(angle));

        offset *= radius;

        // Apply the stroke's scale transform to the offset.
        scale_.transform(offset);

        point = center + offset;

        addPoint(points, point);
    }
}
void GradientOrientationFilter::computeOrientationAndMagnitudeForFloatN(const Mat& image, int originalChannels, Mat& filtered) const {
	for (int row = 0; row < image.rows; ++row) {
		const Vec2f* gradients = image.ptr<Vec2f>(row); // gradient for x and y
		Vec2f* gradientOrientations = filtered.ptr<Vec2f>(row); // orientation and magnitude
		for (int col = 0; col < image.cols; ++col) {
			Vec2f strongestGradient = gradients[0];
			float strongestSquaredMagnitude = computeSquaredMagnitude(strongestGradient[0], strongestGradient[1]);
			for (int ch = 1; ch < originalChannels; ++ch) {
				const Vec2f& gradient = gradients[ch];
				float squaredMagnitude = computeSquaredMagnitude(gradient[0], gradient[1]);
				if (squaredMagnitude > strongestSquaredMagnitude) {
					strongestSquaredMagnitude = squaredMagnitude;
					strongestGradient = gradient;
				}
			}
			gradientOrientations[col][0] = computeOrientation(strongestGradient[0], strongestGradient[1]);
			gradientOrientations[col][1] = std::sqrt(strongestSquaredMagnitude);
			gradients += originalChannels;
		}
	}
}
void Odometer::imu_cb(const sensor_msgs::Imu::ConstPtr &msg)
{
    // Before the odometer is ready, collect baseline accelerations and recompute their min/max.
    if (!m_bReady)
    {
        m_vBaseAcc.push_back(msg->linear_acceleration);
        genMinMaxAcc();
    }

    // Throttle processing to at most once every 0.1 s.
    if ((msg->header.stamp - m_LastTime).toSec() > 0.1)
    {
        geometry_msgs::Vector3 v3 = linearAccFilter(msg->linear_acceleration);
        computeOrientation(msg->orientation);
        computeDeltaPose(v3, msg->header.stamp);
        computeSpeed(v3, msg->header.stamp);

        m_LastTime = msg->header.stamp;

        if (m_bReady)
            m_Pub.publish(m_Odom);
    }
}
void AdaptiveManifoldFilterN::computeClusters(Mat1b& cluster, Mat1b& cluster_minus, Mat1b& cluster_plus)
{
    
    Mat1f difOrientation;
    if (jointCnNum > 1)
    {
        Mat1f initVec(1, jointCnNum);
        if (useRNG)
        {
            rnd.fill(initVec, RNG::UNIFORM, -0.5, 0.5);
        }
        else
        {
            for (int i = 0; i < (int)initVec.total(); i++)
                initVec(0, i) = (i % 2 == 0) ? 0.5f : -0.5f;
        }

        vector<Mat> difEtaSrc(jointCnNum);
        for (int i = 0; i < jointCnNum; i++)
            subtract(jointCn[i], etaFull[i], difEtaSrc[i]);

        Mat1f eigenVec(1, jointCnNum);
        computeEigenVector(difEtaSrc, cluster, eigenVec, num_pca_iterations_, initVec);

        computeOrientation(difEtaSrc, eigenVec, difOrientation);
        CV_DbgAssert(difOrientation.size() == srcSize);
    }
    else
    {
        subtract(jointCn[0], etaFull[0], difOrientation);
    }

    compare(difOrientation, 0, cluster_minus, CMP_LT);
    bitwise_and(cluster_minus, cluster, cluster_minus);

    compare(difOrientation, 0, cluster_plus, CMP_GE);
    bitwise_and(cluster_plus, cluster, cluster_plus);
}
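// The computeOrientation overload used above is not included here. A hedged
// sketch of what it is assumed to do: project each pixel's (joint - eta)
// difference onto the dominant eigenvector, so that the sign of the result can
// split the cluster. computeOrientationSketch is a hypothetical stand-in, not
// the library function:
static void computeOrientationSketch(const vector<Mat>& difEtaSrc, const Mat1f& eigenVec, Mat1f& dst)
{
    dst.create(difEtaSrc[0].size());
    dst.setTo(0.0f);
    for (size_t i = 0; i < difEtaSrc.size(); i++)
        scaleAdd(difEtaSrc[i], eigenVec(0, (int)i), dst, dst); // dst += difEtaSrc[i] * eigenVec[i]
}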
void GradientOrientationFilter::createGradientLut() {
	union {
		ushort index;
		struct {
			uchar x, y;
		} gradient;
	} gradientCode;
	// build the look-up table for gradient images of depth CV_8U
	// index of the look-up table is the binary concatenation of the gradients of x and y
	// values inside the look-up table are the orientation and magnitude
	gradientCode.gradient.x = 0;
	for (int x = 0; x < 256; ++x) {
		float gradientX = (x - 127.f) / 255.f;
		gradientCode.gradient.y = 0;
		for (int y = 0; y < 256; ++y) {
			float gradientY = (y - 127.f) / 255.f;
			gradientLut[gradientCode.index][0] = computeOrientation(gradientX, gradientY);
			gradientLut[gradientCode.index][1] = computeMagnitude(gradientX, gradientY);
			++gradientCode.gradient.y;
		}
		++gradientCode.gradient.x;
	}
}
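// A hypothetical usage sketch for the table built above: for a CV_8UC2 gradient
// image, the two bytes of each pixel form exactly the ushort index produced by
// the union, so orientation and magnitude become a single lookup per pixel.
// computeOrientationAndMagnitudeForUchar is an assumed name mirroring the float
// version earlier in this listing:
void GradientOrientationFilter::computeOrientationAndMagnitudeForUchar(const Mat& image, Mat& filtered) const {
	for (int row = 0; row < image.rows; ++row) {
		const ushort* gradientCodes = image.ptr<ushort>(row); // packed x and y gradients
		Vec2f* gradientOrientations = filtered.ptr<Vec2f>(row); // orientation and magnitude
		for (int col = 0; col < image.cols; ++col)
			gradientOrientations[col] = gradientLut[gradientCodes[col]];
	}
}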
void LDB::compute( const cv::Mat& image,
                   std::vector<cv::KeyPoint>& _keypoints,
                   cv::Mat& _descriptors,
				   bool flag) const
{
    // Added to work with color images.
    cv::Mat _image;
    image.copyTo(_image);

	if(_image.empty() )
		return;

	//ROI handling
	int halfPatchSize = patchSize / 2;
	int border = halfPatchSize*1.415 + 1;
	if( _image.type() != CV_8UC1 )
		cvtColor(_image, _image, CV_BGR2GRAY);
	int levelsNum = 0;
	for( size_t i = 0; i < _keypoints.size(); i++ )
		levelsNum = std::max(levelsNum, std::max(_keypoints[i].octave, 0));
	levelsNum++;
	
	//Compute Orientation
    if(flag == 1){
       computeOrientation(_image, _keypoints, halfPatchSize);
    }

	// Pre-compute the scale pyramids
    std::vector<cv::Mat> imagePyramid(levelsNum);
	for (int level = 0; level < levelsNum; ++level)
	{
		float scale = 1/getScale(level, firstLevel, scaleFactor);
        cv::Size sz(cvRound(_image.cols*scale), cvRound(_image.rows*scale));
        cv::Size wholeSize(sz.width + border*2, sz.height + border*2);
        cv::Mat temp(wholeSize, _image.type()), masktemp;
        imagePyramid[level] = temp(cv::Rect(border, border, sz.width, sz.height));

		// Compute the resized image
		if( level != firstLevel )
		{
			if( level < firstLevel )
                resize(_image, imagePyramid[level], sz, 0, 0, cv::INTER_LINEAR);
			else
                resize(imagePyramid[level-1], imagePyramid[level], sz, 0, 0, cv::INTER_LINEAR);

            cv::copyMakeBorder(imagePyramid[level], temp, border, border, border, border,
                cv::BORDER_REFLECT_101+cv::BORDER_ISOLATED);
		}
		else
            cv::copyMakeBorder(_image, temp, border, border, border, border,
                cv::BORDER_REFLECT_101);
	}

	// Pre-compute the keypoints (we keep the best over all scales, so this has to be done beforehand)
    std::vector < std::vector<cv::KeyPoint> > allKeypoints;

	// Cluster the input keypoints depending on the level they were computed at
	allKeypoints.resize(levelsNum);
    for (std::vector<cv::KeyPoint>::iterator keypoint = _keypoints.begin(),
		keypointEnd = _keypoints.end(); keypoint != keypointEnd; ++keypoint)
		allKeypoints[keypoint->octave].push_back(*keypoint);

	// Make sure we rescale the coordinates
	for (int level = 0; level < levelsNum; ++level)
	{
		if (level == firstLevel)
			continue;

        std::vector<cv::KeyPoint> & keypoints = allKeypoints[level];
		float scale = 1/getScale(level, firstLevel, scaleFactor);
        for (std::vector<cv::KeyPoint>::iterator keypoint = keypoints.begin(),
			keypointEnd = keypoints.end(); keypoint != keypointEnd; ++keypoint)
			keypoint->pt *= scale;
	}

    cv::Mat descriptors;
    std::vector<cv::Point> pattern;

	int nkeypoints = 0;
	for (int level = 0; level < levelsNum; ++level){
        std::vector<cv::KeyPoint>& keypoints = allKeypoints[level];
        cv::Mat& workingmat = imagePyramid[level];
		if(keypoints.size() > 1)
            cv::KeyPointsFilter::runByImageBorder(keypoints, workingmat.size(), border);

		nkeypoints += keypoints.size();
	}
	if( nkeypoints == 0 )
		_descriptors.release();
	else
	{
		_descriptors.create(nkeypoints, descriptorSize(), CV_8U);
		descriptors = _descriptors;
	}

	_keypoints.clear();
	int offset = 0;
	for (int level = 0; level < levelsNum; ++level)
	{
		// preprocess the resized image
        cv::Mat& workingmat = imagePyramid[level];
		// Get the features and compute their orientation
        std::vector<cv::KeyPoint>& keypoints = allKeypoints[level];
		if(keypoints.size() > 1)
            cv::KeyPointsFilter::runByImageBorder(keypoints, workingmat.size(), border);
		int nkeypoints = (int)keypoints.size();

		// Compute the descriptors
        cv::Mat desc;
		if (!descriptors.empty())
		{
			desc = descriptors.rowRange(offset, offset + nkeypoints);
		}

		offset += nkeypoints;
        //boxFilter(workingmat, workingmat, workingmat.depth(), Size(5,5), Point(-1,-1), true, BORDER_REFLECT_101);
        GaussianBlur(workingmat, workingmat, cv::Size(7, 7), 2, 2, cv::BORDER_REFLECT_101);
        cv::Mat integral_image;
        integral(workingmat, integral_image, CV_32S);
        computeDescriptors(workingmat, integral_image, patchSize, keypoints, desc, descriptorSize(), flag);

		// Copy to the output data
		if (level != firstLevel)
		{
			float scale = getScale(level, firstLevel, scaleFactor);
            for (std::vector<cv::KeyPoint>::iterator keypoint = keypoints.begin(),
				keypointEnd = keypoints.end(); keypoint != keypointEnd; ++keypoint)
				keypoint->pt *= scale;
		}
		// And add the keypoints to the output
		_keypoints.insert(_keypoints.end(), keypoints.begin(), keypoints.end());
	}
	
}
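// getScale is not included in this listing; in ORB-style pyramids it is usually
// the per-level scale factor. A hedged sketch assuming that convention:
#include <cmath>

static inline float getScale(int level, int firstLevel, double scaleFactor)
{
	return (float)std::pow(scaleFactor, (double)(level - firstLevel));
}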
int HolisticFeatureExtractor::extractFeatures(const LTKTraceGroup& traceGroup, const LTKCaptureDevice& captureDevice, const LTKScreenContext& screenContext, LTKPreprocessorInterface *ltkShapeRecPtr, float2DVector& featureVector)
{
	LOG( LTKLogger::LTK_LOGLEVEL_DEBUG) << 
        "Entered HolisticFeatureExtractor::extractFeatures"  << endl;

	LTKTrace preprocessedTrace;				// a trace of the trace group

	LTKTrace preprocessedTrace2;			// a trace of the trace group

	LTKTrace preprocessedTrace3;			// a trace of the trace group

	LTKTraceGroup preprocessedTraceGroup;

	LTKTraceGroup preprocessedTraceGroup2;

	LTKTraceGroup preprocessedTraceGroup3;

	int	traceIndex;				//	variable to loop over all traces of the trace group


	// preprocessing the traceGroup in 3 ways to extract 3 kinds of features
		
	preprocess(traceGroup, preprocessedTraceGroup, captureDevice, screenContext, ltkShapeRecPtr);

	preprocess2(traceGroup, preprocessedTraceGroup2, captureDevice, screenContext, ltkShapeRecPtr);

	preprocess3(traceGroup, preprocessedTraceGroup3, captureDevice, screenContext, ltkShapeRecPtr);

	//	extracting the feature vector

	for(traceIndex = 0; traceIndex < traceGroup.getNumTraces(); ++traceIndex)
	{
		preprocessedTrace = preprocessedTraceGroup.getTraceAt(traceIndex);

		preprocessedTrace2 = preprocessedTraceGroup2.getTraceAt(traceIndex);

		preprocessedTrace3 = preprocessedTraceGroup3.getTraceAt(traceIndex);

		// calling the compute features methods 

		floatVector features;

		// calculating features with preprocessedTrace
		
		features.push_back(computeEER(preprocessedTrace));

		features.push_back(computeOrientation(preprocessedTrace));

		// calculating features with preprocessedTrace2

		float TCL = computeTCL(preprocessedTrace2);

		TCL /= calculateBBoxPerimeter(screenContext);	// normalizing using the perimeter
		
		features.push_back(TCL);

		features.push_back(computePII(preprocessedTrace2, screenContext));

		// calculating features with preprocessedTrace3

		float swAng = computeSweptAngle(preprocessedTrace3);

		// normalizing the swept angle with swAngNormFactor x 360 degrees
		swAng /= (2*PI*m_swAngNormFactor);

		features.push_back(swAng);

		featureVector.push_back(features);

	}//traceIndex

	LOG( LTKLogger::LTK_LOGLEVEL_DEBUG) << 
        "Exiting HolisticFeatureExtractor::extractFeatures"  << endl;

	return SUCCESS;

}