コード例 #1
0
bool StopLineDetector::EstimateVerticalLine(Mat &input, Mat &debugImage, Rect roi, Point2i searchedSlope, CVMath::Line *outputLine) {
	// Estimates a single line within `roi` whose direction is close to
	// `searchedSlope` by averaging all matching Hough segments.
	//
	// @param input         Source image; only the `roi` part is analyzed.
	// @param debugImage    Overlay image for visualization (only touched when debugEnabled).
	// @param roi           Region of `input` to search in.
	// @param searchedSlope Desired line direction (assumed ~unit length — TODO confirm at callers).
	// @param outputLine    Receives the averaged line (full-image coordinates) on success.
	// @return true if at least one matching segment was found.
	vector<Vec4i> foundLines;
	Mat localCopy;
	input(roi).copyTo(localCopy);
	Canny(localCopy, localCopy, 50, 200, 3);

	// rho = 1 px, theta = 1 deg, threshold = 10, minLineLength = 15, maxLineGap = 4
	HoughLinesP(localCopy, foundLines, 1, CV_PI / 180 , 10, 15, 4);

	// Accept segments deviating less than 20 degrees from the searched slope.
	const float COS_ANGLE_LIMIT = cos(20.0 / 180 * CV_PI);

	int resultListSize = 0;
	Point2f resultTangent(0.0f, 0.0f);
	Point2f resultPoint(0.0f, 0.0f);

	size_t listSize = foundLines.size();
	for (size_t i = 0; i < listSize; i++) {
		Vec4i l = foundLines[i];

		// Segment endpoints, shifted back into full-image coordinates.
		Point2f p0 = Point2f(l[0], l[1]) + Point2f(roi.x, roi.y);
		Point2f p1 = Point2f(l[2], l[3]) + Point2f(roi.x, roi.y);

		Point2f tangent = p0 - p1;
		float len = cv::norm(tangent);
		if (len <= 0.0f) {
			continue; // degenerate segment, no direction can be derived
		}
		tangent /= len;

		float dotProduct = tangent.dot(searchedSlope);
		if (dotProduct > COS_ANGLE_LIMIT || -dotProduct > COS_ANGLE_LIMIT ) {
			if(debugEnabled) {
				line(debugImage, p0, p1, Scalar(255, 0, 0), 2);
			}

			// BUGFIX: segments matching via the negative dot product point the
			// opposite way; flip them so opposing tangents do not cancel out
			// in the average below.
			if (dotProduct < 0) {
				tangent = -tangent;
			}

			resultPoint += p0;
			resultTangent += tangent;
			resultListSize++;
		}
	}

	if(resultListSize > 0) {
		// Average endpoint and average tangent define the resulting line.
		*outputLine = CVMath::Line (resultPoint / resultListSize, resultTangent / resultListSize);
		if(debugEnabled) {
			line(debugImage, outputLine->pointOnLine - outputLine->direction*20, outputLine->pointOnLine + outputLine->direction*20, Scalar(0, 100, 100), 2);
		}
		return true;
	}

	return false;
}
コード例 #2
0
ファイル: main.cpp プロジェクト: ZiangLi/VS2013
int main()
{
	// ����һ��ͼƬ��ԭ����  
	Mat img = imread("2.jpg");
	// ����һ����Ϊ "ԭ��"����  
	cvNamedWindow("ԭ��");
	imshow("ԭ��", img);
	//// �ڴ�������ʾ��Ϸԭ��
	//////////////////////////////////////////////////////////////////////////
	////������ȡ
	//Vec3f intensity = img.at<Vec3f>(90, 90);
	//float r = intensity.val[0];
	//float g = intensity.val[1];
	//float b = intensity.val[2];
	//cout << r << g << b << endl;
	//////////////////////////////////////////////////////////////////////////
	////ͼ��ʴ
	//Mat element = getStructuringElement(MORPH_RECT, Size(25, 25));
	//Mat dstImage;
	//erode(img, dstImage, element);
	//imshow("��ʴͼ", dstImage);
	//////////////////////////////////////////////////////////////////////////
	////��ֵ�˲�
	//Mat dstImage;
	//blur(img, dstImage, Size(70, 70));
	//imshow("��ֵ�˲�ͼ", dstImage);
	//////////////////////////////////////////////////////////////////////////
	////��Ե���
	//Mat dstImage, edge, grayImage;//��������
	//dstImage.create(img.size(), img.type());//��imgͬ��С�ľ���dst
	//cvtColor(img, grayImage, CV_BGR2GRAY);//ת��Ϊ�Ҷ�ͼ
	//blur(img, edge, Size(3, 3));//����
	//Canny(edge, edge, 3, 9, 3);//����canny��Ե���
	//imshow("��Ե���ͼ", edge);
	//////////////////////////////////////////////////////////////////////////
	////��ȡ��Ƶ
	//VideoCapture capture;
	//capture.open("3.mp4");
	//while (1)
	//{
	//	Mat frame;
	//	capture >> frame;
	//	imshow("��ȡ��Ƶ", frame);
	//	waitKey(100);
	//}
	//////////////////////////////////////////////////////////////////////////
	////��ȡ����ͷ
	//VideoCapture capture;
	//capture.open(0);
	//while (1)
	//{
	//	Mat frame;
	//	capture >> frame;
	//	imshow("��ȡ��Ƶ", frame);
	//	waitKey(300);
	//}
	//////////////////////////////////////////////////////////////////////////
	////��ȡ����ͷ���˲�����ʴ����Ե���
	//VideoCapture capture;
	//capture.open(0);
	//while (1)
	//{
	//	Mat frame;
	//	capture >> frame;
	//	imshow("����ͼ��", frame);

	//	Mat frame_blur, frame_erode, frame_canndy;//��������ͼ������˲�����ʴ����Ե��

	//	medianBlur(frame, frame_blur, 5);
	//	imshow("��ֵ�˲�", frame_blur);//��ֵ�˲�Ч��

	//	Canny(frame_blur, frame_canndy, 1, 50, 3);
	//	imshow("��Ե���", frame_canndy);//��Ե���Ч��

	//	Mat element = getStructuringElement(MORPH_RECT, Size(5, 5));
	//	erode(frame, frame_erode, element);
	//	imshow("��ʴЧ��", frame_erode);//��ʴЧ��

	//	Rect ccomp;
	//	floodFill(frame, Point(20, 20), Scalar(155, 255, 55), &ccomp, Scalar(20, 20, 20), Scalar(20, 20, 20));
	//	imshow("��ˮ���", frame);

	//	waitKey(30);
	//}
	//////////////////////////////////////////////////////////////////////////
	//string filename = "I.xml";
	//FileStorage fs(filename, FileStorage::WRITE);
	//fs << "iterationNr" << 100;
	//Mat R = Mat_<uchar >::eye(3, 3),
	//	T = Mat_<double>::zeros(3, 1);
	//fs << "R" << R; // Write cv::Mat  
	//fs << "T" << T;
	//fs << "strings" << "["; // text - string sequence
	//fs << "image1.jpg" << "Awesomeness" << "baboon.jpg";
	//fs << "]"; // close sequence
	//fs << "Mapping"; // text - mapping  
	//fs << "{" << "One" << 1;
	//fs << "Two" << 2 << "}";
	//fs.release();

	//FileStorage fs2;
	//fs2.open(filename, FileStorage::READ);
	//Mat R2, T2;
	//fs2["R"] >> R2;
	//fs2["T"] >> T2;

	//FileNode n = fs2["strings"];
	//if (n.type() != FileNode::SEQ)
	//{
	//	cerr << "Not a squence!" << endl;
	//	return 1;
	//}
	//FileNodeIterator it=n.begin(), it_end = n.end();
	//for (; it != it_end; it++)
	//	std::cout << string(*it) << endl;

////////////////////////////////////////////////////////////////
vector<Point2f> list;
float inf = 10000000.0;
vector<bool>used;
for (int i = 0; i < 2 * list.size(); i++)
{
	used.push_back(false);
}
vector<vector<float>> dist;
for (int j = 0; j < list.size(); j++)
{
	vector<float>line;
	for (int i = 0; i < list.size(); i++)
	{
		if (i == j)
			line.push_back(inf);
		else
		{
			Point2f p;
			p = list[j] - list[i];
			line.push_back(sqrt(p.dot(p)));
		}
	}
	for (int i = 0; i < list.size(); i++)
		line.push_back(inf);
	dist.push_back(line);
}
for (int i = 0; i < list.size(); i++)
{
	vector<float>line;
	for (int j = 0; j < 2 * list.size(); j++)
		line.push_back(inf);
	dist.push_back(line);
}
	return 0;
}
コード例 #3
0
Point2f StopLineDetector::VerifyDetection(Mat &input, Mat &debugImage, list<Point2i> &foundStopMarkers, Point2f carPosition, Point2f steeringDirection, Point2f *direction, long long  int time) {
	// Verifies a stop-line hypothesis: checks that the mean marker position is
	// roughly ahead of the car, that the detection is stable over the last
	// three valid frames, and intersects the fitted stop line with the right
	// lane boundary to produce the final stop point.
	//
	// @param foundStopMarkers Detected stop-line marker centers (image coords).
	// @param carPosition      Car position in image coordinates.
	// @param steeringDirection Current steering direction (any length).
	// @param direction        Out: estimated yaw direction at the stop point.
	// @param time             Timestamp used for the detection cache/history.
	// @return The stop point, or (0,0) if the detection could not be verified.

	// BUGFIX: an empty marker list previously divided by zero, propagating a
	// NaN mean through all subsequent checks. Treat it as "no detection".
	if (foundStopMarkers.empty()) {
		detectedStopPosition.push_front(Pose3D());
		return Point2f(0,0);
	}

	// Mean position of all detected markers.
	Point2i mean = Point2i(0, 0);
	int markerCount = 0;
	for (list<Point2i>::iterator it = foundStopMarkers.begin(); it != foundStopMarkers.end(); it++) {
		mean += *it;
		markerCount++;
	}
	Point2f stopPoint = Point2f(mean) / float(markerCount);

	// Unit vector from the car towards the detected stop point.
	Point2f stopSteeringVector = (stopPoint - carPosition);
	float distance = cv::norm(stopSteeringVector);
	stopSteeringVector /= distance;

	steeringDirection /= cv::norm(steeringDirection);

	float cosAngle = abs(steeringDirection.dot(stopSteeringVector));

	// Reject detections too far off the steering direction; the limit depends
	// on the current steering angle (1.9099 ~= 180 / PI / 30).
	if(cosAngle < AngleLimit(abs(atan(steeringDirection.x / steeringDirection.y) * 1.9099 /* 180 / CV_PI / 30*/))) {
		detectedStopPosition.push_front(Pose3D()); // record an invalid pose
		return Point2f(0,0);
	}

	detectedStopPosition.push_front(Pose3D(stopPoint, time));

	if(debugEnabled) {
		Point2f a = *(foundStopMarkers.begin());
		Point2f b = *(foundStopMarkers.rbegin());
		line(debugImage, a, b, Scalar (0, 0, 255), 1);
	}

	// Collect the indices of the three most recent valid detections.
	int size = detectedStopPosition.size();
	int valIdx[3];
	int n = 0;
	for (int i = 0; i < size && n < 3; i++) {
		if(detectedStopPosition[i].valid) {
			valIdx[n] = i;
			n++;
		}
	}

	if (n == 3) {
		Point2f p0 = detectedStopPosition[valIdx[0]].pos;
		Point2f p1 = detectedStopPosition[valIdx[1]].pos;
		Point2f p2 = detectedStopPosition[valIdx[2]].pos;

		// Plausibility: pairwise movement between the valid detections must
		// stay below maxMovementDistance per frame gap.
		if ((cv::norm(p0 - p1) < maxMovementDistance * (valIdx[1] - valIdx[0]))
				&& (cv::norm(p1 - p2) < maxMovementDistance * (valIdx[2] - valIdx[1]))
				&& (cv::norm(p0 - p2) < maxMovementDistance * (valIdx[2] - valIdx[0]))) {

			// Least-squares line through all markers = the stop line.
			vector<Point2i> vec1(foundStopMarkers.begin(),foundStopMarkers.end());
			Vec4f tempStopLine;
			fitLine(vec1, tempStopLine, CV_DIST_L2, 0, 0.1, 0.1);
			CVMath::Line stopLine(Point2f(tempStopLine[2], tempStopLine[3]), Point2f(tempStopLine[0], tempStopLine[1]));

			Point2f begin = *(foundStopMarkers.begin());
			Point2f end = *(foundStopMarkers.rbegin());

			// Search windows for the lane boundaries, clipped to the image.
			Rect roiRight = Rect(end.x - 20, end.y - 10, 80, 55) & binaryImageSize;
			Rect roiLeft = Rect(begin.x - 60, begin.y - 5, 80, 50) & binaryImageSize;

			if (debugEnabled) {
				line(debugImage, begin, end, Scalar(0, 0, 255), 3);
				cv::rectangle(debugImage, roiRight, Scalar(0, 0, 200), 1);
				cv::rectangle(debugImage, roiLeft, Scalar(0, 0, 200), 1);
			}

			// Prefer a cached right lane line; otherwise estimate one inside
			// roiRight, perpendicular to the stop line.
			CVMath::LineSegment tmp = DetectionCache::GetInstance().GetRightLine(time);
			CVMath::Line lineRight;
			bool validRightLine = false;
			if(tmp.Valid() && tmp.length > METER_TO_PIXEL(0.3)) {
				lineRight = tmp;
				validRightLine = true;
			} else {
				validRightLine = EstimateVerticalLine(input, debugImage, roiRight, stopLine.normal, &lineRight);
			}

			if (validRightLine) {
				float t_stopLine;
				Point2f intersection = CVMath::IntersectionTwoLines(stopLine, lineRight, &t_stopLine);
				// Canonicalize directions: right lane points up (y < 0) ...
				if(lineRight.direction.y > 0) {
					lineRight.direction = - lineRight.direction;
				}

				Point2f resultingStopPoint;
				// ... and the stop line points left (x < 0).
				if(stopLine.direction.x > 0) {
					stopLine.direction = - stopLine.direction;
				}

				// Yaw = mean of the right-lane direction and the rotated
				// stop-line direction, normalized.
				Point2f yawDirection = lineRight.direction - CVMath::RotateCW90(stopLine.direction);
				yawDirection /= cv::norm(yawDirection);

				*direction = yawDirection;

				// Stop point = intersection shifted half a lane width along
				// the stop line, i.e. the middle of the own lane.
				resultingStopPoint = intersection + stopLine.direction * laneWidth_Pixel * 0.5;

				if (debugEnabled) {
					circle(debugImage, resultingStopPoint, 7.0, Scalar(0, 0, 255), 2, 8);
					line(debugImage, resultingStopPoint, resultingStopPoint + 20 * yawDirection, Scalar(255, 0, 0), 2);
				}

				return resultingStopPoint;
			}

			//EstimateVericalLine(input, debugImage, roiLeft, normalOfStopLine);

			return Point2f(0,0);

		} else {
			// Oldest of the three detections is inconsistent — drop it.
			detectedStopPosition[valIdx[0]].valid = false;
		}
	}

	return Point2f(0,0);
}
コード例 #4
0
void StopLineDetector::EvaluateStopLineMarkers(list<Point2i> & stopMarkers, Point2f dir) {
	// Filters the stop-line marker candidates: removes an upper second row,
	// then keeps only a chain of markers that are evenly spaced along the
	// (rotated) search direction; everything else is erased. If no chain of
	// at least three markers survives, the list is cleared entirely.
	//
	// @param stopMarkers Candidate marker positions; modified in place.
	// @param dir         Expected stop-line normal; rotated 90° CW below to
	//                    obtain the along-the-line direction.

	// BUGFIX: an empty list previously incremented the end() iterator (UB).
	if (stopMarkers.empty()) {
		return;
	}

	//clear upper row if two rows are detected
	// BUGFIX: the old code moved boundingRect.y before computing the height
	// delta, so the box never grew upwards and the bottom edge drifted.
	// Track the vertical extent explicitly instead.
	int minY = BINARY_IMAGE_HEIGHT;
	int maxY = 0;
	for (list<Point2i>::iterator it = stopMarkers.begin(); it != stopMarkers.end(); it++) {
		if (it->y < minY) {
			minY = it->y;
		}
		if (it->y > maxY) {
			maxY = it->y;
		}
	}
	Rect boundingRect = Rect(0, minY, 0, (maxY > minY) ? (maxY - minY) : 0);

	if(boundingRect.height > METER_TO_PIXEL(0.10)) {
		// Two rows: keep only the lower one (larger y = closer to the car).
		int limit = boundingRect.y + boundingRect.height / 2;
		for (list<Point2i>::iterator it = stopMarkers.begin(); it != stopMarkers.end(); ) {
			if(it->y < limit) {
				it = stopMarkers.erase(it);
			} else {
				it++;
			}
		}
	}

	list<Point2i>::iterator it = stopMarkers.begin();
	int i = 0;
	list<Point2i>::iterator last = it;
	it++;
	if(it == stopMarkers.end()) {
		// Fewer than two markers can never form a chain.
		stopMarkers.clear();
		return;
	}

	// Markers line up along the direction perpendicular to the normal.
	dir = CVMath::RotateCW90(dir);

	float limit_ANG = cosf(15 * CV_PI / 180);
	float limit_LEN = METER_TO_PIXEL(0.44) / 3.9;

	list<Point2i>::iterator itC = last;

	bool chainFound = false;

	//find chain of three succeeding points
	// Sliding window of three markers c -> b -> a; Helper_CheckCondition
	// presumably flags angle/length outliers — TODO confirm its semantics.
	for (i = 1; it != stopMarkers.end(); it++, i++) {
		if(i > 1) {
			last = it, last--;
			itC = last, itC--;

			Point2i c = *itC;
			Point2i b = *last;
			Point2i a = *it;

			// Normalized step vectors and their alignment with `dir`.
			Point2f CB = b-c;
			float lenCB = cv::norm(CB);
			CB /= lenCB;
			float cosAngleCB = abs(dir.dot(CB));

			Point2f BA = a-b;
			float lenBA = cv::norm(BA);
			BA /= lenBA;
			float cosAngleBA = abs(dir.dot(BA));

			Point2f CA = a-c;
			float lenCA = cv::norm(CA);
			CA /= lenCA;
			float cosAngleCA = abs(dir.dot(CA));

			if(Helper_CheckCondition(cosAngleCB, limit_ANG, lenCB, limit_LEN)) {
				if(Helper_CheckCondition(cosAngleBA, limit_ANG, lenBA, limit_LEN) || chainFound) {
					// Middle marker breaks the chain — drop it.
					stopMarkers.erase(last);
					i--;
				}

				if(chainFound) {
					if(Helper_CheckCondition(cosAngleCA, limit_ANG, lenCA, limit_LEN)) {
						// Chain ends here: discard everything from `it` on.
						it = stopMarkers.erase(it, stopMarkers.end());
						//i--;
						break;
					}
				} else {
					if(Helper_CheckCondition(cosAngleCA, limit_ANG, lenCA, limit_LEN)) {
						// First marker of the triple is the outlier.
						stopMarkers.erase(itC);
						i--;
					}
				}
			} else {
				if(Helper_CheckCondition(cosAngleBA, limit_ANG, lenBA, limit_LEN)) {
					if(chainFound == false) {
						// No chain yet and both steps bad: drop b and c.
						stopMarkers.erase(last);
						stopMarkers.erase(itC);
						i -= 2;
					} else if(Helper_CheckCondition(cosAngleCA, limit_ANG, lenCA, limit_LEN)) {
						it = stopMarkers.erase(it);
						it--;
						i--;
					}
				} else if (chainFound == false){
					// c-b-a passed all checks: a chain is established. Refine
					// the expected direction and tighten the tolerances.
					chainFound = true;
					dir = (CB + BA + CA) * 0.3333333f;
					float dirLength = cv::norm(dir);
					dir /= dirLength;
					limit_LEN = lenCA * 0.5 * 1.1; // half spacing + 10% slack
					limit_ANG = cosf(10 * CV_PI / 180);
				}
			}
		}
	}

	if (i < 3) {
		// No chain of at least three markers survived.
		stopMarkers.clear();
	}
}
コード例 #5
0
void OpticalFlowTransitionModel::predict(vector<Sample>& samples, const Mat& image, const optional<Sample>& target) {
	// Moves each sample according to the median Lucas-Kanade optical flow of a
	// point grid placed over the previous target (median translation + median
	// scale ratio), plus random noise. Whenever the flow cannot be computed or
	// is unreliable, delegates to the fallback transition model instead.
	//
	// @param samples Samples to predict; positions/sizes are updated in place.
	// @param image   Current frame; converted to grayscale for the pyramid.
	// @param target  Target state of the previous frame; without it (or a
	//                previous pyramid) no flow can be computed.

	// Reset all per-frame scratch state.
	points.clear();
	forwardPoints.clear();
	backwardPoints.clear();
	forwardStatus.clear();
	error.clear();
	squaredDistances.clear();
	correctFlowCount = 0;

	// build pyramid of the current image
	cv::buildOpticalFlowPyramid(makeGrayscale(image), currentPyramid, windowSize, maxLevel, true, BORDER_REPLICATE, BORDER_REPLICATE);
	if (previousPyramid.empty() || !target) { // optical flow cannot be computed if there is no previous pyramid or no current target
		swap(previousPyramid, currentPyramid); // still keep this frame for the next call
		return fallback->predict(samples, image, target);
	}

	// compute grid of points at target location
	// (templatePoints holds normalized [0,1]x[0,1] grid coordinates that are
	// scaled to the target's bounding box)
	for (auto point = templatePoints.begin(); point != templatePoints.end(); ++point)
		points.push_back(Point2f(target->getWidth() * point->x + target->getX(), target->getHeight() * point->y + target->getY()));
	// compute forward and backward optical flow
	cv::calcOpticalFlowPyrLK(previousPyramid, currentPyramid, points, forwardPoints, forwardStatus, error, windowSize, maxLevel);
	cv::calcOpticalFlowPyrLK(currentPyramid, previousPyramid, forwardPoints, backwardPoints, backwardStatus, error, windowSize, maxLevel);
	swap(previousPyramid, currentPyramid);

	// compute flow and forward-backward-error (squared distance between a
	// point and its forward-then-backward-tracked position)
	vector<Point2f> flows;
	flows.reserve(points.size());
	squaredDistances.reserve(points.size());
	for (unsigned int i = 0; i < points.size(); ++i) {
		flows.push_back(forwardPoints[i] - points[i]);
		if (forwardStatus[i] && backwardStatus[i]) {
			Point2f difference = backwardPoints[i] - points[i];
			squaredDistances.push_back(make_pair(difference.dot(difference), i));
		}
	}
	if (squaredDistances.size() < points.size() / 2) // the flow for more than half of the points could not be computed
		return fallback->predict(samples, image, target);

	// find the flows with the least forward-backward-error (not more than 1 pixel)
	// NOTE(review): this is compared against a SQUARED distance, so 0.5
	// corresponds to ~0.7 px, not 1 px — confirm the intended threshold.
	float maxError = 1 * 0.5f;
	vector<float> xs, ys;
	sort(squaredDistances.begin(), squaredDistances.end(), [](pair<float, int> lhs, pair<float, int> rhs) { return lhs.first < rhs.first; });
	xs.reserve(squaredDistances.size());
	ys.reserve(squaredDistances.size());
	for (correctFlowCount = 0; correctFlowCount < squaredDistances.size(); ++correctFlowCount) {
		if (squaredDistances[correctFlowCount].first > maxError)
			break;
		int index = squaredDistances[correctFlowCount].second;
		xs.push_back(flows[index].x);
		ys.push_back(flows[index].y);
	}
	if (correctFlowCount < points.size() / 2) // too few correct correspondences
		return fallback->predict(samples, image, target);

	// compute median flow (only change in position for now)
	sort(xs.begin(), xs.end());
	sort(ys.begin(), ys.end());
	float medianX = xs[xs.size() / 2];
	float medianY = ys[ys.size() / 2];
	Point2f medianFlow(medianX, medianY);

	// compute ratios of point distances in previous and current image
	// (each reliable point pair contributes one scale-change estimate)
	vector<float> squaredRatios;
	squaredRatios.reserve(correctFlowCount * (correctFlowCount - 1) / 2);
	for (unsigned int i = 0; i < correctFlowCount; ++i) {
		for (unsigned int j = i + 1; j < correctFlowCount; ++j) {
			Point2f point1 = points[squaredDistances[i].second];
			Point2f forwardPoint1 = forwardPoints[squaredDistances[i].second];
			Point2f point2 = points[squaredDistances[j].second];
			Point2f forwardPoint2 = forwardPoints[squaredDistances[j].second];
			Point2f differenceBefore = point1 - point2;
			Point2f differenceAfter = forwardPoint1 - forwardPoint2;
			float squaredDistanceBefore = differenceBefore.dot(differenceBefore);
			float squaredDistanceAfter = differenceAfter.dot(differenceAfter);
			squaredRatios.push_back(squaredDistanceAfter / squaredDistanceBefore);
		}
	}

	// compute median ratio to complete the median flow
	sort(squaredRatios.begin(), squaredRatios.end());
	float medianRatio = sqrt(squaredRatios[squaredRatios.size() / 2]);

	// predict samples according to median flow and random noise
	for (auto sample = samples.begin(); sample != samples.end(); ++sample) {
		int oldX = sample->getX();
		int oldY = sample->getY();
		float oldSize = sample->getSize();
		// change position according to median flow
		double newX = oldX + medianX;
		double newY = oldY + medianY;
		double newSize = oldSize * medianRatio;
		// add noise to position
		double positionDeviation = scatter * sample->getSize();
		newX += positionDeviation * generator();
		newY += positionDeviation * generator();
		newSize *= pow(2, scatter * generator());
		// round to integer
		sample->setX((int)(newX + 0.5));
		sample->setY((int)(newY + 0.5));
		sample->setSize((int)(newSize + 0.5));
		// compute change (velocity components for the next frame)
		sample->setVx(sample->getX() - oldX);
		sample->setVy(sample->getY() - oldY);
		sample->setVSize(sample->getSize() / oldSize);
	}
}