Example #1
void
euclideanToHomogeneous(const InputArray _x, OutputArray _X)
{
    const Mat x = _x.getMat();  
    const Mat last_row = Mat::ones(1, x.cols, x.type());
    vconcat(x, last_row, _X);
}
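
Note: vconcat here simply appends a row of ones beneath the input, turning each Euclidean column vector into a homogeneous one. A minimal standalone sketch of the same pattern (this harness is illustrative, not part of the example):

#include <opencv2/core.hpp>
#include <iostream>

int main() {
    // Three 2-D points stored as columns.
    cv::Mat x = (cv::Mat_<double>(2, 3) << 1, 2, 3,
                                           4, 5, 6);
    cv::Mat X;
    cv::vconcat(x, cv::Mat::ones(1, x.cols, x.type()), X);  // append the homogeneous row
    std::cout << X << std::endl;  // prints a 3x3 matrix whose last row is all ones
    return 0;
}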
Example #2
cv::Mat ReidDescriptor::getWhsvFeature(cv::Mat img, cv::Mat MSK)
{
	int offset = img.rows / 5;
	vector<cv::Mat> sub(5);

	// Divide the image into 5x1 cells
	for(int i = 0 ; i < 4 ; i++) {
		sub[i] = img.rowRange(offset * i, offset * (i + 1));
	}
	sub[4] = img.rowRange(offset * 4, img.rows);
	// Debug this

	cv::Mat conc;
	cv::Mat temp;
	for(int i = 0 ; i < 5 ; i++) {
		cv::Mat HSV = HSVVector(sub[i]);
		if(i == 0) {
			conc = HSV;
		} else {
			vconcat(conc, HSV, conc);
		}
	}

	return conc;
    //return cv::Mat::zeros(2,2,CV_8U);
}
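
The loop above grows the feature by calling vconcat once per strip. A sketch of an equivalent formulation, assuming every strip's histogram shares the same type and column count, using the vconcat overload that takes a whole vector of matrices:

#include <opencv2/core.hpp>
#include <vector>

// Illustrative helper: stack same-width matrices vertically in one call.
cv::Mat stackRows(const std::vector<cv::Mat>& parts) {
    cv::Mat out;
    cv::vconcat(parts, out);  // all parts must share type and column count
    return out;
}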
Example #3
void Matcher::initialize(const vector<Point2f> & pts_fg_norm, const Mat desc_fg, const vector<int> & classes_fg,
        const Mat desc_bg, const Point2f center)
{
  

    //Copy normalized points
    this->pts_fg_norm = pts_fg_norm;

    //Remember number of background points
    this->num_bg_points = desc_bg.rows;

    //Form database by stacking background and foreground features
    if (desc_bg.rows > 0 && desc_fg.rows > 0)
        vconcat(desc_bg, desc_fg, database);
    else if (desc_bg.rows > 0)
        database = desc_bg;
    else
        database = desc_fg;

    //Extract descriptor length from features
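    //(cols counts bytes; BruteForce-Hamming distances are measured in bits, hence *8)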
    desc_length = database.cols*8;

    //Create background classes (-1)
    vector<int> classes_bg = vector<int>(desc_bg.rows,-1);

    //Concatenate fg and bg classes
    classes = classes_bg;
    classes.insert(classes.end(), classes_fg.begin(), classes_fg.end());

    //Create descriptor matcher
    bfmatcher = DescriptorMatcher::create("BruteForce-Hamming");

    
}
Example #4
void FacenetClassifier::run (long start_index, long end_index) {
	string input_layer = "input:0";
	string phase_train_layer = "phase_train:0";
	string output_layer = "embeddings:0";
	std::vector<tensorflow::Tensor> outputs;
	std::vector<std::pair<string, tensorflow::Tensor>> feed_dict = {
		  {input_layer, input_tensor},  
		  {phase_train_layer, phase_tensor},
	};    
	tensorflow::SessionOptions options;
	tensorflow::NewSession (options, &session);
	session->Create (graph_def);
	cout << "Input Tensor: " << input_tensor.DebugString() << endl;
	Status run_status = session->Run(feed_dict, {output_layer}, {} , &outputs);
	if (!run_status.ok()) {
		  LOG(ERROR) << "Running model failed: " << run_status << "\n";
		  return;
	}
	cout << "Output: " << outputs[0].DebugString() << endl;

	float *p = outputs[0].flat<float>().data();
	Mat output_mat;
	for (int i = start_index; i < end_index; i++) {		
		Mat mat_row (cv::Size (128, 1), CV_32F, p + (i - start_index)*128, Mat::AUTO_STEP);
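		// Caution: mat_row wraps the tensor's buffer without copying, so the rows
		// pushed into mat_training_tensors are only valid while "outputs" is alive.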
		mat_training_tensors.push_back (mat_row);
		if (output_mat.empty())
			output_mat = mat_row;
		else
			vconcat (output_mat, mat_row, output_mat);
	}
	if (this->output_mat.empty())
		this->output_mat = output_mat.clone ();
	else
		vconcat (this->output_mat, output_mat.clone (), this->output_mat);
	output_mat.release ();
	cout << "Output Mat: " << this->output_mat.size () << endl;
	session->Close ();
	delete session;
}
Example #5
Mat ReID::HSVVector(Mat img) {
    cv::Mat img_hsv, hist, hist_h, hist_s, hist_v;
    cvtColor(img, img_hsv, CV_BGR2HSV);
    
    // Normalisation ?
    vector<cv::Mat> temp;
    split(img_hsv, temp);
    
    temp[0] = temp[0].reshape(0, 1);
    temp[1] = temp[1].reshape(0, 1);
    temp[2] = temp[2].reshape(0, 1);
    
    // Histogram computation
    float h_ranges[] = { 0, 180 };
    float s_ranges[] = { 0, 256 };
    float v_ranges[] = { 0, 256 };
    
    int histSize_h[] = { 180 };
    int histSize_s[] = { 256 };
    int histSize_v[] = { 256 };
    
    const float * ranges_h[] = { h_ranges };
    const float * ranges_s[] = { s_ranges };
    const float * ranges_v[] = { v_ranges };
    
    int channels[] = { 0 };
    
    calcHist(&temp[0], 1, channels, Mat(), hist_h, 1, histSize_h, ranges_h);
    normalize(hist_h, hist_h, 0, 1, NORM_MINMAX, -1, Mat());
    calcHist(&temp[1], 1, channels, Mat(), hist_s, 1, histSize_s, ranges_s);
    normalize(hist_s, hist_s, 0, 1, NORM_MINMAX, -1, Mat());
    calcHist(&temp[2], 1, channels, Mat(), hist_v, 1, histSize_v, ranges_v);
    normalize(hist_v, hist_v, 0, 1, NORM_MINMAX, -1, Mat());
    
    vconcat(hist_h, hist_s, hist);
    vconcat(hist, hist_v, hist);
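    // hist is now (180 + 256 + 256) x 1 = 692 x 1, each sub-histogram min-max normalized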
    
    return hist;
}
Example #6
//Transforms::Transforms(Mat &R, Mat &T, double baseline) :_baseline(baseline) {
Transforms::Transforms(double baseline, string cam_extrinsics_filename, string cam_dartboard_extrinsics_filename) :_baseline(baseline) {
  // add baseline, configuration in a yaml file
  FileStorage cam_extrinsics(cam_extrinsics_filename, FileStorage::READ);
  FileStorage cam_dartboard_extrinsics(cam_dartboard_extrinsics_filename, FileStorage::READ);

  Mat cR, cT, cdR, cdT;
  cam_extrinsics["R"] >> cR;
  cam_extrinsics["T"] >> cT;
  cam_dartboard_extrinsics["R"] >> cdR;
  cam_dartboard_extrinsics["T"] >> cdT;

  Mat scratch;
  double h[4] = {0,0,0,1};
  Mat H = Mat(1,4, CV_64FC1, &h);
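  // H holds the constant bottom row [0 0 0 1] of a 4x4 homogeneous transform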

  // Set up camera extrinsics
  hconcat(cR, cT, scratch);
  vconcat(scratch, H, _rightToLeftCamTransform);

  // Set up cam dartboard extrinsics
  hconcat(cdR, cdT, scratch);
  vconcat(scratch, H, _leftCamToDartboardTransform);
}
Example #7
/* Step 0: Check for a square matrix.
 * If CostMatrix is not square, add dummy rows/columns whose entries are 0.
 */
void HungarianAlgorithm::step_zero(int* step){ 
	
//	showCostMatrix(step);
//	adding row
 	if(CostMatrix.cols > CostMatrix.rows){
		Mat ZeroRow = Mat::zeros(1, CostMatrix.cols, CV_64FC1);
		vconcat(CostMatrix, ZeroRow, CostMatrix);
	}
	if(CostMatrix.cols < CostMatrix.rows){
		Mat ZeroCol = Mat::zeros(CostMatrix.rows, 1, CV_64FC1);
		hconcat(CostMatrix, ZeroCol, CostMatrix);
	}
	showCostMatrix(step);
	*step = 1; 	 

}
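
A standalone sketch of the same padding step, generalized to any row/column difference (the original adds only a single dummy row or column; padToSquare is an illustrative name, assuming a CV_64FC1 cost matrix):

#include <opencv2/core.hpp>

// Pad a rectangular cost matrix with zero rows or columns until it is square.
cv::Mat padToSquare(cv::Mat cost) {
    if (cost.cols > cost.rows)
        cv::vconcat(cost, cv::Mat::zeros(cost.cols - cost.rows, cost.cols, cost.type()), cost);
    else if (cost.rows > cost.cols)
        cv::hconcat(cost, cv::Mat::zeros(cost.rows, cost.rows - cost.cols, cost.type()), cost);
    return cost;
}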
Example #8
void AutoSubClassingClassifier::train(
        const vector<Mat>& segmentedHands, 
        const vector<int>& expectedClass) {
    assert(segmentedHands.size() == expectedClass.size());
    this->subclassesOfTrainingBase.clear();
    int numberOfClasses = *max_element(expectedClass.begin(), expectedClass.end()) + 1;
    vector<Mat> samplesPerClass(numberOfClasses);
    for (int i = 0; i < segmentedHands.size(); i++) {
        Mat currentCaracteristicVector = this->caracteristicVector(segmentedHands[i]);
        
        if (samplesPerClass[expectedClass[i]].empty()) {
            samplesPerClass[expectedClass[i]] = currentCaracteristicVector;
        } else {
            vconcat(samplesPerClass[expectedClass[i]], currentCaracteristicVector, samplesPerClass[expectedClass[i]]);
        }
    }
    int accumulatedNumberOfClasses = 0;
    for (int i = 0; i < samplesPerClass.size(); i++) {
        Mat labels;
        int clusterCount = this->numberOfSubclasses[i];
        kmeans(samplesPerClass[i], clusterCount, labels, TermCriteria(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 0.0001, 10000), 5, KMEANS_RANDOM_CENTERS);
        this->subclassesOfTrainingBase.push_back(labels + (Mat::ones(samplesPerClass[i].rows, 1, CV_32S) * accumulatedNumberOfClasses));
        accumulatedNumberOfClasses += this->numberOfSubclasses[i];
    }
    Mat trainingInputs(segmentedHands.size(), this->caracteristicVectorLength(), CV_32F);
    Mat trainingOutputs(segmentedHands.size(), 1, CV_32F);
    int firstIndex = 0;
    for (int i = 0; i < samplesPerClass.size(); i++) {
        int lastIndex = firstIndex + samplesPerClass[i].rows;
        Mat samples = samplesPerClass[i];
        Mat range = trainingInputs.rowRange(firstIndex, lastIndex);
        samples.copyTo(range);
        Mat outputs = this->subclassesOfTrainingBase[i];
        outputs.copyTo(trainingOutputs.rowRange(firstIndex, lastIndex));
        firstIndex = lastIndex;
    }
    this->statisticalModel->train(trainingInputs, trainingOutputs);
}
Example #9
File: csk.cpp Project: foolwood/CSK
void CircShift(Mat &x, Size k){
  int cx, cy;
  if (k.width < 0)
    cx = -k.width;
  else
    cx = x.cols - k.width;

  if (k.height < 0)
    cy = -k.height;
  else
    cy = x.rows - k.height;

  Mat q0(x, Rect(0, 0, cx, cy));   // Top-Left - Create a ROI per quadrant
  Mat q1(x, Rect(cx, 0, x.cols - cx, cy));  // Top-Right
  Mat q2(x, Rect(0, cy, cx, x.rows - cy));  // Bottom-Left
  Mat q3(x, Rect(cx, cy, x.cols - cx, x.rows - cy)); // Bottom-Right

  Mat tmp1, tmp2;                           // swap quadrants (Top-Left with Bottom-Right)
  hconcat(q3, q2, tmp1);
  hconcat(q1, q0, tmp2);
  vconcat(tmp1, tmp2, x);

}
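
A quick check of the shift semantics (assuming CircShift above is in scope); the quadrant swap reproduces a MATLAB-style circshift:

#include <opencv2/core.hpp>
#include <iostream>

int main() {
    cv::Mat m = (cv::Mat_<int>(2, 2) << 1, 2,
                                        3, 4);
    CircShift(m, cv::Size(1, 1));  // shift right by 1 and down by 1, wrapping around
    std::cout << m << std::endl;   // [4, 3; 2, 1]
    return 0;
}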
Example #10
    /**
     * Randomly expands the configuration
     */
    vector<MatchConfig> FAsTMatch::randomExpandConfigs( vector<MatchConfig>& configs, MatchNet& net, int level,
                                                int no_of_points, float delta_factor ) {
        
        float factor = pow(delta_factor, level);
        
        float   half_step_tx = net.stepsTransX / factor,
                half_step_ty = net.stepsTransY / factor,
                half_step_r  = net.stepsRotate / factor,
                half_step_s  = net.stepsScale / factor;
        
        int no_of_configs = static_cast<int>( configs.size() );
        
        /* Create random vectors that contain values which are either -1, 0, or 1 */
        Mat random_vec( no_of_points * no_of_configs, 6, CV_32SC1 );
        rng.fill( random_vec, RNG::NORMAL, 0, 0.5 );
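        /* Filling an integer (CV_32SC1) matrix from N(0, 0.5) rounds each sample,
           so the entries are almost always -1, 0, or +1, as noted above. */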
        random_vec.convertTo( random_vec, CV_32FC1 );

        /* Convert our vector of configurations into a large matrix */
        vector<Mat> configs_mat(no_of_configs);
        for(int i = 0; i < no_of_configs; i++ )
            configs_mat[i] = configs[i].asMatrix();
        
        Mat expanded;
        vconcat( configs_mat, expanded );
        expanded = repeat( expanded, no_of_points, 1 );

        vector<float> ranges_vec = {
            half_step_tx, half_step_ty, half_step_r, half_step_s, half_step_s, half_step_r
        };
        
        Mat ranges = repeat(Mat(ranges_vec).t() , no_of_points * no_of_configs, 1);
        
        /* The expanded configs is the original configs plus some random changes */
        Mat expanded_configs = expanded + random_vec.mul( ranges );

        return MatchConfig::fromMatrix( expanded_configs );
    }
Example #11
int main()
{
//#ifdef _DEBUG
//	int iOrg = _CrtSetDbgFlag(_CRTDBG_REPORT_FLAG);	
//	_CrtSetDbgFlag(iOrg | _CRTDBG_LEAK_CHECK_DF);
//#endif
	
	cout << "tmat demo program" << endl;

	// build two 16 element double arrays
	double aa[] = { 1, 2, 3, 4,
				    4, 1, 2, 3,
					3, 4, 1, 2,
					2, 3, 4, 1 };

	double bb[] = { 3, 4, 5, 6,
		            6, 3, 4, 5,
					5, 6, 3, 4,
					4, 5, 6, 3 };

	// instantiate arrays as 4 x 4 matrices
	tmat<double> A(4,4,aa); cout << A << endl;
	tmat<double> B(4,4,bb); cout << B << endl;

	// check determinants
	cout << "det(A) = " << det(A) << endl;
	cout << "det(B) = " << det(B) << endl << endl;

	// exercise the matrix operators
	tmat<double> C;
	C = A + B; cout << "A + B =" << endl << C << endl;
	C = A - B; cout << "A - B =" << endl << C << endl;
	C = A * B; cout << "A * B =" << endl << C << endl;

	// exercise unary operators
	C = transpose(A); cout << "transpose(A) =" << endl << C << endl;
	C = inv(A); cout << "inv(A) =" << endl << C << endl;

	// check error flag
	C = hconcat(A, B); cout << "A | B =" << endl << C << endl;
	C = vconcat(A, B); cout << "A , B =" << endl << C << endl;
//	C = A + C; // error here
	delrow(C,5);cout << "A , B row=" << endl << C << endl;

	// string matrices
	tmat<string> S(3,3);  
	S(1,1) = "an";  S(1,2) = "to";  S(1,3) = "if";
	S(2,1) = "or";	S(2,2) = "no";	S(2,3) = "on";
	S(3,1) = "be";	S(3,2) = "am";  S(3,3) = "as";
	cout << S << endl;

	S = transpose(S); cout << S << endl;

	// integer matrices
	tmat<int> H(3,3);
	H(1,1) = 1;  H(1,2) = 0; H(1,3) = 1;
	H(2,1) = 0;  H(2,2) = 1; H(2,3) = 1;
	H(3,1) = 1;  H(3,2) = 0; H(3,3) = 0;
	cout << H << endl;

	H = bin_add(H, transpose(H));  cout << H << endl;
		


	cout << "end program" << endl;

	return 0;

}// main()
Example #12
/* Detection is for single scale detection
   Input:
           1. im: A grayscale image in height x width.
           2. ext: The pre-defined HOG extractor.
           3. threshold: The constant threshold to control the prediction.
   Output:
           1. bbox: The predicted bounding boxes in this format (n x 5 matrix):
                                    x11 y11 x12 y12 s1
                                    ... ... ... ... ...
                                    xi1 yi1 xi2 yi2 si
                                    ... ... ... ... ...
                                    xn1 yn1 xn2 yn2 sn
                    where n is the number of bounding boxes. For the ith 
                    bounding box, (xi1,yi1) and (xi2, yi2) correspond to its
                    top-left and bottom-right coordinates, and si is the score
                    of convolution. Please note that these coordinates are
                    in the input image im.
*/
Mat Detector::Detection(const Mat& im, HOGExtractor& ext, const float threshold) {

	/* TODO 4: 
       Obtain the HOG descriptor of the input im. Then convolve the linear
	   detector on the HOG descriptor. You may put the score of convolution in 
	   the structure "convolutionScore".
	*/
	Mat HOGDes = ext.ExtractHOG(im);
	Mat convolutionScore(HOGDes.size[0], HOGDes.size[1], CV_32FC1, Scalar(0));

	// Begin TODO 4
	Mat detector = GetDetector();
	vector<cv::Mat> channels_HOGDes(36);
	vector<cv::Mat> channels_detector(36);

	split(HOGDes, channels_HOGDes);
	split(detector, channels_detector);

	vector<cv::Mat>::iterator iter1;
	vector<cv::Mat>::iterator iter2;
	for (iter1 = channels_HOGDes.begin(), iter2 = channels_detector.begin();
		iter1 != channels_HOGDes.end() && iter2 != channels_detector.end(); iter1++, iter2++)
	{
		Mat t_res(HOGDes.size[0], HOGDes.size[1], CV_32FC1, Scalar(0));
		filter2D( (*iter1), t_res, (*iter1).depth(), (*iter2) );
		convolutionScore += t_res;
	}
//	cout << "HoGsize= " << HOGDes.size[0] << ", " << HOGDes.size[1] << endl;
//	cout << "Detectorsize= " << detector.size[0] << ", " << detector.size[1] << endl;
/*	for (int i; i < HOGDes.size[0]; i++)
	{
		for (int j; j < HOGDes.size[0]; j++)
		{
			cout << convolutionScore.at<float>(i,j) << ", ";
		}
		cout << endl;
	}
	cout << endl;*/
	// End TODO 4
	
	/* TODO 5: 
       Select out those positions where the score is above the threshold. Here,
	   the positions are in ConvolutionScore. To output the coordinates of the
	   bounding boxes, please remember to recover the positions to those in the
	   input image. Please put the predicted bounding boxes and their scores in
	   the below structure "bbox".
	*/
	Mat bbox;
	
	// Begin TODO 5
	float cells = ext.GetCells();
	float blocks = ext.GetBlocks();
	float maxScore = -5.0;
	for ( int i = 0; i < convolutionScore.size[0]; i++ )
	{
		for ( int j = 0; j < convolutionScore.size[1]; j++ )
		{
			if (convolutionScore.at<float>(i, j)>maxScore)
			{
				maxScore = convolutionScore.at<float>(i, j);
			}
			if (convolutionScore.at<float>(i, j) > threshold)
			{
				float x1, y1, x2, y2, score;
				score = convolutionScore.at<float>(i, j);
				x1 = (int)((float)i - ((float)detector.size[0] / 2)+0.5) * cells;
				y1 = (int)((float)j - ((float)detector.size[1] / 2) + 0.5) * cells;
				x2 = (int)((float)i + ((float)detector.size[0] / 2) + 0.5) * cells;
				y2 = (int)((float)j + ((float)detector.size[1] / 2) + 0.5) * cells;
				Mat t_bbox = (Mat_<float>(1, 5) << x1, y1, x2, y2, score);

				if (bbox.size[0]!=0)
				{
					vconcat(bbox, t_bbox, bbox);
				}
				else
				{
					bbox = t_bbox;	
					//bbox = Mat(t_bbox);
				}
			}
		}
	}
	cout << "Max Score = " << maxScore << endl;
	// End TODO 5
//	cout << "Here bbox = " <<bbox.rows<<", "<<bbox.cols<< endl;
	return bbox;
}
Example #13
/* MultiscaleDetection is for multiscale detection
   Input:
           1. im: A grayscale image in height x width.
           2. ext: The pre-defined HOG extractor.
		   3. scales: The scales for resizing the image.
		   4. numberOfScale: The number of different scales.
           5. threshold: The constant threshold to control the prediction.
   Output:
           1. bbox: The predicted bounding boxes in this format (n x 5 matrix):
                                    x11 y11 x12 y12 s1
                                    ... ... ... ... ...
                                    xi1 yi1 xi2 yi2 si
                                    ... ... ... ... ...
                                    xn1 yn1 xn2 yn2 sn
                    where n is the number of bounding boxes. For the ith 
                    bounding box, (xi1,yi1) and (xi2, yi2) correspond to its
                    top-left and bottom-right coordinates, and si is the score
                    of convolution. Please note that these coordinates are
                    in the input image im.
*/
Mat Detector::MultiscaleDetection(const Mat& im, HOGExtractor& ext, 
	const float* scales, const int numberOfScale, const float* threshold) {

	/* TODO 6: 
       You should firstly resize the input image by scales 
	   and store them in the structure pyra. 
	*/
	vector<Mat> pyra(numberOfScale);

	// Begin TODO 6
	int height = im.rows;
	int width = im.cols;
	int nchannel = im.channels();

	cout << scales << threshold << endl; //Active scales and threshold...Get around the bug...
	for ( int i = 0; i < numberOfScale; i++ )
	{
		int t_height = height * scales[i];
		int t_width = width * scales[i];
		Mat t_im;
		
//		cout << "t_height = " << t_height << " t_width = " << t_width << endl;
		resize( im, t_im, Size(t_width, t_height) );
		pyra[i] = t_im;
	}


	// End TODO 6
	

	/* TODO 7: 
       Perform detection with different scales. Please remember 
	   to transfer the coordinates of bounding box according to 
	   their scales. 
	   You should complete the helper-function  "Detection" and 
	   call it here. All the detected bounding boxes should be 
	   stored in the below structure "bbox".
	*/
	Mat bbox;

	// Begin TODO 7
	for (int i = 0; i < (int)pyra.size(); i++)
	{
		Mat t_bbox = Detection( pyra[i], ext, threshold[i]);
		if (t_bbox.empty())
		{
			continue;
		}
		int t_row = t_bbox.rows;
		int t_col = t_bbox.cols;
		for (int m = 0; m < t_row; m++)
		{
			for (int n = 0; n < t_col-1; n++)
			{
				t_bbox.at<float>(m, n) = t_bbox.at<float>(m, n) / scales[i];
			}
		}
		if (!bbox.empty())
		{
			vconcat(bbox, t_bbox, bbox);
		}
		else
		{
			bbox = t_bbox;
			//bbox = Mat(t_bbox);
		}
	}
	// End TODO 7

	return bbox;
}
Example #14
int main(int argc, char **argv) {
  if (argc < 5) {
    std::cout << "Usage: "
              << argv[0]
              << " path_to_scans/ output.ply icp_iters subsample_probability" << std::endl;
    return 1;
  }

  // Command line parameters
  string pc_filepath = argv[1];
  string pc_file_out_ply = argv[2];
  int icp_num_iters = std::atoi(argv[3]);
  double probability = std::atof(argv[4]);
  if (pc_filepath.c_str()[pc_filepath.length() - 1] != '/') {
    pc_filepath += "/";
  }

  std::ifstream file(pc_filepath + "config.txt");
  string line;
  getline(file, line);
  std::istringstream in(line);

  int num_images;
  in >> num_images;

  Mat pc_a = load_kinect_frame(pc_filepath + "image_0.png",
      pc_filepath + "depth_0.txt");

  Mat allSamples = selectRandomPoints(pc_a, probability);

  // Used for accumulating the rigid transformation matrix
  Mat transformation = Mat::eye(4, 4, CV_32F);

  Mat rotation, translation;
  clock_t time;

  for (int image_num = 1; image_num < num_images; ++image_num) {
    std::cout << "REGISTERING IMAGE " << image_num << std::endl;
    time = clock();

    // Load the point cloud to be registered
    string str_num = std::to_string(image_num);
    Mat pc_b = load_kinect_frame(pc_filepath + "image_" + str_num + ".png",
        pc_filepath + "depth_" + str_num + ".txt");
    Mat pc_b_sample = selectRandomPoints(pc_b, probability);

    // Apply the previous transformations to b so that it is positioned near
    // the last accumulated points
    extractRigidTransform(transformation, rotation, translation);
    pc_b_sample = applyTransformation(pc_b_sample, rotation, translation);
    pc_b = applyTransformation(pc_b, rotation, translation);

    // Perform the specified number of icp iterations
    for (int i = 0; i < icp_num_iters; ++i) {
      // Find the nearest neighbor pairs in the two point clouds
      Mat a, b;
      nearest_neighbors(allSamples, pc_b_sample, a, b);

      // Find the optimal rotation and translation matrix to transform b onto a
      Mat r, t;
      rigid_transform_3D(b, a, r, t);

      // Apply the rigid transformation to the b point cloud
      pc_b_sample = applyTransformation(pc_b_sample, r, t);
      pc_b = applyTransformation(pc_b, r, t);
      transformation *= getRigidTransform(r, t);
    }

    // Combine the two point clouds and save to disk
    Mat combined, combinedSample;
    vconcat(pc_a, pc_b, combined);
    vconcat(allSamples, pc_b_sample, combinedSample);
    pc_a = combined;
    allSamples = combinedSample;
    save_point_cloud(combined, pc_file_out_ply);

    std::cout << "complete " << ((float)(clock() - time)) / CLOCKS_PER_SEC << std::endl;
  }

  return 0;
}
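
Here vconcat merges registered scans row-wise; a minimal sketch assuming the point clouds are N x 3 CV_32F matrices (one point per row), which is consistent with the rigid-transform math above:

#include <opencv2/core.hpp>

int main() {
    cv::Mat cloud_a = cv::Mat::zeros(100, 3, CV_32F);  // stand-ins for two registered scans
    cv::Mat cloud_b = cv::Mat::ones(50, 3, CV_32F);
    cv::Mat merged;
    cv::vconcat(cloud_a, cloud_b, merged);  // 150 x 3: clouds append row-wise
    return 0;
}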
Example #15
int main(int argc, char* argv[]) {
    CvMemStorage *contStorage = cvCreateMemStorage(0);
    CvSeq *contours;
    CvTreeNodeIterator polyIterator;
    
    int found = 0;
    int i;
    CvPoint poly_point;
	int fps=30;
	
	// Polyline approximation
    CvMemStorage *polyStorage = cvCreateMemStorage(0);
    CvSeq *polys, *poly;

	// OpenCV variables
	CvFont font;
	
    printf("start!\n");

	//pwm initialize
	if(gpioInitialise() < 0) return -1;
	//pigpio CW/CCW pin setup
	//X:18, Y1:14, Y2:15
	gpioSetMode(18, PI_OUTPUT);
	gpioSetMode(14, PI_OUTPUT);
	gpioSetMode(15, PI_OUTPUT);
	//pigpio pulse setup
	//X:25, Y1:23, Y2:24
	gpioSetMode(25, PI_OUTPUT);
	gpioSetMode(23, PI_OUTPUT);
	gpioSetMode(24, PI_OUTPUT);
	//limit-switch setup
	gpioSetMode(5, PI_INPUT);
	gpioWrite(5, 0);
	gpioSetMode(6, PI_INPUT);
	gpioWrite(6, 0);
	gpioSetMode(7, PI_INPUT);
	gpioWrite(7, 0);
	gpioSetMode(8, PI_INPUT);
	gpioWrite(8, 0);
	gpioSetMode(13, PI_INPUT);
	gpioSetMode(19, PI_INPUT);
	gpioSetMode(26, PI_INPUT);
	gpioSetMode(21, PI_INPUT);
 
	CvCapture* capture_robot_side = cvCaptureFromCAM(0);
	CvCapture* capture_human_side = cvCaptureFromCAM(1);
    if(capture_robot_side == NULL){
		std::cout << "Robot Side Camera Capture FAILED" << std::endl;
		return -1;
	 }
	if(capture_human_side ==NULL){
		std::cout << "Human Side Camera Capture FAILED" << std::endl;
		return -1;
	}

	// Set capture size
    cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FRAME_WIDTH,CAM_PIX_WIDTH);
	cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FRAME_HEIGHT,CAM_PIX_HEIGHT);
	cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FRAME_WIDTH,CAM_PIX_WIDTH);
	cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FRAME_HEIGHT,CAM_PIX_HEIGHT);
	//Set fps
	cvSetCaptureProperty(capture_robot_side,CV_CAP_PROP_FPS,fps);
	cvSetCaptureProperty(capture_human_side,CV_CAP_PROP_FPS,fps);

	// Create windows for displaying images
	//cvNamedWindow("Previous Image", CV_WINDOW_AUTOSIZE);
//	cvNamedWindow("Now Image", CV_WINDOW_AUTOSIZE);
//	cvNamedWindow("pack", CV_WINDOW_AUTOSIZE);
//	cvNamedWindow("mallet", CV_WINDOW_AUTOSIZE);
//	cvNamedWindow ("Poly", CV_WINDOW_AUTOSIZE);

	//Create trackbar to change brightness
	int iSliderValue1 = 50;
	cvCreateTrackbar("Brightness", "Now Image", &iSliderValue1, 100);
	//Create trackbar to change contrast
	int iSliderValue2 = 50;
	cvCreateTrackbar("Contrast", "Now Image", &iSliderValue2, 100);
	//pack threshold 0, 50, 120, 220, 100, 220
	int iSliderValuePack1 = 54; //80;
	cvCreateTrackbar("minH", "pack", &iSliderValuePack1, 255);
	int iSliderValuePack2 = 84;//106;
	cvCreateTrackbar("maxH", "pack", &iSliderValuePack2, 255);
	int iSliderValuePack3 = 100;//219;
	cvCreateTrackbar("minS", "pack", &iSliderValuePack3, 255);
	int iSliderValuePack4 = 255;//175;
	cvCreateTrackbar("maxS", "pack", &iSliderValuePack4, 255);
	int iSliderValuePack5 = 0;//29;
	cvCreateTrackbar("minV", "pack", &iSliderValuePack5, 255);
	int iSliderValuePack6 = 255;//203;
	cvCreateTrackbar("maxV", "pack", &iSliderValuePack6, 255);
	//mallet threshold 0, 255, 100, 255, 140, 200
	int iSliderValuemallet1 = 106;
	cvCreateTrackbar("minH", "mallet", &iSliderValuemallet1, 255);
	int iSliderValuemallet2 = 135;
	cvCreateTrackbar("maxH", "mallet", &iSliderValuemallet2, 255);
	int iSliderValuemallet3 = 218;//140
	cvCreateTrackbar("minS", "mallet", &iSliderValuemallet3, 255);
	int iSliderValuemallet4 = 255;
	cvCreateTrackbar("maxS", "mallet", &iSliderValuemallet4, 255);
	int iSliderValuemallet5 = 0;
	cvCreateTrackbar("minV", "mallet", &iSliderValuemallet5, 255);
	int iSliderValuemallet6 = 105;
	cvCreateTrackbar("maxV", "mallet", &iSliderValuemallet6, 255);
	
	// Declare image pointers
	IplImage* img_robot_side = cvQueryFrame(capture_robot_side);
	IplImage* img_human_side = cvQueryFrame(capture_human_side);
	IplImage* img_all_round = cvCreateImage(cvSize(CAM_PIX_WIDTH, CAM_PIX_2HEIGHT), IPL_DEPTH_8U, 3);
	IplImage* tracking_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	IplImage* img_all_round2  = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	IplImage* show_img  = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	
	cv::Mat mat_frame1;
	cv::Mat mat_frame2;
	cv::Mat dst_img_v;
	cv::Mat dst_bright_cont;
	int iBrightness  = iSliderValue1 - 50;
	double dContrast = iSliderValue2 / 50.0;
	IplImage* dst_img_frame = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
	IplImage* grayscale_img = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 1);
	IplImage* poly_tmp = cvCreateImage( cvGetSize( img_all_round), IPL_DEPTH_8U, 1);
	IplImage* poly_dst = cvCreateImage( cvGetSize( img_all_round), IPL_DEPTH_8U, 3);
	IplImage* poly_gray = cvCreateImage( cvGetSize(img_all_round),IPL_DEPTH_8U,1);

	int rotate_times = 0;
	//IplImage* -> Mat
	mat_frame1 = cv::cvarrToMat(img_robot_side);
	mat_frame2 = cv::cvarrToMat(img_human_side);
	//Flip vertically and horizontally. In production, flip mat_frame1.
	cv::flip(mat_frame1, mat_frame1, 0); //flip around the horizontal axis (vertical flip)
	cv::flip(mat_frame1, mat_frame1, 1); //flip around the vertical axis (horizontal flip)
	vconcat(mat_frame2, mat_frame1, dst_img_v);

	dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); //convert the combined image
	//Image dilation and erosion
//	cv::Mat close_img;
//	cv::Mat element(3,3,CV_8U, cv::Scalar::all(255));
//	cv::morphologyEx(dst_img_v, close_img, cv::MORPH_CLOSE, element, cv::Point(-1,-1), 3);
//	cv::imshow("morphologyEx", dst_img_v);
//	dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); //convert the combined image

	//Convert the brightness-adjusted result (Mat -> IplImage*) and hand it over; release afterwards.
	*img_all_round = dst_bright_cont;

	cv_ColorExtraction(img_all_round, dst_img_frame, CV_BGR2HSV, 0, 54, 77, 255, 0, 255);

	cvCvtColor(dst_img_frame, grayscale_img, CV_BGR2GRAY);
	cv_Labelling(grayscale_img, tracking_img);

	cvCvtColor(tracking_img, poly_gray, CV_BGR2GRAY);

	cvCopy( poly_gray, poly_tmp);
	cvCvtColor( poly_gray, poly_dst, CV_GRAY2BGR);

	//Image dilation and erosion
	//cvMorphologyEx(tracking_img, tracking_img,)

	// Contour extraction
	found = cvFindContours( poly_tmp, contStorage, &contours, sizeof( CvContour), CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

	// Polyline approximation
	polys = cvApproxPoly( contours, sizeof( CvContour), polyStorage, CV_POLY_APPROX_DP, 8, 10);

	cvInitTreeNodeIterator( &polyIterator, ( void*)polys, 10);
	poly = (CvSeq *)cvNextTreeNode( &polyIterator);
	printf("sort before by X\n");
	for( i=0; i<poly->total; i++)
	{
		poly_point = *( CvPoint*)cvGetSeqElem( poly, i);
//		cvCircle( poly_dst, poly_point, 1, CV_RGB(255, 0 , 255), -1);
//		cvCircle( poly_dst, poly_point, 8, CV_RGB(255, 0 , 255));
		std::cout << "x:" << poly_point.x << ", y:" << poly_point.y  << std::endl;
	}
	printf("Poly FindTotal:%d\n",poly->total);

	//Determine the frame coordinates
	//upper-left,  wall side: upper_left_f
	//upper-left,  goal side: upper_left_g
	//upper-right, wall side: upper_right_f
	//upper-right, goal side: upper_right_g
	//lower-left,  wall side: lower_left_f
	//lower-left,  goal side: lower_left_g
	//lower-right, wall side: lower_right_f
	//lower-right, goal side: lower_right_g
	CvPoint upper_left_f, upper_left_g, upper_right_f, upper_right_g,
			lower_left_f, lower_left_g, lower_right_f, lower_right_g,
			robot_goal_left, robot_goal_right;

	CvPoint frame_points[8];
//	if(poly->total == 8){
//		for( i=0; i<8; i++){
//			poly_point = *( CvPoint*)cvGetSeqElem( poly, i);
//			frame_points[i] = poly_point;
//		}
//		qsort(frame_points, 8, sizeof(CvPoint), compare_cvpoint);
//		printf("sort after by X\n");
//		for( i=0; i<8; i++){
//			std::cout << "x:" << frame_points[i].x << ", y:" << frame_points[i].y  << std::endl;
//		}
//		if(frame_points[0].y < frame_points[1].y){
//			upper_left_f = frame_points[0];
//			lower_left_f = frame_points[1];
//		}
//		else{
//			upper_left_f = frame_points[1];
//			lower_left_f = frame_points[0];
//		}
//		if(frame_points[2].y < frame_points[3].y){
//			upper_left_g = frame_points[2];
//			lower_left_g = frame_points[3];
//		}
//		else{
//			upper_left_g = frame_points[3];
//			lower_left_g = frame_points[2];
//		}
//		if(frame_points[4].y < frame_points[5].y){
//			upper_right_g = frame_points[4];
//			lower_right_g = frame_points[5];
//		}
//		else{
//			upper_right_g = frame_points[5];
//			lower_right_g = frame_points[4];
//		}
//		if(frame_points[6].y < frame_points[7].y){
//			upper_right_f = frame_points[6];
//			lower_right_f = frame_points[7];
//		}
//		else{
//			upper_right_f = frame_points[7];
//			lower_right_f = frame_points[6];
//		}
//	}
//	else{
		printf("Frame is not 8 Point\n");
		upper_left_f = cvPoint(26, 29);
		upper_right_f =  cvPoint(136, 29);
		lower_left_f = cvPoint(26, 220);
		lower_right_f =  cvPoint(136, 220);

		upper_left_g = cvPoint(38, 22);
		upper_right_g = cvPoint(125, 22);
		lower_left_g =  cvPoint(38, 226);
		lower_right_g = cvPoint(125, 226);

		robot_goal_left = cvPoint(60, 226);
		robot_goal_right = cvPoint(93, 226);

//		cvCopy(img_all_round, show_img);
//		cvLine(show_img, upper_left_f, upper_right_f, CV_RGB( 255, 255, 0 ));
//		cvLine(show_img, lower_left_f, lower_right_f, CV_RGB( 255, 255, 0 ));
//		cvLine(show_img, upper_right_f, lower_right_f, CV_RGB( 255, 255, 0 ));
//		cvLine(show_img, upper_left_f, lower_left_f, CV_RGB( 255, 255, 0 ));
//
//		cvLine(show_img, upper_left_g, upper_right_g, CV_RGB( 0, 255, 0 ));
//		cvLine(show_img, lower_left_g, lower_right_g, CV_RGB( 0, 255, 0 ));
//		cvLine(show_img, upper_right_g, lower_right_g, CV_RGB( 0, 255, 0 ));
//		cvLine(show_img, upper_left_g, lower_left_g, CV_RGB( 0, 255, 0 ));

		//while(1){
			//cvShowImage("Now Image", show_img);
			//cvShowImage ("Poly", poly_dst);
			//if(cv::waitKey(1) >= 0) {
				//break;
			//}
		//}
		//return -1;
//	}
	printf("upper_left_fX:%d, Y:%d\n",upper_left_f.x, upper_left_f.y);
	printf("upper_left_gX:%d, Y:%d\n",upper_left_g.x, upper_left_g.y);
	printf("upper_right_fX:%d,Y:%d\n", upper_right_f.x, upper_right_f.y);
	printf("upper_right_gX:%d, Y:%d\n" , upper_right_g.x, upper_right_g.y);
	printf("lower_left_fX:%d, Y:%d\n", lower_left_f.x, lower_left_f.y);
	printf("lower_left_gX:%d, Y:%d\n", lower_left_g.x, lower_left_g.y);
	printf("lower_right_fX:%d, Y:%d\n", lower_right_f.x, lower_right_f.y);
	printf("lower_right_gX:%d, Y:%d\n", lower_right_g.x, lower_right_g.y);
	printf("robot_goal_left:%d, Y:%d\n", robot_goal_left.x, robot_goal_left.y);
	printf("robot_goal_right:%d, Y:%d\n", robot_goal_right.x, robot_goal_right.y);

    cvReleaseImage(&dst_img_frame);
    cvReleaseImage(&grayscale_img);
    cvReleaseImage(&poly_tmp);
    cvReleaseImage(&poly_gray);

    cvReleaseMemStorage(&contStorage);
    cvReleaseMemStorage(&polyStorage);
	//return 1;
	// Init font
	cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX|CV_FONT_ITALIC, 0.4,0.4,0,1);
	bool is_pushed_decision_button = 1; //to be driven by the signal from the other Raspberry Pi
	
	while(1){
		//Start when the decision button is pressed
		if(gpioRead(8)==0 && is_pushed_decision_button==1){
			cvCopy(img_all_round, img_all_round2);
			cvCopy(img_all_round, show_img);
			img_robot_side = cvQueryFrame(capture_robot_side);
			img_human_side = cvQueryFrame(capture_human_side);
			//IplImage* -> Mat
			mat_frame1 = cv::cvarrToMat(img_robot_side);
			mat_frame2 = cv::cvarrToMat(img_human_side);
			//Flip vertically and horizontally. In production, flip mat_frame1.
			cv::flip(mat_frame1, mat_frame1, 0); //flip around the horizontal axis (vertical flip)
			cv::flip(mat_frame1, mat_frame1, 1); //flip around the vertical axis (horizontal flip)
			vconcat(mat_frame2, mat_frame1, dst_img_v);

			iBrightness  = iSliderValue1 - 50;
			dContrast = iSliderValue2 / 50.0;
			dst_img_v.convertTo(dst_bright_cont, -1, dContrast, iBrightness); //convert the combined image
			//Convert the brightness-adjusted result (Mat -> IplImage*) and hand it over; release afterwards.
			*img_all_round = dst_bright_cont;
			mat_frame1.release();
			mat_frame2.release();
			dst_img_v.release();
	
			IplImage* dst_img_mallet = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
			IplImage* dst_img_pack = cvCreateImage(cvGetSize(img_all_round), IPL_DEPTH_8U, 3);
			IplImage* dst_img2_mallet = cvCreateImage(cvGetSize(img_all_round2), IPL_DEPTH_8U, 3);
			IplImage* dst_img2_pack = cvCreateImage(cvGetSize(img_all_round2), IPL_DEPTH_8U, 3);

			cv_ColorExtraction(img_all_round, dst_img_pack, CV_BGR2HSV, iSliderValuePack1, iSliderValuePack2, iSliderValuePack3, iSliderValuePack4, iSliderValuePack5, iSliderValuePack6);
			cv_ColorExtraction(img_all_round, dst_img_mallet, CV_BGR2HSV, iSliderValuemallet1, iSliderValuemallet2, iSliderValuemallet3, iSliderValuemallet4, iSliderValuemallet5, iSliderValuemallet6);
			cv_ColorExtraction(img_all_round2, dst_img2_pack, CV_BGR2HSV, iSliderValuePack1, iSliderValuePack2, iSliderValuePack3, iSliderValuePack4, iSliderValuePack5, iSliderValuePack6);

			//CvMoments moment_mallet;
			CvMoments moment_pack;
			CvMoments moment_mallet;
			CvMoments moment2_pack;
			//cvSetImageCOI(dst_img_mallet, 1);
			cvSetImageCOI(dst_img_pack, 1);
			cvSetImageCOI(dst_img_mallet, 1);
			cvSetImageCOI(dst_img2_pack, 1);

			//cvMoments(dst_img_mallet, &moment_mallet, 0);
			cvMoments(dst_img_pack, &moment_pack, 0);
			cvMoments(dst_img_mallet, &moment_mallet, 0);
			cvMoments(dst_img2_pack, &moment2_pack, 0);

			//Coordinate calculation
			double m00_before = cvGetSpatialMoment(&moment2_pack, 0, 0);
			double m10_before = cvGetSpatialMoment(&moment2_pack, 1, 0);
			double m01_before = cvGetSpatialMoment(&moment2_pack, 0, 1);
			double m00_after = cvGetSpatialMoment(&moment_pack, 0, 0);
			double m10_after = cvGetSpatialMoment(&moment_pack, 1, 0);
			double m01_after = cvGetSpatialMoment(&moment_pack, 0, 1);
			double gX_before = m10_before/m00_before;
			double gY_before = m01_before/m00_before;
			double gX_after = m10_after/m00_after;
			double gY_after = m01_after/m00_after;
			double m00_mallet = cvGetSpatialMoment(&moment_mallet, 0, 0);
			double m10_mallet = cvGetSpatialMoment(&moment_mallet, 1, 0);
			double m01_mallet = cvGetSpatialMoment(&moment_mallet, 0, 1);
			double gX_now_mallet = m10_mallet/m00_mallet;
			double gY_now_mallet = m01_mallet/m00_mallet;

			int target_direction = -1; //target direction: clockwise = 1, counterclockwise = 0
			//draw circles with radius 1/10 of the frame height
//			cvCircle(show_img, cvPoint(gX_before, gY_before), CAM_PIX_HEIGHT/10, CV_RGB(0,0,255), 6, 8, 0);
//			cvCircle(show_img, cvPoint(gX_now_mallet, gY_now_mallet), CAM_PIX_HEIGHT/10, CV_RGB(0,0,255), 6, 8, 0);
//			cvLine(show_img, cvPoint(gX_before, gY_before), cvPoint(gX_after, gY_after), cvScalar(0,255,0), 2);
//			cvLine(show_img, robot_goal_left, robot_goal_right, cvScalar(0,255,255), 2);
			printf("gX_after: %f\n",gX_after);
			printf("gY_after: %f\n",gY_after);
			printf("gX_before: %f\n",gX_before);
			printf("gY_before: %f\n",gY_before);
			printf("gX_now_mallet: %f\n",gX_now_mallet);
			printf("gY_now_mallet: %f\n",gY_now_mallet);
			int target_destanceY = CAM_PIX_2HEIGHT - 30; //keep the Y distance constant: the defense line
			//The puck travels in straight lines, so use a linear function to predict its trajectory.
			double a_inclination;
			double b_intercept;

			int closest_frequency;

			int target_coordinateX;
			int origin_coordinateY;
			int target_coordinateY;

			double center_line = (lower_right_f.x + lower_right_g.x + lower_left_f.x + lower_left_g.x)/4;
			int left_frame = (upper_left_f.x + lower_left_f.x)/2;
			int right_frame = (upper_right_f.x + lower_right_f.x)/2;

			if(robot_goal_right.x < gX_now_mallet){
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1500);
				target_direction = 0;//counterclockwise
			}
			else if(gX_now_mallet < robot_goal_left.x){
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1500);
				target_direction = 1;//clockwise
			}
			else{
				//pwm output for rotate
				//allow a margin for table vibration
				if(abs(gX_after - gX_before) <= 1 && abs(gY_after - gY_before) <= 1){//pause when the puck is not moving
					gpioPWM(25, 0);
					closest_frequency = gpioSetPWMfrequency(25, 0);
					a_inclination = 0;
					b_intercept=0;
				}
				else{
					a_inclination = (gY_after - gY_before) / (gX_after - gX_before);
					b_intercept = gY_after - a_inclination * gX_after;
					//compute the target X coordinate from the linear function
					if(a_inclination){
						target_coordinateX = (int)((target_destanceY - b_intercept) / a_inclination);
					}
					else{
						target_coordinateX = center_line;
					}

					origin_coordinateY = a_inclination * left_frame + b_intercept;

					int rebound_max = 5;
					int rebound_num = 0;

					while(target_coordinateX < left_frame || right_frame < target_coordinateX){
						if(target_coordinateX < left_frame){ //trajectory after a rebound off the left frame (left-frame average)
							target_coordinateX = 2 * left_frame - target_coordinateX;
							b_intercept -= 2 * ((-a_inclination) * left_frame);
							a_inclination = -a_inclination;
							origin_coordinateY = a_inclination * left_frame + b_intercept;
							if(target_coordinateX < right_frame){
							}
							else{
								//Y coordinate where the puck hits the right frame coming from the left frame
								target_coordinateY = a_inclination * right_frame + b_intercept;
							}
						}
						else if(right_frame < target_coordinateX){ //trajectory after a rebound off the right frame (right-frame average)
							target_coordinateX = 2 * right_frame - target_coordinateX;
							b_intercept += 2 * (a_inclination * right_frame);
							a_inclination= -a_inclination;
							//cvLine(show_img, cvPoint(right_frame, b_intercept), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,0,255), 2);
							origin_coordinateY = a_inclination * right_frame + b_intercept;
							if(left_frame < target_coordinateX){
							}
							else{
								//Y coordinate where the puck hits the left frame coming from the right frame
								target_coordinateY = a_inclination * left_frame + b_intercept;
							}
						}
						rebound_num++;
						if(rebound_max < rebound_num){
							//too many rebounds: target the center
							target_coordinateX = (lower_right_f.x + lower_right_g.x + lower_left_f.x + lower_left_g.x)/4;
							break;
						}
					}
					if(target_coordinateX < center_line && center_line < gX_now_mallet){
						target_direction = 0;
						gpioPWM(25, 128);
						closest_frequency = gpioSetPWMfrequency(25, 1000);
					}
					else if(center_line < target_coordinateX && gX_now_mallet < center_line){
						target_direction = 1;
						gpioPWM(25, 128);
						closest_frequency = gpioSetPWMfrequency(25, 1000);
					}
					else{
						gpioPWM(25, 0);
						closest_frequency = gpioSetPWMfrequency(25, 0);
					}
				}
				printf("a_inclination: %f\n",a_inclination);
				printf("b_intercept: %f\n",b_intercept);
			}
			if(target_direction != -1){
				gpioWrite(18, target_direction);
			}

			//pwm output for rotate
			//allow a margin for table vibration
			/*if(abs(gX_after - gX_before) <= 1){//pause when the puck is not moving
				gpioPWM(25, 0);
				closest_frequency = gpioSetPWMfrequency(25, 0);
				a_inclination = 0;
				b_intercept=0;
			}
			else if(gY_after-1 < gY_before ){	//when the puck is moving away, return to the center of the table
				a_inclination = 0;
				b_intercept=0;
				//The target is the center, computed from the four points on the robot side of the table.
				double center_line = (lower_right_f.x + lower_right_g.x + lower_left_f.x + lower_left_g.x)/4;
				if(center_line + 3 < gX_now_mallet){ //+3 margin
					gpioPWM(25, 128);
					closest_frequency = gpioSetPWMfrequency(25, 1500);
					target_direction = 0;//counterclockwise
				}
				else if(gX_now_mallet < center_line-3){  //-3 margin
					gpioPWM(25, 128);
					closest_frequency = gpioSetPWMfrequency(25, 1500);
					target_direction = 1;//clockwise
				}
				else{
					gpioPWM(25, 0);
					closest_frequency = gpioSetPWMfrequency(25, 0);
				}
			}
			else{
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1500);
				a_inclination = (gY_after - gY_before) / (gX_after - gX_before);
				b_intercept = gY_after - a_inclination * gX_after;
				//compute the target X coordinate from the linear function
				if(a_inclination){
					target_coordinateX = (int)((target_destanceY - b_intercept) / a_inclination);
				}
				else{
					target_coordinateX = 0;
				}
			}

			printf("a_inclination: %f\n",a_inclination);
			printf("b_intercept: %f\n",b_intercept);

			int left_frame = (upper_left_f.x + lower_left_f.x)/2;
			int right_frame = (upper_right_f.x + lower_right_f.x)/2;
			origin_coordinateY = a_inclination * left_frame + b_intercept;
			if(target_coordinateX < left_frame){
				cvLine(show_img, cvPoint((int)gX_after, (int)gY_after), cvPoint(left_frame, origin_coordinateY), cvScalar(0,255,255), 2);
			}
			else if(right_frame < target_coordinateX){
				cvLine(show_img, cvPoint((int)gX_after, (int)gY_after), cvPoint(right_frame, origin_coordinateY), cvScalar(0,255,255), 2);
			}
			else{
				cvLine(show_img, cvPoint((int)gX_after, (int)gY_after), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,255,255), 2);
			}

			int rebound_max = 5;
			int rebound_num = 0;

			while(target_coordinateX < left_frame || right_frame < target_coordinateX){
				if(target_coordinateX < left_frame){ //trajectory after a rebound off the left frame (left-frame average)
					target_coordinateX = 2 * left_frame - target_coordinateX;
					b_intercept -= 2 * ((-a_inclination) * left_frame);
					a_inclination = -a_inclination;
					origin_coordinateY = a_inclination * left_frame + b_intercept;
					if(target_coordinateX < right_frame){
						cvLine(show_img, cvPoint(left_frame, origin_coordinateY), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,255,255), 2);
					}
					else{
						//Y coordinate where the puck hits the right frame coming from the left frame
						target_coordinateY = a_inclination * right_frame + b_intercept;
						cvLine(show_img, cvPoint(left_frame, origin_coordinateY), cvPoint(right_frame, target_coordinateY), cvScalar(0,255,255), 2);
					}
				}
				else if(right_frame < target_coordinateX){ //trajectory after a rebound off the right frame (right-frame average)
					target_coordinateX = 2 * right_frame - target_coordinateX;
					b_intercept += 2 * (a_inclination * right_frame);
					a_inclination= -a_inclination;
					//cvLine(show_img, cvPoint(right_frame, b_intercept), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,0,255), 2);
					origin_coordinateY = a_inclination * right_frame + b_intercept;
					if(left_frame < target_coordinateX){
						cvLine(show_img, cvPoint(right_frame, origin_coordinateY), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,255,255), 2);
					}
					else{
						//Y coordinate where the puck hits the left frame coming from the right frame
						target_coordinateY = a_inclination * left_frame + b_intercept;
						cvLine(show_img, cvPoint(right_frame, origin_coordinateY), cvPoint(left_frame, target_coordinateY), cvScalar(0,255,255), 2);
					}
				}
				rebound_num++;
				if(rebound_max < rebound_num){
					//too many rebounds: target the center
					target_coordinateX = (lower_right_f.x + lower_right_g.x + lower_left_f.x + lower_left_g.x)/4;
					break;
				}
			}

			printf("target_coordinateX: %d\n",target_coordinateX);
			//draw the defense line
			cvLine(show_img, cvPoint(CAM_PIX_WIDTH, target_destanceY), cvPoint(0, target_destanceY), cvScalar(255,255,0), 2);
			//draw the mallet's motion
			cvLine(show_img, cvPoint((int)gX_now_mallet, (int)gY_now_mallet), cvPoint((int)target_coordinateX, target_destanceY), cvScalar(0,0,255), 2);
			//cvPutText (show_img, to_c_char((int)gX_now_mallet), cvPoint(460,30), &font, cvScalar(220,50,50));
			//cvPutText (show_img, to_c_char((int)target_coordinateX), cvPoint(560,30), &font, cvScalar(50,220,220));

			int amount_movement = target_coordinateX - gX_now_mallet;

			//react to limit switches and set target_direction
//			if(gpioRead(6) == 1){//X axis right
//				gpioPWM(25, 128);
//				closest_frequency = gpioSetPWMfrequency(25, 1500);
//				target_direction = 0;//counterclockwise
//				printf("X-axis right limit! counterclockwise\n");
//			}
//			else
			if(gpioRead(26) == 1){//X axis left
				gpioPWM(25, 128);
				closest_frequency = gpioSetPWMfrequency(25, 1500);
				target_direction = 1;//clockwise
				printf("X-axis left limit! clockwise\n");
			}
			else if(gpioRead(5) == 1){//Y axis upper right
				gpioPWM(23, 128);
				gpioSetPWMfrequency(23, 1500);
				gpioWrite(14, 0);
				printf("Y-axis upper-right limit! clockwise\n");
			}
			else if(gpioRead(13) == 1){//Y axis lower right
				gpioPWM(23, 128);
				gpioSetPWMfrequency(23, 1500);
				gpioWrite(14, 1);
				printf("Y-axis lower-right limit! counterclockwise\n");
			}
			else if(gpioRead(19) == 1){//Y axis lower left
				gpioPWM(24, 128);
				gpioSetPWMfrequency(24, 1500);
				gpioWrite(15, 0);
				printf("Y-axis lower-left limit! clockwise\n");
			}
			else if(gpioRead(21) == 1){//Y axis upper left
				gpioPWM(24, 0);
				gpioSetPWMfrequency(24, 1500);
				gpioWrite(15, 1);
				printf("Y-axis upper-left limit! counterclockwise\n");
			}
			else{
				//keep the Y axis fixed
				gpioSetPWMfrequency(23, 0);
				gpioSetPWMfrequency(24, 0);

				if(amount_movement > 0){
					target_direction = 1;//clockwise
				}
				else if(amount_movement < 0){
					target_direction = 0;//counterclockwise
				}
			}
			if(target_direction != -1){
				gpioWrite(18, target_direction);
			}
			else{
				gpioPWM(24, 0);
				gpioSetPWMfrequency(24, 0);
			}
			printf("setting_frequency: %d\n", closest_frequency);*/

			// Display the images in their windows
			//cvShowImage("Previous Image", img_all_round2);
//			cvShowImage("Now Image", show_img);
//			cvShowImage("pack", dst_img_pack);
//			cvShowImage("mallet", dst_img_mallet);
//			cvShowImage ("Poly", poly_dst);

			cvReleaseImage (&dst_img_mallet);
			cvReleaseImage (&dst_img_pack);
			cvReleaseImage (&dst_img2_mallet);
			cvReleaseImage (&dst_img2_pack);

			if(cv::waitKey(1) >= 0) {
				break;
			}
		}
		else{ //a reset signal arrived
			is_pushed_decision_button = 0;
		}
    }
    
    gpioTerminate();
    
    cvDestroyAllWindows();
	
	//Clean up used CvCapture*
	cvReleaseCapture(&capture_robot_side);
	cvReleaseCapture(&capture_human_side);
    //Clean up used images
	cvReleaseImage(&poly_dst);
	cvReleaseImage(&tracking_img);
    cvReleaseImage(&img_all_round);
    cvReleaseImage(&img_human_side);
    cvReleaseImage(&img_all_round2);
    cvReleaseImage(&show_img);
    cvReleaseImage(&img_robot_side);

    return 0;
}
Example #16
	VOID CMT::initialize(const Mat& imGray, const Rect& rect)
	{

		//------------------------------------------------------------------------------------------
		// Import the first [imGray] and its [ROI information : center, and size of rect ]
		//------------------------------------------------------------------------------------------
		imPrev = imGray;
		
		xLimit = imGray.cols - 1;
		yLimit = imGray.rows - 1;

		center = Point2f(rect.x + rect.width / 2.0, rect.y + rect.height / 2.0);
		sizeInitial = rect.size();


		//------------------------------------------------------------------------------------------
		// Designate descriptor
		// this->detector : Assigned in the object constructor
		//------------------------------------------------------------------------------------------
		descriptor = BRISK::create();


		//------------------------------------------------------------------------------------------
		// Detect and segment [foreground/background KeyPoints] by bounding box
		//------------------------------------------------------------------------------------------

		vector<KeyPoint> keypointsFG;
		vector<KeyPoint> keypointsBG;

		for (register UINT it = 0; it < this->keypoints.size(); ++it)
		{
			KeyPoint k = this->keypoints[it];
			Point2f pt = k.pt;

			if (pt.x > rect.x && pt.y > rect.y && pt.x < rect.br().x && pt.y < rect.br().y)
				 keypointsFG.push_back(k);
			else keypointsBG.push_back(k);
		}


		//------------------------------------------------------------------------------------------
		// Extract [foreground/background Descriptions]
		// and Build [database] & [classes] for matcher
		//------------------------------------------------------------------------------------------
		MatND descFG, descBG;
		descriptor->compute(imGray, keypointsFG, descFG);
		descriptor->compute(imGray, keypointsBG, descBG);
		
		MatND database;
		if (descBG.rows > 0 && descFG.rows > 0) vconcat(descBG, descFG, database);
		else if (descBG.rows > 0) database = descBG;
		else database = descFG;

		vector<int> classes = vector<int>(descBG.rows, -1);

		vector<int> classesFG;
		for (register UINT it = 0; it < keypointsFG.size(); ++it)
			classesFG.push_back(it);

		classes.insert(classes.end(), classesFG.begin(), classesFG.end());


		//------------------------------------------------------------------------------------------
		// Extract and Reserve [cv::Point2f] from [cv::KeyPoint]
		//------------------------------------------------------------------------------------------
		vector<Point2f> pointsFG;

		for (register UINT it = 0; it < keypointsFG.size(); ++it)
			pointsFG.push_back(keypointsFG[it].pt);


		//------------------------------------------------------------------------------------------
		// Calculate Foreground Vectors ( Constellation Origin : Center of ROI )
		//------------------------------------------------------------------------------------------		
		vector<Point2f> vectorsFG;
		for (register UINT it = 0; it < pointsFG.size(); ++it)
			vectorsFG.push_back(pointsFG[it] - center);


		//------------------------------------------------------------------------------------------
		// Initialize [matcher] and [consensus]
		//------------------------------------------------------------------------------------------
		matcher.initialize(
			vectorsFG,
			database,
			classes,
			descBG.rows
			);

		consensus.initialize(vectorsFG);


		//------------------------------------------------------------------------------------------
		// Reserve initial information into Archive
		//------------------------------------------------------------------------------------------
		imArchive = imGray;

		pointsActive.assign(pointsFG.begin(), pointsFG.end());
		pointsArchive.assign(pointsFG.begin(), pointsFG.end());
		
		classesActive.assign(classesFG.begin(), classesFG.end());
		classesArchive.assign(classesFG.begin(), classesFG.end());

		this->posPrev = this->center;

		this->isInit = TRUE;
	}
Example #17
static GEN
nf_LLL_cmbf(nfcmbf_t *T, GEN p, long k, long rec)
{
  nflift_t *L = T->L;
  GEN pk = L->pk, PRK = L->prk, PRKinv = L->iprk, GSmin = L->GSmin;
  GEN Tpk = L->Tpk;

  GEN famod = T->fact, nf = T->nf, ZC = T->ZC, Br = T->Br;
  GEN Pbase = T->polbase, P = T->pol, dn = T->dn;
  GEN nfT = gel(nf,1);
  GEN Btra;
  long dnf = degpol(nfT), dP = degpol(P);

  double BitPerFactor = 0.5; /* nb bits / modular factor */
  long i, C, tmax, n0;
  GEN lP, Bnorm, Tra, T2, TT, CM_L, m, list, ZERO;
  double Bhigh;
  pari_sp av, av2, lim;
  long ti_LLL = 0, ti_CF = 0;
  pari_timer ti2, TI;

  lP = absi(leading_term(P));
  if (is_pm1(lP)) lP = NULL;

  n0 = lg(famod) - 1;
 /* Lattice: (S PRK), small vector (vS vP). To find k bound for the image,
  * write S = S1 q + S0, P = P1 q + P0
  * |S1 vS + P1 vP|^2 <= Bhigh for all (vS,vP) assoc. to true factors */
  Btra = mulrr(ZC, mulsr(dP*dP, normlp(Br, 2, dnf)));
  Bhigh = get_Bhigh(n0, dnf);
  C = (long)ceil(sqrt(Bhigh/n0)) + 1; /* C^2 n0 ~ Bhigh */
  Bnorm = dbltor( n0 * C * C + Bhigh );
  ZERO = zeromat(n0, dnf);

  av = avma; lim = stack_lim(av, 1);
  TT = cgetg(n0+1, t_VEC);
  Tra  = cgetg(n0+1, t_MAT);
  for (i=1; i<=n0; i++) TT[i] = 0;
  CM_L = gscalsmat(C, n0);
  /* tmax = current number of traces used (and computed so far) */
  for(tmax = 0;; tmax++)
  {
    long a, b, bmin, bgood, delta, tnew = tmax + 1, r = lg(CM_L)-1;
    GEN oldCM_L, M_L, q, S1, P1, VV;
    int first = 1;

    /* bound for f . S_k(genuine factor) = ZC * bound for T_2(S_tnew) */
    Btra = mulrr(ZC, mulsr(dP*dP, normlp(Br, 2*tnew, dnf)));
    bmin = logint(ceil_safe(sqrtr(Btra)), gen_2, NULL);
    if (DEBUGLEVEL>2)
      fprintferr("\nLLL_cmbf: %ld potential factors (tmax = %ld, bmin = %ld)\n",
                 r, tmax, bmin);

    /* compute Newton sums (possibly relifting first) */
    if (gcmp(GSmin, Btra) < 0)
    {
      nflift_t L1;
      GEN polred;

      bestlift_init(k<<1, nf, T->pr, Btra, &L1);
      polred = ZqX_normalize(Pbase, lP, &L1);
      k      = L1.k;
      pk     = L1.pk;
      PRK    = L1.prk;
      PRKinv = L1.iprk;
      GSmin  = L1.GSmin;
      Tpk    = L1.Tpk;
      famod = hensel_lift_fact(polred, famod, Tpk, p, pk, k);
      for (i=1; i<=n0; i++) TT[i] = 0;
    }
    for (i=1; i<=n0; i++)
    {
      GEN h, lPpow = lP? gpowgs(lP, tnew): NULL;
      GEN z = polsym_gen(gel(famod,i), gel(TT,i), tnew, Tpk, pk);
      gel(TT,i) = z;
      h = gel(z,tnew+1);
      /* make Newton sums integral */
      lPpow = mul_content(lPpow, dn);
      if (lPpow) h = FpX_red(gmul(h,lPpow), pk);
      gel(Tra,i) = nf_bestlift(h, NULL, L); /* S_tnew(famod) */
    }

    /* compute truncation parameter */
    if (DEBUGLEVEL>2) { TIMERstart(&ti2); TIMERstart(&TI); }
    oldCM_L = CM_L;
    av2 = avma;
    b = delta = 0; /* -Wall */
AGAIN:
    M_L = Q_div_to_int(CM_L, utoipos(C));
    VV = get_V(Tra, M_L, PRK, PRKinv, pk, &a);
    if (first)
    { /* initialize lattice, using few p-adic digits for traces */
      bgood = (long)(a - max(32, BitPerFactor * r));
      b = max(bmin, bgood);
      delta = a - b;
    }
    else
    { /* add more p-adic digits and continue reduction */
      if (a < b) b = a;
      b = max(b-delta, bmin);
      if (b - delta/2 < bmin) b = bmin; /* near there. Go all the way */
    }

    /* restart with truncated entries */
    q = int2n(b);
    P1 = gdivround(PRK, q);
    S1 = gdivround(Tra, q);
    T2 = gsub(gmul(S1, M_L), gmul(P1, VV));
    m = vconcat( CM_L, T2 );
    if (first)
    {
      first = 0;
      m = shallowconcat( m, vconcat(ZERO, P1) );
      /*     [ C M_L   0  ]
       * m = [            ]   square matrix
       *     [  T2'   PRK ]   T2' = Tra * M_L  truncated
       */
    }
    CM_L = LLL_check_progress(Bnorm, n0, m, b == bmin, /*dbg:*/ &ti_LLL);
    if (DEBUGLEVEL>2)
      fprintferr("LLL_cmbf: (a,b) =%4ld,%4ld; r =%3ld -->%3ld, time = %ld\n",
                 a,b, lg(m)-1, CM_L? lg(CM_L)-1: 1, TIMER(&TI));
    if (!CM_L) { list = mkcol(QXQX_normalize(P,nfT)); break; }
    if (b > bmin)
    {
      CM_L = gerepilecopy(av2, CM_L);
      goto AGAIN;
    }
    if (DEBUGLEVEL>2) msgTIMER(&ti2, "for this trace");

    i = lg(CM_L) - 1;
    if (i == r && gequal(CM_L, oldCM_L))
    {
      CM_L = oldCM_L;
      avma = av2; continue;
    }

    if (i <= r && i*rec < n0)
    {
      pari_timer ti;
      if (DEBUGLEVEL>2) TIMERstart(&ti);
      list = nf_chk_factors(T, P, Q_div_to_int(CM_L,utoipos(C)), famod, pk);
      if (DEBUGLEVEL>2) ti_CF += TIMER(&ti);
      if (list) break;
      CM_L = gerepilecopy(av2, CM_L);
    }
    if (low_stack(lim, stack_lim(av,1)))
    {
      if(DEBUGMEM>1) pari_warn(warnmem,"nf_LLL_cmbf");
      gerepileall(av, Tpk? 9: 8,
                      &CM_L,&TT,&Tra,&famod,&pk,&GSmin,&PRK,&PRKinv,&Tpk);
    }
  }
  if (DEBUGLEVEL>2)
    fprintferr("* Time LLL: %ld\n* Time Check Factor: %ld\n",ti_LLL,ti_CF);
  return list;
}