Example #1
void GazeTracker::updateLeft(const cv::Mat *image, const cv::Mat *eyeGrey) {
	if (isActive()) {
		output.gazePointLeft = Point(_gaussianProcessXLeft->getmean(Utils::SharedImage(image, &ignore)),
			_gaussianProcessYLeft->getmean(Utils::SharedImage(image, &ignore)));

		// Neural network: resize the eye image down to the 16x8 network input and equalize its histogram
		cv::resize(*eyeGrey, *_nnEye, _nnEye->size());
		cv::equalizeHist(*_nnEye, *_nnEye);

		// Pack the pixels into the network input buffer (std::vector instead of a
		// non-standard variable-length array)
		std::vector<fann_type> inputs(_nnEyeWidth * _nnEyeHeight);
		for (int i = 0; i < _nnEyeWidth * _nnEyeHeight; ++i) {
			// Note: cv::Mat::data is unsigned, so this maps each pixel into (0.5, 1.5);
			// the older IplImage version indexed signed imageData, giving (0, 1)
			inputs[i] = (float)(_nnEye->data[i] + 129) / 257.0f;
		}

		fann_type *outputs = fann_run(_ANNLeft, inputs.data());
		Utils::mapFromNeuralNetworkToScreenCoordinates(Point(outputs[0], outputs[1]), output.nnGazePointLeft);

		if (_gammaX != 0) {
			// Overwrite the NN output with the average of the GP outputs, calibration errors removed
			output.nnGazePoint.x = (output.gazePoint.x + output.gazePointLeft.x) / 2;
			output.nnGazePoint.y = (output.gazePoint.y + output.gazePointLeft.y) / 2;

			removeCalibrationError(output.nnGazePoint);

			output.nnGazePointLeft.x = output.nnGazePoint.x;
			output.nnGazePointLeft.y = output.nnGazePoint.y;
		}
	}
}
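
For context, here is a minimal, self-contained sketch of the FANN inference step used above. It is an illustration only: the network topology (128 inputs, one hidden layer, 2 outputs) and the flat grey eye buffer are placeholder assumptions, not the tracker's trained network.

// Standalone sketch of the normalize-and-run step (assumes FANN is installed;
// the topology and input data are illustrative placeholders)
#include <floatfann.h>
#include <stdio.h>
#include <vector>

int main() {
	const unsigned int width = 16, height = 8;

	// Untrained stand-in for the tracker's network: 128 inputs -> 10 hidden -> 2 outputs
	struct fann *ann = fann_create_standard(3, width * height, 10, 2);

	// Placeholder for the resized, histogram-equalized eye patch
	std::vector<unsigned char> eye(width * height, 128);

	// Same shift-and-scale normalization as updateLeft()
	std::vector<fann_type> inputs(width * height);
	for (unsigned int i = 0; i < width * height; ++i) {
		inputs[i] = (float)(eye[i] + 129) / 257.0f;
	}

	fann_type *outputs = fann_run(ann, inputs.data());
	// In the tracker, outputs[0] and outputs[1] are mapped to screen coordinates
	printf("raw network output: %f %f\n", outputs[0], outputs[1]);

	fann_destroy(ann);
	return 0;
}
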
Example #2
void GazeTracker::update_left(const IplImage *image, const IplImage *eyegrey, vector<vector<int> > vector_h_v_combined) {
    if (isActive()) {
		// TODO ARCADI 23/12
		//output.gazepoint_left = Point(gpx_left->getmean(SharedImage(image, &ignore)),
		//			 gpy_left->getmean(SharedImage(image, &ignore)));

		// TODO ONUR COMBINED H AND V
		//vector<vector<int> > vector_h_v_combined;
		//vector_h_v_combined.insert(vector_h_v_combined.end(), vector_horizontal_left.begin(), vector_horizontal_left.end());
		//vector_h_v_combined.insert(vector_h_v_combined.end(), vector_vertical_left.begin(), vector_vertical_left.end());

		//cout << "UPDATE LEFT! Vertical Vector size: " << vector_h_v_combined.size() << endl;
		//cout.flush();

		//output.gazepoint_left = Point(histx_left->getmean(vector_horizontal_left),
		//			 histy_left->getmean(vector_vertical_left));

		// Use combined only for Y
		//output.gazepoint_left = Point(histx_left->getmean(vector_horizontal_left),
		//			 histy_left->getmean(vector_h_v_combined));

		// Use combined for both directions
		output.gazepoint_left = Point(histx_left->getmean(vector_h_v_combined),
			histy_left->getmean(vector_h_v_combined));

/*
    	vector<int> vector_horizontal_and_vertical_left;
		
		copy( vector_horizontal_left.begin(), vector_horizontal_left.end(), back_inserter(vector_horizontal_and_vertical_left));
		copy( vector_vertical_left.begin(), vector_vertical_left.end(), back_inserter(vector_horizontal_and_vertical_left));

		output.gazepoint_left = Point(histx_left->getmean(vector_horizontal_and_vertical_left), 
					 histy_left->getmean(vector_horizontal_and_vertical_left));
*/
		// Neural network: resize the eye image down to the 16x8 network input and equalize its histogram
		cvResize(eyegrey, nn_eye);
		cvEqualizeHist(nn_eye, nn_eye);

		// Pack the pixels into the network input buffer (vector instead of a
		// non-standard variable-length array); imageData is signed char, so each
		// pixel normalizes into (0, 1)
		vector<fann_type> inputs(nn_eyewidth * nn_eyeheight);
		for (int i = 0; i < nn_eyewidth * nn_eyeheight; ++i) {
			inputs[i] = (float)(nn_eye->imageData[i] + 129) / 257.0f;
		}

		fann_type *outputs = fann_run(ANN_left, inputs.data());
		mapFromNeuralNetworkToScreenCoordinates(Point(outputs[0], outputs[1]), output.nn_gazepoint_left);
		
		if (gamma_x != 0) {
			// Overwrite the NN output with the average of the GP outputs, calibration errors removed
			output.nn_gazepoint.x = (output.gazepoint.x + output.gazepoint_left.x) / 2;
			output.nn_gazepoint.y = (output.gazepoint.y + output.gazepoint_left.y) / 2;
		
			removeCalibrationError(output.nn_gazepoint);
		
			output.nn_gazepoint_left.x = output.nn_gazepoint.x;
			output.nn_gazepoint_left.y = output.nn_gazepoint.y;
		}
    }
}
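
The commented-out variants in update_left differ only in how the histogram feature vector is assembled before calling getmean. Below is a minimal sketch of the concatenation they experiment with, using placeholder feature values; the real vectors come from the tracker's eye histograms.

// Standalone sketch of concatenating the horizontal and vertical histogram
// features, as in the /* ... */ block above (feature values are placeholders)
#include <algorithm>
#include <iostream>
#include <iterator>
#include <vector>

int main() {
	// Placeholder horizontal/vertical histogram features
	std::vector<int> vector_horizontal_left(8, 1);
	std::vector<int> vector_vertical_left(4, 2);

	// Concatenate horizontal then vertical into one combined feature vector
	std::vector<int> combined;
	combined.reserve(vector_horizontal_left.size() + vector_vertical_left.size());
	std::copy(vector_horizontal_left.begin(), vector_horizontal_left.end(), std::back_inserter(combined));
	std::copy(vector_vertical_left.begin(), vector_vertical_left.end(), std::back_inserter(combined));

	// The same combined vector can then be passed to both getmean calls
	std::cout << "combined feature size: " << combined.size() << std::endl;
	return 0;
}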