Esempio n. 1
0
// Application entry point: constructs the Qt event loop, starts the
// image-processing pipeline, then blocks in the event loop until quit.
int main(int argc, char *argv[])
{
    QCoreApplication a(argc, argv);  // owns the event loop
    ImageProcessor processor;
    processor.run();                 // kick off processing before entering the loop
    return a.exec();                 // blocks until the application quits
}
Esempio n. 2
0
// Entry point for the "computer_vision" ROS node: waits until the
// ImageProcessor reports that both its pose and video publishers are up,
// then republishes processed images at a fixed rate until shutdown.
int main(int argc, char** argv)
{
  ros::init(argc, argv, "computer_vision");

  // if (ros::console::set_logger_level(ROSCONSOLE_DEFAULT_NAME, ros::console::levels::Debug))
  // {
  //   ros::console::notifyLoggerLevelsChanged();
  // }

  ImageProcessor ic;
  ROS_DEBUG("end of ImageProcessor initialization");

  ros::Rate r(12);  // 12Hz =  average frequency at which we receive images

  // Warm-up loop: service callbacks until both publishers are ready
  // (or the node is asked to shut down).
  while ((!ic.pose_publishing || !ic.video_publishing) && ros::ok())
  {
    ros::spinOnce();
    r.sleep();
  }

  // Main loop: service callbacks, publish the processed image, sleep.
  while (ros::ok())
  {
    ros::spinOnce();  // if we dont want this we have to place callback and services in threads

    ic.publishProcessedImg();
    r.sleep();
  }

  return 0;
}
// Toggles calibration mode on both camera processors and refreshes the view.
void VisionWindow::updateCalibrationCheck(bool value) {
  doingCalibration_ = value;
  core_->vision_->top_processor_->enableCalibration(value);
  core_->vision_->bottom_processor_->enableCalibration(value);
  redrawImages();
}
/**
 * RatingsProcessor::onReplyFinished()
 *
 * Handler for the signal indicating the response for the previous network request.
 *
 * If the result was a success, it will start the thread of constructing the QImage object.
 */
void RatingsProcessor::onReplyFinished() {
	QNetworkReply* reply = qobject_cast<QNetworkReply*>(sender());

	QString response;
	if (reply) {
		if (reply->error() == QNetworkReply::NoError) {
			const int available = reply->bytesAvailable();
			if (available > 0) {

				const QByteArray data(reply->readAll());

				// Setup the image processing thread
				ImageProcessor *imageProcessor = new ImageProcessor(data);

				/*
				 * Invoke our onProcessingFinished slot after the processing has finished.
				 * Since imageProcessor and 'this' are located in different threads we use 'QueuedConnection' to
				 * allow a cross-thread boundary invocation. In this case the QImage parameter is copied in a thread-safe way
				 * from the worker thread to the main thread.
				 */
				connect(imageProcessor, SIGNAL(finished(QImage)), this,
						SLOT(onImageProcessingFinished(QImage)),
						Qt::QueuedConnection);

				imageProcessor->start();
			}
		} else {
			if (reply->error() < 100) {
				m_loading = false;
				emit loadingChanged();
				showError("Please check your internet connection");
				return;
			}
			m_label =
					tr("Error: %1 status: %2").arg(reply->errorString(),
							reply->attribute(
									QNetworkRequest::HttpStatusCodeAttribute).toString());

			emit labelChanged();

			m_loading = false;
			emit loadingChanged();

			emit ratValueChanged();
		}

		reply->deleteLater();
	} else {
		m_label = tr("Download failed.");

		emit labelChanged();

		m_loading = false;
		emit loadingChanged();

		emit ratValueChanged();
	}
}
Esempio n. 5
0
void test_video() {

	VideoCapture cap(CV_CAP_ANY);
	ImageProcessor processor;
	ImageLoader loader;
	NeuralNetwork net;
	net.load(NET_FILE_NAME);

	//net.visualize_hidden_units(1, 50);

	if (!cap.isOpened()) {
		cout << "Failed to initialize camera\n";
		return;
	}

	namedWindow("CameraCapture");
	namedWindow("ProcessedCapture");

	cv::Mat frame;
	while (true) {

		cap >> frame;

		cv::Mat processedFrame = processor.process_image(frame);

		if(processedFrame.rows * processedFrame.cols == INPUT_LAYER_SIZE) {

			mat input = loader.to_arma_mat(processedFrame);

			int label = net.predict(input);

			if(label == 0)
				putText(frame, "A", Point(500, 300), FONT_HERSHEY_SCRIPT_SIMPLEX, 2, Scalar::all(0), 3, 8);
			else if(label == 1)
				putText(frame, "E", Point(500, 300), FONT_HERSHEY_SCRIPT_SIMPLEX, 2, Scalar::all(0), 3, 8);
			else if(label == 2)
				putText(frame, "I", Point(500, 300), FONT_HERSHEY_SCRIPT_SIMPLEX, 2, Scalar::all(0), 3, 8);
			else if(label == 3)
				putText(frame, "O", Point(500, 300), FONT_HERSHEY_SCRIPT_SIMPLEX, 2, Scalar::all(0), 3, 8);
			else if(label == 4)
				putText(frame, "U", Point(500, 300), FONT_HERSHEY_SCRIPT_SIMPLEX, 2, Scalar::all(0), 3, 8);
		}

		imshow("CameraCapture", frame);
		imshow("ProcessedCapture", processedFrame);

		int key = waitKey(5);

		if(key == 13) {
			imwrite("captura.jpg", frame);
		}
		if (key == 27)
			break;
	}

	destroyAllWindows();
}
Esempio n. 6
0
// Pulls the image path out of a finished processor and republishes it if it
// changed.
void ImageLoader::handleReply(AbstractProcessor *processor)
{
    Q_D(ImageLoader);
    // qobject_cast returns null when the processor is not an ImageProcessor;
    // the original code dereferenced the result unconditionally.
    ImageProcessor *imageProcessor = qobject_cast<ImageProcessor *>(processor);
    if (!imageProcessor) {
        return;
    }
    QString imagePath = imageProcessor->imagePath();
    if (d->imagePath != imagePath) {
        d->imagePath = imagePath;
        emit imagePathChanged();
    }
}
Esempio n. 7
0
// Handles completion of the network request: on success, spins up a worker
// thread that decodes the downloaded bytes into a QImage; on failure, updates
// the label/loading state accordingly.
void ImageLoader::onReplyFinished()
{
    QNetworkReply* reply = qobject_cast<QNetworkReply*>(sender());

    QString response;
    if (reply) {
        if (reply->error() == QNetworkReply::NoError) {
            const int available = reply->bytesAvailable();
            if (available > 0) {
                const QByteArray data(reply->readAll());

                // Setup the image processing thread
                ImageProcessor *imageProcessor = new ImageProcessor(data);
                m_thread = new QThread(this);

                // Move the image processor to the worker thread
                imageProcessor->moveToThread(m_thread);

                // Invoke ImageProcessor's start() slot as soon as the worker thread has started
                connect(m_thread, SIGNAL(started()), imageProcessor, SLOT(start()));

                // Delete the worker thread automatically after it has finished
                connect(m_thread, SIGNAL(finished()), m_thread, SLOT(deleteLater()));

                // Also delete the worker object itself once its thread stops;
                // previously only the thread was reclaimed and each request
                // leaked one ImageProcessor instance.
                connect(m_thread, SIGNAL(finished()), imageProcessor, SLOT(deleteLater()));

                /*
                 * Invoke our onProcessingFinished slot after the processing has finished.
                 * Since imageProcessor and 'this' are located in different threads we use 'QueuedConnection' to
                 * allow a cross-thread boundary invocation. In this case the QImage parameter is copied in a thread-safe way
                 * from the worker thread to the main thread.
                 */
                connect(imageProcessor, SIGNAL(finished(QImage)), this, SLOT(onImageProcessingFinished(QImage)), Qt::QueuedConnection);

                // Terminate the thread after the processing has finished
                connect(imageProcessor, SIGNAL(finished(QImage)), m_thread, SLOT(quit()));

                m_thread->start();
            }
        } else {
            m_label = tr("Error: %1 status: %2").arg(reply->errorString(), reply->attribute(QNetworkRequest::HttpStatusCodeAttribute).toString());
            emit labelChanged();

            m_loading = false;
            emit loadingChanged();
        }

        reply->deleteLater();
    } else {
        m_label = tr("Download failed. Check internet connection");
        emit labelChanged();

        m_loading = false;
        emit loadingChanged();
    }
}
/*
 * tests the distance() function
 */
void testDistance(){
	double result = imageProcessor.distance(cv::Point(100, 15), cv::Point(75, 36));
	assert(result < 32.64 + DELTA && result > 32.64 - DELTA);
	
	result = imageProcessor.distance(cv::Point(3, 5), cv::Point(9, 7));
	assert(result < 6.32 + DELTA && result > 6.32 - DELTA);

	result = imageProcessor.distance(cv::Point(572, 641), cv::Point(894, 127));
	assert(result < 606.53 + DELTA && result > 606.53 - DELTA);

	printf("distance() test passed.\n");
}
/*
 * tests the contourDetection() function
*/
void testContourDetection(){
	cv::Mat img = cv::imread("images/two2.png", CV_LOAD_IMAGE_COLOR);
	assert(!img.empty());

	// NOTE(review): the returned lines are never used below; presumably
	// lineDetection() is called for a side effect on img -- confirm.
	cv::vector<cv::Vec4i> lines = imageProcessor.lineDetection(img);

	cv::vector< cv::vector<cv::Point> > contours;
	cv::vector<cv::Vec4i> hierarchy;
	imageProcessor.contourDetection(img, contours, hierarchy);

	// The expected contour count (53) is tied to the fixture image two2.png.
	assert(!contours.empty());
	assert(!hierarchy.empty());
	assert((int)contours.size() == 53);

	printf("contourDetection() test passed.\n");
}
Esempio n. 10
0
/*
 * tests the lineDetection() function
*/
void testLineDetection(){
	cv::Mat img = cv::imread("images/line.png", CV_LOAD_IMAGE_COLOR);
	// Fail fast if the fixture is missing (consistent with the other tests
	// in this file, which assert after every imread; this one did not).
	assert(!img.empty());

	cv::vector<cv::Vec4i> line_vec = imageProcessor.lineDetection(img);
	assert(!line_vec.empty());
	assert(line_vec.size() <= 7);

	line_vec.clear();

	img = cv::imread("images/horizontal-lines.jpg", CV_LOAD_IMAGE_COLOR);
	assert(!img.empty());

	line_vec = imageProcessor.lineDetection(img);
	assert(!line_vec.empty());
	assert(line_vec.size() <= 13);

	printf("lineDetection() test passed.\n");
}
// Handles a mouse click on the big image view. In calibration mode the click
// is recorded as a calibration sample; in classification mode it edits the
// colour lookup table at the clicked pixel's YUV coordinates.
void VisionWindow::updateClicked(int xIdx, int yIdx, int buttonIdx){
  if(!initialized_) return;
  int image = currentBigImageCam_;
  ImageProcessor* processor = getImageProcessor(image);
  unsigned char* colorTable = processor->getColorTable();
  const ImageParams& iparams = processor->getImageParams();

  if (doingCalibration_) {
    // Record the clicked pixel as a calibration sample for the current camera.
    Sample s; s.x = xIdx; s.y = yIdx;
    if(image == IMAGE_TOP)
      s.camera = Camera::TOP;
    else
      s.camera = Camera::BOTTOM;
    emit calibrationSampleAdded(s);
    redrawImages();
  }

  if (doingClassification_) {
    if (buttonIdx == Qt::LeftButton) {

      //for(int i=0; i < LUT_SIZE; i++)
        //std::cout << colorTable[i] << "\,";
      //std::cout << "DONE\n";
      // Preview path: snapshot the table, apply the update, reprocess the
      // frame for display, then restore the original table so the change
      // is not yet permanent.
      memcpy(tempTable,colorTable,LUT_SIZE);
      ColorTableMethods::xy2yuv(processor->getImg(), xIdx, yIdx, iparams.width, currentY_, currentU_, currentV_);
      updateTable(colorTable, currentY_, currentU_, currentV_);
      //for(int i=0; i < LUT_SIZE; i++)
        //std::cout << tempTable[i] << "\,";
        //sstd::cout << "\n";
      colorUpdateAvailable_ = true;
      redrawImages();
      processor->processFrame();
      memcpy(colorTable,tempTable,LUT_SIZE);

    } else if (buttonIdx == Qt::RightButton && colorUpdateAvailable_) {

      // Commit path: save the current table for undo, then apply the pending
      // update using the Y/U/V captured by the preceding left-click preview.
      memcpy(undoTable, colorTable, LUT_SIZE);
      undoImage_ = image;
      updateTable(colorTable, currentY_, currentU_, currentV_);
      colorUpdateAvailable_ = false;

      redrawImages();
    }
  }
}
Esempio n. 12
0
/*
 * Runs processImage() over two fixture images and displays the results
 * until a key is pressed.
 */
void testProcessImage(){
	const char *files[]  = { "images/horizontal-lines.jpg", "images/two2.png" };
	const char *titles[] = { "processing horizontal-lines.jpg", "processing two2.png" };

	for (int i = 0; i < 2; ++i) {
		cv::Mat img = cv::imread(files[i], CV_LOAD_IMAGE_COLOR);
		assert(!img.empty());

		imageProcessor.processImage(img);
		cv::imshow(titles[i], img);
	}

	cv::waitKey(0);

	printf("processImage() test passed.\n");
}
int main(int argc, char** argv) {
  if ( argc != 2 ) {
    printf("usage: testImageProcessor <Image_Path>\n");
    return -1;
  }

  ImageProcessor imgProc;
  imgProc.readImage(argv[1]);

  using namespace std;
  using namespace cv;
  Mat im = imgProc.getImage();
 /* cout << im << endl << endl;*/
  //cout << endl;
  //Point3_<uchar>* p = im.ptr<Point3_<uchar> >(1,1);
  //p = im.ptr<Point3_<uchar> >(1,4);
  //p = im.ptr<Point3_<uchar> >(1,7);
  imgProc.determinePerspTransforms(im);

  Mat im_trans_left = imgProc.perspTransIm(LEFT);
  Mat im_trans_right = imgProc.perspTransIm(RIGHT);
  Mat im_trans_up = imgProc.perspTransIm(UP);
  Mat im_trans_down = imgProc.perspTransIm(DOWN);
  //vector<uint8_t> vectorBGR = imgProc.convertToBGRVector(im);
  //vectorBGR = imProc.thresholdVec(vectorBGR);
  /*for (auto i: vectorBGR) {*/
    //cout << i << ' ';
  //}
  /*cout << endl;*/

  //imgProc.reconfigureImage(3, 3, 2, 3); 
  //imwrite("../bin/images/testReshapeImage2_Result.tif", imgProc.getImage());
  /*imshow("DEFAULT", im);*/
  //imshow("LEFT", im_trans_left);
  //imshow("RIGHT", im_trans_right);
  //imshow("UP", im_trans_up);
  /*imshow("DOWN", im_trans_down);*/
  imwrite("images/im_default.jpg", im);
  imwrite("images/im_down.jpg", im_trans_down);
  imwrite("images/im_left.jpg", im_trans_left);
  imwrite("images/im_right.jpg", im_trans_right);
  imwrite("images/im_up.jpg", im_trans_up);

  /*cout << "Press ENTER to finish" << endl;*/
  /*cin.ignore();*/

  return 0;
}
Esempio n. 14
0
// Runs the processing + detection pipeline over every frame of the camera,
// displaying both the intermediate and the annotated result images.
void processImages( Camera& camera, ImageProcessor& processor, Detector& detector )
{
	cout << "Camera "<< camera.id << ": --Processing Images" << endl;
    	
	stringstream result_window;
	result_window << camera.name << ": detected particles";
	
	processor.addControls();
	detector.addControls();
	cv::waitKey(10);  // give HighGUI a moment to create the control windows

	// NOTE(review): frames is resized here and frames[i].image is read right
	// below; unless resize() value-initializes Frames with valid images (or
	// they are filled elsewhere), each clone() starts from an empty Mat.
	// camera.imagelist is only used for its size -- confirm intended data flow.
	size_t number_of_frames = camera.imagelist.size();
	camera.frames.resize( number_of_frames );
	for (int i = 0; i < number_of_frames; ++i) {
		cv::Mat temp_image = camera.frames[i].image.clone();
		processor.processImage( temp_image );
		cv::imshow("processed image", temp_image );
		detector.detectFeatures( temp_image, camera.frames[i].particles);
		detector.drawResult( camera.frames[i] );
		cv::imshow(result_window.str(), camera.frames[i].image);
	}
	cv::destroyWindow( result_window.str() );
}
Esempio n. 15
0
int main(int argc, char **argv)
{
	if (argc != 2) {
		exit(1);
	}
	signal(SIGINT, intHandler);

	std::string arg = argv[1];
 	cv::VideoCapture capture(arg); //try to open string, this will attempt
        if (!capture.isOpened()) //if this fails, try to open as a video camera
                capture.open(atoi(arg.c_str()));
        if (!capture.isOpened()) {
                std::cerr << "Failed to open a video device or video file!\n" << std::endl;
                return 1;
        }

	ImageProcessor *ip = new HSV_Region_Processor_Min_Alloc(capture);
	BotController *bt = new BotController();
	Region *dp;
	cv::Mat frame;
	ip->initialiseWindow();
	std::vector<Region *> *regionList;
	while (keepRunning) {
		capture >> frame;
		ip->cleanRegionList();
		regionList = ip->processFrame(frame);
		std::sort(regionList->begin(), regionList->end(), compareBySize);
		dp = (*regionList)[0];
		if (dp != NULL && dp->getSize() > 100) {
			double angle = ip->angle(frame, *dp);
			double distance = ip->distance(frame, *dp);
			std::cout << angle << " - " << distance << std::endl;
			bt->move(angle, distance);
			ip->drawArrow(frame, angle, distance);

			//ip->saveFrame(frame);
		} else {
			std::cout << "No object found, sitting still" << std::endl;
			bt->stop();
		}
		ip->drawFrame(frame);
		cv::waitKey(5);
//		ip->processKeys(frame);
	}
	std::cout << "Shutting down" << std::endl;
	bt->stop();
}
Esempio n. 16
0
// One step of the camera self-test: lights the LEDs, captures an image,
// thresholds/processes it with preference-driven parameters, optionally writes
// the intermediate images to flash, and prints a target report with timings.
void CameraTest::Execute() {
    cameraLEDsSubsystem->GreenOn();
    cameraLEDsSubsystem->BlueOff();

    // Allow time for the LEDs to light up
    if(!timer->HasPeriodPassed(0.1)) { return; }   // min tested was 80ms

    Preferences * prefs = Preferences::GetInstance();

    // Capture an image from the camera and save it to flash
    timer->Reset();
    ColorImage * image;
    if (prefs->GetInt("image_retain", 1) == 1) {
        cameraSubsystem->RetainImage("/cameratest.jpg");
        image = cameraSubsystem->CaptureImage();
        cameraSubsystem->RetainImage(NULL);  // stop retaining
        printf("[CAMERA] Captured image and wrote to /cameratest.jpg in %.1f ms\n", timer->Get() * 1000);
    } else {
        image = cameraSubsystem->CaptureImage();
        printf("[CAMERA] Captured image in %.1f ms\n", timer->Get() * 1000);
    }

    // Load preferences for filtering threshold image
    Threshold threshold = Threshold(prefs->GetInt("hue_low", 100), prefs->GetInt("hue_high", 140),
                                    prefs->GetInt("sat_low", 90), prefs->GetInt("sat_high", 255),
                                    prefs->GetInt("lum_low", 20), prefs->GetInt("lum_high", 255));

    // Process the captured image
    timer->Reset();
    ImageProcessor * ip = new ImageProcessor();
    ip->SetThreshold(threshold);
    ip->Process(image);
    printf("[CAMERA] Image processed in %.1f ms\n", timer->Get() * 1000);

    // Write the processed images to flash
    if (prefs->GetInt("image_retain", 1) == 1) {
        timer->Reset();
        ip->WriteImages("/cameratest");
        printf("[CAMERA] Processed images written to /cameratest.*.bmp in %.1f ms\n", timer->Get() * 1000);
    }

    // Generate a target report
    timer->Reset();
    TargetReport * tr = new TargetReport(ip->GetThresholdImage(), ip->GetFilteredImage());
    tr->Generate();
    printf("[CAMERA] Target report generated in %.1f ms\n", timer->Get() * 1000);

    tr->OutputScores();

    finished = true;

    // NOTE(review): `image` returned by CaptureImage() is never released here;
    // if the caller owns it this is a per-run leak -- confirm ownership with
    // the camera subsystem's contract.
    delete tr;
    delete ip;
}
Esempio n. 17
0
// Adds or removes |delta| seam-carved columns on the working image "temp.png".
// Positive delta inserts columns, negative removes them; zero is a no-op.
static void applySeamColumnChange(int delta) {
	if (delta > 0) {
		ImageProcessor myImageProcessor;
		myImageProcessor.addCols( delta );
	} else if (delta < 0) {
		ImageProcessor myImageProcessor;
		myImageProcessor.removeCols( -delta );
	}
}

// Rotates the working image "temp.png" in place by the given angle (degrees).
static void rotateTempImage(double degrees) {
	Magick::Image img("temp.png");
	img.rotate(degrees);
	img.write("temp.png");
}

/**
 * Seam-carves inputFile by changeX columns and changeY rows, writing the
 * result to outputFile. Row changes are implemented by rotating the image
 * 90 degrees, reusing the column code, and rotating back. "temp.png" is used
 * as scratch space and removed at the end.
 *
 * The previous version duplicated the add/remove-columns branching for both
 * axes; the logic is now factored into applySeamColumnChange().
 */
void SeamCarvingTransform(char * inputFile,
						  char * outputFile,
						  int	changeX,
						  int changeY) {

	Magick::Image myImageHandler(inputFile);
	myImageHandler.write("temp.png");

	applySeamColumnChange(changeX);

	if (changeY != 0) {
		rotateTempImage(90);
		applySeamColumnChange(changeY);
		rotateTempImage(-90);
	}

	Magick::Image outputImageHandler("temp.png");
	outputImageHandler.write(outputFile);
	remove("temp.png");
}
Esempio n. 18
0
/*
 * tests the thinning of straight lines
 */
 void testLineThinning(){
 	cv::Mat img = cv::imread("images/horizontal-lines.jpg", CV_LOAD_IMAGE_COLOR);
 	assert(!img.empty());

 	cv::imshow("original image.", img);

 	//convert image to grayscale
	if(img.channels() > 1){
		cv::cvtColor(img, img, CV_RGB2GRAY);
	}

 	//convert image to a binary image
	cv::threshold(img, img, 10, 255, CV_THRESH_BINARY_INV);

 	imageProcessor.thinning(img);

 	cv::imshow("image after thinning.", img);

 	cv::waitKey(0);

	printf("thinning() test passed.\n");
 }
Esempio n. 19
0
/*
 * tests removeRedundantContours() function
 */
void testRemoveRedundantContours(){
	//test removing redundant contours with an image that should not have any contours after processing
	cv::Mat img = cv::imread("images/horizontal-lines.jpg", CV_LOAD_IMAGE_COLOR);
	assert(!img.empty());

	//process for lines and contours
	cv::vector<cv::Vec4i> lines = imageProcessor.lineDetection(img);
	assert(!lines.empty());
	cv::vector< cv::vector<cv::Point> > contours;
	cv::vector<cv::Vec4i> hierarchy;
	imageProcessor.contourDetection(img, contours, hierarchy);

	// Every contour in this image belongs to a detected line, so all of them
	// should be discarded as redundant.
	cv::vector< cv::vector<cv::Point> > valid_contours = imageProcessor.removeRedundantContours(contours, lines);

	assert((int)valid_contours.size() == 0);

	//empty vectors for 2nd test
	lines.clear();
	contours.clear();
	hierarchy.clear();
	valid_contours.clear();

	//test removing redundant contours with an image that will have valid contours after processing the image
	img = cv::imread("images/two2.png", CV_LOAD_IMAGE_COLOR);
	assert(!img.empty());

	//process for lines and contours
	lines = imageProcessor.lineDetection(img);
	assert(!lines.empty());
	imageProcessor.contourDetection(img, contours, hierarchy);

	valid_contours = imageProcessor.removeRedundantContours(contours, lines);

	// The expected count (48) is tied to the fixture image two2.png.
	assert((int)valid_contours.size() == 48);

	printf("removeRedundantContours() test passed.\n");
}
Esempio n. 20
0
// Rotation varies from -30 to 30
// Max slider value is 60
// Smaller range to ensure 0 can be found again.
// NOTE(review): the formula below maps val = 3000 to rotation 0, which
// implies a trackbar range of [0, 6000] (hundredths of a degree) and
// contradicts the "max slider value is 60" comment above; with val in
// [0, 60] the result is always 29 or 30. Confirm the actual trackbar max.
void rotationCallback(int val, void *ipPtr){
    ImageProcessor *ip = static_cast<ImageProcessor *>(ipPtr);
    ip->rotation = (val - 3000) / -100;
    ip->processImage(&ip->hsv, true);
}
Esempio n. 21
0
// Sharpness varies from -1 to 2
// max slider value is 300
void sharpnessCallback(int val, void *ipPtr){
    ImageProcessor *ip = static_cast<ImageProcessor *>(ipPtr);
    ip->sharpness = ((float)val / 100) - 1;
    ip->processImage(&ip->hsv, true);
}
Esempio n. 22
0
// Contrast varies from 0.5 to 2
// Max slider value is 200
// NOTE(review): with val in [0, 200] the mapping below actually reaches 2.5,
// not 2 -- confirm the intended upper bound.
void contrastCallback(int val, void *ipPtr){
    ImageProcessor *ip = static_cast<ImageProcessor *>(ipPtr);
    ip->contrast = ((float)val / 100) + 0.5;
    // (removed a redundant self-assignment: ip->contrast = ip->contrast;)
    ip->processImage(&ip->hsv, true);
}
Esempio n. 23
0
// Brightness varies from -100 to 100
// Max slider value is 200
void brightnessCallback(int val, void *ipPtr){
    ImageProcessor *ip = static_cast<ImageProcessor *>(ipPtr);
    ip->brightness = val - 100;
    ip->processImage(&ip->hsv, true);
}
Esempio n. 24
0
// adjustColourfullness is on it's own branch for imagae processing
// send value to adjustColourfullness before calling processImage
void colourfullnessCallback(int val, void *ipPtr){
    ImageProcessor *ip = static_cast<ImageProcessor *>(ipPtr);
    ip->adjustColourfullness(&val);
    ip->processImage(&ip->hsv, false);
}
Esempio n. 25
0
// If the slider is equal to 1 the histEq perfomed.
void histEqCallback(int pos, void *ipPtr){
    ImageProcessor *ip = static_cast<ImageProcessor *>(ipPtr);
    ip->eq = pos == 1;
    ip->processImage(&ip->hsv, true);
}
// Scans a grid of candidate windows derived from the detected head rectangle,
// classifies each 48x48-resized window with the SVM HOG model, and returns the
// merged bounding box of all accepted windows.
// Returns cvRect(0,0,-1,-1) when no window is accepted.
CvRect HoGProcessor::detectObject(CvSVM *svmModel, IplImage *input, IplImage *result, CvRect rectHead, int normalization){
	int StepWidth = 10;
	int StepHeight = 10;
	
	int nWindow = 3;   // nWindow x nWindow candidate windows are tried
	
	int scaleWidth = 2;
	int scaleHeight = 2;

	ImageProcessor ip;

	// Clamp tall head boxes to a square.
	// (Original Vietnamese comment: "loai bo truong hop toc dai" --
	// roughly "exclude the long-hair case".)
	if(rectHead.height > rectHead.width)
		rectHead.height = rectHead.width;		

	// Base body window: centred under the head, scaled by scaleWidth/Height.
	CvRect rectHuman = cvRect(rectHead.x + rectHead.width/2 - rectHead.width*scaleWidth/2, 
		rectHead.y - 6, 
		rectHead.width*scaleWidth, 
		rectHead.height*scaleHeight);

	vector<CvRect> lstRect;
	CvMat* img_feature_vector;
	IplImage **newIntegrals;
	for(int i = 0; i < nWindow; i++)
	{
		for(int j = 0; j < nWindow; j++)
		{
			// Grow the base window by i/j steps, keeping it centred,
			// then clip it to the input image bounds.
			CvRect rect;
			rect.width = rectHuman.width + StepWidth*i;
			rect.height = rectHuman.height + StepHeight*j;
			rect.x = rectHuman.x - StepWidth*i/2;
			rect.y = rectHuman.y - StepHeight*j/2;
						
			if(rect.x < 0) rect.x = 0;
			if(rect.y < 0) rect.y = 0;
			if(rect.x + rect.width > input->width) rect.width = input->width - rect.x;
			if(rect.y + rect.height > input->height) rect.height = input->height - rect.y;		
			
			IplImage* candidate_img = ip.getSubImageAndResize(input, rect, 48, 48);
			if(candidate_img)
			{
				newIntegrals = calculateIntegralHOG(candidate_img);
				img_feature_vector = calculateHOG_window(newIntegrals,cvRect(0,0,48,48),4);
				
				for (int k = 0; k < 9; k++)
				{
					cvReleaseImage(&newIntegrals[k]);				
				}
				// NOTE(review): after the loop newIntegrals[0] is NULL, so
				// this call is a no-op and the pointer-array storage itself
				// is never freed -- confirm how calculateIntegralHOG
				// allocates the array.
				cvReleaseImage(newIntegrals);
				cvReleaseImage(&candidate_img);

				// NOTE(review): accepting predict >= -1 lets nearly every
				// window through -- confirm the intended decision threshold.
				double predict_rs = svmModel->predict(img_feature_vector, true);				
				if(predict_rs >= -1)				
					lstRect.push_back(rect);		
								
				cvReleaseMat(&img_feature_vector);	
			}				
		}
	}	

	if(lstRect.size() > 0)
	{		
		return MergeRect(lstRect);				
	}	
	return cvRect(0,0,-1,-1);	
}
int main( int argc, char** argv )
{
	if(argc == 3)
	{
		ImageProcessor* proc = new ImageProcessor(argv[2]);

		VideoCapture cap;

		if(!cap.open(argv[1]))
		{
			cout << ERROR_STR << "Failed to open " << argv[1] << endl;
			return -1;
		}

		namedWindow("Video output", 1);
		namedWindow("Input", 1);

		namedWindow("Settings", 1);
		createTrackbar("hi", "Settings", &hi, 255);
	    createTrackbar("lo", "Settings", &lo, 255);

	    //createTrackbar("min_hue", "Settings", &minhue, 180);
	    //createTrackbar("max_hue", "Settings", &maxhue, 180);
	    createTrackbar("sat", "Settings", &minsat, 255);
	    createTrackbar("val", "Settings", &minval, 255);

	    namedWindow("Found circles", 1);

		for(;;)
		{
			Mat curFrame;

			cap >> curFrame;

			if (curFrame.empty())
			{
				cout << STATUS_STR << "Frame is empty" << endl;
				break;
			}
			else
			{
				//resize(curFrame, curFrame, Size(), 0.4, 0.4, INTER_AREA);

				Mat fr = proc->process_image(curFrame);

				imshow("Video output", fr);
				imshow("Input", curFrame);

				// cout << "hi = " << hi << " lo = " << lo << endl;

				if(fr.empty())
				{
					cout << STATUS_STR << "Processed frame is empty" << endl;
					break;
				}
			}

			cvWaitKey(5);

			if(waitKey(30) >= 0)
				break;
		}
	}