Example #1
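// Presumably a hole-filling pass built on morphological reconstruction:
// the image is inverted, reconstructed from a marker, and inverted back,
// which is the classic reconstruction-based fill pattern. This reading is
// inferred from the call names only; the helper signatures are not shown.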
void HazePerfection::morphfill()
{
	readHotImage();
	createMask();
	invertImage(hotDataset, maskDataset, inverta);
	createMark(markB);
	morphologicalReconstruction(markDataset, maskDataset);
	invertImage(markDataset, maskDataset, inverta);
}
Example #2
template<> uImage LodepngDecodePolicy::Decode(vector<char> const&data, uint channels, bool flip)
{
  LodePNGColorType colortype = LCT_RGB;
  switch(channels)
  {
    case 1: colortype = LCT_GREY; break;
    case 3: colortype = LCT_RGB;  break;
    case 4: colortype = LCT_RGBA; break;
    default: CASSERT(0, "Invalid channels for image loading");
  }

  uint w, h;
  vector<ubyte> img;
  if(Val error = lodepng::decode(img, w, h, reinterpret_cast<ubyte const*>(data.data()), data.size(), colortype, 8))
    CERROR("Lodepng error: "<<lodepng_error_text(error));

  return {  w, h, channels, flip ? invertImage(move(img), h, w * channels) : move(img) };
}
Example #3
ScreenWidget::ScreenWidget(QWidget *parent, QColor initialColor,
                           const QString &name, Separation mask,
                           const QSize &labelSize)
    : QFrame(parent)
{
    paintColor = initialColor;
    maskColor = mask;
    inverted = false;

    imageLabel = new QLabel;
    imageLabel->setFrameShadow(QFrame::Sunken);
    imageLabel->setFrameShape(QFrame::StyledPanel);
    imageLabel->setMinimumSize(labelSize);

    nameLabel = new QLabel(name);
    colorButton = new QPushButton(tr("Modify..."));
    colorButton->setBackgroundRole(QPalette::Button);
    colorButton->setMinimumSize(32, 32);

    QPalette palette(colorButton->palette());
    palette.setColor(QPalette::Button, initialColor);
    colorButton->setPalette(palette);

    invertButton = new QPushButton(tr("Invert"));
    //invertButton->setToggleButton(true);
    //invertButton->setOn(inverted);
    invertButton->setEnabled(false);

    connect(colorButton, SIGNAL(clicked()), this, SLOT(setColor()));
    connect(invertButton, SIGNAL(clicked()), this, SLOT(invertImage()));

    QGridLayout *gridLayout = new QGridLayout;
    gridLayout->addWidget(imageLabel, 0, 0, 1, 2);
    gridLayout->addWidget(nameLabel, 1, 0);
    gridLayout->addWidget(colorButton, 1, 1);
    gridLayout->addWidget(invertButton, 2, 1, 1, 1);
    setLayout(gridLayout);
}
Example #4
//--------------------------------------------------------------
void testApp::update(){
#ifndef __APPLE__
#ifndef __NO_KINECT__
	kinect.update();
	if(kinect.isFrameNew()) {
		grayImage.setFromPixels(kinect.getDepthPixels(), kinect.width, kinect.height);
		colorImage.setFromPixels(kinect.getPixels(), kinect.width, kinect.height);
	}
	if(frameMerge) {
		frameMergeFilter();
		//ofLogNotice() << "Merging Frame";
	}
	if(filterOn) {
		grayImage.setFromPixels(filter->getFrame(grayImage), kinect.width, kinect.height);
	}
	if(sharpenOn)
		sharpenImage();
	if(invertOn)
		invertImage();
	if(contourOn)
		findContour();
#endif
#endif
}
Example #5
int main(int argc,char *argv[])
{
  //Handles user input
  if(argc<4 || argc>5)
  {
    printf("Incorrect number of arguments\n");
    printf("Number of arguments: %d\n",argc);
    exit(1);
  }

  const char *inputFilename=argv[1];
  printf("Inputfile: %s\n",inputFilename);
  const char *outputFilename=argv[2];
  char garbage[2];
  int command;
  double sigma=3;

  if(1!=sscanf(argv[3],"%d%1s",&command,garbage) || command<0 || command>11)
  {
    printf("Incorrect command\n");
    exit(1);
  }

  if(command>0 && command<11 && argc==5)
  {
    printf("Incorrect number of arguments, exclude the sigma value");
    exit(1);
  }

  if(((command==0 || command==11) && argc==5 && 1!=sscanf(argv[4],"%lf%1s",&sigma,garbage)) || sigma<0)
  {
    printf("Incorrect sigma value\n");
    exit(1);
  }

  Filter *filters=initializeFilters(sigma);
  Image *inputImage=decode(inputFilename);
  printf("Width: %d, height: %d\n",inputImage->width,inputImage->height);
  Image *outputImage=generateOutput(inputImage);

  uint8_t *inRed=inputImage->redChannel;
  uint8_t *inBlue=inputImage->blueChannel;
  uint8_t *inGreen=inputImage->greenChannel;
  uint8_t *inAlpha=inputImage->alphaChannel;
  uint8_t *outRed=outputImage->redChannel;
  uint8_t *outBlue=outputImage->blueChannel;
  uint8_t *outGreen=outputImage->greenChannel;
  uint8_t *outAlpha=outputImage->alphaChannel;
  int height=inputImage->height;
  int width=inputImage->width;
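  //Command codes handled by the switch below:
  //  0-6 : convolve with filters[command]
  //  7   : convert to grayscale
  //  8   : invert colors
  //  9   : flip
  //  10  : pixelate in 8x8 blocks
  //  11  : pencil sketch (uses an inverted copy, a blurred copy, and filters[0])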
  switch(command)
  {
    case(0):
    case(1):
    case(2):
    case(3):
    case(4):
    case(5):
    case(6):
    {
      convolveImage(inRed,inBlue,inGreen,inAlpha,outRed,outBlue,outGreen,
                    outAlpha,filters[command].filter,filters[command].radius,
                    width,height);
      break;
    }
    case(7):
    {
      convertToGray(inRed,inBlue,inGreen,inAlpha,outRed,outBlue,outGreen,
                    outAlpha,gMonoMult,width,height);
      break;
     }
    case(8):
    {
      invertImage(inRed,inBlue,inGreen,inAlpha,outRed,outBlue,outGreen,
                  outAlpha,width,height);
      break;
    }
    case(9):
    {
      flipImage(inRed,inBlue,inGreen,inAlpha,outRed,outBlue,outGreen,
                outAlpha,width,height);
      break;
    }
    case(10):
    {
      pixelate(inRed,inBlue,inGreen,inAlpha,outRed,outBlue,outGreen,
               outAlpha,8,8,width,height);
      break;
    }
    case(11):
    {
      Image *invImage=generateOutput(inputImage);
      Image *blurImage=generateOutput(inputImage);
      pencilSketch(inRed,inBlue,inGreen,inAlpha,invImage->redChannel,
                   invImage->blueChannel,invImage->greenChannel,
                   invImage->alphaChannel,blurImage->redChannel,
                   blurImage->blueChannel,blurImage->greenChannel,
                   blurImage->alphaChannel,outRed,outBlue,outGreen,
                   outAlpha,filters[0].filter,filters[0].radius,width,height,
                   gMonoMult);
      //Free the intermediate images used only for the pencil sketch
      freeImage(invImage);
      freeImage(blurImage);
      break;
    }
    default:
      exit(1);
  }

  if(command!=12)
    encode(outputFilename,outputImage);

  free((double*)filters[0].filter);
  free(filters);
  freeImage(inputImage);
  freeImage(outputImage);
  return 0;
}
Example #6
// main
// 
// This program accepts three arguments: a processing option
// ("blur", "invert", or "ascii"), a PGM file name for input,
// and a text file name for output.  It reads the PGM file and
// creates an output file with either an appropriate PGM (if 
// one of the first two options is given) or a text file (if
// the last option is given).
//
// Right now, only the "ascii" option works.
//
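// A rough usage sketch (the binary name "pgm_tool" is only a placeholder
// for however this file is actually built; only the --ascii path is
// reported to work at the moment):
//
//   ./pgm_tool --ascii  input.pgm  output.txt
//   ./pgm_tool --blur   input.pgm  blurred.pgm
//   ./pgm_tool --invert input.pgm  inverted.pgm
//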
int main(int argc, char **argv) {

  // input and output file "handles"
  FILE *inf, *outf;

  if (argc < 4) {
    
    // whoops! not enough arguments
    fprintf(stderr,"Error: not enough arguments!\n");
    usage(argv[0]);
    return -1;

  } else {

    // open the input (PGM) file
    inf = fopen(argv[2],"r");
    if (inf == NULL) {
      fprintf(stderr,"Error: can't open file '%s' for reading.\n",argv[2]);
      return -1;
    }

    // open the output file
    outf = fopen(argv[3],"w");
    if (outf == NULL) {
      fprintf(stderr,"Error: can't open file '%s' for writing.\n",argv[3]);
      return -1;
    }

    if (strcmp(argv[1],"--blur") == 0) {

      blurImage(inf,outf);

    } else if (strcmp(argv[1],"--invert") == 0) {

      invertImage(inf,outf);
      //
      // write the code that inverts the image
      //  

    } else if (strcmp(argv[1],"--ascii") == 0) {

      echoASCII(inf,outf);
      //
      // change this so that it is given an image array and
      // the outf
      //

    } else {

      fprintf(stderr,"Error: unrecognized option '%s.'\n",argv[1]);
      usage(argv[0]);
      
      // return FAIL
      return -1;
    }

    // close the files
    fclose(inf);
    fclose(outf);

    // return OK
    return 0;
  }
}
Example #7
/*
 * Create the menu actions
 */
void ProtoWindow::createActions()
{
	openAct = new QAction(trUtf8("Ö&ffnen..."), this);
	openAct->setShortcut(trUtf8("Ctrl+O"));
	connect(openAct, SIGNAL(triggered()), this, SLOT(open()));

	// TODO: when saving via Ctrl+S, probably don't ask for confirmation anymore
	saveAct = new QAction(trUtf8("&Speichern"), this);
	saveAct->setShortcut(trUtf8("Ctrl+S"));
	saveAct->setEnabled(false);
	connect(saveAct, SIGNAL(triggered()), this, SLOT(save()));

	saveAsAct = new QAction(trUtf8("Speichern unter..."), this);
	saveAsAct->setEnabled(false);
	connect(saveAsAct, SIGNAL(triggered()), this, SLOT(saveAs()));

	//printAct = new QAction(trUtf8("&Drucken..."), this);
	//printAct->setShortcut(trUtf8("Ctrl+P"));
	//printAct->setEnabled(false);
	//connect(printAct, SIGNAL(triggered()), this, SLOT(print()));

	exitAct = new QAction(trUtf8("&Beenden"), this);
	exitAct->setShortcut(trUtf8("Ctrl+Q"));
	connect(exitAct, SIGNAL(triggered()), this, SLOT(close()));

	zoomInAct = new QAction(trUtf8("ver&größern (25%)"), this);
	zoomInAct->setShortcut(trUtf8("Ctrl++"));
	zoomInAct->setEnabled(false);
	connect(zoomInAct, SIGNAL(triggered()), this, SLOT(zoomIn()));

	zoomOutAct = new QAction(trUtf8("ver&kleinern (25%)"), this);
	zoomOutAct->setShortcut(trUtf8("Ctrl+-"));
	zoomOutAct->setEnabled(false);
	connect(zoomOutAct, SIGNAL(triggered()), this, SLOT(zoomOut()));

	normalSizeAct = new QAction(trUtf8("&Normale Größe"), this);
	normalSizeAct->setShortcut(trUtf8("Ctrl+N"));
	normalSizeAct->setEnabled(false);
	connect(normalSizeAct, SIGNAL(triggered()), this, SLOT(normalSize()));

	autoNormalSizeAct = new QAction(trUtf8("immer auf normale Größe einpassen"), this);
	autoNormalSizeAct->setCheckable(true);
	autoNormalSizeAct->setChecked(true);
	connect(autoNormalSizeAct, SIGNAL(triggered()), this, SLOT(autoNormalSize()));

	actualSizeAct = new QAction(trUtf8("&Tatsächliche Größe"), this);
	actualSizeAct->setShortcut(trUtf8("Ctrl+T"));
	actualSizeAct->setEnabled(false);
	connect(actualSizeAct, SIGNAL(triggered()), this, SLOT(actualSize()));

	invertImageAct = new QAction(trUtf8("Farben &invertieren"), this);
	invertImageAct->setEnabled(false);
	invertImageAct->setShortcut(trUtf8("I"));
	connect(invertImageAct, SIGNAL(triggered()), this, SLOT(invertImage()));

	rotateLeftAct = new QAction(trUtf8("90°-Drehung links"), this);
	rotateLeftAct->setEnabled(false);
	rotateLeftAct->setShortcut(trUtf8("Ctrl+Left"));
	connect(rotateLeftAct, SIGNAL(triggered()), this, SLOT(rotateLeft()));

	rotateRightAct = new QAction(trUtf8("90°-Drehung rechts"), this);
	rotateRightAct->setEnabled(false);
	rotateRightAct->setShortcut(trUtf8("Ctrl+Right"));
	connect(rotateRightAct, SIGNAL(triggered()), this, SLOT(rotateRight()));

	aboutAct = new QAction(trUtf8("&Über..."), this);
	connect(aboutAct, SIGNAL(triggered()), this, SLOT(about()));

	aboutQtAct = new QAction(trUtf8("Über &Qt"), this);
	connect(aboutQtAct, SIGNAL(triggered()), qApp, SLOT(aboutQt()));
}
Example #8
void VideoDemos( VideoCapture& surveillance_video, int starting_frame, bool clean_binary_images )
{
	Mat previous_gray_frame, optical_flow, optical_flow_display;
	Mat current_frame, thresholded_image, closed_image, first_frame;
	Mat current_frame_gray, running_average_background;
	Mat temp_running_average_background, running_average_difference;
	Mat running_average_foreground_mask, running_average_foreground_image;
	Mat selective_running_average_background;
	Mat temp_selective_running_average_background, selective_running_average_difference;
	Mat selective_running_average_foreground_mask, selective_running_average_background_mask, selective_running_average_foreground_image;
	double running_average_learning_rate = 0.01;
	surveillance_video.set(CV_CAP_PROP_POS_FRAMES,starting_frame);
	surveillance_video >> current_frame;
	first_frame = current_frame.clone();
	cvtColor(current_frame, current_frame_gray, CV_BGR2GRAY);
	current_frame.convertTo(running_average_background, CV_32F);
	selective_running_average_background = running_average_background.clone();
	int rad = running_average_background.depth();
	MedianBackground median_background( current_frame, (float) 1.005, 1 );
	Mat median_background_image, median_foreground_image;

	int codec = static_cast<int>(surveillance_video.get(CV_CAP_PROP_FOURCC));
	// V3.0.0 update on next line.  OLD CODE was    BackgroundSubtractorMOG2 gmm; //(50,16,true);
    Ptr<BackgroundSubtractorMOG2> gmm = createBackgroundSubtractorMOG2();
	Mat foreground_mask, foreground_image = Mat::zeros(current_frame.size(), CV_8UC3);

	double frame_rate = surveillance_video.get(CV_CAP_PROP_FPS);
	double time_between_frames = 1000.0/frame_rate;
	Timestamper* timer = new Timestamper();
	int frame_count = 0;
	while ((!current_frame.empty()) && (frame_count++ < 1000))  // previously 1800
    {
 		double duration = static_cast<double>(getTickCount());
		vector<Mat> input_planes(3);
		split(current_frame,input_planes);
		cvtColor(current_frame, current_frame_gray, CV_BGR2GRAY);

		if (frame_count%2 == 0)  // Skip every second frame so the flow is greater.
		{
			if ( previous_gray_frame.data )
			{
				Mat lucas_kanade_flow;
				timer->ignoreTimeSinceLastRecorded();
				LucasKanadeOpticalFlow(previous_gray_frame, current_frame_gray, lucas_kanade_flow);
				timer->recordTime("Lucas Kanade Optical Flow");
				calcOpticalFlowFarneback(previous_gray_frame, current_frame_gray, optical_flow, 0.5, 3, 15, 3, 5, 1.2, 0);
				cvtColor(previous_gray_frame, optical_flow_display, CV_GRAY2BGR);
				drawOpticalFlow(optical_flow, optical_flow_display, 8, Scalar(0, 255, 0), Scalar(0, 0, 255));
				timer->recordTime("Farneback Optical Flow");
				char frame_str[100];
				sprintf( frame_str, "Frame = %d", frame_count);
 				Mat temp_output = JoinImagesHorizontally( current_frame, frame_str, optical_flow_display, "Farneback Optical Flow", 4 );
				Mat optical_flow_output = JoinImagesHorizontally( temp_output, "", lucas_kanade_flow, "Lucas Kanade Optical Flow", 4 );
				imshow("Optical Flow", optical_flow_output );
			}
			std::swap(previous_gray_frame, current_frame_gray);
		}
	
		// Static background image
		Mat difference_frame, binary_difference;
		Mat structuring_element(3,3,CV_8U,Scalar(1));
		timer->ignoreTimeSinceLastRecorded();
		absdiff(current_frame,first_frame,difference_frame);
		cvtColor(difference_frame, thresholded_image, CV_BGR2GRAY);
		threshold(thresholded_image,thresholded_image,30,255,THRESH_BINARY);
		if (clean_binary_images)
		{
			morphologyEx(thresholded_image,closed_image,MORPH_CLOSE,structuring_element);
			morphologyEx(closed_image,binary_difference,MORPH_OPEN,structuring_element);
			current_frame.copyTo(binary_difference, thresholded_image);
		}
		else
		{
			binary_difference.setTo(Scalar(0,0,0));
		    current_frame.copyTo(binary_difference, thresholded_image);
		}
		timer->recordTime("Static difference");

		// Running Average (three channel version)
		vector<Mat> running_average_planes(3);
		split(running_average_background,running_average_planes);
		accumulateWeighted(input_planes[0], running_average_planes[0], running_average_learning_rate);
		accumulateWeighted(input_planes[1], running_average_planes[1], running_average_learning_rate);
		accumulateWeighted(input_planes[2], running_average_planes[2], running_average_learning_rate);
		merge(running_average_planes,running_average_background);
		running_average_background.convertTo(temp_running_average_background,CV_8U);
		absdiff(temp_running_average_background,current_frame,running_average_difference);
		split(running_average_difference,running_average_planes);
		// Determine foreground points as any point with a difference of more than 30 on any one channel:
		threshold(running_average_difference,running_average_foreground_mask,30,255,THRESH_BINARY);
		split(running_average_foreground_mask,running_average_planes);
		bitwise_or( running_average_planes[0], running_average_planes[1], running_average_foreground_mask );
		bitwise_or( running_average_planes[2], running_average_foreground_mask, running_average_foreground_mask );
		if (clean_binary_images)
		{
			morphologyEx(running_average_foreground_mask,closed_image,MORPH_CLOSE,structuring_element);
			morphologyEx(closed_image,running_average_foreground_mask,MORPH_OPEN,structuring_element);
		}
		running_average_foreground_image.setTo(Scalar(0,0,0));
	    current_frame.copyTo(running_average_foreground_image, running_average_foreground_mask);
		timer->recordTime("Running Average");

		// Running Average with selective update
		vector<Mat> selective_running_average_planes(3);
		// Find Foreground mask
		selective_running_average_background.convertTo(temp_selective_running_average_background,CV_8U);
		absdiff(temp_selective_running_average_background,current_frame,selective_running_average_difference);
		split(selective_running_average_difference,selective_running_average_planes);
		// Determine foreground points as any point with an average difference of more than 30 over all channels:
		Mat temp_sum = (selective_running_average_planes[0]/3 + selective_running_average_planes[1]/3 + selective_running_average_planes[2]/3);
		threshold(temp_sum,selective_running_average_foreground_mask,30,255,THRESH_BINARY_INV);
		// Update background
		split(selective_running_average_background,selective_running_average_planes);
		accumulateWeighted(input_planes[0], selective_running_average_planes[0], running_average_learning_rate,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[1], selective_running_average_planes[1], running_average_learning_rate,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[2], selective_running_average_planes[2], running_average_learning_rate,selective_running_average_foreground_mask);
    	invertImage(selective_running_average_foreground_mask,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[0], selective_running_average_planes[0], running_average_learning_rate/3.0,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[1], selective_running_average_planes[1], running_average_learning_rate/3.0,selective_running_average_foreground_mask);
		accumulateWeighted(input_planes[2], selective_running_average_planes[2], running_average_learning_rate/3.0,selective_running_average_foreground_mask);
		merge(selective_running_average_planes,selective_running_average_background);
		if (clean_binary_images)
		{
			morphologyEx(selective_running_average_foreground_mask,closed_image,MORPH_CLOSE,structuring_element);
			morphologyEx(closed_image,selective_running_average_foreground_mask,MORPH_OPEN,structuring_element);
		}
 		selective_running_average_foreground_image.setTo(Scalar(0,0,0));
	    current_frame.copyTo(selective_running_average_foreground_image, selective_running_average_foreground_mask);
		timer->recordTime("Selective Running Average");

		// Median background
		timer->ignoreTimeSinceLastRecorded();
		median_background.UpdateBackground( current_frame );
		timer->recordTime("Median");
		median_background_image = median_background.GetBackgroundImage();
		Mat median_difference;
		absdiff(median_background_image,current_frame,median_difference);
		cvtColor(median_difference, median_difference, CV_BGR2GRAY);
		threshold(median_difference,median_difference,30,255,THRESH_BINARY);
		median_foreground_image.setTo(Scalar(0,0,0));
	    current_frame.copyTo(median_foreground_image, median_difference);

		// Update the Gaussian Mixture Model
 		// V3.0.0 update on next line.  OLD CODE was  gmm(current_frame, foreground_mask);
        gmm->apply(current_frame, foreground_mask);
		// Clean the resultant binary (moving pixel) mask using an opening.
		threshold(foreground_mask,thresholded_image,150,255,THRESH_BINARY);
		Mat moving_incl_shadows, shadow_points;
		threshold(foreground_mask,moving_incl_shadows,50,255,THRESH_BINARY);
		absdiff( thresholded_image, moving_incl_shadows, shadow_points );
		Mat cleaned_foreground_mask;
		if (clean_binary_images)
		{
			morphologyEx(thresholded_image,closed_image,MORPH_CLOSE,structuring_element);
			morphologyEx(closed_image,cleaned_foreground_mask,MORPH_OPEN,structuring_element);
		}
		else cleaned_foreground_mask = thresholded_image.clone();
 		foreground_image.setTo(Scalar(0,0,0));
        current_frame.copyTo(foreground_image, cleaned_foreground_mask);
		timer->recordTime("Gaussian Mixture Model");
		// Create an average background image (just for information)
        Mat mean_background_image;
		timer->ignoreTimeSinceLastRecorded();
		// V3.0.0 update on next line.  OLD CODE was   gmm.getBackgroundImage(mean_background_image);
        gmm->getBackgroundImage(mean_background_image);

		duration = static_cast<double>(getTickCount())-duration;
		duration /= getTickFrequency()/1000.0;
		int delay = (time_between_frames>duration) ? ((int) (time_between_frames-duration)) : 1;
		char c = cvWaitKey(delay);
		
		char frame_str[100];
		sprintf( frame_str, "Frame = %d", frame_count);
		Mat temp_static_output = JoinImagesHorizontally( current_frame, frame_str, first_frame, "Static Background", 4 );
		Mat static_output = JoinImagesHorizontally( temp_static_output, "", binary_difference, "Foreground", 4 );
        imshow("Static Background Model", static_output );
 		Mat temp_running_output = JoinImagesHorizontally( current_frame, frame_str, temp_running_average_background, "Running Average Background", 4 );
		Mat running_output = JoinImagesHorizontally( temp_running_output, "", running_average_foreground_image, "Foreground", 4 );
		imshow("Running Average Background Model", running_output );
 		Mat temp_selective_output = JoinImagesHorizontally( current_frame, frame_str, temp_selective_running_average_background, "Selective Running Average Background", 4 );
		Mat selective_output = JoinImagesHorizontally( temp_selective_output, "", selective_running_average_foreground_image, "Foreground", 4 );
        imshow("Selective Running Average Background Model", selective_output );
 		Mat temp_median_output = JoinImagesHorizontally( current_frame, frame_str, median_background_image, "Median Background", 4 );
		Mat median_output = JoinImagesHorizontally( temp_median_output, "", median_foreground_image, "Foreground", 4 );
        imshow("Median Background Model", median_output );
  		Mat temp_gaussian_output = JoinImagesHorizontally( current_frame, frame_str, mean_background_image, "GMM Background", 4 );
		Mat gaussian_output = JoinImagesHorizontally( temp_gaussian_output, "", foreground_image, "Foreground", 4 );
        imshow("Gaussian Mixture Model", gaussian_output );
		timer->putTimes( current_frame );
		imshow( "Computation Times", current_frame );
	 	surveillance_video >> current_frame;
	}
	cvDestroyAllWindows();
}