Example No. 1
GameScreen::GameScreen(void)
{

	IplImage* tempOrange = cvLoadImage("monster.png");
	IplImage* tempPurple = cvLoadImage("monster purple.jpg");
	IplImage* tempGreen = cvLoadImage("monster green.jpg");
	IplImage* tempWand = cvLoadImage("wand.jpg");
	orangeMonster = cvCreateImage(cvSize(tempOrange->width, tempOrange->height), 8, 3);
	purpleMonster = cvCreateImage(cvSize(tempPurple->width, tempPurple->height), 8, 3);
	greenMonster = cvCreateImage(cvSize(tempGreen->width, tempGreen->height), 8, 3);
	wand = cvCreateImage(cvSize(tempWand->width, tempWand->height), 8, 3);
	cvConvertImage(tempOrange, orangeMonster);
	cvConvertImage(tempPurple, purpleMonster);
	cvConvertImage(tempGreen, greenMonster);
	cvConvertImage(tempWand, wand);

	cvReleaseImage(&tempOrange);
	cvReleaseImage(&tempPurple);
	cvReleaseImage(&tempGreen);
	cvReleaseImage(&tempWand);

	CreateScene();


}
Example No. 2
/* Process a frame
 */
void Frame::processFrame( long int run_id )
{

	for (int i=0;i<_nImages;i++) {
		cvConvertImage(_grab_img[i],_tmp_img[i],0);
		cvCopyImage( _rotated_img[i], _copy_img[i] );
		rotateCW(_tmp_img[i],_rotated_img[i]);

		// resize and convert from 4-channel to 3-channel
		// (Ladybug outputs 4-channel images by default)
		cvResize(_rotated_img[i],_display_img[i]);
		cvConvertImage(_tmp_img[i],_grayscale_img[i],0); // the destination has one channel, so this is a grayscale conversion (CV_RGB2GRAY is a cvCvtColor code, not a cvConvertImage flag)

		// save the image for debug
		if ( run_id > 0 ) {
			LOG(LEVEL_INFO, "saving image in %d_%d.bmp", run_id, i );
			char filename[256];
			sprintf( filename, "synthetic/%d_%d.bmp", run_id, i );
			cvSaveImage( filename, _tmp_img[i] );
		}
	}

	flipHorizontal(_display_img[5],temp_img); // flip image horizontally
	cvConvertImage(temp_img,_flipped_img,1); // flip image vertically
}
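For reference (not part of the original snippets): cvConvertImage only interprets two flag bits, CV_CVTIMG_FLIP and CV_CVTIMG_SWAP_RB, and it converts to grayscale implicitly whenever the destination image has a single channel. A minimal sketch, with src and dst standing in for any suitably sized images:

	IplImage* gray = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
	cvConvertImage(src, gray, 0);                                   // grayscale, because gray has one channel
	cvConvertImage(src, dst, CV_CVTIMG_FLIP | CV_CVTIMG_SWAP_RB);   // flip vertically and swap R/B in one call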
Example No. 3
void Capture::update()
{
    cvGrabFrame(m_capture);
    IplImage *frame = cvRetrieveFrame(m_capture); 

    TmpBufferImage btmp(CV_8UC3);
    
    if ( frame != NULL )
    {
        CvMat *mat = cvCreateMat(frame->height, frame->width, CV_8UC3);
        if ( m_invert )
        {
            CvMat *tmp = btmp.getImage(frame->width, frame->height);        
            
            cvConvertImage(frame, tmp, CV_CVTIMG_SWAP_RB);
            InvertImage(tmp, mat);            
        }
        else
        {
            
            cvConvertImage(frame, mat, CV_CVTIMG_SWAP_RB);
        }
        
        m_wpipe.write(mat);                                          
    }
}
Example No. 4
//--------------------------------------------------------------
void testApp::keyPressed(int key){
	if(key=='s')
	{
		cvConvertImage(disp,disp, CV_CVTIMG_SWAP_RB);
		cvSaveImage("data/save.bmp", disp);
		cvConvertImage(disp,disp, CV_CVTIMG_SWAP_RB);
	}
}
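An alternative sketch, assuming the goal is simply to leave disp untouched: convert into a temporary buffer instead of swapping the displayed image in place twice.

	IplImage* bgr = cvCreateImage(cvGetSize(disp), disp->depth, disp->nChannels);
	cvConvertImage(disp, bgr, CV_CVTIMG_SWAP_RB);   // cvSaveImage expects BGR channel order
	cvSaveImage("data/save.bmp", bgr);
	cvReleaseImage(&bgr);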
Example No. 5
IplImage *query_frame(CvCapture *video, IplImage **frame_buffer, IplImage *current_frame) {
  IplImage *frame = cvQueryFrame(video);

  if (frame) {
    cvConvertImage(frame, current_frame, 0);

    IplImage *temp = frame_buffer[0];
    frame_buffer[0] = frame_buffer[1];
    frame_buffer[1] = temp;
    cvConvertImage(frame, frame_buffer[0], 0);
  }

  return frame;
}
Example No. 6
IplImage *AVILibrary::aviGrabNextFrame(string fileName)
{
    printf("Grabbing next frame from %s\n", fileName.c_str());

    if (captureAVIInitialized == false) {
        aviInitialize(fileName);
        frameNumber=0;
        captureAVIInitialized = true;
    }

    if (frameNumber < captureAVIFrames) {
        cvSetCaptureProperty(captureAVI, CV_CAP_PROP_POS_FRAMES, frameNumber);

        IplImage *temp = cvQueryFrame(captureAVI);

        // the frame comes in as BGR; we have to swap the channels to RGB
        IplImage *ret = cvCreateImage(cvSize(temp->width, temp->height), IPL_DEPTH_8U, 3);

        cvConvertImage(temp, ret, CV_CVTIMG_SWAP_RB);

        frameNumber++;

        return ret;

    } else {

        return NULL;

    }

} // end aviGrabFrame
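A hypothetical usage sketch (the loop and the file name are not from the original source): the returned frame is a copy created with cvCreateImage, so the caller must release it, unlike the buffer returned by cvQueryFrame itself, which belongs to the capture.

	AVILibrary avi;
	IplImage* rgbFrame;
	while ((rgbFrame = avi.aviGrabNextFrame("input.avi")) != NULL) {
		// ... use the RGB frame ...
		cvReleaseImage(&rgbFrame);   // the copy is owned by the caller
	}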
Example No. 7
bool FindTagNumber::Execute(IplImage *image){
	//	convert and get only red pixel
	IplImage*	redPixel = cvCreateImage(cvSize(image->width,image->height),8,1);
	IplImage*   hsvImage = cvCreateImage(cvSize(image->width,image->height),8,3);
	cvCvtColor(image,hsvImage,CV_BGR2HSV);	// cvConvertImage cannot change colour space; cvCvtColor performs the BGR -> HSV conversion


	for(int y=0;y<image->height;y++)
		for(int x=0;x<image->width;x++)		
		{
			// H
			if( ( ((uchar*)(hsvImage->imageData + hsvImage->widthStep*y))[x*3] < hlower 
				|| ((uchar*)(hsvImage->imageData + hsvImage->widthStep*y))[x*3] >= hupper )
				&& ((uchar*)(hsvImage->imageData + hsvImage->widthStep*y))[x*3+1] > sthreshold 
				&& ((uchar*)(hsvImage->imageData + hsvImage->widthStep*y))[x*3+2] > 70 )// V
				((uchar*)(redPixel->imageData + redPixel->widthStep*y))[x] = 0;			
			else
				((uchar*)(redPixel->imageData + redPixel->widthStep*y))[x] = 255;	
		}

	cvShowImage("red only",redPixel);

	cvReleaseImage(&redPixel);
	cvReleaseImage(&hsvImage);
	
	return true;
}
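A minimal sketch of the same red-pixel selection done with cvCvtColor and cvInRangeS instead of a per-pixel loop. The hue/saturation/value bounds below are placeholder values standing in for hlower, hupper and sthreshold, and here red pixels become white (255) rather than black as in the loop above.

	IplImage* hsv  = cvCreateImage(cvGetSize(image), 8, 3);
	IplImage* low  = cvCreateImage(cvGetSize(image), 8, 1);
	IplImage* high = cvCreateImage(cvGetSize(image), 8, 1);
	IplImage* red  = cvCreateImage(cvGetSize(image), 8, 1);

	cvCvtColor(image, hsv, CV_BGR2HSV);
	// red wraps around hue 0, so two hue bands are combined
	cvInRangeS(hsv, cvScalar(0, 100, 70, 0),   cvScalar(10, 255, 255, 0),  low);
	cvInRangeS(hsv, cvScalar(170, 100, 70, 0), cvScalar(180, 255, 255, 0), high);
	cvOr(low, high, red, NULL);

	cvShowImage("red only", red);
	cvReleaseImage(&hsv); cvReleaseImage(&low); cvReleaseImage(&high); cvReleaseImage(&red);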
Example No. 8
void CvvImage::CopyOf( IplImage* img, int desired_color )
{
	try
	{
		if (HG_IS_IMAGE(img))
		{
			int color = desired_color;
			CvSize size = cvGetSize(img);

			if (color < 0)
				color = img->nChannels > 1;

			if (Create(size.width, size.height,
				(!color ? 1 : img->nChannels > 1 ? img->nChannels : 3) * 8,
				img->origin))
			{
				cvConvertImage(img, m_img, 0);
			}
		}
	}
	catch (cv::Exception &exc)
	{
		char szLog[300] = { 0 };
		sprintf_s(szLog, "CvvImage::CopyOf error. detail: %s\n\n", exc.msg.c_str());
		TRACE(szLog);
	}
	catch (...)
	{
		char szLog[300] = { 0 };
		sprintf_s(szLog, "CvvImage::CopyOf error2. Unknown error.\n\n");
		TRACE(szLog);
	}
}
Example No. 9
// TESTING ONLY
bool Classifier::showRect(IplImage *image, CObject *rect, const vector<CvSURFPoint> *pts) {
    //char *WINDOW_NAME = "test";
    CvFont font;

    IplImage *frameCopy = cvCreateImage(cvGetSize(image), IPL_DEPTH_8U, 3);
    cvConvertImage(image, frameCopy);
        
    cvNamedWindow("test", CV_WINDOW_AUTOSIZE);
    cvInitFont(&font, CV_FONT_VECTOR0, 0.75, 0.75, 0.0f, 1, CV_AA);
    
    if (pts != NULL) {
        CvScalar color = CV_RGB(255, 255, 255);
        for (int i = 0; i < (int)pts->size(); ++i) {
            //printf("(%f %f)->(%d %d) ", (*pts)[i].x, (*pts)[i].y, frameCopy->width, frameCopy->height);
            cvRectangle(frameCopy, cvPoint((*pts)[i].pt.x, (*pts)[i].pt.y), 
                cvPoint((*pts)[i].pt.x + 3, (*pts)[i].pt.y + 3), color);
        }
    }
    
    rect->draw(frameCopy, CV_RGB(255, 0, 255), &font);
        
    cvShowImage("test", frameCopy);
    cvReleaseImage(&frameCopy);

    return cvWaitKey(10) != -1;
}
Example No. 10
void MyDisplay::camera_negatif()
{
    capture = cvCreateCameraCapture(CV_CAP_ANY);
    while (1){
        frame = cvQueryFrame(capture);
        img_nvg = cvCreateImage(cvGetSize(frame), frame->depth, 1);

        //convert to grayscale
        cvConvertImage(frame, img_nvg, 0);

        stretch_histogram_NVG(img_nvg);
        negatif(img_nvg);

        //frame = negatif(frame);
        cvShowImage("test", img_nvg);
        int key = cvWaitKey(1);
        if (key == 'q')
        {
            break;
        }
        else {
            //nothing to do
        }
    }
    cvReleaseCapture(&capture);

}
Example No. 11
bool CvVideoWriter_VFW::writeFrame( const IplImage* image )
{
    bool result = false;
    CV_FUNCNAME( "CvVideoWriter_VFW::writeFrame" );

    __BEGIN__;

    if( !image )
        EXIT;

    if( !compressed && !createStreams( cvGetSize(image), image->nChannels > 1 ))
        EXIT;

    if( image->width != tempFrame->width || image->height != tempFrame->height )
        CV_ERROR( CV_StsUnmatchedSizes,
            "image size is different from the currently set frame size" );

    if( image->nChannels != tempFrame->nChannels ||
        image->depth != tempFrame->depth ||
        image->origin == 0 ||
        image->widthStep != cvAlign(image->width*image->nChannels*((image->depth & 255)/8), 4))
    {
        cvConvertImage( image, tempFrame, image->origin == 0 ? CV_CVTIMG_FLIP : 0 );
        image = (const IplImage*)tempFrame;
    }

    result = AVIStreamWrite( compressed, pos++, 1, image->imageData,
                             image->imageSize, AVIIF_KEYFRAME, 0, 0 ) == AVIERR_OK;

    __END__;

    return result;
}
Example No. 12
void cvImageWidgetSetImage(CvImageWidget * widget, const CvArr *arr){
	CvMat * mat, stub;
	int origin=0;

	//printf("cvImageWidgetSetImage\n");

	if( CV_IS_IMAGE_HDR( arr ))
		origin = ((IplImage*)arr)->origin;

	mat = cvGetMat(arr, &stub);

	if(widget->original_image && !CV_ARE_SIZES_EQ(mat, widget->original_image)){
		cvReleaseMat( &widget->original_image );
	}
	if(!widget->original_image){
		widget->original_image = cvCreateMat( mat->rows, mat->cols, CV_8UC3 );
		gtk_widget_queue_resize( GTK_WIDGET( widget ) );
	}
	cvConvertImage( mat, widget->original_image,
			                (origin != 0 ? CV_CVTIMG_FLIP : 0) + CV_CVTIMG_SWAP_RB );
	if(widget->scaled_image){
		cvResize( widget->original_image, widget->scaled_image, CV_INTER_AREA );
	}

	// window does not refresh without this
	gtk_widget_queue_draw( GTK_WIDGET(widget) );
}
Example No. 13
void invert_image( IplImage* source, IplImage* result )
{
	// TO DO:  Write code to invert all points in the source image (i.e. for each channel for each pixel in the result
	//        image the value should be 255 less the corresponding value in the source image).

	 int width_step=source->widthStep;
	 int pixel_step=source->widthStep/source->width;
	 int number_channels=source->nChannels;
	 cvZero( result );
	 int row=0,col=0;

	 IplImage* grayscale_image= cvCloneImage(source);
	 cvZero(grayscale_image);
	 cvConvertImage(source,grayscale_image);
	 
	 for (row=0; row < result->height; row++)
	 {
		  for (col=0; col < result->width; col++)
		  {
			   unsigned char* curr_point = GETPIXELPTRMACRO( grayscale_image, col, row, width_step, pixel_step );
			   unsigned char* curr_point_temp = GETPIXELPTRMACRO( result, col, row, width_step, pixel_step );
			   for (int channel=0; channel < number_channels; channel++)
				   curr_point_temp[channel] = 255-curr_point[channel];		// invert every channel of the pixel
		  }
	 }
	 cvReleaseImage( &grayscale_image );
}
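A minimal sketch of the built-in alternative: for 8-bit images, inverting every channel of every pixel (255 minus the value) is exactly what cvNot computes, so the loop above can be collapsed into a single call.

void invert_image_builtin( IplImage* source, IplImage* result )
{
	cvNot( source, result );	// result = 255 - source, applied to every channel
	// equivalently: cvSubRS( source, cvScalarAll(255), result, NULL );
}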
Example No. 14
bool CaptureManager::SaveMovie(const char* avi)
{
	bool resize = false;
	CvSize newsize = size;
	if ( Preferences::GetSavingSizeOverride() && !cvSizeEquals(Preferences::GetSavingSize(), size) ){
		resize = true;
		newsize = Preferences::GetSavingSize();
	}
	CvVideoWriter* writer = cvCreateVideoWriter(avi,
		Preferences::GetSavingCodec(),
		Preferences::GetSavingFpsOverride() || !fps ? Preferences::GetSavingFpsDefault() : fps,
		newsize, 1);
	IplImage *resized;
	if (resize)
		resized = cvCreateImage(newsize,8,3);
	IplImage *frame_flip = cvCreateImage(newsize,8,3);
	wxProgressDialog progressDlg(_T("Saving movie..."), wxString::Format(_T("Frame 0 of %d"), frameCount),frameCount, NULL, wxPD_APP_MODAL|wxPD_ELAPSED_TIME|wxPD_REMAINING_TIME|wxPD_AUTO_HIDE);
	for (int i=0; i<frameCount; i++) {
		progressDlg.Update(i+1, wxString::Format(_T("Frame %d of %d"), i+1, frameCount));
		if (resize)
			cvResize(book[i*offset]->ToIplImage(), resized);
		else
			resized = book[i*offset]->ToIplImage();
		cvConvertImage( resized, frame_flip, CV_CVTIMG_SWAP_RB );
		cvWriteFrame(writer, frame_flip);
	}
	cvReleaseVideoWriter(&writer);
	cvReleaseImage(&frame_flip);
	frame_flip = NULL;
	if (resize)
		cvReleaseImage(&resized);
	return true;
}
Example No. 15
// set the images to compare
void OpticalFlowLK::setA(IplImage* src)
{
	if(!src)
		return;
	if(imgA!=0)
	{
		cvReleaseImage(&imgA);
		imgA=0;
	}
	if(eig_image!=0)
	{
		cvReleaseImage(&eig_image);
		cvReleaseImage(&tmp_image);
		eig_image=0;
		tmp_image=0;
	}
	imgA = cvCreateImage(cvGetSize(src), IPL_DEPTH_8U, 1);
	eig_image = cvCreateImage(cvGetSize(src), IPL_DEPTH_32F, 1);
	tmp_image = cvCreateImage(cvGetSize(src), IPL_DEPTH_32F, 1);

	if(!imgA || !eig_image || !tmp_image)
	{
		fprintf(stderr, "[!][OpticalFlowLK] Error: can't allocate memory!\n");
		return;
	}

	cvConvertImage(src, imgA, 0);	// imgA has one channel, so this converts to grayscale; CV_BGR2GRAY is a cvCvtColor code, not a cvConvertImage flag
}
Example No. 16
TDV_NAMESPACE_BEGIN

bool ImageReader::update()
{
    WriteGuard<ReadWritePipe<CvMat*> > wg(m_wpipe);
    
    if ( m_cImg < m_filenames.size() )
    {
        const std::string &filename(m_filenames[m_cImg++]);
        IplImage *img = cvLoadImage(filename.c_str());
    
        if ( img != NULL )
        {   
#if 0
            CvMat *mat = cvCreateMatHeader(img->height, img->width, CV_8UC3);
            mat = cvGetMat(img, mat);        
#else
            CvMat *mat = cvCreateMat(img->height, img->width, CV_8UC3);
            cvConvertImage(img, mat, CV_CVTIMG_SWAP_RB);
            cvReleaseImage(&img);
#endif
            wg.write(mat);
        }
        else
        {
            throw Exception(boost::format("can't open image: %1%")
                            % filename);
        }                    
    }

    return wg.wasWrite();
}
Example No. 17
void
bline_image_set_visible (BlineImage *self, IplImage *image)
{
  GdkPixbuf *pixbuf;
  IplImage  *rgbimage;

  if (image == NULL)
    {
      gtk_image_clear (GTK_IMAGE (self));
      return;
    }

  rgbimage = cvCreateImage (cvSize (image->width, image->height),
                            image->depth, 3);

  cvConvertImage (image, rgbimage, 0);

  pixbuf = gdk_pixbuf_new_from_data ((const guchar *) rgbimage->imageData,
                                     GDK_COLORSPACE_RGB,
                                     FALSE,
                                     rgbimage->depth,
                                     rgbimage->width,
                                     rgbimage->height,
                                     rgbimage->widthStep,
                                     bline_image_release_pixbuf_ipl_image,
                                     (gpointer) rgbimage);

  gtk_image_set_from_pixbuf (GTK_IMAGE (self), pixbuf);

  g_object_unref (G_OBJECT (pixbuf));
}
Example No. 18
int main( int argc, char** argv ) {
	cvNamedWindow( "Example2", CV_WINDOW_AUTOSIZE );
	CvCapture* capture = cvCaptureFromCAM(-1);
	IplImage* frame, *simplifiedFrame = NULL, *simplifiedFrame2 = NULL, *frame2 = NULL;
	CvMemStorage* storage = cvCreateMemStorage(0);
	CvSeq* lines = NULL;
	int i;

	if (capture == NULL) {
		printf("Oooops .. I'm screwed :-(\n");
		exit(-1);
	}

	while(1) {
		frame = cvQueryFrame( capture );
		if( !frame ) break;

		if (simplifiedFrame == NULL) {
			simplifiedFrame = cvCreateImage(cvSize(frame->width, frame->height), IPL_DEPTH_8U, 1);
		}

		if (simplifiedFrame2 == NULL) {
			simplifiedFrame2 = cvCreateImage(cvSize(frame->width, frame->height), IPL_DEPTH_8U, 1);
		}

		if (frame2 == NULL) {
			frame2 = cvCreateImage(cvSize(frame->width, frame->height), frame->depth, frame->nChannels);
		}

		cvConvertImage(frame, simplifiedFrame, 0);
		cvCanny(simplifiedFrame, simplifiedFrame2, 300 * 7 * 7, 400 * 7 * 7, 7);
		cvConvertImage(simplifiedFrame2, frame2, 0);
		cvAdd(frame, frame2, frame);
		cvShowImage( "Example2", frame );
		char c = cvWaitKey(33);
		if( c == 27 ) break;

	}

	// frame is owned by the capture (cvQueryFrame) and must not be released here
	cvReleaseImage(&frame2);
	cvReleaseImage(&simplifiedFrame);
	cvReleaseImage(&simplifiedFrame2);
	cvReleaseMemStorage( &storage );
	cvReleaseCapture( &capture );
	cvDestroyWindow( "Example2" );
}
Example No. 19
CvScalar GetMaxColor(const IplImage* img)
{
	CvScalar scalar = cvScalarAll(0);	// give the return value a defined initial value
	IplImage* gray = cvCreateImage(cvGetSize(img),IPL_DEPTH_8U,1);
	cvConvertImage(img,gray,0);	// single-channel destination, so this produces a grayscale copy
	cvReleaseImage(&gray);	// release the temporary before returning
	return scalar;
	//cvGetSi
}
Example No. 20
IplImage* CvCaptureCAM_CMU::retrieveFrame()
{
    C1394Camera* cmucam = camera();
    if( !cmucam )
        return 0;
    cmucam->getRGB((uchar*)image->imageData, image->imageSize);
    cvConvertImage( image, image, CV_CVTIMG_SWAP_RB );
    return image;
}
Example No. 21
IplImage* MyDisplay::image_camera_NVG(CvCapture* capture)
{
    frame = cvQueryFrame(capture);
    img_nvg = cvCreateImage(cvGetSize(frame), frame->depth, 1);

    //convert to grayscale
    cvConvertImage(frame, img_nvg, 0);
    return img_nvg;	// return the grayscale copy (the caller is responsible for releasing it)
}
Example No. 22
/**
 * @internal
 * @brief Find the blobs in the received image.
 * It looks for bright areas in the image, so the result of a background
 * subtraction is typically a good input.
 * 
 * @param[in] inImage image where the blobs will be searched
 */
void BlobFinder::update( const Image& inImage )
{
	// Check valid
	if ( !isValid() )
	THROW_EXCEPTION( "Trying to compute blobs, with the BlobFinder not initialized. Init method should be called" );

	// Check blob area... and if it has not been set, set it to the max and min (no lower than 10, to avoid opencv issues)
	if ( (m_minBlobArea < 0) || (m_maxBlobArea < 0) )
	{
		m_minBlobArea = 10;
		m_maxBlobArea = (float)inImage.getWidth() * (float)inImage.getHeight();
	}

	// Check that the input image format is supported
	if( (inImage.getNChannels() != 1) && (inImage.getNChannels() != 3) )
	THROW_EXCEPTION( "Trying to compute blobs on an image with an unsupported format -> only RGB or GRAYSCALE images are supported" );

	// Request temp image to work with
	IplImage* cvTempImage = ImageResourceManager::getSingleton().getImage( inImage.getWidth(), inImage.getHeight(), 1 );

	// If they have different number of channels -> convert them
	if ( inImage.getNChannels() == 3 )
		cvConvertImage( &inImage.getCVImage(), cvTempImage );
	// just one channel -> Copy the input image
	else 
		cvCopy( &inImage.getCVImage(), cvTempImage );

	// Find blobs (openCV contours)	
	int retrievalMode = CV_RETR_EXTERNAL; // CV_RETR_CCOMP
	cvFindContours( cvTempImage, m_findContoursStorage, &m_contour, sizeof(CvContour), retrievalMode, CV_CHAIN_APPROX_SIMPLE );

	// Extract found contours    

	// Iterate through found contours and store them..
	m_blobs.clear();
	for( ; m_contour != 0; m_contour = m_contour->h_next )
	{
		// Get contour area
		double area = fabs( cvContourArea( m_contour, CV_WHOLE_SEQ ) );

		// If it has a good size (between min and max)
		if ( ( area > m_maxBlobArea ) || ( area < m_minBlobArea ) )
		  continue;

		// Store new Blob
		m_blobs.push_back( Blob( area, m_contour ) );
	}

	// Release temp image
	ImageResourceManager::getSingleton().releaseImage( cvTempImage );

	// Extract information of found blobs
	extractBlobsInformation();

	// Clear OpenCV contours storage 
	cvClearMemStorage( m_findContoursStorage );
}
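As the comment above notes, a background-subtraction result is a typical input for the blob search. A minimal sketch of producing such a bright-areas image with the C API; frame and background are assumed to be same-sized BGR images, and the threshold value 30 is an arbitrary placeholder.

	IplImage* grayFrame = cvCreateImage(cvGetSize(frame), 8, 1);
	IplImage* grayBg    = cvCreateImage(cvGetSize(frame), 8, 1);
	IplImage* moving    = cvCreateImage(cvGetSize(frame), 8, 1);

	cvCvtColor(frame, grayFrame, CV_BGR2GRAY);
	cvCvtColor(background, grayBg, CV_BGR2GRAY);
	cvAbsDiff(grayFrame, grayBg, moving);                   // changed pixels become bright
	cvThreshold(moving, moving, 30, 255, CV_THRESH_BINARY); // 'moving' is the kind of single-channel image update() expects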
Example No. 23
void FingertipPoseEstimation::OnCapture( IplImage * frame, int64 nTickCount, IplImage * gray )
{
    //
    // uhoh, let me do a little better way of this.
    // maybe we just don't need to copy when we have already another
    // copy of the images. anyway, i am gonna do this for now.
    //
    cvCopy( frame, _pImage );
    cvConvertImage( _pImage, _pGray, 0 );	// grayscale conversion happens because _pGray is single-channel; CV_BGR2GRAY is a cvCvtColor code, not a cvConvertImage flag
}
Example No. 24
IplImage* Utils::qtToCv(QImage *qImage)
{
    IplImage* cvImage;
    cvImage = cvCreateImageHeader(cvSize(qImage->width(), qImage->height()), IPL_DEPTH_8U, 4);
    cvImage->imageData = (char*)qImage->bits();
    IplImage* colorImage = cvCreateImage( cvGetSize(cvImage), IPL_DEPTH_8U, 3 );
    cvConvertImage( cvImage, colorImage );
    cvReleaseImageHeader(&cvImage);    // only the header was allocated; the pixel data still belongs to the QImage
    return colorImage;
}
Example No. 25
void OpenCVOperations::flipVertically(SLImage * input, SLImage * output) {
	if (NULL == input || NULL == output)
		return;
	OpenCVImage * iplInput = dynamic_cast<OpenCVImage *>(input);
	OpenCVImage * iplOutput = dynamic_cast<OpenCVImage *>(output);
	if (0 == iplInput || 0 == iplOutput ||
			0 == iplInput->getIplImage() || 0 == iplOutput->getIplImage())
		return;
	cvConvertImage( iplInput->getIplImage(), iplOutput->getIplImage(), CV_CVTIMG_FLIP );

}
Example No. 26
// Read a video, convert each frame to grayscale and write a log-polar transform of the frames to a new file
// argv[1]: input video file
// argv[2]: name of new output file
//
int main( int argc, char* argv[] ) {
    CvCapture* capture = cvCreateFileCapture( argv[1] );
    if (!capture){
        return -1;
    }
    IplImage* bgr_frame;
    double fps = cvGetCaptureProperty (
        capture,
        CV_CAP_PROP_FPS
    );

    CvSize size = cvSize(
        (int)cvGetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH),
        (int)cvGetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT)
    );
    
    CvVideoWriter* writer = cvCreateVideoWriter(
        argv[2],
        CV_FOURCC('M','J','P','G'),
        fps,
        size
    );
    
    IplImage* logpolar_frame = cvCreateImage(
        size,
        IPL_DEPTH_8U,
        3
    );

    IplImage* gray_frame = cvCreateImage(
        size,
        IPL_DEPTH_8U,
        1
    );

    while( (bgr_frame=cvQueryFrame(capture)) != NULL ) {
        cvConvertImage(
            bgr_frame,
            gray_frame,
            0    // gray_frame has a single channel, so this converts to grayscale; CV_RGB2GRAY is not a cvConvertImage flag
        );
        cvLogPolar( bgr_frame, logpolar_frame, 
                    cvPoint2D32f(bgr_frame->width/2,
                    bgr_frame->height/2), 
                    40, 
                    CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS );
        cvWriteToAVI( writer, logpolar_frame );
    }
    cvReleaseVideoWriter( &writer );
    cvReleaseImage( &gray_frame );
    cvReleaseImage( &logpolar_frame );
    // bgr_frame is owned by the capture and is released together with it by cvReleaseCapture
    cvReleaseCapture( &capture );
}
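gray_frame above is computed but never written anywhere. A sketch, assuming a separate grayscale output is actually wanted, of an alternative main loop that saves it; the output file name is a placeholder, and the writer is created with is_color = 0 so that single-channel frames are accepted.

    CvVideoWriter* gray_writer = cvCreateVideoWriter(
        "gray_out.avi",                    // placeholder name, not from the original example
        CV_FOURCC('M','J','P','G'),
        fps,
        size,
        0                                  // is_color = 0: frames are single-channel
    );
    while( (bgr_frame = cvQueryFrame(capture)) != NULL ) {
        cvConvertImage( bgr_frame, gray_frame, 0 );   // grayscale via the 1-channel destination
        cvWriteFrame( gray_writer, gray_frame );
    }
    cvReleaseVideoWriter( &gray_writer );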
Example No. 27
void Frame::grayScale()
{
	int flip=0;
	if(image->origin!=IPL_ORIGIN_TL){
		flip=CV_CVTIMG_FLIP;
	}
	IplImage* img_nvg = cvCreateImage(cvGetSize(image), image->depth, 1);
	cvConvertImage(image, img_nvg, flip);
	cvReleaseImage(&image);
	image=img_nvg;
}
Example No. 28
void OpenCVOperations::swapRB(SLImage * input, SLImage * output) {
	if (NULL == input || NULL == output)
		return;
	OpenCVImage * iplInput = dynamic_cast<OpenCVImage *>(input);
	OpenCVImage * iplOutput = dynamic_cast<OpenCVImage *>(output);
	if (0 == iplInput || 0 == iplOutput ||
			0 == iplInput->getIplImage() || 0 == iplOutput->getIplImage())
		return;
	cvConvertImage( iplInput->getIplImage(), iplOutput->getIplImage(),CV_CVTIMG_SWAP_RB );

}
Example No. 29
void determine_optimal_sign_classification( IplImage* original_image, IplImage* red_point_image, CvSeq* red_components, CvSeq* background_components, IplImage* result_image )
{
	int width_step=original_image->widthStep;
	int pixel_step=original_image->widthStep/original_image->width;
	IplImage* mask_image = cvCreateImage( cvGetSize(original_image), 8, 1 );
	IplImage* grayscale_image = cvCreateImage( cvGetSize(original_image), 8, 1 );
	cvConvertImage( original_image, grayscale_image );
	IplImage* thresholded_image = cvCreateImage( cvGetSize(original_image), 8, 1 );
	cvZero( thresholded_image );
	cvZero( result_image );
	int row=0,col=0;
	CvSeq* curr_red_region = red_components;
	
	while (curr_red_region != NULL)
	{
		cvZero( mask_image );
		CvScalar color = CV_RGB( 255, 255, 255 );
		CvScalar mask_value = cvScalar( 255 );	//white point
		// Determine which background components are contained within the red component (i.e. holes)
		//  and create a binary mask of those background components.
		CvSeq* curr_background_region = curr_red_region->v_next;
		if (curr_background_region != NULL)
		{
			while (curr_background_region != NULL)
			{
				cvDrawContours( mask_image, curr_background_region, mask_value, mask_value, -1, CV_FILLED, 8 );
				cvDrawContours( result_image, curr_background_region, color, color, -1, CV_FILLED, 8 );
				curr_background_region = curr_background_region->h_next;
			}
			int hist_size=256;
			CvHistogram* hist = cvCreateHist( 1, &hist_size, CV_HIST_ARRAY );
			cvCalcHist( &grayscale_image, hist, 0, mask_image );
			// Determine an optimal threshold on the points within those components (using the mask)
			int optimal_threshold = determine_optimal_threshold( hist );
			apply_threshold_with_mask(grayscale_image,result_image,mask_image,optimal_threshold);
			cvReleaseHist( &hist );	// free the per-region histogram
		}
		curr_red_region = curr_red_region->h_next;
	}

	for (row=0; row < result_image->height; row++)
	{
		unsigned char* curr_red = GETPIXELPTRMACRO( red_point_image, 0, row, width_step, pixel_step );
		unsigned char* curr_result = GETPIXELPTRMACRO( result_image, 0, row, width_step, pixel_step );
		for (col=0; col < result_image->width; col++)
		{
			curr_red += pixel_step;
			curr_result += pixel_step;
			if (curr_red[0] > 0)
				curr_result[2] = 255;
		}
	}

	cvReleaseImage( &mask_image );
	cvReleaseImage( &grayscale_image );
	cvReleaseImage( &thresholded_image );
}
Example No. 30
IplImage* BouyObject::GVColorMask(const IplImage * imgIn) const
{
    IplImage * imgOut = cvCreateImage(cvGetSize(imgIn),IPL_DEPTH_8U, 1);
    IplImage * temp = cvCreateImage(cvGetSize(imgIn),IPL_DEPTH_8U, 1);

    IplImage * thresh = NULL;
    mThresh->Setup(mNearColor.val[2],mNearColor.val[1],mNearColor.val[0],50,50,50);
    //mThresh->Setup(mNearTarget);
    mThresh->Process(imgIn,thresh);
    cvConvertImage(thresh, imgOut);

    mThresh->Setup(mFarColor.val[2],mFarColor.val[1],mFarColor.val[0],50,50,50);
    //mThresh->Setup(mNearTarget);
    mThresh->Process(imgIn,thresh);
    cvConvertImage(thresh, temp);

    VisionUtils::CombineMasks(imgOut,temp,imgOut);

    cvReleaseImage(&temp);
    return imgOut;
}