void rspfOpenCVSmoothFilter::runUcharTransformation(rspfImageData* tile)
{ 

	IplImage *input;
	IplImage *output;

	char* bSrc;
	char* bDst;

	int nChannels = tile->getNumberOfBands();

	for(int k=0; k<nChannels; k++) {
		printf("Channel %d\n",k);
		input=cvCreateImageHeader(cvSize(tile->getWidth(),tile->getHeight()),8,1);
		output=cvCreateImageHeader(cvSize(tile->getWidth(),tile->getHeight()),8,1);
		bSrc = static_cast<char*>(tile->getBuf(k));
		input->imageData=bSrc;
		bDst = static_cast<char*>(theTile->getBuf(k));
		output->imageData=bDst;
		cvSmooth(input,output,theSmoothType,theParam1,theParam2,theParam3,theParam4);
		cvReleaseImageHeader(&input);
		cvReleaseImageHeader(&output);
	}

	theTile->validate(); 
 
}
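Note on the pattern used throughout these examples: cvCreateImageHeader allocates only the IplImage struct, not the pixel memory, so it can wrap an existing buffer for zero-copy processing, and the matching cvReleaseImageHeader frees the struct without touching the buffer. A minimal sketch of the idiom (buf, w and h are assumed to exist):

	// Wrap an existing 8-bit single-channel buffer without copying
	IplImage *hdr = cvCreateImageHeader(cvSize(w, h), IPL_DEPTH_8U, 1);
	hdr->imageData = buf;          // borrow the caller's memory
	// ... run any cv* function on hdr ...
	cvReleaseImageHeader(&hdr);    // frees the header only, not buf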
void rspfOpenCVSobelFilter::runUcharTransformation(rspfImageData* tile) {
   
	IplImage *input;
	IplImage *output;

	char* bSrc;
	char* bDst;
	
	int nChannels = tile->getNumberOfBands();

	for(int k=0; k<nChannels; k++) {
		printf("Channel %d\n",k);
		input=cvCreateImageHeader(cvSize(tile->getWidth(),tile->getHeight()),8,1);
		output=cvCreateImageHeader(cvSize(tile->getWidth(),tile->getHeight()),8,1);
		bSrc = static_cast<char*>(tile->getBuf(k));
		input->imageData=bSrc;
		bDst = static_cast<char*>(theTile->getBuf(k));
		output->imageData=bDst;
		IplImage * tmp = cvCreateImage(cvSize(tile->getWidth(),tile->getHeight()),IPL_DEPTH_16S,1);
		cvSobel(input,tmp,theXOrder,theYOrder,theApertureSize); 
		cvConvertScale(tmp,output);
		cvReleaseImageHeader(&input);
		cvReleaseImageHeader(&output);
		cvReleaseImage(&tmp);
	}

	theTile->validate(); 
}
Example #3
void radial_sample(int width, int height, char* data, IplImage *unwrapped, int slice)
{
	IplImage *cvcast = cvCreateImageHeader(cvSize(width, height),
			IPL_DEPTH_8U, 1);
	cvcast->imageData = data;

	// cvSaveImage("slice.png",cvcast);
	
	CvPoint center = cvPoint(cx,cy);

	unsigned char* linebuf;
	for(int sample = 0; sample < RADIAL_SAMPLES; sample++) {
		float theta = ((float)sample)*((2.0*PI)/(float)RADIAL_SAMPLES);
		CvPoint outer = calc_ray_outer(theta, center);

		// printf("%g:\t%d,%d\n", theta*(180.0/PI), outer.x, outer.y);
		cvClipLine(cvSize(width, height), &outer, &center);
		int linesize = abs(center.x-outer.x)+abs(center.y-outer.y)+1;
		linebuf = (unsigned char*)malloc(linesize);
		cvSampleLine(cvcast,outer,center,linebuf,4);
		
		IplImage *castline = cvCreateImageHeader(cvSize(linesize,1), IPL_DEPTH_8U, 1);
		castline->imageData = (char*)linebuf;

		IplImage *sobel = cvCreateImage(cvSize(linesize,1), IPL_DEPTH_8U, 1);

		cvSobel(castline, sobel, 1, 0, 3);

		int layer = 0;
		for(int i = 0; (i < linesize) && (layer < MAX_LAYERS); i++) {
			// printf(" %d,", (int)cvGetReal1D(sobel,i));
			if((int)cvGetReal1D(sobel,i) > SOBEL_THRESH) {
				int max = 0, max_i = 0;
				for(; i < linesize; i++) {
					int curval = (int)cvGetReal1D(sobel,i);
					if(curval == 0) break;
					if(curval > max) {
						max = curval;
						max_i = i;
					}
				}
				cvSetReal2D(unwrapped,slice,(layer*RADIAL_SAMPLES)+sample,cvGetReal1D(castline,max_i));
				// printf("%d\t",max);
				layer++;
			}
		}
		// printf("\n");
	
		/*	
		char filename[] = "line000.png";
		sprintf(filename,"line%03d.png",(int)(theta*(180.0/PI)));
		cvSaveImage(filename,sobel);
		*/
		
		cvReleaseImageHeader(&castline);
		cvReleaseImage(&sobel);

		free(linebuf);
	}
}
void rspfOpenCVErodeFilter::runUcharTransformation(rspfImageData* tile)
{   
  IplImage *input;
  IplImage *output;

  char* bSrc;
  char* bDst;

  int nChannels = tile->getNumberOfBands();

  for(int k=0; k<nChannels; k++) {
	printf("Channel %d\n",k);
	input=cvCreateImageHeader(cvSize(tile->getWidth(),tile->getHeight()),8,1);
	output=cvCreateImageHeader(cvSize(tile->getWidth(),tile->getHeight()),8,1);
	bSrc = static_cast<char*>(tile->getBuf(k));
	input->imageData=bSrc;
	bDst = static_cast<char*>(theTile->getBuf(k));
	output->imageData=bDst;
	cvErode(input,output,NULL,theIterations); // a 3x3 rectangular structuring element is used
	cvReleaseImageHeader(&input);
	cvReleaseImageHeader(&output);
  }
  theTile->validate(); 

}
void BleWindowsCaptureSource::run()
{
    // TODO: allow selecting which screen to capture
    // QGuiApplication::screens();
    while (!m_stop) {
        QElapsedTimer elapsedTimer;
        elapsedTimer.start();

        QScreen *screen = QGuiApplication::primaryScreen();

        if (screen) {
            QPixmap pixmap = screen->grabWindow(m_wid, m_x, m_y, m_width, m_height);
#if 1
            // TODO: draw the cursor onto the captured image
            QRect desktopRect = QRect(QPoint(0, 0), screen->size());
            if (desktopRect.contains(QCursor::pos())) {
                drawCursor(&pixmap);
            }
#endif
            QImage image = pixmap.toImage();

            m_modifyMutex.lock();           // Start lock

            BleImage be;
            be.width = image.width();
            be.height = image.height();

            int imageSize = be.width * be.height * 3;
            be.data = new char[imageSize];

            IplImage *oriImage = cvCreateImageHeader(cvSize(image.width(), image.height()), IPL_DEPTH_8U, 4);
            cvSetData(oriImage, image.bits(), image.bytesPerLine());

            IplImage *dstImage = cvCreateImageHeader(cvSize(image.width(), image.height()), IPL_DEPTH_8U, 3);
            cvSetData(dstImage, be.data, be.width * 3);

            cvCvtColor(oriImage, dstImage, CV_BGRA2BGR);

            be.dataSize = imageSize;
            be.format = BleImage_Format_BGR24;

            m_image = be;

            cvReleaseImageHeader(&oriImage);
            cvReleaseImageHeader(&dstImage);

            m_modifyMutex.unlock();        // End unlock
        }

        int elapsedMs = elapsedTimer.elapsed();
        int needSleepMs = m_interval - elapsedMs;
        if (needSleepMs < 0) {
            needSleepMs = 0;
        }
        msleep(needSleepMs);
    }

    log_trace("BleWindowsCaptureSource exit normally.");
}
void BleImageProcess::paintEvent(QPaintEvent *event)
{
    Q_UNUSED(event);

    QPainter p(this);

    p.setRenderHint(QPainter::SmoothPixmapTransform);

    // background
    p.fillRect(rect(), QBrush(QColor(48, 48, 48)));

    // element draw
    for (int i = 0; i < m_sources.size(); ++i) {
        const SourcePair & pair = m_sources.at(i);
        BleSourceAbstract *s = pair.source;

        // TODO: image data may be used by another thread
        BleImage image = s->getImage();

        if (image.dataSize <= 0) continue;

        QImage qimage;
        if (image.format == BleImage_Format_BGR24) {
            IplImage *oriImage = cvCreateImageHeader(cvSize(image.width, image.height), IPL_DEPTH_8U, 3);
            cvSetData(oriImage, image.data, image.width*3);

            IplImage *dstImage = cvCreateImageHeader(cvSize(image.width, image.height), IPL_DEPTH_8U, 3);
            cvSetData(dstImage, image.data, image.width*3);

            cvCvtColor(oriImage, dstImage, CV_BGR2RGB);

            cvReleaseImageHeader(&oriImage);
            cvReleaseImageHeader(&dstImage);
        }
        qimage = QImage((uchar*)image.data, image.width, image.height, QImage::Format_RGB888);

        p.drawPixmap(pair.rect, QPixmap::fromImage(qimage));
        // p.drawImage(pair.rect, qimage);
    }

    if (m_activePair && m_activePair->rect.isValid()) {
        QPen pen(Qt::SolidLine);

        pen.setColor(Qt::white);
        pen.setWidth(2);
        pen.setStyle(Qt::DotLine);
        p.setPen(pen);
        p.drawRect(m_activePair->rect);

        QRect topLeftRect(m_activePair->rect.x(), m_activePair->rect.y(), 8, 8);
        p.fillRect(topLeftRect, QBrush(Qt::red));

        QRect bottomRightRect(m_activePair->rect.bottomRight().x(), m_activePair->rect.bottomRight().y(), -8, -8);
        p.fillRect(bottomRightRect, QBrush(Qt::red));
    }
}
static gboolean
gst_opencv_video_filter_set_caps (GstBaseTransform * trans, GstCaps * incaps,
    GstCaps * outcaps)
{
  GstOpencvVideoFilter *transform = GST_OPENCV_VIDEO_FILTER (trans);
  GstOpencvVideoFilterClass *klass =
      GST_OPENCV_VIDEO_FILTER_GET_CLASS (transform);
  gint in_width, in_height;
  gint in_depth, in_channels;
  gint out_width, out_height;
  gint out_depth, out_channels;
  GError *in_err = NULL;
  GError *out_err = NULL;

  if (!gst_opencv_parse_iplimage_params_from_caps (incaps, &in_width,
          &in_height, &in_depth, &in_channels, &in_err)) {
    GST_WARNING_OBJECT (transform, "Failed to parse input caps: %s",
        in_err->message);
    g_error_free (in_err);
    return FALSE;
  }

  if (!gst_opencv_parse_iplimage_params_from_caps (outcaps, &out_width,
          &out_height, &out_depth, &out_channels, &out_err)) {
    GST_WARNING_OBJECT (transform, "Failed to parse output caps: %s",
        out_err->message);
    g_error_free (out_err);
    return FALSE;
  }

  if (klass->cv_set_caps) {
    if (!klass->cv_set_caps (transform, in_width, in_height, in_depth,
            in_channels, out_width, out_height, out_depth, out_channels))
      return FALSE;
  }

  if (transform->cvImage) {
    cvReleaseImage (&transform->cvImage);
  }
  if (transform->out_cvImage) {
    cvReleaseImage (&transform->out_cvImage);
  }

  transform->cvImage =
      cvCreateImageHeader (cvSize (in_width, in_height), in_depth, in_channels);
  transform->out_cvImage =
      cvCreateImageHeader (cvSize (out_width, out_height), out_depth,
      out_channels);

  gst_base_transform_set_in_place (GST_BASE_TRANSFORM (transform),
      transform->in_place);
  return TRUE;
}
guint32 haarwrapper_flip(t_haarwrapper *hc, t_haarwrapper_image* im, t_haarwrapper_image* im2) 
{
 
  IplImage *img = cvCreateImageHeader(cvSize(im->width,im->height), IPL_DEPTH_8U, 3);
  img->widthStep = im->rowbytes;
  img->imageData = (char*)im->data[0];

  IplImage *img2 = cvCreateImageHeader(cvSize(im2->width,im2->height), IPL_DEPTH_8U, 3);
  img2->widthStep = im2->rowbytes;
  img2->imageData = (char*)im2->data[0];

  cvFlip(img, img2, 1);

  cvReleaseImageHeader(&img);   // release the headers; pixel data is owned by the caller
  cvReleaseImageHeader(&img2);
  return(0);
}
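When the source rows are padded, widthStep must be set to the true stride before any OpenCV call, as haarwrapper_flip does above. cvSetData assigns imageData and widthStep in one call; a hedged equivalent of the header setup above:

  // Equivalent header setup using cvSetData (same t_haarwrapper_image as above)
  IplImage *img = cvCreateImageHeader(cvSize(im->width, im->height), IPL_DEPTH_8U, 3);
  cvSetData(img, im->data[0], im->rowbytes);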
int main (int argc,char* argv[]){
	if (argc != 2 && argc != 3){
		printf("usage:\n %s /path/to/recoding/filename.oni\n",argv[0]);
		return 0;
	}
	Xn_sensor sensor(WIDTH,HEIGHT);
	sensor.play(argv[1],false);
	cvNamedWindow( "Model Extractor Viewer", 1 );
	IplImage* rgb_image = cvCreateImageHeader(cvSize(WIDTH,HEIGHT), 8, 3);
	IplImage* test = cvCreateImageHeader(cvSize(WIDTH,HEIGHT), 8, 3);
	IplImage* gray = cvCreateImage(cvSize(WIDTH,HEIGHT), 8, 1);
	Mat img;
	pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZRGB>);
	pcl::PointCloud<pcl::PointXYZRGB>::Ptr model (new pcl::PointCloud<pcl::PointXYZRGB>);
	
	//pcl::visualization::CloudViewer viewer("Model Extractor Viewer");

	
	//Read Fiducial from file
	Fiducial fiducial("fiducial.yml");
	Pose pose;
	while(/*!viewer.wasStopped() && */!sensor.endPlaying()){
		//Get the frame 
		sensor.update();
		sensor.getPCL(cloud);
		cvSetData(rgb_image,sensor.rgb,rgb_image->widthStep);
		//Estimate Camera Pose from fiducial
		cvCvtColor(rgb_image,gray,CV_BGR2GRAY);
		if (fiducial.find(gray,true)){
			pose.estimate(gray,fiducial);
			//fiducial.draw(rgb_image);
		}
		if (pose.isFound()){
			printf("Rotation");
			printMat<double>(pose.getR());
			printf("Translation");
			printMat<double>(pose.getT());
			//Segment volume around the fiducial
			boxFilter(cloud,pose);
			//Create 3D model
			buildModel(cloud,model);
		}
		//viewer.showCloud (model);
		
	}
	pcl::io::savePCDFileBinary ("model.pcd", *model);
	sensor.shutdown();
	return 0;
}
Example #10
int main(int argc, char* argv[])
{
	printf("DUOLib Version:       v%s\n", GetLibVersion());

	// Open DUO camera and start capturing
	if(!OpenDUOCamera(WIDTH, HEIGHT, FPS))
	{
		printf("Could not open DUO camera\n");
		return 0;
	}
	// Create OpenCV windows
	cvNamedWindow("Left");
	cvNamedWindow("Right");

	// Set exposure and LED brightness
	SetExposure(50);
	SetLed(25);

	// Create image headers for left & right frames
	IplImage *left = cvCreateImageHeader(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1);
	IplImage *right = cvCreateImageHeader(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1);

	// Run capture loop until <Esc> key is pressed
	while((cvWaitKey(1) & 0xff) != 27)
	{
		// Capture DUO frame
		PDUOFrame pFrameData = GetDUOFrame();
		if(pFrameData == NULL) continue;

		// Set the image data
		left->imageData = (char*)pFrameData->leftData;
		right->imageData = (char*)pFrameData->rightData;

		// Process images here (optional)

		// Display images
		cvShowImage("Left", left);
		cvShowImage("Right", right);
	}

	// Release image headers
	cvReleaseImageHeader(&left);
	cvReleaseImageHeader(&right);

	// Close DUO camera
	CloseDUOCamera();
	return 0;
}
Example #11
//
// decode buffer
//
IplImage * CvCapture_GStreamer::retrieveFrame(int)
{
    if(!buffer)
        return 0;

    if(!frame) {
        gint height, width;
        GstCaps *buff_caps = gst_buffer_get_caps(buffer);
        assert(gst_caps_get_size(buff_caps) == 1);
        GstStructure* structure = gst_caps_get_structure(buff_caps, 0);

        if(!gst_structure_get_int(structure, "width", &width) ||
                !gst_structure_get_int(structure, "height", &height))
            return 0;

        frame = cvCreateImageHeader(cvSize(width, height), IPL_DEPTH_8U, 3);
        gst_caps_unref(buff_caps);
    }

    // no need to memcpy, just use gstreamer's buffer :-)
    frame->imageData = (char *)GST_BUFFER_DATA(buffer);
    //memcpy (frame->imageData, GST_BUFFER_DATA(buffer), GST_BUFFER_SIZE (buffer));
    //gst_buffer_unref(buffer);
    //buffer = 0;
    return frame;
}
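The zero-copy trick above ties the returned IplImage to the lifetime of the GStreamer buffer. If a caller needs the frame after the buffer is gone, it has to clone it; a minimal sketch, assuming frame was returned by retrieveFrame:

    // Deep-copy the borrowed frame (allocates and copies pixel data)
    IplImage *owned = cvCloneImage(frame);
    // ... safe to use owned after the GstBuffer is unreffed ...
    cvReleaseImage(&owned);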
bool HiwrCameraControllerNodelet::copyRead(){
    std::unique_lock<std::mutex> lk(mutex_);

    if(!lk.owns_lock()){
        try {
            lk.lock();
        } catch(const std::system_error& e) {
            std::cout << "coin2 Caught system_error with code " << e.code()
                      << " meaning " << e.what() << '\n';
        }
    }

    if(!new_frame_){
        waiter_.wait(lk);
    }
    image_ipl_ = cvCreateImageHeader(cvSize(config_width_ ,config_height_), 8, 1);
    dealMemory();
    const int total = config_width_*config_height_*2;

    if(total!=bytes_used_)
        return false;

    int j=0;
    for(int i=0; i< total; j++){
        final_[j]=frame_[i];
        i+=2;
    }
    new_frame_=false;
    lk.unlock();

    return true;
}
Example #13
bool JpegSaver::saveNUimageAsJpeg(NUimage* image, const std::string& pFileName)
{
    unsigned char r,g,b;
    char* bgrBuffer = new char[image->width() * image->height() * 3];
    int bufferIndex = 0;
    pixels::Pixel temp; 
    for(int y = 0; y < image->height(); y++)
    {
        for (int x = 0; x < image->width(); x++)
        {
            temp = image->image[y][x];
            ColorModelConversions::fromYCbCrToRGB(temp.y,temp.cb,temp.cr,r,g,b);
            bgrBuffer[bufferIndex++] = b;
            bgrBuffer[bufferIndex++] = g;
            bgrBuffer[bufferIndex++] = r;
        }
    }

    IplImage* fIplImageHeader;
    fIplImageHeader = cvCreateImageHeader(cvSize(image->width(), image->height()), 8, 3);
    fIplImageHeader->imageData = bgrBuffer;
    cvSaveImage(pFileName.c_str(), fIplImageHeader);
    if (fIplImageHeader)
    {
        cvReleaseImageHeader(&fIplImageHeader);
    }
    delete [] bgrBuffer;
    return true;
}
Example #14
static gboolean
gst_motiondetect_set_caps (GstBaseTransform *trans, GstCaps *incaps,
    GstCaps *outcaps)
{
  gint width, height, depth, ipldepth, channels;
  GError *err = NULL;
  GstStructure *structure = gst_caps_get_structure (incaps, 0);
  StbtMotionDetect *filter = GST_MOTIONDETECT (trans);
  if (!filter) {
    return FALSE;
  }
  
  if (!gst_structure_get_int (structure, "width", &width) ||
      !gst_structure_get_int (structure, "height", &height) ||
      !gst_structure_get_int (structure, "depth", &depth)) {
    g_set_error (&err, GST_CORE_ERROR, GST_CORE_ERROR_NEGOTIATION,
        "No width/height/depth in caps");
    return FALSE;
  }
  if (gst_structure_has_name (structure, "video/x-raw-rgb")) {
    channels = 3;
  } else if (gst_structure_has_name (structure, "video/x-raw-gray")) {
    channels = 1;
  } else {
    g_set_error (&err, GST_CORE_ERROR, GST_CORE_ERROR_NEGOTIATION,
        "Unsupported caps %s", gst_structure_get_name (structure));
    return FALSE;
  }
  if (depth / channels == 8) {
    ipldepth = IPL_DEPTH_8U;
  } else {
    g_set_error (&err, GST_CORE_ERROR, GST_CORE_ERROR_NEGOTIATION,
        "Unsupported depth/channels %d/%d", depth, channels);
    return FALSE;
  }

  if (filter->cvCurrentImage) {
    cvReleaseImageHeader (&filter->cvCurrentImage);
    filter->cvCurrentImage = NULL;
  }
  if (filter->cvReferenceImageGray) {
    cvReleaseImage (&filter->cvReferenceImageGray);
    filter->cvReferenceImageGray = NULL;
  }
  if (filter->cvCurrentImageGray) {
    cvReleaseImage (&filter->cvCurrentImageGray);
    filter->cvCurrentImageGray = NULL;
  }

  filter->cvCurrentImage =
    cvCreateImageHeader (cvSize (width, height), ipldepth, channels);
  filter->cvReferenceImageGray = cvCreateImage(
    cvSize (width, height), IPL_DEPTH_8U, 1);
  filter->cvCurrentImageGray = cvCreateImage(
    cvSize (width, height), IPL_DEPTH_8U, 1);

  filter->state = MOTION_DETECT_STATE_ACQUIRING_REFERENCE_IMAGE;

  return gst_motiondetect_check_mask_compability(filter);
}
Example #15
// --------------------------------------------------------------------------
//! @brief   Get an image from the AR.Drone's camera.
//! @return  An OpenCV image (IplImage or cv::Mat)
//! @retval  NULL Failure
// --------------------------------------------------------------------------
ARDRONE_IMAGE ARDrone::getImage(void)
{
    // There is no image
    if (!img) return ARDRONE_IMAGE(NULL);

    // Enable mutex lock
    if (mutexVideo) pthread_mutex_lock(mutexVideo);

    // AR.Drone 2.0
    if (version.major == ARDRONE_VERSION_2) {
        // Copy current frame to an IplImage
        memcpy(img->imageData, pFrameBGR->data[0], pCodecCtx->width * ((pCodecCtx->height == 368) ? 360 : pCodecCtx->height) * sizeof(uint8_t) * 3);
    }
    // AR.Drone 1.0
    else {
        // If the sizes of the buffer and the IplImage are different
        if (pCodecCtx->width != img->width || pCodecCtx->height != img->height) {
            // Resize the image to 320x240
            IplImage *small_img = cvCreateImageHeader(cvSize(pCodecCtx->width, pCodecCtx->height), IPL_DEPTH_8U, 3);
            small_img->imageData = (char*)bufferBGR;
            cvResize(small_img, img, CV_INTER_CUBIC);
            cvReleaseImageHeader(&small_img);
        }
        // For 320x240 image, just copy it
        else memcpy(img->imageData, bufferBGR, pCodecCtx->width * pCodecCtx->height * sizeof(uint8_t) * 3);
    }
    
    // The latest image has been read, so change newImage accordingly
    newImage = false;

    // Disable mutex lock
    if (mutexVideo) pthread_mutex_unlock(mutexVideo);

    return ARDRONE_IMAGE(img);
}
/* this function handles the link with other elements */
static gboolean
gst_template_match_handle_sink_event (GstPad * pad, GstObject * parent,
                                      GstEvent * event)
{
    GstTemplateMatch *filter;
    GstVideoInfo info;
    gboolean res = TRUE;

    filter = GST_TEMPLATE_MATCH (parent);
    switch (GST_EVENT_TYPE (event)) {
    case GST_EVENT_CAPS:
    {
        GstCaps *caps;
        gst_event_parse_caps (event, &caps);
        gst_video_info_from_caps (&info, caps);

        if (filter->cvImage) {
            cvReleaseImageHeader (&filter->cvImage);
        }
        filter->cvImage =
            cvCreateImageHeader (cvSize (info.width, info.height), IPL_DEPTH_8U,
                                 3);
        break;
    }
    default:
        break;
    }

    res = gst_pad_event_default (pad, parent, event);

    return res;
}
Example #17
/* Function: createIplImageFromImageInfo
 *
 * Description: Populates an IplImage with image information and data stored in 
 *     an ImageInfo struct.
 *
 * Parameters:
 *     image: empty IplImage to populate using values from ImageInfo
 *     imageInfo: ImageInfo struct populated with image info from MATLAB input
 * 
 * Returns: 0 on success, error code on error
 */
int createIplImageFromImageInfo(
    _InOut_ IplImage **image,
    _In_ ImageInfo imageInfo)
{ 
    // create single-channel IplImage with same height, width, and depth as
    // field values in ImageInfo struct
    *image = cvCreateImageHeader(cvSize(imageInfo.imageWidth, imageInfo.imageHeight), (imageInfo.imageDepth == UINT16) ? IPL_DEPTH_16U : IPL_DEPTH_8U, 1);
    
    // allocate memory for IplImage image data
    (*image)->imageData = (char *)malloc(imageInfo.imageWidthStep * imageInfo.imageHeight * sizeof(char));
    
    if ((*image)->imageData == NULL)
    {
        return OUT_OF_MEMORY_ERROR;
    }
    
    // copy image data into IplImage
    memcpy((*image)->imageData, imageInfo.imageData, imageInfo.imageWidthStep * imageInfo.imageHeight);        
        
    // set additional IplImage info
    (*image)->widthStep = imageInfo.imageWidthStep;
    (*image)->imageDataOrigin = (*image)->imageData;
    
    return 0;
}
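Because this function malloc()s the pixel data itself, the matching cleanup must free the data before releasing the header; cvReleaseImageHeader alone would leak the pixels. A hedged counterpart (the function name is hypothetical):

/* Hypothetical counterpart to createIplImageFromImageInfo: free the
 * malloc'ed pixel data first, then release the header struct. */
void releaseIplImageFromImageInfo(IplImage **image)
{
    if (image != NULL && *image != NULL)
    {
        free((*image)->imageDataOrigin);
        cvReleaseImageHeader(image);
    }
}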
Example #18
/****************** Shrink the image to 640*512 for display ***********************/
void CMainFrame::Resize()
{
	CvSize cvSize;
	cvSize.width = Width;
	cvSize.height = Height;

	// Create an IplImage header and initialize it with the image data captured by the camera
	IplImage *iplImage = cvCreateImageHeader(cvSize,IPL_DEPTH_8U,3);
	cvSetData(iplImage,m_pImageBuffer,Width*3);

	CvSize my_cvSize;           // size of the scaled-down image
	my_cvSize.width = Width/2;
	my_cvSize.height = Height/2;

	IplImage *iplgraytemp = cvCreateImage(my_cvSize,IPL_DEPTH_8U,3); // buffer for the scaled-down image

	cvResize(iplImage,iplgraytemp,CV_INTER_NN);

	// Copy the scaled image data out of the IplImage structure
	memcpy(DisplayBuffer,(BYTE*)iplgraytemp->imageData,Height/2*Width/2*3);

	// Release the allocated image and the borrowed header
	cvReleaseImage(&iplgraytemp);
	cvReleaseImageHeader(&iplImage);
}
IplImage *boolarr2img(bool* barr, CvSize s) {

    // Wrap the bool array as an 8-bit image (assumes sizeof(bool) == 1),
    // then scale in place so true -> 255 and false -> 0.
    IplImage *img = cvCreateImageHeader(s,IPL_DEPTH_8U,1);
    img->imageData = (char *)barr;
    cvConvertScale(img,img,255);
    return img;
}
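boolarr2img relies on sizeof(bool) == 1 so the bool array can be reinterpreted as 8-bit pixels; the conversion is in place, so the caller's array ends up holding 0/255 values. A usage sketch under that assumption:

    // A 4x2 mask becomes a binary 0/255 image (mask data is overwritten)
    bool mask[8] = {true, false, true, false, false, true, false, true};
    IplImage *img = boolarr2img(mask, cvSize(4, 2));
    // ... use img ...
    cvReleaseImageHeader(&img);   // mask itself is owned by the caller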
Example #20
/** Initialize
*
* PRECONDITION
* REQUIRE(isColor = 0 / 1)
*
* WARNING
* EXAMPLES
*
* @param filename       the name of the output video file
* @param codeType       FOURCC code of the codec used for the output file
* @param fps            frames per second
* @param height         image height
* @param width          image width
* @param isColor        whether the image is color
*
* @return void
*/
void CLHVideoWriter::Init(const char* filename, int codeType, float fps, int height, int width, int isColor)
{
    mHeight = height;
    mWidth = width;
    if (isColor != 0)
    {
        mpFrameImage = cvCreateImageHeader(cvSize(width, height), IPL_DEPTH_8U, 3);
    }
    else
    {
        mpFrameImage = cvCreateImageHeader(cvSize(width, height), IPL_DEPTH_8U, 1);
    }
    //mpVideoWriter = cvCreateVideoWriter(filename, CV_FOURCC('D','I','V','X'), fps, cvSize(width, height), isColor);
	//mpVideoWriter = cvCreateVideoWriter(filename, CV_FOURCC('X','V','I','D'), fps, cvSize(width, height), isColor);
	mpVideoWriter = cvCreateVideoWriter(filename, codeType, fps, cvSize(width, height), isColor);
}
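A writer initialized this way typically wraps each incoming buffer in the pre-allocated header and appends it; a hedged sketch of such a companion method (WriteFrame is hypothetical, not part of the source):

// Hypothetical companion method: wrap an incoming packed frame buffer
// in the pre-allocated header and append it to the video file.
void CLHVideoWriter::WriteFrame(char* data)
{
    mpFrameImage->imageData = data;             // wrap, no copy
    cvWriteFrame(mpVideoWriter, mpFrameImage);
}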
Example #21
static IplImage*
icvRetrieveImage( void* obj )
{
    IplImage* img = 0;

    if( CV_IS_IMAGE(obj) )
        img = (IplImage*)obj;
    else if( CV_IS_MAT(obj) )
    {
        CvMat* m = (CvMat*)obj;
        img = cvCreateImageHeader( cvSize(m->cols,m->rows),
                        CV_MAT_DEPTH(m->type), CV_MAT_CN(m->type) );
        cvSetData( img, m->data.ptr, m->step );
        img->imageDataOrigin = (char*)m->refcount;
        m->data.ptr = 0; m->step = 0;
        cvReleaseMat( &m );
    }
    else if( obj )
    {
        cvRelease( &obj );
        CV_Error( CV_StsUnsupportedFormat, "The object is neither an image, nor a matrix" );
    }

    return img;
}
Example #22
void 
BGRToIpl__v__event__v__input(
        struct bgrtoipl_module *module, 
        const char *bgr, 
        int width, 
        int height)
{
    void *output[] = { 
        "output", 
        "P9_IplImage", NULL, 
        NULL };
    IplImage *ipl = NULL;

    /* Send a BGR IplImage; this is what downstream modules expect */
    ipl = cvCreateImageHeader(cvSize(width, height), IPL_DEPTH_8U, 3);
    assert(ipl != NULL);

    ipl->imageData = (char *)bgr;
    ipl->widthStep = width*3*sizeof(char);

    output[2] = &ipl;
    module->framework("emit", output);

    cvReleaseImageHeader(&ipl);
}
bool CaptureDSCapture::start()
{
    if (m_pDSCapture) {
	HRESULT hr = m_pDSCapture->Init(true, false, mCaptureDevice.id().c_str(), NULL);

	if(SUCCEEDED(hr)){
		// Set video grabber media type
		AM_MEDIA_TYPE mt;
		ZeroMemory(&mt, sizeof(AM_MEDIA_TYPE));
		mt.majortype = MEDIATYPE_Video;
		mt.subtype = MEDIASUBTYPE_RGB24;
		hr = m_pDSCapture->SetVideoGrabberFormat(&mt);
	}

	if(SUCCEEDED(hr))
		hr = m_pDSCapture->ShowVideoCaptureFormatDialog();

	// We must connect filters before we can get connected media type from grabber(s)
	if(SUCCEEDED(hr))
		hr = m_pDSCapture->Connect();

	if(SUCCEEDED(hr)){
		AM_MEDIA_TYPE mt;
		ZeroMemory(&mt, sizeof(AM_MEDIA_TYPE));
		HRESULT hr = m_pDSCapture->GetVideoGrabberFormat(&mt);
		if(SUCCEEDED(hr)){
			// Examine the format block.
			if ((mt.formattype == FORMAT_VideoInfo) && 
				(mt.cbFormat >= sizeof(VIDEOINFOHEADER)) &&
				(mt.pbFormat != NULL) ) 
			{
				if(mt.subtype == MEDIASUBTYPE_RGB24)
					m_nBpp = 24;
				else if(mt.subtype == MEDIASUBTYPE_RGB32)
					m_nBpp = 32;

				VIDEOINFOHEADER *pVih = (VIDEOINFOHEADER*)mt.pbFormat;
				//cout << "Video capture: "<<pVih->bmiHeader.biWidth<<"x"<<pVih->bmiHeader.biHeight<<" "<<pVih->bmiHeader.biBitCount<<" bpp "<<fps<<" fps";
				m_nVideo_x_res = pVih->bmiHeader.biWidth;
				m_nVideo_y_res = pVih->bmiHeader.biHeight;
			}
		}
	}

	if(FAILED(hr)){
		return false;
	}

	m_pDSCapture->AddVideoCallback(&sampler);

        buffer_size = (int)(m_nVideo_x_res * m_nVideo_y_res * (m_nBpp/8.0));
        imgBuffer = new BYTE[buffer_size];
        imgBufferForCallback = new BYTE[buffer_size];
        mReturnFrame = cvCreateImageHeader(cvSize(m_nVideo_x_res, m_nVideo_y_res), IPL_DEPTH_8U,m_nBpp / 8);
        mReturnFrame->imageData = (char*)imgBuffer;
    }
    else {
        return false;   // no capture device; avoid dereferencing a null pointer below
    }
    m_pDSCapture->Start();
    mIsCapturing = true;
    return true;
}
Example #24
// OpenCLIF computations
CLIFGrayscaleResult
clifGrayscale(const IplImage* source,
              CLIFEnvironmentData* data,
              const cl_bool use_opencl)
{
    CLIFGrayscaleResult ret;
    if(!use_opencl) {
        ret.image = cvCreateImage(cvSize(source->width, source->height), IPL_DEPTH_8U, 1);
        cvCvtColor(source, ret.image, CV_BGR2GRAY);
        return ret;
    }
    
    cl_int error = CL_SUCCESS;
    
    // Init buffer
    error = clEnqueueWriteBuffer(data->environment.queue, data->bgr_to_gray_data.buffers[0], CL_FALSE, 0, source->widthStep * source->height, source->imageData, 0, NULL, NULL);
    clCheckOrExit(error);
    
    // Run kernel
    error = clEnqueueNDRangeKernel(data->environment.queue, data->environment.kernels[0], 2, NULL, data->bgr_to_gray_data.global_size, data->bgr_to_gray_data.local_size, 0, NULL, NULL);
    clCheckOrExit(error);
    
    // Read result
    cl_uchar* result = (cl_uchar*)clEnqueueMapBuffer(data->environment.queue, data->bgr_to_gray_data.buffers[1], CL_TRUE, CL_MAP_READ, 0, source->width * source->height, 0, NULL, NULL, &error);
    clCheckOrExit(error);
    
    // Return
    ret.image = cvCreateImageHeader(cvSize(source->width, source->height), IPL_DEPTH_8U, 1);
    cvSetData(ret.image, result, source->width);
    return ret;
}
Example #25
PyObject *wrapped_People_detectAllFaces(PyObject *self, PyObject *args)
{
  wrapped_People_t *pp = (wrapped_People_t*)self;

  // IplImage *image, const char* haar_classifier_filename, double threshold, IplImage *disparity_image, CvStereoCamModel *cam_model, bool do_draw)
  IplImage *input;
  char *haar_filename;
  double threshold;
  PyObject *disparity_image, *cam_model;
  int do_draw;

  {
    char *imgdata;
    int imgdata_size, x, y;
    if (!PyArg_ParseTuple(args, "s#iisdOOi", &imgdata, &imgdata_size, &x, &y, &haar_filename, &threshold, &disparity_image, &cam_model, &do_draw))
      return NULL;
    input = cvCreateImageHeader(cvSize(x, y), IPL_DEPTH_8U, 1);
    cvSetData(input, imgdata, x);
  }

  vector<CvRect> faces_vector = pp->c.detectAllFaces(input, haar_filename, threshold, NULL, NULL, do_draw);
  PyObject *r = PyList_New(faces_vector.size());
  for (size_t i = 0; i < faces_vector.size(); i++)
    PyList_SetItem(r, i, Py_BuildValue("iiii",
                    faces_vector[i].x,
                    faces_vector[i].y,
                    faces_vector[i].width,
                    faces_vector[i].height));
  return r;
}
Example #26
C_RESULT output_gtk_stage_open( void *cfg, vp_api_io_data_t *in, vp_api_io_data_t *out)
{
  rgbHeader = cvCreateImageHeader(cvSize(QVGA_WIDTH, QVGA_HEIGHT), IPL_DEPTH_8U, 3);  //[for colour]
  rgbHeader->imageData = NULL;	     //[for colour]

  return (SUCCESS);
}
Example #27
int BinkFixBinkCopyToBuffer(cBinkDll* dll, BINK* bnk, void* dest, int destpitch, uInt destheight, uInt destx, uInt desty, uInt flags)
{
	if(!bnk || !(flags & BINK_SURFACE_32) || !scaleVideos)
		return dll->BinkCopyToBuffer(bnk, dest, destpitch, destheight, destx, desty, flags);

	uInt Index = GetBinkIndex(bnk);
	if(Index)
	{
		BinkFix& Fix = Binks[Index - 1];

		//Fix.DstWidth = destpitch / 4;
		//Fix.DstHeight = destheight;
		//ValidateAspect(Fix, destx, desty);

		bnk->Width = Fix.SrcWidth;
		bnk->Height = Fix.SrcHeight;

		int res = dll->BinkCopyToBuffer(bnk, Fix.Image->imageData, Fix.Image->widthStep, Fix.Image->height, Fix.SrcX, Fix.SrcY, flags);

		bnk->Width = Fix.DstWidth;
		bnk->Height = Fix.DstHeight;

		if(Fix.Resized && ((Fix.Resized->width != destpitch / 4) || (Fix.Resized->height != destheight)))
			cvReleaseImageHeader(&Fix.Resized);
		if(!Fix.Resized)
			Fix.Resized = cvCreateImageHeader(cvSize(destpitch / 4, destheight), Fix.Image->depth, Fix.Image->nChannels);
		Fix.Resized->imageData = (char*)dest;

		if((Fix.Resized->width != Fix.DstWidth) || (Fix.Resized->height != Fix.DstHeight))
			cvSetImageROI(Fix.Resized, cvRect(0, 0, Fix.DstWidth, Fix.DstHeight));

		FwiSize fwSrcSize;
		fwSrcSize.width = Fix.Image->width;
		fwSrcSize.height = Fix.Image->height;

		FwiRect fwSrcRoi;
		fwSrcRoi.x = 0;
		fwSrcRoi.y = 0;
		fwSrcRoi.width = fwSrcSize.width;
		fwSrcRoi.height = fwSrcSize.height;

		FwiSize fwDstRoi;
		fwDstRoi.width = Fix.DstWidth;
		fwDstRoi.height = Fix.DstHeight;

		float xFactor = (float)Fix.DstWidth / (float)Fix.Image->width;
		float yFactor = (float)Fix.DstHeight / (float)Fix.Image->height;

		//Timer Measure;
		//Measure.Start();
		fwiResize_8u_C4R((Fw8u*)Fix.Image->imageData, fwSrcSize, Fix.Image->widthStep, fwSrcRoi, (Fw8u*)Fix.Resized->imageData, Fix.Resized->widthStep, fwDstRoi, xFactor, yFactor, FWI_INTER_LINEAR);
		//cvResize(Fix.Image, Fix.Resized, CV_INTER_LINEAR);
		//printf("%f\n", Measure.GetElapsedTimeSeconds());

		return res;
	}

	return dll->BinkCopyToBuffer(bnk, dest, destpitch, destheight, destx, desty, flags);
}
Example #28
IplImage* cvGetImageSubRect(IplImage* image, CvRect* rect) 
{ 
	IplImage* res = cvCreateImageHeader(cvSize(rect->width, rect->height), image->depth, image->nChannels);
	CvMat mat;
	cvGetSubRect(image, &mat, *rect);
	cvGetImage(&mat, res);
	return res;
}
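The returned header shares pixel data with the source image, so the caller must release it with cvReleaseImageHeader, not cvReleaseImage (which could free pixel memory the header does not own). A short usage sketch:

	// Crop a 100x100 view without copying pixels
	CvRect roi = cvRect(10, 10, 100, 100);
	IplImage* sub = cvGetImageSubRect(image, &roi);
	// ... process sub; writes go through to image ...
	cvReleaseImageHeader(&sub);   // header only; image still owns the data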
Example #29
void Open_GUI()
{
  int i;

  //Make the eye image (in monochrome):
  eye_image=cvCreateImageHeader(cvSize(RESOLUTION_WIDTH,RESOLUTION_HEIGHT), 8, 1 );
  eye_image->imageData=(char *)malloc(RESOLUTION_WIDTH*RESOLUTION_HEIGHT);

  //Make the eye image (in monochrome):
  threshold_image = cvCloneImage(eye_image);

  //Make the ellipse image (in RGB) :
  ellipse_image=cvCreateImageHeader(cvSize(RESOLUTION_WIDTH,RESOLUTION_HEIGHT), 8, 3 );
  ellipse_image->imageData=(char *)malloc(RESOLUTION_WIDTH*RESOLUTION_HEIGHT*3);
  
  //Make the scene image:    
  scene_image=cvCreateImageHeader(cvSize(RESOLUTION_WIDTH,RESOLUTION_HEIGHT), 8, 3 );
  scene_image->imageData=(char *)malloc(RESOLUTION_WIDTH*RESOLUTION_HEIGHT*3);

  //Make the grayscale scene image:    
  scene_image_grayscale=cvCreateImageHeader(cvSize(RESOLUTION_WIDTH,RESOLUTION_HEIGHT), 8, 1 );
  scene_image_grayscale->imageData=(char *)malloc(RESOLUTION_WIDTH*RESOLUTION_HEIGHT);

  //Create the windows
  cvNamedWindow(control_window, 1);
  cvNamedWindow(ellipse_window, 0);
  cvNamedWindow(scene_window, 0);
  cvNamedWindow(eye_window, 0);
  cvNamedWindow(original_eye_window, 0);

  //setup the mouse callback function here for calibration
  cvSetMouseCallback(scene_window, on_mouse_scene, NULL);        
  cvSetMouseCallback(eye_window, on_mouse_eye, NULL);        

  cvCreateTrackbar("Edge Threshold", control_window, &pupil_edge_thres, 255, NULL );
  cvCreateTrackbar("Rays Number", control_window, &rays, 180, NULL );
  cvCreateTrackbar("Min Feature Candidates", control_window, &min_feature_candidates, 30, NULL );
  cvCreateTrackbar("Corneal Window Size",control_window, &cr_window_size, FRAMEH, NULL );

  //Init colors
  White = CV_RGB(255,255,255);
  Red = CV_RGB(255,0,0);
  Green = CV_RGB(0,255,0);
  Blue = CV_RGB(0,0,255);
  Yellow = CV_RGB(255,255,0);
}
vector<VisionRecognitionResult> KITECH_ERSPViPRObjectRecognitionComp::Recognize(vector<unsigned char> image,int width,int height,int pixelBytes)
{
	//PrintMessage("SUCCESS:KITECH_ERSPViPRObjectRecognitionComp::Recognize()\n");

	vector<VisionRecognitionResult> _recognitionResult(0);

	IplImage *cvImage = cvCreateImageHeader( cvSize(width, height), 8, pixelBytes );
	cvImage->imageData = (char *)&image[0];

	Evolution::Matrix<int> e_img;
	Evolution::Image capture_img;

	if (ImageConversion (&e_img, (BYTE *)cvImage->imageData, width, height, pixelBytes) < 0) {
		return _recognitionResult;
	}

	capture_img.copy_matrix(e_img);		// obtain the image

	//Extract feature points
	Evolution::ObjRecKeypointList* capture_features = new Evolution::ObjRecKeypointList ();
	if (capture_features->extract (&capture_img) != Evolution::RESULT_SUCCESS) {
		capture_features->remove_ref ();
		return _recognitionResult;
	}

	//Recognize objects by comparing the feature points
	Evolution::ObjRecQuery* query = new Evolution::ObjRecQuery ();
	query->recognize (_vipr->objrecDB, capture_features);

	// The image features are not needed after recognition is performed 
	// so we should release the resource.
	capture_features->remove_ref ();

	int n = query->get_num_matches ();
	_recognitionResult.resize(n);

	for( int i = 0  ;  i < n  ;  i++ )
	{
		Evolution::ObjRecQuery::MatchInfo info[1]; 
		query->get_match_info (i, info);
	
		_recognitionResult[i].name = info->label;
		_recognitionResult[i].point1X = (long)info->rectangle[0][0];
		_recognitionResult[i].point1Y = height-(long)info->rectangle[0][1];
		_recognitionResult[i].point2X = (long)info->rectangle[1][0];
		_recognitionResult[i].point2Y = height-(long)info->rectangle[1][1];
		_recognitionResult[i].point3X = (long)info->rectangle[2][0];
		_recognitionResult[i].point3Y = height-(long)info->rectangle[2][1];
		_recognitionResult[i].point4X = (long)info->rectangle[3][0];
		_recognitionResult[i].point4Y = height-(long)info->rectangle[3][1];
	}
	query->remove_ref (); // clean-up the query object

	cvReleaseImageHeader(&cvImage);


	return _recognitionResult;
}