/*
 * 4-character code of codec. see http://www.fourcc.org/
 *
 * Returns the FOURCC as a 4-byte Ruby string (e.g. "XVID").
 */
VALUE rb_fourcc(VALUE self) {
  double value = 0;
  try {
    value = cvGetCaptureProperty(CVCAPTURE(self), CV_CAP_PROP_FOURCC);
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  // The property is a 32-bit FOURCC stored in a double. Reinterpreting the
  // double's raw bytes (as the old sprintf("%s", (char*)&fourcc) did) reads
  // the IEEE-754 representation instead of the code and could overrun the
  // buffer; convert to int first and unpack one byte per character.
  int code = (int)value;
  char str[5];
  str[0] = (char)(code & 0xFF);
  str[1] = (char)((code >> 8) & 0xFF);
  str[2] = (char)((code >> 16) & 0xFF);
  str[3] = (char)((code >> 24) & 0xFF);
  str[4] = '\0';
  return rb_str_new2(str);
}
/*
 * Get boolean flags indicating whether images should be converted to RGB
 */
VALUE rb_get_convert_rgb(VALUE self) {
  double value = 0;
  try {
    value = cvGetCaptureProperty(CVCAPTURE(self), CV_CAP_PROP_CONVERT_RGB);
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  // Non-zero means the backend delivers RGB frames.
  return ((int)value != 0) ? Qtrue : Qfalse;
}
/*
 * Set the capture property identified by +id+ to +value+.
 * Returns the value reported back by the backend as a Float.
 */
VALUE rb_set_capture_property(VALUE self, int id, VALUE value) {
  double ret = 0;
  try {
    CvCapture* capture = CVCAPTURE(self);
    ret = cvSetCaptureProperty(capture, id, NUM2DBL(value));
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return rb_float_new(ret);
}
/*
 * Read the capture property identified by +id+.
 * Returns the backend's value as a Float.
 */
VALUE rb_get_capture_property(VALUE self, int id) {
  double ret = 0;
  try {
    CvCapture* capture = CVCAPTURE(self);
    ret = cvGetCaptureProperty(capture, id);
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return rb_float_new(ret);
}
/*
 * call-seq:
 *   grab -> true or false
 *
 * Grabs the next frame and stores it internally, which is fast and therefore
 * useful for synchronizing reads from several cameras at once. The grabbed
 * frame is not exposed here because it may still be in the camera/driver's
 * compressed format; use retrieve to decode and fetch it.
 *
 * Returns true when a frame was grabbed successfully, false otherwise.
 */
VALUE rb_grab(VALUE self) {
  int ok = 0;
  try {
    ok = cvGrabFrame(CVCAPTURE(self));
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return (ok != 0) ? Qtrue : Qfalse;
}
/*
 * Get size of frames in the video stream.
 */
VALUE rb_get_size(VALUE self) {
  CvSize size;
  try {
    CvCapture* capture = CVCAPTURE(self);
    double width = cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH);
    double height = cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT);
    size = cvSize((int)width, (int)height);
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return cCvSize::new_object(size);
}
/*
 * call-seq:
 *   retrieve -> IplImage or nil
 *
 * Decodes and returns the image grabbed with grab.
 * Returns nil when no frame is available.
 */
VALUE rb_retrieve(VALUE self) {
  VALUE image = Qnil;
  try {
    // Guard the OpenCV calls so a cv::Exception becomes a Ruby error instead
    // of unwinding through the Ruby VM (matches the other capture wrappers).
    IplImage* frame = cvRetrieveFrame(CVCAPTURE(self));
    if (!frame) {
      return Qnil;
    }
    // Preserve the source bit depth instead of assuming CV_8U, consistent
    // with rb_query, so 16-bit/float streams are not mislabeled.
    image = cIplImage::new_object(cvSize(frame->width, frame->height),
                                  CV_MAKETYPE(IPL2CV_DEPTH(frame->depth), frame->nChannels));
    if (frame->origin == IPL_ORIGIN_TL) {
      cvCopy(frame, CVARR(image));
    }
    else {
      // Bottom-left origin: flip vertically so the result is always top-down.
      cvFlip(frame, CVARR(image));
    }
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return image;
}
/*
 * Set size of frames in the video stream.
 */
VALUE rb_set_size(VALUE self, VALUE value) {
  double ret = 0;
  const CvSize size = VALUE_TO_CVSIZE(value);
  try {
    CvCapture* capture = CVCAPTURE(self);
    cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, size.width);
    // Return whatever the backend reports for the last property set.
    ret = cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, size.height);
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return DBL2NUM(ret);
}
/*
 * call-seq:
 *   query -> IplImage or nil
 *
 * Grabs and returns a frame from camera or file. Just a combination of
 * grab and retrieve in one call.
 *
 * NOTE(review): a second definition of rb_query appears later in this file;
 * only one of the two should be kept or the translation unit will not build.
 */
VALUE rb_query(VALUE self) {
  VALUE image = Qnil;
  try {
    IplImage* frame = cvQueryFrame(CVCAPTURE(self));
    if (!frame) {
      return Qnil;
    }
    // Preserve the source bit depth instead of hard-coding CV_8U, matching
    // the other rb_query definition, so 16-bit/float streams keep their type.
    image = cIplImage::new_object(cvSize(frame->width, frame->height),
                                  CV_MAKETYPE(IPL2CV_DEPTH(frame->depth), frame->nChannels));
    if (frame->origin == IPL_ORIGIN_TL) {
      cvCopy(frame, CVARR(image));
    }
    else {
      // Bottom-left origin: flip vertically so the result is top-down.
      cvFlip(frame, CVARR(image));
    }
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return image;
}
/*
 * Grabs, decodes and returns the next video frame.
 * @overload query
 * @return [IplImage] Next video frame
 * @return [nil] Failed to read next video frame
 * @opencv_func cvQueryFrame
 */
VALUE rb_query(VALUE self) {
  VALUE image = Qnil;
  try {
    IplImage* frame = cvQueryFrame(CVCAPTURE(self));
    if (frame) {
      const int type = CV_MAKETYPE(IPL2CV_DEPTH(frame->depth), frame->nChannels);
      image = cIplImage::new_object(frame->width, frame->height, type);
      // Flip bottom-origin frames vertically so the result is always top-down.
      if (frame->origin == IPL_ORIGIN_TL) {
        cvCopy(frame, CVARR(image));
      }
      else {
        cvFlip(frame, CVARR(image));
      }
    }
  }
  catch (cv::Exception& e) {
    raise_cverror(e);
  }
  return image;
}
/*
 * call-seq:
 *   grab -> true or false
 *
 * Grabbed frame is stored internally. To grab frame
 * <i>fast</i> that is important for syncronization in case of reading from
 * several cameras simultaneously. The grabbed frames are not exposed because
 * they may be stored in compressed format (as defined by camera/driver).
 * To retrieve the grabbed frame, retrieve should be used.
 *
 * If grabbed frame was success, return true. Otherwise return false.
 */
VALUE rb_grab(VALUE self) {
  int grabbed = 0;
  try {
    grabbed = cvGrabFrame(CVCAPTURE(self));
  }
  catch (cv::Exception& e) {
    // Convert the OpenCV exception into a Ruby error instead of letting a
    // C++ exception unwind through the Ruby VM (matches the other wrappers,
    // which all guard their OpenCV calls).
    raise_cverror(e);
  }
  return grabbed ? Qtrue : Qfalse;
}
/*
 * Number of frames in video file.
 */
VALUE rb_frame_count(VALUE self) {
  const double count = cvGetCaptureProperty(CVCAPTURE(self), CV_CAP_PROP_FRAME_COUNT);
  return rb_dbl2big(count);
}
/*
 * Height of frames in the video stream.
 */
VALUE rb_height(VALUE self) {
  const double height = cvGetCaptureProperty(CVCAPTURE(self), CV_CAP_PROP_FRAME_HEIGHT);
  return rb_dbl2big(height);
}
/*
 * Frame rate
 */
VALUE rb_fps(VALUE self) {
  const double fps = cvGetCaptureProperty(CVCAPTURE(self), CV_CAP_PROP_FPS);
  // NOTE(review): rb_dbl2big truncates fractional rates (e.g. NTSC 29.97
  // becomes 29); kept as-is because callers may rely on the Integer return.
  return rb_dbl2big(fps);
}
/*
 * The format of the Mat objects returned by CvCapture#retrieve
 */
VALUE rb_format(VALUE self) {
  const double format = cvGetCaptureProperty(CVCAPTURE(self), CV_CAP_PROP_FORMAT);
  return rb_dbl2big(format);
}
/*
 * Currently unsupported
 */
VALUE rb_white_balance(VALUE self) {
  const double balance = cvGetCaptureProperty(CVCAPTURE(self), CV_CAP_PROP_WHITE_BALANCE);
  return rb_dbl2big(balance);
}
/*
 * Exposure (only for cameras)
 */
VALUE rb_exposure(VALUE self) {
  const double exposure = cvGetCaptureProperty(CVCAPTURE(self), CV_CAP_PROP_EXPOSURE);
  return rb_dbl2big(exposure);
}
/*
 * Hue of the image (only for cameras)
 */
VALUE rb_hue(VALUE self) {
  const double hue = cvGetCaptureProperty(CVCAPTURE(self), CV_CAP_PROP_HUE);
  return rb_dbl2big(hue);
}
/*
 * Contrast of the image (only for cameras)
 */
VALUE rb_contrast(VALUE self) {
  const double contrast = cvGetCaptureProperty(CVCAPTURE(self), CV_CAP_PROP_CONTRAST);
  return rb_dbl2big(contrast);
}
/*
 * Brightness of the image (only for cameras)
 */
VALUE rb_brightness(VALUE self) {
  const double brightness = cvGetCaptureProperty(CVCAPTURE(self), CV_CAP_PROP_BRIGHTNESS);
  return rb_dbl2big(brightness);
}
/*
 * Film current position in milliseconds or video capture timestamp.
 */
VALUE rb_millisecond(VALUE self) {
  const double msec = cvGetCaptureProperty(CVCAPTURE(self), CV_CAP_PROP_POS_MSEC);
  return rb_dbl2big(msec);
}
/*
 * Saturation of the image (only for cameras)
 */
VALUE rb_saturation(VALUE self) {
  const double saturation = cvGetCaptureProperty(CVCAPTURE(self), CV_CAP_PROP_SATURATION);
  return rb_dbl2big(saturation);
}
/*
 * 0-based index of the frame to be decoded/captured next
 */
VALUE rb_frames(VALUE self) {
  const double pos = cvGetCaptureProperty(CVCAPTURE(self), CV_CAP_PROP_POS_FRAMES);
  return rb_float_new(pos);
}
/*
 * Gain of the image (only for cameras)
 */
VALUE rb_gain(VALUE self) {
  const double gain = cvGetCaptureProperty(CVCAPTURE(self), CV_CAP_PROP_GAIN);
  return rb_dbl2big(gain);
}
/*
 * Relative position of video file (0 - start of the film, 1 - end of the film)
 */
VALUE rb_avi_ratio(VALUE self) {
  const double ratio = cvGetCaptureProperty(CVCAPTURE(self), CV_CAP_PROP_POS_AVI_RATIO);
  return rb_float_new(ratio);
}
/*
 * Boolean flags indicating whether images should be converted to RGB
 */
VALUE rb_convert_rgb(VALUE self) {
  const double value = cvGetCaptureProperty(CVCAPTURE(self), CV_CAP_PROP_CONVERT_RGB);
  // Note: deliberately compares against exactly 1, as the original did.
  return ((int)value == 1) ? Qtrue : Qfalse;
}
/*
 * Size of frames in the video stream.
 */
VALUE rb_size(VALUE self) {
  CvCapture* capture = CVCAPTURE(self);
  const int width = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH);
  const int height = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT);
  return cCvSize::new_object(cvSize(width, height));
}
/*
 * TOWRITE (note: only supported by DC1394 v 2.x backend currently)
 */
VALUE rb_rectification(VALUE self) {
  const double rectification = cvGetCaptureProperty(CVCAPTURE(self), CV_CAP_PROP_RECTIFICATION);
  return rb_dbl2big(rectification);
}
/*
 * Width of frames in the video stream.
 */
VALUE rb_width(VALUE self) {
  const double width = cvGetCaptureProperty(CVCAPTURE(self), CV_CAP_PROP_FRAME_WIDTH);
  return rb_dbl2big(width);
}